././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315690.089614 nova-32.0.0/0000775000175000017500000000000000000000000012562 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/.coveragerc0000664000175000017500000000015400000000000014703 0ustar00zuulzuul00000000000000[run] branch = True source = nova omit = nova/tests/* concurrency = eventlet [report] ignore_errors = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/.git-blame-ignore-revs0000664000175000017500000000064700000000000016671 0ustar00zuulzuul00000000000000# ignore codespell series f4852f4c81a6594dfd56eb18503648b3f449dcd4 2232ca95f22478bc90da5cb2d99cb0a4570fbc96 25fd9effd2e12a29643c6e7b5b5e2f5630ff3338 6314f09ed4574ca9d03a726c0cfd1d7108b1a12c 320c6fadde751e8de05e98416738a91b89c24de8 7402822f0bcc810eda6a2a55a9cc8fe778318557 # ignore sphinx-lint series 33a56781f48d603a54bc0cd9cd4dbe94f01fb88a df0a99a29adcd58c3c026f1ca1d0aa305d8809c4 0829c1b995610f414e4c33f5b3965ffeac21fa60 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/.mailmap0000664000175000017500000001726700000000000014220 0ustar00zuulzuul00000000000000# Format is: # # Alvaro Lopez Garcia Alvaro Lopez Andrew Bogott Andrew Bogott Andy Smith Andy Smith Andy Smith andy Andy Smith termie Andy Smith termie Anne Gentle annegentle Anthony Young Anthony Young Sleepsonthefloor Arvind Somya Arvind Somya Arvind Somya asomya@cisco.com <> Brad McConnell Brad McConnell bmcconne@rackspace.com <> Brian Lamar Brian Lamar brian-lamar Dan Wendlandt danwent Dan Wendlandt danwent Dan Wendlandt danwent@gmail.com <> Dan Wendlandt danwent@gmail.com Davanum Srinivas Davanum Srinivas Édouard Thuleau Thuleau Édouard Ethan Chu Guohui Liu Jake Dahn jakedahn Jason Koelker Jason Kölker Jay Pipes jaypipes@gmail.com <> Jiajun Liu Jian Wen Jian Wen Joe Gordon Joel Moore Joel Moore joelbm24@gmail.com <> John Griffith john-griffith John Tran John Tran Joshua Hesketh Joshua Hesketh Justin Santa Barbara Justin Santa Barbara Justin SB Justin Santa Barbara Superstack Kei Masumoto Kei Masumoto Kei masumoto Kei Masumoto masumotok Kun Huang lawrancejing Matt Dietz Matt Dietz Cerberus Matt Dietz Matthew Dietz Matt Dietz matt.dietz@rackspace.com <> Matt Dietz mdietz NTT PF Lab. NTT PF Lab. NTT PF Lab. Nachi Ueno NTT PF Lab. 
nova Nikolay Sokolov Nickolay Sokolov Paul Voccio paul@openstack.org <> Philip Knouff Phlip Knouff Renuka Apte renukaapte Sandy Walsh SandyWalsh Sateesh Chodapuneedi sateesh Tiantian Gao Tiantian Gao Vishvananda Ishaya Vishvananda Ishaya Vivek YS Vivek YS vivek.ys@gmail.com <> Yaguang Tang Yolanda Robla yolanda.robla Zhenguo Niu Zhongyue Luo ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/.pre-commit-config.yaml0000664000175000017500000000610000000000000017040 0ustar00zuulzuul00000000000000--- repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v5.0.0 hooks: # whitespace - id: trailing-whitespace - id: mixed-line-ending args: ['--fix', 'lf'] exclude: '.*\.(svg)$' # file format and permissions - id: fix-byte-order-marker - id: check-executables-have-shebangs - id: check-shebang-scripts-are-executable - id: check-json files: .*\.json$ - id: check-yaml files: .*\.(yaml|yml)$ # git - id: check-added-large-files - id: check-merge-conflict - id: check-case-conflict - id: detect-private-key exclude: '^doc/api_samples/.*|^nova/tests/.*' # python - id: debug-statements # nova/cmd/manage.py imports pdb on purpose. exclude: 'nova/cmd/manage.py' - id: check-docstring-first exclude: 'nova/virt/ironic/ironic_states.py' - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.5 hooks: - id: remove-tabs exclude: '.*\.(svg)$' - repo: https://github.com/hhatto/autopep8 rev: v2.3.2 hooks: - id: autopep8 files: '^.*\.py$' # run hacking after autopep8 so that if it can fix # it it will. note it will still fail in ci because # fixing an issue changes the code and that should # not happen in ci. - repo: https://opendev.org/openstack/hacking rev: 7.0.0 hooks: - id: hacking additional_dependencies: [] exclude: '^(doc|releasenotes|tools)/.*$' - repo: https://github.com/codespell-project/codespell rev: v2.4.1 hooks: - id: codespell args: ['--ignore-words=doc/dictionary.txt'] - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.15.0 hooks: - id: mypy additional_dependencies: - types-paramiko # keep this in-sync with '[mypy] files' in 'setup.cfg' files: | (?x)( nova/compute/manager.py | nova/compute/pci_placement_translator.py | nova/crypto.py | nova/filesystem.py | nova/limit/local.py | nova/limit/placement.py | nova/network/neutron.py | nova/pci | nova/privsep/path.py | nova/scheduler/client/report.py | nova/scheduler/request_filter.py | nova/scheduler/utils.py | nova/virt/driver.py | nova/virt/hardware.py | nova/virt/libvirt/machine_type_utils.py | nova/virt/libvirt/__init__.py | nova/virt/libvirt/cpu/__init__.py | nova/virt/libvirt/cpu/api.py | nova/virt/libvirt/cpu/core.py | nova/virt/libvirt/driver.py | nova/virt/libvirt/event.py | nova/virt/libvirt/guest.py | nova/virt/libvirt/host.py | nova/virt/libvirt/utils.py ) - repo: https://github.com/sphinx-contrib/sphinx-lint rev: v1.0.0 hooks: - id: sphinx-lint args: [--enable=default-role] files: ^doc/|releasenotes|api-guide types: [rst] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/.stestr.conf0000664000175000017500000000006100000000000015030 0ustar00zuulzuul00000000000000[DEFAULT] test_path=./nova/tests/unit top_dir=./ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/.zuul.yaml0000664000175000017500000010750600000000000014534 0ustar00zuulzuul00000000000000# See 
https://docs.openstack.org/infra/manual/drivers.html#naming-with-zuul-v3 # for job naming conventions. - job: name: nova-tox-functional-py310 parent: openstack-tox-functional-py310 description: | Run tox-based functional tests for the OpenStack Nova project under cPython version 3.10 with Nova specific irrelevant-files list. Uses tox with the ``functional-py310`` environment. This job also provides a parent for other projects to run the nova functional tests on their own changes. required-projects: # including nova here makes this job reusable by other projects - openstack/nova - openstack/placement irrelevant-files: &functional-irrelevant-files - ^.*\.rst$ - ^api-.*$ - ^doc/(source|test)/.*$ - ^nova/locale/.*$ - ^\.pre-commit-config\.yaml$ - ^releasenotes/.*$ vars: # explicitly stating the work dir makes this job reusable by other # projects zuul_work_dir: src/opendev.org/openstack/nova bindep_profile: test py310 timeout: 3600 - job: name: nova-tox-functional-py312 parent: openstack-tox-functional-py312 description: | Run tox-based functional tests for the OpenStack Nova project under cPython version 3.12 with Nova specific irrelevant-files list. Uses tox with the ``functional-py312`` environment. This job also provides a parent for other projects to run the nova functional tests on their own changes. required-projects: # including nova here makes this job reusable by other projects - openstack/nova - openstack/placement irrelevant-files: *functional-irrelevant-files vars: # explicitly stating the work dir makes this job reusable by other # projects zuul_work_dir: src/opendev.org/openstack/nova bindep_profile: test py312 timeout: 3600 - job: name: nova-tox-py312-threading parent: openstack-tox-py312 description: | Run tox-based unit tests for the OpenStack Nova project under cPython version 3.12 with eventlet disabled. Uses tox with the ``py312-threading`` environment. required-projects: # including nova here makes this job reusable by other projects and # allow depends-on placement to work with our placement fixture - openstack/nova - openstack/placement vars: # explicitly stating the work dir makes this job reusable by other # projects zuul_work_dir: src/opendev.org/openstack/nova bindep_profile: test py312 tox_envlist: py312-threading - job: name: nova-tox-validate-backport parent: openstack-tox description: | Determine whether a backport is ready to be merged by checking whether it has already been merged to master or more recent stable branches. Uses tox with the ``validate-backport`` environment. vars: tox_envlist: validate-backport - job: name: nova-live-migration parent: tempest-multinode-full-py3 description: | Run tempest live migration tests against local qcow2 ephemeral storage and shared LVM/iSCSI cinder volumes. irrelevant-files: - ^api-.*$ - ^(test-|)requirements.txt$ - ^.*\.rst$ - ^.git.*$ - ^doc/.*$ - ^nova/hacking/.*$ - ^nova/locale/.*$ - ^nova/policies/.*$ - ^nova/tests/.*$ - ^nova/test.py$ - ^nova/virt/ironic/.*$ - ^\.pre-commit-config\.yaml$ - ^releasenotes/.*$ - ^setup.cfg$ - ^tools/.*$ - ^tox.ini$ vars: tox_envlist: all tempest_test_regex: (^tempest\.api\.compute\.admin\.(test_live_migration|test_migration)) # revert this when bug #1940425 is fixed in neutron tempest_exclude_regex: (test_live_migration_with_trunk) devstack_localrc: &uec_image_vars # We are using the split kernel/initramfs image by default in an # effort to reduce the occurrence of guest kernel panics in the APIC # timer handler. 
We will cover testing of the full disk image by # using it in the nova-next job. CIRROS_VERSION: 0.6.2 DEFAULT_IMAGE_NAME: cirros-0.6.2-x86_64-uec DEFAULT_IMAGE_FILE_NAME: cirros-0.6.2-x86_64-uec.tar.gz # We need to set the IMAGE_URLS manually in order to get a UEC image # for what Tempest will use for CONF.compute.image_ref_alt. The first # image will be used for CONF.compute.image_ref and the second image # for CONF.compute.image_ref_alt. If we don't do this, the full disk # image will be downloaded and used for image_ref_alt instead. DOWNLOAD_DEFAULT_IMAGES: false IMAGE_URLS: http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-uec.tar.gz, http://download.cirros-cloud.net/0.6.1/cirros-0.6.1-x86_64-uec.tar.gz devstack_services: neutron-trunk: true openstack-cli-server: true devstack_local_conf: test-config: $TEMPEST_CONFIG: compute-feature-enabled: volume_backed_live_migration: true block_migration_for_live_migration: true block_migrate_cinder_iscsi: true post-run: playbooks/nova-live-migration/post-run.yaml - job: name: nova-ovs-hybrid-plug parent: tempest-multinode-full-py3 nodeset: devstack-two-node-debian-bookworm description: | Run move operations, reboot, and evacuation (via the same post-run hook as the nova-live-migration job) tests with the OVS network backend and the "iptables_hybrid" securitygroup firewall driver, aka "hybrid plug". The external events interactions between Nova and Neutron in these situations has historically been fragile. This job exercises them. irrelevant-files: &nova-base-irrelevant-files - ^api-.*$ - ^(test-|)requirements.txt$ - ^.*\.rst$ - ^.git.*$ - ^doc/.*$ - ^nova/hacking/.*$ - ^nova/locale/.*$ - ^nova/policies/.*$ - ^nova/tests/.*$ - ^nova/test.py$ - ^\.pre-commit-config\.yaml$ - ^releasenotes/.*$ - ^setup.cfg$ - ^tools/.*$ - ^tox.ini$ vars: tox_envlist: all # bug #1940425 only affect ml2/ovn so we execute # test_live_migration_with_trunk in this job to keep tempest_test_regex: (^tempest\..*compute\..*(migration|resize|reboot|spice).*) devstack_localrc: Q_AGENT: openvswitch Q_ML2_TENANT_NETWORK_TYPE: vxlan Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch ML2_L3_PLUGIN: router NOVA_VNC_ENABLED: False devstack_services: # Disable OVN services br-ex-tcpdump: false br-int-flows: false ovn-controller: false ovn-northd: false q-ovn-metadata-agent: false # Neutron services q-agt: true q-dhcp: true q-l3: true q-meta: true # NOTE(sean-k-mooney) this job is testing ml2/ovs with # hybrid plug we do not need to test cinder and we want to ensure # that we have at least one job without cinder in our gate so we # disable it in this job # Cinder services c-api: false c-bak: false c-sch: false c-vol: false # we also do not need swift in this job so disable it for speed s-account: false s-container: false s-object: false s-proxy: false openstack-cli-server: true # disable vnc and enable spice n-novnc: false n-spice: true devstack_local_conf: post-config: "/$NEUTRON_CORE_PLUGIN_CONF": securitygroup: firewall_driver: iptables_hybrid $NEUTRON_CONF: nova: live_migration_events: True $NOVA_CPU_CONF: compute: heal_instance_info_cache_interval: 60 group-vars: subnode: devstack_localrc: Q_AGENT: openvswitch Q_ML2_TENANT_NETWORK_TYPE: vxlan Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch ML2_L3_PLUGIN: router NOVA_VNC_ENABLED: False NOVA_SPICE_ENABLED: true devstack_services: # Disable OVN services br-ex-tcpdump: false br-int-flows: false ovn-controller: false ovn-northd: false ovs-vswitchd: false ovsdb-server: false q-ovn-metadata-agent: false # Neutron services q-agt: true # 
NOTE(sean-k-mooney) this job is testing ml2/ovs with # hybrid plug we do not need to test cinder and we want to ensure # that we have at least one job without cinder in our gate so we # disable it in this job # Cinder services c-api: false c-bak: false c-sch: false c-vol: false # we also do not need swift in this job so disable it for speed s-account: false s-container: false s-object: false s-proxy: false devstack_local_conf: post-config: "/$NEUTRON_CORE_PLUGIN_CONF": securitygroup: firewall_driver: iptables_hybrid $NEUTRON_CONF: nova: live_migration_events: True $NOVA_CPU_CONF: compute: heal_instance_info_cache_interval: 60 post-run: playbooks/nova-live-migration/post-run.yaml - job: name: nova-live-migration-ceph parent: devstack-plugin-ceph-multinode-tempest-py3 description: | Run tempest live migration tests against ceph ephemeral storage and cinder volumes. irrelevant-files: *nova-base-irrelevant-files vars: tox_envlist: all tempest_test_regex: (^tempest\.api\.compute\.admin\.(test_live_migration|test_migration)) # revert this when bug #1940425 is fixed in neutron tempest_exclude_regex: (test_live_migration_with_trunk) devstack_services: openstack-cli-server: true devstack_local_conf: test-config: $TEMPEST_CONFIG: compute-feature-enabled: volume_backed_live_migration: true block_migration_for_live_migration: false block_migrate_cinder_iscsi: false post-run: playbooks/nova-live-migration/post-run.yaml - job: name: nova-lvm parent: devstack-tempest description: | Run tempest compute API tests using LVM image backend. This only runs against nova/virt/libvirt/*, nova/privsep/* and .zuul.yaml changes. files: - ^nova/virt/libvirt/.*$ - ^nova/privsep/.*$ - .zuul.yaml vars: # Increase the swap size to avoid OOM errors when running the tests. configure_swap_size: 8192 # We use the "all" environment for tempest_test_regex and # tempest_exclude_regex. tox_envlist: all # Only run compute API tests. tempest_test_regex: ^tempest\.api\.compute # Skip slow tests. Also, skip some volume detach tests until bug#1998148 # is fixed. tempest_exclude_regex: (^tempest\.(api\.compute\.(volumes\.test_attach_volume\.AttachVolumeTestJSON\.test_attach_detach_volume|servers\.(test_server_rescue\.ServerStableDeviceRescueTest\.test_stable_device_rescue_disk_virtio_with_volume_attached|test_server_rescue_negative\.ServerRescueNegativeTestJSON\.test_rescued_vm_detach_volume)))|.*\[.*\bslow\b.*\]) devstack_local_conf: test-config: $TEMPEST_CONFIG: compute-feature-enabled: # NOTE(mriedem): resize of non-volume-backed lvm instances does # not yet work (bug 1831657). resize: false cold_migration: false devstack_localrc: NOVA_BACKEND: LVM # Do not waste time clearing volumes. LVM_VOLUME_CLEAR: none # Increase the size of the swift loopback device to accommodate RAW # snapshots from the LV based instance disks. # See bug #1913451 for more details. SWIFT_LOOPBACK_DISK_SIZE: 24G # As above, increase the total image limit per tenant to 10G GLANCE_LIMIT_IMAGE_SIZE_TOTAL: 10240 devstack_services: # Disable non-essential services that we don't need for this job. c-bak: false openstack-cli-server: true - job: name: nova-emulation parent: devstack-tempest description: | Run compute tests using emulated AARCH64 architecture. # NOTE(chateaulav): due to constraints with no IDE support for aarch64, # tests have been limited to eliminate any items that are incompatible. # This is to be re-evaluated as greater support is added and defined. 
files: - ^nova/virt/libvirt/.*$ - ^nova/objects/.*$ - ^nova/scheduler/.*$ - .zuul.yaml vars: tox_envlist: all tempest_concurrency: 4 tempest_test_regex: ^tempest\.(api\.compute\.servers|scenario\.test_network_basic_ops) tempest_exclude_regex: (^tempest\.(api\.compute\.servers\.(test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces_by_network_port|test_delete_server.DeleteServersTestJSON.test_delete_server_while_in_attached_volume.*|test_list_.*|test_disk_config|test_server_rescue.*|test_server_actions\.ServerActionsTestJSON\.test_resize.*|test_device_tag.*))|.*\[.*\bslow\b.*\]) devstack_localrc: <<: *uec_image_vars FORCE_CONFIG_DRIVE: False ADMIN_PASSWORD: emulation DATABASE_PASSWORD: $ADMIN_PASSWORD RABBIT_PASSWORD: $ADMIN_PASSWORD SERVICE_PASSWORD: $ADMIN_PASSWORD SWIFT_HASH: 1234abcd DOWNLOAD_DEFAULT_IMAGES: False IMAGE_URLS: "http://download.cirros-cloud.net/0.5.3/cirros-0.5.3-aarch64-disk.img" DEFAULT_INSTANCE_TYPE: m1.micro devstack_services: openstack-cli-server: true # Increase the swap size to avoid OOM errors when running the tests. configure_swap_size: 8192 devstack_local_conf: post-config: $NOVA_CPU_CONF: libvirt: # Use lower TB cache than default(1GiB), only applicable with # libvirt>=8.0.0 tb_cache_size: 128 pre-run: - playbooks/nova-emulation/pre.yaml # TODO(lucasagomes): Move this job to ML2/OVN when QoS Minimum Bandwidth # support is implemented. # See: https://docs.openstack.org/neutron/latest/ovn/gaps.html - job: name: nova-next parent: tempest-multinode-full-py3 description: | This job was added in Newton when placement and cellsv2 were optional. Placement and cellsv2 are required starting in Ocata. In Pike, the service user token functionality was added. This job is also unique in that it runs the post_test_hook from the nova repo, which runs post-test scripts to ensure those scripts are still working, e.g. archive_deleted_rows. In Queens, this job started testing the TLS console proxy code in the libvirt driver. Starting in Stein, the job was changed to run with python 3 and enabled volume multi-attach testing. Starting in Train, the job enabled counting quota usage from placement. Starting in Ussuri, the job was changed to multinode. Starting in Wallaby, the job defaults to the q35 machine type. Starting in Yoga, the job tests noVNC from source and enables unified limits. Starting in Caracal, the job compiles the mdev sample drivers from source. Runs all tempest compute API and most scenario tests concurrently. irrelevant-files: *nova-base-irrelevant-files # Run post-tempest tests like for nova-manage commands. post-run: playbooks/nova-next/post.yaml pre-run: playbooks/nova-next/pre.yaml required-projects: - novnc/novnc nodeset: openstack-two-node-noble vars: # We use the "all" environment for tempest_test_regex and # tempest_exclude_regex. tox_envlist: all # Run all compute API tests and most scenario tests at the default # concurrency (nproc/2 which is normally 4 in the gate). tempest_test_regex: ^tempest\.(scenario|api\.compute) # The tempest.scenario.test_network* tests are skipped because they # (1) take a long time and (2) are already covered in the # tempest-slow* job. If this regex gets more complicated use # tempest_test_exclude_list. # FIXME(lyarwood): The tempest.api.compute.admin.test_volume_swap tests # are skipped until bug #1929710 is resolved. 
# revert excluding test_live_migration_with_trunk when bug #1940425 # is fixed in neutron tempest_exclude_regex: ^tempest\.(scenario\.test_network_(?!qos)|api\.compute\.admin\.test_volume_swap)|tempest.api.compute.servers.test_device_tagging.TaggedAttachmentsTest.test_tagged_attachment|test_live_migration_with_trunk devstack_local_conf: post-config: $NOVA_CPU_CONF: libvirt: # Increase the number of PCIe ports per instance given the q35 # machine type attaches more devices by default than pc num_pcie_ports: 12 hw_machine_type: "x86_64=q35" # Use lower TB cache than default(1GiB), only applicable with # libvirt>=8.0.0 tb_cache_size: 128 compute: # Switch off the provider association refresh, which should # reduce the number of placement calls in steady state. Added in # Stein. resource_provider_association_refresh: 0 workarounds: # This wa is an improvement on hard reboot that cannot be turned # on unconditionally. But we know that ml2/ovs sends plug time # events so we can enable this in this ovs job for vnic_type # normal wait_for_vif_plugged_event_during_hard_reboot: normal $NOVA_CONF: quota: # Added in Train. count_usage_from_placement: True scheduler: # Added in Train. query_placement_for_image_type_support: True DEFAULT: # Added in Flamingo. Ensures that thread statistics is logged # each time a task is submitted to the executor so that web # can troubleshoot hanging threads easier. thread_pool_statistic_period: 0 "/$NEUTRON_CORE_PLUGIN_CONF": # Needed for QoS port heal allocation testing. ovs: bridge_mappings: public:br-ex resource_provider_bandwidths: br-ex:1000000:1000000 resource_provider_packet_processing_without_direction: :100 AGENT: tunnel_types: gre,vxlan ml2: type_drivers: flat,geneve,vlan,gre,local,vxlan test-config: $TEMPEST_CONFIG: network-feature-enabled: qos_placement_physnet: public qos_min_bw_and_pps: True compute-feature-enabled: # The q35 machine type doesn't support an IDE bus ide_bus: False # Added in Yoga. unified_limits: True neutron_plugin_options: available_type_drivers: flat,geneve,vlan,gre,local,vxlan devstack_localrc: LIBVIRT_TYPE: qemu Q_AGENT: openvswitch Q_ML2_TENANT_NETWORK_TYPE: vxlan Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch ML2_L3_PLUGIN: router # Enable TLS between the noVNC proxy & compute nodes; this requires # the tls-proxy service to be enabled. Added in Queens. NOVA_CONSOLE_PROXY_COMPUTE_TLS: True # Added in Stein. ENABLE_VOLUME_MULTIATTACH: True # Added in Ussuri. FORCE_CONFIG_DRIVE: True # Added in Yoga. NOVNC_FROM_PACKAGE: False NOVA_USE_UNIFIED_LIMITS: True # Added in Caracal. # Temporarly disabled due to # https://bugs.launchpad.net/nova/+bug/2110545 NOVA_COMPILE_MDEV_SAMPLES: False 'SYSTEMD_ENV_VARS["n-sch"]': OS_NOVA_DISABLE_EVENTLET_PATCHING=true 'SYSTEMD_ENV_VARS["n-api"]': OS_NOVA_DISABLE_EVENTLET_PATCHING=true 'SYSTEMD_ENV_VARS["n-api-meta"]': OS_NOVA_DISABLE_EVENTLET_PATCHING=true devstack_services: # Disable OVN services br-ex-tcpdump: false br-int-flows: false ovn-controller: false ovn-northd: false q-ovn-metadata-agent: false # Neutron services q-agt: true q-dhcp: true q-l3: true q-meta: true q-metering: true tls-proxy: true # neutron-* needed for QoS port heal allocation testing. neutron-placement: true neutron-qos: true # Disable non-essential services that we don't need for this job. c-bak: false openstack-cli-server: true devstack_plugins: # Needed for QoS port heal allocation testing. 
neutron: https://opendev.org/openstack/neutron nova: https://opendev.org/openstack/nova group-vars: subnode: devstack_localrc: LIBVIRT_TYPE: qemu Q_AGENT: openvswitch Q_ML2_TENANT_NETWORK_TYPE: vxlan Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch ML2_L3_PLUGIN: router NOVA_CONSOLE_PROXY_COMPUTE_TLS: True FORCE_CONFIG_DRIVE: True # Added in Yoga. NOVNC_FROM_PACKAGE: False devstack_services: # Disable OVN services br-ex-tcpdump: false br-int-flows: false ovn-controller: false ovn-northd: false ovs-vswitchd: false ovsdb-server: false q-ovn-metadata-agent: false # Neutron services q-agt: true tls-proxy: true c-bak: false - job: name: nova-tempest-v2-api parent: devstack-tempest branches: - master description: | This job runs the Tempest compute tests against v2.0 endpoint. Former names for this job was: * legacy-tempest-dsvm-nova-v20-api vars: tox_envlist: all tempest_test_regex: api.*compute devstack_localrc: TEMPEST_COMPUTE_TYPE: compute_legacy - job: name: nova-tempest-full-oslo.versionedobjects parent: tempest-full-py3 description: | Run test with git version of oslo.versionedobjects to check that changes to nova will work with the next released version of that library. required-projects: - openstack/oslo.versionedobjects vars: devstack_localrc: <<: *uec_image_vars - job: name: nova-grenade-multinode parent: grenade-multinode description: | Run a multinode grenade job and run the smoke, cold and live migration tests with the controller upgraded and the compute on the older release. The former names for this job were "nova-grenade-live-migration" and "legacy-grenade-dsvm-neutron-multinode-live-migration". irrelevant-files: *nova-base-irrelevant-files vars: devstack_local_conf: test-config: $TEMPEST_CONFIG: compute-feature-enabled: live_migration: true volume_backed_live_migration: true block_migration_for_live_migration: true block_migrate_cinder_iscsi: true tox_envlist: all tempest_test_regex: ((tempest\.(api\.compute|scenario)\..*smoke.*)|(^tempest\.api\.compute\.admin\.(test_live_migration|test_migration))|(^tempest\.api\.compute\.servers\.test_server_actions)) # revert this when bug #1940425 is fixed in neutron tempest_exclude_regex: (test_live_migration_with_trunk) - job: name: nova-multi-cell parent: tempest-multinode-full-py3 description: | Multi-node python3 job which runs with two nodes and two non-cell0 cells. The compute on the controller runs in cell1 and the compute on the subnode runs in cell2. irrelevant-files: *nova-base-irrelevant-files vars: # We use the "all" environment for tempest_test_regex and # tempest_test_exclude_list. tox_envlist: all # Run compute API and scenario tests. tempest_test_regex: ^tempest\.(scenario|(api\.compute)) tempest_test_exclude_list: '{{ ansible_user_dir }}/{{ zuul.projects["opendev.org/openstack/nova"].src_dir }}/devstack/nova-multi-cell-exclude-list.txt' devstack_local_conf: post-config: $NOVA_CONF: oslo_policy: # The default policy file is policy.json but the # setup-multi-cell-policy role will write to policy.yaml. policy_file: policy.yaml test-config: $TEMPEST_CONFIG: compute-feature-enabled: # Enable cold migration for migrating across cells. Note that # because NOVA_ALLOW_MOVE_TO_SAME_HOST=false, all cold migrations # will move across cells. cold_migration: true devstack_services: # Disable other non-essential services that we don't need for this job. c-bak: false openstack-cli-server: true devstack_localrc: # Setup two non-cell0 cells (cell1 and cell2). 
NOVA_NUM_CELLS: 2 # Disable resize to the same host so all resizes will move across # cells. NOVA_ALLOW_MOVE_TO_SAME_HOST: false # We only have two computes and we don't yet support cross-cell live # migration. LIVE_MIGRATION_AVAILABLE: false DEVSTACK_PARALLEL: True group-vars: peers: devstack_localrc: NOVA_ALLOW_MOVE_TO_SAME_HOST: true LIVE_MIGRATION_AVAILABLE: false subnode: devstack_localrc: # The subnode compute will get registered with cell2. NOVA_CPU_CELL: 2 devstack_services: # Disable other non-essential services that we don't need for this # job. c-bak: false # Perform setup for the multi-cell environment. Note that this runs # before devstack is setup on the controller host. pre-run: playbooks/nova-multi-cell/pre.yaml - job: name: nova-osprofiler-redis parent: tempest-smoke-py3-osprofiler-redis description: | Runs osprofiler with the Redis collector on a subset of compute-specific tempest-full-py3 smoke tests. irrelevant-files: *nova-base-irrelevant-files required-projects: - openstack/nova vars: # We use the "all" environment for tempest_test_regex. tox_envlist: all # Run compute API and only the test_server_basic_ops scenario tests. tempest_test_regex: ^tempest\.(scenario\.test_server_basic_ops|(api\.compute)) - job: name: nova-ceph-multistore parent: devstack-plugin-ceph-tempest-py3 description: | Just like the normal ceph job, but with glance multistore irrelevant-files: *nova-base-irrelevant-files required-projects: - openstack/nova timeout: 9000 pre-run: - playbooks/ceph/glance-setup.yaml vars: # revert this when bug #1940425 is fixed in neutron # FIXME(sean-k-mooney) skip the test_image_formats for now # as we have not configured this job correctly to run them tempest_exclude_regex: (test_live_migration_with_trunk|tempest.api.image.v2.test_images_formats) # NOTE(danms): Increase our swap size since we're dealing with # larger images and trigger OOMs. configure_swap_size: 8192 # NOTE(danms): These tests create an empty non-raw image, which nova # will refuse because we set never_download_image_if_on_rbd in this job. # Just skip these tests for this case. devstack_localrc: GLANCE_STANDALONE: True GLANCE_USE_IMPORT_WORKFLOW: True DEVSTACK_PARALLEL: True GLANCE_LIMIT_IMAGE_SIZE_TOTAL: 2048 MYSQL_REDUCE_MEMORY: True # NOTE(danms): This job is pretty heavy as it is, so we disable some # services that are not relevant to the nova-glance-ceph scenario # that this job is intended to validate. 
devstack_services: c-bak: false s-account: false s-container: false s-object: false s-proxy: false openstack-cli-server: true devstack_local_conf: test-config: $TEMPEST_CONFIG: image-feature-enabled: manage_locations: true volume: volume_size: 1 image: disk_formats: qcow2,ari,aki,vhd,vmdk,raw,ami,vdi,iso,vhdx post-config: $NOVA_CONF: libvirt: images_rbd_glance_store_name: robust workarounds: never_download_image_if_on_rbd: True $GLANCE_API_CONF: DEFAULT: enabled_backends: "cheap:file, robust:rbd, web:http" default_log_levels: "amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, oslo_messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN, keystoneauth=WARN, oslo.cache=INFO, dogpile.core.dogpile=INFO, oslo_policy=DEBUG" glance_store: default_backend: cheap stores: file, http, rbd default_store: file robust: rbd_store_pool: images rbd_store_user: glance rbd_store_ceph_conf: /etc/ceph/ceph.conf cheap: filesystem_store_datadir: /opt/stack/data/glance/images/ web: https_insecure: false os_glance_staging_store: filesystem_store_datadir: /opt/stack/data/glance/os_glance_staging_store/ os_glance_tasks_store: filesystem_store_datadir: /opt/stack/data/glance/os_glance_tasks_store/ $GLANCE_IMAGE_IMPORT_CONF: image_import_opts: image_import_plugins: "['image_conversion']" image_conversion: output_format: raw - job: name: tempest-integrated-compute-rbac-old-defaults parent: tempest-integrated-compute description: | This job runs the Tempest tests with Nova RBAC old defaults irrelevant-files: &policies-irrelevant-files - ^api-.*$ - ^(test-|)requirements.txt$ - ^.*\.rst$ - ^.git.*$ - ^doc/.*$ - ^nova/hacking/.*$ - ^nova/locale/.*$ - ^nova/tests/.*$ - ^nova/test.py$ - ^\.pre-commit-config\.yaml$ - ^releasenotes/.*$ - ^setup.cfg$ - ^tools/.*$ - ^tox.ini$ vars: devstack_services: openstack-cli-server: true devstack_localrc: NOVA_ENFORCE_SCOPE: false - project: # Please try to keep the list of job names sorted alphabetically. templates: - check-requirements - integrated-gate-compute - openstack-cover-jobs - openstack-python3-jobs - openstack-python3-jobs-arm64 - periodic-stable-jobs - publish-openstack-docs-pti - release-notes-jobs-python3 check: jobs: # We define our own irrelevant-files so we don't run the job # on things like nova docs-only changes. - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa: voting: false irrelevant-files: *nova-base-irrelevant-files - nova-ceph-multistore: irrelevant-files: *nova-base-irrelevant-files - neutron-linuxbridge-tempest: files: # NOTE(mriedem): This job has its own irrelevant-files section # so that we only run it on changes to networking and libvirt/vif # code; we don't need to run this on all changes. - ^nova/network/.*$ - nova/virt/libvirt/vif.py - nova-live-migration - nova-live-migration-ceph - nova-lvm - nova-multi-cell - nova-next - nova-ovs-hybrid-plug - nova-tox-validate-backport: voting: false - nova-tox-functional-py310 - nova-tox-functional-py312 - openstack-tox-py312: voting: true - tempest-integrated-compute: # NOTE(gmann): Policies changes do not need to run all the # integration test jobs. Running only tempest and grenade # common jobs will be enough along with nova functional # and unit tests. 
irrelevant-files: *policies-irrelevant-files - grenade-skip-level-always: irrelevant-files: *policies-irrelevant-files - nova-grenade-multinode: irrelevant-files: *policies-irrelevant-files - tempest-ipv6-only: irrelevant-files: *nova-base-irrelevant-files - openstacksdk-functional-devstack: irrelevant-files: *nova-base-irrelevant-files - cyborg-tempest: irrelevant-files: *nova-base-irrelevant-files voting: false - barbican-tempest-plugin-simple-crypto: irrelevant-files: *nova-base-irrelevant-files voting: false - nova-tox-py312-threading gate: jobs: - nova-live-migration - nova-tox-functional-py310 - nova-tox-functional-py312 - nova-multi-cell - nova-next - nova-tox-validate-backport - nova-ceph-multistore: irrelevant-files: *nova-base-irrelevant-files - neutron-linuxbridge-tempest: files: # NOTE(mriedem): This job has its own irrelevant-files section # so that we only run it on changes to networking and libvirt/vif # code; we don't need to run this on all changes. - ^nova/network/.*$ - nova/virt/libvirt/vif.py - tempest-integrated-compute: irrelevant-files: *policies-irrelevant-files - grenade-skip-level-always: irrelevant-files: *policies-irrelevant-files - nova-grenade-multinode: irrelevant-files: *policies-irrelevant-files - tempest-ipv6-only: irrelevant-files: *nova-base-irrelevant-files - openstacksdk-functional-devstack: irrelevant-files: *nova-base-irrelevant-files - nova-tox-py312-threading periodic-weekly: jobs: # Runs emulation feature functionality test less frequently due # to being the initial release and experimental in nature. - nova-emulation - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode-shard - tempest-centos9-stream-fips - whitebox-devstack-multinode - tempest-integrated-compute-rbac-old-defaults experimental: jobs: - ironic-tempest-bfv: irrelevant-files: *nova-base-irrelevant-files - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode-shard: irrelevant-files: *nova-base-irrelevant-files - devstack-plugin-nfs-tempest-full: irrelevant-files: *nova-base-irrelevant-files - nova-osprofiler-redis - tempest-pg-full: irrelevant-files: *nova-base-irrelevant-files - nova-tempest-full-oslo.versionedobjects: irrelevant-files: *nova-base-irrelevant-files - nova-tempest-v2-api: irrelevant-files: *nova-base-irrelevant-files - neutron-ovs-tempest-dvr-ha-multinode-full: irrelevant-files: *nova-base-irrelevant-files - neutron-ovs-tempest-iptables_hybrid: irrelevant-files: *nova-base-irrelevant-files - os-vif-ovs: irrelevant-files: *nova-base-irrelevant-files - devstack-plugin-ceph-compute-local-ephemeral: irrelevant-files: *nova-base-irrelevant-files - devstack-tobiko-nova: irrelevant-files: *nova-base-irrelevant-files - tempest-centos9-stream-fips: irrelevant-files: *nova-base-irrelevant-files - nova-emulation - tempest-integrated-compute-centos-9-stream: irrelevant-files: *nova-base-irrelevant-files - whitebox-devstack-multinode - tempest-integrated-compute-rbac-old-defaults: irrelevant-files: *policies-irrelevant-files ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315688.0 nova-32.0.0/AUTHORS0000664000175000017500000021732200000000000013641 0ustar00zuulzuul00000000000000Aaron Lee Aaron Rosen Aaron Rosen Aarti Kriplani Abhijeet Malawade Abhijeet Malawade Abhishek Anand Abhishek Chanda Abhishek Kekane Abhishek Kekane Abhishek Sharma Abhishek Talwar Adalberto Medeiros Adam Gandelman Adam Gandelman Adam Gandelman Adam Johnson Adam Kacmarsky Adam Spiers Adam Young Ade Lee Adelina Tuvenie Aditi Rajagopal Aditi Raveesh Aditi 
Raveesh Aditya Prakash Vaja Adrian Chiris Adrian Smith Adrian Vladu Adrien Cunin Adrien Cunin Ahmad Hassan Akash Gangil Akihiro MOTOKI Akihiro Motoki Akira KAMIO Akira Yoshiyama Akira Yoshiyama Ala Rezmerita Alberto Planas Alessandro Pilotti Alessandro Pilotti Alessandro Tagliapietra Alessio Ababilov Alessio Ababilov Alex Deiter Alex Gaynor Alex Glikson Alex Handle Alex Hmelevsky Alex Holden Alex Meade Alex Szarka Alex Szarka Alex Xu AlexFrolov AlexMuresan Alexander Bochkarev Alexander Burluka Alexander Gordeev Alexander Gorodnev Alexander Sakhnov Alexander Schmidt Alexandra Settle Alexandra Settle Alexandre Arents Alexandre Levine Alexandru Muresan Alexei Kornienko Alexey I. Froloff Alexey Roytman Alexey Stupnikov Alexis Lee Alexis Lee Alfredo Moralejo Ali Safari Alin Balutoiu Alin Gabriel Serdean Allen Gao Allen Gao Alvaro Lopez Garcia Amandeep Ameed Ashour Ameed Ashour Amir Sadoughi Amit Uniyal Amrith Kumar Amy Fong Ana Krivokapic Anand Shanmugam Andras Gyacsok Andre Andre Aranha Andrea Frittoli Andrea Rosa Andrea Rosa Andreas Jaeger Andreas Jaeger Andreas Karis Andreas Scheuring Andrei Bacos Andrei V. Ostapenko Andrew Bogott Andrew Boik Andrew Bonney Andrew Clay Shafer Andrew Glen-Young Andrew James Andrew Laski Andrew Laski Andrew Lazarev Andrew Melton Andrew Woodward Andrey Brindeyev Andrey Kurilin Andrey Kurilin Andrey Pavlov Andrey Volkov Andy Hill Andy Hsiang Andy McCrae Andy Smith Andy Southgate Aneesh Puliyedath Udumbath Angus Lees Anh Tran Anish Bhatt Anita Kuno Ankit Agrawal Ann Kamyshnikova Anne Gentle Anne Gentle Ante Karamatic Ante Karamatić Ante Karamatić Anthony Lee Anthony PERARD Anthony Woods Anthony Young Anton Arefiev Anton Gorenkov Anton Kremenetsky Anton V. Yanchenko Antoni Segura Puimedon Antony Messerli Anuj Mathur Anush Krishnamurthy Anusha Unnam Arata Notsu Arathi Archit Modi Arefiev Anton Armando Migliaccio Armando Migliaccio Arnaud Legendre Arnaud Legendre Arnaud Morin Arnaud Morin Artem Goncharov Artem Vasilyev Arthur Dayne Artom Lifshitz Artur Malinowski Arvind Nadendla Arvind Somya Arx Cruz Asbjørn Sannes Aswad Rangnekar Atsushi SAKAI Attila Fazekas Augustina Ragwitz Author Name Author: Carl Morris Avinash Prasad Avishay Traeger Avishay Traeger Aysy Anne Duarte Ayush Garg Balazs Gibizer Balazs Gibizer Balazs Gibizer Balazs Gibizer Bartek Zurawski Bartosz Fic Beliveau, Ludovic Belmiro Moreira Ben McGraw Ben Nemec Ben Nemec Ben Nemec Ben Roble Ben Swartzlander Bence Romsics Bernhard M. Wiedemann Bhagyashri Shewale Bharath Thiruveedula Bhuvan Arumugam Bilal Akhtar Bill Owen Billy Olsen Bin Zhou Bo Quan Bo Wang Bob Ball Boden R Bogdan Dobrelya Boris Bobrov Boris Filippov Boris Pavlovic Brad Hall Brad McConnell Brad Pokorny Brandon Irizarry Brant Knudson Brendan Maguire Breno Leitao Brent Eagles Brent Tang Brett Milford Brian D. Elliott Brian Elliott Brian Elliott Brian Haley Brian Lamar Brian Moss Brian Rosmaita Brian Rosmaita Brian Schott Brian Waldon Brianna Poulos Brooks Kaminski Brooks Kaminski Bruce Benjamin Burt Holzman Béla Vancsics CDream Cady_Chen Cale Rath Callum Dickinson Cao ShuFeng Cao Xuan Hoang Carl Baldwin Carlos Goncalves Cedric Brandily Cedric LECOMTE Chandan Kumar Chang Bo Guo ChangBo Guo(gcb) Changbin Liu Chen Chen Fan Chen Hanxiao ChenZheng Chet Burgess Chiradeep Vittal Chmouel Boudjnah Chris Chris Behrens Chris Buccella Chris Dent Chris Friesen Chris J Arges Chris Jones Chris Krelle Chris Krelle Chris St. 
Pierre Chris Suttles Chris Yeoh Christian Berendt Christian Rohmann Christine Wang Christoph Manns Christoph Thiel Christopher Lefelhocz Christopher Lefelhocz Christopher MacGown Christopher Yeoh Chuck Carmack Chuck Short Chung Chih, Hung Cian O'Driscoll Clark Boylan Claudiu Belu Claxton Clay Gerrard Clemens Perz Clenimar Filemon Clif Houck Clint Byrum Cole Robinson Cor Cornelisse Corentin Ardeois Corey Bryant Corey Wright Cory Stone Cory Wright Craig Tracey Craig Vyvial Curt Moore Cyril Roelandt DamonLi Dan Dan Song Dan Emmons Dan Florea Dan Genin Dan Genin Dan Peschman Dan Prince Dan Smith Dan Smith Dan Smith Dan Wendlandt Dane Fichter Danfeng Danfly Daniel Abad Daniel Bengtsson Daniel Berrange (berrange@redhat.com) Daniel Berrange Daniel Genin Daniel Kuffner Daniel L Jones Daniel P. Berrange Daniel Pawlik Daniel Pawlik Daniel Stelter-Gliese Danil Akhmetov Danny Al-Gaaf Danylo Vodopianov Dao Cong Tien Darragh O'Reilly Darren Birkett Darren Sanders Darren Worrall Dat Le Davanum Srinivas Davanum Srinivas Dave Lapsley Dave McCowan Dave McNally Dave Walker (Daviey) Dave Walker (Daviey) David Besen David Bingham David Edery David Hill David Hill David Kang David McNally David Medberry David Peraza David Pravec David Rabel David Ripton David Scannell David Shrewsbury David Subiros David Wahlstrom David Xie Dazhao Dean Troyer Debo Dutta Debo~ Dutta Deepak C Shetty Deepak Garg Deliang Fan Demontiê Junior Dennis Kliban DennyZhang Derek Higgins Devananda van der Veen Devdatta Kulkarni Devdeep Singh Devendra Modium Devin Carlen Dharini Chandrasekar Dheeraj Gupta Diana Clarke Dima Shulyak Dimitri Mazmanov Dina Belova Dinesh Bhor Dirk Mueller Divya Dmitrii Shcherbakov Dmitriy Chubinidze Dmitriy Rabotyagov Dmitriy Rabotyagov Dmitry Borodaenko Dmitry Guryanov Dmitry Guryanov Dmitry Spikhalskiy Dmitry Tantsur Dmitry Tantsur Dmitry Tantsur Dolph Mathews Dominic Schlegel Dominik Heidler Don Dugger Donal Lafferty Dong Ma Dongcan Ye Dongdong Zhou Donovan Finch Dorin Paslaru Doug Goldstein Doug Hellmann Doug Hellmann Doug Royal Doug Szumski Doug Wiegley Douglas Mendizábal Douglas Viroel Dr. Jens Harbott Drew Fisher Drew Thorstensen DuYaHong Duan Jiong Duncan McGreggor Duong Ha-Quang Dustin Cowles Earle F. Philhower, III Ed Bak Ed Leafe EdLeafe Edan David Edgar Magana Eduardo Costa Edward Hope-Morley Edwin Zhai Eiich Aikawa Eiichi Aikawa Einst Crazy Eldar Nugaev Elena Ezhova Eli Qiao Eli Qiao Ellen Hui Elod Illes Előd Illés Emilien Macchi Emma Foley En Eoghan Glynn Eohyung Lee Eric Berglund Eric Blake Eric Brown Eric Day Eric Fried Eric Fried Eric Guo Eric Harney Eric Harney Eric M Gonzalez Eric Windisch Eric Windisch Eric Young Eric Young Erik Berg Erik Olof Gunnar Andersson Erik Zaadi Erlon R. 
Cruz Erwan Gallen Esha Seth Esra Celik Ethan Chu Euan Harris Eugene Kirpichov Eugene Nikanorov Eugeniya Kudryashova Evan Callicoat Evgeny Antyshev Evgeny Fedoruk Ewan Mellor Fabian Wiesel Facundo Farias Facundo Maldonado Fan Zhang Fang He Fang Jinxing Federico Ressi Fei Long Wang Fei Long Wang Felipe Monteiro Felix Huettner Felix Li Feng Xi Yan Fengqian Gao Feodor Tersin Feodor Tersin FionaLi-oc Flaper Fesp Flavia Missi Flavio Percoco Flavio Percoco Florent Flament Florian Haas Florian Haas Forest Romain ForestLee Francesco Santoro Francois Palin François Charlier Frederic Lepied Gabe Westmaas Gabor Antal Gabriel Adrian Samfira Gabriel Hurley Gabriel Samfira Gage Hugo Gage Hugo Gao Yuan Gary Kotton Gary Kotton Gaudenz Steinlin Gaurav Gupta Gauvain Pocentek Gauvain Pocentek Georg Hoesch George Shuklin Gergo Debreczeni Gerry Kopec Ghanshyam Ghanshyam Ghanshyam Maan Ghanshyam Mann Ghanshyam Mann Ghanshyan Mann Ghe Rivero Giampaolo Lauria Giridhar Jayavelu Giulio Fidente Gleb Stepanov Gonéri Le Bouder Gonéri Le Bouder Gordon Chung Gorka Eguileor Graham Hayes Grant Murphy Greg Althaus Greg Ball Gregory Haynes Grzegorz Grasza Guan Qiang Guang Yee Guangya Liu Guangyu Suo Guillaume Espanel Guohui Liu Guoqiang Ding Gustavo Santos Gyorgy Szombathelyi Gábor Antal Ha Van Tu Haiwei Xu Hamdy Khader Hang Yang Hang Yang Hans Lindgren Haomai Wang Harshada Mangesh Kakad Harshavardhan Metla Haruka Tanizawa He Jie Xu He Jie Xu He Jie Xu He Yongli Hemanth Makkapati Hemanth Nakkina Hendrik Volkmer Hengqing Hu Hervé Beraud Hesam Chobanlou Hieu LE Hirofumi Ichihara Hironori Shiina Hiroyuki Eguchi Hisaharu Ishii Hisaki Ohara Hongbin Lu Hongbin Lu Huan Xie Huang Rui Hyunsun Moon IWAMOTO Toshihiro Iago Estrela Ian Cordasco Ian Wells Ian Wienand Ianeta Hutchinson Ice Yao Ifat Afek Ihar Hrachyshka Ildiko Vancsa Ildiko Vancsa Ilya Alekseyev Ilya Etingof Ilya Pekelny Ilya Popov Ilya Shakhat Imran Hussain Inbar Ionuț Arțăriși Ionuț Bîru Irena Berezovsky Isaku Yamahata Itzik Brown Iury Gregory Melo Ferreira Ivan A. Melnikov Ivan Kolodyazhny Ivaylo Mitev Ivo Vasev J. Daniel Schmidt JC Martin Jack Ding Jack Lu Jackie Truong Jacob Cherkas Jake Dahn Jake Liu Jake Yip Jakub Pavlik Jakub Ruzicka James Carey James Chapman James E. Blair James E. Blair James Page James Page James Penick Jamie Lennox Jamie Lennox Jan Grant Jan Gutter Jan Hartkopf Jan Klippel Jan Klippel Jan Zerebecki Janis Gengeris Jared Culp Jared Winborne Jason Anderson Jason Cannavale Jason Dillaman Jason Koelker Jason.Zhao Javeme Jay Faulkner Jay Jahns Jay Lau Jay Lau Jay Lee Jay Pipes Jay S. Bryant Jean-Baptiste RANSY Jean-Marc Saffroy Jean-Philippe Evrard Jeegn Chen Jeegn Chen Jeffrey Zhang Jenkins Jennifer Mulsow Jenny Oshima Jens Harbott Jens Jorritsma Jens Rosenboom Jeremy Liu Jeremy Stanley Jesse Andrews Jesse J. Cook Jesse J. Cook Jesse Keating Jesse Keating Jesse Keating Jesse Pretorius JiaLei Shi Jiajun Liu Jialiang Jian Wen JiangPF Jianghua Wang Jie Li Jim Fehlig Jim Rollenhagen Jimmy Bergman Jin Hui JinLi Jinwoo 'Joseph' Suh Jiří Suchomel Joe Cropper Joe Gordon Joe Heck Joe Julian Joe Mills Joe Talerico Joel Coffman Joel Moore Johannes Erdfelt Johannes Erdfelt Johannes Kulik Johannes Kulik John John Bresnahan John Dewey John Garbutt John Garbutt John Garbutt John Garbutt John Griffith John Griffith John H. Tran John Haan John Herndon John Hua John Kennedy John L. 
Villalovos John Stanford John Tran John Tran John Warren Johnson koil raj Jolyon Brown Jon Bernard Jon Grimm Jonathan Bryce Jonathan Race Jordan Pittier Jordan Rinke JordanP JordanP Jorge Niedbalski Jorge San Emeterio Jorhson Deng Joris S'heeren Jose Castro Leon Joseph Suh Joseph W. Breu Josephine Seifert Josh Durgin Josh Durgin Josh Gachnang Josh Kearney Josh Kleinpeter Joshua Harlow Joshua Harlow Joshua Harlow Joshua Hesketh Joshua McKenty JuPing Juan Antonio Osorio Robles Juan Manuel Olle Juerg Haefliger Julia Kreger Julia Varlamova Julian Sy Julian Sy Julien Danjou Julien Danjou Julien Le Jeune Justin Hammond Justin Santa Barbara Justin Shepherd Jérôme Gallard KIYOHIRO ADACHI Kaitlin Farr Kamil Rykowski Kamil Sambor Kanagaraj Manickam Karen Bradshaw Karen Noel Karimullah Mohammed Kartik Bommepally Kashi Reddy Kashyap Chamarthy Kaushik Chandrashekar Kaushik Chandrashekar Kei Masumoto Keigo Noha Keisuke Tagami Ken Burger Ken Igarashi Ken Pepple Ken'ichi Ohmichi Ken'ichi Ohmichi Kengo Sakai Kenji Yasui Kent Wang Kentaro Matsumoto Keshava Bharadwaj Kevin Benton Kevin Benton Kevin Benton Kevin Bringard Kevin L. Mitchell Kevin Zhao Kevin Zhao Kevin Zhao Kevin Zhao KevinZhao Kevin_Zheng Kiall Mac Innes Kien Nguyen Kieran Spear Kiran Pawar Kirill Shileev Kiseok Kim Kobi Samoray Koert van der Veer Koichi Yoshigoe Koji Iida Komei Shimamura Konstantinos Samaras-Tsakiris Kost Kravchenko Pavel Krisztian Gacsal Kui Shi Kun Huang Kurt Taylor Kurt Taylor Kurtis Cobb Kylin CG LIU Yulong Lajos Katona Lan Qi song Lance Bragstad Lance Bragstad Lars Kellogg-Stedman Laszlo Hegedus Launchpad Translations on behalf of nova-core <> Lauren Taylor Leander Bessa Beernaert Leandro I. Costantino Lee Yarwood Leehom Li (feli5) Leehom Li Lenny Verkhovsky LeopardMa Li Chen Liam Kelleher Liam Young Lianhao Lu Likitha Shetty Lin Hua Cheng Lin Tan Lin Yang Lingxian Kong LingxianKong Lior Friedman LiuNanke Loganathan Parthipan Lorenzo Affetti Lorin Hochstein Lucas Alvares Gomes Lucian Petrut Lucian Petrut Lucio Seki Ludovic Beliveau Luigi Toscano Luis A. Garcia Luis Fernandez Alvarez Luis Pigueiras Luis Tomas Luiz Capitulino Luo Gangyi Luong Anh Tuan Luyao Zhong LuyaoZhong Lvov Maxim MORITA Kazutaka Maciej Jozefczyk Maciej Józefczyk Maciej Kucia Maciej Szankin Madhu Mohan Nelemane Madhuri Kumari Madhuri Kumari Mahesh K P Mahesh Panchaksharaiah Maho Koshiya Maithem Major Hayden Malini Bhandaru Mana Kaneko Mandar Vaze Mandell Degerness Manik Sidana Manjunath Patil Manuel Bentele Marcellin Fom Tchassem Marcin Juszkiewicz Marcin Juszkiewicz Marcio Roberto Starke Marco Sinhoreli Marcos Lobo Marcus Furlong Marian Horban Mario Villaplana Maris Fogels Mark Doffman Mark Giles Mark Goddard Mark Goddard Mark McClain Mark McLoughlin Mark Mielke Mark T. Voelker Mark Washenberger Markus Zoeller Martin Kletzander Martin Kopec Martin Packman Martin Schuppert Martins Jakubovics Maru Newby Masahito Muroi Masaki Matsushita Masanori Itoh Masanori Itoh Masayuki Igawa Mate Lakat Mathew Odden Mathieu Gagné Mathieu Mitchell Matt Dietz Matt Fischer Matt Joyce Matt Odden Matt Rabe Matt Riedemann Matt Riedemann Matt Stephenson Matt Thompson Matt Wisch Matteo Sposato Matthew Booth Matthew Edmonds Matthew Gilliard Matthew Hooker Matthew Macdonald-Wallace Matthew N Heler Matthew Oliver Matthew Sherborne Matthew Treinish Matthew Treinish Mauro S. M. 
Rodrigues Maxim Monin Maxim Nestratov Maxim Nestratov Maxime Leroy Md Nadeem Mehdi Abaakouk Mehdi Abaakouk Mehdi Abaakouk Melanie Witt Michael Bayer Michael Davies Michael Gundlach Michael H Wilson Michael Henkel Michael J Fork Michael Kerrin Michael Krotscheck Michael Still Michael Turek Michael Wilson Michael Wurtz Michal Michal Dulko Michal Pryc Miguel Herranz Miguel Lavalle Miguel Lavalle Mike Bayer Mike Dorman Mike Durnosvistov Mike Fedosin Mike Lowe Mike Lundy Mike Milner Mike Perez Mike Pittaro Mike Scherbakov Mike Spreitzer MikeG451 Mikhail Chernik Mikhail Durnosvistov Mikhail Feoktistov Mikyung Kang Ming Yang Minghong Hou Mitsuhiko Yamazaki Mitsuhiro SHIGEMATSU Mitsuhiro Tanino Mitya_Eremeev Mohammed Naser Monsyne Dragon Monty Taylor Morgan Fainberg Moshe Levi MotoKen Muawia Khan MultipleCrashes Muneyuki Noguchi NTT PF Lab. Nachi Ueno Nam Nguyen Hoai Nathan Kinder Naveed Massjouni Navneet Kumar Neha Alhat Neil Jerram Neil Jerram Newptone Ngo Quoc Cuong Nguyen Hai Truong Nguyen Hung Phuong Nguyen Phuong An Nicholas Kuechler Nick Bartos Nicolas Bock Nicolas Simonds Nikhil Komawar Nikhil Komawar Nikita Gerasimov Nikita Konovalov Nikola Dipanov Nikola Dipanov Nikola Đipanov Nikolai Korablin Nikolay Sokolov Nirmal Ranganathan Nisha Agarwal Nobuhiro MIKI Noorul Islam K M Numan Siddique OctopusZhang OctopusZhang Oleg Bondarev Olga Kopilova Oliver Walsh Olivier Chaze Ollie Leahy Ondřej Nový OpenStack Release Bot Oshrit Feder Pablo Fernando Cargnelutti Pallavi PanYaLian Patrick East Patrick Hampson Patrick Schaefer Paul Green Paul Griffin Paul McMillan Paul Murray Paul Murray Paul Voccio Paulo Matias Pavel Gluschak Pavel Glushchak Pavel Kholkin Pavel Kirpichyov Pavel Kravchenco Pavlo Shchelokovskyy Pavlo Shchelokovskyy Pawel Koniszewski Pawel Palucki Pedro Almeida Pedro Navarro Perez Pekelny "I159" Ilya Peng Li Peng Yong Pengfei Zhang Peter Feiner Peter Hamilton Peter Krempa Peter Penchev Petersingh Anburaj Petrut Lucian Phil Day Philip Knouff Philip Schwartz Philipp Marek Phong Ly Pierre Blanc Pierre LIBEAU Pierre Libeau Pierre Riteau Pierre-Samuel Le Stang Pierre-Samuel Le Stang Pooja Jadhav Praharshitha Metla Pranali Deore PranaliDeore Pranav Salunke Pranav Salunke Prashanth kumar reddy Prateek Arora Praveen Yalagandula Prem Karat Przemyslaw Czesnowicz Puneet Goyal Pushkar Umaranikar Pádraig Brady Qiang Guan Qiao, Liyong Qiaowei Ren Qin Zhao Qin Zhao Qing Wu Wang QingXin Meng Qiu Yu Qiu Yu QunyingRan Rabi Mishra Racha Ben Ali Radomir Dopieralski Radoslav Gerganov Radoslaw Smigielski Radosław Piliszek Rafael Folco Rafael Weingärtner Rafi Khardalian Rajat Dhasmana Rajat Jain Rajesh Tailor Rajesh Tailor Rajesh Tailor Rakesh H S Ralf Haferkamp Ram Nalluri Raoul Hidalgo Charman Ravi Shekhar Jethani Rawan Herzallah Ray Chen Ray Sun Rene Ribaud Renier Morales Renuka Apte René Ribaud Ricardo Carrillo Cruz Ricardo Noriega Riccardo Pittau Richard Jones Richard W.M. 
Jones Rick Bartra Rick Clark Rick Harris Rico Lin Rikimaru Honjo Rikimaru Honjo Ripal Nathuji Ripal Nathuji Rob Esker Robert Breker Robert Collins Robert Collins Robert Ellis Robert Kukura Robert Li Robert Pothier Robert Tingirica Robin Naundorf Rocky Rodolfo Alonso Hernandez Rodolfo Alonso Hernandez Rodrigo Barbieri Roey Chen Rohan Kanade Rohan Kanade Rohan Kanade Rohan Rhishikesh Kanade Rohit Karajgi Roland Hochmuth Romain Chantereau Romain Hardouin Roman Bogorodskiy Roman Bogorodskiy Roman Dobosz Roman Podoliaka Roman Podolyaka Ronald Bradford Ronen Kat Rongze Zhu RongzeZhu Rosario Di Somma Ruby Loo Rui Chen Rushi Agrawal Russell Bryant Russell Cloran Russell Sim Ryan Hsu Ryan Lane Ryan Lucio Ryan McNair Ryan Moe Ryan Moore Ryan Moore Ryan Rossiter Ryo Miki Ryota MIBU Ryu Ishimoto Sabari Kumar Murugesan Sachi King Sagar Ratnakara Nikam Sahid Orentino Ferdjaoui Sahid Orentino Ferdjaoui Sahid Orentino Ferdjaoui Sahid Orentino Ferdjaoui Salvatore Orlando Sam Alba Sam Betts Sam Morrison Sam Morrison Sam Yaple Samantha Blanco Sampath Priyankara Samuel Matzek Sandy Walsh Santiago Baldassin Sarafraj Singh Sascha Peilicke Sascha Peilicke Sascha Peilicke Sasha Andonov Sateesh Chodapuneedi Sathish Nagappan Satoru Moriya Satyanarayana Patibandla Satyanarayana Patibandla Saverio Proto Scott Moser Scott Moser Scott Reeve Scott Wilson Sean Chen Sean Dague Sean Dague Sean Dague Sean M. Collins Sean M. Collins Sean McCully Sean McGinnis Sean McGinnis Sean McGinnis Sean Mooney Sean Mooney Seif Lotfy Senhua Huang Sergey Nikitin Sergey Nikitin Sergey Skripnick Sergey Vilgelm Sergii Golovatiuk Sergio Cazzolato Shane Wang ShaoHe Feng Sharat Sharma Shawn Harsock Shawn Hartsock Shawn Hartsock Shi Yan Shih-Hao Li Shilla Saebi Shlomi Sasson Shoham Peller Shraddha Pandhe Shraddha Pandhe Shuangtai Tian ShunliZhou Shunya Kitada Shuquan Huang Sidharth Surana Sihan Wang Silvan Kaiser Simon Chang Simon Dodsley Simon Hensel Simon Pasquier Simon Pasquier Simona Iuliana Toader Sirisha Devineni Sirushti Murugesan Sivasathurappan Radhakrishnan Slawek Kaplonski Solly Ross Somik Behera Soren Hansen Soren Hansen Spencer Krum Spencer Yu Stanislaw Pitucha Stanisław Pitucha Stef T Stefan Amann Stephan Pampel Stephanie Reese Stephen Finucane Stephen Finucane Stephen Finucane Stephen Finucane Stephen Gran StephenSun Steve Baker Steve Baker Steve Kowalik Steve Noyes Steven Blatzheim Steven Dake Steven Hardy Steven Kaufer Steven Webster Stuart McLaren Subashini Soundararajan Subhadeep De Sudarshan Acharya Sudipta Biswas Suiong Ng Sujitha Sukhdev Kapur Sulochan Acharya Sumanth Nagadavalli Sumedh Degaonkar Sumit Naiksatam Sundar Nadathur Sunil Thaha Surojit Pathak Surya Surya Seetharaman Sven Anderson Svetlana Shturm Swami Reddy Swaminathan Vasudevan Swapnil Kulkarni (coolsvap) Sylvain Bauza Sylvain Bauza Sławek Kapłoński Tadayoshi Hosoya Takaaki Suzuki Takashi Kajinami Takashi Kajinami Takashi NATSUME Takashi Natsume Takashi Natsume Takashi Sogabe Takenori Yoshimatsu Taku Izumi Tang Chen Tang Chen Tao Li Tao Yang Tao Yang TaoBai Taylor Peoples Taylor Smith Teng Li Teran McKinney Tetsuro Nakamura Tetsuro Nakamura Thang Pham Thelo Gaultier Theodoros Tsioutsias Thierry Carrez Thomas Bachman Thomas Bechtold Thomas Bechtold Thomas Goirand Thomas Herve Thomas Kaergel Thomas Maddox Thomas Stewart Thorsten Tarrach Tiago Mello Tianpeng Wang Tiantian Gao Tim Miller Tim Potter Tim Pownall Tim Pownall Tim Rozet Tim Simpson Timofey Durakov Toan Nguyen Tobias Urdin Tobias Urdin Tobias Urdin Todd Willey Tom Cammann Tom Fifield Tom Fifield Tom 
Hancock Tom Patzig Tomi Juvonen Tomoe Sugihara Tomofumi Hayashi Tomoki Sekiyama Tomoki Sekiyama Tong Li Tony Breeds Tony NIU Tony Su Tony Xu Tony Yang Toshiaki Higuchi Tovin Seven Tracy Jones Travis Ankrom Trey Morris Tristan Cacqueray Tristan Cacqueray Troy Toman Trung Trinh Tsuyoshi Nagata TuanLAF Tushar Kalra Tushar Patil Tyler Blakeslee Unmesh Gurjar Unmesh Gurjar Unmesh Gurjar Vasiliy Shlykov Vasyl Saienko VeenaSL Venkateswarlu Pallamala Vern Hart Vic Howard Victor Coutellier Victor Morales Victor Sergeyev Victor Stinner Victor Stinner Vijaya Erukala Vikhyat Umrao Vilobh Meshram Vincent Hou Vincent Untz Vipin Balachandran Vishakha Agarwal Vishvananda Ishaya Vivek Agrawal Vivek YS Vladan Popovic Vladik Romanovsky Vladik Romanovsky Vladyslav Drok Vu Cong Tuan Vu Tran Vui Lam Vui Lam Waldemar Znoinski Walter A. Boring IV Wang Huaqiang Wangliangyu Wangpan Wangpan Wanlong Gao Wei Jiangang Wen Zhi Yu Wen Zhi Yu Wenhao Xu Wenzhi Yu Will Foster William Wolf Wonil Choi Wu Wenxiang Xavier Queralt Xiang Hui Xiangyang Chu Xiao Chen XiaohanZhang <15809181826@qq.com> XiaojueGuan Xiaowei Qian Xiaoyan Ding XieYingYun Xing Yang Xinyuan Huang Xu Ao Xu Han Peng Xuanzhou Perry Dong Xurong Yang YAMAMOTO Takashi YI-JIE,SYU Yaguang Tang Yaguang Tang Yang Hongyang Yang Yu YangLei Yassine Lamgarchal Yasuaki Nagata Yikun Jiang Yingxin Yingxin Cheng Yixing Jia Yolanda Robla Yong Sheng Gong Yongli He Yongli He Yongli He Yongli he Yoon Doyoul Yosef Berman Yosef Hoffman Yoshiaki Tamura Yoshihiko Atsumi You Ji YuYang Yufang Zhang Yuiko Takada Yuiko Takada YuikoTakada Yukihiro KAWADA Yulia Portnova Yun Mao Yun Shen Yunhong Jiang Yunhong, Jiang Yuriy Taraday Yuriy Zveryanskyy Yury Kulazhenkov Yusuke Okada Yuuichi Fujioka Yuzlikeev Eduard ZHU ZHU Zack Cornelius Zaina Afoulki Zane Bitter Zara Zed Shaw Zhang Hua ZhangShuaiyi Zhao Lei Zhen Qin Zheng Yue Zhengguang Zhenguo Niu Zhenguo Niu Zhenzan Zhou Zhi Yan Liu Zhi Yan Liu ZhiQiang Fan ZhiQiang Fan Zhihai Song Zhilong.JI Zhiteng Huang Zhiteng Huang ZhongShengping Zhongyue Luo Zhou Jianming Zhou ShaoYu ZhuRongze Ziad Sawalha Zoltan Arnold Nagy abdul nizamuddin abhilash-goyal abhishek-kekane abhishek.talwar abhishekkekane afariasa afazekas alecorps alexc20 alexpilotti andrewbogott ankitagrawal april arches armando-migliaccio armando-migliaccio arvindn05 as0 asarfaty ashoksingh aulbachj bailinzhang baiwenteng benjamin.grassart bhagyashris bhavani.cr boh.ricky bria4010 budebao <531648642@qq.com> caoyuan cedric.brandily chaochin@gmail.com chen chenaidong1 chenghuiyu chenpengzi <1523688226@qq.com> chenxiangui chenxiao chenxing chenxing chhagarw chinmay chohoor chris fattarsi csatari da52700 daisy-ycguo daisy-ycguo dane-fichter david martin deepak.mourya deepak_mourya deepak_mourya deepakmourya deevi rani dekehn dengzhaosen dimtruck dineshbhor divakar-padiyar-nandavar dzyu eddie-sheffield eewayhsu ejbaek elajkat ericxiett ericzhou esberglu esubramanian evikbas facundo Farias falseuser fpxie ftersin fujioka yuuichi fuzk galstrom21 gaofei gaozx garyk garyk gengchc2 gengjh gh159m ghanshyam ghanshyam ghanshyam ghanshyam mann git-harry gong yong sheng gongxiao gongysh grace.yu gregory.cunha gseverina guanzuoyu gugug guillaume-thouvenin guohliu gustavo panizzo hackertron hartsocks heha heijlong hgangwx hill hua zhang huang.zhiping huangpengtao huangtianhua huangtianhua huanhongda hussainchachuliya hutianhao27 hzguanqiang ianeta hutchinson iccha.sethi imacdonn inspurericzhang int32bit isethi iswarya_vakati ivan-zhu jakedahn javeme jay jaypei jcooklin jeckxie jenny-shieh jiajunsu 
jianghua wang jianghuaw jiangwt100 jiataotj jichen jichenjc jimmygc jinquanni jmeridth john.griffith8@gmail.com jokcylou jolie jskunda jufeng jufeng julykobe kairoaraujo kangyufei karimb karimull kashivreddy keerthivasan86 kevin shen <372712550@qq.com> kirankv kiwik-chenrui klyang ladquin lapgoon lawrancejing lei zhang leizhang lianghao lianghuifei liangjingtao libing licanwei likui linbing ling-yun linwwu liu-lixiu liu-sheng liudong liuhuajie liusheng liuyamin liwenjian lixipeng liyingjun liyingjun liyuanyuan lizheming lkhysy llg8212 lqslan lrqrun lvdongbing lyanchih m.benchchaoui@cloudbau.de m4cr0v maaoyu manas.mandlekar manchandavishal maqi mark.sturdevant mathieu-rohon mathrock mathrock mb mbasnight mdrabe melanie witt melanie witt melissaml mingyan bao mjbright mkislinska mmidolesov msdubov nafei yang naichuans oleksii panbalag pandatt pangliye park hei park hei parklong partys paul-carlton2 pcarlton pengyuwei piyush110786 pkholkin pmoosh pooja jadhav poojajadhav pran1990 preethipy pyw qiaomin qiufossen rackerjoe rajat29 ramboman ricolin root rsritesh rtmdk ruichen ryo.kurahashi s iwata saradpatel sarvesh-ranjan sarvesh-ranjan scottda scottda sdmitriev1 sean mooney shaofeng_cheng sharat.sharma shenxindi shi liang shihanzhang shilpa shreeduth-awasthi shuangtai shuangyang.qian smartu3 smccully songjie songwenping sonu.kumar space sridevik sridhargaddam srushti stanzgy stewie925 stewie925 sudhir_agarwal sunhao sunjia tamilhce tanlin tengqm thorst tianhui tianmaofu tilottama gaat to-niwa tonybrad uberj unicell vaddi-kiran venakata anil venkata anil venkatamahesh vijaya-erukala vladimir.p vsaienko wangbo wangdequn wangfaxin wanghao wanghongtaozz wanghongxu wangjiajing wangkuntian wangqi wangxiyuan wangzhengh warewang watanabe isao weiweigu whoami-rajat wingwj wnark wu.chunyang wuhao xhzhf xianming mao xiaoding xiaojueguan xiexs xulei xushichao ya.wang yan97ao yangyapeng yanpuqing yatin yatin karel yatinkarel yatinkarel ydoyeul yenai yingjisun yongiman yuanyue yugsuo yuhui_inspur yunhong jiang yuntong yuntongjin yuntongjin yushangbin yuval brave yuyafei zhang-jinnan zhang.lei zhang.yufei@99cloud.net <1004988384@qq.com> zhangbailin zhangboye zhangchao010 zhangchunlong zhangchunlong1@huawei.com zhangdaolong zhangdebo zhangdebo1987 zhangfeng zhanghao zhangshj zhangtralon zhangyangyang zhangyanxian zhangyanzi zhaoleilc <15247232416@163.com> zhaolihui zhengyao1 zhhuabj zhiyanliu zhiyanliu zhiyuan_cai zhong.zhou zhoudongshu zhoujunqiang zhouxinyong zhu.boxiang zhubx007 zhufl zhulingjie zhurong zhuzeyu zte-hanrong zwei Édouard Thuleau Édouard Thuleau Édouard Thuleau Édouard Thuleau Émilien Macchi 翟小君 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/CONTRIBUTING.rst0000664000175000017500000000111700000000000015223 0ustar00zuulzuul00000000000000The source repository for this project can be found at: https://opendev.org/openstack/nova Pull requests submitted through GitHub are not monitored. 
To start contributing to OpenStack, follow the steps in the contribution guide to set up and use Gerrit: https://docs.openstack.org/contributors/code-and-documentation/quick-start.html Bugs should be filed on Launchpad: https://bugs.launchpad.net/nova For more specific information about contributing to this repository, see the Nova contributor guide: https://docs.openstack.org/nova/latest/contributor/contributing.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315685.0 nova-32.0.0/ChangeLog0000664000175000017500000710265000000000000014347 0ustar00zuulzuul00000000000000CHANGES ======= 32.0.0 ------ * Update Debian qemu/libvirt/libguestfs versions * Add Flamingo prelude section * docs: Update libvirt version support matrix for Flamingo * Fix fast8 tox target * hypervisors: Optimize uptime retrieval for better performance * [pci]Keep used dev in Placement regardless of dev\_spec * [PCI tracker]Remove non configured devs when freed * Reproduce bug/2115905 * Fix bug 2114951 * Reproducer for bug 2114951 * Add service version for Falmingo * Update compute rpc alias for epoxy * doc: mark the maximum microversion for 2025.2 Flamingo * Fix duplicate words * Fix 'nova-manage image\_property set' command * Replace remaining usage of Ubuntu Jammy * libvirt: Disable VMCoreInfo device for SEV-encrypted instances * Follow-up of AMD SEV-ES support * Purge nested SEV RPs when SEV is disabled * Add functional test scenario for mixed SEV RPs * libvirt: Launch instances with SEV-ES memory encryption * Add hw\_mem\_encryption\_model image property * Detect AMD SEV-ES support * Add service role in Nova policy * Do not yield in threading mode * [CI]Make nova-tox-py312-threading voting * [test]RPC using threading or eventlet selectively * Run unit test with threading mode * Allow to start unit test without eventlet * Ask for pre-prod testing for native threading * Migrate MEM\_ENCRYPTION\_CONTEXT from root provider * Update min support for Flamingo * api: Only apply "soft" additionalProperties validation to requests * tests: Use valid UUIDs for cinder resources * api: Separate volume, snapshot and volume attachments * api: Address issues with images APIs * api: Fix validators for hw:cpu\_max\_\* extra specs * restrict swap volume to cinder * Run nova-api and -metadata in threaded mode * [tests] Add printing of sample and template paths * only show standard image properties in server show * Run nova-next with n-sch in threading mode * Allow services to start with threading * Remove logic for unsupported old libvirt/qemu * api: Deprecate v2 API * api: Remove '[api] auth\_strategy', NoAuthMiddlware * api: Remove deprecated pipeline\_factory * conf: Rename '[api] neutron\_default\_tenant\_id' * tests: Use v2.1 API in OSAPIFixture * tests: Remove unnecessary API version overrides * tests: Update to use '/v2.1' prefix, avoid project ID * Fix libvirt metadata upgrade release note * Drop unused method * db: Fix api\_db\_opts reading main\_db\_opts * Add image meta to libvirt XML metadata * Add more flavor metadata to libvirt guest XML * Fix image ID in libvirt metadata when unshelving * Enable E711 consistently * libvirt: Get info with abs path, rebase with rel path * mypy: fix type annotations for PciDevicePoolList and NUMATopologyLimits * Document native threading mode and tuneables * Fix doc comment on manager role change * Don't reset port dns\_name when shelving instances * Update contributor guide for 2025.2 Flamingo * api: Add response body schemas for images 
APIs * Replace eventlet.event.Event with threading.Event * Implement USB controller extra spec for libvirt * [test]Speed up qemu announce test by mocking sleep * [test]Speed up RBD test by decreasing retry interval * [test]Speed up ironic console test by decreasing timeout * [test]Speed up fs retry tests by mocking sleep * Print ThreadPool statistics * Add project manager role in Nova API policy rule * Drop unused parameter type validation method * Migrate ExceptionHelper to test utilities * Remove unused utilities * Make the default executor configurable * Rename DEFAULT\_GREEN\_POOL to DEFAULT\_EXECUTOR * Make nova.utils.pass\_context private * Move ConductorManager to use spawn\_on * FUP: Translate scatter-gather to futurist * Imported Translations from Zanata * sqlalchemy: Use built-in declarative * api: Add response body schemas for networks API * api: Add response body schemas for removed APIs * api: Address issues with server group APIs * api: Add response body schemas for server group APIs * api: Add response body schemas for server password APIs * api: Add response body schemas for server external events API * libvirt: Add objects and notifications for USB controller model * Implement sound model extra spec for libvirt * libvirt: Add objects and notifications for sound model * Fix pci\_tracker.save to delete all removed devs * Reproduce that only half of the PCI devs are removed * Move ComputeManager to use spawn\_on * Add spawn\_on * Replace utils.spawn\_n with spawn * Use futurist for \_get\_default\_green\_pool() * api: Correct expected errors * api: Address issues with remote consoles APIs * Note on RPC error decorators around build\_and\_run\_instance * Fix neutron client dict grabbing * Add Project Manager role context in unit tests * Revert^2 "Support glance's new location API" * Remove Unicode characters * Translate scatter-gather to futurist * Cache [pci]alias parsing * Validate [pci]alias at service startup * Validated that PCI alias has proper ids * Multiple spec per PCI alias limitation * Return HTTP400 for multi spec pci alias if PCI in Placement * api: Address issues with hypervisors APIs * libvirt: Enable autodeflate and freePageReporting for memballoon * api: Address issues with server diagnostics APIs * api: Address issues with keypairs APIs * db: Resolve alembic deprecation warning * api: Address issues with instance actions API * Fix small documentation issue * Remove unused config options * doc: Fix Caracal release number * Remove contrib/clean-on-delete.py * [doc]Clarify where to set pci\_in\_placement * doc: Adding link for RabbitMQ installation during nova deployment on controller node * Fix live migration error logging * Add functional reproducer for bug 2102038 * Revert "Support glance's new location API" * libvirt: Use common naming convention for ephemeral disk labels * [doc] Adding vGPUs max\_instances caveat fix for virtual-gpu * Mention IRC topic update in PTL post-PTG checklist * Example NVMe cleaning script for one-time-use * [tool] Fix backport validator for non-SLURP * Disable NOVA\_COMPILE\_MDEV\_SAMPLES to unblock CI * api: Add response body schemas for server diagnostics API * api: Add response body schemas for remote consoles * api: Add response body schemas for server topology API * api: Add response body schemas for image metadata APIs * api: Add response body schemas for keypairs APIs * api: Add response body schemas for server IPs APIs * api: Add response body schemas for hypervisors APIs (3/3) * api: Add response body schemas for 
hypervisors APIs (2/3) * api: Add response body schemas for hypervisors APIs (1/3) * api: Add response body schemas for instance actions * api: Add response body schemas for hosts APIs * doc: Add missing API samples * tests: Ensure all APIs have a response body schema * api: Simplify parameter types * api: Only run format checks on strings * api: Add new, simpler api\_version decorator * api: Only check minimum API version * api: Stop using wsgi.Controller.api\_version to switch between API versions * api: Adjust validation helpers for a single-method future * reorder and extend pre-commit hooks * Allow autopep8 to fix more things * update pre-commit version pins * wsgi: Don't create, use lock in same line * Replace eventlet semaphores with threading one * [quota]Refactor group counting to scatter-gather * Remove python 3.9 support * live migration: Avoid volume rollback mismatches * Remove superfluous monkey patching form func test * split monkey\_patching form import * Remove nova debugger functionality * [hacking] N373 do not use eventlet primitives * Replace eventlet sleep with time.sleep * Support glance's new location API * Remove WSGIServer related config options * [doc]Describe file based GMR triggering * [doc]Remove eventlet based API endpoints * Remove WSGIService and WSGIServer classes * Remove eventlet based WSGI server entry points * Remove workaround for ovn live migration * Amend functional reproducer for bug 1899835 * Use dict object for request\_specs\_dict in the \_list\_view * Functional tests for one-time-use devices * Add one-time-use devices docs and reno * Support "one-time-use" PCI devices * Invalidate PCI-in-placement cached RPs during claim * Extend invalidate\_rp to only invalidate cache * FUP improve and add integration tests for PCI SR-IOV servers * Fix disable memballoon device * FUP: Improve libvirt fixture for hostdevs * FUP Update pci-passthrough and virtual-gpu documentation * FUP Add a warning to make non-explicit live migration request debugging easier * FUP improve comment accuracy and variable naming for tag removal * FUP Remove unnecessary PCI check * wrap wsgi\_app.init\_application with latch\_error\_on\_raise * Ignore metadata tags in pci/stats \_find\_pool logic * Fix missing backtick in configuration option help * Remove tags from README * Imported Translations from Zanata * Update master for stable/2025.1 31.0.0 ------ * Add Epoxy prelude section * ironic: fix logging of validation errors * Reproduce bug/2098496 * Fix description of [pci] alias * doc: Remove non-existent [service\_user] auth\_strategy * doc: Drop deprecated [api] auth\_strategy * Add service version for Epoxy * Update compute rpc alias for epoxy * doc: mark the maximum microversion for 2025.1 Epoxy * unified limits: discover service ID and region ID * Don't calculate the minimum compute version repeatedly * Fix serial console for ironic * api: Address TODO in microversion v2.99 * tests: Filter out eventlet deprecation warnings * api: project/tenant and user IDs are not UUIDs * api: Add response body schemas for for console auth token APIs (v2.99) * Update driver to map the targeted address for SR-IOV PCI devices * Update libvirt fixtures to support hostdevs * Update conductor and filters allowing migration with SR-IOV devices * Update manager to allow vfio pci device live migration * Add live\_migratable flag to PCI device specification * Augment the LiveMigrateData object * Update driver to deal with managed flag * Fix microversion 2.98 doc/tests for update/rebuild APIs * Fix 
microversion 2.96 for update/rebuild APIs * libvirt: Add new option to enforce multipath volume connections * Bump MIN\_{LIBVIRT,QEMU} for "Epoxy" * libvirt: fix maxphysaddr passthrough dom parsing * Add support for showing scheduler\_hints in server details * Add managed flag to PCI device specification * libvirt: allow direct SPICE connections to qemu * libvirt: direct SPICE console database changes * libvirt: direct SPICE console object changes * libvirt: Fix regression of listDevices() return type * Reproducer for bug 2098892 * FUP for reno issues * Per-Property ImageMetaPropsWeigher * Add a new ImagePropertiesWeigher * move nova-ovs-hybrid-plug to deploy with spice and fix qxl default * Add fill\_metadata() to InstanceList * Fix case sensitive comparison * Add unit test coverage of get\_machine\_ips * Drop dependency on netifaces * Update InstanceNUMACell version in more cases * Add support for showing image properties in server show response * Replace oslo\_utils.encodeutils.exception\_to\_unicode * Bump os-traits to 3.3.0 in requirements * Update InstanceNUMACell version after data migration * allow discover host to be enabled in multiple schedulers * Fix parameter order in add\_instance\_info\_to\_node * Disable the heal instance info cache periodic task * Cleanup RBAC jobs in check/gate pipeline * [Trivial] Fix the typo error * Reproduce bug/2097359 * ironic: Fix ConflictException when deleting server * trivial: Remove legacy API artifact * api: Allow min/max\_version arguments to response * api: Allow min/max\_version arguments to expected\_errors * Fix device type when booting from ISO image * Restore nova.virt.\_\_init\_\_ * Correctly patch get\_by\_flavor\_id * Switch to using oslo.utils secretutils * Add ServersViewBuilderTestV296 unit test class * libvirt: Wrap un-proxied listDevices() and listAllDevices() * Run nova-next without periodic cache healing * Revert "libvirt: Wrap un-proxied listDevices() and listAllDevices()" * Drop environment for Python 3.8 * api-ref: Update note on flavor disabled property * doc: Use dnf instead of yum * Respect supplied arguments in novncproxy\_base\_url * Allow hyphen in cinder catalog\_info service-type * Refactor response schemas for share API * Fix unit tests on macOS * Adapt tests to new messages from jsonschema 4.23.0 * Drop compatibility code for Windows * Drop unused decoding of xml content * Update Nova bdm with updated swap info * Update contributor process documentation * Replace deprecated FormatChecker.cls\_checks * libvirt: Wrap un-proxied listDevices() and listAllDevices() * Deprecate [wsgi] secure\_proxy\_ssl\_header * Refactor test\_server\_shares: Mock in Base Class and trait verification * Manila shares admin guide documentation * Add virt/libvirt error test cases * Add libvirt test to ensure metadata are working * Reports instance events to the DB regarding attaching and detaching a share * Add instance.share\_detach\_error notification * Add instance.share\_attach\_error notification * Add shares to InstancePayload * Add instance.share\_detach notification * Add instance.share\_attach notification * Add helper methods to attach/detach shares * Attach Manila shares via virtiofs (API) * Check shares support (compute manager) * Allow to mount manila share using Cephfs protocol * Support rescuing an instance with shares * Add helper methods to rescue/unrescue shares * Support resuming an instance with shares (compute manager part) * Add share\_info parameter to resume method for each driver (driver part) * Support rebooting 
an instance with shares (compute manager part) * Add share\_info parameter to reboot method for each driver (driver part) * api: Remove use of microversion constants * docs: Add contributor docs for response body validation * api: Add response body schemas for floating IP pool APIs * api: Add response body schemas for flavors APIs * api: Add response body schemas for flavor extra specs APIs * api: Add response body schemas for flavor access API * api: Add response body schemas for console auth token APIs * api: Add response body schemas for bare metal node APIs * api: Add response body schemas for availability zone APIs * api: Add response body schemas for port interface APIs * Replace distutils * api: Add response body schemas for assisted volume snapshots APIs * api: Add response body schemas for versions APIs * api: Add response body schemas for extensions API * api: Add response body schemas for host aggregate APIs * api: Add response body schemas for host aggregate actions API * api: Add response body schemas for remaining server action APIs * Add [quota]unified\_limits\_resource\_(strategy|list) * Add metadata for shares * Deletion of associated share mappings on instance deletion * Mounting the shares as part of the initialization process * Attach Manila shares via virtiofs (drivers and compute manager part) * Use client token when talking to manila * Attach Manila shares via virtiofs (manila abstraction) * api: Add response body schemas for server action APIs * Improve heal\_instance\_info\_cache periodic * Amend DB model add a unique constraint * Amend ShareMappingStatus due to asynchronous call * nova-manage: Add flavor scanning to migrate\_to\_unified\_limits * zuul: Add missing context comments for nova-next * Fix typo in release note * Update gate jobs as per the 2025.1 cycle testing runtime * Remove default override for config options policy\_file * Fix case-sensitivity for metadata keys * Revert "[libvirt] Live migration fails when config\_drive\_format=iso9660" * Fix add/remove SecurityGroup action json schemas * Skip functional tests on pre-commit config update * Drop remaining logic for websockify < 0.9.0 * Handle iso+gpt detections * Route shared storage RPC to evac dest at startup * Reproduce bug 2085975 in functional * [doc]Developer doc about PCI and SRIOV testing * [doc]Add \`socket\` option to [pci]alias numa\_policy * [libvirt]Support hw\_vif\_model = igb * [ovo]Add igb value to hw\_vif\_model image property * Refactor obj\_make\_compatible to reduce complexity * Add whitebox-devstack-multinode job to periodic * Fix detaching devices by alias with mdevs * Add repoducer test for bug 2074219 * pre-commit: Bump versions * [codespell] Fixes for latest version * Clean up the remaining logic for Windows OS Support * Revert "Test live migration between hosts with differnet cpu\_shared\_sets" * Update contributor guide for 2025.1 Epoxy * libvirt: Deprecate volume driver for unsupported volume backends * Drop usage of pkg\_resources * Imported Translations from Zanata * VMware: updates resource provider trait list * Support creating servers with RBAC SGs * docs: Remove unused sphinx deps * Libvirt: updates resource provider trait list * Fix broken link * [doc]Fix the device\_spec config doc about placement * Replace deprecated constant\_time\_compare * Remove Python 3.8 support * Fix image format error on vol-backed snapshot * Remove workaround for eventlet < 0.27.0 * Drop CentOS 8 Stream * Support os-brick specific lock\_path * Update master for stable/2024.2 30.0.0 ------ 
* Drop SQLALCHEMY\_WARN\_20 * [tox] add LOCALE\_ARCHIVE to passenv * Fix regression with live migration on shared storage * allow upgrade of pre-victoria InstanceNUMACells * Add Dalmatian prelude section * repoduce post liberty pre vicoria instance numa db issue * only safety check bootable files created from glance * Add functional repoducer for ephemeral disks * docs: update libvirt support matrix for C and D * nova-manage: modify image properties in request\_spec * Add service version for Dalmatian * Update compute rpc alias for dalmatian * doc: mark the maximum microversion for 2024.2 Dalmatian * Vmware: Remove uuid parameter from get\_vmdk\_info call * docs: Show the recommended way to PXE boot an instance * docs: Change note:: to warning:: for service user token * Skip snapshot test when missing qemu-img * [ironic] Followup fixes around flavor ids * Ignore support-matrix.css * [ironic] Factor out metadata and send to ironic * Test live migration between hosts with differnet cpu\_shared\_sets * update nova-next to use ubuntu 24.04 * Delete ./doc/source/\_static/support-matrix.css file * add pyproject.toml to support pip 23.1 * Adding sysctl changes for post-copy migration * Use format\_inspector from oslo * Rephrase the help message * libvirt: Add config option to require secure SPICE * Reproducer test for image property hw\_architecture * libvirt: Launch instances with stateless firmware * Fix deepcopy usage for BlockDeviceMapping in get\_root\_info * Add hw\_firmware\_stateless image property * Report availability of stateless firmware support * [libvirt]log XML if nova fails to parse it * libvirt: call get\_capabilities() with all CPUs online * Fix PCI passthrough cleanup on reschedule * Skip new image format tests * hardware: Correct log * Deprecate AMI image formats * Functional test test\_boot\_reschedule\_with\_proper\_pci\_device\_count * libvirt: Report available TPM models * Fix s/addtional/additional/ typo * libvirt: Remove node device XML validate flags * Remove default override for RBAC config options * libvirt: Detect vtpm support by libvirt * libvirt: Ensure swtpm\_ioctl is available for vTPM support * docs: Correct unified limits CLI commands * Remove AMI snapshot format special case * Change force\_format strategy to catch mismatches * Fix test\_vmdk\_bad\_descriptor\_mem\_limit and test\_vmdk\_bad\_descriptor\_mem\_limit\_stream\_optimized * api: Fix typo * Correct info about volume-backed server rebuild * conf: Clarify '[api] response\_validation help' text * cpu: Only check governor type on online cores * [tools] Backport validator: handle unmaintained * [tools] Ignore bot generated patches * Stabilize iso format unit tests * [ironic] Ensure we test iterators when needed * fix qemu-img version dependent tests * Add iso file format inspector * Reproduce iso regression with deep format inspection * Fix port group network metadata generation * port format inspector tests from glance * scheduler: fix \_get\_sharing\_providers to support unlimited aggr * Fix vmdk\_allowed\_types checking * Additional qemu safety checking on base images * Check images with format\_inspector for safety * Reject qcow files with data-file attributes * Fix disk\_formats in ceph job tempest config * api: Correct bug in flavors schema * pre-commit: Bump autopep8 version and update repo * api: Add response body validation helper * conf: Add '[api] response\_validation' option * tests: Ensure all APIs have a request query schema * api: Add remaining missing query parameter schema * api: Don't do 
version check if nothing required * api: Add 'removed' decorator * [CI] Replace deprecated regex * api: Migrate to JSON Schema Draft 2020-12 * tests: Ensure API schemas are valid * api: Fix bugs in schemas * tests: Ensure all APIs have a request body schema * api: Add remaining missing request body schemas * api: Add request body schemas for SG APIs * Handle neutron-client conflict * retry write\_sys call on device busy * tweak emulation job to avoid OOM errors * docs: Follow up for persistent mdevs * [doc] Improve description for nova-manage db purge * add functional repoducer for bug 2065927 * fix py312 tox definitions * Stop using split UEC image (mostly) * Enable OCaaS for several nova jobs * docs: Add more information about unified limits * Fix formatting issues in extra-specs docs * Make python 3.12 unit and functional voting * Fix hacking test with syntax error * Fix notification object hashes for python 3.12 * do not use str(url) to stringify a URL for subsequent use * Upload glance image with --file in ceph job * Reject AZ changes during aggregate add / remove host * Enable virtio-scsi in nova-next * Fix device\_type=lun with boot\_index * Avoid setting serial on raw LUN devices * [ironic] Fix rebooting instance * Remove old excludes * reno: Update master for unmaintained/zed * Note the deleyad address view * scheduler: AggregateMultitenancyIsolation to support unlimited tenant * Do not close returned image-chunk iterator & get\_verifier early * Fix: migration configuration with cpu\_shared\_set (libvirt part) * Fix: migration configuration with cpu\_shared\_set (object part) * Update api-ref for 2.95: evacuate to STOPPED * Validate flavor image min ram when resize volume-backed instance * Regression test for bug 2007968 * ignore sphinx-lint series in git blame * fix sphinx-lint errors in docs and add ci * Remove SQLAlchemy tips jobs * api: Keep track of action controllers * api: Remove FlavorManageController * api: Add missing Controller inheritance * Update contributor guide for 2024.2 Dalmatian * libvirt: Create persistent mdevs * Update min support for Dalmatian * Make overcommit check for pinned instance pagesize aware * Fix the command to list hw\_machine\_type unset instances * Update master for stable/2024.1 29.0.1 ------ * Add a Caracal prelude section * Update compute rpc alias for caracal * Add service version for Caracal * doc: mark the maximum microversion for 2024.1 Caracal * pwr mgmt: handle live migrations correctly * Reproducer test for live migration with power management * Fix nova-manage image\_property show unexpected keyword * pwr mgmt: make API into a per-driver object * Power on cores for isolated emulator threads * Reproducer for not powering on isolated emulator threads cores * Add cpuset\_reserved helper to instance NUMA topology * reno: Update master for unmaintained/xena * reno: Update master for unmaintained/wallaby * reno: Update master for unmaintained/victoria * Add a functest for verifying multiple VGPU allocations * vgpu: Allow device\_addresses to not be set * libvirt: Cap with max\_instances GPU types * Add support for showing requested az in output * Refactor vf profile for PCI device * Removed explicit call to delete attachment * Disconnecting volume from the compute host * add multinode ironic shard job * testing: Add ephemeral encryption support to fixtures * docs: Further clarifications to the SG doc * Fix nova-metadata-api for ovn dhcp native networks * Modify the mdevs in the migrate XML * libvirt: make a sub element of * Add 
hw\_ephemeral\_encryption\_secret\_uuid image property * Add encryption support to convert\_image * imagebackend: Add support to libvirt\_info for LUKS based encryption * libvirt: Configure and teardown ephemeral encryption secrets * enforce remote console shutdown * libvirt: Support maxphysaddr * trivial doc fix * Make compute node rebalance safer * Add nova-manage ironic-compute-node-move * Limit nodes by ironic shard key * Server Rescue leads to Server ERROR state if original image is deleted * [S-RBAC] adapt nova-next for port's binding:profile field change * Catch ImageNotFound on snapshot failure * bump nova-lvm to use 8G of swap from 4G * Drop unnecessary mocking of get\_minimum\_version\_all\_cells * Reserve mdevs to return to the source * Separate OSError with ValueError * Added context manager for instance lock * block\_device: Add encryption attributes to swap disks * HyperV: Remove extra specs of HyperV driver * HyperV: Remove RDP console API * HyperV: Remove RDP console connection information API * HyperV: Add todo to remove HyperVLiveMigrateData object * Follow up patch to correct a minor documentation issue * libvirt: Ensure both swtpm and swtpm\_setup exist for vTPM support * Remove HyperV: cleanup doc/code ref * run-evacuate-hook: Check cinder before creating BFV server * reno: Update master for unmaintained/yoga * Correctly reset instance task state in rebooting hard * Remove the Hyper-V driver * Check if destination can support the src mdev types * tox: Drop envdir * check both source and dest compute libvirt versions for mdev lv * Augment the LibvirtLiveMigrateData object * Fix tipo * hardware: Fix image\_meta.id within get\_mem\_encryption\_constraint * Always delete NVRAM files when deleting instances * doc: Fix markup syntax and typo * doc: drop blockdiag usage * Attach Manila shares via virtiofs (objects) * Fix wrong nova-manage command in upgrade check * Fixes: bfv vm reboot ends up in an error state * Update python classifier in setup.cfg * Updates glance fixture for create image * Revert "[pwmgmt]ignore missin governor when cpu\_state used" * Cleanup setup.py * Fix test failures with oslo.limit 2.3.0 * Fix guard for NVMeOF volumes * libvirt: stop enabling hyperv feature reenlightenment * pre-commit: Bump linter versions * pre-commit: Add mypy * [ironic] Partition & use cache for list\_instance\* * tox: Use pre-commit for pep8 target * Resolve mypy error * Imported Translations from Zanata * Allow live migrate paused instance when post copy is enabled * Allow best effort sending of notifications * Call Neutron immediately upon \_post\_live\_migration() start * [codespell] ignore codespell in git blame * [codespell] fix final typos and enable ci * Bump hacking version * Remove deprecated [api] use\_forwarded\_for * docs: Address nits in new security group doc * Reproducers for bug 1869804 * libvirt: Stop unconditionally enabling evmcs * docs: Revamp the security groups guide * Fix regression breaking Ironic boot-from-volume * [pwmgmt]ignore missin governor when cpu\_state used * tests: Use clearer syntax for empty generator * Set UEC image vars for jobs not defined in Nova * Add new nova.wsgi module * Remove nova.wsgi module * Remove redundant setting of DEFAULT\_IMAGE\_NAME(\_FILE) * Use real SDK objects in tests * Remove unnecessary aliasing * Remove Ironic client wrapper * Use SDK for deprecated baremetal proxy API * Use SDK for remaining ironic driver calls * Use SDK for remaining network operations * Use SDK for node.vif\* * Use SDK for node.set\_power\_state 
* Use SDK for node.set\_provision\_state * Use SDK for volume\_target.\* * Use SDK for getting network metadata from node * Use SDK for add/remove instance info from node * Use split kernel/initramfs Cirros UEC image by default in jobs * Update contributor guide for 2024.1 Caracal * Fix traits to cpu flags mapping * Lower num\_pcie\_ports to 12 in the nova-next job * Packed virtqueue support was added * Use configuration for single enabled mdev type * Add retry on attachment\_update * Detect maximum number of SEV guests automatically * Allow enabling cpu\_power\_management with 0 dedicated CPUs * Attach Manila shares via virtiofs (db) * Allow config to support virtiofs (driver) * Fix a couple of typos * Detach disks using alias when possible * Set libvirt device alias for volumes * Support setting alias on libvirt disks * docs: Further tweaks to the CPU models document * [libvirt]Add migration\_inbound\_addr * Fix coverage issues with eventlet * Make sqlalchemy-2x job voting again * Fix unit tests broken by olso.utils * docs: Add documentation on server groups * Remove outdated comment about allocation ratios * Adding server actions tests to grenade-multinode * Fix rebuild compute RPC API exception for rolling-upgrades * add a regression test for all compute RPCAPI 6.x pinnings for rebuild * Drop remaining deprecated upgrade\_levels option for nova-cert * docs: Fix unified limits code block text alignment * Revert "Add upgrade check for compute-object-ids linkage" * Fix python shebang * Translate VF network capabilities to port binding * Install lxml before we need it in post-run * Do not manage CPU0's state * Compile mdev samples for nova-next * Clean up service\_get\_all() * Warn if we find compute services in cell0 * Adds server show in helpers * Fix URLs in status check results * [codespell] fix typos in tests * [codespell] doc,devstack and gate typos * [codespell] apply codespell to the releasenotes * [codespell] fix typos in api-ref * [codespell] start fixing all the typos * fix sphinx-lint issues in api guide * fix sphinx-lint issues in releasenotes * Sanity check that new hosts have no instances * doc: clarify that pre-commit is optional * pre-commit: Use native hacking pre-commit hook * pre-commit: Update plugin versions * doc: Remove crud from conf.py file * docs: update libvirt support matrix by adding min versions * Add job to test with SQLAlchemy master (2.x) * Imported Translations from Zanata * Implement add\_consumer, remove\_consumer KeyManager APIs * Update master for stable/2023.2 28.0.0 ------ * Add a Bobcat prelude section * Fix missing oslo.versionedobjects library option * disable ovn based testing of test\_live\_migration\_with\_trunk * Make our nova-ovs-hybrid-plug job omit cinder * Revert "Limit nodes by ironic shard key" * Revert "Add nova-manage ironic-compute-node-move" * Revert "Make compute node rebalance safter" * objects: Stop fetching from security\_groups table * db: Replace use of backref * adapt to oslo.log changes * Fixed an error when caching multiple images in aggregate * Add service version for Bobcat * Update compute rpc alias for bobcat * doc: mark the maximum microversion for 2023.2 Bobcat * Imported Translations from Zanata * only attempt to clean up dangling bdm if cinder is installed * Follow up for unified limits: PCPU and documentation * Make compute node rebalance safter * Add nova-manage ironic-compute-node-move * Delete dangling bdms * Limit nodes by ironic shard key * Deprecate ironic.peer\_list * Reproducer for dangling bdms * Add 
documentation for unified limits * nova-manage: Add 'limits migrate\_to\_unified\_limits' * Add function to get all attachments in Cinder.API module * [functional]Fix remaining greenlet leaks * [functional] Avoid leaking greenlet in UnifiedLimits tests * introduce global greenpool * Avoid lazy-loads in resize * Avoid lazy-loads on server create * Remove a lazy load on every server show * Avoid lazy-loading in resize and rebuild/evacuate * Log excessive lazy-loading behavior * [functional] Don't leak running live migration * Do not untrack resources of a server being unshelved * Reproduce bug #2025480 in a functional test * Fix bindep for Debian bookworm * Use OSC in run-evacuate-hook instead of novaclient * Update contributor guide for 2023.2 Bobcat * Fix tox docs target * Remove n-v ceph live migration job from gate * Enforce correct choice for [DEFAULT] enabled\_ssl\_apis * Add upgrade check for compute-object-ids linkage * Remove unused mocks * Bump MIN\_{LIBVIRT,QEMU} for "Bobcat" * libvirt: Add 'COMPUTE\_ADDRESS\_SPACE\_\*' traits support * Add a new NumInstancesWeigher * Remove deprecated AZ filter * Decorate only Flavor.get\_\* methods that execute queries * Fix up inconsistent max\_count argument in o-d-ms * Add config option to configure TB cache size * Refactor CinderFixture * Improve logging at '\_numa\_cells\_support\_network\_metadata' * docs: fix 404 for ironic docs * enable validations in nova-lvm * Add a new policy for cold-migrate with host * Pick next min libvirt / QEMU versions for "C" (2024) release * Drop Fedora support * database: Archive parent and child rows "trees" one at a time * testing: Fix and robustify archive\_deleted\_rows test * db: Store unmasked password * cpu: make governors to be optional * cpu: fix the privsep issue when offlining the cpu * Fix failed count for anti-affinity check * Add online migration for Instance.compute\_id * Online migrate missing Instance.compute\_id fields * Add compute\_id to Instance object * Add dest\_compute\_id to Migration object * Add compute\_id columns to instances, migrations * Populate ComputeNode.service\_id * Add integrated job to test the Nova RBAC old defaults * Enforce quota usage from placement when unshelving * Reproducer for bug 2003991 unshelving offloaded instance * Fixes a typo in availability-zone doc * Add debug logging when Instance raises OrphanedObjectError * [alembic] Alembic operations require keywords only arguments * tests: Use GreenThreadPoolExecutor.shutdown(wait=True) * testing: Use inspect.isfunction() to check signatures * CI: fix backport validator for new branch naming * Bump nova-ceph-multstore timeout * Enable use of service user token with admin context * Use force=True for os-brick disconnect during delete * Have host look for CPU controller of cgroupsv2 location * Fix get\_segments\_id with subnets without segment\_id * Revert "Debug Nova APIs call failures" * Add nova-tox-functional-py310 to gate jobs * Fix pep8 errors with new hacking * Save cell socket correctly when updating host NUMA topology * Reproduce bug 1995153 * Stop ignoring missing compute nodes in claims * Remove silent failure to find a node on rebuild * Remove focal job for 2023.2 * add hypervisor version weigher * db: Avoid relying on autobegin * tests: Add missing args to sqlalchemy.Table * tests: Pass parameters to sqlalchemy.text() as bindparams * db: Remove unnecessary 'insert()' argument * db: Don't rely on branched connections * doc: Update version info * Allow running functional-py311 * Update to the PTL guide * 
Process unlimited exceptions raised by unplug\_vifs * Verify a move operation for cross\_az\_attach=False * Fix a typo in this URL: https://docs.openstack.org/nova/latest/admin/availability-zones.html * mypy: Fix implicit optional usage * Add grenade-skip-level-always to nova * Make scheduler lazy-load the placement client * Update serial console example client for py3 * Imported Translations from Zanata * Update min support for Bobcat * Update master for stable/2023.1 27.0.0 ------ * Make nova-next reduce mysql memory * testing: Reset affinity support global variables * Add service version for Antelope * Add the 2023.1 Antelope prelude section * Doc: update live-migration cmd * fup for power management series * Add docs for cpu management * Bump jsonschema minimum to 4.0.0 * Use mysql memory reduction flags for ceph job * doc: mark the max microversion for 2023.1 Antelope * Fix wrong description about minimum values * Revert "Add logging to find test cases leaking libvirt threads" * Fix logging in MemEncryption-related checks * Enable cpus when an instance is spawning * libvirt: let CPUs be power managed * Add logging to find test cases leaking libvirt threads * cpu: interfaces for managing state and governor * Add docs for stable-compute-uuid behaviors * Fix 6.2 compute RPC version alias * fup: support evacuate target state * Stable compute uuid functional tests * Abort startup if nodename conflict is detected * Move comment about \_destroy\_evacuated\_instances() * Protect against a deleted node id file * Check our nodes for hypervisor\_hostname changes * db: Remove the legacy 'migration\_version' table * db: Remove legacy migrations * api: extend evacuate instance to support target state * compute: enhance compute evacuate instance to support target state * Detect host renames and abort startup * Fixup patch for stable-compute-uuid series * Make resource tracker use UUIDs instead of names * Add further workaround features for qemu\_monitor\_announce\_self * Check VMDK create-type against an allowed list * libvirt: Replace usage of compareCPU() with compareHypervisorCPU() * libvirt: At start-up rework compareCPU() usage with a workaround * Handle InstanceInvalidState exception * Factor out a mixin class for candidate aware filters * Persist existing node uuids locally * Add get\_available\_node\_uuids() to virt driver * Pass service ref to init\_host(), if exists * Add virt/node module for stable uuids * Clean up after ImportModulePoisonFixture * Bump openstack-placement version in functional tox env * Microversion 2.94: FQDN in hostname * Use new get\_rpc\_client API from oslo.messaging * libvirt: Add configuration options to set SPICE compression settings * Enable new defaults and scope checks by default * Fix huge-page doc * Remove basepython def from tox.ini * Split ignored\_tags in stats.py * FUP for the scheduler part of PCI in placement * Strictly follow placement allocation during PCI claim * Reproduce PCI pool filtering bug * Rename \_to\_device\_spec\_conf to \_to\_list\_of\_json\_str * Follow up for the PCI in placement series * Allow enabling PCI scheduling in Placement * Reproducer test of bug #1999674 * Add mock to avoid loading guestfs in unit test * Fix bug 2000069 * Adds a repoducer for post\_live\_migration\_at\_destination failures * Test multi create with PCI in placement * Test reschedule with PCI in placement * Support same host resize with PCI in placement * Support unshelve with PCI in placement * Support evacuate with PCI in placement * Support cold migrate and 
resize with PCI tracking in placement * Func test for PCI in placement scheduling * Store allocated RP in InstancePCIRequest * Remove use of removeprefix * Ironic: retry when node not available * Handle zero pinned CPU in a cell with mixed policy * Reproduce asym NUMA mixed CPU policy bug * doc: soft delete and shadow tables * Support multiple config file with mod\_wsgi * Unbind port when offloading a shelved instance * Don't provide MTU value in metadata service if DHCP is enabled * Adds check for VM snapshot fail while quiesce * Adds regression functional test for 1980720 * Temporary skip some volume detach test in nova-lvm job * Update gate jobs as per the 2023.1 cycle testing runtime * Make tenant network policy default to PROJECT\_READER\_OR\_ADMIN * Handle mdev devices in libvirt 7.7+ * Reproducer for bug 1951656 * Ironic nodes with instance reserved in placement * libvirt: fix typo in test\_config * Add a hacking rule for the setDaemon method * Bump minimum version of os-vif to 3.1.0 * Correct config help message related options * Test ceph-multistore with a real image * vmwareapi: Mark driver as experimental * hyperv: Mark driver as experimental * Add os\_vif in nova-config-generator * Update contributor guide for 2023.1 Antelope * record action log when deleting shelved instance * Enable glance location tests on ceph-multistore * Doc: Fix list servers detailed response param * DOC update remote console access * Correct doc references * Filter PCI pools based on Placement allocation * Make allocation candidates available for scheduler filters * Map PCI pools to RP UUIDs * Split PCI pools per PF * Support resource\_class and traits in PCI alias * Create RequestGroups from InstancePCIRequests * Improving logging at '\_allocate\_mdevs' * Correct wrong reference in docs * libvirt: Report ephemeral encryption traits based on imagebackend * libvirt: Add encryption support to qemu-img create command * [compute] always set instance.host in post\_livemigration * Remove deleted projects from flavor access list * update default overcommit * Adds a repoducer for post live migration fail * Remove the periodic Centos 8 job * Fix link to Cyborg device profiles API * Unit test exceptions raised duing live migration monitoring * fix typo in architecture document * Fix typos in nova docs * Update Availability zone doc page * Correct reST markup in config help string * requires os-traits >= 2.9.0 * Updated flavors doc * Imported Translations from Zanata * extend\_volume of libvirt/volume/fc should not use device\_path * Switch to 2023.1 Python3 unit tests and generic template name * Update master for stable/zed * Remove mentions of removed scheduler filters 26.0.0.0rc1 ----------- * db: Drop redundant indexes on instances and console\_auth\_tokens tables * Prelude section for Zed release * Update min supported service version for Zed * Bump min oslo.concurrencty to >= 5.0.1 * Update nova-manage doc page * Gracefully ERROR in \_init\_instance if vnic\_type changed * Reproduce bug 1981813 in func env * doc: mark the max microversion for zed * Update compute rpc version alias for zed * add header alingment for PrettyTable 3.4.0 * Doc follow up for PCI in placement * Follow up for the PCI in placement series * Add traits for viommu model * libvirt: Add vIOMMU device to guest * Skip UnshelveToHostMultiNodesTest in nova-multi-cell * Add API support for rebuilding BFV instances * Add missing descriptions in HACKING.rst * Add conductor RPC interface for rebuild * Add support for volume backed server rebuild 
* Add documentation and releasenotes for RBAC change * Handle "no RAM info was set" migration case * Generate request\_id for Flavor based InstancePCIRequest * Allow enabling PCI tracking in Placement * Handle PCI dev reconf with allocations * Heal allocation for same host resize * Fix rescue volume-based instance * Heal PCI allocation during resize * Heal missing simple PCI allocation in the resource tracker * Retry /reshape at provider generation conflict * Move provider\_tree RP creation to PciResourceProvider * Stop if tracking is disable after it was enabled before * Support [pci]device\_spec reconfiguration * Reject devname based device\_spec config * Ignore PCI devs with physical\_network tag * Reject mixed VF rc and trait config * Reject PCI dependent device config * Extend device\_spec with resource\_class and traits * Basics for PCI Placement reporting * Trigger reschedule if PCI consumption fail on compute * Reproduce bug 1986838 * Keep legacy admin behaviour in new RBAC * Add locked\_memory extra spec and image property * Remove system scope from all APIs * Revert "Test attached volume extend actions in the nova-next job" * Add VDPA support for suspend and livemigrate * Fix suspend for non hostdev sriov ports * Add source dev parsing for vdpa interfaces * Alphabetizes objects * Fix a deprecation warning about threading.Thread * doc: Update a PTL guide * Avoid n-cond startup abort for keystone failures * Unify placement client singleton implementations * nova-live-migration tests not needed for Ironic * Adapt websocketproxy tests for SimpleHTTPServer fix * enable blocked VDPA move operations * Add reno for fixing bug 1941005 * libvirt: Consolidate create\_cow\_image and create\_image * imagebackend: Add disk\_info\_mapping as an optional attribute of Image * blockinfo: Add encryption details to the disk\_info mappings when provided * virt: Add block\_device\_info helper to find encrypted disks * Fix a deprecation warning about distutils * Add limitation to docs about bug 1983570 * Fix exception catch when volume mount fails * Rename whitelist in tests * Rename exception.PciConfigInvalidWhitelist to PciConfigInvalidSpec * Rename [pci]passthrough\_whitelist to device\_spec * Add compute restart capability for libvirt func tests * Poison /sys access via various calls in test * Update RequestSpec.pci\_request for resize * Reproducer for bug 1983753 * update default numa allocation strategy * Imported Translations from Zanata * [doc] Fix BFV Rescue trait name in microversion * Remove double mocking... 
again * Adds img\_config\_drive in nova users docs * Remove workaround for SQLAlchemy < 1.4 * Transport context to all threads * For evacuation, ignore if task\_state is not None * block\_device: Add encryption attributes to image and ephemeral disks * block\_device: Add DriverImageBlockDevice to block\_device\_info * scheduler: Add an ephemeral encryption pre filter * virt: Add ephemeral encryption flag * compute: Update bdms with ephemeral encryption details when requested * BlockDeviceMapping: Add is\_local property * BlockDeviceMapping: Add encryption fields * image\_meta: Add ephemeral encryption properties * imagebackend: default by\_name image\_type to config correctly * libvirt: Remove defunct comment * libvirt: Improve creating images INFO log * block\_device\_info: Add swap to inline * Update libvirt enlightenments for Windows * Fix mocking SafeConnectedTestCase * Remove the PowerVM driver * hacking: force explicit import of python's mock * Remove double mocking * [docs] Fix mention of custom scheduling after Wallaby * Updated Suspend definition in server concepts doc * Use unittest.mock instead of third party mock * api: Drop generating a keypair and add special chars to naming * Add a workaround to skip hypervisor version check on LM * Modify the url of openstack client commands * add regression test case for bug 1978983 * [trivial] Simplify dict get call by removing unused default * Modify the command of getting serial console * Allow unshelve to a specific host (REST API part) * Allow unshelve to a specific host (Compute API part) * Move uwsgi-gmr reno to the proper place * Fix compatibility with jsonschema 4.x * Remove unused requirement * libvirt: Remove unnecessary TODO * libvirt: Ignore LibvirtConfigObject kwargs * Add a proper schema version to network\_data.json * libvirt: remove default cputune shares value * Remove use of pkg\_resources * etc: Highlight absence of packages from config gen * Test setting the nova job to centos-9-stream * Adds link in releasenotes for hw machine type bug * zuul: Put Centos9 Stream job periodic-weekly and experimental * Replace "db archive" with "db archive\_deleted\_raws" * update nova-next and nova-ovs-hybrid-plug to disable linuxbridge * Test attached volume extend actions in the nova-next job * Adds validation for hw machine type in host caps * Imported Translations from Zanata * ignore deleted server groups in validation * add repoducer test for bug 1890244 * Remove return from rpc cast * zuul: Temporarly put Centos9 Stream job non-voting * Make test\_wait\_for\_instance\_event\_\* test time independent * Optimize numa\_fit\_instance\_to\_host * Retry attachment delete API call for 504 Gateway Timeout * Change TooOldComputeService upgrade check code to failure * Optimize \_local\_delete calls by compute unit tests * docs: Correct path used for config on Hyper-V * Fix typos * Fix typos in help messages * Update [scheduler] limit\_tenants\_to\_placement\_aggregate config help * Add missing condition * Fix duplicates keys * Accept both 1 and Y as AMD SEV KVM kernel param value * libvirt: Add a workaround to skip compareCPU() on destination * neutron: Unbind remaining ports after PortNotFound * Fix race condition in \_get\_pci\_passthrough\_devices * Revert "zuul: Skip block migration with attached volumes tests due to bug #1931702" * trivial: fix deprecation warning in notification fixture * Fix extending non LUKSv1 encrypted volumes * Switch libvirt event timeout message to warning * Add Python 3.10 functional jobs * Adapt bindep 
ubuntu-jammy * Add releasenote about dropping pythin 3.6|7 support * Enable live\_migration\_events in nova-ovs-hybrid-plug * Allow claiming PCI PF if child VF is unavailable * Record SRIOV PF MAC in the binding profile * Log the exception returned from a cell during API.get() * Drop lower-constraints.txt and its testing * VMware: StableMoRefProxy for moref recovery * [CI] Install dependencies for docs target * Simulate bug 1969496 * Remove unavailable but not reported PCI devices at startup * Isolate PCI tracker unit tests * Fix LM rollback w/o multi port bindings extension * Reproduce live migration rollback w/o multi port bindings error * Fix segment-aware scheduling permissions error * Move centos stream testing to centos-9-stream * Deprecate [api] use\_forwarded\_for * VMware: Split out VMwareAPISession * db: Close connection on early return * enable locking test fixture * Update python testing as per zed cycle teting runtime * VMware: Early fail spawn if memory is not multiple of 4 * Store pf\_mac\_address and vf\_num in extra\_info * db: Resolve additional SAWarning warnings * db: Don't rely on autocommit behavior * db: Replace use of Column.copy() method * db: Remove inplicit coercion of SELECTs * db: Replace use of Connection.connect() method * db: Remove use of empty 'and\_()' * db: Replace use of strings in join, defer operations * db: Trivial rewrapping of warning filters * db: Narrow down deprecation warning filter * Add nova-emulation to the experimental queue too * pre-commit: Sync hacking version, bump plugins * hacking: Prevent use of six * Sync rootwrap.conf from oslo.rootwrap * Fix wrong attribute to find remote address * Imported Translations from Zanata * Fix pre\_live\_migration rollback * objects: Don't use generic 'Field' container * Retry in CellDatabases fixture when global DB state changes * objects: Remove 'NovaObjectDictCompat' from 'InstancePCIRequest' * objects: Remove 'NovaObjectDictCompat' from 'Migration' * doc: Remove useless contributor/api-2 doc * Fix unit tests when they are run with OS\_DEBUG=True * refactor: remove duplicated logic * Adds regression test for bug LP#1944619 * Fix the PCI device capability dict creation * Attempt to thin out nova-ceph-multistore * Move FIPS jobs to experimental and periodic queue * Update contributor guide for Zed * Add Python3 zed unit tests * Update master for stable/yoga * Changes Emulation CI to weekly-periodic 25.0.0 ------ * Revert "Adds regression test for bug LP#1944619" * Clean up when queued live migration aborted * Add functional tests to reproduce bug #1960412 * Add the Yoga prelude section * reenable greendns in nova * Fix migration with remote-managed ports & add FT * Follow up for unified limits * Enable unified limits in the nova-next job * Add grenade-skip-level irrelevant-files config * Move file system freeze after end of mirroring * mention the cycle highlights in the ptl guide * Fix unit test for oslo.context 4.1.0 * Lightos driver release note * Add oslo.limit options to generated nova.conf * Update compute rpc version alias for yoga * doc: mark the max microversion for yoga * zuul-job for Adds Pick guest CPU architecture based on host arch in libvirt driver support * Add volume-rebuild support to cinder module * Follow up for nova-manage image property commands * driver/secheduler/docs for Adds Pick guest CPU architecture based on host arch in libvirt driver support * object/notification for Adds Pick guest CPU architecture based on host arch in libvirt driver support * Complete phase-1 of RBAC 
community-wide goal * Separate flavor extra specs policy for server APIs * Modify remaining APIs as per RBAC new guidelines * Add reno for unified limits * Update quota apis with keystone limits and usage * Add legacy limits and usage to placement unified limits * Enforce resource limits using oslo.limit * Tell oslo.limit how to count nova resources * Update quota sets APIs * Update limit APIs * Update quota\_class APIs for db and api limits * Enforce api and db limits * Add logic to enforce local api and db limits * Make unified limits APIs return reserved of 0 * Assert quota related API behavior when noop * Add stub unified limits driver * Move keypair quota error message into exception * manage: Add image\_property commands * libvirt: Register defaults for undefined hw image properties * Update live\_migration\_downtime definition * Lightbits LightOS driver * Fix eventlet.tpool import * db: Retrieve VirtualInterface objects by UUID, not address * Server actions APIs scoped to project scope * Make more project level APIs scoped to project only * [nova/libvirt] Support for checking and enabling SMM when needed * Convert SYSTEM\_ADMIN|READER to Admin and system scope * Move rule\_if\_system() method to base test class * Test PROJECT\_ADMIN APIs with no legacy rule case * neutron: Allow to spawn VMs with port without IP address * trivial: Add a white space in an error message * VMware: Support volumes backed by VStorageObject * Support use\_multipath for NVME driver * Document remote-managed port usage considerations * Adds regression test for bug LP#1944619 * doc: Correct version for virtio-net multiqueue * doc: Split up notifications document * docs: Document virtio-net multiqueue * Gracefull recovery when attaching volume fails * [yoga] Add support for VNIC\_REMOTE\_MANAGED * Filter computes without remote-managed ports early * Add supports\_remote\_managed\_ports capability * Bump os-traits to 2.7.0 * Introduce remote\_managed tag for PCI devs * Join quota exception family trees * Fix to implement 'pack' or 'spread' VM's NUMA cells * [yoga] Include pf mac and vf num in port updates * Raise InstanceNotFound on fkey constraint fail saving info cache * Cleanup old resize instances dir before resize * Update announce self workaround opt description * Debug Nova APIs call failures * skip test\_tagged\_attachment in nova-next * docs: Follow-ups for cells v2, architecture docs * VmWare: Remove unused legacy\_nodename regex * Move 'hw:pmu', 'hw\_pmu' parsing to nova.virt.hardware * [yoga] Add PCI VPD Capability Handling * api: Reject duplicate port IDs in server create * Revert "Revert resize: wait for events according to hybrid plug" * Add nova-ovs-hybrid-plug job * tox: Install extras * Move optional build dependencies to 'extras' * requirements: Bump minimum testtools to 2.5.0 * Remove Python 2-specific imports * requirements: Remove os-xenapi * Add service version check workaround for FFU * Nova resize don't extend disk in one specific case * Add check job for FIPS * Move ReaderWriterLock to the test tree * Add fixtures to requirements * Remove deprecated opts from VNC conf * Update centos 8 py36 functional job nodeset to centos stream 8 * List auth plugin parameters for [keystone] section * Add wrapper for oslo.concurrency lockutils.ReaderWriterLock() * Test aborting queued live migration * libvirt: Add announce-self post live-migration workaround * Make the CellDatabases fixture work with fasteners >= 0.15 * Remove workaround for old python-ironicclient * Fill the 
AcceleratorRequestBindingFailed exception msg info * Re-add python 3.6 functional testing * functional: Add reproducer for #1907775 * Fill the exception msg * Log which instance event was timed out * [rt] Apply migration context for incoming migrations * Extend the reproducer for 1953359 and 1952915 * [doc] propose Review-Priority label for contribs * Reproduce bug 1953359 * Reattach mdevs to guest on resume * conf: Allow cinderclient and os_brick to independently log at DEBUG * nova-next: Deploy noVNC from source instead of packages * nova-next: Drop NOVA_USE_SERVICE_TOKEN from subnode * libvirt: Create qcow2 disks with the correct size without extending * block_device: Ignore VolumeAttachmentNotFound during detach * Add regression test for bug #1937084 * libvirt: Ensure all volume drivers log the instance whenever possible * Migrate RequestSpec.numa_topology to use pcpuset * Reproduce bug 1952941 * Revert project-specific APIs for servers * Allow per-context rule in error messages * Make API fixture pass roles * Deprecate the powervm driver * Updating tests with Yoga testing runtime * Retry image download if it's corrupted * api-ref: Adjust BFV rescue non-support note * docs: Update libvirt distro support matrix for Xena * Add 'hw:vif_multiqueue_enabled' flavor extra spec * db: Remove unnecessary warning filters * db: Remove use of 'bind' arguments * Catch an exception in power off procedure * db: Don't use legacy 'Row()' methods * db: Don't pass strings to 'Connection.execute' * db: Replace 'insert.inline' parameter with 'Insert.inline()' method * db: Replace use of legacy select() calling style * db: Replace use of 'autoload' parameter * db: Replace use of Executable.scalar(), Executable.execute() * tests: Enable SQLAlchemy 2.0 deprecation warnings * tests: Restore - don't reset - warning filters * Add debug log for scheduler weight calculation * Close Glance image if downloading failed * Log instance event wait times * Apply common irrelevant_files for centos 8 job * Add autopep8 to tox and pre-commit * Update Interop doc * Fix interference in db unit test * Use ReplaceEngineFacade fixture * [Trivial] Fix wrong microversion in TestClass name * Refactor Database fixture * Remove SESSION_CONFIGURED global from DB fixture * Add a WA flag waiting for vif-plugged event during reboot * Enable min pps tempest testing in nova-next * Remove broken tempest-full-py3-opensuse15 job * db: Remove nova-network models * db: Remove models for removed services, features * objects: Remove 'bandwidth' fields from notifications * db: Remove models that were moved to the API database * Ignore plug_vifs on the ironic driver * nova-manage: Always get BDMs using get_by_volume_and_instance * Reno for qos-minimum-guaranteed-packet-rate * Revert "Temp disable nova-manage placement heal_allocation testing" * Temp disable nova-manage placement heal_allocation testing * [nova-manage]support extended resource request * Fix unit test for oslo.concurrency 4.5 * Log failed metadata retrievals * Check Nova project changes with Tobiko scenario test cases * Clean up allocations left by evacuation when deleting service * Avoid unbound instance_uuid var during delete * Query ports with admin client to get resource_request * Fix instance's image_ref lost on failed unshelving * ensure samples folder exists for microversion * Reproducer unit test for bug 1934094 * docs: Add new architecture guide * docs: Add a new cells v2 document * docs: Add notes about IoOpsWeigher, BuildFailureWeigher * db: Increase
timeout for migration tests * objects: Stop querying the main DB for keypairs * db: Remove legacy placement models * db: Remove unused build_requests columns * db: De-duplicate list of removed table columns * db: Enable auto-generation of API DB migrations * Prevent leaked eventlets to send notifications * Set "cache_ok=True" in "TypeDecorator" inheriting classes * api: enable oslo.reports when using uWSGI * Define new functional test tox env for placement gate to run * Ensure MAC addresses characters are in the same case * zuul: Move live migration jobs back to voting * fup: Refactor and simplify Cinder fixture GET volume mock * fup: Move _wait_for_volume_{attach,detach} to os-volume_attachments * compute: Update volume_id within connection_info during swap_volume * Update min supported service version for Yoga * Add regression test for bug #1943431 * nova-manage: Ensure mountpoint is passed when updating attachment * Reproduce bug 1945310 * Store old_flavor already on source host during resize * tools: Ignore bot-generated branch creation patches * db: Add migration to resolve shadow table discrepancies * tests: Silence noise from database tests * Reproduce bug 1944759 * tests: Address some nits with database migration series * tests: Walk database migrations in correct order * Fix the wrong exception used to retry detach API calls * Add missing __init__.py in nova/db/api * Update contributor guide for Yoga * Add Python3 yoga unit tests * Update master for stable/xena * Remove the code that never reach * Add section for 'nova-manage placement audit' tool 24.0.0.0rc1 ----------- * Add the Xena prelude section * docs: Add nova-volume volume_attachment refresh admin workflow * Fix nova-manage db version * Reproduce bug/1943436 * Add more retries to TestMigrateFromDownHost tests * [doc] port-resource-request-groups not landed in Xena * fup: Print message logging uncaught nova-manage exceptions * Support Cpu Comparison on Aarch64 Platform * Allow 'bochs' as a display device option * Update compute rpc version alias for xena * Add --sleep option for archive_deleted_rows --until-complete * neutron: Remove references to dead 'qos-queue' extension * neutron: Rework how we check for extensions * Avoid excessive sleep in vmware unit test * Parse alias from domain hostdev * Convert features not supported error to HTTPBadRequest * [func test] move unshelve test to the proper place * Support interface attach / detach with new resource request format * workarounds: Remove rbd_volume_local_attach * workarounds: Remove disable_native_luksv1 * Add two new hacking rules * [func test] refactor interface attach with qos * tests: Address nits for configurable-instance-hostnames series * Add force kwarg to delete_allocation_for_instance * db: Handle parameters in DB strings * Remove module level caching * Support move ops with extended resource request * Expose the mdev class * Support boot with extended resource request * Provide and use other RCs for mdevs if needed * Provide the mdev class for every PCI device * VmWare: Use of id shadows built-in function * Vmware: Fix spelling in test * docs: admin/networking rename neutron_tunneled to neutron_tunnel * policy: Deprecate field from 'os-extended-server-attributes' policy * nova-manage: Introduce volume show, refresh, get_connector commands * Add some missing parameters in docs of os-cells * Reproduce bug 1941005 * fup: Fix os-volume_attachments api-ref parameters * Fix documentation about cpu topologies * api: Introduce
microversion 2.89 adjusting os-volume_attachments * api: Log actual number of UUIDs queried * compute: Query the service group API within check_instance_host * fup: Increase service_down_time beyond INITIAL_REPORTING_DELAY in test * conf: Remove deprecated aliases * tests: Merge 'test_utils', 'test_scheduler_utils' * scheduler: Merge driver into manager * docs: Drop references to non-filter scheduler drivers * Avoid modifying the Mock class in test * address open redirect with 3 forward slashes * Transfer RequestLevelParams from ports to scheduling * Parse extended resource request from the port data * Add missing [oslo_reports] options * [func test] ports with both bw and pps resources * [func test] refactor asserts in qos tests * [func test] refactor assertPortMatchesAllocation * Support same_subtree in allocation_candidate query * Bump min placement microversion to 1.36 * Add same_subtree field to RequestLevelParams * Reject server operations with extended resource req * Reject server create with extended resource req * Detect port-resource-request-groups neutron API extension * [func test] neutron fixture for extended resource request * [func test] move port creation to the NeutronFixture * [func test] create pps resource on OVS agent RP * [func test] move port resource request tests * Fix inactive session error in compute node creation * Prevent deletion of a compute node belonging to another host * workarounds: Add libvirt_disable_apic * fup: Remove unused legacy block_device_info format * fup: Move _migrate_stub into LibvirtMigrationMixin * fixtures: Add legacy APIs to cinder fixture * tests: Sanity check some tests * api: Remove unnecessary 'base_' prefix from schemas * api: Pass request to 'addresses' view builder * api: Rename 'parameter_types.hostname' -> 'fqdn' * VmWare: Fix deprecation warning in unit tests * VMWare: Use get_hardware_devices throughout * Fix 1vcpu error with multiqueue and vif_type=tap * VMWare: Use WithRetrieval to get all results * libvirt: Abort live-migration job when monitoring fails * tests: Enable SADeprecationWarning warnings * db: Final cleanups * docs: Add documentation on database migrations * Accelerator smartnic SRIOV support * Invalidate provider tree when compute node disappears * Clear rebalanced compute nodes from resource tracker * Add functional regression test for bug 1853009 * fup: Assert state of connection_info during LM rollback in func tests * fup: Make connection_info returned by CinderFixture unique per attachment * fup: Mock all unix connectors in os-brick fixture * fup: Move OSBrickFixture into base.ServersTestBase * compute: Ensure updates to bdms during pre_live_migration are saved * Add a regression test for bug 1939545 * fake: Ensure need_legacy_block_device_info returns False * Revert "tox: Encode specific Python versions" * tests: Add test for bug #1936278 * db: Enable auto-generation of migrations * db: Integrate alembic * db: Normalize migrations tests * db: Trivial style changes * db: Add initial alembic migration for API DB * db: Add initial alembic migration for main DB * db: Avoid use of ALTER in initial migration * db: Post reshuffle cleanup * db: Move remaining 'nova.db.sqlalchemy' modules * db: Exclude the 'use_db_reconnect' option * db: Unify 'nova.db.api', 'nova.db.sqlalchemy.api' * db: Register database config options ourselves * Block servers with vGPU and device profile in heal_allocations * func: Increase rpc_response_timeout in TestMultiCellMigrate tests * zuul: Mark live
migration jobs as non-voting due to bug #1912310 * Change the admin-visible logs for mdev support * Add regression test for bug 1938326 * smartnic support - functional tests * smartnic support - reject server move and suspend * smartnic support - cleanup arqs * smartnic support - build instance with smartnic arqs * smartnic support - create arqs * smartnic support - new vnic type * Smartnic support - cyborg drive * Add functional test for bug 1937375 * fup: Move \_wait\_for\_volume\_attach into InstanceHelperMixin * compute: Avoid duplicate BDMs during reserve\_block\_device\_name * libvirt: Handle silent failures to extend volume within os-brick * Rename vgpu options to mdev * zuul: Increase GLANCE\_LIMIT\_IMAGE\_SIZE\_TOTAL for nova-lvm * Restore original time override in test\_archive\_task\_logs * Reduce mocking in test\_reject\_open\_redirect for compat * I2f9ad3df25306e070c8c3538bfed1212d6d8682f fup: add log * manager: Reduce unnecessary calls * tests: Remove 'stub\_out\_trusted\_certs' * trivial: Cleanup a comment about a now removed libvirt version check * Allow deletion of compute service with no compute nodes * Reproducer unit test for bug 1860312 * api: Improve extra spec validator help texts * use cinder v3 * Add tests for 'AggregateInstanceExtraSpecsFilter' * Add tests for 'AggregateImagePropertiesIsolation' * tox: Encode specific Python versions * Bump oslo.db to 10.0.0 * tests: Validate AZ values * doc: allow\_resize\_to\_same\_host needs nova-api restarted * Correct reno for archiving task\_log database records * Bump os-resource-classes to 1.1.0 * extend\_volume of libvirt/volume/iscsi should not use device\_path * tests: Remove unnecessary warnings filter * tests: Silence SQLAlchemy warnings * Fix request path to query a resource provider by uuid * Use neutronclient's port binding APIs * Make test\_archive\_task\_logs deterministic * Improve prep\_resize reschedule unit test * libvirt: Switch the default video model from 'cirrus' to 'virtio' * Add func test for nova-manage db archive\_deleted\_rows --before * docs: Replace 'nova boot' with 'openstack server create' * api: Handle invalid volume UUIDs during spawn * Remove the redundant code for attach interface * api: Align availability zone info with forced host * db: Drop support for experimental concurrency * db: Remove constant aliases from 'nova.db.api' * db: Move 'sqlalchemy.types' up a directory * db: Move main DB migrations * db: Move db.sqalchemy.migration to db.migration * db: Use module-level imports for sqlalchemy (for real) * Add logs when cannot fit numa * Fix oslo policy DeprecatedRule warnings * [ironic] Minimize window for a resource provider to be lost * scheduler: Merge 'FilterScheduler' into base class * trivial: Remove FakeScheduler (for realz) * scheduler: Remove 'hosts\_up' * scheduler: 'USES\_ALLOCATION\_CANDIDATES' removal cleanup * scheduler: Remove 'USES\_ALLOCATION\_CANDIDATES' * fix sr-iov support on Cavium ThunderX hosts * Make explicit the network backend used in the CI jobs * libvirt: Always delegate OVS plug to os-vif * Fix error '404 Not Found' * objects: Fix VIFMigrateData.supports\_os\_vif\_delegation setter * zuul: Add nova-tox-functional-centos8-py36 job * tests: Allow bindep and test-setup.sh to run on EL distros * Fix max cpu topologies with numa affinity * Test numa and vcpu topologies bug: #1910466 * db: Reintroduce validation of shadow table schema * Add --task-log option to nova-manage db archive\_deleted\_rows * Move 'check-cherry-picks' test to gate, n-v check * db: Clean up 
migration code * db: Synchronize function signatures * db: Copy docs from 'nova.db.\*' to 'nova.db.sqlalchemy.\*' * db: Remove 'nova.db.base' module * db: Fold in ForeignKey constraints * db: Fold in indexes * db: Use module-level imports for sqlalchemy * db: Remove unused DB methods * db: Remove 'nova.db.sqlalchemy.utils' * db: Remove dead code * Add test coverage for API version headers in CORS * gate: Remove test\_evacuate.sh * Allow X-OpenStack-Nova-API-Version header in CORS * Retry lvm volume and volume group query * Handle OPERATION\_FAILED error during detach * Add debug log for device detach libvirt error * Fix typos in minimum version policy docs * zuul: Skip block migration with attached volumes tests due to bug #1931702 * Revert "Removing mypy to fix the nova CI" * Move fake\_notifier impl under NotificationFixture * Test the NotificationFixture * Removing mypy to fix the nova CI * api: Log correct client IP if load balancer in use * docs: Explicitly state lack of support for node renaming * requirements: Add types-paramiko * libvirt: Set driver\_iommu when attaching virtio devices to SEV instance * libvirt: Do not destroy volume secrets during \_hard\_reboot * Remove unnecessary mocks from unit test * Make test\_refresh\_associations\_\* deterministic * Improve policy doc for supported scope info * Remove PROJECT\_ADMIN limitation from zero-disk and external-network policy * virt: Add destroy\_secrets kwarg to destroy and cleanup * Error anti-affinity violation on migrations * Deprecate filters that have been replaced by placement filters * docs: Remove duplicate ToC entry * Change nova doc after Freenode -> OFTC move * Fix RequestLevelParams persistence handling in RequestSpec * Add test coverage for security group checking * Honor [neutron]http\_retries in the manual client * hardware: Use image\_meta.id within get\_mem\_encryption\_constraint * zuul: Skip swap\_volume tests as part of nova-next * libvirt: Enable 'vmcoreinfo' feature by default * docs: Address TODO * Use NotificationFixture for legacy notifications too * Create a fixture around fake\_notifier * tests: Rename 'ImageBackendFixture' to 'LibvirtImageBackendFixture' * tests: Add os-brick fixture * tests: Move libvirt-specific fixtures * docs: Add releases to libvirt distro support matrix * image\_meta: Provide image\_ref as the id when fetching from instance * Describe the use of Review-Priority label * Use the new type HostDomainOpt * Change minversion of tox to 3.18.0 * Reject open redirection in the console proxy * Add regression test for bug #1928063 * tests: Split external service fixtures out * tests: Move remaining non-libvirt fixtures * tests: Move ConfFixture * tests: Create 'nova.tests.fixtures' module * Replace getargspec with getfullargspec * Stop leaking ceph df cmd in RBD utils * rbd: Get rbd\_utils unit tests running again * Fix typo in test\_utils * Adapt to SQLAlchemy 1.4 * Add unit test for importing ed25519 ssh key * libvirt: Delegate OVS plug to os-vif * Fix bond\_mode enum 802.1ad -> 802.3ad * zuul: Remove nova-dsvm-multinode-base * zuul: Replace grenade and nova-grenade-multinode with grenade-multinode * mypy: Add type annotations to 'nova.pci' * Update SRIOV port pci\_slot when unshelving * guestfs: With libguestfs >= v1.41.1 decode returned bytes to string * Consolidate device detach error handling * Move instance power state check to \_detach\_with\_retry * Drop support for SQLite < 3.7 * Remove references to 'sys.version\_info' * glance: Improve [glance]enable\_rbd\_download and 
associated options help text * docs: Document location of nova.conf files * docs: Add man pages for 'nova-policy' * vmware: Use oslo.vmware's get\_moref\_value() * libvirt: Remove dead error handling code * Follow up type hints for a634103 * Enable mypy on libvirt/guest.py * Move the guest.get\_disk test to test\_guest * Replace blind retry with libvirt event waiting in detach * Test SRIOV port move operations with PCI conflicts * vmware: Handle empty list attributes on vSphere objects * Update min supported service version for Xena * vmware: OptionValue.value cannot be Boolean * Explaining when a base image is considered unused to avoid confusion why some images aren't deleted * docs: Populate "See Also" section with meaningful links * docs: Document options for remaining nova-\* commands * [OVN] Explicitly set nova-next job to ML2/OVS * [OVN] Explicitly set grenade job to ML2/OVS * Update contributor guide for Xena * libvirt: Ignore device already in the process of unplug errors * Switch to new rolevar for run-tempest role * Trival change: spell error of Shelve * Remove unused DeleteFromSelect db api method * api: Reject requests to force up computes when \`done\` evacuation records exist * Add regression test for bug #1922053 * Neutron fixture: don't clobber profile and vif\_details if empty * docs: Correct typos in 'nova-manage cells\_v2' docs * docs: Correct typos in 'nova-manage db' docs * docs: Rewrite 'nova-manage libvirt' docs * docs: Rewrite 'nova-manage placement' docs * docs: Rewrite 'nova-manage api\_db', 'cell\_v2' docs * docs: Rewrite 'nova-manage db' docs to use proper directives * docs: Use proper headers in nova-manage man page * docs: Follow up for SEV doc * Bumping min os-brick ver to 4.3.1 * Follow up from bp/pci-socket-affinity series * tests: Remove duplicate policy tests * tests: Remove useless mocks * tests: Merge flavor tests * Remove references to 'inst\_type' * Remove (almost) all references to 'instance\_type' * tests: Speed up 'servers' API tests * Remove references to 'instance\_type' * Remove 'create\_db\_entry\_for\_new\_instance' * Add Python3 xena unit tests * [neutron] Get only ID and name of the SGs from Neutron * Update master for stable/wallaby 23.0.0.0rc1 ----------- * Fix check\_instance\_shared\_storage() call * Bump the Compute RPC API to version 6.0 * compute: Reject requests to commit intermediary snapshot of an inactive instance * docs: Clarify value for 'hw:cpu\_realtime' extra spec * doc: Use a non-numerical anchor when referencing microversions * [Arm64][libvirt] Fix read bytes from json files * Reset global wsgi app state in unit test * libvirt: Simplify device\_path check in \_detach\_encryptor * docs: Add libvirt misc doc * docs: Change formatting of hypervisor config guides * docs: Fold in MDS security flaw doc * docs: Clarify host-model, host-passthrough differences * docs: Add CPU models guide * docs: Add SEV guide * docs: Remove duplicated PCI passthrough extra spec info * docs: Remove duplicate TPM extra spec info * docs: Add a real-time guide * docs: Add a resource limits guide * Wallaby 23.0.0 prelude section * trivial: fix word duplication in api ref * zuul: Add nova-live-migration-ceph job * releasenotes: Fix typo * docs: Change 'add\_stylesheet' to 'add\_css\_file' * doc: mark the max microversion for wallaby * Add config parameter 'live\_migration\_scheme' to live migration with tls guide * Add missed accel\_uuids for \_poll\_shelved\_instances * Add release note for vDPA * tests: Add functional test for vDPA device * api: 
Block unsupported actions with vDPA * pci: Add vDPA vnic to PCI request mapping and filtering * libvirt: Deprecate \`live\_migration\_tunnelled\` * [OVN] Adapt the live-migration job scripts to work with OVN * docs: Document UEFI secure boot feature * libvirt: Add guest generation for vDPA * libvirt: Add vDPA nodedev parsing * Dynamically archive FK related records in archive\_deleted\_rows * tests: Remove duplicated 'start\_compute' helper * tests: Add functional tests for UEFI, secure boot * docs: Add note about rescuing bfv instances with the 2.87 microversion * libvirt: Move PCI host device parsing to 'host' * objects: Add 'VDPA' to 'PciDeviceType' * Support per port numa policies with SR-IOV * pci: implement the 'socket' NUMA affinity policy * add constants for vnic type vdpa * Make nova-ceph-multistore use policy.yaml * Retry on vmware create\_vm when it fails * fakelibvirt: make kB\_mem default not laughable * libvirt: Report secure boot support to scheduler * libvirt: Wire up 'os\_secure\_boot' property * libvirt: Use firmware metadata files to configure instance * setup.cfg: Resolve warning * libvirt: Move some host arch checks to guest checks * libvirt: Add parsing of firmware metadata files * docs: Add version{added,changed} notes to the hw\_machine\_type guide * pci: track host NUMA topology in stats * pci manager: replace node\_id parameter with compute\_node * scheduler: Translate secure boot requests to trait * compute: Report COMPUTE\_SECURITY\_UEFI\_SECURE\_BOOT * hardware: Start parsing 'os\_secure\_boot' * trivial: Clarify purpose of 'Host.supports\_\*' properties * libvirt: Add missing type hints * libvirt: Stop passing around virt\_type, caps * libvirt: Add tests for insecure UEFI configuration * nova-next: Start testing the q35 machine type * Differentiate between InstanceNotFound and ConstraintNotMet * Add device event constants to fakelibvirt * libvirt: Add firmware auto-configuration support * Placeholders for DB migration backports to Wallaby * libvirt: Add support for virtio-based input devices * tests: Stop mocking 'nova.virt.libvirt.utils.get\_arch' * Add functional test for bug 1837995 * libvirt: Allow disabling CPU flags via \`cpu\_model\_extra\_flags\` * Remove VFSLocalFS * Remove non-libguestfs file injection for libvirt * apidb: Compact Train database migrations * apidb: Compact Stein database migrations * apidb: Compact Rocky database migrations * apidb: Compact Queens database migrations * apidb: Compact Pike database migrations * apidb: Compact Ocata database migrations * docs: Add admin docs for configuring and updating machine types * libvirt: Add a config update workflow test for [libvirt]hw\_machine\_type * Fix ImageBackendFixture not to support direct\_snapshot * nova-status: Add hw\_machine\_type check for libvirt instances * nova-manage: Add libvirt list\_unset\_machine\_type command * nova-manage: Add libvirt update\_machine\_type command * nova-manage: Add libvirt get\_machine\_type command * console: Improve logging * hyper-v rbd volume support * requirements.txt: Bump os-brick to 4.2.0 * libvirt: Deprecate disable\_native\_luksv1 and rbd\_volume\_local\_attach * rpc: Rework 'get\_notifier', 'wrap\_exception' * libvirt: Remove dead code * Replace md5 for fips * Remove short call timeout from func test * scheduler: Add 'socket' PCI NUMA affinity policy trait * objects: Add 'socket' PCI NUMA affinity * Enable mypy on scheduler/report.py * Turn on mypy for nova/network/neutron.py * Follow up for QoS interface attach * Docs: Correct \`\`Password 
injection using the dashboard\`\` Explanation * libvirt: parse alias out from device config * libvirt: allow querying devices from the persistent domain * libvirt: add AsyncDeviceEventsHandler * libvirt: Define and emit DeviceRemovedEvent and DeviceRemovalFailedEvent * libvirt: start tracking NUMACell.socket for hosts * libvirt: Parse the 'os' element from domainCapabilities * tests: Poison os.uname * tests: Remove unnecessary mock * tests: Add 'nova.virt.libvirt.utils.get\_arch' stub to fixture * FUP: Catch and reraise routed nets exception * Add a routed networks scheduler pre-filter * Add net & utils methods for routed nets & segments * Handle instance = None in \_local\_delete\_cleanup * Fixes the elapsed time logged during a live migration * libvirt: add IP address to libvirt guest metadata * Add regression test for bug 1914777 * Change API unexpected exception message * tox: Add passenv DISABLE\_CHERRY\_PICK\_CHECK to pep8 * libvirt: Record the machine\_type of instances in system\_metadata * vmware: Handle folder renames in rescue cmd * Run nova-live-migration job with neutron-trunk enabled * nova-next: Drop NOVA\_USE\_SERVICE\_TOKEN as it is now True by default * Centralize sqlite FK constraint enforcement * libvirt: Don't drop CPU flags with policy='disable' from guest XML * Make a couple test jobs run async devstack * Remove unused Instance method * Support interface attach with QoS ports * Uncap PrettyTable * apidb: Compact Newton database migrations * apidb: Compact Mitaka database migrations * apidb: Compact Liberty database migrations * apidb: Add manage.py script * cmd: Remove 'nova-manage db ironic\_flavor\_migration' * conf: Clean up docs for scheduler options * conf: Drop '[scheduler] periodic\_task\_interval' * Drop support for custom schedulers * fup: Merge duplicate volume attachment checks * api: Reject volume attach requests when an active bdm exists * Add regression test for bug #1908075 * Remove \_\_unicode\_\_() from nova unit test Exception * Refactor and rename test\_tcp\_rst\_no\_compute\_rpcapi * Rename ensure\_network\_metadata to amend requested\_networks * Add requested\_networks field to RequestSpec object * cmd: Remove 'nova-manage db null\_instance\_uuid\_scan' * libvirt: Always enable USB controller on PPC64 * libvirt: Stub out 'nova.virt.libvirt.utils.get\_arch' * [ironic] correct capabilities for disk images supported * zuul: Mark nova-lvm as voting * zuul: Increase SWIFT\_LOOPBACK\_DISK\_SIZE within nova-lvm job * Correct test error * [FUP] this change removes an unused paramater * docs: Move the LibvirtDistroSupportMatrix wiki page into our docs * Restart glance after updating policy in job * Disallow CONF.compute.max\_disk\_devices\_to\_attach = 0 * glance: Remove [glance]/allowed\_direct\_url\_schemes * libvirt: Use specific user when probing encrypted rbd disks during extend * tox: Enable parallel docs build * libvirt: Rationalize attachment of USB tablet * api: Reject requests to detach a volume when the compute is down * Add regression test for bug #1909120 * Fix ForbiddenWithAccelerators to HTTPForbidden for shelve API * libvirt: Remove 'hypervisor\_version' from 'libvirt\_info' * zuul: Allow encryption tests in nova-ceph-multistore again * libvirt: Bump MIN\_{LIBVIRT,QEMU}\_VERSION and NEXT\_MIN\_{LIBVIRT,QEMU}\_VERSION * libvirt: Drop support for Xen * libvirt: Drop support for UML * libvirt: Remove MIN\_LIBVIRT\_VIR\_ERR\_DEVICE\_MISSING * docs: Add reference docs for internal block device structures * libvirt: Load and cache volume drivers 
on-demand * libvirt: Stop NVMe and ScaleIO unit tests from calling os-brick * zuul: Skip test\_attach\_cloned\_encrypted\_volume in nova-ceph-multistore * add openstack-python3-wallaby-jobs-arm64 job * Add generate schemas tool * Do not inherit os\_glance properties on snapshot * Refactor ResourceRequest constructor * Enable mypy on scheduler/utils.py * Refactor update\_pci\_request\_spec\_with\_allocated\_interface\_name * Factor out port resource deallocation * Make remove allocation symmetric with add allocation * Allow extending an existing instance allocation * Refactor \_claim\_pci\_device\_for\_interface\_attach to prepare for qos * Move query param generation to RequestGroup o.vo * api: Log os-resetState as an instance action * api-ref: Clarify 'all\_tenants' command * Cyborg shelve/unshelve support * Reuse code from oslo lib for JSON policy migration * use psycopg2 binary instead of source package * api: Add support for 'hostname' parameter * utils: Remove periods from instance hostnames * Fix typo in warning message * libvirt: Mock get\_arch during some stable rescue unit tests * tests: Clean up 'test\_hypervisors' * api: Drop statistics-style fields from os-hypervisors * api: Normalize exception handling for os-hypervisors * db: Compact Train database migrations * db: Compact Stein database migrations * db: Compact Rocky database migrations * db: Compact Queens database migrations * db: Compact Pike database migrations * db: Compact Ocata database migrations * db: Compact Newton database migrations * db: Compact Mitaka database migrations * db: Compact Liberty database migrations * db: Compact Kilo database migrations * db: Compact Juno database migration * db: Compact Icehouse database migrations * db: Drop "remove" of index * db: Merge in unique constraints * db: Clean up base migration * tests: Remove 'test\_extended\_hypervisors' * Use the non polling notification waiter in func test * Remove dead code from SchedulerReportClient * Improving the description for unshelve request body * api-ref: Add notes about volume attach and detach being async * Run the db migration tests in the same test worker * Trival Change: Remove redundant code in instance delete * Revert "nova-multi-cell: Skip test\_cold\_migrate\_unshelved\_instance" * stabilize set\_host libvirt driver unit test * stabilize unit test asserting log * Fallback to same-cell resize with qos ports * Reproduce bug 1907522 in functional test * only wait for plugtime events in pre-live-migration * tests: Merge 'test\_hypervisor\_status' into 'test\_hypervisors' * Functional tests removed direct post call * Refactoring of functional.regression.test\_bug\_1702454 * Initialize global data separately and run\_once in WSGI app init * Fix a deprecation warning of 'collections' * Add a description in the PTL guide * Remove six.text\_type (2/2) * Remove six.text\_type (1/2) * Remove six.binary\_type/integer\_types/string\_types * libvirt: make cross cell resize spawn from snapshot image * nova-grenade-multinode: Skip test\_live\_block\_migration\_paused * Disable qos resize tempest test for nova-multi-cell job * Enhance simple usage API documentation * nova-evacuate: Remove leftover playbook from standalone job * lower-constraints: Bump packaging to 20.4 * tools: Allow check-cherry-picks.sh to be disabled by an env var * Remove outdated comment from tox.ini * Omit resource inventories from placement update if zero * Docs: correct cpu\_thread\_policy explanation * nova-multi-cell: Skip test\_cold\_migrate\_unshelved\_instance * 
libvirt: check for AMD SEV only on x86-64 * libvirt: Skip encryption metadata lookups if secret already exists on host * Revert "zuul: Skip test\_attach\_scsi\_disk\_with\_config\_drive in nova-ceph-multistore" * pci: Improve testing of 'nova.pci.request' module * pci: Add logging for filtering * pci: Improve docs for '\_filter\_\*' functions * nova-live-migration: Disable \*all\* virt services during negative tests * zuul: Add devstack-plugin-ceph-compute-local-ephemeral to experimental * compute: Don't detach volumes when RescheduledException raised without retry * zuul: Skip test\_attach\_scsi\_disk\_with\_config\_drive in nova-ceph-multistore * pci: Add a '\_filter\_pools' helper function * doc: Fix rendering in the PTL guide * Support sys.argv in wsgi app * Fix config option default value for sample config file * virt: Remove 'get\_all\_bw\_counters' API * Make PCI claim NUMA aware during live migration * virt: Remove 'reset\_network' API * Fix invalid argument formatting in exception messages * Fix the instance\_uuid for test * Revert "Temporarily disable parts of heal port allocation test" * virt: Remove 'change\_instance\_metadata' API * Add missing exception * api-ref: Move 'os-agents' API to obsolete section * doc: require openstack client change for every new API microversion * Ignore PCI devices with 32bit domain * Reproduce bug 1897528 * [Trivial]Add missing white space in the log message * doc: Update the PTL guide * Restore retrying the RPC connection to conductor * [docs] Fix a placement client's command * functional: Wait for revert resize to complete * Add functional-py39 testing * Update contributor guide for Wallaby * Remove [database]connection defaulting * Improve error handling during service level check * Fix unplugging VIF when migrate/resize VM * Add os-volume\_attachments reference docs * functional: Expand SR-IOV live migration tests with NUMA * functional: Add live migration tests for PCI, SR-IOV servers * Remove compute service level check for qos ops * rbd: Only log import failures when the RbdDriver is used * zuul: Reintroduce nova-dsvm-multinode-base * Remove six.byte2int/int2byte * Remove six.iteritems/itervalues/iterkeys * Remove six.moves * zuul: Replace nova-live-migration with zuulv3 jobs * zuul: Merge nova-evacuate into nova-multinode-live-migration * Fix the compute RPC 5.12 issue * Bump the lowest eventlet version to 0.26.1 * Add a regression test for 5.12 compute API issue * Add upgrade check about old computes * Set instance host and drop migration under lock * Reproduce bug 1896463 in func env * Use \_evacuate\_server helper in func test * Prevent starting services with older than N-1 computes * Remove unused bindir config option * Replace md5 with oslo version * Modify glance's copy\_image permission for nova-ceph-multistore * tox: Stop linting as part of docs target * releasenotes: Add fixes note for bug #1882521 * libvirt: Only ask tpool.Proxy to autowrap vir\* classes * docs: Make JSON valid * trivial: Remove irrelevant comment * [doc]: Fix glance image\_metadata link * Add regression test for bug #1899835 * Use subqueryload() instead of joinedload() for (system\_)metadata * Ignore InstanceNotFound exception when host rejoin cluster * Add placeholder migrations for Victoria backports * Handle disabled CPU features to fix live migration failures * Fix the vGPU dynamic options race * Update pci stat pools based on PCI device changes * CinderFixture: Return a driver\_type of \`fake\` within connection\_info * Remove self.server dependency form 
\_evacuate\_server * Add regression test for bug #1899649 * Fix virsh domifstat to get vhostuser vif statistics * Prevent archiving of pci\_devices records because of 'instance\_uuid' * hardware: Check inventory of shared CPUs for 'mixed' policy * functional: Add test for #1898272 * functional: Add tests for mixed CPU policy * objects: Fix issue in exception type * Clean up the DynamicVendorData constructor * Use cell targeted context to query BDMs for metadata * Allow excluding image type capabilities * Include removal of ephemeral backing files in the image cache manager * compute: Lock by instance.uuid lock during swap\_volume * optimize the shelve code flow * Follow up for cherry-pick check for merge patch * libvirt: Only add a USB controller if it's necessary * libvirt: Remove support for '[libvirt] use\_usb\_tablet' * libvirt: 'video.vram' property must be an integer * libvirt: Increase incremental and max sleep time during device detach * libvirt: Log exception when unable to import rbd or rados * bindep: Install python3 and python3-devel on CentOS 8 and Fedora * libvirt: Remove MIN\_{LIBVIRT,QEMU}\_PMEM\_SUPPORT * libvirt: Remove MIN\_LIBVIRT\_VIDEO\_MODEL\_VERSIONS * nova-evacuate: Disable libvirtd service and sockets during negative tests * Temporarily disable parts of heal port allocation test * Add a hacking rule for assert\_has\_calls * Fix a hacking test * libvirt: Remove MIN\_LIBVIRT\_BETTER\_SIGKILL\_HANDLING * libvirt: Remove MIN\_{LIBVIRT,QEMU}\_NATIVE\_TLS\_VERSION * Add reproducer for bug #1894095 * Add Python3 wallaby unit tests * Update master for stable/victoria 22.0.0 ------ * zuul: Introduce nova-evacuate * Log stack trace when fails launching a defined domain * libvirt: make mdev types name attribute be optional * Test for disabling greendns * libvirt: Remove MIN\_LIBVIRT\_FILE\_BACKED\_DISCARD\_VERSION * libvirt: Bump MIN\_{LIBVIRT,QEMU}\_VERSION and NEXT\_MIN\_{LIBVIRT,QEMU}\_VERSION * test\_evacuate.sh: Stop using libvirt-bin * hyperv: Configures chassis asset tags for VMs * test\_evacuate.sh: Support libvirt-bin and libvirtd systemd services * releasenote: Add known issue for bug #1894804 * doc: Fix rendering in the accelerator support doc * doc: correct the link to user/flavor.rst * fakelibvirt: Use versionutils to set min versions found in the driver * Victoria 22.0.0 prelude section * compute: Skip cinder\_encryption\_key\_id check when booting from volume * Add regression test for bug #1895696 * docs: Improve 'AggregateImagePropertiesIsolation' docs * tests: Remove '\_FakeImageService' * functional: Add and use 'GlanceFixture' * functional: Enable real policy fixture by default * api: Set min, maxItems for server\_group.policies field * tests: Add regression test for bug 1894966 * remove python warnning from tox * Use absolute path during qemu img rebase * test\_evacuate: Wait until subnode is down before starting tests * fixtures: Handle binding of first port * functional: Add test for SR-IOV neutron ports * conductor: Don't use setattr * Correctly disable greendns * Update compute rpc version alias for victoria * Remove write\_to\_file * doc: mark the max microversion for victoria * Allow tap interface with multiqueue * objects: Remove Agent, AgentList * api: Remove 'os-agents' API * virt: Remove 'get\_per\_instance\_usage' API * virt: Remove various aggregate APIs * virt: Remove 'set\_bootable' API * virt: Remove 'post\_interrupted\_snapshot\_cleanup' API * virt: Remove 'get\_console\_pool\_info' API * trivial: Final cleanup * objects: Remove 
'XenapiLiveMigrateData' * xenapi: Remove Xen-only code paths * virt: Remove 'is\_xenapi' helper * xenapi: Remove driver and tests * trivial: Remove useless comment * Support old & new versions of svm and vmx traits * Cleanup unused code * Follow up for I67504a37b0fe2ae5da3cba2f3122d9d0e18b9481 * Spell out 3pp in vmware undeprecation reno * Fix error that cannot overwrite policy rule for 'forced\_host' * Support SRIOV interface attach and detach * functional: Add cold migration tests for PCI servers * functional: Clean up PCI tests * functional: Use tempdir for CONF.instances\_path * functional: Add 'start\_compute' helper * Fix invalid assert\_has\_calls * Track error migrations in resource tracker * [Trivial] Replace ref of policy.json to policy.yaml * docs: Remove resize, cold migration from vTPM limitations * Migrate default policy file from JSON to YAML * Add support for resize and cold migration of emulated TPM files * Set 'old\_flavor', 'new\_flavor' on source before resize * Expand generic reproducer for bug #1879878 * Undeprecate the vmwareapi driver * Set migrate\_data.vifs only when using multiple port bindings * add functional regression test for bug #1888395 * Use UUID as vif and network\_id in vif tests * doc: Fix note directives * Run voting jobs in gate pipeline also * Add type hints to 'nova.virt.libvirt.utils' * Handle oslo.messaging ping endpoint * Raise 409 when removing security group from instance * Adapt the admin guide to describe the direct RBD export * vmware: Use cookiejar from oslo.vmware client directly * Move revert resize under semaphore * Move confirm resize under semaphore * Remove no-op fake calls from network\_info tests * Add note and daxio version to the vPMEM document * post live migration: don't call Neutron needlessly * func: Remove references to attachment\_id when volume\_id is used * libvirt: Remove '[vnc] keymap', '[spice] keymap' options * vmware tests: Support different moref backend representations * Describe '[glance]/enable\_rbd\_download' * Reject resize operation for accelerator * Remove support for Intel CMT events * trivial: Use constants for libvirt version checks * Only unplug vif after the device is detached from libvirt * Move equality check into LibvirtConfigGuestInterface * Remove unused vpn param from allocate\_for\_instance * Delete non tag related device metadata during detach * privsep: Add support for recursive chown, move\_tree operations * Don't unset Instance.old\_flavor, new\_flavor until necessary * libvirt: Deprecate support for non-QEMU/KVM backends * Revert "Handle Neutron errors in \_post\_live\_migration()" * libvirt: Use local variable instead of CONF * Update image\_base\_image\_ref during rebuild * Cyborg evacuate support * Provider Config File: Coding style and test cases improvement * tools: Remove xenserver tooling * docs: Remove references to XenAPI driver * hacking: Stop special casing 'plugins.xenserver' * virt: Highlight soon-to-be-unused driver APIs * Add ability to download Glance images into the libvirt image cache via RBD * doc: Update references to image properties * Default user\_id when not specified in check\_num\_instances\_quota * Add regression test for bug 1893284 * Add a lock to prevent race during detach/attach of interface * functional: Provide default 'host\_info' for '\_get\_connection' * Change default num\_retries for glance to 3 * vmware: Handle exception in destroy with attached volumes * functional: Don't inherit from 'ProviderUsageBaseTestCase' * Provider Config File: Enable loading 
and merging of provider configs * Avoid invalid file name, preventing git clone on win32 * Ensure source compute is up when confirming a resize * releasenotes: Detail support for server ops with vTPM * rbd: Move rbd\_utils out of libvirt driver under nova.storage * Add type hints to 'nova.compute.manager' * Provider Config File: Functions to merge provider configs to provider tree * Set different VirtualDevice.key * docs: Add docs for vTPM support * libvirt: Add emulated TPM support to Nova * tests: Add helpers for rebuild, cold migrate, and shelve/unshelve * libvirt: Provide VIR\_MIGRATE\_PARAM\_PERSIST\_XML during live migration * Add generic reproducer for bug #1879878 * Add reno for deprecated APIs policy changes * api: Reject non-spawn operations for vTPM * docs: fix aggregate weight multiplier property names * Few todo fixes for API new policies * db: fix database migrations when name includes dash * tests: Add reproducer for bug #1879878 * functional: Drop '\_api' suffix from placement fixture * functional: Move single-use function to its caller * libvirt: Add libvirt version mocks in test\_private\_destroy\_\* tests * libvirt: Fix indentation in test\_\_video\_model\_supported * Add checks for volume status when rebuilding * libvirt: Do not reference VIR\_ERR\_DEVICE\_MISSING when libvirt is < v4.1.0 * Fix indentation nits * [goal] Prepare for job migration to Ubuntu Focal (20.04) * zuul: use the new barbican simple-crypto job * Pass the actual target in FIP policy * Add new default roles in FIP policies * Pass the actual target in networks policy * Add new default roles in networks policies * Add scope and new default roles in extensions policies * Pass the actual target in baremetal nodes policy * Add new default roles in baremetal nodes policies * libvirt: Mock LoopingCallBase.\_sleep in rbd tests * libvirt: Pass context, instance to '\_create\_guest' * tests: Add helpers for suspend, resume and reboot of server * Remove six.reraise * Remove six.add\_metaclass * Remove six.PY2 and six.PY3 * Introduce scope\_types in FIP policy * Add test coverage of FIP policies * Rebase qcow2 images when unshelving an instance * Remove deprecated scheduler filters * Detach is broken for multi-attached fs-based volumes * fakelibvirt: Remove nova-network remnants * [Trivial] Remove wrong format\_message() conversion * Fix FIP policy for admin\_or\_owner * Remove dead volume driver code * Removes the delta file once image is extracted * Provider Config File: Function to further validate and retrieve configs * Add lsscsi to bindep * Handle Neutron errors in \_post\_live\_migration() * zuul: Start to migrate nova-live-migration to zuulv3 * Lookup nic feature by PCI address * doc: Add IPv6 metadata address * Add regression test for bug 1879787 * func: Introduce a server\_expected\_state kwarg to InstanceHelperMixin.\_live\_migrate * func: Add CinderFixture to \_IntegratedTestBase * compute: Don't delete the original attachment during pre LM rollback * Fix lower-constraints conflicts * Fix misleading documentation for live\_migration\_inbound\_addr * hardware: Reject requests for no hyperthreads on hosts with HT * tests: Add reproducer for bug #1889633 * Remove warning filters for legacy Python version * trivial: Remove unused attributes * Removed the host FQDN from the exception message * Provider Config File: YAML file loading and schema validation * compute: Validate a BDMs disk\_bus when provided * compute: As cellsv1 is no more fold \_detach\_volume back into detach\_volume * Add regression tests 
for bug #1889108 * func: Add live migration rollback volume attachment tests * Pass the actual target in volumes policy * Handle multiple 'vcpusched' elements during live migrate * Add new default roles in volumes policies * Make \_rebase\_with\_qemu\_img() generic * libvirt: Remove blockjob.end == 0 workaround resolved in libvirt v2.3.0 * Add test coverage of extensions policies * Add new default roles in hosts policies * Introduce scope\_types in hosts policy * Add test coverage of hosts policies * scheduler: Default request group to None * Use compression by default for 'SshDriver' * Pass the actual target in security\_groups policy * Add new default roles in security\_groups policies * Introduce scope\_types in volumes policy * Add test coverage of volumes policies * Pass the actual target in tenant networks policy * Add new default roles in tenant networks policies * Introduce scope\_types in tenant networks policy * Add test coverage of tenant networks policies * Introduce scope\_types in networks policy * Add test coverage of networks policies * Introduce scope\_types in security\_groups policy * Add test coverage of security\_groups policies * trivial: Test object backporting against correct version * Delete ARQs by UUID if Cyborg ARQ bind fails * QEMU/KVM: accept vmxnet3 NIC * objects: Update keypairs when saving an instance * resolve ResourceProviderSyncFailed issue * libvirt: Handle VIR\_ERR\_DEVICE\_MISSING when detaching devices * Add new default roles in multinic policies * Introduce scope\_types in multinic policy * Add test coverage of multinic policies * Fix multinic policy for admin\_or\_owner * libvirt: Use better variable names, types for '\_create\_guest' * libvirt: Remove workaround for really old QEMU * libvirt: Re-enable live snapshot for paused instances * test: add some notes and new tests for mixed instance * release note: add new features introduces by use-pcpu-and-vcpu-in-one-instance * metadata: export the vCPU IDs that are pinning on the host CPUs * hardware: create 'mixed' instance for realtime CPUs * hardware: Enable 'hw:cpu\_dedicated\_mask' for creating a mixed instance * compute: bump nova-compute version and check in API * Correct the check\_str and pass actual target in FIP pools policy * libvirt: Track blockjob status in DEBUG when polling for completion * libvirt: Remove two-level loop * Introduce scope\_types in FIP pools * Add test coverage of FIP pools policies * Introduce scope\_types in baremetal node * Add test coverage of baremetal nodes policies * Remove broken legacy zuul jobs * tests: Make '\_IntegratedTestBase' subclass 'PlacementInstanceHelperMixin' * tests: Add 'PlacementHelperMixin', 'PlacementInstanceHelperMixin' * tests: Remove 'test\_servers.ServersTestBase' * tests: Define constants in '\_IntegratedTestBase' * tests: Move single use constants to their callers * tests: Rename tests for '\_create\_guest\_with\_network' * manager: Prevent compute startup on invalid vTPM config * crypto: Add support for creating, destroying vTPM secrets * scheduler: Request vTPM trait based on flavor or image * libvirt: Add vTPM config support * libvirt: Simplify '\_create\_domain' function * scheduler: mixed instance CPU resource translating * Calculate the CPU usage for mixed instance * Validate CPU pinning configuration for mixed instance * libvirt: apply mixed instance CPU policy * hardware: refactor the code for CPU policy sanity check * objects: Introduce the 'CPUAllocationPolicy.MIXED' enum * notifications: add the pcpuset info to instance numa 
cell payload * Make our ceph job test with glance in multistore mode * objects: Introduce 'pcpuset' field for InstanceNUMACell * Move image verification and writing out of download method * doc: Correct typo * Replace assertItemsEqual with assertCountEqual * Reduce gen conflict in COMPUTE\_STATUS\_DISABLED handling * zuul: remove legacy-tempest-dsvm-neutron-dvr-multinode-full * Repro gen conflict in COMPUTE\_STATUS\_DISABLED handling * Document nova in tree virt drivers * test\_hardware: code formating for multiline construct * hardware: Tweak the 'cpu\_realtime\_mask' handling slightly * hardware: Allow 'hw:cpu\_realtime\_mask' to be omitted * Improve documentation of (unpin|pin)\_cpus\_with\_siblings() * crypto: Add type hints * compute: Do not allow rescue attempts using volume snapshot images * virt: Add 'context', drop 'network\_info' parameters for 'unrescue' * utils: Move 'get\_bdm\_image\_metadata' to nova.block\_device * Add traits for new AVX512 CPU feature * Fix mock for 'autospec' and 'new' together * catch libvirt exception when nodedev not found * Implement extend\_volume for libvirt NFS volume driver * Fix user creation with GRANT in MySQL 8.0(Ubuntu Focal) * Snapshot: offload glance upload in a native thread * Limit the number of concurrent snapshots * Extend is\_ipv6\_supported() to cover more error cases * Fix cherry-pick check for merge patch * Prevent libvirt driver starting on non Linux platform * Correct reported system memory * Remove deprecated nova.image.download hook * Raise InstanceMappingNotFound if StaleDataError is encountered * Make libvirt able to trigger a backend image copy when needed * Plumb image import functionality through our glance module * hardware: Remove '\_numa\_fit\_instance\_cell\_with\_pinning' * hardware: Invert order of NUMA topology generation * Switch from unittest2 compat methods to Python 3.x methods * Remove lxml deprecated methods * Delete resource provider in tree by top-down traversable order * Guard against missing image cache directory * hardware: Rework 'get\_realtime\_constraint' * hardware: Remove handling of pre-Train compute nodes * hardware: Don't consider overhead CPUs for unpinned instances * objects: Replace 'cpu\_pinning\_requested' helper * tests: Split instance NUMA object tests * hardware: Add validation for 'cpu\_realtime\_mask' * libvirt: Add typing information * hardware: Update and correct typing information * libvirt: ensure disk\_over\_commit is not negative * Remove hacking rules for python 2/3 compatibility * Check cherry-pick hashes in pep8 tox target * Update interop repo link * Fix pdf-docs build failing for TeX over capacity * Remove unused function parameters * Remove unnecessary retrieval of Migration object * libvirt: Don't allow "reserving" file-backed memory * libvirt: Mark e1000e VIF as supported * Use 'Exception.\_\_traceback\_\_' for versioned notifications * Add reproducer for bug #1881455 * libvirt: Remove unused host parameters * libvirt: Remove MIN\_\*\_INTERFACE\_MTU, MIN\_\*\_TX\_QUEUE\_SIZE * libvirt: Remove MIN\_LIBVIRT\_ETHERNET\_SCRIPT\_PATH\_NONE * libvirt: Remove workaround for unsupported libvirt version * Remove hooks * hacking: Modify checks for translated logs * trivial: Remove log translations * Cap jsonschema 3.2.0 as the minimal version * libvirt: Don't delete disks on shared storage during evacuate * Add functional test for bug 1550919 * zuul: Make devstack-plugin-ceph-tempest-py3 a voting check job again * [Nova] Add reference to Placement installation guide * Fix configure() 
called after DatabaseAtVersion fixture * Cleanup libvirt test\_mount unit tests * Add missing test bindep on openssl * Remove eventlet hub workaround for monotonic clock * zuul: Make barbican-simple-crypto-devstack-tempest a non-voting check job * Remove compatibility check from migrate\_data * Remove inject\_file from compute manager and virt driver * Fix an erroneous grammar in explanatory notes * hardware: Raise useful error for invalid mempage size * Remove unused object\_compat decorator * docs: Remove subtitles, metadata from man pages * [Trivial]Add missing white spaces between words in log messages * Fix pygments\_style * Add admin doc information about image cache resource accounting * rbd\_utils: increase \_destroy\_volume timeout * Reserve DISK\_GB resource for the image cache * Bump hacking min version to 3.1.0 * Add packages required for pdf-docs run to bindep.txt * Add link to PDF document * trivial: Remove remaining '\_LI' instances * trivial: Remove remaining '\_LW' instances * trivial: Remove remaining '\_LE' instances * Make quotas respect instance\_list\_per\_project\_cells * Switch to newer openstackdocstheme and reno versions * tox: Integrate mypy * compute: Remove snapshot quiesce tests for STOPPED and SUSPENDED instances * compute: Allow snapshots to be created from PAUSED volume backed instances * Bump hacking min version to 3.0.1 * Poison netifaces.interfaces() in tests * libvirt: Remove MIN\_LIBVIRT\_MULTIATTACH * Suppress remaining policy warnings in unit tests * Moving functional jobs to Victoria testing runtime * objects: Add MigrationTypeField * replace the "hide\_hypervisor\_id" to "hw:hide\_hypervisor\_id" * docs: Resolve issue with deprecated extra specs * remove support of oslo.messaging 9.8.0 warning message * Silence amqp heartbeat warning * Fix aggregate placement sync issue * Wait for all servers to be active when testing vGPUs * config: Explicitly register 'remote\_debug' CLI opts * Fix ut error on Aarch64(And other non-x86 platform) * objects: Add migrate-on-load behavior for legacy NUMA objects * Support for --force flag for nova-manage placement heal\_allocations command * docs: Add evacuation pre-conditions around the src host * Follow-up for NUMA live migration functional tests * Remove monotonic usage * Don't show upgr note for policy validation in V * Add py38 package metadata * doc: Fix list rendering in cli/nova-status.rst * Add nested resource providers limit for multi create * Add nova-status upgrade check and reno for policy new defaults * Add an online migration for PciDevice.uuid * Modify PciDevice.uuid generation code * Test multi create with vGPUs * Update contributor guide for Victoria * Fix list rendering in the accelerator support doc * zuul: Switch to the Zuulv3 grenade job * Remove translation sections from setup.cfg * Imported Translations from Zanata * Switch to TOX\_CONSTRAINTS\_FILE * Add placeholder migrations for Ussuri backports * Add Python3 victoria unit tests * Update master for stable/ussuri 21.0.0.0rc1 ----------- * FUP: Amend ussuri prelude to add docs for policy concepts * Add docs and releasenotes for BP policy-defaults-refresh * Ussuri 21.0.0 prelude section * Remove stale nested backport from InstancePCIRequests * Revert "Temporarily skip TestNovaMigrationsMySQL" * docs: Add stable device rescue docs * Allocate mdevs when resizing or reverting resize * Add new default roles in remaining servers policies * Introduce scope\_types in remaining servers Policies * Add test coverage of existing remaining servers 
policies * Add new default roles in servers attributes policies * Introduce scope\_types in servers attributes Policies * Remove Babel requirement * images: Make JSON the default output format of calls to qemu-img info * Fix follow up comments on policy work * fup: Fix [workarounds]/rbd\_volume\_local\_attach config docs * Fix server actions to be system and project scoped * Use oslo policy flag to disable default change warning instead of all * Add test coverage of existing server attributes policies * Add new default roles in servers policies * Introduce scope\_types in servers Policies * Add missing white spaces between words in log messages * Add test coverage of existing server policies * Fix servers policy for admin\_or\_owner * Pass the actual target in flavor access policy * Pass the actual target in quota class policy * Add new default roles in quota class policies * Update compute rpc version alias for ussuri * Add new default roles in server group policies * libvirt:driver:Disallow AIO=native when 'O\_DIRECT' is not available * Pass the actual target in flavor extra specs policy * Add new default roles in flavor extra specs policies * Introduce scope\_types in flavor extra spec policy * Add test coverage of existing flavor extra spec policies * Add new default roles in quota sets policies * Introduce scope\_types in quota set Policies * Add test coverage of existing quota sets policies * fix scsi disk unit number of the attaching volume when cdrom bus is scsi * Use placement stable version for functional job * doc: mark the max microversion for ussuri * doc: Fix term mismatch warnings in glossary * Pass the actual target in server external events policy * Pass the actual target in server group policy * Introduce scope\_types in quota class Policies * Add test coverage of existing quota class policies * Add new default roles in server external events policies * Pass the target in os-services APIs policy * Add new default roles in os-evacuate policies * Pass allocations to virt drivers when resizing * [Trivial] FUP: addressed comments in support non-admin filter instances * Pass the actual target in keypairs policy * Add new default roles in keypairs policies * Introduce scope\_types in keypairs * Add test coverage of existing keypairs policies * Add new default roles in shelve server policies * Introduce scope\_types in shelve server * Add test coverage of existing shelve policies * libvirt: Change UEFI check to handle AArch64 better * Functional test with pGPUs * Support different vGPU types per pGPU * libvirt: Calculate disk\_over\_committed for raw instances * fup: Add missing docstrings from get\_rescue\_device|bus diskinfo funcs * Temporarily skip TestNovaMigrationsMySQL * api: Allow custom traits * fup: Remove the use of the term \`unstable rescue\` INFO logs * fup: Combine SUPPORTED\_DEVICE\_BUS and SUPPORTED\_STORAGE\_BUSES * libvirt: Break up get\_disk\_mapping within blockinfo * libvirt: Support boot from volume stable device instance rescue * compute: Extract \_get\_bdm\_image\_metadata into nova.utils * api: Introduce microverion 2.87 allowing boot from volume rescue * compute: Report COMPUTE\_RESCUE\_BFV and check during rescue * libvirt: Add support for stable device rescue * virt: Provide block\_device\_info during rescue * Pass the actual target in os-aggregates policy * Add new default roles in os-aggregates policies * Pass the actual target in os-console-auth-tokens policy * Add new default roles in os-console-auth-tokens policies * Add new default roles in 
tenant tenant usage policies * FUP: add missing test for PUT volume attachments API * Reset the cell cache for database access in Service * Add new default roles in server password policies * Follow-up for flavor-extra-spec-validators series * docs: Add documentation for flavor extra specs * api: Add microversion for extra spec validation * Drop concept of '?validation' parameter * api: Add support for new cyborg extra specs * api: Add framework for extra spec validation * Convert delete\_on\_termination from string to boolean * Separate update and swap volume policies * Introduce scope\_types in server topology * Provide the parent pGPU when creating a new vGPU * Add new default roles in server topology policies * Add test coverage of existing server topology policies * fup: Add removal TODOs for disable\_native\_luksv1 and rbd\_volume\_local\_attach * Support live migration with vpmem * partial support for live migration with specific resources * Correct server topology policy check\_str * Correct server shelve policy check\_str * Add new default roles in server tags policies * Introduce scope\_types in server tags policy * Add test coverage of existing server tags policies * Fix server tags policy to be admin\_or\_owner * workarounds: Add option to locally attach RBD volumes on compute hosts * workarounds: Add option to disable native LUKSv1 decryption by QEMU * Fix new context comparison workaround in base tests class * Disable the policy warning temporary * Pass the actual target in os-flavor-manage policy * Add new default roles in os-flavor\_manage policies * Introduce scope\_types in os-flavor-manage * Pass the actual target in server migration policy * Add new default roles in server migration policies * Introduce scope\_types in server migration * Add test coverage of existing server migrations policies * Add test coverage of existing flavor\_manage policies * Introduce scope\_types in simple tenant usage * Add new default roles in suspend server policies * Introduce scope\_types in suspend server * Add test coverage of existing suspend server policies * Fix resume server policy to be admin\_or\_owner * Add test coverage of existing simple tenant usage policies * Introduce scope\_types in server password policy * Add test coverage of existing server password policies * Add new default roles in server metadata policies * Introduce scope\_types in server metadata * Add test coverage of existing server metadata policies * Fix server metadata policy to be admin\_or\_owner * Fix server password policy to be admin\_or\_owner * Add new default roles in security group policies * Allow versioned discovery unauthenticated * Repro bug 1845530: versioned discovery is authed * Stabilize functional tests * Add release notes for Cyborg-Nova integration * Introduce scope\_types in server group policy * Add test coverage of existing server group policies * Introduce scope\_types in server external events * Pass the actual target in limits policy * Add new default roles in limits policies * Introduce scope\_types in limits policy * Add test coverage of existing server external events policies * Introduce scope\_types in security groups policy * Add test coverage of existing security groups policies * Correct security groups policy check\_str * Pass the actual target in server diagnostics policy * Add test coverage of existing limits policies * Support for nova-manage placement heal\_allocations --cell * Allow PUT volume attachments API to modify delete\_on\_termination * Fix assertEqual param 
order in Accelerator tests * Add new default roles in server diagnostics policies * Introduce scope\_types in server diagnostics * Add test coverage of existing server diagnostics policies * Add new default roles in remote console policies * Combine the limits policies in single place * libvirt: Remove QEMU\_VERSION\_REQ\_SHARED * images: Remove Libvirt specific configurable use from qemu\_img\_info * libvirt: Always provide the size in bytes when calling virDomainBlockResize * Don't recompute weighers' minval/maxval attributes * Add new default roles in rescue server policies * Introduce scope\_types in rescue server policy * Add test coverage of existing rescue policies * Introduce scope\_types in remote consoles policy * Add test coverage of existing remote console policies * Pass the actual target in unlock override policy * Pass the actual target in migrate server policy * Add new default roles in migrate server policies * Introduce scope\_types in migrate server * Add info about affinity requests to the troubleshooting doc * Add new default roles in lock server policies * Pass the actual target in migrations policy * Add new default roles in migrations policies * Add new default roles in pause server policies * Introduce scope\_types in pause server policy * Add test coverage of existing pause server policies * Add test coverage of existing lock server policies * Add cyborg tempest job * Block unsupported instance operations with accelerators * Bump compute rpcapi version and reduce Cyborg calls * Fix unpause server policy to be admin\_or\_owner * Introduce scope\_types in list migrations * Add test coverage of existing migrations policies * Add test coverage of existing migrate server policies * Correct limits policy check\_str * Pass the actual target in os-hypervisors policy * Introduce scope\_types in os-hypervisors * Add test coverage of existing hypervisors policies * Pass the actual target in os-agents policy * Add new default roles in os-hypervisors policies * Add new default roles in os-agents policies * Feature matrix: update AArch64 information * Fix unlock server policy to be admin\_or\_owner * Pass the actual target in os-instance-usage-audit-log policy * Add new default roles in os-instance-usage-audit-log policies * FUP for Add a placement audit command * Add instance actions v284 samples test * Add new default roles in os-ips policies * Introduce scope\_types in os-ips * Add test coverage of existing ips policies * Fix os-ips policy to be admin\_or\_owner * Enable and use COMPUTE\_ACCELERATORS trait * Expose instance action event details out of the API * Add default cpu model for AArch64 * Introduce scope\_types in os-instance-usage-audit-log * Add test coverage of existing instance usage log policies * Update scheduler instance info at confirm resize * Reproduce bug 1869050 * libvirt: Use virDomainBlockCopy to swap volumes when using -blockdev * [Community goal] Update contributor documentation * Enable start/stop of instances with accelerators * Enable hard/soft reboot with accelerators * Delete ARQs for an instance when the instance is deleted * Add transform\_image\_metadata request filter * libvirt: Use domain capabilities to get supported device models * Remove future imports * NUMA LM: Add func test for bug 1845146 * Functional tests for NUMA live migration * tests: work around malformed serial XML * func tests: move \_run\_periodics() into base class * [Trivial] fixing some nits in instance actions policy tests * libvirt: Remove 
VIR\_DOMAIN\_BLOCK\_REBASE\_RELATIVE flag check * Compose accelerator PCI devices into domain XML in libvirt driver * Pass accelerator requests to each virt driver from compute manager * Create and bind Cyborg ARQs * Add Cyborg device profile groups to request spec * ksa auth conf and client for Cyborg access * nova-live-migration: Only stop n-cpu and q-agt during evacuation testing * Store instance action event exc\_val fault details * Make serialize\_args handle exception messages safely * Increase code reuse in test\_numa\_servers * libvirt: Fix unit test error block info on non x86 architecture * Add config option for neutron client retries * nova-live-migration: Ensure subnode is fenced during evacuation testing * Add new default roles in os-instance-actions policies * Add new default roles in os-flavor-access policies * Add service version check for evacuate with qos * Add service version check for live migrate with qos * Enable unshelve with qos ports * Support unshelve with qos ports * Bump python-subunit minimum to 1.4.0 * Introduce scope\_types in os-flavor-access * Add test coverage of existing flavor\_access policies * Switching new default roles in os-volumes-attachments policies * bug-fix: Reject live migration with vpmem * Refine and introduce correct parameters for test\_get\_guest\_config\_numa\_host\_instance\_topo\_cpu\_pinning * Ensures that COMPUTE\_RESOURCE\_SEMAPHORE usage is fair * Follow-ups for host\_status:unknown-only policy rule * Fix intermittently failing regression case * nova-live-migration: Wait for n-cpu services to come up after configuring Ceph * libvirt: Use oslo.utils >= 4.1.0 to fetch format-specific image data * libvirt: Correctly resize encrypted LUKSv1 volumes * virt: Pass request context to extend\_volume * images: Allow the output format of qemu-img info to be controlled * images: Move qemu-img info calls into privsep * Non-Admin user can filter their instances by more filters * Handle flavor disk mismatches when resizing * Cleanup test for system reader and reader\_or\_owner rules * vif: Remove dead code * Run sdk functional tests on nova changes * Deprecate the vmwareapi driver * Use fair locks in resource tracker * trivial: Use 'from foo import bar' * libvirt: don't log error if guest gone during interface detach * [Trivial] Fix code comment of admin password tests * nit: Fix NOTE error of fatal=False * Lowercase ironic driver hash ring and ignore case in cache * Add new default roles in os-atttach-inerfaces policies * trivial: Rename directory for os-keypairs samples * Fix os-keypairs pagination links * Introduce scope\_types in os-instance-action policy * Validate id as integer for os-aggregates * Introduce scope\_types in os-aggregates policy * Introduce scope\_types in os-volumes-attachments policy * Add test coverage of existing os-volumes-attachments policies * Fix os-volumes-attachments policy to be admin\_or\_owner * Catch exception when use invalid architecture of image * Introduce scope\_types in os-create-backup * Add test coverage of existing create\_backup policies * Fix os-create-backup policy to be admin\_or\_owner * Introduce scope\_types in os-console-output * Add test coverage of existing console\_output policies * Introduce scope\_types in os-deferred\_delete * Add a tests to check when legacy access is removed * Add new default roles in os-admin-password policies * Introduce scope\_types in os-admin-password * Add test coverage of existing os-instance-actions policies * Correct the actual target in os-instance-actions 
policy * Add new default roles in os-create-backup policies * Add new default roles in os-console-output policies * Add new default roles in os-deferred\_delete policies * Fix os-console-output policy to be admin\_or\_owner * Stop using PlacementDirect * Introduce scope\_types in os-attach-interfaces * Add test coverage of existing attach\_interfaces policies * Introduce scope\_types in os-console-auth-tokens * Remove oslo\_db.sqlalchemy.compat reference * libvirt: Remove native LUKS compat code * hyper-v: update support matrix * functional: Avoid race and fix use of self.api within test\_bug\_1831771 * Add test coverage of existing deferred\_delete policies * Fix os-os-deferred-delete policy to be admin\_or\_owner * Remove old policy enforcement in attach\_interfaces * Introduce scope\_types in os-agents policy * Add test coverage of existing os-console-auth-tokens policies * Pass the actual target in os-availability-zone policy * Ensure we pass a target in admin actions * Fix two test cases that use side effects in comprehensions * Add new default roles in Admin Action API policies * Pass the actual target in os-assisted\_volume\_snapshots policy * Add new default roles in os-assisted\_volume\_snapshots policies * Introduce scope\_types in os-assisted\_volume\_snapshots policy * Add test coverage of existing os-assisted\_volume\_snapshots policies * Fix os-attach-interfaces policy to be admin\_or\_owner * Add test coverage of existing os-agents policies * Define Cyborg ARQ binding notification event * Fix H702 pep8 error with latest hacking * libvirt: Provide the backing file format when creating qcow2 disks * Unplug VIFs as part of cleanup of networks * Name Enums * Remove unnecessary parentheses * Functional test for UnexpectedDeletingTaskStateError * Avoid allocation leak when deleting instance stuck in BUILD * Fix hypervisors paginted collection\_name * Enforce os-traits/SUPPORTED\_STORAGE\_BUSES sync * libvirt: Report storage bus traits * trivial: Update '\_get\_foo\_traits' docstrings * Follow-up: Add delete\_on\_termination to volume-attach API * libvirt: Check the guest support UEFI * Avoid PlacementFixture silently swallowing kwargs * trivial: Use recognized extra specs in tests * Use tempest-full-py3 as base job * docs: Improve documentation on writing custom scheduler filters * conf: Deprecate '[scheduler] driver' * trivial: Remove FakeScheduler * nova-net: Remove unused parameters * nova-net: Remove unused nova-network objects * nova-net: Remove unnecessary exception handling, mocks * Remove 'nova.image.api' module * Introduce scope\_types in os-evacuate * Add test coverage of existing evacuate policies * Reject boot request for unsupported images * Absolutely-non-inheritable image properties * Add JSON schema and test for network\_data.json * Support large network queries towards neutron * Add new default roles in os-availability-zone policies * Introduce scope\_types in os-availability-zone * Add test coverage of existing availability-zone policies * Correct os-availability-zone policy check\_str * Monkey patch original current\_thread \_active * Allow TLS ciphers/protocols to be configurable for console proxies * Skip to run all integration jobs for policy-only changes * set default value to 0 instead of '' * Clean up allocation if unshelve fails due to neutron * Add test coverage of existing os-aggregates policies * Reproduce bug 1862633 * Add test coverage of existing admin\_password policies * Fix instance.hidden migration and querying * Remove universal wheel 
configuration * trivial: Remove 'run\_once' helper * trivial: Merge unnecessary 'NovaProxyRequestHandlerBase' separation * libvirt: Rename \_is\_storage\_shared\_with to \_is\_path\_shared\_with * Don't error out on floating IPs without associated ports * Deprecate base rules in favor of new rules * trivial: Bump minimum version of websockify * trivial: Fetch 'Service' objects once when building AZs * trivial: Remove unused 'cache\_utils' APIs * remove DISTINCT ON SQL instruction that does nothing on MySQL * Minor improvements to cell commands * Avoid calling neutron for N networks * Handle neutron without the fip-port-details extension * Address release note nits for cpu-resources series * hardware: Add TODO to remove '(un)pin\_cpu\_with\_siblings' * Add retry to cinder API calls related to volume detach * Handle unset 'connection\_info' * Enable live migration with qos ports * Use common server create function for qos func tests * Remove extra instance.save() calls related to qos SRIOV ports * docs: Fix the monkeypatching of blockdiag * tests: Validate huge pages * Recalculate 'RequestSpec.numa\_topology' on resize * Add a placement audit command * Use COMPUTE\_SAME\_HOST\_COLD\_MIGRATE trait during migrate * Make RBD imagebackend flatten method idempotent * Avoid fetching metadata when no subnets found * zuul: Add Fedora based jobs to the experimental queue * libvirt: Add a default VirtIO-RNG device to guests * Remove remaining Python 2.7-only dependencies * nova-net: Update API reference guide * Func test for failed and aborted live migration * functional: Stop setting Flavor.id * Remove unused code * functional: Add '\_create\_server' helper * Make removal of host from aggregate consistent * Clarify fitting hugepages log message * Add ironic hypervisor doc * Fix typos for update\_available\_resource reference * nova-net: Remove layer of indirection in 'nova.network' * nova-net: Remove unnecessary 'neutronv2' prefixes * nova-net: Remove unused exceptions * functional: Add '\_delete\_server' to 'InstanceHelperMixin' * functional: Add unified '\_(build|create)\_flavor' helper functions * functional: Add unified '\_build\_server' helper function * nova-net: Kill it * Add NovaEphemeralObject class for non-persistent objects * pre-commit: Use Python 3 to run checks * nova-net: Remove now unnecessary nova-net workaround * Add a workaround config toggle to refuse ceph image upload * Fix typos in nova doc * doc: define boot from volume in the glossary * Update Testing NUMA documentation * nova-net: Remove dependency on nova-net from fake cache * nova-net: Add TODOs to remove security group-related objects * nova-net: Remove 'MetadataManager' * nova-net: Remove final references to nova-network * nova-net: Copy shared utils from nova-net module * nova-net: Remove firewall support (pt. 3) * Use Placement 1.35 (root\_required) * Fix the suppress of policy deprecation warnings * Fix excessive runtime of test test\_migrate\_within\_cell * libvirt: avoid cpu check at s390x arch * downgrade when host does not support capabilities * nova-net: Remove firewall support (pt. 2) * nova-net: Remove firewall support (pt. 
1) * Report trait 'COMPUTE\_IMAGE\_TYPE\_PLOOP' * Fix duplicated words issue like "during during boot time" * Add missing parameter vdi\_uuid in log message * [Trivial]Fix typo instnace * Handle cell failures in get\_compute\_nodes\_by\_host\_or\_node * Fix an invalid assertIsNotNone statement * Add description of live\_migration\_timeout\_action option * [api-ref] Fix the incorrect link * FUP to Iff8194c868580facb1cc81b5567d66d4093c5274 * FUP for docs nits in cross-cell-resize series * Use graceful\_exit=True in ComputeTaskManager.revert\_snapshot\_based\_resize * Plumb graceful\_exit through to EventReporter * Fix accumulated non-docs nits for cross-cell-resize series * Add cross-cell resize tests for \_poll\_unconfirmed\_resizes * Implement cleanup\_instance\_network\_on\_host for neutron API * Simplify FinishResizeAtDestTask event handling * Add sequence diagrams for cross-cell-resize * Flesh out docs for cross-cell resize/cold migrate * Enable cross-cell resize in the nova-multi-cell job * Add cross-cell resize policy rule and enable in API * Remove 'nova-xvpvncproxy' * Print help if nova-manage subcommand is not specified * FakeDriver: adding and removing instances on live migration * docs: Add note about an image signature validation limitation when using rbd * Add api for instance action details * FUP for in-place numa rebuild * Ensure source service is up before resizing/migrating * Fix race in test\_create\_servers\_with\_vpmem * Move common test method up to base class * Func test for qos live migration reschedule * Fix get\_request\_group\_mapping doc * Support live migration with qos ports * Zuul v3: use devstack-plugin-nfs-tempest-full * Add recreate test for bug 1855927 * FUP: Remove noqa and tone down an exception * nova-net: Correct some broken VIF tests * nova-net: Remove nova-network security group driver * nova-net: Remove 'is\_neutron\_security\_groups' function * nova-net: Convert remaining unit tests to neutron * Use reasonable name for provider mapping * DRY: Build ImageMetaPropsPayload from ImageMetaProps * api-ref: avoid mushy wording around server.image description * Sync ImageMetaPropsPayload fields * Move \_update\_pci\_request\_spec\_with\_allocated\_interface\_name * Revert "(Temporarily) readd bare support for py27" * db: Remove unused ec2 DB APIs * Create instance action when burying in cell0 * Do not reschedule on ExternalNetworkAttachForbidden * libvirt: flatten rbd image during cross-cell move spawn at dest * Support cross-cell moves in external\_instance\_event * Add functional test for anti-affinity cross-cell migration * Add test\_resize\_cross\_cell\_weigher\_filtered\_to\_target\_cell\_by\_spec * Add CrossCellWeigher * Add archive\_deleted\_rows wrinkle to cross-cell functional test * Confirm cross-cell resize while deleting a server * Refresh target cell instance after finish\_snapshot\_based\_resize\_at\_dest * Add functional cross-cell revert test with detached volume * Revert cross-cell resize from the API * Add revert\_snapshot\_based\_resize conductor RPC method * Flesh out RevertResizeTask.rollback * Add RevertResizeTask * Add finish\_revert\_snapshot\_based\_resize\_at\_source compute method * Deal with cross-cell resize in \_remove\_deleted\_instances\_allocations * Add revert\_snapshot\_based\_resize\_at\_dest compute method * Confirm cross-cell resize from the API * Add confirm\_snapshot\_based\_resize conductor RPC method * Follow up to I5b9d41ef34385689d8da9b3962a1eac759eddf6a * Don't hardcode Python versions in test * Keep pre-commit 
inline with hacking and fix whitespace * Move \_get\_request\_group\_mapping() to RequestSpec * trivial: Remove dead code * nova-net: Remove db methods for ProviderMethod * nova-net: Remove unused 'stub\_out\_db\_network\_api' * Add resource provider allocation unset example to troubleshooting doc * trivial: Resolve (most) flake8 3.x issues * Add troubleshooting doc about rebuilding the placement db * support pci numa affinity policies in flavor and image * Do not mock setup net and migrate inst in NeutronFixture * Extend NeutronFixture to handle multiple bindings * Revert "nova shared storage: rbd is always shared storage" * nova-net: Convert remaining API tests to use neutron * nova-net: Drop nova-network-base security group tests * Create a controller for qga when SEV is used * Also enable iommu for virtio controllers and video in libvirt * Switch to uses\_virtio to enable iommu driver for AMD SEV * libvirt: Remove MIN\_{LIBVIRT,QEMU}\_FILE\_BACKED\_VERSION * libvirt: Remove MIN\_QEMU\_FILE\_BACKED\_DISCARD\_VERSION * Optimization for nova-api \_checks\_for\_create\_and\_rebuild * Disable NUMATopologyFilter on rebuild * Nix os-server-external-events 404 condition * Add ConfirmResizeTask * Imported Translations from Zanata * Fix Typo mistake in documentation of "host aggregates in nova" * Remove dead code from MigrationTask.\_execute * Restore test\_minbw\_allocation\_placement in nova-next job * Use provider mappings from Placement (mostly) * Remove dict compat from populate\_filter\_properties * Remove now invalid cells v1 comments from conductor code * functional: Make '\_IntegratedTestBase' subclass 'InstanceHelperMixin' * functional: Remove 'api' parameter * functional: Remove 'get\_invalid\_image' * functional: Unify '\_build\_minimal\_create\_server\_request' implementations * functional: Unify '\_wait\_until\_deleted' implementations * Fup for I63c1109dcdb9132cdbc41010654c5fdb31a4fe31 * Block rebuild when NUMA topology changed * Tie requester\_id to RequestGroup suffix * refactor: RequestGroup.is\_empty() and .strip\_zeros() * Use Placement 1.34 (string suffixes & mappings) * nova-net: Remove SG tests that don't apply to neutron * Skip test\_minbw\_allocation\_placement in nova-next job * Skip cpu comparison on AArch64 * Introduce scope\_types in Admin Actions * Add test coverage of existing admin\_actions policies * Handle ServiceNotFound in DbDriver.\_report\_state * Remove unused rootwrap filters * Add new default roles in os-services API policies * Add QoS tempest config so bw tests run * nova-net: Remove use of legacy 'SecurityGroup' object * Cache security group driver * nova-net: Remove use of legacy 'Network' object * nova-net: Remove use of legacy 'FloatingIP' object * libvirt: Remove MIN\_LIBVIRT\_KVM\_AARCH64\_VERSION * Extend NeutronFixture to allow live migration with ports * Make the binding:profile handling consistent in NeutronFixture * VMware: disk\_io\_limits settings are not reflected when resize * api-guide: flesh out the server actions section * nova-net: Remove remaining nova-network quotas * docs: Clarify configuration steps for PF devices * add [libvirt]/max\_queues config option * Add a way to exit early from a wait\_for\_instance\_event() * Reusable RequestGroup.add\_{resource|trait} * Process requested\_resources in ResourceRequest init * nova-net: Flatten class hierarchy for neutron SG tests * xenapi: Remove vestigial nova-network support * zvm: Remove vestigial nova-network support * vmware: Remove vestigial nova-network support * hyperv: Remove 
vestigial nova-network support * libvirt: Remove vestigial nova-network support * libvirt: Remove 'enable\_hairpin' * nova-net: Remove final references to nova-net from functional tests * docs: Blast final references to nova-network * nova-net: Remove references to nova-net service from tests * Follow up I18d73212f9d98bc75974a024cf6fd872fdfb1ca4 * nova-net: Make the security group API a module * requirements: Limit hacking to one minor version * Switch to hacking 2.x * Integrate 'pre-commit' * nova-net: Remove associate, disassociate network APIs * docs: Blast most references to nova-network * Mask the token used to allow access to consoles * nova-net: Remove 'nova-network' binary * Suppress policy deprecated warnings in tests * Add new default rules and mapping in policy base class * Add confirm\_snapshot\_based\_resize\_at\_source compute method * Add negative test for prep\_snapshot\_based\_resize\_at\_source failing * Add negative test for cross-cell finish\_resize failing * compute: Use long\_rpc\_timeout in reserve\_block\_device\_name * Fix incorrect command examples * Introduce scope\_types in os-services * Add test coverage of existing os-services policies * nova-net: Remove 'nova-dhcpbridge' binary * api-guide: remove empty sections about inter-service interactions * doc: remove admin/manage-users * api-guide: flesh out todos in user doc * api-guide: flesh out networking concepts * api-guide: flesh out flavor extra specs and image properties * Remove nova-manage network, floating commands * docs: Rewrite quotas documentation * test cleanup: Make base TestCase subclass oslotest * api-guide: fix the file injection considerations drift * api-guide: flesh out BUILD and ACTIVE server create transitions * Add sequence diagrams to resize/cold migrate contrib doc * Add contributor doc for resize and cold migrate * nova-net: Remove 'networks' quota * Remove 'nova-console' service, 'console' RPC API * Remove 'os-consoles' API * nova-net: Remove 'USE\_NEUTRON' from functional tests * Remove '/os-tenant-networks' REST API * compute: Take an instance.uuid lock when rebooting * Do not update root\_device\_name during guest config * block\_device: Copy original volume\_type when missing for snapshot based volumes * ZVM: Implement update\_provider\_tree * Avoid spurious error logging in \_get\_compute\_nodes\_in\_db * libvirt: Bump MIN\_{LIBVIRT,QEMU}\_VERSION for "Ussuri" * Pick NEXT\_MIN libvirt/QEMU versions for "V" release * Force config drive in nova-next multinode job * Specify what RPs \_ensure\_resource\_provider collects * zuul: Remove unnecessary 'USE\_PYTHON3' * zuul: Remove unnecessary 'tox\_install\_siblings' * Add zones wrinkle to TestMultiCellMigrate * Validate image/create during cross-cell resize functional testing * Handle target host cross-cell cold migration in conductor * Start README.rst with a better title * Don't delete compute node, when deleting service other than nova-compute * Drop neutron-grenade-multinode job * FUP to Ie1a0cbd82a617dbcc15729647218ac3e9cd0e5a9 * (Temporarily) readd bare support for py27 * functional: Make '\_wait\_for\_state\_change' behave consistently * Remove (most) '/os-networks' REST APIs * nova-net: Remove unused '\*\_default\_rules' security group DB APIs * Remove 'os-security-group-default-rules' REST API * nova-net: Add TODOs for remaining nova-network functional tests * zuul: Make functional job inherit from openstack parents * Stop testing Python 2 * doc: mention that rescuing a volume-backed server is not supported * Use wrapper class for 
NeutronFixture get\_client * docs: Strip '.rst' suffix * docs: Replacing underscores with dashes * docs: Remove 'adv-config', 'system-admin' subdocs * functional: Rework '\_delete\_server' * docs: Extract rescue from reboot * functional: Change order of two classes * Remove duplicate ServerMovingTests.\_resize\_and\_check\_allocations * docs: Change order of PCI configuration steps * Reset vm\_state to original value if rebuild claim fails * Block deleting compute services with in-progress migrations * Add functional recreate revert resize test for bug 1852610 * Add functional recreate test for bug 1852610 * Convert legacy nova-live-migration and nova-multinode-grenade to py3 * docs: update SUSPENDED server status wrt supported drivers * api-ref: mark device response param as optional for list/show vol attachments * doc: add troubleshooting guide for cleaning up orphaned allocations * Remove functional test specific nova code * "SUSPENDED" description changed in server\_concepts guide and API REF * Add image caching to the support matrix * Consolidate [image\_cache] conf options * Fix review link * api-ref: re-work migrate action post-conditions * Use named kwargs in compute.API.resize * Start functional testing for cross-cell resize * Filter duplicates from compute API get\_migrations\_sorted() * Make API always RPC cast to conductor for resize/migrate * Abort live-migration during instance\_init * Helper to start computes with different HostInfos * Remove unused CannotMigrateWithTargetHost * Remove TODO from ComputeTaskManager.\_live\_migrate * Fix driver tests on Windows * Remove TODOs around claim\_resources\_on\_destination * Resolve TODO in \_remove\_host\_allocations * Remove service\_uuids\_online\_data\_migration * FUP for Ib62ac0b692eb92a2ed364ec9f486ded05def39ad * Replace time.sleep(10) with service forced\_down in tests * Remove get\_minimum\_version mocks from test\_resource\_tracker * Move compute\_node\_to\_inventory\_dict to test-only code * Delete \_normalize\_inventory\_from\_cn\_obj * Drop compat for non-update\_provider\_tree code paths * Implement update\_provider\_tree for mocked driver in test\_resource\_tracker * Remove now invalid TODO from ComputeManager.\_confirm\_resize * Remove dead HostAPI.service\_delete code * Remove the TODO about using OSC for BFV in test\_evacuate.sh * Remove super old br- neutron network id compat code * Improve error log when snapshot fails * Remove unused 'nova-dsvm-base' job * Use ListOfUUIDField from oslo.versionedobjects * Use admin neutron client to see if instance has qos ports * Use admin neutron client to gather port resource requests * Use admin neutron client to query ports for binding * Revert "openstack server create" to "nova boot" in nova docs * Move rng device checks to the appropriate method * Improve metadata server performance with large security groups * Plumb allow\_cross\_cell\_resize into compute API resize() * Refresh instance in MigrationTask.execute Exception handler * Execute CrossCellMigrationTask from MigrationTask * Provide a better error when \_verify\_response hits a TypeError * libvirt: check job status for VIR\_DOMAIN\_EVENT\_SUSPENDED\_MIGRATED event * cond: rename 'recreate' var to 'evacuate' * Pass exception through TaskBase.rollback * Follow up to I3e28c0163dc14dacf847c5a69730ba2e29650370 * Log reason for remove\_host action failing * Remove PlacementAPIConnectFailure handling from AggregateAPI * Add FinishResizeAtDestTask * Add finish\_snapshot\_based\_resize\_at\_dest compute method * Document CD 
mentality policy for nova contributors * doc: link to nova code review guide from dev policies * Use long\_rpc\_timeout in conductor migrate\_server RPC API call * Default AZ for instance if cross\_az\_attach=False and checking from API * Add functional test for two-cell scheduler behaviors * Deprecate [glance]api\_servers * Avoid error 500 on shelve task\_state race * Only allow one scheduler service in tests * Nova compute: add in log exception to help debug failures * Add support matrix for Delete (Abort) on-going live migration * Fix race in test\_vcpu\_to\_pcpu\_reshape * api-ref: re-work resize action post-conditions * Add known limitation about resize not resizing ephemeral disks * Reset instance to current vm\_state if rolling back in resize\_instance * Pass RequestContext to oslo\_policy * Add Aggregate image caching progress notifications * Remove dead set\_admin\_password code to generate password * Log some stats for image pre-cache * Switch to devstack-plugin-ceph-tempest-py3 for ceph * Add new policy rule for viewing host status UNKNOWN * Fix policy doc for host\_status and extended servers attribute * Add notification sample test for aggregate.cache\_images.start|end * Stop building docs with (test-)requirements.txt * Enable evacuation with qos ports * Allow evacuating server with port resource request * Make nova-next multinode and drop tempest-slow-py3 * libvirt: Ignore volume exceptions during post\_live\_migration * Stop converting Migration objects to dicts for migrate\_instance\_start * Require Migration object arg to migrate\_instance\_finish method * Add image precaching docs for aggregates * Remove fixed sqlalchemy-migrate deprecation warning filters * doc: note the need to configure cinder auth in reclaim\_instance\_interval * Fix listing deleted servers with a marker * Add functional regression test for bug 1849409 * Added openssh-client into bindep * Revert "Log CellTimeout traceback in scatter\_gather\_cells" * Adds view builders for keypairs controller * [Trivial] Add missing ws between words * Revert "vif: Resolve a TODO and update another" * Don't populate resources for not-yet-migrated inst * Func: bug 1849165: mig race with \_populate\_assigned\_resources * Join migration\_context and flavor in Migration.instance * Always trait the compute node RP with COMPUTE\_NODE * Fix ItemMatcher to avoid false positives * ItemsMatcher: mock call list arg in any order * Refactor rebuild\_instance * Make sure tox install requirements.txt with upper-constraints * Move Destination object tests to their own test class * Switch to opensuse-15 nodeset * Add compute side revert allocation test for bug 1848343 * Add live migration recreate test for bug 1848343 * Set instance CPU policy to 'share' through image property * Add functional recreate test for bug 1848343 * Fix up some feedback on image precache support * Add image caching API for aggregates * Add PrepResizeAtSourceTask * Add prep\_snapshot\_based\_resize\_at\_source compute method * Add PrepResizeAtDestTask * Remove compute compat checks for aborting queued live migrations * cleanup to objects.fields * Remove redundant call to get/create default security group * Fix legacy issues in filter migrations by user\_id/project\_id * Add cache\_images() to conductor * Filter migrations by user\_id/project\_id * Stop using NoAuthMiddleware in tests * Add prep\_snapshot\_based\_resize\_at\_dest compute method * Update compute rpc version alias for train * Add regression test for bug 1824435 * setup.cfg: Cleanup * nova-net: Use 
deepcopy on value returned by NeutronFixture * Avoid using image with kernel in BDM large request func test * libvirt: Change \_compare\_cpu to raise InvalidCPUInfo * Fix unit of hw\_rng:rate\_period * api-guide: Fix available info in handling down cells * Add cache\_image() support to the compute/{rpcapi,api,manager} * Add cache\_image() driver method and libvirt implementation * Fix exception translation when creating volume * Deprecate [api]auth\_strategy and noauth2 * Add support for cloud-init on LXC instances * Cache image GETs for multi-create/multi-BDM requests * Add boot from volume functional test with a huge request * nova-net: Migrate 'test\_floating\_ips' functional tests * fixtures: Add support for security groups * Remove Stein compute compat checks for volume type support * Remove dead reserve\_volume compat code in \_validate\_bdm * doc: link to user/index from main home page * doc: link to user/availability-zones from user home page * docs: Add redirects for '/user/aggregates' * Skip functional test jobs for doc redirect changes * doc: fix formatting in mitigation-for-Intel-MDS-security-flaws * nova-net: Make even more nova-net stuff optional * Pull up compute node queries to init\_host * Refine comments about move\_allocations * compute: refactor volume bdm rollback error handling * Remove @safe\_connect from put\_allocations * doc: Improve PDF document structure * [Gate fix] Avoid use cell\_uuid before assignment * Remove workaround for bug #1709118 * docs: Rewrite host aggregate, availability zone docs * Avoid raise InstanceNotRunning exception * Update contributor guide for Ussuri * api-ref: Fix security groups parameters * trivial: Remove unused API sample template * trivial: Make it obvious where we're getting our names from * nova-net: Stop mocking the instance network cache * trivial: Change name of network provided by NeutronFixture * fixtures: Store 'device\_id' when creating port in NeutronFixture * fixtures: Handle iterable params for 'NeutronFixture.list\_\*' * fixtures: Beef up NeutronFixture * trivial: Neutron fixture cleanup * nova-net: Migrate 'test\_simple\_tenant\_usage' functional tests * Filter out alembic logs below WARNING in tests * Remove Rocky compute compat checks for live migration with port bindings * nova-net: Migrate 'test\_attach\_interfaces' functional tests * nova-net: Migrate 'test\_hypervisors' functional tests * nova-net: Migrate 'test\_rescue' functional tests * nova-net: Migrate 'test\_hosts' functional tests * nova-net: Migrate 'test\_servers' functional tests * nova-net: Migrate 'test\_server\_tags' functional tests * tests: Correctly mock out security groups in NeutronFixture * nova-net: Migrate 'test\_quota\_sets' functional tests * nova-net: Migrate 'test\_floating\_ip\_pools' functional tests * nova-net: Migrate 'test\_availability\_zone' functional tests * FUP to I4d181b44494f3b0b04537d5798537831c8fdf400 * FUP to I30916d8d10d70ce25523fa4961007cedbdfe8ad7 * Add reserved schema migrations for Ussuri * Restore console proxy deployment info to cells v2 layout doc * Update cells v2 up-call caveats doc * Set Instance AZ from Selection AZ during migrate reschedule * Set Instance AZ from Selection AZ during build reschedule * Add Selection.availability\_zone field * Add functional regression test for migrate part of bug 1781286 * Update the file for IPv4-only or IPv6-only network * docs: Remove a whole load of unused images, most remainder * nova-net: Remove explicit 'USE\_NEUTRON = True' * nova-net: Use nova-net explicitly in 
functional tests * Test heal port allocations in nova-next * Do not print default dicts during heal\_allocations * Add functional regression test for build part of bug 1781286 * Handle get\_host\_availability\_zone error during reschedule * libvirt: Ignore DiskNotFound during update\_available\_resource * make virtual pmem feature compatible with python3 * Replace 'fake' with a real project ID * test cleanup: Use oslotest's CaptureOutput fixture * test cleanup: Use oslotest's Timeout fixture * test cleanup: Remove skipIf test decorator * api: Remove 'Debug' middleware * ec2: Move ec2utils functions to their callers * Reduce scope of 'path' query parameter to noVNC consoles * Add TODO note for mox removal * conf: Remove deprecated 'project\_id\_regex' opt * tox: Stop overriding the 'install\_command' * tox: Use common 'command' definition for unit tests * Add functional tests for virtual persistent memory * Update master for stable/train * Reset forced\_destination before migration at a proper time * Functional reproduction for bug 1845291 20.0.0.0rc1 ----------- * Fix incorrect usages of fake moref in VMware tests * doc: attaching virtual persistent memory to guests * Ignore warning from sqlalchemy-migrate * Ignore sqla-migrate inspect.getargspec deprecation warnings on py36 * docs: Update resize doc * docs: Document how to revert, confirm a cold migration * docs: Update CPU topologies guide to reflect the new PCPU world * docs: Clarify everything CPU pinning * VMware VMDK detach: get adapter type from instance VM * Add a prelude for the Train release * Correct link to placement upgrade notes * Move HostNameWeigher to a common fixture * Isolate request spec handling from \_cold\_migrate * Handle legacy request spec dict in ComputeTaskManager.\_cold\_migrate * Stop filtering out 'accepted' for in-progress migrations * Add functional tests for [cinder]/cross\_az\_attach=False * docs: Rework the PCI passthrough guides * docs: Document global options for nova-manage * docs: Correct 'nova-manage db sync' documentation * docs: Note use of 'nova-manage db sync --config-file' * Add missing parameter * Move pre-3.44 Cinder post live migration test to test\_compute\_mgr * nova-net: Migrate some API sample tests off of nova-net * Remove upgrade specific info from user facing exception text * Reject migration with QoS port from conductor if RPC pinned * Log error when volume validation fails during boot from volume * Log CellTimeout traceback in scatter\_gather\_cells * Rename Claims resources to compute\_node * Sanity check instance mapping during scheduling * Remove 'test\_cold\_migrate\_with\_physnet\_fails' test * Error out interrupted builds * Functional reproduction for bug 1844993 * Create volume attachment during boot from volume in compute * Revert "Temporarily skip TestNovaMigrationsMySQL" * Clear instance.launched\_on when build fails * libvirt: Get the CPU model, not 'arch' from get\_capabilities() * Func test for migrate reschedule with pinned compute rpc * libvirt: Enable driver configuring PMEM namespaces * Add evacuate vs rebuild contributor doc * Temporarily skip TestNovaMigrationsMySQL * Remove mox in unit/network/test\_neutronv2.py (22) * Remove mox in unit/network/test\_neutronv2.py (21) * Remove mox in unit/network/test\_neutronv2.py (20) * Remove mox in unit/network/test\_neutronv2.py (19) * Remove mox in unit/network/test\_neutronv2.py (18) * Remove mox in unit/network/test\_neutronv2.py (17) * Remove mox in unit/network/test\_neutronv2.py (16) * Remove mox in 
unit/network/test\_neutronv2.py (15) * Remove mox in unit/network/test\_neutronv2.py (14) * Remove mox in unit/network/test\_neutronv2.py (13) * Add librsvg2\* to bindep * Mark "block\_migration" arg deprecation on pre\_live\_migration method * Refactor pre-live-migration work out of \_do\_live\_migration * make config drives sticky bug 1835822 * Add note about needing noVNC >= v1.1.0 with using ESX * Add func test for 'required' PCI NUMA policy * Trigger real BuildAbortException during migrate with bandwidth * objects: use all\_things\_equal from objects.base * trivial: Use sane indent * Add reshaper for PCPU * libvirt: Mock 'libvirt\_utils.file\_open' properly * fakelibvirt: Make 'Connection.getHostname' unique * Add support for translating CPU policy extra specs, image meta * Include both VCPU and PCPU in core quota count * tests: Additional functional tests for pinned instances * libvirt: Start reporting 'HW\_CPU\_HYPERTHREADING' trait * hardware: Differentiate between shared and dedicated CPUs * objects: Add 'NUMACell.pcpuset' field * Validate CPU config options against running instances * objects: Add 'InstanceNUMATopology.cpu\_pinning' property * libvirt: '\_get\_(v|p)cpu\_total' to '\_get\_(v|p)cpu\_available' * libvirt: Start reporting PCPU inventory to placement * Refactor volume connection cleanup out of \_post\_live\_migration * Remove SchedulerReportClient from AggregateRequestFiltersTest * Remove redundancies from AggregateRequestFiltersTest.setUp * Follow up for the bandwidth series * Centralize volume create code during boot from volume * Use SpawnIsSynchronousFixture in reschedule functional tests * libvirt: stub logging of host capabilities * api-ref: remove mention about os-migrations no longer being extended * Use os-brick locking for volume attach and detach * Follow up for I220fa02ee916728e241503084b14984bab4b0c3b * Fix a misuse of assertGreaterEqual * Add reminder to update corresponding glance docs * Parse vpmem related flavor extra spec * libvirt: Support VM creation with vpmems and vpmems cleanup * libvirt: report VPMEM resources by provider tree * libvirt: Enable driver discovering PMEM namespaces * Claim resources in resource tracker * Retrieve the allocations early * Add resources dict into \_Provider * object: Introduce Resource and ResourceList objs * db: Add resources column in instance\_extra table * VMware: Update flavor-related metadata on resize * doc: mark the max microversion for train * Remove an unused file and a related description * Cleanup reno live-migration-with-PCI-device * Docs for isolated aggregates request filter * Add a new request filter to isolate aggregates * DB API changes to get non-matching aggregates from metadata * Deprecate CONF.workarounds.enable\_numa\_live\_migration * NUMA live migration support * LM: Use Claims to update numa-related XML on the source * New objects for NUMA live migration * libvirt: Correctly handle non-CPU flag traits * Note about Destination.forbidden\_aggregates * Set user\_id/project\_id from context when creating a Migration * Add user\_id and project\_id column to Migration * Skip querying resource request in revert\_resize if no qos port * Follow up for Ib50b6b02208f5bd2972de8a6f8f685c19745514c * Improve dest service level func tests * Extract pf$N literals as constants from func test * Allow resizing server with port resource request * Allow migrating server with port resource request * Support migrating SRIOV port with bandwidth * trivial: Remove single-use classmethod * Add nova-status to man-pages 
list * Make SRIOV computes non symmetric in func test * Func test for migrate re-schedule with bandwidth * Support reverting migration / resize with bandwidth * Use multiple attachments in test\_list\_volume\_attachments * Fix race in \_test\_live\_migration\_force\_complete * Make \_revert\_allocation nested allocation aware * Fix the race in confirm resize func test * Fixing broken links * Improve SEV documentation and other minor tweaks * Enable booting of libvirt guests with AMD SEV memory encryption * Reject live migration and suspend on SEV guests * Apply SEV-specific guest config when SEV is required * Nova object changes for forbidden aggregates request filter * Don't duplicate PlacementFixture in libvirt func tests * doc: Fix a broken reference link * Remove stubs from VolumeAttachmentsSample API sample test * Get pci\_devices from \_list\_devices * Decouple NVMe tests from os-brick * api-ref: fix server topology "host\_numa\_node" field param name * Find instance in another cell during floating IP re-association * Deprecate the XenAPIDriver * Func test for migrate server with ports having resource request * prepare func test env for moving servers with bandwidth * resize: Add bw min service level check of source compute * migrate: Add bw min service level check of source compute * Add min service level check for migrate with bandwidth * Fix incorrect invocation of openstacksdk's baremetal.nodes() * Support reporting multi CPU model traits * Add compatibility checks for CPU mode and CPU models and extra flags * vCPU model selection * Use fields="instance\_uuid" when calling Ironic API * Bump min for oslo.service & .privsep to fix SIGHUP * doc: cleanup references to conductor doc * Remove old comments about caching scheduler compat * Move get\_machine\_type() test to test\_utils.py * Extract fake KVM guest fixture for reuse * Ensure non-q35 machine type is not used when booting with SEV * update allocation in binding profile during migrate * Add delete\_on\_termination to volume-attach API * PDF documentation build * Remove unused methods * Introduce live\_migration\_claim() * unit test: do not fill rp mapping for failed re-schedule * libvirt: Make scheduler filters customizable * Make \_get\_cpu\_feature\_traits() always return a dict * libvirt: Fold in argument to '\_update\_provider\_tree\_for\_vgpu' * objects: Rename 'fields' import to 'obj\_fields' * libvirt: Start checking compute usage in functional tests * libvirt: Simplify 'fakelibvirt.HostInfo' object * Use SDK for setting instance id * Use SDK for validating instance and node * Remove dead code * Tune up db.instance\_get\_all\_uuids\_by\_hosts * libvirt: Fix service-wide pauses caused by un-proxied libvirt calls * Refactor MigrationTask.\_execute * Nice to have test coverage for If1f465112b8e9b0304b8b5b864b985f72168d839 * Use microversion in put allocations in test\_report\_client * trivial: Rewrap definitions of 'NUMACell' * Fix the incorrect powershell command * Add and to config.py * Extract SEV-specific bits on host detection * Add extra spec parameter and image property for memory encryption * re-calculate provider mapping during migration * Add request\_spec to server move RPC calls * Pass network API to the conducor's MigrationTask * allow getting resource request of every bound ports of an instance * Add cold migrate and resize to nova-grenade-multinode * Rename the nova-grenade-live-migration job to nova-grenade-multinode * Indent fake libvirt host capabilities fixtures more nicely * Handle VirtDriverNotReady in 
\_cleanup\_running\_deleted\_instances * fix lxml compatibility issues * libvirt/host.py: remove unnecessary temporary variable * Provide HW\_CPU\_X86\_AMD\_SEV trait when SEV is supported * Add server sub-resource topology API * Use SDK for node.list * Add functional test for AggregateMultiTenancyIsolation + migrate * Add FUP unit test for port heal allocations * Move live\_migration test hooks under gate/ * DRY get\_sdk\_adapter tests * Ensure online migrations have a unique name * trivial: Rename 'nova.tests.unit.test\_nova\_manage' * Follow up for specifying az to unshelve * [Trivial]Remove unused helper should\_switch\_to\_postcopy * [Trivial]Removed unused helper \_extract\_query\_params * [Trivial]Remove unused helper get\_allocated\_disk\_size * Remove unused args from archive\_deleted\_rows calls * [Trivial]Remove unused helper check\_temp\_folder * Update help for image\_cache\_manager\_interval option * Change HostManager to allow scheduling to other cells * Add Destination.allow\_cross\_cell\_move field * Add power\_on kwarg to ComputeDriver.spawn() method * Refactor ComputeManager.remove\_volume\_connection * Add nova.compute.utils.delete\_image * Specify availability\_zone to unshelve * Remove 'hw:cpu\_policy', 'hw:mem\_page\_size' extra specs from API samples * scheduler: Flatten 'ResourceRequest.from\_extra\_specs', 'from\_image\_props' * libvirt: use native AIO mode for StorPool Cinder volumes * Add a "Caveats" section to the eventlet profiling docs * Verify archive\_deleted\_rows --all-cells in post test hook * nova-manage db archive\_deleted\_rows is not multi-cell aware * Avoid error state for recovered instances after failed migrations * Remove descriptions of nonexistent hacking rules * [Trivial]Remove unused helper get\_vm\_ref\_from\_name * [Trivial]Remove unused helper \_get\_min\_service\_version * tests: Split NUMA object tests * Add support for 'initenv' elements * Add test for create server with integer AZ * Trap and log errors from \_update\_inst\_info\_cache\_for\_disassociated\_fip * neutron: refactor nw info cache refresh out of associate\_floating\_ip * Introduces SDK to IronicDriver and uses for node.get * Allow strict\_proxies for sdk Connection * Docs and functional test for max\_local\_block\_devices * Update SDK fixture for openstacksdk 0.35.0 * Process [compute] in $NOVA\_CPU\_CONF in nova-next * [Trivial]Remove unused helper get\_vif\_devname\_with\_prefix * Add docstring to check\_availability\_zone function * doc: pretty up return code table for sync\_aggregates * docs: pretty up return code table or heal\_allocations * Handle websockify v0.9.0 in console proxy * Rework 'hardware.numa\_usage\_from\_instances' * Remove 'hardware.instance\_topology\_from\_instance' * Remove 'hardware.host\_topology\_and\_format\_from\_host' * Remove 'hardware.get\_host\_numa\_usage\_from\_instance' * trivial: Rename exception argument * claims: Remove useless caching * Update docstring of 'revert\_resize' function * Address nits from privsep series * Document map\_instances return codes in table format * Change nova-manage unexpected error return code to 255 * Document archive\_deleted\_rows return codes * Revert "Filter UnsupportedServiceVersion warning" * Make a failure to purge\_db fail in post\_test\_hook.sh * Remove deprecated [neutron]/url option * FUP for I5576fa2a67d2771614266022428b4a95487ab6d5 * Extract new base class for provider usage functional tests * Track libvirt host/domain capabilities for multiple machine types * Make memtune parameters consistent 
with libvirt docs and code * Split fake host capabilities into reusable variables * Add a hacking rule for useless assertions * Add a hacking rule for non-existent assertions * Fix missing rule description in HACKING.rst * Add blocker migration for completing services.uuid migration * Delete InstanceMapping in conductor if BuildRequest is already deleted * Deprecate Aggregate[Core|Ram|Disk]Filters * libvirt: Remove unnecessary argument * libvirt: Remove unnecessary try-catch around 'getCPUMap' * objects: Rename 'nova.objects.instance\_numa\_topology' * doc: Trivial fixes to API version history * docs: Scrub available quotas * fakelibvirt: Stop distinguishing between NUMA, non-NUMA * Restrict RequestSpec to cell when evacuating * Add functional recreate test for bug 1823370 * Libvirt: add support for vPMU configuration * doc: remove confusing docs about aggregate allocation ratios * Update api-ref for 2.75 to add config\_drive in server update response * Switch some GitHub URLs to point to opendev.org * api-ref: add config\_drive to 2.75 rebuild response parameters * doc: cleanup 2.75 REST API microversion history doc * Re-use DB MetaData during archive\_deleted\_rows * Make it easier to run a selection of tests relevant to ongoing work * Tests: autospecs all the mock.patch usages * Fix wrong assertions in unit tests * Fix 'has\_calls' method calls in unit tests * Limit get\_sdk\_adapter to requested service type * Avoid timeout from service update api unit tests * Move router advertisement daemon restarts to privsep * Move dnsmasq restarts to privsep * Move iptables rule fetching and setting to privsep * libvirt: Mock libvirt'y things in setUp * Rename 'nova.common.config' module to 'nova.middleware' * Fix non-existent method of Mock * Fix libvirt driver tests to use LibvirtConfigCapsGuest instances * Allow assertXmlEqual() to pass options to matchers.XMLMatches * API microversion 2.76: Add 'power-update' external event * Fix use of mock.patch with new\_callable=PropertyMock * config: remove deprecated checksum options * Bump minimum ksa (3.16.0) and sdk (0.34.0) * add InstanceList.get\_all\_uuids\_by\_hosts() method * Enhance SDK fixture for 0.34.0 * api-ref: Fix collapse of 'host\_status' description * lxc: make use of filter python3 compatible * Execute TargetDBSetupTask * Add CrossCellMigrationTask * Prevent init\_host test to interfere with other tests * [Trivial]Remove unused helper filter\_and\_format\_resource\_metadata * [Trivial]Remove unused helper \_get\_instances\_by\_filters * Fix misuse of nova.objects.base.obj\_equal\_prims * Restore soft-deleted compute node with same uuid * Add functional regression recreate test for bug 1839560 * rt: only map compute node if we created it * Avoid timeout from service update notification tests * DRY get\_flavor in flavor manage tests * Multiple API cleanup changes * Add a document that describes profiling eventlet services * Rename 'map' variable to avoid shadowing keywords * Drop usage of lxml's deprecated getchildren() method * [Trivial]Remove unused \_last\_bw\_usage\_cell\_update * trivial: Use NoDBTestCase instead of TestCase * Fix rebuild of baremetal instance when vm\_state is ERROR * Dump versioned notifications when len assertions fail * Skip test\_migrate\_disk\_and\_power\_off\_crash\_finish\_revert\_migration * Use :oslo.config:\* in nova-manage doc * Add TargetDBSetupTask * Add Instance.hidden field * Add InstanceAction/Event create() method * Clean up docstrings for archive\_deleted\_rows * Don't mention 
CONF.api_database.connection in user-facing messages/docs
* Add useful error log when _determine_version_cap raises DBNotAllowed
* trivial: Remove unused '_instance_to_allocations_dict' function
* api-ref: document valid GET /os-migrations?migration_type values
* docs: update 2.23 REST API version history
* Update comments in HostManager._get_instance_info
* Cache host to cell mapping in HostManager
* Convert HostMapping.cells to a dict
* Replace non-nova server fault message
* doc: fix physets typo
* Don't claim that CLI user data requires manual base64 encoding
* Retrun 400 if invalid query parameters are specified
* Filter UnsupportedServiceVersion warning
* Make nova-multi-cell job voting and gating
* Add nova-osprofiler-redis job to experimental queue
* Modernize nova-lvm job
* Convert nova-lvm job to zuul v3
* doc: correct the information of 'cpu_map'
* Add the support of CPU feature 'AVX512-VNNI'
* trivial: Remove unused function parameter
* Follow-up for I2936ce8cb293dc80e1a426094fdae6e675461470
* Functional reproduce for bug 1833581
* nit: fix the test case of migration obj_make_compatible
* libvirt: Handle alternative UEFI firmware binary paths
* rt: soften warning case in _remove_deleted_instances_allocations
* neutron: log something more useful in _get_instance_nw_info
* Don't generate service UUID for deleted services
* Add functional regression test for bug 1778305
* Add functional recreate test for bug 1764556
* Remove Request Spec Migration upgrade status check
* Cleanup when hitting MaxRetriesExceeded from no host_available
* Add functional regression test for bug 1837955
* Move adding vlans to interfaces to privsep
* Fix wrong huge pages in doc
* Get rid of args to RBDDriver.__init__()
* libvirt: harden Host.get_domain_capabilities()
* Use a less chipper title for release notes
* doc: fix links for server actions in api guide
* api-ref: touch up the os-services docs
* Remove usused umask argument to virt.libvirt.utils.write_to_file
* Completely remove fake_libvirt_utils
* Revert "[libvirt] Filter hypervisor_type by virt_type"
* compute: Use source_bdms to reset attachment_ids during LM rollback
* Remove 'nova.virt.driver.ComputeDriver.estimate_instance_overhead'
* Remove deprecated CPU, RAM, disk claiming in resource tracker
* Disable cinder-backup service in nova-next job
* Pass extra_specs to flavor in vif tests
* Remove test_pre_live_migration_instance_has_no_fixed_ip
* Remove fake_libvirt_utils users in functional testing
* Remove super old unnecessary TODO from API start() method
* Convert nova-next to a zuul v3 job
* Remove deprecated Core/Ram/DiskFilter
* Use OpenStack SDK for placement
* Consts for need_healing
* Use the safe get_binding_profile
* Introduces the openstacksdk to nova
* Pass migration to finish_revert_migration()
* Correct project/user id descriptions for os-instance-actions
* Update api-ref location
* Remove Newton-era min compute checks for server create with device tags
* Add functional test for resize crash compute restart revert
* Run 'tempest-ipv6-only' job in gate
* Disambiguate logs in delete_allocation_for_instance
* Remove @safe_connect from _delete_provider
* libvirt: move checking CONF.my_ip to init_host()
* Bump the openstackdocstheme extension to 1.20
* Replace "integrated-gate-py3" template with new "integrated-gate-compute"
* Fix cleaning up console tokens
* Avoid logging traceback when detach device not found
* bindep: Remove dead markers
* tox: Keeping going with docs
* Restore RT.old_resources if ComputeNode.save() fails
* Defaults missing group_policy to 'none'
* Add 'resource_request' to neutronv2/constants
* Use neutron contants in cmd/manage.py
* Move consts from neutronv2/api to constants module
* Translatable output strings in heal allocation
* Use Adapter global_request_id kwarg
* Update supported transports for iscsi connector
* nova-manage: heal port allocations
* vif: Remove dead minimum libvirt checks
* vif: Resolve a TODO and update another
* vif: Stop using getattr for VIF lookups
* vif: Remove 'plug_vhostuser', 'unplug_vhostuser'
* Add method 'all_required_traits' to scheduler utils
* Fix no propagation of nova context request_id
* Revert resize: wait for events according to hybrid plug
* docs: Correct issues with 'openstack quota set' commands
* ec2: Pre-move cleanup of utils
* ec2: Remove ec2.CloudController
* objects: Remove unused ec2 objects
* ec2: Remove unused functions from 'ec2utils'
* doc: Fix a parameter of NotificationPublisher
* doc: Add links to novaclient contributor guide
* doc: Replace a wiki link with API ref guide link
* Perf: Use dicts for ProviderTree roots
* libvirt: remove unused Service.get_by_compute_host mocks
* Update AZ admin doc to mention the new way to specify hosts
* nova-lvm: Disable [validation]/run_validation in tempest.conf
* Add host and hypervisor_hostname flag to create server
* db: Add vpmems to instance_extra
* Remove assumption of http error if consumer not exists
* Remove Rocky-era min compute trusted certs compat check
* Remove old TODO about forced_host policy check
* Add Python 3 Train unit tests
* Remove nova-consoleauth
* libvirt: vif: Remove MIN_LIBVIRT_MACVTAP_PASSTHROUGH_VLAN
* libvirt: Remove MIN_LIBVIRT_PERF_VERSION
* api-ref: Fix a broken link
* Stop sending bad values from libosinfo to libvirt
* libvirt: Remove unreachable native QEMU iSCSI initiator config code
* libvirt: Remove MIN_{QEMU,LIBVIRT}_LUKS_VERSION
* Remove 'nova.virt.libvirt.compat'
* Exit 1 when db sync runs before api_db sync
* Fix GET /servers/detail host_status performance regression
* Follow up for pre-filter-disabled-computes series
* Sync COMPUTE_STATUS_DISABLED from API
* Refactor HostAPI.service_update
* Add placement request pre-filter compute_status_filter
* Update COMPUTE_STATUS_DISABLED from set_host_enabled compute call
* [FUP] Follow-up patch for SR-IOV live migration
* libvirt: Add a rbd_connect_timeout configurable
* libvirt: manage COMPUTE_STATUS_DISABLED for hypervisor connection
* Add VirtAPI.update_compute_provider_status
* Stabilize unshelve notification sample tests
* Add neutron-tempest-iptables_hybrid job to experimental queue
* Clean up test_virtapi
* Set COMPUTE_STATUS_DISABLED trait from update_provider_tree flow
* Rename CinderFixtureNewAttachFlow to CinderFixture
* Remove mox in virt/test_block_device.py
* Add integration testing for heal_allocations
* Init HostState.failed_builds
* Remove needs:* todo from deprecated APIs api-ref
* Fix invalid assertIsNone states
* Add missing tests for flavor extra_specs mv 2.61
* Fix test_flavors to run with correct microversion
* Remove 'MultiattachSupportNotYetAvailable' exception
* Follow-up for I6a777b4b7a5729488f939df8c40e49bd40aec3dd
* Drop pre-cinder 3.44 version compatibility
* Un-safe_connect and publicize get_providers_in_tree
* Require at least cryptography>=2.7
* libvirt: flatten rbd images when unshelving an instance
* pull out put_allocation call from _heal_*
* Prepare _heal_allocations_for_instance for nested allocations
* reorder conditions in _heal_allocations_for_instance
* Fix type error on call to mount device
* Fix RT init arg order in test_unsupported_move_type
* Fix AttributeError in RT._update_usage_from_migration
* Privsep the ebtables modification code
* Remove unused FP device creation and deletion methods
* Privsepify ipv4 forwarding enablement
* Remove no longer required "inner" methods
* Grab fresh power state info from the driver
* pull out functions from _heal_allocations_for_instance
* Correct the comment of RequestSpec's network_metadata
* Deprecate non-update_provider_tree compat code
* xenapi: implement update_provider_tree
* Implement update_provider_tree
* Fix update_provider_tree signature in reference docs
* Add functional test coverage for bug 1724172
* Enhance service restart in functional env
* (Re-)enable vnc console tests in nova-multi-cell job
* nova-status: Remove consoleauth workaround check
* tests: Use consistent URL regex substitution
* hacking: Resolve W605 (invalid escape sequence)
* hacking: Resolve E741 (ambiguous variable name)
* hacking: Resolve W503 (line break occurred before a binary operator)
* Remove orphaned comment from _get_group_details
* Revert "Revert resize: wait for events according to hybrid plug"
* Remove comments about mirroring changes to nova/cells/messaging.py
* doc: Fix nova-manage cell_v2 list_cells output
* [FUP] fix backleveling unit test for video models
* extend libvirt video model support
* api-guide: better explain scheduler hints
* Remove global state from the FakeDriver
* conf: Rename 'configuration drive' to 'config drive'
* docs: Rework all things metadata'y
* vif: Skip most of 'get_base_config' if not using virtio
* Ignore hw_vif_type for direct, direct-physical vNIC types
* Revert resize: wait for events according to hybrid plug
* Remove deprecated arguments in db sync command
* rbd: use MAX_AVAIL stat for reporting bytes available
* Clarify --before help text in nova manage
* xvp: Remove use of '_LI' marker
* xvp: Start using consoleauth tokens
* Replace deprecated with_lockmode with with_for_update
* Log disk transfer stats in live migration monitor
* Remove redundant group host setup
* Validate requested host/node during servers create
* Fix wrong assert methods
* Clean up NumInstancesFilter related docs
* Log quota legacy method warning only if counting from placement
* Deprecate RetryFilter
* Fix enabled_filters default value in admin config docs
* Remove file-backed memory live migration compat check
* tests: Stop starting consoleauth in functional tests
* docs: Remove references to nova-consoleauth
* docs: remove the RamFilter from example
* Ensure controllers all call super
* Add 'path' query parameter to console access url
* Always Set dhcp_server in network_info
* Add a test for the _joinedload_all helper
* Replace joinedload_all with joinedload
* Fix :param: in docstring
* Optimize SchedulerReportClient.delete_resource_provider
* Avoid unnecessary joins in delete_resource_provider
* Literalize CLI options in docs
* Delete resource providers for all nodes when deleting compute service
* Warn for duplicate host mappings during discover_hosts
* Api-guide: Add Block Device Mapping
* Make RequestContext(instance_lock_checked) fail
* Fix a warning about flags in an expression string
* update comment on ignore_basepython_conflict
* Add Migration.cross_cell_move and get_by_uuid
* update constraints url
* Remove 'InstanceUnknownCell' exception
* Add reno for removed cells v1 policies
* Remove nova.compute.*API() shims
* filters: Stop handling cells v1
* Stop passing 'delete_type' to 'terminate_instance'
* Stop passing 'kwargs' to 'rebuild_instance'
* Remove cells v1 parameter from 'ComputeTaskAPI.resize_instance'
* Fix double word hacking test
* fup: Merge machine_type_mappings into get_default_machine_type
* libvirt: Use SATA bus for cdrom devices when using Q35 machine type
* Make get_provider_by_name public and remove safe_connect
* Refresh instance network info on deletion
* Skip test_check_doubled_words hacking check UT
* Fix python3 compatibility of rbd get_fsid
* Remove unnecessary setUp methods
* Replace 'is comprised of' with 'comprises'
* Hacking N363: `in (not_a_tuple)`
* Remove 'ComputeManager._reschedule'
* Add functional recreate test for bug 1829479 and bug 1817833
* Cleanup quota user docs
* Update quota known issues docs
* [Docs] Update the confusing console output
* Modifying install-guide to include public endpoint for identity service
* Remove an unused method
* Delete unused get_all_host_states method
* Document mitigation for Intel MDS security flaws
* Make nova-next archive using --before
* Change the default of notification_format to unversioned
* Hide hypervisor id on windows guests
* Move default policy target
* Simplfy test setup for TestNovaMigrations* tests
* Avoid lazy-loading instance.flavor in cold migration
* Exclude broken ironicclient versions 2.7.1
* Follow up for counting quota usage from placement
* Remove remaining vestiges of fake_libvirt_utils from unit tests
* Set/get group uuid when transforming RequestSpec to/from filter_properties
* Workaround missing RequestSpec.instance_group.uuid
* Add regression recreate test for bug 1830747
* Add documentation for counting quota usage from placement
* Use instance mappings to count server group members
* Remove fake_libvirt_utils from libvirt imagebackend tests
* Remove fake_libvirt_utils from virt driver tests
* Bump openstackdocstheme to 1.30.0
* xenapi: log quality warning in init_host
* Remove zeromq from getting started with compute docs
* Raise InstanceFaultRollback for UnableToMigrateToSelf from _prep_resize
* Change InstanceFaultRollback handling in _error_out_instance_on_exception
* Blacklist python-cinderclient 4.0.0
* Robustify attachment tracking in CinderFixtureNewAttachFlow
* Update usage in RT.drop_move_claim during confirm resize
* Fix hard-delete of instance with soft-deleted referential constraints
* conf: Remove cells v1 options, group
* db: Remove cell APIs
* Remove unnecessary wrapper
* Stop handling 'InstanceUnknownCell' exception
* libvirt: Rework 'EBUSY' (SIGKILL) error handling code path
* docs: Don't version links to reno docs
* Make all functional tests reusable by other projects
* Fix the server group "policy" field type in api-ref
* extract baselineCPU API call from _get_cpu_traits()
* Reduce logging of host hypervisor capabilities to DEBUG level
* cleanup evacuated instances not on hypervisor
* Remove mox in unit/network/test_neutronv2.py (12)
* Remove mox in unit/network/test_neutronv2.py (11)
* Remove mox in unit/network/test_neutronv2.py (10)
* Remove mox in unit/network/test_neutronv2.py (9)
* Remove mox in unit/network/test_neutronv2.py (8)
* Ensure that metadata proxy raises correct exception
* Don't rely on SQLAlchemy collections magically initializing __dict__
* Move selective patching of open() to nova.test for reuse
* Skip novnc tests in multi-cell job until bug 1830417 is fixed
* Move patch_exists() to nova.test.TestCase for reuse
* Link versioned notification talk into docs
* Set [quota]count_usage_from_placement = True in nova-next
* Count instances from mappings and cores/ram from placement
* Avoid unnecessary joins in InstanceGroup.get_hosts
* Do not start nova-network in the notification func test
* Fix live-migration when glance image deleted
* Add --before to nova-manage db archive_deleted_rows
* refactor nova-manage archive_deleted_rows
* Skip existing VMs when hosts apply force_config_drive
* Update description of valid whitelist for non-admin user
* [Docs] Fix minor typo
* Keep attach_mode as top-level field in _translate_attachment_ref
* Block swap volume on volumes with >1 rw attachment
* Replace colon with comma in route comment
* Allow driver to properly unplug VIFs on destination on confirm resize
* Extract provider tree functional tests into new file
* Remove 'etc/nova/cells.json'
* Remove conductor_api and _last_host_check from manager.py
* Restore connection_info after live migration rollback
* Fix failure to boot instances with qcow2 format images
* libvirt: Do not reraise DiskNotFound exceptions during resize
* Remove cells code
* Stop handling cells v1 for instance naming
* Stop handling 'update_cells' on 'BandwidthUsage.create'
* Remove 'instance_update_from_api'
* Move get_pci_mapping_for_migration to MigrationContext
* Remove redundant conductor from ServersTestBase.setUp()
* Fix guestfs.set_backend_settings call
* api-ref: mention default project filtering when listing servers
* Add detection of SEV support from QEMU/AMD-SP/libvirt on AMD hosts
* Add infrastructure for invoking libvirt's getDomainCapabilities API
* [ironic] Don't remove instance info twice in destroy
* Fix some issues with the newton release notes
* Stop logging traceback when skipping quiesce
* Cap sphinx for py2 to match global requirements
* Fix retry of instance_update_and_get_original
* Disable limit if affinity(anti)/same(different)host is requested
* [Trivial doc change] Admin can overwrite the locked_reason of an owner
* Add functional confirm_migration_error test
* Remove fake_libvirt_utils from snapshot tests
* Remove fake_libvirt_utils from connection tests
* Change some URLs to point to better targets
* Microversion 2.73: Support adding the reason behind a server lock
* Trivial: Adds comments and tests for scheduler
* Move _fill_provider_mapping to the scheduler_utils
* Remove unused param from _fill_provider_mapping
* Add extra logging to request filters
* Update the contributor doc for macos
* Update Python 3 test runtimes for Train
* Revert "Fix target_cell usage for scatter_gather_cells"
* Fix SynchronousThreadPoolExecutorFixture mock of Future
* Add docs for image type support request filter
* Enable image type query support in nova-next
* Add image type request filter
* [Docs] Change the server query parameter display into a list
* api-ref: fix mention of all_tenants filter for non-admins
* Add zvm driver image type capabilities
* Add xenapi driver image type capabilities
* Add vmware driver image type capabilities
* Add ironic driver image type capabilities
* Make libvirt expose supported image types
* Expose Hyper-V supported image types
* Fix assert methods in unit tests
* Exclude fake marker instance when listing servers
* Add regression test for bug 1825034
* [Trivial fix]Remove unnecessary slash
* Log when port resource is leaked during port delete
* Make nova-tox-functional-py36 reusable
* Use run_immediately=True for _cleanup_running_deleted_instances
* Remove macs kwarg from allocate_for_instance
* Remove ComputeDriver.macs_for_instance method
* Improve metadata performance
* Add nova-status upgrade check for minimum required cinder API version
* Reset the stored logs at each notification test steps
* Remove 'instance_update_at_top', 'instance_destroy_at_top'
* Refactor bandwidth related functional tests
* Test macvtap port with resource request
* Require at least oslo.versionedobjects>=1.35.0
* Fix invalid privsep.readpty test
* Fix help for ironic.peer_list config
* Remove deprecated 'default_flavor' config option
* Enable n-novnc in nova-multi-cell job
* Add nova-multi-cell job
* Remove 'get_keypair_at_top'
* Remove 'instance_info_cache_update_at_top'
* Remove 'instance_fault_create_at_top'
* Correct spelling errors
* Delete the placement code
* libvirt: Avoid using os-brick encryptors when device_path isn't provided
* Add Venn diagram showing taxonomy of traits and capabilities
* Remove unused context parameter from RT._get_instance_type
* Add functional recreate test for bug 1818914
* Remove MIN_COMPUTE_MULTIATTACH conditions in API
* Always pass HostAPI to get_availability_zones
* Remove [ironic]api_endpoint option
* test_rpc: Stop f****** with global state
* libvirt: auto detach/attach sriov ports on migration
* libvirt: Always disconnect volumes after libvirtError exceptions
* libvirt: Stop ignoring unknown libvirtError exceptions during volume attach
* Don't run tempest/devstack jobs on nova/test.py only changes
* Make nova.compute.rpcapi.ComputeAPI.router a singleton
* AZ list performance optimization: avoid double service list DB fetch
* Add image type capability flags and trait conversions
* Create request spec, build request and mappings in one transaction
* Fix mock specs set to strings
* Do not perform port update in case of baremetal instance
* Replace git.openstack.org URLs with opendev.org URLs
* Pass on region when we don't have a valid ironic endpoint
* Improve test coverage of nova.privsep.utils
* Drop source node allocations if finish_resize fails
* Add functional recreate test for regression bug 1825537
* Fix {min|max}_version in ironic Adapter setup
* SR-IOV Live migration indirect port support
* Improve CinderFixtureNewAttachFlow
* Fix ProviderUsageBaseTestCase._run_periodics for multi-cell
* OpenDev Migration Patch
* Only set oslo_messaging_notifications.driver if using RPCFixture
* Trivial: use default value in next() func
* Add get_usages_counts_for_quota to SchedulerReportClient
* libvirt: set device address tag only if setting disk unit
* Remove FlavorNotFound dead code condition in API.resize
* Update volume-backed comment in _validate_flavor_image_nostatus
* Fix volume-backed resize with a smaller disk flavor
* Add ids to sections of flavors guide to allow deep-linking
* Query `in_tree` to placement
* Pass target host to RequestGroup.in_tree
* Add get_compute_nodes_by_host_or_node()
* Add in_tree field to RequestGroup object
* Add functional regression recreate test for bug 1825020
* Remove 'bdm_(update_or_create|destroy)_at_top'
* Remove old-style cell v1 instance listing
* Stop handling cells v1 for console authentication
* Remove 'nova-manage cell' commands
* Stop handling cells v1 in '/os-servers' API
* Stop handling cells v1 in '/os-hypervisors' API
* Remove '/os-cells' REST APIs
* objects: Remove ConsoleAuthToken.to_dict
* conf: Undeprecate and move the 'dhcp_domain' option
* Handle unsetting '[DEFAULT] dhcp_domain'
* Include all network devices in nova diagnostics
* Add BFV wrinkle to TestNovaManagePlacementHealAllocations
* Add --instance option to heal_allocations
* Dropping the py35 testing
* Add instance hard delete
* Bump to hacking 1.1.0
* Add minimum value in max_concurrent_live_migrations
* Uncap jsonschema
* Add --dry-run option to heal_allocations CLI
* trivial: Remove dead nova.db functions
* Use update_provider_tree in vmware virt driver
* Add get_counts() to InstanceMappingList
* Use InstanceList.get_count_by_hosts when deleting a compute service
* Remove 'nova-cells' service
* Remove cells v1 jobs
* Use migration_status during volume migrating and retyping
* Cleanup migrate flags
* Add post-release checklist items to the PTL guide
* Drop delete_build_requests_with_no_instance_uuid online migration
* Soft delete virtual_interfaces when instance is destroyed
* Delete require_instance_exists_using_uuid
* Add placeholder migrations for Stein backports
* Change a log level for overwriting allocation
* Remove query_client from resource_tracker
* libvirt: disconnect volume when encryption fails
* Don't report 'exiting' when mapping cells
* Mention [cinder]/cross_az_attach in the AZ docs
* Document restrictions for moving servers between availability zones
* Add testing guide for down cells
* xenapi/agent: Change openssl error handling
* Remove dead code
* Log notifications if assertion in _test_live_migration_force_complete fails
* Add test coverage for nova.privsep.qemu
* Add test coverage for nova.privsep.libvirt
* Improve test coverage of nova.privsep.fs, continued
* Improve test coverage of nova.privsep.fs
* Improve test coverage of nova.privsep.path
* Hacking N362: Don't abbrev/alias privsep import
* Handle PortLimitExceeded in POST /servers/{server_id}/os-interface
* Do not log a warning about not using compute monitors
* Handle Invalid exceptions as expected in attach_interface
* Add docs on what not to include in notifications
* devstack: Remove 'tempest-dsvm-tempest-xen-rc'
* Remove CellMappingPayload database_connection and transport_url fields
* api-ref: fix description of os-server-external-events 'events' param
* api-ref: document ordering for instance actions and events
* libvirt: remove conditional on VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY
* libvirt: drop MIN_LIBVIRT_POSTCOPY_VERSION
* Drop migrate_keypairs_to_api_db data migration
* Libvirt: gracefully handle non-nic VFs
* trivial: Remove dead resource tracker code
* trivial: Remove unused constants, functions
* Leave brackets on Ceph IP addresses for libguestfs
* systemd detection result caching nit fixes
* trivial: Remove dead 'ALIAS' constant
* zvm: Remove dead code
* hacking: Fix dodgy check
* trivial: Remove dead code
* Docs: emulator threads: clarify expected behavior
* Fix comment in test_attach_with_multiattach_fails_not_available
* Fix a deprecation warning
* Style corrections for privsep usage
* Mock time.sleep() in unit tests
* Add placement as required project to functional py36 and 37
* Correct lower-constraints.txt and the related tox job
* Do not persist RequestSpec.ignore_hosts
* tests: Full stub out os_vif
* Pass --nic when creating servers in evacuate integration test script
* tests: Stub out privsep modules
* Remove flavor id and name validation code
* Remove mox in unit/network/test_neutronv2.py (7)
* Remove mox in unit/network/test_neutronv2.py (6)
* Remove mox in unit/network/test_neutronv2.py (5)
* Remove mox in unit/network/test_neutronv2.py (4)
* Fix bug preventing forbidden traits from working
* Adding tests to demonstrate bug #1821824
* Only call _fill_provider_mapping if claim succeeds
* Handle placement error during re-schedule
* api-ref: add more details to confirmResize troubleshooting
* Delete allocations even if _confirm_resize raises
* Fix exception type in test_boot_reschedule_fill_provider_mapping_raises
* Adds systemd detection result caching in Quobyte driver
* Error out migration when confirm_resize fails
* Explain why disk_available_least can be negative
* doc: Fix openstack CLI command
* Move create of ComputeAPI object in websocketproxy
* Change the TODO to NOTE about instance multi-create
* Reproduce bug #1819460 in functional test
* doc: Capitalize keystone domain name
* Use aggregate_add_host in nova-manage
* Add a reference PTL guide to the contributor docs
* Add functional test for the JsonFilter
* Document a warning about using the JsonFilter
* Fix JsonFilter query hint examples in docs
* Fix incomplete instance data returned after build failure
* Add doc on VGPU allocs and inventories for nrp
* Add functional regression test for bug 1669054
* Remove expiremental note in the VGPU docs
* s,git://github.com/,https://git.openstack.org/,
* Re-enable testing of console with TLS in nova-next job
* Replace openstack.org git:// URLs with https://
* Remove last use of rc_fields
* Fix return param docstring in check_can_live_migrate* methods
* Update contributor guide for Train
* bdm: store empty object as connection_info by default
* Eventlet monkey patching should be as early as possible
* Add description about sort order in API ref guideline
* Imported Translations from Zanata
* Update master for stable/stein
* Stop running tempest-multinode-full

19.0.0.0rc1
-----------

* Trivial: remove unused var from policies.base.py
* Override the 'get' method in DriverBlockDevice class
* libvirt: smbfs: Use 'writeback' QEMU cache mode
* libvirt: vzstorage: Use 'writeback' QEMU cache mode
* libvirt: Use 'writeback' QEMU cache mode when 'none' is not viable
* Fix links to neutron QoS minimum bandwidth doc
* Don't register placement opts mutiple times in a test
* Add known issue for minimum bandwidth resource leak
* Add a prelude release note for the 19.0.0 Stein GA
* docs: Misc cleanups
* Address old TODO in claim_resources_on_destination
* Move libvirt calculation of machine type to utils.py
* Give the policy vision document a facelift
* Add docs for compute capabilities as traits
* Cleanup comments around claim_resources method
* Clarify policy shortcomings in policy enforcement doc
* Remove additional policy configuration details from policy doc
* Remove unnecessary default provider_tree when getting traits
* qemu: Make disk image conversion dramatically faster
* Remove obsolete policy configuration details from docs
* Documentation for bandwidth support
* Move slight bonkers IP management to privsep
* Speed up test_report
* Remove "Fixing the Scheduler DB model" from schedule evolution doc
* Remove stale aggregates notes from scheduler evolution doc
* Trivial typo fix for REST API in policy enforcement docs
* Remove resize caveat from conductor docs
* docs: cleanup driver parity scope section
* Pass kwargs to exception to get better format of error message
* Avoid crashing while getting libvirt capabilities with unknown arch names
* Re-enable Ceph in live migration testing
* Customize irrelevant-files for nova-live-migration job
* Update instance.availability_zone on revertResize
* Add functional recreate test for bug 1819963
* Migrate legacy jobs to Ubuntu Bionic
* Disable the tls-proxy in nova-next & fix nova-tox-functional-py35 parent
* Trivial: fix typo in reno
* Skip the ceph based live migration testing
* api-ref: Add description for BDM volume_size
* add python 3.7 unit test job
* Trivialfix for help description of images_type
* Add retry_on_deadlock to migration_update DB API
* Add functional test to delete a server while in VERIFY_RESIZE
* Don't warn on network-vif-unplugged event during live migration
* Require python-ironicclient>=2.7.0
* pass endpoint interface to Ironic client
* Allow utime call to fail on qcow2 image base file
* Update docs: User token times out during long-running operations
* Update compute rpc version alias for stein
* fix race in test_interface_detach_with_port_with_bandwidth_request
* Use Selection object to fill request group mapping
* doc: Fix a typo
* Remove fake_libvirt_utils from the cache concurrency tests
* Add descriptions of numbered resource classes and traits
* Add online data migration for populating user_id
* Populate InstanceMapping.user_id during migrations and schedules
* Add user_id field to InstanceMapping
* update gate test for removal of force evacuate
* Use assertXmlEqual() helper for all XML comparison tests
* Should not skip volume_size check for bdm.image_id == image_ref case
* doc: mark the max microversion for stein
* Remove duplicate cleanup in functional tests
* Add user_id column to the instance_mappings table
* Set min=0 for block_device_allocate_retries option
* Clean up block_device_allocate_retries config option help
* docs: Fix nits in remote console guide
* Add get_instance_pci_request_from_vif
* Allow per-port modification of vnic_type and profile
* Separate methods to free claimed and allocated devs
* Add missing libvirt exception during device detach
* FUP for test_reshape
* Test proper allocation of devices during reshape
* Cleanup the exec_ebtables code a little
* Move killing processes to privsep
* Move cleaning conntrack to privsep
* Move arping to privsep
* doc: cleanup pci.alias references
* De-cruft compute manager live migration
* Extend volume for libvirt network volumes (RBD)
* Do not run tempest.scenario.test_network* tests in nova-next
* Warn if group_policy is missing from flavor
* tests: Create PCI tests for NUMA'y tests
* fakelibvirt: Add ability to generate fake PCI devices
* objects: Store InstancePCIRequest.numa_policy in DB
* Update --max-rows parameter description for archive_deleted_rows
* Validate PCI aliases early in resize
* Move additional IP address management to privsep
* Move route management to privsep
* Convert additional IP management calls to privsep
* Move DHCP releasing to privsep
* Move set_vf_interface_vlan to be with its only caller
* Fix WeighedHost logging regression
* Use errors_out_migration decorator on finish_resize
* Delete the obj_as_admin context manager
* De-cruftify the finish_resize methods
* Temporarily mutate migration object in finish_revert_resize
* Improve libvirt image and snapshot handling
* Flavor extra spec and image properties validation from API
* Handle missing exception in instance creation code
* Support server create with ports having resource request
* Ensure that bandwidth and VF are from the same PF
* Revert "Fixes race condition with privsep utime"
* Handle templated cell mappings in nova-status
* Parse elements from virConnectGetCapabilities()
* Exec systemd-run without --user flag in Quobyte driver
* api-ref: typo service.disable_reason
* Use a placement conf when testing report client
* Improve existing flavor and image metadata validation
* Correct instance port binding for rebuilds
* Add nits from Id2beaa7c4e5780199298f8e58fb6c7005e420a69
* Fix wrong consumer type in logging
* Fix an error when generating a host ID
* Remove mox in unit/network/test_neutronv2.py (3)
* Remove wrong description for auto resize confirm
* Fixes race condition with privsep utime
* fix bug with XML matcher handling missing children
* api-ref: explain aggregate set_metadata semantics
* Check hosts have no instances for AZ rename
* Remove TypeError handling for get_info
* ironic: check fresh data when sync_power_state doesn't line up
* Add oslo.privsep to config-generator list
* Stop using "nova" in API samples when creating a server
* Add "links" in the response of "nova show" for a down-cell instance
* Make nova-grenade-live-migration voting and gating
* Move legacy-grenade-dsvm-neutron-multinode-live-migration in-tree
* Convert driver supported capabilities to compute node provider traits
* Adds the server group info into show server detail API
* Ironic: bump minimum API version to 1.38
* Record requester in the InstancePCIRequest
* Remove port allocation during detach
* fix up numa-topology live migration hypervisor check
* Add remove_resources_from_instance_allocation to report client
* Test live migration with config drive
* conf: Call out where pci.alias should be set
* conf: Deprecate 'disable_libvirt_livesnapshot' option
* Summarize output of sample configuration generator
* FUP: docs nit
* Add functional test for libvirt vgpu reshape
* Optimize populate_queued_for_delete online data migration
* Cleanup no longer required filters and add a release note
* ironic: partition compute services by conductor group
* Fix the api sample docs for microversion 2.68
* Fup for the bandwidth series
* We no longer need rootwrap
* Cleanup the _execute shim in nova/network
* Change LibvirtDriver.capabilities to an instance variable
* [Doc] Best practices for effectively tolerating down cells
* libvirt: implement reshaper for vgpu
* Use the correct mdev allocated from the pGPU
* remove deprecated os_brick import from ScaleIO driver
* Move final bridge commands to privsep
* Move setting of device trust to privsep
* Move calls to ovs-vsctl to privsep
* Fix resetting non-persistent fields when saving obj
* Add unit tests for missing VirtualInterface in 2.70 os-interface
* conf: Deprecated 'defer_iptables_apply'
* Refactor "networks" processing in ServersController.create
* Remove _legacy_dict methods
* Remove misleading code from _move_operation_alloc_request()
* Log why rescheduling is disabled
* Dump config options on wsgi startup earlier
* Follow up for I0c764e441993e32aafef0b18049a425c3c832a50
* Remove deprecated 'flavors' policy
* Remove deprecated 'os-server-groups' policy
* Fix a typo in configuration description
* Add microversion to expose virtual device tags
* FUP for Id7827fe8dc27112e342dc25c902c8dbc25f63b94
* Test boot with more ports with bandwidth request
* Send RP uuid in the port binding
* Recalculate request group - RP mapping during re-schedule
* Pass resource provider mapping to neutronv2 api
* Fill the RequestGroup mapping during schedule
* Calculate RequestGroup resource provider mapping
* Added mount fstype based validation of Quobyte mounts
* Replace ansible --sudo with --become in live_migration/hooks scripts
* Fix typo in initial_disk_allocation_ratio release note
* API microversion 2.69: Handles Down Cells Documentation
* Move create_tap_dev into privsep
* Create specialist set_macaddr_and_vlan helper
* Fix fake DELETE in PlacementFixture
* libvirt: Omit needless check on 'CONF.serial_console'
* libvirt: Drop MIN_LIBVIRT_PARALLELS_SET_ADMIN_PASSWD
* libvirt: Rewrite _create_pty_device() to be clearer
* libvirt: Bump MIN_{LIBVIRT,QEMU}_VERSION for "Stein"
* API microversion 2.69: Handles Down Cells
* Add context.target_cell() stub to DownCellFixture
* Plumbing required in servers ViewBuilder to construct partial results
* Trim fake_deserialize_context in test_conductor
* Cleanup inflight rpc messages between test cases
* Fix irrelevant-files for legacy-grenade-dsvm-neutron-multinode-live-migration
* Stub out port binding create/delete in NeutronFixture
* Make VolumeAttachmentsSampleV249 test other methods
* Fix deps for api-samples tox env
* Fix a missing policy in test policy data
* Remove deprecated 'os-flavor-manage' policy
* Drop the integrated-gate (py27) template
* Address nits from I9e30a24a4c0640f282f507d0a96640d3cdefe43c
* api-ref: Add descriptions for vol-backed snapshots
* Change sqlalchemy warnings filter to an error
* Libvirt: do not set MAC when unplugging macvtap VF
* Lock detach_volume
* docs: ComputeDriver.update_provider_tree in nova
* Document how to make tests log at DEBUG level
* Drop specific versions of openSUSE-based distributions
* Remove cells v1 (for the most part) from the docs
* api-ref: mark os-cells as deprecated
* Further de-dupe os-vif VIF tests
* Validate bandwidth configuration for other VIF types
* Remove get_config_vhostuser
* Use math.gcd starting with python 3.5
* Adding cross refs for config options in scheduler filter guide
* Avoid redundant initialize_connection on source post live migration
* Change nova-next tempest test regex
* Ensure config regexes match the entire string
* Make move_allocations handle empty source allocations
* RT: improve logging in _update_usage_from_migration
* Make Claim._claim_test handle SchedulerLimits object
* Move finish_resize.(start|end) notifications to helper method
* Don't set bandwidth limits for vhostuser, hostdev interfaces
* Use tox 3.1.1 fixes
* tox: Don't write byte code (maybe)
* Trivial: reorder hashes according to object_hashes.txt
* Use placement.inventory.inuse in report client
* Provide a useful error message when trying to update non-compute services
* Avoid BadRequest error log on volume attachment
* Follow up (#2) for the bw resource provider series
* Fix race in test_volume_swap_server_with_error
* Ignore VolumeAttachmentNotFound exception in compute.manager
* Cleanup return_reservation_id in ServersController.create
* Refactor bdm handling in ServersController.create method
* Share snapshot image membership with instance owner
* API: Remove evacuate/live-migrate 'force' parameter
* Plumbing for allowing the all-tenants filter with down cells
* Plumbing for ignoring list_records_by_skipping_down_cells
* Modify InstanceMappingList.get_not_deleted_by_cell_and_project()
* Convert CPU_TRAITS_MAPPING to use os_traits
* Extend RequestGroup object for mapping
* Transfer port.resource_request to the scheduler
* create_veth_pair is unused, remove it
* Move binding ips to privsep
* Change live_migration_wait_for_vif_plug=True by default
* Fix deprecation warning for threadgroup.add_timer
* doc: specify --os-compute-api-version when setting flavor description
* Ignore sqla-migrate inspect.getargspec deprecation warnings
* Switch to using os-resource-classes
* Remove placement from contributor doc
* Remove link to placement configuration from nova config docs
* Remove placement from nova install docs
* Update nova docs front page for placement removal
* Update help messages for weight multipliers
* Add minimum value in maximum_instance_delete_attempts
* Use :oslo-config: role in hypervisor-kvm doc
* api-ref: mention policy defaults for aggregates
* api-ref: warn about changing/unsetting AZ name with instances
* Fix legacy-grenade-dsvm-neutron-multinode-live-migration
* doc: mention description field in user flavors docs
* api-ref: fix link to flavor extra specs docs
* cleanup *.pyc files in docs tox envs
* update flavor admin docs
* Fix InstanceMapping to always default queued_for_delete=False
* Ignore some PendingDeprecationWarnings for os-vif
* Replace glance command with openstack command
* Extract compute API _create_image to compute.utils
* Move resize.(start|end) notification sending to helper method
* Move resize.prep.start/end notifications to helper method
* Isolate cell-targeting code in MigrationTask
* Remove PLACEMENT_DB_ENABLED from nova-next job config
* Drop nova-multiattach job
* Don't force evacuate/live migrate in notification sample tests
* doc: Add solution to live migration ssh issues
* Follow up for per-instance serial number change
* Change nova-next job to run with python3
* doc: update the security groups admin doc
* doc: link Kashyap's cpu model talk to the libvirt driver config docs
* doc: link admin/configuration from admin home page
* Fup for the bandwidth resource provider series
* Per-instance serial number
* PCI: do not force remove allocated devices
* Ignore SAWarnings for "Evaluating non-mapped column expression"
* Move retry from _update to _update_to_placement
* Collect duplicate codepaths in os_vif_util
* Duplicate os-vif datapath offload metadata
* Add support for vrouter HW datapath offloads
* Switch tempest-slow to be run on python 3
* Move interface disabling to privsep
* Move setting mac addresses for network devices to privsep
* Fix config docs for handle_virt_lifecycle_events
* Add configuration of maximum disk devices to attach
* Force refresh instance info_cache during heal
* Add fill_virtual_interface_list online_data_migration script
* Fix string interpolations in logging calls
* FUPs: ReportClient traffic series
* Fix port dns_name reset
* Reject unshelve with port having resource request
* Reject evacuate with port having resource request
* Reject migrate with port having resource request
* Reject resize with port having resource request
* Reject server create with port having resource request
* Read port resource request from Neutron
* Include requested_resources to allocation candidate query
* Create RequestGroup from neutron port
* Reject networks with QoS policy
* Add a warning for max_concurrent_live_migrations
* Convert vrouter legacy plugging to os-vif
* Fix ComputeNode ovo compatibility code
* Remove unused quota options
* Raise 403 instead of 500 error from attach volume API
* Reject interface attach with QoS aware port
* Skip checking of target_dev for vhostuser
* Make 'plugin' a required argument for '_get_vif_instance'
* Add missing ws seperator between words
* Don't call begin_detaching when detaching volume from shelved vm
* Convert port to str when validate console port
* docs: Update references to "QEMU-native TLS" document
* libvirt: A few miscellaneous items related to "native TLS"
* Per aggregate scheduling weight
* Cleanup soft (anti)affinity weight multiplier options
* unused images are always deleted (add to in-tree hper-v code)
* Fix using template cell urls with nova-manage
* Turn off rp association refresh in nova-next
* Fix incompatible version handling in BuildRequest
* Use a static resource tracker in compute manager
* api-ref: Body verification for the lock action
* Rip out the SchedulerClient
* Rip the report client out of SchedulerClient
* Commonize _update code path
* Consolidate inventory refresh
* Reduce calls to placement from _ensure
* Fix ovo compatibility code unit tests
* Fix overcommit for NUMA-based instances
* Send context.global_id on neutron calls
* Use X-Forwarded-Proto as origin protocol if present
* Add method to generate device names universally
* docs: Secure live migration with QEMU-native TLS
* The field instance_name was added to InstanceCreatePayload
* Make functional-py37 job work like others
* Allow run metadata api per cell
* Enhance exception raised when invalid power state
* Doc: rebuild can result in SHUTOFF VM state
* Rename Ironic jobs
* Extend NeutronFixture to return port with resource request
* libvirt: Support native TLS for migration and disks over NBD
* Follow up for "Add API ref guideline for body text"
* Remove args(os=False) in monkey_patch
* Run nova-lvm job on nova/privsep/* changes
* Fix circular import in nova.privsep.utils
* Change to debug repetitive info messages
* libvirt: Add workaround to cleanup instance dir when using rbd
* Remove useless test samples for v2.66
* Fix rfc3986.is_valid_uri deprecation warnings
* Use oslo_db.sqlalchemy.test_fixtures
* libvirt: generalize rbd volume fallback removal statement
* Ensure rbd auth fallback uses matching credentials
* doc: Switch header styles in support doc
* Add links to summit videos in user/cells.rst
* Add functional regression recreate test for bug 1790204
* nit: Add space to feature support docs
* vmware:add support for the hw_video_ram image property
* Update instance.availability_zone during live migration
* Fix a broken link
* Drop old service version check compat from _delete_while_booting
* Remove "API Service Version" upgrade check
* Remove "Resource Providers" upgrade check
* Fix an inaccurate link in nova doc
* Pass request_spec from compute to cell conductor on reschedule
* Exclude build request marker from server listing
* Document using service user tokens for long running operations
* Redirect user/placement to placement docs
* Handle unbound vif plug errors on compute restart
* Fix a broken-link in nova doc
* Fix a broken-link in nova doc
* Use renamed template 'integrated-gate-py3'
* Remove legacy RequestSpec compat from conductor rebuild_instance
* Remove legacy RequestSpec compat from conductor unshelve_instance
* Remove legacy RequestSpec compat code from live migrate task
* Remove legacy request spec compat code from API
* Address nits on I1f1fa1d0f79bec5a4101e03bc2d43ba581dd35a0
* Address nits on I08991796aaced2abc824f608108c0c786181eb65
* doc: Rework 'resize' user doc
* Migrate "reboot an instance" user guide docs
* Fix jsonutils.to_primitive UserWarning
* Move interface enabling to privsep
* Move simple execute call to processutils
* Move some linux network helpers to use privsep
* Move bridge creation to privsep
* Move a generic bridge helper to a linux_net privsep file
* Properly log request headers in metadata API
* Default zero disk flavor to RULE_ADMIN_API in Stein
* Drop request spec migration code
* Fix best_match() deprecation warning
* Remove mox in libvirt/test_driver.py (8)
* Remove mox in libvirt/test_driver.py (7)
* Fix the link to the Placement API Version History
* Add descriptions about microversions
* Migrate upgrade checks to oslo.upgradecheck
* Fix up force live migration completion docs
* libvirt: remove live_migration_progress_timeout config
* libvirt: add live migration timeout action
* Fail to live migration if instance has a NUMA topology
* Add DownCellFixture
* Remove GROUP BY clause from CellMapping.get_by_project_id
* Add py36/py37 functional jobs to the experimental queue
* Add python 3.7 unit and functional tox jobs
* Replace ThreadPoolExecutor with GreenThreadPoolExecutor
* Fix destination_type attribute in the bdm_v2 documentation
* Add irrelevant-files for grenade-py3 jobs
* allow tcp-based consoles in get_console_output
* Use external placement in functional tests
* Remove lock on SchedulerReportClient._create_client
* DRY up SchedulerReportClient init
* Only construct SchedulerReportClient on first access from API
* Cleanup vendordata docs
* Remove utils.execute() from virt.disk.api
* Remove utils.execute() from the hyperv driver
* Remove the final user of utils.execute() from virt.images
* Remove final users of utils.execute() in libvirt
* Imagebackend should call processutils.execute directly
* Handle tags in _bury_in_cell0
* Make compute rpcapi version calculation check all cells
* Only warn about not having computes nodes once in rpcapi
* Fix typo
* Move nova.libvirt.utils away from using nova.utils.execute()
* Remove utils.execute() from quobyte libvirt storage driver
* Fix target used in nova.policy.check_is_admin
* refactor get_console_output() for console logfiles
* Final release note for versioned notification transformation
* Add API ref guideline for body text
* Remove allocations before setting vm_status to SHELVED_OFFLOADED
* Drop pre-cellsv2 compat in compute API.get()
* Move nova-cells-v1 to experimental queue
* Ignore MoxStubout deprecation warnings
* Fixed concurrent access to direct io test file
* Add docs for (initial) allocation ratio configuration
* Note the aggregate allocation ratio restriction in scheduler docs
* Add compute_node ratio online data migration script
* Add ratio online data migration when load compute node
* Use tempest [compute]/build_timeout in evacuate tests
* Update mailinglist from dev to discuss
* Clean up header encoding handling in compute API
* Remove utils.execute() from libvirt remotefs calls
* Remove utils.execute() calls from xenapi
* Create BDMs/tags in cell with instance when over-quota
* Add secret=true to fixed_key configuration parameter
* Add functional regression test for bug 1806064
* Fix sloppy initialization of the new disk ops semaphore
* Revert "Add regression test for bug 1550919"
* Use new ``initial_xxx_allocation_ratio`` CONF
* Remove placement perf check
* Mention size limit on user data in docs
* Transform scheduler.select_destinations notification
* SIGHUP n-cpu to clear provider tree cache
* libvirt: Refactor handling of PCIe root ports
* Fix misuse of assertTrue
* Workaround a race initialising version control in db_version()
* Make [cinder]/catalog_info no longer require a service_name
* Remove get_node_uuid
* Restore nova-consoleauth to install docs
* Change the default values of XXX_allocation_ratio
* Remove Placement API reference
* Always read-deleted=yes on lazy-load
* Refactor TestEvacuateDeleteServerRestartOriginalCompute
* Fix InstanceNotFound during _destroy_evacuated_instances
* Give drop_move_claim() correct docstring
* Add missing ws seperator between words
* Drop cruft code for all_tenants behaviour
* Remove ironic/pike note from *_allocation_ratio help
* Use links to placement docs in nova docs
* Add a bug tag for nova doc
* Add I/O Semaphore to limit concurrent disk ops
* Remove NovaException logging from scatter_gather_cells
* Transform compute_task notifications
* Add HPET timer support for x86 guests
* Consider root id is None in the database case
* Remove v1 check in Cinder client version lookup
* Add CellsV2 FAQ about API design decisions
* Use long_rpc_timeout in select_destinations RPC call
* Allow driver to specify switch&port for faster lookup
* Fix server query examples
* Nix refs to ResourceProvider obj from libvirt UT
* Skip double word hacking test
* Fix regression in glance client call
* Add description of custom resource classes
* Make _instances_cores_ram_count() be smart about cells
* Make supports_direct_io work on 4096b sector size
* modify the avaliable link
* api-ref: Add a description about sort order
* Add debug logs when doubling-up allocations during scheduling
* Delete NeutronLinuxBridgeInterfaceDriver
* Mention meta key suffix in tenant isolation with placement docs
* libvirt: change "Ignoring supplied device name" warning to info
* Fix a help string in nova-manage
* Use SleepFixture instead of mocking _ThreadingEvent.wait
* remove mocks of oslo.service private members
* Harden placement init under wsgi
* Fix version details API does not return 200 OK
* Add a link to the doc contrib guide
* Improve formats of the Compute API guide
* Remove LazyLoad of Scheduler Clients
* Allow resource_provider_association_refresh=0
* prevent common kwargs from glance client failure
* Fix support matrix for VMware UEFI support
* Add bandwidth related standard resource classes
* Add requested_resources field to RequestSpec
* Add request_spec.RequestGroup versioned object
* Update compute API.get() stubs in test_access_ips
* Update compute API.get() stubs for test_disk_config
* Update compute API.get() stubs for test_*security_groups
* Update compute API.get() stubs in test_server_actions
* Update compute API.get() stubs in test_serversV21
* Update compute API.get() mocks in test_server_metadata
* Convert exception messages to strings
* Trivial: add reminder to update Tempest's scheduler_enabled_filters
* Update the description to make it more accuracy
* Pass disk_info dict to libvirt_info
* Fix libvirt volume tests passing invalid disk_info
* Default embedded instance.flavor.is_public attribute
* [Trivial Fix] Correct spelling error of "should" and "resource"
* Clean up cpu_shared_set config docs
* quota: remove defaults kwarg in get_project_quotas
* quota: remove QuotaEngine.register_resources()
* PowerVM upt parity for reshaper, DISK_GB reserved
* Minimal construct plumbing for nova service-list when a cell is down
* Minimal construct plumbing for nova show when a cell is down
* Refactor scatter-gather utility to return exception objects
* Minimal construct plumbing for nova list when a cell is down
* Modify get_by_cell_and_project() to get_not_deleted_by_cell_and_project()
* Explain cpu_model_extra_flags and nested guest support
* Run negative server moving tests with nested RPs
* Kill @safe_connect in _get_provider_traits
* libvirt: Avoid setting MTU during live migration if unset
* Add tests for bug #1800511
* No longer call _normalize_inventory_from_cn_obj from upt flow
* Provide allocation_ratio/reserved amounts from update_provider_tree()
* Fix nits in I7cbd5d9fb875ebf72995362e0b6693492ce32051
* tox: Stop build *all* docs in 'docs'
* Fix min config value for shutdown_timeout option
* Fix os-simple-tenant-usage result order
* Add recreate test for bug 1799892
* Add nova-status upgrade check for consoles
* PowerVM: update_provider_tree() (compatible)
* Add functional regression test for bug 1794996
* Add volume-backed evacuate test
* Add post-test hook for testing evacuate
* Cleanups for the scheduler code
* Use RequestSpec.user_id in scheduler.utils.claim_resources
* Remove restart_scheduler_service() method
* Drop legacy live migrate allocation compat code
* Reject forced move with nested source allocation
* Add API ref guideline for examples
* api-ref: Add descriptions of error cases
* api-ref: Remove unnecessary minimum microversion
* Add a hacking rule for deprecated assertion methods
* Make CellDatabases fixture reentrant
* Add more documentation for online_data_migrations CLI
* Add functional recreate test for bug 1799727
* quota: remove default kwarg on get_class_quotas()
* Fix ironic client ironic_url deprecation warning
* Consider allocations invovling child providers during allocation cleanup
* quota: remove QuotaDriver.destroy_all_by_project()
* Add restrictions on updated_at when getting instance action records
* Add restrictions on updated_at when getting migrations
* quota: remove unused Quota driver methods
* quota: remove unused code
* Add regression test for bug 1550919
* Fix test bug when host doesn't have /etc/machine-id
* conductor: Recreate volume attachments during a reschedule
* Add regression test for bug#1784353
* fixtures: Track volume attachments within CinderFixtureNewAttachFlow
* Fix up compute rpcapi version for pike release
* Rename tempest-nova job to follow conventions
* Convert legacy-tempest-dsvm-neutron-src-oslo.versionedobjects job
* Drop legacy cold migrate allocation compat code
* Add debug logs for when provider inventory changes
* Log the operation when updating generation in ProviderTree
* api-ref: 'os-hypervisors' doesn't reflect overcommit ratio
* Document each libvirt.sysinfo_serial choice
* Use tempfile for powervm config drive
* Remove the CachingScheduler
* Ensure attachment cleanup on failure in driver.pre_live_migration
* Use assertRegex instead of assertRegexpMatches
* Remove the extensions framework from wsgi.py
* Remove more code related to extensions and testing
* Remove the caching the resource on Request object
* Fix block_device_mapping_v2 mention in server create API reference
* Fix typo in libvirt.hw_machine_type help
* Bump os-brick version to 2.6.1
* Ignore uuid if already set in ComputeNode.update_from_virt_driver
* Fix formatting non-templated cell URLs with no config
* Use unique consumer_id when doing online data migration
* Add recreate test for bug 1798163
* Handle online_data_migrations exceptions
* Remove duplicate legacy-tempest-dsvm-multinode-full job
* Handle volume API failure in _post_live_migration
* Move live_migration.pre.start to the start of the method
* Add some more docs for upgrade checkers
* Don't persist RequestSpec.requested_destination
* Add microversion 2.67 to rest api version history
* Deprecate the nova-xvpvncproxy service
* Deprecate the nova-console service
* doc: Add minimal documentation for MKS consoles
* doc: Add minimal documentation for RDP consoles
* doc: Rewrite the console doc
* doc: update metadata service doc
* Migrate nova v2.0 legacy job to zuulv3
* Fix deprecated base64.decodestring warning
* Fix NoneType error in _notify_volume_usage_detach
* Zuul: Update barbican experimental job
* Increment versioning with pbr instruction
* Add regression test for bug 1797580
* Use tempest-pg-full
* Add microversion 2.67 to support volume_type
* Add compute API validation for when a volume_type is requested
* Add compute version 36 to support ``volume_type``
* Use nova-consoleauth only if workaround enabled
* fix "you" typo
* Skip _remove_deleted_instances_allocations if compute is new
* Replace openSUSE experimental check with newer version
* Transform volume.usage notification
* api-ref: Replace non UUID string with UUID
* Remove useless TODO section
* api-ref: Remove a description in servers-actions.inc
* Make ResourceTracker.tracked_instances a set
* Properly track local root disk usage during moves
* Add regression test for bug 1796737
* Fix missing import in test_compute_mgr
* Move test.nested to utils.nested_contexts
* conf: Deprecated 'config_drive_format'
* Fix nits in choices documentation
* Remove an unnecessary duplicate flag
* Not set instance to ERROR if set_admin_password failed
* De-dupe subnet IDs when calling neutron /subnets API
* Handle missing marker during online data migration
* Run ServerMovingTests with nested resources
* Refactor allocation checking in functional tests
* Use provider tree in virt FakeDriver
* Enable nested allocation candidates in scheduler
* consumer gen: support claim_resources
* api-ref: Move the evacuate action to admin action
* Add scatter-gather-single-cell utility
* Handle IndexError in _populate_neutron_binding_profile
* Fix logging parameter in _populate_pci_mac_address
* Skip test_parallel_evacuate_with_server_group until fixed
* doc: fix and clarify --block-device usage in user docs
* Placement: Remove usage of get_legacy_facade()
* conf: Convert 'live_migration_inbound_addr' to HostAddressOpt
* conf: Gather 'live_migration_scheme', 'live_migration_inbound_addr'
* VMware: Live migration of instances
* Remove redundant irrelevant-files from neutron-tempest-linuxbridge
* Add hide server address tests in test_serversV21.py
* Fix neutron-tempest-linuxbridge irrelevant-files
* Raise error on timeout in wait_for_versioned_notifications
* Replace usage of get_legacy_facade() with get_engine()
* Add volume_type field to BlockDeviceMapping object
* Remove unnecessary redirect
* Update doc
* Fix stacktraces with redis caching backend
* remove commented-out code
* Use INFO for logging no allocation candidates
* Don't emit warning when ironic properties are zero
* Null out instance.availability_zone on shelve offload
* Follow up for Ie991d4b53e9bb5e7ec26da99219178ab7695abf6
* Follow up for Iba230201803ef3d33bccaaf83eb10453eea43f20
* Follow up for Ib6f95c22ffd3ea235b60db4da32094d49c2efa2a
* nova-manage - fix online_data_migrations counts
* Add attach kwarg to base/nova-net allocate_for_instance methods
* consumer gen: more tests for delete allocation cases
* Pick next minimum libvirt / QEMU versions for "T" release
* Enforce case-sensitive hostnames in aggregate host add
* Revert "Make host_aggregate_map dictionary case-insensitive"
* api-ref: add 'migrations' param to GET /os-migrations
* Option "scheduler_default_filters" is deprecated
* consumer gen: move_allocations
* doc:update virtual gpu doc
* Consumer gen: remove_provider_from_instance_allocation
* Consumer gen support for put allocations
* Consumer gen support for delete instance allocations
* api-ref: Fix wrong bold decoration
* placement: Always reset conf.CONF when starting the wsgi app
* Set defult value of num_nvme_discover_tries=5
* Rename "polling_changes-since_parameter.rst"
* Imported Translations from Zanata
* Ignore VirtDriverNotReady in _sync_power_states periodic task
* nova-status - don't count deleted compute_nodes
* libvirt: fix disk_bus handling for root disk
* Remove deprecated nova-consoleauth reference from doc
* Imported Translations from Zanata
* Add get_by_cell_and_project() method to InstanceMappingList
* Making instance/migration listing skipping down cells configurable
* ironic: stop hammering ironic API in power sync loop
* Nix update_instance_allocation, _allocate_for_instance
* Filter deleted computes from get_all_by_uuids()
* Fix missing specifying doctrees directory
* libvirt: Drop MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION
* Remove an unnecessary comment
* Mention SR-IOV cold migration limitation in admin docs
* Add contributor guide for upgrade status checks
* libvirt: mdevs returning parent and vendor PCI info
* Remove deprecated hide_server_address_states option
* Resource retrieving: add changes-before filter
* cells: Be explicit in docs about service restarts
* doc trivial: additional info to admin-password-injection
* Add missing backticks in nova-manage docs
* Fix some typos in nova api ref doc
* Transform libvirt.error notification
* Remove mox in test_compute_api.py (4)
* Remove mox in libvirt/test_driver.py (6)
* Refactor NeutronFixture
* libvirt: Use 'virt' as the default machine type for ARMv7
* add caching to _build_regex_range
* Allow ability for non admin users to use all filters on server list
* Rename changes-since test sample file
* remove virt driver requires_allocation_refresh
* Fix docs and add functional test for AggregateMultiTenancyIsolation
* Noop CantStartEngineError in targets_cell if API DB not configured
* Fix mock.patch usage in unit tests
* Fix evacuate logging
* conf: Use new-style choice values
* Follow devstack-plugin-ceph job rename
* Fix resource tracker updates during instance evacuation
* Cleanup zuul.yaml
* add python 3.6 unit test job
* switch documentation job to new PTI
* import zuul job settings from project-config
* fix a spelling error
* Update docs for live_migration_progress_timeout option
* Add an example to add more pci devices in nova.conf
* Fix formatting in changes-since guide
* Do not dump all instances in the scheduler
* Use six.string_types to improve python2/3 compatibility
* doc: update info for hypervisors
* fup: Fix import order and test nit
* Remove redundant image GET call in _do_rebuild_instance
* Configure placement DB context manager for nova-manage/status
* Use uuidsentinel from oslo.utils
* Fix DB archiver AttributeError due to wrong table name attribute used
* Fix nova-status "_check_resource_providers" check
* Fix TypeError in nova-manage cell_v2 list_cells
* Document unset/reset wrinkle for *_allocation_ratio options
* Docs: update link for remote debugging
* Removing pip-missing-reqs from default tox jobs
* Fix a failure to format config sample
* Other host allocs may appear in gafpt during evac
* Move conductor wait_until_ready() delay before manager init
Don't persist zero allocation ratios in ResourceTracker * hardware: fix memory check usage for small/large pages * Fix nits: Compute: Handle reshaped provider trees * Fix reshaper report client functonal test nits * Document differences and similaries between extra specs and hints * Combine error handling blocks in \_do\_build\_and\_run\_instance * Time how long select\_destinations() takes in conductor * Add encrypted volume support to feature matrix docs * Remove old check\_attach version check in API * Delete instance\_group\_member records from API DB during archive * Add functional test for live migrate with anti-affinity group * Revert "libvirt: add method to configure migration speed" * (Re)start caching scheduler after starting computes in tests * Restart scheduler in TestNovaManagePlacementHealAllocations * [placement] Make \_ensure\_aggregate context not independent * Send soft\_delete from context manager * Transform missing delete notifications * doc: add info how to troubleshoot vmware specific problems * Fix a broken conf file description in networking doc * Mention (unused) RP generation in POST /allocs/{c} * Fail heal\_allocations if placement is borked * reshaper gabbit: Nix comments re doubled max\_unit * Do test\_reshape with an actual startup * Compute: Handle reshaped provider trees * Revert "Don't use '\_TransactionContextManager.\_async'" * Don't use '\_TransactionContextManager.\_async' * libvirt: skip setting rx/tx queue sizes for not virto interfaces * Make monkey patch work in uWSGI mode * privsep: Handle ENOENT when checking for direct IO support * [placement] split gigantor SQL query, add logging * Optimize global marker re-lookup in multi\_cell\_list * Record cell success/failure/timeout in CrossCellLister * Make instance\_list perform per-cell batching * Update volume-attachment API url in policies * Fix race condition in reshaper handler * Make scheduler.utils.setup\_instance\_group query all cells * Deprecate Core/Ram/DiskFilter * Document no content on POST /reshaper 204 * api-ref: add a warning about calling swap volume directly * api-ref: fix volume attachment update policy note * Report client: update\_from\_provider\_tree w/reshape * Report client: \_reshape helper, placement min bump * Report client: get\_allocations\_for\_provider\_tree * Report client: Real get\_allocs\_for\_consumer * List instances from all cells explicitly * Batch results per cell when doing cross-cell listing * doc: Note NUMA topology requirements for numa-aware-vswitches * api: Remove unnecessary default parameter * hyperv: Cleans up live migration Planned VM * Correct the release notes related to nova-consoleauth * tests: Create functional libvirt test base class * Fix create\_resource\_provider docstring * tests: Move mocking to setUp * Remove noisy DEBUG log * Make get\_allocations\_for\_resource\_provider raise * reshaper: Look up provider if not in inventories * [placement] Add functional test to verify presence of policy * Normalize dashless 'resource provider create' uuid * [placement] Add /reshaper handler for POST * Clarify which context is used by do\_query() * Make RecordWrapper record RequestContext and expose cell\_uuid * Stash the cell uuid on the context when targeting * Make CELL\_TIMEOUT a constant * [placement] Regex consts for placement schema * Wait for network-vif-plugged on resize revert * libvirt: Always escape IPv6 addresses when used in migration URI * Move str to six.string\_types * libvirt: Don't react to VIR\_DOMAIN\_EVENT\_SUSPENDED\_MIGRATED events 
* Set policy\_opt defaults in placement deploy unit test * Explicitly fail if trying to attach SR-IOV port * Filter out instances without a host when populating AZ * Set policy\_opt defaults in placement gabbi fixture * Fix soft deleting vm fails after "nova resize" vm * Use placement microversion 1.26 in update\_from\_provider\_tree * Remove ChanceScheduler * Doc: PowerVM does support shelve * comment correction for libvirt multiattach * Remove the deprecated API extensions policies * Update contributor guide for Stein * Add zvm CI information * Add zvm admin intro and hypervisor information * Update api-guide and api-ref to be clear about forced-down * Making consistent used of GiB and MiB in API ref * placement: use single-shot INSERT/DELETE agg * Add trait query to placement perf check * Add explanatory prefix to post\_test\_perf output * Py3 fix in fake image service * use static pages for mitaka and newton release notes * Revisons on notifications doc * VMware: add missing os types in vSphere sdk 6.5 * Ironic: report 0 for vcpus/memory\_mb/disk\_gb resources * Remove blacklisted py3 xen tests * Add placement perf info gathering hook to end of nova-next * Fix service list for disabled compute using MC driver * Delete instance\_id\_mappings record in instance\_destroy * Add functional test for affinity with multiple cells * [placement] api-ref: Add missing aggregates example * Remove mox in libvirt/test\_driver.py (5) * add zvm into support matrix * Trivial fix to remove extra 'will' on microversion doc * Imported Translations from Zanata * Handle unicode characters in migration params * placement: use simple code paths when possible * Test case for multiple forbidden traits * Adds a test for \_get\_provider\_ids\_matching() * Make Xen code py3-compatible * Revert "libvirt: slow live-migration to ensure network is ready" * improve migration script * placement: ignore policy scope check failures if not enforcing scope * api-ref: fix GET /flavors?is\_public description * Update reno for stable/rocky * Remove patching the mock lib * block\_device: Rollback volumes to in-use on DeviceDetachFailed * Quota details for key\_pair "in\_use" is 0 * Add additional info to resource provider aggregates update API 18.0.0.0rc1 ----------- * Handle binding\_failed vif plug errors on compute restart * Fix image-defined numa claims during evacuate * Add a prelude release note for the 18.0.0 Rocky GA * Nix 'new in 1.19' from 1.19 sections for rp aggs * libvirt: Use os.stat and os.path.getsize for RAW disk inspection * Trivial fix on migration doc * [placement] api-ref: add description for 1.29 * Update the parameter explain when updating a volume attachment * Update ssh configuration doc * Update nova network info when doing rebuild for evacuate operation * Docs: Add guide to migrate instance with snapshot * Update compute rpc version alias for rocky * Add the guideline to write API reference * get provider IDs once when building summaries * Remove Neutron MetaAPIProxy from cellsv2-layout * [placement] Avoid rp.get\_by\_uuid in allocation\_candidates * Fix host validity check for live-migration * libvirt: Reduce calls to qemu-img during update\_available\_resource * Refactor cell\_type in compute/api.py * Add explicit functional-py36 tox target * xx\_instance\_type\_id in list\_migrations should be integer * Fix bad links for admin-guide * api-ref: Add descriptions for rebuild * Add microversion info in the os-server-groups API samples * Update really old comments about vmware hosts managing multiple 
nodes * doc: mark the max microversion for rocky * Fix resize revert to use non-legacy alloc handling * api-ref: fix min\_version for parent\_provider\_uuid in responses * [placement] Add version directives in the history doc * Use common functions in granular fixture * Fix none-ascii char in doc * Update resources once in update\_available\_resource * Define irrelevant-files for tempest-full-py3 job * Add tempest-slow job to run the tempest slow tests * Not use project table for user table * Adds a test for getting allocations API * Update RequestSpec.flavor on resize\_revert * Use CONF.long\_rpc\_timeout in post\_live\_migration\_at\_destination * Optimize AZ lookup during schedule\_and\_build\_instances * [placement] ensure\_rc\_cache only at start of process * Remove unused flavor\_delete\_info() method * Reno for notification-transformation-rocky * Deprecate upgrade\_levels options for deprecated/removed services * [placement] Move resource\_class\_cache into placement hierarchy * [placement] Debug log per granular request group * Fix nits in resource\_provider.py * Remove unused request API sample template * Grease some more tests hitting RetryDecorator * Scrub hw:cpu\_model from API samples * Grease test\_try\_deallocate\_network\_retry\_direct * libvirt: guest: introduce blockStats instead of domain.blockStats * Improve NeutronFixture and remove unncessary stubbing * Remove unused stubbing function from test * doc: fix resize user guide link * tox: Ensure reused envdirs share the same deps * Fix a typo in comment in resource\_provider.py * Refactor AllocationFixture in placement test * Increase max\_unit in placement test fixture * Use common functions in NonSharedStorageFixture * Hook resource\_tracker to remove stale node information * Make ResourceTracker.stats node-specific * Add recreate test for RT.stats bug 1784705 * Reload oslo\_context after calling monkey\_patch() * Fix comments in \_anchors\_for\_sharing\_providers and related test * Ensure the order of AllocationRequestResources * Don't overwrite greenthread-local context in host manager * libvirt: Remove usage of migrateToURI{2} APIs * Remove unnecessary PlacementFixture setups * Don't poison Host.\_init\_events if it's already mocked * Remove redundant join in \_anchors\_for\_sharing\_providers * [placement] Retry allocation writes server side * [placement] api-ref: add traits parameter * Retry decorator fix for instances which go into ERROR state during bulk delete * Fix formatting for vcpu\_pin\_set and reserved\_huge\_pages * Updated AggregateImagePropertiesIsolation filter illustration * [placement] Use a simplified WarningsFixture * [placement] Use a non-nova log capture fixture * [placement] Use oslotest CaptureOutput fixture * [placement] Use own set\_middleware\_defaults * Extract \_update\_to\_placement method in resource tracker * Set default of oslo.privsep.daemon logging to INFO level * Remove superfluous network stubbing in func test * Add additional functional tests for NUMA networks * Add description for placement 1.26 18.0.0.0b3 ---------- * Add functional test for forced live migration rollback allocs * Assorted cleanups from numa-aware-vswitches series * libvirt: Revert non-reporting DISK\_GB if sharing * Pass source vifs to driver.cleanup in \_post\_live\_migration * Fix create\_all() to replace\_all() in comments * compute node local\_gb\_used include swap disks * Use source vifs when unplugging on source during post live migrate * Fix all invalid obj\_make\_compatible test case * Change deprecated 
policies to policy * api-ref: document user\_data length restriction * Fix accumulated nits from port binding for live migration series * [placement] Use base test in placement functional tests * Fix signature of \_FakeImageService.download * [placement] Extract base functional test case from test\_direct * Use vif.vif\_name in \_set\_config\_VIFGeneric * doc: add missing permission for the vCenter service account * Hyper-V + OVS: plug vifs before starting VMs * Use placement context in placement functional tests * ironic: Report resources as reserved when needed * doc: remove rocky-specific nova-scheduler min placement version * scheduler: Start utilizing RequestSpec.network\_metadata * Consider network NUMA affinity for move operations * Add nova-manage placement sync\_aggregates * Add functional tests for numa-aware-vswitches * libvirt: Start populating NUMACell.network\_metadata field * conf: Add '[neutron] physnets' and related options * tox: Silence psycopg2 warnings * FakeLibvirtFixture: mock get\_fs\_info * Add method to get cpu traits * Blacklist greenlet 0.4.14 * Enhance doc to guide user to use nova user * doc: link to AZ talk from the Rocky summit * doc: link to CERN summit video about upgrading from cells v1 to v2 * Update queued-for-delete from the ComputeAPI during deletion/restoration * Online data migration for queued\_for\_delete flag * ironic: add instance\_uuid before any other spawn activity * Use consumer generation in \_heal\_allocations\_for\_instance * Cache is\_bfv check in ResourceTracker * Add shelve/unshelve wrinkle to volume-backed disk func test * Fix wonky reqspec handling in conductor.unshelve\_instance * Heal RequestSpec.is\_bfv for legacy instances during moves * Report 0 root\_gb in resource tracker if instance is bfv * Docs: Add Placement to Nova system architecture * libvirt: Remove reference to transient domain when detaching devices * Add queued\_for\_delete field to InstanceMapping object * Rename auth\_uri to www\_authenticate\_uri * Func test for improper cn local DISK\_GB reporting * perform reshaper operations in single transaction * docs: add nova host-evacuate command to evacuate documentation * compute: Ensure pre-migrating instances are destroyed during init\_host * In Python3.7 async is a keyword [1] * Check provider generation and retry on conflict * Fix missing print format error * Remove stevedore extensions server\_create method * Update RequestSpec.instance\_uuid during scheduling * Add regression test for bug 1781710 * Skip test\_resize\_server\_revert\_with\_volume\_attached in nova-lvm * Disable limits if force\_hosts or force\_nodes is set * conductor: use port binding extended API in during live migrate * Port binding based on events during live migration * Annotate flows and handle PortBindingDeletionFailed in ComputeManager * Implement migrate\_instance\_start method for neutron * libvirt: use dest host vif migrate details for live migration * libvirt: use dest host port bindings during pre\_live\_migration * libvirt: factor out pre\_live\_migration plug\_vifs call * Add VIFMigrateData.get\_dest\_vif * Add VIFMigrateData object for live migration * [placement] disallow additional fields in allocations * Fix ServerMigrationSampleJsonTests to use sample files from version dir * Remove "DEPRECATED" tag from Obsolete APIs * Remove support for /os-floating-ip-dns REST API * Remove support for /os-floating-ips-bulk REST API * Avoid requesting DISK\_GB allocation for root\_gb on BFV instances * [placement] cover bad content-length 
header * [placement] Add gabbi coverage for inv of missing rp * [placement] Add gabbi coverage for an inventory change * clarify usage of upgrade\_levels group * Fix confusing log message in scheduler * libvirt: remove unused attribute driver for LibvirtConfigNodeDevice * Fix the incorrect description and sample * Transform metrics.update notification * update tox venv env to install all requirements * Fix "XLibvirt KVM (ppc64)" typo in feature support matrix docs * Call generate\_image\_url only for legacy notification * Add unshelve instance error info to fault table * Address nit in 79dac41fee178dabb547f4d7bc10609630767131 * Escalate UUID validation warning to error in test * Fix a newly introduced UUID warning in the unit test * Move legacy-tempest-dsvm-nova-os-vif in repo * API: add support to abort queued live migration in microversion 2.65 * Fix ServerMigrationSampleJsonTestsV2\_24 to use its own sample file * Compute: add support to abort queued live migration * Use ThreadPoolExecutor for max\_concurrent\_live\_migrations * Update HostState.instances during \_consume\_selected\_host * Replace support matrix ext with common library * Add UUID validation for consumer\_uuid * Address nits in server group policy series * Adjust log style and remove ocata support * z/VM Driver: add get console output * z/VM Driver: add power actions * z/VM Driver: add snapshot function * z/VM Driver: Spawn and destroy function of z/VM driver * z/VM Driver: Initial change set of z/VM driver * Transform instance.live\_migration\_force\_complete notification * Transform aggregate.update\_prop notification * Add note about reschedules and num\_attempts in filter\_properties * Add another up-call to the cells v2 caveats list * Stop using HostAPI.service\_delete * Handle HostMappingNotFound when deleting a compute service * Skip more rebuild tests for cells v1 job * Refactor \_heal\_instances\_in\_cell * Heal allocations with incomplete consumer information * fix cellv2 delete\_host * Imported Translations from Zanata * ironic: Log an error when API version is not available * Microversion 2.64 - Use new format policy in server group * virt/ironic: Implement rescue and unrescue * ironic: provide facilities to gracefully navigate versions * do not assume 1 consumer in AllocList.delete\_all() * Update process doc to be more generic about point of contact * Follow up for Ie49d605c66062d2548241d7e04f5a2a6b98c011e * Mention osc-placement for managing traits in docs * Handle rebuild of instances with image traits * Complete the api-ref of security group rule * Adapt \_validate\_instance\_group\_policy to new policy model * Change the ServerGroupAntiAffinityFilter to adapt to new policy * Add policy field to ServerGroup notification object * Add policy to InstanceGroup object * Add nova-status upgrade check for request spec migrations * Add placement.concurrent\_udpate to generation pre-checks * Delete orphan compute nodes before updating resources * Test for unsanitized consumer UUID * Revert "docs: Disable smartquotes" * [placement] add error.code on a ConcurrentUpdateDetected * Fix TypeError in prep\_resize allocation cleanup * Use hard coded values in schema than reference * Update some placement docs to reflect modern times * Remove unused variable in migration * Address nits from consumer generation * update project/user for consumer in allocation * Use nova.db.api directly * Update root providers in same tree * hardware: fix hugepages memory usage per intances * Add queued for delete to instance\_mappings 
table * Remove duplicate parameter in API sample documents * placement: delete auto-created consumers on fail * delete consumers which no longer have allocations * make incomplete\_consumer\_project\_id a valid UUID * Refactor policies to policy in InstanceGroup DB model * Add rules column to instance\_group\_policy table * objects: Add RequestSpec.network\_metadata * api-ref: Example verification for servers.inc * hardware: Start accounting for networks in NUMA fitting * objects: Add NUMATopologyLimits.network\_metadata * Transform instance.rebuild\_scheduled notification * Remove irrelevant comment * Avoid joins in \_server\_group\_count\_members\_by\_user * Fix server\_group\_members quota check * Add functional regressions tests for server\_group\_members OverQuota * Handle compare in test\_pre\_live\_migration\_volume\_backed\* directly * Resource\_provider API handler does not return specific error codes * Remove mox in unit/network/test\_neutronv2.py (2) * Add documentation for emulator threads policy * Fix whitespace damage * Use valid UUID in the placement gabbits * Transform instance.live\_migration\_post notification * Transform instance.live\_migration\_rollback\_dest notification * Update install guide for placement database configuration * move lookup of provider from \_new\_allocations() * Time how long pre\_live\_migration() takes * Add action initiator attribute to the instance payload * Default embedded instance.flavor.disabled attribute * objects: Add NUMACell.network\_metadata * network: Retrieve tunneled status in '\_get\_physnet\_info' * network: Always retrieve network information if available * Stop setting glance\_api\_version in cinder.conf in nova-live-migration * Wait for vif plugging during live migration job * cover migration cases with functional tests * Fix unbound local when saving an unchanged RequestSpec * Prevent updating an RP's parent to form a loop * Handle nested serialized json entries in assertJsonEqual * libvirt: add qemu version check when configuring mtu for network * conf: Resolve Sphinx errors * Remove unnecessary execute permissions of a file * Convert 'placement\_api\_docs' into a Sphinx extension * Regression test for bug 1779635 * Regression test for bug 1779818 * Update admin/flavors document * Fix missing versioned notification examples * [doc] enhance admin/configuration/api.rst * Use 'version2' when syncing placement db * [placement] fix allocation handler docstring typo * Fix placement incompatible with webob 1.7 * manage: Remove dead code * Define common variables for irrelevant-files * Fix nits in placement-return-all-resources series * Add microversion for nested allocation candidate * libvirt: Fix the rescue race for vGPU instances * More config drive docs updates * Remove file injection from config drive sample docs * Use ironic-tempest-dsvm-ipa-wholedisk-bios-agent\_ipmitool-tinyipa in tree * Mention PowerVM support of config drive * tox: Reuse envdirs * Update xenapi\_disable\_agent config option usage in docs * conf: Correct documentation for '[pci] passthrough\_whitelist' * tox: Document and dedupe mostly everything * trivial: Remove 'tools/releasenotes\_tox.sh' * Add regression test for bug #1764883 * Remove mox in sec group test and functional tests * Use nova.test.TestingException * libvirt: Add missing encryption\_secret\_uuid tests for pre\_live\_migration * Mention server status in api-ref when rebuild * Remove mox in unit/network/test\_neutronv2.py (1) * Make nova-lvm run in check on libvirt changes and compute API 
tests * Allow templated cell\_mapping URLs * Remove remaining legacy DB API instance\_group\* methods * Remove unused DB API instance\_group\_member\* methods * Remove unused DB API instance\_group\_delete method * Remove compatibility code for instance groups * [placement] demonstrate part of bug 1778591 with a gabbi test * Handle CannotDeleteParentResourceProvider to 409 Conflict * Fix unit test modifying global state * [placement] Fix capacity tracking in POST /allocations * Update scheduler to use image-traits * [placement] Add test demonstrating bug 1778743 * conf: libvirt: Make \`/dev/urandom\` the default for 'rng\_dev\_path' * Skip ServerShowV247Test.test\_update\_rebuild\_list\_server in nova-cells-v1 job * libvirt: Drop MIN\_LIBVIRT\_VHOSTUSER\_MQ * Fix CLI docs for nova-manage api\_db commands * Update API reference for os-floating-ip-pools * Fix API reference for os-floating-ip-dns * Fix API reference for os-floating-ips-bulk * Remove support for /os-fixed-ips REST API * Fix the duplicated config options of api\_database and placement\_database * network: Rename 'create\_pci\_requests\_for\_sriov\_ports' * network: Rename '\_get\_phynet\_info' * Make nova list and migration-list ignore down cells * Add instance.unlock notification * [placement] Demonstrate bug in consumer generation handling * Delete port bindings in setup\_networks\_on\_host if teardown=True * Add "activate\_port\_binding" neutron API method * Add "delete\_port\_binding" network API method * Add "bind\_ports\_to\_host" neutron API method * Test alloc\_cands with indirectly sharing RPs * Switch to oslo\_messaging.ConfFixture.transport\_url * network: Unchain '\_get\_phynet\_info' from '\_get\_port\_vnic\_info' * Adapter raise\_exc=False by default * Bump keystoneauth1 minimum to 3.9.0 * conf: Deprecate 'network\_manager' * Fix bug to filter\_scheduler * Fix bug to api-ref * [placement] Extract create\_allocation\_list * libvirt: Log breadcrumb for known encryption bug * Remove mox in test\_conductor.py (2) * Remove mox in test\_conductor.py (1) * api-ref: Fix parameters about trusted certificate IDs * Remove mox in nova/tests/unit/virt/xenapi/stubs.py * Fix nits from change Ia7cf4414feb335b3c2e863b4c8b4ff559b275c34 * Implement discard for file backed memory * Fix nits from change I676291ec0faa1dea0bd5050ef8e3426d171de4c6 * placement: s/None/null/ in consumer conflict msg * objects: Remove legacy '\_to\_dict' functions * objects: Remove NUMATopologyLimits.obj\_from\_db\_obj * Cleanup nits in placement database changes * Add instance.lock notification * fix PowerVM get\_bootdisk\_path docstring * Implement file backed memory for instances in libvirt * Comment proposed ironic fix for removal of ironic driver workaround * Ironic update\_provider\_tree: restore traits override * Fix nits from change Id609789ef6b4a4c745550cde80dd49cabe03869a * Add a microversion for consumer generation support * Be graceful about vif plugging in early ironic driver startup * Mention nova-status upgrade check CLI in upgrade doc * Add information of deprecation nova-network in system-admin.rst * Validate transport\_url in nova-manage cell\_v2 commands * Add check if neutron "binding-extended" extension is available * Wait for network-vif-plugged before starting live migration * Don't heal allocations for deleted servers * Convert ironic virt driver to update\_provider\_tree * Fix regression when listing build\_requests with marker and ip filter * Ensure that os-traits sync is attempted only at start of process * Isolate placement 
database config * Add full traceback to ExceptionPayload in versioned notifications * Optimize member\_of check for nested providers * Resource tracker: improve resource tracker periodic task * Clarify placement DB schema migration * Fix MigrateData object tests for compat routines * Nix unused raise\_if\_custom\_resource\_class\_pre\_v1\_1 * Skip ServerShowV263Test.test\_show\_update\_rebuild\_list\_server for cellsv1 * Simplify instance name generation * ironic: bugfix: ensure a host is set for volume connectors * Revert "Re-using the code of os brick cinder" * placement: Make API history doc more consistent * Make host\_aggregate\_map dictionary case-insensitive * Return all nested providers in tree * Add osprofiler config options to generated reference * Fix retrying lower bound in requirements.txt * unquiesce instance after quiesce failure * Add policy rule to block image-backed servers with 0 root disk flavor * Enforce placement minimum in nova.cmd.status * Update the disk\_cachemodes to mention an rbd detail * Add trusted certs to feature support matrix docs * Fix nits from trusted certs notification change * Remove max\_size parameter from fake\_libvirt\_utils.fetch\_\*image methods * Add PLACEMENT\_DB\_ENABLED=True to the nova-next job * Optional separate database for placement API * Add supplementary info for simple\_cell\_setup cmd * Add certificate validation docs * Add troubleshooting item about ignored microversions * Make check\_can\_live\_migrate\_destination use long\_rpc\_timeout * [placement] Add status and links fields to version document at / * Add notification support for trusted\_certs * Fix execute mock for test\_convert\_image\_with\_errors * rework allocation handler \_allocations\_dict() * placement: Allocation.consumer field * Ignore UserWarning for scope checks during test runs * Add trusted\_image\_certificates to REST API * Powervm configuration cleanup * [placement] replace deprecated accept.best\_match * Update nova-status & docs: require placement 1.25 * Remove network info stubbing in functional test * XenAPI: update the document related to vdi streaming * XenAPI: define a new image handler to use vdi streaming * api-ref: expand on various bdm parameters * Add enhanced KVM storage QoS quotas * Plumb trusted\_certs through the compute service * add consumers generation field * Implement certificate\_utils * Provide a direct interface to placement * libvirt: Don't report DISK\_GB if sharing * Remove nova dependencies from test\_resource\_provider * Adjust db using allocation unit tests * Move db using provider unit tests to functional * Update links in README * Remove unnecessary parameters from create volume API * VMware: remove reading resourcePool data * VMware: save VC reads for information that is static * Use oslo.messaging per-call monitoring * Refactor libvirt get\_memory\_used\_mb() * xenapi: drop deprecated vif\_driver config option * placement: always create consumer records * Document the internal online\_migrations function behaviors * libvirt: remove unused get\_ovs\_interfaceid() * doc follow https://review.openstack.org/#/c/572195 * Extract part of PlacementFixture to placement * fix tox python3 overrides * Remove mox in libvirt/test\_driver.py (4) * Remove mox in test\_compute\_api.py (3) 18.0.0.0b2 ---------- * Fix bug to doc:nova-status * Fix the file name of development-environment.rst * Fix issues in nova-show-usage-statistics-for-hosts-instances.rst * Change consecutive build failure limit to a weigher * Do not use nova.test in 
placement.test\_deploy * Do not use nova.test in placement.test\_microversion * Do not use nova.test in placement.test\_handler * Do not use nova.test in placement.test\_fault\_wrap * Do not use nova.test in placement.test\_requestlog * Do not use nova.test in placement.handlers.test\_aggregate * Do not use nova.test in placement.test\_util * sync\_guest\_time: use the proper errno * Remove support for /os-virtual-interfaces REST API * add mtu to libvirt xml for ethernet and bridge types * Fix doc nit * Ensure resource class cache when listing usages * api-ref: mention that you can't re-parent a resource provider * Transform instance.exists notification * Enhance api-guide general info some updates * Fix some wrong urls in doc * Trivial: let internal use only func has \_ prefix * Fix bug to doc * Re-base placement object unit tests on NoDBTestCase * [placement] Do not import oslo\_service for log\_options * Fix bug for hypervisors * Fix typo in enable\_certificate\_validation config option help * Fix some inconsistencies in doc * Only run placement request filters when Placement will be called * Downgrade overquota warning * Remove unused \_disk\_qcow2\_to\_raw * Add nova-manage placement heal\_allocations CLI * Trim the fat on HostState.instances * Restrict CONF.quota.driver to DB and noop quota drivers * Consider hostdev devices when building metadata * Refactor \_build\_device\_metadata * Fix invalid raise in test\_compute\_mgr * Mention running rootwrap in daemon mode if hitting vif plug timeouts * Match ComputeNode.uuid to ironic node uuid in RT * network: update pci request spec to handle trusted tags * metadata: add vf\_trusted field to device metadata * Skip ServerShowV254Test.test\_rebuild\_server in cells v1 job * libvirt: add vf\_trusted field for network metadata * libvirt: configure trust mode for vfs * mirror nova host aggregate members to placement * Use instance project/user when creating RequestSpec during resize reschedule * add parameter docstring for 'params' to libvirt.guest.Guest.migrate() * Set scope for remaining placement policy rules * Update overriden to overridden * pci: don't consider case when match tags specs * Remove mox in libvirt/test\_driver.py (3) * Adding NVMEoF for libvirt driver * Fix doc mistakes * Remove unused function * Re-using the code of os brick cinder * Fix nits in nested provider allocation candidates(2) * Return all resources in provider\_summaries * placement: Use INNER JOIN for requied traits * Delete duplicate functions in placement test * Use list instead of set for duplicate check * Support nested alloc cands with sharing providers * Fix nits in nested provider allocation candidates * Follow up changes to granular placement policy reviews * Add granular policy rules for allocation candidates * Add granular policy rules for placement allocations * Add granular policy rules for traits in placement * Add granular placement policy rules for aggregates * Add granular policy rules for usages * Change exception type while deattaching root device * libvirt: Deprecate support for monitoring Intel CMT \`perf\` events * Remove mox in tests/unit/api/openstack/compute * PowerVM Driver: vSCSI Fibre Channel volume adapter * Honor availability\_zone hint via placement * Remove the remaining of the removed option * Convert libvirt's RBD storage to using processutils.execute() * libvirt: Skip fetching the virtual size of block devices * Add traits check in nested provider candidates * Return nested providers in get\_by\_request * Expand tests for 
multiple shared resources case * Pushing image traits to ironic node * Update placement upgrade docs for nova-api dependency on placement * Avoid unnecessary joins in HostManager.\_get\_instances\_by\_host * Placement: allow to set reserved value equal to total for inventory * Update PowerVM hypervisor docs * Update nova-status and docs for required placement 1.24 * Granular requests to get\_allocation\_candidates * libvirt: get\_inventory => update\_provider\_tree * Normalize inventory from update\_provider\_tree * ProviderTree.has\_inventory\_changed for new fields * PowerVM Driver: Localdisk * Expose instance\_get\_all\_uuids\_by\_host() from DB API and use it * Make instance.refresh() avoid recursion better * Make instance able to lazy-load almost everything * Fix interpretation of max\_attempts for scheduling alternates * Update the deprecate os\_region\_name option * libvirt: place emulator threads on CONF.compute.cpu\_shared\_set * Fix inconsistency in docs * Remove mox in libvirt/test\_driver.py (2) * Fakelibvirt migrateToURI3 should provide args according to libvirt doc * Metadata-API fails to retrieve avz for instances created before Pike * PowerVM snapshot cleanup * Add granular policy rules for resource providers inventories * Add granular policy rules for /resource\_classes\* * Implement granular policy rules for placement * Deduplicate config/policy reference docs from main index * Make nova service-list use scatter-gather routine * Fix auth\_url example in hypervisor-hyper-v.rst * Drop API compat handling for old compute error cases * PowerVM Driver: DiskAdapter parent class * Remove deprecated monkey\_patch config options * Debug logs for allocation\_candidates filters * Cleanup ugly stub in TestLocalDeleteAllocations * Deprecate running API services under eventlet * Add retrying to requirements.txt * [placement] default to accept of application/json when \*/\* * We don't need utils.trycmd any more * Move image conversion to privsep * Update auth\_url in install docs * Add INVENTORY\_INUSE to DELETE /rp/{u}/inventories * placement: Fix HTTP error generation * Remove unnecessary 'to\_primitive' call * Remove mox in test\_xenapi.py (3) * Remove mox in tests/unit/api/\*/test\_volumes.py * Remove mox in test\_live\_migrate.py * Remove mox in libvirt/test\_driver.py (1) * Added ability to configure default architecture for ImagePropertiesFilter * \_\_str\_\_ methods for RequestGroup, ResourceRequest * add lower-constraints job * XenAPI: Pass expected return codes to resize2fs * Make scheduler client allow multiple member\_of query parameters * Add contributor docs on deprecating and removing compute REST APIs * Suppress UUID warning in map\_instance unit tests * Don't reschedule on RequestedVRamTooHigh errors * Flexibly test keystonmiddleware in placement stack * Fix HTTP500 error of changes-since on v2.0 API * libvirt: Report the virtual size of RAW disks * Fix irrelevant-files in nova-dsvm-multinode-base * Remove '\_apply\_instance\_name\_template' * Add connection\_parameters to list of items copied from database * XenAPI: deprecate the config for image handler class path * Remove mox in test\_compute\_api.py (2) * api-ref: Fix parameters for os-volume-attachments.inc * Avoid warning log when image not exist * update scheduler to use image-traits * Remove support for /os-fping REST API * Add test\_set\_device\_mtu\_default back in * Move set\_vf\_interface\_vlan to the new utility module * Move create\_tap\_dev to the new utility module * Address feedback from instance\_list 
smart-cell behavior * trivial: Explain how the marker works for instance-cell mapping * Add random sleep between retry calls to placement * Remove remaning log translation in scheduler * Remove mox in test\_xenapi.py (2) * Make get\_instance\_objects\_sorted() be smart about cells * Add CellMapping.get\_by\_project\_id() query method * Skip ServerActionsTestJSON.test\_rebuild\_server for cells v1 job * [doc] Add soft\_deleted flag * Expose driver\_block\_device fields consistently * Fix detach\_volume calls when rolling back a failed attach * remove IVS plug/unplug as they're moved to separate plugin * Followup for multiple member\_of qparams support * [Doc]Link policies file into api * libvirt: always pass emulator threads policy * compute: introduce cpu\_shared\_set option * Add docs for hw\_video:ram\_max\_mb flavor extra spec * Use .. deprecated:: theme for deprecations * doc: Don't confuse CPU pinning/NUMA as Hyper-V only * Add tests for alloc cands with poor local disk * placement: Granular GET /allocation\_candidates * libvirt: remove old rbd snapshot removal error handling * libvirt: check image type before removing snapshots in \_cleanup\_resize * Remove unused methods in nova/compute/utils.py * Remove mox in test\_xenapi.py (1) * Migrate tempest-dsvm-multinode-live-migration job in-tree * Fix typos in Host aggregates documentation * Remove mox in unit/virt/xenapi/test\_vmops.py * Remove mox in test\_compute\_api.py (1) * Changing scheduler sync event from INFO to DEBUG * placement: Object changes for granular * Use helpers in test\_resource\_provider (func) * Use test\_base symbols directly * Base test module/class for functional placement db * Fix being able to hard reboot a pausing instance * Handle @safe\_connect returns None side effect in \_ensure\_resource\_provider * Deprecate the nova-consoleauth service * Update layout docs for running console proxies * Convert websocketproxy to use db for token validation * Remove [scheduler]/host\_manager config option * doc: Start using openstackdoctheme's extlink extension * support multiple member\_of qparams * doc: Don't use single backticks in man pages * trivial: Fix file permissions * [doc]remove nova-cert leftover in doc * Add multi-cell negative test for cold migration with target host * Fix the request context in ServiceFixture * Get anchors for sharing providers * Remove IronicHostManager and baremetal scheduling options * libvirt: Drop MIN\_LIBVIRT\_REALTIME\_VERSION * libvirt: Drop MIN\_QEMU\_POSTCOPY\_VERSION * libvirt: Drop BAD\_LIBVIRT\_CPU\_POLICY\_VERSIONS * Convert configdrive to use processutils * Make association\_refresh configurable * Convert certificate generation to processutils * Convert xenapi's xvp console to processutils * Convert fping API to processutils.execute() * Replace Chinese punctuation with English punctuation * libvirt: fix setting tx\_queue\_size when rx\_queue\_size is not set * Remove stale pip-missing-reqs tox test * Fix shelving a paused instance * libvirt: Lift the restriction of choices for \`cpu\_model\_extra\_flags\` * libvirt: Make \`cpu\_model\_extra\_flags\` case-insensitive for real * Add user\_id to RequestSpec * Remove ExactCoreFilter ExactDiskFilter ExactRamFilter * libvirt: Fix misleading debug msg "Instance is running" * libvirt: Drop BAD\_LIBVIRT\_NUMA\_VERSIONS * Handle PortNotFoundClient exception when getting ports * libvirt: Drop MIN\_LIBVIRT\_NUMA\_VERSION\_PPC * libvirt: Drop MIN\_LIBVIRT\_BLOCK\_LM\_WITH\_VOLUMES\_VERSION * log stale allocations as WARNING instead of 
DEBUG * Make host\_manager use scatter-gather and ignore down cells * Make service all-cells min version helper use scatter-gather * Simplify logic in get\_enforcer * Fix tox -e docs * placement: resource requests for nested providers * Add host/hostId to instance action events API * Simplify BDM boot index checking * Remove explicit instance.info\_cache.delete() * Handle deprecation of inspect.getargspec * ServerActionsSampleJsonTest refactor * Fix dropped check for boot\_index 0 in \_validate\_bdm * PowerVM Driver: Snapshot * libvirt: fix hard reboot issue with mdevs * Bump pypowervm minimum to 1.1.15 * Make accept-language tests work with webob 1.8.x * Fix invalid UUIDs in test * Functional test: cold migrate to compute down * Use os.rename, not mv * Proxy is\_volume through DriverBlockDevice * Use ConsoleAuthToken object to generate authorizations * Address issues raised in adding member\_of to GET /a-c * docs: link to volume multi-attach demo recording * api-ref: mark block\_device\_mapping\_v2.boot\_index as required * doc: add note about xenapi aggregate upcall being resolved * Remove vestigial system\_metadata param from info\_from\_instance() * Drop MIN\_LIBVIRT\_SET\_ADMIN\_PASSWD * libvirt: Bump MIN\_{LIBVIRT,QEMU}\_VERSION for "Rocky" * libvirt: add support for virtio-net rx/tx queue sizes * libvirt: fix wrong driver name for vhostuser interface * libvirt: Add a debug log entry before / after invoking migrate() * xenapi: Documents update for XAPI pool shared SR migration * Remove deprecated [placement] opts * Fix link in placement contributor doc 18.0.0.0b1 ---------- * Add \`hide\_hypervisor\_id\` flavor extra\_spec * Mention that users need noVNC >= 0.6 * xenapi: handle InstanceNotFound in detach\_interface() * fix a typo * Update docs for [keystone\_authtoken] changes since Queens * Move some tests into nova.tests.unit.notifications.objects.test\_instance * Leave a hint when populate\_schema fails * Add request\_id to instance action notifications * Add root and parent provider uuid to group by clause * Improve check capacity sql * Rename recreate to evacuate in driver signatures * Deduplicate notification samples Rocky - 7 * Add periodic task to clean expired console tokens * xenapi: Use XAPI pool instead of aggregate pool for shared SR migration * Remove mox in unit/api/openstack/compute/test\_hosts.py * Cleanup RP and HM records while deleting a compute service * Delete allocations from API if nova-compute is down * Block deleting compute services which are hosting instances * Add functional test for deleting a compute service * mock utils.execute() in qemu-img unit test * Add CPUWeigher * Fix docs for confirmResize action * Remove placement config check * Parse forbidden in extra\_specs * Deduplicate notification samples Rocky - 6 * Deduplicate notification samples Rocky - 5 * Deduplicate notification samples Rocky - 4 * doc: BFV instances and IsolatedHostsFilter * Remove redundant \_do\_check\_can\_live\_migrate\_destination * Improve performance when list instances with IP filter * Remove mox in test\_serversV21.py (2) * Remove mox in test\_serversV21.py (1) * libvirt: Report the allocated size of preallocated file based disks * Document how to disable notifications * tests for alloc candidates with nested and traits * Add config drive link to api-guide * Move update\_task\_state out of try/except * Fix doc link for api * Address nits in I00d29e9fd80e6b8f7ba3bbd8e82dde9d4cb1522f * Extract generate\_hostid method into utils.py * Record the host info in EventReporter * 
Deduplicate notification samples Rocky - 3 * Deduplicate notification samples Rocky - 2 * Deduplicate notification samples Rocky - 1 * Provide framework for setting placement error codes * Update os\_compute\_api:os-flavor-extra-specs:index docs for 2.61 * Update os\_compute\_api:os-flavor-extra-specs:index docs for 2.47 * [placement] Support forbidden traits in API * [placement] Filter allocation candidates by forbidden traits in db * [placement] Filter resource providers by forbidden traits in db * [placement] Parse forbidden traits in query strings * doc: cleanup API guide about instance faults * Address nits in Idf57fb5fbc611abb83943bd7e36d3cebf03b3977 * tests: Fix how context managers are mocked * Cleanup patch for the cell-disable series * libvirt: refactor get\_base\_config to accept host arg * libvirt: move version to string in utils * Update link of metadata * Move xenapi partition copies to privsep * Sync xenapi and libvirt on what flags to pass e2fsck * Move xenapi disk resizing to privsep * Use Queens UCA for nova-multiattach job * Skip placement on rebuild in same host * Remove the branch specifier from the nova-multiattach job * Make the nova-multiattach job non-voting temporarily * Give volume DriverBlockDevice classes a common prefix * remove ec2 in service and cmd * Remove mox in test\_neutron\_security\_groups.py * Remove RequestContext.instance\_lock\_checked * Fix race fail in test\_resize\_with\_reschedule\_then\_live\_migrate * Remove :return from update\_provider\_tree docstring * uncap eventlet in nova * xenapi: Support live migration in pooled multi-nodes environment * trivial: fix a comment typo * Add microversion to support extra\_specs in flavor API * Imported Translations from Zanata * Remove mox in tests/unit/test\_utils.py * api-ref: Fix parameter order in rebuild * api-ref: Parameter verification for servers.inc (3/3) * api-ref: Parameter verification for servers.inc (2/3) * Remove mox in test\_virt\_drivers.py * Make ResourceClass.normalize\_name handle sharp S * Test case: ResourceClass.normalize\_name with ß * PowerVM: Add proc\_units\_factor conf option * Update wording in @safe\_connect placement warnings * Expose shutdown retry interval as config setting * Pick next minimum libvirt / QEMU versions for "Stein" * Remove mox in unit/virt/xenapi/test\_vm\_utils.py (3) * Remove mox in unit/virt/xenapi/test\_vm\_utils.py (2) * Remove mox in unit/virt/xenapi/test\_vm\_utils.py (1) * make metadata doc up to date * Update port device\_owner when unshelving * Log a warning and add nova-status check for old API service versions * Avoid dumping stack on BuildAbortException * Fix comments at the 'save' method of objects.Instance * libvirt: Block swapping to an encrypted volume when using QEMU to decrypt * Remove mox in unit/api/\*/test\_server\_metadata.py * Remove mox in unit/api/\*/test\_server\_password.py * Replace mox stubs with stub\_out in test\_extended\_volumes.py * Remove mox in unit/api/\*/test\_instance\_actions.py * Remove mox in test\_user\_data.py * Don't persist RequestSpec.retry * Add regression test for persisted RequestSpec.retry from failed resize * Move test\_report\_client out of placement namespace * Log a more useful error when cinder auth isn't configured * doc: add a link in the install guides about configuring neutron * Cleanup \_get\_request\_spec\_for\_select\_destinations for live migrate * Clarify/correct the ordering of API and Cell database schema updates * Rename network.utils to network.linux\_utils * Update ImageMetaProp object 
to expose traits * Use a pythonic delete, with a retry * [placement] Fix incorrect exception import * Update the cells FAQs and scheduler maintenance docs * Log a more useful error when neutron isn't configured * Update the Cell filters section of the scheduler docs * update\_provider\_tree devref and docstring updates * libvirt: Allow to specify granular CPU feature flags * Support extending attached ScaleIO volumes * Transform aggregate.update\_metadata notification * Add nova-status check for ironic flavor migration * Add \_\_repr\_\_ for NovaException * Add --enable and --disable options to nova-manage update\_cell * Noauth should also use request\_id from compute\_req\_id.py * Avoid unnecessary port update during live migration * DRY up test\_rollback\_live\_migration\_set\_migration\_status * Default to py3 for the pep8 tox env because it's stricter * Avoid showing password in log * Remove a outdated warning * Move xenapi xenstore\_read's to privsep * Move configurable mkfs to privsep * Request only instance\_uuid in ironic node list * Include only required fields in ironic node cache * network: add command to configure trusted mode for VFs * [placement] api-ref: Fix parameters * [Trivial]Add missing blank space in conf description * Add tests for \_get\_trees\_matching\_all() function * Fix cancel\_all\_events event name parsing * Get rid of 406 paths in report client * Move pypowervm requirement to 1.1.12 * Use an independent transaction for \_trait\_sync * Test case: traits don't sync if first access fails * Expand member\_of functional test cases * Fix member\_of with sharing providers * Add tests for alloc\_cands with member\_of * Fix a missing white space in exception message * Make generation optional in ProviderTree * Fix nits in update\_provider\_tree series * Use update\_provider\_tree from resource tracker * SchedulerReportClient.update\_from\_provider\_tree * Complement tests in allocation candidates * trivial: Fix nits in code comments * [placement] Add test for provider summaries * Fix unit tests to work with new oslo.config * Allow scheduling only to enabled cells (Filter Scheduler) * Teardown networking when rolling back live migration even if shared disk * Remove unnecessary code encoding specification * [placement] Add to contributor docs about handler testing * Add trusted\_certs object * Add trusted\_certs to instance\_extra * Move get\_stashed\_volume\_connector to compute.utils * Documentation for tenant isolation with placement * [placement] Fix bad management of \_TRAITS\_SYNCED flag * Fix N332 api\_version decorator hacking check * Use ksa session for cinder microversion check * vmware: Fixes \_detach\_instance\_volumes method * PowerVM Driver: Network interface attach/detach * Fix issue for pep8 on py3 * Add require\_tenant\_aggregate request filter * Add AggregateList.get\_by\_metadata() query method * Add an index on aggregate\_metadata.value * Make get\_allocation\_candidates() honor aggregate restrictions * Move two more generic network utilities to a move obvious place * Start untangling network utilities * Add aggregates list to Destination object * Add request filter functionality to scheduler * tox: Make everything work with Python 3 * VMware: add log message for VIF info details * Fix spelling mistake of HTTPNotFound exception * tests: fixes mock autospec usage * Use a pythonic delete * Remove duplicative implementation of temporary directories * api-ref: add a note about volume-backed rescue not being supported * Scheduling Optimization: Remove 
cell0 from the list of candidates * api-ref: Parameter verification for servers.inc (1/3) * Add host to API and Conductor * doc: Upgrade placement first * Fix allocation\_candidates not to ignore shared RPs * remove unnecessary short cut in placement * Fix comments in get\_all\_with\_shared() * Unit test framework: common FakeResponse * tox: Remove unnecessary configuration * tox: Fix indentation * Standardize '\_get\_XXX\_constraint' functions * Updated from global requirements * Fix api-ref: nova image-meta is deprecated from 2.39 * Docs: modernise links * Updated from global requirements * Modify nova-manage cell\_v2 list\_cells to display "disabled" column * Add disabled option to create\_cell command * Move \_make\_instance\_list call outside of DB transaction context * Stop using mox in virt/xenapi/image/test\_vdi\_through\_dev.py * Use microversion parse 0.2.1 * Add the version description for InstanceActionEventList * Updated from global requirements * Add host field to InstanceActionEvent * remove a comment about ec2 * Add functional regression test for bug 1746509 * Always deallocate networking before reschedule if using Neutron * Change compute mgr placement check to region\_name * make PowerVM capabilities explicit * Move placement test cases from db to placement * List instances performace optimization * Add CellMappingList.get\_by\_disabled() query method * libvirt: move vpu\_realtime\_scheduler in designer * libvirt: move get\_numa\_memnode in designer module * Remove translate and a TODO * Add more functional test for placement.usage * deprecate fping\_path config option * Remove useless run\_periodic\_tasks call in ClientRouter * Handle EndpointNotFound when building image\_ref\_url in notifications * Don't log a warning for InstanceNotFound with deleted VIFs * Preserve multiattach flag when refreshing connection\_info * ironic: stop lying to the RT when ironic is down * Clarify log in RT.\_update\_usage\_from\_migration * Add disabled field to CellMapping object * libvirt: handle DiskNotFound during update\_available\_resource * only increment disk address unit for scsi devices * Fix message for unexpected external event * Fix typos in release notes * libvirt: slow live-migration to ensure network is ready * Remove version/date from CLI documentation * Move placement exceptions into the placement package * Report client: Remove version discovery comment * add check before adding cpus to cpuset\_reserved * trivial: omit condition evaluations * remove \_cleanup\_running\_deleted\_instances repeat detach volume * [libvirt] Add \_get\_XXXpin\_cpuset() * [libvirt] Add \_get\_numa\_memnode() * Add disabled column to cell\_mappings table * Add placeholder migrations for Queens backports * Updated from global requirements * Add --by-service to discover\_hosts * api-ref: add a note in DELETE /os-services about deleting computes * conf: Remove 'db\_driver' config opt * Add 'member\_of' param to GET /allocation\_candidates * Follow the new PTI for document build * docs: Disable smartquotes * Updated from global requirements * Stop assuming initial provider generation is 0 * ProviderTree.{add|remove}\_{traits|aggregates} * Unmap compute nodes when deleting host mappings in delete cell operation * Cleanup tempest-dsvm-cells-rc blacklist * Make nova-cells-v1 run with neutron * ironic: Get correct inventory for deployed node * Marker reset option for nova-manage map\_instances * XenAPI/Stops the migration of volume backed VHDS * placement: Return new provider from POST /rps * placement: 
generation in provider aggregate APIs * Change TestNewtonCellsCheck to not rely on objects * Revert "Refine waiting for vif plug events during \_hard\_reboot" * Revert "Make the InstanceMapping marker UUID-like" * Update contributor/placement.rst to contemporary reality * Updated from global requirements * Make archive\_deleted\_rows handle a missing CONF.api\_database.connection * Transform live\_migration.post.dest notifications * Reparent placement objects to oslo\_versionedobjects * Move resource provider objects into placement hierarchy * Move resource class fields * Updated from global requirements * Fix N358 hacking check * New-style \_set\_inventory\_for\_provider * conf: Fix indentation of database options * conf: Remove deprecated 'allow\_instance\_snapshots' opt * conf: Remove deprecated 'multi\_instance\_display\_name\_template' opt * conf: Remove '[conductor] topic' opt * Update deprecated log-config option in docs * Updated from global requirements * remove unnecessary conf imports * Fix indentation in doc/source/cli/\* * Make nova build reproducible * Raise a proper exception in unit test * Rename '\_numa\_get\_constraints\_XXX' functions * Migrate tempest-dsvm-cells job to an in-tree job definition * Make nova-manage db purge take --all-cells * hardware: Rework get\_number\_of\_serial\_ports * hardware: Rework '\_get\_cpu\_topology\_constraints' * Add --purge helper flag to archive\_deleted\_rows * Re-work the metadata service docs * conf: Remove 'nova.crypto' opts * ca: Remove 'nova/CA' directory * crypto: Remove unused functions * Allow to configure amount of PCIe ports * ironic: Clean up resources after unprovision fails * Update the nova-manage db archive\_deleted\_rows description * Deprecate sparse LVs * Rename the 'recreate' param in rebuild\_instance to 'evacuate' * Add simple db purge command * Run post-test archive against cell1 * XenAPI: XCP2.1+ Swallow VDI\_NOT\_IN\_MAP Exception * conf: Deprecate 'keymap' options * Removed unnecessary parantheses in yield statements * Handle IpAddressAlreadyAllocated exception * Update contributor guide for Rocky * Handle not found error on taking snapshot * Save admin password to sysmeta in libvirt driver * Refactor WSGI apps and utils to limit imports * Transform servergroup.addmember notification * Add more functional test for placement.aggregates * Fix version cap when no nova-compute started * Check for multiattach before removing connections * Updated from global requirements * VMware: fix TypeError while get console log * Make the nova-next job voting and gating * Fix the notification devref location in exception * Updated from global requirements * Updated from global requirements * Pass user context to virt driver when detaching volume * Updated from global requirements * Move db MAX constants to own file * [placement] use simple FaultWrapper * Allow 'network' in RequestContext service\_catalog * Stop using mox in api/openstack/fakes.py * Move makefs to privsep * Convert users of tune2fs to privsep * libvirt: mask InjectionInfo.admin\_pass * Remove unused LOG variables * Clarify wording in listing instance actions for deleted instances * Add check for redundant import aliases * Make \_get\_sharing\_providers more efficient * Update noVNC deployment docs to mention non-US keymap fix in 1.0.0 * Check for leaked server resource allocations in post\_test\_hook * rp: GET /resource\_providers?required= * compute: Cleans up allocations after failed resize * Clarify \`resources\` query param for /r\_p and /a\_c * Handle 
spawning error on unshelving * Ensure attachment\_id always exists for block device mapping * Avoid exploding if guest refuses to detach a volume * [placement] api-ref: Fix a missing response code * Add functional test for deleting BFV server with old attach flow * Only attempt a rebuild claim for an evacuation to a new host * Detach volumes when deleting a BFV server pre-scheduling * Add functional recreate test of deleting a BFV server pre-scheduling * Clean up ports and volumes when deleting ERROR instance * libvirt: disconnect volume from host during detach * Functional test: evacuate with no compute * Extending delete\_cell --force to delete instance\_mappings * Return 400 when compute host is not found * Fix PatternPropertiesTestCase for py 3.6 * [placement] Add functional tests for traits API * Scheduler multiple workers support * Imported Translations from Zanata * Updated from global requirements * Remove single quotes from posargs on stestr run commands * Clarify update\_provider\_tree docstring * Only pull associated \*sharing\* providers * Fix error handling in compute API for multiattach errors * Trivial: Update help of enabled\_filters * Add a nova-caching-scheduler job to the experimental queue * api-ref: Further clarify placement aggregates * Enable native mode for ScaleIO volumes * trivial: Move \_\_init\_\_ function * Add admin guide doc on volume multiattach support * Detach volumes when VM creation fails * Python 3 fix for sphinx doc * doc: Clarify how to create your own filter * Add functional tests to ensure BDM removal on delete * Store block device mappings in cell0 * Drop extra loop which modifies Cinder volume status * Remove deprecated aggregate DB compatibility * Remove old flavor\_create db api method * Remove old flavor\_get\_all db api method * Remove old flavor\_get db api method * Remove old flavor\_get\_by\_name db api method * Remove old flavor\_get\_by\_flavor\_id db api method * Remove old flavor\_destroy db api method * Remove old flavor\_access\_get\_by\_flavor\_id db api method * Test websocketproxy with TLS in the nova-next job * Updated from global requirements * libvirt: add Linux distribution guest only description for inject\_xxx options * libvirt: remove TODO on validation of scsi model * Avoid inventory DELETE API (no conflict detection) * install-guide: Wrap long console command * install-guide: Make formatting of console consistent * Cleanup the manage-volumes admin doc * Remove warning in feature support matrix page * Use correct arguments in task inits * Remove the deprecated scheduler\_driver\_task\_period option * Clarify the help text for [scheduler]periodic\_task\_interval * Fix and update compute schedulers config guide * Lazy-load instance attributes with read\_deleted=yes * Fix warn api\_class is deprecated, use backend * Drop compute RPC 4.x compatibility * Don't JSON encode instance\_info.traits for ironic * Move the nova-next job in-tree and update it * Use dict.get() when accessing capabilities dict * Fix typo in NUMATopologyFilter docs * [libvirt] Add \_get\_vcpu\_realtime\_scheduler() * [placement] annotate loadapp as public interface * Replace Chinese quotes to English quotes * Fix docs for IsolatedHostsFilter * Handle volume-backed instances in IsolatedHostsFilter * Add regression test for BFV+IsolatedHostsFilter failure * doc: merge numa.rst to cpu-topologies.rst * [placement] Add sending global request ID in get * [placement] Add sending global request ID in put (3) * Ensure resource classes correctly * Provide basic data 
for AArch64 support matrix/functionality
* TrivialFix: Add a space between messages
* Fix grammar error
* Update reno for stable/queens
* Refine waiting for vif plug events during _hard_reboot

17.0.0.0rc1
-----------

* doc: mention that --on-shared-storage is not needed with nova evacuate
* doc: fix the link for the evacuate cli
* Check quota before creating volume snapshots
* Add the ability to get absolute limits from Cinder
* unquiesce instance on volume snapshot failure
* VGPU: Modify the example of vgpu white_list set
* [placement] Move body examples to an isolated directory
* Remove MigrationPreCheckClientException
* Encode libvirt domain XML in UTF-8
* Clean up reservations in migrate_task call path
* Compute RPC client bump to 5.0
* Bump compute RPC API to version 5.0
* Bindep does not catch missing libpcre3-dev on Ubuntu
* Fixed auto-convergence option name in doc
* Workaround glanceclient bug when CONF.glance.api_servers not set
* Remove a duplicate colon
* Use with method to consistent oslo timeutils usage
* Add log for snapshot an instance
* TrivialFix: Add a blankline
* trivial: Fix microversion number in test comment
* Remove unnecessary arguments in notification methods
* Remove unnecessary variables
* XenAPI: Provide support matrix and doc for VGPU
* Make the InstanceMapping marker UUID-like
* fix link
* Make bdms querying in multi-cell use scatter-gather and ignore down cell
* update docstring param description
* Add a prelude release note for the 17.0.0 Queens GA
* Address comments from I51adbbdf13711e463b4d25c2ffd4a3123cd65675
* Add late server group policy check to rebuild
* Add regression test for bug 1735407
* Remove microversion fallback code from report client
* Fix wrong link for "Manage Flavors" in CPU topologies doc
* Make sure that we have usable input for graphical console
* Use check_string_length from oslo_utils
* update the description of hypervisor statistics response
* fix misspelling of 'projectUser'
* Test case: new standard resource class unusable
* Clarify CONF.scheduler.max_attempts
* Add release note for Aggregate[Core|Ram|Disk]Filter change
* placement doc: Conflict caveat for DELETE APIs
* Trivial fix a missleading comment
* Provide support matrix and doc for VGPU
* doc: update the GPU passthrough HPC feature entry
* [placement] Add sending global request ID in put (2)
* [placement] Add sending global request ID in put (1)
* [placement] Add sending global request ID in post
* Update cells v2 layout doc caveats for Queens
* Not use thread alloc policy for emulator thread
* Refix disk size during live migration with disk over-commit
* Zuul: Remove project name
* Doc: Nix os-traits link from POST resource_classes
* Only log during pop retry phase
* docs: Add booting from an encrypted volume
* libvirt: fix native luks encryption failure to find volume_id
* Don't wait for vif plug events during _hard_reboot
* Don't rely on parse.urlencode in url comparisons
* Reset the _RC_CACHE between tests
* Fix invalid UUIDs in test_compute.py
* Fix the wrong description
* doc: placement upgrade notes for queens
* Add functional tests for traits-based scheduling
* Ensure the JSON-Schema covers the legacy v2 API
* Cleanup launch instance and manage IPs docs
* Migrate "launch instance" user guide docs
* Pass limit to /allocation_requests
* doc: mark the max microversions for queens
* test_compute_mgr: fix couple of unit tests
* Updated from global requirements
* trivial: Fix few policy doc
* Query all cells for service version in _validate_bdm
* Remove old flavor_access_add db api methods
* Remove old flavor_access_remove db api method
* Remove old flavor_extra_specs_get db api method
* Remove old flavor_extra_specs_delete db api method
* Remove old flavor_access_get_by_flavor_id db api method
* add "--until-complete" option for nova-manage db archive_deleted_rows
* Mention required traits in the flavors user docs
* Fix nits in support traits changes
* Log options at debug when starting API services under wsgi
* set_{aggregates|traits}_for_provider: tolerate set
* ProviderTree.get_provider_uuids: Top-down ordering
* SchedulerReportClient._delete_provider
* ComputeDriver.update_provider_tree()
* report client: get_provider_tree_and_ensure_root
* Remove unused method _parse_node_instance_info
* Add resource_class to fields in ironic node cache
* Update docstring for get_traits virt driver method
* trivial: Fix typos in release notes
* Allow force-delete even if task_state is not None
* Invalid query parameter could lead to HTTP 500
* [Placement] Invalid query parameter could lead to HTTP 500
* Use util.validate_query_params in list_traits
* Add functional tests for virt driver get_traits() method
* Implement get_traits() for the ironic virt driver
* Add get_traits() method to ComputeDriver
* [placement] Separate API schemas (resource_provider)
* Remove compute nodes arg from ProviderTree init
* Fix invalid UUIDs in remaining tests
* Don't modify objects directly
* trivial: Resolve "X is renamed to Y" warnings
* trivial: Don't use 'Test' prefix for non-TestCase classes
* Remove unused tempest-dsvm-lxc-rc
* ProviderTree.new_child: parent is either uuid or name
* trivialfix: cleanup _pack_instance_onto_cores()
* Always pass 'NUMACell.siblings' to _pack_instance_onto_cores'
* Ensure emulator threads are always calculated
* tests: refactors and cleans up test_rbd.py
* Don't filter out sibling sets with one core
* Add server filters whitelist in server api-ref
* reno for notification-transformation-queens
* Add the nova-multiattach job
* api-ref: provide more detail on what a provider aggregate is
* Remove redundant call to add_instance_fault_from_exc in rebuild_instance
* Collapse duplicate error handling in rebuild_instance
* Rollback instance.image_ref on failed rebuild
* hyper-v: Logs tips on PortBindingFailed
* Add unit tests for EmulatorThreadsTestCase
* [libvirt] Filter hypervisor_type by virt_type
* Updated from global requirements
* SchedulerReportClient.set_aggregates_for_provider
* Fix a comment in a notification functional test
* Bumping functional test job timeouts
* Remove deprecated policy items from fake_policy
* Reduce policy deprecation warnings in test runs
* Handle network-changed event for a specific port
* Fix the incorrect RST convention
* Fix SUSE Install Guide: Placement port
* Log the events we timed out waiting for while plugging vifs
* Reduce complexity of _from_db_object

17.0.0.0b3
----------

* Ironic: Get IP address for volume connector
* Add release note for QEMU native LUKS decryption
* Fix missing 'if_notifications_enabled' decorator
* Fix missing marker functions
* Fix bug case by none token context
* Transform instance.resize_prep notification
* Move remaining uses of parted to privsep
* Avoid suspending guest with attached vGPUs
* placement: enable required traits from the flavor extra specs
* placement: using the dict format for the allocation in claim_resources
* Update VMWare vSphere link address
* Handle TZ change in iso8601
>=0.1.12 * Updated from global requirements * Fix the order of target host checks * Add the Nova libvirt StorPool attachment driver * Expand on when you might want to set --max-count for map\_instances * libvirt: pass the mdevs when rebooting the guest * Set server status to ERROR if rebuild failed * Fix nits in allocation candidate limit handling * libvirt: QEMU native LUKS decryption for encrypted volumes * Replace curly quotes with straight quotes * Fix 'all\_tenants' & 'all\_projects' type in api-ref * Use neutron port\_list when filtering instance by ip * Start moving users of parted to privsep * Add PowerVM to feature-classification * Fix update\_cell to ignore existing identical cells * Change compute RPC to use alternates for resize * Report Client: PUT empty (not None) JSON data * Send traits to ironic on server boot * PowerVM Driver: SEA * Recreate mediated devices on reboot * [api] Allow multi-attach in compute api * doc: Document TLS security setup for noVNC proxy * placement: support traits in allocation candidates API * Do not multiply megabytes with 1024 to get gigabytes * api-ref: Fix parameter type in server-migrations.inc * Transform instance-evacuate notification * [placement] Add sending global request ID in delete (3) * Add index(instance\_uuid, updated\_at) on instance\_actions table * Fix 500 in test\_resize\_server\_negative\_invalid\_state * Generalize DB conf group copying * Track tree-associated providers in report client * ProviderTree.populate\_from\_iterable * Raise on API errors getting aggregates/traits * Updated from global requirements * Remove redundant swap\_volume tests * Track associated sharing RPs in report client * SchedulerReportClient.set\_traits\_for\_provider * ProviderTree.data => ProviderData * Cleanup redundant want\_version assignment * Fix format in flavors.rst * libvirt: Introduce disk encryption config classes * libvirt: Collocate encryptor and volume driver calls * libvirt: create vGPU for instance * Deduplicate service status notification samples * libvirt: don't attempt to live snapshot paused instances * Pass multiattach flag to reserve\_block\_device\_name * Handle swapping to a multiattach volume * [libvirt] Allow multiple volume attachments * trivial: Remove crud from 'conf.py' * Fix openstackdocstheme options for api-ref * Updated from global requirements * [placement] Add functional tests for resource class API * correct referenced url in comments * Transform instance.resize\_confirm notification * placement: \_get\_trees\_matching\_all\_resources() * Account for deprecation of personality files * PowerVM driver: ovs vif * add \_has\_provider\_trees() utility function * func tests for nested providers in alloc candidate * Deduplicate aggregate notification samples * Fix accumulated nits * Make sure that functional test triggered on sample changes * Add taskflow to requirements * Updated from global requirements * Enable py36 unit tests in tox * Stop globally caching host states in scheduler HostManager * make unit tests compatible with os-vif 1.8.0 * Remove unnecessary execute permissions in files * Update plugs Contrail methods to work with privsep * [placement] Fix resource provider delete * Transform rescue/unrescue instance notifications * conf: Do not inherit image signature props with snapshots * Track provider traits in report client * Fix missing rps in allocation candidates * Add aggregates check in allocation candidates * Fix accumulated nits in refactor series * Test helper: validate provider summaries * Revert 
"Deduplicate service status notification samples" * console: Provide an RFB security proxy implementation * console: introduce the VeNCrypt RFB authentication scheme * console: introduce framework for RFB authentication * console: Send bytes to sockets * Update links in documents * Add a warning in 'nova-manage cell\_v2 delete\_cell' * Modify the test case of get\_disk\_mapping\_rescue\_with\_config * Rename block\_device\_info\_get\_root * Address nits in change I7e01f95d7173d9217f76e838b3ea71555151ef56 * trivial: Resolve 'oslo.context' deprecation warnings * Increase notification wait timeout in functional tests * [placement] Add sending global request ID in delete (2) * Fix comment in MigrationSortContext * Add index(updated\_at) on migrations table * Add pagination and Changes-since filter support for os-migrations * Deduplicate service status notification samples * Add exception to no-upcall note of cells doc * Fix typo in release note * Add cross cell sort support for get\_migrations * libvirt: add tests to check multipath in iscsi/fc volume connectors * libvirt: test to make sure volume\_use\_multipath is properly used * libvirt: use 'host-passthrough' as default on AArch64 * Add reference to policy sample * Add an additional description for 'token\_ttl' * Updated from global requirements * Qualify the Placement 1.15 release note * Add migration db and object pagination support * Add regression test for resize failing during retries * Fix race condition in retrying migrations * libvirt: Provide VGPU inventory for a single GPU type * Fix OpenStack capitalization * Update FAQs about listing hosts in cellv2 * Add ConsoleAuthToken object * Optionalize instance\_uuid in console\_auth\_token\_get\_valid() * Add index on token\_hash and instance\_uuid for console\_auth\_tokens * Add access\_url\_base to console\_auth\_tokens table * Add debug output for selected page size * Use method validate\_integer from oslo.utils * conf: hyperv: fix a comment typo * Remove a duplicate line in a unit test * Use volume shared\_targets to lock during attach/detach * Handle no allocations during migrate * Add regression test for resizing failing when using CachingScheduler * zuul: Move legacy jobs to project * Imported Translations from Zanata * log test: use fixtures.StandardLogging in setUp * Fix up formatting for deprecate-api-extensions-policies release note * Fix documentation nits in set\_and\_clear\_allocations * Document lack of side-effects in AllocationList.create\_all() * VMware: add support for different firmwares * hyper-v: Deprecates support for Windows / Hyper-V Server 2012 * Use UEFI as the default boot for AArch64 * Don't log a warning for InstanceNotFound in detach\_interface * manager: more detailed info of unsupported compute driver * Add test for assignment of uuid to a deleted BDM * Fix fake libvirt XML generation for disks * Handle glance exception during rotating instance backup * Move aggregates from report client to ProviderTree * Only call numa\_fit\_instance\_to\_host if necessary * Expose BDM uuid to drivers * DriverBlockDevice: make subclasses inherit \_proxy\_as\_attr * Add an online migration for BDM.uuid * Address nits in I46d483f9de6776db1b025f925890624e5e682ada * Add support for getting volume details with a specified microversion * XenAPI: Unit tests must mock os\_xenapi calls * Revert "Modify \_poll\_shelved\_instances periodic task call \_shelve\_offload\_instance()" * Remove 'nova-manage host' and 'nova-manage agent' * Remove 'nova-manage logs' command * setup.cfg: 
Explicitly set [build\_sphinx] builder * conf: Remove deprecated 'remap\_vbd\_dev' option * api-ref: Fix incorrect parameter name * [placement] Add sending global request ID in delete * trivial: conf: libvirt: remove a redundant space * Fix the formatting for 2.58 in the compute REST API history doc * trivial: Modify signature of \_filter\_non\_requested\_pfs * Add PCI NUMA policies * Document testing guide for new API contributions * trivial: use cn instead of rp * Updated from global requirements * Test allocation candidates: multiple aggregates * Fix functional tests for USE\_NEUTRON * Make conductor pass and use host\_lists * Don't try to delete build request during a reschedule * libvirt: don't log snapshot success unless it actually happens * Add retry\_on\_deadlock decorator to action\_event\_start * conf: libvirt: Cleanup CPU modelling related options * Remove dead parameter from '\_create\_domain\_and\_network' * Handle images with no data * tests: Use correct response type in tests * Remove the inherits parameter for the Resource object * Remove the LoadedExtensionInfo object * Initialize osprofiler in WSGI application * doc: update supported drivers for cpu topology * Do not set allocation.id in AllocationList.create\_all() * [placement] Fix getting placement request ID * [placement] Enable limiting GET /allocation\_candidates * Pass RequestSpec to ConductorTaskAPI.build\_instances * Fix an error in \_get\_host\_states when deleting a compute node * Provide example for placement last-modified header of now * objects: Add PCI NUMA policy fields * Workaround missing RequestSpec.project\_id when moving an instance * Use instance.project\_id when creating request specs for old instances * Fix duplicate allocation candidates * trivial: conf: libvirt: fix a typo * Remove extensions module * Fix 4 doc typos * Fix false positive server group functional tests * Updated from global requirements * api-ref: sort parameters for limits, quotas and quota classes * XenAPI: create vGPU for instance * update\_cell allows more than once cell to have the same db/transport url * [placement] Add x-openstack-request-id in API ref * [placement] Separate API schemas (allocation\_candidate) * [placement] Separate API schemas (allocation) * Implement set\_and\_clear\_allocations in report client * Make BlockDeviceMapping object support uuid * Add uuid column to BlockDeviceMapping * Remove unused argument from LibvirtDriver.\_disconnect\_volume * Removed unused argument from LibvirtDriver.\_connect\_volume * Fix unit test failures when direct IO not supported * [placement] Separate API schemas (resource\_class) * Updated from global requirements * Deduplicate functional test code * Aggregate ops on ProviderTree * Implement query param schema for migration index * Make request\_spec.spec MediumText * Fix the formatting for 2.56 in the compute REST API history doc * Delete the TypeAffinityFilter * live-mig: keep disk device address same * Traits ops on ProviderTree * SchedulerReportClient.\_get\_providers\_in\_aggregates * [placement] Separate API schemas (inventory) * [placement] Separate API schemas (aggregate) * [placement] Separate API schemas (trait) * [placement] Separate API schemas (usage) * Fix the bug report link of API Guide * Extract instance allocation removal code * Test alloc\_cands with one RP shared between two RPs * Test alloc\_cands with non overlapping sharing RPs * handle traits with sharing providers * Fix possible TypeError in VIF.fixed\_ips * Add pagination and changes-since for 
instance-actions
* Updated common create server sample request because of microversion 2.57
* Fix some typos in nova doc
* Retry _trait_sync on deadlock
* Remove unnecessary connector stash in attachment_update
* Pass mountpoint to volume attachment_create with connector
* Pass bdms to versioned notifications during finish_revert_resize
* Update and complete volume attachments during resize
* Pass mountpoint to volume attachment_update
* Don't persist could-be-stale InstanceGroup fields in RequestSpec
* Update nova-status and docs for nova-compute requiring placement 1.14
* Wait for live_migration_rollback.end notification
* Some nit fix in multi_cell_list
* Raise MarkerNotFound if BuildRequestList.get_by_filters doesn't find marker
* Move flushing block devices to privsep
* Convert ext filesystem resizes to privsep
* [placement] Add info about last-modified to contrib docs
* [placement] Add cache headers to placement api requests
* Stabilize test_live_migration_abort func test
* doc: add note about fixing admin-only APIs without a microversion
* Deprecate file injection
* VMware: implement get_inventory() driver method
* VMware: expose max vCPUs and max memory per ESX host
* VMware: fix memory stats
* api-ref: Fix a description for 'guest_format'
* Move the claim_resources method to scheduler utils
* Change RPC for select_destinations()
* Re-use existing ComputeNode on ironic rebalance
* placement: skip authentication on root URI
* Add instance action db and obj pagination support
* Update Instance action's updated_at when action event updated
* Make live migration hold resources with a migration allocation
* Add instance action record for snapshot instances
* Add quiesce and unquiesce in support matrix
* libvirt: throw NotImplementedError if qga is not responsive when setting password
* [placement] Fix API reference for microversion 1.14
* Unmap compute nodes when deleting host mapping
* Follow up on removing old-style quotas code
* Add API and nova-manage tests that use the NoopQuotaDriver
* Add instance action record for backup instances
* Don't launch guestfs in a thread pool if guestfs.debug is enabled
* Remove confusing comment in compute_node_get API method
* [placement] add name to resource provider create error
* Improve error message on invalid BDM fields
* doc: link in some Sydney summit content
* trivial: more suitable log in set_admin_password
* Add support for listing hosts in cellv2
* [placement] Add 'Location' parameters in API ref
* [placement] Object changes to support last-modified headers

17.0.0.0b2
----------

* Implement new attach Cinder flow
* Add new style volume attachment support to block_device.py
* SchedulerReportClient._get_providers_in_tree
* Modify select_destinations() to return objects and alts
* Move the to_dict() method to the Selection object
* Return Selection objects from the scheduler driver
* Refactor the code to check for sufficient hosts
* Fix 'force' parameter in os-quota-sets PUT schema
* Reformat _get_all_with_shared
* Updated from global requirements
* Deprecate configurable Hide Server Address Feature
* XenAPI: update the picture in Xen hypervisor document
* Deprecate API extensions policies
* Avoid stashed connector lookup for new style detach
* placement: update client to set parent provider
* Scheduler set_inventory_for_provider does nested
* placement: adds REST API for nested providers
* placement: allow filter providers in tree
* XenAPI: Don't use nicira-iface-id for XenServer VIF
*
archive\_deleted\_instances is not atomic for insert/delete * Remove the unused request\_id filter from api-paste.ini * Add a new check to volume attach * Add instance action record for shelve\_offload instances * Modify \_poll\_shelved\_instances periodic task call \_shelve\_offload\_instance() * Add Selection objects * Fix doubling allocations on rebuild * Add PowerVM to compute\_driver options * Updated from global requirements * Fix wrong argument order in functional test * [placement] Fix an error message in API validation * Transform instance.resize\_revert notification * Mention API behavior change when over quota limit * [placement] Fix foreign key constraint error * [placement] Add aggregate link note in API ref * Fail fast if changing image on a volume-backed server rebuild * Get original image\_id from volume for volume-backed instance rebuild * Add regression test for rebuilding a volume-backed server * ProviderTree.get\_provider\_uuids() * Fix cellsv1 messaging test * Make \_Provider really private * Split instance\_list into instance and multi\_cell * Genericify the instance\_list stuff * Remove 'nova-manage account' and 'nova-manage project' * Remove 'nova-manage shell' command * Updated from global requirements * Fixes 'Not enough available memory' log message * Only log not correcting allocation once per period * Add description for resource class creation * Trivial: Nix duplicate PlacementFixture() in test * Check the return code when forcing TCG mode with libguestfs * [placement] re-use existing conf with auth token middleware * Fix disk size during live migration with disk over-commit * Use ksa adapter for keystone conf & requests * Downgrade log for keystone verify client fail * [placement]Enhance doc for placement allocation list * Update description of Rebuild in server\_concepts.rst * Use oslo\_db Session in resource\_provider.py * VMware: Handle concurrent registrations of the VC extension * Proper error handling by \_ensure\_resource\_provider * Refactor placement version check * Nix log translations from scheduler.client.report * Remove old-style quotas code * Remove direct usage of glance.generate\_image\_url * remove glance usage inside compute * Assert that we restrict cold migrations to the same cell * [placement] Fix format in placement API ref * Enable cold migration with target host(2/2) * qemu-img do not use cache=none if no O\_DIRECT support * remove reserve\_quota\_delta * Raise specific exception when swapping migration allocations fails * Remove vestigial extra\_info update in PciDevice.save() * Fix ValueError when loading old pci device record * Updated from global requirements * Remove the objects for describing the extension for v2.1 API * Remove the objects which related to the old v2 API implementation * Updated from global requirements * Save updated libvirt domain XML after swapping volume * placement: add nested resource providers * Deprecate the IronicHostManager * Fix some incorrect option references for scheduler filters * Remove deprecated TrustedFilter * Fix NoneType error when [service\_user] is misconfigured * check query param for server groups function * Deduplicate instance.create notification samples * Nits from Ic3ab7d60e4ac12b767fe70bef97b327545a86e74 * [placement] Fix GET PUT /allocations nits * [placement] POST /allocations to set allocations for >1 consumers * Add instance action record for lock/unlock instances * XenAPI: provide vGPU inventory in compute node * XenAPI: get vGPU stats from hypervisor * Add 'all\_tenants' for 
GET sec group api ref * Update the documentation links * Add instance action record for attach/detach/swap volumes * Add regression test for rebuild with new image doubling allocations * Refined fix for validating image on rebuild * Address nits from service create/destroy notification review * Versioned notifications for service create and delete * Remove unnecessary self.flags and ConfPatcher * Implement query param schema for delete assisted vol * Add ProviderSummary.resource\_class\_names @property * required traits for no sharing providers * Fix invalid minRam error message * finish refactor AllocCandidates.\_get\_by\_filters() * PowerVM support matrix update * Fix the format file name * Simplify BDM boot index checking * Remove unused global variables * Updated from global requirements * Implement query param schema for flavor index * Implement query param schema for fping index * Implement query param schema for sec group APIs * Finish stestr migration * Fix incorrect known vcpuset when CPUPinningUnknown raised * Enable cold migration with target host(1/2) * Update server query section in the API concept doc * [placement] Add 'CUSTOM\_' prefix description in API ref * [placement] Fix parameter order in placement API ref * Remove 'nova-manage quota refresh' command * Api-guide: Address TODOs in user\_concepts section * Update server status api guide * Api guide:add Server Consoles * Update Metadata api section of api guide * Implement query param schema for simple\_tenant\_usage * Transform instance-live\_migration\_pre notification * Use FakeLiveMigrateDriver in notification test * Change live\_migrate tests to use fakedriver * Test resource allocation during soft delete * factor out compute service start in ServerMovingTest * Moving more utils to ProviderUsageBaseTestCase * Don't overwrite binding-profile * Fix TypeError of \_get\_project\_id when project\_id is None * Regenerate and pass configdrive when rebuild Ironic nodes * Update bindep.txt for doc builds * [placement] Symmetric GET and PUT /allocations/{consumer\_uuid} * Service token is not experimental * Use ksa adapter for neutron client * Get auth from context for glance endpoint * vgpu: add enabled white list * cleanup mapping/reqspec after archive instance * Fix the usage of instance.snapshot notification sample * Update document related to host aggregate * api-ref: Add a description of 'key\_name' in rebuild * api-ref: Fix an example in "Delete Assisted Volume Snapshot" * Use the RequestSpec when getting scheduler\_hints in compute * Add migration\_get\_by\_uuid in db api * Add instance action record for attach/detach interface * placement: Document request headers in api-ref * Deduplicate keypair notification samples * Include project\_id and user\_id in AllocationList.get\_all\_by\_consumer\_id * Clean up exception caught in \_validate\_and\_build\_base\_options * Implement query param schema for volume, snapshot API * Implement query param schema for quota set APIs * api-ref: fix the type on the block\_device\_mapping\_v2 parameter * placement: Document \`in:\` prefix for ?member\_of= * libvirt: Re-initialise volumes, encryptors, and vifs on hard reboot * VMware: serial console log (completed) * PowerVM Driver: config drive * Fix TypeError in nova-manage db archive\_deleted\_rows * Remove setting of version/release from releasenotes * Fix the formatting for the 2.54 microversion REST API version history * doc: Adds Hyper-V PCI passthrough details * hyper-v: Do not allow instances with pinned CPUs to spawn * Updated 
from global requirements * Add microversion to allow setting flavor description * Fix docstring for GET /os-migrations and related DB API * Add a note about versioned notification samples being per-release * Document the real behavior of notify\_on\_state\_change * Use NoDBTestCase for powervm driver tests * create allocation request for single provider * build alloc request resources for shared resources * build ProviderSummary objects in sep function * begin refactor AllocCandidates.\_get\_by\_filters() * Add security release note for OSSA-2017-005 * Add error message on metadata API * api-ref: make a note about os:scheduler\_hints being a top-level key * doc: fix link to creating unit tests in contributor guide * Validate new image via scheduler during rebuild * Add FlavorPayload.description for versioned notifications * placement: AllocCands.get\_by\_{filters => requests} * Deduplicate server\_group samples * Correct log message when removing a security group * Updated from global requirements * Enable reset keypair while rebuilding instance * Test allocation\_candidates with only sharing RPs * Test alloc candidates with same RC in cn & shared * rt: Make resource tracker always invoking get\_inventory() * Revert "Don't overwrite binding-profile" * Cleanup build\_request\_spec * Refactor test\_allocation\_candidates * block\_device\_mapping\_v2.bus\_type is missing from api-ref * Remove incorrect comment about instance.locked * Don't overwrite binding-profile * Do not use “-y” for package install * [placement] set accept to application/json if accept not set * [placement] Fix a wrong redirection in placement doc * Handle InstanceNotFound when setting password via metadata * Extract allocation candidates functional tests * Deduplicate instance.reboot notification samples * Deduplicate instance.live\_migration notification samples * Deduplicate instance.interface\_attach samples * Deduplicate instance.power-off notification samples * Transform instance-live\_migration\_abort notification * Deduplicated instance.(un)pause notification samples * Factor out duplicated notification sample data (2) * Move last\_bytes into the path module * Fix test\_get\_volume\_config method * Fix missing versioned notification sample * Clean up allocations if instance deleted during build * Avoid deleting allocations for instances being built * libvirt: remove old code in post\_live\_migration\_at\_destination * Using --option ARGUMENT * Add Flavor.description attribute * Modify incorrect debug meaasge in \_inject\_data * Avoid redundant security group queries in GET /servers/{id}/os-security-groups * Update contributor microversion doc for compute * Updated from global requirements * Granularize resources\_from\_{flavor|request\_spec} * Parse granular resources/traits from extra\_specs * placement: Parse granular resources & traits * RequestGroup class for placement & consumers * Factor out duplicated notification sample data * libvirt: Don't VIR\_MIGRATE\_NON\_SHARED\_INC without migrate\_disks * libvirt: do unicode conversion for error messages * Remove cells v2 transition code from update\_instance * Cleanup update\_instance cell mapping handling * Fix return type in FilterScheduler.\_legacy\_find\_hosts * Implement power\_off/power\_on for the FakeDriver * Remove instance.keypairs migration code * conf: Validate '[api] vendordata\_providers' options * conf: Remove 'vendordata\_driver' opt * Trivial grammar fix * Fix warning on {'cell\_id': 1} is an invalid UUID * Move contrail vif plugging to privsep * 
* Move plumgrid vif plugging to privsep
* Move midonet vif plugging to privsep
* Move infiniband vif plugging to privsep
* Remove compatibility method from FlavorPayload
* placement: Contributor doc microversion checklist
* libvirt: do not remove inst_base when volume-backed during resize
* Refactor claim_resources() to use retries decorator
* Make put_allocations() retry on concurrent update
* [placement] avoid case issues microversions in gabbits
* Fix format in live-migration-usage.rst
* Don't update RT in _allocate_network
* Transform keypair.import notification
* api-ref: document caveats with scheduler hints
* add whereto for testing redirect rules
* rp: break functions out of _set_traits()
* Use Migration object in ComputeManagerMigrationTestCase
* check query param for used_limits function
* VMware: add support for graceful shutdown of instances
* Pass requested_destination in filter_properties
* Functional regression test for evacuate with a target
* Fix indent in configuring-migrations.rst
* XenAPI: resolve VBD unplug failure with VM_MISSING_PV_DRIVERS error
* libvirt: properly decode error message from qemu guest agent
* Use ksa adapter for placement conf & requests
* Only filter/weigh hosts once if scheduling a single instance
* Update placement api-ref: allocations link in 1.11
* rt: Implement XenAPI get_inventory() method
* Fix instance lookup in hide_server_addresses extension
* libvirt: remove extraneous retry assignment in cleanup method
* libvirt: Don't disregard cache mode for instance boot disks
* Fix live migration grenade ceph setup
* Pass the correct image to build_request_spec in conductor.rebuild_instance
* rp: remove _HasAResourceProvider mixin
* rp: move RP._set_traits() to module scope
* rp: Remove RP.get_traits() method
* [placement] Limit number of attempts to delete allocations
* [placement] Allow _set_allocations to delete allocations
* conf: Move additional nova-net opts to 'network'
* Do not attempt volume swap when guest is stopped/suspended
* Convert IVS VIF plugging / unplugging to privsep
* Move blkid calls to privsep
* trivial: Rename 'policy_check' -> 'policy'
* test: Store the OutputStreamCapture fixture
* Accept all standard resource classes in flavor extra specs
* Fix AttributeError in BlockDeviceMapping.obj_load_attr
* Move project_id and user_id to Allocation object
* VGPU: Define vgpu resource class
* Make migration uuid hold allocations for migrating instances
* Fix wrapping of neutron forbidden error
* Import user-data page from openstack-manuals
* Import the config drive docs from openstack-manuals
* Move kpartx calls to privsep
* Move nbd commands to privsep
* Move loopback setup and removal to privsep
* Move the idmapshift binary into privsep
* Include /resource_providers/uuid/allocations link
* xenapi: cached images should be cleaned up by time
* Add test so we remember why CUSTOM_ prefix added
* Move xend existence probes to privsep
* Move shred to privsep
* Add alternate hosts
* Implement query param schema for host index
* conf: Remove deprecated 'null_kernel' opt
* Adds 'sata' as a valid disk bus for qemu and kvm hypervisors
* propagate OSError to MigrationPreCheckError
* Trivial: fix spelling of allocation_request
* Transform instance.trigger_crash_dump notification
* Add debug information to metadata requests

17.0.0.0b1
----------

* placement: integrate ProviderTree to report client
* [Trivial] Fix up a docstring
* Remove duplicate error info
* [placement] Clean up TODOs in allocations.yaml gabbit
*
Add attachment\_get to refresh\_connection\_info * Add 'delete\_host' command in 'nova-manage cell\_v2' * Keep updating allocations for Ironic * docs: Explain the flow of the "serial console" feature * Send Allocations to spawn * Move lvm handling to privsep * Cleanup mount / umount and associated rmdir calls * Update live migration to use v3 cinder api * placement: set/check if inventory change in tree * Move restart\_compute\_service to a common place * Fix nova-manage commands that do not exist * fix cleaning up evacuated instances * doc: Fix command output in scheduler document * Refactor resource tracker to account for migration allocations * Revert allocations by migration uuid * Split get\_allocations\_for\_instance() into useful bits * Regenerate context during targeting * Pick ironic nodes without VCPU set * Don't use mock.patch.stopall * Move test\_uuid\_sentinels to NoDBTestCase * [placement] Confirm that empty resources query causes 400 * [placement] add coverage for update of standard resource class * api-ref: add warning about force evacuate for ironic * Add snapshot id to the snapshot notifications * Reproduce bug 1721652 in the functional test env * Add 'done' to migration\_get\_in\_progress\_by\_host\_and\_node filter * Update "SHUTOFF" description in API guide * api-ref: fix server status values in GET /servers docs * Fix connection info refresh for reboot * rp: rework AllocList.get\_all\_by\_consumer\_id() * rp: fix up AllocList.get\_by\_resource\_provider\_uuid * rp: remove ability to delete 1 allocation record * rp: remove dead code in Allocation.\_create\_in\_db() * rp: streamline InventoryList.get\_all\_by\_rp\_uuid() * rp: remove CRUD operations on Inventory class * Make expected notifications output easier to read in tests * Elevate existing RequestContext to get bandwidth usage * Fix target\_cell usage for scatter\_gather\_cells * Nix bug msg from ConfGroupForServiceTypeNotFound * nova-manage map\_instances is not using the cells info from the API database * Updated from global requirements * Update cinder in RequestContext service catalog * Target context for build notification in conductor * Don't fix protocol-less glance api\_servers anymore * Move user\_data max length check to schema * Remove unnecessary BDM destroy during instance delete * rp: Move RP.\_get|set\_aggregates() to module scope * rp: de-ORM ResourceProvider.get\_by\_uuid() * use already loaded BDM in instance.create * use already loaded BDM in instance. (2) * use already loaded BDM in instance. 
* Remove dead code of api.fault notification sending * Fix sending legacy instance.update notification * doc: Rework man pages * Fix typo in test\_prep\_resize\_errors\_migration * Fix minor input items from previous patches * nova.utils.get\_ksa\_adapter() * De-duplicate \_numa\_get\_flavor\_XXX\_map\_list * hardware: Flatten functions * Update libvirt volume drivers to use os-brick constants * Always put 'uuid' into sort\_keys for stable instance lists * Fix instance\_get\_by\_sort\_filters() for multiple sort keys * Deprecate allowed\_direct\_url\_schemes and nova.image.download.modules * Add error notification for instance.interface\_attach * Note TrustedFilter deprecation in docs * Make setenv consistent for unit, func, and api-samples * Blacklist test\_extend\_attached\_volume from cells v1 job * Pre-create migration object * Remove metadata/system\_metadata filter handling from get\_all * fix unstable shelve offload functional tests * TrivialFix: Fix the incorrect test case * stabilize test\_resize\_server\_error\_and\_reschedule\_was\_failed * api-ref: note that project\_id filter only works with all\_tenants * Avoid redundant BDM lookup in check\_can\_live\_migrate\_source * Only query BDMs once in API during rebuild * Make allocation cleanup honor new by-migration rules * Modernize set\_vm\_state\_and\_notify * Remove system\_metadata loading in Instance.\_load\_flavor * Stop joining on system\_metadata when listing instances * Remove old compat code from servers ViewBuilder.\_get\_metadata * Remove unused get\_all\_instance\_\*metadata methods * doc: Add documentation for cpu\_realtime, cpu\_realtime\_mask * Remove 400 as expected error * Remove doc todo related to bug/1506667 * api-ref: add note about rebuild not replacing volume-backed root disk * api-ref: remove redundant preserve\_ephemeral mention from rebuild docs * [placement] gabbi tests for shared custom resource class * Update RT aggregate map less frequently * libvirt: add method to configure migration speed * Set migration object attributes for source/dest during live migrate * Refactor duplicate code for looking up the compute node name * Fix CellDatabases fixture swallowing exceptions * Use improved instance\_list module in compute API * Fix a pagination logic bug in test\_bug\_1689692 * Add hints to what the Migration attribute values are * Move cell0 marker test to Cellsv1DeprecatedTestMixIn * Ensure instance can migrate when launched concurrently * console: introduce basic framework for security proxying * [placement] Update the placement deployment instructions * Move allocation manipulation out of drop\_move\_claim() * Do not monkey patch eventlet in unit tests * Do not setup conductor in BaseAPITestCase * Make etree.tostring() emit unicode everywhere * Fix inconsistency of 'NOTE:' description * Don't shell out to mkdir, use ensure\_tree() * Read from console ptys using privsep * Move ploop commands to privsep * Set group\_members when converting to legacy request spec * Support qemu >= 2.10 * Fix policy check performance in 2.47+ * doc: make host aggregates examples more discoverable * Remove dest node allocations during live migration rollback * Fix race in delete allocation in ServerMovingTests * xenapi: pass migrate\_data to recover\_method if live migrate fails * \_rollback\_live\_migration in live-migration seqdiag * Log consumer uuid when retrying claims in the scheduler * Add recreate test for live migrate rollback not cleaning up dest allocs * Add slowest command to tox.ini * Make TestRPC inherit from 
the base nova TestCase * Ensure errors\_out\_migration errors out migration * use context mgr in instance.delete * Implement query param schema for GET hypervisor(2.33) * Remove SCREEN\_LOGDIR from devstack install setting * Fix --max-count handling for nova-manage cell\_v2 map\_instances * Set the Pike release version for scheduler RPC * Add functional for live migrate delete * Fix IoOpsFilter test case class name * Add get\_node\_uuid() helper to ResourceTracker * Live Migration sequence diagram * Deprecate idle\_timeout in api\_database * cleanup test-requirements * Add 400 as error code for resource class delete * Implement query param schema for agent index * fix nova accepting invalid availability zone name with ':' * check query param for service's index function * Remove useless periodic task that expires quota reservations * Add attachment\_get call to volume/cinder\_api * Add functional migrate force\_complete test * Copy some tests to a cellsv1 mixin * Add get\_instance\_objects\_sorted() * Make 'fault' a valid joined query field for Instance * Change livesnapshot to true by default * docs: Rename cellsv2\_layout -> cellsv2-layout * Add datapath type information to OVS vif objects * libvirt: Make 'get\_domain' private * Fix 500 if list servers called with empty regex pattern * Vzstorage: synchronize volume connect * Add \_wait\_for\_action\_fail\_completion to InstanceHelperMixin * Remove allocations when unshelve fails on host * Updated from global requirements * Add instance.interface\_detach notification * Add default configuration files to data\_files * Remove method "\_get\_host\_ref\_from\_name" * Add a regression test for bug 1718455 * Add recreate test for unshelve offloaded instance spawn fail * Add PowerVM hypervisor configuration doc * Add tests to validate instance\_list handles faults correctly * Add fault-filling into instance\_get\_all\_by\_filters\_sort() * Support pagination in instance\_list * Add db.instance\_get\_by\_sort\_filters() * Make instance\_list honor global query limit * Add base implementation for efficient cross-cell instance listing * Fix hyperlinks in document * api-ref: fix default sort key when listing servers * Add instance.interface\_attach notification * libvirt: bandwidth param should be set in guest migrate * Updated from global requirements * Add connection pool size to vSphere settings * Add live.migration.force.complete to the legacy notification whitelist * Restore '[vnc] vnc\_\*' option support * neutron: handle binding:profile=None during migration * doc: Add documentation for emulator\_thread\_policy * doc: Split flavors docs into admin and user guides * VMware: Factor out relocate\_vm() * remove re-auth logic for ironic client wrapper * hyperv: report disk\_available\_least field * Allow shuffling hosts with the same best weight * Enable custom certificates for keystone communication * Fix the ocata config-reference URLs * Fix a typo * Account for compute.metrics.update in legacy notification whitelist * use unicode in tests to avoid SQLA warning * Move libvirts dmcrypt support to privsep * Squash dacnet\_admin privsep context * Squash dac\_admin privsep context * Move the dac\_admin privsep code to a new location * Use symbolic names for capabilities, expand sys\_admin context * stabilize test\_resize\_server\_error\_and\_reschedule\_was\_failed * Updated from global requirements * Drop support for the Cinder v2 API * Remove 400 as expected error * Set error state after failed evacuation * Add @targets\_cell for 
live\_migrate\_instance method in conductor * [placement] Removing versioning from resource\_provider objects * doc: rename the Indices and Tables section * doc: Further cleanup of doc contributor guide * [placement] Unregister the ResourceProvider object * [placement] Unregister the ResourceProviderList object * [placement] Unregister the Inventory object * [placement] Unregister the InventoryList object * [placement] Unregister the Allocation object * [placement] Unregister the AllocationList object * [placement] Unregister the Usage object * [placement] Unregister the UsageList object * [placement] Unregister the ResourceClass object * [placement] Unregister the ResourceClassList object * [placement] Unregister the Trait object * [placement] Unregister the TraitList object * Add '\_has\_qos\_queue\_extension' function * Add '\_has\_dns\_extension' function * Assume neutron auto\_allocate extension's enabled * Add single quotes for posargs on jobs * Add nova-manage db command for ironic flavor migrations * enhance api-ref for os-server-external-events * Have one list of reboot task\_states * Call terminate\_connection when shelve\_offloading * Revert "Enable test\_iscsi\_volume in live migration job" * Target context when setting instance to ERROR when over quota * Cleanup running of osprofiler tests * Fix test runner config issues with os-testr 1.0.0 * Fix missed chown call * Updated from global requirements * Tweak connection\_info translation for the new Cinder attach/detach API * Add attachment\_complete call to volume/cinder.py * Remove dest node allocation if evacuate MoveClaim fails * Add a test to make sure failed evacuate cleans up dest allocation * Add recreate test for evacuate claim failure * Create allocations against forced dest host during evacuate * fake\_notifier: Refactor wait\_for\_versioned\_notification * Transform instance.resize.error notifications * Update docs to include standardization of VM diagnostics * Refactor ServerMovingTests for non-move tests * Remove deprecated keymgr code * Move execs of tee to privsep * Add ComputeNodeList.get\_by\_hypervisor\_type() * Split out the core of the ironic flavor migration * Fix binary name * Revert "Revert "Fix AZ related API docs"" * [placement] Correct a comment in \_set\_allocations * Remove Xen networking plugin * Revert "Fix AZ related API docs" * [placement] correct error on bad resource class in allocation * api-ref: note the microversions for GET /resource\_providers query params * doc: fix flavor notes * Fix AZ related API docs * Transform aggregate.remove\_host notification * Transform servergroup.delete notification * Transform aggregate.add\_host notification * Cleanup unused get\_iscsi\_initiator * Remove two testing stubs which aren't really needed * Typo error about help resource\_classes.inc * Transform servergroup.create notification * Set regex flag on ostestr command for osprofiler tests * Transform keypair.delete notification * Move execs of touch to privsep * Move libvirt usages of chown to privsep * Enable test\_iscsi\_volume in live migration job * Refactor out claim\_resources\_on\_destination into a utility * Fix broken URLs * Ensure instance mapping is updated in case of quota recheck fails * Track which cell each instance is created in and use it consistently * Make ConductorTaskTestCase run with 2 cells * xenapi: Exception Error logs shown in Citrix XenServer CI * Update contributor guide for Queens * Allow setting up multiple cells in the base TestCase * Fix test\_rpc\_consumer\_isolation for 
oslo.messaging 5.31.0 * Fix broken link * First attempt at adding a privsep user to nova itself * Provide hints when nova-manage db sync fails to sync cell0 * Add release note for force live migration allocations * Handle exception on adding secgroup * doc: Add configuration index page * doc: Add user index page * spelling mistake * Fix ValueError if invalid max\_rows passed to db purge * Remove usage of kwarg retry\_on\_request in API * Add release note for requiring shred 8.22 or above * Make xen unit tests work with os-xenapi>=0.3.0 * Skip more racy rebuild failing tests with cells v1 * Add some inline code docs tracing the cold migrate flow * Mark LXC as missing for swap volume support * Remove compatibility code for flavors * rbd: Remove unnecessary 'encode' calls * Updated from global requirements * Pass config object to oslo\_reports * Replace http with https for doc links in nova * Put base policy rules at first * Amend uuid4 hacking rule * conf: Rename two VNC options * Correct examples in "Manage Compute services" documentation * Handle deleted instances when refreshing the info\_cache * Remove qpid description in doc * Replace dd with shred for zeroing lvm volumes * Update docs for \_destroy\_evacuated\_instances * doc: link to versioned notification samples from main index * doc: link to placement api-ref and history docs from main index * doc: fix online\_data\_migrations option in upgrades doc * Add recreate test for forced host evacuate not setting dest allocations * add online\_data\_migrations to nova docs * Glance download: only fsync files * Functional test for regression bug #1713783 * doc: fix show-hide sample in notification devref * Default the service version in the notification tests * api-ref: add warnings about forcing the host for live migrate/evacuate * HyperV: Perform proper cleanup after failed instance spawns * [placement] Update user doc with api-ref link * [placement] api-ref GET /traits name:startswith * Add video type virtio for AArch64 * Document tagged attach in the feature support matrix * [placement] Require at least one resource class in allocation * Enhance doc for nova services * Update doc to indicate nova-network deprecated * Updated from global requirements * [placement] Add test for empty resources in allocation * Refactor LiveMigrationTask.\_find\_destination * Cleanup allocations on invalid dest node during live migration * Hyper-V: Perform proper cleanup after cold migration * Test InstanceNotFound handling in 'nova usage' * Typo fix in admin doc ssh-configuration.html * iso8601.is8601.Utc No Longer Exists * Fix nova assisted volume snapshots * Fix \_delete\_inventory log message in report client * Add functional recreate test for live migration pre-check fails * doc: Remove deprecated call to sphinx.util.compat * Remove unneeded attributes from context * Updates to scheduling workflow doc * Add uuid online migration for migrations * Add uuid to migration object and migrate-on-load * Add uuid to migration table * Add placeholder migrations for Pike backports * Clarify the field usage guidelines * Optimize MiniDNS for fewer syscalls * [Trivial] docstrings, typos, minor refactoring * Update PCI passthrough doc for moved options * tests: De-duplicate some graphics tests * Reduce code complexity - linux\_net.py * Refactor init\_instance:resume\_guests\_state * conf: Allow users to unset 'keymap' options * Change default for [notifications]/default\_publisher\_id to $host * Deprecate CONF.monkey\_patch * Add device tag support info in support 
matrix
* Prevent blank line at start of migration placeholders
* Remove useless error handling in prep_resize
* De-duplicate two delete_allocation_for_* methods
* Move hash ring initialization to init_host() for ironic
* Fix bug on vmware driver attach volume failed
* fix a typo in format_cpu_spec doc
* Cleanup allocations in failed prep_resize
* Add functional test for rescheduling during a migration
* Remove allocation when booting instance rescheduled or aborted
* Fix sample configuration generation for compute-related options
* Add formatting to scheduling activity diagram
* Monkey patch the blockdiag extension
* docs: Document the scheduler workflow
* Updated from global requirements
* Delete instance allocations when the instance is deleted
* How about not logging errors every time we shelve offload?
* Add missing tests for _remove_deleted_instances_allocations
* nova-manage: Deprecate 'cell' commands
* Add missing unit tests for FilterScheduler._get_all_host_states
* api-ref: fix key_name note formatting
* Assume neutron port_binding extensions enabled
* libvirt: Fix getting a wrong guest object
* pci: Validate behavior of empty devname
* Tests: Add cleanup of 'instances' directory
* Remove the section about extensions from the API concept doc
* Restrict live migration to same cell
* Remove source node allocation after live migration completes
* Allocate resources on forced dest host during live migration
* Add language for compute node configuration
* trivial: Remove some single use function from utils
* Add functional live migrate test
* Add functional force live migrate test
* doc: Address review comments for main index
* trivial: Remove dead function, variable
* tests: Remove useless test
* Remove plug_ovs_hybrid, unplug_ovs_hybrid
* Correct statement in api-ref
* Fix a typo in code comment
* Refactor libvirt.utils.execute() away
* Fix quobyte test_validate_volume_no_mtab_entry
* Updated from global requirements
* update comment for dropping support
* Move common definition into common layer
* Remove host filter for _cleanup_running_deleted_instances periodic task
* Fix contributor documentation
* replace chance with filter scheduler in func tests
* Clean up resources at shelve offload
* test shelve and shelve offload with placement
* Amend the code review guide for microversion API
* delete allocation of evacuated instance
* Make scheduler.utils.merge_resources ignore zero values
* Fix a wrong link
* Fix reporting inventory for provisioned nodes in the Ironic driver
* Avoid race in test_evacuate
* Reset client session when placement endpoint not found
* Update api doc with latest updates in api framework
* doc: Extend nfv feature matrix with pinning/NUMA
* Always use application/json accept header in report client
* Fix messages in functional tests
* Handle addition of new nodes/instances in ironic flavor migration
* Skip test_rebuild_server_in_error_state for cells v1
* test server evacuation with placement
* doc: add superconductor up-call caveat for cross_az_attach=False
* doc: add another up-call caveat for cells v2 for xenapi aggregates
* Update reno for stable/pike
* Deprecate bare metal filters

16.0.0.0rc1
-----------

* Remove "dhcp_options_for_instance"
* Clarifying node_uuid usage in ironic driver
* doc: address review comments in stable-api guide updates
* Resource tracker compatibility with Ocata and Pike
* placement: avoid returning duplicated alloc_reqs when no sharing rp
* Imported Translations from Zanata
*
[placement] Make placement\_api\_docs.py failing * [placement] Add api-ref for allocation\_candidates * Clarify that vlan feature means nova-network support * [placement] Add api-ref for RP usages * Remove ram/disk sched filters from default list * Remove provider allocs in confirm/revert resize * placement: refactor healing of allocations in RT * remove log message with potential stale info * doc: Address review comments for contributor index * Require Placement 1.10 in nova-status upgrade check * Mark Chance and Caching schedulers as deprecated * [placement] Add api-ref for usages * Clean up \*most\* ec2 / euca2ools references * Add documentation for documentation contributions * Structure cli page * doc: Import configuration reference * Add release note for shared storage known issue * Improve stable-api doc with current API state * update policy UT fixtures * Bulk import all config reference figures * rework index intro to describe nova * Mark max microversion for Pike in history doc * Add a prelude section for Pike * doc: provide more details on scheduling with placement * Add functional test for local delete allocations * Document service layout for consoles with cells * Add For Operators section to front page * Create For End Users index section * doc: code review considerations for online data migrations * Add track\_instance\_changes note in disable\_group\_policy\_check\_upcall * Cleanup release note about ignoring allow\_same\_net\_traffic * no instance info cache update if instance deleted * Add format\_dom for PCI device addresses * doc: Add additional content to admin guide * Create reference subpage * Raise NoValidHost if no allocation candidates * Fix all >= 2 hit 404s * Handle ironicclient failures in Ironic driver * Fix migrate single instance when it was created concurrently * trivial: Remove files from 'tools' * trivial: Remove "vif" script * tools/xenserver: Remove 'cleanup\_sm\_locks' * Test resize with too big flavor * [placement] Add api-ref for RP allocations * placement: filtering the resource provider id when delete trait association * Updated from global requirements * Add resource utilities to scheduler utils * Add Contributor Guide section page * Fix getting instance bdms in multiple cells * Update install guide to clearly define between package installs * doc: Import administration guide * doc: Import installation guide * Complete dostring of live\_migration related methods * Add a caveat section about cellsv2 upcalls * doc: Start using oslo\_policy.sphinxext * policies: Fix Sphinx issues * doc: Start using oslo\_config.sphinxext * doc: Rework README to reflect new doc URLs * doc: Remove dead files * nova-manage: Deprecate '--version' parameters * imagebackend: cleanup constructor args to Rbd * Sum allocations in the scheduler when resizing to the same host * doc: Make use of definition lists, literals * hardware offload support for openvswitch * reflow rpc doc to 80 columns * fix list rendering in policy-enforcement * Fix scope of errors\_out\_migration in finish\_resize * Fix scope of errors\_out\_migration in resize\_instance * Split Compute.errors\_out\_migration into a separate contextmanager * fix list rendering in cells * fix list rendering in aggregates * Fix list rendering in bdm doc * fix list rendering in rpc doc * Fix list rendering in code-review.rst * Fix whitespace in rest\_api\_version\_history * Fix lists in process doc * [placement] Avoid error log on 405 response * Keep the code consistent * Filter out stale migrations in resource audit * 
Test resize to same host with placement api * fix rpc broken rst comment * sort redirectmatch lines * add top 404 redirect * [placement] Require at least one allocation when PUT * Add redirect for api-microversion-history doc * Fix 409 handling in report client when deleting inventory * Detach device from live domain even if not found on persistent * Cleanup unnecessary logic in os-volume\_attachments controller code * Adopt new pypowervm power\_off APIs * placement: remove existing allocs when set allocs * Additional assertions to resize tests * Accept any scheduler driver entrypoint * add redirects for existing broken docs urls * Add some more cellsv2 doc goodness * Test resize with placement api * Deprecate cells v1 * Add release note for PUT /os-services/\* for non-compute services * Updated from global requirements * Don't warn on expected network-vif-unplugged events * do not pass proxy env variables by tox * Show quota detail when inject file quota exceeds * rootwrap.d cleanup mislabeled files * always show urls in list\_cells * api-ref: requested security groups are not applied to pre-existing ports * api-ref: fix security\_groups response parameter in os-security-groups * Clean variable names and docs around neutron allocate\_for\_instance * explain payload inheritance in notification devref * Update SSL cert used in testing * Remove RamFilter and DiskFilter in default filter * Enhance support matrix document * remove extension param and usage * Add description on maximum placement API version * Updated from global requirements * Add cinder keystone client opts to config reference * Updated from global requirements * fix test\_rebuild\_server\_exc instability * [placement] quash unicode warning with shared provider * add a redirect for the old cells landing page * Remove unnecessary code 16.0.0.0b3 ---------- * claim resources in placement API during schedule() * placement: account for move operations in claim * add description about key\_name * doc: add FAQ entry for cells v1 config options * Add oslo\_concurrency=INFO to default log levels for nova-manage * stabilize test\_create\_delete\_server functional test * Ensure we unshelve in the cell the instance is mapped * Fix example in \_serialize\_allocations\_for\_consumer * deprecate \`\`wsgi\_log\_format\`\` config variable * Move the note about '/os-volume\_boot' to the correct place * Remove the useless fake ExtensionManager from API unittests * Netronome SmartNIC Enablement * Updated from global requirements * Enhance support matrix document * add cli to support matrix * add a retry on DBDeadlock to \_set\_allocations() * docstring and unused code removal * libvirt: Post-migration, set cache value for Cinder volume(s) * use os\_traits.MISC\_SHARES\_VIA\_AGGREGATE * style-only: s/context/ctx/ * Instance remains in migrating state forever * Add helper method for waiting migrations in functional tests * Improve assertJsonEqual error reporting * Translate the return value of attachment\_create and \_update * Move the last\_bytes util method to libvirt * Do not import nova.conf into nova/exception.py * Set IronicNodeState.uuid in \_update\_from\_compute\_node * Add VIFHostDevice support to libvirt driver * Remove redundant free\_vcpus logging in \_report\_hypervisor\_resource\_view * Remove the useless extension block\_device\_mapping\_v1 object * Remove the useless FakeExt * Remove the code related to extension loading from APIRouterV21 * Add 'updated\_at' field to InstancePayload in notifications * Use wsgi-intercept in 
OSAPIFixture * API ref: associate floating IP requires Active status * Suppress some test warnings * Use enum value instead of string service name * rename binary to source in versioned notifications * Trim the fat from InstanceInfo * [placement] Use wsgi\_intercept in PlacementFixture * Replaces uuid.uuid4 with uuidutils.generate\_uuid() * Ironic: Support boot from Cinder volume * [placement] Flush RC\_CACHE after each gabbit sequence * Stop using mox stubs in cast\_as\_call.py * Add online migration to move quotas to API database * Migrate Ironic Flavors * Add tags to instance.create Notification * request\_log addition for running under uwsgi * Stop using mox stubs in test\_console\_auth\_tokens.py * Increase cpu time for image conversion * Remove an unnecessary argument in \_prep\_resize * Updated from global requirements * Using plain routes for the microversions test * Updated from global requirements * Updated from global requirements * placement: add retry tight loop claim\_resources() * Dump versioned notifications when test\_create\_delete\_server * retry on authentication failure in api\_client * Change default policy to view quota details * Implement interface attach/detach in ironic virt driver * Update policy description for 'instance\_actions' * Update ironic feature matrix * Updated from global requirements * doc: Switch to openstackdocstheme * Don't cast cinderclient microversions to float * Remove the unittest for plugin framework * Use plain routes list for versions instead of stevedore * Removed unused 'wrap' property * Make Quotas object favor the API database * Remove check\_detach * Remove improper LOG.exception() calls in placement * VMware: Handle missing volume vmdk during detach * Use \_error\_out\_instance\_on\_exception in finish\_resize * placement: proper JOIN order for shared resources * placement: alloc candidates only shared resources * Allow wrapping of closures * Updated from global requirements * provide interface-scoped nameserver information * Only setup iptables for metadata if using nova-net * Fix and optimize external\_events for multiple cells * Add policy granularity to the Flavors API * Deprecate useless quota\_usage\_refresh from nova-manage * add dict of allocation requests to select\_dests() * Handle None returned from get\_allocation\_candidates due to connect failure * Updated from global requirements * Update URL home-page in documents according to document migration * api-ref: Fix an expand button in os-quota-sets * Correct the description of 'disable-log-reason' api-ref * Consider instance flavor resource overrides in allocations * Do not mention that tags are case sensitive in docs * api-ref: fix max\_version for deprecated os-quota-class-sets parameters * Handle uuids in os-hypervisors API * Use uuid for id in os-services API * Remove 'reserved' count from used limits * Make security\_group\_rules use check\_deltas() for quota * Make key\_pairs use check\_deltas() for quota * Count instances to check quota * Use plain routes list for extension\_info instead of stevedore * Use plain routes list for os-snapshots instead of stevedore * Use plain routes list for os-volume-attachments instead of stevedore * doc: Populate the 'user' section * doc: Populate the 'reference' section * doc: Populate the 'contributor' section * doc: Populate the 'configuration' section * Add log info in scheduler to mark start of scheduling * [placement] Add api-ref for allocations * [placement] Add api-ref for RP traits * [placement] Add api-ref for traits * 
Remove translation of log messages * Fix indentation in policy doc * conf: remove \*\_topic config opts * Stop using mox stubs in test\_remote\_consoles.py * api-ref: Verify parameters in os-migrations.inc * Use URIOpt * Convert HostState.limits['numa\_topology'] to primitive * Log compute node uuid when the record is created * Remove key\_manager.api\_class hack * Update policy descriptions for base * Consistent policies * Support tag instances when boot(4/4) * Fix instance evacuation with PCI devices * [placement] fix 500 error when allocating to bad class * [placement] Update allocation-candidates.yaml for gabbi 1.35 * fix test\_volume\_swap\_server instability * XenAPI: Fix ValueError in test\_slave\_asks\_master\_to\_add\_slave\_to\_pool * api-ref: mention disk size limitations in resize flavor * [placement] cover deleting a custom resource class in use * [placement] cover deleting standard trait * Updated from global requirements * fix unshelve notification test instability * scheduler: isolate \_get\_sorted\_hosts() * Set wsgi.keep\_alive=False globally for tests * Dump tracked version notifications when swap volume tests fail * Default reservations=None in Cells v1 and conductor APIs * Avoid false positives of Jinja2 in Bandit scan * Updated from global requirements * Remove 'create\_rule\_default' * Use oslo.polcy DocumentedRuleDefault * trivial: Remove unnecessary function * doc: Populate the 'cli' section * Fix the releasenote and api-ref for quota-class API * Fix typo * Stop counting hw\_video:ram\_max\_mb against quota * Add ability to signal and perform online volume size change * api-ref: mark instance action events parameter as optional * Add BDM to InstancePayload * placement: add claim\_resources() to report client * doc: Enable pep8 on doc generation code * doc: Remove dead plugin * Use plain routes list for os-volumes instead of stevedore * Use plain routes list for os-baremetal-nodes endpoint instead of stevedore * Use plain routes list for os-security-group-default-rules instead of stevedore * Use plain routes list for os-security-group-rules instead of stevedore * Use plain routes list for server-security-groups instead of stevedore * Use plain routes list for os-security-groups instead of stevedore * Use plain routes list for image-metadata instead of stevedore * Use plain routes list for images instead of stevedore * Remove the test for the route '/resources.:(format)' * doc: Use consistent author, section for man pages * Use plain routes list for os-networks instead of stevedore * doc: Remove cruft from conf.py * Fix wrong log parm * Query deleted instance records during \_destroy\_evacuated\_instances * Skip boot from encrypted volume on Xen+libvirt * improve notification short-circuit * Use PCIAddressField in oslo.versionedobjects * Fix quota class set APIs * api-ref: Add X-Openstack-Request-Id description * Fix a missing classifier * api-ref: Add missing parameters in limits.inc * api-ref: Fix parameters in server-security-groups * Stop using deprecated 'message' attribute in Exception * Adjust error msg for ImageNUMATopologyAsymmetric * placement: scheduler uses allocation candidates * Trivial: Remove unnecessary format specifier * Handle Cinder 3.27 style attachments in swap\_volume * Support tag instances when boot(3/4) * Remove reverts\_task\_state decorator from swap\_volume * Pre-load instance.device\_metadata in InstanceMetadata * Updated from global requirements * [placement] Improve allocation\_candidates coverage * xenapi: avoid unnecessary BDM query 
when building device metadata * Add release note for xenapi virt device tagging support * Make notification publisher\_id consistent * Modify some comments for virt driver * Fix parameters and description for os-volume\_attachments * Remove nova.api.extensions.server.extensions usage * Fix error message when support matrix entry is missing a driver * Fix comment for API binary name in WSGIService * Fix arguments in calling \_delete\_nic\_metadata * Fix incorrect docstrings in neutron network API * Add 'networks' quota in quota sample files * Reset the traits sync flag in the placement fixtures * Add api-ref for os-quota-class-set APIs * trivial: Use valid UUIDs in test\_admin\_password * placement: filter usage records by resource provider id * Fix 'project-id' 'user-id' as required in server group * Reduce (notification) test duplication * Use plain routes list for os-cells endpoint instead of stevedore * Hyper-V: fix live migration with CSVs * placement: support GET /allocation\_candidates * Handle keypair not found from metadata server using cells * Don't delete neutron port when attach failed * Removes getfattr from Quobyte Nova driver * libvirt: update the logic to configure volume with scsi controller * libvirt: update logic to configure device for scsi controller * Updated from global requirements * conf: fix netconf, my\_ip and host are unclear * Remove wsdl\_location configuration option * hyperv: Fixes log message in livemigrationops * hyperv: stop serial console workers while deleting vm files * hyperv: Fixes Generation 2 VMs volume boot order * Ensure the JSON-Schema covers the legacy v2 API * API support for tagged device attachment * Delete disk metadata when detaching volume * Add scatter gather utilities for cells * Sanitize instance in InstanceMetadata to avoid un-pickleable context * remove the very old unmaintained wsgi scripts * Extract custom resource classes from flavors * Fix the log information argument mistake * Remove mox from nova.tests.unit.virt.xenapi.test\_vm\_utils.py * Handle version for PUT and POST in PlacementFixture * Add a reset for traits DB sync * Strengthen the warning on the old broken WSGI script * Add key\_name field to InstancePayload * Add keypairs field to InstanceCreatePayload * api-ref: Fix missing parameters in API Versions * placement: refactor driver select\_destinations() * Updated from global requirements * VStorage: changed default log path * Add python 3.5 in classifier * Delete nic metadata when detaching interface * Remove mox from nova.tests.unit.api.openstack.compute.test\_limits * Add get\_count\_by\_vm\_state() to InstanceList object * move resources\_from\_request\_spec() to utils * return 400 Bad Request when empty string resources * placement: adds ProviderTree for nested resources * Add missing microversion documentation * Remove mox in test\_availability\_zone.py * Stop using mox stubs in test\_keypairs.py * Plumbing for tagged nic attachment * Remove code that produces warning in modern Python * Plumbing for tagged volume attachment * Fix misuse of assertIsNone * Simplify a condition * Support paging over compute nodes with a uuid marker * Update api-ref to indicate swap param * \_schedule\_instances() supporting a RequestSpec object * Removes potentially bad exit value from accepted list in Quobyte volume driver * Switch Nova Quobyte volume driver to mount via systemd-run * Clean up volumes on boot failure * Explain why API services are filtered out of GET /os-services * Fix redundant BDM lookups during rebuild * Delete 
all inventory has its own method DELETE * Remove translation of log messages * hypervisor\_hostname must match get\_available\_nodes * Fix using targeted cell context when finding services in cells * [doc] Updated sqlalchemy URL in migrate readme * placement: separate normalize\_resources\_qs\_param * Updated from global requirements * Use more specific asserts in tests * Transform instance.soft\_delete notifications * Fix the note at the end of allocate\_for\_instance * Count floating ips to check quota * Add FloatingIPList.get\_count\_by\_project() * Count fixed ips to check quota * Add FixedIPList.get\_count\_by\_project() * Count security groups to check quota * Add SecurityGroupList.get\_counts() * Count networks to check quota * Provide a hint when \_verify\_response fails * Provide error message in MismatchError for api-samples tests * placement: produce set of allocation candidates * Reduce code duplication * Use plain routes list for os-remote-consoles instead of stevedore * Remove multiple create from stevedore * Use plain routes list for os-tenant-networks instead of stevedore * Use plain routes list for os-cloudpipe endpoint instead of stevedore * Use plain routes list for os-quota-classes endpoint instead of stevedore * Consolidate index and detail methods in HypervisorsController * Handle uuid in HostAPI.compute\_node\_get * api-ref: fix unshelve asynchronous postconditions typo * add missing notification samples to dev ref * Fix regression preventing reporting negative resources for overcommit * Add separate instance.create payload type * placement: Add GET /usages to placement API * placement project\_id, user\_id in PUT /allocations * api-ref: fix hypervisor\_hostname description for Ironic * Updated from global requirements * Provide original fault message when BFV fails * Add PowerVM to nova support matrix * remove null\_safe\_int from module scope * Fix a wrong comment * Stop caching compute nodes in the request * Centralize compute\_node\_search\_by\_hypervisor in os-hypervisors * api-ref: cleanup PUT /os-hypervisors/statistics docs * Make compute\_node\_statistics() work across cells * Only auto-disable new nova-compute services * Cleanup the plethora of libvirt live migration options * [placement] Update placement devref to modern features * Make all timestamps formats equal * Transform keypair.create notification * remove mox from unit/virt/vmwareapi/test\_driver\_api.py * XenAPI: device tagging * Updated from global requirements * api-ref: fix misleading description in PUT /os-services/disable * Remove service control from feature support matrix * Indicate Hyper-v supports fibre channel in support matrix * Use CONF.host for powervm nodename * Pull out code that builds VIF in \_build\_network\_info\_model * Use plain routes list for os-server-groups endpoint instead of stevedore * Use plain routes list for user\_data instead of stevedore * remove get\_nw\_info\_for\_instance from compute.utils * remove ugly local import * Add missing query filter params for GET /os-services API * XenAPI: Create linux bridge in dest host during live migration * Remove translation of log messages * Count server group members to check quota * Add bool\_from\_string for force-down action * Remove old service version check for mitaka * Clarify conf/compute.py help text for ListOpts * Use plain routes list for block\_device\_mapping instead of stevedore * Use plain routes list for os-consoles, os-console-auth-tokens endpoint instead of stevedore * [placement] Increase test coverage * 
Remove unused variable * pci: add uuid field to PciDevice object * libvirt: dump debug info when interface detach times out * Amend api-ref for multiple networks request * Remove translation of log messages * Calculate stopped instance's disk sizes for disk\_available\_least * Transform instance.live\_migration\_rollback notification * Add InstanceGroup.\_remove\_members\_in\_db 16.0.0.0b2 ---------- * Fix lookup of instance mapping in metadata set-password * libvirt: Extract method \_guest\_add\_spice\_channel * libvirt: Extract method \_guest\_add\_memory\_balloon * libvirt: Extract method \_guest\_add\_watchdog\_action * libvirt: Extract method \_guest\_add\_pci\_devices * libvirt: Extract method \_guest\_add\_video\_device * libvirt: fix alternative\_device\_name for detaching interfaces * [placement] Add api-ref for aggregates * Add docstring for test\_limit\_check\_project\_and\_user\_zero\_values * Skip microversion discovery check for update/delete volume attachments * Use 3.27 microversion when creating new style volume attachments * Use microversions for new style volume attachments * libvirt: handle missing rbd\_secret\_uuid from old connection info * Log a warning if there is only one cell when listing instances * [placement] Use util.extract\_json in allocations handler * [placement] Disambiguate resource provider conflict message * raise exception if create Virtuozzo container with swap disk * Convert additional disassociate tests to mock * Remove useless API tests * Remove \*\*kwargs passing in payload \_\_init\_\_ * Prefer non-PCI host nodes for non-PCI instances * Add PCIWeigher * XenAPI: Remove bittorrent.py which is already deprecated * Count server groups to check quota * Default to 0 when merging values in limit check * api-ref: fix type for hypervisor\_marker * Fix html\_last\_updated\_fmt for Python3 * nfs fix for xenial images * Remove unused CONF import from placement/auth.py * xen: pass Xen console in cmdline * Add earliest-version tags for stable branch renos * Fix the race condition with novnc * Add service\_token for nova-glance interaction * Adopts keystoneauth with glance client * placement: use separate tables for projects/users * Move rebuild notification tests into separate method * contrail: add vrouter VIF plugin type support * Fix cell0 naming when QS params on the connection * libvirt: Check if domain is persistent before detaching devices * Fix device metadata service version check for multiple cells * Remove cells topic configuration option * Add get\_minimum\_version\_all\_cells() helper for service * libvirt: rearange how scsi controller is defined * libvirt: set full description of the controller used by disk * libvirt: update LibvirtConfigGuestDeviceAddress to provide XML * Use plain routes list for os-services endpoint instead of stevedore * use plain routes list for os-virtual-interfaces * use plain routes list for hypervisor endpoint instead of stevedore * Use plain routes list for hosts endpoint instead of stevedore * Use plain routes list for os-fping endpoint * Use plain routes list for instance actions endpoint * Use plain routes list for server ips endpoint * XenAPI: use os-xenapi 0.2.0 in nova * Add InstanceGroupList.get\_counts() * Reset the \_TRAITS\_SYNCED global in Traits tests * Revert "Remove Babel from requirements.txt" * Avoid unnecessary lazy-loads in mutated\_migration\_context * libvirt: log vm and task state when vif plugging times out * Send out notifications when instance tags changed * Catch neutronclient.NotFound on 
floating deletion * Move notifications/objects/test\_base.py * Fixed some nits for microversion 2.48 * code comments incorrectness * Remove Babel from requirements.txt * Sync os-traits to Traits database table * Support tag instances when boot(2/4) * ComputeDriver.get\_info not limited to inst name * Replace messaging.get\_transport with get\_rpc\_transport * Be more tolerant of keystone catalog configuration * Send request\_id on glance calls * Updated from global requirements * [placement] Add api-ref for resource classes * Standardization of VM diagnostics info API * Remove unused exceptions * Refactor a test method including 7 test cases * Fix missing marker functions * Completed implementation of instance diagnostics for Xen * Updated from global requirements * Use VIR\_DOMAIN\_BLOCK\_REBASE\_COPY\_DEV when rebasing * show flavor info in server details * placement: Specific error for inventory in use * Updated from global requirements * Add database migration and model for consumers * add new test fixture flavor with extra\_specs * Updated from global requirements * Connecting Nova to DRBD storage nodes directly * Update server create networks API reference description for tags * libvirt: fix call args to destroy() during live migration rollback * Pass a list of instance UUIDs to scheduler * Fix call to driver\_detach in remove\_volume\_connection * Use plain routes list for server diagnostics endpoint * Use plain routes list for os-server-external-events endpoint * Use plain routes list for server-migrations endpoint instead of stevedore * Use plain routes list for server-tags instead of stevedore * Use plain routes list for os-interface endpoint instead of stevedore * Remove usage of parameter enforce\_type * placement: test for agg association not sharing * placement: test non-shared out of inventory * placement: tests for non-shared with shared * placement: shared resources when finding providers * Fix live migration devstack hook for multicell environment * Target cell on local delete * Updated from global requirements * Fix default\_availability\_zone docs * Send request\_id on neutron calls * Update policy description for os-volumes * Fix doc job with correct ref link * Remove oslo.config deprecated parameter enforce\_type * Completely remove mox from unit/network/test\_linux\_net.py * Add configuration options for certificate validation * Do not rely on dogpile internals for mocks * XenAPI: nova-compute cannot restart after manually delete VM * Add policy description for os-networks * Changing deleting stale allocations warning to debug * Replace diagnostics objects with Nova diagnostics objects * Added nova objects for intance diagnostics * [placement] adjust resource provider links by microversion * Add \`img\_hide\_hypervisor\_id\` image property * Catch InstanceNotFound when deleting allocations * Remove mox from nova/tests/unit/virt/xenapi/test\_xenapi.py[1] * [placement] Add api-ref for DELETE resource provider * [placement] Add api-ref for PUT resource provider * [placement] Add api-ref for GET resource provider * [placement] Add api-ref for POST resource provider * [placement] Add api-ref for DELETE RP inventory * [placement] Add api-ref for PUT RP inventory * [placement] Add api-ref for GET RP inventory * [placement] Add api-ref for DELETE RP inventories * [placement] Add api-ref for PUT RP inventories * Add check\_deltas() and limit\_check\_project\_and\_user() to Quotas * Enhancement comments on CountableResource * Deprecate TypeAffinityFilter * [placement] Add 
api-ref for GET RP inventories * Optimize creating security\_group * Limit the min length of string for integer JSON-Schema * Avoid lazy-loading instance.id when cross\_az\_attach=False * Use plain routes list for os-migrations endpoint instead of stevedore * Updated from global requirements * Migrate to oslo request\_id middleware - mv 2.46 * Ensure the value of filter parameter is unicode * XenAPI: Deprecate nicira-iface-id for XenServer VIF * Don't run ssh validation in cells v1 job * Fix MarkerNotFound when paging and marker was found in cell0 * Add recreate functional test for regression bug 1689692 * cinder: add attachment\_update method * cinder: add attachment\_create method * Use targeted context when burying instances in cell0 * Send request\_id on cinder calls * Remove unused migrate\_data kwarg from virt driver destroy() method * Fix the display of updated\_at time when using memcache driver * Check instance existing before check in mapping * Remove mox from unit/cells/test\_cells\_messaging.py * make sure to rebuild claim on recreate * Nix redundant dict in set\_inventory\_for\_provider * PowerVM Driver: SSP emphemeral disk support * Avoid lazy-load error when getting instance AZ * Handle conflict from neutron when addFloatingIP fails * re-Allow adding computes with no ComputeNodes to aggregates * Libvirt volume driver for Veritas HyperScale * Make the method to put allocations public * Don't delete allocation if instance being scheduled * Exclude deleted service records when calling hypervisor statistics * Modify incorrect comment on return\_reservation\_id * Remove incorrect comments in multiple\_create * Have nova.context use super from\_context * Handle new hosts for updating instance info in scheduler * [Trivial] Hyper-V: accept Glance vhdx images * Add strict option to discover\_hosts * make route and controller in alpha sequence * [placement] Fix placement-api-ref check tool * Use plain routes list for limits endpoint instead of stevedore * Updated from global requirements * Handle uuid in HostAPI.\_find\_service * doc: add cells v2 FAQ on mapping instances * doc: add cells v2 FAQ on refreshing global cells cache * doc: start a FAQs section for cells v2 * De-complicate some of the instance delete path * doc: add links to summit videos on cells v2 * Make target\_cell() yield a new context * Move to proper target\_cell calling convention * Updated from global requirements * Repair links in Nova documentation * api-ref: Fix parameter order in os-services.inc * fix typo * Deprecate unused policy from policy doc * trivial: Remove dead code * convert unicode to string before we connect to rados * Use plain routes list for os-quota-sets endpoint instead of stevedore * Use plain routes list for os-certificates endpoint instead of stevedore * Remove mox from cells/test\_cells\_rpc\_driver.py * api-ref: Example verification for servers-actions.inc * Updated from global requirements * nova-manage: Deprecate 'log' commands * nova-manage: Deprecate 'host' commands * nova-manage: Deprecate 'project', 'account' commands * libvirt: remove glusterfs volume driver * libvirt: remove scality volume driver * Deprecate scheduler trusted filter * XenAPI: remove hardcode dom0 plugin version in unit test * Change log level from ERROR to DEBUG for NotImplemented * Skip policy rules on attach\_network for none network allocation * Skip ceph in grenade live migration job due to restart failures * Correct \_ensure\_console\_log\_for\_instance implementation * Cache database and message queue 
connection objects * Correct the error message for query parameter validation * correctly log port id in neutron api * Fix uuid replacement in aggregate notification test * Remove DeviceIsBusy exception * Catch exception.OverQuota when create image for volume backed instance * Add policy description for os-host * Libvirt support for tagged volume attachment * Libvirt support for tagged nic attachment * Updated from global requirements * Add policy description for 'os-hide-server-addresses' * Add policy description for os-fixed-ips * Add policy description for networks\_associate * Add policy description for server\_usage * Modify the description of flat\_injected in nova.conf * Add policy description for multinic * Add policy description for 'limits' * Use plain routes list for server-password endpoint instead of stevedore * libvirt: expand checks for SubclassSignatureTestCase * fix InvalidSharedStorage exception message * Fix decoding of encryption key passed to dmcrypt * Make compute auto-disable itself if builds are failing * Make discover\_hosts only query for unmapped ComputeNode records * api-ref: Fix examples for add/removeFixedIp action * Fix a typo in code comment * Updated from global requirements * [BugFix] Change the parameter of the exception error message * Handle special characters in database connection URL netloc * fix typo in parameter type definition * Move null\_safe funcs to module level * do not log error for missing \_save\_tags * Add more description to policies in the keypairs.py * Add description to policies in extended\_status and extended\_volumes * Address comments when moving volume detach to block\_device.py * Updated from global requirements * Add a functional test for 'removeFloatingIp' action * Correct the wording about filter options * libvirt: Fix races with nfs volume mount/umount * libvirt: Pass instance to connect\_volume and disconnect\_volume * Remove the can\_host column * Totally freeze the extension\_info API * Trivial fix typo in document * Add missing rootwrap filter for cryptsetup * Add Cinder v3 detach to shutdown\_instance * Make NovaException format errors fatal for tests * Fix unit test exception KeyErrors * [BugFix] Release the memory quota for video ram when deleting an instance * Remove the rebuild extension help methods * service: use restart\_method='mutate' for all services * Verify project id for flavor access calls * Add a convenience attribute for reportclient * Add uuid to service.update notification payload * objects: add ComputeNode.get\_by\_uuid method * objects: add Service.get\_by\_uuid method * db api: add service\_get\_by\_uuid * Add online data migration for populating services.uuid * placement: get providers sharing capacity * Remove cloudpipe APIs * Replace newton to release\_name in upgrade.rst * Fix a typo * neutron: retrieve physical network name from a multi-provider network * Use six.text\_type() when logging Instance object * Fix typo in wsgi applications release note * Catching OverQuota Exception * Add description to policies in extended\_az and extend\_ser\_attrs * Add policy description for os-quota-classes * Add policy description for instance actions * Add policy description for fping * Updated from global requirements * Ensure sample policy help text correctly wrapped * Add policy description for extensions * Use plain routes list for server-metadata endpoint instead of stevedore * Transform instance.volume\_detach notification * Transform instance.volume\_attach.error notification * Transform 
instance.volume\_attach notification * Fix units for description of "flavor\_swap" parameter * Don't lazy-load flavor.projects during destroy() * devref and reno for nova-{api,metadata}-wsgi scripts * Add pbr-installed wsgi application for metadata api * Update devref with vendordata changes * remove unused functions * Use systemctl to restart services * Remove nova-cert leftovers * Add policy description for image size * Add policy description for instance-usage-audit-log * Add policy description for Servers IPs * Add policy description for config\_drive * XenAPI: update support matrix to support detach interface * Remove unnecessary execute permissions * Use plain routes list for os-fixed-ips endpoint instead of stevedore * Use plain routes list for os-availability-zone endpoint instead of stevedore * Use plain routes list for os-assisted-volume-snapshots endpoint * Use plain routes list for os-agents endpoint instead of stevedore * Use plain routes list for os-floating-ip-dns endpoint instead of stevedore * Add compute\_nodes\_uuid\_idx unique index * Use plain routes list for os-floating-ips-bulk endpoint instead of stevedore * Use plain routes list for os-floating-ip-pools endpoint instead of stevedore * Use plain routes list for os-floating-ips endpoint instead of stevedore * api-ref: Fix unnecessary description in servers-admin-action * api-ref: Fix parameters in servers-action-console-output * api-ref: Use 'note' directive * use plain routes list for os-simple-tenant-usage * Use plain routes list for os-instance-usage-audit-log endpoint instead of stevedore * Support tag instances when boot(1) * Add Cinder v3 detach call to \_terminate\_volume\_connections * placement: implement get\_inventory() for libvirt * nova-manage: Deprecate 'agent' commands * Add reserved\_host\_cpus option * Update description to policies in remaining flavor APIs * Add description to policies in migrations.py * Trivial fix: fix broken links * Remove nova-cert * Fixed a broken link in API Plugins document * Stop using mox int unit/virt/xenapi/image/test\_utils.py * Add ability to query for ComputeNodes by their mapped value * Add ComputeNode.mapped field * Updated from global requirements * Add a note to \*\_allocation\_ratio options about Ironic hardcode * Remove legacy v2.0 code from test\_flavor\_access * Do not log live migration success when it actually failed * Expose StandardLogging fixture for use * Add Cinder v3 detach to local\_cleanup * Don't check for file type in \_find\_base\_file * Rename \_handle\_base\_image to \_mark\_in\_use * Add context comments to \_handle\_base\_image * Add mock check and fix uuid's use in test * Revert "Prevent delete cell0 in nova-manage command" * Improve comment for PCI port binding update * Parse algorithm from cipher for ephemeral disk encryption * Add description to policies in floating\_ip files * Add description to policies in migrate\_server.py * Remove all discoverable policy rules * PowerVM Driver: console * Update doc/source/process.rst * 2.45: Remove Location header from createImage and createBackup responses * Clean up ClientRouter debt * api-ref: move createBackup to server-actions * Deprecate Multinic, floatingip action and os-virtual-interface API * Register osapi\_compute when nova-api is wsgi * disable keepalive for functional tests * Use plain routes list for '/os-aggregates' endpoint instead of stevedore * Use plain routes list for '/os-keypairs' endpoint instead of stevedore * Use plain routes list for flavors-access endpoint instead of 
stevedore * Use plain routes list for flavors-extraspecs endpoint instead of stevedore * Use plain routes list for flavor endpoint instead of stevedore[1] * Use plain routes list for '/servers' endpoint instead of stevedore * encryptors: Switch to os-brick encryptor classes * Fix unnecessary code block in a release note * Remove redundant code * api-ref: Fix a parameter description in servers.inc * api-ref: Parameter verification for servers-actions (4/4) * api-ref: Parameter verification for servers-actions (3/4) * Refactor a test method including 3 test cases * Sort CellMappingList.get\_all() for safety * Add workaround to disable group policy check upcall * Make server groups api aware of multiple cells for membership * libvirt: remove redundant and broken iscsi volume test * Remove BuildRequest.block\_device\_mapping clone workaround * libvirt: Always disconnect\_volume after rebase failures * Rework descriptions in os-hypervisors * Trivial Fix a typo * api-ref: Parameter verification for servers-actions (2/4) * Updated from global requirements * PowerVM Driver: spawn/destroy #4: full flavor * Remove archaic reference to QEMU errors during post live migration * Tell people that the nova-cells man page is for cells v1 * Add release note and update cell install guide for multi-cell limitations * PowerVM Driver: spawn/destroy #3: TaskFlow * Allow CONTENT\_LENGTH to be present but empty * libvirt: Remove is\_job\_complete polling after pivot * Adding auto\_disk\_config field to InstancePayload * add tags field to instance.update notification * Add description to policies in hypervisors.py * Explicitly define enum type as string in schema * PowerVM Driver: power\_on/off and reboot * Using max api version in notification sample test * PowerVM Driver: spawn/destroy #2: functional * Warn the user about orphaned extra records during keypair migration * Deprecate os-hosts API * Update resource tracker to PUT custom resource classes * [placement] Idempotent PUT /resource\_classes/{name} * Update detach to use V3 Cinder API * conf: Move 'floating\_ips' opts into 'network' * conf: Deprecate 'default\_floating\_pool' * conf: Add neutron.default\_floating\_pool * libvirt: Use config types to parse XML for root disk * libvirt: Add missing tests for utils.find\_disk * libvirt: Use config types to parse XML for instance disks * Updated from global requirements * Mock timeout in test\_\_get\_node\_console\_with\_reset\_wait\_timeout * Add test ensure all the microversions are sequential in placement API * fix overridden error * fix typos * Add interfaces functional negative tests * Remove unused os-pci API * Fix mitaka online migration for PCI devices * Fix port update exception when unshelving an instance with PCI devices * Fix docstring in \_validate\_requested\_port\_ids * Fix the evacuate API without json-schema validation in 2.13 * api-ref: Fix response code and parameters in evacuate * Remove json-schema extension variable for resize * Update etherpad url * Use deepcopy when process filters in db api * Add regression test for server filtering by tags bug 1682693 * remove unused parameter in rpc call * Remove usage of parameter enforce\_type * Remove test\_init\_nonexist\_schedulerdriver * Spelling error "paramenter" * api-ref: Parameter verification for servers-actions (1/4) * Revert "Make server\_groups determine deleted-ness from InstanceMappingList" 16.0.0.0b1 ---------- * Fix hypervisors api missing HostMappingNotFound handlers * Updated from global requirements * Fix HTTP 500 raised for 
getConsoleLog for stopped instance * Remove backend dependency for key types * Fix libvirt group selection in live migration test * Update network metadata type field for IPv6 * Add description to policies in servers.py * Add description to policies in security\_groups.py * api-ref: Nova Update Compute services Link * api-ref: Fix parameters in os-hosts.inc * Add uuid to Service model * Modify PciPassthroughFilter to accept lists * Deprecate CONF.api.allow\_instance\_snapshots * Read NIC features in libvirt * Fix api-ref for create servers response * placement: Add Traits API to placement service * Remove aggregate uuid generation on load from DB * Document and provide useful error message for volume-backed backup * PowerVM Driver: spawn/delete #1: no-ops * Refactor: Move post method to APIValidationTestCase base class * remove log translation tags from nova.cells * Get BDMs when we need to in \_handle\_cell\_delete * Remove dead db api code * Add description to policies in server\_password.py * remove flake8-import-order * Expand help text for [libvirt]/disk\_cachemodes * Updated from global requirements * Add description to policies in remote\_consoles.py * api-ref: fix os-extended-volumes:volumes\_attached in servers responses * Image meta min\_disk should be int in fake\_request\_spec * Optimize the link address * Add description to policies in quota\_sets.py * Fix joins in instance\_get\_all\_by\_host * Fix test\_instance\_get\_all\_by\_host * Remove the stevedore extension point for server create * Remove the json-schema extension point of server create * Remove the extension check for os-networks in servers API * Make server\_groups determine deleted-ness from InstanceMappingList * Add get\_by\_instance\_uuids() to InstanceMappingList * Remove Mitaka-era service version check * Teach HostAPI about cells * Make scheduler target cells to get compute node instance info * Deprecate the Cinder API v2 support * Limit exposure of network device types to the guest * Remove a fallacy in scheduler.driver config option help text * [placement] Allow PUT and POST without bodies * Use physical utilisation for cached images * Remove config opts for extension black/white list * Remove the usage of extension black/white list opt in scheduler hints * Cleanup wording on compute service version checks in API * Fix test\_no\_migrations\_have\_downgrade * Perform old-style local delete for shelved offloaded instances * Regression test for local delete with an attached volume * Set size/status during image create with FakeImageService * Commit usage decrement after destroying instance * Add regression test for quota decrement bug 1678326 * Short-circuit local delete path for cells v2 and InstanceNotFound * api-ref: make it clear that os-cells is for cells v1 * Add description to policies in security\_group\_default\_rules.py * Remove the usage of extension black/white list opt in user data * Add empty flavor object info in server api-ref * placement: Enable attach traits to ResourceProvider * docs: update description for AggregateInstanceExtraSpecsFilter * nova-net: remove get\_instance\_nw\_info from API subclass * API: accept None as content-length in HTTP requests * Switch from pip\_missing\_reqs to pip\_check\_reqs * nova-manage: Deprecate 'shell' commands * doc: Separate the releasenotes guide from the code-review section * Distinguish between cells v1 and v2 in upgrades doc * Use HostAddressOpt for opts that accept IP and hostnames * Stop using ResourceProvider in scheduler and RT * Updated from 
global requirements * Remove unnecessary tearDown function in testcase * Ensure reservation\_expire actually expires reservations * Remove unnecessary duplicated NOTE * Add description to policies in server\_diagnostics.py * Add description to policies in server\_external\_events.py * Add server-action-removefloatingip.json file and update servers-actions.inc * api-ref: networks is mandatory in Create Server * Trivial: Remove unused method * Make metadata doc more readable * Remove the usage of extension black/white list opt in AZ * Remove the usage of extension black/white list opt in config drive * Remove the usage of extension black/white list opts in multi-create * Remove the usage of extension black/white list opts in BDM tests * Rename the model object ResourceProviderTraits to ResourceProviderTrait * Short circuit notifications when not enabled * Add description to policies in services.py * compute: Move detach logic from manager into driver BDM * doc: Move code-review under developer policies * Add description to policies in servers\_migrations.py * Remove mox from nova/tests/unit/consoleauth/test\_consoleauth.py * Remove unnecessary setUp function in testcase * api-ref: Fix wrong HTTP response codes * Make conductor ask scheduler to limit migrates to same cell * Updated from global requirements * Consolidate unit tests for shelve API * Remove \_wait\_for\_state\_change() calls from notification (action)tests * Fix calling super function in setUp method * Remove namespace check in creating traits * Add description for /consoles * Ensure instance is in active state after notification test * Add description to policies in used\_limits * Add description to policies in lock\_server.py * Add description to policies in server\_metadata.py * Add description to policies in evacuate.py and rescue.py * Add description to policies in server\_groups.py * Use cursive for signature verification * Fix api-ref for adminPass behavior * Fix 'server' and 'instance' occurrence in api-ref * Add description to policies in flavor\_extra\_specs.py * code comment redundant * Add exclusion list for tempest for a libvirt+xen job * Add description to policies in cells\_scheduler.py * Add description to policies in aggregates.py * Add description to policies in pause\_server.py * Add description to policies in simple\_tenant\_usage.py * Add description to policies in keypairs.py * Remove unused policy rule in admin\_actions.py * Add description to policies in admin\_actions * Add description to policies in certificates.py * libvirt: Remove dead code * Add description to policies in console\_output.py * tox: Stop calling config/policy generators twice * There is a error on annotation about related options * Remove mox from nova.tests.unit.objects.test\_instance.py * fixed typos and reword stable api doc * Fix some reST field lists in docstrings * Add description to nova/policies/shelve.py * [placement] Split api-ref topics per file * Add description to policies in tenant\_networks.py * placement: Add Trait and TraitList objects * Remove legacy regeneration of RequestSpec in MigrationTask * remove i18n log markers from nova.api.\* * [placement] add api-ref for GET /resource\_providers * Structure for simply managing placement-api-ref * 'uplug' word spelling mistake * Make xenapi driver compatible with assert\_can\_migrate * Remove mox from nova/tests/unit/api/openstack/compute/test\_virtual\_interfaces.py * Remove mox from nova/tests/unit/api/openstack/compute/test\_quotas.py * Remove mox from 
nova/tests/unit/api/openstack/compute/test\_migrations.py * Fix wrong unit test about config option enabled\_apis * Do not attempt to load osinfo if we do not have os\_distro * Add confirm resized server functional negative tests * remove mox from unit/api/openstack/compute/test\_disk\_config.py * Revert "libvirt: Pass instance to connect\_volume and ..." * Add descripiton to policies in virtual\_interfaces.py * Add description to policies to availability\_zone * Add description to policies in suspend\_server.py * api-ref: fix description of volumeAttachment for attach/swap-volume * Get instance availability\_zone without hitting the api db * Set instance.availability\_zone whenever we schedule * [placement] Don't use floats in microversion handling * tests: fix uefi testcases * libvirt: make emulator threads to run on the reserved pCPU * libvirt: return a CPU overhead if isolate emulator threads requested * virt: update overhead to take into account vCPUs * numa: update numa usage to include reserved CPUs * numa: take into account cpus reserved * numa: fit instance NUMA node with cpus reserved onto host NUMA node * remove mox from unit/api/openstack/compute/test\_flavor\_manage.py * remove mox from unit/compute/test\_compute\_utils.py * api-ref: Complete all the verifications of remote consoles * remove mox from unit/virt/xenapi/image/test\_bittorrent.py * Fix some reST field lists in docstrings * Add lan9118 as valid nic for hw\_vif\_model property for qemu * Add description to policies in deferred\_delete.py * Add description to policies in create\_backup.py * Add description to policies in consoles.py * Add description to policies in cloudpipe.py * Add description to policies in console\_auth\_tokens.py * Add description to policies in baremetal\_nodes.py * conf: Final cleanups in conf/network * conf: Deprecate 'allow\_same\_net\_traffic' * libvirt: Ignore 'allow\_same\_net\_traffic' for port filters * conf: Deprecate 'use\_ipv6' * netutils: Ignore 'use\_ipv6' for network templates * Add check for invalid inventory amounts * Add check for invalid allocation amounts * Remove the Allocation.create() method * Add release note for CVE-2017-7214 * Add description to policies in cells.py * Tests: remove .testrepository/times.dbm in tox.ini (functional) * Pre-add functional tests stub to notification testing * libvirt: conditionally set script path for ethernet vif types * Add description to policies in agents.py * Add description to policies in admin\_password.py * libvirt: mark some Image backend methods as abstract * Add description to policies in assisted\_volume\_snapshots.py * Change os-server-tags default policy * Ironic: hardcode min\_unit for standard resources to 1 * Refactor: remove \_items() in nova/api/openstack/compute/attach\_interfaces.py * delete more i18n log markers * remove log translation from nova.api.metadata * update i18n guide for nova * Add description to policies in attach\_interfaces.py * Add description to policies in volumes\_attachments.py * Add description to policies in volumes.py * Fix rest\_api\_version\_history (2.40) * fix os-volume\_attachments policy checks * libvirt: Ignore 'use\_ipv6' for port filters * conf: Fix indentation in conf/netconf * Remove unused VIFModel.\_get\_legacy method * Add helper method to add additional data about policy rule * DELETE all inventory for a resource provider * nova-status: don't coerce version numbers to floats for comparison * remove mox from unit/api/openstack/compute/test\_flavors.y * Improve descriptions for 
hostId, host, and hypervisor\_hostname * compute: Only destroy BDMs after successful detach call * Remove old oslo.messaging transport aliases * Updated from global requirements * do not include context to exception notification * Add api-ref for filter/sort whitelist * Fix functional regression/recreate test for bug 1671648 * api-ref: fix description in os-services * flake8: Specify 'nova' as name of app * objects: Add attachment\_id to BlockDeviceMapping * db: Add attachment\_id to block\_device\_mapping * Updated from global requirements * Clarify os-stop API description * remove flake8-import-order for test requirements * Avoid lazy-loading projects during flavor notification * libvirt: add debug logging in detach\_device\_with\_retry * Transform instance.reboot.error notification * Transform instance.reboot notifications * remove hacking rule that enforces log translation * doc: configurable versioned notifications topics * Replace obsolete vanity openstack.org URLs * Add populate\_retry to schedule\_and\_build\_instances * Add a functional regression/recreate test for bug 1671648 * virt: implement get\_inventory() for Ironic * Fix the help for the disk\_weight\_multiplier option * Add a note about force\_hosts only ever having a single value * Make os-availability-zones know about cells * Introduce fast8 tox target * Duplicate JSON line ending check to pep8 * trivial: Remove \r\n line endings from JSON sample * [placement] Raising http codes on old microversion * Updated from global requirements * doc: add some documentation around quotas * Make versioned notifications topics configurable * Use proper user and tenant in the owner section of libvirt.xml * Prevent delete cell0 in nova-manage command * Refactor InstancePayload creation * nova-status: require placement >= 1.4 * Temporarily untarget context when deleting from cell0 * Decrement quota usage when deleting an instance in cell0 * VMware: use WithRetrieval in ds\_util module * VMware: use WithRetrieval in get\_network\_with\_the\_name * Remove VMware driver \_get\_vm\_ref\_from\_uuid method * trivial: Add a note about 'cells\_api' * Add description for Image location in snapshot * Typo fix in releasenotes: deprecate network options * api-ref: Fix parameters and examples in aggregate API * Transform instance.rebuild.error notification * Transform instance.rebuild notification * Add regression test for bug 1670627 * No API cell up-call to delete consoleauth tokens * Add identity helper property to CellMapping * Correctly set up deprecation warning * Add cell field to Destination object * Change MQ targeting to honor only what is in the context * Remove duplicate attributes in sample files * api-ref: Fix keypair API parameters * Fix missing instance.delete notification * conf: Fix formatting of network options * Teach simple\_tenant\_usage about cells * Teach os-migrations about cells * Teach os-aggregates about cells * Stop using mox in unit/virt/disk/test\_api.py * Avoid using fdatasync() when fetching images * Fix API doc about server attributes (2.3 API) * Refactor cell loading in compute/api * Target cell in super conductor operations * Ensure image conversion flushes output data to disk * fdatasync() downloaded images before use * conf: fix default values reporting infra worker * Error message should not include SQL command * Make consoleauth target the proper cell * Enlighten server tags API about cells * Update docstrings for legacy notification methods * conf: Deprecate most 'network' option * Use Cinder API v3 as default 
* get\_model method missing for Ploop image * trivial: Standardize naming of variables * trivial: Standardize indentation of test\_vif * autospec the virt driver mock in test\_resource\_tracker * Add functional test for bad res class in set\_inventory\_for\_provider * Remove unused placement\_database config options * libvirt: pass log\_path to \_create\_pty\_device for non-kvm/qemu * virt: add get\_inventory() virt driver API method * conf: remove console\_driver opt * Use flake8-import-order * numa: add numa constraints for emulator threads policy * Remove mox from nova.tests.unit.api.openstack.compute.test\_block\_device\_mapping * Revert "Add some metadata logging to root cause ssh failure" * Add comment to instance\_destroy() * Remove GlanceImageService * Use Sphinx 1.5 warning-is-error * Add warning on setting secure\_proxy\_ssl\_header * handle uninited fields in notification payload * Fix api-ref with Sphinx 1.5 * Imported Translations from Zanata * Reno for additional-notification-fields-for-searchlight * Default firewall\_driver to nova.virt.firewall.NoopFirewallDriver * Handle conflicts for os-assisted-volume-snapshots * Remove mox from nova.tests.unit.api.openstack.compute.test\_create\_backup * Log with cell.uuid if cell.name is not set * Updated from global requirements * re-orphan flavor after rpc deserialization * Stop using mox stubs in nova.tests.unit.api.openstack.compute.test\_serversV21 * Skip unit tests for SSL + py3 * Add functional test for ip filtering with regex * Add resize server functional negative tests * conf: resolved final todos in libvirt conf * Only create vendordata\_dynamic ksa session if needed * Check for 204 case in DynamicVendorData * Add some metadata logging to root cause ssh failure * Remove unused variable * Remove domains \*-log-\* from compile\_catalog * Updated from global requirements * Updated from global requirements * [placement] Add Traits related table to the api database * Remove mox from nova/tests/unit/db/test\_db\_api.py * Complete verification of servers-action-fixed-ip.inc * Remove mox in nova/tests/unit/compute/test\_shelve.py (3) * libvirt: Pass instance to connect\_volume and disconnect\_volume * Stop using mox in compute/test\_hypervisors.py * Add device\_id when creating ports * Make compute/api instance get set target cell on context * Remove mox from nova.tests.unit.virt.xenapi.test\_vmops[1] * Tests: remove .testrepository/times.dbm in tox.ini * Updated from global requirements * Remove invalid tests-py3 whitelist item * Ignore deleted services in minimum version calculation * Add RPC version aliases for Ocata * Remove mox from nova/tests/unit/test\_configdrive2.py * Remove usage of config option verbose * Remove check\_attach * Handle VolumeBDMIsMultiAttach in os-assisted-volume-snapshots * api/metadata/vendordata\_dynamic: don't import Requests for its constants * Fix typos detected by toolkit misspellings * remove a TODO as all set for tags * Clean up metadata param in doc * Remove extension in API layer * Correct some spelling errors * Fix typo in config drive support matrix docs * doc: Don't put comments inside toctree * Fix doc generation warnings * Remove run\_tests.sh * Fix spice channel type * Updated from global requirements * libvirt: drop MIN\_LIBVIRT\_HUGEPAGE\_VERSION * libvirt: drop MIN\_LIBVIRT\_NUMA\_VERSION * libvirt: drop MIN\_QEMU\_NUMA\_HUGEPAGE\_VERSION * libvirt: Fix misleading error in Ploop imagebackend * More usage of ostestr and cleanup an unused dependency * Ensure that instance directory is 
removed after success migration/resize * api-ref: Body verification for os-hypervisors.inc * Make conductor create InstanceAction in the proper cell * Allow nova-status to work with custom ca for placement * libvirt: Handle InstanceNotFound exception * Make scheduler get hosts from all cells * Make servers API use cell-targeted context * Make CellDatabases fixture work over RPC * Use the keystone session loader in the placement reporting * Verify project\_id when quotas are checked * Remove mox from nova/tests/unit/virt/vmwareapi/test\_vif.py * conf: Fix invalid rST comments * Revert "fix usage of opportunistic test cases with enginefacade" * Placement api: set custom json\_error\_formatter in resource\_class * Enable coverage report * Make server\_external\_events cells-aware * Remove service version check for Ocata/Newton placement decisions * Remove a dead cinder v1 check * Raise correct error instead of class exist in Placement API * Remove mox from nova/tests/unit/objects/test\_service.py * Skip soft-deleted records in 330\_enforce\_mitaka\_online\_migrations * Stop using mox from tests/unit/test\_service.py * Update placement\_dev with info about new decorator * Remove unused logging import * Deprecate xenserver.vif\_driver config option and change default * Fix live migrate with XenServer * Fix novncproxy for python3 * Remove mox stubs in api/openstack/compute/test\_server\_reset\_state.py * Fix some typo errors * Enable defaults for cell\_v2 update\_cell command * Remove dead code: \_safe\_destroy\_instance\_residue * Updated from global requirements * Make eventlet hub use a monotonic clock * Tolerate WebOb===1.7.1 * Tolerate jsonschema==2.6.0 * Stop using mox in test\_compute\_cells.py * Stop using mox in virt/xenapi/image/test\_glance.py * Remove mox from unit/api/openstack/compute/test\_aggregates.py * Remove mox from api/openstack/compute/test\_deferred\_delete.py * Typo fix: degredation => degradation * api-ref: Fix deprecated proxy API parameters * api-ref: note that boot ignores bdm:device\_name * Skip test\_stamp\_pattern in cells v1 job * Fix misuse of assertTrue * Fix improper prompt when update RC with existed one's name * Remove mox from nova/tests/unit/virt/vmwareapi/test\_configdrive.py * Placement api: set custom json\_error\_formatter in root * Cleanup some issues with CONF.placement.os\_interface * Placement api: set custom json\_error\_formatter in aggregate and usage * Fix suggested database migration command * Placement api: set custom json\_error\_formatter in resource\_provider * api-ref: Fix network\_label parameter type * Fix incorrect example for querying resource for RP * Use ListOfIntegersField in oslo.versionedobjects * libvirt: drop MIN\_QEMU\_PPC64\_VERSION * libvirt: drop MIN\_LIBVIRT\_AUTO\_CONVERGE\_VERSION * libvirt: drop MIN\_QEMU\_DISCARD\_VERSION * libvirt: drop MIN\_LIBVIRT\_HYPERV\_TIMER\_VERSION * libvirt: drop MIN\_LIBVIRT\_UEFI\_VERSION * libvirt: drop MIN\_LIBVIRT\_FSFREEZE\_VERSION * libvirt: drop MIN\_LIBVIRT\_BLOCKJOB\_RELATIVE\_VERSION * Bump minimum required libvirt/qemu versions for Pike * api-ref: fix instance action 'message' description * Placement api: set custom json\_error\_formatter in inventory * conf/libvirt: remove invalid TODOs * conf/compute: remove invalid TODOs * Remove straggling use of main db flavors in cellsv1 code * Add Cells V1 -> Cells V2 step-by-step example * nova-manage: Update deprecation timeline * Enable global hacking checks and removed local checks * Update hacking version * Use min parameter to restrict 
live-migration config options * Fix typo in nova/network/neutronv2/api.py * libvirt: wait for interface detach from the guest * libvirt: fix and break up \_test\_attach\_detach\_interface * api-ref: mark id as optional in POST /flavors * Fix nova-manage cell\_v2 metavar strings * Remove unused validation code from block\_device * Prepare for using standard python tests * Placement api: set custom json\_error\_formatter in allocations * [3/3]Replace six.iteritems() with .items() * conf: Deprecate 'firewall\_driver' * conf: Deprecate 'ipv6\_backend' * libvirt: set vlan tag for macvtap on SR-IOV VFs * Removed unnecessary parantheses and fixed formation * Fix the spelling mistake in host.py * Allow None for block\_device\_mapping\_v2.boot\_index * Edits for Cells V2 step-by-step examples * api-ref: fix delete server async postcondition docs * libvirt: check if we can quiesce before volume-backed snapshot * Default live\_migration\_progress\_timeout to off * libvirt: Remove redundant bdm serial mangling and saving during swap\_volume * Consider startup scenario in \_get\_compute\_nodes\_in\_db * libvirt: Introduce Guest.get\_config method * libvirt: Parse basic attributes of LibvirtConfigGuest from xml * libvirt: Parse filesystem elements of guest config * libvirt: Parse virt\_type attribute of LibvirtConfigGuest from xml * libvirt: Parse os attributes of LibvirtConfigGuest from xml * libvirt: Remove unused disk\_info parameter * libvirt: Simplify internal usage of get\_instance\_disk\_info * Stop failed live-migrates getting stuck migrating * Stop \_undefine\_domain erroring if domain not found * tests: fix vlan test type from int to str * Add an update\_cell command to nova-manage * allocations.consumer\_id is not used in query * api-ref: document the 'tenant\_id' query parameter * TrivialFix: replace list comprehension with 'for' * Reserve migration placeholders for Ocata backports * Update the upgrades part of devref * Cleanup the caches when deleting a resource provider * vomiting * Clarify the deployment of placement for cellsv1 users * conf: remove deprecated image url options * conf: add min parameter to scheduler opts * Add step-by-step examples for Cells V2 setup * Add nodename to \_claim\_test log messages * Update reno for stable/ocata 15.0.0.0rc1 ----------- * Add placement request id to log when GET or POST rps * Add placement request id to log when GET aggregates * Add more debug logging on RP inventory delete failures * Add more debug logging on RP inventory update failures * Delete a compute node's resource provider when node is deleted * Remove mox from unit/virt/libvirt/test\_imagebackend.py (end) * Mark compute/placement REST API max microversions for Ocata * Add release note for filter/sort whitelist * Clarify the language in the apache wsgi sample * Stop swap allocations being wrong due to MB vs GB * Clarify the [cells] config option help * Add offset & limit docs & tests * Report reserved\_host\_disk\_mb in GB not KB * Fix access\_ip\_v4/6 filters params for servers filter * Fix typo in cells v2 ocata reno * doc: add upgrade notes to the placement devref * Simplify uses of assert\_has\_calls * Fix typo in help for discover\_hosts\_in\_cells\_interval * Handle NotImplementedError in \_process\_instance\_vif\_deleted\_event * Fix the terminated\_at field in the server query params schema * Add release note for nova-status upgrade check CLI * Add prelude section for Ocata * Collected release notes for Ocata CellsV2 * reno for notification-transformation-ocata * Allow 
scheduler to run cell host discovery periodically * doc: update the man page entry for nova-manage db sync * doc: refer to the cell\_v2 man pages from the cells v2 doc * doc: add some detail to the map\_cell0 man page * Remove pre-cellsv2 short circuit in instance get * Continue processing build requests even if one is gone already * Allow placement endpoint interface to be set * Ensure build request exists before creating instance * placement-api: fix ResourceProviderList query * tests: Remove duplicate NumaHostInfo * tests: Combine multiple NUMA-generation functions * tests: Don't reinvent \_\_init\_\_ * Explain how allow\_resize\_to\_same\_host is useful * nova-status: relax the resource providers check * Read instances from API cell for cells v1 * [placement] Use modern attributes of oslo\_context * Fix map\_cell\_and\_hosts help * Fresh resource provider in RT must have generation 0 * libvirt: Limit destroying disks during cleanup to spawn * Use is\_valid\_cidr and is\_valid\_ipv6\_cidr from oslo\_utils * Ignore IOError when creating 'console.log' * Fix unspecified bahavior on GET /servers/detail?tenant\_id=X as admin * Remove unused exceptions from nova.exception * nova-manage docs: cell\_v2 delete\_cell * nova-manage docs: cell\_v2 list\_cells * nova-manage docs: cell\_v2 discover\_hosts * nova-manage docs: cell\_v2 create\_cell * nova-manage docs: cell\_v2 verify\_instance * nova-manage docs: cell\_v2 map\_cell\_and\_hosts * Fix tag attribute disappearing in 2.33 and 2.37 * Scheduler calling the Placement API * Block starting compute unless placement conf is provided * Added instance.reboot.error to the legacy notifications * Avoid redundant call to update\_resource\_stats from RT * api-ref: Fix path parameters in os-hypervisors.inc * libvirt: fix vCPU usage reporing for LXC/QEMU guests * Adding vlans field to Device tagging metadata * libvirt: expose virtual interfaces with vlans to metadata * objects: vlan field to NetworkInterfaceMetadata object * Move instance creation to conductor * Updated from global requirements * Fix server group functional test by using all filters * Hyper-V PCI Passthrough * Change exponential function to linear * Fixed indentation in virt/libvirt/driver.py * Cache boot time roles for vendordata * Optionally make dynamic vendordata failures fatal * Use a service account to make vendordata requests * libvirt: ephemeral disk support for virtuozzo containers 15.0.0.0b3 ---------- * ironic: Add trigger crash dump support to ironic driver * Only warn about hostmappings during ocata upgrade * nova-manage docs: cell\_v2 map\_instances * nova-manage docs: cell\_v2 map\_cell0 * nova-manage docs: cell\_v2 simple\_cell\_setup * Add new configuration option live\_migration\_scheme * Fix race condition in instance.update sample test * libvirt: Use the mirror element to detect job completion * libvirt: Mock is\_job\_complete in test\_driver * adding debug info for pinning calculation * PCI: Check pci\_requests object is empty before passing to support\_requests * Ironic: Add soft power off support to Ironic driver * Add sort\_key white list for server list/detail * Trivial-fix: replace "json" with "yaml" in policy README * Release PCI devices on drop\_move\_claim() * objects: add new field cpuset\_reserved in NUMACell * Make api\_samples tests use simple cell environment * Assign mac address to vf netdevice when using macvtap port * conf: Deprecate 'console\_driver' * libvirt: avoid generating script with empty path * placement: minor refactor 
\_allocate\_for\_instance() * placement: report client handle InventoryInUse * Multicell support for instance listing * scheduler: Don't modify RequestSpec.numa\_topology * Fix and add some notes to the cells v2 first time setup doc * Add deleting log when config drive was imported to rbd * Updated from global requirements * Amend the PlacementFixture * Prevent compute crash on discovery failure * Ironic: Add soft reboot support to ironic driver * os-vif: convert libvirt driver to use os-vif for fast path vhostuser * Updated from global requirements * Add a PlacementFixture * Set access\_policy for messaging's dispatcher * libvirt: make coherent logs when reboot success * Add ComputeNodeList.get\_all\_by\_uuids method * Fix typo in 216\_havana.py * placement: create aggregate map in report client * Support Ironic interface attach/detach in nova virt * Generate necessary network metadata for ironic port groups * Ensure we mark baremetal links as phy links * os-vif-util: set vif\_name for vhostuser ovs os-vif port * Move migration\_downtime\_steps to libvirt/migration * libvirt: fix nova can't delete the instance with nvram * Remove mox in libvirt destory tests * VMWare: Move constant power state strings to the constant.py * Remove references to Python 3.4 * hyperv: make sure to plug OVS VIFs after resize/migrate * Strict pattern match query parameters * Raise InvalidInput exception * Fix Nova to allow using cinder v3 endpoint * [py35] Fixes to get more tempest tests working * Move to tooz hash ring implementation * api-ref: Fix a parameter in os-availability-zone.inc * objects: remove cpu\_topology from \_\_init\_\_ of InstanceNUMATopology * Integrate OSProfiler and Nova * Remove mox from unit/virt/libvirt/test\_imagebackend.py (5) * Enable virt.vmwareapi test cases on Python * Enable virt.test\_virt\_drivers.AbstractDriverTestCase on Python 3 * Port compute.test\_user\_data.ServersControllerCreateTest to Python 3 * Add revert resized server functional negative tests * XenAPI: Fix vif plug problem during VM rescue/unrescue * Handle oslo.serialization type error and binascii error * Remove invalid URL in gabbi tests * nova-manage cell\_v2 map\_cell0 exit 0 * Add query parameters white list for server list/detail * nova-manage docs: add cells commands prep * Add --verbose option to discover\_hosts command * Add more details when test\_create\_delete\_server\_with\_instance\_update fails * Updated from global requirements * Add some cellsv2 setup docs * Fix the generated cell0 default database name * rt: use a single ResourceTracker object instance * Add nova-manage cell\_v2 delete\_cell command * Add InstanceMappingList.get\_by\_cell\_id * Create HostMappingList object * Add nova-manage cell\_v2 list\_cells command * Add nova-manage cell\_v2 create\_cell command * Add rudimentary CORS support to placement API * libvirt: workaround findmnt behaviour change * api-ref: Fix parameters whose values are 'null' * Fix broken link of Doc * api-ref: Fix parameters and response in os-quota-sets.inc * Remove nova-manage image from man pages * Updated from global requirements * Fixes to get all functional tests working on py35 * [placement] Add a bit about extraction plans to placement\_dev * [placement] Add an "Adding a Handler" section to placement\_dev * [placement] placement\_dev info for testing and gabbi * [placement] placement\_dev info for microversion handling * Updated from global requirements * placement: validate member\_of values are uuids * Make metadata server know about cell mappings * 
Remove redundant arg check in nova-manage cell\_v2 verify\_instance * Expose a REST API for a specific list of RPs * copy pasta error * Set sysinfo\_serial="none" in LibvirtDriverTestCase * [py35] Fixes to get rally scenarios working * Fix missing RP generation update * Add service\_token for nova-neutron interaction * rt: explicitly pass compute node to \_update() * Make unit tests work with os-vif 1.4.0 * Updated from global requirements * libvirt: make live migration possible with Virtuozzo * Small improvements to placement.rst * Better black list for py35 tests * Fix class type error in attach\_interface() function * Hyper-V: Adds vNUMA implementation * Don't bypass cellsv1 replication if cellsv2 maps are in place * Adds Hyper-V OVS ViF driver * docs - Connect to placement service & retries * Improve flavor sample in notification sample tests * xenapi: support the hotplug of a neutron port * Update notification for flavor * Add service\_token for nova-cinder interaction * Make allocate\_for\_instance take consistent args * XenAPI Remove useless files when use os-xenapi lib * XenAPI Use os-xenapi lib for nova * Make placement client keep trying to connect * releasenotes: Add missing releasenote for encryption provider constants * Stop using mox stubs in test\_attach\_interfaces.py * Remove mox from api/openstack/compute/test\_floating\_ip\_dns.py * Remove mox in nova/tests/unit/compute/test\_shelve.py (end) * Remove mox in unit/api/openstack/test\_wsgi.py * Document testing process for zero downtime upgrade * Remove mox in nova/tests/unit/compute/test\_shelve.py (2) * Notifications for flavor operations * Add debug possibility for nova-manage command * conf: Deprecate yet more nova-net options * conf: Resolve formatting issues with 'quota' * [2/3]Replace six.iteritems() with .items() * Port xenapi test\_vm\_utils to Python 3 * docs: sort the Architecture Concepts index * Make the SingleCellSimple fixture a little more comprehensive * Fix non-parameterized service id in hypervisors sample tests * Fix TypeError in \_update\_from\_compute\_node race * Trivial indentation fix * Add missing CLI commands in support-matrix.ini * tests: Replace use of CONF with monkey patching * correct misleading wording * Fix a typo in documents * Don't translate exceptions w/ no message * Fix ksa mocking in test\_cinderclient\_unsupported\_v1 * [placement] fix typo in call to create auth middleware * HTTP interface for resource providers by aggregates * Return uuid attribute for aggregates * Update docstring of \_schema\_validation\_helper * api-ref: use the examples with paging links * Port libvirt.test\_vif to Python 3 * Port libvirt.test\_firewall to Python 3 * Move quota options to a config group * Handle Unauthorized exception in report client's safe\_connect() * Remove mox from unit/virt/libvirt/test\_imagebackend.py (4) * Remove mox from unit/virt/libvirt/test\_imagebackend.py (3) * Remove mox from unit/virt/libvirt/test\_imagebackend.py (2) * Do not post allocations that are zero * Remove mox from unit/compute/test\_compute\_api.py (1) * Add aggregate notification related enum values * Transform aggregate.delete notification * Transform aggregate.create notification * Added missing decorator for instance.create.error * Enable Neutron by default * Port virt.libvirt.test\_imagebackend to Python 3 * move gate hooks to gate/ * tools: Remove 'colorizer' * tools: Remove 'with\_venv' * tools: Remove 'install\_venv', 'install\_venv\_common' * tools: Remove 'clean-vlans' * tools: Remove 
'enable-pre-commit-hook' * Use JSON-Schema to validate query parameters for keypairs API * Adds support for versioned schema validation for query parameters * Remove mox from api/openstack/compute/test\_extended\_hypervisors.py * Stop using mox in compute/test\_server\_actions.py * Remove mox from unit/api/openstack/compute/test\_cloudpipe.py * Add support matrix for attach and detach interfaces * Make last remaining unit tests work with Neutron by default * Make test\_metadata pass with CONF.use\_neutron=True by default * Make test\_nova\_manage pass with CONF.use\_neutron=True by default * Stub out os\_vif.unplug in libvirt instance destroy tests * Make test\_attach\_interfaces work with use\_neutron=True by default * Make test\_floating\_ip\* pass with CONF.use\_neutron=True by default * Make several API unit tests pass with CONF.use\_neutron=True by default * Make test\_server\_usage work with CONF.use\_neutron=True by default * Make test\_security\_group\_default\_rules work with use\_neutron=True by default * Make test\_tenant\_networks pass with CONF.use\_neutron=True by default * Make test\_security\_groups work with CONF.use\_neutron=True by default * Make test\_virtual\_interfaces work with CONF.use\_neutron=True by default * Make test\_user\_data and test\_multiple\_create work with use\_neutron=True * Make test\_quota work with CONF.use\_neutron=True by default * Make test\_compute pass with CONF.use\_neutron=True by default * api-ref: Fix parameters in os-server-groups.inc * Remove mox in test\_block\_device\_mapping\_v1.py * placement: Do not save 0-valued inventory * Add 'disabled' to WatchdogAction field * Remove more deprecated nova-manage commands * Make servers api view load instance fault from proper cell * Add support for setting boot order in Hyper-V * Create schema generation for NetworkModel * conf: added notifications group * Missing usage next links in api-ref * [placement] start a placement\_dev doc * Stop handling differences in registerCloseCallback * Enable TestOSAPIFixture.test\_responds\_to\_version on Python 3 * pci: Clarify SR-IOV ports vs direct passthrough ports * nova-status: check for compute resource providers * doc: add recomendation for delete notifications * Move FlavorPayload to a seperate file * Remove Rules.load\_json warning * Handle unicode when dealing with duplicate aggregate errors during migration * Handle unicode when dealing with duplicate flavors during online migrations * Actually test online flavor migrations * Remove unused init\_only kwarg from wsgi app init * api-ref: add notes about POST/DELETE errors for os-tenant-networks * Remove unnecessary attrs from TenantNetworksDeprecationTest * api-ref: microversion 2.40 overview * Fix assertion in test\_instance\_fault\_get\_by\_instance * Add more field's in InstancePayload * api-ref: cleanup os-server-groups 'policies' parameter description * objects: add new field cpu\_emulator\_threads\_policy * Support filtering resource providers by aggregate membership * Resource tracker doesn't free resources on confirm resize * Stop using mox stubs in nova/tests/unit/cells * Add release note to PCI passthrough whitelist regex support * api-ref: Fix parameter type in servers-admin-action.inc * Port security group related tests to Python 3 * Add create image functional negative tests * Don't apply multi-queue to SRIOV ports * Avoid multiple initializations of Host class * placement: correct improper test case inheritance * Remove mox in tests/unit/objects/test\_instance\_info\_cache * Port compute 
unit tests to Python 3 * Fix urllib.urlencode issue in functional tests on Python 3 * Trivial fix typo * Enable network.test\_neutronv2.TestNeutronv2 on Python 3 * Enable compute.test\_compute\_mgr.ComputeManagerUnitTestCase on Python 3 * Port api.openstack.compute.test\_disk\_config to Python 3 * Updated from global requirements * Ignore 404s when deleting allocation records * nova-status: return 255 for unexpected errors * VMware: Update supported OS types for ESX 6.5 * Replace "Openstack" with "OpenStack" * Use bdm destination type allowed values hard coded * Fix BDM JSON-Schema validation * [TrivialFix] Fix comment and function name typo error * [TrivialFix] Fix comment typo error * Fix python3 issues with devstack * [1/3]Replace six.iteritems() with .items() * Fix typo * Fix misleading port delete description * conf: remove deprecated barbican options * conf: Remove 'virt' file * Trivial fix typos in api-ref * make 2.31 microversion wording better * Add soft delete wrinkle to api-ref * Add document update for get console usage * Trivial: add ability to define action description * Added missed "raises:" docstrings into numa\_get\_constraints() method * Removes unnecessary utf-8 encoding * Port test\_matchers.TestDictMatches.test\_\_str\_\_ to Python 3 * Skip network.test\_manager.LdapDNSTestCase on Python 3 * Remove mox in tests/unit/objects/test\_security\_group * Remove v2.40 from URL string in usage API docs * nova-status: add basic placement status checking * nova-status: check for cells v2 upgrade readiness * Add nova-status upgrade check command framework * rt: remove fluff from test\_resource\_tracker * rt: pass the nodename to public methods * conf: make 'default' upper case * conf: move few console opts to xenserver group * conf: remove deprecated ironic options * conf: refactor conf\_fixture.py * Add unit test for extract\_snapshot with compression enabled * Refactor the code to add generic schema validation helper * Updated from global requirements * Fix error if free\_disk\_gb is None in CellStateManager * nova-manage: squash oslo\_policy debug logging * Pre-load info\_cache when handling external events and handle NotFound * Make nova-manage cell\_v2 discover\_hosts tests use DBs * Fix nova-manage cell\_v2 discover\_hosts RequestContext * Make nova-manage emit a traceback when things blow up * XenAPI: Remove ovs\_integration\_bridge default value * rt: pass nodename to internal methods * Failing test (mac osx) - test\_cache\_ephemeral * Catch VolumeEncryptionNotSupported during spawn * Updated from global requirements * Fix exception message formatting error in test * osapi\_max\_limit -> max\_limit * Add more detail to help text for reclaim\_instance\_interval option * Added PRSM to HVType class for support PR/SM hypervisor * conf: Deprecate more nova-net options 15.0.0.0b2 ---------- * [test]Change fake image info to fit instance xml * Cleanup Newton Release Notes * Port libvirt.storage.test\_rbd to Python 3 * VMware: ensure that provider networks work for type 'portgroup' * libvirt: Stop misusing NovaException * Fix the file permissions of test\_compute\_mgr.py * Add detail to cellsv2-related release notes * Revert "Use liberty-eol tag for liberty release notes" * Fix some release notes in preparation for the o-2 beta release * Add schedule\_and\_build\_instances conductor method * libvirt: Detach volumes from a domain before detaching any encryptors * libvirt: Flatten 'get\_domain' function * fakelibvirt: Remove unused functions * libvirt: Remove slowpath listing of
instances * Only return latest instance fault for instances * Remove dead begin/end code from InstanceUsageAuditLogController * Use liberty-eol tag for liberty release notes * api-ref: Fix description of os-instance-usage-audit-log * conf: fix formatting in base * Stop allowing tags as empty string * libvirt: remove hack for dom.vcpus() returning None * Add Python 3.5 functional tests in tox.ini * Simple tenant usage pagination * Modify mistake of scsi adapter type class * Remove the EC2 compatible API tags filter related codes * Port virt vmwareapi tests to Python 3 * Mark sibling CPUs as 'used' for cpu\_thread\_policy = 'isolated' * Added missed "raises:" docstrings into numa\_get\_constraints() method * Changed NUMACell to InstanceNUMACell in test\_stats.py * TrivialFix: changed log message * api-ref: Fix 'id' (attachment\_id) parameters * Move tags validation code to json schema * Let nova-manage cell\_v2 commands use transport\_url from CONF * Make test\_create\_delete\_server\_with\_instance\_update deterministic * restore locking in notification tests * Remove mox from unit/compute/test\_compute\_api.py(2) * Deprecate compute options * Remove support for the Cinder v1 API * Make simple\_cell\_setup fully idempotent * Corrects the type of a base64 encoded string * Fix instructions for running simple\_cell\_setup * Quiet unicode warnings in functional test\_resource\_provider * conf: Detail the 'injected\_network\_template' opt * Add more description for rx and tx param * move rest\_api\_version\_history.rst to compute layer * Enhance PCI passthrough whitelist to support regex * Better wording for micorversion 2.36 * Port test\_servers to py3 * Catch InstanceNotFound exception * Remove mox in tests/unit/objects/test\_compute\_node * Refactor REGEX filters to eliminate 500 errors * Fix crashing during guest config with pci\_devices=None * Provide an online data migration to cleanup orphaned build requests * Add SecurityGroup.identifier to prefer uuid over name * Setup CellsV2 environment in base test * conf: add warning for vm's max delete attempts * Cleanup after any failed libvirt spawn * Guestfs handle no passwd or group in image * Return 400 when name is more than 255 characters * Check that all JSON files don't have \r\n in line * Enable test\_bdm.BlockDeviceMappingEc2CloudTestCase on Python 3 * network id is uuid instead of id * fix for auth during live-migration * Don't trace on ImageNotFound in delete\_image\_on\_error * Cascade deletes of RP aggregate associations * Make resource provider objects not remotable * Bump prlimit cpu time for qemu from 2 to 8 * test: drop unused config option fake\_manager * conf: Remove config option compute\_ manager * Extend get\_all\_by\_filters to support resource criteria * Port test\_virt\_drivers to Python 3 * Don't use 'updated\_at' to check service's status * libvirt: Fix initialising of LVM ephemeral disks * Remove extra ^M for json file * Port virt.disk.mount.test\_nbd to Python 3 * Remove unnecessary comment of BDM validation * Update ironic driver get\_available\_nodes docstring * api-ref: note that os-virtual-interfaces is nova-network only * Fix up non-cells-aware context managers in test\_db\_api * Add SingleCellSimple fixture * [proxy-api] microversion 2.39 deprecates image-metadata proxy API * Make RPCFixture support multiple connections * tests: avoid starting compute service twice in sriov functional test * tests: generate correct pci addresses for fake pci devices * Fix nova-serialproxy when registering cli options * Updated 
from global requirements * Revert "reduce pep8 requirements to just hacking" * conf: Improve help text for network options * conf: Deprecate all nova-net related opts * libvirt: Mock imagebackend template funcs in ImageBackendFixture * libvirt: Combine injection info in InjectionInfo * Fix misuse of assertTrue * Return 400 when name is more than 200 characters * Replace the assertEqual(None,A) with assertIsNone(A) * Rename few tests as per new config options * Handle MarkerNotFound from cell0 database * Removed unused ComputeNode create/update\_inventory methods * Fix a typo in a comment in microversion history * Handle ImageNotFound exception during instance backup * Add a CellDatabases test fixture * Pass context as kwarg instead of positional arg to get\_engine * Transform instance.snapshot notifications * libvirt: virtlogd: use virtlogd for char devices * libvirt: create consoles in an understandable/extensible way * Add more log when delete orphan node * libvirt: Add comments in \_hard\_reboot * Update cors-to-versions-pipeline release note * Unity the comparison of hw\_qemu\_guest\_agent * Add metadata functional negative tests * Require cellsv2 setup before migrating to Ocata * Improving help text for xenapi\_vmops\_opts * convert libvirt driver to use os-vif for vhost-user with ovs * Handle ComputeHostNotFound when listing hypervisors * Improve the error message for failed RC deletion * refactor: move down \`\`dev\_number\`\` in xenapi * Fix placement API version history 1.1 title * placement: Perform build list of standard classes once * placement: REST API for resource classes * Add a retry loop to ResourceClass creation * conf: Remove deprecated service manager opts * support polling free notification testing * conf: Standardize formatting of virt * Updated from global requirements * Remove invalid tests for config option osapi\_compute\_workers * placement: adds ResourceClass.save() * Add CORS filter to versions pipeline * Create hyperv fake images under proper directory * Some improvement to the process doc * libvirt: Improve \_is\_booted\_from\_volume implementation * libvirt: Delete duplicate check when live-migrating * Add block\_device\_mapping\_v2.uuid to api-ref * Correct the sorting of datetimes for migrations * Fix pci\_alias that include white spaces * Raise DeviceNotFound detaching volume from persistent domain * Always use python2.7 for docs target * objects: Removes base code that already exists in o.vo * libvirt: Don't re-resize disks in finish\_migration() * libvirt: Never copy a swap disk during cold migration * libvirt: Rename Backend snapshot and image * libvirt: Cleanup test\_create\_configdrive * libvirt: Test disk creation in test\_hard\_reboot * libvirt: Rewrite \_test\_finish\_migration * guestfs: Don't report exception if there's read access to kernel * Fix for live-migration job * Handle maximum limit in schema for int and float type parameters * Port compute.test\_extended\_ip\* to Python 3 * Remove more tests from tests-py3.txt * Support detach interface with same MAC from instance * placement: adds ResourceClass.destroy() * Make test\_shelve work with CONF.use\_neutron=True by default * Restrict test\_compute\_cells to nova-network * Make test\_compute\_mgr work with CONF.use\_neutron=True by default * Make test\_compute\_api work with CONF.use\_neutron=True by default * Make nova.tests.unit.virt pass with CONF.use\_neutron=True by default * Make xenapi tests work with CONF.use\_neutron=True by default * Make libvirt unit tests work with 
CONF.use\_neutron=True by default * Make vmware unit tests work with CONF.use\_neutron=True * Explicitly use nova-network in nova-network network tests * Make test\_serversV21 tests work with neutron by default * neutron: handle no\_allocate in create\_pci\_requests\_for\_sriov\_ports * Add a releasenote for bug#1633518 * libvirt: prefer cinder rbd auth values over nova.conf * libvirt: cleanup network volume driver auth config * Fix wait for detach code to handle 'disk not found error' * [api-ref] Minor text clean-up, formatting * Convert live migration uri back to string * conf: improve libvirt lvm * conf: Trivial fix of indentation in 'api' * config options: improve libvirt utils * Never pass boolean deleted to instance\_create() * Port xenapi test\_xenapi to Python 3 * Port libvirt test\_driver to Python 3 * conf: Deprecate 'torrent\_' options * hacking: Use uuidutils or uuidsentinel to generate UUID * Replace uuid4() with uuidsentinel * Replace uuid4() with uuidsentinel * Replace uuid4() with uuidsentinel * Add os-start/stop functional negative tests * Port ironic unit tests to Python 3 * Port test\_keypairs to Python 3 * Port test\_metadata to Python 3 * Fix expected\_attrs kwarg in server\_external\_events * Check deleted flag in Instance.create() * Revert "Revert "Make n-net refuse to start unless using CellsV1"" * Revert "Log a warning when starting nova-net in non-cellsv1 deployments" * Default deleted if the instance from BuildRequest is not having it * docs: cleanup wording for 'SOFT\_DELETED' in api-guide * libvirt: Acquire TCP ports for console during live migration * conf: Deprecate 'remap\_vbd\_dev' option * conf: Covert StrOpt -> PortOpt * Check Config Options Consistency for xenserver.py * Add description for 2.9 microversion * Remove AdminRequired usage in flavor * Optional name in Update Server description in api-ref * List support for force-completing a live migration in Feature support matrix * Remove mox from nova/tests/unit/compute/test\_virtapi.py * Remove mox from nova/tests/unit/virt/test\_virt.py * Catch ImageNotAuthorized during boot instance * Remove require\_admin\_context * remove NetworkDuplicated exception * InstanceGroupPolicyNotFound not used anymore * UnsupportedBDMVolumeAuthMethod is not used * Port virt.xenapi.client.test\_session to Python 3 * vif: allow for creation of multiqueue taps in vrouter * conf: Move api options to a group * [scheduler][tests]: Fix incorrect aggr mock values * objects: Move 'vm\_mode' to 'fields.VMMode' * objects: Move 'hv\_type' to 'fields.HVType' * objects: Move 'cpumodel' to 'fields.CPU\*' * objects: Move 'arch' to 'fields.Architecture' * Show team and repo badges on README * Remove config option snapshot\_name\_template * Remove deprecated compute\_available\_monitors option * Improve help text for interval\_opts * config options: improve libvirt remotefs * Improve consistency in libvirt * Fix root\_device\_name for Xen * Move tag schema to parameter\_types.py * Remove tests from tests-py3.txt * hardware: Flatten functions * add host to vif.py set\_config\_\* functions * linux\_net: allow for creation of multiqueue taps * Fix notification doc generator * Config options: improve libvirt help text (2) * Placement api: Add informative message to 404 response * Remove sata bus for virtuozzo hypervisor * Fix a typo in nova/api/openstack/compute/volumes.py * Fix race in test\_volume\_swap\_server\_with\_error * libvirt: Call host connection callbacks asynchronously * conf: remove deprecated cert\_topic option * Return 
build\_requests instead of instances * conf: remove deprecated exception option * doc: Add guideline about notification payload * Port libvirt test\_imagecache to Python 3 * Port test\_serversV21 to Python 3 * encryptors: Introduce encryption provider constants * Add TODO for returning a 202 from the volume attach API * Fix typo in image\_meta.py & checks.py & flavor.py * Refactor two nearly useless secgroup tests * Transform instance.finish\_resize notifications * Remove redundant VersionedObject Fields * Transform instance.create.error notification * Transform instance.create notification * api-ref: add missing os-server-groups parameters * libvirt: prepare domain XML update for serial ports * [placement] increase gabbi coverage of handlers.resource\_provider * [placement] increase gabbi coverage of handlers.inventory * [placement] increase gabbi coverage of handlers.allocation * libvirt: do not return serial address if disabled on destination * Remove mox from api/openstack/compute/test\_fping.py * Add index on instances table across project\_id and updated\_at * Complete verification for os-floating-ips * libvirt: handle os-brick InvalidConnectorProtocol on init * placement: adds ResourceClass.get\_by\_name() * placement: adds ResourceClass.create() * Improve help text for libvirt options * Use byte string or utf8 depending on python version for wsgi * Separate CRUD policy for server\_groups * Stop using mox stubs in nova/tests/unit/virt/disk * Remove the description of compute\_api\_class option * Remove mox in virt/xenapi/image/test\_bittorrent.py * Add context param to confirm\_migration virt call * Use pick\_context\_manager throughout DB APIs * Database poison note * tests: verify cpu pinning with prefer policy * api-ref: Body verification for os-simple-tenant-usage.inc * remove additional param * Fix typo for 'infomation' * Delete checking a bool opt of None condition * Remove unused code in nova/api/openstack/wsgi.py * conf: remove deprecated cells driver option * Fix detach\_interface() call from external event handler * Implement get and set aggregates in the placement API * Add {get\_,set\_}aggregates to objects.ResourceProvider * Log a warning when starting nova-net in non-cellsv1 deployments * Revert "Make n-net refuse to start unless using CellsV1" * HyperV: use os-brick for volume related operations * INFO level logging should be useful in resource tracker * hyper-v: wait for neutron vif plug events * Remove mox in nova/tests/unit/api/openstack/compute (1) * Use available port binding constants * Rename PCS to Virtuozzo in error message * [PY3] byte/string conversions and enable PY3 test * Fix mock arg list order in test\_driver.py * Add handle for 2 exceptions in force\_delete * Typo error about help libvirt.py * Updated from global requirements * Introduce PowerVMLiveMigrateData * Make n-net refuse to start unless using CellsV1 * Store security groups in RequestSpec * api-ref: body verification for abort live migration * Fix data error in api samples doc 15.0.0.0b1 ---------- * Typo error servers.py * Typo error allocations.yaml * Refactor console checks in live migration process * Remove mox in tests/unit/objects/test\_pci\_device * Add microversion cap information * No return for flavor destroy * neutron: actually populate list in populate\_security\_groups * Clarify the approval process of specless blueprints * Add uuid field to SecurityGroup object * api-ref: body verification for force\_complete server migration * api-ref: body verification for show server migration
* api-ref: body verification for list server migrations * api-ref: example verification for server-migrations * api-ref: parameter verification for server-migrations * api-ref: method verification for server-migrations * [placement] Enforce min\_unit, max\_unit and step\_size * Remove ceph install/config functions from l-m hook * Ceph bits for live-migration job * Avoid unnecessary db\_calls in objects.Instance.\_from\_db\_object() * placement: genericize on resource providers * api-ref: fix server\_id in metadata docs * Add the initial documentation for the placement API * API Ref: update server\_id params * conf: fix formatting in wsgi * Transform requested secgroup names to uuids * conf: fix formatting in availability\_zone * libvirt: Cleanup spawn tests * Rename security\_group parameter in compute.API:create * Change database poison warning to an exception * Fix database poison warnings, part 25 * Updated from global requirements * Correct wrong max\_unit in placement inventory * Add flavor extra\_spec info link to api\_ref * Fix database poison warnings in resource providers * Placement api: 404 response do not indicate what was not found * Instance obj\_clone leaves metadata as changed * Add a no-op wait method to NetworkInfo * Move driver\_dict\_from\_config to libvirt driver * Create schema generation for AddressBase * conf: Improve help text for ldap\_dns\_opts * conf: Fix indentation of network * Fix config option types * libvirt: Fix incorrect libvirt library patching in tests * libvirt: refactor console device creation methods * libvirt: read rotated "console.log" files * libvirt: change get\_console\_output as prep work for bp/libvirt-virtlogd * Updated from global requirements * api-ref: Fix a 'port' parameter in os-consoles.inc * Update nova api.auth tests to work with newer oslo.context * Remove ironic instance resize from support matrix doc * [placement] add a placement\_aggregates table to api\_db * libvirt: remove py26 compat code in "get\_console\_output" * Change RPC post\_live\_migration\_at\_destination from cast to call * libvirt: add migration flag VIR\_MIGRATE\_PERSIST\_DEST * Revert MTU hacks for bug 1623876 * Pass MTU into os-vif Network object * Updated from global requirements * api-ref: fix addFloatingIp action docs * Fix a TypeError in notification\_sample\_base.py * Add functional api\_samples test for addFloatingIp action * Fix qemu-img convert image incompatability in alpine linux * migration.source\_compute should be unchanged after finish\_revert\_resize * Add explicit dependency on testscenarios * Updated from global requirements * cors: update default configuration in config * api-ref: remove user\_id from keypair list response and fix 2.10 * Don't parse PCI whitelist every time neutron ports are created * conf: Remove deprecated 'compute\_stats\_class' opt * conf: Remove extraneous whitespace * hardware: Split '\_add\_cpu\_pinning\_constraint' * libvirt: Delete the lase\_device of find\_disk\_dev\_for\_disk\_bus * EventReporterStub * Catch all local/catch-all addresses for IPv6 * placement: add ResourceClass and ResourceClassList * placement: raise exc when resource class not found * fix connection context manager in rc cache * pci: remove pci device from claims and allocations when freeing it * PCI: Fix PCI with fully qualified address * Log warning when user set improper config option value * libvirt: fix incorrect host cpus giving to emulator threads when RT * Transform instance.shutdown notifications * encryptors: Workaround mangled passphrases 
* Fix cold migration with qcow2 ephemeral disks * Updated from global requirements * config options: Improve help for SPICE * Remove manual handling of old context variables * api-ref: cleanup bdm.delete\_on\_termination field * api-ref: document the power\_state enum values * libvirt: Pass Host instead of Driver to volume drivers * conf: Attempt to resolve TODOs in scheduler.py * conf: Remove 'scheduler\_json\_config\_location' * Remove unreachable code * [api-ref] Fix path parameter console\_id * doc: add a note about conditional support for xenserver change password * Replace admin check with policy check in placement API * Fix import statement order * Fix database poison warnings, part 24 * libvirt: sync time on resumed from suspend instances * Fix database poison warnings, part 23 * Add RPC version aliases for Newton * Transform instance.unpause notifications * Catch NUMA related exceptions in create server API method * Notification object version test depends on SCHEMA * Updated from global requirements * Virt: add context to attach and detach interface * Imported Translations from Zanata * Stop using mox stubs in test\_shelve.py * Fix SAWarning in TestResourceProvider * Transform instance.unshelve notifications * TrivialFix: Fixed typo in 'MemoryPageSizeInvalid' exception name in docstrings * Make build\_requests.instance MediumText * Use six.wraps * Transform instance.resume notifications * Transform instance.shelve\_offload notifications * api-ref: fix image GET response example * Fix exception raised in exception wrapper * Add missing compat routine for Usage object * Updated from global requirements * Transform instance.power\_off notifications * conf: Removed TODO note and updated desc * Set 'last\_checked' flag if start to check scheduler file * Remove bandit.yaml in favor of defaults * Pre-add instance actions to avoid merge conflicts * Add swap volume notifications (error) * libvirt: add supported vif types for virtuozzo virt\_type * fix testcase test\_check\_can\_live\_migrate\_dest\_fills\_listen\_addrs * doc: Integrate oslo\_policy.sphinxpolicygen * Using get() method to prevent KeyError * tests: verify pci passthrough with numa * tests: Adding functional tests to cover VM creation with sriov * [placement] Add support for a version\_handler decorator * pci: in free\_device(), compare by device id and not reference * Mention API V2 should no longer be used * doc: Update libvirt-numa guide * Remove deprecated nova-manage vm list command * Remove block\_migration from LM rollback * PCI: Avoid looping over PCI devices twice * Update docs for serial console support * Remove conductor local api:s and 'use\_local' config option * Cleanup before removal of conductor local apis * compute: fixes python 3 related unit tests * XenAPI: Fix VM live-migrate with iSCSI SR volume * Fix the scope of cm in ServersTestV219 * Explicitly name commands target environments * \_run\_pending\_deletes does not need info\_cache/security\_groups * Updated from global requirements * hardware: Standarized flavor/image meta extraction * Tests: improve assertJsonEqual diagnostic message * api-ref: Fix wrong parameters in os-volumes.inc * Remove mox from unit/virt/libvirt/test\_imagebackend.py (1) * Send events to all relevant hosts if migrating * Catch error and log warning when not able to update mtimes * Clarify what changed with scheduler\_host\_manager * Add related options to floating ip config options * Correct bug in microversion headers in placement * Ironic Driver: override 
get\_serial\_console() * Updated from global requirements * Drop deprecated support for hw\_watchdog\_action flavor extra spec * Remove watchdog\_actions module * Removal of tests with different result depending on testing env * Add debug to tox environment * Document experimental pipeline in Nova CI * Update rolling upgrade steps from upgrades documentation * Add migrate\_uri for invoking the migration * Fix bug in "nova/tests/unit/virt/test\_virt\_drivers.py" for os-vif * Remove redundant req setting * Changed the name of the standard resource classes * placement: change resource class to a StringField * Remove nova/openstack/\* from .coveragerc * Remove deprecated nova-all binary * Fix issue with not removing rbd rescue disk * Require WebOb>=1.6.0 * conf: Remove deprecated \`\`use\_glance\_v1\`\` * Adding hugepage and NUMA support check for aarch64 * hacking: Use assertIs(Not), assert(True|False) * Use more specific asserts in tests * Add quota related tables to the api database * doc: add dev policy about no new metrics monitors * Always use python2.7 for functional tests * doc: note the future of out of tree support * docs: update the Public Contractual API link * Remove \_set\_up\_controller() from attach tests * Add InvalidInput handling for attach-volume * placement: add cache for resource classes * placement: add new resource\_classes table * hardware: Rework docstrings * doc: Comment on latin1 vs utf8 charsets * Improve help text for libvirt options * block\_device: Make refresh\_conn\_infos py3 compatible * Add swap volume notifications (start, end) * Add a hacking rule for string interpolation at logging * Stop using mox stubs in test\_snapshots.py * Stop using mox from compute/test\_multiple\_create.py * Don't attempt to escalate nova-manage privileges * Improve help text for upgrade\_levels options * Remove dead link from notification devref * Stop using mox stubs in test\_evacuate.py * Tests: fix a typo * ENOENT error on '/dev/log' * Patch mkisofs calls * conf: Group scheduler options * conf: Move consoleauth options to a group * Fix exception due to BDM race in get\_available\_resource() * Delete traces of in-progress snapshot on VM being deleted * Add error handling for delete-volume API * Catch DevicePathInUse in attach\_volume * Enable release notes translation * Fix drop\_move\_claim() on revert resize * Updated from global requirements * Fix API doc for os-console-auth-tokens * tests: avoid creation of instances dir in the working directory * config options: improve libvirt imagebackend * libvirt: fix DiskSmallerThanImage when block migrate ephemerals * Remove unnecessary credential sanitation for logging * Replace uuid4() with uuidsentinel * Change log level to debug for migrations pairing * Remove the duplicated test function * Move get\_instance() calls from try-except block * Allow running db archiving continuously * Add some extra logging around external event handling * Fix a typo in driver.py * Avoid Forcing the Translation of Translatable Variables * Fix database poison warnings, part 21 * libvirt: Fix BlockDevice.wait\_for\_job when qemu reports no job * Stop using mox from compute/test\_used\_limits.py * Updated from global requirements * Remove mox from tests/unit/conductor/tasks/test\_live\_migrate.py(3) * Remove mox from tests/unit/conductor/tasks/test\_live\_migrate.py(2) * Remove mox from tests/unit/conductor/tasks/test\_live\_migrate.py(1) * Fix calling super function in setUp method * refresh instances\_path when shared storage used * Prevent us from 
sleeping during DB retry tests * Fix error status code on update-volume API * conf: Trivial cleanup of console.py * conf: Trivial cleanup of compute.py * conf: Trivial cleanup of 'cells' * conf: Deprecate all topic options * Updated from global requirements * Disable 'supports\_migrate\_to\_same\_host' HyperV driver capability * Fix periodic-nova-py{27,35}-with-oslo-master * Report actual request\_spec when MaxRetriesExceeded raised * Make db archival return a meaningful result code * Remove the sample policy file * libvirt/guest.py: Update docstrings of block device methods * Fix small RST markup errors * [Trivial] changes tiny RST markup error * Add get\_context helper method * Use gabbi inner\_fixtures for better error capture * Hyper-V: Fixes os\_type image property requirement * conf: Cleanup of glance.py * conf: Move PCI options to a PCI group * Add Apache 2.0 license to source file * Updated from global requirements * Make releasenotes reminder detect added and untracked notes * [placement] reorder middleware to correct logging context * Fixes RST markup error to create a code-box * libvirt: support user password settings in virtuozzo * Removing duplicates from columns\_to\_join list * Ignore BuildRequest during an instance reschedule * Remove stale pyc files when running the cover job * Add a post-test-hook to run the archive command * [placement] ensure that allow headers are native strings * Fix a few typos in API reference * Fix typo on api-ref parameters * Fix typo in comment * Remove mox in nova/tests/unit/compute/test\_shelve.py (1) * Let schema validate image metadata type and key lengths * Remove scheduled\_at attribute from instances table * Fix database poison warnings, part 22 * Archive instance-related rows when the parent instance is deleted * Unwind circular import issue with api / utils * Fix database poison warnings, part 18 * Remove context object in oslo.log method * libvirt: pick future min libvirt/qemu versions * Improve consistency in serial\_console * conf: Improve consistency in scheduler opts * Move notification\_format and delete rpc.py * config options: improve libvirt smbfs * Fix database poison warnings, part 17 * Updated from global requirements * Fix database poison warnings, part 16 * Hyper-V: Adds Hyper-V UEFI Secure Boot * Stop overwriting thread local context in ClientRouter * Cleanup some redundant USES\_DB\_SELF usage * Fix database poison warnings, part 20 * Fix database poison warnings, part 19 * use proper context in libvirt driver unit test * Renamed parameters name in config.py * [placement] Allow both /placement and /placement/ to work * numa: Fixes NUMA topology related unit tests * VMware: Do not check if folder already exists in vCenter * libvirt: fixes python 3 related unit tests * Clean up stdout/stderr leakage in cmd testing * Capture stdout in for test\_wsgi:test\_debug * Add destroy method to the RequestSpec object * Remove last sentence * VMware: Enforce minimum vCenter version of 5.5 * test:Remove unused method \_test\_get\_test\_network\_info * Determine disk\_format for volume-backed snapshot from schema * Fix database poison warnings, part 15 * Fix CONTAINER\_FORMATS\_ALL to have ova insteadk of vmdk * Config options consistency of ephemeral\_storage.py * docs: Clarify sections & note on filter scheduler * Fixes python 3 unit tests * Add Hyper-V storage QoS support * Add blocker migration to ensure for newton online migrations * hacking: Always use 'assertIs(Not)None' * Hyper-V: fix image handling when shared storage is being 
used * Annotate online db migrations with cycle added * properly capture logging during db functional tests * [placement] 404 responses do not cause exception logs * Fix pep8 E501 line too long * Remove unused code * Replace uuid4() with generate\_uuid() from oslo\_utils * Return instance of Guest from method write\_instance\_config * Mock.side\_effects does not exist, use Mock.side\_effect instead * Remove redundant str typecasting * VMware: deprecate wsdl\_location conf option * Remove nova.image.s3 and configs * Remove internal\_id attribute from instances table * Fix stdout leakage during opportunistic db tests * Updated from global requirements * Improve help text for glance options * libvirt: ignore conflict when defining network filters * Add placeholder DB migrations for Ocata * Remove PCI parent\_addr online migration * Make nova-manage online migrations more verbose * Fix check\_config\_option\_in\_central\_place * Skip malformed cookies * Fix database poison warnings, part 14 * Standardize output capture for nova-manage tests * Work around tests that don't use nova.test as a base * Don't print to stdout when executing hacking checks * Make test logging setup fixture disable future setup * Fix typo in docsting in test\_migrations.py * Remove support for deprecated driver import * conf: Add 'deprecated\_reason' to osapi opts * Add hacking checks for xrange() * Using assertIsNone() instead of assertEqual(None) * move os\_vif.initialize() to nova-compute start * Add deprecated\_since parameter * [placement] Manage log and other output in gabbi fixure * Reduce duplication and complexity in format\_dom * Fix invalid exception mock for InvalidNUMANodesNumber * libvirt: fix serial console not correctly defined after live-migration * Add more description when service delete * trivial: Rewrap guide at 79 characters * plugins/xenserver: Add '.py' extension * conf: Fix opt indentation for scheduler.py * conf: Reorder scheduler opts * Updated from global requirements * Revert "Set 'serial' to new volume ID in swap volumes" * [placement] Adjust the name of the gabbi tests * placement: refactor instance translate function * Move wsgi-intercept to test-requirements.txt * Add missing slash to dir path * Expand feature classification matrix with gate checks * [placement] Stringify class and provider uuid in error * [api-ref] Correct parameter type * Remove default=None for config options * libvirt: cleanup never used migratable flag checking * Remove unnecessary setUp and tearDown * Remove unused parameters * Remove duplicate key from dictionary * Updated from global requirements * placement: refactor translate from node to dict * stub out instances\_path in unit tests * Add a new release note * XenAPI: add unit test for plugin test\_pluginlib\_nova.py * Add link ref to nova api concept doc * libvirt: Use the recreated disk.config.rescue during a rescue * Add members in InstanceGroup object members field * Updates URL and removes trailing characters * Stop ovn networking failing on mtu * Update reno for stable/newton * Don't pass argument sqlite\_db in method set\_defaults 14.0.0.0rc1 ----------- * Override MTU for os\_vif attachments * Fix object assumption in remove\_deleted\_instances() * Add is\_cell0 helper method * Set a bigger TIMEOUT\_SCALING\_FACTOR value for migration tests * Update minimum requirement for netaddr * [placement] consolidate json handling in util module * Fix unnecessary string interpolation * Handle TypeError when disabling host service * Fix an error in archiving 
'migrations' table * Remove deprecated flag in neutron.py * Clean up allocation when update available resources * [placement] Mark HTTP error responses for translation * [placement] prevent a KeyError in webob.dec.wsgify * Body Verification of api-ref os-volume-attachments.inc * Add functional regression test for bug 1595962 * Use tempest tox with regex first * libvirt: add ps2mouse in choice for pointer\_model * Doc fix for Nova API Guide, added missing word * conf: Make list->dict conversion more specific * Revert "tox: Don't create '.pyc' files" * Improve help text for xenapi\_session\_opts * Improve help text for service options * Correct image.inc for heading * Complete verification for os-cloudpipe.inc * Use assertEqual() instead of assertDictEqual() * Fix typo of stevedore * [placement] functional test for report client * Add regression test for immediate server name update * Fixed suspend for PCI passthrough * libvirt: Rewrite test\_rescue and test\_rescue\_config\_drive * Guard against failed cache refresh during inventory * More conservative allocation updates * [placement] Correct serialization of inventory collections * Switching expression order within if condition * Correct sort\_key and sort\_dir parameter for flavor * Correct address, version parameter in ips.inc * Use to\_policy\_values for policy credentials * Doc fix for Nova API Guide, fixed wording * Nova shelve creates duplicated images in cells * More conservative inventory updates * Fix server group name on api-ref * Update BuildRequest if instance currently being scheduled * Fix reno for removal of nova-manage service command * Add note about display\_name in \_populate\_instance\_names * Extended description for sync\_power\_state\_pool\_size option * Use recursive obj\_reset\_changes in BuildRequest * HyperV: ensure config drives are copied as well during resizes * [placement] make PUT inventory consistent with GET * Fill destination check data with VNC/SPICE listen addresses * Revert "libvirt: move graphic/serial consoles check to pre\_live\_migration" * Fix MonitorMetric obj\_make\_compatible * Using assertIsNotNone() instead of assertIsNot(None,) * [api-ref] fix availability\_zone for server create * Fix SafeConfigParser DeprecationWarning in Python 3.2 * Set 'serial' to new volume ID in swap volumes * Fix policy tests for project\_id enforcement * neutron: don't trace on port not found when unbinding ports * Remove RateLimitFault class * Rate limit is removed , update doc accordingly * Fix a typo from ID to Id * context: change the name 'rule' to 'action' in context.can * Add description for v2.20 changes in api-ref * Add sync\_power\_state\_pool\_size option * Additional logging for placement API * Fix resizing in imagebackend.cache() * [placement] cleanup some incorrect comments * Updated from global requirements * Compute: ensure that InvalidDiskFormat is handled correctly * Add keypairs\_links into resp * Add hypervisor\_links into hypervisor v2.33 * Throw exception if numa\_nodes is not set to integer greater than 0 * Add reserved param for v2.4 * Add more description on v2.9 history * libvirt: inject files when config drive is not requested * Pin maximum API version of microversion * XenAPI: resolve the fetch\_bandwidth failure * Fix api-ref doc for server-rebuild * [api-ref] Update configuration file * fix broken link in api-ref * Trivial fix remove not used var in parameters * Trival fix a typo * Increase BDM column in build\_requests table * VMware: Refactor the image transfer * Pass GENERATE\_HASHES 
to the tox test environment * [placement] add two ways to GET allocations * Handle ObjectActionError during cells instance delete * [placement] Add some tests ensuring unicode resource provider info * cleanup: separate the creation of a local root to it's own method * standardize release note page ordering * Remove misleading warning message * Add deprecated\_reason for use\_usb\_tablet option * db: retry on deadlocks while adding an instance * virt: handle unicode when logging LifecycleEvents * Ensure ResourceProvider/Inventory created before add Allocations record * Libvirt: Correct PERF\_EVENTS\_CPU\_FLAG\_MAPPING * Enable py3 tests for unit.api.openstack.compute.test\_console\_output * Implement setup\_networks\_on\_host for Neutron networks * Add tests for safe\_connect decorator * libvirt: improve logging for shared storage check * Cleanup allocation todo items * [placement] Allow inventory to violate allocations * Refresh info\_cache after deleting floating IP * Remove deprecated configuration option network\_device\_mtu * Example & Parameter verification of os-security-group-default-rules.inc * [placement] clean up some nits in the requestlog middleware * correctly join the usage to inventory for capacity accounting * Annotate db models that have moved to the nova\_api db * Stop using mox in virt/libvirt/test\_imagecache.py * Stop using mox in unit/fake\_processutils.py * [api-ref]: Correcting server\_groups\_list parameter's type * Fix race condition bug during live\_snapshot * ironic: Rename private methods for instance info * [placement] Fix misleading comment in wsgi loader * Remove mox from api/openstack/compute/test\_networks.py * Remove mox from api/openstack/compute/test\_rescue.py * Remove mox from api/openstack/compute/test\_image\_size.py * Remove mox from api/openstack/compute/test\_extended\_ips.py * Remove mox from nova/tests/unit/virt/xenapi/test\_driver.py * Remove mox from unit/api/openstack/compute/test\_hide\_server\_addresses.py * fixing block\_device\_mapping\_v2 data\_type * Updated from global requirements * Add bigswitch command to compute rootwrap filters * libvirt: add hugepages support for Power * incorrect description in nova-api.log about quota check * Removed enum duplication from nova.compute * Remove unused conf 14.0.0.0b3 ---------- * Remove deprecated cinder options * Simple instance allocations from resource tracker * Add support for allocations in placement API * Add create\_all and delete\_all for AllocationList * Pull from cell0 and build\_requests for instance list * Remove hacked test that fails with latest os-brick * Report compute node inventories through placement * Delete BuildRequest regardless of service\_version * Fix service version lookups * Remove BuildRequest when scheduling fails * Run cell0 db migrations during nova-manage simple\_cell\_setup * Move cell message queue switching and add caching * Add basic logging to placement api * Fixed indentation * Update placement config reno * Ignore generated merged policy files * Register keystone opts for placement sample config * Remove deprecated neutron options * ironic\_host\_manager: fix population of instances info on start * Eliminate additional DB queries in nova lists * Remove the incomplete wsgi script placement-api.py * ironic\_host\_manager: fix population of instances info on schedule * rt: ensure resource provider records exist from RT * Allow linear packing of cores * Return 400 error for non-existing snapshot\_id * create placement API wsgi entry point * Fix qemu version 
check * Documentation for the vendordata reboot * Add more vd2 unit tests * Add a TODO and add info to a releasenote * [placement] remove a comment that is no longer a todo * Make api-ref bug link point to nova * Api-ref: Improve os-migrateLive input parameters * Fix a typo in the driver.py file * New discover command to add new hosts to a cell * Clean up instance mappings, build requests on quota failure * Not allow overcommit ratios to be negative * Updated from global requirements * Use StableObjectJsonFixture from o.vo * test\_keypairs\_list\_for\_different\_users for v2.10 * Fix using filter() to meet python2,3 * Emit warning when use 'user\_id' in policy rule * Adds nova-policy-check cmd * Reduce code complexity - api.py * Use cls in class method instead of self \_delete\_domain is a class method, so cls should be used instead of self * Revert "Optional separate database for placement API" * Changed exception catching order * Add BuildRequestList object * In InventoryList.find() raise NotFound if invalid resource class * Updated from global requirements * Imported Translations from Zanata * TrivialFix: Remove cfg import unused * Add oslopolicy script runs to the docs tox target * Add entry\_point for oslo policy scripts * Tests: use fakes.HTTPRequest in compute tests * Remove conversion from dict to object from xenapi live\_migration * Hyper-V: properly handle shared storage during migrations * TrivialFix: Remove logging import unused * Hyper-V: properly handle UNC instance paths * Get ready for os-api-ref sphinx theme change * Update link in general purpose feature matrix * List system dependencies for running common tests * [api-ref]: Update link reference * Abort on HostNotCompatibleWithFixedIpsClient * Add warning if metadata\_proxy\_shared\_secret is not configured * devspec: remove unused dev\_count in devspec * TrivialFix: removed useless storing of sample directory * [api-guide]: Update reference links * Fix link reference in Nova API version * Provide more duplicate VLAN network error info * Correct microversions URL in api\_plugins.rst * Create Instance from BuildRequest if not in a cell * Added todo for deletion LiveMigrateData.detect\_implementation usage * driver.pre\_live\_migration migrate\_data is always an object * Manage db sync command for cell0 * Updated common create server sample request because of microversion 2.37 * Remove TODO for service version caching * removed db\_exc.DBDuplicateEntry in bw\_usage\_update * Add online migration to move instance groups to API database * Remove locals() for formatting strings * Hyper-V: update live migrate data object * Config options consistency of notifications.py * Add networks to quota's update json-schema when network quota enabled * rt: isolate report and query sched client tests * rt: remove ComputeNode.create\_inventory * rt: rename test\_tracker -> test\_resource\_tracker * rt: remove old test\_resource\_tracker.py * Updated from global requirements * Remove deprecated security\_group\_api config option * Added min\_version field to 'host\_status' in 'api-ref' * Make InstanceGroup object favor the API database * Doc: Update PCI configuration options * Don't maintain user\_id and project\_id in context * Add support for usages in the placement API * Add a Usage and UsageList object * Add support for inventories to placement API * Check capacity and allocations when changing Inventory * Add release note to warn about os-brick lock dir * config options: improve help netconf * Config options consistency for 
consoleauth.py * Support Identity v3 when connecting to Ironic * Copy edit feature classification * don't report network limits after 2.35 * Adding details in general purpose feature matrix [1] * Improve placement API 404 and 405 response tests * doc: fix disk=0 use case in flavor doc * Config options: improve libvirt help text (1) * Dump json for nova.network.model.Model objects * Improve error message for empty cached\_nwinfo * Return HTTP 400 on list for invalid status * Move some flavor fakes closer to where they are being used * Replace flavors.get\_all\_flavors\_sorted\_list() with object call * Refactor and objectify flavor fakes used in api tests * Fix 'No data to report' error * Change api-site to v2.1 format * Refuse to run simple\_cell\_setup on CellsV1 * In placement API send microversion header when error * libvirt: Improve mocking of imagebackend disks * Updated flags for XVP config options * Add unit tests for nova.virt.firewall.IpTablesFirewallDriver (Part 4) * [libvirt] Remove live\_migration\_flag & block\_migration\_flag * placement: add filtering by attrs to resource\_providers * Add support for resource\_providers urls * Remove nova/api/validator.py * Updated from global requirements * Change default value of live\_migration\_tunnelled to False * Remove code duplication in enums * [vncproxy] log for closing web is misleading * Return None in get\_instance\_id\_by\_floating\_address * Make simple\_cell\_setup work when multiple nodes are present * Add REST API support for get me a network * plugins/xenserver: Resolve PEP8 issues * Fix migration list + MigrationList operation * rt: Create multiple resize claim unit test * rt: Refactor unit test for trackable migrations * VIF: add in missing translation * Clean imports in code * Fix neutron security group tests for 5.1.0 neutronclient * modify description of "Inject guest networking config" * os-vif: do not set Route.interface if None * Check opt consistency for neutron.py * Improve help text for compute manager options * Make simple\_cell\_setup idempotent * Add cell\_v2 verify\_instance command * Remove unnecessary debug logs of normal API ops * Replace mox with mock in test\_validate\_bdm * Replace mox with mock in test\_cinder * Allow Nova Quotas to be Disabled * Allow authorization by user\_id for server evacuate * Allow authorization by user\_id for server update * Allow authorization by user\_id for server delete * Allow authorization by user\_id for server changePassword action * Update binding:profile for SR-IOV ports on resize-revert * Verified deprecation status for vnc options * Add tests for user\_id policy enforcement on trigger\_crash\_dump * Allow authorization by user\_id for server shelve action * Allow authorization by user\_id for force\_delete server * Allow authorization by user\_id for server resize action * Allow authorization by user\_id for server pause action * Add tests for user\_id policy enforcement on stop * Fix consistency in crypto conf * Add placement API web utility methods * Improve help text for XenServer Options * Improve help text for xenapi\_vm\_utils\_opts * network: fix handling of linux-bridge in os-vif conversion * Fix consistency in API conf * Improve consistency in WSGI opts * Add unit tests for nova.virt.firewall.IpTablesFirewallDriver (Part 3) * Improve help text for xenapi\_opts * Maintain backwards compat for listen opts * Allow authorization by user\_id for server rescue action * Allow authorization by user\_id for server rebuild * Allow authorization by user\_id for 
server suspend action * Allow authorization by user\_id for server lock action * Optional separate database for placement API * Replace fake\_utils by using Fixture * virt/image: between two words without a space in output message * config options: improve help text of database (related) options (2/2) * config options: improve help text of database (related) options (1/2) * Remove hacking check [N347] for config options * Skipping test\_volume\_backed\_live\_migration for live\_migration job * rt: New unit test for rebuild\_claim() * List instances for secgroup without joining on rules * Improve help text for vmwareapi\_opts * Updated from global requirements * vnc host options need to support hostnames * Removed flag "check\_opt\_group\_and\_type" from pci.py * Removed flag "check\_opt\_group\_and\_type" * libvirt: convert over to use os-vif for Linux Bridge & OVS * Remove left over conf placeholders * libvirt: Rename import of nova.virt.disk.api in driver * Fix server operations' policies to admin only * Add support for vd2 user context to other drivers * api-ref: Example verification for os-simple-tenant-usage.inc * Remove unused exception: ImageNotFoundEC2 * Fix opt description for s3.py * virt/hardware: Check for threads when "required" * Improve consistency in VNC opts * Improve help text for compute\_opts * Config options: Improve help text for console options * Config options: Consistency check for remote\_debug options * docs: update code-review guide for config options * Add separate create/delete policies to attach\_interface * Fix handling of status in placement API json\_error\_formatter * Use constraints for all tox environments * Move JSON linting to pep8 * HyperV: remove instance snapshot lock * rt: Move monitor unit tests into test\_tracker * rt: Move unit tests for update usage for instance * rt: Move unit tests for update mig usage * rt: Remove useless unit test in resource tracker * rt: Remove dup tests in test\_resource\_tracker * rt: Remove incorrect unit test of resize revert * rt: Refactor test\_dupe\_filter unit test * rt: Remove duplicate unit test for missing mig ctx * rt: Refactor resize claim abort unit test * rt: Refactor resize\_claim unit test * Set enforce\_type=True in method flags * Use constraints for releasenotes * Add some logging and a comment for shelve/unshelve operations * Run shelve/shelve\_offload\_instance in a semaphore * Check opt consistency for api.py * Allow empty CPU info of hypervisors in API response * Config options consistency of rdp.py * Improve consistency in workarounds opts * Refresh README and its docs links * Correct InventoryList model references * instance.name should be blank if instance.id is not set * Cells: Handle delete with BuildRequest * Add NoopConductorFixture * Make notification objects use flavor capacity attributes * Fix busted release notes * config options: Improve help for conductor * Config options: base path configuration * PCI: Fix network calls order on finish\_revert\_resize() * Remove deprecated legacy\_api config options * Config Options: Improve help text for Ipv6 options * Update tags for Image file url from filesystems config option * Check options consistency in hyperv.py * Improve help text for floating ips options * config options: Improve help for base * Improve consistency in API * cleanup: some update xml cases in test\_migration * Use stashed volume connector in \_local\_cleanup\_bdm\_volumes * Ironic: allow multiple compute services * api-ref: Parameter verification for 
os-simple-tenant-usage.inc * Ironic: report node.resource\_class * network: introduce helper APIs for dealing with os-vif objects * ironic: Cleanup instance information when spawn fails * update wording around pep8 exceptions * Remove backward compatibility with pre-grizzly releases * use the HostPortGroupSpec.vswitchName instead of HostPortGroup.vswitch.split * Replace functions 'Dict.get' and 'del' with 'Dict.pop' * Updated from global requirements * Strict ImageRef validation to UUID only * Add the ability to configure glanceclient debug logging * Deprecate cert option * Merged barbican and key\_manager conf files into one * Config options consistency of pci.py * config option: rename libvirt iscsi\_use\_multipath * Fix require thread policy for multi-NUMA computes * Allocate PCI devices on migration * TrivialFix: Fixed a typo in nova/test.py * Updated from global requirements * Improve help text of image\_file\_url * Ironic: enable multitenant networking * libvirt: Remove some unnecessary mocking in test\_driver * libvirt: Pass object to \_create\_images\_and\_backing in test * libvirt: Reset can\_fallocate in test setUp() * libvirt: Create console.log consistently * Fixed invalid UUIDs in unit tests * Remove deprecated manager option in cells.py * Refactor deallocate\_fixed tests to use one mock approach instead of three * Improve consistency in virt opts * Updated header flag in SSL opts * Updated from global requirements * Don't cache RPC pin when service\_version is 0 * Imported Translations from Zanata * Remove white space between print and () * Flavor: correct confusing error message about flavorRef * Consistency changes for osapi config options * Fixed typos in nova: compute, console and conf dir * Add objects.ServiceList.get\_all\_computes\_by\_hv\_type * Add InstanceList.get\_uuids\_by\_host() call * Conf options: updated flags for novnc * Address feedback on cell-aggregate-api-db patches * Updated from global requirements * Add data migration methods for Aggregate * Config options: Consistency check for quota options * Add server name verification in instance search * Fix typo in DeviceDetachFailed exception message * Straddle python-neutronclient 5.0 for testing * Initialise oslo.privsep early in main * Cells: Simple setup/migration command * Aggregate create and destroy work against API db * Make Aggregate.save work with the API db * Improve help text for vmware * Config options consistency of exceptions.py * Help text for the mks options * Trivial option fixes * Properly quote IPv6 address in RsyncDriver * rbd\_utils: wrap blocking calls in tpool.Proxy() * Resolve PCI devices on the host during Guest boot-up * Fixed typos in nova, nova/api, nova/cells directory * Fix misspellings * Trivial: add 'DEPRECATED' for os-certificates API ref * Mention proxy API deprecation microversion in api-ref * xenserver: fix an output format error in cleanup\_smp\_locks * Add log for instance without host field set * Improve consistency in crypto * Deprecate barbican options * Improve consistency in flavors * Improve the help text for the guestfs options * Reminder that release notes are built from commits * Add initial framing of placement API * Add missing ComputeHostNotFound exception in live-migration * Free new pci\_devices on revert-resize * Use oslo\_config new type PortOpt for port options * Updated from global requirements * Remove unused imports in api/openstack/fakes.py * Add docs about microversion testing in Tempest * Remove leftover list\_opts entry points * Remove 
nova.cache\_utils oslo.config.opts entrypoint * Remove nova.network namespace from nova-config-generator.conf * Remove neutronv2.api oslo.config.opt entry point * Follow up on Update binding:profile for SR-IOV ports * Improve consistency in servicegroup opts * Improve help text for cloudpipe * Remove the useless version calculation for proxy api deprecated version * numa: remove the redundant check for hw\_cpu/hw\_mem list * Add support for oslo.context 2.6.0 * Update tags for Cache config option * Remove unused validation code for quota\_sets * Revert "Don't assert exact to\_dict output" * cleanup\_live\_migration\_destination\_check spacing * Default image.size to 0 when extracting v1 image attributes * Add details to general purpose feature matrix * Adding functional tests for 2.3 microversion * compute: Skip driver detach calls for non local instances * libvirt: Fix invalid test data * libvirt: Fix fake \_disk\_info data in LibvirtDriverTestCase * Don't set empty kernel\_id and ramdisk\_id to glance image * Config options consistency for cell.py * Refuse to have negative console ttls * Option Consistency for availability\_zone.py * Add a small debug line to show selection location * Fix wrong override value of config option vswitch\_name * Fix wrong override value of config option proxyclient\_address * Call release\_dhcp via RPC to ensure correct host * Adjust MySQL access with eventlet * Improve consistency in cert * Updated from global requirements * rt: don't log pci\_devices twice when updating resources * Config options consistency for configdrive.py * Remove deprecated ironic.api\_version config option * Improve the help text for compute timeout\_opts * Deprecate the nova-manage commands that rely on nova-network * Improve consistency in xenserver * Add the 'min' param to IntOpts where applicable * Remove unused config option 'fake\_call' * Make Aggregate metadata functions work with API db * Use deprecated\_reason for network quota options * "nova list-extensions" not showing summary for all * Fix typos in deprecates-proxy-apis release note * Enable deferred IP on Neutron ports * Improve help text for XenServer pool opts * remove config option iqn\_prefix * Deprecate os-certificates * Update RequestSpec nested flavor when a resize comes in * New style vendordata support * Add metadata server fixture * Improve help text for quota options * Improve help text for consoleauth config options * Bump Microversion to 2.36 for Proxy API deprecation * api: use 'if else' instead of 'try exception' to get password value * Add better help to rdp options * Adding details in general purpose feature matrix * Enables Py34 tests for unit.api.openstack.compute.test\_server\_actions * Filter network related limits from limits API * Filter network related quotas out of quotas API * Deprecate Baremetal and fping API * Deprecate volumes related APIs * Deprecate SecurityGroup related proxy API * Deprecated floating ip related proxy APIs * Complete verification of os-instance-actions.inc * Check opt group and type for nova.conf.service.py * Fix links to network APIs from api-ref * Add comment about how status field changed * Fix database poison warnings, part 13 * Deprecate network quota configuration * Verify os-aggregates.inc on sample files * Cleanup: validate option at config read level * :Add missing %s in print message * api-ref: unify the no response output in delete operation * Return 400 when SecurityGroupCannotBeApplied is raised * network: handle forbidden exception from neutron * Avoid 
update resource if compute node not updated * Document update\_task\_state for ComputeDriver.snapshot * Config Option consistency for crypto.py * Fix database poison warnings, part 12 * Don't check cinder volume states during attach * Clean up test\_check\_attach\_availability\_zone\_differs * Fix database poison warnings, part 11 * Fix opt description and indentation for flavors.py * Remove redundant flag value check * Improve help context of ironic options * Update instance node on rebuild only when it is recreate * Remove unneeded bounds-checking code * Improve the help text for the linuxnet options (4) * Don't assert exact to\_dict output * Fix database poison warnings, part 10 * config options: help text for enable\_guestfs\_debug\_opts * Fix database poison warnings, part 9 * Improve help text of s3 options * Remove deprecated config option volume\_api\_class * Fix inappropriate notification send * libvirt: Fix signature and behaviour of fake get\_disk\_backing\_file * libvirt: Pass path to Image base class * Remove max\_size argument to images.fetch and fetch\_to\_raw * Update tox.ini: Constraints are possible for api\* jobs * Separate api-ref for list security groups by server * Deprecate FixedIP related proxy APIs * Deprecated networks related proxy APIs * Check option descriptions and indentations for configdriver.py * Make Aggregate host operations work against API db * libvirt: open RBD in read-only mode for read-only operations * Remove unnecessary code added for ec2 deprecation * Enhance notification doc generation with samples * Depracate Images Proxy APIs * Correct the network config option help text * config options: improve help for noVNC * Replace deprecated LOG.warn with LOG.warning * Fixed typos in api-ref and releasenotes directory * Fix invalid import order and remove import \* * Improve the help text for the network options (4) * Add async param to local conductor live\_migrate\_instance * libvirt: update guest time after suspend * libvirt: Modify the interface address object assignment * Update binding:profile for SR-IOV ports * Port nova test\_serversV21.Base64ValidationTest to Python 3 * Refactor instance action notification sample test * Config option update tasks for availability\_zone * Expand initial feature classification lists * Add prototype feature classification matrix * [libvirt] Live migration fails when config\_drive\_format=iso9660 * Modify docstring of numa\_get\_reserved\_huge\_pages method * Use constraints for coverage job * Remove compute host from all host aggregates when compute service is deleted * Fix incorrect cellid numbering for NUMA memnode * Fix opt descripton for cells.py * Fix host mapping saving * Example and body verification of os-quota-sets.inc * Remove deprecated network\_api\_class option * neutron: destroy VIFs if allocating ports fails * Validate pci\_passthrough\_whitelist when starting n-cpu * Rename compute manager \_check\_dev\_name to \_add\_missing\_dev\_names * Remove unused context argument to \_default\_block\_device\_names() * Fix typo in AdminPasswordController 14.0.0.0b2 ---------- * Use from\_environ when creating a context * Pass kwargs through to base context * Fix opt description and check deprecate status for hyperv.py * VMware: Enable disk.EnableUUID=True in vmx * hyper-v: device tagging * Add release notes for notification transformation * Assert reservation\_id in notification sample test * Remove redundant DEPRECATED tag from help messages * Fix PUT server tag 201 to return empty content * Clean up helper 
methods in ResourceProvider * Transform instance.restore notifications * neutron: delete VIFs when deallocating networking * Add VirtualInterface.destroy() * Make notifications module use flavor capacity attributes * Make ironic driver use flavor fields instead of legacy ones * Make xenapi driver use flavor fields instead of legacy ones * Make libvirt driver use flavor fields instead of legacy ones * Make hyperv driver use flavor fields instead of legacy ones * Make vmware driver use flavor fields instead of legacy ones * Bump service version for BuildRequest deletion * Stop instance build if BuildRequest deleted * Add block\_device\_mappings to BuildRequest * Improve help text of flavors config options * Improve help text for cinder config options * Microversion 2.35 adds keypairs pagination support * Fix up legacy resource fields in simple-tenant-usage * Use flavor attributes instead of deprecated instance resources * Typo fix: remove multiple whitespace * network: handle unauthorized exception from neutron * Fix the broken links * 'limit' and 'marker' support for db\_api and keypair\_obj * Improve help text for exceptions * Improve help text for compute running\_deleted\_opts * rest api version bumped for async pre live migration checks * Add user\_id request parameter in os-keypairs list * Revert "Detach volume after deleting instance with no host" * Don't overwrite MarkerNotFound error message * tox: Use conditional targets * tox: Don't create '.pyc' files * Improve help text for allocation\_ratio\_opts * Release note for vzstorage volume driver * Fix typo in \_update\_usage\_from\_migrations * Transform instance.resize notifications * Refactors nova.cmd utils * Replace DOS line ending with UNIX * migration volume failed for invalid type * api-ref: fix wrong description about response example in os-hypervisor * api-ref: body verification of os-agents * Fix wrong JSON format in API samples * Implement ResourceProvider.destroy() * Add Allocation and AllocationList objects * Deprecate nova-manage vm list command * Remove live-migration from nova-manage man page * Deprecate the quota\_driver config option * Allow irrelevant,self-defined specs in ComputeCapacityFilter * Transform instance.pause notifications * Fix opt description for scheduler.py * Verify "needs:check\_deprecation\_status" for serial\_console.py * API: catch InstanceNotReady exception * Transform instance.shelve notifications * Replace unicode with six.text\_type * Added support for new block device format in vmops * XenAPI: add unit test for plugin bandwidth * api-ref: unify the delete response infomation * Add nova-manage quota\_usage\_refresh command * Quota changes for the nova-manage quota\_usage\_refresh command * Remove DictCompat from SecurityGroup * Replace use of eval with ast.literal\_eval * libvirt: fix missed test in migration * Improve the help text for the network options (3) * Correct reraising of exception * api-ref: Parameter verification for servers-actions.inc Part 1 * Body verification of os-interface.inc * Parameter verification of os-instance-actions.inc * xvp: change the default xvp conf path to CONF.xvp group * libvirt:code flow problem in wait\_for\_job * Clean up service version history comments * Add a ResourceProviderList object * Refactor block\_device\_mapping handling during boot * Remove spaces around keyword argument * Use ovo in test\_obj\_make\_compatible() * Improve the help text for the network options (2) * Update mutable-config reno with LM timeout params * Added better error 
messages during (un)pinning CPUs * Remove duplicate policy test * Complete verification for os-virtual-interfaces * api-ref: os-volumes.inc * Enable python34 tests for nova.tests.unit.pci.test\_manager and test\_stats * api-ref: merge multiple create to servers.inc * Improve the help text for configdrive options * Revert "Remove manual creation of console.log" * Fix invalid import order * Fix invalid import order * Fix invalid import order * config options: improve help for notifications * Fix invalid import order * Fix invalid import order * Remove unused itype parameter from get migration context * Do not try to backport when db has older object version * Detach volume after deleting instance with no host * Transform instance.suspend notifications * Hacking check for \_ENFORCER.enforce() * Remove final use of \_ENFORCER.enforce * Hacking check for policy registration * Extract \_update\_ports\_for\_instance * Extract port create from allocate\_for\_instance * Improve help text for resource tracker options * Transform instance.power\_on notifications * Add a py35 environment to tox * api-ref: add note about os-certificates API * XenAPI: UT: Always mock logging configuration * Fix api\_validation for Python 3 * api-ref: verify assisted-volume-snapshots.inc * Delete reduplicate code in test\_compute\_mgr.py * Port test\_hacking to Python 3 * Fix comment for version 1.15 ComputeNodeList * Microversion 2.33 adds pagination support for hypervisors * VMware: create vif with resource limitations * policy: clean-up * Make VIF.address unique with port id for neutron * Device tagging metadata API support * trivial: remove unnecessary mock from servers API test * Return HTTP 200 on list for invalid status * Complete verification for os-floating-ips-bulk * Transform instance.update notification * Pre-add instance actions to avoid merge conflicts * Transform instance.delete notifications * XenAPI: Add UT for independent compute option * Log DB exception if VIF creation fails * Fixes compute API unit tests for python3 * Reduce complexity in \_stub\_allocate\_for\_instance * Reorder allocate\_for\_instance preamble * Make \_validate\_requested\_network\_ids return a dict * Extract \_validate\_requested\_network\_ids * Create \_validate\_requested\_port\_ids * Extract \_filter\_hypervisor\_macs * Always call port\_update in allocate\_for\_instance * Device tagging API support * Mapping power\_state from integer to string * Compute manager device tagging support * trivial: comment about vif object address field * Example verification for os-fixed-ips.inc * Revert "Detach volume after deleting instance with no host" * policy: Replaces 'authorize' in nova-api (part 5) * libvirt: add todo about bdms in \_build\_device\_metadata * libvirt: virtuozzo instance rescue mode support * api-ref: os-certificates.inc * policy: Replaces 'authorize' in nova-api (part 4) * Make LM timeout params mutable * Help text for the ephemeral storage options * Config Options: Improve help text for debugger * Make Ironic options definitions consistent * Fix some typos * Add namespace oslo.db.concurrency in nova-config-generator.conf * Remove mox in tests/unit/objects/test\_quotas * Remove network information from IOVisor vif * Add automatic switching to postcopy mode when migration is not progressing * Extend live-migration-force-complete to use postcopy if available * Add a test utility for checking mock calls with objects * Remove invalid test for config option scheduler\_host\_manager * Complete verification for api-ref 
os-flavor-extra-specs * policy: Replaces 'authorize' in nova-api (part 3) * libvirt: Add migration support for perf event support * Libvirt driver implementation of device tagging * Add policy sample generation * Cleanup instance device metadata object code * libvirt: virtuozzo instance resize support * Fix test\_ipv6 and simplify to\_global() * Remove russian from unit/image/test\_glance.py * Py3: fix serial console output * \_security\_group\_get\_by\_names cleanup * Add reminder comments for compute rpcapi version bump * Update get\_instance\_diagnostics for instance objects * Improve help text for wsgi options * Don't immediately null host/node when shelving * Evaluate 'task\_state' in resource (de)allocation * Add new configuration option to turn auto converge on/off * Add new configuration option to turn postcopy on/off * Improve nova.rpc conf options documentation * Fix spelling mistake * Add ability to select specific tests for py34 * Remove mox from unit/compute/test\_compute.py (4) * Remove mox from unit/compute/test\_compute.py (end) * Remove mox from unit/compute/test\_compute.py (11) * Remove mox from unit/compute/test\_compute.py (10) * Remove mox from unit/compute/test\_compute.py (9) * Remove mox from unit/compute/test\_compute.py (8) * Remove mox from unit/compute/test\_compute.py (7) * Remove mox from unit/compute/test\_compute.py (6) * Remove mox from unit/compute/test\_compute.py (5) * UT: cleanup typo in libvirt test\_config * Remove mox from unit/compute/test\_compute.py (3) * Remove mox from unit/compute/test\_compute.py (2) * Remove mox from unit/compute/test\_compute.py (1) * Improve image signature verification failure notification * libvirt: attach configdrive after instance XML * libvirt: add nova volume driver for vzstorage * Moving test helpers to a common place * On port update check port binding worked * Refactor to create \_ensure\_no\_port\_binding\_failure * policy: Replaces 'authorize' in nova-api (part 2) * XenAPI: Add option for running nova independently from hypervisor * XenAPI: Stream config drive to XAPI * XenAPI: Perform disk operations in dom0 * Port test\_ipv6 to py3 and simplify to\_global() * api-ref: Example verification for os-agents.inc * Allow monitor plugins to set own metric object * api-ref: correct the order of APIs in server-tags * Remove unused LOG * Remove unnecessary \_\_init\_\_ * Release notes: fix typos * Make print py3 compatible * libvirt: fix disk size calculation for VZ container instances * Fix error message for VirtualInterfaceUnplugException * libvirt: Add boot ordering to individual disks * image\_meta: Add hw\_rescue\_device and hw\_rescue\_bus * collapse servers.ViewBuilderV21 into servers.ViewBuilder * remove personality extension * remove preserve-ephemeral rebuild extension * remove access\_ips extension * Bump the service version for get-me-a-network support * neutron: handle 'auto' network request in allocate\_for\_instance * Add unit tests for nova.virt.firewall.IpTablesFirewallDriver (Part 2) * libvirt: split out code for recovering after migration tasks * libvirt: split out code for processing migration tasks * libvirt: split off code for updating migration stats in the DB * libvirt: split off code for updating live migration downtime * api-ref: verify images.inc * libvirt: split out code for determining if migration should abort * libvirt: split out code for detecting live migration job type * policy: Replaces 'authorize' in nova-api (part 1) * Check if flavor.vcpus is more than MAX\_TAP\_QUEUES * policy: Add 
defaults in code (part 6) * objects: Add devices\_metadata to instance object * objects: new InstanceDeviceMetadata object * db: add a device\_metadata column to instance\_extra * libvirt: add perf event support when create instance * Improve help text of crypto.py * objects: adding an update method to virtual\_interface * Rename driver method check\_can\_live\_migrate\_destination\_cleanup * api-ref: added docs for microversion 2.26 * policy: Add defaults in code (part 5) * policy: Add defaults in code (part 4) * policy: Add defaults in code (part 3) * policy: Add defaults in code (part 2) * add ploop support into qemu-img info * policy: Add defaults in code (part 1) * Handle UnableToAutoAllocateNetwork in \_build\_and\_run\_instance * Add note about preserve\_ephemeral limitations * Add console auth tokens db api methods * Remove mox from unit/virt/libvirt/volume/\*.py * Port cinder unit tests to Python 3 * Port test\_pipelib and test\_policy to Python 3 * Adding missing log translation hints * Add instance groups tables to the API database * Make live migration checks async * Check for None max\_count for Python 3 compat * Updated from global requirements * fix developer docs on API * libvirt: virtlogd: use "log" element in char devices * Fix ConsoleAuthTokens to work for all console types * remove os-disk-config part 4 * remove os-disk-config part 3 * remove load\_standard\_extensions method * Modify "policy.conf" to "policy.json" * Ensures that progress\_watermark and progress\_time are updated * Add a note for policy enforcement by user\_id * XenAPI: Support neutron security group * Added instance actions for conductor * Stop using mox stubs in nova/tests/unit/test\_metadata.py * remove support for legacy v2 generator extensions * Remove duplicate unit test resource tracker * Prevent instance disk overcommit against itself * api-ref: parameter verification os-agents * make failures on api\_samples more clear * api-ref, os-services.inc * api-ref: docs for microversion v2.28 * Update dhcp\_opts on both create and update * api-ref: Improve os-instance\_usage\_audit\_log samples * Add ironic mac address when updating and creating * pci: Deprecate is\_new from pci requests * Enhance notification sample test base * Handle multiple samples per versioned notification * Transform wrap\_exception notification to versioned format * XenAPI: OVS agent updates the wrong port with Neutron * Stop using mox from unit/fake\_server\_actions.py * objects: you want'em * libvirt: enhance method to return pointer\_model from image prop * Improve help text for service group options * Updated from global requirements * Skip network allocation if 'none' is requested * Separete notification object version test * [typo] replaced comupte to compute in test * api-ref, os-availability-zone.inc * Config: no need to set default=None * Add delete\_, update\_ and add\_ inventory to ResourceProvider * libvirt: fix typos in comments * Remove the nova.compute.resources entrypoint * Re-deprecate use\_usb\_tablet config option * Log the network when neutron won't apply security groups * api-ref: parameter verification os-fixed-ips * Add CellMappingList object * Add console auth tokens table and model * live migration check source failed caused bdm.device\_path lost * Use is\_valid\_ipv4 from oslo.utils * Include exception in \_try\_deallocate\_network error log * Remove mox from tests/unit/virt/test\_imagecache.py * Fix docstring nits from ResourceProvider.set\_inventory() review * fix errors in revert resize api docs * 
Add set\_inventory() method on ResourceProvider * Improve the help text for cells options (8) * VMware: Fix bug of TypeError when getting reference of VCenter cluster is None * XenAPI: Integers returned from XAPI are actually strings * Remove virt.block\_device.\_NoLegacy exception * rename libvirt has\_default\_ephemeral * Remove ec2\_code from exception * Add specific lazy-load method for instance.tags * Don't attempt to lazy-load tags on a deleted instance * Pre-load tags when showing server details * Policy-in-code servers rules * Fix image meta which is sent to glance v2 * Extract update\_port call into method * Refactor to create \_populate\_mac\_address * Rename \_populate\_mac\_address adding pci * Rename created\_port to created\_port\_id * Flip allocate\_for\_instance create or update if * libvirt: cleanup baselineCPU return value checking * Updated from global requirements * Remove mox from tests/unit/objects/test\_aggregate.py * Handle keypair not found from metadata server * Skip network validation if explicitly requesting no networks * nova-net: handle 'auto' network request in allocate\_for\_instance * neutron: validate auto-allocate is available * Add helpers to NetworkRequest(List) objects for auto/none cases * Remove api\_rate\_limit config option * Tear down of os-disk-config part 2 * Tear down os-disk-config part 1 * Disallow instance tag set for invalid instance states * Make instance as second arg in compute api calls * TrivialFix: Remove extra comma from json * Skip NFS and Ceph in live migration job test run * Added missed response to test\_server\_tags * api-ref: console types * api-ref: add version 2.3 parameters to servers * Remove extra expected error code (413) from image metadata * Use instance object instead of db record * Publish proxy APIs deprecation in api ref doc * Fix outdated parameter network\_info description in virt/driver * api-ref: Fix parameters in os-instance-usage-audit-log * Remove python code validation specific to legacy\_v2 * Remove DictCompat from instance\_info\_cache * Remove redundant test in test\_resource\_tracker * nova shared storage: rbd is always shared storage * Modify the disk bus and device name for Aarch64 * Remove mox from unit/compute/test\_compute\_mgr.py (end) * Remove mox in tests/unit/objects/test\_instance\_faults * Remove mox from unit/compute/test\_compute\_mgr.py (6) * Remove mox from unit/compute/test\_compute\_mgr.py (8) * Remove mox from unit/compute/test\_compute\_mgr.py (7) * Trivial-Fix: Fix typos * Fix some typos * Remove mox from unit/compute/test\_compute\_mgr.py (5) * Remove mox from unit/compute/test\_compute\_mgr.py (4) * Remove mox from unit/compute/test\_compute\_mgr.py (3) * Remove mox from unit/compute/test\_compute\_mgr.py (2) * Updated from global requirements * Make Aggregate.get\_by\_uuid use the API db * api-ref: parameter verification for os-aggregates * Improve help text for neutron\_opts * remove processing of blacklist/whitelist/corelist extensions * fix OS-SCH-HNT:scheduler\_hints location in sample * Fix reno from hyper-v-remotefx * Yield the thread when verifying image's signature * Remove invalid test methods for config option port\_range * libvirt: Prevent block live migration with tunnelled flag * Trivial: remove none existing py3 test from tests-py3.txt * Make host as second arg in compute api calls * Stop using mox stubs in tests/unit/fake\_notifier * Remove unused \_get\_flags method from integrated\_helpers * Enable all extension for all remaining sample tests * tox.ini: Remove 
unnecessary comments in api-ref target * Stop using mox stubs in nova/tests/unit * Updated from global requirements * Raise exception if BuildRequest deleted twice * Replace mox with mock for xenapi vm\_utils.lookup * Detach volume after deleting instance with no host * pci: Allow updating pci\_requests in instance\_extra * Change default fake\_ server status to ACTIVE * Fix update inventory for multiple providers * Default to using glance v2 * Enable all extension for remaining server API tests * Enable all extension for server API tests part-1 * Remove mox from unit/compute/test\_compute\_mgr.py (1) * Fixes py3 unit tests for nova.tests.unit.test\_block\_device.\* * Reno for mutable-config * Remove invalid test of config option default\_notification\_level * Improve the help text for cells options (7) * test: pass enable\_pass as kwarg in test\_evacuate * Remove config option config\_drive\_format's invalid value test * test: remove invalid test method in libvirt/test\_imagebackend * xenapi: Remove invalid values for config option image\_compression\_level * Remove mox from api/openstack/compute/test\_pci.py * Stop using mox from openstack/compute/test\_cells.py * Enable all extension for server actions sample tests * Enable all extension for Flavor API sample tests * Fix resource tracking for instances with no numa topology * Clarified "user" to plural type * Revert "Optimize \_cleanup\_incomplete\_migrations periodic task" * Remove unused authorizer methods * Remove legacy v2 policy rules * Add unit tests for nova.virt.firewall.IpTablesFirewallDriver (Part 1) * Make create\_inventory() handle name change * Add ResourceProvider.save() * Remove the skip\_policy\_check flags * api-ref: verify keypairs * Make Xenplugin to work with glance v2 api * Trival: version history 2.30 is not indented as others * Do not register notification objects * Move notification objects to a separate package * Move notification related code to separate package * Adjust field types and defaults on Inventory * Add InventoryList.find() method * Add a get\_by\_uuid for aggregates * Imported Translations from Zanata * get rid of the old \_vhd methods * Make Hyper-V to work with glance v2 api * Stop using mox stubs in stub\_out\_key\_pair\_funcs * Remove v2 extension setting from functional tests * Add name and generation to ResourceProvider object * Remove duplicate test of DELETED instances * Added support for new block device format in Hyper-V * Enable mutable config in Nova * Improve help text for availability zones options * tests: make XMLMatches work with Python3 * Catch PciRequestAliasNotDefined exception * api-ref: parameter verification for os-hypervisors * xen: skip two more racey mox py34 test classes * libvirt: handle reserved pages size * Fix nova-compute start failed when reserved\_huge\_pages has value * Make the base options definitions consistent * virt: set address space & CPU time limits when running qemu-img * Remove manual creation of console.log * Fix imagecache.get\_cache\_fname() to work in python3 * Remove policy checkpoints for SecurityGroupAPI and NetworkAPI * Remove policy checkpoints from ComputeAPI * Stop using mox from objects/test\_instance.py (3) * Stop using mox from objects/test\_instance.py (2) * Stop using mox from objects/test\_instance.py (1) * Fix wrong patch of unittest in unit/test\_metadata.py * Remove code referencing inventory table in cell DB * Handle SetAdminPasswdNotSupported raised by libvirt driver * Prevent boot if ephemeral disk size > flavor value * [libvirt] 
Incorrect parameters passed to migrateToURI3 * Revert inventory/allocation child DB linkage * Only chown console log in rescue * Don't chown a config disk which already exists * Don't overwrite config disk when using Rbd * Add 'update' method to GlanceImageServiceV2 * Add 'create' method to GlanceImageServiceV2 * Add 'detail' method to GlanceImageServiceV2 * Add 'delete' method to GlanceImageServiceV2 * Add 'download' method to GlanceImageServiceV2 * Add 'show' method to GlanceImageServiceV2 * Split the glance API path based on config * Remove image\_meta * add "needs:\*" tags to the config option modules * api-ref method verification for os-cells * API change for verifying the scheduler when live migrating * Stop using mox stubs in volume/encryptors/test\_base.py * Introduce a CONF flag to determine glance client version * fix a typo in comment * Fix white spaces in api-ref * Updated from global requirements * virt/hardware: Add diagnostic logs for scheduling * Use assertNotIn instead of assertTrue(all(A != B)) * Use assert(Not)Equal instead of assertTrue(A == X) * Use assertLess(Equal) instead of assertTrue(A > X) * Use assertGreater(A, X) instead of assertTrue(A > X) * Fall back to flat config drive if not found in rbd * libvirt: Fix the content of "disk.config" lost after migrate/resize * remove /v2.1/{tenant\_id} from all urls * Remove "or 'reserved'" from \_create\_volume\_bdm * pci: Move PCI devices and PCI requests into migration context * Updated from global requirements * Fixes invalid uuid usages in test\_neutronv2 * Clarify message for Invalid/Bad Request exception * Cancelled live migration are not in progress * set wrap\_width for config generator to 80 * API change for verifying the scheduler when evacuating * Fix invalid uuid warnings in virt testcases 14.0.0.0b1 ---------- * Remove mox from nova/tests/unit/virt/libvirt/test\_utils.py * Fix multipath iSCSI encrypted volume attach failure * libvirt: add "get\_job\_info" to Guest's object * Modify 'an network' to 'a network' * Remove legacy v2 API code completely * Remove the usage of RateLimitingMiddleware * Remove unused inner\_app\_v21 and ext\_mgr * Remove legacy API code from sample tests * Remove InstanceUsageAuditLogTest for legacy API * Change instance\_claim parameter from instance\_ref to instance * Make AggregateList.get\_ return API & cell db items * Make Aggregate.get operation favor the API db * Add aggregates tables to the API db * Microversion 2.28 changes cpu\_info string to JSON object * libvirt: Skip CPU compatibility check for emulated guests * Specify the default cdrom type "scsi" for AARCH64 * Remove mox from nova/tests/unit/test\_iptables\_network.py * Updated from global requirements * pci: Make sure PF is 'available' when last VF is freed * pci: related updates are done without DB lookups * pci: make sure device relationships are kept in memory * Remove mox from nova/tests/unit/virt/libvirt/test\_vif.py * verify api-ref os-migrations.inc * Nova UTs broken due to modifying loopingcall global var * Remove mox from unit/api/openstack/compute/test\_consoles.py * Stop using mox from virt/libvirt/storage/test\_lvm.py * Update functional tests for fixtures 3 * Stop using mox in test\_firewall * Add tests to attach/detach vols for shelved server * Remove unused \_vlan\_is\_disabled test flag * libvirt: New configuration classes to parse device address element * Fixed clean up process in confirm\_resize() after resize/cold migration * VMware: remove dead code in test\_get\_vm\_create\_spec() * Remove mox from 
compute/test\_scheduler\_hints.py * Updated from global requirements * Remove normal API operation logs from API layer * Remove unused LOG from v2.1 API code * Adds RemoteFX support to the Hyper-V driver * libvirt: fix serial ports lost after hard-reboot * Stop using mox stubs in test\_server\_usage.py * Remove mox from compute/test\_instance\_usage\_audit\_log.py * api-ref: os-consoles.inc * Add proxy middleware to application pipeline * api-ref: Example verification for os-interface.inc * Remove redundant orphan instances unit test * Remove duplicate migration RT unit tests * Redundant test of CPU resources in test\_tracker * Remove duplicate test of RT.stats.current\_workload * Remove duplicate test of claim context manager * Remove pointless "additive claims" unit test * Remove oversubscribe test in test\_resource\_tracker * api: Improve the \_check\_multiple\* function names readability * api-ref verify servers-action-deferred-delete.inc * Fix the order of expected error codes * Remove DictCompat from NetworkRequest * api-ref: Add a sample test for os-interface * Use oslo\_log instead of logging * Verify requested\_destination in the scheduler * Add requested\_destination field to RequestSpec * Remove mox from compute/test\_extended\_ips\_mac.py * Ironic nodes with instance\_uuid are not available * Updated from global requirements * Fixes python 3 urllib quote / unquote usage * Make compute nodes update their own inventory records * Remove unused WsgiLimiter * Remove unused args from RateLimitingMiddleware * Remove unused use\_no\_auth from wsgi\_app\_v21() * Fix incorrectly named vmwareapi test * Make Inventory and ResourceProvider objects use the API DB instead * Rename ImageCacheManager.\_list\_base\_images to \_scan\_base\_images * Remove all references to image\_popularity from image cache * Remove image cache image verification * Fix test\_age\_and\_verify\_swap\_images * api and availablity\_zone opt definition consistent * Rename Image.check\_image\_exists to Image.exists() * Remomve mox from api/openstack/compute/test\_console\_output.py * Remove mox from api/openstack/compute/test\_config\_drive.py * VMware: set service status based on vc connection * Return 400 HTTP error for invalid flavor attributes * Get transport\_url from config in Cells v2 cell map utility * Support for both microversion headers * Fix unit test after the replace of key manager * Fix "KeyError: u'instance\_id'" in string format operation * Save all instance extras in a single db call * Remove APIRouter of legacy v2 API code * Remove legacy v2 API tests which use wsgi\_app() * limits.inc example verification * Remove duplicate unit test in test\_tracker * Remove delete stubs in test\_resource\_tracker * Remove service crud from test\_resource\_tracker * Remove conductor from test\_resource\_tracker * Remove StatsDicTestCase from test\_resource\_tracker * rt-unit: Replace hard-coded strings with constants * Remove useless test of incorrect stats value * Remove RT duplicate unit test for PCI stats * Remove more duplicate RT unit tests * Removes test\_claim\_saves\_numa\_topology() * objects: added 'os\_secure\_boot' property to ImageMetaProps object * Trivial: Fixes serial console minor nits * Revert "glance:add helper method to get client version" * Add length check in comparing object lists * Update Support Matrix * Improve the help text for the rdp options * No disable reason defined for new services * api-ref: limits.inc validate parameters * Make available to build docs with python3 * Updated from 
global requirements * remove db2 support from tree * Adds Hyper-V imagecache cleanup * raise exception ComputeHostNotFound if host is not found * Skip instance name templating in API cell * Add http\_proxy\_to\_wsgi to api-paste * Stop using mox stubs in test\_pipelib.py * api-ref: Parameter verification for os-interface.inc * devspec: remove unused VIRTFN\_RE and re * Remove duplicate test of set inst host/node * Remove SchedulerClientTrackerTestCase * Move unit tests of set\_instance\_host\_and\_name() * Remove MissingComputeNodeTestCase for res tracker * Remove tests for missing get\_available\_resource() * api-ref, os-fping.inc * Pass OS\_DEBUG to the tox test environment * Hyper-V: Implement nova rescue * Add resource provider tables to the api database * HyperV: Nova serial console access support * Let setup.py compile\_catalog process all language files * use\_neutron\_default\_nets: StrOpt ->BoolOpt * api-ref: Add fault parameter details * be more explicit that rate limits are gone in v2.1 * Warn when using null cache backend * Enable 'null' value for user\_data in V2.1 API * Updated from global requirements * fix Quota related error return incorrect problem * Add online migration to move keypairs from main to API database * Completed migrations are not "in progress" * Make flavor-manage api call destroy with Flavor object * Move is\_volume\_backed\_instance to compute.utils * Updated from global requirements * api-ref: verify flavors.inc * Fix use of invalid assert calls * Config options: remove import\_opts from cloudpipe section * Enables Py34 tests for unit.api.openstack.compute.test\_server\_tags * Fix the versions API for api-ref * Update link for hypervisor support matrix message * api-ref: complete verification of baremetal api * Keep BuildRequest db entry around longer * Drop fields from BuildRequest object and model * Resize API operation passing down original RequestSpec * Augment release note for import\_object\_ns removal * pci: add safe-guard to \_\_eq\_\_ of PciDevice * deprecate config option "fatal\_exception\_format\_errors" * config options: centralize exception options * libvirt: Add serial ports to the migration data object * Hyper-V: Fixes disk overhead claim issue * Config options: move set default opt of db section to centralized place * [Trivial] Fix a grammar error in comments * api-ref: Example verification for servers-action-shelve.inc * [Ironic] Correct check for ready to deploy * api-ref: Fix parameters in servers-action-shelve.inc * api-ref: parameter verification for os-server-groups * api-ref: servers-action-evacuate.inc * remove FlavorCreateFailed exception * Add tests for floating\_ip private functions * Trivial: remove os-security-groups needs:method\_verification line * Add RC file for excluding tempest tests for LVM job * Move config options from nova/api directory (5) * libvirt: add method to configure max downtime when migrating * libvirt: add "abort\_job" to Guest's object * libvirt: add method "migrate" to Guest's object * Only attempt to inject files if the injection disk exists * Remove deprecated option libvirt.remove\_unused\_kernels * Rename Raw backend to Flat * deprecate s3 image service config options * Cold migrate using the RequestSpec object * Add a RequestSpec generation migration script * Enables Py34 tests for unit.compute.test\_compute * Fixes invalid uuid usages in functional tests * Make neutronapi get\_floating\*() methods return objects * Switch api unit tests to use v2.1 API * Remove mox used in 
tests/unit/api/openstack/compute/test\_server\_start\_stop * Remove marker from nova-manage cells\_v2 map\_instances UI * api-ref: complete verification for os-flavor-access * Make some build\_requests columns nullable * Add message queue switching through RequestContext * trivial: remove unused argument from a method * baseproxy: stop requiring CONF.verbose * Cleanup validation logic in \_get\_requested\_networks * api-ref: complete verification of servers-action-crash-dump.inc * migrate to os-api-ref * api-ref: image.inc - Update method validation * config options: centralize section "database" + "api\_database" * api-ref: parameter verification for os-quota-sets * Fix network mtu in network\_metadata * Add a note about egress rules to os-security-group-rules api-ref * ironic: fix call to \_cleanup\_deploy on config drive failure * Follow-up for the API config option patch * api-ref: reorder parameters.yaml * Network: fix typo * Add online migration to store keypairs with instances * Make Keypair object favor the API database * api-ref: ips.inc example verification * Fix spelling mistake in libvirt * Body Verification of os-aggregates.inc * Move placement api request logging to middleware * conf: Move cloudpipe options to a group * conf: Address nits in I92a03cb * Fix corrupt "host\_aggregates\_map" in host\_manager * Fix spelling mistake * api-ref: Example verification for os-volume\_attachments.inc * api-ref: Parameter verification for os-volume\_attachments.inc * Remove fake\_imagebackend.Raw and cleanup dependent tests * Remove unused arguments to images.fetch and images.fetch\_to\_raw * api-ref: finish validation for os-server-external-events.inc * report info if parameters are out of order * Method verification of os-floating-ips-bulk.inc * api-ref: os-volumes.inc method verification * config options: move s3 related options * deprecate "default\_flavor" config option * config options: centralize default flavor option * Return HTTP 400 on boot for invalid availability zone * Config options: remove import\_opts from completed section * Fix migration query with unicode status * Config options: centralize cache options * Change 5 space indent to 4 spaces * Remove deprecated "memcached\_server" in Default section * Updated from global requirements * Add a functional test for instance fault message with retry * api-ref: complete verification for extensions resource * live-migration ceph: fix typo in ruleset parsing * api-ref: os-floating-ip-dns.inc method verification * api-ref: Method verification for servers-actions * Eager load keypairs in instance metadata * Complete method verification of os-networks * Method verification of os-security-group-default-rules * virt: reserved number of mempages on compute host * deprecate "file transfer" feature for Glance images * centralized conf: nova/network/rpcapi.py * Config options: centralize remotefs libvirt options (end) * Config options: centralize smbfs libvirt options (16) * imagebackend: Check that the RBD image exists before trying to cleanup * Rewrite \_cleanup\_resize and finish\_migration unit tests to use mock instead of mox * Remove mox in test\_volume\_snapshot\_create\_outer\_success * api-ref: Method verification for os-volume\_attachments.inc * Improve the help text for the API options (4) * Improve the help text for the API options (3) * api-ref: ips.inc parameter verification * Add Keypairs to the API database * Create Instances with keypairs * Method verification for server-action-deferred-delete * method verification for 
server-action-remote-consoles * method verification of os-server-external-events * method verification of os-instance-usage-audit-log * Add keypairs to Instance object * Complete method verification of os-baremetal-nodes.inc * api-ref: parameter validation for os-security-group-rules * Fixed missing variable * api-ref: Method verification for os-floating-ips * force\_live\_migration remove redundant check * pci: create PCI tracker in RT.\_init\_compute\_node * Fix race condition for live-migration-force-complete * api-ref: servers-action-shelve.inc * Added fault response parameter to Show Server Details API * pci: Remove unused 'all\_devs' method * Corrected the typo * Denormalize personality extension * method verification of os-assisted-volume-snapshots * api-ref: os-certificates.inc method verification * Complete method verification of os-cloudpipe.inc * Fix service version to update the DB * method verification for servers-action-fixed-ip * Added new exception to handle CinderClientException * Drop paramiko < 2 compat code * Config options: centralize scality libvirt options (15) * Compute: Adds driver disk\_gb instance overhead estimation * config options: move image\_file\_url download options * crypto: Add support for Paramiko 2.x * Denormalize extensions for clarity * Complete method verification of os-fping * Complete method verification of os-security-group-rules * Fix invalid uuid warnings * Correct some misspell words in nova * Remove 404 for list and details actions of servers * Improve the help text for the API options (2) * Improve the help text for the API options (1) * Complete method verification of os-migrations * Move config options from nova/api directory (4) * api-ref: perform all 4 phases of verification for action console output * api-ref: add url parameter to expand all sections * api-ref: complete verification for diagnostics.inc * api-ref: update parameter validation on servers * Complete method verification of os-tenant-networks * trivial: removed unused networks var from os-tenant-networks:create * Complete method verification of os-security-groups * Move config options from nova/api directory (3) * Move config options from nova/api directory (2) * Move config options from nova/api directory (1) * api-ref: method verification and fixes for servers.inc * Instance mapping save, properly load cell mapping * Fix exception when vcpu\_pin\_set is set to "" * config: remove deprecated ironic.client\_log\_level * Complete method verification of os-quotas * Complete method verification of os-servers-admin * Complete method verification of os-shelve * Add api-sample test for showing quota detail * Remove legacy v2 tests which use APIRouter * pci: eliminate DB lookup PCI requests during claim * pci: pass in instance PCI requests to claim * Remove rate\_limit param in builder * Remove comment on v3 API * Not talking about V2 API code in review doc guide * Add keypairs to instance\_extra * Trivial: No need to exclude TestMoveClaim from py34 tests * Remove 400 as expected error * Cleaned up request and response formats page * Complete method verification of os-agents * update servers policy in code to use formats * Complete method verification of os-fixed-ips * Consolidate image\_href to image uuid validation code * Fix TestNeutronv2.test\_deallocate\_for\_instance\_2\* race failures * Centralize config option for nova/network/driver.py * Don't raise error when filtering on custom metadata * Config options: centralize quobyte libvirt options (14) * Config options: 
centralize volume nfs libvirt options (13) * Config options: centralize volume net libvirt options (12) * Config options: centralize iser libvirt options (11) * Config options: centralize iscsi libvirt options (10) * Config options: centralize glusterfs libvirt options (9) * Config options: centralize aoe vol libvirt options (8) * Config options: centralize volume libvirt options (7) * Config options: centralize vif libvirt options (6) * Config options: centralize utils libvirt options (5) * Config options: centralize lvm libvirt options (4) * Remove legacy v2 unit tests[q-v] * Remove legacy v2 unit tests[f-n] * Remove Limits dependency of legacy v2 API code * Remove mox in unit/virt/xenapi/test\_agent.py * Set migration status to 'error' on live-migration failure * Add pycrypto explicitly * Centralize vif,xenpool & vol\_utils config options * Config options: centralize imagecache libvirt options (3) * Config options: centralize imagebackend libvirt options (2) * Remove the legacy v2 API entry from api-paste.ini * Update stable API doc to indicate code removal * Config options: centralize driver libvirt options (1) * UEFI - instance terminates after boot * Fix unit tests for v2.1 API * Remove legacy v2 unit tests[a-e] * Config options: Centralize servicegroup options * libvirt: release serial console ports when destroying guests * Remove mox from tests/unit/network/test\_api.py * Remove legacy v2 API functional tests * fix wrong key name in test code * Remove the legacy v2 API test scenarios from API sample tests * Remove 413 expect in servers.py * Remove core extension list * rt: remove unused image\_meta parameter * Fail to start nova-api if no APIs were able to be started * Test that nova-api ignores paste failures, but continues on * libvirt: introduces module to handle domain xml migration * Trivial: dead code * Fix database poison warnings, part 8 * docs: link to Laski's cells talk from the Austin summit * compute: Retain instance metadata for 'evacuate' on shared storage * Archive instance\_actions and instance\_actions\_event * Add os-interface functional negative tests * api-ref: verify os-server-groups.inc * Avoid unnessary \_get\_power\_state call * Remove mox in test\_certificates.py * api-ref: verfiy limits body * api-ref: body verification of ips.inc * Change message format of Forbidden * Updated from global requirements * api-ref verify of servers-admin-action.inc * pci: Allow to assign pci devices in pci device list * Fix typo in support-matrix.ini: re(set)=>(re)set * Add ability to filter migrations by instance uuid * Wrong mocks, wrong mock order * verify api-ref metadata.inc * verify api-ref os-server-password.inc * Updated from global requirements * Fix database poison warnings, part 7 * Declare nova.virt namespace * [doc] fix 5 typos * Make compute rpcapi 'live\_migration' backward compatible * Replace key manager with Castellan * Deprecate Nova Network * verify api-ref os-instance-usage-audit-log.inc * Only reset dns\_name when unbinding port if DNS is integrated * Changed the storage size from GB to GiB * Remove unused FAKE\_UUID variables * Deprecated the concept of extensions in v2.1 * Fix database poison warnings, part 6 * Fix database poison warnings, part 5 * Avoid unconditional warnings in nova-consoleauth * libvirt: remove version checks for hyperv PV features * libvirt: remove version checks for libvirt disk discard feature * libvirt: remove version checks for block job handling * libvirt: remove version checks for PCI device detach * libvirt: remove version 
checks for live snapshot feature
* libvirt: add explicit check for min required QEMU version
* libvirt: increase min required libvirt to 1.2.1
* network: Fix nova boot with multiple security-groups
* Updated config description on live snapshot
* Fix NoSuchOptError when referring to conf.neutron.auth_plugin
* api-ref host verification (os-hosts.inc)
* api-ref verify os-floating-ip-pools.inc
* Complete Verification of server-metadata
* Complete method Verification of os-hypervisors
* Fix invalid uuid warnings in compute api testcases
* Fix invalid uuid warnings
* complete Method Verification of aggregates
* Complete Method Verification of ips
* Fix resize to same host failed using anti-affinity group
* Complete method Verification of consoles
* Config options: Centralize netconf options
* Remove 413 as expected error code
* Complete Verification of os-server-password
* Complete Verification of os-hosts
* Add links to API guide to describe links
* Complete Method Verification of os-interface
* Complete Method Verification of flavor-access
* Complete Verification of os-virtual-interfaces
* Complete Method Verification of os-instance-actions
* Complete Verification of os-flavor-extra-specs
* Fix database poison warnings, part 4
* Complete Method Verification of flavor
* Complete Method Verification of server group
* Trivial: fix mock decorator order
* Add test for nova-compute and nova-network main database blocks
* Prevent nova-api from dying if enabled_apis is wrong
* Complete Method Verification of keypair
* Complete Method Verification of availability-zone
* Complete Method Verification of simple tenant usage
* remove the use of import_object_ns
* Fixed typo in word "were"
* Complete Method Verification of os-services
* Complete Method Verification of server diag
* Remove mox in tests/unit/compute/test_host_api.py
* Config options: completing centralize neutron options
* Add instances into dict when handle exception
* Complete Method Verification of limits
* Improve the help text for the compute rpcapi option
* Move config options from nova/compute/rpcapi.py file
* Updated from global requirements
* deprecate nova-all
* Remove unused base_options param from _get_image_defined_bdms
* Change BuildRequest to contain a serialized instance
* Split out part of map_cell_and_hosts to return a uuid
* Add manage command for cell0
* Config options: centralize section "ssl"
* config options: centralize security_group_api opt
* Imported Translations from Zanata
* Stop using mox stubs in test_multinic.py
* libvirt: deprecate use_usb_tablet in favor of pointer_model
* Config options: Centralize neutron metadata options
* add tags to files for the content verification phase
* Config options: Centralize compute options
* Add 415 to list of exceptions for microversions devref
* Added validation for rescue image ref
* Final warnings removals for api-ref
* Clean port dns_name in case of port detach
* Fix remaining json reference warnings
* Add validations for volume_size and destination_type
* Remove duplicate api ref for os-networks/actions
* Fix all remaining sample file path
* Stop using mox stubs in test_access_ips.py
* Stop using mox stubs in test_admin_password.py
* libvirt - Add log if libguestfs can't read host kernel
* Fix sample file path for 4 files
* Fix invalid uuid warnings in objects testcases
* Fix invalid uuid warnings in server-group unit tests
* Create image for suspended instance booted from volume
* Fix content and sample file for keypair, migration, networks
* Fix sample
file path for os-i\* API * Fix the parameters for os-agents API * Fix sample file path for fixed, floating ips API * Fix sample path for aggregate, certificate, console * Add remaining image API ref * Fix the schema of assisted\_volume\_snapshots * config options: conductor live migrate options * xenapi: Fix xmlrpclib marshalling error * fix samples references in security group files * fix samples references in os-services * Fix api samples references in 3 more files * Fix reverse\_upsize\_quota\_delta attempt to look up deleted flavors * Fix api ref for os-hosts, os-quota-sets and os-fping * Fix api ref for os-cells, os-cloudpipe and server-action-shelve * Fix api sample references in 2 more files * Updated from global requirements * hardware: thread policy default value applied even if specified * Fix api ref for ips, limits, metdata and agent * virt: use more realistic fake network / VIF data * Fix json response example heading in api ref * Fix database poison warnings, part 3 * Remove 40X and 50X from Normal response codes * Specify normal status code on os-baremetal-nodes * Remove unused rotation param from \_do\_snapshot\_instance * Remove unused filter\_class\_names kwarg from get\_filtered\_hosts * Remove deprecated ability to load scheduler\_host\_manager from path * Fix "Creates an aggregate" parameters * Unavailable hosts have no resources for use * HyperV: Add SerialConsoleOps class * HyperV: Add serial console handler class * HyperV: Add serial console proxy * fix samples references for 2 files * Update servers.inc to be as accurate as api-site * Fix database poison warnings, part 2 * Fix "Creates an agent build" parameters * Update get\_by\_project\_id on InstanceMappingList * Clean up cell handling in nova-manage cell\_v2 map\_instances * Properly clean up BDMs when \_provision\_instances fails * clean up versions.inc reference document * Collection of CSS fixes * Fixes unexpectedly passing functional test * move sphinx h3 to '-' instead of '^' * fix blockquote font size * Add 'Show All' / 'Hide All' toggle * use 'required' instead of 'optional' for parameters * Fix css references to the glyphicons font * Initial use of microversion\_parse * Changed an HTTP exception to return proper code * Compute API: omit disk/container formats when creating images of snapshots * Fix formatting of rst in parameters.yaml * Add instance/instance\_uuid to build\_requests table * network: make nova to handle port\_security\_enabled=False * BaseCoreFilter docstring and formating improved * Fix NoMoreNetworks functional test traces * Fix typo in nova release notes * Updated from global requirements * Fix generation of Guru Meditation Report * Fix invalid uuid warnings in cell api testcases * cleanup some issues in parameters.yaml * Import RST files for documentation * add combined parameters.yaml file * claims: Do not assume image-meta is a dict * Fix nova opts help info * Fix doc build if git is absent * Add checks for driver attach\_interfaces capability * Updated from global requirements * Add AllServicesCurrent fixture * Improve the help text for the linuxnet options (3) * Improve the help text for the linuxnet options (2) * Fix signature of copy\_image * libvirt: remove live migrate workaround for an unsupported ver * libvirt: move graphic/serial consoles check to pre\_live\_migration * Fix invalid uuid warnings in api testcases * Minor updates to the how\_to\_get\_involved docs * Put more into compute.api.\_populate\_instance\_for\_create * Remove unused parameter from 
\_get\_requested\_instance\_group * Improved test coverage * Check API versions intersects * virt/hardware: Fix 'isolate' case on non-SMT hosts * Migrate compute node resource information to Inventory objects * Drop compute node uuid online migration code * increase error handling for dirty files * config options: centralize 'spice' options * Fix max concurrent builds's unlimited semaphore * VMware: add in context for log messages * XenAPI: specify block size for writing config drive * Fix database poison warnings * Make swap-volume an admin-only API by default * Updated from global requirements * Improve the help text for the linuxnet options (1) * Config options: Centralize network options * Config options: centralize base path configuration * Add new NeutronFloatingIP object * Add "\_\_repr\_\_" method to class "Service" * remove alembic from requirements.txt * Config options: centralize section "xvp" * Imported Translations from Zanata * Updated from global requirements * allow samples testing for PUT to not have a body * libvirt: delete the last file link in \_supports\_direct\_io() * db: retry instance\_info\_cache\_update() on deadlock * Moved tags filtering tests to TestInstanceTagsFiltering test case * Move config options from nova/network/linux\_net.py * Remove nova-manage service subcommand * config options: centralize quota options * DB API changes for the nova-manage quota\_usage\_refresh command * Improve the help text for the network options (1) * Fix typo in compute node mega join comments * Add api-ref/build/\* to .gitignore * Improve help text for the network object options * Config options: Centralize console options * Config options: Centralize notification options * Remove mox from tests/unit/network/security\_group/test\_neutron\_driver.py * Added server tags support in nova-api * Added server tags controller * Added db API layer to add instance tag-list filtering support * Improve 'workarounds' conf options documentation * Config options: centralize "configdrive" options * config options: centralize baseproxy cli options * Check if a exception has a code on it before read the code * Fix import statement order in nova/rpc.py * Document our policy on fixing v2.0 API bugs * Config options: Centralize neutron options * Remove mox from tests/unit/compute/test\_compute\_xen.py * Fix typo in comments of affinity and anti-affinity * Fix up online\_data\_migrations manage command to be consistent * Adds missing discoverable rules in policy.json * Config options: Centralize ipv6 options * config options: centralize xenserver vmops opts * Config options: Centralize xenapi driver options * config options: centralize xenserver vm\_utils opts * Remove flavor seeding from the base migration * Rely on devstack to skip rescue tests for cells v1 * Replace topic with topics for messaging.Notifier * Updated from global requirements * Fix test for empty policy rules * Improve 'monkey\_patch' conf options documentation * conf: Remove 'destroy\_after\_evacuate' * config options: Move crypto options into a group * config options: centralize section: "crypto" * config options: Centralise 'monkeypatch' options * config options: Centralise 'utils' options * doc: clean up oslo-incubator related stuff * config option generation doesn't work with a generator * Add link to the latest nova.conf example * Change the nova tempest blacklist to use to idempotent ids * HyperV: Refactor livemigr, avoiding getting disk paths remotely * Remove DictCompat from mapping objects * Enhance value check for 
option notify\_on\_state\_change * Fix flavor migration tests and edge case found * config options: Centralize upgrade\_levels section * config options: Centralize mks options * Remove DictCompat from S3 object * config options: Centralize vmware section * config options: centralize section "service" * Define context.roles using base class * TrivialFix: removed unnecessary cycle in servicegroup/test\_api.py * Handle pre-migration flavor creation failures in the crusty old API * config options: centralize section "guestfs" * config options: centralize section "workarounds" * config options: Centralize 'nova.rpc' options * Cleanup NovaObjectDictCompat from BandwidthUsage * config options: fix the missed cli options of novncproxy * Add metadata objects for device tagging * Nuke cliutils from oslo-incubator * libvirt: pci detach devices should use dev.address * Fix stale file handle error in resource tracker * Updated from global requirements * config options: Centralize xenapi torrent options * Fix: unable to delete instance when cinder is down * Block flavor creation until main database is empty * Further hack up the n.t.unit.db.fakes module of horribleness * Add flavor migration routine * Make Flavor create() and destroy() work against API DB * Move config options from nova/objects/network.py * Add tag column to vifs and bdm * Remove extensible resource tracking * Fix error message of nova baremetal-node-delete * Enhanced error handling for rest\_parameters parser * Fix not supported error message * config options: Centralise 'image\_file\_url' options * neutron: Update the port with a MAC address for PFs * Remove mox from tests/unit/network/test\_rpcapi.py * Remove mox from tests/unit/objects/test\_migration.py * The 'record' option of the WebSocketProxy should be string * config options: centralize section: "glance" * Move resource provider staticmethods to proxies * Add Service.get\_minimum\_version\_multi() for multiple binaries * remove the ability to disable v2.1 * Make git clean actually remove covhtml * Set 'libvirt.sysinfo\_serial' to 'none' in RealTimeServersTest * Make compute\_node\_statistics() use new schema * remove glance deprecated config * Config options: Centralize consoleauth options * config options: centralize section "cloudpipe" * After migrate in-use volume the BDM information lost * Allow to update resource per single node * pci: Add utility method for getting the MAC addr 13.0.0 ------ * Imported Translations from Zanata * VMware: Use Port Group and Key in binding details * Config options: Centralize resource tracker options * Fixed incorrect behavior of xenapi driver * Remove DictCompat from ComputeNode * config options: Centralise 'virt.imagecache' options * neutron: pci\_request logic considers 'direct-physical' vnic type * config options: remove the scheduler import\_opt()s * Improve the help text for hyperv options (3) * Improve the help text for hyperv options (2) * Improve the help text for hyperv options (1) * Imported Translations from Zanata * Remove a redundant 'that' * Cleanup NovaObjectDictCompat from NumaTopology * Fix detach SR-IOV when using LibvirtConfigGuestHostdevPCI * Stop using mox in test\_security\_groups * Cleanup the exception LiveMigrationWithOldNovaNotSafe * Add sample API content * Create api-ref docs site * Config options: Centralize debugger options * config options: centralize section: "keymgr" * libvirt: fix ivs test to use the ivs vif object * libvirt: pass a real instance object into vif plug/unplug methods * Add a vnic type for PF 
passthrough and a new libvirt vif driver * libvirt: live\_migration\_flags/block\_migration\_flags default to 0 * Imported Translations from Zanata * config options: Centralize xenapi options * Populate instance\_mappings during boot * libvirt: exercise vif driver 'plug' method in tests * config options: centralize xenserver options * Fix detach SR-IOV when using LibvirtConfigGuestHostdevPCI * Reduce number of db calls during image cache manager periodic task * Imported Translations from Zanata * Update cells blacklist regex for test\_server\_basic\_ops * Update cells blacklist regex for test\_server\_basic\_ops * Remove mox from tests/functional/api\_sample\_tests/test\_cells.py * Remove mox from tests/unit/api/openstack/compute/test\_baremetal\_nodes.py * Config options: Centralize ldapdns options * Add NetworkRequestList.from\_tuples helper * Stop providing force\_hosts to the scheduler for move ops * Enforce migration tests for api database * Objectify test\_flavors and test\_flavors\_extra\_specs * Allow ironic driver to specify cafile * trivial: Fix alignment of wsgi options * config options: Remove 'wsgi\_' prefix from opts * VMware: Always update image size for sparse image * VMware: create temp parent directory when booting sparse image * VMware: Use datastore copy when the image is already in vSphere * Imported Translations from Zanata * Fix typos in document * Removes some redundant words * Stop providing force\_hosts to the scheduler for move ops * Include CellMapping in InstanceMapping object * Make flavor extra\_specs operations work against the API DB * Make Flavor access routines work against API database * Clarify the \`\`use\_neutron\`\` option upgrade notes 13.0.0.0rc2 ----------- * Imported Translations from Zanata * Try to repopulate instance\_group if it is None * Try to repopulate instance\_group if it is None * modify duplicate // to / in doc * change host to host\_migration * Fixup test\_connection\_switch functional test * Fix SAWarning in \_flavor\_get\_by\_flavor\_id\_from\_db * Update 'os-hypervisors.inc' in api-ref * Fix os-server-groups.inc * cinder: accommodate v1 cinder client in detach call * Move config options from nova/network/manager.py * Change adminPass for several server actions * Fix os-virtual-interfaces and flavors api-ref * Make FlavorList.get\_all() return results from the API and main DBs * Objectify some tests in test\_compute and test\_flavors * Objectify test\_instance\_type\_extra\_specs * Add a DatabasePoisonFixture * config options: Use OptGroup for listing options * Live migration failure in API leaves VM in MIGRATING state * Fix flavor-access and flavor-extras api-ref * Fix diagnostics, extensions api ref * Fix typo 'mappgins' to 'mappings' * Imported Translations from Zanata * Fix hosts and az api samples * Change "libvirt.xml" back to the original after doing unrescue * Fix os-service related reference missing * Add 'binary' and 'disable-reason' into os-service * Remove unused argument v3mode * Clean up the TestGlanceClientWrapper retry tests * stop setting mtu when plugging vhost-user ports * config options: Move wsgi options into a group * Rewrite 'test\_filter\_schedule\_skipping' method using Mock * Remove stub\_compute config options * Added missing "=" in debug message * libvirt: serial console ports count upper limit needs to be checked * Imported Translations from Zanata * Return 400 on boot for invalid image metadata * Fix JSON format of server\_concepts * Remove /v1.1 endpoint from api-guide * config options: centralize 
section: "rdp" * Fixes hex decoding related unit tests * Fix conversion of config disks to qcow2 during resize/migration * xenapi: Fix when auto block\_migration in the API * xenapi: Fix up passing of sr\_uuid\_map * xenapi: Fix the live-migrate aggregate check * Add rebuild action descriptions in support-matrix * Config options: centralize section "hyperv" * Removal of unnecessary \`import\_opt\`s for centralized config options * Imported Translations from Zanata * Fixes bug with notify\_decorator bad getattr default value * config options: centralize section "monitors" * config options: Centralise floating ip options * Fix API Error on hypervisor-uptime API * VMware: make the opaque network attachment more robust * Add functional test for v2.7 * avoid microversion header in functional test * Add backrefs to api db models * Update reno for stable/mitaka * stop setting mtu when plugging vhost-user ports * Removes redundant object fields * Blacklist TestOSAPIFixture.test\_responds\_to\_version in python3 * Fix conversion of config disks to qcow2 during resize/migration * Remove auto generated module api documentation * Imported Translations from Zanata * Mark 2.25 as Mitaka maxmium API version * Add a hacking check for test method closures * Make Flavor.get operations prefer the API database * xenapi: Fix when auto block\_migration in the API * xenapi: Fix up passing of sr\_uuid\_map * Update to openSUSE versions * xenapi: Fix the live-migrate aggregate check * Error on API Guide warnings * Add Newton sanity check migration * Add placeholder migrations for Mitaka backports * Update .gitreview for stable/mitaka * Set RPC version aliases for Mitaka 13.0.0.0rc1 ----------- * Fix reno reverts that are still shown * Wait for device to be mapped * Add a prelude section for Mitaka relnotes * Fix reno for RC1 * libvirt: Fix ssh driver to to prevent prompting * Support-matrix of vmware for chap is wrong * Imported Translations from Zanata * Allocate free bus for new SCSI controller * config options: centralize cinder options * Add os-brick rootwrap filter for privsep * Fix retry mechanism for generator results * Add a cell and host mapping utility to nova-manage * Add release note for policy sample file update * Fix vmware quota extra specs reno formatting * Avoid lazy-loads of ec2\_ids on Instance * Replace deprecated LOG.warn with LOG.warning * libvirt: Allow use of live snapshots with RBD snapshot/clone * Typo fix in documentation * Redundant parentheses removed * Trivial: Use exact docstring for quota module * Replace deprecated LOG.warn with LOG.warning * Revert "virt: reserved hugepages on compute host" * Make tuple actually a tuple * xenapi: Image cache cannot be disabled * VMware: enable a resize of instance with no root disk * fixed typo in word "OpenStack" * hyper-v: Copies back files on failed migration * Add functional test for OverQuota * Translate OverLimit exceptions in Cinder calls * Add regression test for Cinder 403 forwarding * register the config generator default hook with the right name * pci - Claim devices outside of Claim constructor * Get instance security\_groups from already fetched instance * Use migrate\_data.block\_migration instead of block\_migration * Fix pre\_live\_migration result processing from legacy computes * Add reno for disco driver * linux\_net: use new exception for ovs-vsctl failures * Insure resource tracker updated for deleted instances * VMware: use datacenter path to fetch image * libvirt: check for optional LibvirtLiveMigrateData attrs before 
loading * Change SpawnIsSynchronous fixture return * Report instance-actions for live migration force complete API * Add release notes for security fixes in 13.0.0 mitaka GA * API: Raise up HTTPNotFound when no availabe while get\_console\_output * libvirt: Comment non-obvious security implications of migrate code * Update the doc of notification * fixed log warning in sqlalchemy/api.py * Add include\_disabled parameter to service\_get\_all\_by\_binary * Imported Translations from Zanata * Set personality/injected\_files to empty list if not specified * Fix processing of libvirt disk.info in non-disk-image cases * pci: avoid parsing whitelist repeatedly * Add Forbidden to caught cinder exceptions * Missing info\_cache.save() in db sqlalchemy api * tests: Add some basic compute\_api tests for attaching volumes * Clean up networks with SR-IOV binding on reschedule * virt: refactor method compute\_driver\_matches * Make force\_ and ignore\_hosts comparisons case insensitive * xenapi: fix when tar exits early during download * Address nits in I83a5f06ad * Fix config generation for Neutron auth options * Remove an unused method in FakeResourceTracker * Rework 'limited' and 'get\_limit\_and\_marker' * plugins/xenserver: Resolve PEP8 issues * Remove unused variable and redundant code path * Soft delete instance group member when delete instance * VMware: Refactor the formatting instance metadata * Remove sizelimit.py in favor of oslo\_middleware.sizelimit * libvirt: make snapshots call suspend() instead of reimplementing it * Use generic wrapper for cinder exceptions * Add ppc64le architecture to some libvirt unit tests * Add Database fixture to sync to a specific version * Drop the use of magic openstack project\_id * Aggregate object fixups * Address nits in Ia2296302 * Remove duplicated oslo.log configuration setup * libvirt: Always copy or recreate disk.info during a migration * nova-manage: Print, not raise, exceptions * virt: reserved hugepages on compute host * XenAPI:Resolve Nova/Neutron race condition * Don't use locals() and globals(), use a dict instead * update the deprecated \`\`security\_group\_api\`\` and \`\`network\_api\_class\`\` * [Ironic]Match vif-pif mac address before setting 'vif\_port\_id' * Correct the wrong usage of 'format' jsonschema keyword in servers API * Add ComputeNode and Aggregate UUID operations to nova-manage online migrations * Extend FakeCryptoCertificate.cert\_not\_valid\_after to 2 hours * Revert "functional: Grab the service version from the module" * libvirt: Fix resize of instance with deleted glance image * Reno for libvirt libosinfo with OS * Fix hyperv use of deprecated network\_api\_class * Fix v2.12 microversion REST API history doc * Add docstrings for nova.network.base\_api.get\_vifs\_by\_instance * Style improvements * Reno for Ironic api\_version opt deprecation * Release notes: online\_data\_migrations nova-manage command * nova-manage: Declare a PciDevice online migration script * test\_fields: Remove all 'Enum' subclass tests * Make test cases test\_crypto.py from NoDBTestCase * Ironic: remove backwards compatibility code * Ironic: Use ironicclient native retries for connection errors * RT: aborting claims clears instance host and NUMA info * Provide correct connector for evacuate terminate * Reset instance progress when LM finishes * Forbid new legacy notification event\_type * VMware: Remove VMwareHTTPReadFile * API: Mapping ConsoleTypeInvalid exception to HTTPBadRequest * VMware: remove deprecation warnings from oslo\_versionedobjects * 
Reject empty-named AZ in aggregate metadata * add checking for new image metadata property 'hw\_cpu\_realtime\_mask' * Remove unused methods in nova/utils.py * Fix string interpolations at logging calls * Generate better validation error message when using name regexes * Return 400 for os-virtual-interfaces when using Neutron * Dump metric exception text to logs * Updated from global requirements * Use SensitiveStringField for BlockDeviceMapping.connection\_info * Add index on instances table across deleted/created\_at columns * Tweak the resize\_confirm\_window help text * Enable rebuild tests in cellsv1 job * libvirt: clean up help text for live\_migration\_inbound\_addr option * Add release note for nova using neutron mtu value for vif plugging * deprecate security\_group\_api config option * update tests for use\_neutron=True; fix exposed bugs * deprecate \`\`volume\_api\_class\`\` and \`\`network\_api\_class\`\` * deprecate \`\`compute\_stats\_class\`\` config option * Deprecate the \`\`vendordata\_driver\`\` config option * Deprecate db\_driver config option * deprecate manager class options * remove default=None for config options * Check 'destination\_type' instead of 'source\_type' in \_check\_and\_transform\_bdm * Documentation fix regarding triggering crash dump * Use db connection from RequestContext during queries * Ironic: Clean up if configdrive build fails * Revert "Generate better validation error message when using name regexes" * Add unit tests for live\_migration\_cleanup\_flags * Replaced unittest and unittest2 to testtools * Sample nova.conf file has missing/duplicated config options 13.0.0.0b3 ---------- * Fix missing of unit in HostState.\_\_repr\_\_() * Make InstanceMappings.cell\_id nullable * Create BuildRequest object during boot process * Add BuildRequest object * Api\_version\_request.matches does not accept a string or None * Added Keystone and RequestID headers to CORS middleware * Generate better validation error message when using name regexes * XenAPI: introduce unit test for XenAPI plugins * Abstract a driver API for triggering crash dump * Fix evacuate support with Nova cells v1 * libvirt: don't attempt to get baseline cpu features if host cpu model is None * Ensure there are no unreferenced closures in tests * libvirt: set libvirt.sysinfo\_serial='none' for virt driver tests * libvirt: Add ppc to supported arch for NUMA * Use new inventory schema in all compute\_node gets * Remove unused libvirt \_get\_all\_block\_devices and \_get\_interfaces * Use new inventory schema in compute\_node\_get\_all() * Deprecate nova.hooks * Adjust resource-providers models for resource-pools * Fix Cells RPC API by accepting a RequestSpec arg * API: Improve os-migrateLive input parameters * Allow block\_migration and disk\_over\_commit to be None * Update time is not updated when metadata of aggregate is updated * complete the removal of api\_version from rest client parameters * objects: add HyperVLiveMigrateData stub * functional: Grab the service version from the module * Added missed '-' to the rest api history doc * Gracefully handle cancelling all events more than once * Cleanup service.kill calls in functional tests * Do not use constraints for venv * VMware: Use actual VM state instead of using the instance vm\_state * Do not pass call\_xenapi unmarshallable type * check max\_net\_count against min\_count when booting * objects: Allow instance to reset the NUMA topology * Mark 'network\_device\_mtu' as deprecated * Add service binary/host to service is down log 
for context * Abort an ongoing live migration * Add new APIs and deprecate old API for migrations * Deprecate conductor manager option * Xen: Calculate block\_migration if it's None * Libvirt: Calculate block\_migration if it's None * NUMATopologyFilter raise exception and not continue filter next node * Updated from global requirements * Add specific method to lazy-load instance.pci\_devices * Move logging outside of LibvirtConfigObject.to\_xml * Update the help for deprecated glance host/port/protocol options * Added missing execution of the test * Add build\_requests database table and model * Make db.aggregate\_get a reader not a writer * Remove an unnecessary variable in a unit test * Remove duplicate test case flavor\_create * Don't lazy-load instance.services if the instance is deleted * Add functional regression test for list deleted instances on v2.16 * Use constant\_time\_compare from oslo.utils * Remove APIRouterV3 * reduce pep8 requirements to just hacking * fix usage of opportunistic test cases with enginefacade * add regression test for bug #1541691 * Creates flavor\* tables in API database * Add test for unshelve in the conductor API * add a place for functional test to block specific regressions * make microversion a client level construct for tests * Allocate uuids for aggregates as they are created or loaded * bug and tests in 'instance\_info\_cache' * fix typo in comment * Fix conductor to \*really\* pass the Spec obj * Updated from global requirements * Catch iscsi VolumeDeviceNotFound when detaching * Add note about using OS-EXT-\* prefix for attribute naming * Remove use of \`list\` as variable name * resource-provider versioned objects * Fix networking exceptions in ComputeTestCase * Fix online\_data\_migrations() not passing context * Fix two bugs in online\_data\_migrations() * Make online\_data\_migrations do smaller batches in unlimited case * Use MTU value from Neutron in OVS/LB VIF wiring * tox: Remove 'oslo.versionedobjects' dependency * Fix API Guide doc * Add functional regression test for bug 1552888 * Fix an unnecessary interpolation * Change wording of microversion bump about 503 * Validate subs in api samples base class to improve error handling * Add a column for uuid to aggregate\_hosts * Hyper-V: Removes pointless check in livemigrationops * XenAPI: Fix VIF plug and unplug problem * Update ComputeNode values with disk allocation ratios in the RT * Update HostManager and DiskFilter to use ComputeNode disk ratio * Add disk\_allocation\_ratio to ComputeNode * config options: Centralise 'virt.disk' options * config options: Centralise 'virt.netutils' options * Improve 'virt.firewall' conf options documentation * config options: Centralise 'virt.firewall' options * Improve 'virt.images' conf options documentation * config options: Centralise 'virt.images' options * Update wrong comment * Fix misuse of assertTrue in console and virt tests * Failed migration shoudn't be reported as in progress * Fix missing of unit in debug info * always use python2.7 for pep8 * servicegroup: remove the zookeeper driver * Hacking: check for deprecated os.popen() * Log successful reverts\_task\_state calls * Hyper-V: os\_win related updates * Partial revert of ec2 removal patch * Fixed leaked UnexpectedMethodCallErrors in test\_compute * Unshelve using the RequestSpec object * Provide ReqSpec to live-migrate conductor task * Fix cell capacity when compute nodes are down * Fix misleading test name * Default "discoverable" policies to "@" * build smaller name regexes for 
validation
* Add reno for block live migration with cinder volumes
* Remove support for integer ids in compute_api.get
* Add annotation to the kill() method
* Add missing os types: suseGuest64/suseGuest
* Hypervisor support matrix: add feature "trigger crash dump"
* Update example policy.json to remove "" policies
* Fixed argument order in remove_volume_connection
* Add better help text to scheduler options (7)
* Add better help text to scheduler options (6)
* RT: Decrease usage for offloaded instances
* Allow saving empty pci_device_pools in ComputeNode object
* Add StableObjectJsonFixture and use it in our base test class
* nova-manage: Add hooks for running data-migration scripts
* always use pip constraints
* Update instance host in post live migration even when exception occurs
* Use imageutils from oslo.utils
* Remove duplicate key from dictionary
* reset task_state after select_destinations failed
* Pass bdm info to _get_instance_disk_info method
* Fix create snapshot failure on VMs with SRIOV
* Reorder name normalization for DNS
* Allocate UUID for compute node
* rpc.init() is being called twice per test
* Use instance hostname for Neutron DNS unit tests
* objects: Rename PciDevice _migrate_parent_addr method
* Use assertRaises() to check specific exception
* libvirt: make live_migration_uri flag dependent on virt_type
* Remove unused CONF imports
* Add /usr/local/{sbin,bin} to rootwrap exec_dirs
* write live migration progress detail to DB in migration monitor
* Add migration progress detail in DB
* Tolerate installation of pycryptodome
* neutron: handle attach interface case with no networks
* Move Disk allocation ratio to ResourceTracker
* Updated from global requirements
* HyperV: Fix vm disk path issue
* Removal of unnecessary `import_opt`s for cells config options
* Fix 500 error for showing deleted flavor details
* Fix _compare_result type handling comparison
* neutron: remove redundant request.network_id assignment
* Fix reported ppc64le bug on video selection
* Improve 'virt.driver' conf options documentation
* Improve unit tests for instance multiple create
* Change populate_security_groups to return a SecurityGroupList
* Fix error message in imagebackend
* config options: Centralise 'virt.driver' options
* Avoid lazy-loading flavor during usage audit
* resource_providers, allocations and inventories models
* Revert "Add new test_rebuild_instance_with_volume to cells exclude list"
* Update the CONF import path for VNC
* Improve 'vnc' conf options documentation
* Remove discoverable policy from server:migrations resource
* Improve the help text for cells options (6)
* Improve the help text for cells options (5)
* Improve the help text for cells options (4)
* Improve the help text for cells options (3)
* Improve the help text for cells options (2)
* Allow block live migration of an instance with attached volumes
* Implement an indexed ResourceClass Enum object
* Add check to limit maximum value of max_rows
* Fix spelling mistake
* Add methods for RequestContext to switch db connection
* virt: osinfo will report once if libosinfo is not loaded
* Replace eventlet-based raw socket client with requests
* Add a tool for reserving migration placeholders during release time
* libvirt: check for interface when detach_interface fails
* libvirt: implement LibvirtConfigGuestInterface.parse_dom
* Filter APIs out from services list
* Config options: centralize options in conductor api
* Improve the help text for cells options (1)
* VMware: add release
notes for the limits * Get a ReqSpec in evacuate API and pass it to scheduler * Fixes cells py3 unit tests * Fixes network py3 unit tests * Fixes Python 3 unit tests for nova.compute * Add new test\_rebuild\_instance\_with\_volume to cells exclude list * Add some obvious detail to nw\_info warning log * Fix fallocate test on newer util-linux * Remove \_create\_local function * Trivial logic cleanup in libvirt pre\_live\_migration * Return HTTP 400 for invalid server-group uuid * Properly inject network\_data.json in configdrive * enginefacade: remove 'get\_session' and 'get\_api\_session' * enginefacade: 'request\_spec' object * Add new API to force live migration to complete * Add new DB API method to retrieve migration for instance * Imported Translations from Zanata * Updated from global requirements * Sync L3Driver, NullL3 interface with LinuxNetL3 * Top 100 slow tests: api.openstack.compute.test\_api * Top 100 slow tests: api.openstack.compute.test\_versions * Top 100 slow tests: legacy\_v2.test\_servers * Top 100 slow tests: api.openstack.compute.test\_flavor\* * Top 100 slow tests: api.openstack.compute.test\_image\_size * Top 100 slow tests: api.openstack.compute.test\_volumes * Confusing typo fixed * doc: all\_tenants query option incorrectly identified as non-admin * Update driver support matrix for Ironic * parametrize max\_api\_version in tests * libvirt: Race condition leads to instance in error * Avoid lazy-loads in metadata requests * Join flavor when re-querying instance for floating ip association * Allow all api\_samples tests to be run individually * Make os-instance-action read deleted instances * enginefacade: 'flavor' * Updated from global requirements * Use instance hostname for Neutron DNS * libvirt: Make behavior of os\_require\_quiesce consistent * Split-network-plane-for-live-migration * Database not needed for most cells messaging tests * libvirt: use osinfo when configuring the disk bus * libvirt: use osinfo when configuring network model * Database not needed for test class: ConsoleAPITestCase * Database not needed for test class: ConductorImportTest * virt: adjusting the osinfo tests to use fakelibosinfo * Database not needed for RPC serializer tests * Database not needed for most crypto tests * Database not needed for most nova manage tests * ebtables/libvirt workaround * Test that new tables don't use soft deletes * Use instance in setup\_networks\_on\_host * enginefacade: test\_db\_api cleanup, missed decorators * Database not needed for test class: PciGetInstanceDevs * Add test coverage to functional api tests \_compare\_result method * Remove and deprecate conductor provider\_fw\_rule\_get\_all() * Remove prelude from disk-weight-sch reno * Enable volume operations for shelved instances * Gracefully handle a deleting instance during rebuild * remove the unnecessary parem of set\_vm\_state\_and\_notify * tests: adding fake libosinfo module * config options: Centralise 'vnc' options * config options: Make noVNC proxy into vnc group * Improve 'pci' conf options documentation * config options: centralize section "wsgi" * libvirt: deprecate live/block\_migration\_flag opts * Tidy up scheduler\_evolution.rst * config options: add hacking check for help text length * xrange() is renamed to range() in Python 3 * Do not use "file" builtin, but "open" instead * Fix some word spellings in messages * No need to have ironicclient parameter in methods * Add a TODO to make ComputeNode.cpu\_info non-nullable * Fix missing marker functions in nova/pci * Adding volume 
operations for shelved instances * Optimize Instance.create() for optional extra fields * Optimize servers path by pre-joining numa\_topology * Trivial: Remove a duplicated word * Update the home-page * Add better help text to scheduler options (5) * Switch to oslo.cache lib * Remove all remaining references to Quantum * doc: remove detail about extensions * Add description for trigger crash dump * Object: Give more helpful error message in TestServiceVersion * Spread allocations of fixed ips * Updated from global requirements * Stop using mox (scheduler) * Fix xvpvncproxy config path when running n-xvnc * Optimize the instance fetched by floating\_ips API * Improve efficiency of Migration.instance property * Prevent \_heal\_instance\_info\_cache() periodic lazy-loads * Revert "Added new scheduler filter: AggregateTypeExtraSpecsAffinityFilter" * Remove unused provider firewall rules functionality in nova * enginefacade: 'instance\_tags' * Apply scheduler limits to Exact\* filters * Fix typos in nova/scheduler and nova/virt * Replace exit() by sys.exit() * Trivial: Fix a typo in test\_policy.py * neutronv2: Allow Neutron to specify OVS/LB bridge * HyperV: do not log twice with different level * Replace stubs.Set with stub\_out (db) * Add a disk space weight-based scheduler * Fix up live-migration method docstrings * Libvirt: Support ovs fp plug in vhostuser vif * xenapi: simplify swap\_xapi\_host() * Allow sending the migrate data objects over the wire * Added new scheduler filter: AggregateTypeExtraSpecsAffinityFilter * Replace "all\_mappings" variable by "block\_device\_mappings" * Add better help text to scheduler options (4) * Migrate from keystoneclient to keystoneauth * fast exit dhcpbridge on 'old' * Ironic: Lightweight fetching of nodes * Fix RequestSpec \_from\_db\_object * doc:Ask reviews to reject new legacy notifications * Generate doc for versioned notifications * doc: add devref about versioned notifications * Adds json sample for the versioned notifications * relocate os\_compute\_api:servers:discoverable * libvirt: convert to use instance.image\_meta property * Updated from global requirements * doc: fix malformed api sample * Persist the request spec during an instance boot * Revise the compute\_upgrade\_levels\_auto release note * Adding guard on None value for some helpers method * Return HTTP 400 if volume size is not defined * API: Rearrange HTTPBadRequest raising in \_resize * remove the wrong param of fake\_db\_migration initiation * Enable all extension for server PUT API sample tests * Config options: centralize options in availability\_zones * We now require gettext for dev environments * Revert "Pass host when call attach to Cinder" * update feature support matrix documentation * Config options: centralize section "cells" * Use uuidsentinel in host\_status test * remove not used tpl * Return 409 instead of 503 when cidr conflict * releasenotes: Note on CPU thread pinning support * Use extra\_data\_func to get fingerprints of objects * Use stevedore for scheduler driver * Use stevedore for scheduler host manager * Enables conductor py3 unit tests * REST API changes for user settable server description * Use get\_notification\_transport() for notifications * Stop using stubs.Set in vmwareapi unit tests * Add tests for nova.rpc module * libvirt: check min required qemu/kvm versions on ppc64/ppc64le * VMware: Handle image size correctly for OVA and streamOptimized images * enginefacade: 'instance\_group' * enginefacade: 'floating\_ip' * enginefacade: 'compute\_node' * 
enginefacade: 'service' * Hyper-V: Trace original exception before converting exception * Fixed incorrect names/comments for API version 2.18 * Remove mox from tests/unit/objects/test\_keypair.py * API: Remove unexpected from errors get\_console\_output * Updated from global requirements * Fix docstrings for sphinx * Make project\_id optional in v2.1 urls * remove not used tpl file * Log retries at INFO level per guidelines * make logic clearer about template selection * Add ITRI DISCO os-brick connector for libvirt * Fix misleading comment of pci\_stats * cleanup: remove python 2.6 compat assertNotIsInstance * Add better help text to scheduler options (3) * (lxc) Updated regex to ignore failing tests * Add better help text to scheduler options (2) * Add better help text to scheduler options (1) * Note in HypervisorSupportMatrix for Libvirt/LXC shutdown kernel bug * Ceph for live-migration job * enginefacade: 'security\_group' * enginefacade: 'instance' * enginefacade: 'fixed\_ip' * enginefacade: 'quota' and 'reservation' * Python3: Replace dict.iteritems with six.iteritems * Updated from global requirements * Object: Fix wrong usage migrate\_data\_obj * \_can\_fallocate should throw a warning instead of error * VMware: no longer convert image meta from dict to object * cleanup: add comments about the pre/post extension processing * cleanup: remove custom serializer support * Add description for server query * remove docs about format extensions * Remove catching of ComputeHostNotFound exception * Return empty object list instead [] * cleanup: remove configurable action\_peek * libvirt: use native AIO mode for cinder volumes * libvirt: use native AIO mode for image backends * Issue an info log msg when port quota is exceeded * Validate translations * Imported Translations from Zanata 13.0.0.0b2 ---------- * doc: add client interactive guideline for microversions * doc: add version discovery guideline in api concept doc * doc: completes microversion use-cases in api concept doc * Fix indents of servers-detail-resp.json * libvirt: make snapshot use RBD snapshot/clone when available * Improve the help text for the cert options * cleanup: remove infrastructure for content/type deserializer * Pass host when call attach to Cinder * Pass attachment\_id to Cinder when detach a volume * libvirt: Fix/implement revert-resize for RBD-backed images * Added super() call in some of the Model's child * enginefacade: 'ec2\_instance' and 'instance\_fault' * cleanup: collapse wsgi serializer test hierarchy * Add service status notification * cleanup: remove wsgi serialize/deserialize decorators * enginefacade: 'block\_device\_mapping' * Fix invalid import order * Add a REST API to trigger crash dump in an instance * libvirt: adding a class to retrieve hardware properties * virt: introduce libosinfo library to set hardware policy * pci: changing the claiming and allocation logic for PF/VF assignment * pci: store context when creating pci devices * Make emitting versioned notifications configurable * Add infra for versioned notifications * Make sure that we always have a parent\_addr set * change set\_stubs to use stub\_out in vmwareapi/stubs.py * Add note to ComputeNode.numa\_topology * Reno for lock policy * Clean up nova/conf/scheduler.py * Reno for Xen rename * config options: Make xvp proxy into vnc group * XenAPI: Fix race on rotate\_xen\_guest\_logs * Add exception handling in \_cleanup\_allocated\_network * hardware: check whether realtime capable in API * Remove releasenotes/build between releasenotes 
runs * Add python3\* packages to development quickstart guide * Make sure full stack trace is logged on RT update failure * Changed filter\_by() to filter() during filtering instances in db API * config options: Centralise PCI options * HyperV: Set disk serial number for attached volumes * Use "regex" of StrOpt to check option "port\_range" * enable uefi boot * VMware: convert to use instance.image\_meta property * Config drive: convert to use instance.image\_meta property * Use of six.PY3 should be forward compatible * Add host\_status attribute for servers/detail and servers/{server\_id} * Revert "Workaround reno reverts by accepting warnings" * Adds relase notes for soft affinity feature * libvirt: handle migrate\_data as object in cleanup method * Create filter\_properties earlier in boot request * Parse availability\_zone in API * Add object and database support for host\_status API * Workaround reno reverts by accepting warnings * ports & networks gather should validate existance * objects: add virtual 'image\_meta' property to Instance object * compute: convert manager to use nova.objects.ImageMeta * Replace stubs.Set with stub\_out (os) * Fix Mock assert\_called\_once\_with() usage * ServerGroupsV213SampleJsonTest should actually test v2.13 * Move config options from nova/cert directory * Remove dead code from reserve\_block\_device\_name rpcapi * Adapt the code to the new get\_by\_volume BDM functions * Fix undetected races when getting BDMs by volume id * Fix instance not destroyed after successful evacuation * Use TimeFixture from oslo\_utils in functional tests * Fix indexing of dict.keys() in python3 * libvirt: add a new live\_migration\_tunnelled config * libvirt: force config related migration flags * libvirt: force use of direct vs p2p migration * libvirt: force use/non-use of NON\_SHARED\_INC flag * libvirt: parse live migration flags at startup * enginefacade: 'aggregate' * Add helper shim for getting items * hacking: check for common double word typos * Fix backing file detection in libvirt live snapshot * trivial: Add additional logs for NUMA scheduling * Add 'hw:cpu\_threads\_policy=isolate' scheduling * Replaces itertools.izip with six.moves.zip * Clean up network resources when reschedule fails * Replace stubs.Set with stub\_out (fakes) * Add maximum microversions for each releases * Remove "or 'reserved'" condition from reserve\_block\_device\_name * live-migration hook ansible 2.0 compaitability * update min tox version to 2.0 * pci: adding support to specify a device\_type in pci requests * Block flaky python34 test : vmwareapi.test\_configdrive.ConfigDriveTestCase * Actually pass the migration data object down to the virt drivers * nova conf single point of entry: fix error message * Fix sphinx warnings from signature\_utils * Sets binding:profile to empty dic when unbinding port * Use timedelta.total\_second instead of calculating * Use stub\_out and mock to remove mox:part 3 * Replaces \_\_builtin\_\_ with six.moves.builtins * Remove mm-ctl from network.filters * Add mm-ctl to compute.filters * Add reviewing point related to REST API * Stop using mox stubs in nova.tests.unit.console * pci: do not filter out any SRIOV Physical Functions * objects: update the old location parent\_addr only if it has value * Add xenapi support for XenapiLiveMigrateData objects * Fixes Hyper-V unit tests for latest os\_win release * Add 'hw:cpu\_thread\_policy=require' scheduling * add "hw\_firmware\_type" image metadata * Docstring change for consistency * Add tests for metadata 
functions * libvirt: fix TypeError calling \_live\_migration\_copy\_disk\_paths * Add DiskFormat as Enum in fields * Remove DictCompat from EC2 objects * Remove DictCompat from DNSDomain * Add description on how to run ./run\_test.sh -8 * Propagate qemu-img errors to compute manager * Change assertEqual(True/False) to assertTrue/False * objects: adding a parent\_addr field to the PciDevice object * Add caching of service\_min\_versions in the conductor * Scheduler: enforce max attempts at service startup * Fix unit tests on Mac OS X * Stop using mox stubs in nova.tests.unit.api.openstack.compute.test\_services * libvirt: add discard support for attached volumes * Remove DictCompat from CellMapping * Remove NovaObjectDictCompat from Aggregate * XenAPI: Cope with more Cinder backends * single point of entry for sample config generation * Remove Deprecated EC2 and ObjectStore impl/tests * libvirt: add realtime support * Imported Translations from Zanata * libvirt: update to min required version to 0.10.2 * Remove null AZ tests from API tests * Replace stubs.Set with stub\_out (functional tests) * Updated from global requirements * doc: minor corrections to the API version docco * Refactor \_load\_support\_matrix * Fix format conversion in libvirt snapshot * Fix format detection in libvirt snapshot * api: add soft-affinity policies for server groups * scheduler: fill RequestSpec.instance\_group.members * scheduler: add soft-(anti-)affinity weighers * Implements proper UUID format for compute/test\_stats\* * Add image signature verification * Convert nova.tests.unit.image.fake.stub\_out\_image\_service to use stub\_out * Block more flaky py34 tests * Replace deprecated library function os.popen() with subprocess * Remove mox and Stubs from tests/unit/pci/test\_manager.py * Correct the code description * Fix advice for new contribs * libvirt: better error for bad live migration flag * Add argument to support-matrix sphinx extension * Wrong URL reported by the run\_tests.sh message * Make use of 'InstanceNUMACell.cpu\_policy' field * Add 'cpu\_policy' and 'cpu\_thread\_policy' fields * Add 'CPUThreadAllocationPolicy' enum field * Blacklist flaky tests and add warning * Modify Scheduler RPC API to use RequestSpec obj * Implements proper UUID format for test\_compute\_mgr * Remove get\_lock method and policy action * libvirt: sort block\_device\_list in volume\_in\_mapping log * Stop explicitly running test discovery for py34 * introduce \`\`stub\_out\`\` method to base test class * Cleanup NovaObjectDictCompat from security\_group\_rule * Remove useless header not need microversion * Implements proper UUID format for test\_compute * Move Process and Mentoring pages to devref * Document restrictions for working on cells v1 * api-guide: add a doc on users * Assignment (from method with no return) removed * remove use of \_get\_regexes in samples tests * Improve 'virt' conf options documentation * config options: Centralise 'virt.hardware' options * Get list of disks to copy early to avoid multiple DB hits * Remove non-unicode bind param warnings * Fix typo, ReST -> REST * Wrong spelling of defined * libvirt: fix typo in test\_init\_host\_migration\_flags * docs: update refs to mitaka release schedule * doc: add how to arrange order of scheduler filters * libvirt: only get instance.flavor if needed in get\_disk\_mapping * Replace backtick with apostrophe in lazy-loading debug log * libvirt: fix TypeError in find\_disk\_dev\_for\_disk\_bus * Fix RPC revision log entry for 4.6 * signature\_utils: move 
to explicit image metadata * Unreference mocks are listed in the wrong order * remove API v1.1 from testing * remove /v1.1 from default paste.ini * libvirt: verify cpu bw policy capability for host * Implements proper UUID format for test\_compute\_cells and test\_compute\_utils * Add the missing return value in the comment * Updated from global requirements * xen: block BootableTestCase from py34 testing * Modify conductor to use RequestSpec object * db: querry to retrieve all pci device by parent address * db: adding columns to PciDevice table * Replace except Exception with specific exception * pci: minor fix to exception message format * Python 3 deprecated the logger.warn method in favor of warning * Check added for mandatory parameter size in schema * Remove redundant driver initialization in test * enginefacade: 'instance\_metadata' * Misspelling in messages * Add lock to host-state consumption * Add lock to scheduler host state updating * Allow virt driver to define binding:host\_id * [python3] Webob request body should be bytes * Replace copy.deepcopy of RequestContext with copy.copy * DriverBlockDevice must receive a BDM object, not a dict * Misspelling in message * Wrong usage of "a" * Remove unused logging import and LOG global var * Reduce the number of db/rpc calls to get instance rules * Use is\_supported() to check microversion * SameHostFilter should fail if host does not have instances * VMware: add method for getting hosts attached to datastore * Trivial: Fix wrong comment in service version * signature\_utils: handle ECC curve unavailability * Updated from global requirements * tests: Remove duplicate check * enginefacade: 'bw\_usage', 'vol\_usage' and 's3\_image' * VMware: improve instance names on VC * VMware: add in folder support on VC * VMware: cleanup unit test global variable * signature\_utils: refactor the list of ECC curves * Nuke EC2 API from api-paste and remove wsgi support * Remove cruft for things o.vo handles * Make scheduler\_hints schema allow list of id * Change logging level for 'oslo\_db' * Remove unused compute\_api in ServerUsageController * network: Don't repopulate instance info cache from Neutron ports * Fix doc comment for get\_available\_resource * objects: lazy-load instance.security\_groups more efficiently * VMware: cleanup unit tests * Use SpawnIsSynchronousFixture in most unit tests * Use stub\_out and mock to remove mox: part 1 * Disable the in tree EC2 API by default * deprecate old glance config options * remove temporary GlanceEndpoint object * convert GlanceClientWrapper to endpoint * Use stub\_out and mock to remove mox: part 2 * Add a compute API to trigger crash dump in instance * Make libvirt driver return migrate data objects for source and dest checks * Use TimeFixture from oslo\_utils to override time in tests * enginefacade: 'vif' and 'task\_log' * review guide: add location details for config options * libvirt: wrapper list\_guests to Host's object * remove vestigial XML\_NS\_V11 variable * remove unused EXTENSION\_DESERIALIZE\_\* constants * config options: Centralise 'virt.ironic' options * remove unused pipeline\_factory\_v3 alias * remove unused methods from integrated\_helpers test class * remove unused extends\_name attribute * Add upload/download vhd2 interfaces * Replace unicode with six.text\_type * conductor: fix unbound local variable request\_spec * Use just ids in all request templates for flavors/images * extract non instance methods * remove unused trigger\_handler * remove unused 
update\_dhcp\_hostfile\_with\_text method * remove nova-cert from most functional tests * enginefacade: 'migration' * XenAPI: Fix race in rotate\_xen\_guest\_logs * libvirt: introduce "pause" to Guest's object * libvirt: introduce "shutdown" to Guest's object * libvirt: introduce "snapshot" to Guest's object * libvirt: introduce thaw filesystems * libvirt: introduce freeze filesystems * libvirt: replace direct libvirt's call AbortJobBlock * Allow to update 'v2.1' links in sample files * Do not update links for 'versions' tests * centeralized conf:compute/emphemeral\_storage\_encryption * Add instance.save() when handling reboot in init instance * Add transitional support for migrate data objects to compute manager * Implements proper UUID format for few objects tests * Filter by leased=False when allocating fixed IPs * Increase informations in nova-net warnings * docs: add concept guide for certificate * Fix reclaim\_instance\_interval < 0 never delete instance completely * Updated from global requirements * Add placeholders for config options * Implements proper UUID format for the fake\_network * Refresh stale volume BDMs in terminate\_connection * Block requests 2.9.0 * Implements proper UUID format for the test\_compute\_api * Remove onSharedStorage from evacuate API * Fix CPU pinning for odd number of CPUs w hyperthreading * hardware: stop using instance cell topology in CPU pinning logic * Check context before returning cached value * deprecate run\_tests.sh * remove archaic references to XML in api * simplify the request / response format document * Add signature\_utils module * Remove XML description from extension concept * remove ctype from classes * Remove cells service from api samples that don't test cells * Add uuidsentinel test module * Remove the wrong usage of api\_major\_version in api sample tests * Updated from global requirements * Fix wrong method name in doc filter\_scheduler * doc: update threading.rst * Makes GET extension info sample tests run for v2 also * update api\_samples code to use better variables * Remove incorrect comments about file injection * Remove a restriction on injection files * Remove unnecessary log when search servers * Deprecated tox -downloadcache option removed * rework warning messages for extension whitelist/blacklist * Make sure bdm.volume\_id is set after auto-creating volumes * Replace safe\_utils.getcallargs with inspect.getcallargs * Fix wrap\_exception to get all arguments for payload * Add hypervisor, aggregates, migration description * retool xen glance plugin to work with urls * always create clients with GlanceEndpoint * Implement GlanceEndpoint object * Clean up glance url handling * Use RequestSpec in the ChanceScheduler * Modify left filters for RequestSpec * Modify NUMA, PCI and num\_instances filters for RequestSpec * Improve inject\_nmi() in libvirt driver and add tests * Report compute-api bugs against nova * XenAPI: Expose labels for ephemeral disks * Fix use of safeutils.getcallargs * Cache SecurityGroupAPI results from neutron multiplexer * Remove the executable bit from several python files * Optimize \_cleanup\_incomplete\_migrations periodic task * [Py34] api.openstack.compute.legacy\_v2.test\_servers.Base64ValidationTest * [Py34] api.openstack.test\_faults.TestFaultWrapper * [Py34] Enable api.openstack.test\_wsgi unit test * default host to service name instead of uuid * Remove start\_service calls from the test case * Add SIGHUP handlers for compute rpcapi to console and conductor * Cache the automatic version pin 
to avoid repeated lookups * virt: allow for direct mounting of LocalBlockImages * Use testscenarios to set attributes directly * update API samples to use endpoints * Updated from global requirements * Add project-id and user-id when list server-groups * Fixes Python 3 compatibility for filter results * Remove duplicate default=None for option compute\_available\_monitors * Disable IPv6 on bridge devices * Don't load deleted instances * Improve Filter Scheduler doc clarity * libvirt: report pci Type-PF type even when VFs are disabled * Remove deprecated neutron auth options * Fix capitalization of IP * Add separated section for configure guest os * Add separated section for extra specs and image properties * Add a note about fixing "db type could not be determined" with py34 * neutron: skip test\_deallocate\_for\_instance\_2\* in py34 job * tighten regex on objectify * Replace os.path.join() for URLs * Add hv testing for ImageMetaProps.\_legacy\_property\_map * Edit the text to be more native-English sounding * docs: add test strategy and feature classification * Fix the endpoint of /v2 on concept doc * Drop JSON decoding for supported\_instances * docs: update old stuff in version section * Scheduler: honor the glance metadata for hypervisor details * Implements proper UUID format for the ComputeAPITestCase * docs: add microversions description in the concept doc * Make admin consistent * Add more concepts for servers * Make "ReSTful service" consistent * Add retry logic for detaching device using LibVirt * Fix Exception message consistency with input protocol * Remove SQLite BigInteger/Integer translation logic * xen: Drop JSON for supported\_instances * vmware: Drop JSON for supported\_instances * ironic: Drop JSON for supported\_instances * hyperv: Drop JSON for supported\_instances * libvirt: Drop JSON for supported\_instances * Drop JSON for stats in virt API * Replaces izip\_longest with six.moves.zip\_longest * Fixes dict keys and items references for Python 3 * Scheduler: correct control flow when forcing host * Replaces longs with ints * neutron: only get port id when listing ports in validate\_networks * neutron: only list ports if there is a quota limit when validating * Add reviewing point related to REST API * Revert "Enable options for oslo.reports" * Fix wrong CPU metric value in metrics\_filter * Reset the compute\_rpcapi in Compute manager on SIGHUP * Remove the unused sginfo rootwrap filter * docs: ensure third party tests pass before +2 * Config options: centralize section "scheduler" * add api-samples tox target * Remove Instance object flavor helper methods only used in tests * Remove unnecessary extra instance saves during resize * docs: using the correct format and real world example for fault message * VMware: cleanup ExtraSpecs * Remove HTTPRequestEntityTooLarge usage in test * Enables py3 unit tests for libvirt.host module * Replaces \_\_builtin\_\_ with six.moves.builtins * Converting nova.virt.hyperv to py3 * Hyper-V: removes \*Utils modules and unit tests * docs: update services description for concept guide * docs: remove duplicated section about error handling * Remove Useless element in migrate\_server shcema * Optimize "open" method with context manager * trivial: Add some logs to 'numa\_topology\_filter' * Updated from global requirements * Docs: update the concept guide for Host topics * Cleanup of compute api reboot method * Hyper-V: adds os-win library * Remove description about image from faults section * api-guide: add note about users * Updated from 
global requirements * xenapi: Add helper function and unit tests for client session * Config options: centralize section "scheduler" * Ironic: Workaround to mitigate bug #1341420 * Libvirt: Support fp plug in vhostuser vif * Remove version from setup.cfg 13.0.0.0b1 ---------- * Add note for automatic determination of compute\_rpc version by service * Add note for Virtuozzo supporting snapshots * Add note for HyperV 2008 drop of support * Imported Translations from Zanata * Add note for removing conductor RPC API v2 * Add note for dropping InstanceV1 objects * Add note for force\_config\_drive opt change * Add note for deprecating local conductor * Revert "Detach volume after deleting instance with no host" * force releasenotes warnings to be treated as errors * Fix reno warning for API DB relnote * Adding a new vnic\_type for Ironic/Neutron/Nova integration * Use o.vo DictOfListOfStringsField * libvirt: remove todo note not useful anymore * Modify metric-related filters for RequestSpec * Modify default filters for RequestSpec * servicegroup: stop zombie service due to exception * Add persistence to the RequestSpec object * Updated from global requirements * add hacking check for config options location * Correct some nits for moving servers in concept doc * use graduated oslo.policy * TrivialFix: remove 'deleted' flag * Make server concept guide use 'server' consistently * api-guide: fix up navigation bar * Use version convert methods from oslo\_utils.versionutils * docs: reorder move servers text * docs: add clarifications to move servers * Change some wording on server\_concepts.rst * Cleanup unused test code in test\_scheduler.py * Modify Aggregate filters for RequestSpec * Add code-review devref for release notes * Hyper-V: refines the exceptions raised in the driver * Use o.vo FlexibleBooleanField * docs: describe migration and other movement concepts * Double 'an' in message * Unify on \_schedule\_instances * Add review guideline to microversion API * Remove the TestRemoteObject class * Catch FixedIpNotFoundForAddress when create server * doc: add server status to concept.rst * docs: update the concept guide shelve actions * Fixed incorrect name of 'tag' and 'tag-any' filters * Fix resource tracker VCPU counting * Add relnote for change in default setting * use NoDBTestCase for KeypairPolicyTest * doc: change policies.rst to indicate API links * Remove useless code in \_poll\_volume\_usage function * Neutron: add logging context * Remove unused param of CertificatesController * Add user data into general concept * Fix a typo in api-guide doc * Make some classes inherit from NoDBTestCase * XenAPI: Workaround for 6.5 iSCSI bug * NFS setup for live-migration job * Fix ebtables-version release note * config options: enhance help text of section "serial\_console" * Updating nova config-reference doc * Updated from global requirements * Prevent redundant instance.update notifications * VMware: fix docstring for cluster management * api: remove re-declared type in migrate schema * enginefacade: 'agent' and 'action' * config options: centralize section "serial\_console" * Replaced private field in get\_session/engine with public method * SR-IOV: Improve the vnic type check in the neutron api * Simplified boolean variable check * update connect\_volume test * Enable options for oslo.reports * Reverse sort tables before archiving * scheduler: fix incorrect log message * Updated from global requirements * Add release note for API DB migration requirements * Replaced deprecated timeutils methods 
* Multinode job for live-migration * Use o.vo VersionPredicateField * Use flavor instead of flavour * Corrected few grammatical nitpics * Add more 'actions' for server concepts doc * libvirt: mlnx\_direct vif type removal * xen: mask passwords in volume connection\_data dict * Updated from global requirements * Use --concurrent with ebtables * Removed extra spaces from double line strings * Change test function name to make more sense * Change Invalid exception to a specified exception * Add 'lxd' to the list of recognized hypervisors * Add microversions schema unit test for None * Clean up legacy multi-version test constructs * Fix Nova's indirection fixture override * Remove skips for resize tests from tempest-dsvm-cells-rc * Modify Affinity filter for RequestSpec * Prepare filters for using RequestSpec object * Use ServiceList object rather than direct db call * Add relnote for ERT deprecation * Remove IN-predicate warnings * docs: update the API faults concept guide * Deprecate nova-manage service subcommand * Double detach volume causes server fault * Use JSON format instead of json format * Network: add in missing translation * cells is a sad panda about scheduler hints * VMware: expand support for Opaque networks * Fix is\_volume\_backed\_instance() for unset image\_ref * Add \_LE to LOG.error statement in nova/service * Add service records for nova-api services * Added method is\_supported to check API microversions * enginefacade: 'host\_mapping' * Removes support for Hyper-V Server 2008 R2 * Fix the bug of "Error spelling of 'explicitely'" * Claims: fix log message * Fix paths for api-guide build * Remove flavors.get\_flavor() only used in tests * VMware: Raise DiskNotFound for missing disk device * Remove two unneeded db lookups during delete of a resizing instance * Fix pci\_stats logging in resource tracker * live-mig: Mark migration as failed on fail to schedule * Move the Migration set-status-if-exists pattern to a method * Don't track migrations in 'accepted' state * live-migrate: Change the status Migration is created with * compute: split check\_can\_live\_migrate\_destination * Replace N block\_device\_mapping queries with 1 * Add "unreleased" release notes page * Add reno for release notes management * XenAPI: Correct hypervisor type in Horizon's admin view * Fix typo in test\_post\_select\_populate * Rearranges to create new Compute API Guide * Added CORS support to Nova * Aggregate Extra Specs Filter should return if extra\_specs is empty * cells: skip 5 networking scenario tests that use floating IPs * force\_config\_drive: StrOpt -> BoolOpt * Updated from global requirements * Add test coverage for both types of not-found-ness in neutronclient for floating * Fix impotent \_poll\_shelved\_instances tests * Fix race in \_poll\_shelved\_instances task * Handle a NeutronClientException 404 Error for floating ips * Handle DB failures in servicegroup DB driver * Hook for live-migration job * Omit RescheduledException in instance\_fault.message * Remove duplicate server.kill on test shutdown * make the driver.Scheduler as abstract class * Fix a spelling mistake in the log * objects: remove remote\_object\_calls from \_BaseTestCase * Repair and rename test\_is\_volume\_backed\_instance\_no\_bdms() * Use ObjectVersionChecker fixture from oslo.versionedobjects * VMware: add in vif resource limitations * Untie subobject versions * Block oslo.messaging 2.8.0 * Split up test\_is\_volume\_backed\_instance() into five functions * Avoid the dual-naming confusion * enginefacade: 
'provider\_fw', 'console\_pool' and 'console' * enginefacade: 'network' * clean up regex in tempest-dsvm-cells-rc * skip lock\_unlock\_server test for cells * ScalityVolume:fix how remote FS mount is detected * OpenStack typo * Remove duplicate keys in policy.json * Add missing policy rules * devref:Don't suggest decorate private method * VMware: use a constant for 'iscsi' * Config drive: make use of an instance object * Fix attibute error when cloning raw images in Ceph * Properly log BlockDeviceMappingList in \_create\_block\_device\_mapping * Exclude all BDM checks for cells * glance:add helper method to get client version * enginefacade: 'dnsdomain' and 'ec2' * enginefacade: 'certificate' and 'pci\_device' * enginefacade: 'key\_pair' and 'cell' * enginefacade: 'instance\_mapping' * enginefacade: 'cell\_mapping' * enginefacade: 'instance\_info' and 'instance\_extra' * Use EngineFacade from oslo\_db.enginefacade * VMware: fix trivial indentations * Remove flavors.get\_all\_flavors() only used in tests * Make lock policy default to admin or owner * libvirt:Fix a typo of test cases * Deprecate local conductor mode * Deprecate Extensible Resource Tracker * Change image to instance in comment * VMware: use oslo\_config new type PortOpt * Remove vcpu resource from extensible resource tracker * Add logging to snapshot\_volume\_backed method * Remove unnecessary destroy call from Ironic virt driver * cells: add debug logging to bdm\_update\_or\_create\_at\_top * Drop Instance v1.x support * Check prefix with startswith() instead of slicing * Add debug logging for when boot sequence is invalid in \_validate\_bdm * remove the redundant policy check for SecurityGroupsOutputController * virt: add constraint to handle realtime policy * libvirt: add cpu schedular priority config * libvirt: rework membacking config to support future features * Do not mask original spawn failure if shutdown\_instance fails * Point to cinder options in nova block alloc docs * Fix booting fail when unlimited project quota * Remove useless get\_instance\_faults() * Remove "Can't resolve label reference" warnings * Remove reservation\_id from the logs when a schedule fails * Use RequestSpec object in HostManager * Use RequestSpec object in the FilterScheduler * Add ppcle architectures to libvirt blockinfo * Deprecated: failIf * Imported Translations from Zanata * Remove obj\_relationships from objects * Delete dead test code * Add tempest-dsvm-lxc-rc * Mark set-admin-password as complete for libvirt in support matrix * Hypervisor support matrix: define pause & unpause * Revert "Implement online schema migrations" * Fix the os-extended-volumes key reference in the REST API history docs * Remove get\_all method from servicegroup API * Remove SoftDeleteMixin from NovaBase * libvirt: support snapshots with parallels virt\_type * Use oslo.config choices kwarg with StrOpt for servicegroup\_driver * Imported Translations from Zanata * Add -constraints sections for CI jobs * Add "vnc" option group for sample nova.conf file * Updated from global requirements * Expands python34 unit tests list * Fix missing obj\_make\_compatible() for ImageMetaProps object * Fix error handling in nova.cmd.baseproxy * Change 'ec2-api' stackforge url to openstack url * Fixes Python 3 str issue in ConfigDrive creation * Revert "Store correct VirtCPUTopology" * Enable all extension for image API sample tests * Add tags to .gitignore * Updated from global requirements * Add a nova functional test for the os-server-groups GET API with all\_projects 
parameter * Image meta: treat legacy vmware adapter type values * Attempt rollback live migrate at dest even if network dealloc fails * hacking check for contextlib.nested for py34 support * Print number of rows archived per table in db archive\_deleted\_rows * Updated from global requirements * Fix more inconsistency between Nova-Net and Neutron * Fix metadata service security-groups when using Neutron * Remove redundant deps in tox.ini * Add some tests for map\_dev * Clean up tests for dropping obj\_relationships * Fix up Service object for manifest-based backports * Fix service\_version minimum calculation for compute RPC * docs: add the scheduler evolution plans * Revert "virt: Use preexec\_fn to ulimit qemu-img info call" * Updated from global requirements * Ensure Glance image 'size' attribute is 0, not 'None' * Ignore errorcode=4 when executing \`cryptsetup remove\` command * libvirt: Don't attempt to convert initrd images * Revert "Fixes Python 3 str issue in ConfigDrive creation" * Monkey patch nova-ec2 api * Compute: remove unused parameter 12.0.0 ------ * Omnibus stable/liberty fix * Drop outdated sqlite downgrade script * Updated from global requirements * Fix Status-Line in HTTP response * Imported Translations from Zanata * Default ConvertedException code to 500 * Updated from global requirements * VMware: fix bug for config drive when inventory folder is used * Fix a typo * code-review guidelines: add checklist for config options * Add a code-review guideline document * virt: Use preexec\_fn to ulimit qemu-img info call * Clean up some Instancev1 stuff in the tests * Updated from global requirements * Replaces contextlib.nested with test.nested * Sync cliutils from oslo-incubator * Make archive\_deleted\_rows\_for\_table private 12.0.0.0rc2 ----------- * load consoleauth\_topic option before using it * Revert "[libvirt] Move cleanup of imported files to imagebackend" * Add more documentation for RetryFilter * Fix InstanceV1 backports to use context * Imported Translations from Zanata * Add test of claim context manager abort * Log DBReferenceError in archive\_deleted\_rows\_for\_table * Use DBReferenceError in archive\_deleted\_rows\_for\_table * Add testresources used by oslo.db fixture * Remove unused context parameter from db.archive\_deleted\_rows\* methods * xenapi\_device\_id integer, expected string * Fix InstanceV1 backports to use context * Drop unused obj\_to\_primitive() override * Updated from global requirements * libvirt: remove unnecessary else in blockinfo.get\_root\_info * Make test cases in test\_test.py use NoDBTest * XenAPI: Fix unit tests for python34 * docs: re-organise the API concept docs * VMware: specify chunk size when reading image data * Make ConsoleauthTestCase inherit from NoDBTest * Change a test class of consoleauth to no db test * Imported Translations from Zanata * Catch 3 InvalidBDM related exc when boot instance * Move create vm states to svg diagram * Ironic: Fix bad capacity reporting if instance\_info is unset * Revert "[libvirt] Move cleanup of imported files to imagebackend" * Honor until\_refresh config when creating default security group * remove sphinxcontrib-seqdiag * [Py34] nova.tests.unit.api.openstack.test\_common * [Py34] Enable api.openstack.test\_mapper unit test * [Py34] Enable test\_legacy\_v2\_compatible\_wrapper * Extend the ServiceTooOld exception with more data * Make service create/update fail if version is too old * Allow automatic determination of compute\_rpc version by service * Add get\_minimum\_version() to 
Service object and DB API * Correct memory validation for live migration * devref: change error messages no need microversion * Replace f.func\_name and f.func\_code with f.\_\_name\_\_ and f.\_\_code\_\_ * Imported Translations from Zanata * Add a note about the 500->404 not requiring a microversion * Ensure Nova metrics derived from a set of metrics * Updated from global requirements * Fixes Python 3 str issue in ConfigDrive creation * Make secgroup rules refresh with refresh\_instance\_security\_rules() * Remove unused refresh\_security\_group\_members() call * Imported Translations from Zanata * Check DBReferenceError foreign key in Instance.save * Fix Instance unit test for DBReferenceError * Ironic: Fix bad capacity reporting if instance\_info is unset * libvirt: check if ImageMeta.disk\_format is set before accessing it * libvirt: check if ImageMeta.disk\_format is set before accessing it * Rollback is needed if initialize\_connection times out * Updated from global requirements * Add Pillow to test-requirements.txt * VMware: raise NotImplementedError for live migration methods * xapi-tools: fixes cache cleaner script * Cleanup of Translations * Add Pillow to test-requirements.txt * Update rpc version aliases for liberty * Remove orphaned code related to extended\_volumes * Add checkpoint logging when terminating an instance * Add checkpoint logging when building an instance in compute manager * Removed unused method from compute/rpcapi * Remove unused read-only cell code * Change warn to debug logs when migration context is missing * Use os-testr for py34 tox target * Add sample config file to nova docs * Remove lazy-loading property compute\_task\_api from compute api * Remove conductor 2.x RPC API * Reserve 10 migrations for backports * Use StrOpt's parameter choices to restritct option auth\_strategy * vmware: set default value in fake \_db\_content when creating objects * Avoid needless list copy in 'scheduler\_host\_subset\_size' case * libvirt: Log warning for wrong migration flag config options * Slightly better translation friendly formatting * Identify more py34 tests that already pass * rebuild: Apply migration context before calling the driver * hardware: improve parse\_cpu\_spec to handle exclusion range * Correct Instance type check to work with InstanceV1 * Imported Translations from Zanata * Correct Instance type check to work with InstanceV1 * Only create volumes with instance.az if cinder.cross\_az\_attach is False * Fix the help text of monkey\_patch config param * Rollback of live-migration fails with the NFS driver * Set TrustedFilter as experimental * doc: gmr: Update instructions to generate GMR error reports * rebuild: Apply migration context before calling the driver * Fix MetricWeigher to use MonitorMetricList * VMware: update log to be warning * Add more help text to the cinder.cross\_az\_attach option * Cleanup of Translations * Revert "Deprecate cinder.cross\_az\_attach option" * Fix some spelling typo in manual * Fix NoneType error when calling MetricsWeigher * wsgi: removing semicolon * Fix logging\_sample.conf to use oslo\_log formatter * Remove unused \_check\_string\_length() * Deprecate cinder.cross\_az\_attach option * Neutron: update cells when saving info\_cache * Fix MetricWeigher to use MonitorMetricList 12.0.0.0rc1 ----------- * Imported Translations from Zanata * Detach volume after deleting instance with no host * Remove unnecessary call to info\_cache.delete * Filter leading/trailing spaces for name field in v2.1 compat mode * Give 
instance default hostname if hostname is empty * If rescue failed set instance to ERROR * Add some devref for AZs * Change parameter name in utility function * RT: track evacuation migrations * rebuild: RPC sends additional args and claims are done * Cells: Limit instances pulled in \_heal\_instances * Open Mitaka development * Fix order of arguments in assertEqual * devref: update the nova architecture doc * Imported Translations from Zanata * Fix quota update in init\_instance on nova-compute restart * net: explicitly set mac on linux bridge * live-migration: Logs exception if operation failed * libvirt: add unit tests for the designer utility methods * Add test cases for some classes in objects.fields * Change ignore-errors to ignore\_errors * libvirt: fix direct OVS plugging * claims: move a debug msg to a warn on missing migration * Fix order of arguments in assertEqual * Remove duplicate VALID\_NAME\_REGEX * Pep8 didn't check api/openstack/common.py * Updated from global requirements * libvirt: Add unit tests for methods * Devref: Document why conductor has a task api/manager * Imported Translations from Zanata * Fix nova configuration options description * libvirt:on snapshot delete, use qemu-img to blockRebase if VM is stopped * Allow filtering using unicode characters * Updated from global requirements * Imported Translations from Zanata * Test both NoAuthMiddleware and NoAuthMiddlewareV3 * Remove redundant variable 'context' * Add 'OS-EXT-VIF-NET:net\_id' for v21 compatible mode * libvirt: Add NUMA cell count to cpu\_info * Xenapi: Don't access image\_meta.id when booting from a volume * Imported Translations from Zanata * Fix typo in HACKING.rst * Remove comment in wrong place * Fix string formatting in api/metadata/vendordata\_json.py * Raise exception.Migration earlier in REST API layer * Remove "shelved\_image\_id" key from instance system metadata * Only set access\_ip\_\* when instance goes ACTIVE * VMware: fix typo in comment * RT: Migration resource tracking uses migration context * compute: migrate/resize paths properly handle stashed numa\_topology * Claims: Make sure move claims create a migration context records * libvirt:update live\_migration\_monitor to use Guest * VMware: create method for getting datacenter from datastore * User APIRouterV21 instead of APIRouterV3 for v2.1 unittests * Remove TestOpenStackClientV3 from nova functional tests * Rename all the ViewBuilderV3 to ViewBuilderV21 * libvirt: Split out resize\_image logic from create\_image * Reuse method to convert key to passphrase * Creating instance fail when inject ssh key in cells mode * Fix the usage output of the nova-idmapshift command * Make test\_revoke\_cert\_project\_not\_found\_chdir\_fails deterministic * Reduce the number of Instance.get\_by\_uuid calls * Remove 'v3' from comments in Nova API code * xapi: cleanup volume sr on live migration rollback * Hyper-V: Implements attach\_interface and detach\_interface method * Remove unnecessary 'context' param from quotas reserve method call * VMware: Replace get\_dynamic\_properties with get\_object\_properties\_dict * VMware: Replace get\_dynamic\_property with get\_object\_property * Return empty PciDevicePoolList obj instead of None * libvirt: add debug logging for lxc teardown paths * Add API schema for different\_cell filter * Add microversion bump exception for scheduler-hint * Use six.text\_type instead of str in serialize\_args * Set vif and allocated when associating fixed ip * Fix ScaleIO commands in rootwrap filters * Add missing 
information to docstring * Add microversion rule when adding attr to request * Check unknown event name when create external server event * Don't expect meta attributes in object\_compat that aren't in the db obj * CONF.allow\_resize\_to\_same\_host should check only once in controller * Updated from global requirements * Fix debug log format in object\_backport\_versions() * Add version 3.0 of conductor RPC interface * Remove and deprecate conductor object\_backport() * Invalidate AZ cache when the instance AZ information is different * Consolidate code to get the correct availability zone of an instance * Fix order of arguments in assertEqual * Ironic: Call unprovison for nodes in DEPLOYING state * libvirt: use guest as parameter for get serial ports * Separate API schemas for v2.0 compatible API * api: allow any scheduler hints * API: Handle InstanceUnknownCell exceptions * Updated from global requirements * Add some explanation for the instance AZ field * Remove 'v3' from extension code * Remove more 'v3' references from the code * Sorting and pagination params used as filters * Freeze v1 Instance and InstanceList schema hashes * Imported Translations from Transifex * Remove unused parameter overwrite in elevated * Add missing delete policies in the sample file * Fix a few typos * ironic: convert driver to use nova.objects.ImageMeta * objects: convert config drive to use ImageMeta object * VMware: ensure that instance is deleted when volume is missing * libvirt:Rsync compression removed * xenapi: Support extra tgz images that with only a single VHD * Hyper-V: Fixes snapshoting inexistent VM issue * Hyper-V: Adds RDPConsoleOps unit tests * Rectify spelling mistake in nova * libvirt: Add a finish log * Remove old unused baremetal rootwrap filters * Relax restrictions on server name * network\_request\_obj: Clean up outdated code * Object: Fix KeyError when loading instance from db * Add os-brick's scsi\_id command to rootwrap * Expose keystoneclient's session and auth plugin loading parameters * Remove and deprecate conductor compute\_node\_create() * Drop unused conductor manager vol\_usage\_update() mock * Add constraint target to tox.ini * nova-net: fix missing log variable in deallocate\_fixed\_ip * Provide working SQLA\_VERSION attribute * Don't "lock" the DB on expand dry run * New sensible network bandwidth quota values in Nova tests * Fix Cells gate test by modifying the regressions regex * Add functional test for server group * Reject the cell name include '!', '.' 
and '@' for Nova API * Hyper-V: Adds HyperVDriver unit tests * claims: Remove compat code with instance dicts * Add Instance and InstanceList v2.0 objects * Teach conductor to do manifest-based object\_class\_action() things * Make the conductor fixture use version manifests * Update objects test infrastructure for multiple versions * Refactor Instance tests to use objects.Instance * Fix an issue with NovaObjectRegistry hook * Pull out the common bits of InstanceList into \_BaseInstanceList * Pull out the common bits of Instance into \_BaseInstance * Clarify max\_local\_block\_devices config option usage * Allow to use autodetection of volume device path * Remove the blacklisted nova-cells shelve tests * Update from global requirements * objects: Hook migration object into Instance * Fix order of arguments in assertEqual * Fix order of arguments in assertEqual * Detach and terminate conn if Cinder attach fails * [libvirt] Move cleanup of imported files to imagebackend * hyperv: convert driver to use nova.objects.ImageMeta 12.0.0.0b3 ---------- * Add notes explaining vmware's suds usage * Adds instance\_uuid index for instance\_system\_metadata * Handle nova-compute failure during a soft reboot * Fix mistake in UT:test\_detach\_unattached\_volume * Fix RequestSpec.instance\_group hydration * Remove unused root\_metadata method of BlockDeviceMappingList * Add JSON-Schema note to api\_plugins.rst * Compute: update finish\_revert\_resize log to have some context * Revert "Remove references to suds" * Fix API directories on the doc * Fix incomplete error message of quota exceeded * Add secgroup param checks for Neutron * Implement manifest-based backports * Delete orphaned instance files from compute nodes * Fixed incorrect keys in cpu\_pinning * api: deprecate the api v2 extension configuration * Remove the v3 word from help message of api\_rate\_limit option * Use the same pci\_requests field for all filters and HostManager * objects: Add MigrationContext object * Don't query database with an empty list of tags for creation * Remove duplicate NullHandler test fixture * Add migration policy to upgrades devref * Add warning log when deprecated v2 and v3 code get used * Update ComputeNode values with allocation ratios in the RT * Update HostManager and filters to use ComputeNode ratios * Add cpu\_allocation\_ratio and ram\_allocation\_ratio to ComputeNode * VMware: adds support for rescue image * filter pre\_assigned\_dev\_names when finding disk dev * Fix order of arguments in assertEqual * Fix order of arguments in assertEqual * Fix order of arguments in assertEqual * rt: Rewrite abort and update\_usage tests * Cleanup RT \_instance\_in\_resize\_state() * Compute: be consistent with logs about NotImplemented methods * VMware: pass network info to config drive * Remove/deprecate conductor instance\_update() * Make compute manager instance updates use objects * xenapi: add necessary timeout check * Fix permission issue of server group API * Make query to quota usage table order preserved * Change v3 to v21 for devref api\_plugins.rst * Remove duplicate exception * Don't trace on InstanceInfoCacheNotFound when refreshing network info\_cache * Cells: Improve block device mapping update/create calls * Rm openstack/common/versionutils from setup.cfg * Add a warning in the microversion docs around the usage of 'latest' * Fix exception message mistake in WSGI service * Replace "vol" variable by "bdm" * Remove v3 references in unit test 'contrib' * Removed unused dependency: discover * Rename tests so 
that they are run * Adds unit tests to test\_common.py * db: Add the migration\_context to the instance\_extra table * tests: Make test\_claims use Instance object * api: use v2.1 only in api-paste.ini * VMware: Update to return the correct ESX iqn * Pass block\_device\_info when delete an encrypted lvm * Handle neutron exception on bad floating ip create request * API: remove unused parameter * Consider that all scheduler calls are IO Ops * Add RequestSpec methods for primitiving into dicts * Add a note about the 400 response not requiring a microversion * api: deprecate the concept of extensions in v2.1 * Fix precedence of image bdms over image mappings * Cells: remove redundant check if cells are enabled * Strip the extra properties out when using legacy v2 compatible middleware * Remove unused sample files from /doc dir * Expose VIF net-id attribute in os-virtual-interfaces * libvirt: take account of disks in migration data size * Add deprecated\_for\_removal parm for deprecated neutron\_ops * Use compatibility methods from oslo * compute: Split the rebuild\_instance method * Allow for migration object to be passed to \_move\_claim * rt: move filtering of migration by type lower in the call stack * rt: generalize claim code to be useful for other move actions * libvirt: make guest to return power state * libvirt: move domain info to guest * Xen: import migrated ephemeral disk based on previous size * cleanup NovaObjectDictCompat from external\_event * cleanup NovaObjectDictCompat from agent * Catch invalid id input in service\_delete * Convert percent metrics back into the [0, 1] range * Cleanup for merging v2 and v2.1 functional tests * Remove doc/source/api and doc/build before building docs * Fixes a typo on nova.tests.unit.api.ec2.test\_api.py * Add a note about the 403 response not requiring a microversion * Pre-load expected attrs that the view builder needs for server details * Remove 'Retry-After' in server create and resize * Remove debug log message in SG API constructor * Updated from global requirements * Refactor test cases for live-migrate error case * Fixes Bug "destroy\_vm fails with HyperVException" * libvirt: refactor \_create\_domain\_setup\_lxc to use Image.get\_model * Set task\_state=None when booting instance failed * libvirt: Fix snapshot delete for network disk type for blockRebase op * [Ironic]Not count available resources of deployed ironic node * Catch OverQuota in volume create function * Don't allow instance to overcommit against itself * n-net: add more debug logging to release\_fixed\_ip * Fix scheduler code to use monitor metric objects * objects: add missing enum values to DiskBus field * Move objects registration in tests directory * xenapi: convert driver to use nova.objects.ImageMeta * libvirt: convert driver to use nova.objects.ImageMeta * Updated from global requirements * VMware: Delete vmdk UUID during volume detach * Move common sample files methods in test base class * Share server POST sample file for microversion too * Fix remote\_consoles microversion 2.8 not to run on /v3 * Remove merged sample tests and file for v2 tests * Move "versions" functional tests in v2.1 tests * Nil out inst.host and inst.node when build fails * Fix link's href to consider osapi\_compute\_link\_prefix * Fix abnormal quota usage after restore by admin * Specify current directory using new cwd param in processutils.execute * Remove and deprecate unused conductor method vol\_usage\_update() * Replace conductor proxying calls with the new VolumeUsage object * Add a 
VolumeUsage object * Updated from global requirements * Move CPU and RAM allocation ratios to ResourceTracker * Pull the all\_tenants search\_opts checking code into a common utility * Gate on nova.conf.sample generation * libvirt: use proper disk\_info in \_hard\_reboot * Update obj\_reset\_changes signatures to match * libvirt: only get bdm in \_create\_domain\_setup\_lxc if booted from volume * libvirt: \_create\_domain\_setup\_lxc needs to default disk mapping as a dict * libvirt: add docstring for \_get\_instance\_disk\_info * Add rootwrap daemon mode support * Removed duplicated keys in dictionary * Xenapi: Correct misaligned partitioning * libvirt:Remove duplicated check code for config option sysinfo\_serial * Test cases for better handling of SSH key comments * Allow compute monitors in different namespaces * cleanup NovaObjectDictCompat from hv\_spec * cleanup NovaObjectDictCompat from quota * Correct a wrong docstring * Create RequestSpec object * Clarify API microversion docs around handling 500 errors * libvirt: Fix KeyError during LXC instance boot * Xenapi: Handle missing aggregate metadata on startup * Handle NotFound exceptions while processing network-changed events * Added processing /compute URL * libvirt: enable live migration with serial console * Remove the useless require\_admin\_context decorator * Correct expected error code for os-resetState action * libvirt: add helper methods for getting guest devices/disks * compute: improve exceptions related to disk size checks * Improve error logs for start/stop of locked instance * pci: Remove nova.pci.device module * pci: Remove objects.InstancePCIRequests.save() * Remove unused db.security\_group\_rule\_get\_by\_security\_group\_grantee() * Revert "Make nova-network use conductor for security groups refresh" * Make compute\_api.trigger\_members\_refresh() issue a single db call * Fix cells use of legacy bdms during local instance delete operations * Hyper-V: Fixes serial port issue on Windows Threshold * Consolidate initialization of instance snapshot metadata * Fix collection of metadata for a snapshot of a volume-backed instance * Remove unnecessary ValueError exception * Update log's level when backup a volume backend instance * The API unit tests for serial console use http instead of ws * Drop scheduler RPC 3.x support * Move quota delta reserve methods from api to utils * nova.utils.\_get\_root\_helper() should be public * Host manager: add in missing log hints * Removing extension "OS-EXT-VIF-NET" from v2.1 extension-list * nova-manage: fix typo in docstring about mangaging * hyper-v: mock time.sleep in test\_rmtree * Remove tie between system\_metadata and extra.flavor * Fixes Hyper-V boot from volume fails when using ephemeral disk * Re-write way of compare APIVersionRequest's * Store "null api version" as 0.0 * add docstring to virt driver interface (as-is) [1 of ?] 
* Remove last of the plugins/v3 from unit tests * Rename classes containing 'v3' to 'v21' * Move the v2 api\_sample functional tests * Updated from global requirements * Add logging when filtering returns nothing * libvirt: cleanup() serial\_consoles after instance failure * Don't query database with an empty list of tags for IN clause * Libvirt: Make live\_migration\_bandwidth help msg more meaning * Move V2.1 API unittest to top level directory * Neutron: Check port binding status * Move legacy v2 api smaple tests * conductor: update comments for rpc and use object * Load flavor when getting instances for simple-tenant-usage * Make pagination tolerate a deleted marker * Updated from global requirements * Cleanup HTTPRequest for security\_groups test * Add api samples impact to microversion devref * Use min and max on IntOpt option types * Add hacking check for eventlet.spawn() * Updated from global requirements * neutron: filter None port\_ids from ports list in \_unbind\_ports * VMware: treat deletion exception with attached volumes * VMware: ensure that get\_info raises the correct exception * Allow resize root\_gb to 0 for volume-backed instances * Limit parallel live migrations in progress * Validate quota class\_name * Move V2 API unittests under legacy\_v2 directory * Updated from global requirements * Replace get\_cinder\_client\_version in cinder.py * Avoid querying for Service in resource tracker * Remove/deprecate unused parts of the compute node object * Make ComputeNode.service\_id nullable to match db schema * Add missing rules in policy.json * Add V2.1 API tests parity with V2 API tests * Fixed indentation * Simplify interface for creating snapshot of volume-backed instance * Add instance action events for live migration * Remove 'v3' directory for v2.1 json-schemas * Move v2.1 code to the main compute directory - remove v3 step3 * libvirt: qemu-img convert should be skipped when migrating * Add version counter to Service object * Fix the peer review link in the 'Patches and Reviews' policy section * Handle port delete initiated by neutron * Don't check flavor disk size when booting from volume * libvirt: make instance compulsory in blockinfo APIs * xapi: ensure pv driver info is present prior to live-migration * Move existing V2 to legacy\_v2 - step 2 * Move existing V2 to legacy\_v2 * Return v2 version info with v2 legacy compatible wrapper * Ironic: Add numa\_topology to get\_available\_resource return values * Fix three typos on nova/pci directory * Imported Translations from Transifex * pci: Use PciDeviceList for PciDevTracker.pci\_devs * pci: Remove get\_pci\_devices\_filter() method * pci: Move whitelist filtering inside PCI tracker * libvirt: call host.get\_capabilities after checking for bad numa versions * libvirt: log when BAD\_LIBVIRT\_NUMA\_VERSIONS detected * Use string substitution before raising exception * Hyper-V: deprecates support for Windows / Hyper-V Server 2008 R2 * VMware: Do not untar OVA on the file system * Add hacking check for greenthread.spawn() * Ironic: Use ironicclient native retries for Conflict in ClientWrapper * Prevent (un)pinning unknown CPUs * libvirt: use instance UUID with exception InstanceNotFound * Fix notify\_decorator errors * VMware: update supported vsphere 6.0 os types * libvirt: convert Scality vol driver to LibvirtBaseFileSystemVolumeDriver * libvirt: convert Quobyte driver to LibvirtBaseFileSystemVolumeDriver * pci: Use fields.Enum type for PCI device type * pci: Use fields.Enum type for PCI device status * More specific 
error messages on building BDM * Ensure test\_models\_sync() works with new Alembic releases * Hyper-V: Adds VolumeOps unit tests * Hyper-V: Adds MigrationOps unit tests * Suppress not image properties for image metadata from volume * Add non-negative integer and float fields * Fix DeprecationWarning when using BaseException.message * Added support for specifying units to hw:mem\_page\_size * Compute: use instance object for refresh\_instance\_security\_rules * libvirt: convert GPFS volume driver to LibvirtBaseFileSystemVolumeDriver * Updated from global requirements * Add os-brick based LibvirtVolumeDriver for ScaleIO * docs: add link to liberty summit session on v2.1 API * Refactor unit test for InstanceGroup objects * Don't pass the service catalog when making glance requests * libvirt: check min required qemu/libvirt versions on s390/s390x * libvirt: ensure LibvirtConfigGuestDisk parses readonly/shareable flags * libvirt: set caps on maximum live migration time * libvirt: support management of downtime during migration * cleanup NovaObjectDictCompat from numa object * Fix test\_relationships() for subobject versions * libvirt: don't open connection in driver constructor * Skip SO\_REUSEADDR tests on BSD * \_\_getitem\_\_ method not returning value * Compute: replace incorrect instance object with dict * Fix live-migrations usage of the wrong connector information * Honour nullability constraints of Glance schema in ImageMeta * Change docstring in test to comment * libvirt: convert GlusterFS driver to LibvirtBaseFileSystemVolumeDriver * libvirt: convert SMBFS vol driver to LibvirtBaseFileSystemVolumeDriver * libvirt: convert NFS volume driver to LibvirtBaseFileSystemVolumeDriver * Introduce LibvirtBaseFileSystemVolumeDriver * Add test to check relations at or below current * Add documentation for the nova-cells command * libvirt:Rsync remote FS driver was added * Clean the deprecated noauth middleware * Add os\_brick-based VolumeDriver for HGST connector * libvirt: add os\_admin\_user to use with set admin password * Fixed incorrect behaviour of method \_check\_instance\_exists * Squashing down update method * Fix the wrong file name for legacy v2 compatible wrapper functional test * Add scenario for API sample tests with legacy v2 compatible wrapper * Skip additionalProperties checks when LegacyV2CompatibleWrapper enabled * Libvirt: correct libvirt reference url link when live-migration failed * libvirt: enable virtio-net multiqueue * Replacing unichr() with six.unichr() and reduce with six.moves.reduce() * Fix resource leaking when consume\_from\_instance raise exception * :Add documentation for the nova-idmapshift command * RBD: Reading rbd\_default\_features from ceph.conf * New nova API call to mark nova-compute down * libvirt: move LibvirtISCSIVolumeDriver into it's own module * libvirt: move LibvirtNETVolumeDriver into it's own module * libvirt: move LibvirtISERVolumeDriver into it's own module * libvirt: move LibvirtNFSVolumeDriver into it's own module * allow live migration in case of a booted from volume instance * Handle MessageTimeout to MigrationPreCheckError * Create a new dictionary for type\_data in VMwareAPIVMTestCase class * resource tracker style pci resource management * Added missed '-' to the rest\_api\_version\_history.rst * Imported Translations from Transifex * Remove db layer hard-code permission checks for keypair * Fix a couple dead links in docs * cleanup NovaObjectDictCompat from virt\_cpu\_topology * Adding user\_id handling to keypair index, show and 
create api calls * Updated from global requirements * Remove legacy flavor compatibility code from Instance * libvirt: Fix root device name for volume-backed instances * Fix few typos in nova code and docs * Helper script for running under Apache2 * Raise NovaException for missing/empty machine-id * Fixed random failing of test\_describe\_instances\_with\_filters\_tags * libvirt: enhance libvirt to set admin password * libvirt: rework quiesce to not share "sensitive" informations * Metadata: support proxying loadbalancers * formely is not correct * Remove 'scheduled\_at' - DB cleanup * Remove unnecessary executable permission * Neutron: add in API method for updating VNIC index * Xen: convert image auto\_disk\_config value to bool before compare * Make BaseProxyTestCase.test\_proxy deterministic wrt traffic/verbose * Cells: Handle instance\_destroy\_at\_top failure * cleanup NovaObjectDictCompat from virtual\_interface * Fix test mock that abuses objects * VMware: map one nova-compute to one VC cluster * VMware: add serial port device * Handle SSL termination proxies for version list * Use urlencode instead of dict\_to\_query\_str function * libvirt: move LibvirtSMBFSVolumeDriver into it's own module * libvirt: move LibvirtAOEVolumeDriver into it's own module * libvirt: move LibvirtGlusterfsVolumeDriver into it's own module * libvirt: move LibvirtFibreChannelVolumeDriver into it's own module * VMware: set create\_virtual\_disk\_spec method as local * Retry live migration on pre-check failure * Handle config drives being stored on rbd * Change List objects to use obj\_relationships * Fixes delayed instance lifecycle events issue * libvirt-vif: Allow to configure a script on bridge interface * Include DiskFilter in the default list * Adding support for InfiniBand SR-IOV vif type * VMware: Add support for swap disk * libvirt: Add logging for dm-crypt error conditions * Service group drivers forced\_down flag utilization * libvirt: Replace stubs with mocks for test\_dmcrypt * clarify docs on 2.9 API change * Remove db layer hard-code permission checks for instance\_get\_all\_hung\_in\_rebooting * Undo tox -e docs pip install sphinx workaround * Set autodoc\_index\_modules=True so tox -e docs builds module docs again * Allow NUMA based reporting for Monitors * libvirt: don't add filesystem disk to parallels containers unconditionally * objects: add hw\_vif\_multiqueue\_enabled image property * Prepare for unicode enums from Oslo * rootwrap: remove obsolete filters for baremetal * Create class hierarchy for tasks in conductor * return more details on assertJsonEqual fail * Fix IronicHostManager to skip get\_by\_host() call * Store correct VirtCPUTopology * Add documentation for block device mapping * Show 'locked' information in server details * VMware: add resource limits for disk * VMware: store extra\_specs object * VMware: Resource limits for memory * VMware: create common object for limits, reservations and shares * VMware: add support for cores per socket * Add DiskNotFound and VolumeNotFound test * Not check rotation at compute level * Instance destroyed if ironic node in CLEANWAIT * Ironic: Better handle InstanceNotFound on destroy() * Fix overloading of block device on boot by device name * tweak graphviz formatting for readability * libvirt: rename parallels driver to virtuozzo * libvirt: Add macvtap as virtual interface (vif) type to Nova's libvirt driver * cells: document upgrade limitations/assumptions * rebuild: make sure server is shut down before volumes are detached * Implement 
compare-and-swap for instance update * docs: add a placeholder link to mentoring docs * libvirt: Kill rsync/scp processes before deleting instance * Updated from global requirements * Add console allowed origins setting * libvirt: move the LibvirtScalityVolumeDriver into it's own module * libvirt: move the LibvirtGPFSVolumeDriver into it's own module * libvirt: move the LibvirtQuobyteVolumeDriver into the quobyte module * libvirt: move volume/remotefs/quobyte modules under volume subdir * Add missing policy for limits extension * Move to using ovo's remotable decorators * Base NovaObject on VersionedObject * Document when we should have a microversion * libvirt: do relative block rebase only with non-null base * Add DictOfListOfStrings type of field * Get py34 subunit.run test discovery to work * Enable python34 tests for nova/tests/unit/scheduler/test\*.py * libvirt: mark NUMA huge page mappings as shared access * libvirt:Add a driver API to inject an NMI * virt: convert hardware module to use nova.objects.ImageMeta 12.0.0.0b2 ---------- * Replace openssl calls with cryptography lib * libvirt: move lvm/dmcrypt/rbd\_utils modules under storage subdir * Fix Instance object usage in test\_extended\_ips tests * Fix test\_extended\_server\_attributes for proper Instance object usage * Fix test\_security\_groups to use Instance object properly * Refactor test\_servers to use instance objects * Switch to using os-brick * Updated from global requirements * VMware: remove redundant check for block devices * Remove unused decorator on attach/detach volume * libvirt: test capability for supports\_migrate\_to\_same\_host * Added removing of tags from instance after its deletion * Remove unused import of the my\_ip option from the manager * Scheduler: enhance debug messages for multitenancy aggregates * VMware: Handle missing vmdk during volume detach * Running microversion v2.6 sample tests under '/v2' endpoint * VMware: implement get\_mks\_console() * Add MKS protocol for remote consoles * Add MKS console support * libvirt: improve logging in the driver.py code * Fix serializer supported version reporting in object\_backport * Updated from global requirements * Revert "Add error message to failed block device transform" * tox: make it possible to run pep8 on current patch only * Fix seven typos on nova documentation * Add two fields to ImageMetaProps object * Check flavor type before add tenant access * Switch to the oslo\_utils.fileutils * hypervisor support matrix: fix snapshot for libvirt Xen * libvirt: implement get\_device\_name\_for\_instance * libvirt: Always default device names at boot * Remove unused import of the compute\_topic option from the DB API * Remove unused call to \_get\_networks\_by\_uuids() * libvirt: fix disk I/O QOS support with RBD * Updated from global requirements * Remove unnecessary oslo namespace import checks * VMware: Fixed redeclared CONF = cfg.CONF * Execute \_poll\_shelved\_instances only if shelved\_offload\_time is > 0 * Switch to oslo.reports * Support Network objects in set\_network\_host * Fix Filter Schedulers doc to refer to all\_filters * Fixup uses of mock in hyperv tests * Cleanup log lines in nova.image.glance * Revert "Add config drive support for Virtuozzo containers" * Virt: fix debug log messages * Virt: use flavor object and not flavor dict * Add VersionPredicate type of field * Remove unnecessary method in FilterScheduler * Use utf8\_bin collation on the flavor extra-specs table in MySQL * docs: clear between current vs future plans * cleanup 
NovaObjectDictCompat subclassing from pci\_device * libvirt: make unit tests concise by setup guest object * libvirt: introduce method to wait for block device job * Decouple instance object tests from the api fakes module * Fixed typos in self parameter * Hyper-V: restart serial console workers after instance power change * Only work with ipv4 subnet metadata if one exists * Do not import using oslo namespace * Refresh instance info cache within lock * Remove db layer hard-code permission checks for fixed\_ip\_associate\_\* * Add middleware filterout Microversions http headers * Correct backup\_type param description * Fix a request body template for secgroup tests * Images: fix invalid exception message * Updated from global requirements * rebuild: fix rebuild of server with volume attached * objects: send PciDeviceList 1.2 to all code that can handle it * Fix libguestfs failure in test\_can\_resize\_need\_fs\_type\_specified * Fix the incorrect PciDeviceList version number * objects: Don't import CellMapping from the objects module * Deprecate the osapi\_v3.enabled option * Remove conductor api from resource tracker * Fix test\_tracker object mocks * Fix Python 3 issues in nova.utils and nova.tests * Remove db layer hard-code permission checks for instance\_get\_all\_by\_host\_and\_not\_type * Support all\_tenants search\_opts for neutron * libvirt : remove broken olso\_config choices option * Convert instance\_type to object in prep\_resize * VMware: clean up exceptions * Revert "Remove useless db call instance\_get\_all\_hung\_in\_rebooting" * VMware: Use virtual disk size instead of image size * Remove db layer hard-code permission checks for provider\_fw\_rule\_\* * Remove db layer hard-code permission checks for archive\_deleted\_rows\* * Revert "Implement compare-and-swap for instance update" * Add tool to build a doc latex pdf * make test\_save\_updates\_numa\_topology stable across python versions * Update HACKING.rst for running tests and building docs * Cleanup quota\_class unittest with appropriate request context * Remove db layer hard-code permission checks for quota\_class\_create/update * Remove db layer hard-code permission checks for quota\_class\_get\_all\_by\_name * Improve functional test base for microversion * Remove db layer hard-code permission checks for reservation\_expire * Introducing new forced\_down field for a Service object * Use stevedore for loading monitor extensions * libvirt: Remove dead code path in method clear\_volume * Switch to oslo.service library * Include project\_id in instance metadata * Convert test\_compute\_utils to use Instance object * Fix for mock-1.1.0 * Port crypto to Python 3 * Add HostMapping object * Remove useless db call instance\_get\_all\_hung\_in\_rebooting * Cleanup unused method fake\_set\_snapshot\_id * Handle KeyError when volume encryption is not supported * Expose Neutron network data in metadata service * Build Neutron network data for metadata service * Implement compare-and-swap for instance update * Added method exists to the Tag object * Add DB2 support * compute: rename ResizeClaim to MoveClaim * Fix the little spelling mistake of the comment * Remove db layer hard-code permission checks for quota\_create/update * Fix the typo from \_pre\_upgrade\_294 to \_pre\_upgrade\_295 for tests/unit/db/test\_migration * Ironic:check the configuration item api\_max\_retries * Modified testscenario for micro version 2.4 * Add some notifications to the evacuate path * Make evacuate leave a record for the source compute host to 
process * Fix incorrect enum in Migration object and DB model * Refactoring of the os-services module * libvirt: update docstring in blockinfo module for disk\_info * Ignore bridge already exists error when creating bridge * libvirt: handle rescue flag first in blockinfo.get\_disk\_mapping * libvirt: update volume delete snapshot to use Guest * libvirt: update live snapshot to use Guest object * libvirt: update swap volume to use Guest * libvirt: introduce GuestBlock to wrap around Block API * libvirt: rename GuestVCPUInfo to VCPUInfo * libvirt: save the memory state of guest * removed unused method \_get\_default\_deleted\_value * Remove flavor migration from db\_api and nova-manage * Rework monitor plugin interface and API * Adds MonitorMetric object * virt: add get\_device\_name\_for\_instance to the base driver class * libvirt: return whether a domain is persistent * Cells: fix indentation for configuration variable declaration * VMware: add unit tests for vmops attach and detach interface * Remove unneeded OS\_TEST\_DBAPI\_ADMIN\_CONNECTION * Switch from MySQL-python to PyMySQL * virt: fix picking CPU topologies based on desired NUMA topology * Port test\_exception to Python 3 * devref: virtual machine states and transitions * Consolidate the APIs for getting consoles * Remove db layer hard-code permission checks for floating\_ip\_dns * Fix typo in model doc string * virt: Fix AttributeError for raw image format * log meaningful error message on download exception * Updated from global requirements * Add bandit for security static analysis testing * Handle unexpected clear events call * Make on\_shared\_storage optional in compute manager * snapshot: Add device\_name to the snapshot bdms * compute: Make swap\_volume with resize updates BDM size * Make Nova better at keeping track of volume sizes in BDM * API: make sure a blank volume with no size is rejected * Ironic: Improve driver logs * Drop MANIFEST.in - it's not needed with PBR * Libvirt: Define system\_family for libvirt guests * Convert RT compute\_node to be a ComputeNode object * glance:check the num\_retries option * tests: Move test\_resource\_tracker to Instance objects * Remove compat\_instance() * Enable python34 tests for nova/tests/unit/objects/test\*.py * Soft delete system\_metadata when destroy instance * Remove python3 specific test-requirements file * Try luksFormat up to 3 times in case the device is in use * rootwrap: update ln --symbolic filter for FS and FC type volume drivers * Add wording to error message in TestObjectVersions.test\_relationships * Close temporary files in virt/disk/test\_api.py * Add BlockDeviceType enum field * Add BlockDeviceDestinationType enum field * Add BlockDeviceSourceType enum field * Avoid recursion in object relationships test * tests: move a test to the proper class in test\_resource\_tracker * Remove db layer hard-code permission checks for network\_set\_host * Block subtractive operations in migrations for Kilo and beyond * Remove db layer hard-code permission checks for network\_disassociate * libvirt: Correct domxml node name * Test relationships of List objects * libvirt: configuration for interface driver options * Fix Python 3 issues in nova.db.sqlalchemy * Update test\_db\_api for oslo.db 2.0 * Fix is\_image\_extendable() thinko * Validate maximum limit for quota * utils: ignore block device mapping in system metadata * libvirt: add in missing doc string for hypervisor\_version * Remove useless policy rule from fake\_policy.py * Replace ascii art architecture diagram 
with svg image * Adds MonitorMetricTypeField enum field * Unfudge tox -e genconfig wrt missing versionutils module * virt: update doctrings * hypervisor support matrix: add feature "evacuate" * XenAPI: Refactor rotate_xen_guest_logs to avoid races * hypervisor support matrix: add feature "serial console" * hypervisor support matrix: add CLI commands to features * Fix typos detected by toolkit misspellings * hypervisor support matrix: fix "evacuate" for s390 and hyper-v * Make live migration create a migration object record * Cells: add instance cell registration utility to nova-manage * fix typos in docs * Logging corrected * Check mac for instance before disassociate in release_fixed_ip * Add the rule of separate plugin for Nova REST API in devref * Use flavor object in compute manager

12.0.0.0b1
----------

* Changes conf.py for Sphinx build because oslosphinx now contains GA * Fix testing object fields with missing instance rows * Change group controller of V2 test cases * Reduce window for allocate_fixed_ip / release_fixed_ip race in nova-net * Make NoValidHost exceptions clearer * Hyper-V: Fixes method retrieving free SCSI controller slot on V1 * Refactor network API 'get_instance_nw_info' * Removed extra '-' from rest_api_version_history.rst * Remove an useless variable and fix a typo in api * VMware: convert driver to use nova.objects.ImageMeta * Bypass ironic server not available issue * Fix test_create_security_group_with_no_name * Remove unused "id" and "rules" from secgroup body * cells: add devstack/tempest-dsvm-cells-rc for gating * Add common function for v2.1 API flavor_get * Fix comment typo * Fix up instance flavor usage in compute and network tests * Fix up ec2 tests for flavors on instances * Fix up xenapi tests for instance flavors * Fix up some bits of resource_tracker to use instance flavors * Register the vnc config options under group 'vnc' * Cells: cell scheduler anti-affinity filter * Cells: add in missing unit test for get_by_uuid * VMware driver: Increasing speed of downloading image * Hyper-V: Fix virtual hard disk detach * Add flag to force experimental run of db contract * Make readonly field tests use exception from oslo.versionedobjects * Fixes "Hyper-V destroy vm fails on Windows Server 2008R2" * Add microversion to allow server search option ip6 for non-admin * Updated from global requirements * VMware: Handle port group not found case * Imported Translations from Transifex * libvirt: use correct translation format * Add explicit alembic dependency * network: add more debug logging context for race bug 1249065 * Add virt resource update to ComputeNode object * xenapi: remove bittorrent entry point lookup code * Use oslo-config-generator instead of generate_sample.sh * Add unit tests for PCI utils * Support flavor object in migrate_disk_and_power_off * Remove usage of WritableLogger from oslo_log * libvirt: Don't fetch kernel/ramdisk files if you already have them * Allow non-admin to list all tenants based on policy * Remove redundant policy check from security_group_default_rule * Return bandwidth usage after updating * Update version for Liberty * neutron: remove deprecated allow_duplicate_networks config option * Validate maximum limit for integer * Improve the ability to resolve capabilities from Ironic * Fix the wrong address ref when the fixed_ip is invalid * The devref for Nova stable API * Fix wrong check when use image in local * Fixes TypeError when libvirt version is BAD_LIBVIRT_CPU_POLICY_VERSIONS

12.0.0a0
--------

* Remove hv_type translation shim for powervm * cells: remove deprecated mute_weight_value option * Make resize api of compute manager to send flavor object * VMware: detach cinder volume when instance destroyed * Add unit tests for the exact filters * test: add MatchType helper class as equivalent of mox.IsA * Validate int using utils.validate_integer method * VMware: use min supported VC version in fake driver * Updated from global requirements * Added documentation around database upgrades * Avoid always saving flavor info in instance * Warn when CONF torrent_base_url is missing slash * Raise invalid input if use invalid ip for network to attach interface * Hyper-V: Removes old instance dirs after live migration * DB downgrades are no longer supported * Add Host Mapping table to API Database * VMware: verify vCenter server certificate * Implement online schema migrations * Hyper-V: Fixes live migration configdrive copy operation * Avoid resizing disk if the disk size doesn't change * Remove openstack/common/versionutils module * Fix TestObjEqualPrims test object registration * Remove references to suds * VMware: Remove configuration check * Remove and deprecate conductor task_log methods * Remove unused compute utils methods * Make instance usage audit use the brand new TaskLog object * Add a TaskLog object * Updated from global requirements * Fix noVNC console access for an IPv6 setup * hypervisor support matrix: add status "unknown" * VMware: typo fix in config option help * Sync with latest oslo-incubator * Associating of floating IPs corrected * Minor refactor in nova.scheduler.filters.utils * Cleanup wording for the disable_libvirt_livesnapshot workaround option * Remove cell api overrides for force-delete * libvirt: convert imagebackend to support nova.virt.image.model classes * virt: convert disk API over to use nova.virt.image.model * Cells: Skip initial sync of block_device_mapping * Pass Down the Instance Name to Ironic Driver * Handle InstanceNotFound when sending instance update notification * Add an index to virtual_interfaces.uuid * Updated from global requirements * Add config drive support for Virtuozzo containers * Update formatting of microversion 2.4 documentation * Consolidates scheduler utils tests into a single file * Send Instance object to cells instance_update_at_top * VMware: use vCenter instead of VC * fix "down" nova-compute service spuriously marked as "up" * Improve formatting of rest_api_version_history * Link to microversion history in docs * libvirt: fix live migration handling of disk_info * libvirt: introduce method to get domain XML * libvirt: introduce method detach_device to Guest object * Remove db layer hard-code permission checks for quota_usage_update * pass environment variables of proxy to tox * Remove db layer hard-code permission checks for quota_get_all_* * Fixed some misspellings * Clean up Fake_Url for unit test of flavor_access * Updated from global requirements * Add AggregateTypeAffinityFilter multi values support * volume: log which encryptor class is being used * VMware: Don't raise exception on resize of 0 disk * Hyper-V: sets supports_migrate_to_same_host capability * libvirt: remove _get_disk_xml to use get_disk from Guest * libvirt: introduce method to attach device * libvirt: update tests to use Mock instead of MagicMock * libvirt: Remove unnecessary JSON conversions * objects: fix parsing of NUMA cpu/mem properties * compute: remove get_image_metadata method * compute:
only use non\_inheritable\_image\_properties if snapshotting * objects: add os\_require\_quiesce image property * libvirt: make default\_device\_names DRY-er * virt: Move building the block\_device\_info dict into a method * Objects: update missing adapter types * Add error handling for creating secgroup * libvirt: handle code=38 + sigkill (ebusy) in destroy() * Removed a non-conditional 'if' statement * Map uuid db field to instance\_uuid in BandwidthUsage object * Hyper-V: Fix missing WMI namespace issue on Windows 2008 R2 * Replace metaclass registry with explicit opt-in registry from oslo * Fix an objects layering violation in compute/api * Remove assertRemotes() from objects tests * Use fields from oslo.versionedobjects * Convert test objects to new field formats * Begin the transition to an explicit object registry * Set default event status to completed * Add a hacking rule for consistent HTTP501 message * Add and use raise\_feature\_not\_supported() * Objects: fix typo with exception * Remove useless volume when boot from volume failed * Hyper-V: Lock snapshot operation using instance uuid * Refactor show\_port() in neutron api * Ironic: Don't report resources for nodes without instances * libvirt: Remove unit tests for \_hard\_reboot * Adds hostutilsv2 to HyperV * libvirt: introduce method to delete domain config * libvirt: introduce method to get vcpus info * libvirt: Don't try to confine a non-NUMA instance * Removed explicit return from \_\_init\_\_ method * libvirt: introduce method resume to Guest object * libvirt: introduce method poweroff to Guest object * libvirt: make \_create\_domain return a Guest object * Raise InstanceNotFound when save FK constraint fails * Updated from global requirements * Add new VIF type VIF\_TYPE\_TAP * libvirt: Disable NUMA for broken libvirt * Handle FlavorNotFound when augmenting migrated flavors * virt: convert VFS API to use nova.virt.image.model * virt: convert disk mount API to use nova.virt.image.model * virt: introduce model for describing local image metadata * Remove unused instance\_group\_policy db calls * Improve compute swap\_volume logging * libvirt: introduce method get\_guest to Host object * libvirt: introduce a Guest to wrap around virDomain * Remove unused exceptions * Extract helper method to get image metadata from volume * Fix \_quota\_reserve test setup for incompatible type checking * Fixes referenced path in nova/doc/README.rst * Updated from global requirements * Handle cells race condition deleting unscheduled instance * Compute: tidy up legacy treatment for vif types * Allow libvirt cleanup completion when serial ports already released * objects: define the ImageMeta & ImageMetaProps objects * Ensure to store context in thread local after spawn/spawn\_n * Ironic: Parse and validate Node's properties * Hyper-V: Fix SMBFS volume attach race condition * Remove unit\_test doc * Make blueprints doc a reference for nova blueprints * Remove jenkins, launchpad and gerrit docs * Prune development.environment doc * docs: fixup libvirt NUMA testing docs to match reality * Fix some issues in devref for api\_microversions * nova response code 403 on block device quota error * Updated from global requirements * Remove unused variables from images api * Compute: improve logging using {} instead of dict * snapshot: Copy some missing attrs to the snapshot bdms * bdm: Make sure that delete\_on\_termination is a boolean * Get rid of oslo-incubator copy of middleware * Make nova-manage handle completely missing flavor information * Use 
oslo\_config choices support * Let soft-deleted instance\_system\_metadata readable * Make InstanceExternalEvent use an Enum for status * Add error message to failed block device transform * network: fix instance cache refresh for empty list * Imported Translations from Transifex * Add common function for v2 API flavor\_get * Remove cell policy check * VMware: replace hardcoded strings with constants * Add missing @require\_context * Standardize on assertJsonEqual in tests * Tolerate iso style timestamps for cells rpc communication * Force the value of LC\_ALL to be en\_US.UTF-8 * libvirt: disconnect\_volume does not return anything * Remove hash seed comment from tox.ini * Allow querying for migrations by source\_compute only * libvirt: Do not cache number of CPUs of the hypervisor * Create instance\_extra entry if it doesn't update * Ignore Cinder error when shutdown instance * Remove use of builtin name * Hyper-V: Fixes cold migration / resize issue * Fix cells capacity calculation for n:1 virt drivers * VMware: Log should use uuid instead of name * VMware: fill in instance metadata when resizing instances * VMware: fill in instance metadata when launching instances * Add the swap and ephemeral BDMs if needed * Updated from global requirements * Block oslo.vmware 0.13.0 due to a backwards incompatible change * hypervisor support matrix: update libvirt KVM (s390x) * Hyper-V: ensure only one log writer is spawned per VM * Prevent access to image when filesystem resize is disabled * Share admin password func test between v2 and v2.1 * VMware: remove dead function in vim\_util * Fix version unit test on Python 3 * Resource tracker: remove invalid conductor call from tests * Remove outdated TODO comment * Disable oslo.vmware test dependency on Python 3 * Run tests with PyMySQL on Python 3 * Drop explicit suds dependency * improve speed of some ec2 keypair tests * Add nova object equivalence based on prims * Cleanups for pci stats in preparation for RT using ComputeNode * Replace dict.iteritems() with six.iteritems(dict) * Add a maintainers file * virt: make sure convert\_all\_volumes catches blank volumes too * compute utils: Remove a useless context parameter * make SchedulerV3PassthroughTestCase use NoDBTest * Don't use dict.iterkeys() * VMware: enforce minimum support VC version * Split up and improve speed of keygen tests * Replace dict(obj.iteritems()) with dict(obj) * libvirt: Fix cpu\_compare tests and a wrong method when logging * Detect empty result when calling objects.BlockDeviceMapping.save() * remove \_rescan\_iscsi from disconnect\_volume\_multipath\_iscsi * Use six.moves.range for Python 3 * Use EnumField for instance external event name * Revert "Detach volume after deleting instance with no host" * Removed unused methods and classes * Removed unused variables * Removed unused "as e/exp/error" statements * Resource tracker: use instance objects for claims * Remove db layer hard-code permission checks for security\_group\_default\_rule\_destroy * Avoid AttributeError at instance.info\_cache.delete * Remove db layer hard-code permission checks for network\_associate * Remove db layer hard-code permission checks for network\_create\_safe * Pass project\_id when create networks by os-tenant-networks * Disassociate before deleting network in os-tenant-networks delete method * Remove db layer hard-code permission checks for v2.1 cells * Move unlock\_override policy enforcement into V2.1 REST API layer * tests: libvirt: Fix test\_volume\_snapshot\_delete tests * Add a finish log * 
Add nova-idmapshift to rootwrap filters * VMware: Missing docstring on parameter * Update docs layout * Add note to doc explaining scope * Show 'reserved' status in os-fixed-ips * Split instance event/tag correctly * libvirt: deprecate libvirt version usage < 0.10.2 * Fix race between resource audit and cpu pinning * Set migration\_type for existing cold migrations and resizes * Add migration\_type to Migration object * Add migration\_type and hidden to Migration database model * libvirt: improve logging * Fix pip-missing-reqs * objects: convert HVSpec to use named enums * objects: convert VirtCPUModel to use named enums * Ironic: Fix delete instance when spawning * Retry a cell delete if host constraint fails * objects: introduce BaseEnumField to allow subclassing * Add policy to cover snapshotting of volume backed instances * objects: add a FlexibleBoolean field type * Don't update RT status when set instance to ERROR * Delete shelved\_\* keys in n-cpu unshelve call * Fix loading things in instance\_extra for old instances * VMware: remove invalid comment * neutron: log hypervisor\_macs before raising PortNotUsable * VMware: use get\_object\_properties\_dict from oslo.vmware * VMware: use get\_datastore\_by\_ref from oslo.vmware * Unshelving volume backed instance fails * Avoid useless copy in get\_instance\_metadata() * Fix raise syntax for Python 3 * Replace iter.next() with next(iter) * libvirt: use instance UUID with exception InstanceNotFound * devref: add information to clarify nova scope * Refactor an unit test to use urlencode() * Additional cleanup after compute RPC 3.x removal * Drop compute RPC 3.x support * libvirt: deprecate the remove\_unused\_kernels config option * Updated from global requirements * libvirt: Use 'relative' flag for online snapshot's commit/rebase operations * Remove db layer hard-code permission checks for quota\_destroy\_all\_\* * Replace unicode with six.text\_type * Replace dict.itervalues() with six.itervalues(dict) * Use compute\_node consistently in ResourceTracker * Fix the wrong comment in the test\_servers.py file * Move ebrctl to compute.filter * libvirt: handle NotSupportedError in compareCPU * Hypervisor Support Matrix renders links in notes * Update fake flavor's root and ephemeral disk size * Code clean up db.instance\_get\_all\_by\_host() * use block\_dev.get\_bdm\_swap\_list in compute api * Catch SnapshotNotFound exception at os-volumes * Rename \_CellProxy.iteritems method to items on py3 * Overwrite NovaException message * API: remove unuseful expected error code from v2.1 service delete api * Fix quota-update of instances stuck in deleting when nova-compute startup finish * API: remove admin require from certificate\_\* from db layer * API: Add policy enforcement test cases for pci API * API: remove admin require for compute\_node(get\_all/search\_by\_hyperviso) from db * API: remove admin require for compute\_node\_create/update/delete from db layer * API: remove admin require from compute\_node\_get\_all\_by\_\* from db layer * Share deferred\_delete func tests between v2 and v2.1 * VMware: add support for NFS 4.1 * Compute: remove reverts\_task\_state from interface attach/detach * VMware: ensure that the adapter type is used * Fix failure of stopping instances during init host * Share assisted vol snapshots test between v2 and v2.1 * Compute: use instance object for \_deleted\_old\_enough method * API: remove instance\_get\_all\_by\_host(\_and\_node) hard-code admin check from db * Remove db layer hard-code permission checks for 
service\_get\_by\_host\* * Remove db layer hard-code permission checks for service\_get\_by\_compute\_host * Detach volume after deleting instance with no host * libvirt: safe\_decode xml for i18n logging * Fix scheduler issue when multiple-create failed * Move our ObjectListBase to subclass from the Oslo one * Fix cinder v1 warning with cinder\_catalog\_info option reference * Deprecate nova ironic driver's admin\_auth\_token * Handle return code 2 from blkid calls * Drop L from literal integer numbers for Python 3 * Libvirt: Use tpool to invoke guestfs api * Minor edits to support-matrix doc * hacking: remove unused variable author\_tag\_re * Update kilo version alias * Refactor tests that use compute's deprecated run\_instance() method * Helper scripts for running under Apache2 * downgrade log messages for memcache server (dis)connect events * don't report service group connection events as errors in dbdriver * Updated from global requirements * Switch to \_set\_instance\_obj\_error\_state in build\_and\_run\_instance * Add SpawnFixture * Log the actual instance.info\_cache when empty in floating ip associate * unify libvirt driver checks for qemu * VMware: Allow other nested hypervisors (HyperV) * servicegroup: remove get\_all method never used as public * libvirt: add todo note to avoid call to libvirt from the driver * libvirt: add method to compare cpu to Host * libvirt: add method to list pci devices to Host * libvirt: add method to get device by name to Host * libvirt: add method to define instance to host * libvirt: add method to get cpu stats to host * monitor: remove dependance with libvirt * Clean up ComputeManager.\_get\_instance\_nw\_info * Updated from global requirements * Cells: Call compute api methods with instance objects * Correct docstring info on two parameters * Start the conversion to oslo.versionedobjects * Cleanup conductor unused methods * Revert "Ironic: do not destroy if node is in maintenance" * fix network setup on evacuate * Reschedules sometimes do not allocate networks * Incorrect argument order passed to swap\_volume * Mark ironic credential config as secret * Fix missing format arg in compute manager * objects: remove field ListOfEnumField * Cleaning up debug messages from previous change in vmops.py * Remove orphaned tables - iscsi\_targets, volumes * console: clean tokens do not happen for all kind of consoles * Fix import order * Skip only one host weight calculation * Fix typo for test cases * VMWare: Isolate unit tests from requests * Imported Translations from Transifex * Cleanup docs landing page * Updated from global requirements * Add ability to inject routes in interfaces.template * tests: make API signature test also check static function * Make test\_version\_string\_with\_package\_is\_good work with pbr 0.11 * Fix disconnect\_volume issue when find\_multipath\_device returns None * Updated from global requirements * Fix assert on call count for encodeutils.safe\_decode mock * Don't wait for an event on a resize-revert * minor edit to policy\_enforcement.rst * Update self with db result in InstanceInfoCache.save * libvirt: retry to undefine network filters during \_post\_live\_migration * Wedge DB migrations if flavor migrations are not complete * Removed twice declared variables * Removed variables used not in the scope that they are declared * libvirt: add method to get hardware info to Host * libvirt: avoid call of listDefinedDomains when post live migration * Remove unused db.aggregate\_metadata\_get\_by\_metadata\_key() call * Removed 
'PYTHONHASHSEED=0' from tox.ini * Changed logic in _compare_result api_samples_test_base * Convert bandwidth_usage related timestamp to UTC native datetime * Drop use of 'oslo' namespace package

2015.1.0
--------

* Add a method to skip cells syncs on instance.save * Add some testing for flavor migrations with deleted things * Add support for forcing migrate_flavor_data * Virt: update shared storage log information message * Fixed functional in tests_servers, to pass with random PYTHONHASHSEED * Adds toctree to v2 section of docs * Fixes X509 keypair creation failure * Update rpc version aliases for kilo * libvirt/utils.py: Remove 'encryption' flag from create_cow_image * Libvirt: Correct logging information and progress when LM * libvirt/utils.py: Remove needless code from create_cow_image * libvirt/utils.py: Clarify comment in create_cow_image function * Fix documentation for scheduling filters * libvirt: check qemu version for NUMA & hugepage support * Add security group calls missing from latest compute rpc api version bump * Add security group calls missing from latest compute rpc api version bump * Make objects serialize_args() handle datetimes in positional args * Imported Translations from Transifex * view hypervisor details rest api should be allowed for non-admins * n-net: turn down log level when vif isn't found in deallocate_fixed_ip * Associate floating IPs with first v4 fixed IP if none specified * Correct the help text for the compute option * Convert NetworkDuplicated to HTTPBadRequest for v2.1 API * Remove comment inconsistent with code * Remove db layer hard-code permission checks for fixed_ip_get_* * Fixed nova-network dhcp-hostsfile update during live-migration * Remove db layer hard-code permission checks for network_get_all_by_host * Remove db layer hard-code permission checks for security_group_default_rule_create * Remove db layer hard-code permission checks for floating_ips_bulk * sync oslo: service child process normal SIGTERM exit * Remove downgrade support from the cellsv2 api db * Fix migrate_flavor_data() to catch instances with no instance_extra rows * libvirt: use importutils instead of python built-in

2015.1.0rc2
-----------

* Imported Translations from Transifex * Updated from global requirements * Control create/delete flavor api permissions using policy.json * Add config option to disable handling virt lifecycle events * Ironic: pass injected files through to configdrive * libvirt: Allow discrete online pCPUs for pinning * Fix migrate_flavor_data() to catch instances with no instance_extra rows * libvirt: unused imported option default_ephemeral_format * libvirt: introduce new method to guest tablet device * Fix migrate_flavor_data string substitution * Object: Fix incorrect parameter set in flavor save_extra_specs * Fix max_number for migrate_flavor data * remove downgrade support from our database migrations * Add policy check for extension_info * Cleanup unnecessary session creation in floating_ip_deallocate * Fix inefficient transaction usage in floating_ip_bulk_destroy * Control create/delete flavor api permissions using policy.json * Fix handling of pci_requests in consume_from_instance * Use list of requests in InstancePCIRequests.obj_from_db * Add numa_node field to PciDevicePool * scheduler: re-calculate NUMA on consume_from_instance * VMware: remove unused method * VMware: enable configuring of console delay * Don't query compute_node through service object in nova-manage * Fixed test
in test\_tracker to work with random PYTHONHASHSEED * Update rpc version aliases for kilo * remove the CONF.allow\_migrate\_to\_same\_host * Fix kwargs['migration'] KeyError in @errors\_out\_migration decorator * Add equality operators to PciDeviceStats and PciDevice objects * libvirt: Add option to ssh to prevent prompting * Validate server group affinity policy * VMware: use oslo.vmware methods for handling tokens * Remove db layer hard-code permission checks for network\_get\_associated\_fixed\_ips * tests: use numa xml automatic generation in libvirt tests * Resource tracker: unable to restart nova compute * Include supported version information * Release Import of Translations from Transifex * Fixed tests in test\_glance to pass with random PYTHONHASHSEED * Refactored tests in test\_neutron\_driver to pass with random PYTHONHASHSEED * refactored test in vmware test\_read\_write\_util to pass with random PYTHONHASHSEED * fixed tests in test\_matchers to pass with random PYTHONHASHSEED * fix for vmware test\_driver\_api to pass with random PYTHONHASHSEED * Update hypervisor support matrix with kvm on system z * Fix kwargs['migration'] KeyError in @errors\_out\_migration decorator * VMware: remove unused parameter for VMOPS spawn * libvirt: make \_get\_instance\_disk\_info conservative * refactored tests to pass in test\_inject to pass with random PYTHONHASHSEED * fixed tests in test\_iptables\_network to work with random PYTHONHASHSEED * refactored tests in test\_objects to pass with random PYTHONHASHSEED * fixed tests in test\_instance to pass with random PYTHONHASHSEED * Replace ssh exec calls with paramiko lib * Fix handling of pci\_requests in consume\_from\_instance * Use list of requests in InstancePCIRequests.obj\_from\_db * Share hide server add tests between v2 and v2.1 * Share V2 and V2.1 images functional tests * change the reboot rpc call to local reboot * 'deleted' filter does not work properly * Spelling mistakes in nova/compute/api.py * Use kwargs from compute v4 proxy change\_instance\_metadata * Delay STOPPED lifecycle event for all domains, not just Xen * Use kwargs from compute v4 proxy change\_instance\_metadata * compute: stop handling virt lifecycle events in cleanup\_host() * Replace BareMetalDriver with IronicDriver in option help string * tests: introduce a NUMAServersTest class * Fix test\_set\_admin\_password\_bad\_state() * Fix test\_attach\_interface\_failure() * Fix test\_swap\_volume\_api\_usage() * Resource tracker: unable to restart nova compute * Forbid booting of QCOW2 images with virtual\_size > root\_gb * Pass migrate\_data to pre\_live\_migration * Fixed order of arguments during execution live\_migrate() * update .gitreview for stable/kilo * Add min/max of API microversions to version API * VMware: Fix attribute error in resize * Release bdm constraint source and dest type * Fix check\_can\_live\_migrate\_destination() in ComputeV4Proxy * compute: stop handling virt lifecycle events in cleanup\_host() * Store context in local store after spawn\_n * Fixed incorrect dhcp\_server value during nova-network creation * Share multiple create server tests between v2 and v2.1 * Remove power\_state.BUILDING * libvirt: cleanup unused lifecycle event handling variables from driver * Add min/max of API microversions to version API * Pass migrate\_data to pre\_live\_migration * libvirt: add debug logging to pre\_live\_migration * Don't ignore template argument in get\_injected\_network\_template * Refactor some service tests and make them not require db * Remove 
and deprecate unused conductor service calls * Convert service and servicegroup to objects * Add numa_node field to PciDevicePool * Ironic: do not destroy if node is in maintenance * libvirt: remove unnecesary quotes * VMware: fix log warning * libvirt: quit early when mempages requested found * VMware: validate CPU limits level * Remove and deprecate conductor get_ec2_ids() * Remove unused metadata conductor parameter * Replace conductor get_ec2_ids() with new Instance.ec2_ids attribute * Add EC2Ids object and link to Instance object as optional attribute * neutron: reduce complexity of allocate_for_instance (security_groups) * neutron: reduce complexity of allocate_for_instance (requested_networks) * Avoid indexing into an empty list in getcallargs * Fixed order of arguments during execution live_migrate() * Fix check_can_live_migrate_destination() in ComputeV4Proxy

2015.1.0rc1
-----------

* Add compute RPC API v4.0 * Reserve 10 migrations for backports * Honor uuid parameter passed to nova-network create * Update compute version alias for kilo * Refactor nova-net cidr validation in prep for bug fix * Fix how service objects are looked up for Cells * websocketproxy: Make protocol validation use connection_info * scheduler: re-calculate NUMA on consume_from_instance * Prevent scheduling new external events when compute is shutdown * Print choices in the config generator * Manage compute node that exposes no pci devices * libvirt: make fakelibvirt more customizable * Use cells.utils.ServiceProxy object within cells_api * Fix Enum field, which allows unrestricted values * consoleauth: Store access_url on token authorization * tests: add a ServersTestBase class * tests: enhance functional tests primitives * libvirt: Add version check when pinning guest CPUs * Open Liberty development * xenapi: pull vm_mode and auto_disk_config from image when rescue * VMware: Fix attribute error in resize * Allow _exec_ebtables to parse stderr * Fix rebuild of an instance with a volume attached * Imported Translations from Transifex * Stacktrace on live migration monitoring * Add 'docker' to the list of known hypervisor types * Respect CONF.scheduler_use_baremetal_filters * Make migration 274 idempotent so it can be backported * Add 'suspended' lifecycle event * Fix how the Cells API is returning ComputeNode objects * Ironic: fix log level manipulation * Fix serialization for Cells Responses * libvirt: fix disablement of NUMA & hugepages on unsupported platforms * Optimize periodic call to get_by_host * Fix multipath device discovery when UFN is enabled * Use retrying decorator from oslo_db * virt: Make sure block device info is persisted * virt: Fix block_device tests * instance termination with update_dns_entries set fails * Filter fixed IPs from requested_networks in deallocate_for_instance * Fixes _cleanup_rbd code to capture ImageBusy exception * Remove old relation in Cells for ComputeNode and Service * consoleauth: remove an instance of mutation while iterating * Add json-schema for v2.1 fixed-ips * Share V2 and V2.1 tenant-networks functional tests * Share migrations tests between V2 and V2.1 * Merging instance_actions tests between V2 and V2.1 * Share V2 and V2.1 hosts functional tests * Add serialization of context to FakeNotifier * Handle nova-network tuple format in legacy RPC calls * remove usage of policy.d which isn't cached * Update check before migrating flavor * Expand Origin header check for serial console * libvirt: reuse unfilter_instance
pass-through method * No need to create APIVersionRequest every time * Libvirt: preallocate\_images CONFIG can be arbitrary characters * Add some tests for the error path(s) in RBD cleanup\_volumes() * VMware: add instance to log messages * Hyper-V: checks for existent Notes in list\_instance\_notes * Fix incorrect statement in inline neutronv2 docs * Imported Translations from Transifex * Vmware:Find a SCSI adapter type for attaching iSCSI disk * Avoid MODULEPATH environment var in config generator * Be more forgiving to empty context in notification * Store cells credentials in transport\_url properly * Fix API links and labels * Stale rc.local file - vestige from cloudpipe.rst * Remove stale test + opensssl information from docs * Add the last of the oslo libraries to hacking check * Cancel all waiting events during compute node shutdown * Update hypervisor support matrix for ironic wrt pause/suspend * Scheduler: deprecate mute\_weight\_value option on weigher * Pass instance object to add\_instance\_fault\_from\_exc * Remove dead vmrc code * Add vnc\_keymap support for vmware compute * Remove compute/api.py::update() * add ironic hypervisor type * Removes XML MIME types from v2 API information * API: fix typo in unit tests * Add field name to error messages in object type checking * Remove obsolete TODO in scheduler filters * Expand valid server group name character set * Raise exception when backup volume-backed instance * Libvirt SMB volume driver: fix volume attach * Adds Compute API v2 docs * PCI tracker: make O(M \* N) clean\_usage algo linear * Fix v2.1 list-host to remove 'services' filter * Fix incorrect http\_conflict error message * Link to devstack guide for appropriate serial\_console instructions * Skip socket related unit tests on OSX * Add debug logging to quota\_reserve flow * Fix missing the cpu\_pinning request * Hyper-V: Sets \*DataRoot paths for instances * Refactored test in test\_neutron\_driver to pass with random PYTHONHASHSEED * fixed tests in test\_neutrounv2 to pass with random PYTHONHASHSEED * Refactored test in linux\_net to pass with random PYTHONHASHSEED * refactored tests in test\_wsgi to pass with random PYTHONHASHSEED * fixed tests in test\_simple\_tenant\_usage to pass with random PYTHONHASHSEED * Refactored test\_availability\_zone to work properly with random PYTHONHASHSEED * fixed test in test\_disk\_config to work with random PYTHONHASHSEED * Fixed test to work with random PYTHONHASHSEED * Fix \_instance\_action call for resize\_instance in cells * Add some logging in the quota.reserve flow * Check host cpu\_info if no cpu\_model for guest * Move ComputeNode creation at init stage in ResourceTracker * Releasing DHCP in nova-network fixed * Fix PCIDevicePool.to\_dict() when the object has no tags * Convert pci\_device\_pools dict to object before passing to scheduler * Sync from Oslo-Incubator - reload config files * Fix v2.1 hypervisor servers to return empty list * Add support for cleaning in Ironic driver * Adjust resource tracker for new Ironic states * Ironic: Remove passing Flavor's deploy\_{kernel, ramdisk} * don't 500 on invalid security group format * Adds cleanup on v2.2 keypair api and tests * Set conductor use\_local flag in compute manager tests * Use migration object in resource\_tracker * Move suds into test-requirements.txt * Make refresh\_instance\_security\_rules() handle non-object instances * Add a fixture for the NovaObject indirection API * Add missing \`shows\` to the RPC casts documentation * Override 
update_available_resources interval * Fix for deletes first preexisting port if second was attached to instance * Avoid load real policy from policy.d when using fake policy fixture * Neutron: simplify validate_networks * Switch to newer cirros image in docs * Fix common misspellings * Scheduler: update doctring to use oslo_config * Skip 'id' attribute to be explicitly deleted in TestCase * Remove unused class variables in extended_volumes * libvirt: remove volume_drivers config param * Make conductor use instance object * VMware: add VirtualVmxnet3 to the supported network types * Fix test cases still use v3 prefix * Typo in oslo.i18n url * Fix docs build break * Updated from global requirements * Fix typo in nova/tests/unit/test_availability_zones.py * mock out build_instances/rebuild_instance when not used * Make ComputeAPIIpFilterTestCase a NoDBTestCase * Remove vol_get_usage_by_time from conductor api/rpcapi * default tox cmd should also run 'functional' target * VMware: Consume the oslo.vmware objects * Release bdm constraint source and dest type * VMware: save instance object creation in test_vmops * libvirt: Delay only STOPPED event for Xen domain * Remove invalid hacking recheck for baremetal driver * Adds Not Null constraint to KeyPair name * Fix orphaned ports on build failure * VMware: Fix volume relocate during detach

2015.1.0b3
----------

* Fix AggregateCoreFilter return incorrect value * Remove comments on API policy, remove core param * Add policy check for consoles * Sync from oslo-incubator * Rename and move the v2.1 api policy into separated files * Disable oslo_messaging debug logging * heal_instance_info_cache_interval help clearer * Forbid booting of QCOW2 images with virtual_size > root_gb * don't use oslo.messaging in mock * BDM: Avoiding saving if there were no changes * Tidy up sentinel comparison in pop_instance_event * Tidy up dict.setdefault() usage in prepare_for_instance_event * Remove duplicate InvalidBDMVolumeNotBootable * libvirt: make default value of numa cell memory to 0 when not defined * Add the instance update calls from Compute * Save bdm.connection_info before calling volume_api.attach_volume * Add InstanceMapping object * Add CellMapping object * load ram_allocation_ratio when asked * Remove pci_device.update_device helper function * Tox: reduce complexity level to 35 * Remove db layer hard-code permission checks for service_get_all * Expand help message on some quota config options * Test fixture for the api database * remove duplicate calls to cfg.get() * Remove context from remotable call signature * Actually stop passing context to remotable methods * Remove usage of remotable context parameter in service, tag, vif * Remove usage of remotable context parameter in security_group* * Remove usage of remotable context parameter in pci_device, quotas * let fake virt track resources * doc: fix a docstext formatting * Update unique constraint of compute_nodes with deleted column * Modify filters to get instance info from HostState * Add the RPC calls for instance updates * Implement instance update logic in Scheduler * Log exception from deallocate_port_for_instance for triage * Remove usage of remotable context parameter in migration, network * Remove usage of remotable context parameter in compute_node, keypair * Remove usage of remotable context parameter in instance* objects * Remove usage of remotable context parameter in fixed_ip, flavor, floating_ip * Remove usage of remotable context parameter in
ec2 object * libvirt: partial fix for live-migration with config drive * Added assertJsonEqual method to TestCase class * VMware: Improve reporting of path test failures * libvirt test\_cpu\_info method fixed random PYTHONHASHSEED compatibility * Remove usage of remotable context parameter in bandwidth, block\_device * Remove usage of remotable context parameter in agent, aggregate * Remove db layer hard-code permission checks for pci * Objects: use setattr rather than dict syntax in remotable * Split out NovaTimestampObject * libvirt: Resize down an instance booted from a volume * add neutron api NotImplemented test cases for Network V2.1 * Stop using exception.message * Remove unused oslo logging fixture * libvirt: don't allow to resize down the default ephemeral disk * Add api microvesion unit test case for wsgi.action * Change some comments for instance param * Hyper-V: Adds VMOps unit tests (part 2) * Add get\_api\_session to db api * Use the proper database engine for nova-manage * Add support for multiple database engines * Virt: update fake driver to use UUID as lookup key * VMware: use instance UUID as instance name * VMware: update test\_vm\_util to use instance object * Handle exception when doing detach\_interface * Variable 'name' already declared in 'for' loop * Handle RESIZE\_PREP status when nova compute do init\_instance * Move policy enforcement into REST API layer for v2.1 api volume\_attachment * Remove the elevated context when get network * Handles exception when unsupported virt-type given * Fix confusing log output in nova/nova/network/linux\_net.py * Workaround for race condition in libvirt * remove unneeded teardown related code * Fixed archiving of deleted records * libvirt: Remove minidom usage in driver.py * Stop spamming logs when creating context * Fix ComputeNode backport for Service.obj\_make\_compatible * Break out the child version calculation logic from obj\_make\_compatible() * Fix PciDeviceDBApiTestCase with referential constraint checking * Verify all quotas before updating the db * Add shadow table empty verification * Add @wrap\_exception() for 3 compute functions * Remove FK on service\_id and make service\_id nullable * Using Instance object instead of db call * Revert "Removed useless method \_get\_default\_deleted\_value." 
* Remove db layer hard-code permission checks for network\_count\_reserved\_ips * implement user negative testing for flavor manage * refactor policy fixtures to allow use of real policy * libvirt: remove unnecessary flavor parameter * Compute: no longer need to pass flavor to the spawn method * Update some ResizeClaimTestCase tests * Move InstanceClaimTestCase.test\_claim\_and\_audit * Handle exception when attaching interface failed * Deprecate Nova in tree EC2 APIs * cells: don't pass context to instance.save in instance\_update\_from\_api * ensure DatabaseFixture removes db on cleanup * objects: introduce numa topology limits objects * Add a test that validates object backports and child object versions * Fix ArchiveTestCase on MySQL due to differing exceptions * VMware: fix VM rescue problem with VNC console * VMware: Deprecation warning - map one nova-compute to one VC cluster * compute: don't trace on InstanceNotFound in reverts\_task\_state * Fix backporting objects with sub-objects that can look falsey * neutron: deprecate 'allow\_duplicate\_networks' config option * Fix Juno nodes checking service.compute\_node * Fix typo in \_live\_migration\_cleanup\_flags method * libvirt: add in missing translation for exception * Move policy enforcement into REST API layer for v2.1 extended\_volumes * Remove useless policy rules for v2.1 api which removed/disabled * Remove db layer hard-code permission checks for service\_get\_all\_by\_\* * Fix infinite recursion caused by unnecessary stub * Websocket Proxy should verify Origin header * Improve 'attach interface' exception handling * Remove unused method \_make\_stub\_method * Remove useless get\_one() method in SG API * Fix up join() and leave() methods of servicegroup * network: Fix another IPv6 test for Mac * Add InstanceList.get\_all method * Use session with neutronclient * Pass correct context to get\_by\_compute\_node() * Revert "Allow force-delete irrespective of VM task\_state" * Fix kwargs['instance'] KeyError in @reverts\_task\_state decorator * Fix copy configdrive during live-migration on HyperV * Move V2 sample files to respective directory * V2 tests -Reuse server post req/resp sample file * V2.1 tests - Reuse server post req/resp sample file * Remove an unused config import in nova-compute * Raise HTTPNotFound for Port/NetworkNotFound * neutronv2: only create client once when adding/removing fixed IPs * Stop stacktracing in \_get\_filter\_uuid * libvirt: Fix live migration failure cleanup on ceph * Sync with latest oslo-incubator * Better logging of resources * Preserve preexisting ports on server delete * Move oslo.vmware into test-requirements.txt * Remove db layer hard-code permission checks for network\_get\_by\_uuid * Refactor \_regex\_instance\_filter for testing * Add instance\_mappings table to api database * ec2: clean up in test\_cinder\_cloud * Remove unused method queue\_get\_for * Remove make\_ip\_dict method which is not used * Remove unused method delete\_subnet * Remove unused method disable\_vlan * Remove unused method get\_request\_extensions * Fix wrong log output in nova/nova/tests/unit/fake\_volume.py * Updated from global requirements * Remove db layer hard-code permission checks for network\_get\_by\_cidr * Add cell\_mappings table to api database * Ban passing contexts to remotable methods * Fix a remaining case of passing context to a remotable in scheduler * Fix several cases of passing context to quota-related remotable methods * Fix some cases of passing context to remotables with security groups * 
Replace RPC topic-based service queries with binary-based in cells * Replace RPC topic-based service queries with binary-based in scheduler * Fix some straggling uses of passing context to remotable methods in tests * VMware: remove code invoking deprecation warning * Fix typo in nova/scheduler/filters/utils.py * Remove db layer hard-code permission checks for network\_delete\_safe * Don't add exception instance in LOG.exception * Move policy enforcement into REST API layer for v2.1 servers * Move policy enforcement into REST API layer for v2.1 api attach\_interfaces * Remove db layer hard-code permission checks for flavor-manager * Remove db layer hard-code permission checks for service\_delete/service\_get * Remove db layer hard-code permission checks for service\_update * Fix 'nova show' return incorrect mac info * Use controller method in all admin actions tests * Remove db layer hard-code permission checks for flavor\_access * Modify filters so they can look to HostState * let us specify when samples tests need admin privs * Updated from global requirements * Remove cases of passing context to remotable methods in Flavor * Remove cases of passing context to remotable methods in Instance * Fix up PciDevice remotable context usage * libvirt: add comment for vifs\_already\_plugged=True in finish\_migration * neutron: check for same host in \_update\_port\_binding\_for\_instance * Move policy enforcement into REST API layer for v2.1 security groups * Keep instance state if lvm backend not impl * Replace RPC topic-based service queries in nova/api with binary-based * Remove service\_get\_by\_args from the DB API * Remove usage of db.service\_get\_by\_args * Make unit tests inherit from test.NoDBTestCase * Fixed incorrect behavior of method sqlalchemy.api.\_check\_instance\_exists * Remove db layer hard-code permission checks for migrations\_get\* * vmware: support both hard and soft reboot * xenapi: Fix session tests leaking state * libvirt: Cleanup snapshot tests * Change instance disappeared during destroy from Warning to Info * Replace instance flavor delete hacks with proper usage * Add delattr support to base object * Use flavor stored with instance in vmware driver * Use flavor stored with instance in ironic driver * Modify AggregateAPI methods to call the Scheduler client methods * Create Scheduler client methods for aggregates * Add update and delete \_aggregate() method to the Scheduler RPC API * Instantiate aggregates information when HostManager is starting * Add equivalence operators to NUMACell and NUMAPagesTopology * Adds x509 certificate keypair support * Better round trip for RequestContext<->Dict conversion * Make scheduler client reporting use ComputeNode object * Prevent update of ReadOnlyDict * Copy the default value for field * neutron: add logging during nw info\_cache refresh when port is gone * Add info for Standalone EC2 API to cut access to Nova DB * VMware: Fix disk UUID in instance's extra config * Update config generator to use new style list\_opts discovery * Avoid KeyError Exception in extract\_flavor() * Imported Translations from Transifex * Updated from global requirements * Move policy enforcement into REST API layer for v2.1 create backup * Truncate encoded instance sys meta to 255 or less * Adds keypair type in nova-api * Switch nova.virt.vmwareapi.\* to instance dot notation * Allow disabling the evacuate cleanup mechanism in compute manager * Change queries for network services to use binary instead of topic * Add Service.get\_by\_host\_and\_binary 
and ServiceList.get\_by\_binary * Compute: update config drive settings on instance * Fix docstrings for assorted methods * Config driver: update help text for force\_config\_drive * libvirt-numa.rst: trivial spelling fixes * Ensure bridge deleted with brctl delbr * create noauth2 * enhance flavor manage functional tests * Add API Response class for more complex testing * Add more log info around 'not found' error * Remove extended addresses from V2.1 update & rebuild * Switch nova.virt.hyperv.\* to instance dot notation * Revert instance task\_state when compareCPU fails * Libvirt: Fix error message when unable to preallocate image * Switch nova.virt.libvirt.\* to instance dot notation * Add nova-manage commands for the new api database * Add second migrate\_repo for cells v2 database migrations * Updated from global requirements * Force LANGUAGE=en\_US in test runs * neutron: consolidate common unbind ports logic * Sync oslo policy change * Remove compute\_node field from service\_get\_by\_compute\_host * Fix how the Service object is loading the compute\_node field * Remove compute\_node from service\_get\_by\_cn Cells API method * Remove want\_objects kwarg from nova.api.openstack.common.get\_instance * Switch nova.virt.\* to use the object dot notation * add string representation for context * Remove db layer hard-code permission checks for migration\_create/update * Disables pci plugin for v2.1 & microversions * Fix logic for checking if az can be updated * Add obj\_alternate\_context() helper * libvirt: remove libvirt import from tests so we only use fakelibvirt * capture stdout and logging for OSAPIfixture test * remove unused \_authorize\_context from security\_group\_default\_rules.py * Switch nova.context to actually use oslo.context * Fixed incorrect indent of test\_config\_read\_only\_disk * Fixed incorrect assertion in test\_db\_api * Remove TranslationFixture * Replace fanout to False for CastAsCall fixture * Make ConsoleAuthTokensExtensionTestV21 inherit from test.NoDBTestCase * Remove db layer hard-code permission checks for task\_log\_get\* * Remove db layer hard-code permission checks for task\_log\_begin/end\_task * Api: remove unusefull compute api from cells * Remove db layer hard-code permission checks for service\_create * Imported Translations from Transifex * Change v3 import to v21 in 2.1 api unit test * Fix NotImplementedError handling in interfaces API * Support specifing multiple values for aggregate keys * Remove attach/detach/swap from V2.1 extended\_volumes * Make metadata cache time configurable * Remove db layer hard-code permission checks for fixed\_ip\_disassociate\_all\_by\_timeout * Move policy enforcement into REST API layer for v2.1 api assisted\_volume\_snapshots * Fix tiny typo in api microversions doc * Fixes Hyper-V: configdrive is not migrated to destination * ensure that ram is >= 1 in random flavor creation * Fixes 500 error message and traces when no free ip is left * db: Add index on fixed\_ips updated\_at * Display host chosen for instance by scheduler * PYTHONHASHSEED bug fix in test\_utils * fixed tests in test\_vm\_util to work with random PYTHONHASHSEED * Add microversion allocation on devref * Remove OS-EXT-IPS attributes from V2.1 server ips * Remove 'locked\_by' from V2.1 extended server status * Remove 'id' from V2.1 update quota\_set resp * Fix bad exception logging * VMware: Ensure compute\_node.hypervisor\_hostname is unique * Inherit exceptions correctly * Remove en\_US translation * Move policy enforcement into REST API layer 
for v2.1 cloudpipe * Move policy enforcement into REST API layer for v2.1 security\_group\_default\_rules * linux\_net.metadata\_accept(): IPv6 support * Enforce in REST API layer on v2.1 api remote consoles * Remove accessips attribute from V2.1 POST server resp * Move policy enforcement into REST API layer for v2.1 floating\_ip\_dns * Fix bad interaction between @wsgi.extends and @wsgi.api\_version * Enforce in REST API layer on v2.1 shelve api * Move policy enforcement into REST API layer for v2.1 api evacuate * Add manual version comparison to microversion devref document * Switch to uuidutils from oslo\_utils library * Add developer documentation for writing V2.1 API plugins * Convert nova.compute.\* to use instance dot notation * Better power\_state logging in \_sync\_instance\_power\_state * Use instance objects in fping/instance\_actions/server\_metadata * Fix misspellings words in nova * Fix KeyErrors from incorrectly formatted NovaExceptions in unit tests * Move policy enforcement into REST API layer for v2.1 floating ips * Switch nova.network.\* to use instance dot notation * Revert : Switch off oslo.\* namespace check temporarily * Move policy enforcement into REST API layer for v2.1 networks related * Remove db layer hard-code permission checks for v2.1 agents * Move v2.1 virtual\_interfaces api policy enforcement into REST API layer * fix 'Empty module name' exception attaching volume * Use flavor stored with instance in libvirt driver * Handle 404 in os-baremetal-nodes GET * API: Change the API cpu\_info to be meaning ful * Updated from global requirements * Make compute unit tests inherit from test.NoDBTestCase * Request objects in security\_groups api extensions * Reuse is\_int\_like from oslo\_utils * VMware: fix network connectivity problems * Move policy enforcement into REST API layer for v2.1 admin password * Fix the order of base classes in migrations test cases * Libvirt: Allow missing volumes during delete * Move policy enforcement into REST API layer for v2.1 server\_diagnostics * Fix wrong log when reschedule is disabled * Replace select-for-update in fixed\_ip\_associate * Move policy enforcement into REST API layer for v2.1 fping * Consolidate use api request version header * Copy image from source host when ImageNotFound * VMware: update get\_available\_datastores to only use clusters * Add useful debug logging when policy checks fail * Remove unused conductor methods * Call notify\_usage\_exists() without conductor proxying * Updated from global requirements * Make notifications use BandwidthUsageList object * libvirt: Fix migration when image doesn't exist * Fix a typo of devref document for api\_plugin * console: add unit tests for baseproxy * libvirt: log host capabilities on startup * Allow configuring proxy\_host and proxy\_port in nova.conf * Fixes novncproxy logging.setup() * Add descriptions to some assertBooleans * Remove update\_store usage * Enforce policy checking in REST API layer for v2.1 server\_password * Add methods that convert any volume BDM to driver format * Split scheduler weight test on ram * Split scheduler weight test on metrics * Split scheduler weight test on ioops * Fix 500 when deleting a not existing ec2 security group * Remove backwards compat oslo.messaging entries from setup.cfg * Change utils.vpn\_ping() to return a Boolean * Enable retry when there are multiple force hosts/nodes * Use oslo.log * switch LOG.audit to LOG.info * Add catch FlavorExtraSpecsNotFound in V2 API * tests: remove duplicate keys from dictionary * Add 
blkid rootwrap filter * Fix idempotency of migration 269 * objects: fix issue in test cases for instance numa * VMware: Accept image and block device mappings * nova flavor manage functional test * extract API fixture * Fix V2 hide server address functional tests * Remove unused touch command filter * Add a test for block\_device\_make\_list\_from\_dicts * Move policy enforcement into REST API layer for v2.1 floating\_ip\_pools * libvirt: address test comments for zfcp volume driver changes * libvirt: Adjust Nova to support FCP on System z systems * Fix BM nodes extension to deal with missing node properties * VMware: update the support matrix for security groups * Ignore 'dynamic' addr flag on gateway initialization * Adds xend to rootwrap.d/compute.filters * Create volume in the same availability zone as instance * Wrap IPv6 address in square brackets for scp/rsync * fake: fix public API signatures to match virt driver * Added retries in 'network\_set\_host' function * Use NoDBTestCase instead of TestCase * Change microversion header name * VMware: ensure that resize treats CPU limits correctly * Compute: pass flavor object to migrate\_disk\_and\_power\_off * extract method from fc volume discovery * Set instance NUMA topology on HostState * Support live-migrate of instances in PAUSED state * Fix DB access by FormatMappingTestCase * api: report progress when instance is migrating * libvirt: proper monitoring of live migration progress * libvirt: using instance like object * libvirt: convert tests from mox to mock * XenAPI: Fix data loss on resize up * Delete instance files from dest host in revert-resize * Pass the capabilities to ironic node instance\_info * No need to re-fetch instance with sysmeta * Switch nova.api.\* to use instance dot notation * Objectify calls to service\_get\_by\_compute\_host * Refactor how to remove compute nodes when service is deleted * Move policy enforcement into REST API layer for v2.1 admin actions * Contrail VIF Driver changes for Nova-Compute * libvirt : Fix slightly misleading parameter name, validate param * libvirt: cleanup setattr usage in test\_host * libvirt: add TODOs for removing libvirt attribute stubs * Expand try/except for get\_machine\_ips * Switch nova.compute.manager to use instance dot notation * libvirt: stub out VIR\_CONNECT\_LIST\_DOMAINS\_INACTIVE * libvirt: stub out VIR\_SECRET\_USAGE\_TYPE\_ISCSI for older libvirt * Change calls to service information for Hypervisors API * Add handling for offlined CPUs to the nova libvirt driver * Make compute API create() use BDM objects * Remove redundant tearDown from ArchiveTestCase * libvirt: switch LibvirtConnTestCase back to NoDBTestCase * Replace usage of LazyPluggable by stevedore driver * Don't mock time.sleep with None * Libvirt: Support ovs plug in vhostuser vif * Removed duplicate key from dictionary * Fixes Attribute Error when trying to spawn instance from vhd on HyperV * Remove computenode relationship on service\_get * Remove nested service from DB API compute\_nodes * libvirt: Use XPath instead of loop in \_get\_interfaces * fixed tests to work with random PYTHONHASHSEED * Imported Translations from Transifex * Make the method \_op\_method() public * Quiesce boot from volume instances during live snapshot * Fix "Host Aggregate" section of the Nova Developer Guide * network: Fix another IPv6 test for Mac * Pre-load default filters during scheduler initialization * Libvirt: Gracefully Handle Destroy Error For LXC * libvirt: stub VIR\_CONNECT\_LIST\_DOMAINS\_ACTIVE for older 
libvirts * Fix VNC access, when reverse DNS lookups fail * Remove now useless requirements wsgiref * Add JSON schema for v2.1 add network API * Handle MessagingException in unshelving instance * Compute: make use of dot notation for console access * Compute: update exception handling for spice console * Add missing api samples for floating-ips api(v2) * Move v2.1 rescue api policy enforcement into REST API layer * Move policy enforcement into REST API layer for v2.1 ips * Move policy enforcement into REST API layer for v2.1 multinic * Move policy enforcement into REST API layer for v2.1 server\_metadata * VMware: fix resize of ephemeral disks * VMware: add in a utility method for detaching devices * VMware: address instance resize problems * Fixes logic in compute\_node\_statistics * Cover ListOfObjectField for relationship test * Replace oslo-incubator with oslo\_context * Libvirt: add in unit tests for driver capabilities * Ironic: add in unit tests for driver capabilities * Tests: Don't require binding to port 4444 * libvirt: fix overly strict CPU model comparison in live migration * Libvirt: vcpu\_model support * IP filtering is not accurate when used with limit * Change how the API is getting a list of compute nodes * Change how Cells are getting the list of compute nodes * Change how HostManager is calling the service information * Move scheduler.host\_manager to use ComputeNode object * patch out nova libvirt driver event thread in tests * Change outer to inner join in fixed IP DB API func * Small cleanup in pci\_device\_update * Remove useless NotFound exception catching for v2/v2.1 fping * V2.1 cleanup: Use concrete NotFound exception instead of generic * Drop deprecated namespace for oslo.rootwrap * Add vcpu\_model to instance object * Pass instance primitive to instance\_update\_at\_top() * Adds infrastructure for microversioned api samples * Libvirt: Support for generic vhostuser vif * Pull singleton config check cruft out of SG API * hacking: Got rid of unnecessary TODO * Remove unused function in test * Remove unused function * hardware: fix reported host mempages in numa cell * objects: fix numa obj relationships * objects: remove default values for numa cell * Move policy enforcement into REST API layer for v2.1 suspend/resume server * Move policy enforcement into REST API layer for v2.1 api console-output * Move policy enforcement into REST API layer for v2.1 deferred\_delete * Move migrate-server policy enforce into REST API * Add API schema for v2.1 tenant networks API * Move policy enforcement into REST API layer for v2.1 lock server * Libvirt: cleanup rescue lvm when unrescue * Sync simple\_tenant\_usage V2.1 exception with V2 and add test case * IP filtering can include duplicate instances * Add recursive flag to obj\_reset\_changes() * Compute: use dot convention for \_poll\_rescued\_instances * Add tests for nova-manage vm list * libvirt: add libvirt/parallels to hypervisor support matrix * Compute: update reboot\_instance to use dot instance notation * Fix incorrect compute api config indentation * libvirt: fix emulator thread pinning when doing strict CPU pinning * libvirt: rewrite NUMA topology generator to be more flexible * libvirt: Fix logically inconsistent host NUMA topology * libvirt: utils canonicalize now the image architecture property * A couple of grammar fixes in help strings * Implement api samples test for os-baremetal-nodes Part 2 * Compute: use consistent instance dot notation * Log warning if CONF.my\_ip is not found on system * libvirt: remove 
\_destroy\_instance\_files shim * virt: Fix interaction between disk API tests * network: Fix IPv6 tests for Mac * Use dot notation on instance object fields in \_delete\_instance * libvirt: memnodes should be set to a list instead of None * Cleanup add\_fixed\_ip\_to\_instance tests * Cleanup test\_instance\_dns * Fix detach\_sriov\_ports to get context to be able to get image metadata * Implement api samples test for os-baremetal-nodes * Fix description of parameters in nova functions * Stop making the database migration backend lazy pluggable * Updated from global requirements * Libvirt: Created Nova driver for Quobyte * Adds keypair type database migration * libvirt: Enable serial\_console feature for system z * Make tests use sha256 as openssl default digest algorithm * Improved performance of db method network\_in\_use\_on\_host * Replace select-for-update in floating\_ip\_allocate\_address * Move policy enforcement into REST API layer for v2.1 pause server * Libvirt: update log message * Update usage of exception MigrationError * Extract preserve ephemeral on rebuild from servers plugin * VMware: update get\_vm\_resize\_spec interface * VMware: Enable spawn from OVA image * Raise bad request for missing 'label' in tenant network * CWD is incorrectly set if exceptions are thrown * VMware: add disk device information to VmdkInfo * Use controller methods directly in test\_rescue * Call controller methods directly in test\_multinic * Add version specific test cases for microversion * Change v2.1 API status to CURRENT * Remove wsgi\_app usage from test\_server\_actions * Change some v2.1 extension names to v2 * Add VirtCPUModel nova objects * Add enum fieldtype field * Convert v2.1 extension\_info to show V2 API extension list * Remove compatibility check for ratelimit\_v3 * Keep instance state if ssh failed during migration * Cleanup and removal of unused code in scheduler unit tests * Fix incorrect use of mock in scheduler test * Make test re-use HTTPRequest part 5 * Refactor test\_filter\_scheduler use of fakes * consolidate set\_availability\_zones usage * Warn about zookeeper service group driver usage * Updated from global requirements * Update matrix for kvm on ppc64 * Switch off oslo.\* namespace check temporarily * Switch to using oslo\_\* instead of oslo.\* * Adjust object\_compat wrapper order * Add more tests for tenant network API * Sync with oslo-incubator * Make compute use objects usage 'best practice' * Enable BIOS bootmenu on AMI-based images 2015.1.0b2 ---------- * libvirt: fix console device for system z for log file * Fix references to non-existent "pause" section * libvirt: generate proper config for PCS containers * libvirt: add ability to add file and block based filesystem * libvirt: add ploop disks format support * Fix improper use of Stevedore * libvirt: Fail when live block migrating instance with volumes * Add notification for suspend * Add API schema for v2.1 networks API * Remove v1.1 from v2.1 extension description * Add \_LW for missing translations * Treat LOG.warning and LOG.warn same * Add JSON schema for v2.1 'quota\_class' API * Add missing setup.cfg entry for os-user-data plugin * Add api\_version parameter for API sample test base class * Add suggestion to dev docs for debugging odd test failures * Add max\_concurrent\_builds limit configuration * Fixes Hyper-V configdrive network injection issue * Update Power State after deleting instance * Remove temporary power state variables * Make obj\_set\_defaults() more useful * Adds devref for API 
Microversions * PCI NUMA filtering * Ensure publisher\_id is set correctly in notifications * libvirt: Use XPath instead of loop in \_get\_all\_block\_devices * libvirt: Use XPath instead of loop in get\_instance\_diagnostics * fix typo in rpcapi docstring * Fix conductor servicegroup joining when zk driver is used * Do not treat empty key\_name as None * Failed to discovery when iscsi multipath and CHAP both enabled * Fix network tests response code checking * Remove unused error from v2.1 create server * Fix corrupting the object repository with test instance objects * Change cell\_type values in nova-manage * Fix bad mocking of methods on Instance * Updated from global requirements * VMware: fix resume\_state\_on\_host\_boot * Fix cells rpc connection leak * Remove redundant assert of mock volume save call * Don't create block device mappings in the API cell * Add formal doc recording hypervisor feature capability matrix * Ironic: Adds config drive support * libvirt-xen: Fix block device prefix and disk bus * libvirt-xen: don't request features ACPI or APIC with PV guest * Make EC2 compatible with current AWS CLI * libvirt: remove pointless loop after live migration finishes * Remove useless argparse requirement * add asserts of DriverBlockDevice save call parameters * fix call of DriverVolumeBlockDevice save in swap\_volume * Use a workarounds group option to disable live snaphots * libvirt : Add support for --interface option in iscsiadm * Cells: Fix service\_get\_by\_compute\_host * Expand instances project\_id index to cover deleted as well * Remove unused conductor parameter from get\_host\_availability\_zone() * Fixes Hyper-V instance snapshot * Add more status when do \_poll\_rebooting\_instances * Adds barbican keymgr wrapper * libvirt: avoid setting the memnodes where when it's not a supported option * Make code compatible with v4 auth and workaround webob bug * Fix likely undesired use of redirection * Save bdm in swap\_volume * doc: document manual testing procedure for serial-console * nova net-delete network is not informative enough * Improvement in 'network\_set\_host' function * Fix typo in nova/virt/disk/vfs/localfs.py * Fix expected error in V2.1 add network API * libvirt: fix failure when attaching volume to iso instance * Add log message to is\_luks function * Access migration fields like an object in finish\_revert\_resize * Remove unused migration parameter from \_cleanup\_stored\_instance\_types * object: serialize set to list * Fix leaking exceptions from scheduler utils * Adds tests for Hyper-V LiveMigration utils * Adds tests for Hyper-V VHD utils * libvirt: fix missing block device mapping parameter * libvirt: add QEMU built-in iSCSI initiator support * Add update\_or\_create flag to BDM objects create() * Typos fixed * Remove unused method from test\_metadata * libvirt: Support iSCSI live migration for different iSCSI target * Add JSON schema for "associate\_host" API * Add migrate\_flavor\_data to nova-manage * Adds logging to ComputeCapabilitiesFilter failures * Add flavor fields to Instance object * Fix up some instance object creation issues in tests * Fix misspellings in hardware.py * VMware: add in utility methods for copying and deleting disks * Apply v2.1 API to href of version API * Revert "Raise if sec-groups and port id are provided on boot" * libvirt: always pass image\_meta when getting guest XML * libvirt: assume image\_meta is non-None in blockinfo module * libvirt: always pass image meta when getting disk info from bdm * Calls to superclass' 
\_\_init\_\_ function is optional * Enforce DB model matches results of DB migrations * Add missing foreign keys for sqlite * Fix an indentation in server group api samples template * Allow instances to attach to shared external nets * Handle ironic\_client non-existent case * Cells: Record initial database split in devref * Use a workarounds option to disable rootwrap * virt: Fix images test interaction * libvirt: add parallels virt\_type * Convert nova-manage list to use Instance objects * Create a 'workarounds' config group * Updated from global requirements * don't use exec cat when we can use read * don't assert\_called\_once\_with with a real time * Network: correct VMware DVS port group name lookup * Refactor ComputeCapabilitiesFilter as bugfix preparation * libvirt: Set SCSI as the default cdrom bus on System z * Adds common policy authorizer helper functions for Nova V2.1 API * Adds skip\_policy\_check flag to Compute/Network/SecurityGroup API * Make test re-use HTTPRequest part 4 * libvirt: update uri\_whitelist in fakelibvirt.Connection * Revert "Adds keypair type database migration" * Support for ext4 as default filesystem for ephemeral disks * Raise NotFound if attach interface with invalid net id or port id * Change default value of multi\_instance\_display\_name\_template * Check for LUKS device via 'isLuks' subcommand * disk: use new vfs method and option to extend * Replace select-for-update in fixed\_ip\_associate\_pool * Remove unused content\_type\_params() * libvirt: always pass image meta when getting disk mapping * libvirt: always pass image meta when getting disk info * Add API schema for v2.1 server reboot actions * objects: fix typo in changelog of compute\_node * Add API schema for v2.1 'removeFloatingIp' * Add API schema for v2.1 'addFloatingIp' * Add parameter\_types.ip\_address for cleanup * Reply with a meaningful exception when ports are over the quota limit * Adds keypair type database migration * A minor change of CamelCase parameter * Imported Translations from Transifex * Remove N331 hacking rules * GET details REST API next link missing 'details' * Add missing indexes in SQLite and PostgreSQL * libvirt: cleanup warning log formatting in \_set\_host\_enabled * Revert temporary hack to monkey patch the fake rpc timeout * Remove H238 comment from tox.ini * libvirt: use image\_meta when looking up default device names * Fix bdm transformation for volume backed servers * Removed host\_id check in ServersController.update * Fix policy validation in JSONSchema * Adds assert\_has\_no\_errors check * Removed useless method \_get\_default\_deleted\_value * virt: make tests pass instance object to get\_instance\_disk\_info * libvirt: rename conn variable in LibvirtConnTestCase * Raise if sec-groups and port id are provided on boot * Begin using ironic's "AVAILABLE" state * Transform IPAddress to string when creating port * Break base service group driver class out from API * Remove unused \_get\_ip\_and\_port() * Updated from global requirements * Add method for getting the CPU pinning constraint * libvirt: Consider CPU pinning when booting * Make ec2/cloud.py use get\_instance\_availability\_zone() helper * HACKING.rst: Update the location of unit tests' README.rst * Remove unused method log\_db\_contents * Make use of controller method in test\_flavor\_manage * libvirt: Use XPath instead of loop in \_get\_disk\_xml * Avoid bdms db call when cleaning deleted instance * Ignore warnings from contextlib.nested * Cleanup bad JSON files * Switch to oslo.vmware API for 
reading and writing files * Make test re-use HTTPRequest part 1 * Make test re-use HTTPRequest part 2 * Make test re-use HTTPRequest part 3 * Remove HTTPRequestV3 in scheduler\_hints test * Hyper-V: Adds instance missing metrics enabling * ephemeral file names should reflect fs type and mkfs command * Reschedule queries to nova-scheduler after a timeout occurs * libvirt: remove use of utils.instance\_sys\_meta * libvirt: remove use of fake\_instance.fake\_instance\_obj * Remove redundant catch for InstanceNotFound * Add to\_dict() method to PciDevicePool object * libvirt: rename self.conn in LibvirtVolume{Snapshot||Usage}TestCase * libvirt: rename self.libvirtconnection in LibvirtDriverTestCase * libvirt: convert LibvirtConnTestCase to use fakelibvirt fixture * Remove unused network rpcapi calls * Added hacking rule for assertEqual(a in b, True/False) * Add API schema for v2.1 createImage API * Fix errors in string formatting operations * libvirt: Create correct BDM object type for conn info update * Fixes undocumented commands * Make \_get\_instance\_block\_device\_info preserve root\_device\_name * Convert tests to NoDBTestCase * Fixes Hyper-V should log a clear error message * Provide compatibliity for db.compute\_node\_statistics * Update network resource when shelve offload instance * Update network resource when rescheduling instance * libvirt: Expanded test libvirt driver * Adds "file" disk driver support to Xen libvirt driver * Virt: remove unused 'host' parameter from get\_host\_uptime * Don't translate logs in tests * Don't translate exceptions in tests * disk/vfs: introduce new option to setup * disk/vfs: introduce new method get\_image\_fs * initialize objects with context in block device * Remove unused controller instance in test\_config\_drive * Fix v2.1 os-tenant-networks/networks API * Use controller methods in test\_floating\_ips * Cleanup in test\_admin\_actions * Calling controller methods directly in test\_snapshots * Add checking changePassword None in \_action\_change\_password(v2) * Add more exceptions handle when change server password (v2) * Share admin\_password unit test between V2 & V2.1 * Share server\_actions unit test between V2 & V2.1 * Fix server\_groups schema on v2.1 API * Implement a safe copy.copy() operation for Nova models * clean up extension loading logging * Hyper-V: Fixes wrong hypervisor\_version * console: introduce baseproxy and update consoles cmd * libvirt: update get\_capabilities to Host class * libvirt: add get\_connection doc string in Host class * Enable check for H238 rule * Call ComputeNode instead of Service for getting the nodes * Remove mox dependency * Fix JSONFilter docs * libvirt: move \_get\_hypervisor\_\* functions to Host class * libvirt: don't turn time.sleep into a no-op in tests * Adds Hyper-V generation 2 VMs implementation * VMware: ensure that correct disk details are returned * Improve api-microversion hacking check * Add unit test for getting project quota remains * Fix py27 gate failure - test\_create\_instance\_both\_bdm\_formats * Reduce complexity of the \_get\_guest\_config method * Cleanups in preparation of flavor attributes on Instance * Add flavor column to instance\_extra table * docs: document manual testing procedure for NUMA support * Add setup/cleanup\_instance\_network\_on\_host api for neutron/nova-network * Remove useless requirements * Make get\_best\_cpu\_topology consider NUMA requested CPU topology * Make libvirt driver expose sibling info in NUMA topology * VMware: snapshot as stream-optimized 
image * VMware: refactor utility functions related to VMDK * Get settable user quota maximum correctly * Add missing policy for nova in policy.json * Fix typo in nfs\_mount\_options option description * increase fake rpc POLL\_TIMEOUT to 0.1s * work around for until-failure * Fix inconsistencies in the ComputeNode object about service * Fixed incorrect initialization of availability zone tests * Revert "initialize objects with context in block device" * Fix wrong instructions for rebuilding API samples * Performance: leverage dict comprehension in PEP-0274 * Sync with latest oslo-incubator * initialize objects with context in VirtualInterface object tests * initialize objects with context in Tag object tests * initialize objects with context in Service object tests * Fixes Hyper-V boot from volume live migration * Expansion of matching XML strings logic * Xenapi: Attempt clean shutdown when deleting instance * don't use debug logs for object validation * create some unit of work logging in n-net * Make service-update work in API cells * oslo: remove useless modules * Do not use deprecated assertRaisesRegexp() * Honor shared storage on resize revert * Stub out instance action events in test\_compute\_mgr * Remove unused instance\_group\_metadata\_\* DB APIs * initialize objects with context in block device * Reduce the complexity of the create() method * speed up tests setting fake rpc polling timeout * xenapi: don't send terminating chunk on errors * Make service-delete work in API cells * Add version as request param for fake HTTPRequest * Fix OverQuota headroom KeyError in nova-network allocate\_fixed\_ip * Updated from global requirements * Make numa\_usage\_from\_instances consider CPU pinning * Cleanup in admin\_actions(v2.1api) * Cache ironic-client in ironic driver * tests: fix handling of TIMEOUT\_SCALING\_FACTOR * libvirt: remove/revert pointless logic for getVersion call * libvirt: move capabilities helper into host.py * libvirt: move domain list helpers into Host class * libvirt: move domain lookup helpers into Host class * Fix live migration RPC compatibility with older versions * Added \_get\_volume\_driver method in libvirt driver * fix wrong file path in docstring of hacking.checks * Make ec2 auth support v4 signature format * VMware: driver not handling port other than 443 * libvirt: use XPath in \_get\_serial\_ports\_from\_instance * Remove non existent rule N327 from HACKING.rst * Replace Hacking N315 with H105 * Enable W292 * Fix and re-gate on H306 * Move to hacking 0.10 * Fix nova-manage shell ipython * Make service-list output consistent * Updated from global requirements * Make V2.1 servers filtering (--tenant-id) same as V2 * Fix failure rebuilding instance after resize\_revert * Move WarningsFixture after DatabaseFixture so emit once * libvirt: Use arch.from\_host instead of platform.processor * Cells: Improve invalid hostname handling * Fix obj\_to\_primitive() expecting the dict interface methods * Remove unused XML\_WARNING variable in servers API * Guard against missing X-Instance-ID-Signature header * libvirt: not setting membacking when mempages are empty host topology * remove pylint source code annotations * Cleanup XML for api samples tests for Nova REST API * remove all traces of pylint testing infrastructure * initialize objects with context in SecurityGroupRule object tests * initialize objects with context in SecurityGroup object tests * initialize objects with context in base object tests * initialize objects with context in Migration object tests * 
initialize objects with context in KeyPair object tests * initialize objects with context in InstanceNUMATopology object tests * initialize objects with context in InstanceGroup object tests * initialize objects with context in InstanceFault object tests * Fix error message when no IP addresses available * Update WSGI SSL IPv6 test and SSL certificates * Catch more specific exception in \_get\_power\_state * Add WarningsFixture to only emit DeprecationWarning once in a test run * Maintain the creation order for vifs * Update docstring for wrap\_exception decorator * Doc: Adds python-tox to Ubuntu dependencies * Added hacking rule for assertTrue/False(A in B) * ironic: use instance object in driver.py * Add LibvirtGPFSVolumeDriver class * Make pagination work with deleted marker * Return 500 when unexpected exception raising when live migrate v2 * Remove no need LOG.exception on attach\_interface * Make LOG exception use format\_message * make IptablesRule debug calls meaningful * Switch to tempest-lib's packaged subunit-trace * Update eventlet API in libvirt driver * initialize objects with context in Instance object tests * initialize objects with context in Flavor object tests * initialize objects with context in FixedIP object tests * initialize objects with context in EC2 object tests * initialize objects with context in ComputeNode object tests * initialize objects with context in BlockDeviceMapping object tests * Nuke XML support from Nova REST API - Phase 3 * Return floating\_ip['fixed\_ip']['instance\_uuid'] from neutronv2 API * Add handling of BadRequest from Neutron * Add numa\_node to PCIDevice * Nuke XML support from Nova REST API - Phase 2 * Remove unused methods in nova utils * Use get\_my\_ipv4 from oslo.utils * Add cpu pinning check to numa\_fit\_instance\_to\_host * Add methods for calculating CPU pinning * Remove duplicated policy check at nova-network FlatManager * boot instance with same net-id for multiple --nic * XenAPI: Check image status before uploading data * XenAPI: Refactor message strings to remove locals * Cellsv2 devref addition * Nuke XML support from Nova REST API - Phase 1 * hardware: fix numa topology from image meta data * Support both list and dict for pci\_passthrough\_whitelist * libvirt: Add balloon period only if it is not None * Don't assume contents of values after aggregate\_update * Add API schema for server\_groups API * Remove unused function \_get\_flavor\_refs in flavor\_access extension * Make rebuild server schema 'additionalProperties' False * Tests with controller methods in test\_simple\_tenant\_usage * Convert wsgi call to controller in test\_virtual\_interfaces * Fix the comment of host index api * Imported Translations from Transifex * Use controller methods directly in test\_admin\_password * Drop workarounds for python2.6 * VMware: add in utility method for copying files * Remove lock files when remove libvirt images * Change log when set\_admin\_password failed * Catch InstanceInvalidState for start/stop action * Unshelving a volume backed instance doesn't work * Cache empty results in libvirt get\_volume\_connector * VMware: improve the performance of list\_instances * VMware: use power\_off\_instance instead of power\_off * VMware: refactor unit tests to use \_get\_info * libvirt: clean instance's directory when block migration fails * Remove unused scheduler driver methods * Reuse methods from netutils * VMware: make use of oslo.vmware logout * Remove unused directory nova/tests/unit/bundle * Prevent new code from using 
namespaced oslo imports * Move metadata filtering logic to utils.py * Make test\_consoles to directly call controller methods * Catch expected exceptions in remote console controller * Make direct call to controller test\_server\_password * Cleanup in test\_keypairs not to use wsgi\_app * Add ipv6 support to fake network models * Add host field when missing from compute\_node * Remove condition check for python2.6 in test\_glance * Cleanup in test\_availability\_zone not to use wsgi\_app * Call controller methods directly in test\_evacuate * VMware: Use datastore\_regex for disk stats * Add support for clean\_shutdown to resize in compute api layer * Fix Instance relationships in two objects * objects: remove NovaObjectDictCompat from Tag object * libvirt: introduce new helper for getting libvirt domain * libvirt: remove pointless \_get\_host\_uuid method * libvirt: pass Host object into firewall class * Cleanup in server group unit tests * Enhance EvacuateHostTestCase test cases * Call controller methods directly in test\_console\_output * Make direct call to controller in test\_console\_auth\_tokens * Populates retry info when unshelve offloaded instance * Catch NUMA related exceptions for create server v2.1 API * Remove unnecessary cleanup from ComputeAPITestCase * extract RPC setup into a fixture 2015.1.0b1 ---------- * Fix recent regression filling in flavor extra\_specs * remove detail method from LimitsController * Remove instance\_uuids from request\_spec * libvirt: remove unused get\_connection parameter from VIF driver * libvirt: sanitize use of mocking in test\_host.py * libvirt: convert test\_host.py to use FakeLibvirtFixture * libvirt: introduce a fixture for mocking out libvirt connections * Expand valid resource name character set * Set socket options in correct way * Make resize server schema 'additionalProperties' False * Make lock file use same function * Remove unused db.api.dnsdomain\_list * Remove unused db.api.instance\_get\_floating\_address * Remove unused db.api.aggregate\_host\_get\_by\_metadata\_key * Remove unused db.api.get\_ec2\_instance\_id\_by\_uuid * Join instances column before expecting it to exist * ec2: Change FormatMappingTestCase to NoDBTestCase * libvirt: enhance driver to configure guests based on hugepages * Fix ironic delete fails when flavor deleted * virt: pass instance object to block\_stats & get\_instance\_disk\_info * Add pci\_device\_pools to ComputeNode object * Handle invalid sort keys/dirs gracefully * hardware: determine whether a pagesize request is acceptable * objects: add method to verify requested hugepages * hardware: make get\_constraints to return topology for hugepages * hardware: add method to return requested memory page size * Cleanup in ResourceExtension ALIAS(v2.1api) * Replace use of handle\_schedule\_error() with set\_vm\_state\_and\_notify() * Fix set\_vm\_state\_and\_notify passing SQLA objects to send\_update() * Imported Translations from Transifex * Libvirt: use strutils.bool\_from\_string * Use constant for microversions header name (cleanup) * Adds support for versioned schema validation for microversions api * Add support for microversions API special version latest * Adds API microversion response headers * Use osapi\_compute worker for api v2 service * initialize objects with context in Aggregate object tests * Replace the rest of the non-object-using test\_compute tests * Fix using anyjson in fake\_notifier * Fix a bug in \_get\_instance\_nw\_info() where we re-query for sysmeta * Corrects link to API 
Reference on landing page * libvirt: disk\_bus setting is being lost when migration is reverted * libvirt: enable hyperv enlightenments for windows guests * libvirt: enhance to return avail free pages on cells * libvirt: move setting of guest features out into helper method * libvirt: add support for configuring hyperv enlightenments in XML * libvirt: change representation of guest features * libvirt: add support for hyperv timer source with windows guests * libvirt: move setting of clock out into helper method * libvirt: don't pass a module import into methods * Reject non existent mock assert calls * VMware: remove unused method in the fake module * Use oslo db concurrency to generate nova.conf.sample * Make instance\_get\_all\_\*() functions support the smart extra.$foo columns * Make cells send Instance objects in build\_instance() * Fix spelling error in compute api * objects: fix changed fields for instance numa cell * Hyper-V: Fix volume attach issue caused by wrong constant name * Move test\_extension\_info from V3 dir to V2.1 * Make create server schema 'additionalProperties' False * Make update server schema 'additionalProperties' False * Updated from global requirements * Update devref with link to kilo priorities * Add vision of nova rest API policy improvement in devref * objects: remove dict compat support from all XXXList() objects * objects: stop conductor manager using dict field access on objects * objects: allow creation of objects without dict item compat * Remove duplicated constant DISK\_TYPE\_THIN * Hyper-V: Fix retrieving console logs on live migration * Remove FlavorExtraSpecsNotFound catch in v3 API * Add API schema for v2.1 block\_device\_mapping\_v1 * Add API schema for v2.1 block\_device\_mapping extension * VMware: Support volume hotplug * fix import of oslo.concurrency * libvirt: set guest cpu\_shares value as a multiple of guest vCPUs * Make objects use the generalized backport scheme * Fix base obj\_make\_compatible() handling ListOfObjectsField * VMware: make use of oslo.vmware pbm\_wsdl\_loc\_set * Replace stubs with mocks * Updated from global requirements * use more specific error messages in ec2 keystone auth * Add backoff to ebtables retry * Add support for clean\_shutdown to rescue in compute api layer * Add support for clean\_shutdown to shelve in compute api layer * Add support for clean\_shutdown to stop in compute api layer * Extend clean\_shutdown to the compute rpc layer * initialize objects with context in compute manager * Add obj\_as\_admin() to NovaPersistentObject * Bump major version of Scheduler RPC API to 4.0 * Use model\_query from oslo.db * Only check db/api.py for session in arguments * Small cleanup in db.sqlalchemy.api.action\_finish() * Inline \_instance\_extra\_get\_by\_instance\_uuid\_query * libvirt: Convert more tests to use instance objects * virt: Convert more tests to use instance objects * virt: delete unused 'interface\_stats' method * objects: fix version changelog in numa * libvirt: have \_get\_guest\_numa\_config return a named tuple * simplify database fixture to the features we use * extract the timeout setup as a fixture * Stop neutron.api relying on base neutron package * Move pci unit test from V3 to V2.1 * Clarify point of setting dirname in load\_standard\_extensions * Remove support for deprecated header X\_ROLE * move all conf overrides to conf\_fixture * move ServiceFixture and TranslationFixture * extract fixtures from nova.test to nova.test.fixtures * libvirt: Fix NUMA memnode assignments to host cells * 
libvirt: un-cruft \_get\_guest\_numa\_config * Make scheduler filters/weighers only load once * Refactor unit tests for scheduler weights * Fix cells RPC version 1.30 compatibility with dict-based Flavors * Objects: add in missing translation * network:Separate the translatable messages into different catalogs * objects: introduce numa pages topology as an object * check the configuration num\_vbd\_unplug\_retries * Doc: minor fixes to unit testing devref * Doc: Update i18n devref * VMware: remove flag in tests indicating VC is supported * virt: use instance object for attach in block\_device * VMware: clean up unit tests * Do not compute deltas when doing migration * Modify v21 alias name for compatible with v2 * Clean bdms and networks after deleting shelved VM * move eventlet GREENDNS override to top level * fix pep8 errors that apparently slipped in * include python-novaclient in abandon policy * replace httplib.HTTPSConnection in EC2KeystoneAuth * Re-revert "libvirt: add version cap tied to gate CI testing" * ironic: remove non-standard info in get\_available\_resource dict * hyperv: use standard architecture constants for CPU model * xenapi: fix structure of data reported for cpu\_info * ironic: delete cpu\_info data from get\_available\_resource * vmware: delete cpu\_info data from get\_available\_resource * pci: move filtering of devices up into resource tracker * Libvirt: Fsfreeze during live-snapshot of qemu/kvm instances * libvirt: Fixes live migration for volume backed instances * Updated from global requirements * Remove unused db.api.fixed\_ip\_get\_by\_address\_detailed * VMware: Remove unused \_check\_if\_folder\_file\_exists from vmops * VMware: Remove unused \_get\_orig\_vm\_name\_label from vmops * VMware: enable a cache prefix configuration parameter * Hyper-V: attach volumes via SMB * etc: replace NullHandler by Python one * Add cn\_get\_all\_by\_host and cn\_get\_by\_host\_and\_node to ComputeNode * Add host field to ComputeNode * Reject unsupported image to local BDM * Update LVM lockfile name identical to RAW and Qcow * Fix invalid read\_deleted value in \_validate\_unique\_server\_name() * Adds hacking check for api\_version decorator * Parse "networks" attribute if loading os-networks * Fixes interfaces template identification issue * VMware: support passing flavor object in spawn * Libvirt: make use of flavor passed by spawn method * Virt: change instance\_type to flavor * rename oslo.concurrency to oslo\_concurrency * Support macvtap for vif\_type being hw\_veb * downgrade 'No network configured!' 
to debug log level * Remove unnecessary timeutils override cleanup * Cleanup timeutils override in tests/functional/test\_servers * Downgrade quota exceeded log messages * libvirt: Decomposition plug hybrid methods in vif * Remove unused cinder code * Libvirt normalize numa cell ids * Remove needless workaround in utils module * Check for floating IP pool in nova-network * Remove except Exception cases * Fixes multi-line strings with missing spaces * Fix incorrectly formatted log message * libvirt: check value of need\_legacy\_block\_device\_info * Fixed typo in testcase and comment * Share server access ips tests between V2 & V2.1 * Workflow documentation is now in infra-manual * Add a validation format "cidr" * Use a copy of NEW\_NETWORK for test\_networks * Adds global API version check for microversions * Implement microversion support on api methods * Fix long hostname in dnsmasq * This patch fixes the check that 'options' object is empty correctly * Assert order of DB index members * Updated from global requirements * object-ify flavors manager side of the RPC * Add CPU pinning data to InstanceNUMACell object * Enforce unique instance uuid in data model * libvirt: Handle empty context on \_hard\_reboot * Move admin\_only\_action\_common out of v3 directory(cleanup) * Compute Add build\_instance hook in compute manager * SQL scripts should not manage transactions * Clear libvirt test on LibvirtDriverTestCase * Replacement \`\_\` on \`\_LW\` in all LOG.warning part 4 * Replacement \`\_\` on \`\_LW\` in all LOG.warning part 3 * Convert v3/v2.1 extension info to present v2 API format * Adds NUMA CPU Pinning object modeling * objects: Add several complex field types * VMware: ephemeral disk support * Imported Translations from Transifex * Fix disconnecting necessary iSCSI sessions issue * VMware: ensure that fake VM deletion returns a task * Compute: Catch binding failed exception while init host * libvirt: Fix domain creation for LXC * Xenapi: Allow volume backed instances to migrate * Break V2 XML Support * Libvirt: SMB volume driver * libvirt: Enable console and log for system z guests * libvirt: Set guest machine type on system z * Drop support for legacy server groups * Libvirt: Don't let get\_console\_output crash on missing console file * Hyper-V: Adds VMOps unit tests (part 1) * VMware: allow selection of vSAN datastores * libvirt: enhance config memory backing to handle hugepages * VMware: support spawn of stream-optimized image * libvirt: reuse defined method to return instance numa topology * Remove the volume api related useless policy rules * Error code for creating secgroup default rule * Don't mock external locks with Semaphore * Add shelve and unshelve info into devref doc * VMware: optimize resource pool usage * Added objects Tag and TagList * libvirt: video RAM setting should be passed in kb to libvirt * Switch to moxstubout and mockpatch from oslotest * Check that volume != root device during boot by image * Imported Translations from Transifex * Make a flavorRef validation strict * Add missing indexes from 203 migration to model * Fix type of uniq\_security\_groups0project\_id0name0deleted * Correct columns covered in migrations\_instance\_uuid\_and\_status\_idx * Add debug log for url not found * Optimize 'floating\_ip\_bulk\_create' function * factor out \_setup\_logging in test.py * extract \_setup\_timeouts in test.py * Scheduler: return a namedtuple from \_get\_group\_details * Use "is\_neutron\_security\_groups" check * Fix function name mismatch in test case * 
VMware: prevent exception with migrate\_disk\_and\_power\_off * Fix URL mapping of image metadata PUT request * Compute: catch correct exception when host does not exists * Fix URL mapping of server metadata PUT request * objects: move numa host and cell to objects * objects: introduce numa objects * Code cleanup: quota limit validation * Add api validation schema for image\_metadata * Correct InvalidAggregateAction translation&format * Remove blanks before ':' * Port virtual-interfaces plugin to v2.1(v3) API * Catch ComputeServiceUnavailable on v2 API * GET servers API sorting REST API updates * Add API validation schema for volume\_attachments * Changed testcase 'test\_send\_on\_vm\_change' to test vm change * VMware: associate instance with storage policy * VMware: use storage policy in datastore selection * VMWare: get storage policy from flavor * Share CreateBackup unit test between V2 & V2.1 * Share suspend\_server unit test between V2 & V2.1 * Share pause\_server unit test between V2 & V2.1 * Share lock\_server unit test between V2 & V2.1 * VMware: enable VMware driver to use new BDM format * Use admin only common test case in admin action unit test cases * objects: move virt numa instance to objects * Fix v2.1 API os-simple-tenant-usage policy * Set vm state error when raising unexpected exception in live migrate * Add delete not found unit testcase for floating\_ip api * Improve error return code of floating\_ips in v2/v2.1 api * Port floating\_ips extension to v2.1 * Removing the headroom calculation from db layer * Make multiple\_create unit tests share between v2 and v2.1 * Set API version request information on request objects * Change definition of API\_EXTENSION\_NAMESPACE to method * Adds APIVersionRequest class for API Microversions * Updated from global requirements * remove test.ReplaceModule from test.py * Added db API layer to add instance tag-list filtering support * Added db API layer for CRUD operations on instance tags * Implement 'personality' plugin for V2.1 * Fix API samples/templates of multinic-add-fixed-ip * move the integrated tests into the functional tree * Sync latest from oslo-incubator * Fix use of conf\_fixture * Make network/\* use Instance.get\_flavor() * Make metadata server use Instance.get\_flavor() * Fix use of extract\_flavor() in hyper-v driver * Check server group policy on migrate/evacuate * VMware: fix exception when multiple compute nodes are running * Add API json schema for server\_external\_event(v2.1) * Port v2 quota\_classes extension to work in v2.1(v3) framework * Share unit test case for server\_external\_events api * Add API schema for v2.1/v3 scheduler\_hints extension * Make compute/api.py::resize() use Instance.get\_flavor() * Make get\_image\_metadata() use Instance.get\_flavor() * Fix instance\_update() passing SQLA objects to send\_update() * Fix EC2 volume attachment state at attaching stage * Fixes Hyper-V agent IDE/SCSI related refactoring * dummy patch to let tox functional pass * Remove Python 2.6 classifier * Make aggregate filters use objects * hardware: clean test to use well defined fake flavor * Enable pep8 on ./tools directory * objects: Add test for instance \_save methods * Error code for creating duplicate floating\_ip\_bulk * Use HTTPRequest instead of HTTPRequestV3 for v2/v2.1 tests * objects: make instance numa topology versioned in db * Clean up in test\_server\_diagnostics unit test case * Add "x-compute-request-id" to a response header * Prevent admin role leak in context.elevated * Hyper-V: Refactors 
Hyper-V VMOps unit tests * Hyper-V: Adds Hyper-V SnapshotOps tests * Introduce a .z version element for backportable objects * Adds new RT unit tests for \_sync\_compute\_node * Fix for extra\_specs KeyError * Remove old Baremetal Host Manager * Remove unused network\_api.get\_instance\_uuids\_by\_ip\_filter() * Remove unused network\_api.get\_floating\_ips\_by\_fixed\_address() * add abandon\_old\_reviews script * Remove havana compat from nova.cert.rpcapi * Retry ebtables on race * Eventlet green threads not released back to pool * Hyper-V: Adds LiveMigrationOps unit tests * Hyper-V: Removes redundant utilsfactory tests from test\_hypervapi * Hyper-V: Adds HostOps unit tests * Make nova-api use quotas object for create\_security\_group * Make nova-api use quotas object for count() and limit\_check() * Add count and limit\_check methods to quota object * Make neutronapi get networks operations return objects * Hyper-V: fix tgt iSCSI targets disconnect issue * Network object: add missing translations * Adapting pylint runner to the new message format * Cleanup v2.1 controller inheritance * Load extension 2 times fix load sequence issue * Make get\_next\_device\_name() handle an instance object * Add obj\_set\_defaults() to NovaObject * Switch to oslo.config fixture * Remove VirtNUMAHostTopology.claim\_test() method * Instances with NUMA will be packed onto hosts * Make Instance.save() update numa\_topology * objects: remove VirtPageSize from hardware.py * VMware: enable backward compatibility with existing clusters * Make notifications use Instance.get\_flavor() * Make notify\_usage\_exists() take an Instance object * Convert hardware.VirtCPUTopology to nova object * Updated from global requirements * Replacement \`\_\` on \`\_LW\` in all LOG.warning part 2 * compute: rename hvtype.py to hv\_type.py * Replacement \`\_\` on \`\_LW\` in all LOG.warning part 1 * Replacement \`\_\` on \`\_LE\` in all LOG.exception * Use opportunistic approach for migration testing * Replacement \`\_\` on \`\_LI\` in all LOG.info - part 2 * Replacement \`\_\` on \`\_LI\` in all LOG.info - part 1 * Add ALL-IN operator to extra spec ops * Sync server\_external\_events v2 to v2.1 Part 2 * Sync server\_external\_events v2 to v2.1 Part 1 * Fix connecting unnecessary iSCSI sessions issue * Add API validation schema for services v2.1 plugin * Fix exception handling in \_get\_host\_metrics() * initialize objects with context in network manager tests * initialize objects with context in flavors * initialize objects with context in compute api * initialize objects with context in resource tracker * Use common get\_instance call in API plugins part 3 * Clean the test cases for service plugins * initialize objects with context in server groups api * initialize objects with context in cells * tests: update \_get\_instance\_xml to accept custom flavor object * libvirt: vif tests should use a flavor object * Compute: improve test\_compute\_utils time * Compute: improve usage of Xen driver support * libvirt: introduce new 'Host' class to manage the connection * Add CHAP credentials support * Document the upgrade plans * Move test\_hostops into nova/tests/unit * Fix get\_all API to pass search option filter to cinder api * VMware: remove ESX support for getting resource pool * objects: Makes sure Instance.\_save methods are called * Add support for fitting instance NUMA nodes onto a host * VMware: remove unnecessary brackets * Imported Translations from Transifex * Port volume\_attachments extension to v2.1 API * Only filter 
once for trusted filters * Indicate whether service is down for mc driver * Port assisted-volume-snapshots extension to v2.1 * Updated from global requirements * Add custom is\_backend\_avail() method * Fixes differencing VHDX images issue on Hyper-V * Add debug log when over quota exception occurs * Fix rule not found error in sec grp default rule API * Convert service v3 plugin to v2.1 API * Decrease admin context usage in \_get\_guest\_config * Catch NotImplemented nova exceptions in API extension * Add API json schema to volumes api(v2.1) * Don't modify columns\_to\_join formal parameter in \_manual\_join\_columns * Limit tcp/udp port to be empty string in json-schema * Fix the cell API with string rpc\_port failed * Add decorator expected\_errors for security\_group extension * Fix bulk floating ip ext to show uuid and fixed\_ip * Use session in cinderclient * Make objects.Flavor.\_orig\_projects a list * Refactor more compute tests to use Instance objects * Use Instance.get\_flavor() in more places * Support instance\_extra fields in expected\_attrs on Instance object * Adds host power actions support for Hyper-V * Exceptions: finish sentence with fullstop * Type conflict in trusted\_filter.py using attestation\_port default value * Get EC2 metadata localip return controller node ip * Rename private functions in db.sqla.api * Enable hard-reboot on more states * Better error message when check volume status * libvirt: use qemu (qdisk) disk driver for Xen >= 4.2.0 * Add resource types for JSON-Schema validation * Add integer types for JSON-Schema * Revert pause/unpause state when host restart * Extends use of ServiceProxy to more methods in HostAPI in cells * Nova devref: Fix the rpc documentation typos * Remove duplicated code in services api integrated test case * Share server\_password unit test between V2 & V2.1 * Key manager: ensure exception reason is translated * Virt: update spawn signature to pass instance\_type * Compute: set instance to ERROR if resume fails * Limit InstanceList join to system\_metadata in os-simple-tenant-usage * Pass expected\_attrs to instance\_get\_active\_by\_window\_joined * VMware: remove unused parameter (mountpoint) * Truncate encoded instance message to 255 or fewer * Only load necessary instance info for use in sync power state * Revert "Truncate encoded instance message to 255" * VMware: refactor cpu allocations * Fixes spawn issue on Hyper-V * Refine HTTP error code for os-interface * Share migrations unit test between V2 & V2.1 * Use common get\_instance call in API plugins part 2 * make get\_by\_host use slave in periodic task * Add update\_cells to BandwidthUsage.create() * Fix usage of BandwidthUsage.create() * Updated from global requirements * Hard reboot doesn't re-create instance folder * object-ify flavors api and compute/api side of RPC * Allow passing columns\_to\_join to instance\_get\_all\_by\_host\_and\_node() * Don't make a no-op DB call * Remove deprecated affinity filters * Generalize dependent object backporting * GET servers API sorting compute/instance/DB updates * Hyper-V: cleanup basevolumeutils * Specify storage IP for iscsi connector * Fix conductor processes race trying to join servicegroup (zk driver) * Remove unused db.api.floating\_ip\_set\_auto\_assigned * Remove unused db.api.flavor\_extra\_specs\_get\_item * Remove unused oslo.config import * Create instance\_extra items atomically with the instance itself * Shelve\_offload() should give guests a chance to shutdown * Fixes Hyper-V driver WMI issue on 2008 R2 * Fix 
circular reference error when live migration failed * Fix live migration api stuck when migrate to old nova node * Remove native security group api class * libvirt: pin emulator threads to union of vCPU cpuset * libvirt: add classes for emulator thread CPU pinning configuration * libvirt: set NUMA memory allocation policy for instances * Fixed quotas double decreasing problem * Convert v3 console plugin to v2.1 * Virt: make use of the InstanceInfo object * Virt: create an object InstanceInfo * Metadata service: make use of get\_instance\_availability\_zone * Metadata service: remove check for the instance object type * Metadata: use instance objects instead of dictionary * VMware: Fix problem transferring files with ipv6 host * VMware: pass vm\_ref to \_set\_machine\_id * VMware: pass vm\_ref to \_get\_and\_set\_vnc\_config * Add API schema for aggregates set\_metadata API * Compute: Add start notification for resume * VMware: fix regression for 'TaskInProgress' * Remove havana compat from nova.console.rpcapi * Remove havana compat from nova.consoleauth.rpcapi * Share console-auth-tokens tests between V2 & V2.1 * Raise HTTPNotFound in V2 console extension * Add 'instance-usage-audit-log' plugin for V2.1 * Truncate encoded instance message to 255 * Deduplicate some INFO and AUDIT level messages * move all tests to nova/tests/unit * Add tox -e functional * Don't touch info\_cache after refreshing it in Instance.refresh() * Drop max-complexity to 47 * Aggregate.save() shouldn't return a value * Remove useless host parameter in virt * Use real disk size to consider a resize down * Add virtual interface before add fixed IP on nova-network * image cache clean-up to clean swap disk * Make unit test floating ips bulk faster * Remove flush\_operations in the volume usage output * Updated from global requirements * xenapi plugins must target only Python 2.4 features * libvirt: add classes for NUMA memory binding configuration * libvirt: add in missing translation for LVM migration * Config bindings: remove redundant brackets * Config drive: delete deprecated config var config\_drive\_tempdir * Refactor Ironic driver tests as per review comment * Switch default cinder API to V2 * Remove deprecated spicehtml5 options * Fix xen plugin to retry on upload failure * Log sqlalchemy exception message in migration.py * Use six.text\_type instead of unicode * XENAPI add duration measure to log message * Quotas: remove deprecated configuration variable * Glance: remove deprecated config options * Cinder: remove deprecated configuration options * Neutron: remove deprecated config options * object: update instance numa object to handle pagesize * hardware: make cell instance topology to handle memory pages * hardware: introduce VirtNUMATopologyCellInstance * hardware: fix in doctstring the memory unit used * virt: introduce types VirtPageSize and VirtPagesTopology * Clearer default implmentation for dhcp\_options.. 
* Fix instance\_usage\_audit\_log test to use admin context * VMware: remove unused method \_get\_vmfolder\_ref * libvirt: safe\_decode domain.XMLDesc(0) for i18n logging * VMware: trivial fix for comment * Fix the uris in documentation * Make test\_security\_groups nose compatible * Make test\_quotas compatible with nosetests * Return HTTP 400 if use invalid fixed ip to attach interface * Fixed typos in nova.objects.base docstrings * Add note on running single tests to HACKING.rst * Use sizelimit from oslo.middleware * Use oslo.middleware * Make resource tracker always use Flavor objects * maint:Don't translate debug level logs * Make console show and delete exception msg better * Change error code of floating\_ip\_dns api(v2.1) * Make scheduler code use object with good practice * Switch Nova to use oslo.concurrency * scheduler: Remove assert on the exact number of weighers * Update docstring for check\_instance\_shared\_storage\_local * remove use of explicit lockutils invocation in tests * Delay STOPPED lifecycle event for Xen domains * Remove warning & change @periodic\_task behaviour * Fix nova-compute start issue after evacuate * Ignore DiskNotFound error on detaching volumes * Move setup\_instance\_group to conductor * Small doc fix in compute test * libvirt: introduce config to handle cells memory pages caps * Fixes DOS issue in instance list ip filter * Use 404 instead of 400 when security\_group is non-existed * Port security-group-default-rules extension into v2.1 * Port SecurityGroupRules controller into v2.1 * error if we don't run any tests * Revert "Switch Nova to use oslo.concurrency" * Updated from global requirements * Remove admin context which is not needed * Add API validation schema for disk\_config * Make test\_host\_filters a NoDBTestCase * Move group affinity filters tests to own test file * Split out metrics filter unit tests * Splits out retry filter unit tests * Split out compute filters unit tests * Update hooks from oslo-incubator copy * Split out aggregate disk filter unit tests * Split out core filter unit tests * Split out IO Ops filter unit tests * Split out num instances filter unit tests * Split and fix the type filters unit tests * Split and fix availability zone filter unit tests * Split out PCI passthrough filter unit tests * Use common get\_instance call in API plugins * Fix nova evacuate issues for RBD * DB API: Pass columns\_to\_join to instance\_get\_active\_by\_window\_joined * Read flavor even if it is already deleted * Use py27 version of assertRaisesRegexp * update retryable errors & instance fault on retry * xenapi: upload/download params consistency change * Use assertRaisesRegexp * Drop python26 support for Kilo nova * Switch Nova to use oslo.concurrency * Remove param check for backup type on v2.1 API * Set error state when unshelve an instance due to not found image * fix the error log print in encryptor \_\_init\_\_.py * Remove unused compute\_api in extend\_status * Compute: maint: adjust code to use instance object format * VMware: use instance.uuid instead of instance['uuid'] * Network: manage neutron client better in allocate\_for\_instance * Split out agg multitenancy isolation unit tests * Split agg image props isolation filter unit tests * Separate isolated hosts filter unit tests * Separate NUMA topology filter unit tests * resource-tracker: Begin refactor unit tests * Faster get\_attrname in nova/objects/base.py * Hyper-V: Skip logging out in-use targets * Compute: catch more specific exception for \_get\_instance\_nw\_info * 
typo in the policy.json "rule\_admin\_api" * Fix the unittest use wrong controller for SecurityGroups V2 * host manager: Log the host generating the warning * Add API validation schema for floating\_ip\_dns * Remove \`domain\` from floating-ip-dns-create-or-update-req body * Port floating\_ip\_dns extention to v2.1 * Remove LOG outputs from v2.1 API layer * Run build\_and\_run\_instance in a separate greenthread * VMware: Improve the efficiency of vm\_util.get\_host\_name\_for\_vm * VMware: Add fake.create\_vm() * Use wsgi.response for v2.1 API * Use wsgi.response for v2.1 unrescue API * Add API schema for v2.1 "resize a server" API * Remove use of unicode on exceptions * Fix error in comments * Make pci\_requests a proper field on Instance object * libvirt: fully parse PCI vendor/product IDs to integer data type * Remove uncessary instance.save in nova compute * api: add serial console API calls v2.1/v3 * Add API validation schema for cloudpipe api * Remove project id in ViewBuilder alternate link * Handle exception better in v2.1 attach\_interface * Cleanup of tenant network tests * Port floating\_ips\_bulk extention to v2.1 * Make v2.1 tests use wsgi\_app\_v21 and remove wsgi\_app\_v3 * Translate 'powervm' hypervisor\_type to 'phyp' for scheduling * Give a reason why NoValidHost in select\_destinations * ironic: use instance object for \`\_add\_driver\_fields\` * ironic: use instance object for \`\_wait\_for\_active\` * ironic: use instance object for \`get\_info\` * ironic: use instance object for \`rebuild\` * ironic: use instance object for plug\_vifs * Revert "Replace outdated oslo-incubator middleware" * Set logging level for glanceclient to WARN * Nova should be in charge of its log defaults * Reduce the complexity of \_get\_guest\_config() * VMware: fix compute node exception when no hosts in cluster * libvirt: use instance object for detach\_volume * libvirt: use instance object for attach\_volume * libvirt: use instance object for resume\_state\_on\_host\_boot * libvirt: treat suspend instance as an object * VMware: Remove redundant fake.reset() in test\_vm\_util * VMware: add tests for spawn with config drive enabled * Adds tests for Hyper-V Network utils * Adds tests for Hyper-V Host utils * Fix order of arguments in assertEqual * Replace custom patching in \`setUp\` on HypervisorsSampleJsonTests * Console: delete code for VMRCConsole and VMRCSessionConsole * VMware: delete the driver VMwareESXDriver * Replacement \`\_\` on \`\_LE\` in all LOG.error * VMware: rename vmware\_images to images * Remove unuseful parameter in cloudpipe api(v2/v2.1) * Moves trusted filter unit tests into own file * Port update method of cloudpipe\_update to v2.1(v3) * Clean up iSCSI multipath devices in Post Live Migration * Check fixed-cidr is within fixed-range-v4 * Porting baremetal\_nodes extension to v2.1/v3 * Port fixed\_ip extention to v2.1 * Separate filter unit tests for agg extra specs * Move JSON filter unit tests into own file * Separate compute caps filter unit tests * Separate image props filter unit tests * Separate disk filters out from test\_host\_filters * Separate and refactor RAM filter unit tests * Remove duplicate test * Reduce the complexity of stub\_out\_db\_network\_api() * Remove duplicate index from model * Remove useless join in nova.virt.vmwareapi.vm\_util * fixed typo in test name * Separate and refactor affinity filter tests * Pull extra\_specs\_ops tests from test\_host\_filters * Remove outdated docstring for XenApi driver's options * VMware: attach config drive if 
booting from a volume * Remove duplicated comments in virt/storage\_users * Compute: use instance object for vm\_state * libvirt: use six.text\_type when setting text node value in guest xml * Allow strategic loading of InstanceExtra columns * Create Nova Scheduler IO Ops Weighter * Put a cap on our cyclomatic complexity * Add notification for server group operations * Clean up the naming of PCI python modules * Port os-networks-associate plugin to v2.1(v3) infrastructure * Port os-tenant-networks plugin to v2.1(v3) infrastructure * Cleanup of exception handling in network REST API plugin * Fix instance\_extra backref * Refactor compute tests to not use \_objectify() * Refactor compute and conductor tests to use objects * Fix genconfig - missed one import from oslo cleanup * Handle Forbidden error from network\_api.show\_port in os-interface:show * Replace outdated oslo-incubator middleware * VMware: Improve logging on failure due to invalid guestId * Ironic: Continue pagination when listing nodes * Fix unit test failure due to tests sharing mocks * libvirt: fully parse PCI addresses to integer data type * libvirt: remove pointless HostState class * Porting SecurityGroup related controller into v2.1 * Allow force-delete irrespective of VM task\_state * Use response.text for returning unicode EC2 metadata * Remove unused modules copied from oslo-incubator * Remove unused code in pci\_manager.get\_instance\_pci\_devs() * VMWare: Remove unused exceptions * Switch to nova's jsonutils in oslo.serialization * VMware: mark virtual machines as 'belonging' to OpenStack * XenAPI: Inform XAPI who is connecting to it * Rename cli variable in ironic driver * Add more input validation of bdm param in server creation * Return HTTP 400 if use an in-use fixed ip to attach interface * VMware: get\_all\_cluster\_refs\_by\_name default to {} * Minor refactor of \_setup\_instance\_group() * add InstanceGroup.get\_by\_instance\_uuid * Add instance\_group\_get\_by\_instance to db.api * Updated from global requirements * Add supported\_hv\_specs to ComputeNode object * Pass block device info in pre\_live\_migration * Use 400 instead of 422 for security\_groups v2 API * Port floating\_ip\_pools extension to v2.1 * Imported Translations from Transifex * Sync with latest oslo-incubator * Don't translate unit test logs * Optimize get\_instance\_nw\_info and remove ipam * Convert migrate requests to use joins * Use database joins for fixed ips to other objects * Keep migration status if instance still resizing * Don't log every (friggin) migration version step during unit tests * Remove init for object list in api layer * Revise compute API schemas and add tests * Add Quota roll back for deallocate fix ip in nova-network * Update README for openstack/common * Fix libvirt watchdog support * VMware: add support for default pbm policy * Remove unused imports from neutron api * Cleanup tenant networks plugin config creation * Port os-networks plugin to v2.1(v3) infrastructure * Use reasonable timeout for rpc service\_update() * Finish objects conversion in the os-interface API

2014.2
------

* Fix pci\_request\_id break the upgrade from icehouse to juno * Fix pci\_request\_id break the upgrade from icehouse to juno * Updated translations * vfs: guestfs logging integration * Fix broken cert revocation * Port cloudpipe extension to v2.1 * Cleanup log marker in neutronv2 api * Add 'zvm' to the list of known hypervisor types * Fix wrong exception return in fixed\_ips v2 extension * Extend XML unicode test coverage * Remove 
unnecessary debug/info logs of normal API ops * Refactor of test case of floating\_ips * Make v2.1 API tests use v2 URLs(test\_[r-v].\*) * Make v2.1 API tests use v2 URLs(test\_[f-m].\*) * Break out over-quota calculation code from quota\_reserve() * Fix image metadata returned for volumes * Log quota refresh in\_use message at INFO level for logstash * Break out over-quota processing from quota\_reserve() * Remove obsolete vmware/esx tools * Fix broken cert revocation * Remove baremetal virt driver * Update rpc version aliases for juno * VMware: Set vmPathName properly in fake driver * Port disk\_config extension for V2.1 * Allow backup operation in paused and suspend state * Update NoMoreFixedIps message description * Make separate calls to libvirt volume * Correct VERSION of NetworkRequest * Break out quota usage refresh code from quota\_reserve() * libvirt: abort init\_host method on libvirt that is too old * Mask passwords in exceptions and error messages * Support message queue clusters in inter-cell communication * neutronv2: translate 401 and 404 neutron client errors in show\_port * Log id in raise\_http\_conflict\_for\_instance\_invalid\_state() * Use image metadata from source volume of a snapshot * Fix KeyError for euca-describe-images * Optimize 'fixed\_ip\_bulk\_create' function * Remove 'get\_host\_stats' virt driver API method * Suppressed misleading log in unshelve, resize api * Imported Translations from Transifex * libvirt: add \_get\_launch\_flags helper method in unit test * Refactoring of contrib.test\_networks tests * Make v2.1 API tests use v2 URLs(test\_[a-e].\*) * Port fping extension to work in v2.1/v3 framework * Use oslo.utils * Correctly catch InstanceExists in servers create API * Fix the os\_networks display to show cidr properly * Avoid using except Exception in unit test * nova-net: add more useful logging before raising FixedIpLimitExceeded * libvirt: convert conn test case to avoid DB usage * libvirt: convert driver test suite to avoid DB usage * Mask passwords in exceptions and error messages * Disable libvirt NUMA topology support if libvirt < 1.0.4 * Resource tracker: use brackets for line wrap * VMWare: Remove unnecessery method * console: make unsupported ws scheme in python < 2.7.4 * VMWare: Fix nova-compute crash when instance datastore not available * Disable libvirt NUMA topology support if libvirt < 1.0.4 * VMware: remove \_get\_vim() from VMwareAPISession * Compute: use an instance object in terminate\_instance * VMware: remove unnecessary deepcopy * Destroy orig VM during resize if triggered by user * Break out quota refresh check code from quota\_reserve() * move integrated api client to requests library * Fix unsafe SSL connection on TrustedFilter * Update rpc version aliases for juno * Fix the os\_networks display to show cidr properly * libvirt: convert mox to mock in test\_utils * Remove kombu as a dependency for Nova * Adds missing exception handling in resize and rebuild servers API * Remove keystoneclient requirement * Destroy orig VM during resize if triggered by user * VMware: Fix deletion of an instance with no files * console: introduce a new exception InvalidConnectionInfo * Remove the nova-manage flavor sub-command * support TRACE\_FAILONLY env variable * Ensure files are closed promptly when generating a key pair * libvirt: convert volume snapshot test case to avoid DB usage * libvirt: convert volume usage test case to avoid DB usage * libvirt: convert LibvirtNonblockingTestCase to avoid DB usage * libvirt: convert firewall 
tests to avoid DB usage * libvirt: convert HostStateTestCase to avoid DB usage * libvirt: split firewall tests out into test\_firewall.py * libvirt: convert utils test case to avoid DB usage * Add VIR\_ERR\_CONFIG\_UNSUPPORTED to fakelibvirt * Remove indexes that are prefix subsets of other indexes * remove scary error message in tox * Cleanup \_convert\_block\_devices * Enhance V2 disk\_config extension Unit Test * Add developer policy about contractual APIs * Reserve 10 migrations for backports * libvirt: Make sure volumes are well detected during block migration * Remove websocketproxy workaround * Fix unsafe SSL connection on TrustedFilter

2014.2.rc1
----------

* Remove xmlutils module * libvirt: Make sure NUMA cell memory is in Kb in XML * Fix disk\_allocation\_ratio on filter\_scheduler.rst * Remove unused method within filter\_scheduler test * Open Kilo development * Correct missing vcpupin elements for numa case * VMware: remove unused variable from tests * Imported Translations from Transifex * VMWare: Fix VM leak when deletion of VM during resizing * Logging detail when attach interface failed * Removes unused code from wsgi \_to\_xml\_node * Fix XML UnicodeEncode serialization error * Add @\_retry\_on\_deadlock to \_instance\_update() * Remove duplicate entry from .gitignore file * console: fix bug when invalid connection info * console: introduce a new exception InvalidToken * cmd: update the default behavior of serial console * console: make websocketproxy handles token from path * VMware: Remove tests for None in fake.\_db\_content['files'] * Fix creating bdm for failed volume attachment * libvirt: consider vcpu\_pin\_set when choosing NUMA cells * Fix hook documentation on entry\_points config * Remove local version of generate\_request\_id * fix usage of obj\_reset\_changes() call in flavor * Fix Bad except clauses order * Typo in exception name - CellsUpdateProhibited * Log original error when attaching volume fails * Retry on closing of luks encrypted volume in case device is busy * VMware: Remove VMwareImage.file\_size\_in\_gb * VMware: remove unused argument from \_delete\_datastore\_file() * xenapi: deal with reboots while talking to agent * Ironic: Do not try to unplug VIF if not associated * Fix Typo in method name - parse\_Dom * Adds openSUSE support for developer documentation * VMware: Remove class orphaned by ESX driver removal * Fixes missing ec2 api address disassociate error on failure * Fixes potential reliability issue with missing CONF import * Updated from global requirements * Port extended\_ips/extended\_ips\_mac extension to V2.1 * the value of retries is wrong in \_allocate\_network * Ironic driver must wait for power state changes * Fallback to legacy live migration if config error * libvirt: log exception info when interface detach failed * libvirt: support live migration with shared instances dir * Fix SecurityGroupExists error when booting instances * Undo changes to obj\_make\_compatible * Clarify virt driver test comments & log statement * move integrated api client to requests library * VMware: Make DatastorePath hashable * Remove usage of self.\_\_dict\_\_ for message var replacement * VMware: trivial formatting fix in fake driver * VMware: Improve logging of DatastorePath in error messages * VMware: Use vm\_mode constants * Imported Translations from Transifex * Updated from global requirements * do not use unittest.TestCase for tests * Neutron: Atomic update of instance info cache * Reduce the scope of RT work while holding the big lock * 
libvirt: convert CacheConcurrencyTestCase to avoid DB usage * Give context to the warning in \_sync\_power\_states * remove test\_multiprocess\_api * add time to logging in unit tests * XenAPI: clean up old snapshots before create new * Return vcpu pin set as set rather than list * Fix start/stop return active/stopped immediately in EC2 API * consistently set status as REBUILD when rebuilding * Add test case for vim header check * Add missing instance action record for start of live migration * Reduce the log level for the guestfs being missing * Sync network\_info if instance not found before \_build\_resources yield * Remove the AUDIT log message about loaded ext * Fix unset extra\_spec for a flavor * Add further debug logging for multiprocess test * Revert "libvirt: support live migrate of instances with conf drives" * Revert "libvirt: Uses correct imagebackend for configdrive" * Fixes server list filtering on metadata * Add good path test cases of osapi\_compute\_workers * Be less confusing about notification states * Remove unused py33 tox env * fix\_typo\_in\_heal\_instance\_info\_cache * Refactor test\_get\_port\_vnic\_info 2 and 3 * Revert "libvirt: reworks configdrive creation" * Making nova.compute.api to return Aggregate Objects * Scheduler: add log warning hints * Change test function from snapshot to backup * Fixes Hyper-V dynamic memory issue with vNUMA * Update InstanceInvalidState output * Add unit test for glanceclient ssl options * Fix Broken links in devref/filter\_scheduler.rst * Change "is lazy loaded" detection method in db\_api test * Handle VolumeBDMPathNotFound in \_get\_disk\_over\_committed\_size\_total * Handle volume bdm not found in lvm.get\_volume\_size * Updated from global requirements * Address nits in I6b4123590 * Add exists check to fetch\_func\_sync in libvirt imagebackend * libvirt: avoid changing UUID when redefining nwfilters * Vmware:Add support for ParaVirtualSCSIController * Fix floating\_ips\_bulk unit test name * refactor flavor manage tests in prep for object-ify flavors * refactor flavor db fakes in prep for object-ify flavors * move dict copy in prep for object-ify flavors * tests: kill worker pids as well on timeouts * Close standard fds in test child process * Mitigating performance impact with getting pci requests from DB * Return None from get\_swap() if input is not swap * Require tests for DB migrations * VMware: fix broken mock of ds\_util.mkdir * Fix KeyError for euca-describe-images * Fixes HyperV VM Console Log * FIX: Fail to remove the logical volume * correct \_sync\_instance\_power\_state log message * Add support for hypervisor type in IronicHostManager * Don't list entire module autoindex on docs index * Add multinic API unit test * Add plan for kilo blueprints: project priorities * make flavors use common limit and marker * Raise an exception if qemu-img fails * Libvirt: Always teardown lxc container on destroy * Mark nova-baremetal driver as deprecated in Juno, removed in K * libvirt: Unnecessary instance.save(s) called * Add progress and cell\_name into notifications * XenAPI: run vhd-util repair if VHD check fails * Get instance\_properties from request\_spec * libvirt: convert encrypted LVM test to avoid DB usage * libvirt: convert test\_dmcrypt to avoid DB usage * libvirt: convert test\_blockinfo.py to avoid DB usage * libvirt: convert test\_vif.py to avoid DB usage * libvirt: remove pointless class in util test suite * libvirt: avoid need for lockutils setup running test cases * VMware: Remove host argument to 
ds\_util.get\_datastore() * Fix DB migration 254 by adding missing unittest * postgresql: use postgres db instead of template1 * Assume VNIC\_NORMAL if binding:vnic\_type not set * mock.assert\_called\_once() is not a valid method * db: Add @\_retry\_on\_deadlock to service\_update() * Update ironic states and documentation * XenAPI improve post snapshot coalesce detection * Catch NotImplementedError on reset\_network for xen * VMware: Fix usage of assertEqual in test\_vmops * Add more information to generic \_add\_floating\_ip exception message * bring over pretty\_tox.sh from tempest * Console: warn that the Nova VMRC console driver will be deprecated in K * virt: use compute.vm\_mode constants and validate vm mode type * compute: tweaks to vm\_mode APIs to align with arch/hvtype * Fix NUMA fit testing in claims and filter class * consolidate apirequest tests to single file * ensure that we safely encode ec2 utf8 responses * instance\_topology\_from\_instance handles request\_spec properly * NUMA \_get\_constraint auto assumed Flavor object * Imported Translations from Transifex * Fix 'force' parameter for quota-update * Update devref * default=None is unneeded in config definitions * Remove unused elevated context param from quota helper methods * Remove stale code from ObjectListBase * Split up libvirt volume's connect\_volume method * Record instance faults during boot process * ironic/baremetal: add validation of host manager/state APIs * virt: move assertPublicAPISignatures into base test class * libvirt: avoid 30 second long test in LXC mount setup * Remove all redundant \`setUp\` methods * fix up assertEqual(None...) check to catch more cases * Fix object version hash test * disk/vfs: make docstring conventional to python * disk/vfs: ensure guestfs capabilities * NIST: increase RSA key length to 2048 bit * Fix incorrect exception when bdm with error state volume * ironic: Clean LOG usage * Improve secgroup create error message * Always log the releasing, even under failure * Fix race condition in update\_dhcp * Make obj\_make\_compatible consistent * Correct baremetal/ironic consume\_from\_instance.. 
* Fix parsing sloppiness from iscsiadm discover * correct inverted subtraction in quota check * Add quotas for Server Groups (quota checks) * Add quotas for Server Groups (V2 API change) * check network ambiguity before external network auth * Updated from global requirements * libvirt: Consider numa\_topology when booting * Add the NUMATopologyFilter * Make HostManager track NUMA usage * API boot process sets NUMA topology for instances * Make resource tracker track NUMA usage * Hook NUMA topology checking into claims * Stash numa-related flavor extra\_spec items in system\_metadata * Fixes network\_get\_all\_by\_host to use indexes * Add plan for kilo blueprints: when is a blueprint needed * Bump FakeDriver's resource numbers * delete python bytecode before every test run * Stop using intersphinx * Don't swallow exceptions in deallocate\_port\_for\_instance * neutronv2: attempt to delete all ports * Proxy nova baremetal commands to Ironic * Increase sleeps in baremetal driver * Improve logging of external events on the compute node * virt: use compute.virttype constants and validate virt type * compute: Add standard constants for hypervisor virt types * Fix test\_create\_instance\_invalid\_key\_name * Fix \`confirmResize\` action status code in V2 * Remove unnecessary imageRef setting from tests * Add unit test for add\_floating\_ip API * Remove unused config "service\_down\_time" reference * Clarify logging in lockutils * Make sure libvirt VIR\_ERR\_NO\_DOMAIN errors are handled correctly * Adds LOG statements in multiprocess API test * Block sqlalchemy migrate 0.9.2 as it breaks all of nova * Xen: Attempt to find and cleanup orphaned SR during delete * Nova-net: fix server side deallocate\_for\_instance() * Method for getting NUMA usage from an instance * Ironic: save DB calls for getting flavor * Imported Translations from Transifex * Fix 'os-interface' resource name for Nova V2.1 * Add new unit tests for PCI stats * Fixes AttributeError with api sample test fail * Fix "revertResize/confirmResize" for V2.1 API * Add unit test to os-agent API * check the block\_device\_allocate\_retries * Support SR-IOV networking in libvirt * Support SR-IOV networking in nova compute api and nova neutronv2 * Support SR-IOV networking in the PCI modules * Add request\_id in PciDevice * Replace pci\_request flavor storage with proper object usage * Adds a test for raw\_cpu\_arch in \_node\_resource * Stop stack tracing when trying to auto-stop a stopped instance * Add quotas for Server Groups (V2 API compatibility & V2.1 support) * Fixes Hyper-V volume mapping issue on reboot * Libvirt-Enable support for discard option for disk device * libvirt: set pae for Xen PVM and HVM * Add warning to periodic\_task with interval 0 * document why we disable usb\_tablet in code * Fix 'os-start/os-stop' server actions for V2.1 API * Fix 'createImage' server actions for V2.1 API * Add unit test to aggregate api * Handle exception better in v2 attach\_interface * Fix integrated test cases for assisted-volume-snapshots * libvirt: start lxc from block device * Remove exclude coverage regex from coverage job * Pass instance to set\_instance\_error\_state vs. 
uuid * Add InstancePCIRequests object * Drop verbose and useless nova-api log information * Add instance\_extra\_update\_by\_uuid() to DB API * Add pci\_requests to instance\_extra table * Add claims testing to VirtNUMAHostTopology class * Expose numa\_topology to the resource tracker * libvirt: fix bug when releasing port(s) * Specify correct operation type when NVH is raised * Ironic: don't canonicalize extra\_specs data * VMware: add tests for image fetch/cache functions * VMware: spawn refactor image fetch/cache * Ironic: Fix direct use of flavor and instance module objects * Ironic driver fetches extra\_specs when needed * Maint: neutronclient exceptions from a more appropriate module * Check requirements.txt files for missing (used) requirements * Sync oslo-incubator module log: * Add amd64 to arch.canonicalize() * Sync oslo lockutils to nova * libvirt: deprecated volume\_drivers config parameter * VMware: Remove get\_copy\_virtual\_disk\_spec from vmops and vm\_util * maint: various spelling fixes * Fix config generator to use keystonemiddleware * libvirt: improve unit test time * VMware: prevent race condition with VNC port allocation * VMware: Fix return type of get\_vnc\_console() * VMware: Remove VMwareVCVMOps * Network: enable instance deletion when dhcp release fails * Adds ephemeral storage encryption for LVM back-end images * Don't elevate context when rescheduling * Ironic driver backports: patch 7 * Improve Ironic driver performance: patch 6 * Import Ironic Driver & supporting files - part 5 * Import Ironic Driver & supporting files - part 4 * Import Ironic Driver & supporting files - part 3 * Import Ironic Driver & supporting files - part 2 * Import Ironic Driver & supporting files - part 1 * Add sqlite dev packages to devref env setup doc * Add user namespace support for libvirt-lxc * Move to oslo.db * api: add serial console API calls v2 * compute: add get\_serial\_console rpc and cells api calls * compute: add get\_serial\_console in manager.py * virt: add method get\_serial\_console to driver * Clean up LOG import in floating\_ips\_bulk v2 api

2014.2.b3
---------

* Update invalid state error message on reboot API * Fix race condition with vif plugging in finish migrate * Fix service groups with zookeeper * xenapi: send chunk terminator on subprocess exc * Add support for ipv6 nameservers * Remove unused oslo.config import * Support image property for config drive * warn against sorting requirements * VMware: remove unused \_get\_vmdk\_path from vmops * virt: use compute.arch constants and validate architectures * Change v3 quota-sets API to v2.1 * always set --no-hosts for dnsmasq * Allow \_poll\_bandwidth\_usage task to hit slave * Add bandwidth usage object * VMware: spawn refactor enlist image * VMware: image user functions for spawn() * Change v3 flavor\_manage API to v2.1 * Port used\_limits & used\_limits\_for\_admin into v2.1 * Add API schema for v2.1 access\_ips extension * Add API schema for v2.1 "rebuild a server" API * Add API schema for v2.1 "update a server" API * Enabled qemu memory balloon stats * Reset task state 'migrating' on nova compute restart * Pass certificate, key and cacert to glanceclient * Add a policy for handling retrospective vetos * Adds Hyper-V soft shutdown implementation * Fix swap\_volumes * Add API schema for v2.1/v3 multiple\_create extension * Return hydrated net info from novanet add/remove\_fixed\_ip calls * Add API schema for v2.1/v3 availability\_zone extension * Add API schema for v2.1/v3 server\_metadata API * Fixes a 
Hyper-V list\_instances localization issue * Adds list\_instance\_uuids to the Hyper-V driver * Change v3 admin\_actions to v2.1 * Change v3 aggregate API to v2.1 * Convert v3 ExtendedAvailabilityZone api to v2.1 * Convert v3 hypervisor plugin to v2.1 * Convert server\_usage v3 plugin to v2.1 API * Convert v3 servers return\_reservation\_id behaviour to v2.1 * the headroom infomation is incomplete * Port volumes extension to work in v2.1/v3 framework * vmwareapi oslo.vmware library integration * Allow forceDelete to delete running instances * Port limits extension to work in v2.1/v3 framework * Port image-size extension to work in v2.1/v3 framework * Port v2 image\_metadata extension to work in v2.1(v3) framework * Port v2 images extension to work in v2.1(v3) framework * Convert migrate\_server v3 plugin to v2.1 * Changes V3 evacuate extension into v2.1 * console: add typed console objects * virt: setup TCP chardevice in libvirt driver * Remove snapshot\_id from \_volume\_snapshot\_create() * Check min\_ram and min\_disk when boot from volume * Add API schema for v2.1 "create a server" API * InstanceNUMAToplogy object create remove uuid param * Change v3 flavor\_access to v2.1 * Convert rescue v3 plugin to v2.1 API * Change v3 security\_groups API to v2.1 * Changes V3 remote\_console extension into v2.1 * Use common get\_instance function in v2 consoles extension * Add API schema for v2.1/v3 user\_data extension * Convert v3 cells API to v2.1 * Convert v3 server metadata plugin to v2.1 * Convert multiple-create v3 plugin to v2.1 * Convert v3 flavor extraspecs plugin to v2.1 * Fix scheduler\_available\_filters help * cmd: add nova-serialproxy service * console: add serial console module * Changes V3 server\_actions extension into v2.1 * Change v3 version API to v2.1 * Change v3 shelve to v2.1 * Process power state syncs asynchronously * Made unassigned networks visible in flat networking * Add functions to setup user namespaced filesystems * Adds nova-idmapshift cli utility * Add idmap to libvirt config * Allow hard reboots when still attempting a soft reboot * Decrease amount of queries while adding aggregate metadata * Adds Hyper-V serial console log * Store original state when suspending * Fix NoopQuotasDriver.get\_settable\_quotas() * Use instance objects consistently in suspend tests * Instance objects: fix indentation issue * libvirt: Add method for getting host NUMA topology * Add instance\_extra table and related objects * Change v3 availability-zone API to v2.1 * Move and generalize decorator serialize\_args to nova.objects.base * Convert v3 certificate API to v2.1 * Make neutronapi use NetworkRequest for allocate\_for\_instance() * Use NetworkRequest objects through to nova-network * Add extension block\_device\_mapping\_v1 for v2.1 * Catch BDM related InvalidBDM exceptions for server create v2.1 * Changes block\_device\_mapping extension into v2.1 * Fix rootwrap for non openstack.org iqn's * Let update\_available\_resource hit slave * Plumb NetworkRequest objects through conductor and compute RPC * Updates available resources after live migration * Convert compute/api to use NetworkRequest object and list * Refactor the servers API to use NetworkRequest * Cells: Update set\_admin\_password for objects * Remove libvirt legacy LVM code * libvirt: reworks configdrive creation * Handle non dict metadata in server metadata V2 API * Fix wrong disk type limitation for disk IO throttling * Use v2.1 URLs instead of v3 ones in API unit tests * VMware: Add in support for CPU shares in event 
of resource contention * VMware: add resource limits for CPU * Refactor admin\_action plugin and test cases * Fix error in log when log exception in guestfs.py * Remove concatenation with translated messages * Port simple\_tenant\_usage into v2.1 * Convert console\_output v3 plugin to v2.1 * GET servers API sorting enhancements common utilities * Add \_security\_group\_ensure\_default() DBAPI method * Fix instance boot when Ceph is used for ephemeral storage * Add NetworkRequest object and associated list * Remove use of str on exceptions * Fix the current state name as 'shutting-down' * Explicitly handle exception ConsoleTypeUnavailable for v2 consoles * Convert v3 server diagnostics plugin to v2.1 * Porting v3 evacuate testcases to v2 * libvirt: Uses correct imagebackend for configdrive * Add v2.1 API router and endpoint * Change v3 keypairs API to v2.1 * Backport V3 hypervisor plugin unit tests to V2 * Remove duplicated negative factors in keypair test * filter: add per-aggregate filter to configure max\_instances\_per\_host * Updated from global requirements * Mask passwords in exceptions and error messages * Make strutils.mask\_password more secure * A minor change to a comments * Check vlan parameter is valid * filter: add per-aggregate filter to configure disk\_allocation\_ratio * Deprecate cinder\_\* configuration settings * Allow attaching external networks based on configurable policy * Fix CellStateManagerFile init to failure * Change v3 extended\_status to v2.1 * Fixes Hyper-V volume discovery exception message * Use default quota values in test\_quotas * libvirt: add validation of migration hostname * Add a Set and SetOfIntegers object fields * Add numa\_topology column to the compute\_node table * Preserve exception text during schedule retries * Change v3 admin-password to v2.1 * Make Object FieldType from\_primitive pass objects * Change V3 access\_ips extension into v2.1 * Update RESP message when failed to create flavor * Cleanup of V2 console output tests and add missing tests * Convert multinic v3 plugin to v2.1 * Change 'changes\_since'/'changes-since' into v2.1 style for servers * Backport v3 multinic tests to v2 * Change ViewBuilder into v2.1 for servers * Change v3 agents API to v2.1 * Change v3 attach\_interface to v2.1 * Backport V3 flavor extraspecs API unit tests to V2 * Return BadRequest instead of UnprocessableEntity for volumes API * Convert create\_backup v3 plugin to v2.1 API * Update instance state after compute service died for rebuilded instance * Make floatingip-ip-delete atomic with neutron * Add v3 versions plugin unit test to v2 * Remove duplicated code in test\_versions * Change v3 hosts to v2.1 * Change v3 extended\_server\_attibutes to v2.1 * Make test\_killed\_worker\_recover faster * Change v3 flavor\_rxtx to v2.1 * fix typo in docstring * libvirt: driver used memory tests cleanup * Avoid refreshing PCI devices on instance.save() * Updated from global requirements * Change v3 flavors to v2.1 * neutronv2: treat instance as object in deallocate\_for\_instance * Fix class name for ServerGroupAffinityFilter * Adds Hyper-V Compute Driver soft reboot implementation * Add QuotaError handling to servers rebuild API * Allow to create a flavor without specifying id * XenAPI: Remove interrupted snapshots * Fix typo in comment * Fix V2 unit tests to test hypervisor API as admin * Create compute api var at \_\_init\_\_ * libvirt: support live migrations of instances with config drives * Change v3 os-user-data extension to v2.1 * Remove duplicated code in 
test\_user\_data * Convert v3 server SchedulerHints plugin to v2.1 * Convert deferred\_delete v3 plugin to v2.1 API * Backport some v3 scheduler hints API UT to v2 API * Change error status code for out of quota to be 403 instead of 413 * Correct seconds of a day from 84400 to 86400 * VMware: add adapter type constants * Fix comment typo * scheduler sends select\_destinations notifications * Fix for volume detach error when use NFS as the cinder backend * objects: Add base test for obj\_make\_compatible() * objects: Fix InstanceGroup.obj\_make\_compatible() * Restore backward compat for int/float in extra\_specs * Convert v3 config drive plugin to v2.1 * Fix sample files miss for os-aggregates * Backport v3 config\_drive API unittest to v2 API * Backport some v3 availability zones API UT to v2 API * Handle non-ascii characters in spawn exception msg * Log warning message if volume quota is exceeded * Remove \_instance\_update usage in \_build\_instance * Treat instance like an object in \_build\_instance * Remove \_instance\_update usage in \_default\_block\_device\_names * Add missing flags to fakelibvirt for migration * Adds tests for Hyper-V Volume utils * Fix ability to generate object hashes in test\_objects.py * Fix expected error details from jsonschema * Extend the docstring for obj\_make\_compatible() with examples * HyperV Driver - Fix to implement hypervisor-uptime * Port os-server-groups extension to work in v2.1/v3 framework * Fix the exception for a nonexistent flavor * Add api extension for new network fields * Use real exceptions for network create and destroy * Support reserving ips at network create time * Adds get\_instance\_disk\_info to compute drivers * Use rfc3986 library to validate URL paths and URIs * Send create.end notification even if instance is deleted * Allow three periodic tasks to hit slave * Fixes Hyper-V unit test path separator issue * Share common test settings in test\_flavor\_manage * Shelve should give guests a chance to shutdown * Rescue should give guests a chance to shutdown * Resize should give guests a chance to shutdown * Power off commands should give guests a chance to shutdown * objects: Make use of utils.convert\_version\_to\_tuple() * tests: fix test\_compute to have predictable service list * libvirt: make sysinfo serial number configurable * Fixes Hyper-V resize down exception * Make usage\_from\_instances consider current usage * VMware: ensure test case for init\_host in driver * Add some v2 agents API tests * Libvirt: Do not raise ENOENT exception * Add missing create() method on SecurityGroupRule object * Add test for get\_instance\_disk\_info to test\_virt\_drivers * Move fake\_quotas and fake\_get\_quotas into a class * Objectify association in neutronapi * Objectify last uses of direct db access in network/floating\_ips * Update migration defaults * libvirt: reduce indentation in is\_vif\_model\_valid\_for\_virt * Fixes Hyper-V boot from volume root device issue * Fixes Hyper-V vm state issue * Imported Translations from Transifex * Share unittest between v2 and v2.1 for hide\_server\_addresses extension * Check compulsory flavor create parameters exist * Treat instance like an object in \_default\_block\_device\_names * Change 'image\_ref'/'flavor\_ref' into v2 style for servers * Change 'admin\_password' into v2 style for servers extension * Image caching tests: use list comprehension * Move \_is\_mapping to more central location * Stop augmenting oslo-incubators default log levels * Track object version relationships * Remove 
final use of glance\_stubs * Removes GlanceClient stubs * Pull transfer module unit tests from glance tests * VMware: remove specific VC support from class VMwareVolumeOps * VMware: remove Host class * Image cache tests: ensure that assertEquals has expected param first * VMware: spawn refactor \_configure\_config\_drive * VMware: refactor spawn() code to build a new VM * VMware: Fix type of VM's config.hardware.device in fake * VMware: Create fake VM with given datastore * VMware: Remove references to ebs\_root from spawn() * VMware: Create VMwareImage object for image metadata * Image caching: update image caching to use objects * Report all objects with hash mismatches in a single go * Include child\_versions in object hashes * Direct-load Instance.fault when lazy-loading * VMware: Remove unused variable in test\_configdrive * Raise HTTPNotFound error from V2 cert show API * Add dict and json methods to VirtNUMATopology classes * virt: helper for processing NUMA topology configuration * Raise Not Implemented error from V2 diagnostics API * Make NovaObjectSerializer work with dicts * Updated from global requirements * neutronv2: treat instance like object in allocate\_for\_instance * nova-network: treat instance like object in allocate\_for\_instance * Treat instance like object in \_validate\_instance\_group\_policy * Treat instance like an object in \_prebuild\_instance * Treat instance like an object in \_start\_building * Add graphviz to list of distro packages to install * Fixes Hyper-V agent force\_hyperv\_utils\_v1 flag issue * ec2: Use S3ImageMapping object * ec2: Add S3ImageMapping object * Remove unused db api methods * Get EC2 snapshot mappings with nova object * Use EC2SnapshotMapping for creating mappings * Add EC2SnapshotMapping object * Fix NotImplementedError in floating-ip-list * filter: add per-aggregate filter to configure max\_io\_ops\_per\_host * Hacking: a new hacking check was added that used an existing number * Fix hacking check for jsonutils * VMware: revert deletion of cleanup\_host * Use flavor in confirm-resize to drop claim * Add new db api get functions for ec2\_snapshot * Partial oslo-incubator sync -- log.py * Add unit tests for libvirt domain creation * Fix Trusted Filter to work with Mt. 
Wilson \`vtime\` * Fix 202 responses to contain valid content * Fix EC2 instance type for a volume backed instance * libvirt: add serial ports config * Split EC2 ID validator to validator per resource type * libvirt: do not fail instance destroy, if mount\_device is missing * libvirt: persist lxc attached volumes across reboots and power down * Resize block device after swap to larger volume * Make API name validation failure deterministic * VMware: spawn refactor add VirtualMachineConfigInfo * libvirt: Fix kwargs for \_create\_image * VMware: fix crash when VC driver boots * baremetal: Remove depenency on libvirt's fetch\_image method * libvirt: Remove unecessary suffix defaulting * Drop instance\_group\_metadata from the database * Neutron v2 API: fix get\_floating\_ip\_pools * libvirt: Allow specification of default machine type * Fix rebuild with cells * Added hacking check for jsonutils * Consistently use jsonutils instead of specific implementation * Convert network/api.py uses of vif database functions to objects * Convert last use of direct database instance fetching from network api * libvirt: skip disk resize when resize\_instance is False * libvirt: fix \_disk\_resize to make sure converted image will be restored * Backport some v3 certificate API unittest to v2 API * Backport some v3 aggregate API unittest to v2 API * Imported Translations from Transifex * More informative nova-scheduler log after NoValidHost is caught * Remove metadata/metadetails from instance/server groups * Prepend /dev/ to root\_device\_name in get\_next\_device\_name * Lock attach\_volume * Adjust audit logs to avoid negative disk info * Convert network/api.py to use FloatingIP object * Correct some IPAddress DB interaction in objects * docs - Set pbr 'warnerrors' option for doc build * docs - Fix errors,warnings from document generation * Provide a quick way to run flake8 * Add support for select\_destinations in Scheduler client * Create a Scheduler client library * VMware: handle case when VM snapshot delete fails * Use common get\_instance function in v2 attach\_interface * Add some v2 flavor\_manage API tests * Backport v3 api unittest into v2 api for attach\_interface extension * Fix the error status code of duplicated agents * Handle ExternalNetworkAttachForbidden exception * Allow empty volumes to be created * docs - Fix errors,warnings from document generation * docs - Fix exception in docs generation * docs - Fix docstring issues in virt tree * VMware: test\_driver\_api: Use local variables in closures * VMware: Remove ds\_util.build\_datastore\_path() * Use v1 as default for cinder\_catalog\_info * Fix live-migration failure in FC multipath case * Optimize instance\_floating\_address\_get\_all * Enhance PCI whitelist * Add a missing instance=instance in compute/mgr * Correct returned HTTP status code (Use 403 instead of 413) * Fix wrong command for \_rescan\_multipath * add log exception hints in some modules * Fix extension parameters in test\_multiple\_create * Standardize logging for v3 api extensions * Standardize logging for v2 api extensions * Add ListOfDictOfNullableString field type * Enable terminate for EC2 InstanceInitiatedShutdownBehavior * Remove duplicate test of passing glance params * Convert glance unit tests to not use stubs * Add decorator expected\_errors for ips v3 extension * Return 404 instead of 501 for unsupported actions * Return 404 when floating IP pool not found * Makes versions API output deterministic * Work on document structure and doc building * Catch 
NeutronClientException when showing a network * Add API schema for v2.1/v3 security\_groups extension * Add API schema for v2.1/v3 config\_drive extension * Remove pre-icehouse rpc client API compat * makes sure correct PCI device allocation * Adds tests for Hyper-V VM Utils * Make nova-api use quotas object for reservations * VMware: implement get\_host\_ip\_addr * Boot an instance with multiple vnics on same network * Optimize db.floating\_ip\_deallocate * Fixes wrong usage of mock.assert\_not\_called() * Code change for nova support cinder client v2 * libvirt: saving the lxc rootfs device in instance metadata * Add method for deallocating networks on reschedule * DB: use assertIsNotNone for unit test * Add expire reservations in backport position * Make network/api.py use Network object for associations * Migrate test\_glance from mox to mock * Add instanceset info to StartInstance response * Adds verbosity to child cell update log messages * Removes unnecessary instructions in test\_hypervapi * Diagnostics: add validation for types * Add missed discoverable policy rules for flavor-manage v3 * Rename rbd.py to rbd\_utils.py in libvirt driver directory * Correct a maybe-typo in pci\_manager * libvirt: make guestfs methods always return list of tuples * Revert "Deallocate the network if rescheduling for * libvirt: volume snapshot delete for network-attached disks * libvirt: parse disk backing chains from domain XML * Handle MacAddressInUseClient exception from Neutron when creating port * Updated from global requirements * Remove instance\_info\_cache\_delete() from conductor * Make spawn\_n() stub properly ignore errors in the child thread work * Update devref out-of-tree policy grammar error * Compute: add log exception hints * Handle NetworkAmbiguous error when booting a new instance with v3 api * Handle FloatingIpPoolNotFound exception in floating ip creation * Add policy on how patches and reviews go hand in hand * Add hacking check for explicit import of \_() * VMware: Do not read opaque type for DVS network * VMware: add in DVS VXLAN support * Network: add in a new network type - DVS * Network: interface attach and detach raised confusing exception * Deprecate metadata\_neutron\_\* configuration settings * Log cleanups for nova.network.neutron.api * Remove ESXDriver from Juno * Only get image location attributes if including locations * Use JSON instead of json in the parameter descriptions * Add a retry\_on\_deadlock to reservations\_expire * docs - Fix doc build errors with SQLAlchemy 0.9 * docs - Fix indentation for RPC API's * docs - Prevent eventlet exception during docs generation * docs - Add an index for the command line utilities * docs - fix missing references * Change LOG.warn to LOG.debug in \_shutdown\_instance * EC2: fixed AttributeError when metadata is not found * Import Ironic scheduler filters and host manager * EndpointNotFound deleting volume backend instance * Fix nova boot failure using admin role for another tenant * docs - Fix docstring issues * Update scheduler after instance delete * Remove duplicate index from block\_device\_mapping table * Fix ownership checking in get\_networks\_by\_uuid * Raises NotImplementedError for LVM migration * Convert network/api.py fixedip calls to use FixedIP object * Convert network/api.py get calls to use Network object * Add extensible resources to resource tracker (2) * Make DriverBlockDevice save() context arg optional * Improved error logging in nova-network for allocate\_fixed\_ip() * Issue multiple SQL statements in 
separate engine.execute() calls * Move check\_image\_exists out of try in \_inject\_data * Fix fake\_update in test\_update\_missing\_server * Add unit tests to cells conductor link * Revert "libvirt: add version cap tied to gate CI testing" * Use Ceph cluster stats to report disk info on RBD * Add trace logging to allocate\_fixed\_ip * Update devref setup docs for latest libvirt on ubuntu * libvirt re-define guest with wrong XML document * Improve logging when python-guestfs/libguestfs isn't working * Update dev env docs on libvirt-dev(el) requirement * Parse unicode cpu\_info as json before using it * Fix Resource tracker should report virt driver stats * Fix \_parse\_datetime in simple tenant usage extension * Add API schema for v2.1/v3 cells API * Fix attaching config drive issue on Hyper-V when migrate instances * Allow to unshelve instance booted from volume * libvirt: add support for guest NUMA topology in XML config * libvirt: remove pointless LibvirtBaseVIFDriver class * libvirt: remove 'vif\_driver' config parameter * libvirt: remove use of CONF.libvirt.virt\_type in vif.py * Handle NotImplementedError in server\_diagnostics v3 api * Remove useless check in \_add\_retry\_host * Initialize Ironic virt driver directory * Live migration is broken for NFS shared storage * Fix ImportError during docs generation * Updated from global requirements * Extend API schema for "create a server" extensions * Enable cloning for rbd-backed ephemeral disks * Add include\_locations kwarg to nova.image.API.get() * Add index for reservations on (deleted, expire) * VMWare Driver - Ignore datastore in maintenance mode * Remove outdated docstring for nova.network.manager * libvirt: remove 3 unused vif.py methods * Turn on pbr's autodoc feature * Remove api reference section in devref * Deduplicate module listings in devref * VMware: Resize operation fails to change disk size * Use library instead of CLI to cleanup RBD volumes * Move libvirt RBD utilities to a new file * Properly handle snatting for external gateways * Only use dhcp if enable\_dhcp is set on the network * Allow dhcp\_server to be set from new field * Set python hash seed to 0 in tox.ini * Make devref point to official devstack vagrant repo * Stop depending on sitepackages libvirt-python * libvirt: driver tests use non-mocked BDMs * Fix doc build errors in models.py * Make several ec2 API tests inherit from NoDBTestCase * Stub out rpc notifications in ec2 cloud unit tests * Add standard constants for CPU architectures * virt: switch order of args to assertEqual in guestfs test * virt: move disk tests into a sub-directory * virt: force TCG with libguestfs unless KVM is enabled in libvirt * Do not pass instances without host to compute API * Pass errors from detach methods back to api proc * libvirt: add tests for \_live\_snapshot and \_swap\_volume methods * libvirt: fill in metadata when launching instances * Increase min required libvirt to 0.9.11 * Rollback quota when confirm resize concurrently completed * API: Enable support for tenant option in nova absolute-limits * libvirt: removing lxc specific disk mapping * Method to filter non-root block device mappings * VMware: remove local variable * Use hypervisor hostname for compute trust level * Remove unused cell\_scheduler\_method * Fix the i18n for some warnings in compute utils * Fix FloatingIP.save() passing FixedIP object to sqlalchemy * Scheduler: throw exception if no configured affinity filter * xenapi: Attach original local disks during rescue * libvirt: remove VIF driver 
classes deprecated in Icehouse * Move logs of restore state to inner logic * Clean nova.compute.resource\_tracker:\_update\_usage\_from\_instances * Fix and Gate on E265 * Log translation hint for nova.api * Fix duplicated images in test\_block\_device\_mapping * Add Hyper-V driver in the "compute\_driver" option description * reduce network down time during live-migration * Augment oslo's default log levels with nova specific ones * Make the coding style consistent with other Controller in plugins/v3 * Fix extra metadata didn't assign into snapshot image * Add i18n log markers in disk api * VMware: improve log message for attachment of CDROM * Raise NotImplemented default-security-group-rule api with neutron * vmwareapi: remove some unused fake vim methods * Correct image\_metadata API use of nova.image.glance * Revert "Add extensible resources to resource tracker" * Update database columns nullable to match model * Updated from global requirements * Make quotas APIv3 extension use Quotas object for create/update * Make quotas APIv2 extension use Quotas object for create/update * Add quota limit create/update methods to Quotas object

2014.2.b2
---------

* libvirt: VM diagnostics (v3 API only) * Add ibmveth model as a supported network driver for KVM * libvirt: add support for memory tuning in config * libvirt: add support for memory backing parameters * libvirt: add support for per-vCPU pinning in guest XML * libvirt: add parsing of NUMA topology in capabilities XML * handle AutoDiskConfigDisabledByImage at API layer * Rollback quota in os\_tenant\_network * Raise specific error of network IP allocation * Convert to importutils * Catch CannotResizeDisk exception when resize to zero disk * VMware: do not cache image when root\_gb is 0 * Turn periodic tasks off in all unit tests * Rename virtutils to the more common libvirt\_utils * Check for resize path on libvirt instance delete * Return status for compute node * servers list API support specify multi-status * Deprecate scheduler prep\_resize * Updated from global requirements * Fix nova cells exiting on db failure at launch * Remove unneeded calls in test\_shelve to start instances * Correct InvalidAggregateAction reason for Xen * Handle a flavor create failed better * Add valid method check for quota resources * VMware: power\_off\_instance support * Add debug log for availability zone filter * Fix typo * Fix last of direct use of object modules * Check instance state before attach/detach interface * Fix error status code for cloudpipe\_update * Fix unit tests related to cloudpipe\_update * Add API schema for v2.1/v3 reset\_server\_state API * Adjust audit logs to avoid negative mem/cpu info * Re-add H803 to flake8 ignore list * Fix nova/pci direct use of object modules * Gate on F402/pep8 * Inject expected results for IBM Power when testing bus devices * Add extensible resources to resource tracker * libvirt: define XML schema for recording nova instance metadata * Sync loopingcall from oslo * Add APIv2 support to make host optional on evacuate * Add differencing vhdx resize support in Hyper-V Driver * Imported Translations from Transifex * Add context as param to cleanup function * Downgrade the warn log in network to debug * Correct use of nova.image.glance in compute API * Keep Migration status in automatic confirm-resize * Removes useless stub of glanceclient create * Remove rescue/unrescue NotImplementedError handle * Add missing foreign key on pci\_devices.compute\_node\_id * Revert "Add missing image to instance booted from 
volume" * Add debug log for pci passthrough filter * Cleanup and gate on hacking E711 and E712 rule * Keep resizing&resized instances when compute init * Commit quota when deallocate floating ip * Remove unnecessary error log in cell API * Remove stubs in favor of mock in test\_policy * Remove translation for debug message * Fix error status code for agents * Remove warn log for over quota * Use oslo.i18n * Cleanup: remove unused argument * Implement methods to modify volume metadata * Minor tweaks to hypervisor\_version to int * update ignore list for pep8 * Add decorator expected\_errors for v3 attach\_interfaces * Add instance to debug log at compute api * Don't truncate osapi\_glance\_link or osapi\_compute\_link prefixes * Add decorator expected\_errors to V3 servers core * Correctly reject request to add lists of hosts to an aggregate * Do not process events for instances without host * Fix Cells ImagePropertiesFilter can raise exceptions * libvirt: remove flawed get\_num\_instances method impl * libvirt: remove unused list\_instance\_ids method * libvirt: speed up \_get\_disk\_over\_committed\_size\_total method * Partial oslo-incubator sync * VMware: Remove unnecessary deepcopy()s in test\_configdrive * VMware: Convert vmops to use instance as an object * VMware: Trivial indentation cleanups in vmops * VMware: use datastore classes in file\_move/delete/exists, mkdir * VMware: use datastore classes get\_allowed\_datastores/\_sub\_folder * VMware: DatastorePath join() and \_\_eq\_\_() * VMware: consolidate datastore code * VMware: Consolidate fake\_session in test\_(vm|ds)\_util * Make BDM dict \_\_init\_\_ behave more like a dict * VMware: support the hotplug of a neutron port * Deallocate the network if rescheduling for Ironic * Make sure that metadata handler uses constant\_time\_compare() * Enable live migration unit test use instance object * Move volume\_clear option to where it's used * move the cloudpipe\_update API v2 extension to use objects * Avoid possible timing attack in metadata api * Move injected\_network\_template config to where it's used * Don't remove delete\_on\_terminate volumes on a reschedule * Defer raising an exception when deleting volumes * Xen: Cleanup orphan volume connections on boot failure * Adds more policy control to cells ext * shelve doesn't work on nova-cells environment * libvirt: add migrateToURI2 method to fakelibvirt * libvirt: fix recent test changes to work on libvirt < 0.9.13 * Update requirements to include decorator>=3.4.0 * Cleanup and gate on hacking E713 rule * libvirt: add version cap tied to gate CI testing * Small grammar fix in libvirt/driver.py. 
fix all occurrences * Correct exception for flavor extra spec create/update * Fixes Hyper-V SCSI slot selection * xenapi: Use netuils.get\_injected\_network\_template * libvirt: Support IPv6 with LXC * Improve shared storage checks for live migration * XenAPI: VM diagnostics for v3 API * Move retry of prep\_resize to conductor instead of scheduler * Retry db.api.instance\_destroy on deadlock * Translations: add LC to all LOG.critical messages * Remove redundant code in Libvirt driver * Virt: fix typo (flavour should be flavor) * Fix and gate on H305 and H307 * Remove unused instance variables from HostState * Send compute.instance.create.end after launched\_at is set * VMware: validate the network\_info is defined * Security groups: add missing translation * Standardization of nova.image.API.download * Catch InvalidAggregateAction when deleting an aggregate * Restore ability to delete aggregate metadata * Nova-api service throws error when SIGHUP is sent * Remove cell api overrides for lock and unlock * Don't mask out HostState details in WeighedHost * vmware: VM diagnostics (v3 API only) * Use pool/volume\_name notation when deleting RBD volumes * Add instanceset info to StopInstance response * Change compute updates from periodic to on demand * Store volume backed snapshot in current tenant * libvirt+lxc: Unmount guest FS from host on error * libvirt: speed up get\_memory\_mb\_used method * libvirt: speed up get\_vcpus method * libvirt: speed up get\_all\_block\_devices method * libvirt: speed up list\_instances method * libvirt: speed up list\_instance\_uuids method * Updated from global requirements * Fix interfaces template for two interfaces and IPv6 * Fix error status code for multinic * libvirt: fix typo in fakelibvirt listAllDomains() * Refactors VIF configuration logic * Add missing test coverage for MultiplePortsNotApplicable compute/api * Make the block device mapping retries configurable * Catch image and flavor exceptions in \_build\_and\_run\_instance * Restore instance flavor info when driver finish\_migration fails * synchronize 'stop' and power state periodic task * Fix more re-definitions and enable F811/F813 in gate * Prepend '/dev/' to supplied dev names in the API * Handle over quota exception from Neutron * Remove pause/unpause NotImplementedError API layer * Add test cases for 2 block\_device functions * Make compute api use util.check\_string\_length * add comment about why snapshot/backup have no lock check * VM diagnostics (v3 API only) * VM diagnostics: add serializer to Diagnostics object * VM diagnostics: add methods to class to update diagnotics * object-ify API v2 availability\_zone extension * object-ify availability\_zones * add get\_by\_metadata\_key to AggregateList object * xenapi: make boot from volume use volumeops * libvirt: Avoid Glance.show on hard\_reboot * Add host\_ip to compute node object * VMware: move fake.py to the test directory * libvirt: convert cpuset XML handling to use set instead of string * virt: add method for formatting CPU sets to strings * Fixes rbd backend image size * Prevent max\_count > 1 and specified ip address as input * Add aggregates.rst to devref index * VMware: virt unrescue method now supports objects * VMware: virt rescue method now supports objects * Remove duplicate python-pip from Fedora devref setup doc * Do not fail cell's instance deletion, if it's missing info\_cache * libvirt: more efficient method to list domains on host * vmwareapi: make method signatures match parent class * Remove duplicate keys from 
dictionaries * virt: split CPU spec parsing code out into helper method * virt: move get\_cpuset\_ids into nova.virt.hardware * Fix duplicate definitions of variables/methods * change the firewall debugging for clarity * VMware: consolidate common constants into one file * Require posix\_ipc for lockutils * hyperv: make method signatures match parent class * Format eph disk with specified format in libvirt * Resolve import dependency in consoleauth service * Add 'anon' kwarg to FakeDbBlockDeviceDict class * Make cells rpc bdm\_update\_or\_create\_at\_top use BDM objects * Improve BlockDeviceMapping object cells awareness * Add support for user\_id based authentication with Neutron * VMware: add in test utility to get correct VM backing * Change instance disappeared during destroy from Error to Warning * VMware: Fix race in spawn() when resizing cached image * VMware: add support for driver method instance\_exists * Object-ify APIv3 agents extension * Object-ify APIv2 agents extension * Avoid re-adding iptables rules for instances that have disappeared * libvirt: Save device\_path in connection\_info when booting from volume * sync periodic\_task fix from incubator * Fix virt BDM \_\_setattr\_\_ and \_\_getattr\_\_ * Handle InstanceUserDataTooLarge at api layer * Updated from global requirements * Mask node.session.auth.password in volume.py \_run\_iscsiadm debug logs * Nova api service doesn't handle SIGHUP properly * check ephemeral disk format at libvirt before use * Avoid referencing stale instance/network\_info dicts in firewall * Use mtu setting from table instead of flag * Add debug log for core\_filter * VMware: optimize VM spawn by caching the vm\_ref after creating VM * libvirt: Add configuration of guest VCPU topology * virt: add helper module for determining VCPU topology * Change the comments of SOFT\_DELETED race condition * Fix bad log message with glance client timeout * Move the instance\_type\_id judgment to the except-block * Update port binding when unshelve instance * Libvirt: Added suffix to configdrive\_path required for rescue * sync policy logging fix from incubator * Sync process utils from olso * Remove instance\_uuids argument to \_schedule * Add \_\_repr\_\_ handler for NovaObjects * Pass instance to \_reschedule rather than instance\_uuid * Pass instance to \_set\_instance\_error\_state * Pass instance to \_error\_out\_instance\_on\_exception * Add APIv3 support to make host optional on evacuate * Move rebuild to conductor and add find host logic * VMware: validate that VM exists on backend prior to deletion * VMware: remove duplicate key from test\_instance dict * ConfigDriveBuilder refactor for tempdir cleanliness * VMware: cleanup the constructors of the compute drivers * Fix wrong lock name for operating instance external events * VMware: remove unused parameter 'network\_info' * VM diagnostics: introduce Diagnostics model object * Fixes internal server error for add/remove tenant flavor access request * add repr for event objects * Sync oslo lockutils to nova * Neutronv2 api does not support neutron without port quota * Be explicit about objects in \_shutdown\_instance() * Pass instance object into \_shutdown\_instance() * Skip none value attributes for ec2 image bdm output * Fixed wrong assertion in test\_vmops.py * Remove a not used function \_get\_ip\_by\_id * make lifecycle event logs more clear * xenapi: make method signatures match parent class * libvirt: make method signatures match parent class * virt: add test helper for checking public driver API 
method names * virt: fix signature of set\_admin\_password method * virt: use context & instance as param names in migrate APIs * virt: add get\_instance\_disk\_info to virt driver API * vmwareapi: remove unused update\_host\_status method * libvirt: remove hack from ensure\_filtering\_rules\_for\_instance * libvirt: remove volume\_driver\_method API * libvirt: add '\_' prefix to remaining internal methods * Imported Translations from Transifex * Fake driver: remove unused method get\_disk\_available\_least * Baremetal driver: remove unused states * Fix nova/network direct use of object modules * Fix rest of API objects usage * Fix rest of compute objects usage * Clean conntrack records when removing floating ip * Updated from global requirements * Enforce task\_state is None in ec2 create\_image stop instance wait loop * Update compute rpcapi tests to use instance object instead of dict * Fix run\_instance() rpc method to pass instance object * Fix error in rescue rpcapi that prevents sending objects * add checksums to udp independent of /dev/vhost-net * Use dot notation to access instance object fields in ec2 create\_image * vmwareapi: remove unused fake vim logout method * vmware: remove unused delete\_disk fake vim method * Revert "Sync revert and finish resize on instance.uuid" * Add test cases for block\_device * Add assert\_called check for "brclt addif" test * Log when nova-conductor connection established * Xen: Remove extraneous logging of type information * Fix agent\_id with string type in API samples files for os-agents v2 * Fix update agent return agent\_id with string for os-agents v3 * VMware: Fix fake raising the wrong exception in \_remove\_file * VMware: refactor get\_datastore\_ref\_and\_name * libvirt: introduce separate class for cpu tune XML config * libvirt: test setting of CPU tuning data * Make Evacuate API use Instance objects * VMware: create utility function for reconfiguring a VM * effectively disable libvirt live snapshotting * Fix exception raised when a requested console type is disabled * Add missing image to instance booted from volume * Use default rpc\_response\_timeout in unit tests * vmware: Use exc\_info when logging exceptions * vmware: Reuse existing StorageError class * vmware: Refactor: fold volume\_util.py into volumeops.py * Use ebtables to isolate dhcp traffic * Replace nova.utils.cpu\_count() with processutils.get\_worker\_count() * Sync log and processutils from oslo * libvirt: add '\_' prefix to host state information methods * libvirt: add '\_' prefix to some get\_host\_\* methods * Deprecate and remove agent\_build\_get\_by\_triple() * Object-ify xenapi driver's use of agent\_build\_get\_by\_triple() * Add Agent object * Move the error check for "brctl addif" * Add API schema for v2.1/v3 quota\_sets API * Add API schema for v2.1/v3 flavors\_extraspecs API * Add API schema for v2.1/v3 attach\_interfaces API * Add API schema for v2.1/v3 remote\_consoles API * Use auth\_token from keystonemiddleware * Use \_set\_instance\_obj\_error\_state in compute manager set\_admin\_password * api: remove unused function * api: remove useless get\_actions() in consoles * Do not allow resize to zero disk flavor * api: remove dead code in WSGI XML serializer * Updated from global requirements * Standardize logging for nova.virt.libvirt * Fix log debug statement in compute manager * Add API schema for v2.1/v3 aggregates API * Fix object code direct use of other object modules * Fix the rest of direct uses of instance module objects * Imported Translations 
from Transifex * Add API schema for v2.1/v3 flavor\_manage API * Forcibly set libvirt uri in baremetal virtual power driver * Synced jsonutils and its dependencies * Sync revert and finish resize on instance.uuid * Object-ify APIv3 availability\_zone extension * Fix bug in TestObjectVersions * libvirt: add '\_' prefix to all get\_guest\_\*\_config methods * libvirt: remove unused 'get\_disks' method * Downgrade some exception LOG messages in the ec2 API * Conductor: remove irrelevant comment * Added statement for ... else * Avoid traceback logs from simple tenant usage extension * Fix detaching pci device failed * Adds instance lock check for live migrate * Don't follow HTTP\_PROXY when talking to localhost test server * Correct the variable name in trusted filter * Target host in evacuate can't be the original one * Add API schema for v2.1/v3 hosts API * Object-ify APIv3 flavor\_extraspecs extension * Object-ify APIv2 flavorextraspecs extension * Catch permission denied exception when update host * Fix resource cleanup in NetworkManager.allocate\_fixed\_ip * libvirt: Support snapshot creation via libgfapi * Allow evacuate from vm\_state=Error * xenapi: reorder volume\_utils * Replace assertTrue/False with assertEqual/NotEqual * Replace assert\* with more suitable asserts in tests * Replace assertTrue/False with assertIn/NotIn * VMware: remove unused code in vm\_util.py * Not count disabled compute node for statistics * Instance and volume cleanup when a build fails * wrap\_instance\_event() shouldn't swallow return codes * Don't replace instance object with dict in \_allocate\_network() * Determine shared ip from table instead of flag * Set reasonable defaults for new network values * Adds network fields to object * Add new fields to the networks table * Log exception if max scheduling attempts exceeded * Make remove\_volume\_connection() use objects * Create lvm.py module containing helper API for LVM * Reduce unit test times for glance * Should not delete active snapshot when instance is terminated * Add supported file system type check at virt layer * Don't store duplicate policies for server\_group * Make exception handling in get\_image\_metadata more specific * live migrate conductor tasks to use nova.image.API * Fix Flavor object extra\_specs and projects handling * Drop support for scheduler 2.x rpc interface * Drop support for conductor 1.x rpc interface * Deprecate glance\_\* configuration settings * Update websocketproxy to work with websockify 0.6 * XenAPI: disable/enable host will be failed when using XenServer * Remove traces of now unused host capabilities from scheduler * Fix BaremetalHostManager node detection logic * Add missing stats info to BaremetalNodeState * Replace assertTrue(not \*) with assertFalse(\*) * Clean nova.compute.api.API:\_check\_num\_instances\_quota * Fix the duplicated image params in a test * Imported Translations from Transifex * Fix "fixed\_ip" parameters in unit tests * Removes the use of mutables as default args * Add API schema for v2.1/v3 create\_backup API * Catch ProcessExecutionError in revoke\_cert * Updated from global requirements * Sync oslo lockutils to nova * devref policy: code is canonical source of truth for API * Log cleanups for nova.virt.libvirt.volume * Log cleanups for nova.virt.libvirt.imagecache * Rename VolumeMapping to EC2VolumeMapping * ec2: Convert to use EC2InstanceMapping object * Add EC2InstanceMapping object for use in EC2 * Add hook for network info update * Enhance and test exception safety in hooks * Object-ify 
server\_password APIv3 extension * Object-ify server\_password APIv2 extension * Move the fixed\_ips APIv2 extension to use objects * Completely object-ify the floating\_ips\_bulk V2 extension * Add bulk create/destroy functionality to FloatingIP * Cleanup and gate on pep8 rules that are stricter in hacking 0.9 * VMware: update file permissions and mode * Downgrade log level when create network failed * Updated from global requirements * libvirt: Use VIR\_DOMAIN\_AFFECT\_LIVE for paused instances * Initialize objects field in ObjectsListBase class * Remove bdms from run\_instance RPC conductor call * Sync "Prevent races in opportunistic db test cases" * Imported Translations from Transifex * Check the network\_info obj type before invoke wait function * Migrate nvp-qos to generic name qos-queue * Add test for HypervisorUnavailable on conductor * Test force\_config\_drive as a boolean as last resort * Add helper functions for getting local disk * Add more logging to nova-network * Make resize raise exception when no valid host found * Fix doc for service list * Handle service creation race by service workers * Add configurable HTTP timeout to cinder API calls * Prevent clean-up of migrating instances on compute init * Deprecate neutron\_\* configuration settings * Skip migrations test\_walk\_versions instead of pass * Remove duplicate code in Objects create() function * Fix object change detection * Fix object leak in nova.tests.objects.test\_fields.TestObject * Failure during termination should always leave state as error() * Make check\_instance\_shared\_storage() use objects * Save connection info in libvirt after volume connect * Remove unused code from test\_compute\_cells * libvirt: Don't pass None for image\_meta parameter in tests * Revert "Allow admin user to get all tenant's floating IPs" * libvirt: Remove use of db for flavor extra specs in tests * libvirt: Close opened file explicitly * Network: ensure that ports are 'unset' when instance is deleted * Don't translate debug level logs in nova * maint: Fixes wrong docstring of method get\_memory\_mb\_used * Ensure changes to api.QUOTA\_SYNC\_FUNCTIONS are restored * Fix the wrong dest of 'vlan' option and add new 'vlan\_start' option * Add deprecation warning to nova baremetal virt driver * Fixes typo error in Nova * Attach/detach interface to paused instance with affect live flag * Block device API missing translations for exceptions * Enabled swap disk to be resized when resizing instance * libvirt: return the correct instance path while cleanup\_resize * Remove the device handling from pci device object * Use new pci device handling code in pci\_manager * Separate the PCI device object handling code * xenapi: move find\_vbd\_by\_number into volume utils * Virt: remove unnecesary return code * Fixes hyper-v volume attach when host is AD member * Remove variability from object change detection unit test * Remove XML namespace from some v3 extensions 2014.2.b1 --------- * xenapi: Do not retry snapshot upload on 500 * Fix H401,H402 violations and re-enable gating * Bump hacking to 0.9.x series * Change listen address on libvirt live-migration * Make get\_console\_output() use objects * Add testing for hooks * Handle string types for InstanceActionEvent exc\_tb serialization * Revert "Remove broken quota-classes API" * Revert "Remove quota-class logic from context and make unit tests pass" * Fix cold-migrate missing retry info after scheduling * Disable rescheduling instance when no retry info * Fix infinitely reschedule instance 
due to miss retry info * Use VIF details dictionary to get physical\_network * Fix live\_migration method's docstring * Add subnet routes to network\_info when Neutron is used * fix nova test\_enforce\_http\_true unit test * novncproxy: Setup log when start nova-novncproxy * Make sure domain exists before referencing it * Network: add instance to the debug statement * V3 Pause: treat case when driver does not implement the operation * Don't translate debug level logs in nova.virt * Remove duplicate method * websocketproxy: remove leftover debug output * Remove unnecessary else block in compute manager set\_admin\_password * Treat instance objects like objects in set\_admin\_password flow * Move set\_admin\_password tests from test\_compute.py to api/mgr modules * Fix a wrong comment in the code * maint: correct docstring parameter description * libvirt: Remove dated docstring * Add unit tests for ipv4/ipv6 format validation * Cleanup allocating networks when InstanceNotFound is raised * Add test to verify ironic api contracts * VMware: spawn refactor - phase 1 - test for spawn * Revert "Fix migration and instance resize update order" * Simplify filter\_scheduler.populate\_retry() * libvirt: Use os\_command\_line when kernel\_id is set * libvirt: Make nwfilter driver use right filterref * libvirt: convert cpu features attribute from list to a set * Don't log TRACE info in notify\_about\_instance\_usage * xenapi: add tests for find\_bad\_volumes * Revert "Remove traces of now unused host capabilities from scheduler" * Check the length of aggregate metadata * Add out of tree support dev policy * Deprecate instance\_get\_by\_uuid() from conductor * Make metadata password routines use Instance object * Make SecurityGroupAPI use Object instead of instance\_get\_by\_uuid() * Add development policies section to devref * Add read\_only field attribute * Fix api direct use of instance module objects * Fix direct use of block\_device module objects * Fix InstanceActionEvent traceback parameter not serializable * Fix state mutation in cells image filter * libvirt: split and test finish\_migration disk resize * Use no\_timer\_check with soft-qemu * Add missing translation support * Update HACKING.rst to include N320 * Add tests to avoid inconsistent extension names * VMware: spawn refactor - Datastore class * VMware: remove dsutil.split\_datastore\_path * VMware: spawn refactor - DatastorePath class * Updated from global requirements * VMware: Fix memory leaks caused by caches * Allow user to specify image to use during rescue - V3 API changes * VMware: create utility functions * Check if volume is bootable when creating an instance * VMware: remove unused parameters in imagecache * xenapi: virt unrescue method now supports objects * libvirt: virt unrescue method now supports objects * libvirt: virt rescue method now supports objects * xenapi: virt rescue method now supports objects * Remove useless codes for server\_group * Catch InstanceInfoCacheNotFound during build\_instances * Do not replace the aggregate metadata when updating az * Move oslotest to test only requirements * libvirt: merge two utils tests files * libvirt: remove redundant 'libvirt\_' prefix in test case names * xenapi: refactor detach volume * Add API schema for v2.1/v3 migrate\_server API * Adds IVS unit tests for new VIF firewall logic * Don't set CONF options directly in unit tests * Fix docstring typo in need\_legacy\_block\_device\_info * Revert "Partially remove quota-class logic from nova.quotas" * Revert "Remove 
quota\_class params from rest of nova.quota" * Revert "Remove quota\_class db API calls" * Revert "Convert address to str in fixed\_ip\_obj.associate" * String-convert IPAddr objects for FixedIP.attach() * Updated from global requirements * Run instance root device determination fix * xenapi: tidy up volumeops tests * Don't return from a finally block * Support detection of fixed ip already in use * Rewrite nova policy to use the new changes of common policy * Treat instance objects as objects in unrescue API flow * Treat instance objects as objects in rescue API flow * Refactor test\_rescue\_unrescue into compute api/manager unit tests * Sync oslo network utils * Fix EC2 not found errors for volumes and snapshots * xenapi: refactor volumeops attach * xenapi: remove calls to call\_xenapi in volumeops * xenapi: move StorageError into global exception.py * Virt: ensure that instance\_exists uses objects * Use objects through the run\_instance() path * Deprecate run\_instance and remove unnecessary code * Change conductor to cast to build\_and\_run\_instance * Fix migration and instance resize update order * remove cpu feature duplications in libvirt * Add unit test trap for object change detection * Sync periodic\_task from oslo-incubator * VCDriver - Ignore host in Maintenance mode in stats update * Enable flake8 F841 checking * Imported Translations from Transifex * Reverse order of cinder.detach() and bdm.delete() * Correct exception info format of v3 flavor manage * Imported Translations from Transifex * Handle NetworkInUse exception in api layer * Correct exception handling when create aggregate * Properly skip coreutils readlink tests * Record right action name while migrate * Imported Translations from Transifex * Fix for multiple misspelled words * Refactor test to ensure file is closed * VM in rescue state must have a restricted set of actions * versions API: ignore request with a body * xenapi: fix live-migrate with volume attached * Add helper methods to convert disk * XenAPI: Tolerate multiple coalesces * Add helpers to create per-aggregate filters * Ensure live-migrate reverts if server not running * Raise HTTPInternalServerError when boot\_from\_volume with cinder down * Imported Translations from Transifex * [EC2]Correct the return status of attaching volume * Fix security group race condition while creating rule * VMware: spawn refactor - phase 1 - copy\_virtual\_disk * Catch InstanceNotFound exception if migration fails * Inject expected results for IBM Power when testing bus * Fix InstanceActionTestCase on PostgreSQL/MySQL * Fix ReservationTestCase on PostgreSQL * VMware: deprecate ESX driver from virt configuration * Add new ec2 instance db API calls * Remove two unused db.api methods * Fix direct use of aggregate module objects * Fix tests/compute direct use of instance module objects * share neutron admin auth tokens * Fix nova image-show with queued image * Catch missing Glance image attrs with None * Align internal image API with volume and network * Do not wait for neutron event if not powering on libvirt domain * Mask block\_device\_info auth\_password in virt driver debug logs * Remove all mostly untranslated PO files * Payload meta\_data is empty when remove metadata * Handle situation when key not memcached * Fix nova/compute direct use of instance module objects * Address issues with objects of same name * Register objects in more services * Imported Translations from Transifex * Default dhcp lease time of 120s is too short * Add VIF mac address to fixed\_ips in 
notifications * Call \_validate\_instance\_group\_policy in \_build\_and\_run\_instance * Add refresh=True to get\_available\_nodes call in build\_and\_run\_instance * Add better coverage support under tox * remove unneeded call to network\_api on detach\_interface * Cells: Pass instance objects to build\_instances * XenAPI: Add logging information for cache/download duration * Remove spaces from SSH public key comment * Make hacking test more accurate * Fix security group race condition while listing and deleting rules * On rebuild check for null image\_ref * Add a reference to the nova developer documentation * VMware: use default values in get\_info() when properties are missing * VMware: uncaught exception during snapshot deletion * Enforce query order for getting VIFs by instance * Fix typo in comment * Allow admin user to get all tenant's floating IPs * Defer applying iptable changes when nova-network start * Remove traces of now unused host capabilities from scheduler * Add log translation hints * Imported Translations from Transifex * Fix CIDR values denoting hosts in PostgreSQL * Sync common db and db/sqlalchemy * Remove quota\_class db API calls * Remove quota\_class params from rest of nova.quota * Fix wrong quota calculation when deleting a resizing instance * Ignore etc/nova/nova.conf.sample * Fix wrong method name assert\_called\_once * Correct pci resources log * Downgrade log when attach interface can't find resources * Fixes Hyper-V iSCSI target login method * VMware: spawn refactor - phase 1 - fetch\_image * vmware:Don't shadow builtin function type * Partially remove quota-class logic from nova.quotas and test\_quotas * Convert address to str in fixed\_ip\_obj.associate * Accurate exception info in api layer for aggregate * minor corrections to devref rpc page * libvirt: Handle unsupported host capabilities * Fix the duplicated extension summaries * Imported Translations from Transifex * Raise more information on V2 API volumes when resource not found * Remove comments since it's pointless * Downgrade and fix log message for floating ip already disassociated * Fix wrong method name for test\_hacking * Imported Translations from Transifex * Add specific regexp for timestamps in v2 xml * VMWare: spawn refactor - phase 1 - create\_virtual\_disk * VMware: spawn refactor - phase 1 - power\_on\_vm * Move tests into test\_volume\_utils * Tidy up xenapi/volume\_utils.py * Updated from global requirements * VMware: Fix usage of an alternate ESX/vCenter port * VMware: Add check for datacenter with no datastore * Remove unused instance\_update() method from virtapi * Make baremetal driver use Instance object for updates * Rename quota\_injected\_file\_path\_bytes * Imported Translations from Transifex * Fixes arguments parsing when executing command * Remove explicit dependency on amqplib * Deprecate action\_event\_\*() from conductor * Remove conductor usage from compute.utils.EventReporter * Unit test case for more than 1 ephemeral disks in BDM * Network: replace neutron check with decorator * Update links in README * Add mailmap entry * XenAPI: Remove unneeded instance argument from image downloading * XenAPI: adjust bittorrent settings * Fix a minor comments error * Code Improvement * Fix the explanation of HTTPNotFound for cell showing v2 API * Add Nova API Sample file & test for get keypair * Add a docstring to hacking unit tests * Make libvirt driver use instance object for updates * Make vmwareapi/vmops use Instance object for updates * Convert xenapi/vmops uses of 
instance\_update to objects * Make xenapi agent code use Instance object for updates * Check object's field * Use Field in fixed\_ip * Remove logging in libvirt \_connect\_auth\_cb to avoid eventlet locking * Fix v3 API extension names for camelcase * VMware: prevent image snapshot if no root disk defined * Remove unnecessary cleanup in test * Raise HTTPForbidden from os-floating-ips API rather than 404 * Improve hacking rule to avoid author markers * Remove and block DB access in dhcpbridge * Improve conductor error cases when unshelving * Dedup devref on unit tests * Shrink devref.unit\_tests, since info is in wiki * Fix calls to mock.assert\_not\_called() * VMware: reduce unit test times * Fix wrong used ProcessExecutionError exception * Clean up openstack-common.conf * Revert "Address the comments of the merged image handler patch" * Remove duplicated import in unit test * Fix security group list when not defined for an instance * Include pending task in log message on skip sync\_power\_state * Make cells use Fault obj for create * libvirt: Handle \`listDevices\` unsupported exception * libvirt: Stub O\_DIRECT in test if not supported * Deprecate instance\_fault\_create() from conductor * Remove conductor usage from add\_instance\_fault\_from\_exc() * Add create() method to InstanceFault object * Remove use of service\_\* conductor calls from xenapi host.py * Updated from global requirements * Optimize validate\_networks to query neutron only when needed * Remove quota-class logic from context and make unit tests pass * VMware: spawn refactor - phase 1 - execute\_create\_vm * xenapi: fixup agent tests * Don't translate debug level logs in nova.spice, storage, tests and vnc * libvirt: Refresh volume connection\_info after volume snapshot * Fix instance cross AZ check when attaching volumes * Raise descriptive error for over volume quota * Fix broken version responses * Don't translate debug level logs in objectstore, pci, rdp, servicegroup * Don't translate debug level logs in cloudpipe, hacking, ipv6, keymgr * Don't translate debug level logs in nova.cert, console and consoleauth * Don't translate debug level logs in nova.cmd and nova.db * Don't translate debug level logs in nova.objects * Don't translate debug level logs in nova.compute * Fix bad Mock calls to assert\_called\_once() * VCDriver - No longer returns uptime due to multiple hosts * Make live\_migration use instance objects * wrap\_check\_security\_groups\_policy is already defined * Updated from global requirements * Don't translate debug level logs in nova.conductor * Don't translate debug level logs in nova.cells * Use strtime() specific timestamp regexp * Use datetime object for fake network timestamps * Use datetime object for stub created\_at timestamp * Verify created\_at cloudpipe timestamp is isotime * Verify next-available limit timestamps are isotime * Verify created/updated timestamps are isotime * Use timeutils.isotime() in images view builder * Use actual fake timestamp in API templates * Normalize API extension updated timestamp format * Regenerate API samples for GET /extensions * objects: remove unused utils module * objects: restore some datetime field comments * Add fault wrapper for rescue function * Add x-openstack-request-id to nova v3 responses * Remove unnecessary wrapper for 5 compute APIs * Update block\_device\_info to contain swap and ephemeral disks * Hacking: add rule number to HACKING.rst * Create the image mappings BDMs earlier in the boot * Delete in-process snapshot when deleting instance * 
Imported Translations from Transifex * Fixed many typos * VMware: remove unneeded code * Rename NotAuthorized exception to Forbidden * Add warning to periodic\_task with interval 0 * Fix typo in unit tests * Remove a bogus and unnecessary docstring * Don't translate debug level logs in nova.api * Don't translate debug level logs in nova.volume * VMware: remove duplicate \_fake\_create\_session code * libvirt: Make \`fakelibvirt.libvirtError\` match * ec2utils: Use VolumeMapping object * ec2: create volume mapping using nova object * Add VolumeMapping object for use in EC2 * Add new ec2 volume db API calls * Remove legacy block device usage in ec2 API * Deprecate instance\_get\_active\_by\_window\_joined() from conductor * Deprecate instance\_get\_all\_by\_filters() from conductor * Don't translate debug level logs in nova.network * Fix bad param name in method docstring * Nova should pass device\_id='' instead of None to neutron.update\_port() * Set default auth\_strategy to keystone * Support multi-version pydevd * replace NovaException with VirtualInterfaceCreate when neutron fails * Spice proxy config setting to be read from the spice group in nova.conf * xenapi: make auto\_config\_disk persist boot flag * Deprecate compute\_unrescue() from conductor * Deprecate instance\_destroy() from conductor * libvirt: fix comment for get\_num\_instances * Fix exception message being changed by nested exception * DescribeInstances in ec2 shows wrong image-message * Imported Translations from Transifex * Remove unused nova.crypto.compute\_md5() * VMware: spawn refactor - phase 1 - get\_vif\_info * Remove comments and to-do for quota inconsistency * Set the volume access mode during volume attach * Fix a typo in compute/manager::remove\_volume\_connection() * XenAPI: Use local rsync rather than remote if possible * Delete image when backup operation failed on snapshot step * Fix migrate\_instance\_\*() using DB for floating addresses * Ignore errors when deleting non-existing vifs * Use eventlet.tpool.Proxy for DB API calls * Improve performance for checking hosts AZs * Correct the log in conductor unshelve\_instance * Imported Translations from Transifex * Make instance\_exists() take an instance, not instance\_name * Xen: Retry plugin call after connection reset * Remove metadata's network-api dependence on the database * Add helper method to determine disk size from instance properties * Deprecate nova-manage flavor subcommand * Updated from global requirements * Imported Translations from Transifex * VMware: remove unused variable * Scheduler: enable scheduler hint to pass the group name * Loosen import\_exceptions to cover all of gettextutils * Don't translate debug level scheduler logs * VMWare - Check for compute node before triggering destroy * Update version aliases for rpc version control * make ec2 errors not useless * VMware: ensure rescue instance is deleted when instance is deleted * Ensure info cache updates don't overwhelm cells * Remove utils.reset\_is\_neutron() to avoid races * Remove unnecessary call to fetch info\_cache * Remove deprecated config option names: Juno Edition * Don't overwrite instance object with dict in \_init\_instance() * Add specific doc build option to tox * Fix up import of conductor * Use one query instead of two for quota\_usages * VMware: Log additional details of suds faults * Disable nova-manage network commands with Neutron V2 * Fix the explanations of HTTPNotFound for keypair's API * remove unneeded call to network\_api on rebuild\_instance * Deprecate 
network\_migrate\_instance\_\* from conductor * Deprecate aggregate\_host\_\* operations in conductor * Convert instance\_usage\_audit() periodic task to objects * Return to using network\_api directly for migrations * Make \_is\_multi\_host() use objects * Remove unneeded call to fetch network info on shutdown * Instance groups: add method get\_by\_hint * Imported Translations from Transifex * GET details REST API next link missing 'details' * Don't explode if we fail to unplug VIFs after a failed boot * nit: correct docstring for FilterScheduler.schedule\_run\_instance * Revert "Fix network-api direct database hits in metadata server" * ec2: use BlockDeviceMappingList object * ec2: use SecurityGroup object * ec2: get services using ServiceList object * ec2: remove db.instance\_system\_metadata usage * Remove nova-clear-rabbit-queues * Allow -1 as the length of "get console output" API * Fix AvailabilityZone check for hosts in multiple aggregates * Move \_get\_locations to module level plus tests * Define constants for the VIF model types * Imported Translations from Transifex * Make aggregate host operations use Aggregate object * Convert poll\_rescued\_instances() periodic task to objects * Make update\_available\_resource() use objects * Add get\_by\_service() method to ComputeNodeList object * Add with\_compute\_node to service\_get() * Make \_get\_compute\_info() use objects * Pass configured auth strategy to neutronclient * Imported Translations from Transifex * Make quota rollback checks more robust in conductor tests * Updated from global requirements * Remove duplicate code from nova.db.sqlalchemy.utils * Downgrade the log level when automatic confirm\_resize fails * Refactor unit tests for image service CRUD * Finish \_delete\_instance() object conversion * Make detach\_volume() use objects * Add lock on API layer delete floating IP * ec2: Convert instance\_get\_by\_uuid calls to objects * Fix network-api direct database hits in metadata server * Scheduler: remove test scheduling methods that are not used * Add info\_cache as expected attribute when evacuate instance * Make compute manager use network api method return values * Allow user to specify image to use during rescue - V2 API changes * Allow user to specify image to use during rescue * Use debug level logging in unit tests, but don't save them * Update user\_id length to match Keystone schema in volume\_usage\_cache * Avoid the possibility of truncating disk info file * Read deleted instances during lifecycle events * Add RBAC policy for ec2 API security groups calls * compute: using format\_message() to convert exception to string * support local debug logging * Fix bug detach volume fails with "KeyError" in EC2 * Fix straggling uses of direct-to-database queries in nova-network * Xen: Do not resize root volumes * Remove mention of nova-manage.conf from nova-manage.rst * XenAPI: Add host information to glance download logs * Check image exists before calling inject\_data * xenapi: Cleanup tar process on glance error * Missing catch InstanceNotFound in v3 API * Recover from POWERING-\* state on compute manager start-up * Remove the unused \_validate\_device\_name() * Adds missing expected\_errors for V3 API multinic extension * Correct test boundary for libvirt\_driver.get\_info * Updated from global requirements * Update docs to reflect new default filters * Enable ServerGroup scheduler filters by default * Revert "Use debug level logging during unit tests" * Remove redundant tests from Qcow2TestCase * libvirt: 
remove\_logical\_volumes should remove each separately * VMware: Fixes the instance resize problem * Fix anti-affinity server-group boot failure * Nova utils: add in missing translation * Add exception handling in "nova diagnostics" * mark vif\_driver as deprecated and log warning * Revert object-assuming changes to \_post\_live\_migration() * Revert object-assuming changes to \_post\_live\_migration() * libvirt: optimize pause mode support * Check for None or timestamp in availability zone api sample * Refactor Network API * Require admin context for interfaces on ext network * remove redundant copy of test\_cache\_base\_dir\_exists * Make sure leases are maintained until release * Add tests for remaining expected conductor exceptions * Fix Jenkins translation jobs * libvirt: pause mode is not supported by all drivers * Reduce config access in scheduler * VMWare: add power off vm before detach disk during unrescue * Reduce logging in scheduler * xenapi: add a test for \_get\_partitions * Refactor network\_utils to new call\_xenapi pattern * Sync request\_id middleware bug fix from oslo * Make example 'entry\_points' parameter a dictionary * Localized error exception message on delete host aggregate * Note that XML support \*may\* be removed * Change errors\_out\_migration decorator to work with RPC * low hanging fruit oslo-incubator sync * Fix description of ServerGroupAffinityFilter * Added test cases in ConfKeyManagerTestCase to verify fixed\_key * Moved the registration of lifecycle event handler in init\_host() * Change NotFound to InstanceNotFound in server\_diagnostics.py * Remove unnecessary passing of task\_state to check\_instance\_state * Rename instance\_actions v3 to server\_actions * Drop nova-rpc-zmq-receiver man-page * Correct the keypairs-get-resp.json API sample file * Make hypervisor\_version an int in fakeVirt driver * Ensure network interfaces are in requested order * Reserve 10 migrations for backports * XenAPI: Calculate disk\_available\_least * Open Juno development 2014.1.rc1 ---------- * Fix getting instance events on subsequent attempts * Improved logs for add/remove security group rules * VMware: remove double import * VMware: clean up VNC console handling * Make conductor expect ActionEventNotFound for action methods * Remove zmq-receiver from setup.cfg * Add a note about deprecated group filters * Fix the section name in CONTRIBUTING.rst * Fix display of server group members * Add new style instance group scheduler filters * Automatically create groups that do not exist * Add InstanceGroup.get\_by\_name() * Remove unnecessary check for CONF.notify\_on\_state\_change * Add nova.conf.sample to gitignore * Use binding:vif\_details to control firewall * Disable volume attach/detach for suspended instances * Updated from global requirements * Persist image format to a file, to prevent attacks based on changing it * Add test cases for validate\_extra\_spec\_keys * Catch InstanceInLocked exception for rescue and instance metadata APIs * Imported Translations from Transifex * Make 'VDI too big' more verbose * Use osapi\_glance\_link\_prefix for image location header * postgres incompatibility in InstanceGroup.get\_hosts() * Add missing test for None in sqlalchemy query filter * Use instance data instead of flavor in simple\_tenant\_usage extension * Sync oslo imageutils, strutils to Nova * Use correct project/user id in conductor.manager * fix the extension of README in etc/nova * Tell pip to install packages it sees globally * Change exception type from 
HTTPBadRequest to HTTPForbidden * Don't attempt to fill faults for instance\_list if FlavorNotFound * Bypass the database if limit=0 for server-list requests * Fix availability-zone option miss when creates an instance * No longer any need to pass admin context to aggregate DB API methods * Updated Setting up Developer Environment for Ubuntu * Change libvirt close callback to use green thread * Re-work how debugger CLI opts are registered * Imported Translations from Transifex * \_translate\_from\_glance() can cause an unnecessary HTTP request * Add UNSHELVING and RESCUING into IoOPSFilter consideration state * VMware: fix booting from volume * Do not add current tenant to private flavor access * Disable oslo.messaging debug logs * Update vm\_mode when rebuilding instance with new image * VMware: fix list\_instances for multi-node driver * VMware: Add utility method to retrieve remote objects * Use project/user from instance for quotas * Refactors unit tests of image service detail() * Refactors nova.image.glance unit tests for show() * Revert deprecation warning on Neutron auth * V2 API: remove unused imports * Change HTTPUnprocessableEntity to HTTPBadRequest * Rename \_post\_live\_migration instance\_ref arg * Add a decorator decorator that checks func args * Updated from global requirements * Instance groups: cleanup * Use the list when get information from libvirt * Remove unused quota\_\* calls from conductor * Use correct project/user for quotas * Include proper Content-Type in the HTTP Headers * Fix inconsistent quota usage for security group * Handling unlimited values when updating quota * Fix service API and cells * Remove unnecessary stubbing in test\_services * InvalidCPUInfo exception added to except block * VMware: fix exception when no objects are returned * Don't allow empty or 0 volume size for images * Wait till message handling is done on service stop * Remove PciDeviceList usage in pci manager * Fix the rpc module import in the service module * Revert "VMware Driver update correct disk usage stat" * Catch HostBinaryNotFound exception in V2 API * Ignore InstanceNotFound while getting console output * Raise error on nova-api if missing subnets/fixed\_ips on networks/port * Fix the explanations of HTTPNotFound for new APIs * Remove the nova.config.sample file * Refuse to block migrate instances with config drive * Include next link when default limit is reached * Catch NotImplementedError on Network Associate * VMware: add a file to help config the firewall for vnc * Change initial delay for servicegroup api reporting * Fix KeyError if neutron security group is not TCP/UDP/ICMP and no ports * Prevent rescheduling on block device failure * Check if nfs/glusterfs export is already mounted * Make compute API resize methods use Quotas objects * Remove commented out code in test\_cinder\_cloud * Update quantum to neutron in comment * Add deleted\_at attribute in glance stub on delete() * Add API sample files of "unshelve a server" API * Remove unused method from fake\_network.py * Don't refresh network cache for instances building or deleting * GlanceImageService static methods to module scope * Remove XenAPI driver deprecation warning log message * VMware: bug fix for host operations when using VMwareVCDriver * xenapi: boot from volume without image\_ref * Use HTTPRequestV3 instead of HTTPRequest in v3 API tests * Cells: Send instance object for instance\_delete\_everywhere * Fix "computeFault" when v3 API "GET /versions/:(id)" is called * VMware: ensure that the task 
completed for resize operation * Change parameters of add\_timestamp in ComputeDriverCPUMonitor class * Cells API calls return 501 when cells disabled * Add version 2.0 of conductor rpc interface * Added missing raise statement when checking the config driver format * Make NovaObject report changed-ness of its children * Increase volume creation max waiting time * Remove action-args from nova-manage help * VMware: fix rescue disk location when image is not linked clone * Fix comment for block\_migration in nova/virt/libvirt/driver.py * Don't import library guestfs directly * Correct inheritance of nova.volume.cinder.API * VMware: enable booting an ISO with root disk size 0 * Remove bad log message in get\_remote\_image\_service * Raise NotImplementedError in NeutronV2 API * Remove block\_device\_mapping\_destroy() from conductor API * Make sure instance saves network\_info when we go ACTIVE * Fix sqlalchemy utils test cases for SA 0.9.x * Fix equal\_any() DB API helper * Remove migration\_update() from conductor API * Remove instance\_get() from conductor API * Remove aggregate\_get\_by\_host() from conductor API * add support for host driver cleanup during shutdown * Add security\_group\_rule to objects registry * Remove aggregate\_get() from conductor API * Delete meaningless lines in test\_server\_metadata.py * Imported Translations from Transifex * Move log statement to expose actually info\_cache value * Fix input validation for V2 API server group API extension * Adds test for rebuild in compute api * Specify spacing on periodic\_tasks in manager.py * network\_info cache should be cleared before being rescheduled * Don't sync [system\_]metadata down to cells on instance.save() * Fixes the Hyper-V agent individual disk metrics * VMware: remove unused code (\_delete method in vmops.py) * Fix docstring for shelve\_offload\_instance in compute manager * Block database access in nova-network binary * Make nova-network use conductor for security groups refresh * Make nova-network use quotas object * Reverts change to default state\_path * Fix raise\_http\_conflict\_for\_instance\_invalid\_state docstring * Cells: Pass instance objects to update/delete\_instance\_metadata * Don't detach root device volume * Revert "Adding image multiple location support" * Revert "Move libvirt RBD utilities to a new file" * Revert "enable cloning for rbd-backed ephemeral disks" * Add helper method for injecting data in an image * Add helper method for checking if VM is booting from a volume * Libvirt: Repair metadata injection into guests * Make linux\_net use objects for last fixed ip query * Add get\_by\_network() to FixedIPList * Update aggregate should not allow duplicated names * Recover from REBOOT-\* state on compute manager start-up * VMware: raise an exception for unsupported disk formats * VMware: ensure that deprecation does not appear for VC driver * rename ExtensionsResource to ExtensionsController * Ensure is\_image\_available handles V2 Glance API * libvirt: fix blockinfo get\_device\_name helper * Log Content-Type/Accept API request info * Remove the docker driver * xenapi: Speed up tests by not waiting on conductor * Updated from global requirements * xenapi: Fix test\_rescue test to ensure assertions are valid * VMware: image cache aging * Add py27local tox target * Fix broken API os-migrations * Catch FloatingIpNotFoundForHost exception * Fix get\_download\_hander() typo * Handle IpAddressGenerationClient neutron * Delete ERROR+DELETING VMs during compute startup * VMware: delete vm 
snapshot after nova snapshot * Fix difference between mysql & psql of flavor-show * Add version 3.0 of scheduler rpc interface * Make libvirt wait for neutron to confirm plugging before boot * Task cleanup\_running\_deleted\_instances can now use slave * Do not add HPET timer config to non x86 targets * Make test computations explicit * Instance groups: only display valid instances for policy members * Don't allow reboot when instance in rebooting\_hard * VMware: add missing translations * Fix typo and add test for refresh\_instance\_security\_rules * Add declaration of 'refresh\_instance\_security\_rules' to virt driver * Remove mention of removed dhcp\_options\_enabled * Fix compute\_node stats * Fix: Unshelving an instance uses original image * Noted that tox is the preferred unit tester * Updated development.environment.rst * Use instance object instead of \_instance\_update() * Remove compute virtapi BDM methods * enable cloning for rbd-backed ephemeral disks * Move libvirt RBD utilities to a new file * Fixup debug log statements in the nova compute manager * Use debug level logging during unit tests * Fix debug message formatting in server\_external\_events * VMware: VimException \_\_str\_\_ attempts to concatenate string to list * Mark ESX driver as deprecated * Volume operations should be blocked for non-null task state * xenapi: fix spawn servers with ephemeral disks * Fixes NoneType vcpu list returned by Libvirt driver * Add conversion type to LOG.exception's string * Remove compute API get\_instance\_bdms method * Move run\_instance compute to BDM objects * Move live migration callbacks to BDM objects * Instance groups: validate policy configuration * Add REST API for instance group api extension * VMware: boot from iso support * Store neutron port status in VIF model * Correct network\_model tests and \_\_eq\_\_ operator * Make network\_cache more robust with neutron * Error out failed migrations * Fix BDM legacy usage with objects * Fix anti-affinity race condition on boot * Initial scheduler support for instance\_groups * Add get\_hosts to InstanceGroup object * Add instance to instance group in compute.api * Add add\_members to InstanceGroup object * Remove run-time dependency on fixtures module by the nova baremetal * Make compute manager prune instance events on delete and migrate * Make compute manager's virtapi support waiting for events * Add os-server-external-events V3 API * Add os-server-external-events API * Add external\_instance\_event() method to compute manager * Fix invalid vim call in vim\_util.get\_dynamic\_properties() * Rescue API handle NotImplementedError * VMware: Add a test helper to mock the suds client * VMware: Ensure test VM is running in rescue tests * Move \_poll\_volume\_usage periodic task to BDM objects * Move instance\_resize code paths to BDM objects * Make swap\_volume code path use BDM objects * Fix log messages typos in rebuild\_instance function * Move detach\_volume and remove\_vol\_connection to BDM objects * Move instance delete to new-world BDM objects * VMware ESX: Boot from volume must not relocate vol * Fix development environment docs for redhat-based systems * neutron\_metadata\_proxy\_shared\_secret should not be written to log file * VMware: create datastore utility functions * Address the comments of the merged image handler patch * Ignore the image name when booting from volume 2014.1.b3 --------- * Fix typo in devref * VMware: refactor \_get\_volume\_uuid * Add return value to some network API methods * Fixing host\_ip 
configuration help message * No longer call check\_uptodate.sh in pep8 * notifier middleware broken by oslo.messaging * regenerate the config file to support 1.3.0a9 * Add doc update for 4 filters which is missing in filter\_scheduler.rst * Remove 3 unnecessary variables in scheduler * Adding image multiple location support * Move all shelve code paths to BDM objects * Move rebuild to BDM objects * sync sslutils to not conflict with oslo.messaging * Accurate comment in compute layer * Refactor xenapi/host.py to new call\_xenapi pattern * Add a missing space in a log message * VMware: iscsi target discovery fails while attaching volumes * Remove warn log in quota function on API layer * Sync the latest DB code from oslo-incubator * Prevent thrashing when deploying many bm instances * Support configuring libvirt watchdog from flavors * Add watchdog device support to libvirt driver * Remove extra space at the end of help string * Port libvirt copy\_image tests to mock * Updated from global requirements * Sync latest Guru Meditation Reports from Oslo * Skip sqlite-specific tests if sqlite is not configured * VMware: add in debug information for network selection * vmwareapi:Fix nova compute service down issue when injecting pure IPv6 * Make compute use quota object existing function * Fixes api samples for V2 os-assisted-volume-snapshots * Raise exception if volume snapshot id not found instead of return * Added os-security-groups prefix * VMware Driver update correct disk usage stat * attach/detach interface should raise exception when instance is locked * Restore get\_available\_resource method in docker driver * Make compute manager use InstanceInfoCache object for deletes * Deprecate conductor instance\_type\_get() and remove from VirtAPI * Make restore\_instance pass the Instance object to compute manager * Use uuid instead of name for lvm backend * Adds get\_console\_connect\_info API * Remove log\_handler module from oslo-incubator sync * Remove deleted module flakes from openstack-common.conf * When a claim is rejected, explain why * Move xenapi/agent.py to new call\_xenapi style * xenapi plugins: Make sure subprocesses finish executing * Update Oslo wiki link in README * Refactor pool.py to remove calls to call\_xenapi * Move vbd plug/unplug into session object * xenapi: make session calls more discoverable * Make error notifications more consistent * Adds unit test for etc/nova/policy.json data * Support IPv6 when booting instances * xenapi: changes the debug log formatting * libvirt: raises exception when attempt to resize disk down * xenapi: stop destroy\_vdi errors masking real error * Make resource\_tracker use Flavor object * Make compute manager use Flavor object * Make baremetal driver use Flavor object instead of VirtAPI * Sync latest config file generator from oslo-incubator * Fixes evacuate doesn't honor enable password conf for v3 * Removed copyright from empty files * Fix the explanations of HTTPNotFound response * VMware: support instance objects * Add support for tenant\_id based authentication with Neutron * Remove and recreate interface if already exists * Prevent caller from specifying id during Aggregate.create() * Enable flake8 H404 checking * Imported Translations from Transifex * Fix logic for aggregate\_metadata\_get\_by\_host\_with\_key test case * Use oslo-common's logging fixture * Re-Sync oslo-incubator fixtures * Updated from global requirements * Update pre\_live\_migration to take instance object * Remove unused method inject\_file() * Remove db query 
from deallocate\_fixed\_ip * update deallocate\_for\_instance to take instance obj * Update server\_diagnostics to use instance object * Move the metrics update to get\_metrics * Unmount the NFS and GlusterFS shares on detach * Add a caching scheduler driver * libvirt: image property variable already defined * Replaces exception re-raising in Hyper-V * Remove blank space after print * VMware: add instance detail to detach log message * libvirt: Enable custom video RAM setting * Remove trailing comma from sample JSON * Add pack\_action\_start/finish helper to InstanceAction object * Rewrite InstanceActionEvent object testcase using mock * Clean up \_make\_\*\_list in object models to use base.obj\_make\_list * libvirt: remove explicit /dev/random rng default * Document virt driver methods that take Instance objects * Make interface attach and detach use objects * Pass instance object to soft\_delete() and get\_info() * libvirt: setting a correct driver name for iscsi volumes * libvirt: host specific virtio-rng backend * Fix HTTP methods for test\_attach\_interfaces * Fix the calls of webob exception classes * VMware: remove unused parameter from \_wait\_for\_task * Downgrade the log level for floating IP associate * Removing redundant validation for rebuild request * VMware: add a test for driver capabilities * Catch HostBinaryNotFound exception when updating a service * VMware: ensure that datastore name exists prior to deleting disk * Move compute's \_get\_instance\_volume\_block\_device\_info to BDM objects * Use disk\_bus and device\_type in attaching volumes * Add device bus and type to virt attach\_volume call * Make volume attach use objects * compute: invalid gettext message format * VMware: fix the VNC port allocation * VMware: fix datastore selection when token is returned * Hyper-V log cleanups * vmware: driver races to create instance images * Introduce Guru Meditation Reports into Nova * Updated from global requirements * Revert "VMware: fix race for datastore directory existence" * Use instance object for delete * Update ubuntu dev env instructions * VMware: fix race for datastore directory existence * libvirt: adding a random number generator device to instances * Add 'use\_slave' to instance\_get\_all\_by\_filter in conductor * Fix the validation of flavor\_extraspecs v2 API * Make webob.exc.HTTPForbidden return correct message * Use image from the api in run\_instance, if present * Remove unused variables in the xenapi.vmops module * Describe addresses in ec2 api broken with neutron * Cleanup v3 test\_versions * Fix import order in log\_handler * Emit message which merged user-supplied argument in log\_handler * Adds service request parameter filter for V3 API os-hosts request * Fix comment typo in nova/compute/api.py * stop throwing deprecation warnings on init * Remove broken quota-classes API * VMware: fix instance lookup against vSphere * Add a new compute API method for deleting retired services * Fix instance\_get\_all\_by\_host to actually use slave * Periodic task poll\_bandwidth\_usage can use slave * Partially revert "XenAPI: Monitor the GC when coalescing" * Mark XML as deprecated in the v2 API * adjust version definition for v3 to be only json * Fix option indenting in compute manager * Adds create backup server extension for the V3 API * Catch InstanceNotFound exceptions for V2 API instance\_actions * Sync log.py from oslo * Make floating\_ips module use FloatingIP for associations * Remove \_\_del\_\_ usage in vmwareapi driver * Fixed spelling errors in nova * 
LibVirt: Disable hairpin when using Neutron * VMware: optimize instance reference access * Serialize the notification payload in json * Add resource tracking to unshelve\_instance() * Typo in the name 'libvirt\_snapshot\_compression' * Refactor driver BDM attach() to cover all uses * Fix assertEqual parameter order post V3 API admin-actions-split * Fix copyright messages after admin actions split for V3 API * Catch InstanceNotFound exceptions for V2 API virtual interfaces * Correct the assert() order in test\_libvirt\_blockinfo * Use disk\_bus when guessing the device name for vol * libvirt: add virtio-scsi disk interface support * libvirt: configuration element for virtual controller * VMware: factor out management of controller keys and unit numbers * Remove unused notifier and rpc modules from oslo sync * Imported Translations from Transifex * Remove XML support from schemas v3 * Treat port attachment failures correctly * Add experimental warning for Cells * Add boolean convertor to "create multiple servers" API * VMware: prevent race for vmdk deletion * VMware: raise more specific exceptions * Disable IGMP snooping on hybrid Linux bridge * libvirt: remove retval from libvirt \_set\_host\_enabled() * VMware: remove unused class * compute: format\_message is a method not an attribute * MetricsWeigher: Added support of unavailable metrics * Fix incorrect kwargs 'reason' for HTTPBadRequest * Fix the indents of v3 API sample docs * Refactor get\_iscsi\_initiator to a common location * Fix compute\_node\_update() compatibility with older clients * XenAPI: Add the mechanism to attach a pci device to a VM * Remove underscore for the STATE\_MAP variable * XenAPI: Add the support for updating the status of the host * libvirt: support configurable wipe methods for LVM backed instances * Fix InstanceNotFound error in \_delete\_instance\_files * Ensure parent dir exists while injecting files * Convert post\_live\_migration\_at\_destination to objects * Convert remove\_fixed\_ip\_to\_instance to objects * Convert add\_fixed\_ip\_to\_instance to objects * Fix invalid facilities documented in rootwrap.conf * VMware: improve unit test time * Replace assertEqual(None, \*) with assertIsNone in tests * Add comment/doc about utils.mkfs in rootwrap * Add mkfs to the baremetal-deploy-helper rootwrap * libvirt-volume: improve unit test time * Move consoleauth\_manager option into nova.service and fix imports * libvirt: improve unit test time * Imported Translations from Transifex * Make is\_neutron() thread-safe * Update the mailmap * Rewrite InstanceAction object test cases using mock * Make floating\_ips module use FloatingIP for updates * Make floating\_ips module use FloatingIP for (de-)allocations * Make floating\_ips module use FloatingIP for all get queries * Make floating\_ips module use Service object * Make floating\_ips module use Instance object * Make floating\_ips module use Network object * Make floating\_ips module use FixedIP object * Fix break in vm\_vdi\_cleaner after oslo changes * Fixes the Hyper-V VolumeOpsTestCase base class * libvirt: Uses available method get\_host\_state * Add V3 api for pci support * Update docstring for baremetal opportunistic tests * Fix upper bound checking for flavor create parameters * Fixed check in image cache unit test * Count memory and disk slots once in cells state manager * changed quantum to neutron in vif-openstack * Convert unrescue\_instance to objects * Don't allow compute\_node free\_disk\_gb to be None * compute: removes unnecessary condition * 
Rename Openstack to OpenStack * Support setting a machine type to enable ARMv7/AArch64 guests to boot * Catch InstanceNotFound exceptions for V2 API floating\_ips * Explicity teardown on error in libguestfs setup() * Catch InstanceNotFound exceptions for V2 API deferred delete * Replace oslo.sphinx with oslosphinx * Change assertTrue(isinstance()) by optimal assert * Make nova\_ipam\_lib use Network, FixedIP, and FloatingIP objects * Make nova-network use FixedIP for timeouts * Make nova-network use FixedIP object for updates * Make nova-network use FixedIP object for disassociations * Use six.moves.urllib.parse instead of urlparse * Add "body=" argument to v3 API unit tests * Remove unused methods * Adds migrate server extension for V3 API * Move policy check of start/stop to api layer * Refactor stats to avoid bad join * Remove @author from copyright statements * Remove character filtering from V3 API console\_output * DB: logging exceptions should use save\_and\_reraise * Fix incorrect check in aggregate/az test * xenapi: set viridian=false for linux servers * Delete baremetal image files after deployment * Make sure "volumeId" in req body on volume actions * Removes console output plugin from the core list * Using six.add\_metaclass * Fix bad log formatting * Remove quota classes extension from the V3 API * Group kvm image\_meta tests for get\_disk\_bus * Prefix private methods with \_ in docker driver * Fix the sample and unittest params of v3 scheduler-hints * Add a instance lookup helper to v3 plugins * Use raw string notation for regexes in hacking checks * Improve detection of imports in hacking check * Renumber some nova hacking checks * Docker cannot start a new instance because of an internal error * libvirt: configuration element for a random number generator device * VMware: fix instance rescue bug * Fix run\_tests.sh lockutils when run with -d * Adds tests to sqlachemy.api.\_retry\_on\_deadlock * Replace detail for explanation msgs on webob exceptions * Allow operators to customize max header size * Prevent caller from specifying id during Migration.create() * Prevent caller from specifying id during KeyPair.create() * Prevent caller from specifying id during Service.create() * Prevent caller from specifying id during ComputeNode.create() * Clean IMAGE\_SNAPSHOT\_PENDING state on compute manager start up * Fix trivial typo in libvirt test comment * Refactoring metadata/base * Removes XML namespace from V3 API test\_servers * correct the bugs reference url in man documents * Objectify instance\_action for cell scheduler * Remove tox locale overrides * libvirt: use to\_xml() in post\_live\_migration\_at\_destination * Removes os-instance-usage-audit-log from the V3 API * VMware: update test name * VMware: improve unit test performance * Fix english grammar in the quota error messages * Removes os-simple-tenant-usage from the V3 API * Fix a couple of unit test typos * Add HEAD api response for test s3 server BucketHandler * Removes XML support from security\_groups v3 API * Hyper-V driver RDP console access support * Make consoleauth token verification pass an Instance object * Adds RDP console support * Fix migrations changing the type of deleted column * Add hpet option for time drifting * Typo in backwards compat names for notification drivers * Support building wheels (PEP-427) * Fix misspellings in nova * Disable file injection in baremetal by default * Drop unused dump\_ SQL tables * Convert rescue\_instance to objects * Convert set\_admin\_password to objects * The 
object\_compat decorator should come first * Default video type to 'vga' for PowerKVM * Sync latest db.sqlalchemy from oslo-incubator * Guard against oversize flavor rxtx\_factor float * Make libvirt use Flavor object instead of using VirtAPI * Fix instance metadata tracking during resets * Make delete\_instance\_metadata() use objects * Break out the meat of the object hydration process * V2 Pause: treat case when driver does not implement the operation * VMware: fix bug for exceptions thrown in \_wait\_for\_task * Nova Docker: Metadata service doesn't work * nova: use RequestContextSerializer for notifications * Fix auto instance unrescue after poll period * Fix typos in hacking check warning numbers * Fix exception handling miss in remote\_consoles * Don't try to restore VM's in state ERROR * Make it possible to disable polling for bandwidth usage * XenAPI: Monitor the GC when coalescing * Revert "Allow deleting instances while uuid lock is held" * report port number for address already in use errors * Update my mailmap * libvirt: Adds missing tests to copy\_image * Sync latest gettextutils from oslo-incubator * Make change\_instance\_metadata() use objects * Add XenAPI driver deprecation warning log message * Adds host\_ip to hypervisor show API * VMware: update the default 'task\_poll\_interval' time * Fixes Hyper-V VHDX snapshot bigger than instance * Define common "name" parameter for Nova v3 API * Stacktrace on error from libvirt during unfilter * Disable libvirt driver file injection by default * Add super call to db Base class * Fix baremetal stats type * Fix bittorrent URL configuration option * Fix VirtualInterfaceMacAddressException message * Add serializer capability to fake\_notifier * Avoid deadlock when stringifying NetworkInfo model * Add hacking test to block cross-virt driver code usage * Hyper-V: Change variable in debug log message * Rename API schema modules with removing "\_schema" * Fixed naming issue of variable in a debug statement formatting * Use new images when spawning BM instances * Remove get\_instance\_type and get\_active\_by\_window from nova compute API * Make the simple\_tenant\_usage API use objects * Add instance\_get\_active\_by\_window\_joined to InstanceList * Update nova.conf.sample for python-keystoneclient 0.5.0 * Add ESX quality warning * Set SCSI as the default cdrom bus for PowerKVM * Enforce FlavorExtraSpecs Key format * Fix scheduler\_hints parameter of v3 API * Remove vi modelines * VMware: Remove some unused variables * Fix a bug in v3 API doc * Move logging out of BDM attach method * Add missing translation support * libvirt: making set\_host\_enabled to be a private methods * Remove unused variable * Call get\_pgsql\_connection\_info from \_test\_postgresql\_opportunistically * Port to oslo.messaging * Sync latest config file generator from oslo-incubator * Test guestfs without support for close\_on\_exit * Make nova-network use FixedIP object for vif queries and bulk create * Make nova-network use FixedIP for host and instance queries * Make nova-network use FixedIP object for associations * Make nova-network use FixedIP for get\_by\_address() queries * Add FixedIP.floating\_ips dynamic property * Add FloatingIP object implementation * Add FixedIP Object implementation * Deal with old versions of libguestfs * Destroy docker container if spawn fails to set up network * Adds suspend server extension for V3 API * Adds pause server extension for V3 API * Removes XML namespace definitions from V3 API plugins * Remove XML support from 
migrations pci multiple\_create v3 API plugins * Remove extra space in log message * Allow deleting instances while uuid lock is held * Add 'icehouse-compat' to [upgrade\_levels] compute= * Make os-service API return correct error messages * Make fixed\_ip\_get\_by\_address() take columns\_to\_join * Refactor return value of fixed\_ip\_associate calls * Make nova-network use Network object for deleting networks * Make nova-network use Network for associations * Make nova-network use Network object for set\_host() operation * Make nova-network use Network object for updates * Make nova-network use Network object for remaining "get" queries * Make nova-network use NetworkList for remaining "all" queries * Make nova-network use Network object for get-all-by-host query * Make nova-network a "conductor-using service" * Ignore 'dynamic' addr flag on bridge configuration * Remove XML support from some v3 API plugins * xenapi: clean up step decorator fake steps * Use objects internally in DriverBlockDevice class * Make snapshot\_volume\_backed use new-world objects * Make volume\_snapshot\_{create,delete} use objects * Move compute API is\_volume\_backed to BDM objects * Add block device mapping objects implementation * XenAPI: Wait for VDI on introduce * Shelve: The snapshot should be removed when delete instance * Revert "Allow deleting instances while uuid lock is held" * Retry reservation commit and rollback on deadlock * Adds lock server extension for V3 API * Remove duplicated method in mock\_key\_mgr * Add quality warning for non-standard libvirt configurations * Add docker driver removal warning * Remove V3 API XML entry points * Remove XML support from admin\_password V3 API plugin * Remove XML support from certificates v3 API * Remove XML support from some v3 API plugins(e.g. 
services) * Remove XML support from some extension v3 API plugins * Remove XML support from some server v3 API plugins * Remove XML support from quota and scheduler\_hints v3 API plugins * Remove XML support from flavor v3 API plugins * Revert "Fix race conditions between imagebackend and imagecache" * Remove XML support from v3 API plugins * Remove unused methods * Remove trace XML from unittests * removing xml from servers.py * Remove xml unit tests for v3 api plugins * Remove v3 xml API sample tests * Adds dmcrypt utility module * Adds ephemeral\_key\_uuid field to instance * Error message is malformed when removing a sec group from an instance * Do not set root device for libvirt+Xen * Docker Set Container name to Instance ID * Fix init of pci\_stats in resource tracker * Catch NotImplementedError in get\_spice\_console in v2/v3 API * Minor changes to make certificates test cases use HTTPRequestV3 * VMware: Only include connected hosts in cluster stats * disk/api.py: refactors extends and adds missing tests * Make nova-network use Network to create networks * Make obj\_to\_primitive() handle netaddr types * Add Network object * Make service workers gracefully handle service creation race * support stevedore >= 0.14 * Increase the default retry for iscsi connects * Finish compacting pre-Icehouse database migrations * Compact pre-Icehouse database migrations <= 210 * Compact pre-Icehouse database migrations <= 200 * Compact pre-Icehouse database migrations <= 190 * Fix cache lock for image not consistent * VMware: fix image snapshot with attached volume * Use block\_device\_info at post\_live\_migration\_at\_destination * Update policy check on each action for certificates * Use (# of CPUs) workers by default * Remove policy check in db layer for aggregates * Remove unused configurations * VMware: fix exception when using multiple compute nodes * Remove copyright from empty files in nova * disk/api.py: resize2fs fails silently + adds tests * remove 2 unused function in test\_volumes.py * Update log message to support translations * PCI address should be uniform * Remove flavor-disabled related policy rules for v3 api * Remove get\_all\_networks from nova.network.rpcapi * Remove get\_network from nova.network.rpcapi * Update nova.network to use DNSDomain object * Remove some dead dnsdomain code * Add DNSDomain object * Add db.dnsdomain\_get\_all() method * Update linux\_net to use VirtualInterface * Update nova\_ipam\_lib to use VirtualInterface * libvirt: Review of the code to use module units * Update network.manager to use VirtualInterface * Imported Translations from Transifex * Updated from global requirements * Define "supported\_instances" for fake compute * Remove get\_vif\_by\_mac\_address from network rpcapi * Remove unused method from network rpcapi * Allow delete when InstanceInfoCache entry is missing * libvirt: Fix root disk leak in live mig * Additional check for qemu-nbd hang * Correct host managers free disk calculation * Correct the state for PAUSED instances on reboot * XenAPI: Use get\_VALUE in preference to get\_record()['VALUE'] * XenAPI: Speedup get\_vhd\_parent\_uuid * XenAPI: Report the CPU details correctly * XenAPI: Tidy calls to get\_all\_ref\_and\_rec * XenAPI: get\_info was very expensive * Fix bug with not implemented virConnect.registerCloseCallback * Make test\_poll\_volume\_usage\_with\_data more reliable * Re-write sqlite BigInteger mapping test * Small edits on help strings * Make floating\_ip\_bulk\_destroy deallocate quota if not auto\_assigned * Sync 
processutils from oslo-incubator * Create common method for MTU treatment * Move fake\_network config option to linux\_net * libvirt: move unnecesary comment * Sync log.py from oslo-incubator * hyperv: Retry after WMI query fails to find dev * vmwareapi:remove unused variables in volumeops * Fix docstring in libvirt.driver.LibvirtDriver.get\_instance\_disk\_info() * Hide VIR\_CONNECT\_BASELINE\_CPU\_EXPAND\_FEATURES where needed * Make test\_different\_fname\_concurrency less racy * VMware: improve exception logging in driver.py 2014.1.b2 --------- * Add instance faults during live\_migrate errors * VMware: use .get() to access 'summary.accessible' * Nova Docker driver must remove network namespace * Added a new scheduler filter for metrics * Sync module units from oslo * Join pci\_devices for servers API * VMware: fix missing datastore regex with ESX driver * Fix the flavor\_ref type of unit tests * Sync unhandled exception logging change from Oslo * Fix race conditions between imagebackend and imagecache * Add explicit discussion of dependencies to README.rst * Add host and details column to instance\_actions\_events table * Join pci\_devices when getting all servers in API * Add sort() method to ObjectListBase * Add VirtualInterface object * VMware: Fix incorrect comment indentation * vmwareapi: simple refactor of config drive tests * Fix multi availability zone issue part 2 * Make exception message more friendly * disable debug in eventlet.wsgi server * Alphabetize core list for V3 API plugins * Ensure MTU is set when the OVS vif driver is used * remove redundant \_\_init\_\_() overwriting when getting ExtensionResources * Fix bug for neutron network-name * Fix rbd backend not working for none admin ceph user * Set objects indirection API in network service * Use oslo.rootwrap library instead of local copy * Remove admin auth when getting the list of Neutron API extensions * Fix the test parameter order for v3 evacuate test * Add API schema for v3 evacuate API * Remove unused code * Take a vm out of SNAPSHOTTING after Glance error * Corrected typo in metrics * libvirt: handle exception while get vcpu info * Fixed incorrect test case of test\_server\_metadata.py * Add API schema for v3 rescue API * Support preserve\_ephemeral in baremetal * Show bm deploy how to preserve ephemeral content * Add preserve\_ephemeral option to rebuild * Fix string formatting of exception.NoUniqueMatch message * docstring fix * xenapi: stop server destroy on live\_migrate errors * Ensure that exception raised in neutron are handled correctly * Fix updating device names when defaulting * libvirt: Fix confusing use of mox.StubOutWithMock * Sync request\_id middleware for nova * Calculate default security group into quota usage * Allow run\_image\_cache\_manager\_pass to hit db slave * Consolidate the blockdev related filters * VMware: upload images to temporary directory * Refactor CIDR field to use netaddr.IPNetwork * Make nova-network use Instance objects * Make nova-network use Service object * Allow \_check\_instance\_build\_time to hit db slave * Set objects indirection API in metadata service * libvirt: Configuration element for sVirt support * VMware: unnecessary session reconnection * Add API schema for v3 multinic API * API schema for v3 console\_output API * Workers verification for WSGI service * Remove unused dict BYTE\_MULTIPLIERS * Optimize libvirt live migration workflow at source * libvirt, fix test tpool\_execute\_calls\_libvirt * Using staticmethod to mock 
LibvirtDriver.\_supports\_direct\_io * Use the mangle checksum fill rule regardless to the multi\_host * Enabled Libvirt driver to read 'os\_command\_line' from image properties * Update nova.conf.sample * Capture exception for JSON load in virt.storage\_users * Ensure that headers are utf8, not unicode * Attribute snapshot not defined in libvirt/config.py * ec2 api should check 'max\_count'&'min\_count' para * nova docker driver cannot find cgroup in /proc/mounts on RHEL * VMware: fix rescue with disks are not hot-addable * VMware: bug fix for VM rescue when config drive is configured * Define common API parameter types * Fixed a problem in iSCSI multipath * Fix unhandled InvalidServerState exceptions in server start/stop * Cells rebuild regression fix * Fix potential fd leak * Rename instance\_type to flavor in libvirt virt driver tests * Rename instance\_type to flavor in vmware virt driver tests * Improve error message in services API * Make image props filter handle old vm\_modes * XenAPI: Use direct IO for writing config drive * Avoid unnecessary use of rootwrap for some network commands * Remove unused copyright from nova.api.\_\_init\_\_ * replace type() to isinstance() in nova * Make availability\_zone optional in create for aggregates * libvirt: Fix infinite loop waiting for block job * baremetal: stop deployment if block devices are not available * Cleanup 'deleting' instances on restart * Ignore duplicate delete requests * Let drivers override default rebuild() behaviour * Enable compute\_node\_update to tolerate deadlocks * xenapi: resize up ephemeral disks * xenapi: refactor generate\_ephemeral * xenapi: refactor resize\_up\_root\_vdi * Abstract add\_timestamp out of ComputeDriverCPUMonitor class * Revert "Whitelist external netaddr requirement" * The private method \_text\_node should be used as function * Add finer granularity to host aggregate APIs * Remove unused import * Adds new method nova.utils.get\_hash\_str * Make nova/quota use keypair objects * VMware: update test file names * Ensure instance action event list in order * Docker Driver doesn't respect CPU limit * libvirt: stop overwriting LibvirtConfigCPU in get\_host\_capabilities * Cleanup the flake8 section of tox.ini * Use the full string for localisation * Don't deallocate/reallocate networks on reschedules * Cleanup object usage in the rebuild path * Fix test case with wrong parameter in test\_quota\_classes * Remove unused variables in imagebackend.py * Remove unused code in test\_attach\_interfaces.py * Whitelist external netaddr requirement * Better exception handling for deletes during build * Translate the snapshot\_pending state for old instances * Prevent Instance.refresh() from returning a new info cache * Extends V3 os-hypervisor api for pci support * Sync config generator from oslo-incubator * Imported Translations from Transifex * Remove uneeded dhcp\_opts initialization * Update class/function name for test\_extended\_availability\_zone.py * Allow deleting instances while uuid lock is held * xenapi: add support for vcpu\_pin\_set * xenapi: more info when assert\_can\_migrate fails * fix ips to 'ips' in APIRouter * Hyper-V:Preserve config drive image after the instance is resized * fix log message in APIRouter * VMware: use session.call\_method to invoke api's * Rename instance\_type to flavor in hyper-v virt driver * Rename instance\_type to flavor in xenapi virt driver * Compact pre-Icehouse database migrations <= 180 * Change when exists notification is sent for rescue * Revert change of default 
FS from ext3 to etx4 * Convert nova.compute.manager's \_spawn to objects * Add alias as prefix for flavor\_rxtx v3 * Remove unused code in nova/api/ec2/\_\_init\_\_.py * Remove unused import * VMware: improve connection issue diagnostic * Fixes messages logged on Glance plugin retries * Aggregate: Hosts isolation based on image properties * Fix for qemu-nbd hang * Return policy error, not generic error * Fix lxc rootfs attached two devices in some action * Removes disk-config extension from v3 api * Fix typo'ed deprecated flag names in libvirt.imagebackend * Disable libguestfs' default atexit handlers * Add API schema for v3 extended\_volumes API * Catch InstanceIsLocked exception on server actions * Fix inconsistent "image" value on \_get\_image() * Add API schema for v3 keypairs API * Add API schema for v3 flavor\_access API * Add API schema for v3 agents API * Add API schema for v3 admin\_password API * Adds a PREPARED state after baremetal node power on * Make scheduler rpcapi use object serializer * Update log message when remove pci device * Add unit test for ListOfStrings field in object models * Sync oslo db.sqlalchemy.utils to nova * Remove duplicated test * Fixing availability-zone not take effect error * Fix image cache periodic task concurrent access bug * Fix interprocess locks for run\_tests.sh * lxc: Fix a bug of baselineCPU parse failure * platform independence for test\_virt unit tests * Imagecache: fix docstring * libvirt: Set "Disabled Reason" to None when enable nova compute * Change log from ERROR to WARNING when instance absent * VMware: clean up unnecessary help message of options * Don't use deprecated module commands * Add apache2 license header to appropriate files for enabling H102 * XenAPI: Allow use of clone\_vdi on all SR types * Remove unused variables in test\_conductor.py * Do not use contextlib.nested if only mock one function * Remove update\_service\_capabilities from nova * Adds user\_data extension to nova.api.v3.extensions * Add wsgiref to requirements.txt * pass the empty body into the controller * Imported Translations from Transifex * Revert recent change to ComputeNode * sync oslo service to fix SIGHUP handling * Fix parameter checking about quota update api * Spelling fix resouce=>resource * Change default ephemeral FS to ext4 * When inject admin password, no need to generate temp file * Make \_change\_index\_columns use existing utility methods * Fix interprocess locks when running unit-tests * Cleanup object usage in the delete path * Change RPC post\_live\_migration\_at\_destination from call to cast * Pass rbd\_user id and conf path as part of RBD URI for qemu-img * Allow some instance polling periodic tasks to hit db slave * Sync timeutils from oslo-incubator * Catch NotImplementedError for vnc in the api * List NotImplementedError as a client exception for vnc * remove vmwareapi.vmops.get\_console\_output() * Object-ify build\_and\_run\_instance * Retry on deadlock in instance\_metadata\_update * use 'os\_type' in ephemeral filename only if mkfs defined * ValueError should use '%' instead of ',' * Setting the xen vm device id on vm record * Rename instance\_type to flavor in nova.utils and nova.compute.utils * Rename instance\_type to flavor in nova.cloudpipe * Serialize instance object while building request\_spec * Make rebuild use Instance objects * Remove deprecated config aliases * Changed error message to match usage * Add configurable 120s timeout ovs-vsctl calls * Clarify rebuild\_instance's recreate parameter * Clean swap\_volume 
rollback, on libvirt exception * Image cache: move all of the variables to a common place * baremetal: set capabilites explicitly * Remove docker's unsupported capabilities * Set a sane default for state\_path * Fix incorrect exception on os-migrateLive * barematal: Cleanup the calls to assertEqual * Refactor time conversion helper function for objects in db api * Fixes ConfigDrive bug on Windows * Remove smoketests * Revert graceful shutdown patch * Handle InstanceUserDataMalformed in create server v2 api * Enable remote debugging for nova * Fix race in unit tests, which can cause gate job to fail * Add boolean convertor to cells sync\_instances API * Initialize iptables rules on initialization of MetadataManager * vmwareapi: raise on get\_console\_output * hyperv: remove get\_console\_output method * List NotImplementedError as client exception * api: handle NotImplementedError for console output * Make Serializer/Conductor able to backlevel objects * Make ec2 use Flavor object * Move restore and rebuild operations to Flavor objects * Add flavor access methods to Instance object * Rename instance\_type to flavor in nova.network tree * Stop, Rescue, and Delete should give guest a chance to shutdown * Remove middleware ratelimits from v3 api * Remove unused variables in neutron api interface and neutron tests * Remove unneeded call to conductor in network interface * Return client tokens in EC2 DescribeInstances * Require List objects to be able to backlevel their contents * Make Instance object compatible with older compute nodes * Deprecate/remove scheduler select\_hosts() * Pass Instance object to console output virt driver api * Send Instance object to validate\_console\_port * Pass Instance object to compute vnc rpc api * Update vnc virt driver api to take Instance object * Add error as not-in-progress migration status * Don't replace instance.info\_cache on each save * Add boolean convertors for migrate\_live API * VMWare: bug fix for Vim exception handling * XenAPI: Synchronize on all VBD plug/unplug per VM * Add IPAddress field type in object models * Fixes errors on start/stop unittest * Use a dictionary to eliminate the inner loop in \_choose\_host\_filters() * Correct uses of :params in docstrings * Delete iSCSI devices after volume detached * Prevent spoofing instance\_id from neutron to nova * Replaces call to lvs with blockdev * Refactor PXE DHCP Option support * Normalize the weights instead of using raw values * Compact pre-Icehouse database migrations <= 170 * XenAPI: Speedup host\_ref cannot change - get it once * Updated from global requirements * Rename instance\_type to flavor in test\_utils and nova.tests.utils * Rename instance\_type to flavor in baremetal virt driver * VMware: fix bug when more than one datacenter exists * Sync oslo lockutils for "fix lockutils.lock() to make it thread-safe" * Move calls to os.path.exists() in libvirt imagebackend * Ensure api\_paste\_conf is an absolute path * Log exception in \_heal\_instance\_info\_cache * Raise better exception if duplicate security groups * Remove the largely obsolete basepath helper * libvirt: Custom disk\_bus setting is being lost on hard\_reboot * Libvirt: Making the video driver element configurable * Give migrations tests more time to run * Remove the api\_thread\_pool option from libvirt driver * baremetal: volume driver refactoring and tests * Sync middleware audit, base, and notifier from oslo * Get test\_openAuth\_can\_refuse\_None\_uri to cleanup after itself * Hide injected\_file related quotas for 
V3 API * Make obj\_from\_primitive() preserve version information * Cells: check states on resize/rebuild updates * Make flavor\_access extension use Flavor object * libvirt: add a test to guard against set\_host\_enabled raising an error * Fix UnboundLocalError in libvirt.driver.\_close\_callback * Quota violations should not cause a stacktrace in the logs * Enforce permissions in snapshots temporary dir * Sync rpc fix from oslo-incubator * Fix changes-since filter for list-servers API * Make it possible to override test timeout value * Imported Translations from Transifex * libvirt: consider minimal I/O size when selecting cache type * Setup destination disk from virt\_disk\_size * Add Flavor object * Add atomic flavor access creation * Add extra\_resources field to compute\_nodes table * Recommend the right call instead of datetime.now() * libvirt: remove unused imports from fake libvirt utils * VMware: fix disk extend bug when no space on datastore * Fix monkey\_patch docstring bug * Change unit test for availability\_zones.reset\_cache * Make compute support monitors and store metrics * Added a new scheduler metrics weight plugin * LXC: Image device should be reset in mount() and teardown() * Add shutdown option to cleanup running periodic * xenapi: Update VM memory overhead estimation * Misc typos in nova * Add default arguments for Connection class * Update Instance from database after destroy * Libvirt: Adding video device to instances * Configuration element for describing video drivers * Don't log stacktrace for UnexpectedTaskStateError * Extends V3 servers api for pci support 2014.1.b1 --------- * LOG.warn() and LOG.error() should support translation * Minor change for typo from patch 80b11279b * network\_device\_mtu should be IntOpt * Fix HTTP response code for network APIs and improve error message * Use password masking utility provided in Oslo * Sync log.py from Oslo-incubator * xenapi: stop hang during glance download * Clean up test cases for compute.manager.\_check\_instance\_build\_time * Recover from IMAGE-\* state on compute manager start-up * Document when config options were deprecated * VMware: Fix unhandled session failure issues * Use utils method when getting instance metadata and system metadata * Add status mapping for shutoff instance when resize * Fix docstring on SnapshotController * Fix trivial typo 'descirption' * Compact pre-Icehouse database migrations <= 160 * Compact pre-Icehouse database migrations <= 150 * Compact pre-Icehouse database migrations <= 140 * Remove redundant body validation for createBackup * Change evacuate test hostnames to preferable ones * Change conductor live migrate task to use select\_destinations() * Ensure proper notifications are sent when build finishes * Periodic task \_heal\_instance\_info\_cache can now use slave db * docker: access system\_metadata as a dict * Don't overwrite marker when checking if it exists * There is no need to set VM status to ERROR on a failed migration * DB migration 209: Clean up child rows as well * Cleanup ec2/metadata/osapi address/port listen config option help * Recover from build state on compute manager start-up * Comply with new hacking 0.8 release * Correct network\_device\_mtu help string * Remove last of AssertEquals * Fix Neutron Authentication for Metadata Service * Update help for osapi\_compute\_listen\_port * libvirt: host update disable/enable report HTTP 400 * Catch InstanceIsLocked exception on server actions * VMware: enable driver to work with postgres database * Make 
test\_evacuate from compute API DRYer * Fix testcase config option imports * Fix "in" comparisons with one element tuples * Remove \_security\_group\_chain\_name from nova/virt/firewall.py * Remove duplicate setting of os\_type in libvirt config builder * Fix logic in LibvirtConnTestCase.\_check\_xml\_and\_uri * Remove unused flag 'host\_state\_interval' * Make object compat work with more positional args * Fix LibvirtGenericVIFDriver.get\_config() for quota * Fix a tiny double quote matching in field obj model * Move flags in libvirt's volume to the libvirt group * Check Neutron port quota during validate\_networks in API * Failure during termination should always leave state as Error(Deleting) * Remove duplicate FlavorNotFound exception handling in server create API * Make check more pythonic * Make sure report\_interval is less than service\_down\_time * Set is\_public to False by default for volume backed snapshots * Delete instance faults when deleting instance * Pass Instance object to spice compute rpc api * Pass Instance object to get\_spice\_console virt api * Remove update\_service\_capabilities from scheduler rpc api * Remove SchedulerDependentManager * powervm: remove powervm virt driver from nova * libvirt: Provide a port field for GlusterFS network disks * Add API input validation framework * Remove duplicate BuildAbortException block * Remove compute 2.x rpc api * Add v3 of compute rpc API * Fix incorrect argument position in DbQuotaDriver * Change ConductorManager to self.db when record cold\_migrate event * instance state will be stuck in unshelving when unshelve fails * Fix some i18n issue in nova/compute/manager.py * Don't gate on E125 * Supplement 'os-migrateLive' in actions list * Corrected typo in host\_manager * Fix a lazy-load exception in security\_group\_update() * fakevirt: return hypervisor\_version as an int instead of a string * Bump to sqlalchemy-migrate 0.8.2 * ComputeFilter shouldn't generate a warning for disabled hosts * Remove cert 1.X rpc api * Add V2 rpc api for cert * Remove console 1.X rpc api * Do not hide exception in update\_instance\_cache\_with\_nw\_info * Wrong handling of Instance expected\_task\_state * XenAPI: Fix caching of images * Extend LibvirtConfigGuest to parse guest cpu element info * Rename instance\_type parameter in migrate\_disk\_and\_power\_off to flavor * convert min\_count and max\_count to type int in nova v3 api * Add decorator expected\_errors for flavors\_extraspecs v3 * Remove nullable=True in models.py which is set by default * baremetal: Make api validate mac address * Use 204 instead of 202 for delete of keypairs v3 * Fix log message format issue for api * Remove "set()" from CoreAPIMissing exception * Move flag in libvirt's vif to the libvirt group * Move flag in libvirt's utils to the libvirt group * Move flags in libvirt's imagebackend to the libvirt group * Extend the scheduler HostState for metrics from compute\_node * docker: return hypervisor\_version as an int rather than a string * Sync Log Levels from OSLO * Removes check CONF.dhcp\_options\_enabled from nova * Improved debug ability for log message of cold migration * Adjust the order of notification for shelve instance * Add FloatField for objects * XenAPI: Fix config section usage * Fix performance of Server List with Neutron for Admins * Add context as parameter for two libvirt APIs * Add context as parameter for resume * xenapi: move session into new client module * xenapi: stop key\_init timeout failing set password * xenapi: workaround vbd.plug race * 
Address infinite loop in nova compute when getting network info * Use of logging in native thread causes deadlock connecting to libvirtd * Add v3 api samples for shelve * Imported Translations from Transifex * libvirt: Fix log message when disable/enable a host * Fix missing format specifier in ImagePropertiesFilter log message * Sync the DB2 communication error code change from olso * baremetal: refactor out powervm dependency * handle migration errors * Make compute manager \_init\_instance use native objects * Fix for reading the xenapi\_device\_id from image metadata * Check if reboot request type is None * Use model\_query() instead of session.query in db.instance\_destroy * Fix up spelling mistake * Periodic task \_poll\_unconfirmed\_resizes can now use slave db * Include image block device maps in info * Sync local from oslo * objects: declare some methods as static * Handle UnicodeEncodeError in validate\_integer * Remove traces of V3 personality extension from api samples * Removes os-personalities extension from the V3 API * VMware: add support for VM diagnostics * Remove useless api sample template files for flavor-rxtx v3 * Fix libvirt evacuate instance on shared storage fails * Fixes get\_vm\_storage\_paths issue for Hyper-V V2 API * Clean up how test env variables are parsed * Add missing argument max\_size in libvirt driver * VMware: Always upload a snapshot as a preallocated disk * Fix empty selector XML bug * Libvirt:Instance resize confirm issue against NFS * Add V2 rpc api for console * Fix sample parameter of agent API * VMware: fix snapshot failure when host in maintenance mode * Clean up unused variables * Add a driver method to toggle instance booting * Fix cells instance\_create extra kwarg * handle empty network info in instance cache * Remove deprecated instance\_type alias from nova-manage * xenapi: kernel and ramdisk missing after live-migrate * Remove V2 API version of coverage extensions * Remove V3 API version of coverage extension * Update openstack/common/periodic\_task * Use 201 instead of 200 for action create of flavor-manage v3 * Enforce metadata string type on key/value pairs * Fixes RequestContext initialization failure * Move flags in libvirt's imagecache to the libvirt group * Move base\_dir\_name option to somewhere more central * Move some libvirt specific flags into a group * Removed unused instance object helper function * Update openstack/common/lockutils * Rename InstanceType exceptions to Flavor * Added monitor (e.g. 
CPU) to monitor and collect data * Conditionalise automatic enabling of disabled host * Users with admin role in Nova should not re-auth with Neutron * Use 400 instead of 422 for invalid input in v3 servers core * Fix limits v3 follow API v3 rules * Remove used\_limits extension from the V3 API * Remove reduntant call to update\_instance\_info\_cache * Add flavor-extra-specs to core for V3 API * Add flavor-access to core for V3 API * Remove unused libvirt\_ovs\_bridge flag * Fix AttributeError(s) from get\_v4/6\_ips\_by\_interface * Raising exception for invalid floating\_ip's ID * libvirt: Allow delete to complete when a volume disconnect fails * replace assertNotEquals with assertNotEqual * Add V3 api samples for access\_ips * Add v3 api samples for scheduler-hints * Add v3 api samples for availability\_zone * Add V3 API sample for server's actions * Cache Neutron Client for Admin Scenarios * More instance\_type -> flavor renames in db.api * Cache compute node info in Hypervisor api * Reverse the quota reservation in revert\_resize * Rename virtapi.instance\_type\_get to flavor\_get * Xenapi: Allow windows builds with xentools 6.1 and 6.2 * Make baremetal support metadata for ephemeral block-device-mapping * Make baremetal\_deploy\_helper understand ephemeral disks * Removed unused methods from db.api * Fix type mismatch errors in NetworkTestCase * VMware: Detach volume should not delete vmdk * xenapi: Fix agent update message format * xenapi: Fix regression issue in agent update * Shrink the exception handling range * Moved quota headroom calculations into quota\_reserve * Remove dup of LibvirtISCSIVolumeDriver in LibvirtISERVolumeDriver * Replace assertEquals with assertEqual - tests/etc * libvirt: pass instance to a log() call in the standard way * xenapi: Move settings to their own config section * domainEventRegisterAny called too often * Allow configuring the wsgi pool size * driver tests (loose ends): replace assertEquals with assertEqual * baremetal: replace assertEquals with assertEqual * image tests: replace assertEquals with assertEqual * virt root tests: replace assertEquals with assertEqual * Remove unnecessary steps for cold snapshots * baremetal: Make volume driver use a correct source device * Update quota-class-set/quota-set throw 500 error * Add log\_handler to implement the publish\_errors config option * Imported Translations from Transifex * Enable non-ascii characters in flavor names * Move docker specific options into a group * Check return code of command instead of checking stderr * Added tests for get\_disk\_bus\_for\_disk\_dev function * Checking existence of index before dropping * add hints to api\_samples documentation * xenapi: check for IP address in live migration pre check * Remove live\_snapshot plumbing * Remove unused local variable in test\_compute * Make v3 admin\_password parameters consistent * Flavor name should not contain only white spaces * fix a typo error in test\_libvirt\_vif.py * Remove unused local variables in test case * Rename \_get\_vm\_state to \_get\_vm\_status * Ensure deleted instances' status is always DELETED * Let resource\_tracker report right migration status * Imported Translations from Transifex * nit: fix indentation * Always pass context to compute driver destroy() * Imported Translations from Transifex * db tests: replace assertEquals with assertEqual * compute tests: replace assertEquals with assertEqual * Catch exception while building due to instance being deleted * Refactor UnexpectedTaskStateError for handling of 
deleting instances * Parted 'invalid option' in XenAPI driver * Specify DB URL on command-line for schema\_diff.py * Fix \`NoopQuotaDriver.get\_(project|user)\_quotas\` format * Send delete.end with latest instance state * Add missing fields in DriverBlockDevice * Fix the boto version comparison * Add test for class InsertFromSelect * Process image BDM earlier to avoid duplicates * Clean BDM when snapshoting volume-backed instances * Remove superflous 'instances' joinedload * Fix OLE error for HyperV * Make the vmware pause/unpause unit tests actually test something * Fixes the destroy() method for the Docker virt driver * xenapi: converting XenAPIVolumeTestCase to NoDB * Move \`diff\_dict\` to compute API * Add compatibility for InstanceMetadata and primitives * Issue brctl/delif only if the bridge exists * ensure we don't boot oversized images * Add V3 API samples for config-drive * Remove duplicated test * Add notification for host operation * Sync log from oslo * Replace assertEquals with assertEqual - tests/scheduler * Make non-admin users can unshelve a server * Fix interface-attach removes existing interfaces from db * Correct exception handling * Utilizes assertIsNone and assertIsNotNone - tests/etc * Use elevated context in resource\_tracker.instance\_claim * Add updates and notifications to build\_and\_run\_instance * Add network handling to build\_and\_run\_instance * Make unshelve use new style BDM * Make \_get\_instance\_nw\_info() use Instance object * Convert evacuation code to use objects * Deprecate two security\_group-related methods from conductor * Make metadata server use objects for Instance and Security Groups * Replace assertEquals with assertEqual - tests/api * Remove security\_group-related methods from VirtAPI * Make virt/firewall use objects for Security Groups and Rules * Drop auth\_token configs for api-paste.ini * Add auth\_token settings to nova.conf.sample * Use \_get\_server\_admin\_password() * Pass volume\_api to get\_encryption\_metadata * Comments for db.api.compute\_node\_\*() methods * Fix migration 185 to work with old fkey names * Adds V3 API samples for user-data * Enforce compute:update policy in V3 API * tenant\_id implies all\_tenants for servers list in V3 API * Move get\_all\_tenants policy enforcement to API * all\_tenants=0 should not return instances from all tenants * Utilizes assertIsNone and assertIsNotNone - tests/virt * xenapi: workaround for failing vbd detach * xenapi: strip base\_mirror after live-migrate * xenapi: refactor get\_all\_vdis\_in\_sr * Remove unused expected\_sub\_attrs * Remove useless variable from libvirt/driver.py * Add a metadata type validation when creating vm * Update schema\_diff.py to use 'postgresql' URLs * Disable nova-compute on libvirt connectivity exceptions * Make InstanceInfoCache load base attributes * Add SecurityGroupRule object * Add ephemeral\_mb record to bm\_nodes * Stylistic improvement of models.ComputeNodeStat * clean up numeric expressions in tests * replaced e.message with unicode(e) * Add DeleteFromSelect to avoid database's limit * Imported Translations from Transifex * Utilizes assertIsNone and assertIsNotNone - tests/api * Include name/level in unit test log messages * Remove instance\_type\* proxy methods from nova.db.api * Add InstanceList.get\_by\_security\_group() * Make security\_group\_rule\_get\_by\_security\_group() honor columns * Claim IPv6 is unsupported if no interface with IPv6 configured * Pass thru credentials to allow re-authentication * network tests: replace 
assertEquals with assertEqual * Nova-all: Replace basestring by six for python3 compatability * clean up numeric expressions with byte constants * Adds upper bound checking for flavor create parameters * Remove fake\_vm\_ref from test\_vmwareapi.py * xen tests: replace assertEquals with assertEqual * Fix tests to work with mysql+postgres concurrently * Enable extension access\_ips for v3 API * Correct update extension point's check\_func for v3 server's controller * Updates the documentation for nova unit tests * Remove consoleauth 1.X rpc api * consoleauth: retain havana rpc client compat * Pull system\_metadata for notifications on instance.save() * Allow \_sync\_power\_states periodic task to hit slave DB * Fix power manager hangs while executing ipmitool * Update my mailmap * Stored metrics into compute\_nodes as a json dictionary * Bad except clauses order causes wrong text in http response * Add nova.db.migration.db\_initial\_version() * Fix consoleauth check\_token for rpcapi v2 * Nova db/api.py docstring cleanups.. * Adds XML namespace example for disk config extension * Remove multipath mapping device descriptor * VMware: fix VM resize bug * VMware: fix bug for reporting instance UUID's * Remove extra space in tox.ini * Fix migrate w/ cells * Add tests for compute (child) cell * Call baselineCPU for full feature list * Change testing of same flavor resize * Fix bad typo in cloudpipe.py * Fix compute\_api tests for migrate * Replace basestring by six for python3 compatability * Add flavor-manage to core for V3 API * Refactor unit tests code for python3 compatability * make libvirt driver get\_connection thread-safe * Remove duplicates from exceptions list * Apply six for metaclass * Add byte unit constants * Add block device handling to build\_and\_run\_instance * Reply with a meaningful exception, when libvirt connection is broken * Fix getting nwinfo for Instance obj * Make cells info\_cache updates more tolerant * Raise an error if module import fails * Drop RPC securemessage.py and crypto module * Remove deprecated libvirt VIF driver code * nova.exception does not have a ProcessExecutionError * Fix setting backdoor port in service start * Sync lockutils from oslo * Fix wrong description when updating quotas * Expose additional status in baremetal API extension * migrate server doesn't raise correct exception * Make security\_group\_get() more flexible about joins * Make Object FieldType take an object name instead of a class * Hyper-v: Change the hyper-v error log for debug when resize failed * Adds V3 API samples for the disk-config extension * Utilizes assertIn - tests/etc * Fix all scripts to honor the enabled\_ssl\_apis flag * Updated from global requirements * Fix i18n issue for nova/compute/manager.py * Change tab to blank space in hypervisors-detail-resp * Fixing ephemeral disk creation * Merging two mkfs commands * xenapi: ephemeral disk partition should fill disk * Fix the ConsolesController class doc string * xenapi: Speeding up the easy cases of test\_xenapi * xenapi: Speeding up more tests by switching to NoDB * Remove .pyc files before generating sample conf * xenapi: migrate multiple ephemeral disks * Fail quickly if file injection for boot volume * Add obj\_make\_compatible() * Updated from global requirements * Make cells 'flavorid' for resizes * Fixes unicode issue in the Hyper-V driver * Add missing ' to extra\_specs debug message * VMware: Fix ValueError unsupported format character in log message * graceful-shutdown: add graceful shutdown into compute * 
remove unused network module from certificates api extension * Sync fixture module from oslo * Fixes Invalid tag name error when using k:v tagname * Fix tests for migration 227 to check sqlite * Adds V3 API samples for console output * Add V2 rpc api for consoleauth * Update version aliases for rpc version control * Improve object instantiation syntax in some tests * A nicer calling convention for object instantiation * Updates OpenStack Style Commandments link * Updated from global requirements * Adding support for multiple hypervisor versions * Manage None value for the 'os\_type' property * Add CIDR field type * Validate parameters of agent API * Adding Read-Only volume attaching support to Nova * Update timeutils.py from oslo * Fix docstring related to create\_backup API * powervm tests: replace assertEquals with assertEqual * Add V3 API sample for admin-password * Remove duplicated test cases * Add extension access\_ips for v3 API * Ensure migration 209 works with NULL fkey values * Cells: Fix instance deletes * Uses oslo.imageutils * Add testr concurrency option for run\_tests.sh * Fix the image name of a shelved server * xenapi: test\_driver should use NoDBTestCase * xenapi: Speedup vm\_util and vmops tests * xenapi: speedup test\_wait\_for\_instance\_to\_start * Remove xenapi rpm building code * Fixes datastore selection bug * Fixes Hyper-V snapshot spawning issue * Make SecurityGroup receive context * Fix DB API mismatch with sqlalchemy API * Remove aggregate metadata methods from conductor and virtapi * Make XenAPI use Aggregate object * libvirt: add missing i18n support * Adds V3 API samples for attach-interfaces * Make aggregate methods use new-world objects * Add missing key attribute to AggregateList.get\_by\_host() * Fix i18n issue for nova/virt/baremetal/virtual\_power\_driver.py * Fix scheduler rpcapi deprecated method comment * Send notifications on keypair create/delete * Use \`versionutils.is\_compatible\` for Dom0 plugin * Use \`versionutils.is\_compatible\` for Nova Objects * Improve logging messages in libvirt driver * xenapi: stop agent errors stopping build * Fix NovaObject versioning attribute usage * xenapi: removes sleep after final upload retry * xenapi: stop using get\_all\_vdis\_in\_sr in spawn * populate local-ipv4 address in config drive * Harden version checking for boto * Handle MarkerNotFound better in Flavor API * Sanitize passwords when logging payload in wsgi * Remove unnecessary "LOG.error()" statement * xenapi: simplify \_migrate\_disk\_resizing\_up * xenapi: revert on \_migrate\_disk\_resizing\_up error * xenapi: make \_migrate\_disk\_resizing\_up use @step * libvirt tests: replace assertEquals with assertEqual * Use the oslo fixture module * Port server actions unittests to V3 API Part 2 * Remove unused method \_get\_res\_pool\_ref from VMware * Imported Translations from Transifex * Check for None when cleaning PCI dev usage * Fix vmwareapi driver get\_diagnostics calls * Remove instance\_info\_cache\_update() from conductor * compute api should throw exception if soft reboot invalid state VM * Make a note about Object deepcopy helper * Avoid caching quota.QUOTAS in Quotas object * Remove transitional callable field interface * Make the base object infrastructure use Fields * Migrate some tests that were using callable fields * Migrate NovaPersistentObject and ObjectListBase to Fields * Migrate Instance object to Fields * Utilizes assertIn - tests/api/etc * Utilizes assertIn - tests/virt * Utilizes assertIn - tests/api/contrib * Utilizes assertIn 
- tests/api/v3 * Make scheduler disk\_filter take swap into account * Add variable to expand for format string * Make quota sets update type handling a bit safer * Add test\_instance\_get\_active\_by\_window\_joined * Fixes error on live-migration of volume-backed vm * Migrate PciDevice object to Fields * Migrate InstanceInfoCache object to Fields * Migrate InstanceFault object to Fields * Migrate Service object to Fields * Migrate ComputeNode object to Fields * Migrate Quotas object to Fields * Migrate InstanceGroup object to Fields * Migrate InstanceAction and InstanceActionEvent objects to Fields * Move exception definitions out of db api * Remove unused scheduler rpcapi from compute api * Libvirt: disallow live-mig for volume-backed with local disk * xeanpi: pass network\_info to generate\_configdrive * Replace incorrect Null checking to return correctly * Fix nova DB 215 migration script logic error * Xenapi: set hostname when performing a network reset * Fix "resource" length in project\_user\_quotas table * Migrate SecurityGroup object to Fields * Migrate Migration object to Fields * VMware: fix regression attaching iscsi cinder volumes * Remove whitespace from cfg options * cleanup after boto 2.14 fix * Add boto special casing for param changes in 2.14 * xenapi: simplify PV vs HVM selection logic * fix missing host when unshelving * Fix a typo of tabstop * Fix error message of os-cells sync\_instances api * Log which filter failed when on log level INFO * Migrate KeyPair object to Fields * Migrate Aggregate object to Fields * Make field object support transitional call-based interface * Add Field model and tests * Fix conductor's object change detection * Remove obsolete redhat-eventlet.patch * Move is\_volume\_backed\_instance to new style BDM * Add a get\_root\_bdm utility function * Libvirt: allow more than one boot device * Libvirt: make boot dev a list in GuestConfig * Remove compute\_api\_class config option * Libvirt: add boot\_index to block device info dicts * Fixes Hyper-V issue with VHD file format * Update log message for add\_host\_to\_aggregate * Correct use of ConfigFilesNotFoundError * hyperv tests: replace assertEquals with assertEqual * Utilizes assertNotIn * VMware tests: replace assertEquals with assertEqual * Fix incorrect root partition size and compatible volume name * Imported Translations from Transifex * Utilize assertIsInstance * Fix typos in nova/api code * Make \`update\_test\` compatible with nose * Add a custom iboot power driver for nova bm * Fix FK violation errors in InstanceActionTestCase * Fix test\_shadow\_tables() on PostgreSQL/MySQL * Fix PCI devices DB API tests * Fix DB API tests depending on the order of rows * Use print function rather than print statement * Update default for running\_deleted\_instance\_action * Drop unused BM start\_console/stop\_console methods * VMware: Network fallback in case specified one not found * baremetal: Add missing method to volume driver * baremetal: Use network API to get fixed IPs * Replace decprecated method aliases in tests * catch exception in start and stop server api * Ensure that the netaddr import is in the 3rd party section * Fix status code of server's action confirm\_resize for v3 * Remove duplicated method in test\_compute\_api.py * Create flavor-access for the tenant when creating a private flavor * Fix root disk not be detached after deleting lxc container * fallocate image only when user has write access * Fixes typo in ListTargets CLI in hyperv driver * Fixes typos in nova/db code * Fixes 
typos in the files in the nova folder * Avoid clobbering {system\_,}metadata dicts passed to instance update * Baremetal: Be more patient with IPMI and BMC * VMware: fix bug with booting from volumes * Fixes typos in nova/compute files * Fixes typos in virt files * Fix docstring for disk\_cachemodes * Plug Vif into Midonet using Neutron port binding * VMware: remove deprecated configuration variable * Fix races in v3 cells extension tests * Add V3 API samples for consoles * Update allowvssprovider in xenstore\_data * Fix races in cells extension tests * Move \`utils.hash\_file\` -> \`imagecache.\_hash\_file\` * Remove \`utils.timefunc\` function * Remove \`utils.total\_seconds\` * Remove \`utils.get\_from\_path\` * Fix divergence in attach\_interfaces extensions * Replace assert\_ with assertTrue * Fixes several misc typos in scheduler code * Fix libvirt test on systems with real iSCSI devices * Reserve 10 migrations for backports * Sync three-part RPC versions support from Oslo * Remove unused dict functions from utils * Avoid mutable default args in \_test\_populate\_filter\_props * XenAPI: Add versioning for plugins * Add Docstring to some scheduler/driver.py methods * Libvirt: default device bus for floppy block devs * Fix filter\_properties of unshelve API * hyperv: Initialize target\_iqn in attach\_volume * Log if a quota\_usages sync updates usage information 2013.2.rc1 ---------- * Open Icehouse development * baremetal: Fix misuse of "instance" parameter of attach/detach\_volume * Fix the wrong params of attach/detach interface for compute driver * Imported Translations from Transifex * Adds missing entry in setup.cfg for V3 API shelve plugin * Avoid spamming conductor logs with object exceptions * Prefix \`utils.get\_root\_helper\` with underscore * Remove \`utils.debug\` * Remove \`utils.last\_octet\` * Remove \`utils.parse\_mailmap\` * Updated from global requirements * Remove unecessary \`get\_boolean\` function * Make Exception.format\_message aware of Messages * Disable lazy gettext * VMware: Check for the propSet attribute's existence before using * VMware: fix bug for invalid data access * Make rbd.libvirt\_info parent class compatible * Host aggregate configuration throws exception * VMware: Handle cases when there are no hosts in cluster * VMWare: Disabling linked clone doesn't cache images * Fixes inconsistency in flavors list with marker * Fix indentation in virt.libvirt.blockinfo module * Update jsonutils.py from oslo * Fix loading instance fault in servers view * Refactor test cases related to instance object * Use system locale for default request language * Update attach interface api to use new network model * Adds V3 API specific urlmap tests * Catch volume errors during local delete * Fix processutils.execute errors on windows * Fixes rescue doesn't honor enable password conf for v3 * VMware: Fix bug for root disk size * Fix incorrect exception raised during evacuate * Full sync of quota\_usages * Fix log format error in lazy-load message * xenapi: reduce impact of errors during SR.scan * Forced scheduling should be logged as Audit not Debug * xenapi: Resize operations could be faster * Resource limits check sometimes enforced for forced scheduling * Skip test if sqlite3 not installed * Add notification for pause/unpause instance * Make LiveMigrateTask use build\_request\_spec() * Ensure image property not set to None in build\_request\_spec() * Make sure periodic task sync\_power\_states continues on error * get\_all\_flavors uses id as key to be unique * fix 
the an Unexpected API Error issue in flavor API * Adds V3 API samples for srvcs, tenant usage, server\_diagnostics * VMware: Fix SwitchNotFound error when network exists * Fix unicode string values missing in previous patch * Fix stopping instance in sync\_power\_states * Remove deprecated task states * plug\_vif raise NotImplementedError instead of pass * Check instance exists or not when evacuate * xenapi: ignore 500 errors from agent resetnetwork * Add flavor name validation when create flavor * xenapi: enforce filters after live-migration * xenapi: set vcpu cap to ensure weight is applied * Get image metadata in to\_xml for generating xml * Add notification on deleting instance without host * Fix V3 API flavor returning empty string for attributes * Fix v3 server rebuild deserializer checking with wrong access\_ip key * Windows instances require the timezone to be "localtime" * Don't wrap Glance exceptions in NovaExceptions * Update rootwrap with code from oslo * fix typo & grammer in comment 363-364 * Make Instance.refresh() extra careful about recursive loads * Log object lazy-loads * Ensure we don't end up with invalid exceptions again * Fix console db can't load attribute pool * Fix HTTP response for PortNotFound during boot (v3 API) * Fixes assertion bug in test\_cells\_weights.py * Remove \_get\_compute\_info from filter\_scheduler.py * VMware: fix bug for incorrect cluster access * Add V3 API samples for security-groups * Correct lock path for storage-registry-lock * Moved registration of lifcycle events handler in init\_host() * Rebuilding stopped instance should not set terminated\_at * Require oslo.config 1.2.0 final * Removes pre\_live\_migration need for Fixed IPs * Move call to \_default\_block\_device\_names() inside try block * Fix several flake8 issues in the plugins/xenserver code * Fix type is overwritten when UPDATE cell without type specified * Adds v3 API samples for hide server addresses and keypairs * Always filter out multicast from reflection * VMware: fix bug with booting from volume * VMware: enable VNC access without user having to enter password * Remove exceptions.Duplicate * Add v3 API samples for rescue * Added 'page\_size' param to image list * Fix SecurityGroupsOutputTest v3 security group tests * Fixes file mode bits of compute/manager.py * Adds v3 API samples for hosts extension * Only update PCI stats if they are reported from the host * xenapi: Cleanup pluginlib\_nova * Fix Instance object assumptions about joins * Bring up interface when enslaving to a bridge * v3 API samples for servers * xenapi: refactor: move UpdateGlanceImage to common * Imported Translations from Transifex * Fixes modules with wrong file mode bits in virt package * Adds v3 API samples for ips and server\_metadata extensions * Fix V3 API server metadata XML serialization * libvirt: add test case for \_hard\_reboot * Add tests for pre\_live\_migration * Adds V3 API samples for evacuate,ext-az,ext-serv-attrs * Add V3 API samples for ext-status,hypervisor,admin-actions * Code change for regex filter matching * Convert TestCases to NoDBTestCase * VMware: ensure that resource exists prior to accessing * Fixes modules with wrong file mode bits * Fixes test scripts with wrong bitmode * Update sample config generator script * Instance object incorrectly handles None info\_cache * Don't allow pci\_devices/security\_groups to be None * Allow for nested object fields that cannot be None * Object cleanups * Convert TestCases to NoDBTestCase * Convert TestCases to NoDBTestCase * Actually 
fix info\_cache healing lazy load * Fixes host stats for VMWareVCDriver * libvirt: ignore false exception due to slow NFS on resize-revert * Syncs install\_venv\_common.py from oslo-incubator * Correct deleted\_at value in notification messages * VMwareVCDriver Fix sparse disk copy error on spawn * Remove unused \_instance\_update() method in compute api * Change service id to compute for compute/api.py * XenAPI raise InstanceNotFound in \_get\_vm\_opaque\_ref * Replace OpenStack LLC with OpenStack Foundation * Send notification for any updates to instance objects * Add flag to make baremetal.pxe file injection optional * Force textmode consoles on baremetal * Typo: certicates=>certificates in nova.conf * Remove print statement from test\_quotas that fails H233 check * Fix for os-availability-zone/detail returning 500 * Convert TestCases to NoDBTestCase * Fixes the usage of PowerVMFileTransferFailed class * MultiprocessWSGITest wait for workers to die bug * Prune node stats at compute node delete time * VMware: datastore regex not honoured * VMware: handle exceptions from RetrievePropertiesEx correctly * VMware: Fix volume detach failure * Remove two unused config options in baremetal * Adds API samples and unitests for os-server-usage V3 extension * xenapi: Make rescue safer * Add V3 API samples for quota-sets/class-sets,inst-usage-audit-log * Fix problem with starting Windows 7 instances using VMware Driver * VMware: bug fix for instance deletion with attached volume * Fix migration 201 tests to actually test changes * Don't change the default attach-method * Fix snapshot failure with VMwareVCDriver * Fix quota direct DB access in compute * Add new-world Quota object * Fix use of bare list/dict types in instance\_group object * Fix non-unicode string values on objects * Add missing get\_available\_nodes() refresh arg * Make Instance.Name() not lazy-load things * Add debugging to ComputeCapabilitiesFilter * xenapi: fix pep8 violations in nova plugins * Retry on deadlock in instance\_metadata\_delete * Make virt drivers use a consistent hostname * [VMware] Fix problem transferring files with ipv6 host * VMware: Fix ensure\_vlan\_bridge to work properly with existing DVS * Fix network info injection in pure IPv6 environment * delete a non existent flavor extra spec returns 204 * Don't use ModelBase.save() inside of transaction * send the good binding to neutron after live-migration * Add linked clone related unit tests for VMware Hyper * Ensure anti affinity scheduling works * pci passthrough bug fix:hasattr dones not work for dict * Fix rename q\_exc to n\_exc (from quantum to neutron) * Improve "keypair data is invalid" error message * Enable fake driver can live migration * Don't use sudo to discover ipv4 address * xenapi: Fix rescue * Fix create's response is different with requested for sec-grps V3 * Fix logging of failed baremetal commands * Add v3 API samples for os-extended-volumes * Better help for generate config * Fix hyper-v vhd real size bigger than flavor issue * Remove unused and duplicate code * Policy check for forced\_host should be before the instance is created * Remove cached console auth token after migration * Don't generate notifications when reaping running\_deleted instances * Add instance\_flavor\_id to the notification message * Edits for nova.conf.sample * xenapi: fix where root\_gb=0 causes problems * Wire in ConfKeyManager.\_generate\_hex\_key! 
* Drop unused logger from keymgr/\_\_init\_\_.py * Move required keymgr classes out of nova/tests * Translate more REST API error messages * pci passthrough fails while trying to decode extra\_info * Update requirements not to boto 2.13.0 * Port server actions unittests to V3 API Part 1 * Remove unused method in scheduler driver * Ignore H803 from Hacking * Fixes misuse of assertTrue in virt test scripts * Add missing notifications for rescue/unrescue * Libvirt: volume driver set correct device type * Make v3 API versions extensions core * Make Instance.save() log missing save handlers * Don't fail if volume has no image metadata * Get image properties instead of the whole image * Remove extra 'console' key for index in extensions consoles v3 * Fix V3 API server extension point exception propagation * VMware: nova-compute crashes if VC not available * Update mailmap for jhesketh * Code change for nova support glance ipv6 address * disassociate\_address response should match ec2 * Adds V3 API samples for remote consoles, deferred delete * Fix asymmetric view of object fields * Use test.TestingException where possible * Add encryption support for volumes to libvirt * VMware: fix driver support for hypervisor uptime * Wrong arguments when calling safe\_utils.getcallargs() * Add key manager implementation with static key * Remove duplication in disk checks * Change the duplicate class name TestDictMatches in test\_matches.py * Add alias as prefix to request params for config\_drive v3 * xenapi: Add per-instance memory overhead values * Fixes misuse of assertTrue in test scripts * Remove unused and wrong code in test\_compute.py * Remove cases of 'except Exception' in tests.image * Remove \_assert\_compute\_node\_has\_enough\_memory from filter\_scheduler.py * Fix regression issues with cells target filter * Remove out of date list of jenkins jobs * Don't lose exception info * Add filter for soft-deleted instances to periodic cleanup task * Don't return query from db API * Update fedora dev env instructions * Only return requested network ID's * Ensure get\_all\_flavors returns deleted items * Fix the order of query output for postgres * Fix migration 211 to downgrade with MySQL * Removed duplicated class in exception.py * Fix console api pass tuple as topic to console rpc api * Enable test\_create\_multiple\_servers test for V3 API * VMware image clone strategy settings and overrides * Reduce DB load caused by heal instance info cache * Clean up object comparison routines in tests * Clean up duplicated change-building code in objects * disable direct mounting of qcow2 images by default * xenapi: ensure finish\_migration cleans on errors * xenapi: regroup spawn steps for better progress * xenapi: stop injecting the hostname during resize * xenapi: add tests for finish\_migration and spawn * xenapi: tidy ups to some spawn related methods * xenapi: move kernel/ramdisk methods to vm\_utils * xenapi: ensure pool based migrate is live * Fix live-migrate when source image deleted * Adds v3 API samples for limits and simple tenant usage * Return a NetworkInfo object instead of a list * Fix compute\_node\_get\_all() for Nova Baremetal * Add Neutron port check for the creation of multiple instances * Remove unused exceptions * Add V3 API samples for flavor-manage,flavor-extra-specs * Add V3 API samples for flavors,flavor-rxtx,flavor-access * Catch more accuracy exception for \_lookup\_by\_name * Fixes race cond between delete and confirm resize * Fixes unexpected exception message in 
ProjectUserQuotaNotFound * Fixes unexpected exception message in PciConfigInvalidWhitelist * Add missing indexes back in from 152 * Fix the bootfile\_name method call in baremetal * update .mailmap * Don't stacktrace on ImageNotFound in image\_snapshot * Fix PCIDevice ignoring missing DB attributes * Revert "Call safe\_encode() instead of str()" * Avoid errors on some actions when image not usable * Add methods to get image metadata from instance * Fix inconsistent usages for network resources * Revert baremetal v3 API extension * Fixes misuse of assertTrue in compute test scripts * add conf for number of conductor workers * xenapi: Add efficient impl of instance\_exists() 2013.2.b3 --------- * Updated from global requirements * Fix failure to emit notification on Instance.save() * MultiprocessWSGITest wait for workers to die bug * Synchronize the key manager interface with Cinder * Remove indirect dependency from requirements.txt * Clean up check for migration 213 * Add V3 API samples for instance-actions,extenions * fix conversion type missing * Enable libvirt driver to use the new BDM format * Allow block devices without device\_name * Port to oslo.messaging.Notifier API * Add expected\_errors for extension aggregates v3 * Refresh network info cache for secgroups * Port "Make flavors is\_public option .." to v3 tree * Add missing Aggregate object tests * Generalize the \_make\_list() function for objects * PCI passthrough Libvirt vm config * Add columns\_to\_join to instance\_update\_and\_get\_original * XenAPI: Allow 10GB overhead on VHD file check size * Adds ephemeral storage support for Hyper-V * Adds Hyper-V VHDX support * Create mixin class for common DB fields * Deprecate conductor migration\_get() * Change finish\_revert\_resize paths to use objects * Change finish\_resize paths to use objects * Change resize\_instance paths to use objects * VMware: Nova boot from cinder volume * VMware: Multiple cluster support using single compute service * Nova support for vmware cinder driver * Adds Hyper-V dynamic memory support * xenapi: Fix download\_handler fallback * Ensure old style images can be resized * Add nova.utils.get\_root\_helper() * Inherit base image properties on instance creation * Use utils.execute instead of subprocess * Fixes misuse of assertTrue in Cells test scripts * Remove versioning from IOVisor APIs PATH * Revert "Importing correlation\_id middleware from oslo-incubator" * update neutronclient to 2.3.0 minimum * Adds metrics collection support in Hyper-V * Port all rpcapi modules to oslo.messaging interface * Fix a gross duplication of context code in objects tests * Make compute\_api use Aggregate objects * Add Aggregate object model * Add dict and list utility functions for object typing * VMware: remove conditional suds validation * Limit instance fault messages to 255 characters * Add os-assisted-volume-snapshots extension * Scheduler rpcapi 2.9 is not backwards compatible * Adds support for Hyper-V WMI V2 namespace * Port flavormanage extension to v3 API Part 2 * Add os-block-device-mapping to v3 API * Improves Hyper-V vmutils module for subclassing * xenapi: add support for auto\_disk\_config=disabled * Check ephemeral and swap size in the API * Adds V3 API samples for cells and multinic * Increase volume created checking retries to 60 * Fix changes\_since for V3 API * Make v3 API console-output extension core * Makes v3 API keypairs extension core * Add support for API message localization * Fix typo and indent error in isolated\_hosts\_filter.py * Adds 
'instance\_type' param to build\_request\_spec * Guest-assisted-snaps libvirt implementation * Improve EC2 API error responses * Remove EC2 postfix from InvalidInstanceIDMalformedEC2 * Introduce InternalError EC2 error code * Introduce UnsupportedOperation EC2 error code * Introduce SecurityGroupLimitExceeded EC2 error code * Introduce IncorrectState EC2 error code * Introduce AuthFailure EC2 error code * Fix ArchiveTestCase on PostgreSQL * Fix AggregateDBApiTestCase on PostreSQL and MySQL * Port Cheetah templates to Jinja2 * Libvirt: call capabilites before getVersion() * Remove \_report\_driver\_status from compute/manager.py * Interpret BDM None size field as 0 on compute side * Add test cases for resume\_state\_on\_host\_boot * Add scheduler support for PCI passthrough * Fix v3 swap volume with wrong signature * vm\_state and task\_state not updated during instance delete * VMware: use VM uuid for volume attach and detach * xenapi: support raw tgz image download * xenapi: refactor - extract image\_utils * Add block\_device\_mapping\_get\_all\_by\_instance to virtapi * Sync rpc from oslo-incubator * Fix the multi-instance quota message * Fix virtual power driver fails silently * VMware: Config Drive Support * xenapi: skip metadata updates when VM not found * Make resource\_tracker record host\_ip * Disable compute fanout to scheduler * Make image\_props\_filter use information from DB not RPC * Make compute\_capabilities\_filter use information from DB not RPC * XenAPI: More operations with LVM-based SRs * XenAPI: make\_partition fixes for Dom0 * Fix wrong method call in baremetal * powervm: make start\_lpar timeout * Disable retry filter with force\_hosts or force\_nodes * Call safe\_encode() instead of str() * Fix usage of classmethod in various places * Fix V3 API quota\_set tests using V3 url and request * Handle port over-quota when allocating network for instance * Fix warning log message typo in resource\_tracker.instance\_claim * Sync filetuils from oslo-incubator * Fix VMware fakes * DRY up use of @wrap\_exception() decorator * Remove unused fake run\_instance() method * Use ExceptionHelper to bypass @client\_exceptions * Added new hypervisor to support Docker containers * Introduce InvalidPermission.Duplicate EC2 error code * Fix and gate on H302 (import only modules) * On snapshot errors delete the image * Remove dis/associate actions from security\_groups v3 * Add volume snapshot delete API test case * Assisted snapshots compute API plumbing * Adds V3 API samples for agents, aggregates and certificates * Adds support for security\_groups for V3 API server create * powervm: Use FixedIntervalLoopingCall for polling LPAR status * xenapi: agent not inject ssh-key if cloud-init * Tenant id filter test is not correct * Add PCI device tracker to compute resource tracker * PCI devices resource tracker * PCI device auto discover * Add PCI device filters support * Avoid swallowing exceptions in network manager * Make compute\_api use Service and ComputeNode objects * Adding VIF Driver to support Mellanox Plugin * Change prep\_resize paths to use objects * Make backup and snapshot use objects * Deprecate conductor migration\_create() * Make inject\_network\_info use objects * Convert reset\_network to use instance object * Make compute\_api use objects for lock/unlock * Add REUSE\_EXT in \_swap\_volume call to blockRebase * Remove unused \_decompress\_image\_file from powervm operator class * powervm: actually remove files after migration * Fix to disallow server name with all blank 
spaces (v3 API) * Add mock to test-requirements * xenapi: Improve test\_xenapi unit testing performance * Sets policy settings so V3 API extensions are discoverable * Pass objects for revert and confirm resizes * Convert \_poll\_unconfirmed\_resizes to use Migration object * Make compute\_api confirm/revert resize use objects * Make compute\_api migrate/resize paths use instance objects * Fix race when running initialize\_gateway\_device() * fix bad usage of exc\_info=True * Use implicit nullable=True in sqlalchemy model * Introduce Invalid\* EC2 error codes * Improve parameter related EC2 error codes * Disconnect from iSCSI volume sessions after live migration * Correct default ratelimits for v3 * Improve db\_sqlalchemy\_api test coverage * Safe db.api.compute\_node\_get\_all() performance improvement * Remove a couple of unused stubs * Fix Instance object issues * Adds API version discovery support for V3 * Port multiple\_create extension to V3 API * Add context information to download plugins * Adds V3 API samples for migrations * Filter network by project id * Added qemu guest agent support for qemu/kvm * PCI alias support * Add PCI stats * Raise timeout in fake RPC if no consumers found * Stub out instance\_update() in build instance tests * Mock out action event calls in build instance test * powervm: revert driver to pass for plug\_vifs * Remove capabilities.enabled from test\_host\_filters * xenapi: through-dev raw-tgz image upload to glance * Add PCI device object support * Store CONF.baremetal.instance\_type\_extra\_specs in DB * Pci Device DB support * VMware: remove redundant default=None for config options * Move live-migration control flow from scheduler to conductor * Fix v3 extensions inherit from wrong controller * Fix network creation in Vlan mode * compute rpcapi 2.29 is not backwards compatible * Fix the message of coverage directory error * Fix error messages in v3 aggregate API * compute rpcapi 2.37 is not backwards compatible * use 'exc\_info=True' instead of import traceback * Add env to make\_subprocess * Remove unused nova.common module * Adds Flavor ID validations * Imported Translations from Transifex * Add DocStrings for function allocate\_for\_instance * Removes V3 API images and image\_metadata extensions * Powervm driver now logs ssh stderr to warning * Update availability\_zone on time if it was changed * Add db.block\_device\_mapping\_get\_by\_id * Add volume snapshot APIs to driver interface * Pass the destination file name to download modules * Fix typo in baremetal docs * VMware: clean up get\_network\_with\_the\_name * Stylistic improvement of compute.api.API.update() * Removes fixed ips extension from V3 API * Libvirt: fix KeyError in set\_vif\_bandwidth\_config * Add expected\_errors for migrations v3 * Add alias as prefix to request params for user\_data v3 * Fix migrations index * Should finish allocating network before VM reaches ACTIVE * Fixes missing host in Hyper-V get\_volume\_connector * Fix various cells issues due to object changes * Document CONF.default\_flavor is for EC2 only * Revert task state when terminate\_instance fails * Revert "Make compute\_capabilities\_filter use ..." 
* Add resource tracking to build\_and\_run\_instance * Link Service.compute\_node with ComputeNode object * Add ComputeNode object implementation * Add Service object implementation * Make compute\_api use KeyPair objects * Add KeyPair object * Fix spice/vnc console api samples tests * Fix network manager tests to use correct network host * Stub out get\_console\_topic() in test\_create\_console * Stub out instance\_fault\_create() in compute tests * Fix confirm\_resize() mock in compute tests * Fix rpc calls on pre/post live migration tests * Stub out setup\_networks\_on\_host() in compute tests * maint: remove redundant disk\_cachemode validation entry * Fix unicode key of azcache can't be stored to memcache * XenAPI: SR location should default to location stored in PBD * XenAPI: Generic Fake.get\_all\_records\_where implementation * XenAPI: Return platform\_version if no product\_version * XenAPI: Support local connections * Delete expired instance console auth tokens * Fix aggregate creation/update with null or too long name * Fix live migration test for no scheduler running * Fix get\_diagnostics() test for no compute consumer * Stubout reserve\_block\_device\_name() in test * Stubout deallocate\_for\_instance() in compute tests * Stub out net API sooner in servers API test * PCI utils * Object support for instance groups * Add RBD supporting to libvirt for creating local volume * Add alias as prefix to request params for availability\_zone v3 * Remove deprecated legacy network info model in Hypervisor drivers * Correct the authorizer for extended-volumes v3 * emit warning while running flake8 without virtual env * Adds Instance UUID to rsync debug logging * Fixes sync issue for user level resources * Fix Fibre Channel attach for single WWN * nova.conf configurable gzip compression level * Stub out more net API methods floating IP DNS test * Enable CastAsCall for test\_api\_samples * Stub out attach\_volume() in test\_api\_samples * Fix remove\_fixed\_ip test with CastAsCall * Add add\_aggregate\_to\_host() to FakeDriver * Fix api samples image service stub * Add CastAsCall fixture * Enable consoleauth service during ec2 tests * Disable periodic tasks during integration tests * Use ExceptionHelper to bypass @client\_exceptions * Clean up some unused wrap\_exception() stuff * Add new compute method for building an instance * VMware: provide a coherent message to user when viewing console log * Use new BDM syntax when determining boot metadata * Allow more than one ephemeral device in the DB * Port flavormanage extension to v3 API part 1 * Correct the status code to 201 for create v3 * Pop extra keys from context in from\_dict() * Don't initialize neutronv2 state at module import * Remove instance exists check from rebuild\_instance * Remove unused variables in test\_compute\_cells * Fix fake image\_service import in v3 test\_disk\_config * Updates tools/config/README * xenapi: Added iPXE ISO boot support * Log exception details setting vm\_state to error * Fix instance metadata access in xenapi * Fix prep\_resize() stale system\_metadata issue * Implement hard reboot for powervm driver * Use the common function is\_neutron in servers.py * Make xenapi capabilities['enabled'] use service enabled * Remove duplicate test from V3 version of test\_hosts * Remove unused nova.tests.image.fake code * Remove unused fake run\_instance() method * Remove use of fake\_rabbit in Nova * libvirt: fix {attach,detach}\_interface() * Added test case in test\_migrations for migration 208 * Add flag to make 
IsolatedHostsFilter less restrictive * Add unique constraint to AggregateMetadata * Fix a typo in test\_migrations for migration 209 * Remove duplicate variable \_host\_state * enhance description of share\_dhcp\_address option * Adds missing V3 API scheduler hints testcase * [v3] Show detail of an quota in API os-quota-sets * Remove legacy network model in tests and compute manager * Remove redundant \_create\_instance method from test\_compute * Add jsonschema to Nova requirements.txt * Remove docstrings in tests * Fix scheduler prep\_resize deprecated comments * Search filters for get\_all\_system\_metadata should use lists * fix volume swap exception cases * Set VM back to its original state if cold migration failed * Enforce flavor access during instance boot * Stub out entry points in LookupTorrentURLTestCase * Port volumes swap to the new API-v3 * correct the name style issue of ExtendedServerAttributes in v3 api * Fix IVS vif to correctly delete interfaces on unplug * Adding support for iSER transport protocol * libvirt: allow passing 'os\_type' property to glance * Fixes auto confirm invalid error * Fix ratelimiting * quantum pxeboot-port support for baremetal * baremetal: Log IPMI power on/off timeouts * VMware: Added check for datastore state before selection * Boot from image destination - volume * Virt driver flag for different BDM formats * Refactor how BDMs are handled when booting * Change RPC to use new BDM format for instance boot * Make API part of instance boot use new BDM format * Add Migration object * Fix untranslated log messages in libvirt driver * Fix migration 210 tests for PostgreSQL * Handle InstanceInvalidState of soft\_delete * Don't pass RPC connection to pre\_start\_hook * VMware: Ensure Neutron networking works with VMware drivers * Unimplemented suspend/resume should not change vm state * Fix project\_user\_quotas\_user\_id\_deleted\_idx index * Allow Cinder to specify file format for NFS/GlusterFS * Add migration with missing fkeys * Implement front end rate-limiting for Cinder volume * Update mailmap * Fixup some non-unity-ness to conductor tests * Add scheduler utils unit tests * Convert admin\_actions ext tests to unit tests * Unit-ify the compute API resize tests * Raises masked AssertionError in \_test\_network\_api * Have tox install via setup.py develop * Set launch\_index to right value * Add passing a logging level to processutils.execute * Clear out service disabled reason on enable for V3 API * Fix HTTP response for PortInUse during boot (v3 API) * Adds infra for v3 API sample creation * Remove deprecated CONF.fixed\_range * Offer a paginated version of flavor\_get\_all * Port integrated tests for V3 API * Refactor integrated tests to support V2 and V3 API testing Part 2 * Refactor integrated tests to support V2 and V3 API testing * Fix cells manager RPC version * Upgrade to Hacking 0.7 * Fix logic in add\_host\_to\_aggregate() * Enforce compute:update policy in API * Removed the duplicated \_host\_state = None in libvirt driver * Sync gettextutils from oslo-incubator * Fix typo in exception message * Fix message for server name with whitespace * Demote personalities from core of API v3 as extensions os-personality * Port disk\_config API to v3 Part 2 * remove \_action\_change\_password the attribute in V3 server API * Fix exception handling in V3 API coverage extension * Remove "N309 Python 3.x incompatible construct" * Allow swap\_volume to be called by Cinder * Remove trivial cases of unused variables * Handle NeutronClientException in 
secgroup create * Fix bad check for openstack versions (vendor\_data/config drive) * Make compute\_capabilities\_filter use information from DB not RPC * Make affinity\_filters use host\_ip from DB not RPC * db: Add host\_ip and supported\_instances to compute\_nodes * Add supported\_instances to get\_available\_resource to all virt drivers * libvirt: sync get\_available\_resources and get\_host\_stats * Clean up unimplemented methods in the powervm driver * Make InvalidInstanceIDMalformed an EC2 exception * Fix one port can be attached to more devices * Removed code duplication in test\_get\_server\_\*\_by\_id * Add option for QEMU Gluster libgfapi support * Moves compute.rpcapi.prep\_resize call to conductor * Fix get\_available\_resource docstrings * Fix spelling in image\_props\_filter * Fix FK violation in ConsoleTestCase * Fix ReservationTestCase on PostgreSQL * Fix instance\_group\_delete() DB API method * Fix capitalization, it's OpenStack * Add test cases to validate neutron ports * Add expected\_errors for extension quota\_classes v3 * Fix leaking of image BDMs * Moved tests for server.delete * Fix VMwareVCDriver to support multi-datastore * Fixes typo in \_\_doc\_\_ of /libvirt/blockinfo.py * User quota update should not exceed project quota * Port "Accept is\_public=None .." to v3 tree * Remove clear\_rabbit\_queues script * Don't need to init testr in run\_tests.sh * Imported Translations from Transifex * Deprecate conductor's compute\_reboot() interface * Deprecate conductor's compute\_stop() interface * Make compute\_api use InstanceAction object * Add basic InstanceAction object * Add delete() operation to InstanceInfoCache * Make compute\_api use Instance.destroy() * Add Instance.destroy() * Make compute\_api use Instance.create() * Change swap\_volume volume\_api calls to use ID * Fix H501: Do not use locals() for string formatting * fix libguestfs mount order when inspecting * Imported Translations from Transifex * powervm: add test case for get\_available\_resource * Fix to allow ipv6 in host\_ip for ESX/vSphere driver * Improve performance of driver's get\_available\_nodes * Cleanup exception handling on evacuate * Removed code for modular exponentiation, pow() already does this * Remove unsafe XML parsing * Fix typo with network manager service\_name * Remove old legacy network info model in libvirt driver * maint: remove redundant default=None for config options * Fix simultaneous timeout with smart iptables usage * xenapi: send identity headers from glance plugin * Catch ldap ImportError * xenapi: refactor - extract get\_virtual\_size * xenapi: refactor - extract get\_stream\_funct\_for * xenapi: test functions for \_stream\_disk * Check host exists before evacuate * Fix EC2 API Fault wrapper * Fix deferred delete use of objects * Remove unsafe XML parsing * Update BareMetal driver to current nova.network.model * Personality files can be injected during server rebuild * Need to allow quota values to be set to zero * Merged flavor\_disabled extension into V3 core api * Merged flavorsextraspecs extension into core API * Code dedup in test\_update\_\* * Move tests test\_update\_\* to separate class * VMware: fix rescue/unrescue instance * Add an exception when doesn't have permissions to operate vm on hyper-v * Remove dead capabilities code * Spelling correction in test\_glance.py * Enhance object inheritance * Enable no\_parent and file\_only security * Add Instance.create() * Pull out instance object handling for use by create also * Make fake\_instance handle 
security groups * Fix instance actions testing * Sync models with migrations * Wrong unique key name in 200 migration * Remove unused variable * Make NovaObject.get() avoid lazy-load when defaulting * Fix migration downgrade 146 with mysql * Remove the indexes on downgrade to work with MySQL * Downgrade MySQL to the same state it used to be * Format CIDR strings as per storage * Fix migration downgrade 147 with mysql * Fix typo in compute.rpcapi comments * Imported Translations from Transifex * Avoid extra glance v2 locations call! * xenapi: Adding BitTorrent download handler * xenapi: remove dup code in make\_step\_decorator * Retry failed instance file deletes * xenapi: retry when plugin killed by signal * Do not use context in db.sqla.api private methods * Finish DB session cleanup * Clean up session in db.sqla.api.migration\_\* methods * Clean up session in db.sqla.api.network\_\* and sec\_groups\_\* methods * Don't inject files while resizing instance * Convert CamelCase attribute naming to camel\_case for servers V3 API * Convert camelCase attribute naming to camel\_case * Add plug-in modules for direct downloads of glance locations * Allow user and admin lock of an instance * Put fault message in the correct field * Fix Instance objects with empty security groups * db: Remove deprecated assert\_unicode attribute * VlanManager creates superfluous quota reservations * xenapi: allow non rsa key injection * Add expected\_errors for extensions simple\_tenant\_usage v3 * Clean destroy for project quota * Add expected\_errors for extension Console v3 * Add expected\_errors for extension baremetal v3 * Clean up session in db.sqla.api.get\_ec2 methods * Clean up db.sqla.api.instance\_\* methods * remove improper usage of 'assert' * Support networks without gateway * Raise 404 when instance not found in admin\_actions API * Switch to Oslo-Incubator's EnvFilter rootwrap * xenapi: Moving Glance fetch code into image/glance:download\_vhd * Performs hard reboot if libvirt soft reboot raises libvirtError * xenapi: Rename imageupload image * Make nbd reservation thread-safe * Code dedup in class QuotaReserveSqlAlchemyTestCase * Fix multi availability zone issue part 1 * Fix instance\_usage\_audit\_log v3 follow REST principles * Update mailmap * Add obj\_attr\_is\_set() method to NovaObject * Add ObjectActionFailed exception and make Instance use it * Fix change detection logic in conductor * Convert pause/unpause to use objects * Make delete/soft\_delete use objects * Refactor compute API's delete to properly do local soft\_deletes * Add identity headers while calling glanceclient * xenapi: Reduce code duplication in vmops * vendor-data minor format / style cleanups * maint: remove unused exceptions * Add support for Neutron https endpoint * Add index to reservations.uuid column * Refactor EC2 API error handling code * Cleanup copy/paste in test\_quota\_sets * Make EvacuateTest DRYer * Add expected\_errors for extensions quota\_sets and hypervisors * Remove generic exception catching for admin\_actions API v3 * Demote admin-passwd from core of API v3 as extensions os-admin-password * handle auto assigned flag on allocate floating ip * Add expected\_errors for extension shelve v3 * Use cached nwinfo for secgroup rules * Sync config.generator from Oslo * Remove \* import from xenserver plugins * EC2-API: Fix ambiguous ipAddress/dnsName issue * xenapi: no image upload retry on certain errors * Add error checking around host service checking * add vendor\_data to the md service and config drive * 
Moves compute.rpcapi.prep\_resize call to scheduler.manager * Removed scheduler doc costs section * Fix formatting on scheduler documentation * Add expected\_errors for extension server\_diagnostics V3 * Fix extensions agent follow API v3 rules * XenAPI: Change the default SR to be the pool default * Fix flavor\_access extension follow API V3 rules * Add notification for live migration call * Correct status code and response for quota\_sets API v3 * Fixes for v3 API servers tests * Remove sleep from service group db and mc tests * [xenapi] Unshadow an important test case class * Fix and Gate on H303 (no wildcard imports) * Remove unreachable code * powervm: pass on unimplemented aggregate operations * Fix timing issue in SimpleTenantUsageSample test * Code dedup in virt.libvirt.test\_imagecache.test\_verify\_checksum\_\* * Move tests test\_verify\_checksum\_\* to separate class * Logging virtual size of the QCOW2 * Add expected\_errors for extension certificates v3 * Support setting block size for block devices * Set the image\_meta for the instance booted from a volume * return 429 on API rate limiting occur * Add task\_state filter for nova list * Port server\_usage API to v3 part 2 * Port server\_usage API to v3 part 1 * Adds factory methods to load Hyper-V utils classes * Fix 2 pep8 errors in tests * Enabled hacking check for Python3 compatible print (H233) * Fix race between aggregate list and delete * Enforce authenticated connections to libvirt * Enabled the hacking warning for Py3 compatible octal literals (H232) * Remove fping plugin from V3 API * Moves scheduler.rpcapi.prep\_resize call on compute.api to conductor * Fix some Instance object class usage errors * xenapi: remove pv detection * Add expected\_errors for extension keypair and availablity\_zone * Add expected\_errors for extension console\_output v3 * Fix extension hosts follow API v3 rules * Use project quota as default user quota * Adds NoAuthMiddleware for V3 * xenapi: remove propagate xenapi\_use\_agent key * Update references with new Mailing List location * MinDisk size based on the flavor's Disk size * Use RetrievePropertiesEx instead of RetrieveProperties * Speed up test BareMetalPduTestCase.test\_exec\_pdutool * Port ips-extended to API-v3 ips core API Part 2 * Disable per-user rate limiting by default * Support EC2 API wildcards for DescribeTags filters * Remove the monkey patching of \_ into the builtins * Sync lockutils from Oslo * Set lock\_path in tests * Port ips-extended to API-v3 ips core API Part 1 * Fix postgresql failures related to Data type to API-v3 fixed-ip * Bypass queries which cause a contradiction * Add basic BDM format validation in the API layer * Servers API for the new BDM format * Fixes Hyper-V issues on versions prior to 2012 * Add expected\_errors for extension instance\_actions v3 * Fix extension server\_meta follow API v3 rules * Ensure that uuid is returned with mocked instance * Code dedup in class InstanceTypeExtraSpecsTestCase * Add expected\_errors for extension cells V3 * Add expected\_errors for extension\_info V3 * Add latest oslo DB support * Add note why E712 is ignored * Clarify instance\_type vs flavor in nova-manage * Fix leaky network tests * Fix HTTP response for PortNotFound during boot * Don't pass empty image to filter on live migration * Start using hacking 0.6 * Set VM back to its original state if cold migration failed * xenapi: ensure vcpu\_weight configured correctly * Fix failing network manager unit tests * Add expected\_errors for extensions services and 
server\_password v3 * Update oslo.config.generator * Fix the is\_volume\_backed\_instance check * Add support for volume swap * Fix policy failure on image\_metadata calls * Sync models for AgentBuild, Aggregates, AggregateHost tables * Imported Translations from Transifex * Make ServerXMLSerializationTest DRYer * Port migrations extension to v3 API part 2 * Port migrations extension to v3 API part 1 * xenapi: Fix console rotate script * Sync some of Instance\* models with migrations * Fix extension rescue follow API v3 rules * Per-project-user-quotas for more granularity * Add unique constraint to InstanceTypeExtraSpecs * Remove instance\_metadata\_get\_all\* from db api * Merged flavorextradata extension (ephemeral disk size) into core API * Fixed tests for flavor swap extension after merging in core API * Remove hostname param from XenApi after first boot * Cell Scheduler support for hypervisor versions * Fix flavor v3 follow API v3 rules * Sync sample config file generator with Oslo * Allow exceptions to propagate through stevedore map * Create vmware section * Sync latest rpc changes from oslo-incubator * Check instance on dest once during block migration * Revert "Add requests requirement capped <1.2.1." * Unit-ify compute\_api delete tests * Convert network API to use InfoCache object * Make InfoCache.network\_info be the network model * Make shelve pass old-world instance object to conductor * Make admin API state resets use Instance.save() * Deduplicate data in TestAddressesXMLSerialization * Move \_validate\_int\_value controller func to utils * Correct the action name for admin\_actions API v3 * Fixing dnsdomain\_get call in nova.network.manager * Raise exception if both port and fixed-ip are in requested networks * Sync eventlet\_backdoor from oslo-incubator * Fix up trivial license mismatches * Implements host uptime API call for cell setup * Ensure dates are dates, not strings * Use timeutils.utcnow() throughout the code * Add indexes to sqlite * Fix iptables rules when metadata\_host=127.0.0.1 * Sync gettextutils from oslo * Handle instance objects in conductor compute\_stop * Config drive attached as cdrom * Change EC2 client tokens to use system\_metadata * Check that the configuration file sample is up to date * Make servers::update() use Instance.save() to do the work * Make Instance.save() handle cells DB updates * Convert suspend/resume to use objects * Make compute\_api.reboot() use objects * Fix HTTP response for PortInUse during boot * Fix DB access when refreshing the network cache * Use valid IP addresses values in tests * Add ability to factor in per-instance overheads * Send updated aggregate to compute on add/rm host * Fix inconsistency between Nova-Net and Neutron * Fix parse\_transport\_url when url has query string * xenapi: no glance upload retry on 401 error * Code dedup in test\_libvirt\_vif * Raise exceptions when Spice/VNC are unavailable * xenapi: Pass string arguments to popen * Add rpcapi tests for shelving calls * Create key manager interface * Remove duplicate cells\_rpcapi test * ec2-api: Disable describing of instances using deleted tags as filter * Disable ssl layer compression for glance requests * Missed message -> msg\_fmt conversion * Refresh network cache when reassigning a floating IP in Neutron * Remove unnecessary comments for instance rebuild tests * Add missing tests for console\_\* methods * Force reopening eventlet's hub after fork * Remove project\_id from alternate image link path * Fixes wrong action comment 'lock' to 'unlock' * 
Add expected\_errors for extension extended\_volumes v3 * port BaremetalNodes API into v3 part2 * port baremetal\_nodes API into v3 part1 * Add validation of available\_zone during instance create * Move resource usage sync functions to db backend * Remove locals() from various places * Add expected\_errors for extension evacuate v3 * Add expected\_errors for extension deferred\_delete v3 * Fix accessing to '/' of metadata server without any checks to work * Fix duplicate osapi\_hide\_server\_address\_states config option * API for shelving * Fix shelve's use of system\_metadata * Fix Instance object handling of implied fields * Make Instance object properly update \*metadata * Support Client Token for EC2 RunInstances * Change get\_all\_instance\_metadata to use \_get\_instances\_by\_filters * Add a new GroupAffinityFilter * Move a migration test to MigrationTestCase * Use db.flavor\_ instead of db.instance\_type\_ * Periodic task for offloading shelved instances * Shelve/unshelve an instance * Code dedup in class ImagesControllerTest * Assert backing\_file should exist before attempting to create it * Add API-v3 merged core API into core API list * Don't ignore 'capabilities' flavor extra\_spec * Support scoped keys in aggregate extra specs filter * Fix blocking issue when powervm calculate checksum * Avoid shadowing Exception 'message' attribute * Code dedup in class TestServerActionRequestXMLDeserializer * Fix mig 186 downgrade when using sqlalchemy >= 0.8 * Move test\_stringified\_ips to InstanceTestCase * Move \*\_ec2\_\* tests in test\_db\_api to own test case * Code dedup in class ImageXMLSerializationTest * Fix malformed format string * Fix EC2 DescribeTags filter * Code dedup in test\_libvirt\_volume * Port AttachInterfaces API to v3 Part 2 * Make ServersViewBuilderTest DRYer * Move test\_security\_group\_update to SecurityGroupTestCase * Code dedup in class ServersControllerCreateTest * Code dedup in tests for server.\_action\_rebuild * Moved tests for server.\_action\_rebuild * Move bw\_usage\_\* tests in test\_db\_api to own test case * Move dnsdomain\_\* tests in test\_db\_api to own test case * Remove redundant if statements in cells.state * Move special cells logic for start/stop * Port used limits extension to v3 API Part 2 * Avoid deleting user-provided Neutron ports if VM spawn fails * Fix nic order not correct after reboot * Porting os-aggregates extensions to API v3 Part 2 * Porting os-aggregates extensions to API v3 Part 1 * Porting server metadata core API to API v3 Part 2 * Porting server metadata core api to API v3 Part 1 * Port limits core API to API-v3 Part 2 * xenapi: Only coalesce VHDs if needed * Don't attach to multiple Quantum networks by default * Load cell data from a configuration file * Fix filtering aggregate metadata by key * remove python-glanceclient cap * Remove duplicated key\_pair\* tests from test\_db\_api * Porting limits core api to API v3 Part 1 * Add missing tests for db.api.instance\_\* methods * Fix IPAddress and CIDR type decorators * Complete deletion when compute manager start-up * Port user\_data API to v3 Part 2 * Add legacy flag to get\_instance\_bdms * XenAPI: Refactor Fake to create pools, SRs and VIFs automatically * Port flavor\_rxtx extension to v3 API Part 2 * Port flavor\_rxtx extension to v3 API Part 1 * Fix aggregate\_get\_by\_host host filtering * Fix v3 hypervisor extension servers action follow REST principles * xenapi:populating hypervisor version in host state * Port attach and detach of volume-attachment into 
os-extended-volume v3 * Port deferredDelete API to v3 Part 2 * Fix status code for coverage API v3 * Port instance\_actions API to v3 Part 2 * port instance\_actions API into v3 part1 * Prompt error message when creating aggregate without aggregate name * Port used limits extension to v3 API Part 1 * Makes \_PATH\_CELL\_SEP a public global variable * port disk\_config API into v3 part1 * Imported Translations from Transifex * Remove locals() from virt directory * Handle ImageNotAuthorized exception * Port AvailabilityZone API to v3 Part 2 * port AvailabilityZone API into v3 part1 * Port service API to v3 Part 2 * Imported Translations from Transifex * Unify duplicate code for powering on an instance * Port hide srvr addresses extension to v3 API Pt2 * Sync v2/v3 console\_output API extensions * Port extended status extension to v3 API Part 2 * Port os-console-output extension to API v3 Part 2 * Changes select\_destinations to return dicts instead of objects * Better start/stop handling for cells * Make notifications properly string-convert instance datetimes * Fix default argument values on get\_all\_by\_filters() * Make db/api strip timezones for datetimes * Fix object\_compat decorator for non-kwargs * Imported Translations from Transifex * Remove unused recreate-db options from run\_test.sh * update Quantum usage to Neutron * Convert cells to use a transport URL * Fix aggregate update * Passing volume ID as id to InvalidBDMVolume exception * Handle instance being deleted while in filter scheduler * Port extended-availability-zone API into v3 part2 * Fix extensions os-remote-consoles to follow API v3 rules * Add unique constraints to AggregateHost * Unimplemented pause should not change vm state on PowerVM * Port server password extension to v3 API Part 2 * xenapi: Add disk config value to xenstore * Port hide srvr addresses extension to v3 API Pt1 * Add -U to the command line for pip * xenapi: support ephemeral disks bigger than 2TB * Cells: Make bandwidth\_update\_interval configurable * Add \_set\_instance\_obj\_error\_state() to compute manager * Update v3 servers API with objects changes * xenapi: enable attach volumes to non-running VM * Change force\_dhcp\_release default to True * Revert "Sync latest rpc changes from oslo-incubator" * Sync 10 DB models and migrations * Make compute\_api.get() use objects natively * port Host API into v3 part2 * Port admin-actions API into v3 part2 * Fix cells manager rpc api version * Allow ::/0 for IPv6 security group rules * Fix issue with pip installing oslo.config-1.2.0 * Sort output for unit tests in test\_describe\_tags before compare * Document rate limiting is per process * Properly pin pbr and d2to1 in setup.py * Add support for live\_snapshot in compute * xenapi: Stub out \_add\_torrent\_url for Vhd tests * Add Instance.get\_by\_id() query method * Fix duplicate fping\_path config option * Port images metadata functionality to v3 API Part 2 * Add unique constraint to ConsolePool * Enable core API-v3 to be optional when unit testing * Clarify flavorid vs instance\_type\_id in db * Sync db.models.Security\* and db.models.Volume\* * Sync db.models.Instance\* with migrations * Add "ExtendedVolumes" API extension * Fix misc issues with os-multinic v3 API extension * Port multinic extension to v3 API Part 2 * Port security groups extension to v3 API Part 2 * Port security groups extension to v3 API Part 1 * Add missing help messages for nova-manage command * Validate volume\_size in block\_device\_mapping * Imported Translations from 
Transifex * Fix info\_cache and bw\_usage update race * xenapi: glance plugin should close connections * Change db.api.instance\_type\_ to db.api.flavor\_ * Replace get\_instance\_metadata call in api.ec2.cloud.\_format\_instances * Add unique constraint to AgentBuild * Ensure flake8 tests run on all api code * Sync notifier change from oslo-incubator * Sync harmless changes from oslo-incubator * Sync latest rpc changes from oslo-incubator * Add missing matchmaker\_ring * Port extended-server-attributes API into v3 part2 * List migrations through Admin API * Add a VIF driver for IOVisor engine * port Service API into v3 part1 * Port admin-actions API into v3 part1 * Port fping extension to v3 API Part 2 * Disassociate fixed IPs not known to dnsmasq * Imported Translations from Transifex * Allow filters to only run once per request if their data is static * Port extended-availability-zone API into v3 part1 * Update openstack.common.config * Export just the volume metadata for the database to be populated * port Deferred\_delete API into v3 part1 * Misc fixes for v3 evacuate API extension * Imported Translations from Transifex * Baremetal ensures node is off before powering on * Remove references to deprecated DnsMasqFilter * Port user\_data API to v3 Part 1 * Update instance.node on evacuate * Fix formatting errors in documentation * Use oslo.sphinx and remove local copy of doc theme * Remove doc references to distribute * Sync install\_venv\_common from oslo * Make EC2 API request objects instead of converting them * Make instance show and index use objects * Remove conductor usage from consoleauth service * xenapi: Stub out entry points for BitTorrent tests * Fix debug message for GroupAntiAffinityFilter * Add unique constraints to Service * Add unique constraint to FixedIp * Fixed columns list in indexes * Add cinder cleanup to migrations * Change unique constraint in VirtualInterface * Changes ComputeTaskManager class to inherit base.Base * Moves populate retry logic to the scheduler utils * Exceptions raised by quantum validate\_networks result in 500 error * Fix and gate on E125 * Add object (de)serialization support to cells * Add cells get\_cell\_type() method * Add fill\_faults() batch operation to InstanceList * Make api\_samples reboot test use a plausible scenario * Fix compute\_api object handling code in cells messaging * Fix power\_state lookup in confirm\_resize * Make flavors is\_public option actually work * Imported Translations from Transifex * hyperv: Fix vmops.get\_info raises InstanceNotFound KeyError * Make instance\_update() string-convert IP addresses * Refactor compute\_api reboot tests to be unit-y * Refactors select\_destinations to return HostState objects * PowerVM resize and migrate test cases * Clear out service disabled reason on enable * Port agent API to v3 Part 2 * Fix v3 hypervisor extension search action follow REST principles * Fix resize ordering for COW VHD * Add inst\_type parameter * Store volume metadata as key/value pairs * Fixes a typo on AggregateCoreFilter documentation * xenapi: Tidy up Popen calls to avoid command injection attacks * Remove notify\_on\_any\_change option * Add unique constraints to Quota * Port images metadata functionality to v3 API Part 1 * Port scheduler hints extension to v3 API Part 2 * Adding action based authorization for keypairs * Port multinic extension to v3 API Part 1 * Port hypervisor API into v3 part2 * port Instance\_usage\_audit\_log API into v3 part2 * port Instance\_usage\_audit\_log API into v3 part1 * 
Fix metadata for create in child cell * update xen/vmware virt drivers not to hit db directly * Reduce nesting in instance\_usage\_audit * Port os-console-output extension to API v3 Part 1 * Fix to integer cast of length in console output extension * Imported Translations from Transifex * Add notifiers to both attach and detach volumes * Make test\_deferred\_delete() be deterministic * Added functionality for nova hooks pass functions * Fix compatibility with older confirm\_resize() calls * Pass instance host-id to Quantum using port bindings extension * libvirt: Fix spurious backing file existence check * Add unique constraint for security groups * powervm: make get\_host\_uptime output consistent with other virt drivers * Remove locals() from virt/vmwareapi package * Add HACKING check for db session param * Select disk driver for libvirt+Xen according to the Xen version * Port coverage API into v3 part2 * Port coverage API into v3 part1 * Fix grizzly compat issue in conducor rpc api * Xenapi shutdown should return True if vm is shutdown * Break out Compute Manager unit tests * Break out compute API unit tests * port Host API into v3 part1 * Imported Translations from Transifex * Standardize use of nova.db * Check system\_metadata type in \_populate\_instance\_for\_create * Clean up and make HACKING.rst DRYer * Sync db.models with migrations * Refactor ServerStatusTest class * Move tests db.api.instance\_\* to own class * Add tests for \`db.console\_pool\_\*()\` functions * Fix binding of SQL query params in DB utils * Make db.fakes stub out API not sqlalchemy * Reassign MAC address for vm when resize\_revert * test\_xmlutil.py covers more code in xmlutil.py * Handle UnexpectedTaskState and InstanceNotFound exceptions * Port quota classes extension to v3 API Part 2 * Ports image\_size extension to v3 API * xenapi: Add configurable BitTorrent URL fetcher * remove locals() from virt/hyperv package * Add resume state on host boot function to vmware Hyper * Port server\_diagnostics extension to v3 API Part2 * Port images functionality to v3 API Part 2 * Port cells extension to v3 API Part 2 * Notification support for host aggregate related operation * Fix vol\_usage\_update() DB API tests * Port consoles extension API into v3 part2 * Port consoles extension API into v3 part1 * Imported Translations from Transifex * New select\_destinations scheduler call * Session cleanup for db.security\_group\_\* methods * fix invalid logging * Port scheduler hints extension to v3 API Part 1 * Port config\_drive API to v3 Part 2 * Port config drive API to v3 Part 1 * Port images functionality to v3 API Part 1 * Moves scheduler.manager.\_set\_vm\_state\_and\_notify to scheduler.utils * VNC console does not work with VCDriver * Sane rest API rate limit defaults * Ignore lifecycle events for non-existent instances * Fix resizes with attached file-based volumes * Remove trivial cases of unused variables (3) * Remove locals() from compute directory * Hypervisor uptime fails if service is disabled * Fix metadata access in prep for instance objects * Sync to\_primitive() IPAddress support from Oslo * Merged flavor\_swap extension into core API * Fix typo for instance\_get\_all\_by\_filters() function * Implement get\_host\_uptime for powervm driver * Port flavor\_disabled extension to v3 API Part 2 * Fix sqlalchemy utils * Port flavor\_disabled extension to v3 API Part 1 * Port flavor\_access extension to v3 API Part 2 * Port flavor\_access extension to v3 API Part 1 * Fixes for quota\_sets v3 extension * Port 
server password extension to v3 API Part 1 * Port Simple\_tenant\_usage API to v3 Part 2 * xenapi: Remove vestigial \`compile\_metrics\` code * Add update() method to NovaObject for dict compatibility * Add obj\_to\_primitive() to recursively primitiveize objects * Make sure periodic instance reclaims continues on error * Remove broken config\_drive image\_href support * Report the az based on the value in the instance table * Allow retrying network allocations separately * Imported Translations from Transifex * Better default for my\_ip if 8.8.8.8 is unreachable * Fix a couple typos in the nova.exception module * Make fake\_network tolerant of objects * Prepare fake instance stubs for objects * Make info\_cache handle when network\_info is None * Fix instance object's use of a db query method parameter * Make NovaObject support the 'in' operator * Add Instance.fault * Add basic InstanceFault model * xenapi: Make BitTorrent url more flexible * xenapi: Improve cross-device linking error message * db.compute\_node\_update: ignore values['update\_at'] * Make sure periodic cleanup of instances continues on error * Fix for failure of periodic instance cleanup * Update instance properties values in child cells to create instance * port Attach\_interface API into v3 part1 * Sync models.Console\* with migrations * Port quota API into v3 part2 * Stop creating folders in virt unit tests * Imported Translations from Transifex * Refresh volume connections when starting instances * Fix trivial mismatch of license header * Exeption message of 'live migration' is not appropriate * Sync rpc from oslo-incubator * Fix types in test\_ec2\_ids\_not\_found\_are\_printable * Port quota API into v3 part1 * Skip security group code when there is no network * Sync db.models and migrations * Update pyparsing to 1.5.7 * Make InstanceList filter non-column extra attributes * Add Instance.security\_groups * Add basic SecurityGroup model * Revert XenApi virt driver should throw exception * Imported Translations from Transifex * Avoid redefining host to none in get\_instance\_nw\_info(...) * Extract live-migration scheduler logic from the scheduler driver * Fix the filtered characters list from console-log * Add invalid number checking in flavor creation api * Port quota classes extension to v3 API Part 1 * Remove usage of locals() from powervm virt package * Fix xenstore-rm race condition * Refactor db.security\_group\_get() instance join behavior * Fix serialization of iterable types * Fix orphaned instance from get\_by\_uuid() and \_from\_db\_object() * refactor security group api not to raise http exceptions * Perform additional check before live snapshotting * Do not raise NEW exceptions * Baremetal\_deploy\_helper error message formatting * Fix sys\_meta access in prep for instance object * Cells: Pass object for start/stop * Clarify the compute API is\_volume\_backed\_instance method * Add AggregateCoreFilter * Port extended-server-attributes into v3 part1 * Add AggregateRamFilter * Fix KeyError exception when scheduling to child cell * Port missing bits from httplib2 to requests * Revert "fixes nova resize bug when force\_config\_drive is set." 
* Port extended status extension to v3 API Part 1 * Fix quota logging on exceptions * XenApi virt driver should throw exception on failure * Retry quota\_reserve on DBDeadlock * Handle NoMoreFixedIps in \_shutdown\_instance * Make sure instance\_type has extra\_specs * Remove locals() from nova/virt/libvirt package * Fix importing InstanceInfoCache during register\_all() * Make \_poll\_unconfirmed\_resizes() use objects * Revert "Add oslo-config-1.2.0a2 and pbr>=0.5.16 to requirements." * Preserve network order when using ConfigDrive * Revert "Initial scheduler support for instance\_groups" * fixes nova resize bug when force\_config\_drive is set * Add troubleshoot to baremetal PXE template * Sync db.models.Quota\* with migrations * Modify \_assertEqualListsOfObjects() function * Port hypervisor API into v3 part1 * Remove a layer of nesting in \_poll\_unconfirmed\_resizes() * Use InstanceList for \_heal\_instance\_info\_cache() * Remove straggling use of all-kwarg object methods * Allow scheduler manager NoValidHost exception to pass over RPC * Imported Translations from Transifex * Add oslo-config-1.2.0a2 and pbr>=0.5.16 to requirements * Remove usage of locals() for formatting from nova.scheduler.\* * Libvirt driver: normalize variable names (part1) * xenapi: script to rotate the guest logs * Clean up scheduler tests * Drop unused \_virtual\_power\_settings global * Remove junk file when ftp transfer failure * xenapi: revisit error handling around calls to agent * Remove the unused plugins framework * Added unit tests for vmware cluster driver * Adds expected\_errors decorator for API v3 * Sync oslo-incubator gettextutils * port Simple\_tenant\_usage API into v3 part1 * Remove db session hack from conductor's vol\_usage\_update() * Converts scheduler.utils.build\_request\_spec return to json primitive * Revert "Delegate authentication to quantumclient" * Retry the sfdisk command up to 3 times * No support for double nested 64 bit guest using VCDriver * Fill context on objects in lists * Setting static ip= for baremetal PXE boot * Add tests for libvirt's reboot functionality * Check the instance ID before creating it * Add missing tests for nova.db.api.instance\_system\_metadata\_\* * Add err\_msg param to baremetal\_deploy\_helper * Remove \_is\_precooked pre-cells Zones hacks * Raise max header size to accommodate large tokens * Make NovaObject support extra attributes in items() * Imported Translations from Transifex * Fix instance obj refresh() * Fix overzealous conductor test for vol\_usage\_update * Add missing tests for certificate\_\* methods * Log xml in libvirt \_create\_domain failures * Add unique constraints to Cell * Accept is\_public=None when listing all flavors * Add missing tests for cell\_\* methods * Add missing tests for nova.db.api.instance\_metadata\_\* * Don't deallocate network if destroy time out * Port server\_diagnostics extension to v3 API Part1 * Add old display name to update notification * Port fping extension to v3 API Part 1 * libvirt fix resize/migrates with swap or ephemeral * Allow reboot or rebuild from vm\_state=Error * Initial scheduler support for instance\_groups * Fix the ServerPasswordController class doc string * Imported Translations from Transifex * Cleanup certificate API extension * Enforce sqlite-specific flow in drop\_unique\_constraint * Remove unused cert db method * Fix bad vm\_state change in reboot\_instance() * Add rpc client side version control * xenapi: ensure agent check respects image flags * Drop \`bm\_pxe\_ips\` table from 
baremetal database * Adding fixed\_ip in create.end notification * Improved tests for instance\_actions\_\* * Refactored tests for instance\_actions\_\* * Add missing tests for provider\_fw\_rule\_\* methods * Session cleanup for db.security\_group\_rule\_\* methods * Add tests for nova.db.api.security\_group\_rule\_\* methods * Refactors qemu image info parsing logic * Port cells extension to v3 API Part 1 * Organize limits units and per-units constants * Fix flavor extra\_specs filter doesn't work for number * Replace utils.to\_bytes() with strutils.to\_bytes() * Updates nova.conf.sample * Remove bin lookup in conf sample generator * Refactor conf sample generator script * Remove unused arg from make\_class\_properties.getter method * Fix obj\_load() in NovaObject base class * Backup and restore object registry for tests * Fix the wrong reference by CONF * Port flavors core API to v3 tree * Remove usage of locals() from xenapi package * Remove trivial cases of unused variables (1) * Don't make nova-compute depend on iSCSI * Change resource links when url has no project id * Make sync\_power\_state routines use InstanceList * Enhance the validation of the quotas update * Add missing tests for compute\_node\_\* methods * Fix VMware Hyper can't honor hw\_vif\_model image property * Remove use of locals() in db migrations * Don't advertise mute cells capabilities upwards * Allow confirm\_resize if instance is in 'deleting' status * Port certificates API to v3 Part 2 * port agent API into v3 part1 * Port certificates API to v3 Part 1 * Naming instance directory by uuid in VMware Hyper * Revert "Fix local variable 'root\_uuid' ref before assign" * Use Python 3.x compatible octal literals * Fix and enable H403 tests * Remove usage of locals() from manager.py * Fix local variable 'root\_uuid' ref before assign * Improve the performance of migration 186 * Update to the latest stevedore * Quantum API \_get\_floating\_ip\_by\_address mismatch with Nova-Net * xenapi: remove auto\_disk\_config check during resize * xenapi: implement get\_console\_output for XCP/XenServer * Check libvirt version earlier * update\_dns() method optimization * Sync can\_send\_version() helper from oslo-incubator * Remove unused db api call * Quantumapi returns an empty network list * Add missing tests for nova.db.api.network\_\* * Cleanup overshadowing in test\_evacuate.py * Give a way to save why a service has been disabled * Cells: Add support for global cinder * Fix race conditions with xenstore * Imported Translations from Transifex * Remove explicit distribute depend * Fix assumed port has port\_security\_enabled * Rename functions in nova.compute.flavors from instance\_type * Remove redundant architecture property update in powervm snapshot * Use an inner join on aggregate\_hosts in aggregate\_get\_by\_host * xenapi: ensure instance metadata always injected into xenstore * Nova instance group DB support * Fix to disallow server name with all blank spaces * Replace functions in utils with oslo.fileutils * Refactors get\_instance\_security\_groups to only use instance\_uuid * Create an image BDM for every instance * DB migration to the new BDM data format * Fix dangling LUN issue under load with multipath * Imported Translations from Transifex * Add missing tests for s3\_image\_\* methods * Register libvirt driver with closed connection callback * Enhance group handling in extract\_opts * Removed code duplication in conductor.api * Refactored tests for instance\_fault\_\* * Added verbose error message in tests helper 
mixin * Adds v3 API extension discovery filtering * Adds support for the Indigo Virtual Switch (IVS) * Some libvirt driver lookups lacks proper exception handling * Put VM UUID to live migration error notification * Fix db.models.Instance description * Fix db.models.Certificate description * Fix db.models.ComputeNodeStats description * Fix db.models.ComputeNode description * Fix db.models.Service description * BDM class and transformation functions * Remove unused method in VMware driver * Cleanup nova exception message conversion * Update analyze\_opts to work with new nova.conf sample format * Remove unused methods from VirtAPI * Make xenapi use Instance object for host\_maintenance\_mode() * Make xenapi/host use instance objects for \_uuid\_find * Use InstanceList object for init\_host * Add Instance.info\_cache * Use Instance Objects for Start/Stop * Add lists of instance objects * Add base mixin class for object lists * Add deleted flag to NovaObject base * Export volume metadata to new instances * Sending volume IO usage broken * Rename unique constraints due to new convention * Replace openstack-common with oslo in HACKING.rst * Fixes test\_config\_drive unittest * Port evacuate API to v3 Part 2 * Port evacuate API to v3 Part 1 * Speeding up scheduler tests * Port rescue API to v3 Part 2 * Port rescue API to v3 Part 1 * Handle security group quota exceeded gracefully * Adds check that the core V3 API is loaded * Call virt.driver.destroy before deallocating network * More KeypairAPI cleanups * Improve Keypair error messages in osapi * Fix Keypair exception messages * Moving more tests to appropriate locations * Skip ipv6 tests on system without ipv6 support * Keypair API test cleanup * Alphabetize v3 API extension entry point list * Add missing exception to cell\_update() * Refactors scheduler.chance.select\_hosts to raise NoValidHost * Enhance unit test code coverage for availability zone * Converts 'image' to json primitive on compute.rpcapi.prep\_resize * Import osapi\_v3/enabled option in nova/test * Regenerate missing resized backing files * Moving \`test\_misc\` tests to better locations * Allocate networks in the background * Make the datetime utility function coerce to UTC * API to get the Cell Capacity * Update rpc/impl\_qpid.py from oslo * More detailed log in failing aggregate extra filter * xenapi: Added logging for sparse copy * Make object actions pass positional arguments * Don't snat all traffic when force\_snat\_range set * Add x-compute-request-id header when no response body * Call scheduler for run\_instance from conductor * correctly set iface-id in vmware driver * Fix a race where a soft deleted instance might be removed by mistake * Fix quota checks while resizing up by admin * Refactor libvirt driver exception handling * Avoiding multiple code loops in filter scheduler * Don't log warn if v3 API is disabled * Link to explanation of --checksum-full rule * Imported Translations from Transifex * Stop libvirt errors from outputting to strerr * Delete unused bin directory * Make instance object tolerate isotime strings * Add fake\_instance.py * Fix postgresql failures related to Data type * hardcode pbr and d2to1 versions * Silence exceptions from qpid connection.close() (from oslo) * Add Davanum to the mailmap * Fix VMwareVCdriver reporting incorrect stats * Adds ability to black/whitelist v3 API extensions * Clean up vmwareapi.network\_util.get\_network\_with\_the\_name * Imported Translations from Transifex * Normalize path for finding api\_samples dir * Add 
yolanda to the mailmap * Add notes about how doc generation works * python3: Add py33 to tox.ini * Improve Python 3.x compatibility * Ports consoles API to v3 API * Fix nova-compute fails to start if quantum is down * Handle instance directories correctly for migrates * Remove unused launch\_time from instance * Launch\_at and terminated\_at on server(s) response * Fixed two minor docs niggles * Adds v3 API disable config option * Fix bug where consoleauth depended on remote conductor service * Only update cell capabilites once * Ports ips api to v3 API * Make pylint ignore nova/objects/ * Set resized instance back to original vm\_state * Add power\_on flag to virt driver finish/revert migration methods * Cosmetic fix to parameter name in DB API * compute.api call conductor ComputeTaskManager for live-migrate * Removed session from reservation\_create() * Raise exception instances not exception classes * \_s3\_create handles image being deleted * Imported Translations from Transifex * Add instance object * Add base object model * Enhance multipath parsing * Don't delete sys\_meta on instance delete * Fix volume IO usage notifications been sent too often * Add missing os.path.abspath around csrfile * Fix colorizier thowing exception when a test fails * Add db test that checks that shadow tables are up-to-date * Sync shadow table for 159 migration * Sync shadow table for 157 migration * Sync shadow table for 156 migration * Add missing tests for nova.db.api.quota\_\* methods * Add tests for some db.security\_group\_\* methods * Fix \_drop\_unique\_constraint\_in\_sqlite() function * Clean up failed image transfers in instance spawn * Make testr preserve existing OS\_\* env vars values * Fix msg version type sent to cells RPC API * Verify that CONF.compute\_driver is defined * Fix EC2 RegisterImage ImageLocation starts with / * Support Cinder mount options for NFS/GlusterFS * Raise exception instances, not exception classes * Add update method of security group name and description * Cell weighing class to handle mute child cells * Add posargs support to flake8 call * Enumerate Flake8 E12x ignores * Fix and enable flake8 F823 * Fix and enable flake8 F812 * libvirt: improve the specification of network disks * Imported Translations from Transifex * In utils.tempdir, pass CONF.tempdir as an argument * Delegate authentication to quantumclient * Pull binary name from sys.argv[0] * Rename policy auth for V3 os-fixed-ips * Fix internationalization for some LOG messages * Enumerate Flake8 Fxxx ignores * Enable flake8 E721 * Removing misleading error message * No relevant message when stop a stopped VM * Cells: Add filtering and weight support * API Extensions framework for v3 API Part 2 * fix a misleading docstring * xenapi: make the xenapi agent optional per image * Fix config drive code logical error * Add missing conversion specifier to ServiceGroupUnavailable * Deprecate compute\_api\_class option in the config * Add node as instance attribute for notification * removes project\_id/tenant\_id from v3 api urls * Set up 'compute\_task' conductor namespace * Removed superflous eval usage * Fix log message * Sync shadow table for 179 migration * Remove copy paste from 179 migration * Sync shadow table for 175 and 176 migration * Change db \`deleted\` column type utils * Fix tests for sqlalchemy utils * Add missing tests for nova.db.api.quota\_class\_\* * Moved sample network creation out of unittest base class constructor * Add missing tests for db.api.reservation\_\* * add xml api sample tests to 
os-tenant-network * Remove locals() usage from nova.virt.libvirt.utils * IPMI driver sets bootdev option persistently * update mailmap * Imported Translations from Transifex * Remove tempest hack for create/rebuild checks * Better error message on malformed request url * virt: Move generic virt tests to nova/tests/virt/ * vmwareapi: Move tests under tests/virt/vmwareapi/ * hyperv: Move tests under nova/tests/virt/hyperv * Fix UnboundLocalError in powervm lvm cleanup code * Delete a quota through admin api * Remove locals() usage from nova.virt.libvirt.volume * Importing correlation\_id middleware from oslo-incubator * Make a few places tolerant of sys\_meta being a dict * Remove locals() from scheduler filters * Rename requires files to standard names * Imported Translations from Transifex * translates empty remote\_ip\_prefix to valid cidr for nova * Reset task\_state when resetting vm\_state to ACTIVE * xenapi: Moving tests under tests/virt/xenapi/ * xenapi: Disable VDI size check when root\_gb=0 * Remove ImageTooLarge exception * Move ImageTooLarge check to Compute API * Share checks between create and rebuild * Remove path\_exists from NFS/GlusterFS drivers * Removed session from fixed\_ip\_\*() functions * Catch InstanceNotFound in instance\_actions GET * Using unicode() to handle image's properties * Adds live migration support to cells API * Raise AgentBuildNotFound on updating/destroying deleted object * Add missing tests for nova.db.api.agent\_build\_\* methods * Don't update API cell on get\_nwinfo * Optimize SecurityGroupsOutputController by len(servers) * get\_instance\_security\_groups() fails if no name on security group * libvirt: Moving tests under tests/virt/libvirt * Make it easier to add namespaced rpc APIs * baremetal: Move tests under tests/virt/baremetal * Disallow resize if image not available * powervm: Move tests under tests/virt/powervm * Sync RPC serializer changes from Oslo * Fix missing argument to logging warning call * set ERROR state when scheduler hits max attempts * Sync latest RPC changes from oslo * Add notification for live migration * Add requests requirement capped <1.2.1 * Adding tests for rebuild image checks * Add ImageNotActive check for instance rebuild * Fix error in instance\_get\_all\_by\_filters() use of soft\_deleted filter * Fix resize when instance has no image * Fixes encoding issues for nova api req body * Update run\_tests.sh to run flake8 too * Added validation for networks parameter value * Added attribute 'ip' to server search options * Make nova-api use servicegroup.API.service\_is\_up() * Add memorycache import into the oslo config * Fix require\_context() decorators * Imported Translations from Transifex * Remove locals() from nova/cells/\* * Update mailmap * Strip exec\_dirs prefix from rootwrap filters * Clean up test\_api\_samples a bit * Remove unnecessary parens in test\_volumes * Use strict=True instead of \`is\_valid\_boolstr\` * Editable default quota support * Remove usage of locals() for formatting from nova.api.\* * Switch to flake8+hacking * Fix flake8 errors in anticipation of flake8 * Don't update DB records for unchanged stats * baremetal: drop 'prov\_mac\_address' column * The vm\_state should not be modified until the task is complete * Return Customer's Quota Usage through Admin API * Use prettyxml output * Remove locals() from messages in virt/disk/api.py * 'm1.tiny' now has root\_gb=1 * Cast \`size\` to int before comparison * Don't raise unnecessary stack traces in EC2 API * Mox should cleanup before stubs * 
Reverse compare arguments in filters tests * Don't inject settings for dynamic network * Add ca cert file support to cinder client requests * libvirt: Catch VIR\_ERR\_NO\_DOMAIN in list\_instances * Revert "Include list of attached volumes with instance info" * Sync rpc from oslo * Remove openstack.common.version * Fix for missing multipath device name * Add missing tests for db.fixed\_ip\_\*(). functions * xenapi: ensure vdi is not too big when resizing down * Cells: Don't allow active -> build * Fix whitespace issue in indent * Pass the proper admin context to update\_dhcp * Fix quantum security group driver to accept none for from/to\_port * Reverse path SNAT for DNAT floating-ip * Use Oslo's \`bool\_from\_string\` * Handle IPMI transient failures better * Improve unit tests for DB archiving * Remove "#!/usr/bin/env python" from .py files under nova/cmd * Add missing unique constraint to KeyPair model * Refactored tests for db.key\_pair\_\*() functions * Refactor nova.volume.cinder.API to reduce roundtrips with Cinder * Fix response from snapshot create stub * Hide lock\_prefix argument using synchronized\_with\_prefix() * Cleanups for create-flavor * Cleanup create flavor tests * Imported Translations from Transifex * Test for remote directory creation before shutting down instance * Fix run\_tests.sh usage of tools/colorizer.py * Move get\_table() from test\_migrations to sqlalchemy.utils * Convert Nova to use Oslo service infrastructure * Show the cause of virt driver error * Detach volume fails when using multipath iscsi * API extensions framework for v3 API * Sync service and threadgroup modules from oslo * Fix header issue for baremetal\_deploy\_helper.py * Extract getting instance's AZ into a helper module * Allow different paths for deploy-helper helpers * Show exception details for failed deploys * Imported Translations from Transifex * Check QCOW2 image size during root disk creation * Adds useful debug logging to filter\_scheduler * fix non reporting of failures with floating IP assignment * Improve message and logging for corrupt VHD footers * Cleanup for test\_create\_server\_with\_deleted\_image * Check cached SSH connection in PowerVM driver * Allow a floating IP to be associated to a specific fixed IP * Record smoketest dependency on gFlags * Make resize/migrated shared storage aware * Imported Translations from Transifex * Add pointer to compute driver matrix wiki page * xenapi: cleanup vdi when disk too big exception raised * Update rootwrap with code from oslo * Fixes typo in server-evacuate-req.xml * Fix variable referenced before assginment in vmwareapi code * Remove invalid block\_device\_mapping volume\_size of '' * Architecture property updated in snapshot libvirt * Add sqlalchemy migration utils.create\_shadow\_table method * Add sqlalchemy migration utils.check\_shadow\_table method * Change type of cells.deleted from boolean to integer * Pass None to image if booted from volume in live migration * Raise InstanceInvalidState for double hard reboot * Removes duplicate assertEqual * Remove insecure default for signing\_dir option * Removes unnecessary check for admin context in evacuate * Fix zookeeper import and tests * Make sure that hypervisor nodename is set correctly in FakeDriver * Optimize db.instance\_floating\_address\_get\_all method * Session cleanup for db.floating\_ip\_\* methods * Optimize instance queries in compute manager * Remove duplicate gettext.install() calls * Include list of attached volumes with instance info * Catch volume create 
exception * Fixes KeyError bug with network api associate * Add unitests for VMware vif, and fix code logical error * Fix format error in claims * Fixes mock calls in Hyper-V test method * Adds instance root disk size checks during resize * Rename nova.compute.instance\_types to flavors * Convert to using newly imported processutils * Import new additions to oslo's processutils * Imported Translations from Transifex * Enable live block migration when using iSCSI volumes * Nova evacuate failed when VM is in SHUTOFF status * Transition from openstack.common.setup to pbr * Remove random print statements * Remove security\_group\_handler * Add cpuset attr to vcpu conf in libvirt xml * Imported Translations from Transifex * Remove referances to LegacyFormatter in example logging.conf * libvirt: ignore NOSTATE in resume\_state\_on\_host\_boot() method * Sync oslo-incubator print statement changes * Fix stub\_instance() to include missing attributes * Add an index to compute\_node\_stats * Convert to using oslo's execute() method * Import latest log module from oslo * Being more defensive around the use\_ipv6 config option * Update hypervisor\_hostname after live migration * Make nova-network support requested nic ordering * nova coverage creates lots of empty folders * fix broken WSDL logic * Remove race condition (in FloatingIps) * Add missing tests for db.floating\_ip\_\* methods * Deprecate show\_host\_resources() in scheduler manager * Add force\_nodes to filter properties * Adds --addn-hosts to the dnsmasq arg list * Update our import of oslo's processutils * Update oslo-incubator import * Delete InstanceSystemMetadata on instance deletion * vmwareapi: Add supported\_instances to host state * xenapi: Always set other\_config for VDIs * Copy the RHEL6 eventlet workaround from Oslo * Move db.fixed\_ip\_\* tests from DbApiTestCase to FixedIpTestCase * Checks if volume can be attached * Call format\_message on InstanceTypeNotFound exception * xenapi: Don't swallow missing SR exception * Prevent rescuing a VM with a partially mounted volume * Fix key error when create lpar instance failed * Reset migrating task state for MigrationError exceptions * Volume IO usage gets reset to 0 after a reboot / crash * Sync small and safe changes from oslo * Sync jsonutils from oslo * Fix EC2 instance bdm response * Rename \_check\_image\_size to \_get\_and\_check\_image\_metadata * Convert the cache key from unicode to a string * Catch glance image create exceptions * Update to using oslo periodic tasks implementation * Import oslo periodic tasks support * import and install gettext in vm\_vdi\_cleaner.py * Fix baremetal get\_available\_nodes * Fix attach when running as root without sysfsutils * Make \_build\_network\_info\_model testable * Fix building quantumapi network model with network list * Add the availability\_zone to the volume.usage notifications * Add delete\_net\_interface function * Performance optimization for contrib.flavorextraspecs * Small whitespace tweak * Kill off usage of locals() in the filter\_scheduler * Remove local variable only used in logging * Create instance with deleting image * Refactor work with db.instance\_type\_\* methods * Fix flakey TestS3ImageService bug * Add missing snapshot image properties for VMware Hyper * Imported Translations from Transifex * Fix VMware Hyper console url parameter error * Update NovaBase model per changes on oslo.db.sqlalchemy * Send a instance create error notification * Refactor \_run\_instance() to unify control flow * set bdm['volume\_id'] 
to None rather than delete it * Destroy conntrack table on source host during migration * Adds tests for isolated\_hosts\_filter * Fixes race condition of deleting floating ip * Imported Translations from Transifex * Wrong proxy port in nova.conf for Spice proxy * Fix missing kernel output via VNC/Spice on boot * Fix bug in db.instance\_type\_destroy * Move get\_backdoor\_port to base rpc API * Move db.instance\_type\_extra\_specs\_\* to db.instance\_type\_\* methods * Add missing test for db.instance\_type\_destroy method * Fix powervm driver resize instance error * Support FlatDHCP network for VMware Hyper * Imported Translations from Transifex * Deprecate conductor ping method * Add an rpc API common to all services * If rescue fails don't error the instance * Make os.services.update work with cells * Fix fixed\_ip\_count\_by\_project in DB API * Add unit tests for /db/api.py#fixed\_ip\_\* * Add option to exclude joins from instance\_get\_by\_uuid * Remove unnecessary method argument * Improve Python 3.x compatibility * ec2 CreateVolumes/DescribeVolumes status mapping * Can now reboot rescued instances in xenapi * Allows xenapi 'lookup' to look for rescue mode VMs * Adds tests to xenapi.vm\_utils's 'lookup' method * Imported Translations from Transifex * Stop vm\_state reset on reboot of rescued vm * Fix hyperv copy file error logged incorrect * Fix ec2 CreateVolumes/DescribeVolumes status * Imported Translations from Transifex * Don't swallow PolicyNotAuthorized for resize/reboot actions * Remove unused exception and variable from scheduler * Remove unnecessary full resource audits at the end of resizes * Update the log module from oslo-incubator * Translate NoMoreFloatingIps exception * Imported Translations from Transifex * Fix up regression tester * Delete extra space to api/volumes message * Map internal S3 image state to EC2 API values * removing unused variable from a test * Translate cinder NotFound exception * hypervisor tests more accurate db * Added comments to quantum api client * Cleanup and test volume usage on volume detach * Import and convert to oslo loopingcall * Remove orphaned db method instance\_test\_and\_set * baremetal: VirtualPowerDriver uses mac addresses in bm\_interfaces * Sync rpc from oslo-incubator * Correct disk's over committed size computing error * Imported Translations from Transifex * Allow listing fixed\_ips for a given compute host * Imported Translations from Transifex * baremetal: Change input for sfdisk * Make sure confirm\_resize finishes before setting vm\_state to ACTIVE * Completes the power\_state mapping from compute driver and manager * Make compute/manager use conductor for unrescue() * Add an extension to show the mac address of a ip in server(s) * Cleans up orphan compute\_nodes not cleaned up by compute manager * Allow for the power state interval to be configured * Imported Translations from Transifex * Fix bug in os-availability-zone extension * Remove unnecessary db call in scheduler driver live-migration code * baremetal: Change node api related to prov\_mac\_address * Don't join metadata twice in instance\_get\_all() * Imported Translations from Transifex * Don't hide stacktraces for unexpected errors in rescue * Fix issues with check\_instance\_shared\_storage * Remove "undefined name" pyflake errors * Optimize some of compute/manager's periodic tasks' DB queries * Optimize some of the periodic task database queries in n-cpu * Change DB API instance functions for selective metadata fetching * Replace metadata joins with another 
query * xenapi: Make \_connect\_volume exc handler eventlet safe * Fix typo: libvir => libvirt * Remove multi scheduler * Remove unnecessary LOG initialisation * Remove unnecessary parens * Simplify random host choice * Add NOVA\_LOCALEDIR env variable * Imported Translations from Transifex * Clarify volume related exception message * Cleanup trailing whitespace in api samples * Add tenant/ user id to volume usage notifications * Security groups may be unavailable * Encode consoleauth token in utf-8 to make it a str * Catch NoValidHost exception during live-migration * Evacuated instance disk not deleted * Fix a bad tearDown method in test\_quantumv2.py * Import eventlet in \_\_init\_\_.py * Raise correct exception for duplicate networks * Add an extension to show the network id of a virtual interface * Fix error message in pre\_live\_migration * Add reset function to nova coverage * Imported Translations from Transifex * nova-consoleauth start failed by consoleauth\_manager option missing * set timeout for paramiko ssh connection * Define LOG globally in baremetal\_deploy\_helper * Allow describe\_instances to use tags for searches * Correct network uuid field for os-network extension * Only call getLogger after configuring logging * Add SecurityGroups API sample tests * Cannot boot vm if quantum plugin does not support L3 api * Add missing tests for instance\_type\_extra\_specs\_\* methods * Remove race condition (in InstanceTypeProjects) * Deprecate old vif drivers * Optimize resource tracker queries for instances * baremetal: Integrate provisioning and non-provisioning interfaces * Move console scripts to entrypoints * Remove deprecated Grizzly code * Fallback to conductor if types are not stashed * Imported Translations from Transifex * Resolve conflicting mac address in resize * Simplify and correct the bm partition sizes * Fix legacy\_net\_info guard * Fix SecurityGroups XML sample tests * Modify \_verify\_response to validate response codes * Fix a typo in attach\_interface error path * After migrate, catch and remove deleted instances * Grab instance for migration before updating usage * Explain why the give methods are whitelisted * libvirt: Get driver type from base image type * Guard against content being None * Limit the checks for block device becoming available * Fix \_error\_out\_instance exception handler * Raise rather than generating millions of IPs * Add unit tests for nova.volume.cinder.API * Update latest oslo.setup * baremetal: Drop unused columns in bm\_nodes * Remove print statements * Imported Translations from Transifex * Fix the python version comparison * Remove gettext.install() from nova/\_\_init\_\_.py * Sync latest gettextutils from oslo-incubator * Return 409 on creating/importing same name keypair * Delete tests.baremetal.util.new\_bm\_deployment() * Return proper error message when network conflicts * Better iptables DROP removal * Query quantum once for instance's security groups * quantum security group driver nova list shows same group * Sync in matchmaker and qpid Conf changes from oslo * improve handling of an empty dnsmasq --domain * Fix automatic confirmation of resizes for no-db-compute * 'injected\_files' should be base 64 encoded * Add missing unit tests for FlavorActionController * Set default fixed\_ip quota to unlimited * Accepts aws-sdk-java timestamp format * Imported Translations from Transifex * get context from req rather than getting a new admin context * Use Cluster reference to reduce SDK calls * Fix missing punctuation in docstring 
* xenapi: fix support for iso boot * Ensure only pickle-able objects live in metadata * sync oslo db/sqlalchemy module * Convert host value from unicode to a string * always quote dhcp-domain, otherwise dnsmasq can fail to start * Fix typo in the XML serialization os-services API * Add CRUD methods for tags to the EC2 API * Fix migrating instance to the same host * Rework time handling in periodic tasks * Show quota 'in\_use' and 'reserved' info * Imported Translations from Transifex * Fix quantum nic allocation when only portid is specified * Make tenant\_usage fall back to instance\_type\_id * Use format\_message on exceptions instead of str() * Add a format\_message method to the Exceptions * List AZs fails if there are disabled services * Switch nova-baremetal-deploy-helper to use sfdisk * Bring back colorizer again with error results * Imported Translations from Transifex * Adds Tilera back-end for baremetal * Always store old instance\_type during a migration * Make more readable error msg on quantum client authentication failure * Adding netmask to dnsmasq argument --dhcp-range * Add missing tests for db.instance\_type\_access\_\* methods * Remove race condition (in InstanceTypes) * Add missing tests for db.instance\_type\_\* methods * Imported Translations from Transifex * set up FakeLogger for root logger * Fix /servers/os-security-groups using quantum * NoneType exception thrown if driver live-migration check returns None * Add missing info to docstring * Include Co-authored-by entries in AUTHORS * Do not test foreign keys with SQLite version < 3.7 * Avoid using whitespace in test\_safe\_parse\_xml * xenapi: Retrieve VM uuid from xenstore * Reformat openstack-common.conf * Imported Translations from Transifex * Fixes Nova API /os-hosts missing element "zone" * disable colorizer as it swallows fails * Make iptables drop action configurable * Fixes argument order of quantumv2.api.get\_instance\_nw\_info * Make \_downsize\_quota\_delta() use stashed instance types * py2.6 doesn't support TextTestRunner resultclass * Reset ec2 image cache between S3 tests * Sync everything from oslo-incubator * Sync rpc from oslo-incubator * Don't log traceback on rpc timeout * Adds return-type in two functions' docstrings * Remove unnecessary checks in api.py * translate cinder BadRequest exception * Initialize compute manager before loading driver * Add a comment to placeholder migrations * xenapi: fix console for rescued instance * Fixes passing arbitrary conductor\_api argument * Make nova.virt.fake.FakeDriver useable in integration testing * Remove unnecessary DB call to find EC2 AZs * Remove outdated try except block in ec2 code * nova-manage vm list fails looking 'instance\_type' * Update instance network info cache to include vif\_type * Bring back sexy colorized test results * Don't actually connect to libvirtd in unit tests * Add placeholder migrations to allow backports * Change arguments to volume\_detach() * Change type of ssh\_port option from Str to Int * xenapi: rpmbuild fixes * Set version to 2013.2 2013.1.rc1 ---------- * Fix Hyper V instance conflicts * Add caching for ec2 mapping ids * Imported Translations from Transifex * fix add-fixed-ip with quantum * Update the network info when using quantum * List InstanceNotFound as a client exception * Refactor db.service\_destroy and db.service\_update methods * Fix console support with cells * Fix missing argument to QemuImageInfo * Add missing tests for db.virtual\_interface\_\* methods * Fix multiple fixed-ips with quantum * Add 
missing tests for db.service\_\* methods * Ensure that headers are returned as strings, not integers * Enable tox use of site-packages for libvirt * Require netaddr>=0.7.6 to avoid UnboundLocalError * Pass project id in quantum driver secgroup list * Fixes PowerVM spawn failed as missing attr supported\_instances * Fix RequestContext crashes w/ no service catalog * Prevent volume-attach/detach from instances in rescue state * Fix XenAPI performance issue * xenapi: Adding logging for migration plugin * libvirt: Tolerate existing vm(s) with cdrom(s) * Remove dead code * Remove unused virt.disk.api methods bind/unbind * Imported Translations from Transifex * Revert "Remove the usage of instance['extra\_specs' * Add standard methods to the Limits API * Store project\_id for instance actions * rstrip() strips characters, not strings * Fix use of libvirt\_disk\_prefix * Revert 1154253 causes XenServer image compat issue * Reset migrating task state for more Exceptions * Fix db archiving bug with foreign key constraints * Imported Translations from Transifex * Update migration 153 for efficiency * Don't include traceback when wrapping exceptions * Fix exception message in Networks API extension * Make conductor's quota methods pass project\_id properly * Fix: improve API error responses from os-hosts extension * Add missing API doc for networks-post-req * Make os-services API extensions consistent * Fix system\_metadata "None" and created\_at values * Add the serial to connection info for boot volumes * Do not accept invalid keys in quota-update * Add quotas for fixed ips * Makes safe xml data calls raise 400 http error instead of 500 * Fixes an iSCSI connector issue in the Hyper-V driver * Check keypair destroy result operation * Resize/Migrate refactoring fixes and test cases * Fixes Hyper-V live migration with attached volumes * Force nova to use keystone v2.0 for auth\_token * Fix issues with cells and resize * Fix copyright - from LLC to Foundation * Don't log traceback on expected console error * Generalize console error handling during build * Remove sqlalchemy calling back to DB API * Make ssh key injection work with xenapi agent * Fix use of potentially-stale instance\_type in tenant\_usage * Drop gzip flag from tar command for OVF archives * Fix reconnecting to libvirt * List ComputeHostNotFound as a client exception * Fix: Nova aggregate API throws an uncaught exception on invalid host * Do cleaning up resource before rescheduling * nova-manage: remove unused import * Read instance resource quota info from "quota" namespace * LibvirtGenericVIFDriver update for stp * Switch to final 1.1.0 oslo.config release * Skip deleted fixed ip address for os-fixed-ips extension * Return error details to users in "dns-create-private-domain" * Lazy load CONF.quota\_driver * Fix cells instance deletion * Don't load system\_metadata when it isn't joined * List ConsoleTypeInvalid as a client exception * Make run\_instance() bail quietly if instance has been deleted * Delete instance metadata when delete VM * Virtual Power Driver list running vms quoting error * Refactor work with session in db.block\_device\_mapping\_\* methods * Add missing tests for db.block\_device\_mapping\_\* methods * websockify 0.4 is busted * Sync rpc from oslo-incubator * Fix: nova-manage throws uncaught exception on invalid host/service * Fix more OS-DCF:diskConfig XML handling * Fix: Managers that incorrectly derive from SchedulerDependentManager * Fix nova-manage --version * Pin SQLAlchemy to 0.7.x * Deprecate 
CONF.fixed\_range, do dynamic setup * Remove the usage of instance['extra\_specs'] * Fix behaviour of split\_cell\_and\_item * Fix quota issues with instance deletes * Fixes instance task\_state being left as migrating * Force resource updates to update updated\_at * Prepare services index method for use with cells * Handle vcpu counting failures gracefully * Return XML message with objectserver 404 * xenapi: Fix reboot with hung volumes * Rename LLC to Foundation * Pass migration\_ref when when auto-confirming * Revert changing to FQDN for hostnames * Add numerous fixes to test\_api\_samples * Fixes instance action exception in "evacuate" API * Remove instance['instance\_type'] relationship from db api * Refactor db tests to ensure that notdb driver is used * Rewrap two lines * Server create will only process "networks" if os-networks is loaded * Fixes nbd device can't be released error * Correct exception args in vfs/guestfs * Imported Translations from Transifex * Prevent nova services' coverage data from combining into nova-api's * Check if flavor id is an empty string * Simple syntax fix up * Fixes volume attach on Hyper-V with IPv6 * Add ability to control max utilization of a cell * Extended server attributes can show wrong hypervisor\_hostname * Imported Translations from Transifex * Remove uses of instance['instance\_type'] from nova/notifications * Libvirt driver create images even without meta * Prevent rescue for volume-backed instances * Fix OS-DCF:diskconfig XML handling * Imported Translations from Transifex * Compile BigInteger to INTEGER for sqlite * Add conductor to nova-all * Make bm model's deleted column match database * Update to Quantum Client 2.2.0 * Remove uses of instance['instance\_type'] from nova/scheduler * Remove uses of instance['instance\_type'] from nova/api * Remove uses of instance['instance\_type'] from nova/network * Remove uses of instance['instance\_type'] from nova/compute * Correct substring matching of baremetal VPD node names * Fix Wrong syntax for set:tag in dnsmasq startup option * Fix instance evacuate with shared storage * nova-manage: remove redundant 'dest' args * clear up method parameters for \_modify\_rules * Check CONF values \*after\* command line args are parsed * Make nova-manage db archive\_deleted\_rows more explicit * Fix for delete error in Hyper-V - missing CONF imports * add .idea folder to .gitignore pycharm creates this folder * Make 'os-hosts/node1' case sensitivity defer to DB * Fix access\_ip\_\* race * Add MultipleCreate template and fix conflict with other templates * Update tox.ini to support RHEL 6.x * Fix instance type cleanup when doing a same-id migration * Tiny typo * Remove unnecessary setUp() and tearDown() methods * Remove duplicate API logging * Remove uses of instance['instance\_type'] from libvirt driver * Remove uses of instance['instance\_type'] from powervm driver * Remove uses of instance['instance\_type'] from xenapi driver * Fixed image filter support for vmware * Switch to oslo.config * Fix instance\_system\_metadata deleted columns * Remove parameters containing passwords from Notifications * Add missing action\_start if deleting resized inst * Fix issues with re-raising exceptions * Don't traceback in the API on invalid keypair * delete deleted image 500 bug * Moves Hyper-V options to the hyperv section * Fix 'to integer' conversion of max and min count values * Standarize ip validation along the code * Adjusts reclaim instance interval of deferred delete tests * Fix Network object encoding issue 
when using qpid * Rename VMWare to VMware * Put options in a list * Bump instance updated\_at on network change * Catching InstanceNotFound exception during reboot instance * Imported Translations from Transifex * Remove completed FIXME * quantum security\_group driver queries db regression * Prevent reboot of rescued instance * Baremetal deploy helper sets ODIRECT * Read baremetal images from extra\_specs namespace * Rename source\_(group\_id/ip\_prefix) to remote\_(group\_id/ip\_prefix) * docs should indicate proper git commit limit * Imporove db.sqlalchemy.api.\_validate\_unique\_server\_name method * Remove unused db calls from nova.db.api * Fixes oslo-config update for deprecated\_group * fix postgresql drop race * Compute manager should remove dead resources * Fix an error in compute api snapshot\_volume\_backed bdm code * Fixes disk size issue during image boot on Hyper-V * Updating powervm driver snapshot with update\_task\_state flow * Imported Translations from Transifex * Add ssh port and key based auth to VPD * Make ComputeManager \_running\_deleted\_instances query by uuid * Refactor compute manager \_get\_instances\_by\_driver * Fix target host variable from being overwritten * Imported Translations from Transifex * Fixes live migration with attached volumes issue * Don't LOG.error on max\_depth (by default) * Set vm\_state to ERROR on net deallocate failure * validate security\_groups on server create * Fix IBM copyright strings * Implement rules\_exist method for quantum security group driver * Switch to using memorycache from oslo * Remove pylint errors for undefined GroupException members * Sync timeutils and memorycache from oslo * instance\_info\_cache\_update creates wrongly * Tone down logging while waiting for conductor * Add os-volumes extension to api samples * Regenerate nova.conf.sample * Fix ephemeral devices on LVM don't get mkfs'd * don't stack trace if long ints are passed to db * Pep8/pyflakes cleanup of deprecated\_api * Fix deprecated network api * Fixes the Hyper-V driver's method signature * Imported Translations from Transifex * Fixes a Hyper-V live migration issue * Don't use instance['instance\_type'] for scheduler filters in migration * Fallback coverage backdoor telnet connection to lo * Add instance\_type\_get() to virt api * Make compute manager revert crashed migrations on init\_host() * Adds API Sample tests for Volume Attachments * Ensure that FORWARD rule also supports DHCP * Remove duplicate options(joinedload) from aggregates db code * Shrink size of aggregate\_metadata\_get\_by\_host sql query * Remove old commented out code in sqlalchemy models * Return proper error messages while disassociating floating IP * Don't blindly skip first migration * Imported Translations from Transifex * Suppress retries on UnexpectedTaskStateErrors * Fix \`with\_data\` handling in test-migrations * BM Migration 004: Actually drop column * Actually run baremetal migration tests * Adds retry on upload\_vhd for xapi glance plugin * ec2 \_format\_security\_group() accesses db when using quantum\_driver * Remove un-needed methods * Prevent hacking.py from crashing on unexpected import exception * Bump python-quantumclient version to 2.1.2 * Improve output msgs for \_compare\_result * Add a 'hw\_' namespace to glance hardware config properties * Makes sure required powervm config options are set * Update OpenStack LLC to Foundation * Improve hackings docstring detection * Make sure no duplicate forward rules can exist * Use min\_ram of original image for snapshot, 
even with VHD * Revert IP Address column length to 39 * Additional tests for safe parsing with minidom * Make allocate\_for\_instance() return only info about ports allocated * Fix crash in quantumapi if no network or port id is specified * Unpin PasteDeploy dependency version * Unpin routes dependency version * Unpin suds dependency version * Unpin Cheetah dependency version * Allow zk driver be imported without zookeeper * Retry floating\_ip\_fixed\_ip\_associate on deadlock * Fix hacking.py to handle 'cannot import x' * Add missing import to fakelibvirt * Migration 148: Fix drop table dependency order * Minor code optimization in \_compute\_topic * Fix hacking.py to handle parenthesise in from import as * Fix redefinition of function test\_get\_host\_uptime * Migration 147: Prevent duplicate aggregate\_hosts * Rework instance actions to work with cells * Fix incorrect zookeeper group name * Sync nova with oslo DB exception cleanup * Fix broken baremetal migration tests * if reset fails, display the command that failed * Remove unused nova.db.api:instance\_get\_all\_by\_reservation * Add API Sample tests for Snapshots extension * Run libguestfs API calls in a thread pool * Change nova-dhcpbridge FLAGFILE to a list of files * Imported Translations from Transifex * Readd run\_tests.sh --debug option * Clean unused kernels and ramdisks from image cache * Imported Translations from Transifex * Ensure macs can be serialized * Remove Print Statement * Prevent default security group deletion * libvirt: lxml behavior breaks version check * Add missing import\_opt for flat\_injected * Add processutils from oslo * Updates to OSAPI sizelimit middleware * Remove compat cfg wrapper * Fix exception handling in baremetal API * Make guestfs use same libvirt URI as Nova * Make LibvirtDriver.uri() a staticmethod * Enable VM DHCP request to reach DHCP agent * Don't set filter name if we use Noop driver * Removes unnecessary qemu-img dependency on powervm driver * Migration 146: Execute delete call * Add \`post\_downgrade\` hook for migration tests * Fix migration snake-walk * BM Migrations 2 & 3: Fix drop\_column statements * Migration 144: Fix drop index statement * Remove function redefinitions * Migration 135: Fix drop\_column statement * Add missing ec2 security group quantum mixin * Fix baremetal migration skipping * Add module prefix to exception types * Flush tokens on instance delete * Fix launching libvirt instances with swap * Spelling: compatable=>compatible * import base\_dir\_name config option into vmwareapi * Fix ComputeAPI.get\_host\_uptime * Move DB thread pooling to DB API * Use a fake coverage module instead of real one * Standardize the coverage initializations * Sync eventlet\_backdoor from oslo-incubator * Sync rpc from oslo-incubator * Fix message envelope keys * Remove race condition (in Networks) * Move some context checking code from sqlalchemy * Baremetal driver returns accurate list of instance * Identify baremetal nodes by UUID * Improve performance of baremetal list\_instances * Better error handling in baremetal spawn & destroy * Wait for baremetal deploy inside driver.spawn * cfg should be imported from oslo.config * Add Nova quantum security group proxy * Add a volume driver in Nova for Scality SOFS * Make nova security groups more pluggable * libvirt: fix volume walk of /dev/disk/by-path * Add better status to baremetal deployments * Fix handling of source\_groups with no-db-compute * Improve I/O performance for periodic tasks * Allow exit code 21 for 'iscsiadm -m session' 
* Removed duplicate spawn code in PowerVM driver * Add API Sample tests for Hypervisors extension * Log lifecycle events to log INFO (not ERROR) * Sync rpc from oslo-incubator * sync oslo log updates * Adding ability to specify the libvirt cache mode for disk devices * Sync latest install\_venv\_common.py * Make add-fixed-ip update nwfilter wth in libvirt * Refactor nwfilter parameters * ensure we run db tests in CI * More gracefully handle TimeoutException in test * Multi-tenancy isolation with aggregates * Fix pep8 issues with test\_manager.py * Fix broken logging imports * Fix hacking test to handle namespace packages * Use oslo-config-2013.1b4 * support preallocated VM images * Fix instance directory path for lxc * Add snapshot methods to fakes.py * PowerVMDiskAdapter detach/cleanup refactoring * Make ComputeTestCase.test\_state\_revert faster * Add an extension to show image size * libvirt: Use uuid for instance directory name * Support running periodic tasks immediately at startup * Fix XMLMatcher error reporting * Fix XML config tests for disk/net/cpu tuning * Add support for network adapter hotplug * Handle lifecycle events in the compute manager * Add support for lifecycle events in the libvirt driver * Enhance IPAdresses migration tests * Add basic infrastructure for compute driver async events * Fix key check in instance actions formatter * Add a safe\_minidom\_parse\_string function * Documentation cleanups for nova devref * Fix leak of loop/nbd devices in injection using localfs * Add support for instance CPU consumption control * Add support for instance disk IO control * Retry bw\_usage\_update() on innodb Deadlock * Change CIDR column size on migration version 149 * Provide way to pass rxtx factor to quantum * Fibre channel block storage support (nova changes) * Default SG rules for the Security Group "Default" * create new cidr type for data storage * Ensure rpc result is primitive types * Change all instances of the non-word "inteface" to "interface" * Remove unused nova.db.api:network\_get\_by\_bridge * Fix a typo in two comments. 
networksa -> networks * Live migration with an auto selection of dest * Remove unused nova.db.api:network\_get\_by\_instance * Fix network list and show with quantum * Remove unused db calls from nova.db.sqlalchemy.api * Remove unused db calls * Small spelling fix in sqlalchemy utils * Fix \_get\_instance\_volume\_block\_device\_info call parameter * Do not use abbreviated config group names (zookeeper) * Prevent the unexpected with nova-manage network modify * Fix hacking tests on osx * Enable multipath for libvirt iSCSI Volume Driver * Add select\_hosts to scheduler manager rpc * Add and check data functions for test\_migrations 141 * fix incorrectly defined ints as strs * Remove race condition (in TaskLog) * Add generic dropper for duplicate rows * Imported Translations from Transifex * Fix typo/bug in generic UC dropper * remove intermediate libvirt downloaded images * Add support for instance vif traffic control * Add libvirt XML schema support for resource tuning parameters * Fix instance can not be deleted after soft reboot * Correct spelling of quantum * Make pep8 tests run inside virtualenv * Remove tests for non-existing SimpleScheduler * libvirt: Fix LXC container creation * Rename 'connection' to 'driver' in libvirt HostState * Ensure there is only one instance of LibvirtDriver * Stop unit test for prompting for a sudo password * clean up missing whitespace after ':' * Push 'Error' result from event to instance action * Speedup the revert\_state test * Add image to request\_spec during resize * Ensure start time is earlier than end time in simple\_tenant\_usage * Split out body of loop in \_sync\_power\_states in compute manager * Remove dead variable assignment in compute manager * Assign unique names with os-multiple-create * Nova network needs to take care of existing alias * Delete baremetal interfaces when their parent node is deleted * Harmonize PEP8 checking between tox and run\_tests.sh * VirtualPowerDriver catches ProcessExecutionError * [xenapi] Cooperatively yield during sparse copy * Allow archiving deleted rows to shadow tables, for performance * Adds API Sample tests for FlavorAccess extension * Add an update option to run\_tests.sh * filter\_scheduler: Select from a subset of hosts * use nova-conductor for live-migration * Fix script argument parsing * Add option to allow cross AZ attach configurable * relocatable roots doesn't handle testr args/opts * Remove a log message in test code * add config drive to api\_samples * Don't modify injected\_files inside PXE driver * Synchronize code from oslo * Canonizes IPv6 before insert it into the db * Only dhcp the first ip for each mac address * Use connection\_info on resize * Fix add-fixed-ip and remove-fixed-ip * API extension for accessing instance\_actions * Use joinedload for system\_metadata in db * Add migration with data test for migration 151 * Correct misspelling in PowerVM comment * Add GlusterFS libvirt volume connector * Module import style checking changes * Stub additional FloatingIP methods in FlatManager * Resize/Migrate functions for PowerVM driver * Added a service heartbeat driver using Memcached * Use a more specific error reporting invalid disk hardware * Allow VIF model to be chosen per image * Check the length of flavor name in "flavor-create" * Add API sample tests to Services extension * VMWare driver to use current nova.network.model * Add "is not" test to hacking.py * Update tools/regression\_tester * Fix passing conductor to get\_instance\_nw\_info() * Imported Translations from Transifex * 
Make compute manager use conductor for stopping instances * Move allowvssprovider=false to vm-data field * Allow aggregate create to have None as the az * Forces flavorRef to be string in servers resize api * xenapi: Remove unecessary exception handling * Sync jsonutils from openstack-common * Simplify and optimize az server output extension * Add an extension to show the type of an ip * Ensure that only one IP address is allocated * Make the metadata paths use conductor * Fix nova-compute use of missing DBError * Adding support for AoE block storage SANs * Update docs about testing * Allow generic rules in context\_is\_admin rule in policy * Implements resize / cold migration on Hyper-V * test\_(dis)associate\_by\_non\_existing\_security\_group\_name missing stub * Make scheduler remove dead nodes from its cache * More conductor support for resizes * Allow fixed to float ping with external gateway * Add generic UC dropper * Remove locking declarator in ServiceGroup \_\_new\_\_() * Use ServiceGroup API to show node liveness * Refine PowerVM MAC address generation algorithm * Fixes a bug in attaching volumes on Hyper-V * Fix unconsumed column name warning in test\_migrations * Fix regression in non-admin simple\_usage:show * Ensure 'subunit2pyunit' is run in venv from run\_tests.sh * Fix inaccuracies in the development environment doc * preserve order of pre-existing iptables chains * Adds API Sample tests for FloatingIPDNS extension * Don't call 'vif.plug' twice during VM startup * Disallow setting /0 for network other than 0.0.0.0 * Fix spelling in comment * Imported Translations from Transifex * make vmwareapi driver pass quantum port-id to ESX * Add control-M to list of characters to strip out * Update to simplified common oslo version code * Libvirt: Implement snapshots for LVM-backed roots * Properly write non-raw LVM images on creation * Changes GA code for tracking cross-domain * Return dest\_check\_data as expected by the Scheduler * Simplify libvirt snapshot code path * fix VM power state to be NOSTATE when instance not found * Fix missing key error in libvirt.driver * Update jsonutils from oslo-incubator * Update nova/compute/api to handle instance as dict * Use joined version of db.api calls * l3.py,add\_floating\_ip: setup NAT before binding * Regenerate nova.conf.sample * Fixes a race condition on updating security group rules * Ensure that LB VIF drivers creates the bridge if necessary * Remove nova.db call from baremetal PXE driver * Support for scheduler hints for VM groups * Fixed FlavorAccess serializer * Add a virtual PowerDriver for Baremetal testing * Optimize rpc handling for allocate and deallocate * Move floating ip db access to calling side * Implement ZooKeeper driver for ServiceGroup API * Added the build directory to the tox.ini list pep8 ignores * support reloctable venv roots in testing framework * Change to support custom nw filters * Allow multiple dns servers when starting dnsmasq * Clean up extended server output samples * maint: remove unused imports from bin/nova-\* * xenapi: Cleanup detach\_volume code * Access DB as dict not as attributes part 5 * Introduce support for 802.1qbg and 802.1qbh to Nova VIF model * Adds \_(prerun|check)\_134 functions to test\_migrations * Extension for rebuild-for-ha * Support hypervisor supplied macs in nova-network * Recache or rebuild missing images on hard\_reboot * Cells: Add cells support to hypervisors extension * Cells: Add cells support to instance\_usage\_audit\_log api extension * Update modules from common 
required for rpc with lock detection * Fix lazy load 'system\_metadata' failed problem * Ban database access in nova-compute * Move security\_groups refreshes to conductor * Fix inject\_files for storing binary file * Add regression testing tool * Change forward\_bridge\_interface to MultiStrOpt * Imported Translations from Transifex * hypervisor-supplied-nics support in PowerVM * Default the last parameter (state) in task\_log\_get to None * Sync latest install\_venv\_common from oslo * Remove strcmp\_const\_time * Adds original copyright notice to refactored files * Update .coveragerc * Allow disk driver to be chosen per image * Refactor code for setting up libvirt disk mappings * Refactor instance usage notifications for compute manager * Flavor Extra Specs should require admin privileges * Remove unused methods * Return to skipping filters when using force\_hosts * Refactor server password metadata to avoid direct db usage * lxc: Clean up namespace mounts * Move libvirt volume driver tests to separate test case * Move libvirt NFS volume driver impl into volume.py * replace ssh-keygen -m with a python equivalent * Allow connecting to self-signed quantum endpoints * Sync latest db and importutils from oslo * Use oslo database code * Fix check instance host for instance action * Make get\_dev\_name\_for\_instance() use stashed instance\_type info * Added Postgres CI opportunistic test case * Remove remaining instance\_types query from compute/manager * Make cells\_api fetch stashed instance\_type info * Teach resource tracker about stashed instance types * Fix up instance types in sys meta for resizes * lxc: virDomainGetVcpus is not supported by driver * Fix incorrect device name being raised * VMware VC Compute Driver * Default value of monkey\_patch\_modules is broken * Adds evacuate method to compute.api * Fix import for install\_venv.py * allow disabling file injection completely * separate libvirt injection and configdrive config variables * Add API sample tests to os-network * Fix incorrect logs in network * Update HACKING.rst per recent changes * Allow for specifying nfs mount options * Add REST API to show availability\_zone of instance * Make NFS mount hashes consistent with Cinder * Parse testr output through subunit2pyunit * Imported Translations from Transifex * Optimize floating ip list to make one db query * Remove hardcoded topic strings in network manager * Reimplement is\_valid\_ipv4() * Tweakify is\_valid\_boolstr() * Fix update quota with invalid value * Make system\_metadata update in place * Mark password config options with secret * Record instance actions and events * Postgres does not like empty strings for type inet * Add 'not in' test to tools/hacking.py * Split floating ip functionality into new file * Optimize network calls by moving them to api * Fixes unhandled exception in detach\_volume * Fixes FloatingIPDNS extension 'show' method * import tools/flakes from oslo * Use conductor for instance\_info\_cache\_update * Quantum metadata handler now uses X-Forwarded-For * instance.update notifications don't always identify the service * Handle compute node not available for live migration * Fixes 'not in' operator usage * Fixes "is not" usage * Make scheduler modules pass conductor to add\_instance\_fault * Condense multiple authorizers into a single one * Extend extension\_authorizer to enable cleaner code * Remove unnecessary deserializer test * Added sample tests to FlavorExtraSpecs API * Fix rebuild with volumes attached * DRYing up volume\_in\_mapping code * 
Use \_prep\_block\_device in rebuild * xenapi: Ax unecessary \`block\_device\_info\` params * Code cleanup for rebuild block device mapping * Fix eventlet/mysql db pooling code * Add support for compressing qcow2 snapshots * Remove deprecation notice in LibvirtBridgeDriver * Fix boto capabilities check * Add api samples to fping extension * Fix SQL Error with fixed ips under devstack/postgresql * Pass testropts in to setup.py in run\_tests.sh * Nova Hyper-V driver refactoring * Fixed grammar problems and typos in doc strings * Add option to control where bridges forward * xenapi: Add support for different image upload drivers * Removed print stmts in test cases * Fix get and update in FlavorExtraSpecs * Libvirt: Add support for live snapshots * Move task\_log functions to conductor * erase outdated comment * Keep flavor information in system\_metadata * Add instance\_fault\_create() to conductor * Adds API Sample tests for os-instance\_usage\_audit\_log extension * validate specified volumes to boot from at the API layer * Refactor libvirt volume driver classes to reduce duplication * Change ''' to """ in bin/nova-{novncproxy,spicehtml5proxy} * Pass parameter 'filter' back to model layer * Fix boot with image not active * refactored data upgrade tests in test\_migrations * Fix authorized\_keys file permissions * Finer access control in os-volume\_attachments * Stop including full service catalog in each RPC msg * Make sure there are no unused import * Fix missing wrap\_db\_error for Session.execute() method * Use install\_venv\_common.py from oslo * Add Region name to quantum client * Removes retry of set\_admin\_password * fix nova-baremetal-manage version printing * Refactoring/cleanup of compute and db apis * Fix an error in affinity filters * Fix a typo of log message in \_poll\_unconfirmed\_resizes * Allow users to specify a tmp location via config * Avoid hard dependency on python-coverage * iptables-restore error when table not loaded * Don't warn up front about libvirt loading issues in NWFilterFirewall * Relax API restrictions around the use of reboot * Strip out Traceback from HTTP response * VMware Compute Driver OVF Support * VMware Compute Driver Host Ops * VMware Compute Driver Networking * Move policy checks to calling side of rpc * Add api-samples to multinic extension * Add system\_metadata to db.instance\_get\_active\_by\_window\_joined * Enable N302: Import modules only * clean up api\_samples documentation * Fix bad imports that cause nova-novncproxy to fail * populate dnsmasq lease db with valid leases * Support optional 4 arg for nova-dhcpbridge * Add debug log when call out to glance * Increase maximum URI size for EC2 API to 16k * VMware Compute Driver Glance improvement * Refactored run\_command for better naming * Fix rendering of FixedIpNotFoundForNetworkHost * Fix hacking N302 import only modules * Avoid db lookup in info\_from\_instance() * Fixes task\_log\_get and task\_log\_get\_all signatures * Make failures in the periodic tests more detailed * Clearer debug when test\_terminate\_sigterm fails * Skip backup files when running pep8 * Added sample tests to floating-ip-pools API * \_sync\_compute\_node should log host and nodename * Don't pass the entire list of instances to compute * VMware Compute Driver Volume Management * Bump the base rpc version of the network api to 1.7 * Remove compute api from scheduler driver * Remove network manager from compute manager * Adds SSL support for API server * Provide creating real unique constraints for columns * Add 
version constraint for coverage * Correct a format string in virt/baremetal/ipmi.py * Add REST api to manage bare-metal nodes * Adding REST API to show all availability zones of an region * Fixed nova-manage argument parsing error * xenapi: Add cleanup\_sm\_locks script * Fix double reboot during resume\_state\_on\_host\_boot * Add support for memory overcommit in live-migration * Adds conductor support for instance\_get\_active\_by\_window\_joined * Make compare\_result show the difference in lists * Don't limit SSH keys generation to 1024 bits * Ensure service's servicegroup API is created first * Drop volume API * Fix for typo in xml API doc sample in nova * Avoid stuck task\_state on snapshot image failure * ensure failure to inject user files results in startup error * List servers having non-existent flavor should return empty list * Add version constraint for cinder * Remove duplicated tapdev creation code from libvirt VIF * Move helper APIs for OVS ports into linux\_net * Add 'ovs\_interfaceid' to nova network VIF model * Replace use of mkdtemp with fixtures.TempDir * Add trust level cache to trusted\_filter * Fix the wrong datatype in task\_log table * Cleanup of extract\_opts.py * Baremetal/utils should not log certain exceptions * Use setup.py testr to run testr in run\_tests.sh * Fix nova coverage * PXE driver should rmtree directories it created * Fix floating ips with external gateway * Add support for Option Groups in LazyPluggable * Fix incorrect use of context object * Unpin testtools * fix misspellings in logs, comments and tests * fix mysql race in tests * Fix get Floating ip pools action name to match with its policy * Generate coverage even if tests failed * Allow snapshots of paused and suspended instances * Update en\_US message translations * Sync latest cfg from oslo-incubator * Avoid testtools 0.9.25 * Cells: Add support for compute HostAPI() * Refactor compute\_utils to avoid db lookup * ensure zeros are written out when clearing volumes * fix service\_ref undefined problem * Add rootwrap filters for password injection with localfs * fix floating ip test that wasn't running * Prevent metadata updates until instance is active * More consistent libvirt XML handling and cleanup * pick up eventlet backdoor fix from oslo * Run\_as\_root to ensure resize2fs succeed for all image backends * libvirt: Fix typo in configdrive implementation * Refactor EC2 keypairs exception * Directly copy a file URL from glance * Remove restoring soft deleted entries part 2 * Remove restoring soft deleted entries part 1 * Use conductor in the servicegroup db driver * Add service\_update to conductor * Remove some db calls from db servicegroup driver * XenAPI: Fix volume detach * Refactor: extract method: driver\_dict\_from\_config * Cells: Fix for relaying instance info\_cache updates * Fix wrong quota reservation when deleting resizing instance * Go back to the original branch after pylint check * Ignore auto-generated files by lintstack * Add host to instance\_faults table * Clean up db network db calls for fixed and float * Remove obsolete baremetal override of MAC addresses * Fix multi line docstring tests in hacking.py * PXE driver should not accept empty kernel UUID * Use common rootwrap from oslo-incubator * Remove network\_host config option * Better instance fault message when rescheduling * libvirt: Optimize test\_connection and capabilities * don't allow crs in the code * enforce server\_id can only be uuid or int * Allow nova to use insecure cinderclient * Makes sure compute 
doesn't crash on failed resume * Fix fallback when Quantum doesn't provide a 'vif\_type' * Move compute node operations to conductor * correcting for proper use of the word 'an' * Correcting improper use of the word 'an' * Save password set through xen agent * Add encryption method using an ssh public key * Make resource tracker use conductor for listing instances * Make resource tracker use conductor for listing compute nodes * Updates prerequisite packages for fedora * Expose a get\_spice\_console RPC API method * Add a get\_spice\_console method to nova.virt.ComputeDriver API * Add nova-spicehtml5proxy helper * Pull NovaWebSocketProxy class out of nova-novncproxy binary * Add support for configuring SPICE graphics with libvirt * Add support for setting up elements in libvirt config * Add common config options for SPICE graphics * Create ports in quantum matching hypervisor MAC addresses * Make nova-api logs more useful * Override floating interface on callee side * Reject user ports that have MACs the hypervisor cannot use * Remove unused import * Reduce number of iptable-save restore loops * Clean up get\_instance\_id\_by\_floating\_address * Move migration\_get\_...\_by\_host\_and\_node to conductor * Make resource tracker use conductor for migration updates * minor improvements to nova/tests/test\_metadata.py * Cells: Add some cells support to admin\_actions extension * Populate service list with availability zone and correct unit test * Correct misspelling of fake\_service\_get\_all * Add 'devname' to nova.network.model.VIF class * Use testrepository setuptools support * Cleaning up exception handling * libvirt: use tap for non-blockdevice images on Xen * Export the MAC addresses of nodes for bare-metal * Cells: Add cells API extension * More HostAPI() cleanup for cells * Break out a helper function for working with bare metal nodes * Renames the new os-networks extension * Define a hypervisor driver method for getting MAC addresses * enables admin to view instance fault "details" * Revert "Use testr setuptools commands." 
* Revert "Populate service list with availability zone" * Fix typos in docstring * Fix problem with ipv6 link-local address(es) * Adds support for Quantum networking in Hyper-V * enable hacking.py self tests * Correct docstring on sizelimit middleware * sync latest log and lockutils from oslo * Fix addition of CPU features when running against legacy libvirt * Fix nova.availability\_zones docstring * Fix uses of service\_get\_all\_compute\_by\_host * VMware Compute Driver Rename * use postgresql INET datatype for storing IPs * Extract validation and provision code to separate method * Implement Quantum support for addition and removal of fixed IPs * Keep self and context out of error notification payload * Populate service list with availability zone * Add Compute API validations for block device map * Cells: Commit resize quota reservations immediately * Cells: Reduce the create\_image call depth for cells * Clean up compute API image\_create * Fix logic error in periodic task wait code * Centralize instance directory logic * Chown doesn't work on mounted vfat * instances\_path is now defined here * Convert ConfigDriveHelper to being a context manager itself * Use testr setuptools commands * Move migration\_create() to conductor * Move network call from compute API to the manager * Fix incorrect comment, and move a variable close to use * Make sure reboot\_instance uses updated instance * Cleanup reboot\_instance tests * Fix use of stale instance data in compute manager * Implements getPasswordData for ec2 * Add service\_destroy to conductor * Make nova.service get service through conductor * Add service\_create to conductor * Handle waiting for conductor in nova.service * Allow forcing local conductor * Make pinging conductor a part of conductor API * Fix some conductor manager return values * Handle directory conflicts with html output * Fix error in NovaBase.save() method * Skip domains on libvirt errors in get\_vcpu\_used() * Fix state sync logic related to the PAUSED VM state * Remove more unused opts from nova.scheduler.driver * Fix quota updating when admin deletes common user's instance * Tests for PXE bare-metal provisioning helper server * Correct the calculating of disk size when using lvm disk backend * Adding configdrive to xenapi * Validated device\_name value in block device map * Fix libvirt resume function call to get\_domain\_xml * Make it clearer that network.api.API is nova-network specific * Access instance as dict, not object in xenapi * Expand quota logging * Move logic from os-api-host into compute * Create a directory for servicegroup drivers * Move update\_instance\_info\_cache to conductor * Change ComputerDriver.legacy\_nwinfo to raise by default * Cleanup pyflakes in nova-manage * Add user/tenant shim to RequestContext * make runtests -p act more like tox * fix new N402 errors * Add host name to log message for \_local\_delete * Try out a new nova.conf.sample format * Regenerate nova.conf.sample * Make Quantum plugin fill in the 'bridge' name * Make nova network manager fill in vif\_type * Add some constants to the network model for drivers to use * Move libvirt VIF XML config into designer.py * Remove bogus 'unplug' calls from libvirt VIF test * Fix bash syntax error in run\_tests.sh * Update instance's cell\_name in API cell * Fix init\_host checking moved instances * Fix test cases in integrated.test\_multiprocess\_api * Map libvirt error to InstanceNotFound in get\_instance\_disk\_info * Fixed comment typo * Added sample tests to FlavorSwap API * Remove 
unused baremetal PXE options * Remove unused opt import in scheduler.driver * Move global service networking opts to new module * Move memcached\_servers opt into common.memorycache * Move service\_down\_time to nova.service * Move vpn\_key\_suffix into pipelib * fix N402 on tools/ * fix N402 for nova-manage * fix N402 for rest of nova * fix N402 for nova/c\* * fix N402 for nova/db * don't clear the database dicts in the tearDown method * Fixed typos in doc strings * Enhance wsgi to listen on ipv6 address * Adds a flag to allow configuring a region * Fix double reboot issue during soft reboot * Remove baremetal-compute-pxe.filters * Fix pyflakes issues in integrated tests * Adds option to rebuild instance with existing disk * Move common virt driver options to virt.driver * Move vpn\_image\_id to pipelib * Move enabled\_apis option into nova.service * Move default\_instance\_type into nova.compute * Move osapi\_compute\_unique\_server\_name\_scope to db * Move api\_class options to where they are used * Move manager options into nova.service * Move compute\_topic into nova.compute.rpcapi * fix N402 for nova/network * fix N402 for nova/scheduler * fix N402 for nova/tests * Fix N402 for nova/virt * Fix N402 for nova/api * New instance\_actions and events table, model, and api * Cope better with out of sync bm data * Import latest timeutils from oslo-incubator * Remove availability\_zones from service table * Enable Aggregate based availability zones * Sync log from oslo-incubator * Clarify the DBApi object in cells fakes * Fix lintstack check for multi-patch reviews * Adds to manager init\_host validation for instances location * Add to libvirt driver instance\_on\_disk method * add to driver option to keep disks when instance destroyed * Fix serialization in impl\_zmq * Added sample tests to FlavorRxtx API * Refresh instance metadata in-place * xenapi: Remove dead code, moves, tests * Fix baremetal VIFDriver * Adds a new tenant-centric network extension * CLI for bare-metal database sync * Move scheduler\_topic into nova.scheduler.rpcapi * Move console\_topic into nova.console.rpcapi * Move network\_topic into nova.network.rpcapi * Move cert\_topic into nova.cert.rpcapi * Move global s3 opts into nova.image.s3 * Move global glance opts into nova.image.glance * Remove unused osapi\_path option * attach/detach\_volume() take instance as a parameter * fix N401 errors, stop ignoring all N4\* errors * Add api extension to get and reset password * powervm: Implement snapshot for local volumes * Add exception handler for previous deleted flavor * Add NoopQuotaDriver * Conductor instance\_get\_all replaces \_by\_filters * Support cinderclient http retries * Sync rpc and notifier from oslo-incubator * PXE bare-metal provisioning helper server * Added sample tests to QuotaClasses API * Changed 'OpenStack, LLC' message to 'OpenStack Foundation' * Convert short doc strings to be on one line * Get instances from conductor in init\_host * Invert test stream capture logic for debugging * Upgrade WebOb to 1.2.3 * Make WebOb version specification more flexible * Refactor work with TaskLog in sqlalchemy.api * Check admin context in bm\_interface\_get\_all() * Provide a PXE NodeDriver for the Baremetal driver * Handle compute node records with no timestamp * config\_drive is missing in xml deserializer * Imported Translations from Transifex * NovaBase.delete() rename to NovaBase.soft\_delete() * livbirt: have a single source of console log file naming * Remove the global DATA * Add ping to conductor * Add two 
tests for resize action in ServerActionsControllerTest * Move service\_get\_all operations to conductor * Move migration\_get\_unconfirmed\_by\_dest\_compute to conductor * Move vol\_usage methods to conductor * Add test for resize server in ComputeAPITestCase * Allow pinging own float when using fixed gateway * Use full instance in virt driver volume usage * Imported Translations from Transifex * Refactor periodic tasks * Cells: Add periodic instance healing * Timeout individual tests after one minute * Fix regression in RetryFilter * Cells: Add the main code * Adding two snapshot related task states * update version urls to working v2 urls * Add helper methods to nova.paths * Move global path opts in nova.paths * Remove unused aws access key opts * Move fake\_network opt to nova.network.manager * Allow larger encrypted password posts to metadata * Move instance\_type\_get() to conductor * Move instance\_info\_cache\_delete() to conductor * Move instance\_destroy() to conductor * Move instance\_get\_\*() to conductor * Sync timeutils changes from Oslo * Remove system\_metadata db calls from compute manager * Move block\_device\_mapping destroy operations to conductor * Clean up setting of control\_exchange default * fix floating-ip in multihost case * Invalid EC2 ids should make the entire request fail * improve libguestfs exception handling * fix resize of unpartitioned images with libguestfs * xenapi: Avoid hotplugging volumes on resize * Remove unused VMWare VIF driver abstraction * Delete pointless nova.virt.VIFDriver class * Clarify & fix docs for nova-novncproxy * Removes unused imports * Imported Translations from Transifex * Fix spelling mistakes in nova.virt * Cells: Add cells commands to nova-manage * Add remaining get\_backdoor\_port() rpc calls to coverage * Fix race in resource tracker * Move block\_device\_mapping get operations to conductor * Move block\_device\_mapping update operations to conductor * Improve baremetal driver error handling * Add unit test to update server metadata * Add unit test to revert resize server action * Add compute build/resize errors to instance faults * Add unit test for too long metadata for server rebuild action * Adds os-volume\_attachments 'volume\_id' validation * Raise BadRequest when updating 'personality' * Imported Translations from Transifex * Ensure that Quantum uses configured fixed IP * Add conditions in compute APIRouter * Imported Translations from Transifex * CRUD on flavor extra spec extension should be admin-only * Report failures to mount in localfs correctly * Add API sample tests to FixedIPs extension * baremetal power driver takes \*\*kwargs * Implement IPMI sub-driver for baremetal compute * Fix tests/baremetal/test\_driver.py * Move baremetal options to [BAREMETAL] OptGroup * Adds test for HTTPUnprocessableEntity when rebooting * Make sure the loadables path is the absolute path * Fix bug and remove update lock in db.instance\_test\_and\_set() * Periodic update of DNS entries * Fix error in test\_get\_all\_by\_multiple\_options\_at\_once() * Remove session.flush() and session.query() monkey patching * Update nova-cert man page * Allow new XML API sample file generation * Remove unused imports * spelling in test\_migrations * Imported Translations from Transifex * Check for image\_meta in libvirt.driver.spawn * Adds test for 'itemNotFound' errors in 'Delete server' * Remove improper NotFound except block in list servers * Spelling: Compatability=>Compatibility * Imported Translations from Transifex * Ensure we add a new 
line when appending to rc.local * Verify the disk file exists before running qemu-img on it * Remove lxc attaching/detaching of volumes * Teardown container rootfs in host namespace for lxc * Fix cloudpipe instances query * Ensure datetimes can be properly serialized * Imported Translations from Transifex * Database metadata performance optimizations * db.network\_delete\_safe() method performance optimization * db.security\_group\_rule\_destroy() method performance optimization * Import missing exception * Ignore double messages to associate the same ip * Imported Translations from Transifex * Database reservations methods performance optimization * Using query.soft\_delete() method insead of soft deleting by hand * Create and use subclass of sqlalchemy Query with soft\_delete() method * Remove inconsistent usage of variable from hyperv * Log last compute error when rescheduling * Removed unused imports * Make libvirt driver default to virtio for KVM/QEMU NICs * Refactor libvirt VIF classes to reduce duplicate code * Makes sure to call crypto scripts with abspath * Enable nova exception format checking in tests * Eliminate race conditions in floating association * Imported Translations from Transifex * Provide a configdrive helper which uses contextlib * Add extension to allow hiding of addresses * Add html reports to report action in coverage extension * Add API samples tests for the coverage extension * Fix \_find\_ports() for when backdoor\_port is None * Parameterize database connection in test.py * fixing the typo of the error message from nbd * add 'random\_seed' entry to instance metadata * Baremetal VIF and Volume sub-drivers * Fix revert resize failure with disk.local not found * Fix a test isolation error in compute.test\_compute * New Baremetal provisioning framework * Move baremetal database tests to fixtures * address uuid overwriting * Add get\_backdoor\_port to cert * Add get\_backdoor\_port to scheduler * Add get\_backdoor\_port to console * Make libvirt driver.listinstances return defined * Add get\_backdoor\_port to consoleauth * Export custom SMBIOS info to QEMU/KVM guests * Make configdrive.py use version.product\_string() * Allow loading of product/vendor/package info from external file * Remove obsolete VCS version info completely * Trap exception when trying to write csr * Define a product, vendor & package strings in version.py * Extract image metadata from Cinder * Add expected exception to aggregate\_metadata\_delete() * Move aggregate\_get() to conductor * Add .testrepository/ directory to gitginore * Make load\_network\_driver load passed in driver * Fix race condition of resize confirmation * libvirt: Make vif\_driver.plug() returns None * Add an iptables mangle rule per-bridge for DHCP * Make NBD retry logic more generic, add retry to loop * Reliably include OS type in ephemeral filenames * Allow specification of libvirt guest interface backend driver * Fix "image\_meta" data passed in libvirt test case * Fix typos in vncserver\_listen config param help description * Traceback when user doesn't have permission * removed duplicate function definitions * network/api add\_fixed\_ip correctly passes uuid * Import cfg module in extract\_opts.py * Raise old exception instance instead of new one * Update exceptions to pass correct kwargs * Add option to make exception format errors fatal * allow for the ability to run partial coverage * Remove fake\_tests opt from test.py * Execute pygrub using nova-rootwrap in xenapi * Add DBDuplicateEntry exception for unique 
constraint violations * Fix stack trace on incorrect nova-manage args * Use service fixture in DB servicegroup tests * fix instance rescue without cmdline params in xml.rescue * Added sample tests to FlavorDisabled API * Reset the IPv6 API backend when resetting the conf stack * libvirt: Skip intermediate base files with qcow2 * fix test\_nbd using stubs * Imported Translations from Transifex * Properly remove the time override in quota tests * Fix API samples generation * Move TimeOverride to the general reusable-test-helper place * Added conf support for security groups * Add accounting for orphans to resource tracker * Add more association support to network API * Remove the WillNotSchedule exception * Replace fixtures.DetailStream with fixtures.StringStream * Move network\_driver into new nova.network.driver * Move DNS manager options into network.manager * Refactor xvp console * Move agent\_build\_get\_by\_triple to conductor * Move provider\_fw\_rule\_get\_all to conductor * Move security\_group operations in VirtAPI to conductor * Retry NBD device allocation * Use testr to run nova unittests * Add a developer trap for api samples * Update command on devref doc * Fixed deleting instance booted from invalid vol * Add general mechanism for testing api coverage * Add the missing replacement text in devref doc * Allow xenapi to work with empty image metadata * Imported Translations from Transifex * Fix for broken switch for config\_drive * Fix use of osapi\_compute\_extension option in api\_samples * Remove sleep in test\_consoleauth * Fix errors in used\_limits extension * Fix poll\_rescued\_instances periodic task * Add syslogging to nova-rootwrap * Clean up run\_tests.sh * Ensure that sql\_dbpool\_enable is a boolean value * Stop nbd leaks, remove pid race * Fixes KeyError: 'sr\_uuid' when booting from volume on xenapi * Add VirtAPI tests * Move remaining aggregate operations to conductor * remove session param from instance\_get * remove session param from instance\_get\_by\_uuid * Use nova.test.TestCase as the base test class * Ensure datetimes can be properly serialized * Fixes string formatting error * Adds API Sample tests for DiskConfig extension * Fix for correctly parsing snapshot uuid in ec2api * Autodetect nbd devices * Add Jian Wen to .mailmap * Move metadata\_{host,port} to network.linux\_net * Move API extension opts to api.openstack.compute * Move osapi\_max\_limit into api.openstack.common * Move link\_prefix options into api.openstack.common * Move some opts into nova.utils * Properly scope password options * Remove the deprecated quantum v1 code and directory * add and removed fixed ip now refresh cache * Implement an XML matcher * Add support for parsing the from libvirt host capabilities * Add support for libvirt domain XML config * Add support for libvirt domain XML config * Add coverage extension to nova API * Allow rpc-silent FloatingIP exceptions in n-net * Allow conductor exceptions to pass over RPC silently * Don't leak info from libvirt LVM backed instances * Add get\_backdoor\_port to nova-conductor * Properly scope isolated hosts config opts * Move monkey patch config opts into nova.utils * Move zombie\_instance\_updated\_at\_window option * Move some options into nova.image.glance * Move cache\_images to nova.virt.xenapi.vm\_utils * Move api\_rate\_limit and auth\_strategy to nova.api * Move api\_paste\_config option into nova.wsgi * Port to argparse based cfg * Cleanup the test DNS managers * Move all temporary files into a single /tmp subdir * Modified 
sample tests to FlavorExtraData API * Fix KeyError of log message in virt/libvirt/utils.py * Allows an instance to post encrypted password * Make nova/virt use aggregate['metadetails'] * Revert "Simplify how ephemeral disks are created and named." * Fix bw\_usage\_update issue with conductor * Correctly init XenAPIDriver in vm\_vdi\_cleaner.py * Set instance\_ref['node'] in \_set\_instance\_host\_and\_node * Consider reserved count in os-user-limits extension * Make DNS drivers inherit interface * Map cinder snapshot statuses to ec2 * i18n raise Exception messages * Set default DNS driver to No-op * Access DB values as dict not as attributes. Part 4 * Use conductor for bw\_usage operations * libvirt: enable apic setting for Xen or KVM guest * Improve virt/disk/mount/nbd test coverage * Add NFS to the libvirt volume driver list * Use admin user to read Quantum port * Add vif\_type to the VIF model * Make the nbd mounter respect CONF.max\_nbd\_devices * Imported Translations from Transifex * Raise NotImplementedError in dns\_driver.DNSDriver * Unpin lxml requirements * Added sample tests to FlavorManage API * Use fixtures library for nova test fixtures * Catch ProcessExecutionError when building config drives * Fix fname concurrency tests * Imported Translations from Transifex * Make ignore\_hosts and force\_hosts work again * Run test objectstore server on arbitrary free port * Fix network manager ipv6 tests * Prevent creation of extraneous resource trackers * Remove unused bridge interfaces * Use conductor for migration\_get() * Reset node to source in finish\_revert\_resize() * Simplify how ephemeral disks are created and named * Order instance faults by created\_at and id * Sync RPC logging-related bits from oslo * Fix bugs in test\_migrations.py * Fix regression allowing quotas to be applied to projects * Improve nova-manage usability * Add new cliutils code from oslo-incubator * Update tools/flakes to work with pydoc * Fix pep8 exclude logic for 1.3.3 * Avoid vm instance shutdown when power state is NOSTATE * Fix handling of unimplemented host actions * Fix positional arg swallow decorator * Fix minidns delete\_entry to work for hostname with mixed case chars * powervm: Refactored run\_command for better naming * Sync latest openstack.common.rpc * Ensure prep\_resize arguments can be serialized * Add host to get\_backdoor\_port() for network api * Add agent build API support for list/create/delete/modify agent build * Added sample tests to extended status API * Imported Translations from Transifex * Make policy.json not filesystem location specific * Use conductor for resourcetracker instance\_update * network managers: Pass elevated cxtx to update\_dhcp * Volume backed live migration w/o shared storage * Add pyflakes option to tox * Adds API Sample tests for Quotas extension * Boot from volume without image supplied * Implements volume usage metering * Configurable exec\_dirs to find rootwrap commands * Allow newer boto library versions * Add notifications when libvirtd goes down * Make update\_service\_capabilities() accept a list of capabilities * update mailmap to add my perferred mail * Fix test suite to use MiniDNS * Add support for new WMI iSCSI initiator API * Added sample tests to deferred delete API * On confirm\_resize, update correct resource tracker * Renaming xml test class in sample tests of consoles API * remove session param from certificate\_get * improve sessions for key\_pair\_(create,destroy) * powervm: add DiskAdapter for local volumes * Access DB values as dict 
not as attributes. Part 3 * Patch fake\_libvirt\_utils with fixtures.MonkeyPatch * Open test xenapi/vm\_rrd.xml relative to tests * Reset notifier\_api before each test * Reset volume\_api before cinder cloud tests * Fix rpc control\_exchange regression * Add generic customization hooks via decorator * add metadata support for overlapping networks * Split out part of compute's init\_host * Use elevated cxtx in resource\_tracker.resize\_claim * Fix test\_migrations for postgres * Add vpn ip/port setting support for CloudPipe * Access DB values as dict not as attributes. Part 2 * Enable debug in run\_tests using pdb * Add POWERVM\_STARTING state to powervm driver * Fix test\_inject\_admin\_password for OSX * Multi host DHCP networking and local DNS resolving * use file instead of tap for non-blockdevice images on Xen * use libvirt getInfo() to receive number of physical CPUs * Don't run the periodic task if ticks\_between\_runs is below zero * Fix args to AggregateError exception * Fix typo in inherit\_properties\_from\_image * Access DB values as dict not as attributes * Fix KeyError of log message in compute/api.py * Fix import problem in test\_virt\_disk\_vfs\_localfs * Remove start\_guests\_on\_host\_boot config option * Add aggregate\_host\_add and \_delete to conductor * Imported Translations from Transifex * Call plug\_vifs() for all instances in init\_host * Make compute manager use conductor for instance\_gets * Fixes HyperV compute "resume" tests * Convert datetimes for conductor instance\_update * Update migration so it supports PostgreSQL * Include 'hosts' and 'metadetails' in aggregate * Verify doc/api\_samples files along with the templates * Remove default\_image config option * Move ec2 config opts to nova.api.ec2.cloud * Move imagecache code from nova.virt.libvirt.utils * Use flags() helper method to override config in tests * RetryFilter checks 'node' as well as 'host' * Make resize and multi-node work properly together * Migration model update for multi-node resize fix * Add version to conductor migration\_update message * Validate rxtx\_factor as a float * Display errors when running nosetests * Respect the base\_dir\_name flag in imagebackend * Add exceptions to baremetal/db/api * Clean up unused methods in scheduler/driver * Provide better error message for aggregate-create * Imported Translations from Transifex * Allow multi\_host compute nodes to share dhcp ip * Add blank nova/virt/baremetal/\_\_init\_\_.py * Add migration\_update to conductor * Remove unnecessary topic argument * Add pluggable ServiceGroup monitoring APIs * Add SSL support to utils.generate\_glance\_url() * Add eventlet db\_pool use for mysql * Make compute manager use nova-conductor for instance\_update * Missing instance\_uuid in floating\_ip notifications * Make nova-dhcpbridge use CONFIG\_FILE over FLAGFILE * Rename instance\_info\_cache unique key constraints * Cleanup compute multi-node assignment of node * Imported Translations from Transifex * maint: remove an unused import from libvirt.utils * Encode consoleauth token in utf-8 to make it a str * nova-dhcpbridge should require the FLAGFILE is set * Added cpu\_info report to HyperV Compute driver * Remove stale flags unit tests * Truncate large console logs in libvirt * Move global fixture setup into nova/test.py * Complete API samples for Hosts extension * Fix HostDeserializer to enable multiple line xml * adjust rootwrap filters for recent file injection changes * Don't hard code the xen hvmloader path * Don't update arch twice when create 
server * remove db access in xen driver * Imported Translations from Transifex * Move compute\_driver into nova.virt.driver * Re-organize compute opts a bit * Move compute opts from nova.config * Add a CONTRIBUTING file * Compute doesn't set the 'host' field in instance * Xenapi: Don't resize down if not auto\_disk\_config * Cells: Re-add DB model and calls * Use more specific SecurityGroupHandler calls * Fix wait\_for\_deleted function in SmokeTests * Wrap log messages with \_() * Add methods to Host operations to fake hypervisor * Move sql options to nova.db.sqlalchemy.session * Add debug logging to disk mount modules * Remove the libguestfs disk mount API implementation * Remove img\_handlers config parameter usage * Convert file injection code to use the VFS APIs * Introduce a VFS implementation backed by the libguestfs APIs * Introduce a VFS implementation mapped to the host filesystem * Adds API for bulk creation/deletion of floating IPs * Remove obsolete config drive init.d example * Imported Translations from Transifex * Rename sql\_pool\_size to sql\_max\_pool\_size * Detect shared storage; handle base cleanup better * Allow VMs to be resumed after a hypervisor reboot * Fix non-primitive uses of instance in compute/manager * Remove extra space in exception * Adds missing index migrations by instance/status * Convert migrations.instance\_uuid to String(36) * Add missing binary * Change all tenants servers listing as policy-based * Fixes a bug in get\_info in the Hyper-V Driver * refactor: extract method: connect\_volume * Handle instances not being found in EC2 API responses * Pin pep8 to 1.3.3 * Return an error response if the specified flavor does not exists. (v4) * Send block device mappings to rebuild\_instance * Move db lookup for block device mappings * Use CONF.import\_opt() for nova.config opts * Imported Translations from Transifex * Remove nova.config.CONF * Add keystoneclient to pip-requires * Pass rpc connection to pre\_start\_hook * Fix typo: hpervisor=> hypervisor * Fix reversed args to call to \_reschedule * Add the beginnings of the nova-conductor service * remove old baremetal driver * Remove useless function quota\_usage\_create * Fix calls to private method in linux\_net * Drop unused PostgreSQL sequences from Folsom * Compact pre-Grizzly database migrations * Fix os-hosts extension can't return xml response correctly * Set node\_availability\_zone in XenAPIAggregateTestCase * Ignore editor backup files * Imported Translations from Transifex * Remove nova.flags * Remove FLAGS * Make fping extension use CONF * Use disk image path to setup lxc container * Use the auth\_token middleware from keystoneclient * improve session handling around instance\_ methods * add index to fixed\_ips * add instance\_type\_extra\_specs to instances * Change a toplevel function comment to a docstring * Ensure cat process is terminated * Add some sqlalchemy tweakables * Fixes an error reporting bug on Hyper-V * update api\_samples add os-server-start-stop * update api\_samples add os-services module * Switch to using eventlet\_backdoor from oslo * Sync eventlet\_backdoor from oslo * Added sample tests to consoles API * Fix use of 'volume' variable name * Ditch unused import and variable * Make ec2\_instance\_create db method consistant across db apis * Adds documentation for Hyper-V testing * Adds support for ConfigDriveV2 in Hyper-V * don't explode if a 413 didn't set Retry-After * Fix a couple uses of FLAGS * Remove nova.flags imports from scheduler code * Remove some unused imports 
from compute/\* * Remove importing of flags from compute/\* * Remove nova.flags imports from bin/\* * Move nova shared config options to nova.config * Fix use\_single\_default\_gateway * Update api\_samples README.rst to use tox * Do not alias stdlib uuid module as uuidutils, since nova has uuidutils * Allow group='foo' in self.flags() for tests * updated api\_samples with real hypervisor\_hostname * Issue a hard shutdown if clean fails on resize up * Introduce a VFS api abstraction for manipulating disk images * Fix network RPC API backwards compat * create\_db\_entry\_for\_new\_instance did not call sgh for default * Add support for backdoor\_port to be returned with a rpc call * Refactor scheduling filters * Unpin amqplib and kombu requirements * Add module for loading specific classes * Make sure instance data is always refreshed * Move all mount classes into a subdirectory * Add support for resizes to resource tracker * Fixes create instance \*without\* config drive test * Update db entry before upate the DHCP host file * Remove gen\_uuid() * Enhance compute capability filter to check multi-level * API extension for fpinging instances * Allow controller extensions to extend update/show * Isolate tests from the environment variable http\_proxy * Handle image cache hashing on shared storage * fix flag type define error * Simplify libvirt volume testing code * Migrate floating ip addresses in multi\_host live\_migration * Add DB query to get in-progress migrations * Try hard shutdown if clean fails on resize down * Restore self.test\_instance at LibvirtConnTestCase.setUp() * Fixes usage of migrate\_instance\_start * added getter methods for quantumv2 api * fix LVM backed VM logial volumes can't be deleted * Clean up \_\_main\_\_ execution from two tests for consistency * Imported Translations from Transifex * Update uuidutils from openstack common * Remove volume.driver and volume.iscsi * Use base image for rescue instance * Make xenapi shutdown mode explicit * Fix a bug in XenAPISession's use of virtapi * Ban db import from nova/virt * Update vol mount smoketest to wait for volume * Add missing webob to exc * Add missing exception NetworkDuplicated * Fix misuse of exists() * Rename config to vconfig * Move agent\_build\_get\_by\_triple to VirtAPI * Fix \_setup\_routes() signature in APIRouter * Move libvirt specific cgroups setup code out of nova.virt.disk.api * make libvirt with Xen more workable * script for configuring a vif in Xen in non-bridged mode * Upgrade pylint version to 0.26.0 * Removes fixed\_ip\_get\_network * improve session handling around virtual\_interfaces * improve sessions for reservation * improve session handling around quotas * Remove custom test assertions * Add nova option osapi\_compute\_unique\_server\_name\_scope * Add REST API support for list/enable/disable nova services * Switch from FLAGS to CONF in nova.compute * Switch from FLAGS to CONF in tests * Get rid of pylint E0203 in filter\_scheduler.py * Updated scheduler and compute for multiple capabilities * Switch from FLAGS to CONF in nova.db * Removed two unused imports * Remove unused functions * Fixes a bug in api.metadata.base.lookup() on Windows * Fixes a bug in nova.utils, due to Windows compatibility issues * improve session handling of dnsdomain\_list * Make tox.ini run pep8/hacking checks on bin * Fix import ordering in /bin scripts * add missing opts to test\_db\_api.py * clean up dnsdomain\_unregister * Make utils.mkfs() set label when fs=swap * Another case of dictionary access * Remove 
generic topic support from filter scheduler * Clarify server\_name, hostname, host * Refactor scheduling weights * update nova.conf.sample * Check instance\_type in compute capability filter * Sync latest code from oslo-incubator * Adds REST API support for Fixed IPs * Added separate bare-metal MySQL DB * Added bare-metal host manager * Remove unused volume exceptions * Adds a conf option for custom configdrive mkisofs * Fixed HyperV to get disk stats of instances drive * powervm: failed spawn should raise exception * Enable Quantum linux bridge VIF driver to use "bridge" type * Remove nova-volume DB * make diagnostics workable for libvirt with Xen * Avoid unnecessary system\_metadata db lookup * Make instance\_system\_metadata load with instance * Add some xenapi Bittorrent tests * Move security groups and firewall ops to VirtAPI * Move host aggregate operations to VirtAPI * Simplify topic handling in network rpcapi * Sync rpc from openstack-common * Send instance\_type to resize\_instance * Remove instance\_type db lookup in prep\_resize * Send all aggregate data to remove\_aggregate\_host * Fix incorrect LOG.error usage in \_compare\_cpu * Limit formatting routes when adding resources * Removes unnecessary db query for instance type * Fix verification in test\_api\_samples.py * Yield in between hash runs for the image cache manager * Remove unused function require\_instance\_exists * Refactor resource tracker claims and test logic * Remove out-of-date comment * Make HostManager.get\_all\_host\_states() return an iterator * Switch from FLAGS to CONF in nova.virt * 'BackupCreate' rotation parameter >= 0 * Corrects usage of db.api.network\_get * Switch from FLAGS to CONF in nova.console * Map NotAuthorized to 403 in floating ips extension * Decouple EC2 API from using instance id * libvirt: Regenerates xml instead of using on-disk * Imported Translations from Transifex * Fix to include error message in instance faults * Include hostname in notification payloads * Fix quota updating during soft delete and restore * Fix warnings found with pyflakes * make utils.mkfs() more general * Fixes snapshot instance failure on libvirt * Make ComputeDrivers send hypervisor\_hostname * Fixed instance deletion issue from Nova API * De-duplicate option: console\_public\_hostname * Don't verify image hashes if checksumming is disabled * Imported Translations from Transifex * Look up stuck-in-rebooting instances in manager * Use chance scheduler in EC2 tests * Send all aggregate data to add\_aggregate\_host * Send all migration data to finish\_revert\_resize * Send all migration data to revert\_resize * Fix migrations when not using multi-host network * Fix bandwidth polling exception * Fixes volume attach issue on Hyper-V * Shorten self.compute.resource\_tracker in test\_compute.py * Cleanup nova.db.sqlalchemy.api import * Use uuidutils.is\_uuid\_like for uuid validation * Add uuidutils module * Imported Translations from Transifex * Switch from FLAGS to CONF in nova.scheduler * Switch from FLAGS to CONF in nova.network * Switch from FLAGS to CONF in misc modules * Switch from FLAGS to CONF in nova.api * Switch from FLAGS to CONF in bin * Remove flags.DECLARE * Move parse\_args to nova.config * Forbid resizing instance to deleted instance types * Imported Translations from Transifex * Fix unused variables and wrong indent in test\_compute * Remove unnecessary db call from xenapi/vmops * xenapi: place boot lock when doing soft delete * Detangle soft delete and power off * Fix signing\_dir option for 
auth\_token middleware * Fix no attribute 'STD\_OUT\_HANDLE' on windows * Use elevated context in disassociate\_floating\_ip * Remove db.instance\_get\* from nova/virt * sync deprecated log method from openstack-common * move python-cinderclient to pip-requires * Tiny resource tracker cleanup * Fix Quantum v2 API method signatures * add doc to standardize session usage * improve sessions around floating\_ip\_get\_by\_address * Bump the base rpc version of the network api * Eliminates simultaneous schedule race * Introduce VirtAPI to nova/virt * Add some hooks for managers when service starts * Fix backwards compat of rpc to compute manager * xenapi: Make agent optional * Add xenapi host\_maintenance\_mode() test * refactor: extract \_attach\_mapped\_block\_devices * Make bdms primitive in rpcapi.terminate\_instance * Ability to specify a host restricted to admin * Improve EC2 describe\_security\_groups performance * Increased MAC address range to reduce conflicts * Move to a more canonicalized output from qemu-img info * Read deleted flavors when using to\_xml() * Fix copy-paste bug in block\_device\_info\_generation * Remove nova-volume scheduling support * Remove duplicate api\_paste\_config setting * Fixes hypervisor based image filtering on Hyper-V * make QuantumV2 support requested nic ordering * Add rxtx\_factor to network migration logic * Add scheduler retries for prep\_resize operations * Add call to reset quota usage * Make session.py reusable * Remove redundant code from PowerVM driver * Force earlier version of sqlalchemy * refactor: extract method vm\_ref\_or\_raise * Use env to set environ when starting dnsmasq * pep8 fixes for nova-manage * Fix VM deletion from down compute node * Remove database usage from libvirt check\_can\_live\_migrate\_destination * Clean up xenapi VM records on failed disk attaches * Remove nose detailed error reporting * Validate is-public parameter to flavor creation * refactor: extract \_terminate\_volume\_connections * improve sessions around compute\_node\_\* * Fix typo in xenapi/host.py * Remove extra print line in hacking.py * Ensures compute\_driver flag can be used by bdm * Add call to trigger\_instance[add/remove]\_security\_group\_refresh quantum * Validates Timestamp or Expiry time in EC2 requests * Add API samples to Admin Actions * Add live migration helper methods to fake hypervisor driver * Use testtools as the base testcase class * Clean up quantumv2.get\_client * Fix getattr usage * Imported Translations from Transifex * removes the nova-volume code from nova * Don't elevate context when calling run\_instance * remove session parameter from fixed\_ip\_get * Make instance\_get\_all() not require admin context * Fix compute tests abusing admin context * Fix use of elevated context for resize methods * Fix check for memory\_mb * Imported Translations from Transifex * Fix nova-network MAC collision logic * Fix rpcapi version for new methods * Remove useless return * Change hacking.py N306 to use logical\_lines * Add missing live migration methods to ComputeDriver base class * Fix hacking.py naivete regarding lines that look like imports * details the reboot behavior that a virt driver should follow * xenapi: refactor: Agent class * Send usage event on revert\_resize * Fix config-file overrides for nova-dhcpbridge * Make nova-rootwrap optional * Remove duplicated definition of is\_loaded() * Let scheduler know services' capabilities at startup * fetch\_images() method no more needed * Fix hardcoded topic strings with constants * Save 
exceptions earlier in finish\_resize * Correct \_extract\_query\_params in image.glance * Fix Broken XML Namespace Handling * More robust checking for empty requested\_networks * Imported Translations from Transifex * Rehydrate NetworkInfo in reboot\_instance() * Update common * Use cat instead of sleep for rootwrap test * Addtional 2 packages for dev environment on ubuntu * Let VlanManager keep network's DNS settings * Improve the performance of quantum detection * Support for nova client list hosts with specific zone * Remove unused imports in setup.py * Fixes fake for testing without qemu-img * libvirt: persist volume attachments into config * Extend IPv6 subnets to /64 if network\_size is set smaller than /64 * Send full migration data to finish\_resize * Send full migration to confirm\_resize * Send full migration to resize\_instance * Migrate to fileutils and lockutils * update sample for common logging * Add availability zone extension to API samples test * Refactor: config drive related functions * Fix live migration volume assignment * Remove unused table options dicts * Add better log line for undefined compute\_driver * Remove database usage from libvirt imagecache module * Return empty list when listing servers with bad status value * Consistent Rollback for instance creation failures * Refactor: move find\_guest\_agent to xenapi.agent * Fix Incorrect Exception when metadata is over 255 characters * Speed up volume and routing tests * Speed up api.openstack.compute.contrib tests * Allow loading only selected extensions * Migrate network of an instance * Don't require quantumclient when running nova-api * Handle the case where we encounter a snap shot correctly * Remove deprecated root\_helper config * More specific exception handling in migration 091 * Add virt driver capabilities definition * Remove is\_admin\_context from sqlalchemy.api * Remove duplicate methods from network/rpcapi.py * SanISCSIDriver SSH execution fixes * Fix bad Log statement in nova-manage * Move mkfs from libvirt.utils to utils * Fixes bug Snapshotting LXC instance fails * Fix bug in a test for the scheduler DiskFilter * Remove mountpoint from parse\_volume\_info * limit the usage of connection\_info * Sync with latest version of openstack.common.timeutils * nova-compute sends its capabilities to schedulers ASAP * Enable custom eventlet.wsgi.server log\_format * Fix the fail-on-zero-tests case so that it is tolerant of no output * add port support when QuantumV2 subclass is used * Add trove classifiers for PyPI * Fix and enable pep8 E502, E712 * Declare vpn client option in pipelib * Fix nova-volume-usage-audit * Fix error on invalid delete\_on\_termination value * Add Server diagnostics extension api samples * Add meaningful server diagnostic information to fake hypervisor * Use instance\_exists to check existence * Fix nova-volume-usage-audit * Imported Translations from Transifex * Avoid leaking BDMs for deleted instances * Deallocate network if instance is deleted in spawn * Create Flavors without Optional Arguments * Update policies * Add DNS records on IP allocation in VlanManager * update kwargs with args in wrap\_instance\_fault * Remove ComputeDriver.update\_host\_status() * Do not call directly vmops.attach\_volume * xenapi: fix bfv behavior when SR is not attached * Use consoleauth rpcapi in nova-novncproxy * Change install\_venv to use setup.py develop * Fixes syntax error in nova.tools.esx.guest\_tools.py * Allow local rbd user and secret\_uuid configuration * Set host prior to allocating 
network information * Remove db access for block devices and network info on reboot * Remove db access for block devices on terminate\_instance * Check parameter 'marker' before make request to glance * Imported Translations from Transifex * Internationalize nova-manage * Imported Translations from Transifex * Fixes live\_migration missing migrate\_data parameter in Hyper-V driver * handles empty dhcp\_domain with hostname in metadata * xenapi: Tag volumes in boot from volume case * Stops compute api import at import time * Fix imports in openstack compute tests * Make run\_tests.sh fail if no tests are actually run * Implement snapshots for raw backend * Used instance uuid rather than id in remove-fixed-ip * Migrate DHCP host info during resize * read\_deleted snapshot and volume id mappings * Make sure sleep can be found * Pass correct task\_state on snapshot * Update run\_tests.sh pep8 ignore list for pep8 1.2 * Clean up imports in test\_servers * Revert "Tell SQLite to enforce foreign keys." * Add api samples to simple tenant usage extension * Avoid RPC calls while holding iptables lock * Add util for image conversion * Add util for disk type retrieval * Fixes test\_libvirtr spawn\_with\_network\_info test * Remove unneeded temp variable * Add version to network rpc API * Remove cast\_to\_network from scheduler * Tell SQLite to enforce foreign keys * Use paramiko.AutoAddPolicy for the smoketests * nova-manage doesn't validate key to update the quota * Dis-associate an auto-assigned floating IP should return proper warning * Proxy floating IP calls to quantum * Handle invalid xml request to return BadRequest * Add api-samples to Used limits extension * handle IPv6 race condition due to hairpin mode * Imported Translations from Transifex * XenAPI should only snapshot root disk * Clarify trusted\_filter conf options * Fix pep8 error in bin/nova-manage * Set instance host field after resource claim * powervm: add polling timeout for LPAR stop command * Drop claim timeouts from resource tracker * Update kernel\_id and ramdisk\_id while rebuilding instance * Add Multiple Create extension to API sample tests * Fix typo in policy docstring * Fix reserve\_block\_device\_name while attach volume * Always use bdm in instance\_block\_mapping on Xen * Centralize sent\_meta definition * Move snapshot image property inheritance * Set read\_deleted='yes' for instance\_id\_mappings * Fix XML response for return\_reservation\_id * Stop network.api import on network import * libvirt: ignore deleted domain while get block dev * xenapi: Refactor snapshots during resize * powervm: remove broken instance filtering * Add ability to download images via BitTorrent * powervm: exception handling improvements * Return proper error messages while associating floating IP * Create util for root device path retrieval * Remove dependency on python-ldap for tests * Add api samples to Certificates extension * Add nova-cert service to integrated\_helpers * Compare lists in api samples against all matches * ip\_protocol for ec2 security groups * Remove unneeded lines from aggregates extension API sample tests * Remove deprecated Folsom code: config convert * Make resource tracker uses faster DB query * Remove deprecated Folsom code: bandwith\_poll\_interval * Add TestCase.stub\_module to make stubbing modules easier * Imported Translations from Transifex * Update tools hacking for pep8 1.2 and beyond * Remove outdated moduleauthor tags * remove deprecated connection\_type flag * Add aggregates extension to API samples test 
* Update RPM SPEC to include new bandwidth plugin * Remove TestCase.assertNotRaises * Imported Translations from Transifex * Imported Translations from Transifex * Use self.flags() instead of manipulating FLAGS by hand * Use test.TestCase provided self.mox and self.stubs * Remove unnecessary setUp, tearDown and \_\_init\_\_ in tests * xenapi: implement resume\_state\_on\_host\_boot * Revert "Add full test environment." * Synchronize docstring with actual implementation * Num instances scheduler filter * Add api samples to cloudpipe extension * Fix CloudPipe extension XML serialization * Max I/O ops per host scheduler filter * libvirt: continue detach if instance not found * libvirt: allows attach and detach from all domains * Fixes csv list required for qemu-img create * Added compute node stats to HostState * libvirt: Improve the idempotency of iscsi detach * Pass block\_device\_info to destroy in revert\_resize * Enable list with no dict objects to be sorted in api samples * Fixes error message for flavor-create duplicate ID * Loosen anyjson dependency to avoid clash with ceilometer * xenapi: make it easier to recover from failed migrations * Remove unnecessary check if migration\_ref is not None * Bump the version of SQLAlchemy in pip-requires * optimize slightly device lookup with LXC umounts * Support for several HA RabbitMQ servers * xenapi: Removing legacy swap-in-image * xenapi: increase timeout for resetnetwork agent request * Replaced default hostname function from gethostname to getfqdn * Fix issues deleting instances in RESIZED state * Modified 404 error response to show specific message * Updated code to update attach\_time of a volume while detaching * Check that an image is active before spawning instances * Fix issues with device autoassignment in xenapi * Deleting security group does not mark rules as deleted * Collect more accurate bandwidth data for XenServer * Zmq register opts fix in receiver * Revert explicit usage of tgt-adm --conf option * Fix booting a raw image on XenServer * Add servers/ips api\_samples tests * LOG.exception() should only be used in exception handler * Fix XenServer's ability to boot xen type images * all\_extensions api\_samples testing for server actions * Fixes remove\_export for IetAdm * libvirt: Fix \_cleanup\_resize * Imported Translations from Transifex * xenapi: fix undefined variable in logging message * Spelling: ownz=>owns * Fix NetAppCmodeISCSIDriver.\_get\_lun\_handle() method * Integration tests virtual interfaces API extension * Allow deletion of instance with failed vol cleanup * Fixes snapshotting of instances booted from volume * Move fakeldap.py from auth dir to tests * Remove refs to ATAoE from nova docs * Imported Translations from Transifex * Set volume status to error if scheduling fails * Update volume detach smoke test to check status * Fix config opts for Storwize/SVC volume driver * Ensure hybrid driver creates veth pair only once * Cleanup exception handling * Imported Translations from Transifex * Add lun number (0) to model\_update in HpSanDriver * libvirt: return after soft reboot successfully completes * Fixes to the SolarisISCSI Driver * Fix live migration when volumes are attached * Clarify dangerous use of exceptions in unit tests * Cleanup test\_api\_samples:\_compare\_result * Fix testContextClaimWithException * Fix solidfire unit tests * Stop double logging to the console * Recreate nw\_info after auto assigning floating ip * Re-generate sample config file * Use test.TestingException instead of duplicating it 
* Fix startup with DELETED instances * Fix solidfire option declaration * Restore SIGPIPE default action for subprocesses * Raise NotFound for non-existent volume snapshot create * Catch NotFound exception in FloatingIP add/remove * Adds API sample testing for rescue API extension * Fix bugs in resource tracker and cleanup * Replace builtin hash with MD5 to solve 32/64-bit issues * Properly create and delete Aggregates * No stack trace on bad nova aggregate-\* command * Clean up test\_state\_revert * Fix aggregate\_hosts.host migration for sqlite * Call compute manager methods with instance as keyword argument * Adds deserialization for block\_device\_mapping * Fix marker pagination for /servers * Send api.fault notification on API service faults * Always yield to other greenthreads after database calls * fix unused import * Don't include auto\_assigned ips in usage * Correct IetAdm remove\_iscsi\_target * Cleanup unused import in manager.py * xapi: fix create hypervisor pool * Bump version to 2013.1 * Add Keypairs extension to API samples test * sample api testing for os-floating-ips extension * Update quota when deleting volume that failed to be scheduled * Update scheduler rpc API version * Added script to find unused config options * Make sure to return an empty subnet list for a network without sunbet * Fix race condition in CacheConcurrencyTestCase * Makes scheduler hints and disk config xml correct * Add lookup by ip via Quantum for metadata service * Fix over rate limit error response * Add deserialization for multiple create and az * Fix doc/README.rst to render properly * Add user-data extension to API samples tests * Adds API sample testing for Extended server attributes extension * Inherit the base images qcow2 properties * Correct db migration 91 * make ensure\_default\_security\_group() call sgh * add ability to clone images * add get\_location method for images * Adds new volume API extensions * Add console output extension to API samples test * Raise BadRequest while creating server with invalid personality * Update 'unlimited' quota value to '-1' in db * Modified 404 error response for server actions * Fix volume id conversion in nova-manage volume * Improve error handling of scheduler * Fixes error handling during schedule\_run\_instance * Include volume\_metadata with object on vol create * Reset the task state after backup done * Allows waiting timers in libvirt to raise NotFound * Improve entity validation in volumes APIs * Fix volume deletion when device mapper is used * Add man pages * Make DeregisterImage respect AWS EC2 specification * Deserialize user\_data in xml servers request * Add api samples to Scheduler hints extension * Include Schedule Hints deserialization to XML API * Add admin actions extension * Allow older versions of libvirt to delete vms * Add security groups extension to API samples test * Sync a change to rpc from openstack-common * Add api\_samples tests for servers actions * Fix XML deserialization of rebuild parameters * All security groups not returned to admins by default * libvirt: Cleanup L2 and L3 rules when confirm vm resize * Corrects use of instance\_uuid for fixed ip * Clean up handling of project\_only in network\_get * Add README for doc folder * Correct typo in memory\_mb\_limit filter property * Add more useful logging around the unmount fail case * Imported Translations from Transifex * Make compute/manager.py use self.host instead of FLAGS.host * Add a resume delete on volume manager startup * Remove useless \_get\_key\_name() 
in servers API * Add entity body validation helper * Add 422 test unit test for servers API * Use tmpdir and avoid leaving test files behind * Includes sec group quota details in limits API response * Fixes import issue on Windows * Overload comment in generated SSH keys * Validate keypair create request body * Add reservations parameter when cast "create\_volume" to volume manager * Return 400 if create volume snapshot force parameter is invalid * Fix FLAGS.volumes\_dir help message * Adds more servers list and servers details samples * Makes key\_name show in details view of servers * Avoid VM task state revert on instance termination * Avoid live migrate overwriting the other task\_state * Backport changes from Cinder to Nova-Volume * Check flavor id on resize * Rename \_unplug\_vifs to unplug\_vifs * PowerVM: Establish SSH connection at use time * libvirt: Fix live block migration * Change comment for function \_destroy * Stop fetch\_ca from throwing IOError exceptions * Add 'detaching' to volume status * Reset task state before rescheduling * workaround lack of quantum/nova floatingip integration * fix rpcapi version * Added description of operators for extra\_specs * Convert to ints in VlanManager.create\_networks * Remove unused AddressAlreadyAllocated exception * Remove an unused import * Make ip block splitting a bit more self documenting * Prevent Partial terminations in EC2 * Add flag cinder\_endpoint\_template to volume.cinder * Handle missing network\_size in nova-manage * Adds API sample test for Flavors Extra Data extension * More specific lxml versions in tools/pip-requires * Fixes snat rules in complex networking configs * Fix flavor deletion when there is a deleted flavor * Make size optional when creating a volume from a snapshot * Add documentation for scheduler filters scope * Add and fix tests for attaching volumes * Fix auth parameter passed to libvirt openAuth() method * xapi: Fix live block migration * Add a criteria to sort a list of dict in api samples * delete a module never used * Update SolidFire volume driver * Adds get\_available\_resource to hyperv driver * Create image of volume-backed instance via native API * Improve floating IP delete speed * Have device mapping use autocreated device nodes * remove a never used import * fix unmounting of LXC containers in the presence of symlinks * Execute attach\_time query earlier in migration 98 * Add ServerStartStop extension API test * Set install\_requires in setup.py * Add Server Detail and Metadata tests * xenapi: Make dom0 serialization consistent * Refer to correct column names in migration 98 * Correct ephemeral disk cache filename * Stop lock decorator from leaving tempdirs in tests * Handle missing 'provider\_location' in rm\_export * Nail the pip requirement at 1.1 * Fix typo in tgtadm LOG.error() call * Call driver for attach/detach\_volume * rbd: implement create\_volume\_from\_snapshot * Use volume driver specific exceptions * Fake requests in tests should be to v1 * Implement paginate query use marker in nova-api * Simplify setting up test notifier * Specify the conf file when creating a volume * Generate a flavorid if needed at flavor creation * Fix EC2 cinder volume creation as an admin user * Allow cinder catalog match values to be configured * Fix synchronized decorator path cleanup * Fix and cleanup compute node stat tracking * avoid the buffer cache when copying volumes * Add missing argument to novncproxy websockify call * Use lvs instead of os.listdir in \_cleanup\_lvm * Fixing call to 
hasManagedSaveImage * Fix typo in simple\_tenant\_usage tests * Move api\_samples to doc dir * Add a tunable to control how many ARPs are sent * Get the extension alias to compose the path to save the api samples * Add scope to extra\_specs entries * Use bare container format by default * Sync some updates from openstack-common * Fix simple\_tenant\_usage's handing of future end times * Yield to another greenthread when some time-consuming task finished * Automatically convert device names * Fix creation of iscsi targets * Makes sure new flavors default to is\_public=True * Optimizes flavor\_access to not make a db request * Escape ec2 XML error responses * Skip tests in OSX due to readlink compat * Allow admins to de-allocate any floating IPs * Fix xml metadata for volumes api in nova-volume * Re-attach volumes after instance resize * Speed up creating floating ips * Adds API sample test for limits * Fix vmwareapi driver spawn() signature * Fix hyperv driver spawn() signature * Add API samples to images api * Add method to manage 'put' requests in api-sample tests * Add full python path to test stubbing modules for libvirt * Rename imagebackend arguments * Fixes sqlalchemy.api.compute\_node\_get\_by\_host * Fix instances query for compute stats * Allow hard reboot of a soft rebooting instance * On rebuild, the compute.instance.exists * Fix quota reservation expiration * Add api sample tests for flavors endpoint * Add extensions for flavor swap and rxtx\_factor * Address race condition from concurrent task state update * Makes sample testing handle out of order output * Avoid leaking security group quota reservations * Save the original base image ref for snapshots * Fixed boot from snapshot failure * Update zmq context cleanup to use term * Fix deallocate\_fixed\_ip invocation * fix issues with Nova security groups and Quantum * Clear up the .gitignore file * Allow for deleting VMs from down compute nodes * Update nova-rpc-zmq-receiver to load nova.conf * FLAG rename: bandwith\_poll\_\*=>bandwidth\_poll\_\* * Spelling: Persistant=>Persistent * Fix xml metadata for volumes extension * delete unused valiables * Clean up non-spec output in flavor extensions * Adds api sample testing for extensions endpoint * Makes api extension names consistent * Fixes spawn method signature for PowerVM driver * Spelling fix Retrive=> Retrieve * Update requires to glanceclient >=0.5.0 * Sort API extensions by alias * Remove scheduler RPC API version 1.x * Add version 2.0 of the scheduler RPC API * Remove some remnants of VSA support * hacking: Add driver prefix recommendation * Implements PowerVM get\_available\_resource method * Add a new exception for live migration * Assume virt disk size is consumed by instances * External locking for image caching * Stop using scheduler RPC API magic * Adds api sample testing for versions * Do not run pylint by default * Remove compute RPC API version 1.x * Add version 2.0 of compute RPC API * Accept role list from either X-Roles or X-Role * Fix PEP8 issues * Fix KeyError when test\_servers\_get fails * Update nova.conf.sample * Fixes backwards compatible rpc schedule\_run * Include launch-index in openstack style metadata * Port existing code to utils.ensure\_tree * Correct utils.execute() to check 0 in check\_exit\_code * Add the self parameter to NoopFirewallDriver methods * request\_spec['instance\_uuids'] as list in resize * Fix column variable typo * Add ops to aggregate\_instance\_extra\_specs filter * Implement project specific flavors API * Correct 
live\_migration rpc call in test * Allow connecting to a ssl-based glance * Move ensure\_tree to utils * Define default mode and device\_id\_string in Mount * Update .mailmap * Fix path to example extension implementation * Remove test\_keypair\_create\_quota\_limit() * Remove duplicated test\_migrate\_disk\_and\_power\_off() * Add missing import webob.exc * Fix broken SimpleScheduler.schedule\_run\_instance() * Add missing user\_id in revoke\_certs\_by\_user\_and\_project() * Rename class\_name to project\_id * Use the compute\_rpcapi instance not the module * Remove duplicated method VM\_migrate\_send * Add missing context argument to start\_transfer calls * Remove unused permitted\_instance\_types * Add lintstack error checker based on pylint * Make pre block migration create correct disk files * Remove unused and old methods in hyperv and powervm driver * Trap iscsiadm error * Check volume status before detaching * Simplify network create logic * Clean up network create exception handling * Adding indexes to frequently joined database columns * Ensure hairpin\_mode is set whenever vifs is added to bridge * Returns hypervisor\_hostname in xml of extension * Adds integration testing for api samples * Fix deallocate\_fixed\_ip() call by unifying signature * Make instance\_update\_and\_get\_original() atomic * Remove unused flags * Remove test\_instance\_update\_with\_instance\_id test * Remove unused instance id-to-uuid function * Re-work the handling of firewall\_driver default * Include CommonConfigOpts options in sample config * Re-generate nova.conf.sample * Ensure log formats are quoted in sample conf * Don't include hostname and IP in generated sample conf * Allow generate\_sample.sh to be run from toplevel dir * Let admin list instances in vm\_states.DELETED * Return actual availability zones * Provide a hint for missing EC2 image ids * Check association when removing floating ip * Add public network support when launching an instance * Re-define libvirt domain on "not found" exception * Add two prereq pkgs to nova devref env guide * Fix hyperv Cfgs: StrOpt to IntOpt * continue deleting instance even if quantum port delete fails * Typo fix: existant => existent * Fix hacking.py git checks to propagate errors * Don't show user-data when its not sent * Clarify nwfilter not found error message * Remove unused \_create\_network\_filters() * Adds missing assertion to FloatingIP tests * Restore imagebackend in test\_virt\_drivers.py * Add nosehtmloutput as a test dependency * Remove unused exceptions from nova/exception.py * Cleanup pip dependencies * Make glance image service check base exception classes * Add deprecated warning to SimpleScheduler * Have compute\_node\_get() join 'service' * XCP-XAPI version fix * add availability\_zone to openstack metadata * Remove stub\_network flag * Implements sending notification on metadata change * Code clean up * Implement network creation in compute API * Debugged extra\_specs\_ops.py * Fix typo in call in cinder.API unreserve\_volume * xenapi: Tag nova volumes during attach\_volume * Allow network to call get\_fixed\_ip\_by\_address * Add key\_name attribute in XML servers API * Fix is\_admin check via policy * Keep the ComputeNode model updated with usage * Remove hard-coded 'admin' role checking and use policy instead * Introduce ImagePropertiesFilter scheduler filter * Return HTTP 422 on bad server update PUT request * Makes sure instance deletion ok with deleted data * OpenStack capitalization added to HACKING.rst * Fix get\_vnc\_console 
race * Fix a TypeError that occurs in \_reschedule * Make missing imports flag in hacking settable * Makes sure tests don't leave lockfiles around * Update FilterScheduler doc * Disable I18N in Nova's test suites * Remove logging in volume tests * Refactor extra specs matching into a new module * Fix regression in compute\_capabilities filter * Refactor ComputeCapabilitiesFilter test cases * Revert per-user-quotas * Remove unused imports * Fix PEP8 issues * Sync changes from openstack common * Implement GET (show) in OS API keypairs extension * Fix spelling typos * Ignoring \*.sw[op] files * xenapi: attach root disk during rescue before boot * Allows libvirt to set a serial number for a volume * Adds support for serial to libvirt config disks * Remove unused variables * Always create the run\_instance records locally * Fix use of non-existant var pool * Adds Hyper-V support in nova-compute (with new network\_info model), including unit tests * Update sqlite to use PoolEvents for regexp * Remove unused function in console api * Allow nova to guess device if not passed to attach * Update disk config to check for 'server' in req * Changes default behavior of ec2 * Make ComputeFilter verify compute-related instance properties * Collect instance capabilities from compute nodes * Move volume size validation to api layer * Change IPtablesManager to preserve packet:byte counts * Add get\_key\_pair to compute API * Defined IMPL in global ipv6 namespace * xenapi: remove unnecessary json decoding of injected\_files * Remove unnecessary try/finally from snapshot * Port pre\_block\_migration to new image caching * Adding port attribute in network parameter of boot * Add support for NFS-based virtual block devices * Remove assigned, but unused variables from nova/db/sqlalchemy/api.py * xenapi: Support live migration without pools * Restore libvirt block storage connections on reboot * Added several operators on instance\_type\_extra\_specs * Revert to prior method of executing a libvirt hard\_reboot * Set task\_state=None when finished snapshotting * Implement get\_host\_uptime in libvirt driver * continue config-drive-v2, add openstack metadata api * Return values from wrapped functions in decorators * Allow XML payload for volume creation * Add PowerVM compute driver and unit tests * Revert task\_state on failed instance actions * Fix uuid related bug in console/api * Validate that min\_count & max\_count parameters are numeric * Allow stop API to be called in Error * Enforce quota limitations for instance resize * Fix rpc error with live\_migration * Simple checks for instance user data * Change time.sleep to greenthread.sleep * Add missing self. 
for parent * Rewrite image code to use python-glanceclient * Fix rpc error with live\_migration * volumes: fix check\_for\_export() in non-exporting volume drivers * Avoid {} and [] as default arguments * Improve bw\_usage\_update() performance * Update extra specs calls to use deleted: False * Don't stuff non-db data into instance dict * Fix type error in state comparison * update python-quantumclient dependency to >=2.0 * Key auto\_disk\_config in create server off of ext * Implement network association in OS API * Fix TypeError conversion in API layer * Key requested\_networks off of network extension * Key config\_drive off of config-drive extension * Make sure reservations is initialized * import module, not type * Config drive v2 * Don't accept key\_name if not enabled * Fix HTTP 500 on bad server create * Default behavior should restrict admins to tenant for volumes * remove nova code related to Quantum v1 API * Make sure ec2 mapping raises proper exceptions * Send host not ComputeNode into uptime RPC call * Making security group refresh more specific * Sync with latest version of openstack.common.cfg * Sync some cleanups from openstack.common * maint: compare singletons with 'is' not '==' * Compute restart causes period of network 'blackout' * Revert "Remove unused add\_network\_to\_project() method" * Add error log for live migration * Make FaultWrapper handle exception code = None * Don't accept scheduler\_hints if not enabled * Avoid double-reduction of quota for repeated delete * Traceback when over allocating IP addresses * xenapi: ensure all calls to agent get logged * Make update\_db an opt arg in scheduler manager * Key min\_count, max\_count, ret\_res\_id off of ext * Key availability\_zone in create server off of ext * Fix the inject\_metadata\_into\_fs in the disk API * Send updated instance model to schedule\_prep\_resize * Create unique volumes\_dir for testing * Fix stale instances being sent over rpc * Fix setting admin\_pass in rescue command * Key user\_data in create server off of extension * Key block\_device\_mapping off of volume extension * Moves security group functionality into extension * Adds ability to inherit wsgi extensions * Fixes KeyError when trying to rescue an instance * Make TerminateInstances compatible with EC2 api * Uniqueness checks for floating ip addresses * Driver for IBM Storwize and SVC storage * scheduler prep\_resize should not update instance['host'] * Add a 50 char git title limit test to hacking * Fix a bug on remove\_volume\_connection in compute/manager.py * Fix a bug on db.instance\_get\_by\_uuid in compute/manager.py * Make libvirt\_use\_virtio\_for\_bridges flag works for all drivers * xenapi: reduce polling interval for agent * xenapi: wait for agent resetnetwork response * Fix invalid exception format strings * General host aggregates part 2 * Update devref for general host aggregates * Cleanup consoles test cases * Return 409 error if get\_vnc\_console is called before VM is created * Move results filtering to db * Prohibit file injection writing to host filesystem * Added updated locations for iscsiadm * Check against unexpected method call * Remove deprecated use Exception.message * Remove temporary hack from checks\_instance\_lock * Remove temporary hack from wrap\_instance\_fault * Fix up some instance\_uuid usage * Update vmops to access metadata as dict * Improve external locking on Windows * Fix traceback when detaching volumes via EC2 * Update RPC code from common * Fixes parameter passing to tgt-admin for iscsi * 
Solve possible race in semaphor creation * Rename private methods of compute manager * Send full instance to compute live\_migration * Add underscore in front of post\_live\_migration * Send full instance to scheduler live\_migration * Send full instance to run\_instance * Use dict style access for image\_ref * Use explicit arguments in compute manager run\_instance * Remove topic from scheduler run\_instance * Use explicit args in run\_instance scheduler code * Update args to \_set\_vm\_state\_and\_notify * Reduce db access in prep\_resize in the compute manager * Remove instance\_id fallback from cast\_to\_compute\_host() * Remove unused InstanceInfo class * Adds per-user-quotas support for more detailed quotas management * Remove list\_instances\_detail from compute drivers * Move root\_helper deprecation warning into execute * Flavor extra specs extension use instance\_type id * Fix test\_resize\_xcp testcase - it never ran * tests: avoid traceback warning in test\_live\_migration * ensure\_tree calls mkdir -p * Only log deprecated config warnings once * Handle NetworkNotFound in \_shutdown\_instance * Drop AES functions and pycrypto dependency * Simplify file hashing * Allow loaded extensions to be checked from servers * Make extension aliases consistent * Remove old exception type * Fix test classes collision * Remove unused variables * Fix notification logic * Improve external lock implementation * maint: remove an unused import in libvirt.driver * Require eventlet >= 0.9.17 * Remove \*\*kwargs from prep\_resize in compute manager * Updates to the prep\_resize scheduler rpc call * Migrate a notifier patch from common: * Update list\_instances to catch libvirtError * Audit log messages in nova/compute/api.py * Rename \_self to self according to Python convention * import missing module time * Remove unused variables * Handle InstanceNotFound in libvirt list\_instances * Fix broken pep8 exclude processing * Update reset\_db to call setup if \_DB is None * Migrate a logging change from common: * Send 'create volume from snapshot' to the proper host * Fix regression with nova-manage floating list * Remove unused imports * Simple refactor of some db api tests * fix unmounting of LXC containers * Update usage of 'ip' to handle more return codes * Use function registration for policy checks * Check instance lock in compute/api * Fix a comment typo in db api * Audit log messages in nova/compute/manager.py * XenAPI: Add script to destroy cached images * Fix typo in db test * Fix issue with filtering where a value is unicode * Avoid using logging in signal handler * Fix traceback when using s3 * Don't pass kernel args to Xen HVM instances * Sync w/ latest openstack common log.py * Pass a full instance to rotate\_backups() * Remove agent\_update from the compute manager * Move tests.test\_compute\_utils into tests.compute * Send a full instance in terminate\_instance * maint: don't require write access when reading files * Fix get\_diagnostics RPC arg ordering * Fix failed iscsi tgt delete errors with new tgtadm * Deprecate root\_helper in favor of rootwrap\_config * Use instance\_get instead of instance\_by * Clarify TooManyInstances exception message * Setting root passwd no longer fails silently * XenAPI: Fix race-condition with cached images * Prevent instance\_info\_cache from being altered post instance * Update targets information when creating target * Avoid recursion from @refresh\_cache * Send a full instance in change\_instance\_metadata * Send a full instance in unrescue\_instance 
* Add check exit codes for vlans * Compute: Error out instance on rebuild and resize * Partially revert "Remove unused scheduler functions" * Use event.listen() instead of deprecated listeners kwarg * Avoid associating floating IP with two instances * Tidy up nova.image.glance * Fix arg to get\_instance\_volume\_block\_device\_info() * Send a full instance in snapshot\_instance * Send a full instance in set\_admin\_password * Send a full instance in revert\_resize * Send a full instance in rescue\_instance * Send a full instance in remove\_volume\_connection * Send a full instance in rollback\_live\_migration\_at\_destination * Send a full instance in resume\_instance * Send a full instance in resize\_instance * Send a full instance in reset\_network * Convert virtual\_interfaces to using instance\_uuid * Compute: VM-Mode should use instance dict * Fix image\_type=base after snapshot * Send a full instance in remove\_fixed\_ip\_from\_instance * Send a full instance in rebuild\_instance * Reverts fix lp1031004 * sync openstack-common log changes with nova * Set default keystone auth\_token signing\_dir loc * Resize.end now includes the correct instance\_type * Fix rootwrapper with tgt-admin * Use common parse\_isotime in GlanceImageService * Xen: VHD sequence validation should handle swap * Revert "Check for selinux before setting up selinux." * reduce debugging from utils.trycmd() * Avoid error during snapshot of ISO booted instance * Add a link from HACKING to wiki GitCommitMessages page * Instance cleanups from detach\_volumes * Check for selinux before setting up selinux * Prefer instance in reboot\_instance * maint: libvirt imagecache: remove redundant interpreter spec * Support external gateways in VLAN mode * Turn on base image cleanup by default * Make compute only auto-confirm its own instances * Fix state logic for auto-confirm resizes * Explicitly send primitive instances via rpc * Allow \_destroy\_vdis if a mapping has no VDI * Correct host count in instance\_usage\_audit\_log extension * Return location header on volume creation * Add persistent volumes for tgtd * xenapi: Use instance uuid when calling DB API * Fix HACKING violation in nova/api/openstack/volume/types.py * Remove ugly instance.\_rescue hack * Convert to using dict style key lookups in XenAPI * Implements notifications for more instance changes * Fix ip6tables support in xenapi bug 934603 * Moving where the fixed ip deallocation happens * Sanitize xenstore keys for metadata injection * Don't store system\_metadata in xenstore * use REDIRECT to forward local metadata request * Only enforce valid uuids if a uuid is passed * Send a full instance in pre\_live\_migration * Send a full instance in power\_on\_instance and start\_instance * Send a full instance in power\_off\_instance and stop\_instance * Make instance\_uuid backwards compat actually work * Send a full instance via rpc for post\_live\_migration\_at\_destination * Send a full instance via rpc for inject\_network\_info * Send a full instance via rpc for inject\_file * Send a full instance via rpc for get\_vnc\_console * Remove get\_instance\_disk\_info from compute rpcapi * Send a full instance via rpc for get\_diagnostics * Send a full instance via rpc for finish\_revert\_resize * Ensure instance is moved to ERROR on suspend failure * Avoid using 'is' operator when comparing strings * Revert "Add additional capabilities for computes" * Allow power\_off when instance doesn't exist * Fix resizing VDIs on XenServer >= 6 * Refactor glance image service code * 
Don't import libvirt\_utils in disk api * Call correct implementation for quota\_destroy\_all\_by\_project * Remove return values from some compute RPC methods * Reinstate instance locked error logging * Send a full instance via rpc for finish\_resize * Fix exception handling in libvirt attach\_volume() * Convert fixed\_ips to using instance\_uuid * Trim volume type representation * Fix a couple of PEP8 nits * Replace subprocess.check\_output with Popen * libvirt driver: set os\_type to support xen hvm/pv * Include architecture in instance base options passed to the scheduler * Fix typo of localhost's IP * Enhance nova-manage to set flavor extra specs * Send a full instance via rpc for detach\_volume * Remove unused methods from compute rpcapi * Send a full instance via rpc for confirm\_resize * Send a full instance via rpc for check\_can\_live\_migrate\_source * Send a full instance via rpc for check\_can\_live\_migrate\_destination * Remove unused scheduler functions * Send a full instance via rpc for attach\_volume * Send a full instance via rpc for add\_fixed\_ip\_to\_instance * Send a full instance via rpc for get\_console\_output * Send a full instance via rpc for suspend\_instance * Send a full instance via rpc for (un)pause\_instance * Don't use rpc to lock/unlock an instance * Convert reboot\_instance to take a full instance * Update decorators in compute manager * Include name in a primitive Instance * Shrink Simple Scheduler * Allow soft deletes from any state * Handle NULL deleted\_at in migration 112 * Add support for snapshots and volume types to netapp driver * Inject instance metadata into xenstore * Add missing tempfile import to libvirt driver * Fix docstring for SecurityGroupHandlerBase * Don't log debug auth token when using cinder * Remove temporary variable * Define cross-driver standardized vm\_mode values * Check for exception codes in openstack API results * Add missing parameters to novas cinder api * libvirt driver: set driver name consistently * Allow floating IP pools to be deleted * Fixes console/vmrc\_manager.py import error * EC2 DescribeImageAttribute by kernel/ramdisk * Xen: Add race-condition troubleshooting script * Return 400 in get\_console\_output for bad length * update compute\_fill\_first\_cost\_fn docstring * Xen: Validate VHD footer timestamps * Xen: Ensure snapshot is torn down on error * Provide rootwrap filters for nova-api-metadata * Fix a bug in compute\_node\_statistics * refactor all uses of the \`qemu-img info\` command * Xen: Fix snapshots when use\_cow=True * tests: remove misleading docstrings on libvirt tests * Update NovaKeystoneContext to use jsonutils * Use compute\_driver in vmware driver help messages * Use compute\_driver in xenapi driver help messages * Add call to get hypervisor statistics * Adds xcp disk resize support * Log snapshot UUID and not OpaqueRef * Remove unused user\_id and project\_id arguments * Fix wrong regex in cleanup\_file\_locks * Update jsonutils from openstack-common * Return 404 when attempting to remove a non-existent floating ip * Implements config\_drive as extension * use boto's HTTPResponse class for versions of boto >=2.5.2 * Migrations for deleted data for previously deleted instances * Add image\_name to create and rebuild notifications * Make it clear subnet\_bits is unused in ipam case * Remove unused add\_network\_to\_project() method * Adding networking rules to vm's on compute service startup * Avoid unrecognized content-type message * Updates migration 111 to work w/ Postgres * fixes for 
nova-manage not returning a full list of fixed IPs * Adds non\_inheritable\_image\_properties flag * Add git commit message validation to hacking.py * Remove unnecessary use of with\_lockmode * Improve VDI chain logging * Remove profane words * Adds logging for renaming and hardlinking * Don't create volumes if an incorrect size was given * set correct SELinux context for injected ssh keys * Fixes nova-manage fixed list with deleted networks * Move libvirt disk config setup out of main get\_guest\_config method * Refactor libvirt imagebackend module to reduce code duplication * Move more libvirt disk setup into the imagebackend module * Don't hardcode use of 'virtio' for root disk in libvirt driver * Ensure to use 'hdN' for IDE disk device in libvirt driver * Don't set device='cdrom' for all disks in libvirt driver * Move setup of libvirt disk cachemode into imagebackend module * Get rid of pointless 'suffix' parameter in libvirt imagebackend * Revert "Attach ISO as separate disk if given proper instruction" * Ensure VHDs in staging area are sequenced properly * Fix error in error handler in instance\_usage\_audit task * Fix SQL deadlock in quota reservations * Ensure 413 response for security group over-quota * fixes for nova-manage network list if network has been deleted * Allow NoMoreFloatingIps to bubble up to FaultWrapper * Fix cloudpipe keypair creation. Add pipelib tests * Don't let failure to delete filesystem block deletion of instances in libvirt * Static FaultWrapper status\_to\_type map * Make flavorextradata ignore deleted flavors * Tidy up handling of exceptions in floating\_ip\_dns * Raise NotImplementedError, not NotImplemented singleton * Fix the mis-use of NotImplemented * Update FilterSchedulerTestCase docstring * Remove unused testing.fake * Make snapshot work for stopped VMs * Split ComputeFilter up * Show all absolute quota limits in /limits * Info log to see which compute driver has loaded * Rename get\_lock() to \_get\_lock() * Remove obsolete line in host\_manager * improve efficiency of image transfer during migration * Remove unused get\_version\_from\_href() * Add debug output to RamFilter * Fixes bare-metal spawn error * Adds generic retries for build failures * Fix docstring typo * Fixes XenAPI driver import in vm\_vdi\_cleaner * Display key\_name only if keypairs extension is used * Fix EC2 CreateImage no\_reboot logic * Reject EC2 CreateImage for instance-store * EC2 DescribeImages reports correct rootDeviceType * Support EC2 CreateImage API for boot-from-volume * remove unused clauses[] variable * Partially implements blueprint xenapi-live-migration * Improved VM detection for bandwidth polling (XAPI) * Sync jsonutils from openstack-common * Adding granularity for quotas to list and update * Remove VDI chain limit for migrations * Refactoring required for blueprint xenapi-live-migration * Add the plugin framework from common; use and test * Catch rpc up to the common state-of-the-art * Support requested\_networks with quantum v2 * Return 413 status on over-quota in the native API * Fix venv wrapper to clean \*.pyc * Use all deps for tools/hacking.py tests in tox * bug 1024557 * General-host-aggregates part 1 * Attach ISO as separate disk if given proper instruction * Extension to show usage of limited resources in /limits response * Fix SADeprecationWarning: useexisting is deprecated * Fix spelling in docstrings * Fix RuntimeWarning nova\_manage not found * Exclude openstack-common from pep8 checks * Use explicit destination user in xenapi rsync call * 
Sync gettextutils fixes from openstack-common * Sync importutils from openstack-common * Sync cfg from openstack-common * Add SKIP\_WRITE\_GIT\_CHANGELOG to setup.py * Remove unnecessary logging from API * Sync a commit from openstack-common * Fix typo in docstring * Remove VDI chain limit for snapshots * Adds snapshot\_attached\_here contextmanager * Change base rpc version to 1.0 in compute rpcapi * Use \_lookup\_by\_name instead of \_conn.lookupByName * Use the dict syntax instead of attribute to access db objects * Raise HTTP 500 if service catalog is not json * Floating\_ip create /31,32 shouldn't silent error * Convert remaining network API casts to calls * network manager returns empty list, not raise an exception * add network creation call to network.api.API * overriden VlanManager.create\_networks must return a result * When over quota for floating ips, return HTTPRequestEntityTooLarge * Remove deprecated auth-related db code * Fix .mailmap to generate unique AUTHORS list * Imports base64 to fix xen file injection * Remove deprecated auth from GlanceImageService * Adds bootlocking to the xenserver suspend and resume * ensure libguestfs mounts are cleaned up * Making docs pretty! * allows setting accessIPvs to null via update call * Re-add nova.virt.driver import to xenapi driver * Always attempt to delete entire floating IP range * Adds network labels to the fixed ips in usages * only mount guest image once when injecting files * Remove unused find\_data\_files function in setup.py * Use compute\_api.get\_all in affinity filters * Refactors more snapshot code into vm\_utils * Clarifying which vm\_utils functions are private * Refactor instance\_usage\_audit. Add audit tasklog * Fixes api fails to unpack metadata using cinder * Remove deprecated auth docs * Raise Failure exception when setting duplicate other\_config key * Split xenapi agent code out to nova.virt.xenapi.agent * ensure libguestfs has completed before proceeding * flags documentation to deprecate connection\_type * refactor baremetal/proxy => baremetal/driver * refactor xenapi/connection => xenapi/driver * refactor vmwareapi\_conn => vmwareapi/driver * Don't block instance delete on missing block device volume * Adds diagnostics command for the libvirt driver * associate\_floating\_ip an ip already in use * When deleting an instance, avoid freakout if iscsi device is gone * Expose over-quota exceptions via native API * Fix snapshots tests failing bug 1022670 * Remove deprecated auth code * Remove deprecated auth-related api extensions * Make pep8 test work on Mac * Avoid lazy-loading errors on instance\_type * Fetch kernel/ramdisk images directly * Ignore failure to delete kernel/ramdisk in xenapi driver * Boot from volume for Xen * Fix 'instance %s: snapshotting' log message * Fix KeyError 'key\_name' when KeyPairExists raised * Propagate setup.py change from common * Properly name openstack.common.exception * Janitorial: Catch rpc up with a change in common * Make reboot work for halted xenapi instances * Removed a bunch of cruft files * Update common setup code to latest * fix metadata file injection with xen * Switch to common notifiers * Implements updating complete bw usage data * Fix rpc import path in nova-novncproxy * This patch stops metadata from being deleted when an instance is deleted * Set the default CPU mode to 'host-model' for Libvirt KVM/QEMU guests * Fallback to fakelibvirt in test\_libvirt.py test suite * Properly track VBD and VDI connections in xenapi fake * modify hacking.py to not choke on 
the def of \_() * sort .gitignore for readability * ignore project files for eclipse/pydev * Add checks for retrieving deleted instance metadata for notification events * Allow network\_uuids that begin with a prefix * Correct typo in tools/hacking.py l18n -> i18n * Add \*.egg\* to .gitignore * Remove auth-related nova-manage commands * Remove unnecessary target\_host flag in xenapi driver tests * Remove unnecessary setUp() method in xenapi driver tests * Finish AUTHORS transition * Don't catch & ignore exceptions when setting up LXC container filesystems * Ensure system metadata is sent on new image creation * Distinguish over-quota for volume size and number * Assign service\_catalog in NovaKeystoneContext * Fix some hacking violations in the quantum tests * Fix missing nova.log change to nova.openstack.common.log * Add Cinder Volume API to Nova * Modifies ec2/cloud to be able to use Cinder * Fix nova-rpc-zmq-receiver * Drop xenapi session.get\_imported\_xenapi() * Fix assertRaises(Exception, ...) HACKING violation * Make possible to store snapshots not in /tmp directory * Prevent file injection writing to host filesystem * Implement nova network API for quantum API 2.0 * Expand HACKING with commit message guidelines * Add ServiceCatalog entries to enable Cinder usage * Pass vdi\_ref to fake.create\_vbd() not a string * Switch to common logging * use import\_object\_ns for compute\_driver loading * Add compatibility for CPU model config with libvirt < 0.9.10 * Sync rpc from openstack-common * Redefine the domain's XML on volume attach/detach * Sync jsonutils from openstack-common * Sync iniparser from openstack-common * Sync latest importutils from openstack-common * Sync excutils from openstack-common * Sync cfg from openstack-common * Add missing gettextutils from openstack-common * Run hacking tests as part of the gate * Remove duplicate volume\_id * Make metadata content match the requested version of the metadata API * Create instance in DB before block device mapping * Get hypervisor uptime * Refactoring code to kernel Dom0 plugin * Ability to read deleted system metadata records * Add check for no domains in libvirt driver * Remove passing superfluous read\_deleted argument * Flesh out the README file with a little more useful information * Remove unused 'get\_open\_port' method from libvirt utils * deallocate\_fixed\_ip attempts to update deleted ip * Dom0 plugin now returns data in proper format * Add PEP8 checks back for Dom0 plugins * Add missing utils declaration to RPM spec * Fixes bug 1014194, metadata keys are incorrect for kernel-id and ramdisk-id * Clean up cruft in nova.image.glance * Retry against different Glance hosts * Fix some import ordering HACKING violations * Deal with unknown instance status * OS API should return SHUTOFF, not STOPPED * Implement blueprint ec2-id-compatibilty * Add multi-process support for API services * Allow specification of the libvirt guest CPU model per host * Refactor Dom0 Glance plugin * Switch libvirt get\_cpu\_info method over to use config APIs * Remove tpool stub in xenapi tests * Use setuptools-git plugin for MANIFEST * Remove duplicate check of server\_dict['name'] * Add missing nova-novncproxy to tarballs * Add libvirt config classes for handling capabilities XML doc * Refactor libvirt config classes for representing CPU models/features * Fix regression in test\_connection\_to\_primitive libvirt testcase * Rename the instance\_id column in instance\_info\_caches * Rename GlanceImageService.get to download * Use LOG.exception 
instead of logging.exception * Align run\_tests.py pep8 with tox * Add hypervisor information extension * Remove GlanceImageService.index in favor of detail * Swap VDI now uses correct name label * Remove image service show\_by\_name method * Cleanup of image service code * Adds default fall-through to the multi scheduler. Fixes bug 1009681 * Add missing netaddr import * Make nova list/show behave nicely on instance\_type deletion * refactor libvirt from connection -> driver * Switch to using new config parsing for vm\_vdi\_cleaner.py * Adds missing 'owner' attribute to image * Ignore floatingIpNotAssociated during disassociation * Avoid casts in network manager to prevent races * Stop nova\_ipam\_lib from changing the timeout setting * Remove extra DB calls for instances from OS API extensions * Allow single uuid to be specified for affinity * Fix invalid variable reference * Avoid reset on hard reboot if not supported * Fix several PEP-8 issues * Allow access to metadata server '/' without IP check * Fix db calls for snaphsot and volume mapping * Removes utils.logging\_error (no longer used) * Removes utils.fetch\_file (no longer used) * Improve filter\_scheduler performance * Remove unnecessary queries for network info in notifications * Re-factor instance DB creation * Fix hacking.py failures.. * fix libvirt get\_memory\_mb\_total() with xen * Migrate existing routes from flat\_interface * Add full test environment * Another killfilter test fix for Fedora 17 * Remove unknown shutdown kwarg in call to vmops.\_destroy * Refactor vm\_vdi\_cleaner.py connection use * Remove direct access to glance client * Fix import order of openstack.common * metadata: cleanup pubkey representation * Make tgtadm the default iscsi user-land helper * Move rootwrap filters definition to config files * Fixes ram\_allocation\_ratio based over subscription * Call libvirt\_volume\_driver with right mountpoint * XenAPI: Fixes Bug 1012878 * update refresh\_cache on compute calls to get\_instance\_nw\_info * vm state and task state management * Update pylint/pep8 issues jenkins job link * Addtional CommandFilters to fix rootwrap on SLES * Tidy up exception handling in contrib api consoles * do sync before fusermount to avoid busyness * Fix bug 1010581 * xenapi tests: changes size='0' to size=0 * fixes a bug in xenapi tests where a string should be int * Minor HACKING.rst exception fix * Make libvirt LoopingCalls actually wait() * Add instance\_id in Usage API response * Set libvirt\_nonblocking to true by default for Folsom * Admin action to reset states * Use rpc from openstack-common * add nova-manage bash completion script * Spelling fixes * Fix bug 1014925: fix os-hosts * Adjust the libvirt config classes' API contract for parsing * Move libvirt version comparison code into separate function helper * Remove two obsolete libvirt cheetah templates from MANIFEST.in * Propose nova-novncproxy back into nove core * Fix missing import in compute/utils.py * Add instance details to notifications * Xen Storage Manager: tests for xensm volume driver * SM volume driver: DB changes and tests * moved update cache functionality to the network api * Handle missing server when getting security groups * Imports cleanup * added deprecated.warn helper method * Enforce an instance uuid for instance\_test\_and\_set * Replaces functions in utils.py with openstack/common/timeutils.py * Add CPU arch filter scheduler support * Present correct ec2id format for volumes and snaps * xensm: Fix xensm volume driver after uuid changes * 
Cleanup instance\_update so it only takes a UUID * Updates the cache * Add libvirt min version check * Ensure dnsmasq accept rules are preset at startup * Re-add private \_compute\_node\_get call to sql api * bug #996880 change HostNotFound in hosts to HTTPNotFound * Unwrap httplib.HTTPConnection after WsgiLimiterProxyTest * Log warnings instead of full exceptions for AMQP reconnects * Add missing ack to impl\_qpid * blueprint lvm-disk-images * Remove unused DB calls * Update default policies for KVM guest PIT & RTC timers * Add support for configuring libvirt VM clock and timers * Dedupe native and EC2 security group APIs * Add two missing indexes for instance\_uuid columns * Revert "Fix nova-manage backend\_add with sr\_uuid" * Adds property to selectively enable image caching * Remove utils.deprecated functions * Log connection\_type deprecation message as WARNING * add unit tests for new virt driver loader * Do not attempt to kill already-dead dnsmasq * Only invoke .lower() on non-None protocols * Add indexes to new instance\_uuid columns * instance\_destroy now only takes a uuid * Do not always query deleted instance\_types * Rename image to image\_id * Avoid partially finished cache files * Fix power\_state mis-use bug 1010586 * Resolve unittest error in rpc/impl\_zmq * Fix whitespace in sqlite steps * Make eventlet backdoor play nicer with gettext * Add user\_name project\_name and color option to log * fixes bug 1010200 * Fixes affinity filters when hints is None * implement sql-comment-string stack traces * Finalize tox config * Fixes bug lp:999928 * Convert consoles to use instance uuid * Use OSError instead of ProcessExecutionError * Replace standard json module with openstack.common.jsonutils * Don't query nova-network on startup * Cleans up power\_off and power\_on semantics * Refactor libvirt create calls * Fix whitespace in sqlite steps * Update libvirt imagecache to support resizes * separate Metadata logic away from the web service * Fix bug 1006664: describe non existent ec2 keypair * Make live\_migration a first-class compute API * Add zeromq driver. 
Implements blueprint zeromq-rpc-driver * Fix up protocol case handling for security groups * Prefix all nova binaries with 'nova-' * Migrate security\_group\_instance\_association to use a uuid to refer to instances * Migrate instance\_metadata to use a uuid to refer to instances * Adds \`disabled\` field for instance-types * More meaningful help messages for libvirt migration options * fix the instance quota overlimit message * fix bug lp:1009041,add option "-F" to make mkfs non-interactive * Finally ack consumed message * Revert "blueprint " * Use openstack-common's policy module * Use openstack.common.cfg.CONF * bug #1006094 correct typo in addmethod.openstackapi.rst * Correct use of uuid in \_get\_instance\_volume\_bdm * Unused imports cleanup (folsom-2) * Quantum Manager disassociate floating-ips on instance delete * defensive coding against None inside bdm resolves bug 1007615 * Add missing import to quantum manager * Add a comment to rpc.queue\_get\_for() * Add shared\_storage\_test methods to compute rpcapi * Add get\_instance\_disk\_info to the compute rpcapi * Add remove\_volume\_connection to the compute rpcapi * blueprint * Implements resume\_state\_on\_host\_boot for libvirt * Fix libvirt rescue to work with whole disk images * Finish removing xenapi.HelperBase class * Remove network\_util.NetworkHelper class * Remove volume\_util.VolumeHelper class * Remove vm\_utils.VMHelper class * Start removing unnecessary classes from XenAPI driver * XenAPI: Don't hardcode userdevice for VBDs * convert virt drivers to fully dynamic loading * Add compare\_cpu to the compute rpcapi * Add get\_console\_topic() to the compute rpcapi * Add refresh\_provider\_fw\_rules() to compute rpcapi * Use compute rpcapi in nova-manage * Add post\_live\_migration\_at\_destination() to compute rpcapi * Add pre\_live\_migration() to the compute rpcapi * Add rollback\_live\_migration\_at\_destination() to compute rpcapi * Add finish\_resize() to the compute rpcapi * Add resize\_instance() to the compute rpcapi * Add finish\_revert\_resize() to the compute rpcapi * Add get\_console\_pool\_info() to the compute rpcapi * Fix destination host for remove\_volume\_connection * Don't deepcopy RpcContext * Remove resize function from virt driver * Cleans up extraneous volume\_api calls * Remove list\_disks/list\_interfaces from virt driver * Remove duplicate words in comments * Implement blueprint host-topic-matchmaking * Remove unnecessary setting of XenAPI module attribute * Prevent task\_state changes during VERIFY\_RESIZE * Eliminate a race condition on instance deletes * Make sure an exception is logged when config file isn't found * Removing double quotes from sample config file * Backslash continuation removal (Nova folsom-2) * Update .gitignore * Add a note on why quota classes are unused in Nova * Move queue\_get\_for() from db to rpc * Sample config file tool updates * Fix instance update notification publisher id * Use cfg's new global CONF object * Make xenapi fake match real xenapi a bit closer * Align ApiEc2TestCase to closer match api-paste.ini * Add attach\_time for EC2 Volumes * fixing issue with db.volume\_update not returning the volume\_ref * New RPC tests, docstring fixes * Fix reservation\_commit so it works w/ PostgreSQL * remove dead file nova/tests/db/nova.austin.sqlite * Fix the conf argument to get\_connection\_pool() * Remove Deprecated auth from EC2 * Revert "API users should not see deleted flavors." 
* Grammar fixes * Record instance architecture types * Grammar / spelling corrections * cleanup power state (partially implements bp task-management) * [PATCH] Allow [:print:] chars for security group names * Add scheduler filter for trustedness of a host * Remove nova.log usage from nova.rpc * Remove nova.context dependency from nova.rpc * \_s3\_create update only pertinent metadata * Allow adding fixed IPs by network UUID * Fix a minor spelling error * Run coverage tests via xcover for jenkins * Localize rpc options to rpc code * clean-up of the bare-metal framework * Use utils.utcnow rather than datetime.utcnow * update xen to use network\_model * fixes bug 1004153 * Bugfix in simple\_tenant\_usage API detail view * removed a dead db function register\_models() * add queue name argument to TopicConsumer * Cleanup tools/hacking using flake8 * Expose a limited networks API for users * Added a instance state update notification * Remove deprecated quota code * Update pep8 dependency to v1.1 * Nail pep8 dependencies to 1.0.1 * API users should not see deleted flavors * Add scheduler filter: TypeAffinityFilter * Add help string to option 'osapi\_max\_request\_body\_size' * Permit deleted instance types to be queried for active instances * Make validate\_compacted\_migration into general diff tool * Remove unused tools/rfc.sh * Finish quota refactor * Use utils.parse\_strtime rather than datetime.strptime * Add version to compute rpc API * Add version to scheduler rpc API * Add version to console rpc API * Remove wsgiref from requirements * More accurate rescue mode testing for XenAPI * Add tenant id in self link in /servers call for images * Add migration compaction validation tool * Enable checking for imports in alphabetical order * Include volume-usage-audit in tarballs * Fix XenServer diagnostics to provide correct details * Use cfg's new behavior of reset() clearing overrides * Sync with latest version of openstack.common.cfg * Only permit alpha-numerics and .\_- for instance type names * Use memcache to store consoleauth tokens * cert/manager.py not using crypto.fetch\_crl * Cleanup LOG.getLoggers to use \_\_name\_\_ * Imported Translations from Launchpad * Alphabetize imports in nova/tests/ * Fix Multi\_Scheduler to process host capabilities * fixed\_ip\_get\_by\_address read\_deleted from context * Fix for Quantum LinuxBridge Intf driver plug call * Add additional logging to compute filter * use a RequestContext object instead of context module * make get\_all\_bw\_usage() signature match for fake virt driver * Add unit test coverage for bug 1000261 * Moving network tests into the network folder * Add version to consoleauth rpc API * Add version to the cert rpc API * Add base support for rpc API versioning * fixes typo that completely broken Quantum/Nova integration * Make Iptables FW Driver handle dhcp\_server None * Add aliases to .mailmap for comstud and belliott * Add eventlet backdoor to facilitate troubleshooting * Update nova's copy of image metadata on rebuild * Optional timeout for servers stuck in build * Add configurable timeout to Quantum HTTP connections * Modify vm\_vdi\_cleaner to handle \`-orig\` * Add \_\_repr\_\_ to least\_cost scheduler * Bump XenServer plugin version * handle updated qemu-img info output * Rearchitect quota checking to partially fix bug 938317 * Add s3\_listen and s3\_listen\_port options * Misused and not used config options * Remove XenAPI use of eventlet tpool * Fixed compute periodic task. 
Fixes bug 973331 * get instance details results in volumes key error * Fix bug 988034 - Quantum Network Manager - not clearing ips * Stop using nova.exception from nova.rpc * Make use of openstack.common.jsonutils * Alphabetize imports in nova/api/ * Remove unused \_get\_target code from xenapi * Implement get\_hypervisor\_hostname for libvirt * Alphabetize imports * Alphabetize imports in nova/virt/ * Adding notifications for volumes * Pass 'nova' project into ConfigOpts * fixes bug 999206 * Create an internal key pair API * Make allocation failure a bit more friendly * Avoid setting up DHCP firewall rules with FlatManager * Migrate missing license info * Imported Translations from Launchpad * Fix libvirt Connection.get\_disks method * Create a utf8 version of the dns\_domains table * Setup logging, particularly for keystone middleware * Use default qemu-img cluster size in libvirt connection driver * Added img metadata validation. Fixes bug 962117 * Remove unnecessary stubout\_loopingcall\_start * Actually use xenapi fake setter * Provide a transition to new .info files * Store image properties with instance system\_metadata * Destroy system metadata when destroying instance * Fix XenServer windows agent issue * Use ConfigOpts.find\_file() to find paste config * Remove instance Foreign Key in volumes table, replace with instance\_uuid * Remove old flagfile support * Removed unused snapshot\_instance method * Report memory correctly on Xen. Fixes bug 997014 * Added image metadata to compute.instance.exists * Update PostgreSQL sequence names for zones/quotas * Minor help text related changes * API does need new image\_ref on rebuild immediately * Avoid unnecessary inst lookup in vmops \_shutdown * implement blueprint floating-ip-notification * Defer image\_ref update to manager on rebuild * fix bug 977007,make nova create correct size of qcow2 disk file * Remove unnecessary shutdown argument to \_destroy() * Do not fail on notify when quantum and melange are out of sync * Remove instance action logging mechanism * httplib throw "TypeError: an integer is required" when run quantum * fix bug 992008, we should config public interface on compute * A previous patch decoupled the RPC drivers from the nova.flags, breaking instance audit usage in the process. 
This configures the xvpvncproxy to configure the RPC drivers properly with FLAGS so that xvpvncproxy can run * Fix bug 983206 : \_try\_convert parsing string * pylint cleanup * Fix devref docs * Remove Deprecated AuthMiddleware * Allow sitepackages on jenkins * Replaces exceptions.Error with NovaException * Docs for vm/task state transitions * Fix a race with rpc.register\_opts in service.py * Mistake with the documentation about cost function's weight corrected * Remove state altering in live-migration code * Register fake flags with rpc init function * Generate a Changelog for Nova * Find context arg by type rather than by name * Default auto-increment for int primary key columns * Adds missing copyright to migration 082 * Add instance\_system\_metadata modeling * Use fake\_libvirt\_utils for libvirt console tests * Fix semantics for migration test environment var * Clean up weighted\_sum logic * Use ConfigOpts.find\_file() to locate policy.json * Sync to newer openstack.common.cfg * Fix test\_mysql\_innodb * Implement key pair quotas * Ensure that the dom0 we're connected to is the right one * Run ip link show in linux\_net.\_device\_exists as root * Compact pre-Folsom database migrations * Remove unused import * Pass context to notification drivers when we can * Use save\_and\_reraise\_exception() from common * Fix innodb tests again * Convert Volume and Snapshot IDs to use UUID * Remove unused images * Adding 'host' info to volume-compute connection information * Update common.importutils from openstack-common * Provide better quota error messages * Make kombu support optional for running unit tests * Fix nova.tests.test\_nova\_rootwrap on Fedora 17 * Xen has to create it's own tap device if using libvirt and QuantumLinuxBridgeVIFDriver * Fix test\_migrations to work with python 2.6 * Update api-paste.ini to remove unused settings * Fix test\_launcher\_app to ensure service actually got started * Minor refactor of servers viewbuider * A previous patch decoupled the RPC drivers from the nova.flags, breaking instance audit usage in the process. 
This configures the instance audit usage to configure the RPC drivers properly with FLAGS so that the job can run * Allow blank passwords in changePassword action * Allow blank adminPass on server create * Return a BadRequest on bad flavors param values * adjust logging levels for utils.py * Update integration tests to listen on 127.0.0.1 * Log instance consistently * Create name\_label local variable for logging message * Remove hack for xenapi driver tests * Migrate block\_device\_mapping to use instance uuids * Remove unnecessary return statements * Clean up ElementTree usage * Adds better bookending and robustness around the instance audit usage generation * Pass instance to resize\_disk() to fix exception * Minor spelling fix * Removes RST documentation and moves it to openstack-manuals * Trivial spelling fix * Remove workaround for sqlalchemy-migration < 0.6.4 * Remove unnecessary references to resize\_confirm\_window flag * Fix InnoDB migration bug in migrate script 86 * Use openstack.common.importutils * Ignore common code in coverage calculations * Use additional task states during resize * Add libvirt get\_console\_output tests: pty and file * Keep uuid with bandwidth usage tracking to handle the case where a MAC address could be recycled between instances * Added the validation for name check for rebuild of a server * Make KillFilter to handle 'deleted' w/o rstrip * Fix instance delete notifications * Disconnect stale instance VDIs when starting nova-compute * Fix timeout in EC2 CloudController.create\_image() * Add additional capabilities for computes * Move image checksums into a generic file * Add instance to several log messages * Imports to human alphabetical order * Fixes bug 989271, fixes launched\_at date on notifications * Enable InnoDB checking * make all mysql tables explicitly innodb * Use instance\_get\_by\_uuid since we're looking up a UUID * Use nova\_uuid attribute instead of trying to parse out name\_label * Add a force\_config\_drive flag * Fix 986922 * Improvement for the correct query extraction * Fixes bug 983024 * Make updating hostId raises BadRequest * Disallow network creation when label > 255. Fixes bug 965008 * Introduced \_atomic\_restart\_dhcp() Fixes Bug 977875 * Make the filename that image hashes are written to configurable * Xen: Pass session to destroy\_vdi * Add instance logging to vmware\_images.py * Add instance logging to vmops.py * fix bug #980452 set net.ipv4.ip\_forward=1 on network * Log instance * Log instance information for baremetal * Include instance in log message * Log instance * Ensure all messages include instance * Add instance to log messages * Include instance in log message * Refactor nova.rpc config handling * Don't leak RPC connections on timeouts or other exceptions * Small cleanup to attach\_volume logging * Implements EC2 DescribeAddresses by specific PublicIp * Introduced flag base\_dir\_name. 
Fixes bug 973194 * Set a more reasonable default RPC thread pool size * Number of missing imports should always be shown * Typo fix in bin/instance-usage-audit * Improved tools/hacking.py * Scope coverage report generation to nova module * Removes unnecessary code in \_run\_instance * Validate min\_ram/min\_disk on rebuild * Adding context to usage notifications * Making \`usage\_from\_instance\` private * Remove \_\_init\_\_.py from locale dir * Fixes bug 987335 * allow power state "BLOCKED" for live migrations if using Xen by libvirt * Exclude xenapi plugins from pep8/hacking checks * Imported Translations from Launchpad * Remove unnecessary power state translation messages * Add instance logging * Use utils.save\_and\_reraise\_exception * Removing XenAPI class variable, use session instead * Log instance consistently * Keep nova-manage commands sorted * Log instances consistently * Moves \`usage\_from\_instance\` into nova.compute.utils * Log instance * nova.virt.xenapi\_conn -> nova.virt.xenapi.connection * Remove unused time keyword arg * Remove unused variable * support a configurable libvirt injection partition * Refactor instance image property inheritance out to a method * Refactor availability zone handling out to a method * Include name being searched for in exception message * Be more tolerant of deleting failed builds * Logging updates in IptablesFirewallDriver * Implement security group quotas * Do not allow blank adminPass attribute on set password * Make rebuilds with an emtpy name raise BadRequest * Updates launched\_at in the finish and revert\_migration calls * Updated instance state on resize error * Reformat docstrings in n/c/a/o/servers as per HACKING * fix bug 982360, multi ip block for dmz\_cidr * Refactor checking instance count quota * Small code cleanup for config\_disk handling * Refactors kernel and ramdisk handling into their own method * Improve instance logging in compute/manager * Add deleted\_at to instance usage notification * Simplify \_get\_vm\_opaque\_ref in xenapi driver * Test unrescue works as well * Remove unused variable * Port types and extra specs to volume api * Make exposed methods clearer in xenapi.vmops * Fix error message to report correct operation * Make run\_tests.sh just a little bit less verbose * Log more information when sending notifications * xenapi\_conn -> xenapi.connection * Renamed current\_audit\_period function to last\_completed\_audit\_period to clarify its purpose * QuantumManager will start dnsmasq during startup. Fixes bug 977759 * Fixed metadata validation err. 
Fixes bug 965102 * Remove python-novaclient dependency from nova * Extend instance UUID logging * Remove references to RemoteError in os-networks * Fix errors in os-networks extension * Removes dead code around start\_tcp in Server * Improve grammar throughout nova * Improved localization testing * Log kwargs on a failed String Format Operation * Standardize quota flag format * Remove nova Direct API * migration\_get\_all\_unconfirmed() now uses lowercase "finished" Fixes bug 977719 * Run tools/hacking.py instead of pep8 mandatory * Delete fixed\_ips when network is deleted * Remove unecessary --repeat option for pep8 * Create compute.api.BaseAPI for compute APIs to use * Give all VDIs a reasonable name-label and name-description * Remove last two remaining hyperV references * bug 968452 * Add index to fixed\_ips.address * Use 'root' instead of 'os' in XenAPI driver * Information about DifferentHostFilter and SameHostFilter added * HACKING fixes, sqlalchemy fix * Add test to check extension timestamps * Fixes bug 952176 * Update doc to mention nova tool for type creation * Change Diablo document reference to trunk * Imported Translations from Launchpad * Cloudpipe tap vpn not always working * Allow instance logging to use just a UUID * Add the serialization of exceptions for RPC calls * Cleanup xenapi driver logging messages to include instance * Stop libvirt test from deleting instances dir * Move product\_version to XenAPISession * glance plugin no longer takes num\_retries parameter * Remove unused user\_id and project\_id parameters to fetch\_image() * Cleanup \_make\_plugin\_call() * Push id generation into \_make\_agent\_call() * Remove unused path argument for \_make\_agent\_call() * Remove unused xenstore methods * Combine call\_xenapi and call\_xenapi\_request * Fixed bug 962840, added a test case * Use -1 end-to-end for unlimited quotas * fix bug where nova ignores glance host in imageref * Remove unused \_parse\_xmlrpc\_value * Fix traceback in image cache manager * Fixes regression in release\_dhcp * Use thread local storage from openstack.common * Extend FilterScheduler documentation * Add validation on quota limits (negative numbers) * Get unit tests functional in OS X * Make sure cloudpipe extension can retrieve network * Treat -1 quotas as unlimited * Auto-confirming resizes would bail on exceptions * Grab the vif directly on release instead of lookup * Corrects an AttributeError in the quota API * Allow unprivileged RADOS users to access rbd volumes * Remove nova.rpc.impl\_carrot * Sync openstack.common.cfg from openstack-common * add libvirt\_inject\_key flag fix bug #971640 * Do not fail to build a snapshot if base image is not found * fix TypeError with unstarted threads in nova-network * remove unused flag: baremetal\_injected\_network\_template baremetal\_uri baremetal\_allow\_project\_net\_traffic * Imported Translations from Launchpad * fixed postgresql flavor-create * Add rootwrap for touch * Ensure floating ips are recreated on reboot * Handle instances being missing while listing floating IPs * Allow snapshots in error state to be deleted * Ensure a functional database connection * Add a faq to vnc docs * adjust logging levels for linux\_net * Handle not found in check for disk availability * Acccept metadata ip so packets aren't snatted * bug 965335 * Export user id as password to keystone when using noauth * Check that DescribeInstance works with deleted image * Check that volume has no snapshots before deletion * Fix libvirt rescue * Check vif exists before 
releasing ip * Make kombu failures retry on IOError * Adds middleware to limit request body sizes * Add validation for OSAPI server name length * adjust logging levels for libvirt error conditions * Fix exception type in \_get\_minram\_mindisk\_params * fixed bug lp:968019 ,fix network manager init floating ip problem * When dnsmasq fails to HUP log an error * Update KillFilter to handle 'deleted' exe's * Fix disassociate query to remove foreign keys * Touch in use image files when they're checked * Base image signature files are not images * Support timestamps as prefixes for traceback log lines * get\_instance\_uuids\_by\_ip\_filter to QM * Updated docstrings in /tools as per HACKING * Minor xenapi driver cleanups * Continue on the the next tenant\_id on 400 codes * Fix marker behavior for flavors * Remove auth\_uri, already have auth\_host, auth\_port * A missing checksum does not mean the image is corrupt * Default scheduler to spread-first * Reduce the image cache manager periodic interval * Handle Forbidden and NotAuthenticated glance exc * Destroy src and dest instances when deleting in RESIZE\_VERIFY * Allow self-referential groups to be created * Fix unrescue in invalid state * Clean up the shared storage check (#891756) * Don't set instance ACTIVE until it's really active * Fix traceback when sending invalid data * Support sql\_connection\_debug to get SQL diagnostic information * Improve performance of safe\_log() * Fix 'nova-manage config convert' * Add another libvirt get\_guest\_config() test case * Fix libvirt global name 'xml\_info' is not defined * Clean up read\_deleted support in host aggregates code * ensure atomic manipulation of libvirt disk images * Import recent openstack-common changes * makes volume versions display properly * Reordered the alphabet * Add periodic\_fuzzy\_delay option * Add a test case for generation of libvirt guest config * Convert libvirt connection class to use config APIs for CPU comparisons * Introduce a class for storing libvirt CPU configuration * Convert libvirt connection class to use config APIs for guests * Convert libvirt connection class to use config APIs for filesystem devices * Introduce a class for storing libvirt snapshot configuration * Move NIC devices back after disk devices * Convert libvirt connection class to use config APIs for disk devices * Convert libvirt connection class to use config APIs for input devices * Convert libvirt connection class to use config APIs for serial/console devices * Convert libvirt connection class to use config APIs for graphics * Convert libvirt vif classes over to use config API * Convert libvirt volume classes over to use config API * Delete the test\_preparing\_xml\_info libvirt test * Introduce a set of classes for storing libvirt guest configuration * Send a more appropriate error response for 403 in osapi * Use key in locals() that actually exists * Fix launching of guests where instances\_path is on GlusterFS * Volumes API now uses underscores for attrs * Remove unused certificate SQL calls * Assume migrate module missing \_\_version\_\_ is old * Remove tools/nova-debug * Inlining some single-use methods in XenAPI vmops * Change mycloud.com to example.com (RFC2606) * Remove useless dhcp\_domain flags in EC2 * Handle correctly QuotaError in EC2 API * Avoid unplugging VBDs for rescue instances * Imported Translations from Launchpad * Rollback create\_disks handles StorageError exception * Capture SIGTERM and Shut down python services cleanly * Fixed status validation. 
Fixes bug 960884 * Clarify HACKING's shadow built-in guidance * Strip auth token from log output * Fail-fast for invalid read\_deleted values * Only shutdown rescue instance if it's not already shutdown * Modify nova.wsgi.start() should check backlog parameter * Fix unplug\_vbd to retry a configurable number of times * Don't send snapshot requests through the scheduler * Implement quota classes * Fixes bug 949038 * Open Folsom * Fixes bug 957708 * Improvements/corrections to vnc docs * Allow rate limiting to be disabled via flag * Improve performance of generating dhcp leases * Fix lxc console regression * Strip out characters that should be escaped from console output * Remove unnecessary data from xenapi test * Correct accessIPv6 error message * Stop notifications from old leases * Fix typo in server diagnostics extension * Stub-implement floating-ip functions on FlatManager * Update etc/nova.conf.sample for ship * Make sqlite in-memory-db usable to unittest * Fix run/terminate race conditions * Workaround issue with greenthreads and lockfiles * allow the compute service to start with missing libvirt disks * Destroy rescue instance if main instance is destroyed * Tweak security port validation for ICMP * Debug messages for host filters * various cleanups * Remove Virtual Storage Array (VSA) code * Re-instate security group delete test case * db api: Remove check for security groups reference * Allow proper instance cleanup if state == SHUTOFF * Use getLogger for nova-all * Stop setting promisc on bridge * Fix OpenStack Capitalization * Remove improper use of redirect for hairpin mode * Fix OpenStack Capitalization * HACKING fixes, TODO authors * Keep context for logging intact in greenthreads * fix timestamps to match documented ec2 api * Include babel.cfg in tarballs * Fix LXC volume attach issue * Make extended status not admin-only by default * Add ssl and option to pass tenant to s3 register * Remove broken bin/\*spool\* tools * Allow errored volumes to be deleted * Fix up docstring * libvirt/connection.py: Set console.log permissions * nonblocking libvirt mode using tpool * metadata speed - revert logic changes, just caching * Refix mac change to work around libvirt issue * Update transfer\_vhd to handle unicode correctly * Fixes bug 954833 By adding the execute bit to the xenhost xenapi plugin * Cleanup flags * fix bug 954488 * Fix backing file cp/resize race condition * Use a FixedIp subquery to find networks by host * Changes remove\_fixed\_ip to pass the instance host * Map image ids to ec2 ids in metadata service * Remove date\_dhcp\_on\_disassociate comment and docs * Make fixed\_ip\_disassociate\_all\_by\_timeout work * Refactor glance id<->internal id conversion for s3 * Sort results from describe\_instances in EC2 API * virt/firewall: NoopFirewallDriver::instance\_filter\_exists must return True * fix nova-manage floating delete * fixed list warn when ip allocated to missing inst * Removes default use of obsolete ec2 authorizor * Additional extensions no longer break unit-tests * Use cPickle and not just pickle * Move (cast|call)\_compute\_message methods back into compute API class * Fix libvirt get\_console\_output for Python < 2.7 * doc/source/conf.py: Fix man page building * Update floating auto assignment to use the model * Make nova-manage syslog check /var/log/messages * improve speed of metadata * Fix linux\_net.py interface-driver loading * Change default of running\_deleted\_instance\_action * Nuke some unused SQL api calls * Avoid nova-manage floating create /32 
* Add a serializer for os-quota-sets/defaults * Import nova.exception so exception can be used * refactoring code, check connection in Listener. refer to Bug #943031 * Fix live-migration in multi\_host network * add convert\_unicode to sqlalchemy connection arguments * Fixes xml representation of ext\_srv\_attr extension * Sub in InstanceLimitExceeded in overLimit message * Remove update lockmode from compute\_node\_get\_by\_host * Set 'dhcp\_server' in \_teardown\_network\_on\_host * Bug #922356 QuantumManager does not initiate unplug on the linux\_net driver * Clean up setup and teardown for dhcp managers * Display owner in ec2 describe images * EC2 KeyName validation * Fix issues with security group auths without ports * Replaced use of webob.Request.str\_GET * Allow soft\_reboot to work from more states: * Make snapshots with qemu-img instead of libvirt * Use utils.temporary\_chown to ensure permissions get reset * Add VDI chain cleanup script * Reduce duplicated code in xenapi * Since 'net' is of nova.network.model.VIF class and 'ips' is an empty list, net needs to be pulled from hydrated nw\_info.fixed\_ips(), and appended to ips * Fix nova-manage backend\_add with sr\_uuid * Update values in test\_flagfile to be different * Switch all xenapi async plugin calls to be sync * Hack to fixup absolute pybasedir in nova.conf.sample * fixup ldapdns default config * Use cache='none' for all disks * Update cfg from openstack-common * Add pybasedir and bindir options * Simply & unify console handling for libvirt drivers * Cleanup XenAPI tests * fix up nova-manage man page * Don't use glance when verifying images * Fixes os-volume/snapshot delete * Use a high number for our default mac addresses * Simplify unnecessary XenAPI Async calls to be synchronous * Remove an obsolete FIXME comment * Fixing image snapshots server links * Wait for rescue VM shutdown to complete before destroying it * Renaming user friendly fault name for HTTP 409 * Moving nova/network tests to more logical home * Change a fake classes variable to something other than id * Increase logging for xenapi plugin glance uploads * Deprecate carrot rpc code * Improve vnc proxy docs * Require a more recent version of glance * Make EC2 API a bit more user friendly * Add kwargs to RequestContext \_\_init\_\_ * info\_cache is related to deleted instance * Handle kwargs in deallocate\_fixed\_ip for FlatDHCP * Add a few missing tests regarding exception codes * Checks image virtual size before qemu-img resize * Set logdir to a tempdir in test\_network * Set lock\_path to a tempdir in TestLockCleanup * Exceptions unpacking rpc messages shouldn't hang the daemon * Use sqlalchemy reflection in migration 080 * Late load rabbit\_notifier in test\_notifier * boto shouldn't be required for production deploys * Don't use ec2 IDs in scheduler driver * pyflakes cleanups on libvirt/connection.py * Validate VDI chain before moving into SR * Fix racey snapshots * Don't swallow snapshot exceptions * allow block migration to talk to glance/keystone * Remove cruft and broken code from nova-manage * Update paste file to use service tenant * Further cleanup of XenAPI * Fix XML namespaces for limits extensions and versions * Remove the feature from UML/LXC guests * setup.py: Fix doc building * Add adjustable offset to audit\_period * nova-manage: allow use of /32 IP range * Clear created attributes when tearing down tests * Fix multi\_host column name in setup\_networks.. 
* HACKING fixes, all but sqlalchemy * Remove trailing whitespaces in regular file * remove undocumented, unused mpi 'extension' to ec2 metadata * Minor clarifications for the help strings in nova config options * Don't use \_ for variable name * Make test\_compute console tests more robust * test\_compute stubs same thing multiple times * Ignore InstanceNotFound when trying to set instance to ERROR * Cleans up the create\_conf tool * Fix bug 948611. Fix 'nova-manage logs errors' * api-paste.ini: Add /1.0 to default urlmap * Adds nova-manage command to convert a flagfile * bug 944145: race condition causes VM's state to be SHUTOFF * Cleanup some test docstrings * Cleans up a bunch of unused variables in XenAPI * Shorten FLAGS.rpc\_response\_timeout * Reset instance to ACTIVE when no hosts found * Replaces pipelines with flag for auth strategy * Setup and teardown networks during migration * Better glance exception handling * Distinguish rootwrap Authorization vs Not found errors * Bug #943178: aggregate extension lacks documentation * Rename files/dirs from 'rabbit' to 'rpc' * Change references to RabbitMQ to include Qpid * Avoid running code that uses logging in a thread * No longer ignoring man/novamanage * Fixing incorrect use of instance keyword in logging * Fix rst formatting and cross-references * Provide a provider for boto.utils * Only pass image uuids to compute api rebuild * Finally fix the docs venv bug * Get rid of all of the autodoc import errors * Rename DistributedScheduler as FilterScheduler * Allows new style config to be used for --flagfile * Add support for lxc consoles * Fix references to novncproxy\_base\_url in docs * Add assertRaises check to tools/hacking.py as N202 * fix restructuredtext formatting in docstrings that show up in the developer guide * Raise 409 when rescuing instance in RESCUE mode * Log a certain rare instance termination exception * Update fixed\_ip\_associate to not use relationships * Remove unnecessary code in test setUp/tearDown * Imported Translations from Launchpad * Only raw string literals should be used with \_() * assertRaises(Exception, ...) 
considered harmful * Added docs on MySQL queries blocking main thread * Fix test\_attach\_volume\_raise\_exception * Fix test\_unrescue to actually test unrescue * bug #941794 VIF and intf drivers for Quantum Linux Bridge plugin * Ensures that we don't exceed iptables chain max * Allows --flat\_interface flag to override db * Use self.mox instead of create a new self.mocker * Fix test\_migrate\_disk\_and\_power\_off\_exception * fakes.fake\_data\_store doesn't exist, so don't reset it * populate glance 'name' field through ec2-register * Remove unused \_setup\_other\_managers method from test case * Remove unused test\_obj parameter to setUp() * Use stubout instead of manually stubbing out os.path.exists * Remove superfluous \_\_init\_\_ from test case * Use test.TestCase instead of manually managing stubout * Handle InstanceNotFound during server update * Use stubout instead of manually stubbing out versions.VERSIONS * Remove unused session variable in test setup * Cleanup swap in \_create\_vm undo * Do not invoke kill dnsmasq if no pid file was found * Fixes for ec2 images * Retry download\_vhd with different glance host each time * Display error for invalid CIDR * Remove empty setUp/tearDown methods * Call super class tearDown correctly * Fixes bug 942556 and bug 944105 * update copyright, add version information to footer * Refactor spawn to use UndoManager * Fail gracefully when the db doesn't speak unicode * Remove unnecessary setting up and down of mox and stubout * Remove unnecessary variables from tests * Ensure image status filter matches glance format * fix for bug 821252. Smarter default scheduler * blueprint sphinx-doc-cleanup bug 944381 * Adds soft-reboot support to libvirt * Minor cleanup based on HACKING * libvirt driver calls unplug() twice on vm reboot * Add missing format string type on some exception messages * Fixing a request-id header bug * Test creating a server with metadata key too long * Fixes lp931801 and a key\_error * notifications for delete, snapshot and resize * Ensure that context read\_deleted is only one of 'no', 'yes' or 'only' * register Cell model, not Zone model * Option expose IP instead of dnshost in ec2 desc' * Fix \_sync\_power\_states to obtain correct 'state' * Ensures that keypair names are only AlphaNumeric * Cast vcpu\_weight to string before calling xen api * Add missing filters for new root commands * Destroy VM before VDIs during spawn cleanup * Include hypervisor\_hostname in the extended server attributes * Remove old ratelimiting code * Perform image show early in the resize process * Adds netapp volume driver * Fixes bug 943188 * Remove unused imports and variables from OS API * Return empty list when volume not attached * Be consistent with disabling periodic tasks * Cast volume-related ids to str * Fix for bug 942896: Make sure network['host'] is set * Allow xvd\* to be supplied for volume in xenapi * Initialize progress to 0 for build and resize * Fix issue starting nova-compute w/ XenServer * Provide retry-after guidance on throttled requests * Use constant time string comparisons for auth * Rename zones table to cells and Instance.zone\_name to cell\_name * Ensure temporary file gets cleaned up after test * Fixes bug 942549 * Use assertDictMatch to keep 2.6 unit tests passing * Handle case where instance['info\_cache'] is None * sm volume driver: fix backend adding failure * sm vol driver: Fix regression in sm\_backend\_conf\_update * TypeError API exceptions get logged incorrectly * Add NoopFirewallDriver * Add utils.tempdir() 
context manager for easy temp dirs * Check all migrations have downgrade in test\_misc * Remove monkey patching in carrot RPC driver * Call detach\_volume when attach fails * Do not hit the network\_api every poll * OS X Support fixed, bug 942352 * Make scheduler filters more pluggable * Adds temporary chown to sparse\_copy * make nova-network usable with Python < 2.6.5 * Re-adds ssl to kombu configuration and adds flags that are needed to pass through to kombu * Remove unused import * Make sure detail view works for volume snaphots * Imported Translations from Launchpad * Decode nova-manage args into unicode * Cleanup .rescue files in libvirt driver unrescue * Fixes cloudpipe extension to work with keystone * Add missing directive to tox.ini * Update EC2KeystoneAuth to grab tenant 'id' * Monkey patch migrate < 0.7.3 * Fixes bug lp#940734 - Adding manager import so AuthMiddleware works * Clean stale lockfiles on service startup : fixes bug 785955 * Fix nova-manage floating create docs * Fix MANIFEST.in to include missing files * Example config\_drive init script, label the config drive * fix unicode triggered failure in AuthManager * Fix bug 900864 Quantum Manager flag for IP injection * Include launch\_index when creating instances * Copy data when migration dst is on a different FS * bigger-than-unit test for cleanup\_running\_deleted\_instances * Nova options tool enhancements * Add hypervisor\_hostname to compute\_nodes table and use it in XenServer * Fixes error if Melange returns no networks * Print error if nova-manage should be run as root * Don't delete security group in use from OS API * nova-network can't deallocate ips from deleted instances * Making link prefixes support https * Prevent infinite loop in PublishErrorsHandler * blueprint host-aggregates: host maintenance - xenapi implementation * bug 939480 * libvirt vif-plugging fixes. 
Fixes bug 939252 , bug 939254 * Speeding up resize down with sparse\_copy * Remove network\_api fallback for info\_cache from APIs * Improve unit test coverage per bug/934566 * Return 40x for flavor.create duplicate * refactor a conditional for testing and understanding * Disable usb tablet support for LXC * Add Nexenta volume driver * Improve unit test coverage per bug/934566 * nova-manage: Fix 'fixed list' * Add lun number to provider\_location in create\_volume \* Fixes bug 938876 * Fix WeightedHost * Fix instance stop in EC2 create\_image * blueprint host-aggregates: improvements and clean-up * Move get\_info to taking an instance * Support fixed\_ip range that is a subnet of the network block * xenapi: nova-volume support for multiple luns * Fix error that causes 400 in flavor create * Makes HTTP Location Header return as utf-8 as opposed to Unicode * blueprint host-aggregates: host maintenance * blueprint host-aggregates: xenapi implementation * Rework base file checksums * Avoid copying file if dst is a directory * Add 'nova-manage export auth' * Alter output format of volume types resources * Scheduler notifications added * Don't store connection pool in RpcContext * Fix vnc docs: novaclient now supports vnc consoles * Clarify use of Use of deprecated md5 library * Extract get\_network in quantum manager * Add exception SnapshotIsBusy to be handled as VolumeIsBusy * Exception cleanup * Stop ignoring E202 * Support tox-based unittests * Add attaching state for Volumes * Fix quantum get\_all\_networks() signature (lp#936797) * Escape apostrophe in utils.xhtml\_escape() (lp#872450) * Backslash continuations (nova.api.openstack) * Fix broken method signiture * Handle OSError which can be thrown when removing tmpdir. Fixes bug 883326 * Update api-paste.ini with new auth\_token settings * Imported Translations from Launchpad * Don't tell Qpid to reconnect in a busy loop * Don't inherit controllers from each other, we don't want the methods of our parent * Improve unit test coverage per bug/934566 * Setting access ip values on server create * nova.conf sample tool * Imported Translations from Launchpad * Add support for admin\_password to LibVirt * Add ephemeral storage to flavors api * Resolve bug/934566 * Partial fix for bug 919051 * fix pre\_block\_migration() interaction with libvirt cache * Query directly for just the ip * bug 929462: compile\_diagnostics in xenapi erronously catch XenAPI.Failure * Use new style instance logging in compute api * Fix traceback running instance-usage-audit * Actual fix for bug 931608 * Support non-UTC timestamps in changes-since filter * Add additional information to servers output * Adding traceback to async faults * Pulls the main components out of deallocate * Add JSONFormatter * Allow file logging config * LOG.exception does not take an exc\_info keyword * InstanceNotFound exceptions for terminate\_intance now Log warning instead of throwing exeptions * bug 933620: Error during ComputeManager.\_poll\_bandwidth\_usage * Make database downgrade works * Run ovs-ofctl as root * 077\_convert\_to\_utf8: Convert \*all\* FK tables early * Fix bug 933147 Security group trigger notifications * Fixes nova-volume support for multiple luns * Normalize odd date formats * Remove all uniqueness constraints in migration 76 * Add RPC serialization checking, fix exposed problems * Don't send a SQLAlchemy model over rpc * Adds back e2fsck exit code checking * Syncs vncviewer mouse cursor when connected to Windows VMs * Backslash continuations (nova.tests) * The 
security\_group name should be an XML attribute * Core modifications for future zones service * Remove instance\_get stubs from server action tests * removed unused method and added another test * Enables hairpin\_mode for virtual bridge ports, allowing NAT reflection * Removed zones from api and distributed scheduler * Fix bug 929427 * Tests for a melange\_ipam\_lib, who is missing tests * Create a flag for force\_to\_raw for images * Resolve bug/927714 -- get instance names from db * Fix API extensions documentation, bug 931516 * misc networking fixes * Print friendly message if no floating IPs exist * Catch httplib.HTTPException as well * Expand Quantum Manager Unit Tests + Associated Fixes * bw\_usage takes a MAC address now * Adding tests for NovaException printing * fix a syntax error in libvirt.attach\_volume() with lxc * Prevent Duplicate VLAN IDs * tests: fix LdapDNS to allow running test\_network in isolation * Fix the description of the --vnc\_enabled option * Different exit code in new versions of iscsiadm * improve injection diagnostics when nbd unavailable. Bug 755854 * remove unused nwfilter methods and tests * LOG.exception only works while in an exception handler * \_() works best with string literals * Remove unnecessary constructors for exceptions * Don't allow EC2 removal of security group in use * improve stale libvirt images handling fix. Bug 801412 * Added resize support for Libvirt/KVM * Update migration 076 so it supports PostgreSQL * Replace ApiError with new exceptions * Simple way of returning per-server security groups * Declare deprecated auth flag before its used * e2fsck needs -y * Standardize logging delaration and use * Changing nova-manage error message * Fix WADL/PDF docs referenced in describedby links * bug 931604: improve how xenapi RRD records are retrieved * Resolve bug/931794 -- add uuid to fake * Use new style instance logging in compute manager * clean pyc files before running unit tests * Adding logging for 500 errors * typo fix * run\_tests.sh fix * get\_user behavior in ldapdriver * Fsck disk before removing journal * Don't query database with an empty list for IN clause * Use stubs in libvirt/utils get\_fs\_info test * Adding (-x | --stop) option back to runner.py * Remove duplicate variable * Fixing a unicode related metadata bug * bug 931356: nova-manage prints libvirt related warnings if libvirt isn't installed * Make melange\_port an integer * remove a private duplicate function * Changes for supporting fast cloning on Xenserver. Implements blueprint fast-cloning-for-xenserver 1. use\_cow\_images flag is reused for xenserver to check if copy on write images should be used. 2. image-id is used to tag an image which has already been streamed from glance. 3. If cow is true, when an instance of an image is created for the first time on a given xenserver, the image is streamed from glance and copy on write disk is created for the instance. 4. For subsequent instance creation requests (of the same image), a copy on write disk is created from the base image that is already present on the host. 5. If cow is false, when an instance of an image is created for the first time on a host, the image is streamed from glance and its copy is made to create a virtual disk for the instance. 6. For subsequent instance creation requests, a copy of disk is made for creating the disk for the instance. 7. Snapshot creation code was updated to handle cow=true. Now there can be upto 3 disks in the chain. The base disk needs to be uploaded too. 8. 
Also added a cache\_images flag. Depending on whether the flag is turned on on not, images will be cached on the host * Completes fix for LP #928910 - libvirt performance * Add some more comments to \_get\_my\_ip() * remove unused and buggy function from S3ImageService * Fix minor typo in runner.py * Remove relative imports from scheduler/filters * Converting db tables to utf8 * remove all instance\_type db lookups from network * Remedies LP Bug #928910 - Use libvirt lookupByName() to check existence * Force imageRef to be a string * Retry on network failure for melange GET requests * Handle network api failures more gracefully * Automatic confirmation of resizes on libvirt * Fix exception by passing timeout as None * Extend glance retries to show() as well * Disable ConfigParser interpolation (lp#930270) * fix FlatNetworkTestCase.test\_get\_instance\_nw\_info * remove unused and buggy function from baremetal proxy * Remove unused compute\_service from images controller * Backslash continuations (nova.virt.baremetal) * fixed bug 928749 * Log instance id consistently inside the firewall code * Remove the last of the gflags shim layer * Fix disk\_config typo * Pass instance to log messages * Fix logging in xenapi vmops * Ensures that hostId's are unique * Fix confirm\_resize policy handling * optimize libvirt image cache usage * bug 929428: pep8 validation on all xapi plugins * Move translations to babel locations * Get rid of distutils.extra * Backslash continuations (network, scheduler) * Remove unnecessary use of LoopingCall in nova/virt/xenapi/vm\_utils.py * Stop using LoopingCall in nova.virt.xenapi\_conn:wait\_for\_task() * Handle refactoring of libvirt image caching * linux\_net: Also ignore shell error 2 from ip addr * Consistently update instance in nova/compute/manager.py * Use named logger when available * Fix deprecated warning * Add support for LXC volumes * Added ability to load specific extensions * Add flag to include link local in port security * Allow e2fsck to exit with 1 * Removes constraints from instance and volume types * Handle service failures during finish\_resize gracefully * Set port security for all allocated ips * Move connection pool back into impl\_kombu/qpid * pep8 check on api-paste.ini when using devstack * Allows test\_virt\_drivers to work when run alone * Add an alias to the ServerStartStop extension * tests.integrated fails with devstack * Backslash continuations (nova.virt) * Require newer versions of SA and SA-Migrate * Optimizes ec2 keystone usage and handles errors * Makes sure killfilter doesn't raise ValueError * Fixes volume snapshotting issues and tests * Backslash continuations (misc.) * nova-rootwrap: wait() for return code before exit * Fix bug 921814 changes handling of adminPass in API * Send image properties to Glance * Check return code instead of output for iscsiadm * Make swap default to vdb if there is no ephemeral * Handle --flagfile by converting to .ini style * Update cfg from openstack-common * Fix xvpvncproxy error in nova-all (lp#928489) * Update MANIFEST.in to account for moved schemas * Remove ajaxterm from Nova * Adding the request id to response headers. Again * Update migration to work when data already exists * Fix support for --flagfile argument * Implements blueprint heterogeneous-tilera-architecture-support * Add nova/tests/policy.json to tarball * Fix quantum client filters * Store the correct tenant\_id/project\_id * dont show blank endpoint headers * Pass in project\_id in ext. 
authorizer * Fix \_poll\_bandwidth\_usage if no network on vif * Fix nova.virt.firewall debugging message to use UUID * Fix debugging log message to print instance UUID * mkfs takes vfat, not fat32 * Pass partition into libvirt file injection * bug 924266: connection\_type and firewall\_driver flags mismatch * bug 927507: fix quantum manager get\_port\_by\_attachment * Fix broken flag in test\_imagecache * Don't write a dns directive if there are no dns records in /etc/network/interfaces * Imported Translations from Launchpad * Backslash continuations (nova.db) * Add initiator to initialize\_connection * Allows nova to read files as root * Re-run nova-manage under sudo if unable to read conffile * Fix status transition when reverting resize * Adds flags for href prefixes * X\_USER is deprecated in favor of X\_USER\_ID * Move cfg to nova.openstack.common * Use Keystone Extension Syntax for EC2 Creds * Remove duplicate instances\_path option * Delete swap VDI if not used * Raise ApiError in response to InstanceTypeNotFound * Rename inst in \_create\_image, and pass instance to log msgs * Fix bug #924093 * Make sure tenant\_id is populated * Fix for bug 883310 * Increased coverage of nova/auth/dbdriver.py to 100%. Fixes 828609 * Make crypto use absolute imports * Remove duplicate logging\_debug\_format option * blueprint nova-image-cache-management phase1 * Set rescue instance hostnames appropriately * Throw an user error on creating duplicate keypairs Fixes bug 902162 * Fixes uuid lookup in virtual interfaces extension * Add comments to injected keys and network config * Remove hard coded m1.tiny behavior * Fix disassociation of fixed IPs when using FlatManager * Provides flag override for vlan interface * remove auto fsck feature from file injection. Bug 826794 * DRYing up Volume/Compute APIRouters * Excise M2Crypto! * Add missing dev. Fixes LP: #925607 * Capture bandwidth usage data before resize * Get rid of DeprecationWarning during db migration * Don't block forever for rpc.(multi)call response * Optionally disable file locking * Avoid weird test error when mox is missing * fix stale libvirt images on download failure. Bug 801412 * cleanup test case to use integers not strings * Respect availability\_zone parameter in nova api * Fix admin password skip check * Add support for pluggable l3 backends * Improve dom0 and template VM avoidance * Remove Hyper-V support * Fix logging to log correct filename and line numbers * Support custom routes for extensions * Make parsing of usage stats from XS more robust * lockfile.FileLock already appends .lock * Ties quantum, melange, and nova network model * Make sure multiple calls to \_get\_session() aren't nested * bug 921087: i18n-key and local-storage hard-coded in xenapi * optimize libvirt raw image handling. Bug 924970 * Boto 2.2.x failes. 
Capping pip-requires at 2.1.1 * fixed bug 920856 * Expand policies for admin\_actions extension * Correct checking existence of security group rule * Optionally pass a instance uuid to log methods * remove unsupported ec2 extensions * Fix VPN ping packet length * Use single call in ExtendedStatus extension * Add mkswap to rootwrap * Use "display\_name" in "nova-manage vm list" * Fix broken devref docs * Allow for auditing of API calls * Use os.path.basename() instead of string splitting * Remove utils.runthis() * Empty connection pool after test\_kombu * Clear out RPC connection pool before exit * Be more explicit about emptying connection pool * fixes melange ipam lib * bug 923798: On XenServer the DomU firewall driver fails with NotImplementedError * Return instancesSet in TerminateInstances ec2 api * Fix multinode libvirt volume attachment lp #922232 * Bug #923865: (xenapi driver)instance creation fails if no guest agent is avaiable for admin password configuration * Implementation of new Nova Volume driver for SolidFire ISCSI SAN * Handle kepair delete when not found * Add 'all\_tenants' filter to GET /servers * Use name filter in GlanceImageService show\_by\_name * Raise 400 if bad kepair data is provided * Support file injection on boot w/ Libvirt * Refactor away the flags.DEFINE\_\* helpers * Instances to be created with a bookmark link * fix \`nova-manage image convert\` exception * Added validation of name when creating a new keypair * Ignore case in policy role checks * Remove session arg from sm\_backend\_conf\_update * Remove session arguments from db.api * Add a note explaining why unhandled exceptions shouldn't be returned to users * Remove fetching of networks that weren't created via nova-manage * uses the instance uuid in libvirt by introducing a new variable 'uuid' for the used template instead of using a random uuid in libvirt * Fixing a rebuild race condition bug * Fixes bug 914418 * Remove LazySerializationMiddleware * Bug #921730: plugins/xenserver/xenapi/etc/xapi.d/plugins/objectstore no longer in use * Adding live migration server actions * bug 921931: fix Quantum Manager VM launch race condition * Fix authorization checks for simple\_usage.show * Simplify somewhat complicated reduce() into sum() * Ignore connection\_type when no instances exist * Add authorization checks to flavormanage extension * rootwrap: Fix KillFilter matching * Fix uptime calculation in simple\_usage * Fixing rebuilds on libvirt, seriously * Don't pass filter\_properites to managers * Fixing rebuilds on libvirt * Fix bug 921715 - 'nova x509-create-cert' fails * Return 403 instead of 401 when policies reject * blueprint host-aggregates: OSAPI extensions * blueprint host-aggregates: OSAPI/virt integration, via nova.compute.api * Fixes bug 921265 - i'nova-manage flavor create|list' * Remove unused flags.Help\*Flag * Convert vmwareapi code to UNIX style line endings * Blueprint xenapi-provider-firewall and Bug #915403 * Adds extension for retrieving certificates * Add os-start/os-stop server actions to OSAPI * Create nova cert worker for x509 support * Bug #916312: nova-manage network modify --network flag is inconsistent * Remove unused nova/api/mapper.py * Add nova.exception.InvalidRPCConnectionReuse * Add support for Qpid to nova.rpc * Add HACKING compliance testing to run\_test.sh * Remove admin\_only ext attr in favor of authz * usage: Fix time filtering * Add an API extension for creating+deleting flavors * extensions: Allow registering actions for create + delete * Explicitly encode string 
to utf8 before passing to ldap * Make a bunch of dcs into single-entry lists * Abstract out \_exact\_match\_filter() * Adds a bandwidth filter DB call * KVM and XEN Disk Management Parity * Tweak api-paste.ini to prepare for a devstack change * Remove deprecated serialization code * Add affinity filters updated to use scheduler\_hints and have non-douchey names * Do not output admin\_password in debug logs * Handle error in associate floating IP (bug 845507) * Brings back keystone middleware * Remove sensitive info from rpc logging * Error out instance on set password failure * Fixed limiting for flavors * Adds availability zone filter * Fixes nova-manage fixed list * API version check cleanups * ComputeNode Capacity support * blueprint host-aggregates: maintenance operations to host OSAPI exts * Add a specific filter for kill commands * Fix environment passing in DnsmasqFilter * Cleanups for rootwrap module * Fix 'nova-manage config list' * Add context and request spec to filter\_properties * Allow compute manager prep\_resize to accept kwargs * Adds isolated hosts filter * Make start\_instance cast directly to compute host * Refactor compute api messaging calls to compute manager * Refactor test\_scheduler into unit tests * Forgot to update chance scheduler for ignore\_hosts change * Add SchedulerHints compute extension * Add floating IP support to Quantum Manager * Support filter based on CPU core (over)allocation * bug 917397 * Add option to force hosts to scheduler * Change the logic for deleting a record dns\_domains * Handle FlavorNotFound on server list w/ filter * ERROR out instance if unrescue fails * Fix xenapi rescue without swap * Pull out ram\_filter into a separate filter * pass filter\_properties into scheduling requests for run\_instance * Fixes bug #919390 - Block Migration fails when keystone is un use * Fix nova-manage floating list (fixes bug 918804) * Imported Translations from Launchpad * scheduler host\_manager needs service for filters * Allow Quantum Manager to run in "Flat" mode * aws/ec2 api validation * Fix for bug 918502 * Remove deprecated extension code * Validating image id for rebuild * More cleanup of Imports to match HACKING * chmod nova-logspool * nova/network: pass network\_uuid to linuxnet\_interface\_driver and vif driver * Clean up crypto.py * Fix missing imports and bad call caught by pyflakes * Clarify error messages for admin passwords * Log uuid when instances fail to spawn * Removed references to FLAGS.floating\_ip\_dns\_domains * Removed some vestigial default args from DNS drivers * Allow config of vncserver\_proxyclient\_address * Rename 'zone' to 'domain.' * disk\_config extension now uses OS prefix * Do not write passwords to verbose logs. 
bug 916167 * Automatically clean up DNS when a floating IP is deallocated * Fix disassociating of auto assigned floating ips * Cleanup Imports to match HACKING guidelines * Added an LDAP/PowerDNS driver * Add dns domain manipulation to nova * fixes bug lp914962 * Fixed bug 912701 * Fix bug #917615 * Separate scheduler host management * Set instance\_ref property when creating snapshots * Implements blueprint vnc-console-cleanup * Rebuild/Resize support for disk-config * Allow instances in 'BUILD' state to be deleted * Stop allowing blank image names on snapshot/backup * Only update if there are networks to update * Drop FK constraint if it exists in migration 064 * Fix an error that prevents message from getting substituted * blueprint host-aggregates * Add missing scripts to setup.py (lp#917676) * Fixes bug 917128 * Clean up generate fingerprint * Add policy checking to nova.network.api.API * Add default policy rule * Super is not so super * Fixed the log line * Add tests for volume list and detail through new volume api, and fix error that the tests caught * Typofix for impl\_kombu * Refactoring logging \_log function * Update some extensions (1) * DECLARE osapi\_compute\_listen\_port for auth manager * Increase robustness of image filtering by server * Update some extensions (2) * Implement BP untie-nova-network-models * Add ipv4 and ipv6 validation * greenlet version inconsistency * Add policy checks to Volume.API * Remove unused extension decorator require\_admin * Fix volume api typo * Convert nova.volume.api.API to use volume objects * Remove a whole bunch of unused imports * have all quota errors return an http 413 * This import is not used * Refactor request and action extensions * Prefixing the request id with 'req-' to decrease confusion when looking at logs * Fixing a bug that was causing the logging to display the context info for the wrong user. 
bug: 915608 * Modify the fake ldap driver to fix compatibility * Create an instance DNS record based on instance UUID * Implements blueprint separate-nova-volumeapi * Implement more complete kombu reconnecting * First implementation of bp/live-migration-resource-calc * Remove 'status' from default snapshot properties * Clean up disk\_format mapping in xenapi.vm\_utils * Remove skipping of 2 tests * Make authz failures use proper response code * Remove compute.api.API.add\_network\_to\_project * Adds test for local.py * Fix policy import in nova.compute.api * Remove network\_api from Servers Controller * minor fix in comment * Updates linux\_net to ignore some shell errors * Add policy checks to Compute.API * Ensure nova is compatible with WebOb 1.2+ * improve handling of the img\_handlers config list * Unbreak start instance and fixes bug 905270 * catch InstanceInvalidState in more places * Fix some cfg test case naming conflicts * Remove 'location' from GlanceImageService * Makes common/cfg.py raise AttributeError * Call to instance\_info\_cache\_delete to use uuid * Bug #914907: register\_models in db/sqlalchemy/models.py references non-existent ExportDevice * Update logging in compute manager to use uuids * Do not overwrite project\_id from request params * Add optional revision field to version number * Imported Translations from Launchpad * nova-manage floating ip fixes * Add a modify function to the floating ip dns api * Adding the request id to response headers * Add @utils.deprecated() * Blueprint xenapi-security-groups * Fix call to compute\_api.resize from \_migrate * Fix metadata mapping in s3.\_s3\_parse\_manifest * Fix libguestfs operation with specified partitions * fix reboot\_instance typo * Fix bad test cases in smoketest * fix bug 914049: private key in log * Don't overwrite local context on elevated * Bug 885267: Fix GET /servers during instance delete * Adds support for floating ip pools * Adds simple policy engine support * Refactors utils.load\_cached\_file * Serialization, deserialization, and response code decorators * Isolate certain images on certain hosts * Workaround bug 852095 without importing mox * Bug #894683: nova.service does not handle attribute specific exceptions and client hangs * Bug #912858: test\_authors\_up\_to\_date does not deal with capitalized names properly * Adds workaround check for mox in to\_primitive * preload cache table and keep it up to date * Use instance\_properties in resize * Ensure tests are python 2.6 compatible * Return 409s instead of 500s when deleting certain instances * Update HACKING.rst * Tell users what is about to be installed via sudo * Fix LP912092 * Remove small unneeded code from impl\_kombu * Add missing space between XML attributes * Fix except format to match HACKING * Set VLAN MTU size when creating the vlan interface * Add instance\_name field to console detail command which will give the caller the necessary information to actually connect * Fix spelling of variable * Remove install\_requires processing * Send event notifications for suspend and resume * Call mkfs with the correct order of arguments * Fix bug 901899 * Fix typo in nova/rootwrap/compute.py. 
Fixes LP: #911880 * Make quantum\_use\_dhcp falsifiable * Fixing name not defined * PEP8 type comparison cleanup * Add cloudpipe/vpn api to openstack api contrib * Every string does not need to be internationalized * Adds running\_deleted\_instance\_reaper task * libvirt: implements boot from ISO images * Unused db.api cleanup * use name gateway\_v6 instead of gateway6 * PEP8 remove direct type comparisons * Install a good version of pip in the venv * Bug #910045: UnboundLocalError when failing to get metrics from XenAPI hosts * re-raising exceptions fix * use dhcp\_lease\_time for dnsmasq. Fix bug 894218 * Clean up pylint errors in top-level files * Ensure generated passwords meet minimum complexity * Fixing novaclient\_converter NameError * Bug 820059: bin/nova-manage.py VpnCommands.spawn calls non-existant method VpnCommands.\_vpn\_for - fixed * Bug 751229: Floating address range fixed * Brings some more files up to HACKING standards * Ensure queue is declared durable so messages aren't dropped * Create notification queues as durable * Adding index to instances project\_id column * Add an API for associating floating IPs with DNS entries * 'except:' to 'except Exception:' as per HACKING * Adds EC2 ImportKeyPair API support * Take the availability zone from the instance if available * Update glance Xen plugin w/ purge props header * Converting zones into true extension * Convering /users to admin extension * Add a DECLARE for dhcp\_doamin flag to metadata handler * Support local target for Solaris, use 'safe' command-line processing * Add 'os-networks' extension * Converting accounts resource to admin extension * Add exit\_code, stdout, stderr etc to ProcessExecutionException * Fixes LP bug #907898 * Switch extension namespace * Refactor Xen Vif drivers. Fixes LP907850 * Remove code in migration 064 to drop an fkey that does not exist. Fixes LP bug #907878 * Help clarify rpc API with docs and a bit of code * Use SQLAlchemy to drop foreign key in DB migrate * Move createBackup server action into extension * Bug#898257 support handling images with libguestfs * Bug#898257 abstract out disk image access methods * Move 'actions' subresource into extension * Make os-server-diagnostics extension admin-only * Remove unneeded broken test case * Fix spelling typos in comments * Allow accessIPv4 and accessIPv6 on rebuild action * Move 'diagnostics' subresource to admin extension * Cleaning up imports in compute and virt * Cleaning up imports in nova.api * Make reroute\_compute use functools.wraps. 
Fixes LP bug #906945 * Removing extra code from servers controller * Generate instance faults when instance errors * Clarify NoValidHost messages * Fix one last bug in os-console-output extension * Fix os-console-output extension integration * Set Location header in server create and rebuild actions * Consistently use REBUILDING vm\_state * Improve the minidns tests to handle zone matching * Remove unused FLAGS.block\_size * Make UUID format checking more correct * Set min\_ram and min\_disk on snapshot * Add support for port security to QuantumManager * Add a console output action to servers * Creating mechanism that loads Admin API extensions * Document return type from utils.execute() * Renamed the instance\_dns\_driver to dns\_driver for more general use * Specify -t rsa when calling ssh-keygen * create\_export and ensure\_export should pass up the return value, to update the database * Imported Translations from Launchpad * avoid error and trace on dom.vcpus() in lxc * Properly passes arg to run\_iscsiadm to fix logout * Makes disassociate by timeout work with multi-host * Call get\_instance\_nw\_info with elevated context, as documented in nova/network/manager.py * Adds missing joinedload for vif loading * missing comments about extensions to ec2 * Pull resource extensions into APIRouter * IPAM drivers aren't homogenous bug 903230 * use env to find 'false'. Fix for OS X * Fix scheduler error handler * Starting work on exposing service functionality * Bugfix for lp904932 * Ensure fkey is dropped before removing instance\_id * Fixes bug 723235 * nova.virt.libvirt.firewall: set static methods * Expose Asynchronous Fault entity in the OSAPI * Fix nova-manage flags declaration * Remove useless flags declaration * Remove useless input\_chain flags * Make XenAPI agent configuration synchronous * Switch disk\_config extension to use one DB query * Update utils.execute so that check\_exit\_code handles booleans. Fixes LP bug #904560 * Rename libvirt\_uri to uri * Make libvirt\_uri a property * Making pep8 output less verbose * Refactors handling of detach volume * Fixes bug 887402 * Bug 902626 * Make various methods static * Pass additional information from nova to Quantum * Refactor vm\_state and task\_state checking * Updates OVS rules applied to IPv4 VIFs * Follow-on to I665f402f to convert rxtx\_quota to rxtx\_factor in nova-manage and a couple of tests * Make sure the rxtx\_cap is used to set qos info * Fix some errors found by pychecker * Fix tgtadm off by one error. Fixes bug #871278 * Sanitize EC2 manifests and image tarballs * floating-ip: return UUID of instance rather than ID * Renaming instance\_actions.instance\_id column to instance\_uuid. blueprint: internal-uuids * Fix for bug 902175 * fixed typos. 
removed an unused import * Vm state management and error states * Added support for creating nova volume snapshots using OS API * Fix error when subnet doesn't have a cidr set * bug 899767: fix vif-plugging with live migration * Fixing snapshot failure task\_state * Imported Translations from Launchpad * Moves find config to utils because it is useful * fixed\_ips by vif does not raise * Add templates for selected resource extensions * Fix network forwarding rule initialization in QuantumManager * \_check\_image\_size returns are consistent * Fixed the perms on the linux test case file so that nose will run it * Add preparation for asynchronous instance faults * Add templates for selected resource extensions * Use more informative message when violating quota * Log it when we get a lock * removing TODO as we support Windows+XenServer and have no plans to support quiesce or VSS at the moment * Adds network model and network info cache * Rename .nova-venv to .venv * revert using git for novaclient * Port nova.flags to cfg * Make cfg work on python 2.6 * Relax novaclient and remove redis dependency * Relax dependency on boto 1.9b and nova-adminclient * Make QuantumManager no longer depend on the projects table * Imported Translations from Launchpad * Fix for bug 901459 * Updated the test runner module with a sys.path insert so that tests run in and outside a virtual environment * Add ability to see deleted and active records * Set instance['host'] to the original host value on revert resize * Fix race condition in XenAPI when using .get\_all * Clean up snapshot metadata * Handle the 'instance' half of blueprint public-and-private-dns * Refactors periodic tasks to use a decorator * Add new cfg module * Remove extra\_context support in Flags * A more secure root-wrapper alternative * Remove bzr related code in tests/test\_misc * Change cloudServersFault to computeFault * Update associate\_floating\_ip to use instance objs * vm\_state:=error on driver exceptions during resize * Use system M2Crypto package on Oneiric, bug 892271 * Update compute manager so that finish\_revert\_resize runs on the source compute host. Fixes bug #900849 * First steps towards consolidating testing infrastructure * Remove some remnants of ChangeLog and vcsversion.py generation * Pass '-r' option to 'collie cluster status' * Remove remnants of babel i18n infrastructure * Fixes a typo preventing attaching RBD volumes * Remove autogenerated pot file * Make admin\_password keyword in compute manager run\_instance method match what we send in the compute API. Fixes bug #900591 * remove duplicate netaddr in nova/utils * cleanup: remove .bzrignore * add index to instance\_uuid column in instances * Add missing documentation for shared folder issue with unit tests and Python lock file * Updated nova-manage to work with uuid images Fixes bug 899299 * Add availabity\_zone to the refresh list * Document nova-tarball Jenkins job * Adds extension documentation for some but not all extensions * Add templates for selected resource extensions * EC2 rescue/unrescue is broken, bug 899225 * Better exception handling during run\_instance * Implement resize down for XenAPI * Fix for EC2 API part of bug 897164 * Remove some unused imports from db * Replacing instance id's in in xenapi.vmops and the xen plugin with instance uuids. The only references to instance id's left are calls to the wait\_for\_task() method. I will address that in another branch. 
blueprint: internal-uuids * Convert get\_lock in compute to use uuids * Fix to correctly report memory on Linux 3.X * Replace more cases of instance ids with uuids * Make run\_instance only support instance uuids * Updates simple scheduler to allow strict availability\_zone scheduling * Remove VIF<->Network FK dependancy * Adds missing image\_meta to rescue's spawn() calls * Bug #898290: iSCSI volume backend treats FLAGS.host as a hostname * split rxtx\_factor into network and instance\_type * Fixing get\_info method implementations in virt drivers to accept instance\_name instead of instance\_id. The abstract class virt.ComputeDriver defines get\_info as: def get\_info(self, instance\_name). blueprint: internal-uuids * Fixes bug 767947 * Remove unused ec2.action\_args * Fix typo: priviledges -> privileges * Bug #896997: nova-vncproxy's flash socket policy port is not configurable * Convert compute manager delete methods to objects * Removing line dos line endings in vmwareapi\_conn.py * reboot & rebuild to use uuids in compute manager * Fix for bug 887712 * Add NAT/gateway support to QuantumManager * Fix QuantumManager update\_dhcp calls * Fix RPC responses to allow None response correctly * Use uuids for compute manager agent update * power\_on/power\_off in compute manager to use uuids * Use uuids for file injection * removed logic of throwing exception if no floating ip * Adding an install\_requires to the setup call. Now you can pip install nova on a naked machine * Removing obsolete bzr-related clauses in setup.py * Makes rpc\_allocate\_fixed\_ip return properly * Templatize extension handling * start/stop in compute manager to use uuids * Updating {add,remove}\_fixed\_ip\_from\_instance in compute.api and compute.manager to use instance uuid instead of instance id. blueprint internal-uuids * Use instance uuids for consoles and diagnostics * Fixes bug 888649 * Fix Bug #891718 * Bug #897091: "nova actions" fails with HTTP 400 / TypeError if a server action has been performed * Bug #897054: stack crashes with AttributeError on e.reason if the server returns an error * Refactor a few things inside the xenapi unit tests * New docs: unit tests, Launchpad, Gerrit, Jenkins * Fix trivial fourth quote in docstring * Fix deprecation warnings * Fix for bug 894431 * Remove boot-from-volume unreachable code path (#894172) * reset/inject network info in compute to use uuid * Updating set\_admin\_password in compute.api and compute.manager to use instance uuids instead of instance ids. Blueprint internal-uuids * rescue/unrescue in compute manager to use uuids * Updated development environment docs * Call df with -k instead of -B1 * Make fakelibvirt python2.6 compatible * Clean up compute api * Updating attach/detach in compute.api and compute.manager to use instance uuid instead of instance id. blueprint internal-uuids * Change compute API.update() to take object+params * Use XMLDictSerializer for resource extensions * Updating {add,remove}\_security\_group in compute.api to use instance uuids instead of instance ids. 
blueprint internal-uuids * Extend test\_virt\_driver to also test libvirt driver * poll\_rebooting\_instances passes an instance now * Revert "Fixes bug 757033" * Put instances in ERROR state when scheduler fails * Converted README to RST format * Workaround xenstore race conditions * Fix a minor memory leak * Implement schedule\_prep\_resize() * Fixes bug 886263 * snapshot/backup in compute manager to use uuids * Fixes bug 757033 * Converting tests to use v2 * lock/unlock in compute manager to use uuids * suspend/resume in compute manager to use uuids * Refactor metadata code out of ec2/cloud.py * pause/unpause in compute manager to use uuids * Creating new v2 namespace in nova.api.openstack * Add a "libvirt\_disk\_prefix" flag to libvirt driver * Added RST docs on how to use gettext * Refactoring/cleanup of some view builders * Convert remaining calls to use instance objects * Make run instances respect availability zone * Replacing disk config extension to match spec * Makes sure gateways forward properly * Convert security\_group calls to use instance objs * Remove hostname update() logic in compute.API * Fixes bug 890206 * Follow hostname RFCs * Reference Ron Pedde's cleanup script for DevStack * Remove contrib/nova.sh and other stale docs * Separate metadata api into its own service * Add logging, error handling to the xenstore lib * Converting lock/unlock to use instance objects * Deepcopy optparse defaults to avoid re-appending multistrings (#890489) * install\_venv: apply eventlet patch correctly with python 2.7 (#890461) * Fix multistring flags default handling (#890489) * Fixing image create in S3ImageService * Defining volumes table to allow FK constraint * Converting network methods to use instance objects * Handle null ramdisk/kernel in euca-describe-images * Bind engine to metadata in migration 054 * Adding downgrade for migration 57 plus test * Log the URL to an image\_ref and not just the ID * Converting attach\_volume to use instance object * Converting rescue/unrescue to use instance objects * Converting inject\_file to use instance objects * Bug #888719: openvswitch-nova runs after firstboot scripts * Bug #888730: vmwareapi suds debug logging very verbose * Converting consoles calls to use instance objects * Converting fixed ip calls to use instance objects * Convert pause/unpause, sus/res to use instance obj * fix rebuild sha1 not string error * Verify security group parameters * Converting set password to use instance objects * Converting snapshot/backup to use instance objects * Refactor of QuotaError * Fix a notification bug when creating instances * Converting metadata calls to use instance objects * nova-manage: exit with status 1 if an image registration fails * Converting start and stop to use instance objects * Converting delete to use instance objects * Capture exceptions happening in API layer * Removed some old cruft * Add more error handling to glance xenapi plugin * Fixes bug 871877 * Replace libvirt driver's use of libxml2 with ElementTree * Extend fake image service to let it hold image data * Bug #887805 Error during report\_driver\_status(): 'LibvirtConnection' object has no attribute '\_host\_state' * More spelling fixes inside of nova * Fixes LP878319 * Fix exception reraising in volume manager * Adding Chuck Short to .mailmap * Undefine libvirt saved instances * Split compute api/manager tests within module * Workaround for eventlet bug with unit tests in RHEL6.1 * Apply M2Crypto fix for all Fedora-based distributions * Fix failing libvirt test (bug 
888250) * Spelling fixes in nova/api comments * Get MAC addresses from Melange * Refactor logging\_error into utils * Converting rebuild to use instance objects * Converting resize to use instance objects * Converting reboot to use instance objects * Reducing the number of compute calls to Glance * Remove duplicate method (2) * Move tests for extensions to contrib directory * Remove duplicate method * Remove debugging print * Adds extended status information via the Admin API to the servers calls * Wait until the instance is booted before setting VCPU\_params * changes logging reference in zone\_manager.py * Exception cleanup in scheduler * Fixing create\_vbd call per VolumeHelper refactoring * Switch glance XenAPI plugin to use urllib2 * Blueprint lasterror * Move failed instances to error state * Adding task\_states.REBOOTING\_HARD * Set task state to UPDATING\_PASSWORD when needed * Clean up docstrings for faults.Fault and it's usage * Fix typo in docstring * Add DHCP support to the QuantumManager and break apart dhcp/gateway * Change network delete to delete by uuid or cidr * Bug #886353: Faults raised by OpenStack API Resource handlers fail to be reported properly * Define faults.Fault.\_\_str\_\_ * Speed up tests a further 35 seconds * Removing duplicate kernel/ramdisk check in OSAPI * Remove unnecessary image list in OSAPI * Add auto-reloading JSON config file support to scheduler * Change floating-snat to float-snat * Allows non-admin users to use simple scheduler * Skip libvirt tests when libvirt not present * Correcting libvirt tests that were failing * Fix for launchpad bug #882568 * Gracefully handle Xen resize failure * Don't update database before resize * fix bug 816630 * Set nova-manage to executable Fixes LP885778 * Fixing immediate delete after boot on Libvirt * exception.KeypairNotFound usage correction * Add local storage of context for logging * Reserve memory/disk for dom0/host OS * Speed up tests yet another 45 seconds * APIs should not wait on scheduler for builds in single zone deployment * Added some documentation to db.api module docstring * Updated rst docs to include threading model * Adds documentation for Xen Storage Manager * Xen Storage Manager Volume Driver * Drop extra XML tests and remove \_json suffix from names * Fix empty group\_id to be considered invalid * Stop nova-ajax-console-proxy configuring its own logging * Bug 884863: nova logs everything to syslog twice * Log the exception when we get one * Use fat32 for Windows, linux-swap for Linux swap partitions * Fix KeyError when passed unknown format of time * flatten distributed scheduler * Bug #884534: nova-ajax-console-proxy crashes on shutdown * Bug 884527: ajax\_console\_proxy\_port needs to be an integer * Too much information is returned from POST /servers * Disable SQLite synchronous mode during tests * Creating uuid -> id mapping for S3 Image Service * Fix 'begining' typo in system usage data bug 884307 * Fixes lp883279 * Log original dropped exception when a new exception occurs * Fix lp:861160 -- newly created network has no uuid * Bug #884018: "stack help" prints stacktrace if it cannot connect to the server * Optional --no-site-packages in venv * fixes bug 883233. 
Added to Authors fix typo in scheduler/driver.py assert\_compute\_node\_has\_enough\_memory * Updated NoAuth to account for requests ending in / * Retry failed SQL connections (LP #876663) * Removed autogenerated API .rst files * Fix to a documentation generation script * Added code to libvirt backend to report state info * Adding bulk create fixed ips. The true issue here is the creation of IPs in the DB that are not currently used(we are building the entire block). This fix is just a bandaid, but it does cut ~25 seconds off of the quantum tests on my laptop * Fix overzealous use of faults.Fault() wrapper * Revert how APIs get IP address info for instances * Support server uuids with security groups * Support using server uuids when accessing consoles * Adding support for retrying glance image downloads * Fix deletion of instances without fixed ips * Speed up test suite by 20 seconds * Removed callback concept on VM driver methods: * Fix file injection for OSAPI rebuilds. Fixes 881649 * Replaces all references to nova.db.api with nova.db * venv: update distribute as well as pip * Fix undefined glance\_host in get\_glance\_client * Fix concurrency of XenAPI sessions * Server metadata must support server uuids * Add .gitreview config file for gerrit * Convert instancetype.flavorid to string * Make sure networks returned from get\_instance\_nw\_info have a label * Use UUIDs instead of IDs for OSAPI servers * Improve the liveness checking for services * Refactoring of extensions * Moves a-zone scheduling into simple scheduler * Adds ext4 and reiserfs to \_mount\_filesystem() * Remove nova dependency on vconfig on Linux * Upgrade pip in the venv when we build it * Fixes bug 872459 * Repartition and resize disk when marked as managed * Remove dead DB API call * Only log instance actions once if instance action logging is enabled (now disabled by default) * Start switching from gflags to optparse * Don't leak exceptions out to users * Fix EC2 test\_cloud timing issues * Redirects requests from /v#.# to /v#.#/ * Chain up to superclass tearDown in ServerActionsTest * Updated RST docs: bzr/launchpad -> git/github * Refactoring nova.tests.api.openstack.test\_flavors * Refactoring image and server metadata api tests * Refactoring nova.tests.api.openstack.test\_servers * Refactoring nova.tests.api.openstack.test\_images * Utility script that makes enforcing PEP8 within git's pre-commit hook as easy as possible * Add XML templates * Remove OSAPI v1.0 * Remove unused flag\_overrides from TestCase * Cancel any clean\_reboot tasks before issuing the hard\_reboot * Makes snapshots work for amis. Fixes bug 873156 * Xenapi driver can now generate swap from instance\_type * Adds the ability to automatically issue a hard reboot to instances that have been stuck in a 'rebooting' state for longer than a specified window * Remove redundant, dead code * Added vcpu\_weight to models * Updated links in the README that were out of date * Add INPUT chain rule for EC2 metadata requests (lp:856385) * Allow the user to choose either ietadm or tgtadm (lp:819997) * Remove VolumeDriver.sync\_exec method (lp:819997) * Adds more usage data to Nova's usage notifications * Fixes bug 862637 -- make instance\_name\_template more flexible * Update EC2 get\_metadata calls to search 'deleted': False. Fixes nova smoke\_tests!!! 
* Use new ip addr del syntax * Updating HACKING to make split up imports into three blocks * Remove RateLimitingMiddlewareTest * Remove AoE, Clean up volume code * Adds vcpu\_weight column to instance\_types table and uses this value when building XenServer instances * Further changes to the cleaner * Remove duplicated functions * Reference orphaned\_instance instead of instance * Continue to the next iteration of the loop if an instance is not found * Explicit errors on confirm/revertResize failures * Include original exception in ClassNotFound exception * Enable admin access to EC2 API server * Make sure unknown extensions return 404 * Handle pidfile exception for dnsmasq * Stop returning correct password on api calls * Restructure host filtering to be easier to use * Add support for header version parameter to specify API version * Set error state on spawn error + integration test * Allow db schema downgrades * moved floating ip db access and sanity checking from network api into network manager added floating ip get by fixed address added fixed\_ip\_get moved floating ip testing from osapi into the network tests where they belong * Adds a script that can automatically delete orphaned VDIs. Also had to move some flags around to avoid circular imports * Improve access check on images * Updating image progress to be more granular. Before, the image progress had only 2 states, 0 and 100. Now it can be 0, 25, 50 or 100 * Deallocate ip if build fails * Ensure non-default FLAGS.logfile\_mode is properly converted to an octet * Moving admin actions to extension * Fixes bug 862633 -- OS api consoles create() broken * Adds the tenant id to the create images response Location header Fixes bug 862672 * Fixes bug 862658 -- ec2 metadata issue getting IPs * Added ==1.0.4 version specifier to kombu in pip-requires to ensure tests pass in a clean venv * bug lp845714 * install\_venv: pip install M2Crypto doesn't work on Fedora * install\_venv: add support for distro specific code * install\_venv: remove versioned M2Crypto dependency * install\_venv: don't use --no-site-packages with virtualenv * install\_venv: pass the --upgrade argument to pip install * install\_venv: refactor out pip\_install helper * Replace socat with netcat * api.ec2.admin unit tests * Fixes Bug #861293 nova.auth.signer.Signer now honors the SignatureMethod parameter for SHA1 when creating signatures * Enforce snapshot cleanup * bug 861310 * Change 'recurse\_zones' to 'local\_zone\_only' * Fixes euca-describe-instances failing or not showing IPs * Fixes a test failure in master * Fixed bug lp850602. Adding backing file copy operation on kvm block migration * Add nova-all to run all services * Snapshots/backups can no longer happen simultaneously. Tests included * Accept message as sole argument to NovaException * Use latest version of SQLAlchemy * Fix 047 migration with SQLAlchemy 0.7.2 * Beef up nova/api/direct.py tests * Signer no longer fails if hashlib.sha256 is not available. test\_signer unit test added * Make snapshots private by default * use git config's review.username for rfc.sh * Raise InsufficientFreeMemory * Adding run\_test.sh artifacts to .gitignore * Make sure options is set before checking managed\_disk setting. Fixes bug 860520 * compute\_api create\*() and schedulers refactoring * Removed db\_pool complexities from nova.db.sqlalchemy.session. Fixes bug 838581 * Ensure minRam and minDisk are always integers * Call endheaders when auth\_token is None. 
Fixes bug 856721 * Catch ImageNotFound on image delete in OSAPI * Fix the grantee group loading for source groups * Add next links to images requests * put fully qualified domain name in local-hostname * Removing old code that snuck back in * Makes sure to recreate gateway for moved ip * Fix some minor issues due to premature merge of original code * \* Rework osapi to use network API not FK backref \* Fixes lp854585 * Allow tenant networks to be shared with domain 0 * Use ovs-vsctl iface-to-br to look up the bridge associated with the given VIF. This avoids assuming that vifX.Y is attached to xenbrY, which is untrue in the general case * Made jenkins email pruning more resilient * Fixing bug 857712 * Adds disk config * Adding xml schema validation for /versions resource * Fix bug 856664 overLimit errors now return 413 * Don't use GitPython for authors check * Fix outstanding pep8 errors for a clean trunk * Add minDisk and minRam to OSAPI image details * Fix rfc.sh's check for the project * Add rfc.sh to help with gerrit workflow * This patch adds flavor filtering, specifically the ability to flavor on minRam, minDisk, or both, per the 1.1 OSAPI spec * Add next links for server lists in OSAPI 1.1. This adds servers\_links to the json responses, and an extra atom:link element to the servers node in the xml response * Update exception.wrap\_exception so that all exceptions (not just Error and NovaException types) get logged correctly * Merging trunk * Adding OSAPI tests for flavor filtering * This patch adds instance progress which is used by the OpenStack API to indicate how far along the current executing action is (BUILD/REBUILD, MIGRATION/RESIZE) * Merging trunk * Fixes lp:855115 -- issue with disassociating floating ips * Renumbering instance progress migration * Fixing tests * Keystone support in Nova across Zones * trunk merge fixup * Fix keys in ec2 conversion to make sure not to use unicode * Adds an 'alternate' link to image views per 3.10 and 3.11 of http://docs.openstack.org/cactus/openstack-compute/developer/openstack-compute-api-1.1/content/LinksReferences.html * Typo * Fixing tests * Fixing tests * make sure kwargs are strings and not unicode * Merging trunk * Adding flavor filtering * Instance deletions in Openstack are immediate. 
This can cause data to be lost accidentally * Makes sure ips are moved on the bridge for nodes running dnsmasq so that the gateway ip is always first * pep8 * add tests and fix bug when no ip was set * fix diverged branch * migration conflict fixed * clean up based on cerberus review * clean up based on cerberus review * Remove keystone middlewares * fix moving of ips on flatdhcp bridge * Merged trunk * merged trunk * update floating ips tests * floating ip could have no project and we should allow access * actions on floating IPs in other projects for non-admins should not be allowed * floating\_ip\_get\_by\_address should check user's project\_id * Pep8 fixes * Merging trunk * Refactoring instance\_type\_get\_all * remove keystone url flag * merge trunk, fix conflicts * remove keystone * Include 'type' in XML output * Minor cleanup * Added another unit test * Fixed unit tests with some minor refactoring * Fix the display of swap units in nova manage * Refactored alternate link generation * pep8 fixes * Added function to construct a glance URL and unit test * merge from trunk * convert images that are not 'raw' to 'raw' during caching to node * show swap in Mb in nova manage * Address Soren's comments: \* clean up temp files if an ImageUnacceptable is going to be raised Note, a qemu-img execution error will not clean up the image, but I think thats reasonable. We leave the image on disk so the user can easily investigate. \* Change final 2 arguments to fetch\_to\_raw to not start with an \_ \* use 'env' utility to change environment variables LC\_ALL and LANG so that qemu-img output parsing is not locale dependent. Note, I considered the following, but found using 'env' more readable out, err = utils.execute('sh', '-c', 'export LC\_ALL=C LANG=C && exec "$@"', 'qemu-img', 'info', path) * Add iptables filter rules for dnsmasq (lp:844935) * create disk.local the same way ephemerals are created (LP: #851145) * merge with trunk r1601 * fix call to gettext * Fixed --uuid network command in nova-manage to desc to "uuid" instead of "net\_uuid" * removes warning set forth in d3 for deprecated setting of bridge automagically * Update migration 047 to dynamically lookup the name of the instance\_id fkey before dropping it. We can't hard code the name of the fkey since we didn't name it explicitly on create * added to authors cuz trey said I cant patch otherwise! * Fixed --uuid network command in nova-manage to desc to "uuid" instead of "net\_uuid" * merged with trunk * Update migration 047 to dynamically lookup the name of the instance\_id fkey before dropping it. We can't hard code the name of the fkey since we didn't name it explicitly on create * oops, add project\_id and 'servers' to next links * Fixes migration for Mysql to drop the FK on the right table * Reverted some changes to instance\_get\_all\_by\_filters() that was added in rev 1594. An additional argument for filtering on instance uuids is not needed, as you can add 'uuid: uuid\_list' into the filters dictionary. Just needed to add 'uuid' as an exact\_match\_filter. This restores the filtering to do a single DB query * fix syntax error in exception, remove "Dangerous!" comment * merged trunk and resolved conflict * run the alter on the right table * fix unrelated pep8 issue in trunk * use dictionary format for exception message * fix a test where list order was assumed * Removed the extra code added to support filtering instances by instance uuids. Instead, added 'uuid' to the list of exact\_filter\_match names. 
Updated the caller to add 'uuid: uuid\_list' to the filters dictionary, instead of passing it in as another argument. Updated the ID to UUID mapping code to return a dictionary, which allows the caller to be more efficient... It removes an extra loop there. A couple of typo fixes * Reworked the export command to be nova-manage shell export --filename=somefile * Adds the ability to automatically confirm resizes after the \`resize\_confirm\_window\` (0/disabled by default) * use '\_(' for exception messages * PEP8 cleanup * convert images that are not 'raw' to 'raw' during caching to node * now raising instead of setting bridge to br100 and warning as was noted * \* Remove the foreign key and backrefs tying vif<->instance \* Update instance filtering to pass ip related filters to the network manager \* move/update tests * Adds an optional flag to force dhcp releases on instance termination. This allows ips to be reused without having to wait for the lease to timeout * remove urllib import * Fixing case where OSAPI server create would return 500 on malformed body * Fix the issue with the new dnsmasq where it tries and fails to bind to ipv6 addresses * Merging trunk * Renaming progress migration to 47 * merge with trunk * Added unit test * Corrected the status in DB call * don't try to listen on ipv6 addresses, or new dnsmasq goes boom * make our own function instead of using urllib.urlencode since we apparently don't suppor urlencoded strings yet * Merged trunk * remove unused import * merge the sknurt * remove the polymorph * Fix typo in comment * Fixes the handling of snapshotting in libvirt driver to actually use the proper image type instead of using raw for everything. Also cleans up an unneeded flag. Based on doude's initial work * merge with trunk * removing extra newline * catching AttributeError and adding tests * Remove vestigial db call for fixed\_ips * Fixes the user credentials for installing a config-drive from imageRef * Some Linux systems can also be slow to start the guest agent. This branch extends the windows agent timeout to apply to all systems * remove extra line * get the interface using the network and instance * flag typo * add an optional flag to force dhcp release using dnsmasq-utils * Fix user\_id, project\_id reference for config\_drive with imageRefs * Fix a bug that would make spawning new instances fail if no port/protocol is given (for rules granting access for other security groups) * When swap is specified as block device mapping, its size becomes 0 wrongly. This patch make it set to correct size according to instance\_type * Fix pep8 issues * fixed grant user, added stdout support * This changes the interpretation of 'swap' for an instance-type to be in MB rather than GB * Fixing list prepend * Merging trunk * create disk.local the same way ephemerals are created (LP: #851145) * Fix failing test * Authorize to start a LXC instance withour, key, network file to inject or metadata * Update the v1.0 rescue admin action and the v1.1 rescue extension to generate 'adminPass'. Fixes an issue where rescue commands were broken on XenServer. 
lp#838518 * pep8 * merge the trunks * update tests to return fake\_nw\_info that is valid for the pre\_live\_migrate * make sure to raise since the tests require it * Pep8 Fix * Update test\_volumes to use FLAGS.password\_length * Zero out the progress when beginning a resize * Adding migration progress * Only log migration info if they exist * remove getting fixed\_ips directly from the db * removed unused import * Fixes libvirt rescue to use the same strategy as xen. Use a new copy of the base image as the rescue image. It leaves the original rescue image flags in, so a hand picked rescue image can still be used if desired * Fixing tests, PEP8 failures * fix permissions * Add a FakeVirDomainSnapshot and return it from snapshotCreateXML. Fixes libvirt snapshot tests * merge the trunks * Merged trunk * I am using iputils-arping package to send arping command. You will need to install this package on the network nodes using apt-get command apt-get install iputils-arping * Removed sudo from the arguments * Add a FakeVirDomainSnapshot and return it from snapshotCreateXML. Fixes libvirt snapshot tests * merge from trunk * Make sure grantee\_group is eagerly loaded * Merged trunk * compute/api: swap size issue * Update exception.wrap\_exception so that all exceptions (not just Error and NovaException types) get logged correctly * Removes the on-disk internal libvirt snapshot after it has been uploaded to glance * cleaned up * remove debugging * Merging trunk * Allowing resizes to the same machine * trunk merge * updates Exception.NoMoreFixedIps to subclass NovaException instead of Error * NoMoreFixedIps now subclasses NovaException instead of Error * merge trunk * was trying to create the FK when Should have been dropping * pep8 * well since sqlalchemy-migrate and sqlalchemy can't agree on what the FK is called, we fall back on just manually dropping it * tests working again * the table is the table for the reason its a table * uhh dialect doesn't exist, beavis * update comment * if no public-key is given (--key), do not show public-keys in metadata service * it merges the trunk; or else it gets the conflicts again * exceptions properly passed around now * merge with trunk at revno 1573 * add the fake\_network Manager to prevent rpc calls * This makes the OS api extension for booting from volumes work. 
The \_get\_view\_builder method was replaced in the parent class, but the BootFromVolume controller was not updated to use the new method * remove undedded imports and skips * pep8 fixes * Added a unit test * pass-through all other parameters in next links as well * update for the id->uuid flip * Merged trunk * Adding flavor extra data extension * Merged trunk * fix test * revert last change * Added virt-level support for polling unconfirmed resizes * build the query with the query builder * Removing toprettyxml from OSAPI xml serialization in favor of toxml * use uuids everywhere possible * make sure to use the uuid * update db api for split filterings searches * update tests * delete the internal libvirt snapshot after it is saved to glance * cleanup prints in tests * cleanup prints in tests * Add a simple test for the OS boot from volume api * get rid of debugs * Merged from trunk and resolved conflicts * Execute arping command using run\_as\_root=True instead of sudo * Return three rules for describe\_security\_groups if a rule refers to a foreign group, but does not specify protocol/port * pep8 issues * added xml support for servers\_list in response with tests * Merged trunk * added servers\_links in v1.1 with tests * added build\_list to servers controllers and view builder and kept all old tests passing * The 1.1 API specifies that two vendor content types are allowed in addition to the standard JSON and XML content types * pep8 * tests are back * PEP8 fix * Adding progress * In the unlikely case of an instance losing a host, make sure we still delete the instance when a forceDelete is done * 0 for the instance id is False ;) * Cleanup state management to use vm\_state instead of task\_state Add schedule\_delete() method so delete() actually does what it says it does * merge trunk * write out xml for rescue * fix up the filtering so it does not return duplicates if both the network and the db filters match * fix rescue to use the base image, reset firewall rules, and accept network\_info * make sure to pass in the context * move the FakeNetworkManager into fake\_network * Fix issue where floating ips don't get recreated when a network host reboots * ip tests were moved to networking * add tests * fix typo * allow matching on fixed\_ip without regex and don't break so all results are reported * add case where vif may not have an instance\_id associated with it * fix typo * Initial pass at automatically confirming resizes after a given window * Use the correct method to get a builder * merge trunks * pep8 * move ip filtering over to the network side * fix pep8 whitespace error * add necessary fields to flavor.rng schema * get all the vifs * get all the vifs * make sure we are grabbing out just the ids * flavor\_elem.setAttribute -> flavor\_elem.set, flavor -> flavor\_dict * minor changes to credentials for the correct format * Don't report the wrong content type if a mapped type doesn't exist * add stubs for future tests that need to be written * Test both content types for JSON and XML * Remove unnecessary vendor content types now that they are mapped to standard content types automatically * Add copyright * Map vendor content types to their standard content type before serializing or deserializing. 
This is so we don't have to litter the code with both types when they are treated identically * exporting auth to keystone (users, projects/tenants, roles, credentials) * make xml-api tests pass * update variable name after merge: flavor\_node -> flavor\_elem * resolve conflicts / merge with trunk revno 1569 * Fixes an issue where 'invalid literal for int' would occur when listing images after making a v1.1 server snapshot (with a UUID) * fixed tests * removing toprettyxml * add attributes to xml api * Remove debugging * Update test\_libvirt so that flags and fakes are used instead of mocks for utils.import\_class and utils.import\_object. Fixes #lp849329 * fix the test so that it fakes out the network * fix white space for pep8 * fix test\_extensions test to know of new extension FlavorExtraData * add extension description for FlavorExtraData * Adding migration for instance progress * Make tests pass * no need for the instance at all or compute * bump the migration * remove unused import, make call to network api to get vifs for the instance * merge the trunk * skip a bunch of tests for the moment since we will need to rework them * remove the vif joins, some dead code, and the ability to take in some instances for filtering * allow passing in of instances already * run the instances filter through the network api first, then through the db * add get\_vifs\_by\_instance and stub get\_instance\_ids\_by\_ip\_filter * change vifs to rpc call and add instance ids by ip * Multi-NIC support for vmwareapi virt driver in nova. Does injection of Multi-NIC information to instances with Operating system flavors Ubuntu, Windows and RHEL. vmwareapi virt driver now relies on calls to network manager instead of nova db calls for network configuration information of instance. Re-oranized VMWareVlanBridgeDriver and added session parmeter to methods to use existing session. Also removed session creation code as session comes as argument. Added check for flat\_inject flag before attempting an inject operation * last of the api.openstack.test\_images merge fixes * pep8 fixes * trunk merge * makes sure floating addresses are associated with host on associate so they come back * Deprecate aoe in preperation for removal in essex * Only allow up to 15 chars for a Windows hostname * pep8 * deprecate aoe * Fix instance rebooting (lp847604) by correcting a malformed cast in compute.api and an incorrect method signature in the libvirt driver * Fix mismerge * make tests pass * This patch teaches virt/libvirt how to format filesystem on ephemeral device depending on os\_type so that the behaviour matches with EC2's. Such behaviour isn't explicitly described in the documentation, but it is confirmed by checking realy EC2 instances. This patch introduces options virt\_mkfs as multistring. Its format is --virt\_mkfs== When creating ephemeral device, format it according to the option depending on os\_type. This addresses the bugs, https://bugs.launchpad.net/nova/+bug/827598 https://bugs.launchpad.net/nova/+bug/828357 * Test new vendor content types as well * Only allow up to 15 chars for a Windows hostname * Split accept tests to better match the name of the test * Remove debugging print * Inject hostname to xenstore upon creation * Update test\_libvirt so that flags and fakes are used instead of mocks for utils.import\_class and utils.import\_object. 
Fixes #lp849329 * interpret 'swap' to be in MB, not in GB * Actually test expected matches received * Test new content-types * This branch changes XML Serializers and their tests to use lxml.etree instead of minidom * add additional data to flavor's ViewBuilder * Inject hostname to xenstore upon creation * drop the virtual\_interfaces key back to instances * - remove translation of non-recognized attributes to user metadata, now just ignored - ensure all keys are defined in image dictionaries, defaulting to None if glance client doesn't provide one - remove BaseImageService - reorganize some GlanceImageService tests * And again * Update MANIFEST.in to match directory moves from rev1559 * we're back * Update MANIFEST.in to match directory moves from rev1559 * Moving tests/test\_cloud.py to tests/api/ec2/test\_cloud.py. They are EC2-specific tests, so this makes sense * Same as last time * Made tests version version links more robust * PEP8 cleanup * PEP8 cleanup * PEP8 cleanups * zone manager tests working * fixing import * working on getting tests back * relocating ec2 tests * merging trunk; resolving conflicts * Correctly map image statuses from Glance to OSAPI v1.1 * pep8 fixes in nova/db/sqlalchemy/api.py and nova/virt/disk.py * Add support for vendor content types * pep8 fixes * merging trunk; resolving conflicts * Update GlanceClient, GlanceImageService, and Glance Xen plugin to work with Glance keystone * Fix typo (woops) * pep8 fix * Some arches dont have dmidecode, check to see if libvirt is capable of running rather getInfo of the arch its running on * merging parent branch lp:~rackspace-titan/nova/glance-client-keystone * adding tests for deleted and pending\_delete statuses * Fixes rogue usage of sudo that crept in * fixups * remove unused dep * add test for method sig * parent merge * migration move * bug fixes * merging trunk * Fixes shutdown of lxc containers * Make quoting consistent * Fix rogue usage of 'sudo' bypassing the run\_as\_root=True method * trunk merge * region name * tweaks * fix for lp847604 to unbreak instance rebooting * use 'qemu-image resize' rather than 'truncate' to grow image files * When vpn=true in allocate ip, it attempts to allocate the ip that is reserved in the network. Unfortunately fixed\_ip\_associate attempts to ignore reserved ips. This fix allows to filter reserved ip address only when vpn=True * Do not require --bridge\_interface for FlatDHCPManager (lp:844944) * Makes nova-vncproxy listen for requests on the queue like it did before the bin files were refactored * Update GlanceClient, GlanceImageService, and Glance Xen plugin to work with Glance keystone * api/ec2/ebs: make metadata returns correct swap and ephemeral0 * api/ec2: make get\_metadata() return correct mappings * virt/libvirt: format ephemeral device and add fs label when formating ext3 fs * Fix spelling mistake * Stock zones follows a fill-first methodology—the current zone is filled with instances before other zones are considered. This adds a flag to nova to select a spread-first methodology. The implementation is simply adding a random.shuffle() prior to sorting the list of potential compute hosts by weights * Pass reboot\_type (either HARD or SOFT) to the virt layers from the API * merging trunk * fixing image status mapping * don't need random in abstract\_scheduler.py anymore.. 
* pull-up from trunk; move spread\_first into base\_scheduler.py * trunk merge * adding auth tokens to child zone calls * Add comment to document why random.shuffle() works * Merged trunk * Make whitespace consistent * Use triple quotes for docstrings to be consistent * Remove the unnecessary sudo from qemu-img as it is unneeded and doesn't work with our current packaging * Remove chanes\_since and key\_name from basic server entity * Merged trunk * remove extra line for pep8 * remove unnecessary qemu-img flag, use base image type by default * shorten comment to < 79 chars * merged rbp * remove sudo from qemu-img commands * adds a fake\_network module to tests to generate sensible network info for tests. It does not require using the db * Adding a can\_read\_deleted filter back to db.api.instance\_get\_all\_by\_filters that was removed in a recent merge * removing key\_name and config\_drive from non-detailed server entity * Authorize to start a LXC instance withour, key, network file to inject or metadata * Open Essex (switch version to 2012.1) * Last Diablo translations for Nova * Open Essex (switch version to 2012.1) * Last Diablo translations * pep 8 * Fixing security groups stuff * put key into meta-data, not top level 'data' * metadata key is 'public-keys', not 'keys' * fix for lp844364: fix check for fixed\_ip association in os-floating-ips * if no public-key is given (--key), do not show public-keys in metadata service * NetworkManager's add\_fixed\_ip\_to\_instance calls \_allocate\_fixed\_ips without vpn or requested\_networks parameters. If vpn or requested\_networks is not provided to the \_allocate\_fixed\_ips method, it throws an exception. This issue is fixed now * Merged trunk * First pass at adding reboot\_type to reboot codepath * child zone queries working with keystone now * Added docstring to explain usage of reserved keyword argument * One more bug fix to make zones work in trunk. Basic problem is that in novaclient using the 1.0 OSAPI, servers.create() takes an ipgroups argument, but when using the 1.1 OSAPI, it doesn't, which means booting instances in child zones won't work with OSAPI v1.0. This fix works around that by using keyword arguments for all the arguments after the flavor, and dropping the unused ipgroups argument * Fixes the reroute\_compute decorator in the scheduler API so that it properly: * make check for fixed\_ip association more defensive * Fix lp:844155 * Changing a behavior of update\_dhcp() to write out dhcp options file. This option file make dnsmasq offer a default gateway to only NICs of VM belonging to a network that the first NIC of VM belongs to. So, first NIC of VM must be connected to a network that a correct default gateway exists in. 
By means of this, VM will not get incorrect default gateways * merged trunk * merging trunk * merging trunk * merged trunk * Make weigh\_hosts() return a host per instance, instead of just a list of hosts * converting fix to just address ec2; updating test * Do not attempt to mount the swap VDI for file injection * Add a NOTE() * Merged trunk * Use .get instead * Do not attempt to mount the swap VDI for file injection * pull-up from trunk * pull-up from trunk * pull-up from trunk * adding can\_read\_deleted back to db api * Clean up shutdown of lxc containers * Cleanup some more comments * Cleanup some comments * fixes vncproxy service listening on rabbit * added tests for failure cases talking with zones * This code contains contains a new NetworkManager class that can leverage Quantum + Melange * comment fix * typo trying to raise InstanceNotFound when all zones returned nothing * create a new exception ZoneRequestError to use for returning errors when zone requests couldn't complete * pep8 fix for tests/api/openstack/test\_servers.py which is an issue in trunk * catch exceptions from novaclient when talking to child zones. store them and re-raise if no other child zones return any results. If no exceptions are raised but no results are returned, raise a NotFound exception * added test to cover case where no local hosts are available but child hosts are * remove the short circuit in abstract scheduler when no local hosts are available * fix for lp844364: improve check for fixed\_ip association * Ensure restore and forceDelete don't do anything unless the server is waiting to be reclaimed * actually shuffle the weighted\_hosts list.. * Check task\_state for queued delete * spread-first strategy * Make sure instance is deleted before allowing restore or forceDelete * Add local hostname to fix Authors test * delete\_instance\_interval -> reclaim\_instance\_interval * PEP8 cleanup * Restart compute with a lower periodic\_interval to make test run faster * merge trunk * properly handle the id resetters * removed vestige * pull-up from trunk * fix a couple of typos in the added unit test * modified unit tests, set use\_single\_default\_gateway flag to True whereever needed instead of setting it in the init method * exclude net tag from host\_dhcp if use\_single\_default\_gateway flag is set to false * forgot \_id * had used wrong variable * Fixes a case where if a VIF is returned with a NULL network it might not be able to be deleted. Added test case for that fix * Fix for LP Bug #837867 * weigh\_hosts() needs to return a list of hosts for the instances, not just a list of hosts * Merged trunk * Set flat\_injected to False by default * changed the fixed\_ip\_generator * PEP8 cleanup * Wait longer for all agents, not just Windows * merged trunk * updated floating\_ip generation * Tests for deferred delete, restore and forceDelete * An AMI image without ramdisk image should start * Added use\_single\_default\_gateway to switch from multiple default gateways to single default gateway * Fixed unit test * reverting change to GlanceImageService.\_is\_image\_available * At present, the os servers.detail api does not return server.user\_id or server.tenant\_id. 
This is problematic, since the servers.detail api defaults to returning all servers for all users of a tenant, which makes it impossible to tell which user is associated with which server * reverting xenapi change * Micro-fix; "exception" was misspelled as "exceptions" * Fix a misspelling of "exception" * revert changes to display description * merged trunk * novaclient v1\_0 has an ipgroups argument, but novaclient v1\_1 doesn't * Set flat\_injected to False by default * Fixes an issue where 'invalid literal for int' would occur when listing images after making a v1.1 server snapshot (with a UUID) * further cleanup * Default to 0 seconds (off) * PEP8 cleanups * Include new extension * Implement deferred delete of instances * trunk merge * cleaning up tests * zone name not overwritten * Update the v1.0 rescue admin action and the v1.1 rescue extension to generate 'adminPass'. Fixes an issue where rescue commands were broken on XenServer. lp#838518 * fix a mistaking of dataset and expected values on small test * fix a mistaking of deletion in ensure\_floating\_forward * revert codes for db * correct a method to collect instances from db add interface data to test * added me to Authors * meeging trunk * format for pep8 * format for pep8 * implement unit test for linux\_net * Adjust test\_api to account to multiple rules getting returned for a single set rule * Clean up security groups after use * Make a security group rule that references another security group return ipPermission for each of tcp, udp, and icmp * Multi-NIC support for vmwareapi virt driver in nova. Does injection of Multi-NIC information to instances with Operating system flavors Ubuntu, Windows and RHEL. vmwareapi virt driver now relies on calls to network manager instead of nova db calls for network configuration information of instance. Ensure if port group is properly associated with vlan\_interface specified in case of VLAN networking for instances. Re-oranized VMWareVlanBridgeDriver and added session parmeter to methods to use existing session. Also removed session creation code as session comes as argument. Added check for flat\_inject flag before attempting an inject operation. Removed stale code from vmwareapi stubs. Also updated some comments to be more meaningful. Did pep8 and pylint checks. Tried to improve pylint score for newly added lines of code * Fix bug #835919 that output a option file for dnsmasq not to offer a default gateway on second vif * Accidentally added instance to security group twice in the test. 
Fixed * Minor cleanup * Fixing xml serialization of limits resource * correct floating ip id to increment in fake\_network * Add iptables filter rules for dnsmasq * Merged trunk * Change non E ascii characte * Launchpad automatic translations update * Instance record is not inserted in db if the security group passed to the RunInstances API doesn't exists * Added unit tests to check instance record is not inserted in db when security groups passed to the instances are not existing * removed unneeded import * rick nits * alex meade issues * Added list of security groups to the newly added extension (Createserverext) for the Create Server and Get Server detail responses * default description to name * use 'qemu-image resize' rather than 'truncate' to grow image files * remove extra description stuff * fix pep8 violation * feedback from jk0's review, including removing a lot of spaces from docstrings * revert description changes, use metadata['description'] if it is set to populate field in db * merged trunk * change db migrate script again to match other similar scripts * Fix for LP Bug #839269 * move networks declarations within upgrade/downgrade methods * more review cleanup * remove import of 'fake' from nova manager, now that we've moved that to test\_quantum.py * Fixes a small bug which causes filters to not work at all. Also reworks a bit of exception handling to allow the exception related to the bug to propagate up * Email error again. Tired * Email error * Fixed review comments * Add documentation comment * pull-up from trunk * Forgot to handle return value * Add tests for flags 'snapshot\_image\_format' * Update snapshot image metada 'disk\_format' * Add flag 'snapshot\_image\_format' to select the disk format of the snapshot image generated with the libvirt driver * missing migration * Email contact error * Update Authors file * Merged trunk * Correct tests associated * Fix protocol-less security groups * Adding feedparser to pip-requires * Removing xml functions that are no longer called * Launchpad automatic translations update * Glance can now perform its own authentication/authorization checks when we're using keystone * import filters in scheduler/host\_filter.py so default\_host\_filter gets added to FLAGS; rework SchedulerManager() to only catch missing 'schedule\_' attribute and report other missing attributes * move content of quantum/fake.py to test\_quantum.py in unit testing class (most original content has been removed anyway) * melange testing cleanup, localization cleanup * remove references to MelangeIPAMTest, as they cannot be used yet * Deleted debug messages * Resolved conflicts and fixed pep8 errors * Fix a few references to state\_description that slipped through * added unit tests and cleanup of import statements * renamed fake\_network\_info.py * trunk merge * moved cidr\_v6 back * Probably shouldn't leave that commented out * Added test for NULL network * Fixed lp835242 * Fixes for minor network manager issues centered around deleting/accessing instances which don't have network information set * remove extra references to state\_description * pull-up from trunk * merge unit test from Chris MacGown * Adds test for image.glance.GlanceImageService.\_is\_image\_available * - implements changes-since for servers resource - default sort is now created\_at desc for instances * undo change in setting q\_tenant\_id in quantum\_manager.create\_network * additional review cleanup * docstring cleanup * merging trunk * Fixes NotFound exceptions to show the proper 
instance id in the ec2 api * typo * more review cleanup * another commit from brad * add specific exceptions for quantum client. Fix doc-strings in client.py * merge brad's changes that address most review feedback * fix for lp838583 - fixes bug in os-floating-ips view code that prevents instance\_id from being returned for associated addresses * Accept keypair when you launch a new server. These properties would be stored along with the other server properties in the database (like they are currently for ec2 api) * Launchpad automatic translations update * merge trunk, fix tests * fix for lp838583 - return instance\_id for associated floating\_ips, add test * removing unnecessary imports * remove BaseImageService * pep8 * move GlanceImageService tests to proper module; remove translation of non-standard image attributes to properties; ensure all image properties are available, defaulting to None if not provided * merge trunk * Add comment for an uncommon failure case that we need to fix * Fix for LP Bug #838466 * Correctly yield images from glance client through image service * Simple usage extension for nova. Uses db to calculate tenant\_usage for specified time periods * Fix for LP Bug #838251 * merge trunk, fix conflict * Validates that user-data is b64 encoded * Updated VersionsAtomSerializer.index to use lxml.etree to generate atom feed * remove extra test * merged trunk * Fixed and improved the way instance "states" are set. Instead of relying on solely the power\_state of a VM, there are now explicitly defined VM states and VM task states which respectively define the current state of the VM and the task which is currently being performed by the VM * Updating test for xml to use lxml * expect key\_name attribute in 1.1 * change to use \_get\_key\_name to retrieve the key * Implements lp:798876 which is 'switch carrot to kombu'. Leaves carrot as the default for now... decision will be made later to switch the default to kombu after further testing. There's a lot of code duplication between carrot and kombu, but I left it that way in preparation for ripping carrot out later and to keep minimal changes to carrot * Disassociated previously associated floating ips when calling network\_api.associate\_floating\_ip. 
Also guard against double-association in the network.manager * adding support for limiting in image service; updating tests with fixture ids and marker support * trunk merge * merging trunk * fix keypairs stubs * add explicit message for NoMoreFloatingIps exception * fix for chris behrens' comment - move tenant\_id => project\_id mapping to compute.api.get\_all * moved key\_name per review * zone\_add fixed to support zone name * kludge for kombu 1.1.3 memory transport bug * merged trunk * Removed extraneous import and s/vm\_state.STOP/vm\_states.STOPPED/ * Merged trunk * Code cleanup * Use feedparser to parse the generated atom feeds in the tests for the versions resource * add test to verify 400 response when out of addresses * switched default to kombu per vishy * use kombu.connection.BrokerConnection vs kombu.connection.Connection so that older versions of kombu (1.0.4) work as well as newer * fix FloatingIpAlreadyInUse to use correct string pattern, convert ApiErrors to 400 responses * Fix for LP Bug #782364 * Fix for LP Bug #782364 * more logging info to help identify bad payloads * Removed test\_parallel\_builds in the XenAPI tests due to it frequently hanging indefinitely * logging change when rpc pool creates new connection * pep8 fix * make default carrot again and delay the import in rpc/\_\_init\_\_.py * Removed debug messages * Fix for LP Bug #837534 * add kombu to pip-requires and contrib/nova.sh * restore old way FLAGS.rpc\_backend worked.. no short name support for consistency * fix remaining tests * Update RequestContext so that it correctly sets self.is\_admin from the roles array. Additionally add a bit of code to ignore case as well * pep8, fix fakes * fix a bunch of direct usages of db in compute api * make two functions instead of fast flag and add compute api commands instead of hitting db directly * fixing bug * fixing short-ciruit condition * yielding all the images * merged trunk * changing default sort to created\_at * The exception 'RamdiskNotFoundForImage' is no longer used * With OS API, if the property 'ramdisk\_id' isn't set on the AMI image, Nova can not instantiate it. With EC2 API, the AMI image can be instantiate * adding an assert * Use getCapabilities rather than getInfo() since some versions of libvirt dont provide dmi information * supporting changes-since * Fix a bad merge on my part, this fixes rebuilds\! * disassociate floating ips before re-associating, and prevent re-association of already associated floating ips in manager * Update RequestContext so that it correctly sets self.is\_admin from the roles array. Additionally add a bit of code to ignore case as well * Merged trunk * remove unneeded connection= in carrot Consumer init * pep8 fix for test\_rpc\_common.py * fix ajax console proxy for new create\_consumer method * doc string cleanup * created nova/tests/test\_rpc\_common.py which contains a rpc test base class so we can share tests between the rpc implementations * ditched rpc.create\_consumer(conn) interface... instead you now do conn.create\_consumer(. * Update the EC2 ToToken middleware to use eventlet.green.httplib instead of httplib2. 
Fixes issues where the JSON request body wasn't getting sent to Keystone * remove brackets from mailmap entry * access db directly in networkmanagers's delete\_network method, so stubbed test call works correctly * more logging info to help identify bad payloads * In the XenAPI simulator, set VM.domid, when creating the instance initially, and when starting the VM * remove 'uuid' param for nova-manage network delete that I had add previously * add alias to mailmap * update file name for db migrate script after merge (again) * update file name for db migrate script after merge * merged trunk * Fixes this bug by removing the test. The test has no asserts and seems to be raising more problems than it could solve * Removed test\_parallel\_builds * Merged trunk * Increased migration number * Fixes lp:813864 by removing the broken assert. The assert was a check for isinstance of 'int' that should have been 'long'. But it doesn't appear this assert really belongs, anyway * Merged trunk * Adds assertIn and assertNotIn support to TestCase for compatibility with python 2.6 This is a very minimal addition which doesn't require unittest2 * support the extra optional arguments for msg to assertIn and assertNotIn * removed broken assert for abstract\_scheduler * pep8 fixes * fix for assertIn and assertNotIn use which was added in python 2.7. this makes things work on 2.6 still * merge trunk * restore fixed\_ip\_associate\_pool in nova/db/sqlalchemy.py to its original form before this branch. Figured out how to make unit tests pass without requiring that this function changes * remove unused rpc connections in test\_cloud and test\_adminapi * carrot consumer thread fix * add carrot/kombu tests... small thread fix for kombu * add doc-strings for all major modules * remove fake IPAM lib, since qmanager must now access nova DB directly * Update the EC2 ToToken middleware to use eventlet.green.httplib instead of httplib2. Fixes issues where the JSON request body wasn't getting sent to Keystone * fix nova/tests/test\_test.py * fix nova-ajax-console-proxy * fix test\_rpc and kombu stuff * always set network\_id in virtual\_interfaces table, otherwise API commands that show IP addresses get confused * start to rework some consumer stuff * update melange ipam lib to use network uuid, not bridge * fix issue with setting 'Active' caused by Quantum API changes. Other misc fixes * Bug #835952: pep8 failures do not cause the tests to fail * Start domid's at 1, not 0, to avoid any confusion with dom0 * use 'uuid' field in networks table rather than 'bridge'. Specify project\_id when creating instance in unit test * Bug #835964: pep8 violations in IPv6 code * In the XenAPI simulator, set VM.domid, when creating the instance initially, and when starting the VM * Bug #835952: pep8 failures do not cause the tests to fail * Bug #835964: pep8 violations in IPv6 code * Virtual Storage Array (VSA) feature. 
- new Virtual Storage Array (VSA) objects / OS API extensions / APIs / CLIs - new schedulers for selecting nodes with particular volume capabilities - new special volume driver - report volume capabilities - some fixes for volume types * fix FALGS typo * changes a few double quotes to be single, as the rest in the vicinity are * Default rabbit max\_retries to forever Modify carrot code to handle retry backoffs and obey max\_retries = forever Fix some kombu issues from cut-n-paste Service should make sure to close the RPC connection * Updated VersionsXMLSerializer and corresponding tests to use lxml * v1.0 of server create injects first users keypair * add tests to verify NotFound exceptions are wrapped with the proper ids * use db layer for aggregation * merged trunk * flag for kombu connection backoff on retries * more fixes * more work done to restore original rpc interfaces * merge changes from brad due to recent quantum API changes * Minor changes based on recent quantum changes * start of kombu implementation, keeping the same RPC interfaces * doubles quotes to single * changed format string in nova-manage * removed self.test ip and \_setup\_networking from libvirt * updated libvirt test * merge trunk * stubbed some stuff in test\_libvirt * removed create\_volumes, added log & doc comment about experimental code * reverted CA files * couple of pep8s * Tiny tweaks to the migration script * updated fake values * updated fake values * Merged trunk and fixed conflicts * updated fake values * updated fake values * forgot ) * update libvirt tests * Update compute API and manager so that the image\_ref is set before spawning the rebuilt instance. Fixes issue where rebuild didn't actually change the image\_id * added debug prints for scheduler * update libvirt * updated instance type fake model * added vcpus to instance flavor test model * added memory\_mb to instance flavor test model * forgot test print statements * misplaced comma.. * Update compute API and manager so that the image\_ref is set before spawning the rebuilt instance. Fixes issue where rebuild didn't actually change the image\_id * Add brad to Authors file * replace accidental deletion in nova-mange * rearrange imports * fix for quantum api changes, change nova-mange to have quantum\_list command * merge brad's fixes * add priority for static networks * driver: added vsa\_id parameter for SN call * merged with rev.1499 * cosmetic cleanup * Updated server and image XML serializers to take advantage of the addresses and metadata serializers * VSA code redesign. Drive types completely replaced by Volume types * merged trunk * Just a couple of small changes I needed to get the migrations working with SQLAlchemy 0.7.x on Fedora 16 * Minor fixes * check log file's mode prior to calling chmod * The fix for run\_iscsiadm in rev 1489 changed the call to use a tuple because values were being passed as tuples. Unfortunately a few calls to the method were still passing strings * Add a set of generic tests for the virt drivers. 
Update a bit of documentation to match reality * updated LimitsXMLSerializer to use etree and supply the xml declaration * merge underlying fix for testing * merged trunk * updated additional limits test * pep8 * pass all commands to run\_iscsiadm as a tuple * altered fake network model * Updated limits serialization tests to use etree and added limits schema * Test fixup after last review feedback commit * Fix glance image authorization check now that glance can do authorization checks on its own; use correct image service when looking for ramdisk, etc.; fix a couple of PEP8 errors * forget a return * review feedback * Fixed integrated.test\_xml to be more robust * typo * fixed a couple of syntax errors * Add bug reference * updated tests * updated libvirt tests to use fake\_network\_info * Bumped migration number * Merged trunk * Review feedback * pep8 * DRYed up code by moving \_to\_xml into XMLDictSerializer * updated addresses serializer to use etree instead of minidom * Added addresses schema * updated addresses xml serialization tests to use etree instead of minidom * Updated ServerXMLSerializer to use etree instead of minidom * added unit tests to instance\_types for rainy day paths * Reverted two mistakes when looking over full diff * Updated MetadataXMLSerializer to use etree instead of minidom * Added: - volume metadata - volume types - volume types extra\_specs * Added schemas Updated metadata tests to use etree instead of minidom * Servers with metadata will now boot on xenserver with flat\_injected==False * moved import up * Verify resize needs to be set * changing comment * fixing bug * merged trunk * Updated ImagesXMLSerializer to use etree instead of minidom * Set error state when migration prep fails * Removed invalid test * Removed RESIZE-CONFIRM hack * Set state to RESIZING during resizing.. * Merged trunk * Another attempt at fixing hanging test * Once a network is associated with project, I can’t delete this network with ‘nova-manage network delete’. As you know, I can delete network by scrubbing the project with ‘nova-manage project scrub’. However it is too much. The cause of this problem is there is no modify command of network attribute * Update paste config so that EC2 admin API defaults to noauth * merged with volume types (based on rev.1490). no code rework yet * merged with volume\_types. no code refactoring yet * merged with nova 1490 * added new tables to list of DBs in migration.py * removes french spellings to satisfy american developers * added virtio flag; associate address for VSA; cosmetic changes. Prior to volume\_types merge * stub\_instance fix from merge conflict * moved import to the top * fixing inappropriate rubyism in test code * Added fix for parallel build test * Fixed silly ordering issue which was causing tons of test failures * merged trunk * change snapshot msg too * forgot to add new extension to test\_extensions * Add me to Authors * added Openstack APIs for volume types & extradata * Add comments for associate/dissociate logic * Updated ImageXMLSerialization tests to use etree instead of minidom Fixed incorrect server entity ids in tests * Merged from trunk * Add names to placeholders of formatting * The notifiers API was changed to take a list of notifiers. 
Some people might want to use more than one notifier so hopefully this will be accepted into trunk * use dict.get for user\_id, project\_id, and display\_description in servers view as suggested by ed leaf, so that not all tests require these fields * Updated flavors xml serialization to use lxml instead of minidom * merge trunk, fix tests * fix more tests * Removed unused imports * Updated FlavorsXMLSerialization tests to use etree and validation instead of minidom * Merged from trunk * split test\_modify() into specific unit tests * Added DELETED status to OSAPI just in case * Fixes iscsiadm commands to run properly * Fixed issue where we were setting the state to DELETED before it's actually deleted * merged with rev.1488 * Merged trunk and fixed conflicts * added volume type search by extra\_spec * Fix for trying rebuilds when instance is not active * Fixed rebuild naming issue and reverted other fix which didn't fix anythin * Attempt to fix issue when deleting an instance when it's still in BUILD * Fix default hostname generator so that it won't use underscores, and use minus signs instead * merged with 1487 * pep8 compliant * Merged from trunk * - rebuilds are functional again - OSAPI v1.1 rebuild will accept adminPass or generate a new one, returning it in a server entity - OSAPI v1.0 will generate a new password, but it doesn't communicate it back to the user * Fix flag override in unit test * merged with rev.1485 * add rainy day test to to\_global fixed to\_global to catch correct error from incorrect mac addresses * Let's be more elegant * similar to lp828614: add rainy day test and fix exception error catch to AddrFormatError * check log file mode prior to chmod * added unit tests for version.py * Merged trunk * Fix for migrations * Conversion to SQLAlchemy-style * dict formatting * Commit without test data in migration * Commit with test data in migration * Do not require --bridge\_interface for FlatDHCPManager * Fix quotas migration failure * Fix flavorid migration failure * fixed indentation * adding xml serialization and handling instance not found * removing extraneous imports * pep8 * Thou shalt not use underscores in hostnames * Catch exception for instances that aren't there * pep8 fixes * Couple of fixes to the review feedback changes * Launchpad automatic translations update * Address code review feedback from Rick and Matt * removing print statement * added volume metadata APIs (OS & volume layers), search volume by metadata & other * Update paste config so that EC2 admin API defaults to noauth * cleanup * updating tests * fix iscsi adm command * Fix pep8 * Merged from trunk * added volume\_types APIs * Fix not found exceptions to properly use ec2\_ips for not found * Stub out the DB in unit test. Fix 'nova-manage network modify' to use db.network\_update() * rebuilds are functional again * Adds a use\_deprecated\_auth flag to make sure creds generated using nova-manage commands will work with noauth * Merged from upstream * Fixed some pep8 and pylint issues * Forgot to set the flag for the test * I added notifications decorator for each API call using monkey\_patching. 
By this merge, users can get API call notification from any modules * Fixes bug that causes 400 status code when an instance wasn't attached to a network * fix for rc generation using noauth * Fixed doc string * Merged from upstream * Switched list\_notifier to log an exception each time notify is called, for each notification driver that failed to import * updating tests * merging trunk * Fixed some docstring Added default publisher\_id flagw * Removed blank line * Merged with trunk * Fixed typo and docstring and example class name * Updated migration number * Move use\_ipv6 into flags. Its used in multiple places (network manager and the OSAPI) and should be defined at the top level * Merged trunk * PEP8 fixes * 'use the ipv6' -- 'use ipv6' * Move use\_ipv6 into flags. Its used in multiple places (network manager and the OSAPI) and should be defined at the top level * Refresh translations * This branch does the final tear out of AuthManager from the main code. The NoAuth middlewares (active by default) allow a user to specify any user and project id through headers (os\_api) or access key (ec2\_api) * Implements first-pass of config-drive that adds a vfat format drive to a vm when config\_drive is True (or an image id) * Launchpad automatic translations update * pulling all qmanager changes into a branch based on trunk, as they were previously stacked on top of melange * Moved migration and fixed tests from upstream * Merged trunk * Added the fixes suggested by Eric Windisch from cloudscaling.. * removing unnecessary tthing * merge trunk, resolve conflicts, fix tests * unindented per review, added a note about auth v2 * Our goal is to add optional parameter to the Create server OS 1.0 and 1.1 API to achieve following objectives:- * fixing exception logging * Fixes bug 831627 where nova-manage does not exit when given a non-existent network address * Move documentation from nova.virt.fake into nova.virt.driver * initial cut on volume type APIs * fix pep8 issue * Change parameters of 'nova-manage network modify'. Move common test codes into private method * Merged from trunk,resolved conflicts and fixed broken unit tests due to changes in the extensions which now include ProjectMapper * xml deserialization, and test fixes * syntax * update test\_network test\_get\_instance\_nw\_info() * remove extra spaces * Fixed conflict with branch * merged trunk * The FixedIpCommandsTestCase in test\_nova\_manage previously accessed the database. This branch stubs out the database for these tests, lowering their run time from 104 secs -> .02 secs total * some readability fixes per ja feedback * fix comment * Update a few doc strings. Address a few pep8 issues. Add nova.tests.utils which provides a couple of handy methods for testing stuff * Make snapshot raise InstanceNotRunning when the instance isn't running * change NoAuth to actually use a tenant and user * Added Test Code, doc string, and fixed pip-requiresw * Merged trunk * Ensure that reserve and unreserve exit when an address is not found * Simple usage extension for nova. 
Uses db to calculate tenant\_usage for specified time periods * Stubbed out the database in order to improve tests * logging as exception rather than error * Merged from upstream * Changed list\_notifier to call sys.exit if a notification driver could not be found * merged trunk * implemented tenant ids to be included in request uris * Add a generic set of tests for hypervisor drivers * Upstream merge * Added ability to detect import errors in list\_notifier if one or more drivers could not be loaded * Fix pep8 * delete debug code * Fixes for a number of tests * Use 'vm\_state' instead of 'state' in instance filters query * Merged with Dan to fix some EC2 cases * Add 'nova-manage network modify' command * Fixes/updates to make test\_cloud pass * Fix scheduler and integrated tests * Update migration number * Merged with Dan * Merged task\_state -> task\_states and fixed test\_servers test * Update virt/fake to correct power state issue * fix test\_servers tests * update test\_security\_group tests that have been added * Merged trunk * Renamed task\_state to task\_states.. * Ec2 API updates * merge with trunk * Fixing merge conflicts * Launchpad automatic translations update * Adds accessIPv4 and accessIPv6 to servers requests and responses as per the current spec * adding import * Fixes utils.to\_primitive (again) to handle modules, builtins and whatever other crap might be hiding in an object * fixing bug lp:830817 * added test for bad project\_id ... although it may not be used * added exception catch and test for bad project\_id * added exception catch for bad prefix and matching test * added exception catch and test for bad prefix * comment strings * added unit tests for versions.py * Added OS APIs to associate/disassociate security groups to/from instances * add/remove security groups to/from the servers as server actions * lp:828610 * removed leftover netaddr import * added rainy day test for ipv6 tests. fixed ipv6.to\_global to trap correct exception * Merged from trunk * pep8 * improve test coverage for instance types / flavors * Launchpad automatic translations update * Assorted fixes to os-floating-ips to make it play nicely with an in-progress novaclient implementation, as well as some changes to make it more consistent with other os rest apis. Changes include: * finished fake network info, removed testing shims * updated a maths * updated a maths * Merged trunk * Lots of modifications surrounding the OSAPI to remove any mention of dealing with power states and exclusively using vm\_states and task\_state modules. Currently there are still a number of tests failing, but this is a stopping place for today * who cares * added return * Merged from trunk and fixed review comments * fixed formatting string * typo * typo * typo * typo * typo * typo * added fake network info * Fixed review comments * Fixed typo * better handle malformed input, and add associated tests * Fixed typo * initial committ * Fixed NoneType returned bugw * merged trunk * Updated accessIPv4 and accessIPv6 to always be in a servers response * Fixed mistake on mergew * tweak to comment * Merged with trunkw * a few tweaks - remove unused member functions, add comment * incorporate feedback from brian waldon and brian lamar. 
Move associate/disassociate to server actions * merge from trunk * pep8 * Finished changing ServerXMLSerializationTest to use XML validation and lxml * Added monkey patching notification code function w * Updated test\_show in ServerXMLSerializationTest to use XML validation * vm\_state --> vm\_states * Next round of prep for keystone integration * merge from trunk * Removes the incorrect hard-coded filter path * Revert irrelevant changes that accidentally crept into this patch :( * add tenant\_id to api. without tenant\_id, admins can't tell which servers belong to which tenants when retrieving lists * Merged from trunk * Fixes primitive with builtins, modules, etc * fix test\_virtual interfaces for tenant\_id stuff * fix test\_rescue tests for tenant\_id changes * Fix unit test for the change of 'nova-manage network list' format * Add copyright notices * merged trunk * Define FLAGS.default\_local\_format. By default it's None, to match current expected \_create\_local * Fix config\_drive migration, per Matt Dietz * updated migration number * merge with trunk * Bump migration number * pep8 * Start improving documentation * Added uuid column in virtual\_interfaces table, and an OpenStack extension API for virtual interfaces to expose these IDs. Also set this UUID as one of the external IDs in the OVS vif driver * Move documentation from nova.virt.fake to nova.virt.driver * add key\_name/data support to server stub * add user\_id and description. without user\_id, there is no way for a tenant to tell which user created the server. description should be added for ec2 parity * merge * Bugfix for lp 828429. Its still not clear to me exactly how this code path is actually invoked when nova is used, so I'm looking for input on whether we should be adding a test case for this, removing the code as unused, etc. Thanks * remove security groups, improve exception handling, add tests * Merged trunk * merged trunk * Currently, rescue/unrescue is only available over the admin API. Non-admin tenants also need to be able to access this functionality. This patch adds rescue functionality over an API extension * Makes all of the binary services launch using the same strategy.  \* Removes helper methods from utils for loading flags and logging  \* Changes service.serve to use Launcher  \* Changes service.wait to actually wait for all the services to exit  \* Changes nova-api to explicitly load flags and logging and use service.serve \* Fixes the annoying IOError when /etc/nova/nova.conf doesn't exist * tests pass * Fixes issue where ServersXMLSerializer was missing a method for update actions * follow same pattern as userdata (not metadata apporach) * rename the test method * Updated docs for the recent scheduler class changes * Passes empty string instead of None to MySQLdb driver if the DB password isn't set * merged trunk * added volume metadata. Fixed test\_volume\_types\_extra\_specs * declare the use\_forwarded\_for flag * merge trunk * Fixes lp828207 * Added unit test * allow specification of key pair/security group info via metadata * Fixed bug in which DescribeInstances was returning deleted instances. Added tests for pertinent api methods * Accept binary user\_data in radix-64 format when you launch a new server using OSAPI. This user\_data would be stored along with the other server properties in the database. 
Once the VM instance boots you can query for the user-data to do any custom installation of applications/servers or do some specific job like setting up networking route table * added unittests for volume\_extra\_data * Removed extra parameter from the call to \_provision\_resource\_locally() * resolve conflicts after upstream merge * Change the call name * Cleanup the '\_base' directory in libvirt tests * Oops * Review feedback * Added 'update' method to ServersXMLSerializer * Added more unit testcases for userdata functionality * Remove instances.admin\_pass column * merged trunk * Merged with trunk * typo * updated PUT to severs/id to handle accessIPv4 and accessIPv6 * DB password should be an empty string for MySQLdb * first cut on types & extra-data (only DB work, no tests) * merge from trunk * Better docstring for \_unrescue() * Review feedback * Need to pass the action * Updated the distributed scheduler docs with the latest changes to the classes * Syntax error * Moved compute calls to their own handler * Remove old comment * Don't send 'injected\_files' and 'admin\_pass' to db.update * fix docstrings in new api bins * one more * fix typo * remove signal handling and clean up service.serve * add separate api binaries * more cleanup of binaries per review * Changed the filter specified in \_ask\_scheduler\_to\_create\_instance() to None, since the value isn't used when creating an instance * Minor housecleaning * Fix to return 413 for over limit exceptions with instances, metadata and personality * Refactored a little and updated unit test * minor cleanup * dhcpbridge: add better error if NETWORK\_ID is not set, convert locals() to static dict * Added the fix for the missing parameter for the call to create\_db\_entry\_for\_new\_instance() * Updated a number of items to pave the way for new states * Corrected the hardcoded filter path. Also simplified the filter matching code in host\_filter.py * Added rescue mode extension * Fixed issue where accessIP was added in none detail responses * Updated ServersXMLSerializer to allow accessIPv4 and accessIPv6 in XML responses * Merged trunk * Added accessIPv4 and accessIPv6 to servers view builder Updated compute api to handle accessIPv4 and 6 * Fixed several logical errors in the scheduling process. Renamed the 'ZoneAwareScheduler' to 'AbstractScheduler', since the zone-specific designation is no longer relevant. Created a BaseScheduler class that has basic filter\_hosts() and weigh\_hosts() capabilities. Moved the filters out of one large file and into a 'filters' subdirectory of nova/scheduler * Merged trunk * Adds the enabled status of a host when XenServer reports its host's capabilities. This allows the scheduler to ignore hosts whose enabled is False when considering where to place a new instance * merge trunk and fix unit test errors * in dhcpbridge, only grab network id from env if needed * bug #828429: remove references to interface in nova-dhcpbridge * pep8 * remove extra reference in pipelib * clean up fake auth from server actions test * fix integration tests * make admin context the default, clean up pipelib * merged trunk * Merged with trunk and fixed broken testcases * merged with nova-1450 * nova-manage VSA print & forced update\_cap changes; fixed bug with report capabilities; added IP address to VSA APIs; added instances to APIs * Make all services use the same launching strategy * Updated compute manager/API to use vm/task states. 
Updated vm/task states to cover a few more cases I encountered * Updated server create XML deserializer to account for accessIPv4 and accessIPv6 * Added the host 'enabled' status to the host\_data returned by the plugin * Added accessip to models pep8 * Added migration for accessIPv4 and accessIPv6 * Fixed broken unit testcases * Initial instance states migration * pep8 fix * fix some naming inconsistencies, make associate/disassociate PUTs * Add NetworkCommandsTestCase into unit test of nova-manage * very minor cleanup * Undo an unecessary change * Merged trunk * Pep8 fixes * Split set state into vm, task, and power state functions * Add modules for task and vm states * Updated tests to correctly use the tenant id * DB object was being casted to dict() in API code. This did not work as intended and logic has been updated to reflect a more accurate way of getting information out of DB objects * merge from trunk * Cleaned up the extension metadata API data * Updated get\_updated time * Cleaned up the file * Fixed vif test to match the JSON key change * Added XML support and changed JSON output keys * Added virtual interfaces API test * Removed serverId from the response * Merged trunk * Merged Dan's branch to add VIF uuid to VIF drivers for Quantum * Removed a change from faults.py that was not required." * Changed return code to 413 for metadata, personality and instance quota issues * Append the project\_id to the SERVER-MANAGEMENT-URL header for v1.1 requests. Also, ensure that the project\_id is correctly parsed from the request * add new vif uuid for OVS vifplug for libvirt + xenserver * Remove instances.admin\_pass column * merge trunk * all tests passing * fix unit tests * Resolved conflicts and merged with trunk * Added uuid for networks and made changes to the Create server API format to accept network as uuid instead of id * I'm taking Thierry at his word that I should merge early and merge often :) * Fixes issue with exceptions getting eaten in image/s3.py if there is a failure during register. The variables referenced with locals() were actually out of scope * Allow local\_gb size to be 0. libvirt uses local\_gb as a secondary drive, but XenServer uses it as the root partition's size. Now we support both * Merged trunk * merge from trunk * make project\_id authorization work properly, with test * Use netaddr's subnet features to calculate subnets * make delete more consistant * Review feedback * Updated note * Allow local\_gb to be 0; PEP8 fixes * Updated ViewBuilderV10 as per feedback * \* Added search instance by metadata. \* instance\_get\_all\_by\_filters should filter deleted * This branch implements a nova api extension which allows you to manage and update tenant/project quotas * test improvements per peer review * fixing pep8 issue * defaults now is referred to using a tenant * fixing up the show quotas tests, and extension * making get project quotas require context which has access to the project/tenant) * fixing pep8 issues again * fixing spacing issues * cleaning up a few things from pyflakes * fixing pep8 errors * refactoring tests to not use authmanager, and now returning 403 when non admin user tries to update quotas * removed index, and separated out defaults into its own action * merging test\_extensions.py * another trunk merge * another trunk merge... 
a new change made it into nova before the code was merged * Cleanup the '\_base' directory in libvirt tests * Small bug fix...don't cast DB objects to dicts * merge from trunk * Updated the EC2 metadata controller so that it returns the correct value for instance-type metadata * Fix test\_metadata tests * merge the trunk * Merged with upstream * Added list\_notifier, a driver for the notifer api which calls a list of other drivers * merge with trunk * Refactored the HostFilterScheduler and LeastCostScheduler classes so that they can be combined into a single class that can do both host filtering and host weighting, allowing subclasses to override those processes as needed. Also renamed the ZoneAwareScheduler to AbstractScheduler, for two reasons: one, the 'zone-aware' designation was necessary when the zone code was being developed; now that it is part of nova, it is not an important distinction. Second, the 'Abstract' part clearly indicates that this is a class that is not designed to be used directly, but rather as the basis for specific scheduler subclasses * cosmetic change in test\_extensions. Avoids constant merge conflicts between proposals with new extensions * Validate the size of VHD files in OVF containers * Include vif UUID in the network info dictionary * Added uuid to allocate\_mac\_address * Fixed the naming of the extension * redux of floating ip api * Merged trunk * Merged trunk * log the full exception so we don't lose traceback through eventlet * fix error logging in s3.py * pep8 cleanup * Merged trunk * Removed newly added userdatarequesthandler for OS API, there is no need to add this handler since the existing Ec2 API metadatarequesthandler does the same job * got tests passing with logic changes * pep8 * pep8 * add note * have the tests call create\_networks directly * allow for finding a network that fits the size, also format string correctly * adding sqlalchemi api tests for test\_instance\_get\_all\_by\_filter to ensure doesn't return deleted instances * added cloud unit test for describe\_instances to ensure doesn't return deleted instances * return the created networks * pep8 fix * merge trunk * Adding kvm-block-migration feature * i hate these exceptions where it should just return an empty list * fix typo where I forgot a comma * merge trunk, remove \_validate\_cidrs and replace functionality with a double for loop * fix bug which DescribeInstances in EC2 api was returning deleted instances * We don't have source for open-wrt in the source tree, so we shouldn't use the images. 
Since the images are only there for uploading smoketests, They are now replaced with random images * Make response structure for list floating ips conform with rest of openstack api * put tenant\_id back in places where it was * This branch allows the standard inclusion of a body param which most http clients will send along with a POST request * Libvirt has some autogenerated network info that is breaking ha network * making body default to none * pep8 fix * Adding standard inclusion of a body param which most http clients will send along with a POST request * Fixed merging issue * Merged with trunk * Updated rate limiting tests to use tenants * Corrected names in TODO/FIXME * remove openwrt image * Fix the tests when libvirt actually exists * Merged trunk * Add durable flag for rabbit queues * Fixed merge conflict * merged trunk * Merged trunk * Dryed up contructors * make list response for floating ip match other apis * fix missing 'run\_as\_root' from bad merge * Added ability too boot VM from install ISO. System detects an image of type iso. Images is streamed to a VDI and mounted to the VM. Blank disk allocated to VM based on instance type * Add source-group filtering * added logic to make the creation of networks (IPv4 only) validation a bit smarter: - detects if the cidr is already in use - detects if any existing smaller networks are within the range of requested cidr(s) - detects if splitting a supernet into # of num\_networks && network\_size will fit - detects if requested cidr(s) are within range of already existing supernet (larger cidr) * fix InvalidPortRange exception shows up in euca2ools instead of UnknownError when euca-authorize is specified w/ invalid port # * Changes requests with an invalid server action to return an HTTP 400 instead of a 501 * Currently OS API doesn't accept availability zone parameter so there is no way to instruct scheduler (SimpleScheduler) to launch VM instance on specific host of specified zone * typo fix * Fix v1.1 /servers/ PUT request to match API documentation by returning 200 code and the server data in the body * Allow different schedulers for compute and volume * have NetworkManager generate MAC address and pass it to the driver for plugging. Sets the stage for being able to do duplicate checks on those MACs as well * make sure security groups come back on restart of nova-compute * fix all of the tests * rename project\_net to same\_net * use dhcp server instead of gateway for filter exception * get rid of network\_info hack and pass it everywhere * fix issue introduced in merge * merge trunk, fix conflict frim dprince's branch to remove hostname from bin/nova-dhcpbridge * merge in trunk, resolving conflicts with ttx's branch to switch from using sudo to run\_as\_root=True * remerge trunk * Added durable option for nova rabbit queues added queueu delete script for admin/debug purposes * Added add securitygroup to instance and remove securitygroup from instance functionality * Fix ugly little violations before someone says anything * Merged trunk * Updated logging * end of day * Check uncompressed VHD size * reworked test\_extensions code to avoid constant merge conflicts with newly added ext * nova-manage: fixed instance type in vsa creation * Stub out instance\_get as well so we can show the results of the name change * removed VSA/drive\_type code from EC2 cloud. 
changed nova-manage not to use cloud APIs * Merged with trunk and fixed broken unit testcases * merged rev1418 and fixed code so that less than 1G image can be migrated * Created the filters directory in nova/scheduler * removed admincontext middleware * updates from review * merge from trunk * fix merges from trunk * Nuke hostname from nova-dhcpbridge. We don't use it * merge the trunk * need to actually assign the v4 network * Fixes to the OSAPI floating API extension DELETE. Updated to use correct args for self.disassociate (don't sweep exceptions which should cause test cases to fail under the rug). Additionally updated to pass network\_api.release\_floating\_ip the address instead of a dict * Merged trunk * Fixed unit tests * only run if the subnet and cidr exist * only run if the subnet and cidr exist * merge from trunk * make sure network\_size gets set * merge from trunk * don't require ipv4 * forgot the closing paren * use subnet iteration from netaddr for subnet calculation * Fix a typo that causes ami images to launch with a kernel as ramdisk when using xen * Fixing a 500 error when -1 is supplied for flavorRef on server create * rewriting parsing * fix typo that causes ami instances to launch with a kernal as ramdisk * Merged trunk * Allows for a tunable number of SQL connections to be maintained between services and the SQL server using new configuration flags. Only applies when using the MySQLdb dialect in SQLAlchemy * Merged trunk * Fixes pep8 issues in test\_keypairs.py * Merged trunk * start of day * Fixes to the OSAPI floating API extension DELETE. Updated to use correct args for self.disassociate (don't sweep exceptions which should cause test cases to fail under the rug). Additionally updated to pass network\_api.release\_floating\_ip the address instead of a dict * API needs virtual\_interfaces.instance joined when pulling instances from the DB. Updated instance\_get\_all() to match instance\_get\_all\_by\_filters() even though the former is only used by nova-manage now. (The latter is used by the API) * remove extra log statements * join virtual\_interfaces.instance for DB queries for instances. updates instance\_get\_all to match instance\_get\_all\_by\_filters * remove accidentally duplicated flag * merged trunk * add keystone middlewares for ec2 api * Merged with trunk * added userdata entry in the api paste ini * Initial version * Accidentally added inject\_files to merge * Support for management of security groups in OS API as a new extension * Updates to libvirt, write metadata, net, and key to the config drive * prefixed with os- for the newly added extensions * Merged with trunk * Author added * allow scheduling topics to multiple drivers * Check compressed image size and PEP8 cleanup * v1.1 API also requires the server be returned in the body * capabilities fix, run\_as\_root fix * lp824780: fixed typo in update\_service\_capabilities * fix pep8 * spacing fixes * fixed pep8 issue * merge from trunk * fixed v1.0 stuff with X-Auth-Project-Id header, and fixed broken integrated tests * merged with 1416 * fixing id parsing * moved vsa\_id to metadata. Added search my meta * Refactored the scheduler classes without changing functionality. Removed all 'zone-aware' naming references, as these were only useful during the zone development process. Also fixed some PEP8 problems in trunk code * Added search instance by metadata. 
get\_all\_by\_filters should filter deleted * got rid of tenant\_id everywhere, got rid of X-Auth-Project-Id header support (not in the spec), and updated tests * Silly fixes * v1.0 and v1.1 API differs for PUT, so split them out Update tests to match API * Removed postgres, bug in current ubuntu package which won't allow it to work easily. Will add a bug in LP * minor cleanup * Added availability zone support to the Create Server API * Make PUT /servers/ follow the API specs and return a 200 status * More logging * removed extra paren * Logging for SQLAlchemy type * merged trunk * Fixed per HACKING * \* Removes rogue direct usage of subprocess module by proper utils.execute calls \* Adds a run\_as\_root parameter to utils.execute, that prefixes your command with FLAG.root\_helper (which defaults to 'sudo') \* Turns all sudo calls into run\_as\_root=True calls \* Update fakes accordingly \* Replaces usage of "sudo -E" and "addl\_env" parameter into passing environment in the command (allows it to be compatible with alternative sudo\_helpers) \* Additionally, forces close\_fds=True on all utils.execute calls, since it's a more secure default * Remove doublequotes from env variable setting since they are literally passed * Changed bad server actions requests to raise an HTTP 400 * removed typos, end of line chars * Fixed broken unit testcases * Support for postgresql * merge from trunk * tenant\_id -> project\_id * Adding keypair support to the openstack contribute api * elif and FLAG feedback * Removed un-needed log line * Make sure to not use MySQLdb if you don't have it * get last extension-based tests to pass * Allows multiple MySQL connections to be maintained using eventlet's db\_pool * Removed verbose debugging output when capabilities are reported. This was clogging up the logs with kbytes of useless data, preventing actual helpful information from being retrieved easily * Removed verbose debugging output when capabilities are reported * Updated extensions to use the TenantMapper * fix pep8 issues * Fixed metadata PUT routing * These fixes are the result of trolling the pylint violations here * Pass py\_modules=[] to setup to avoid installing run\_tests.py as a top-level module * Add bug reference * Pass py\_modules=[] to setup to avoid installing run\_tests.py as a top-level module * fix servers test issues and add a test * added project\_id for flavors requests links * added project\_id for images requests * merge trunk * fix so that the exception shows up in euca2ools instead of UnknownError * Dropped vsa\_id from instances * import formatting - thx * List security groups project wise for admin users same as other users * Merged with trunk * merge with nova-1411. fixed * pep8 fix * use correct variable name * adding project\_id to flavor, server, and image links for /servers requests * Merged with trunk * tests pass * merge from trunk * merged with nova-1411 * This branch makes sure to detach fixed ips when their associated floating ip is deallocated from a project/tenant * adding other emails to mailmap * add Keypairs to test\_extensions * adding myself to authors * This adds the servers search capabilities defined in the OS API v1.1 spec.. and more for admins * Be more tolerant of agent failures. 
It is often the case there is only a problem with the agent, not with the instance, so don't claim it failed to boot so quickly * Updated the EC2 metadata controller so that it returns the correct value for instance-type metadata * added tests - list doesn't pass due to unicode issues * initial port * merged trunk * Be more tolerant of agent failures. The instance still booted (most likely) so don't treat it like it didn't * Updated extensions to expect tenant ids Updated extensions tests to use tenant ids * Update the OSAPI v1.1 server 'createImage' and 'createBackup' actions to limit the number of image metadata items based on the configured quota.allowed\_metadata\_items that is set * Fix pep8 error * fixing one pep8 failure * I think this restores the functionality .. * Adds missing nova/api/openstack/schemas to tarball * Instance metadata now functionally works (completely to spec) through OSAPI * updated v1.1 flavors tests to use tenant id * making usage of 'delete' argument more clear * Fix the two pep8 issues that sneaked in while the test was disabled * Fix remaining two pep8 violations * Updated TenantMapper to handle resources with parent resources * updating tests; fixing create output; review fixes * OSAPI v1.1 POST /servers now returns a 202 rather than a 200 * Include missing nova/api/openstack/schemas * Rename sudo\_helper FLAG into root\_helper * Minor fix to reduce diff * Initial validation for ec2 security groups name * Remove old commented line * Command args can be a tuple, convert them to list * Fix usage of sudo -E and addl\_env in dnsmasq/radvd calls, remove addl\_env support, fix fake\_execute allowed kwargs * Use close\_fds by default since it's good for you * Fix ajaxterm's use of shell=True, prevent vmops.py from running its own version of utils.execute * With this branch, boot-from-volume can be marked as completed in some sense. The remaining is minor if any and will be addressed as bug fixes * Update the curl command in the \_\_public\_instance\_is\_accessible function of test\_netadmin to return an error code which we can then check for and handle properly. This should allow calling functions to properly retry and timeout if an actual test failure happens * updating more test cases * changing server create response to 202 * Added xml schema validation for extensions resources. Added corresponding xml schemas. Added lxml dep, which is needed for doing xml schema validation * Fixing a bug in nova.utils.novadir() * Adds the ability to read/write to a local xenhost config. No changes to the nova codebase; this will be used only by admin tools that have yet to be created * fixed conditional because jk0 is very picky :) * Fixed typo found in review * removing log lines * added --purge optparse for flavor delete * making server metadata work functionally * cleaning up instance metadata api code * Updated servers tests to use tenant id * Set image progress to 100 if the image is active * Cleaned up merge messes * Merged trunk * cleaned up unneeded line * nova.exception.wrap\_exception will re-raise some exceptions, but in the process of possibly notifying that an exception has occurred, it may clobber the current exception information. nova.utils.to\_primitive in particular (used by the notifier code) will catch and handle an exception clobbering the current exception being handled in wrap\_exception. 
Eventually when using the bare 'raise', it will attempt to raise None resulting a completely different and unhelpful exception * remove obsolete script from setup.py * assert that vmops.revert\_migration is called * Import sys as well * Resolve conflicts and fixed broken unit testcases * This branch adds additional capability to the hosts API extension. The new options allow an admin to reboot or shutdown a host. I also added code to hide this extension if the --allow-admin-api is False, as regular users should have no access to host API calls * adding forgotten import for logging * Adds OS API 1.1 support * Updated test\_images to use tenant ids * Don't do anything with tenant\_id for now * Review fixes * fixed wrong syntax * Assign tenant id in nova.context * another trunk merge * Merged trunk * Merged trunk * Cleaned up some old code added by the last merge * Fixed some typos from the last refactoring * Moved the restriction on host startup to the xenapi layer.: * Remove nova/tests/network, which was accidentally included in commit * upper() is even better * merged with 1383 * Updated with code changes on LP * Merged trunk * Save exception and re-raise that instead of depending on thread local exception that may have been clobbered by intermediate processing * Adding \_\_init\_\_.py files * Adds ability to disable snapshots in the Openstack API * Sync trunk * Set image progress to 100 if the image is active * Sync trunk * Update the curl command in the \_\_public\_instance\_is\_accessible function of test\_netadmin to return an error code which we can then check for and handle properly. This should allow calling functions to properly retry and timout if an actual test failure happens * ZoneAwareScheduler classes couldn't build local instances due to an additional argument ('image') being added to compute\_api.create\_db\_entry\_for\_new\_instance() at some point * simplified test cases further, thanks to trunk changes * Added possibility to mark fixed ip like reserved and unreserved * Update the OSAPI v1.1 server 'createImage' and 'createBackup' actions to limit the number of image metadata items based on the configured quota.allowed\_metadata\_items that is set * Pep8 fix * zone\_aware\_scheduler classes couldn't build instances due to a change to compute api's create\_db\_entry\_for\_new\_instance call. now passing image argument down to the scheduler and through to the call. updated a existing test to cover this * Adding check to stub method * moving try/except block, and changing syntax of except statement * Fixes broken image\_convert. The context being passed to glance image service was not a real context * Using decorator for snapshots enabled check * Disable flag for V1 Openstack API * adding logging to exception in delete method * Pass a real context object into image service calls * Adding flag around image-create for v1.0 * Refactored code to reduce lines of code and changed method signature * If ip is deallocated from project, but attached to a fixed ip, it is now detached * Glance Image Service now understands how to use glance client to paginate through images * Allow actions queries by UUID and PEP8 fixes * Fixed localization review comment * Allow actions queries by UUID and PEP8 fixes * Fixed review comments * fixing filters get * fixed per peer review * fixed per peer review * re-enabling sort\_key/sort\_dir and fixing filters line * Make sure mapping['dns'] is formatted correctly before injecting via template into images. 
mapping['dns'] is retrieved from the network manager via info['dns'], which is a list constructed of multiple DNS servers * Add a generic image service test and run it against the fake image service * Implemented @test.skip\_unless and @test.skip\_if functionality in nova/test.py * merged with 1382 * Updates v1.1 servers/id/action requests to comply with the 1.1 spec * fix typo * Moving from assertDictEqual to assertDictMatch * merging trunk * merging trunk * Add exception logging for instance IDs in the \_\_public\_instance\_is\_accessible smoke test function. This should help troubleshoot an intermittent failure * adding --fixes * glance image service pagination * Pass tenant ids through on on requests * methods renamed * Add exception logging for instance IDs in the \_\_public\_instance\_is\_accessible smoke test function. This should help troubleshoot an intermittent failure * Removed most direct sudo calls, make them use run\_as\_root=True instead * pep8 violations sneaking into trunk? * pep8 violations sneaking into trunk? * trunk merge * Fixes lp821144 * Make disk\_format and container\_format optional for libvirt's snapshot implementation * pep8 * fixed up zones controller to properly work with 1.1 * Add generic image service tests * Add run\_as\_root parameter to utils.execute, uses new sudo\_helper FLAG to prefix command * Remove spurious direct use of subprocess * Added virtual interfaces REST API extension controller * Trunk contained PEP8 errors. Fixed * Trunk merge * fix mismerge * Added migration to add uuid to virtual interfaces. Added uuid column to models * merged trunk * merged with nova trunk * Launchpad automatic translations update * fixed pep8 issue * utilized functools.wraps * added missing tests * tests and merge with trunk * removed redundant logic * merged trunk * For nova-manage network create cmd, added warning when size of subnet(s) being created are larger than FLAG.network\_size, in attempt to alleviate confusion. For example, currently when 'nova-manage network create foo 192.168.0.0/16', the result is that it creates a 192.168.0.0/24 instead without any indication to why * Remove instances of the "diaper pattern" * Read response to reset the connection state-machine for the next request/response cycle * Added explanations to exceptions and cleaned up reboot types * fix pep8 issues * fixed bug , when logic searched for next avail cidr it would return cidrs that were out of range of original requested cidr block. added test for it * Adding missing module xmlutil * fixed bug, wasn't detecting smaller subnet conflict properly added test for it * Properly format mapping['dns'] before handing off to template for injection (Fixes LP Bug #821203) * Read response to reset HTTPConnection state machine * removed unnecessary context from test I had left there from prior * move ensure\_vlan\_bridge,ensure\_bridge,ensure\_vlan to the bridge/vlan specific vif-plugging driver * re-integrated my changes after merging trunk. fixed some pep8 issues. sorting the list of cidrs to create, so that it will create x.x.0.0 with a lower 'id' than x.x.1.0 (as an example). <- was causing libvirtd test to fail * Revert migration now finishes * The OSAPI v1.0 image create POST request should store the instance\_id as a Glance property * There was a recent change to how we should flip FLAGS in tests, but not all tests were fixed. This covers the rest of them. I also added a method to test.UnitTest so that FLAGS.verbose can be set. 
This removes the need for flags to be imported from a lot of tests * Bad method call * Forgot the instance\_id parameter in the finish call * Merged in the power action changes * Removed test show() method * Fixed rescue/unrescue since the swap changes landed in trunk. Minor refactoring (renaming callback to \_callback since it's not used here) * Updates to the XenServer glance plugin so that it obtains the set of existing headers and sends them along with the request to PUT a snapshotted image into glance * Added admin-only decorator * This updates nova-ajax-console-proxy to correctly use the new syntax introduced last week by Zed Shaw * Merged trunk * Changed all references to 'power state' to 'power action' as requested by review * Added missing tests for server actions Updated reboot to verify the reboot type is HARD or SOFT Fixed case of having an empty flavorref on resize * Added more informative docstring * Added XML serialization for server actions * Removed debugging code * Updated create image server action to respect 1.1 * Fixes lp819397 * Fixed rescue unit tests * Nuke hostname. We don't use it * Split serverXMLDeserializers into v1.0 and v1.1 * another merge * Removed temporary debugging raise * Merged trunk * modify \_setup\_network for flatDHCP as well * Merged trunk * Added xenhost config get/setting * fix syntax error * Fixed rescue and unrescue * remove storing original flags verbosity * remove set\_flags\_verbosity.. it's not needed * Merged trunk * OS v1.1 is now the default into novarc * added NOVA\_VERSION to novarc * remove unused reference to exception object * Add a test for empty dns list in network\_info * Fix comments * uses 2.6.0 novaclient (OS API 1.1 support) * Fix to nova-ajax-console-proxy to use the new syntax * Update the OS API servers metadata resource to match the current v1.1 specification - move /servers//meta to /servers//metadata - add PUT /servers//metadata * fix pep8 issues that are in trunk * test\_host\_filter setUp needs to call its super * fix up new test\_server\_actions.py file for flags verbosity change * merged trunk * fixing typo * Sync with latest tests * The logic for confirming and reverting resizes was flipped. As a result, reverting a resize would end up deleting the source (instead of the destination) instance, and confirming would end up deleting the destination (instead of the source) instance * Found a case where an UnboundLocalError would be raised in xenapi\_conn.py's wait\_for\_task() method. This fixes the problem by moving the definition of the unbound name outside of the conditional * Moves code restarting instances after compute node reboot from libvirt driver to compute manager; makes start\_guests\_on\_host\_boot flag global * Moved server actions tests to their own test file. Updated stubbing and how flags are set to be in line with how they're supposed to be set in tests * merging trunk * add test for spawning a xenapi instance with an empty dns list * Nova uses instance\_type\_id and flavor\_id interchangeably when they almost always different values. This can often lead to an instance changing instance\_type during migration because the values passed around internally are wrong. This branch changes nova to use instance\_type\_id internally and flavor\_id in the API. 
This will hopefully avoid confusion in the future * The OSAPI v1.0 image create POST request should store the instance\_id as a Glance property * Linked to bug * Changed the definition of the 'action' dict to always occur * Updates to the XenServer glance plugin so that it obtains the set of existing headers and sends them along with the request to PUT a snapshotted image into glance * Fixed rescue and unrescue * Added in tests that verify tests are skipped appropriately * Merged trunk * Merged dietz' branch * Update HACKING: - Make imports more explicit - Add some dict/list formatting guidelines - Add some long method signature/call guidelines - Add explanation of i18n * Pep8 cleanup * Defaults \`dns\` to '' if not present, just as we do with the other network info data * Removes extraneous bodies from certain actions in the OSAPI servers controller * Revert should be sent to destination node and confirm should be sent to source node * Conditionals were not actually runing the tests when they were supposed to. Renamed example testcases * fix pylint W0102 errors * Remove whitespaces from name and description before creating security group * Remove instances of the "diaper pattern" * Fixes lp819397 * Initial version * Load instance\_types in downgrade method too * Fix trailing whitespace (PEP8) * fix test\_cloud FLAGS setting * dist scheduler flag setting fixes * fix scheduler tests that set FLAGS * fix more tests that use FLAGS setting * all subclasses of ComputeDriver should fully implement the interface of the destroy method * align multi-line string * fix test\_s3 FLAGS uses * switch FLAGS.\* = in tests to self.flags(...) remove unused cases of FLAGS from tests modified test.TestCase's flags() to allow multiple overrides added missing license to test\_rpc\_amqp.py * follow convention when raising exceptions * pep8 fixes * use an existing exception * use correct exception name * fix duplicate function name * fix undefined variable error * fix potential runtime exception * remove unused imports * remove bit-rotted code * more cleanup of API tests regarding FLAGS * fix use of FLAGS in openstack API servers tests to use the new way * Removes extraneous body argument from server controller methods * Merged trunk * Merged trunk * Default dns to '' if not present * replaced raise Exception with self.fail() * Removed dependancy on os.getenv. Test cases now raise Exception if they are not properly skipped * PEP8 issue * whoops, got a little comma crazy * Merged trunk and fixed conflicts to make tests pass * fumigate non-pep8 code * Use flavorid only at the API level and use instance\_type\_id internally * Yet another conflict resolved * forgot to remove comment * updated to work w/ changes after merged trunk fixing var renaming. the logic which forces default to FLAGS.network\_size if requested cidr was larger, was also applying to requested cidrs smaller than FLAGS.network\_size. 
Requested cidrs smaller than FLAGS.network\_size should be ignored and not overriden * merged from trunk * merged from trunk * merge trunk * Launchpad automatic translations update * Resolved pep8 errors * renaming test\_skip\_unless\_env\_foo\_exists() * merging trunk * Removed trailing whitespace that somehow made it into trunk * Merged trunk * Removed duplicate methods created by previous merge * Fixes lp819523 * Fix for bug #798298 * fix for lp816713: In instance creation, when nova-api is passed imageRefs generated by itself, strip the url down to an id so that default glance connection params are used * Added check for --allow-admin-api to the host API extension code * Another unittest * Merged trunk * Add support for 300 Multiple Choice responses when no version identifier is used in the URI (or no version header is present) * Merged trunk * Glance has been updated for integration with keystone. That means that nova needs to forward the user's credentials (the auth token) when it uses the glance API. This patch, combined with a forth-coming patch for nova\_auth\_token.py in keystone, establishes that for nova itself and for xenapi; other hypervisors will need to set up the appropriate hooks for their use of glance * Added changes from mini server * raise correct error * Minor test fixes * fix failing tests * fix pep8 complaints * merge from trunk * Fixed a missing space * Bad merge res * merge the trunk * fix missing method call and add failing test * Removed duplicate xattr from pip-requires * Fixed merge issues * Merged trunk * merged trunk * remove unused parameter * Merged trunk * Merged from lab * fix pylint errors * fix pylint errors * merge from trunk * Moves image creation from POST /images to POST /servers//action * Fixed several typos * Changed migration to be an admin only method and updated the tests * - Remove Twisted dependency from pip-requires - Remove Twisted patch from tools/install\_venv.py - Remove eventlet patch from tools/install\_venv.py - Remove tools/eventlet-patch - Remove nova/twistd.py - Remove nova/tests/test\_twistd.py - Remove bin/nova-instancemonitor - Remove nova/compute/monitor.py - Add xattr to pip-requires until glance setup.py installs it correctly - Remove references to removed files from docs/translations/code * Fix an error in fetch\_image() * Get instance by UUID instead of id * Merged trunk * Added the powerstate changes to the plugin * pull-up from trunk/fix merge conflict * fixing typo * refactored tests * pull-up from trunk * Removing the xenapi\_image\_service flag in favor of image\_service * cleanup * Merged trunk * abstraction of xml deserialization * fixing method naming problem * removing compute monitor * merge from trunk * code was checking for key in sqlalchemy instance and will ignore if value is None, but wasn't working if floating\_ip was a non-sqlalchemy dict obj. Therefore, updated the error checking to work in both caes * While we currently trap JSON encoding exceptions and bail out, for error notification it's more important that \*some\* form of the message gets out. So, we take complex notification payloads and convert them to something we know can be expressed in JSON * Better error handling for resizing * Adds the auth token to nova's RequestContext. 
This will allow for delegation, i.e., use of a nova user's credentials when accessing other services such as glance, or perhaps for zones * merged trunk rev1348 * Launchpad automatic translations update * added some tests for network create & moved the ipv6 logic back into the function * merged with nova trunk * Added host shutdown/reboot conditioning * avoid explicit type checking, per brian waldon's comment * Added @test.skip\_unless and @test.skip\_if functionality. Also created nova/tests/test\_skip\_examples.py to show the skip cases usage * fix LinuxBridgeInterfaceDriver * merge trunk, resolve conflict in net/manater.py in favor of vif-plug * initial commit of vif-plugging for network-service interfaces * Merged trunk * pep8 fixes * Controller -> self * Added option for rebooting or shutting down a host * removed redundant logic * merged from trunk * adding a function with logic to make the creation of networks validation a bit smarter: - detects if the cidr is already in use - when specifying a supernet to be split into smaller subnets via num\_networks && network\_size, ensures none of the returned subnets are in use by either a subnet of the same size and range, nor a SMALLER size within the same range. - detects if splitting a supernet into # of num\_networks && network\_size will fit - detects if the supernet/cidr specified is conflicting with a network cidr that currently exists that may be a larger supernet already encompassing the specified cidr. " * Carry auth\_token in nova's RequestContext * merge with trunk, resolve conflicts * Revert hasattr() check on 'set\_auth\_token' for clients * it makes the pep8, or else it gets the vim again * merge from trunk * Fixes this issue that I may have introduced * Update compute tests to use new exceptions * Resync to trunk * Remove copy/paste error * Launchpad automatic translations update * Launchpad automatic translations update * Fixed review comments: Put parsing logic of network information in create\_instance\_helper module and refactored unit testcases as per the changed code * pep8 * wow, someone whent all crazy with exceptions, why not just return an empty list? * Only call set\_auth\_token() on the glance client if there's one available * Make unit tests pass * merging * only attempt to get a fixed\_up from a v4 subnet if there is a v4 subnet * FlavorNotFound already existed, no need to create another exception * Created exceptions for accepting in OSAPI, and handled them appropriately * only create fixed\_ips if we have an ipv4 range * Revert to using context; to avoid conflict, we import context module as nova\_context; add context to rescue * You see what happens Danny when you forget to close the parenthesis * Merged with trunk * Merged trunk * allow the manager to try to do the right thing * allow getting by the cidr\_v6 * the netmask is implied by the cidr, so use that to display the v6 subnet * either v4 or v6 is required * merging trunk * pull-up from trunk and conflict resolution * merge trunk * stwart the switch to just fixed\_range * typo * Round 1 of changes for keystone integration. \* Modified request context to allow it to hold all of the relevant data from the auth component. 
\* Pulled out access to AuthManager from as many places as possible \* Massive cleanup of unit tests \* Made the openstack api fakes use fake Authentication by default * require either v4 or v6 * pull-up from trunk * Fix various errors discovered by pylint and pyflakes * fixing underline * removing extra verbage * merged trunk * This change creates a minimalist API abstraction for the nova/rpc.py code so that it's possible to use other queue mechanisms besides Rabbit and/or AMQP, and even use other drivers for AMQP rather than Rabbit. The change is intended to give the least amount of interference with the rest of the code, fixes several bugs in the tests, and works with the current branch. I also have a small demo driver+server for using 0MQ which I'll submit after this patch is merged * removing dict() comment * adding more on return\_type in docstrings * Fixes issue with OSAPI passing compute API a flavorid instead of an instance identifier. Added tests * made the whole instance handling thing optional * Reorganize the code to satisfy review comments * pull-up from trunk; fix problem obscuring context module with context param; fix conflicts and no-longer-skipped tests * remove unused import * --Stolen from https://code.launchpad.net/~cerberus/nova/lp809909/+merge/68602 * removing 'Defining Methods' paragraph * rewording * Use the util.import\_object to import a module * rewording * one last change * upgrades * expanding * merged trunk and fix time call * updating HACKING * Fixing lxml version requirement * Oops, I wasn't actually being compatible with the spec here * bumping novaclient version * Fixes lp:818050 * Updated resize to call compute API with instance\_type identifiers instead of flavor identifiers. Updated tests * fix run\_tests.sh * merge trunk * Fixed changes missed in merge * fix more spacing issues, and removed self link from versions template data * merged trunk * added instance support to to\_primitive and tests * merged trunk and fixed post\_live\_migratioin\_at\_destination to get nw\_info * Removing unnecessary imports * Added xml schema validation for extensions resources. Added corresponding xml schemas. Added lxml dep, which is needed for doing xml schema validation * remove extra log statement * api/ec2: rename CloudController.\_get\_instance\_mapping into \_format\_instance\_mapping * fixed typo * merge with trunk * fixed pep8 issues and removed unnecessary factory function * returned vsa\_manager, nova-manage arg and print changes * Added the config values to the return of the host\_data method * Adds XML serialization for servers responses that match the current v1.1 spec * Added methods to read/write values to a config file on the XenServer host * fix pep8 errors * minor cleanup * Removed unused Duplicate catch * Fix to\_dict() and elevated() to preserve auth\_token; revert an accidental change from context.get\_admin\_context() to simply context * Fixes bug 816604, which is the problem that timeformat in server responses for updated and created are incorrect. This fix just converts the datetime into the correct format * merging trunk * pep8 * moving server backup to /servers//action instead of POST /images * Simplified test cases * Rewrite ImageType enumeration to be more pythonic * refactoring and make self links correct (not hard coded) * Fix tests for checking pylint errors * Use utils.utcnow. 
Use True instead of literal 1 * Some tests for resolved pylint errors * simplify if statement * merge trunk * use wsgi XMLNS/ATOM vars * Updated deserialization of POST /servers in the OSAPI to match the latest v1.1 spec * Removed unused Duplicate catch * pull-up from trunk * Catch DBError for duplicate projects * Catch DBError for duplicate projects * Make network\_info truly optional * trunk infected with non-pep8 code * unicode instead of str() * Add a flag to set the default file mode of logs * merge trunk * make payload json serializable * moved test * Removed v1\_1 from individual tests * merge from trunk * merge to trunk * more commented code removed * some minor cosmetic work. addressed some dead code section * merged with nova-1336 * prior to nova-1336 merge * remove authman from images/s3.py and replace with flags * fix tests broken in the merge * merged trunk * fix undeclared name error * fix undeclared name error * fix undeclared name error * fix undeclared name errors * remove unused assignment which causes undeclared name error * fix undefined variable errors * fix call to nonexistant method to\_global\_ipv6. Add myself to authors file * Make network\_info truly optional * updates handling of arguments in nova-manage network create. updates a few of the arguments to nova-manage and related help. updates nova-manage to raise proper exceptions * forgot a line * fixed create\_networks ipv6 management * Fail silently * typo * --bridge defaults to br100 but with a deprecation warning and to be removed in d4 * Reverting to original code * use ATOM\_XMLNS everywhere * merge trunk * added unit testcase to increase code coverage * stub out VERSIONS for the tests * put run\_tests.sh back to how it was * Fixed conflict * Fail silently * Merged with trunk and fixed broken unit test cases * Fix the skipped tests in vmwareapi and misc spots. The vmware networking stuff is stubbed out, so the tests can be improved there by fixing the fakes * pep8 issue * refactoring MetadataXMLDeserializer in wsgi/common * move viewbuilder and serializer tests into their own test cases * Fix all of the skipped libvirt tests * fix typo * merged trunk * Fixes typo in attach volume * utilize \_create\_link\_nodes base class function * default the paramater to None, not sure why it was required to begin with * pass None in for nw\_info * added test for accept header of atom+xml on 300 responses to make sure it defaults back to json, and reworked some of the logic to make how this happens clearer * Drop FK before dropping instance\_id column * moved rest of build logic into builder * Drop FK before dropping instance\_id column * Removed FK import * Delete FK before dropping instance\_id column * oops! moved ipv6 block back into the for loop in network manager create\_networks * update everything to use global VERSIONS * merged trunk * change local variable name * updated handling of v6 in network manager create\_networks to it can receive None for v6 args * added ipv6 requirements to nova-manage network create. changed --network to --fixed\_range\_v4 * remove unexpected parameter * fixed xmlns issue * updated the bridge arg requirements based on manager * this change will require that local urls be input with a properly constructed local url: http://localhost/v1.1/images/[id]. Such urls are translated to ids at the api layer. 
Previously, any url ending with and int was ok * make atom+xml accept header be ignored on 300 responses in the VersionsRequestDeserializer * Removed superfluous parameter * Use auth\_token to set x-auth-token header in glance requests * Fixed the virt driver base * Some work on testing. Two cases related to lp816713 have some coverage already: using an id as an imageRef (test\_create\_instance\_v1\_1\_local\_href), and using a nova href as a url (test\_create\_instance\_v1\_1) * Remove xenapi\_inject\_image flag * Add a flag to set the default file mode of logs * fixed issue with factory for Versions Resource * Fix context argument in a test; add TODOs * improved the code per peer review * Add context argument a lot more places and make unit tests work * fix hidden breakage in test * Remove xenapi\_inject\_image flag * removed unused import * pep8 * pep8 * updated nova-manage create network. better help, handling of required args, and exceptions. Also updated FLAG flat\_network\_bridge to default to None * Re-enables and fixes test\_cloud tests that broke from multi\_nic * Fix for boto2 * Re-enables and fixes test\_cloud tests that broke from multi\_nic * add invalid device test and make sure NovaExceptions don't get wrapped * merge from trunk * pep8 * pep8 * updating common metadata xml serializer tests * Cleaned up test\_servers * Moved server/actions tests to test\_server\_actions.py * updating servers metadata resource * pull-up from trunk * Address merge review concerns * Makes security group rules with the newer version of the ec2 api and correctly supports boto 2.0 * merging parent branch servers-xml-serialization * updating tests * updated serializer tests for multi choice * pep8 cleanup * multi choice XML responses with tests * merged recent trunk * merge with trunk * Cherry-pick of tr3buchet's fix for add\_fixed\_ip\_to\_instance * Resolved conflicts with trunk * fix typo in attach\_volume * fix the last of them * fake plug for vif driver * couple more fixes * cleanup network create * code was checking for key in sqlalchemy instance but if floating\_ip is a non-sqlalchemy dict instance instead, value=None will cause NoneType exception * fix more tests * fix the first round of missing data * fix the skipped tests in vmwareapi xenapi and quota * Add myself to authors * Implements a simplified messaging abstraction with the least amount of impact to the code base * fix for lp816713: In instance creation, when nova-api is passed imageRefs generated by itself, strip the url down to an id so that default glance connection params are used * cloud tests all passing again * added multi\_choice test just to hit another resource * pep8 fixes * initial working 300 multiple choice stuff * cherry-pick tr3buchet's fix for milestone branch * cleanup * pep8 * pep8 * First pass at converting this stuff--pass context down into vmops. Still need to fix unit tests and actually use auth\_token from the context.. 
* pep8 and simplify rule refresh logic * pep8 * merging parent branch lp:~rackspace-titan/nova/osapi-create-server * adding xml deserialization for createImage action * remove some logging, remove extra if * compute now appends self.host to the call to add an additional fixed ip to an instance * Update security gropu rules to properly support new format and boto 2.0 * Updated test stubs to contain the correct data Updated created and updated in responses to use correct time format * pep8 compliance * VSA volume creation/deletion changes * moved v1.1 image creation from /images to /servers//action * fixed per peer review * passing host from the compute manager for add\_fixed\_ip\_to\_instance() * adding assert to check for progress attribute * removing extra function * Remove debugging code * cleanup * fixed minor issues * reverting tests to use imageRef, flavorRef * updating imageRef and flavorRef parsing * Updates to the compute API and manager so that rebuild, reboot, snapshots, and password resets work with the most recent versions of novaclient * merging trunk; resolving conflicts * Add OpenStack API support for block\_device\_mapping * queries in the models.Instance context need to reference the table by name (fixed\_ips) however queries in the models.FloatingIp context alias the tables out properly and return the data as fixed\_ip (which is why you need to reference it by fixed\_ip in that context) * added warning when size of subnet(s) being created are larger than FLAG.network\_size in attempt to alleviate confusion. For example, currently when 'nova-manage network create foo 192.168.0.0/16', the result is that it creates a 192.168.0.0/24 instead without any indication to why * xml deserialization works now * merged from trunk * merged trunk * merging trunk * pull-up from trunk * got rid of print * got rid of more xml string comparisons * atom test updates * got rid of some prints * got rid of string comparisons in serializer tests * removing objectstore and image\_service flag checking * Updates /servers requests to follow the v1.1 spec. Except for implementation of uuids replacing ids and access ips both of which are not yet implemented. Also, does not include serialized xml responses * fixed detail xml and json tests that got broken * updated atom tests * Updated ServerXMLSerializer to utilize the IPXMLSerializer * merged trunk * merge from trunk * fix pep8 issues * fix issue with failing test * merged trunk * I'm sorry, for my fail with rebasing. Any way previous branch grew to many other futures, so I supersede it. 1. Used optparse for parsing arg string 2. Added decorator for describe method params 3. Added option for assigning network to certain project. 4. Added field to "network list" for showing which project owns network * Moved the VIF network connectivity logic('ensure\_bridge' and 'ensure\_vlan\_bridge') from the network managers to the virt layer. In addition, VIF driver class is added to allow customized VIF configurations for various types of VIFs and underlying network technologies * merge with trunk, resolve conflicts * fix pep8 * Launchpad automatic translations update * removing rogue print * removing xenapi\_image\_service flag * adding to authors * fixing merge conflict * merge from trunk * initial stuff to get away from string comparisons for XML, and use ElementTree * merged with 1320 * volume name change. 
some cleanup * - Updates /images//meta and /images//meta/ to respect the latest specification - Renames ../meta to ../metadata - Adds PUT on ../metadata to set entire container (controller action is called update\_all) * Adds proper xml serialization for /servers//ips and /servers//ips/ * some cleanup. VSA flag status changes. returned some files * Pass on auth\_token * Warn user instead of ignoring * Added ensuring filter rules for all VMs * atom and xml\_detail working, with tests * Adds the -c|--coverage flag to run\_tests.sh to generate a local code coverage report * Estetic fix * Fix boot from volume failure for network block devices * Bug #796813: vmwareapi does not support distributed vswitch * modified to conform to latest AWS EC2 API spec for authorize & revoke ingress params using the IpPermissions data structure, which nests lists of CIDR blocks (IpRanges) as well as lists of Group data * Fixes faults to use xml serializers based on api version. This fixed bug 814228 * Fixes a typo in rescue instance in ec2 api. This is mnaser's fix, I just added a test to verify the change * Fixes bug 797250 where a create server request with the body '{"name":"server1"}' results in a HTTP 500 instead of HTTP 422 * adding xml serialization for /servers//ips and /servers//ips/ * add a simple broken test to verify the bug * Fixed old libvirt semantics, added resume\_guests\_state\_on\_host\_boot flag * xml version detail working with tests * adding testing to solidify handling of None in wsgi serialization * Added check to make sure there is a server entity in the create server request * Fixed some typos in log lines * removed prints, got versions detail tests passing, still need to do xml/atom * reverting some wsgi-related changes * merged trunk * removed print lines * This fixes the xml serialization of the /extensions and /extensions/foo resources. Add an ExtensionsXMLSerializer class and corresponding unit tests * added 1.0 detail test, added VersionRequestDeserializer to support Versions actions properly, started 300/multiple choice work * fix for reviews * Fixed bad test Fixed using wrong variable * Moved the exception handling of unplugging VIF from virt driver to VIF driver. Added better comments. Added OpenStack copyrights to libivrt vifs.py * pep8 + spelling fixes * Floating IP DB tests * Updated Faults controller to choose an xml serializer based on api version found in the request url * removing unnecessary assignments * Hotfix * Some estetic refactoring * Fixing PEP8 compliance issues * adding --fixes * fixing typos * add decorator for 'dns' params * merge with trunk, resolve conflicts * pep8 * Fixed logging * Fixed id * Fixed init\_host context name * Removed driver-specific autostart code * fix 'version' command * Add bug reference * Use admin context when fetching instances * Use subscript rather than attribute * Make IP allocation test work again * Adjust and re-enable relevant unit tests * some file attrib changes * some cosmetic changes. 
Prior to merge proposal * Added test\_serialize\_extenstions to test ExtensionsXMLSerializer.index() * tests: unit tests for describe instance attribute * tests: an unit test for nova.compute.api.API.\_ephemeral\_size() * tests: unit tests for nova.virt.libvirt.connection.\_volume\_in\_mapping() * tests/glance: unit tests for glance serializer * tests: unit tests for nova.virt * tests: unit tests for nova.block\_device * db/api: fix network\_get\_by\_cidr() * image/glance: teach glance block device mapping * tests/test\_cloud:test\_modify\_image: make it pass * nova/tests/test\_compute.py: make test\_compute.test\_update\_block\_device\_mapping happy * test\_metadata: make test\_metadata pass * test\_compute: make test\_compute pass * test\_libvirt: fix up for local\_gb * virt/libvirt: teach libvirt driver swap/ephemeral device * virt/libvirt: teach libvirt driver root device name * compute/api: pass down ephemeral device info * compute/manager, virt: pass down root device name/swap/ephemeral to virt driver * ec2/get\_metadata: teach block device mapping to get\_metadata() * api/ec2: implement describe\_instance\_attribute() * db/api: block\_device\_mapping\_update\_or\_create() * block\_device: introduce helper function to check swap or ephemeral device * ec2utils: factor generic helper function into generic place * Launchpad automatic translations update * Config-Drive happiness, minus smoketest * merged with latest nova-1308 * more unittest changes * Last patch broke libvirt mapping of network info. This fixes it * Fixes an issue with out of order operations in setup\_network for vlan mode in new ha-net code * Merged with 1306 + fix for dns change * update netutils in libvirt to match the 2 dns setup * merge * merge with 1305 * make sure dhcp\_server is available in vlan mode * Adds ability to set DNS entries on network create. Also allows 2 dns servers per network to be specified * pep8-compliant. Prior to merge with 1305 * Reverted volume driver part * pep cleanup * remove auth manager from instance helper * docstring update * pass in the right argument * pull out auth manager from db * merge trunk * default to None in the method signature * merged trunk * remove some more stubouts and fakes * clean up fake auth manager in other places * same as: https://code.launchpad.net/~tr3buchet/nova/lp812489/+merge/68448 fixes: https://bugs.launchpad.net/nova/+bug/812489 but in a slightly different context * pep8 * updating images metadata resource * ...and this is me snapping back into reality removing all trace of ipsets. Go me * fixed networks not defined error when creating instances when no networks exist * fix test\_access * This is me being all cocky, thinking I'll make it use ipsets.. 
* fix auth tests * Add i18n for logging, changed create\_bridge/vlan to should\_create\_bridge/vlan, changed unfilter\_instance's keyword param to positional, and added Dan's alternate ID to .mailmap * fix extensions tests * merge trunk * fix all tests * pep8 fixes * Updated the comments for VMWare VIF driver * initial test for v1.1 detail request * Moved restaring instances from livbirt driver to ComputeManager * Added network\_info to unfilter\_instance to avoid exceptions when shutting down instances * Removed unused exception object * Fixed the missing quotes for 802.1Qbh in libvirt template * add decorator for multi host option * Merged Dan's branch * Merged trunk * use new 'create\_vlan' field in XenAPIBridgeDriver * merge with trunk, resolve conflicts * remove IPy * for libvirt OVS driver, do not make device if it exists already * refactor xenapi vif plug to combine plug + get\_vif\_rec, tested and fixed XenAPIBridgeDriver * Correctly add xml namespaces to extensions xml * Added xml serialization for GET => /extensions. Added corresponding tests * merge ryu's branch * remove debugging * fix a whole bunch of tests * start removing references to AuthManager * change context to maintain exact time, store roles, use ids instead of objects and use a uuid for request\_id * Resolved conflict with trunk * Adds an XML serializer for limits and adds tests for the Limits view builder * pep8 * add in the right number of fields * pep8 * updated next-available to use utc time * merge trunk * rename in preperation for trunk merge * only include dns entries if they are not None in the database * Updated the compute API so that has\_finished\_migration uses instance\_uuid. Fixes some regressions with 1295-1296 * only use the flag if it evaluates true * Catch the FixedIpNotFoundForInstance exception when no fixed IP is mapped to instance * Updated time-available to be correct format Fixed old tests to respect this * This fixes issues with invalid flavorRef's being passed in returning a 500 instead of a 400, and adds tests to verify that two separate cases work * merge from trunk * Moving lp:~rackspace-titan/nova/extensions-xml-serialization to new branch based off of trunk. To remove dep on another branch * Perform fault wrapping in the openstack WSGI controller. This allows us to just raise webob Exceptions in OS API controllers with the appropriate explanations set. This resolves some inconsistencies with exception raising and returning that would cause HTML output to occur when faults weren't being handled correctly * pep8 and stuff * Some code was recently added to glance to allow the is\_public filter to be overridden. This allows us to get all images and filter properly on the nova side until keystone support is in glance. This fixes the issue with private images and snapshots disappearing from the image list * pep8 * Merged with trunk which includes ha-net changes * Updated the compute API so that has\_finished\_migration uses instance\_uuid. 
Fixes some regressions with 1295-1296 * Updating the /images and /images/detail OSAPI v1.1 endpoints to match spec w/ regards to query params * Ensure valid json/xml/atom responses for versions requests * Update OSAPI v1.1 /flavors, /flavors/detail, and /flavors/ to return correct xml responses * Renamed the virt driver resize methods to migration for marginally more understandable code * allow 2 dns servers to be specified on network create * allow 2 dns servers to be specified on network create * Fixes lp813006 * Fixes lp808949 - "resize doesn't work with recent novaclient" * minor fix * Some broken tests from my other merge * Fixed import issue * added tests, updated pep8 fixes * Changed test\_live\_migration\_raises\_exception to use mock for compte manager method * fixed another issue with invalid flavor\_id parsing, and added tests * minor cleanup * pep8 issue * cleanup * merge with trunk * Fixed the localization unit test error in the vif driver logging * cleanup tests and fix pep8 issues * removed vif API extension * Fixed Xenapi unit test error of test\_rescue * Slight indentation change * Merged Dan Wendlandt's branch and fixed pep8 errors * Added call to second coverage invocation * Fixed an issue where was invoked before it was defined in the case of a venv * - Add 'fixed\_ipv6' property to VirtualInterface model - Expose ipv6 addresses in each network in OSAPI v1.1 * forgot to add xenapi/vif.py * Perform fault wrapping in the openstack WSGI controller. This allows us to just raise webob Exceptions in OS API controllers with the appropriate explanations set. This resolves some inconsistencies with exception raising and returning that could cause HTML output to occur when an exception was raised * Added LimitsXMLSerializer Added LimitsViewBuidlerV11Test test case * Added create\_vlan/bridge in network unit test * Add OpenStack API support for block\_device\_mapping * Changed the default of VIF driver * Fixed PEP8 issues * Combined bridige and vlan VIF driver to allow better transition for current Nova users * Merged trunk * Merged lp:~~danwent/nova/network-refactoring * Adds HA networking (multi\_host) option to networks * CHanges based on feedback * Older Windows agents are very picky about the data sent to it. It also requires the public key for the password exchange to be in a string format and not an integer * adding flavors xml serialization * added versions list atom test and it passes * Set the status\_int on fault wrapped exceptions. Fixes WSGI logging issues when faults are returned * Fix plus passing tests * remove debug prints * merge ryu's branch * update for ryu's naming changes, fix some bugs. tested with OVSDriver only so far * Fixes bug #807764. Please disregard previous proposal with incorrect bug # * Whoops * Added LP bug num to TODO * Split tests into 2 * Fix email address in Author * Make sure reset\_network() call happens after we've determined the agent is running * pep8 * Merged trunk * Added Dan Wendlandt to Authors, and fixed failing network unit tests * merged trunk * Made all but one test pass for libvirt * Moved back allow\_project\_net\_traffic to libvirt conn * Set the status\_int on fault wrapped exceptions. 
Fixes WSGI logging issues when faults are returned * lp812489: better handling of periodic network host setup to prevent exception * add smoketests to verify image listing * default image to private on register * correct broken logic for lxc and uml to avoid adding vnc arguments (LP: #812553) * Stupid merge and fixed broken test * Most of the XenServer plugin files need the execute bit set to run properly. However, they are inconsistent as it is, with one file having the execute bit set, but the another having it set when it is not needed * Made the compute unit tests to pass * Host fix * Created \_get\_instance\_nw\_info method to clean up duplicate code * initial changes for application/atom+xml for versions * Update Authors file * network api release\_floating\_ip method will now check to see if an instance is associated to it, prior to releasing * merge from lp:~midokura/nova/network-refactoring-l2 * Corrects a bad model lookup in nova-manage * correct indentation * Fixes lp809587 * Fix permissions for plugins * Ya! Apparently sleep helps me fix failing tests * Some older windows agents will crash if the public key for the keyinit command is not a string * added 'update' field to versions * First attempt at vmware API VIF driver integration * Removed unnecessary context parameter * Merged get\_configurations and plug of VIF drivers * Moved ensure\_vlan\_bridge of vmware to VIF driver * Added network\_info parameter to all the appropriate places in virt layers and compute manager * remove xenapi\_net.py from network directory, as this functionality is now moved to virt layer * first cut of xenserver vif-plugging, some minor tweaks to libvirt plugging * Refactor device type checking * Modified alias ^Cd minor fixes * Merged with trunk * Reverted to original code, after network binding to project code is in integration code for testing new extension will be added * Fixed broken unit testcases after adding extension and minor code refactoring * Added a new extension instead of directly making changes to OS V1.1. API * have to use string 'none' and add a note * tell glance to not filter out private images * updated links to use proper atom:link per spec * Renamed setup\_vif\_network to plug\_vif * Fixes lp813006 - inconsistent DB API naming * move import network to the top * Merged lp:~danwent/nova/network-refactoring-l2 * merged from trunk * network api release\_floating\_ip method checks if an instance associated to the floating prior to releasing. added test * Added detroy\_vif\_network * Functionality fixed and new test passing * Updates to the compute API and manager so that rebuild, reboot, snapshots, and password resets work with the most recent versions of novaclient * better handling of periodic network host setup * Merged trunk * Removed blank lines * Fix unchecked key reference to mappings['gateway6']. Fixes LP #807764 * add downgrade * correct broken logic for lxc and uml to avoid adding vnc arguments (LP: #812553) * Beginnings of the patch * Fixed equality comparison bug in libvirt XML * Fixed bad parameters to setup\_vif\_networks * Zapped an extra newline * Merged with trunk * Add support for generating local code coverage report * respecting use\_ipv6 flag if set to False * merged trunk * merged trunk * fixed reviewer's comment. 1. ctxt -> context, 2. 
erase unnecessary exception message from nova.sccheduler.driver * cleanup * merge of ovs L2 branch * missed the vpn kwarg in rpc * fix bad merge * change migration number * merged trunk * This change adds the basic boot-from-volume support to the image service * Fixed the broken tests again * Merging from upstream * Some missed instance\_id casts * pep8 cleanup * adding --fixes * adding fixed\_ipv6 property to VirtualInterface model; exposing ipv6 in api * VSA schedulers reorg * Merged with trunk * fix issues that were breaking vlan mode * fixing bad lookup * Updates to the XenServer agent plugin to fix file injection: * Don't jsonify the inject\_file response. It is already json * localization changes. Removed vsa params from volume cloud API. Alex changes * Added auth info to XML * returncode is an integer * - Fixed the conflift in vmops.py * Check returncode in get\_agent\_features * resolved pep8 issues * merged from trunk * Updated servers to choose XML serializer based on api version * pep8 * updated servers to use ServerXMLSerializer * added 'create' to server XML serializer * added 'detail' to server XML serializer * convert group\_name to string, incase it's a long * nova/api/ec2/cloud.py: Rearranged imports to be alphabetical as per HACKING * pep8'd * Extended test to check for error specific error code and test cover for bad chars * Some basic validation for creating ec2 security groups. (LP: #715443) * changed to avoid localization test failure * Initial test case proving we have a bug of, ec2 security group name can exceed 255 chars * added index to servers xml serializer * Change \_agent\_has\_method to \_get\_agent\_features. Update the inject files function so that it calls \_get\_agent\_features only once per injected file * pep8 * Moved Metadata Serialization Test * Added ServerXMLSerializer with working 'show' method Factored out MetadataXMLSerializer from images and servers into common * added missing drive\_types.py * added missing instance\_get\_all\_by\_vsa * merged with 1280 * VSA: first cut. merged with 1279 * Added some unit and integration tests for updating the server name via the openstack api * renamed priv method arg\_to\_dict since it's not just used for revoke. modified to conform to latest AWS EC2 API spec for authorize & revoke ingress params using the IpPermissions data structure, which nests lists of CIDR blocks (IpRanges) as well as lists of Group data * got rid of return\_server\_with\_interfaces and added return\_server\_with\_attributes * Added ServerXMLSerializationTest * take out print statements * Ensures a bookmark link is returned in GET /images. Before, it was only returned in GET /images/detail * One last nit * Tests passing again * put maxDiff in setUp * remove get\_uuid\_from\_href and tests * stop using get\_uuid\_from\_href for now * Updated with some changes from manual testing * Updates to the XenServer agent plugin to fix file injection: * merging trunk * use id in links instead of uuid * pep8 fixes * fix ServersViewBuilderV11Tests * Adds greater configuration flexibility to rate limiting via api-paste.ini. 
In particular: * return id and uuid for now * merge with trunk * Adds distributed scheduler and multinic docs to the Developer Reference page * Added more view builder tests * merged wills revisions * Added ViewBuilderV11 tests Fixed bug with build detail * fix issues with uuid and old tests * - Present ip addresses in their actual networks, not just a static public/private - Floating ip addresses are grouped into the networks with their associated fixed ips - Add addresses attribute to server entities * Update the agent plugin so that it gets 'b64\_contents' from the args dict instead of 'b64\_file' (which isn't what nova sends) * Adding unit and integration tests for updating the server name via the 1.1 api * merge with trunk, resolve conflicts * remove argument help from docstrings + minor fix * Fixes Bug #810149 that had an incomplete regex * Existing Windows agent behaves differently than the Unix agents and require some workarounds to operate properly. Fixes are going into the Windows agent to make it behave better, but workarounds are needed for compatibility with existing installed base * Add possibility to call commands without subcommands * fix redundency * Updated Authors * Fixed remove\_version\_from\_href Added tests * mistakenly commited this code into my branch, reverting it to original from trunk * Merged with trunk and fixed pep errors * added integrated unit testcases and minor fixes * First pass * corrected catching NoNetworksDefined exception in host setup and getting networks for instance * catching the correct exception * Added ServersTestv1\_1 test case Changed servers links to use uuid instead of id * pep8 * Updated old tests * add support to write to stdout rather than file if '-' is specified. see bug 810157 * merging trunk * removed self links from flavors * added commands * exposing floating ips * updated image entity for servers requests * Update the agent plugin so that it gets 'b64\_contents' from the args dict instead of 'b64\_file' (which isn't what nova sends) * Use assertRaises instead of try/except--stupid brain-o * Added progress attribute to servers responses * fixing bad merge * pull-up from trunk, while we're at it * Comment on parse\_limits(); expand an exception message; add unit tests; fix a minor discovered bug * adding bookmark to images index * add updated and created to servers detail test, and make it work * removing mox object instantiation from each test; renaming \_param to filter\_name * add self to authors * use 'with' so that close is called on file handle * adding new query parameters * support '-' to indicate stdout in nova-manage project 'environment' and 'zip' * Improvements to nova-manage: 1. nova-manage network list now shows what belongs to what project, and what's the vlan id, simplifying management in case of several networks/projects 2. nova-manage server list [zone] - shows servers. 
Useful if you have many servers and want to list them in particular zone, instead of grep'ing nova-manage service list * Minor fixes * Merged with Trunk * updated to support and check for flavor links in server detail response * Updated responses for GET /images and GET /images/detail to respect the OSAPI v1.1 spec * merge * beginning server detail spec 1.1 fixup * Augment rate limiting to allow greater flexibility through the api-paste.ini configuration * merge from trunk * added unit testcases for validating the requested networks * Extends the exception.wrap\_exception decorator to optionally send an update to the notification system in the event of a failure * trunk merge * merging trunk * updating testing; simplifying instance-level code * pep8 * adding test; casting instance to dict to prevent sqlalchemy errors * merged branch lp:~rackspace-titan/nova/images-response-formatting * Add multinic doc and distributed scheduler doc to developer guide front page * merged trunk * Don't pop 'vpn' on kwargs inside a loop in RPCAllocateFixedIP.\_allocate\_fixed\_ips (fixes KeyError) * Added Mohammed Naser to Authors file * merge with trunk * fix reviewer's comment * Starting part of multi-nic support in the guest. Adds the remove\_fixed\_ip code, but is incomplete as it needs the API extension that Vek is working on * Don't pop 'vpn' on kwargs inside a loop in RPCAllocateFixedIP.\_allocate\_fixed\_ips (fixes KeyError's) * added unit test cases and minor changes (localization fix and added fixed\_ip validation) * Made sure the network manager accepts kwargs for FlatManager * Fix bug 809316. While attempting to launch cloudpipe instance via 'nova-manage vpn run' command, it comes up with IP from instances DHCP pool and not the second IP from the subnet, which break the forwarding rules that allow users to access the vpn. This is due 'allocate\_fixed\_ip' method in VlanManager doesn't receive 'vpn' as an argument from caller method and cloudpipe instances always considers as 'common' instances * cleanup * server create deserialization functional and tested * added xml deserialization unit test cases and fixe some pep errors * Updated some common.py functions to raise ValueErrors instead of HTTPBadRequests * Renamed 'nova-manage server list' -> 'nova-manage host list' to differentiate physical hosts from VMs * Allowed empty networks, handled RemoteError properly, implemented xml format for networks and fixed broken unit test cases * minor cleanup * Updated ImageXMLSerializer to serialize links in the server entity * Updated images viewbuilder to return links in server entity * updated images tests * merged trunk * pep8 * Updated remove\_version\_from\_href to be more intelligent Added tests * Fix PEP8 for 809316 bugfix * Fix 809316 bug which prevent cloudpipe to get valid IP * fix reviewer's comment * stray debug * pep8 * fixed marshalling problem to cast\_compute.. * fixed all failed unit test cases * This doesn't actually fix anything anymore, as the wsgi\_refactor branch from Waldon took care of the issue. 
However, a couple rescue unit tests would have caught this originally, so I'm proposing this to include those * fixes an issue where network host fails to start because a NoNetworksFound exception wasn't being handled correctly * Bad test * unknowingly made these changes, reverting to original * catch raise for networks not found in network host and instance setup * Merged with Trunk * add optional parameter networks to the Create server OS API * Changed broken perms * Tests * Made xen plugins rpm noarch * Set the proper return code for server delete requests * Making the xen plugins rpm to be noarch * merging trunk * Expanding OSAPI wsgi module to allow handling of headers and status codes * Updates some of the extra scripts in contrib and tools to current versions * updating code to implement tests * merging parent wsgi-refactor * allowing controllers to return Nonew * adding headers serializer * pep8 * minor refactoring * minor tweaks * Adds an extension which makes add\_fixed\_ip() available through an OpenStack extension * Comment out these two asserts; Sandy will uncomment in his merge-prop * Fix the bug 800759 * merging wsgi-refactor * adding 204 response code * pre trunk merge * Missing Author updated * Allows for ports in serverRef in image create through the openstack api * Adds security groups to metadata server. Also adds some basic tests for metadata code * fix comments * fix conflict * Added vif OS API extension to get started on it * Moved 'setup\_compute\_network' logic into the virt layer * Added myself to authors file * Fixed two typos in rescue API command * flaw in ec2 cloud api, \_get\_image method , if doing a search for aki-0000009, yet that image name doesn't exist, it strips off aki- and looks for any image\_id 0000009 and if there was an image match that happens to be an ami instead of aki, it will go ahead and deregister the ami instead. That behavior is unintended, so added logic to ensure that the original request image\_id matches the type of image being returned from database by matching against container\_format attr * Fixed up an incorrect key being used to check Zones * merged trunk * fix tests * make sure that old networks get the same dhcp ip so we don't break existing deployments * cleaned up on set network host to \_setup\_network and made networks allocate ips dynamically * Make the instance migration calls available via the API * Add a flag to disable ec2 or osapi * Add a flag to disable ec2 or osapi * refactor * easing up content-type restrictions * peer review fix - per vish: 'This method automatically converts unknown formats to ami, which is the same logic used to display unknown images in the ec2 api. This will allow you to properly deregister raw images, etc.' * Updated resize docstring * removing Content-Length requirement * Add docstrings for multinic extension * Add support for remove\_fixed\_ip() * Merged trunk * pull-up from trunk * Added unit tests * First take at migrations * Fixes bug #805604 "Multiprocess nova-api does not handles SIGTERM correctly." 
* image/fake: added teardown method * Updated mailmap due to wrong address in commit message * tests/test\_cloud: make an unit test, test\_create\_image, happy * nova/compute/api.py: fixed mismerge * ec2 api \_get\_image method logic flaw that strips the hex16 digit off of the image name, and does a search against the db for it and ignores that it may not be the correct image, such as if doing a search for aki-0000009, yet that image name doesn't exist, it strips off aki- and looks for any image\_id 0000009 and if there was an image match that happens to be an ami instead of aki, it will go ahead and deregister that. That behavior is unintended, so added logic to ensure that the original request image\_id matches the type of image being returned from database by matching against container\_format attr * sqlalchemy/migrate: resolved version conflict * merge with trunk * pull-up from trunk * unit test suite for the multinic extension * pull-up from trunk * Added server entity to images that only has id * Merging issues * Updated \_create\_link\_nodes to be consistent with other create\_\*\_nodes * Changed name of xml\_string to to\_xml\_string * Merging issuse * Temporarily moved create server node functionality into images.py Temporarily changed image XML tests to expect server entities with only ids * Removed serverRef from some tests and viewbuilder * Comments for bugfix800759 and pep8 * Removed bookmark link from non detailed image viewbuilder * implemented clean-up logic when VM fails to spawn for xenapi back-end * Adds the os-hosts API extension for interacting with hosts while performing maintenance. This differs from the previous merge prop as it uses a RESTful design instead of GET-based actions * Added param to keep current things from breaking until we update all of the xml serializers and view builders to reflect the current spec * Fixes Bug #805083: "libvirtError: internal error cannot determine default video type" when using UML * Dried up images XML serialization * Dried up images XML serialization * stricter zone\_id checking * trunk merge * cleanup * Added image index * pep8 fixes * Comments Incorporated for Bug800759 * Added API and supporting code for rebooting or shutting down XenServer hosts * fixed image create response test * Updated test\_detail * Merged trunk * make server and image metadata optional * Updated the links container for flavors to be compliant with the current spec * pep8 * Renamed function * moved remove\_version to common.py * unit tests * progress and server are optional * merged trunk * Add a socket server responding with an allowing flash socket policy for all requests from flash on port 843 to nova-vncproxy * pep8 compliance * Pull-up from trunk (post-multi\_nic) * changed calling signature to be (instance\_id, address) * correct test\_show * first round * removed extra comment * Further test update and begin correcting serialization * Removed a typo error in libvirt connection.py * updated expected xml in images show test to represent current spec * pep8 fixes * Added VIF driver concept * Added the missing 'self' parameter * after trunk merge * Changed the exception type for invalid requests to webob.exc.HTTPBadRequest * Added net\_attrs argument for ensure\_bridge/vlan methods * Added a L2 network driver for bridge/vlan creation * wrap list comparison in test with set()s * slightly more fleshed out call path * merged trunk * merge code i'd split from instance\_get\_fixed\_addresses\_v6 that's no longer needed to be split * fix metadata test 
since fixed\_ip searching now goes thru filters db api call instead of the get\_by\_fixed\_ip call * clean up compute\_api.get\_all filter name remappings. ditch fixed\_ip one-off code. fixed ec2 api call to this to compensate * clean up OS API servers getting * rename \_check\_servers\_options, add some comments and small cleanup in the db get\_by\_filters call * pep8 fix * convert filter value to a string just in case before running re.compile * add comment for servers\_search\_options list in the OS API Controllers * pep8 fixes * fix ipv6 search test and add test for multiple options at once * test fixes.. one more to go * resolved conflict incorrectly from trunk merge * merged trunk * doc string fix * fix OS API tests * test fixes and typos * typos * cleanup checking of options in the API before calling compute\_api's get\_all() * a lot of major re-work.. still things to finish up * merged trunk * remove debug from failing test * remove faults.Fault wrapper on exceptions * rework OS API checking of search options * merged trunk * missing doc strings for fixed\_ip calls I renamed * clarify a couple comments * test fixes after unknown option string changes * minor fixups * merged trunk * pep8 fixes * test fix for renamed get\_by\_fixed\_ip call * ec2 fixes * added API tests for search options fixed a couple of bugs the tests caught * allow 'marker' and 'limit' in search options. fix log format error * another typo * merged trunk * missed power\_state import in api fixed reversed compare in power\_state * more typos * typos * flavor needs to be converted to int from query string value * add image and flavor searching to v1.0 api fixed missing updates from cut n paste in some doc strings * added searching by 'image', 'flavor', and 'status' reverted ip/ip6 searching to be admin only * compute's get\_all should accept 'name' not 'display\_name' for searching Instance.display\_name. Removed 'server\_name' searching.. Fixed DB calls for searching to filter results based on context * Refactored OS API code to allow checking of invalid query string paremeters and admin api/context to the index/detail calls. v1.0 still ignores unknown parameters, but v1.1 will return 400/BadRequest on unknown options. admin\_api only commands are treated as unknown parameters if FLAGS.enable\_admin\_api is False. If enable\_admin\_api is True, non-admin context requests return 403/Forbidden * clean up checking for exclusive search options fix a cut n paste error with instance\_get\_all\_by\_name\_regexp * merged trunk * python-novaclient 2.5.8 is required * fix bugs with fixed\_ip returning a 404 instance searching needs to joinload more stuff * added searching by instance name added unit tests * pep8 fixes * Replace 'like' support with 'regexp' matching done in python. Since 'like' would result in a full table scan anyway, this is a bit more flexible. 
Make search options and matching a little more generic Return 404 when --fixed\_ip doesn't match any instance, instead of a 500 only when the IP isn't in the FixedIps table * start of re-work of compute/api's 'get\_all' to handle more search options * Silence warning in case tests.sqlite doesn't exist * fix libvirt test * update tests * don't set network host for multi\_host networks * add ability to set multi\_host in nova-manage and remove debugging issues * filter the dhcp to only respond to requests from this host * pass in dhcp server address, fix a bunch of bugs * PEP8 passed * Formatting fix * Proper Author section insertion (thx Eldar) * Signal handler cleanup, proper ^C handling * copy paste * make sure to filter out ips associated by host and add some sync for allocating ip to host * fixed zone id check * it is multi\_host not multi\_gateway * First round of changes for ha-flatdhcp * Updated the plugin to return the actual enabled status instead of just 'true' or 'false' * UML doesnt do vnc as well * fixed a bug which prevents suspend/resume after block-migration * Gracefull shutdown of nova-api * properly displays addresses in each network, not just public/private; adding addresses attribute to server entities * Gracefull shutdown of nova-api * Removing import of nova.test added to nova/\_\_init.py\_\_ as problem turned out to be somewhere else (not in nova source code tree) * Fixing weird error while running tests. Fix required patching nova/tests/\_\_\_init\_\_.py explictly importing nova.test * Added missing extension file and tests. Also modified the get\_host\_list() docstring to be more accurate about the return value * Silence warning in case tests.sqlite doesn't exist * Fix boot from volume failure for network block devices * Improvements to nova-manage: network list now includes vlan and projectID, added servers list filtered by zone if needed * removed unneeded old commented code * removed more stray debug output * removed debugging output * after trunk merge * Updated unit tests * remove logging statement * Found some additional fixed\_ip. entries in the Intance model contest that needed to be updated * use url parse instead of manually splitting * Changed fixed\_ip.network to be fixed\_ips.network, which is the correct DB field * Added the GroupId param to any pertinent security\_group methods that support it in the official AWS API * Removes 'import IPy' introduced in recent commit * removing IPy import * trunk merge * Fixed the case where an exception was thrown when trying to get a list of flavors via the api yet there were no flavors to list * fix up tests * tweak * review fixes * completed api changes. still need plugin changes * Update the fixed\_ip\_disassociate\_all\_by\_timeout in nova.db.api so that it supports Postgres. Fixes casting errors on postgres with this function * after trunk merge * Fixes MANIFEST.in so that migrate\_repo/versions/\*.sql files are now included in tarball * Include migrate\_repo/versions/\*.sql in tarball * Ensure auto-delete is false on Topic Queues * refactored the security\_group tests a bit and broke up a few of them into smaller tests * Reverses the self.auto\_delete = True that was added to TopicPublisher in the bugfix for lp804063. 
That bugfix should have only added auto_delete = True to FanoutPublisher to match the previous change to FanoutConsumer * Added 'self.auto_delete = True' to the two Publisher subclasses that lacked that setting * Added the '--fixes' tag to link to bug * Added self.auto_delete = True to the Publisher subclasses that did not have that set * added multi-nic support * osapi test_servers fixed_ip -> fixed_ips * updated osapi 1.0 addresses view to work with multiple fixed ips * trunk merge with migration renumbering * Allows subdirectory tests to run even if sqlite database doesn't exist * fix bug 800759 * Child Zone Weight adjustment available when adding Child Zones * trunk merge * blah * merge trunk * merged trunk * Windows instances will often take a few minutes setting up the image on first boot and then reboot. We should be more patient for those systems as well as check if the domid changes so we can send agent requests to the current domid * These changes eliminate the dependency between hostname and ec2-id. As I understand, there was already no such dependency, but we still had confusing names in code. Also I added more sophisticated generation of the default hostname to give the user the possibility to set a custom one * updated images * updated servers * refactored flavors viewbuilder * fixes lp:803615 * added FlavorRef exception handling on create instance * refactored instance type code * Update the ec2 get_metadata handler so it works with the most recent version of the compute API get_all call which now returns a list if there is only a single record * - add metadata container to /images/detail and /images/ responses - update xml serialization to encode image entities properly * merging trunk * PEP8 fix * Adapt flash socket policy branch to new nova/wsgi.py refactoring * clean up * Update the ec2 get_metadata handler so it works with the most recent version of the compute API get_all call which now returns a list if there is only a single record * trunk merge * pep8 * pep8 * done and done * Update the fixed_ip_disassociate_all_by_timeout in nova.db.api so that it supports Postgres. Fixes casting errors on postgres with this function * phew ... working * compute_api.get_all should be able to recurse zones (bug 744217). Also, allow building more than one instance at once with zone_aware_scheduler types. Other cleanups with regards to zone aware scheduler.. * Updated v1.1 links in flavors to represent the current spec * fix issue of recurse_zones not being converted to bool properly add bool_from_str util call add test for bool_from_str slight rework of min/max_count check * fixed incorrect assumption that nullable defaults to false * removed port_id from virtual interfaces and set network_id to nullable * changes a few instance refs * merged trunk * Rename one use of timeout to expiration to make the purpose clearer * pulled in koelkers test changes * merge with trey * major refactor of the network tests for multi-nic * Merged trunk * Fixes Bug #803563 by changing how nova passes options in to glance. Before, if limit or marker were not set, we would pass limit=0 and marker=0 in to glance. However, marker is supposed to be an image id. With this change, if limit or marker are not set, they are simply not passed into glance.
Glance is free then to choose the default behavior * Fixed indentation issues Fixed min/max\_count checking issues Fixed a wrongly log message when zone aware scheduler finds no suitable hosts * Fixes Bug #803563 by changing how nova passes options in to glance. Before, if limit or marker were not set, we would pass limit=0 and marker=0 in to glance. However, marker is supposed to be an image id. With this change, if limit or marker are not set, they are simply not passed into glance. Glance is free then to choose the default behavior * Sets 'exclusive=True' on Fanout amqp queues. We create the queues with uuids, so the consumer should have exclusive access and they should get removed when done (service stop). exclusive implies auto\_delete. Fixes lp:803165 * don't pass zero in to glance image service if no limit or marker are present * more incorrect list type casting in create\_network * removed the list type cast in create\_network on the NETADDR projects * renumbered migrations again * Make sure test setup is run for subdirectories * merged trunk, fixed the floating\_ip fixed\_ip exception stupidity * trunk merge * "nova-manage vm list" was still referencing the old "image\_id" column, which was renamed to "image\_ref" at revision 1144 * Implement backup with rotation and expose this functionality in the OS API * Allow a port name in the server ref for image create * Fanout queues use unique queue names, so the consumer should have exclusive access. This means that they also get auto deleted when we're done with them, so they're not left around on a service restart. Fixes lp:803165 * pep8 fix * removed extra stubout, switched to isinstance and catching explicit exception * get latest branch * Deprecate -r for run\_tests.sh and adds -n, switching the default back to recreate * check\_domid\_changes is superfluous right now since it's only used when timeout is used. So simplify code a little bit * updated pip-requires for novaclient * Merged trunk * pip requires * adopt merge * clean up logging for iso SR search * moved to wrap\_exception approach * Fix 'undefined name 'e'' pylint error * change the default to recreate the db but allow -n for faster tests * Fix nova-manage vm list * Adding files for building an rpm for xenserver xenapi plugins * moved migration again & trunk merge * Brought back that encode under condition * Add test for hostname generation * Remove unnessesary (and possibly failing) encoding * Fix for bug 803186 that fixes the ability for nova-api to run from a source checkout * moved to wrap\_exception decorator * Review feedback * Merged trunk * Put possible\_topdir back in nova-api * Use milestone cut * Merged trunk * Let glance handle sorting * merging trunk * Review feedback * This adds system usage notifications using the notifications framework. 
These are designed to feed an external billing or similar system that subscribes to the nova feed and does the analysis * Refactored usage generation * pep8 * remove zombie file * remove unecessary cast to list * merge with trey * OOPS * Whoops * Review feedback * skipping another libvirt test * Fix merge issue in compute unittest * adding unicode support to image metadata * Fix thinko in previous fix :P * change variable names to remove future conflict with sandy's zone-offsets branch * Fix yet more merge-skew * merge with trey * This branch allows LdapDriver to reconnect to LDAP server if connection is lost * Fix issues due to renming of imange\_id attrib * Re-worked some of the WSGI and WSGIService code to make launching WSGI services easier, less error prone, and more testable. Added tests for WSGI server, new WSGI loader, and modified integration tests where needed * Merged trunk * update a test docstring to make it clear we're testing multiple instance builds * log formatting typo pep8 fixes * Prevent test case from ruining other tests. Make it work in earlier python versions * pep8 fix * I accidently the whole unittest2 * Adds support for "extra specs", additional capability requirements associated with instance types * refactoring to compute from scheduler * remove network to project bind * resync with trunk * Add test for spawn from an ISO * Add fake SR with ISO content type * Revise key used to identify the SR used to store ISO images streamed from Glance * remerged trunk * Fix pep8 nits in audit script * Re-merging code for generating system-usages to get around bzr merge braindeadness * getting started * Added floating IP support in OS API * This speeds up multiple runs of tests to start up much faster because it only runs db migrations if the test db doesn't exist. It also adds the -r/--recreate-db option to run\_tests.sh to delete the tests db so it will be recreated * small formatting change * breaking up into individual tests for security\_groups * Proposing this because it is a critical fix before milestone. Suggestions on testing it are welcome * logging fixes * removed unneded mac parameter to lease and release fixed ip functions * Made \_issue\_novaclient\_command() behave better. Fixed a bunch of tests * Review feedback * merge with trey * trunk merge, getting fierce. * Merged trunk * Added nova.version to utils.py * - Modified NOTE in vm\_util.py - Changed gettext line to nova default in guest\_tool.py * renaming tests * make sure basic filters are setup on instance restart * typo * changed extension alias to os-floating-ips * missed the bin line * Updating license to ASL 2.0 * update nova.sh * make nova-debug work with new style instances * Changed package name to openstack-xen-plugins per dprince's suggestion. All the files in /etc/xapi.d/plugins must be executable. Added dependency on parted. 
Renamed build.sh to build-rpm.sh * remove extra stuff from clean vlans * Clarify help verbiage * making key in images metadata xml serialization test null as well * making image metadata key in xml serialization test unicode * extracting images metadata xml serialization tests into specific class; adding unicode image metadata value test * merged blamar's simpler test * Pulled changes, passed the unit tests * Pulled trunk, merged boot from ISO changes * Removed now un-needed fake\_connection * Use webob to test WSGI app * fixed pep style * review issues fixed * sqlalchmey/migration: resolved version conflict * merge with trunk * Adding files for building an rpm for xenserver xenapi plugins * Upstream merge * merging trunk; adding error handling around image xml serialization * adding xml serialization test of zero images * pep8 * add metadata tests * add fake connection object to wsgi app * add support to list security groups * only create the db if it doesn't exist, add an option -r to run\_tests.py to delete it * Fix for bug #788265. Remove created\_at, updated\_at and deleted\_at from instance\_type dict returned by methods in sqlalchemy API * PEP8 fix * pep8 * Updated \_dict\_with\_extra\_specs docstring * Renamed \_inst\_type\_query\_to\_dict -> \_dict\_with\_extra\_specs * Merged from trunk * Add api methods to delete provider firewall rules * This small change restores single quotes and double quotes as they were before in the filter expression for retrieving the PIF (physical interface) xenapi should use for creating VLAN interfaces * Remove the unnecessary insertion of whitespace. This happens to be enough to make this patch apply on recent versions of XenServer / Xen Cloud Platform * Removes the usage of the IPy module in favor of the netaddr module * - update glance image fixtures with expected checksum attribute - ensure checksum attribute is handled properly in image service * mailmap * mailmap * configure number of attempts to create unique mac address * merged * trunk merged. conflicts resolved * added disassociate method to tests * fixes * tests * PEP8 cleanup * parenthesis issue in the migration * merge * some tests and refactoring * Trunk merge fixes * Merging trunk * implement list test * some tests * fix tests for extensions * Fixed snapshot logic * PEP8 cleanup * Refactored backup rotate * conflict resolved * stub tests * add stubs for flating api os api testing * merge with kirill * associate diassociate untested, first attept to test * Pep8 fix * Adding tests for backup no rotation, invalid image type * Fixed the default arguments to None instead of an empty list * Fixing PEP8 compliance issues * Trailing whitespace * Adding tests for snapshot no-name and backup no-name * Edited the host filter test case for extra specs * Removed an import * Merged from trunk * Remove extra debug line * Merged with trunk * Add reconnect test * Use simple\_bind\_s instead of bind\_s * Add reconnect on server fail to LDAP driver * ec2/cloud: typo * image/s3: typo * same typo i made before! * on 2nd run through filter\_hosts, we've already accounted for the topic memory needs converted to Bytes from MB * LeastCostScheduler wasn't checking for topic cost functions correctly. Added support so that --least\_cost\_scheduler\_cost\_functions only needs to have method names specified, instead of the full blown version with module and class name. 
Still works the old way, too * requested\_mem typo * more typos * typo in least cost scheduler * Unwind last commit, force anyjson to use our serialization methods * debug logging of number of instances to build in scheduler * missed passing in min/max\_count into the create/create\_all\_at\_once calls * Dealing with cases where extra\_specs wasn't defined * pep8 fixes * Renamed from flavor\_extra\_specs to extra\_specs * All tests passing * missed passing an argument to consume\_resources * Committing some broken code in advance of trying a different strategy for specifying args to extensions.ResoruceExtensions, using parent * Starting to transition instance type extra specs API to an extension API * Now automatically populates the instance\_type dict with extra\_specs upon being retrieved from the database * pep8 * Created Bootstrapper to handle Nova bootstrapping logic * alter test, alter some debug statements * altered some tests * freakin migration numbering * trunk merge * removing erroneous block, must've been a copy and paste fat finger * specify keyword, or direct\_api proxy method blows up * updated the way vifs/fixed\_ips are deallocated and their relationships, altered lease/release fixed\_ip * Fixed syntax errors * This adds a way to create global firewall blocks that apply to all instances in your nova installation * Accept a full serverRef to OSAPI POST /images (snapshot) * Cast rotation to int * PEP8 cleanup * Fixed filter property and added logging * added tests * Implemented view and added tests * Adding missing import * Fixed issue with zero flavors returning HTTP 500 * Adding dict with single 'meta' key to /imgages//meta/ GET and PUT * fixing 500 error on v1.0 images xml * Small refactoring around getting params * libvirt test for deleting provider firewall rules * Make firewall rules tests idempotent, move IPy=>netaddr, add deltete test * merge from trunk * altho security\_group authorize & revoke tests already exist in test\_api, adding some direct ec2 api method tests. 
added group\_id param support to the pertinent security group methods * Make sure there are actually rules to test against * Add test for listing provider firewall rules * pep8: remove newline at end of file * Add admin api test case (like cloud test case) with a test for fw rules * Move migration to newer version * an int() was missed being removed from UUID changes when zone rerouting kicks in * fixing 500 on None metadata value * proper xml serialization for images * "nova-manage checks if user is member of proj, prior to adding role for that project" * adding metadata container to /images/detail and /images/ calls * Add xml serialization for all /images//meta and /images//meta/ responses * trunk merge and migration bump * handle errors for listing an instance by IP address * Merged markwash's fixes * Merged list-zone-recurse * str\_GET is a property * Fixed typo * Merged trunk * minor fixups * fixes for recurse\_zones and None instances with compute's get\_all * typo * add support for compute\_api.get\_all() recursing zones for more than just reservation\_id * Change so that the flash socket policy server is using eventlet instead of twisted and is running in the same process as the main vnx proxy * ec2/cloud: address review * compute/api: an unit test for \_update\_{image\_}bdm * ec2/cloud: unit tests for parser/formatter of block device mapping * ec2/cloud: an unit test for \_format\_instance\_bdm() * ec2utils: an unit test for mapping\_prepend\_dev() * ec2: bundle block device mapping * ec2utils: introduce helper function to prepend '/dev/' in mappings * volume/api: an unit test for create\_snapshot\_force() * Add some resource checking for memory available when scheduling Various changes to d-sched to plan for scheduling on different topics, which cleans up some of the resource checking. Re-compute weights when building more than 1 instance, accounting for resources that would be consumed * Returned code to original location * Merged from trunk * run launcher first since it initializes global flags and logging * Now passing unit tests * Two tests passing * Now stubbing nova.db instead of nova.db.api * Bug fixing * Added flavor extra specs controller * Initial unit test (failing) * This catches the InstanceNotFound exception on create, and ignores it. This prevents errors in the compute log, and causes the server to not be built (it should only get InstanceNotFound if the server was deleted right after being created). 
This is a temporary fix that should be fixed correctly once no-db-messaging stuff is complete * allocate and release implementation * fixed pep8 issues * merge from trunk * image -> instance in comment * added virtual\_interface\_update method * Fixes issues with displaying exceptions regarding flavors in nova-manage * better debug statement around associating floating ips when multiple fixed\_ips exist * pep8 fixes * merging trunk * added fixed ip filtering by null virtual interface\_id to network get associated fixed ips * fixed ip gets now have floating IPs correctly loaded * reverting non-xml changes * Adding backup rotation * moving image show/update into 'meta' container * Check API request for min\_count/max\_count for number of instances to build * updated libvirt tests network\_info to be correct * fixed error * skipping more ec2 tests * skipping more ec2 tests * skipping more ec2 tests * skipping test\_run\_with\_snapshot * updated test\_cloud to set stub\_network to true * fixed incorrect exception * updating glance image fixtures with checksum attribute; fixing glance image service to use checksum attribute * Round 1 of backup with rotation * merge from trunk * fix some issues with flags and logging * Add a socket server responding with an allowing flash socket policy for all requests from flash on port 843 to nova-vncproxy * api/ec2: an unit test for create image * api/ec2, boot-from-volume: an unit test for describe instances * unittest: an unit test for ec2 describe image attribute * test\_cloud: an unit test for describe image with block device mapping * ec2utils: an unit test for ec2utils.properties\_root\_defice\_name * unittest, image/s3: unit tests for s3 image handler * image/s3: factor out \_s3\_create() for testability * ec2utils: unit tests for case insensitive true/false conversion * ec2utils: add an unit test for dict\_from\_dotted\_str() * test\_api: unit tests for ec2utils.id\_to\_ec2\_{snap, vol}\_id() * api/ec2: make CreateImage pass unit tests * volume/api: introduce create\_snapshot\_force() * api/ec2/image: make block device mapping pass unit tests * db/block\_device\_mapping/api: introduce update\_or\_create * db/migration: resolve version conflict * merge with trunk * ec2 api describe\_security\_groups allow group\_id param , added tests for create/delete security group in test\_cloud although also exists in test\_api this tests directly the ec2 method * pip-requires * pep8 * fixed zone update * Stop trying to set a body for HTTP methods that do not allow it. It renders the unit tests useless (since they're testing a situation that can never arise) and webob 1.0.8 fails if you do this * fixed local db create * omg stop making new migrations.. * trunk merge * merge from trunk * added try except around floating ip get by host in host init * This branch adds support to the xenapi driver for updating the guest agent on creation of a new instance. This ensures that the guest agent is running the latest code before nova starts configuring networking, setting root password or injecting files * renamed migrations again * merge from trunk * if we get InstanceNotFound error on create, ignore (means it has been deleted before we got the create message) * some libvirt multi-nic just to get it to work, from tushar * Removed whitespace * Fixed objectstore test * merge with trey * Very small alterations, switched from using start() to pass host/port, to just defining them up front in init. 
Doesn't make sense to set them in start because we can't start more than once any way. Also, unbroke binaries * Bump WebOb requirement to 1.0.8 in pip-requires * Oops, I broke --help on nova-api, fixed now * pep8 fix * Monkey patching 'os' kills multiprocessing's .join() functionality. Also, messed up the name of the eventlet WSGI logger * Filter out datetime fields from instance\_type * erase unnecessary TODO: statement * fixed reviewer's comment. 1. adding dest-instance-dir deleting operation to nova.compute.manager, 2. fix invalid raise statement * fix comment line * Stop trying to set a body for HTTP methods that do not allow it. It renders the unit tests useless (since they're testing a situation that can never arise) and webob 1.0.8 fails if you do this * log -> logging to keep with convention * Removed debugging and switched eventlet to monkey patch everything * Removed unneeded import * Tests for WSGI/Launcher * Remove the unnecessary insertion of whitespace. This happens to be enough to match this patch apply on recent versions of XenServer / Xen Cloud Platform * trunk merge * fix lp 798361 * Removed logging logic from \_\_init\_\_, added concept of Launcher...no tests for it yet * nova-manage checks if user is member of proj, prior to adding role for that project * Other migrations have been merged in before us, so renumber * Merged trunk * pep8 fixes * assert\_ -> assertTrue since assert\_ is deprecated * added adjust child zone test * tests working again * updated the exceptions around virtual interface creation, updated flatDHCP manager comment * more trunks * another trunk merge * This patch adds support for working with instances by UUID in addition to integer IDs * importing sqlalchemy IntegrityError * Moving add\_uuid migration to 025 * Merging trunk, fixing conflicts * Enclosing tokens for xenapi filter in double quotes * working commit * Fix objectstore test * Cleanup and addition of tests for WSGI server * Merged trunk * Check that server exists when interacting with /v1.1/servers//meta resource * No, really. Added tests for WSGI loader * Added tests for WSGI loader * nova.virt.libvirt.connection.\_live\_migration is changed * Cleanup * merged rev trunk 1198 * Introduced Loader concept, for paste decouple * fix pep8 check * fix comments at nova.virt.libvirt.connection * Cleanup of the cleanup * Further nova-api cleanup * Cleaned up nova-api binary and logging a bit * Removed debugging, made objectstore tests pass again * General cleanup and refactor of a lot of the API/WSGI service code * Adding tests for is\_uuid\_like * Using proper UUID format for uuids * Implements a portion of ec2 ebs boot. What's implemented - block\_device\_mapping option for run instance with volume (ephemeral device and no device isn't supported yet) - stop/start instance * updated fixed ip and floating ip exceptions * pep8: white space/blank lines * Merging trunk * renamed VirtualInterface exception and extend NovaException * moving instance existance logic down to api layer * Ensure os\_type and architecture get set correctly * Make EC2 update\_instance() only update updatable\_fields, rather than all fields. Patch courtesy of Vladimir Popovski * Fixes two minor bugs (lp795123 and lp795126) in the extension mechanism. The first bug is that each extension has \_check\_extension() called twice on it; this is a minor cosmetic problem, but the second is that extensions which flunk \_check\_extension() are still added. 
The proposed fix is to make \_check\_extensions() return True or False, then make \_add\_extension() call it from the top and return immediately if \_check\_extensions() returns False * Fixes a bug where a misleading error message is outputted when there's a sqlalchemy-migrate version conflict * Result is already in JSON format from \_wait\_for\_agent * Fix PEP8 * Fix for lp:796834 * Add new architecture attribute along with os\_type * bunch of docstring changes * adding check for serverRef hostname matching app url * Fix for Bug lp:796813 * Fix the volumes extension resource to have a proper prefix - /os-volumes * Fixes lp797017, which is broken as a result of a fragile method in the xenapi drivers that assumed there would only ever be one VBD attached to an instance * adding extra image service properties to compute api snapshot; adding instance\_ref property * Missed a pep8 fix * Remove thirdwheel.py and do the test with a now-public ExtensionManager.add\_extension() * Removes nova/image/local.py (LocalImageService) * Add some documentation for cmp\_version Add test cases for cmp\_version * Increased error message readability for the OpenStack API * fixing test case * Updated "get\_all\_across\_zones" in nova/compute/api.py to have "context = context.elevated()", allowing it to be run by non-admin users * merging trunk * more words * Cleaned up some pep8 issues in nova/api/openstack/create\_instance\_helper.py and nova/api/openstack/\_\_init\_\_.py * Pull-up from trunk * Add a test to ensure invalid extensions don't get added * Update xenapi/vm\_utils.py so that it calls find\_sr instead of get\_sr. Remove the old get\_sr function which by default looked for an SR named 'slices' * add vlan diagram and some text * Added context = context.elevated() to get\_all\_across\_zones * auto load table schema instead of stubbing it out * Fixed migration per review feedback * Made hostname independent from ec2 id. 
Add generation of hostnames based on display name * Fix for a problem where run_tests.sh would output a seemingly unrelated error message when there was a sqlalchemy-migrate version number conflict * stub api methods * Missed an InstanceTypeMetadata -> InstanceTypeExtraSpecs rename in register_models * Fix unittest so that it actually fails without the fix * Make $my_ip Glance's default host, not localhost * We don't check result in caller, so don't set variable to return value * Remove debugging statement * Fix lp795123 and lp795126 by making _check_extension() return True or False and checking the result only from the top of _add_extension() * Glance host defaults to $my_ip rather than localhost * Upstream merge * add in dhcp drawing * Rename: instance_type_metadata -> instance_type_extra_specs * erroneous self in virtual_interface_delete_by_instance() sqlalchemy api * Fixes a bug where a unit test sometimes fails due to a race condition * remove the network-host from the flat diagram * add multinic diagram * add the actual image * Renaming to _build_instance_get * merged trunk * returned two files to their trunk versions, odd that they were altered in the first place * Added a new test for confirming failure when no primary VDI is present * Unit tests pass again * more doc (and by more I mean like 2 or 3 sentences) * Fix copyright date * PEP8 cleanup * Attempting to retrieve the correct VDI for snapshotting * Fixing another test * Fixing test_servers_by_uuid * floating_ips extension is loading to api now * initial commit of multinic doc * generated files should not be in source control * Fixed UUID migration * Added UUID migration * Clean up docstrings to match HACKING * merge with trey * Small tweaks * Merged reldan changes * First implementation of FloatingIpController * First implementation of FloatingIpController * compute/api: fix mismerge due to instance creation change * ec2/cloud.py: fix mismerge * fix conflict with rebasing * api/ec2: support CreateImage * api/ec2/image: support block device mapping * db/model: add root_device_name column to instances table * ec2utils: consolidate 'vol-%08x' and 'snap-%08x' * api/ec2: check user permission for start/stop instances * ec2utils: consolidate 'vol-%08x' and 'snap-%08x' * api/ec2: check user permission for start/stop instances * api/ec2: check user permission for start/stop instances * Adds 'joinedload' statements where they need to be to prevent access of a 'detached' object * novaclient changed to support projectID in authentication. Caused some minor issues with distributed scheduler. This fixes them up * Add trailing LF (\n) to password for compatibility with old agents * Workaround windows agent bugs where some responses have trailing \r\n * removed commented out shim on Instance class * Windows instances will often take a few minutes setting up the image on first boot and then reboot. We should be more patient for those systems as well as check if the domid changes so we can send agent requests to the current domid * Split patch off to new branch instead * Add --fixes * First attempt to rewrite reroute_compute * syntax * Merged trunk * Windows instances will often take a few minutes setting up the image on first boot and then reboot.
We should be more patient for those systems as well check if the domid changes so we can send agent requests to the current domid * Fixed bug * Added metadata joinedloads * Prep-work to begin on reroute\_compute * specify mysql\_engine for the virtual\_interfaces table in the migration * Passed in explanation to 400 messages * Fixing case of volumes alias * The volumes resource extension should be prefixed by its alias - os-volumes * Adding uuid test * Pep8 Fixes * Fixing test\_servers.py * pep8 * Fixing private-ips test * adding server existence check to server metadata resource * Fixing test\_create\_instance * made the test\_xenapi work * test xenapi injected set to True * something else with tests * something with tests * i dont even care anymore * network\_info has injected in xenapi tests * Adding UUID test * network\_info passed in test\_xenapi, mac\_address no longer in instance values dict * added network injected to stub * added injected to network dict oportion of tuple returned by get\_instance\_nw\_info * don't provision to all child zones * network info to \_create\_vm * fix mismerge * updated xenapi\_conn finish\_resize arguments * stubbed out get\_instance\_nw\_info for compute\_test * pip novaclient bump * merge with nova trunk * fixed up some little project\_id things with new novaclient * typo * updated finish\_resize to accept network\_info, updated compute and tests in accordance * \_setup\_block\_device\_mapping: raise ApiError when db inconsistency found * db/block\_device\_mapping\_get\_all\_by\_instance: don't raise * Print list of agent builds a bit prettier * PEP8 cleanups * Rename to 024 since 023 was added already * pep8 * The Xen driver supports running instances in PV or HVM modes, but the method it uses to determine which to use is complicated and doesn't work in all cases. The result is that images that need to use HVM mode (such as FreeBSD 64-bit) end up setting a property named 'os' set to 'windows' * typo * None project\_id now default * Adds code to run\_tests.py which: * Fixing code to ensure unit tests for objectstore, vhd & snapshots pass * ec2utils: minor optimize \_try\_convert() * block\_device\_mapping: don't use [] as default argument * api/ec2: make the parameter parser an independent method * Show only if we have slow tests, elapsed only if test success * Showing elapsed time is now default * Ensuring pep8 runs even when nose optons are passed * network tests now teardown user * Removing seconds unit * network user only set if doesnt exist * net base project id now from context, removed incorrect floatnig ip host assignment * fixed instance[fixed\_ip] in ec2 api, removed fixed\_ip shim * various test fixes * Updated so that we use a 'tmp' subdirectory under the Xen SR when staging migrations. Fixes an issue where you would get a 'File exists' error because the directory under 'images' already existed (created via the rsync copy) * db fakes silly error fix * debug statements * updated db fakes * updated db fakes * Changed requests with malformed bodies to return a HTTP 400 Bad Request instead of a HTTP 500 error * updated db fakes and network base to work with virtual\_interface instead of mac\_address * Phew ... ok, this is the last dist-scheduler merge before we get into serious testing and minor tweaks. 
The heavy lifting is largely done * db fakes * db fakes * updated libvirt test * updated libvirt test * updated libvirt test * updated libvirt test * updated libvirt test * getting the test\_host\_filter.py file from trunk, mine is jacked somehow * removed extra init calls * fixed HACKING * Changed requests with malformed bodies to return a HTTP 400 Bad Request instead of a HTTP 500 error * duplicate routes moved to base class * fixed scary diff from trunk that shouldnt have been there * version passing cleanup * refactored out controller base class to use aggregation over inheritance * Move ipy commands to netaddr * merged trunk * mp fixes * Really PEP8? A tab is inferior to 2 spaces? * pep8 fix * upstream merge * Stub out the rpc call in a unit test to avoid a race condition * merged trunk rev 1178 * Making timing points stricter, only show slow/sluggish tests in summary * Improved errors * added kernel/ramdisk migrate support * Added faults wrapper * remove file that got ressurected * Cleaned up pep8 errors using the current version of pep8 located in pip-requires. This is to remove the cluttered output when using the virtualenv to run pep8 (as you should). This will make development easier until the virtualenv requires the latest version of pep8 (see bug 721867) * merge with trey * autoload with the appropriate engine during upgrade/downgrade * Created new exception for handling malformed requests Wrote tests Raise httpBadRequest on malformed request bodies * Fixed bug 796619 * Adds --show-elapsed option for run\_tests * pep8 * Alias of volumes extension should be OS-VOLUMES * Illustrations now added to Distributed Scheduler documentation (and fixed up some formatting) * Load table schema automatically instead of stubbing out * Removed clocksource=jiffies from PV\_args * Test now passes even if the rpc call does not complete on time * - fixes bug that prevented custom wsgi serialization * Removed clocksource=jiffies from PV\_args * merging trunk, fixing pep8 * pep8 * Improved tests * removing unnecessary lines * wsgi can now handle dispatching action None more elegantly * This fixes the server\_metadata create and update functions that were returning req.body (as a string) instead of body (deserialized body dictionary object). It also adds checks where appropriate to make sure that body is not empty (and return 400 if it is). Tests updated/added where appropriate * removed yucky None return types * merging trunk * trunk merge * zones image\_id/image\_href support for 1.0/1.1 * Update xenapi/vm\_utils.py so that it calls find\_sr instead of get\_sr. Remove the old get\_sr function which by default looked for an SR named 'slices' * fixed bug 796619 * merge trunk * check for none and empty string, this way empty dicts/lists will be ok * Updated so that we use a 'tmp' subdirectory under the Xen SR when staging migrations. 
Fixes an issue where you would get a 'File exists' error because the directory under 'images' already existed (created via the rsync copy) * fix method chaining in database layer to pass right parameters * Add a method to delete provider firewall rules * Add ability to list ip blocks * pep 8 whitespace fix * Move migration * block migration feature added * Reorder firewall rules so the common path is shorter * ec2 api method allocate\_address ; raises exception.NoFloatingIpsDefined instead of UnknownError when there aren't any floating ips available * in XML Serialization of output, the toprettyxml() call would sometimes return a str() and sometimes unicode(), I've forced encoding to utf-8 to ensure that we always get str(). This fixes the related bug * A recent commit added a couple of directories that don't belong in version control. Remove them again * adding support for cusom serialization methods * forgot a comma * floating ips can now move around the network hosts * A recent commit added a couple of directories that don't belong in version control. Remove them again * 'network list' prints project id * got rid of prints for debugging * small pep8 fixes * return body correctly as object instead of a string, with tests, also check for empty body on requests that need a body * adding xml support to /images//meta resource; moving show/update entities into meta container * removed posargs decorator, all methods decorated * Allows Nova to talk to multiple Glance APIs (without the need for an external load-balancer). Chooses a random Glance API for each request * forgot a comma * misc argument alterations * force utf-8 encoding on toprettyxml call for XMLDictSerializer * added new exception more descriptive of not having available floating addresses avail for allocation * raise instance instead of class * Fix copyright year * style change * Only update updateable fields * removing LocalImageService from nova-manage * rebase from trunk * decorators for action methods added * source illustrations added & spelling/grammar based on comstud's feedback * fixed reraise in trap\_error * forgot some debugging statements * trunk merge and ec2 tests fixed * Add some docstrings for new agent build DB functions * Add test for agent update * Multiple position dependent formats and internationalization don't work well together * Adding caveat * Fixing code per review comments * removed fixed\_ips virtual\_interface\_id foreignkey constraint from multi\_nic migration, and added it as a standalone migration with special sqlite files * Record architecture of image for matching to agent build later. 
Add code to automatically update agent running on instance on instance creation * Add version and agentupdate commands * Add an extension to allow for an addFixedIp action on instances * further changes * tests working after merge-3 update * 022 migration has already been added, so make ours 023 now * parse options with optparse, options prepended '--' * renamed migration again * Pull-up from multi\_nic * merged koelkers tests branch * remove file that keeps popping up * Merging trunk * Fixing the tests * matched the inner exception specifically, instead of catching all RemoteError exceptions * Support multiple glance-api servers * Merged trunk * Fix merge conflict * removing custom exception, instead using NoFloatingIpsDefined * raises exception.NoFloatingIpsDefined instead of UnknownError * Normalize and update database with used vm\_mode * added a test for allocate\_address & added error handling for api instead of returning 'UnknownError', will give information 'AllocateAddressError: NoMoreAddresses * merged trunk again * updated docstring for nova-manage network create * Now forwards create instance requests to child zones. Refactored nova.compute.api.create() to support deferred db entry creation * MySQL database tables are currently using the MyISAM engine. Created migration script nova/db/sqlalchemy/migrate\_repo/versions/021\_set\_engine\_mysql\_innodb.py to change all current tables to InnoDB * merged trunk again * Support for header "X-Auth-Project-Id" in osapi * Cleaned up some pylint errors * tweaks * PEP8 fix * removed network\_info shims in vmops * Fix for bug#794239 to allow pep8 in run\_tests.sh to use the virtual environment * adding Authorizer key for ImportPublicKey * fix exception type catched * Look for vm\_mode property on images and use that if it exists to determine if image should be run in PV or HVM mode. If it doesn't exist, fall back to existing logic * removed straggler code * trunk merge * merge trunk * pep8 * removed autogen file * added field NOVA\_PROJECT\_ID to template for future using * added tests for X-Auth-Project-Id header * fix fake driver for using string project * adding Authorizer key for ImportPublicKey * Cleaned up some of the larger pylint errors. Set to ignore some lines that pylint just couldn't understand * DRY up the image\_state logic. Fix an issue where glance style images (which aren't required to have an 'image\_state' property) couldn't be used to run instances on the EC2 controller * remove the debuging lines * remove the old stuff * tests all pass * Added virtual environment to PEP8 tests * Added test\_run\_instances\_image\_status\_active to test\_cloud * Add the option to specify a default IPv6 gateway * pep8 * Removed use of super * Added illustrations for Distributed Scheduler and fixed up formatting * Disabled pylint complaining about no 'self' parameter in a decorator function * DRY up the image\_state logic. 
Fix an issue where glance style images (which aren't required to have an 'image\_state' property) couldn't be used to run instances on the EC2 controller * Fixed incorrect error message Added missing import Fixed Typo (pylint "undefined variable NoneV") * removing local image service * Remove unnecessary docstrings * Add the option to specify a default IPv6 gateway * port the floating over to storing in a list * Make libvirt snapshotting work with images that don't have an 'architecture' property * take out the host * Removed empty init * Use IPNetwork rather than IPRange * Fixed type causing pylint "exception is not callable" Added param to fake\_instance\_create, fake objects should appear like the real object. pylint "No value passed for parameter 'values' in function call" * sanity check * run\_instances will check image for 'available' status before attempting to create a new instance * fixed up tests after trunk merge * Use True/False instead of 1/0 when setting updating 'deleted' column attributes. Fixes casting issues when running nova with Postgres * merged from trunk * Remove more stray import IPy * Dropped requirement for IPy * Convert stray import IPy * Use True/False instead of 1/0 when setting updating 'deleted' column attributes.Fixes casting issues when running nova with Postgres * Removed commented code * Added test case for snapshoting base image without architecture * Remove ipy from virt code and replace with netaddr * Remove ipy from network code and replace with netaddr * Remove ipy from nova/api/ec2/cloud.py and use netaddr * Remove ipy from nova-manage and use netaddr * This branch allows marker and limit parameters to be used on image listing (index and detail) requests. It parses the parameters from the request, and passes it along to the glance\_client, which can now handle these parameters. Essentially all of the logic for the pagination is handled in glance, we just pass along the correct parameters and do some error checking * merge from trunk, resolved conflicts * Update the OSAPI images controller to use 'serverRef' for image create requests * Changed the error raise to not be AdminRequired when admin is not, in fact, required * merge with trey * Change to a more generic error and update documentation * make some of the tests * Merged trunk * merge trunk * Ignore complaining about dynamic definition * Removed Duplicate method * Use super on an old style class * Removed extraneous code * Small pylint fixes * merge with trunk * Fixed incorrect exception * This branch removes nwfilter rules when instances are terminated to prevent resource leakage and serious eventual performance degradation. Without this patch, launching instances and restarting nova-compute eventually become very slow * merge with trunk * resolve conflicts with trunk * Update migrate script version to 22 * Added 'config list' to nova-manage. This function will output all of the flags and their values * renamed migration * trunk merge after 2b hit * Distributed Scheduler developer docs * Updated to use the '/v1/images' URL when uploading images to glance in the Xen glance plugin. Fixes the issue where snapshots fail to upload correctly * merged trunk again * added 'nova-manage config list' which will list out all of the flags and their values. I also alphabetized the list of available categories * Updated to use the '/v1/images' URL when uploading images to glance in the Xen glance plugin. 
Fixes issue where snapshots failed to get uploaded * Removed "double requirement" from tools/pip-requires file * merged koelker migration changes, renumbered migration filename * fix comment * Fixed pip-requires double requirement * Added a test case for XML serialization * Removed unused and erroneous (yes, it was both) function * paramiko is not installed into the venv, but is required by smoketests/base.py. Added paramiko to tools/pip-requires * Changes all uses of utcnow to use the version in utils. This is a simple wrapper for datetime.datetime.utcnow that allows us to use fake values for tests * Set pylint to ignore correct lines that it could not determine were correct, due to the means by which eventlet.green imported subprocess Minimized the number of these lines to ignore * LDAP optimization and fix for one small bug caused huge performance leak. Dashboard's benchmarks showed overall x22 boost in page request completion time * Adds LeastCostScheduler which uses a series of cost functions and associated weights to determine which host to provision to * Make libvirt snapshotting work with images that don't have an 'architecture' property * Add serverRef to image metadata serialization list * Fixed pylint: no metadata member in models.py * Implement OSAPI v1.1 style image create * trunk merge * little tweaks * Flush AuthManager's cache before each test * Fixed FakeLdapDriver, made it call LdapDriver.\_\_init\_\_ * Merged with trunk * This change set adds the ability to create new servers with an href that points to a server image on any glance server (not only the default one configured). This means you can create a server with imageRef = http://glance1:9292/images/3 and then also create one with imageRef = http://glance2:9292/images/1. Using the old way of passing in an image\_id still works as well, and will use the default configured glance server (imageRef = 3 for instance) * added nova\_adminclient to tools/pip-requires * merged trunk * Added paramiko to tools/pip-requires * Tests that all exceptions can be raised properly, and fix the couple of instances where they couldn't be constructed due to typos * merge trunk... yay.. * switch zones to use utcnow * make all uses of utcnow use our testable utils.utcnow * Fix error with % as replacement string * Fixing conflicts * Tests to assure all exceptions can be raised as well as fixing NotAuthorized * use %% because % is a replacement string character * some comment docstring modifications * Makes novarc work properly on a mac and also for zsh in addition to bash. Other shells are not guaranteed to work * This adds the ability to publish nova errors to an error queue * don't use python if readlink is available * Sudo chown the vbd device to the nova user before streaming data to it. This resolves an issue where nova-compute required 'root' privs to successfully create nodes with connection\_type=xenapi * Bugfix #780784. 
KeyError when creating custom image * Remove some of the extra image service calls from the OS API images controller * pep8 fixes * merge with trey * make it pass for the demo * Merged with Will * Minor comment formatting changes * got rid of more test debugging stuff that shouldnt have made it in * Remove comment about imageRef not being implemented * Remove a rogue comment * more tests (empty responses) * get\_all with reservation id across zone tests * move index and detail functions to v10 controller * got rid of prints * Refactored after review, fixed merge * image href should be passed through the rebuild pipeline, not the image id * merge from trunk * got rid of print debugs * cleanup based on waldon's comments, also caught a few other issues * missed a couple chars * Little cleanups * pep8 and all that * tests all passing again * list --reservation now works across zones * fix novarc to work on mac and zsh * merged, with trunk, fixed the test failure, and split the test into 3 as per peer review * Fixes nova-manage bug. When a nova-network host has allocated floating ips \*AND\* some associated, the nova-manage floating list would throw exception because was expecting hash with 'ec2\_id' key , however, the obj returned is a sqlalchemy obj and the attr we need is 'hostname' * start the flat network * more testing fun * fixed as per peer review to make more consistent * merged from trunk * Implement the v1.1 style resize action with support for flavorRef * Updates to the 018\_rename\_server\_management\_url migration to avoid adding and dropping a column. Just simply rename the column * Support SSL AMQP connections * small fixes * Allow SSL AMQP connections * reservation id's properly forwarded to child zones on create * merge from trunk * fix pep8 issue from merge * coose the network\_manager based on instance variable * fix the syntax * forgot a comma * This just fixes a bunch of pep8 issues that have been lingering around for a while and bothering me :) * touch ups * Updates to the 018\_rename\_server\_management\_url to avoid adding and dropping a column. Just simply rename the column * basic reservation id support to GET /servers * - move osapi-specific wsgi code from nova/wsgi.py to nova/api/openstack/wsgi.py - refactor wsgi modules to use more object-oriented approach to wsgi request handling: - Resource object steps up to original Controller position - Resource coordinates deserialization, dispatch to controller, serialization - serialization and deserialization broken down to be more testable/flexible * merge from trunk * make the stubs * use the host * da stubs * Bumped migration number * Merged from trunk * updates to keep things looking better * merge from trunk * fix pep8 issues * PEP8 fix * Moved memcached driver import to the top of modules * fix pep8 issues * pep8 fixes * Cleanup instances\_path in the test\_libvirt test\_spawn\_with\_network\_info test. 
Fixes issue where the nova/tests/instance-00000001/ is left in the nova source tree when running run\_test.sh -N * fix filtering tests * Renamed migration to 020 * osapi: added support for header X-Auth-Project-Id * added /zones/boot reservation id tests * Adds hooks for applying ovs flows when vifs are created and destroyed for XenServer instances * Logs the exception if metadata fails and returns a 500 with an error message to the client * Fixing a bunch of conflicts * add new base * refator existing fakes, and start stubbing out the network for the new manager tests * pep8 * Incremented version of migration script to reflect changes in trunk * basic zone-boot test in place * Incremented version of migration script to reflect changes in trunk * Incremented version of migration script to reflect changes in trunk * switch to using webob exception * Added new snapshots table to InnoDB migrations * Adds a few more status messages to error states on image register for the ec2 api. This will hopefully provide users of the ec2 api with a little more info if their registration fails * Cleaned up bug introduced after fixing pep8 errors * Fixing Scheduler Tests * Cleaned up bug introduced after fixing ^Cp8 errors * Basic hook-up to HostFilter and fixed up the passing of InstanceType spec to the scheduler * make the old tests still pass * rename da stuffs * rename da stuffs * Resolving conflict and finish test\_images * merge * added tests for image detail requests * Merged trunk * Merged trunk and fixed conflicts * Whitespace cleanups * added pause/suspend implementation to nova.virt.libvirt\_conn * Change version number of migration * Update the rebuild\_instance function in the compute manager so that it accepts the arguments that our current compute API sends * Moved everything from thread-local storage to class attributes * Added the filtering of image queries with image metadata. This is exposing the filtering functionality recently added to Glance. Attempting to filter using the local image service will be ignored * This enables us to create a new volume from a snapshot with the EC2 api * Use a new instance\_metadata\_delete\_all DB api call to delete existing metadata when updating a server * added tests for GlanceImageService * Add vnc\_keymap flag, enable setting keymap for vnc console and fix bug #782611 * Add refresh\_provider\_fw\_rules to virt/driver.py#ComputeDriver so virtualization drivers other than libvirt will raise NotImplemented * Rebased to trunk rev 1120 * trunk merge * added get\_pagination\_params function in common with tests, allow fake and local image services to accept filters, markers, and limits (but ignore them for now) * Cleaned up text conflict * pep8 fixed * pep8 fixes * Cleaned up text conflict * removing semicolon * Cleaned up text conflict * skip the vlam test, not sure why it doesn't work * Cleaned up pep8 errors * Fixed the APIError typo * MySQL database tables are currently using the MyISAM engine. Created migration script nova/db/sqlalchemy/migrate\_repo/versions/020\_set\_engine\_mysql\_innodb.py to change all current tables to InnoDB * MySQL database tables are currently using the MyISAM engine. 
Created migration script nova/db/sqlalchemy/migrate\_repo/versions/020\_set\_engine\_mysql\_innodb.py to change all current tables to InnoDB * Handle the case when a v1.0 api tries to list servers that contain image hrefs * Added myself to Authors file * edits based on ed's feedback * More specific error messages for resize requests * pep8 fixes * merge trunk * tests passing again * Actually remove the \_action\_resize code from the base Servers controller. The V11 and V10 controllers implement these now * merge from trunk * This adds a volume snapshot support with the EC2 api * Fixed the typo of APIError with ApiError * nova/auth/novarc.template: Changed NOVA\_KEY\_DIR to allow symlink support * Updated compute api and manager to support image\_refs in rebuild * zone-boot working * regular boot working again * regular boot working again * first pass at reservation id support * Updates so that 'name' can be updated when doing a OS API v1.1 rebuild. Fixed issue where metadata wasn't getting deleted when an empty dict was POST'd on a rebuild * first cut complete * project\_id moved to be last * add support for keyword arguments * fixed nova.virt.libvirt\_conn.resume() method - removing try-catch * reservation\_id's done * basic flow done * lots more * starting * boot-from-volume: some comments and NOTE(user name) * Use metadata variable when calling \_metadata\_refs * Implement the v1.1 style resize action with support for flavorRef * Fixes to the SQLAlchmeny API such that metadata is saved on an instance\_update. Added integration test to test that instance metadata is updated on a rebuild * Update the rebuild\_instance function in the compute manager so that it accepts the arguments that our current compute API sends * Cleanup instances\_path in test\_libvirt test\_spawn\_with\_network\_info test * Added missing nova import to image/\_\_init\_\_.py * Another image\_id location in hyperv * Fixing nova.tests.api.openstack.fakes.stub\_out\_image\_service. It now stubs out the get\_image\_service and get\_default\_image\_service functions. Also some pep8 whitespace fixes * Fixing xen and vmware tests by correctly mocking glance client * Fixing integration tests by correctly stubbing image service * More image\_id to image\_ref stuff. Also fixed tests in test\_servers * When encrypting passwords in xenapi's SimpleDH(), we shouldn't send a final newline to openssl, as it'll use that as encryption data. However, we do need to make sure there's a newline on the end when we write the base64 string for decoding.. Made these changes and updated the test * Fixes the bug introduced by rpc-multicall that caused some test\_service.py tests to fail by pip-requiring a later version of mox * added \n is not needed with -A * now pip-requires mox version 0.5.3 * added -A back in to pass to openssl * merge with dietz * merge with dietz * XenAPI tests pass * fixed so all the new encryption tests pass.. 
including data with newlines and so forth * Glance client updates for xenapi and vmware API to work with image refs * Merged lp:~rackspace-titan/nova/lp788979 * get the right args * Fixing pep8 problems * Modified instance\_type\_create to take metadata * Added test for instance type metadata create * merge with trey * Added test for instance type metadata update * Added delete instance metadata unit test * Added a unit test * Adding test code * Changed metadata to meta to avoid sqlalchemy collision * Adding accessor methods for instance type metadata * remove errant print statement * prevent encryption from adding newlines on long messages * trunk merge * nova/auth/novarc.template: Changed NOVA\_KEY\_DIR to allow symlink support * docstrings again and import ordering * fix encryption handling of newlines again and restructure the code a bit * Libvirt updates for image\_ref * Commit the migration script * fixed docstrings and general tidying * remove \_take\_action\_to\_instance * fix calls to openssl properly now. Only append \n to stdin when decoding. Updated the test slightly, also * fixed read\_only check * Fix pep8 errors * Fix pep8 violations * Fix a description of 'snapshot\_name\_template' * unittest: make unit tests happy * unittest: tests for boot from volume and stop/start instances * compute: implement ec2 stop/start instances * compute, virt: support boot-from-volume without ephemeral device and no device * db: add a table for block device mapping * volume/api: allow volume clone from snapshot without size * api/ec2: parse ec2 block device mapping and pass it down to compute api * teach ec2 parser multi dot-separted argument * api/ec2: make ec2 api accept true/false * Adds the ability to make a call that returns multiple times (a call returning a generator). This is also based on the work in rpc-improvements + a bunch of fixes Vish and I worked through to get all the tests to pass so the code is a bit all over the place * fix a minor bug unrelated to this change * updated the way allocate\_for\_instance and deallocate\_for\_instance handle kwargs * Rename instances.image\_id to instances.image\_ref * changes per review * merge with dietz * stub out passing the network * Virt tests passing while assuming the old style single nics * adding TODOs per dabo's review * Fixes from Ed Leafe's review suggestions * merge trunk * move udev file so it follows the xen-backend.rules * Essentially adds support for wiring up a swap disk when building * add a comment when calling glance:download\_vhd so it's clear what is returned * make the fakes be the correct * skip vmware tests, since they need to be updated for multi-nic by someone who knows the backend * put back the hidden assert check i accidentally removed from glance plugin * fix image\_path in glance plugin * Merged trunk * skip the network tests for now * Change the return from glance to be a list of dictionaries describing VDIs Fix the rest of the code to account for this Add a test for swap * cleaning up getattr calls with default param * branch 2a merge (including trunk) * trunk merge * remerged with 2a * tests pass and pep8'ed * review fixups * Expanded tests * In vmwareapi\_net.py removed the code that defines the flag 'vlan\_interface' and added code to set default value for the flag 'vlan\_interface' to 'vmnic0'. 
This will now avoid the flag re-definition issue * missed a driver reference * exceptions are logged via the raise, so just log an error message * log upload errors * instance obj returned is not a hash; instead it is a sqlalchemy obj, and the hostname attr is what the logic is looking for * we don't need the mac or the host anymore * Test tweaks * instances don't need a mac_address to be created anymore * Make a cleaner log message and use [] instead of . to get database fields * use the skip decorator rather than comment out * merging trunk * Adding some pluralization * Double quotes are ugly #3 * merge with dietz * fix typo introduced during merge conflict resolution * Remove spurious newline at end of file * Move migration to fix ordering * remove dead/duplicate code * Double quotes are ugly #2 * Double quotes are ugly * refactoring compute.api.create() * Fix test_cloud tests * Restricted image filtering by name and status only * Switch the run_instances call in the EC2 API back to 'image_id'. Incoming requests use 'imageId' so we shouldn't modify this for image HREFs * Switching back to chown. I'm fine w/ setfacl too but nova already has 'chown' via sudoers so this seems reasonable for now * replace double quotation marks with single quotation marks in nova.virt.libvirt_conn * remove unnecessary import of inspect in nova.virt.libvirt_conn * creating _take_action_to_instance in nova.virt.libvirt_conn.py * Instead of redefining the flag 'vlan_interface', just set a default value (vmnic0) in vmwareapi_net.py * Renamed image_ref variables to image_href, since the convention is that x_ref vars may imply that they are db objects * Added test skipper class * change the behavior of calling a multicall * move consumerset killing into stop * don't put connection back in pool * replace removed import * cleanups * cleanup the code for merging * make sure that using multicall on a call with a single result still functions * lots of fixes for rpc and extra imports * don't need to use a separate connection * almost everything working with fake_rabbit * bring back commits lost in merge * connection pool tests and make the pool LIFO * Add rpc_conn_pool_size flag for the new connection pool * Always create Service consumers no matter if report_interval is 0. Fix tests to handle how Service loads Consumers now * catch greenlet.GreenletExit when shutting service down * fix consumers to actually be deleted and clean up cloud test * fakerabbit's declare_consumer should support more than 1 consumer; also: make fakerabbit Backend.consume be an iterator like it should be.
* convert fanout\_cast to ConnectionPool * pep8 and comment fixes * Add a connection pool for rpc cast/call Use the same rabbit connection for all topic listening and wait to be notified vs doing a 0.1 second poll for each * add commented out unworking code for yield-based returns * make the test more expicit * add support to rpc for multicall * merge with dietz * Fixing divergence * Merged trunk * Added params to local and base image service * Fixed the mistyped line referred to in bug 787023 * Merged trunk and resolved conflicts * Fixed a typo * make the test work * Merged with trunk * Several changes designed to bring the openstack api 1.1 closer to spec - add ram limits to the nova compute quotas - enable injected file limits and injected file size limits to be overridden in the quota database table - expose quota limits as absolute limits in the openstack api 1.1 limits resource - add support for controlling 'unlimited' quotas to nova-manage * During the API create call, the API would kick off a build and then loop in a greenthread waiting for the scheduler to pick a host for the instance. After API would see a host was picked, it would cast to the compute node's set\_admin\_password method * starting breakdown of nova.compute.api.create() * fix test. instance is not updated in DB with admin password in the API anymore * Merged upstream * pep8 fixes * Initial tests * fix forever looping on a password reset API call * updating admin\_pass moved down to compute where the password is actually reset. only update if it succeeds * merged trunk * change install\_ref.admin\_password to instance\_ref.admin\_pass to match the DB * Merged trunk * remove my print * we're getting a list of tuples now' * we have a list of tuples, not a list of dicts * pep8 fixes * return the result of the function * Updated tests to use mox pep8 * InstanceTypesMetadata is now registered * make some changes to the manager so dupe keywords don't get passed * Fixing the InstanceTypesMetadata table definition * try out mox for testing image request filters * Adding the migrate code to add the new table * dist-sched-2a merge * Created new libvirt directory, moved libvirt\_conn.py to libvirt/connection.py, moved libvirt templates, broke out firewall and network utilities * make the column name correct * The code for getting an opaque reference to an instance assumed that there was a reference to an instance obj available when raising an exception. I changed this from raising an InstanceNotFound exception to a NotFound, as this is more appropriate for the failure, and doesn't require an instance ID * merge against 2a * trunk merge * simplified the limiting differences for different versions of the API * New tests added * Changed the exception type to not require an instance ID * Added model for InstanceTypeMetadata * Added test * Avoid wildcard import * Add unittests for cloning volumes * merged recent trunk * merged recent trunk * Make snapshot\_id=None a default value in VolumeManager:create\_volume(). It is not a regular case to create a volume from a snapshot * Don't need to import json * Fix wrong call of the volume api create() * pep8 fix in nova/compute/api.py * instead of the API spawning a greenthread to wait for a host to be picked, the instance to boot, etc for setting the admin password... 
let's push the admin password down to the scheduler so that compute can just take care of setting the password as a part of the build process * tests working again * eventlet.spawn\_n() expects the function and arguments, but it expects the arguments unpacked since it uses \*args * Don't pass a tuple since spawn\_n will get the arguments with \*args anyway * move devices back * Using the root-password subcommand of the nova client results in the password being changed for the instance specified, but to a different unknown password. The patch changes nova to use the password specified in the API call * Pretty simple. We call openssl to encrypt the admin password, but the recent changes around this code forgot to strip the newline off the read from stdout * DHSimple's decrypt needs to append \n when writing to stdin * need to strip newline from openssl stdout data * merge with trey * work on * merge trunk * moved auto assign floating ip functionality from compute manager to network manager * create a mac address entry and blindly use the first network * create a mac address entry and blindly use the first network * create a mac address entry and blindly use the first network * need to return the ref * Added filtering on image properties * Fixes a bug related to incorrect reparsing of flags and prevents many extra reparses * no use mac * comment out the direct cloud case * make fake\_flags set defaults instead of runtime values * add a test from vish and fix the issues * Properly reparse flags when adding dynamic flags * no use mac * instances don't have mac's anymore and address is now plural * let the fake driver accept the network info * Comment out the 2 tests that require the instance to contain mac/ip * initial use of limited\_by\_marker * more fix up * many tests pass now * its a dict, not a class * we don't get the network in a tuples anymore * specified image\_id keyword in exception arg * When adding a keypair with ec2 API that already exists, give a friendly error and no traceback in nova-api * added imageid string to exception, per peer review * Fixes some minor doc issues - misspelled flags in zones doc and also adds zones doc to an index for easier findability * removed most of debugging code * Fixing docstring * Synchronise with Diablo development * make \_make\_fixture respect name passed in * zone1 merge * sending calls * accepting calls * Fixing \_get\_kernel\_ramdisk\_from\_image to use the correct image service * Fixing year of copyright * merge * select partially going through * merge from trunk * make image\_ref and image\_id usage more consistant, eliminate redundancy in compute\_api.create() call * take out irrelevant TODO * blah * uhhh yea * local tweaks * getting closer to working select call * swap should use device 1 and rescue use device 2 * merged from trunk * fix tests, have glance plugin return json encoded string of vdi uuids * make sure to get a results, not the query * merged from trunk * Removing code duplication between parse\_image\_ref and get\_image service. Made parse\_image\_ref private * Changed ec2 api dupe key exception log handler info->debug * Added test case for attempting to create a duplicate keypair * Removing debug print line * Renaming service\_image\_id vars to image\_id to reduce confusion. 
Also some minor cleanup * cleanup and fixes * got rid of print statement * initial fudging in of swap disk * make the test\_servers pass by removing the address tests for 1.1, bug filed * port the current create\_networks over to the new network scheme * need to have the complete table def since sqlalchemy/sqlite won't reload the model * must have the class defined before referencing it * make the migration run with tests * get rid of all mention of drivers ... it's filter only now * merge trunk * Fixes euca-attach-volume for iscsi using Xenserver * fix typo * merge branch lp:~rackspace-titan/nova/ram-limits * Added test * Fixes missing space * Fixed mistyped line * Rebased to trunk rev 1101 * merge from trunk * moved utils functions into nova/image/ * Trunk merge * Fix bug #744150 by starting nova-api on an unused port * Removing utils.is\_int() * Added myself to Authors * When adding a keypair that already exists, give a friendly error and no traceback in nova-api * --dhcp-lease-max=150 by default. This prevents >150 instances in one network * Minor cleanup * No reason to modify the way file names are generated for kernel and ramdisk, since the kernel\_id and ramdisk\_id is still guaranteed to be ints * found a typo in the xenserver glance plugin that doesn't work with glance trunk. Also modified the image url to fetch from /v1/image/X instead of /image/X as that returned a 300 * fixing glance plugin bug and setting the plugin to use /v1 of the glance api * merge trunk * move init start position to 96 to allow openvswitch time to fully start * Include data files for public key tests in the tarball * minor cleanup * Makes sure vlan creation locks so we don't race and fail to create a vlan * merging trunk * Include data files for public key tests in the tarball * Merged with trunk * renaming resource\_factory to create\_resource * combined the exception catching to eliminate duplication * synchronize vlan creation * print information about nova-manage project problems * merge from trunk * fix comments * make nwfilter mock more 'realistic' by having it remember which filters have been defined * fix pep8 issue * fixed silly issue with variable needing to be named 'id' for the url mapper, also caught new exception type where needed * This is the groundwork for the upcoming distributed scheduler changes. Nothing is actually wired up here, so it shouldn't break any existing code (and all tests pass) * Merging trunk * Get rid of old virt/images.py functions that are no longer needed. Checked for any loose calls to these functions and found none. All tests pass for me * Update OSAPI v1.1 extensions so that it supports RequestExtensions. ResponseExtensions were removed since the new RequestExtension covers both use cases. This branch also removes some of the odd serialization code in the RequestExtensionController that converted dictionary objects into webob objects. RequestExtension handlers should now always return proper webob objects * Addressing bug #785763. Usual default for maximum number of DHCP leases in dnsmasq is 150. This prevents instances to obtain IP addresses from DHCP in case we have more than 150 in our network. Adding myself to Authors * foo * syntax errors * temp fixes * added support for reserving certain network for certain project * Fixed some tests * merge with trunk * Added an EC2 API endpoint that'll allow import of public key. 
Prior, api only allowed generation of new keys * This fix ensures that kpartx -d is called in the event that tune2fs fails during key injection, as it does when trying to inject a key into a windows instance. Bug #760921 is a symptom of this issue, as if kpartx -d is not called then partitions remain mapped that prevent the underlying nbd from being reused * Add new flag 'max\_kernel\_ramdisk\_size' to specify a maximum size of kernel or ramdisk so we don't copy large files to dom0 and fill up /boot/guest * The XenAPI driver uses openssl as part of the nova-agent implementation to set the password for root. It uses a temporary file insecurely and unnecessarily. Change the code to write the password directly to stdin of the openssl process instead * The tools/\* directory is now included in pep8 runs. Added an opt-out system for excluding files/dirs from pep8 (using GLOBIGNORE) * fill out the absolute limit tests for limits v1.0 controller * add absolute limits support to 1.0 api as well * Merged with trunk * fixed pep8 issue * merge from trunk * Fail early if requested imageRef does not exist when creating a server * Separate out tests for when unfilter is called from iptables vs. nwfilter driver. Re: lp783705 * Moved back templates and fixed pep8 issue. Template move was due to breaking packaging with template moves. That will need to happen in a later merge * further refactoring of wsgi module; adding documentation and tests * don't give instance quota errors with negative values * Merged trunk and resolved horrible horrible conflicts * No reason to hash ramdisk\_id and kernel\_id. They are ints * temp * waldon's naming feedback * Fixing role names to match code * Merging trunk * updated the hypervisors and ec2 api to support receiving lists from pluralized mac\_addresses and fixed\_ips * fname should have been root\_fname * minor cleanup, plus had to merge because of diverged-branches issue * Minor cleanup * merge from trunk * Fix comments * Add a unitest to test EC2 snapshot APIs * Avoid wildcard import * Simple change to sort the list of controllers/methods before printing to make it easier to read * missed the new wsgi test file * removing controller/serializer code from wsgi.py; updating other code to use new modules * merge lp:nova * fixup absolute limits to latest 1.1 spec * refactoring wsgi to separate controller/serialization/deserialization logic; creating osapi-specific module * default to port 80 if it isnt in the href/uri * return dummy id per vishs suggestion * hackish patch to fix hrefs asking for their metadata in boot (this really shouldnt be in ec2 api?) * Sort list of controllers/methods before printing * use a manual 500 with error text instead of traceback for failure * log any exceptions that get thrown trying to retrieve metadata * skeleton of forwarding calls to child zones * fix typo in udev rule * merge trunk * libvirt fixes to use new image\_service stuff * On second thought, removing decorator * Adding FlagNotSet exception * Implements a basic mechanism for pushing notifications out to interested parties. The rationale for implementing notifications this way is that the responsibility for them shouldn't fall to Nova. As such, we simply will be pushing messages to a queue where another worker entirely can be written to push messages around to subscribers * Spacing changes * get real absolute limits in openstack api and verify absolute limit responses * Added missing xenhost plugin. 
This was causing warnings to pop up in the compute logs during periodic\_task runs. It must have not been bzr add'd when this code was merged * fixed bug with compute\_api not having actual image\_ref to use proper image service * Adding xenhost plugin * Merging trunk * Added missing xenhost plugin * Fix call to spawn\_n() instead. It expects a callable * fix pep8 issues * oops, took out commented out tests in integrated.test\_servers and made tests pass again * fixed api.openstack.test\_servers tests...again * fixed QuotaTestCases * fixed ComputeTestCase tests * made ImageControllerWithGlanceServiceTests pass * fixed test\_servers small tests as well * get integrated server\_tests passing * Removed all utils.import\_object(FLAGS.image\_service) and replaced with utils.get\_default\_image\_service() * MySQL database tables are using the MyISAM engine. Created migration script to change all current tables to InnoDB, updated version to 019 * MySQL database tables are using the MyISAM engine. Created migration script to change all current tables to InnoDB, updated version to 019 * Small cleanups * Moving into scheduler subdir and refactoring out common code * Moving tests into scheduler subdirectory * added is\_int function to utils * Pep8 fixes * made get\_image\_service calls in servers.py * use utils.get\_image\_service in compute\_api * updates to utils methods, initial usage in images.py * added util functions to get image service * Using import\_class to import filter\_host driver * Adding fill first cost function * add more statuses for ec2 image registration * Add --fixes * Add --fixes * Fixes the naming of the server\_management\_url in auth and tests * Merging in Sandy's changes adding Noop Cost Fn with tests * merged trunk * move migration 017 to 018 * merge ram-limits * Removed extra serialization metadata * Docstring cleanup and formatting (nova/network dir). Minor style fixes as well * pep8 * Fixes improper attribute naming around instance types that broke Resizes * merge ram-limits * support unlimited quotas in nova-manage and flags * fix test * Changed builder to match specs and added test * add migration for proper name * Update test case to ensure password gets set correctly * make token use typo that is in database. Also fix now -> utcnow and stop using . syntax for dealing with tokens * Added missing metadata join to instance\_get calls * Avoid using spawn\_n to fix LP784132 * add ram limits to instance quotas * Convert instance\_type\_ids in the instances table from strings to integers to enable joins with instance\_types. This in particular fixes a problem when using postgresql * Set password to one requested in API call * don't throw type errors on NoneType int conversions * Added network\_info into refresh\_security\_group\_rules That fixs https://bugs.launchpad.net/nova/+bug/773308 * Improved error notification in network create * Instead of using a temp file with openssl, just write directly to stdin * First cut at least cost scheduler * merge lp:nova * Implemented builder for absolute limits and updated tests * provision\_resource no longer returns value * provision working correctly now * Re-pull changed notification branch * PEP8 fixes * adding --fixes lp:781429 * Fixed mistyped key, caused huge performance leak * Moved memcached connection in AuthManager to thread-local storage. Added caching of LDAP connection in thread-local storage. Optimized LDAP queries, added similar memcached support to LDAPDriver. Add "per-driver-request" caching of LDAP results. 
(should be per-api-request) * ugh, fixed again * tests fixed and pep8'ed * Update comment on RequestExtension class * failure conditions are being sent back properly now * Added opt-out system for excluding files/dirs from pep8 (using GLOBIGNORE) * MySQL database tables are using the MyISAM engine. Created migration script to change all current tables to InnoDB * MySQL database tables are using the MyISAM engine. Created migration script to change all current tables to InnoDB * fix for lp783705 - remove nwfilters when instance is terminated * basic call going through * Added missing metadata join to instance\_get calls * add logging to migration and fix migration version * Migrate quota schema from hardcoded columns to a key-value approach. The hope is that this change would make it easier to change the quota system without future schema changes. It also adds the concept of quotas that are unlimited * Conceded :-D * updated the mac\_address delete function to actually delete the rows, and update fixed\_ips * Added missing flavorRef and imageRef checks in the os api xml deserialization code along with tests * Fixed minor pylint errors * This branch splits out the IPv6 address generation into pluggable backends. A new flag named ipv6\_backend specifies which backend to use * Reduce indentation to avoid PEP8 failures * merge koelker migration changes * using mac\_address from fixed\_ip instead of instance * PEP8 cleanups * Use new 3-argument API * add a todo * style fixing * Removed obsolete method and test * renamed test cases in nova/tests/api/openstack/test\_servers.py to use a consistent naming convention as used in nova/tests/api/openstack/test\_images.py. also fixed a couple of pylint #C0103 errors in test\_servers.py * make the migration work like we expect it to * Fixed all pep8 errors in tools/install\_venv.py. All tests pass * Added the imageRef and flavorRef attributes in the xml deserialization * Add vnc\_keymap flag and enable setting keymap for vnc console * Review changes and merge from trunk * Pep8 cleaning * Added response about error in nova-manage project operations * Removed tools/clean\_vlans and tools/nova-debug from pep8 tests as they are shell scripts * Added lines to include tools/\* (except ajaxterm) in pep8 tests * Add a unit test for snapshot\_volume * Define image state during snapshotting. Name snapshot to the name provided, not generate * Unit test for snapshotting (creating custom image) * fixed a few C0103 errors in test\_servers.py * renamed test cases to use a consistent naming convention as used in nova/tests/api/openstack/test\_images.py * fix sys.argv requirement * first cut at weighted-sum tests * merge trunk * add udev rules and modified ovs\_configure\_vif\_flows.py to work with udev rules * Adds proper error handling for images that can't be found and a test for deregister image * added |fixed\_ip\_get\_all\_by\_mac\_address| and |mac\_address\_get\_by\_fixed\_ip| to db and sqlalchemy APIs * started on integrating HostFilter * Add support for rbd snapshots * Merging in trunk * I'm assuming that openstack doesnt work with python < 2.6 here (which I read somewhere on the wiki). This patch will check to make sure python >= 2.6 is installed, and also allow it to work with python 2.7 (and greater in the future) * merge lp:nova * XenAPI was not implemented to allow for multiple simultaneous XenAPI requests. A single XenAPIConnection (and thus XenAPISession) is used for all queries. 
XenAPISession's wait_for_task method would set a self.loop used for looping calls to _poll_task until task completion. Subsequent (parallel) calls to wait_for_task for another query would overwrite this. XenAPISession._poll_task was pulled into the XenAPISession.wait_for_task method to avoid having to store self.loop * pep8 fixes * Merged trunk * volume/driver: make unit test, test_volume, pass * Make set_admin_password non-blocking to API * Merged trunk * Review feedback * Lost a flag pulling from another branch. Whoops * Update the compute manager so that it breaks out of a loop if set_admin_password is not implemented by the driver. This avoids excessively logging NotImplementedError exceptions * Merging in Sandy's changes * Make host timeout configurable * Make set_admin_password non-blocking to API * volume/driver: implement basic snapshot * merge trunk * Update the compute manager so that it breaks out of a loop if set_admin_password is not implemented by the driver * Add init script and sysconfig file for openvswitch-nova * volume/driver: factor out lvm operation * Authors: add myself to Authors file * trunk merge * Adding zones doc into index of devref plus a bug fix for flag spellings * fixup based on Lorin's feedback * added flag lost in migration * merge trunk * pep8 * Adding basic tests for call_zone_method * fixed_ip disassociate now also unsets mac_address_id * Make sure imports are in alphabetical order * updated previous calls referring to the flags to use the column from the networks table instead * merged from trunk * handle instance_type_ids that are NULL during upgrade to integers * fix for lp760921. Previously, if tune2fs failed, as it does on windows hosts, kpartx -d also failed to be called, which leaves mapped partitions that retain holds on the nbd device. These holds cause the observed errors * if a LoopingCall has canceled the loop, break out early instead of sleeping any more than needed * Add a test for parallel builds. Verified this test fails before this fix and succeeds after this fix * incorporated ImageNotFound instead of NotFound * merged from trunk * misc related network manager refactor and cleanup * changed NotFound exception to ImageNotFound * Update comment * Variable renaming * Add test suite for IPv6 address generation * Accept and ignore project_id * Make it so that ExtensionRequest objects now return proper webob objects. This avoids the odd serialization code in the RequestExtensionController class which converts JSON dicts to webobs for us * merged from trunk * Remove ResponseExtensions.
The new RequestExtension covers both use cases * Initial work on request extensions * Added network\_info into refresh\_security\_group\_rules * fixed pep8 spacing issue * merge from trunk * rename quota column to 'hard\_limit' to make it simpler to avoid collisions with sql keyword 'limit' * Fix remote volume code * 1 Set default paths for nova.conf and api-paste.ini to /etc/nova/ 2 Changed countryName policy because https://bugs.launchpad.net/nova/+bug/724317 still affected * Implement IPv6 address generation that includes account identifier * messing around with the flow of create() and specs * Redundant line * changes per review * docstring cleanup, nova/network dir * make instance.instance\_type\_id an integer to support joins in postgres * merge from trunk and update .mailmap file * Merged trunk * Updated MANIFEST for template move * NoValidHost exception test * Fixes an issue with conversion of images that was introduced by exception refactoring. This makes the exceptions when trying to locate an ec2 id clearer and also adds some tests for the conversion methods * oops fixed a docstring * Pep8 stuff * Bluprint URL: https://blueprints.launchpad.net/nova/+spec/improve-pylint-scores/ * start of zone\_aware\_scheduler test * Moved everything into notifier/api * make sure proper exceptions are raised for ec2 id conversion and add tests * better function name * Updated the value of the nova-manager libvirt\_type * more filter alignment * Removed commented out 'from nova import log as logging' line, per request from Brian Lamar * merge trunk * align filters on query * better pylint scores on imports * Code cleanup * Merged trunk * Abstract out IPv6 address generation to pluggable backends * Merged trunk * First cut with tests passing * changing Authors file * removed unused wild card imports, replaced sqlalchemy wildcard import with explicit imports * removed unused wild card imports, replaced sqlalchemy wildcard import with explicit imports * Fix for #780276 (run\_tests.sh fails test\_authors\_up\_to\_date when using git repo) * extracted xenserver capability reporting from dabo's dist-scheduler branch and added tests * migrate back updated\_at correctly * added in log\_notifier for easier debugging * Add priority based queues to notifications. Remove duplicate json encoding in notifier (rpc.cast does encoding... ) make no\_op\_notifier match rabbit one for signature on notify() * Bugfix #780784. KeyError when creating custom image * removed unused wild card imports, replaced sqlalchemy wildcard import with explicit imports * removed unused wild card imports, replaced sqlalchemy wildcard import with explicit imports * removed unused wild card imports, replaced sqlalchemy wildcard import with explicit imports * Better tests * Add example * give a more informative message if pre-migration assertions fail * Whoops * fix migration bug * Pep8 * Test * remove stubbing of XenAPISession.wait\_for\_task for xenapi tests as it doesn't need to be faked. 
Also removed duplicate code that stubbed xenapi_conn._parse_xmlrpc_value * Change xenapi's wait_for_task to handle multiple simultaneous queries to fix lp:766404 * Added GitPython to [install_dir]/tools/pip-requires * got rid of unnecessary imports * Enable RightAWS style signature checking using server_string without port number, add test cases for authenticate() and a new helper routine, and fix lp753660 * Better message format description * unified underscore/dash issue * update tests to handle unlimited resources in the db * pep8 * capabilities flattened and tests fixed * Set root password upon XenServer instance creation * trunk merge * clean up unused functions from virt/images.py * Removing a rogue try/catch expecting a non-existent exception.TimeoutException that is never raised * basic test working * db: fix db versioning * fix mismerge by 1059 * volume/driver: implement basic snapshot/clone * volume/driver: factor out lvm operation * Host Filtering for Distributed Scheduler (done before weighing) * Rebased to trunk rev 1057 * Adds coverage-related packages to the tools/pip-requires to allow users to generate coverage reporting when running unit tests with virtualenv * merge from trunk * Set publish_errors default to False * convert quota table to key-value * Simple fix for this issue. Tries to raise an exception passing in a variable that doesn't exist, which causes an error * Fixed duplicate function * Review feedback * Review feedback * Fixed method in flavors * Review feedback * Review feedback * Merged trunk * Set root password upon XenServer instance creation * Added Python packages needed for coverage reports to virtualenv packages * Added interface functions * merge from trunk * added test for show_by_name ImageNotFound exception * tests pass again * Sanitize get_console_output results. See bug #758054 * revised file docs * New author in town * Changes to allow a VM to boot from an iso image. A blank HD is also attached with a size corresponding to the instance type * Added stub function for a referenced, previously non-existent function * Merged trunk * grabbed from dist-sched branch * Explicitly cast a str to a str to please pylint * Removed incorrect, unreachable code * spacing fix * pep8 fix * Improved error notification in network create * Add two whitespaces to conform to PEP8 * Publish errors via nova.notifier * Added myself to Authors file * terminology: no more plug-ins or queries. They are host filters and drivers * Added interface function to ViewBuilder * Added interfaces to server controller * added self to authors * fixed issue with non-existent variable being passed to ImageNotFound exception * removing rogue TimeoutException * merge prop fixes * Merged trunk * print statements removed * merge with trunk * flipped service_state in ZoneManager and fixed tests * pep8 * not = * not = * and or test * and or test * merge from trunk * Removed extra newline after get_console_output in fake virt driver * Moved all reencoding to compute manager to satisfy both Direct API and internal cloud call * Merged with current trunk * added myself to Authors * Adding a test case to show the xml deserialization failure for imageRef and flavorRef * Fixes for nova-manage vpn list * json parser * Don't fail the test suite in the absence of VCS history * It's ok if there's no commit history.
Otherwise the test suite in the tarball will fail * Merged trunk * flavor test * Fix indentation * tests and better driver loading * Add missed hyphen * Adding OSAPI v1.1 limits resource * Adding support for server rebuild to v1.0 and v1.1 of the Openstack API * reduce policy for countyname * looking for default flagfile * adding debug log message * merging trunk * merging trunk * removing class imports * Merged trunk * Merged trunk * Moved reencoding logic to compute manager and cloud EC2 API * ensure create image conforms to OS API 1.1 spec * merge updates from trunk * Added support in the nova openstack api for requests with local hrefs, e.g., "imageRef":"2" Previously, it only supported "imageRef":"http://foo.com/images/2". The 1.1 api spec defines both approaches * Add a flag to allow the user to specify a dnsmasq configuration file for nova-network to use when starting dnsmasq. Currently the command line option is set to "--config-fil=" with nothing specified. This branch will leave it as it is if the user does not specify a config file, but will utilize the specific file if they do * merged from trunk * implemented review suggestion EAFP style, and fixed test stub fake\_show needs to have image\_state = available or other tests will fail * got rid of extra whitespace * Update tools/pip-requires and tools/install\_venv.py for python2.7 support (works in ubuntu 11.04) * No need to test length of admin password in local href test * merging trunk; resolving conflicts; fixing issue with ApiError test failing since r1043 * Added support in osapi for requests with local hrefs, e.g., "imageRef":"2" * initial pass * Implement get\_host\_ip\_addr in the libvirt compute driver * merging trunk; resolving conflicts * Modified the instance status returned by the OS api to more accurately represent its power state * Fixed 2 lines to allow pep8 check to pass * Since run\_tests.sh utilizes nose to run its tests, the -x, --stop flag works correctly for halting tests on the first failed test. 
The usage information for run\_tests.sh now includes the --stop flag * add support for git checking and a default of failing if the history can't be read * ApiError 'code' arg set to None, and will only display a 'code' as part of the str if specified * Fixed: Check for use of IPv6 missing * removed unused method and fixed imports * Change the links in the sidebar on the docs pages * Use my\_ip for libvirt version of get\_host\_ip\_addr * fix typo in import * removed unused method and fixed imports * small changes in libvirt tests * place ipv6\_rules creation under if ip\_v6 section * Added checking ip\_v6 flag and test for it * merging trunk * adding view file * Expose AuthManager.list\_projects user filter to nova-manage * Final cleanup of nova/exceptions.py in my series of refactoring branches * Uses memcached to cache roles so that ldap is actually usable * added nova version to usage output of bin/nova-manage for easy identification of installed codebase * Changing links in sidebar to previous release * Rebased to trunk rev 1035 * converted 1/0 comparison in db to True/False for Postgres cast compatibility * Changed test\_cloud and fake virt driver to show out the fix * converted 1/0 comparison to True/False for Postgres compatibility * pep8 * fixed docstring per jsb * added version list command to nova-manage * Added more unit-test for multi-nic-nova libvirt * Sanitize get\_console\_output in libvirt\_conn * added nova version output to usage printout for nova-manage * Make the import of distutils.extra non-mandatory in setup.py. Just print a warning that i18n commands are not available.. * Correcting exception case * further cleanup of nova/exceptions.py * added eagerloading mac adddresses for instance * merge with trunk and resolve conflicts * Added myself to authors file * pep8 fixes * Refactoring usage of nova.exception.NotFound * Let nova-mange limit project list by user * merging trunk * Make the import of distutils.extra non-mandatory in setup.py. Just print a warning that i18n commands are not available.. * Updated run\_tests.sh usage info to reflect the --stop flag * Fixed formatting to align with PEP 8 * Modified instance status for shutoff power state in OS api * Refactoring the usage of nova.exception.Duplicate * Rebased to trunk rev 1030 * removed extra newline * merged from trunk * updated tests to reflect serverRef as href (per Ilya Alekseyev) and refactored \_build\_server from ViewBuilder (per Eldar Nugaev) * Add a test checking spawn() works when network\_info is set, which currently doesn't. The following patch would fix parameter mismatch calling \_create\_image() from spawn() in libvirt\_conn.py * removed unused imports and renamed template variables * pep8 * merging trunk * Renamed test\_virt.py to test\_libvirt.py as per suggestion * fixing bad merge * Merged trunk and fixed simple exception conflict * merging trunk * Refactoring nova.exception.Invalid usage * adding gettext to setup.py * Use runtime XML instead of VM creation time XML for createXML() call in order to ensure volumes are attached after RebootInstances as a workaround, and fix bug #747922 * Created new libvirt directory, moved libvirt\_conn.py to libvirt/connection.py, moved libvirt templates, broke out firewall and network utilities * Rebased to trunk rev 1027, and resolved a conflict in nova/virt/libvirt\_conn.py * Rebased to trunk rev 1027 * clarifies error when trying to add duplicate instance\_type names or flavorids via nova-manage instance\_type * merge trunk * Rework completed. 
Added test cases, changed helper method name, etc * pep8 * merge trunk, resolved conflict * merge trunk * Abstracted libvirt's lookupByName method into \_lookup\_by\_name * Provide option of auto assigning floating ip to each instance. Depend on auto\_assign\_floating\_ip boolean flag value. False by default * Fixes per review * Restore volume state on migration failure to fix lp742256 * Fixes cloudpipe to get the proper ip address * merging trunk * Fix bug with content-type and small OpenStack API actions refactor * merge with trunk * merge trunk * merged trunk * -Fixed indent for \_get\_ip\_version -Added LoopingCall to destroy as suggested by earlier bug report -Standardized all LoopingCall uses to include useful logging and better error handling * Create a dictionary of instance\_types before executing SQL updates in the instance\_type\_id migration (014). This should resolve a "cannot commit transaction - SQL statements in progress" error with some versions of sqlite * create network now takes bridge for flat networks * Adapt DescribeInstances to EC2 API spec * Change response of the EC2 API CreateVolume method to match the API docs for EC2 * Merged trunk and fixed api servers conflict * pep8 * Fixes and reworkings based on review * pep8 * Addressing exception.NotFound across the project * fix logging in reboot OpenStack API * eager loaded mac\_address attributes for mac address get functions * updated image builder and tests for OS API 1.1 compatibility (serverRef) * forgot import * change action= to actions= * typo * forgot to save * moved get\_network\_topic to network.api * style cleaning * Fixed network\_info creation in libvirt driver. Now creating same dict as in xenapi driver * Modified instance status for shutdown power state in OS api * rebase trunk * altered imports * commit to push for testing * Rebased to trunk rev 1015 * Utility method reworked, etc * Docstring cleanup and formatting (nova/image dir). Minor style fixes as well * Docstring cleanup and formatting (nova/db dir). Minor style fixes as well * Docstring cleanup and formatting (nova dir). Minor style fixes as well * use vpn filter in basic filtering so cloudpipe works with iptables driver * use simpler interfaces * Docstring cleanup and formatting (console). Minor style fixes as well * Docstring cleanup and formatting (compute). Minor style fixes as well * merge trunk * Add privateIpAddress and ipAddress to EC2 API DescribeInstances response * style fixing * Fix parameter mismatch calling \_create\_image() from spawn() in libvirt\_conn.py * Add a test checking spawn() works when network\_info is set, which currently doesn't. The following patch would fix it * put up and down in the right dir * Makes metadata correctly display kernel-id and ramdisk-id * pep8 cleaning * style fix * revert changes that doesn't affect the bug * in doesn't work properly on instance\_ref * Another small round of pylint clean-up * Added an option to run\_tests.sh so you can run just pep8. So now you can: ./run\_tests.sh --just-pep8 or ./run\_tests.sh -p * merge trunk * fix display of vpn instance id and add output rule so it can be tested from network host * Exit early if tests fail, before pep8 is run * more changes per review * fixes per review * docstring cleanup, nova/image dir * Docstring cleanup and formatting. 
Minor style fixes as well * cleanups per code review * docstring cleanup, nova dir * fixed indentation * docstring cleanup, console * docstring cleanup, nova/db dir * attempts to make the docstring rules clearer * fix typo * docstring cleanup compute manager * bugfix signature * refactor the way flows are deleted/reset * remove ambiguity in test * Pylinted nova-compute * Pylinted nova-manage * replaced regex to webob.Request.content\_type * fix after review: style, improving tests, replacing underscore * merge with trunk * fix Request.get\_content\_type * Reverted bad merge * Rebased to trunk rev 1005 * Removed no longer relevant comment * Removed TODO we don't need * Removed \_ and replaced with real variable name * instance type get approach changed. tests fixed * Merged trunk * trunk merged * fix: mark floating ip as auto assigned * Add to Authors * Change response format of CreateVolume to match EC2 * revamped spacing per Rick Harris suggestion. Added exact error to nova-manage output * only apply ipv6 if the data exists in xenstore * Create a dictionary of instance\_types before executing SQL updates in the instance\_type\_id migration (014). This should resolve a "cannot commit transaction - SQL statements in progress" error with some versions of sqlite * add support for git checking and a default of failing if the history can't be read * strip output, str() link local * merging lp:~rackspace-titan/nova/exceptions-refactor-invalid * Round 1 of pylint cleanup * Review feedback * Implement quotas for the new v1.1 server metadata controller * fix doc typo * fix logging in reboot OpenStack API * make geninter.sh use the right tmpl file * pep8 fix * refactoring usage of exception.Duplicate errors * rename all versions of image\_ec2\_id * Abstracted lookupByName calls to \_lookup\_by\_name for centralized error handling * actually use the ec2\_id * remove typo * merging lp:~rackspace-titan/nova/exceptions-refactor-invalid * Fixes cloudpipe to get the proper ip address * add include file for doc interfaces * add instructions for setting up interfaces * Merged trunk and fixed small comment * Fixed info messages * Tweak to destroy loop logic * Pretty critical spelling error * Removed extra calls in exception handling and standardized the way LoopingCalls are done * one last i18n string * Merged trunk * multi-line string spacing * removing rogue print * moving dynamic i18n to static * refractoring * Add support for cloning a Sheepdog volume * Add support for cloning a Sheepdog volume * Add support for creating a new volume from a existing snapshot with EC2 API * Add support for creating a new volume from a existing snapshot with EC2 API * Add support for creating a Sheepdog snapshot * Add support for creating a Sheepdog snapshot * Add support for creating a snapshot of a nova volume with euca-create-snapshot * Add support for creating a snapshot of a nova volume with euca-create-snapshot * trunk merged * Implement get\_host\_ip\_addr in the libvirt compute driver * Adding projectname username to the nova-manage project commands to fix a doc bug, plus some edits and elimination of a few doc todos * pep8 fixes * Remove zope.interface from the requires file since it is not used anywhere * use 'is not None' instead of '!= None' * Fix loggin in creation server in OpenStack API 1.0 * Support admin password when specified in server create requests * First round of pylint cleanup * merge lp:nova and resolve conflicts * Change '== None' to 'is None' * remove zope.interface requires * use 'is not None' 
instead of '!= None' * pep8 fixes * Change '== None' to 'is None' * Fixes nova-manage image convert when the source directory is the same one that local image service uses * trunk merged * pep8 fixed * calc link local * not performing floating ip operation with auto allocated ips * it is rename not move * pep8 fix * Rebased to trunk rev 995 * Rebased to trunk rev 995 * merge trunk * add fault as response * Fix logging in openstack api * Fix logging in openstack api * Fix logging in openstack api * trunk merged. conflict resolved * trunk merged. conflict resolved * The change to utils.execute's call style missed this call somehow, this should get libvirt snapshots working again * Fix parameter mismatch calling to_xml() from spawn() in libvirt_conn.py * move name into main metadata instead of properties * change libvirt snapshot to new style execute * Add additional logging for WSGI and OpenStack API authentication * Rename the id * Added period to docstring for metadata test * Merged trunk * Empty commit to hopefully regenerate launchpad diff * Explicitly tell a user that they need to authenticate against a version root * Merged trunk * merging trunk * adding documentation & error handling * correcting tests; pep8 * Removed the unused self.interfaces_xml variable * Only poll for instance states that compute should care about * Diablo versioning * Diablo versioning * Rebased to trunk rev 989 * Rebased to trunk rev 989

2011.2
------

* Final versioning for Cactus * initial roundup of all 'exception.Invalid' cases * merge trunk * set the bridge on each OvsFlow * merge with trunk * bugfix * bugfix * Fix parameter mismatch calling to_xml() from spawn() in libvirt_conn.py * add kvm-pause and kvm-suspend

2011.2rc1
---------

* Rework GlanceImageService._translate_base() to not call BaseImageService._translate_base() otherwise the wrong class attributes are used in properties construction.. * Updated following to Rick's comments * Rebased to trunk rev 987 * Rework GlanceImageService._translate_base() to not call BaseImageService._translate_base() otherwise the wrong class attributes are used in properties construction.. * Try to be nicer to the DB when destroying a libvirt instance * pep8 * merge trunk * fixed error message i18n-ization. added test * Don't hammer on the DB * Debug code clean up * Rebased to trunk rev 986 * An ultimate workaround worked... :( * Zero out volumes during deletion to prevent data leaking between users * Minor formatting cleanup * jesse@aire.local to mailmap * Changed pep8 command line option from --just-pep8 to --pep8 * re-add broken code * merge trunk * Final versioning * Updates the documentation on creating and using a cloudpipe image * iSCSI/KVM test completed * Minor fixes * Fix RBDDriver in volume manager. discover_volume was raising an exception. Modified local_path as well * Fixes VMware Connection to inherit from ComputeDriver * Fixes s3.py to allow looking up images by name. Smoketests run unmodified again with this change! * move from try_execute to _execute * Make VMWare Connection inherit from ComputeDriver * add up and down .sh * fix show_by_name in s3.py and give a helpful error message if image lookup fails * remove extra newline * dots * Rebased to trunk rev 980 * Rework importing volume_manager * Blushed up a little bit * Merged trunk * Only warn about rogue instances that compute should know about * Added some tests * Dangerous whitespace mistake!
:) * Cleanup after prereq merge * Add new flag 'max\_kernel\_ramdisk\_size' to specify a maximum size of kernel or ramdisk so we don't copy large files to dom0 and fill up /boot/guest * Rebased to trunk rev 980 * Merged lp:~rackspace-titan/nova/server\_metadata\_quotas as a prereq * Merged trunk * Docstring cleanup and formatting. Minor style fixes as well * Updated to use setfacl instead of chown * Commit for merge of metadata\_quotas preq * merge trunk * Removed extra call from try/except * Reverted some superfluous changes to make MP more concise * Merged trunk * Reverted some superfluous changes to make MP more concise * Replace instance ref from compute.api.get\_all with one from instance\_get. This should ensure it gets fully populated with all the relevant attributes * Add a unit test for terminate\_instances * pep8 * Fix RBDDriver in volume manager. discover\_volume was raising exception. Modified local\_path as well * pep8 fixes * migaration and pep8 fixes * update documentation on cloudpipe * Makes genvpn path actually refer to genvpn.sh instead of geninter.sh * typo * Merged trunk * Updating the runnova information and fixing bug 753352 * merge trunk * network manager changes, compute changes, various other * Floating ips auto assignment * Sudo chown the vbd device to the nova user before streaming data to it. This resolves an issue where nova-compute required 'root' privs to successfully create nodes with connection\_type=xenapi * Minor blush ups * A minor blush up * A minor blush up * Remove unused self.interfaces\_xml * Rebased to trunk rev 977 * Rebase to trunk rev 937 * debug tree status checkpoint 2 * docstring cleanup, direct api, part of compute * bzr ignore the top level CA dir that is created when running 'run\_tests.sh -N' * fix reference to genvpn to point to the right shell script * Set default stateOrProvice to 'supplied' in openssl.cnf.tmpl * merge trunk * This branch fixes https://bugs.launchpad.net/bugs/751231 * Replace instance ref from compute.api.get\_all with one from instance\_get. This should ensure it gets fully populated with all the relevant attributes * When using libvirt, remove the persistent domain definition when we call destroy, so that behavior on destroy is as it was when we were using transient instances * Rebased to trunk rev 973 * Currently terminating an instance will hang in a loop, this allows for deletion of instances when using a libvirt backend. Also I couldn't help add a debug log where an exception is caught and ignored * merge trunk * resolved lazy\_match conflict between bin/nova-manage instance and instance\_type by moving instance subcommand under vm command. documented vm command in man page. removed unused instance\_id from vm list subcommand * Ooops - redefining the \_ variable seems like a \_really\_ bad idea * Handle the case when the machine is already SHUTOFF * Split logic on shutdown and undefine, so that even if the machine is already shutdown we will be able to proceed * Remove the XML definition when we destroy a machine * Rebased to trunk rev 971 * debug tree status checkpoint * Reabased to trunk rev 971 * Fixed log message gaffe * pylintage * typo - need to get nova-volumes working on this machine :-/ * dd needs a count to succeed, and remove unused/non-working special case for size 0 * There is a race condition when a VDI is mounted and the device node is created. 
Sometimes (depending on the configuration of the Linux distribution) nova loses the race and will try to open the block device before it has been created in /dev * zero out volumes on delete using dd * Added RST file on using Zones * Fixes euca-attach-volume for iscsi using Xenserver * pep8 * merge trunk * removes log command from nova-manage as it no longer worked in multi-log setup * Added error message to exception logging * Fixes bug which hangs nova-compute when terminating an instance when using libvirt backend * missing 'to' * Short circuit non-existant device during unit tests. It won't ever be created because of the stubs used during the unit tests * Added a patch for python eventlet, when using install\_venv.py (see FAQ # 1485) * fixed LOG level and log message phrase * merge prop tweaks 2 * Set default stateOrProvice to 'supplied' in openssl.cnf.tmpl * This branch fixes https://bugs.launchpad.net/nova/+bug/751242 * Ignore errors when deleting the default route in the ensure\_bridge function * bzr ignore the CA dir * merge prop tweaks * Import translations from Launchpad * added Zones doc * Update the describe\_image\_attribute and modify\_image\_attribute functions in the EC2 API so they use the top level 'is\_public' attribute of image objects. This brings these functions in line with the base image service * Import from lp:~nova-core/nova/translations * corrects incorrect openstack api responses for metadata (numeric/string conversion issue) and image format status (not uppercase) * Implement a mechanism to enforce a configurable quota limit for image metadata (properties) within the OS API image metadata controller * Update the describe\_image\_attribute and modify\_image\_attribute functions in the ec2 API so they use the top level 'is\_public' attribute of image objects. This brings these functions in line with the base image service * Ignore errors when deleting the default route in the ensure\_bridge function * merge trunk * removed log command from nova-manage. no longer applicable with multiple logfiles * merge trunk * reminde admins of --purge option * Fixes issues with describe instances due to improperly set metadata * Keep guest instances when libvirt host restarts * fix tests from moving access check into update and delete * Added support for listing addresses of a server in the openstack api. Now you can GET \* /servers/1/ips \* /servers/1/ips/public \* /servers/1/ips/private Supports v1.0 json and xml. Added corresponding tests * Log libvirt errcode on exception * This fixes how the metadata and addresses collections are serialized in xml responses * Fix to correct libvirt error code when the domain is not found * merged trunk * Removed commented-out old 'delete instance on SHUTOFF' code * Automatically add the metadata address to the network host. This allows guests to ARP for the address properly * merged trunk and resolved conflict * slight typo * clarified nova-manage instance\_type create error output on duplicate flavorid * This branch is a patch for fixing below issue. > Bug #746821: live\_migration failing due to network filter not found Link a bug report * fix pep8 violation * Update instances table to use instance\_type\_id instead of the old instance\_type column which represented the name (ex: m1.small) of an instance type * Drop extra 'None' arg from dict.get call * Some i18n fixes to instance\_types * Renamed computeFault back to cloudServersFault in an effort to maintain consistency with the 1.0 API spec. 
We can look into distinguishing the two in the next release. Held off for now to avoid potential regression * adds a timeout on session.login\_with\_password() * Drop unneeded Fkey on InstanceTypes.id * Bypass a potential security vulnerability by not setting shell=True in xenstore.py, using johannes.erdfelt's patch * Renamed computeFault to cloudServersFault * fixed the way ip6 address were retrieved/returned in \_get\_network\_info in nova/virt/xenapi/vmops * added -manage vm [list|live-migration] to man page * removed unused instance parameter from vm list ... as it is unused. added parameters to docstring for vm list * moved -manage instance list command to -manage vm list to avoid lazy match conflict with instance\_types * Simplify by always adding to loopback * Remove and from AllocateAddress response, and fix bug #751176 * remove unused code * better error message * Blush up a bit * Rebased to trunk rev 949 * pep8 * adds timeout to login\_with\_password * test provider fw rules at the virt/ipteables layer. lowercase protocol names in admin api to match what the firewall driver expects. add provider fw rule chain in iptables6 as well. fix a couple of small typos and copy-paste errors * fixed based on reviewer's comment - 1. erase unnecessary blank line, 2. adding LOG.debug * Rebased to trunk rev 949 * fixed based on reviewer's comment - 'locals() should be off from \_() * Make description of volume\_id more generic * add the tests * pep8 cleanup * ApiError code should default to None, and will only display a code if one exists. Prior was output an 'ApiError: ApiError: error message' string, which is confusing * ec2 api run\_instances checks for image status must be 'available'. Overhauled test\_run\_instances for working set of test assertions * if we delete the old route when we move it we don't need to check for exists * merged trunk * removed comment on API compliance * Added an option to run\_tests.sh so you can run just pep8. So now you can: ./run\_tests.sh --just-pep8 or ./run\_tests.sh -p * Add automatic metadata ip to network host on start. Also fix race where gw is readded twice * Controllers now inherit from nova.api.openstack.common.OpenstackController * Merged trunk * Support providing an XML namespace on the XML output from the OpenStack API * Merged with trunk, fixed up test that wasn't checking namespace * Added support for listing addresses of a server in the openstack api. Now you can GET \* /servers/1/ips \* /servers/1/ips/public \* /servers/1/ips/private Supports v1.0 json and xml. 
Added corresponding tests * check visibility on delete and update * YADU (Yet Another Docstring Update) * Make sure ca\_folder is created before chdir()ing into it * another syntax error * Use a more descriptive name for the flag to make it easier to understand the purpose * Added logging statements for generic WSGI and specific OpenStack API requests * syntax error * Incorprate johannes.erdfelt's patch * updated check\_vm\_record in test\_xenapi to check the gateway6 correctly * updated get\_network\_info in libvirt\_conn to correctly insert ip6s and gateway6 into the network info, also small style fixes * add docstrings * updated \_prepare\_injectables() to use info[gateway6] instead of looking inside the ip6 address dict for the gateway6 information * Enable RightAWS style signing on server\_string without port number portion * modified behavior of inject\_network\_info and reset\_network related to a vm\_ref not being passed in * Create ca\_folder if it does not already exist * Wait for device node to be created after mounting image VDI * Improved unit tests Fixed docstring formatting * Only create ca\_path directory if it does not already exist * Added bug reference * Only create ca\_path directory if it does not already exist * Make "setup.py install" much more thorough. It now installs tools/ into /usr/share/nova and makes sure api-paste.conf lands in /etc/nova rather than /etc * fixed based on reviwer's comment * return image create response as image dict * Add a patch for python eventlet, when using install\_venv.py (see FAQ # 1485) * Undo use of $ in chain name where not needed * Testing for iptables manager changes * Don't double-apply provider fw rules in NWFilter and Iptables. Don't create provider fw rules for each instance, use a chain and jump to it. Fix docstrings * typo * remove -None for user roles * pep8 * fallback to status if image\_state is not set * update and fix tests * unite the filtering done by glance client and s3 * Removing naughty semicolon * merged trunk * remove extraneous empty lines * move error handling down into get\_password function * refactor to handle invalid adminPass * fixed comment * merged trunk * add support for specifying adminPass for JSON only in openstack api 1.1 * add tests for adminPass on server create * Fix a giant batch of copypasta * Remove file leftover from conflict * adding support for OSAPI v1.1 limits resource * Moved 'name' from to , corrected and fixes bug # 750482 * This branch contains the fix for lp:749973. VNC is assumed that is default for all in libvirt which LXC does not support yet * Remove comments * Separate CA/ dir into code and state * removed blank lines for pep8 fix * pep8 fixed * Fixed the addresses and metadata collections in xml responses. Added corresponding tests * Dont configure vnc if we are using lxc * Help paste\_config\_file find the api config now that we moved it * Add bug reference * Move api-paste.ini into a nova/ subdir of etc/ * Add a find\_data\_files method to setup.py. 
Use it to get tools/ installed under /usr/(local/)/share/nova * Nits * Add missing underscore * fix bug lp751242 * fix bug lp751231 * Automatically create CA state dir, and make sure the CA scripts look for the templates in the right places * fix bug 746821 * Remove and from AllocateAddress response, and fix bug #751176 * Allow CA code and state to be separated, and make sure CA code gets installed by setup.py install * Rebased to trunk 942 * fix bug lp:682888 - DescribeImages has no unit tests * Correct variable name * correct test for numeric/string metadata value conversion * openstack api metadata responses must be strings * openstack api requires uppercase image format status responses * merge trunk * Refactor so that instances.instance\_type is now instances.instance\_type\_id * splitting test\_get\_nic\_for\_xml into two functions * Network injection check fixed in libvirt driver * merging trunk * fixing log message * working with network\_ref like with mapping * add test for NWFilterFirewall * Removed adminclient.py and added reference to the new nova-adminclient project in tools/pip-requires * Don't prefix adminPass with the first 4 chars of the instance name * Declares the flag for vncproxy\_topic in compute.api * Fixes bug 741246. Ed Leafe's inject\_file method for the agent plugin was mistakenly never committed after having to fix commits under wrong email address. vmops makes calls to this (previously) missing method * Attempt to circumvent errors in the API from improper/malformed responses from image service * fixes incorrect case of OpenStack API status response * Fixed network\_info creating * Moved 'name' property from to , corrected and fixes bug # 750482 * corrected capitalization of openstack api status and added tests * libvirt\_con log fix * Ensure no errors for improper responses from image service * merge trunk * Fixes error which occurs when no name is specified for an image * improving tests * network injection check fixed * Only define 'VIMMessagePlugin' class if suds can be loaded * Make euca-get-ajax-console work with Euca2ools 1.3 * Add bug reference * Use keyword arguments * add multi\_nic\_test * added preparing\_xml test * split up to\_xml to creation xml\_info and filling the template * use novalib for vif\_rules.py, fix OvsFlow class * extract execute methods to a library for reuse * Poller needs to check for BUILDING not NOSTATE now, since we're being more explict about what is going on * Add checking if the floating\_ip is allocated or not before appending to result array in DescribeAddresses * Added synchronize\_session parameter to a query in fixed\_ip\_disassociate\_all\_by\_timeout() and fix #735974 * Made the fix simpler * Add checking if the floating\_ip is allocated or not before appending to result array * Added updated\_at field to update statement according to Jay's comment * change bridge * Add euca2ools import * Rebased to trunk 930 * Rebased to trunk 726 * lots of updates to ovs scripts * Make euca-get-ajax-console work with Euca2ools 1.3 * merge trunk * Hopefully absolved us of the suds issue? 
* Removes excessive logging message in the event of a rabbitmq failure * Add a change password action to /servers in openstack api v1.1, and associated tests * Removal of instance\_set\_state from driver code, it shouldnt be there, but instead should be in the compute manager * Merged trunk * Don't include first 4 chars of instance name in adminPass * Friendlier error message if there are no compute nodes are available * merge lp:nova * Merged waldon * Adding explanation keyword to HTTPConflict * Merged waldon * makes sure s3 filtering works even without metadata set properly * Merged waldon * Didn't run my code. Syntax error :( * Now using the new power state instead of string * adding servers view mapping for BUILDING power state * removes excessive logging on rabbitmq failure * Review feedback * Friendlier error message if there are no compute nodes are available * Merged with Waldon * Better error handling for spawn and destroy in libvirt * pep8 * adding 'building' power state; testing for 409 from OSAPI when rebuild requested on server being rebuild * More friendly error message * need to support python2.4, so can't use uuid module * If the floating ip address is not allocated or is allocated to another project, then the user trying to associate the floating ip address to an instance should get a proper error message * Update state between delete and spawn * adding metadata support for v1.1 * Rebuild improvements * Limit image metadata to the configured metadata quota for a project * Add volume.API.remove\_from\_compute instead of compute.API.remove\_volume * Rebased to trunk rev 925 * Removed adminclient and referred to pypi nova\_adminclient module * fixed review comment for i18n string multiple replacement strings need to use dictionary format * fixed review comment for i18n string multiple replacement strings need to use dictionary format * Add obviously-missing method that prevents an Hyper-V compute node from even starting up * Avoid any hard dependencies in nova.virt.vmwareapi.vim * review cleanup * Handles situation where Connection.\_instances doesn't exist (ie. production) * localize NotImplementedError() * Change '"%s" % e' to 'e' * Fix for LP Bug #745152 * Merged waldon * adding initial v1.1 rebuild action support * Add ed leafe's code for the inject\_file agent plugin method that somehow got lost (fixes bug 741246). Update TimeoutError string for i18n * submitting a unit test for terminate\_instance * Update docstrings and spacing * fixed ordering and spacing * removed trailing whitespace * updated per code review, replaced NotFound with exception.NotFound * Merged Waldon's API code * remove all references to image\_type and change nova-manage upload to set container format more intelligently * Rough implementation of rebuild\_instance in compute manager * adding v1.0 support for rebuild; adding compute api rebuild support * Key type values in ec2\_api off of container format * Whoops * Handle in vim.py * Refixed unit test to check XML ns * Merged with trunk (after faults change to return correct content-type) * OpenStack API faults have been changed to now return the appropriated Content-Type header * More tests that were checking for no-namespace * Some tests actually tested for the lack of a namespace :-) * pep8 fixes * Avoid hard dependencies * Implement quotas for the new v1.1 server metadata controller. Modified the compute API so that metadata is a dict (not an array) to ensure we are using unique key values for metadata. 
This isn't explicit in the SPECs but it is implied by the new v1.1 spec since PUT requests modify individual items * Add XML namespaces to the OpenStack API * Merged with trunk * Fixed mis-merge: OS API version still has to be v1.1 * Store socket\_info as a dictionary rather than an array * Merged with trunk * Added synchronize\_session parameter to a query in fixed\_ip\_disassociate\_all\_by\_timeout() and fix #735974 * Key was converted through str() even if None, resulting in "None" being added to authorized\_keys when no key was specified * queues properly reconnect if rabbitmq is restarted * Moving server update adminPass support to be v1.0-specific OS API servers update tests actually assert and pass now Enforcing server name being a string of length > 0 * Adding Content-Type code to openstack.api.versions.Versions wsgi.Application * Fixes metadata for ec2\_api to specify owner\_id so that it filters properly * Makes the image decryption code use the per-project private key to decrypt uploaded images if use\_project\_ca is set. This allows the decryption code to work properly when we are using a different ca per project * exception -> Fault * Merged trunk * Do not push 'None' to authorized\_keys when no key is specified * Add missing method that prevents HyperV compute nodes from starting up * TopicAdapterConsumer uses a different callback model than TopicConsumer. This patch updates the console proxy to use this pattern * merge trunk * Uses the proc filesystem to check the volume size in volume smoketests so that it works with a very limited busybox image * merged trunk * The VNC Proxy is an OpenStack component that allows users of Nova to access their instances through a websocket enabled browser (like Google Chrome) * make sure that flag is there in compute api * fix localization for multiple replacement strings * fix doc to refer to nova-vncproxy * Support for volumes in the OpenStack API * Deepcopy the images, because the string formatting transforms them in-place * name, created\_at, updated\_at are required * Merged with trunk * "Incubator" is no more. Long live "contrib" * Rename MockImageService -> FakeImageService * Removed unused super\_verbose argument left over from previous code * Renamed incubator => contrib * Wipe out the bad docstring on get\_console\_pool\_info * use project key for decrypting images * Fix a docstring * Found a better (?) docstring from get\_console\_pool\_info * Change volume so that it returns attachments in the same format as is used for the attachment object * Removed commented-out EC2 code from volumes.py * adding unit tests for describe\_images * Fix unit test to reflect fact that instance is no longer deleted, just marked SHUTOFF * Narrowly focused bugfix - don't lose libvirt instances on host reboot or if they crash * fix for lp742650 * Added missing blank line at end of multiline docstring * pep8 fixes * Reverted extension loading tweaks * conversion of properties should set owner as owner\_id not owner * add nova-vncproxy to setup.py * clarify test * add line * incorporate feedback from termie * Make dnsmasq\_interface configurable * Stop nova-manage from reporting an error every time.
Apparently except: catches sys.exit(0) * add comment * switch cast to a call * move functions around * move flags per termie's feedback * initial unit test for describe images * don't print the error message on sys.exit(0) * added blank lines in between functions & removed the test\_describe\_images (was meant for a diff bug lp682888) * Make Dnsmasq\_interface configurable * fix flag names * Now checking that at least one network is marked injected (libvirt and xenapi) * This branch adds support for linux containers (LXC) to nova. It uses the libvirt LXC driver to start and stop the instance * use manager pattern for auth token proxy * Style fixes * style fix * Glance used to return None when a date field wasn't set, now it returns ''. Glance used to return dates in format "%Y-%m-%dT%H:%M:%S", now it returns "%Y-%m-%dT%H:%M:%S.%f" * Fix up docstring * Added content\_type to OSAPI faults * accidentally dropped a sentence * Added checks that at least one network is marked injected in libvirt and xenapi * Adds support for versioned requests on /images through the OpenStack API * Import order * Switch string concat style * adding xml test case * adding code to explicitly set the content-type in versions controller; updating test * Merged trunk * Added VLAN networking support for XenAPI * pep8 * adding server name validation to create method; adding tests * merge lp:nova * use informative error messages * adding more tests; making name checks more robust * merge trunk * Fix pep8 error * Tweaking docstrings just in case * Catch the error that mount might throw a bit better * sorted pep8 errors that were introduced during previous fixes * merge trunk * make all openstack status uppercase * Add remove\_volume to compute API * Pass along the nbd flags although we don't support it just yet * cleaned up var name * made changes per code review: 1) removed import of image from objectstore 2) changed to comments instead of triple quotes * Displays an error message to the user if an exception is raised. This is vital because if logfile is set, the exception shows up in the log and the user has no idea something went wrong * Yet more docstring fixes * More style changes * Merged with trunk * Multi-line comments should end in a blank line * add note per review * More fixes to keep the stylebot happy * Cleaned up images/fake.py, including move to Duplicate exception * Code cleanup to keep the termie-bot happy * displays an error message if a command fails, so that the user knows something went wrong * Fixes volume smoketests to work with ami-tty * address some of termie's recommendations * add period, test github * pep8 * osapi servers update tests actually assert now; enforcing server name being a string of length > 0; moving server update adminPass support to be v1.0-specific * Moving shared\_ip\_groups controller to APIRouterV10 Replacing all shared\_ip\_groups controller code with HTTPNotImplemented Adding shared\_ip\_groups testing * fix docstrings * Merged trunk * Updated docstrings to satisfy * Updated docstrings to satisfy * merge trunk * merge trunk * minor fix and comment * style fixes * merging trunk * Made param descriptions sphinx compatible * Toss an \_\_init\_\_ in the test extensions dir.
This gets it included in the tarball * pep8 * Fix up libvirt.xml.template * This fixes EC2 API so that it returns image displayName and description properly * merged from trunk * Moving backup\_schedule route out of base router to OS API v1.0 All controller methods return HTTPNotImplemented to prevent further confusion Correcting tests that referred to incorrect url * Fixed superfluous parentheses around locals() * Added image name and description mapping to ec2 api * use self.flags in virt test * Fixed DescribeUser in the ec2 admin client to return None instead of an empty UserInfo object * Remove now useless try/except block * Don't make the test fail * backup\_schedule tests corrected; controller moved to APIRouterV10; making controller fully HTTPNotImplemented * when image\_id provided cannot be found, returns more informative error message * Adds support for snapshotting (to a new image) in the libvirt code * merge lp:nova * More pep8 corrections * adding shared\_ip\_groups testing; replacing all shared\_ip\_groups controller code with HTTPNotImplemented; moving shared\_ip\_groups controller to APIRouterV10 * Merged trunk * pep8 whitespace * Add more unit tests for lxc * Decided to not break old format so this should work with the way Glance used to work and the way glance works now. The best of both worlds? * update glance params per review * add snapshot support for libvirt * HACKING update for docstrings * merge trunk * Fix libvirt merge mistake * lock down requirements for change password * merge trunk * Changed TopicConsumer to TopicAdapterConsumer in bin/nova-ajax-console-proxy to allow it to start up once again * style changes * Removed iso8601 dep from pip-requires * Merged trunk * Removed extra dependency as per suggestion, although it fixes the issue much better IMO, we should be safe sticking with using the format from python's isoformat() * Assume that if we don't find a VM for an instance in the DB, and the DB state is NOSTATE, that the db instance is in the process of being spawned, and don't mark it SHUTOFF * merge with trunk * Added MUCH more flexible iso8601 parser dep for added stability * Fix formatting of TODO and NOTE - should be a space after the # * merge lp:nova * Mixins for tests confuse pylint no end, and aren't necessary... you can stop the base-class from being run as a test by prefixing the class name with an underscore * Merged the two periodic\_tasks functions, that snuck in due to parallel merges in compute.manager * Start up nova-api service on an unused port if 0 is specified. Fixes bug 744150 * Removed 'is not None' to do more general truth-checking.
Added rather verbose testing * Merged with trunk * merge trunk * merge trunk, fixed conflicts * TopicConsumer -> TopicAdapterConsumer * Fix typo in libvirt xml template * Spell "warn" correctly * Updated Authors file * Removed extraneous white space * Add friendlier message if an extension fails to include a correctly named class or factory * addressed reviewers' concerns * addressed termies review (third round) * addressed termie's review (second round) * Do not load extensions that start with a "\_" * addressed termies review (first round) * Clarified note about scope of the \_poll\_instance\_states function * Fixed some format strings * pep8 fixes * Assume that if we don't find a VM for an instance in the DB, and the DB state is NOSTATE, that the db instance is in the process of being spawned * pep8 fixes * Added poll\_rescued\_instances to virt driver base class * There were two periodic\_tasks functions, due to parallel merges in compute.manager * pep8 fixes * Bunch of style fixes * Fix utils checking * use\_ipv6 now passing to interfaces.template as first level variable in libvirt\_conn * Replaced import of an object with module import as per suggestion * Updates to the newest version of nova.sh, which includes:  \* Installing new python dependencies  \* Allows for use of interfaces other than eth0  \* Adds a run\_detached mode for automated testing * Now that it's an extension, it has to be v1.1. Also fixed up all the things that changed in v1.1 * merge trunk addressing Trey's comments * Initial extensification of volumes * Merged with trunk, resolved conflicts & code-flicts * Removed print * added a simple test for describe\_images with mock for detail funciton * merged trunk * merge trunk * merge lp:nova * Adding links container to openstack api v1.1 servers entities * Merged trunk * Add license and copyright to nova/tests/api/openstack/extensions/\_\_init\_\_.py * Fixed a typo on line 677 where there was no space between % and FLAGS * fix typos * updated nova.sh * Added a flag to allow a user to specify a dnsmasq\_config\_file is they would like to fine tune the dnsmasq settings * disk\_format is now an ImageService property. Adds tests to prevent regression * Merged trunk * Merged trunk * merging trunk * merge trunk * Merged trunk and fixed broken/conflicted tests * - add a "links" container to versions entities for Openstack API v1.1 - add testing for the openstack api versions resource and create a view builder * merging trunk * This is basic network injection for XenServer, and includes: * merging trunk * Implement image metadata controller for the v1.1 OS API * merging trunk * Changed use\_ipv6 passing to interfaces.template * merging trunk, resolving conflicts * Add a "links" container to flavors entities for Openstack API v1.1 * Toss an \_\_init\_\_ in the test extensions dir. This gets it included in the tarball * Use metadata = image.get('properties', {}) * merge trunk * Revert dom check * merge trunk * Fix unit tests w/ latest trunk merge * merging trunk and resolving conflicts * Fix up destroy container * Fix up templating * Implement metadata resource for Openstack API v1.1. 
Includes: -GET /servers/id/meta -POST /servers/id/meta -GET /servers/id/meta/key -PUT /servers/id/meta/key -DELETE /servers/id/meta/key * Dont always assume qemu * Removed partition from setup\_container * pep8 fix * disk\_format is now an ImageService property * Restore volume state on migration failure * merge trunk, add unit test * merge trunk * merge trunk addressing reviewer's comments * clarify comment * add documentation * Empty commit? * minor pep8 fix in db/fakes.py * Support for markers for pagination as defined in the 1.1 spec * add hook for osapi * merge trunk * Ports the Tornado version of an S3 server to eventlet and wsgi, first step in deprecating the twistd-based objectstore * Merged with trunk Updated net injection for xenapi reflecting recent changes for libvirt * Fix lp741415 by splitting arguments of \_execute in the iSCSI driver * make everything work with trunk again * Support for markers for pagination as defined in the 1.1 spec * add descriptive docstring * don't require integrated tests to recycle connections * remove twisted objectstore * port the objectstore tests to the new tests * update test base class to monkey patch wsgi * rename objectstore tests * port s3server to eventlet/wsgi * add s3server, pre-modifications * merge trunk * Added detail keywork and i18n as per suggestions * incorporate feedback from termie * Implementation of blueprint hypervisor-vmware-vsphere-support. (Link to blueprint: https://blueprints.launchpad.net/nova/+spec/hypervisor-vmware-vsphere-support) * fix typo * Addressing Trey's comments. Removed disk\_get\_injectables, using \_get\_network\_info's return value * Adds serverId to OpenStack API image detail per related\_image blueprint * Fix for bug #740947 Executing parted with sudo in \_write\_partition (vm\_utils.py) * Implement API extensions for the Openstack API. Based on the Openstack 1.1 API the following types of extensions are supported: * Merging trunk * Adds unit test coverage for XenAPI Rescue & Unrescue * libvirt driver multi\_nic support. In this phase libvirt can work with and without multi\_nic support, as in multi\_nic support for xenapi: https://code.launchpad.net/~tr3buchet/nova/xs\_multi\_nic/+merge/53458 * Merging trunk * Review feedback * Merged trunk * Additions to the Direct API: * Merged trunk * Added test\_get\_servers\_with\_bad\_limit, test\_get\_servers\_with\_bad\_offset and test\_get\_servers\_with\_bad\_marker * pep8 cleanups * Added test\_get\_servers\_with\_limit\_and\_marker to test pagination with marker and limit request params * style and spacing fixed * better error handling and serialization * add some more docs and make it more obvious which parts are examples * add an example of a versioned api * add some more docs to direct.py * add Limited, an API limiting/versioning wrapper * improve the formatting of the stack tool * support volume and network in the direct api * Merged with trunk, fix problem with behaviour of (fake) virt driver when instance doesn't reach scheduling * In this branch we are forwarding incoming requests to child zones when the requested resource is not found in the current zone * trunk merge * Fixes a bug that was causing tests to fail on OS X by ensuring that greenthread sleep is called during retry loops * Merged trunk * Fix some errors that pylint found in nova/api/openstack/servers.py * Fix api logging to show proper path and controller:action * Merged trunk * Pylint 'Undefined variable' E0602 error fixes * Made service\_get\_all()'s disabled parameter default to None. 
Pass False for enabled services; True for disabled services. Calls to this method have been updated to remain consistent * Merged with trunk * Reconcile tests with latest trunk merges * Merged trunk and resolved conflict in nova/db/sqlalchemy/api.py * Don't try to parse the empty string as a datetime * change names for consistency with existing db api * Merged with trunk * Forgot one set of flags * Paginated results should not include the item starting at marker. Improved implementation of common.limited\_by\_marker as suggested by Matt Dietz. Added flag osapi\_max\_limit * Detect if user is running the default Lucid version of libvirt, and give a nicer error message * Updated to use new APIRouterV11 class in tests * Fix lp741514 by declaring libvirt\_type in nova-manage * Docstring fixes * get image metadata tests working after the datetime interface change in image services * adding versioned controllers * Addressed issues raised by Rick Harris' review * Stubbing out utils.execute for migrate tests * Aggregates capabilities from Compute, Network, Volume to the ZoneManager in Scheduler * merged trunk r864 * removing old Versions application and correcting fakes to use new controller * Renamed \_\_image and \_\_compute to better describe their purposes. Use os.path.join to create href as per suggestion. Added base get\_builder as per pychecker suggestion * merging trunk r864 * trunk merged. conflicts resolved * Merged trunk * merge trunk * merge trunk * Small refactor * Merged trunk and fixed tests * Couple of pep8 fixes * pep8 clearing * making servers.generate\_href more robust * merging trunk r863 * Fixes lp740322: cannot run test\_localization in isolation * couple of bugs fixed * Merged trunk * Don't use popen in detaching the lxc loop * Fix up formatting of libvirt.xml.template * trunk merge * fix based on sirp's comments * Grrr... because we're not recycling the API yet, we have to configure flags the first time it's called * merge trunk * Fake out network service as well, otherwise we can't terminate the instance in test\_servers now that we've started a compute service * merge trunk * Sorted out a problem that occurred with unit tests for VM migration * pep8 fixes * Test for attach / detach (and associated fixes) * Pass a fake timing source to live\_migration\_pre in every test that expects it to fail, shaving off a whole minute of test run time * merge trunk * Poll instance states periodically, so that we can detect when something changes 'behind the scenes' * Merged with conflict and resolved conflict (with my own patch, no less) * Added simple nova volume tests * Created simple test case for server creation, so that we can have something to attach to.. * Merged with trunk * Added volume\_attachments * Declare libvirt\_type to avoid AttributeError in live\_migration * minor tweak from termie feedback * Added a mechanism for versioned controllers for openstack api versions 1.0/1.1.
Create servers in the 1.1 api now supports imageRef/flavorRef instead of imageId/flavorId * Fixed the docstring for common.get\_id\_from\_href * better logging of exceptions * Merged trunk * Merged trunk * Fix issues with certificate updating & whitespace removal * Offers the ability to run a periodic\_task that sweeps through rescued instances older than 24 hours and forcibly unrescues them * Merged trunk * Added hyperv stub * Don't try to parse a datetime if it is the empty string (or None) * Remove a blank line * pep8 fix * Split arguments of \_execute in the iSCSI driver * merge trunk * Added revert\_resize to base class * Addressing Rick Clark's comments * Merged with lp:nova, fixed conflicts * boto\_v6 module is imported if the flag "use\_ipv6" is set to True * pep8 fixes, backported some important fixes that didn't make it over from my testing system :-( * Move all types of locking into utils.synchronize decorator * Doh! Missed two places which were importing the old driver location * Review feedback * make missing noVNC error condition a bit more fool-proof * clean some pep8 issues * general cleanup, use whitelist for webserver security * Better method name * small fix * Added docstring * Updates the previously merged xs\_migration functionality to allow upsizing of the RAM and disk quotas for a XenServer instance * Fix lp735636 by standardizing the format of image timestamp properties as datetime objects * migration gateway\_v6 to network\_info * merge prop fixes * Should not call super \_\_init\_\_ twice in APIRouter * fix utils.execute retries for osx * Keep the fallback code - we may want to do better version checking in future * Give the user a nicer error message if they're using the Lucid libvirt * Only run periodic task when rescue\_timeout is greater than 0 * Fixed some typos * Forgot extraneous module import again * Merged trunk * Forgot extraneous module import * Automatically unrescue instances after a given timeout * trunk merge * indenting cleanup * fixing some dictionary get calls * Unit test cleanup * one more minor fix * Moving the migration yet again * xml template fixed * merge prop changes * pep8 fixed * trunk merged * added myself to authors file * Using super to call parent \_setup\_routes in APIRouter subclasses * Merged trunk * pep8 fix * Implement v1.1 image metadata * This branch contains the fix for bug #740929 It makes sure cidr\_v6 is not null before building the 'ip6s' key in the network info dictionary. This way utils.to\_global\_ipv6 does not fail because of cidr==None * review comments fixed * add changePassword action to os api v1.1 * Testing of XML and JSON for show(), and conformance to API spec for JSON * Fixed tests * Merged trunk * Removed some un-needed code, and started adding tests for show(), which I forgot\! * id -> instance\_id * Checking whether cidr\_v6 is not null before populating ipv6 key in network info map (VMOps.\_get\_network\_info) * Executing parted with sudo in \_write\_partition * We update update\_ra method to synchronize, in order to prevent crash when we request multiple instance at once * merged with trunk Updated xenapi network injection for IPv6 Updated unit tests * merge trunk * merge trunk * removed excess debug line * more progress * use the nova Server object * separating out components of vnc console * Earlier versions of the python libvirt binding had getVersion in the libvirt namespace, not on the connection object. 
Check both * Report the exception (happens when can't import libvirt) * Use subset\_dict * Removing dead code * Touching up comment * Merging trunk * Pep8 fixes * Adding tests for owned and non-existent images * More small cleanups * Fix for #740742 - format describe\_instance\_output correctly to prevent errors in dashboard * Cleaning up make\_image\_fixutres * Merged with lp:nova * Small cleanup of openstack/images.py * Fixed up the new location of driver.py * Fix for lp740742 - format describe\_instance\_output correctly to prevent errors in dashboard * Merged with lp:nova * Filtering images by user\_id now * Clarified my "Yuk" comment * Cleaned up comment about virsh domain.info() return format * Added space in between # and TODO in #TODO * Added note about the advantages of using a type vs using a set of global constants * Filled out the base-driver contract, so it's not a false-promise * Enable flat manager support for ipv6 * Adding a talk bubble to the nova.openstack.org site that points readers to the 2011.1 site and the docs.openstack.org site - similar to the swift.openstack.org site. I believe it helps people see more sites are available, plus they can get to the Bexar site if they want to. Going forward it'll be nice to use this talk bubble to point people to the trunk site from released sites * Correctly imports greenthread in libvirt\_conn.py. It is used by live\_migrate() * Forgot this in the rename of check\_instance -> check\_isinstance * Test the login behavior of the OpenStack API. Uncovered bug732866 * trunk merge * Renamed check\_instance -> check\_isinstance to make intent clearer * Fix some crypto strangeness (\n in file\_name field of certificates, wrong IMPL method for certificate\_update) * Added note agreeing with Brian Lamar that the namespace doesn't belong in wsgi * Fix to avoid db migration failure in virtualenv * Fixed up unit tests and direct api that was also calling \_serialize (naughty!) * Fix the describe\_vpns admin api call * pep8 and fixed up zone-list * Support setting the xmlns intelligently * get\_all cleanup * Refactored out \_safe\_translate code * Set XML namespace when returning XML * Fix for LP Bug #704300 * Fix a typo in the ec2 admin api * typo fix * Pep8 fix * Merging trunk * make executable * Adding BASE\_IMAGE\_ATTRS to ImageService * intermediate progress on vnc-nova integration. checking in to show vish * add in eventlet version of vnc proxy * Updating doc strings in accordance with PEP 257. Fixing order of imports in common.py * one more copyright fix * pep8 stupidness * Tweak * fixing copyright * tweak * tweak * Whoops * Changed default for disabled on service\_get\_all to None. Changed calls to service\_get\_all so that the results should still be as they previously were * Now using urlparse to parse a url to grab id out of it * Resolved conflicts * Fix * Remove unused global semaphore * Addressed reviewer's comments * pep8 fix * Apparantly a more common problem than first thought * Adding more docstrings. image\_id and instance\_type fields of an instance will always exist, so no reason to check if keys exist * Pass a fake timing source to test\_ensure\_filtering\_rules\_for\_instance\_timeout, shaving off 30 seconds of test run time * pep8 * Merged trunk * Add a test for leaked semaphores * Remove checks in \_cache\_image tests that were too implementation specific * adding view builder tests * Add correct bug fixing metadata * When updating or creating set 'delete = 0'. 
(thus reactivating a deleted row) Filter by 'deleted' on delete * merging trunk r843 * making Controller.\_get\_flavors is\_detail a keyword argument * merging trunk r843 * Fix locking problem in security group refresh code * merging trunk r843 * Add unit test and code updates to ensure that a PUT requests to create/update server metadata only contain a single key * Add call to unset all stubs * IptablesManager.semaphore is no more * Get rid of IptablesManager's explicit semaphore * Add --fixes lp: metadata * Convert \_cache\_image to use utils.synchronized decorator. Disable its test case, since I think it is no longer needed with the tests for synchronized * Make synchronized decorator not leak semaphores, at the expense of not being truly thread safe (but safe enough for Eventlet style green threads) * merge trunk * Wrap update\_ra in utils.synchronized * Make synchronized support both external (file based) locks as well as internal (semaphore based) locks. Attempt to make it native thread safe at the expense of never cleaning up semaphores * merge with trunk * vpn changes * added zone routing flag test * routing test coverage * routing test coverage * xenapi support for multi\_nic. This is a phase of multi\_nic which allows xenapi to work as is and with multi\_nic. The other virt driver(s) need to be updated with the same support * better comments. First redirect test * better comments. First redirect test * Remove \_get\_vm\_opaque\_ref() calls in rescue/unrescue * Remove dupe'd code * Wrap update\_dhcp in utils.synchronized * if fingerprint data not provided, added logic to calculate it using the pub key * get rid of another datetime alias * import greenthread in libvirt * merge lp:nova * make bcwaldon happy * fix licenses * added licenses * wrap and log errors getting image ids from local image store * merge lp:nova * merging trunk * Fix for LP Bug #739641 * pep8; various fixes * Provide more useful exception messages when unable to load the virtual driver * Added Gabe to Authors file. He helped code this up too * Added XenAPI rescue unit tests * added an enumerate to track device in vmops.create\_vifs() * pep8 * Openstack api 1.0 flavors resource now implemented to match the spec * more robust extraction of arguments * Updated comment per the extension naming convention we actually use * Added copyright header * Fix pep8 issues in nova/api/openstack/extensions.py * Fix limit unit tests (reconciles w/ trunk changes) * Changed fixed\_range (CIDR) to be required in the nova-manage command; changed default num\_networks to 1 * merging trunk r837 * zones3 and trunk merge * Added space * trunk merge * remove scheduler.api.API. 
naming changes * Changed error to TypeError so that we get the arguments list * Added my name to Authors Added I18n for network create string * merge with trunk * merge trunk * merge trunk * merge trunk * Add bug metadata * Wrap update\_dhcp in utils.synchronized * fixes nova-manage instance\_type compatibility with postgres db * Tell PyLint not to complain about the "\_" function * Make smoketests' exit code reveal whether they were succesful * pep8 * Added run\_instances method to the connection.py of the contrib/boto\_v6/ec2 which would return ReservationV6 object instead of Reservation in order to access attribute dns\_name\_v6 of an instance * cleanup another inconsistent use of 1 for True in nova-manage * Changed Copyright to NTT for newly added files for flatmanager ipv6 * merge trunk * \* committing ovs scripts * fix nova-manage instance\_type list for postgres compatibility * fixed migration instance\_types migration to support postgres correctly * comment more descriptive * Seriously? * Fixed netadmin smoketests for ipv6 * Merged trunk * Better errors when virt driver isn't loaded * merge lp:nova * fix date formatting in images controller show * huh * fix ups * merge trunk * uses True/False instead of 1/0 for Postgres compatibility * cleaned up tests stubs that were accidentally checked in * works again. woo hoo * created api endpoint to allow uploading of public key * api decorator * Cleanup of FakeAuthManager * Replaced all pylint "disable-msg=" with "disable=" and "enable-msg=" with "enable=" * Change cloud.id\_to\_ec2\_id to ec2utils.id\_to\_ec2\_id. Fixes EC2 API error handling when invalid instances and volume names are specified * A few more single-letter variable names bite the dust * Re-implementation (or just implementation in many cases) of Limits in the OpenStack API. Limits is now available through /limits and the concept of a limit has been extended to include arbitrary regex / http verb combinations along with correct XML/JSON serialization. Tests included * Avoid single-letter variable names * auth\_data is a list now (thanks Rick!) * merge with trunk * Mark instance metadata as deleted when we delete the instance * results * fixed up novaclient usage to include managers * Added test case * Minor fixes to replace occurances of "VI" by "VIM" in 2 comments * whoopsy2 * whoopsy * Fixed 'Undefined variable' errors generated by pylint (E0602) * Merged trunk * Change cloud.id\_to\_ec2\_id to ec2utils.id\_to\_ec2\_id. Fixes EC2 API error handling when invalid instances and volume names are specified * enable-msg -> enable * disable-msg -> disable * enable\_zone\_routing flag * PEP-8 * Make flag parsing work again * Using eventlets greenthreads for optimized image processing. Fixed minor issues and style related nits * Fixed issue arisen from recent feature update (utils.execute) * Make proxy.sh work with both openbsd and traditional variants of netcat * Query the size of the block device, not the size of the filesystem * merge trunk * Ensuring kernel/ramdisk files are always removed in case of failures * merge trunk * merge trunk * Implement metadata resource for Openstack API v1.1. Includes: -GET /servers/id/meta -POST /servers/id/meta -GET /servers/id/meta/key -PUT /servers/id/meta/key -DELETE /servers/id/meta/key * Make "ApiError" the default error code for ApiError instances, rather than "Unknown." 
* When changing the project manager, if the new manager is not yet a project member, be sure to make them be a project member * Make the rpc cast/call debug calls show what topic they are sending to. This aides in debuugging * Final touches and bug/pep8 fixes * Support for markers for pagination as defined in the 1.1 spec * Merged trunk * Become compatible with ironcamel and bcwaldon's implementations for standardness * pep8 * Merged dependant branch lp:~rackspace-titan/nova/openstack-api-versioned-controllers * Updated naming, removed some prints, and removed some invalid tests * adding servers container to openstack api v1.1 servers entities * decorator more generic now * Images now v1.1 supported...mostly * fixed up bzr mess * Fix for LP Bug #737240 * refactored out middleware, now it's a decorator on service.api * Fix for LP Bug #737240 * Add topic name to cast/call logs * Changing project manager should make sure that user is a project member * Invert some of the original logic and fix a typo * Make the smoketests pep8 compliant (they weren't when I started working on them..) * Update the Openstack API to handle case where personality is set but null in the request to create a server * Fix a couple of things that assume that libvirt == kvm/qemu * Made fixed\_range a required parameter for nova-manage network create. Changed default num\_networks to 1; 1000 seems large * Fix a number of place in the volume driver where the argv hadn't been fully split * fix for lp712982, and likely a variety of other dashboard error handling issues. This fix simply causes the default error code for ApiError to be 'ApiError' rather than 'Unknown', which makes dashboard handle the error gracefully, and makes euca error output slightly prettier * Fix mis-merge * pep8 is hard * syntax error * create vifs before inject network info to remove rxtx\_cap from network info (don't need to inject it) * Make utils.execute not overwrite std{in,out,err} args to Popen on retries. Make utils.execute reject unknown kwargs * merged trunk, merged qos, slight refactor regarding merges * - general approach for openstack api versioning - openstack api version now preserved in request context - added view builder classes to handle os api responses - added imageRef and flavorRef to os api v1.1 servers - modified addresses container structure in os api v1.1 servers * Pep8 * Test changes * pep8 * Adjust test cases * pep8 * merge * Mark instance metadata as deleted when we delete the instance * Backfix of bugfix of issue blocking creating servers with metadata * Better comment for fault. Improved readability of two small sections * Add support for network QoS (ratelimiting) for XenServer. Rate is pulled from the flavor (instance\_type) when constructing a vm * pep8 * I suck at merging * Now returns a 400 for a create server request with invalid hrefs for imageRef/flavorRef values. 
Also added tests * moving Versions app out of \_\_init\_\_.py into its own module; adding openstack versions tests; adding links to version entities * fixed code formatting nit * handle create and update requests, and update the base image service documentation to reflect the (defacto) behavior * Move the check for None personalities into the create method * Get the migration out * get api openstack test\_images working * merge trunk * Improved exception handling * better implementation of try..except..else * merging parent branch lp:~bcwaldon/nova/osapi-flavors-1\_1 * merging parent branch lp:~rackspace-titan/nova/openstack-api-version-split * iptables filter firewall changes merged * merged trunk * pep8 * adding serialization\_metadata to encode links on flavors * merge with libvirt\_multinic\_nova * pep8 * teach glance image server get to handle timestamps * merge trunk * merge trunk * fixes for NWFilterFirewall and net injection * moving code out of try/except that would never trigger NotFound * handle timestamps in glance service detail * fixed IpTablesFirewal * Fixes lp736343 - Incorrect mapping of instance type id to flavor id in Openstack API * Comparisons to None should not use == or != * Pep8 error, oddly specific to pep8 v0.5 < x > v0.6 * Remove unconditional raise, probably left over from debugging * Mapping the resize status * Mapping the resize status * Fixed pep8 violation * adding comments; removing returns from build\_extra; removing unnecessary backslash * refactor to simpler implementation * Foo * glance image service show testcases * oh come on * refactoring * Add tests and code to handle multiple ResponseExtension objects * Just use 'if foo' instead of 'if len(foo)'. It will fail as spectacularly if its not acting on a sequence anyways * bugfix * Remove unconditional raise, probably left over from debugging * No need to modify this test case function as well * refactored: network\_info creation extracted to method * Call \_create\_personality\_request\_dict within the personalities\_null test * Foo * more pep8 fixes * Switch back to 'is not None' for personality\_files check. (makes mark happy) * pep8 fixes * 1) Update few comments where whitespace is missing after '#' 2) Update document so that copy right notice doesn't appear in generated document 3) Now using self.flag(...) instead of setting the flags like FLAGS.vmwareapi\_username by direct assignment. 4) Added the missing double quote at the end a string in vim\_util.py * more pep8 fixes * Fix up tests * Replaced capability flags with List * Fix more pep8 errors * Remove me from mailmap * Fix up setup container * Merged trunk * Update the Openstack API to handle case where personality is set but null in the request to create a server * Make smoketests' exit code reveal whether they were succesful * merge with trunk. moved scheduler\_manager into manager. 
fixed tests * Set nbd to false when mounting the image * Fixed typo when I was trying to add test cases for lxc * Remove target\_partition for setup\_container but still hardcode because its needed when you inject the keys into the image * Remove nbd=FLAGS.use\_cow\_images for destroy container * Update mailmap * Fix a number of place in the volume driver where the argv hadn't been fully split * Fix pep8 errors * Update authors again * Improved exception handling: - catching appropriate errors (OSError, IOError, XenAPI.Failure) - reduced size of try blocks - moved exception handling code in separate method - verifing for appropriate exeception type in unit tests * get\_console\_output is not supported by lxc and libvirt * Update Authors and testsuite * Comparisons to None should not use == or != * Make error message match the check * Setting the api verion in the request in the auth middle is no longer needed. Also, common.get\_api\_version is no longer needed. As Eric Day noted, having versioned controllers will make that unnecessary * moving code out of try/except that would never trigger NotFound * Added mechanism for versioned controllers for openstack api versions 1.0/1.1. Create servers in the 1.1 api now supports imageRef/flavorRef instead of imageId/flavorId * fix up copyright * removed dead method * pep8 * pep8 * Remerge trunk * cleanup * added in network qos support for xenserver. Pull qos settings from flavor, use when creating instance * moved scheduler API check into db.api decorator * Add basic tests for lxc containers * Revert testsuite changes * MErge trunk * Fix a few of the more obvious non-errors while we're in here * hacks in place * Fix the errors that pylint was reporting on this file * foo * foo * commit before monster * Fix \_\_init\_\_ method on unit tests (they take a method\_name kwarg) * Don't warn about C0111 (No docstrings) * In order to disable the messages, we have to use disable, not disable-msg * Avoid mixins on image tests, keeping pylint much happier * Use \_ trick to hide base test class, thereby avoiding mixins and helping PyLint * hurr * hurr * get started testing * foo * Don't complain about the \_ function being used * Again * pep8 * converted new lines from CRLF to LF * adding bookmarks links to 1.1 flavor entities * Reverting * Log the use of utils.synchronized * expanding osapi flavors tests; rewriting flavors resource with view builders; adding 1.1 specific links to flavors resources * Dumb * Unit test update * Fix lp727225 by adding support for personality files to the openstack api * Changes * fixes bug 735298: start of nova-compute not possible because of wrong xml paths to the //host/cpu section in "virsh capabilities", used in nova/virt/libvirt\_conn.py * update image service documentation * merge lp:nova and resolve conflicts * User ids are strings, and are not necessarily == name. Also fix so that non-existent user gives a 404, not a 500 * Fudge * Keypairs are not required in the OpenStack API; don't require them! 
* Merging trunk * Add missing fallback chain for ipv6 * Typo fix * fixed pep8 issue * chchchchchanges * libvirt template and libvirt\_conn.spawn modified in way that was proposed for xenapi multinic support * Re-commit r805 * Re-commit r804 * Refactored ZoneRedirect into ZoneChildHelper so ZoneManager can use this too * Don't generate insecure passwords where it's easy to use urandom instead * merging openstack-api-version-split * chchchchchanges * chchchchchanges * Fixes euca-get-ajax-console returning Unknown Error, by using the correct exception in get\_open\_port() logic. Patch from Tushar Patil * chchchchchanges * Revert commit that modified CA/openssl.cnf.tmpl * Comment update * Derped again * Move mapper code into the \_action\_ext\_controllers and \_response\_ext\_controllers methods * The geebees * forgot to return network info - teehee * refactored, bugfixes * merge trunk * moving code out of try/except that would never trigger NotFound * merge trunk * Logging statements * added new class Instances for managaging instances added new method list in class Instances: * tweak * Stuff * Removing io\_util.py. We now use eventlets library instead * Some typos * \* Updated document vmware\_readme.rst to mention VLAN networking \* Corrected docstrings as per pep0257 recommentations. \* Stream-lined the comments. \* Updated code with locals() where ever applicable. \* VIM : It stands for VMware Virtual Infrastructure Methodology. We have used the terminology from VMware. we have added a question in FAQ inside vmware\_readme.rst in doc/source \* New fake db: vmwareapi fake module uses a different set of fields and hence the structures required are different. Ex: bridge : 'xenbr0' does not hold good for VMware environment and bridge : 'vmnic0' is used instead. Also return values varies, hence went for implementing separate fake db. \* Now using eventlet library instead and removed io\_utils.py from branch. \* Now using glance.client.Client instead of homegrown code to talk to Glance server to handle images. \* Corrected all mis-spelled function names and corresponding calls. Yeah, an auto-complete side-effect! 
* Implement top level extensions * Added i18n to error message * Checks locally before routing * Really fix testcase * More execvp fallout * Fix up testsuite for lxc * Error codes handled properly now * merge trunk * Adding unit test * Fix instance creation fail under use\_ipv6=false and FlatManager * pep8 clean * Fix a couple of things that assume that libvirt == kvm/qemu * Updating gateway\_v6 in \_on\_set\_network\_host() is not required for FlatManager * added correct path to cpu information (tested on a system with 1 installed cpu package) * Fix unknown exception error in euca-get-ajax-console * fixed pep8 errors (with version 0.5.0) * Use integer ids for (fake) users * req envirom param 'nova.api.openstack.version' should be 'api.version' * pep8 fixes * Fixed DescribeUser in ec2 admin client * openstack api 1.0 flavors resource now implemented; adding flavors request value testing * response working * Added tests back for RateLimitingMiddleware which now throw correctly serialized errors with correct error codes * Add ResponseExtensions * revised per code review * first pass openstack redirect working * Adding newlines for pep8 * Removed VIM specific stuff and changed copyright from 2010 to 2011 * Limits controller and testing with XML and JSON serialization * adding imageRef and flavorRef attributes to servers serialization metadata * Merged with trunk (and brian's previous fixes to fake auth) * Plugin * As suggested by Eric Day: \* changed request.environ version key to more descriptive 'api.version' \* removed python3 string formatting \* added licenses to headers on new files * Tweak * A few fixes * pep8 * merge lp:nova * ignore differently-named nodes in personality and metadata parsing * wrap errors getting image ids from local image store * Moving the migration again * Updating paste config * pep8 * internationalization * Per Eric Day's suggest, the verson is not store in the request environ instead of the nova.context * s/onset\_files/injected\_files/g * pep8 fixes * Add logging to lock check * Now that the fix for 732866, stop working around the bug * Major cosmetic changes to limits, but little-to-no functional changes. MUCH better testability now, no more relying on system time to tick by for limit testing * Merged with trunk to get fix for bug 732866 * Merged trunk * modifying paste config to support v1.1; adding v1.1 entry in versions resource ( GET /) * Fixed lp732866 by catching relevant \`exception.NotFound\` exception. Tests did not uncover this vulnerability due to "incorrect" FakeAuthManager. I say "incorrect" because potentially different implementations (LDAP or Database driven) of AuthManager might return different errors from \`get\_user\_from\_access\_key\` * refactor onset\_files quota checking * Code clean up. Removing \_decorate\_response methods. Replaced them with more explicit methods, \_build\_image, and \_build\_flavor * Use random.SystemRandom for easy secure randoms, configurable symbol set by default including mixed-case * merge lp:nova * Support testing the OpenStack API without key\_pairs * merge trunk * Fixed bugs in bug fix (plugin call) * adding missing view modules; modifying a couple of servers tests to use enumerate * just fixing a small typo in nova-manage vm live-migration * exception fixup * Make Authors check account for tests being run with different os.getcwd() depending on how they're run. 
Add missing people to Authors * Removed duplicated tests * PEP8 0.5.0 cleanup * Really delete the loop * Add comments about the destroy container function * Mount the right device * Merged trunk * Always put the ipv6 fallback in place. FLAGS.use\_ipv6 does not exist yet when the firewall driver is instantiated and the iptables manager takes care not to fiddle with ipv6 if not enabled * merged with trunk and removed conflicts * Merging trunk * Reapplied rename to another file * serverId returned as int per spec * Reapplied rename of Openstack -> OpenStack. Easier to do it by hand than to ask Bazaar to do it * Merged with trunk. Had to hold bazaar's hand as it got lost again * Derive unit test from standard nova.test.TestCase * pep8 fixes * adding flavors and images barebones view code; adding flavorRef and imageRef to v1.1 servers * Fixed problem with metadata creation (backported fix) * Clarify the logic in using 32 symbols * moving addresses views to new module; removing 'Data' from 'DataViewBuilder' * Don't generate insecure passwords where it's easy to use urandom instead * Added a views package and a views.servers module. For representing the response object before it is serialized * Make key\_pair optional with OpenStack API * Moved extended resource code into the extensions.py module * Moving fixtures to a factory * Refactor setup contianer/destroy container * Fixing API per spec, to get unit-tests to pass * Implements basic OpenStack API client, ready to support API tests * Fix capitalization of ApiError (it was mistakenly called APIError) * added migration to repo * Clarified message when a VM is not running but still in DB * Implemented Hyper-V list\_instances\_detail function. Needs a cleanup by someone that knows the Hyper-V code * So the first of those tests doesn't pass. Removing as it looks like it was meant to be deleted * Added test and fixed up code so that it works * Fix for LP Bug #704300 * fixed keyword arg error * pep8 * added structure to virt.xenapi.vmops to support network info being passed in * Removed duplicated test, renamed same-named (but non-identical) tests * merge trunk * PEP8 cleanup * Fixes other half of LP#733609 * Initial implementation of refresh instance states * Add missing fallback chain for ipv6 * The exception is called "ApiError", not "APIError" * Implement action extensions * Include cpuinfo.xml.template in tarball * Adding instance\_id as Glance image\_property * Add fixes metadata * Include cpuinfo.xml.template in tarball * Merged test\_network.py properly. Before I had deleted this file and added again, but this file status should be modified when you see the merged difference * removed conflicts and merged with trunk * Create v1\_0 and v1\_1 packages for the openstack api. Added a servers module to each. 
Added tests to validate the structure of ip addresses for a 1.1 request * committing to share * small typo in nova-manage vm live-migration * NTT's live-migration branch, merged with trunk, conflicts resolved, and migrate file renamed * Reverted unmodified files * Reverted unmodified files * Only include kernel and ramdisk ID in meta-data output if they are actually set * Test fixes and some typos * Test changes * Migration moved again * Compute test * merge trunk * merge trunk * Make nova-dhcpbridge output lease information in dnsmasq's leasesfile format * Merged my doc changes with trunk * Fixed pep8 errors * Fixed failing tests in test\_xenapi * Fixes link to 2011.1 instad of just to trunk docs * fixes: 733137 * Add a unit test * Make utils.execute not overwrite std{in,out,err} args to Popen on retries. Make utils.execute reject unknown kwargs * Removed excess LOG.debug line * merge trunk * The extension name is constructed from the camel cased module\_name + 'Extension' * Merged with trunk * Fix instructions for setting up the initial database * Fix instructions for setting up the initial database * merged with latest trunk and removed unwanted files * Removed \_translate\_keys() functions since it is no longer used. Moved private top level functions to bottom of module * Use a consistent naming scheme for XenAPI variables * oops * Review feedback * Review feedback * Review feedback * Some unit tests * Change capitalization of Openstack to OpenStack * fixed conflicts after merging with trunk with 787 * Adding a sidebar element to the nova.openstack.org site to point people to additional versions of the site * oops * Review feedback * Replace raw SQL calls through session.execute() with SQLAlchemy code * Review feedback * Remove vish comment * Remove race condition when refreshing security groups and destroying instances at the same time * Removed EOL whitespace in accordance with PEP-8 * Beginning of cleanup of FakeAuthManager * Make the fallback value None instead of False * Indentation adjustment (cosmetical) * Fixed lp732866 by catching relevant \`exception.NotFound\` exception. Tests did not uncover this vulnerability due to "incorrect" FakeAuthManager. I say "incorrect" because potentially different implementations (LDAP or Database driven) of AuthManager might return different errors from \`get\_user\_from\_access\_key\` * Merged trunk * This change adds the ability to boot Windows and Linux instances in XenServer using different sets of vm-params * merge trunk * New migration * Passes net variable as value of keyword argument process\_input. Prior to the execvp patch, this was passed positionally * Changes the output of status in describe\_volumes from showing the user as the owner of the volume to showing the project as the owner * Added support for ips resource: /servers/1/ips Refactored implmentation of how the servers response model is generated * merge trunk * Adds in multi-tenant support to openstack api. Allows for multiple accounts (projects) with admin api for creating accounts & users * merge trunk * remerge trunk (again). fix issues caused by changes to deserialization calls on controllers * Add config for osapi\_extensions\_path. Update the ExtensionManager so that it loads extensions in the osapi\_extensions\_path * process\_input for tee. 
fixes: 733439 * Minor stylistic updates affecting indentation * Make linux\_net ensure\_bridge commands that add and remove ip addr's from devices/bridges work with with the latest utils.execute method (execvp) * Added volume api from previous megapatch * Made changes to xs-ipv6 code impacted because of addition of flatmanger ipv6 support * Need to set version to '1.0' in the nova.context in test code for tests to be happy * merge from trunk.. * Discovered literal\_column(), which does exactly what I need * Merged trunk * Further vmops cleanup * cast execute commands to str * Remove broken test. At least this way, it'll actually fix the problem and be mergable * \* Updated the readme file with description about VLAN Manager support & guest console support. Also added the configuration instructions for the features. \* Added assumptions section to the readme file * \* Modified raise statements to raise nova defined Exceptions. \* Fixed Console errors and in network utils using HostSystem instead of Datacenter to fetch network list \* Added support for vmwareapi module in nova/virt/connection.py so that vmware hypervisor is supported by nova \* Removing self.loop to achieve synchronization * merge trunk * Moved vlan\_interface flag in network.manager removed needless carriage return in vm\_ops * Use self.instances.pop in unfilter\_instance to make the check/removal atomic * Make Authors check account for tests being run with different os.getcwd() depending on how they're run. Add missing people to Authors * Make linux\_net ensure\_bridge commands that add and remove ip addr's from devices/bridges work with with the latest utils.execute method (execvp) * \_translate\_keys now needs one more argument, the request object * Added version attribute to RequestContext class. Set the version in the nova.context object at the middleware level. Prototyped how we can serialize ip addresses based on the version * execvp: fix params * merge lp:nova * switch to a more consistent usage of onset\_files variable names * re-added a test change I removed thinking it was related to removed code. It wasn't :> * merge trunk * Document known bug numbers by the code which is degraded until the bugs are fixed * fix minor typo * Fix a fer nits jaypipes found in review * Pep8 / Style * Re-removed the code that was deleted upstream but somehow didn't get merged in. Bizarre! * More resize * Merged with upstream * pep8 fun * Test login. Uncovered bug732866 * Merged with upstream * Better logging, be more careful about when we throw login errors re bug732866 * Don't wrap keys and volumes till they're in the API * Add a new IptablesManager that takes care of all uses of iptables * Last un-magiced session.execute() replaced with SQLAlchemy code.. * PEP8 * Add basic test case * Implements basic OpenStack API client, ready to support API tests * Initial support fo extension resources. Tests * Partial revert of one conversion due to phantom magic exception from SQLAlchemy in unrelated code; convert all deletes * merge lp:nova * add docstring * fixed formatting and redundant imports * Cleaned up vmops * merge trunk * initializing instance power state on launch to 0 (fixes EC2 API bug) * Correct a misspelling * merge lp:nova * merge trunk * Use a FLAGS.default\_os\_type if available * Another little bit of fallout from the execvp branch * Updated the code to detect the exception by fault type. SOAP faults are embedded in the SOAP response as a property. 
Certain faults are sent as a part of the SOAP body as property of missingSet. E.g. NotAuthenticated fault. So we examine the response object for missingSet and try to check the property for fault type * Another little detail. * Fix a few things that were either missed in the execvp conversion or stuff that was merged after it, but wasn't updated accordingly * Introduces the ZoneManager to the Scheduler which polls the child zones and caches their availability and capabilities * One more thing. * merge trunk * Only include ramdisk and kernel id if they are actually set * Add bugfix metadata * More execvp fallout * Make nova.image.s3 catch up with the new execute syntax * Pass argv of dnsmasq and radvd to execute as individual args, not as a list * Split dnsmasq and radvd commands into their respective argv's * s/s.getuid()/os.getuid()/ * merge lp:nova and add stub image service to quota tests as needed * merged to trunk rev781 * fix pep8 check * merge lp:nova * Modifies S3ImageService to wrap LocalImageService or GlanceImageService. It now pulls the parts out of s3, decrypts them locally, and sends them to the underlying service. It includes various fixes for image/glance.py, image/local.py and the tests * add tests to verify the serialization of adminPass in server creation response * Fixes nova.sh to run properly the first time. We have to get the zip file after nova-api is running * minor fixes from review * merged trunk * fixed based on reviewer's comment * merge lp:nova * Moved umount container to disk.py and try to remove loopback when destroying the container * Merged trunk * Replace session.execute() calls performing raw UPDATE statements with SQLAlchemy code, with the exception of fixed\_ip\_disassociate\_all\_by\_timeout() * Fixes a race condition where multiple greenthreads were attempting to resize a file at the same time. Adds tests to verify that the image caching call will run concurrently for different files, but will block other greenthreads trying to cache the same file * maybe a int instead ? * merge lp:nova * merge, resolve conflicts, and update to reflect new standard deserialization function signature * Fixes doc build after execvp patch * execvp: fix docs * initializing instance power state on launch to 0 (fixes EC2 API bug) * - Content-Type and Accept headers handled properly - Content-Type added to responses - Query extensions no long cause computeFaults - adding wsgi.Request object - removing request-specific code from wsgi.Serializer * Fixes bug 726359. Passes unit tests * merge lp:nova, fix conflicts, fix tests * fix the copyright notice in migration * execvp: cleanup * remove the semaphore when there is no one waiting on it * merge lp:nova and resolve conflicts * Hi guys * Update the create server call in the Openstack API so that it generates an 'adminPass' and calls set\_admin\_password in the compute API. This gets us closer to parity with the Cloud Servers v1.0 spec * Added naming scheme comment * Merged trunk * execvp passes pep8 * merge trunk * Add a decorator that lets you synchronise actions across multiple binaries. Like, say, ensuring that only one worker manipulates iptables at a time * renaming wsgi.Request.best\_match to best\_match\_content\_type; correcting calls to that function in code from trunk * merge lp:nova * Fixes bug #729400. Invalid values for offset and limit params in http requests now return a 400 response with a useful message in the body. 
Also added and updated tests * Add password parameter to the set\_admin\_password call in the compute api. Updated servers password to use this parameter * stuff * rearrange functions and add docstrings * Fixes uses of process\_input * update authors file * merged trunk r771 * merge lp:nova * remove unneeded stubs * move my tests into their own testcase * replaced ConnectionFailed with Exception in tools/euca-get-ajax-console was not working for me with euca2tools 1.2 (version 2007-10-10, release 31337) * Fixed pep8 issues * remerge trunk * removed uneeded \*\*kw args leftover from removed account-in-url changes * fixed lp715427 * fixed lp715427 * Fix spacing * merge lp:nova and resolve conflicts * remove superfluous trailing blank line * add override to handle xml deserialization for server instance creation * Added 'adminPass' to the serialization\_metadata * merge trunk * Merged with trunk Updated exception handling according to spawn refactoring * Fixed pep8 violation in glance plugin * Added unit tests for ensuring VDI are cleaned up upon spawn failures * Stop assuming anything about the order in which the two processes are scheduled * make static method for testing without initializing libvirt * tests and semaphore fix for image caching * execvp: unit tests pass * merged to trunk rev 769 * execvp: almost passes tests * Refactoring nova-api to be a service, so that we can reuse it in unit tests * Added documentation about needed flags * a few fixes for the tests * Renamed FLAG.paste\_config -> FLAG.api\_paste\_config * Sorted imports correctly * merge trunk * Fixes lp730960 - mangled instance creation in virt drivers due to improper merge conflict resolution * Use disk\_format and container\_format in place of image type * using get\_uuid in place of get\_record in \_get\_vm\_opaqueref changed SessionBase.\_getter in fake xenapi in order to return HANDLE\_INVALID failure when reference is not in DB (was NotImplementedException) * Merging trunk * Fixing tests * Pep8 fixes * Accidentally left some bad data around * Fix the bug where fakerabbit is doing a sort of prefix matching on the AMQP routing key * merge trunk * Use disk\_format and container\_format instead of image type * merged trunk * update manpage * update code to work with new container and disk formats from glance * modify nova manage doc * Nits * abstracted network code in the base class for flat and vlan * Remerged trunk. fixed conflict * Removes VDIs from XenServer backend if spawn process fails before vm rec is created * Added ability to remove networks on nova-manage command * Remove addition of account to service url * refactored up nova/virt/xenapi/vmops \_get\_vm\_opaque\_ref() no longer inspects the param to check to see if it is an opaque ref works better for unittests * This fix is an updated version of Todd's lp720157. Adds SignatureVersion checking for Amazon EC2 API requests, and resolves bug #720157 * \* pep8 cleanups in migrations \* a few bugfixes * Removed stale references to XenAPI * Moved guest\_tool.py from etc/esx directory to tools/esx directory * Removed excess comment lines * Fix todo comment * execvp * Merged trunk * virt.xenapi.vmops.\_get\_vm\_opaque\_ref changed vm to vm\_ref and ref to obj * virt.xenapi.vmops.\_get\_vm\_opaque\_ref assumes VM.get\_record raises * add a delay before grabbing zipfile * Some more refactoring and a tighter unit test * Moved FLAGS.paste\_config to its re-usable location * Merged with trunk and fixed conflict. 
Sigh * Converted tabs to spaces in bin/nova-api * A few more changes * Inhibit inclusion of stack traces in the logs UNLESS --verbose has been specified. This should help keep the logs compact, helping admins find the messages they're interested in (e.g., "Can't connect to MySQL server on '127.0.0.1' (111)") without having to sort through the stack traces, while still allowing developers to see those traces at will * Addresses bugs 704985 and 705453 by: * And unit tests * A few formatting niceties * First part of the bug fix * virt.xenapi.vmops.\_get\_vm\_opaque\_ref checks for basestring instance instead of str * virt.xenapi.vmops.\_get\_vm\_opaque\_ref exception caught properly * cleaned up virt.xenapi.vmops.\_get\_vm\_opaque\_ref. more reliable approach to checking if param is an opaque ref. code is cleaner * deleted network\_is\_associated from nova.db api * move the images\_dir out of the way when converting * pep8 * rework register commands based on review * added network\_get\_by\_cidr method to nova.db api * Use IptablesManager.semapahore from securitygroups driver to ensure we don't apply half a rule set * Log failed command execution if there are more retry attempts left * Make iptables rules class \_\_ne\_\_ just be inverted \_\_eq\_\_ * Invalid values for offset and limit params in http requests now return a 400 response with a useful message in the body. Also added and updated tests * Create --paste\_config flag defaulting to api-paste.ini and mv etc/nova-api.conf to match * Implementation for XenServer migrations. There are several places for optimization but I based the current implementation on the chance scheduler just to be safe. Beyond that, a few features are missing, such as ensuring the IP address is transferred along with the migrated instance. This will be added in a subsequent patch. Finally, everything is implemented through the Openstack API resize hooks, but actual resizing of the instance RAM and hard drive space is not yet implemented * Generate 'adminPass' and call set\_password when creating servers * Merged with current trunk * merge trunk * Resolving excess conflicts due to criss-cross in branch history * Make "dhcpbridge init" output correctly formatted leases information * Rebased to nova revision 761 * Fixed some more pep8 errors * \* Updated readme file with installation of suds-0.4 through easy\_install. 
\* Removed pass functions \* Fixed pep8 errors \* Few bug fixes and other commits * zipfile needs to be extracted after nova is running * make compute get the new images properly, fix a bunch of tests, and provide conversion commands * avoid possible string/int comparison problems * merge lp:nova * select cleanups * Merged to trunk rev 760, and fixed comment line indent according to Jay's comment * Fix renaming of instance fields using update\_instance api method * apirequest -> apireq * \* os\_type is no longer \`not null\` * respond well if personality attribute is incomplete * Added initial support to delete networks nova-manage * move the id wrapping into cloud layer instead of image\_service * added flatmanager unit testcases and renamed test\_network.py to test\_vlan\_network.py * remove xml testing infrastructure since it is not feasible to use at present * refactor server tests to support xml and json separately * More unit tests and rabbit hooks * Fix renaming of instance fields using update\_instance method * Fix api logging to show proper path and controller:action * merged trunk * \* Tests to verify correct vm-params for Windows and Linux instances * More fixes * delete unnecessary DECLARE * Fixed based on reviewer's comment. Main changes are below. 1. get\_vcpu\_total()/get\_memory\_mb()/get\_memory\_mb\_used() is changed for users who used non-linux environment. 2. test code added to test\_virt * merge lp:nova * merge trunk * fixed wrong local variable name in vmops * Use %s for instance-delete logging in case instance\_id comes through as a string * remove ensure\_b64\_encoding * add the ec2utils file i forgot * spawn a greenthread for image registration because it is slow * fix a couple issues with local, update the glance fake to actually return the same types as the real client, fix the image tests * make local image service work * use LocalImageServiceByDefault * Replace objectstore images with S3 image service backending to glance or local * Merged to trunk rev 759 * Merged trunk rev 758 * remove ra\_server from model and fix migration issue while running unit tests * Removed properties added to fixed\_ips by xs-ipv6 BP * altered ra\_server name to gateway\_v6 * merge lp:nova * rename onset\_files to personality\_files all the way down to compute manager * Changing output of status from showing the user as the owner, to showing the project * enforce personality quotas * localize a few error messages * Refactor wsgi.Serializer away from handling Requests directly; now require Content-Type in all requests; fix tests according to new code * pep8 * Renaming my migration yet again * Merged with Trunk * Use %s in case instance\_id came through as a string * Basic notifications drivers and tests * adding wsgi.Controller and wsgi.Request testing; fixing format keyword argument exception * This fix changes a tag contained in the DescribeKeyPairs response from to so that Amazon EC2 access libraries which does more strict syntax checking can work with Nova * some comments are modified * Merged to trunk rev 757. Main changes are below. 1. Rename db table ComputeService -> ComputeNode 2. 
nova-manage option instance\_type is reserved and we cannot use option instance, so change instance -> vm * adding wsgi.Request class to add custom best\_match; adding new class to wsgify decorators; replacing all references to webob.Request in non-test code to wsgi.Request * Remerged trunk, fixed a few conflicts * Add in multi-tenant support in openstack api * Merged to trunk rev 758 * Fix regression in the way libvirt\_conn gets its instance\_types * Updated DescribeKeyPairs response tag checked in nova/tests/test\_cloud.py * merged to trunk rev757 * Fixed based on reviewer's comments. Main changes are below. 1. Rename nova.compute.manager.ComputeManager.mktmpfile for better naming. 2. Several tests code in tests/test\_virt.py are removed. Because it only works in libvirt environment. Only db-related testcode remains * Fix regression in the way libvirt\_conn gets its instance\_types * more rigorous testing and error handling for os api personality * Updated Authors and .mailmap * Merged to rev 757 * merges dynamic instance types blueprint (http://wiki.openstack.org/ConfigureInstanceTypesDynamically) and bundles blueprint (https://blueprints.launchpad.net/nova/+spec/flavors) * moved migration to 008 (sigh) * merged trunk * catching bare except: * added logging to instance\_types for DB errors per code review * Very simple change checking for < 0 values in "limit" and "offset" GET parameters. If either are negative, raise a HTTPBadRequest exception. Relevant tests included * requested style change * Fixes Bug #715424: nova-manage : create network crashes when subnet range provided is not enough , if the network range cannot fit the parameters passed, a ValueError is raised * adding new source docs * corrected error message * changed \_context * pep8 * added in req.environ for context * pep8 * fixed \_context typo * coding style change per devcamcar review * fixed coding style per devcamcar review notes * removed create and delete method (and corresponding tests) from flavors.py * Provide the ability to rescue and unrescue a XenServer instance * Enable IPv6 injection for XenServer instances. Added addressV6, netmaskV6 and gatewayV6 columns to the fixed\_ips table via migration #007 as per NTT FlatManager IPv6 spec * Updated docstrings * add support for quotas on file injection * Added IPv6 migrations * merge fixes * Inject IPv6 data into XenStore for instance * Change DescribeKeyPairs response tag from keypairsSet to keySet, and fix lp720133 * Port Todd's lp720157 fix to the current trunk, rev 752 * Changed \_get\_vm\_opaqueref removing test-specific code paths * Removed excess TODO comments and debug line * initial commit of vnc support * merged trunk * Changed ra\_server to gateway\_v6 and removed addressv6 column from fixed\_ips db table * \* Added first cut of migration for os\_type on instances table \* Track os\_type when taking snapshots * merging trunk * \* Added ability to launch XenServer instances with per-os vm-params * test osapi server create with multiple personalities * ensure personality contents are b64 encoded * Merged trunk * Fixed pep8 issues, applied jaypipes suggestion * Rebased to nova revision 752 * Use functools.wraps to make sure wrapped method's metadata (docstring and name) doesn't get mangled * merge from trunk * Fake database module for vmware vi api. Includes false injection layer at the level of API calls. This module is base for unit tests for vmwareapi module. 
The unit tests runs regardless of presence of ESX/ESXi server as computer provider in OpenStack * Review feedback * Updated the code to include support for guest consoles, VLAN networking for guest machines on ESX/ESXi servers as compute providers in OpenStack. Removed dependency on ZSI and now using suds-0.4 to generate the required stubs for VMware Virtual Infrastructure API on the fly for calls by vmwareapi module * Added support for guest console access for VMs running on ESX/ESXi servers as computer providers in OpenStack * Support for guest consoles for VMs running on VMware ESX/ESXi servers. Uses vmrc to provide the console access to guests * Minor modification to document. Removed excess flags * Moved the guest tools script that does IP injection inside VM on ESX server to etc/esx directory from etc/ directory * support adding a single personality in the osapi * corrected copyrights for new files * Updated with flags for nova-compute, nova-network and nova-console. Added the flags, --vlan\_interface= --network\_driver=nova.network.vmwareapi\_net [Optional, only for VLAN Networking] --flat\_network\_bridge= [Optional, only for Flat Networking] --console\_manager=nova.console.vmrc\_manager.ConsoleVMRCManager --console\_driver=nova.console.vmrc.VMRCSessionConsole [Optional for OTP (One time Passwords) as against host credentials] --vmwareapi\_wsdl\_loc=/vimService.wsdl> * Fixed trunk merge issues * Merged trunk * At previous commit, I forget to erase conflict - fixed it * merged to trunk rev 752 * Rebased at lp:nova 759 * test\_compute is changed b/c lack of import instance\_types * rename db migration script * 1. merged trunk rev749 2. rpc.call returns '/' as '\/', so nova.compute.manager.mktmpfile, nova.compute.manager.confirm.tmpfile, nova.scheduler.driver.Scheduler.mounted\_on\_same\_shared\_storage are modified followed by this changes. 3. nova.tests.test\_virt.py is modified so that other teams modification is easily detected since other team is using nova.db.sqlalchemy.models.ComputeService * updated docs * updated docs * Fixed xenapi tests Gave up on clever things with map stored as string in xenstore. Used ast.liteeral\_eval instead * This branch implements the openstack-api-hostid blueprint: "Openstack API support for hostId" * refactored adminclient * No reason to initialize metadata twice * Units tests fixed partially. Still need to address checking data injected into xenstore need to convert string into dict or similar. Also todo PEP8 fixes * replaced ugly INSTANCE\_TYPE constant with (slightly less ugly) stubs * add test for instance creation without personalities * fixed pep8 * Add a lock\_path flag for lock files * refactored nova-manage list (-all, ) and fixed docs * moved nova-manage flavors docs * Edited \`nova.api.openstack.common:limited\` method to raise an HTTPBadRequest exception if a negative limit or offset is given. I'm not confident that this is the correct approach, because I guess this method could be called out of an API/WSGI context, but the method \*is\* located in the OpenStack API module and is currently only used in WSGI-capable methods, so we should be safe * merge trunk * moving nova-manage integration tests to smoke tests * Wrapped the instance\_types comparison with an int and added a test case for it. 
Removed the inadvertently added newline * Rename migration to coincide with latest trunk changes * Adds VHD build support for XenServer driver * Suppress stack traces unless --verbose is specified * Removed extraneous newline * Merging trunk to my branch. Fixed a conflict in servers.py * Fixed obvious errors with flags. Note: tests still fail * Merging trunk * Fixed default value for xenapi\_agent\_path flag * 1) merge trunk 2) removed preconfigure\_xenstore 3) added jkey for broadcast address in inject\_network\_info 4) added 2 flags: 4.1) xenapi\_inject\_image (default True) This flag allows for turning off data injection by mounting the image in the VDI (agreed with Trey Morris) 4.2) xenapi\_agent\_path (default /usr/bin/xe-update-networking) This flag specifies the path where the agent should be located. It makes sense only if the above flag is True. If the agent is found, data injection is not performed * Wrap IptablesManager.apply() calls in utils.synchronized to avoid having different workers step on each other's toes * merge trunk * Add utils.synchronized decorator to allow for synchronising method entrance across multiple workers on the same host * execvp * execvp * execvp * execute: shell=True removed * Add lxc to the libvirt tests * Clean up the mount points when it shutsdown * Add ability to mount containers * Add lxc libvirt driver * Rebased to Nova revision 749 * added listing of instances running on a specific host * fixed FIXME * beautification.. * introduced new flag "max\_nbd\_devices" to set the number of possible NBD devices * renamed flag from maximum\_... to max\_.. * replaced ConnectionFailed with Exception in tools/euca-get-ajax-console was not working for me with euca2tools 1.2 (version 2007-10-10, release 31337) * Did a pull from trunk to be sure I had the latest, then deleted the test directory. I guess it appeared when I started using venv. Doh * Deleting test dir from a pull from trunk * introduced new flag "maximum\_nbd\_devices" to set the number of possible NBD devices * reverted my changes from https://code.launchpad.net/~berendt/nova/lp722554/+merge/50579 and reused the existing db api methods to add the disabled services. 
Looks much better now :) * add timeout and retry for ssh * Makes nova-api correctly load the default flagfile * force memcache key to be str * only create auth connection if cache misses * No reason to dump a stack trace just because the AMQP server is unreachable; an error notification should be sufficient * Add error message to the error report so we know why the AMQP server is unreachable * No reason to dump a stack trace just because we can't reach the AMQP servire; it ends up being just noise * DescribeInstances modified to return ipv6 fixed ip address in case of flatmanager * Bootlock original instance during rescue * merge with zones2 fixes and trunk * check if QUERY\_STRING is empty or not before building the request URL in bin/nova-ajax-console-proxy * trunk merge * API changed to new style class * trunk merge, pip-requires and novatools to novaclient changes * Fixes FlatDHCP by making it inherit from NetworkManager and moving some methods around * fixed: bin/nova-ajax-console-proxy:66:19: W601 .has\_key() is deprecated, use 'in' * merged trunk * add a caching layer to the has\_role call to increase performance * Removed unnecessary compute import * Set rescue instance VIF device * use default flagfile in nova-api * Add tests for 718999, fix a little brittle code introduced by the committed fix * Rename test to describe what it actually does * Copy over to current trunk my tests, the 401/500 fix, and a couple of fixes to the committed fix which was actually brittle around the edges.. * I'm working on consolidating install instructions specifically (they're the most asked-about right now) and pointing to the docs.openstack.org site for admin docs * check if QUERY\_STRING is empty or not before building the request URL * Teardown rescue instance * Merged trunk * Create rescue instance * Merging trunk, conflicts fixed * Verify status of image is active * Rebased at lp:nova 740 * merged with trunk * Cleanup db method names for dealing with auth\_tokens to follow standard naming pattern * The proposed bug fix stubs out the \_is\_vdi\_pv routine for testing purposes * revert a few unnecessary changes to base.py * removed unused references to unittest * add customizable tempdir and remove extra code * Pass id of token to be deleted to the db api, not the actual object * Removing unecessary headers * Rename auth\_token db methods to follow standard * Removing unecessary nokernel stuff * Adding \_make\_subprocess function * No longer users image/ directory in tarball * Merging trunk, small fixes * make smoketests run with nose * IPV6 FlatManager changes * Make tests start with a clean database for every test * merge trunk * merge clean db * merged trunk * sorry, pep8 * adds live network injection/reconfiguration. Some refactoring * forgot to get vm\_opaque\_ref * new tests * service capabilities test * moved network injection and vif creation to above vm start in vmops spawn * Merged trunk * nothing * Removes processName from debug output since we aren't using multiprocessing and it doesn't exist in python 2.6.1 * Add some methods to the ec2 admin api to work with VPNs. 
Also implements and properly documents the get\_hosts method * Fix copypasta pep8 violation * moved migrate script to 007 (again..sigh) * Don't require metadata (hotfix for bug 724143) * merge trunk * Merged trunk * Updated email in Authors * Easy and effective fix for getting the DNS value from flag file, when working in FlatNetworking mode * Some first steps towards resolving some of the issues brought up on the mailing list related to documenting flags * Support HP/LeftHand SANs. We control the SAN by SSHing and issuing CLIQ commands. Also improved the way iSCSI volumes are mounted: try to store the iSCSI connection info in the volume entity, in preference to doing discovery. Also CHAP authentication support * This fix checks whether the boot/guest directory exists on the hypervisor. If that is not the case, it creates it * Globally exclude \*.pyc files from generated tarballs * stubbing out \_is\_vdi\_pv for test purposes * merge trunk * Globally exclude .pyc files from tarball contents * Get DNS value from Flag, when working in FlatNetworking mode. Passing the flag was ineffective previously. This is an easy fix. I think we would need nova-manage to accept dns also from command line * xenapi plugin function now checks whether /boot/guest already exists. If not, it creates the directory * capability aggregation working * fix check for existing port 22 rule * move relevant code to baseclass and make flatdhcp not inherit from flat * Hotfix to not require metadata * Documentation fixes so that output looks better * more smoketest fixes * Removed Milind from Authors file, as individual Contributer's License Agreement & Ubuntu code of conduct are not yet signed * Fixed problems found in localized string formatting. Verified the fixes by running ./run\_tests.sh -V * Change missed reference to run\_tests.err.log * PEP 257 fixes * Merged with trunk * fix missed err.log * Tests all working again * remove extra flag in admin tests * Revert commit 709. 
This fixes issues with the Openstack API causing 'No user for access key admin' errors * Adds colors to output of tests and cleans up run\_tests.py * Reverted bad-fix to sqlalchemy code * Merged with trunk * added comments about where code came from * merge and fix conflicts * Prevent logging.setup() from generating a syslog handler if we didn't request one (breaks on mac) * fix pep8 * merged upstream * Changed create from a @staticmethod to a @classmethod * revert logfile redirection and make colors work by temporarily switching stdout * merged trunk * add help back to the scripts that don't use service.py * Alphabetize imports * remove processName from debug output since we aren't using multiprocessing and it doesn't exist in python 2.6.1 * updates to nova.flags to get help working better * Helper function that supports XPath style selectors to traverse an object tree e.g * tests working again * Put back the comments I accidentally removed * Make sure there are two blank links after the import * Rename minixpath\_select to get\_from\_path * Fixes the describe\_availability\_zones to use an elevated context when getting services and the db calls to pass parameters correctly so is\_admin check works * Fix pep8 violation (trailing whitespace) * fix describe\_availability\_zones * Cope when we pass a non-list to xpath\_select - wrap it in a list * Fixes existing smoketests and splits out sysadmin tests from netadmin tests * Created mini XPath implementation, to simplify mapping logic * move the deletion of the db into fixtures * merged upstream * Revert commit 709. This fixes issues with the Openstack API causing 'No user for access key admin' errors * put the redirection back in to run\_tests.sh and fix terminal colors by using original stdout * Deleted trailing whitespace * Fixes and optimizes filtering for describe\_security\_groups. Also adds a unit test * merged trunk * fix for failing describe\_instances test * merged trunk * use flags for sqlite db names and fix flags in dhcpbridge * merged trunk * Fixes lp715424, code now checks network range can fit num\_networks \* network\_size * The proposed branch prevents FlatManager from executing network initialisation tasks contained in linux\_net.init\_host(), which are unnecessary when flat networking is used * Adds some features to run\_tests.sh: - if it crashes right away with a short erorr log, print that directly - allow specifying tests without the nova.tests part * The kernel\_id and the ramdisk\_id are optional, yet the OpenStack API was requiring them. In addition, with the ObjectStore these properties are not under 'properties' (as they are with Glance) * merged trunk * merge trunk * Initial support for per-instance metadata, though the OpenStack API. Key/value pairs can be specified at instance creation time and are returned in the details view. Support limits based on quota system * Merged trunk * Removed pass * Changed unit test to refer to compute API, per Todd's suggestion. Avoids needing to extend our implementation of the EC2 API * Fixes lots of errors in the unit tests * dump error output directly on short import errors * allow users to omit 'nova.tests' with run\_tests * Merged trunk * \* Took care of localization of strings \* Addressed all one liner docstrings \* Added Sateesh, Milind to Authors file * Fixed pep8 errors * FlatManager.init\_host now inhibits call to method in superclass. 
Floating IP methods have been redefined in FlatManager to raise NotImplementedError * speed up network tests * merged trunk * move db creation into fixtures and clean db for each test * fix failures * remove unnecessary stubout * Lots of test fixing * Update the admin client to deal with VPNs and have a function host list * Removed unused import & formatting cleanups * Exit with exit code 1 if conf cannot be read * Return null if no kernel\_id / ramdisk\_id * Reverted change to focus on the core bug - kernel\_id and ramdisk\_id are optional * Make static create method behave more like other services * merged fix-describe-groups * add netadmin smoketests * separate out smoketests and add updated nova.sh * fix and optimize security group filtering * Support service-like wait behaviour for API service * Added create static method to ApiService * fix test * Refactoring nova-api to be a service, so that we can reuse it in tests * test that shows error on filtering groups * don't make a syslog handler if we didn't ask for one * Don't blindly concatenate queue name if second portiion is None * Missing import for nova.exceptions (!) * At the moment --pidfile is still used in some scripts in contrib/puppet/. I don't use puppet, please check if there are possible side effects * We're not using prefix matching on AMQP, so fakerabbit shouldn't be doing it! * merge fixes from anso branch * merged trunk * Removed block of code that resurrected itself in the last merge * Added Andy Southgate to the Authors file * Merged with trunk, including manual conflict resolution in nova/virt/disk.py and nova/virt/xenapi/vmops.py * Put the whitespace back \*sigh\* * Remove duplicate import gained across a merge * Rename "SNATTING" chain to "snat" * Fix DescribeRegion answer by introducing '{ec2,osapi}\_listen' flags instead of overloading {ec2,osapi}\_host. Get rid of paste\_config\_to\_flags, bin/nova-combined. Adds debug FLAGS dump at start of nova-api * Also remove nova-combined from setup.py * Fixed some docstring * Get rid of nova-combined, see rationale on ML * Merged trunk * no, really fix lp721297 this time * Updated import statements according to HACKING guidelines. Added docstrings to each document. Verified pep8 over all files. Replaced some constants by enums accordingly. Still little bit more left in vm\_util.py and vim\_util.py files * Add flags for listen\_port to nova-api. This allows us to listen on one port, but return another port (for a proxy or load balancer) in calls like describe\_regions, etc * Fix tiny mitakes! (remove unnecessary comment, etc) * Fixed based on reviewer's comment. 1. Change docstrings format 2. Fix comment grammer mistake, etc * PEP8 again * Account for the fact that iptables-save outputs rules with a space at the end. Reverse the rule deduplication so that the last one takes precedence * floating-ip-snat was too long. Use floating-snat instead * PEP8 adjustments * Remove leftover from debugging * Add a bunch of tests for everything * Fixes various issues regarding verbose logging and logging errors on import * merged trunk * Add a new chain, floating-ip-snat, at the top of SNATTING, so that SNATting for floating ips gets applied before the default SNAT rule * Address some review comments * Some quick test cleanups, first step towards standardizing the way we start services in tests * use a different flag for listen port for apis * added disabled services to the list of displayed services in bin/nova-manage * merged to trunk rev709. 
NEEDS to be fixed based on 3rd reviewer's comment * just add 005\_add\_live\_migration.py * Fixed based on reviewer's comment. 1. DB schema change vcpu/memory/hdd info were stored into Service table. but reviewer pointed out to me creating new table is better since Service table has too much columns * update based on prereq branch * update based on prereq branch * fixed newline and moved import fake\_flags into run\_tests where it makes more sense * merged fix * remove changes to test db * Fixed my confusion in documenting the syntax of iSCSI discovery * pretty colors for logs and a few optimizations * Renamed db\_update to model\_update, and lots more documentation * modify tests to use specific hosts rather than default * Merged with head * remove keyword argument, per review * move test\_cloud to use start\_service, too * add a start\_service method to our test baseclass * add a test for rpc consumer isolation * Merged with trunk * The OpenStack API was using the 'secret' as the 'access key'. There is an 'access key' and there is a 'secret key'. Access key ~= username. Secret key ~= password. This fix is necessary for the OpenStack Python API bindings to log in * Add a bunch of docs for the new iptables hotness * fix pep8 and remove extra reference to reset * switch to explicit call to logging.setup() * merged trunk * Adds translation catalogs and distutils.extra glue code that automates the process of compiling message catalogs into .mo files * Merged with trunk * make sure that ec2 response times are xs:dateTime parsable * Removing pesky DS\_Store files too. Begone * Updated to remove built docs * Removing duplicate installation docs and adding flag file information, plus pointing to docs.openstack.org for Admin-audience docs * introducing a new flag timeout\_nbd for manually setting the time in seconds for waiting for an upcoming NBD device * use tests.sqlite so it doesn't conflict with running db * cleanup from review * Duh, continue skips iteration, not pass. #iamanidiot * reset to notset if level isn't in flags * Enable rescue testing * PEP8 errors and remove check in authors file for nova-core, since nova-core owns the translation export branch * Merged trunk * Stub out VM create * \* Removed VimService\_services.py & VimService\_services\_types.py to reduce the diffs to normal. These 2 files are auto-generated files containing stubs for VI SDK API end points. The stub files are generated using ZSI SOAP stub generator module ZSI.commands.wsdl2py over Vimservice.wsdl distributed as part of VMware Virtual Infrastructure SDK package. To not include them in the repository we have few options to choose from, 1) Generate the stub files in build time and make them available as packages for distribution. 2) Generate the stub files in installation/configuration time if ESX/ESXi server is detected as compute provider. 
Further to this, we can try to reduce the size of stub files by attempting to create stubs only for the API end points required by the module vmwareapi * introducing a new flag timeout\_nbd for manually setting the time in seconds for waiting for an upcoming NBD device * \* Removed nova/virt/guest-tools/guest\_tool.bat & nova/virt/guest-tools/guest\_tool.sh as guest\_tool.py can be invoked directly during guest startup * More PEP-8 * Wrap ipv6 rules, too * PEP-8 fixes * Allow non-existing rules to be removed * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NOVA-CORE DEVELOPERS SHOULD NOT REVIEW THIS MERGE PROPOSAL ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * merged with nova trunk revision #706 * Fix typo * Unfilter instance correctly on termination * move exception hook into appropriate location and remove extra stuff from module namespace * Also remove rules that jump to deleted chains * simplify logic for parsing log level flags * reset all loggers on flag change, not just root * add docstring to reset method * removed extra comments and initialized from flags * fix nova-api as well * Fix refresh sec groups * get rid of initialized flag * clean up location of method * remove extra references to logging.basicConfig * move the fake initialized into fake flags * fixes for various logging errors and issues * fanout works * fanout kinda working * service ping working * scheduler manager * tests passing * start of fanout * merge trunk * previous trunk merge * puppet scripts only there as an example, should be moved to some other place if they are still necessary * Various optimizations of lookups relating to users * If there are no keypairs registered on a create call, output a useful error message rather than an out-of-range exception * Fixes vpn images to use kernel and ramdisk specified by the image * added elif branch to handle the conversion of datetime instances to isoformat instead of plain string conversion * Calculate time correctly for ec2 request logs * fix ec2 launchtime response not in iso format bug * pep8 leftover * move from datetime.datetime.utcnow -> utils.utcnow * pass start time as a param instead of making it an attribute * store time when RequestLogging starts instead of using context's time * Fix FakeAuthManager so that unit tests pass; I believe it was matching the wrong field * more optimizations context.user.id to context.user\_id * remove extra * replace context.user.is\_admin() with context.is\_admin because it is much faster * remove the weird is\_vpn logic in compute/api.py * Don't crash if there's no 'fixed\_ip' attribute (was returning None, which was unsubscriptable) * ObjectStore doesn't use properties collection; kernel\_id and ramdisk\_id aren't required anyway * added purge option and tightened up testing * Wrap iptables calls in a semaphore * pep8 * added instance types purge test * Security group fallback is named sg-fallback * Rename a few things for more clarity * Port libvirt\_conn.IptablesDriver over to use linux\_net.IptablesManager * merged trunk * Typo fix * added admin api call for injecting network info, added api test for inject network info * If there are no keypairs, output a useful error message * Fix typo (?) in authentication logic * Changing type -> image\_type * Pep8 cleanup * moved creating vifs to its own function, moved inject network to its own function * sandy y u no read hacking guide and import classes? * Typo fix * XenAPI tests * Introduce IptablesManager in linux\_net. 
Port every use of iptables in linux\_net to it * Use WatchedFileHandler instead of RotatingFileHandler * Resize compute tests * Support for HP SAN * Merging trunk to my branch. Fixed conflicts in Authors file and .mailmap * Rename migration 004 => 005 * Added Author and tests * Merging trunk * fixups backed on merge comments * Fixed testing mode leftover * PEP8 fix * Remove paste\_config\_to\_flags since it's now unused * Port changes to nova-combined, rename flags to API\_listen and API\_listen\_port * Set up logging once FLAGS properly read, no need to redo logging config anymore (was inoperant anyway) * Switch to API\_listen and API\_listen\_port, drop wsgi.paste\_config\_to\_flags * added new class Instances to manage instances and added a new listing method into the class * added functionality to list only fixed ip addresses of one node and added exception handling to list method * Use WatchedFileHandler instead of RotatingFileHandler * Incorporating minor cleanups suggested by Rick Harris: \* Use assertNotEqual instead of assertTrue \* Use enumerate function instead of maintaining a counter * Resize compute tests * fixed based on reviewer's comment. 1. erase wrapper function(remove/exists/mktempfile) from nova.utils. 2. nova-manage service describeresource(->describe\_resource) 3. nova-manage service updateresource(->update\_resource) 4. erase "my mistake print" statement * Tests * pep8 * merged trunk * Makes FlatDHCPManager clean up old fixed\_ips like VlanManager * Correctly pass the associate paramater for project\_get\_network through the IMPL layer in the db api * changed migration to 006 for trunk compatibility * completed doc and added --purge option to instance type delete * moved inject network info to a function which accepts only instance, and call it from reset network * Test changes * Merged with trunk * Always compare incoming flavor\_id as an int * Initial support for per-instance metadata, though the OpenStack API. Key/value pairs can be specified at instance creation time and are returned in the details view. Support limits based on quota system * a few changes and a bunch of unit tests * remove leftover periodic tasks * Added support for feature parity with the current Rackspace Cloud Servers practice of "injecting" files into newly-created instances for configuration, etc. However, this is in no way restricted to only writing files to the guest when it is first created * missing docstring and fixed copyrights * move periodic tasks to base class based on class variable as per review * Correctly pass the associate paramater to project\_get\_network * Add \*\*kwargs to VlanManager's create\_networks so that optional args from other managers don't break * Uncommitted changes using the wrong author, and re-committing under the correct author * merge with zone phase 1 again * Added http://mynova/v1.0/zones/ api options for add/remove/update/delete zones. child\_zones table added to database and migration. Changed novarc vars from CLOUD\_SERVERS\_\* to NOVA\_\* to work with novatools. 
See python-novatools on github for help testing this * pip requires novatools * copyright notice * moved 003\_cactus.py migration file to 004\_add\_instance\_types.py to avoid naming collision with new trunk migration * Add \*\*kwargs to VlanManager's create\_networks so that optional args from other managers don't break * merge with zone phase 1 * changed from 003-004 migration * merged lp:~jk0/nova/dynamicinstancetypes * Merged trunk * merge from dev * fixed strings * multi positional string fix * Use a semaphore to ensure we don't run more than one iptables-restore at a time * Fixed unit test * merge with trunk * fixed zone list tests * Make eth0 the default for the public\_interface flag * Finished flavor OS API stubs * Re-alphabetise Authors, move extra addresses into .mailmap * Re-alphabetise Authors, move extra addressses into .mailmap * Move the ramdisk logging stuff * Hi guys * fixup * zone list now comes from scheduler zonemanager * Stop blowing away the ramdisk * Rebased at lp:nova 688 * Update the Openstack API so that it returns 'addresses' * I have a bug fix, additional tests for the \`limiter\` method, and additional commenting for a couple classes in the OpenStack API. Basically I've just tried to jump in somewhere to get my feet wet. Constructive criticism welcome * added labels to networks for use in multi-nic added writing network data to xenstore param-list added call to agent to reset network added reset\_network call to openstack api * Add a command to nova-manage to list fixed ip's * Foo * comments + Englilish, changed copyright in migration, removed network\_get\_all from db.api (vestigial) * Adding myself to Authors and .mailmap files * example: * Switched mailmap entries * Supporting networks with multiple PIFs. pep8 fixes unit tests passed * Merged kpepple * Merged trunk * More testing * Block diagram for vmwareapi module * added entry in the category list * Added vmwareapi module to add support of hypervisor vmware-vsphere to OpenStack * added new functionality to list all defined fixed ips * added more I18N * Merged trunk and fixed conflict with other Brian in Authors * removing superfluous pass statements; replacing list comprehension with for loop; alphabetizing imports * Rebased at lp:nova 687 * added i18n of 'No networks defined' * Make eth0 the default for FLAGS.public\_interface * Typo fixes * Merging trunk * Adding tests * first crack at instance types docs * merge trunk * style cleanup * polling tests * Use glance image type to determine disk type * Minor change. Adding a helper function stub\_instance() inside the test test\_get\_all\_server\_details\_with\_host for readability * Fixes ldapdriver so that it works properly with admin client. It now sanitizes all unicode data to strings before passing it into ldap driver. 
This may need to be rethought to work properly for internationalization * Moved definition of return\_servers\_with\_host stub to inside the test\_get\_all\_server\_details\_with\_host test * fixed * Pep8 fixes * Merging trunk * Adding basic test * Better exceptions * Update to our HACKING doc to add examples of our docstring style * add periodic disassociate from VlanManager to FlatDHCPManager * Flipped mailmap entries * -from migrate.versioning import exceptions as versioning\_exceptions + +try: + from migrate.versioning import exceptions as versioning\_exceptions +except ImportError: + try: + # python-migration changed location of exceptions after 1.6.3 + # See LP Bug #717467 + from migrate import exceptions as versioning\_exceptions + except ImportError: + sys.exit(\_("python-migrate is not installed. Exiting.")) * Accidently removed myself from Authors * Added alternate email to mailmap * zone manager tests * Merged to trunk * added test for reset\_network to openstack api tests, tabstop 5 to 4, renamed migration * Use RotatingFileHandler instead of FileHandler * pep8 fixes * sanitize all args to strings before sending them to ldap * Use a threadpool for handling requests coming in through RPC * Typos * Derp * Spell flags correctly (i.e. not in upper case) * Fixed merge error * novatools call to child zones done * novatools call to child zones done * Putting glance plugin under pep8 control * fixed authors, import sys in migration.py * Merged trunk * First commit of working code * Stubbed out flavor create/delete API calls * This implements the blueprint 'Openstack API support for hostId': https://blueprints.launchpad.net/nova/+spec/openstack-api-hostid Now instances will have a unique hostId which for now is just a hash of the host. If the instance does not have a host yet, the hostId will be '' * Fix for bug #716847 * merge trunk * First commit for xenapi-vlan-networking. Totally untested * added functionality to nova-manage to list created networks * Add back --logdir=DIR option. If set, a logfile named after the binary (e.g. nova-api.log) will be kept in DIR * Fix PEP-8 stuff * assertIsNone is a 2.7-ism * This branch should resolve nova bug #718675 (https://bugs.launchpad.net/nova/+bug/718675) * Added myself to the authors file * I fail at sessions * I fail at sessions * Foo * hurr durr * Merging trunk part 1 * stubbed out reset networkin xenapi VM tests to solve domid problem * foo * foo * Adding vhd hidden sanity check * Fixes 718994 * Make rpc thread pool size configurable * merge with trunk * fail * Fixing test by adding stub for get\_image\_meta * this bug bit me hard today. pv can be None, which does not translate to %d and this error gets clobbered by causing errors in the business in charge of capturing output and reporting errors * More pep8 fixes * Pep8 fixes * Set name-label on VDI * Merge * Don't hide RotatingFileHandler behind FileHandler's name * Refactor code that decides which logfile to use, if any * Fixing typo * polling working * Using Nova style nokernel * changed d to s * merge with trunk * More plugin lol * moved reset network to after boot durrrrr.. 
* Don't hid RotatingFileHandler behind FileHandler's name * removed flag --pidfile from nova/services.py * Added teammate Naveed to authors file for his help * plugin lol * Plugin changes * merging trunk back in; updating Authors conflict * Adding documentation * Regrouping methods so they make sense * zone/info works * Refactoring put\_vdis * Adding safe\_find\_sr * Merged lp:nova * Fixes tarball contents by adding missing scripts and files to setup.py / MANIFEST.in * Moving SR path code outside of glance plugin * When re-throwing an exception, use "raise", not "raise e". This way we don't lose the stack trace * Adding more documentation, code-cleanup * Replace placeholders in nova.pot with some actual values * The proposed fix puts a VM which fails to spawn in a (new) 'FAILED' power state. It does not perform a clean-up. This because the user needs to know what has happened to the VM he/she was trying to run. Normally, API users do not have access to log files. In this case, the only way for the user to know what happened to the instance is to query its state (e.g.: doing euca-describe-instances). If we perform a complete clean-up, no information about the instance which failed to spawn will be left * Some trivial cleanups in context.py, mostly just a test of using the updated git-bzr-ng * Use eventlet.green.subprocess instead of standard subprocess * derp * Better host acquisition * zones merge * fixed / renamed migration scripts * Merged trunk * Update .pot file with source file and line numbers after running python setup.py build * Adds Distutils.Extra support, removes Babel support, which is half-baked at best * Pull in .po message catalogs from lp:~nova-core/nova/translations * Fix sporadically failing unittests * Missing nova/tests/db/nova.austin.sqlite file * Translations will be shipped in po/, not locale/ * Adding missing scripts and files to setup.py / MANIFEST.in * Fixes issues when running euca-run-instances and euca-describe-image-attribute against the latest nova/trunk EC2 API * initial * Naïve attempt at threading rpc requests * Beautify it a little bit, thanks to dabo * OS-55: Moved conn\_common code into disk.py * Break out of the "for group in rv" loop in security group unit tests so that we are use we are dealing with the correct group * Tons o loggin * merged trunk * Refactored * Launchpad automatic translations update * trunk merge * better filtering * Adding DISK\_VHD to ImageTypes * Updates to that S3ImageService kernel\_id and ramdisk\_id mappings work with EC2 API * fixed nova-combined debug hack and renamed ChildZone to Zone * plugin * Removing testing statements * Adds missing flag that makes use\_nova\_chains work properly * bad plugin * bad plugin * bad plugin * fixed merge conflict * First cut on XenServer unified-images * removed debugging * fixed template and added migration * better filtering * Use RotatingFileHandler instead of FileHandler * Typo fixes * Resurrect logdir option * hurr * Some refactoring * hurr * Snapshot correctly * Added try clause to handle changed location of exceptions after 1.6.3 in python-migrate LP Bug #717467 * Use eventlet.green.subprocess instead of standard subprocess * Made kernel and ram disk be deleted in xen api upon instance termination * Snapshot correctly * merged recent version. 
no conflict, no big/important change to this branch * wharrgarbl * merge jk0 branch (with trunk merge) which added additional columns for instance\_types (which are openstack api specific) * corrected model for table lookup * More fixes * Derp * fix for bug #716847 - if a volume has not been assigned to a host, then delete from db and skip rpc * added call to reset\_network from openstack api down to vmops * merging with trunk * Got rid of BadParameter, just using standard python ValueError * Merged trunk * support for multiple IPs per network * Fix DescribeRegion answer by using specific 'listen' configuration parameter instead of overloading ec2\_host * Fixed tables creation order and added clearing db after errors * Modified S3ImageService to return the format defined in BaseService to allow EC2 API's DescribeImages to work against Glance * re-add input\_chain because it got deleted at some point * Launchpad automatic translations update * Fixes a typo in the auth checking for DescribeAvailabilityZones * Fixes describe\_security\_groups by forcing it to return a list instead of a generator * return a list instead of a generator from describe\_groups * Hi guys * Added missing doc string and made a few style tweaks * fix typo in auth checking for describe\_availability\_zones * now sorting by project, then by group * Launchpad automatic translations update * Made a few tweaks to format of S3 service implementation * Merged trunk * First attempt to make all image services use similar schemas * fix :returns: and add pep-0257 * Preliminary fix for issue, need more thorough testing before pushing to lp * Launchpad automatic translations update * More typos * More typos * More typos * More typos * More typos * fixed exceptions import from python migrate * Cast to host * This fixes a lazy-load issue in describe-instances, which causes a crash. 
The solution is to specifically load the network table when retrieving an instance * added instance\_type\_purge() to actually remove records from db * updated tests and added more error checking * Merged trunk * more error checking on inputs and better errors returned * Added more columns to instance\_types tables * Added LOG line to describe groups function to find out what's going * joinedload network so describe\_instances continues to work * zone api tests passing * Create a new AMQP connection by default * First, not all * Merged to trunk and fixed merge conflict in Authors * rough cut at zone api tests * Following Rick and Jay's suggestions: - Fixed LOG.debug for translation - improved vm\_utils.VM\_Helper.ensure\_free\_mem * Create a new AMQP connection by default * after hours of tracking his prey, ken slowly crept behind the elusive wilderbeast test import hiding in the libvirt\_conn.py bushes and gutted it with his steely blade * fixed destroy calls * Forgot the metadata includes * added get IPs by instance * added resetnetwork to the XenAPIPlugin.dispatch dict * Forgot the metadata includes * Forgot the metadata includes * Typo fixes and some stupidity about the models * passing instance to reset\_network instead of vm\_ref, also not converting to an opaque ref before making plugin call * Define sql\_idle\_timeout flag to be an integer * forgot to add network\_get\_all\_by\_instance to db.api * template adjusted to NOVA\_TOOLS, zone db & os api layers added * Spawn from disk * Some more cleanup * sql\_idle\_timeout should be an integer * merged model change: flavorid needs to unique in model * testing refactor * flavorid needs to unique in model * Add forwarding rules for floating IPs to the OUTPUT chain on the network node in addition to the PREROUTING chain * typo * refactored api call to use instance\_types * Use a NullPool for sqlite connections * Get a fresh connection in rpc.cast rather than using a recycled one * Make rpc.cast create a fresh amqp connection. Each API request has its own thread, and they don't multiplex well * Only use NullPool when using sqlite * Also add floating ip forwarding to OUTPUT chain * trunk merge * removed ZoneCommands from nova-manage * Try using NullPool instead of SingletonPool * Try setting isolation\_level=immediate * This branch fixes bug #708347: RunInstances: Invalid instance type gives improper error message * Wrap line to under 79 characters * Launchpad automatic translations update * adding myself to Authors file * 1. Merged to rev654(?) 2. Fixed bug continuous request. if user continuouslly send live-migration request to same host, concurrent request to iptables occurs, and iptables complains. This version add retry for this issue * forgot to register new instance\_types table * Plugin tidying and more migration implementation * fixed overlooked mandatory changes in Xen * Renamed migration plugin * A lot of stuff * - population of public and private addresses containers in openstack api - replacement of sqlalchemy model in instance stub with dict * Fixes the ordering of init\_host commands so that iptables chains are created before they are used * Pass timestamps to the db layer in fixed\_ip\_disassociate\_all\_by\_timeout rather than converting to strings ahead of time, otherwise comparison between timestamps would often fail * Added support for 'SAN' style volumes. 
A SAN's big difference is that the iSCSI target won't normally run on the same host as the volume service * added support to pull list of ALL instance types even those that are marked deleted * Indent args to ssh\_connect correctly * Fix PEP8 violations * Added myself to Authors * 1) Moved tests for limiter to test\_common.py (from \_\_init\_\_.py) and expanded test suite to include bad inputs and tests for custom limits (#2) * Added my mail alias (Part of an experiment in using github, which got messy fast...) * Fixed pep8 error in vm\_utils.py * Add my name to AUTHORS, remove parentheses from the substitution made in the previous commit * Don't convert datetime objects to a string using .isoformat(). Leave it to sqlalchmeny (or pysqlite or whatever it is that does the magic) to work it out * Added test case for 'not enough memory' Successfully ran unit tests Fixed pep8 errors * Give a better error message if the instance type specified is invalid * Launchpad automatic translations update * added testing for instance\_types.py and refactored nova-manage to use instance\_types.py instead of going directly to db * added create and delete methods to instance\_types in preparation to call them from nova-manage * added testing for nova-manage instance\_type * additional error checking for nova-manage instance\_type * Typos and primary keys * Automates the setup for FlatDHCP regardless of whether the interface has an ip address * add docstring and revert set\_ip changes as they are unnecessary * Commas help * Changes and bug fixes * avoiding HOST\_UNAVAILABLE exception: if there is not enough free memory does not spawn the VM at all. instance state is set to "SHUTDOWN" * merge lp:nova at revision #654 * merge with lp:nova * Fixed pep8 errors Unit tests passed * merge source and remove ifconfig * fixes #713766 and probably #710959, please test the patch before committing it * use route -n instead of route to avoid chopped names * Updates to the multinode install doc based on Wayne's findings. Merged with trunk so should easily merge in * Checks whether the instance id is a list or not before assignment. This is to fix a bug relating to nova/boto. The AWK-SDK libraries pass in a string, not a list. The euca tools pass in a list * Launchpad automatic translations update * Catching all socket errors in \_get\_my\_ip, since any socket error is likely enough to cause a failure in detection * Catching all socket errors in \_get\_my\_ip, since any socket error is likely enough to cause a failure in detection * blargh * Some stuff * added INSTANCE\_TYPES to test for compatibility with current tests * Checking whether the instance id is a list or not before assignment. This is to fix a bug relating to nova/boto. The AWK-SDK libraries pass in a string, not a list. 
the euca tools pass in a list * Added data\_transfer xapi plugin * Another quick fix to multinode install doc * Made updates to multinode install doc * fixed instance\_types methods to use database backend * require user context for most flavor/instance\_type read calls * added network\_get\_all\_by\_instance(), call to reset\_network in vmops * added new parameter --dhcp\_domain to set the used domain by dnsmasq in /etc/nova/nova.conf * minor * Fix for bug #714709 * A few changes * fixed format according to PEP8 * replaced all calls to ifconfig with calls to ip * added myself to the Authors file * applied http://launchpadlibrarian.net/63698868/713434.patch * Launchpad automatic translations update * aliased flavor to instance\_types in nova-manage. will probably need to make flavor a full fledged class as users will want to list flavors by flavor name * simplified instance\_types db calls to return entire row - we may need these extra columns for some features and there seems to be little downside in including them. still need to fix testing calls * refactor to remove ugly code in flavors * updated api.create to use instance\_type table * added preliminary testing for bin/nova-manage while i am somewhat conflicted about the path these tests have taken, i think it is better than no tests at all * rewrote nova-manage instance\_type to use correct db.api returned objects and have more robust error handling * instance\_types should return in predicatable order (by name currently) * flavorid and name need to be unique in the database for the ec2 and openstack apis, repectively * corrected db.instance\_types to return expect dict instead of lists. updated openstack flavors to expect dicts instead of lists. added deleted column to returned dict * converted openstack flavors over to use instance\_types table. a few pep changes * added FIXME(kpepple) comments for all constant usage of INSTANCE\_TYPES. updated api/ec2/admin.py to use the new instance\_types db table * Launchpad automatic translations update * allow for bridge to be the public interface * Removed (newly) unused exception variables * Didn't mean to actually make changes to the glance plugin * Added a bunch of stubbed out functionality * Moved ssh\_execute to utils; moved comments to docstring * Fixes for Vish & Devin's feedback * Fixes https://bugs.launchpad.net/nova/+bug/681417 * Don't swallow exception stack traces by doing 'raise e'; just use 'raise' * Implementation of 'SAN' volumes A SAN volume is 'special' because the volume service probably won't run on the iSCSI target. Initial support is for Solaris with COMSTAR (Solaris 11) * merging * Fixed PEP8 test problems, complaining about too many blank lines at line 51 * Adds logging.basicConfig() to run\_tests.py so that attempting to log debug messages from tests will work * Launchpad automatic translations update * flagged all INSTANCE\_TYPES usage with FIXME comment. Added basic usage to nova-manage (needs formatting). created api methods * added seed data to migration * Don't need a route for guests. Turns out the issue with routing from the guests was due to duplicate macs * Changes the behavior of run\_test.sh so that pep8 is only run in the default case (when running all tests). 
It will no longer run when individual test cases are being given as in: * open cactus * some updates to HACKING to describe the docstrings * Casting to the scheduler * moves driver.init\_host into the base class so it happens before floating forwards and sets up proper iptables chains

2011.1
------

* Set FINAL = True in version.py * Open Cactus development * Set FINAL = True in version.py * pass the set\_ip from ensure\_vlan\_bridge * don't fail on ip add exists and recreate default route on ip move if needed * initial support for dynamic instance\_types: db migration and model, stub tests and stub methods * better setup for flatdhcp * added to inject networking data into the xenstore * forgot context param for network\_get\_all * Fixes bug #709057 * Add and document the provider\_fw method in virt/FakeConnection * Fix for LP Bug #709510 * merge trunk * fix pep8 error :/ * Changed default handler for uncaughted exceptions. It uses logging instead print to stderr * Launchpad automatic translations update * rpartition sticks the rhs in [2] * Fix for LP Bug #709510 * change ensure\_bridge so it doesn't overwrite existing ips * Fix for LP Bug #709510 * Enabled modification of projects using the EC2 admin API * Reorder insance rules for provider rules immediately after base, before secgroups * Merged trunk * Match the initial db version to the actual Austin release db schema * 1. Discard nova-manage host list Reason: nova-manage service list can be replacement. Changes: nova-manage * Only run pep8 after tests if running all the tests * add logging.basicConfig() to tests * fix austin->bexar db migration * woops * trivial cleanup for context.py * Made adminclient get\_user return None instead of throwing EC2Exception if requested user not available * pep8 * Added modify project to ec2 admin api * incorporate feedback from devin - use sql consistently in instance\_destroy also, set deleted\_at * Fixed whitespace * Made adminclient get\_user return None instead of throwing EC2Exception if requested user not available * OS-55: Fix typo for libvirt\_conn operation * merge trunk * remove extraneous line * Fixed pep8 errors * Changed default handler for uncaughted exceptions. Logging with level critical instead of print to stderr * Disassociate all floating ips on terminate instance * Fixes simple scheduler to able to be run\_instance by admins + availability zones * Makes having sphinx to build docs a conditional thing - if you have it, you can get docs. If you don't, you can't * Fixed a pep8 spacing issue * fixes for bug #709057 * Working on api / manager / db support for zones * Launchpad automatic translations update * Adds security group output to describe\_instances * Use firewall\_driver flag as expected with NWFilterFirewall. This way, either you use NWFilterFirewall directly, or you use IptablesFirewall, which creates its own instance of NWFilterFirewall for the setup\_basic\_filtering command. This removes the requirement that LibvirtConnection would always need to know about NWFirewallFilter, and cleans up the area where the flag is used for loading the firewall class * simplify get and remove extra reference to import logging * Added a test that checks for localized strings in the source code that contain position-based string formatting placeholders. If found, an exception message is generated that summarizes the problem, as well as the location of the problematic code.
This will prevent future trunk commits from adding localized strings that cannot be properly translated * Made changes based on code review * makes sure that : is in the availability zone before it attempts to use it to send instances to a particular host * Makes sure all instance and volume commands that raise not found are changed to show the ec2\_id instead of the internal id * remove all floating addresses on terminate instance * Merged in trunk changes * Fixed formatting issues in current codebase * Added the test for localized string formatting * Fixes NotFound messages in api to show the ec2\_id * Changed cpu limit to a static value of 100000 (100%) instead of using the vcpu value of 1. There is no weight/limit variable now so I see no other solution than the static max limit * Make nova.virt.images fetch images from a Glance URL when Glance is used as the image service (rather than unconditionally fetch them from an S3/objectstore URL) * Fixed spacing... AGAIN * Make unit tests clean up their mess in /tmp after themselves * Make xml namespace match the API version requested * Missing import in xen plugin * Shortened comment for 80char limt * Added missing import * Naive, low-regression-risk fix enabling Glance to work with libvirt/hyperv * Add unit test for xmlns version matching request version * Properly pulling the name attribute from security\_group * adding testcode * Fix Bug #703037. ra\_server is None * Fix regression in s3 image service. This should be a feature freeze exception * I have a feeling if we try to migrate from imageId to id we'll be tracking it down a while * more instanceId => id fixes * Fix regression in imageId => id field rename in s3 image service * Apply lp:707675 to this branch to be able to test * merge trunk * A couple of bugfixes * Fixes a stupid mistake I made when I moved this method from a module into a class * Add dan.prince to Authors * Make xml namespace match the API version requested * Fix issue in s3.py causing where '\_fix\_image\_id' is not defined * added mapping parameter to write\_network\_config\_to\_xenstore * OS-55: Added a test case for XenAPI file-based network injection OS-55: Stubbed out utils.execute for all XenAPI VM tests, including command simulation where necessary * Simple little changes related to openstack api to work better with glance * Merged trunk * Cleaned up \_start() and \_shutdown() * Added missing int to string conversion * Simple little changes related to openstack api to work better with glance * use 'ip addr change' * Fix merge miss * Changed method signature of create\_network * merged r621 * Merged with http://bazaar.launchpad.net/~vishvananda/nova/lp703037 * Merged with vish branch * Prefixed ending multi-line docstring with a newline * Fixing documentation strings. Second attempt at pep8 * Removal of image tempdir in test tearDown. Also, reformatted a couple method comments to match the file's style * Add DescribeInstanceTypes to admin api. This lets the dashboard know what sizes can be launched (using the -t flag in euca-run-instances, for example) and what resources they provide * Rename Mock, since it wasn't a Mock * Add DescribeInstanceTypes to admin api (dashboard uses it) * Fix for LP Bug #699654 * Change how libvirt firewall drivers work to have meaningful flags * Fixed pep8 errors * This branch updates docs to reflect the db sync addition. It additionally adds some useful errors to nova-manage to help people that are using old guides. It wraps sqlalchemy errors in generic DBError. 
Finally, it updates nova.sh to use current settings * Added myself to the authors list * fix pep8 issue (and my commit hook that didn't catch it) * Add a host argument to virt drivers's init\_host method. It will be set to the name of host it's running on * merged trunk * Wraps the NotFound exception at the api layer to print the proper instance id. Does the same for volume. Note that euca-describe-volumes doesn't pass in volume ids properly, so you will get no error messages on euca-describe-volumes with improper ids. We may also need to wrap a few other calls as well * Fixes issue with SNATTING chain not getting created or added to POSTROUTING when nova-network starts * Fix for bug #702237 * Moving init\_host before metadata\_forward, as metadata\_forward modifies prerouting rules * another trunk merge * Limit all lines to a maximum of 79 characters * Perform same filtering for OUTPUT as FORWARD in iptables * Fixed up a little image\_id return * Trunk merged * This patch: * Trunk merged * In instance chains and rules for ipv4 and ipv6, ACCEPT target was missing * moved imageId change to s3 client * Migration for provider firewall rules * Updates for provider\_fw\_rules in admin api * Adds driver.init\_host() call to flatdhcp driver * Fixed pep8 errors * Fixed pep8 errors * No longer hard coding to "/tmp/nova/images/". Using tempdir so tests run by different people on the same development machine pass * Perform same filtering for OUTPUT as FORWARD in iptables. This removes a way around the filtering * Fix pep-8 problem from prereq branch * Add a host argument to virt driver's init\_host method. It will be set to the name of host it's running on * updated authors since build is failing * Adds conditional around sphinx inclusion * merge with trunk * Fixes project and role checking when a user's naming attribute is not uid * I am new to nova, and wanted to fix a fairly trivial bug in order to understand the process * Fix for LP Bug #707554 * Added iptables rule to IptablesFirewallDriver like in Hisaharu Ishii patch with some workaround * Set the default number of IP's to to reserve for VPN to 0 * Merged with r606 * Properly fixed spacing issue for pep8 * Fixed spacing issue for pep8 * Fixed merge conflict * Added myself to ./Authors file * Switches from project\_get\_network to network\_get\_by\_instance, which actually works with all networking modes. Also removes a couple duplicate lines from a bad merge * Set the default number of IP's to to reserver for VPN to 0 * Localized strings that employ formatting should not use positional arguments, as they prevent the translator from re-ordering the translated text; instead, they should use mappings (i.e., dicts). 
This change replaces all localized formatted strings that use more than one formatting placeholder with a mapping version * add ip and network to nwfilter test * merged ntt branch * use network\_get\_by\_instance * Added myself (John Dewey) to Authors * corrected nesting of the data dictionary * Updated a couple data structures to pass pep8 * Added static cpu limit of 100000 (100%) to hyperv.py instead of using the vcpu value of 1 * PEP8 fixes * Changes \_\_dn\_to\_uid to return the uid attribute from the user's object * OS-55: PEP8 fixes * merged branch to name net\_manager.create\_networks args * the net\_managers expect different args to create\_networks, so nova-manage's call to net\_manager.create\_networks was changed to use named args to prevent argument mismatching * OS-55: Post-merge fixes * Fix describe\_regions by changing renamed flags. Also added a test to catch future errors * changed nova-manage to use named arguments to net\_manager.create\_networks * Merged trunk * Removed tabs form source. Merged trunk changes * allow docs to build in virtualenv prevent setup.py from failing with sphinx in virtualenv * fixes doc build and setup.py fail in virtualenv * fix reversed assignment * fixes and refactoring of smoketests * remove extra print * add test and fix describe regions * merged trunk * This patch skips VM shutdown if already in the halted state * Use Glance to relate machine image with kernel and ramdisk * Skip shutdown if already halted * Refactoring \_destroy into steps * i18n! * merged trunk fixed whitespace in rst * wrap sqlalchemy exceptions in a generic error * Wrap instance at api layer to print the proper error. Use same logic for volumes * This patch adds two flags: * Using new style logging * Adding ability to remap VBD device * Resolved trunk merge conflicts * Adds gettext to pluginlib\_nova.py. Fixes #706029 * Adding getttext to pluginlib\_nova * Add provider\_fw\_rules awareness to iptables firewall driver * No longer chmod 0777 instance directories, since nova works just fine without them * Updated docs for db sync requirements; merged with Vish's similar doc updates * Change default log formats so that:  \* they include a timestamp (necessary to correlate logs)  \* no longer display version on every line (shorter lines)  \* use [-] instead of [N/A] (shorter lines, less scary-looking)  \* show level before logger name (better human-readability) * OS55: pylint fixes * OS-55: Added unit test for network injection via xenstore * fixed typo * OS-55: Fix current unit tests * Fixed for pep8 * Merged with rev597 * No longer chmod 0777 instance directories * Reverted log type from error to audit * undid moving argument * Fix for LP Bug #699654 * moved argument for label * fixed the migration * really added migration for networks label * added default label to nova-manage and create\_networks * syntax * syntax error * added plugin call for resetnetworking * Fix metadata using versions other than /later. 
Patch via ~ttx * should be writing some kindof network info to the xenstore now, hopefully * Use ttx's patch to be explict about paths, as urlmap doesn't work as I expected * Doc changes for db sync * Fixes issue with instance creation throwing errors when non-default groups are used * Saving a database call by getting the security groups from the instance object * Fixes issue with describe\_instances requiring an admin context * OS-55: pylint fixes * Fixing another instance of getting a list of ids instead of a list of objects * Adds security group output to describe\_instances * Finds and fixes remaining strings for i18n. Fixes bug #705186 * Pass a PluginManager to nose.config.Config(). This lets us use plugins like coverage, xcoverage, etc * i18n's strings that were missed or have been added since initial i18n strings branch * OS-55: Only modify Linux image with no or injection-incapable guest agent OS-55: Support network configuration via xenstore for Windows images * A couple of copypasta errors * Keep exception tracing as it was * Pass a PluginManager to nose.config.Config(). This lets us use plugins like coverage, xcoverage, etc * Also print version at nova-api startup, for consistency * Add timestamp to default log format, invert name and level for better readability, log version once at startup * When radvd is already running, not to hup, but to restart * fix ipv6 conditional * more smoketest fixes * Passing in an elevated context instead of making the call non-elevated * Added changes to make errors and recovery for volumes more graceful: * Fetches the security group from ID, allowing the object to be used properly, later * Changing service\_get\_all\_by\_host to not require admin context as it is used for describing instances, which any user in a project can do * Exclude vcsversion.py from pep8 check. It's not compliant, but out of our control * Exclude vcsversion.py from pep8 check. 
It's not compliant, but out of our control * Include paste config in tarball * Add etc/ directory to tarball * Fixes for bugs: * Return non-zero if either unit tests or pep8 fails * Eagerly load fixed\_ip.network in instance\_get\_by\_id * Add Rob Kost to Authors * Return non-zero if either unit tests or pep8 fails * Merged trunk * merge trunk * Add paste and paste.deploy installation to nova.sh, needed for api server * Updated trunk changes to work with localization * Implement provider-level firewall rules in nwfilter * Whitespace (pep8) cleanups * Exception string lacking 'G' for gigabytes unit * Fixes \*\*params unpacking to ensure all kwargs are strings for compatibility with python 2.6.1 * make sure params have no unicode keys * Removed unneeded line * Merged trunk * Refactor run\_tests.sh to allow us to run an extra command after the tests * update the docs to reflect db sync as well * add helpful error messages to nova-manage and update nova.sh * Fixed unit tests * Merged trunk * fixed pep8 error * Eagerly load instance's fixed\_ip.network attribute * merged trunk changes * minor code cleanup * minor code cleanup * remove blank from Authors * .mailmap rewrite * .mailmap updated * Refactor run\_tests.sh to allow us to run an extra command after the tests * Add an apply\_instance\_filter method to NWFilter driver * PEP-8 fixes * Revert Firewalldriver * Replace an old use of ec2\_id with id in describe\_addresses * various fixes to smoketests, including allowing admin tests to run as a user, better timing, and allowing volume tests to run on non-udev linux * merged trunk * replace old ec2\_id with proper id in describe\_addresses * merge vish's changes (which merged trunk and fixed a pep8 problem) * merged trunkand fixed conflicts and pep error * get\_my\_linklocal raises exception * Completed first pass at converting all localized strings with multiple format substitutions * Allows moving from the Austin-style db to the Bexar-style * move db sync into nosetests package-level fixtures so that the existing nosetests attempt in hudson will pass * previous commit breaks volume.driver. fix it. * per vish's feedback, allow admin to specify volume id in any of the acceptable manners (vol-, volume-, and int) * Merged trunk * Fixed unit tests * Fix merge conflict * add two more columns, set string lengths) * Enable the use\_ipv6 flag in unit tests by default * Fixed unit tests * merge from upstream and fix small issues * merged to trunk rev572 * fixed based on reviewer's comment * Basic stubbing throughout the stack * Enable the use\_ipv6 flag in unit tests by default * Add an apply\_instance\_filter method to NWFilter driver * update status to 'error\_deleting' on volumes where deletion fails * Merged trunk * This disables ipv6 by default. Most use cases will not need it on and it makes dependencies more complex * The live\_migration branch ( https://code.launchpad.net/~nttdata/nova/live-migration/+merge/44940 ) was not ready to be merged * merge from upstream to fix conflict * Trunk merge * s/cleanup/volume. volume commands will need their own ns in the long run * disable ipv6 by default * Merged trunk * Plug VBD to existing instance and minor cleanup * fixes related to #701749. 
Also, added nova-manage commands to recover from certain states: * Implement support for streaming images from Glance when using the XenAPI virtualization backend, as per the bexar-xenapi-support-for-glance blueprint * Works around the app-armor problem of requiring disks with backing files to be named appropriately by changing the name of our extra disks * fix test to respect xml changes * merged trunk * Add refresh\_security\_group\_\* methods to nova/virt/fake.py, as FakeConnection is the reference for documentation and method signatures that should be implemented by virt connection drivers * added paste pastedeploy to nova.sh * authors needed for test * revert live\_migration branch * This removes the need for the custom udev rule for iscsi devices. It instead attaches the device based on /dev/disk/by-path/ which should make the setup of nova-volume a little easier * Merged trunk * Risk of Regression: This patch don’t modify existing functionlities, but I have added some. 1. nova.db.service.sqlalchemy.model.Serivce (adding a column to database) 2. nova.service ( nova-compute needes to insert information defined by 1 above) * Docstrings aren't guaranteed to exist, so split() can't automatically be called on a method without first checking for the method docstring's existence. Fixes Bug #704447 * Removes circular import issues from bin/stack and replaces utils.loads with json.loads. Fixes Bug#704424 * ComputeAPI -> compute.API in bin/nova-direct-api. Fixes LP#704422 * Fixed apply\_instance\_filter is not implemented in NWFilterFirewall * pep8 * I might have gone overboard with documenting \_members * Add rules to database, cast refresh message and trickle down to firewall driver * Fixed error message in get\_my\_linklocal * openstack api fixes for glance * Stubbed-out code for working with provider-firewalls * Merged trunk * Merged with trunk revno 572 * Better shutdown handling * Change where paste.deploy factories live and how they are called. They are now in the nova.wsgi.Application/Middleware classes, and call the \_\_init\_\_ method of their class with kwargs of the local configuration of the paste file * Further decouple api routing decisions and move into paste.deploy configuration. This makes paste back the nova-api binary * Clean up openstack api test fake * Merged trunk * Add Start/Shutdown support to XenAPI * The Openstack API requires image metadata to be returned immediately after an image-create call * merge trunk * Fixing whitespace * Returning image\_metadata from snapshot() * Merging trunk * Merged trunk * merged trunk rev569 * merged to rev 561 and fixed based on reviewer's comment * Adds a developer interface with direct access to the internal inter-service APIs and a command-line tool based on reflection to interact with them * merge from upstream * pep8 fixes... largely to things from trunk? * merge from upstream * pep8 * remove print statement * This branch fixes two outstanding bugs in compute. It also fixes a bad method signature in network and removes an unused method in cloud * Re-removes TrialTestCase. It was accidentally added in by some merges and causing issues with running tests individually * removed rpc in cloud * merged trial fix again * fix bad function signature in create\_networks * undo accidental removal of fake\_flags * Merged trunk * merged lp:~vishvananda/nova/lp703012 * remove TrialTestCase again and fix merge issues * import re, remove extra call in cloud.py. 
Move get\_console\_output to compute\_api * Create and use a generic handler for RPC calls to compute * Create and use a generic handler for RPC calls to compute * Create and use a generic handler for RPC calls * Merged trunk * OS-55: Inject network settings in linux images * Merged with trunk revno 565 * use .local and .rescue for disk images so they don't make app-armor puke * Implements the blueprint for enabling the setting of the root/admin password on an instance * OpenStack Compute (Nova) IPv4/IPv6 dual stack support http://wiki.openstack.org/BexarIpv6supportReadme * Merged to rev.563 * This change introduces support for Sheepdog (distributed block storage system) which is proposed in https://blueprints.launchpad.net/nova/+spec/sheepdog-support * Sort Authors * Update Authors * merge from upstream: * pep8 fixes * update migration script to add new tables since merge * sort Authors * Merged with r562 * This modifies libvirt to use CoW images instead of raw images. This is much more efficient and allows us to use the snapshotting capabilities available for qcow2 images. It also changes local storage to be a separate drive instead of a separate partition * pep8. Someday I'll remember 2 blank lines between module methods * remove ">>>MERGE" iin nova/db/sqlalchemy/api.py * checking based on pep8 * merged trunk * Modified per sorens review * Fix for Pep-8 * Merged with r561 * Moved commands which needs sudo to nova.sh * Added netaddr for pip-requires * Marking snapshots as private for now * Merging Trunk * Fixing Image ID workaround and typo * Fixed based on the comments from code review. Merged to trunk rev 561 * Add a new method to firewall drivers to tell them to stop filtering a particular instance. Call it when an instance has been destroyed * merged to trunk rev 561 * Merged trunk * merge trunk rev560 * Fixes related to how EC2 ids are displayed and dealt with * Get reviewed and fixed based on comments. Merged latest version * Make libvirt and XenAPI play nice together * Spelling is hard. Typing even moreso * Revert changes to version.py * Minor code cleanups * Minor code cleanups * Minor code cleanups * Make driver calls compatible * Merged trunk * Stubbed out XenServer rescue/unrescue * Added unit tests for the Diffie-Hellman class. Merged recent trunk changes * Bring NWFilter driver up to speed on unfilter\_instance * Replaced home-grown Diffie-Hellman implementation with the M2Crypto version supplied by Soren * Instead of a set() to keep track of instances and security groups, use a dict(). \_\_eq\_\_ for stuff coming out of sqlalchemy does not do what I expected (probably due to our use of sessions) * Fixes broken call to \_\_generate\_rc in auth manager * Fixes bug #701055. Moves code for instance termination inline so that the manager doesn't prematurely mark an instance as deleted. Prematurely doing so causes find calls to fail, prevents instance data from being deleted, and also causes some other issues * Revert r510 and r512 because Josh had already done the same work * merged trunk * Fixed Authors * Merged with 557 * Fixed missing \_(). Fixed to follow logging to LOG changes. Fixed merge miss (get\_fixed\_ip was moved away). Update some missing comments * merge from upstream and fix leaks in console tests * make sure get\_all returns * Fixes a typo in the name of a variable * Fixes #701055. 
Move instance termination code inline to prevent manager from prematurely marking it as destroyed * fix invalid variable reference in cloud api * fix indentation * add support for database migration * fix changed call to generate\_rc * merged with r555 * fixed method signature of modify\_rules fixed unit\_test for ipv6 * standardize volume ids * standardize volume ids * standardize on hex for ids, allow configurable instance names * correct volume ids for ec2 * correct formatting for volume ids * Fix test failures on Python 2.7 by eagerly loading the fixed\_ip attribute on instances. No clue why it doesn't affect python 2.6, though * Adding TODO to clarify status * Merging trunk * Do joinedload\_all('fixed\_ip.floating\_ips') instead of joinedload('fixed\_ip') * Initialize logging in nova-manage so we don't see errors about missing handlers * \_wait\_with\_callback was changed out from under suspend/resume. fixed * Make rescue/unrescue available to API * Stop error messages for logs when running nova-manage * Fixing stub so tests pass * Merging trunk * Merging trunk, small fixes * This branch adds a backend for using RBD (RADOS Block Device) volumes in nova via libvirt/qemu. This is described in the blueprint here: https://blueprints.launchpad.net/nova/+spec/ceph-block-driver * Fix url matching for years 2010-forward * Update config for launching logger with cleaner factory * Update paste config for ec2 request logging * merged changes from trunk * cleaned up prior merge mess * Merged trunk * My previous modifications to novarc had CLOUDSERVER\_AUTH\_URL pointing to the ec2 api port. Now it's correctly pointing to os api port * Check for whole pool name in check\_for\_setup\_error * change novarc template from cc\_port to osapi\_port. Removed osapi\_port from bin scripts * Start to add rescue/unrescue support * fixed pause and resume * Fixed another issue in \_stream\_disk, as it did never execute \_write\_partition. Fixed fake method accordingly. Fixed pep8 errors * pep8 fixes * Fixing the stub for \_stream\_disk as well * Fix for \_stream\_disk * Merged with r551 * Support IPv6 firewall with IptablesFirewallDriver * Fixed syntax errors * Check whether 'device\_path' has ':' before splitting it * PEP8 fixes, and switch to using the new LOG in vm\_utils, matching what's just come in from trunk * Merged with trunk * Merged with Orlando's recent changes * Added support of availability zones for compute. models.Service got additional field availability\_zone and was created ZoneScheduler that make decisions based on this field. Also replaced fake 'nova' zone in EC2 cloud api * Eagerly load fixed\_ip property of instances * Had to abandon the other branch (~annegentle/nova/newscript) because the diffs weren't working right for me. This is a fresh branch that should be merged correctly with trunk. Thanks for your patience. :) * Added unit tests for the xenapi-glance integration. This adds a glance simulator that can stub in place of glance.client.Client, and enhances the xapi simulator to add the additional calls that the Glance-specific path requires * Merged with 549 * Change command to get link local address Remove superfluous code * This branch adds web based serial console access. Here is an overview of how it works (for libvirt): * Merged with r548 * Fixed bug * Add DescribeInstanceV6 for backward compatibility * Fixed test environments. Fixed bugs in \_fetch\_image\_objecstore and \_lookup\_image\_objcestore (objectstore was broken!) 
Added tests for glance * Fixed for pep8 Remove temporary debugging * changed exception class * Changing DN creation to do searches for entries * Fixes bug #701575: run\_tests.sh fails with a meaningless error if virtualenv is not installed. Proposed fix tries to use easy\_install to install virtualenv if not present * merge trunk, fix conflict * more useful prefix and fix typo in string * use by-path instead of custom udev script * Quick bugfix. Also make the error message more specific and unique in the equivalent code in the revoke method * remove extra whitspaces * Raise meaningful exception when there aren't enough params for a sec group rule * bah - pep8 errors * resolve pylint warnings * Removing script file * Read Full Spec for implementation details and notes on how to boot an instance using OS API. http://etherpad.openstack.org/B2RK0q1CYj * Added my name to Authors list * Changes per Edays comments * Fixed a number of issues with the iptables firewall backend: \* Port specifications for firewalls come back from the data store as integers, but were compared as strings. \* --icmp-type was misspelled as --icmp\_type (underscore vs dash) \* There weren't any unit tests for these issues * merged trunk changes * Removed unneeded SimpleDH code from agent plugin. Improved handling of plugin call failures * Now tries to install virtualenv via easy\_install if not present * Merging trunk * fixed issue in pluginlib\_nova.py * Trunk merge and conflcts resolved * Implementation of xs-console blueprint (adds support for console proxies like xvp) * Fixed a number of issues with the iptables firewall backend: \* Port specifications for firewalls come back from the data store as integers, but were compared as strings. \* --icmp-type was misspelled as --icmp\_type (underscore vs dash) \* There weren't any unit tests for these issues * Add support for EBS volumes to the live migration feature. Currently, only AoE is supported * Changed shared\_ip\_group detail routing * Changed shared\_ip\_group detail routing * A few more changes to the smoeketests. Allows smoketests to find the nova package from the checkout. Adds smoketests for security groups. Also fixes a couple of typos * Fixes the metadata forwarding to work by default * Adds support to nova-manage to modify projects * Add glance to pip-requires, as we're now using the Glance client code from Nova * Now removing kernel/ramdisk VDI after copy Code tested with PV and HVM guests Fixed pep8 errors * merged trunk changes * consolidate boto\_extensions.py and euca-get-ajax-console, fix bugs from previous trunk merge * Fixed issues raised by reviews * xenapi\_conn was not terminating utils/LoopingCall when an exception was occurring. This was causing the eventlet Event to have send\_exception() called more than once (a no-no) * merge trunk * whups, fix accidental change to nova-combined * remove uneeded superclass * Bugfix * Adds the requisite infrastructure for automating translation templates import/export to Launchpad * Added babel/gettext build support * Can now correctly launch images with external kernels through glance * re-merged in trunk to correct conflict * Fix describe\_availablity\_zones versobse * Typo fix * merged changes from trunk * Adding modify option for projects * Fixes describe\_instances to filter by a list of instance\_ids * Late import module for register\_models() so it doesn't create the db before flags are loaded * Checks for existence of volume group using vgs instead of checking to see if /dev/nova-volumes exists. 
The dev is created by udev and isn't always there even if the volume group does exist * Add a new firewall backend for libvirt, based on iptables * Create LibvirtConnection directly, rather than going through libvirt\_conn.get\_connection. This should remove the dependency on libvirt for tests * Fixed xenapi\_conn wait\_for\_task to properly terminate LoopingCall on exception * Fixed xenapi\_conn wait\_for\_task to properly terminate LoopingCall on exception * Fixed xenapi\_conn wait\_for\_task to properly terminate LoopingCall on exception * optimize to call get if instance\_id is specified since most of the time people will just be requesting one id * fix describe instances + test * Moved get\_my\_ip into flags because that is the only thing it is being used for and use it to set a new flag called my\_ip * fixes Document make configuration by updating nova version mechanism to conform to rev530 update * alphbetized Authors * added myself to authors and fixed typo to follow standard * typo correction * fixed small glitch in \_fetch\_image\_glance virtual\_size = imeta['size'] * fixed doc make process for new nova version (rev530) machanism * late import module for register\_models() so it doesn't create the db before flags are loaded * use safer vgs call * Return proper region info in describe\_regions * change API classname to match the way other API's are done * small cleanups * First cut at implementing partition-adding in combination with the Glance streaming. Untested * some small cleanups * merged from upstream and made applicable changes * Adds a mechanism to programmatically determine the version of Nova. The designated version is defined in nova/version.py. When running python setup.py from a bzr checkout, information about the bzr branch is put into nova/vcsversion.py which is conditionally imported in nova/version.py * Return region info in the proper format * Now that we aren't using twisted we can vgs to check for the existence of the volume group * s/canonical\_version/canonical\_version\_string/g * Fix indentation * s/string\_with\_vcs/version\_string\_with\_vcs/g * Some fixes to \_lookup\_image\_glance: fix the return value from lookup\_image, attach the disk read-only before running pygrub, and add some debug logging * Reverted formatting change no longer necessary * removed a merge conflict line I missed before * merged trunk changes * set the hostname factory in the service init * incorporated changes suggested by eday * Add copyright and license info to version.py * Fixes issue in trunk with downloading s3 images for instance creation * Fix pep8 errors * Many fixes to the Glance integration * Wrap logs so we can: \* use a "context" kwarg to track requests all the way through the system \* use a custom formatter so we get the data we want (configurable with flags) \* allow additional formatting for debug statements for easer debugging \* add an AUDIT level, useful for noticing changes to system components \* use named logs instead of the general logger where it makes sesnse * pep8 fixes * Bug #699910: Nova RPC layer silently swallows exceptions * Bug #699912: When failing to connect to a data store, Nova doesn't log which data store it tried to connect to * Bug #699910: Nova RPC layer silently swallows exceptions * pv/hvm detection with pygrub updated for glance * Bug #699912: When failing to connect to a data store, Nova doesn't log which data store it tried to connect to * Resolved merge differences * Additional cleanup prior to pushing * Merged with trunk * Fixing 
unescaped quote in nova-CC-install.sh script plus formatting fixes to multinode install * getting ready to push for merge prop * Fixing headers line by wrapping the headers in single quotes * Less code generation * grabbed the get\_info fix from my other branch * merged changes from trunk * Remove redundant import of nova.context. Use db instance attribute rather than module directly * Merging trunk * Removing some FIXMEs * Reserving image before uploading * merge * Half-finished implementation of the streaming from Glance to a VDI through nova-compute * Fix Nova not to immediately blow up when talking to Glance: we were using the wrong URL to get the image metadata, and ended up getting the whole image instead (and trying to parse it as json) * another merge with trunk to remedy instance\_id issues * merge * Include date in API action query * Review feedback * This branch implements lock functionality. The lock is stored in the compute worker database. Decorators have been added to the openstack API actions which alter instances in any way * Review feedback * Review feedback * Review feedback * typo * refers to instance\_id instead of instance\_ref[instance\_id] * passing the correct parameters to decorated function * accidentally left unlocked in there, it should have been locked * various cleanup and fixes * merged trunk * pep8 * altered argument handling * Got the basic 'set admin password' stuff working * Include date in action query * Let documentation get version from nova/version.py as well * Add default version file for developers * merge pep8 fixes from newlog2 * Track version info, and make available for logging * pep8 * Merged trunk * merge pep8 and tests from wsgirouter branch * Remove test for removed class * Pep8 * pep8 fix * merged trunk changes * commit before merging trunk * Fixes format\_instances error by passing reservation\_id as a kwarg instead of an arg. Also removes extraneous yields in test\_cloud that were causing tests to pass with broken code * Remove module-level factory methods in favor of having a factory class-method on wsgi components themselves. Local options from config are passed to the \_\_init\_\_ method of the component as kwargs * fix the broken tests that allowed the breakage in format to happen * Fix format\_run\_instances to pass in reservation id as a kwarg * Add factories into the wsgi classes * Add blank \_\_init\_\_ file for fixing importability. The stale .pyc masked this error locally * merged trunk changes * Introduces basic support for spawning, rebooting and destroying vms when using Microsoft Hyper-V as the hypervisor. Images need to be in VHD format. Note that although Hyper-V doesn't accept kernel and ramdisk separate from the image, the nova objectstore api still expects an image to have an associated aki and ari. You can use dummy aki and ari images -- the hyper-v driver won't use them or try to download them. Requires Python's WMI module * merged trunk changes * Renamed 'set\_root\_password' to 'set\_admin\_password' globally * merge with trunk * renamed sharedipgroups to shared\_ip\_groups and fixed tests for display\_name * Fix openstack api tests and add a FaultWrapper to turn exceptions to faults * Fixed display\_name on create\_instance * fix some glitches due to someone removing instanc.internal\_id (not that I mind) remove accidental change to nova-combined script * Fixed trunk merge conflicts as spotted by dubs * OS API parity: map image ID to numeric ID. 
Ensure all other OS operations are at least stubbed out and callable * add in separate public hostname for console hosts. flesh out console api data * allow smoketests to find nova package and add security rules * Fix a bunch of pep8 stuff * This addition to the docs clarifies that it is a requirement for contributors to be listed in the Authors file before their commits can be merged to trunk * merge trunk * another merge from trunk to the latest rev * pulled changes from trunk added console api to openstack api * Removed dependencies on nova server components for the admin client * Remove stale doc files so the autogeneration extension for sphinx will work properly * Add to Authors and mailmap * Make test case work again * This branch contains the internal API cleanup branches I had previously proposed, but combined together and with all the UUID key replacement ripped out. This allows multiple REST interfaces (or other tools) to use the internal API directly, rather than having the logic tied up in the ec2 cloud.py file * socat will need to be added to our nova sudoers * merged trunk changes * intermediate work * Created a XenAPI plugin that will allow nova code to read/write/delete from xenstore records for a given instance. Added the basic methods for working with xenstore data to the vmops script, as well as plugin support to xenapi\_conn.py * Merged trunk * Recover from a lost data store connection * Updated register\_models() docstring * simplify decorator into a wrapper fn * add in xs-console worker and tests * pep8 cleanup * more fixes, docstrings * fix injection and xml * Fixing formatting problems with multinode install document * Split internal API get calls to get and get\_all, where the former takes an ID and returns one resource, and the latter can optionally take a filter and return a list of resources * missing \_() * Fixed for pep8 * Fixed:Create instance fails when use\_ipv6=False * Removed debug message which is not needed * Fixed misspelled variable * Fixed bug in nova\_project\_filter\_v6 * The \_update method in base Instance class overides dns\_name\_v6,so fixed it * self.XENAPI.. 
* Changed Paused power state from Error to Paused * fixed json syntax error * stop using partitions and first pass at cow images * Remove stale doc files * pep8 * tests fixed up * Better method for eventlet.wsgi.server logging * Silence eventlet.wsgi.server so it doesn't go to stdout and pollute our logs * Declare a flag for test to run in isolation * Build app manually for test\_api since nova.ec2.API is gone * Recover from a lost data store connection * Added xenstore plugin changed * merged changes from trunk * some more cleanup * need one more newline * Redis dependency no longer needed * Make test\_access use ec2.request instead of .controller and .action * Revert some unneeded formatting since twistd is no longer used * pep8 fixes * Remove flags and unused API class from openstack api, since such things are specified in paste config now * i18n logging and exception strings * remove unused nova/api/\_\_init\_\_.py * Make paste the default api pattern * Rework how routing is done in ec2 endpoint * Change all 2010 Copyright statements to 2010-2011 in doc source directory only * rename easy to direct in the scripts * fix typo in stack tool * rename Easy API to Direct API * Moved \_\_init\_\_ api code to api.py and changed allowed\_instances quota method argument to accept all type data, not just vcpu count * Made the plugin output fully json-ified, so I could remove the exception handlers in vmops.py. Cleaned up some pep8 issues that weren't caught in earlier runs * merged from trunk * Renamed argument to represent possible types in volume\_utils * Removed leftover UUID reference * Removed UUID keys for instance and volume * Merged trunk * Final edits to multi-node doc and install script * Merged trunk changes * Some Bug Fix * Fixed bug in libvirt * Fixed bug * Fixed for pep8 * Fixed conflict with r515 * Merged and fiexed conflicts with r515 * some fixes per vish's feedback * Don't know where that LOG went.. * Final few log tweaks, i18n, levels, including contexts, etc * Apply logging changes as a giant patch to work around the cloudpipe delete + add issue in the original patch * dabo fix to update for password reset v2 * krm\_mapping.json sample file added * dabo fix to update for password reset * added cloudserver vars to novarc template * Update Authors * Add support for rbd volumes * Fixes LP688545 * First pass at feature parity. Includes Image ID hash * Fixing merge conflicts with new branch * merged in trunk changes * Fixing merge conflicts * Fixes LP688545 * Make sure we point to the right PPA's everywhere * Editing note about the database schema available on the wiki * Modifying based on reviewer comments * Uses paste.deploy to make application running configurable. This includes the ability to swap out middlewares, define new endpoints, and generally move away from having code to build wsgi routers and middleware chains into a configurable, extensible method for running wsgi servers * Modifications to the nova-CC-installer.sh based on review * Adds the pool\_recycle option to the sql engine startup call. This enables connection auto-timeout so that connection pooling will work properly. The recommended setting (per sqlalchemy FAQ page) has been provided as a default for a new configuration flag. What this means is that if a db connection sits idle for the configured # of seconds, the engine will automatically close the connection and return it to the available thread pool. See Bug #690314 for info * Add burnin support. 
Services are now by default disabled, but can have instances and volumes run on them using availability\_zone = nova:HOSTNAME. This lets the hardware be put through its paces without being put in the generally available pool of hardware. There is a 'service' subcommand for nova-manage where you can enable, disable, and list statuses of services * pep8 fixes * Merged compute-api-cleanup branch * Removed compute dependency in quota.py * add timeout constant, set to 5 minutes * removed extra whitespace chars at the end of the changed lines * Several documentation corrections and formatting fixes * Minor edits prior to merging changes to the script file * add stubs for xen driver * merge in trunk * merged latest trunk * merge trunk * merge trunk * temp * Stop returning generators in the refresh\_security\_group\_{rules,members} methods * Don't lie about which is the default firewall implementation * Move a closing bracket * Stub out init\_host in libvirt driver * Adjust test suite to the split between base firewall rules provided by nwfilter and the security group filtering * Fix a merge artifact * Remove references to nova-core/ppa and openstack/ppa PPA's * Updated the password generation code * Add support for Sheepdog volumes * Add support for various block device types (block, network, file) * Added agent.py plugin. Merged xenstore plugin changes * fixed pep8 issues * Added OpenStack's copyright to the xenstore plugin * fixed pep8 issues * merged in trunk and xenstore-plugin changes * Ignore CA/crl.pem * Before merge with xenstore-plugin code * Corrected the sloppy import in the xenstore plugin that was copied from other plugins * Ignore CA/crl.pem * Merged trunk * Merged trunk * deleting README.livemigration.txt and nova/livemigration\_test/\* * Merged trunk * Merged trunk * Merged with the latest version. The changes are as follows: added my affiliation to Authors; utils.py's generate\_uid was broken and instance IDs were overflowing, so that handling was temporarily removed, to be retested later * Merged trunk * Auth Tokens assumed the user\_id was an int, not a string * Removed dependencies on flags.py from adminclient * Make InstanceActions and live diagnostics available through the Admin API * Cleanup * Improved test * removed some debugging code left in previous push * Converted the pool\_recycle setting to be a flag with a default of 3600 seconds * completed the basic xenstore read/write/delete functionality * Removed problematic test * PEP8 fix * \* Fix bad query in \_\_project\_to\_dn \* use \_\_find\_dns instead of \_\_find\_objects in \_\_uid\_to\_dn and \_\_project\_to\_dn * Moved network operation code in ec2 api into a generic network API class. Removed a circular dependency with compute/quota * Oopsies * merge trunk * merge trunk * Make compute.api methods verbs * Fail * Review feedback * Cleans up the output of run\_tests.sh to look closer to Trial * change exit code * Changing DN creation to do searches for entries * Merged trunk * Implemented review feedback * This patch is beginning of XenServer snapshots in nova. It adds: * Merged trunk * Calling compute api directly from OpenStack image create * Several documentation corrections * merge recent revision(version of 2010/12/28) Change: 1. Use greenthread instead of defer at nova.virt.libvirt\_conn.live\_migration. 2. Move nova.scheduler.manager.live\_migration to nova.scheduler.driver 3. Move nova.scheduler.manager.has\_enough\_resource to nova.scheduler.driver 4.
Any check routine in nova-manage.instance.live\_migration is moved to nova.scheduler.driver.schedule\_live\_migration * Merging trunk * Note that contributors are required to be listed in Authors file before work can be merged into trunk * Mention Authors and .mailmap files in Developer Guide * pep 8 * remove cloudpipe from paste config * Clean up how we determine IP to bind to * Converted a few more ec2 calls to use compute api * Cleaned up the compute API, mostly consistency with other parts of the system and renaming redundant module names * fixed the compute lock test * altered the compute lock test * removed tests.api.openstack.test\_servers test\_lock, to hell with it. i'm not even sure if testing lock needs to be at this level * fixed up the compute lock test, was failing because the context was always admin * syntax error * moved check lock decorator from the compute api to the come manager... when it rains it pours * removed db.set\_lock, using update\_instance instead * added some logging * typo, trying to hurry.. look where that got me * altered error exception/logging * altered error exception/logging * fixd variables being out of scope in lock decorator * moved check lock decorator to compute api level. altered openstack.test\_servers according and wrote test for lock in tests.test\_compute * Moved ec2 volume operations into a volume API interface for other components to use. Added attach/detach as compute.api methods, since they operate in the context of instances (and to avoid a dependency loop) * pep8 fix, and add in flags that don't refernece my laptop * apt-get install socat, which is used to connect to the console * removed lock check from show and changed returning 404 to 405 * fix lp:695182, scheduler tests needed to DECLARE flag to run standalone * removed () from if (can't believe i did that) and renamed checks\_lock decorator * Add the pool\_recycle setting to enable connection pooling features for the sql engine. The setting is hard-coded to 3600 seconds (one hour) per the recommendation provided on sqlalchemy's site * i18n * Pep-8 cleanup * Fix scheduler testcase so it knows all flags and can run in isolation * removed some code i didn't end up using * fixed merge conflict with trunk * pep8 * fixed up test for lock * added tests for EC2 describe\_instances * PEP8 cleanup * This branch fixes an issue where VM creation fails because of a missing flag definition for 'injected\_network\_template'. See Bug #695467 for more info * Added tests * added test for lock to os api * refactor * Re-added flag definition for injected\_network\_template. Tested & verified fix in the same env as the original bug * forgot import * syntax error * Merged trunk * Added implementation availability\_zones to EC2 API * Updating Authors * merge * Changes and error fixes to help ensure basic parity with the Rackspace API. 
Some features are still missing, such as shared ip groups, and will be added in a later patch set * initial lock functionality commit * Merged with trunk * Additional edits in nova.concepts.rst while waiting for script changes * Bug #694880: nova-compute now depends upon Cheetah even when not using libvirt * add ajax console proxy to nova.sh * merge trunk * Fix pep8 violations * add in unit tests * removed superfluous line * Address bug #695157 by using a blank request class and setting an empty request path * Defualt services to enabled * Address bug #695157 by using a blank request class and setting an empty request path * Add flag --enable\_new\_services to toggle default state of service when created * merge from trunk * This commit introduces scripts to apply XenServer host networking protections * Whoops * merge from upstream and fix conflicts * Update .mailmap with both email addresses for Ant and myself * Make action log available through Admin API * Merging trunk * Add some basic snapshot tests * Added get\_diagnostics placeholders to libvirt and fake * Merged trunk * Added InstanceAction DB functions * merge trunk * Bug #694890: run\_tests.sh sometimes doesn't pass arguments to nosetest * Output of run\_tests.sh to be closer to trial * I've added suspend along with a few changes to power state as well. I can't imagine suspend will be controversial but I've added a new power state for "suspended" to nova.compute.power\_states which libvirt doesn't use and updated the xenapi power mapping to use it for suspended state. I also updated the mappings in nova.api.openstack.servers to map PAUSED to "error" and SUSPENDED to "suspended". Thoughts there are that we don't currently (openstack API v1.0) use pause, so if somehow an instance were to be paused an error occurred somewhere, or someone did something in error. Either way asking the xenserver host for the status would show "paused". Support for more power states needs to be added to the next version of the openstack API * fixed a line length * Bug #694880: nova-compute now depends upon Cheetah even when not using libvirt * Bug #694890: run\_tests.sh sometimes doesn't pass arguments to nosetest * fix bug #lp694311 * Typo fix * Renamed based on feedback from another branch * Added stack command-line tool * missed a couple of gettext \_() * Cleans up nova.api.openstack.images and fix it to work with cloudservers api. Previously "cloudservers image-list" wouldn't work, now it will. There are mappings in place to handle s3 or glance/local image service. In the future when the local image service is working, we can probably drop the s3 mappings * Fixing snapshots, pep8 fixes * translate status was returning the wrong item * Fixing bad merge * Converted Volume model and operation to use UUIDs * inst -> item * syntax error * renaming things to be a bit more descriptive * Merging trunk * Converted instance references to GUID type * Added custom guid type so we can choose the most efficient backend DB type easily * backup schedule changes * Merged trunk * Merging trunk, fixing failed tests * A few fixes * removed \ * Moving README to doc/networking.rst per recommendation from Jay Pipes * Merged trunk * couple of pep8s * merge trunk * Fixed after Jay's review. 
Integrated code from Soren (we now use the same 'magic number' for images without kernel & ramdisk) * Fixed pep8 errors * launch\_at was added in the previous commit, but a column named lauched\_at already exists and was confusing, so it was renamed to lauched\_on * logs inner exception in nova/utils.py->import\_class * Fix Bug #693963 * remove requirement of sudo on tests * merge trunk * Merge * adding zones to api * Support IPv6 * test commit * Commit with the test item list added back * Commit with the test item list temporarily removed from the local tree * The test item list disappeared for some reason, so added it again * Fixed a regression in nova.compute.manager introduced by the previous changes; added the CPUID and other check routines to nova.scheduler.manager.live\_migration * Fixed a regression in nova.compute.manager introduced by the previous changes; added the CPUID and other check routines to nova.scheduler.manager.live\_migration * Make nova work even when user has LANG or LC\_ALL configured * merged trunk, resolved trivial conflict * merged trunk, resolved conflict * Faked out handling for shared ip groups so they return something * another typo * applied power state conversion to test * trying again * typo * fixed the os api image test for glance * updated the xenstore methods to reflect that they write to the param record of xenstore, not the actual xenstore itself * fixed typo * Merged with trunk All tests passed Could not fix some pep8 errors in nova/virt/libvirt\_conn.py * fixed merge conflict * updated since dietz moved the limited function * fixed error occurring when tests used glance attributes, fixed docstrings * Merged again from trunk * fixed a few docstrings, added \_() for gettext * added \_() for gettext and a couple of pep8s * adds a reflection api * unit test - should be reworked * Moves implementation specific Openstack API code from the middleware to the drivers. Also cleans up a few areas and ensures all the API tests are passing again * PEP8 fix * One more time * Pep8 cleanup * Resolved merge conflict * Merged trunk * Trying to remove twisted dependencies, this gets everything working under nosetests * Merged Monty's branch * Merged trunk and resolved conflicts * Working diagnostics API; removed diagnostics DB model - not needed * merged trunk * merged trunk * Superfluous images include and added basic routes for shared ip groups * Simplifies and improves ldap schema * xenapi iscsi support + unittests * Fixed trunk and PEP8 cleanup * Merged trunk * Added reference in setup.py so that python setup.py test works now * merge lp:nova * better bin name, and pep8 * pep8 fixes * some pep8 fixes * removing xen/uml specific switches.
If they need special treatment, we can add it * add license * delete xtra dir * move euca-get-ajax-console up one directory * merge trunk * move port range for ajaxterm to flag * more tweaks * add in license * some cleanup * rewrite proxy to not use twisted * added power state logging to nova.virt.xenapi.vm\_utils * added suspend as a power state * last merge trunk before push * merge trunk, fixed unittests, added i18n strings, cleanups etc etc * And the common module * minor notes, commit before rewriting proxy with eventlet * There were a few unclaimed addresses in mailmap * first merge after i18n * remove some notes * Add Ryan Lane as well * added tests to ensure the easy api works as a backend for Compute API * fix commits from Anthony and Vish that were committed with the wrong email * remove some yields that snuck in * merge from trunk * Basic Easy API functionality * Fixes reboot (and rescue) to work even if libvirt doesn't know about the instance and the network doesn't exist * merged trunk * Fixes reboot (and rescue) to work even if libvirt doesn't know about the instance and the network doesn't exist * Adds a flag to use the X-Forwarded-For header to find the ip of the remote server. This is needed when you have multiple api servers with a load balancing proxy in front. It is a flag that defaults to False because if you don't have a sanitizing proxy in front, users could masquerade as other ips by passing in the header manually * Got basic xenstore operations working * Merged trunk * Modified InstanceDiagnostics and truncate action * removed extra files * merged trunk * Moves the ip allocation requests to the from the api host into calls to the network host made from the compute host * pep8 fix * merged trunk and fixed conflicts * Accidentally yanked the datetime line in auth * remove extra files that slipped in * merged trunk * add missing flag * Optimize creation of nwfilter rules so they aren't constantly being recreated * use libvirt python bindings instead of system call * fixed more conflicts * merged trunk again * add in support of openstack api * merge trunk and upgrade to cheetah templating * Optimize nwfilter creation and project filter * Merging trunk * fixed conflicts * Adding more comments regarding XS snapshots * working connection security * WSGI middleware for lockout after failed authentications of ec2 access key * Modifies nova-network to recreate important data on start * Puts the creation of nova iptables chains into the source code and cleans up rule creation. This makes nova play more nicely with other iptables rules that may be created on the host * Forgot the copyright info * i18n support for xs-snaps * Finished moving the middleware layers and fixed the API tests again * Zone scheduler added * Moved some things for testing * Merging trunk * Abstracted auth and ratelimiting more * Getting Snapshots to work with cloudservers command-line tool * merge trunk * Minor bug fix * Populate user\_data field from run-instances call parameter, default to empty string to avoid metadata base64 decoding failure, LP: #691598 * Adding myself and Antony Messerli to the Authors file * Fixes per-project vpns (cloudpipe) and adds manage commands and support for certificate revocation * merge trunk * merge antonymesserli's changes, fixed some formatting, and added copyright notice * merged i8n and fixed conflicts * Added networking protections readme * Moved xenapi into xenserver specific directory * after trunk merge * Fixes documentation builds for gettext.. 
* committing so that I can merge trunk changes * Log all XenAPI actions to InstanceActions * Merged trunk * merging trunk * merging trunk * Fix doc building endpoint for gettext * All merged with trunk and let's see if a new merge prop (with no pre-req) works. * Problem was with a missplaced parentheses. ugh * Adding me in the Authors file * Populate user\_data field from run-instances call parameter, default to empty string to avoid metadata base64 decoding failure, LP: #691598 * connecting ajax proxy to rabbit to allow token based security * remove a debugging line * a few more fixes after merge with trunk * merging in trunk * move prototype code from api into compute worker * Burnin support by specifying a specific host via availability\_zone for running instances and volumes on * Merged trunk * This stops the nova-network dhcp ip from being added to all of the compute hosts * prototype works with kvm. now moving call from api to compute * Style correction * fix reboot command to work even if a host is rebooted * Filter templates and dom0 from list\_instances() * removed unused import and fix docstring * merge fakerabbit fix and turn fake back on for cloud unit tests * Reworked fakerabbit backend so each connection has it's own. Moved queues and exchanges to be globals * PEP8 cleanup * Refactored duplicate rpc.cast() calls in nova/compute/api.py. Cleaned up some formatting issues * Log all XenAPI actions * correct xenapi resume call * activate fake rabbit for debugging * change virtualization to not get network through project * update db/api.py as well * don't allocate networks when getting vpn info * Added InstanceDiagnostics and InstanceActions DB models * PEP8 cleanup * Merged trunk * merge trunk * 1) Merged from trunk 2) 'type' parameter in VMHelper.fetch\_image converted in enum 3) Fixed pep8 errors 4) Passed unit tests * Remove ec2 config chain and move openstack versions to top-level application * Use paste.deploy for running the api server * pep8 and removed extra imports * add missing greenthread import * add a few extra joined objects to get instance * remove extra print statements * Tests pass after cleaning up allocation process * Merging trunk * Typo fix, stubbing out to use admin project for now * Close devnull filehandle * added suspend and resume * Rewrite of vif\_rules.py to meet coding standards and be more pythonic in general. Use absolute paths for iptables/ebtables/arptables in host-rules * Add raw disk image support * Add my @linux2go.dk address to .mailmap * fixed some pep8 business * directly copy ip allocation into compute * Minor spellchecking fixes * Adds support for Pause and Unpause of xenserver instances * Make column names more generic * don't add the ip to bridge on compute hosts * PEP8 fixups * Added InstanceActions DB model * initial commit of xenserver host protections * Merged trunk * Fixed pep8 errors * Integrated changes from Soren (raw-disk-images). Updated authors file. 
All tests passed * pep8 (again again) * pep8 (again) * small clean up * Added test code to the repository. Changed nova.compute.manager.pre\_live\_migration() because it sometimes returned a success value even when it had failed: the success return value is now True, a RemoteError is raised when no fixed\_ip is found, and nova.compute.manager.live\_migration was updated to match * Added test code to the repository. Changed nova.compute.manager.pre\_live\_migration() because it sometimes returned a success value even when it had failed: the success return value is now True, a RemoteError is raised when no fixed\_ip is found, and nova.compute.manager.live\_migration was updated to match * Support proxying api by using X-Forwarded-For * eventlet merge updates * Cleaned up TODOs, using flags now * merge trunk and minor fix (for whatever reason validator\_unittest did not get removed from run\_test.py) * fixed unittests and further clean-up post-eventlet merge * All API tests finally pass * Removing unneeded Trial specific code * A few more tweaks to get the OS API tests passing * Adding new install script plus changes to multinode install doc * Removing unneeded Trial specific code * Replaced the use of redis in fakeldap with a customized dict class. Auth unittests should now run fine without a redis server running, or without python-redis installed * Adding Ed Leafe to Authors file * Some tweaks * Adding in Ed Leafe so we can land his remove-redis test branch * Add wait\_for\_vhd\_coalesce * Some typo fixes * pep8 cleanup * Fixed some old code that was merged incorrectly * Replaced redis with a modified dict class * bug fixes * first revision after eventlet merge. Currently xenapi-unittests are broken, but everything else seems to be running okay * Integrated eventlet\_merge patch * Code reviewed * XenAPI Snapshots first cut * Fixed network test (thanks Vish!) and fixed run\_tests.sh * First pass at converting run\_tests.py to nosetests. The network and objectstore tests don't yet work. Also, we need to manually remove the sqlite file between runs * remerged for pep8 * pep8 * merged in project-vpns to get flag changes * clean up use of iptables chains * move some flags around * add conditional bind to linux net * make sure all network data is recreated when nova-network is rebooted * merged trunk * merged trunk, fixed conflicts and tests * Added Instance Diagnostics DB model * Put flags back in nova.virt.xenapi/vm\_utils * Removed unnecessary blank lines * Put flags back in vm\_utils * This branch removes most of the dependencies on twisted and moves towards the plan described by https://blueprints.launchpad.net/nova/+spec/unified-service-architecture * pep8 fixes for bin * PEP8 cleanups * use getent, update docstring * pep8 fixes * reviewed the FIXMEs, and spotted an uncaught exception in volume\_utils...yay! * fixed a couple of more syntax errors * Moved implementation specific stuff from the middleware into their respective modules * typo * fixed up openstack api images index and detail * fake session clean-up * Removed FakeInstance and introduced stubout for DB. Code clean-up * removed extra stuff used for debugging * Restore code which was changed for testing reasons to the original state. Kudos to Armando for spotting this * Make nova work even when user has LANG or LC\_ALL configured * Merged changes from trunk into the branch * Fixed the column names in the Host table; added support for FlatManager and FlatDHCPManager * merged with trunk.
fixed compute.pause test * fixup after merge with trunk * memcached requires strings not unicode * Fix 688220 Added dependency on Twisted>=10.1.0 to pip-requires * Make sure we properly close the bzr WorkingTree in our Authors up-to-datedness unit test * fixes for xenapi (thanks sandywalsh) * clean up tests and add overriden time method to utils * merged from upstream * add missing import * Adding back in openssh-lpk schema, as keys will likely be stored in LDAP again * basic conversion of xs-pause to eventlet done * brougth clean-up from unittests branch and tests * I made pep8 happy * \* code cleanup \* revised unittest approach \* added stubout and a number of tests * clean up code to use timeout instead of two keys * final cleanup * Restore alphabetical order in Authors file * removed temporary comment lines * Lots of PEP-8 work * refresh\_security\_group renamed to refresh\_security\_group\_rules * added volume tests and extended fake to support them * Make sure the new, consolidated template gets included * Make sure we unlock the bzr tree again in the authors unit test * The ppa was moved. This updates nova.sh to reflect that * merged upstream * remove some logging * Merged from trunk and fixed merge issues. Also fixed pep8 issues * Lockout middleware for ec2 api * updates per review * Initial work on i18n. This adds the installation of the nova domain in gettext to all the "endpoints", which are all the bin/\* files and run\_tests.py * For some reason, I forgot to commit the other endpoints.. * Remove default\_{kernel,ramdisk} flags. They are not used anymore * Don't attempt to fiddle with partitions for whole-disk-images * pep8 * Includes architecture on register. Additionally removes a couple lines of cruft * nothing * nothing * nothing * support for pv guests (in progress) * merge trunk * Now that we have a templating engine, let's use it. Consolidate all the libvirt templates into one, extending the unit tests to make sure I didn't mess up * first cut of unittest framework for xenapi * Added my contacts to Authors file * final cleanup, after moving unittest work into another branch * fixup after merge with trunk * added callback param to fake\_conn * added not implemented stubs for libvirt * merge with trey tests * Fixed power state update with Twisted callback * simplified version using original logic * moving xenapi unittests changes into another branch * Adds support to the ec2 api for filtering describe volumes by volume\_ids * Added LiveCD info as well as some changes to reflect consolidation of .conf files * Fix exception throwing with wrong instance type * Add myself * removing imports that should have not been there * second round for unit testing framework * Added Twisted version dependency into pip-requires * only needs work for distinguishing pv from hvm * Move security group refresh logic into ComputeAPI * Refactored smoketests to use novarc environment and to separate user and admin specific tests * Changed OpenStack API auth layer to inject a RequestContext rather than building one everywhere we need it * Elaborate a bit on ipsets comment * Final round of marking translation strings * First round of i18n-ifying strings in Nova * Initial i18n commit for endpoints. All endpoints must install gettext, which injects the \_ function into the builtins * Fixed spelling errors in index.rst * fix pep8 * Includes kernel and ramdisk on register. 
Additionally removes a couple lines of cruft * port new patches * merge-a-tat-tat upstream to this branch * Format fixes and modification of Vish's email address * There is always the odd change that one forgets! * \* pylint fixes \* code clean-up \* first cut for xenapi unit tests * added pause and unpause to fake connection * merged changes from sandy's branch * added unittest for pause * add back utils.default\_flagfile * removed a few more references to twisted * formatting and naming cleanup * remove service and rename service\_eventlet to service * get service unittests running again * whitespace fix * make nova binaries use eventlet * Converted the instance table to use a uuid instead of an auto\_increment ID and a random internal\_id. I had to use a String(32) column with hex and not a String(16) with bytes because SQLAlchemy doesn't like non-unicode strings going in for String types. We could try another type, but I didn't want a primary\_key on blob types * remove debug messages * merge with trey * pause and unpause code/tests in place. To the point it stuffs request in the queue * import module and not class directly as per Soren recommendation * Make XenServer VM diagnostics available through nova.virt.xenapi * Merged trunk * Added exception handling to get\_rrd() * Changed OpenStack API auth layer to inject a RequestContext rather than building one everywhere we need it * changed resume to unpause * Import module instead of function * filter describe volumes by supplied ids. Includes unittest * merging sandy's branch * Make get\_diagnostics async * raw instances can now be launched in xenapi (only as hvm at the moment) * pause from compute.manager <-> xenapi * Merged Armando's XenAPI fix * merge with trunk to pull in admin-api branch * Flag to define which operations are exposed in the OpenStack API, disabling all others * Fixed Authors conflict and re-merged with trunk * fixes exception throwing with wrong instance type * Ignore security group rules that reference foreign security groups * fixed how the XenAPI library is loaded * remove some unused files * port volume manager to eventlet also * intermediate commit to checkpoint progress * some pylint caught changes to compute * added to Authors * adds bzr to the list of dependencies in pip-require so that upon checkout using run\_tests.sh succeeds * merge conflict * merged upstream changes * add bzr to the dev dependencies * Fixed docstrings * Merged trunk * Got get\_diagnostics in working order * merged updates to trunk * merge trunk * typo fix * removing extraneous config lines * Finished cleaning up the openstack servers API, it no longer touches the database directly. Also cleaned up similar things in ec2 API and refactored a couple methods in nova.compute.api to accommodate this work * Pushed terminate instance and network manager/topic methods into network.compute.api * Merged trunk * Moved the reboot/rescue methods into nova.compute.api * PEP8 fixes * Setting the default schema version to the new schema * Adding support for choosing a schema version, so that users can more easily migrate from an old schema to the new schema * merged with trunk. All clear! * Removing novaProject from the schema. This change may look odd at first; here's how it works: * test commit * Removed comments; addressed review feedback on README.live\_migration.txt * This change adds better support for LDAP integration with pre-existing LDAP infrastructures.
A new configuration option has been added to specify the LDAP driver should only modify/add/delete attributes for user entries * More pep8 fixes to remove deprecated functions * pep8 fix * Clarifying previously committed exception message * Raising an exception if the user doesn't exist before trying to modify its attributes * Removing redundant check * Added livecd instructions plus fixed references to .conf files * pylint fixes * Initial diagnostics import -- needs testing and cleanup * Added a script to use OpenDJ as an LDAP server instead of OpenLDAP. Also modified nova.sh to add a USE\_OPENDJ option, that will be checked when USE\_LDAP is set * Reverting last change * a few more things ironed out * Make sure Authors check also works for pending merges (otherwise stuff can get merged that will make the next merge fail this check) * It looks like Soren fixed the author file, can I hit the commit button? * merge trunk * Make sure Authors check also works for pending merges (otherwise stuff can get merged that will make the next merge fail this check) * Add a helpful error message to nova-manage in case of NoMoreNetworks * Add Ryan Lucio to Authors * Adding myself to the authors list * Add Ryan Lucio to Authors * Addresses bug 677475 by changing the DB column for internal\_id in the instances table to be unsigned * importing XenAPI module loaded late * Added docstring for get\_instances * small fixes on Exception handling * first test commit * and yet another pylint fix * fixed pylint violations that slipped out from a previous check * \* merged with lp:~armando-migliaccio/nova/xenapi-refactoring \* fixed pylint score \* complied with HACKING guidelines * addressed review comments, complied with HACKING guidelines * adding README.livemigration.txt * Merged the live migration feature on top of rev439; this version has no EBS support and no CPU flag check * modified a few files * Fixed conflicts with gundlach's fixes * Remove dead test code * Add iptables based security groups implementation * Merged gundlach's fixes * Don't wrap HTTPAccepted in a fault. Correctly pass kwargs to update\_instance * fixed import module in \_\_init\_\_.py * minor changes to docstrings * added interim solution for target discovery. Now info can either be passed via flags or discovered via iscsiadm. Long term solution is to add a few more fields to the db in the iscsi\_target table with the necessary info and modify the iscsi driver to set them * merge with lp:~armando-migliaccio/nova/xenapi-refactoring * merge trunk * moved XenAPI namespace definition into xenapi/\_\_init\_\_.py * pylint and pep8 fixes * Decreased the maximum value for instance-id generation from uint32 to int32 to avoid truncation when being entered into the instance table. Reverted fix to make internal\_id column a uint * Finished cleaning up the openstack servers API, it no longer touches the database directly.
Also cleaned up similar things in ec2 API and refactored a couple methods in nova.compute.api to accomodate this work * Merged reboot-rescue into network-manager * Merged trunk * Fixes a missing step (nova-manage network create IP/nn n nn) in the single-node install guide * Tired of seeing various test files in bzr stat * Updated sqlalchemy model to make the internal\_id column of the instances table as unsigned integer * \* Removes unused schema \* Removes MUST uid from novaUser \* Changes isAdmin to isNovaAdmin \* Adds two new configuration options: \*\* ldap\_user\_id\_attribute, with a default of uid \*\* ldap\_user\_name\_attribute, with a default of cn \* ldapdriver.py has been modified to use these changes * Pushed terminate instance and network manager/topic methods into network.compute.api * Fix bugs that prevented OpenStack API from supporting server rename * pep8 * Use newfangled compute\_api * Update tests to use proper id * Fixing single node install doc * Oops, update 'display\_name', not 'name'. And un-extract-method * Correctly translate instance ids to internal\_ids in some spots we neglected * Added test files to be ignored * Consolidated the start instance logic in the two API classes into a single method. This also cleans up a number of small discrepencies between the two * Moved reboot/rescue methods into nova.compute.api * Merged trunk and resolved conflicts. Again * Instances are assigned a display\_name if one is not passed in -- and now, they're assigned a display\_name even if None is explicitly passed in (as the EC2 API does.) * Merged trunk and resolved conflicts * Default Instance.display\_name to a value even when None is explicitly passed in * Refactor nwfilter code somewhat. For iptables based firewalls, I still want to leave it to nwfilter to protect against arp, mac, and ip spoofing, so it needed a bit of a split * Add a helpful error message to nova-manage in case of NoMoreNetworks * minor refactoring after merge * merge lp:~armando-migliaccio/nova/refactoring * merge trunk * typo fix * moved flags into xenapi/novadeps.py * Add a simple abstraction for firewalls * fix nova.sh to reflect new location of ppa * Changed null\_kernel flag from aki-00000000 to nokernel * Guarantee that the OpenStack API's Server-related responses will always contain a "name" value. And get rid of a redundant field in models.py * Going for a record commits per line changes ratio * Oops, internal\_id isn't available until after a save. This code saves twice; if I moved it into the DB layer we could do it in one save. However, we're moving to one sqlite db per compute worker, so I'd rather have two saves in order to keep the logic in the right layer * Todd points out that the API doesn't require a display\_name, so let's make a default. That way the OpenStack API can rest assured that its server responses will always have a name key * Adds in more documentation contributions from Citrix * Remove duplicate field and make OpenStack API return server.name for EC2-API-created instances * Move cc\_host and cc\_port flags into nova/network/linux\_net.py. They weren't used anywhere else * Add include\_package\_data=True to setup.py * With utils.default\_flagfile() in its old location, the flagfile isn't being read -- twistd.serve() loads flags earlier than that point. 
Move the utils.default\_flagfile() call earlier so the flagfile is included * Removed a blank line * Broke parts of compute manager out into compute.api to separate what gets run on the API side vs the worker side * Move default\_flagfile() call to where it will be parsed in time to load the flagfile * minor refactoring * Move cc\_host and cc\_port flags into nova/network/linux\_net.py. They weren't used anywhere else * Added a script to use OpenDJ as an LDAP server instead of OpenLDAP. Also modified nova.sh to add an USE\_OPENDJ option, that will be checked when USE\_LDAP is set * Fixed termie's tiny bits from the prior merge request * Delete unused flag in nova.sh * Moving the openldap schema out of nova.sh into it's own files, and adding sun (opends/opendj/sun directory server/fedora ds) schema files * OpenStack API returns the wrong x-server-management-url. Fix that * Cleaned up pep8 errors * brought latest changes from trunk * iscsi volumes attach/detach complete. There is only one minor issue on how to discover targets from device\_path * Fix unit tests * Fix DescribeImages EC2 API call * merged Justin Santa Barbara's raw-disk-image back into the latest trunk * If only I weren't so lazy * Rename imageSet variable to images * remove FAKE\_subdomain reference * Return the correct server\_management\_url * Default flagfile moved in trunk recently. This updates nova.sh to run properly with the new flagfile location * Correctly handle imageId list passed to DescribeImages API call * update of nova.sh because default flagfile moved * merged trunk * Add a templating mechanism in the flag parsing * Adjust state\_path default setting so that api unit tests find things where they used to find them * Import string instead of importing Template from string. This is how we do things * brought the xenapi refactoring in plus trunk changes * changes * pep8 fixes and further round of refactoring * Rename cloudServersFault to computeFault -- I missed this Rackspace branding when we renamed nova.api.rackspace to nova.api.openstack * Make sure templated flags work across calls to ParseNewFlags * Add include\_package\_data=True to setup.py * fixed deps * first cut of the refactoring of the XenAPIConnection class. Currently the class merged both the code for managing the XenAPI connection and the business logic for implementing Nova operations. If left like this, it would eventually become difficult to read, maintain and extend. The file was getting kind of big and cluttered, so a quick refactoring now will save a lot of headaches later * other round of refactoring * further refactoring * typos and pep8 fixes * first cut of the refactoring of the XenAPIConnection class. Currently the class merged both the code for managing the XenAPI connection and the business logic for implementing Nova operations. If left like this, it would eventually become difficult to read, maintain and extend. The file was getting kind of big and cluttered, so a quick refactoring now will save a lot of headaches later * PEP fixes * Adding support for modification only of user accounts * This modification should have occured in a different branch. 
Reverting * added attach\_volume implementation * work on attach\_volume, with a few things to iron out * A few more changes: \* Fixed up some flags \* Put in an updated nova.sh \* Broke out metadata forwarding so it will work in flatdhcp mode \* Added descriptive docstrings explaining the networking modes in more detail * small conflict resolution * first cut of changes for the attach\_volume call * The image server should throw not found errors, don't need to check in compute manager * Consolidated the start instance logic in the two API classes into a single method. This also cleans up a number of small discrepencies between the two * Setting "name" back to "cn", since id and name should be separate * Adding support for modification only of user accounts * don't error on edge case where vpn has been launched but fails to get a network * Make sure all workers look for their flagfile in the same spot * Fix typo "nova.util" -> "nova.utils" * Fix typo "nova.util" -> "nova.utils" * Added a .mailmap that maps addresses in bzr to people's real, preferred e-mail addresses. (I made a few guesses along the way, feel free to adjust according to what is actually the preferred e-mail) * Add a placeholder in doc/build. Although bzr handles empty directories just fine, setuptools does not, so to actually ship this directory in the tarball, we need a file in it * Add a placeholder in doc/build. Although bzr handles empty directories just fine, setuptools does not, so to actually ship this directory in the tarball, we need a file in it * Merged trunk * pep8 * merged trunk, added recent nova.sh * fix typos in docstring * docstrings, more flags, breakout of metadata forwarding * doc/build was recently accidentally removed from VCS. This adds it back, which makes the docs build again * Add doc/build dir back to bzr * Make aws\_access\_key\_id and aws\_secret\_access\_key configurable * add vpn ping and optimize vpn list * Add an alias for Armando * the serial returned by x509 is already formatted in hex * Adding developer documentation - setting up dev environment and how to add to the OpenStack API * Add a --logdir flag that will be prepended to the logfile setting. This makes it easier to share a flagfile between multiple workers while still having separate log files * Address pep8 complaints * Address PEP8 complaints * Remove FAKE\_subdomain from docs * Adding more polish * Adding developer howtos * Remove FAKE\_subdomain from docs * Make aws\_access\_key\_id and aws\_secret\_access\_key configurable * updated nova.sh * added flat\_interface for flat\_dhcp binding * changed bridge\_dev to vlan\_interface * * Add a --logdir flag that will be prepended to the logfile setting. This makes it easier to share a flagfile between multiple workers while still having separate log files * added svg files (state.svg is missing because its source is a screen snapshot) * Unify the location of the default flagfile. Not all workers called utils.default\_flagfile, and nova-manage explicitly said to use the one in /etc/nova/nova-manage.conf * Set and use AMQP retry interval and max retry FLAGS * Incorporating security groups info * Rename cloudServersFault (rackspace branding) to computeFault. 
Fixes bug lp680285 * Use FLAGS instead of constants * Incorporating more networking info * Make time.sleep() non-blocking * Removed unnecessary continue * Update Authors and add a couple of names to .mailmap (from people who failed to set bzr whoami properly) * Refactor AMQP retry loop * Allows user to specify hosts to listen on for nova-api and -objectstore * Make sure all the libvirt templates are included in the tarball (by replacing the explicitly listed set with a glob pattern) * fixed pep8 violations * Set and use AMQP retry interval and max retry constants * pep8 violations fix * added placeholders * added test for invalid handles * Make sure all templates are included (at least rescue tempaltes didn't used to be included) * Check for running AMQP instances * Use logging.exception instead * Reverted some changes * Added some comments * Adds images (only links one in), start for a nova-manage man file, and also documents all nova-manage commands. Can we merge it in even though the man page build isn't working? * Added some comments * Check for running AMQP instances * first cut of fixes for bug #676128 * Removed .DS\_Store files everywhere, begone! * Moves the EC2 API S3 image service into nova.service. There is still work to be done to make the APIs align, but this is the first step * PEP8 fixes, 2 lines were too long * First step to getting the image APIs consolidated. The EC2 API was using a one-off S3 image service wrapper, but this should be moved into the nova.image space and use the same interface as the others. There are still some mismatches between the various image service implementations, but this patch was getting large and wanted to keep it within a resonable size * Improved Pylint Score * Fixes improper display of api error messages that happen to be unicode * Make sure that the response body is a string and not unicode * Soren updated setup.py so that the man page builds. Will continue working on man pages for nova-compute and nova-network * Overwrite build\_sphinx, making it run once for each of the html and man builders * fixes flatdhcp, updates nova.sh, allows for empty bridge device * Update version to 2011.1 as that is the version we expect to release next * really adding images * adding images * Documenting all nova-manage commands * Documenting all nova-manage commands * Fixes eventlet race condition in cloud tests * fix greenthread race conditions in trunk and floating ip leakage * Testing man page build through conf.py * Improved Pylint Score * adjusting images size and bulleted list * merged with trunk * small edit * Further editing and added images * Update version to 2011.1 as that is the version we expect to release next * ec2\_api commands for describe\_addresses and associate\_address are broken in trunk. This happened during the switch to ec2\_id and internal\_id. We clearly didn't have any unit tests for this, so I've added a couple in addition to the three line change to actually fix the bugs * delete floating ips after tests * remove extra line and ref. to LOG that doesn't exist * fix leaking floating ip from network unittests and use of fakeldap driver * Adds nova-debug to tools directory, for debugging of instances that lose networking * fixes errors in describe address and associate address. 
Adds test cases * Ryan\_Lane's code to handle /etc/network not existing when we try to inject /etc/network/interfaces into an image * pep8 * First dump of content related to Nova RPC and RabbitMQ * Add docstrings to any methods I touch * pep8 * PEP8 fixes * added myself to Authors file. Enjoy spiders * Changed from fine-grained operation control to binary admin on/off setting * Changed from fine-grained operation control to binary admin on/off setting * Lots of documentation and docstring updates * The docs are just going to be wrong for now. I'll file a bug upstream * Change how wsgified doc wrapping happens to fix test * merge to trunk * pep8 * Adding contributors and names * merge with trunk * base commit * saw a duplicate import ... statement in the code while reading through unit tests - this removes the dupe * removed redundant unit test import * add in bzr link * adding a bit more networking documentation * remove tab * fix title * tweak * Fix heading * merge in anne's changes * tweak * Just a few more edits, misspellings and the like * fix spacing to enable block * merge to remote * unify env syntax * Add sample puppet scripts * fix install guide * getting started * create SPHINX\_DEBUG env var. Setting this will disable aggressive autodoc generation. Also provide some sample for P syntax * fix conf file from earlier merge * notes, and add code to enable sorted "..todo:: P[1-5] xyz" syntax * merge in more networking docs - still a work in progress * anne's changes to the networking documentation * Updated Networking doc * anne gentle's changes to community page * merge in heckj's corrections to multi-node install * Added a .mailmap that maps addresses in bzr to people's real, preferred e-mail addresses. (I made a few guesses along the way, feel free to adjust according to what is actually the preferred e-mail) * Updated community.rst to fix a link to the IRC logs * merging in changes from ~anso/nova/trunkdoc * fixed another spacing typo causing poor rendering * fixed spacing typo causing poor rendering * merge in anne's work * add docs for ubuntu 4, 10, others * Updated Cloud101 and admonition color * merge heckj's multi install notes * working on single node install * updating install notes to reference Vish' nova.sh and installing in MYSQL * Add Flat mode doc * Add Flat mode doc * Add Flat mode doc * Add VLAN Mode doc * Add VLAN Mode doc * merge in anne's changes * home page tweaks * Updated CSS and community.rst file * modifications and additions based on doc sprint * incorporate some feedback from todd and anne * merge in trunk * working on novadoc structure * add some info on authentication and keys * Since we're autodocumenting from a sphinx ext, we can scrap it in Makefile * Use the autodoc tools in the setup.py build\_sphinx toolchain * Fix include paths so setup.py build\_sphinx works again * Cleanups to doc process * quieter doc building (less warnings) * File moves from "merge" of termie's branch * back out stacked merge * Doc updates: \* quieter build (fewer warnings) \* move api reference out of root directory \* auto glob api reference into a TOC \* remove old dev entries for new-fangled auto-generated docs * Normalization of Dev reference docs * Switch to module-per-file for the module index * Allow case-by-case overriding of autodocs * add exec flags, apparently bzr shelve/unshelve does not keep track of them * Build autodocs for all our libraries * add dmz to flags and change a couple defaults * Per-project vpns, certificates, and revocation * remove finished todo 
* Fix docstrings for wsgified methods * fix default twitter username * shrink tweet text a bit * Document nova.sh environment * add twitter feed to the home page * Community contact info * small tweaks before context switch * use include to grab todd's quickstart * add in custom todo, and custom css * Format TODO items for sphinx todo extension * additions to home page * Change order of sections so puppeting is last, add more initial setup tasks * update types of services that may run on machines * Change directory structure for great justice! * Refactored smoketests to use novarc environment and to separate user and admin specific tests * start adding info to multi-node admin guide * document purpose of documentation * Getting Started Guide * Nova quickstart: move vish's novascript into contrib, and convert reademe.md to a quickstart.rst * merge trunk * Add a templating mechanism in the flag parsing. Add a state\_path flag that will be used as the top-level dir for all other state (such as images, instances, buckets, networks, etc). This way you only need to change one flag to put all your state in e.g. /var/lib/nova * add missing file * Cleanup nova-manage section * have "contents" look the same as other headings * Enables the exclusive flag for DirectConsumer queues * Ensures that keys for context from the queue are passed to the context constructor as strings. This prevents hangs on older versions of python that can't handle unicode kwargs * Fix for bug #640400, enables the exclusive flag on the temporary queues * pep8 whitespace and line length fixes * make sure context keys are not unicode so they can be passed as kwargs * merged trunk * merged source * prettier theme * Added an extra argument to the objectstore listen to separate out the listening host from the connecting host * Change socket type in nova.utils.get\_my\_ip() to SOCK\_DGRAM. This way, we don't actually have to set up a connection. Also, change the destination host to an IP (chose one of Google's DNS's at random) rather than a hostname, so we avoid doing a DNS lookup * Fix for bug#613264, allowing hosts to be specified for nova-api and objectstore listeners * Fixes issue with security groups not being associated with instances * Doc cleanups * Fix flags help display * Change socket type in nova.utils.get\_my\_ip() to SOCK\_DGRAM. This way, we don't actually have to set up a connection.
Also, change the destination host to an IP (chose one of Google's DNS's at random) rather than a hostname, so we avoid doing a DNS lookup * ISCSI Volume support * merged * more descriptive title for cloudpipe * update of the architecture and fix some links * Fixes after trunk merge * removed some old instructions and updated concepts * merge * Documentation on Services, Managers, and Drivers * Document final undocumented python modules * merged trunk * cloudpipe docs * Fixed --help display for non-twisted bin/\* commands * Adds support for multiple API ports, one for each API type (OS, EC2) * Fixed tests to work with new default API argument * Added support for OpenStack and EC2 APIs to run on different ports * More docs * Language change for conformity * Add ec2 api docs * Exceptions docs * API endpoint documentation * basics to get proxied ajaxterm working with virsh * :noindex: on the fakes page for virt.fakes which is included in compute.rst * Virt documentation * Change retrieval of security groups from kwargs so they are associated properly and add test to verify * don't check for vgroup in fake mode * merged trunk, just in case * Update compute/disk.py docs * Change volume TODO list * Volume documentation * Remove fakes duplication * Update database docs * Add support for google analytics to only the hudson-produced docs * Changes to conf.py * Updated location of layout.html and change conf.py to use a build variable * Update database page a bit * Fakes cleanup (stop duplicate autodoc of FakeAOEDriver) * Document Fakes * Remove "nova Packages and Dependencies" * Finished TODO item * Pep-257 * Pep-257 cleanups * Clean up todos and the like for docs * A shell script for showing modules that aren't documented in .rst files * merge trunkdoc * link binaries section to concepts * :func: links to python functions in the documentation * Todo cleanups in docs * cleanup todos * fix title levels * wip architecture, a few auth formatting fixes, binaries, and overview * volume cleanups * Remove objectstore, not referenced anywhere * Clean up volumes / storage info * Moves db writes into compute manager class. Cleans up sqlalchemy model/api to remove redundant calls for updating what is really a dict * Another heading was too distracting, use instead * Fix underlining -> heading in rst file * Whitespace and docstring cleanups * Remove outdated endpoint documentation * Clean up indentation error by preformatting * Add missing rst file * clean up the compute documentation a bit * Remove unused updated\_data variable * Fix wiki link * added nova-manage docs * merged and fixed conflicts * updates to auth, concepts, and network, fix of docstring * cleanup rrd doc generation * Doc skeleton from collaborative etherpad hack session * OK, let's try this one more time * Doc updates * updates from review, fix models.get and note about exception raising * Style cleanups and review from Eric * New structure for documentation * Fixes PEP8 violations from the last few merges * More PEP8 fixes that were introduced in the last couple commits * Adding Google Analytics code to nova.openstack.org * Fixes service unit tests after tornado excision * Added Google Analytics code * renamed target\_id to iscsi\_target * merged gundlach's excision * Oops, didn't mean to check this one in. 
Ninja-patch * Delete BaseTestCase and with it the last reference to tornado * fix completely broken ServiceTestCase * Removes some cruft from sqlalchemy/models.py like unused imports and the unused str\_id method * Adds rescue and unrescue commands * actually remove the conditional * fix tests by removing missed reference to prefix and unnecessary conditional in generate\_uid * Making net injection create /etc/network if non-existent * Documentation was missing; added * Moving the openldap schema out of nova.sh into its own files, and adding sun (opends/opendj/sun directory server/fedora ds) schema files * validates device parameter for attach-volume * add nova-debug to setup.py * nova-debug, relaunch an instance with a serial console * Remove the last vestigial bits of tornado code still in use * pep8 cleanup * print the exception on fail, because it doesn't seem to reraise it * use libvirt connection for attaching disks and avoid the symlink * update error message * Exceptions in the OpenStack API will be converted to Faults as they should be, rather than barfing a stack trace to the user * pep8 * pep8 * Duplicate the two trivial escaping functions remaining from tornado's code and remove the dependency * more bugfixes, flag for local volumes * fix bugs, describe volumes, detach on terminate * ISCSI Volume support * Removed unused imports and left over references to str\_id * logging.warn not raise logging.Warn * whitespace * move create\_console to cloud.py from admin.py * merge lp:nova * add NotFound to fake.py and document it * add in the xen rescue template * pep 8 cleanup and typo in resize * add methods to cloud for rescue and unrescue * update tests * merged trunk and fixed conflicts/changes * part way through porting the codebase off of twisted * Another pep8 cleanup branch for nova/tests, should be merged after lp:~eday/nova/pep8-fixes-other. After this, the pep8 violation count is 0! * Changes block size for dd to a reasonable number * Another pep8 cleanup branch for nova/api, should be merged after lp:~eday/nova/pep8-fixes

2010.1
------

* Created Authors file * Actually adding Authors file * Created Authors file and added to manifest for Austin Release * speed up disk generation by increasing block size * PEP8 cleanup in nova/tests, except for tests. There should be no functional changes here, just style changes to get violations down * PEP8 cleanup in nova/\*, except for tests. There should be no functional changes here, just style changes to get violations down * PEP8 cleanup in nova/db. There should be no functional changes here, just style changes to get violations down * PEP8 cleanup in nova/api. There should be no functional changes here, just style changes to get violations down * PEP8 and pylint cleanup. There should be no functional changes here, just style changes to get violations down * Moves db writes into compute manager class. Cleans up sqlalchemy model/api to remove redundant calls for updating what is really a dict * validate device in AttachDisk * Cleanup of doc for dependencies (redis optional, remove tornado, etc).
Please check for accuracy * Delays the creation of the looping calls that that check the queue until startService is called * Made updates based on review comments * Authorize image access instead of just blindly giving it away * Checks the pid of dnsmasq to make sure it is actually referring to the right process * change boto version from 1.9b1 to 1.9b in pip-requires * Check the pid to make sure it refers to the correct dnsmasq process * make sure looping calls are created after service starts and add some tests to verify service delegation works * fix typo in boto line of pip-requires * Updated documentation * Update version set in setup.py to 2010.1 in preparation for Austin release * Also update version in docs * Update version to 2010.1 in preparation for Austin release * \* Fills out the Parallax/Glance API calls for update/create/delete and adds unit tests for them. \* Modifies the ImageController and GlanceImageService/LocalImageService calls to use index and detail routes to comply perfectly with the RS/OpenStack API * Makes disk.partition resize root drive to 10G, unless it is m1.tiny which just leaves it as is. Larger images are just used as is * reverted python-boto version in pip-requires to 1.9b1 * Construct exception instead of raising a class * Authorize Image before download * Add unit test for XML requests converting errors to Faults * Fixes https://bugs.launchpad.net/nova/+bug/663551 by catching exceptions at the top level of the API and turning them into Faults * Adds reasonable default local storage gb to instance sizes * reverted python-boto version in pip-requires to 1.9b1.\ * Fix typo in test case * Remember to call limited() on detail() in image controller * Makes nova-dhcpbridge notify nova-network on old network lease updates * add reasonable gb to instance types * it is flags.DEFINE\_integer, not FLAGS.define\_int * Makes disk.partition resize root drive to 10G, unless it is m1.tiny which just leaves it as is. Larger images are just used as is * update leases on old leases as well * Adds a simple nova-manage command called scrub to deallocate the network and remove security groups for a projeect * Refresh MANIFEST.in to make the tarball include all the stuff that belongs in the tarball * Added test case to reproduce bug #660668 and provided a fix by using the user\_id from the auth layer instead of the username header * Add the last few things to MANIFEST.in * Also add Xen template to manifest * Fix two problems with get\_console\_log: \* libvirt has this annoying "feature" where it chown()s your console to the uid running libvirt. That gets in the way of reading it. Add a call to "sudo chown ...." right before we read it to make sure it works out well. \* We were looking in the wrong directory for console.log. \*blush\* * This branch converts incoming data to the api into the proper type * Fixes deprecated use of context in nova-manage network create * Add a bunch of stuff to MANIFEST.in that has been added to the tree over the last couple of months * Fix the --help flag for printing help on twistd-based services * Fix two problems with get\_console\_log: libvirt has this annoying "feature" where it chown()s your console to the uid running libvirt. That gets in the way of reading it. We were looking in the wrong directory for console.log. 
\*blush\* * Fix for bug 660818 by adding the resource ID argument * Reorg the image services code to push glance stuff into its own directory * Fix some unit tests: \* One is a race due to the polling nature of rpc in eventlet based unit tests. \* The other is a more real problem. It was caused by datastore.py being removed. It wasn't caught earlier because the .pyc file was still around on the tarmac box * Add a greenthread.sleep(0.3) in get\_console\_output unit test. This is needed because, for eventlet based unit tests, rpc polls, and there's a bit of a race. We need to fix this properly later on * Perform a redisectomy on bin/nova-dhcpbridge * Removed 'and True' oddity * use context for create\_networks * Make Redis completely optional: * make --help work for twistd-based services * trivial style change * prevent leakage of FLAGS changes across tests * run\_tests.sh presents a prompt: * Also accept 'y' * A few more fixes for deprecations * make run\_tests.sh's default perform as expected * Added test case to reproduce bug #660668 and provided a fix by using the user\_id from the auth layer instead of the username header * get flags for nova-manage and fix a couple more deprecations * Fix for bug#660818, allows tests to pass since delete expects a resource ID * This branch modifies the fixes all of the deprecation warnings about empty context. It does this by adding the following fixes/features \* promotes api/context.py to context.py because it is used by the whole system \* adds more information to the context object \* passes the context through rpc \* adds a helper method for promoting to admin context (elevate()) \* modifies most checks to use context.project\_id instead of context.project.id to avoid trips to the database * timestamps are passed as unicode * Removed stray spaces that were causing an unnecessary diff line * merged trunk * Minimized diff, fixed formatting * remove nonexistent exception * Merged with trunk, fixed broken stuff * revert to generic exceptions * fix indent * Fixes LP Bug#660095 * Move Redis code into fakeldap, since it's the only thing that still uses it. Adjust auth unittests to skip fakeldap tests if Redis isn't around. Adjust auth unittests to actually run the fakeldap tests if Redis /is/ around * fix nosetests * Fixes a few concurrency issues with creating volumes and instances. Most importantly it adds retries to a number of the volume shell commands and it adds a unique constraint on export\_devices and a safe create so that there aren't multiple copies of export devices in the database * unit tests and fix * call stuff project\_id instead of project * review fixes * fix context in bin files * add scrub command to clean up networks and sec groups * merged trunk * merged concurrency * review comments * Added a unit test but not integrated it * merged trunk * fix remaining tests * cleaned up most of the issues * remove accidental paste * use context.project\_id because it is more efficient * elevate in proper places, fix a couple of typos * merged trunk * Fixes bug 660115 * Address cerberus's comment * Fix several problems keeping AuthMiddleware from functioning in the OpenStack API * Implement the REST calls for create/update/delete in Glance * Adds unit test for WSGI image controller for OpenStack API using Glance Service * Fixes LP Bug#660095 * Xen support * Adds flat networking + dhcpserver mode * This patch removes the ugly network\_index that is used by VlanManager and turns network itself into a pool. 
It adds support for creating the networks through an api command: nova-manage network create # creates all of the networks defined by flags or nova-manage network create 5 # create the first five networks * Newlines again, reorder imports * Remove extraneous newlines * Fix typo, fix import * merged upstream * cleanup leftover addresses * super teardown * fix tests * merged trunk * merged trunk * merged trunk * merged trunk * Revert the conversion to 64-bit ints stored in a PickleType column, because PickleType is incompatible with having a unique constraint * Revert 64 bit storage and use 32 bit again. I didn't notice that we verify that randomly created uids don't already exist in the DB, so the chance of collision isn't really an issue until we get to tens of thousands of machines. Even then we should only expect a few retries before finding a free ID * Add design doc, docstrings, document hyper-v wmi, python wmi usage. Adhere to pep-8 more closely * This patch adds support for EC2 security groups using libvirt's nwfilter mechanism, which in turn uses iptables and ebtables on the individual compute nodes. This has a number of benefits: \* Inter-VM network traffic can take the fastest route through the network without our having to worry about getting it through a central firewall. \* Not relying on a central firewall also removes a potential SPOF. \* The filtering load is distributed, offering great scalability * Change internal\_id from a 32 bit int to a 64 bit int * 32 bit internal\_ids become 64 bit. Since there is no 64 bit native type in SqlAlchemy, we use PickleType which uses the Binary SqlAlchemy type under the hood * Make Instance.name a string again instead of an integer * Now that the ec2 id is not the same as the name of the instance, don't compare internal\_id [nee ec2\_id] to instance names provided by the virtualization driver. Compare names directly instead * Fix bug 659330 * Catch exception.NotFound when getting project VPN data * Improve the virt unit tests * Remove spurious project\_id addition to KeyPair model * APIRequestContext.admin is no more. * Rename ec2\_id\_list back to instance\_id to conform to EC2 argument spec * Fix bug 657001 (rename all Rackspace references to OpenStack references) * Extracts the kernel and ramdisk id from manifests and puts in into images' metadata * Fix EC2 GetConsoleOutput method and add unit tests for it * Rename rsapi to osapi, and make the default subdomain for OpenStack API calls be 'api' instead of 'rs' * Fix bug 658444 * Adds --force option to run\_tests.sh to clear virtualenv. Useful when dependencies change * If machine manifest includes a kernel and/or ramdisk id, include it in the image's metadata * Rename ec2 get\_console\_output's instance ID argument to 'instance\_id'. It's passed as a kwarg, based on key in the http query, so it must be named this way * if using local copy (use\_s3=false) we need to know where to find the image * curl not available on Windows for s3 download. also os-agnostic local copy * Register the Hyper-V module into the list of virt modules * hyper-v driver created * Twisted pidfile and other flag parameters simply do not function on Windows * Renames every instance of "rackspace" in the API and test code base. 
Also includes a minor patch for the API Servers controller to use images correctly in the absence of Glance * That's what I get for not using a good vimrc * Mass renaming * Start stripping out the translators * Remove redis dependency from RS Images API * Remove redis dependency from Images controller * Since FLAGS.images\_path was not set for nova-compute, I could not launch instances due to an exception at \_fetch\_local\_image() trying to access to it. I think that this is the reason of Bug655217 * Imported images\_path from nova.objectstore for nova-compute. Without its setting, it fails to launch instances by exception at \_fetch\_local\_image * Defined images\_path for nova-compute. Without its setting, it fails to launch instances by exception at \_fetch\_local\_image * Cleans up a broken servers unit test * Huge sweeping changes * Adds stubs and tests for GlanceImageService and LocalImageService. Adds basic plumbing for ParallaxClient and TellerClient and hooks that into the GlanceImageService * Typo * Missed an ec2\_id conversion to internal\_id * Cleanup around the rackspace API for the ec2 to internal\_id transition * merge prop fixes * A little more clean up * Replace model.Instance.ec2\_id with an integer internal\_id so that both APIs can represent the ID to external users * Fix clause comparing id to internal\_id * Adds unit test for calling show() on a non-existing image. Changes return from real Parallax service per sirp's recommendation for actual returned dict() values * Remove debugging code, and move import to the top * Make (some) cloud unit tests run without a full-blown set up * Stub out ec2.images.list() for unit tests * Make rpc calls work in unit tests by adding extra declare\_consumer and consume methods on the FakeRabbit backend * Add a connect\_to\_eventlet method * Un-twistedify get\_console\_ouptut * Create and destroy user appropriately. Remove security group related tests (since they haven't been merged yet) * Run the virt tests by default * Keep handles to loggers open after daemonizing * merged trunk and fixed tests * Cleans up the unit tests that are meant to be run with nosetests * Update Parallax default port number to match Glance * One last bad line * merge from gundlach ec2 conversion * Adds ParallaxClient and TellerClient plumbing for GlanceImageService. Adds stubs FakeParallaxClient and unit tests for LocalImageService and GlanceImageService * Fix broken unit tests * Matches changes in the database / model layer with corresponding fixes to nova.virt.xenapi * Replace the embarrasingly crude string based tests for to\_xml with some more sensible ElementTree based stuff * A shiny, new Auth driver backed by SQLAlchemy. Read it and weep. I did * Move manager\_class instantiation and db.service\_\* calls out of nova.service.Service.\_\_init\_\_ into a new nova.service.Service.startService method which gets called by twisted. This delays opening db connections (and thus sqlite file creation) until after privileges have been shed by twisted * Add pylint thingamajig for startService (name defined by Twisted) * Revert r312 * Add a context of None to the call to db.instance\_get\_all * Honour the --verbose flag by setting the logging level to DEBUG * Accidentally renamed volume related stuff * More clean up and conflict resolution * Move manager\_class instantiation and db.service\_\* calls out of nova.service.Service.\_\_init\_\_ into a new nova.service.Service.startService method which gets called by twisted. 
This delays opening db connections (and thus sqlite file creation) until after privileges have been shed by twisted * Bug #653560: AttributeError in VlanManager.periodic\_tasks * Bug #653534: NameError on session\_get in sqlalchemy.api.service\_update * Fixes to address the following issues: * s/APIRequestContext/get\_admin\_context/ <-- sudo for request contexts * Bug #654034: nova-manage doesn't honour --verbose flag * Bug #654025: nova-manage project zip and nova-manage vpn list broken by change in DB semantics when networks are missing * Bug #654023: nova-manage vpn commands broken, resulting in erroneous "Wrong number of arguments supplied" message * fix typo in setup\_compute\_network * pack and unpack context * add missing to\_dict * Bug #653651: XenAPI support completely broken by orm-refactor merge * Bug #653560: AttributeError in VlanManager.periodic\_tasks * Bug #653534: NameError on session\_get in sqlalchemy.api.service\_update * Adjust db api usage according to recent refactoring * Make \_dhcp\_file ensure the existence of the directory containing the files it returns * Keep handles to loggers open after daemonizing * Adds BaseImageService and flag to control image service loading. Adds unit test for local image service * Cleans up the unit tests that are meant to be run with nosetests * Refactor sqlalchemy api to perform contextual authorization * automatically convert strings passed into the api into their respective original values * Fix the deprecation warnings for passing no context * Address a few comments from Todd * Merged trunk * Locked down fixed ips and improved network tests * merged remove-network-index * Fixed flat network manager with network index gone * merged trunk * show project ids for groups instead of user ids * create a new manager for flat networking including dhcp * First attempt at a uuid generator -- but we've lost a 'topic' input so i don't know what that did * Find other places in the code that used ec2\_id or get\_instance\_by\_ec2\_id and use internal\_id as appropriate * Convert EC2 cloud.py from assuming that EC2 IDs are stored directly in the database, to assuming that EC2 IDs should be converted to internal IDs * Method cleanup and fixing the servers tests * merged trunk, removed extra quotas * Adds support for periodic\_tasks on manager that are regularly called by the service and recovers fixed\_ips that didn't get disassociated properly * Replace database instance 'ec2\_id' with 'internal\_id' throughout the nova.db package. internal\_id is now an integer -- we need to figure out how to make this a bigint or something * merged trunk * refactoring * refactoring * Includes changes for creating instances via the Rackspace API. 
Utilizes much of the existing EC2 functionality to power the Rackspace side of things, at least for now * Get rid of mention of mongo, since we are using openstack/swift * Mongo bad, swift good * Add a DB backend for auth manager * Bug #652103: NameError in exception handler in sqlalchemy API layer * Bug #652103: NameError in exception handler in sqlalchemy API layer * Bug #651887: xenapi list\_instances completely broken * Grabbed the wrong copyright info * Cleaned up db/api.py * Refactored APIRequestContext * Bug #651887: xenapi list\_instances completely broken * Simplified authorization with decorators" " * Removed deprecated bits from NovaBase * Wired up context auth for keypairs * Completed quota context auth * Finished context auth for network * Finished instance context auth * Finished instance context auth * Made network tests pass again * Whoops, forgot the exception handling bit * Missed a few attributes while mirroring the ec2 instance spin up * pylint and pep8 cleanup * Forgot the context module * Some minor cleanup * Servers stuff * merge rsapi\_reboot from gundlach * Wired up context auth for services * Server creation up to, but not including, network configuration * Progress on volumes Fixed foreign keys to respect deleted flag * Support reboot in api.rackspace by extracting reboot function from api.ec2 into api.cloud * Make Fault raiseable, and add a test to verify that * Make Fault raiseable by inheriting from webob.exc.HTTPException * Related: https://code.launchpad.net/~anso/nova/authupdate/+merge/36925 * Remove debuggish print statement * Make update work correctly * Server update name and password * Support the pagination interface in RS API -- the &offset and &limit parameters are now recognized * Update from trunk to handle one-line merge conflict * Support fault notation in error messages in the RS API * Limit entity lists by &offset and &limit * After update from trunk, a few more exceptions that need to be converted to Faults * fix ordering of rules to actually allow out and drop in * fix the primary and secondary join * autocreate the models and use security\_groups * Began wiring up context authorization * Apply patch from Vish to fix a hardcoded id in the unit tests * removed a few extra items * merged with soren's branch * fix loading to ignore deleted items * Add user-editable name & notes/description to volumes, instances, and images * merged trunk * patch for test * fix join and misnamed method * fix eagerload to be joins that filter by deleted == False * \* Create an AuthManager#update\_user method to change keys and admin status. \* Refactor the auth\_unittest to not care about test order \* Expose the update\_user method via nova-manage * Updates the fix-iptables branch with a number of bugfixes * Fixes reversed arguments in nova-manage project environment * Makes sure that multiple copies of nova-network don't create multiple copies of the same NetworkIndex * Fix a few errors in api calls related to mistyped database methods for floating\_ips: specifically describe addresses and and associate address * Merged Termie's branch that starts tornado removal and fixed rpc test cases for twisted. 
Nothing is testing the Eventlet version of rpc.call though yet * Adds bpython support to nova-manage shell, because it is super sexy * Adds a disabled flag to service model and check for it when scheduling instances and volumes * Adds bpython support to nova-manage shell, because it is super sexy * Added random ec2 style id's for volumes and instances * fix security group revoke * Fixed tests * Removed str\_id from FixedIp references * missed a comma * improved commenting * Fault support * fix flag defaults * typo s/boo/bool * merged and removed duplicated methods * fixed merge conflicts * removed extra code that slipped in from a test branch * Fixed name property on instance model * Implementation of the Rackspace servers API controller * Added checks for uniqueness for ec2 id * fix test for editable image * Add authorization info for cloud endpoints * Remove TODO, since apparently newer boto doesn't die on extra fields * add disabled column to services and check for it in scheduler * Hook the AuthManger#modify\_user method into nova-manage commands * Refactored adminclient to support multiple regions * merged network-lease-fix * merged floating-ips * move default group creation to api * Implemented random instance and volume strings for ec2 api * Adds --force option to run\_tests.sh to clear virtualenv. Useful when dependencies change * merge from trunk * Instance & Image renaming fixes * merge from gundlach * Testing testing testing * get rid of network indexes and make networks into a pool * Add Serializer.deserialize(xml\_or\_json\_string) * merged trunk * return a value if possible from export\_device\_create\_safe * merged floating-ip-by-project * merged network-lease-fix * merged trunk * Stop trying to install nova-api-new (it's gone). Install nova-scheduler * Call out to 'sudo kill' instead of using os.kill. dnsmasq runs as root or nobody, nova may or may not be running as root, so os.kill won't work * Make sure we also start dnsmasq on startup if we're managing networks * Improve unit tests for network filtering. It now tracks recursive filter dependencies, so even if we change the filter layering, it still correctly checks for the presence of the arp, mac, and ip spoofing filters * Make sure arguments to string format are in the correct order * Make the incoming blocking rules take precedence over the output accept rules * db api call to get instances by user and user checking in each of the server actions * More cleanup, backup\_schedules controller, server details and the beginnings of the servers action route * This is getting ridiculous * Power state mapping * Set priority of security group rules to 300 to make sure they override the defaults * Recreate ensure\_security\_group\_filter. Needed for refresh * Clean up nwfilter code. Move our filters into the ipv4 chain * If neither a security group nor a cidr has been passed, assume cidr=0.0.0.0/0 * More re-work around the ORM changes and testing * Support content type detection in serializer * If an instance never got scheduled for whatever reason, its host will turn up as None. 
Filter those out to make sure refresh works * Only call \_on\_set\_network\_host on nova-network hosts * Allow DHCP requests through, pass the IP of the gateway as the dhcp server * Add a flag the specifies where to find nova-dhcpbridge * Ensure dnsmasq can read updates to dnsmasq conffile * Set up network at manager instantiation time to ensure we're ready to handle the networks we're already supposed to handle * Add db api methods for retrieving the networks for which a host is the designated network host * Apply IP configuration to bridge regardless of whether it existed before. The fixes a race condition on hosts running both compute and network where, if compute got there first, it would set up the bridge, but not do IP configuration (because that's meant to happen on the network host), and when network came around, it would see the interface already there and not configure it further * Removed extra logging from debugging * reorganize iptables clear and make sure use\_nova\_chains is a boolean * allow in and out for network and compute hosts * Modification of test stubbing to match new domain requirements for the router, and removal of the unnecessary rackspace base controller * Minor changes to be committed so trunk can be merged in * disable output drop for the moment because it is too restrictive * add forwarding ACCEPT for outgoing packets on compute host * fix a few missed calls to \_confirm\_rule and 80 char issues * allow mgmt ip access to api * flush the nova chains * Test the AuthManager interface explicitly, in case the user/project wrappers fail or change at some point. Those interfaces should be tested on their own * Update auth manager to have a update\_user method and better tests * add a reset command * Merged Termie's branch and fixed rpc test cases for tesited. Nothing is testing the Eventlet version of rpc.call though yet * improved the shell script for iptables * Finished making admin client work for multi-region * Install nova-scheduler * nova-api-new is no more. Don't attempt to install it * Add multi region support for adminclient * Merging in changes from rs\_auth, since I needed something modern to develop on while waiting for Hudson to right itself * whatever * Put EC2 API -> eventlet back into trunk, fixing the bits that I missed when I put it into trunk on 9/21 * Apply vish's patch * Applied vish's fixes * Implementation of Rackspace token based authentication for the Openstack API * fixed a few missing params from iptables rules * removed extra line in manage * made use of nova\_ chains a flag and fixed a few typos * put setup\_iptables in the right dir * Fixed rpc consumer to use unique return connection to prevent overlap. This could be reworked to share a connection, but it should be a wait operation and not a fast poll like it was before. We could also keep a cache of opened connections to be used between requests * fixed a couple of typos * Re-added the ramdisk line I accidentally removed * Added a primary\_key to AuthToken, fixed some unbound variables, and now all unit tests pass * Missed the model include, and fixed a broken test after the merge * Some more refactoring and another unit test * Refactored the auth branch based on review feedback * Replaced the existing Rackspace Auth Mechanism with one that mirrors the implementation in the design document * Merged gundlach's branch * renamed ipchains to iptables * merged trunk * Fixed cloudpipe lib init * merged fix-iptables * When calculating timedeltas make sure both timestamps are in UTC. 
For people ahead of UTC, it makes the scheduler unit tests pass. For people behind UTC, it makes their services time out after 60 seconds without a heart beat rather than X hours and 60 seconds without a heart beat (where X is the number of hours they're behind UTC) * Spot-fix endpoint reference * Wrap WSGI container in server.serve to make it properly handle command line arguments as well as daemonise properly. Moved api and wsgi imports in the main() function to delay their inclusion until after python-daemon has closed all the file descriptors. Without this, eventlet's epoll fd gets opened before daemonize is called and thus its fd gets closed leading to very, very, very confusing errors * Apply vish's patch * Added FLAGS.FAKE\_subdomain letting you manually set the subdomain for testing on localhost * Address Vishy's comments * All timestamps should be in UTC. Without this patch, the scheduler unit tests fail for anyone sufficiently East of Greenwich * Compare project\_id to '' using == (equality) rather than 'is' (identity). This is needed because '' isn't the same as u'' * Various loose ends for endpoint and tornado removal cleanup, including cloudpipe API addition, rpc.call() cleanup by removing tornado ioloop, and fixing bin/\* programs. Tornado still exists as part of some test cases and those should be reworked to not require it * Re-add root and metadata request handlers to EC2 API * Re-added the ramdisk line I accidentally removed * Soren's patch to fix part of ec2 * Add user display fields to instances & volumes * Responding to eday's feedback -- make a clearer inner wsgi app * Added a primary\_key to AuthToken, fixed some unbound variables, and now all unit tests pass * merge from trunk * typo in instance\_get * typo in instance\_get * User updatable name & description for images * merged trunk and fixed errors * cleaned up exception handling for fixed\_ip\_get * Added server index and detail differentiation * merged trunk * typo s/an/a * Reenable access\_unittest now that it works with new rbac * Rewrite rbac tests to use Authorizer middleware * Missed the model include, and fixed a broke test after the merge * Delete nova.endpoint module, which used Tornado to serve up the Amazon EC2 API. Replace it with nova.api.ec2 module, which serves up the same API via a WSGI app in Eventlet. Convert relevant unit tests from Twisted to eventlet * Remove eventlet test, now that eventlet 0.9.10 has indeed been replaced by 0.9.12 per mtaylor * In desperation, I'm raising eventlet.\_\_version\_\_ so I can see why the trunk tests are failing * merged trunk * bpython is amazing * Fix quota unittest and don't run rbac unit tests for the moment * merged trunk * Some more refactoring and another unit test * Implements quotas with overrides for instances, volumes, and floating ips * Renamed cc\_ip flag to cc\_host * Moves keypairs out of ldap and into the common datastore * Fixes server error on get metadata when instances are started without keypairs * allows api servers to have a list of regions, allowing multi-cluster support if you have a shared image store and user database * Don't use something the shell will escape as a separator. | is now = * Added modify project command to auth manager to allow changing of project manager and description * merged trunk * merged trunk * Refactored the auth branch based on review feedback * Whitespace fixes * Support querying version list, per the RS API spec. 
Fixes bug 613117 * Undo run\_tests.py modification in the hopes of making this merge * Add a RateLimitingMiddleware to the Rackspace API, implementing the rate limits as defined by the current Cloud Servers spec. The Middleware can do rate counting in memory, or (for deployments that have more than one API Server) can offload to a rate limiting service * Use assertRaises * A small fix to the install\_venv program to allow us to run it on the tarmac box as part of the tarmac build * Removes second copy of ProcessExecutionError that creeped in during a bad merge * Adds an omitted yield in compute manager detach\_volume * Move the code that extracts the console output into the virt drivers. Move the code that formats it up into the API layer. Add support for Xen console * Add Xen template and use it by default if libvirt\_type=xen * added rescue mode support and made reboot work from any state * Adds timing fields to instances and volumes to track launch times and schedule times * Fixes two errors in cloud.py in the nova\_orm branch: a) self.network is actually called network\_manager b) the logic for describe-instances check on is\_admin was reversed * Adds timing fields to instances and volumes to track launch times and schedule times * updated docstring * add in a few comments * s/\t/ /g, and add some comments * add in support for ajaxterm console access * add security and session timeout to ajaxterm * initial commit of ajaxterm * Replaced the existing Rackspace Auth Mechanism with one that mirrors the implementation in the design document * Whitespace fixes * Added missing masquerade rules * Fix things not quite merged perfectly -- all tests now pass * Better error message on the failure of a spawned process, and it's a ProcessExecutionException irrespective of how the process is run (twisted or not) * Added iptables host initial configuration * Added iptables host initial configuration * Proposing merge to get feedback on orm refactoring. I am very interested in feedback to all of these changes * Support querying version list * Add support for middleware proxying to a ratelimiting.WSGIApp, for deployments that use more than one API Server and thus can't store ratelimiting counters in memory * Test the WSGIApp * RateLimitingMiddleware tests * Address a couple of the TODO's: We now have half-decent input validation for AuthorizeSecurityGroupIngress and RevokeDitto * Clean up use of ORM to remove the need for scoped\_session * Roll back my slightly over-zealous clean up work * More ORM object cleanup * Clean up use of objects coming out of the ORM * RateLimitingMiddleware * Add ratelimiting package into Nova. After Austin it'll be pulled out into PyPI * When destroying a VM using the XenAPI backend, if the VM is still running (the usual case) the destroy fails. It needs to be powered-off first * Leave out the network setting from the interfaces template. It does not get passed anymore * Network model has network\_str attribute * Cast process input to a str. It must not be unicode, but stuff that comes out of the database might very well be unicode, so using such a value in a template makes the whole thing unicode * Make refresh\_security\_groups play well with inlineCallbacks * Fix up rule generation. It turns out nwfilter gets very, very wonky indeed if you mix rules and rules. 
Setting a TCP rule adds an early rule to ebtables that ends up overriding the rules which are last in that table * Add a bunch of TODO's to the API implementation * Multiple security group support * Remove power state constants that have ended up duplicated following a bad merge. They were moved from nova.compute.node.Instance into nova.compute.power\_state at the same time that Instance was moved into nova.compute.service. We've ended up with these constants in both places * now we can run files - thanks vish * Move vol.destroy() call out of the \_check method in test\_multiple\_volume\_race\_condition test and into a callback of the DeferredList. This should fix the intermittent failure of that test. I /think/ test\_too\_many\_volumes's failure was caused by test\_multiple\_volume\_race\_condition failure, since I have not been able to reproduce its failure after fixing this one * Adds 'shell run' to nova manage, which spawns a shell with flags properly imported * Finish pulling S3ImageService out of this mergeprop * Pull S3ImageService out of this mergeprop * Correctly pass ip\_address to templates * Fix call to listNWFilters * (Untested) Make changes to security group rules propagate to the relevant compute nodes * Filters all get defined when running an instance * added missing yield in detach\_volume * multiple network controllers will not create duplicate indexes * renamed \_get\_quota to get\_quota and moved int(size) into quota.py * add a shell to nova-manage, which respects flags (taken from django) * Move vol.destroy() call out of the \_check method in test\_multiple\_volume\_race\_condition test and into a callback of the DeferredList. This should fix the intermittent failure of that test. I /think/ test\_too\_many\_volumes's failure was caused by test\_multiple\_volume\_race\_condition failure, since I have not been able to reproduce its failure after fixing this one * removed second copy of ProcessExecutionError * move the warnings about leasing ips * simplified query * missed a space * set leased = 0 as well on disassociate update * speed up the query and make sure allocated is false * workaround for mysql select in update * Periodic callback for services and managers. 
Added code to automatically disassociate stale ip addresses * fixed typo * flag for retries on volume commands * auto all and start all exceptions should be ignored * generalized retry into try\_execute * more error handling in volume driver code * handle exceptions thrown by vblade stop and vblade destroy * merged trunk * deleting is set by cloud * re added missing volume update * Integrity error is in a different exc file * allow multiple volumes to run ensure\_blades without creating duplicates * fixed name for unique constraint * export devices unique * merged instance time and added better concurrency * make fixed\_ip\_get\_by\_address return the instance as well so we don't run into concurrency issues where it is disassociated in between * disassociate floating is supposed to take floating\_address * speed up generation of dhcp\_hosts and don't run into None errors if instance is deleted * don't allocate the same floating ip multiple times * don't allow deletion or attachment of volume unless it is available * fixed reference to misnamed method * manage command for project quotas * merged trunk * implement floating\_ip\_get\_all\_by\_project and renamed db methods that get more then one to get\_all\_by instead of get\_by * fixed reversed args in nova-manage project environment * merged scheduler * fix instance time * move volume to the scheduler * tests for volumes work * update query and test * merged quotas * use gigabytes and cores * use a string version of key name when constructing mpi dict because None doesn't work well in lookup * db not self.db * Security Group API layer cleanup * merged trunk * added terminated\_at to volume and moved setting of terminated\_at into cloud * remerged scheduler * merged trunk * merged trunk * merged trunk * merged trunk * fixed reversed admin logic on describe instances * fixed typo network => network\_manager in cloud.py * fixed old key reference and made keypair name constistent -> key\_pair * typo fixes, add flag to nova-dhcpbridge * fixed tests, added a flag for updating dhcp on disassociate * simplified network instance association * fix network association issue * merged trunk * improved network error case handling for fixed ips * it is called regionEndpoint, and use pipe as a separator * move keypair generation out of auth and fix tests * Fixed manager\_user reference in create\_project * Finished security group / project refactor * delete keypairs when a user is deleted * remove keypair from driver * moved keypairs to db using the same interface * multi-region flag for describe regions * make api error messages more readable * Refactored to security group api to support projects * set dnsName on describe * merged orm and put instance in scheduling state * just warn if an ip was already deallocated * fix mpi 500 on fixed ip * hostname should be string id * dhcpbridge needed host instead of node name * add a simple iterator to NovaBase to support converting into dictionary * Adjust a few things to make the unit tests happy again * First pass of nwfilter based security group implementation. It is not where it is supposed to be and it does not actually do anything yet * couple more errors in metadata * typo in metadata call * fixed messed up call in metadata * added modify project command to allow project manager and description to be updated * Change "exn" to "exc" to fit with the common style * Create and delete security groups works. Adding and revoking rules works. DescribeSecurityGroups returns the groups and rules. 
So, the API seems to be done. Yay * merged describe\_speed * merged scheduler * set host when item is scheduled * remove print statements * removed extra quotes around instance\_type * don't pass topic into schedule\_run\_instance * added scheduled\_at to instances and volumes * quotas working and tests passing * address test almost works * quota tests * merged orm * fix unittest * merged orm * fix rare condition where describe is called before instance has an ip * merged orm * make the db creates return refs instead of ids * add missing files for quota * kwargs don't work if you prepend an underscore * merged orm, added database methods for getting volume and ip data for projects * database support for quotas * Correct style issues brought up in termie's review * mocking out quotas * don't need to pass instance\_id to network on associate * floating\_address is the name for the cast * merged support code from orm branch * faster describe\_addresses * added floating ip commands and launched\_at terminated\_at, deleted\_at for objects * merged orm * solution that works with this version * fix describe addresses * remove extraneous get\_host calls that were requiring an extra db trip * pass volume['id'] instead of string id to delete volume * fix volume delete issue and volume hostname display * fix logging for scheduler to properly display method name * fixed logic in set\_state code to stop endless loops * Authorize and Revoke access now works * list command for floating ips * merged describe speed * merged orm * floating ip commands * removed extraneous rollback * speed up describe by loading fixed and floating ips * AuthorizeSecurityGroupIngress now works * switch to using utcnow * Alright, first hole poked all the way through. We can now create security groups and read them back * don't fail in db if context isn't a dict, since we're still using a class based context in the api * logging for backend is now info instead of error * merged orm * merged orm * set state everywhere * put soren's fancy path code in scheduler bin as well * missing deleted ref * merged orm * merged orm * consistent naming for instance\_set\_state * Tests turn things into inlineCallbacks * Missed an instance of attach\_to\_tornado * Remove tornado-related code from almost everything * It's annoying and confusing to have to set PYTHONPATH to point to your development tree before you run any of the scripts * deleted typo * merged orm * merged orm * fixed missing paren * merge orm * make timestamps for instances and volumes, includes additions to get deleted objects from db using deleted flag * merged orm * remove end of line slashes from models.py * Make the scripts in bin/ detect if they're being run from a bzr checkout or an extracted release tarball or whatever and adjust PYTHONPATH accordingly * merged orm * merged orm branch * set state moved to db layer * updated to the new orm code * changed a few unused context to \_context * a few formatting fixes and moved exception * fixed a few bugs in volume handling * merged trunk * Last of cleanup, including removing fake\_storage flage * more fixes from code review * review db code cleanup * review cleanup for compute manager * first pass at cleanup rackspace/servers.py * dhcpbridge fixes from review * more fixes to session handling * few typos in updates * don't log all sql statements * one more whitespace fix * whitespace fixes * fix for getting reference on service update * clean up of session handling * New version of eventlet handles Twisted & eventlet running 
at the same time * fix docstrings and formatting * Oops, APIRequestContext's signature has changed * merged orm * fix floating\_ip to follow standard create pattern * Add stubbed out handler for AuthorizeSecurityGroupIngress EC2 API call * merged orm\_deux * Merged trunk * Add a clean-traffic filterref to the libvirt templates to prevent spoofing and snooping attacks from the guests * Lots of fixes to make the nova commands work properly and make datamodel work with mysql properly * Bug #630640: Duplicated power state constants * Bug #630636: XenAPI VM destroy fails when the VM is still running * removed extra equals * Just a couple of UML-only fixes:  \* Due to an issue with libvirt, we need to chown the disk image to root.  \* Just point UML's console directly at a file, and don't bother with the pty. It was only used for debugging * removed extra file and updated sql note * merged fixed format instances from orm * fixed up format\_instances * merged server.py change from orm branch * reverting accidental search/replace change to server.py * merged orm * removed model from nova-manage * merged orm branch * removed references to compute.model * send ultimate topic in to scheduler * more scheduler tests * test for too many instances work * merged trunk * fix service unit tests * removed dangling files * merged orm branch * merged trunk and cleaned up test * renamed daemon to service and update db on create and destroy * pass all extra args from service to manager * fix test to specify host * inject host into manager * Servers API remodeling and serialization handling * Move nova.endpoint.images to api.ec2 and delete nova.endpoint * Cloud tests pass * OMG got api\_unittests to pass * send requests to the main API instead of to the EC2 subset -- so that it can parse out the '/services/' prefix. Also, oops, match on path\_info instead of path like we're supposed to * Remove unused APIRequestContext.handler * Use port that boto expects * merged orm branch * scheduler + unittests * removed underscores from used context * updated models a bit and removed service classes * Small typos, plus rework api\_unittest to use WSGI instead of Tornado * Replace an if/else with a dict lookup to a factory method * Nurrr * Abstractified generalization mechanism * Revert the changes to the qemu libvirt template and make the appropriate changes in the UML template where they belong * Create console.log ahead of time. This ensures that the user running nova-compute maintains read privileges * This improves the changelog generated as part of "setup.py sdist". If you look at it now, it says that Tarmac has done everything and every little commit is listed. 
With this patch, it only logs the "top-most" commit and credits the author rather than the committer * Fix simple errors to the point where we can run the tests [but not pass] * notes -- conversion 'complete' except now the unit tests won't work and surely i have bugs :) * Moved API tests into a sub-folder of the tests/ and added a stubbed-out test declarations to mirror existing API tickets * Delete rbac.py, moving @rbac decorator knowledge into api.ec2.Authorizer WSGI middleware * Break Router() into Router() and Executor(), and put Authorizer() (currently a stub) in between them * Return error Responses properly, and don't muck with req.params -- make a copy instead * merged orm branch * pylint clean of manager and service * pylint cleanup of db classes * rename node\_name to host * merged trunk * Call getInfo() instead of getVersion() on the libvirt connection object. virConnectGetVersion was not exposed properly in the python bindings until quite recently, so this makes us rather more backwards compatible * Better log formatter for Nova. It's just like gnuchangelog, but logs the author rather than the committer * Remove all Twisted defer references from cloud.py * Remove inlineCallbacks and yield from cloud.py, as eventlet doesn't need it * Move cloudcontroller and admincontroller into new api * Adjust setup.py to match nova-rsapi -> nova-api-new rename * small import cleanup * Get rid of some convoluted exception handling that we don't need in eventlet * First steps in reworking EC2 APIRequestHandler into separate Authenticate() and Router() WSGI apps * Call getInfo() instead of getVersion() on the libvirt connection object. virConnectGetVersion was not exposed properly in the python bindings until quite recently, so this makes us rather more backwards compatible * Fix up setup.py to match nova-rsapi -> nova-api-new rename * a little more cleanup in compute * pylint cleanup of tests * add missing manager classes * volume cleanup * more cleanup and pylint fixes * more pep8 * more pep8 * pep8 cleanup * add sqlalchemy to pip requires * merged trunk, fixed a couple errors * Delete \_\_init\_\_.py in prep for turning apirequesthandler into \_\_init\_\_ * Move APIRequestContext into its own file * Move APIRequest into its own file * run and terminate work * Move class into its own file * fix daemon get * Notes for converting Tornado to Eventlet * undo change to get\_my\_ip * all tests pass again * rollback on exit * merged session from devin * Added session.py * Removed get\_backup\_schedules from the image test * merged devin's sqlalchemy changes * Making tests pass * Reconnect to libvirt on broken connection * pylint fixes for /nova/virt/connection.py * pylint fixes for nova/objectstore/handler.py * ip addresses work now * Add Flavors controller supporting * Resolve conflicts and merge trunk * Detect if libvirt connection has been broken and reestablish it * instance runs * Dead code removal * remove creation of volume groups on boot * tests pass * Making tests pass * Making tests pass * Refactored orm to support atomic actions * moved network code into business layer * move None context up into cloud * split volume into service/manager/driver * moved models.py * removed the last few references to models.py * chown disk images to root for uml. Due to libvirt dropping CAP\_DAC\_OVERRIDE for uml, root needs to have explicit access to the disk images for stuff to work * Create console.log ahead of time. 
This ensures that the user running nova-compute maintains read privileges * fixed service mox test cases * Renamed test.py and moved a test as per merge proposal feedback * fixed volume unit tests * work endpoint/images.py into an S3ImageService. The translation isn't perfect, but it's a start * get to look like trunk * Set UML guests to use a file as their console. This halfway fixes get-console-output for them * network tests pass again * Fixes issue with the same ip being assigned to multiple instances * merged trunk and fixed tests * Support GET //detail * Moved API tests into a sub-folder of the tests/ and added a stubbed-out test declarations to mirror existing API tickets * Turn imageid translator into general translator for rackspace api ids * move network\_type flag so it is accesible in data layer * Use compute.instance\_types for flavor data instead of a FlavorService * more data layer breakouts, lots of fixes to cloud.py * merged jesse * Initial support for Rackspace API /image requests. They will eventually be backed by Glance * Fix a pep8 violation * improve the volume export - sleep & check export * missing context and move volume\_update to before the export * update volume create code * A few small changes to install\_venv to let venv builds work on the tarmac box * small tweaks * move create volume to work like instances * work towards volumes using db layer * merge vish * fix setup compute network * merge vish * merge vish * use vlan for network type since it works * merge vish * more work on getting running instances to work * merge vish * more cleanup * Flavors work * pep8 * Delete unused directory * Move imageservice to its own directory * getting run/terminate/describe to work * OK, break out ternary operator (good to know that it slowed you down to read it) * Style fixes * fix some errors with networking rules * typo in release\_ip * run instances works * Ensure that --gid and --uid options work for both twisted and non-twisted daemons * Fixes an error in setup\_compute\_network that was causing network setup to fail * add back in the needed calls for dhcpbridge * removed old imports and moved flags * merge and fixes to creates to all return id * bunch more fixes * moving network code and fixing run\_instances * jesse's run\_instances changes * fix daemons and move network code * Rework virt.xenapi's concurrency model. There were many places where we were inadvertently blocking the reactor thread. The reworking puts all calls to XenAPI on background threads, so that they won't block the reactor thread * merged trunk and fixed merge errors * Refactored network model access into data abstraction layer * Get the output formatting correct * Typo * Don't serialize in Controller subclass now that wsgi.Controller handles it for us * Move serialize() to wsgi.Controller so \_\_call\_\_ can serialize() action return values if they are dicts * Serialize properly * Support opaque id to rs int id as well * License * Moves auth.manager to the data layer * Add db abstraction and unittets for service.py * Clarified what the 'Mapped device not found' exception really means. Fixed TODO. Some formatting to be closer to 80 chars * Added missing "self." 
* Alphabetize the methods in the db layer * fix concurrency issue with multiple instances getting the same ip * small fixes to network * Fixed typo * Better error message on subprocess spawn fail, and it's a ProcessExecutionException irrespective of how the process is run * Check exit codes when spawning processes by default Also pass --fail to curl so that it sets exit code when download fails * PEP8/pylint cleanup in bin and nova/auth * move volume code into datalayer and cleanup * Complete the Image API against a LocalImageService until Glance's API exists (at which point we'll make a GlanceImageService and make the choice of ImageService plugin configurable.) * Added unit tests for WSGI helpers and base WSGI API * merged termies abstractions * Move deferredToThread into utils, as suggested by termie * Remove whitespace to match style guide * Data abstraction for compute service * this file isn't being used * Cleaned up pep8/pylint style issues in nova/auth. There are still a few pylint warnings in manager.py, but the patch is already fairly large * More pylintrc updates * fix report state * Removed old cloud\_topic queue setup, it is no longer used * last few test fixes * More bin/ pep8/pylint cleanup * fixing more network issues * Added '-' as possible charater in module rgx * Merged with trunk * Updated the tests to use webob, removed the 'called' thing and just use return values instead * Fix unit test bug this uncovered: don't release\_ip that we haven't got from issue\_ip * Fix to better reflect (my believed intent) as to the meaning of error\_ok (ignore stderr vs accept failure) * Merged with trunk * use with\_lockmode for concurrency issues * First in a series of patches to port the API from Tornado to WSGI. Also includes a few small style fixes in the new API code * Pull in ~eday/nova/api-port * Merged trunk * Merged api-port into api-port-1 * Since pylint=0.19 is our version, force everyone to use the disable-msg syntax * Missed one * Removed the 'controllers' directory under 'rackspace' due to full class name redundancy * pep8 typo * Changed our minds: keep pylint equal to Ubuntu Lucid version, and use disable-msg throughout * Fixed typo * Image API work * Newest pylint supports 'disable=', not 'disable-msg=' * Fix pep8 violation * tests pass * network tests pass * Added unittests for wsgi and api * almost there * progress on tests passing * remove references to deleted files so tests run * fix vpn access for auth * merged trunk * removed extra files * network datamodel code * In an effort to keep new and old API code separate, I've created a nova.api to put all new API code under. This means nova.endpoint only contains the old Tornado implementation. I also cleaned up a few pep8 and other style nits in the new API code * No longer installs a virtualenv automatically and adds new options to bypass the interactive prompt * Stylistic improvements * Add documentation to spawn, reboot, and destroy stating that those functions should return Deferreds. Update the fake implementations to do so (the libvirt ones already do, and making the xenapi ones do so is the subject of a current merge request) * start with model code * clean up linux\_net * merged refresh from sleepsonthefloor * See description of change... what's the difference between that message and this message again? 
* Move eventlet-using class out of endpoint/\_\_init\_\_.py into its own submodule, so that twisted-related code using endpoint.[other stuff] wouldn't run eventlet and make unit tests throw crazy errors about eventlet 0.9.10 not playing nicely with twisted * Remove duplicate definition of flag * The file that I create automates this step in http://wiki.openstack.org/InstallationNova20100729 : * Simpler installation, and, can run install\_venv from anywhere instead of just from checkout root * Use the argument handler specified by twistd, if any * Fixes quite a few style issues across the entire nova codebase bringing it much closer to the guide described in HACKING * merge from trunk * merged trunk * merged trunk and fixed conflicts * Fixes issues with allocation and deallocation of fixed and elastic addresses * Added documentation for the nova.virt connection interface, a note about the need to chmod the objectstore script, and a reference for the XenAPI module * Make individual disables for R0201 instead of file-level * All controller actions receive a 'req' parameter containing the webob Request * improve compatibility with ec2 clients * PEP8 and name corrections * rather comprehensive style fixes * fix launching and describing instances to work with sqlalchemy * Add new libvirt\_type option "uml" for user-mode-linux.. This switches the libvirt URI to uml:///system and uses a different template for the libvirt xml * typos * don't try to create and destroy lvs in fake mode * refactoring volume and some cleanup in model and compute * Add documentation to spawn, reboot, and destroy stating that those functions should return Deferreds. Update the fake implementations to do so (the libvirt ones already do, and making the xenapi ones do so is the subject of a current merge request) * Rework virt.xenapi's concurrency model. There were many places where we were inadvertently blocking the reactor thread. 
The reworking puts all calls to XenAPI on background threads, so that they won't block the reactor thread * add refresh on model * merge in latest from vish * Catches and logs exceptions for rpc calls and raises a RemoteError exception on the caller side * Removes requirement of internet connectivity to run api server * Fixed path to keys directory * Update cloud\_unittest to match renamed internal function * Removes the workaround for syslog-ng of removing newlines * Fixes bug lp:616312 by reversing the order of args in nova-manage when it calls AuthManager.get\_credentials * merged trunk * Sets a hostname for instances that properly resolves and cleans up network classes * merged fix-hostname and fixed conflict * Implemented admin client / admin api for fetching user roles * Improves pep8 compliance and pylint score in network code * Bug #617776: DescribeImagesResponse contains type element, when it should be called imageType * Bug 617913: RunInstances response doesn't meet EC2 specification * remove more direct session interactions * refactor to have base helper class with shared session and engine * ComputeConnectionTestCase is almost working again * more work on trying to get compute tests passing * re-add redis clearing * make the fake-ldap system work again * got run\_tests.py to run (with many failed tests) * Bug #617776: DescribeImagesResponse contains type element, when it should be called imageType * initial commit for orm based models * Add a few unit tests for libvirt\_conn * Move interfaces template into virt/, too * Refactor LibvirtConnection a little bit for easier testing * Remove extra "uml" from os.type * Fixes out of order arguments in get\_credentials * pep8 and pylint cleanup * Support JSON and XML in Serializer * Added note regarding dependency upon XenAPI.py * Added documentation to the nova.virt interface * make rpc.call propagate exception info. Includes tests * Undo the changes to cloud.py that somehow diverged from trunk * Mergeprop cleanup * Mergeprop cleanup * Make WSGI routing support routing to WSGI apps or to controller+action * Make --libvirt\_type=uml do the right thing: Sets the correct libvirt URI and use a special template for the XML * renamed missed reference to Address * die classmethod * merged fix-dhcpbridge * remove class method * typo allocated should be released * rename address stuff to avoid name collision and make the .all() iterator work again * keep track of leasing state so we can delete ips that didn't ever get leased * remove syslog-ng workaround * Merged with trunk * Implement the same fix as lp:~vishvananda/nova/fix-curl-project, but for virt.xenapi * Fix exception in get\_info * Move libvirt.xml template into nova/virt * Parameterise libvirt URI * Merged with trunk * fix dhcpbridge issues * Adapts the run\_tests.sh script to allow interactive or automated creation of virtualenv, or to run tests outside of a virtualenv * Prototype implementation of Servers controller * Working router that can target WSGI middleware or a standard controller+action * Added a xapi plugin that can pull images from nova-objectstore, and use that to get a disk, kernel, and ramdisk for the VM * Serializing in middleware after all... by tying to the router. maybe a good idea? 
* Merged with trunk * Actually pass in hostname and create a proper model for data in network code * Improved roles functionality (listing & improved test coverage) * support a hostname that can be looked up * updated virtualenv to add eventlet, which is now a requirement * Changes the run\_tests.sh and /tools/install\_venv.py scripts to be more user-friendly and not depend on PIP while not in the virtual environment * Fixed admin api for user roles * Merged list\_roles * fix spacing issue in ldapdriver * Fixes bug lp:615857 by changing the name of the zip export method in nova-manage * Wired up admin api for user roles * change get\_roles to have a flag for project\_roles or not. Don't show 'projectmanager' in list of roles * Throw exceptions for illegal roles on role add * Adds get\_roles commands to manager and driver classes * more pylint fixes * Implement VIF creation in the xenapi module * lots more pylint fixes * work on a router that works with wsgi and non-wsgi routing * Pylint clean of vpn.py * Further pylint cleanup * Oops, we need eventlet as well * pylint cleanup * pep8 cleanup * merged trunk * pylint fixes for nova/objectstore/handler.py * rename create\_zip to zipfile so lazy match works * Quick fix on location of printouts when trying to install virtualenv * Changes the run\_tests.sh and /tools/install\_venv.py scripts to be more user-friendly and not depend on PIP while not in the virtual environment. Running run\_tests.sh should not just work out of the box on all systems supporting easy\_install.. * 2 changes in doing PEP8 & Pylint cleaning: \* adding pep8 and pylint to the PIP requirements files for Tools \* light cleaning work (mostly formatting) on nova/endpoints/cloud.py * More changes to volume to fix concurrency issues. Also testing updates * Merge * Merged nova-tests-apitest into pylint * Merged nova-virt-connection into nova-tests-apitest * Pylint fixes for /nova/tests/api\_unittest.py * pylint fixes for nova/virt/connection.py * merged trunk, fixed an error with releasing ip * fix releasing to work properly * Add some useful features to our flags * pylint fixes for /nova/test.py * Fixes pylint issues in /nova/server.py * importing merges from hudson branch * fixing - removing unused imports per Eric & Jay review * initial cleanup of tests for network * Implement the same fix as lp:~vishvananda/nova/fix-curl-project, but for virt.xenapi * Run correctly even if called while in tools/ directory, as 'python install\_venv.py' * This branch builds off of Todd and Michael's API branches to rework the Rackspace API endpoint and WSGI layers * separated scheduler types into own modules * Fix up variable names instead of disabling pylint naming rule. 
Makes variables able to be a single letter in pylintrc * Disables warning about TODO in code comments in pylintrc * More pylint/pep8 cleanup, this time in bin/\* files * pylint fixes for nova/server.py * remove duplicated report\_state that exists in the base class more pylint fixes * Fixed docstring format per Jay's review * pylint fixes for /nova/test.py * Move the xenapi top level directory under plugins, as suggested by Jay Pipes * Pull trunk merge through lp:~ewanmellor/nova/add-contains * Pull trunk merge through lp:~ewanmellor/nova/xapi-plugin * Merged with trunk again * light cleanup - convention stuff mostly * convention and variable naming cleanup for pylint/pep8 * Used new (clearer) flag names when calling processes * Merged with trunk * Greater compliance with pep8/pylint style checks * removing what appears to be an unused try/except statement - nova.auth.manager.UserError doesn't exist in this codebase. Leftover? Something intended to be there but never added? * variable name cleanup * attempting some cleanup work * adding pep8 and pylint for regular cleanup tasks * Cleaned up pep8/pylint for bin/\* files. I did not fix rsapi since this is already cleaned up in another branch * Merged trunk * Reworked WSGI helper module and converted rackspace API endpoint to use it * Changed the network imports to use new network layout * merged with trunk * Change nova/virt/images.py's \_fetch\_local\_image to accept 4 args, since fetch() tries to call it with that many * Merged Todd and Michael's changes * pep8 and pylint cleanups * Some pylink and pep8 cleanups. Added a pylintrc file * fix copyrights for new files, etc * a few more commands were putting output on stderr. In general, exceptions on stderr output seems like a bad idea * Moved Scheduler classes into scheduler.py. Created a way to specify scheduler class that the SchedulerService uses.. * Make network its own worker! This separates the network logic from the api server, allowing us to have multiple network controllers. There a lot of stuff in networking that is ugly and should be modified with the datamodel changes. I've attempted not to mess with those things too much to keep the changeset small(ha!) * Fixed instance model associations to host (node) and added association to ip * Fixed write authorization for public images * Fixes a bug where if a user was removed from a group after he had a role, he could not be re-added * fix search/replace error * merged trunk * Start breaking out scheduler classes.. * WsgiStack class, eventletserver.serve. Trying to work toward a simple API that anyone can use to start an eventlet-based server composed of several WSGI apps * Use webob to simplify wsgi middleware * Made group membership check only search group instead of subtree. Roles in a group are removed when a user is removed from that group. Added test * Fixes bug#614090 -- nova.virt.images.\_fetch\_local\_image being called with 4 args but only has 3 * Fixed image modification authorization, API cleanup * fixed doc string * compute topic for a node is compute.node not compute:node! * almost there on random scheduler. not pushing to correct compute node topic, yet, apparently.. 
* First pass at making a file pass pep8 and pylint tests as an example * merged trunk * rename networkdata to vpn * remove extra line accidentally added * compute nodes should store total memory and disk space available for VMs * merged from trunk * added bin/nova-listinstances, which is mostly just a duplication of euca-describe-instances but doesn't go through the API * Fixes various concurrency issues in volume worker * Changed volumes to use a pool instead of globbing filesystem for concurrency reasons. Fixed broken tests * clean up nova-manage. If vpn data isn't set for user it skips it * method is called set\_network\_host * fixed circular reference and tests * renamed Vpn to NetworkData, moved the creation of data to inside network * fix rpc command line call, remove useless deferreds * fix error on terminate instance relating to elastic ip * Move the xenapi top level directory under plugins, as suggested by Jay Pipes * fixed tests, moved compute network config call, added notes, made inject option into a boolean * fix extra reference, method passing to network, various errors in elastic\_ips * use iteritems * reference to self.project instead of context.project + self.network\_model instead of network\_model * fixes in get public address and extra references to self.network * method should return network topic instead of network host * use deferreds in network * don't \_\_ module methods * inline commands use returnValue * it helps to save files BEFORE committing * Added note to README * Fixes the curl to pass in the project properly * Adds flag for libvirt type (hvm, qemu, etc) * Fix deprecation warning in AuthManager. \_\_new\_\_ isn't allowed to take args * created assocaition between project and host, modified commands to get host async, simplified calls to network * use get to retrieve node\_name from initial\_state * change network\_service flag to network\_type and don't take full class name * vblade commands randomly toss stuff into stderr, ignore it * delete instance doesn't fail if instances dir doesn't exist * Huge network refactor, Round I * Fixes boto imports to support both beta and older versions of boto * Get IP doesn't fail of you not connected to the intetnet * updated doc string and wrapper * add copyright headers * Fix exception in get\_info * Implement VIF creation * Define \_\_contains\_\_ on BasicModel, so that we can use "x in datamodel" * Fixed instance model associations to host (node) and added association to ip * Added a xapi plugin that can pull images from nova-objectstore, and use that to get a disk, kernel, and ramdisk for the VM. The VM actually boots! * Added project as parameter to admin client x509 zip file download * Turn the private \_image\_url(path) into a public image\_url(image). This will be used by virt.xenapi to instruct xapi as to which images to download * Merged in configurable libvirt\_uri, and fixes to raw disk images from the virtualbox branch * Fixed up some of the raw disk stuff that broke in the abstraction out of libvirt * Merged with raw disk image * Recognize 'magic' kernel value that means "don't use a kernel" - currently aki-00000000 * Fix Tests * Fixes nova volumes. The async commands yield properly. Simplified the call to create volume in cloud. 
Added some notes * another try on fix boto * use user.access instead of user.id * Fixes access key passing in curl statement * Accept a configurable libvirt\_uri * Added Cheetah to pip-requires * Removed duplicate toXml method * Merged with trunk * Merged with trunk, added note about suspicious behaviour * Added exit code checking to process.py (twisted process utils). A bit of class refactoring to make it work & cleaner. Also added some more instructive messages to install\_venv.py, because otherwise people that don't know what they're doing will install the wrong pip... i.e. I did :-) * Make nodaemon twistd processes log to stdout * Make nodaemon twistd processes log to stdout * use the right tag * flag for libvirt type * boto.s3 no longer imports connection, so we need to explicitly import it * Added project param to admin client zip download * boto.utils import doesn't work with new boto, import boto instead * fix imports in endpoint/images.py boto.s3 no longer imports connection, so we need to explicitly import it * Added --fail argument to curl invocations, so that HTTP request fails get surfaced as non-zero exit codes * Merged with trunk * Merged with trunk * strip out some useless imports * Add some useful features to our flags * Fixed pep8 in run\_test.py * Blank commit to get tarmac merge to pick up the tags * Fixed assertion "Someone released me too many times: too many tokens!" * Replace the second singleton unit test, lost during a merge * Merged with trunk to resolve merge conflicts * oops retry and add extra exception check * Fix deprecation warning in AuthManager. \_\_new\_\_ isn't allowed to take args * Added ChangeLog generation * Implemented admin api for rbac * Move the reading of API parameters above the call to \_get\_image, so that they have a chance to take effect * Move the reading of API parameters above the call to \_get\_image, so that they have a chance to take effect * Adds initial support for XenAPI (not yet finished) * More merges from trunk. Not everything came over the first time * Allow driver specification in AuthManager creation * pep8 * Fixed pep8 issues in setup.py - thanks redbo * Use default kernel and ramdisk properly by default * Adds optional user param to the get projects command * Ensures default redis keys are lowercase like they were in prior versions of the code * Pass in environment to dnsmasq properly * Releaed 0.9.0, now on 0.9.1 * Merged trunk * Added ChangeLog generation * Wired up get/add/remove project members * Merged lp:~vishvananda/nova/lp609749 * Removes logging when associating a model to something that isn't a model class * allow driver to be passed in to auth manager instead of depending solely on flag * make redis name default to lower case * Merged get-projects-by-user * Merged trunk * Fixed project api * Specify a filter by user for get projects * Create a model for storing session tokens * Fixed a typo from the the refactor of auth code * Makes ldap flags work again * bzr merge lp:nova/trunk * Tagged 0.9.0 and bumped the version to 0.9.1 * Silence logs when associated models aren't found. Also document methods used ofr associating things. 
And get rid of some duplicated code * Fix dnsmasq commands to pass in environment properly 0.9.0 ----- * Got the tree set for debian packaging * use default kernel and ramdisk and check for legal access * import ldapdriver for flags * Removed extra include * Added the gitignore files back in for the folks who are still on the git * Added a few more missing files to MANIFEST.in and added some placeholder files so that setup.py would carry the empty dir * Updated setup.py file to install stuff on a python setup.py install command * Removed gitignore files * Made run\_tests.sh executable * Put in a single MANIFEST.in file that takes care of things * Changed Makefile to shell script. The Makefile approach completely broke debhelper's ability to figure out that this was a python package * fixed typo from auth refactor * Add sdist make target to build the MANIFEST.in file * Removes debian dir from main tree. We'll add it back in in a different branch * Merged trunk * Wired up user:project auth calls * Bump version to 0.9.0 * Makes the compute and volume daemon workers use a common base class called Service. Adds a NetworkService in preparation for splitting out networking code. General cleanup and standardizarion of naming * fixed path to keys directory * Fixes Bug lp:610611: deleted project vlans are deleted from the datastore before they are reused * Add a 'sdist' make target. It first generates a MANIFEST.in based on what's in bzr, then calls python setup.py sdist * properly delete old vlans assigned to deleted projects * Remove debian/ from main branch * Bump version to 0.9.0. Change author to "OpenStack". Change author\_email to nova@lists.launchpad.net. Change url to http://www.openstack.org/. Change description to "cloud computing fabric controller" * Make "make test" detect whether to use virtualenv or not, thus making virtualenv optional * merged trunk * Makes the objectstore require authorization, checks it properly, and makes nova-compute provide it when fetching images * Automatically choose the correct type of test (virtualenv or system) * Ensure that boto's config has a "Boto" section before attempting to set a value in it * fixes buildpackage failing with dh\_install: missing files * removed old reference from nova-common.install and fixed spacing * Flag for SessionToken ttl setting * resolving conflict w/ merge, cleaning up virtenv setups * resolving conflict w/ merge, cleaning up virtenv setups * Fixes bug#610140. Thanks to Vish and Muharem for the patch * A few minor fixes to the virtualenv installer that were breaking on ubuntu * Give SessionToken an is\_expired method * Refactor of auth code * Fixes bug#610140. Thanks to Vish and Muharem for the patch * Share my updates to the Rackspace API * Fixes to the virtualenv installer * Ensure consistent use of filename for dhcp bridge flag file * renamed xxxservice to service * Began wiring up rbac admin api * fix auth\_driver flag to default to usable driver * Adds support scripts for installing deps into a virtualenv * In fact, it should delete them * Lookup should only not return expired tokens * Adds support scripts for installing deps into a virtualenv * default flag file full path * moved misnamed nova-dchp file * Make \_fetch\_s3\_image pass proper AWS Authorization headers so that image downloads work again * Make image downloads work again in S3 handler. 
Listing worked, but fetching the images failed because I wasn't clever enough to use twisted.web.static.File correctly * Move virtualenv installation out of the makefile * Expiry awareness for SessionToken * class based singleton for SharedPool * Basic standup of SessionToken model for shortlived auth tokens * merged trunk * merged trunk * Updated doc layout to the Sphinx two-dir layout * Replace hardcoded "nova" with FLAGS.control\_exchange * Add a simple set of tests for S3 API (using boto) * Fix references to image\_object. This caused an internal error when using euca-deregister * Set durable=False on TopicPublisher * Added missing import * Replace hardcoded example URL, username, and password with flags called xenapi\_connection\_url, xenapi\_connection\_username, xenapi\_connection\_password * Fix instance cleanup * Fix references to image\_object. This caused an internal error when using euca-deregister * removed unused assignment * More Cleanup of code * Fix references to get\_argument, fixing internal error when calling euca-deregister * Changes nova-volume to use twisted * Fixes up Bucket to throw proper NotFound and NotEmpty exceptions in constructor and delete() method, and fixes up objectstore\_unittest to properly use assertRaises() to check for proper exceptions and remove the assert\_ calls * Adds missing yield statement that was causing partitioning to intermittently fail * Merged lp:~ewanmellor/nova/lp609792 * Merged lp:~ewanmellor/nova/lp609791 * Replace hardcoded "nova" with FLAGS.control\_exchange * Set durable=False on TopicPublisher, so that it matches the flag on TopicConsumer. This ensures that either redeclaration of the control\_exchange will use the same flag, and avoid AMQPChannelException * Add an import so that nova-compute sees the images\_path flag, so that it can be used on the command line * Return a 404 when attempting to access a bucket that does not exist * Removed creation of process pools. We don't use these any more now that we're using process.simple\_execute * Fix assertion "Someone released me too many times: too many tokens!" when more than one process was running at the same time. This was caused by the override of SharedPool.\_\_new\_\_ not stopping ProcessPool.\_\_init\_\_ from being run whenever process.simple\_execute is called * Always make sure to set a Date headers, since it's needed to calculate the S3 Auth header * Updated the README file * Updated sphinx layout to a two-dir layout like swift. 
Updated a doc string to get rid of a Sphinx warning * Updated URLs in the README file to point to current locations * Add missing import following merge from trunk (cset 150) * Merged with trunk, since a lot of useful things have gone in there recently * fixed bug where partition code was sometimes failing due to initial dd not being yielded properly * Fixed bug 608505 - was freeing the wrong address (should have freed 'secondaddress', was freeing 'address') * renamed xxxnode to xxservice * Add (completely untested) code to include an Authorization header for the S3 request to fetch an image * Check signature for S3 requests * Fixes problem with describe-addresses returning all public ips instead of the ones for just the user's project * Fix for extra spaces in export statements in scripts relating to x509 certs * Adds a Makefile to fill dependencies for testing * Fix syslogging of exceptions by stripping newlines from the exception info * Merged fix for bug 608505 so unit tests pass * Check exit codes when spawning processes by default * Nobody wants to take on this twisted cleanup. It works for now, but could be much nicer if twisted has a nice hook-point for exception mapping * syslog changes * typo fixes and extra print statements removed * added todo for ABC * Fixed bug 608505 - was freeing the wrong address (should have freed 'secondaddress', was freeing 'address') * Merged trunk, fixed extra references to fake\_users * refactoring of imports for fakeldapdriver * make nova-network executable * refactor daemons to use common base class in preparation for network refactor * reorder import statement and remove commented-out test case that is the same as api\_unittest in objectstore\_unittest * Fixes up Bucket to throw proper NotFound and NotEmpty exceptions in constructor and delete() method, and fixes up objectstore\_unittest to properly use assertRaises() to check for proper exceptions and remove the assert\_ calls * Fix bug 607501. Raise 403, not exception if Authorization header not passed. Also added missing call to request.finish() & Python exception-handling style tweak * merge with twisted-volume * remove all of the unused saved return values from attach\_to\_twisted * fix for describe addresses showing everyone's public ips * update the logic for calculating network sizes * Locally administered mac addresses have the second least significant bit of the most significant byte set. If this byte is set then udev on ubuntu doesn't set persistent net rules * use a locally administered mac address so it isn't saved by udev * Convert processpool to a singleton, and switch node.py calls to use it. (Replaces passing a processpool object around all the time.) * Fixed the broken reference to * remove spaces from export statements in scripts relating to certs * Cleanups * Able to set up DNS, and remove udev network rules * Move self.ldap to global ldap to make changes easier if we ever implement settings * Cleanup per suggestions * network unittest clean up * Test cleanup, make driver return dictionaries and construct objects in manager * Able to boot without kernel or ramdisk. libvirt.xml.template is now a Cheetah template * Merged https://code.launchpad.net/~justin-fathomdb/nova/copy-error-handling * Merged bug fixes * Map exceptions to 404 / 403 codes, as was done before the move to twisted. However, I don't think this is the right way to do this in Twisted. 
For example, exceptions thrown after the render method returns will not be mapped * Merged lp:~justin-fathomdb/nova/bug607501 * Merged trunk. Fixed new references to UserManager * I put the call to request.finish() in the wrong place. :-( * More docstrings, don't autocreate projects * Raise 401, not exception if Authorization header not passed. Also minor fixes & Python exception-handling style tweak * LdapDriver cleanup: docstrings and parameter ordering * Ask curl to set exit code if resource was not found * Fixes to dhcp lease code to use a flagfile * merged trunk * Massive refactor of users.py * Hmm, serves me right for not understanding the request, eh? :) Now too\_many\_addresses test case is idempotent in regards to running in isolation and uses self.flags.network\_size instead of the magic number 32 * Redirect STDERR to output to an errlog file when running run\_tests.py * Send message ack in rpc.call and make queues durable * Fixed name change caused by remove-vendor merge * Replace tornado objectstore with twisted web * merged in trunk and fixed import merge errors * First commit of XenAPI-specific code (i.e. connections to the open-source community project Xen Cloud Platform, or the open-source commercial product Citrix XenServer) * Remove the tight coupling between nova.compute.monitor and libvirt. The libvirt-specific code was placed in nova.virt.libvirt\_conn by the last changeset. This greatly simplifies the monitor code, and puts the libvirt-specific XML record parsing in a libvirt-specific place * In preparation for XenAPI support, refactor the interface between nova.compute and the hypervisor (i.e. libvirt) * Fixed references to nova.utils that were broken by a change of import statement in the remove-vendor merge * Remove s3\_internal\_port setting. Objectstore should be able to handle the beatings now. As such, nginx is no longer needed, so it's removed from the dependencies and the configuration files are removed * Replace nova-objectstore with a twistd style wrapper. Add a get\_application method to objectstore handler * Minor post-merge fixes * Fixed \_redis\_name and \_redis\_key * Add build\_sphinx support * fix conf file to no longer have daemonize=1 because twistd daemonizes by default * make nova-volume start with twisteds daemonize stuff * Makin the queues non-durable by default * Ack messages during call so rabbit leaks less * simplify call to simple\_execute * merge extra singleton-pool changes * Added a config file to let setup.py drive building the sphinx docs * make simple method wrapper for process pool simple\_execute * change volume code to use twisted * remove calls to runthis from node * merge with singleton pool * Removed unused Pool from process.py, added a singleton pool called SharedPool, changed calls in node to use singleton pool * Fixes things that were not quite right after big merge party * Make S3 API handler more idiomatic Twisted Web-y * \_redis\_name wasn't picking up override\_type correctly, and \_redis\_key wasn't using it * Quick fix to variable names for consistency in documentation.. * Adds a fix to the idempotency of the test\_too\_many\_addresses test case by adding a simple property to the BaseNetwork class and calculating the number of available IPs by asking the network class to tell the test how many static and preallocated IP addresses are in use before entering the loop to "blow up" the address allocation.. * Adds a flag to redirect STDERR when running run\_tests.py. 
Defaults to a truncate-on-write logfile named run\_tests.err.log. Adds ignore rule for generated errlog file * no more print in storage unittest * reorder imports spacing * Fixes to dhcp lease code to use a flagfile * merged trunk * This branch fixes some unfortunate interaction between Nova and boto * Make sure we pass str objects instead of unicode objects to boto as our credentials * remove import of vendor since we have PPA now * Updates the test suite to work * Disabled a tmpdir cleanup * remove vendor * update copyrights * Volume\_ID identifier needed a return in the property. Also looking for race conditions in the destructor * bin to import images from canonical image store * add logging import to datastore * fix merge errors * change default vpn ports and remove complex vpn ip iteration * fix reference to BasicModel and imports * Cleanups related to BasicModel (whitespace, names, etc) * Updating buildbot address * Fixed buildbot * work on importing images * When destroying an Instance, disassociate with Node * Smiteme * Smiteme * Smiteme * Smiteme * Move BasicModel into datastore * Smiteme * Smiteme * Whitespace change * unhardcode the binary name * Fooish * Finish singletonizing UserManager usage * Debian package additions for simple network template * Foo * Whitespace fix * Remove debug statement * Foo * fix a typo * Added build-deps to debian/control that are needed to run test suite. Fixed an error in a test case * optimization to not load all instances when describe instances is called * More buildbot testing * More buildbot testing * More buildbot testing * More buildbot testing * More buildbot testing * More buildbot testing * Addin buildbot * Fix merge changelog and merge errors in utils.py * Fixes from code review * release 0.2.2-10 * fix for extra space in vblade-persist * Avoid using s-expr, pkcs1-conv, and lsh-export-key * release 0.2.2-9 * fixed bug in auth group\_exists * Move nova related configuration files into /etc/nova/ * move check for none before get mpi data * Refactored smoketests flags * Fixes to smoketest flags * Minor smoketest refactoring * fixes from code review * typo in exception in crypto * datetime import typo * added missing isotime method from utils * release 0.2.2-8 * missed a comma * release 0.2.2-7 * use a flag for cert subject * whitespace fixes and header changes * Fixed the os.environ patch (bogus) * Fixes as per Vish review (whitespace, import statements) * Off by one error in the allocation test (can someone check my subnet math?) * Adding more tests, refactoring for dhcp logic * Got dhcpleasor working, with test ENV for testing, and rpc.cast for real world * Capture signals from dnsmasq and use them to update network state * Relax the Twisted dependency to python-twisted-core (rather than the full stack) * releasing version 0.3.0+really0.2.2-0ubuntu0ppa3 * If set, pass KernelId and RamdiskId from RunInstances call to the target compute node * Add a default flag file for nova-manage to help it find the CA * Ship the CA directory in nova-common * Add a dependency on nginx from nova-objectsstore and install a suitable configuration file * releasing version 0.3.0+really0.2.2-0ubuntu0ppa2 * Don't pass --daemonize=1 to nova-compute. 
It's already daemonising by default * Add debian/nova-common.dirs to create var/lib/nova/{buckets,CA,images,instances,keys,networks} * keeper\_path is really caled datastore\_path * Fixed package version * Move templates from python directories to /usr/share/nova * Added --network\_path setting to nova-compute's flagfile * releasing version 0.3.0+really0.2.2-0ubuntu0ppa1 * Use rmdir instead of rm -rf to remove a tempdir * Set better defaults in flagfiles * Fixes and add interface template * Simple network injection * Simple Network avoids vlans * clean a few merge errors from network * Add curl as a dependency of nova-compute * getting started update * getting started update * Remove \_s errors from merge * fix typos in node from merge * remove spaces from default cert * Make sure get\_assigned\_vlans and BaseNetwork.hosts always return a dict, even if the key is currently empty in the KVS * Add \_s instance attribute to Instance class. It's referenced in a bunch of places, but is never set. This is unlikely to be the right fix (why have two attributes pointing to the same object?), but it seems to make ends meet * Replace spaces in x509 cert subject with underscores. It ends up getting split(' ')'ed and passed to subprocess.Popen, so it needs to not have spaces in it, otherwise openssl gets very upset * Expand somewhat on the short and long descriptions in debian/control * Use separate configuration files for the different daemons * Removed trailing whitespace from header * Updated licenses * Added flags to smoketests. General cleanup * removed all references to keeper * reformatting * Vpn ips and ports use redis * review reformat * code review reformat * We need to be able to look up Instance by Node (live migration) * Get rid of RedisModel * formatting fixes and refactoring from code review * reformatting to fit within 80 characters * simplified handling of tempdir for Fakes * fix for multiple shelves for each volume node * add object class violation exception to fakeldap * remove spaces from default cert * remove silly default from generate cert * fix of fakeldap imports and exceptions * More Comments, cleanup, and reformatting * users.py cleanup for exception handling and typo * Make fakeldap use redis * Refactor network.Vlan to be a BasicModel, since it touched Redis * bugfix: rename \_s to datamodel in Node in some places it was overlooked * fix key injection script * Fixes based on code review 27001 * added TODO * Admin API + Worker Tracking * fixed typo * style cleanup * add more info to vpn list * Use flag for vpn key suffix instead of hardcoded string * don't fail to create vpn key if dir exists * Create Volume should only take an integer between 0 and 1000 * Placeholders for missing describe commands * Set forward delay to zero (partial fix to bug #518) * more comment reformatting * fit comment within 80 lines * removed extraneous reference to rpc in objectstore unit test * Fix queue connection bugs * Fix deletion of user when he is the last member of the group * Fix error message for checking for projectmanager role * Installer now creates global developer role * Removed trailing whitespace from header * added nova-instancemonitor debian config * Updated licenses * Added flags to smoketests. 
General cleanup * A few missing files from the twisted patch * Tweaks to get instancemonitor running * Initial commit of nodemonitor * Create DescribeImageAttribute api method * release 0.2.2-6 * disk.py needed input for key injection to work * release 2.2-5 * message checking callbacks only need to run 10 times a second * release 2.2-4 * trackback formatting isn't logging correctly * documentation updates * fix missing tab in nova-manage * Release 2.2-3 * use logger to print trace of unhandled exceptions * add exit status to nova-manage * fix fakeldap so it can use redis keeper * fix is\_running failing because state was stored as a string * more commands in nova-manage for projects and roles * More volume test fixes * typo in reboot instances * Fix mount of drive for test image * don't need sudo anymore * Cleaning up smoketests * boto uses instance\_type not size * Fix to volume smoketests * fix display of project name for admin in describe instances * make sure to deexpress before we remove the host since deexpress uses the host * fix error in disassociate address * fixed reversed filtering logic * filter keypairs for vpn keys * allow multiple vpn connections with the same credentials * Added admin command to restart networks * hide vpn instances unless you are an admin and allow run\_instances to launch vpn image even if it is private * typo in my ping call * try to ping vpn instances * sensible defaults for instance types * add missing import to pipelib * Give vpns the proper ip address * Fix format addresses * Release 0.2.2-2 * fix more casing errors and make attachment set print * removed extraneous .volume\_id * don't allow volumes to be attached to the same mountpoint * fix case for volume attributes * fix sectors off by one * Don't use keeper for instances * fix default state to be 0 instead of pending * Release 0.2.2 * Fix for mpi cpu reporting * fix detach volume * fix status code printing in cloud * add project ids to volumes * add back accidentally removed bridge name. str is reserved, so don't use it as a variable name * whitespace fixes and format instances set of object fixes * Use instdir to iterate through instances * fix bridge name * Adding basic validation of volume size on creation, plus tests for it * finished gutting keeper from volume * First pass at validation unit tests. Haven't figured out class methods yet * Removing keeper sludge * Set volume status properly, first pass at validation decorators * Adding missing default values and fixing bare Redis fetch for volume list * one more handler typo * fix objectstore handler typo * fix modify image attribute typo * NetworkNode doesn't exist anymore * Added back in missing gateway property on networks * Refactored Instance to get rid of \_s bits, and fixed some bugs in state management * Delete instance files on shutdown * Flush redis db in setup and teardown of tests * Cleaning up my accidental merge of the docs branch * change pipelib to work with projects * Volumes support intermediate state. 
Don't have to cast to storage nodes for attach/detach anymore, just let node update redis with state * Adding nojekyll for directories * Fix for #437 (deleting attached volumes), plus some >9 blade\_id fixes * fix instance iteration to use self.instdir.all instead of older iterators * nasa ldap defaults * sensible rbac defaults * Tests for rbac code * Patch to allow rbac * Adding mpi data * Adding cloudpipe and vpn data back in to network.py * how we build our debs * Revert "fix a bug with AOE number generation" * re-added cloudpipe * devin's smoketests * tools to clean vlans and run our old install script * fix a bug with AOE number generation * Initial commit of nodemonitor * Create DescribeImageAttribute api method * Create DescribeImageAttribute api method * More rackspace API * git checkpoint commit post-wsgi * update spacing * implement image serving in objectstore so nginx isn't required in development * update twitter username * make a "Running" topic instead of having it flow under "Configuration" * Make nginx config be in a code block * More doc updates: nginx & pycurl * Add a README, because GitHub loves them. Update the getting started docs * update spacing * Commit what I have almost working before diverging * first go at moving from tornado to twisted * implement image serving in objectstore so nginx isn't required in development * update twitter username * Update documentation * fix for reactor.spawnProcess sending deprecation warning * patch from issue 4001 * Fix for LoopingCall failing Added in exception logging around amqp calls Creating deferred in receive before ack() message was causing IOError (interrupted system calls), probably because the same message was getting processed twice in some situations, causing the system calls to be doubled. Moving the ack() earlier fixed the problem. The code works now with an interval of 0 but that causes heavy processor usage. An interval of 0.01 keeps the cpu usage within reasonable limits * get rid of anyjson in rpc and fix bad reference to rpc.Connection * gateway undefined * fix cloud instances method * Various cloud fixes * make get\_my\_ip return 127.0.0.1 for testing * Adds a Twisted implementation of a process pool * make a "Running" topic instead of having it flow under "Configuration" * Make nginx config be in a code block * More doc updates: nginx & pycurl * Add a README, because GitHub loves them. 
Update the getting started docs * whitespace fixes for nova/utils.py * Add project methods to nova-manage * Fix novarc to use project when creating access key * removed reference to nonexistent flag * Josh's networking refactor, modified to work with projects * Merged Vish's work on adding projects to nova * missed the gitignore * initial commit ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/HACKING.rst0000664000175000017500000002136000000000000014362 0ustar00zuulzuul00000000000000Nova Style Commandments ======================= - Step 1: Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ - Step 2: Read on Nova Specific Commandments --------------------------- - [N307] ``nova.db`` imports are not allowed in ``nova/virt/*`` - [N309] no db session in public API methods (disabled) This enforces a guideline defined in ``oslo.db.sqlalchemy.session`` - [N310] timeutils.utcnow() wrapper must be used instead of direct calls to datetime.datetime.utcnow() to make it easy to override its return value in tests - [N311] importing code from other virt drivers forbidden Code that needs to be shared between virt drivers should be moved into a common module - [N312] using config vars from other virt drivers forbidden Config parameters that need to be shared between virt drivers should be moved into a common module - [N313] capitalize help string Config parameter help strings should have a capitalized first letter - [N316] Change assertTrue(isinstance(A, B)) by optimal assert like assertIsInstance(A, B). - [N317] Change assertEqual(type(A), B) by optimal assert like assertIsInstance(A, B) - [N319] Validate that logs are not translated. - [N320] Setting CONF.* attributes directly in tests is forbidden. Use self.flags(option=value) instead. - [N322] Method's default argument shouldn't be mutable - [N323] Ensure that the _() function is explicitly imported to ensure proper translations. - [N324] Ensure that jsonutils.%(fun)s must be used instead of json.%(fun)s - [N326] Translated messages cannot be concatenated. String should be included in translated message. - [N332] Check that the api_version decorator is the first decorator on a method - [N334] Change assertTrue/False(A in/not in B, message) to the more specific assertIn/NotIn(A, B, message) - [N335] Check for usage of deprecated assertRaisesRegexp - [N336] Must use a dict comprehension instead of a dict constructor with a sequence of key-value pairs. - [N337] Don't import translation in tests - [N338] Change assertEqual(A in B, True), assertEqual(True, A in B), assertEqual(A in B, False) or assertEqual(False, A in B) to the more specific assertIn/NotIn(A, B) - [N339] Check common raise_feature_not_supported() is used for v2.1 HTTPNotImplemented response. - [N340] Check nova.utils.spawn() is used instead of greenthread.spawn() and eventlet.spawn() - [N341] contextlib.nested is deprecated - [N342] Config options should be in the central location ``nova/conf/`` - [N343] Check for common double word typos - [N348] Deprecated library function os.popen() - [N349] Check for closures in tests which are not used - [N350] Policy registration should be in the central location ``nova/policies/`` - [N351] Do not use the oslo_policy.policy.Enforcer.enforce() method. - [N352] LOG.warn is deprecated. Enforce use of LOG.warning. - [N353] Validate that context objects is not passed in logging calls. 
- [N355] Enforce use of assertTrue/assertFalse - [N356] Enforce use of assertIs/assertIsNot - [N357] Use oslo_utils.uuidutils or uuidsentinel (in case of test cases) to generate UUID instead of uuid4(). - [N358] Return must always be followed by a space when returning a value. - [N359] Check for redundant import aliases. - [N360] Yield must always be followed by a space when yielding a value. - [N361] Check for usage of deprecated assertRegexpMatches and assertNotRegexpMatches - [N362] Imports for privsep modules should be specific. Use "import nova.privsep.path", not "from nova.privsep import path". This ensures callers know that the method they're calling is using privilege escalation. - [N363] Disallow ``(not_a_tuple)`` because you meant ``(a_tuple_of_one,)``. - [N364] Check non-existent mock assertion methods and attributes. - [N365] Check misuse of assertTrue/assertIsNone. - [N366] The assert_has_calls is a method rather than a variable. - [N367] Disallow aliasing the mock.Mock and similar classes in tests. - [N368] Reject if the mock.Mock class is used as a replacement value instead of an instance of a mock.Mock during patching in tests. - [N369] oslo_concurrency.lockutils.ReaderWriterLock() or fasteners.ReaderWriterLock() does not function correctly with eventlet patched code. Use nova.utils.ReaderWriterLock() instead. - [N370] Don't use or import six - [N371] You must explicitly import python's mock: ``from unittest import mock`` - [N372] Don't use the setDaemon method. Use the daemon attribute instead. - [N373] Don't use eventlet specific concurrency primitives. Use the one from stdlib instead. E.g. eventlet.sleep => time.sleep Creating Unit Tests ------------------- For every new feature, unit tests should be created that both test and (implicitly) document the usage of said feature. If submitting a patch for a bug that had no unit test, a new passing unit test should be added. If a submitted bug fix does have a unit test, be sure to add a new one that fails without the patch and passes with the patch. For more information on creating unit tests and utilizing the testing infrastructure in OpenStack Nova, please read ``nova/tests/unit/README.rst``. Running Tests ------------- The testing system is based on a combination of tox and stestr. The canonical approach to running tests is to simply run the command ``tox``. This will create virtual environments, populate them with dependencies and run all of the tests that OpenStack CI systems run. Behind the scenes, tox is running ``stestr run``, but is set up such that you can supply any additional stestr arguments that are needed to tox. For example, you can run: ``tox -- --analyze-isolation`` to cause tox to tell stestr to add --analyze-isolation to its argument list. Python packages may also have dependencies that are outside of tox's ability to install. Please refer to `Development Quickstart`_ for a list of those packages on Ubuntu, Fedora and Mac OS X. To run a single or restricted set of tests, pass a regex that matches the class name containing the tests as an extra ``tox`` argument; e.g. ``tox -- TestWSGIServer`` (note the double-hyphen) will test all WSGI server tests from ``nova/tests/unit/test_wsgi.py``; ``-- TestWSGIServer.test_uri_length_limit`` would run just that test, and ``-- TestWSGIServer|TestWSGIServerWithSSL`` would run tests from both classes. It is also possible to run the tests inside of a virtual environment you have created, or it is possible that you have all of the dependencies installed locally already.
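For instance, one way to prepare such a local environment is shown below (a sketch only, not a prescribed workflow; it assumes a checkout of the source tree and uses its ``test-requirements.txt``)::

    # install nova itself in editable mode, then the test dependencies
    pip install -e .
    pip install -r test-requirements.txt
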
In this case, you can interact with the stestr command directly. Running ``stestr run`` will run the entire test suite. ``stestr run --concurrency=1`` will run tests serially (by default, stestr runs tests in parallel). More information about stestr can be found at: http://stestr.readthedocs.io/ Since when testing locally, running the entire test suite on a regular basis is prohibitively expensive, the ``tools/run-tests-for-diff.sh`` script is provided as a convenient way to run selected tests using output from ``git diff``. For example, this allows running only the test files changed/added in the working tree:: tools/run-tests-for-diff.sh However since it passes its arguments directly to ``git diff``, tests can be selected in lots of other interesting ways, e.g. it can run all tests affected by a single commit at the tip of a given branch:: tools/run-tests-for-diff.sh mybranch^! or all those affected by a range of commits, e.g. a branch containing a whole patch series for a blueprint:: tools/run-tests-for-diff.sh gerrit/master..bp/my-blueprint It supports the same ``-HEAD`` invocation syntax as ``flake8wrap.sh`` (as used by the ``fast8`` tox environment):: tools/run-tests-for-diff.sh -HEAD By default tests log at ``INFO`` level. It is possible to make them log at ``DEBUG`` level by exporting the ``OS_DEBUG`` environment variable to ``True``. .. _Development Quickstart: https://docs.openstack.org/nova/latest/contributor/development-environment.html Building Docs ------------- Normal Sphinx docs can be built via the setuptools ``build_sphinx`` command. To do this via ``tox``, simply run ``tox -e docs``, which will cause a virtualenv with all of the needed dependencies to be created and then inside of the virtualenv, the docs will be created and put into doc/build/html. Building a PDF of the Documentation ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If you'd like a PDF of the documentation, you'll need LaTeX and ImageMagick installed, and additionally some fonts. On Ubuntu systems, you can get what you need with:: apt-get install texlive-full imagemagick Then you can use the ``build_latex_pdf.sh`` script in tools/ to take care of both the sphinx latex generation and the latex compilation. For example:: tools/build_latex_pdf.sh The script must be run from the root of the Nova repository and it'll copy the output pdf to Nova.pdf in that directory. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/LICENSE0000664000175000017500000002363700000000000013602 0ustar00zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/MAINTAINERS0000664000175000017500000000135400000000000014262 0ustar00zuulzuul00000000000000Nova doesn't have maintainers in the same way as the Linux Kernel. However, we do have sub-teams who maintain parts of Nova and a series of nominated "czars" to deal with cross functional tasks. Each of these sub-teams and roles are documented on our wiki at https://wiki.openstack.org/wiki/Nova You can find helpful contacts for many parts of our code repository at https://wiki.openstack.org/wiki/Nova#Developer_Contacts We also have a page which documents tips and mentoring opportunities for new Nova developers at https://wiki.openstack.org/wiki/Nova/Mentoring Finally, you should also check out our developer reference at https://docs.openstack.org/nova/latest/contributor/index.html Thanks for your interest in Nova, please come again! 
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315690.089614 nova-32.0.0/PKG-INFO0000644000175000017500000001474300000000000013666 0ustar00zuulzuul00000000000000Metadata-Version: 2.1 Name: nova Version: 32.0.0 Summary: Cloud computing fabric controller Home-page: https://docs.openstack.org/nova/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org Project-URL: Bug Tracker, https://bugs.launchpad.net/nova/ Project-URL: Documentation, https://docs.openstack.org/nova/ Project-URL: Source Code, https://opendev.org/openstack/nova Classifier: Development Status :: 5 - Production/Stable Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: Implementation :: CPython Requires-Python: >=3.10 License-File: LICENSE Requires-Dist: pbr>=5.8.0 Requires-Dist: SQLAlchemy>=1.4.13 Requires-Dist: decorator>=4.1.0 Requires-Dist: eventlet>=0.30.1 Requires-Dist: Jinja2>=2.10 Requires-Dist: keystonemiddleware>=4.20.0 Requires-Dist: lxml>=4.5.0 Requires-Dist: Routes>=2.3.1 Requires-Dist: cryptography>=2.7 Requires-Dist: WebOb>=1.8.2 Requires-Dist: greenlet>=0.4.15 Requires-Dist: PasteDeploy>=1.5.0 Requires-Dist: Paste>=2.0.2 Requires-Dist: PrettyTable>=0.7.1 Requires-Dist: alembic>=1.5.0 Requires-Dist: netaddr>=0.7.18 Requires-Dist: paramiko>=2.7.1 Requires-Dist: iso8601>=0.1.11 Requires-Dist: jsonschema>=4.0.0 Requires-Dist: python-cinderclient>=4.0.1 Requires-Dist: keystoneauth1>=3.16.0 Requires-Dist: python-neutronclient>=7.1.0 Requires-Dist: python-glanceclient>=4.7.0 Requires-Dist: requests>=2.25.1 Requires-Dist: stevedore>=1.20.0 Requires-Dist: websockify>=0.9.0 Requires-Dist: oslo.cache>=1.26.0 Requires-Dist: oslo.concurrency>=5.0.1 Requires-Dist: oslo.config>=8.6.0 Requires-Dist: oslo.context>=3.4.0 Requires-Dist: oslo.log>=4.6.1 Requires-Dist: oslo.limit>=1.5.0 Requires-Dist: oslo.reports>=1.18.0 Requires-Dist: oslo.serialization>=4.2.0 Requires-Dist: oslo.upgradecheck>=1.3.0 Requires-Dist: oslo.utils>=8.0.0 Requires-Dist: oslo.db>=10.0.0 Requires-Dist: oslo.rootwrap>=5.15.0 Requires-Dist: oslo.messaging>=14.1.0 Requires-Dist: oslo.policy>=4.5.0 Requires-Dist: oslo.privsep>=2.6.2 Requires-Dist: oslo.i18n>=5.1.0 Requires-Dist: oslo.service[threading]>=4.2.0 Requires-Dist: rfc3986>=1.2.0 Requires-Dist: oslo.middleware>=3.31.0 Requires-Dist: psutil>=3.2.2 Requires-Dist: oslo.versionedobjects>=1.35.0 Requires-Dist: os-brick>=6.10.0 Requires-Dist: os-resource-classes>=1.1.0 Requires-Dist: os-traits>=3.5.0 Requires-Dist: os-vif>=3.1.0 Requires-Dist: castellan>=0.16.0 Requires-Dist: microversion-parse>=0.2.1 Requires-Dist: tooz>=1.58.0 Requires-Dist: cursive>=0.2.1 Requires-Dist: retrying>=1.3.3 Requires-Dist: os-service-types>=1.7.0 Requires-Dist: python-dateutil>=2.7.0 Requires-Dist: futurist>=1.8.0 Requires-Dist: openstacksdk>=4.4.0 Requires-Dist: PyYAML>=5.1 Provides-Extra: osprofiler Requires-Dist: osprofiler>=1.4.0; extra == "osprofiler" Provides-Extra: zvm Requires-Dist: 
zVMCloudConnector>=1.3.0; sys_platform != "win32" and extra == "zvm" Provides-Extra: vmware Requires-Dist: oslo.vmware>=3.6.0; extra == "vmware" Provides-Extra: test Requires-Dist: hacking==6.1.0; extra == "test" Requires-Dist: coverage>=4.4.1; extra == "test" Requires-Dist: ddt>=1.2.1; extra == "test" Requires-Dist: fixtures>=3.0.0; extra == "test" Requires-Dist: psycopg2-binary>=2.8; extra == "test" Requires-Dist: PyMySQL>=0.8.0; extra == "test" Requires-Dist: python-barbicanclient>=4.5.2; extra == "test" Requires-Dist: requests-mock>=1.2.0; extra == "test" Requires-Dist: oslotest>=3.8.0; extra == "test" Requires-Dist: stestr>=2.0.0; extra == "test" Requires-Dist: osprofiler>=1.4.0; extra == "test" Requires-Dist: testresources>=2.0.0; extra == "test" Requires-Dist: testscenarios>=0.4; extra == "test" Requires-Dist: testtools>=2.5.0; extra == "test" Requires-Dist: bandit>=1.1.0; extra == "test" Requires-Dist: gabbi>=1.35.0; extra == "test" Requires-Dist: wsgi-intercept>=1.7.0; extra == "test" ============== OpenStack Nova ============== OpenStack Nova provides a cloud computing fabric controller, supporting a wide variety of compute technologies, including: libvirt (KVM, Xen, LXC and more), VMware and OpenStack Ironic. Use the following resources to learn more. API --- To learn how to use Nova's API, consult the documentation available online at: - `Compute API Guide `__ - `Compute API Reference `__ For more information on OpenStack APIs, SDKs and CLIs in general, refer to: - `OpenStack for App Developers `__ - `Development resources for OpenStack clouds `__ Operators --------- To learn how to deploy and configure OpenStack Nova, consult the documentation available online at: - `OpenStack Nova `__ In the unfortunate event that bugs are discovered, they should be reported to the appropriate bug tracker. If you obtained the software from a 3rd party operating system vendor, it is often wise to use their own bug tracker for reporting problems. In all other cases use the master OpenStack bug tracker, available at: - `Bug Tracker `__ Developers ---------- For information on how to contribute to Nova, please see the contents of the CONTRIBUTING.rst. Any new code must follow the development guidelines detailed in the HACKING.rst file, and pass all unit tests. To understand better the processes that the team is using, please refer to the `Process document `__. Further developer focused documentation is available at: - `Official Nova Documentation `__ - `Official Client Documentation `__ Other Information ----------------- During each `Summit`_ and `Project Team Gathering`_, we agree on what the whole community wants to focus on for the upcoming release. The plans for nova can be found at: - `Nova Specs `__ .. _Summit: https://www.openstack.org/summit/ .. _Project Team Gathering: https://www.openstack.org/ptg/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/README.rst0000664000175000017500000000436700000000000014263 0ustar00zuulzuul00000000000000============== OpenStack Nova ============== OpenStack Nova provides a cloud computing fabric controller, supporting a wide variety of compute technologies, including: libvirt (KVM, Xen, LXC and more), VMware and OpenStack Ironic. Use the following resources to learn more. 
API --- To learn how to use Nova's API, consult the documentation available online at: - `Compute API Guide `__ - `Compute API Reference `__ For more information on OpenStack APIs, SDKs and CLIs in general, refer to: - `OpenStack for App Developers `__ - `Development resources for OpenStack clouds `__ Operators --------- To learn how to deploy and configure OpenStack Nova, consult the documentation available online at: - `OpenStack Nova `__ In the unfortunate event that bugs are discovered, they should be reported to the appropriate bug tracker. If you obtained the software from a 3rd party operating system vendor, it is often wise to use their own bug tracker for reporting problems. In all other cases use the master OpenStack bug tracker, available at: - `Bug Tracker `__ Developers ---------- For information on how to contribute to Nova, please see the contents of the CONTRIBUTING.rst. Any new code must follow the development guidelines detailed in the HACKING.rst file, and pass all unit tests. To understand better the processes that the team is using, please refer to the `Process document `__. Further developer focused documentation is available at: - `Official Nova Documentation `__ - `Official Client Documentation `__ Other Information ----------------- During each `Summit`_ and `Project Team Gathering`_, we agree on what the whole community wants to focus on for the upcoming release. The plans for nova can be found at: - `Nova Specs `__ .. _Summit: https://www.openstack.org/summit/ .. _Project Team Gathering: https://www.openstack.org/ptg/ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315688.8296046 nova-32.0.0/api-guide/0000775000175000017500000000000000000000000014426 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315688.9896057 nova-32.0.0/api-guide/source/0000775000175000017500000000000000000000000015726 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-guide/source/accelerator-support.rst0000664000175000017500000001044300000000000022460 0ustar00zuulzuul00000000000000============================== Using accelerators with Cyborg ============================== Starting from microversion 2.82, nova supports creating servers with accelerators provisioned with the Cyborg service, which provides lifecycle management for accelerators. To launch servers with accelerators, the administrator (or an user with appropriate privileges) must do the following: * Create a device profile in Cyborg, which specifies what accelerator resources need to be provisioned. (See `Cyborg device profiles API`_.) .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/#device-profiles * Set the device profile name as an extra spec in a chosen flavor, with this syntax: .. code:: accel:device_profile=$device_profile_name The chosen flavor may be a newly created one or an existing one. * Use that flavor to create a server: .. code:: openstack server create --flavor $myflavor --image $myimage $servername Nova supports only specific operations for instances with accelerators. The lists of supported and unsupported operations are as below: * Supported operations. * Creation and deletion. * Reboots (soft and hard). * Pause and unpause. * Stop and start. * Take a snapshot. * Backup. * Rescue and unrescue. * Rebuild. * Evacuate. * Shelve and unshelve. 
* Unsupported operations * Resize. * Suspend and resume. * Cold migration. * Live migration. .. versionchanged:: 22.0.0(Victoria) Added support for rebuild and evacuate operations. .. versionchanged:: 23.0.0(Wallaby) Added support for shelve and unshelve operations. Some operations, such as lock and unlock, work as they are effectively no-ops for accelerators. Caveats ------- .. note:: This information is correct as of the 21.0.0 Ussuri release. Where improvements have been made or issues fixed, they are noted per item. For nested resource providers: * Creating servers with accelerators provisioned with the Cyborg service, if a flavor asks for resources that are provided by nested Resource Provider inventories (e.g. vGPU) and the user wants multi-create (i.e. say --max 2) then the scheduler could be returning a NoValidHosts exception even if each nested Resource Provider can support at least one specific instance, if the total wanted capacity is not supported by only one nested Resource Provider. (See `bug 1874664 `_.) For example, creating servers with accelerators provisioned with the Cyborg service, if two children RPs have 4 vGPU inventories each: * You can ask for a device profile in the flavor with 2 vGPU with --max 2. * But you can't ask for a device profile in the flavor with 4 vGPU and --max 2. ======================= Using SRIOV with Cyborg ======================= Starting from Xena release, nova supports creating servers with SRIOV provisioned with the Cyborg service. To launch servers with accelerators, the administrator (or an user with appropriate privileges) must do the following: * Create a device profile in Cyborg, which specifies what accelerator resources need to be provisioned. (See `Cyborg device profiles API`_, `Cyborg SRIOV Test Report`_.) .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/#device-profiles .. _`Cyborg SRIOV Test Report`: https://wiki.openstack.org/wiki/Cyborg/TestReport/IntelNic * create a 'accelerator-direct' vnic type port with the device-profile name set as cyborg device profile with this syntax: .. code:: openstack port create $port_name --network $network_name --vnic-type=accelerator-direct --device-profile $device_profile_name * create a server with that port: .. code:: openstack server create --flavor $myflavor --image $myimage $servername --nic port-id=$port-ID Nova supports only specific operations for instances with accelerators. The lists of supported and unsupported operations are as below: * Supported operations. * Creation and deletion. * Reboots (soft and hard). * Pause and unpause. * Stop and start. * Rebuild. * Rescue and unrescue. * Take a snapshot. * Backup. * Unsupported operations * Resize. * Suspend and resume. * Cold migration. * Live migration. * Shelve and unshelve. * Evacuate. * Attach/detach a port with device profile. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-guide/source/authentication.rst0000664000175000017500000000102300000000000021473 0ustar00zuulzuul00000000000000============== Authentication ============== Each HTTP request against the OpenStack Compute system requires the inclusion of specific authentication credentials. A single deployment may support multiple authentication schemes (OAuth, Basic Auth, Token). The authentication scheme is provided by the OpenStack Identity service. You can contact your provider to determine the best way to authenticate against the Compute API. .. 
note:: Some authentication schemes may require that the API operate using SSL over HTTP (HTTPS). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-guide/source/conf.py0000664000175000017500000000637300000000000017236 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Compute API Guide documentation build configuration file # Refer to the Sphinx documentation for advice on configuring this file: # # http://www.sphinx-doc.org/en/stable/config.html # -- General configuration ------------------------------------------------ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'sphinx.ext.todo', ] # The suffix of source filenames. source_suffix = '.rst' # The 'todo' and 'todolist' directive produce output. todo_include_todos = True # The master toctree document. master_doc = 'index' # General information about the project. project = 'Compute API Guide' copyright = '2015-present, OpenStack contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '2.1.0' # The full version, including alpha/beta/rc tags. release = '2.1.0' # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [] # If false, no index is generated. html_use_index = True # -- Options for LaTeX output --------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ ( 'index', 'ComputeAPI.tex', 'Compute API Documentation', 'OpenStack contributors', 'manual', ), ] # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] # -- Options for PDF output -------------------------------------------------- pdf_documents = [ ( 'index', 'ComputeAPIGuide', 'Compute API Guide', 'OpenStack contributors', ) ] # -- Options for openstackdocstheme ------------------------------------------- openstackdocs_projects = [ 'glance', 'nova', 'neutron', 'placement', ] openstackdocs_bug_tag = 'api-guide' openstackdocs_repo_name = 'openstack/nova' openstackdocs_bug_project = 'nova' openstackdocs_auto_version = False openstackdocs_auto_name = False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-guide/source/down_cells.rst0000664000175000017500000003572100000000000020621 0ustar00zuulzuul00000000000000=================== Handling Down Cells =================== Starting from microversion 2.69 if there are transient conditions in a deployment like partial infrastructure failures (for example a cell not being reachable), some API responses may contain partial results (i.e. be missing some keys). The server operations which exhibit this behavior are described below: * List Servers (GET /servers): This operation may give partial constructs from the non-responsive portion of the infrastructure. A typical response, while listing servers from unreachable parts of the infrastructure, would include only the following keys from available information: - status: The state of the server which will be "UNKNOWN". - id: The UUID of the server. - links: Links to the servers in question. A sample response for a GET /servers request that includes one result each from an unreachable and a healthy part of the infrastructure is shown below. Response:: { "servers": [ { "status": "UNKNOWN", "id": "bcc6c6dd-3d0a-4633-9586-60878fd68edb", "links": [ { "rel": "self", "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/bcc6c6dd-3d0a-4633-9586-60878fd68edb" }, { "rel": "bookmark", "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/bcc6c6dd-3d0a-4633-9586-60878fd68edb" } ] }, { "id": "22c91117-08de-4894-9aa9-6ef382400985", "name": "test_server", "links": [ { "rel": "self", "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985" }, { "rel": "bookmark", "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985" } ] } ] } * List Servers Detailed (GET /servers/detail): This operation may give partial constructs from the non-responsive portion of the infrastructure. A typical response, while listing servers from unreachable parts of the infrastructure, would include only the following keys from available information: - status: The state of the server which will be "UNKNOWN". - id: The UUID of the server. - tenant_id: The tenant_id to which the server belongs to. - created: The time of server creation. - links: Links to the servers in question. - security_groups: One or more security groups. (Optional) A sample response for a GET /servers/details request that includes one result each from an unreachable and a healthy part of the infrastructure is shown below. 
Response:: { "servers": [ { "created": "2018-06-29T15:07:29Z", "id": "bcc6c6dd-3d0a-4633-9586-60878fd68edb", "status": "UNKNOWN", "tenant_id": "940f47b984034c7f8f9624ab28f5643c", "security_groups": [ { "name": "default" } ], "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/bcc6c6dd-3d0a-4633-9586-60878fd68edb", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/bcc6c6dd-3d0a-4633-9586-60878fd68edb", "rel": "bookmark" } ] }, { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "nova", "OS-EXT-SRV-ATTR:host": "compute", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:kernel_id": "", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:ramdisk_id": "", "OS-EXT-SRV-ATTR:reservation_id": "r-y0w4v32k", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2017-10-10T15:49:09.516729", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", "OS-EXT-IPS:type": "fixed", "addr": "192.168.0.3", "version": 4 } ] }, "config_drive": "", "created": "2017-10-10T15:49:08Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": { "hw:numa_nodes": "1" }, "original_name": "m1.tiny.specs", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "host_status": "UP", "id": "569f39f9-7c76-42a1-9c2d-8394e2638a6d", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d", "rel": "bookmark" } ], "locked": false, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-extended-volumes:volumes_attached": [], "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": [ "0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8", "674736e3-f25c-405c-8362-bbf991e0ce0a" ], "updated": "2017-10-10T15:49:09Z", "user_id": "fake" } ] } **Edge Cases** * **Filters:** If the user is listing servers using filters, results from unreachable parts of the infrastructure cannot be tested for matching those filters and thus no minimalistic construct will be provided. Note that by default ``openstack server list`` uses the ``deleted=False`` and ``project_id=tenant_id`` filters and since we know both of these fundamental values at all times, they are the only allowed filters to be applied to servers with only partial information available. Hence only doing ``openstack server list`` and ``openstack server list --all-projects`` (admin only) will show minimalistic results when parts of the infrastructure are unreachable. 
Other filters like ``openstack server list --deleted`` or ``openstack server list --host xx`` will skip the results depending on the administrator's configuration of the deployment. Note that the filter ``openstack server list --limit`` will also skip the results and if not specified will return 1000 (or the configured default) records from the available parts of the infrastructure. * **Marker:** If the user does ``openstack server list --marker`` it will fail with a 500 if the marker is an instance that is no longer reachable. * **Sorting:** We exclude the unreachable parts of the infrastructure just like we do for filters since there is no way of obtaining valid sorted results from those parts with missing information. * **Paging:** We ignore the parts of the deployment which are non-responsive. For example if we have three cells A (reachable state), B (unreachable state) and C (reachable state) and if the marker is half way in A, we would get the remaining half of the results from A, all the results from C and ignore cell B. .. note:: All the edge cases that are not supported for minimal constructs would give responses based on the administrator's configuration of the deployment, either skipping those results or returning an error. * Show Server Details (GET /servers/{server_id}): This operation may give partial constructs from the non-responsive portion of the infrastructure. A typical response while viewing a server from an unreachable part of the infrastructure would include only the following keys from available information: - status: The state of the server which will be "UNKNOWN". - id: The UUID of the server. - tenant_id: The tenant_id to which the server belongs to. - created: The time of server creation. - user_id: The user_id to which the server belongs to. This may be "UNKNOWN" for older servers. - image: The image details of the server. If it is not set like in the boot-from-volume case, this value will be an empty string. - flavor: The flavor details of the server. - availability_zone: The availability_zone of the server if it was specified during boot time and "UNKNOWN" otherwise. - power_state: Its value will be 0 (``NOSTATE``). - links: Links to the servers in question. - server_groups: The UUIDs of the server groups to which the server belongs. Currently this can contain at most one entry. Note that this key will be in the response only from the "2.71" microversion. A sample response for a GET /servers/{server_id} request that includes one server from an unreachable part of the infrastructure is shown below. 
Response:: { "server": [ { "created": "2018-06-29T15:07:29Z", "status": "UNKNOWN", "tenant_id": "940f47b984034c7f8f9624ab28f5643c", "id": "bcc6c6dd-3d0a-4633-9586-60878fd68edb", "user_id": "940f47b984034c7f8f9624ab28f5643c", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", }, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": { "hw:numa_nodes": "1" }, "original_name": "m1.tiny.specs", "ram": 512, "swap": 0, "vcpus": 1 }, "OS-EXT-AZ:availability_zone": "geneva", "OS-EXT-STS:power_state": 0, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/bcc6c6dd-3d0a-4633-9586-60878fd68edb", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/bcc6c6dd-3d0a-4633-9586-60878fd68edb", "rel": "bookmark" } ], "server_groups": ["0fd77252-4eef-4ec4-ae9b-e05dfc98aeac"] } ] } * List Compute Services (GET /os-services): This operation may give partial constructs for the services with :program:`nova-compute` as their binary from the non-responsive portion of the infrastructure. A typical response while listing the compute services from unreachable parts of the infrastructure would include only the following keys for the :program:`nova-compute` services from available information while the other services like the :program:`nova-conductor` service will be skipped from the result: - binary: The binary name of the service which would always be ``nova-compute``. - host: The name of the host running the service. - status: The status of the service which will be "UNKNOWN". A sample response for a GET /servers request that includes two compute services from unreachable parts of the infrastructure and other services from a healthy one are shown below. Response:: { "services": [ { "binary": "nova-compute", "host": "host1", "status": "UNKNOWN" }, { "binary": "nova-compute", "host": "host2", "status": "UNKNOWN" }, { "id": 1, "binary": "nova-scheduler", "disabled_reason": "test1", "host": "host3", "state": "up", "status": "disabled", "updated_at": "2012-10-29T13:42:02.000000", "forced_down": false, "zone": "internal" }, { "id": 2, "binary": "nova-compute", "disabled_reason": "test2", "host": "host4", "state": "up", "status": "disabled", "updated_at": "2012-10-29T13:42:05.000000", "forced_down": false, "zone": "nova" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-guide/source/extra_specs_and_properties.rst0000664000175000017500000000340000000000000024073 0ustar00zuulzuul00000000000000======================================= Flavor Extra Specs and Image Properties ======================================= Flavor extra specs and image properties are used to control certain aspects or scheduling behavior for a server. The flavor of a server can be changed during a :nova-doc:`resize ` operation. The image of a server can be changed during a :nova-doc:`rebuild ` operation. By default, flavor extra specs are controlled by administrators of the cloud. If users are authorized to upload their own images to the image service, they may be able to specify their own image property requirements. There are many cases of flavor extra specs and image properties that are for the same functionality. In many cases the image property takes precedence over the flavor extra spec if both are used in the same server. Flavor Extra Specs ================== Refer to the :nova-doc:`user guide ` for a list of official extra specs. 
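As a hedged illustration of how an extra spec is attached to a flavor, the
following console sketch uses the official ``hw:cpu_policy`` extra spec; the
flavor name ``m1.large.pinned`` is only a placeholder for an existing flavor
in your deployment:

.. code-block:: console

   $ # Attach the extra spec to the flavor (an admin-only action by default)
   $ openstack flavor set --property hw:cpu_policy=dedicated m1.large.pinned

   $ # Verify the extra spec now appears among the flavor properties
   $ openstack flavor show m1.large.pinned -c properties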
While there are standard extra specs, deployments can define their own extra specs to be used with host aggregates and custom scheduler filters as necessary. See the :nova-doc:`reference guide ` for more details. Image Properties ================ Refer to the image service documentation for a list of official :glance-doc:`image properties ` and :glance-doc:`metadata definition concepts `. Unlike flavor extra specs, image properties are standardized in the compute service and thus they must be `registered`_ within the compute service before they can be used. .. _registered: https://opendev.org/openstack/nova/src/branch/master/nova/objects/image_meta.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-guide/source/faults.rst0000664000175000017500000004120200000000000017755 0ustar00zuulzuul00000000000000====== Faults ====== This doc explains how to understand what has happened to your API request. Every HTTP request has a status code. 2xx codes signify the API call was a success. However, that is often not the end of the story. That generally only means the request to start the operation has been accepted. It does not mean the action you requested has successfully completed. Tracking Errors by Request ID ============================= There are two types of request ID. .. list-table:: :header-rows: 1 :widths: 2,8 * - Type - Description * - Local request ID - Locally generated unique request ID by each service and different between all services (Nova, Cinder, Glance, Neutron, etc.) involved in that operation. The format is ``req-`` + UUID (UUID4). * - Global request ID - User specified request ID which is utilized as common identifier by all services (Nova, Cinder, Glance, Neutron, etc.) involved in that operation. This request ID is same among all services involved in that operation. The format is ``req-`` + UUID (UUID4). It is extremely common for clouds to have an ELK (Elastic Search, Logstash, Kibana) infrastructure consuming their logs. The only way to query these flows is if there is a common identifier across all relevant messages. The global request ID immediately makes existing deployed tooling better for managing OpenStack. **Request Header** In each REST API request, you can specify the global request ID in ``X-Openstack-Request-Id`` header, starting from microversion 2.46. The format must be ``req-`` + UUID (UUID4). If not in accordance with the format, the global request ID is ignored by Nova. Request header example:: X-Openstack-Request-Id: req-3dccb8c4-08fe-4706-a91d-e843b8fe9ed2 **Response Header** In each REST API request, ``X-Compute-Request-Id`` is returned in the response header. Starting from microversion 2.46, ``X-Openstack-Request-Id`` is also returned in the response header. ``X-Compute-Request-Id`` and ``X-Openstack-Request-Id`` are local request IDs. The global request IDs are not returned. Response header example:: X-Compute-Request-Id: req-d7bc29d0-7b99-4aeb-a356-89975043ab5e X-Openstack-Request-Id: req-d7bc29d0-7b99-4aeb-a356-89975043ab5e Server Actions -------------- Most `server action APIs`_ are asynchronous. Usually the API service will do some minimal work and then send the request off to the ``nova-compute`` service to complete the action and the API will return a 202 response to the client. 
The client will poll the API until the operation completes, which could be a status change on the server but is usually at least always waiting for the server ``OS-EXT-STS:task_state`` field to go to ``null`` indicating the action has completed either successfully or with an error. If a server action fails and the server status changes to ``ERROR`` an :ref:`instance fault ` will be shown with the server details. The `os-instance-actions API`_ allows users end users to list the outcome of server actions, referencing the requested action by request id. This is useful when an action fails and the server status does not change to ``ERROR``. To illustrate, consider a server (vm1) created with flavor ``m1.tiny``: .. code-block:: console $ openstack server create --flavor m1.tiny --image cirros-0.4.0-x86_64-disk --wait vm1 +-----------------------------+-----------------------------------------------------------------+ | Field | Value | +-----------------------------+-----------------------------------------------------------------+ | OS-DCF:diskConfig | MANUAL | | OS-EXT-AZ:availability_zone | nova | | OS-EXT-STS:power_state | Running | | OS-EXT-STS:task_state | None | | OS-EXT-STS:vm_state | active | | OS-SRV-USG:launched_at | 2019-12-02T19:14:48.000000 | | OS-SRV-USG:terminated_at | None | | accessIPv4 | | | accessIPv6 | | | addresses | private=10.0.0.60, fda0:e0c4:2764:0:f816:3eff:fe03:806 | | adminPass | NgascCr3dYo4 | | config_drive | | | created | 2019-12-02T19:14:42Z | | flavor | m1.tiny (1) | | hostId | 22e88bec09a7e33606348fce0abac0ebbbe091a35e29db1498ec4e14 | | id | 344174b8-34fd-4017-ae29-b9084dcf3861 | | image | cirros-0.4.0-x86_64-disk (cce5e6d6-d359-4152-b277-1b4f1871557f) | | key_name | None | | name | vm1 | | progress | 0 | | project_id | b22597ea961545f3bde1b2ede0bd5b91 | | properties | | | security_groups | name='default' | | status | ACTIVE | | updated | 2019-12-02T19:14:49Z | | user_id | 046033fb3f824550999752b6525adbac | | volumes_attached | | +-----------------------------+-----------------------------------------------------------------+ The owner of the server then tries to resize the server to flavor ``m1.small`` which fails because there are no hosts available on which to resize the server: .. code-block:: console $ openstack server resize --flavor m1.small --wait vm1 Complete Despite the openstack command saying the operation completed, the server shows the original ``m1.tiny`` flavor and the status is not ``VERIFY_RESIZE``: .. code-block:: $ openstack server show vm1 -f value -c status -c flavor m1.tiny (1) ACTIVE Since the status is not ``ERROR`` there are is no ``fault`` field in the server details so we find the details by listing the events for the server: .. code-block:: console $ openstack server event list vm1 +------------------------------------------+--------------------------------------+--------+----------------------------+ | Request ID | Server ID | Action | Start Time | +------------------------------------------+--------------------------------------+--------+----------------------------+ | req-ea1b0dfc-3186-42a9-84ff-c4f4fb130fae | 344174b8-34fd-4017-ae29-b9084dcf3861 | resize | 2019-12-02T19:15:35.000000 | | req-4cdc4c93-0668-4ae6-98c8-a0a5fcc63d39 | 344174b8-34fd-4017-ae29-b9084dcf3861 | create | 2019-12-02T19:14:42.000000 | +------------------------------------------+--------------------------------------+--------+----------------------------+ To see details about the ``resize`` action, we use the Request ID for that action: .. 
code-block:: console $ openstack server event show vm1 req-ea1b0dfc-3186-42a9-84ff-c4f4fb130fae +---------------+------------------------------------------+ | Field | Value | +---------------+------------------------------------------+ | action | resize | | instance_uuid | 344174b8-34fd-4017-ae29-b9084dcf3861 | | message | Error | | project_id | b22597ea961545f3bde1b2ede0bd5b91 | | request_id | req-ea1b0dfc-3186-42a9-84ff-c4f4fb130fae | | start_time | 2019-12-02T19:15:35.000000 | | user_id | 046033fb3f824550999752b6525adbac | +---------------+------------------------------------------+ We see the message is "Error" but are not sure what failed. By default the event details for an action are not shown to users without the admin role so use microversion 2.51 to see the events (the ``events`` field is JSON-formatted here for readability): .. code-block:: $ openstack --os-compute-api-version 2.51 server event show vm1 req-ea1b0dfc-3186-42a9-84ff-c4f4fb130fae -f json -c events { "events": [ { "event": "cold_migrate", "start_time": "2019-12-02T19:15:35.000000", "finish_time": "2019-12-02T19:15:36.000000", "result": "Error" }, { "event": "conductor_migrate_server", "start_time": "2019-12-02T19:15:35.000000", "finish_time": "2019-12-02T19:15:36.000000", "result": "Error" } ] } By default policy configuration a user with the admin role can see a ``traceback`` for each failed event just like with an instance fault: .. code-block:: $ source openrc admin admin $ openstack --os-compute-api-version 2.51 server event show 344174b8-34fd-4017-ae29-b9084dcf3861 req-ea1b0dfc-3186-42a9-84ff-c4f4fb130fae -f json -c events { "events": [ { "event": "cold_migrate", "start_time": "2019-12-02T19:15:35.000000", "finish_time": "2019-12-02T19:15:36.000000", "result": "Error", "traceback": " File \"/opt/stack/nova/nova/conductor/manager.py\", line 301, in migrate_server\n host_list)\n File \"/opt/stack/nova/nova/conductor/manager.py\", line 367, in _cold_migrate\n raise exception.NoValidHost(reason=msg)\n" }, { "event": "conductor_migrate_server", "start_time": "2019-12-02T19:15:35.000000", "finish_time": "2019-12-02T19:15:36.000000", "result": "Error", "traceback": " File \"/opt/stack/nova/nova/compute/utils.py\", line 1410, in decorated_function\n return function(self, context, *args, **kwargs)\n File \"/opt/stack/nova/nova/conductor/manager.py\", line 301, in migrate_server\n host_list)\n File \"/opt/stack/nova/nova/conductor/manager.py\", line 367, in _cold_migrate\n raise exception.NoValidHost(reason=msg)\n" } ] } .. _server action APIs: https://docs.openstack.org/api-ref/compute/#servers-run-an-action-servers-action .. _os-instance-actions API: https://docs.openstack.org/api-ref/compute/#servers-actions-servers-os-instance-actions Logs ---- All logs on the system, by default, include the global request ID and the local request ID when available. This allows an administrator to track the API request processing as it transitions between all the different nova services or between nova and other component services called by nova during that request. When nova services receive the local request IDs of other components in the ``X-Openstack-Request-Id`` header, the local request IDs are output to logs along with the local request IDs of nova services. .. tip:: If a session client is used in client library, set ``DEBUG`` level to the ``keystoneauth`` log level. If not, set ``DEBUG`` level to the client library package. e.g. ``glanceclient``, ``cinderclient``. Sample log output is provided below. 
In this example, nova is using local request ID ``req-034279a7-f2dd-40ff-9c93-75768fda494d``, while neutron is using local request ID ``req-39b315da-e1eb-4ab5-a45b-3f2dbdaba787``:: Jun 19 09:16:34 devstack-master nova-compute[27857]: DEBUG keystoneauth.session [None req-034279a7-f2dd-40ff-9c93-75768fda494d admin admin] POST call to network for http://10.0.2.15:9696/v2.0/ports used request id req-39b315da-e1eb-4ab5-a45b-3f2dbdaba787 {{(pid=27857) request /usr/local/lib/python2.7/dist-packages/keystoneauth1/session.py:640}} .. note:: The local request IDs are useful to make 'call graphs'. .. _instance-fault: Instance Faults --------------- Nova often adds an instance fault DB entry for an exception that happens while processing an API request. This often includes more administrator focused information, such as a stack trace. For a server with status ``ERROR`` or ``DELETED``, a ``GET /servers/{server_id}`` request will include a ``fault`` object in the response body for the ``server`` resource. For example:: GET https://10.211.2.122/compute/v2.1/servers/c76a7603-95be-4368-87e9-7b9b89fb1d7e { "server": { "id": "c76a7603-95be-4368-87e9-7b9b89fb1d7e", "fault": { "created": "2018-04-10T13:49:40Z", "message": "No valid host was found.", "code": 500 }, "status": "ERROR", ... } } Notifications ------------- In many cases there are also notifications emitted that describe the error. This is an administrator focused API, that works best when treated as structured logging. .. _synchronous_faults: Synchronous Faults ================== If an error occurs while processing our API request, you get a non 2xx API status code. The system also returns additional information about the fault in the body of the response. **Example: Fault: JSON response** .. code:: { "itemNotFound":{ "code": 404, "message":"Aggregate agg_h1 could not be found." } } The error ``code`` is returned in the body of the response for convenience. The ``message`` section returns a human-readable message that is appropriate for display to the end user. The ``details`` section is optional and may contain information--for example, a stack trace--to assist in tracking down an error. The ``details`` section might or might not be appropriate for display to an end user. The root element of the fault (such as, computeFault) might change depending on the type of error. The following link contains a list of possible elements along with their associated error codes. For more information on possible error code, please see: http://specs.openstack.org/openstack/api-wg/guidelines/http/response-codes.html Asynchronous faults =================== An error may occur in the background while a server is being built or while a server is executing an action. In these cases, the server is usually placed in an ``ERROR`` state. For some operations, like resize, it is possible that the operation fails but the instance gracefully returned to its original state before attempting the operation. In both of these cases, you should be able to find out more from the `Server Actions`_ API described above. When a server is placed into an ``ERROR`` state, a fault is embedded in the offending server. Note that these asynchronous faults follow the same format as the synchronous ones. The fault contains an error code, a human readable message, and optional details about the error. Additionally, asynchronous faults may also contain a ``created`` timestamp that specifies when the fault occurred. **Example: Server in error state: JSON response** .. 
code:: { "server": { "id": "52415800-8b69-11e0-9b19-734f0000ffff", "tenant_id": "1234", "user_id": "5678", "name": "sample-server", "created": "2010-08-10T12:00:00Z", "hostId": "e4d909c290d0fb1ca068ffafff22cbd0", "status": "ERROR", "progress": 66, "image" : { "id": "52415800-8b69-11e0-9b19-734f6f007777" }, "flavor" : { "id": "52415800-8b69-11e0-9b19-734f216543fd" }, "fault" : { "code" : 500, "created": "2010-08-10T11:59:59Z", "message": "No valid host was found. There are not enough hosts available.", "details": [snip] }, "links": [ { "rel": "self", "href": "http://servers.api.openstack.org/v2/1234/servers/52415800-8b69-11e0-9b19-734f000004d2" }, { "rel": "bookmark", "href": "http://servers.api.openstack.org/1234/servers/52415800-8b69-11e0-9b19-734f000004d2" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-guide/source/general_info.rst0000664000175000017500000002701700000000000021117 0ustar00zuulzuul00000000000000======================== Key Compute API Concepts ======================== The OpenStack Compute API is defined as a RESTful HTTP service. The API takes advantage of all aspects of the HTTP protocol (methods, URIs, media types, response codes, etc.) and providers are free to use existing features of the protocol such as caching, persistent connections, and content compression among others. Providers can return information identifying requests in HTTP response headers, for example, to facilitate communication between the provider and client applications. OpenStack Compute is a compute service that provides server capacity in the cloud. Compute Servers come in different flavors of memory, cores, disk space, and CPU, and can be provisioned in minutes. Interactions with Compute Servers can happen programmatically with the OpenStack Compute API. User Concepts ============= To use the OpenStack Compute API effectively, you should understand several key concepts: - **Server** A virtual machine (VM) instance, physical machine or a container in the compute system. Flavor and image are requisite elements when creating a server. A name for the server is also required. For more details, such as server actions and server metadata, please see: :doc:`server_concepts` - **Flavor** Virtual hardware configuration for the requested server. Each flavor has a unique combination of disk space, memory capacity and priority for CPU time. - **Flavor Extra Specs** Key and value pairs that can be used to describe the specification of the server which is more than just about CPU, disk and RAM. For example, it can be used to indicate that the server created by this flavor has PCI devices, etc. For more details, please see: :doc:`extra_specs_and_properties` - **Image** A collection of files used to create or rebuild a server. Operators provide a number of pre-built OS images by default. You may also create custom images from cloud servers you have launched. These custom images are useful for backup purposes or for producing "gold" server images if you plan to deploy a particular server configuration frequently. - **Image Properties** Key and value pairs that can help end users to determine the requirements of the guest operating system in the image. For more details, please see: :doc:`extra_specs_and_properties` - **Key Pair** An ssh or x509 keypair that can be injected into a server at it's boot time. This allows you to connect to your server once it has been created without having to use a password. 
If you don't specify a key pair, Nova will create a root password for you, and return it in plain text in the server create response. - **Volume** A block storage device that Nova can use as permanent storage. When a server is created it has some disk storage available, but that is considered ephemeral, as it is destroyed when the server is destroyed. A volume can be attached to a server, then later detached and used by another server. Volumes are created and managed by the Cinder service. For additional info, see :nova-doc:`Block device mapping ` - **Quotas** An upper limit on the amount of resources any individual tenant may consume. Quotas can be used to limit the number of servers a tenant creates, or the amount of disk space consumed, so that no one tenant can overwhelm the system and prevent normal operation for others. Changing quotas is an administrator-level action. For additional info, see :nova-doc:`Quotas ` - **Rate Limiting** Please see :doc:`limits` - **Availability zone** A grouping of host machines that can be used to control where a new server is created. There is some confusion about this, as the name "availability zone" is used in other clouds, such as Amazon Web Services, to denote a physical separation of server locations that can be used to distribute cloud resources for fault tolerance in case one zone is unavailable for any reason. Such a separation is possible in Nova if an administrator carefully sets up availability zones for that, but it is not the default. Networking Concepts ------------------- Networking is handled by the :neutron-doc:`networking service <>`. When working with a server in the compute service, the most important networking resource is a *port* which is part of a *network*. Ports can have *security groups* applied to control firewall access. Ports can also be linked to *floating IPs* for external network access depending on the networking service configuration. When creating a server or attaching a network interface to an existing server, zero or more networks and/or ports can be specified to attach to the server. If nothing is provided, the compute service will by default create a port on the single network available to the project making the request. If more than one network is available to the project, such as a public external network and a private tenant network, an error will occur and the request will have to be made with a specific network or port. If a network is specified the compute service will attempt to create a port on the given network on behalf of the user. More advanced types of ports, such as :neutron-doc:`SR-IOV ports `, must be pre-created and provided to the compute service. Refer to the `network API reference`_ for more details. .. _network API reference: https://docs.openstack.org/api-ref/network/ Administrator Concepts ====================== Some APIs are largely focused on administration of Nova, and generally focus on compute hosts rather than servers. - **Services** Services are provided by Nova components. Normally, the Nova component runs as a process on the controller/compute node to provide the service. These services may be end-user facing, such as the OpenStack Compute REST API service, but most just work with other Nova services. The status of each service is monitored by Nova, and if it is not responding normally, Nova will update its status so that requests are not sent to that service anymore. 
The service can also be controlled by an Administrator in order to run maintenance or upgrades, or in response to changing workloads. - **nova-osapi_compute** This service provides the OpenStack Compute REST API to end users and application clients. - **nova-metadata** This service provides the OpenStack Metadata API to servers. The metadata is used to configure the running servers. - **nova-scheduler** This service provides compute request scheduling by tracking available resources, and finding the host that can best fulfill the request. - **nova-conductor** This service provides database access for Nova and the other OpenStack services, and handles internal version compatibility when different services are running different versions of code. The conductor service also handles long-running requests. - **nova-compute** This service runs on every compute node, and communicates with a hypervisor for managing compute resources on that node. - **Services Actions** .. note:: The services actions described in this section apply only to **nova-compute** services. - **enable, disable, disable-log-reason** The service can be disabled to indicate the service is not available anymore. This is used by administrator to stop service for maintenance. For example, when Administrator wants to maintain a specific compute node, Administrator can disable nova-compute service on that compute node. Then nova won't dispatch any new compute request to that compute node anymore. Administrator also can add note for disable reason. - **forced-down** .. note:: This action is enabled in microversion 2.11. This action allows you set the state of service down immediately. Nova only provides a very basic health monitor of service status, there isn't any guarantee about health status of other parts of infrastructure, like the health status of data network, storage network and other components. If you have a more extensive health monitoring system external to Nova, and know that the service in question is dead (and disconnected from the network), this can be used to tell the rest of Nova it can trust that this service is never coming back, and allow actions such as evacuate. .. warning:: This must *only* be used if you have fully fenced the service in question, and that it can never send updates to the rest of the system. This can be done by powering off the node or completely isolating its networking. If you force-down a service that is not fenced you can corrupt the VMs that were running on that host. - **Hosts** Hosts are the *physical machines* that provide the resources for the virtual servers created in Nova. They run a **hypervisor** (see definition below) that handles the actual creation and management of the virtual servers. Hosts also run the **Nova compute service**, which receives requests from Nova to interact with the virtual servers on that machine. When compute service receives a request, it calls the appropriate methods of the driver for that hypervisor in order to carry out the request. The driver acts as the translator from generic Nova requests to hypervisor-specific calls. Hosts report their current state back to Nova, where it is tracked by the scheduler service, so that the scheduler can place requests for new virtual servers on the hosts that can best fit them. - **Host Actions** .. note:: These APIs are deprecated in Microversion 2.43. A *host action* is one that affects the physical host machine, as opposed to actions that only affect the virtual servers running on that machine. 
There are three 'power' actions that are supported: *startup*, *shutdown*, and *reboot*. There are also two 'state' actions: enabling/disabling the host, and setting the host into or out of maintenance mode. Of course, carrying out these actions can affect running virtual servers on that host, so their state will need to be considered before carrying out the host action. For example, if you want to call the 'shutdown' action to turn off a host machine, you might want to migrate any virtual servers on that host before shutting down the host machine so that the virtual servers continue to be available without interruption. - **Hypervisors** A hypervisor, or virtual machine monitor (VMM), is a piece of computer software, firmware or hardware that creates and runs virtual machines. In nova, each Host (see ``Hosts``) runs a hypervisor. Administrators are able to query the hypervisor for information, such as all the virtual servers currently running, as well as detailed info about the hypervisor, such as CPU, memory, or disk related configuration. Currently nova-compute also supports Ironic and LXC, but they don't have a hypervisor running. - **Aggregates** See :nova-doc:`Aggregates Developer Information `. - **Migrations** Migrations are the process where a virtual server is moved from one host to another. Please see :doc:`server_concepts` for details about moving servers. Administrators are able to query the records in database for information about migrations. For example, they can determine the source and destination hosts, type of migration, or changes in the server's flavor. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-guide/source/index.rst0000664000175000017500000000553200000000000017574 0ustar00zuulzuul00000000000000.. Copyright 2009-2015 OpenStack Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =========== Compute API =========== The nova project has a RESTful HTTP service called the OpenStack Compute API. Through this API, the service provides massively scalable, on demand, self-service access to compute resources. Depending on the deployment those compute resources might be Virtual Machines, Physical Machines or Containers. This guide covers the concepts in the OpenStack Compute API. For a full reference listing, please see: `Compute API Reference `__. We welcome feedback, comments, and bug reports at `bugs.launchpad.net/nova `__. Intended audience ================= This guide assists software developers who want to develop applications using the OpenStack Compute API. To use this information, you should have access to an account from an OpenStack Compute provider, or have access to your own deployment, and you should also be familiar with the following concepts: * OpenStack Compute service * RESTful HTTP services * HTTP/1.1 * JSON data serialization formats End User and Operator APIs ========================== The Compute API includes all end user and operator API calls. 
The API works with keystone and oslo.policy to deliver RBAC (Role-based access control). The default policy file gives suggestions on what APIs should not be made available to most end users but this is fully configurable. API Versions ============ Following the Mitaka release, every Nova deployment should have the following endpoints: * / - list of available versions * /v2 - the first version of the Compute API, uses extensions (we call this Compute API v2.0) * /v2.1 - same API, except uses microversions While this guide concentrates on documenting the v2.1 API, please note that the v2.0 is (almost) identical to first microversion of the v2.1 API and are also covered by this guide. Contents ======== .. toctree:: :maxdepth: 2 users versions microversions general_info server_concepts authentication extra_specs_and_properties faults limits links_and_references paginated_collections polling_changes request_and_response_formats down_cells port_with_resource_request accelerator-support ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-guide/source/limits.rst0000664000175000017500000000530500000000000017764 0ustar00zuulzuul00000000000000====== Limits ====== Accounts may be pre-configured with a set of thresholds (or limits) to manage capacity and prevent abuse of the system. The system recognizes *absolute limits*. Absolute limits are fixed. Limits are configured by operators and may differ from one deployment of the OpenStack Compute service to another. Please contact your provider to determine the limits that apply to your account. Your provider may be able to adjust your account's limits if they are too low. Also see the API Reference for `Limits `__. Absolute limits ~~~~~~~~~~~~~~~ Absolute limits are specified as name/value pairs. The name of the absolute limit uniquely identifies the limit within a deployment. Please consult your provider for an exhaustive list of absolute limits names. An absolute limit value is always specified as an integer. The name of the absolute limit determines the unit type of the integer value. For example, the name maxServerMeta implies that the value is in terms of server metadata items. **Table: Sample absolute limits** +-------------------+-------------------+------------------------------------+ | Name | Value | Description | +-------------------+-------------------+------------------------------------+ | maxTotalRAMSize | 51200 | Maximum total amount of RAM (MB) | +-------------------+-------------------+------------------------------------+ | maxServerMeta | 5 | Maximum number of metadata items | | | | associated with a server. | +-------------------+-------------------+------------------------------------+ | maxImageMeta | 5 | Maximum number of metadata items | | | | associated with an image. | +-------------------+-------------------+------------------------------------+ | maxPersonality | 5 | The maximum number of file | | | | path/content pairs that can be | | | | supplied on server build. | +-------------------+-------------------+------------------------------------+ | maxPersonalitySize| 10240 | The maximum size, in bytes, for | | | | each personality file. | +-------------------+-------------------+------------------------------------+ Determine limits programmatically ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Applications can programmatically determine current account limits. For information, see `Limits `__. 
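As a concrete sketch, the absolute limits for the current project can be
retrieved with the ``openstack`` client or with a plain ``GET /limits``
request; the endpoint URL and the ``$OS_TOKEN`` variable below are
placeholders for a real deployment:

.. code-block:: console

   $ # Command line client
   $ openstack limits show --absolute

   $ # Raw REST call against the Compute API
   $ curl -s -H "X-Auth-Token: $OS_TOKEN" \
       http://openstack.example.com/compute/v2.1/limits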
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-guide/source/links_and_references.rst0000664000175000017500000001005200000000000022621 0ustar00zuulzuul00000000000000==================== Links and references ==================== Often resources need to refer to other resources. For example, when creating a server, you must specify the image from which to build the server. You can specify the image by providing an ID or a URL to a remote image. When providing an ID, it is assumed that the resource exists in the current OpenStack deployment. **Example: ID image reference: JSON request** .. code:: { "server":{ "flavorRef":"http://openstack.example.com/openstack/flavors/1", "imageRef":"http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b", "metadata":{ "My Server Name":"Apache1" }, "name":"new-server-test", "personality":[ { "contents":"ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA==", "path":"/etc/banner.txt" } ] } } **Example: Full image reference: JSON request** .. code:: { "server": { "name": "server-test-1", "imageRef": "b5660a6e-4b46-4be3-9707-6b47221b454f", "flavorRef": "2", "max_count": 1, "min_count": 1, "networks": [ { "uuid": "d32019d3-bc6e-4319-9c1d-6722fc136a22" } ], "security_groups": [ { "name": "default" }, { "name": "another-secgroup-name" } ] } } For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: - A ``self`` link contains a versioned link to the resource. Use these links when the link is followed immediately. - A ``bookmark`` link provides a permanent link to a resource that is appropriate for long term storage. - An ``alternate`` link can contain an alternate representation of the resource. For example, an OpenStack Compute image might have an alternate representation in the OpenStack Image service. .. note:: The ``type`` attribute provides a hint as to the type of representation to expect when following the link. **Example: Server with self links: JSON** .. code:: { "server":{ "id":"52415800-8b69-11e0-9b19-734fcece0043", "name":"my-server", "links":[ { "rel":"self", "href":"http://servers.api.openstack.org/v2.1/servers/52415800-8b69-11e0-9b19-734fcece0043" }, { "rel":"bookmark", "href":"http://servers.api.openstack.org/servers/52415800-8b69-11e0-9b19-734fcece0043" } ] } } **Example: Server with alternate link: JSON** .. 
code:: { "image" : { "id" : "52415800-8b69-11e0-9b19-734f5736d2a2", "name" : "My Server Backup", "links": [ { "rel" : "self", "href" : "http://servers.api.openstack.org/v2.1/images/52415800-8b69-11e0-9b19-734f5736d2a2" }, { "rel" : "bookmark", "href" : "http://servers.api.openstack.org/images/52415800-8b69-11e0-9b19-734f5736d2a2" }, { "rel" : "alternate", "type" : "application/vnd.openstack.image", "href" : "http://glance.api.openstack.org/1234/images/52415800-8b69-11e0-9b19-734f5736d2a2" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-guide/source/microversions.rst0000664000175000017500000001453000000000000021365 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============= Microversions ============= API v2.1 supports microversions: small, documented changes to the API. A user can use microversions to discover the latest API microversion supported in their cloud. A cloud that is upgraded to support newer microversions will still support all older microversions to maintain the backward compatibility for those users who depend on older microversions. Users can also discover new features easily with microversions, so that they can benefit from all the advantages and improvements of the current cloud. There are multiple cases which you can resolve with microversions: - **Older clients with new cloud** Before using an old client to talk to a newer cloud, the old client can check the minimum version of microversions to verify whether the cloud is compatible with the old API. This prevents the old client from breaking with backwards incompatible API changes. Currently the minimum version of microversions is ``2.1``, which is a microversion compatible with the legacy v2 API. That means the legacy v2 API user doesn't need to worry that their older client software will be broken when their cloud is upgraded with new versions. And the cloud operator doesn't need to worry that upgrading their cloud to newer versions will break any user with older clients that don't expect these changes. - **User discovery of available features between clouds** The new features can be discovered by microversions. The user client should check the microversions firstly, and new features are only enabled when clouds support. In this way, the user client can work with clouds that have deployed different microversions simultaneously. Version Discovery ================= The Version API will return the minimum and maximum microversions. These values are used by the client to discover the API's supported microversion(s). Requests to ``/`` will get version info for all endpoints. 
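For example, such a version discovery request might look like the following sketch (the host name is illustrative, and the versions document can typically be fetched without an authentication token):

.. code::

   GET / HTTP/1.1
   Host: openstack.example.com
   Accept: application/json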
A response would look as follows:: { "versions": [ { "id": "v2.0", "links": [ { "href": "http://openstack.example.com/v2/", "rel": "self" } ], "status": "DEPRECATED", "version": "", "min_version": "", "updated": "2025-07-04T12:00:00Z" }, { "id": "v2.1", "links": [ { "href": "http://openstack.example.com/v2.1/", "rel": "self" } ], "status": "CURRENT", "version": "2.14", "min_version": "2.1", "updated": "2013-07-23T11:33:21Z" } ] } ``version`` is the maximum microversion, ``min_version`` is the minimum microversion. If the value is the empty string, it means this endpoint doesn't support microversions; it is a legacy v2 API endpoint -- for example, the endpoint ``http://openstack.example.com/v2/`` in the above sample. The endpoint ``http://openstack.example.com/v2.1/`` supports microversions; the maximum microversion is ``2.14``, and the minimum microversion is ``2.1``. The client should specify a microversion between (and including) the minimum and maximum microversion to access the endpoint. You can also obtain specific endpoint version information by performing a GET on the base version URL (e.g., ``http://openstack.example.com/v2.1/``). You can get more information about the version API at :doc:`versions`. Client Interaction ================== A client specifies the microversion of the API they want by using the following HTTP header:: X-OpenStack-Nova-API-Version: 2.4 Starting with microversion ``2.27`` it is also correct to use the following header to specify the microversion:: OpenStack-API-Version: compute 2.27 .. note:: For more detail on this newer form see the `Microversion Specification `_. This acts conceptually like the "Accept" header. Semantically this means: * If neither ``X-OpenStack-Nova-API-Version`` nor ``OpenStack-API-Version`` (specifying ``compute``) is provided, act as if the minimum supported microversion was specified. * If both headers are provided, ``OpenStack-API-Version`` will be preferred. * If ``X-OpenStack-Nova-API-Version`` or ``OpenStack-API-Version`` is provided, respond with the API at that microversion. If that's outside of the range of microversions supported, return 406 Not Acceptable. * If ``X-OpenStack-Nova-API-Version`` or ``OpenStack-API-Version`` has a value of ``latest`` (special keyword), act as if maximum was specified. .. warning:: The ``latest`` value is mostly meant for integration testing and would be dangerous to rely on in client code since microversions are not following semver and therefore backward compatibility is not guaranteed. Clients should always require a specific microversion but limit what is acceptable to the microversion range that it understands at the time. This means that out of the box, an old client without any knowledge of microversions can work with an OpenStack installation with microversions support. In microversions prior to ``2.27`` two extra headers are always returned in the response:: X-OpenStack-Nova-API-Version: microversion_number Vary: X-OpenStack-Nova-API-Version The first header specifies the microversion number of the API which was executed. The ``Vary`` header is used as a hint to caching proxies that the response is also dependent on the microversion and not just the body and query parameters. See :rfc:`2616` section 14.44 for details. 
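For example, a request pinned to microversion 2.4 and the headers of its response might look like the following sketch (the host name and token are illustrative):

.. code::

   GET /v2.1/servers HTTP/1.1
   Host: openstack.example.com
   Accept: application/json
   X-Auth-Token: eaaafd18-0fed-4b3a-81b4-663c99ec1cbb
   X-OpenStack-Nova-API-Version: 2.4

.. code::

   HTTP/1.1 200 OK
   Content-Type: application/json
   X-OpenStack-Nova-API-Version: 2.4
   Vary: X-OpenStack-Nova-API-Version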
From microversion ``2.27`` two additional headers are added to the response:: OpenStack-API-Version: compute microversion_number Vary: OpenStack-API-Version ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-guide/source/paginated_collections.rst0000664000175000017500000000565000000000000023020 0ustar00zuulzuul00000000000000===================== Paginated collections ===================== To reduce load on the service, list operations return a maximum number of items at a time. The maximum number of items returned is determined by the compute provider. To navigate the collection, the ``limit`` and ``marker`` parameters can be set in the URI. For example: .. code:: ?limit=100&marker=1234 The ``marker`` parameter is the ID of the last item in the previous list. By default, the service sorts items by create time in descending order. When the service cannot identify a create time, it sorts items by ID. The ``limit`` parameter sets the page size. Both parameters are optional. If the client requests a ``limit`` beyond one that is supported by the deployment an overLimit (413) fault may be thrown. A marker with an invalid ID returns a badRequest (400) fault. For convenience, collections should contain atom ``next`` links. They may optionally also contain ``previous`` links but the current implementation does not contain ``previous`` links. The last page in the list does not contain a link to "next" page. The following examples illustrate three pages in a collection of servers. The first page was retrieved through a **GET** to ``http://servers.api.openstack.org/v2.1/servers?limit=1``. In these examples, the *``limit``* parameter sets the page size to a single item. Subsequent links honor the initial page size. Thus, a client can follow links to traverse a paginated collection without having to input the ``marker`` parameter. **Example: Servers collection: JSON (first page)** .. code:: { "servers_links":[ { "href":"https://servers.api.openstack.org/v2.1/servers?limit=1&marker=fc45ace4-3398-447b-8ef9-72a22086d775", "rel":"next" } ], "servers":[ { "id":"fc55acf4-3398-447b-8ef9-72a42086d775", "links":[ { "href":"https://servers.api.openstack.org/v2.1/servers/fc45ace4-3398-447b-8ef9-72a22086d775", "rel":"self" }, { "href":"https://servers.api.openstack.org/v2.1/servers/fc45ace4-3398-447b-8ef9-72a22086d775", "rel":"bookmark" } ], "name":"elasticsearch-0" } ] } In JSON, members in a paginated collection are stored in a JSON array named after the collection. A JSON object may also be used to hold members in cases where using an associative array is more practical. Properties about the collection itself, including links, are contained in an array with the name of the entity an underscore (\_) and ``links``. The combination of the objects and arrays that start with the name of the collection and an underscore represent the collection in JSON. The approach allows for extensibility of paginated collections by allowing them to be associated with arbitrary properties. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-guide/source/polling_changes.rst0000664000175000017500000001005100000000000021611 0ustar00zuulzuul00000000000000================= Efficient polling ================= The REST API allows you to poll for the status of certain operations by performing a **GET** on various elements. 
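For example, a client waiting for a newly created server to finish building could repeatedly issue a request such as the following sketch (the server ID, host name and token are illustrative) and inspect the ``status`` attribute in each response:

.. code::

   GET /v2.1/servers/52415800-8b69-11e0-9b19-734fcece0043 HTTP/1.1
   Host: servers.api.openstack.org
   Accept: application/json
   X-Auth-Token: eaaafd18-0fed-4b3a-81b4-663c99ec1cbb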
Rather than re-downloading and re-parsing the full status at each polling interval, your REST client may use the ``changes-since`` and/or ``changes-before`` parameters to check for changes within a specified time. The ``changes-since`` time or ``changes-before`` time is specified as an `ISO 8601 `__ dateTime (``2011-01-24T17:08Z``). The form for the timestamp is **CCYY-MM-DDThh:mm:ss**. An optional time zone may be written in by appending the form ±hh:mm which describes the timezone as an offset from UTC. When the timezone is not specified (``2011-01-24T17:08``), the UTC timezone is assumed. The following situations need to be considered: * If nothing has changed since the ``changes-since`` time, an empty list is returned. If data has changed, only the items changed since the specified time are returned in the response. For example, performing a **GET** against:: https://api.servers.openstack.org/v2.1/servers?changes-since=2015-01-24T17:08Z would list all servers that have changed since Mon, 24 Jan 2015 17:08:00 UTC. * If nothing has changed earlier than or equal to the ``changes-before`` time, an empty list is returned. If data has changed, only the items changed earlier than or equal to the specified time are returned in the response. For example, performing a **GET** against:: https://api.servers.openstack.org/v2.1/servers?changes-before=2015-01-24T17:08Z would list all servers that have changed earlier than or equal to Mon, 24 Jan 2015 17:08:00 UTC. * If nothing has changed later than or equal to ``changes-since``, or earlier than or equal to ``changes-before``, an empty list is returned. If data has changed, only the items changed between ``changes-since`` time and ``changes-before`` time are returned in the response. For example, performing a **GET** against:: https://api.servers.openstack.org/v2.1/servers?changes-since=2015-01-24T17:08Z&changes-before=2015-01-25T17:08Z would list all servers that have changed later than or equal to Mon, 24 Jan 2015 17:08:00 UTC, and earlier than or equal to Mon, 25 Jan 2015 17:08:00 UTC. Microversion change history for servers, instance actions and migrations regarding ``changes-since`` and ``changes-before``: * The `2.21 microversion`_ allows reading instance actions for a deleted server resource. * The `2.58 microversion`_ allows filtering on ``changes-since`` when listing instance actions for a server. * The `2.59 microversion`_ allows filtering on ``changes-since`` when listing migration records. * The `2.66 microversion`_ adds the ``changes-before`` filter when listing servers, instance actions and migrations. The ``changes-since`` filter nor the ``changes-before`` filter change any read-deleted behavior in the os-instance-actions or os-migrations APIs. The os-instance-actions API with the 2.21 microversion allows retrieving instance actions for a deleted server resource. The os-migrations API takes an optional ``instance_uuid`` filter parameter but does not support returning deleted migration records. To allow clients to keep track of changes, the ``changes-since`` filter and ``changes-before`` filter displays items that have been *recently* deleted. Servers contain a ``DELETED`` status that indicates that the resource has been removed. Implementations are not required to keep track of deleted resources indefinitely, so sending a ``changes-since`` time or a ``changes-before`` time in the distant past may miss deletions. .. _2.21 microversion: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id19 .. 
_2.58 microversion: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id53 .. _2.59 microversion: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id54 .. _2.66 microversion: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id59 nova-32.0.0/api-guide/source/port_with_resource_request.rst ================================= Using ports with resource request ================================= Starting from microversion 2.72, nova supports creating servers with neutron ports that have a resource request, visible as the admin-only port attribute ``resource_request``. For example, a neutron port has a resource request if it has a QoS minimum bandwidth rule attached. Deleting such servers or detaching such ports has worked since the Stein version of nova without requiring any specific microversion. However, the following API operations are still not supported in nova: * Creating servers with neutron networks having a QoS minimum bandwidth rule is not supported. The user needs to pre-create the port in that neutron network and create the server with the pre-created port. * Attaching neutron ports and networks having a QoS minimum bandwidth rule is not supported. Also, the following API operations are not supported in the 19.0.0 (Stein) version of nova: * Moving (resizing, migrating, live-migrating, evacuating, unshelving after shelve offload) servers with ports having a resource request is not yet supported. As of 20.0.0 (Train), nova supports cold migrating and resizing servers with neutron ports having resource requests if both the source and destination compute services are upgraded to 20.0.0 (Train) and the ``[upgrade_levels]/compute`` configuration does not prevent the computes from using the latest RPC version. However, cross-cell resize and cross-cell migrate operations are still not supported with such ports, and nova will fall back to a same-cell resize if the server has such ports. As of 21.0.0 (Ussuri), nova supports evacuating, live migrating and unshelving servers with neutron ports having resource requests. As of 23.0.0 (Wallaby), nova supports attaching neutron ports having QoS minimum bandwidth rules. Extended resource request ~~~~~~~~~~~~~~~~~~~~~~~~~ It is expected that neutron 20.0.0 (Yoga) will implement an extended resource request format via the ``port-resource-request-groups`` neutron API extension. As of nova 24.0.0 (Xena), nova already supports this extension if every nova-compute service is upgraded to the Xena version and the ``[upgrade_levels]/compute`` configuration does not prevent the computes from using the latest RPC version. See :nova-doc:`the admin guide ` for administrative details. nova-32.0.0/api-guide/source/request_and_response_formats.rst ============================ Request and response formats ============================ The OpenStack Compute API only supports JSON request and response formats, with a mime-type of ``application/json``. As there is only one supported content type, all content is assumed to be ``application/json`` in both request and response formats.
Request and response example ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The example below shows a request body in JSON format: **Example: JSON request with headers** .. code:: POST /v2.1/servers HTTP/1.1 Host: servers.api.openstack.org X-Auth-Token: eaaafd18-0fed-4b3a-81b4-663c99ec1cbb .. code:: JSON { "server": { "name": "server-test-1", "imageRef": "b5660a6e-4b46-4be3-9707-6b47221b454f", "flavorRef": "2", "max_count": 1, "min_count": 1, "networks": [ { "uuid": "d32019d3-bc6e-4319-9c1d-6722fc136a22" } ], "security_groups": [ { "name": "default" }, { "name": "another-secgroup-name" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-guide/source/server_concepts.rst0000664000175000017500000011556100000000000021675 0ustar00zuulzuul00000000000000=============== Server concepts =============== For the OpenStack Compute API, a server is a virtual machine (VM) instance, a physical machine or a container. Server status ~~~~~~~~~~~~~ You can filter the list of servers by image, flavor, name, and status through the respective query parameters. Server contains a status attribute that indicates the current server state. You can filter on the server status when you complete a list servers request. The server status is returned in the response body. The server status is one of the following values: **Server status values** - ``ACTIVE``: The server is active. - ``BUILD``: The server has not yet finished the original build process. - ``DELETED``: The server is deleted. - ``ERROR``: The server is in error. - ``HARD_REBOOT``: The server is hard rebooting. This is equivalent to pulling the power plug on a physical server, plugging it back in, and rebooting it. - ``MIGRATING``: The server is migrating. This is caused by a live migration (moving a server that is active) action. - ``PASSWORD``: The password is being reset on the server. - ``PAUSED``: The server is paused. - ``REBOOT``: The server is in a soft reboot state. A reboot command was passed to the operating system. - ``REBUILD``: The server is currently being rebuilt from an image. - ``RESCUE``: The server is in rescue mode. - ``RESIZE``: Server is performing the differential copy of data that changed during its initial copy. Server is down for this stage. - ``REVERT_RESIZE``: The resize or migration of a server failed for some reason. The destination server is being cleaned up and the original source server is restarting. - ``SHELVED``: The server is in shelved state. Depends on the shelve offload time, the server will be automatically shelved off loaded. - ``SHELVED_OFFLOADED``: The shelved server is offloaded (removed from the compute host) and it needs unshelved action to be used again. - ``SHUTOFF``: The server was powered down by the user, either through the OpenStack Compute API or from within the server. For example, the user issued a :command:`shutdown -h` command from within the server. If the OpenStack Compute manager detects that the VM was powered down, it transitions the server to the SHUTOFF status. - ``SOFT_DELETED``: The server is marked as deleted but will remain in the cloud for some configurable amount of time. While soft-deleted, an authorized user can restore the server back to normal state. When the time expires, the server will be deleted permanently. - ``SUSPENDED``: The server is suspended, either by request or necessity. See the :nova-doc:`feature support matrix ` for supported compute drivers. 
When you suspend a server, its state is stored on disk, all memory is written to disk, and the server is stopped. Suspending a server is similar to placing a device in hibernation, and its occupied resources are not freed but rather kept for when the server is resumed. If an instance is infrequently used and the occupied resources need to be freed to create other servers, it should be shelved. - ``UNKNOWN``: The state of the server is unknown. It could be because a part of the infrastructure is temporarily down (see :doc:`down_cells` for more information). Contact your cloud provider. - ``VERIFY_RESIZE``: System is awaiting confirmation that the server is operational after a move or resize. Server status is calculated from vm_state and task_state, which are exposed to administrators: - vm_state describes a VM's current stable (not transition) state. That is, if there are no ongoing compute API calls (running tasks), vm_state should reflect what the customer expects the VM to be. When combined with task states, a better picture can be formed regarding the server's health and progress. Refer to :nova-doc:`VM States `. - task_state represents what is happening to the instance at the current moment. These tasks can be generic, such as ``spawning``, or specific, such as ``block_device_mapping``. These task states allow for a better view into what a server is doing. Server creation ~~~~~~~~~~~~~~~ Status Transition: - ``BUILD`` While the server is building there are several task state transitions that can occur: - ``scheduling``: The request is being scheduled to a compute node. - ``networking``: Setting up network interfaces asynchronously. - ``block_device_mapping``: Preparing block devices (local disks, volumes). - ``spawning``: Creating the guest in the hypervisor. - ``ACTIVE`` The terminal state for a successfully built and running server. - ``ERROR`` (on error) When you create a server, the operation asynchronously provisions a new server. The progress of this operation depends on several factors including the location of the requested image, network I/O, host load, and the selected flavor. The progress of the request can be checked by performing a **GET** on /servers/*{server_id}*, which returns a progress attribute (from 0% to 100% complete). The full URL to the newly created server is returned through the ``Location`` header and is available as a ``self`` and ``bookmark`` link in the server representation. Note that when creating a server, only the server ID, its links, and the administrative password are guaranteed to be returned in the response. You can retrieve additional attributes by performing subsequent **GET** operations on the server. Server query ~~~~~~~~~~~~ There are two APIs for querying servers: ``GET /servers`` and ``GET /servers/detail``. Both of these APIs support filtering the query result by using query options. Different user roles have access to different sets of query options: - For a general user, only a limited set of server attributes can be used as query options.
The supported options are: - ``changes-since`` - ``flavor`` - ``image`` - ``ip`` - ``ip6`` (New in version 2.5) - ``name`` - ``not-tags`` (New in version 2.26) - ``not-tags-any`` (New in version 2.26) - ``reservation_id`` - ``status`` - ``tags`` (New in version 2.26) - ``tags-any`` (New in version 2.26) - ``changes-before`` (New in version 2.66) - ``locked`` (New in version 2.73) - ``availability_zone`` (New in version 2.83) - ``config_drive`` (New in version 2.83) - ``key_name`` (New in version 2.83) - ``created_at`` (New in version 2.83) - ``launched_at`` (New in version 2.83) - ``terminated_at`` (New in version 2.83) - ``power_state`` (New in version 2.83) - ``task_state`` (New in version 2.83) - ``vm_state`` (New in version 2.83) - ``progress`` (New in version 2.83) - ``user_id`` (New in version 2.83) Other options will be ignored by nova silently. - For administrator, most of the server attributes can be used as query options. Before the Ocata release, the fields in the database schema of server are exposed as query options, which may lead to unexpected API change. After the Ocata release, the definition of the query options and the database schema are decoupled. That is also the reason why the naming of the query options are different from the attribute naming in the servers API response. Precondition: there are 2 servers existing in cloud with following info:: { "servers": [ { "name": "t1", "OS-EXT-SRV-ATTR:host": "devstack1", ... }, { "name": "t2", "OS-EXT-SRV-ATTR:host": "devstack2", ... } ] } **Example: General user query server with administrator only options** Request with non-administrator context: ``GET /servers/detail?host=devstack1`` .. note:: The ``host`` query parameter is only for administrator users and the query parameter is ignored if specified by non-administrator users. Thus the API returns servers of both ``devstack1`` and ``devstack2`` in this example. Response:: { "servers": [ { "name": "t1", ... }, { "name": "t2", ... } ] } **Example: Administrator query server with administrator only options** Request with administrator context: ``GET /servers/detail?host=devstack1`` Response:: { "servers": [ { "name": "t1", ... } ] } There are also some special query options: - ``changes-since`` returns the servers updated after the given time. Please see: :doc:`polling_changes` - ``changes-before`` returns the servers updated before the given time. Please see: :doc:`polling_changes` - ``deleted`` returns (or excludes) deleted servers - ``soft_deleted`` modifies behavior of 'deleted' to either include or exclude instances whose vm_state is SOFT_DELETED - ``all_tenants`` is an administrator query option, which allows the administrator to query the servers in any tenant. **Example: User query server with special keys changes-since or changes-before** Request: ``GET /servers/detail`` Response:: { "servers": [ { "name": "t1", "updated": "2015-12-15T15:55:52Z", ... }, { "name": "t2", "updated": "2015-12-17T15:55:52Z", ... } ] } Request: ``GET /servers/detail?changes-since='2015-12-16T15:55:52Z'`` Response:: { { "name": "t2", "updated": "2015-12-17T15:55:52Z", ... } } Request: ``GET /servers/detail?changes-before='2015-12-16T15:55:52Z'`` Response:: { { "name": "t1", "updated": "2015-12-15T15:55:52Z", ... } } Request: ``GET /servers/detail?changes-since='2015-12-10T15:55:52Z'&changes-before='2015-12-28T15:55:52Z'`` Response:: { "servers": [ { "name": "t1", "updated": "2015-12-15T15:55:52Z", ... }, { "name": "t2", "updated": "2015-12-17T15:55:52Z", ... 
} ] } There are two kinds of matching in query options: Exact matching and regex matching. **Example: User query server using exact matching on host** Request with administrator context: ``GET /servers/detail`` Response:: { "servers": [ { "name": "t1", "OS-EXT-SRV-ATTR:host": "devstack" ... }, { "name": "t2", "OS-EXT-SRV-ATTR:host": "devstack1" ... } ] } Request with administrator context: ``GET /servers/detail?host=devstack`` Response:: { "servers": [ { "name": "t1", "OS-EXT-SRV-ATTR:host": "devstack" ... } ] } **Example: Query server using regex matching on name** Request with administrator context: ``GET /servers/detail`` Response:: { "servers": [ { "name": "test11", ... }, { "name": "test21", ... }, { "name": "t1", ... }, { "name": "t14", ... } ] } Request with administrator context: ``GET /servers/detail?name=t1`` Response:: { "servers": [ { "name": "test11", ... }, { "name": "t1", ... }, { "name": "t14", ... } ] } **Example: User query server using exact matching on host and regex matching on name** Request with administrator context: ``GET /servers/detail`` Response:: { "servers": [ { "name": "test1", "OS-EXT-SRV-ATTR:host": "devstack" ... }, { "name": "t2", "OS-EXT-SRV-ATTR:host": "devstack1" ... }, { "name": "test3", "OS-EXT-SRV-ATTR:host": "devstack1" ... } ] } Request with administrator context: ``GET /servers/detail?host=devstack1&name=test`` Response:: { "servers": [ { "name": "test3", "OS-EXT-SRV-ATTR:host": "devstack1" ... } ] } Request: ``GET /servers/detail?changes-since='2015-12-16T15:55:52Z'`` Response:: { { "name": "t2", "updated": "2015-12-17T15:55:52Z" ... } } Server actions ~~~~~~~~~~~~~~ - **Reboot** Use this function to perform either a soft or hard reboot of a server. With a soft reboot, the operating system is signaled to restart, which allows for a graceful shutdown of all processes. A hard reboot is the equivalent of power cycling the server. The virtualization platform should ensure that the reboot action has completed successfully even in cases in which the underlying domain/VM is paused or halted/stopped. - **Rebuild** Use this function to remove all data on the server and replaces it with the specified image. Server ID, flavor and IP addresses remain the same. - **Evacuate** Should a nova-compute service actually go offline, it can no longer report status about any of the servers on it. This means they'll be listed in an 'ACTIVE' state forever. Evacuate is a work around for this that lets an administrator forcibly rebuild these servers on another node. It makes no guarantees that the host was actually down, so fencing is left as an exercise to the deployer. - **Resize** (including **Confirm resize**, **Revert resize**) Use this function to convert an existing server to a different flavor, in essence, scaling the server up or down. The original server is saved for a period of time to allow rollback if there is a problem. All resizes should be tested and explicitly confirmed, at which time the original server is removed. The resized server may be automatically confirmed based on the administrator's configuration of the deployment. Confirm resize action will delete the old server in the virt layer. The spawned server in the virt layer will be used from then on. On the contrary, Revert resize action will delete the new server spawned in the virt layer and revert all changes. The original server will be used from then on. - **Pause**, **Unpause** You can pause a server by making a pause request. This request stores the state of the VM in RAM. 
A paused server continues to run in a frozen state. Unpause returns a paused server back to an active state. - **Suspend**, **Resume** Users might want to suspend a server if it is infrequently used or to perform system maintenance. When you suspend a server, its VM state is stored on disk, all memory is written to disk, and the virtual machine is stopped. Suspending a server is similar to placing a device in hibernation and its occupied resource will not be freed but rather kept for when the server is resumed. Resume will resume a suspended server to an active state. - **Snapshot** You can store the current state of the server root disk to be saved and uploaded back into the glance image repository. Then a server can later be booted again using this saved image. - **Backup** You can use backup method to store server's current state in the glance repository, in the mean time, old snapshots will be removed based on the given 'daily' or 'weekly' type. - **Start** Power on the server. - **Stop** Power off the server. - **Delete**, **Restore** Power off the given server first then detach all the resources associated to the server such as network and volumes, then delete the server. The configuration option 'reclaim_instance_interval' (in seconds) decides whether the server to be deleted will still be in the system. If this value is greater than 0, the deleted server will not be deleted immediately, instead it will be put into a queue until it's too old (deleted time greater than the value of reclaim_instance_interval). Administrator is able to use Restore action to recover the server from the delete queue. If the deleted server remains longer than the value of reclaim_instance_interval, it will be deleted by compute service automatically. - **Shelve**, **Shelve offload**, **Unshelve** Shelving a server indicates it will not be needed for some time and may be temporarily removed from the hypervisors. This allows its resources to be freed up for use by someone else. By default the configuration option 'shelved_offload_time' is 0 and the shelved server will be removed from the hypervisor immediately after shelve operation; Otherwise, the resource will be kept for the value of 'shelved_offload_time' (in seconds) so that during the time period the unshelve action will be faster, then the periodic task will remove the server from hypervisor after 'shelved_offload_time' time passes. Set the option 'shelved_offload_time' to -1 make it never offload. Shelve will power off the given server and take a snapshot if it is booted from image. The server can then be offloaded from the compute host and its resources deallocated. Offloading is done immediately if booted from volume, but if booted from image the offload can be delayed for some time or infinitely, leaving the image on disk and the resources still allocated. Shelve offload is used to explicitly remove a shelved server that has been left on a host. This action can only be used on a shelved server and is usually performed by an administrator. Unshelve is the reverse operation of Shelve. It builds and boots the server again, on a new scheduled host if it was offloaded, using the shelved image in the glance repository if booted from image. - **Lock**, **Unlock** Lock a server so the following actions by non-admin users are not allowed to the server. 
- Delete Server - Change Administrative Password (changePassword Action) - Confirm Resized Server (confirmResize Action) - Force-Delete Server (forceDelete Action) - Pause Server (pause Action) - Reboot Server (reboot Action) - Rebuild Server (rebuild Action) - Rescue Server (rescue Action) - Resize Server (resize Action) - Restore Soft-Deleted Instance (restore Action) - Resume Suspended Server (resume Action) - Revert Resized Server (revertResize Action) - Shelve-Offload (Remove) Server (shelveOffload Action) - Shelve Server (shelve Action) - Start Server (os-start Action) - Stop Server (os-stop Action) - Suspend Server (suspend Action) - Trigger Crash Dump In Server - Unpause Server (unpause Action) - Unrescue Server (unrescue Action) - Unshelve (Restore) Shelved Server (unshelve Action) - Attach a volume to an instance - Update a volume attachment - Detach a volume from an instance - Create Interface - Detach Interface - Create Or Update Metadata Item - Create or Update Metadata Items - Delete Metadata Item - Replace Metadata Items - Add (Associate) Fixed Ip (addFixedIp Action) (DEPRECATED) - Remove (Disassociate) Fixed Ip (removeFixedIp Action) (DEPRECATED) .. NOTE(takashin): The following APIs can be performed by administrators only by default. So they are not listed in the above list. - Migrate Server (migrate Action) - Live-Migrate Server (os-migrateLive Action) - Force Migration Complete Action (force_complete Action) - Delete (Abort) Migration - Inject Network Information (injectNetworkInfo Action) - Reset Networking On A Server (resetNetwork Action) But administrators can perform the actions on the server even though the server is locked. By default, only owner or administrator can lock the sever, and administrator can overwrite owner's lock along with the locked_reason if it is specified. Unlock will unlock a server in locked state so additional operations can be performed on the server by non-admin users. By default, only owner or administrator can unlock the server. - **Rescue**, **Unrescue** The rescue operation starts a server in a special configuration whereby it is booted from a special root disk image. This enables the tenant to try and restore a broken guest system. Unrescue is the reverse action of Rescue. The server spawned from the special root image will be deleted. - **Set administrator password** Sets the root/administrator password for the given server. It uses an optionally installed agent to set the administrator password. - **Migrate**, **Live migrate** Migrate is usually utilized by administrator, it will move a server to another host; it utilizes the 'resize' action but with same flavor, so during migration, the server will be powered off and rebuilt on another host. Live migrate also moves a server from one host to another, but it won't power off the server in general so the server will not suffer a down time. Administrators may use this to evacuate servers from a host that needs to undergo maintenance tasks. - **Trigger crash dump** Trigger crash dump usually utilized by either administrator or the server's owner, it will dump the memory image as dump file into the given server, and then reboot the kernel again. And this feature depends on the setting about the trigger (e.g. NMI) in the server. Server passwords ~~~~~~~~~~~~~~~~ You can specify a password when you create the server through the optional adminPass attribute. The specified password must meet the complexity requirements set by your OpenStack Compute provider. 
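For example, a create request that supplies the password explicitly might look like the following sketch (the image and flavor references are reused from earlier examples and the password value is purely illustrative):

.. code-block:: json

   {
       "server": {
           "name": "new-server-test",
           "imageRef": "52415800-8b69-11e0-9b19-734f6f006e54",
           "flavorRef": "52415800-8b69-11e0-9b19-734f1195ff37",
           "adminPass": "S3cure-Pa55w0rd"
       }
   }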
The server might enter an ``ERROR`` state if the complexity requirements are not met. In this case, a client can issue a change password action to reset the server password. If a password is not specified, a randomly generated password is assigned and returned in the response object. This password is guaranteed to meet the security requirements set by the compute provider. For security reasons, the password is not returned in subsequent **GET** calls. Server metadata ~~~~~~~~~~~~~~~ Custom server metadata can also be supplied at launch time. The maximum size of the metadata key and value is 255 bytes each. The maximum number of key-value pairs that can be supplied per server is determined by the compute provider and may be queried via the maxServerMeta absolute limit. Block Device Mapping ~~~~~~~~~~~~~~~~~~~~ Simply speaking, Block Device Mapping describes how block devices are exposed to the server. For some historical reasons, nova has two ways to mention the block device mapping in server creation request body: - ``block_device_mapping``: This is the legacy way and supports backward compatibility for EC2 API. - ``block_device_mapping_v2``: This is the recommended format to specify Block Device Mapping information in server creation request body. Users cannot mix the two formats in the same request. For more information, refer to `Block Device Mapping `_. For the full list of ``block_device_mapping_v2`` parameters available when creating a server, see the `API reference `_. **Example for block_device_mapping_v2** This will create a 100GB size volume type block device from an image with UUID of ``bb02b1a3-bc77-4d17-ab5b-421d89850fca``. It will be used as the first order boot device (``boot_index=0``), and this block device will not be deleted after we terminate the server. Note that the ``imageRef`` parameter is not required in this case since we are creating a volume-backed server. .. code-block:: json { "server": { "name": "volume-backed-server-test", "flavorRef": "52415800-8b69-11e0-9b19-734f1195ff37", "block_device_mapping_v2": [ { "boot_index": 0, "uuid": "bb02b1a3-bc77-4d17-ab5b-421d89850fca", "volume_size": "100", "source_type": "image", "destination_type": "volume", "delete_on_termination": false } ] } } Scheduler Hints ~~~~~~~~~~~~~~~ Scheduler hints are a way for the user to influence on which host the scheduler places a server. They are pre-determined key-value pairs specified as a dictionary separate from the main ``server`` dictionary in the server create request. Available scheduler hints vary from cloud to cloud, depending on the `cloud's configuration`_. .. code-block:: json { "server": { "name": "server-in-group", "imageRef": "52415800-8b69-11e0-9b19-734f6f006e54", "flavorRef": "52415800-8b69-11e0-9b19-734f1195ff37" }, "os:scheduler_hints": { "group": "05a81485-010f-4df1-bbec-7821c85686e8" } } For more information on how to specify scheduler hints refer to `the create-server-detail Request section`_ in the Compute API reference. For more information on how scheduler hints are different from flavor extra specs, refer to `this document`_. .. _cloud's configuration: https://docs.openstack.org/nova/latest/admin/configuration/schedulers.html .. _the create-server-detail Request section: https://docs.openstack.org/api-ref/compute/?expanded=create-server-detail#create-server .. 
_this document: https://docs.openstack.org/nova/latest/reference/scheduler-hints-vs-flavor-extra-specs.html#scheduler-hints Server Consoles ~~~~~~~~~~~~~~~ Server Consoles can also be supplied after server launched. There are several server console services available. First, users can get the console output from the specified server and can limit the lines of console text by setting the length. Secondly, users can access multiple types of remote consoles. The user can use ``novnc``, ``rdp-html5``, ``spice-html5``, ``serial``, and ``webmks`` (starting from microversion 2.8) through either the OpenStack dashboard or the command line. Refer to :nova-doc:`Configure remote console access `. Server networks ~~~~~~~~~~~~~~~ Networks to which the server connects can also be supplied at launch time. One or more networks can be specified. User can also specify a specific port on the network or the fixed IP address to assign to the server interface. Server access addresses ~~~~~~~~~~~~~~~~~~~~~~~ In a hybrid environment, the IP address of a server might not be controlled by the underlying implementation. Instead, the access IP address might be part of the dedicated hardware; for example, a router/NAT device. In this case, the addresses provided by the implementation cannot actually be used to access the server (from outside the local LAN). Here, a separate *access address* may be assigned at creation time to provide access to the server. This address may not be directly bound to a network interface on the server and may not necessarily appear when a server's addresses are queried. Nonetheless, clients that must access the server directly are encouraged to do so via an access address. In the example below, an IPv4 address is assigned at creation time. **Example: Create server with access IP: JSON request** .. code-block:: json { "server": { "name": "new-server-test", "imageRef": "52415800-8b69-11e0-9b19-734f6f006e54", "flavorRef": "52415800-8b69-11e0-9b19-734f1195ff37", "accessIPv4": "67.23.10.132" } } .. note:: Both IPv4 and IPv6 addresses may be used as access addresses and both addresses may be assigned simultaneously as illustrated below. Access addresses may be updated after a server has been created. **Example: Create server with multiple access IPs: JSON request** .. code-block:: json { "server": { "name": "new-server-test", "imageRef": "52415800-8b69-11e0-9b19-734f6f006e54", "flavorRef": "52415800-8b69-11e0-9b19-734f1195ff37", "accessIPv4": "67.23.10.132", "accessIPv6": "::babe:67.23.10.132" } } Moving servers ~~~~~~~~~~~~~~ There are several actions that may result in a server moving from one compute host to another including shelve, resize, migrations and evacuate. The following use cases demonstrate the intention of the actions and the consequence for operational procedures. Cloud operator needs to move a server ------------------------------------- Sometimes a cloud operator may need to redistribute work loads for operational purposes. For example, the operator may need to remove a compute host for maintenance or deploy a kernel security patch that requires the host to be rebooted. The operator has two actions available for deliberately moving work loads: cold migration (moving a server that is not active) and live migration (moving a server that is active). Cold migration moves a server from one host to another by copying its state, local storage and network configuration to new resources allocated on a new host selected by scheduling policies. 
The operation is relatively quick as the server is not changing its state during the copy process. The user does not have access to the server during the operation. Live migration moves a server from one host to another while it is active, so it is constantly changing its state during the action. As a result it can take considerably longer than cold migration. During the action the server is online and accessible, but only a limited set of management actions are available to the user. The following are common patterns for employing migrations in a cloud: - **Host maintenance** If a compute host is to be removed from the cloud all its servers will need to be moved to other hosts. In this case it is normal for the rest of the cloud to absorb the work load, redistributing the servers by rescheduling them. To prepare the host it will be disabled so it does not receive any further servers. Then each server will be migrated to a new host by cold or live migration, depending on the state of the server. When complete, the host is ready to be removed. - **Rolling updates** Often it is necessary to perform an update on all compute hosts which requires them to be rebooted. In this case it is not strictly necessary to move inactive servers because they will be available after the reboot. However, active servers would be impacted by the reboot. Live migration will allow them to continue operation. In this case a rolling approach can be taken by starting with an empty compute host that has been updated and rebooted. Another host that has not yet been updated is disabled and all its servers are migrated to the new host. When the migrations are complete the new host continues normal operation. The old host will be empty and can be updated and rebooted. It then becomes the new target for another round of migrations. This process can be repeated until the whole cloud has been updated, usually using a pool of empty hosts instead of just one. - **Resource Optimization** To reduce energy usage, some cloud operators will try and move servers so they fit into the minimum number of hosts, allowing some servers to be turned off. Sometimes higher performance might be wanted, so servers are spread out between the hosts to minimize resource contention. Migrating a server is not normally a choice that is available to the cloud user because the user is not normally aware of compute hosts. Management of the cloud and how servers are provisioned in it is the responsibility of the cloud operator. Recover from a failed compute host ---------------------------------- Sometimes a compute host may fail. This is a rare occurrence, but when it happens during normal operation the servers running on the host may be lost. In this case the operator may recreate the servers on the remaining compute hosts using the evacuate action. Failure detection can be proved to be impossible in compute systems with asynchronous communication, so true failure detection cannot be achieved. Usually when a host is considered to have failed it should be excluded from the cloud and any virtual networking or storage associated with servers on the failed host should be isolated from it. These steps are called fencing the host. Initiating these action is outside the scope of Nova. Once the host has been fenced its servers can be recreated on other hosts without worry of the old incarnations reappearing and trying to access shared resources. It is usual to redistribute the servers from a failed host by rescheduling them. 
Please note, this operation can result in data loss for the user's server. As there is no access to the original server, if there were any disks stored on local storage, that data will be lost. Evacuate does the same operation as a rebuild. It downloads any images from glance and creates new blank ephemeral disks. Any disks that were volumes, or on shared storage, are reconnected. There should be no data loss for those disks. This is why fencing the host is important, to ensure volumes and shared storage are not corrupted by two servers writing simultaneously. Evacuating a server is solely in the domain of the cloud operator because it must be performed in coordination with other operational procedures to be safe. A user is not normally aware of compute hosts but is adversely affected by their failure. User resizes server to get more resources ----------------------------------------- Sometimes a user may want to change the flavor of a server, e.g. change the quantity of cpus, disk, memory or any other resource. This is done by restarting the server with a new flavor. As the server is being moved, it is normal to reschedule the server to another host (although resize to the same host is an option for the operator). Resize involves shutting down the server, finding a host that has the correct resources for the new flavor size, moving the current server (including all storage) to the new host. Once the server has been given the appropriate resources to match the new flavor, the server is started again. After the resize operation, when the user is happy their server is working correctly after the resize, the user calls Confirm Resize. This deletes the 'before-the-resize' server that was kept on the source host. Alternatively, the user can call Revert Resize to delete the new resized server and restore the old that was stored on the source host. If the user does not manually confirm the resize within a configured time period, the resize is automatically confirmed, to free up the space the old is using on the source host. As with shelving, resize provides the cloud operator with an opportunity to redistribute work loads across the cloud according to the operators scheduling policy, providing the same benefits as above. Resizing a server is not normally a choice that is available to the cloud operator because it changes the nature of the server being provided to the user. User doesn't want to be charged when not using a server ------------------------------------------------------- Sometimes a user does not require a server to be active for a while, perhaps over a weekend or at certain times of day. Ideally they don't want to be billed for those resources. Just powering down a server does not free up any resources, but shelving a server does free up resources to be used by other users. This makes it feasible for a cloud operator to offer a discount when a server is shelved. When the user shelves a server the operator can choose to remove it from the compute hosts, i.e. the operator can offload the shelved server. When the user's server is unshelved, it is scheduled to a new host according to the operators policies for distributing work loads across the compute hosts, including taking disabled hosts into account. This will contribute to increased overall capacity, freeing hosts that are ear-marked for maintenance and providing contiguous blocks of resources on single hosts due to moving out old servers. 
Shelving a server is not normally a choice that is available to the cloud operator because it affects the availability of the server being provided to the user. Configure Guest OS ~~~~~~~~~~~~~~~~~~ Metadata API ------------ Nova provides a metadata API for servers to retrieve server specific metadata. Neutron ensures this metadata API can be accessed through a predefined IP address, ``169.254.169.254``. For more details, refer to the :nova-doc:`user guide `. Config Drive ------------ Nova is able to write metadata to a special configuration drive that attaches to the server when it boots. The server can mount this drive and read files from it to get information that is normally available through the metadata service. For more details, refer to the :nova-doc:`user guide `. User data --------- A user data file is a special key in the metadata service that holds a file that cloud-aware applications in the server can access. This information can be accessed via the metadata API or a config drive. The latter allows the deployed server to consume it by active engines such as cloud-init during its boot process, where network connectivity may not be an option. Server personality ------------------ You can customize the personality of a server by injecting data into its file system. For example, you might want to insert ssh keys, set configuration files, or store data that you want to retrieve from inside the server. This feature provides a minimal amount of launch-time personalization. If you require significant customization, create a custom image. Follow these guidelines when you inject files: - The maximum size of the file path data is 255 bytes. - Encode the file contents as a Base64 string. The maximum size of the file contents is determined by the compute provider and may vary based on the image that is used to create the server. Considerations: - The maximum limit refers to the number of bytes in the decoded data and not the number of characters in the encoded data. - The maximum number of file path/content pairs that you can supply is also determined by the compute provider and is defined by the maxPersonality absolute limit. - The absolute limit, maxPersonalitySize, is a byte limit that is guaranteed to apply to all images in the deployment. Providers can set additional per-image personality limits. - The file injection might not occur until after the server is built and booted. - After file injection, personality files are accessible by only system administrators. For example, on Linux, all files have root and the root group as the owner and group owner, respectively, and allow user and group read access only (octal 440). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-guide/source/users.rst0000664000175000017500000000472000000000000017624 0ustar00zuulzuul00000000000000.. Copyright 2015 OpenStack Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ===== Users ===== The Compute API includes all end user and administrator API calls. 
Role based access control ========================= Keystone middleware is used to authenticate users and identify their roles. The Compute API uses these roles, along with oslo.policy, to decide what the user is authorized to do. Refer to the :nova-doc:`compute admin guide ` for details. Personas used in this guide =========================== While the policy can be configured in many ways, to make it easy to understand the most common use cases the API has been designed for, this guide standardizes on the following types of user: * application deployer: creates/deletes servers, directly or indirectly via the API * application developer: creates images and applications that run on the cloud * cloud administrator: deploys, operates and maintains the cloud In reality the picture is much more complex. Specifically, there are likely to be separate observer, creator and administrator roles for the application developer. Similarly, there are likely to be various levels of cloud administrator permissions, such as a read-only role that is able to view a list of servers for a specific tenant but is not able to perform any actions on any of them. .. note:: This is not attempting to be an exhaustive set of personas that consider various facets of the different users but instead aims to be a minimal set of users such that we use a consistent terminology throughout this document. Discovering Policy ================== An API to discover what actions you are authorized to perform is still a work in progress. Currently this is reported by an HTTP 403 :ref:`error `. Refer to the :nova-doc:`configuration guide ` for a list of policy rules along with their default values. nova-32.0.0/api-guide/source/versions.rst ======== Versions ======== The OpenStack Compute API uses both a URI and a MIME type versioning scheme. In the URI scheme, the first element of the path contains the target version identifier (e.g. `https://servers.api.openstack.org/v2.1/`...). The MIME type versioning scheme uses HTTP content negotiation, where the ``Accept`` or ``Content-Type`` header contains a MIME type that identifies the version (application/vnd.openstack.compute.v2.1+json). A version MIME type is always linked to a base MIME type, such as application/json. If conflicting versions are specified using both an HTTP header and a URI, the URI takes precedence. **Example: Request with MIME type versioning** .. code:: GET /214412/images HTTP/1.1 Host: servers.api.openstack.org Accept: application/vnd.openstack.compute.v2.1+json X-Auth-Token: eaaafd18-0fed-4b3a-81b4-663c99ec1cbb **Example: Request with URI versioning** .. code:: GET /v2.1/214412/images HTTP/1.1 Host: servers.api.openstack.org Accept: application/json X-Auth-Token: eaaafd18-0fed-4b3a-81b4-663c99ec1cbb Permanent Links ~~~~~~~~~~~~~~~ The MIME type versioning approach allows for the creation of permanent links, because the version scheme is not specified in the URI path: ``https://api.servers.openstack.org/224532/servers/123``. If a request is made without a version specified in the URI or via HTTP headers, then a multiple-choices response (300) follows that provides links and MIME types to available versions. **Example: Multiple choices: JSON response** ..
code:: { "choices": [ { "id": "v2.0", "links": [ { "href": "http://servers.api.openstack.org/v2/7f5b2214547e4e71970e329ccf0b257c/servers/detail", "rel": "self" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.compute+json;version=2" } ], "status": "DEPRECATED" }, { "id": "v2.1", "links": [ { "href": "http://servers.api.openstack.org/v2.1/7f5b2214547e4e71970e329ccf0b257c/servers/detail", "rel": "self" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.compute+json;version=2.1" } ], "status": "CURRENT" } ] } The API with ``CURRENT`` status is the newest API and continues to be improved by the Nova project. The API with ``SUPPORTED`` status is the old API, where new features are frozen. The API with ``DEPRECATED`` status is the API that will be removed in the foreseeable future. Providers should work with developers and partners to ensure there is adequate time to migrate to the new version before deprecated versions are discontinued. For any API which is under development but isn't released as yet, the API status is ``EXPERIMENTAL``. Your application can programmatically determine available API versions by performing a **GET** on the root URL (i.e. with the version and everything following that truncated) returned from the authentication system. You can also obtain additional information about a specific version by performing a **GET** on the base version URL (such as, ``https://servers.api.openstack.org/v2.1/``). Version request URLs must always end with a trailing slash (``/``). If you omit the slash, the server might respond with a 302 redirection request. For examples of the list versions and get version details requests and responses, see `API versions `__. The detailed version response contains pointers to both a human-readable and a machine-processable description of the API service. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315688.8296046 nova-32.0.0/api-ref/0000775000175000017500000000000000000000000014105 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.005606 nova-32.0.0/api-ref/source/0000775000175000017500000000000000000000000015405 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/conf.py0000664000175000017500000000416400000000000016711 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Compute API Reference documentation build configuration file # Refer to the Sphinx documentation for advice on configuring this file: # # http://www.sphinx-doc.org/en/stable/config.html # -- General configuration ---------------------------------------------------- extensions = [ 'openstackdocstheme', 'os_api_ref', ] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. 
master_doc = 'index' # General information about the project. copyright = '2010-present, OpenStack Foundation' # openstackdocstheme options openstackdocs_repo_name = 'openstack/nova' openstackdocs_bug_project = 'nova' openstackdocs_bug_tag = 'api-ref' # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { 'sidebar_mode': 'toc', } # -- Options for LaTeX output ------------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ( 'index', 'Nova.tex', 'OpenStack Compute API Documentation', 'OpenStack Foundation', 'manual', ), ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/diagnostics.inc0000664000175000017500000000432700000000000020415 0ustar00zuulzuul00000000000000.. -*- rst -*- ============================================ Servers diagnostics (servers, diagnostics) ============================================ Shows the usage data for a server. Show Server Diagnostics ======================= .. rest_method:: GET /servers/{server_id}/diagnostics Shows basic usage data for a server. Policy defaults enable only users with the administrative role. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), notfound(404), conflict(409), notimplemented(501) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path Response -------- Starting from **microversion 2.48** diagnostics response is standardized across all virt drivers. The response should be considered a debug interface only and not relied upon by programmatic tools. All response fields are listed below. If the virt driver is unable to provide a specific field then this field will be reported as ``None`` in the response. .. rest_parameters:: parameters.yaml - config_drive: config_drive_diagnostics - state: vm_state_diagnostics - driver: driver_diagnostics - hypervisor: hypervisor_diagnostics - hypervisor_os: hypervisor_os_diagnostics - uptime: uptime_diagnostics - num_cpus: num_cpus_diagnostics - num_disks: num_disks_diagnostics - num_nics: num_nics_diagnostics - memory_details: memory_details_diagnostics - cpu_details: cpu_details_diagnostics - disk_details: disk_details_diagnostics - nic_details: nic_details_diagnostics **Example Server diagnostics (2.48)** .. literalinclude:: ../../doc/api_samples/os-server-diagnostics/v2.48/server-diagnostics-get-resp.json :language: javascript .. warning:: Before **microversion 2.48** the response format for diagnostics was not well defined. Each hypervisor had its own format. **Example Server diagnostics (2.1)** Below is an example of diagnostics for a libvirt based instance. The unit of the return value is hypervisor specific, but in this case the unit of vnet1_rx* and vnet1_tx* is octets. .. 
literalinclude:: ../../doc/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/extensions.inc0000664000175000017500000000411200000000000020275 0ustar00zuulzuul00000000000000.. -*- rst -*- ===================================== Extensions (extensions) (DEPRECATED) ===================================== Lists available extensions and shows information for an extension, by alias. Nova originally supported the concept of API extensions, that allowed implementations of Nova to change the API (add new resources, or attributes to existing resource objects) via extensions. In an attempt to expose to the user what was supported in a particular site, the extensions resource provided a list of extensions and detailed information on each. The net result was gratuitous differentiation in the API that required all users of OpenStack clouds to write specific code to interact with every cloud. As such, the entire extensions concept is deprecated, and will be removed in the near future. List Extensions =============== .. rest_method:: GET /extensions Lists all extensions to the API. Normal response codes: 200 Error response codes: unauthorized(401) Response -------- .. rest_parameters:: parameters.yaml - extensions: extensions - name: extension_name - alias: alias - links: extension_links - namespace: namespace - description: extension_description - updated: updated **Example List Extensions** Lists all extensions to the API. .. literalinclude:: ../../doc/api_samples/extension-info/extensions-list-resp.json :language: javascript Show Extension Details ====================== .. rest_method:: GET /extensions/{alias} Shows details for an extension, by alias. Normal response codes: 200 Error response codes: unauthorized(401), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - alias: alias Response -------- .. rest_parameters:: parameters.yaml - extension: extension - name: extension_name - alias: alias - links: extension_links - namespace: namespace - description: extension_description - updated: updated **Example Show Extension Details** Shows details about the ``os-agents`` extension. .. literalinclude:: ../../doc/api_samples/extension-info/extensions-get-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/flavors.inc0000664000175000017500000001550100000000000017556 0ustar00zuulzuul00000000000000.. -*- rst -*- ========= Flavors ========= Show and manage server flavors. Flavors are a way to describe the basic dimensions of a server to be created including how much ``cpu``, ``ram``, and ``disk space`` are allocated to a server built with this flavor. List Flavors ============ .. rest_method:: GET /flavors Lists all flavors accessible to your project. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - sort_key: sort_key_flavor - sort_dir: sort_dir_flavor - limit: limit - marker: marker - minDisk: minDisk - minRam: minRam - is_public: flavor_is_public_query Response -------- .. rest_parameters:: parameters.yaml - flavors: flavors - id: flavor_id_body - name: flavor_name - description: flavor_description_resp - links: links **Example List Flavors (v2.55)** .. 
literalinclude:: ../../doc/api_samples/flavors/v2.55/flavors-list-resp.json :language: javascript Create Flavor ============= .. rest_method:: POST /flavors Creates a flavor. Creating a flavor is typically only available to administrators of a cloud because this has implications for scheduling efficiently in the cloud. .. note:: Special characters in the flavor ID, except the hyphen '-', underscore '_', spaces and dots '.', are not permitted. By default a flavor ID is a UUID, i.e. a serialized string whose groups are separated by '-', e.g. 01cc74d8-4816-4bef-835b-e36ff188c406. An integer flavor ID is permitted only for backward compatibility. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - flavor: flavor - name: flavor_name - description: flavor_description - id: flavor_id_body_create - ram: flavor_ram - disk: flavor_disk - vcpus: flavor_cpus - OS-FLV-EXT-DATA:ephemeral: flavor_ephem_disk_in - swap: flavor_swap_in - rxtx_factor: flavor_rxtx_factor_in - os-flavor-access:is_public: flavor_is_public_in **Example Create Flavor (v2.55)** .. literalinclude:: ../../doc/api_samples/flavor-manage/v2.55/flavor-create-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - flavor: flavor - name: flavor_name - description: flavor_description_resp - id: flavor_id_body - ram: flavor_ram - disk: flavor_disk - vcpus: flavor_cpus - links: links - OS-FLV-EXT-DATA:ephemeral: flavor_ephem_disk - OS-FLV-DISABLED:disabled: flavor_disabled - swap: flavor_swap - rxtx_factor: flavor_rxtx_factor - os-flavor-access:is_public: flavor_is_public - extra_specs: extra_specs_2_61 **Example Create Flavor (v2.75)** .. literalinclude:: ../../doc/api_samples/flavor-manage/v2.75/flavor-create-post-resp.json :language: javascript List Flavors With Details ========================= .. rest_method:: GET /flavors/detail Lists flavors with details. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - sort_key: sort_key_flavor - sort_dir: sort_dir_flavor - limit: limit - marker: marker - minDisk: minDisk - minRam: minRam - is_public: flavor_is_public_query Response -------- .. rest_parameters:: parameters.yaml - flavors: flavors - name: flavor_name - description: flavor_description_resp - id: flavor_id_body - ram: flavor_ram - disk: flavor_disk - vcpus: flavor_cpus - links: links - OS-FLV-EXT-DATA:ephemeral: flavor_ephem_disk - OS-FLV-DISABLED:disabled: flavor_disabled - swap: flavor_swap - rxtx_factor: flavor_rxtx_factor - os-flavor-access:is_public: flavor_is_public - extra_specs: extra_specs_2_61 **Example List Flavors With Details (v2.75)** .. literalinclude:: ../../doc/api_samples/flavors/v2.75/flavors-detail-resp.json :language: javascript Show Flavor Details =================== .. rest_method:: GET /flavors/{flavor_id} Shows details for a flavor. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - flavor_id: flavor_id Response -------- ..
rest_parameters:: parameters.yaml - flavor: flavor - name: flavor_name - description: flavor_description_resp - id: flavor_id_body - ram: flavor_ram - disk: flavor_disk - vcpus: flavor_cpus - links: links - OS-FLV-EXT-DATA:ephemeral: flavor_ephem_disk - OS-FLV-DISABLED:disabled: flavor_disabled - swap: flavor_swap - rxtx_factor: flavor_rxtx_factor - os-flavor-access:is_public: flavor_is_public - extra_specs: extra_specs_2_61 **Example Show Flavor Details (v2.75)** .. literalinclude:: ../../doc/api_samples/flavors/v2.75/flavor-get-resp.json :language: javascript Update Flavor Description ========================= .. rest_method:: PUT /flavors/{flavor_id} Updates a flavor description. This API is available starting with microversion 2.55. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - flavor_id: flavor_id - flavor: flavor - description: flavor_description_required **Example Update Flavor Description (v2.55)** .. literalinclude:: ../../doc/api_samples/flavor-manage/v2.55/flavor-update-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - flavor: flavor - name: flavor_name - description: flavor_description_resp_no_min - id: flavor_id_body - ram: flavor_ram - disk: flavor_disk - vcpus: flavor_cpus - links: links - OS-FLV-EXT-DATA:ephemeral: flavor_ephem_disk - OS-FLV-DISABLED:disabled: flavor_disabled - swap: flavor_swap - rxtx_factor: flavor_rxtx_factor - os-flavor-access:is_public: flavor_is_public - extra_specs: extra_specs_2_61 **Example Update Flavor Description (v2.75)** .. literalinclude:: ../../doc/api_samples/flavor-manage/v2.75/flavor-update-resp.json :language: javascript Delete Flavor ============= .. rest_method:: DELETE /flavors/{flavor_id} Deletes a flavor. This is typically an admin only action. Deleting a flavor that is in use by existing servers is not recommended as it can cause incorrect data to be returned to the user under some operations. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - flavor_id: flavor_id Response -------- No body content is returned on a successful DELETE. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/images.inc0000664000175000017500000001751500000000000017352 0ustar00zuulzuul00000000000000.. -*- rst -*- ==================== Images (DEPRECATED) ==================== .. warning:: These APIs are proxy calls to the Image service. Nova has deprecated all the proxy APIs and users should use the native APIs instead. All the Image service proxy APIs except the image metadata APIs will fail with a 404 starting from microversion 2.36. The image metadata APIs will fail with a 404 starting from microversion 2.39. See: `Relevant Image APIs `__. Lists, shows details for, and deletes images. Also sets, lists, shows details for, creates, updates and deletes image metadata. An image is a collection of files that you use to create and rebuild a server. By default, operators provide pre-built operating system images. You can also create custom images. See: `Create Image Action `__.
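As the warning above notes, these endpoints are deprecated proxies and the native Image service API should be used instead. The following is a minimal, illustrative sketch of the native alternative using openstacksdk; the ``mycloud`` cloud name and the ``cirros`` image name are placeholder assumptions, not values defined by this reference.

.. code:: python

   # Minimal sketch: list and inspect images through the native Image
   # service (Glance) instead of the deprecated Nova proxy endpoints.
   # Assumes openstacksdk is installed and a clouds.yaml entry named
   # "mycloud" exists; adjust to your own credentials.
   import openstack

   conn = openstack.connect(cloud='mycloud')

   # Native equivalent of the deprecated GET /images proxy call.
   for image in conn.image.images():
       print(image.id, image.name, image.status)

   # Native equivalent of the deprecated GET /images/{image_id} proxy call.
   image = conn.image.find_image('cirros')
   if image:
       print(image.min_disk, image.min_ram, image.size)

The same connection object can be reused for the other native Image service operations described in the Glance API reference.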
By default, the ``policy.json`` file authorizes all users to view the image size in the ``OS-EXT-IMG-SIZE:size`` extended attribute. List Images =========== .. rest_method:: GET /images List images. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - changes-since: changes-since - server: image_server_query - name: image_name_query - status: image_status_query - minDisk: minDisk - minRam: minRam - type : image_type_query - limit : limit - marker : marker Response -------- .. rest_parameters:: parameters.yaml - images: images - id: image_id_body - name: image_name - links: links **Example List Images: JSON response** .. literalinclude:: ../../doc/api_samples/images/images-list-get-resp.json :language: javascript List Images With Details ======================== .. rest_method:: GET /images/detail List images with details. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - changes-since: changes-since - server: image_server_query - name: image_name_query - status: image_status_query - minDisk: minDisk - minRam: minRam - type : image_type_query - limit : limit - marker : marker Response -------- .. rest_parameters:: parameters.yaml - images: images - id: image_id_body - name: image_name - minRam: minRam_body - minDisk: minDisk_body - metadata: metadata_object - created: created - updated: updated - status: image_status - progress: image_progress - links: links - server: image_server - OS-EXT-IMG-SIZE:size: image_size - OS-DCF:diskConfig: OS-DCF:diskConfig **Example List Images Details: JSON response** .. literalinclude:: ../../doc/api_samples/images/images-details-get-resp.json :language: javascript Show Image Details ================== .. rest_method:: GET /images/{image_id} Shows details for an image. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - image_id: image_id Response -------- .. rest_parameters:: parameters.yaml - images: images - id: image_id_body - name: image_name - minRam: minRam_body - minDisk: minDisk_body - metadata: metadata_object - created: created - updated: updated - status: image_status - progress: image_progress - links: links - server: image_server - OS-EXT-IMG-SIZE:size: image_size - OS-DCF:diskConfig: OS-DCF:diskConfig **Example Show Image Details: JSON response** .. literalinclude:: ../../doc/api_samples/images/image-get-resp.json :language: javascript Delete Image ============ .. rest_method:: DELETE /images/{image_id} Deletes an image. Normal response codes: 204 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - image_id: image_id Response -------- There is no body content for the response of a successful DELETE action. List Image Metadata =================== .. rest_method:: GET /images/{image_id}/metadata List metadata of an image. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - image_id: image_id Response -------- .. rest_parameters:: parameters.yaml - metadata: metadata_object **Example List Image Metadata Details: JSON response** .. 
literalinclude:: ../../doc/api_samples/images/image-metadata-get-resp.json :language: javascript Create Image Metadata ===================== .. rest_method:: POST /images/{image_id}/metadata Create an image metadata. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - image_id: image_id - metadata: metadata_object **Example Create Image Metadata: JSON request** .. literalinclude:: ../../doc/api_samples/images/image-metadata-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - metadata: metadata_object **Example Create Image Metadata: JSON response** .. literalinclude:: ../../doc/api_samples/images/image-metadata-post-resp.json :language: javascript Update Image Metadata ===================== .. rest_method:: PUT /images/{image_id}/metadata Update an image metadata Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - image_id: image_id - metadata: metadata_object **Example Update Image Metadata: JSON request** .. literalinclude:: ../../doc/api_samples/images/image-metadata-put-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - metadata: metadata_object **Example Update Image Metadata: JSON response** .. literalinclude:: ../../doc/api_samples/images/image-metadata-put-resp.json :language: javascript Show Image Metadata Item ======================== .. rest_method:: GET /images/{image_id}/metadata/{key} Shows metadata item, by key, for an image. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - image_id: image_id - key: key Response -------- .. rest_parameters:: parameters.yaml - meta: meta **Example Show Image Metadata Item Details: JSON response** .. literalinclude:: ../../doc/api_samples/images/image-meta-key-get.json :language: javascript Create Or Update Image Metadata Item ==================================== .. rest_method:: PUT /images/{image_id}/metadata/{key} Creates or updates a metadata item, by key, for an image. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - image_id: image_id - key: key - meta: meta **Example Create Or Update Image Metadata Item: JSON request** .. literalinclude:: ../../doc/api_samples/images/image-meta-key-put-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - meta: meta **Example Create Or Update Image Metadata Item: JSON response** .. literalinclude:: ../../doc/api_samples/images/image-meta-key-put-resp.json :language: javascript Delete Image Metadata Item ========================== .. rest_method:: DELETE /images/{image_id}/metadata/{key} Deletes a metadata item, by key, for an image. Normal response codes: 204 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - image_id: image_id - key: key Response -------- There is no body content for the response of a successful DELETE action. 
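Because the image metadata proxy calls above return a 404 starting from microversion 2.39, image properties must be managed through the Image service directly in newer deployments. Below is a minimal, illustrative sketch using the Glance v2 JSON-patch interface via the ``requests`` library; the endpoint URL, token, image ID and property names are placeholder assumptions rather than values defined by this reference.

.. code:: python

   # Minimal sketch: create/update image properties (the native equivalent
   # of the deprecated image metadata proxy calls) with a JSON-patch
   # request sent directly to the Image service.
   import json

   import requests

   GLANCE = 'https://image.example.com'    # assumed Image API endpoint
   TOKEN = 'gAAAAAB...'                    # assumed Keystone token
   IMAGE_ID = '70a599e0-31e7-49b7-b260-868f441e862b'  # assumed image UUID

   patch = [
       # "add" creates a new property, "replace" updates an existing one.
       {'op': 'add', 'path': '/os_distro', 'value': 'ubuntu'},
       {'op': 'replace', 'path': '/name', 'value': 'ubuntu-22.04-cloudimg'},
   ]
   resp = requests.patch(
       f'{GLANCE}/v2/images/{IMAGE_ID}',
       headers={
           'X-Auth-Token': TOKEN,
           'Content-Type': 'application/openstack-images-v2.1-json-patch',
       },
       data=json.dumps(patch),
   )
   resp.raise_for_status()
   print(resp.json().get('os_distro'))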
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/index.rst0000664000175000017500000000637700000000000017263 0ustar00zuulzuul00000000000000:tocdepth: 2 ============= Compute API ============= This is a reference for the OpenStack Compute API which is provided by the Nova project. To learn more about the OpenStack Compute API concepts, please refer to the `API guide `_. .. rest_expand_all:: .. include:: versions.inc .. include:: urls.inc .. include:: request-ids.inc .. include:: servers.inc .. include:: servers-actions.inc .. include:: servers-action-fixed-ip.inc .. include:: servers-action-deferred-delete.inc .. include:: servers-action-console-output.inc .. include:: servers-action-shelve.inc .. include:: servers-action-crash-dump.inc .. include:: servers-action-remote-consoles.inc .. include:: servers-admin-action.inc .. include:: servers-action-evacuate.inc .. include:: servers-remote-consoles.inc .. include:: server-security-groups.inc .. include:: diagnostics.inc .. include:: ips.inc .. include:: metadata.inc .. include:: os-instance-actions.inc .. include:: os-interface.inc .. include:: os-server-password.inc .. include:: os-server-shares.inc .. include:: os-volume-attachments.inc .. include:: flavors.inc .. include:: os-flavor-access.inc .. include:: os-flavor-extra-specs.inc .. include:: os-keypairs.inc .. include:: limits.inc .. include:: os-aggregates.inc .. include:: os-assisted-volume-snapshots.inc .. include:: os-availability-zone.inc .. include:: os-hypervisors.inc .. include:: os-instance-usage-audit-log.inc .. include:: os-migrations.inc .. include:: server-migrations.inc .. include:: os-quota-sets.inc .. include:: os-quota-class-sets.inc .. include:: os-server-groups.inc .. include:: os-server-tags.inc .. include:: os-services.inc .. include:: os-simple-tenant-usage.inc .. include:: os-server-external-events.inc .. include:: server-topology.inc ===================== Internal Service APIs ===================== .. warning:: The below Nova APIs are meant to communicate to OpenStack services. Those APIs are not supposed to be used by any users because they can make deployment or resources in unwanted state. .. include:: os-assisted-volume-snapshots.inc .. include:: os-volume-attachments-swap.inc .. include:: os-server-external-events.inc =============== Deprecated APIs =============== This section contains references for APIs which are deprecated and usually limited to some maximum microversion. .. include:: extensions.inc .. include:: os-networks.inc .. include:: os-volumes.inc .. include:: images.inc .. include:: os-baremetal-nodes.inc .. include:: os-tenant-network.inc .. include:: os-floating-ip-pools.inc .. include:: os-floating-ips.inc .. include:: os-security-groups.inc .. include:: os-security-group-rules.inc .. include:: os-hosts.inc ============= Obsolete APIs ============= This section contains the reference for APIs that were part of the OpenStack Compute API in the past, but no longer exist. .. include:: os-certificates.inc .. include:: os-cloudpipe.inc .. include:: os-fping.inc .. include:: os-virtual-interfaces.inc .. include:: os-fixed-ips.inc .. include:: os-floating-ips-bulk.inc .. include:: os-floating-ip-dns.inc .. include:: os-cells.inc .. include:: os-consoles.inc .. include:: os-security-group-default-rules.inc .. include:: os-agents.inc .. 
include:: servers-action-rdp-remote-consoles.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/ips.inc0000664000175000017500000000346200000000000016700 0ustar00zuulzuul00000000000000.. -*- rst -*- ============================ Servers IPs (servers, ips) ============================ Lists the IP addresses for an instance and shows details for an IP address. List Ips ======== .. rest_method:: GET /servers/{server_id}/ips Lists IP addresses that are assigned to an instance. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path Response -------- .. rest_parameters:: parameters.yaml - addresses: addresses_obj - network_label: network_label_body - addr: ip_address - version: version_ip **Example List Ips** .. literalinclude:: ../../doc/api_samples/server-ips/server-ips-resp.json :language: javascript Show Ip Details =============== .. rest_method:: GET /servers/{server_id}/ips/{network_label} Shows IP addresses details for a network label of a server instance. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - network_label: network_label Response -------- .. rest_parameters:: parameters.yaml - network_label: network_label_body - addr: ip_address - version: version_ip **Example Show Ip Details** .. literalinclude:: ../../doc/api_samples/server-ips/server-ips-network-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/limits.inc0000664000175000017500000000274600000000000017412 0ustar00zuulzuul00000000000000.. -*- rst -*- ================= Limits (limits) ================= Shows rate and absolute limits for the project. Show Rate And Absolute Limits ============================= .. rest_method:: GET /limits Shows rate and absolute limits for the project. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - reserved: reserved_query - tenant_id: tenant_id_query Response -------- .. 
rest_parameters:: parameters.yaml - limits: limits - absolute: limits_absolutes - maxServerGroupMembers: server_group_members - maxServerGroups: server_groups - maxServerMeta: metadata_items - maxTotalCores: cores - maxTotalInstances: instances - maxTotalKeypairs: key_pairs - maxTotalRAMSize: ram - totalCoresUsed: total_cores_used - totalInstancesUsed: total_instances_used - totalRAMUsed: total_ram_used - totalServerGroupsUsed: total_server_groups_used - maxSecurityGroupRules: security_group_rules_quota - maxSecurityGroups: security_groups_quota - maxTotalFloatingIps: floating_ips - totalFloatingIpsUsed: total_floatingips_used - totalSecurityGroupsUsed: total_security_groups_used - maxImageMeta: image_metadata_items - maxPersonality: injected_files - maxPersonalitySize: injected_file_content_bytes - rate: limits_rates **Example Show Rate And Absolute Limits: JSON response** .. literalinclude:: ../../doc/api_samples/limits/limit-get-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/metadata.inc0000664000175000017500000001371400000000000017666 0ustar00zuulzuul00000000000000.. -*- rst -*- ===================================== Server metadata (servers, metadata) ===================================== Lists metadata, creates or replaces one or more metadata items, and updates one or more metadata items for a server. Shows details for, creates or replaces, and updates a metadata item, by key, for a server. List All Metadata ================= .. rest_method:: GET /servers/{server_id}/metadata Lists all metadata for a server. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path Response -------- .. rest_parameters:: parameters.yaml - metadata: metadata_object **Example List All Metadata** .. literalinclude:: ../../doc/api_samples/server-metadata/server-metadata-all-resp.json :language: javascript Create or Update Metadata Items =============================== .. rest_method:: POST /servers/{server_id}/metadata Creates or updates one or more metadata items for a server. Creates any metadata items that do not already exist in the server and replaces any existing metadata items that match keys. Does not modify items that are not in the request. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - metadata: metadata_object **Example Update Metadata Items** .. literalinclude:: ../../doc/api_samples/server-metadata/server-metadata-all-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - metadata: metadata_object **Example Update Metadata Items** .. literalinclude:: ../../doc/api_samples/server-metadata/server-metadata-all-resp.json :language: javascript Replace Metadata Items ====================== ..
rest_method:: PUT /servers/{server_id}/metadata Replaces one or more metadata items for a server. Creates any metadata items that do not already exist in the server. Removes and completely replaces any metadata items that already exist in the server with the metadata items in the request. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - metadata: metadata_object **Example Create Or Replace Metadata Items** .. literalinclude:: ../../doc/api_samples/server-metadata/server-metadata-all-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - metadata: metadata_object **Example Create Or Replace Metadata Items** .. literalinclude:: ../../doc/api_samples/server-metadata/server-metadata-all-resp.json :language: javascript Show Metadata Item Details ========================== .. rest_method:: GET /servers/{server_id}/metadata/{key} Shows details for a metadata item, by key, for a server. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - key: key Response -------- .. rest_parameters:: parameters.yaml - meta: metadata_object **Example Show Metadata Item Details** .. literalinclude:: ../../doc/api_samples/server-metadata/server-metadata-resp.json :language: javascript Create Or Update Metadata Item ============================== .. rest_method:: PUT /servers/{server_id}/metadata/{key} Creates or replaces a metadata item, by key, for a server. Creates a metadata item that does not already exist in the server. Replaces existing metadata items that match keys with the metadata item in the request. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - key: key **Example Create Or Update Metadata Item** .. literalinclude:: ../../doc/api_samples/server-metadata/server-metadata-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - meta: metadata_object **Example Create Or Update Metadata Item** .. literalinclude:: ../../doc/api_samples/server-metadata/server-metadata-resp.json :language: javascript Delete Metadata Item ==================== .. rest_method:: DELETE /servers/{server_id}/metadata/{key} Deletes a metadata item, by key, from a server. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 204 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. 
rest_parameters:: parameters.yaml - server_id: server_id_path - key: key Response -------- If successful, this method does not return content in the response body. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-agents.inc0000664000175000017500000000673100000000000020007 0ustar00zuulzuul00000000000000.. -*- rst -* ========================== Guest agents (os-agents) ========================== Creates, lists, updates, and deletes guest agent builds. Use guest agents to access files on the disk, configure networking, or run other applications or scripts in the guest while the agent is running. This hypervisor-specific extension is currently only for the Xen driver. Use of guest agents is possible only if the underlying service provider uses the Xen driver. .. warning:: These APIs only works with the Xen virt driver, which was deprecated in the 20.0.0 (Train) release. They were removed in the 22.0.0 (Victoria) release. List Agent Builds ================= .. rest_method:: GET /os-agents Lists agent builds. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), gone(410) Request ------- .. rest_parameters:: parameters.yaml - hypervisor: hypervisor_query Response -------- .. rest_parameters:: parameters.yaml - agents: agents - agent_id: agent_id - architecture: architecture - hypervisor: hypervisor_type - md5hash: md5hash - os: os - url: url - version: version **Example List Agent Builds: JSON response** .. literalinclude:: ../../doc/api_samples/os-agents/agents-get-resp.json :language: javascript Create Agent Build ================== .. rest_method:: POST /os-agents Creates an agent build. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), conflict(409), gone(410) Request ------- .. rest_parameters:: parameters.yaml - agent: agent - hypervisor: hypervisor_type - os: os - architecture: architecture - version: version - md5hash: md5hash - url: url **Example Create Agent Build: JSON request** .. literalinclude:: ../../doc/api_samples/os-agents/agent-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - agent: agent - agent_id: agent_id - architecture: architecture - hypervisor: hypervisor_type - md5hash: md5hash - os: os - url: url - version: version **Example Create Agent Build: JSON response** .. literalinclude:: ../../doc/api_samples/os-agents/agent-post-resp.json :language: javascript Update Agent Build ================== .. rest_method:: PUT /os-agents/{agent_build_id} Updates an agent build. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), gone(410) Request ------- .. rest_parameters:: parameters.yaml - agent_build_id: agent_build_id - para: para - url: url - md5hash: md5hash - version: version **Example Update Agent Build: JSON request** .. literalinclude:: ../../doc/api_samples/os-agents/agent-update-put-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - agent: agent - agent_id: agent_id_str - md5hash: md5hash - url: url - version: version **Example Update Agent Build: JSON response** .. literalinclude:: ../../doc/api_samples/os-agents/agent-update-put-resp.json :language: javascript Delete Agent Build ================== .. rest_method:: DELETE /os-agents/{agent_build_id} Deletes an existing agent build. 
Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), gone(410) Request ------- .. rest_parameters:: parameters.yaml - agent_build_id: agent_build_id Response -------- There is no body content for the response of a successful DELETE query ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-aggregates.inc0000664000175000017500000002263600000000000020641 0ustar00zuulzuul00000000000000.. -*- rst -*- ================================ Host aggregates (os-aggregates) ================================ Creates and manages host aggregates. An aggregate assigns metadata to groups of compute nodes. Policy defaults enable only users with the administrative role to perform operations with aggregates. Cloud providers can change these permissions through `policy file configuration `__. List Aggregates =============== .. rest_method:: GET /os-aggregates Lists all aggregates. Includes the ID, name, and availability zone for each aggregate. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Response -------- .. rest_parameters:: parameters.yaml - aggregates: aggregates - availability_zone: aggregate_az - created_at: created - deleted_at: deleted_at - deleted: deleted - hosts: aggregate_host_list - id: aggregate_id_body - metadata: aggregate_metadata_response - name: aggregate_name - updated_at: updated_consider_null - uuid: aggregate_uuid **Example List Aggregates (v2.41): JSON response** .. literalinclude:: ../../doc/api_samples/os-aggregates/v2.41/aggregates-list-get-resp.json :language: javascript Create Aggregate ================ .. rest_method:: POST /os-aggregates Creates an aggregate. If specifying an option availability_zone, the aggregate is created as an availability zone and the availability zone is visible to normal users. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - aggregate: aggregate - name: aggregate_name - availability_zone: aggregate_az_optional_create **Example Create Aggregate: JSON request** .. literalinclude:: ../../doc/api_samples/os-aggregates/aggregate-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - aggregate: aggregate - availability_zone: aggregate_az - created_at: created - deleted_at: deleted_at - deleted: deleted - id: aggregate_id_body - name: aggregate_name - updated_at: updated_consider_null - uuid: aggregate_uuid **Example Create Aggregate (v2.41): JSON response** .. literalinclude:: ../../doc/api_samples/os-aggregates/v2.41/aggregate-post-resp.json :language: javascript Show Aggregate Details ====================== .. rest_method:: GET /os-aggregates/{aggregate_id} Shows details for an aggregate. Details include hosts and metadata. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - aggregate_id: aggregate_id Response -------- .. rest_parameters:: parameters.yaml - aggregate: aggregate - availability_zone: aggregate_az - created_at: created - deleted_at: deleted_at - deleted: deleted - hosts: hosts - id: aggregate_id_body - metadata: aggregate_metadata_response - name: aggregate_name - updated_at: updated_consider_null - uuid: aggregate_uuid **Example Show Aggregate Details (v2.41): JSON response** .. 
literalinclude:: ../../doc/api_samples/os-aggregates/v2.41/aggregates-get-resp.json :language: javascript Update Aggregate ================ .. rest_method:: PUT /os-aggregates/{aggregate_id} Updates either or both the name and availability zone for an aggregate. If the aggregate to be updated has a host that is already in the given availability zone, the request will fail with a 400 error. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - aggregate_id: aggregate_id - aggregate: aggregate - name: aggregate_name_optional - availability_zone: aggregate_az_optional_update **Example Update Aggregate: JSON request** .. literalinclude:: ../../doc/api_samples/os-aggregates/aggregate-update-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - aggregate: aggregate - availability_zone: aggregate_az - created_at: created - deleted_at: deleted_at - deleted: deleted - hosts: hosts - id: aggregate_id_body - metadata: aggregate_metadata_response - name: aggregate_name - updated_at: updated_consider_null - uuid: aggregate_uuid **Example Update Aggregate (v2.41): JSON response** .. literalinclude:: ../../doc/api_samples/os-aggregates/v2.41/aggregate-update-post-resp.json :language: javascript Delete Aggregate ================ .. rest_method:: DELETE /os-aggregates/{aggregate_id} Deletes an aggregate. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - aggregate_id: aggregate_id Response -------- There is no body content for the response of a successful DELETE action. Add Host ======== .. rest_method:: POST /os-aggregates/{aggregate_id}/action Adds a host to an aggregate. Specify the ``add_host`` action and host name in the request body. It is not allowed to move a host with servers between Availability Zones. Such a request is rejected with a 409 error. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - aggregate_id: aggregate_id - add_host: aggregate_add_host - host: host_name_body **Example Add Host: JSON request** .. literalinclude:: ../../doc/api_samples/os-aggregates/aggregate-add-host-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - aggregate: aggregate - availability_zone: aggregate_az - created_at: created - deleted_at: deleted_at - deleted: deleted - hosts: hosts - id: aggregate_id_body - metadata: aggregate_metadata_response - name: aggregate_name - updated_at: updated_consider_null - uuid: aggregate_uuid **Example Add Host (v2.41): JSON response** .. literalinclude:: ../../doc/api_samples/os-aggregates/v2.41/aggregates-add-host-post-resp.json :language: javascript Remove Host =========== .. rest_method:: POST /os-aggregates/{aggregate_id}/action Removes a host from an aggregate. Specify the ``remove_host`` action and host name in the request body. It is not allowed to move a host with servers between Availability Zones. Such a request is rejected with a 409 error. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- ..
rest_parameters:: parameters.yaml - aggregate_id: aggregate_id - remove_host: aggregate_remove_host - host: host_name_body **Example Remove Host: JSON request** .. literalinclude:: ../../doc/api_samples/os-aggregates/aggregate-remove-host-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - aggregate: aggregate - availability_zone: aggregate_az - created_at: created - deleted_at: deleted_at - deleted: deleted - hosts: hosts - id: aggregate_id_body - metadata: aggregate_metadata_response - name: aggregate_name - updated_at: updated_consider_null - uuid: aggregate_uuid **Example Remove Host (v2.41): JSON response** .. literalinclude:: ../../doc/api_samples/os-aggregates/v2.41/aggregates-remove-host-post-resp.json :language: javascript Create Or Update Aggregate Metadata =================================== .. rest_method:: POST /os-aggregates/{aggregate_id}/action Creates or replaces metadata for an aggregate. Specify the ``set_metadata`` action and metadata info in the request body. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - aggregate_id: aggregate_id - set_metadata: set_metadata - metadata: aggregate_metadata_request **Example Create Or Update Aggregate Metadata: JSON request** .. literalinclude:: ../../doc/api_samples/os-aggregates/aggregate-metadata-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - aggregate: aggregate - availability_zone: aggregate_az - created_at: created - deleted_at: deleted_at - deleted: deleted - hosts: hosts - id: aggregate_id_body - metadata: aggregate_metadata_response - name: aggregate_name - updated_at: updated_consider_null - uuid: aggregate_uuid **Example Create Or Update Aggregate Metadata (v2.41): JSON response** .. literalinclude:: ../../doc/api_samples/os-aggregates/v2.41/aggregates-metadata-post-resp.json :language: javascript Request Image Pre-caching for Aggregate ======================================= .. rest_method:: POST /os-aggregates/{aggregate_id}/images Requests that a set of images be pre-cached on compute nodes within the referenced aggregate. This API is available starting with microversion 2.81. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - aggregate_id: aggregate_id - cache: cache - cache.id: image_id_body **Example Request Image pre-caching for Aggregate (v2.81): JSON request** .. literalinclude:: ../../doc/api_samples/os-aggregates/v2.81/aggregate-images-post-req.json :language: javascript Response -------- The response body is always empty. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-assisted-volume-snapshots.inc0000664000175000017500000000473200000000000023671 0ustar00zuulzuul00000000000000.. -*- rst -*- ========================================================== Assisted volume snapshots (os-assisted-volume-snapshots) ========================================================== Creates and deletes snapshots through an emulator/hypervisor. Only qcow2 file format is supported. This API is only implemented by the libvirt compute driver. An internal snapshot that lacks storage such as NFS can use an emulator/hypervisor to add the snapshot feature. 
This is used to enable snapshot of volumes on backends such as NFS by storing data as qcow2 files on these volumes. This API is only ever called by Cinder, where it is used to create a snapshot for drivers that extend the remotefs Cinder driver. Create Assisted Volume Snapshots ================================ .. rest_method:: POST /os-assisted-volume-snapshots Creates an assisted volume snapshot. Normal response codes: 200 Error response codes: badRequest(400),unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - snapshot: snapshot - volume_id: volume_id - create_info: create_info - create_info.snapshot_id: snapshot_id - create_info.type: type-os-assisted-volume-snapshot - create_info.new_file: new_file - create_info.id: create_info_id **Example Create Assisted Volume Snapshots: JSON request** .. literalinclude:: ../../doc/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - snapshot: snapshot - id: create_info_id_resp - volumeId: volume_id **Example Create Assisted Volume Snapshots: JSON response** .. literalinclude:: ../../doc/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.json :language: javascript Delete Assisted Volume Snapshot =============================== .. rest_method:: DELETE /os-assisted-volume-snapshots/{snapshot_id} Deletes an assisted volume snapshot. To make this request, add the ``delete_info`` query parameter to the URI, as follows: DELETE /os-assisted-volume-snapshots/421752a6-acf6-4b2d-bc7a-119f9148cd8c?delete_info='{"volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c"}' Normal response codes: 204 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - snapshot_id: snapshot_id_path - delete_info: delete_info Response -------- There is no body content for the response of a successful DELETE query ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-availability-zone.inc0000664000175000017500000000377200000000000022153 0ustar00zuulzuul00000000000000.. -*- rst -*- .. _os-availability-zone: =========================================== Availability zones (os-availability-zone) =========================================== Lists and gets detailed availability zone information. An availability zone is created or updated by setting the availability_zone parameter in the ``create``, ``update``, or ``create or update`` methods of the Host Aggregates API. See `Host Aggregates `_ for more details. Get Availability Zone Information ================================= .. rest_method:: GET /os-availability-zone Lists availability zone information. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Response -------- .. rest_parameters:: parameters.yaml - availabilityZoneInfo: availability_zone_info - hosts: hosts.availability_zone_none - zoneName: OS-EXT-AZ:availability_zone - zoneState: availability_zone_state - available: available | **Example Get availability zone information** .. literalinclude:: ../../doc/api_samples/os-availability-zone/availability-zone-list-resp.json :language: javascript Get Detailed Availability Zone Information ========================================== .. rest_method:: GET /os-availability-zone/detail Gets detailed availability zone information. 
Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Response -------- .. rest_parameters:: parameters.yaml - availabilityZoneInfo: availability_zone_info - hosts: hosts.availability_zone - zoneName: OS-EXT-AZ:availability_zone - zoneState: availability_zone_state - available: available | **Example Get detailed availability zone information** .. literalinclude:: ../../doc/api_samples/os-availability-zone/availability-zone-detail-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-baremetal-nodes.inc0000664000175000017500000000414500000000000021565 0ustar00zuulzuul00000000000000.. -*- rst -*- =================================================== Bare metal nodes (os-baremetal-nodes) (DEPRECATED) =================================================== .. warning:: These APIs are proxy calls to the Ironic service. They exist for legacy compatibility, but no new applications should use them. Nova has deprecated all the proxy APIs and users should use the native APIs instead. These will fail with a 404 starting from microversion 2.36. See: `Relevant Bare metal APIs `__. Bare metal nodes. List Bare Metal Nodes ===================== .. rest_method:: GET /os-baremetal-nodes Lists the bare metal nodes known by the compute environment. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), notImplemented(501) Response -------- .. rest_parameters:: parameters.yaml - nodes: baremetal_nodes - id: baremetal_id - interfaces: baremetal_interfaces - host: baremetal_host - task_state: baremetal_taskstate - cpus: baremetal_cpus - memory_mb: baremetal_mem - disk_gb: baremetal_disk **Example List Bare Metal Nodes** .. literalinclude:: ../../doc/api_samples/os-baremetal-nodes/baremetal-node-list-resp.json :language: javascript Show Bare Metal Node Details ============================ .. rest_method:: GET /os-baremetal-nodes/{node_id} Shows details for a bare metal node. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - node_id: node_id Response -------- .. rest_parameters:: parameters.yaml - node: baremetal_node - id: baremetal_id - instance_uuid: baremetal_instance_uuid - interfaces: baremetal_interfaces - host: baremetal_host - task_state: baremetal_taskstate - cpus: baremetal_cpus - memory_mb: baremetal_mem - disk_gb: baremetal_disk **Example Show Bare Metal Node Details** .. literalinclude:: ../../doc/api_samples/os-baremetal-nodes/baremetal-node-get-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-cells.inc0000664000175000017500000000730300000000000017624 0ustar00zuulzuul00000000000000.. -*- rst -*- ============================== Cells (os-cells, capacities) ============================== Adds neighbor cells, lists neighbor cells, and shows the capabilities of the local cell. By default, only administrators can manage cells. .. warning:: These APIs refer to a Cells v1 deployment which was deprecated in the 16.0.0 Pike release. 
These are not used with Cells v2 which is required beginning with the 15.0.0 Ocata release where all Nova deployments consist of at least one Cells v2 cell. They were removed in the 20.0.0 Train release. List Cells ========== .. rest_method:: GET /os-cells Lists cells. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), gone(410), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - tenant_id: tenant_id - limit: limit_simple - offset: offset_simple Response -------- **Example List Cells: JSON response** .. literalinclude:: ../../doc/api_samples/os-cells/cells-list-resp.json :language: javascript Create Cell =========== .. rest_method:: POST /os-cells Create a new cell. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), gone(410), notImplemented(501) Capacities ========== .. rest_method:: GET /os-cells/capacities Retrieve capacities. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), gone(410), notImplemented(501) List Cells With Details ======================= .. rest_method:: GET /os-cells/detail Lists cells with details of capabilities. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), gone(410), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - limit: limit_simple - offset: offset_simple Info For This Cell ================== .. rest_method:: GET /os-cells/info Retrieve info about the current cell. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), gone(410), notImplemented(501) Show Cell Data ============== .. rest_method:: GET /os-cells/{cell_id} Shows data for a cell. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), gone(410), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - cell_id: cell_id Response -------- **Example Show Cell Data: JSON response** .. literalinclude:: ../../doc/api_samples/os-cells/cells-get-resp.json :language: javascript Update a Cell ============= .. rest_method:: PUT /os-cells/{cell_id} Update an existing cell. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), gone(410), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - cell_id: cell_id Delete a Cell ============= .. rest_method:: DELETE /os-cells/{cell_id} Remove a cell. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), gone(410), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - cell_id: cell_id Show Cell Capacities ==================== .. rest_method:: GET /os-cells/{cell_id}/capacities Shows capacities for a cell. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), gone(410), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - cell_id: cell_id Response -------- **Example Show Cell Capacities: JSON response** .. literalinclude:: ../../doc/api_samples/os-cells/cells-capacities-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-certificates.inc0000664000175000017500000000271400000000000021170 0ustar00zuulzuul00000000000000.. 
-*- rst -*- ==================================== Root certificates (os-certificates) ==================================== Creates and shows details for a root certificate. .. warning:: This API existed solely because of the need to build euca bundles when Nova had an in tree EC2 API. It no longer interacts with any parts of the system besides its own certificate daemon. It was removed in the 16.0.0 Pike release. Create Root Certificate ======================= .. rest_method:: POST /os-certificates Creates a root certificate. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Response -------- .. rest_parameters:: parameters.yaml - certificate: certificate - data: data - private_key: private_key | **Example Create Root Certificate** .. literalinclude:: ../../doc/api_samples/os-certificates/certificate-create-resp.json :language: javascript Show Root Certificate Details ============================= .. rest_method:: GET /os-certificates/root Shows details for a root certificate. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), notImplemented(501) Response -------- .. rest_parameters:: parameters.yaml - certificate: certificate - data: data - private_key: private_key | **Example Show Root Certificate Details** .. literalinclude:: ../../doc/api_samples/os-certificates/certificate-get-root-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-cloudpipe.inc0000664000175000017500000000451500000000000020510 0ustar00zuulzuul00000000000000.. -*- rst -*- ========================= Cloudpipe (os-cloudpipe) ========================= .. warning:: This API only works with ``nova-network`` which is deprecated in favor of Neutron. It should be avoided in any new applications. It was removed in the 16.0.0 Pike release. Manages virtual VPNs for projects. List Cloudpipes =============== .. rest_method:: GET /os-cloudpipe Lists cloudpipes. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound (404) Response -------- .. rest_parameters:: parameters.yaml - cloudpipes: cloudpipes - created_at: created - instance_id: instance_id_cloudpipe - internal_ip: fixed_ip - project_id: project_id_server - public_ip: vpn_public_ip_resp - public_port: vpn_public_port_resp - state: vpn_state **Example List Cloudpipes: JSON response** .. literalinclude:: ../../doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json :language: javascript Create Cloudpipe ================ .. rest_method:: POST /os-cloudpipe Creates a cloudpipe. Normal response codes: 200 Error response codes: badRequest(400),unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - cloudpipe: cloudpipe - project_id: project_id **Example Create Cloudpipe: JSON request** .. literalinclude:: ../../doc/api_samples/os-cloudpipe/cloud-pipe-create-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - instance_id: instance_id_cloudpipe **Example Create Cloudpipe: JSON response** .. literalinclude:: ../../doc/api_samples/os-cloudpipe/cloud-pipe-create-resp.json :language: javascript Update Cloudpipe ================ .. rest_method:: PUT /os-cloudpipe/configure-project Updates the virtual private network (VPN) IP address and port for a cloudpipe instance. 
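The request body follows the shape sketched below; the address and port values are illustrative only, and the sample file referenced later in this section remains the authoritative example::

    {
        "configure_project": {
            "vpn_ip": "10.0.0.2",
            "vpn_port": 1000
        }
    }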
Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - configure_project: configure_project_cloudpipe - vpn_ip: vpn_public_ip - vpn_port: vpn_public_port **Example Update Cloudpipe: JSON request** .. literalinclude:: ../../doc/api_samples/os-cloudpipe/cloud-pipe-update-req.json :language: javascript Response -------- There is no body content for the response of a successful PUT request ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-consoles.inc0000664000175000017500000000474200000000000020353 0ustar00zuulzuul00000000000000.. -*- rst -*- ================================================== XenServer VNC Proxy (XVP) consoles (os-consoles) ================================================== Manages server XVP consoles. .. warning:: These APIs are only applicable when using the XenServer virt driver. They were removed in the 21.0.0 (Ussuri) release. Lists Consoles ============== .. rest_method:: GET /servers/{server_id}/consoles Lists all consoles for a server. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), gone(410) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path Response -------- .. rest_parameters:: parameters.yaml - consoles: consoles - console: console - console_type: console_type - id: console_id_in_body | **Example List Consoles** .. literalinclude:: ../../doc/api_samples/consoles/consoles-list-get-resp.json :language: javascript Create Console ============== .. rest_method:: POST /servers/{server_id}/consoles Creates a console for a server. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), gone(410) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path Response -------- If successful, this method does not return a response body. Show Console Details ==================== .. rest_method:: GET /servers/{server_id}/consoles/{console_id} Shows console details for a server. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), gone(410) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - console_id: console_id Response -------- .. rest_parameters:: parameters.yaml - console: console - console_type: console_type - host: console_host - id: console_id_in_body - instance_name: instance_name - password: console_password - port: port_number | **Example Show Console Details** .. literalinclude:: ../../doc/api_samples/consoles/consoles-get-resp.json :language: javascript Delete Console ============== .. rest_method:: DELETE /servers/{server_id}/consoles/{console_id} Deletes a console for a server. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), gone(410) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - console_id: console_id Response -------- If successful, this method does not return a response body. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-fixed-ips.inc0000664000175000017500000000363000000000000020411 0ustar00zuulzuul00000000000000.. -*- rst -*- ========================= Fixed IPs (os-fixed-ips) ========================= .. warning:: These APIs only work with **nova-network** which is deprecated. 
These will fail with a 404 starting from microversion 2.36. They were removed in the 18.0.0 Rocky release. Shows data for a fixed IP, such as host name, CIDR, and address. Also, reserves and releases a fixed IP address. Show Fixed Ip Details ===================== .. rest_method:: GET /os-fixed-ips/{fixed_ip} Shows details for a fixed IP address. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), gone(410) Request ------- .. rest_parameters:: parameters.yaml - fixed_ip: fixed_ip_path Response -------- .. rest_parameters:: parameters.yaml - fixed_ip: fixed_ip_obj - address: ip_address - cidr: cidr - host: fixed_ip_host - hostname: fixed_ip_hostname - reserved: reserved_fixedip **Example Show Fixed Ip Details: JSON response** .. literalinclude:: ../../doc/api_samples/os-fixed-ips/fixedips-get-resp.json :language: javascript Reserve Or Release A Fixed Ip ============================= .. rest_method:: POST /os-fixed-ips/{fixed_ip}/action Reserves or releases a fixed IP. To reserve a fixed IP address, specify ``reserve`` in the request body. To release a fixed IP address, specify ``unreserve`` in the request body. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), gone(410) Request ------- .. rest_parameters:: parameters.yaml - fixed_ip: fixed_ip_path - reserve: action_reserve - unreserve: action_unreserve **Example Reserve Or Release A Fixed Ip: JSON request** .. literalinclude:: ../../doc/api_samples/os-fixed-ips/fixedip-post-req.json :language: javascript Response -------- There is no body content for the response of a successful POST operation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-flavor-access.inc0000664000175000017500000000702700000000000021255 0ustar00zuulzuul00000000000000.. -*- rst -*- ============================================ Flavors access (flavors, os-flavor-access) ============================================ Lists tenants who have access to a private flavor and adds private flavor access to and removes private flavor access from tenants. By default, only administrators can manage private flavor access. A private flavor has ``is_public`` set to ``false`` while a public flavor has ``is_public`` set to ``true``. List Flavor Access Information For Given Flavor =============================================== .. rest_method:: GET /flavors/{flavor_id}/os-flavor-access Lists flavor access information. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - flavor_id: flavor_id Response -------- .. rest_parameters:: parameters.yaml - flavor_access: flavor_access - tenant_id: tenant_id_body - flavor_id: flavor_id_body **Example List Flavor Access Information For Given Flavor: JSON response** .. literalinclude:: ../../doc/api_samples/flavor-access/flavor-access-list-resp.json :language: javascript Add Flavor Access To Tenant (addTenantAccess Action) ==================================================== .. rest_method:: POST /flavors/{flavor_id}/action Adds flavor access to a tenant and flavor. Specify the ``addTenantAccess`` action and the ``tenant`` in the request body. 
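A minimal request body for this action has the following shape (the tenant ID shown is a placeholder)::

    {
        "addTenantAccess": {
            "tenant": "fake_tenant"
        }
    }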
Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) - 400 - BadRequest - if the `tenant` is not found in your OpenStack deployment, a 400 is returned to prevent typos on the API call. Request ------- .. rest_parameters:: parameters.yaml - flavor_id: flavor_id - addTenantAccess: addTenantAccess - tenant: tenant_id_body **Example Add Flavor Access To Tenant: JSON request** .. literalinclude:: ../../doc/api_samples/flavor-access/flavor-access-add-tenant-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - flavor_access: flavor_access - tenant_id: tenant_id_body - flavor_id: flavor_id_body **Example Add Flavor Access To Tenant: JSON response** .. literalinclude:: ../../doc/api_samples/flavor-access/flavor-access-add-tenant-resp.json :language: javascript Remove Flavor Access From Tenant (removeTenantAccess Action) ============================================================ .. rest_method:: POST /flavors/{flavor_id}/action Removes flavor access from a tenant and flavor. Specify the ``removeTenantAccess`` action and the ``tenant`` in the request body. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) - 400 - BadRequest - if the `tenant` is not found in your OpenStack deployment, a 400 is returned to prevent typos on the API call. Request ------- .. rest_parameters:: parameters.yaml - flavor_id: flavor_id - removeTenantAccess: removeTenantAccess - tenant: tenant_id_body **Example Remove Flavor Access From Tenant: JSON request** .. literalinclude:: ../../doc/api_samples/flavor-access/flavor-access-remove-tenant-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - flavor_access: flavor_access - tenant_id: tenant_id_body - flavor_id: flavor_id_body **Example Remove Flavor Access From Tenant: JSON response** .. literalinclude:: ../../doc/api_samples/flavor-access/flavor-access-remove-tenant-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-flavor-extra-specs.inc0000664000175000017500000001041200000000000022242 0ustar00zuulzuul00000000000000.. -*- rst -*- ====================================================== Flavors extra-specs (flavors, os-flavor-extra-specs) ====================================================== Lists, creates, deletes, and updates the extra-specs or keys for a flavor. Refer to `Compute Flavors `__ for available built-in extra specs. List Extra Specs For A Flavor ============================= .. rest_method:: GET /flavors/{flavor_id}/os-extra_specs Lists all extra specs for a flavor, by ID. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - flavor_id: flavor_id Response -------- .. rest_parameters:: parameters.yaml - extra_specs: extra_specs - key: flavor_extra_spec_key2 - value: flavor_extra_spec_value **Example List Extra Specs For A Flavor: JSON response** .. literalinclude:: ../../doc/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json :language: javascript Create Extra Specs For A Flavor =============================== .. rest_method:: POST /flavors/{flavor_id}/os-extra_specs Creates extra specs for a flavor, by ID. 
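The request body is a flat ``extra_specs`` object whose keys and values are strings, for example (the keys shown are illustrative; see the linked flavor documentation for the supported extra specs)::

    {
        "extra_specs": {
            "hw:cpu_policy": "shared",
            "hw:numa_nodes": "1"
        }
    }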
Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - flavor_id: flavor_id - extra_specs: extra_specs - key: flavor_extra_spec_key2 - value: flavor_extra_spec_value **Example Create Extra Specs For A Flavor: JSON request** .. literalinclude:: ../../doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - extra_specs: extra_specs - key: flavor_extra_spec_key2 - value: flavor_extra_spec_value **Example Create Extra Specs For A Flavor: JSON response** .. literalinclude:: ../../doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json :language: javascript Show An Extra Spec For A Flavor =============================== .. rest_method:: GET /flavors/{flavor_id}/os-extra_specs/{flavor_extra_spec_key} Shows an extra spec, by key, for a flavor, by ID. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - flavor_id: flavor_id - flavor_extra_spec_key: flavor_extra_spec_key Response -------- .. rest_parameters:: parameters.yaml - key: flavor_extra_spec_key2 - value: flavor_extra_spec_value **Example Show An Extra Spec For A Flavor: JSON response** .. literalinclude:: ../../doc/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json :language: javascript Update An Extra Spec For A Flavor ================================= .. rest_method:: PUT /flavors/{flavor_id}/os-extra_specs/{flavor_extra_spec_key} Updates an extra spec, by key, for a flavor, by ID. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - flavor_id: flavor_id - flavor_extra_spec_key: flavor_extra_spec_key - key: flavor_extra_spec_key2 - value: flavor_extra_spec_value **Example Update An Extra Spec For A Flavor: JSON request** .. literalinclude:: ../../doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - key: flavor_extra_spec_key2 - value: flavor_extra_spec_value **Example Update An Extra Spec For A Flavor: JSON response** .. literalinclude:: ../../doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json :language: javascript Delete An Extra Spec For A Flavor ================================= .. rest_method:: DELETE /flavors/{flavor_id}/os-extra_specs/{flavor_extra_spec_key} Deletes an extra spec, by key, for a flavor, by ID. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - flavor_id: flavor_id - flavor_extra_spec_key: flavor_extra_spec_key Response -------- There is no body content for the response of a successful DELETE action. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-floating-ip-dns.inc0000664000175000017500000001052000000000000021510 0ustar00zuulzuul00000000000000.. -*- rst -*- .. NOTE(gmann): These APIs are deprecated so do not update this file even body, example or parameters are not complete. ============================================= Floating IP DNS records (os-floating-ip-dns) ============================================= .. 
warning:: Since these APIs are only implemented for **nova-network**, they are deprecated. These will fail with a 404 starting from microversion 2.36. They were removed in the 18.0.0 Rocky release. Manages DNS records associated with floating IP addresses. The API dispatches requests to a DNS driver that is selected at startup. List DNS Domains ================ .. rest_method:: GET /os-floating-ip-dns Lists registered DNS domains published by the DNS drivers. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), gone(410), notImplemented(501) Response -------- **Example List Dns Domains: JSON response** .. literalinclude:: ../../doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json :language: javascript Create Or Update DNS Domain =========================== .. rest_method:: PUT /os-floating-ip-dns/{domain} Creates or updates a DNS domain. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), gone(410), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - domain: domain **Example Create Or Update Dns Domain: JSON request** .. literalinclude:: ../../doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json :language: javascript Response -------- **Example Create Or Update Dns Domain: JSON response** .. literalinclude:: ../../doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json :language: javascript Delete DNS Domain ================= .. rest_method:: DELETE /os-floating-ip-dns/{domain} Deletes a DNS domain and all associated host entries. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), gone(410), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - domain: domain Response -------- List DNS Entries ================ .. rest_method:: GET /os-floating-ip-dns/{domain}/entries/{ip} Lists DNS entries for a domain and IP. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), gone(410), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - domain: domain - ip: ip Response -------- **Example List DNS Entries: JSON response** .. literalinclude:: ../../doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json :language: javascript Find Unique DNS Entry ===================== .. rest_method:: GET /os-floating-ip-dns/{domain}/entries/{name} Finds a unique DNS entry for a domain and name. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), gone(410), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - domain: domain - name: name Response -------- **Example Find Unique DNS Entry: JSON response** .. literalinclude:: ../../doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json :language: javascript Create Or Update DNS Entry ========================== .. rest_method:: PUT /os-floating-ip-dns/{domain}/entries/{name} Creates or updates a DNS entry. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), gone(410), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - domain: domain - name: name **Example Create Or Update DNS Entry: JSON request** .. literalinclude:: ../../doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json :language: javascript Response -------- **Example Create Or Update DNS Entry: JSON response** .. 
literalinclude:: ../../doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json :language: javascript Delete DNS Entry ================ .. rest_method:: DELETE /os-floating-ip-dns/{domain}/entries/{name} Deletes a DNS entry. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), gone(410), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - domain: domain - name: name Response -------- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-floating-ip-pools.inc0000664000175000017500000000243200000000000022063 0ustar00zuulzuul00000000000000.. -*- rst -*- ====================================================== Floating IP pools (os-floating-ip-pools) (DEPRECATED) ====================================================== .. warning:: This API is a proxy call to the Network service. Nova has deprecated all the proxy APIs and users should use the native APIs instead. This API will fail with a 404 starting from microversion 2.36. For the equivalent functionality in the Network service, one can request:: GET /networks?router:external=True&fields=name Manages groups of floating IPs. List Floating Ip Pools ====================== .. rest_method:: GET /os-floating-ip-pools Lists floating IP pools. Policy defaults enable only users with the administrative role or user who is authorized to operate on tenant to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Response -------- .. rest_parameters:: parameters.yaml - floating_ip_pools: floating_ip_pools - name: floating_ip_pool_name_or_id **Example List Floating Ip Pools: JSON response** .. literalinclude:: ../../doc/api_samples/os-floating-ip-pools/floatingippools-list-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-floating-ips-bulk.inc0000664000175000017500000000734000000000000022052 0ustar00zuulzuul00000000000000.. -*- rst -*- ========================================= Floating IPs bulk (os-floating-ips-bulk) ========================================= .. warning:: Since these APIs are only implemented for **nova-network**, they are deprecated. These will fail with a 404 starting from microversion 2.36. They were removed in the 18.0.0 Rocky release. Bulk-creates, deletes, and lists floating IPs. Default pool name is ``nova``. To view available pools, use the ``os-floating-ip-pools`` extension. List Floating Ips ================= .. rest_method:: GET /os-floating-ips-bulk Lists all floating IPs. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), gone(410) Response -------- .. rest_parameters:: parameters.yaml - floating_ip_info : floating_ips_list - address : floating_ip - fixed_ip : fixed_ip_address - instance_uuid : server_id - interface : virtual_interface - pool: floating_ip_pool_name - project_id : project_id_value **Example List Floating Ips: JSON response** .. literalinclude:: ../../doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json :language: javascript Create Floating Ips =================== .. rest_method:: POST /os-floating-ips-bulk Bulk-creates floating IPs. 
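A request body along the following lines creates an entire range at once; the CIDR, pool and interface values are illustrative::

    {
        "floating_ips_bulk_create": {
            "ip_range": "192.168.1.0/24",
            "pool": "nova",
            "interface": "eth0"
        }
    }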
Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), conflict(409), gone(410) Request ------- .. rest_parameters:: parameters.yaml - floating_ips_bulk_create : floating_ip_bulk_object - ip_range : ip_range - interface : virtual_interface_id_optional - pool: floating_ip_pool_name_optional **Example Create Floating Ips: JSON request** .. literalinclude:: ../../doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - floating_ips_bulk_create : floating_ip_bulk_object - interface : virtual_interface - ip_range : ip_range - pool: floating_ip_pool_name **Example Create Floating Ips: JSON response** .. literalinclude:: ../../doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json :language: javascript Bulk-Delete Floating Ips ======================== .. rest_method:: PUT /os-floating-ips-bulk/delete Bulk-deletes floating IPs. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), gone(410) Request ------- .. rest_parameters:: parameters.yaml - ip_range: ip_range_delete **Example Bulk-Delete Floating Ips: JSON request** .. literalinclude:: ../../doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - floating_ips_bulk_delete : ip_range_delete **Example Bulk-Delete Floating Ips: JSON response** .. literalinclude:: ../../doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json :language: javascript List Floating Ips By Host ========================= .. rest_method:: GET /os-floating-ips-bulk/{host_name} Lists all floating IPs for a host. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), gone(410) Request ------- .. rest_parameters:: parameters.yaml - host_name: host_name Response -------- .. rest_parameters:: parameters.yaml - floating_ip_info : floating_ips_list - address : floating_ip - fixed_ip : fixed_ip_address - instance_uuid : server_id - interface : virtual_interface - pool: floating_ip_pool_name - project_id : project_id_value **Example List Floating Ips By Host: JSON response** .. literalinclude:: ../../doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-floating-ips.inc0000664000175000017500000001322700000000000021120 0ustar00zuulzuul00000000000000.. -*- rst -*- ============================================ Floating IPs (os-floating-ips) (DEPRECATED) ============================================ .. warning:: These APIs are proxy calls to the Network service. Nova has deprecated all the proxy APIs and users should use the native APIs instead. These will fail with a 404 starting from microversion 2.36. See: `Relevant Network APIs `__. Lists floating IP addresses for a project. Also, creates (allocates) a floating IP address for a project, shows floating IP address details, and deletes (deallocates) a floating IP address from a project. The cloud administrator configures a pool of floating IP addresses in OpenStack Compute. The project quota defines the maximum number of floating IP addresses that you can allocate to the project. 
After you `allocate a floating IP address `__ for a project, you can: - `Add (associate) the floating IP address `__ with an instance in the project. You can associate only one floating IP address with an instance at a time. - `Remove (disassociate) the floating IP address `__ from an instance in the project. - Delete, or deallocate, a floating IP from the project, which automatically deletes any associations for that IP address. List Floating Ip Addresses ========================== .. rest_method:: GET /os-floating-ips Lists floating IP addresses associated with the tenant or account. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Response -------- .. rest_parameters:: parameters.yaml - floating_ips: floating_ips_list - fixed_ip: fixed_ip_address - id: floating_ip_id_value - instance_id: server_id - ip: floating_ip - pool: floating_ip_pool_name_or_id **Example List Floating Ip Addresses** .. literalinclude:: ../../doc/api_samples/os-floating-ips/floating-ips-list-resp.json :language: javascript Create (Allocate) Floating Ip Address ===================================== .. rest_method:: POST /os-floating-ips Creates, or allocates, a floating IP address for the current project. By default, the floating IP address is allocated from the public pool. If more than one floating IP address pool is available, use the ``pool`` parameter to specify from which pool to allocate the IP address. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - pool: floating_ip_pool_name_or_id **Example Create (Allocate) Floating Ip Address** .. literalinclude:: ../../doc/api_samples/os-floating-ips/floating-ips-create-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - floating_ip: floating_ip_obj - fixed_ip: fixed_ip_address - id: floating_ip_id_value - instance_id: server_id - ip: floating_ip - pool: floating_ip_pool_name_or_id **Example Create (Allocate) Floating Ip Address: JSON response** .. literalinclude:: ../../doc/api_samples/os-floating-ips/floating-ips-create-resp.json :language: javascript Show Floating Ip Address Details ================================ .. rest_method:: GET /os-floating-ips/{floating_ip_id} Shows details for a floating IP address, by ID, that is associated with the tenant or account. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - floating_ip_id: floating_ip_id Response -------- .. rest_parameters:: parameters.yaml - floating_ip: floating_ip_obj - fixed_ip: fixed_ip_address - id: floating_ip_id_value - instance_id: server_id - ip: floating_ip - pool: floating_ip_pool_name_or_id **Example Show Floating Ip Address Details: JSON response** .. 
literalinclude:: ../../doc/api_samples/os-floating-ips/floating-ips-get-resp.json :language: javascript Delete (Deallocate) Floating Ip Address ======================================= .. rest_method:: DELETE /os-floating-ips/{floating_ip_id} Deletes, or deallocates, a floating IP address from the current project and returns it to the pool from which it was allocated. If the IP address is still associated with a running instance, it is automatically disassociated from that instance. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - floating_ip_id: floating_ip_id Response -------- There is no body content for the response of a successful DELETE action. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-fping.inc0000664000175000017500000000460600000000000017630 0ustar00zuulzuul00000000000000.. -*- rst -*- ========================== Ping instances (os-fping) ========================== .. warning:: This API only works with ``nova-network`` which is deprecated. It should be avoided in any new applications. These will fail with a 404 starting from microversion 2.36. It was removed in the 18.0.0 Rocky release. Pings instances and reports which instances are alive. Ping Instances ============== .. rest_method:: GET /os-fping Runs the fping utility to ping instances and reports which instances are alive. Specify the ``all_tenants=1`` query parameter to ping instances for all tenants. For example: :: GET /os-fping?all_tenants=1 Specify the ``include`` and ``exclude`` query parameters to filter the results. For example: :: GET /os-fping?all_tenants=1&include=uuid1,uuid2&exclude=uuid3,uuid4 Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: serviceUnavailable(503), unauthorized(401), forbidden(403), itemNotFound(404), gone(410) Request ------- .. rest_parameters:: parameters.yaml - all_tenants: all_tenants - include: include - exclude: exclude Response -------- .. rest_parameters:: parameters.yaml - servers: servers - alive: alive - id: server_id - project_id: project_id | **Example Ping Instances** .. literalinclude:: ../../doc/api_samples/os-fping/fping-get-resp.json :language: javascript Ping An Instance ================ .. rest_method:: GET /os-fping/{instance_id} Runs the fping utility to ping an instance and reports whether the instance is alive. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: serviceUnavailable(503), unauthorized(401), forbidden(403), itemNotFound(404), gone(410) Request ------- .. rest_parameters:: parameters.yaml - instance_id: instance_id Response -------- .. rest_parameters:: parameters.yaml - server: server - alive: alive - id: server_id - project_id: project_id | **Example Ping An Instance** .. 
literalinclude:: ../../doc/api_samples/os-fping/fping-get-details-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-hosts.inc0000664000175000017500000001601500000000000017662 0ustar00zuulzuul00000000000000.. -*- rst -*- =============================== Hosts (os-hosts) (DEPRECATED) =============================== .. warning:: The ``os-hosts`` API is deprecated as of the 2.43 microversion. Requests made with microversion >= 2.43 will result in a 404 error. To list and show host details, use the :ref:`os-hypervisors` API. To enable or disable a service, use the :ref:`os-services` API. There is no replacement for the `shutdown`, `startup`, `reboot`, or `maintenance_mode` actions as those are system-level operations which should be outside of the control of the compute service. Manages physical hosts. Some virt drivers do not support all host functions. For more information, see `nova virt support matrix `__ Policy defaults enable only users with the administrative role to perform all os-hosts related operations. Cloud providers can change these permissions through the ``policy.json`` file. List Hosts ========== .. rest_method:: GET /os-hosts Lists hosts. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Response -------- .. rest_parameters:: parameters.yaml - hosts: hosts - zone: host_zone - host_name: host_name_body - service: host_service **Example List Hosts** .. literalinclude:: ../../doc/api_samples/os-hosts/hosts-list-resp.json :language: javascript Show Host Details ================= .. rest_method:: GET /os-hosts/{host_name} Shows details for a host. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - host_name: host_name Response -------- .. rest_parameters:: parameters.yaml - host: host_resource_array - resource: host_resource - resource.project: host_project - resource.cpu: host_cpu - resource.memory_mb: host_memory_mb - resource.disk_gb: host_disk_gb - resource.host: host_name_body **Example Show Host Details** .. literalinclude:: ../../doc/api_samples/os-hosts/host-get-resp.json :language: javascript Update Host status ================== .. rest_method:: PUT /os-hosts/{host_name} Enables, disables a host or put a host in maintenance or normal mode. .. warning:: Putting a host into maintenance mode is only implemented by the XenServer compute driver and it has been reported that it does not actually evacuate all of the guests from the host, it just sets a flag in the Xen management console, and is therefore useless. There are other APIs that allow you to do the same thing which are supported across all compute drivers, which would be disabling a service and then migrating the instances off that host. See the `Operations Guide `_ for more information on maintenance. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), NotImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - host_name: host_name - status: host_status_body_in - maintenance_mode: host_maintenance_mode_in **Example Enable Host: JSON request** .. literalinclude:: ../../doc/api_samples/os-hosts/host-put-maintenance-req.json :language: javascript Response -------- .. 
rest_parameters:: parameters.yaml - host: host_name_body - status: host_status_body - maintenance_mode: host_maintenance_mode **Example Enable Host** .. literalinclude:: ../../doc/api_samples/os-hosts/host-put-maintenance-resp.json :language: javascript Reboot Host =========== .. rest_method:: GET /os-hosts/{host_name}/reboot Reboots a host. .. warning:: This is only supported by the XenServer and Hyper-v drivers. The backing drivers do no orchestration of dealing with guests in the nova database when performing a reboot of the host. The nova-compute service for that host may be temporarily disabled by the service group health check which would take it out of scheduling decisions, and the guests would be down, but the periodic task which checks for unexpectedly stopped instances runs in the nova-compute service, which might be dead now so the nova API would show the instances as running when in fact they are actually stopped. This API is also not tested in a live running OpenStack environment. Needless to say, it is not recommended to use this API and it is deprecated as of the 2.43 microversion. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), NotImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - host_name: host_name Response -------- .. rest_parameters:: parameters.yaml - host: host_name_body - power_action: host_power_action **Example Reboot Host: JSON response** .. literalinclude:: ../../doc/api_samples/os-hosts/host-get-reboot.json :language: javascript Shut Down Host ============== .. rest_method:: GET /os-hosts/{host_name}/shutdown Shuts down a host. .. warning:: This is only supported by the XenServer and Hyper-v drivers. The backing drivers do no orchestration of dealing with guests in the nova database when performing a shutdown of the host. The nova-compute service for that host may be temporarily disabled by the service group health check which would take it out of scheduling decisions, and the guests would be down, but the periodic task which checks for unexpectedly stopped instances runs in the nova-compute service, which might be dead now so the nova API would show the instances as running when in fact they are actually stopped. This API is also not tested in a live running OpenStack environment. Needless to say, it is not recommended to use this API and it is deprecated as of the 2.43 microversion. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), NotImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - host_name: host_name Response -------- .. rest_parameters:: parameters.yaml - host: host_name_body - power_action: host_power_action **Example Shut Down Host** .. literalinclude:: ../../doc/api_samples/os-hosts/host-get-shutdown.json :language: javascript Start Host ========== .. rest_method:: GET /os-hosts/{host_name}/startup Starts a host. .. warning:: This is not implemented by any in-tree compute drivers and therefore will always fail with a `501 NotImplemented` error. Needless to say, it is not recommended to use this API and it is deprecated as of the 2.43 microversion. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), NotImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - host_name: host_name Response -------- .. 
rest_parameters:: parameters.yaml - host: host_name_body - power_action: host_power_action **Example Start Host** .. literalinclude:: ../../doc/api_samples/os-hosts/host-get-startup.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-hypervisors.inc0000664000175000017500000003044700000000000021124 0ustar00zuulzuul00000000000000.. -*- rst -*- .. _os-hypervisors: ============================== Hypervisors (os-hypervisors) ============================== Lists all hypervisors, shows summary statistics for all hypervisors over all compute nodes, shows details for a hypervisor, shows the uptime for a hypervisor, lists all servers on hypervisors that match the given ``hypervisor_hostname_pattern`` or searches for hypervisors by the given ``hypervisor_hostname_pattern``. List Hypervisors ================ .. rest_method:: GET /os-hypervisors Lists hypervisors. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - limit: hypervisor_limit - marker: hypervisor_marker - marker: hypervisor_marker_uuid - hypervisor_hostname_pattern: hypervisor_hostname_pattern_query - with_servers: hypervisor_with_servers_query Response -------- .. rest_parameters:: parameters.yaml - hypervisors: hypervisors - hypervisor_hostname: hypervisor_hostname - id: hypervisor_id_body - id: hypervisor_id_body_uuid - state: hypervisor_state - status: hypervisor_status - hypervisor_links: hypervisor_links - servers: hypervisor_servers - servers.uuid: hypervisor_servers_uuid - servers.name: hypervisor_servers_name **Example List Hypervisors (v2.33): JSON response** .. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.33/hypervisors-list-resp.json :language: javascript **Example List Hypervisors With Servers (v2.53): JSON response** .. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.53/hypervisors-with-servers-resp.json :language: javascript List Hypervisors Details ======================== .. rest_method:: GET /os-hypervisors/detail Lists hypervisors details. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - limit: hypervisor_limit - marker: hypervisor_marker - marker: hypervisor_marker_uuid - hypervisor_hostname_pattern: hypervisor_hostname_pattern_query - with_servers: hypervisor_with_servers_query Response -------- .. 
rest_parameters:: parameters.yaml - hypervisors: hypervisors - cpu_info: cpu_info - current_workload: current_workload - status: hypervisor_status - state: hypervisor_state - disk_available_least: disk_available_least - host_ip: host_ip - free_disk_gb: hypervisor_free_disk_gb - free_ram_mb: free_ram_mb - hypervisor_hostname: hypervisor_hostname - hypervisor_type: hypervisor_type_body - hypervisor_version: hypervisor_version - id: hypervisor_id_body - id: hypervisor_id_body_uuid - local_gb: local_gb - local_gb_used: local_gb_used - memory_mb: memory_mb - memory_mb_used: memory_mb_used - running_vms: running_vms - servers: hypervisor_servers - servers.uuid: hypervisor_servers_uuid - servers.name: hypervisor_servers_name - service: hypervisor_service - service.host: host_name_body - service.id: service_id_body_2_52 - service.id: service_id_body_2_53 - service.disabled_reason: service_disable_reason - uptime: hypervisor_uptime - vcpus: hypervisor_vcpus - vcpus_used: hypervisor_vcpus_used - hypervisor_links: hypervisor_links **Example List Hypervisors Details (v2.33): JSON response** .. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.33/hypervisors-detail-resp.json :language: javascript **Example List Hypervisors Details (v2.53): JSON response** .. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json :language: javascript **Example List Hypervisors Details (v2.88): JSON response** .. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.88/hypervisors-detail-resp.json :language: javascript Show Hypervisor Statistics (DEPRECATED) ======================================= .. rest_method:: GET /os-hypervisors/statistics max_version: 2.87 Shows summary statistics for all enabled hypervisors over all compute nodes. .. warning:: This API is deprecated and will fail with HTTP 404 starting with microversion 2.88. Use placement to get information on resource usage across hypervisors. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. .. note:: As noted, some of the parameters in the response representing totals do not take allocation ratios into account. This can result in a disparity between the totals and the usages. A more accurate representation of state can be obtained using `placement`__. __ https://docs.openstack.org/api-ref/placement/#list-resource-provider-usages Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Response -------- .. rest_parameters:: parameters.yaml - hypervisor_statistics: hypervisor_statistics - count: hypervisor_count - current_workload: current_workload_total - disk_available_least: disk_available_least_total - free_disk_gb: hypervisor_free_disk_gb_total - free_ram_mb: free_ram_mb_total - local_gb: local_gb_total - local_gb_used: local_gb_used_total - memory_mb: memory_mb_total - memory_mb_used: memory_mb_used_total - running_vms: running_vms_total - vcpus: hypervisor_vcpus_total - vcpus_used: hypervisor_vcpus_used_total **Example Show Hypervisor Statistics: JSON response** .. literalinclude:: ../../doc/api_samples/os-hypervisors/hypervisors-statistics-resp.json :language: javascript Show Hypervisor Details ======================= .. rest_method:: GET /os-hypervisors/{hypervisor_id} Shows details for a given hypervisor. Policy defaults enable only users with the administrative role to perform this operation. 
Cloud providers can change these permissions through the ``policy.json`` file. .. note:: As noted, some of the parameters in the response representing totals do not take allocation ratios into account. This can result in a disparity between the totals and the usages. A more accurate representation of state can be obtained using `placement`__. __ https://docs.openstack.org/api-ref/placement/#show-resource-provider-usages Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - hypervisor_id: hypervisor_id - hypervisor_id: hypervisor_id_uuid - with_servers: hypervisor_with_servers_query Response -------- .. rest_parameters:: parameters.yaml - hypervisor: hypervisor - cpu_info: cpu_info - state: hypervisor_state - status: hypervisor_status - current_workload: current_workload - disk_available_least: disk_available_least - host_ip: host_ip - free_disk_gb: hypervisor_free_disk_gb - free_ram_mb: free_ram_mb - hypervisor_hostname: hypervisor_hostname - hypervisor_type: hypervisor_type_body - hypervisor_version: hypervisor_version - id: hypervisor_id_body - id: hypervisor_id_body_uuid - local_gb: local_gb - local_gb_used: local_gb_used - memory_mb: memory_mb - memory_mb_used: memory_mb_used - running_vms: running_vms - servers: hypervisor_servers - servers.uuid: hypervisor_servers_uuid - servers.name: hypervisor_servers_name - service: hypervisor_service - service.host: host_name_body - service.id: service_id_body_2_52 - service.id: service_id_body_2_53 - service.disabled_reason: service_disable_reason - uptime: hypervisor_uptime - vcpus: hypervisor_vcpus - vcpus_used: hypervisor_vcpus_used **Example Show Hypervisor Details (v2.28): JSON response** .. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.28/hypervisors-show-resp.json :language: javascript **Example Show Hypervisor Details With Servers (v2.53): JSON response** .. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.53/hypervisors-show-with-servers-resp.json :language: javascript **Example Show Hypervisors Details (v2.88): JSON response** .. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.88/hypervisors-show-with-servers-resp.json :language: javascript Show Hypervisor Uptime (DEPRECATED) =================================== .. rest_method:: GET /os-hypervisors/{hypervisor_id}/uptime max_version: 2.87 Shows the uptime for a given hypervisor. .. warning:: This API is deprecated and will fail with HTTP 404 starting with microversion 2.88. Use `Show Hypervisor Details`_ with microversion 2.88 and later to get this information. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), NotImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - hypervisor_id: hypervisor_id - hypervisor_id: hypervisor_id_uuid Response -------- .. rest_parameters:: parameters.yaml - hypervisor: hypervisor - hypervisor_hostname: hypervisor_hostname - id: hypervisor_id_body - id: hypervisor_id_body_uuid - state: hypervisor_state - status: hypervisor_status - uptime: uptime **Example Show Hypervisor Uptime: JSON response** .. literalinclude:: ../../doc/api_samples/os-hypervisors/hypervisors-uptime-resp.json :language: javascript **Example Show Hypervisor Uptime (v2.53): JSON response** .. 
literalinclude:: ../../doc/api_samples/os-hypervisors/v2.53/hypervisors-uptime-resp.json :language: javascript Search Hypervisor (DEPRECATED) ============================== .. rest_method:: GET /os-hypervisors/{hypervisor_hostname_pattern}/search max_version: 2.52 Searches for hypervisors by a given hypervisor host name or a portion of it. .. warning:: This API is deprecated starting with microversion 2.53. Use `List Hypervisors`_ with the ``hypervisor_hostname_pattern`` query parameter with microversion 2.53 and later. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response code: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - hypervisor_hostname_pattern: hypervisor_hostname_pattern Response -------- .. rest_parameters:: parameters.yaml - hypervisors: hypervisors - hypervisor_hostname: hypervisor_hostname - id: hypervisor_id_body_no_version - state: hypervisor_state - status: hypervisor_status **Example Search Hypervisor: JSON response** .. literalinclude:: ../../doc/api_samples/os-hypervisors/hypervisors-search-resp.json :language: javascript List Hypervisor Servers (DEPRECATED) ==================================== .. rest_method:: GET /os-hypervisors/{hypervisor_hostname_pattern}/servers max_version: 2.52 Lists all servers that belong to each hypervisor whose host name matches the given hypervisor host name or a portion of it. .. warning:: This API is deprecated starting with microversion 2.53. Use `List Hypervisors`_ with the ``hypervisor_hostname_pattern`` and ``with_servers`` query parameters with microversion 2.53 and later. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response code: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - hypervisor_hostname_pattern: hypervisor_hostname_pattern Response -------- .. rest_parameters:: parameters.yaml - hypervisors: hypervisors - hypervisor_hostname: hypervisor_hostname - id: hypervisor_id_body_no_version - state: hypervisor_state - status: hypervisor_status - servers: servers - servers.uuid: server_uuid - servers.name: server_name **Example List Hypervisor Servers: JSON response** .. literalinclude:: ../../doc/api_samples/os-hypervisors/hypervisors-with-servers-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-instance-actions.inc0000664000175000017500000000720700000000000021767 0ustar00zuulzuul00000000000000.. -*- rst -*- ================================================ Servers actions (servers, os-instance-actions) ================================================ List actions and action details for a server. List Actions For Server ======================= .. rest_method:: GET /servers/{server_id}/os-instance-actions Lists actions for a server. Action information of deleted instances can be returned for requests starting with microversion 2.21. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file.
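Paging and time-based filtering are expressed as query parameters on this URL, for example (the values are illustrative, and these parameters are only accepted on microversions that support them)::

    GET /servers/{server_id}/os-instance-actions?limit=2&changes-since=2018-07-15T00:00:00Z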
Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - limit: instance_action_limit - marker: instance_action_marker - changes-since: changes_since_instance_action - changes-before: changes_before_instance_action Response -------- .. rest_parameters:: parameters.yaml - instanceActions: instanceActions - action: action - instance_uuid: instance_id_body - message: message - project_id: project_id_server_action - request_id: request_id_body - start_time: start_time - user_id: user_id_server_action - updated_at: updated_instance_action - links: instance_actions_next_links **Example List Actions For Server: JSON response** .. literalinclude:: ../../doc/api_samples/os-instance-actions/instance-actions-list-resp.json :language: javascript **Example List Actions For Server With Links (v2.58):** .. literalinclude:: ../../doc/api_samples/os-instance-actions/v2.58/instance-actions-list-with-limit-resp.json :language: javascript Show Server Action Details ========================== .. rest_method:: GET /servers/{server_id}/os-instance-actions/{request_id} Shows details for a server action. Action details of deleted instances can be returned for requests later than microversion 2.21. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - request_id: request_id Response -------- .. rest_parameters:: parameters.yaml - instanceAction: instanceAction - action: action - instance_uuid: instance_id_body - message: message - project_id: project_id_server_action - request_id: request_id_body - start_time: start_time - user_id: user_id_server_action - events: instance_action_events_2_50 - events: instance_action_events_2_51 - events.event: event - events.start_time: event_start_time - events.finish_time: event_finish_time - events.result: event_result - events.traceback: event_traceback - events.hostId: event_hostId - events.host: event_host - events.details: event_details - updated_at: updated_instance_action **Example Show Server Action Details For Admin (v2.62)** .. literalinclude:: ../../doc/api_samples/os-instance-actions/v2.62/instance-action-get-resp.json :language: javascript **Example Show Server Action Details For Non-Admin (v2.62)** .. literalinclude:: ../../doc/api_samples/os-instance-actions/v2.62/instance-action-get-non-admin-resp.json :language: javascript **Example Show Server Action Details For System Reader (v2.84)** .. literalinclude:: ../../doc/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-instance-usage-audit-log.inc0000664000175000017500000000552700000000000023321 0ustar00zuulzuul00000000000000.. -*- rst -*- ======================================================== Server usage audit log (os-instance-usage-audit-log) ======================================================== Audit server usage of the cloud. This API is dependent on the ``instance_usage_audit`` configuration option being set on all compute hosts where usage auditing is required. 
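As a minimal configuration sketch (not taken from this document; the option
names are the standard ``[DEFAULT]`` settings in ``nova.conf``, and
``instance_usage_audit_period`` is assumed here as the companion option that
selects the audit window), enabling auditing on a compute host could look
like:

.. code-block:: ini

   [DEFAULT]
   # Emit the periodic usage audit data consumed by this API on every
   # nova-compute host; audit one calendar month at a time.
   instance_usage_audit = True
   instance_usage_audit_period = month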
Policy defaults enable only users with the administrative role to perform all os-instance-usage-audit-log related operations. Cloud providers can change these permissions through the ``policy.json`` file. List Server Usage Audits ======================== .. rest_method:: GET /os-instance_usage_audit_log Lists usage audits for all servers on all compute hosts where usage auditing is configured. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Response -------- .. rest_parameters:: parameters.yaml - instance_usage_audit_logs: instance_usage_audit_logs - hosts_not_run: host_not_run - log: instance_usage_audit_log - errors: errors - instances: instances_usage_audit - message: instance_usage_audit_log_message - state: instance_usage_audit_task_state - num_hosts: host_num - num_hosts_done: host_done_num - num_hosts_not_run: host_not_run_num - num_hosts_running: host_running_num - overall_status: overall_status - period_beginning: period_beginning - period_ending: period_ending - total_errors: total_errors - total_instances: total_instances **Example List Usage Audits For All Servers** .. literalinclude:: ../../doc/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-index-get-resp.json :language: javascript List Usage Audits Before Specified Time ======================================= .. rest_method:: GET /os-instance_usage_audit_log/{before_timestamp} Lists usage audits that occurred before a specified time. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - before_timestamp: before_timestamp Response -------- .. rest_parameters:: parameters.yaml - instance_usage_audit_log: instance_usage_audit_logs - hosts_not_run: host_not_run - log: instance_usage_audit_log - errors: errors - instances: instances_usage_audit - message: instance_usage_audit_log_message - state: instance_usage_audit_task_state - num_hosts: host_num - num_hosts_done: host_done_num - num_hosts_not_run: host_not_run_num - num_hosts_running: host_running_num - overall_status: overall_status - period_beginning: period_beginning - period_ending: period_ending - total_errors: total_errors - total_instances: total_instances **Example List Usage Audits Before Specified Time** .. literalinclude:: ../../doc/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-show-get-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-interface.inc0000664000175000017500000001133200000000000020457 0ustar00zuulzuul00000000000000.. -*- rst -*- ========================================= Port interfaces (servers, os-interface) ========================================= List port interfaces, show port interface details of the given server. Create a port interface and uses it to attach a port to the given server, detach a port interface from the given server. List Port Interfaces ==================== .. rest_method:: GET /servers/{server_id}/os-interface Lists port interfaces that are attached to a server. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), NotImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path Response -------- .. 
rest_parameters:: parameters.yaml - interfaceAttachments: interfaceAttachments - port_state: port_state - fixed_ips: fixed_ips_resp - ip_address: ip_address - subnet_id: subnet_id - mac_addr: mac_addr - net_id: net_id_resp - port_id: port_id_resp - tag: device_tag_nic_attachment_resp **Example List Port Interfaces: JSON response** .. literalinclude:: ../../doc/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json :language: javascript **Example List Tagged Port Interfaces (v2.70): JSON response** .. literalinclude:: ../../doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-list-resp.json :language: javascript Create Interface ================ .. rest_method:: POST /servers/{server_id}/os-interface Creates a port interface and uses it to attach a port to a server. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409), computeFault(500), NotImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - interfaceAttachment: interfaceAttachment - port_id: port_id - net_id: net_id - fixed_ips: fixed_ips - ip_address: ip_address_req - tag: device_tag_nic_attachment **Example Create Interface: JSON request** Create interface with ``net_id`` and ``fixed_ips``. .. literalinclude:: ../../doc/api_samples/os-attach-interfaces/attach-interfaces-create-net_id-req.json :language: javascript Create interface with ``port_id``. .. literalinclude:: ../../doc/api_samples/os-attach-interfaces/attach-interfaces-create-req.json :language: javascript **Example Create Tagged Interface (v2.49): JSON request** .. literalinclude:: ../../doc/api_samples/os-attach-interfaces/v2.49/attach-interfaces-create-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - interfaceAttachment: interfaceAttachment_resp - fixed_ips: fixed_ips_resp - ip_address: ip_address - subnet_id: subnet_id - mac_addr: mac_addr - net_id: net_id_resp - port_id: port_id_resp - port_state: port_state - tag: device_tag_nic_attachment_resp **Example Create Interface: JSON response** .. literalinclude:: ../../doc/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json :language: javascript **Example Create Tagged Interface (v2.70): JSON response** .. literalinclude:: ../../doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-resp.json :language: javascript Show Port Interface Details =========================== .. rest_method:: GET /servers/{server_id}/os-interface/{port_id} Shows details for a port interface that is attached to a server. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - port_id: port_id_path Response -------- .. rest_parameters:: parameters.yaml - interfaceAttachment: interfaceAttachment_resp - port_state: port_state - fixed_ips: fixed_ips_resp - ip_address: ip_address - subnet_id: subnet_id - mac_addr: mac_addr - net_id: net_id_resp - port_id: port_id_resp - tag: device_tag_nic_attachment_resp **Example Show Port Interface Details: JSON response** .. literalinclude:: ../../doc/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json :language: javascript **Example Show Tagged Port Interface Details (v2.70): JSON response** .. literalinclude:: ../../doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-show-resp.json :language: javascript Detach Interface ================ .. 
rest_method:: DELETE /servers/{server_id}/os-interface/{port_id} Detaches a port interface from a server. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409), NotImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - port_id: port_id_path Response -------- No body is returned on successful request. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-keypairs.inc0000664000175000017500000000750500000000000020355 0ustar00zuulzuul00000000000000.. -*- rst -*- ===================== Keypairs (keypairs) ===================== Generates, imports, and deletes SSH keys. List Keypairs ============= .. rest_method:: GET /os-keypairs Lists keypairs that are associated with the account. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - user_id: keypair_user - limit: keypair_limit - marker: keypair_marker Response -------- .. rest_parameters:: parameters.yaml - keypairs: keypairs - keypair: keypair - name: keypair_name - public_key: keypair_public_key - fingerprint: keypair_fingerprint - type: keypair_type - keypairs_links: keypair_links **Example List Keypairs (v2.35): JSON response** .. literalinclude:: ../../doc/api_samples/os-keypairs/v2.35/keypairs-list-resp.json :language: javascript Import (or create) Keypair ========================== .. rest_method:: POST /os-keypairs Imports (or generates) a keypair. .. warning:: Generating a keypair is no longer possible starting from version 2.92. Normal response codes: 200, 201 .. note:: The success status code was changed from 200 to 201 in version 2.2 Error response codes: badRequest(400), unauthorized(401), forbidden(403), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - keypair: keypair - name: keypair_name_in - public_key: keypair_public_key_in - type: keypair_type_in - user_id: keypair_userid_in **Example Create Or Import Keypair (v2.10): JSON request** .. literalinclude:: ../../doc/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json :language: javascript **Example Import Keypair (v2.92): JSON request** .. literalinclude:: ../../doc/api_samples/os-keypairs/v2.92/keypairs-import-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - keypair: keypair - name: keypair_name - public_key: keypair_public_key - fingerprint: keypair_fingerprint - user_id: keypair_userid - private_key: keypair_private_key - type: keypair_type **Example Create Or Import Keypair (v2.10): JSON response** .. literalinclude:: ../../doc/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json :language: javascript **Example Import Keypair (v2.92): JSON response** .. literalinclude:: ../../doc/api_samples/os-keypairs/v2.92/keypairs-import-post-resp.json :language: javascript Show Keypair Details ==================== .. rest_method:: GET /os-keypairs/{keypair_name} Shows details for a keypair that is associated with the account. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - keypair_name: keypair_name_path - user_id: keypair_user Response -------- .. 
rest_parameters:: parameters.yaml - keypair: keypair - created_at: created - deleted: keypair_deleted - deleted_at: keypair_updated_deleted_at - fingerprint: keypair_fingerprint - id: keypair_id - name: keypair_name - public_key: keypair_public_key - updated_at: keypair_updated_deleted_at - user_id: keypair_userid - type: keypair_type **Example Show Keypair Details (v2.10): JSON response** .. literalinclude:: ../../doc/api_samples/os-keypairs/v2.10/keypairs-get-resp.json :language: javascript Delete Keypair ============== .. rest_method:: DELETE /os-keypairs/{keypair_name} Deletes a keypair. Normal response codes: 202, 204 .. note:: The normal return code is 204 in version 2.2 to match the fact that no body content is returned. Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - keypair_name: keypair_name_path - user_id: keypair_user Response -------- There is no body content for the response of a successful DELETE query ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-migrations.inc0000664000175000017500000000436200000000000020700 0ustar00zuulzuul00000000000000.. -*- rst -*- ========================================= Migrations (os-migrations) ========================================= Shows data on migrations. List Migrations =============== .. rest_method:: GET /os-migrations Lists migrations. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Starting from microversion 2.59, the response is sorted by ``created_at`` and ``id`` in descending order. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - hidden: migration_hidden - host: migration_host - instance_uuid: migration_instance_uuid - migration_type: migration_type - source_compute: migration_source_compute - status: migration_status - limit: migration_limit - marker: migration_marker - changes-since: changes_since_migration - changes-before: changes_before_migration - user_id: user_id_query_migrations - project_id: project_id_query_migrations Response -------- .. rest_parameters:: parameters.yaml - migrations: migrations - created_at: created - dest_compute: migrate_dest_compute - dest_host: migrate_dest_host - dest_node: migrate_dest_node - id: migration_id - instance_uuid: server_id - new_instance_type_id: migration_new_flavor_id - old_instance_type_id: migration_old_flavor_id - source_compute: migrate_source_compute - source_node: migrate_source_node - status: migrate_status - updated_at: updated - migration_type: migration_type_2_23 - links: migration_links_2_23 - uuid: migration_uuid - migrations_links: migration_next_links_2_59 - user_id: user_id_migration_2_80 - project_id: project_id_migration_2_80 **Example List Migrations: JSON response** .. literalinclude:: ../../doc/api_samples/os-migrations/migrations-get.json :language: javascript **Example List Migrations (v2.80):** .. literalinclude:: ../../doc/api_samples/os-migrations/v2.80/migrations-get.json :language: javascript **Example List Migrations With Paging (v2.80):** .. 
literalinclude:: ../../doc/api_samples/os-migrations/v2.80/migrations-get-with-limit.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-networks.inc0000664000175000017500000001666500000000000020411 0ustar00zuulzuul00000000000000.. -*- rst -*- ====================================== Networks (os-networks) (DEPRECATED) ====================================== .. warning:: This API was designed to work with ``nova-network`` which was deprecated in the 14.0.0 (Newton) release and removed in the 21.0.0 (Ussuri) release. Some features are proxied to the Network service (neutron) when appropriate, but as with all translation proxies, this is far from perfect compatibility. These APIs should be avoided in new applications in favor of `using neutron directly`__. These will fail with a 404 starting from microversion 2.36. They were removed in the 21.0.0 (Ussuri) release. __ https://docs.openstack.org/api-ref/network/v2/#networks Creates, lists, shows information for, and deletes networks. Adds network to a project, disassociates a network from a project, and disassociates a project from a network. Associates host with and disassociates host from a network. List Networks ============= .. rest_method:: GET /os-networks Lists networks for the project. Policy defaults enable all users to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Response -------- **Example List Networks: JSON response** .. literalinclude:: ../../doc/api_samples/os-networks/networks-list-resp.json :language: javascript Create Network ============== .. rest_method:: POST /os-networks Creates a network. Policy defaults enable only users with the administrative role or the owner of the network to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), conflict(409), gone(410), notImplemented(501) Request ------- **Example Create Network: JSON request** .. literalinclude:: ../../doc/api_samples/os-networks/network-create-req.json :language: javascript Response -------- **Example Create Network: JSON response** .. literalinclude:: ../../doc/api_samples/os-networks/network-create-resp.json :language: javascript Add Network =========== .. rest_method:: POST /os-networks/add Adds a network to a project. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), gone(410), notImplemented(501) Request ------- **Example Add Network: JSON request** .. literalinclude:: ../../doc/api_samples/os-networks/network-add-req.json :language: javascript Response -------- Show Network Details ==================== .. rest_method:: GET /os-networks/{network_id} Shows details for a network. Policy defaults enable all users to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. 
rest_parameters:: parameters.yaml - network_id: network_id Response -------- **Example Show Network Details: JSON response** .. literalinclude:: ../../doc/api_samples/os-networks/network-show-resp.json :language: javascript Delete Network ============== .. rest_method:: DELETE /os-networks/{network_id} Deletes a network. Policy defaults enable only users with the administrative role or the owner of the network to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409), gone(410) Request ------- .. rest_parameters:: parameters.yaml - network_id: network_id Response -------- There is no body content for the response of a successful DELETE query. Associate Host ============== .. rest_method:: POST /os-networks/{network_id}/action Associates a network with a host. Specify the ``associate_host`` action in the request body. Policy defaults enable only users with the administrative role or the owner of the network to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), gone(410), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - network_id: network_id - associate_host: associate_host **Example Associate Host to Network: JSON request** .. literalinclude:: ../../doc/api_samples/os-networks-associate/network-associate-host-req.json :language: javascript Response -------- There is no body content for the response of a successful POST query. Disassociate Network ==================== .. rest_method:: POST /os-networks/{network_id}/action Disassociates a network from a project. You can then reuse the network. Specify the ``disassociate`` action in the request body. Policy defaults enable only users with the administrative role or the owner of the network to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), gone(410), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - network_id: network_id **Example Disassociate Network: JSON request** .. literalinclude:: ../../doc/api_samples/os-networks-associate/network-disassociate-req.json :language: javascript Response -------- There is no body content for the response of a successful POST query. Disassociate Host ================= .. rest_method:: POST /os-networks/{network_id}/action Disassociates a host from a network. Specify the ``disassociate_host`` action in the request body. Policy defaults enable only users with the administrative role or the owner of the network to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), gone(410), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - network_id: network_id **Example Disassociate Host from Network: JSON request** .. literalinclude:: ../../doc/api_samples/os-networks-associate/network-disassociate-host-req.json :language: javascript Response -------- There is no body content for the response of a successful POST query. Disassociate Project ==================== .. 
rest_method:: POST /os-networks/{network_id}/action Disassociates a project from a network. Specify the ``disassociate_project`` action in the request body. Policy defaults enable only users with the administrative role or the owner of the network to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), gone(410), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - network_id: network_id **Example Disassociate Project from Network: JSON request** .. literalinclude:: ../../doc/api_samples/os-networks-associate/network-disassociate-project-req.json :language: javascript Response -------- There is no body content for the response of a successful POST query. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-quota-class-sets.inc0000664000175000017500000001303600000000000021732 0ustar00zuulzuul00000000000000.. -*- rst -*- ======================================= Quota class sets (os-quota-class-sets) ======================================= Show, Create or Update the quotas for a Quota Class. Nova supports implicit 'default' Quota Class only. .. note:: Once a default limit is set via the ``default`` quota class via the API, that takes precedence over any changes to that resource limit in the configuration options. In other words, once you've changed things via the API, you either have to keep those synchronized with the configuration values or remove the default limit from the database manually as there is no REST API for removing quota class values from the database. For Example: If you updated default quotas for instances, to 20, but didn't change ``quota_instances`` in your ``nova.conf``, you'd now have default quota for instances as 20 for all projects. If you then change ``quota_instances=5`` in nova.conf, but didn't update the ``default`` quota class via the API, you'll still have a default quota of 20 for instances regardless of ``nova.conf``. Refer: `Quotas `__ for more details. .. warning:: There is a bug in the v2.1 API until microversion 2.49 and the legacy v2 compatible API which does not return the ``server_groups`` and ``server_group_members`` quotas in GET and PUT ``os-quota-class-sets`` API response, whereas the v2 API used to return those keys in the API response. There is workaround to get the ``server_groups`` and ``server_group_members`` quotas using "List Default Quotas For Tenant" API in :ref:`os-quota-sets` but that is per project quota. This issue is fixed in microversion 2.50, here onwards ``server_groups`` and ``server_group_members`` keys are returned in API response body. Show the quota for Quota Class ============================== .. rest_method:: GET /os-quota-class-sets/{id} Show the quota for the Quota Class. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - id: quota_class_id Response -------- .. 
rest_parameters:: parameters.yaml - quota_class_set: quota_class_set - cores: cores_quota_class - id: quota_class_id_body - instances: instances_quota_class - key_pairs: key_pairs_quota_class - metadata_items: metadata_items - ram: ram_quota_class - fixed_ips: fixed_ips_quota_class - floating_ips: floating_ips_quota_class - networks: networks_quota_optional - security_group_rules: security_group_rules_quota_class - security_groups: security_groups_quota_class - server_groups: server_groups_quota_class - server_group_members: server_group_members_quota_class - injected_file_content_bytes: injected_file_content_bytes - injected_file_path_bytes: injected_file_path_bytes - injected_files: injected_files_quota_class **Example Show A Quota Class: JSON response(2.50)** .. literalinclude:: ../../doc/api_samples/os-quota-class-sets/v2.50/quota-classes-show-get-resp.json :language: javascript Create or Update Quotas for Quota Class ======================================= .. rest_method:: PUT /os-quota-class-sets/{id} Update the quotas for the Quota Class. If the requested Quota Class is not found in the DB, then the API will create the one. Only 'default' quota class is valid and used to set the default quotas, all other quota class would not be used anywhere. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - id: quota_class_id - quota_class_set: quota_class_set - cores: cores_quota_class_optional - instances: instances_quota_class_optional - key_pairs: key_pairs_quota_class_optional - metadata_items: metadata_items_quota_optional - ram: ram_quota_class_optional - server_groups: server_groups_quota_class_optional - server_group_members: server_group_members_quota_optional - fixed_ips: fixed_ips_quota_class_optional - floating_ips: floating_ips_quota_class_optional - networks: networks_quota_optional - security_group_rules: security_group_rules_quota_class_optional - security_groups: security_groups_quota_class_optional - injected_file_content_bytes: injected_file_content_bytes_quota_optional - injected_file_path_bytes: injected_file_path_bytes_quota_optional - injected_files: injected_files_quota_class_optional **Example Update Quotas: JSON request(2.50)** .. literalinclude:: ../../doc/api_samples/os-quota-class-sets/v2.50/quota-classes-update-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - quota_class_set: quota_class_set - cores: cores_quota_class - instances: instances_quota_class - key_pairs: key_pairs_quota_class - metadata_items: metadata_items - ram: ram_quota_class - fixed_ips: fixed_ips_quota_class - floating_ips: floating_ips_quota_class - networks: networks_quota_optional - security_group_rules: security_group_rules_quota_class - security_groups: security_groups_quota_class - server_groups: server_groups_quota_class - server_group_members: server_group_members_quota_class - injected_file_content_bytes: injected_file_content_bytes - injected_file_path_bytes: injected_file_path_bytes - injected_files: injected_files_quota_class **Example Update Quotas: JSON response(2.50)** .. literalinclude:: ../../doc/api_samples/os-quota-class-sets/v2.50/quota-classes-update-post-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-quota-sets.inc0000664000175000017500000001646500000000000020640 0ustar00zuulzuul00000000000000.. -*- rst -*- .. 
_os-quota-sets: ============================ Quota sets (os-quota-sets) ============================ Permits administrators, depending on policy settings, to view default quotas, view details for quotas, revert quotas to defaults, and update the quotas for a project or a project and user. Show A Quota ============ .. rest_method:: GET /os-quota-sets/{tenant_id} Show the quota for a project or a project and a user. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) - 400 - BadRequest - the tenant_id is not valid in your cloud, perhaps because it was typoed. Request ------- .. rest_parameters:: parameters.yaml - tenant_id: tenant_id - user_id: user_id_query_quota Response -------- .. rest_parameters:: parameters.yaml - quota_set: quota_set - cores: cores - id: quota_tenant_or_user_id_body - instances: instances - key_pairs: key_pairs - metadata_items: metadata_items - ram: ram - server_groups: server_groups - server_group_members: server_group_members - fixed_ips: fixed_ips_quota - floating_ips: floating_ips - networks: networks_quota_set_optional - security_group_rules: security_group_rules_quota - security_groups: security_groups_quota - injected_file_content_bytes: injected_file_content_bytes - injected_file_path_bytes: injected_file_path_bytes - injected_files: injected_files **Example Show A Quota: JSON response** .. literalinclude:: ../../doc/api_samples/os-quota-sets/user-quotas-show-get-resp.json :language: javascript Update Quotas ============= .. rest_method:: PUT /os-quota-sets/{tenant_id} Update the quotas for a project or a project and a user. Users can force the update even if the quota has already been used and the reserved quota exceeds the new quota. To force the update, specify the ``"force": True`` attribute in the request body, the default value is ``false``. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) - 400 - BadRequest - the tenant_id is not valid in your cloud, perhaps because it was typoed. Request ------- .. rest_parameters:: parameters.yaml - tenant_id: tenant_id - user_id: user_id_query_set_quota - quota_set: quota_set - force: force - cores: cores_quota_optional - instances: instances_quota_optional - key_pairs: key_pairs_quota_optional - metadata_items: metadata_items_quota_optional - ram: ram_quota_optional - server_groups: server_groups_quota_optional - server_group_members: server_group_members_quota_optional - fixed_ips: fixed_ips_quota_optional - floating_ips: floating_ips_quota_optional - networks: networks_quota_set_optional - security_group_rules: security_group_rules - security_groups: security_groups_quota_optional - injected_file_content_bytes: injected_file_content_bytes_quota_optional - injected_file_path_bytes: injected_file_path_bytes_quota_optional - injected_files: injected_files_quota_optional **Example Update Quotas: JSON request** .. literalinclude:: ../../doc/api_samples/os-quota-sets/quotas-update-post-req.json :language: javascript **Example Update Quotas with the optional ``force`` attribute: JSON request** .. literalinclude:: ../../doc/api_samples/os-quota-sets/quotas-update-force-post-req.json :language: javascript Response -------- .. 
rest_parameters:: parameters.yaml - quota_set: quota_set - cores: cores - instances: instances - key_pairs: key_pairs - metadata_items: metadata_items - ram: ram - server_groups: server_groups - server_group_members: server_group_members - fixed_ips: fixed_ips_quota - floating_ips: floating_ips - networks: networks_quota_set_optional - security_group_rules: security_group_rules_quota - security_groups: security_groups_quota - injected_file_content_bytes: injected_file_content_bytes - injected_file_path_bytes: injected_file_path_bytes - injected_files: injected_files **Example Update Quotas: JSON response** .. literalinclude:: ../../doc/api_samples/os-quota-sets/quotas-update-post-resp.json :language: javascript Revert Quotas To Defaults ========================= .. rest_method:: DELETE /os-quota-sets/{tenant_id} Reverts the quotas to default values for a project or a project and a user. To revert quotas for a project and a user, specify the ``user_id`` query parameter. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - tenant_id: tenant_id - user_id: user_id_query_quota_delete Response -------- There is no body content for the response of a successful DELETE operation. List Default Quotas For Tenant ============================== .. rest_method:: GET /os-quota-sets/{tenant_id}/defaults Lists the default quotas for a project. Normal response codes: 200 Error response codes: badrequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - tenant_id: tenant_id Response -------- .. rest_parameters:: parameters.yaml - quota_set: quota_set - cores: cores - id: quota_tenant_or_user_id_body - instances: instances - key_pairs: key_pairs - metadata_items: metadata_items - ram: ram - server_groups: server_groups - server_group_members: server_group_members - fixed_ips: fixed_ips_quota - floating_ips: floating_ips - networks: networks_quota_set_optional - security_group_rules: security_group_rules_quota - security_groups: security_groups_quota - injected_file_content_bytes: injected_file_content_bytes - injected_file_path_bytes: injected_file_path_bytes - injected_files: injected_files **Example List Default Quotas For Tenant: JSON response** .. literalinclude:: ../../doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json :language: javascript Show The Detail of Quota ======================== .. rest_method:: GET /os-quota-sets/{tenant_id}/detail Show the detail of quota for a project or a project and a user. To show a quota for a project and a user, specify the ``user_id`` query parameter. Normal response codes: 200 Error response codes: badrequest(400), unauthorized(401), forbidden(403) - 400 - BadRequest - the {tenant_id} is not valid in your cloud, perhaps because it was typoed. Request ------- .. rest_parameters:: parameters.yaml - tenant_id: tenant_id - user_id: user_id_query_quota Response -------- .. 
rest_parameters:: parameters.yaml - quota_set: quota_set - cores: cores_quota_details - id: quota_tenant_or_user_id_body - instances: instances_quota_details - key_pairs: key_pairs_quota_details - metadata_items: metadata_items_quota_details - ram: ram_quota_details - server_groups: server_groups_quota_details - server_group_members: server_group_members_quota_details - fixed_ips: fixed_ips_quota_details - floating_ips: floating_ips_quota_details - networks: networks_quota_set_optional - security_group_rules: security_group_rules_quota_details - security_groups: security_groups_quota_details - injected_file_content_bytes: injected_file_content_bytes_quota_details - injected_file_path_bytes: injected_file_path_bytes_quota_details - injected_files: injected_files_quota_details **Example Show A Quota: JSON response** .. literalinclude:: ../../doc/api_samples/os-quota-sets/quotas-show-detail-get-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-security-group-default-rules.inc0000664000175000017500000001034700000000000024277 0ustar00zuulzuul00000000000000.. -*- rst -*- ==================================================================== Rules for default security group (os-security-group-default-rules) ==================================================================== .. warning:: This API only available with ``nova-network`` which is deprecated. It should be avoided in any new applications. These will fail with a 404 starting from microversion 2.36. They were completely removed in the 21.0.0 (Ussuri) release. Lists, shows information for, and creates default security group rules. List Default Security Group Rules ================================= .. rest_method:: GET /os-security-group-default-rules Lists default security group rules. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), gone(410), notImplemented(501) Response -------- .. rest_parameters:: parameters.yaml - security_group_default_rules: security_group_default_rules - from_port: from_port - id: secgroup_default_rule_id - ip_protocol: ip_protocol - ip_range: secgroup_rule_ip_range - ip_range.cidr: secgroup_rule_cidr - to_port: to_port **Example List default security group rules: JSON response** .. literalinclude:: ../../doc/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json :language: javascript Show Default Security Group Rule Details ======================================== .. rest_method:: GET /os-security-group-default-rules/{security_group_default_rule_id} Shows details for a security group rule. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), gone(410), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - security_group_default_rule_id: security_group_default_rule_id Response -------- .. rest_parameters:: parameters.yaml - security_group_default_rule: security_group_default_rule - from_port: from_port - id: secgroup_default_rule_id - ip_protocol: ip_protocol - ip_range: secgroup_rule_ip_range - ip_range.cidr: secgroup_rule_cidr - to_port: to_port **Example Show default security group rule: JSON response** .. literalinclude:: ../../doc/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json :language: javascript Create Default Security Group Rule ================================== .. 
rest_method:: POST /os-security-group-default-rules Creates a default security group rule. If you specify a source port ( ``from_port`` ) or destination port ( ``to_port`` ) value, you must specify an IP protocol ( ``ip_protocol`` ) value. Otherwise, the operation returns the ``Bad Request (400)`` response code. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), conflict(409), gone(410), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - security_group_default_rule: security_group_default_rule - ip_protocol: ip_protocol - from_port: from_port - to_port: to_port - cidr: secgroup_rule_cidr **Example Create default security group rule: JSON request** .. literalinclude:: ../../doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - security_group_default_rule: security_group_default_rule - from_port: from_port - id: secgroup_default_rule_id - ip_protocol: ip_protocol - ip_range: secgroup_rule_ip_range - ip_range.cidr: secgroup_rule_cidr - to_port: to_port **Example Create default security group rule: JSON response** .. literalinclude:: ../../doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json :language: javascript Delete Default Security Group Rule ================================== .. rest_method:: DELETE /os-security-group-default-rules/{security_group_default_rule_id} Deletes a security group rule. Normal response codes: 204 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), gone(410), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - security_group_default_rule_id: security_group_default_rule_id Response -------- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-security-group-rules.inc0000664000175000017500000000526300000000000022656 0ustar00zuulzuul00000000000000.. -*- rst -*- ================================================================ Rules for security group (os-security-group-rules) (DEPRECATED) ================================================================ .. warning:: These APIs are proxy calls to the Network service. Nova has deprecated all the proxy APIs and users should use the native APIs instead. These will fail with a 404 starting from microversion 2.36. See: `Relevant Network APIs `__. Creates and deletes security group rules. Create Security Group Rule ========================== .. rest_method:: POST /os-security-group-rules Creates a rule for a security group. Either ``cidr`` or ``group_id`` must be specified when creating a rule. .. note:: nova-network only supports ingress rules. If you want to define egress rules you must use the Neutron networking service. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - security_group_rule: security_group_rule - parent_group_id: parent_group_id - ip_protocol: ip_protocol - from_port: from_port - to_port: to_port - cidr: secgroup_rule_cidr - group_id: group_id **Example Create security group rule: JSON request** .. literalinclude:: ../../doc/api_samples/os-security-groups/security-group-rules-post-req.json :language: javascript Response -------- The ``group`` is empty if ``group_id`` was not provided on the request. 
The ``ip_range`` is empty if ``cidr`` was not provided on the request. .. rest_parameters:: parameters.yaml - security_group_rule: security_group_rule - ip_protocol: ip_protocol - from_port: from_port - to_port: to_port - ip_range: secgroup_rule_ip_range - ip_range.cidr: secgroup_rule_cidr - id: secgroup_rule_id - parent_group_id: parent_group_id - group: group - group.name: name_sec_group_optional - group.tenant_id: secgroup_tenant_id_body **Example Create security group rule: JSON response** .. literalinclude:: ../../doc/api_samples/os-security-groups/security-group-rules-post-resp.json :language: javascript Delete Security Group Rule ========================== .. rest_method:: DELETE /os-security-group-rules/{security_group_rule_id} Deletes a security group rule. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - security_group_rule_id: security_group_rule_id Response -------- There is no body content for the response of a successful DELETE query. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-security-groups.inc0000664000175000017500000001064300000000000021707 0ustar00zuulzuul00000000000000.. -*- rst -*- .. NOTE(gmann): These APIs are deprecated so do not update this file even body, example or parameters are not complete. ================================================== Security groups (os-security-groups) (DEPRECATED) ================================================== .. warning:: These APIs are proxy calls to the Network service. Nova has deprecated all the proxy APIs and users should use the native APIs instead. These will fail with a 404 starting from microversion 2.36. See: `Relevant Network APIs `__. Lists, shows information for, creates, updates and deletes security groups. List Security Groups ==================== .. rest_method:: GET /os-security-groups Lists security groups. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - limit: limit_simple - offset: offset_simple - all_tenants: all_tenants_sec_grp_query Response -------- .. rest_parameters:: parameters.yaml - security_groups: security_groups_obj - description: description - id: security_group_id_body - name: name - rules: rules - tenant_id: tenant_id_body **Example List security groups: JSON response** .. literalinclude:: ../../doc/api_samples/os-security-groups/security-groups-list-get-resp.json :language: javascript Create Security Group ===================== .. rest_method:: POST /os-security-groups Creates a security group. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - security_group: security_group - name: name - description: description **Example Create security group: JSON request** .. literalinclude:: ../../doc/api_samples/os-security-groups/security-group-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - security_group: security_group - description: description - id: security_group_id_body - name: name - rules: rules - tenant_id: tenant_id_body **Example Create security group: JSON response** .. 
literalinclude:: ../../doc/api_samples/os-security-groups/security-groups-create-resp.json :language: javascript Show Security Group Details =========================== .. rest_method:: GET /os-security-groups/{security_group_id} Shows details for a security group. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - security_group_id: security_group_id Response -------- .. rest_parameters:: parameters.yaml - security_group: security_group - description: description - id: security_group_id_body - name: name - rules: rules - tenant_id: tenant_id_body **Example Show security group: JSON response** .. literalinclude:: ../../doc/api_samples/os-security-groups/security-groups-get-resp.json :language: javascript Update Security Group ===================== .. rest_method:: PUT /os-security-groups/{security_group_id} Updates a security group. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - security_group_id: security_group_id - name: name - description: description **Example Update security group: JSON request** .. literalinclude:: ../../doc/api_samples/os-security-groups/security-group-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - security_group: security_group - description: description - id: security_group_id_body - name: name - rules: rules - tenant_id: tenant_id_body **Example Update security group: JSON response** .. literalinclude:: ../../doc/api_samples/os-security-groups/security-groups-create-resp.json :language: javascript Delete Security Group ===================== .. rest_method:: DELETE /os-security-groups/{security_group_id} Deletes a security group. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - security_group_id: security_group_id Response -------- There is no body content for the response of a successful DELETE query. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-server-external-events.inc0000664000175000017500000000424300000000000023152 0ustar00zuulzuul00000000000000.. -*- rst -*- ==================================================== Create external events (os-server-external-events) ==================================================== .. warning:: This is an ``admin`` level service API only designed to be used by other OpenStack services. The point of this API is to coordinate between Nova and Neutron, Nova and Cinder, Nova and Ironic (and potentially future services) on activities they both need to be involved in, such as network hotplugging. Unless you are writing Neutron, Cinder or Ironic code you **should not** be using this API. Creates one or more external events. The API dispatches each event to a server instance. Run Events ========== .. rest_method:: POST /os-server-external-events Creates one or more external events, which the API dispatches to the host a server is assigned to. If the server is not currently assigned to a host the event will not be delivered. You will receive back the list of events that you submitted, with an updated ``code`` and ``status`` indicating their level of success. 
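As a hedged illustration only (this request is not one of the official API
samples; the UUIDs are made-up placeholders and ``$TOKEN``/``$COMPUTE`` are
assumed environment variables), a service could submit a single event with a
request along these lines:

.. code-block:: bash

   # Illustrative only: tell Nova that a VIF plug completed for a server.
   # 'tag' identifies the affected resource (here, the Neutron port).
   curl -s -X POST "$COMPUTE/os-server-external-events" \
        -H "X-Auth-Token: $TOKEN" \
        -H "Content-Type: application/json" \
        -d '{"events": [{"name": "network-vif-plugged",
                         "server_uuid": "3df201cf-2451-44f2-8d25-a4ca826fc1f3",
                         "status": "completed",
                         "tag": "ce531f90-199f-48c0-816c-13e38010b442"}]}'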
Normal response codes: 200, 207 A 200 will be returned if all events succeeded, 207 will be returned if any events could not be processed. The ``code`` attribute for the event will explain further what went wrong. Error response codes: badRequest(400), unauthorized(401), forbidden(403) .. note:: Prior to the fix for `bug 1855752`_, error response code 404 may be erroneously returned when all events failed. .. _bug 1855752: https://bugs.launchpad.net/nova/+bug/1855752 Request ------- .. rest_parameters:: parameters.yaml - events: events - name: event_name - server_uuid: server_uuid - status: event_status - tag: event_tag **Example Run Events** .. literalinclude:: ../../doc/api_samples/os-server-external-events/event-create-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - events: events - code: code - name: event_name - server_uuid: server_uuid - status: event_status - tag: event_tag **Example Run Events** .. literalinclude:: ../../doc/api_samples/os-server-external-events/event-create-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-server-groups.inc0000664000175000017500000000701700000000000021347 0ustar00zuulzuul00000000000000.. -*- rst -*- ================================== Server groups (os-server-groups) ================================== Lists, shows information for, creates, and deletes server groups. List Server Groups ================== .. rest_method:: GET /os-server-groups Lists all server groups for the tenant. Administrative users can use the ``all_projects`` query parameter to list all server groups for all projects. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - all_projects: all_projects - limit: limit_simple - offset: offset_simple Response -------- .. rest_parameters:: parameters.yaml - server_groups: server_groups_list - id: server_group_id_body - name: name_server_group - policies: policies - members: members - metadata: metadata_server_group_max_2_63 - project_id: project_id_server_group - user_id: user_id_server_group - policy: policy_name - rules: policy_rules **Example List Server Groups (2.64): JSON response** .. literalinclude:: ../../doc/api_samples/os-server-groups/v2.64/server-groups-list-resp.json :language: javascript Create Server Group =================== .. rest_method:: POST /os-server-groups Creates a server group. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_group: server_group - name: name_server_group - policies: policies - policy: policy_name - rules: policy_rules_optional **Example Create Server Group (2.64): JSON request** .. literalinclude:: ../../doc/api_samples/os-server-groups/v2.64/server-groups-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - server_group: server_group - id: server_group_id_body - name: name_server_group - policies: policies - members: members - metadata: metadata_server_group_max_2_63 - project_id: project_id_server_group - user_id: user_id_server_group - policy: policy_name - rules: policy_rules **Example Create Server Group (2.64): JSON response** .. 
literalinclude:: ../../doc/api_samples/os-server-groups/v2.64/server-groups-post-resp.json :language: javascript Show Server Group Details ========================= .. rest_method:: GET /os-server-groups/{server_group_id} Shows details for a server group. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_group_id: server_group_id Response -------- .. rest_parameters:: parameters.yaml - server_group: server_group - id: server_group_id_body - name: name_server_group - policies: policies - members: members - metadata: metadata_server_group_max_2_63 - project_id: project_id_server_group - user_id: user_id_server_group - policy: policy_name - rules: policy_rules **Example Show Server Group Details (2.64): JSON response** .. literalinclude:: ../../doc/api_samples/os-server-groups/v2.64/server-groups-get-resp.json :language: javascript Delete Server Group =================== .. rest_method:: DELETE /os-server-groups/{server_group_id} Deletes a server group. Normal response codes: 204 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_group_id: server_group_id Response -------- There is no body content for the response of a successful DELETE action. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-server-password.inc0000664000175000017500000000414000000000000021664 0ustar00zuulzuul00000000000000.. -*- rst -*- ================================================ Servers password (servers, os-server-password) ================================================ Shows the encrypted administrative password. Also, clears the encrypted administrative password for a server, which removes it from the metadata server. Show Server Password ==================== .. rest_method:: GET /servers/{server_id}/os-server-password Shows the administrative password for a server. This operation calls the metadata service to query metadata information and does not read password information from the server itself. The password saved in the metadata service is typically encrypted using the public SSH key injected into this server, so the SSH private key is needed to read the password. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path Response -------- .. rest_parameters:: parameters.yaml - password: password **Example Show Server Password** .. literalinclude:: ../../doc/api_samples/os-server-password/get-password-resp.json :language: javascript Clear Admin Password ==================== .. rest_method:: DELETE /servers/{server_id}/os-server-password Clears the encrypted administrative password for a server, which removes it from the database. This action does not actually change the instance server password. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 204 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. 
rest_parameters:: parameters.yaml - server_id: server_id_path Response -------- If successful, this method does not return content in the response body. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-server-shares.inc0000664000175000017500000000751100000000000021314 0ustar00zuulzuul00000000000000.. -*- rst -*- =================================================================== Servers with shares attachments (servers, shares) =================================================================== Attaches shares that are created through the Manila share API to server instances. Also, lists share attachments for a server, shows details for a share attachment, and detaches a share (New in version 2.97). List share attachments for an instance ======================================= .. rest_method:: GET /servers/{server_id}/shares List share attachments for an instance. Normal response codes: 200 Error response codes: badrequest(400), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path Response -------- .. rest_parameters:: parameters.yaml - shares: shares_body - share_id: share_id_body - status: share_status_body - tag: share_tag_body **Example List share attachments for an instance: JSON response** .. literalinclude:: ../../doc/api_samples/os-server-shares/v2.97/server-shares-list-resp.json :language: javascript Attach a share to an instance ============================== .. rest_method:: POST /servers/{server_id}/shares Attach a share to an instance. Normal response codes: 201 Error response codes: badRequest(400), forbidden(403), itemNotFound(404), conflict(409) .. note:: This action is only valid when the server is in ``STOPPED`` state. .. note:: This action also needs specific configurations, check the documentation requirements to configure your environment and support this feature. Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - share_id: share_id_body - tag: share_tag_body **Example Attach a share to an instance: JSON request** .. literalinclude:: ../../doc/api_samples/os-server-shares/v2.97/server-shares-create-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - shares: shares_body - share_id: share_id_body - status: share_status_body - tag: share_tag_body **Example Attach a share to an instance: JSON response** .. literalinclude:: ../../doc/api_samples/os-server-shares/v2.97/server-shares-create-resp.json :language: javascript Show a detail of a share attachment ==================================== .. rest_method:: GET /servers/{server_id}/shares/{share_id} Show a detail of a share attachment. Normal response codes: 200 Error response codes: badRequest(400), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - share_id: share_id_path Response -------- .. rest_parameters:: parameters.yaml - share: share_body - uuid: share_uuid_body - share_id: share_id_body - status: share_status_body - tag: share_tag_body - export_location: share_export_location_body .. note:: Optional fields can only be seen by admins. **Example Show a detail of a share attachment: JSON response** .. literalinclude:: ../../doc/api_samples/os-server-shares/v2.97/server-shares-show-resp.json :language: javascript **Example Show a detail of a share attachment with admin rights: JSON response** .. 
literalinclude:: ../../doc/api_samples/os-server-shares/v2.97/server-shares-admin-show-resp.json :language: javascript Detach a share from an instance ================================ .. rest_method:: DELETE /servers/{server_id}/shares/{share_id} Detach a share from an instance. Normal response codes: 200 Error response codes: badRequest(400), forbidden(403), itemNotFound(404), conflict(409) .. note:: This action is only valid when the server is in ``STOPPED`` or ``ERROR`` state. Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - share_id: share_id_path Response -------- No body is returned on successful request. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-server-tags.inc0000664000175000017500000000716400000000000020771 0ustar00zuulzuul00000000000000.. -*- rst -*- ============================= Server tags (servers, tags) ============================= Lists tags, creates, replaces or deletes one or more tags for a server, checks the existence of a tag for a server. Available since version 2.26 Tags have the following restrictions: - Tag is a Unicode bytestring no longer than 60 characters. - Tag is a non-empty string. - '/' is not allowed to be in a tag name - Comma is not allowed to be in a tag name in order to simplify requests that specify lists of tags - All other characters are allowed to be in a tag name - Each server can have up to 50 tags. List Tags ========= .. rest_method:: GET /servers/{server_id}/tags Lists all tags for a server. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path Response -------- .. rest_parameters:: parameters.yaml - tags: tags_no_min **Example List Tags:** .. literalinclude:: ../../doc/api_samples/os-server-tags/v2.26/server-tags-index-resp.json :language: javascript Replace Tags ============ .. rest_method:: PUT /servers/{server_id}/tags Replaces all tags on specified server with the new set of tags. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - tags: tags_no_min **Example Replace Tags:** .. literalinclude:: ../../doc/api_samples/os-server-tags/v2.26/server-tags-put-all-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - tags: tags_no_min **Example Replace Tags:** .. literalinclude:: ../../doc/api_samples/os-server-tags/v2.26/server-tags-put-all-resp.json :language: javascript Delete All Tags =============== .. rest_method:: DELETE /servers/{server_id}/tags Deletes all tags from the specified server. Normal response codes: 204 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path Response -------- There is no body content for the response of a successful DELETE query Check Tag Existence =================== .. rest_method:: GET /servers/{server_id}/tags/{tag} Checks tag existence on the server. If tag exists response with 204 status code will be returned. Otherwise returns 404. Normal response codes: 204 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - tag: tag Add a Single Tag ================ .. 
rest_method:: PUT /servers/{server_id}/tags/{tag} Adds a single tag to the server if server has no specified tag. Response code in this case is 201. If the server has specified tag just returns 204. Normal response codes: 201, 204 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - tag: tag Response -------- .. rest_parameters:: parameters.yaml - Location: tag_location Delete a Single Tag =================== .. rest_method:: DELETE /servers/{server_id}/tags/{tag} Deletes a single tag from the specified server. Normal response codes: 204 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - tag: tag Response -------- There is no body content for the response of a successful DELETE query ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-services.inc0000664000175000017500000002457600000000000020360 0ustar00zuulzuul00000000000000.. -*- rst -*- .. _os-services: ================================ Compute services (os-services) ================================ Lists all running Compute services in a region, enables or disables scheduling for a Compute service and deletes a Compute service. For an overview of Compute services, see `OpenStack Compute `__. List Compute Services ===================== .. rest_method:: GET /os-services Lists all running Compute services. Provides details why any services were disabled. .. note:: Starting with microversion 2.69 if service details cannot be loaded due to a transient condition in the deployment like infrastructure failure, the response body for those unavailable compute services in the down cells will be missing keys. See `handling down cells `__ section of the Compute API guide for more information on the keys that would be returned in the partial constructs. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - binary: binary_query - host: host_query_service Response -------- .. rest_parameters:: parameters.yaml - services: services - id: service_id_body_2_52 - id: service_id_body_2_53 - binary: binary - disabled_reason: disabled_reason_body - host: host_name_body - state: service_state - status: service_status - updated_at: updated - zone: OS-EXT-AZ:availability_zone - forced_down: forced_down_2_11 **Example List Compute Services (v2.11)** .. literalinclude:: ../../doc/api_samples/os-services/v2.11/services-list-get-resp.json :language: javascript **Example List Compute Services (v2.69)** This is a sample response for the services from the non-responsive part of the deployment. The responses for the available service records will be normal without any missing keys. .. literalinclude:: ../../doc/api_samples/os-services/v2.69/services-list-get-resp.json :language: javascript Disable Scheduling For A Compute Service ======================================== .. rest_method:: PUT /os-services/disable Disables scheduling for a Compute service. Specify the service by its host name and binary name. .. note:: Starting with microversion 2.53 this API is superseded by ``PUT /os-services/{service_id}``. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. 
rest_parameters:: parameters.yaml - host: host_name_body - binary: binary **Example Disable Scheduling For A Compute Service** .. literalinclude:: ../../doc/api_samples/os-services/service-disable-put-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - service: service - binary: binary - host: host_name_body - status: service_status **Example Disable Scheduling For A Compute Service** .. literalinclude:: ../../doc/api_samples/os-services/service-disable-put-resp.json :language: javascript Disable Scheduling For A Compute Service and Log Disabled Reason ================================================================ .. rest_method:: PUT /os-services/disable-log-reason Disables scheduling for a Compute service and logs information to the Compute service table about why a Compute service was disabled. Specify the service by its host name and binary name. .. note:: Starting with microversion 2.53 this API is superseded by ``PUT /os-services/{service_id}``. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - host: host_name_body - binary: binary - disabled_reason: disabled_reason_body **Example Disable Scheduling For A Compute Service and Log Disabled Reason** .. literalinclude:: ../../doc/api_samples/os-services/service-disable-log-put-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - service: service - binary: binary - disabled_reason: disabled_reason_body - host: host_name_body - status: service_status **Example Disable Scheduling For A Compute Service and Log Disabled Reason** .. literalinclude:: ../../doc/api_samples/os-services/service-disable-log-put-resp.json :language: javascript Enable Scheduling For A Compute Service ======================================= .. rest_method:: PUT /os-services/enable Enables scheduling for a Compute service. Specify the service by its host name and binary name. .. note:: Starting with microversion 2.53 this API is superseded by ``PUT /os-services/{service_id}``. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - host: host_name_body - binary: binary **Example Enable Scheduling For A Compute Service** .. literalinclude:: ../../doc/api_samples/os-services/service-enable-put-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - service: service - binary: binary - host: host_name_body - status: service_status **Example Enable Scheduling For A Compute Service** .. literalinclude:: ../../doc/api_samples/os-services/service-enable-put-resp.json :language: javascript Update Forced Down ================== .. rest_method:: PUT /os-services/force-down Set or unset ``forced_down`` flag for the service. ``forced_down`` is a manual override to tell nova that the service in question has been fenced manually by the operations team (either hard powered off, or network unplugged). That signals that it is safe to proceed with ``evacuate`` or other operations that nova has safety checks to prevent for hosts that are up. .. warning:: Setting a service forced down without completely fencing it will likely result in the corruption of VMs on that host. Action ``force-down`` available as of microversion 2.11. Specify the service by its host name and binary name. .. 
note:: Starting with microversion 2.53 this API is superseded by ``PUT /os-services/{service_id}``. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - host: host_name_body - binary: binary - forced_down: forced_down_2_11 **Example Update Forced Down** .. literalinclude:: ../../doc/api_samples/os-services/v2.11/service-force-down-put-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - service: service - binary: binary - host: host_name_body - forced_down: forced_down_2_11 | **Example Update Forced Down** .. literalinclude:: ../../doc/api_samples/os-services/v2.11/service-force-down-put-resp.json :language: javascript Update Compute Service ====================== .. rest_method:: PUT /os-services/{service_id} Update a compute service to enable or disable scheduling, including recording a reason why a compute service was disabled from scheduling. Set or unset the ``forced_down`` flag for the service. This operation is only allowed on services whose ``binary`` is ``nova-compute``. This API is available starting with microversion 2.53. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - service_id: service_id_path_2_53_no_version - status: service_status_2_53_in - disabled_reason: disabled_reason_2_53_in - forced_down: forced_down_2_53_in **Example Disable Scheduling For A Compute Service (v2.53)** .. literalinclude:: ../../doc/api_samples/os-services/v2.53/service-disable-log-put-req.json :language: javascript **Example Enable Scheduling For A Compute Service (v2.53)** .. literalinclude:: ../../doc/api_samples/os-services/v2.53/service-enable-put-req.json :language: javascript **Example Update Forced Down (v2.53)** .. literalinclude:: ../../doc/api_samples/os-services/v2.53/service-force-down-put-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - service: service - id: service_id_body_2_53_no_version - binary: binary - disabled_reason: disabled_reason_body - host: host_name_body - state: service_state - status: service_status - updated_at: updated - zone: OS-EXT-AZ:availability_zone - forced_down: forced_down_2_53_out **Example Disable Scheduling For A Compute Service (v2.53)** .. literalinclude:: ../../doc/api_samples/os-services/v2.53/service-disable-log-put-resp.json :language: javascript **Example Enable Scheduling For A Compute Service (v2.53)** .. literalinclude:: ../../doc/api_samples/os-services/v2.53/service-enable-put-resp.json :language: javascript **Example Update Forced Down (v2.53)** .. literalinclude:: ../../doc/api_samples/os-services/v2.53/service-force-down-put-resp.json :language: javascript Delete Compute Service ====================== .. rest_method:: DELETE /os-services/{service_id} Deletes a service. If it's a ``nova-compute`` service, then the corresponding host will be removed from all the host aggregates as well. Attempts to delete a ``nova-compute`` service which is still hosting instances will result in a 409 HTTPConflict response. The instances will need to be migrated or deleted before a compute service can be deleted. Similarly, attempts to delete a ``nova-compute`` service which is involved in in-progress migrations will result in a 409 HTTPConflict response. 
The migrations will need to be completed, for example confirming or reverting a resize, or the instances will need to be deleted before the compute service can be deleted. .. important:: Be sure to stop the actual ``nova-compute`` process on the physical host *before* deleting the service with this API. Failing to do so can lead to the running service re-creating orphaned **compute_nodes** table records in the database. Normal response codes: 204 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - service_id: service_id_path_2_52 - service_id: service_id_path_2_53 Response -------- If successful, this method does not return content in the response body. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-simple-tenant-usage.inc0000664000175000017500000001254600000000000022411 0ustar00zuulzuul00000000000000.. -*- rst -*- ======================================== Usage reports (os-simple-tenant-usage) ======================================== Reports usage statistics of compute and storage resources periodically for an individual tenant or all tenants. The usage statistics will include all instances' CPU, memory and local disk during a specific period. .. warning:: The `os-simple-tenant-usage` will report usage statistics based on the latest flavor that is configured in the virtual machine (VM), and ignoring stop, pause, and other events that might have happened with the VM. Therefore, it uses the time the VM existed in the cloud environment to execute the usage accounting. More information can be found at http://eavesdrop.openstack.org/meetings/nova/2020/nova.2020-12-03-16.00.log.txt, and https://review.opendev.org/c/openstack/nova/+/711113 Microversion 2.40 added pagination (and ``next`` links) to the usage statistics via optional ``limit`` and ``marker`` query parameters. If ``limit`` isn't provided, the configurable ``max_limit`` will be used which currently defaults to 1000. Older microversions will not accept these new paging query parameters, but they will start to silently limit by ``max_limit``. .. code-block:: none /os-simple-tenant-usage?limit={limit}&marker={instance_uuid} /os-simple-tenant-usage/{tenant_id}?limit={limit}&marker={instance_uuid} .. note:: A tenant's usage statistics may span multiple pages when the number of instances exceeds ``limit``, and API consumers will need to stitch together the aggregate results if they still want totals for all instances in a specific time window, grouped by tenant. List Tenant Usage Statistics For All Tenants ============================================ .. rest_method:: GET /os-simple-tenant-usage Lists usage statistics for all tenants. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - detailed: detailed_simple_tenant_usage - end: end_simple_tenant_usage - start: start_simple_tenant_usage - limit: usage_limit - marker: usage_marker Response -------- .. 
rest_parameters:: parameters.yaml - tenant_usages: tenant_usages - start: start_simple_tenant_usage_body - stop: stop_simple_tenant_usage - tenant_id: tenant_id_body - total_hours: total_hours - total_local_gb_usage: total_local_gb_usage - total_memory_mb_usage: total_memory_mb_usage - total_vcpus_usage: total_vcpus_usage - server_usages: server_usages_optional - server_usages.ended_at: ended_at_optional - server_usages.flavor: flavor_name_optional - server_usages.hours: hours_optional - server_usages.instance_id: server_id_optional - server_usages.local_gb: local_gb_simple_tenant_usage_optional - server_usages.memory_mb: memory_mb_simple_tenant_usage_optional - server_usages.name: server_name_optional - server_usages.started_at: started_at_optional - server_usages.state: vm_state_optional - server_usages.tenant_id: tenant_id_optional - server_usages.uptime: uptime_simple_tenant_usage_optional - server_usages.vcpus: vcpus_optional - tenant_usages_links: usage_links **Example List Tenant Usage For All Tenants (v2.40): JSON response** If the ``detailed`` query parameter is not specified or is set to other than 1 (e.g. ``detailed=0``), the response is as follows: .. literalinclude:: ../../doc/api_samples/os-simple-tenant-usage/v2.40/simple-tenant-usage-get.json :language: javascript If the ``detailed`` query parameter is set to one (``detailed=1``), the response includes ``server_usages`` information for each tenant. The response is as follows: .. literalinclude:: ../../doc/api_samples/os-simple-tenant-usage/v2.40/simple-tenant-usage-get-detail.json :language: javascript Show Usage Statistics For Tenant ================================ .. rest_method:: GET /os-simple-tenant-usage/{tenant_id} Shows usage statistics for a tenant. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - tenant_id: tenant_id - end: end_simple_tenant_usage - start: start_simple_tenant_usage - limit: usage_limit - marker: usage_marker Response -------- .. rest_parameters:: parameters.yaml - tenant_usage: tenant_usage - server_usages: server_usages - server_usages.ended_at: ended_at - server_usages.flavor: flavor_name - server_usages.hours: hours - server_usages.instance_id: server_id - server_usages.local_gb: local_gb_simple_tenant_usage - server_usages.memory_mb: memory_mb_simple_tenant_usage - server_usages.name: server_name - server_usages.started_at: started_at - server_usages.state: OS-EXT-STS:vm_state - server_usages.tenant_id: tenant_id_body - server_usages.uptime: uptime_simple_tenant_usage - server_usages.vcpus: vcpus - start: start_simple_tenant_usage_body - stop: stop_simple_tenant_usage - tenant_id: tenant_id_body - total_hours: total_hours - total_local_gb_usage: total_local_gb_usage - total_memory_mb_usage: total_memory_mb_usage - total_vcpus_usage: total_vcpus_usage - tenant_usage_links: usage_links **Example Show Usage Details For Tenant (v2.40): JSON response** .. literalinclude:: ../../doc/api_samples/os-simple-tenant-usage/v2.40/simple-tenant-usage-get-specific.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-tenant-network.inc0000664000175000017500000000724700000000000021511 0ustar00zuulzuul00000000000000.. -*- rst -*- ==================================================== Project networks (os-tenant-networks) (DEPRECATED) ==================================================== .. 
warning:: These APIs are proxy calls to the Network service. Nova has deprecated all the proxy APIs and users should use the native APIs instead. These will fail with a 404 starting from microversion 2.36. See: `Relevant Network APIs `__. Creates, lists, shows information for, and deletes project networks. List Project Networks ===================== .. rest_method:: GET /os-tenant-networks Lists all project networks. Policy defaults enable only users with the administrative role or the owner of the network to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Response -------- **Example List Project Networks: JSON response** .. literalinclude:: ../../doc/api_samples/os-tenant-networks/networks-list-res.json :language: javascript Create Project Network ====================== .. rest_method:: POST /os-tenant-networks .. note:: This API is only implemented for the nova-network service and will result in a 503 error response if the cloud is using the Neutron networking service. Use the Neutron ``networks`` API to create a new network. Creates a project network. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), conflict(409), gone(410), serviceUnavailable(503) **Example Create Project Network: JSON request** .. literalinclude:: ../../doc/api_samples/os-tenant-networks/networks-post-req.json :language: javascript Response -------- **Example Create Project Network: JSON response** .. literalinclude:: ../../doc/api_samples/os-tenant-networks/networks-post-res.json :language: javascript Show Project Network Details ============================ .. rest_method:: GET /os-tenant-networks/{network_id} Shows details for a project network. Policy defaults enable only users with the administrative role or the owner of the network to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - network_id: network_id Response -------- **Example Show Project Network Details: JSON response** .. literalinclude:: ../../doc/api_samples/os-tenant-networks/networks-post-res.json :language: javascript Delete Project Network ====================== .. rest_method:: DELETE /os-tenant-networks/{network_id} .. note:: This API is only implemented for the nova-network service and will result in a 500 error response if the cloud is using the Neutron networking service. Use the Neutron ``networks`` API to delete an existing network. Deletes a project network. Policy defaults enable only users with the administrative role or the owner of the network to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409), gone(410) Request ------- .. rest_parameters:: parameters.yaml - network_id: network_id Response -------- There is no body content for the response of a successful DELETE query. 
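For orientation, the deprecated proxy calls in this section are plain HTTP requests against the compute endpoint. Below is a minimal sketch, assuming a hypothetical compute endpoint URL, token and network UUID, of deleting a project network while pinned to a microversion below 2.36 (from 2.36 onward these proxy APIs fail with a 404):

.. code-block:: python

   # Minimal illustrative sketch, not an official client. The endpoint URL,
   # token and network UUID below are placeholders.
   import requests

   COMPUTE_ENDPOINT = "http://controller:8774/v2.1"     # hypothetical endpoint
   TOKEN = "gAAAA..."                                    # placeholder Keystone token
   NETWORK_ID = "616fb98f-46ca-475e-917e-2563e5a8cd19"   # hypothetical network UUID

   resp = requests.delete(
       f"{COMPUTE_ENDPOINT}/os-tenant-networks/{NETWORK_ID}",
       headers={
           "X-Auth-Token": TOKEN,
           # Pin below 2.36; these deprecated proxy APIs return 404
           # starting from microversion 2.36.
           "OpenStack-API-Version": "compute 2.35",
       },
   )
   assert resp.status_code == 202   # a successful DELETE returns no body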
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-virtual-interfaces.inc0000664000175000017500000000421600000000000022331 0ustar00zuulzuul00000000000000.. -*- rst -*- ============================================================ Servers virtual interfaces (servers, os-virtual-interfaces) ============================================================ Lists virtual interfaces for a server. .. warning:: Since this API is only implemented for the nova-network, the API is deprecated from the Microversion 2.44. This API will fail with a 404 starting from microversion 2.44. It was removed in the 18.0.0 Rocky release. To query the server attached neutron interface, please use the API ``GET /servers/{server_uuid}/os-interface``. .. note:: This API is only implemented for the nova-network service and will result in a 400 error response if the cloud is using the Neutron networking service. Use the Neutron ``ports`` API to list ports for a given server by filtering ports based on the port ``device_id`` which is the ``{server_id}``. List Virtual Interfaces ======================= .. rest_method:: GET /servers/{server_id}/os-virtual-interfaces Lists the virtual interfaces for an instance. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), gone(410) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - limit: limit_simple - offset: offset_simple Response -------- .. rest_parameters:: parameters.yaml - virtual_interfaces: virtual_interfaces - id: virtual_interface_id - mac_address: mac_address - net_id: net_id_resp_2_12 .. note:: The API v2 returns the network ID in the "OS-EXT-VIF-NET:net_id" response attribute. But API v2.1 base version does not return the network ID. Network ID has been added in v2.12 micro-version and returns it in the "net_id" attribute. **Example List Virtual Interfaces: JSON response** .. literalinclude:: ../../doc/api_samples/os-virtual-interfaces/v2.12/vifs-list-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-volume-attachments-swap.inc0000664000175000017500000000371500000000000023315 0ustar00zuulzuul00000000000000.. -*- rst -*- .. _os-volume-attachments-swap: =============================================================================== Update ("swapping") Server volume attachments (servers, os-volume\_attachments) =============================================================================== Update ("swapping") the server volume attachments which means swapping the volume attached to the server. Update(swapping) a volume attachment ==================================== .. rest_method:: PUT /servers/{server_id}/os-volume_attachments/{volume_id} Update a volume attachment. .. note:: This action only valid when the server is in ACTIVE, PAUSED and RESIZED state, or a conflict(409) error will be returned. .. Important:: When updating volumeId, this API **MUST** only be used as part of a larger orchestrated volume migration operation initiated in the block storage service via the ``os-retype`` or ``os-migrate_volume`` volume actions. 
Direct usage of this API is not supported and will be blocked by nova with a 409 conflict. Furthermore, updating ``volumeId`` via this API is only implemented by `certain compute drivers`_. .. _certain compute drivers: https://docs.openstack.org/nova/latest/user/support-matrix.html#operation_swap_volume Updating, or what is commonly referred to as "swapping", volume attachments with volumes that have more than one read/write attachment, is not supported. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - volume_id: volume_id_swap_src - volumeAttachment: volumeAttachment_put - volumeId: volumeId_swap **Example Update a volume attachment: JSON request** .. literalinclude:: ../../doc/api_samples/os-volume_attachments/update-volume-req.json :language: javascript Response -------- No body is returned on successful request. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-volume-attachments.inc0000664000175000017500000001715500000000000022350 0ustar00zuulzuul00000000000000.. -*- rst -*- =================================================================== Servers with volume attachments (servers, os-volume\_attachments) =================================================================== Attaches volumes that are created through the volume API to server instances. Also, lists volume attachments for a server, shows details for a volume attachment, and detaches a volume. List volume attachments for an instance ======================================= .. rest_method:: GET /servers/{server_id}/os-volume_attachments List volume attachments for an instance. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - limit: limit_simple - offset: offset_simple Response -------- .. rest_parameters:: parameters.yaml - volumeAttachments: volumeAttachments - id: volume_attachment_id_resp - serverId: server_id - volumeId: volumeId_resp - device: attachment_device_resp - tag: device_tag_bdm_attachment_resp - delete_on_termination: delete_on_termination_attachments_resp - attachment_id: attachment_volume_id_resp - bdm_uuid: attachment_bdm_id_resp **Example List volume attachments for an instance: JSON response** .. literalinclude:: ../../doc/api_samples/os-volume_attachments/list-volume-attachments-resp.json :language: javascript **Example List tagged volume attachments for an instance (v2.89): JSON response** .. literalinclude:: ../../doc/api_samples/os-volume_attachments/v2.89/list-volume-attachments-resp.json :language: javascript Attach a volume to an instance ============================== .. rest_method:: POST /servers/{server_id}/os-volume_attachments Attach a volume to an instance. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) .. note:: From v2.20 attach a volume to an instance in SHELVED or SHELVED_OFFLOADED state is allowed. .. note:: From v2.60, attaching a multiattach volume to multiple instances is supported for instances that are not SHELVED_OFFLOADED. The ability to actually support a multiattach volume depends on the volume type and compute hosting the instance. .. 
note:: This is an asynchronous API, callers should poll the status and list of attachments of the volume within the volume API to determine when the attachment has completed successfully. Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - volumeAttachment: volumeAttachment_post - volumeId: volumeId - device: device - tag: device_tag_bdm_attachment - delete_on_termination: delete_on_termination_attachments_req **Example Attach a volume to an instance: JSON request** .. literalinclude:: ../../doc/api_samples/os-volume_attachments/attach-volume-to-server-req.json :language: javascript **Example Attach a volume to an instance and tag it (v2.49): JSON request** .. literalinclude:: ../../doc/api_samples/os-volume_attachments/v2.49/attach-volume-to-server-req.json :language: javascript **Example Attach a volume to an instance with "delete_on_termination" (v2.79): JSON request** .. literalinclude:: ../../doc/api_samples/os-volume_attachments/v2.79/attach-volume-to-server-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - volumeAttachment: volumeAttachment - device: device_resp - id: attachment_id_resp - serverId: server_id - volumeId: volumeId_resp - tag: device_tag_bdm_attachment_resp - delete_on_termination: delete_on_termination_attachments_resp **Example Attach a volume to an instance: JSON response** .. literalinclude:: ../../doc/api_samples/os-volume_attachments/attach-volume-to-server-resp.json :language: javascript **Example Attach a tagged volume to an instance (v2.70): JSON response** .. literalinclude:: ../../doc/api_samples/os-volume_attachments/v2.70/attach-volume-to-server-resp.json :language: javascript **Example Attach a volume with "delete_on_termination" (v2.79): JSON response** .. literalinclude:: ../../doc/api_samples/os-volume_attachments/v2.79/attach-volume-to-server-resp.json :language: javascript Show a detail of a volume attachment ==================================== .. rest_method:: GET /servers/{server_id}/os-volume_attachments/{volume_id} Show a detail of a volume attachment. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - volume_id: volume_id_attached_path Response -------- .. rest_parameters:: parameters.yaml - volumeAttachment: volumeAttachment - id: volume_attachment_id_resp - serverId: server_id - volumeId: volumeId_resp - device: attachment_device_resp - tag: device_tag_bdm_attachment_resp - delete_on_termination: delete_on_termination_attachments_resp - attachment_id: attachment_volume_id_resp - bdm_uuid: attachment_bdm_id_resp **Example Show a detail of a volume attachment: JSON response** .. literalinclude:: ../../doc/api_samples/os-volume_attachments/volume-attachment-detail-resp.json :language: javascript **Example Show a detail of a tagged volume attachment (v2.89): JSON response** .. literalinclude:: ../../doc/api_samples/os-volume_attachments/v2.89/volume-attachment-detail-resp.json :language: javascript Update a volume attachment ========================== .. rest_method:: PUT /servers/{server_id}/os-volume_attachments/{volume_id} Update a volume attachment. Policy default role is 'rule:admin_or_owner', its scope is [project], which allow project members or admins to change the fields of an attached volume of a server. Cloud providers can change these permissions through the ``policy.yaml`` file. 
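For orientation, a minimal sketch of such an update follows; the compute endpoint URL, token and UUIDs are placeholders, and the request body fields are the ones documented below:

.. code-block:: python

   # Minimal illustrative sketch, not an official client: toggle
   # delete_on_termination on an existing volume attachment (microversion 2.85+).
   # Endpoint URL, token and UUIDs are placeholders.
   import requests

   COMPUTE_ENDPOINT = "http://controller:8774/v2.1"      # hypothetical endpoint
   TOKEN = "gAAAA..."                                     # placeholder Keystone token
   SERVER_ID = "2aad99d3-7aa4-41e9-b4e6-3f96447833a8"     # hypothetical server UUID
   VOLUME_ID = "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"     # hypothetical volume UUID

   resp = requests.put(
       f"{COMPUTE_ENDPOINT}/servers/{SERVER_ID}/os-volume_attachments/{VOLUME_ID}",
       headers={
           "X-Auth-Token": TOKEN,
           "OpenStack-API-Version": "compute 2.85",
       },
       json={
           "volumeAttachment": {
               "volumeId": VOLUME_ID,           # kept unchanged here
               "delete_on_termination": True,   # the only field being updated
           }
       },
   )
   assert resp.status_code == 202   # no body is returned on success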
Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - volume_id: volume_id_path - volumeAttachment: volumeAttachment_put - volumeId: volumeId_update - delete_on_termination: delete_on_termination_put_req - device: attachment_device_put_req - serverId: attachment_server_id_put_req - tag: device_tag_bdm_attachment_put_req - id: attachment_id_put_req .. note:: Other than ``volumeId``, as of v2.85 only ``delete_on_termination`` may be changed from the current value. **Example Update a volume attachment (v2.85): JSON request** .. literalinclude:: ../../doc/api_samples/os-volume_attachments/v2.85/update-volume-attachment-delete-flag-req.json :language: javascript Response -------- No body is returned on successful request. Detach a volume from an instance ================================ .. rest_method:: DELETE /servers/{server_id}/os-volume_attachments/{volume_id} Detach a volume from an instance. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) .. note:: From v2.20 detach a volume from an instance in SHELVED or SHELVED_OFFLOADED state is allowed. .. note:: This is an asynchronous API, callers should poll the list of volume attachments provided by ``GET /servers/{server_id}/os-volume_attachments`` to determine when the detachment of the volume has completed successfully. Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - volume_id: volume_id_to_detach_path Response -------- No body is returned on successful request. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/os-volumes.inc0000664000175000017500000002202300000000000020210 0ustar00zuulzuul00000000000000.. -*- rst -*- ========================================================= Volume extension (os-volumes, os-snapshots) (DEPRECATED) ========================================================= .. warning:: These APIs are proxy calls to the Volume service. Nova has deprecated all the proxy APIs and users should use the native APIs instead. These will fail with a 404 starting from microversion 2.36. See: `Relevant Volume APIs `__. Manages volumes and snapshots for use with the Compute API. Lists, shows details, creates, and deletes volumes and snapshots. List Volumes ============ .. rest_method:: GET /os-volumes Lists the volumes associated with the account. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - limit: limit_simple - offset: offset_simple Response -------- .. rest_parameters:: parameters.yaml - volumes: volumes - attachments: volumeAttachments - attachments.device: attachment_device_resp - attachments.id: attachment_id_resp - attachments.serverId: attachment_server_id_resp - attachments.volumeId: attachment_volumeId_resp - availabilityZone: OS-EXT-AZ:availability_zone - createdAt: created - displayDescription: display_description - displayName: display_name - id: volume_id_resp - metadata: metadata_object - size: size - snapshotId: snapshot_id - status: volume_status - volumeType: volume_type | **Example List Volumes** .. literalinclude:: ../../doc/api_samples/os-volumes/os-volumes-index-resp.json :language: javascript Create Volume ============= .. 
rest_method:: POST /os-volumes Creates a new volume. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - volume: volume - size: size - availability_zone: OS-EXT-AZ:availability_zone_optional - display_name: display_name_optional - display_description: display_description_optional - metadata: metadata - volume_type: volume_type_optional - snapshot_id: snapshot_id_optional **Example Create Volume** .. literalinclude:: ../../doc/api_samples/os-volumes/os-volumes-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - volume: volume - attachments: volumeAttachments - attachments.device: attachment_device_resp - attachments.id: attachment_id_resp - attachments.serverId: attachment_server_id_resp - attachments.volumeId: attachment_volumeId_resp - availabilityZone: OS-EXT-AZ:availability_zone - createdAt: created - displayName: display_name - displayDescription: display_description - id: volume_id_resp - metadata: metadata_object - size: size - snapshotId: snapshot_id - status: volume_status - volumeType: volume_type | **Example Create Volume** .. literalinclude:: ../../doc/api_samples/os-volumes/os-volumes-post-resp.json :language: javascript List Volumes With Details ========================= .. rest_method:: GET /os-volumes/detail Lists all volumes with details. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - limit: limit_simple - offset: offset_simple Response -------- .. rest_parameters:: parameters.yaml - volumes: volumes - attachments: volumeAttachments - attachments.device: attachment_device_resp - attachments.id: attachment_id_resp - attachments.serverId: attachment_server_id_resp - attachments.volumeId: attachment_volumeId_resp - availabilityZone: OS-EXT-AZ:availability_zone - createdAt: created - displayName: display_name - displayDescription: display_description - id: volume_id_resp - metadata: metadata_object - size: size - snapshotId: snapshot_id - status: volume_status - volumeType: volume_type | **Example List Volumes With Details** .. literalinclude:: ../../doc/api_samples/os-volumes/os-volumes-detail-resp.json :language: javascript Show Volume Details =================== .. rest_method:: GET /os-volumes/{volume_id} Shows details for a given volume. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - volume_id: volume_id_path Response -------- .. rest_parameters:: parameters.yaml - volume: volume - attachments: volumeAttachments - attachment.device: attachment_device_resp - attachments.id: attachment_id_resp - attachments.serverId: attachment_server_id_resp - attachments.volumeId: attachment_volumeId_resp - availabilityZone: OS-EXT-AZ:availability_zone - createdAt: created - displayName: display_name - displayDescription: display_description - id: volume_id_resp - metadata: metadata_object - size: size - snapshotId: snapshot_id - status: volume_status - volumeType: volume_type | **Example Show Volume Details** .. literalinclude:: ../../doc/api_samples/os-volumes/os-volumes-get-resp.json :language: javascript Delete Volume ============= .. rest_method:: DELETE /os-volumes/{volume_id} Deletes a volume. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. 
rest_parameters:: parameters.yaml - volume_id: volume_id_path Response -------- There is no body content for the response of a successful DELETE query List Snapshots ============== .. rest_method:: GET /os-snapshots Lists snapshots. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - limit: limit_simple - offset: offset_simple Response -------- .. rest_parameters:: parameters.yaml - snapshots: snapshots - id: snapshot_id - createdAt: created - displayName: snapshot_name - displayDescription: snapshot_description - size: size - status: snapshot_status - volumeId: volume_id | **Example List Snapshots** .. literalinclude:: ../../doc/api_samples/os-snapshots/snapshots-list-resp.json :language: javascript Create Snapshot =============== .. rest_method:: POST /os-snapshots Creates a new snapshot. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - snapshot: snapshot - volume_id: volume_id - display_description: snapshot_description_optional - display_name: snapshot_name_optional - force: force_snapshot **Example Create Snapshot** .. literalinclude:: ../../doc/api_samples/os-snapshots/snapshot-create-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - snapshot: snapshot - id: snapshot_id - createdAt: created - displayName: snapshot_name - displayDescription: snapshot_description - volumeId: volume_id - size: size - status: snapshot_status **Example Create Snapshot** .. literalinclude:: ../../doc/api_samples/os-snapshots/snapshot-create-resp.json :language: javascript List Snapshots With Details =========================== .. rest_method:: GET /os-snapshots/detail Lists all snapshots with details. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - limit: limit_simple - offset: offset_simple Response -------- .. rest_parameters:: parameters.yaml - snapshots: snapshots - id: snapshot_id - createdAt: created - displayName: snapshot_name - displayDescription: snapshot_description - volumeId: volume_id - size: size - status: snapshot_status | **Example List Snapshots With Details** .. literalinclude:: ../../doc/api_samples/os-snapshots/snapshots-detail-resp.json :language: javascript Show Snapshot Details ===================== .. rest_method:: GET /os-snapshots/{snapshot_id} Shows details for a given snapshot. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - snapshot_id: snapshot_id_path Response -------- .. rest_parameters:: parameters.yaml - snapshot: snapshot - id: snapshot_id - createdAt: created - displayName: snapshot_name - displayDescription: snapshot_description - volumeId: volume_id - size: size - status: snapshot_status | **Example Show Snapshot Details** .. literalinclude:: ../../doc/api_samples/os-snapshots/snapshots-show-resp.json :language: javascript Delete Snapshot =============== .. rest_method:: DELETE /os-snapshots/{snapshot_id} Deletes a snapshot from the account. This operation is asynchronous. You must list snapshots repeatedly to determine whether the snapshot was deleted. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. 
rest_parameters:: parameters.yaml - snapshot_id: snapshot_id_path Response -------- There is no body content for the response of a successful DELETE query ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/parameters.yaml0000664000175000017500000064121400000000000020444 0ustar00zuulzuul00000000000000# variables in header image_location: description: | The image location URL of the image or backup created, HTTP header "Location: " will be returned. .. note:: The URL returned may not be accessible to users and should not be relied upon. Use microversion 2.45 or simply parse the image ID out of the URL in the Location response header. in: header required: true type: string max_version: 2.44 server_location: description: | The location URL of the server, HTTP header "Location: " will be returned. in: header required: true type: string tag_location: description: | The location of the tag. It's individual tag URL which can be used for checking the existence of the tag on the server or deleting the tag from the server. in: header required: true type: string x-compute-request-id_resp: description: | The local request ID, which is a unique ID generated automatically for tracking each request to nova. It is associated with the request and appears in the log lines for that request. By default, the middleware configuration ensures that the local request ID appears in the log files. .. note:: This header exists for backward compatibility. in: header required: true type: string x-openstack-request-id_req: description: | The global request ID, which is a unique common ID for tracking each request in OpenStack components. The format of the global request ID must be ``req-`` + UUID (UUID4). If not in accordance with the format, it is ignored. It is associated with the request and appears in the log lines for that request. By default, the middleware configuration ensures that the global request ID appears in the log files. in: header required: false type: string min_version: 2.46 x-openstack-request-id_resp: description: | The local request ID, which is a unique ID generated automatically for tracking each request to nova. It is associated with the request and appears in the log lines for that request. By default, the middleware configuration ensures that the local request ID appears in the log files. in: header required: true type: string min_version: 2.46 # variables in path agent_build_id: description: | The id of the agent build. in: path required: true type: string aggregate_id: description: | The aggregate ID. in: path required: true type: integer api_version: in: path required: true type: string description: > The API version as returned in the links from the ``GET /`` call. before_timestamp: description: | Filters the response by the date and time before which to list usage audits. The date and time stamp format is as follows: :: CCYY-MM-DD hh:mm:ss.NNNNNN For example, ``2015-08-27 09:49:58`` or ``2015-08-27 09:49:58.123456``. in: path required: true type: string cell_id: description: | The UUID of the cell. in: path required: true type: string console_id: description: | The UUID of the console. in: path required: true type: string console_token: description: | Console authentication token. in: path required: true type: string # Used in the request path for PUT /os-services/disable-log-reason before # microversion 2.53. disabled_reason: description: | The reason for disabling a service. 
in: path required: false type: string domain: description: | The registered DNS domain that the DNS drivers publish. in: path required: true type: string fixed_ip_path: description: | The fixed IP of interest to you. in: path required: true type: string flavor_extra_spec_key: description: | The extra spec key for the flavor. in: path required: true type: string flavor_id: description: | The ID of the flavor. in: path required: true type: string floating_ip_id: description: | The ID of the floating IP address. in: path required: true type: string host_name: description: | The name of the host. in: path required: true type: string hypervisor_hostname_pattern: description: | The hypervisor host name or a portion of it. The hypervisor hosts are selected with the host name matching this pattern. in: path required: true type: string hypervisor_id: description: | The ID of the hypervisor. in: path required: true type: integer max_version: 2.52 hypervisor_id_uuid: description: | The ID of the hypervisor as a UUID. in: path required: true type: string min_version: 2.53 image_id: description: | The UUID of the image. in: path required: true type: string instance_id: description: | The UUID of the instance. in: path required: true type: string ip: description: | The IP address. in: path required: true type: string key: description: | The metadata item key, as a string. Maximum length is 255 characters. in: path required: true type: string keypair_name_path: description: | The keypair name. in: path required: true type: string migration_id_path: description: | The ID of the server migration. in: path required: true type: integer network_id: description: | The UUID of the network. in: path required: true type: string network_label: description: | The network label, such as ``public`` or ``private``. in: path required: true type: string node_id: description: | The node ID. in: path required: true type: string port_id_path: description: | The UUID of the port. in: path required: true type: string quota_class_id: "a_class_id description: | The ID of the quota class. Nova supports the ``default`` Quota Class only. in: path required: true type: string request_id: description: | The ID of the request. in: path required: true type: string security_group_default_rule_id: description: | The UUID of the security group rule. in: path required: true type: string security_group_id: description: | The ID of the security group. in: path required: true type: string security_group_rule_id: description: | The ID of the security group rule. in: path required: true type: string server_group_id: description: | The UUID of the server group. in: path required: true type: string server_id_path: description: | The UUID of the server. in: path required: true type: string service_id_path_2_52: description: | The id of the service. .. note:: This may not uniquely identify a service in a multi-cell deployment. in: path required: true type: integer max_version: 2.52 service_id_path_2_53: description: | The id of the service as a uuid. This uniquely identifies the service in a multi-cell deployment. in: path required: true type: string min_version: 2.53 service_id_path_2_53_no_version: description: | The id of the service as a uuid. This uniquely identifies the service in a multi-cell deployment. in: path required: true type: string share_id_path: description: | The UUID of the attached share. in: path required: true type: string snapshot_id_path: description: | The UUID of the snapshot. 
in: path required: true type: string tag: description: | The tag as a string. in: path required: true type: string tenant_id: description: | The UUID of the tenant in a multi-tenancy cloud. in: path required: true type: string volume_id_attached_path: description: | The UUID of the attached volume. in: path required: true type: string volume_id_path: description: | The unique ID for a volume. in: path required: true type: string volume_id_swap_src: description: | The UUID of the volume being replaced. in: path required: true type: string volume_id_to_detach_path: description: | The UUID of the volume to detach. in: path required: true type: string # variables in query access_ip_v4_query_server: in: query required: false type: string description: | Filter server list result by IPv4 address that should be used to access the server. access_ip_v6_query_server: in: query required: false type: string description: | Filter server list result by IPv6 address that should be used to access the server. all_projects: description: | Administrator only. Lists server groups for all projects. For example: ``GET /os-server-groups?all_projects=True`` If you specify a tenant ID for a non-administrative user with this query parameter, the call lists all server groups for the tenant, or project, rather than for all projects. Value of this query parameter is not checked, only presence is considered as request for all projects. in: query required: false type: string all_tenants: description: | Specify the ``all_tenants`` query parameter to ping instances for all tenants. By default this is only allowed by admin users. Value of this query parameter is not checked, only presence is considered as request for all tenants. in: query required: false type: string all_tenants_query: description: | Specify the ``all_tenants`` query parameter to list all instances for all projects. By default this is only allowed by administrators. If this parameter is specified without a value, the value defaults to ``True``. If the value is specified, ``1``, ``t``, ``true``, ``on``, ``y`` and ``yes`` are treated as ``True``. ``0``, ``f``, ``false``, ``off``, ``n`` and ``no`` are treated as ``False``. (They are case-insensitive.) in: query required: false type: boolean all_tenants_sec_grp_query: description: | Specify the ``all_tenants`` query parameter to list all security groups for all projects. This is only allowed for admin users. Value of this query parameter is not checked, only presence is considered as request for all tenants. in: query required: false type: string availability_zone_query_server: description: | Filter the server list result by server availability zone. This parameter is restricted to administrators until microversion 2.83. If non-admin users specify this parameter on a microversion less than 2.83, it will be ignored. in: query required: false type: string binary_query: description: | Filter the service list result by binary name of the service. in: query required: false type: string changes-since: description: | Filters the response by a date and time when the image last changed status. Use this query parameter to check for changes since a previous request rather than re-downloading and re-parsing the full status at each polling interval. If data has changed, the call returns only the items changed since the ``changes-since`` time. If data has not changed since the ``changes-since`` time, the call returns an empty list. 
To enable you to keep track of changes, this filter also displays images that were deleted if the ``changes-since`` value specifies a date in the last 30 days. Items deleted more than 30 days ago might be returned, but it is not guaranteed. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. in: query required: false type: string changes_before_instance_action: description: | Filters the response by a date and time stamp when the instance actions last changed. Those instances that changed before or equal to the specified date and time stamp are returned. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. When both ``changes-since`` and ``changes-before`` are specified, the value of the ``changes-before`` must be later than or equal to the value of the ``changes-since`` otherwise API will return 400. in: query required: false type: string min_version: 2.66 changes_before_migration: description: | Filters the response by a date and time stamp when the migration last changed. Those migrations that changed before or equal to the specified date and time stamp are returned. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. When both ``changes-since`` and ``changes-before`` are specified, the value of the ``changes-before`` must be later than or equal to the value of the ``changes-since`` otherwise API will return 400. in: query required: false type: string min_version: 2.66 changes_before_server: description: | Filters the response by a date and time stamp when the server last changed. Those servers that changed before or equal to the specified date and time stamp are returned. To help keep track of changes this may also return recently deleted servers. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. When both ``changes-since`` and ``changes-before`` are specified, the value of the ``changes-before`` must be later than or equal to the value of the ``changes-since`` otherwise API will return 400. in: query required: false type: string min_version: 2.66 changes_since_instance_action: description: | Filters the response by a date and time stamp when the instance action last changed. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. When both ``changes-since`` and ``changes-before`` are specified, the value of the ``changes-since`` must be earlier than or equal to the value of the ``changes-before`` otherwise API will return 400. 
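The ``changes-since``/``changes-before`` family of filters is intended for polling: rather than re-listing everything each interval, a client remembers its last poll time and asks only for what changed since then. A small sketch under the same assumptions (``requests`` library, hypothetical endpoint and token):

.. code-block:: python

    import datetime
    import requests

    COMPUTE_URL = "https://compute.example.com/v2.1"   # hypothetical
    HEADERS = {"X-Auth-Token": "gAAAA..."}             # hypothetical

    # Time of the previous poll; only servers that changed after this point
    # (including recently deleted ones) come back on the next call.
    last_poll = datetime.datetime(2015, 8, 27, 9, 49, 58,
                                  tzinfo=datetime.timezone.utc)

    resp = requests.get(f"{COMPUTE_URL}/servers/detail",
                        params={"changes-since": last_poll.isoformat()},
                        headers=HEADERS)
    resp.raise_for_status()
    for server in resp.json()["servers"]:
        print(server["id"], server["status"])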
in: query required: false type: string min_version: 2.58 changes_since_migration: description: | Filters the response by a date and time stamp when the migration last changed. Those changed after the specified date and time stamp are returned. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. When both ``changes-since`` and ``changes-before`` are specified, the value of the ``changes-since`` must be earlier than or equal to the value of the ``changes-before`` otherwise API will return 400. in: query required: false type: string min_version: 2.59 changes_since_server: description: | Filters the response by a date and time stamp when the server last changed status. To help keep track of changes this may also return recently deleted servers. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. When both ``changes-since`` and ``changes-before`` are specified, the value of the ``changes-since`` must be earlier than or equal to the value of the ``changes-before`` otherwise API will return 400. in: query required: false type: string config_drive_query_server: description: | Filter the server list result by the config drive setting of the server. This parameter is restricted to administrators until microversion 2.83. If non-admin users specify this parameter on a microversion less than 2.83, it will be ignored. in: query required: false type: string created_at_query_server: description: | Filter the server list result by a date and time stamp when server was created. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. This parameter is restricted to administrators until microversion 2.83. If non-admin users specify this parameter on a microversion less than 2.83, it will be ignored. in: query required: false type: string delete_info: description: | Information for snapshot deletion. Include the ID of the associated volume. For example: .. code-block:: javascript DELETE /os-assisted-volume-snapshots/421752a6-acf6-4b2d-bc7a-119f9148cd8c?delete_info='{"volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c"}' in: query required: true type: string deleted_query: in: query required: false type: boolean description: | Show deleted items only. In some circumstances deleted items will still be accessible via the backend database, however there is no contract on how long, so this parameter should be used with caution. ``1``, ``t``, ``true``, ``on``, ``y`` and ``yes`` are treated as ``True`` (case-insensitive). Other than them are treated as ``False``. This parameter is only valid when specified by administrators. If non-admin users specify this parameter, it is ignored. description_query_server: description: | Filter the server list result by description. This parameter is only valid when specified by administrators. If non-admin users specify this parameter, it is ignored. .. 
note:: ``display_description`` can also be requested which is alias of ``description`` but that is not recommended to use as that will be removed in future. in: query required: false type: string detailed_simple_tenant_usage: description: | Specify the ``detailed=1`` query parameter to get detail information ('server_usages' information). in: query required: false type: integer disk_config_query_server: description: | Filter the server list result by the ``disk_config`` setting of the server, Valid values are: - ``AUTO`` - ``MANUAL`` This parameter is only valid when specified by administrators. If non-admin users specify this parameter, it is ignored. in: query required: false type: string end_simple_tenant_usage: description: | The ending time to calculate usage statistics on compute and storage resources. The date and time stamp format is any of the following ones: :: CCYY-MM-DDThh:mm:ss For example, ``2015-08-27T09:49:58``. :: CCYY-MM-DDThh:mm:ss.NNNNNN For example, ``2015-08-27T09:49:58.123456``. :: CCYY-MM-DD hh:mm:ss.NNNNNN For example, ``2015-08-27 09:49:58.123456``. If you omit this parameter, the current time is used. in: query required: false type: string exclude: description: | Specify ``exclude=uuid[,uuid...]`` to exclude the instances from the results. in: query required: false type: string flavor_is_public_query: in: query required: false type: string description: | This parameter is only applicable to users with the administrative role. For all other non-admin users, the parameter is ignored and only public flavors will be returned. Filters the flavor list based on whether the flavor is public or private. If the value of this parameter is not specified, it is treated as ``True``. If the value is specified, ``1``, ``t``, ``true``, ``on``, ``y`` and ``yes`` are treated as ``True``. ``0``, ``f``, ``false``, ``off``, ``n`` and ``no`` are treated as ``False`` (they are case-insensitive). If the value is ``None`` (case-insensitive) both public and private flavors will be listed in a single request. flavor_query: description: | Filters the response by a flavor, as a UUID. A flavor is a combination of memory, disk size, and CPUs. in: query required: false type: string host_query_server: description: | Filter the server list result by the host name of compute node. This parameter is only valid when specified by administrators. If non-admin users specify this parameter, it is ignored. in: query required: false type: string host_query_service: description: | Filter the service list result by the host name. in: query required: false type: string hostname_query_server: description: | Filter the server list result by the host name of server. This parameter is only valid when specified by administrators until microversion 2.90, after which it can be specified by all users. If non-admin users specify this parameter before microversion 2.90, it is ignored. in: query required: false type: string hypervisor_hostname_pattern_query: description: | The hypervisor host name or a portion of it. The hypervisor hosts are selected with the host name matching this pattern. .. note:: ``limit`` and ``marker`` query parameters for paging are not supported when listing hypervisors using a hostname pattern. Also, ``links`` will not be returned in the response when using this query parameter. in: query required: false type: string min_version: 2.53 hypervisor_limit: description: | Requests a page size of items. Returns a number of items up to a limit value. 
Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: integer min_version: 2.33 hypervisor_marker: description: | The ID of the last-seen item. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: integer min_version: 2.33 max_version: 2.52 hypervisor_marker_uuid: description: | The ID of the last-seen item as a UUID. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: string min_version: 2.53 hypervisor_query: description: | Filters the response by a hypervisor type. in: query required: false type: string hypervisor_with_servers_query: description: | Include all servers which belong to each hypervisor in the response output. in: query required: false type: boolean min_version: 2.53 image_name_query: description: | Filters the response by an image name, as a string. in: query required: false type: string image_query: description: | Filters the response by an image, as a UUID. .. note:: 'image_ref' can also be requested which is alias of 'image' but that is not recommended to use as that will be removed in future. in: query required: false type: string image_server_query: description: | Filters the response by a server, as a URL. format: uri in: query required: false type: string image_status_query: description: | Filters the response by an image status, as a string. For example, ``ACTIVE``. in: query required: false type: string image_type_query: description: | Filters the response by an image type. For example, ``snapshot`` or ``backup``. in: query required: false type: string include: description: | Specify ``include=uuid[,uuid...]`` to include the instances in the results. in: query required: false type: string instance_action_limit: description: | Requests a page size of items. Returns a number of items up to a limit value. Use the ``limit`` parameter to make an initial limited request and use the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: integer min_version: 2.58 instance_action_marker: description: | The ``request_id`` of the last-seen instance action. Use the ``limit`` parameter to make an initial limited request and use the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: string min_version: 2.58 ip6_query: description: | An IPv6 address to filter results by. Up to microversion 2.4, this parameter is only valid when specified by administrators. If non-admin users specify this parameter, it is ignored. Starting from microversion 2.5, this parameter is valid for no-admin users as well as administrators. in: query required: false type: string ip_query: description: | An IPv4 address to filter results by. in: query required: false type: string kernel_id_query_server: in: query required: false type: string description: | Filter the server list result by the UUID of the kernel image when using an AMI. This parameter is only valid when specified by administrators. If non-admin users specify this parameter, it is ignored. 
key_name_query_server: description: | Filter the server list result by keypair name. This parameter is restricted to administrators until microversion 2.83. If non-admin users specify this parameter on a microversion less than 2.83, it will be ignored. in: query required: false type: string keypair_limit: description: | Requests a page size of items. Returns a number of items up to a limit value. Use the ``limit`` parameter to make an initial limited request and use the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: integer min_version: 2.35 keypair_marker: description: | The last-seen item. Use the ``limit`` parameter to make an initial limited request and use the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: string min_version: 2.35 keypair_user: in: query required: false type: string description: | This allows administrative users to operate key-pairs of specified user ID. min_version: 2.10 launch_index_query_server: description: | Filter the server list result by the sequence in which the servers were launched. This parameter is only valid when specified by administrators. If non-admin users specify this parameter, it is ignored. in: query required: false type: integer launched_at_query_server: description: | Filter the server list result by a date and time stamp when the instance was launched. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. This parameter is restricted to administrators until microversion 2.83. If non-admin users specify this parameter on a microversion less than 2.83, it will be ignored. in: query required: false type: string limit: description: | Requests a page size of items. Returns a number of items up to a limit value. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: integer limit_simple: description: | Used in conjunction with ``offset`` to return a slice of items. ``limit`` is the maximum number of items to return. If ``limit`` is not specified, or exceeds the configurable ``max_limit``, then ``max_limit`` will be used instead. in: query required: false type: integer locked_by_query_server: description: | Filter the server list result by who locked the server, possible value could be ``admin`` or ``owner``. This parameter is only valid when specified by administrators. If non-admin users specify this parameter, it is ignored. in: query required: false type: string locked_query_server: description: | Specify the ``locked`` query parameter to list all locked or unlocked instances. If the value is specified, ``1``, ``t``, ``true``, ``on``, ``y`` and ``yes`` are treated as ``True``. ``0``, ``f``, ``false``, ``off``, ``n`` and ``no`` are treated as ``False``. (They are case-insensitive.) Any other value provided will be considered invalid. in: query required: false type: boolean min_version: 2.73 marker: description: | The ID of the last-seen item. 
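``limit`` and ``marker`` implement the same last-seen-item pagination across most of the list APIs: request a page, then pass the ID of the last item you received as ``marker`` for the next page. A sketch of walking the full server list this way, with hypothetical endpoint and token values:

.. code-block:: python

    import requests

    COMPUTE_URL = "https://compute.example.com/v2.1"   # hypothetical
    HEADERS = {"X-Auth-Token": "gAAAA..."}             # hypothetical

    def iter_servers(page_size=100):
        """Yield every server, one ``limit``/``marker`` page at a time."""
        marker = None
        while True:
            params = {"limit": page_size}
            if marker is not None:
                params["marker"] = marker   # ID of the last-seen server
            resp = requests.get(f"{COMPUTE_URL}/servers",
                                params=params, headers=HEADERS)
            resp.raise_for_status()
            servers = resp.json()["servers"]
            if not servers:
                return
            yield from servers
            marker = servers[-1]["id"]

    for server in iter_servers():
        print(server["id"], server["name"])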
Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: string migration_hidden: description: | The 'hidden' setting of migration to filter. The 'hidden' flag is set if the value is 1. The 'hidden' flag is not set if the value is 0. But the 'hidden' setting of migration is always 0, so this parameter is useless to filter migrations. in: query required: false type: integer migration_host: description: | The source/destination compute node of migration to filter. in: query required: false type: string migration_instance_uuid: description: | The uuid of the instance that migration is operated on to filter. in: query required: false type: string migration_limit: description: | Requests a page size of items. Returns a number of items up to a limit value. Use the ``limit`` parameter to make an initial limited request and use the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: integer min_version: 2.59 migration_marker: description: | The UUID of the last-seen migration. Use the ``limit`` parameter to make an initial limited request and use the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: string min_version: 2.59 migration_source_compute: description: | The source compute node of migration to filter. in: query required: false type: string migration_status: description: | The status of migration to filter. in: query required: false type: string migration_type: description: | The type of migration to filter. Valid values are: * ``evacuation`` * ``live-migration`` * ``migration`` * ``resize`` in: query required: false type: string minDisk: description: | Filters the response by a minimum disk space, in GiB. For example, ``100``. in: query required: false type: integer minRam: description: | Filters the response by a minimum RAM, in MiB. For example, ``512``. in: query required: false type: integer node_query_server: description: | Filter the server list result by the node. This parameter is only valid when specified by administrators. If non-admin users specify this parameter, it is ignored. in: query required: false type: string not_tags_any_query: in: query required: false type: string description: | A list of tags to filter the server list by. Servers that don't match any tags in this list will be returned. Boolean expression in this case is 'NOT (t1 OR t2)'. Tags in query must be separated by comma. min_version: 2.26 not_tags_query: in: query required: false type: string description: | A list of tags to filter the server list by. Servers that don't match all tags in this list will be returned. Boolean expression in this case is 'NOT (t1 AND t2)'. Tags in query must be separated by comma. min_version: 2.26 offset_simple: description: | Used in conjunction with ``limit`` to return a slice of items. ``offset`` is where to start in the list. in: query required: false type: integer power_state_query_server: in: query required: false type: integer description: | Filter the server list result by server power state. Possible values are integer values that is mapped as:: 0: NOSTATE 1: RUNNING 3: PAUSED 4: SHUTDOWN 6: CRASHED 7: SUSPENDED This parameter is restricted to administrators until microversion 2.83. 
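The migration filters above (``status``, ``migration_type``, source/destination host, and the paging parameters added in 2.59) can be combined on ``GET /os-migrations``. A sketch listing only completed live migrations, with hypothetical endpoint and token values:

.. code-block:: python

    import requests

    COMPUTE_URL = "https://compute.example.com/v2.1"   # hypothetical
    HEADERS = {
        "X-Auth-Token": "gAAAA...",                    # hypothetical (admin)
        "OpenStack-API-Version": "compute 2.59",       # enables limit/marker
    }

    params = {
        "migration_type": "live-migration",
        "status": "completed",
        "limit": 50,
    }
    resp = requests.get(f"{COMPUTE_URL}/os-migrations",
                        params=params, headers=HEADERS)
    resp.raise_for_status()
    for migration in resp.json()["migrations"]:
        print(migration["instance_uuid"], migration["source_compute"],
              "->", migration["dest_compute"])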
If non-admin users specify this parameter on a microversion less than 2.83, it will be ignored. progress_query_server: description: | Filter the server list result by the progress of the server. The value could be from 0 to 100 as integer. This parameter is restricted to administrators until microversion 2.83. If non-admin users specify this parameter on a microversion less than 2.83, it will be ignored. in: query required: false type: integer project_id_query_migrations: description: | Filter the migrations by the given project ID. in: query required: false type: string min_version: 2.80 project_id_query_server: description: | Filter the list of servers by the given project ID. This filter only works when the ``all_tenants`` filter is also specified. .. note:: 'tenant_id' can also be requested which is alias of 'project_id' but that is not recommended to use as that will be removed in future. in: query required: false type: string ramdisk_id_query_server: in: query required: false type: string description: | Filter the server list result by the UUID of the ramdisk image when using an AMI. This parameter is only valid when specified by administrators. If non-admin users specify this parameter, it is ignored. reservation_id_query: in: query required: false type: string description: | A reservation id as returned by a servers multiple create call. reserved_query: description: | Specify whether the result of resource total includes reserved resources or not. - ``0``: Not include reserved resources. - Other than 0: Include reserved resources. If non integer value is specified, it is the same as ``0``. in: query required: false type: integer server_name_query: description: | Filters the response by a server name, as a string. You can use regular expressions in the query. For example, the ``?name=bob`` regular expression returns both bob and bobb. If you must match on only bob, you can use a regular expression that matches the syntax of the underlying database server that is implemented for Compute, such as MySQL or PostgreSQL. .. note:: 'display_name' can also be requested which is alias of 'name' but that is not recommended to use as that will be removed in future. format: regexp in: query required: false type: string server_root_device_name_query: in: query required: false type: string description: | Filter the server list result by the root device name of the server. This parameter is only valid when specified by administrators. If non-admin users specify this parameter, it is ignored. server_status_query: description: | Filters the response by a server status, as a string. For example, ``ACTIVE``. Up to microversion 2.37, an empty list is returned if an invalid status is specified. Starting from microversion 2.38, a 400 error is returned in that case. in: query required: false type: string server_uuid_query: description: | Filter the server list result by the UUID of the server. This parameter is only valid when specified by administrators. If non-admin users specify this parameter, it is ignored. in: query required: false type: string soft_deleted_server: description: | Filter the server list by ``SOFT_DELETED`` status. This parameter is only valid when the ``deleted=True`` filter parameter is specified. in: query required: false type: boolean sort_dir_flavor: description: | Sort direction. A valid value is ``asc`` (ascending) or ``desc`` (descending). Default is ``asc``. You can specify multiple pairs of sort key and sort direction query parameters. 
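Several of the server list filters compose naturally: ``name`` takes a regular expression while ``status`` is matched exactly. A sketch, with hypothetical values, listing only ``ACTIVE`` servers whose names start with ``web-``:

.. code-block:: python

    import requests

    COMPUTE_URL = "https://compute.example.com/v2.1"   # hypothetical
    HEADERS = {"X-Auth-Token": "gAAAA..."}             # hypothetical

    params = {
        "name": "^web-",      # regular expression match on the server name
        "status": "ACTIVE",   # exact status match
    }
    resp = requests.get(f"{COMPUTE_URL}/servers/detail",
                        params=params, headers=HEADERS)
    resp.raise_for_status()
    for server in resp.json()["servers"]:
        print(server["name"], server["status"])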
If you omit the sort direction in a pair, the API uses the natural sorting direction of the flavor ``sort_key`` attribute. in: query required: false type: string sort_dir_server: description: | Sort direction. A valid value is ``asc`` (ascending) or ``desc`` (descending). Default is ``desc``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server ``sort_key`` attribute. in: query required: false type: string sort_key_flavor: description: | Sorts by a flavor attribute. Default attribute is ``flavorid``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the flavor ``sort_key`` attribute. The sort keys are limited to: - ``created_at`` - ``description`` - ``disabled`` - ``ephemeral_gb`` - ``flavorid`` - ``id`` - ``is_public`` - ``memory_mb`` - ``name`` - ``root_gb`` - ``rxtx_factor`` - ``swap`` - ``updated_at`` - ``vcpu_weight`` - ``vcpus`` in: query required: false type: string sort_key_server: description: | Sorts by a server attribute. Default attribute is ``created_at``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server ``sort_key`` attribute. The sort keys are limited to: - ``access_ip_v4`` - ``access_ip_v6`` - ``auto_disk_config`` - ``availability_zone`` - ``config_drive`` - ``created_at`` - ``display_description`` - ``display_name`` - ``host`` - ``hostname`` - ``image_ref`` - ``instance_type_id`` - ``kernel_id`` - ``key_name`` - ``launch_index`` - ``launched_at`` - ``locked`` (New in version 2.73) - ``locked_by`` - ``node`` - ``power_state`` - ``progress`` - ``project_id`` - ``ramdisk_id`` - ``root_device_name`` - ``task_state`` - ``terminated_at`` - ``updated_at`` - ``user_id`` - ``uuid`` - ``vm_state`` ``host`` and ``node`` are only allowed for admin. If non-admin users specify them, a 403 error is returned. in: query required: false type: string start_simple_tenant_usage: description: | The beginning time to calculate usage statistics on compute and storage resources. The date and time stamp format is any of the following ones: :: CCYY-MM-DDThh:mm:ss For example, ``2015-08-27T09:49:58``. :: CCYY-MM-DDThh:mm:ss.NNNNNN For example, ``2015-08-27T09:49:58.123456``. :: CCYY-MM-DD hh:mm:ss.NNNNNN For example, ``2015-08-27 09:49:58.123456``. If you omit this parameter, the current time is used. in: query required: false type: string tags_any_query: in: query required: false type: string description: | A list of tags to filter the server list by. Servers that match any tag in this list will be returned. Boolean expression in this case is 't1 OR t2'. Tags in query must be separated by comma. min_version: 2.26 tags_query: in: query required: false type: string description: | A list of tags to filter the server list by. Servers that match all tags in this list will be returned. Boolean expression in this case is 't1 AND t2'. Tags in query must be separated by comma. min_version: 2.26 task_state_query_server: in: query required: false type: string description: | Filter the server list result by task state. This parameter is restricted to administrators until microversion 2.83. If non-admin users specify this parameter on a microversion less than 2.83, it will be ignored. 
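Because ``sort_key`` and ``sort_dir`` may be repeated, a client passes them as ordered pairs rather than a single value. A sketch (hypothetical endpoint and token) sorting first by availability zone ascending, then by creation time descending within each zone:

.. code-block:: python

    import requests

    COMPUTE_URL = "https://compute.example.com/v2.1"   # hypothetical
    HEADERS = {"X-Auth-Token": "gAAAA..."}             # hypothetical

    # A list of tuples keeps the repeated keys in order in the query string:
    # ?sort_key=availability_zone&sort_dir=asc&sort_key=created_at&sort_dir=desc
    params = [
        ("sort_key", "availability_zone"), ("sort_dir", "asc"),
        ("sort_key", "created_at"), ("sort_dir", "desc"),
    ]
    resp = requests.get(f"{COMPUTE_URL}/servers/detail",
                        params=params, headers=HEADERS)
    resp.raise_for_status()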
tenant_id_query: description: | Specify the project ID (tenant ID) to show the rate and absolute limits. This parameter can be specified by admin only. in: query required: false type: string terminated_at_query_server: description: | Filter the server list result by a date and time stamp when instance was terminated. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. This parameter is restricted to administrators until microversion 2.83. If non-admin users specify this parameter on a microversion less than 2.83, it will be ignored. in: query required: false type: string usage_limit: description: | Requests a page size of items. Calculate usage for the limited number of instances. Use the ``limit`` parameter to make an initial limited request and use the last-seen instance UUID from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: integer min_version: 2.40 usage_marker: description: | The last-seen item. Use the ``limit`` parameter to make an initial limited request and use the last-seen instance UUID from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: string min_version: 2.40 user_id_query_migrations: description: | Filter the migrations by the given user ID. in: query required: false type: string min_version: 2.80 user_id_query_quota: description: | ID of user to list the quotas for. in: query required: false type: string user_id_query_quota_delete: description: | ID of user to delete quotas for. in: query required: false type: string user_id_query_server: description: | Filter the list of servers by the given user ID. This parameter is restricted to administrators until microversion 2.83. If non-admin users specify this parameter on a microversion less than 2.83, it will be ignored. in: query required: false type: string user_id_query_set_quota: description: | ID of user to set the quotas for. in: query required: false type: string vm_state_query_server: description: | Filter the server list result by vm state. The value could be: - ``ACTIVE`` - ``BUILDING`` - ``DELETED`` - ``ERROR`` - ``PAUSED`` - ``RESCUED`` - ``RESIZED`` - ``SHELVED`` - ``SHELVED_OFFLOADED`` - ``SOFT_DELETED`` - ``STOPPED`` - ``SUSPENDED`` This parameter is restricted to administrators until microversion 2.83. If non-admin users specify this parameter on a microversion less than 2.83, it will be ignored. in: query required: false type: string # variables in body accessIPv4: in: body required: true type: string description: | IPv4 address that should be used to access this server. May be automatically set by the provider. accessIPv4_in: in: body required: false type: string description: | IPv4 address that should be used to access this server. accessIPv6: in: body required: true type: string description: | IPv6 address that should be used to access this server. May be automatically set by the provider. accessIPv6_in: in: body required: false type: string description: | IPv6 address that should be used to access this server. action: description: | The name of the action. in: body required: true type: string action_reserve: description: | The attribute to reserve an IP with a value of ``null``. 
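``usage_limit`` and ``usage_marker`` (2.40 and later) page the usage reports the same way servers are paged, while the ``start``/``end`` parameters described earlier bound the accounting window. A sketch with hypothetical endpoint and token values:

.. code-block:: python

    import requests

    COMPUTE_URL = "https://compute.example.com/v2.1"   # hypothetical
    HEADERS = {
        "X-Auth-Token": "gAAAA...",                    # hypothetical (admin)
        "OpenStack-API-Version": "compute 2.40",       # usage paging support
    }

    params = {
        "start": "2015-08-01T00:00:00",
        "end": "2015-08-27T09:49:58",
        "detailed": 1,      # include per-server 'server_usages' entries
        "limit": 100,
    }
    resp = requests.get(f"{COMPUTE_URL}/os-simple-tenant-usage",
                        params=params, headers=HEADERS)
    resp.raise_for_status()
    for usage in resp.json()["tenant_usages"]:
        print(usage["tenant_id"], usage["total_hours"])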
in: body required: false type: string action_unreserve: description: | The attribute to release an IP with a value of ``null``. in: body required: false type: string addFixedIp: description: | The action to add a fixed ip address to a server. in: body required: true type: object addFloatingIp: description: | The action. Contains required floating IP ``address`` and optional ``fixed_address``. in: body required: true type: object address: description: | The floating IP address. in: body required: true type: string addresses: description: | The addresses for the server. Servers with status ``BUILD`` hide their addresses information. This view is not updated immediately. Please consult with OpenStack Networking API for up-to-date information. in: body required: true type: object addresses_obj: description: | The addresses information for the server. in: body required: true type: object addSecurityGroup: description: | The action to add a security group to a server. in: body required: true type: object addTenantAccess: description: | The action. in: body required: true type: string adminPass_change_password: description: | The administrative password for the server. in: body required: true type: string adminPass_evacuate: description: | An administrative password to access the evacuated instance. If you set ``enable_instance_password`` configuration option to ``False``, the API wouldn't return the ``adminPass`` field in response. in: body required: false type: string max_version: 2.13 adminPass_evacuate_request: description: | An administrative password to access the evacuated server. If you omit this parameter, the operation generates a new password. Up to API version 2.13, if ``onSharedStorage`` is set to ``True`` and this parameter is specified, an error is raised. in: body required: false type: string adminPass_request: description: | The administrative password of the server. If you omit this parameter, the operation generates a new password. in: body required: false type: string adminPass_rescue_request: description: | The password for the rescued instance. If you omit this parameter, the operation generates a new password. in: body required: false type: string adminPass_response: description: | The administrative password for the server. If you set ``enable_instance_password`` configuration option to ``False``, the API wouldn't return the ``adminPass`` field in response. in: body required: false type: string agent: description: | The guest agent object. in: body required: true type: object agent_id: description: | The agent ID. in: body required: true type: integer agent_id_str: description: | The agent ID. (This is a bug of API, this should be integer type which is consistent with the responses of agent create and list. This will be fixed in later microversion.) in: body required: true type: string agents: description: | A list of guest agent objects. in: body required: true type: array aggregate: description: | The host aggregate object. in: body required: true type: object aggregate_add_host: description: | The add_host object used to add host to aggregate. in: body required: true type: object aggregate_az: description: | The availability zone of the host aggregate. in: body required: true type: string aggregate_az_optional_create: description: | The availability zone of the host aggregate. You should use a custom availability zone rather than the default returned by the os-availability-zone API. The availability zone must not include ':' in its name. 
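The action bodies in this section (``addSecurityGroup``, ``addFloatingIp``, ``changePassword``, and so on) are all submitted the same way: as the single top-level key of a ``POST /servers/{server_id}/action`` request. A sketch using ``addSecurityGroup``, with hypothetical endpoint, token, and server ID:

.. code-block:: python

    import requests

    COMPUTE_URL = "https://compute.example.com/v2.1"   # hypothetical
    HEADERS = {"X-Auth-Token": "gAAAA..."}             # hypothetical
    server_id = "9aef9e9a-3ee0-4f33-b942-3d5ed8f0d1a7" # hypothetical

    # The action name is the only top-level key; its value carries the
    # action-specific parameters documented in this section.
    body = {"addSecurityGroup": {"name": "default"}}
    resp = requests.post(f"{COMPUTE_URL}/servers/{server_id}/action",
                         json=body, headers=HEADERS)
    resp.raise_for_status()   # 202 on success, no response body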
in: body required: false type: string aggregate_az_optional_update: description: | The availability zone of the host aggregate. You should use a custom availability zone rather than the default returned by the os-availability-zone API. The availability zone must not include ':' in its name. .. warning:: You should not change or unset the availability zone of an aggregate when that aggregate has hosts which contain servers in it since that may impact the ability for those servers to move to another host. in: body required: false type: string aggregate_host_list: description: | A list of host ids in this aggregate. in: body required: true type: array aggregate_id_body: description: | The ID of the host aggregate. in: body required: true type: integer aggregate_metadata_request: description: | Metadata key and value pairs associated with the aggregate. The maximum size for each metadata key and value pair is 255 bytes. New keys will be added to existing aggregate metadata. For existing keys, if the value is ``null`` the entry is removed, otherwise the value is updated. Note that the special ``availability_zone`` metadata entry cannot be unset to ``null``. .. warning:: You should not change the availability zone of an aggregate when that aggregate has hosts which contain servers in it since that may impact the ability for those servers to move to another host. in: body required: true type: object aggregate_metadata_response: description: | Metadata key and value pairs associated with the aggregate. in: body required: true type: object aggregate_name: description: | The name of the host aggregate. in: body required: true type: string aggregate_name_optional: description: | The name of the host aggregate. in: body required: false type: string aggregate_remove_host: description: | The add_host object used to remove host from aggregate. in: body required: true type: object aggregate_uuid: description: | The UUID of the host aggregate. in: body required: true type: string min_version: 2.41 aggregates: description: | The list of existing aggregates. in: body required: true type: array alias: description: | A short name by which this extension is also known. in: body required: true type: string alive: description: | Returns true if the instance is alive. in: body required: true type: boolean architecture: description: | The name of the cpu architecture. in: body required: true type: string associate_host: description: | The name of the host to associate. in: body required: true type: string attachment_bdm_id_resp: description: | The UUID of the block device mapping record in Nova for the attachment. in: body required: true type: string min_version: 2.89 attachment_device_put_req: description: | Name of the device in the attachment object, such as, ``/dev/vdb``. in: body required: false type: string min_version: 2.85 attachment_device_resp: description: | Name of the device in the attachment object, such as, ``/dev/vdb``. in: body required: false type: string attachment_id_put_req: description: | The UUID of the attachment. in: body required: false type: string min_version: 2.85 attachment_id_resp: description: | The UUID of the attachment. in: body required: false type: string attachment_server_id_put_req: description: | The UUID of the server. in: body required: false type: string min_version: 2.85 attachment_server_id_resp: description: | The UUID of the server. in: body required: false type: string attachment_volume_id_resp: description: | The UUID of the associated volume attachment in Cinder. 
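``aggregate_metadata_request`` is the body of the aggregate ``set_metadata`` action: new keys are added, a ``null`` value removes an existing key, and the special ``availability_zone`` entry should be handled with the caution noted in the warning above. A sketch with hypothetical endpoint, token, and aggregate ID:

.. code-block:: python

    import requests

    COMPUTE_URL = "https://compute.example.com/v2.1"   # hypothetical
    HEADERS = {"X-Auth-Token": "gAAAA..."}             # hypothetical (admin)
    aggregate_id = 12                                  # hypothetical

    body = {
        "set_metadata": {
            "metadata": {
                "ssd": "true",        # add or update a key
                "old_flag": None,     # null removes an existing key
            }
        }
    }
    resp = requests.post(f"{COMPUTE_URL}/os-aggregates/{aggregate_id}/action",
                         json=body, headers=HEADERS)
    resp.raise_for_status()
    print(resp.json()["aggregate"]["metadata"])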
in: body required: true type: string min_version: 2.89 attachment_volumeId_resp: description: | The UUID of the attached volume. in: body required: false type: string availability_zone: description: | The availability zone. in: body required: false type: string availability_zone_info: description: | The list of availability zone information. in: body required: true type: array availability_zone_state: description: | The current state of the availability zone. in: body required: true type: object availability_zone_unshelve: description: | The availability zone name. Specifying an availability zone is only allowed when the server status is ``SHELVED_OFFLOADED`` otherwise HTTP 409 conflict response is returned. Since microversion 2.91 ``"availability_zone":null`` allows unpinning the instance from any availability_zone it is pinned to. in: body required: false type: string min_version: 2.77 available: description: | Returns true if the availability zone is available. in: body required: true type: boolean backup_name: description: | The name of the image to be backed up. in: body required: true type: string backup_rotation: description: | The rotation of the back up image, the oldest image will be removed when image count exceed the rotation count. in: body required: true type: integer backup_type: description: | The type of the backup, for example, ``daily``. in: body required: true type: string baremetal_cpus: description: | Number of CPUs the node has. .. note:: This is a JSON string, even though it will look like an int value. in: body required: true type: string baremetal_disk: description: | Amount of disk in GiB the node has. .. note:: This is a JSON string, even though it will look like an int value. in: body required: true type: string baremetal_host: description: | This will always have the value ``IRONIC MANAGED``. in: body required: true type: string baremetal_id: description: | UUID of the baremetal node. in: body required: true type: string baremetal_instance_uuid: description: | UUID of the server instance on this node. in: body required: true type: string baremetal_interfaces: description: | A list of interface objects for active interfaces on the baremetal node. Each will have an ``address`` field with the address. in: body required: true type: array baremetal_mem: description: | Amount of memory in MiB the node has. .. note:: This is a JSON string, even though it will look like an int value. in: body required: true type: string baremetal_node: description: | A baremetal node object. in: body required: true type: object baremetal_nodes: description: | An array of baremetal node objects. in: body required: true type: array baremetal_taskstate: description: | The Ironic task state for the node. See Ironic project for more details. in: body required: true type: string binary: description: | The binary name of the service. in: body required: true type: string block_device_mapping_v2: description: | Enables fine grained control of the block device mapping for an instance. This is typically used for booting servers from volumes. An example format would look as follows: .. code-block:: javascript "block_device_mapping_v2": [{ "boot_index": "0", "uuid": "ac408821-c95a-448f-9292-73986c790911", "source_type": "image", "volume_size": "25", "destination_type": "volume", "delete_on_termination": true, "tag": "disk1", "disk_bus": "scsi"}] In microversion 2.32, ``tag`` is an optional string attribute that can be used to assign a tag to the block device. 
This tag is then exposed to the guest in the metadata API and the config drive and is associated to hardware metadata for that block device, such as bus (ex: SCSI), bus address (ex: 1:0:2:0), and serial. A bug has caused the ``tag`` attribute to no longer be accepted starting with version 2.33. It has been restored in version 2.42. in: body required: false type: array block_device_uuid: description: | This is the uuid of source resource. The uuid points to different resources based on the ``source_type``. For example, if ``source_type`` is ``image``, the block device is created based on the specified image which is retrieved from the image service. Similarly, if ``source_type`` is ``snapshot`` then the uuid refers to a volume snapshot in the block storage service. If ``source_type`` is ``volume`` then the uuid refers to a volume in the block storage service. in: body required: false type: string block_migration: description: | Set to ``True`` to migrate local disks by using block migration. If the source or destination host uses shared storage and you set this value to ``True``, the live migration fails. in: body required: true type: boolean max_version: 2.24 block_migration_2_25: description: | Migrates local disks by using block migration. Set to ``auto`` which means nova will detect whether source and destination hosts on shared storage. if they are on shared storage, the live-migration won't be block migration. Otherwise the block migration will be executed. Set to ``True``, means the request will fail when the source or destination host uses shared storage. Set to ``False`` means the request will fail when the source and destination hosts are not on the shared storage. in: body required: true type: string min_version: 2.25 boot_index: description: | Defines the order in which a hypervisor tries devices when it attempts to boot the guest from storage. Give each device a unique boot index starting from ``0``. To disable a device from booting, set the boot index to a negative value or use the default boot index value, which is ``None``. The simplest usage is, set the boot index of the boot device to ``0`` and use the default boot index value, ``None``, for any other devices. Some hypervisors might not support booting from multiple devices; these hypervisors consider only the device with a boot index of ``0``. Some hypervisors support booting from multiple devices but only if the devices are of different types. For example, a disk and CD-ROM. in: body required: true type: integer cache: description: A list of image objects to cache. in: body required: true type: array certificate: description: | The certificate object. in: body required: true type: object changePassword: description: | The action to change an administrative password of the server. in: body required: true type: object cidr: description: | The CIDR for address range. in: body required: true type: string cloudpipe: description: | The cloudpipe object. in: body required: true type: object cloudpipes: description: | The list of cloudpipe objects. in: body required: true type: array code: description: | The HTTP response code for the event. The following codes are currently used: * 200 - successfully submitted event * 400 - the request is missing required parameter * 404 - the instance specified by ``server_uuid`` was not found * 422 - no host was found for the server specified by ``server_uuid``, so there is no route to this server. 
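``block_migration`` is part of the ``os-migrateLive`` server action body; from microversion 2.25 it accepts ``auto`` and the separate ``disk_over_commit`` flag is dropped. A hedged sketch with hypothetical endpoint, token, and server ID:

.. code-block:: python

    import requests

    COMPUTE_URL = "https://compute.example.com/v2.1"   # hypothetical
    HEADERS = {
        "X-Auth-Token": "gAAAA...",                    # hypothetical (admin)
        "OpenStack-API-Version": "compute 2.25",       # 'auto' block migration
    }
    server_id = "9aef9e9a-3ee0-4f33-b942-3d5ed8f0d1a7" # hypothetical

    body = {
        "os-migrateLive": {
            "host": None,               # let the scheduler pick a destination
            "block_migration": "auto",  # detect shared storage automatically
        }
    }
    resp = requests.post(f"{COMPUTE_URL}/servers/{server_id}/action",
                         json=body, headers=HEADERS)
    resp.raise_for_status()   # 202 on success, no response body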
in: body required: true type: string config_drive: description: | Indicates whether a config drive enables metadata injection. The config_drive setting provides information about a drive that the instance can mount at boot time. The instance reads files from the drive to get information that is normally available through the metadata service. This metadata is different from the user data. Not all cloud providers enable the ``config_drive``. Read more in the `OpenStack End User Guide `_. in: body required: false type: boolean config_drive_diagnostics: description: | Indicates whether or not a config drive was used for this server. in: body required: true type: boolean min_version: 2.48 config_drive_resp: description: | Indicates whether or not a config drive was used for this server. The value is ``True`` or an empty string. An empty string stands for ``False``. in: body required: true type: string config_drive_resp_update_rebuild: description: | Indicates whether or not a config drive was used for this server. The value is ``True`` or an empty string. An empty string stands for ``False``. in: body required: true type: string min_version: 2.75 configure_project_cloudpipe: description: | VPN IP and Port information to configure the cloudpipe instance.. in: body required: true type: object confirmResize: description: | The action to confirm a resize operation. in: body required: true type: none console: description: | The console object. in: body required: true type: object console_host: description: | The name or ID of the host. in: body required: false type: string console_id_in_body: description: | The UUID of the console. in: body required: true type: string console_output: description: | The console output as a string. Control characters will be escaped to create a valid JSON string. in: body required: true type: string console_password: description: | The password for the console. in: body required: true type: string console_type: description: | The type of the console. in: body required: true type: string consoles: description: | The list of console objects. in: body required: true type: array contents: description: | The file contents field in the personality object. in: body required: true type: string max_version: 2.56 cores: &cores description: | The number of allowed server cores for each tenant. in: body required: true type: integer cores_quota_class: &cores_quota_class <<: *cores description: | The number of allowed server cores for the quota class. cores_quota_class_optional: <<: *cores_quota_class required: false cores_quota_details: description: | The object of detailed cores quota, including in_use, limit and reserved number of cores. in: body required: true type: object cores_quota_optional: description: | The number of allowed server cores for each tenant. in: body required: false type: integer cpu_details_diagnostics: description: | The list of dictionaries with detailed information about VM CPUs. Following fields are presented in each dictionary: - ``id`` - the ID of CPU (Integer) - ``time`` - CPU Time in nano seconds (Integer) - ``utilisation`` - CPU utilisation in percents (Integer) in: body required: true type: array min_version: 2.48 cpu_info: description: | A dictionary that contains cpu information like ``arch``, ``model``, ``vendor``, ``features`` and ``topology``. The content of this field is hypervisor specific. .. note:: Since version 2.28 ``cpu_info`` field is returned as a dictionary instead of string. 
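``cpu_details_diagnostics`` and the related 2.48 fields are returned by ``GET /servers/{server_id}/diagnostics``, whose response shape was standardized in that microversion. A sketch with hypothetical endpoint, token, and server ID:

.. code-block:: python

    import requests

    COMPUTE_URL = "https://compute.example.com/v2.1"   # hypothetical
    HEADERS = {
        "X-Auth-Token": "gAAAA...",                    # hypothetical (admin)
        "OpenStack-API-Version": "compute 2.48",       # standardized diagnostics
    }
    server_id = "9aef9e9a-3ee0-4f33-b942-3d5ed8f0d1a7" # hypothetical

    resp = requests.get(f"{COMPUTE_URL}/servers/{server_id}/diagnostics",
                        headers=HEADERS)
    resp.raise_for_status()
    diag = resp.json()
    print(diag["driver"], diag["state"])
    for cpu in diag["cpu_details"]:
        print("cpu", cpu["id"], "time", cpu["time"])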
in: body required: true type: object max_version: 2.87 create_info: description: | Information for snapshot creation. in: body required: true type: object create_info_id: description: | Its an arbitrary string that gets passed back to the user. in: body required: false type: string create_info_id_resp: description: | Its the same arbitrary string which was sent in request body. .. note:: This string is passed back to user as it is and not being used in Nova internally. So use ``snapshot_id`` instead for further operation on this snapshot. in: body required: true type: string createBackup: description: | The action. in: body required: true type: object created: description: | The date and time when the resource was created. The date and time stamp format is `ISO 8601 `_ :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. In the previous example, the offset value is ``-05:00``. in: body required: true type: string createImage: description: | The action to create a snapshot of the image or the volume(s) of the server. in: body required: true type: object current_workload: description: | The current_workload is the number of tasks the hypervisor is responsible for. This will be equal or greater than the number of active VMs on the system (it can be greater when VMs are being deleted and the hypervisor is still cleaning up). in: body required: true type: integer max_version: 2.87 current_workload_total: description: | The current_workload is the number of tasks the hypervisors are responsible for. This will be equal or greater than the number of active VMs on the systems (it can be greater when VMs are being deleted and a hypervisor is still cleaning up). in: body required: true type: integer data: description: | The certificate. in: body required: true type: string delete_on_termination: description: | To delete the boot volume when the server is destroyed, specify ``true``. Otherwise, specify ``false``. Default: ``false`` in: body required: false type: boolean delete_on_termination_attachments_req: description: | To delete the attached volume when the server is destroyed, specify ``true``. Otherwise, specify ``false``. Default: ``false`` in: body required: false type: boolean min_version: 2.79 delete_on_termination_attachments_resp: description: | A flag indicating if the attached volume will be deleted when the server is deleted. in: body required: true type: boolean min_version: 2.79 delete_on_termination_put_req: description: | A flag indicating if the attached volume will be deleted when the server is deleted. in: body required: false type: boolean min_version: 2.85 deleted: description: | A boolean indicates whether this aggregate is deleted or not, if it has not been deleted, ``false`` will appear. in: body required: true type: boolean deleted_at: description: | The date and time when the resource was deleted. If the resource has not been deleted yet, this field will be ``null``, The date and time stamp format is `ISO 8601 `_ :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. In the previous example, the offset value is ``-05:00``. in: body required: true type: string description: description: | Security group description. in: body required: true type: string destination_type: description: | Defines where the block device mapping will reside. 
Valid values are: * ``local``: The ephemeral disk resides local to the compute host on which the server runs * ``volume``: The persistent volume is stored in the block storage service in: body required: false type: string device: description: | Name of the device such as, ``/dev/vdb``. Omit or set this parameter to null for auto-assignment, if supported. If you specify this parameter, the device must not exist in the guest operating system. Note that as of the 12.0.0 Liberty release, the Nova libvirt driver no longer honors a user-supplied device name. This is the same behavior as if the device name parameter is not supplied on the request. in: body required: false type: string device_name: description: | A path to the device for the volume that you want to use to boot the server. Note that as of the 12.0.0 Liberty release, the Nova libvirt driver no longer honors a user-supplied device name. This is the same behavior as if the device name parameter is not supplied on the request. in: body required: false type: string device_resp: description: | Name of the device such as, ``/dev/vdb``. in: body required: true type: string device_tag_bdm: description: | A device role tag that can be applied to a block device. The guest OS of a server that has devices tagged in this manner can access hardware metadata about the tagged devices from the metadata API and on the config drive, if enabled. .. note:: Due to a bug, block device tags are accepted in version 2.32 and subsequently starting with version 2.42. in: body required: false type: string min_version: 2.32 device_tag_bdm_attachment: description: | A device role tag that can be applied to a volume when attaching it to the VM. The guest OS of a server that has devices tagged in this manner can access hardware metadata about the tagged devices from the metadata API and on the config drive, if enabled. .. note:: Tagged volume attachment is not supported for shelved-offloaded instances. in: body required: false type: string min_version: 2.49 device_tag_bdm_attachment_put_req: description: | The device tag applied to the volume block device or ``null``. in: body required: true type: string min_version: 2.85 device_tag_bdm_attachment_resp: description: | The device tag applied to the volume block device or ``null``. in: body required: true type: string min_version: 2.70 device_tag_nic: description: | A device role tag that can be applied to a network interface. The guest OS of a server that has devices tagged in this manner can access hardware metadata about the tagged devices from the metadata API and on the config drive, if enabled. .. note:: Due to a bug, network interface tags are accepted between 2.32 and 2.36 inclusively, and subsequently starting with version 2.42. in: body required: false type: string min_version: 2.32 device_tag_nic_attachment: description: | A device role tag that can be applied to a network interface when attaching it to the VM. The guest OS of a server that has devices tagged in this manner can access hardware metadata about the tagged devices from the metadata API and on the config drive, if enabled. in: body required: false type: string min_version: 2.49 device_tag_nic_attachment_resp: description: | The device tag applied to the virtual network interface or ``null``. in: body required: true type: string min_version: 2.70 device_type: description: | The device type. For example, ``disk``, ``cdrom``. in: body required: false type: string device_volume_type: description: | The device ``volume_type``. 
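``device_tag_bdm_attachment`` and the ``delete_on_termination`` flag described earlier come together when attaching a volume: with microversion 2.79 or later both can be supplied in a single ``volumeAttachment`` body. A sketch with hypothetical endpoint, token, server ID, and volume ID:

.. code-block:: python

    import requests

    COMPUTE_URL = "https://compute.example.com/v2.1"   # hypothetical
    HEADERS = {
        "X-Auth-Token": "gAAAA...",                    # hypothetical
        "OpenStack-API-Version": "compute 2.79",       # tag + delete_on_termination
    }
    server_id = "9aef9e9a-3ee0-4f33-b942-3d5ed8f0d1a7" # hypothetical
    volume_id = "a26887c6-c47b-4654-abb5-dfadf7d3f803" # hypothetical

    body = {
        "volumeAttachment": {
            "volumeId": volume_id,
            "tag": "database",             # exposed via metadata API/config drive
            "delete_on_termination": True, # clean the volume up with the server
        }
    }
    resp = requests.post(
        f"{COMPUTE_URL}/servers/{server_id}/os-volume_attachments",
        json=body, headers=HEADERS)
    resp.raise_for_status()
    print(resp.json()["volumeAttachment"]["device"])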
This can be used to specify the type of volume which the compute service will create and attach to the server. If not specified, the block storage service will provide a default volume type. See the `block storage volume types API `_ for more details. There are some restrictions on ``volume_type``: - It can be a volume type ID or name. - It is only supported with ``source_type`` of ``blank``, ``image`` or ``snapshot``. - It is only supported with ``destination_type`` of ``volume``. in: body required: false type: string min_version: 2.67 # Optional input parameter in the body for PUT /os-services/{service_id} added # in microversion 2.53. disabled_reason_2_53_in: description: | The reason for disabling a service. The minimum length is 1 and the maximum length is 255. This may only be requested with ``status=disabled``. in: body required: false type: string disabled_reason_body: description: | The reason for disabling a service. in: body required: true type: string disk_available_least: description: | The actual free disk on this hypervisor(in GiB). If allocation ratios used for overcommit are configured, this may be negative. This is intentional as it provides insight into the amount by which the disk is overcommitted. in: body required: true type: integer max_version: 2.87 disk_available_least_total: description: | The actual free disk on all hypervisors(in GiB). If allocation ratios used for overcommit are configured, this may be negative. This is intentional as it provides insight into the amount by which the disk is overcommitted. in: body required: true type: integer disk_bus: description: | Disk bus type, some hypervisors (currently only libvirt) support specify this parameter. Some example disk_bus values can be: ``fdc``, ``ide``, ``sata``, ``scsi``, ``usb``, ``virtio``, ``xen``, ``lxc`` and ``uml``. Support for each bus type depends on the virtualization driver and underlying hypervisor. in: body required: false type: string disk_config: description: | Disk configuration. The value is either: - ``AUTO``. The API builds the server with a single partition the size of the target flavor disk. The API automatically adjusts the file system to fit the entire partition. - ``MANUAL``. The API builds the server by using the partition scheme and file system that is in the source image. If the target flavor disk is larger, The API does not partition the remaining disk space. in: body required: true type: string disk_details_diagnostics: description: | The list of dictionaries with detailed information about VM disks. Following fields are presented in each dictionary: - ``read_bytes`` - Disk reads in bytes (Integer) - ``read_requests`` - Read requests (Integer) - ``write_bytes`` - Disk writes in bytes (Integer) - ``write_requests`` - Write requests (Integer) - ``errors_count`` - Disk errors (Integer) in: body required: true type: array min_version: 2.48 disk_over_commit: description: | Set to ``True`` to enable over commit when the destination host is checked for available disk space. Set to ``False`` to disable over commit. This setting affects only the libvirt virt driver. in: body required: true type: boolean max_version: 2.25 display_description: description: | The volume description. in: body required: true type: string display_description_optional: description: | The volume description. in: body required: false type: string display_name: description: | The volume name. in: body required: true type: string display_name_optional: description: | The volume name. 
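``disabled_reason_2_53_in`` belongs to the 2.53-style ``PUT /os-services/{service_id}`` update, where the service is addressed by UUID and the status and reason are set in one call. A sketch with hypothetical endpoint, token, and service UUID:

.. code-block:: python

    import requests

    COMPUTE_URL = "https://compute.example.com/v2.1"    # hypothetical
    HEADERS = {
        "X-Auth-Token": "gAAAA...",                     # hypothetical (admin)
        "OpenStack-API-Version": "compute 2.53",        # UUID service ids
    }
    service_id = "e81d66a4-ddd3-4aba-8a84-171d1cb4d339" # hypothetical UUID

    body = {
        "status": "disabled",
        "disabled_reason": "scheduled maintenance",  # only valid with disabled
    }
    resp = requests.put(f"{COMPUTE_URL}/os-services/{service_id}",
                        json=body, headers=HEADERS)
    resp.raise_for_status()
    print(resp.json()["service"]["status"])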
in: body required: false type: string driver_diagnostics: description: | The driver on which the VM is running. Possible values are: - ``libvirt`` - ``xenapi`` - ``vmwareapi`` - ``ironic`` in: body required: true type: string min_version: 2.48 ended_at: description: | The date and time when the server was deleted. The date and time stamp format is as follows: :: CCYY-MM-DDThh:mm:ss.NNNNNN For example, ``2015-08-27T09:49:58.123456``. If the server hasn't been deleted yet, its value is ``null``. in: body required: true type: string ended_at_optional: description: | The date and time when the server was deleted. The date and time stamp format is as follows: :: CCYY-MM-DDThh:mm:ss.NNNNNN For example, ``2015-08-27T09:49:58.123456``. If the server hasn't been deleted yet, its value is ``null``. in: body required: false type: string errors: description: | The number of errors. in: body required: true type: integer evacuate: description: | The action to evacuate a server to another host. in: body required: true type: object event: description: | The name of the event. in: body required: true type: string event_details: min_version: 2.84 description: | Details of the event. May be ``null``. in: body required: true type: string event_finish_time: description: | The date and time when the event was finished. The date and time stamp format is `ISO 8601 `_ :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. In the previous example, the offset value is ``-05:00``. in: body required: true type: string event_host: min_version: 2.62 description: | The name of the host on which the event occurred. Policy defaults enable only users with the administrative role to see an instance action event host. Cloud providers can change these permissions through the ``policy.json`` file. in: body required: false type: string event_hostId: min_version: 2.62 description: | An obfuscated hashed host ID string, or the empty string if there is no host for the event. This is a hashed value so will not actually look like a hostname, and is hashed with data from the project_id, so the same physical host as seen by two different project_ids will be different. This is useful when within the same project you need to determine if two events occurred on the same or different physical hosts. in: body required: true type: string event_name: description: | The event name. A valid value is: - ``network-changed`` - ``network-vif-plugged`` - ``network-vif-unplugged`` - ``network-vif-deleted`` - ``volume-extended`` (since microversion ``2.51``) - ``power-update`` (since microversion ``2.76``) - ``accelerator-request-bound`` (since microversion ``2.82``) - ``volume-reimaged`` (since microversion ``2.93``) in: body required: true type: string event_result: description: | The result of the event. in: body required: true type: string event_start_time: description: | The date and time when the event was started. The date and time stamp format is `ISO 8601 `_ :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. In the previous example, the offset value is ``-05:00``. in: body required: true type: string event_status: description: | The event status. A valid value is ``failed``, ``completed``, or ``in-progress``. Default is ``completed``. in: body required: false type: string event_tag: description: | A string value that identifies the event. 
Certain types of events require specific tags: - For the ``accelerator-request-bound`` event, the tag must be the accelerator request UUID. - For the ``power-update`` event the tag must be either ``POWER_ON`` or ``POWER_OFF``. - For the ``volume-extended`` event the tag must be the volume id. in: body required: false type: string event_traceback: description: | The traceback stack if an error occurred in this event. Policy defaults enable only users with the administrative role to see an instance action event traceback. Cloud providers can change these permissions through the ``policy.json`` file. in: body required: true type: string events: description: | List of external events to process. in: body required: true type: array extension: description: | An ``extension`` object. in: body required: true type: object extension_description: description: | Text describing this extension's purpose. in: body required: true type: string extension_links: description: | Links pertaining to this extension. This is a list of dictionaries, each including keys ``href`` and ``rel``. in: body required: true type: array extension_name: description: | Name of the extension. in: body required: true type: string extensions: description: | List of ``extension`` objects. in: body required: true type: array extra_specs: description: | A dictionary of the flavor's extra-specs key-and-value pairs. It appears in the os-extra-specs' "create" REQUEST body, as well as the os-extra-specs' "create" and "list" RESPONSE body. in: body required: true type: object extra_specs_2_47: min_version: 2.47 description: | A dictionary of the flavor's extra-specs key-and-value pairs. This will only be included if the user is allowed by policy to index flavor extra_specs. in: body required: false type: object extra_specs_2_61: min_version: 2.61 description: | A dictionary of the flavor's extra-specs key-and-value pairs. This will only be included if the user is allowed by policy to index flavor extra_specs. in: body required: false type: object fault: description: | A fault object. Only displayed when the server status is ``ERROR`` or ``DELETED`` and a fault occurred. in: body required: false type: object fault_code: description: | The error response code. in: body required: true type: integer fault_created: description: | The date and time when the exception was raised. The date and time stamp format is `ISO 8601 `_ :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. In the previous example, the offset value is ``-05:00``. in: body required: true type: string fault_details: description: | The stack trace. It is available if the response code is not 500 or you have the administrator privilege. in: body required: false type: string fault_message: description: | The error message. in: body required: true type: string fixed_address: description: | The fixed IP address with which you want to associate the floating IP address. in: body required: false type: string fixed_ip: description: | A fixed IPv4 address for the NIC. Valid with a ``neutron`` or ``nova-networks`` network. in: body required: false type: string fixed_ip_address: description: | Fixed IP associated with floating IP network. in: body required: true type: string fixed_ip_host: description: | The hostname of the host that manages the server that is associated with this fixed IP address.
in: body required: true type: string fixed_ip_hostname: description: | The hostname of the server that is associated with this fixed IP address. in: body required: true type: string fixed_ip_obj: description: | A fixed IP address object. in: body required: true type: object fixed_ips: description: | Fixed IP addresses. If you request a specific fixed IP address without a ``net_id``, the request returns a ``Bad Request (400)`` response code. in: body required: false type: array fixed_ips_quota: description: | The number of allowed fixed IP addresses for each tenant. Must be equal to or greater than the number of allowed servers. in: body required: true type: integer max_version: 2.35 fixed_ips_quota_class: &fixed_ips_quota_class description: | The number of allowed fixed IP addresses for the quota class. Must be equal to or greater than the number of allowed servers. in: body required: true type: integer max_version: 2.49 fixed_ips_quota_class_optional: <<: *fixed_ips_quota_class required: false fixed_ips_quota_details: description: | The object of detailed fixed ips quota, including in_use, limit and reserved number of fixed ips. in: body required: true type: object max_version: 2.35 fixed_ips_quota_optional: description: | The number of allowed fixed IP addresses for each tenant. Must be equal to or greater than the number of allowed servers. in: body required: false type: integer max_version: 2.35 fixed_ips_resp: description: | Fixed IP addresses with subnet IDs. in: body required: true type: array flavor: description: | The ID and links for the flavor for your server instance. A flavor is a combination of memory, disk size, and CPUs. in: body required: true type: object flavor_access: description: | A list of objects, each with the keys ``flavor_id`` and ``tenant_id``. in: body required: true type: array flavor_cpus: in: body required: true type: integer description: | The number of virtual CPUs that will be allocated to the server. flavor_cpus_2_47: min_version: 2.47 in: body required: true type: integer description: | The number of virtual CPUs that were allocated to the server. flavor_description: type: string in: body required: false min_version: 2.55 description: | A free form description of the flavor. Limited to 65535 characters in length. Only printable characters are allowed. flavor_description_required: type: string in: body required: true description: | A free form description of the flavor. Limited to 65535 characters in length. Only printable characters are allowed. flavor_description_resp: description: | The description of the flavor. in: body required: true type: string min_version: 2.55 flavor_description_resp_no_min: description: | The description of the flavor. in: body required: true type: string flavor_disabled: in: body required: false type: boolean description: | Whether or not the flavor has been administratively disabled. This is an artifact of the legacy v2 API and will always be set to ``false``. There is currently no way to disable a flavor and set this to ``true``. flavor_disk: in: body required: true type: integer description: | The size of the root disk that will be created in GiB. If 0 the root disk will be set to exactly the size of the image used to deploy the instance. However, in this case the scheduler cannot select the compute host based on the virtual image size. Therefore, 0 should only be used for volume booted instances or for testing purposes. 
Volume-backed instances can be enforced for flavors with zero root disk via the ``os_compute_api:servers:create:zero_disk_flavor`` policy rule. flavor_disk_2_47: min_version: 2.47 in: body required: true type: integer description: | The size of the root disk that was created in GiB. flavor_ephem_disk: in: body required: true type: integer description: | The size of the ephemeral disk that will be created, in GiB. Ephemeral disks may be written over on server state changes. So should only be used as a scratch space for applications that are aware of its limitations. Defaults to 0. flavor_ephem_disk_2_47: min_version: 2.47 in: body required: true type: integer description: | The size of the ephemeral disk that was created, in GiB. flavor_ephem_disk_in: in: body required: false type: integer description: | The size of the ephemeral disk that will be created, in GiB. Ephemeral disks may be written over on server state changes. So should only be used as a scratch space for applications that are aware of its limitations. Defaults to 0. flavor_extra_spec_key2: description: | The extra spec key of a flavor. It appears in the os-extra-specs' "create" and "update" REQUEST body, as well as the os-extra-specs' "create", "list", "show", and "update" RESPONSE body. in: body required: true type: string flavor_extra_spec_key_2_47: description: | The extra spec key of a flavor. in: body required: true type: string min_version: 2.47 flavor_extra_spec_value: description: | The extra spec value of a flavor. It appears in the os-extra-specs' "create" and "update" REQUEST body, as well as the os-extra-specs' "create", "list", "show", and "update" RESPONSE body. in: body required: true type: string flavor_extra_spec_value_2_47: description: | The extra spec value of a flavor. in: body required: true type: string min_version: 2.47 flavor_id_body: description: | The ID of the flavor. While people often make this look like an int, this is really a string. in: body required: true type: string flavor_id_body_2_46: description: | The ID of the flavor. While people often make this look like an int, this is really a string. in: body required: true type: string max_version: 2.46 flavor_id_body_create: description: | Only alphanumeric characters with hyphen '-', underscore '_', spaces and dots '.' are permitted. If an ID is not provided, then a default UUID will be assigned. in: body required: false type: string flavor_is_public: description: | Whether the flavor is public (available to all projects) or scoped to a set of projects. Default is True if not specified. in: body required: true type: boolean flavor_is_public_in: description: | Whether the flavor is public (available to all projects) or scoped to a set of projects. Default is True if not specified. in: body required: false type: boolean flavor_links_2_46: description: | Links to the flavor resource. See `API Guide / Links and References `_ for more info. in: body required: true type: array max_version: 2.46 flavor_name: description: | The display name of a flavor. in: body required: true type: string flavor_name_optional: description: | The display name of a flavor. in: body required: false type: string flavor_original_name: description: | The display name of a flavor. in: body required: true type: string min_version: 2.47 flavor_ram: description: | The amount of RAM a flavor has, in MiB. in: body required: true type: integer flavor_ram_2_47: description: | The amount of RAM a flavor has, in MiB. 
in: body required: true type: integer min_version: 2.47 flavor_rxtx_factor: description: | The receive / transmit factor (as a float) that will be set on ports if the network backend supports the QOS extension. Otherwise it will be ignored. It defaults to 1.0. in: body required: true type: float flavor_rxtx_factor_in: description: | The receive / transmit factor (as a float) that will be set on ports if the network backend supports the QOS extension. Otherwise it will be ignored. It defaults to 1.0. in: body required: false type: float flavor_server: description: | Before microversion 2.47 this contains the ID and links for the flavor used to boot the server instance. This can be an empty object in case flavor information is no longer present in the system. As of microversion 2.47 this contains a subset of the actual flavor information used to create the server instance, represented as a nested dictionary. in: body required: true type: object flavor_swap: description: | The size of a dedicated swap disk that will be allocated, in MiB. If 0 (the default), no dedicated swap disk will be created. Currently, the empty string ('') is used to represent 0. As of microversion 2.75 default return value of swap is 0 instead of empty string. in: body required: true type: integer flavor_swap_2_47: description: | The size of a dedicated swap disk that was allocated, in MiB. in: body required: true type: integer min_version: 2.47 flavor_swap_in: description: | The size of a dedicated swap disk that will be allocated, in MiB. If 0 (the default), no dedicated swap disk will be created. in: body required: false type: integer flavorRef: description: | The flavor reference, as an ID (including a UUID) or full URL, for the flavor for your server instance. in: body required: true type: string flavorRef_resize: description: | The flavor ID for resizing the server. The size of the disk in the flavor being resized to must be greater than or equal to the size of the disk in the current flavor. If a specified flavor ID is the same as the current one of the server, the request returns a ``Bad Request (400)`` response code. in: body required: true type: string flavors: description: | An array of flavor objects. in: body required: true type: array floating_ip: description: | The floating ip address. in: body required: true type: string floating_ip_bulk_object: description: | The floating ip bulk address object. in: body required: true type: object floating_ip_id_value: description: | The floating IP id value. .. note:: For nova-network, the value will be of type integer, whereas for neutron, the value will be of type string. in: body required: true type: string floating_ip_obj: description: | A floating IP address object. in: body required: true type: object floating_ip_pool_name: description: | The name of the floating IP pool. in: body required: true type: string floating_ip_pool_name_optional: description: | The name of the floating IP pool in: body required: false type: string floating_ip_pool_name_or_id: description: | The name or ID of the floating IP pool. in: body required: true type: string floating_ip_pools: description: | The ``floating_ip_pools`` object. in: body required: true type: array floating_ips: description: | The number of allowed floating IP addresses for each tenant. in: body required: true type: integer max_version: 2.35 floating_ips_list: description: | An array of floating ip objects. 
in: body required: true type: array floating_ips_quota_class: &floating_ips_quota_class description: | The number of allowed floating IP addresses for the quota class. in: body required: true type: integer max_version: 2.49 floating_ips_quota_class_optional: <<: *floating_ips_quota_class required: false floating_ips_quota_details: description: | The object of detailed floating ips quota, including in_use, limit and reserved number of floating ips. in: body required: true type: object max_version: 2.35 floating_ips_quota_optional: description: | The number of allowed floating IP addresses for each tenant. in: body required: false type: integer max_version: 2.35 force: description: | You can force the update even if the quota has already been used and the reserved quota exceeds the new quota. To force the update, specify the ``"force": "True"``. Default is ``False``. in: body required: false type: boolean force_evacuate: description: | Force an evacuation by not verifying the provided destination host by the scheduler. .. warning:: This could result in failures to actually evacuate the instance to the specified host. It is recommended to either not specify a host so that the scheduler will pick one, or specify a host without ``force=True`` set. Furthermore, this should not be specified when evacuating instances managed by a clustered hypervisor driver like ironic since you cannot specify a node, so the compute service will pick a node randomly which may not be able to accommodate the instance. in: body required: false type: boolean min_version: 2.29 max_version: 2.67 force_live_migrate: description: | Force a live-migration by not verifying the provided destination host by the scheduler. .. warning:: This could result in failures to actually live migrate the instance to the specified host. It is recommended to either not specify a host so that the scheduler will pick one, or specify a host without ``force=True`` set. in: body required: false type: boolean min_version: 2.30 max_version: 2.67 force_migration_complete: description: | The action to force an in-progress live migration to complete. in: body required: true type: none force_snapshot: description: | Indicates whether to create a snapshot, even if the volume is attached. in: body required: false type: boolean # This is both the request and response parameter for # PUT /os-services/force-down which was added in 2.11. forced_down_2_11: description: | Whether or not this service was forced down manually by an administrator after the service was fenced. This value is useful to know that some 3rd party has verified the service should be marked down. in: body required: true type: boolean min_version: 2.11 # This is the optional request input parameter for # PUT /os-services/{service_id} added in 2.53. forced_down_2_53_in: description: | ``forced_down`` is a manual override to tell nova that the service in question has been fenced manually by the operations team (either hard powered off, or network unplugged). That signals that it is safe to proceed with ``evacuate`` or other operations that nova has safety checks to prevent for hosts that are up. .. warning:: Setting a service forced down without completely fencing it will likely result in the corruption of VMs on that host. in: body required: false type: boolean # This is the response output parameter for # PUT /os-services/{service_id} added in 2.53. forced_down_2_53_out: description: | Whether or not this service was forced down manually by an administrator after the service was fenced. 
This value is useful to know that some 3rd party has verified the service should be marked down. in: body required: true type: boolean forceDelete: description: | The action. in: body required: true type: none free_ram_mb: description: | The free RAM in this hypervisor(in MiB). This does not take allocation ratios used for overcommit into account so this value may be negative. in: body required: true type: integer max_version: 2.87 free_ram_mb_total: description: | The free RAM on all hypervisors(in MiB). This does not take allocation ratios used for overcommit into account so this value may be negative. in: body required: true type: integer from_port: description: | The port at start of range. in: body required: true type: integer group: description: | A ``group`` object. Includes the ``tenant_id`` and the source security group ``name``. in: body required: true type: object group_id: description: | The source security group ID. in: body required: false type: string guest_format: description: | Specifies the guest server disk file system format, such as ``ext2``, ``ext3``, ``ext4``, ``xfs`` or ``swap``. Swap block device mappings have the following restrictions: * The ``source_type`` must be ``blank`` * The ``destination_type`` must be ``local`` * There can only be one swap disk per server * The size of the swap disk must be less than or equal to the ``swap`` size of the flavor in: body required: false type: string host: description: | The name or ID of the host to which the server is evacuated. If you omit this parameter, the scheduler chooses a host. .. warning:: Prior to microversion 2.29, specifying a host will bypass validation by the scheduler, which could result in failures to actually evacuate the instance to the specified host, or over-subscription of the host. It is recommended to either not specify a host so that the scheduler will pick one, or specify a host with microversion >= 2.29 and without ``force=True`` set. in: body required: false type: string host_cpu: description: | The number of virtual CPUs on the host. in: body required: true type: integer host_disk_gb: description: | The disk size on the host (in GiB). in: body required: true type: integer host_done_num: description: | The number of the hosts whose instance audit tasks have been done. in: body required: true type: integer host_ip: description: | The IP address of the hypervisor's host. in: body required: true type: string host_maintenance_mode: description: | Mode of maintenance state, either ``on_maintenance`` or ``off_maintenance``. in: body required: false type: string host_maintenance_mode_in: description: | Mode of maintenance state, either ``enable`` or ``disable``. in: body required: false type: string host_memory_mb: description: | The memory size on the host (in MiB). in: body required: true type: integer host_migration: description: | The host to which to migrate the server. If this parameter is ``None``, the scheduler chooses a host. .. warning:: Prior to microversion 2.30, specifying a host will bypass validation by the scheduler, which could result in failures to actually migrate the instance to the specified host, or over-subscription of the host. It is recommended to either not specify a host so that the scheduler will pick one, or specify a host with microversion >= 2.30 and without ``force=True`` set. in: body required: true type: string host_migration_2_56: description: | The host to which to migrate the server. 
If you specify ``null`` or don't specify this parameter, the scheduler chooses a host. in: body required: false type: string min_version: 2.56 host_name_body: description: | The name of the host. in: body required: true type: string host_not_run: description: | A list of the hosts whose instance audit tasks have not run. in: body required: true type: array host_not_run_num: description: | The number of the hosts whose instance audit tasks have not run. in: body required: true type: integer host_num: description: | The number of the hosts. in: body required: true type: integer host_power_action: description: | The power action on the host. in: body required: true type: string host_project: description: | The project id (or special name like total, used_now, used_max). in: body required: true type: string host_resource: description: | The resource info of the host. in: body required: true type: object host_resource_array: description: | The array that includes resource info of the host. in: body required: true type: array host_running_num: description: | The number of the hosts whose instance audit tasks are running. in: body required: true type: integer host_service: description: | The name of the service which is running on the host. in: body required: true type: string host_status: description: | The host status. Values where next value in list can override the previous: - ``UP`` if nova-compute up. - ``UNKNOWN`` if nova-compute not reported by servicegroup driver. - ``DOWN`` if nova-compute forced down. - ``MAINTENANCE`` if nova-compute is disabled. - Empty string indicates there is no host for server. This attribute appears in the response only if the policy permits. By default, only administrators can get this parameter. in: body required: false type: string min_version: 2.16 host_status_body: description: | The status of the current host, either ``enabled`` or ``disabled``. in: body required: false type: string host_status_body_in: description: | The status of the host, either ``enable`` or ``disable``. in: body required: false type: string host_status_update_rebuild: description: | The host status. Values where next value in list can override the previous: - ``UP`` if nova-compute up. - ``UNKNOWN`` if nova-compute not reported by servicegroup driver. - ``DOWN`` if nova-compute forced down. - ``MAINTENANCE`` if nova-compute is disabled. - Empty string indicates there is no host for server. This attribute appears in the response only if the policy permits. By default, only administrators can get this parameter. in: body required: false type: string min_version: 2.75 host_unshelve: description: | The destination host name. Specifying a destination host is by default only allowed to project_admin; if that is not the case, an HTTP 403 (Forbidden) response is returned. in: body required: false type: string min_version: 2.91 host_zone: description: | The availability zone of the host. in: body required: true type: string hostId: description: | An ID string representing the host. This is a hashed value so will not actually look like a hostname, and is hashed with data from the project_id, so the same physical host as seen by two different project_ids will be different. It is useful when within the same project you need to determine if two instances are on the same or different physical hosts for the purposes of availability or performance. in: body required: true type: string hosts: description: | An array of host information.
in: body required: true type: array hosts.availability_zone: description: | An object containing a list of host information. The host information is comprised of host and service objects. The service object returns three parameters representing the states of the service: ``active``, ``available``, and ``updated_at``. in: body required: true type: object hosts.availability_zone_none: description: | It is always ``null``. in: body required: true type: none hours: description: | The duration that the server exists (in hours). in: body required: true type: float hours_optional: description: | The duration that the server exists (in hours). in: body required: false type: float hypervisor: description: | The hypervisor object. in: body required: true type: object hypervisor_count: description: | The number of hypervisors. in: body required: true type: integer hypervisor_diagnostics: description: | The hypervisor on which the VM is running. Examples for libvirt driver may be: ``qemu``, ``kvm`` or ``xen``. in: body required: true type: string min_version: 2.48 hypervisor_free_disk_gb: description: | The free disk remaining on this hypervisor(in GiB). This does not take allocation ratios used for overcommit into account so this value may be negative. in: body required: true type: integer max_version: 2.87 hypervisor_free_disk_gb_total: description: | The free disk remaining on all hypervisors(in GiB). This does not take allocation ratios used for overcommit into account so this value may be negative. in: body required: true type: integer hypervisor_hostname: description: | The hypervisor host name provided by the Nova virt driver. For the Ironic driver, it is the Ironic node uuid. in: body required: true type: string hypervisor_id_body: description: | The id of the hypervisor. in: body required: true type: integer max_version: 2.52 hypervisor_id_body_no_version: description: | The id of the hypervisor. in: body required: true type: integer hypervisor_id_body_uuid: description: | The id of the hypervisor as a UUID. in: body required: true type: string min_version: 2.53 hypervisor_links: description: | Links to the hypervisors resource. See `API Guide / Links and References `_ for more info. in: body type: array min_version: 2.33 required: false hypervisor_os_diagnostics: description: | The hypervisor OS. in: body type: string required: true min_version: 2.48 hypervisor_servers: description: | A list of ``server`` objects. Before microversion 2.75, this field is only returned if non-empty. From microversion 2.75, this field will always be returned unless the ``with_servers`` query parameter is provided and is set to ``false``. in: body required: false type: array min_version: 2.53 hypervisor_servers_name: description: | The server name. in: body type: string min_version: 2.53 hypervisor_servers_uuid: description: | The server ID. in: body type: string min_version: 2.53 hypervisor_service: description: | The hypervisor service object. in: body required: true type: object hypervisor_state: description: | The state of the hypervisor. One of ``up`` or ``down``. in: body required: true type: string hypervisor_statistics: description: | The hypervisors statistics summary object. in: body required: true type: object hypervisor_status: description: | The status of the hypervisor. One of ``enabled`` or ``disabled``. in: body required: true type: string hypervisor_type: in: body required: true type: string description: | The hypervisor type for the agent. Currently only ``xen`` is supported. 
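The hypervisor resource fields documented above (``free_ram_mb``, ``hypervisor_free_disk_gb``, ``hypervisor_state``, ``hypervisor_status`` and the per-cloud ``hypervisor_statistics`` totals) are returned by ``GET /os-hypervisors/detail`` and ``GET /os-hypervisors/statistics``. A minimal client-side sketch is shown below; it is not part of the Nova tree, and the endpoint URL and token are placeholder assumptions. Note that the per-hypervisor usage fields carry ``max_version: 2.87``, so the sketch pins a microversion that still includes them.

    # Minimal sketch (not from the Nova tree). Endpoint and token are assumptions.
    import requests

    NOVA_ENDPOINT = "http://controller:8774/v2.1"   # assumed compute endpoint
    TOKEN = "<keystone-token>"                      # assumed auth token

    headers = {
        "X-Auth-Token": TOKEN,
        # free_ram_mb, free_disk_gb, etc. are only returned up to
        # microversion 2.87, so pin a version that still includes them.
        "OpenStack-API-Version": "compute 2.87",
    }

    # Per-hypervisor view: GET /os-hypervisors/detail
    resp = requests.get(f"{NOVA_ENDPOINT}/os-hypervisors/detail", headers=headers)
    resp.raise_for_status()
    for hyp in resp.json()["hypervisors"]:
        # free_* values may be negative when overcommit allocation ratios are used.
        print(hyp["hypervisor_hostname"], hyp["state"], hyp["status"],
              "free_ram_mb=%s" % hyp["free_ram_mb"],
              "free_disk_gb=%s" % hyp["free_disk_gb"])

    # Aggregate view across all hypervisors: GET /os-hypervisors/statistics
    stats = requests.get(f"{NOVA_ENDPOINT}/os-hypervisors/statistics",
                         headers=headers).json()["hypervisor_statistics"]
    print("total vcpus:", stats["vcpus"], "used:", stats["vcpus_used"])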
hypervisor_type_body: description: | The hypervisor type. in: body required: true type: string hypervisor_uptime: description: | The response format of this api depends on the virt driver in use on a given host. The libvirt driver returns the output of the `uptime` command directly, the z/VM driver returns the `ILP` time. All other drivers always return `null`. Note this value is cached and updated periodically. in: body required: true type: string min_version: 2.88 hypervisor_vcpus: description: | The number of vCPU in this hypervisor. This does not take allocation ratios used for overcommit into account so there may be disparity between this and the used count. in: body required: true type: integer max_version: 2.87 hypervisor_vcpus_total: description: | The number of vCPU on all hypervisors. This does not take allocation ratios used for overcommit into account so there may be disparity between this and the used count. in: body required: true type: integer hypervisor_vcpus_used: description: | The number of vCPU used in this hypervisor. in: body required: true type: integer max_version: 2.87 hypervisor_vcpus_used_total: description: | The number of vCPU used on all hypervisors. in: body required: true type: integer hypervisor_version: description: | The hypervisor version. in: body required: true type: integer hypervisors: description: | An array of hypervisor information. in: body required: true type: array image: description: | The UUID and links for the image for your server instance. The ``image`` object will be an empty string when you boot the server from a volume. in: body required: true type: object image_id_body: description: | The ID of the Image. in: body required: true type: string image_metadata: description: | Metadata key and value pairs for the image. The maximum size for each metadata key and value pair is 255 bytes. in: body required: false type: object image_metadata_items: description: | The number of allowed metadata items for each image. Starting from version 2.39 this field is dropped from 'os-limits' response, because 'image-metadata' proxy API was deprecated. in: body required: true type: integer max_version: 2.38 image_name: description: | The display name of an Image. in: body required: true type: string image_progress: description: | A percentage value of the image save progress. This can be one of: - ``ACTIVE``: 100 - ``SAVING``: 25 or 50 in: body required: true type: integer image_properties: description: | The image properties key/value pairs. in: body required: true type: object min_version: 2.98 image_server: description: | The server booted from image. in: body required: false type: object image_size: description: | The size of the image. in: body required: true type: integer image_status: description: | The status of image, as a string. This can be one of: - ``ACTIVE``: image is in active state - ``SAVING``: image is in queued or in saving process - ``DELETED``: image is deleted or in progress of deletion - ``ERROR``: image is in error state - ``UNKNOWN``: image is in unknown state in: body required: true type: string imageRef: description: | The UUID of the image to use for your server instance. This is not required in case of boot from volume. In all other cases it is required and must be a valid UUID otherwise API will return 400. in: body required: false type: string imageRef_rebuild: description: | The UUID of the image to rebuild for your server instance. It must be a valid UUID otherwise API will return 400. 
To rebuild a volume-backed server with a new image, at least microversion 2.93 needs to be provided in the request else the request will fall back to old behaviour i.e. the API will return 400 (for an image different from the image used when creating the volume). For non-volume-backed servers, specifying a new image will result in validating that the image is acceptable for the current compute host on which the server exists. If the new image is not valid, the server will go into ``ERROR`` status. in: body required: true type: string images: description: | An array of Image objects. in: body required: true type: array injected_file_content_bytes: description: | The number of allowed bytes of content for each injected file. in: body required: true type: integer max_version: 2.56 injected_file_content_bytes_quota_details: description: | The object of detailed injected file content bytes quota, including in_use, limit and reserved number of injected file content bytes. in: body required: true type: object max_version: 2.56 injected_file_content_bytes_quota_optional: description: | The number of allowed bytes of content for each injected file. in: body required: false type: integer max_version: 2.56 injected_file_path_bytes: description: | The number of allowed bytes for each injected file path. in: body required: true type: integer max_version: 2.56 injected_file_path_bytes_quota_details: description: | The object of detailed injected file path bytes quota, including in_use, limit and reserved number of injected file path bytes. in: body required: true type: object max_version: 2.56 injected_file_path_bytes_quota_optional: description: | The number of allowed bytes for each injected file path. in: body required: false type: integer max_version: 2.56 injected_files: &injected_files description: | The number of allowed injected files for each tenant. in: body required: true type: integer max_version: 2.56 injected_files_quota_class: &injected_files_quota_class <<: *injected_files description: | The number of allowed injected files for the quota class. injected_files_quota_class_optional: <<: *injected_files_quota_class required: false injected_files_quota_details: description: | The object of detailed injected files quota, including in_use, limit and reserved number of injected files. in: body required: true type: object max_version: 2.56 injected_files_quota_optional: description: | The number of allowed injected files for each tenant. in: body required: false type: integer max_version: 2.56 injectNetworkInfo: description: | The action. in: body required: true type: none instance_action_events_2_50: description: | The events which occurred in this action in descending order of creation. Policy defaults enable only users with the administrative role to see instance action event information. Cloud providers can change these permissions through the ``policy.json`` file. in: body required: false type: array max_version: 2.50 instance_action_events_2_51: description: | The events which occurred in this action in descending order of creation. Policy defaults enable only users with the administrative role or the owner of the server to see instance action event information. Cloud providers can change these permissions through the ``policy.json`` file. in: body required: true type: array min_version: 2.51 instance_actions_next_links: description: | Links pertaining to the instance action. This parameter is returned when paging and more data is available. See `Paginated collections `__ for more info. 
in: body required: false type: array min_version: 2.58 instance_id_body: description: | The UUID of the server. in: body required: true type: string instance_id_cloudpipe: description: | The UUID of the cloudpipe instance. in: body required: true type: string instance_name: description: | The name of the instance. in: body required: true type: string instance_usage_audit_log: description: | The object of instance usage audit logs. in: body required: true type: object instance_usage_audit_log_message: description: | The log message of the instance usage audit task. in: body required: true type: string instance_usage_audit_logs: description: | The object of instance usage audit log information. in: body required: true type: object instance_usage_audit_task_state: description: | The state of the instance usage audit task. ``DONE`` or ``RUNNING``. in: body required: true type: string instanceAction: description: | The instance action object. in: body required: true type: object instanceActions: description: | List of the actions for the given instance in descending order of creation. in: body required: true type: array instances: &instances description: | The number of allowed servers for each tenant. in: body required: true type: integer instances_quota_class: &instances_quota_class <<: *instances description: | The number of allowed servers for the quota class. instances_quota_class_optional: <<: *instances_quota_class required: false instances_quota_details: description: | The object of detailed servers quota, including in_use, limit and reserved number of instances. in: body required: true type: object instances_quota_optional: description: | The number of allowed servers for each tenant. in: body required: false type: integer instances_usage_audit: description: | The number of instances. in: body required: true type: integer interfaceAttachment: description: | Specify the ``interfaceAttachment`` action in the request body. in: body required: true type: string interfaceAttachment_resp: description: | The interface attachment. in: body required: true type: object interfaceAttachments: description: | List of the interface attachments. in: body required: true type: array internal_access_path: description: | The id representing the internal access path. in: body required: false type: string ip_address: description: | The IP address. in: body required: true type: string ip_address_req: description: | The IP address. It is required when ``fixed_ips`` is specified. in: body required: true type: string ip_host: description: | The name or ID of the host associated to the IP. in: body required: true type: string ip_protocol: description: | The IP protocol. A valid value is ICMP, TCP, or UDP. in: body required: true type: string ip_range: description: | The range of IP addresses to use for creating floating IPs. in: body required: true type: string ip_range_delete: description: | The range of IP addresses from which to bulk-delete floating IPs. in: body required: true type: string key_name: description: | Key pair name. .. note:: The ``null`` value was allowed in the Nova legacy v2 API, but due to strict input validation, it is not allowed in the Nova v2.1 API. in: body required: false type: string key_name_rebuild_req: description: | Key pair name for rebuild API. If ``null`` is specified, the existing keypair is unset. .. note:: Users within the same project are able to rebuild other user's instances in that project with a new keypair. 
Keys are owned by users (which is the only resource that's true of). Servers are owned by projects. Because of this a rebuild with a key_name is looking up the keypair by the user calling rebuild. in: body required: false type: string min_version: 2.54 key_name_rebuild_resp: description: | The name of associated key pair, if any. in: body required: true type: string min_version: 2.54 key_name_resp: description: | The name of associated key pair, if any. in: body required: true type: string key_name_resp_update: description: | The name of associated key pair, if any. in: body required: true type: string min_version: 2.75 key_pairs: &key_pairs description: | The number of allowed key pairs for each user. in: body required: true type: integer key_pairs_quota_class: &key_pairs_quota_class <<: *key_pairs description: | The number of allowed key pairs for the quota class. key_pairs_quota_class_optional: <<: *key_pairs_quota_class required: false key_pairs_quota_details: description: | The object of detailed key pairs quota, including in_use, limit and reserved number of key pairs. .. note:: ``in_use`` field value for keypair quota details is always zero. In Nova, key_pairs are a user-level resource, not a project- level resource, so for legacy reasons, the keypair in-use information is not counted. in: body required: true type: object key_pairs_quota_optional: description: | The number of allowed key pairs for each user. in: body required: false type: integer keypair: in: body type: object required: true description: | Keypair object keypair_deleted: description: | A boolean indicates whether this keypair is deleted or not. The value is always ``false`` (not deleted). in: body required: true type: boolean keypair_fingerprint: in: body required: true type: string description: | The fingerprint for the keypair. keypair_id: description: | The keypair ID. in: body required: true type: integer keypair_links: description: | Links pertaining to keypair. See `API Guide / Links and References `_ for more info. in: body type: array required: false min_version: 2.35 keypair_name: in: body required: true type: string description: | The name for the keypair. keypair_name_in: in: body required: true type: string description: | A name for the keypair which will be used to reference it later. .. note:: Since microversion 2.92, allowed characters are ASCII letters ``[a-zA-Z]``, digits ``[0-9]`` and the following special characters: ``[@._- ]``. keypair_private_key: description: | If you do not provide a public key on create, a new keypair will be built for you, and the private key will be returned during the initial create call. Make sure to save this, as there is no way to get this private key again in the future. in: body required: false type: string max_version: 2.91 keypair_public_key: description: | The keypair public key. in: body required: true type: string keypair_public_key_in: description: | The public ssh key to import. Was optional before microversion 2.92 : if you were omitting this value, a keypair was generated for you. in: body required: true type: string keypair_type: in: body required: true type: string description: | The type of the keypair. Allowed values are ``ssh`` or ``x509``. min_version: 2.2 keypair_type_in: in: body required: false type: string description: | The type of the keypair. Allowed values are ``ssh`` or ``x509``. min_version: 2.2 keypair_updated_deleted_at: description: | It is always ``null``. 
in: body required: true type: none # NOTE(mriedem): This is the user_id description for the keypair create/show # response which has always been returned. keypair_userid: in: body required: true type: string description: | The user_id for a keypair. keypair_userid_in: in: body required: false type: string description: | The user_id for a keypair. This allows administrative users to upload keys for other users than themselves. min_version: 2.10 keypairs: in: body type: array required: true description: | Array of Keypair objects length: description: | The number of lines to fetch from the end of console log. All lines will be returned if this is not specified. .. note:: This parameter can be specified as not only 'integer' but also 'string'. in: body required: false type: integer limits: description: | Data structure that contains both absolute limits within a deployment. in: body required: true type: object limits_absolutes: description: | Name/value pairs that set quota limits within a deployment and Name/value pairs of resource usage. in: body required: true type: object limits_rate_uri: description: | A human readable URI that is used as a friendly description of where the api rate limit is applied. in: body required: true type: string limits_rates: description: | An empty list for backwards compatibility purposes. in: body required: true type: array links: description: | Links to the resources in question. See `API Guide / Links and References `_ for more info. in: body required: true type: array local_gb: description: | The disk in this hypervisor (in GiB). This does not take allocation ratios used for overcommit into account so there may be disparity between this and the used count. in: body required: true type: integer max_version: 2.87 local_gb_simple_tenant_usage: description: | The sum of the root disk size of the server and the ephemeral disk size of it (in GiB). in: body required: true type: integer local_gb_simple_tenant_usage_optional: description: | The sum of the root disk size of the server and the ephemeral disk size of it (in GiB). in: body required: false type: integer local_gb_total: description: | The disk on all hypervisors (in GiB). This does not take allocation ratios used for overcommit into account so there may be disparity between this and the used count. in: body required: true type: integer local_gb_used: description: | The disk used in this hypervisor (in GiB). in: body required: true type: integer max_version: 2.87 local_gb_used_total: description: | The disk used on all hypervisors (in GiB). in: body required: true type: integer lock: description: | The action to lock a server. This parameter can be ``null``. Up to microversion 2.73, this parameter should be ``null``. in: body required: true type: object locked: description: | True if the instance is locked otherwise False. in: body required: true type: boolean min_version: 2.9 locked_reason_req: description: | The reason behind locking a server. Limited to 255 characters in length. in: body required: false type: string min_version: 2.73 locked_reason_resp: description: | The reason behind locking a server. in: body required: true type: string min_version: 2.73 mac_addr: description: | The MAC address. in: body required: true type: string mac_address: description: | The MAC address. in: body required: true type: string md5hash: description: | The MD5 hash. in: body required: true type: string media_types: description: | The `media types `_. It is an array of a fixed dict. .. 
note:: It is vestigial and provide no useful information. It will be deprecated and removed in the future. in: body required: true type: array members: description: | A list of members in the server group. in: body required: true type: array memory_details_diagnostics: description: | The dictionary with information about VM memory usage. Following fields are presented in the dictionary: - ``maximum`` - Amount of memory provisioned for the VM in MiB (Integer) - ``used`` - Amount of memory that is currently used by the guest operating system and its applications in MiB (Integer) in: body required: true type: array min_version: 2.48 memory_mb: description: | The memory of this hypervisor (in MiB). This does not take allocation ratios used for overcommit into account so there may be disparity between this and the used count. in: body required: true type: integer max_version: 2.87 memory_mb_simple_tenant_usage: description: | The memory size of the server (in MiB). in: body required: true type: integer memory_mb_simple_tenant_usage_optional: description: | The memory size of the server (in MiB). in: body required: false type: integer memory_mb_total: description: | The memory of all hypervisors (in MiB). This does not take allocation ratios used for overcommit into account so there may be disparity between this and the used count. in: body required: true type: integer memory_mb_used: description: | The memory used in this hypervisor (in MiB). in: body required: true type: integer max_version: 2.87 memory_mb_used_total: description: | The memory used on all hypervisors(in MiB). in: body required: true type: integer message: description: | The related error message for when an action fails. in: body required: true type: string meta: description: | The object of detailed key metadata items. in: body required: true type: object metadata: description: | Metadata key and value pairs. The maximum size of the metadata key and value is 255 bytes each. in: body required: false type: object metadata_compat: description: | A dictionary of metadata key-and-value pairs, which is maintained for backward compatibility. in: body required: true type: object metadata_items: description: | The number of allowed metadata items for each server. in: body required: true type: integer metadata_items_quota_details: description: | The object of detailed key metadata items quota, including in_use, limit and reserved number of metadata items. in: body required: true type: object metadata_items_quota_optional: description: | The number of allowed metadata items for each server. in: body required: false type: integer metadata_object: description: | Metadata key and value pairs. The maximum size for each metadata key and value pair is 255 bytes. in: body required: true type: object metadata_server_group_max_2_63: description: | Metadata key and value pairs. The maximum size for each metadata key and value pair is 255 bytes. It's always empty and only used for keeping compatibility. in: body required: true type: object max_version: 2.63 migrate: description: | The action to cold migrate a server. This parameter can be ``null``. Up to microversion 2.55, this parameter should be ``null``. in: body required: true type: object migrate_dest_compute: description: | The target compute for a migration. in: body required: true type: string migrate_dest_host: description: | The target host for a migration. in: body required: true type: string migrate_dest_node: description: | The target node for a migration. 
in: body required: true type: string migrate_disk_processed_bytes: description: | The amount of disk, in bytes, that has been processed during the migration. in: body required: true type: integer migrate_disk_remaining_bytes: description: | The amount of disk, in bytes, that still needs to be migrated. in: body required: true type: integer migrate_disk_total_bytes: description: | The total amount of disk, in bytes, that needs to be migrated. in: body required: true type: integer migrate_memory_processed_bytes: description: | The amount of memory, in bytes, that has been processed during the migration. in: body required: true type: integer migrate_memory_remaining_bytes: description: | The amount of memory, in bytes, that still needs to be migrated. in: body required: true type: integer migrate_memory_total_bytes: description: | The total amount of memory, in bytes, that needs to be migrated. in: body required: true type: integer migrate_source_compute: description: | The source compute for a migration. in: body required: true type: string migrate_source_node: description: | The source node for a migration. in: body required: true type: string migrate_status: description: | The current status of the migration. in: body required: true type: string migration: description: | The server migration object. in: body required: true type: object migration_id: description: | The ID of the server migration. in: body required: true type: integer migration_links_2_23: description: | Links to the migration. This parameter is returned if the migration type is ``live-migration`` and the migration status is one of ``queued``, ``preparing``, ``running`` and ``post-migrating``. See `Paginated collections `__ for more info. in: body required: false type: array min_version: 2.23 migration_new_flavor_id: description: | In ``resize`` case, the flavor ID for resizing the server. In the other cases, this parameter is same as the flavor ID of the server when the migration was started. .. note:: This is an internal ID and is not exposed in any other API. In particular, this is not the ID specified or automatically generated during flavor creation or returned via the ``GET /flavors`` API. in: body required: true type: integer migration_next_links_2_59: description: | Links pertaining to the migration. This parameter is returned when paging and more data is available. See `Paginated collections `__ for more info. in: body required: false type: array min_version: 2.59 migration_old_flavor_id: description: | The flavor ID of the server when the migration was started. .. note:: This is an internal ID and is not exposed in any other API. In particular, this is not the ID specified or automatically generated during flavor creation or returned via the ``GET /flavors`` API. in: body required: true type: integer migration_type_2_23: description: | The type of the server migration. This is one of ``live-migration``, ``migration``, ``resize`` and ``evacuation``. in: body required: true type: string min_version: 2.23 migration_uuid: description: | The UUID of the migration. in: body required: true type: string min_version: 2.59 migrations: description: | The list of server migration objects. in: body required: true type: array minDisk_body: description: | The minimum amount of disk space an image requires to boot, in GiB. For example, ``100``. in: body required: true type: integer minRam_body: description: | The minimum amount of RAM an image requires to function, in MiB. For example, ``512``. 
in: body required: true type: integer name: description: | The security group name. in: body required: true type: string name_sec_group_optional: description: | The security group name. in: body required: false type: string name_server_group: description: | The name of the server group. in: body required: true type: string name_update_rebuild: description: | The security group name. in: body required: true type: string min_version: 2.75 namespace: description: | A URL pointing to the namespace for this extension. in: body required: true type: string net_id: description: | The ID of the network for which you want to create a port interface. The ``net_id`` and ``port_id`` parameters are mutually exclusive. If you do not specify the ``net_id`` parameter, the OpenStack Networking API v2.0 uses the network information cache that is associated with the instance. in: body required: false type: string net_id_resp: description: | The network ID. in: body required: true type: string net_id_resp_2_12: description: | The network ID. in: body required: true type: string min_version: 2.12 network_label_body: description: | List of IP address and IP version pairs. The ``network_label`` stands for the name of a network, such as ``public`` or ``private``. in: body required: true type: array network_uuid: description: | To provision the server instance with a NIC for a network, specify the UUID of the network in the ``uuid`` attribute in a ``networks`` object. Required if you omit the ``port`` attribute. Starting with microversion 2.37, this value is strictly enforced to be in UUID format. in: body required: false type: string networks: description: | A list of ``network`` object. Required parameter when there are multiple networks defined for the tenant. When you do not specify the networks parameter, the server attaches to the only network created for the current tenant. Optionally, you can create one or more NICs on the server. To provision the server instance with a NIC for a network, specify the UUID of the network in the ``uuid`` attribute in a ``networks`` object. To provision the server instance with a NIC for an already existing port, specify the port-id in the ``port`` attribute in a ``networks`` object. If multiple networks are defined, the order in which they appear in the guest operating system will not necessarily reflect the order in which they are given in the server boot request. Guests should therefore not depend on device order to deduce any information about their network devices. Instead, device role tags should be used: introduced in 2.32, broken in 2.37, and re-introduced and fixed in 2.42, the ``tag`` is an optional, string attribute that can be used to assign a tag to a virtual network interface. This tag is then exposed to the guest in the metadata API and the config drive and is associated to hardware metadata for that network interface, such as bus (ex: PCI), bus address (ex: 0000:00:02.0), and MAC address. A bug has caused the ``tag`` attribute to no longer be accepted starting with version 2.37. Therefore, network interfaces could only be tagged in versions 2.32 to 2.36 inclusively. Version 2.42 has restored the ``tag`` attribute. Starting with microversion 2.37, this field is required and the special string values *auto* and *none* can be specified for networks. *auto* tells the Compute service to use a network that is available to the project, if one exists. 
If one does not exist, the Compute service will attempt to automatically allocate a network for the project (if possible). *none* tells the Compute service to not allocate a network for the instance. The *auto* and *none* values cannot be used with any other network values, including other network uuids, ports, fixed IPs or device tags. These are requested as strings for the networks value, not in a list. See the associated example. in: body required: true type: array networks_quota_optional: &networks_quota_optional description: | The number of private networks that can be created per project. in: body required: false type: integer max_version: 2.49 networks_quota_set_optional: <<: *networks_quota_optional max_version: 2.35 new_file: description: | The name of the qcow2 file that Block Storage creates, which becomes the active image for the VM. in: body required: true type: string nic_details_diagnostics: description: | The list of dictionaries with detailed information about VM NICs. Following fields are presented in each dictionary: - ``mac_address`` - Mac address of the interface (String) - ``rx_octets`` - Received octets (Integer) - ``rx_errors`` - Received errors (Integer) - ``rx_drop`` - Received packets dropped (Integer) - ``rx_packets`` - Received packets (Integer) - ``rx_rate`` - Receive rate in bytes (Integer) - ``tx_octets`` - Transmitted Octets (Integer) - ``tx_errors`` - Transmit errors (Integer) - ``tx_drop`` - Transmit dropped packets (Integer) - ``tx_packets`` - Transmit packets (Integer) - ``tx_rate`` - Transmit rate in bytes (Integer) in: body required: true type: array min_version: 2.48 no_device: description: | It is no device if ``True``. in: body required: false type: boolean num_cpus_diagnostics: description: | The number of vCPUs. in: body required: true type: integer min_version: 2.48 num_disks_diagnostics: description: | The number of disks. in: body required: true type: integer min_version: 2.48 num_nics_diagnostics: description: | The number of vNICs. in: body required: true type: integer min_version: 2.48 on_shared_storage: description: | Server on shared storage. .. note:: Starting since version 2.14, Nova automatically detects whether the server is on shared storage or not. Therefore this parameter was removed. in: body required: true type: boolean max_version: 2.13 os: description: | The name of the operating system. in: body required: true type: string os-availability-zone:availability_zone: description: | The availability zone from which to launch the server. When you provision resources, you specify from which availability zone you want your instance to be built. Typically, an admin user will use availability zones to arrange OpenStack compute hosts into logical groups. An availability zone provides a form of physical isolation and redundancy from other availability zones. For instance, if some racks in your data center are on a separate power source, you can put servers in those racks in their own availability zone. Availability zones can also help separate different classes of hardware. By segregating resources into availability zones, you can ensure that your application resources are spread across disparate machines to achieve high availability in the event of hardware or other failure. See `Availability Zones (AZs) `_ for more information. You can list the available availability zones by calling the :ref:`os-availability-zone` API, but you should avoid using the `default availability zone `_ when creating the server. 
The default availability zone is named ``nova``. This AZ is only shown when listing the availability zones as an admin. in: body required: false type: string OS-DCF:diskConfig: description: | Controls how the API partitions the disk when you create, rebuild, or resize servers. A server inherits the ``OS-DCF:diskConfig`` value from the image from which it was created, and an image inherits the ``OS-DCF:diskConfig`` value from the server from which it was created. To override the inherited setting, you can include this attribute in the request body of a server create, rebuild, or resize request. If the ``OS-DCF:diskConfig`` value for an image is ``MANUAL``, you cannot create a server from that image and set its ``OS-DCF:diskConfig`` value to ``AUTO``. A valid value is: - ``AUTO``. The API builds the server with a single partition the size of the target flavor disk. The API automatically adjusts the file system to fit the entire partition. - ``MANUAL``. The API builds the server by using whatever partition scheme and file system is in the source image. If the target flavor disk is larger, the API does not partition the remaining disk space. in: body required: false type: string OS-EXT-AZ:availability_zone: description: | The availability zone name. in: body required: true type: string OS-EXT-AZ:availability_zone_optional: description: | The availability zone name. in: body required: false type: string OS-EXT-AZ:availability_zone_update_rebuild: description: | The availability zone name. in: body required: true type: string min_version: 2.75 OS-EXT-SRV-ATTR:host: description: | The name of the compute host on which this instance is running. Appears in the response for administrative users only. in: body required: true type: string OS-EXT-SRV-ATTR:host_update_rebuild: description: | The name of the compute host on which this instance is running. Appears in the response for administrative users only. in: body required: true type: string min_version: 2.75 OS-EXT-SRV-ATTR:hypervisor_hostname: description: | The hypervisor host name provided by the Nova virt driver. For the Ironic driver, it is the Ironic node uuid. Appears in the response for administrative users only. in: body required: true type: string OS-EXT-SRV-ATTR:hypervisor_hostname_update_rebuild: description: | The hypervisor host name provided by the Nova virt driver. For the Ironic driver, it is the Ironic node uuid. Appears in the response for administrative users only. in: body required: true type: string min_version: 2.75 OS-EXT-SRV-ATTR:instance_name: description: | The instance name. The Compute API generates the instance name from the instance name template. Appears in the response for administrative users only. in: body required: true type: string OS-EXT-SRV-ATTR:instance_name_update_rebuild: description: | The instance name. The Compute API generates the instance name from the instance name template. Appears in the response for administrative users only. in: body required: true type: string min_version: 2.75 OS-EXT-STS:power_state: description: | The power state of the instance. This is an enum value that is mapped as:: 0: NOSTATE 1: RUNNING 3: PAUSED 4: SHUTDOWN 6: CRASHED 7: SUSPENDED in: body required: true type: integer OS-EXT-STS:power_state_update_rebuild: description: | The power state of the instance. 
This is an enum value that is mapped as:: 0: NOSTATE 1: RUNNING 3: PAUSED 4: SHUTDOWN 6: CRASHED 7: SUSPENDED in: body required: true type: integer min_version: 2.75 OS-EXT-STS:task_state: description: | The task state of the instance. in: body required: true type: string OS-EXT-STS:task_state_update_rebuild: description: | The task state of the instance. in: body required: true type: string min_version: 2.75 OS-EXT-STS:vm_state: description: | The VM state. in: body required: true type: string OS-EXT-STS:vm_state_update_rebuild: description: | The VM state. in: body required: true type: string min_version: 2.75 os-extended-volumes:volumes_attached: description: | The attached volumes, if any. in: body required: true type: array os-extended-volumes:volumes_attached.delete_on_termination: description: | A flag indicating if the attached volume will be deleted when the server is deleted. By default this is False. in: body required: true type: boolean min_version: 2.3 os-extended-volumes:volumes_attached.delete_on_termination_update_rebuild: description: | A flag indicating if the attached volume will be deleted when the server is deleted. By default this is False. in: body required: true type: boolean min_version: 2.75 os-extended-volumes:volumes_attached.id: description: | The attached volume ID. in: body required: true type: string os-extended-volumes:volumes_attached.id_update_rebuild: description: | The attached volume ID. in: body required: true type: string min_version: 2.75 os-extended-volumes:volumes_attached_update_rebuild: description: | The attached volumes, if any. in: body required: true type: array min_version: 2.75 os-getConsoleOutput: description: | The action to get console output of the server. in: body required: true type: object os-getRDPConsole: description: | The action. in: body required: true type: object os-getRDPConsole-type: description: | The type of RDP console. The only valid value is ``rdp-html5``. in: body required: true type: string os-getRDPConsole-url: description: | The URL used to connect to the RDP console. in: body required: true type: string os-getSerialConsole: description: | The action. in: body required: true type: object os-getSerialConsole-type: description: | The type of serial console. The only valid value is ``serial``. in: body required: true type: string os-getSerialConsole-url: description: | The URL used to connect to the Serial console. in: body required: true type: string os-getSPICEConsole: description: | The action. in: body required: true type: object os-getSPICEConsole-type: description: | The type of SPICE console. The only valid value is ``spice-html5``. in: body required: true type: string os-getSPICEConsole-url: description: | The URL used to connect to the SPICE console. in: body required: true type: string os-getVNCConsole: description: | The action. in: body required: true type: object os-getVNCConsole-type: description: | The type of VNC console. The only valid value is ``novnc``. in: body required: true type: string os-getVNCConsole-url: description: | The URL used to connect to the VNC console. in: body required: true type: string os-migrateLive: description: | The action. in: body required: true type: object os-resetState: description: | The action. in: body required: true type: object os-resetState_state: description: | The state of the server to be set, ``active`` or ``error`` are valid. in: body required: true type: string OS-SRV-USG:launched_at: description: | The date and time when the server was launched. 
The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. If the ``launched_at`` date and time stamp is not set, its value is ``null``. in: body required: true type: string OS-SRV-USG:launched_at_update_rebuild: description: | The date and time when the server was launched. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. If the ``launched_at`` date and time stamp is not set, its value is ``null``. in: body required: true type: string min_version: 2.75 OS-SRV-USG:terminated_at: description: | The date and time when the server was deleted. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. If the ``deleted_at`` date and time stamp is not set, its value is ``null``. in: body required: true type: string OS-SRV-USG:terminated_at_update_rebuild: description: | The date and time when the server was deleted. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. If the ``deleted_at`` date and time stamp is not set, its value is ``null``. in: body required: true type: string min_version: 2.75 os-start: description: | The action to start a stopped server. in: body required: true type: none os-stop: description: | The action to stop a running server. in: body required: true type: none os:scheduler_hints: description: | The dictionary of data to send to the scheduler. Alternatively, you can specify ``OS-SCH-HNT:scheduler_hints`` as the key in the request body. .. note:: This is a top-level key in the request body, not part of the `server` portion of the request body. There are a few caveats with scheduler hints: * The request validation schema is per hint. For example, some require a single string value, and some accept a list of values. * Hints are only used based on the cloud scheduler configuration, which varies per deployment. * Hints are pluggable per deployment, meaning that a cloud can have custom hints which may not be available in another cloud. For these reasons, it is important to consult each cloud's user documentation to know what is available for scheduler hints. in: body required: false type: object os:scheduler_hints_build_near_host_ip: description: | Schedule the server on a host in the network specified with this parameter and a cidr (``os:scheduler_hints.cidr``). It is available when ``SimpleCIDRAffinityFilter`` is available on cloud side. in: body required: false type: string os:scheduler_hints_cidr: description: | Schedule the server on a host in the network specified with an IP address (``os:scheduler_hints:build_near_host_ip``) and this parameter. If ``os:scheduler_hints:build_near_host_ip`` is specified and this parameter is omitted, ``/24`` is used. It is available when ``SimpleCIDRAffinityFilter`` is available on cloud side. in: body required: false type: string os:scheduler_hints_different_cell: description: | A list of cell routes or a cell route (string). Schedule the server in a cell that is not specified. It is available when ``DifferentCellFilter`` is available on cloud side that is cell v1 environment.
in: body required: false type: array os:scheduler_hints_different_host: description: | A list of server UUIDs or a server UUID. Schedule the server on a different host from a set of servers. It is available when ``DifferentHostFilter`` is available on cloud side. in: body required: false type: array os:scheduler_hints_group: description: | The server group UUID. Schedule the server according to a policy of the server group (``anti-affinity``, ``affinity``, ``soft-anti-affinity`` or ``soft-affinity``). It is available when ``ServerGroupAffinityFilter``, ``ServerGroupAntiAffinityFilter``, ``ServerGroupSoftAntiAffinityWeigher``, ``ServerGroupSoftAffinityWeigher`` are available on cloud side. in: body required: false type: string os:scheduler_hints_query: description: | Schedule the server by using a custom filter in JSON format. For example:: "query": "[\">=\",\"$free_ram_mb\",1024]" It is available when ``JsonFilter`` is available on cloud side. in: body required: false type: string os:scheduler_hints_same_host: description: | A list of server UUIDs or a server UUID. Schedule the server on the same host as another server in a set of servers. It is available when ``SameHostFilter`` is available on cloud side. in: body required: false type: array os:scheduler_hints_target_cell: description: | A target cell name. Schedule the server in a host in the cell specified. It is available when ``TargetCellFilter`` is available on cloud side that is cell v1 environment. in: body required: false type: string overall_status: description: | The overall status of instance audit tasks. :: M of N hosts done. K errors. The ``M`` value is the number of hosts whose instance audit tasks have been done in the period. The ``N`` value is the number of all hosts. The ``K`` value is the number of hosts whose instance audit tasks cause errors. If instance audit tasks have been done at all hosts in the period, the overall status is as follows: :: ALL hosts done. K errors. in: body required: true type: string para: description: | The parameter object. in: body required: true type: object parent_group_id: description: | Security group ID. in: body required: true type: string password: description: | The password returned from metadata server. in: body required: false type: string path: description: | The path field in the personality object. in: body required: true type: string max_version: 2.56 pause: description: | The action to pause a server. in: body required: true type: none period_beginning: description: | The beginning time of the instance usage audit period. For example, ``2016-05-01 00:00:00``. in: body required: true type: string period_ending: description: | The ending time of the instance usage audit period. For example, ``2016-06-01 00:00:00``. in: body required: true type: string personality: description: | The file path and contents, text only, to inject into the server at launch. The maximum size of the file path data is 255 bytes. The maximum limit is the number of allowed bytes in the decoded, rather than encoded, data. in: body required: false type: array max_version: 2.56 pinned_availability_zone: description: | This is the availability zone requested during server creation. Also when cross_az_attach option is false and booting an instance from volume, the instance can be pinned to AZ and in that case, instance will be scheduled on host belonging to pinned AZ. 
Also, when the default_schedule_zone config option is set to a specific AZ, the instance is pinned to that AZ and will be scheduled on a host belonging to the pinned AZ. If there is no pinned availability zone, this value is set to ``null``. in: body type: string min_version: 2.96 policies: description: | A list of exactly one policy name to associate with the server group. The current valid policy names are: - ``anti-affinity`` - servers in this group must be scheduled to different hosts. - ``affinity`` - servers in this group must be scheduled to the same host. - ``soft-anti-affinity`` - servers in this group should be scheduled to different hosts if possible, but if not possible then they should still be scheduled instead of resulting in a build failure. This policy was added in microversion 2.15. - ``soft-affinity`` - servers in this group should be scheduled to the same host if possible, but if not possible then they should still be scheduled instead of resulting in a build failure. This policy was added in microversion 2.15. in: body required: true type: array max_version: 2.63 policy_name: description: | The ``policy`` field represents the name of the policy. The current valid policy names are: - ``anti-affinity`` - servers in this group must be scheduled to different hosts. - ``affinity`` - servers in this group must be scheduled to the same host. - ``soft-anti-affinity`` - servers in this group should be scheduled to different hosts if possible, but if not possible then they should still be scheduled instead of resulting in a build failure. - ``soft-affinity`` - servers in this group should be scheduled to the same host if possible, but if not possible then they should still be scheduled instead of resulting in a build failure. in: body required: true type: string min_version: 2.64 policy_rules: description: | The ``rules`` field, which is a dict, can be applied to the policy. Currently, only the ``max_server_per_host`` rule is supported for the ``anti-affinity`` policy. The ``max_server_per_host`` rule allows specifying how many members of the anti-affinity group can reside on the same compute host. If not specified, only one member from the same anti-affinity group can reside on a given host. in: body required: true type: object min_version: 2.64 policy_rules_optional: description: | The ``rules`` field, which is a dict, can be applied to the policy. Currently, only the ``max_server_per_host`` rule is supported for the ``anti-affinity`` policy. The ``max_server_per_host`` rule allows specifying how many members of the anti-affinity group can reside on the same compute host. If not specified, only one member from the same anti-affinity group can reside on a given host. Requesting policy rules with any policy other than ``anti-affinity`` will result in a 400 error. in: body required: false type: object min_version: 2.64 pool: description: | Pool from which to allocate the IP address. If you omit this parameter, the call allocates the floating IP address from the public pool. If no floating IP addresses are available, the call returns the ``400`` response code with an informational message. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file.
in: body required: false type: string port: description: | To provision the server instance with a NIC for an already existing port, specify the port-id in the ``port`` attribute in a ``networks`` object. The port status must be ``DOWN``. Required if you omit the ``uuid`` attribute. Requested security groups are not applied to pre-existing ports. in: body required: false type: string port_id: description: | The ID of the port for which you want to create an interface. The ``net_id`` and ``port_id`` parameters are mutually exclusive. If you do not specify the ``port_id`` parameter, the OpenStack Networking API v2.0 allocates a port and creates an interface for it on the network. in: body required: false type: string port_id_resp: description: | The port ID. in: body required: true type: string port_number: description: | The port number. in: body required: true type: integer port_state: description: | The port state. in: body required: true type: string preserve_ephemeral: description: | Indicates whether the server is rebuilt with the preservation of the ephemeral partition (``true``). .. note:: This only works with baremetal servers provided by Ironic. Passing it to any other server instance results in a fault and will prevent the rebuild from happening. in: body required: false type: boolean previous: description: | Moves to the previous metadata item. format: uri in: body required: false type: string private_key: description: | The secret key. in: body required: true type: string progress: description: | A percentage value of the operation progress. This parameter only appears when the server status is ``ACTIVE``, ``BUILD``, ``REBUILD``, ``RESIZE``, ``VERIFY_RESIZE`` or ``MIGRATING``. in: body required: false type: integer project_id: description: | The UUID of the project. If omitted, the project ID defaults to the calling tenant. in: body required: false type: string project_id_migration_2_80: description: | The ID of the project which initiated the server migration. The value may be ``null`` for older migration records. in: body required: true type: string min_version: 2.80 project_id_server: description: | The ID of the project that this server belongs to. in: body required: true type: string project_id_server_action: description: | The ID of the project which initiated the server action. This can be ``null`` for ``nova-manage``-initiated actions. in: body required: true type: string project_id_server_group: description: | The project ID who owns the server group. min_version: 2.13 in: body required: true type: string project_id_value: description: | The project id under which the bulk ip addresses are created in: body required: true type: string quota_class_id_body: <<: *quota_class_id in: body quota_class_set: description: | A ``quota_class_set`` object. in: body required: true type: object quota_set: description: | A ``quota_set`` object. in: body required: true type: object quota_tenant_or_user_id_body: description: | The UUID of the tenant/user the quotas listed for. in: body required: true type: string ram: &ram description: | The amount of allowed server RAM, in MiB, for each tenant. in: body required: true type: integer ram_quota_class: &ram_quota_class <<: *ram description: | The amount of allowed instance RAM, in MiB, for the quota class. ram_quota_class_optional: <<: *ram_quota_class required: false ram_quota_details: description: | The object of detailed key ram quota, including in_use, limit and reserved number of ram. 
in: body required: true type: object ram_quota_optional: description: | The amount of allowed server RAM, in MiB, for each tenant. in: body required: false type: integer reboot: description: | The action to reboot a server. in: body required: true type: object reboot_type: description: | The type of the reboot action. The valid values are ``HARD`` and ``SOFT``. A ``SOFT`` reboot attempts a graceful shutdown and restart of the server. A ``HARD`` reboot attempts a forced shutdown and restart of the server. The ``HARD`` reboot corresponds to the power cycles of the server. in: body required: true type: string rebuild: description: | The action to rebuild a server. in: body required: true type: object remote_console: description: | The remote console object. in: body required: true type: object remote_console_protocol: description: | The protocol of remote console. The valid values are ``vnc``, ``spice``, ``rdp``, ``serial`` and ``mks``. The protocol ``mks`` is added since Microversion ``2.8``. The protocol ``rdp`` requires the Hyper-V driver which was removed in the 29.0.0 (Caracal) release. in: body required: true type: string remote_console_type: description: | The type of remote console. The valid values are ``novnc``, ``rdp-html5``, ``spice-html5``, ``spice-direct``, ``serial``, and ``webmks``. The type ``webmks`` was added in Microversion ``2.8`` and the type ``spice-direct`` was added in Microversion ``2.99``. The type ``rdp-html5`` requires the Hyper-V driver which was removed in the 29.0.0 (Caracal) release. in: body required: true type: string remote_console_url: description: | The URL is used to connect the console. in: body required: true type: string removeFixedIp: description: | The action to remove a fixed ip address from a server. in: body required: true type: object removeFloatingIp: description: | The action to remove or disassociate a floating IP address from the server. in: body required: true type: object removeSecurityGroup: description: | The action to remove a security group from the server. in: body required: true type: object removeTenantAccess: description: | The action. in: body required: true type: string request_id_body: description: | The request id generated when execute the API of this action. in: body required: true type: string rescue: description: | The action to rescue a server. in: body required: true type: object rescue_image_ref: description: | The image reference to use to rescue your server instance. Specify the image reference by ID or full URL. If you omit an image reference, default is the base image reference. in: body required: false type: string reservation_id: description: | The reservation id for the server. This is an id that can be useful in tracking groups of servers created with multiple create, that will all have the same reservation_id. in: body required: true type: string reserved: description: | The reserved quota value. in: body required: true type: integer reserved_fixedip: description: | True if the fixed ip is reserved, otherwise False. in: body required: true type: boolean min_version: 2.4 resetNetwork: description: | The action. in: body required: true type: none resize: description: | The action to resize a server. in: body required: true type: object restore: description: | The action. in: body required: true type: none resume: description: | The action to resume a suspended server. 
in: body required: true type: none return_reservation_id: description: | Set to ``True`` to request that the response return a reservation ID instead of instance information. Default is ``False``. in: body required: false type: boolean revertResize: description: | The action to revert a resize operation. in: body required: true type: none rules: description: | The list of security group rules. in: body required: true type: array running_vms: description: | The number of running VMs on this hypervisor. in: body required: true type: integer max_version: 2.87 running_vms_total: description: | The total number of running VMs on all hypervisors. in: body required: true type: integer scheduler_hints: description: | The dictionary of hints sent to the scheduler during server creation time. in: body required: true type: object min_version: 2.100 secgroup_default_rule_id: description: | The security group default rule ID. in: body required: true type: string secgroup_rule_cidr: description: | The CIDR for address range. in: body required: false type: string secgroup_rule_id: description: | The security group rule ID. in: body required: true type: string secgroup_rule_ip_range: description: | An IP range object. Includes the security group rule ``cidr``. in: body required: true type: object secgroup_tenant_id_body: description: | The UUID of the tenant that owns this security group. in: body required: false type: string security_group: description: | Specify the ``security_group`` action in the request body. in: body required: true type: string security_group_default_rule: description: | A ``security_group_default_rule`` object. in: body required: true type: object security_group_default_rules: description: | A list of the ``security_group_default_rule`` object. in: body required: true type: array security_group_id_body: description: | The ID of the security group. in: body required: true type: string security_group_rule: description: | A ``security_group_rule`` object. in: body required: true type: object security_group_rules: description: | The number of allowed rules for each security group. in: body required: false type: integer max_version: 2.35 security_group_rules_quota: description: | The number of allowed rules for each security group. in: body required: true type: integer max_version: 2.35 security_group_rules_quota_class: &security_group_rules_quota_class description: | The number of allowed rules for each security group. in: body required: true type: integer max_version: 2.49 security_group_rules_quota_class_optional: <<: *security_group_rules_quota_class required: false security_group_rules_quota_details: description: | The object of detailed security group rules quota, including in_use, limit and reserved number of security group rules. in: body required: true type: object max_version: 2.35 security_groups: description: | One or more security groups. Specify the name of the security group in the ``name`` attribute. If you omit this attribute, the API creates the server in the ``default`` security group. Requested security groups are not applied to pre-existing ports. in: body required: false type: array security_groups_obj: description: | One or more security groups objects. in: body required: true type: array security_groups_obj_optional: description: | One or more security groups objects. in: body required: false type: array security_groups_obj_update_rebuild: description: | One or more security groups objects. 
in: body required: false type: array min_version: 2.75 security_groups_quota: description: | The number of allowed security groups for each tenant. in: body required: true type: integer max_version: 2.35 security_groups_quota_class: &security_groups_quota_class description: | The number of allowed security groups for the quota class. in: body required: true type: integer max_version: 2.49 security_groups_quota_class_optional: <<: *security_groups_quota_class required: false security_groups_quota_details: description: | The object of detailed security groups, including in_use, limit and reserved number of security groups. in: body required: true type: object max_version: 2.35 security_groups_quota_optional: description: | The number of allowed security groups for each tenant. in: body required: false type: integer max_version: 2.35 server: description: | A ``server`` object. in: body required: true type: object server_description: type: string in: body required: false min_version: 2.19 description: | A free form description of the server. Limited to 255 characters in length. Before microversion 2.19 this was set to the server name. server_description_resp: description: | The description of the server. Before microversion 2.19 this was set to the server name. in: body required: true type: string min_version: 2.19 server_group: description: | The server group object. in: body required: true type: object server_group_id_body: description: | The UUID of the server group. in: body required: true type: string server_group_members: &server_group_members description: | The number of allowed members for each server group. in: body required: true type: integer server_group_members_quota_class: <<: *server_group_members min_version: 2.50 server_group_members_quota_details: description: | The object of detailed server group members, including in_use, limit and reserved number of server group members. in: body required: true type: object server_group_members_quota_optional: description: | The number of allowed members for each server group. in: body required: false type: integer server_groups: &server_groups description: | The number of allowed server groups for each tenant. in: body required: true type: integer server_groups_2_71: description: | The UUIDs of the server groups to which the server belongs. Currently this can contain at most one entry. in: body required: true type: array min_version: 2.71 server_groups_list: description: | The list of existing server groups. in: body required: true type: array server_groups_quota_class: <<: *server_groups description: | The number of allowed server groups for the quota class. min_version: 2.50 server_groups_quota_class_optional: <<: *server_groups description: | The number of allowed server groups for the quota class. required: false server_groups_quota_details: description: | The object of detailed server groups, including in_use, limit and reserved number of server groups. in: body required: true type: object server_groups_quota_optional: description: | The number of allowed server groups for each tenant. in: body required: false type: integer # This is the host in a POST (create instance) request body. server_host_create: description: | The name of the compute service host on which the server is to be created. The API will return 400 if no compute services are found with the given host name. By default, it can be specified by administrators only. 
in: body required: false type: string min_version: 2.74 server_hostname: &server_hostname in: body required: false type: string description: | The hostname of the instance reported in the metadata service. This parameter only appears in responses for administrators until microversion 2.90, after which it is shown for all users. .. note:: This information is published via the metadata service and requires application such as ``cloud-init`` to propagate it through to the instance. min_version: 2.3 server_hostname_req: in: body required: false type: string description: | The hostname to configure for the instance in the metadata service. Starting with microversion 2.94, this can be a Fully Qualified Domain Name (FQDN) of up to 255 characters in length. .. note:: This information is published via the metadata service and requires application such as ``cloud-init`` to propagate it through to the instance. min_version: 2.90 server_hostname_update_rebuild: <<: *server_hostname min_version: 2.75 # This is the hypervisor_hostname in a POST (create instance) request body. server_hypervisor_hostname_create: description: | The hostname of the hypervisor on which the server is to be created. The API will return 400 if no hypervisors are found with the given hostname. By default, it can be specified by administrators only. in: body required: false type: string min_version: 2.74 server_id: description: | The UUID of the server. in: body required: true type: string server_id_optional: description: | The UUID of the server. in: body required: false type: string server_kernel_id: in: body required: false type: string description: | The UUID of the kernel image when using an AMI. Will be null if not. By default, it appears in the response for administrative users only. min_version: 2.3 server_kernel_id_update_rebuild: in: body required: false type: string description: | The UUID of the kernel image when using an AMI. Will be null if not. By default, it appears in the response for administrative users only. min_version: 2.75 server_launch_index: in: body required: false type: integer description: | When servers are launched via multiple create, this is the sequence in which the servers were launched. By default, it appears in the response for administrative users only. min_version: 2.3 server_launch_index_update_rebuild: in: body required: false type: integer description: | When servers are launched via multiple create, this is the sequence in which the servers were launched. By default, it appears in the response for administrative users only. min_version: 2.75 server_links: description: | Links pertaining to the server. See `API Guide / Links and References `_ for more info. in: body type: array required: true server_name: description: | The server name. in: body required: true type: string server_name_optional: description: | The server name. in: body required: false type: string server_ramdisk_id: in: body required: false type: string description: | The UUID of the ramdisk image when using an AMI. Will be null if not. By default, it appears in the response for administrative users only. min_version: 2.3 server_ramdisk_id_update_rebuild: in: body required: false type: string description: | The UUID of the ramdisk image when using an AMI. Will be null if not. By default, it appears in the response for administrative users only. min_version: 2.75 server_reservation_id: in: body required: false type: string description: | The reservation id for the server. 
This is an id that can be useful in tracking groups of servers created with multiple create, that will all have the same reservation_id. By default, it appears in the response for administrative users only. min_version: 2.3 server_reservation_id_update_rebuild: in: body required: false type: string description: | The reservation id for the server. This is an id that can be useful in tracking groups of servers created with multiple create, that will all have the same reservation_id. By default, it appears in the response for administrative users only. min_version: 2.75 server_root_device_name: in: body required: false type: string description: | The root device name for the instance. By default, it appears in the response for administrative users only. min_version: 2.3 server_root_device_name_update_rebuild: in: body required: false type: string description: | The root device name for the instance. By default, it appears in the response for administrative users only. min_version: 2.75 server_status: description: | The server status. in: body required: true type: string server_tags_create: description: | A list of tags. Tags have the following restrictions: - Tag is a Unicode bytestring no longer than 60 characters. - Tag is a non-empty string. - '/' is not allowed to be in a tag name - Comma is not allowed to be in a tag name in order to simplify requests that specify lists of tags - All other characters are allowed to be in a tag name - Each server can have up to 50 tags. in: body required: false type: array min_version: 2.52 server_topology_nodes: description: | NUMA nodes information of a server. in: body required: true type: array server_topology_nodes_cpu_pinning: description: | The mapping of server cores to host physical CPUs. For example:: cpu_pinning: { 0: 0, 1: 5} This means vcpu 0 is mapped to physical CPU 0, and vcpu 1 is mapped to physical CPU 5. By default the ``cpu_pinning`` field is only visible to users with the administrative role. You can change the default behavior via the policy rule:: compute:server:topology:host:index in: body required: false type: dict server_topology_nodes_cpu_siblings: description: | A mapping of host CPU thread siblings. For example:: siblings: [[0,1],[2,3]] This means vcpu 0 and vcpu 1 belong to the same CPU core, while vcpu 2 and vcpu 3 belong to another CPU core. By default the ``siblings`` field is only visible to users with the administrative role. You can change the default behavior via the policy rule:: compute:server:topology:host:index in: body required: false type: list server_topology_nodes_host_node: description: | The host NUMA node the virtual NUMA node is mapped to. By default the ``host_node`` field is only visible to users with the administrator role. You can change the default behavior via the policy rule:: compute:server:topology:host:index in: body required: false type: integer server_topology_nodes_memory_mb: description: | The amount of memory assigned to this NUMA node in MB. in: body required: false type: integer server_topology_nodes_vcpu_set: description: | A list of IDs of the virtual CPUs assigned to this NUMA node. in: body required: false type: list server_topology_pagesize_kb: description: | The page size in KB of a server. This field is ``null`` if the page size information is not available. in: body required: true type: integer server_trusted_image_certificates_create_req: description: | A list of trusted certificate IDs, which are used during image signature verification to verify the signing certificate.
The list is restricted to a maximum of 50 IDs. This parameter is optional in server create requests if allowed by policy, and is not supported for volume-backed instances. in: body required: false type: array min_version: 2.63 server_trusted_image_certificates_rebuild_req: description: | A list of trusted certificate IDs, which are used during image signature verification to verify the signing certificate. The list is restricted to a maximum of 50 IDs. This parameter is optional in server rebuild requests if allowed by policy, and is not supported for volume-backed instances. If ``null`` is specified, the existing trusted certificate IDs are either unset or reset to the configured defaults. in: body required: false type: array min_version: 2.63 server_trusted_image_certificates_resp: description: | A list of trusted certificate IDs, that were used during image signature verification to verify the signing certificate. The list is restricted to a maximum of 50 IDs. The value is ``null`` if trusted certificate IDs are not set. in: body required: true type: array min_version: 2.63 server_usages: description: | A list of the server usage objects. in: body required: true type: array server_usages_optional: description: | A list of the server usage objects. in: body required: false type: array server_user_data: in: body required: false type: string description: | The user_data the instance was created with. By default, it appears in the response for administrative users only. min_version: 2.3 server_user_data_update: in: body required: false type: string description: | The user_data the instance was created with. By default, it appears in the response for administrative users only. min_version: 2.75 server_uuid: description: | The UUID of the server instance to which the API dispatches the event. You must assign this instance to a host. Otherwise, this call does not dispatch the event to the instance. in: body required: true type: string servers: description: | A list of ``server`` objects. in: body required: true type: array servers_links: description: | Links to the next server. It is available when the number of servers exceeds ``limit`` parameter or ``[api]/max_limit`` in the configuration file. See `Paginated collections `__ for more info. in: body type: array required: false servers_max_count: in: body required: false type: integer description: | The max number of servers to be created. Defaults to the value of ``min_count``. servers_min_count: in: body required: false type: integer description: | The min number of servers to be created. Defaults to 1. servers_multiple_create_name: in: body required: true type: string description: | A base name for creating unique names during multiple create. service: description: | Object representing a compute service. in: body required: true type: object service_disable_reason: description: | The disable reason of the service, ``null`` if the service is enabled or disabled without reason provided. in: body required: true type: string service_id_body: description: | The id of the service. in: body required: true type: integer service_id_body_2_52: description: | The id of the service. in: body required: true type: integer max_version: 2.52 service_id_body_2_53: description: | The id of the service as a uuid. in: body required: true type: string min_version: 2.53 service_id_body_2_53_no_version: description: | The id of the service as a uuid. in: body required: true type: string service_state: description: | The state of the service. 
One of ``up`` or ``down``. in: body required: true type: string service_status: description: | The status of the service. One of ``enabled`` or ``disabled``. in: body required: true type: string # This is an optional input parameter to PUT /os-services/{service_id} added # in microversion 2.53. service_status_2_53_in: description: | The status of the service. One of ``enabled`` or ``disabled``. in: body required: false type: string services: description: | A list of service objects. in: body required: true type: array set_metadata: description: | The set_metadata object used to set metadata for host aggregate. in: body required: true type: object share_body: description: | A dictionary representation of a share attachment containing the fields ``uuid``, ``serverId``, ``status``, ``tag`` and ``export_location``. in: body required: true type: object share_export_location_body: description: | The export location used to attach the share to the underlying host. in: body required: false type: string share_id_body: description: | The UUID of the attached share. in: body required: true type: string share_status_body: description: | Status of the Share: - attaching: The share is being attached to the VM by the compute node. - detaching: The share is being detached from the VM by the compute node. - inactive: The share is attached but inactive because the VM is stopped. - active: The share is attached, and the VM is running. - error: The share is in an error state. in: body required: true type: string share_tag_body: description: | The device tag to be used by users to mount the share within the instance, if not provided then the share UUID will be used automatically. in: body required: true type: string share_uuid_body: description: | The UUID of the share attachment. in: body required: false type: string shares_body: description: | The list of share attachments. in: body required: true type: array shelve: description: | The action. in: body required: true type: none shelveOffload: description: | The action. in: body required: true type: none size: description: | The size of the volume, in gibibytes (GiB). in: body required: true type: integer snapshot: description: | A partial representation of a snapshot that is used to create a snapshot. in: body required: true type: object snapshot_description: description: | The snapshot description. in: body required: true type: string snapshot_description_optional: description: | The snapshot description. in: body required: false type: string snapshot_id: description: | The UUID for a snapshot. in: body required: true type: string snapshot_id_optional: description: | The UUID for a snapshot. in: body required: false type: string snapshot_id_resp_2_45: description: | The UUID for the resulting image snapshot. in: body required: true type: string min_version: 2.45 snapshot_name: description: | The snapshot name. in: body required: true type: string snapshot_name_optional: description: | The snapshot name. in: body required: false type: string snapshot_status: description: | The status of the snapshot. Valid status values are: - ``available`` - ``creating`` - ``deleting`` - ``error`` - ``error_deleting`` in: body required: true type: string snapshots: description: | A list of snapshot objects. in: body required: true type: array source_type: description: | The source type of the block device. 
Valid values are: * ``blank``: Depending on the ``destination_type`` and ``guest_format``, this will either be a blank persistent volume or an ephemeral (or swap) disk local to the compute host on which the server resides * ``image``: This is only valid with ``destination_type=volume``; creates an image-backed volume in the block storage service and attaches it to the server * ``snapshot``: This is only valid with ``destination_type=volume``; creates a volume backed by the given volume snapshot referenced via the ``block_device_mapping_v2.uuid`` parameter and attaches it to the server * ``volume``: This is only valid with ``destination_type=volume``; uses the existing persistent volume referenced via the ``block_device_mapping_v2.uuid`` parameter and attaches it to the server This parameter is required unless ``block_device_mapping_v2.no_device`` is specified. See `Block Device Mapping in Nova `_ for more details on valid source and destination types. in: body required: false type: string start_simple_tenant_usage_body: description: | The beginning time to calculate usage statistics on compute and storage resources. The date and time stamp format is as follows: :: CCYY-MM-DDThh:mm:ss.NNNNNN For example, ``2015-08-27T09:49:58.123456``. in: body required: true type: string start_time: description: | The date and time when the action was started. The date and time stamp format is `ISO 8601 `_ :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. In the previous example, the offset value is ``-05:00``. in: body required: true type: string started_at: description: | The date and time when the server was launched. The date and time stamp format is as follows: :: CCYY-MM-DDThh:mm:ss.NNNNNN For example, ``2015-08-27T09:49:58.123456``. in: body required: true type: string started_at_optional: description: | The date and time when the server was launched. The date and time stamp format is as follows: :: CCYY-MM-DDThh:mm:ss.NNNNNN For example, ``2015-08-27T09:49:58.123456``. in: body required: false type: string stop_simple_tenant_usage: description: | The ending time to calculate usage statistics on compute and storage resources. The date and time stamp format is as follows: :: CCYY-MM-DDThh:mm:ss.NNNNNN For example, ``2015-08-27T09:49:58.123456``. in: body required: true type: string subnet_id: description: | The UUID of the subnet. in: body required: true type: string suspend: description: | The action to suspend a server. in: body required: true type: none tags: description: | A list of tags. The maximum count of tags in this list is 50. in: body required: true type: array min_version: 2.26 tags_no_min: description: | A list of tags. The maximum count of tags in this list is 50. in: body required: true type: array tenant_id_body: description: | The UUID of the tenant in a multi-tenancy cloud. in: body required: true type: string tenant_id_optional: description: | The UUID of the tenant in a multi-tenancy cloud. in: body required: false type: string tenant_usage: description: | The tenant usage object. in: body required: true type: object tenant_usages: description: | A list of the tenant usage objects. in: body required: true type: array tls_port_number: description: | The port number of a port requiring a TLS connection. in: body required: false type: integer to_port: description: | The port at end of range. 
in: body required: true type: integer total_cores_used: description: | The number of used server cores in each tenant. If ``reserved`` query parameter is specified and it is not 0, the number of reserved server cores are also included. in: body required: true type: integer total_errors: description: | The total number of instance audit task errors. in: body required: true type: integer total_floatingips_used: description: | The number of used floating IP addresses in each tenant. If ``reserved`` query parameter is specified and it is not 0, the number of reserved floating IP addresses are also included. in: body required: true type: integer max_version: 2.35 total_hours: description: | The total duration that servers exist (in hours). in: body required: true type: float total_instances: description: | The total number of VM instances in the period. in: body required: true type: integer total_instances_used: description: | The number of servers in each tenant. If ``reserved`` query parameter is specified and it is not 0, the number of reserved servers are also included. in: body required: true type: integer total_local_gb_usage: description: | Multiplying the server disk size (in GiB) by hours the server exists, and then adding that all together for each server. in: body required: true type: float total_memory_mb_usage: description: | Multiplying the server memory size (in MiB) by hours the server exists, and then adding that all together for each server. in: body required: true type: float total_ram_used: description: | The amount of used server RAM in each tenant. If ``reserved`` query parameter is specified and it is not 0, the amount of reserved server RAM is also included. in: body required: true type: integer total_security_groups_used: description: | The number of used security groups in each tenant. If ``reserved`` query parameter is specified and it is not 0, the number of reserved security groups are also included. in: body required: true type: integer max_version: 2.35 total_server_groups_used: description: | The number of used server groups in each tenant. If ``reserved`` query parameter is specified and it is not 0, the number of reserved server groups are also included. in: body required: true type: integer total_vcpus_usage: description: | Multiplying the number of virtual CPUs of the server by hours the server exists, and then adding that all together for each server. in: body required: true type: float trigger_crash_dump: in: body required: true type: none description: | Specifies the trigger crash dump action should be run type-os-assisted-volume-snapshot: description: | The snapshot type. A valid value is ``qcow2``. in: body required: true type: string unlock: description: | The action to unlock a locked server. in: body required: true type: none unpause: description: | The action to unpause a paused server. in: body required: true type: none unrescue: description: | The action to unrescue a server in rescue mode. in: body required: true type: none unshelve: description: | The action. in: body required: true type: none updated: description: | The date and time when the resource was updated. The date and time stamp format is `ISO 8601 `_ :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. In the previous example, the offset value is ``-05:00``. 
in: body required: true type: string updated_consider_null: description: | The date and time when the resource was updated, if the resource has not been updated, this field will show as ``null``. The date and time stamp format is `ISO 8601 `_ :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. In the previous example, the offset value is ``-05:00``. in: body required: true type: string updated_instance_action: description: | The date and time when the instance action or the action event of instance action was updated. The date and time stamp format is `ISO 8601 `_ :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. In the previous example, the offset value is ``-05:00``. in: body required: true type: string min_version: 2.58 updated_version: description: | This is a fixed string. It is ``2011-01-21T11:33:21Z`` in version 2.0, ``2013-07-23T11:33:21Z`` in version 2.1. .. note:: It is vestigial and provides no useful information. It will be deprecated and removed in the future. in: body required: true type: string uptime: description: | The total uptime of the hypervisor and information about average load. in: body required: true type: string uptime_diagnostics: description: | The amount of time in seconds that the VM has been running. in: body required: true type: integer min_version: 2.48 uptime_simple_tenant_usage: description: | The uptime of the server. in: body required: true type: integer uptime_simple_tenant_usage_optional: description: | The uptime of the server. in: body required: false type: integer url: description: | The URL associated with the agent. in: body required: true type: string usage_links: description: | Links pertaining to usage. See `API Guide / Links and References `_ for more info. in: body type: array required: false min_version: 2.40 user_data: description: | Configuration information or scripts to use upon launch. Must be Base64 encoded. Restricted to 65535 bytes. .. note:: The ``null`` value allowed in Nova legacy v2 API, but due to the strict input validation, it isn't allowed in Nova v2.1 API. in: body required: false type: string user_data_rebuild_req: description: | Configuration information or scripts to use upon rebuild. Must be Base64 encoded. Restricted to 65535 bytes. If ``null`` is specified, the existing user_data is unset. in: body required: false type: string min_version: 2.57 user_data_rebuild_resp: in: body required: true type: string description: | The current user_data for the instance. min_version: 2.57 user_id: description: | The user ID of the user who owns the server. in: body required: true type: string user_id_migration_2_80: description: | The ID of the user which initiated the server migration. The value may be ``null`` for older migration records. in: body required: true type: string min_version: 2.80 user_id_server_action: description: | The ID of the user which initiated the server action. This can be ``null`` for ``nova-manage``-initiated actions. in: body required: true type: string user_id_server_group: description: | The user ID who owns the server group. min_version: 2.13 in: body required: true type: string vcpus: description: | The number of virtual CPUs that the server uses. in: body required: true type: integer vcpus_optional: description: | The number of virtual CPUs that the server uses. 
in: body required: false type: integer version: description: | The version. in: body required: true type: string version_id: type: string in: body required: true description: > A common name for the version in question. Informative only, it has no real semantic meaning. version_ip: description: | The IP version of the address associated with server. in: body required: true type: integer version_max: type: string in: body required: true description: > If this version of the API supports microversions, the maximum microversion that is supported. This will be the empty string if microversions are not supported. version_min: type: string in: body required: true description: > If this version of the API supports microversions, the minimum microversion that is supported. This will be the empty string if microversions are not supported. version_status: type: string in: body required: true description: | The status of this API version. This can be one of: - ``CURRENT``: this is the preferred version of the API to use - ``SUPPORTED``: this is an older, but still supported version of the API - ``DEPRECATED``: a deprecated version of the API that is slated for removal versions: type: array in: body required: true description: > A list of version objects that describe the API versions available. virtual_interface: description: | Virtual interface for the floating ip address. in: body required: true type: string virtual_interface_id: description: | The UUID of the virtual interface. in: body required: true type: string virtual_interface_id_optional: description: | Virtual interface for the floating ip address in: body required: false type: string virtual_interfaces: description: | An array of virtual interfaces. in: body required: true type: array vm_state_diagnostics: description: | A string enum denoting the current state of the VM. Possible values are: - ``pending`` - ``running`` - ``paused`` - ``shutdown`` - ``crashed`` - ``suspended`` in: body required: true type: string min_version: 2.48 vm_state_optional: description: | The VM state. in: body required: false type: string volume: description: | The ``volume`` object. in: body required: true type: object volume_attachment_id_resp: description: | The volume ID of the attachment. in: body required: true type: string max_version: 2.88 volume_id: description: | The source volume ID. in: body required: true type: string volume_id_resp: description: | The UUID of the volume. in: body required: true type: string volume_size: description: | The size of the volume (in GiB). This is integer value from range 1 to 2147483647 which can be requested as integer and string. This parameter must be specified in the following cases: - An image to volume case * ``block_device_mapping_v2.source_type`` is ``image`` * ``block_device_mapping_v2.destination_type`` is ``volume`` - A blank to volume case * ``block_device_mapping_v2.source_type`` is ``blank`` * ``block_device_mapping_v2.destination_type`` is ``volume`` in: body required: false type: integer volume_status: description: | The status of the volume. in: body required: true type: string volume_type: description: | The name or unique identifier for a volume type. in: body required: true type: string volume_type_optional: description: | The unique identifier for a volume type. in: body required: false type: string # This is the volumeAttachment in a response body. 
volumeAttachment: description: | A dictionary representation of a volume attachment containing the fields ``device``, ``id``, ``serverId`` and ``volumeId``. in: body required: true type: object # This is the volumeAttachment in a POST (attach volume) request body. volumeAttachment_post: description: | A dictionary representation of a volume attachment containing the fields ``device`` and ``volumeId``. in: body required: true type: object # This is the volumeAttachment in a PUT (swap volume) request body. volumeAttachment_put: description: | A dictionary representation of a volume attachment containing the field ``volumeId`` which is the UUID of the replacement volume, and other fields to update in the attachment. in: body required: true type: object volumeAttachments: description: | The list of volume attachments. in: body required: true type: array volumeId: description: | The UUID of the volume to attach. in: body required: true type: string volumeId_resp: description: | The UUID of the attached volume. in: body required: true type: string volumeId_swap: description: | The UUID of the volume to attach instead of the attached volume. in: body required: true type: string volumeId_update: description: | The UUID of the attached volume. in: body required: true type: string volumes: description: | The list of ``volume`` objects. in: body required: true type: array vpn_public_ip: description: | The VPN IP address. in: body required: true type: string vpn_public_ip_resp: description: | The VPN public IP address. in: body required: false type: string vpn_public_port: description: | The VPN port. in: body required: true type: string vpn_public_port_resp: description: | The VPN public port. in: body required: false type: string vpn_state: description: | The VPN state. in: body required: false type: string ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/request-ids.inc0000664000175000017500000000107300000000000020346 0ustar00zuulzuul00000000000000.. -*- rst -*- =========== Request IDs =========== Users can specify the global request ID in the request header. Users can receive the local request ID in the response header. For more details about Request IDs, please reference: `Faults `_ **Request** .. rest_parameters:: parameters.yaml - X-Openstack-Request-Id: x-openstack-request-id_req **Response** .. rest_parameters:: parameters.yaml - X-Compute-Request-Id: x-compute-request-id_resp - X-Openstack-Request-Id: x-openstack-request-id_resp ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/server-migrations.inc0000664000175000017500000001660700000000000021572 0ustar00zuulzuul00000000000000.. -*- rst -*- ========================================= Server migrations (servers, migrations) ========================================= List, show, perform actions on and delete server migrations. List Migrations =============== .. rest_method:: GET /servers/{server_id}/migrations Lists in-progress live migrations for a given server. .. note:: Microversion 2.23 or greater is required for this API. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. 
rest_parameters:: parameters.yaml - server_id: server_id_path Response -------- .. rest_parameters:: parameters.yaml - migrations: migrations - created_at: created - dest_compute: migrate_dest_compute - dest_host: migrate_dest_host - dest_node: migrate_dest_node - disk_processed_bytes: migrate_disk_processed_bytes - disk_remaining_bytes: migrate_disk_remaining_bytes - disk_total_bytes: migrate_disk_total_bytes - id: migration_id - memory_processed_bytes: migrate_memory_processed_bytes - memory_remaining_bytes: migrate_memory_remaining_bytes - memory_total_bytes: migrate_memory_total_bytes - server_uuid: server_id - source_compute: migrate_source_compute - source_node: migrate_source_node - status: migrate_status - updated_at: updated - uuid: migration_uuid - user_id: user_id_migration_2_80 - project_id: project_id_migration_2_80 **Example List Migrations (2.80)** .. literalinclude:: ../../doc/api_samples/server-migrations/v2.80/migrations-index.json :language: javascript Show Migration Details ====================== .. rest_method:: GET /servers/{server_id}/migrations/{migration_id} Show details for an in-progress live migration for a given server. .. note:: Microversion 2.23 or greater is required for this API. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - migration_id: migration_id_path Response -------- .. rest_parameters:: parameters.yaml - migration: migration - created_at: created - dest_compute: migrate_dest_compute - dest_host: migrate_dest_host - dest_node: migrate_dest_node - disk_processed_bytes: migrate_disk_processed_bytes - disk_remaining_bytes: migrate_disk_remaining_bytes - disk_total_bytes: migrate_disk_total_bytes - id: migration_id - memory_processed_bytes: migrate_memory_processed_bytes - memory_remaining_bytes: migrate_memory_remaining_bytes - memory_total_bytes: migrate_memory_total_bytes - server_uuid: server_id - source_compute: migrate_source_compute - source_node: migrate_source_node - status: migrate_status - updated_at: updated - uuid: migration_uuid - user_id: user_id_migration_2_80 - project_id: project_id_migration_2_80 **Example Show Migration Details (2.80)** .. literalinclude:: ../../doc/api_samples/server-migrations/v2.80/migrations-get.json :language: javascript Force Migration Complete Action (force_complete Action) ======================================================= .. rest_method:: POST /servers/{server_id}/migrations/{migration_id}/action Force an in-progress live migration for a given server to complete. Specify the ``force_complete`` action in the request body. .. note:: Microversion 2.22 or greater is required for this API. .. note:: Not all `compute back ends`_ support forcefully completing an in-progress live migration. .. _compute back ends: https://docs.openstack.org/nova/latest/user/support-matrix.html#operation_force_live_migration_to_complete Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. **Preconditions** The server OS-EXT-STS:vm_state value must be ``active`` and the server OS-EXT-STS:task_state value must be ``migrating``. 
If the server is locked, you must have administrator privileges to force the completion of the server migration. The migration status must be ``running``. **Asynchronous Postconditions** After you make this request, you typically must keep polling the server status to determine whether the request succeeded. **Troubleshooting** If the server status remains ``MIGRATING`` for an inordinate amount of time, the request may have failed. Ensure you meet the preconditions and run the request again. If the request fails again, investigate the compute back end. More details can be found in the `admin guide `_. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - migration_id: migration_id_path - force_complete: force_migration_complete **Example Force Migration Complete (force_complete Action)** .. literalinclude:: ../../doc/api_samples/server-migrations/v2.22/force_complete.json :language: javascript Response -------- There is no body content for the response of a successful POST operation. Delete (Abort) Migration ======================== .. rest_method:: DELETE /servers/{server_id}/migrations/{migration_id} Abort an in-progress live migration. .. note:: Microversion 2.24 or greater is required for this API. .. note:: With microversion 2.65 or greater, you can abort live migrations also in ``queued`` and ``preparing`` status. .. note:: Not all `compute back ends`__ support aborting an in-progress live migration. .. __: https://docs.openstack.org/nova/latest/user/support-matrix.html#operation_abort_in_progress_live_migration Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. **Preconditions** The server OS-EXT-STS:task_state value must be ``migrating``. If the server is locked, you must have administrator privileges to force the completion of the server migration. For microversions from 2.24 to 2.64 the migration status must be ``running``, for microversion 2.65 and greater, the migration status can also be ``queued`` and ``preparing``. **Asynchronous Postconditions** After you make this request, you typically must keep polling the server status to determine whether the request succeeded. You may also monitor the migration using:: GET /servers/{server_id}/migrations/{migration_id} **Troubleshooting** If the server status remains ``MIGRATING`` for an inordinate amount of time, the request may have failed. Ensure you meet the preconditions and run the request again. If the request fails again, investigate the compute back end. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - migration_id: migration_id_path Response -------- There is no body content for the response of a successful DELETE operation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/server-security-groups.inc0000664000175000017500000000170100000000000022567 0ustar00zuulzuul00000000000000.. -*- rst -*- ====================================================== Servers Security Groups (servers, os-security-groups) ====================================================== Lists Security Groups for a server. 
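As a rough, non-normative sketch of the listing call documented in the next section, the following snippet issues the ``GET`` with the Python ``requests`` library. The ``COMPUTE_URL`` and ``OS_TOKEN`` environment variables and the server UUID are placeholder assumptions for the example, not values defined by this API; in practice the endpoint and token come from the service catalog and an authentication plugin.

.. code-block:: python

    import os
    import requests

    COMPUTE_URL = os.environ["COMPUTE_URL"]  # placeholder compute endpoint
    TOKEN = os.environ["OS_TOKEN"]           # placeholder Keystone token
    server_id = "9168b536-cd40-4630-b43f-b259807c6e87"  # example UUID only

    resp = requests.get(
        f"{COMPUTE_URL}/servers/{server_id}/os-security-groups",
        headers={"X-Auth-Token": TOKEN, "Accept": "application/json"},
    )
    resp.raise_for_status()

    # Each entry carries the fields listed in the Response table below.
    for sg in resp.json()["security_groups"]:
        print(sg["id"], sg["name"])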
List Security Groups By Server ============================== .. rest_method:: GET /servers/{server_id}/os-security-groups Lists security groups for a server. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path Response -------- .. rest_parameters:: parameters.yaml - security_groups: security_groups_obj - description: description - id: security_group_id_body - name: name - rules: rules - tenant_id: tenant_id_body **Example List security groups by server** .. literalinclude:: ../../doc/api_samples/os-security-groups/server-security-groups-list-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/server-topology.inc0000664000175000017500000000264700000000000021267 0ustar00zuulzuul00000000000000.. -*- rst -*- ===================================== Servers Topology (servers, topology) ===================================== Shows the NUMA topology information for a server. Show Server Topology ==================== .. rest_method:: GET /servers/{server_id}/topology .. versionadded:: 2.78 Shows NUMA topology information for a server. Policy defaults enable only users with the administrative role or the owners of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 200 Error response codes: unauthorized(401), notfound(404), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path Response -------- All response fields are listed below. If some information is not available or not allowed by policy, the corresponding key will not be present in the response. .. rest_parameters:: parameters.yaml - nodes: server_topology_nodes - nodes.cpu_pinning: server_topology_nodes_cpu_pinning - nodes.vcpu_set: server_topology_nodes_vcpu_set - nodes.siblings: server_topology_nodes_cpu_siblings - nodes.memory_mb: server_topology_nodes_memory_mb - nodes.host_node: server_topology_nodes_host_node - pagesize_kb: server_topology_pagesize_kb **Example Server topology (2.78)** .. literalinclude:: ../../doc/api_samples/os-server-topology/v2.78/servers-topology-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/servers-action-console-output.inc0000664000175000017500000000252700000000000024050 0ustar00zuulzuul00000000000000.. -*- rst -*- Show Console Output (os-getConsoleOutput Action) ================================================ .. rest_method:: POST /servers/{server_id}/action Shows console output for a server. This API returns the text of the console since boot. The content returned may be large. Limit the lines of console text, beginning at the tail of the content, by setting the optional ``length`` parameter in the request body. The server from which the console log is retrieved should set ``export LC_ALL=en_US.UTF-8`` in order to avoid incorrect unicode errors. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), notFound(404), conflict(409), methodNotImplemented(501) Request ------- ..
rest_parameters:: parameters.yaml - server_id: server_id_path - os-getConsoleOutput: os-getConsoleOutput - length: length **Example Show Console Output (os-getConsoleOutput Action)** This example requests the last 50 lines of console content from the specified server. .. literalinclude:: ../../doc/api_samples/os-console-output/console-output-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - output: console_output **Example Show Console Output (os-getConsoleOutput Action)** .. literalinclude:: ../../doc/api_samples/os-console-output/console-output-post-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/servers-action-crash-dump.inc0000664000175000017500000000264200000000000023111 0ustar00zuulzuul00000000000000.. -*- rst -*- Trigger Crash Dump In Server ============================ .. rest_method:: POST /servers/{server_id}/action .. versionadded:: 2.17 Triggers a crash dump in a server. When a server starts behaving oddly at a fundamental level, it may be useful to get a kernel-level crash dump to debug further. The crash dump action forces a crash dump followed by a system reboot of the server. Once the server comes back online, you can find a kernel crash dump file in a certain location of the filesystem. For example, for Ubuntu you can find it in the ``/var/crash`` directory. .. warning:: This action can cause data loss. Also, network connectivity can be lost both during and after this operation. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) * 400 is returned if the server does not support a crash dump (either by configuration or because the backend does not support it) * 409 is returned if the server is not in a state where a crash dump action is allowed. Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - trigger_crash_dump: trigger_crash_dump **Example Trigger crash dump: JSON request** .. literalinclude:: ../../doc/api_samples/servers/v2.17/server-action-trigger-crash-dump.json :language: javascript Response -------- No body is returned on a successful submission. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/servers-action-deferred-delete.inc0000664000175000017500000000360100000000000024062 0ustar00zuulzuul00000000000000.. -*- rst -*- Force-Delete Server (forceDelete Action) ======================================== .. rest_method:: POST /servers/{server_id}/action Force-deletes a server before deferred cleanup. Specify the ``forceDelete`` action in the request body. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - forceDelete: forceDelete **Example Force-Delete Server (forceDelete Action): JSON request** .. literalinclude:: ../../doc/api_samples/os-deferred-delete/force-delete-post-req.json :language: javascript Response -------- No body is returned on a successful submission. Restore Soft-Deleted Instance (restore Action) ============================================== ..
rest_method:: POST /servers/{server_id}/action Restores a previously soft-deleted server instance. You cannot use this method to restore deleted instances. Specify the ``restore`` action in the request body. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - restore: restore **Example Restore Soft-Deleted Instance (restore Action): JSON request** .. literalinclude:: ../../doc/api_samples/os-deferred-delete/restore-post-req.json :language: javascript Response -------- No body is returned on a successful submission. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/servers-action-evacuate.inc0000664000175000017500000000351100000000000022637 0ustar00zuulzuul00000000000000.. -*- rst -*- Evacuate Server (evacuate Action) ================================= .. rest_method:: POST /servers/{server_id}/action Evacuates a server from a failed host to a new host. - Specify the ``evacuate`` action in the request body. - In the request body, if ``onSharedStorage`` is set, then do not set ``adminPass``. - The target host should not be the same as the instance host. **Preconditions** - The failed host must be fenced and no longer running the original server. - The failed host must be reported as down or marked as forced down using `Update Forced Down`_. Starting from API version 2.68, the ``force`` parameter is no longer accepted as this could not be meaningfully supported by servers with complex resource allocations. Starting from API version 2.95, the server will remain stopped on the destination until it is manually started. If the server should end in the same power state on the destination as it had on the source before the evacuation, older microversions can be used. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - evacuate: evacuate - host: host - adminPass: adminPass_evacuate_request - onSharedStorage: on_shared_storage - force: force_evacuate | **Example Evacuate Server (evacuate Action)** .. literalinclude:: ../../doc/api_samples/os-evacuate/server-evacuate-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - adminPass: adminPass_evacuate .. note:: API does not return any Response for Microversion 2.14 or greater. **Example Evacuate Server (evacuate Action)** .. literalinclude:: ../../doc/api_samples/os-evacuate/server-evacuate-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/servers-action-fixed-ip.inc0000664000175000017500000000500300000000000022545 0ustar00zuulzuul00000000000000.. -*- rst -*- Add (Associate) Fixed Ip (addFixedIp Action) (DEPRECATED) ========================================================== .. warning:: This API is deprecated and will fail with a 404 starting from microversion 2.44. This is replaced with using the Neutron networking service API. .. 
rest_method:: POST /servers/{server_id}/action Adds a fixed IP address to a server instance, which associates that address with the server. The fixed IP address is retrieved from the network that you specify in the request. Specify the ``addFixedIp`` action and the network ID in the request body. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - addFixedIp: addFixedIp - networkId: net_id_resp **Example Add (Associate) Fixed Ip (addFixedIp Action)** .. literalinclude:: ../../doc/api_samples/os-multinic/multinic-add-fixed-ip-req.json :language: javascript Response -------- No response body is returned after a successful addFixedIp action. Remove (Disassociate) Fixed Ip (removeFixedIp Action) (DEPRECATED) =================================================================== .. warning:: This API is deprecated and will fail with a 404 starting from microversion 2.44. This is replaced with using the Neutron networking service API. .. rest_method:: POST /servers/{server_id}/action Removes, or disassociates, a fixed IP address from a server. Specify the ``removeFixedIp`` action in the request body. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - removeFixedIp: removeFixedIp - address: ip_address **Example Remove (Disassociate) Fixed Ip (removeFixedIp Action)** .. literalinclude:: ../../doc/api_samples/os-multinic/multinic-remove-fixed-ip-req.json :language: javascript Response -------- No response body is returned after a successful removeFixedIp action. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/servers-action-rdp-remote-consoles.inc0000664000175000017500000000251600000000000024747 0ustar00zuulzuul00000000000000.. -*- rst -*- Get RDP Console (os-getRDPConsole Action) ========================================= .. rest_method:: POST /servers/{server_id}/action max_version: 2.5 Gets an `RDP `__ console for a server. .. warning:: Along with HyperV driver, this action was removed in Nova 29.0.0 (caracal) release. The only supported connect type is ``rdp-html5``. The ``type`` parameter should be set as ``rdp-html5``. Specify the ``os-getRDPConsole`` action in the request body. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - os-getRDPConsole: os-getRDPConsole - type: os-getRDPConsole-type **Example Get RDP Console (os-getRDPConsole Action)** .. literalinclude:: ../../doc/api_samples/os-remote-consoles/get-rdp-console-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - console: remote_console - type: os-getRDPConsole-type - url: os-getRDPConsole-url **Example Get RDP Console (os-getRDPConsole Action)** .. 
literalinclude:: ../../doc/api_samples/os-remote-consoles/get-rdp-console-post-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/servers-action-remote-consoles.inc0000664000175000017500000001013200000000000024155 0ustar00zuulzuul00000000000000.. -*- rst -*- Get Serial Console (os-getSerialConsole Action) (DEPRECATED) ============================================================ .. rest_method:: POST /servers/{server_id}/action max_version: 2.5 Gets a serial console for a server. .. warning:: This action is deprecated in microversion 2.5 and superseded by the API `Server Consoles`_ in microversion 2.6. The new API offers a unified API for different console types. Specify the ``os-getSerialConsole`` action in the request body. The only supported connection type is ``serial``. The ``type`` parameter should be set as ``serial``. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - os-getSerialConsole: os-getSerialConsole - type: os-getSerialConsole-type **Example Get Serial Console (os-getSerialConsole Action)** .. literalinclude:: ../../doc/api_samples/os-remote-consoles/get-serial-console-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - console: remote_console - type: os-getSerialConsole-type - url: os-getSerialConsole-url **Example Get Serial Console (os-getSerialConsole Action)** .. literalinclude:: ../../doc/api_samples/os-remote-consoles/get-serial-console-post-resp.json :language: javascript Get SPICE Console (os-getSPICEConsole Action) (DEPRECATED) ========================================================== .. rest_method:: POST /servers/{server_id}/action max_version: 2.5 Gets a SPICE console for a server. .. warning:: This action is deprecated in microversion 2.5 and superseded by the API `Server Consoles`_ in microversion 2.6. The new API offers a unified API for different console types. Specify the ``os-getSPICEConsole`` action in the request body. The only supported connection type is ``spice-html5``. The ``type`` parameter should be set to ``spice-html5``. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - os-getSPICEConsole: os-getSPICEConsole - type: os-getSPICEConsole-type **Example Get Spice Console (os-getSPICEConsole Action)** .. literalinclude:: ../../doc/api_samples/os-remote-consoles/get-spice-console-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - console: remote_console - type: os-getSPICEConsole-type - url: os-getSPICEConsole-url **Example Get SPICE Console (os-getSPICEConsole Action)** .. literalinclude:: ../../doc/api_samples/os-remote-consoles/get-spice-console-post-resp.json :language: javascript Get VNC Console (os-getVNCConsole Action) (DEPRECATED) ====================================================== .. rest_method:: POST /servers/{server_id}/action max_version: 2.5 Gets a VNC console for a server. .. warning:: This action is deprecated in microversion 2.5 and superseded by the API `Server Consoles`_ in microversion 2.6. The new API offers a unified API for different console types. 
Specify the ``os-getVNCConsole`` action in the request body. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - os-getVNCConsole: os-getVNCConsole - type: os-getVNCConsole-type **Example Get Vnc Console (os-getVNCConsole Action)** .. literalinclude:: ../../doc/api_samples/os-remote-consoles/get-vnc-console-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - console: remote_console - type: os-getVNCConsole-type - url: os-getVNCConsole-url **Example Get VNC Console (os-getVNCConsole Action)** .. literalinclude:: ../../doc/api_samples/os-remote-consoles/get-vnc-console-post-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/servers-action-shelve.inc0000664000175000017500000002546700000000000022346 0ustar00zuulzuul00000000000000.. -*- rst -*- Shelve Server (shelve Action) ============================= .. rest_method:: POST /servers/{server_id}/action Shelves a server. Specify the ``shelve`` action in the request body. All associated data and resources are kept but anything still in memory is not retained. To restore a shelved instance, use the ``unshelve`` action. To remove a shelved instance, use the ``shelveOffload`` action. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. **Preconditions** The server status must be ``ACTIVE``, ``SHUTOFF``, ``PAUSED``, or ``SUSPENDED``. If the server is locked, you must have administrator privileges to shelve the server. **Asynchronous Postconditions** After you successfully shelve a server, its status changes to ``SHELVED`` and the image status is ``ACTIVE``. The server instance data appears on the compute node that the Compute service manages. If you boot the server from volumes or set the ``shelved_offload_time`` option to 0, the Compute service automatically deletes the instance on compute nodes and changes the server status to ``SHELVED_OFFLOADED``. **Troubleshooting** If the server status does not change to ``SHELVED`` or ``SHELVED_OFFLOADED``, the shelve operation failed. Ensure that you meet the preconditions and run the request again. If the request fails again, investigate whether another operation is running that causes a race condition. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - shelve: shelve | **Example Shelve server (shelve Action)** .. literalinclude:: ../../doc/api_samples/os-shelve/os-shelve.json :language: javascript Response -------- If successful, this method does not return content in the response body. Shelf-Offload (Remove) Server (shelveOffload Action) ==================================================== .. rest_method:: POST /servers/{server_id}/action Shelf-offloads, or removes, a shelved server. Specify the ``shelveOffload`` action in the request body. Data and resource associations are deleted. If an instance is no longer needed, you can remove that instance from the hypervisor to minimize resource usage. 
Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. **Preconditions** The server status must be ``SHELVED``. If the server is locked, you must have administrator privileges to shelve-offload the server. **Asynchronous Postconditions** After you successfully shelve-offload a server, its status changes to ``SHELVED_OFFLOADED``. The server instance data appears on the compute node. **Troubleshooting** If the server status does not change to ``SHELVED_OFFLOADED``, the shelve-offload operation failed. Ensure that you meet the preconditions and run the request again. If the request fails again, investigate whether another operation is running that causes a race condition. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - shelveOffload: shelveOffload | **Example Shelf-Offload server (shelveOffload Action)** .. literalinclude:: ../../doc/api_samples/os-shelve/os-shelve-offload.json :language: javascript Response -------- If successful, this method does not return content in the response body. Unshelve (Restore) Shelved Server (unshelve Action) =================================================== .. rest_method:: POST /servers/{server_id}/action Unshelves, or restores, a shelved server. Specify the ``unshelve`` action in the request body. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. **Preconditions** Unshelving a server without parameters requires its status to be ``SHELVED`` or ``SHELVED_OFFLOADED``. Unshelving a server with availability_zone and/or host parameters requires its status to be only ``SHELVED_OFFLOADED`` otherwise HTTP 409 conflict response is returned. If a server is locked, you must have administrator privileges to unshelve the server. As of ``microversion 2.91``, you can unshelve to a specific compute node if you have PROJECT_ADMIN privileges. This microversion also gives the ability to pin a server to an availability_zone and to unpin a server from any availability_zone. When a server is pinned to an availability_zone, the server move operations will keep the server in that availability_zone. However, when the server is not pinned to any availability_zone, the move operations can move the server to nodes in different availability_zones. The behavior according to unshelve parameters will follow the below table. +----------+---------------------------+----------+--------------------------------+ | Boot | AZ (1) | Host (1) | Result | +==========+===========================+==========+================================+ | No AZ | No AZ or AZ=null | No | Free scheduling (2) | +----------+---------------------------+----------+--------------------------------+ | No AZ | No AZ or AZ=null | Host1 | Schedule to Host1. | | | | | Server remains unpinned. | +----------+---------------------------+----------+--------------------------------+ | No AZ | AZ="AZ1" | No | Schedule to any host in "AZ1". | | | | | Server is pined to "AZ1". | +----------+---------------------------+----------+--------------------------------+ | No AZ | AZ="AZ1" | Host1 | Verify Host1 is in "AZ1", | | | | | then schedule to Host1, | | | | | otherwise reject the request. 
| | | | | Server is pined to "AZ1". | +----------+---------------------------+----------+--------------------------------+ | AZ1 | No AZ | No | Schedule to any host in "AZ1". | | | | | Server remains pined to "AZ1". | +----------+---------------------------+----------+--------------------------------+ | AZ1 | AZ=null | No | Free scheduling (2). | | | | | Server is unpinned. | +----------+---------------------------+----------+--------------------------------+ | AZ1 | No AZ | Host1 | Verify Host1 is in "AZ1", | | | | | then schedule to Host1, | | | | | otherwise reject the request. | | | | | Server remains pined to "AZ1". | +----------+---------------------------+----------+--------------------------------+ | AZ1 | AZ=null | Host1 | Schedule to Host1. | | | | | Server is unpinned. | +----------+---------------------------+----------+--------------------------------+ | AZ1 | AZ="AZ2" | No | Schedule to any host in "AZ2". | | | | | Server is pined to "AZ2". | +----------+---------------------------+----------+--------------------------------+ | AZ1 | AZ="AZ2" | Host1 | Verify Host1 is in "AZ2" then | | | | | schedule to Host1, | | | | | otherwise reject the request. | | | | | Server is pined to "AZ2". | +----------+---------------------------+----------+--------------------------------+ (1) Unshelve body parameters (2) Schedule to any host available. **Asynchronous Postconditions** After you successfully unshelve a server, its status changes to ``ACTIVE``. The server appears on the compute node. The shelved image is deleted from the list of images returned by an API call. **Troubleshooting** If the server status does not change to ``ACTIVE``, the unshelve operation failed. Ensure that you meet the preconditions and run the request again. If the request fails again, investigate whether another operation is running that causes a race condition. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. note:: Since microversion 2.77, allowed request body schema are {"unshelve": null} or {"unshelve": {"availability_zone": }}. A request body of {"unshelve": {}} is not allowed. .. note:: Since microversion 2.91, allowed request body schema are - {"unshelve": null} (Keep compatibility with previous microversions) or - {"unshelve": {"availability_zone": }} (Unshelve and pin server to availability_zone) - {"unshelve": {"availability_zone": null}} (Unshelve and unpin server from any availability zone) - {"unshelve": {"host": }} - {"unshelve": {"availability_zone": , "host": }} - {"unshelve": {"availability_zone": null, "host": }} Everything else is not allowed, examples: - {"unshelve": {}} - {"unshelve": {"host": , "host": }} - {"unshelve": {"foo": }} .. rest_parameters:: parameters.yaml - server_id: server_id_path - unshelve: unshelve - availability_zone: availability_zone_unshelve - host: host_unshelve | **Example Unshelve server (unshelve Action)** .. literalinclude:: ../../doc/api_samples/os-shelve/os-unshelve.json :language: javascript **Example Unshelve server (unshelve Action) (v2.77)** .. literalinclude:: ../../doc/api_samples/os-shelve/v2.77/os-unshelve-az.json :language: javascript **Examples Unshelve server (unshelve Action) (v2.91)** .. literalinclude:: ../../doc/api_samples/os-shelve/v2.91/os-unshelve-host.json :language: javascript .. literalinclude:: ../../doc/api_samples/os-shelve/v2.91/os-unshelve-az-host.json :language: javascript .. 
literalinclude:: ../../doc/api_samples/os-shelve/v2.91/os-unshelve-host-and-unpin-az.json :language: javascript .. literalinclude:: ../../doc/api_samples/os-shelve/v2.91/os-unshelve-unpin-az.json :language: javascript Response -------- If successful, this method does not return content in the response body. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/servers-actions.inc0000664000175000017500000010552300000000000021235 0ustar00zuulzuul00000000000000.. -*- rst -*- .. needs:body_verification =========================================== Servers - run an action (servers, action) =========================================== Enables all users to perform an action on a server. Specify the action in the request body. There are many actions available for a server: * You can associate and disassociate a fixed or floating IP address with or from a server * You can create an image from a server * You can create a backup of a server * You can force-delete a server before deferred cleanup * You can lock, pause, reboot, rebuild, rescue, resize, resume, confirm the resize of, revert a pending resize for, shelve, shelf-offload, unshelve, start, stop, unlock, unpause, and unrescue a server * You can change the password of the server * You can add a security group to or remove a security group from a server * You can trigger a crash dump into a server * You can get a graphical or serial console for a server ...among others. Add (Associate) Floating Ip (addFloatingIp Action) (DEPRECATED) ================================================================ .. warning:: This API is deprecated and will fail with a 404 starting from microversion 2.44. This is replaced with using the Neutron networking service API. .. rest_method:: POST /servers/{server_id}/action Adds a floating IP address to a server, which associates that address with the server. A pool of floating IP addresses, configured by the cloud administrator, is available in OpenStack Compute. The project quota defines the maximum number of floating IP addresses that you can allocate to the project. After you `create (allocate) a floating IPaddress `__ for a project, you can associate that address with the server. Specify the ``addFloatingIp`` action in the request body. If an instance is connected to multiple networks, you can associate a floating IP address with a specific fixed IP address by using the optional ``fixed_address`` parameter. **Preconditions** The server must exist. You can only add a floating IP address to the server when its status is ``ACTIVE`` or ``STOPPED`` Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - addFloatingIp: addFloatingIp - address: address - fixed_address: fixed_address **Example Add (Associate) Floating Ip (addFloatingIp Action)** .. literalinclude:: ../../doc/api_samples/servers/server-action-addfloatingip-req.json :language: javascript Response -------- If successful, this method does not return content in the response body. Add Security Group To A Server (addSecurityGroup Action) ======================================================== .. rest_method:: POST /servers/{server_id}/action Adds a security group to a server. Specify the ``addSecurityGroup`` action in the request body. 
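For illustration only, the request shape might be exercised as in the sketch below. The security group name ``webservers``, the ``COMPUTE_URL``/``OS_TOKEN`` values, and the server UUID are placeholder assumptions for the example, not values defined by this API.

.. code-block:: python

    import os
    import requests

    COMPUTE_URL = os.environ["COMPUTE_URL"]  # placeholder compute endpoint
    TOKEN = os.environ["OS_TOKEN"]           # placeholder Keystone token
    server_id = "9168b536-cd40-4630-b43f-b259807c6e87"  # example UUID only

    resp = requests.post(
        f"{COMPUTE_URL}/servers/{server_id}/action",
        # The action name maps to an object holding the security group name.
        json={"addSecurityGroup": {"name": "webservers"}},
        headers={"X-Auth-Token": TOKEN},
    )
    assert resp.status_code == 202  # no response body on success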
Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - addSecurityGroup: addSecurityGroup - name: name **Example Add Security Group To A Server (addSecurityGroup Action)** .. literalinclude:: ../../doc/api_samples/os-security-groups/security-group-add-post-req.json :language: javascript Response -------- If successful, this method does not return content in the response body. Change Administrative Password (changePassword Action) ====================================================== .. rest_method:: POST /servers/{server_id}/action Changes the administrative password for a server. Specify the ``changePassword`` action in the request body. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - changePassword: changePassword - adminPass: adminPass_change_password **Example Change Administrative Password (changePassword Action)** .. literalinclude:: ../../doc/api_samples/os-admin-password/admin-password-change-password.json :language: javascript Response -------- If successful, this method does not return content in the response body. Confirm Resized Server (confirmResize Action) ============================================= .. rest_method:: POST /servers/{server_id}/action Confirms a pending resize action for a server. Specify the ``confirmResize`` action in the request body. After you make this request, you typically must keep polling the server status to determine whether the request succeeded. A successfully confirming resize operation shows a status of ``ACTIVE`` or ``SHUTOFF`` and a migration status of ``confirmed``. You can also see the resized server in the compute node that OpenStack Compute manages. **Preconditions** You can only confirm the resized server where the status is ``VERIFY_RESIZE``. If the server is locked, you must have administrator privileges to confirm the server. **Troubleshooting** If the server status remains ``VERIFY_RESIZE``, the request failed. Ensure you meet the preconditions and run the request again. If the request fails again, the server status should be ``ERROR`` and a migration status of ``error``. Investigate the compute back end or ask your cloud provider. There are some options for trying to correct the server status: * If the server is running and networking works, a user with proper authority could reset the status of the server to ``active`` using the :ref:`os-resetState` API. * If the server is not running, you can try hard rebooting the server using the :ref:`reboot` API. Note that the cloud provider may still need to cleanup any orphaned resources on the source hypervisor. Normal response codes: 204 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - confirmResize: confirmResize **Example Confirm Resized Server (confirmResize Action)** .. 
literalinclude:: ../../doc/api_samples/servers/server-action-confirm-resize.json :language: javascript Response -------- If successful, this method does not return content in the response body. Create Server Back Up (createBackup Action) =========================================== .. rest_method:: POST /servers/{server_id}/action Creates a back up of a server. .. note:: This API is not supported for volume-backed instances. Specify the ``createBackup`` action in the request body. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. .. note:: Starting from version 2.39 the image quota enforcement with Nova `metadata` is removed and quota checks should be performed using Glance API directly. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - createBackup: createBackup - name: backup_name - backup_type: backup_type - rotation: backup_rotation - metadata: metadata **Example Create Server Back Up (createBackup Action)** .. literalinclude:: ../../doc/api_samples/os-create-backup/create-backup-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - Location: image_location - image_id: snapshot_id_resp_2_45 **Example Create Server Back Up (v2.45)** .. literalinclude:: ../../doc/api_samples/os-create-backup/v2.45/create-backup-resp.json :language: javascript Create Image (createImage Action) ================================= .. rest_method:: POST /servers/{server_id}/action Creates an image from a server. Specify the ``createImage`` action in the request body. After you make this request, you typically must keep polling the status of the created image to determine whether the request succeeded. If the operation succeeds, the created image has a status of ``active`` and the server status returns to the original status. You can also see the new image in the image back end that OpenStack Image service manages. .. note:: Starting from version 2.39 the image quota enforcement with Nova `metadata` is removed and quota checks should be performed using Glance API directly. **Preconditions** The server must exist. You can only create a new image from the server when its status is ``ACTIVE``, ``SHUTOFF``, ``SUSPENDED`` or ``PAUSED`` (``PAUSED`` is only supported for image-backed servers). The project must have sufficient volume snapshot quota in the block storage service when the server has attached volumes. If the project does not have sufficient volume snapshot quota, the API returns a 403 error. **Asynchronous Postconditions** A snapshot image will be created in the Image service. In the image-backed server case, volume snapshots of attached volumes will not be created. In the volume-backed server case, volume snapshots will be created for all volumes attached to the server and then those will be represented with a ``block_device_mapping`` image property in the resulting snapshot image in the Image service. If that snapshot image is used later to create a new server, it will result in a volume-backed server where the root volume is created from the snapshot of the original root volume. The volumes created from the snapshots of the original other volumes will be attached to the server. 
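Because the snapshot is created asynchronously, clients typically submit the action and then poll the Image service until the new image becomes ``active``. A rough, non-normative sketch of that flow is shown below; it assumes microversion 2.45 or later, where the action response body carries the new image's ID (earlier microversions return it only via the ``Location`` header), and the ``COMPUTE_URL``, ``IMAGE_URL``, ``OS_TOKEN``, and server UUID values are placeholders.

.. code-block:: python

    import os
    import time
    import requests

    COMPUTE_URL = os.environ["COMPUTE_URL"]  # placeholder compute endpoint
    IMAGE_URL = os.environ["IMAGE_URL"]      # placeholder image (Glance) endpoint
    TOKEN = os.environ["OS_TOKEN"]           # placeholder Keystone token
    server_id = "9168b536-cd40-4630-b43f-b259807c6e87"  # example UUID only

    headers = {
        "X-Auth-Token": TOKEN,
        # Request a microversion where the image ID is returned in the body.
        "OpenStack-API-Version": "compute 2.45",
    }

    resp = requests.post(
        f"{COMPUTE_URL}/servers/{server_id}/action",
        json={"createImage": {"name": "nightly-snapshot"}},
        headers=headers,
    )
    resp.raise_for_status()
    image_id = resp.json()["image_id"]

    # Poll the Image service until the snapshot leaves the queued/saving states.
    while True:
        image = requests.get(
            f"{IMAGE_URL}/v2/images/{image_id}",
            headers={"X-Auth-Token": TOKEN},
        ).json()
        if image["status"] in ("active", "killed", "deleted"):
            break
        time.sleep(5)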
**Troubleshooting** If the image status remains uploading or shows another error status, the request failed. Ensure you meet the preconditions and run the request again. If the request fails again, investigate the image back end. If the server status does not go back to an original server's status, the request failed. Ensure you meet the preconditions, or check if there is another operation that causes race conditions for the server, then run the request again. If the request fails again, investigate the compute back end or ask your cloud provider. If the request fails due to an error on OpenStack Compute service, the image is purged from the image store that OpenStack Image service manages. Ensure you meet the preconditions and run the request again. If the request fails again, investigate OpenStack Compute service or ask your cloud provider. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - createImage: createImage - name: image_name - metadata: image_metadata **Example Create Image (createImage Action)** .. literalinclude:: ../../doc/api_samples/servers/server-action-create-image.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - Location: image_location - image_id: snapshot_id_resp_2_45 **Example Create Image (v2.45)** .. literalinclude:: ../../doc/api_samples/servers/v2.45/server-action-create-image-resp.json :language: javascript Lock Server (lock Action) ========================= .. rest_method:: POST /servers/{server_id}/action Locks a server. Specify the ``lock`` action in the request body. Most actions by non-admin users are not allowed to the server after this operation is successful and the server is locked. See the "Lock, Unlock" item in `Server actions `_ for the restricted actions. But administrators can perform actions on the server even though the server is locked. Note that from microversion 2.73 it is possible to specify a reason when locking the server. The `unlock action `_ will unlock a server in locked state so additional actions can be performed on the server by non-admin users. You can know whether a server is locked or not and the ``locked_reason`` (if specified, from the 2.73 microversion) by the `List Servers Detailed API `_ or the `Show Server Details API `_. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Administrators can overwrite owner's lock. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - lock: lock - locked_reason: locked_reason_req **Example Lock Server (lock Action)** .. literalinclude:: ../../doc/api_samples/os-lock-server/lock-server.json :language: javascript **Example Lock Server (lock Action) (v2.73)** .. literalinclude:: ../../doc/api_samples/os-lock-server/v2.73/lock-server-with-reason.json :language: javascript Response -------- If successful, this method does not return content in the response body. Pause Server (pause Action) =========================== .. rest_method:: POST /servers/{server_id}/action Pauses a server. Changes its status to ``PAUSED``. Specify the ``pause`` action in the request body. 
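Since the status change is asynchronous, a client usually submits the action and then polls the server until it reports ``PAUSED``. The sketch below illustrates this under the same placeholder assumptions used in the earlier examples (``COMPUTE_URL``, ``OS_TOKEN``, and the server UUID are not values defined by this API).

.. code-block:: python

    import os
    import time
    import requests

    COMPUTE_URL = os.environ["COMPUTE_URL"]  # placeholder compute endpoint
    TOKEN = os.environ["OS_TOKEN"]           # placeholder Keystone token
    server_id = "9168b536-cd40-4630-b43f-b259807c6e87"  # example UUID only
    headers = {"X-Auth-Token": TOKEN}

    # Submit the action; the body carries no arguments, hence null (None).
    resp = requests.post(
        f"{COMPUTE_URL}/servers/{server_id}/action",
        json={"pause": None},
        headers=headers,
    )
    assert resp.status_code == 202

    # The transition is asynchronous, so poll until the server reports PAUSED.
    while True:
        server = requests.get(
            f"{COMPUTE_URL}/servers/{server_id}", headers=headers
        ).json()["server"]
        if server["status"] in ("PAUSED", "ERROR"):
            break
        time.sleep(2)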
Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - pause: pause **Example Pause Server (pause Action)** .. literalinclude:: ../../doc/api_samples/os-pause-server/pause-server.json :language: javascript Response -------- If successful, this method does not return content in the response body. .. _reboot: Reboot Server (reboot Action) ============================= .. rest_method:: POST /servers/{server_id}/action Reboots a server. Specify the ``reboot`` action in the request body. **Preconditions** The preconditions for rebooting a server depend on the type of reboot. You can only *SOFT* reboot a server when its status is ``ACTIVE``. You can only *HARD* reboot a server when its status is one of: * ``ACTIVE`` * ``ERROR`` * ``HARD_REBOOT`` * ``PAUSED`` * ``REBOOT`` * ``SHUTOFF`` * ``SUSPENDED`` If the server is locked, you must have administrator privileges to reboot the server. **Asynchronous Postconditions** After you successfully reboot a server, its status changes to ``ACTIVE``. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - reboot: reboot - type: reboot_type **Example Reboot Server (reboot Action)** .. literalinclude:: ../../doc/api_samples/servers/server-action-reboot.json :language: javascript Response -------- If successful, this method does not return content in the response body. Rebuild Server (rebuild Action) =============================== .. rest_method:: POST /servers/{server_id}/action Rebuilds a server. Specify the ``rebuild`` action in the request body. This operation recreates the root disk of the server. With microversion 2.93, we support rebuilding volume backed instances which will reimage the volume with the provided image. For microversion < 2.93, this operation keeps the contents of the volume given the image provided is same as the image with which the volume was created else the operation will error out. **Preconditions** The server status must be ``ACTIVE``, ``SHUTOFF`` or ``ERROR``. **Asynchronous Postconditions** If the server was in status ``SHUTOFF`` before the rebuild, it will be stopped and in status ``SHUTOFF`` after the rebuild, otherwise it will be ``ACTIVE`` if the rebuild was successful or ``ERROR`` if the rebuild failed. .. note:: With microversion 2.93, we support rebuilding volume backed instances. If any microversion < 2.93 is specified, there is a `known limitation`_ where the root disk is not replaced for volume-backed instances during a rebuild. .. _known limitation: https://bugs.launchpad.net/nova/+bug/1482040 Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. 
rest_parameters:: parameters.yaml - server_id: server_id_path - rebuild: rebuild - imageRef: imageRef_rebuild - accessIPv4: accessIPv4_in - accessIPv6: accessIPv6_in - adminPass: adminPass_request - metadata: metadata - name: server_name_optional - OS-DCF:diskConfig: OS-DCF:diskConfig - personality: personality - personality.path: path - personality.contents: contents - preserve_ephemeral: preserve_ephemeral - description: server_description - key_name: key_name_rebuild_req - user_data: user_data_rebuild_req - trusted_image_certificates: server_trusted_image_certificates_rebuild_req - hostname: server_hostname_req **Example Rebuild Server (rebuild Action) (v2.63)** .. literalinclude:: ../../doc/api_samples/servers/v2.63/server-action-rebuild.json :language: javascript **Example Rebuild Server (rebuild Action) (v2.90)** .. literalinclude:: ../../doc/api_samples/servers/v2.90/server-action-rebuild.json :language: javascript **Example Rebuild Server (rebuild Action) (v2.94)** .. literalinclude:: ../../doc/api_samples/servers/v2.94/server-action-rebuild.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - Location: server_location - server: server - accessIPv4: accessIPv4 - accessIPv6: accessIPv6 - addresses: addresses_obj - created: created - flavor: flavor_server - flavor.id: flavor_id_body_2_46 - flavor.links: flavor_links_2_46 - flavor.vcpus: flavor_cpus_2_47 - flavor.ram: flavor_ram_2_47 - flavor.disk: flavor_disk_2_47 - flavor.ephemeral: flavor_ephem_disk_2_47 - flavor.swap: flavor_swap_2_47 - flavor.original_name: flavor_original_name - flavor.extra_specs: extra_specs_2_47 - flavor.extra_specs.key: flavor_extra_spec_key_2_47 - flavor.extra_specs.value: flavor_extra_spec_value_2_47 - hostId: hostId - id: server_id - image: image - image.id: image_id_body - image.links: links - image.properties: image_properties - links: server_links - metadata: metadata_object - name: server_name - OS-DCF:diskConfig: disk_config - status: server_status - tenant_id: tenant_id_body - updated: updated - user_id: user_id - adminPass: adminPass_response - pinned_availability_zone: pinned_availability_zone - progress: progress - locked: locked - description: server_description_resp - tags: tags - key_name: key_name_rebuild_resp - user_data: user_data_rebuild_resp - trusted_image_certificates: server_trusted_image_certificates_resp - server_groups: server_groups_2_71 - locked_reason: locked_reason_resp - config_drive: config_drive_resp_update_rebuild - OS-EXT-AZ:availability_zone: OS-EXT-AZ:availability_zone_update_rebuild - OS-EXT-SRV-ATTR:host: OS-EXT-SRV-ATTR:host_update_rebuild - OS-EXT-SRV-ATTR:hypervisor_hostname: OS-EXT-SRV-ATTR:hypervisor_hostname_update_rebuild - OS-EXT-SRV-ATTR:instance_name: OS-EXT-SRV-ATTR:instance_name_update_rebuild - OS-EXT-STS:power_state: OS-EXT-STS:power_state_update_rebuild - OS-EXT-STS:task_state: OS-EXT-STS:task_state_update_rebuild - OS-EXT-STS:vm_state: OS-EXT-STS:vm_state_update_rebuild - OS-EXT-SRV-ATTR:hostname: server_hostname_update_rebuild - OS-EXT-SRV-ATTR:reservation_id: server_reservation_id_update_rebuild - OS-EXT-SRV-ATTR:launch_index: server_launch_index_update_rebuild - OS-EXT-SRV-ATTR:kernel_id: server_kernel_id_update_rebuild - OS-EXT-SRV-ATTR:ramdisk_id: server_ramdisk_id_update_rebuild - OS-EXT-SRV-ATTR:root_device_name: server_root_device_name_update_rebuild - os-extended-volumes:volumes_attached: os-extended-volumes:volumes_attached_update_rebuild - os-extended-volumes:volumes_attached.id: 
os-extended-volumes:volumes_attached.id_update_rebuild - os-extended-volumes:volumes_attached.delete_on_termination: os-extended-volumes:volumes_attached.delete_on_termination_update_rebuild - OS-SRV-USG:launched_at: OS-SRV-USG:launched_at_update_rebuild - OS-SRV-USG:terminated_at: OS-SRV-USG:terminated_at_update_rebuild - scheduler_hints: scheduler_hints - security_groups: security_groups_obj_update_rebuild - security_group.name: name_update_rebuild - host_status: host_status_update_rebuild **Example Rebuild Server (rebuild Action) (v2.100)** .. literalinclude:: /../../doc/api_samples/servers/v2.100/server-action-rebuild-resp.json **Example Rebuild Server (rebuild Action) (v2.98)** .. literalinclude:: ../../doc/api_samples/servers/v2.98/server-action-rebuild-resp.json :language: javascript **Example Rebuild Server (rebuild Action) (v2.96)** .. literalinclude:: ../../doc/api_samples/servers/v2.96/server-action-rebuild-resp.json :language: javascript **Example Rebuild Server (rebuild Action) (v2.75)** .. literalinclude:: ../../doc/api_samples/servers/v2.75/server-action-rebuild-resp.json :language: javascript Remove (Disassociate) Floating Ip (removeFloatingIp Action) (DEPRECATED) ========================================================================= .. warning:: This API is deprecated and will fail with a 404 starting from microversion 2.44. This is replaced with using the Neutron networking service API. .. rest_method:: POST /servers/{server_id}/action Removes, or disassociates, a floating IP address from a server. The IP address is returned to the pool of IP addresses that is available for all projects. When you remove a floating IP address and that IP address is still associated with a running instance, it is automatically disassociated from that instance. Specify the ``removeFloatingIp`` action in the request body. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - removeFloatingIp: removeFloatingIp - address: address **Example Remove (Disassociate) Floating Ip (removeFloatingIp Action)** .. literalinclude:: ../../doc/api_samples/servers/server-action-removefloatingip-req.json :language: javascript Response -------- If successful, this method does not return content in the response body. Remove Security Group From A Server (removeSecurityGroup Action) ================================================================ .. rest_method:: POST /servers/{server_id}/action Removes a security group from a server. Specify the ``removeSecurityGroup`` action in the request body. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - removeSecurityGroup: removeSecurityGroup - name: name **Example Remove Security Group From A Server (removeSecurityGroup Action)** .. literalinclude:: ../../doc/api_samples/os-security-groups/security-group-remove-post-req.json :language: javascript Response -------- If successful, this method does not return content in the response body. Rescue Server (rescue Action) ============================= .. rest_method:: POST /servers/{server_id}/action Puts a server in rescue mode and changes its status to ``RESCUE``. .. note:: Until microversion 2.87, this API is not supported for volume-backed instances. 
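The authoritative request and response samples are the ``os-rescue`` files referenced later in this section. Purely as an illustrative sketch (the endpoint, token, server UUID, and image UUID are placeholders, and ``requests`` is a third-party library, not an official OpenStack client), a rescue call using the optional fields described below might look like:

.. code-block:: python

    import requests

    COMPUTE_URL = "https://compute.example.com/v2.1"    # placeholder endpoint
    TOKEN = "gAAAA..."                                   # placeholder Keystone token
    SERVER_ID = "9168b536-cd40-4630-b43f-b259807c6e87"   # placeholder server UUID

    body = {
        "rescue": {
            # Both fields are optional; omit them to accept a generated
            # password and the base image.
            "adminPass": "MySecretPass",
            "rescue_image_ref": "70a599e0-31e7-49b7-b260-868f441e862b",
        }
    }
    resp = requests.post(
        f"{COMPUTE_URL}/servers/{SERVER_ID}/action",
        headers={"X-Auth-Token": TOKEN, "Content-Type": "application/json"},
        json=body,
    )
    print(resp.json()["adminPass"])  # the response echoes the rescue password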
Specify the ``rescue`` action in the request body. If you specify the ``rescue_image_ref`` extended attribute, the image is used to rescue the instance. If you omit an image reference, the base image reference is used by default. **Asynchronous Postconditions** After you successfully rescue a server and make a ``GET /servers/​{server_id}​`` request, its status changes to ``RESCUE``. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - rescue: rescue - adminPass: adminPass_rescue_request - rescue_image_ref: rescue_image_ref **Example Rescue server (rescue Action)** .. literalinclude:: ../../doc/api_samples/os-rescue/server-rescue-req-with-image-ref.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - adminPass: adminPass_response **Example Rescue server (rescue Action)** .. literalinclude:: ../../doc/api_samples/os-rescue/server-rescue.json :language: javascript Resize Server (resize Action) ============================= .. rest_method:: POST /servers/{server_id}/action Resizes a server. Specify the ``resize`` action in the request body. **Preconditions** You can only resize a server when its status is ``ACTIVE`` or ``SHUTOFF``. If the server is locked, you must have administrator privileges to resize the server. **Asynchronous Postconditions** A successfully resized server shows a ``VERIFY_RESIZE`` status and ``finished`` migration status. If the cloud has configured the `resize_confirm_window`_ option of the Compute service to a positive value, the Compute service automatically confirms the resize operation after the configured interval. .. _resize_confirm_window: https://docs.openstack.org/nova/latest/configuration/config.html#DEFAULT.resize_confirm_window .. note:: There is a `known limitation `__ that ephemeral disks are not resized. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - resize: resize - flavorRef: flavorRef_resize - OS-DCF:diskConfig: OS-DCF:diskConfig **Example Resize Server (Resize Action)** .. literalinclude:: ../../doc/api_samples/servers/server-action-resize.json :language: javascript Response -------- If successful, this method does not return content in the response body. Resume Suspended Server (resume Action) ======================================= .. rest_method:: POST /servers/{server_id}/action Resumes a suspended server and changes its status to ``ACTIVE``. Specify the ``resume`` action in the request body. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - resume: resume **Example Resume Suspended Server (Resume Action)** .. literalinclude:: ../../doc/api_samples/os-suspend-server/server-resume.json :language: javascript Response -------- If successful, this method does not return content in the response body. Revert Resized Server (revertResize Action) =========================================== .. 
rest_method:: POST /servers/{server_id}/action Cancels and reverts a pending resize action for a server. Specify the ``revertResize`` action in the request body. **Preconditions** You can only revert the resized server where the status is ``VERIFY_RESIZE`` and the OS-EXT-STS:vm_state is ``resized``. If the server is locked, you must have administrator privileges to revert the resizing. **Asynchronous Postconditions** After you make this request, you typically must keep polling the server status to determine whether the request succeeded. A reverting resize operation shows a status of ``REVERT_RESIZE`` and a task_state of ``resize_reverting``. If successful, the status will return to ``ACTIVE`` or ``SHUTOFF``. You can also see the reverted server in the compute node that OpenStack Compute manages. **Troubleshooting** If the server status remains ``VERIFY_RESIZE``, the request failed. Ensure you meet the preconditions and run the request again. If the request fails again, investigate the compute back end. The server is not reverted in the compute node that OpenStack Compute manages. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - revertResize: revertResize **Example Revert Resized Server (revertResize Action)** .. literalinclude:: ../../doc/api_samples/servers/server-action-revert-resize.json :language: javascript Response -------- If successful, this method does not return content in the response body. Start Server (os-start Action) ============================== .. rest_method:: POST /servers/{server_id}/action Starts a stopped server and changes its status to ``ACTIVE``. Specify the ``os-start`` action in the request body. **Preconditions** The server status must be ``SHUTOFF``. If the server is locked, you must have administrator privileges to start the server. **Asynchronous Postconditions** After you successfully start a server, its status changes to ``ACTIVE``. **Troubleshooting** If the server status does not change to ``ACTIVE``, the start operation failed. Ensure that you meet the preconditions and run the request again. If the request fails again, investigate whether another operation is running that causes a race condition. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - os-start: os-start **Example Start server** .. literalinclude:: ../../doc/api_samples/servers/server-action-start.json :language: javascript Response -------- If successful, this method does not return content in the response body. Stop Server (os-stop Action) ============================ .. rest_method:: POST /servers/{server_id}/action Stops a running server and changes its status to ``SHUTOFF``. Specify the ``os-stop`` action in the request body. **Preconditions** The server status must be ``ACTIVE`` or ``ERROR``. If the server is locked, you must have administrator privileges to stop the server. **Asynchronous Postconditions** After you successfully stop a server, its status changes to ``SHUTOFF``. This API operation does not delete the server instance data and the data will be available again after ``os-start`` action. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. 
rest_parameters:: parameters.yaml - server_id: server_id_path - os-stop: os-stop **Example Stop server** .. literalinclude:: ../../doc/api_samples/servers/server-action-stop.json :language: javascript Response -------- If successful, this method does not return content in the response body. Suspend Server (suspend Action) =============================== .. rest_method:: POST /servers/{server_id}/action Suspends a server and changes its status to ``SUSPENDED``. Specify the ``suspend`` action in the request body. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - suspend: suspend **Example Suspend Server (suspend Action)** .. literalinclude:: ../../doc/api_samples/os-suspend-server/server-suspend.json :language: javascript Response -------- If successful, this method does not return content in the response body. Unlock Server (unlock Action) ============================= .. rest_method:: POST /servers/{server_id}/action Unlocks a locked server. Specify the ``unlock`` action in the request body. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - unlock: unlock **Example Unlock Server (unlock Action)** .. literalinclude:: ../../doc/api_samples/os-lock-server/unlock-server.json :language: javascript Response -------- If successful, this method does not return content in the response body. Unpause Server (unpause Action) =============================== .. rest_method:: POST /servers/{server_id}/action Unpauses a paused server and changes its status to ``ACTIVE``. Specify the ``unpause`` action in the request body. Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - unpause: unpause **Example Unpause Server (unpause Action)** .. literalinclude:: ../../doc/api_samples/os-pause-server/unpause-server.json :language: javascript Response -------- If successful, this method does not return content in the response body. Unrescue Server (unrescue Action) ================================= .. rest_method:: POST /servers/{server_id}/action Unrescues a server. Changes status to ``ACTIVE``. Specify the ``unrescue`` action in the request body. **Preconditions** The server must exist. You can only unrescue a server when its status is ``RESCUE``. **Asynchronous Postconditions** After you successfully unrescue a server and make a ``GET /servers/​{server_id}​`` request, its status changes to ``ACTIVE``. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409), notImplemented(501) Request ------- .. 
rest_parameters:: parameters.yaml - server_id: server_id_path - unrescue: unrescue **Example Unrescue server** .. literalinclude:: ../../doc/api_samples/os-rescue/server-unrescue-req.json :language: javascript Response -------- If successful, this method does not return content in the response body. .. -*- rst -*- ========================================================== Servers - run an administrative action (servers, action) ========================================================== Enables administrators to perform an action on a server. Specify the action in the request body. You can inject network information into a server, migrate or live-migrate it, reset its networking or its state, and evacuate it from a failed host to a new host. Inject Network Information (injectNetworkInfo Action) ===================================================== .. rest_method:: POST /servers/{server_id}/action Injects network information into a server. Specify the ``injectNetworkInfo`` action in the request body. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. .. warning:: There is very limited support for this API. For more information, see the `nova virt support matrix `__ Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - injectNetworkInfo: injectNetworkInfo **Example Inject Network Information (injectNetworkInfo Action)** .. literalinclude:: ../../doc/api_samples/os-admin-actions/admin-actions-inject-network-info.json :language: javascript Response -------- If successful, this method does not return content in the response body. Migrate Server (migrate Action) =============================== .. rest_method:: POST /servers/{server_id}/action Migrates a server to a host. Specify the ``migrate`` action in the request body. Up to microversion 2.55, the scheduler chooses the host. Starting from microversion 2.56, the ``host`` parameter is available to specify the destination host. If you specify ``null`` or don't specify this parameter, the scheduler chooses a host. **Asynchronous Postconditions** A successfully migrated server shows a ``VERIFY_RESIZE`` status and ``finished`` migration status. If the cloud has configured the `resize_confirm_window`_ option of the Compute service to a positive value, the Compute service automatically confirms the migrate operation after the configured interval. .. _resize_confirm_window: https://docs.openstack.org/nova/latest/configuration/config.html#DEFAULT.resize_confirm_window There are two different policies for this action, depending on whether the host parameter is set. Both defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - migrate: migrate - host: host_migration_2_56 **Example Migrate Server (migrate Action) (v2.1)** ..
literalinclude:: ../../doc/api_samples/os-migrate-server/migrate-server.json :language: javascript **Example Migrate Server (migrate Action) (v2.56)** .. literalinclude:: ../../doc/api_samples/os-migrate-server/v2.56/migrate-server.json :language: javascript Response -------- If successful, this method does not return content in the response body. Live-Migrate Server (os-migrateLive Action) =========================================== .. rest_method:: POST /servers/{server_id}/action Live-migrates a server to a new host without rebooting. Specify the ``os-migrateLive`` action in the request body. Use the ``host`` parameter to specify the destination host. If this parameter is ``null``, the scheduler chooses a host. If a scheduled host is not suitable for the migration, the scheduler tries up to ``migrate_max_retries`` rescheduling attempts. Starting from API version 2.25, the ``block_migration`` parameter can be set to ``auto`` so that nova can decide the value of ``block_migration`` during live migration. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Starting from REST API version 2.34, pre-live-migration checks are done asynchronously; the results of these checks are available in ``instance-actions``. Nova responds immediately, and no pre-live-migration checks are returned. The instance will not immediately change state to ``ERROR`` if a failure of the live-migration checks occurs. Starting from API version 2.68, the ``force`` parameter is no longer accepted as this could not be meaningfully supported by servers with complex resource allocations. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - os-migrateLive: os-migrateLive - host: host_migration - block_migration: block_migration - block_migration: block_migration_2_25 - disk_over_commit: disk_over_commit - force: force_live_migrate **Example Live-Migrate Server (os-migrateLive Action)** .. literalinclude:: ../../doc/api_samples/os-migrate-server/v2.30/live-migrate-server.json :language: javascript Response -------- If successful, this method does not return content in the response body. Reset Networking On A Server (resetNetwork Action) (DEPRECATED) =============================================================== .. rest_method:: POST /servers/{server_id}/action Resets networking on a server. .. warning:: This action was only supported by the XenAPI virt driver, which was deprecated in the 20.0.0 (Train) release and removed in the 22.0.0 (Victoria) release. This action should be avoided in new applications. It was removed in the 23.0.0 (Wallaby) release. Specify the ``resetNetwork`` action in the request body. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409), gone(410) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - resetNetwork: resetNetwork **Example Reset Networking On A Server (resetNetwork Action)** .. literalinclude:: ../../doc/api_samples/os-admin-actions/admin-actions-reset-network.json :language: javascript Response -------- If successful, this method does not return content in the response body.
.. _os-resetState: Reset Server State (os-resetState Action) ========================================= .. rest_method:: POST /servers/{server_id}/action Resets the state of a server. Specify the ``os-resetState`` action and the ``state`` in the request body. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - os-resetState: os-resetState - os-resetState.state: os-resetState_state **Example Reset Server State (os-resetState Action)** .. literalinclude:: ../../doc/api_samples/os-admin-actions/admin-actions-reset-server-state.json :language: javascript Response -------- If successful, this method does not return content in the response body. .. -*- rst -*- ================= Server Consoles ================= Manage server consoles. Create Console ============== .. rest_method:: POST /servers/{server_id}/remote-consoles .. note:: Microversion 2.6 or greater is required for this API. The API provides a unified request for creating a remote console. The user can get a URL from this API to connect to the console. The URL includes the token which is used to get permission to access the console. Servers may support different console protocols. To return a remote console using a specific protocol, such as VNC, set the ``protocol`` parameter to ``vnc``. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409), notImplemented(501) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - remote_console: remote_console - protocol: remote_console_protocol - type: remote_console_type **Example Get Remote VNC Console** .. literalinclude:: ../../doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-req.json :language: javascript **Example Get Remote spice-direct Console** *``spice-direct`` consoles were added in microversion 2.99.* .. literalinclude:: ../../doc/api_samples/os-remote-consoles/v2.99/create-spice-direct-console-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - remote_console: remote_console - protocol: remote_console_protocol - type: remote_console_type - url: remote_console_url **Example Get Remote VNC Console** .. literalinclude:: ../../doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-resp.json :language: javascript **Example Get Remote spice-direct Console** *``spice-direct`` consoles were added in microversion 2.99.* .. literalinclude:: ../../doc/api_samples/os-remote-consoles/v2.99/create-spice-direct-console-resp.json :language: javascript Show Console Connection Information =================================== .. rest_method:: GET /os-console-auth-tokens/{console_token} Given the console authentication token for a server, shows the related connection information. The Nova Hyper-V driver has been removed, therefore requests for RDP console connection information will always return an HTTP 400 error. Starting from microversion 2.31 it is available for all other console types.
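As an illustrative sketch only (the endpoint, token, and console token below are placeholders, and ``requests`` is a third-party library, not an official OpenStack client), retrieving the connection information for a console token might look like the following; note the microversion header, since this call needs at least compute API 2.31:

.. code-block:: python

    import requests

    COMPUTE_URL = "https://compute.example.com/v2.1"        # placeholder endpoint
    TOKEN = "gAAAA..."                                       # placeholder Keystone token
    CONSOLE_TOKEN = "b60bcfc3-5fd4-4d21-986c-e83379107819"   # token from the console URL

    resp = requests.get(
        f"{COMPUTE_URL}/os-console-auth-tokens/{CONSOLE_TOKEN}",
        headers={
            "X-Auth-Token": TOKEN,
            # Request microversion 2.31 or later so the call works for all
            # remaining console types.
            "OpenStack-API-Version": "compute 2.31",
        },
    )
    print(resp.json()["console"])  # host, port, internal_access_path, ...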
Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - console_token: console_token Response -------- .. rest_parameters:: parameters.yaml - console: console - instance_uuid: instance_id_body - host: console_host - port: port_number - tls_port: tls_port_number - internal_access_path: internal_access_path **Example Show Console Authentication Token** .. literalinclude:: ../../doc/api_samples/os-console-auth-tokens/v2.31/get-console-connect-info-get-resp.json :language: javascript **Example Console Connection Information for a spice-direct Console** *``spice-direct`` consoles were added in microversion 2.99.* .. literalinclude:: ../../doc/api_samples/os-console-auth-tokens/v2.99/get-console-connect-info-get-resp.json :language: javascript .. -*- rst -*- .. needs:body_verification =================== Servers (servers) =================== Lists, creates, shows details for, updates, and deletes servers. **Passwords** When you create a server, you can specify a password through the optional adminPass attribute. The password must meet the complexity requirements set by your OpenStack Compute provider. The server might enter an ``ERROR`` state if the complexity requirements are not met. In this case, a client might issue a change password action to reset the server password. If you do not specify a password, the API generates and assigns a random password that it returns in the response object. This password meets the security requirements set by the compute provider. For security reasons, subsequent GET calls do not require this password. **Server metadata** You can specify custom server metadata at server launch time. The maximum size for each metadata key-value pair is 255 bytes. The compute provider determines the maximum number of key-value pairs for each server. You can query this value through the ``maxServerMeta`` absolute limit. **Server networks** You can specify one or more networks to which the server connects at launch time. Users can also specify a specific port on the network or the fixed IP address to assign to the server interface. .. note:: You can use both IPv4 and IPv6 addresses as access addresses, and you can assign both addresses simultaneously. You can update access addresses after you create a server. **Server personality** .. note:: The use of personality files is deprecated starting with the 2.57 microversion. Use ``metadata`` and ``user_data`` to customize a server instance. To customize the personality of a server instance, you can inject data into its file system. For example, you might insert ssh keys, set configuration files, or store data that you want to retrieve from inside the instance. This customization method provides minimal launch-time personalization. If you require significant customization, create a custom image. Follow these guidelines when you inject files: - The maximum size of the file path data is 255 bytes. - Encode the file contents as a Base64 string. The compute provider determines the maximum size of the file contents. The image that you use to create the server determines this value. ..
note:: The maximum limit refers to the number of bytes in the decoded data and not to the number of characters in the encoded data. - The ``maxPersonality`` absolute limit defines the maximum number of file path and content pairs that you can supply. The compute provider determines this value. - The ``maxPersonalitySize`` absolute limit is a byte limit that applies to all images in the deployment. Providers can set additional per-image personality limits. The file injection might not occur until after the server builds and boots. After file injection, only system administrators can access personality files. For example, on Linux, all files have root as the owner and the root group as the group owner, and allow only user and group read access (``chmod 440``). **Server access addresses** In a hybrid environment, the underlying implementation might not control the IP address of a server. Instead, the access IP address might be part of the dedicated hardware; for example, a router/NAT device. In this case, you cannot use the addresses that the implementation provides to access the server from outside the local LAN. Instead, the API might assign a separate access address at creation time to provide access to the server. This address might not be directly bound to a network interface on the server and might not necessarily appear when you query the server addresses. However, clients should use an access address to access the server directly. List Servers ============ .. rest_method:: GET /servers Lists IDs, names, and links for servers. By default the servers are filtered using the project ID associated with the authenticated request. Servers contain a status attribute that indicates the current server state. You can filter on the server status when you complete a list servers request. The server status is returned in the response body. The possible server status values are: - ``ACTIVE``. The server is active. - ``BUILD``. The server has not finished the original build process. - ``DELETED``. The server is permanently deleted. - ``ERROR``. The server is in error. - ``HARD_REBOOT``. The server is hard rebooting. This is equivalent to pulling the power plug on a physical server, plugging it back in, and rebooting it. - ``MIGRATING``. The server is being migrated to a new host. - ``PASSWORD``. The password is being reset on the server. - ``PAUSED``. In a paused state, the state of the server is stored in RAM. A paused server continues to run in frozen state. - ``REBOOT``. The server is in a soft reboot state. A reboot command was passed to the operating system. - ``REBUILD``. The server is currently being rebuilt from an image. - ``RESCUE``. The server is in rescue mode. A rescue image is running with the original server image attached. - ``RESIZE``. Server is performing the differential copy of data that changed during its initial copy. Server is down for this stage. - ``REVERT_RESIZE``. The resize or migration of a server failed for some reason. The destination server is being cleaned up and the original source server is restarting. - ``SHELVED``: The server is in shelved state. Depending on the shelve offload time, the server will be automatically shelved offloaded. - ``SHELVED_OFFLOADED``: The shelved server is offloaded (removed from the compute host) and it needs unshelved action to be used again. - ``SHUTOFF``. The server is powered off and the disk image still persists. - ``SOFT_DELETED``. The server is marked as deleted but the disk images are still available to restore. - ``SUSPENDED``. 
The server is suspended, either by request or necessity. When you suspend a server, its state is stored on disk, all memory is written to disk, and the server is stopped. Suspending a server is similar to placing a device in hibernation and its occupied resource will not be freed but rather kept for when the server is resumed. If a server is infrequently used and the occupied resource needs to be freed to create other servers, it should be shelved. - ``UNKNOWN``. The state of the server is unknown. Contact your cloud provider. - ``VERIFY_RESIZE``. System is awaiting confirmation that the server is operational after a move or resize. There is whitelist for valid filter keys. Any filter key other than from whitelist will be silently ignored. - For non-admin users, whitelist is different from admin users whitelist. The valid whitelist can be configured using the ``os_compute_api:servers:allow_all_filters`` policy rule. By default, the valid whitelist for non-admin users includes - ``changes-since`` - ``flavor`` - ``image`` - ``ip`` - ``ip6`` (New in version 2.5) - ``name`` - ``not-tags`` (New in version 2.26) - ``not-tags-any`` (New in version 2.26) - ``reservation_id`` - ``status`` - ``tags`` (New in version 2.26) - ``tags-any`` (New in version 2.26) - ``changes-before`` (New in version 2.66) - ``locked`` (New in version 2.73) - ``availability_zone`` (New in version 2.83) - ``config_drive`` (New in version 2.83) - ``key_name`` (New in version 2.83) - ``created_at`` (New in version 2.83) - ``launched_at`` (New in version 2.83) - ``terminated_at`` (New in version 2.83) - ``power_state`` (New in version 2.83) - ``task_state`` (New in version 2.83) - ``vm_state`` (New in version 2.83) - ``progress`` (New in version 2.83) - ``user_id`` (New in version 2.83) - For admin user, whitelist includes all filter keys mentioned in :ref:`list-server-request` Section. .. note:: Starting with microversion 2.69 if server details cannot be loaded due to a transient condition in the deployment like infrastructure failure, the response body for those unavailable servers will be missing keys. See `handling down cells `__ section of the Compute API guide for more information on the keys that would be returned in the partial constructs. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) .. _list-server-request: Request ------- .. 
rest_parameters:: parameters.yaml - access_ip_v4: access_ip_v4_query_server - access_ip_v6: access_ip_v6_query_server - all_tenants: all_tenants_query - auto_disk_config: disk_config_query_server - availability_zone: availability_zone_query_server - changes-since: changes_since_server - config_drive: config_drive_query_server - created_at: created_at_query_server - deleted: deleted_query - description: description_query_server - flavor: flavor_query - host: host_query_server - hostname: hostname_query_server - image: image_query - ip: ip_query - ip6: ip6_query - kernel_id: kernel_id_query_server - key_name: key_name_query_server - launch_index: launch_index_query_server - launched_at: launched_at_query_server - limit: limit - locked_by: locked_by_query_server - marker: marker - name: server_name_query - node: node_query_server - power_state: power_state_query_server - progress: progress_query_server - project_id: project_id_query_server - ramdisk_id: ramdisk_id_query_server - reservation_id: reservation_id_query - root_device_name: server_root_device_name_query - soft_deleted: soft_deleted_server - sort_dir: sort_dir_server - sort_key: sort_key_server - status: server_status_query - task_state: task_state_query_server - terminated_at: terminated_at_query_server - user_id: user_id_query_server - uuid: server_uuid_query - vm_state: vm_state_query_server - not-tags: not_tags_query - not-tags-any: not_tags_any_query - tags: tags_query - tags-any: tags_any_query - changes-before: changes_before_server - locked: locked_query_server Response -------- .. rest_parameters:: parameters.yaml - servers: servers - id: server_id - links: links - name: server_name - servers_links: servers_links **Example List Servers** .. literalinclude:: ../../doc/api_samples/servers/servers-list-resp.json :language: javascript **Example List Servers (2.69)** This is a sample response for the servers from the non-responsive part of the deployment. The responses for the available server records will be normal without any missing keys. .. literalinclude:: ../../doc/api_samples/servers/v2.69/servers-list-resp.json :language: javascript Create Server ============= .. rest_method:: POST /servers Creates a server. The progress of this operation depends on the location of the requested image, network I/O, host load, selected flavor, and other factors. To check the progress of the request, make a ``GET /servers/{id}`` request. This call returns a progress attribute, which is a percentage value from 0 to 100. The ``Location`` header returns the full URL to the newly created server and is available as a ``self`` and ``bookmark`` link in the server representation. When you create a server, the response shows only the server ID, its links, and the admin password. You can get additional attributes through subsequent ``GET`` requests on the server. Include the ``block_device_mapping_v2`` parameter in the create request body to boot a server from a volume. Include the ``key_name`` parameter in the create request body to add a keypair to the server when you create it. To create a keypair, make a `create keypair `__ request. .. note:: Starting with microversion 2.37 the ``networks`` field is required. **Preconditions** - The user must have sufficient server quota to create the number of servers requested. - The connection to the Image service is valid. **Asynchronous postconditions** - With correct permissions, you can see the server status as ``ACTIVE`` through API calls. 
- With correct access, you can see the created server in the compute node that OpenStack Compute manages. **Troubleshooting** - If the server status remains ``BUILDING`` or shows another error status, the request failed. Ensure you meet the preconditions then investigate the compute node. - The server is not created in the compute node that OpenStack Compute manages. - The compute node needs enough free resource to match the resource of the server creation request. - Ensure that the scheduler selection filter can fulfill the request with the available compute nodes that match the selection criteria of the filter. Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) .. TODO(sdague): leave these notes for later when fixing the body language. They are commented out so they won't render, but are useful to not have to look this up again later. A conflict(409) is returned in the event of trying to allocated already allocated resources (such as networks) to the server in question. entityTooLarge(413) is returned if the ``user_data`` exceeds what is allowed by the backend. All other failure conditions map to 400, and will need to be disambiguated by the error string returned. Request ------- .. rest_parameters:: parameters.yaml - server: server - flavorRef: flavorRef - name: server_name - networks: networks - networks.uuid: network_uuid - networks.port: port - networks.fixed_ip: fixed_ip - networks.tag: device_tag_nic - accessIPv4: accessIPv4_in - accessIPv6: accessIPv6_in - adminPass: adminPass_request - availability_zone: os-availability-zone:availability_zone - block_device_mapping_v2: block_device_mapping_v2 - block_device_mapping_v2.boot_index: boot_index - block_device_mapping_v2.delete_on_termination: delete_on_termination - block_device_mapping_v2.destination_type: destination_type - block_device_mapping_v2.device_name: device_name - block_device_mapping_v2.device_type: device_type - block_device_mapping_v2.disk_bus: disk_bus - block_device_mapping_v2.guest_format: guest_format - block_device_mapping_v2.no_device: no_device - block_device_mapping_v2.source_type: source_type - block_device_mapping_v2.uuid: block_device_uuid - block_device_mapping_v2.volume_size: volume_size - block_device_mapping_v2.tag: device_tag_bdm - block_device_mapping_v2.volume_type: device_volume_type - config_drive: config_drive - imageRef: imageRef - key_name: key_name - metadata: metadata - OS-DCF:diskConfig: OS-DCF:diskConfig - personality: personality - security_groups: security_groups - user_data: user_data - description: server_description - hostname: server_hostname_req - tags: server_tags_create - trusted_image_certificates: server_trusted_image_certificates_create_req - host: server_host_create - hypervisor_hostname: server_hypervisor_hostname_create - os:scheduler_hints: os:scheduler_hints - os:scheduler_hints.build_near_host_ip: os:scheduler_hints_build_near_host_ip - os:scheduler_hints.cidr: os:scheduler_hints_cidr - os:scheduler_hints.different_cell: os:scheduler_hints_different_cell - os:scheduler_hints.different_host: os:scheduler_hints_different_host - os:scheduler_hints.group: os:scheduler_hints_group - os:scheduler_hints.query: os:scheduler_hints_query - os:scheduler_hints.same_host: os:scheduler_hints_same_host - os:scheduler_hints.target_cell: os:scheduler_hints_target_cell **Example Create Server** .. 
literalinclude:: ../../doc/api_samples/servers/server-create-req.json :language: javascript **Example Create Server With Networks(array) and Block Device Mapping V2 (v2.32)** .. literalinclude:: ../../doc/api_samples/servers/v2.32/server-create-req.json :language: javascript **Example Create Server With Automatic Networking (v2.37)** .. literalinclude:: ../../doc/api_samples/servers/v2.37/server-create-req.json :language: javascript **Example Create Server With Trusted Image Certificates (v2.63)** .. literalinclude:: ../../doc/api_samples/servers/v2.63/server-create-req.json :language: javascript **Example Create Server With Host and Hypervisor Hostname (v2.74)** .. literalinclude:: ../../doc/api_samples/servers/v2.74/server-create-req-with-host-and-node.json :language: javascript **Example Create Server With Hostname (v2.90)** .. literalinclude:: ../../doc/api_samples/servers/v2.90/server-create-req.json :language: javascript **Example Create Server With FQDN in Hostname (v2.94)** .. literalinclude:: ../../doc/api_samples/servers/v2.94/server-create-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - Location: server_location - server: server - id: server_id - links: links - OS-DCF:diskConfig: disk_config - security_groups: security_groups_obj - security_groups.name: name - adminPass: adminPass_response **Example Create Server** .. literalinclude:: ../../doc/api_samples/servers/server-create-resp.json :language: javascript Create Multiple Servers ======================= .. rest_method:: POST /servers There is a second kind of create call which can build multiple servers at once. This supports all the same parameters as create with a few additional attributes specific to multiple create. Error handling for multiple create is not as consistent as for single server create, and there is no guarantee that all the servers will be built. This call should generally be avoided in favor of clients doing direct individual server creates. Request (Additional Parameters) ------------------------------- These are the parameters beyond single create that are supported. .. rest_parameters:: parameters.yaml - name: servers_multiple_create_name - min_count: servers_min_count - max_count: servers_max_count - return_reservation_id: return_reservation_id **Example Multiple Create with reservation ID** .. literalinclude:: ../../doc/api_samples/os-multiple-create/multiple-create-post-req.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - reservation_id: reservation_id If ``return_reservation_id`` is set to ``true`` only the ``reservation_id`` will be returned. This can be used as a filter with list servers detailed to see the status of all the servers being built. **Example Create multiple servers with reservation ID** .. literalinclude:: ../../doc/api_samples/os-multiple-create/multiple-create-post-resp.json :language: javascript If ``return_reservation_id`` is set to ``false`` a representation of the ``first`` server will be returned. **Example Create multiple servers without reservation ID** .. literalinclude:: ../../doc/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json :language: javascript List Servers Detailed ===================== .. rest_method:: GET /servers/detail For each server, shows server details including config drive, extended status, and server usage information. The extended status information appears in the OS-EXT-STS:vm_state, OS-EXT-STS:power_state, and OS-EXT-STS:task_state attributes. 
The server usage information appears in the OS-SRV-USG:launched_at and OS-SRV-USG:terminated_at attributes. HostId is unique per account and is not globally unique. .. note:: Starting with microversion 2.69 if server details cannot be loaded due to a transient condition in the deployment like infrastructure failure, the response body for those unavailable servers will be missing keys. See `handling down cells `__ section of the Compute API guide for more information on the keys that would be returned in the partial constructs. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) Request ------- .. rest_parameters:: parameters.yaml - access_ip_v4: access_ip_v4_query_server - access_ip_v6: access_ip_v6_query_server - all_tenants: all_tenants_query - auto_disk_config: disk_config_query_server - availability_zone: availability_zone_query_server - changes-since: changes_since_server - config_drive: config_drive_query_server - created_at: created_at_query_server - deleted: deleted_query - description: description_query_server - flavor: flavor_query - host: host_query_server - hostname: hostname_query_server - image: image_query - ip: ip_query - ip6: ip6_query - kernel_id: kernel_id_query_server - key_name: key_name_query_server - launch_index: launch_index_query_server - launched_at: launched_at_query_server - limit: limit - locked_by: locked_by_query_server - marker: marker - name: server_name_query - node: node_query_server - power_state: power_state_query_server - progress: progress_query_server - project_id: project_id_query_server - ramdisk_id: ramdisk_id_query_server - reservation_id: reservation_id_query - root_device_name: server_root_device_name_query - soft_deleted: soft_deleted_server - sort_dir: sort_dir_server - sort_key: sort_key_server - status: server_status_query - task_state: task_state_query_server - terminated_at: terminated_at_query_server - user_id: user_id_query_server - uuid: server_uuid_query - vm_state: vm_state_query_server - not-tags: not_tags_query - not-tags-any: not_tags_any_query - tags: tags_query - tags-any: tags_any_query - changes-before: changes_before_server - locked: locked_query_server Response -------- .. 
rest_parameters:: parameters.yaml - servers: servers - accessIPv4: accessIPv4 - accessIPv6: accessIPv6 - addresses: addresses - config_drive: config_drive_resp - created: created - flavor: flavor_server - flavor.id: flavor_id_body_2_46 - flavor.links: flavor_links_2_46 - flavor.vcpus: flavor_cpus_2_47 - flavor.ram: flavor_ram_2_47 - flavor.disk: flavor_disk_2_47 - flavor.ephemeral: flavor_ephem_disk_2_47 - flavor.swap: flavor_swap_2_47 - flavor.original_name: flavor_original_name - flavor.extra_specs: extra_specs_2_47 - flavor.extra_specs.key: flavor_extra_spec_key_2_47 - flavor.extra_specs.value: flavor_extra_spec_value_2_47 - hostId: hostId - id: server_id - image: image - key_name: key_name_resp - links: links - metadata: metadata_compat - name: server_name - OS-DCF:diskConfig: disk_config - OS-EXT-AZ:availability_zone: OS-EXT-AZ:availability_zone - OS-EXT-SRV-ATTR:host: OS-EXT-SRV-ATTR:host - OS-EXT-SRV-ATTR:hostname: server_hostname - OS-EXT-SRV-ATTR:hypervisor_hostname: OS-EXT-SRV-ATTR:hypervisor_hostname - OS-EXT-SRV-ATTR:instance_name: OS-EXT-SRV-ATTR:instance_name - OS-EXT-SRV-ATTR:kernel_id: server_kernel_id - OS-EXT-SRV-ATTR:launch_index: server_launch_index - OS-EXT-SRV-ATTR:ramdisk_id: server_ramdisk_id - OS-EXT-SRV-ATTR:reservation_id: server_reservation_id - OS-EXT-SRV-ATTR:root_device_name: server_root_device_name - OS-EXT-SRV-ATTR:user_data: server_user_data - OS-EXT-STS:power_state: OS-EXT-STS:power_state - OS-EXT-STS:task_state: OS-EXT-STS:task_state - OS-EXT-STS:vm_state: OS-EXT-STS:vm_state - os-extended-volumes:volumes_attached: os-extended-volumes:volumes_attached - os-extended-volumes:volumes_attached.id: os-extended-volumes:volumes_attached.id - os-extended-volumes:volumes_attached.delete_on_termination: os-extended-volumes:volumes_attached.delete_on_termination - OS-SRV-USG:launched_at: OS-SRV-USG:launched_at - OS-SRV-USG:terminated_at: OS-SRV-USG:terminated_at - status: server_status - tenant_id: tenant_id_body - updated: updated - user_id: user_id - fault: fault - fault.code: fault_code - fault.created: fault_created - fault.message: fault_message - fault.details: fault_details - pinned_availability_zone: pinned_availability_zone - progress: progress - scheduler_hints: scheduler_hints - security_groups: security_groups_obj_optional - security_group.name: name - servers_links: servers_links - locked: locked - host_status: host_status - description: server_description_resp - tags: tags - trusted_image_certificates: server_trusted_image_certificates_resp - locked_reason: locked_reason_resp **Example List Servers Detailed (2.100)** .. literalinclude:: /../../doc/api_samples/servers/v2.100/servers-details-resp.json :language: javascript **Example List Servers Detailed (2.98)** .. literalinclude:: /../../doc/api_samples/servers/v2.98/servers-details-resp.json :language: javascript **Example List Servers Detailed (2.96)** .. literalinclude:: /../../doc/api_samples/servers/v2.96/servers-details-resp.json :language: javascript **Example List Servers Detailed (2.73)** .. literalinclude:: /../../doc/api_samples/servers/v2.73/servers-details-resp.json :language: javascript **Example List Servers Detailed (2.69)** This is a sample response for the servers from the non-responsive part of the deployment. The responses for the available server records will be normal without any missing keys. .. literalinclude:: ../../doc/api_samples/servers/v2.69/servers-details-resp.json :language: javascript Show Server Details =================== .. 
rest_method:: GET /servers/{server_id} Shows details for a server. Includes server details including configuration drive, extended status, and server usage information. The extended status information appears in the ``OS-EXT-STS:vm_state``, ``OS-EXT-STS:power_state``, and ``OS-EXT-STS:task_state`` attributes. The server usage information appears in the ``OS-SRV-USG:launched_at`` and ``OS-SRV-USG:terminated_at`` attributes. HostId is unique per account and is not globally unique. **Preconditions** The server must exist. .. note:: Starting with microversion 2.69 if the server detail cannot be loaded due to a transient condition in the deployment like infrastructure failure, the response body for the unavailable server will be missing keys. See `handling down cells `__ section of the Compute API guide for more information on the keys that would be returned in the partial constructs. Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path Response -------- .. rest_parameters:: parameters.yaml - server: server - accessIPv4: accessIPv4 - accessIPv6: accessIPv6 - addresses: addresses - config_drive: config_drive_resp - created: created - flavor: flavor_server - flavor.id: flavor_id_body_2_46 - flavor.links: flavor_links_2_46 - flavor.vcpus: flavor_cpus_2_47 - flavor.ram: flavor_ram_2_47 - flavor.disk: flavor_disk_2_47 - flavor.ephemeral: flavor_ephem_disk_2_47 - flavor.swap: flavor_swap_2_47 - flavor.original_name: flavor_original_name - flavor.extra_specs: extra_specs_2_47 - flavor.extra_specs.key: flavor_extra_spec_key_2_47 - flavor.extra_specs.value: flavor_extra_spec_value_2_47 - hostId: hostId - id: server_id - image: image - key_name: key_name_resp - links: links - metadata: metadata_compat - name: server_name - OS-DCF:diskConfig: disk_config - OS-EXT-AZ:availability_zone: OS-EXT-AZ:availability_zone - OS-EXT-SRV-ATTR:host: OS-EXT-SRV-ATTR:host - OS-EXT-SRV-ATTR:hostname: server_hostname - OS-EXT-SRV-ATTR:hypervisor_hostname: OS-EXT-SRV-ATTR:hypervisor_hostname - OS-EXT-SRV-ATTR:instance_name: OS-EXT-SRV-ATTR:instance_name - OS-EXT-SRV-ATTR:kernel_id: server_kernel_id - OS-EXT-SRV-ATTR:launch_index: server_launch_index - OS-EXT-SRV-ATTR:ramdisk_id: server_ramdisk_id - OS-EXT-SRV-ATTR:reservation_id: server_reservation_id - OS-EXT-SRV-ATTR:root_device_name: server_root_device_name - OS-EXT-SRV-ATTR:user_data: server_user_data - OS-EXT-STS:power_state: OS-EXT-STS:power_state - OS-EXT-STS:task_state: OS-EXT-STS:task_state - OS-EXT-STS:vm_state: OS-EXT-STS:vm_state - os-extended-volumes:volumes_attached: os-extended-volumes:volumes_attached - os-extended-volumes:volumes_attached.id: os-extended-volumes:volumes_attached.id - os-extended-volumes:volumes_attached.delete_on_termination: os-extended-volumes:volumes_attached.delete_on_termination - OS-SRV-USG:launched_at: OS-SRV-USG:launched_at - OS-SRV-USG:terminated_at: OS-SRV-USG:terminated_at - status: server_status - tenant_id: tenant_id_body - updated: updated - user_id: user_id - fault: fault - fault.code: fault_code - fault.created: fault_created - fault.message: fault_message - fault.details: fault_details - pinned_availability_zone: pinned_availability_zone - progress: progress - scheduler_hints: scheduler_hints - security_groups: security_groups_obj_optional - security_group.name: name - locked: locked - host_status: host_status - description: server_description_resp - tags: tags - trusted_image_certificates: 
server_trusted_image_certificates_resp - server_groups: server_groups_2_71 - locked_reason: locked_reason_resp **Example Show Server Details (2.100)** .. literalinclude:: ../../doc/api_samples/servers/v2.100/server-get-resp.json :language: javascript **Example Show Server Details (2.98)** .. literalinclude:: ../../doc/api_samples/servers/v2.98/server-get-resp.json :language: javascript **Example Show Server Details (2.96)** .. literalinclude:: ../../doc/api_samples/servers/v2.96/server-get-resp.json :language: javascript **Example Show Server Details (2.73)** .. literalinclude:: ../../doc/api_samples/servers/v2.73/server-get-resp.json :language: javascript **Example Show Server Details (2.69)** This is a sample response for a server from the non-responsive part of the deployment. The responses for available server records will be normal without any missing keys. .. literalinclude:: ../../doc/api_samples/servers/v2.69/server-get-resp.json :language: javascript Update Server ============= .. rest_method:: PUT /servers/{server_id} Updates the editable attributes of an existing server. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path - server: server - accessIPv4: accessIPv4_in - accessIPv6: accessIPv6_in - name: server_name_optional - hostname: server_hostname_req - OS-DCF:diskConfig: OS-DCF:diskConfig - description: server_description .. note:: You can specify parameters to update independently. e.g. ``name`` only, ``description`` only, ``name`` and ``description``, etc. **Example Update Server (2.63)** .. literalinclude:: ../../doc/api_samples/servers/v2.63/server-update-req.json :language: javascript Response -------- .. 
rest_parameters:: parameters.yaml - server: server - accessIPv4: accessIPv4 - accessIPv6: accessIPv6 - addresses: addresses - created: created - flavor: flavor_server - flavor.id: flavor_id_body_2_46 - flavor.links: flavor_links_2_46 - flavor.vcpus: flavor_cpus_2_47 - flavor.ram: flavor_ram_2_47 - flavor.disk: flavor_disk_2_47 - flavor.ephemeral: flavor_ephem_disk_2_47 - flavor.swap: flavor_swap_2_47 - flavor.original_name: flavor_original_name - flavor.extra_specs: extra_specs_2_47 - flavor.extra_specs.key: flavor_extra_spec_key_2_47 - flavor.extra_specs.value: flavor_extra_spec_value_2_47 - hostId: hostId - id: server_id - image: image - links: links - metadata: metadata_compat - name: server_name - OS-DCF:diskConfig: disk_config - status: server_status - tenant_id: tenant_id_body - updated: updated - user_id: user_id - fault: fault - fault.code: fault_code - fault.created: fault_created - fault.message: fault_message - fault.details: fault_details - pinned_availability_zone: pinned_availability_zone - progress: progress - locked: locked - description: server_description_resp - tags: tags - trusted_image_certificates: server_trusted_image_certificates_resp - server_groups: server_groups_2_71 - locked_reason: locked_reason_resp - config_drive: config_drive_resp_update_rebuild - OS-EXT-AZ:availability_zone: OS-EXT-AZ:availability_zone_update_rebuild - OS-EXT-SRV-ATTR:host: OS-EXT-SRV-ATTR:host_update_rebuild - OS-EXT-SRV-ATTR:hostname: server_hostname_update_rebuild - OS-EXT-SRV-ATTR:hypervisor_hostname: OS-EXT-SRV-ATTR:hypervisor_hostname_update_rebuild - OS-EXT-SRV-ATTR:instance_name: OS-EXT-SRV-ATTR:instance_name_update_rebuild - OS-EXT-SRV-ATTR:kernel_id: server_kernel_id_update_rebuild - OS-EXT-SRV-ATTR:launch_index: server_launch_index_update_rebuild - OS-EXT-SRV-ATTR:ramdisk_id: server_ramdisk_id_update_rebuild - OS-EXT-SRV-ATTR:reservation_id: server_reservation_id_update_rebuild - OS-EXT-SRV-ATTR:root_device_name: server_root_device_name_update_rebuild - OS-EXT-SRV-ATTR:user_data: server_user_data_update - OS-EXT-STS:power_state: OS-EXT-STS:power_state_update_rebuild - OS-EXT-STS:task_state: OS-EXT-STS:task_state_update_rebuild - OS-EXT-STS:vm_state: OS-EXT-STS:vm_state_update_rebuild - os-extended-volumes:volumes_attached: os-extended-volumes:volumes_attached_update_rebuild - os-extended-volumes:volumes_attached.id: os-extended-volumes:volumes_attached.id_update_rebuild - os-extended-volumes:volumes_attached.delete_on_termination: os-extended-volumes:volumes_attached.delete_on_termination_update_rebuild - OS-SRV-USG:launched_at: OS-SRV-USG:launched_at_update_rebuild - OS-SRV-USG:terminated_at: OS-SRV-USG:terminated_at_update_rebuild - security_groups: security_groups_obj_update_rebuild - security_group.name: name_update_rebuild - host_status: host_status_update_rebuild - key_name: key_name_resp_update **Example Update Server Details (2.98)** .. literalinclude:: ../../doc/api_samples/servers/v2.98/server-update-resp.json :language: javascript **Example Update Server Details (2.96)** .. literalinclude:: ../../doc/api_samples/servers/v2.96/server-update-resp.json :language: javascript **Example Update Server (2.75)** .. literalinclude:: ../../doc/api_samples/servers/v2.75/server-update-resp.json :language: javascript Delete Server ============= .. rest_method:: DELETE /servers/{server_id} Deletes a server. 
By default, the instance is hard deleted and removed from the system immediately. If you set ``reclaim_instance_interval`` > 0, the API soft deletes the instance instead, and the instance is not removed until ``reclaim_instance_interval`` seconds have elapsed since the soft delete. While the instance is marked as ``SOFT_DELETED``, it can be recovered via the ``restore`` action before it is permanently deleted from the system. **Preconditions** - The server must exist. - Any user can delete a server when the server is not locked and the policy allows it. - If the server is locked, you must have administrator privileges to delete the server. **Asynchronous postconditions** - With correct permissions, you can see the server status as ``deleting``. - The ports attached to the server, which Nova created during the server create process or when attaching interfaces later, are deleted. - The server does not appear in the list servers response. - For a hard delete, the server managed by OpenStack Compute is deleted on the compute node. **Troubleshooting** - If the server remains in ``deleting`` status or moves to an error status, the request failed. Ensure that you meet the preconditions, then investigate the compute back end. - The request returns the HTTP 409 response code when the server is locked, even if you have correct permissions. Ensure that you meet the preconditions, then investigate the server status. - The server managed by OpenStack Compute is not deleted from the compute node. Normal response codes: 204 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) .. TODO(sdague): for later phase of updating body. Conflict is returned under two conditions: when the instance is locked and so cannot be deleted, or when the instance is in some other state that makes it impossible to delete. Request ------- .. rest_parameters:: parameters.yaml - server_id: server_id_path Response -------- There is no body content for the response of a successful DELETE query. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/urls.inc0000664000175000017500000000210300000000000017061 0ustar00zuulzuul00000000000000.. -*- rst -*- ============== Service URLs ============== All API calls described throughout the rest of this document require authentication with the OpenStack Identity service. After authentication, a base ``service url`` can be extracted from the Identity token of type ``compute``. This ``service url`` will be the root URL that every API call uses to build a full path. For instance, if the ``service url`` is ``http://mycompute.pvt/compute/v2.1``, then the full API call for ``/servers`` is ``http://mycompute.pvt/compute/v2.1/servers``. Depending on the deployment, the Compute ``service url`` might be http or https, use a custom port, use a custom path, and include your tenant id. The only way to know the URLs for your deployment is by using the service catalog. The Compute URL should never be hard-coded in applications, even if they are only expected to work at a single site. It should always be discovered from the Identity token. As such, for the rest of this document we will be using shorthand where ``GET /servers`` really means ``GET {your_compute_service_url}/servers``.
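As an illustrative, non-normative sketch (the Identity endpoint, the credentials, and the use of the Python ``requests`` library below are assumptions made only for this example), a client might discover the Compute ``service url`` from the Identity v3 service catalog and build the full request path like this:

.. code-block:: python

   # Hypothetical endpoint and credentials -- substitute values from your
   # own deployment. This is a sketch, not a supported client library.
   import requests

   KEYSTONE = "http://keystone.example.com/identity/v3"

   auth_request = {
       "auth": {
           "identity": {
               "methods": ["password"],
               "password": {
                   "user": {
                       "name": "demo",
                       "domain": {"id": "default"},
                       "password": "secret",
                   }
               },
           },
           "scope": {"project": {"name": "demo", "domain": {"id": "default"}}},
       }
   }

   # Authenticate; the token comes back in a header and the service catalog
   # comes back in the response body.
   resp = requests.post(f"{KEYSTONE}/auth/tokens", json=auth_request)
   token = resp.headers["X-Subject-Token"]
   catalog = resp.json()["token"]["catalog"]

   # The Compute ``service url`` is the public endpoint of the catalog entry
   # whose type is "compute" -- it is discovered, never hard-coded.
   compute = next(entry for entry in catalog if entry["type"] == "compute")
   service_url = next(
       ep["url"] for ep in compute["endpoints"] if ep["interface"] == "public"
   )

   # ``GET /servers`` in this document is shorthand for this full call.
   servers = requests.get(
       f"{service_url}/servers", headers={"X-Auth-Token": token}
   )
   print(servers.json())

In practice, client libraries such as openstacksdk perform this catalog lookup automatically; the sketch only illustrates that the base URL comes from the catalog rather than from application configuration.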
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/api-ref/source/versions.inc0000664000175000017500000000535600000000000017761 0ustar00zuulzuul00000000000000.. -*- rst -*- ============== API Versions ============== In order to bring new features to users over time, the Nova API supports versioning. There are two kinds of versions in Nova. - ''major versions'', which have dedicated urls - ''microversions'', which can be requested through the use of the ``X-OpenStack-Nova-API-Version`` header, or since microversion 2.27 the ``OpenStack-API-Version`` header may also be used. For more details about Microversions, please reference: `Microversions `_ .. note:: The maximum microversion supported by each release varies. Please reference: `API Microversion History `__ for API microversion history details. The Version APIs work differently from other APIs as they *do not* require authentication. List All Major Versions ======================= .. rest_method:: GET / This fetches all the information about all known major API versions in the deployment. Links to more specific information will be provided for each API version, as well as information about supported min and max microversions. Normal Response Codes: 200 Response -------- .. rest_parameters:: parameters.yaml - versions: versions - id: version_id - links: links - min_version: version_min - status: version_status - updated: updated_version - version: version_max Response Example ---------------- This demonstrates the expected response from a bleeding edge server that supports up to the current microversion. When querying OpenStack environments you will typically find the current microversion on the v2.1 API is lower than listed below. .. literalinclude:: /../../doc/api_samples/versions/versions-get-resp.json :language: javascript Show Details of Specific API Version ==================================== .. rest_method:: GET /{api_version}/ This gets the details of a specific API at its root. Nearly all this information exists at the API root, so this is mostly a redundant operation. .. TODO(sdague) we should probably deprecate this call as everything that's needed is really in the root now Normal Response Codes: 200 Request ------- .. rest_parameters:: parameters.yaml - api_version: api_version Response -------- .. rest_parameters:: parameters.yaml - version: version - id: version_id - links: links - media-types: media_types - min_version: version_min - status: version_status - updated: updated_version - version: version_max Response Example ---------------- This is an example of a ``GET /v2.1/`` on a relatively current server. .. literalinclude:: /../../doc/api_samples/versions/v21-version-get-resp.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/bindep.txt0000664000175000017500000000427100000000000014570 0ustar00zuulzuul00000000000000# This is a cross-platform list tracking distribution packages needed for install and tests; # see https://docs.openstack.org/infra/bindep/ for additional information. build-essential [platform:dpkg test] # required for mkfs.vfat dosfstools [platform:dpkg test] # fonts-freefont-otf is needed for pdf docs builds with the 'xelatex' engine fonts-freefont-otf [pdf-docs] gcc [platform:rpm test] # gettext and graphviz are needed by doc builds only. For transition, # have them in both doc and test. 
# TODO(jaegerandi): Remove test once infra scripts are updated. gettext [doc test] graphviz [doc test] # libsrvg2 is needed for sphinxcontrib-svg2pdfconverter in docs builds. librsvg2-tools [doc platform:rpm] librsvg2-bin [doc platform:dpkg] language-pack-en [platform:ubuntu] latexmk [pdf-docs] libffi-dev [platform:dpkg test] libffi-devel [platform:rpm test] libmariadb-dev-compat [platform:debian] libmysqlclient-dev [platform:ubuntu] libpq-dev [platform:dpkg test] libsqlite3-dev [platform:dpkg test] libxml2-dev [platform:dpkg test] libxslt-devel [platform:rpm test] libxslt1-dev [platform:dpkg test] locales [platform:debian] mysql [platform:rpm !platform:redhat] mysql-client [platform:dpkg !platform:debian] mysql-devel [platform:rpm !platform:redhat test] mysql-server [!platform:redhat !platform:debian] mariadb-devel [platform:rpm platform:redhat test] mariadb-server [platform:rpm platform:redhat platform:debian] openssh-client [platform:dpkg] openssh-clients [platform:rpm] openssl pkg-config [platform:dpkg test] pkgconfig [platform:rpm test] postgresql postgresql-client [platform:dpkg] postgresql-devel [platform:rpm test] postgresql-server [platform:rpm] python3-all [platform:dpkg] python3-all-dev [platform:dpkg] python3 [platform:rpm test] python3-devel [platform:rpm test] sqlite-devel [platform:rpm test] texlive [pdf-docs] texlive-latex-recommended [pdf-docs] texlive-xetex [pdf-docs] libpcre3-dev [platform:dpkg test] pcre-devel [platform:rpm test] # Nova uses lsscsi via os-brick. Due to bindep usage in devstack and # elsewhere, we add it here to make sure it is picked up and available at # runtime and in unit tests. Net result is the same that lsscsi will be # installed for any nova installation. lsscsi ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.005606 nova-32.0.0/devstack/0000775000175000017500000000000000000000000014366 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.005606 nova-32.0.0/devstack/lib/0000775000175000017500000000000000000000000015134 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/devstack/lib/mdev_samples0000664000175000017500000000327700000000000017547 0ustar00zuulzuul00000000000000function compile_mdev_samples { set -x local kver=$(uname -r) local kvariant=$(uname -r | awk -F - '{print $NF}') if [[ "$kvariant" == "kvm" ]]; then echo "NOTE: The kvm variant of the kernel you are running does not " \ "have the mdev support required to enable the mdev samples." echo "Install the generic variant and retry." exit 1 elif [[ "$kvariant" != "generic" ]]; then echo "NOTE: This may not work on your kernel variant of $kvariant!" echo "Recommend installing the generic variant kernel instead." 
fi if grep deb-src /etc/apt/sources.list; then sudo sed -i 's/^# deb-src/deb-src/' /etc/apt/sources.list else sudo tee -a /etc/apt/sources.list < kernel-source.log cd linux-*/samples/vfio-mdev sed -i 's/obj-[^ ]*/obj-m/' Makefile make -C /lib/modules/$(uname -r)/build M=$(pwd) modules sudo make -C /lib/modules/$(uname -r)/build M=$(pwd) modules_install sudo depmod for mod in $NOVA_MDEV_SAMPLES; do sudo modprobe $mod done lsmod | grep mdev } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/devstack/nova-multi-cell-exclude-list.txt0000664000175000017500000000160000000000000022534 0ustar00zuulzuul00000000000000# --exclude-list contents for the nova-multi-cell job defined in .zuul.yaml # See: https://stestr.readthedocs.io/en/latest/MANUAL.html#test-selection # Exclude tempest.scenario.test_network tests since they are slow and # only test advanced neutron features, unrelated to multi-cell testing. ^tempest.scenario.test_network # Also exclude resize and migrate tests with qos ports as qos is currently # not supported in cross cell resize case . See # https://bugs.launchpad.net/nova/+bug/1907511 for details test_migrate_with_qos_min_bw_allocation test_resize_with_qos_min_bw_allocation # Also exclude unshelve to specific host test cases as unshelve cannot move VMs across cells # See https://bugs.launchpad.net/nova/+bug/1988316 tempest.api.compute.admin.test_servers_on_multinodes.UnshelveToHostMultiNodesTest # revert this when bug #1940425 is fixed in neutron test_live_migration_with_trunk././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/devstack/plugin.sh0000664000175000017500000000075200000000000016224 0ustar00zuulzuul00000000000000NOVA_PLUGINDIR=$(readlink -f $(dirname "${BASH_SOURCE[0]}")) source $NOVA_PLUGINDIR/lib/mdev_samples if [[ $1 == "stack" ]]; then case $2 in install) if [[ "$NOVA_COMPILE_MDEV_SAMPLES" == True ]]; then async_runfunc compile_mdev_samples fi ;; extra) if [[ "$NOVA_COMPILE_MDEV_SAMPLES" == True ]]; then async_wait compile_mdev_samples fi ;; esac elif [[ $1 == "clean" ]]; then rm -Rf $NOVA_KERNEL_TEMP fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/devstack/settings0000664000175000017500000000046400000000000016155 0ustar00zuulzuul00000000000000# Whether or not to compile the mdev sample drivers from source NOVA_COMPILE_MDEV_SAMPLES=$(trueorfalse True NOVA_COMPILE_MDEV_SAMPLES) # Insert these mdev sample modules NOVA_MDEV_SAMPLES=${NOVA_MDEV_SAMPLES:-mtty mdpy mdpy-fb mbochs} # Temporary directory for kernel source NOVA_KERNEL_TEMP=$DEST/kernel ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.005606 nova-32.0.0/doc/0000775000175000017500000000000000000000000013327 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/README.rst0000664000175000017500000000066400000000000015024 0ustar00zuulzuul00000000000000OpenStack Nova Documentation README =================================== Both contributor developer documentation and REST API documentation are sourced here. 
Contributor developer docs are built to: https://docs.openstack.org/nova/latest/ API guide docs are built to: https://docs.openstack.org/api-guide/compute/ For more details, see the "Building the Documentation" section of doc/source/contributor/development-environment.rst. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315688.8736048 nova-32.0.0/doc/api_samples/0000775000175000017500000000000000000000000015624 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.005606 nova-32.0.0/doc/api_samples/consoles/0000775000175000017500000000000000000000000017451 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/consoles/consoles-get-resp.json0000664000175000017500000000030600000000000023714 0ustar00zuulzuul00000000000000{ "console": { "console_type": "fake", "host": "fake", "id": 1, "instance_name": "instance-00000001", "password": "C4jBpJ6x", "port": 5999 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/consoles/consoles-list-get-resp.json0000664000175000017500000000022600000000000024666 0ustar00zuulzuul00000000000000{ "consoles": [ { "console": { "console_type": "fake", "id": 1 } } ] }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.005606 nova-32.0.0/doc/api_samples/extension-info/0000775000175000017500000000000000000000000020571 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/extension-info/extensions-get-resp.json0000664000175000017500000000040400000000000025405 0ustar00zuulzuul00000000000000{ "extension": { "alias": "os-agents", "description": "Agents support.", "links": [], "name": "Agents", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/extension-info/extensions-list-resp-v21-compatible.json0000664000175000017500000007632200000000000030340 0ustar00zuulzuul00000000000000{ "extensions": [ { "alias": "NMN", "description": "Multiple network support.", "links": [], "name": "Multinic", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-DCF", "description": "Disk Management Extension.", "links": [], "name": "DiskConfig", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-AZ", "description": "Extended Availability Zone support.", "links": [], "name": "ExtendedAvailabilityZone", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IMG-SIZE", "description": "Adds image size to image listings.", "links": [], "name": "ImageSize", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IPS", "description": "Adds type parameter to the ip list.", "links": [], "name": "ExtendedIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IPS-MAC", "description": "Adds mac address parameter to the 
ip list.", "links": [], "name": "ExtendedIpsMac", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-SRV-ATTR", "description": "Extended Server Attributes support.", "links": [], "name": "ExtendedServerAttributes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-STS", "description": "Extended Status support.", "links": [], "name": "ExtendedStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-VIF-NET", "description": "Adds network id parameter to the virtual interface list.", "links": [], "name": "ExtendedVIFNet", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-FLV-DISABLED", "description": "Support to show the disabled status of a flavor.", "links": [], "name": "FlavorDisabled", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-FLV-EXT-DATA", "description": "Provide additional data for flavors.", "links": [], "name": "FlavorExtraData", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-SCH-HNT", "description": "Pass arbitrary key/value pairs to the scheduler.", "links": [], "name": "SchedulerHints", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-SRV-USG", "description": "Adds launched_at and terminated_at on Servers.", "links": [], "name": "ServerUsage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-access-ips", "description": "Access IPs support.", "links": [], "name": "AccessIPs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-admin-actions", "description": "Enable admin-only server actions\n\n Actions include: resetNetwork, injectNetworkInfo, os-resetState\n ", "links": [], "name": "AdminActions", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-admin-password", "description": "Admin password management support.", "links": [], "name": "AdminPassword", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-agents", "description": "Agents support.", "links": [], "name": "Agents", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-aggregates", "description": "Admin-only aggregate administration.", "links": [], "name": "Aggregates", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-assisted-volume-snapshots", "description": "Assisted volume snapshots.", "links": [], "name": "AssistedVolumeSnapshots", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-attach-interfaces", "description": "Attach interface support.", "links": [], "name": "AttachInterfaces", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-availability-zone", "description": "1. Add availability_zone to the Create Server API.\n 2. 
Add availability zones describing.\n ", "links": [], "name": "AvailabilityZone", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-baremetal-ext-status", "description": "Add extended status in Baremetal Nodes v2 API.", "links": [], "name": "BareMetalExtStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-baremetal-nodes", "description": "Admin-only bare-metal node administration.", "links": [], "name": "BareMetalNodes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-block-device-mapping", "description": "Block device mapping boot support.", "links": [], "name": "BlockDeviceMapping", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-block-device-mapping-v2-boot", "description": "Allow boot with the new BDM data format.", "links": [], "name": "BlockDeviceMappingV2Boot", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cell-capacities", "description": "Adding functionality to get cell capacities.", "links": [], "name": "CellCapacities", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cells", "description": "Enables cells-related functionality such as adding neighbor cells,\n listing neighbor cells, and getting the capabilities of the local cell.\n ", "links": [], "name": "Cells", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-certificates", "description": "Certificates support.", "links": [], "name": "Certificates", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cloudpipe", "description": "Adds actions to create cloudpipe instances.\n\n When running with the Vlan network mode, you need a mechanism to route\n from the public Internet to your vlans. This mechanism is known as a\n cloudpipe.\n\n At the time of creating this class, only OpenVPN is supported. 
Support for\n a SSH Bastion host is forthcoming.\n ", "links": [], "name": "Cloudpipe", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cloudpipe-update", "description": "Adds the ability to set the vpn ip/port for cloudpipe instances.", "links": [], "name": "CloudpipeUpdate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-config-drive", "description": "Config Drive Extension.", "links": [], "name": "ConfigDrive", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-console-auth-tokens", "description": "Console token authentication support.", "links": [], "name": "ConsoleAuthTokens", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-console-output", "description": "Console log output support, with tailing ability.", "links": [], "name": "ConsoleOutput", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-consoles", "description": "Interactive Console support.", "links": [], "name": "Consoles", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-create-backup", "description": "Create a backup of a server.", "links": [], "name": "CreateBackup", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-create-server-ext", "description": "Extended support to the Create Server v1.1 API.", "links": [], "name": "Createserverext", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-deferred-delete", "description": "Instance deferred delete.", "links": [], "name": "DeferredDelete", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-evacuate", "description": "Enables server evacuation.", "links": [], "name": "Evacuate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-evacuate-find-host", "description": "Enables server evacuation without target host. 
Scheduler will select one to target.", "links": [], "name": "ExtendedEvacuateFindHost", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-floating-ips", "description": "Adds optional fixed_address to the add floating IP command.", "links": [], "name": "ExtendedFloatingIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-hypervisors", "description": "Extended hypervisors support.", "links": [], "name": "ExtendedHypervisors", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-networks", "description": "Adds additional fields to networks.", "links": [], "name": "ExtendedNetworks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-quotas", "description": "Adds ability for admins to delete quota and optionally force the update Quota command.", "links": [], "name": "ExtendedQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-rescue-with-image", "description": "Allow the user to specify the image to use for rescue.", "links": [], "name": "ExtendedRescueWithImage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-services", "description": "Extended services support.", "links": [], "name": "ExtendedServices", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-services-delete", "description": "Extended services deletion support.", "links": [], "name": "ExtendedServicesDelete", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-status", "description": "Extended Status support.", "links": [], "name": "ExtendedStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-volumes", "description": "Extended Volumes support.", "links": [], "name": "ExtendedVolumes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-fixed-ips", "description": "Fixed IPs support.", "links": [], "name": "FixedIPs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-access", "description": "Flavor access support.", "links": [], "name": "FlavorAccess", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-extra-specs", "description": "Flavors extra specs support.", "links": [], "name": "FlavorExtraSpecs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-manage", "description": "Flavor create/delete API support.", "links": [], "name": "FlavorManage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-rxtx", "description": "Support to show the rxtx status of a flavor.", "links": [], "name": "FlavorRxtx", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-swap", "description": "Support to show the swap status of a flavor.", "links": [], "name": "FlavorSwap", 
"namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ip-dns", "description": "Floating IP DNS support.", "links": [], "name": "FloatingIpDns", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ip-pools", "description": "Floating IPs support.", "links": [], "name": "FloatingIpPools", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ips", "description": "Floating IPs support.", "links": [], "name": "FloatingIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ips-bulk", "description": "Bulk handling of Floating IPs.", "links": [], "name": "FloatingIpsBulk", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-fping", "description": "Fping Management Extension.", "links": [], "name": "Fping", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hide-server-addresses", "description": "Support hiding server addresses in certain states.", "links": [], "name": "HideServerAddresses", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hosts", "description": "Admin-only host administration.", "links": [], "name": "Hosts", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hypervisor-status", "description": "Show hypervisor status.", "links": [], "name": "HypervisorStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hypervisors", "description": "Admin-only hypervisor administration.", "links": [], "name": "Hypervisors", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-instance-actions", "description": "View a log of actions and events taken on an instance.", "links": [], "name": "InstanceActions", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-instance_usage_audit_log", "description": "Admin-only Task Log Monitoring.", "links": [], "name": "OSInstanceUsageAuditLog", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-keypairs", "description": "Keypair Support.", "links": [], "name": "Keypairs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-lock-server", "description": "Enable lock/unlock server actions.", "links": [], "name": "LockServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-migrate-server", "description": "Enable migrate and live-migrate server actions.", "links": [], "name": "MigrateServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-migrations", "description": "Provide data on migrations.", "links": [], "name": "Migrations", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-multiple-create", "description": "Allow multiple create in the Create Server v2.1 API.", "links": [], "name": "MultipleCreate", "namespace": 
"http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-networks", "description": "Admin-only Network Management Extension.", "links": [], "name": "Networks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-networks-associate", "description": "Network association support.", "links": [], "name": "NetworkAssociationSupport", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-pause-server", "description": "Enable pause/unpause server actions.", "links": [], "name": "PauseServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-personality", "description": "Personality support.", "links": [], "name": "Personality", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-preserve-ephemeral-rebuild", "description": "Allow preservation of the ephemeral partition on rebuild.", "links": [], "name": "PreserveEphemeralOnRebuild", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-quota-class-sets", "description": "Quota classes management support.", "links": [], "name": "QuotaClasses", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-quota-sets", "description": "Quotas management support.", "links": [], "name": "Quotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-rescue", "description": "Instance rescue mode.", "links": [], "name": "Rescue", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-security-group-default-rules", "description": "Default rules for security group support.", "links": [], "name": "SecurityGroupDefaultRules", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-security-groups", "description": "Security group support.", "links": [], "name": "SecurityGroups", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-diagnostics", "description": "Allow Admins to view server diagnostics through server action.", "links": [], "name": "ServerDiagnostics", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-external-events", "description": "Server External Event Triggers.", "links": [], "name": "ServerExternalEvents", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-group-quotas", "description": "Adds quota support to server groups.", "links": [], "name": "ServerGroupQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-groups", "description": "Server group support.", "links": [], "name": "ServerGroups", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-list-multi-status", "description": "Allow to filter the servers by a set of status values.", "links": [], "name": "ServerListMultiStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": 
"os-server-password", "description": "Server password support.", "links": [], "name": "ServerPassword", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-sort-keys", "description": "Add sorting support in get Server v2 API.", "links": [], "name": "ServerSortKeys", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-start-stop", "description": "Start/Stop instance compute API support.", "links": [], "name": "ServerStartStop", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-services", "description": "Services support.", "links": [], "name": "Services", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-shelve", "description": "Instance shelve mode.", "links": [], "name": "Shelve", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-simple-tenant-usage", "description": "Simple tenant usage extension.", "links": [], "name": "SimpleTenantUsage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-suspend-server", "description": "Enable suspend/resume server actions.", "links": [], "name": "SuspendServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-tenant-networks", "description": "Tenant-based Network Management Extension.", "links": [], "name": "OSTenantNetworks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-used-limits", "description": "Provide data on limited resources that are being used.", "links": [], "name": "UsedLimits", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-used-limits-for-admin", "description": "Provide data to admin on limited resources used by other tenants.", "links": [], "name": "UsedLimitsForAdmin", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-user-data", "description": "Add user_data to the Create Server API.", "links": [], "name": "UserData", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-user-quotas", "description": "Project user quota support.", "links": [], "name": "UserQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-virtual-interfaces", "description": "Virtual interface support.", "links": [], "name": "VirtualInterfaces", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-volume-attachment-update", "description": "Support for updating a volume attachment.", "links": [], "name": "VolumeAttachmentUpdate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-volumes", "description": "Volumes support.", "links": [], "name": "Volumes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/extension-info/extensions-list-resp.json0000664000175000017500000007560700000000000025622 0ustar00zuulzuul00000000000000{ "extensions": [ { "alias": "NMN", "description": "Multiple network support.", "links": [], "name": "Multinic", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-DCF", "description": "Disk Management Extension.", "links": [], "name": "DiskConfig", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-AZ", "description": "Extended Availability Zone support.", "links": [], "name": "ExtendedAvailabilityZone", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IMG-SIZE", "description": "Adds image size to image listings.", "links": [], "name": "ImageSize", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IPS", "description": "Adds type parameter to the ip list.", "links": [], "name": "ExtendedIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IPS-MAC", "description": "Adds mac address parameter to the ip list.", "links": [], "name": "ExtendedIpsMac", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-SRV-ATTR", "description": "Extended Server Attributes support.", "links": [], "name": "ExtendedServerAttributes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-STS", "description": "Extended Status support.", "links": [], "name": "ExtendedStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-FLV-DISABLED", "description": "Support to show the disabled status of a flavor.", "links": [], "name": "FlavorDisabled", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-FLV-EXT-DATA", "description": "Provide additional data for flavors.", "links": [], "name": "FlavorExtraData", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-SCH-HNT", "description": "Pass arbitrary key/value pairs to the scheduler.", "links": [], "name": "SchedulerHints", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-SRV-USG", "description": "Adds launched_at and terminated_at on Servers.", "links": [], "name": "ServerUsage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-access-ips", "description": "Access IPs support.", "links": [], "name": "AccessIPs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-admin-actions", "description": "Enable admin-only server actions\n\n Actions include: resetNetwork, injectNetworkInfo, os-resetState\n ", "links": [], "name": "AdminActions", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-admin-password", "description": "Admin password management support.", "links": [], "name": "AdminPassword", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-agents", 
"description": "Agents support.", "links": [], "name": "Agents", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-aggregates", "description": "Admin-only aggregate administration.", "links": [], "name": "Aggregates", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-assisted-volume-snapshots", "description": "Assisted volume snapshots.", "links": [], "name": "AssistedVolumeSnapshots", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-attach-interfaces", "description": "Attach interface support.", "links": [], "name": "AttachInterfaces", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-availability-zone", "description": "1. Add availability_zone to the Create Server API.\n 2. Add availability zones describing.\n ", "links": [], "name": "AvailabilityZone", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-baremetal-ext-status", "description": "Add extended status in Baremetal Nodes v2 API.", "links": [], "name": "BareMetalExtStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-baremetal-nodes", "description": "Admin-only bare-metal node administration.", "links": [], "name": "BareMetalNodes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-block-device-mapping", "description": "Block device mapping boot support.", "links": [], "name": "BlockDeviceMapping", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-block-device-mapping-v2-boot", "description": "Allow boot with the new BDM data format.", "links": [], "name": "BlockDeviceMappingV2Boot", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cell-capacities", "description": "Adding functionality to get cell capacities.", "links": [], "name": "CellCapacities", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cells", "description": "Enables cells-related functionality such as adding neighbor cells,\n listing neighbor cells, and getting the capabilities of the local cell.\n ", "links": [], "name": "Cells", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-certificates", "description": "Certificates support.", "links": [], "name": "Certificates", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cloudpipe", "description": "Adds actions to create cloudpipe instances.\n\n When running with the Vlan network mode, you need a mechanism to route\n from the public Internet to your vlans. This mechanism is known as a\n cloudpipe.\n\n At the time of creating this class, only OpenVPN is supported. 
Support for\n a SSH Bastion host is forthcoming.\n ", "links": [], "name": "Cloudpipe", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cloudpipe-update", "description": "Adds the ability to set the vpn ip/port for cloudpipe instances.", "links": [], "name": "CloudpipeUpdate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-config-drive", "description": "Config Drive Extension.", "links": [], "name": "ConfigDrive", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-console-auth-tokens", "description": "Console token authentication support.", "links": [], "name": "ConsoleAuthTokens", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-console-output", "description": "Console log output support, with tailing ability.", "links": [], "name": "ConsoleOutput", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-consoles", "description": "Interactive Console support.", "links": [], "name": "Consoles", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-create-backup", "description": "Create a backup of a server.", "links": [], "name": "CreateBackup", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-create-server-ext", "description": "Extended support to the Create Server v1.1 API.", "links": [], "name": "Createserverext", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-deferred-delete", "description": "Instance deferred delete.", "links": [], "name": "DeferredDelete", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-evacuate", "description": "Enables server evacuation.", "links": [], "name": "Evacuate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-evacuate-find-host", "description": "Enables server evacuation without target host. 
Scheduler will select one to target.", "links": [], "name": "ExtendedEvacuateFindHost", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-floating-ips", "description": "Adds optional fixed_address to the add floating IP command.", "links": [], "name": "ExtendedFloatingIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-hypervisors", "description": "Extended hypervisors support.", "links": [], "name": "ExtendedHypervisors", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-networks", "description": "Adds additional fields to networks.", "links": [], "name": "ExtendedNetworks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-quotas", "description": "Adds ability for admins to delete quota and optionally force the update Quota command.", "links": [], "name": "ExtendedQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-rescue-with-image", "description": "Allow the user to specify the image to use for rescue.", "links": [], "name": "ExtendedRescueWithImage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-services", "description": "Extended services support.", "links": [], "name": "ExtendedServices", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-services-delete", "description": "Extended services deletion support.", "links": [], "name": "ExtendedServicesDelete", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-status", "description": "Extended Status support.", "links": [], "name": "ExtendedStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-volumes", "description": "Extended Volumes support.", "links": [], "name": "ExtendedVolumes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-fixed-ips", "description": "Fixed IPs support.", "links": [], "name": "FixedIPs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-access", "description": "Flavor access support.", "links": [], "name": "FlavorAccess", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-extra-specs", "description": "Flavors extra specs support.", "links": [], "name": "FlavorExtraSpecs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-manage", "description": "Flavor create/delete API support.", "links": [], "name": "FlavorManage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-rxtx", "description": "Support to show the rxtx status of a flavor.", "links": [], "name": "FlavorRxtx", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-swap", "description": "Support to show the swap status of a flavor.", "links": [], "name": "FlavorSwap", 
"namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ip-dns", "description": "Floating IP DNS support.", "links": [], "name": "FloatingIpDns", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ip-pools", "description": "Floating IPs support.", "links": [], "name": "FloatingIpPools", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ips", "description": "Floating IPs support.", "links": [], "name": "FloatingIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ips-bulk", "description": "Bulk handling of Floating IPs.", "links": [], "name": "FloatingIpsBulk", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-fping", "description": "Fping Management Extension.", "links": [], "name": "Fping", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hide-server-addresses", "description": "Support hiding server addresses in certain states.", "links": [], "name": "HideServerAddresses", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hosts", "description": "Admin-only host administration.", "links": [], "name": "Hosts", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hypervisor-status", "description": "Show hypervisor status.", "links": [], "name": "HypervisorStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hypervisors", "description": "Admin-only hypervisor administration.", "links": [], "name": "Hypervisors", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-instance-actions", "description": "View a log of actions and events taken on an instance.", "links": [], "name": "InstanceActions", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-instance_usage_audit_log", "description": "Admin-only Task Log Monitoring.", "links": [], "name": "OSInstanceUsageAuditLog", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-keypairs", "description": "Keypair Support.", "links": [], "name": "Keypairs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-lock-server", "description": "Enable lock/unlock server actions.", "links": [], "name": "LockServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-migrate-server", "description": "Enable migrate and live-migrate server actions.", "links": [], "name": "MigrateServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-migrations", "description": "Provide data on migrations.", "links": [], "name": "Migrations", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-multiple-create", "description": "Allow multiple create in the Create Server v2.1 API.", "links": [], "name": "MultipleCreate", "namespace": 
"http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-networks", "description": "Admin-only Network Management Extension.", "links": [], "name": "Networks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-networks-associate", "description": "Network association support.", "links": [], "name": "NetworkAssociationSupport", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-pause-server", "description": "Enable pause/unpause server actions.", "links": [], "name": "PauseServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-personality", "description": "Personality support.", "links": [], "name": "Personality", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-preserve-ephemeral-rebuild", "description": "Allow preservation of the ephemeral partition on rebuild.", "links": [], "name": "PreserveEphemeralOnRebuild", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-quota-class-sets", "description": "Quota classes management support.", "links": [], "name": "QuotaClasses", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-quota-sets", "description": "Quotas management support.", "links": [], "name": "Quotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-rescue", "description": "Instance rescue mode.", "links": [], "name": "Rescue", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-security-group-default-rules", "description": "Default rules for security group support.", "links": [], "name": "SecurityGroupDefaultRules", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-security-groups", "description": "Security group support.", "links": [], "name": "SecurityGroups", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-diagnostics", "description": "Allow Admins to view server diagnostics through server action.", "links": [], "name": "ServerDiagnostics", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-external-events", "description": "Server External Event Triggers.", "links": [], "name": "ServerExternalEvents", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-group-quotas", "description": "Adds quota support to server groups.", "links": [], "name": "ServerGroupQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-groups", "description": "Server group support.", "links": [], "name": "ServerGroups", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-list-multi-status", "description": "Allow to filter the servers by a set of status values.", "links": [], "name": "ServerListMultiStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": 
"os-server-password", "description": "Server password support.", "links": [], "name": "ServerPassword", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-sort-keys", "description": "Add sorting support in get Server v2 API.", "links": [], "name": "ServerSortKeys", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-start-stop", "description": "Start/Stop instance compute API support.", "links": [], "name": "ServerStartStop", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-services", "description": "Services support.", "links": [], "name": "Services", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-shelve", "description": "Instance shelve mode.", "links": [], "name": "Shelve", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-simple-tenant-usage", "description": "Simple tenant usage extension.", "links": [], "name": "SimpleTenantUsage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-suspend-server", "description": "Enable suspend/resume server actions.", "links": [], "name": "SuspendServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-tenant-networks", "description": "Tenant-based Network Management Extension.", "links": [], "name": "OSTenantNetworks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-used-limits", "description": "Provide data on limited resources that are being used.", "links": [], "name": "UsedLimits", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-used-limits-for-admin", "description": "Provide data to admin on limited resources used by other tenants.", "links": [], "name": "UsedLimitsForAdmin", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-user-data", "description": "Add user_data to the Create Server API.", "links": [], "name": "UserData", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-user-quotas", "description": "Project user quota support.", "links": [], "name": "UserQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-virtual-interfaces", "description": "Virtual interface support.", "links": [], "name": "VirtualInterfaces", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-volume-attachment-update", "description": "Support for updating a volume attachment.", "links": [], "name": "VolumeAttachmentUpdate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-volumes", "description": "Volumes support.", "links": [], "name": "Volumes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.009606 nova-32.0.0/doc/api_samples/flavor-access/0000775000175000017500000000000000000000000020354 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-access/flavor-access-add-tenant-req.json0000664000175000017500000000010300000000000026573 0ustar00zuulzuul00000000000000{ "addTenantAccess": { "tenant": "fake_tenant" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-access/flavor-access-add-tenant-resp.json0000664000175000017500000000021700000000000026763 0ustar00zuulzuul00000000000000{ "flavor_access": [ { "flavor_id": "10", "tenant_id": "6f70656e737461636b20342065766572" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-access/flavor-access-list-resp.json0000664000175000017500000000021700000000000025717 0ustar00zuulzuul00000000000000{ "flavor_access": [ { "flavor_id": "10", "tenant_id": "6f70656e737461636b20342065766572" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-access/flavor-access-remove-tenant-req.json0000664000175000017500000000010600000000000027343 0ustar00zuulzuul00000000000000{ "removeTenantAccess": { "tenant": "fake_tenant" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-access/flavor-access-remove-tenant-resp.json0000664000175000017500000000004000000000000027522 0ustar00zuulzuul00000000000000{ "flavor_access": [ ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-access/flavor-create-req.json0000664000175000017500000000026600000000000024572 0ustar00zuulzuul00000000000000{ "flavor": { "name": "test_flavor", "ram": 1024, "vcpus": 2, "disk": 10, "id": "10", "os-flavor-access:is_public": false } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.009606 nova-32.0.0/doc/api_samples/flavor-access/v2.7/0000775000175000017500000000000000000000000021050 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-access/v2.7/flavor-access-add-tenant-req.json0000664000175000017500000000010300000000000027267 0ustar00zuulzuul00000000000000{ "addTenantAccess": { "tenant": "fake_tenant" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-access/v2.7/flavor-create-req.json0000664000175000017500000000026500000000000025265 0ustar00zuulzuul00000000000000{ "flavor": { "name": "test_flavor", "ram": 1024, "vcpus": 2, "disk": 10, "id": "10", "os-flavor-access:is_public": true } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.009606 nova-32.0.0/doc/api_samples/flavor-extra-specs/0000775000175000017500000000000000000000000021351 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json0000664000175000017500000000013700000000000030020 0ustar00zuulzuul00000000000000{ "extra_specs": { "hw:cpu_policy": "shared", 
"hw:numa_nodes": "1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json0000664000175000017500000000013700000000000030202 0ustar00zuulzuul00000000000000{ "extra_specs": { "hw:cpu_policy": "shared", "hw:numa_nodes": "1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json0000664000175000017500000000003500000000000027513 0ustar00zuulzuul00000000000000{ "hw:numa_nodes": "1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json0000664000175000017500000000013700000000000027712 0ustar00zuulzuul00000000000000{ "extra_specs": { "hw:cpu_policy": "shared", "hw:numa_nodes": "1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json0000664000175000017500000000003500000000000030034 0ustar00zuulzuul00000000000000{ "hw:numa_nodes": "2" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json0000664000175000017500000000003500000000000030216 0ustar00zuulzuul00000000000000{ "hw:numa_nodes": "2" } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.009606 nova-32.0.0/doc/api_samples/flavor-manage/0000775000175000017500000000000000000000000020343 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-manage/flavor-create-post-req.json0000664000175000017500000000024500000000000025541 0ustar00zuulzuul00000000000000{ "flavor": { "name": "test_flavor", "ram": 1024, "vcpus": 2, "disk": 10, "id": "10", "rxtx_factor": 2.0 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-manage/flavor-create-post-resp.json0000664000175000017500000000123200000000000025720 0ustar00zuulzuul00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "disk": 10, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "10", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/10", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/10", "rel": "bookmark" } ], "name": "test_flavor", "ram": 1024, "swap": "", "rxtx_factor": 2.0, "vcpus": 2 } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0136058 nova-32.0.0/doc/api_samples/flavor-manage/v2.55/0000775000175000017500000000000000000000000021122 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-manage/v2.55/flavor-create-post-req.json0000664000175000017500000000032000000000000026312 0ustar00zuulzuul00000000000000{ "flavor": { "name": "test_flavor", "ram": 1024, "vcpus": 2, "disk": 10, "id": "10", "rxtx_factor": 2.0, "description": "test description" } } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-manage/v2.55/flavor-create-post-resp.json0000664000175000017500000000130500000000000026500 0ustar00zuulzuul00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "disk": 10, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "10", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/10", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/10", "rel": "bookmark" } ], "name": "test_flavor", "ram": 1024, "swap": "", "rxtx_factor": 2.0, "vcpus": 2, "description": "test description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-manage/v2.55/flavor-update-req.json0000664000175000017500000000010700000000000025351 0ustar00zuulzuul00000000000000{ "flavor": { "description": "updated description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-manage/v2.55/flavor-update-resp.json0000664000175000017500000000130100000000000025530 0ustar00zuulzuul00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "1", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "ram": 512, "swap": "", "vcpus": 1, "rxtx_factor": 1.0, "description": "updated description" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0136058 nova-32.0.0/doc/api_samples/flavor-manage/v2.61/0000775000175000017500000000000000000000000021117 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-manage/v2.61/flavor-create-post-req.json0000664000175000017500000000032000000000000026307 0ustar00zuulzuul00000000000000{ "flavor": { "name": "test_flavor", "ram": 1024, "vcpus": 2, "disk": 10, "id": "10", "rxtx_factor": 2.0, "description": "test description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-manage/v2.61/flavor-create-post-resp.json0000664000175000017500000000134000000000000026474 0ustar00zuulzuul00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "disk": 10, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "10", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/10", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/10", "rel": "bookmark" } ], "name": "test_flavor", "ram": 1024, "swap": "", "rxtx_factor": 2.0, "vcpus": 2, "description": "test description", "extra_specs": {} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-manage/v2.61/flavor-update-req.json0000664000175000017500000000010700000000000025346 0ustar00zuulzuul00000000000000{ "flavor": { "description": "updated description" } } 
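Editor's note: similarly, a minimal hypothetical sketch (again assuming python-requests, a placeholder endpoint, flavor id "1", and token) of sending the v2.61 flavor-update-req.json body shown above; microversion 2.61 also returns extra_specs in the flavor response, as the neighbouring flavor-update-resp.json sample shows:

# Hypothetical sketch: PUT the v2.61 flavor-update request body shown above.
# The URL, flavor id, and token are assumed placeholders.
import requests

resp = requests.put(
    "http://openstack.example.com/v2.1/flavors/1",
    json={"flavor": {"description": "updated description"}},
    headers={
        "X-Auth-Token": "<keystone-token>",
        "OpenStack-API-Version": "compute 2.61",
    },
)
flavor = resp.json()["flavor"]
print(flavor["description"], flavor.get("extra_specs"))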
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-manage/v2.61/flavor-update-resp.json0000664000175000017500000000133400000000000025533 0ustar00zuulzuul00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "1", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "ram": 512, "swap": "", "vcpus": 1, "rxtx_factor": 1.0, "description": "updated description", "extra_specs": {} } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0136058 nova-32.0.0/doc/api_samples/flavor-manage/v2.75/0000775000175000017500000000000000000000000021124 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-manage/v2.75/flavor-create-post-req.json0000664000175000017500000000032000000000000026314 0ustar00zuulzuul00000000000000{ "flavor": { "name": "test_flavor", "ram": 1024, "vcpus": 2, "disk": 10, "id": "10", "rxtx_factor": 2.0, "description": "test description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-manage/v2.75/flavor-create-post-resp.json0000664000175000017500000000133700000000000026507 0ustar00zuulzuul00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "disk": 10, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "10", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/10", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/10", "rel": "bookmark" } ], "name": "test_flavor", "ram": 1024, "swap": 0, "rxtx_factor": 2.0, "vcpus": 2, "description": "test description", "extra_specs": {} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-manage/v2.75/flavor-update-req.json0000664000175000017500000000010700000000000025353 0ustar00zuulzuul00000000000000{ "flavor": { "description": "updated description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavor-manage/v2.75/flavor-update-resp.json0000664000175000017500000000133300000000000025537 0ustar00zuulzuul00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "1", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1, "rxtx_factor": 1.0, "description": "updated description", "extra_specs": {} } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0136058 nova-32.0.0/doc/api_samples/flavors/0000775000175000017500000000000000000000000017300 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavors/flavor-get-resp.json0000664000175000017500000000122100000000000023204 0ustar00zuulzuul00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "1", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "ram": 512, "swap": "", "vcpus": 1, "rxtx_factor": 1.0 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavors/flavors-detail-resp.json0000664000175000017500000001053000000000000024055 0ustar00zuulzuul00000000000000{ "flavors": [ { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "1", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "ram": 512, "swap": "", "vcpus": 1, "rxtx_factor": 1.0 }, { "OS-FLV-DISABLED:disabled": false, "disk": 20, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "2", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/2", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/2", "rel": "bookmark" } ], "name": "m1.small", "ram": 2048, "swap": "", "vcpus": 1, "rxtx_factor": 1.0 }, { "OS-FLV-DISABLED:disabled": false, "disk": 40, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "3", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/3", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/3", "rel": "bookmark" } ], "name": "m1.medium", "ram": 4096, "swap": "", "vcpus": 2, "rxtx_factor": 1.0 }, { "OS-FLV-DISABLED:disabled": false, "disk": 80, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "4", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/4", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/4", "rel": "bookmark" } ], "name": "m1.large", "ram": 8192, "swap": "", "vcpus": 4, "rxtx_factor": 1.0 }, { "OS-FLV-DISABLED:disabled": false, "disk": 160, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "5", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/5", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge", "ram": 16384, "swap": "", "vcpus": 8, "rxtx_factor": 1.0 }, { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "6", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/6", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/6", "rel": "bookmark" } ], "name": "m1.tiny.specs", "ram": 512, "swap": "", "vcpus": 1, "rxtx_factor": 1.0 } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavors/flavors-list-resp.json0000664000175000017500000000542200000000000023572 0ustar00zuulzuul00000000000000{ "flavors": [ { "id": "1", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny" }, { "id": "2", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/2", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/2", "rel": "bookmark" } ], "name": "m1.small" }, { "id": "3", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/3", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/3", "rel": "bookmark" } ], "name": "m1.medium" }, { "id": "4", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/4", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/4", "rel": "bookmark" } ], "name": "m1.large" }, { "id": "5", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/5", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge" }, { "id": "6", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/6", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/6", "rel": "bookmark" } ], "name": "m1.tiny.specs" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0136058 nova-32.0.0/doc/api_samples/flavors/v2.55/0000775000175000017500000000000000000000000020057 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavors/v2.55/flavor-get-resp.json0000664000175000017500000000131300000000000023765 0ustar00zuulzuul00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "disk": 20, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "7", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/7", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/7", "rel": "bookmark" } ], "name": "m1.small.description", "ram": 2048, "swap": "", "vcpus": 1, "rxtx_factor": 1.0, "description": "test description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavors/v2.55/flavors-detail-resp.json0000664000175000017500000001247000000000000024641 0ustar00zuulzuul00000000000000{ "flavors": [ { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "1", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "ram": 512, "swap": "", "vcpus": 1, "rxtx_factor": 1.0, "description": null }, { "OS-FLV-DISABLED:disabled": false, "disk": 20, "OS-FLV-EXT-DATA:ephemeral": 0, 
"os-flavor-access:is_public": true, "id": "2", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/2", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/2", "rel": "bookmark" } ], "name": "m1.small", "ram": 2048, "swap": "", "vcpus": 1, "rxtx_factor": 1.0, "description": null }, { "OS-FLV-DISABLED:disabled": false, "disk": 40, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "3", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/3", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/3", "rel": "bookmark" } ], "name": "m1.medium", "ram": 4096, "swap": "", "vcpus": 2, "rxtx_factor": 1.0, "description": null }, { "OS-FLV-DISABLED:disabled": false, "disk": 80, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "4", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/4", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/4", "rel": "bookmark" } ], "name": "m1.large", "ram": 8192, "swap": "", "vcpus": 4, "rxtx_factor": 1.0, "description": null }, { "OS-FLV-DISABLED:disabled": false, "disk": 160, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "5", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/5", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge", "ram": 16384, "swap": "", "vcpus": 8, "rxtx_factor": 1.0, "description": null }, { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "6", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/6", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/6", "rel": "bookmark" } ], "name": "m1.tiny.specs", "ram": 512, "swap": "", "vcpus": 1, "rxtx_factor": 1.0, "description": null }, { "OS-FLV-DISABLED:disabled": false, "disk": 20, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "7", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/7", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/7", "rel": "bookmark" } ], "name": "m1.small.description", "ram": 2048, "swap": "", "vcpus": 1, "rxtx_factor": 1.0, "description": "test description" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavors/v2.55/flavors-list-resp.json0000664000175000017500000000674600000000000024363 0ustar00zuulzuul00000000000000{ "flavors": [ { "id": "1", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "description": null }, { "id": "2", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/2", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/2", "rel": "bookmark" } ], "name": "m1.small", "description": null }, { "id": "3", "links": [ { "href": 
"http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/3", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/3", "rel": "bookmark" } ], "name": "m1.medium", "description": null }, { "id": "4", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/4", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/4", "rel": "bookmark" } ], "name": "m1.large", "description": null }, { "id": "5", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/5", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge", "description": null }, { "id": "6", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/6", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/6", "rel": "bookmark" } ], "name": "m1.tiny.specs", "description": null }, { "id": "7", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/7", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/7", "rel": "bookmark" } ], "name": "m1.small.description", "description": "test description" } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.017606 nova-32.0.0/doc/api_samples/flavors/v2.61/0000775000175000017500000000000000000000000020054 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavors/v2.61/flavor-get-resp.json0000664000175000017500000000146700000000000023774 0ustar00zuulzuul00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "disk": 20, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "7", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/7", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/7", "rel": "bookmark" } ], "name": "m1.small.description", "ram": 2048, "swap": "", "vcpus": 1, "rxtx_factor": 1.0, "description": "test description", "extra_specs": { "hw:cpu_policy": "shared", "hw:numa_nodes": "1" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavors/v2.61/flavors-detail-resp.json0000664000175000017500000001324000000000000024632 0ustar00zuulzuul00000000000000{ "flavors": [ { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "1", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "ram": 512, "swap": "", "vcpus": 1, "rxtx_factor": 1.0, "description": null, "extra_specs": {} }, { "OS-FLV-DISABLED:disabled": false, "disk": 20, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "2", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/2", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/2", "rel": "bookmark" } ], "name": "m1.small", 
"ram": 2048, "swap": "", "vcpus": 1, "rxtx_factor": 1.0, "description": null, "extra_specs": {} }, { "OS-FLV-DISABLED:disabled": false, "disk": 40, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "3", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/3", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/3", "rel": "bookmark" } ], "name": "m1.medium", "ram": 4096, "swap": "", "vcpus": 2, "rxtx_factor": 1.0, "description": null, "extra_specs": {} }, { "OS-FLV-DISABLED:disabled": false, "disk": 80, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "4", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/4", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/4", "rel": "bookmark" } ], "name": "m1.large", "ram": 8192, "swap": "", "vcpus": 4, "rxtx_factor": 1.0, "description": null, "extra_specs": {} }, { "OS-FLV-DISABLED:disabled": false, "disk": 160, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "5", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/5", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge", "ram": 16384, "swap": "", "vcpus": 8, "rxtx_factor": 1.0, "description": null, "extra_specs": {} }, { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "6", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/6", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/6", "rel": "bookmark" } ], "name": "m1.tiny.specs", "ram": 512, "swap": "", "vcpus": 1, "rxtx_factor": 1.0, "description": null, "extra_specs": { "hw:numa_nodes": "1" } }, { "OS-FLV-DISABLED:disabled": false, "disk": 20, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "7", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/7", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/7", "rel": "bookmark" } ], "name": "m1.small.description", "ram": 2048, "swap": "", "vcpus": 1, "rxtx_factor": 1.0, "description": "test description", "extra_specs": { "hw:cpu_policy": "shared", "hw:numa_nodes": "1" } } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavors/v2.61/flavors-list-resp.json0000664000175000017500000000674600000000000024360 0ustar00zuulzuul00000000000000{ "flavors": [ { "id": "1", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "description": null }, { "id": "2", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/2", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/2", "rel": "bookmark" } ], "name": "m1.small", "description": null }, { "id": "3", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/3", "rel": "self" }, { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/flavors/3", "rel": "bookmark" } ], "name": "m1.medium", "description": null }, { "id": "4", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/4", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/4", "rel": "bookmark" } ], "name": "m1.large", "description": null }, { "id": "5", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/5", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge", "description": null }, { "id": "6", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/6", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/6", "rel": "bookmark" } ], "name": "m1.tiny.specs", "description": null }, { "id": "7", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/7", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/7", "rel": "bookmark" } ], "name": "m1.small.description", "description": "test description" } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.017606 nova-32.0.0/doc/api_samples/flavors/v2.75/0000775000175000017500000000000000000000000020061 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavors/v2.75/flavor-get-resp.json0000664000175000017500000000146600000000000024000 0ustar00zuulzuul00000000000000{ "flavor": { "OS-FLV-DISABLED:disabled": false, "disk": 20, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "7", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/7", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/7", "rel": "bookmark" } ], "name": "m1.small.description", "ram": 2048, "swap": 0, "vcpus": 1, "rxtx_factor": 1.0, "description": "test description", "extra_specs": { "hw:cpu_policy": "shared", "hw:numa_nodes": "1" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavors/v2.75/flavors-detail-resp.json0000664000175000017500000001323100000000000024637 0ustar00zuulzuul00000000000000{ "flavors": [ { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "1", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1, "rxtx_factor": 1.0, "description": null, "extra_specs": {} }, { "OS-FLV-DISABLED:disabled": false, "disk": 20, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "2", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/2", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/2", "rel": "bookmark" } ], "name": "m1.small", "ram": 2048, "swap": 0, "vcpus": 1, "rxtx_factor": 1.0, "description": null, "extra_specs": {} }, { 
"OS-FLV-DISABLED:disabled": false, "disk": 40, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "3", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/3", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/3", "rel": "bookmark" } ], "name": "m1.medium", "ram": 4096, "swap": 0, "vcpus": 2, "rxtx_factor": 1.0, "description": null, "extra_specs": {} }, { "OS-FLV-DISABLED:disabled": false, "disk": 80, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "4", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/4", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/4", "rel": "bookmark" } ], "name": "m1.large", "ram": 8192, "swap": 0, "vcpus": 4, "rxtx_factor": 1.0, "description": null, "extra_specs": {} }, { "OS-FLV-DISABLED:disabled": false, "disk": 160, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "5", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/5", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge", "ram": 16384, "swap": 0, "vcpus": 8, "rxtx_factor": 1.0, "description": null, "extra_specs": {} }, { "OS-FLV-DISABLED:disabled": false, "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "6", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/6", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/6", "rel": "bookmark" } ], "name": "m1.tiny.specs", "ram": 512, "swap": 0, "vcpus": 1, "rxtx_factor": 1.0, "description": null, "extra_specs": { "hw:numa_nodes": "1" } }, { "OS-FLV-DISABLED:disabled": false, "disk": 20, "OS-FLV-EXT-DATA:ephemeral": 0, "os-flavor-access:is_public": true, "id": "7", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/7", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/7", "rel": "bookmark" } ], "name": "m1.small.description", "ram": 2048, "swap": 0, "vcpus": 1, "rxtx_factor": 1.0, "description": "test description", "extra_specs": { "hw:cpu_policy": "shared", "hw:numa_nodes": "1" } } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/flavors/v2.75/flavors-list-resp.json0000664000175000017500000000674600000000000024365 0ustar00zuulzuul00000000000000{ "flavors": [ { "id": "1", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ], "name": "m1.tiny", "description": null }, { "id": "2", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/2", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/2", "rel": "bookmark" } ], "name": "m1.small", "description": null }, { "id": "3", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/3", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/3", "rel": "bookmark" } ], "name": "m1.medium", "description": 
null }, { "id": "4", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/4", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/4", "rel": "bookmark" } ], "name": "m1.large", "description": null }, { "id": "5", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/5", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/5", "rel": "bookmark" } ], "name": "m1.xlarge", "description": null }, { "id": "6", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/6", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/6", "rel": "bookmark" } ], "name": "m1.tiny.specs", "description": null }, { "id": "7", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/7", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/7", "rel": "bookmark" } ], "name": "m1.small.description", "description": "test description" } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.017606 nova-32.0.0/doc/api_samples/images/0000775000175000017500000000000000000000000017071 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/images/image-get-resp.json0000664000175000017500000000226400000000000022576 0ustar00zuulzuul00000000000000{ "image": { "OS-DCF:diskConfig": "AUTO", "OS-EXT-IMG-SIZE:size": 74185822, "created": "2011-01-01T01:02:03Z", "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "architecture": "x86_64", "auto_disk_config": "True", "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage7", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/images/image-meta-key-get.json0000664000175000017500000000006700000000000023340 0ustar00zuulzuul00000000000000{ "meta": { "kernel_id": "nokernel" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/images/image-meta-key-put-req.json0000664000175000017500000000007400000000000024154 0ustar00zuulzuul00000000000000{ "meta": { "auto_disk_config": "False" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/images/image-meta-key-put-resp.json0000664000175000017500000000007300000000000024335 0ustar00zuulzuul00000000000000{ "meta": { "auto_disk_config": "False" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/images/image-metadata-get-resp.json0000664000175000017500000000024300000000000024347 0ustar00zuulzuul00000000000000{ "metadata": { "architecture": "x86_64", "auto_disk_config": "True", "kernel_id": "nokernel", "ramdisk_id": "nokernel" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/images/image-metadata-post-req.json0000664000175000017500000000013100000000000024367 0ustar00zuulzuul00000000000000{ "metadata": { "kernel_id": "False", "Label": "UpdatedImage" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/images/image-metadata-post-resp.json0000664000175000017500000000030100000000000024550 0ustar00zuulzuul00000000000000{ "metadata": { "Label": "UpdatedImage", "architecture": "x86_64", "auto_disk_config": "True", "kernel_id": "False", "ramdisk_id": "nokernel" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/images/image-metadata-put-req.json0000664000175000017500000000013200000000000024213 0ustar00zuulzuul00000000000000{ "metadata": { "auto_disk_config": "True", "Label": "Changed" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/images/image-metadata-put-resp.json0000664000175000017500000000013200000000000024375 0ustar00zuulzuul00000000000000{ "metadata": { "Label": "Changed", "auto_disk_config": "True" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/images/images-details-get-resp.json0000664000175000017500000003242600000000000024407 0ustar00zuulzuul00000000000000{ "images": [ { "OS-EXT-IMG-SIZE:size": 25165824, "created": "2011-01-01T01:02:03Z", "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/155d900f-4e14-4e4c-a73d-069cbf4541e6", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "architecture": "x86_64", "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage123456", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "OS-EXT-IMG-SIZE:size": 58145823, "created": "2011-01-01T01:02:03Z", "id": "a2459075-d96c-40d5-893e-577ff92e721c", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/a2459075-d96c-40d5-893e-577ff92e721c", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage123456", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "OS-EXT-IMG-SIZE:size": 83594576, "created": "2011-01-01T01:02:03Z", "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "links": [ 
{ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "architecture": "x86_64", "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage123456", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "OS-EXT-IMG-SIZE:size": 84035174, "created": "2011-01-01T01:02:03Z", "id": "cedef40a-ed67-4d10-800e-17455edce175", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/cedef40a-ed67-4d10-800e-17455edce175", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage123456", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "OS-EXT-IMG-SIZE:size": 26360814, "created": "2011-01-01T01:02:03Z", "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ramdisk_id": null }, "minDisk": 0, "minRam": 0, "name": "fakeimage123456", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "OS-DCF:diskConfig": "MANUAL", "OS-EXT-IMG-SIZE:size": 49163826, "created": "2011-01-01T01:02:03Z", "id": "a440c04b-79fa-479c-bed1-0b816eaec379", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a440c04b-79fa-479c-bed1-0b816eaec379", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a440c04b-79fa-479c-bed1-0b816eaec379", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/a440c04b-79fa-479c-bed1-0b816eaec379", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "architecture": "x86_64", "auto_disk_config": "False", "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage6", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "OS-DCF:diskConfig": "AUTO", "OS-EXT-IMG-SIZE:size": 74185822, "created": "2011-01-01T01:02:03Z", "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" }, { "href": 
"http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "architecture": "x86_64", "auto_disk_config": "True", "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, "minDisk": 0, "minRam": 0, "name": "fakeimage7", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "OS-EXT-IMG-SIZE:size": 25165824, "created": "2011-01-01T01:02:03Z", "id": "95fad737-9325-4855-b37e-20a62268ec88", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/95fad737-9325-4855-b37e-20a62268ec88", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/95fad737-9325-4855-b37e-20a62268ec88", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/95fad737-9325-4855-b37e-20a62268ec88", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "hw_ephemeral_encryption": "True" }, "minDisk": 0, "minRam": 0, "name": "fakeimage123456", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "OS-EXT-IMG-SIZE:size": 25165824, "created": "2011-01-01T01:02:03Z", "id": "535426d4-5d75-44f4-9591-a2123d23c33f", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/535426d4-5d75-44f4-9591-a2123d23c33f", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/535426d4-5d75-44f4-9591-a2123d23c33f", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/535426d4-5d75-44f4-9591-a2123d23c33f", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "hw_ephemeral_encryption": "False" }, "minDisk": 0, "minRam": 0, "name": "fakeimage123456", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "OS-EXT-IMG-SIZE:size": 25165824, "created": "2011-01-01T01:02:03Z", "id": "5f7d4f5b-3781-4a4e-9046-a2a800e807e5", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/5f7d4f5b-3781-4a4e-9046-a2a800e807e5", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/5f7d4f5b-3781-4a4e-9046-a2a800e807e5", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/5f7d4f5b-3781-4a4e-9046-a2a800e807e5", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "hw_ephemeral_encryption": "True", "hw_ephemeral_encryption_format": "luks" }, "minDisk": 0, "minRam": 0, "name": "fakeimage123456", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" }, { "OS-EXT-IMG-SIZE:size": 25165824, "created": "2011-01-01T01:02:03Z", "id": "261b52ed-f693-4147-8f3b-d25df5efd968", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/261b52ed-f693-4147-8f3b-d25df5efd968", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/261b52ed-f693-4147-8f3b-d25df5efd968", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/261b52ed-f693-4147-8f3b-d25df5efd968", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "metadata": { "hw_ephemeral_encryption": "True", "hw_ephemeral_encryption_format": "plain" }, "minDisk": 0, "minRam": 0, "name": "fakeimage123456", "progress": 100, "status": "ACTIVE", "updated": "2011-01-01T01:02:03Z" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/images/images-list-get-resp.json0000664000175000017500000002162700000000000023736 0ustar00zuulzuul00000000000000{ "images": [ { "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/155d900f-4e14-4e4c-a73d-069cbf4541e6", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage123456" }, { "id": "a2459075-d96c-40d5-893e-577ff92e721c", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/a2459075-d96c-40d5-893e-577ff92e721c", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage123456" }, { "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage123456" }, { "id": "cedef40a-ed67-4d10-800e-17455edce175", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/cedef40a-ed67-4d10-800e-17455edce175", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage123456" }, { "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage123456" }, { "id": "a440c04b-79fa-479c-bed1-0b816eaec379", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a440c04b-79fa-479c-bed1-0b816eaec379", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a440c04b-79fa-479c-bed1-0b816eaec379", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/a440c04b-79fa-479c-bed1-0b816eaec379", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage6" }, { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "self" }, { 
"href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage7" }, { "id": "a2293931-dc33-45cc-85ef-232aa9491710", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a2293931-dc33-45cc-85ef-232aa9491710", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a2293931-dc33-45cc-85ef-232aa9491710", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/a2293931-dc33-45cc-85ef-232aa9491710", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage123456" }, { "id": "e78f0ee9-96ef-4ce7-accf-e816f273be45", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/e78f0ee9-96ef-4ce7-accf-e816f273be45", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/e78f0ee9-96ef-4ce7-accf-e816f273be45", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/e78f0ee9-96ef-4ce7-accf-e816f273be45", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage123456" }, { "id": "54eadb78-eeb6-4b13-beed-20b9894eeadf", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/54eadb78-eeb6-4b13-beed-20b9894eeadf", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/54eadb78-eeb6-4b13-beed-20b9894eeadf", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/54eadb78-eeb6-4b13-beed-20b9894eeadf", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage123456" }, { "id": "eb7458f3-d003-4187-8027-595591dc2723", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/eb7458f3-d003-4187-8027-595591dc2723", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/eb7458f3-d003-4187-8027-595591dc2723", "rel": "bookmark" }, { "href": "http://glance.openstack.example.com/images/eb7458f3-d003-4187-8027-595591dc2723", "rel": "alternate", "type": "application/vnd.openstack.image" } ], "name": "fakeimage123456" } ] }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.017606 nova-32.0.0/doc/api_samples/limits/0000775000175000017500000000000000000000000017125 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/limits/limit-get-resp.json0000664000175000017500000000141200000000000022660 0ustar00zuulzuul00000000000000{ "limits": { "absolute": { "maxImageMeta": 128, "maxPersonality": 5, "maxPersonalitySize": 10240, "maxSecurityGroupRules": -1, "maxSecurityGroups": -1, "maxServerMeta": 128, "maxTotalCores": 20, "maxTotalFloatingIps": -1, "maxTotalInstances": 10, "maxTotalKeypairs": 100, "maxTotalRAMSize": 51200, "maxServerGroups": 10, "maxServerGroupMembers": 10, "totalCoresUsed": 0, "totalInstancesUsed": 0, "totalRAMUsed": 0, "totalSecurityGroupsUsed": 0, "totalFloatingIpsUsed": 0, "totalServerGroupsUsed": 0 }, "rate": [] } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.017606 
nova-32.0.0/doc/api_samples/limits/v2.36/0000775000175000017500000000000000000000000017703 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/limits/v2.36/limit-get-resp.json0000664000175000017500000000110400000000000023434 0ustar00zuulzuul00000000000000{ "limits": { "absolute": { "maxImageMeta": 128, "maxPersonality": 5, "maxPersonalitySize": 10240, "maxServerMeta": 128, "maxTotalCores": 20, "maxTotalInstances": 10, "maxTotalKeypairs": 100, "maxTotalRAMSize": 51200, "maxServerGroups": 10, "maxServerGroupMembers": 10, "totalCoresUsed": 0, "totalInstancesUsed": 0, "totalRAMUsed": 0, "totalServerGroupsUsed": 0 }, "rate": [] } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.021606 nova-32.0.0/doc/api_samples/limits/v2.39/0000775000175000017500000000000000000000000017706 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/limits/v2.39/limit-get-resp.json0000664000175000017500000000104300000000000023441 0ustar00zuulzuul00000000000000{ "limits": { "absolute": { "maxPersonality": 5, "maxPersonalitySize": 10240, "maxServerMeta": 128, "maxTotalCores": 20, "maxTotalInstances": 10, "maxTotalKeypairs": 100, "maxTotalRAMSize": 51200, "maxServerGroups": 10, "maxServerGroupMembers": 10, "totalCoresUsed": 0, "totalInstancesUsed": 0, "totalRAMUsed": 0, "totalServerGroupsUsed": 0 }, "rate": [] } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.021606 nova-32.0.0/doc/api_samples/limits/v2.57/0000775000175000017500000000000000000000000017706 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/limits/v2.57/limit-get-resp.json0000664000175000017500000000073100000000000023444 0ustar00zuulzuul00000000000000{ "limits": { "absolute": { "maxServerMeta": 128, "maxTotalCores": 20, "maxTotalInstances": 10, "maxTotalKeypairs": 100, "maxTotalRAMSize": 51200, "maxServerGroups": 10, "maxServerGroupMembers": 10, "totalCoresUsed": 0, "totalInstancesUsed": 0, "totalRAMUsed": 0, "totalServerGroupsUsed": 0 }, "rate": [] } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.021606 nova-32.0.0/doc/api_samples/os-admin-actions/0000775000175000017500000000000000000000000020771 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-admin-actions/admin-actions-inject-network-info.json0000664000175000017500000000004200000000000030300 0ustar00zuulzuul00000000000000{ "injectNetworkInfo": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-admin-actions/admin-actions-reset-network.json0000664000175000017500000000003500000000000027217 0ustar00zuulzuul00000000000000{ "resetNetwork": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-admin-actions/admin-actions-reset-server-state.json0000664000175000017500000000007300000000000030154 0ustar00zuulzuul00000000000000{ "os-resetState": { "state": "active" } } 
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.021606 nova-32.0.0/doc/api_samples/os-admin-password/0000775000175000017500000000000000000000000021173 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-admin-password/admin-password-change-password.json0000664000175000017500000000007700000000000030105 0ustar00zuulzuul00000000000000{ "changePassword" : { "adminPass" : "foo" } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.021606 nova-32.0.0/doc/api_samples/os-agents/0000775000175000017500000000000000000000000017524 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-agents/agent-post-req.json0000664000175000017500000000035700000000000023272 0ustar00zuulzuul00000000000000{ "agent": { "hypervisor": "xen", "os": "os", "architecture": "x86", "version": "8.0", "md5hash": "add6bb58e139be103324d04d82d8f545", "url": "http://example.com/path/to/resource" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-agents/agent-post-resp.json0000664000175000017500000000040600000000000023447 0ustar00zuulzuul00000000000000{ "agent": { "agent_id": 1, "architecture": "x86", "hypervisor": "xen", "md5hash": "add6bb58e139be103324d04d82d8f545", "os": "os", "url": "http://example.com/path/to/resource", "version": "8.0" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-agents/agent-update-put-req.json0000664000175000017500000000023600000000000024371 0ustar00zuulzuul00000000000000{ "para": { "url": "http://example.com/path/to/resource", "md5hash": "add6bb58e139be103324d04d82d8f545", "version": "7.0" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-agents/agent-update-put-resp.json0000664000175000017500000000027000000000000024551 0ustar00zuulzuul00000000000000{ "agent": { "agent_id": "1", "md5hash": "add6bb58e139be103324d04d82d8f545", "url": "http://example.com/path/to/resource", "version": "7.0" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-agents/agents-get-resp.json0000664000175000017500000000046700000000000023433 0ustar00zuulzuul00000000000000{ "agents": [ { "agent_id": 1, "architecture": "x86", "hypervisor": "xen", "md5hash": "add6bb58e139be103324d04d82d8f545", "os": "os", "url": "http://example.com/path/to/resource", "version": "8.0" } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.025606 nova-32.0.0/doc/api_samples/os-aggregates/0000775000175000017500000000000000000000000020354 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/aggregate-add-host-post-req.json0000664000175000017500000000011700000000000026445 0ustar00zuulzuul00000000000000{ "add_host": { "host": "21549b2f665945baaa7101926a00143c" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/os-aggregates/aggregate-metadata-post-req.json0000664000175000017500000000021200000000000026516 0ustar00zuulzuul00000000000000{ "set_metadata": { "metadata": { "key": "value" } } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/aggregate-post-req.json0000664000175000017500000000013700000000000024746 0ustar00zuulzuul00000000000000{ "aggregate": { "name": "name", "availability_zone": "london" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/aggregate-post-resp.json0000664000175000017500000000036200000000000025130 0ustar00zuulzuul00000000000000{ "aggregate": { "availability_zone": "london", "created_at": "2013-08-18T12:17:55.751757", "deleted": false, "deleted_at": null, "id": 1, "name": "name", "updated_at": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/aggregate-remove-host-post-req.json0000664000175000017500000000012200000000000027206 0ustar00zuulzuul00000000000000{ "remove_host": { "host": "bf1454b3d71145d49fca2101c56c728d" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/aggregate-update-post-req.json0000664000175000017500000000014000000000000026220 0ustar00zuulzuul00000000000000{ "aggregate": { "name": "newname", "availability_zone": "nova2" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/aggregate-update-post-resp.json0000664000175000017500000000055300000000000026412 0ustar00zuulzuul00000000000000{ "aggregate": { "availability_zone": "nova2", "created_at": "2013-08-18T12:17:56.259751", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "nova2" }, "name": "newname", "updated_at": "2013-08-18T12:17:56.286720" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/aggregates-add-host-post-resp.json0000664000175000017500000000061200000000000027012 0ustar00zuulzuul00000000000000{ "aggregate": { "availability_zone": "london", "created_at": "2013-08-18T12:17:56.297823", "deleted": false, "deleted_at": null, "hosts": [ "21549b2f665945baaa7101926a00143c" ], "id": 1, "metadata": { "availability_zone": "london" }, "name": "name", "updated_at": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/aggregates-get-resp.json0000664000175000017500000000052200000000000025103 0ustar00zuulzuul00000000000000{ "aggregate": { "availability_zone": "london", "created_at": "2013-08-18T12:17:56.380226", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "london" }, "name": "name", "updated_at": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/aggregates-list-get-resp.json0000664000175000017500000000066500000000000026064 0ustar00zuulzuul00000000000000{ "aggregates": [ { "availability_zone": "london", "created_at": "2013-08-18T12:17:56.856455", "deleted": false, 
"deleted_at": null, "hosts": ["21549b2f665945baaa7101926a00143c"], "id": 1, "metadata": { "availability_zone": "london" }, "name": "name", "updated_at": null } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/aggregates-metadata-post-resp.json0000664000175000017500000000060600000000000027072 0ustar00zuulzuul00000000000000{ "aggregate": { "availability_zone": "london", "created_at": "2013-08-18T12:17:55.959571", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "london", "key": "value" }, "name": "name", "updated_at": "2013-08-18T12:17:55.986540" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/aggregates-remove-host-post-resp.json0000664000175000017500000000052200000000000027557 0ustar00zuulzuul00000000000000{ "aggregate": { "availability_zone": "london", "created_at": "2013-08-18T12:17:56.990581", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "london" }, "name": "name", "updated_at": null } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.025606 nova-32.0.0/doc/api_samples/os-aggregates/v2.41/0000775000175000017500000000000000000000000021126 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.41/aggregate-add-host-post-req.json0000664000175000017500000000006500000000000027221 0ustar00zuulzuul00000000000000{ "add_host": { "host": "compute" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.41/aggregate-metadata-post-req.json0000664000175000017500000000021200000000000027270 0ustar00zuulzuul00000000000000{ "set_metadata": { "metadata": { "key": "value" } } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.41/aggregate-post-req.json0000664000175000017500000000013400000000000025515 0ustar00zuulzuul00000000000000{ "aggregate": { "name": "name", "availability_zone": "nova" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.41/aggregate-post-resp.json0000664000175000017500000000045200000000000025702 0ustar00zuulzuul00000000000000{ "aggregate": { "availability_zone": "london", "created_at": "2016-12-27T22:51:32.877711", "deleted": false, "deleted_at": null, "id": 1, "name": "name", "updated_at": null, "uuid": "86a0da0e-9f0c-4f51-a1e0-3c25edab3783" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.41/aggregate-remove-host-post-req.json0000664000175000017500000000007000000000000027762 0ustar00zuulzuul00000000000000{ "remove_host": { "host": "compute" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.41/aggregate-update-post-req.json0000664000175000017500000000014000000000000026772 0ustar00zuulzuul00000000000000{ "aggregate": { "name": "newname", "availability_zone": "nova2" } 
}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.41/aggregate-update-post-resp.json0000664000175000017500000000064200000000000027163 0ustar00zuulzuul00000000000000{ "aggregate": { "availability_zone": "nova2", "created_at": "2016-12-27T23:47:32.897139", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "nova2" }, "name": "newname", "updated_at": "2016-12-27T23:47:33.067180", "uuid": "6f74e3f3-df28-48f3-98e1-ac941b1c5e43" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.41/aggregates-add-host-post-resp.json0000664000175000017500000000065100000000000027567 0ustar00zuulzuul00000000000000{ "aggregate": { "availability_zone": "london", "created_at": "2016-12-27T23:47:30.594805", "deleted": false, "deleted_at": null, "hosts": [ "compute" ], "id": 1, "metadata": { "availability_zone": "london" }, "name": "name", "updated_at": null, "uuid": "d1842372-89c5-4fbd-ad5a-5d2e16c85456" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.41/aggregates-get-resp.json0000664000175000017500000000061200000000000025655 0ustar00zuulzuul00000000000000{ "aggregate": { "availability_zone": "london", "created_at": "2016-12-27T23:47:30.563527", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "london" }, "name": "name", "updated_at": null, "uuid": "fd0a5b12-7e8d-469d-bfd5-64a6823e7407" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.41/aggregates-list-get-resp.json0000664000175000017500000000076600000000000026640 0ustar00zuulzuul00000000000000{ "aggregates": [ { "availability_zone": "london", "created_at": "2016-12-27T23:47:32.911515", "deleted": false, "deleted_at": null, "hosts": [ "compute" ], "id": 1, "metadata": { "availability_zone": "london" }, "name": "name", "updated_at": null, "uuid": "6ba28ba7-f29b-45cc-a30b-6e3a40c2fb14" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.41/aggregates-metadata-post-resp.json0000664000175000017500000000067600000000000027653 0ustar00zuulzuul00000000000000{ "aggregate": { "availability_zone": "london", "created_at": "2016-12-27T23:59:18.623100", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "london", "key": "value" }, "name": "name", "updated_at": "2016-12-27T23:59:18.723348", "uuid": "26002bdb-62cc-41bd-813a-0ad22db32625" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.41/aggregates-remove-host-post-resp.json0000664000175000017500000000061200000000000030331 0ustar00zuulzuul00000000000000{ "aggregate": { "availability_zone": "london", "created_at": "2016-12-27T23:47:30.594805", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "london" }, "name": "name", "updated_at": null, "uuid": "d1842372-89c5-4fbd-ad5a-5d2e16c85456" } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.029606 
nova-32.0.0/doc/api_samples/os-aggregates/v2.81/0000775000175000017500000000000000000000000021132 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.81/aggregate-add-host-post-req.json0000664000175000017500000000006500000000000027225 0ustar00zuulzuul00000000000000{ "add_host": { "host": "compute" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.81/aggregate-images-post-req.json0000664000175000017500000000012300000000000026762 0ustar00zuulzuul00000000000000{ "cache": [ {"id": "70a599e0-31e7-49b7-b260-868f441e862b"} ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.81/aggregate-metadata-post-req.json0000664000175000017500000000021200000000000027274 0ustar00zuulzuul00000000000000{ "set_metadata": { "metadata": { "key": "value" } } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.81/aggregate-post-req.json0000664000175000017500000000013600000000000025523 0ustar00zuulzuul00000000000000{ "aggregate": { "name": "name", "availability_zone": "london" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.81/aggregate-post-resp.json0000664000175000017500000000045100000000000025705 0ustar00zuulzuul00000000000000{ "aggregate": { "availability_zone": "london", "created_at": "2019-10-08T15:15:27.988513", "deleted": false, "deleted_at": null, "id": 1, "name": "name", "updated_at": null, "uuid": "a25e34a2-4fc1-4876-82d0-cf930fa04b82" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.81/aggregate-remove-host-post-req.json0000664000175000017500000000007000000000000027766 0ustar00zuulzuul00000000000000{ "remove_host": { "host": "compute" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.81/aggregate-update-post-req.json0000664000175000017500000000014000000000000026776 0ustar00zuulzuul00000000000000{ "aggregate": { "name": "newname", "availability_zone": "nova2" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.81/aggregate-update-post-resp.json0000664000175000017500000000064200000000000027167 0ustar00zuulzuul00000000000000{ "aggregate": { "availability_zone": "nova2", "created_at": "2019-10-11T14:19:00.718841", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "nova2" }, "name": "newname", "updated_at": "2019-10-11T14:19:00.785838", "uuid": "4e7fa22f-f6cf-4e81-a5c7-6dc485815f81" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.81/aggregates-add-host-post-resp.json0000664000175000017500000000065000000000000027572 0ustar00zuulzuul00000000000000{ "aggregate": { "availability_zone": "london", "created_at": "2019-10-11T14:19:05.250053", "deleted": false, "deleted_at": null, "hosts": [ "compute" ], "id": 1, 
"metadata": { "availability_zone": "london" }, "name": "name", "updated_at": null, "uuid": "47832b50-a192-4900-affe-8f7fdf2d7f22" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.81/aggregates-get-resp.json0000664000175000017500000000061100000000000025660 0ustar00zuulzuul00000000000000{ "aggregate": { "availability_zone": "london", "created_at": "2019-10-11T14:19:07.366577", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "london" }, "name": "name", "updated_at": null, "uuid": "7c5ff84a-c901-4733-adf8-06875e265080" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.81/aggregates-list-get-resp.json0000664000175000017500000000076500000000000026643 0ustar00zuulzuul00000000000000{ "aggregates": [ { "availability_zone": "london", "created_at": "2019-10-11T14:19:07.386637", "deleted": false, "deleted_at": null, "hosts": [ "compute" ], "id": 1, "metadata": { "availability_zone": "london" }, "name": "name", "updated_at": null, "uuid": "070cb72c-f463-4f72-9c61-2c0556eb8c07" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.81/aggregates-metadata-post-resp.json0000664000175000017500000000067500000000000027656 0ustar00zuulzuul00000000000000{ "aggregate": { "availability_zone": "london", "created_at": "2019-10-11T14:19:03.103465", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "london", "key": "value" }, "name": "name", "updated_at": "2019-10-11T14:19:03.169058", "uuid": "0843db7c-f161-446d-84c8-d936320da2e8" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-aggregates/v2.81/aggregates-remove-host-post-resp.json0000664000175000017500000000061100000000000030334 0ustar00zuulzuul00000000000000{ "aggregate": { "availability_zone": "london", "created_at": "2019-10-11T14:19:05.250053", "deleted": false, "deleted_at": null, "hosts": [], "id": 1, "metadata": { "availability_zone": "london" }, "name": "name", "updated_at": null, "uuid": "47832b50-a192-4900-affe-8f7fdf2d7f22" } }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.029606 nova-32.0.0/doc/api_samples/os-assisted-volume-snapshots/0000775000175000017500000000000000000000000023407 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json0000664000175000017500000000047600000000000032013 0ustar00zuulzuul00000000000000{ "snapshot": { "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", "create_info": { "snapshot_id": "421752a6-acf6-4b2d-bc7a-119f9148cd8c", "type": "qcow2", "new_file": "new_file_name", "id": "421752a6-acf6-4b2d-bc7a-119f9148cd8c" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.json0000664000175000017500000000021400000000000032163 0ustar00zuulzuul00000000000000{ "snapshot": { "id": "421752a6-acf6-4b2d-bc7a-119f9148cd8c", "volumeId": "521752a6-acf6-4b2d-bc7a-119f9148cd8c" 
} }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.033606 nova-32.0.0/doc/api_samples/os-attach-interfaces/0000775000175000017500000000000000000000000021630 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-attach-interfaces/attach-interfaces-create-net_id-req.json0000664000175000017500000000031200000000000031372 0ustar00zuulzuul00000000000000{ "interfaceAttachment": { "fixed_ips": [ { "ip_address": "192.168.1.3" } ], "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-attach-interfaces/attach-interfaces-create-req.json0000664000175000017500000000014000000000000030131 0ustar00zuulzuul00000000000000{ "interfaceAttachment": { "port_id": "ce531f90-199f-48c0-816c-13e38010b442" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json0000664000175000017500000000062200000000000030320 0ustar00zuulzuul00000000000000{ "interfaceAttachment": { "fixed_ips": [ { "ip_address": "192.168.1.3", "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef" } ], "mac_addr": "fa:16:3e:4c:2c:30", "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "port_id": "ce531f90-199f-48c0-816c-13e38010b442", "port_state": "ACTIVE" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json0000664000175000017500000000071700000000000030035 0ustar00zuulzuul00000000000000{ "interfaceAttachments": [ { "fixed_ips": [ { "ip_address": "192.168.1.3", "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef" } ], "mac_addr": "fa:16:3e:4c:2c:30", "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "port_id": "ce531f90-199f-48c0-816c-13e38010b442", "port_state": "ACTIVE" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json0000664000175000017500000000062200000000000030035 0ustar00zuulzuul00000000000000{ "interfaceAttachment": { "fixed_ips": [ { "ip_address": "192.168.1.3", "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef" } ], "mac_addr": "fa:16:3e:4c:2c:30", "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "port_id": "ce531f90-199f-48c0-816c-13e38010b442", "port_state": "ACTIVE" } }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.033606 nova-32.0.0/doc/api_samples/os-attach-interfaces/v2.49/0000775000175000017500000000000000000000000022412 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-attach-interfaces/v2.49/attach-interfaces-create-req.json0000664000175000017500000000016700000000000030724 0ustar00zuulzuul00000000000000{ "interfaceAttachment": { "port_id": "ce531f90-199f-48c0-816c-13e38010b442", "tag": "foo" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-attach-interfaces/v2.49/attach-interfaces-create-resp.json0000664000175000017500000000062300000000000031103 
0ustar00zuulzuul00000000000000{ "interfaceAttachment": { "fixed_ips": [ { "ip_address": "192.168.1.3", "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef" } ], "mac_addr": "fa:16:3e:4c:2c:30", "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "port_id": "ce531f90-199f-48c0-816c-13e38010b442", "port_state": "ACTIVE" } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.033606 nova-32.0.0/doc/api_samples/os-attach-interfaces/v2.70/0000775000175000017500000000000000000000000022404 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-net_id-req.json0000664000175000017500000000034200000000000032151 0ustar00zuulzuul00000000000000{ "interfaceAttachment": { "fixed_ips": [ { "ip_address": "192.168.1.3" } ], "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "tag": "public" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-req.json0000664000175000017500000000017100000000000030711 0ustar00zuulzuul00000000000000{ "interfaceAttachment": { "port_id": "ce531f90-199f-48c0-816c-13e38010b442", "tag": "public" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-resp.json0000664000175000017500000000065300000000000031100 0ustar00zuulzuul00000000000000{ "interfaceAttachment": { "fixed_ips": [ { "ip_address": "192.168.1.3", "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef" } ], "mac_addr": "fa:16:3e:4c:2c:30", "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "port_id": "ce531f90-199f-48c0-816c-13e38010b442", "port_state": "ACTIVE", "tag": "public" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-list-resp.json0000664000175000017500000000075400000000000030612 0ustar00zuulzuul00000000000000{ "interfaceAttachments": [ { "fixed_ips": [ { "ip_address": "192.168.1.3", "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef" } ], "mac_addr": "fa:16:3e:4c:2c:30", "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "port_id": "ce531f90-199f-48c0-816c-13e38010b442", "port_state": "ACTIVE", "tag": "public" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-show-resp.json0000664000175000017500000000065300000000000030615 0ustar00zuulzuul00000000000000{ "interfaceAttachment": { "fixed_ips": [ { "ip_address": "192.168.1.3", "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef" } ], "mac_addr": "fa:16:3e:4c:2c:30", "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "port_id": "ce531f90-199f-48c0-816c-13e38010b442", "port_state": "ACTIVE", "tag": "public" } }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.033606 nova-32.0.0/doc/api_samples/os-availability-zone/0000775000175000017500000000000000000000000021666 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/os-availability-zone/availability-zone-detail-resp.json0000664000175000017500000000207300000000000030415 0ustar00zuulzuul00000000000000{ "availabilityZoneInfo": [ { "hosts": { "conductor": { "nova-conductor": { "active": true, "available": true, "updated_at": null } }, "scheduler": { "nova-scheduler": { "active": true, "available": true, "updated_at": null } } }, "zoneName": "internal", "zoneState": { "available": true } }, { "hosts": { "compute": { "nova-compute": { "active": true, "available": true, "updated_at": null } } }, "zoneName": "nova", "zoneState": { "available": true } } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-availability-zone/availability-zone-list-resp.json0000664000175000017500000000030200000000000030117 0ustar00zuulzuul00000000000000{ "availabilityZoneInfo": [ { "hosts": null, "zoneName": "nova", "zoneState": { "available": true } } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.033606 nova-32.0.0/doc/api_samples/os-baremetal-nodes/0000775000175000017500000000000000000000000021305 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-baremetal-nodes/baremetal-node-get-resp.json0000664000175000017500000000046400000000000026607 0ustar00zuulzuul00000000000000{ "node": { "cpus": "2", "disk_gb": "10", "host": "IRONIC MANAGED", "id": "058d27fa-241b-445a-a386-08c04f96db43", "instance_uuid": "1ea4e53e-149a-4f02-9515-590c9fb2315a", "interfaces": [], "memory_mb": "1024", "task_state": "active" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-baremetal-nodes/baremetal-node-list-resp.json0000664000175000017500000000106100000000000026775 0ustar00zuulzuul00000000000000{ "nodes": [ { "cpus": "2", "disk_gb": "10", "host": "IRONIC MANAGED", "id": "058d27fa-241b-445a-a386-08c04f96db43", "interfaces": [], "memory_mb": "1024", "task_state": "active" }, { "cpus": "2", "disk_gb": "10", "host": "IRONIC MANAGED", "id": "e2025409-f3ce-4d6a-9788-c565cf3b1b1c", "interfaces": [], "memory_mb": "1024", "task_state": "active" } ] }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.033606 nova-32.0.0/doc/api_samples/os-cells/0000775000175000017500000000000000000000000017345 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-cells/cells-capacities-resp.json0000664000175000017500000000116200000000000024414 0ustar00zuulzuul00000000000000{ "cell": { "capacities": { "disk_free": { "total_mb": 1052672, "units_by_mb": { "0": 0, "163840": 5, "20480": 46, "40960": 23, "81920": 11 } }, "ram_free": { "total_mb": 7680, "units_by_mb": { "16384": 0, "2048": 3, "4096": 1, "512": 13, "8192": 0 } } } } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-cells/cells-get-resp.json0000664000175000017500000000023500000000000023066 0ustar00zuulzuul00000000000000{ "cell": { "name": "cell3", "rpc_host": null, "rpc_port": null, "type": "child", "username": "username3" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/os-cells/cells-list-resp.json0000664000175000017500000000160400000000000023263 0ustar00zuulzuul00000000000000{ "cells": [ { "name": "cell1", "rpc_host": null, "rpc_port": null, "type": "child", "username": "username1" }, { "name": "cell3", "rpc_host": null, "rpc_port": null, "type": "child", "username": "username3" }, { "name": "cell5", "rpc_host": null, "rpc_port": null, "type": "child", "username": "username5" }, { "name": "cell2", "rpc_host": null, "rpc_port": null, "type": "parent", "username": "username2" }, { "name": "cell4", "rpc_host": null, "rpc_port": null, "type": "parent", "username": "username4" } ] }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.033606 nova-32.0.0/doc/api_samples/os-certificates/0000775000175000017500000000000000000000000020710 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-certificates/certificate-create-resp.json0000664000175000017500000000672300000000000026305 0ustar00zuulzuul00000000000000{ "certificate": { "data": "Certificate:\n Data:\n Version: 1 (0x0)\n Serial Number: 1018 (0x3fa)\n Signature Algorithm: md5WithRSAEncryption\n Issuer: O=NOVA ROOT, L=Mountain View, ST=California, C=US\n Validity\n Not Before: Aug 12 07:20:30 2013 GMT\n Not After : Aug 12 07:20:30 2014 GMT\n Subject: C=US, ST=California, O=OpenStack, OU=NovaDev, CN=openstack-fake-2013-08-12T07:20:30Z\n Subject Public Key Info:\n Public Key Algorithm: rsaEncryption\n Public-Key: (1024 bit)\n Modulus:\n 00:ac:ff:b1:d1:ed:54:4e:35:6c:34:b4:8f:0b:04:\n 50:25:a3:e2:4f:02:4c:4f:26:59:bd:f3:fd:eb:da:\n 18:c2:36:aa:63:42:72:1f:88:4f:3a:ec:e7:9f:8e:\n 44:2a:d3:b8:94:7b:20:41:f8:48:02:57:91:4c:16:\n 62:f1:21:d4:f2:40:b5:86:50:d9:61:f0:be:ff:d8:\n 8d:9f:4b:aa:6a:07:38:a2:7f:87:21:fc:e6:6e:1d:\n 0a:95:1a:90:0e:60:c2:24:e9:8e:e8:68:1b:e9:f3:\n c6:b0:7c:da:c5:20:66:9b:85:ea:f5:c9:a7:de:ee:\n 16:b1:51:a0:4d:e3:95:98:df\n Exponent: 65537 (0x10001)\n Signature Algorithm: md5WithRSAEncryption\n 15:42:ca:71:cc:32:af:dc:cf:45:91:df:8a:b8:30:c4:7f:78:\n 80:a7:25:c2:d9:81:3e:b3:dd:22:cc:3b:f8:94:e7:8f:04:f6:\n 93:04:9e:85:d4:10:40:ff:5a:07:47:24:b5:ae:93:ad:8d:e1:\n e6:54:4a:8d:4a:29:53:c4:8d:04:6b:0b:f6:af:38:78:02:c5:\n 05:19:89:82:2d:ba:fd:11:3c:1e:18:c9:0c:3d:03:93:6e:bc:\n 66:70:34:ee:03:78:8a:1d:3d:64:e8:20:2f:90:81:8e:49:1d:\n 07:37:15:66:42:cb:58:39:ad:56:ce:ed:47:c6:78:0b:0e:75:\n 29:ca\n-----BEGIN CERTIFICATE-----\nMIICNDCCAZ0CAgP6MA0GCSqGSIb3DQEBBAUAME4xEjAQBgNVBAoTCU5PVkEgUk9P\nVDEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzETMBEGA1UECBMKQ2FsaWZvcm5pYTEL\nMAkGA1UEBhMCVVMwHhcNMTMwODEyMDcyMDMwWhcNMTQwODEyMDcyMDMwWjB2MQsw\nCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UECgwJT3BlblN0\nYWNrMRAwDgYDVQQLDAdOb3ZhRGV2MSwwKgYDVQQDDCNvcGVuc3RhY2stZmFrZS0y\nMDEzLTA4LTEyVDA3OjIwOjMwWjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA\nrP+x0e1UTjVsNLSPCwRQJaPiTwJMTyZZvfP969oYwjaqY0JyH4hPOuznn45EKtO4\nlHsgQfhIAleRTBZi8SHU8kC1hlDZYfC+/9iNn0uqagc4on+HIfzmbh0KlRqQDmDC\nJOmO6Ggb6fPGsHzaxSBmm4Xq9cmn3u4WsVGgTeOVmN8CAwEAATANBgkqhkiG9w0B\nAQQFAAOBgQAVQspxzDKv3M9Fkd+KuDDEf3iApyXC2YE+s90izDv4lOePBPaTBJ6F\n1BBA/1oHRyS1rpOtjeHmVEqNSilTxI0Eawv2rzh4AsUFGYmCLbr9ETweGMkMPQOT\nbrxmcDTuA3iKHT1k6CAvkIGOSR0HNxVmQstYOa1Wzu1HxngLDnUpyg==\n-----END CERTIFICATE-----\n", "private_key": "-----BEGIN RSA PRIVATE 
KEY-----\nMIICXgIBAAKBgQCs/7HR7VRONWw0tI8LBFAlo+JPAkxPJlm98/3r2hjCNqpjQnIf\niE867OefjkQq07iUeyBB+EgCV5FMFmLxIdTyQLWGUNlh8L7/2I2fS6pqBziif4ch\n/OZuHQqVGpAOYMIk6Y7oaBvp88awfNrFIGabher1yafe7haxUaBN45WY3wIDAQAB\nAoGBAIrcr2I/KyWf0hw4Nn10V9TuyE/9Gz2JHg3QFKjFJox2DqygADT5WAeHc6Bq\nNKNf0NA2SL1LSpm+ql01tvOw4VjE5TF6OHiIzHuTTnXggG6vuA8rxp6L24HtkAcC\n0CBno9ggSX6jVornJPBfxpkwITYSvH57BUFVD7ovbPyWGzS5AkEA1JeUtL6zxwps\nWRr1aJ8Ill2uQk/RUIvSZOU61s+B190zvHikFy8LD8CI6vvBmjC/IZuZVedufjqs\n4vX82uDO3QJBANBSh2b2dyB4AGVFY9vXMRtALAspJHbLHy+zTKxlGPFiuz7Se3ps\n8Kehz4C/CBXgQkk194dwFSGE19/PQfyJROsCQQCFFDJZhrtBUMwMZ2zSRiN5BUGt\nbwuncS+OS1Su3Yz5VRYq2BZYEPHKtYrAFkLWQ8eRwTaWaN5pFE/fb38OgQXdAkA4\nDm0W/K0zlHbuyUxEpNQ28/6mBi0ktiWvLT0tioq6sYmXLwZA/D2JrhXrG/xt/ol3\nr8jqrfNRsLByLhAgh0N/AkEAl2eR0O97lTEgFNqzIQwVmIAn9mBO3cnf3tycvlDU\nm6eb2CS242y4QalfCCAEjxoJURdfsm3/D1iFo00X+IWF+A==\n-----END RSA PRIVATE KEY-----\n" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-certificates/certificate-get-root-resp.json0000664000175000017500000000214400000000000026573 0ustar00zuulzuul00000000000000{ "certificate": { "data": "-----BEGIN CERTIFICATE-----\nMIICyzCCAjSgAwIBAgIJAJ8zSIxUp/m4MA0GCSqGSIb3DQEBBAUAME4xEjAQBgNV\nBAoTCU5PVkEgUk9PVDEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzETMBEGA1UECBMK\nQ2FsaWZvcm5pYTELMAkGA1UEBhMCVVMwHhcNMTIxMDE3MDEzMzM5WhcNMTMxMDE3\nMDEzMzM5WjBOMRIwEAYDVQQKEwlOT1ZBIFJPT1QxFjAUBgNVBAcTDU1vdW50YWlu\nIFZpZXcxEzARBgNVBAgTCkNhbGlmb3JuaWExCzAJBgNVBAYTAlVTMIGfMA0GCSqG\nSIb3DQEBAQUAA4GNADCBiQKBgQDXW4QfQQxJG4MqurqK8nU/Lge0mfNKxXj/Gwvg\n2sQVwxzmKfoxih8Nn6yt0yHMNjhoji1UoWI03TXUnPZRAZmsypGKZeBd7Y1ZOCPB\nXGZVGrQm+PB2kZU+3cD8fVKcueMLLeZ+LRt5d0njnoKhc5xjqMlfFPimHMba4OL6\nTnYzPQIDAQABo4GwMIGtMAwGA1UdEwQFMAMBAf8wHQYDVR0OBBYEFKyoKu4SMOFM\ngx5Ec7p0nrCkabvxMH4GA1UdIwR3MHWAFKyoKu4SMOFMgx5Ec7p0nrCkabvxoVKk\nUDBOMRIwEAYDVQQKEwlOT1ZBIFJPT1QxFjAUBgNVBAcTDU1vdW50YWluIFZpZXcx\nEzARBgNVBAgTCkNhbGlmb3JuaWExCzAJBgNVBAYTAlVTggkAnzNIjFSn+bgwDQYJ\nKoZIhvcNAQEEBQADgYEAXuvXlu1o/SVvykSLhHW8QiAY00yzN/eDzYmZGomgiuoO\n/x+ayVzbrz1UWZnBD+lC4hll2iELSmf22LjLoF+s/9NyPqHxGL3FrfatBkndaiF8\nAx/TMEyCPl7IQWi+3zzatqOKHSHiG7a9SGn/7o2aNTIWKVulfy5GvmbBjBM/0UE=\n-----END CERTIFICATE-----\n", "private_key": null } }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.037606 nova-32.0.0/doc/api_samples/os-cloudpipe/0000775000175000017500000000000000000000000020227 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-cloudpipe/cloud-pipe-create-req.json0000664000175000017500000000013200000000000025205 0ustar00zuulzuul00000000000000{ "cloudpipe": { "project_id": "059f21e3-c20e-4efc-9e7a-eba2ab3c6f9a" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-cloudpipe/cloud-pipe-create-resp.json0000664000175000017500000000007500000000000025375 0ustar00zuulzuul00000000000000{ "instance_id": "1e9b8425-34af-488e-b969-4d46f4a6382e" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json0000664000175000017500000000056500000000000024715 0ustar00zuulzuul00000000000000{ "cloudpipes": [ { "created_at": "2012-11-27T17:18:01Z", "instance_id": "27deecdb-baa3-4a26-9c82-32994b815b01", "internal_ip": "192.168.1.30", "project_id": 
"fa1765bd-a352-49c7-a6b7-8ee108a3cb0c", "public_ip": "127.0.0.1", "public_port": 22, "state": "down" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-cloudpipe/cloud-pipe-update-req.json0000664000175000017500000000014000000000000025223 0ustar00zuulzuul00000000000000{ "configure_project": { "vpn_ip": "192.168.1.1", "vpn_port": "2000" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315688.8376045 nova-32.0.0/doc/api_samples/os-console-auth-tokens/0000775000175000017500000000000000000000000022145 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.037606 nova-32.0.0/doc/api_samples/os-console-auth-tokens/v2.31/0000775000175000017500000000000000000000000022716 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-console-auth-tokens/v2.31/create-serial-console-req.json0000664000175000017500000000013000000000000030550 0ustar00zuulzuul00000000000000{ "remote_console": { "protocol": "serial", "type": "serial" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-console-auth-tokens/v2.31/get-console-connect-info-get-resp.json0000664000175000017500000000032600000000000032135 0ustar00zuulzuul00000000000000{ "console": { "instance_uuid": "b48316c5-71e8-45e4-9884-6c78055b9b13", "host": "localhost", "port": 5900, "internal_access_path": "51af38c3-555e-4884-a314-6c8cdde37444" } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.037606 nova-32.0.0/doc/api_samples/os-console-auth-tokens/v2.99/0000775000175000017500000000000000000000000022734 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-console-auth-tokens/v2.99/create-spice-direct-console-req.json0000664000175000017500000000013500000000000031667 0ustar00zuulzuul00000000000000{ "remote_console": { "protocol": "spice", "type": "spice-direct" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-console-auth-tokens/v2.99/get-console-connect-info-get-resp.json0000664000175000017500000000033000000000000032146 0ustar00zuulzuul00000000000000{ "console": { "host": "fakespiceconsole.com", "instance_uuid": "16802173-4e67-44f9-ba84-6d99080b81b5", "internal_access_path": null, "port": 6969, "tls_port": 6970 } }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.037606 nova-32.0.0/doc/api_samples/os-console-output/0000775000175000017500000000000000000000000021243 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-console-output/console-output-post-req.json0000664000175000017500000000007400000000000026707 0ustar00zuulzuul00000000000000{ "os-getConsoleOutput": { "length": 50 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-console-output/console-output-post-resp.json0000664000175000017500000000007300000000000027070 
0ustar00zuulzuul00000000000000{ "output": "FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE" }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.037606 nova-32.0.0/doc/api_samples/os-create-backup/0000775000175000017500000000000000000000000020751 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-create-backup/create-backup-req.json0000664000175000017500000000016200000000000025136 0ustar00zuulzuul00000000000000{ "createBackup": { "name": "Backup 1", "backup_type": "daily", "rotation": 1 } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.037606 nova-32.0.0/doc/api_samples/os-create-backup/v2.45/0000775000175000017500000000000000000000000021527 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-create-backup/v2.45/create-backup-req.json0000664000175000017500000000016300000000000025715 0ustar00zuulzuul00000000000000{ "createBackup": { "name": "Backup 1", "backup_type": "weekly", "rotation": 1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-create-backup/v2.45/create-backup-resp.json0000664000175000017500000000007200000000000026076 0ustar00zuulzuul00000000000000{ "image_id": "0e7761dd-ee98-41f0-ba35-05994e446431" }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.037606 nova-32.0.0/doc/api_samples/os-deferred-delete/0000775000175000017500000000000000000000000021263 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-deferred-delete/force-delete-post-req.json0000664000175000017500000000003400000000000026261 0ustar00zuulzuul00000000000000{ "forceDelete": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-deferred-delete/restore-post-req.json0000664000175000017500000000002700000000000025410 0ustar00zuulzuul00000000000000{ "restore": null }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0416062 nova-32.0.0/doc/api_samples/os-evacuate/0000775000175000017500000000000000000000000020040 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-evacuate/server-evacuate-find-host-req.json0000664000175000017500000000014400000000000026511 0ustar00zuulzuul00000000000000{ "evacuate": { "adminPass": "MySecretPass", "onSharedStorage": "False" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-evacuate/server-evacuate-find-host-resp.json0000664000175000017500000000004400000000000026672 0ustar00zuulzuul00000000000000{ "adminPass": "MySecretPass" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-evacuate/server-evacuate-req.json0000664000175000017500000000023000000000000024614 0ustar00zuulzuul00000000000000{ "evacuate": { "host": "b419863b7d814906a68fb31703c0dbd6", 
"adminPass": "MySecretPass", "onSharedStorage": "False" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-evacuate/server-evacuate-resp.json0000664000175000017500000000004400000000000025001 0ustar00zuulzuul00000000000000{ "adminPass": "MySecretPass" } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0416062 nova-32.0.0/doc/api_samples/os-evacuate/v2.14/0000775000175000017500000000000000000000000020612 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-evacuate/v2.14/server-evacuate-find-host-req.json0000664000175000017500000000010000000000000027253 0ustar00zuulzuul00000000000000{ "evacuate": { "adminPass": "MySecretPass" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-evacuate/v2.14/server-evacuate-req.json0000664000175000017500000000013400000000000025371 0ustar00zuulzuul00000000000000{ "evacuate": { "host": "testHost", "adminPass": "MySecretPass" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0416062 nova-32.0.0/doc/api_samples/os-evacuate/v2.29/0000775000175000017500000000000000000000000020620 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-evacuate/v2.29/server-evacuate-find-host-req.json0000664000175000017500000000007700000000000027276 0ustar00zuulzuul00000000000000{ "evacuate": { "adminPass": "MySecretPass" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-evacuate/v2.29/server-evacuate-req.json0000664000175000017500000000016400000000000025402 0ustar00zuulzuul00000000000000{ "evacuate": { "host": "testHost", "adminPass": "MySecretPass", "force": false } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0416062 nova-32.0.0/doc/api_samples/os-evacuate/v2.68/0000775000175000017500000000000000000000000020623 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-evacuate/v2.68/server-evacuate-find-host-req.json0000664000175000017500000000007700000000000027301 0ustar00zuulzuul00000000000000{ "evacuate": { "adminPass": "MySecretPass" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-evacuate/v2.68/server-evacuate-req.json0000664000175000017500000000013300000000000025401 0ustar00zuulzuul00000000000000{ "evacuate": { "host": "testHost", "adminPass": "MySecretPass" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0416062 nova-32.0.0/doc/api_samples/os-evacuate/v2.95/0000775000175000017500000000000000000000000020623 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json0000664000175000017500000000003400000000000027272 0ustar00zuulzuul00000000000000{ "evacuate": { } } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json0000664000175000017500000000006700000000000025407 0ustar00zuulzuul00000000000000{ "evacuate": { "host": "testHost" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0416062 nova-32.0.0/doc/api_samples/os-fixed-ips/0000775000175000017500000000000000000000000020133 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-fixed-ips/fixedip-post-req.json0000664000175000017500000000002700000000000024225 0ustar00zuulzuul00000000000000{ "reserve": null }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-fixed-ips/fixedips-get-resp.json0000664000175000017500000000023700000000000024367 0ustar00zuulzuul00000000000000{ "fixed_ip": { "address": "192.168.1.1", "cidr": "192.168.1.0/24", "host": "host", "hostname": "compute.host.pvt" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0416062 nova-32.0.0/doc/api_samples/os-fixed-ips/v2.4/0000775000175000017500000000000000000000000020624 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-fixed-ips/v2.4/fixedip-post-req.json0000664000175000017500000000002700000000000024716 0ustar00zuulzuul00000000000000{ "reserve": null }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-fixed-ips/v2.4/fixedips-get-resp.json0000664000175000017500000000027200000000000025057 0ustar00zuulzuul00000000000000{ "fixed_ip": { "address": "192.168.1.1", "cidr": "192.168.1.0/24", "host": "host", "hostname": "compute.host.pvt", "reserved": false } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0456061 nova-32.0.0/doc/api_samples/os-floating-ip-dns/0000775000175000017500000000000000000000000021236 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json0000664000175000017500000000012400000000000032204 0ustar00zuulzuul00000000000000{ "dns_entry": { "ip": "192.168.53.11", "dns_type": "A" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json0000664000175000017500000000024700000000000032374 0ustar00zuulzuul00000000000000{ "dns_entry": { "domain": "domain1.example.org", "id": null, "ip": "192.168.1.1", "name": "instance1", "type": "A" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json0000664000175000017500000000013100000000000031043 0ustar00zuulzuul00000000000000{ "domain_entry": { "scope": "public", "project": "project1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json0000664000175000017500000000024400000000000031232 0ustar00zuulzuul00000000000000{ "domain_entry": { "availability_zone": null, "domain": "domain1.example.org", "project": "project1", "scope": "public" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json0000664000175000017500000000025000000000000030004 0ustar00zuulzuul00000000000000{ "dns_entry": { "domain": "domain1.example.org", "id": null, "ip": "192.168.1.1", "name": "instance1", "type": null } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json0000664000175000017500000000032200000000000030200 0ustar00zuulzuul00000000000000{ "dns_entries": [ { "domain": "domain1.example.org", "id": null, "ip": "192.168.1.1", "name": "instance1", "type": null } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json0000664000175000017500000000031200000000000027040 0ustar00zuulzuul00000000000000{ "domain_entries": [ { "availability_zone": null, "domain": "domain1.example.org", "project": "project1", "scope": "public" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0456061 nova-32.0.0/doc/api_samples/os-floating-ip-pools/0000775000175000017500000000000000000000000021606 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-floating-ip-pools/floatingippools-list-resp.json0000664000175000017500000000020500000000000027627 0ustar00zuulzuul00000000000000{ "floating_ip_pools": [ { "name": "pool1" }, { "name": "pool2" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0456061 nova-32.0.0/doc/api_samples/os-floating-ips/0000775000175000017500000000000000000000000020637 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-floating-ips/floating-ips-create-req.json0000664000175000017500000000003100000000000026146 0ustar00zuulzuul00000000000000{ "pool": "public" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-floating-ips/floating-ips-create-resp.json0000664000175000017500000000030200000000000026331 0ustar00zuulzuul00000000000000{ "floating_ip": { "fixed_ip": null, "id": "8baeddb4-45e2-4c36-8cb7-d79439a5f67c", "instance_id": null, "ip": "172.24.4.17", "pool": "public" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-floating-ips/floating-ips-get-resp.json0000664000175000017500000000030200000000000025645 0ustar00zuulzuul00000000000000{ "floating_ip": { "fixed_ip": null, "id": "8baeddb4-45e2-4c36-8cb7-d79439a5f67c", "instance_id": null, "ip": "172.24.4.17", "pool": "public" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/os-floating-ips/floating-ips-list-empty-resp.json0000664000175000017500000000003300000000000027176 0ustar00zuulzuul00000000000000{ "floating_ips": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-floating-ips/floating-ips-list-resp.json0000664000175000017500000000066700000000000026057 0ustar00zuulzuul00000000000000{ "floating_ips": [ { "fixed_ip": null, "id": "8baeddb4-45e2-4c36-8cb7-d79439a5f67c", "instance_id": null, "ip": "172.24.4.17", "pool": "public" }, { "fixed_ip": null, "id": "05ef7490-745a-4af9-98e5-610dc97493c4", "instance_id": null, "ip": "172.24.4.78", "pool": "public" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0456061 nova-32.0.0/doc/api_samples/os-floating-ips-bulk/0000775000175000017500000000000000000000000021572 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json0000664000175000017500000000020600000000000030040 0ustar00zuulzuul00000000000000{ "floating_ips_bulk_create": { "ip_range": "192.168.1.0/24", "pool": "nova", "interface": "eth0" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json0000664000175000017500000000020500000000000030221 0ustar00zuulzuul00000000000000{ "floating_ips_bulk_create": { "interface": "eth0", "ip_range": "192.168.1.0/24", "pool": "nova" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json0000664000175000017500000000004400000000000030037 0ustar00zuulzuul00000000000000{ "ip_range": "192.168.1.0/24" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json0000664000175000017500000000006400000000000030223 0ustar00zuulzuul00000000000000{ "floating_ips_bulk_delete": "192.168.1.0/24" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json0000664000175000017500000000037100000000000031320 0ustar00zuulzuul00000000000000{ "floating_ip_info": [ { "address": "10.10.10.3", "instance_uuid": null, "fixed_ip": null, "interface": "eth0", "pool": "nova", "project_id": null } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json0000664000175000017500000000124700000000000027740 0ustar00zuulzuul00000000000000{ "floating_ip_info": [ { "address": "10.10.10.1", "instance_uuid": null, "fixed_ip": null, "interface": "eth0", "pool": "nova", "project_id": null }, { "address": "10.10.10.2", "instance_uuid": null, "fixed_ip": null, "interface": "eth0", "pool": "nova", "project_id": null }, { "address": "10.10.10.3", "instance_uuid": null, "fixed_ip": null, "interface": "eth0", "pool": "nova", "project_id": null } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0456061 
nova-32.0.0/doc/api_samples/os-fping/0000775000175000017500000000000000000000000017346 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-fping/fping-get-details-resp.json0000664000175000017500000000024000000000000024507 0ustar00zuulzuul00000000000000{ "server": { "alive": false, "id": "f5e6fd6d-c0a3-4f9e-aabf-d69196b6d11a", "project_id": "6f70656e737461636b20342065766572" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-fping/fping-get-resp.json0000664000175000017500000000030100000000000023062 0ustar00zuulzuul00000000000000{ "servers": [ { "alive": false, "id": "1d1aea35-472b-40cf-9337-8eb68480aaa1", "project_id": "6f70656e737461636b20342065766572" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0496063 nova-32.0.0/doc/api_samples/os-hosts/0000775000175000017500000000000000000000000017403 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hosts/host-get-reboot.json0000664000175000017500000000012100000000000023312 0ustar00zuulzuul00000000000000{ "host": "9557750dbc464741a89c907921c1cb31", "power_action": "reboot" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hosts/host-get-resp.json0000664000175000017500000000140600000000000023000 0ustar00zuulzuul00000000000000{ "host": [ { "resource": { "cpu": 2, "disk_gb": 1028, "host": "c1a7de0ac9d94e4baceae031d05caae3", "memory_mb": 8192, "project": "(total)" } }, { "resource": { "cpu": 0, "disk_gb": 0, "host": "c1a7de0ac9d94e4baceae031d05caae3", "memory_mb": 512, "project": "(used_now)" } }, { "resource": { "cpu": 0, "disk_gb": 0, "host": "c1a7de0ac9d94e4baceae031d05caae3", "memory_mb": 0, "project": "(used_max)" } } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hosts/host-get-shutdown.json0000664000175000017500000000012300000000000023675 0ustar00zuulzuul00000000000000{ "host": "77cfa0002e4d45fe97f185968111b27b", "power_action": "shutdown" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hosts/host-get-startup.json0000664000175000017500000000012200000000000023523 0ustar00zuulzuul00000000000000{ "host": "4b392b27930343bbaa27fd5d8328a564", "power_action": "startup" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hosts/host-put-maintenance-req.json0000664000175000017500000000007600000000000025131 0ustar00zuulzuul00000000000000{ "status": "enable", "maintenance_mode": "disable" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hosts/host-put-maintenance-resp.json0000664000175000017500000000016700000000000025314 0ustar00zuulzuul00000000000000{ "host": "65c5d5b7e3bd44308e67fc50f362aee6", "maintenance_mode": "off_maintenance", "status": "enabled" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/os-hosts/hosts-list-resp.json0000664000175000017500000000072100000000000023356 0ustar00zuulzuul00000000000000{ "hosts": [ { "host_name": "b6e4adbc193d428ea923899d07fb001e", "service": "conductor", "zone": "internal" }, { "host_name": "09c025b0efc64211bd23fc50fa974cdf", "service": "compute", "zone": "nova" }, { "host_name": "abffda96592c4eacaf4111c28fddee17", "service": "scheduler", "zone": "internal" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0496063 nova-32.0.0/doc/api_samples/os-hypervisors/0000775000175000017500000000000000000000000020640 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/hypervisors-detail-resp.json0000664000175000017500000000175600000000000026350 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "cpu_info": "{\"arch\": \"x86_64\", \"model\": \"Nehalem\", \"vendor\": \"Intel\", \"features\": [\"pge\", \"clflush\"], \"topology\": {\"cores\": 1, \"threads\": 1, \"sockets\": 4}}", "current_workload": 0, "status": "enabled", "state": "up", "disk_available_least": 0, "host_ip": "1.1.1.1", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "running_vms": 0, "service": { "host": "e6a37ee802d74863ab8b91ade8f12a67", "id": 2, "disabled_reason": null }, "vcpus": 2, "vcpus_used": 0 } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/hypervisors-list-resp.json0000664000175000017500000000026300000000000026051 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": 1, "state": "up", "status": "enabled" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/hypervisors-search-resp.json0000664000175000017500000000026300000000000026343 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": 1, "state": "up", "status": "enabled" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/hypervisors-show-resp.json0000664000175000017500000000157100000000000026061 0ustar00zuulzuul00000000000000{ "hypervisor": { "cpu_info": "{\"arch\": \"x86_64\", \"model\": \"Nehalem\", \"vendor\": \"Intel\", \"features\": [\"pge\", \"clflush\"], \"topology\": {\"cores\": 1, \"threads\": 1, \"sockets\": 4}}", "state": "up", "status": "enabled", "current_workload": 0, "disk_available_least": 0, "host_ip": "1.1.1.1", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "running_vms": 0, "service": { "host": "043b3cacf6f34c90a7245151fc8ebcda", "id": 2, "disabled_reason": null }, "vcpus": 2, "vcpus_used": 0 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/hypervisors-statistics-resp.json0000664000175000017500000000055700000000000027276 0ustar00zuulzuul00000000000000{ 
"hypervisor_statistics": { "count": 1, "current_workload": 0, "disk_available_least": 0, "free_disk_gb": 1028, "free_ram_mb": 7680, "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "running_vms": 0, "vcpus": 2, "vcpus_used": 0 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/hypervisors-uptime-resp.json0000664000175000017500000000035200000000000026400 0ustar00zuulzuul00000000000000{ "hypervisor": { "hypervisor_hostname": "fake-mini", "id": 1, "state": "up", "status": "enabled", "uptime": " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/hypervisors-with-servers-resp.json0000664000175000017500000000100200000000000027530 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": 1, "state": "up", "status": "enabled", "servers": [ { "name": "test_server1", "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" }, { "name": "test_server2", "uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/hypervisors-without-servers-resp.json0000664000175000017500000000026300000000000030270 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": 1, "state": "up", "status": "enabled" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0536063 nova-32.0.0/doc/api_samples/os-hypervisors/v2.28/0000775000175000017500000000000000000000000021417 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.28/hypervisors-detail-resp.json0000664000175000017500000000227000000000000027117 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "cpu_info": { "arch": "x86_64", "model": "Nehalem", "vendor": "Intel", "features": [ "pge", "clflush" ], "topology": { "cores": 1, "threads": 1, "sockets": 4 } }, "current_workload": 0, "status": "enabled", "state": "up", "disk_available_least": 0, "host_ip": "1.1.1.1", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "running_vms": 0, "service": { "host": "e6a37ee802d74863ab8b91ade8f12a67", "id": 2, "disabled_reason": null }, "vcpus": 2, "vcpus_used": 0 } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.28/hypervisors-list-resp.json0000664000175000017500000000026300000000000026630 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": 1, "state": "up", "status": "enabled" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.28/hypervisors-search-resp.json0000664000175000017500000000026300000000000027122 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": 1, "state": "up", "status": "enabled" } ] } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.28/hypervisors-show-resp.json0000664000175000017500000000201700000000000026634 0ustar00zuulzuul00000000000000{ "hypervisor": { "cpu_info": { "arch": "x86_64", "model": "Nehalem", "vendor": "Intel", "features": [ "pge", "clflush" ], "topology": { "cores": 1, "threads": 1, "sockets": 4 } }, "state": "up", "status": "enabled", "current_workload": 0, "disk_available_least": 0, "host_ip": "1.1.1.1", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": 1, "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "running_vms": 0, "service": { "host": "043b3cacf6f34c90a7245151fc8ebcda", "id": 2, "disabled_reason": null }, "vcpus": 2, "vcpus_used": 0 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.28/hypervisors-statistics-resp.json0000664000175000017500000000055700000000000030055 0ustar00zuulzuul00000000000000{ "hypervisor_statistics": { "count": 1, "current_workload": 0, "disk_available_least": 0, "free_disk_gb": 1028, "free_ram_mb": 7680, "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "running_vms": 0, "vcpus": 2, "vcpus_used": 0 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.28/hypervisors-uptime-resp.json0000664000175000017500000000035200000000000027157 0ustar00zuulzuul00000000000000{ "hypervisor": { "hypervisor_hostname": "fake-mini", "id": 1, "state": "up", "status": "enabled", "uptime": " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.28/hypervisors-with-servers-resp.json0000664000175000017500000000100200000000000030307 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": 1, "state": "up", "status": "enabled", "servers": [ { "name": "test_server1", "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" }, { "name": "test_server2", "uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.28/hypervisors-without-servers-resp.json0000664000175000017500000000026300000000000031047 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": 1, "state": "up", "status": "enabled" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0536063 nova-32.0.0/doc/api_samples/os-hypervisors/v2.33/0000775000175000017500000000000000000000000021413 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.33/hypervisors-detail-resp.json0000664000175000017500000000255200000000000027116 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "cpu_info": { "arch": "x86_64", "model": "Nehalem", "vendor": "Intel", "features": [ "pge", "clflush" ], "topology": { "cores": 1, "threads": 1, "sockets": 4 } }, "current_workload": 0, "status": 
"enabled", "state": "up", "disk_available_least": 0, "host_ip": "1.1.1.1", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_hostname": "host1", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": 2, "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "running_vms": 0, "service": { "host": "host1", "id": 6, "disabled_reason": null }, "vcpus": 2, "vcpus_used": 0 } ], "hypervisors_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors/detail?limit=1&marker=2", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.33/hypervisors-list-resp.json0000664000175000017500000000057100000000000026626 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "hypervisor_hostname": "host1", "id": 2, "state": "up", "status": "enabled" } ], "hypervisors_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors?limit=1&marker=2", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0536063 nova-32.0.0/doc/api_samples/os-hypervisors/v2.53/0000775000175000017500000000000000000000000021415 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json0000664000175000017500000000272700000000000027124 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "cpu_info": { "arch": "x86_64", "model": "Nehalem", "vendor": "Intel", "features": [ "pge", "clflush" ], "topology": { "cores": 1, "threads": 1, "sockets": 4 } }, "current_workload": 0, "status": "enabled", "state": "up", "disk_available_least": 0, "host_ip": "1.1.1.1", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_hostname": "host2", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": "1bb62a04-c576-402c-8147-9e89757a09e3", "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "running_vms": 0, "service": { "host": "host1", "id": "62f62f6e-a713-4cbe-87d3-3ecf8a1e0f8d", "disabled_reason": null }, "vcpus": 2, "vcpus_used": 0 } ], "hypervisors_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors/detail?limit=1&marker=1bb62a04-c576-402c-8147-9e89757a09e3", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.53/hypervisors-detail-with-servers-resp.json0000664000175000017500000000306600000000000031561 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "cpu_info": { "arch": "x86_64", "model": "Nehalem", "vendor": "Intel", "features": [ "pge", "clflush" ], "topology": { "cores": 1, "threads": 1, "sockets": 4 } }, "current_workload": 0, "status": "enabled", "servers": [ { "name": "test_server1", "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" }, { "name": "test_server2", "uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb" } ], "state": "up", "disk_available_least": 0, "host_ip": "1.1.1.1", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": "b1e43b5f-eec1-44e0-9f10-7b4945c0226d", "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "running_vms": 0, "service": { "host": "host1", "id": 
"5d343e1d-938e-4284-b98b-6a2b5406ba76", "disabled_reason": null }, "vcpus": 2, "vcpus_used": 0 } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.53/hypervisors-list-resp.json0000664000175000017500000000070100000000000026623 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "hypervisor_hostname": "host2", "id": "1bb62a04-c576-402c-8147-9e89757a09e3", "state": "up", "status": "enabled" } ], "hypervisors_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors?limit=1&marker=1bb62a04-c576-402c-8147-9e89757a09e3", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.53/hypervisors-search-resp.json0000664000175000017500000000033000000000000027113 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": "b1e43b5f-eec1-44e0-9f10-7b4945c0226d", "state": "up", "status": "enabled" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.53/hypervisors-show-resp.json0000664000175000017500000000213100000000000026627 0ustar00zuulzuul00000000000000{ "hypervisor": { "cpu_info": { "arch": "x86_64", "model": "Nehalem", "vendor": "Intel", "features": [ "pge", "clflush" ], "topology": { "cores": 1, "threads": 1, "sockets": 4 } }, "state": "up", "status": "enabled", "current_workload": 0, "disk_available_least": 0, "host_ip": "1.1.1.1", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": "b1e43b5f-eec1-44e0-9f10-7b4945c0226d", "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "running_vms": 0, "service": { "host": "043b3cacf6f34c90a7245151fc8ebcda", "id": "5d343e1d-938e-4284-b98b-6a2b5406ba76", "disabled_reason": null }, "vcpus": 2, "vcpus_used": 0 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.53/hypervisors-show-with-servers-resp.json0000664000175000017500000000260000000000000031270 0ustar00zuulzuul00000000000000{ "hypervisor": { "cpu_info": { "arch": "x86_64", "model": "Nehalem", "vendor": "Intel", "features": [ "pge", "clflush" ], "topology": { "cores": 1, "threads": 1, "sockets": 4 } }, "state": "up", "status": "enabled", "servers": [ { "name": "test_server1", "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" }, { "name": "test_server2", "uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb" } ], "current_workload": 0, "disk_available_least": 0, "host_ip": "1.1.1.1", "free_disk_gb": 1028, "free_ram_mb": 7680, "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": "b1e43b5f-eec1-44e0-9f10-7b4945c0226d", "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "running_vms": 0, "service": { "host": "043b3cacf6f34c90a7245151fc8ebcda", "id": "5d343e1d-938e-4284-b98b-6a2b5406ba76", "disabled_reason": null }, "vcpus": 2, "vcpus_used": 0 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.53/hypervisors-statistics-resp.json0000664000175000017500000000055700000000000030053 0ustar00zuulzuul00000000000000{ 
"hypervisor_statistics": { "count": 1, "current_workload": 0, "disk_available_least": 0, "free_disk_gb": 1028, "free_ram_mb": 7680, "local_gb": 1028, "local_gb_used": 0, "memory_mb": 8192, "memory_mb_used": 512, "running_vms": 0, "vcpus": 2, "vcpus_used": 0 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.53/hypervisors-uptime-resp.json0000664000175000017500000000041700000000000027157 0ustar00zuulzuul00000000000000{ "hypervisor": { "hypervisor_hostname": "fake-mini", "id": "b1e43b5f-eec1-44e0-9f10-7b4945c0226d", "state": "up", "status": "enabled", "uptime": " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.53/hypervisors-with-servers-resp.json0000664000175000017500000000104700000000000030316 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": "b1e43b5f-eec1-44e0-9f10-7b4945c0226d", "state": "up", "status": "enabled", "servers": [ { "name": "test_server1", "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" }, { "name": "test_server2", "uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.53/hypervisors-without-servers-resp.json0000664000175000017500000000033000000000000031040 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": "b1e43b5f-eec1-44e0-9f10-7b4945c0226d", "state": "up", "status": "enabled" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0576062 nova-32.0.0/doc/api_samples/os-hypervisors/v2.88/0000775000175000017500000000000000000000000021425 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.88/hypervisors-detail-resp.json0000664000175000017500000000141200000000000027122 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "host_ip": "192.168.1.135", "hypervisor_hostname": "host2", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": "f6d28711-9c10-470e-8b31-c03f498b0032", "service": { "disabled_reason": null, "host": "host2", "id": "21bbb5fb-ec98-48b3-89cf-c94402c55611" }, "state": "up", "status": "enabled", "uptime": null } ], "hypervisors_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors/detail?limit=1&marker=f6d28711-9c10-470e-8b31-c03f498b0032", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.88/hypervisors-detail-with-servers-resp.json0000664000175000017500000000155300000000000031570 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "host_ip": "192.168.1.135", "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": "28b0e607-d58a-4602-a511-efe18024f4d5", "servers": [ { "name": "test_server1", "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" }, { "name": "test_server2", "uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb" } ], "service": { "disabled_reason": null, "host": "compute", "id": "40e769a5-7489-4cf3-be46-f6bd3e4e3c25" }, "state": "up", "status": 
"enabled", "uptime": null } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.88/hypervisors-list-resp.json0000664000175000017500000000070000000000000026632 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "hypervisor_hostname": "host2", "id": "bfb90ba3-e13e-4413-90ff-5cdbfea727e2", "state": "up", "status": "enabled" } ], "hypervisors_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors?limit=1&marker=bfb90ba3-e13e-4413-90ff-5cdbfea727e2", "rel": "next" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.88/hypervisors-search-resp.json0000664000175000017500000000032700000000000027131 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": "6b7876c5-9ae7-4fa7-a5c8-28c796d17381", "state": "up", "status": "enabled" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.88/hypervisors-show-resp.json0000664000175000017500000000072300000000000026644 0ustar00zuulzuul00000000000000{ "hypervisor": { "host_ip": "192.168.1.135", "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": "f79c1cce-9972-44c6-aa30-1d9e6526ce37", "service": { "disabled_reason": null, "host": "compute", "id": "7e6b27b8-f563-4c21-baa4-a40d579ed8c4" }, "state": "up", "status": "enabled", "uptime": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.88/hypervisors-show-with-servers-resp.json0000664000175000017500000000137200000000000031305 0ustar00zuulzuul00000000000000{ "hypervisor": { "host_ip": "192.168.1.135", "hypervisor_hostname": "fake-mini", "hypervisor_type": "fake", "hypervisor_version": 1000, "id": "a68a56ab-9c42-47c0-9309-879e4a6dbe86", "servers": [ { "name": "test_server1", "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" }, { "name": "test_server2", "uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb" } ], "service": { "disabled_reason": null, "host": "compute", "id": "8495059a-a079-4ab4-ad6f-cf45b81c877d" }, "state": "up", "status": "enabled", "uptime": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-hypervisors/v2.88/hypervisors-with-servers-resp.json0000664000175000017500000000104600000000000030325 0ustar00zuulzuul00000000000000{ "hypervisors": [ { "hypervisor_hostname": "fake-mini", "id": "39b0c938-8e2f-49da-bb52-e85c78d4ff2a", "servers": [ { "name": "test_server1", "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" }, { "name": "test_server2", "uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb" } ], "state": "up", "status": "enabled" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0576062 nova-32.0.0/doc/api_samples/os-instance-actions/0000775000175000017500000000000000000000000021505 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/instance-action-get-resp.json0000664000175000017500000000121500000000000027202 0ustar00zuulzuul00000000000000{ "instanceAction": { "action": "stop", 
"events": [ { "event": "compute_stop_instance", "finish_time": "2018-04-25T01:26:29.585618", "result": "Success", "start_time": "2018-04-25T01:26:29.299627", "traceback": null } ], "instance_uuid": "e0a7ed34-899c-4b4d-8637-11ca627346ef", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-14122cb1-4256-4a16-a4f9-6faf494afaa7", "start_time": "2018-04-25T01:26:29.074293", "user_id": "admin" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/instance-actions-list-resp.json0000664000175000017500000000140300000000000027560 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "stop", "instance_uuid": "fcd19ef2-b593-40b1-90a5-fc31063fa95c", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-f8a59f03-76dc-412f-92c2-21f8612be728", "start_time": "2018-04-25T01:26:29.092892", "user_id": "admin" }, { "action": "create", "instance_uuid": "fcd19ef2-b593-40b1-90a5-fc31063fa95c", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-50189019-626d-47fb-b944-b8342af09679", "start_time": "2018-04-25T01:26:25.877278", "user_id": "admin" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0576062 nova-32.0.0/doc/api_samples/os-instance-actions/v2.21/0000775000175000017500000000000000000000000022255 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.21/instance-action-get-resp.json0000664000175000017500000000121500000000000027752 0ustar00zuulzuul00000000000000{ "instanceAction": { "action": "stop", "events": [ { "event": "compute_stop_instance", "finish_time": "2018-04-25T01:26:29.262877", "result": "Success", "start_time": "2018-04-25T01:26:29.012774", "traceback": null } ], "instance_uuid": "a53525ef-9ed5-4169-9f2e-dd141d575d87", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-343506f4-4dc3-4153-8de5-de6a60cb26ab", "start_time": "2018-04-25T01:26:28.757301", "user_id": "admin" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.21/instance-actions-list-resp.json0000664000175000017500000000140300000000000030330 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "stop", "instance_uuid": "07cacdbb-2e7f-4048-b69c-95cbdc47af6f", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-c022e6dc-d962-426e-b623-1cdbac0da64b", "start_time": "2018-04-25T01:26:28.752049", "user_id": "admin" }, { "action": "create", "instance_uuid": "07cacdbb-2e7f-4048-b69c-95cbdc47af6f", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-862ef1ff-da4f-4b3b-9a29-6d621442c76c", "start_time": "2018-04-25T01:26:25.595858", "user_id": "admin" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0576062 nova-32.0.0/doc/api_samples/os-instance-actions/v2.51/0000775000175000017500000000000000000000000022260 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/os-instance-actions/v2.51/instance-action-get-non-admin-resp.json0000664000175000017500000000115100000000000031632 0ustar00zuulzuul00000000000000{ "instanceAction": { "action": "stop", "events": [ { "event": "compute_stop_instance", "finish_time": "2018-04-25T01:26:29.565338", "result": "Success", "start_time": "2018-04-25T01:26:29.294207" } ], "instance_uuid": "11a932ff-48b8-46ed-a409-7d9e50ec75d0", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-fad89d20-0311-44bd-a8c2-ee3f2411bcf0", "start_time": "2018-04-25T01:26:29.073738", "user_id": "fake" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.51/instance-action-get-resp.json0000664000175000017500000000121500000000000027755 0ustar00zuulzuul00000000000000{ "instanceAction": { "action": "stop", "events": [ { "event": "compute_stop_instance", "finish_time": "2018-04-25T01:26:30.798227", "result": "Success", "start_time": "2018-04-25T01:26:30.526590", "traceback": null } ], "instance_uuid": "07afdfe5-3791-48e3-9bda-1a0804796bab", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-f574c934-6f67-4945-b357-5a52a28a46a6", "start_time": "2018-04-25T01:26:30.301030", "user_id": "admin" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.51/instance-actions-list-resp.json0000664000175000017500000000140300000000000030333 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "stop", "instance_uuid": "a0dbc3b0-6f14-4fb7-8500-172e82584d05", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-ca4313e0-b514-46ea-a9b9-e49f8a1ad344", "start_time": "2018-04-25T01:26:29.206664", "user_id": "admin" }, { "action": "create", "instance_uuid": "a0dbc3b0-6f14-4fb7-8500-172e82584d05", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-a897e43f-8733-499d-a9cc-78993de2b8e8", "start_time": "2018-04-25T01:26:25.910998", "user_id": "admin" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0576062 nova-32.0.0/doc/api_samples/os-instance-actions/v2.58/0000775000175000017500000000000000000000000022267 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.58/instance-action-get-non-admin-resp.json0000664000175000017500000000123500000000000031644 0ustar00zuulzuul00000000000000{ "instanceAction": { "action": "stop", "events": [ { "event": "compute_stop_instance", "finish_time": "2018-04-25T01:26:30.518082", "result": "Success", "start_time": "2018-04-25T01:26:30.261571" } ], "instance_uuid": "ee4c91a6-f214-486d-8e2a-efa29ad91ecd", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-ada57283-2dd7-4703-9781-d287aaa4eb95", "start_time": "2018-04-25T01:26:30.041225", "updated_at": "2018-04-25T01:26:30.518082", "user_id": "fake" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.58/instance-action-get-resp.json0000664000175000017500000000130100000000000027760 0ustar00zuulzuul00000000000000{ "instanceAction": { "action": "stop", "events": [ { "event": 
"compute_stop_instance", "finish_time": "2018-04-25T01:26:29.409773", "result": "Success", "start_time": "2018-04-25T01:26:29.203170", "traceback": null } ], "instance_uuid": "cab10fb8-6702-40ba-a91c-18009cec0a09", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-0514d54b-0f2c-4611-963d-fc24afb57f1f", "start_time": "2018-04-25T01:26:28.996024", "updated_at": "2018-04-25T01:26:29.409773", "user_id": "admin" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.58/instance-actions-list-resp.json0000664000175000017500000000156100000000000030347 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "stop", "instance_uuid": "247cc793-7cf4-424a-a529-11bd62f960b6", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-1448f44f-490d-42ff-8781-c3181d103a7c", "start_time": "2018-04-25T01:26:28.793416", "updated_at": "2018-04-25T01:26:29.292649", "user_id": "fake" }, { "action": "create", "instance_uuid": "247cc793-7cf4-424a-a529-11bd62f960b6", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-de0561a0-09d9-4902-b1fc-23ee95b14c67", "start_time": "2018-04-25T01:26:25.527791", "updated_at": "2018-04-25T01:26:28.749039", "user_id": "fake" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.58/instance-actions-list-with-changes-since.json0000664000175000017500000000071100000000000033052 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "stop", "instance_uuid": "84fb2511-ed79-418c-ac0d-11337e1a1d76", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-0176a4e5-15ae-4038-98a9-5444aa277c31", "start_time": "2018-04-25T01:26:29.051607", "updated_at": "2018-04-25T01:26:29.538648", "user_id": "admin" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.58/instance-actions-list-with-limit-resp.json0000664000175000017500000000134000000000000032427 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "stop", "instance_uuid": "ca3d3be5-1a40-427f-9515-f5e181f479d0", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-4dbefbb7-d743-4d42-b0a1-a79cbe256138", "start_time": "2018-04-25T01:26:28.909887", "updated_at": "2018-04-25T01:26:29.400606", "user_id": "admin" } ], "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/ca3d3be5-1a40-427f-9515-f5e181f479d0/os-instance-actions?limit=1&marker=req-4dbefbb7-d743-4d42-b0a1-a79cbe256138", "rel": "next" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.58/instance-actions-list-with-marker-resp.json0000664000175000017500000000071200000000000032574 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "create", "instance_uuid": "31f35617-317d-4688-8046-bb600286e6b6", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-922232c3-faf8-4628-9c40-0e8f0cdab020", "start_time": "2018-04-25T01:26:33.694447", "updated_at": "2018-04-25T01:26:35.944525", "user_id": "fake" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 
xustar000000000000000028 mtime=1759315689.0616064 nova-32.0.0/doc/api_samples/os-instance-actions/v2.62/0000775000175000017500000000000000000000000022262 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.62/instance-action-get-non-admin-resp.json0000664000175000017500000000136300000000000031641 0ustar00zuulzuul00000000000000{ "instanceAction": { "action": "stop", "events": [ { "event": "compute_stop_instance", "finish_time": "2018-04-25T01:26:34.784165", "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "result": "Success", "start_time": "2018-04-25T01:26:34.612020" } ], "instance_uuid": "79edaa44-ad4f-4af7-b994-154518c2b927", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-8eb28d4a-db6c-4337-bab8-ce154e9c620e", "start_time": "2018-04-25T01:26:34.388280", "updated_at": "2018-04-25T01:26:34.784165", "user_id": "fake" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.62/instance-action-get-resp.json0000664000175000017500000000147200000000000027764 0ustar00zuulzuul00000000000000{ "instanceAction": { "action": "stop", "events": [ { "event": "compute_stop_instance", "finish_time": "2018-04-25T01:26:36.790544", "host": "compute", "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "result": "Success", "start_time": "2018-04-25T01:26:36.539271", "traceback": null } ], "instance_uuid": "4bf3473b-d550-4b65-9409-292d44ab14a2", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-0d819d5c-1527-4669-bdf0-ffad31b5105b", "start_time": "2018-04-25T01:26:36.341290", "updated_at": "2018-04-25T01:26:36.790544", "user_id": "admin" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.62/instance-actions-list-resp.json0000664000175000017500000000156300000000000030344 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "stop", "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-f04d4b92-6241-42da-b82d-2cedb225c58d", "start_time": "2018-04-25T01:26:36.036697", "updated_at": "2018-04-25T01:26:36.525308", "user_id": "admin" }, { "action": "create", "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-d8790618-9bbf-4df0-8af8-fc9e24de29c0", "start_time": "2018-04-25T01:26:33.692125", "updated_at": "2018-04-25T01:26:35.993821", "user_id": "admin" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.62/instance-actions-list-with-changes-since.json0000664000175000017500000000071100000000000033045 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "stop", "instance_uuid": "2150964c-30fe-4214-9547-8822375aa7d0", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-0c3b2079-0a44-474d-a5b2-7466d4b4c642", "start_time": "2018-04-25T01:26:29.594237", "updated_at": "2018-04-25T01:26:30.065061", "user_id": "admin" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.62/instance-actions-list-with-limit-resp.json0000664000175000017500000000133700000000000032430 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "stop", "instance_uuid": "7a580cc0-3469-441a-9736-d5fce91003f9", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-b8ffb713-61a2-4e7c-a705-37052cba9d6e", "start_time": "2018-04-25T01:26:28.955571", "updated_at": "2018-04-25T01:26:29.414973", "user_id": "fake" } ], "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/7a580cc0-3469-441a-9736-d5fce91003f9/os-instance-actions?limit=1&marker=req-b8ffb713-61a2-4e7c-a705-37052cba9d6e", "rel": "next" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.62/instance-actions-list-with-marker-resp.json0000664000175000017500000000071200000000000032567 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "create", "instance_uuid": "9bde1fd5-8435-45c5-afc1-bedd0605275b", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-4510fb10-447f-4572-a64d-c2324547d86c", "start_time": "2018-04-25T01:26:33.710291", "updated_at": "2018-04-25T01:26:35.374936", "user_id": "fake" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0616064 nova-32.0.0/doc/api_samples/os-instance-actions/v2.66/0000775000175000017500000000000000000000000022266 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.66/instance-action-get-non-admin-resp.json0000664000175000017500000000136300000000000031645 0ustar00zuulzuul00000000000000{ "instanceAction": { "action": "stop", "events": [ { "event": "compute_stop_instance", "finish_time": "2018-04-25T01:26:34.784165", "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "result": "Success", "start_time": "2018-04-25T01:26:34.612020" } ], "instance_uuid": "79edaa44-ad4f-4af7-b994-154518c2b927", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-8eb28d4a-db6c-4337-bab8-ce154e9c620e", "start_time": "2018-04-25T01:26:34.388280", "updated_at": "2018-04-25T01:26:34.784165", "user_id": "fake" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.66/instance-action-get-resp.json0000664000175000017500000000147200000000000027770 0ustar00zuulzuul00000000000000{ "instanceAction": { "action": "stop", "events": [ { "event": "compute_stop_instance", "finish_time": "2018-04-25T01:26:36.790544", "host": "compute", "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "result": "Success", "start_time": "2018-04-25T01:26:36.539271", "traceback": null } ], "instance_uuid": "4bf3473b-d550-4b65-9409-292d44ab14a2", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-0d819d5c-1527-4669-bdf0-ffad31b5105b", "start_time": "2018-04-25T01:26:36.341290", "updated_at": "2018-04-25T01:26:36.790544", "user_id": "admin" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-resp.json0000664000175000017500000000156300000000000030350 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "stop", "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-f04d4b92-6241-42da-b82d-2cedb225c58d", "start_time": "2018-04-25T01:26:36.036697", "updated_at": "2018-04-25T01:26:36.525308", "user_id": "admin" }, { "action": "create", "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-d8790618-9bbf-4df0-8af8-fc9e24de29c0", "start_time": "2018-04-25T01:26:33.692125", "updated_at": "2018-04-25T01:26:35.993821", "user_id": "admin" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-changes-before.json0000664000175000017500000000156400000000000033221 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "stop", "instance_uuid": "2150964c-30fe-4214-9547-8822375aa7d0", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-0c3b2079-0a44-474d-a5b2-7466d4b4c642", "start_time": "2018-04-25T01:26:29.594237", "updated_at": "2018-04-25T01:26:30.065061", "user_id": "admin" }, { "action": "create", "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-d8790618-9bbf-4df0-8af8-fc9e24de29c0", "start_time": "2018-04-25T01:26:33.692125", "updated_at": "2018-04-25T01:26:35.993821", "user_id": "admin" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-changes-since.json0000664000175000017500000000071100000000000033051 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "stop", "instance_uuid": "2150964c-30fe-4214-9547-8822375aa7d0", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-0c3b2079-0a44-474d-a5b2-7466d4b4c642", "start_time": "2018-04-25T01:26:29.594237", "updated_at": "2018-04-25T01:26:30.065061", "user_id": "admin" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-limit-resp.json0000664000175000017500000000134000000000000032426 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "stop", "instance_uuid": "ca3d3be5-1a40-427f-9515-f5e181f479d0", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-4dbefbb7-d743-4d42-b0a1-a79cbe256138", "start_time": "2018-04-25T01:26:28.909887", "updated_at": "2018-04-25T01:26:29.400606", "user_id": "admin" } ], "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/ca3d3be5-1a40-427f-9515-f5e181f479d0/os-instance-actions?limit=1&marker=req-4dbefbb7-d743-4d42-b0a1-a79cbe256138", "rel": "next" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-marker-resp.json0000664000175000017500000000071200000000000032573 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": 
"create", "instance_uuid": "9bde1fd5-8435-45c5-afc1-bedd0605275b", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-4510fb10-447f-4572-a64d-c2324547d86c", "start_time": "2018-04-25T01:26:33.710291", "updated_at": "2018-04-25T01:26:35.374936", "user_id": "fake" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0656064 nova-32.0.0/doc/api_samples/os-instance-actions/v2.84/0000775000175000017500000000000000000000000022266 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.84/instance-action-get-non-admin-resp.json0000664000175000017500000000136300000000000031645 0ustar00zuulzuul00000000000000{ "instanceAction": { "action": "stop", "events": [ { "event": "compute_stop_instance", "finish_time": "2018-04-25T01:26:34.784165", "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "result": "Success", "start_time": "2018-04-25T01:26:34.612020" } ], "instance_uuid": "79edaa44-ad4f-4af7-b994-154518c2b927", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-8eb28d4a-db6c-4337-bab8-ce154e9c620e", "start_time": "2018-04-25T01:26:34.388280", "updated_at": "2018-04-25T01:26:34.784165", "user_id": "fake" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json0000664000175000017500000000153400000000000027767 0ustar00zuulzuul00000000000000{ "instanceAction": { "action": "stop", "events": [ { "event": "compute_stop_instance", "finish_time": "2018-04-25T01:26:36.790544", "host": "compute", "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "result": "Success", "start_time": "2018-04-25T01:26:36.539271", "traceback": null, "details": null } ], "instance_uuid": "4bf3473b-d550-4b65-9409-292d44ab14a2", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-0d819d5c-1527-4669-bdf0-ffad31b5105b", "start_time": "2018-04-25T01:26:36.341290", "updated_at": "2018-04-25T01:26:36.790544", "user_id": "admin" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-resp.json0000664000175000017500000000156300000000000030350 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "stop", "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-f04d4b92-6241-42da-b82d-2cedb225c58d", "start_time": "2018-04-25T01:26:36.036697", "updated_at": "2018-04-25T01:26:36.525308", "user_id": "admin" }, { "action": "create", "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-d8790618-9bbf-4df0-8af8-fc9e24de29c0", "start_time": "2018-04-25T01:26:33.692125", "updated_at": "2018-04-25T01:26:35.993821", "user_id": "admin" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-before.json0000664000175000017500000000156400000000000033221 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "stop", 
"instance_uuid": "2150964c-30fe-4214-9547-8822375aa7d0", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-0c3b2079-0a44-474d-a5b2-7466d4b4c642", "start_time": "2018-04-25T01:26:29.594237", "updated_at": "2018-04-25T01:26:30.065061", "user_id": "admin" }, { "action": "create", "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-d8790618-9bbf-4df0-8af8-fc9e24de29c0", "start_time": "2018-04-25T01:26:33.692125", "updated_at": "2018-04-25T01:26:35.993821", "user_id": "admin" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-since.json0000664000175000017500000000071100000000000033051 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "stop", "instance_uuid": "2150964c-30fe-4214-9547-8822375aa7d0", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-0c3b2079-0a44-474d-a5b2-7466d4b4c642", "start_time": "2018-04-25T01:26:29.594237", "updated_at": "2018-04-25T01:26:30.065061", "user_id": "admin" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-limit-resp.json0000664000175000017500000000134000000000000032426 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "stop", "instance_uuid": "ca3d3be5-1a40-427f-9515-f5e181f479d0", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-4dbefbb7-d743-4d42-b0a1-a79cbe256138", "start_time": "2018-04-25T01:26:28.909887", "updated_at": "2018-04-25T01:26:29.400606", "user_id": "admin" } ], "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/ca3d3be5-1a40-427f-9515-f5e181f479d0/os-instance-actions?limit=1&marker=req-4dbefbb7-d743-4d42-b0a1-a79cbe256138", "rel": "next" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-marker-resp.json0000664000175000017500000000071200000000000032573 0ustar00zuulzuul00000000000000{ "instanceActions": [ { "action": "create", "instance_uuid": "9bde1fd5-8435-45c5-afc1-bedd0605275b", "message": null, "project_id": "6f70656e737461636b20342065766572", "request_id": "req-4510fb10-447f-4572-a64d-c2324547d86c", "start_time": "2018-04-25T01:26:33.710291", "updated_at": "2018-04-25T01:26:35.374936", "user_id": "fake" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0656064 nova-32.0.0/doc/api_samples/os-instance-usage-audit-log/0000775000175000017500000000000000000000000023034 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-index-get-resp.json0000664000175000017500000000225300000000000032524 0ustar00zuulzuul00000000000000{ "instance_usage_audit_logs": { "hosts_not_run": [ "samplehost3" ], "log": { "samplehost0": { "errors": 1, "instances": 1, "message": "Instance usage audit ran for host samplehost0, 1 instances in 0.01 seconds.", "state": "DONE" }, "samplehost1": { "errors": 1, "instances": 2, "message": "Instance usage audit 
ran for host samplehost1, 2 instances in 0.01 seconds.", "state": "DONE" }, "samplehost2": { "errors": 1, "instances": 3, "message": "Instance usage audit ran for host samplehost2, 3 instances in 0.01 seconds.", "state": "DONE" } }, "num_hosts": 4, "num_hosts_done": 3, "num_hosts_not_run": 1, "num_hosts_running": 0, "overall_status": "3 of 4 hosts done. 3 errors.", "period_beginning": "2012-06-01 00:00:00", "period_ending": "2012-07-01 00:00:00", "total_errors": 3, "total_instances": 6 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-instance-usage-audit-log/inst-usage-audit-log-show-get-resp.json0000664000175000017500000000225200000000000032374 0ustar00zuulzuul00000000000000{ "instance_usage_audit_log": { "hosts_not_run": [ "samplehost3" ], "log": { "samplehost0": { "errors": 1, "instances": 1, "message": "Instance usage audit ran for host samplehost0, 1 instances in 0.01 seconds.", "state": "DONE" }, "samplehost1": { "errors": 1, "instances": 2, "message": "Instance usage audit ran for host samplehost1, 2 instances in 0.01 seconds.", "state": "DONE" }, "samplehost2": { "errors": 1, "instances": 3, "message": "Instance usage audit ran for host samplehost2, 3 instances in 0.01 seconds.", "state": "DONE" } }, "num_hosts": 4, "num_hosts_done": 3, "num_hosts_not_run": 1, "num_hosts_running": 0, "overall_status": "3 of 4 hosts done. 3 errors.", "period_beginning": "2012-06-01 00:00:00", "period_ending": "2012-07-01 00:00:00", "total_errors": 3, "total_instances": 6 } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0656064 nova-32.0.0/doc/api_samples/os-keypairs/0000775000175000017500000000000000000000000020072 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/keypairs-get-resp.json0000664000175000017500000000140000000000000024333 0ustar00zuulzuul00000000000000{ "keypair": { "fingerprint": "44:fe:29:6e:23:14:b9:53:5b:65:82:58:1c:fe:5a:c3", "name": "keypair-6638abdb-c4e8-407c-ba88-c8dd7cc3c4f1", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC1HTrHCbb9NawNLSV8N6tSa8i637+EC2dA+lsdHHfQlT54t+N0nHhJPlKWDLhc579j87vp6RDFriFJ/smsTnDnf64O12z0kBaJpJPH2zXrBkZFK6q2rmxydURzX/z0yLSCP77SFJ0fdXWH2hMsAusflGyryHGX20n+mZK6mDrxVzGxEz228dwQ5G7Az5OoZDWygH2pqPvKjkifRw0jwUKf3BbkP0QvANACOk26cv16mNFpFJfI1N3OC5lUsZQtKGR01ptJoWijYKccqhkAKuo902tg/qup58J5kflNm7I61sy1mJon6SGqNUSfoQagqtBH6vd/tU1jnlwZ03uUroAL Generated-by-Nova\n", "user_id": "fake", "deleted": false, "created_at": "2014-05-07T12:06:13.681238", "updated_at": null, "deleted_at": null, "id": 1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/keypairs-import-post-req.json0000664000175000017500000000053100000000000025673 0ustar00zuulzuul00000000000000{ "keypair": { "name": "keypair-d20a3d59-9433-4b79-8726-20b431d89c78", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/os-keypairs/keypairs-import-post-resp.json0000664000175000017500000000067600000000000026067 0ustar00zuulzuul00000000000000{ "keypair": { "fingerprint": "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c", "name": "keypair-803a1926-af78-4b05-902a-1d6f7a8d9d3e", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", "user_id": "fake" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/keypairs-list-resp.json0000664000175000017500000000124100000000000024532 0ustar00zuulzuul00000000000000{ "keypairs": [ { "keypair": { "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" } } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/keypairs-post-req.json0000664000175000017500000000013100000000000024357 0ustar00zuulzuul00000000000000{ "keypair": { "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/keypairs-post-resp.json0000664000175000017500000000445500000000000024556 0ustar00zuulzuul00000000000000{ "keypair": { "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", "private_key": "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpAIBAAKCAQEApBdzF+fTq5QbN3R+QlO5TZr6W64GcUqcho5ZxPBZZIq53P1K\ndtpaY856ManqEwME1tN+JOw8+mmCK2RpkMHtk5BNPOMqr5Y+OQ5MqI/eX1v7GWnJ\ntHGTbi+vRDmxBh3aa3xiUGo66c9tjUKAg/ExQfFr/vKJvTR/S3urPlj3vfFgu+yi\n8PKoH0LGyHsviWsD1peDuu2XS+ca8qbkY3yD1o4Mv1R/OSF4P2fxjjWdp8R4EkoT\nJMKkhRAgAuS9zxwftPv9djP4opHWrRUlRo6bh75CzrN6Hu5uh5Tn5bkifOQcy1gW\n772vd6pBpi4OGQHPKz4djvmCLAVBzSyzDP6EKQIDAQABAoIBAQCB+tU/ZXKlIe+h\nMNTmoz1QfOe+AY625Rwx9cakGqMk4kKyC62VkgcxshfXCToSjzyhEuyEQOFYloT2\n7FY2xXb0gcS861Efv0pQlcQhbbz/GnQ/wC13ktPu3zTdPTm9l54xsFiMTGmYVaf4\n0mnMmhyjmKIsVGDJEDGZUD/oZj7wJGOFha5M4FZrZlJIrEZC0rGGlcC0kGF2no6B\nj1Mu7HjyK3pTKf4dlp+jeRikUF5Pct+qT+rcv2rZ3fl3inxtlLEwZeFPbp/njf/U\nIGxFzZsuLmiFlsJar6M5nEckTB3p25maWWaR8/0jvJRgsPnuoUrUoGDq87DMKCdk\nlw6by9fRAoGBANhnS9ko7Of+ntqIFR7xOG9p/oPATztgHkFxe4GbQ0leaDRTx3vE\ndQmUCnn24xtyVECaI9a4IV+LP1npw8niWUJ4pjgdAlkF4cCTu9sN+cBO15SfdACI\nzD1DaaHmpFCAWlpTo68VWlvWll6i2ncCkRJR1+q/C/yQz7asvl4AakElAoGBAMId\nxqMT2Sy9xLuHsrAoMUvBOkwaMYZH+IAb4DvUDjVIiKWjmonrmopS5Lpb+ALBKqZe\neVfD6HwWQqGwCFItToaEkZvrNfTapoNCHWWg001D49765UV5lMrArDbM1vXtFfM4\nDRYM6+Y6o/6QH8EBgXtyBxcYthIDBM3wBJa67xG1AoGAKTm8fFlMkIG0N4N3Kpbf\nnnH915GaRoBwIx2AXtd6QQ7oIRfYx95MQY/fUw7SgxcLr+btbulTCkWXwwRClUI2\nqPAdElGMcfMp56r9PaTy8EzUyu55heSJrB4ckIhEw0VAcTa/1wnlVduSd+LkZYmq\no2fOD11n5iycNXvBJF1F4LUCgYAMaRbwCi7SW3eefbiA5rDwJPRzNSGBckyC9EVL\nzezynyaNYH5a3wNMYKxa9dJPasYtSND9OXs9o7ay26xMhLUGiKc+jrUuaGRI9Asp\nGjUoNXT2JphN7s4CgHsCLep4YqYKnMTJah4S5CDj/5boIg6DM/EcGupZEHRYLkY8\n1MrAGQKBgQCi9yeC39ctLUNn+Ix604gttWWChdt3ozufTZ7HybJOSRA9Gh3iD5gm\nzlz0xqpGShKpOY2k+ftvja0poMdGeJLt84P3r2q01IgI7w0LmOj5m0W10dHysH27\nBWpCnHdBJMxnBsMRPoM4MKkmKWD9l5PSTCTWtkIpsyuDCko6D9UwZA==\n-----END RSA PRIVATE KEY-----\n", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n", "user_id": "fake" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0656064 nova-32.0.0/doc/api_samples/os-keypairs/v2.10/0000775000175000017500000000000000000000000020640 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/v2.10/keypairs-get-resp.json0000664000175000017500000000142700000000000025112 0ustar00zuulzuul00000000000000{ "keypair": { "fingerprint": "44:fe:29:6e:23:14:b9:53:5b:65:82:58:1c:fe:5a:c3", "name": "keypair-6638abdb-c4e8-407c-ba88-c8dd7cc3c4f1", "type": "ssh", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC1HTrHCbb9NawNLSV8N6tSa8i637+EC2dA+lsdHHfQlT54t+N0nHhJPlKWDLhc579j87vp6RDFriFJ/smsTnDnf64O12z0kBaJpJPH2zXrBkZFK6q2rmxydURzX/z0yLSCP77SFJ0fdXWH2hMsAusflGyryHGX20n+mZK6mDrxVzGxEz228dwQ5G7Az5OoZDWygH2pqPvKjkifRw0jwUKf3BbkP0QvANACOk26cv16mNFpFJfI1N3OC5lUsZQtKGR01ptJoWijYKccqhkAKuo902tg/qup58J5kflNm7I61sy1mJon6SGqNUSfoQagqtBH6vd/tU1jnlwZ03uUroAL Generated-by-Nova\n", "user_id": "fake", "deleted": false, "created_at": "2014-05-07T12:06:13.681238", "updated_at": null, "deleted_at": null, "id": 1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json0000664000175000017500000000061400000000000026443 0ustar00zuulzuul00000000000000{ "keypair": { "name": "keypair-d20a3d59-9433-4b79-8726-20b431d89c78", "type": "ssh", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json0000664000175000017500000000072500000000000026630 0ustar00zuulzuul00000000000000{ "keypair": { "fingerprint": "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c", "name": "keypair-803a1926-af78-4b05-902a-1d6f7a8d9d3e", "type": "ssh", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", "user_id": "fake" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/v2.10/keypairs-list-resp.json0000664000175000017500000000130000000000000025274 0ustar00zuulzuul00000000000000{ "keypairs": [ { "keypair": { "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", "type": "ssh", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" } } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/v2.10/keypairs-post-req.json0000664000175000017500000000021400000000000025127 0ustar00zuulzuul00000000000000{ "keypair": { "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9", "type": "ssh", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/v2.10/keypairs-post-resp.json0000664000175000017500000000450500000000000025320 0ustar00zuulzuul00000000000000{ "keypair": { "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9", "type": "ssh", "private_key": "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpAIBAAKCAQEApBdzF+fTq5QbN3R+QlO5TZr6W64GcUqcho5ZxPBZZIq53P1K\ndtpaY856ManqEwME1tN+JOw8+mmCK2RpkMHtk5BNPOMqr5Y+OQ5MqI/eX1v7GWnJ\ntHGTbi+vRDmxBh3aa3xiUGo66c9tjUKAg/ExQfFr/vKJvTR/S3urPlj3vfFgu+yi\n8PKoH0LGyHsviWsD1peDuu2XS+ca8qbkY3yD1o4Mv1R/OSF4P2fxjjWdp8R4EkoT\nJMKkhRAgAuS9zxwftPv9djP4opHWrRUlRo6bh75CzrN6Hu5uh5Tn5bkifOQcy1gW\n772vd6pBpi4OGQHPKz4djvmCLAVBzSyzDP6EKQIDAQABAoIBAQCB+tU/ZXKlIe+h\nMNTmoz1QfOe+AY625Rwx9cakGqMk4kKyC62VkgcxshfXCToSjzyhEuyEQOFYloT2\n7FY2xXb0gcS861Efv0pQlcQhbbz/GnQ/wC13ktPu3zTdPTm9l54xsFiMTGmYVaf4\n0mnMmhyjmKIsVGDJEDGZUD/oZj7wJGOFha5M4FZrZlJIrEZC0rGGlcC0kGF2no6B\nj1Mu7HjyK3pTKf4dlp+jeRikUF5Pct+qT+rcv2rZ3fl3inxtlLEwZeFPbp/njf/U\nIGxFzZsuLmiFlsJar6M5nEckTB3p25maWWaR8/0jvJRgsPnuoUrUoGDq87DMKCdk\nlw6by9fRAoGBANhnS9ko7Of+ntqIFR7xOG9p/oPATztgHkFxe4GbQ0leaDRTx3vE\ndQmUCnn24xtyVECaI9a4IV+LP1npw8niWUJ4pjgdAlkF4cCTu9sN+cBO15SfdACI\nzD1DaaHmpFCAWlpTo68VWlvWll6i2ncCkRJR1+q/C/yQz7asvl4AakElAoGBAMId\nxqMT2Sy9xLuHsrAoMUvBOkwaMYZH+IAb4DvUDjVIiKWjmonrmopS5Lpb+ALBKqZe\neVfD6HwWQqGwCFItToaEkZvrNfTapoNCHWWg001D49765UV5lMrArDbM1vXtFfM4\nDRYM6+Y6o/6QH8EBgXtyBxcYthIDBM3wBJa67xG1AoGAKTm8fFlMkIG0N4N3Kpbf\nnnH915GaRoBwIx2AXtd6QQ7oIRfYx95MQY/fUw7SgxcLr+btbulTCkWXwwRClUI2\nqPAdElGMcfMp56r9PaTy8EzUyu55heSJrB4ckIhEw0VAcTa/1wnlVduSd+LkZYmq\no2fOD11n5iycNXvBJF1F4LUCgYAMaRbwCi7SW3eefbiA5rDwJPRzNSGBckyC9EVL\nzezynyaNYH5a3wNMYKxa9dJPasYtSND9OXs9o7ay26xMhLUGiKc+jrUuaGRI9Asp\nGjUoNXT2JphN7s4CgHsCLep4YqYKnMTJah4S5CDj/5boIg6DM/EcGupZEHRYLkY8\n1MrAGQKBgQCi9yeC39ctLUNn+Ix604gttWWChdt3ozufTZ7HybJOSRA9Gh3iD5gm\nzlz0xqpGShKpOY2k+ftvja0poMdGeJLt84P3r2q01IgI7w0LmOj5m0W10dHysH27\nBWpCnHdBJMxnBsMRPoM4MKkmKWD9l5PSTCTWtkIpsyuDCko6D9UwZA==\n-----END RSA PRIVATE KEY-----\n", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0696063 nova-32.0.0/doc/api_samples/os-keypairs/v2.2/0000775000175000017500000000000000000000000020561 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/v2.2/keypairs-get-resp.json0000664000175000017500000000142700000000000025033 0ustar00zuulzuul00000000000000{ "keypair": { "fingerprint": "44:fe:29:6e:23:14:b9:53:5b:65:82:58:1c:fe:5a:c3", "name": "keypair-6638abdb-c4e8-407c-ba88-c8dd7cc3c4f1", "type": "ssh", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC1HTrHCbb9NawNLSV8N6tSa8i637+EC2dA+lsdHHfQlT54t+N0nHhJPlKWDLhc579j87vp6RDFriFJ/smsTnDnf64O12z0kBaJpJPH2zXrBkZFK6q2rmxydURzX/z0yLSCP77SFJ0fdXWH2hMsAusflGyryHGX20n+mZK6mDrxVzGxEz228dwQ5G7Az5OoZDWygH2pqPvKjkifRw0jwUKf3BbkP0QvANACOk26cv16mNFpFJfI1N3OC5lUsZQtKGR01ptJoWijYKccqhkAKuo902tg/qup58J5kflNm7I61sy1mJon6SGqNUSfoQagqtBH6vd/tU1jnlwZ03uUroAL Generated-by-Nova\n", "user_id": "fake", "deleted": false, "created_at": "2014-05-07T12:06:13.681238", "updated_at": null, "deleted_at": null, "id": 1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/v2.2/keypairs-import-post-req.json0000664000175000017500000000056000000000000026364 
0ustar00zuulzuul00000000000000{ "keypair": { "name": "keypair-d20a3d59-9433-4b79-8726-20b431d89c78", "type": "ssh", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/v2.2/keypairs-import-post-resp.json0000664000175000017500000000072500000000000026551 0ustar00zuulzuul00000000000000{ "keypair": { "fingerprint": "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c", "name": "keypair-803a1926-af78-4b05-902a-1d6f7a8d9d3e", "type": "ssh", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", "user_id": "fake" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/v2.2/keypairs-list-resp.json0000664000175000017500000000130000000000000025215 0ustar00zuulzuul00000000000000{ "keypairs": [ { "keypair": { "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", "type": "ssh", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" } } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/v2.2/keypairs-post-req.json0000664000175000017500000000016000000000000025050 0ustar00zuulzuul00000000000000{ "keypair": { "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9", "type": "ssh" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/v2.2/keypairs-post-resp.json0000664000175000017500000000450400000000000025240 0ustar00zuulzuul00000000000000{ "keypair": { "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", "name": "keypair-50ca852e-273f-4cdc-8949-45feba200837", "type": "ssh", "private_key": "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpAIBAAKCAQEApBdzF+fTq5QbN3R+QlO5TZr6W64GcUqcho5ZxPBZZIq53P1K\ndtpaY856ManqEwME1tN+JOw8+mmCK2RpkMHtk5BNPOMqr5Y+OQ5MqI/eX1v7GWnJ\ntHGTbi+vRDmxBh3aa3xiUGo66c9tjUKAg/ExQfFr/vKJvTR/S3urPlj3vfFgu+yi\n8PKoH0LGyHsviWsD1peDuu2XS+ca8qbkY3yD1o4Mv1R/OSF4P2fxjjWdp8R4EkoT\nJMKkhRAgAuS9zxwftPv9djP4opHWrRUlRo6bh75CzrN6Hu5uh5Tn5bkifOQcy1gW\n772vd6pBpi4OGQHPKz4djvmCLAVBzSyzDP6EKQIDAQABAoIBAQCB+tU/ZXKlIe+h\nMNTmoz1QfOe+AY625Rwx9cakGqMk4kKyC62VkgcxshfXCToSjzyhEuyEQOFYloT2\n7FY2xXb0gcS861Efv0pQlcQhbbz/GnQ/wC13ktPu3zTdPTm9l54xsFiMTGmYVaf4\n0mnMmhyjmKIsVGDJEDGZUD/oZj7wJGOFha5M4FZrZlJIrEZC0rGGlcC0kGF2no6B\nj1Mu7HjyK3pTKf4dlp+jeRikUF5Pct+qT+rcv2rZ3fl3inxtlLEwZeFPbp/njf/U\nIGxFzZsuLmiFlsJar6M5nEckTB3p25maWWaR8/0jvJRgsPnuoUrUoGDq87DMKCdk\nlw6by9fRAoGBANhnS9ko7Of+ntqIFR7xOG9p/oPATztgHkFxe4GbQ0leaDRTx3vE\ndQmUCnn24xtyVECaI9a4IV+LP1npw8niWUJ4pjgdAlkF4cCTu9sN+cBO15SfdACI\nzD1DaaHmpFCAWlpTo68VWlvWll6i2ncCkRJR1+q/C/yQz7asvl4AakElAoGBAMId\nxqMT2Sy9xLuHsrAoMUvBOkwaMYZH+IAb4DvUDjVIiKWjmonrmopS5Lpb+ALBKqZe\neVfD6HwWQqGwCFItToaEkZvrNfTapoNCHWWg001D49765UV5lMrArDbM1vXtFfM4\nDRYM6+Y6o/6QH8EBgXtyBxcYthIDBM3wBJa67xG1AoGAKTm8fFlMkIG0N4N3Kpbf\nnnH915GaRoBwIx2AXtd6QQ7oIRfYx95MQY/fUw7SgxcLr+btbulTCkWXwwRClUI2\nqPAdElGMcfMp56r9PaTy8EzUyu55heSJrB4ckIhEw0VAcTa/1wnlVduSd+LkZYmq\no2fOD11n5iycNXvBJF1F4LUCgYAMaRbwCi7SW3eefbiA5rDwJPRzNSGBckyC9EVL\nzezynyaNYH5a3wNMYKxa9dJPasYtSND9OXs9o7ay26xMhLUGiKc+jrUuaGRI9Asp\nGjUoNXT2JphN7s4CgHsCLep4YqYKnMTJah4S5CDj/5boIg6DM/EcGupZEHRYLkY8\n1MrAGQKBgQCi9yeC39ctLUNn+Ix604gttWWChdt3ozufTZ7HybJOSRA9Gh3iD5gm\nzlz0xqpGShKpOY2k+ftvja0poMdGeJLt84P3r2q01IgI7w0LmOj5m0W10dHysH27\nBWpCnHdBJMxnBsMRPoM4MKkmKWD9l5PSTCTWtkIpsyuDCko6D9UwZA==\n-----END RSA PRIVATE KEY-----\n", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n", "user_id": "fake" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0696063 nova-32.0.0/doc/api_samples/os-keypairs/v2.35/0000775000175000017500000000000000000000000020647 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/v2.35/keypairs-list-resp.json0000664000175000017500000000166000000000000025314 0ustar00zuulzuul00000000000000{ "keypairs": [ { "keypair": { "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", "name": "keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3", "type": "ssh", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" } } ], "keypairs_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-keypairs?limit=1&marker=keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/os-keypairs/v2.35/keypairs-list-user1-resp.json0000664000175000017500000000130000000000000026340 0ustar00zuulzuul00000000000000{ "keypairs": [ { "keypair": { "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", "name": "keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3", "type": "ssh", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" } } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/v2.35/keypairs-list-user2-resp.json0000664000175000017500000000167600000000000026361 0ustar00zuulzuul00000000000000{ "keypairs": [ { "keypair": { "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", "name": "keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3", "type": "ssh", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n" } } ], "keypairs_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-keypairs?limit=1&marker=keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3&user_id=user2", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/v2.35/keypairs-post-req.json0000664000175000017500000000021400000000000025136 0ustar00zuulzuul00000000000000{ "keypair": { "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9", "type": "ssh", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/v2.35/keypairs-post-resp.json0000664000175000017500000000450500000000000025327 0ustar00zuulzuul00000000000000{ "keypair": { "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9", "type": "ssh", "private_key": "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpAIBAAKCAQEApBdzF+fTq5QbN3R+QlO5TZr6W64GcUqcho5ZxPBZZIq53P1K\ndtpaY856ManqEwME1tN+JOw8+mmCK2RpkMHtk5BNPOMqr5Y+OQ5MqI/eX1v7GWnJ\ntHGTbi+vRDmxBh3aa3xiUGo66c9tjUKAg/ExQfFr/vKJvTR/S3urPlj3vfFgu+yi\n8PKoH0LGyHsviWsD1peDuu2XS+ca8qbkY3yD1o4Mv1R/OSF4P2fxjjWdp8R4EkoT\nJMKkhRAgAuS9zxwftPv9djP4opHWrRUlRo6bh75CzrN6Hu5uh5Tn5bkifOQcy1gW\n772vd6pBpi4OGQHPKz4djvmCLAVBzSyzDP6EKQIDAQABAoIBAQCB+tU/ZXKlIe+h\nMNTmoz1QfOe+AY625Rwx9cakGqMk4kKyC62VkgcxshfXCToSjzyhEuyEQOFYloT2\n7FY2xXb0gcS861Efv0pQlcQhbbz/GnQ/wC13ktPu3zTdPTm9l54xsFiMTGmYVaf4\n0mnMmhyjmKIsVGDJEDGZUD/oZj7wJGOFha5M4FZrZlJIrEZC0rGGlcC0kGF2no6B\nj1Mu7HjyK3pTKf4dlp+jeRikUF5Pct+qT+rcv2rZ3fl3inxtlLEwZeFPbp/njf/U\nIGxFzZsuLmiFlsJar6M5nEckTB3p25maWWaR8/0jvJRgsPnuoUrUoGDq87DMKCdk\nlw6by9fRAoGBANhnS9ko7Of+ntqIFR7xOG9p/oPATztgHkFxe4GbQ0leaDRTx3vE\ndQmUCnn24xtyVECaI9a4IV+LP1npw8niWUJ4pjgdAlkF4cCTu9sN+cBO15SfdACI\nzD1DaaHmpFCAWlpTo68VWlvWll6i2ncCkRJR1+q/C/yQz7asvl4AakElAoGBAMId\nxqMT2Sy9xLuHsrAoMUvBOkwaMYZH+IAb4DvUDjVIiKWjmonrmopS5Lpb+ALBKqZe\neVfD6HwWQqGwCFItToaEkZvrNfTapoNCHWWg001D49765UV5lMrArDbM1vXtFfM4\nDRYM6+Y6o/6QH8EBgXtyBxcYthIDBM3wBJa67xG1AoGAKTm8fFlMkIG0N4N3Kpbf\nnnH915GaRoBwIx2AXtd6QQ7oIRfYx95MQY/fUw7SgxcLr+btbulTCkWXwwRClUI2\nqPAdElGMcfMp56r9PaTy8EzUyu55heSJrB4ckIhEw0VAcTa/1wnlVduSd+LkZYmq\no2fOD11n5iycNXvBJF1F4LUCgYAMaRbwCi7SW3eefbiA5rDwJPRzNSGBckyC9EVL\nzezynyaNYH5a3wNMYKxa9dJPasYtSND9OXs9o7ay26xMhLUGiKc+jrUuaGRI9Asp\nGjUoNXT2JphN7s4CgHsCLep4YqYKnMTJah4S5CDj/5boIg6DM/EcGupZEHRYLkY8\n1MrAGQKBgQCi9yeC39ctLUNn+Ix604gttWWChdt3ozufTZ7HybJOSRA9Gh3iD5gm\nzlz0xqpGShKpOY2k+ftvja0poMdGeJLt84P3r2q01IgI7w0LmOj5m0W10dHysH27\nBWpCnHdBJMxnBsMRPoM4MKkmKWD9l5PSTCTWtkIpsyuDCko6D9UwZA==\n-----END RSA PRIVATE KEY-----\n", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0696063 nova-32.0.0/doc/api_samples/os-keypairs/v2.92/0000775000175000017500000000000000000000000020652 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/v2.92/keypairs-import-post-req.json0000664000175000017500000000061000000000000026451 0ustar00zuulzuul00000000000000{ "keypair": { "name": "me.and.myself@this.nice.domain.com mooh.", "type": "ssh", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/v2.92/keypairs-import-post-resp.json0000664000175000017500000000072200000000000026637 0ustar00zuulzuul00000000000000{ "keypair": { "fingerprint": "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c", "name": "me.and.myself@this.nice.domain.com mooh.", "type": "ssh", "public_key": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-keypairs/v2.92/keypairs-post-req.json0000664000175000017500000000014200000000000025141 0ustar00zuulzuul00000000000000{ "keypair": { "name": "foo", "type": "ssh", "user_id": "fake" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0696063 nova-32.0.0/doc/api_samples/os-lock-server/0000775000175000017500000000000000000000000020477 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-lock-server/lock-server.json0000664000175000017500000000002400000000000023622 0ustar00zuulzuul00000000000000{ "lock": null }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-lock-server/unlock-server.json0000664000175000017500000000002600000000000024167 0ustar00zuulzuul00000000000000{ "unlock": null }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0696063 nova-32.0.0/doc/api_samples/os-lock-server/v2.73/0000775000175000017500000000000000000000000021256 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-lock-server/v2.73/lock-server-with-reason.json0000664000175000017500000000007100000000000026641 0ustar00zuulzuul00000000000000{ "lock": {"locked_reason": "I don't want to work"} }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-lock-server/v2.73/lock-server.json0000664000175000017500000000002400000000000024401 0ustar00zuulzuul00000000000000{ "lock": null }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-lock-server/v2.73/unlock-server.json0000664000175000017500000000002600000000000024746 0ustar00zuulzuul00000000000000{ "unlock": null }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0696063 nova-32.0.0/doc/api_samples/os-migrate-server/0000775000175000017500000000000000000000000021177 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrate-server/live-migrate-server.json0000664000175000017500000000023200000000000025760 0ustar00zuulzuul00000000000000{ "os-migrateLive": { "host": "01c0cadef72d47e28a672a76060d492c", "block_migration": false, "disk_over_commit": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrate-server/migrate-server.json0000664000175000017500000000002700000000000025025 0ustar00zuulzuul00000000000000{ "migrate": null }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0736065 nova-32.0.0/doc/api_samples/os-migrate-server/v2.25/0000775000175000017500000000000000000000000021753 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrate-server/v2.25/live-migrate-server.json0000664000175000017500000000017000000000000026535 0ustar00zuulzuul00000000000000{ "os-migrateLive": { "host": "01c0cadef72d47e28a672a76060d492c", "block_migration": "auto" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0736065 nova-32.0.0/doc/api_samples/os-migrate-server/v2.30/0000775000175000017500000000000000000000000021747 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrate-server/v2.30/live-migrate-server.json0000664000175000017500000000022000000000000026525 0ustar00zuulzuul00000000000000{ "os-migrateLive": { "host": "01c0cadef72d47e28a672a76060d492c", "block_migration": "auto", "force": false } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0736065 nova-32.0.0/doc/api_samples/os-migrate-server/v2.56/0000775000175000017500000000000000000000000021757 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrate-server/v2.56/migrate-server-null.json0000664000175000017500000000002700000000000026555 0ustar00zuulzuul00000000000000{ "migrate": null }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrate-server/v2.56/migrate-server.json0000664000175000017500000000006300000000000025605 0ustar00zuulzuul00000000000000{ "migrate": { "host": "host1" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0736065 nova-32.0.0/doc/api_samples/os-migrate-server/v2.68/0000775000175000017500000000000000000000000021762 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrate-server/v2.68/live-migrate-server.json0000664000175000017500000000017000000000000026544 0ustar00zuulzuul00000000000000{ "os-migrateLive": { "host": "01c0cadef72d47e28a672a76060d492c", "block_migration": "auto" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0736065 nova-32.0.0/doc/api_samples/os-migrations/0000775000175000017500000000000000000000000020417 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrations/migrations-get.json0000664000175000017500000000206200000000000024243 0ustar00zuulzuul00000000000000{ "migrations": [ { "created_at": "2012-10-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1234, "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", "new_instance_type_id": 2, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "done", "updated_at": "2012-10-29T13:42:02.000000" }, { "created_at": "2013-10-22T13:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 5678, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, 
"source_compute": "compute10", "source_node": "node10", "status": "done", "updated_at": "2013-10-22T13:42:02.000000" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0736065 nova-32.0.0/doc/api_samples/os-migrations/v2.23/0000775000175000017500000000000000000000000021171 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrations/v2.23/migrations-get.json0000664000175000017500000000534100000000000025020 0ustar00zuulzuul00000000000000{ "migrations": [ { "created_at": "2016-01-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "bookmark" } ], "new_instance_type_id": 2, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "running", "migration_type": "live-migration", "updated_at": "2016-01-29T13:42:02.000000" }, { "created_at": "2016-01-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 2, "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", "new_instance_type_id": 2, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "error", "migration_type": "live-migration", "updated_at": "2016-01-29T13:42:02.000000" }, { "created_at": "2016-01-22T13:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 3, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "status": "error", "migration_type": "resize", "updated_at": "2016-01-22T13:42:02.000000" }, { "created_at": "2016-01-22T13:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 4, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "status": "migrating", "migration_type": "resize", "updated_at": "2016-01-22T13:42:02.000000" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0736065 nova-32.0.0/doc/api_samples/os-migrations/v2.59/0000775000175000017500000000000000000000000021202 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrations/v2.59/migrations-get-with-changes-since.json0000664000175000017500000000237700000000000030515 0ustar00zuulzuul00000000000000{ "migrations": [ { "created_at": "2016-06-23T14:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 4, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "status": "migrating", "migration_type": "resize", "updated_at": "2016-06-23T14:42:02.000000", "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650" }, { "created_at": 
"2016-06-23T13:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 3, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "status": "error", "migration_type": "resize", "updated_at": "2016-06-23T13:42:02.000000", "uuid": "32341d4b-346a-40d0-83c6-5f4f6892b650" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrations/v2.59/migrations-get-with-limit.json0000664000175000017500000000157300000000000027121 0ustar00zuulzuul00000000000000{ "migrations": [ { "created_at": "2016-06-23T14:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 4, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "status": "migrating", "migration_type": "resize", "updated_at": "2016-06-23T14:42:02.000000", "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650" } ], "migrations_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-migrations?limit=1&marker=42341d4b-346a-40d0-83c6-5f4f6892b650", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrations/v2.59/migrations-get-with-marker.json0000664000175000017500000000217400000000000027262 0ustar00zuulzuul00000000000000{ "migrations": [ { "created_at": "2016-01-29T11:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "bookmark" } ], "new_instance_type_id": 1, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "running", "migration_type": "live-migration", "updated_at": "2016-01-29T11:42:02.000000", "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrations/v2.59/migrations-get.json0000664000175000017500000000572100000000000025033 0ustar00zuulzuul00000000000000{ "migrations": [ { "created_at": "2016-06-23T14:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 4, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "status": "migrating", "migration_type": "resize", "updated_at": "2016-06-23T14:42:02.000000", "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650" }, { "created_at": "2016-06-23T13:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 3, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "status": "error", "migration_type": "resize", "updated_at": "2016-06-23T13:42:02.000000", "uuid": "32341d4b-346a-40d0-83c6-5f4f6892b650" }, { 
"created_at": "2016-01-29T12:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 2, "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", "new_instance_type_id": 1, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "error", "migration_type": "live-migration", "updated_at": "2016-01-29T12:42:02.000000", "uuid": "22341d4b-346a-40d0-83c6-5f4f6892b650" }, { "created_at": "2016-01-29T11:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "bookmark" } ], "new_instance_type_id": 1, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "running", "migration_type": "live-migration", "updated_at": "2016-01-29T11:42:02.000000", "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0736065 nova-32.0.0/doc/api_samples/os-migrations/v2.66/0000775000175000017500000000000000000000000021200 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrations/v2.66/migrations-get-with-changes-before.json0000664000175000017500000000217400000000000030647 0ustar00zuulzuul00000000000000{ "migrations": [ { "created_at": "2016-01-29T11:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "bookmark" } ], "new_instance_type_id": 1, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "running", "migration_type": "live-migration", "updated_at": "2016-01-29T11:42:02.000000", "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrations/v2.66/migrations-get-with-changes-since.json0000664000175000017500000000237700000000000030513 0ustar00zuulzuul00000000000000{ "migrations": [ { "created_at": "2016-06-23T14:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 4, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "status": "migrating", "migration_type": "resize", "updated_at": "2016-06-23T14:42:02.000000", "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650" }, { "created_at": "2016-06-23T13:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 3, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", 
"status": "error", "migration_type": "resize", "updated_at": "2016-06-23T13:42:02.000000", "uuid": "32341d4b-346a-40d0-83c6-5f4f6892b650" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrations/v2.66/migrations-get-with-limit.json0000664000175000017500000000157400000000000027120 0ustar00zuulzuul00000000000000 { "migrations": [ { "created_at": "2016-06-23T14:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 4, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "status": "migrating", "migration_type": "resize", "updated_at": "2016-06-23T14:42:02.000000", "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650" } ], "migrations_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-migrations?limit=1&marker=42341d4b-346a-40d0-83c6-5f4f6892b650", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrations/v2.66/migrations-get-with-marker.json0000664000175000017500000000217400000000000027260 0ustar00zuulzuul00000000000000{ "migrations": [ { "created_at": "2016-01-29T11:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "bookmark" } ], "new_instance_type_id": 1, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "running", "migration_type": "live-migration", "updated_at": "2016-01-29T11:42:02.000000", "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrations/v2.66/migrations-get.json0000664000175000017500000000572100000000000025031 0ustar00zuulzuul00000000000000{ "migrations": [ { "created_at": "2016-06-23T14:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 4, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "status": "migrating", "migration_type": "resize", "updated_at": "2016-06-23T14:42:02.000000", "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650" }, { "created_at": "2016-06-23T13:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 3, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "status": "error", "migration_type": "resize", "updated_at": "2016-06-23T13:42:02.000000", "uuid": "32341d4b-346a-40d0-83c6-5f4f6892b650" }, { "created_at": "2016-01-29T12:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 2, "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", "new_instance_type_id": 1, "old_instance_type_id": 1, "source_compute": "compute1", 
"source_node": "node1", "status": "error", "migration_type": "live-migration", "updated_at": "2016-01-29T12:42:02.000000", "uuid": "22341d4b-346a-40d0-83c6-5f4f6892b650" }, { "created_at": "2016-01-29T11:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "bookmark" } ], "new_instance_type_id": 1, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "running", "migration_type": "live-migration", "updated_at": "2016-01-29T11:42:02.000000", "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0776064 nova-32.0.0/doc/api_samples/os-migrations/v2.80/0000775000175000017500000000000000000000000021174 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrations/v2.80/migrations-get-with-changes-before.json0000664000175000017500000000237500000000000030646 0ustar00zuulzuul00000000000000{ "migrations": [ { "created_at": "2016-01-29T11:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "bookmark" } ], "new_instance_type_id": 1, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "running", "migration_type": "live-migration", "updated_at": "2016-01-29T11:42:02.000000", "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650", "user_id": "5c48ebaa-193f-4c5d-948a-f559cc92cd5e", "project_id": "ef92ccff-00f3-46e4-b015-811110e36ee4" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrations/v2.80/migrations-get-with-changes-since.json0000664000175000017500000000300100000000000030470 0ustar00zuulzuul00000000000000{ "migrations": [ { "created_at": "2016-06-23T14:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 4, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "status": "migrating", "migration_type": "resize", "updated_at": "2016-06-23T14:42:02.000000", "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650", "user_id": "78348f0e-97ee-4d70-ad34-189692673ea2", "project_id": "9842f0f7-1229-4355-afe7-15ebdbb8c3d8" }, { "created_at": "2016-06-23T13:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 3, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "status": "error", "migration_type": "resize", "updated_at": 
"2016-06-23T13:42:02.000000", "uuid": "32341d4b-346a-40d0-83c6-5f4f6892b650", "user_id": "78348f0e-97ee-4d70-ad34-189692673ea2", "project_id": "9842f0f7-1229-4355-afe7-15ebdbb8c3d8" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrations/v2.80/migrations-get-with-limit.json0000664000175000017500000000177500000000000027117 0ustar00zuulzuul00000000000000 { "migrations": [ { "created_at": "2016-06-23T14:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 4, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "status": "migrating", "migration_type": "resize", "updated_at": "2016-06-23T14:42:02.000000", "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650", "user_id": "78348f0e-97ee-4d70-ad34-189692673ea2", "project_id": "9842f0f7-1229-4355-afe7-15ebdbb8c3d8" } ], "migrations_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-migrations?limit=1&marker=42341d4b-346a-40d0-83c6-5f4f6892b650", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrations/v2.80/migrations-get-with-marker.json0000664000175000017500000000237500000000000027257 0ustar00zuulzuul00000000000000{ "migrations": [ { "created_at": "2016-01-29T11:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "bookmark" } ], "new_instance_type_id": 1, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "running", "migration_type": "live-migration", "updated_at": "2016-01-29T11:42:02.000000", "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650", "user_id": "5c48ebaa-193f-4c5d-948a-f559cc92cd5e", "project_id": "ef92ccff-00f3-46e4-b015-811110e36ee4" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrations/v2.80/migrations-get-with-user-or-project-id.json0000664000175000017500000000376100000000000031430 0ustar00zuulzuul00000000000000{ "migrations": [ { "created_at": "2016-01-29T12:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 2, "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", "new_instance_type_id": 1, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "error", "migration_type": "live-migration", "updated_at": "2016-01-29T12:42:02.000000", "uuid": "22341d4b-346a-40d0-83c6-5f4f6892b650", "user_id": "5c48ebaa-193f-4c5d-948a-f559cc92cd5e", "project_id": "ef92ccff-00f3-46e4-b015-811110e36ee4" }, { "created_at": "2016-01-29T11:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", 
"rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "bookmark" } ], "new_instance_type_id": 1, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "running", "migration_type": "live-migration", "updated_at": "2016-01-29T11:42:02.000000", "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650", "user_id": "5c48ebaa-193f-4c5d-948a-f559cc92cd5e", "project_id": "ef92ccff-00f3-46e4-b015-811110e36ee4" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-migrations/v2.80/migrations-get.json0000664000175000017500000000672500000000000025032 0ustar00zuulzuul00000000000000{ "migrations": [ { "created_at": "2016-06-23T14:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 4, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "status": "migrating", "migration_type": "resize", "updated_at": "2016-06-23T14:42:02.000000", "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650", "user_id": "78348f0e-97ee-4d70-ad34-189692673ea2", "project_id": "9842f0f7-1229-4355-afe7-15ebdbb8c3d8" }, { "created_at": "2016-06-23T13:42:02.000000", "dest_compute": "compute20", "dest_host": "5.6.7.8", "dest_node": "node20", "id": 3, "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", "status": "error", "migration_type": "resize", "updated_at": "2016-06-23T13:42:02.000000", "uuid": "32341d4b-346a-40d0-83c6-5f4f6892b650", "user_id": "78348f0e-97ee-4d70-ad34-189692673ea2", "project_id": "9842f0f7-1229-4355-afe7-15ebdbb8c3d8" }, { "created_at": "2016-01-29T12:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 2, "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", "new_instance_type_id": 1, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "error", "migration_type": "live-migration", "updated_at": "2016-01-29T12:42:02.000000", "uuid": "22341d4b-346a-40d0-83c6-5f4f6892b650", "user_id": "5c48ebaa-193f-4c5d-948a-f559cc92cd5e", "project_id": "ef92ccff-00f3-46e4-b015-811110e36ee4" }, { "created_at": "2016-01-29T11:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", "rel": "bookmark" } ], "new_instance_type_id": 1, "old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", "status": "running", "migration_type": "live-migration", "updated_at": "2016-01-29T11:42:02.000000", "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650", "user_id": "5c48ebaa-193f-4c5d-948a-f559cc92cd5e", "project_id": "ef92ccff-00f3-46e4-b015-811110e36ee4" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0776064 nova-32.0.0/doc/api_samples/os-multinic/0000775000175000017500000000000000000000000020067 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-multinic/multinic-add-fixed-ip-req.json0000664000175000017500000000013100000000000025617 0ustar00zuulzuul00000000000000{ "addFixedIp": { "networkId": "e1882e38-38c2-4239-ade7-35d644cb963a" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-multinic/multinic-remove-fixed-ip-req.json0000664000175000017500000000007700000000000026375 0ustar00zuulzuul00000000000000{ "removeFixedIp": { "address": "10.0.0.4" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0776064 nova-32.0.0/doc/api_samples/os-multiple-create/0000775000175000017500000000000000000000000021337 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json0000664000175000017500000000041700000000000030467 0ustar00zuulzuul00000000000000{ "server": { "name": "new-server-test", "imageRef": "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef": "1", "metadata": { "My Server Name": "Apache1" }, "min_count": "2", "max_count": "3" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json0000664000175000017500000000124500000000000030651 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "wfksH3GTTseP", "id": "440cf918-3ee0-4143-b289-f63e1d2000e6", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/440cf918-3ee0-4143-b289-f63e1d2000e6", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/440cf918-3ee0-4143-b289-f63e1d2000e6", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-multiple-create/multiple-create-post-req.json0000664000175000017500000000047000000000000027077 0ustar00zuulzuul00000000000000{ "server": { "name": "new-server-test", "imageRef": "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef": "1", "metadata": { "My Server Name": "Apache1" }, "return_reservation_id": "True", "min_count": "2", "max_count": "3" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-multiple-create/multiple-create-post-resp.json0000664000175000017500000000004600000000000027260 0ustar00zuulzuul00000000000000{ "reservation_id": "r-3fhpjulh" }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0776064 nova-32.0.0/doc/api_samples/os-networks/0000775000175000017500000000000000000000000020117 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-networks/network-add-req.json0000664000175000017500000000002200000000000024010 0ustar00zuulzuul00000000000000{ "id": "1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
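
The os-multiple-create samples above pair a boot request carrying min_count/max_count (and optionally return_reservation_id) with the reservation-id style response. Purely as an illustrative sketch, and assuming the python-requests library plus a valid Keystone token and compute endpoint (NOVA_URL and TOKEN below are placeholders, not values from the samples), such a request could be submitted like this:

    import requests

    NOVA_URL = "http://openstack.example.com/v2.1"   # placeholder compute endpoint
    TOKEN = "..."                                     # placeholder Keystone token

    # Body mirroring multiple-create-post-req.json: ask for 2-3 identical servers
    # and request a reservation id instead of a full server document.
    body = {
        "server": {
            "name": "new-server-test",
            "imageRef": "70a599e0-31e7-49b7-b260-868f441e862b",
            "flavorRef": "1",
            "metadata": {"My Server Name": "Apache1"},
            "return_reservation_id": "True",
            "min_count": "2",
            "max_count": "3",
        }
    }
    resp = requests.post(f"{NOVA_URL}/servers", json=body,
                         headers={"X-Auth-Token": TOKEN})
    print(resp.json())   # e.g. {"reservation_id": "r-3fhpjulh"}, as in the sample response

Omitting return_reservation_id yields a single server document instead, as the no-resv sample pair shows.
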
nova-32.0.0/doc/api_samples/os-networks/network-create-req.json0000664000175000017500000000044500000000000024534 0ustar00zuulzuul00000000000000{ "network": { "label": "new net 111", "cidr": "10.20.105.0/24", "mtu": 9000, "dhcp_server": "10.20.105.2", "enable_dhcp": false, "share_address": true, "allowed_start": "10.20.105.10", "allowed_end": "10.20.105.200" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-networks/network-create-resp.json0000664000175000017500000000173300000000000024717 0ustar00zuulzuul00000000000000{ "network": { "bridge": null, "bridge_interface": null, "broadcast": "10.20.105.255", "cidr": "10.20.105.0/24", "cidr_v6": null, "created_at": null, "deleted": null, "deleted_at": null, "dhcp_server": "10.20.105.2", "dhcp_start": "10.20.105.2", "dns1": null, "dns2": null, "enable_dhcp": false, "gateway": "10.20.105.1", "gateway_v6": null, "host": null, "id": "d7a17c0c-457e-4ab4-a99c-4fa1762f5359", "injected": null, "label": "new net 111", "mtu": 9000, "multi_host": null, "netmask": "255.255.255.0", "netmask_v6": null, "priority": null, "project_id": null, "rxtx_base": null, "share_address": true, "updated_at": null, "vlan": null, "vpn_private_address": null, "vpn_public_address": null, "vpn_public_port": null } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-networks/network-show-resp.json0000664000175000017500000000163200000000000024432 0ustar00zuulzuul00000000000000{ "network": { "bridge": null, "bridge_interface": null, "broadcast": null, "cidr": null, "cidr_v6": null, "created_at": null, "deleted": null, "deleted_at": null, "dhcp_server": null, "dhcp_start": null, "dns1": null, "dns2": null, "enable_dhcp": null, "gateway": null, "gateway_v6": null, "host": null, "id": "20c8acc0-f747-4d71-a389-46d078ebf047", "injected": null, "label": "private", "mtu": null, "multi_host": null, "netmask": null, "netmask_v6": null, "priority": null, "project_id": null, "rxtx_base": null, "share_address": null, "updated_at": null, "vlan": null, "vpn_private_address": null, "vpn_public_address": null, "vpn_public_port": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-networks/networks-list-resp.json0000664000175000017500000000205700000000000024612 0ustar00zuulzuul00000000000000{ "networks": [ { "bridge": null, "bridge_interface": null, "broadcast": null, "cidr": null, "cidr_v6": null, "created_at": null, "deleted": null, "deleted_at": null, "dhcp_server": null, "dhcp_start": null, "dns1": null, "dns2": null, "enable_dhcp": null, "gateway": null, "gateway_v6": null, "host": null, "id": "20c8acc0-f747-4d71-a389-46d078ebf047", "injected": null, "label": "private", "mtu": null, "multi_host": null, "netmask": null, "netmask_v6": null, "priority": null, "project_id": null, "rxtx_base": null, "share_address": null, "updated_at": null, "vlan": null, "vpn_private_address": null, "vpn_public_address": null, "vpn_public_port": null } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0816064 nova-32.0.0/doc/api_samples/os-networks-associate/0000775000175000017500000000000000000000000022070 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
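
The os-networks samples above (create, show, list) describe the legacy network resources exposed through the compute API; note that most attributes come back as null when the backing network service does not report them. As a minimal, illustrative sketch only (placeholder endpoint and token, python-requests assumed), listing and showing networks could look like:

    import requests

    NOVA_URL = "http://openstack.example.com/v2.1"    # placeholder
    TOKEN = "..."                                      # placeholder

    # List networks; the response shape matches networks-list-resp.json.
    resp = requests.get(f"{NOVA_URL}/os-networks", headers={"X-Auth-Token": TOKEN})
    for net in resp.json()["networks"]:
        print(net["id"], net["label"], net["cidr"])

    # Show a single network, as in network-show-resp.json.
    one = requests.get(
        f"{NOVA_URL}/os-networks/20c8acc0-f747-4d71-a389-46d078ebf047",
        headers={"X-Auth-Token": TOKEN},
    ).json()["network"]
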
nova-32.0.0/doc/api_samples/os-networks-associate/network-associate-host-req.json0000664000175000017500000000004400000000000030163 0ustar00zuulzuul00000000000000{ "associate_host": "testHost" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-networks-associate/network-disassociate-host-req.json0000664000175000017500000000004100000000000030660 0ustar00zuulzuul00000000000000{ "disassociate_host": null }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-networks-associate/network-disassociate-project-req.json0000664000175000017500000000004400000000000031354 0ustar00zuulzuul00000000000000{ "disassociate_project": null }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-networks-associate/network-disassociate-req.json0000664000175000017500000000003400000000000027707 0ustar00zuulzuul00000000000000{ "disassociate": null }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0816064 nova-32.0.0/doc/api_samples/os-pause-server/0000775000175000017500000000000000000000000020664 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-pause-server/pause-server.json0000664000175000017500000000002500000000000024175 0ustar00zuulzuul00000000000000{ "pause": null }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-pause-server/unpause-server.json0000664000175000017500000000002700000000000024542 0ustar00zuulzuul00000000000000{ "unpause": null }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0816064 nova-32.0.0/doc/api_samples/os-preserve-ephemeral-rebuild/0000775000175000017500000000000000000000000023462 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=nova-32.0.0/doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral-resp.json 22 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral-r0000664000175000017500000000343100000000000033643 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "version": 4 } ] }, "adminPass": "seekr3t", "created": "2013-12-30T12:28:14Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "ee8ea077f8548ce25c59c2d5020d0f82810c815c210fd68194a5c0f8", "id": "810e78d5-47fe-48bf-9559-bfe5dc918685", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/810e78d5-47fe-48bf-9559-bfe5dc918685", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/810e78d5-47fe-48bf-9559-bfe5dc918685", "rel": "bookmark" } ], "metadata": { "meta_var": "meta_val" }, 
"name": "foobar", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-12-30T12:28:15Z", "user_id": "fake" } }././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=nova-32.0.0/doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral.json 22 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral.j0000664000175000017500000000037100000000000033634 0ustar00zuulzuul00000000000000{ "rebuild": { "imageRef": "70a599e0-31e7-49b7-b260-868f441e862b", "name": "foobar", "adminPass": "seekr3t", "metadata": { "meta_var": "meta_val" }, "preserve_ephemeral": false } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0816064 nova-32.0.0/doc/api_samples/os-quota-class-sets/0000775000175000017500000000000000000000000021453 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json0000664000175000017500000000064700000000000027643 0ustar00zuulzuul00000000000000{ "quota_class_set": { "cores": 20, "fixed_ips": -1, "floating_ips": -1, "id": "test_class", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": -1, "security_groups": -1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.json0000664000175000017500000000061300000000000030162 0ustar00zuulzuul00000000000000{ "quota_class_set": { "instances": 50, "cores": 50, "ram": 51200, "floating_ips": -1, "fixed_ips": -1, "metadata_items": 128, "injected_files": 5, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "security_groups": -1, "security_group_rules": -1, "key_pairs": 100 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json0000664000175000017500000000061300000000000030344 0ustar00zuulzuul00000000000000{ "quota_class_set": { "cores": 50, "fixed_ips": -1, "floating_ips": -1, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 50, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": -1, "security_groups": -1 } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0816064 nova-32.0.0/doc/api_samples/os-quota-class-sets/v2.50/0000775000175000017500000000000000000000000022225 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-class-sets/v2.50/quota-classes-show-get-resp.json0000664000175000017500000000056000000000000030407 0ustar00zuulzuul00000000000000{ "quota_class_set": { "cores": 20, "id": "test_class", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-class-sets/v2.50/quota-classes-update-post-req.json0000664000175000017500000000052400000000000030735 0ustar00zuulzuul00000000000000{ "quota_class_set": { "instances": 50, "cores": 50, "ram": 51200, "metadata_items": 128, "injected_files": 5, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "key_pairs": 100, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-class-sets/v2.50/quota-classes-update-post-resp.json0000664000175000017500000000052400000000000031117 0ustar00zuulzuul00000000000000{ "quota_class_set": { "cores": 50, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 50, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0816064 nova-32.0.0/doc/api_samples/os-quota-class-sets/v2.57/0000775000175000017500000000000000000000000022234 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-class-sets/v2.57/quota-classes-show-get-resp.json0000664000175000017500000000037400000000000030421 0ustar00zuulzuul00000000000000{ "quota_class_set": { "cores": 20, "id": "test_class", "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-class-sets/v2.57/quota-classes-update-post-req.json0000664000175000017500000000034000000000000030740 0ustar00zuulzuul00000000000000{ "quota_class_set": { "instances": 50, "cores": 50, "ram": 51200, "metadata_items": 128, "key_pairs": 100, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-class-sets/v2.57/quota-classes-update-post-resp.json0000664000175000017500000000034000000000000031122 0ustar00zuulzuul00000000000000{ "quota_class_set": { "cores": 50, "instances": 50, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0856066 nova-32.0.0/doc/api_samples/os-quota-sets/0000775000175000017500000000000000000000000020350 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json0000664000175000017500000000074300000000000027072 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "fixed_ips": -1, "floating_ips": -1, "id": "fake_tenant", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": -1, "security_groups": -1, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
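
The os-quota-class-sets samples above show the same show/update operations at the base version and at microversions 2.50 and 2.57, with progressively fewer keys in the request and response bodies. As an illustrative sketch only (endpoint, token, and class name are placeholders; the microversion is requested via nova's X-OpenStack-Nova-API-Version header), updating a quota class at 2.57 might look like:

    import requests

    NOVA_URL = "http://openstack.example.com/v2.1"   # placeholder
    TOKEN = "..."                                     # placeholder

    # Body taken from the v2.57 quota-classes-update-post-req.json sample.
    body = {
        "quota_class_set": {
            "instances": 50,
            "cores": 50,
            "ram": 51200,
            "metadata_items": 128,
            "key_pairs": 100,
            "server_groups": 10,
            "server_group_members": 10,
        }
    }
    resp = requests.put(
        f"{NOVA_URL}/os-quota-class-sets/test_class",
        json=body,
        headers={"X-Auth-Token": TOKEN,
                 "X-OpenStack-Nova-API-Version": "2.57"},
    )
    print(resp.json()["quota_class_set"])
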
nova-32.0.0/doc/api_samples/os-quota-sets/quotas-show-detail-get-resp.json0000664000175000017500000000321100000000000026516 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": { "in_use": 0, "limit": 20, "reserved": 0 }, "fixed_ips": { "in_use": 0, "limit": -1, "reserved": 0 }, "floating_ips": { "in_use": 0, "limit": -1, "reserved": 0 }, "id": "fake_tenant", "injected_file_content_bytes": { "in_use": 0, "limit": 10240, "reserved": 0 }, "injected_file_path_bytes": { "in_use": 0, "limit": 255, "reserved": 0 }, "injected_files": { "in_use": 0, "limit": 5, "reserved": 0 }, "instances": { "in_use": 0, "limit": 10, "reserved": 0 }, "key_pairs": { "in_use": 0, "limit": 100, "reserved": 0 }, "metadata_items": { "in_use": 0, "limit": 128, "reserved": 0 }, "ram": { "in_use": 0, "limit": 51200, "reserved": 0 }, "security_group_rules": { "in_use": 0, "limit": -1, "reserved": 0 }, "security_groups": { "in_use": 0, "limit": -1, "reserved": 0 }, "server_group_members": { "in_use": 0, "limit": 10, "reserved": 0 }, "server_groups": { "in_use": 0, "limit": 10, "reserved": 0 } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/quotas-show-get-resp.json0000664000175000017500000000074300000000000025265 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "fixed_ips": -1, "floating_ips": -1, "id": "fake_tenant", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": -1, "security_groups": -1, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/quotas-update-force-post-req.json0000664000175000017500000000011500000000000026700 0ustar00zuulzuul00000000000000{ "quota_set": { "force": "True", "instances": 45 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/quotas-update-force-post-resp.json0000664000175000017500000000070600000000000027070 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "fixed_ips": -1, "floating_ips": -1, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 45, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": -1, "security_groups": -1, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/quotas-update-post-req.json0000664000175000017500000000006100000000000025604 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 45 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/quotas-update-post-resp.json0000664000175000017500000000070600000000000025774 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 45, "fixed_ips": -1, "floating_ips": -1, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": -1, "security_groups": -1, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/user-quotas-show-get-resp.json0000664000175000017500000000074300000000000026241 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "fixed_ips": -1, "floating_ips": -1, "id": "fake_tenant", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": -1, "security_groups": -1, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/user-quotas-update-post-req.json0000664000175000017500000000011400000000000026557 0ustar00zuulzuul00000000000000{ "quota_set": { "force": "True", "instances": 9 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/user-quotas-update-post-resp.json0000664000175000017500000000070500000000000026747 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "fixed_ips": -1, "floating_ips": -1, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 9, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": -1, "security_groups": -1, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0896065 nova-32.0.0/doc/api_samples/os-quota-sets/v2.36/0000775000175000017500000000000000000000000021126 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/v2.36/quotas-show-defaults-get-resp.json0000664000175000017500000000055300000000000027647 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "id": "fake_tenant", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/v2.36/quotas-show-detail-get-resp.json0000664000175000017500000000227500000000000027305 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": { "in_use": 0, "limit": 20, "reserved": 0 }, "id": "fake_tenant", "injected_file_content_bytes": { "in_use": 0, "limit": 10240, "reserved": 0 }, "injected_file_path_bytes": { "in_use": 0, "limit": 255, "reserved": 0 }, "injected_files": { "in_use": 0, "limit": 5, "reserved": 0 }, "instances": { "in_use": 0, "limit": 10, "reserved": 0 }, "key_pairs": { "in_use": 0, "limit": 100, "reserved": 0 }, "metadata_items": { "in_use": 0, "limit": 128, "reserved": 0 }, "ram": { "in_use": 0, "limit": 51200, "reserved": 0 }, "server_group_members": { "in_use": 0, "limit": 10, "reserved": 0 }, "server_groups": { "in_use": 0, "limit": 10, "reserved": 0 } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/v2.36/quotas-show-get-resp.json0000664000175000017500000000055300000000000026042 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "id": "fake_tenant", "injected_file_content_bytes": 10240, 
"injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/v2.36/quotas-update-force-post-req.json0000664000175000017500000000011500000000000027456 0ustar00zuulzuul00000000000000{ "quota_set": { "force": "True", "instances": 45 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/v2.36/quotas-update-force-post-resp.json0000664000175000017500000000051600000000000027645 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 45, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/v2.36/quotas-update-post-req.json0000664000175000017500000000006400000000000026365 0ustar00zuulzuul00000000000000{ "quota_set": { "instances": 45 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/v2.36/quotas-update-post-resp.json0000664000175000017500000000051600000000000026551 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 45, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/v2.36/user-quotas-show-get-resp.json0000664000175000017500000000055300000000000027016 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "id": "fake_tenant", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/v2.36/user-quotas-update-post-req.json0000664000175000017500000000011400000000000027335 0ustar00zuulzuul00000000000000{ "quota_set": { "force": "True", "instances": 9 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/v2.36/user-quotas-update-post-resp.json0000664000175000017500000000051500000000000027524 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 9, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0896065 nova-32.0.0/doc/api_samples/os-quota-sets/v2.57/0000775000175000017500000000000000000000000021131 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/os-quota-sets/v2.57/quotas-show-defaults-get-resp.json0000664000175000017500000000036700000000000027655 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "id": "fake_tenant", "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/v2.57/quotas-show-detail-get-resp.json0000664000175000017500000000151200000000000027301 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": { "in_use": 0, "limit": 20, "reserved": 0 }, "id": "fake_tenant", "instances": { "in_use": 0, "limit": 10, "reserved": 0 }, "key_pairs": { "in_use": 0, "limit": 100, "reserved": 0 }, "metadata_items": { "in_use": 0, "limit": 128, "reserved": 0 }, "ram": { "in_use": 0, "limit": 51200, "reserved": 0 }, "server_group_members": { "in_use": 0, "limit": 10, "reserved": 0 }, "server_groups": { "in_use": 0, "limit": 10, "reserved": 0 } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/v2.57/quotas-show-get-resp.json0000664000175000017500000000036700000000000026050 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "id": "fake_tenant", "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/v2.57/quotas-update-force-post-req.json0000664000175000017500000000011500000000000027461 0ustar00zuulzuul00000000000000{ "quota_set": { "force": "True", "instances": 45 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/v2.57/quotas-update-force-post-resp.json0000664000175000017500000000033200000000000027644 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "instances": 45, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/v2.57/quotas-update-post-req.json0000664000175000017500000000006400000000000026370 0ustar00zuulzuul00000000000000{ "quota_set": { "instances": 20 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/v2.57/quotas-update-post-resp.json0000664000175000017500000000033200000000000026550 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "instances": 20, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/v2.57/user-quotas-show-get-resp.json0000664000175000017500000000036700000000000027024 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "id": "fake_tenant", "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
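
The os-quota-sets samples above cover showing defaults, detailed usage (in_use/limit/reserved), and updates, including a forced update; the user-quotas variants scope the same operations to a single user. A minimal sketch of the forced update, with placeholder endpoint, token, and project id, and python-requests assumed:

    import requests

    NOVA_URL = "http://openstack.example.com/v2.1"   # placeholder
    TOKEN = "..."                                     # placeholder
    TENANT = "fake_tenant"                            # placeholder project id

    # Body mirroring quotas-update-force-post-req.json; "force": "True" applies
    # the new limit even where it would otherwise be rejected.
    body = {"quota_set": {"force": "True", "instances": 45}}
    resp = requests.put(f"{NOVA_URL}/os-quota-sets/{TENANT}", json=body,
                        headers={"X-Auth-Token": TOKEN})
    print(resp.json()["quota_set"]["instances"])      # 45 on success

    # The user-quotas samples correspond to the same call scoped to one user,
    # e.g. requests.put(f"{NOVA_URL}/os-quota-sets/{TENANT}?user_id=<uuid>", ...).
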
nova-32.0.0/doc/api_samples/os-quota-sets/v2.57/user-quotas-update-post-req.json0000664000175000017500000000006300000000000027343 0ustar00zuulzuul00000000000000{ "quota_set": { "instances": 9 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets/v2.57/user-quotas-update-post-resp.json0000664000175000017500000000033100000000000027523 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "instances": 9, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "server_groups": 10, "server_group_members": 10 } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0856066 nova-32.0.0/doc/api_samples/os-quota-sets-noop/0000775000175000017500000000000000000000000021321 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets-noop/quotas-show-defaults-get-resp.json0000664000175000017500000000073300000000000030042 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": -1, "fixed_ips": -1, "floating_ips": -1, "id": "fake_tenant", "injected_file_content_bytes": -1, "injected_file_path_bytes": -1, "injected_files": -1, "instances": -1, "key_pairs": -1, "metadata_items": -1, "ram": -1, "security_group_rules": -1, "security_groups": -1, "server_group_members": -1, "server_groups": -1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets-noop/quotas-show-detail-get-resp.json0000664000175000017500000000323500000000000027475 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": { "in_use": -1, "limit": -1, "reserved": -1 }, "fixed_ips": { "in_use": -1, "limit": -1, "reserved": -1 }, "floating_ips": { "in_use": -1, "limit": -1, "reserved": -1 }, "id": "fake_tenant", "injected_file_content_bytes": { "in_use": -1, "limit": -1, "reserved": -1 }, "injected_file_path_bytes": { "in_use": -1, "limit": -1, "reserved": -1 }, "injected_files": { "in_use": -1, "limit": -1, "reserved": -1 }, "instances": { "in_use": -1, "limit": -1, "reserved": -1 }, "key_pairs": { "in_use": -1, "limit": -1, "reserved": -1 }, "metadata_items": { "in_use": -1, "limit": -1, "reserved": -1 }, "ram": { "in_use": -1, "limit": -1, "reserved": -1 }, "security_group_rules": { "in_use": -1, "limit": -1, "reserved": -1 }, "security_groups": { "in_use": -1, "limit": -1, "reserved": -1 }, "server_group_members": { "in_use": -1, "limit": -1, "reserved": -1 }, "server_groups": { "in_use": -1, "limit": -1, "reserved": -1 } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets-noop/quotas-show-get-resp.json0000664000175000017500000000073300000000000026235 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": -1, "fixed_ips": -1, "floating_ips": -1, "id": "fake_tenant", "injected_file_content_bytes": -1, "injected_file_path_bytes": -1, "injected_files": -1, "instances": -1, "key_pairs": -1, "metadata_items": -1, "ram": -1, "security_group_rules": -1, "security_groups": -1, "server_group_members": -1, "server_groups": -1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets-noop/quotas-update-force-post-req.json0000664000175000017500000000011500000000000027651 
0ustar00zuulzuul00000000000000{ "quota_set": { "force": "True", "instances": 45 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets-noop/quotas-update-force-post-resp.json0000664000175000017500000000067600000000000030047 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": -1, "fixed_ips": -1, "floating_ips": -1, "injected_file_content_bytes": -1, "injected_file_path_bytes": -1, "injected_files": -1, "instances": -1, "key_pairs": -1, "metadata_items": -1, "ram": -1, "security_group_rules": -1, "security_groups": -1, "server_group_members": -1, "server_groups": -1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets-noop/quotas-update-post-req.json0000664000175000017500000000007200000000000026557 0ustar00zuulzuul00000000000000{ "quota_set": { "security_groups": 45 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets-noop/quotas-update-post-resp.json0000664000175000017500000000067600000000000026753 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": -1, "fixed_ips": -1, "floating_ips": -1, "injected_file_content_bytes": -1, "injected_file_path_bytes": -1, "injected_files": -1, "instances": -1, "key_pairs": -1, "metadata_items": -1, "ram": -1, "security_group_rules": -1, "security_groups": -1, "server_group_members": -1, "server_groups": -1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets-noop/user-quotas-show-get-resp.json0000664000175000017500000000073300000000000027211 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": -1, "fixed_ips": -1, "floating_ips": -1, "id": "fake_tenant", "injected_file_content_bytes": -1, "injected_file_path_bytes": -1, "injected_files": -1, "instances": -1, "key_pairs": -1, "metadata_items": -1, "ram": -1, "security_group_rules": -1, "security_groups": -1, "server_group_members": -1, "server_groups": -1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets-noop/user-quotas-update-post-req.json0000664000175000017500000000011400000000000027530 0ustar00zuulzuul00000000000000{ "quota_set": { "force": "True", "instances": 9 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-quota-sets-noop/user-quotas-update-post-resp.json0000664000175000017500000000067600000000000027727 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": -1, "fixed_ips": -1, "floating_ips": -1, "injected_file_content_bytes": -1, "injected_file_path_bytes": -1, "injected_files": -1, "instances": -1, "key_pairs": -1, "metadata_items": -1, "ram": -1, "security_group_rules": -1, "security_groups": -1, "server_group_members": -1, "server_groups": -1 } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0936065 nova-32.0.0/doc/api_samples/os-remote-consoles/0000775000175000017500000000000000000000000021361 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/os-remote-consoles/get-rdp-console-post-req.json0000664000175000017500000000010000000000000027015 0ustar00zuulzuul00000000000000{ "os-getRDPConsole": { "type": "rdp-html5" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-remote-consoles/get-rdp-console-post-resp.json0000664000175000017500000000021300000000000027204 0ustar00zuulzuul00000000000000{ "console": { "type": "rdp-html5", "url": "http://127.0.0.1:6083/?token=191996c3-7b0f-42f3-95a7-f1839f2da6ed" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-remote-consoles/get-serial-console-post-req.json0000664000175000017500000000010000000000000027507 0ustar00zuulzuul00000000000000{ "os-getSerialConsole": { "type": "serial" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-remote-consoles/get-serial-console-post-resp.json0000664000175000017500000000020500000000000027677 0ustar00zuulzuul00000000000000{ "console": { "type": "serial", "url":"ws://127.0.0.1:6083/?token=f9906a48-b71e-4f18-baca-c987da3ebdb3" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-remote-consoles/get-spice-console-post-req.json0000664000175000017500000000010300000000000027336 0ustar00zuulzuul00000000000000{ "os-getSPICEConsole": { "type": "spice-html5" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-remote-consoles/get-spice-console-post-resp.json0000664000175000017500000000023300000000000027524 0ustar00zuulzuul00000000000000{ "console": { "type": "spice-html5", "url": "http://127.0.0.1:6082/spice_auto.html?token=a30e5d08-6a20-4043-958f-0852440c6af4" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-remote-consoles/get-vnc-console-post-req.json0000664000175000017500000000007300000000000027027 0ustar00zuulzuul00000000000000{ "os-getVNCConsole": { "type": "novnc" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-remote-consoles/get-vnc-console-post-resp.json0000664000175000017500000000023500000000000027211 0ustar00zuulzuul00000000000000{ "console": { "type": "novnc", "url": "http://127.0.0.1:6080/vnc_auto.html?path=%3Ftoken%3Ddaae261f-474d-4cae-8f6a-1865278ed8c9" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0936065 nova-32.0.0/doc/api_samples/os-remote-consoles/v2.6/0000775000175000017500000000000000000000000022054 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-remote-consoles/v2.6/create-rdp-console-req.json0000664000175000017500000000013000000000000027214 0ustar00zuulzuul00000000000000{ "remote_console": { "protocol": "rdp", "type": "rdp-html5" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-req.json0000664000175000017500000000012500000000000027221 0ustar00zuulzuul00000000000000{ 
"remote_console": { "protocol": "vnc", "type": "novnc" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-resp.json0000664000175000017500000000030200000000000027400 0ustar00zuulzuul00000000000000{ "remote_console": { "protocol": "vnc", "type": "novnc", "url": "http://example.com:6080/vnc_auto.html?path=%3Ftoken%3Db60bcfc3-5fd4-4d21-986c-e83379107819" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0936065 nova-32.0.0/doc/api_samples/os-remote-consoles/v2.8/0000775000175000017500000000000000000000000022056 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-remote-consoles/v2.8/create-mks-console-req.json0000664000175000017500000000012600000000000027230 0ustar00zuulzuul00000000000000{ "remote_console": { "protocol": "mks", "type": "webmks" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-remote-consoles/v2.8/create-mks-console-resp.json0000664000175000017500000000026400000000000027415 0ustar00zuulzuul00000000000000{ "remote_console": { "protocol": "mks", "type": "webmks", "url": "http://example.com:6090/mks.html?token=b60bcfc3-5fd4-4d21-986c-e83379107819" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0936065 nova-32.0.0/doc/api_samples/os-remote-consoles/v2.99/0000775000175000017500000000000000000000000022150 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-remote-consoles/v2.99/create-spice-direct-console-req.json0000664000175000017500000000013600000000000031104 0ustar00zuulzuul00000000000000{ "remote_console": { "protocol": "spice", "type": "spice-direct" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-remote-consoles/v2.99/create-spice-direct-console-resp.json0000664000175000017500000000027000000000000031265 0ustar00zuulzuul00000000000000{ "remote_console": { "protocol": "spice", "type": "spice-direct", "url": "http://127.0.0.1:13002/nova?token=aeabd4ec-3acb-4898-9130-10521ccbe5f3" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0976067 nova-32.0.0/doc/api_samples/os-rescue/0000775000175000017500000000000000000000000017531 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-rescue/server-get-resp-rescue.json0000664000175000017500000000450700000000000024750 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-18T07:22:09Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "f04994c5b4aac1cacbb83b09c2506e457d97dd54f620961624574690", "id": "2fd0c66b-50af-41d2-9253-9fa41e7e8dd8", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", 
"links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/2fd0c66b-50af-41d2-9253-9fa41e7e8dd8", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/2fd0c66b-50af-41d2-9253-9fa41e7e8dd8", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "status": "RESCUE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-18T07:22:11Z", "user_id": "fake", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-STS:power_state": 4, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "rescued", "os-extended-volumes:volumes_attached": [], "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302", "OS-SRV-USG:terminated_at": null, "security_groups": [ { "name": "default" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-rescue/server-get-resp-unrescue.json0000664000175000017500000000453500000000000025314 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-18T07:22:09Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "53cd4520a6cc639eeabcae4a0512b93e4675d431002e0b60e2dcfc04", "id": "edfc3905-1f3c-4819-8fc3-a7d8131cfa22", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/edfc3905-1f3c-4819-8fc3-a7d8131cfa22", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/edfc3905-1f3c-4819-8fc3-a7d8131cfa22", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-18T07:22:12Z", "user_id": "fake", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [], "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302", "OS-SRV-USG:terminated_at": null, "security_groups": [ { "name": "default" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-rescue/server-rescue-req-with-image-ref.json0000664000175000017500000000020200000000000026600 0ustar00zuulzuul00000000000000{ "rescue": { "adminPass": "MySecretPass", "rescue_image_ref": "70a599e0-31e7-49b7-b260-868f441e862b" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-rescue/server-rescue-req.json0000664000175000017500000000007600000000000024006 0ustar00zuulzuul00000000000000{ "rescue": { "adminPass": "MySecretPass" } 
} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-rescue/server-rescue.json0000664000175000017500000000004400000000000023214 0ustar00zuulzuul00000000000000{ "adminPass": "MySecretPass" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-rescue/server-unrescue-req.json0000664000175000017500000000003100000000000024340 0ustar00zuulzuul00000000000000{ "unrescue": null } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0976067 nova-32.0.0/doc/api_samples/os-rescue/v2.87/0000775000175000017500000000000000000000000020315 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-rescue/v2.87/server-get-resp-rescue.json0000664000175000017500000000471600000000000025536 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-STS:power_state": 4, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "rescued", "OS-SRV-USG:launched_at": "2020-02-07T17:39:49.259481", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "config_drive": "", "created": "2020-02-07T17:39:48Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "id": "69bebe1c-3bdb-4feb-9b79-afa3d4782d95", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/69bebe1c-3bdb-4feb-9b79-afa3d4782d95", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/69bebe1c-3bdb-4feb-9b79-afa3d4782d95", "rel": "bookmark" } ], "locked": false, "locked_reason": null, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-extended-volumes:volumes_attached": [], "security_groups": [ { "name": "default" } ], "server_groups": [], "status": "RESCUE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2020-02-07T17:39:49Z", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-rescue/v2.87/server-get-resp-unrescue.json0000664000175000017500000000474400000000000026102 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2020-02-07T17:39:55.632592", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "config_drive": "", "created": "2020-02-07T17:39:54Z", "description": 
null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "id": "5a0ffa96-ae59-4f82-b7a6-e0c9007cd576", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/5a0ffa96-ae59-4f82-b7a6-e0c9007cd576", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/5a0ffa96-ae59-4f82-b7a6-e0c9007cd576", "rel": "bookmark" } ], "locked": false, "locked_reason": null, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-extended-volumes:volumes_attached": [], "progress": 0, "security_groups": [ { "name": "default" } ], "server_groups": [], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2020-02-07T17:39:56Z", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-rescue/v2.87/server-rescue-req-with-image-ref.json0000664000175000017500000000020100000000000027363 0ustar00zuulzuul00000000000000{ "rescue": { "adminPass": "MySecretPass", "rescue_image_ref": "70a599e0-31e7-49b7-b260-868f441e862b" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-rescue/v2.87/server-rescue-req.json0000664000175000017500000000007500000000000024571 0ustar00zuulzuul00000000000000{ "rescue": { "adminPass": "MySecretPass" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-rescue/v2.87/server-rescue.json0000664000175000017500000000004300000000000023777 0ustar00zuulzuul00000000000000{ "adminPass": "MySecretPass" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-rescue/v2.87/server-unrescue-req.json0000664000175000017500000000003000000000000025123 0ustar00zuulzuul00000000000000{ "unrescue": null }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.0976067 nova-32.0.0/doc/api_samples/os-security-group-default-rules/0000775000175000017500000000000000000000000024016 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=nova-32.0.0/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json 22 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.0000664000175000017500000000024000000000000033614 0ustar00zuulzuul00000000000000{ "security_group_default_rule": { "ip_protocol": "TCP", "from_port": "80", "to_port": "80", "cidr": "10.10.10.0/24" } }././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=nova-32.0.0/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json 22 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp0000664000175000017500000000032100000000000033720 0ustar00zuulzuul00000000000000{ "security_group_default_rule": { "from_port": 80, "id": 1, "ip_protocol": "TCP", "ip_range": { "cidr": "10.10.10.0/24" }, "to_port": 80 } }././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=nova-32.0.0/doc/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json 22 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.j0000664000175000017500000000040200000000000033660 0ustar00zuulzuul00000000000000{ "security_group_default_rules": [ { "from_port": 80, "id": 1, "ip_protocol": "TCP", "ip_range": { "cidr": "10.10.10.0/24" }, "to_port": 80 } ] }././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=nova-32.0.0/doc/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json 22 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.j0000664000175000017500000000032100000000000033665 0ustar00zuulzuul00000000000000{ "security_group_default_rule": { "from_port": 80, "id": 1, "ip_protocol": "TCP", "ip_range": { "cidr": "10.10.10.0/24" }, "to_port": 80 } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1016066 nova-32.0.0/doc/api_samples/os-security-groups/0000775000175000017500000000000000000000000021427 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-security-groups/security-group-add-post-req.json0000664000175000017500000000007200000000000027620 0ustar00zuulzuul00000000000000{ "addSecurityGroup": { "name": "test" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-security-groups/security-group-post-req.json0000664000175000017500000000013600000000000027073 0ustar00zuulzuul00000000000000{ "security_group": { "name": "test", "description": "description" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-security-groups/security-group-remove-post-req.json0000664000175000017500000000007500000000000030370 0ustar00zuulzuul00000000000000{ "removeSecurityGroup": { "name": "test" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-security-groups/security-group-rules-post-req.json0000664000175000017500000000032500000000000030223 0ustar00zuulzuul00000000000000{ "security_group_rule": { "parent_group_id": "21111111-1111-1111-1111-111111111112", "ip_protocol": "tcp", "from_port": 22, "to_port": 22, "cidr": "10.0.0.0/24" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-security-groups/security-group-rules-post-resp.json0000664000175000017500000000050400000000000030404 0ustar00zuulzuul00000000000000{ "security_group_rule": { "from_port": 22, "group": {}, "id": "00000000-0000-0000-0000-000000000000", "ip_protocol": "tcp", "ip_range": { "cidr": "10.0.0.0/24" }, "parent_group_id": "11111111-1111-1111-1111-111111111111", "to_port": 22 } 
}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-security-groups/security-groups-create-resp.json0000664000175000017500000000027400000000000027721 0ustar00zuulzuul00000000000000{ "security_group": { "description": "default", "id": 1, "name": "default", "rules": [], "tenant_id": "6f70656e737461636b20342065766572" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-security-groups/security-groups-get-resp.json0000664000175000017500000000027400000000000027235 0ustar00zuulzuul00000000000000{ "security_group": { "description": "default", "id": 1, "name": "default", "rules": [], "tenant_id": "6f70656e737461636b20342065766572" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-security-groups/security-groups-list-get-resp.json0000664000175000017500000000034500000000000030205 0ustar00zuulzuul00000000000000{ "security_groups": [ { "description": "default", "id": 1, "name": "default", "rules": [], "tenant_id": "6f70656e737461636b20342065766572" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-security-groups/server-security-groups-list-resp.json0000664000175000017500000000034500000000000030734 0ustar00zuulzuul00000000000000{ "security_groups": [ { "description": "default", "id": 1, "name": "default", "rules": [], "tenant_id": "6f70656e737461636b20342065766572" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1016066 nova-32.0.0/doc/api_samples/os-server-diagnostics/0000775000175000017500000000000000000000000022056 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json0000664000175000017500000000060300000000000030307 0ustar00zuulzuul00000000000000{ "cpu0_time": 17300000000, "memory": 524288, "vda_errors": -1, "vda_read": 262144, "vda_read_req": 112, "vda_write": 5778432, "vda_write_req": 488, "vnet1_rx": 2070139, "vnet1_rx_drop": 0, "vnet1_rx_errors": 0, "vnet1_rx_packets": 26701, "vnet1_tx": 140208, "vnet1_tx_drop": 0, "vnet1_tx_errors": 0, "vnet1_tx_packets": 662 } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1016066 nova-32.0.0/doc/api_samples/os-server-diagnostics/v2.48/0000775000175000017500000000000000000000000022637 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-diagnostics/v2.48/server-diagnostics-get-resp.json0000664000175000017500000000201500000000000031067 0ustar00zuulzuul00000000000000{ "config_drive": true, "cpu_details": [ { "id": 0, "time": 17300000000, "utilisation": 15 } ], "disk_details": [ { "errors_count": 1, "read_bytes": 262144, "read_requests": 112, "write_bytes": 5778432, "write_requests": 488 } ], "driver": "libvirt", "hypervisor": "kvm", "hypervisor_os": "ubuntu", "memory_details": { "maximum": 524288, "used": 0 }, "nic_details": [ { "mac_address": "01:23:45:67:89:ab", "rx_drop": 200, "rx_errors": 100, "rx_octets": 2070139, "rx_packets": 26701, "rx_rate": 300, "tx_drop": 500, "tx_errors": 400, 
"tx_octets": 140208, "tx_packets": 662, "tx_rate": 600 } ], "num_cpus": 1, "num_disks": 1, "num_nics": 1, "state": "running", "uptime": 46664 } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1016066 nova-32.0.0/doc/api_samples/os-server-external-events/0000775000175000017500000000000000000000000022673 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-external-events/event-create-req.json0000664000175000017500000000031600000000000026735 0ustar00zuulzuul00000000000000{ "events": [ { "name": "test-event", "tag": "foo", "status": "completed", "server_uuid": "3df201cf-2451-44f2-8d25-a4ca826fc1f3" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-external-events/event-create-resp.json0000664000175000017500000000035400000000000027121 0ustar00zuulzuul00000000000000{ "events": [ { "code": 200, "name": "network-changed", "server_uuid": "ff1df7b2-6772-45fd-9326-c0a3b05591c2", "status": "completed", "tag": "foo" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1016066 nova-32.0.0/doc/api_samples/os-server-groups/0000775000175000017500000000000000000000000021066 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-groups/server-groups-get-resp.json0000664000175000017500000000030300000000000026324 0ustar00zuulzuul00000000000000{ "server_group": { "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9", "name": "test", "policies": ["anti-affinity"], "members": [], "metadata": {} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-groups/server-groups-list-resp.json0000664000175000017500000000035400000000000026526 0ustar00zuulzuul00000000000000{ "server_groups": [ { "id": "616fb98f-46ca-475e-917e-2563e5a8cd19", "name": "test", "policies": ["anti-affinity"], "members": [], "metadata": {} } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-groups/server-groups-post-req.json0000664000175000017500000000013600000000000026354 0ustar00zuulzuul00000000000000{ "server_group": { "name": "test", "policies": ["anti-affinity"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-groups/server-groups-post-resp.json0000664000175000017500000000030300000000000026532 0ustar00zuulzuul00000000000000{ "server_group": { "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9", "name": "test", "policies": ["anti-affinity"], "members": [], "metadata": {} } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1016066 nova-32.0.0/doc/api_samples/os-server-groups/v2.13/0000775000175000017500000000000000000000000021637 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-groups/v2.13/server-groups-get-resp.json0000664000175000017500000000043000000000000027076 0ustar00zuulzuul00000000000000{ "server_group": { "id": 
"5bbcc3c4-1da2-4437-a48a-66f15b1b13f9", "name": "test", "policies": ["anti-affinity"], "members": [], "metadata": {}, "project_id": "6f70656e737461636b20342065766572", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-groups/v2.13/server-groups-list-resp.json0000664000175000017500000000051100000000000027272 0ustar00zuulzuul00000000000000{ "server_groups": [ { "id": "616fb98f-46ca-475e-917e-2563e5a8cd19", "name": "test", "policies": ["anti-affinity"], "members": [], "metadata": {}, "project_id": "6f70656e737461636b20342065766572", "user_id": "fake" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-groups/v2.13/server-groups-post-req.json0000664000175000017500000000013600000000000027125 0ustar00zuulzuul00000000000000{ "server_group": { "name": "test", "policies": ["anti-affinity"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-groups/v2.13/server-groups-post-resp.json0000664000175000017500000000043000000000000027304 0ustar00zuulzuul00000000000000{ "server_group": { "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9", "name": "test", "policies": ["anti-affinity"], "members": [], "metadata": {}, "project_id": "6f70656e737461636b20342065766572", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1056066 nova-32.0.0/doc/api_samples/os-server-groups/v2.64/0000775000175000017500000000000000000000000021645 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-groups/v2.64/server-groups-get-resp.json0000664000175000017500000000045100000000000027107 0ustar00zuulzuul00000000000000{ "server_group": { "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9", "name": "test", "policy": "anti-affinity", "rules": {"max_server_per_host": 3}, "members": [], "project_id": "6f70656e737461636b20342065766572", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-groups/v2.64/server-groups-list-resp.json0000664000175000017500000000053200000000000027303 0ustar00zuulzuul00000000000000{ "server_groups": [ { "id": "616fb98f-46ca-475e-917e-2563e5a8cd19", "name": "test", "policy": "anti-affinity", "rules": {"max_server_per_host": 3}, "members": [], "project_id": "6f70656e737461636b20342065766572", "user_id": "fake" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-groups/v2.64/server-groups-post-req.json0000664000175000017500000000020700000000000027132 0ustar00zuulzuul00000000000000{ "server_group": { "name": "test", "policy": "anti-affinity", "rules": {"max_server_per_host": 3} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-groups/v2.64/server-groups-post-resp.json0000664000175000017500000000045100000000000027315 0ustar00zuulzuul00000000000000{ "server_group": { "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9", "name": "test", "policy": "anti-affinity", "rules": {"max_server_per_host": 3}, "members": [], 
"project_id": "6f70656e737461636b20342065766572", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1056066 nova-32.0.0/doc/api_samples/os-server-password/0000775000175000017500000000000000000000000021411 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-password/get-password-resp.json0000664000175000017500000000055600000000000025700 0ustar00zuulzuul00000000000000{ "password": "xlozO3wLCBRWAa2yDjCCVx8vwNPypxnypmRYDa/zErlQ+EzPe1S/Gz6nfmC52mOlOSCRuUOmG7kqqgejPof6M7bOezS387zjq4LSvvwp28zUknzy4YzfFGhnHAdai3TxUJ26pfQCYrq8UTzmKF2Bq8ioSEtVVzM0A96pDh8W2i7BOz6MdoiVyiev/I1K2LsuipfxSJR7Wdke4zNXJjHHP2RfYsVbZ/k9ANu+Nz4iIH8/7Cacud/pphH7EjrY6a4RZNrjQskrhKYed0YERpotyjYk1eDtRe72GrSiXteqCM4biaQ5w3ruS+AcX//PXk3uJ5kC7d67fPXaVz4WaQRYMg==" }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315688.8576047 nova-32.0.0/doc/api_samples/os-server-shares/0000775000175000017500000000000000000000000021034 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1056066 nova-32.0.0/doc/api_samples/os-server-shares/v2.97/0000775000175000017500000000000000000000000021621 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-shares/v2.97/server-shares-admin-create-resp.json0000664000175000017500000000037300000000000030606 0ustar00zuulzuul00000000000000{ "share": { "uuid": "68ba1762-fd6d-4221-8311-f3193dd93404", "share_id": "e8debdc0-447a-4376-a10a-4cd9122d7986", "status": "attaching", "export_location": "10.0.0.50:/mnt/foo", "tag": "e8debdc0-447a-4376-a10a-4cd9122d7986" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-shares/v2.97/server-shares-admin-show-resp.json0000664000175000017500000000037200000000000030322 0ustar00zuulzuul00000000000000{ "share": { "uuid": "68ba1762-fd6d-4221-8311-f3193dd93404", "share_id": "e8debdc0-447a-4376-a10a-4cd9122d7986", "status": "inactive", "export_location": "10.0.0.50:/mnt/foo", "tag": "e8debdc0-447a-4376-a10a-4cd9122d7986" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-shares/v2.97/server-shares-create-req.json0000664000175000017500000000015700000000000027336 0ustar00zuulzuul00000000000000{ "share": { "share_id": "3cdf5132-64f2-4584-876a-bd296ae7eabd", "tag": "my-share" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-shares/v2.97/server-shares-create-resp.json0000664000175000017500000000023200000000000027512 0ustar00zuulzuul00000000000000{ "share": { "share_id": "e8debdc0-447a-4376-a10a-4cd9122d7986", "status": "attaching", "tag": "e8debdc0-447a-4376-a10a-4cd9122d7986" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-shares/v2.97/server-shares-create-tag-req.json0000664000175000017500000000015400000000000030104 0ustar00zuulzuul00000000000000{ "share": { "share_id": "e8debdc0-447a-4376-a10a-4cd9122d7986", "tag": "my-tag" } 
}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-shares/v2.97/server-shares-delete-req.json0000664000175000017500000000012300000000000027326 0ustar00zuulzuul00000000000000{ "share": { "share_id": "e8debdc0-447a-4376-a10a-4cd9122d7986" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-shares/v2.97/server-shares-list-resp.json0000664000175000017500000000025400000000000027226 0ustar00zuulzuul00000000000000{ "shares": [ { "share_id": "e8debdc0-447a-4376-a10a-4cd9122d7986", "status": "inactive", "tag": "e8debdc0-447a-4376-a10a-4cd9122d7986" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-shares/v2.97/server-shares-show-resp.json0000664000175000017500000000023100000000000027226 0ustar00zuulzuul00000000000000{ "share": { "share_id": "e8debdc0-447a-4376-a10a-4cd9122d7986", "status": "inactive", "tag": "e8debdc0-447a-4376-a10a-4cd9122d7986" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315688.8576047 nova-32.0.0/doc/api_samples/os-server-tags/0000775000175000017500000000000000000000000020505 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1056066 nova-32.0.0/doc/api_samples/os-server-tags/v2.26/0000775000175000017500000000000000000000000021262 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-tags/v2.26/server-tags-index-resp.json0000664000175000017500000000004100000000000026466 0ustar00zuulzuul00000000000000{ "tags": ["tag1", "tag2"] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-tags/v2.26/server-tags-put-all-req.json0000664000175000017500000000004100000000000026553 0ustar00zuulzuul00000000000000{ "tags": ["tag1", "tag2"] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-tags/v2.26/server-tags-put-all-resp.json0000664000175000017500000000004100000000000026735 0ustar00zuulzuul00000000000000{ "tags": ["tag1", "tag2"] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-tags/v2.26/server-tags-show-details-resp.json0000664000175000017500000000466500000000000030002 0ustar00zuulzuul00000000000000{ "server": { "tags": ["tag1", "tag2"], "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2012-12-02T02:11:55Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "c949ab4256cea23b6089b710aa2df48bf6577ed915278b62e33ad8bb", "id": "5046e2f2-3b33-4041-b3cf-e085f73e78e7", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": 
"http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/5046e2f2-3b33-4041-b3cf-e085f73e78e7", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/5046e2f2-3b33-4041-b3cf-e085f73e78e7", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2012-12-02T02:11:55Z", "key_name": null, "user_id": "fake", "locked": false, "description": null, "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [], "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302", "OS-SRV-USG:terminated_at": null, "security_groups": [ { "name": "default" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-tags/v2.26/servers-tags-details-resp.json0000664000175000017500000000534200000000000027200 0ustar00zuulzuul00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25", "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-03T04:01:32Z", "user_id": "fake", "locked": false, "tags": ["tag1", "tag2"], "description": null, "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [], "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549", "OS-SRV-USG:terminated_at": null, "security_groups": [ { "name": "default" } ] } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315688.8576047 nova-32.0.0/doc/api_samples/os-server-topology/0000775000175000017500000000000000000000000021423 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1056066 nova-32.0.0/doc/api_samples/os-server-topology/v2.78/0000775000175000017500000000000000000000000022207 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/os-server-topology/v2.78/servers-topology-resp-user.json0000664000175000017500000000104400000000000030367 0ustar00zuulzuul00000000000000{ "nodes": [ { "memory_mb": 1024, "siblings": [ [ 0, 1 ] ], "vcpu_set": [ 0, 1 ] }, { "memory_mb": 2048, "siblings": [ [ 2, 3 ] ], "vcpu_set": [ 2, 3 ] } ], "pagesize_kb": 4 } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-server-topology/v2.78/servers-topology-resp.json0000664000175000017500000000142200000000000027413 0ustar00zuulzuul00000000000000{ "nodes": [ { "cpu_pinning": { "0": 0, "1": 5 }, "host_node": 0, "memory_mb": 1024, "siblings": [ [ 0, 1 ] ], "vcpu_set": [ 0, 1 ] }, { "cpu_pinning": { "2": 1, "3": 8 }, "host_node": 1, "memory_mb": 2048, "siblings": [ [ 2, 3 ] ], "vcpu_set": [ 2, 3 ] } ], "pagesize_kb": 4 } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1096067 nova-32.0.0/doc/api_samples/os-services/0000775000175000017500000000000000000000000020066 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/service-disable-log-put-req.json0000664000175000017500000000012500000000000026172 0ustar00zuulzuul00000000000000{ "host": "host1", "binary": "nova-compute", "disabled_reason": "test2" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/service-disable-log-put-resp.json0000664000175000017500000000022600000000000026356 0ustar00zuulzuul00000000000000{ "service": { "binary": "nova-compute", "disabled_reason": "test2", "host": "host1", "status": "disabled" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/service-disable-put-req.json0000664000175000017500000000006500000000000025416 0ustar00zuulzuul00000000000000{ "host": "host1", "binary": "nova-compute" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/service-disable-put-resp.json0000664000175000017500000000016200000000000025576 0ustar00zuulzuul00000000000000{ "service": { "binary": "nova-compute", "host": "host1", "status": "disabled" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/service-enable-put-req.json0000664000175000017500000000006500000000000025241 0ustar00zuulzuul00000000000000{ "host": "host1", "binary": "nova-compute" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/service-enable-put-resp.json0000664000175000017500000000016100000000000025420 0ustar00zuulzuul00000000000000{ "service": { "binary": "nova-compute", "host": "host1", "status": "enabled" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/services-list-get-resp.json0000664000175000017500000000227300000000000025305 0ustar00zuulzuul00000000000000{ "services": [ { "id": 1, "binary": "nova-scheduler", "disabled_reason": "test1", "host": "host1", "state": "up", "status": "disabled", "updated_at": "2012-10-29T13:42:02.000000", "zone": "internal" 
}, { "id": 2, "binary": "nova-compute", "disabled_reason": "test2", "host": "host1", "state": "up", "status": "disabled", "updated_at": "2012-10-29T13:42:05.000000", "zone": "nova" }, { "id": 3, "binary": "nova-scheduler", "disabled_reason": null, "host": "host2", "state": "down", "status": "enabled", "updated_at": "2012-09-19T06:55:34.000000", "zone": "internal" }, { "id": 4, "binary": "nova-compute", "disabled_reason": "test4", "host": "host2", "state": "down", "status": "disabled", "updated_at": "2012-09-18T08:03:38.000000", "zone": "nova" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1096067 nova-32.0.0/doc/api_samples/os-services/v2.11/0000775000175000017500000000000000000000000020635 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/v2.11/service-disable-log-put-req.json0000664000175000017500000000012500000000000026741 0ustar00zuulzuul00000000000000{ "host": "host1", "binary": "nova-compute", "disabled_reason": "test2" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/v2.11/service-disable-log-put-resp.json0000664000175000017500000000022600000000000027125 0ustar00zuulzuul00000000000000{ "service": { "binary": "nova-compute", "disabled_reason": "test2", "host": "host1", "status": "disabled" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/v2.11/service-disable-put-req.json0000664000175000017500000000006500000000000026165 0ustar00zuulzuul00000000000000{ "host": "host1", "binary": "nova-compute" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/v2.11/service-disable-put-resp.json0000664000175000017500000000016200000000000026345 0ustar00zuulzuul00000000000000{ "service": { "binary": "nova-compute", "host": "host1", "status": "disabled" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/v2.11/service-enable-put-req.json0000664000175000017500000000006500000000000026010 0ustar00zuulzuul00000000000000{ "host": "host1", "binary": "nova-compute" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/v2.11/service-enable-put-resp.json0000664000175000017500000000016100000000000026167 0ustar00zuulzuul00000000000000{ "service": { "binary": "nova-compute", "host": "host1", "status": "enabled" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/v2.11/service-force-down-put-req.json0000664000175000017500000000011700000000000026623 0ustar00zuulzuul00000000000000{ "host": "host1", "binary": "nova-compute", "forced_down": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/v2.11/service-force-down-put-resp.json0000664000175000017500000000016200000000000027005 0ustar00zuulzuul00000000000000{ "service": { "binary": "nova-compute", "host": "host1", "forced_down": true } } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/v2.11/services-list-get-resp.json0000664000175000017500000000250300000000000026050 0ustar00zuulzuul00000000000000{ "services": [ { "id": 1, "binary": "nova-scheduler", "disabled_reason": "test1", "host": "host1", "state": "up", "status": "disabled", "updated_at": "2012-10-29T13:42:02.000000", "forced_down": false, "zone": "internal" }, { "id": 2, "binary": "nova-compute", "disabled_reason": "test2", "host": "host1", "state": "up", "status": "disabled", "updated_at": "2012-10-29T13:42:05.000000", "forced_down": false, "zone": "nova" }, { "id": 3, "binary": "nova-scheduler", "disabled_reason": null, "host": "host2", "state": "down", "status": "enabled", "updated_at": "2012-09-19T06:55:34.000000", "forced_down": false, "zone": "internal" }, { "id": 4, "binary": "nova-compute", "disabled_reason": "test4", "host": "host2", "state": "down", "status": "disabled", "updated_at": "2012-09-18T08:03:38.000000", "forced_down": false, "zone": "nova" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1136067 nova-32.0.0/doc/api_samples/os-services/v2.53/0000775000175000017500000000000000000000000020643 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/v2.53/service-disable-log-put-req.json0000664000175000017500000000010200000000000026742 0ustar00zuulzuul00000000000000{ "status": "disabled", "disabled_reason": "maintenance" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/v2.53/service-disable-log-put-resp.json0000664000175000017500000000052300000000000027133 0ustar00zuulzuul00000000000000{ "service": { "id": "e81d66a4-ddd3-4aba-8a84-171d1cb4d339", "binary": "nova-compute", "disabled_reason": "maintenance", "host": "host1", "state": "up", "status": "disabled", "updated_at": "2012-10-29T13:42:05.000000", "forced_down": false, "zone": "nova" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/v2.53/service-disable-put-req.json0000664000175000017500000000003400000000000026167 0ustar00zuulzuul00000000000000{ "status": "disabled" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/v2.53/service-disable-put-resp.json0000664000175000017500000000051200000000000026352 0ustar00zuulzuul00000000000000{ "service": { "id": "e81d66a4-ddd3-4aba-8a84-171d1cb4d339", "binary": "nova-compute", "disabled_reason": null, "host": "host1", "state": "up", "status": "disabled", "updated_at": "2012-10-29T13:42:05.000000", "forced_down": false, "zone": "nova" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/v2.53/service-enable-put-req.json0000664000175000017500000000003300000000000026011 0ustar00zuulzuul00000000000000{ "status": "enabled" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/v2.53/service-enable-put-resp.json0000664000175000017500000000051100000000000026174 0ustar00zuulzuul00000000000000{ "service": { 
"id": "e81d66a4-ddd3-4aba-8a84-171d1cb4d339", "binary": "nova-compute", "disabled_reason": null, "host": "host1", "state": "up", "status": "enabled", "updated_at": "2012-10-29T13:42:05.000000", "forced_down": false, "zone": "nova" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/v2.53/service-force-down-put-req.json0000664000175000017500000000003300000000000026626 0ustar00zuulzuul00000000000000{ "forced_down": true }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/v2.53/service-force-down-put-resp.json0000664000175000017500000000051600000000000027016 0ustar00zuulzuul00000000000000{ "service": { "id": "e81d66a4-ddd3-4aba-8a84-171d1cb4d339", "binary": "nova-compute", "disabled_reason": "test2", "host": "host1", "state": "down", "status": "disabled", "updated_at": "2012-10-29T13:42:05.000000", "forced_down": true, "zone": "nova" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/v2.53/services-list-get-resp.json0000664000175000017500000000272700000000000026066 0ustar00zuulzuul00000000000000{ "services": [ { "id": "c4726392-27de-4ff9-b2e0-5aa1d08a520f", "binary": "nova-scheduler", "disabled_reason": "test1", "host": "host1", "state": "up", "status": "disabled", "updated_at": "2012-10-29T13:42:02.000000", "forced_down": false, "zone": "internal" }, { "id": "e81d66a4-ddd3-4aba-8a84-171d1cb4d339", "binary": "nova-compute", "disabled_reason": "test2", "host": "host1", "state": "up", "status": "disabled", "updated_at": "2012-10-29T13:42:05.000000", "forced_down": false, "zone": "nova" }, { "id": "bbd684ff-d3f6-492e-a30a-a12a2d2db0e0", "binary": "nova-scheduler", "disabled_reason": null, "host": "host2", "state": "down", "status": "enabled", "updated_at": "2012-09-19T06:55:34.000000", "forced_down": false, "zone": "internal" }, { "id": "13aa304e-5340-45a7-a7fb-b6d6e914d272", "binary": "nova-compute", "disabled_reason": "test4", "host": "host2", "state": "down", "status": "disabled", "updated_at": "2012-09-18T08:03:38.000000", "forced_down": false, "zone": "nova" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1136067 nova-32.0.0/doc/api_samples/os-services/v2.69/0000775000175000017500000000000000000000000020652 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-services/v2.69/services-list-get-resp.json0000664000175000017500000000041300000000000026063 0ustar00zuulzuul00000000000000{ "services": [ { "binary": "nova-compute", "host": "host1", "status": "UNKNOWN" }, { "binary": "nova-compute", "host": "host2", "status": "UNKNOWN" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1136067 nova-32.0.0/doc/api_samples/os-shelve/0000775000175000017500000000000000000000000017531 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-shelve/os-shelve-offload.json0000664000175000017500000000003600000000000023740 0ustar00zuulzuul00000000000000{ "shelveOffload": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-shelve/os-shelve.json0000664000175000017500000000002600000000000022327 0ustar00zuulzuul00000000000000{ "shelve": null }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-shelve/os-unshelve.json0000664000175000017500000000003100000000000022666 0ustar00zuulzuul00000000000000{ "unshelve": null } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1136067 nova-32.0.0/doc/api_samples/os-shelve/v2.77/0000775000175000017500000000000000000000000020314 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-shelve/v2.77/os-shelve.json0000664000175000017500000000002600000000000023112 0ustar00zuulzuul00000000000000{ "shelve": null }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-shelve/v2.77/os-unshelve-az.json0000664000175000017500000000010200000000000024060 0ustar00zuulzuul00000000000000{ "unshelve": { "availability_zone": "us-west" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-shelve/v2.77/os-unshelve.json0000664000175000017500000000003000000000000023450 0ustar00zuulzuul00000000000000{ "unshelve": null }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1176066 nova-32.0.0/doc/api_samples/os-shelve/v2.91/0000775000175000017500000000000000000000000020310 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-shelve/v2.91/os-shelve.json0000664000175000017500000000002600000000000023106 0ustar00zuulzuul00000000000000{ "shelve": null }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-shelve/v2.91/os-unshelve-az-host.json0000664000175000017500000000013200000000000025032 0ustar00zuulzuul00000000000000{ "unshelve": { "availability_zone": "nova", "host": "host01" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-shelve/v2.91/os-unshelve-az.json0000664000175000017500000000010200000000000024054 0ustar00zuulzuul00000000000000{ "unshelve": { "availability_zone": "us-west" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-shelve/v2.91/os-unshelve-host-and-unpin-az.json0000664000175000017500000000013000000000000026717 0ustar00zuulzuul00000000000000{ "unshelve": { "availability_zone": null, "host": "host01" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-shelve/v2.91/os-unshelve-host.json0000664000175000017500000000006500000000000024427 0ustar00zuulzuul00000000000000{ "unshelve": { "host": "host01" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-shelve/v2.91/os-unshelve-unpin-az.json0000664000175000017500000000007600000000000025215 0ustar00zuulzuul00000000000000{ "unshelve": { "availability_zone": null } } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-shelve/v2.91/os-unshelve.json0000664000175000017500000000003000000000000023444 0ustar00zuulzuul00000000000000{ "unshelve": null }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1176066 nova-32.0.0/doc/api_samples/os-simple-tenant-usage/0000775000175000017500000000000000000000000022125 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-detail.json0000664000175000017500000000174600000000000030727 0ustar00zuulzuul00000000000000{ "tenant_usages": [ { "start": "2012-10-08T20:10:44.587336", "stop": "2012-10-08T21:10:44.587336", "tenant_id": "6f70656e737461636b20342065766572", "total_hours": 1.0, "total_local_gb_usage": 1.0, "total_memory_mb_usage": 512.0, "total_vcpus_usage": 1.0, "server_usages": [ { "ended_at": null, "flavor": "m1.tiny", "hours": 1.0, "instance_id": "1f1deceb-17b5-4c04-84c7-e0d4499c8fe0", "local_gb": 1, "memory_mb": 512, "name": "new-server-test", "started_at": "2012-10-08T20:10:44.541277", "state": "active", "tenant_id": "6f70656e737461636b20342065766572", "uptime": 3600, "vcpus": 1 } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json0000664000175000017500000000156500000000000031251 0ustar00zuulzuul00000000000000{ "tenant_usage": { "server_usages": [ { "ended_at": null, "flavor": "m1.tiny", "hours": 1.0, "instance_id": "1f1deceb-17b5-4c04-84c7-e0d4499c8fe0", "local_gb": 1, "memory_mb": 512, "name": "new-server-test", "started_at": "2012-10-08T20:10:44.541277", "state": "active", "tenant_id": "6f70656e737461636b20342065766572", "uptime": 3600, "vcpus": 1 } ], "start": "2012-10-08T20:10:44.587336", "stop": "2012-10-08T21:10:44.587336", "tenant_id": "6f70656e737461636b20342065766572", "total_hours": 1.0, "total_local_gb_usage": 1.0, "total_memory_mb_usage": 512.0, "total_vcpus_usage": 1.0 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json0000664000175000017500000000056100000000000027461 0ustar00zuulzuul00000000000000{ "tenant_usages": [ { "start": "2012-10-08T21:10:44.587336", "stop": "2012-10-08T22:10:44.587336", "tenant_id": "6f70656e737461636b20342065766572", "total_hours": 1.0, "total_local_gb_usage": 1.0, "total_memory_mb_usage": 512.0, "total_vcpus_usage": 1.0 } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1176066 nova-32.0.0/doc/api_samples/os-simple-tenant-usage/v2.40/0000775000175000017500000000000000000000000022676 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-simple-tenant-usage/v2.40/simple-tenant-usage-get-all.json0000664000175000017500000000474500000000000031010 0ustar00zuulzuul00000000000000{ "tenant_usages": [ { "server_usages": [ { "ended_at": null, "flavor": "m1.tiny", "hours": 1.0, "instance_id": "1f1deceb-17b5-4c04-84c7-e0d4499c8f06", "local_gb": 1, "memory_mb": 512, "name": "instance-3", "started_at": "2018-10-09T11:29:04.166194", 
"state": "active", "tenant_id": "0000000e737461636b20342065000000", "uptime": 3600, "vcpus": 1 } ], "start": "2018-10-09T11:29:04.166194", "stop": "2018-10-09T12:29:04.166194", "tenant_id": "0000000e737461636b20342065000000", "total_hours": 1.0, "total_local_gb_usage": 1.0, "total_memory_mb_usage": 512.0, "total_vcpus_usage": 1.0 }, { "server_usages": [ { "ended_at": null, "flavor": "m1.tiny", "hours": 1.0, "instance_id": "1f1deceb-17b5-4c04-84c7-e0d4499c8f00", "local_gb": 1, "memory_mb": 512, "name": "instance-1", "started_at": "2018-10-09T11:29:04.166194", "state": "active", "tenant_id": "6f70656e737461636b20342065766572", "uptime": 3600, "vcpus": 1 }, { "ended_at": null, "flavor": "m1.tiny", "hours": 1.0, "instance_id": "1f1deceb-17b5-4c04-84c7-e0d4499c8f03", "local_gb": 1, "memory_mb": 512, "name": "instance-2", "started_at": "2018-10-09T11:29:04.166194", "state": "active", "tenant_id": "6f70656e737461636b20342065766572", "uptime": 3600, "vcpus": 1 } ], "start": "2018-10-09T11:29:04.166194", "stop": "2018-10-09T12:29:04.166194", "tenant_id": "6f70656e737461636b20342065766572", "total_hours": 2.0, "total_local_gb_usage": 2.0, "total_memory_mb_usage": 1024.0, "total_vcpus_usage": 2.0 } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-simple-tenant-usage/v2.40/simple-tenant-usage-get-detail.json0000664000175000017500000000245300000000000031474 0ustar00zuulzuul00000000000000{ "tenant_usages": [ { "start": "2012-10-08T20:10:44.587336", "stop": "2012-10-08T21:10:44.587336", "tenant_id": "6f70656e737461636b20342065766572", "total_hours": 1.0, "total_local_gb_usage": 1.0, "total_memory_mb_usage": 512.0, "total_vcpus_usage": 1.0, "server_usages": [ { "ended_at": null, "flavor": "m1.tiny", "hours": 1.0, "instance_id": "1f1deceb-17b5-4c04-84c7-e0d4499c8fe0", "local_gb": 1, "memory_mb": 512, "name": "instance-2", "started_at": "2012-10-08T20:10:44.541277", "state": "active", "tenant_id": "6f70656e737461636b20342065766572", "uptime": 3600, "vcpus": 1 } ] } ], "tenant_usages_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-simple-tenant-usage?detailed=1&end=2016-10-12+18%3A22%3A04.868106&limit=1&marker=1f1deceb-17b5-4c04-84c7-e0d4499c8fe0&start=2016-10-12+18%3A22%3A04.868106", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-simple-tenant-usage/v2.40/simple-tenant-usage-get-specific.json0000664000175000017500000000231700000000000032016 0ustar00zuulzuul00000000000000{ "tenant_usage": { "server_usages": [ { "ended_at": null, "flavor": "m1.tiny", "hours": 1.0, "instance_id": "1f1deceb-17b5-4c04-84c7-e0d4499c8fe0", "local_gb": 1, "memory_mb": 512, "name": "instance-2", "started_at": "2012-10-08T20:10:44.541277", "state": "active", "tenant_id": "6f70656e737461636b20342065766572", "uptime": 3600, "vcpus": 1 } ], "start": "2012-10-08T20:10:44.587336", "stop": "2012-10-08T21:10:44.587336", "tenant_id": "6f70656e737461636b20342065766572", "total_hours": 1.0, "total_local_gb_usage": 1.0, "total_memory_mb_usage": 512.0, "total_vcpus_usage": 1.0 }, "tenant_usage_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-simple-tenant-usage/6f70656e737461636b20342065766572?end=2016-10-12+18%3A22%3A04.868106&limit=1&marker=1f1deceb-17b5-4c04-84c7-e0d4499c8fe0&start=2016-10-12+18%3A22%3A04.868106", "rel": "next" } ] } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-simple-tenant-usage/v2.40/simple-tenant-usage-get.json0000664000175000017500000000126000000000000030227 0ustar00zuulzuul00000000000000{ "tenant_usages": [ { "start": "2012-10-08T21:10:44.587336", "stop": "2012-10-08T22:10:44.587336", "tenant_id": "6f70656e737461636b20342065766572", "total_hours": 1.0, "total_local_gb_usage": 1.0, "total_memory_mb_usage": 512.0, "total_vcpus_usage": 1.0 } ], "tenant_usages_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-simple-tenant-usage?end=2016-10-12+18%3A22%3A04.868106&limit=1&marker=1f1deceb-17b5-4c04-84c7-e0d4499c8fe0&start=2016-10-12+18%3A22%3A04.868106", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1176066 nova-32.0.0/doc/api_samples/os-snapshots/0000775000175000017500000000000000000000000020265 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-snapshots/snapshot-create-req.json0000664000175000017500000000030300000000000025041 0ustar00zuulzuul00000000000000{ "snapshot": { "display_name": "snap-001", "display_description": "Daily backup", "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", "force": false } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-snapshots/snapshot-create-resp.json0000664000175000017500000000050400000000000025226 0ustar00zuulzuul00000000000000{ "snapshot": { "createdAt": "2025-06-12T14:56:30.215430", "displayDescription": "Daily backup", "displayName": "snap-001", "id": "fe06e9f2-e6b0-47d1-a63e-c0a15ad51994", "size": 100, "status": "available", "volumeId": "521752a6-acf6-4b2d-bc7a-119f9148cd8c" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-snapshots/snapshots-detail-resp.json0000664000175000017500000000211200000000000025405 0ustar00zuulzuul00000000000000{ "snapshots": [ { "createdAt": "2025-06-12T14:56:30.070910", "displayDescription": "Default description", "displayName": "Default name", "id": "02d9627d-9e4d-42d7-aff2-3b23a0caf990", "size": 100, "status": "available", "volumeId": "a41718ce-4f74-46cf-acb4-6ebfc435ad6e" }, { "createdAt": "2025-06-12T14:56:30.070925", "displayDescription": "Default description", "displayName": "Default name", "id": "9780ef6b-6820-478b-8d01-c972e683a8aa", "size": 100, "status": "available", "volumeId": "a41718ce-4f74-46cf-acb4-6ebfc435ad6e" }, { "createdAt": "2025-06-12T14:56:30.070934", "displayDescription": "Default description", "displayName": "Default name", "id": "29b0f3f1-6cd7-4c65-b431-882cf54f158c", "size": 100, "status": "available", "volumeId": "a41718ce-4f74-46cf-acb4-6ebfc435ad6e" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-snapshots/snapshots-list-resp.json0000664000175000017500000000211200000000000025116 0ustar00zuulzuul00000000000000{ "snapshots": [ { "createdAt": "2025-06-12T14:56:28.865898", "displayDescription": "Default description", "displayName": "Default name", "id": "f3ddb3ee-7fed-4237-ac25-1d4d8c0fb127", "size": 100, "status": "available", "volumeId": "4fe5b5eb-694e-4ead-ba98-ecffc6ff1d21" }, { "createdAt": 
"2025-06-12T14:56:28.865915", "displayDescription": "Default description", "displayName": "Default name", "id": "d018c6d5-0a75-4001-aef2-9f9df82feb6e", "size": 100, "status": "available", "volumeId": "4fe5b5eb-694e-4ead-ba98-ecffc6ff1d21" }, { "createdAt": "2025-06-12T14:56:28.865925", "displayDescription": "Default description", "displayName": "Default name", "id": "df0a2535-bab9-4bec-b03b-656f741b1c45", "size": 100, "status": "available", "volumeId": "4fe5b5eb-694e-4ead-ba98-ecffc6ff1d21" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-snapshots/snapshots-show-resp.json0000664000175000017500000000051700000000000025132 0ustar00zuulzuul00000000000000{ "snapshot": { "createdAt": "2025-06-12T14:56:28.922532", "displayDescription": "Default description", "displayName": "Default name", "id": "ec675a13-5be8-4077-8381-9d70a12f77fb", "size": 100, "status": "available", "volumeId": "d7bccf30-48a2-4cfc-a9c0-71c5ee144f61" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1176066 nova-32.0.0/doc/api_samples/os-suspend-server/0000775000175000017500000000000000000000000021230 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-suspend-server/server-resume.json0000664000175000017500000000002600000000000024725 0ustar00zuulzuul00000000000000{ "resume": null }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-suspend-server/server-suspend.json0000664000175000017500000000002700000000000025107 0ustar00zuulzuul00000000000000{ "suspend": null }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1216068 nova-32.0.0/doc/api_samples/os-tenant-networks/0000775000175000017500000000000000000000000021406 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-tenant-networks/networks-list-res.json0000664000175000017500000000024500000000000025716 0ustar00zuulzuul00000000000000{ "networks": [ { "cidr": "None", "id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "label": "private" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-tenant-networks/networks-post-req.json0000664000175000017500000000024500000000000025726 0ustar00zuulzuul00000000000000{ "network": { "label": "public", "cidr": "172.0.0.0/24", "vlan_start": 1, "num_networks": 1, "network_size": 255 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-tenant-networks/networks-post-res.json0000664000175000017500000000021300000000000025723 0ustar00zuulzuul00000000000000{ "network": { "cidr": "172.0.0.0/24", "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9", "label": "public" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1216068 nova-32.0.0/doc/api_samples/os-virtual-interfaces/0000775000175000017500000000000000000000000022052 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1216068 
nova-32.0.0/doc/api_samples/os-virtual-interfaces/v2.12/0000775000175000017500000000000000000000000022622 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-virtual-interfaces/v2.12/vifs-list-resp.json0000664000175000017500000000034100000000000026402 0ustar00zuulzuul00000000000000{ "virtual_interfaces": [ { "id": "cec8b9bb-5d22-4104-b3c8-4c35db3210a6", "mac_address": "fa:16:3e:3c:ce:6f", "net_id": "cec8b9bb-5d22-4104-b3c8-4c35db3210a7" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-virtual-interfaces/vifs-list-resp-v2.json0000664000175000017500000000036000000000000026160 0ustar00zuulzuul00000000000000{ "virtual_interfaces": [ { "id": "cec8b9bb-5d22-4104-b3c8-4c35db3210a6", "mac_address": "fa:16:3e:3c:ce:6f", "OS-EXT-VIF-NET:net_id": "cec8b9bb-5d22-4104-b3c8-4c35db3210a7" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-virtual-interfaces/vifs-list-resp.json0000664000175000017500000000024200000000000025632 0ustar00zuulzuul00000000000000{ "virtual_interfaces": [ { "id": "cec8b9bb-5d22-4104-b3c8-4c35db3210a6", "mac_address": "fa:16:3e:3c:ce:6f" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1216068 nova-32.0.0/doc/api_samples/os-volume_attachments/0000775000175000017500000000000000000000000022145 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/attach-volume-to-server-req.json0000664000175000017500000000017400000000000030324 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "device": "/dev/sdb" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/attach-volume-to-server-resp.json0000664000175000017500000000035600000000000030510 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "device": "/dev/sdb", "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "serverId": "802db873-0373-4bdd-a433-d272a539ba18", "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/list-volume-attachments-resp.json0000664000175000017500000000100300000000000030572 0ustar00zuulzuul00000000000000{ "volumeAttachments": [ { "device": "/dev/sdc", "id": "227cc671-f30b-4488-96fd-7d0bf13648d8", "serverId": "4b293d31-ebd5-4a7f-be03-874b90021e54", "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" }, { "device": "/dev/sdb", "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "serverId": "4b293d31-ebd5-4a7f-be03-874b90021e54", "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/update-volume-req.json0000664000175000017500000000013600000000000026414 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 
mtime=1759315689.1216068 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.49/0000775000175000017500000000000000000000000022727 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.49/attach-volume-to-server-req.json0000664000175000017500000000016400000000000031105 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "tag": "foo" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.49/attach-volume-to-server-resp.json0000664000175000017500000000035600000000000031272 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "device": "/dev/sdb", "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "serverId": "69d19439-fa5f-4d6e-8b78-1868e7eb93a5", "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.49/list-volume-attachments-resp.json0000664000175000017500000000100300000000000031354 0ustar00zuulzuul00000000000000{ "volumeAttachments": [ { "device": "/dev/sdc", "id": "227cc671-f30b-4488-96fd-7d0bf13648d8", "serverId": "1453a6a8-10ec-4797-9b9e-da3c703579d5", "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" }, { "device": "/dev/sdb", "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "serverId": "1453a6a8-10ec-4797-9b9e-da3c703579d5", "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.49/update-volume-req.json0000664000175000017500000000013600000000000027176 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.49/volume-attachment-detail-resp.json0000664000175000017500000000035600000000000031472 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "device": "/dev/sdb", "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "serverId": "9ad0352c-48ff-4290-9db8-3385a676f035", "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1256068 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.70/0000775000175000017500000000000000000000000022721 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.70/attach-volume-to-server-req.json0000664000175000017500000000016400000000000031077 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "tag": "foo" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.70/attach-volume-to-server-resp.json0000664000175000017500000000040400000000000031256 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "device": "/dev/sdb", "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "serverId": "70f5c62a-972d-4a8b-abcf-e1375ca7f8c0", "tag": "foo", "volumeId": 
"a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.70/list-volume-attachments-resp.json0000664000175000017500000000106600000000000031357 0ustar00zuulzuul00000000000000{ "volumeAttachments": [ { "device": "/dev/sdc", "id": "227cc671-f30b-4488-96fd-7d0bf13648d8", "serverId": "68426b0f-511b-4cb3-8169-bba2e7a8bc89", "tag": null, "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" }, { "device": "/dev/sdb", "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "serverId": "68426b0f-511b-4cb3-8169-bba2e7a8bc89", "tag": "foo", "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.70/update-volume-req.json0000664000175000017500000000013600000000000027170 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.70/volume-attachment-detail-resp.json0000664000175000017500000000040400000000000031456 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "device": "/dev/sdb", "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "serverId": "d989feee-002d-40f6-b47d-f0dbee48bbc1", "tag": "foo", "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1256068 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.79/0000775000175000017500000000000000000000000022732 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.79/attach-volume-to-server-req.json0000664000175000017500000000023300000000000031105 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "tag": "foo", "delete_on_termination": true } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.79/attach-volume-to-server-resp.json0000664000175000017500000000045300000000000031273 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "delete_on_termination": true, "device": "/dev/sdb", "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "serverId": "09b3b9d1-b8c5-48e1-841d-62c3ef967a88", "tag": "foo", "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.79/list-volume-attachments-resp.json0000664000175000017500000000121500000000000031364 0ustar00zuulzuul00000000000000{ "volumeAttachments": [ { "delete_on_termination": false, "device": "/dev/sdc", "id": "227cc671-f30b-4488-96fd-7d0bf13648d8", "serverId": "d5e4ae35-ac0e-4311-a8c5-0ee863e951d9", "tag": null, "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" }, { "delete_on_termination": true, "device": "/dev/sdb", "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "serverId": "d5e4ae35-ac0e-4311-a8c5-0ee863e951d9", "tag": "foo", "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.79/update-volume-req.json0000664000175000017500000000013600000000000027201 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.79/volume-attachment-detail-resp.json0000664000175000017500000000045300000000000031473 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "delete_on_termination": true, "device": "/dev/sdb", "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "serverId": "2aad99d3-7aa4-41e9-b4e6-3f960b115d68", "tag": "foo", "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1256068 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.85/0000775000175000017500000000000000000000000022727 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.85/attach-volume-to-server-req.json0000664000175000017500000000023300000000000031102 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "tag": "foo", "delete_on_termination": true } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.85/attach-volume-to-server-resp.json0000664000175000017500000000045300000000000031270 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "delete_on_termination": true, "device": "/dev/sdb", "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "serverId": "09b3b9d1-b8c5-48e1-841d-62c3ef967a88", "tag": "foo", "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.85/list-volume-attachments-resp.json0000664000175000017500000000121500000000000031361 0ustar00zuulzuul00000000000000{ "volumeAttachments": [ { "delete_on_termination": false, "device": "/dev/sdc", "id": "227cc671-f30b-4488-96fd-7d0bf13648d8", "serverId": "d5e4ae35-ac0e-4311-a8c5-0ee863e951d9", "tag": null, "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" }, { "delete_on_termination": true, "device": "/dev/sdb", "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "serverId": "d5e4ae35-ac0e-4311-a8c5-0ee863e951d9", "tag": "foo", "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } ] }././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=nova-32.0.0/doc/api_samples/os-volume_attachments/v2.85/update-volume-attachment-delete-flag-req.json 22 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.85/update-volume-attachment-delete-flag-req.jso0000664000175000017500000000020600000000000033313 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "delete_on_termination": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.85/update-volume-req.json0000664000175000017500000000013600000000000027176 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "volumeId": 
"227cc671-f30b-4488-96fd-7d0bf13648d8" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.85/volume-attachment-detail-resp.json0000664000175000017500000000045300000000000031470 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "delete_on_termination": true, "device": "/dev/sdb", "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "serverId": "2aad99d3-7aa4-41e9-b4e6-3f960b115d68", "tag": "foo", "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1256068 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.89/0000775000175000017500000000000000000000000022733 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.89/attach-volume-to-server-req.json0000664000175000017500000000023300000000000031106 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "tag": "foo", "delete_on_termination": true } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.89/attach-volume-to-server-resp.json0000664000175000017500000000045300000000000031274 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "delete_on_termination": true, "device": "/dev/sdb", "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "serverId": "7ebed2ce-85b3-40b5-84ae-8cc725c37ed2", "tag": "foo", "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.89/list-volume-attachments-resp.json0000664000175000017500000000144400000000000031371 0ustar00zuulzuul00000000000000{ "volumeAttachments": [ { "attachment_id": "979ce4f8-033a-409d-85e6-6b5c0f6a6302", "delete_on_termination": false, "device": "/dev/sdc", "serverId": "7696780b-3f53-4688-ab25-019bfcbbd806", "tag": null, "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8", "bdm_uuid": "c088db45-92b8-49e8-81e2-a1b77a144b3b" }, { "attachment_id": "c5684109-0311-4fca-9814-350e46ab7d2a", "delete_on_termination": true, "device": "/dev/sdb", "serverId": "7696780b-3f53-4688-ab25-019bfcbbd806", "tag": "foo", "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "bdm_uuid": "1aa24536-6fb5-426c-8894-d627f39aa48b" } ] } ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=nova-32.0.0/doc/api_samples/os-volume_attachments/v2.89/update-volume-attachment-delete-flag-req.json 22 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.89/update-volume-attachment-delete-flag-req.jso0000664000175000017500000000045300000000000033323 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "serverId": "fddf0901-8caf-42c9-b496-133c570b171b", "device": "/dev/sdb", "tag": "foo", "delete_on_termination": true } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/v2.89/volume-attachment-detail-resp.json0000664000175000017500000000056300000000000031476 0ustar00zuulzuul00000000000000{ "volumeAttachment": { 
"attachment_id": "721a5c82-5ebc-4c6a-8339-3d33d8d027ed", "delete_on_termination": true, "device": "/dev/sdb", "serverId": "7ebed2ce-85b3-40b5-84ae-8cc725c37ed2", "tag": "foo", "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "bdm_uuid": "c088db45-92b8-49e8-81e2-a1b77a144b3b" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volume_attachments/volume-attachment-detail-resp.json0000664000175000017500000000035600000000000030710 0ustar00zuulzuul00000000000000{ "volumeAttachment": { "device": "/dev/sdb", "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "serverId": "1ad6852e-6605-4510-b639-d0bff864b49a", "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1296067 nova-32.0.0/doc/api_samples/os-volumes/0000775000175000017500000000000000000000000017735 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volumes/os-volumes-detail-resp.json0000664000175000017500000000141700000000000025153 0ustar00zuulzuul00000000000000{ "volumes": [ { "attachments": [ { "device": "/", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "serverId": "3912f2b4-c5ba-4aec-9165-872876fe202e", "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803" } ], "availabilityZone": "dublin", "createdAt": "1999-01-01T01:01:01.000000", "displayDescription": "Volume Description", "displayName": "Volume Name", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "metadata": {}, "size": 100, "snapshotId": null, "status": "in-use", "volumeType": "vol_type_name" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volumes/os-volumes-get-resp.json0000664000175000017500000000126200000000000024466 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [ { "device": "/", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "serverId": "3912f2b4-c5ba-4aec-9165-872876fe202e", "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803" } ], "availabilityZone": "dublin", "createdAt": "2013-02-18T14:51:18.528085", "displayDescription": "Volume Description", "displayName": "Volume Name", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "metadata": {}, "size": 100, "snapshotId": null, "status": "in-use", "volumeType": "vol_type_name" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volumes/os-volumes-index-resp.json0000664000175000017500000000141700000000000025020 0ustar00zuulzuul00000000000000{ "volumes": [ { "attachments": [ { "device": "/", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "serverId": "3912f2b4-c5ba-4aec-9165-872876fe202e", "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803" } ], "availabilityZone": "dublin", "createdAt": "2013-02-19T20:01:40.274897", "displayDescription": "Volume Description", "displayName": "Volume Name", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "metadata": {}, "size": 100, "snapshotId": null, "status": "in-use", "volumeType": "vol_type_name" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volumes/os-volumes-post-req.json0000664000175000017500000000026100000000000024510 0ustar00zuulzuul00000000000000{ "volume": { "availability_zone": "dublin", 
"display_name": "Volume Name", "display_description": "Volume Description", "size": 100 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/os-volumes/os-volumes-post-resp.json0000664000175000017500000000126200000000000024674 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [ { "device": "/", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "serverId": "3912f2b4-c5ba-4aec-9165-872876fe202e", "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803" } ], "availabilityZone": "dublin", "createdAt": "2013-02-18T14:51:17.970024", "displayDescription": "Volume Description", "displayName": "Volume Name", "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", "metadata": {}, "size": 100, "snapshotId": null, "status": "in-use", "volumeType": "vol_type_name" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1296067 nova-32.0.0/doc/api_samples/server-ips/0000775000175000017500000000000000000000000017723 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/server-ips/server-ips-network-resp.json0000664000175000017500000000015300000000000025352 0ustar00zuulzuul00000000000000{ "private": [ { "addr": "192.168.1.30", "version": 4 } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/server-ips/server-ips-resp.json0000664000175000017500000000023400000000000023663 0ustar00zuulzuul00000000000000{ "addresses": { "private": [ { "addr": "192.168.1.30", "version": 4 } ] } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1296067 nova-32.0.0/doc/api_samples/server-metadata/0000775000175000017500000000000000000000000020710 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/server-metadata/server-metadata-all-req.json0000664000175000017500000000006700000000000026225 0ustar00zuulzuul00000000000000{ "metadata": { "foo": "Foo Value" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/server-metadata/server-metadata-all-resp.json0000664000175000017500000000006600000000000026406 0ustar00zuulzuul00000000000000{ "metadata": { "foo": "Foo Value" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/server-metadata/server-metadata-req.json0000664000175000017500000000006300000000000025453 0ustar00zuulzuul00000000000000{ "meta": { "foo": "Bar Value" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/server-metadata/server-metadata-resp.json0000664000175000017500000000006300000000000025635 0ustar00zuulzuul00000000000000{ "meta": { "foo": "Foo Value" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315688.8656049 nova-32.0.0/doc/api_samples/server-migrations/0000775000175000017500000000000000000000000021304 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1296067 
nova-32.0.0/doc/api_samples/server-migrations/v2.22/0000775000175000017500000000000000000000000022055 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/server-migrations/v2.22/force_complete.json0000664000175000017500000000003700000000000025736 0ustar00zuulzuul00000000000000{ "force_complete": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/server-migrations/v2.22/live-migrate-server.json0000664000175000017500000000023200000000000026636 0ustar00zuulzuul00000000000000{ "os-migrateLive": { "host": "01c0cadef72d47e28a672a76060d492c", "block_migration": false, "disk_over_commit": false } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1296067 nova-32.0.0/doc/api_samples/server-migrations/v2.23/0000775000175000017500000000000000000000000022056 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/server-migrations/v2.23/migrations-get.json0000664000175000017500000000120500000000000025700 0ustar00zuulzuul00000000000000{ "migration": { "created_at": "2016-01-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "server_uuid": "4cfba335-03d8-49b2-8c52-e69043d1e8fe", "source_compute": "compute1", "source_node": "node1", "status": "running", "memory_total_bytes": 123456, "memory_processed_bytes": 12345, "memory_remaining_bytes": 111111, "disk_total_bytes": 234567, "disk_processed_bytes": 23456, "disk_remaining_bytes": 211111, "updated_at": "2016-01-29T13:42:02.000000" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/server-migrations/v2.23/migrations-index.json0000664000175000017500000000133200000000000026231 0ustar00zuulzuul00000000000000{ "migrations": [ { "created_at": "2016-01-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "server_uuid": "4cfba335-03d8-49b2-8c52-e69043d1e8fe", "source_compute": "compute1", "source_node": "node1", "status": "running", "memory_total_bytes": 123456, "memory_processed_bytes": 12345, "memory_remaining_bytes": 111111, "disk_total_bytes": 234567, "disk_processed_bytes": 23456, "disk_remaining_bytes": 211111, "updated_at": "2016-01-29T13:42:02.000000" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1296067 nova-32.0.0/doc/api_samples/server-migrations/v2.24/0000775000175000017500000000000000000000000022057 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/server-migrations/v2.24/live-migrate-server.json0000664000175000017500000000023200000000000026640 0ustar00zuulzuul00000000000000{ "os-migrateLive": { "host": "01c0cadef72d47e28a672a76060d492c", "block_migration": false, "disk_over_commit": false } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1296067 nova-32.0.0/doc/api_samples/server-migrations/v2.59/0000775000175000017500000000000000000000000022067 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/server-migrations/v2.59/migrations-get.json0000664000175000017500000000127500000000000025720 0ustar00zuulzuul00000000000000{ "migration": { "created_at": "2016-01-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "server_uuid": "4cfba335-03d8-49b2-8c52-e69043d1e8fe", "source_compute": "compute1", "source_node": "node1", "status": "running", "memory_total_bytes": 123456, "memory_processed_bytes": 12345, "memory_remaining_bytes": 111111, "disk_total_bytes": 234567, "disk_processed_bytes": 23456, "disk_remaining_bytes": 211111, "updated_at": "2016-01-29T13:42:02.000000", "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/server-migrations/v2.59/migrations-index.json0000664000175000017500000000142600000000000026246 0ustar00zuulzuul00000000000000{ "migrations": [ { "created_at": "2016-01-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "server_uuid": "4cfba335-03d8-49b2-8c52-e69043d1e8fe", "source_compute": "compute1", "source_node": "node1", "status": "running", "memory_total_bytes": 123456, "memory_processed_bytes": 12345, "memory_remaining_bytes": 111111, "disk_total_bytes": 234567, "disk_processed_bytes": 23456, "disk_remaining_bytes": 211111, "updated_at": "2016-01-29T13:42:02.000000", "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650" } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.133607 nova-32.0.0/doc/api_samples/server-migrations/v2.65/0000775000175000017500000000000000000000000022064 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/server-migrations/v2.65/live-migrate-server.json0000664000175000017500000000013200000000000026644 0ustar00zuulzuul00000000000000{ "os-migrateLive": { "host": null, "block_migration": "auto" } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.133607 nova-32.0.0/doc/api_samples/server-migrations/v2.80/0000775000175000017500000000000000000000000022061 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/server-migrations/v2.80/live-migrate-server.json0000664000175000017500000000013200000000000026641 0ustar00zuulzuul00000000000000{ "os-migrateLive": { "host": null, "block_migration": "auto" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/server-migrations/v2.80/migrations-get.json0000664000175000017500000000146600000000000025714 0ustar00zuulzuul00000000000000{ "migration": { "created_at": "2016-01-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "server_uuid": "4cfba335-03d8-49b2-8c52-e69043d1e8fe", "source_compute": "compute1", "source_node": "node1", "status": "running", "memory_total_bytes": 123456, "memory_processed_bytes": 12345, "memory_remaining_bytes": 111111, "disk_total_bytes": 234567, "disk_processed_bytes": 23456, "disk_remaining_bytes": 211111, "updated_at": "2016-01-29T13:42:02.000000", "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650", "user_id": 
"8dbaa0f0-ab95-4ffe-8cb4-9c89d2ac9d24", "project_id": "5f705771-3aa9-4f4c-8660-0d9522ffdbea" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/server-migrations/v2.80/migrations-index.json0000664000175000017500000000162700000000000026243 0ustar00zuulzuul00000000000000{ "migrations": [ { "created_at": "2016-01-29T13:42:02.000000", "dest_compute": "compute2", "dest_host": "1.2.3.4", "dest_node": "node2", "id": 1, "server_uuid": "4cfba335-03d8-49b2-8c52-e69043d1e8fe", "source_compute": "compute1", "source_node": "node1", "status": "running", "memory_total_bytes": 123456, "memory_processed_bytes": 12345, "memory_remaining_bytes": 111111, "disk_total_bytes": 234567, "disk_processed_bytes": 23456, "disk_remaining_bytes": 211111, "updated_at": "2016-01-29T13:42:02.000000", "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650", "user_id": "8dbaa0f0-ab95-4ffe-8cb4-9c89d2ac9d24", "project_id": "5f705771-3aa9-4f4c-8660-0d9522ffdbea" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1376069 nova-32.0.0/doc/api_samples/servers/0000775000175000017500000000000000000000000017315 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/server-action-addfloatingip-req.json0000664000175000017500000000015300000000000026360 0ustar00zuulzuul00000000000000{ "addFloatingIp" : { "address": "10.10.10.10", "fixed_address": "192.168.1.30" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/server-action-confirm-resize.json0000664000175000017500000000003600000000000025722 0ustar00zuulzuul00000000000000{ "confirmResize" : null }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/server-action-create-image.json0000664000175000017500000000020000000000000025302 0ustar00zuulzuul00000000000000{ "createImage" : { "name" : "foo-image", "metadata": { "meta_var": "meta_val" } } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/server-action-reboot.json0000664000175000017500000000006200000000000024257 0ustar00zuulzuul00000000000000{ "reboot" : { "type" : "HARD" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/server-action-rebuild-resp.json0000664000175000017500000000343200000000000025366 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "OS-DCF:diskConfig": "AUTO", "addresses": { "private": [ { "addr": "192.168.1.30", "version": 4 } ] }, "adminPass": "seekr3t", "created": "2013-11-14T06:29:00Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "28d8d56f0e3a77e20891f455721cbb68032e017045e20aa5dfc6cb66", "id": "a0a80a94-3d81-4a10-822a-daa0cf9e870b", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": 
"http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/a0a80a94-3d81-4a10-822a-daa0cf9e870b", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/a0a80a94-3d81-4a10-822a-daa0cf9e870b", "rel": "bookmark" } ], "metadata": { "meta_var": "meta_val" }, "name": "foobar", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-11-14T06:29:02Z", "user_id": "admin" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/server-action-rebuild.json0000664000175000017500000000156400000000000024423 0ustar00zuulzuul00000000000000{ "rebuild" : { "accessIPv4" : "1.2.3.4", "accessIPv6" : "80fe::", "OS-DCF:diskConfig": "AUTO", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", "adminPass" : "seekr3t", "metadata" : { "meta_var" : "meta_val" }, "personality": [ { "path": "/etc/banner.txt", "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/server-action-removefloatingip-req.json0000664000175000017500000000010500000000000027122 0ustar00zuulzuul00000000000000{ "removeFloatingIp": { "address": "172.16.10.7" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/server-action-resize.json0000664000175000017500000000013100000000000024263 0ustar00zuulzuul00000000000000{ "resize" : { "flavorRef" : "2", "OS-DCF:diskConfig": "AUTO" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/server-action-revert-resize.json0000664000175000017500000000003500000000000025573 0ustar00zuulzuul00000000000000{ "revertResize" : null }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/server-action-start.json0000664000175000017500000000003100000000000024116 0ustar00zuulzuul00000000000000{ "os-start" : null }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/server-action-stop.json0000664000175000017500000000003000000000000023745 0ustar00zuulzuul00000000000000{ "os-stop" : null }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/server-create-req-v237.json0000664000175000017500000000233400000000000024245 0ustar00zuulzuul00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "1", "availability_zone": "us-west", "OS-DCF:diskConfig": "AUTO", "metadata" : { "My Server Name" : "Apache1" }, "personality": [ { "path": "/etc/banner.txt", "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp 
dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" } ], "security_groups": [ { "name": "default" } ], "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "networks": "auto" }, "OS-SCH-HNT:scheduler_hints": { "same_host": "48e6a9f6-30af-47e0-bc04-acaed113bb4e" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/server-create-req-v257.json0000664000175000017500000000114700000000000024250 0ustar00zuulzuul00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "http://openstack.example.com/flavors/1", "availability_zone": "us-west", "OS-DCF:diskConfig": "AUTO", "metadata" : { "My Server Name" : "Apache1" }, "security_groups": [ { "name": "default" } ], "user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "networks": "auto" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/server-create-req.json0000664000175000017500000000230000000000000023537 0ustar00zuulzuul00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "1", "availability_zone": "us-west", "OS-DCF:diskConfig": "AUTO", "metadata" : { "My Server Name" : "Apache1" }, "personality": [ { "path": "/etc/banner.txt", "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" } ], "security_groups": [ { "name": "default" } ], "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==" }, "OS-SCH-HNT:scheduler_hints": { "same_host": "48e6a9f6-30af-47e0-bc04-acaed113bb4e" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/server-create-resp.json0000664000175000017500000000124500000000000023730 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "6NpUwoz2QDRN", "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/server-get-resp.json0000664000175000017500000000524400000000000023247 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", 
"addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29", "id": "0e44cc9c-e052-415d-afbf-469b0d384170", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [ { "id": "volume_id1" }, { "id": "volume_id2" } ], "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302", "OS-SRV-USG:terminated_at": null, "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-03T04:01:33Z", "user_id": "admin" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/server-update-req.json0000664000175000017500000000024200000000000023561 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "4.3.2.1", "accessIPv6": "80fe::", "OS-DCF:diskConfig": "AUTO", "name" : "new-server-test" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/server-update-resp.json0000664000175000017500000000341000000000000023743 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "version": 4 } ] }, "created": "2012-12-02T02:11:57Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "6e84af987b4e7ec1c039b16d21f508f4a505672bd94fb0218b668d07", "id": "324dfb7d-f4a9-419a-9a19-237df04b443b", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/324dfb7d-f4a9-419a-9a19-237df04b443b", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/324dfb7d-f4a9-419a-9a19-237df04b443b", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2012-12-02T02:11:58Z", "user_id": "admin" } 
}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/servers-details-resp.json0000664000175000017500000000632600000000000024302 0ustar00zuulzuul00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25", "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:host": "c3f14e9812ad496baf92ccfb3c61e15f", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [ { "id": "volume_id1" }, { "id": "volume_id2" } ], "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549", "OS-SRV-USG:terminated_at": null, "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-03T04:01:32Z", "user_id": "admin" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/servers-list-resp.json0000664000175000017500000000147600000000000023631 0ustar00zuulzuul00000000000000{ "servers": [ { "id": "22c91117-08de-4894-9aa9-6ef382400985", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", "rel": "bookmark" } ], "name": "new-server-test" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=22c91117-08de-4894-9aa9-6ef382400985", "rel": "next" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/servers-list-status-resp.json0000664000175000017500000000151400000000000025143 0ustar00zuulzuul00000000000000{ "servers": [ { "id": "22c91117-08de-4894-9aa9-6ef382400985", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", "rel": "self" }, { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", "rel": "bookmark" } ], "name": "new-server-test" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&status=error&marker=22c91117-08de-4894-9aa9-6ef382400985", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1376069 nova-32.0.0/doc/api_samples/servers/v2.100/0000775000175000017500000000000000000000000020143 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.100/server-action-rebuild-resp.json0000664000175000017500000000564200000000000026221 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:hostname": "updated-hostname.example.com", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2021-08-19T15:16:22.177882", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "adminPass": "seekr3t", "config_drive": "", "created": "2019-04-23T17:10:22Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "id": "0c37a84a-c757-4f22-8c7f-0bf8b6970886", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ], "properties": { "architecture": "x86_64", "auto_disk_config": "True" } }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886", "rel": "bookmark" } ], "locked": false, "locked_reason": null, "metadata": { "meta_var": "meta_val" }, "name": "foobar", "os-extended-volumes:volumes_attached": [], "progress": 0, "pinned_availability_zone": "us-west", "scheduler_hints": { "same_host": [ "48e6a9f6-30af-47e0-bc04-acaed113bb4e" ] }, "security_groups": [ { "name": "default" } ], "server_groups": [], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2019-04-23T17:10:24Z", "user_data": "ZWNobyAiaGVsbG8gd29ybGQi", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.100/server-action-rebuild.json0000664000175000017500000000064600000000000025251 0ustar00zuulzuul00000000000000{ "rebuild" : { "accessIPv4" : "1.2.3.4", "accessIPv6" : "80fe::", "OS-DCF:diskConfig": "AUTO", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", "adminPass" : "seekr3t", "hostname": "updated-hostname.example.com", "metadata" : { "meta_var" : "meta_val" }, "user_data": "ZWNobyAiaGVsbG8gd29ybGQi" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.100/server-create-req.json0000664000175000017500000000132000000000000024366 0ustar00zuulzuul00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "1", "OS-DCF:diskConfig": "AUTO", "availability_zone": "us-west", "metadata" : { "My Server Name" : "Apache1" }, "security_groups": [ { "name": "default" } ], "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "networks": "auto", "hostname": "new-server-test" }, "OS-SCH-HNT:scheduler_hints": { "same_host": "48e6a9f6-30af-47e0-bc04-acaed113bb4e" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.100/server-create-resp.json0000664000175000017500000000124500000000000024556 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "6NpUwoz2QDRN", "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.100/server-get-resp.json0000664000175000017500000000573400000000000024101 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "description": null, "locked": false, "locked_reason": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29", "id": "0e44cc9c-e052-415d-afbf-469b0d384170", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ], "properties": { "architecture": "x86_64", "auto_disk_config": "True" } }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [ {"id": "volume_id1", "delete_on_termination": false}, {"id": "volume_id2", "delete_on_termination": false} ], "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302", "OS-SRV-USG:terminated_at": null, "pinned_availability_zone": "us-west", "progress": 0, "scheduler_hints": { "same_host": [ "48e6a9f6-30af-47e0-bc04-acaed113bb4e" ] }, "security_groups": [ { "name": "default" } ], "server_groups": [], 
"status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2013-09-03T04:01:33Z", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.100/server-update-req.json0000664000175000017500000000026700000000000024416 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "4.3.2.1", "accessIPv6": "80fe::", "OS-DCF:diskConfig": "AUTO", "hostname" : "new-server-hostname.example.com" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.100/server-update-resp.json0000664000175000017500000000561400000000000024601 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "description": null, "locked": false, "locked_reason": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29", "id": "0e44cc9c-e052-415d-afbf-469b0d384170", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ], "properties": { "architecture": "x86_64", "auto_disk_config": "True" } }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:hostname": "new-server-hostname.example.com", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [], "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302", "OS-SRV-USG:terminated_at": null, "pinned_availability_zone": "us-west", "progress": 0, "scheduler_hints": { "same_host": [ "48e6a9f6-30af-47e0-bc04-acaed113bb4e" ] }, "security_groups": [ { "name": "default" } ], "server_groups": [], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2013-09-03T04:01:33Z", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.100/servers-details-resp.json0000664000175000017500000000702100000000000025121 0ustar00zuulzuul00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "description": "", "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25", "id": 
"f5dc173b-6804-445a-a6d8-c705dad5b5eb", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ], "properties": { "architecture": "x86_64", "auto_disk_config": "True" } }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "locked": false, "locked_reason": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [ {"id": "volume_id1", "delete_on_termination": false}, {"id": "volume_id2", "delete_on_termination": false} ], "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549", "OS-SRV-USG:terminated_at": null, "pinned_availability_zone": "us-west", "progress": 0, "scheduler_hints": { "same_host": [ "48e6a9f6-30af-47e0-bc04-acaed113bb4e" ] }, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2013-09-03T04:01:32Z", "user_id": "fake" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.100/servers-list-resp.json0000664000175000017500000000147700000000000024460 0ustar00zuulzuul00000000000000{ "servers": [ { "id": "3cfb801c-f03c-45ce-834b-d097b34e9534", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/3cfb801c-f03c-45ce-834b-d097b34e9534", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/3cfb801c-f03c-45ce-834b-d097b34e9534", "rel": "bookmark" } ], "name": "new-server-test" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=3cfb801c-f03c-45ce-834b-d097b34e9534", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1416068 nova-32.0.0/doc/api_samples/servers/v2.16/0000775000175000017500000000000000000000000020071 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.16/server-get-resp.json0000664000175000017500000000630400000000000024021 0ustar00zuulzuul00000000000000{ "server": { "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-16T02:55:07Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "3bf189131c61d0e71b0a8686a897a0f50d1693b48c47b721fe77155b", "id": "c278163e-36f9-4cf2-b1ac-80db4c63f7a8", "image": { "id": 
"70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/c278163e-36f9-4cf2-b1ac-80db4c63f7a8", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/c278163e-36f9-4cf2-b1ac-80db4c63f7a8", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:host": "c5f474bf81474f9dbbc404d5b2e4e9b3", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:reservation_id": "r-12345678", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:kernel_id": null, "OS-EXT-SRV-ATTR:ramdisk_id": null, "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [ { "id": "volume_id1", "delete_on_termination": false }, { "id": "volume_id2", "delete_on_termination": false } ], "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302", "OS-SRV-USG:terminated_at": null, "security_groups": [ { "name": "default" } ], "locked": false, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "progress": 0, "status": "ACTIVE", "host_status": "UP", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-16T02:55:08Z", "user_id": "admin" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.16/servers-details-resp.json0000664000175000017500000000744200000000000025056 0ustar00zuulzuul00000000000000{ "servers": [ { "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-16T02:55:03Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "63cf07a9fd82e1d2294926ec5c0d2e1e0ca449224246df75e16f23dc", "id": "a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:host": "bc8efe4fdb7148a4bb921a2b03d17de6", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:reservation_id": "r-12345678", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", 
"OS-EXT-SRV-ATTR:kernel_id": null, "OS-EXT-SRV-ATTR:ramdisk_id": null, "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "locked": false, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [ { "id": "volume_id1", "delete_on_termination": false }, { "id": "volume_id2", "delete_on_termination": false } ], "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549", "OS-SRV-USG:terminated_at": null, "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "host_status": "UP", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-16T02:55:05Z", "user_id": "admin" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.16/servers-list-resp.json0000664000175000017500000000147600000000000024405 0ustar00zuulzuul00000000000000{ "servers": [ { "id": "22c91117-08de-4894-9aa9-6ef382400985", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", "rel": "bookmark" } ], "name": "new-server-test" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=22c91117-08de-4894-9aa9-6ef382400985", "rel": "next" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1416068 nova-32.0.0/doc/api_samples/servers/v2.17/0000775000175000017500000000000000000000000020072 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.17/server-action-trigger-crash-dump.json0000664000175000017500000000004300000000000027245 0ustar00zuulzuul00000000000000{ "trigger_crash_dump": null } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1416068 nova-32.0.0/doc/api_samples/servers/v2.19/0000775000175000017500000000000000000000000020074 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.19/server-action-rebuild-resp.json0000664000175000017500000000354500000000000026152 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "version": 4 } ] }, "adminPass": "seekr3t", "created": "2013-11-14T06:29:00Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "28d8d56f0e3a77e20891f455721cbb68032e017045e20aa5dfc6cb66", "id": "a0a80a94-3d81-4a10-822a-daa0cf9e870b", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": 
"http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/a0a80a94-3d81-4a10-822a-daa0cf9e870b", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/a0a80a94-3d81-4a10-822a-daa0cf9e870b", "rel": "bookmark" } ], "locked": false, "metadata": { "meta_var": "meta_val" }, "name": "foobar", "description" : "description of foobar", "progress": 0, "status": "ACTIVE", "OS-DCF:diskConfig": "AUTO", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-11-14T06:29:02Z", "user_id": "admin" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.19/server-action-rebuild.json0000664000175000017500000000051600000000000025176 0ustar00zuulzuul00000000000000{ "rebuild" : { "accessIPv4" : "1.2.3.4", "accessIPv6" : "80fe::", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", "description" : "description of foobar", "adminPass" : "seekr3t", "metadata" : { "meta_var" : "meta_val" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.19/server-create-req.json0000664000175000017500000000057000000000000024325 0ustar00zuulzuul00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "description" : "new-server-description", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "http://openstack.example.com/flavors/1", "metadata" : { "My Server Name" : "Apache1" } } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.19/server-create-resp.json0000664000175000017500000000124400000000000024506 0ustar00zuulzuul00000000000000{ "server": { "adminPass": "rySfUy7xL4C5", "OS-DCF:diskConfig": "AUTO", "id": "19923676-e78b-46fb-af62-a5942aece2ac", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/19923676-e78b-46fb-af62-a5942aece2ac", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/19923676-e78b-46fb-af62-a5942aece2ac", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.19/server-get-resp.json0000664000175000017500000000626400000000000024031 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "created": "2015-12-07T17:24:14Z", "description": "new-server-description", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "c656e68b04b483cfc87cdbaa2346557b174ec1cb6be6afbd2a0133a0", "id": "ddb205dc-717e-496e-8e96-88a3b31b075d", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/ddb205dc-717e-496e-8e96-88a3b31b075d", "rel": "self" }, { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/servers/ddb205dc-717e-496e-8e96-88a3b31b075d", "rel": "bookmark" } ], "locked": false, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "nova", "OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:reservation_id": "r-00000001", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:kernel_id": "", "OS-EXT-SRV-ATTR:ramdisk_id": "", "OS-EXT-SRV-ATTR:hostname": "fake-hostname", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:user_data": "", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [ { "id": "volume_id1", "delete_on_termination": false }, { "id": "volume_id2", "delete_on_termination": false } ], "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302", "OS-SRV-USG:terminated_at": null, "progress": 0, "security_groups": [ { "name": "default" } ], "host_status": "UP", "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2015-12-07T17:24:15Z", "user_id": "admin" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.19/server-put-req.json0000664000175000017500000000017000000000000023666 0ustar00zuulzuul00000000000000{ "server" : { "name" : "updated-server-test", "description" : "updated-server-description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.19/server-put-resp.json0000664000175000017500000000353200000000000024055 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "OS-DCF:diskConfig": "AUTO", "addresses": { "private": [ { "addr": "192.168.1.30", "version": 4 } ] }, "created": "2015-12-07T19:19:36Z", "description": "updated-server-description", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "4e17a358ca9bbc8ac6e215837b6410c0baa21b2463fefe3e8f712b31", "id": "c509708e-f0c6-461f-b2b3-507547959eb2", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/c509708e-f0c6-461f-b2b3-507547959eb2", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/c509708e-f0c6-461f-b2b3-507547959eb2", "rel": "bookmark" } ], "locked": false, "metadata": { "My Server Name": "Apache1" }, "name": "updated-server-test", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2015-12-07T19:19:36Z", "user_id": "admin" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.19/servers-details-resp.json0000664000175000017500000000742600000000000025063 0ustar00zuulzuul00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", 
"OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "created": "2015-12-07T19:54:48Z", "description": "new-server-description", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "a672ab12738567bfcb852c846d66a6ce5c3555b42d73db80bdc6f1a4", "id": "91965362-fd86-4543-8ce1-c17074d2984d", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/91965362-fd86-4543-8ce1-c17074d2984d", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/91965362-fd86-4543-8ce1-c17074d2984d", "rel": "bookmark" } ], "locked": false, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "nova", "OS-EXT-SRV-ATTR:host": "c3f14e9812ad496baf92ccfb3c61e15f", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:reservation_id": "r-00000001", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:kernel_id": "", "OS-EXT-SRV-ATTR:ramdisk_id": "", "OS-EXT-SRV-ATTR:hostname": "fake-hostname", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:user_data": "", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [ { "id": "volume_id1", "delete_on_termination": false }, { "id": "volume_id2", "delete_on_termination": false } ], "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549", "OS-SRV-USG:terminated_at": null, "progress": 0, "security_groups": [ { "name": "default" } ], "host_status": "UP", "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2015-12-07T19:54:49Z", "user_id": "admin" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=91965362-fd86-4543-8ce1-c17074d2984d", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.19/servers-list-resp.json0000664000175000017500000000147600000000000024410 0ustar00zuulzuul00000000000000{ "servers": [ { "id": "78d95942-8805-4597-b1af-3d0e38330758", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/78d95942-8805-4597-b1af-3d0e38330758", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/78d95942-8805-4597-b1af-3d0e38330758", "rel": "bookmark" } ], "name": "new-server-test" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=22c91117-08de-4894-9aa9-6ef382400985", "rel": "next" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1416068 nova-32.0.0/doc/api_samples/servers/v2.26/0000775000175000017500000000000000000000000020072 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/servers/v2.26/server-action-rebuild-resp.json0000664000175000017500000000360700000000000026147 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "version": 4 } ] }, "adminPass": "seekr3t", "created": "2013-11-14T06:29:00Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "28d8d56f0e3a77e20891f455721cbb68032e017045e20aa5dfc6cb66", "id": "a0a80a94-3d81-4a10-822a-daa0cf9e870b", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/a0a80a94-3d81-4a10-822a-daa0cf9e870b", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/a0a80a94-3d81-4a10-822a-daa0cf9e870b", "rel": "bookmark" } ], "metadata": { "meta_var": "meta_val" }, "name": "foobar", "OS-DCF:diskConfig": "AUTO", "progress": 0, "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-11-14T06:29:02Z", "user_id": "admin", "locked": false, "description" : "description of foobar", "tags": ["tag1", "tag2"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.26/server-action-rebuild.json0000664000175000017500000000171500000000000025176 0ustar00zuulzuul00000000000000{ "rebuild" : { "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "accessIPv4" : "1.2.3.4", "accessIPv6" : "80fe::", "adminPass" : "seekr3t", "metadata" : { "meta_var" : "meta_val" }, "name" : "foobar", "OS-DCF:diskConfig": "AUTO", "personality" : [ { "path" : "/etc/banner.txt", "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" } ], "preserve_ephemeral": false, "description" : "description of foobar" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1416068 nova-32.0.0/doc/api_samples/servers/v2.3/0000775000175000017500000000000000000000000020005 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.3/server-get-resp.json0000664000175000017500000000621000000000000023731 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29", "id": "0e44cc9c-e052-415d-afbf-469b0d384170", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:reservation_id": "r-00000001", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:kernel_id": "", "OS-EXT-SRV-ATTR:ramdisk_id": "", "OS-EXT-SRV-ATTR:hostname": "fake-hostname", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [ { "id": "volume_id1", "delete_on_termination": false }, { "id": "volume_id2", "delete_on_termination": false } ], "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302", "OS-SRV-USG:terminated_at": null, "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-03T04:01:33Z", "user_id": "admin" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.3/servers-details-resp.json0000664000175000017500000000733600000000000024774 0ustar00zuulzuul00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25", "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:host": "c3f14e9812ad496baf92ccfb3c61e15f", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:reservation_id": "r-00000001", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:kernel_id": "", "OS-EXT-SRV-ATTR:ramdisk_id": "", "OS-EXT-SRV-ATTR:hostname": "fake-hostname", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:user_data": 
"IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [ { "id": "volume_id1", "delete_on_termination": false }, { "id": "volume_id2", "delete_on_termination": false } ], "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549", "OS-SRV-USG:terminated_at": null, "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-03T04:01:32Z", "user_id": "admin" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.3/servers-list-resp.json0000664000175000017500000000147600000000000024321 0ustar00zuulzuul00000000000000{ "servers": [ { "id": "22c91117-08de-4894-9aa9-6ef382400985", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", "rel": "bookmark" } ], "name": "new-server-test" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=22c91117-08de-4894-9aa9-6ef382400985", "rel": "next" } ] }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.145607 nova-32.0.0/doc/api_samples/servers/v2.32/0000775000175000017500000000000000000000000020067 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.32/server-create-req.json0000664000175000017500000000102300000000000024312 0ustar00zuulzuul00000000000000{ "server" : { "name" : "device-tagging-server", "flavorRef" : "http://openstack.example.com/flavors/1", "networks" : [{ "uuid" : "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "tag": "nic1" }], "block_device_mapping_v2": [{ "uuid": "70a599e0-31e7-49b7-b260-868f441e862b", "source_type": "image", "destination_type": "volume", "boot_index": 0, "volume_size": "1", "tag": "disk1" }] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.32/server-create-resp.json0000664000175000017500000000124700000000000024504 0ustar00zuulzuul00000000000000{ "server": { "adminPass": "rojsEujtu7GB", "OS-DCF:diskConfig": "AUTO", "id": "05ec6bde-40bf-47e8-ac07-89c12b2eee03", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/05ec6bde-40bf-47e8-ac07-89c12b2eee03", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/05ec6bde-40bf-47e8-ac07-89c12b2eee03", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.145607 nova-32.0.0/doc/api_samples/servers/v2.37/0000775000175000017500000000000000000000000020074 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/servers/v2.37/server-create-req.json0000664000175000017500000000033100000000000024320 0ustar00zuulzuul00000000000000{ "server": { "name": "auto-allocate-network", "imageRef": "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef": "http://openstack.example.com/flavors/1", "networks": "auto" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.37/server-create-resp.json0000664000175000017500000000124400000000000024506 0ustar00zuulzuul00000000000000{ "server": { "adminPass": "rySfUy7xL4C5", "OS-DCF:diskConfig": "AUTO", "id": "19923676-e78b-46fb-af62-a5942aece2ac", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/19923676-e78b-46fb-af62-a5942aece2ac", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/19923676-e78b-46fb-af62-a5942aece2ac", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.145607 nova-32.0.0/doc/api_samples/servers/v2.42/0000775000175000017500000000000000000000000020070 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.42/server-create-req.json0000664000175000017500000000102300000000000024313 0ustar00zuulzuul00000000000000{ "server" : { "name" : "device-tagging-server", "flavorRef" : "http://openstack.example.com/flavors/1", "networks" : [{ "uuid" : "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "tag": "nic1" }], "block_device_mapping_v2": [{ "uuid": "70a599e0-31e7-49b7-b260-868f441e862b", "source_type": "image", "destination_type": "volume", "boot_index": 0, "volume_size": "1", "tag": "disk1" }] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.42/server-create-resp.json0000664000175000017500000000124600000000000024504 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "S5wqy9sPYUvU", "id": "97108291-2fd7-4dc2-a909-eaae0306a6a9", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/97108291-2fd7-4dc2-a909-eaae0306a6a9", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/97108291-2fd7-4dc2-a909-eaae0306a6a9", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.145607 nova-32.0.0/doc/api_samples/servers/v2.45/0000775000175000017500000000000000000000000020073 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.45/server-action-create-image-resp.json0000664000175000017500000000007200000000000027036 0ustar00zuulzuul00000000000000{ "image_id": "0e7761dd-ee98-41f0-ba35-05994e446431" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.45/server-action-create-image.json0000664000175000017500000000020000000000000026060 0ustar00zuulzuul00000000000000{ "createImage" : { "name" : "foo-image", "metadata": { "meta_var": "meta_val" } } 
}././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.145607 nova-32.0.0/doc/api_samples/servers/v2.47/0000775000175000017500000000000000000000000020075 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.47/server-action-rebuild-resp.json0000664000175000017500000000347100000000000026151 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "version": 4 } ] }, "adminPass": "seekr3t", "created": "2013-11-14T06:29:00Z", "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "28d8d56f0e3a77e20891f455721cbb68032e017045e20aa5dfc6cb66", "id": "a0a80a94-3d81-4a10-822a-daa0cf9e870b", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/a0a80a94-3d81-4a10-822a-daa0cf9e870b", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/a0a80a94-3d81-4a10-822a-daa0cf9e870b", "rel": "bookmark" } ], "locked": false, "metadata": { "meta_var": "meta_val" }, "name": "foobar", "description" : null, "progress": 0, "status": "ACTIVE", "OS-DCF:diskConfig": "AUTO", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-11-14T06:29:02Z", "user_id": "admin" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.47/server-action-rebuild.json0000664000175000017500000000156700000000000025206 0ustar00zuulzuul00000000000000{ "rebuild" : { "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "accessIPv4" : "1.2.3.4", "accessIPv6" : "80fe::", "adminPass" : "seekr3t", "metadata" : { "meta_var" : "meta_val" }, "name" : "foobar", "OS-DCF:diskConfig": "AUTO", "personality" : [ { "path" : "/etc/banner.txt", "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.47/server-create-req.json0000664000175000017500000000236300000000000024330 0ustar00zuulzuul00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "6", "availability_zone": "us-west", "OS-DCF:diskConfig": "AUTO", "metadata" : { "My Server Name" : "Apache1" }, "personality": [ { "path": "/etc/banner.txt", "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo 
ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" } ], "security_groups": [ { "name": "default" } ], "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "networks": "auto" }, "OS-SCH-HNT:scheduler_hints": { "same_host": "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.47/server-create-resp.json0000664000175000017500000000124600000000000024511 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "S5wqy9sPYUvU", "id": "97108291-2fd7-4dc2-a909-eaae0306a6a9", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/97108291-2fd7-4dc2-a909-eaae0306a6a9", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/97108291-2fd7-4dc2-a909-eaae0306a6a9", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.47/server-get-resp.json0000664000175000017500000000634500000000000024032 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:host": "compute", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:kernel_id": "", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:ramdisk_id": "", "OS-EXT-SRV-ATTR:reservation_id": "r-ov3q80zj", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2017-02-14T19:23:59.895661", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "config_drive": "", "created": "2017-02-14T19:23:58Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": { "hw:numa_nodes": "1" }, "original_name": "m1.tiny.specs", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "host_status": "UP", "id": "9168b536-cd40-4630-b43f-b259807c6e87", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/9168b536-cd40-4630-b43f-b259807c6e87", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/9168b536-cd40-4630-b43f-b259807c6e87", "rel": "bookmark" } ], "locked": false, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-extended-volumes:volumes_attached": [ { "delete_on_termination": false, "id": "volume_id1" }, { "delete_on_termination": false, "id": "volume_id2" } ], "progress": 0, "security_groups": [ { "name": "default" } ], "status": 
"ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "updated": "2017-02-14T19:24:00Z", "user_id": "admin" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.47/server-update-req.json0000664000175000017500000000031700000000000024344 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "OS-DCF:diskConfig": "AUTO", "name": "new-server-test", "description": "Sample description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.47/server-update-resp.json0000664000175000017500000000346600000000000024536 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "version": 4 } ] }, "created": "2012-12-02T02:11:57Z", "description": "Sample description", "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "6e84af987b4e7ec1c039b16d21f508f4a505672bd94fb0218b668d07", "id": "324dfb7d-f4a9-419a-9a19-237df04b443b", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/324dfb7d-f4a9-419a-9a19-237df04b443b", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/324dfb7d-f4a9-419a-9a19-237df04b443b", "rel": "bookmark" } ], "locked": false, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "updated": "2012-12-02T02:11:58Z", "user_id": "admin" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.47/servers-details-resp.json0000664000175000017500000000752300000000000025062 0ustar00zuulzuul00000000000000{ "servers": [ { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:host": "compute", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:kernel_id": "", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:ramdisk_id": "", "OS-EXT-SRV-ATTR:reservation_id": "r-iffothgx", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2017-02-14T19:24:43.891568", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "config_drive": "", "created": "2017-02-14T19:24:42Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": { "hw:numa_nodes": "1" }, "original_name": "m1.tiny.specs", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "host_status": "UP", "id": 
"764e369e-a874-4401-b7ce-43e4760888da", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/764e369e-a874-4401-b7ce-43e4760888da", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/764e369e-a874-4401-b7ce-43e4760888da", "rel": "bookmark" } ], "locked": false, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-extended-volumes:volumes_attached": [ { "delete_on_termination": false, "id": "volume_id1" }, { "delete_on_termination": false, "id": "volume_id2" } ], "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "updated": "2017-02-14T19:24:43Z", "user_id": "admin" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=764e369e-a874-4401-b7ce-43e4760888da", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.47/servers-list-resp.json0000664000175000017500000000150000000000000024375 0ustar00zuulzuul00000000000000{ "servers": [ { "id": "6e3a87e6-a133-452e-86e1-a31291c1b1c8", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/6e3a87e6-a133-452e-86e1-a31291c1b1c8", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/6e3a87e6-a133-452e-86e1-a31291c1b1c8", "rel": "bookmark" } ], "name": "new-server-test" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=6e3a87e6-a133-452e-86e1-a31291c1b1c8", "rel": "next" } ] }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.149607 nova-32.0.0/doc/api_samples/servers/v2.52/0000775000175000017500000000000000000000000020071 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.52/server-create-req.json0000664000175000017500000000244300000000000024323 0ustar00zuulzuul00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "http://openstack.example.com/flavors/1", "availability_zone": "us-west", "OS-DCF:diskConfig": "AUTO", "metadata" : { "My Server Name" : "Apache1" }, "personality": [ { "path": "/etc/banner.txt", "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" } ], "security_groups": [ { "name": "default" } ], "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "networks": "auto", "tags": ["tag1", "tag2"] }, "OS-SCH-HNT:scheduler_hints": { "same_host": 
"48e6a9f6-30af-47e0-bc04-acaed113bb4e" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.52/server-create-resp.json0000664000175000017500000000124600000000000024505 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "S5wqy9sPYUvU", "id": "97108291-2fd7-4dc2-a909-eaae0306a6a9", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/97108291-2fd7-4dc2-a909-eaae0306a6a9", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/97108291-2fd7-4dc2-a909-eaae0306a6a9", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.52/server-get-resp.json0000664000175000017500000000627300000000000024026 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:host": "compute", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:kernel_id": "", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:ramdisk_id": "", "OS-EXT-SRV-ATTR:reservation_id": "r-ov3q80zj", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2017-02-14T19:23:59.895661", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "config_drive": "", "created": "2017-02-14T19:23:58Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "host_status": "UP", "id": "9168b536-cd40-4630-b43f-b259807c6e87", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/9168b536-cd40-4630-b43f-b259807c6e87", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/9168b536-cd40-4630-b43f-b259807c6e87", "rel": "bookmark" } ], "locked": false, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-extended-volumes:volumes_attached": [ { "delete_on_termination": false, "id": "volume_id1" }, { "delete_on_termination": false, "id": "volume_id2" } ], "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tags": ["tag1", "tag2"], "tenant_id": "6f70656e737461636b20342065766572", "updated": "2017-02-14T19:24:00Z", "user_id": "admin" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.52/servers-details-resp.json0000664000175000017500000000744100000000000025055 0ustar00zuulzuul00000000000000{ "servers": [ { 
"OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:host": "compute", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:kernel_id": "", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:ramdisk_id": "", "OS-EXT-SRV-ATTR:reservation_id": "r-iffothgx", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2017-02-14T19:24:43.891568", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "config_drive": "", "created": "2017-02-14T19:24:42Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "host_status": "UP", "id": "764e369e-a874-4401-b7ce-43e4760888da", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/764e369e-a874-4401-b7ce-43e4760888da", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/764e369e-a874-4401-b7ce-43e4760888da", "rel": "bookmark" } ], "locked": false, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-extended-volumes:volumes_attached": [ { "delete_on_termination": false, "id": "volume_id1" }, { "delete_on_termination": false, "id": "volume_id2" } ], "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tags": ["tag1", "tag2"], "tenant_id": "6f70656e737461636b20342065766572", "updated": "2017-02-14T19:24:43Z", "user_id": "admin" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=764e369e-a874-4401-b7ce-43e4760888da", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.52/servers-list-resp.json0000664000175000017500000000150000000000000024371 0ustar00zuulzuul00000000000000{ "servers": [ { "id": "6e3a87e6-a133-452e-86e1-a31291c1b1c8", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/6e3a87e6-a133-452e-86e1-a31291c1b1c8", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/6e3a87e6-a133-452e-86e1-a31291c1b1c8", "rel": "bookmark" } ], "name": "new-server-test" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=6e3a87e6-a133-452e-86e1-a31291c1b1c8", "rel": "next" } ] }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.149607 nova-32.0.0/doc/api_samples/servers/v2.54/0000775000175000017500000000000000000000000020073 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.54/server-action-rebuild-resp.json0000664000175000017500000000355300000000000026150 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "version": 4 } ] }, "adminPass": "seekr3t", "created": "2013-11-14T06:29:00Z", "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "28d8d56f0e3a77e20891f455721cbb68032e017045e20aa5dfc6cb66", "id": "a0a80a94-3d81-4a10-822a-daa0cf9e870b", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/a0a80a94-3d81-4a10-822a-daa0cf9e870b", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/a0a80a94-3d81-4a10-822a-daa0cf9e870b", "rel": "bookmark" } ], "locked": false, "metadata": { "meta_var": "meta_val" }, "name": "foobar", "key_name": "new-key", "description" : "description of foobar", "progress": 0, "status": "ACTIVE", "OS-DCF:diskConfig": "AUTO", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-11-14T06:29:02Z", "user_id": "admin", "tags": [] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.54/server-action-rebuild.json0000664000175000017500000000055500000000000025200 0ustar00zuulzuul00000000000000{ "rebuild" : { "accessIPv4" : "1.2.3.4", "accessIPv6" : "80fe::", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", "key_name": "new-key", "description" : "description of foobar", "adminPass" : "seekr3t", "metadata" : { "meta_var" : "meta_val" } } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.149607 nova-32.0.0/doc/api_samples/servers/v2.57/0000775000175000017500000000000000000000000020076 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.57/server-action-rebuild-resp.json0000664000175000017500000000363300000000000026152 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "version": 4 } ] }, "adminPass": "seekr3t", "created": "2013-11-14T06:29:00Z", "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "28d8d56f0e3a77e20891f455721cbb68032e017045e20aa5dfc6cb66", "id": "a0a80a94-3d81-4a10-822a-daa0cf9e870b", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/a0a80a94-3d81-4a10-822a-daa0cf9e870b", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/a0a80a94-3d81-4a10-822a-daa0cf9e870b", "rel": "bookmark" } ], "locked": false, "metadata": { "meta_var": "meta_val" }, "name": "foobar", "key_name": "new-key", "description": "description of 
foobar", "progress": 0, "status": "ACTIVE", "OS-DCF:diskConfig": "AUTO", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-11-14T06:29:02Z", "user_id": "admin", "tags": [], "user_data": "ZWNobyAiaGVsbG8gd29ybGQi" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.57/server-action-rebuild.json0000664000175000017500000000062700000000000025203 0ustar00zuulzuul00000000000000{ "rebuild" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "imageRef": "70a599e0-31e7-49b7-b260-868f441e862b", "name": "foobar", "key_name": "new-key", "description": "description of foobar", "adminPass": "seekr3t", "metadata" : { "meta_var": "meta_val" }, "user_data": "ZWNobyAiaGVsbG8gd29ybGQi" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.57/server-create-req.json0000664000175000017500000000114700000000000024330 0ustar00zuulzuul00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "http://openstack.example.com/flavors/1", "availability_zone": "us-west", "OS-DCF:diskConfig": "AUTO", "metadata" : { "My Server Name" : "Apache1" }, "security_groups": [ { "name": "default" } ], "user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "networks": "auto" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.57/server-create-resp.json0000664000175000017500000000124600000000000024512 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "S5wqy9sPYUvU", "id": "97108291-2fd7-4dc2-a909-eaae0306a6a9", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/97108291-2fd7-4dc2-a909-eaae0306a6a9", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/97108291-2fd7-4dc2-a909-eaae0306a6a9", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.149607 nova-32.0.0/doc/api_samples/servers/v2.63/0000775000175000017500000000000000000000000020073 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.63/server-action-rebuild-resp.json0000664000175000017500000000416100000000000026144 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "version": 4 } ] }, "adminPass": "seekr3t", "created": "2017-10-10T16:06:02Z", "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": { "hw:numa_nodes": "1" }, "original_name": "m1.tiny.specs", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "28d8d56f0e3a77e20891f455721cbb68032e017045e20aa5dfc6cb66", "id": "a0a80a94-3d81-4a10-822a-daa0cf9e870b", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/a4baaf2a-3768-4e45-8847-13becef6bc5e", 
"rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/a4baaf2a-3768-4e45-8847-13becef6bc5e", "rel": "bookmark" } ], "locked": false, "metadata": { "meta_var": "meta_val" }, "name": "foobar", "key_name": "new-key", "description" : "description of foobar", "progress": 0, "status": "ACTIVE", "tags": [], "user_data": "ZWNobyAiaGVsbG8gd29ybGQi", "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": [ "0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8", "674736e3-f25c-405c-8362-bbf991e0ce0a" ], "updated": "2017-10-10T16:06:03Z", "user_id": "admin" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.63/server-action-rebuild.json0000664000175000017500000000113500000000000025173 0ustar00zuulzuul00000000000000{ "rebuild" : { "accessIPv4" : "1.2.3.4", "accessIPv6" : "80fe::", "OS-DCF:diskConfig": "AUTO", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", "key_name": "new-key", "description" : "description of foobar", "adminPass" : "seekr3t", "metadata" : { "meta_var" : "meta_val" }, "user_data": "ZWNobyAiaGVsbG8gd29ybGQi", "trusted_image_certificates": [ "0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8", "674736e3-f25c-405c-8362-bbf991e0ce0a" ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.63/server-create-req.json0000664000175000017500000000155100000000000024324 0ustar00zuulzuul00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "6", "availability_zone": "%(availability_zone)s", "OS-DCF:diskConfig": "AUTO", "metadata" : { "My Server Name" : "Apache1" }, "security_groups": [ { "name": "default" } ], "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "networks": "auto", "trusted_image_certificates": [ "0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8", "674736e3-f25c-405c-8362-bbf991e0ce0a" ] }, "OS-SCH-HNT:scheduler_hints": { "same_host": "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.63/server-create-resp.json0000664000175000017500000000124600000000000024507 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "wKLKinb9u7GM", "id": "aab35fd0-b459-4b59-9308-5a23147f3165", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/aab35fd0-b459-4b59-9308-5a23147f3165", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/aab35fd0-b459-4b59-9308-5a23147f3165", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.63/server-get-resp.json0000664000175000017500000000622700000000000024027 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:host": "compute", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:kernel_id": "", "OS-EXT-SRV-ATTR:launch_index": 0, 
"OS-EXT-SRV-ATTR:ramdisk_id": "", "OS-EXT-SRV-ATTR:reservation_id": "r-ov3q80zj", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2017-02-14T19:23:59.895661", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "config_drive": "", "created": "2017-02-14T19:23:58Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": { "hw:numa_nodes": "1" }, "original_name": "m1.tiny.specs", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "host_status": "UP", "id": "9168b536-cd40-4630-b43f-b259807c6e87", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/9168b536-cd40-4630-b43f-b259807c6e87", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/9168b536-cd40-4630-b43f-b259807c6e87", "rel": "bookmark" } ], "locked": false, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-extended-volumes:volumes_attached": [], "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": [ "0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8", "674736e3-f25c-405c-8362-bbf991e0ce0a" ], "updated": "2017-02-14T19:24:00Z", "user_id": "admin" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.63/server-update-req.json0000664000175000017500000000031700000000000024342 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "OS-DCF:diskConfig": "AUTO", "name": "new-server-test", "description": "Sample description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.63/server-update-resp.json0000664000175000017500000000402000000000000024517 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "version": 4 } ] }, "created": "2012-12-02T02:11:57Z", "description": "Sample description", "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": { "hw:numa_nodes": "1" }, "original_name": "m1.tiny.specs", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "6e84af987b4e7ec1c039b16d21f508f4a505672bd94fb0218b668d07", "id": "324dfb7d-f4a9-419a-9a19-237df04b443b", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/324dfb7d-f4a9-419a-9a19-237df04b443b", "rel": "self" }, { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/servers/324dfb7d-f4a9-419a-9a19-237df04b443b", "rel": "bookmark" } ], "locked": false, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": [ "0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8", "674736e3-f25c-405c-8362-bbf991e0ce0a" ], "updated": "2012-12-02T02:11:58Z", "user_id": "admin" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.63/servers-details-resp.json0000664000175000017500000000736100000000000025060 0ustar00zuulzuul00000000000000{ "servers": [ { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:host": "compute", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:kernel_id": "", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:ramdisk_id": "", "OS-EXT-SRV-ATTR:reservation_id": "r-y0w4v32k", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2017-10-10T15:49:09.516729", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "config_drive": "", "created": "2017-10-10T15:49:08Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": { "hw:numa_nodes": "1" }, "original_name": "m1.tiny.specs", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "host_status": "UP", "id": "569f39f9-7c76-42a1-9c2d-8394e2638a6d", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d", "rel": "bookmark" } ], "locked": false, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-extended-volumes:volumes_attached": [], "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": [ "0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8", "674736e3-f25c-405c-8362-bbf991e0ce0a" ], "updated": "2017-10-10T15:49:09Z", "user_id": "admin" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=569f39f9-7c76-42a1-9c2d-8394e2638a6d", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.153607 nova-32.0.0/doc/api_samples/servers/v2.66/0000775000175000017500000000000000000000000020076 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.66/server-create-req.json0000664000175000017500000000153300000000000024327 0ustar00zuulzuul00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "6", "availability_zone": "us-west", "OS-DCF:diskConfig": "AUTO", "metadata" : { "My Server Name" : "Apache1" }, "security_groups": [ { "name": "default" } ], "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "networks": "auto", "trusted_image_certificates": [ "0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8", "674736e3-f25c-405c-8362-bbf991e0ce0a" ] }, "OS-SCH-HNT:scheduler_hints": { "same_host": "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.66/server-create-resp.json0000664000175000017500000000124600000000000024512 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "wKLKinb9u7GM", "id": "aab35fd0-b459-4b59-9308-5a23147f3165", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/aab35fd0-b459-4b59-9308-5a23147f3165", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/aab35fd0-b459-4b59-9308-5a23147f3165", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.66/servers-details-with-changes-before.json0000664000175000017500000000700700000000000027730 0ustar00zuulzuul00000000000000{ "servers": [ { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:host": "compute", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:kernel_id": "", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:ramdisk_id": "", "OS-EXT-SRV-ATTR:reservation_id": "r-y0w4v32k", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2018-10-10T15:49:09.516729", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.0.1", "version": 4 } ] }, "config_drive": "", "created": "2018-10-10T15:49:08Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": { "hw:numa_nodes": "1" }, "original_name": "m1.tiny.specs", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "host_status": "UP", "id": "569f39f9-7c76-42a1-9c2d-8394e2638a6e", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d", "rel": "self" }, { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d", "rel": "bookmark" } ], "locked": false, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-extended-volumes:volumes_attached": [], "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": [ "0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8", "674736e3-f25c-405c-8362-bbf991e0ce0a" ], "updated": "2018-10-10T15:49:09Z", "user_id": "admin" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.66/servers-list-with-changes-before.json0000664000175000017500000000113700000000000027254 0ustar00zuulzuul00000000000000{ "servers": [ { "id": "6e3a87e6-a133-452e-86e1-a31291c1b1c8", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/6e3a87e6-a133-452e-86e1-a31291c1b1c8", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/6e3a87e6-a133-452e-86e1-a31291c1b1c8", "rel": "bookmark" } ], "name": "new-server-test" } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.153607 nova-32.0.0/doc/api_samples/servers/v2.67/0000775000175000017500000000000000000000000020077 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.67/server-create-req.json0000664000175000017500000000107500000000000024331 0ustar00zuulzuul00000000000000{ "server" : { "name" : "bfv-server-with-volume-type", "flavorRef" : "http://openstack.example.com/flavors/1", "networks" : [{ "uuid" : "3cb9bc59-5699-4588-a4b1-b87f96708bc6", "tag": "nic1" }], "block_device_mapping_v2": [{ "uuid": "70a599e0-31e7-49b7-b260-868f441e862b", "source_type": "image", "destination_type": "volume", "boot_index": 0, "volume_size": "1", "tag": "disk1", "volume_type": "lvm-1" }] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.67/server-create-resp.json0000664000175000017500000000124600000000000024513 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "S5wqy9sPYUvU", "id": "97108291-2fd7-4dc2-a909-eaae0306a6a9", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/97108291-2fd7-4dc2-a909-eaae0306a6a9", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/97108291-2fd7-4dc2-a909-eaae0306a6a9", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.153607 nova-32.0.0/doc/api_samples/servers/v2.69/0000775000175000017500000000000000000000000020101 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.69/server-create-req.json0000664000175000017500000000107700000000000024335 0ustar00zuulzuul00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "http://openstack.example.com/flavors/1", 
"OS-DCF:diskConfig": "AUTO", "metadata" : { "My Server Name" : "Apache1" }, "security_groups": [ { "name": "default" } ], "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "networks": "auto" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.69/server-create-resp.json0000664000175000017500000000124600000000000024515 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "mqtDAwb2y7Zh", "id": "6f81aefe-472a-49d8-ba8d-758a5082c7e5", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/6f81aefe-472a-49d8-ba8d-758a5082c7e5", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/6f81aefe-472a-49d8-ba8d-758a5082c7e5", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.69/server-get-resp.json0000664000175000017500000000243200000000000024027 0ustar00zuulzuul00000000000000{ "server": { "OS-EXT-AZ:availability_zone": "UNKNOWN", "OS-EXT-STS:power_state": 0, "created": "2018-12-03T21:06:18Z", "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "id": "33748c23-38dd-4f70-b774-522fc69e7b67", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "status": "UNKNOWN", "tenant_id": "6f70656e737461636b20342065766572", "user_id": "admin", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/33748c23-38dd-4f70-b774-522fc69e7b67", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/33748c23-38dd-4f70-b774-522fc69e7b67", "rel": "bookmark" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.69/servers-details-resp.json0000664000175000017500000000130500000000000025056 0ustar00zuulzuul00000000000000{ "servers": [ { "created": "2018-12-03T21:06:18Z", "id": "b6b0410f-b65f-4473-855e-5d82a71759e0", "status": "UNKNOWN", "tenant_id": "6f70656e737461636b20342065766572", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/b6b0410f-b65f-4473-855e-5d82a71759e0", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/b6b0410f-b65f-4473-855e-5d82a71759e0", "rel": "bookmark" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.69/servers-list-resp.json0000664000175000017500000000113000000000000024400 0ustar00zuulzuul00000000000000{ "servers": [ { "id": "2e136db7-b4a4-4815-8a00-25d9bfe59617", "status": "UNKNOWN", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/2e136db7-b4a4-4815-8a00-25d9bfe59617", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/2e136db7-b4a4-4815-8a00-25d9bfe59617", "rel": "bookmark" } ] } ] }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.157607 
nova-32.0.0/doc/api_samples/servers/v2.71/0000775000175000017500000000000000000000000020072 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.71/server-action-rebuild-resp.json0000664000175000017500000000401200000000000026136 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "version": 4 } ] }, "adminPass": "seekr3t", "created": "2019-02-28T03:16:19Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "id": "36b2afd5-1684-4d18-a49c-915bf0f5344c", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/36b2afd5-1684-4d18-a49c-915bf0f5344c", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/36b2afd5-1684-4d18-a49c-915bf0f5344c", "rel": "bookmark" } ], "locked": false, "metadata": { "meta_var": "meta_val" }, "name": "foobar", "progress": 0, "server_groups": [ "f3d86fe6-4246-4be8-b87c-eb894626c741" ], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2019-02-28T03:16:20Z", "user_data": "ZWNobyAiaGVsbG8gd29ybGQi", "user_id": "admin" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.71/server-action-rebuild.json0000664000175000017500000000056200000000000025175 0ustar00zuulzuul00000000000000{ "rebuild" : { "accessIPv4" : "1.2.3.4", "accessIPv6" : "80fe::", "OS-DCF:diskConfig": "AUTO", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", "adminPass" : "seekr3t", "metadata" : { "meta_var" : "meta_val" }, "user_data": "ZWNobyAiaGVsbG8gd29ybGQi" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.71/server-create-req.json0000664000175000017500000000117500000000000024325 0ustar00zuulzuul00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "1", "OS-DCF:diskConfig": "AUTO", "metadata" : { "My Server Name" : "Apache1" }, "security_groups": [ { "name": "default" } ], "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "networks": "auto" }, "OS-SCH-HNT:scheduler_hints": { "group": "f3d86fe6-4246-4be8-b87c-eb894626c741" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.71/server-create-resp.json0000664000175000017500000000124600000000000024506 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "DB2bQBhxvq8a", "id": "84e2b49d-39a9-4d32-9100-e62161c236db", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/84e2b49d-39a9-4d32-9100-e62161c236db", "rel": "self" }, { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/servers/84e2b49d-39a9-4d32-9100-e62161c236db", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.71/server-get-down-cell-resp.json0000664000175000017500000000256300000000000025707 0ustar00zuulzuul00000000000000{ "server": { "OS-EXT-AZ:availability_zone": "UNKNOWN", "OS-EXT-STS:power_state": 0, "created": "2019-02-28T03:16:19Z", "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "id": "2669556b-b4a3-41f1-a0c1-f9c7ff75e53c", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "server_groups": [ "f3d86fe6-4246-4be8-b87c-eb894626c741" ], "status": "UNKNOWN", "tenant_id": "6f70656e737461636b20342065766572", "user_id": "admin", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/2669556b-b4a3-41f1-a0c1-f9c7ff75e53c", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/2669556b-b4a3-41f1-a0c1-f9c7ff75e53c", "rel": "bookmark" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.71/server-get-resp.json0000664000175000017500000000610600000000000024022 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "nova", "OS-EXT-SRV-ATTR:host": "compute", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:kernel_id": "", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:ramdisk_id": "", "OS-EXT-SRV-ATTR:reservation_id": "r-0scisg0g", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2019-02-28T03:16:19.600768", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "config_drive": "", "created": "2019-02-28T03:16:18Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "host_status": "UP", "id": "84e2b49d-39a9-4d32-9100-e62161c236db", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/84e2b49d-39a9-4d32-9100-e62161c236db", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/84e2b49d-39a9-4d32-9100-e62161c236db", "rel": "bookmark" } ], "locked": false, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", 
"os-extended-volumes:volumes_attached": [], "progress": 0, "security_groups": [ { "name": "default" } ], "server_groups": [ "f3d86fe6-4246-4be8-b87c-eb894626c741" ], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2019-02-28T03:16:19Z", "user_id": "admin" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.71/server-groups-post-req.json0000664000175000017500000000012400000000000025355 0ustar00zuulzuul00000000000000{ "server_group": { "name": "test", "policy": "affinity" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.71/server-groups-post-resp.json0000664000175000017500000000041400000000000025541 0ustar00zuulzuul00000000000000{ "server_group": { "id": "f3d86fe6-4246-4be8-b87c-eb894626c741", "members": [], "name": "test", "policy": "affinity", "project_id": "6f70656e737461636b20342065766572", "rules": {}, "user_id": "admin" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.71/server-update-req.json0000664000175000017500000000031600000000000024340 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "OS-DCF:diskConfig": "AUTO", "name": "new-server-test", "description": "Sample description" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.71/server-update-resp.json0000664000175000017500000000367500000000000024535 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "version": 4 } ] }, "created": "2019-02-28T03:16:19Z", "description": "Sample description", "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "id": "60e840f8-dd17-476b-bd1d-33785066c496", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/60e840f8-dd17-476b-bd1d-33785066c496", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/60e840f8-dd17-476b-bd1d-33785066c496", "rel": "bookmark" } ], "locked": false, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "server_groups": [ "f3d86fe6-4246-4be8-b87c-eb894626c741" ], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2019-02-28T03:16:19Z", "user_id": "admin" } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.157607 nova-32.0.0/doc/api_samples/servers/v2.73/0000775000175000017500000000000000000000000020074 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/servers/v2.73/lock-server-with-reason.json0000664000175000017500000000007100000000000025457 0ustar00zuulzuul00000000000000{ "lock": {"locked_reason": "I don't want to work"} }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.73/server-action-rebuild-resp.json0000664000175000017500000000375400000000000026154 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "version": 4 } ] }, "adminPass": "seekr3t", "created": "2019-04-23T17:10:22Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "id": "0c37a84a-c757-4f22-8c7f-0bf8b6970886", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886", "rel": "bookmark" } ], "locked": false, "locked_reason": null, "metadata": { "meta_var": "meta_val" }, "name": "foobar", "progress": 0, "server_groups": [], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2019-04-23T17:10:24Z", "user_data": "ZWNobyAiaGVsbG8gd29ybGQi", "user_id": "admin" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.73/server-action-rebuild.json0000664000175000017500000000056200000000000025177 0ustar00zuulzuul00000000000000{ "rebuild" : { "accessIPv4" : "1.2.3.4", "accessIPv6" : "80fe::", "OS-DCF:diskConfig": "AUTO", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", "adminPass" : "seekr3t", "metadata" : { "meta_var" : "meta_val" }, "user_data": "ZWNobyAiaGVsbG8gd29ybGQi" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.73/server-create-req.json0000664000175000017500000000103200000000000024317 0ustar00zuulzuul00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "1", "OS-DCF:diskConfig": "AUTO", "metadata" : { "My Server Name" : "Apache1" }, "security_groups": [ { "name": "default" } ], "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "networks": "auto" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.73/server-create-resp.json0000664000175000017500000000124600000000000024510 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "kJTmMkszoB6A", "id": "ae10adbb-9b5e-4667-9cc5-05ebdc80a941", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/ae10adbb-9b5e-4667-9cc5-05ebdc80a941", "rel": "self" }, { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/servers/ae10adbb-9b5e-4667-9cc5-05ebdc80a941", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.73/server-get-resp.json0000664000175000017500000000607300000000000024027 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "nova", "OS-EXT-SRV-ATTR:host": "compute", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:kernel_id": "", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:ramdisk_id": "", "OS-EXT-SRV-ATTR:reservation_id": "r-t61j9da6", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2019-04-23T15:19:10.855016", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "config_drive": "", "created": "2019-04-23T15:19:09Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "host_status": "UP", "id": "0e12087a-7c87-476a-8f84-7398e991cecc", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0e12087a-7c87-476a-8f84-7398e991cecc", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e12087a-7c87-476a-8f84-7398e991cecc", "rel": "bookmark" } ], "locked": true, "locked_reason": "I don't want to work", "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-extended-volumes:volumes_attached": [], "progress": 0, "security_groups": [ { "name": "default" } ], "server_groups": [], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2019-04-23T15:19:11Z", "user_id": "admin" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.73/server-update-req.json0000664000175000017500000000031600000000000024342 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "OS-DCF:diskConfig": "AUTO", "name": "new-server-test", "description": "Sample description" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.73/server-update-resp.json0000664000175000017500000000363700000000000024535 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "version": 4 } ] }, "created": "2019-04-23T17:37:48Z", "description": "Sample 
description", "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "id": "f9a6c4fe-28e0-48a9-b02c-164e4d04d0b2", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/f9a6c4fe-28e0-48a9-b02c-164e4d04d0b2", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f9a6c4fe-28e0-48a9-b02c-164e4d04d0b2", "rel": "bookmark" } ], "locked": false, "locked_reason": null, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "progress": 0, "server_groups": [], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2019-04-23T17:37:48Z", "user_id": "admin" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.73/servers-details-resp.json0000664000175000017500000000657600000000000025070 0ustar00zuulzuul00000000000000{ "servers": [ { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "nova", "OS-EXT-SRV-ATTR:host": "compute", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:kernel_id": "", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:ramdisk_id": "", "OS-EXT-SRV-ATTR:reservation_id": "r-l0i0clt2", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2019-04-23T15:19:15.317839", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "config_drive": "", "created": "2019-04-23T15:19:14Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "host_status": "UP", "id": "2ce4c5b3-2866-4972-93ce-77a2ea46a7f9", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/2ce4c5b3-2866-4972-93ce-77a2ea46a7f9", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/2ce4c5b3-2866-4972-93ce-77a2ea46a7f9", "rel": "bookmark" } ], "locked": true, "locked_reason": "I don't want to work", "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-extended-volumes:volumes_attached": [], "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2019-04-23T15:19:15Z", "user_id": "admin" } ] 
}././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.157607 nova-32.0.0/doc/api_samples/servers/v2.74/0000775000175000017500000000000000000000000020075 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.74/server-create-req-with-host-and-node.json0000664000175000017500000000123000000000000027727 0ustar00zuulzuul00000000000000{ "server" : { "adminPass": "MySecretPass", "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "6", "OS-DCF:diskConfig": "AUTO", "metadata" : { "My Server Name" : "Apache1" }, "security_groups": [ { "name": "default" } ], "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "networks": "auto", "host": "openstack-node-01", "hypervisor_hostname": "openstack-node-01" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.74/server-create-req-with-only-host.json0000664000175000017500000000114400000000000027227 0ustar00zuulzuul00000000000000{ "server" : { "adminPass": "MySecretPass", "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "6", "OS-DCF:diskConfig": "AUTO", "metadata" : { "My Server Name" : "Apache1" }, "security_groups": [ { "name": "default" } ], "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "networks": "auto", "host": "openstack-node-01" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.74/server-create-req-with-only-node.json0000664000175000017500000000116300000000000027200 0ustar00zuulzuul00000000000000{ "server" : { "adminPass": "MySecretPass", "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "6", "OS-DCF:diskConfig": "AUTO", "metadata" : { "My Server Name" : "Apache1" }, "security_groups": [ { "name": "default" } ], "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "networks": "auto", "hypervisor_hostname": "openstack-node-01" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.74/server-create-resp.json0000664000175000017500000000124600000000000024511 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "DB2bQBhxvq8a", "id": "84e2b49d-39a9-4d32-9100-e62161c236db", "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/84e2b49d-39a9-4d32-9100-e62161c236db", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/84e2b49d-39a9-4d32-9100-e62161c236db", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.161607 nova-32.0.0/doc/api_samples/servers/v2.75/0000775000175000017500000000000000000000000020076 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/servers/v2.75/server-action-rebuild-resp.json0000664000175000017500000000601700000000000026151 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:host": "compute", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:kernel_id": "", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:ramdisk_id": "", "OS-EXT-SRV-ATTR:reservation_id": "r-t61j9da6", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2019-04-23T15:19:10.855016", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "adminPass": "seekr3t", "config_drive": "", "created": "2019-04-23T17:10:22Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "host_status": "UP", "id": "0c37a84a-c757-4f22-8c7f-0bf8b6970886", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886", "rel": "bookmark" } ], "locked": false, "locked_reason": null, "metadata": { "meta_var": "meta_val" }, "name": "foobar", "os-extended-volumes:volumes_attached": [], "progress": 0, "security_groups": [ { "name": "default" } ], "server_groups": [], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2019-04-23T17:10:24Z", "user_data": "ZWNobyAiaGVsbG8gd29ybGQi", "user_id": "admin" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.75/server-action-rebuild.json0000664000175000017500000000056200000000000025201 0ustar00zuulzuul00000000000000{ "rebuild" : { "accessIPv4" : "1.2.3.4", "accessIPv6" : "80fe::", "OS-DCF:diskConfig": "AUTO", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", "adminPass" : "seekr3t", "metadata" : { "meta_var" : "meta_val" }, "user_data": "ZWNobyAiaGVsbG8gd29ybGQi" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.75/server-update-req.json0000664000175000017500000000031700000000000024345 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "OS-DCF:diskConfig": "AUTO", "name": "new-server-test", "description": "Sample description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.75/server-update-resp.json0000664000175000017500000000607300000000000024534 0ustar00zuulzuul00000000000000{ "server": { 
"OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:host": "compute", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:kernel_id": "", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:ramdisk_id": "", "OS-EXT-SRV-ATTR:reservation_id": "r-t61j9da6", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2019-04-23T15:19:10.855016", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "config_drive": "", "created": "2012-12-02T02:11:57Z", "description": "Sample description", "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "6e84af987b4e7ec1c039b16d21f508f4a505672bd94fb0218b668d07", "host_status": "UP", "id": "324dfb7d-f4a9-419a-9a19-237df04b443b", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/324dfb7d-f4a9-419a-9a19-237df04b443b", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/324dfb7d-f4a9-419a-9a19-237df04b443b", "rel": "bookmark" } ], "locked": false, "locked_reason": null, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-extended-volumes:volumes_attached": [], "progress": 0, "security_groups": [ { "name": "default" } ], "server_groups": [], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2012-12-02T02:11:58Z", "user_id": "admin" } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.161607 nova-32.0.0/doc/api_samples/servers/v2.9/0000775000175000017500000000000000000000000020013 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.9/server-get-resp.json0000664000175000017500000000624100000000000023743 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29", "id": "0e44cc9c-e052-415d-afbf-469b0d384170", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": 
"http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:reservation_id": "r-00000001", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:kernel_id": "", "OS-EXT-SRV-ATTR:ramdisk_id": "", "OS-EXT-SRV-ATTR:hostname": "fake-hostname", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [ { "id": "volume_id1", "delete_on_termination": false }, { "id": "volume_id2", "delete_on_termination": false } ], "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302", "OS-SRV-USG:terminated_at": null, "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-03T04:01:33Z", "user_id": "admin", "locked": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.9/servers-details-resp.json0000664000175000017500000000737300000000000025003 0ustar00zuulzuul00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "flavor": { "id": "1", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", "rel": "bookmark" } ] }, "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25", "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:host": "c3f14e9812ad496baf92ccfb3c61e15f", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", "OS-EXT-SRV-ATTR:reservation_id": "r-00000001", "OS-EXT-SRV-ATTR:launch_index": 0, "OS-EXT-SRV-ATTR:kernel_id": "", "OS-EXT-SRV-ATTR:ramdisk_id": "", "OS-EXT-SRV-ATTR:hostname": "fake-hostname", "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", 
"os-extended-volumes:volumes_attached": [ { "id": "volume_id1", "delete_on_termination": false }, { "id": "volume_id2", "delete_on_termination": false } ], "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549", "OS-SRV-USG:terminated_at": null, "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-03T04:01:32Z", "user_id": "admin", "locked": false } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.9/servers-list-resp.json0000664000175000017500000000147600000000000024327 0ustar00zuulzuul00000000000000{ "servers": [ { "id": "22c91117-08de-4894-9aa9-6ef382400985", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", "rel": "bookmark" } ], "name": "new-server-test" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=22c91117-08de-4894-9aa9-6ef382400985", "rel": "next" } ] }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.161607 nova-32.0.0/doc/api_samples/servers/v2.90/0000775000175000017500000000000000000000000020073 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.90/server-action-rebuild-resp.json0000664000175000017500000000513700000000000026150 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:hostname": "updated-hostname", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2021-08-19T15:16:22.177882", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "adminPass": "seekr3t", "config_drive": "", "created": "2019-04-23T17:10:22Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "id": "0c37a84a-c757-4f22-8c7f-0bf8b6970886", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886", "rel": "bookmark" } ], "locked": false, "locked_reason": null, "metadata": { "meta_var": "meta_val" }, "name": "foobar", "os-extended-volumes:volumes_attached": [], "progress": 0, "security_groups": [ { "name": "default" } ], "server_groups": [], 
"status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2019-04-23T17:10:24Z", "user_data": "ZWNobyAiaGVsbG8gd29ybGQi", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.90/server-action-rebuild.json0000664000175000017500000000063200000000000025174 0ustar00zuulzuul00000000000000{ "rebuild" : { "accessIPv4" : "1.2.3.4", "accessIPv6" : "80fe::", "OS-DCF:diskConfig": "AUTO", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", "adminPass" : "seekr3t", "hostname": "custom-hostname", "metadata" : { "meta_var" : "meta_val" }, "user_data": "ZWNobyAiaGVsbG8gd29ybGQi" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.90/server-create-req.json0000664000175000017500000000234700000000000024330 0ustar00zuulzuul00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "1", "availability_zone": "us-west", "OS-DCF:diskConfig": "AUTO", "hostname": "custom-hostname", "metadata" : { "My Server Name" : "Apache1" }, "personality": [ { "path": "/etc/banner.txt", "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" } ], "security_groups": [ { "name": "default" } ], "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==" }, "OS-SCH-HNT:scheduler_hints": { "same_host": "48e6a9f6-30af-47e0-bc04-acaed113bb4e" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.90/server-create-resp.json0000664000175000017500000000124500000000000024506 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "6NpUwoz2QDRN", "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.90/server-get-resp.json0000664000175000017500000000524200000000000024023 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "description": null, "locked": false, "locked_reason": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29", "id": "0e44cc9c-e052-415d-afbf-469b0d384170", 
"image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "nova", "OS-EXT-SRV-ATTR:hostname": "custom-hostname", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [ {"id": "volume_id1", "delete_on_termination": false}, {"id": "volume_id2", "delete_on_termination": false} ], "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302", "OS-SRV-USG:terminated_at": null, "progress": 0, "security_groups": [ { "name": "default" } ], "server_groups": [], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2013-09-03T04:01:33Z", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.90/server-update-req.json0000664000175000017500000000025300000000000024341 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "4.3.2.1", "accessIPv6": "80fe::", "OS-DCF:diskConfig": "AUTO", "hostname" : "new-server-hostname" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.90/server-update-resp.json0000664000175000017500000000503500000000000024526 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "description": null, "locked": false, "locked_reason": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29", "id": "0e44cc9c-e052-415d-afbf-469b0d384170", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:hostname": "new-server-hostname", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [], "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302", "OS-SRV-USG:terminated_at": null, "progress": 0, "security_groups": [ { "name": "default" } ], "server_groups": [], 
"status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2013-09-03T04:01:33Z", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.90/servers-details-resp.json0000664000175000017500000000625700000000000025063 0ustar00zuulzuul00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "description": "", "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25", "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "locked": false, "locked_reason": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "nova", "OS-EXT-SRV-ATTR:hostname": "custom-hostname", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [ {"id": "volume_id1", "delete_on_termination": false}, {"id": "volume_id2", "delete_on_termination": false} ], "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549", "OS-SRV-USG:terminated_at": null, "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2013-09-03T04:01:32Z", "user_id": "fake" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.90/servers-list-resp.json0000664000175000017500000000147600000000000024407 0ustar00zuulzuul00000000000000{ "servers": [ { "id": "22c91117-08de-4894-9aa9-6ef382400985", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", "rel": "bookmark" } ], "name": "new-server-test" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=22c91117-08de-4894-9aa9-6ef382400985", "rel": "next" } ] }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.165607 nova-32.0.0/doc/api_samples/servers/v2.94/0000775000175000017500000000000000000000000020077 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.94/server-action-rebuild-resp.json0000664000175000017500000000515300000000000026152 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:hostname": "updated-hostname.example.com", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2021-08-19T15:16:22.177882", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "adminPass": "seekr3t", "config_drive": "", "created": "2019-04-23T17:10:22Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "id": "0c37a84a-c757-4f22-8c7f-0bf8b6970886", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886", "rel": "bookmark" } ], "locked": false, "locked_reason": null, "metadata": { "meta_var": "meta_val" }, "name": "foobar", "os-extended-volumes:volumes_attached": [], "progress": 0, "security_groups": [ { "name": "default" } ], "server_groups": [], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2019-04-23T17:10:24Z", "user_data": "ZWNobyAiaGVsbG8gd29ybGQi", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.94/server-action-rebuild.json0000664000175000017500000000064600000000000025205 0ustar00zuulzuul00000000000000{ "rebuild" : { "accessIPv4" : "1.2.3.4", "accessIPv6" : "80fe::", "OS-DCF:diskConfig": "AUTO", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", "adminPass" : "seekr3t", "hostname": "custom-hostname.example.com", "metadata" : { "meta_var" : "meta_val" }, "user_data": "ZWNobyAiaGVsbG8gd29ybGQi" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.94/server-create-req.json0000664000175000017500000000236300000000000024332 0ustar00zuulzuul00000000000000{ "server" : { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "name" : "new-server-test", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "flavorRef" : "1", "availability_zone": "us-west", "OS-DCF:diskConfig": "AUTO", "hostname": "custom-hostname.example.com", "metadata" : { "My Server Name" : "Apache1" }, "personality": [ { "path": "/etc/banner.txt", "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g 
QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" } ], "security_groups": [ { "name": "default" } ], "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==" }, "OS-SCH-HNT:scheduler_hints": { "same_host": "48e6a9f6-30af-47e0-bc04-acaed113bb4e" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.94/server-create-resp.json0000664000175000017500000000124500000000000024512 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "adminPass": "6NpUwoz2QDRN", "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "bookmark" } ], "security_groups": [ { "name": "default" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.94/server-get-resp.json0000664000175000017500000000525600000000000024034 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "description": null, "locked": false, "locked_reason": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29", "id": "0e44cc9c-e052-415d-afbf-469b0d384170", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "nova", "OS-EXT-SRV-ATTR:hostname": "custom-hostname.example.com", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [ {"id": "volume_id1", "delete_on_termination": false}, {"id": "volume_id2", "delete_on_termination": false} ], "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302", "OS-SRV-USG:terminated_at": null, "progress": 0, "security_groups": [ { "name": "default" } ], "server_groups": [], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2013-09-03T04:01:33Z", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.94/server-update-req.json0000664000175000017500000000026700000000000024352 0ustar00zuulzuul00000000000000{ "server": 
{ "accessIPv4": "4.3.2.1", "accessIPv6": "80fe::", "OS-DCF:diskConfig": "AUTO", "hostname" : "new-server-hostname.example.com" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.94/server-update-resp.json0000664000175000017500000000505100000000000024530 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "description": null, "locked": false, "locked_reason": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29", "id": "0e44cc9c-e052-415d-afbf-469b0d384170", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:hostname": "new-server-hostname.example.com", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [], "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302", "OS-SRV-USG:terminated_at": null, "progress": 0, "security_groups": [ { "name": "default" } ], "server_groups": [], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2013-09-03T04:01:33Z", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.94/servers-details-resp.json0000664000175000017500000000627300000000000025065 0ustar00zuulzuul00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "description": "", "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25", "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "locked": 
false, "locked_reason": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "nova", "OS-EXT-SRV-ATTR:hostname": "custom-hostname.example.com", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [ {"id": "volume_id1", "delete_on_termination": false}, {"id": "volume_id2", "delete_on_termination": false} ], "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549", "OS-SRV-USG:terminated_at": null, "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2013-09-03T04:01:32Z", "user_id": "fake" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.94/servers-list-resp.json0000664000175000017500000000147700000000000024414 0ustar00zuulzuul00000000000000{ "servers": [ { "id": "22c91117-08de-4894-9aa9-6ef382400985", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", "rel": "bookmark" } ], "name": "new-server-test" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=22c91117-08de-4894-9aa9-6ef382400985", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.165607 nova-32.0.0/doc/api_samples/servers/v2.96/0000775000175000017500000000000000000000000020101 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.96/server-action-rebuild-resp.json0000664000175000017500000000523200000000000026152 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:hostname": "updated-hostname.example.com", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2025-02-27T01:27:30.210952", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "adminPass": "seekr3t", "config_drive": "", "created": "2025-02-27T01:27:28Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "id": "a7920f90-14be-4c71-a6f9-6182f5df17c2", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/a7920f90-14be-4c71-a6f9-6182f5df17c2", "rel": "self" }, { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/servers/a7920f90-14be-4c71-a6f9-6182f5df17c2", "rel": "bookmark" } ], "locked": false, "locked_reason": null, "metadata": { "meta_var": "meta_val" }, "name": "foobar", "os-extended-volumes:volumes_attached": [], "pinned_availability_zone": "us-west", "progress": 0, "security_groups": [ { "name": "default" } ], "server_groups": [], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2025-02-27T01:27:30Z", "user_data": "ZWNobyAiaGVsbG8gd29ybGQi", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.96/server-action-rebuild.json0000664000175000017500000000064600000000000025207 0ustar00zuulzuul00000000000000{ "rebuild" : { "accessIPv4" : "1.2.3.4", "accessIPv6" : "80fe::", "OS-DCF:diskConfig": "AUTO", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", "adminPass" : "seekr3t", "hostname": "updated-hostname.example.com", "metadata" : { "meta_var" : "meta_val" }, "user_data": "ZWNobyAiaGVsbG8gd29ybGQi" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.96/server-get-resp.json0000664000175000017500000000532500000000000024033 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "description": null, "locked": false, "locked_reason": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29", "id": "0e44cc9c-e052-415d-afbf-469b0d384170", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [ {"id": "volume_id1", "delete_on_termination": false}, {"id": "volume_id2", "delete_on_termination": false} ], "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302", "OS-SRV-USG:terminated_at": null, "pinned_availability_zone": "us-west", "progress": 0, "security_groups": [ { "name": "default" } ], "server_groups": [], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2013-09-03T04:01:33Z", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/api_samples/servers/v2.96/server-update-req.json0000664000175000017500000000026500000000000024352 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "OS-DCF:diskConfig": "AUTO", "hostname": "new-server-hostname.example.com" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.96/server-update-resp.json0000664000175000017500000000513200000000000024532 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:hostname": "new-server-hostname.example.com", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2025-02-27T01:17:47.110181", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "config_drive": "", "created": "2025-02-27T01:17:46Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "id": "374b76b3-d46b-48bc-b36e-6ab70b0ea217", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/374b76b3-d46b-48bc-b36e-6ab70b0ea217", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/374b76b3-d46b-48bc-b36e-6ab70b0ea217", "rel": "bookmark" } ], "locked": false, "locked_reason": null, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-extended-volumes:volumes_attached": [], "pinned_availability_zone": "us-west", "progress": 0, "security_groups": [ { "name": "default" } ], "server_groups": [], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2025-02-27T01:17:47Z", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.96/servers-details-resp.json0000664000175000017500000000634500000000000025067 0ustar00zuulzuul00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "description": "", "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25", "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ] }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "self" }, { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "locked": false, "locked_reason": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [ {"id": "volume_id1", "delete_on_termination": false}, {"id": "volume_id2", "delete_on_termination": false} ], "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549", "OS-SRV-USG:terminated_at": null, "pinned_availability_zone": "us-west", "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2013-09-03T04:01:32Z", "user_id": "fake" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.96/servers-list-resp.json0000664000175000017500000000147700000000000024416 0ustar00zuulzuul00000000000000{ "servers": [ { "id": "3cfb801c-f03c-45ce-834b-d097b34e9534", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/3cfb801c-f03c-45ce-834b-d097b34e9534", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/3cfb801c-f03c-45ce-834b-d097b34e9534", "rel": "bookmark" } ], "name": "new-server-test" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=3cfb801c-f03c-45ce-834b-d097b34e9534", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1696072 nova-32.0.0/doc/api_samples/servers/v2.98/0000775000175000017500000000000000000000000020103 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.98/server-action-rebuild-resp.json0000664000175000017500000000543200000000000026156 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:hostname": "updated-hostname.example.com", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2021-08-19T15:16:22.177882", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "adminPass": "seekr3t", "config_drive": "", "created": "2019-04-23T17:10:22Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "id": "0c37a84a-c757-4f22-8c7f-0bf8b6970886", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": 
"bookmark" } ], "properties": { "architecture": "x86_64", "auto_disk_config": "True" } }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886", "rel": "bookmark" } ], "locked": false, "locked_reason": null, "metadata": { "meta_var": "meta_val" }, "name": "foobar", "os-extended-volumes:volumes_attached": [], "pinned_availability_zone": "us-west", "progress": 0, "security_groups": [ { "name": "default" } ], "server_groups": [], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2019-04-23T17:10:24Z", "user_data": "ZWNobyAiaGVsbG8gd29ybGQi", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.98/server-action-rebuild.json0000664000175000017500000000064600000000000025211 0ustar00zuulzuul00000000000000{ "rebuild" : { "accessIPv4" : "1.2.3.4", "accessIPv6" : "80fe::", "OS-DCF:diskConfig": "AUTO", "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", "name" : "foobar", "adminPass" : "seekr3t", "hostname": "updated-hostname.example.com", "metadata" : { "meta_var" : "meta_val" }, "user_data": "ZWNobyAiaGVsbG8gd29ybGQi" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.98/server-get-resp.json0000664000175000017500000000552500000000000024037 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "description": null, "locked": false, "locked_reason": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29", "id": "0e44cc9c-e052-415d-afbf-469b0d384170", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ], "properties": { "architecture": "x86_64", "auto_disk_config": "True" } }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [ {"id": "volume_id1", "delete_on_termination": false}, {"id": "volume_id2", "delete_on_termination": false} ], "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302", "OS-SRV-USG:terminated_at": null, "pinned_availability_zone": "us-west", "progress": 0, "security_groups": [ { "name": "default" } ], "server_groups": [], "status": "ACTIVE", "tags": [], 
"tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2013-09-03T04:01:33Z", "user_id": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.98/server-update-req.json0000664000175000017500000000026500000000000024354 0ustar00zuulzuul00000000000000{ "server": { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "OS-DCF:diskConfig": "AUTO", "hostname": "new-server-hostname.example.com" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.98/server-update-resp.json0000664000175000017500000000533100000000000024535 0ustar00zuulzuul00000000000000{ "server": { "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:hostname": "new-server-hostname.example.com", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2025-02-27T03:58:15.509865", "OS-SRV-USG:terminated_at": null, "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "addr": "192.168.1.30", "version": 4 } ] }, "config_drive": "", "created": "2025-02-27T03:58:14Z", "description": null, "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", "id": "39115d82-00be-4432-84b5-381a89d7ebde", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ], "properties": { "architecture": "x86_64", "auto_disk_config": "True" } }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/39115d82-00be-4432-84b5-381a89d7ebde", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/39115d82-00be-4432-84b5-381a89d7ebde", "rel": "bookmark" } ], "locked": false, "locked_reason": null, "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "os-extended-volumes:volumes_attached": [], "pinned_availability_zone": "us-west", "progress": 0, "security_groups": [ { "name": "default" } ], "server_groups": [], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2025-02-27T03:58:15Z", "user_id": "fake" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.98/servers-details-resp.json0000664000175000017500000000656500000000000025075 0ustar00zuulzuul00000000000000{ "servers": [ { "accessIPv4": "1.2.3.4", "accessIPv6": "80fe::", "addresses": { "private": [ { "addr": "192.168.1.30", "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } ] }, "created": "2013-09-03T04:01:32Z", "description": "", "flavor": { "disk": 1, "ephemeral": 0, "extra_specs": {}, "original_name": "m1.tiny", "ram": 512, "swap": 0, "vcpus": 1 }, "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25", "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", "image": { "id": "70a599e0-31e7-49b7-b260-868f441e862b", "links": [ { "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", "rel": "bookmark" } ], "properties": { "architecture": "x86_64", "auto_disk_config": "True" } }, "key_name": null, "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "bookmark" } ], "metadata": { "My Server Name": "Apache1" }, "name": "new-server-test", "config_drive": "", "locked": false, "locked_reason": "", "OS-DCF:diskConfig": "AUTO", "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:hostname": "new-server-test", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "os-extended-volumes:volumes_attached": [ {"id": "volume_id1", "delete_on_termination": false}, {"id": "volume_id2", "delete_on_termination": false} ], "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549", "OS-SRV-USG:terminated_at": null, "pinned_availability_zone": "us-west", "progress": 0, "security_groups": [ { "name": "default" } ], "status": "ACTIVE", "tags": [], "tenant_id": "6f70656e737461636b20342065766572", "trusted_image_certificates": null, "updated": "2013-09-03T04:01:32Z", "user_id": "fake" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=f5dc173b-6804-445a-a6d8-c705dad5b5eb", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers/v2.98/servers-list-resp.json0000664000175000017500000000147700000000000024420 0ustar00zuulzuul00000000000000{ "servers": [ { "id": "3cfb801c-f03c-45ce-834b-d097b34e9534", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/3cfb801c-f03c-45ce-834b-d097b34e9534", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/3cfb801c-f03c-45ce-834b-d097b34e9534", "rel": "bookmark" } ], "name": "new-server-test" } ], "servers_links": [ { "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=3cfb801c-f03c-45ce-834b-d097b34e9534", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1376069 nova-32.0.0/doc/api_samples/servers-sort/0000775000175000017500000000000000000000000020302 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/servers-sort/server-sort-keys-list-resp.json0000664000175000017500000000113500000000000026361 0ustar00zuulzuul00000000000000{ "servers": [ { "id": "e08e6d34-fcc1-480e-b11e-24a675b479f8", "links": [ { "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/e08e6d34-fcc1-480e-b11e-24a675b479f8", "rel": "self" }, { "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/e08e6d34-fcc1-480e-b11e-24a675b479f8", "rel": "bookmark" } ], "name": "new-server-test" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1696072 nova-32.0.0/doc/api_samples/versions/0000775000175000017500000000000000000000000017474 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/versions/v2-version-get-resp.json0000664000175000017500000000122500000000000024125 0ustar00zuulzuul00000000000000{ "version": { "id": "v2.0", "links": [ { "href": "http://openstack.example.com/v2/", "rel": "self" }, { "href": "http://docs.openstack.org/", "rel": "describedby", "type": "text/html" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.compute+json;version=2" } ], "status": "DEPRECATED", "version": "", "min_version": "", "updated": "2025-07-04T12:00:00Z" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/versions/v21-version-get-resp.json0000664000175000017500000000123500000000000024207 0ustar00zuulzuul00000000000000{ "version": { "id": "v2.1", "links": [ { "href": "http://openstack.example.com/v2.1/", "rel": "self" }, { "href": "http://docs.openstack.org/", "rel": "describedby", "type": "text/html" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.compute+json;version=2.1" } ], "status": "CURRENT", "version": "2.100", "min_version": "2.1", "updated": "2013-07-23T11:33:21Z" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_samples/versions/versions-get-resp.json0000664000175000017500000000136000000000000023763 0ustar00zuulzuul00000000000000{ "versions": [ { "id": "v2.0", "links": [ { "href": "http://openstack.example.com/v2/", "rel": "self" } ], "status": "DEPRECATED", "version": "", "min_version": "", "updated": "2025-07-04T12:00:00Z" }, { "id": "v2.1", "links": [ { "href": "http://openstack.example.com/v2.1/", "rel": "self" } ], "status": "CURRENT", "version": "2.100", "min_version": "2.1", "updated": "2013-07-23T11:33:21Z" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1696072 nova-32.0.0/doc/api_schemas/0000775000175000017500000000000000000000000015603 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_schemas/config_drive.json0000664000175000017500000000122200000000000021131 0ustar00zuulzuul00000000000000{ "anyOf": [ { "type": "object", "properties": { "meta_data": { "type": "object" }, "network_data": { "type": "object" }, "user_data": { "type": [ "object", "array", "string", "null" ] } }, "additionalProperties": false }, { "type": [ "string", "null" ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/api_schemas/network_data.json0000664000175000017500000004024400000000000021164 0ustar00zuulzuul00000000000000{ "$schema": "http://json-schema.org/draft-07/schema#", "id": "http://openstack.org/nova/network_data.json", "type": "object", "title": "OpenStack Nova network metadata schema", "description": "Schema of Nova instance network configuration information", "required": [ "links", "networks", "services" ], "properties": { "links": { "$id": "#/properties/links", "type": "array", "title": "L2 interfaces settings", "items": { "$id": "#/properties/links/items", "oneOf": [ { "$ref": "#/definitions/l2_link" }, { "$ref": "#/definitions/l2_bond" }, { "$ref": "#/definitions/l2_vlan" } ] } }, "networks": { "$id": 
"#/properties/networks", "type": "array", "title": "L3 networks", "items": { "$id": "#/properties/networks/items", "oneOf": [ { "$ref": "#/definitions/l3_ipv4_network" }, { "$ref": "#/definitions/l3_ipv6_network" } ] } }, "services": { "$ref": "#/definitions/services" } }, "definitions": { "l2_address": { "$id": "#/definitions/l2_address", "type": "string", "pattern": "(?i)^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$", "title": "L2 interface address", "examples": [ "fa:16:3e:9c:bf:3d" ] }, "l2_id": { "$id": "#/definitions/l2_id", "type": "string", "title": "L2 interface ID", "examples": [ "eth0" ] }, "l2_mtu": { "$id": "#/definitions/l2_mtu", "title": "L2 interface MTU", "anyOf": [ { "type": "number", "minimum": 1, "maximum": 65535 }, { "type": "null" } ], "examples": [ 1500 ] }, "l2_vif_id": { "$id": "#/definitions/l2_vif_id", "type": "string", "title": "Virtual interface ID", "examples": [ "cd9f6d46-4a3a-43ab-a466-994af9db96fc" ] }, "l2_link": { "$id": "#/definitions/l2_link", "type": "object", "title": "L2 interface configuration settings", "required": [ "ethernet_mac_address", "id", "type" ], "properties": { "id": { "$ref": "#/definitions/l2_id" }, "ethernet_mac_address": { "$ref": "#/definitions/l2_address" }, "mtu": { "$ref": "#/definitions/l2_mtu" }, "type": { "$id": "#/definitions/l2_link/properties/type", "type": "string", "enum": [ "bridge", "dvs", "hw_veb", "hyperv", "ovs", "tap", "vhostuser", "vif", "phy" ], "title": "Interface type", "examples": [ "bridge" ] }, "vif_id": { "$ref": "#/definitions/l2_vif_id" } } }, "l2_bond": { "$id": "#/definitions/l2_bond", "type": "object", "title": "L2 bonding interface configuration settings", "required": [ "ethernet_mac_address", "id", "type", "bond_mode", "bond_links" ], "properties": { "id": { "$ref": "#/definitions/l2_id" }, "ethernet_mac_address": { "$ref": "#/definitions/l2_address" }, "mtu": { "$ref": "#/definitions/l2_mtu" }, "type": { "$id": "#/definitions/l2_bond/properties/type", "type": "string", "enum": [ "bond" ], "title": "Interface type", "examples": [ "bond" ] }, "vif_id": { "$ref": "#/definitions/l2_vif_id" }, "bond_mode": { "$id": "#/definitions/bond/properties/bond_mode", "type": "string", "title": "Port bonding type", "enum": [ "802.3ad", "balance-rr", "active-backup", "balance-xor", "broadcast", "balance-tlb", "balance-alb" ], "examples": [ "802.3ad" ] }, "bond_links": { "$id": "#/definitions/bond/properties/bond_links", "type": "array", "title": "Port bonding links", "items": { "$id": "#/definitions/bond/properties/bond_links/items", "type": "string" } } } }, "l2_vlan": { "$id": "#/definitions/l2_vlan", "type": "object", "title": "L2 VLAN interface configuration settings", "required": [ "vlan_mac_address", "id", "type", "vlan_link", "vlan_id" ], "properties": { "id": { "$ref": "#/definitions/l2_id" }, "vlan_mac_address": { "$ref": "#/definitions/l2_address" }, "mtu": { "$ref": "#/definitions/l2_mtu" }, "type": { "$id": "#/definitions/l2_vlan/properties/type", "type": "string", "enum": [ "vlan" ], "title": "VLAN interface type", "examples": [ "vlan" ] }, "vif_id": { "$ref": "#/definitions/l2_vif_id" }, "vlan_id": { "$id": "#/definitions/l2_vlan/properties/vlan_id", "type": "integer", "title": "VLAN ID" }, "vlan_link": { "$id": "#/definitions/l2_vlan/properties/vlan_link", "type": "string", "title": "VLAN link name" } } }, "l3_id": { "$id": "#/definitions/l3_id", "type": "string", "title": "Network name", "examples": [ "network0" ] }, "l3_link": { "$id": "#/definitions/l3_link", "type": "string", "title": "L2 network link to use 
for L3 interface", "examples": [ "99e88329-f20d-4741-9593-25bf07847b16" ] }, "l3_network_id": { "$id": "#/definitions/l3_network_id", "type": "string", "title": "Network ID", "examples": [ "99e88329-f20d-4741-9593-25bf07847b16" ] }, "l3_ipv4_type": { "$id": "#/definitions/l3_ipv4_type", "type": "string", "enum": [ "ipv4", "ipv4_dhcp" ], "title": "L3 IPv4 network type", "examples": [ "ipv4_dhcp" ] }, "l3_ipv6_type": { "$id": "#/definitions/l3_ipv6_type", "type": "string", "enum": [ "ipv6", "ipv6_dhcp", "ipv6_slaac" ], "title": "L3 IPv6 network type", "examples": [ "ipv6_dhcp" ] }, "l3_ipv4_host": { "$id": "#/definitions/l3_ipv4_host", "type": "string", "pattern": "^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$", "title": "L3 IPv4 host address", "examples": [ "192.168.81.99" ] }, "l3_ipv6_host": { "$id": "#/definitions/l3_ipv6_host", "type": "string", "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))(/[0-9]{1,2})?$", "title": "L3 IPv6 host address", "examples": [ "2001:db8:3:4::192.168.81.99" ] }, "l3_ipv4_netmask": { "$id": "#/definitions/l3_ipv4_netmask", "type": "string", "pattern": "^(254|252|248|240|224|192|128|0)\\.0\\.0\\.0|255\\.(254|252|248|240|224|192|128|0)\\.0\\.0|255\\.255\\.(254|252|248|240|224|192|128|0)\\.0|255\\.255\\.255\\.(254|252|248|240|224|192|128|0)$", "title": "L3 IPv4 network mask", "examples": [ "255.255.252.0" ] }, "l3_ipv6_netmask": { "$id": "#/definitions/l3_ipv6_netmask", "type": "string", "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7})|(::))$", "title": "L3 IPv6 network mask", "examples": [ "ffff:ffff:ffff:ffff::" ] }, "l3_ipv4_nw": { "$id": "#/definitions/l3_ipv4_nw", "type": "string", "pattern": "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$", "title": "L3 IPv4 network address", "examples": [ "0.0.0.0" ] }, "l3_ipv6_nw": { "$id": "#/definitions/l3_ipv6_nw", "type": "string", "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7})|(::))$", "title": "L3 IPv6 network address", "examples": [ "8000::" ] }, "l3_ipv4_gateway": { "$id": "#/definitions/l3_ipv4_gateway", "type": "string", "pattern": 
"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$", "title": "L3 IPv4 gateway address", "examples": [ "192.168.200.1" ] }, "l3_ipv6_gateway": { "$id": "#/definitions/l3_ipv6_gateway", "type": "string", "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))$", "title": "L3 IPv6 gateway address", "examples": [ "2001:db8:3:4::192.168.81.99" ] }, "l3_ipv4_network_route": { "$id": "#/definitions/l3_ipv4_network_route", "type": "object", "title": "L3 IPv4 routing configuration item", "required": [ "gateway", "netmask", "network" ], "properties": { "network": { "$ref": "#/definitions/l3_ipv4_nw" }, "netmask": { "$ref": "#/definitions/l3_ipv4_netmask" }, "gateway": { "$ref": "#/definitions/l3_ipv4_gateway" }, "services": { "$ref": "#/definitions/ipv4_services" } } }, "l3_ipv6_network_route": { "$id": "#/definitions/l3_ipv6_network_route", "type": "object", "title": "L3 IPv6 routing configuration item", "required": [ "gateway", "netmask", "network" ], "properties": { "network": { "$ref": "#/definitions/l3_ipv6_nw" }, "netmask": { "$ref": "#/definitions/l3_ipv6_netmask" }, "gateway": { "$ref": "#/definitions/l3_ipv6_gateway" }, "services": { "$ref": "#/definitions/ipv6_services" } } }, "l3_ipv4_network": { "$id": "#/definitions/l3_ipv4_network", "type": "object", "title": "L3 IPv4 network configuration", "required": [ "id", "link", "network_id", "type" ], "properties": { "id": { "$ref": "#/definitions/l3_id" }, "link": { "$ref": "#/definitions/l3_link" }, "network_id": { "$ref": "#/definitions/l3_network_id" }, "type": { "$ref": "#/definitions/l3_ipv4_type" }, "ip_address": { "$ref": "#/definitions/l3_ipv4_host" }, "netmask": { "$ref": "#/definitions/l3_ipv4_netmask" }, "routes": { "$id": "#/definitions/l3_ipv4_network/routes", "type": "array", "title": "L3 IPv4 network routes", "items": { "$ref": "#/definitions/l3_ipv4_network_route" } } } }, "l3_ipv6_network": { "$id": "#/definitions/l3_ipv6_network", "type": "object", "title": "L3 IPv6 network configuration", "required": [ "id", "link", "network_id", "type" ], "properties": { "id": { "$ref": "#/definitions/l3_id" }, "link": { "$ref": "#/definitions/l3_link" }, "network_id": { "$ref": "#/definitions/l3_network_id" }, "type": { "$ref": "#/definitions/l3_ipv6_type" }, "ip_address": { "$ref": "#/definitions/l3_ipv6_host" }, "netmask": { "$ref": "#/definitions/l3_ipv6_netmask" }, "routes": { "$id": "#/definitions/properties/l3_ipv6_network/routes", "type": "array", "title": "L3 IPv6 network routes", "items": { "$ref": "#/definitions/l3_ipv6_network_route" } } } }, "ipv4_service": { "$id": "#/definitions/ipv4_service", "type": "object", "title": "Service on a IPv4 network", "required": [ "address", "type" ], "properties": { "address": { "$ref": "#/definitions/l3_ipv4_host" }, "type": { "$id": "#/definitions/ipv4_service/properties/type", "type": "string", "enum": [ "dns" ], "title": "Service type", 
"examples": [ "dns" ] } } }, "ipv6_service": { "$id": "#/definitions/ipv6_service", "type": "object", "title": "Service on a IPv6 network", "required": [ "address", "type" ], "properties": { "address": { "$ref": "#/definitions/l3_ipv6_host" }, "type": { "$id": "#/definitions/ipv4_service/properties/type", "type": "string", "enum": [ "dns" ], "title": "Service type", "examples": [ "dns" ] } } }, "ipv4_services": { "$id": "#/definitions/ipv4_services", "type": "array", "title": "Network services on IPv4 network", "items": { "$id": "#/definitions/ipv4_services/items", "$ref": "#/definitions/ipv4_service" } }, "ipv6_services": { "$id": "#/definitions/ipv6_services", "type": "array", "title": "Network services on IPv6 network", "items": { "$id": "#/definitions/ipv6_services/items", "$ref": "#/definitions/ipv6_service" } }, "services": { "$id": "#/definitions/services", "type": "array", "title": "Network services", "items": { "$id": "#/definitions/services/items", "anyOf": [ { "$ref": "#/definitions/ipv4_service" }, { "$ref": "#/definitions/ipv6_service" } ] } } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/dictionary.txt0000664000175000017500000000015100000000000016232 0ustar00zuulzuul00000000000000ot hda ser ned deques affinitized noes wile usera dettach excpt imigration childs assertin notin OTU otu ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.1696072 nova-32.0.0/doc/ext/0000775000175000017500000000000000000000000014127 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/ext/__init__.py0000664000175000017500000000000000000000000016226 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/ext/extra_specs.py0000664000175000017500000001566200000000000017033 0ustar00zuulzuul00000000000000# Copyright 2020, Red Hat, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Display extra specs in documentation. Provides a single directive that can be used to list all extra specs validators and, thus, document all extra specs that nova recognizes and supports. """ import typing as ty from docutils import nodes from docutils.parsers import rst from docutils.parsers.rst import directives from docutils import statemachine from sphinx import addnodes from sphinx import directives as sphinx_directives from sphinx import domains from sphinx import roles from sphinx.util import logging from sphinx.util import nodes as sphinx_nodes from nova.api.validation.extra_specs import base from nova.api.validation.extra_specs import validators LOG = logging.getLogger(__name__) class ExtraSpecXRefRole(roles.XRefRole): """Cross reference a extra spec. 
Example:: :nova:extra-spec:`hw:cpu_policy` """ def __init__(self): super(ExtraSpecXRefRole, self).__init__( warn_dangling=True, ) def process_link(self, env, refnode, has_explicit_title, title, target): # The anchor for the extra spec link is the extra spec name return target, target class ExtraSpecDirective(sphinx_directives.ObjectDescription): """Document an individual extra spec. Accepts one required argument - the extra spec name, including the group. Example:: .. extra-spec:: hw:cpu_policy """ def handle_signature(self, sig, signode): """Transform an option description into RST nodes.""" # Insert a node into the output showing the extra spec name signode += addnodes.desc_name(sig, sig) signode['allnames'] = [sig] return sig def add_target_and_index(self, firstname, sig, signode): cached_options = self.env.domaindata['nova']['extra_specs'] signode['ids'].append(sig) self.state.document.note_explicit_target(signode) # Store the location of the option definition for later use in # resolving cross-references cached_options[sig] = self.env.docname def _indent(text, count=1): if not text: return text padding = ' ' * (4 * count) return padding + text def _format_validator_group_help( validators: ty.Dict[str, base.ExtraSpecValidator], summary: bool, ): """Generate reStructuredText snippets for a group of validators.""" for validator in validators.values(): for line in _format_validator_help(validator, summary): yield line def _format_validator_help( validator: base.ExtraSpecValidator, summary: bool, ): """Generate reStructuredText snippets for the provided validator. :param validator: A validator to document. :type validator: nova.api.validation.extra_specs.base.ExtraSpecValidator """ yield f'.. nova:extra-spec:: {validator.name}' yield '' # NOTE(stephenfin): We don't print the pattern, if present, since it's too # internal. Instead, the description should provide this information in a # human-readable format yield _indent(f':Type: {validator.value["type"].__name__}') if validator.value.get('min') is not None: yield _indent(f':Min: {validator.value["min"]}') if validator.value.get('max') is not None: yield _indent(f':Max: {validator.value["max"]}') yield '' if not summary: for line in validator.description.splitlines(): yield _indent(line) yield '' if validator.deprecated: yield _indent('.. warning::') yield _indent( 'This extra spec has been deprecated and should not be used.', 2 ) yield '' class ExtraSpecGroupDirective(rst.Directive): """Document extra specs belonging to the specified group. Accepts one optional argument - the extra spec group - and one option - whether to show a summary view only (omit descriptions). Example:: .. 
extra-specs:: hw_rng :summary: """ required_arguments = 0 optional_arguments = 1 option_spec = { 'summary': directives.flag, } has_content = False def run(self): result = statemachine.ViewList() source_name = self.state.document.current_source group = self.arguments[0] if self.arguments else None summary = self.options.get('summary', False) if group: group_validators = { n.split(':', 1)[1]: v for n, v in validators.VALIDATORS.items() if ':' in n and n.split(':', 1)[0].split('{')[0] == group } else: group_validators = { n: v for n, v in validators.VALIDATORS.items() if ':' not in n } if not group_validators: LOG.warning("No validators found for group '%s'", group or '') for count, line in enumerate( _format_validator_group_help(group_validators, summary) ): result.append(line, source_name, count) LOG.debug('%5d%s%s', count, ' ' if line else '', line) node = nodes.section() node.document = self.state.document sphinx_nodes.nested_parse_with_titles(self.state, result, node) return node.children class NovaDomain(domains.Domain): """nova domain.""" name = 'nova' label = 'nova' object_types = { 'configoption': domains.ObjType( 'extra spec', 'spec', ), } directives = { 'extra-spec': ExtraSpecDirective, } roles = { 'extra-spec': ExtraSpecXRefRole(), } initial_data = { 'extra_specs': {}, } def resolve_xref( self, env, fromdocname, builder, typ, target, node, contnode, ): """Resolve cross-references""" if typ == 'extra-spec': return sphinx_nodes.make_refnode( builder, fromdocname, env.domaindata['nova']['extra_specs'][target], target, contnode, target, ) return None def merge_domaindata(self, docnames, otherdata): for target, docname in otherdata['extra_specs'].items(): if docname in docnames: self.data['extra_specs'][target] = docname def setup(app): app.add_domain(NovaDomain) app.add_directive('extra-specs', ExtraSpecGroupDirective) return { 'parallel_read_safe': True, 'parallel_write_safe': True, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/ext/feature_matrix.py0000664000175000017500000005214700000000000017531 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This provides a sphinx extension able to render from an ini file (from the doc/source/* directory) a feature matrix into the developer documentation. It is used via a single directive in the .rst file .. 
feature_matrix:: feature_classification.ini """ import configparser import re from docutils import nodes from docutils.parsers import rst class Matrix(object): """Matrix represents the entire feature matrix parsed from an ini file This includes: * self.features is a list of MatrixFeature instances, the rows and cells * self.targets is a dict of (MatrixTarget.key, MatrixTarget), the columns """ def __init__(self): self.features = [] self.targets = {} class MatrixTarget(object): def __init__(self, key, title, driver, hypervisor=None, architecture=None, link=None): """MatrixTarget modes a target, a column in the matrix This is usually a specific CI system, or collection of related deployment configurations. :param key: Unique identifier for the hypervisor driver :param title: Human friendly name of the hypervisor :param driver: Name of the Nova driver :param hypervisor: (optional) Name of the hypervisor, if many :param architecture: (optional) Name of the architecture, if many :param link: (optional) URL to docs about this target """ self.key = key self.title = title self.driver = driver self.hypervisor = hypervisor self.architecture = architecture self.link = link class MatrixImplementation(object): STATUS_COMPLETE = "complete" STATUS_PARTIAL = "partial" STATUS_MISSING = "missing" STATUS_UNKNOWN = "unknown" STATUS_ALL = [STATUS_COMPLETE, STATUS_PARTIAL, STATUS_MISSING, STATUS_UNKNOWN] def __init__(self, status=STATUS_MISSING, notes=None, release=None): """MatrixImplementation models a cell in the matrix This models the current state of a target for a specific feature :param status: One off complete, partial, missing, unknown. See the RST docs for a definition of those. :param notes: Arbitrary string describing any caveats of the implementation. Mandatory if status is 'partial'. :param release: Letter of the release entry was last updated. i.e. m=mitaka, c=cactus. If not known it is None. """ self.status = status self.notes = notes self.release = release class MatrixFeature(object): MATURITY_INCOMPLETE = "incomplete" MATURITY_EXPERIMENTAL = "experimental" MATURITY_COMPLETE = "complete" MATURITY_DEPRECATED = "deprecated" MATURITY_ALL = [MATURITY_INCOMPLETE, MATURITY_EXPERIMENTAL, MATURITY_COMPLETE, MATURITY_DEPRECATED] def __init__(self, key, title, notes=None, cli=None, maturity=None, api_doc_link=None, admin_doc_link=None, tempest_test_uuids=None): """MatrixFeature models a row in the matrix This initialises ``self.implementations``, which is a dict of (MatrixTarget.key, MatrixImplementation) This is the list of cells for the given row of the matrix. 
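        For example, after an ini file has been parsed this mapping might look
        roughly like the following (the target key and release letter are
        illustrative values only, not ones taken from a real matrix file)::

            feature.implementations = {
                "libvirt-kvm-x86": MatrixImplementation(
                    status=MatrixImplementation.STATUS_COMPLETE,
                    notes=None,
                    release="m"),
            }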
:param key: used as the HTML id for the details, and so in the URL :param title: human friendly short title, used in the matrix :param notes: Arbitrarily long string describing the feature :param cli: list of cli commands related to the feature :param maturity: incomplete, experimental, complete or deprecated for a full definition see the rst doc :param api_doc_link: URL to the API ref for this feature :param admin_doc_link: URL to the admin docs for using this feature :param tempest_test_uuids: uuids for tests that validate this feature """ if cli is None: cli = [] if tempest_test_uuids is None: tempest_test_uuids = [] self.key = key self.title = title self.notes = notes self.implementations = {} self.cli = cli self.maturity = maturity self.api_doc_link = api_doc_link self.admin_doc_link = admin_doc_link self.tempest_test_uuids = tempest_test_uuids class FeatureMatrixDirective(rst.Directive): """The Sphinx directive plugin Has single required argument, the filename for the ini file. The usage of the directive looks like this:: .. feature_matrix:: feature_matrix_gp.ini """ required_arguments = 1 def run(self): matrix = self._load_feature_matrix() return self._build_markup(matrix) def _load_feature_matrix(self): """Reads the feature-matrix.ini file and populates an instance of the Matrix class with all the data. :returns: Matrix instance """ cfg = configparser.ConfigParser() env = self.state.document.settings.env filename = self.arguments[0] rel_fpath, fpath = env.relfn2path(filename) with open(fpath) as fp: cfg.read_file(fp) # This ensures that the docs are rebuilt whenever the # .ini file changes env.note_dependency(rel_fpath) matrix = Matrix() matrix.targets = self._get_targets(cfg) matrix.features = self._get_features(cfg, matrix.targets) return matrix def _get_targets(self, cfg): """The 'targets' section is special - it lists all the hypervisors that this file records data for. """ targets = {} for section in cfg.sections(): if not section.startswith("target."): continue key = section[7:] title = cfg.get(section, "title") link = cfg.get(section, "link") driver = key.split("-")[0] target = MatrixTarget(key, title, driver, link=link) targets[key] = target return targets def _get_features(self, cfg, targets): """All sections except 'targets' describe some feature of the Nova hypervisor driver implementation. 
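        An illustrative feature section is sketched below; the section and
        target names are examples only, but the option keys mirror the ones
        recognised by the parsing code in this method. Every target declared
        in a ``[target.*]`` section needs a matching ``driver-impl-*`` entry,
        and the value may carry an optional ``:<release>`` suffix recording
        when the entry was last updated::

            [operation.attach-volume]
            title=Attach block volume to instance
            maturity=complete
            cli=openstack server add volume <server> <volume>
            driver-impl-libvirt-kvm-x86=complete:m
            driver-notes-libvirt-kvm-x86=Tested in the gate.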
""" features = [] for section in cfg.sections(): if section == "targets": continue if section.startswith("target."): continue if not cfg.has_option(section, "title"): raise Exception( "'title' field missing in '[%s]' section" % section) title = cfg.get(section, "title") maturity = MatrixFeature.MATURITY_INCOMPLETE if cfg.has_option(section, "maturity"): maturity = cfg.get(section, "maturity").lower() if maturity not in MatrixFeature.MATURITY_ALL: raise Exception( "'maturity' field value '%s' in ['%s']" "section must be %s" % (maturity, section, ",".join(MatrixFeature.MATURITY_ALL))) notes = None if cfg.has_option(section, "notes"): notes = cfg.get(section, "notes") cli = [] if cfg.has_option(section, "cli"): cli = cfg.get(section, "cli") api_doc_link = None if cfg.has_option(section, "api_doc_link"): api_doc_link = cfg.get(section, "api_doc_link") admin_doc_link = None if cfg.has_option(section, "admin_doc_link"): admin_doc_link = cfg.get(section, "admin_doc_link") tempest_test_uuids = [] if cfg.has_option(section, "tempest_test_uuids"): tempest_test_uuids = cfg.get(section, "tempest_test_uuids") feature = MatrixFeature(section, title, notes, cli, maturity, api_doc_link, admin_doc_link, tempest_test_uuids) # Now we've got the basic feature details, we must process # the hypervisor driver implementation for each feature for item in cfg.options(section): key = item.replace("driver-impl-", "") if key not in targets: # TODO(johngarbutt) would be better to skip known list if item.startswith("driver-impl-"): raise Exception( "Driver impl '%s' in '[%s]' not declared" % (item, section)) continue impl_status_and_release = cfg.get(section, item) impl_status_and_release = impl_status_and_release.split(":") impl_status = impl_status_and_release[0] release = None if len(impl_status_and_release) == 2: release = impl_status_and_release[1] if impl_status not in MatrixImplementation.STATUS_ALL: raise Exception( "'%s' value '%s' in '[%s]' section must be %s" % (item, impl_status, section, ",".join(MatrixImplementation.STATUS_ALL))) noteskey = "driver-notes-" + item[12:] notes = None if cfg.has_option(section, noteskey): notes = cfg.get(section, noteskey) target = targets[key] impl = MatrixImplementation(impl_status, notes, release) feature.implementations[target.key] = impl for key in targets: if key not in feature.implementations: raise Exception("'%s' missing in '[%s]' section" % (key, section)) features.append(feature) return features def _build_markup(self, matrix): """Constructs the docutils content for the feature matrix""" content = [] self._build_summary(matrix, content) self._build_details(matrix, content) return content def _build_summary(self, matrix, content): """Constructs the docutils content for the summary of the feature matrix. The summary consists of a giant table, with one row for each feature, and a column for each hypervisor driver. 
It provides an 'at a glance' summary of the status of each driver """ summarytitle = nodes.subtitle(text="Summary") summary = nodes.table() cols = len(matrix.targets.keys()) cols += 2 summarygroup = nodes.tgroup(cols=cols) summarybody = nodes.tbody() summaryhead = nodes.thead() for i in range(cols): summarygroup.append(nodes.colspec(colwidth=1)) summarygroup.append(summaryhead) summarygroup.append(summarybody) summary.append(summarygroup) content.append(summarytitle) content.append(summary) # This sets up all the column headers - two fixed # columns for feature name & status header = nodes.row() blank = nodes.entry() blank.append(nodes.emphasis(text="Feature")) header.append(blank) blank = nodes.entry() blank.append(nodes.emphasis(text="Maturity")) header.append(blank) summaryhead.append(header) # then one column for each hypervisor driver impls = sorted(matrix.targets.keys()) for key in impls: target = matrix.targets[key] implcol = nodes.entry() header.append(implcol) if target.link: uri = target.link target_ref = nodes.reference("", refuri=uri) target_txt = nodes.inline() implcol.append(target_txt) target_txt.append(target_ref) target_ref.append(nodes.strong(text=target.title)) else: implcol.append(nodes.strong(text=target.title)) # We now produce the body of the table, one row for # each feature to report on for feature in matrix.features: item = nodes.row() # the hyperlink target name linking to details id = re.sub("[^a-zA-Z0-9_]", "_", feature.key) # first the to fixed columns for title/status keycol = nodes.entry() item.append(keycol) keyref = nodes.reference(refid=id) keytxt = nodes.inline() keycol.append(keytxt) keytxt.append(keyref) keyref.append(nodes.strong(text=feature.title)) maturitycol = nodes.entry() item.append(maturitycol) maturitycol.append(nodes.inline( text=feature.maturity, classes=["fm_maturity_" + feature.maturity])) # and then one column for each hypervisor driver impls = sorted(matrix.targets.keys()) for key in impls: target = matrix.targets[key] impl = feature.implementations[key] implcol = nodes.entry() item.append(implcol) id = re.sub("[^a-zA-Z0-9_]", "_", feature.key + "_" + key) implref = nodes.reference(refid=id) impltxt = nodes.inline() implcol.append(impltxt) impltxt.append(implref) impl_status = "" if impl.status == MatrixImplementation.STATUS_COMPLETE: impl_status = u"\u2714" elif impl.status == MatrixImplementation.STATUS_MISSING: impl_status = u"\u2716" elif impl.status == MatrixImplementation.STATUS_PARTIAL: impl_status = u"\u2714" elif impl.status == MatrixImplementation.STATUS_UNKNOWN: impl_status = u"?" implref.append(nodes.literal( text=impl_status, classes=["fm_impl_summary", "fm_impl_" + impl.status])) if impl.release: implref.append(nodes.inline(text=" %s" % impl.release)) summarybody.append(item) def _build_details(self, matrix, content): """Constructs the docutils content for the details of the feature matrix. This is generated as a bullet list of features. 
Against each feature we provide the description of the feature and then the details of the hypervisor impls, with any driver specific notes that exist """ detailstitle = nodes.subtitle(text="Details") details = nodes.bullet_list() content.append(detailstitle) content.append(details) # One list entry for each feature we're reporting on for feature in matrix.features: item = nodes.list_item() # The hypervisor target name linked from summary table id = re.sub("[^a-zA-Z0-9_]", "_", feature.key) # Highlight the feature title name item.append(nodes.strong(text=feature.title, ids=[id])) if feature.notes is not None: para_notes = nodes.paragraph() para_notes.append(nodes.inline(text=feature.notes)) item.append(para_notes) self._add_feature_info(item, feature) if feature.cli: item.append(self._create_cli_paragraph(feature)) para_divers = nodes.paragraph() para_divers.append(nodes.strong(text="drivers:")) # A sub-list giving details of each hypervisor target impls = nodes.bullet_list() for key in feature.implementations: target = matrix.targets[key] impl = feature.implementations[key] subitem = nodes.list_item() id = re.sub("[^a-zA-Z0-9_]", "_", feature.key + "_" + key) subitem += [ nodes.strong(text=target.title + ": "), nodes.literal(text=impl.status, classes=["fm_impl_" + impl.status], ids=[id]), ] if impl.release: release_letter = impl.release.upper() release_text = \ ' (updated in "%s" release)' % release_letter subitem.append(nodes.inline(text=release_text)) if impl.notes is not None: subitem.append(self._create_notes_paragraph(impl.notes)) impls.append(subitem) para_divers.append(impls) item.append(para_divers) details.append(item) def _add_feature_info(self, item, feature): para_info = nodes.paragraph() para_info.append(nodes.strong(text="info:")) info_list = nodes.bullet_list() maturity_literal = nodes.literal(text=feature.maturity, classes=["fm_maturity_" + feature.maturity]) self._append_info_list_item(info_list, "Maturity", items=[maturity_literal]) self._append_info_list_item(info_list, "API Docs", link=feature.api_doc_link) self._append_info_list_item(info_list, "Admin Docs", link=feature.admin_doc_link) tempest_items = [] if feature.tempest_test_uuids: for uuid in feature.tempest_test_uuids.split(";"): base = "https://github.com/openstack/tempest/search?q=%s" link = base % uuid inline_ref = self._get_uri_ref(link, text=uuid) tempest_items.append(inline_ref) tempest_items.append(nodes.inline(text=", ")) # removing trailing punctuation tempest_items = tempest_items[:-1] self._append_info_list_item(info_list, "Tempest tests", items=tempest_items) para_info.append(info_list) item.append(para_info) def _get_uri_ref(self, link, text=None): if not text: text = link ref = nodes.reference("", text, refuri=link) inline = nodes.inline() inline.append(ref) return inline def _append_info_list_item(self, info_list, title, text=None, link=None, items=None): subitem = nodes.list_item() subitem.append(nodes.strong(text="%s: " % title)) if items: for item in items: subitem.append(item) elif link: inline_link = self._get_uri_ref(link, text) subitem.append(inline_link) elif text: subitem.append(nodes.literal(text=text)) info_list.append(subitem) def _create_cli_paragraph(self, feature): """Create a paragraph which represents the CLI commands of the feature The paragraph will have a bullet list of CLI commands. 
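        For example (illustrative value), an ini entry such as::

            cli=openstack server start;openstack server stop

        is split on ``;`` and rendered as one literal list item per command.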
""" para = nodes.paragraph() para.append(nodes.strong(text="CLI commands:")) commands = nodes.bullet_list() for c in feature.cli.split(";"): cli_command = nodes.list_item() cli_command += nodes.literal(text=c, classes=["fm_cli"]) commands.append(cli_command) para.append(commands) return para def _create_notes_paragraph(self, notes): """Constructs a paragraph which represents the implementation notes The paragraph consists of text and clickable URL nodes if links were given in the notes. """ para = nodes.paragraph() # links could start with http:// or https:// link_idxs = [m.start() for m in re.finditer('https?://', notes)] start_idx = 0 for link_idx in link_idxs: # assume the notes start with text (could be empty) para.append(nodes.inline(text=notes[start_idx:link_idx])) # create a URL node until the next text or the end of the notes link_end_idx = notes.find(" ", link_idx) if link_end_idx == -1: # In case the notes end with a link without a blank link_end_idx = len(notes) uri = notes[link_idx:link_end_idx + 1] para.append(nodes.reference("", uri, refuri=uri)) start_idx = link_end_idx + 1 # get all text after the last link (could be empty) or all of the # text if no link was given para.append(nodes.inline(text=notes[start_idx:])) return para def setup(app): app.add_directive('feature_matrix', FeatureMatrixDirective) app.add_css_file('feature-matrix.css') return { 'parallel_read_safe': True, 'parallel_write_safe': True, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/ext/versioned_notifications.py0000664000175000017500000001314400000000000021433 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This provides a sphinx extension able to list the implemented versioned notifications into the developer documentation. It is used via a single directive in the .rst file .. versioned_notifications:: """ import os from docutils import nodes from docutils.parsers import rst import importlib from oslo_serialization import jsonutils import pkgutil from nova.notifications.objects import base as notification from nova.objects import base from nova.tests import json_ref import nova.utils class VersionedNotificationDirective(rst.Directive): SAMPLE_ROOT = 'doc/notification_samples/' TOGGLE_SCRIPT = """ """ def run(self): notifications = self._collect_notifications() return self._build_markup(notifications) def _import_all_notification_packages(self): list(map(lambda module: importlib.import_module(module), ('nova.notifications.objects.' + name for _, name, _ in pkgutil.iter_modules(nova.notifications.objects.__path__)))) def _collect_notifications(self): # If you do not see your notification sample showing up in the docs # be sure that the sample filename matches what is registered on the # versioned notification object class using the # @base.notification_sample decorator. 
self._import_all_notification_packages() base.NovaObjectRegistry.register_notification_objects() notifications = {} ovos = base.NovaObjectRegistry.obj_classes() for name, cls in ovos.items(): cls = cls[0] if (issubclass(cls, notification.NotificationBase) and cls != notification.NotificationBase): payload_name = cls.fields['payload'].objname payload_cls = ovos[payload_name][0] for sample in cls.samples: if sample in notifications: raise ValueError('Duplicated usage of %s ' 'sample file detected' % sample) notifications[sample] = ((cls.__name__, payload_cls.__name__, sample)) return sorted(notifications.values()) def _build_markup(self, notifications): content = [] cols = ['Event type', 'Notification class', 'Payload class', 'Sample'] table = nodes.table() content.append(table) group = nodes.tgroup(cols=len(cols)) table.append(group) head = nodes.thead() group.append(head) for _ in cols: group.append(nodes.colspec(colwidth=1)) body = nodes.tbody() group.append(body) # fill the table header row = nodes.row() body.append(row) for col_name in cols: col = nodes.entry() row.append(col) text = nodes.strong(text=col_name) col.append(text) # fill the table content, one notification per row for name, payload, sample_file in notifications: event_type = sample_file[0: -5].replace('-', '.') row = nodes.row() body.append(row) col = nodes.entry() row.append(col) text = nodes.literal(text=event_type) col.append(text) col = nodes.entry() row.append(col) text = nodes.literal(text=name) col.append(text) col = nodes.entry() row.append(col) text = nodes.literal(text=payload) col.append(text) col = nodes.entry() row.append(col) with open(os.path.join(self.SAMPLE_ROOT, sample_file), 'r') as f: sample_content = f.read() sample_obj = jsonutils.loads(sample_content) sample_obj = json_ref.resolve_refs( sample_obj, base_path=os.path.abspath(self.SAMPLE_ROOT)) sample_content = jsonutils.dumps(sample_obj, sort_keys=True, indent=4, separators=(',', ': ')) event_type = sample_file[0: -5] html_str = self.TOGGLE_SCRIPT % ((event_type, ) * 3) html_str += ("" % event_type) html_str += ("
%s
" % (event_type, sample_content)) raw = nodes.raw('', html_str, format="html") col.append(raw) return content def setup(app): app.add_directive( 'versioned_notifications', VersionedNotificationDirective) return { 'parallel_read_safe': True, 'parallel_write_safe': True, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.2016072 nova-32.0.0/doc/notification_samples/0000775000175000017500000000000000000000000017541 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/aggregate-add_host-end.json0000664000175000017500000000041400000000000024710 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "$ref": "common_payloads/AggregatePayload.json#", "nova_object.data": { "hosts": ["compute"] } }, "event_type": "aggregate.add_host.end", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/aggregate-add_host-start.json0000664000175000017500000000030400000000000025275 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "$ref": "common_payloads/AggregatePayload.json#" }, "event_type": "aggregate.add_host.start", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/aggregate-cache_images-end.json0000664000175000017500000000042000000000000025510 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "$ref": "common_payloads/AggregatePayload.json#", "nova_object.data": { "hosts": ["compute"] } }, "event_type": "aggregate.cache_images.end", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/aggregate-cache_images-progress.json0000664000175000017500000000115700000000000026616 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "nova_object.version": "1.0", "nova_object.namespace": "nova", "nova_object.name": "AggregateCachePayload", "nova_object.data": { "name": "my-aggregate", "uuid": "788608ec-ebdc-45c5-bc7f-e5f24ab92c80", "host": "compute", "total": 1, "index": 1, "images_cached": ["155d900f-4e14-4e4c-a73d-069cbf4541e6"], "images_failed": [], "id": 1 } }, "event_type": "aggregate.cache_images.progress", "publisher_id": "nova-conductor:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/aggregate-cache_images-start.json0000664000175000017500000000042200000000000026101 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "$ref": "common_payloads/AggregatePayload.json#", "nova_object.data": { "hosts": ["compute"] } }, "event_type": "aggregate.cache_images.start", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/aggregate-create-end.json0000664000175000017500000000030000000000000024360 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "$ref": "common_payloads/AggregatePayload.json#" }, "event_type": "aggregate.create.end", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/aggregate-create-start.json0000664000175000017500000000043500000000000024760 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "$ref": "common_payloads/AggregatePayload.json#", "nova_object.data": { "hosts": null, "id": null } }, "event_type": "aggregate.create.start", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/aggregate-delete-end.json0000664000175000017500000000030000000000000024357 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "$ref": "common_payloads/AggregatePayload.json#" }, "event_type": "aggregate.delete.end", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/aggregate-delete-start.json0000664000175000017500000000030200000000000024750 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "$ref": "common_payloads/AggregatePayload.json#" }, "event_type": "aggregate.delete.start", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/aggregate-remove_host-end.json0000664000175000017500000000030500000000000025454 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "$ref": "common_payloads/AggregatePayload.json#" }, "event_type": "aggregate.remove_host.end", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/aggregate-remove_host-start.json0000664000175000017500000000042100000000000026042 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "$ref": "common_payloads/AggregatePayload.json#", "nova_object.data": { "hosts": ["compute"] } }, "event_type": "aggregate.remove_host.start", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/aggregate-update_metadata-end.json0000664000175000017500000000050600000000000026247 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "$ref": "common_payloads/AggregatePayload.json#", "nova_object.data": { "metadata": { "availability_zone": "AZ-1" } } }, "event_type": "aggregate.update_metadata.end", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/aggregate-update_metadata-start.json0000664000175000017500000000031300000000000026632 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "$ref": "common_payloads/AggregatePayload.json#" }, "event_type": "aggregate.update_metadata.start", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/aggregate-update_prop-end.json0000664000175000017500000000042500000000000025447 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "$ref": "common_payloads/AggregatePayload.json#", "nova_object.data": { "name": "my-new-aggregate" } }, "event_type": "aggregate.update_prop.end", "publisher_id": "nova-api:fake-mini" } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/aggregate-update_prop-start.json0000664000175000017500000000042700000000000026040 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "$ref": "common_payloads/AggregatePayload.json#", "nova_object.data": { "name": "my-new-aggregate" } }, "event_type": "aggregate.update_prop.start", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.2096074 nova-32.0.0/doc/notification_samples/common_payloads/0000775000175000017500000000000000000000000022725 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/AggregatePayload.json0000664000175000017500000000053500000000000027023 0ustar00zuulzuul00000000000000{ "nova_object.version": "1.1", "nova_object.namespace": "nova", "nova_object.name": "AggregatePayload", "nova_object.data": { "name": "my-aggregate", "metadata": { "availability_zone": "nova" }, "uuid": "788608ec-ebdc-45c5-bc7f-e5f24ab92c80", "hosts": [], "id": 1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/AuditPeriodPayload.json0000664000175000017500000000041100000000000027337 0ustar00zuulzuul00000000000000{ "nova_object.data": { "audit_period_beginning": "2012-10-01T00:00:00Z", "audit_period_ending": "2012-10-29T13:42:11Z" }, "nova_object.name": "AuditPeriodPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/BlockDevicePayload.json0000664000175000017500000000052100000000000027302 0ustar00zuulzuul00000000000000{ "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/ComputeTaskPayload.json0000664000175000017500000000137300000000000027375 0ustar00zuulzuul00000000000000{ "nova_object.version": "1.0", "nova_object.namespace": "nova", "nova_object.name": "ComputeTaskPayload", "nova_object.data": { "instance_uuid": "d5e6a7b7-80e5-4166-85a3-cd6115201082", "reason": {"$ref": "ExceptionPayload.json#"}, "request_spec": { "$ref": "RequestSpecPayload.json#", "nova_object.data": { "flavor": { "nova_object.data": { "extra_specs": { "hw:numa_cpus.0": "0", "hw:numa_mem.0": "512", "hw:numa_nodes": "1" } } }, "numa_topology": {"$ref": "InstanceNUMATopologyPayload.json#"} } }, "state": "error" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/ExceptionPayload.json0000664000175000017500000000074500000000000027076 0ustar00zuulzuul00000000000000{ "nova_object.version": "1.1", "nova_object.namespace": "nova", "nova_object.name": "ExceptionPayload", "nova_object.data": { "function_name": "_schedule_instances", "module_name": "nova.conductor.manager", 
"exception": "NoValidHost", "exception_message": "No valid host was found. There are not enough hosts available.", "traceback": "Traceback (most recent call last):\n File \"nova/conductor/manager.py\", line ..." } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/FlavorPayload.json0000664000175000017500000000112400000000000026361 0ustar00zuulzuul00000000000000{ "nova_object.name": "FlavorPayload", "nova_object.data": { "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "name": "test_flavor", "root_gb": 1, "vcpus": 1, "ephemeral_gb": 0, "memory_mb": 512, "disabled": false, "rxtx_factor": 1.0, "extra_specs": { "hw:watchdog_action": "disabled" }, "projects": null, "swap": 0, "is_public": true, "vcpu_weight": 0, "description": null }, "nova_object.version": "1.4", "nova_object.namespace": "nova" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/ImageMetaPayload.json0000664000175000017500000000143000000000000026761 0ustar00zuulzuul00000000000000{ "nova_object.namespace": "nova", "nova_object.data": { "checksum": null, "container_format": "raw", "created_at": "2011-01-01T01:02:03Z", "direct_url": null, "disk_format": "raw", "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "min_disk": 0, "min_ram": 0, "name": "fakeimage123456", "owner": null, "properties": {"$ref":"ImageMetaPropsPayload.json#"}, "protected": false, "size": 25165824, "status": "active", "tags": [ "tag1", "tag2" ], "updated_at": "2011-01-01T01:02:03Z", "virtual_size": null, "visibility": "public" }, "nova_object.name": "ImageMetaPayload", "nova_object.version": "1.0" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/ImageMetaPropsPayload.json0000664000175000017500000000030100000000000030001 0ustar00zuulzuul00000000000000{ "nova_object.namespace": "nova", "nova_object.data": { "hw_architecture": "x86_64" }, "nova_object.name": "ImageMetaPropsPayload", "nova_object.version": "1.18" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/InstanceActionPayload.json0000664000175000017500000000032100000000000030030 0ustar00zuulzuul00000000000000{ "$ref": "InstancePayload.json", "nova_object.data":{ "fault":null }, "nova_object.name":"InstanceActionPayload", "nova_object.namespace":"nova", "nova_object.version":"1.9" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/InstanceActionPayloadShareActive.json0000664000175000017500000000032100000000000032147 0ustar00zuulzuul00000000000000{ "$ref": "InstancePayloadShareActive.json", "nova_object.data": { "fault": null }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.9" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/InstanceActionPayloadShareInactive.json0000664000175000017500000000032300000000000032500 0ustar00zuulzuul00000000000000{ "$ref": "InstancePayloadShareInactive.json", "nova_object.data": { "fault": null }, "nova_object.name": "InstanceActionPayload", 
"nova_object.namespace": "nova", "nova_object.version": "1.9" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/InstanceActionRebuildPayload.json0000664000175000017500000000050000000000000031336 0ustar00zuulzuul00000000000000{ "$ref": "InstanceActionPayload.json", "nova_object.data": { "architecture": null, "image_uuid": "a2459075-d96c-40d5-893e-577ff92e721c", "trusted_image_certificates": ["rebuild-cert-id-1", "rebuild-cert-id-2"] }, "nova_object.name": "InstanceActionRebuildPayload", "nova_object.version": "1.10" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/InstanceActionRescuePayload.json0000664000175000017500000000033400000000000031203 0ustar00zuulzuul00000000000000{ "$ref": "InstanceActionPayload.json", "nova_object.data": { "rescue_image_ref": "a2459075-d96c-40d5-893e-577ff92e721c" }, "nova_object.name": "InstanceActionRescuePayload", "nova_object.version": "1.4" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/InstanceActionResizePrepPayload.json0000664000175000017500000000146200000000000032050 0ustar00zuulzuul00000000000000{ "$ref": "InstanceActionPayload.json", "nova_object.data": { "new_flavor": { "nova_object.name": "FlavorPayload", "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "reset" }, "flavorid": "d5a8bb54-365a-45ae-abdb-38d249df7845", "is_public": true, "memory_mb": 256, "name": "other_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "task_state": "resize_prep" }, "nova_object.name": "InstanceActionResizePrepPayload", "nova_object.version": "1.4" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/InstanceActionSharePayload.json0000664000175000017500000000040500000000000031016 0ustar00zuulzuul00000000000000{ "$ref": "InstanceActionPayload.json", "nova_object.data":{ "share_id": "e8debdc0-447a-4376-a10a-4cd9122d7986" }, "nova_object.name": "InstanceActionSharePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/InstanceActionSnapshotPayload.json0000664000175000017500000000040300000000000031551 0ustar00zuulzuul00000000000000{ "$ref": "InstanceActionPayload.json", "nova_object.data": { "snapshot_image_id": "d2aae36f-785c-4518-8016-bc9534d9fc7f" }, "nova_object.name": "InstanceActionSnapshotPayload", "nova_object.namespace": "nova", "nova_object.version": "1.10" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/InstanceActionVolumePayload.json0000664000175000017500000000037000000000000031224 0ustar00zuulzuul00000000000000{ "$ref": "InstanceActionPayload.json", "nova_object.data": { "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "InstanceActionVolumePayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/InstanceActionVolumeSwapPayload.json0000664000175000017500000000052100000000000032055 0ustar00zuulzuul00000000000000{ "$ref": "InstanceActionPayload.json", "nova_object.data": { "new_volume_id": "227cc671-f30b-4488-96fd-7d0bf13648d8", "old_volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "InstanceActionVolumeSwapPayload", "nova_object.namespace": "nova", "nova_object.version": "1.9" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/InstanceCreatePayload.json0000664000175000017500000000163200000000000030024 0ustar00zuulzuul00000000000000{ "$ref": "InstanceActionPayload.json", "nova_object.data": { "block_devices": [], "keypairs": [ { "nova_object.version": "1.0", "nova_object.namespace": "nova", "nova_object.name": "KeypairPayload", "nova_object.data": { "user_id": "fake", "name": "my-key", "fingerprint": "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", "type": "ssh" } } ], "tags": ["tag"], "trusted_image_certificates": ["cert-id-1", "cert-id-2"], "instance_name": "instance-00000001" }, "nova_object.name": "InstanceCreatePayload", "nova_object.version": "1.13" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/InstanceExistsPayload.json0000664000175000017500000000042300000000000030075 0ustar00zuulzuul00000000000000{ "$ref": "InstancePayload.json", "nova_object.data": { "audit_period": { "$ref": "AuditPeriodPayload.json#" } }, "nova_object.name": "InstanceExistsPayload", "nova_object.namespace": "nova", "nova_object.version": "2.1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/InstanceNUMACellPayload.json0000664000175000017500000000065400000000000030164 0ustar00zuulzuul00000000000000{ "nova_object.version": "1.2", "nova_object.namespace": "nova", "nova_object.name": "InstanceNUMACellPayload", "nova_object.data": { "cpu_pinning_raw": null, "cpu_policy": null, "cpu_thread_policy": null, "cpu_topology": null, "cpuset": [0], "pcpuset": [], "cpuset_reserved": null, "id": 0, "memory": 512, "pagesize": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/InstanceNUMATopologyPayload.json0000664000175000017500000000053700000000000031121 0ustar00zuulzuul00000000000000{ "nova_object.version": "1.0", "nova_object.namespace": "nova", "nova_object.name": "InstanceNUMATopologyPayload", "nova_object.data": { "cells": [ {"$ref": "InstanceNUMACellPayload.json#"} ], "emulator_threads_policy": null, "instance_uuid": "75cab9f7-57e2-4bd1-984f-a0383d9ee60e" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/InstancePCIRequestsPayload.json0000664000175000017500000000037000000000000030766 0ustar00zuulzuul00000000000000{ 
"nova_object.version": "1.0", "nova_object.namespace": "nova", "nova_object.name": "InstancePCIRequestsPayload", "nova_object.data":{ "instance_uuid": "d5e6a7b7-80e5-4166-85a3-cd6115201082", "requests": [] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/InstancePayload.json0000664000175000017500000000303100000000000026673 0ustar00zuulzuul00000000000000{ "nova_object.data":{ "architecture":"x86_64", "availability_zone": "nova", "block_devices": [ {"$ref": "BlockDevicePayload.json#"} ], "created_at":"2012-10-29T13:42:11Z", "deleted_at":null, "display_name":"some-server", "display_description":"some-server", "host":"compute", "host_name":"some-server", "ip_addresses": [ {"$ref": "IpPayload.json#"} ], "kernel_id":"", "key_name": "my-key", "launched_at":"2012-10-29T13:42:11Z", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "metadata":{}, "locked":false, "node":"fake-mini", "os_type":null, "progress":0, "ramdisk_id":"", "reservation_id":"r-npxv0e40", "state":"active", "task_state":null, "power_state":"running", "tenant_id":"6f70656e737461636b20342065766572", "terminated_at":null, "auto_disk_config":"MANUAL", "flavor": {"$ref": "FlavorPayload.json#"}, "updated_at": "2012-10-29T13:42:11Z", "user_id":"fake", "uuid":"178b0921-8f85-4257-88b6-2e743b5a975c", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "action_initiator_user": "fake", "action_initiator_project": "6f70656e737461636b20342065766572", "locked_reason": null, "shares": [] }, "nova_object.name":"InstancePayload", "nova_object.namespace":"nova", "nova_object.version":"1.9" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/InstancePayloadShareActive.json0000664000175000017500000000333700000000000031023 0ustar00zuulzuul00000000000000{ "nova_object.data": { "architecture": "x86_64", "availability_zone": "nova", "block_devices": [ { "$ref": "BlockDevicePayload.json#" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_name": "some-server", "display_description": "some-server", "host": "compute", "host_name": "some-server", "ip_addresses": [ { "$ref": "IpPayload.json#" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "metadata": {}, "locked": false, "node": "fake-mini", "os_type": null, "progress": 0, "ramdisk_id": "", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "power_state": "running", "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "auto_disk_config": "MANUAL", "flavor": { "$ref": "FlavorPayload.json#" }, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "178b0921-8f85-4257-88b6-2e743b5a975c", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "action_initiator_user": "fake", "action_initiator_project": "6f70656e737461636b20342065766572", "locked_reason": null, "shares": [ { "$ref": "ShareActivePayload.json#" } ] }, "nova_object.name": "InstancePayload", "nova_object.namespace": "nova", "nova_object.version": "1.9" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/InstancePayloadShareInactive.json0000664000175000017500000000334100000000000031345 0ustar00zuulzuul00000000000000{ "nova_object.data": { 
"architecture": "x86_64", "availability_zone": "nova", "block_devices": [ { "$ref": "BlockDevicePayload.json#" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_name": "some-server", "display_description": "some-server", "host": "compute", "host_name": "some-server", "ip_addresses": [ { "$ref": "IpPayload.json#" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "metadata": {}, "locked": false, "node": "fake-mini", "os_type": null, "progress": 0, "ramdisk_id": "", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "power_state": "running", "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "auto_disk_config": "MANUAL", "flavor": { "$ref": "FlavorPayload.json#" }, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "178b0921-8f85-4257-88b6-2e743b5a975c", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "action_initiator_user": "fake", "action_initiator_project": "6f70656e737461636b20342065766572", "locked_reason": null, "shares": [ { "$ref": "ShareInactivePayload.json#" } ] }, "nova_object.name": "InstancePayload", "nova_object.namespace": "nova", "nova_object.version": "1.9" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/InstanceUpdatePayload.json0000664000175000017500000000161500000000000030044 0ustar00zuulzuul00000000000000{ "$ref": "InstancePayload.json", "nova_object.data": { "audit_period": { "nova_object.data": { "audit_period_beginning": "2012-10-01T00:00:00Z", "audit_period_ending": "2012-10-29T13:42:11Z" }, "nova_object.name": "AuditPeriodPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" }, "block_devices": [], "old_display_name": null, "state_update": { "nova_object.data": { "new_task_state": null, "old_state": "active", "old_task_state": null, "state": "active" }, "nova_object.name": "InstanceStateUpdatePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" }, "tags": [], "updated_at": "2012-10-29T13:42:11Z" }, "nova_object.name": "InstanceUpdatePayload", "nova_object.namespace": "nova", "nova_object.version": "2.1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/IpPayload.json0000664000175000017500000000060100000000000025477 0ustar00zuulzuul00000000000000{ "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0", "nova_object.data": { "mac": "fa:16:3e:4c:2c:30", "address": "192.168.1.3", "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "meta": {}, "version": 4, "label": "private", "device_name": "tapce531f90-19" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/KeypairPayload.json0000664000175000017500000000105100000000000026533 0ustar00zuulzuul00000000000000{ "nova_object.version": "1.0", "nova_object.namespace": "nova", "nova_object.name": "KeypairPayload", "nova_object.data": { "user_id": "fake", "name": "my-key", "fingerprint": "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c", "public_key": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", "type": "ssh" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/RequestSpecPayload.json0000664000175000017500000000146700000000000027405 0ustar00zuulzuul00000000000000{ "nova_object.namespace": "nova", "nova_object.data": { "availability_zone": null, "flavor": {"$ref": "FlavorPayload.json#"}, "ignore_hosts": null, "image": {"$ref": "ImageMetaPayload.json#"}, "instance_uuid": "d5e6a7b7-80e5-4166-85a3-cd6115201082", "num_instances": 1, "numa_topology": null, "pci_requests": {"$ref": "InstancePCIRequestsPayload.json#"}, "project_id": "6f70656e737461636b20342065766572", "scheduler_hints": {}, "security_groups": ["default"], "force_hosts": null, "force_nodes": null, "instance_group": null, "requested_destination": null, "retry": null, "user_id": "fake" }, "nova_object.name": "RequestSpecPayload", "nova_object.version": "1.1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/ServerGroupPayload.json0000664000175000017500000000100400000000000027410 0ustar00zuulzuul00000000000000{ "nova_object.version": "1.1", "nova_object.namespace": "nova", "nova_object.name": "ServerGroupPayload", "nova_object.data": { "uuid": "788608ec-ebdc-45c5-bc7f-e5f24ab92c80", "name": "test-server-group", "project_id": "6f70656e737461636b20342065766572", "user_id": "fake", "policies": [ "anti-affinity" ], "policy": "anti-affinity", "rules": {"max_server_per_host": "3"}, "members": [], "hosts": null } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/ServiceStatusPayload.json0000664000175000017500000000076700000000000027750 0ustar00zuulzuul00000000000000{ "nova_object.name": "ServiceStatusPayload", "nova_object.namespace": "nova", "nova_object.version": "1.1", "nova_object.data": { "host": "host2", "disabled": false, "binary": "nova-compute", "topic": "compute", "disabled_reason": null, "forced_down": false, "version": 23, "availability_zone": null, "uuid": "fa69c544-906b-4a6a-a9c6-c1f7a8078c73", "last_seen_up": null, "report_count": 0 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/ShareActivePayload.json0000664000175000017500000000063000000000000027327 0ustar00zuulzuul00000000000000{ "nova_object.data": { "share_mapping_uuid": "f7c1726d-7622-42b3-8b2c-4473239d60d1", "export_location": "10.0.0.50:/mnt/foo", "share_id": "e8debdc0-447a-4376-a10a-4cd9122d7986", "status": "active", "tag": "e8debdc0-447a-4376-a10a-4cd9122d7986" }, "nova_object.name": "SharePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/common_payloads/ShareInactivePayload.json0000664000175000017500000000063200000000000027660 0ustar00zuulzuul00000000000000{ "nova_object.data": { "share_mapping_uuid": "f7c1726d-7622-42b3-8b2c-4473239d60d1", "export_location": "10.0.0.50:/mnt/foo", "share_id": 
"e8debdc0-447a-4376-a10a-4cd9122d7986", "status": "inactive", "tag": "e8debdc0-447a-4376-a10a-4cd9122d7986" }, "nova_object.name": "SharePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/compute-exception.json0000664000175000017500000000121300000000000024101 0ustar00zuulzuul00000000000000{ "event_type": "compute.exception", "payload": { "nova_object.data": { "exception": "AggregateNameExists", "exception_message": "Aggregate versioned_exc_aggregate already exists.", "function_name": "_aggregate_create_in_db", "module_name": "nova.objects.aggregate", "traceback": "Traceback (most recent call last):\n File \"nova/compute/manager.py\", line ..." }, "nova_object.name": "ExceptionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.1" }, "priority": "ERROR", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/compute_task-build_instances-error.json0000664000175000017500000000031000000000000027417 0ustar00zuulzuul00000000000000{ "event_type": "compute_task.build_instances.error", "payload": {"$ref":"common_payloads/ComputeTaskPayload.json#"}, "priority": "ERROR", "publisher_id": "nova-conductor:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/compute_task-migrate_server-error.json0000664000175000017500000000043300000000000027275 0ustar00zuulzuul00000000000000{ "event_type": "compute_task.migrate_server.error", "payload": { "$ref":"common_payloads/ComputeTaskPayload.json#", "nova_object.data":{ "state": "active" } }, "priority": "ERROR", "publisher_id": "nova-conductor:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/compute_task-rebuild_server-error.json0000664000175000017500000000032600000000000027274 0ustar00zuulzuul00000000000000{ "event_type": "compute_task.rebuild_server.error", "payload": { "$ref": "common_payloads/ComputeTaskPayload.json#" }, "priority": "ERROR", "publisher_id": "nova-conductor:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/flavor-create.json0000664000175000017500000000134700000000000023173 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "nova_object.namespace": "nova", "nova_object.version": "1.4", "nova_object.name": "FlavorPayload", "nova_object.data": { "name": "test_flavor", "memory_mb": 1024, "ephemeral_gb": 0, "disabled": false, "vcpus": 2, "swap": 0, "rxtx_factor": 2.0, "is_public": true, "root_gb": 10, "vcpu_weight": 0, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "extra_specs": null, "projects": [], "description":null } }, "event_type": "flavor.create", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/flavor-delete.json0000664000175000017500000000135100000000000023165 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "nova_object.namespace": "nova", "nova_object.version": "1.4", "nova_object.name": "FlavorPayload", "nova_object.data": { "name": 
"test_flavor", "memory_mb": 1024, "ephemeral_gb": 0, "disabled": false, "vcpus": 2, "swap": 0, "rxtx_factor": 2.0, "is_public": true, "root_gb": 10, "vcpu_weight": 0, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "extra_specs": null, "projects": null, "description":null } }, "event_type": "flavor.delete", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/flavor-update.json0000664000175000017500000000147200000000000023211 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "nova_object.namespace": "nova", "nova_object.version": "1.4", "nova_object.name": "FlavorPayload", "nova_object.data": { "name": "test_flavor", "memory_mb": 1024, "ephemeral_gb": 0, "disabled": false, "vcpus": 2, "extra_specs": { "hw:numa_nodes": "2" }, "projects": ["6f70656e737461636b20342065766572"], "swap": 0, "rxtx_factor": 2.0, "is_public": false, "root_gb": 10, "vcpu_weight": 0, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "description":null } }, "event_type": "flavor.update", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-create-end.json0000664000175000017500000000026300000000000024246 0ustar00zuulzuul00000000000000{ "event_type":"instance.create.end", "payload":{"$ref":"common_payloads/InstanceCreatePayload.json#"}, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-create-error.json0000664000175000017500000000210100000000000024622 0ustar00zuulzuul00000000000000{ "event_type":"instance.create.error", "payload":{ "$ref":"common_payloads/InstanceCreatePayload.json#", "nova_object.data": { "fault": { "nova_object.data": { "exception": "FlavorDiskTooSmall", "exception_message": "The created instance's disk would be too small.", "function_name": "_build_resources", "module_name": "nova.tests.functional.notification_sample_tests.test_instance", "traceback": "Traceback (most recent call last):\n File \"nova/compute/manager.py\", line ..." 
}, "nova_object.name": "ExceptionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.1" }, "ip_addresses": [], "launched_at": null, "power_state": "pending", "state": "building", "host": null, "node": null } }, "priority":"ERROR", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-create-start.json0000664000175000017500000000070700000000000024640 0ustar00zuulzuul00000000000000{ "event_type":"instance.create.start", "payload":{ "$ref":"common_payloads/InstanceCreatePayload.json#", "nova_object.data": { "host": null, "ip_addresses": [], "launched_at": null, "node": null, "updated_at": null, "power_state": "pending", "state": "building" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-delete-end.json0000664000175000017500000000066000000000000024246 0ustar00zuulzuul00000000000000{ "event_type":"instance.delete.end", "payload":{ "$ref":"common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "deleted_at":"2012-10-29T13:42:11Z", "ip_addresses":[], "power_state":"pending", "state":"deleted", "terminated_at":"2012-10-29T13:42:11Z" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-delete-end_compute_down.json0000664000175000017500000000065100000000000027031 0ustar00zuulzuul00000000000000{ "event_type":"instance.delete.end", "payload":{ "$ref":"common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "block_devices":[], "deleted_at":"2012-10-29T13:42:11Z", "ip_addresses":[], "state":"deleted", "terminated_at":"2012-10-29T13:42:11Z" } }, "priority":"INFO", "publisher_id":"nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-delete-end_not_scheduled.json0000664000175000017500000000110700000000000027143 0ustar00zuulzuul00000000000000{ "event_type":"instance.delete.end", "payload":{ "$ref":"common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "availability_zone": null, "block_devices":[], "deleted_at":"2012-10-29T13:42:11Z", "host":null, "ip_addresses":[], "launched_at":null, "node":null, "power_state":"pending", "state":"deleted", "terminated_at":"2012-10-29T13:42:11Z" } }, "priority":"INFO", "publisher_id":"nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-delete-start.json0000664000175000017500000000041700000000000024635 0ustar00zuulzuul00000000000000{ "event_type":"instance.delete.start", "payload":{ "$ref":"common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "task_state":"deleting" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-delete-start_compute_down.json0000664000175000017500000000041500000000000027416 0ustar00zuulzuul00000000000000{ "event_type":"instance.delete.start", "payload":{ "$ref":"common_payloads/InstanceActionPayload.json#", 
"nova_object.data":{ "task_state":"deleting" } }, "priority":"INFO", "publisher_id":"nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-delete-start_not_scheduled.json0000664000175000017500000000100700000000000027531 0ustar00zuulzuul00000000000000{ "event_type":"instance.delete.start", "payload":{ "$ref":"common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "availability_zone": null, "block_devices":[], "host":null, "ip_addresses":[], "launched_at":null, "node":null, "power_state":"pending", "state":"error", "task_state":"deleting" } }, "priority":"INFO", "publisher_id":"nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-evacuate.json0000664000175000017500000000056600000000000024042 0ustar00zuulzuul00000000000000{ "event_type": "instance.evacuate", "payload": { "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { "host": "host2", "node": "host2", "task_state": "rebuilding", "action_initiator_user": "admin" } }, "priority": "INFO", "publisher_id": "nova-api:host2" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-exists.json0000664000175000017500000000055500000000000023562 0ustar00zuulzuul00000000000000{ "event_type":"instance.exists", "payload":{ "$ref":"common_payloads/InstanceExistsPayload.json#", "nova_object.data":{ "architecture":null, "image_uuid":"a2459075-d96c-40d5-893e-577ff92e721c", "task_state":"rebuilding" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-interface_attach-end.json0000664000175000017500000000274700000000000026300 0ustar00zuulzuul00000000000000{ "publisher_id": "nova-compute:compute", "event_type": "instance.interface_attach.end", "priority": "INFO", "payload": { "$ref":"common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "ip_addresses": [ { "nova_object.data": { "device_name": "tapce531f90-19", "address": "192.168.1.3", "version": 4, "label": "private", "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "mac": "fa:16:3e:4c:2c:30", "meta": {} }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" }, { "nova_object.data": { "device_name": "tap88dae9fa-0d", "address": "192.168.1.30", "version": 4, "label": "private", "port_uuid": "88dae9fa-0dc6-49e3-8c29-3abc41e99ac9", "mac": "00:0c:29:0d:11:74", "meta": {} }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ] } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-interface_attach-error.json0000664000175000017500000000157100000000000026655 0ustar00zuulzuul00000000000000{ "priority": "ERROR", "event_type": "instance.interface_attach.error", "payload":{ "$ref":"common_payloads/InstanceActionPayload.json#", "nova_object.data": { "fault": { "nova_object.data": { "exception": "InterfaceAttachFailed", "exception_message": "dummy", "function_name": "_unsuccessful_attach_interface", "module_name": "nova.tests.functional.notification_sample_tests.test_instance", 
"traceback": "Traceback (most recent call last):\n File \"nova/compute/manager.py\", line ..." }, "nova_object.name": "ExceptionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.1" } } }, "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-interface_attach-start.json0000664000175000017500000000030200000000000026650 0ustar00zuulzuul00000000000000{ "priority": "INFO", "event_type": "instance.interface_attach.start", "payload":{"$ref":"common_payloads/InstanceActionPayload.json#"}, "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-interface_detach-end.json0000664000175000017500000000030000000000000026243 0ustar00zuulzuul00000000000000{ "publisher_id": "nova-compute:compute", "event_type": "instance.interface_detach.end", "priority": "INFO", "payload":{"$ref":"common_payloads/InstanceActionPayload.json#"} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-interface_detach-start.json0000664000175000017500000000275100000000000026646 0ustar00zuulzuul00000000000000{ "priority": "INFO", "event_type": "instance.interface_detach.start", "payload": { "$ref":"common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "ip_addresses": [ { "nova_object.data": { "device_name": "tapce531f90-19", "address": "192.168.1.3", "version": 4, "label": "private", "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "mac": "fa:16:3e:4c:2c:30", "meta": {} }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" }, { "nova_object.data": { "device_name": "tap88dae9fa-0d", "address": "192.168.1.30", "version": 4, "label": "private", "port_uuid": "88dae9fa-0dc6-49e3-8c29-3abc41e99ac9", "mac": "00:0c:29:0d:11:74", "meta": {} }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ] } }, "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-live_migration_abort-end.json0000664000175000017500000000051300000000000027200 0ustar00zuulzuul00000000000000{ "event_type":"instance.live_migration_abort.end", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "task_state": "migrating", "action_initiator_user": "admin" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-live_migration_abort-start.json0000664000175000017500000000051500000000000027571 0ustar00zuulzuul00000000000000{ "event_type":"instance.live_migration_abort.start", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "task_state": "migrating", "action_initiator_user": "admin" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-live_migration_force_complete-end.json0000664000175000017500000000053200000000000031060 
0ustar00zuulzuul00000000000000{ "event_type": "instance.live_migration_force_complete.end", "payload": { "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { "action_initiator_user": "admin", "task_state": "migrating" } }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-live_migration_force_complete-start.json0000664000175000017500000000053400000000000031451 0ustar00zuulzuul00000000000000{ "event_type": "instance.live_migration_force_complete.start", "payload": { "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { "action_initiator_user": "admin", "task_state": "migrating" } }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-live_migration_post-end.json0000664000175000017500000000052000000000000027054 0ustar00zuulzuul00000000000000{ "event_type": "instance.live_migration_post.end", "payload": { "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { "action_initiator_user": "admin", "task_state": "migrating" } }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-live_migration_post-start.json0000664000175000017500000000052200000000000027445 0ustar00zuulzuul00000000000000{ "event_type": "instance.live_migration_post.start", "payload": { "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { "action_initiator_user": "admin", "task_state": "migrating" } }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-live_migration_post_dest-end.json0000664000175000017500000000054100000000000030076 0ustar00zuulzuul00000000000000{ "event_type":"instance.live_migration_post_dest.end", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "host": "host2", "node": "host2", "action_initiator_user": "admin" } }, "priority":"INFO", "publisher_id":"nova-compute:host2" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-live_migration_post_dest-start.json0000664000175000017500000000052000000000000030462 0ustar00zuulzuul00000000000000{ "event_type":"instance.live_migration_post_dest.start", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "task_state": "migrating", "action_initiator_user": "admin" } }, "priority":"INFO", "publisher_id":"nova-compute:host2" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-live_migration_pre-end.json0000664000175000017500000000051500000000000026661 0ustar00zuulzuul00000000000000{ "event_type": "instance.live_migration_pre.end", "payload": { "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { "task_state": "migrating", "action_initiator_user": "admin" } }, "priority": "INFO", "publisher_id": "nova-compute:host2" } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-live_migration_pre-start.json0000664000175000017500000000051700000000000027252 0ustar00zuulzuul00000000000000{ "event_type": "instance.live_migration_pre.start", "payload": { "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { "task_state": "migrating", "action_initiator_user": "admin" } }, "priority": "INFO", "publisher_id": "nova-compute:host2" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-live_migration_rollback-end.json0000664000175000017500000000044700000000000027670 0ustar00zuulzuul00000000000000{ "event_type":"instance.live_migration_rollback.end", "payload":{ "$ref":"common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "action_initiator_user": "admin" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-live_migration_rollback-start.json0000664000175000017500000000052000000000000030247 0ustar00zuulzuul00000000000000{ "event_type":"instance.live_migration_rollback.start", "payload":{ "$ref":"common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "action_initiator_user": "admin", "task_state": "migrating" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-live_migration_rollback_dest-end.json0000664000175000017500000000052700000000000030706 0ustar00zuulzuul00000000000000{ "event_type": "instance.live_migration_rollback_dest.end", "payload": { "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { "action_initiator_user": "admin", "task_state": "migrating" } }, "priority": "INFO", "publisher_id": "nova-compute:host2" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-live_migration_rollback_dest-start.json0000664000175000017500000000053100000000000031270 0ustar00zuulzuul00000000000000{ "event_type": "instance.live_migration_rollback_dest.start", "payload": { "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { "action_initiator_user": "admin", "task_state": "migrating" } }, "priority": "INFO", "publisher_id": "nova-compute:host2" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-lock-with-reason.json0000664000175000017500000000045200000000000025425 0ustar00zuulzuul00000000000000{ "event_type":"instance.lock", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "locked":true, "locked_reason":"global warming" } }, "priority":"INFO", "publisher_id":"nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-lock.json0000664000175000017500000000043700000000000023172 0ustar00zuulzuul00000000000000{ "event_type":"instance.lock", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "locked":true, "locked_reason": null } 
}, "priority":"INFO", "publisher_id":"nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-pause-end.json0000664000175000017500000000041000000000000024112 0ustar00zuulzuul00000000000000{ "event_type":"instance.pause.end", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { "state": "paused" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-pause-start.json0000664000175000017500000000042000000000000024502 0ustar00zuulzuul00000000000000{ "event_type":"instance.pause.start", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { "task_state": "pausing" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-power_off-end.json0000664000175000017500000000046100000000000024771 0ustar00zuulzuul00000000000000{ "event_type":"instance.power_off.end", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "state":"stopped", "power_state":"shutdown" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-power_off-start.json0000664000175000017500000000042700000000000025362 0ustar00zuulzuul00000000000000{ "event_type":"instance.power_off.start", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "task_state":"powering-off" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-power_on-end.json0000664000175000017500000000026600000000000024636 0ustar00zuulzuul00000000000000{ "event_type":"instance.power_on.end", "payload":{"$ref": "common_payloads/InstanceActionPayload.json#"}, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-power_on-start.json0000664000175000017500000000053200000000000025221 0ustar00zuulzuul00000000000000{ "event_type":"instance.power_on.start", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "task_state":"powering-on", "state":"stopped", "power_state":"shutdown" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-power_on_share-end.json0000664000175000017500000000030500000000000026012 0ustar00zuulzuul00000000000000{ "event_type": "instance.power_on.end", "payload": { "$ref": "common_payloads/InstanceActionPayloadShareActive.json#" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-power_on_share-start.json0000664000175000017500000000050700000000000026405 
0ustar00zuulzuul00000000000000{ "event_type": "instance.power_on.start", "payload": { "$ref": "common_payloads/InstanceActionPayloadShareInactive.json#", "nova_object.data": { "task_state": "powering-on", "state": "stopped", "power_state": "shutdown" } }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-reboot-end.json0000664000175000017500000000026300000000000024275 0ustar00zuulzuul00000000000000{ "event_type":"instance.reboot.end", "payload":{"$ref":"common_payloads/InstanceActionPayload.json#"}, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-reboot-error.json0000664000175000017500000000171100000000000024657 0ustar00zuulzuul00000000000000{ "event_type":"instance.reboot.error", "payload":{ "$ref":"common_payloads/InstanceActionPayload.json#", "nova_object.data": { "fault": { "nova_object.data": { "exception": "UnsupportedVirtType", "exception_message": "Virtualization type 'FakeVirt' is not supported by this compute driver", "function_name": "_hard_reboot", "module_name": "nova.tests.functional.notification_sample_tests.test_instance", "traceback": "Traceback (most recent call last):\n File \"nova/compute/manager.py\", line ..." }, "nova_object.name": "ExceptionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.1" }, "task_state":"reboot_started_hard" } }, "priority":"ERROR", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-reboot-start.json0000664000175000017500000000043200000000000024662 0ustar00zuulzuul00000000000000{ "event_type":"instance.reboot.start", "payload":{ "$ref":"common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "task_state":"reboot_pending_hard" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-rebuild-end.json0000664000175000017500000000030000000000000024421 0ustar00zuulzuul00000000000000{ "event_type": "instance.rebuild.end", "publisher_id": "nova-compute:compute", "payload": {"$ref": "common_payloads/InstanceActionRebuildPayload.json#"}, "priority": "INFO" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-rebuild-error.json0000664000175000017500000000171100000000000025013 0ustar00zuulzuul00000000000000{ "priority": "ERROR", "payload": { "$ref": "common_payloads/InstanceActionRebuildPayload.json#", "nova_object.data": { "fault": { "nova_object.name": "ExceptionPayload", "nova_object.data": { "module_name": "nova.tests.functional.notification_sample_tests.test_instance", "exception_message": "Virtual Interface creation failed", "function_name": "_virtual_interface_create_failed", "exception": "VirtualInterfaceCreateException", "traceback": "Traceback (most recent call last):\n File \"nova/compute/manager.py\", line ..." 
}, "nova_object.version": "1.1", "nova_object.namespace": "nova" }, "task_state": "rebuilding" } }, "publisher_id": "nova-compute:compute", "event_type": "instance.rebuild.error" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-rebuild-start.json0000664000175000017500000000044000000000000025015 0ustar00zuulzuul00000000000000{ "priority": "INFO", "event_type": "instance.rebuild.start", "publisher_id": "nova-compute:compute", "payload": { "$ref": "common_payloads/InstanceActionRebuildPayload.json#", "nova_object.data": { "task_state": "rebuilding" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-rebuild_scheduled.json0000664000175000017500000000044600000000000025710 0ustar00zuulzuul00000000000000{ "priority": "INFO", "event_type": "instance.rebuild_scheduled", "publisher_id": "nova-conductor:compute", "payload": { "$ref": "common_payloads/InstanceActionRebuildPayload.json#", "nova_object.data": { "task_state": "rebuilding" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-rescue-end.json0000664000175000017500000000047100000000000024272 0ustar00zuulzuul00000000000000{ "event_type": "instance.rescue.end", "payload": { "$ref": "common_payloads/InstanceActionRescuePayload.json#", "nova_object.data": { "state": "rescued", "power_state": "shutdown" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-rescue-start.json0000664000175000017500000000043400000000000024660 0ustar00zuulzuul00000000000000{ "event_type": "instance.rescue.start", "payload": { "$ref": "common_payloads/InstanceActionRescuePayload.json#", "nova_object.data": { "task_state": "rescuing" } }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-resize-end.json0000664000175000017500000000042700000000000024306 0ustar00zuulzuul00000000000000{ "event_type":"instance.resize.end", "payload": { "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "task_state": "resize_migrated" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-resize-error.json0000664000175000017500000000170500000000000024671 0ustar00zuulzuul00000000000000{ "event_type":"instance.resize.error", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "fault":{ "nova_object.data":{ "exception":"FlavorDiskTooSmall", "exception_message":"The created instance's disk would be too small.", "function_name":"_build_resources", "module_name":"nova.tests.functional.notification_sample_tests.test_instance", "traceback":"Traceback (most recent call last):\n File \"nova/compute/manager.py\", line ..." 
}, "nova_object.name":"ExceptionPayload", "nova_object.namespace":"nova", "nova_object.version":"1.1" }, "block_devices": [], "task_state": "resize_prep" } }, "priority":"ERROR", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-resize-start.json0000664000175000017500000000043100000000000024670 0ustar00zuulzuul00000000000000{ "event_type":"instance.resize.start", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "task_state": "resize_migrating" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-resize_confirm-end.json0000664000175000017500000000074700000000000026030 0ustar00zuulzuul00000000000000{ "event_type":"instance.resize_confirm.end", "payload": { "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { "flavor": { "nova_object.data": { "flavorid": "2", "memory_mb": 2048, "name": "m1.small", "root_gb":20 } } } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-resize_confirm-start.json0000664000175000017500000000101100000000000026400 0ustar00zuulzuul00000000000000{ "event_type":"instance.resize_confirm.start", "payload": { "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { "flavor": { "nova_object.data": { "flavorid": "2", "memory_mb": 2048, "name": "m1.small", "root_gb":20 } }, "state": "resized" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-resize_finish-end.json0000664000175000017500000000111400000000000025640 0ustar00zuulzuul00000000000000{ "event_type":"instance.resize_finish.end", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "flavor": { "nova_object.data": { "name": "other_flavor", "flavorid": "d5a8bb54-365a-45ae-abdb-38d249df7845", "extra_specs": {"hw:watchdog_action": "reset"}, "memory_mb": 256 } }, "state": "resized" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-resize_finish-start.json0000664000175000017500000000113100000000000026226 0ustar00zuulzuul00000000000000{ "event_type":"instance.resize_finish.start", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "flavor": { "nova_object.data": { "name": "other_flavor", "flavorid": "d5a8bb54-365a-45ae-abdb-38d249df7845", "extra_specs": {"hw:watchdog_action": "reset"}, "memory_mb": 256 } }, "task_state": "resize_finish" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-resize_prep-end.json0000664000175000017500000000030600000000000025330 0ustar00zuulzuul00000000000000{ "event_type": "instance.resize_prep.end", "payload": {"$ref":"common_payloads/InstanceActionResizePrepPayload.json#"}, "priority": 
"INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-resize_prep-start.json0000664000175000017500000000031000000000000025712 0ustar00zuulzuul00000000000000{ "event_type": "instance.resize_prep.start", "payload": {"$ref":"common_payloads/InstanceActionResizePrepPayload.json#"}, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-resize_revert-end.json0000664000175000017500000000027200000000000025673 0ustar00zuulzuul00000000000000{ "event_type":"instance.resize_revert.end", "payload":{"$ref":"common_payloads/InstanceActionPayload.json#"}, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-resize_revert-start.json0000664000175000017500000000124700000000000026265 0ustar00zuulzuul00000000000000{ "event_type":"instance.resize_revert.start", "payload":{ "$ref":"common_payloads/InstanceActionPayload.json#", "nova_object.data": { "flavor":{ "nova_object.data": { "flavorid": "d5a8bb54-365a-45ae-abdb-38d249df7845", "name": "other_flavor", "memory_mb": 256, "extra_specs": { "hw:watchdog_action": "reset" } } }, "state":"resized", "task_state":"resize_reverting" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-restore-end.json0000664000175000017500000000030100000000000024457 0ustar00zuulzuul00000000000000{ "event_type":"instance.restore.end", "payload":{ "$ref":"common_payloads/InstanceActionPayload.json#" }, "priority":"INFO", "publisher_id":"nova-compute:compute" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-restore-start.json0000664000175000017500000000046500000000000025061 0ustar00zuulzuul00000000000000{ "event_type":"instance.restore.start", "payload":{ "$ref":"common_payloads/InstanceActionPayload.json#", "nova_object.data": { "state":"soft-delete", "task_state":"restoring" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-resume-end.json0000664000175000017500000000030100000000000024274 0ustar00zuulzuul00000000000000{ "event_type":"instance.resume.end", "payload":{ "$ref":"common_payloads/InstanceActionPayload.json#" }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-resume-start.json0000664000175000017500000000046700000000000024700 0ustar00zuulzuul00000000000000{ "event_type": "instance.resume.start", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { "state": "suspended", "task_state": "resuming" } }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/notification_samples/instance-share_attach-end.json0000664000175000017500000000031500000000000025427 0ustar00zuulzuul00000000000000{ "event_type": "instance.share_attach.end", "payload": { "$ref": "common_payloads/InstanceActionSharePayload.json#" }, "priority": "INFO", "publisher_id": "nova-api:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-share_attach-error.json0000664000175000017500000000145500000000000026020 0ustar00zuulzuul00000000000000{ "event_type": "instance.share_attach.error", "payload": { "$ref": "common_payloads/InstanceActionSharePayload.json#", "nova_object.data": { "fault": { "nova_object.data": { "exception": "ShareAccessGrantError", "exception_message": "Share access could not be granted to share id 8db0037b-e98f-4bde-ae71-f96a077c19a4.\nReason: Connection timed out.", "function_name": "_execute_mock_call", "module_name": "unittest.mock", "traceback": "Traceback (most recent call last):\n File \"nova/compute/manager.py\", line ..." }, "nova_object.name": "ExceptionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.1" } } }, "priority": "ERROR", "publisher_id": "nova-api:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-share_attach-start.json0000664000175000017500000000031700000000000026020 0ustar00zuulzuul00000000000000{ "event_type": "instance.share_attach.start", "payload": { "$ref": "common_payloads/InstanceActionSharePayload.json#" }, "priority": "INFO", "publisher_id": "nova-api:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-share_detach-end.json0000664000175000017500000000031500000000000025413 0ustar00zuulzuul00000000000000{ "event_type": "instance.share_detach.end", "payload": { "$ref": "common_payloads/InstanceActionSharePayload.json#" }, "priority": "INFO", "publisher_id": "nova-api:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-share_detach-error.json0000664000175000017500000000165500000000000026006 0ustar00zuulzuul00000000000000{ "event_type": "instance.share_detach.error", "payload": { "$ref": "common_payloads/InstanceActionSharePayload.json#", "nova_object.data": { "fault": { "nova_object.data": { "exception": "ShareAccessRemovalError", "exception_message": "Share access could not be removed from share id 8db0037b-e98f-4bde-ae71-f96a077c19a4.\nReason: Connection timed out.", "function_name": "_execute_mock_call", "module_name": "unittest.mock", "traceback": "Traceback (most recent call last):\n File \"nova/compute/manager.py\", line ..." 
}, "nova_object.name": "ExceptionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.1" } } }, "priority": "ERROR", "publisher_id": "nova-api:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-share_detach-start.json0000664000175000017500000000031700000000000026004 0ustar00zuulzuul00000000000000{ "event_type": "instance.share_detach.start", "payload": { "$ref": "common_payloads/InstanceActionSharePayload.json#" }, "priority": "INFO", "publisher_id": "nova-api:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-shelve-end.json0000664000175000017500000000046000000000000024270 0ustar00zuulzuul00000000000000{ "event_type":"instance.shelve.end", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "state": "shelved", "power_state": "shutdown" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-shelve-start.json0000664000175000017500000000042100000000000024654 0ustar00zuulzuul00000000000000{ "event_type":"instance.shelve.start", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "task_state": "shelving" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-shelve_offload-end.json0000664000175000017500000000063500000000000025766 0ustar00zuulzuul00000000000000{ "event_type":"instance.shelve_offload.end", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "availability_zone": null, "state": "shelved_offloaded", "power_state": "shutdown", "host": null, "node": null } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-shelve_offload-start.json0000664000175000017500000000055400000000000026355 0ustar00zuulzuul00000000000000{ "event_type":"instance.shelve_offload.start", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "state": "shelved", "task_state": "shelving_offloading", "power_state": "shutdown" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-shutdown-end.json0000664000175000017500000000046100000000000024656 0ustar00zuulzuul00000000000000{ "event_type":"instance.shutdown.end", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "ip_addresses": [], "task_state": "deleting" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-shutdown-start.json0000664000175000017500000000042300000000000025243 0ustar00zuulzuul00000000000000{ "event_type":"instance.shutdown.start", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "task_state": 
"deleting" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-snapshot-end.json0000664000175000017500000000031400000000000024637 0ustar00zuulzuul00000000000000{ "event_type":"instance.snapshot.end", "payload":{ "$ref": "common_payloads/InstanceActionSnapshotPayload.json#" }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-snapshot-start.json0000664000175000017500000000044000000000000025226 0ustar00zuulzuul00000000000000{ "event_type":"instance.snapshot.start", "payload":{ "$ref": "common_payloads/InstanceActionSnapshotPayload.json#", "nova_object.data":{ "task_state":"image_snapshot" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-soft_delete-end.json0000664000175000017500000000050700000000000025301 0ustar00zuulzuul00000000000000{ "event_type":"instance.soft_delete.end", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { "deleted_at": "2012-10-29T13:42:11Z", "state": "soft-delete" } }, "priority":"INFO", "publisher_id":"nova-compute:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-soft_delete-start.json0000664000175000017500000000052000000000000025663 0ustar00zuulzuul00000000000000{ "event_type":"instance.soft_delete.start", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { "deleted_at": "2012-10-29T13:42:11Z", "task_state": "soft-deleting" } }, "priority":"INFO", "publisher_id":"nova-compute:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-suspend-end.json0000664000175000017500000000041400000000000024462 0ustar00zuulzuul00000000000000{ "event_type":"instance.suspend.end", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "state": "suspended" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-suspend-start.json0000664000175000017500000000042400000000000025052 0ustar00zuulzuul00000000000000{ "event_type":"instance.suspend.start", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "task_state": "suspending" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-trigger_crash_dump-end.json0000664000175000017500000000032000000000000026645 0ustar00zuulzuul00000000000000{ "event_type":"instance.trigger_crash_dump.end", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 
nova-32.0.0/doc/notification_samples/instance-trigger_crash_dump-start.json0000664000175000017500000000032200000000000027236 0ustar00zuulzuul00000000000000{ "event_type":"instance.trigger_crash_dump.start", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-unlock.json0000664000175000017500000000027400000000000023534 0ustar00zuulzuul00000000000000{ "event_type":"instance.unlock", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#" }, "priority":"INFO", "publisher_id":"nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-unpause-end.json0000664000175000017500000000026500000000000024465 0ustar00zuulzuul00000000000000{ "event_type":"instance.unpause.end", "payload":{"$ref": "common_payloads/InstanceActionPayload.json#"}, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-unpause-start.json0000664000175000017500000000046300000000000025054 0ustar00zuulzuul00000000000000{ "event_type":"instance.unpause.start", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { "state": "paused", "task_state": "unpausing" } }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-unrescue-end.json0000664000175000017500000000027100000000000024633 0ustar00zuulzuul00000000000000{ "event_type": "instance.unrescue.end", "payload":{"$ref": "common_payloads/InstanceActionPayload.json#"}, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-unrescue-start.json0000664000175000017500000000054000000000000025221 0ustar00zuulzuul00000000000000{ "event_type": "instance.unrescue.start", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { "power_state": "shutdown", "task_state": "unrescuing", "state": "rescued" } }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-unshelve-end.json0000664000175000017500000000030400000000000024630 0ustar00zuulzuul00000000000000{ "event_type":"instance.unshelve.end", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#" }, "priority":"INFO", "publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-unshelve-start.json0000664000175000017500000000063200000000000025223 0ustar00zuulzuul00000000000000{ "event_type":"instance.unshelve.start", "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ "state": "shelved_offloaded", "power_state": "shutdown", "task_state": "unshelving", "host": null, "node": null } }, "priority":"INFO", 
"publisher_id":"nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-update-tags-action.json0000664000175000017500000000040500000000000025726 0ustar00zuulzuul00000000000000{ "event_type": "instance.update", "payload": { "$ref":"common_payloads/InstanceUpdatePayload.json#", "nova_object.data": { "tags": ["tag1"] } }, "priority": "INFO", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-update.json0000664000175000017500000000132700000000000023523 0ustar00zuulzuul00000000000000{ "event_type": "instance.update", "payload": { "$ref":"common_payloads/InstanceUpdatePayload.json#", "nova_object.data": { "ip_addresses": [], "updated_at": null, "power_state": "pending", "launched_at": null, "task_state": "scheduling", "state_update": { "nova_object.data": { "old_state": "building", "state": "building"}, "nova_object.name": "InstanceStateUpdatePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0"} } }, "priority": "INFO", "publisher_id": "nova-compute:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-volume_attach-end.json0000664000175000017500000000032300000000000025633 0ustar00zuulzuul00000000000000{ "event_type": "instance.volume_attach.end", "payload": { "$ref": "common_payloads/InstanceActionVolumePayload.json#" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-volume_attach-error.json0000664000175000017500000000163700000000000026227 0ustar00zuulzuul00000000000000{ "event_type": "instance.volume_attach.error", "payload": { "$ref": "common_payloads/InstanceActionVolumePayload.json#", "nova_object.data": { "fault": { "nova_object.data": { "exception": "CinderConnectionFailed", "exception_message": "Connection to cinder host failed: Connection timed out", "function_name": "attach_volume", "module_name": "nova.tests.functional.notification_sample_tests.test_instance", "traceback": "Traceback (most recent call last):\n File \"nova/compute/manager.py\", line ..." 
}, "nova_object.name": "ExceptionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.1" } } }, "priority": "ERROR", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-volume_attach-start.json0000664000175000017500000000032500000000000026224 0ustar00zuulzuul00000000000000{ "event_type": "instance.volume_attach.start", "payload": { "$ref": "common_payloads/InstanceActionVolumePayload.json#" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-volume_detach-end.json0000664000175000017500000000032300000000000025617 0ustar00zuulzuul00000000000000{ "event_type": "instance.volume_detach.end", "payload": { "$ref": "common_payloads/InstanceActionVolumePayload.json#" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-volume_detach-start.json0000664000175000017500000000032500000000000026210 0ustar00zuulzuul00000000000000{ "event_type": "instance.volume_detach.start", "payload": { "$ref": "common_payloads/InstanceActionVolumePayload.json#" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-volume_swap-end.json0000664000175000017500000000136200000000000025345 0ustar00zuulzuul00000000000000{ "event_type": "instance.volume_swap.end", "payload": { "$ref": "common_payloads/InstanceActionVolumeSwapPayload.json#", "nova_object.data": { "block_devices": [{ "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "227cc671-f30b-4488-96fd-7d0bf13648d8" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" }] } }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-volume_swap-error.json0000664000175000017500000000177000000000000025733 0ustar00zuulzuul00000000000000{ "event_type": "instance.volume_swap.error", "payload": { "$ref": "common_payloads/InstanceActionVolumeSwapPayload.json#", "nova_object.data": { "new_volume_id": "9c6d9c2d-7a8f-4c80-938d-3bf062b8d489", "old_volume_id": "828419fa-3efb-4533-b458-4267ca5fe9b1", "fault": { "nova_object.data": { "exception": "TypeError", "exception_message": "'tuple' object does not support item assignment", "function_name": "_init_volume_connection", "module_name": "nova.compute.manager", "traceback": "Traceback (most recent call last):\n File \"nova/compute/manager.py\", line ..." 
}, "nova_object.name": "ExceptionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.1" } } }, "priority": "ERROR", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/instance-volume_swap-start.json0000664000175000017500000000032700000000000025734 0ustar00zuulzuul00000000000000{ "event_type": "instance.volume_swap.start", "payload": { "$ref": "common_payloads/InstanceActionVolumeSwapPayload.json#" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/keypair-create-end.json0000664000175000017500000000025600000000000024110 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": {"$ref": "common_payloads/KeypairPayload.json#"}, "event_type": "keypair.create.end", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/keypair-create-start.json0000664000175000017500000000044700000000000024501 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "$ref": "common_payloads/KeypairPayload.json#", "nova_object.data": { "fingerprint": null, "public_key": null } }, "event_type": "keypair.create.start", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/keypair-delete-end.json0000664000175000017500000000025500000000000024106 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": {"$ref": "common_payloads/KeypairPayload.json#"}, "event_type": "keypair.delete.end", "publisher_id": "nova-api:fake-mini" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/keypair-delete-start.json0000664000175000017500000000025700000000000024477 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": {"$ref": "common_payloads/KeypairPayload.json#"}, "event_type": "keypair.delete.start", "publisher_id": "nova-api:fake-mini" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/keypair-import-end.json0000664000175000017500000000025500000000000024156 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": {"$ref": "common_payloads/KeypairPayload.json#"}, "event_type": "keypair.import.end", "publisher_id": "nova-api:fake-mini" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/keypair-import-start.json0000664000175000017500000000040600000000000024543 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "$ref": "common_payloads/KeypairPayload.json#", "nova_object.data": { "fingerprint": null } }, "event_type": "keypair.import.start", "publisher_id": "nova-api:fake-mini" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/libvirt-connect-error.json0000664000175000017500000000170300000000000024666 0ustar00zuulzuul00000000000000{ "event_type": "libvirt.connect.error", "payload": { "nova_object.data": { "reason": { "nova_object.data": { "exception": 
"libvirtError", "exception_message": "Sample exception for versioned notification test.", "function_name": "_get_connection", "module_name": "nova.virt.libvirt.host", "traceback": "Traceback (most recent call last):\n File \"nova/virt/libvirt/host.py\", line ..." }, "nova_object.name": "ExceptionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.1" }, "ip": "10.0.2.15" }, "nova_object.name": "LibvirtErrorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" }, "priority": "ERROR", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/metrics-update.json0000664000175000017500000001161000000000000023361 0ustar00zuulzuul00000000000000{ "event_type": "metrics.update", "payload": { "nova_object.version": "1.0", "nova_object.name": "MetricsPayload", "nova_object.namespace": "nova", "nova_object.data": { "host_ip": "10.0.2.15", "host": "compute", "nodename": "fake-mini", "metrics":[ { "nova_object.version": "1.0", "nova_object.name": "MetricPayload", "nova_object.namespace": "nova", "nova_object.data": { "timestamp": "2012-10-29T13:42:11Z", "source": "fake.SmallFakeDriver", "numa_membw_values": null, "name": "cpu.iowait.percent", "value": 0 } }, { "nova_object.version": "1.0", "nova_object.name": "MetricPayload", "nova_object.namespace": "nova", "nova_object.data": { "timestamp": "2012-10-29T13:42:11Z", "source": "fake.SmallFakeDriver", "numa_membw_values": null, "name": "cpu.frequency", "value": 800 } }, { "nova_object.version": "1.0", "nova_object.name": "MetricPayload", "nova_object.namespace": "nova", "nova_object.data": { "timestamp": "2012-10-29T13:42:11Z", "source": "fake.SmallFakeDriver", "numa_membw_values": null, "name": "cpu.idle.percent", "value": 97 } }, { "nova_object.version": "1.0", "nova_object.name": "MetricPayload", "nova_object.namespace": "nova", "nova_object.data": { "timestamp": "2012-10-29T13:42:11Z", "source": "fake.SmallFakeDriver", "numa_membw_values": null, "name": "cpu.iowait.time", "value": 6121490000000 } }, { "nova_object.version": "1.0", "nova_object.name": "MetricPayload", "nova_object.namespace": "nova", "nova_object.data": { "timestamp": "2012-10-29T13:42:11Z", "source": "fake.SmallFakeDriver", "numa_membw_values": null, "name": "cpu.kernel.percent", "value": 0 } }, { "nova_object.version": "1.0", "nova_object.name": "MetricPayload", "nova_object.namespace": "nova", "nova_object.data": { "timestamp": "2012-10-29T13:42:11Z", "source": "fake.SmallFakeDriver", "numa_membw_values": null, "name": "cpu.kernel.time", "value": 5664160000000 } }, { "nova_object.version": "1.0", "nova_object.name": "MetricPayload", "nova_object.namespace": "nova", "nova_object.data": { "timestamp": "2012-10-29T13:42:11Z", "source": "fake.SmallFakeDriver", "numa_membw_values": null, "name": "cpu.percent", "value": 2 } }, { "nova_object.version": "1.0", "nova_object.name": "MetricPayload", "nova_object.namespace": "nova", "nova_object.data": { "timestamp": "2012-10-29T13:42:11Z", "source": "fake.SmallFakeDriver", "numa_membw_values": null, "name": "cpu.user.percent", "value": 1 } }, { "nova_object.version": "1.0", "nova_object.name": "MetricPayload", "nova_object.namespace": "nova", "nova_object.data": { "timestamp": "2012-10-29T13:42:11Z", "source": "fake.SmallFakeDriver", "numa_membw_values": null, "name": "cpu.user.time", "value": 26728850000000 } }, { "nova_object.version": "1.0", "nova_object.name": "MetricPayload", 
"nova_object.namespace": "nova", "nova_object.data": { "timestamp": "2012-10-29T13:42:11Z", "source": "fake.SmallFakeDriver", "numa_membw_values": null, "name": "cpu.idle.time", "value": 1592705190000000 } } ] } }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/scheduler-select_destinations-end.json0000664000175000017500000000030700000000000027217 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": {"$ref": "common_payloads/RequestSpecPayload.json#"}, "event_type": "scheduler.select_destinations.end", "publisher_id": "nova-scheduler:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/scheduler-select_destinations-start.json0000664000175000017500000000031100000000000027601 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": {"$ref": "common_payloads/RequestSpecPayload.json#"}, "event_type": "scheduler.select_destinations.start", "publisher_id": "nova-scheduler:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/server_group-add_member.json0000664000175000017500000000045600000000000025240 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "$ref": "common_payloads/ServerGroupPayload.json#", "nova_object.data": { "members": ["54238a20-f9be-47a7-897e-d7cb0e4c03d0"] } }, "event_type": "server_group.add_member", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/server_group-create.json0000664000175000017500000000026300000000000024420 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": {"$ref": "common_payloads/ServerGroupPayload.json#"}, "event_type": "server_group.create", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/server_group-delete.json0000664000175000017500000000026300000000000024417 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": {"$ref": "common_payloads/ServerGroupPayload.json#"}, "event_type": "server_group.delete", "publisher_id": "nova-api:fake-mini" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/service-create.json0000664000175000017500000000027700000000000023343 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "$ref": "common_payloads/ServiceStatusPayload.json#" }, "event_type": "service.create", "publisher_id": "nova-compute:host2" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/service-delete.json0000664000175000017500000000027700000000000023342 0ustar00zuulzuul00000000000000{ "priority": "INFO", "payload": { "$ref": "common_payloads/ServiceStatusPayload.json#" }, "event_type": "service.delete", "publisher_id": "nova-compute:host2" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/service-update.json0000664000175000017500000000052700000000000023360 0ustar00zuulzuul00000000000000{ 
"priority": "INFO", "payload": { "$ref": "common_payloads/ServiceStatusPayload.json#", "nova_object.data": { "host": "host1", "last_seen_up": "2012-10-29T13:42:05Z", "report_count": 1 } }, "event_type": "service.update", "publisher_id": "nova-compute:host1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/notification_samples/volume-usage.json0000664000175000017500000000132700000000000023050 0ustar00zuulzuul00000000000000{ "event_type": "volume.usage", "payload": { "nova_object.data": { "availability_zone": "nova", "instance_uuid": "88fde343-13a8-4047-84fb-2657d5e702f9", "last_refreshed": "2012-10-29T13:42:11Z", "project_id": "6f70656e737461636b20342065766572", "read_bytes": 0, "reads": 0, "user_id": "fake", "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "write_bytes": 0, "writes": 0 }, "nova_object.name": "VolumeUsagePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/requirements.txt0000664000175000017500000000054600000000000016620 0ustar00zuulzuul00000000000000sphinx>=2.1.1 # BSD sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD sphinx-feature-classification>=1.1.0 # Apache-2.0 os-api-ref>=1.4.0 # Apache-2.0 openstackdocstheme>=2.2.0 # Apache-2.0 # releasenotes reno>=3.1.0 # Apache-2.0 # redirect tests in docs whereto>=0.3.0 # Apache-2.0 # needed to generate osprofiler config options osprofiler>=1.4.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.2096074 nova-32.0.0/doc/source/0000775000175000017500000000000000000000000014627 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.2096074 nova-32.0.0/doc/source/_extra/0000775000175000017500000000000000000000000016111 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_extra/.htaccess0000664000175000017500000001717200000000000017717 0ustar00zuulzuul00000000000000redirectmatch 301 ^/nova/([^/]+)/addmethod.openstackapi.html$ /nova/$1/contributor/api.html redirectmatch 301 ^/nova/([^/]+)/admin/flavors2.html$ /nova/$1/admin/flavors.html redirectmatch 301 ^/nova/([^/]+)/admin/numa.html$ /nova/$1/admin/cpu-topologies.html redirectmatch 301 ^/nova/([^/]+)/admin/quotas2.html$ /nova/$1/admin/quotas.html redirectmatch 301 ^/nova/([^/]+)/aggregates.html$ /nova/$1/user/aggregates.html redirectmatch 301 ^/nova/([^/]+)/api_microversion_dev.html$ /nova/$1/contributor/microversions.html redirectmatch 301 ^/nova/([^/]+)/api_microversion_history.html$ /nova/$1/reference/api-microversion-history.html redirectmatch 301 ^/nova/([^/]+)/api_plugins.html$ /nova/$1/contributor/api.html redirectmatch 301 ^/nova/([^/]+)/architecture.html$ /nova/$1/admin/architecture.html redirectmatch 301 ^/nova/([^/]+)/block_device_mapping.html$ /nova/$1/user/block-device-mapping.html redirectmatch 301 ^/nova/([^/]+)/blueprints.html$ /nova/$1/contributor/blueprints.html redirectmatch 301 ^/nova/([^/]+)/cells.html$ /nova/$1/admin/cells.html redirectmatch 301 ^/nova/([^/]+)/code-review.html$ /nova/$1/contributor/code-review.html redirectmatch 301 ^/nova/([^/]+)/conductor.html$ /nova/$1/user/conductor.html redirectmatch 301 
^/nova/([^/]+)/development.environment.html$ /nova/$1/contributor/development-environment.html redirectmatch 301 ^/nova/([^/]+)/devref/api.html /nova/$1/contributor/api.html redirectmatch 301 ^/nova/([^/]+)/devref/cells.html /nova/$1/admin/cells.html redirectmatch 301 ^/nova/([^/]+)/devref/filter_scheduler.html /nova/$1/admin/scheduling.html # catch all, if we hit something in devref assume it moved to # reference unless we have already triggered a hit above. redirectmatch 301 ^/nova/([^/]+)/devref/([^/]+).html /nova/$1/reference/$2.html redirectmatch 301 ^/nova/([^/]+)/feature_classification.html$ /nova/$1/user/feature-classification.html redirectmatch 301 ^/nova/([^/]+)/filter_scheduler.html$ /nova/$1/admin/scheduling.html redirectmatch 301 ^/nova/([^/]+)/gmr.html$ /nova/$1/reference/gmr.html redirectmatch 301 ^/nova/([^/]+)/how_to_get_involved.html$ /nova/$1/contributor/how-to-get-involved.html redirectmatch 301 ^/nova/([^/]+)/i18n.html$ /nova/$1/reference/i18n.html redirectmatch 301 ^/nova/([^/]+)/man/index.html$ /nova/$1/cli/index.html redirectmatch 301 ^/nova/([^/]+)/man/nova-api-metadata.html$ /nova/$1/cli/nova-api-metadata.html redirectmatch 301 ^/nova/([^/]+)/man/nova-api-os-compute.html$ /nova/$1/cli/nova-api-os-compute.html redirectmatch 301 ^/nova/([^/]+)/man/nova-api.html$ /nova/$1/cli/nova-api.html redirectmatch 301 ^/nova/([^/]+)/man/nova-cells.html$ /nova/$1/cli/nova-cells.html # this is gone and never coming back, indicate that to the end users redirectmatch 301 ^/nova/([^/]+)/man/nova-compute.html$ /nova/$1/cli/nova-compute.html redirectmatch 301 ^/nova/([^/]+)/man/nova-conductor.html$ /nova/$1/cli/nova-conductor.html redirectmatch 301 ^/nova/([^/]+)/man/nova-dhcpbridge.html$ /nova/$1/cli/nova-dhcpbridge.html redirectmatch 301 ^/nova/([^/]+)/man/nova-manage.html$ /nova/$1/cli/nova-manage.html redirectmatch 301 ^/nova/([^/]+)/man/nova-network.html$ /nova/$1/cli/nova-network.html redirectmatch 301 ^/nova/([^/]+)/man/nova-novncproxy.html$ /nova/$1/cli/nova-novncproxy.html redirectmatch 301 ^/nova/([^/]+)/man/nova-rootwrap.html$ /nova/$1/cli/nova-rootwrap.html redirectmatch 301 ^/nova/([^/]+)/man/nova-scheduler.html$ /nova/$1/cli/nova-scheduler.html redirectmatch 301 ^/nova/([^/]+)/man/nova-serialproxy.html$ /nova/$1/cli/nova-serialproxy.html redirectmatch 301 ^/nova/([^/]+)/man/nova-spicehtml5proxy.html$ /nova/$1/cli/nova-spicehtml5proxy.html redirectmatch 301 ^/nova/([^/]+)/man/nova-status.html$ /nova/$1/cli/nova-status.html redirectmatch 301 ^/nova/([^/]+)/notifications.html$ /nova/$1/reference/notifications.html redirectmatch 301 ^/nova/([^/]+)/placement.html$ /nova/$1/user/placement.html redirectmatch 301 ^/nova/([^/]+)/placement_dev.html$ /nova/$1/contributor/placement.html redirectmatch 301 ^/nova/([^/]+)/policies.html$ /nova/$1/contributor/policies.html redirectmatch 301 ^/nova/([^/]+)/policy_enforcement.html$ /nova/$1/reference/policy-enforcement.html redirectmatch 301 ^/nova/([^/]+)/process.html$ /nova/$1/contributor/process.html redirectmatch 301 ^/nova/([^/]+)/project_scope.html$ /nova/$1/contributor/project-scope.html redirectmatch 301 ^/nova/([^/]+)/quotas.html$ /nova/$1/user/quotas.html redirectmatch 301 ^/nova/([^/]+)/releasenotes.html$ /nova/$1/contributor/releasenotes.html redirectmatch 301 ^/nova/([^/]+)/rpc.html$ /nova/$1/reference/rpc.html redirectmatch 301 ^/nova/([^/]+)/sample_config.html$ /nova/$1/configuration/sample-config.html redirectmatch 301 ^/nova/([^/]+)/sample_policy.html$ /nova/$1/configuration/sample-policy.html redirectmatch 301 
^/nova/([^/]+)/scheduler_evolution.html$ /nova/$1/reference/scheduler-evolution.html redirectmatch 301 ^/nova/([^/]+)/services.html$ /nova/$1/reference/services.html redirectmatch 301 ^/nova/([^/]+)/stable_api.html$ /nova/$1/reference/stable-api.html redirectmatch 301 ^/nova/([^/]+)/support-matrix.html$ /nova/$1/user/support-matrix.html redirectmatch 301 ^/nova/([^/]+)/test_strategy.html$ /nova/$1/contributor/testing.html redirectmatch 301 ^/nova/([^/]+)/testing/libvirt-numa.html$ /nova/$1/contributor/testing/libvirt-numa.html redirectmatch 301 ^/nova/([^/]+)/testing/serial-console.html$ /nova/$1/contributor/testing/serial-console.html redirectmatch 301 ^/nova/([^/]+)/testing/zero-downtime-upgrade.html$ /nova/$1/contributor/testing/zero-downtime-upgrade.html redirectmatch 301 ^/nova/([^/]+)/threading.html$ /nova/$1/reference/threading.html redirectmatch 301 ^/nova/([^/]+)/upgrade.html$ /nova/$1/admin/upgrades.html redirectmatch 301 ^/nova/([^/]+)/user/aggregates.html$ /nova/$1/admin/aggregates.html redirectmatch 301 ^/nova/([^/]+)/user/architecture.html$ /nova/$1/admin/architecture.html redirectmatch 301 ^/nova/([^/]+)/user/cells.html$ /nova/$1/admin/cells.html redirectmatch 301 ^/nova/([^/]+)/user/cellsv2-layout.html$ /nova/$1/admin/cells.html redirectmatch 301 ^/nova/([^/]+)/user/cellsv2_layout.html$ /nova/$1/admin/cells.html redirectmatch 301 ^/nova/([^/]+)/user/config-drive.html$ /nova/$1/user/metadata.html redirectmatch 301 ^/nova/([^/]+)/user/filter-scheduler.html$ /nova/$1/admin/scheduling.html redirectmatch 301 ^/nova/([^/]+)/user/metadata-service.html$ /nova/$1/user/metadata.html redirectmatch 301 ^/nova/([^/]+)/user/placement.html$ /placement/$1/ redirectmatch 301 ^/nova/([^/]+)/user/upgrade.html$ /nova/$1/admin/upgrades.html redirectmatch 301 ^/nova/([^/]+)/user/user-data.html$ /nova/$1/user/metadata.html redirectmatch 301 ^/nova/([^/]+)/user/vendordata.html$ /nova/$1/user/metadata.html redirectmatch 301 ^/nova/([^/]+)/vendordata.html$ /nova/$1/user/metadata.html redirectmatch 301 ^/nova/([^/]+)/vmstates.html$ /nova/$1/reference/vm-states.html redirectmatch 301 ^/nova/([^/]+)/wsgi.html$ /nova/$1/user/wsgi.html redirectmatch 301 ^/nova/([^/]+)/admin/arch.html$ /nova/$1/admin/architecture.html redirectmatch 301 ^/nova/([^/]+)/admin/adv-config.html$ /nova/$1/admin/index.html redirectmatch 301 ^/nova/([^/]+)/admin/configuration/schedulers.html$ /nova/$1/admin/scheduling.html redirectmatch 301 ^/nova/([^/]+)/admin/system-admin.html$ /nova/$1/admin/index.html redirectmatch 301 ^/nova/([^/]+)/admin/port_with_resource_request.html$ /nova/$1/admin/ports-with-resource-requests.html redirectmatch 301 ^/nova/([^/]+)/admin/manage-users.html$ /nova/$1/admin/architecture.html redirectmatch 301 ^/nova/([^/]+)/admin/mitigation-for-Intel-MDS-security-flaws.html /nova/$1/admin/cpu-models.html redirectmatch 301 ^/nova/([^/]+)/contributor/api-2.html$ /nova/$1/contributor/api.html redirectmatch 301 ^/nova/([^/]+)/admin/security-groups.html /nova/$1/user/security-groups.html ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.2096074 nova-32.0.0/doc/source/_static/0000775000175000017500000000000000000000000016255 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/feature-matrix.css0000664000175000017500000000100400000000000021717 0ustar00zuulzuul00000000000000.fm_maturity_complete, .fm_impl_complete { color: rgb(0, 120, 0); 
font-weight: normal; } .fm_maturity_deprecated, .fm_impl_missing { color: rgb(120, 0, 0); font-weight: normal; } .fm_maturity_experimental, .fm_impl_partial { color: rgb(170, 170, 0); font-weight: normal; } .fm_maturity_incomplete, .fm_impl_unknown { color: rgb(170, 170, 170); font-weight: normal; } .fm_impl_summary { font-size: 2em; } .fm_cli { font-family: monospace; background-color: #F5F5F5; } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.2176075 nova-32.0.0/doc/source/_static/images/0000775000175000017500000000000000000000000017522 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/images/architecture.dia0000664000175000017500000001527200000000000022672 0ustar00zuulzuul00000000000000 [binary Dia diagram data]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/images/architecture.svg0000664000175000017500000013026000000000000022727 0ustar00zuulzuul00000000000000 image/svg+xml oslo.messaging DB HTTP Nova service External service API Conductor API API Conductor Conductor Scheduler Scheduler Scheduler DB Compute Compute Compute Keystone Glance & Cinder Hypervisor Neutron Placement ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/images/attach_volume.dia0000664000175000017500000000542000000000000023035 0ustar00zuulzuul00000000000000seqdiag { user; nova-api; nova-conductor; nova-compute; libvirt-driver; os-brick; cinder-api; edge_length = 300; span_height = 16; activation = none; default_note_color = white; user -> nova-api [label = "POST /servers/{server_id}/os-volume_attachments"]; nova-api -> nova-compute [label = "RPC call reserve_block_device_name"]; nova-compute -> nova-compute [label = "instance.uuid lock"]; nova-compute ->> nova-conductor [label = "bdm.create"]; nova-compute <<- nova-conductor [label = "return BlockDeviceMapping"]; nova-compute -> libvirt-driver [label = "get_device_name_for_instance"]; nova-compute <- libvirt-driver [label = "Return get_device_name_for_instance"]; nova-api <- nova-compute [label = "Return reserve_block_device_name"]; nova-api -> cinder-api [label = "POST /v3/{project_id}/attachments"]; nova-api <- cinder-api [label = "Return HTTP 200 (without connection_info)"]; nova-api ->> nova-compute [label = "RPC cast attach_volume"]; user <- nova-api [label = "Return HTTP 200 (includes device_name)"]; nova-compute -> nova-compute [label = "instance.uuid lock"]; nova-compute -> os-brick [label = "cinder_backend.uuid lock"]; nova-compute -> cinder-api [label = "PUT /v3/{project_id}/attachments/{attachment_id}"]; nova-compute <- cinder-api [label = "Return HTTP 200 (includes connection_info)"]; nova-compute -> libvirt-driver [label = "attach_volume"]; libvirt-driver -> os-brick [label = "connect_volume"]; os-brick -> os-brick [label = "connect_volume lock"]; libvirt-driver <- os-brick; libvirt-driver -> libvirt-driver [label = "guest.attach_device"]; libvirt-driver -> libvirt-driver [label = "_build_device_metadata"]; libvirt-driver ->> nova-conductor [label = "instance.save"]; nova-compute <- libvirt-driver [label = "Return attach_volume"]; nova-compute ->> nova-conductor [label = "bdm.save"]; nova-compute -> cinder-api [label = "POST /v3/{project_id}/attachments/{attachment_id}/action (os-complete)"]; nova-compute <- cinder-api [label = "Return HTTP 200"]; }
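The attach_volume sequence above ends in the Cinder attachments workflow: create an attachment without a connector, update it with the host connector to obtain connection_info, then mark it complete. Below is a minimal sketch of those three Cinder calls using plain HTTP via the requests library; the endpoint paths are the ones named in the diagram, while the endpoint URL, token, microversion header, IDs and payload/response shapes are illustrative assumptions rather than the exact requests nova sends.

# Minimal sketch of the Cinder attachments calls shown in the
# attach_volume sequence diagram above. Every concrete value here
# (endpoint URL, token, IDs, connector) is a hypothetical placeholder,
# and the payload/response shapes are simplified assumptions.
import requests

CINDER = "http://cinder.example.com/v3/6f70656e737461636b20342065766572"
HEADERS = {
    "X-Auth-Token": "<keystone-token>",      # assumed auth token
    "OpenStack-API-Version": "volume 3.44",  # assumed microversion
}


def create_attachment(volume_uuid, instance_uuid):
    # nova-api: POST /v3/{project_id}/attachments (no connector yet)
    body = {"attachment": {"volume_uuid": volume_uuid,
                           "instance_uuid": instance_uuid,
                           "connector": None}}
    resp = requests.post(f"{CINDER}/attachments", json=body, headers=HEADERS)
    resp.raise_for_status()
    return resp.json()["attachment"]["id"]


def update_attachment(attachment_id, connector):
    # nova-compute: PUT /v3/{project_id}/attachments/{attachment_id};
    # the response carries the connection_info handed to os-brick/libvirt.
    body = {"attachment": {"connector": connector}}
    resp = requests.put(f"{CINDER}/attachments/{attachment_id}",
                        json=body, headers=HEADERS)
    resp.raise_for_status()
    return resp.json()["attachment"]["connection_info"]


def complete_attachment(attachment_id):
    # nova-compute: POST /v3/{project_id}/attachments/{attachment_id}/action
    # with an os-complete action, once the guest device is attached.
    resp = requests.post(f"{CINDER}/attachments/{attachment_id}/action",
                         json={"os-complete": None}, headers=HEADERS)
    resp.raise_for_status()

The steps between these calls (bdm.create, get_device_name_for_instance, the os-brick connect_volume lock, guest.attach_device) happen inside nova-compute and the libvirt driver, as the diagram shows.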
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/images/attach_volume.svg0000664000175000017500000004236600000000000023111 0ustar00zuulzuul00000000000000 blockdiag seqdiag { user; nova-api; nova-conductor; nova-compute; libvirt-driver; os-brick; cinder-api; edge_length = 300; span_height = 16; activation = none; default_note_color = white; user -> nova-api [label = "POST /servers/{server_id}/os-volume_attachments"]; nova-api -> nova-compute [label = "RPC call reserve_block_device_name"]; nova-compute -> nova-compute [label = "instance.uuid lock"]; nova-compute ->> nova-conductor [label = "bdm.create"]; nova-compute <<- nova-conductor [label = "return BlockDeviceMapping"]; nova-compute -> libvirt-driver [label = "get_device_name_for_instance"]; nova-compute <- libvirt-driver [label = "Return get_device_name_for_instance"]; nova-api <- nova-compute [label = "Return reserve_block_device_name"]; nova-api -> cinder-api [label = "POST /v3/{project_id}/attachments"]; nova-api <- cinder-api [label = "Return HTTP 200 (without connection_info)"]; nova-api ->> nova-compute [label = "RPC cast attach_volume"]; user <- nova-api [label = "Return HTTP 200 (includes device_name)"]; nova-compute -> nova-compute [label = "instance.uuid lock"]; nova-compute -> os-brick [label = "cinder_backend.uuid lock"]; nova-compute -> cinder-api [label = "PUT /v3/{project_id}/attachments/{attachment_id}"]; nova-compute <- cinder-api [label = "Return HTTP 200 (includes connection_info)"]; nova-compute -> libvirt-driver [label = "attach_volume"]; libvirt-driver -> os-brick [label = "connect_volume"]; os-brick -> os-brick [label = "connect_volume lock"]; libvirt-driver <- os-brick; libvirt-driver -> libvirt-driver [label = "guest.attach_device"]; libvirt-driver -> libvirt-driver [label = "_build_device_metadata"]; libvirt-driver ->> nova-conductor [label = "instance.save"]; nova-compute <- libvirt-driver [label = "Return attach_volume"]; nova-compute ->> nova-conductor [label = "bdm.save"]; nova-compute -> cinder-api [label = "POST /v3/{project_id}/attachments/{attachment_id}/action (os-complete)"]; nova-compute <- cinder-api [label = "Return HTTP 200"]; } user nova-api nova-conductor nova-compute libvirt-driver os-brick cinder-api POST /servers/{server_id}/os-volume_attachment s RPC call reserve_block_device_name instance.uuid lock bdm.create return BlockDeviceMapping get_device_name_for_instance Return get_device_name_for_instance Return reserve_block_device_name POST /v3/{project_id}/attachments Return HTTP 200 (without connection_info) RPC cast attach_volume Return HTTP 200 (includes device_name) instance.uuid lock cinder_backend.uuid lock PUT /v3/{project_id}/attachments/{attachment_id} Return HTTP 200 (includes connection_info) attach_volume connect_volume connect_volume lock guest.attach_device _build_device_metadata instance.save Return attach_volume bdm.save POST /v3/{project_id}/attachments/{attachment_id}/action (os-complete) Return HTTP 200 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/images/create-vm-states.dia0000664000175000017500000000177300000000000023375 0ustar00zuulzuul00000000000000seqdiag { edge_length = 250; span_height = 40; node_width=200; default_note_color = lightblue; // Use note (put note on rightside) api [label="Compute.api"]; manager [label="Compute.manager"]; api -> manager [label = 
"create_db_entry_for_new_instance", note = "VM: Building Task: Scheduling Power: No State"]; manager -> manager [label="_start_building", note ="VM: Building Task: None"]; manager -> manager [label="_allocate_network", note ="VM: Building Task: Networking"]; manager -> manager [label="_prep_block_device", note ="VM: Building Task: Block_Device_Mapping"]; manager -> manager [label="_spawn", note ="VM: Building Task: Spawning"]; api <-- manager [note ="VM: Active Task: None"]; } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/images/create-vm-states.svg0000664000175000017500000002420400000000000023431 0ustar00zuulzuul00000000000000 blockdiag seqdiag { edge_length = 250; span_height = 40; node_width=200; default_note_color = lightblue; // Use note (put note on rightside) api [label="Compute.api"]; manager [label="Compute.manager"]; api -> manager [label = "create_db_entry_for_new_instance", note = "VM: Building Task: Scheduling Power: No State"]; manager -> manager [label="_start_building", note ="VM: Building Task: None"]; manager -> manager [label="_allocate_network", note ="VM: Building Task: Networking"]; manager -> manager [label="_prep_block_device", note ="VM: Building Task: Block_Device_Mapping"]; manager -> manager [label="_spawn", note ="VM: Building Task: Spawning"]; api <-- manager [note ="VM: Active Task: None"]; } Compute.api Compute.manager VM: Building Task: Scheduling Power: No State VM: Building Task: None VM: Building Task: Networking VM: Building Task: Block_Device_Mapping VM: Building Task: Spawning VM: Active Task: None create_db_entry_for_new_instance _start_building _allocate_network _prep_block_device _spawn ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/images/evolution-of-api.png0000664000175000017500000007316500000000000023441 0ustar00zuulzuul00000000000000PNG  IHDRsRGBgAMA a pHYsodv IDATx^ۓə]IÒ 9]m01RkQfHDʻ<6%Y  ipA C7;vְatэ>O̬;Whb7˓cʼn3 6A@Y 4[sSI(@;ެ}SkbmOn_d 줭-^; P41ԛ;&6?~rthb7{s 66} M pgݓ¶-^;{D&`^D&iOmmfqM G/ҖncA@1v+v%RP{e;Z8 A@/Җ'w֓0hi WN.&.`A@)Xm7xM xَ&Qm%`$A@dnչhbhg<ہmܿM M-;2bA@xrn;GWO?[Oh&&-msv2q #oz{[uh/ @gvʼn3rll~o&@CA@rَ:uq M S=kg7̠Y!dَb?57 M Wv'/2a!pَxJ1414/Ql'N'yCA@CDz-mmAR-hbh}Z-88&пe;Z•SB'&pI8(j 'Ro˓c;Ii>9 `َp⽤@PPewLuM av49bo+)T414&:Hj y:uayr̓X=ZI zQ+ m=͏՘)41t 638~&XBؖncE_A@B,Gs"•Shb@W8~&]wwgQ1qS=M 0BmMOzb -\9x/9 pot1\l㽉LCנᴐJyu)OBa39bo+9hb"On_d4ȚxArM@jXīQr@G*1OagMq& Z'MGWO33C6g'+ J&`l0r뼸ؒJ&#`lXgkɅ@D0V?ɥ Uĕ8F'/"ZP7BZspzOnM%4@0#"Vng81EQA$o,mMM_ZvSJrC Fý{M<~UO7`Xm~6F yԅGWO{BF֦/$7ȃ&?[8 lƹ>y@tg0aX1M MhO 3JxiL@@}1"&ڵ+{ P/?}&_aXp'@&Js|t7ƹ_xkb{M/G:q>M.hbs" ]˜'[B !I.'}Q;z0U' @ =Jxmf51τ$=?<9+W>?[{͏ǂhCd"oohWq$PlyATý-ڲWĕdA>kiy-=$fj41@UXxob9ðb[DOV޾K7m/Q 410a5{{Mq-孷i~>aU̕<,[8#ޫCM 08V6g'ă$֦xX:-}gc#>Yq]8%O^wֽRa0TQ hb~<63@a6?~R'?ՁeQ1lX_McSM KD+c5lzSi7/&ʰymlXLG =M Yd %*.^{ө}S ; waՃz s>m=<:H;2yed˓c=M 6LUvb ʧ<:H'?,gv񓢪sz ,LUT\3QlX?lkNg85N6t]G<:`{@+s[u^=/llܿƪlz?9QOjv^ٰڣ ! 
`z'Ri/>!Q۶qhb Rm?WYOGE{ezhkjo+hbiw֟)?N}Evzezeg+hbEf25~oOSi?DLyezezv7hb!Xqa<'>IՁ?eXz+[zzAA@9ښZ k'l7>ucwXfe٦߼VcNI侻x+ֽ M M`{Fҍ~>aO۾Ͼ+r2}+ֽmܿi0 V֦/-\95-l]'?>i~Opo=1T&#~o簑5U|7En>YIszn$hb+z0H`7_zS7}]丿R,0`P3.ay{t^k?{_WnJui+뺅&pa +~Ɠo|#["ʀuipoKG0hb: ư&y<;yBWZ~p_b$ 41T cX {r{Ƨ?•.mmzp@@`0uc'VtM C%V°>}s_/>a͂]-^;^``mmNfk0 뇭L~ow^x>_|C"'}q?Obvq'+?i"hb˳On_ `O/, G |,uYON_msv2n6hb;;30UV^z0:lm>ES7DΫ/!lyr,zhb# wVn10̷;ɳ*z^y^ai~8<1g6N/Oy~ 0̶[&T41%0 +c>JP9t ck;g(T 41tJbG /o.[xV =ðM_JP1˳SH٣3[Z~0~pGf/9a}yU6g'+0{Ăv08qF<@@@)s|knj^h_U6,߼78yiG 1Sp`XǶpQwJ7Lei&JdM -8ZYZV/N&zO}c޺ɉ690LS[w'\rL6ޛHJ&(|?gL(Z]'G<[`_䇯Nx13auֻ[sr-!$hb dOzbgE~=!C'>Ϛ5V3]zC~G?e:&=^gޙO556WMxcѠv,8 ƈ{uPƺ7=U5=~Uqpfz7>?d5'֠j1pW,'G_= q-ũG 8F;BXmN&NjYY?g=C{Yc9 =3X%yGZdq%jJ 6-*g?'d{h|EByYcelƹX4-p_^o(ibe?' M Pu5aOʞ,>{G$cD ibU\ZnVծ;+LvN]H|AAT!6__r{b3 iٞ8ĉfD_Ś8 19qJB>&lMye[i.i hbj!*>d˟hwYCGA}S4":،v(Įz+TrYɚv_gsPHƺ-\9?! $4%O\zJppnF.&B=}{bU3X>[k~q۵_䨉άxb@j$a`K6$&4B N]Xe|Wd}̓%MiC||tPhq='rt7yVWz Q-rҍsh$hbqNp=@vwkپO1G{f,iF75 /XX-5q[Or441 `pm˻K૟oN}c{X J X8?]eY& MIl_Q'Zcsv2IM _aISKc|=,ON|^Y<ۼЯܾ(Ztؔ_;&ij&NFD6" W^vvm)lJAX<9z|t LL%-NY{$~/^ M KA c8=&:BzwVf 75=cOL 'A.?r 0bM !P< F:X>Ͷ }l)}%s=[NHzblƑήzc̺AZj# [\3rَ?G>=PX.:+ =*j41`sv0m~⹗rdhbJZoreEsV=dF2Czcv&[Co..Tjm@)s̕{Pe%VX km֌ 1VŠ&`E:LZ{"d"Zeh9jTF M %qI Y)wJ&_zs~(UI4,T R)6k[`UuC[sSp]W ڕUlbye,| v8.f\T%cM #ͳ1Oac6Mݞ)TCkTԒEP"b5`Iftr欨* m '4QZrɫ4rEe_LNc:He]ReZ0XB, Wמ^\#r,鼒aD9YrQGWO^+ 9'B6'Oun&R3C70׃Q*A"/"cw k8Qd'(FʟAj\Ƀ۾4$'ʇ ۡ0+~ap[nĎ w.Iqv5kI`\K+,a@ȡ0``t:=[TYI۠i5Vymx1^8|rF4n*h FK^i⢕}q&3'i\ Zh#y)Khvv, O7$8+vܶK D׌enAh<ѱ!Ao-\Gisj\u)JEF<5VҾPVȓa @d}b CF4S\jWDmm'Gˉ[@ހY*+D E09jnj~ڴQ#jfz&;/VHV41 ;+{ kw94bNCB6 Vm5n XN;al;xԒ4·_5\K)-aAC%ޛ`DMթ ըX2SkLȴ3RUed7B!MrqZMMV*&QїACYiOBaͰ1lm>9كę hD *%lS&ҭӮ4}3\&M m&r`13-kTr4qLhazi?%_›0}ifM #M ͳɱ&N5%֗2ь7S$RUqA4,K&zp&Nz]m}ӄM"kX$M M ́0=Z5qV.8A+T1_颉⠉  I+Fpԓ][G:LHY`^zˡ:r;_.;!yhb= w,QS[vv{sɹm-yC -Hc3vwJui M 5`k;z2 鹄w֓  |:tܢU:}U~Q.ZuZaMy.M D,q:3Kj}Yfĕz[IsWC?54-sx~v?~<[_v֓\X5M|00:et}wEjǑJ{n"N艇=ՅUЖnJP[\JOhqi~ Ƒ=Ǒ6S 6am J&ڰ<8q^Xly9P#*n,<;5z5ū5٘i[&p6}ɓ_XEl~F+kOxⲚzf']r_2+M-AC]e •SEegArX]-ĥ;W.(-M.]yFGu)I`lRKpm.]LO0PQN/\9 2lXlP9wI)|:G@AC؜d%ęo+ 'zh W^^}q{sƄ_{wo!ab_J M ç 5/$W^W>+j+dU:6hiriwe&(7}liW`alo+/.KrR<>>N~PԪHA}%#CaCHzblƑήDQ]R)l+fv% M än|pg]g$wOSr^^;»˳C^y*w>`!>s\*ejI4{z !?I퉱c;Y,6N]6]Y̔EȔ9=(FѲQ+uT3hb{[}=,q_맿7_G}7|Ͻ?^_*ɯWn6m Ć>@G?% %ůZ9\z?yaEL*HlmQL˜Wb4q]ו,veŷ!WrXTQ۹ :.41 ~=h!>>>?;M7g~;BOUH^}Oj&Kc%Qy,)ReRo"\%4vxp;C_U,|מ޲Pg0PV.xX 8>~79q?xgGG"Vg{ΫCT 5hTǵB*\ )oPv`F_Oj/W3M( *Ibrv(̊vܖ f=[z]g7D̗?LJ{⵳f 5 C=lwkZK>N'yK?w{^1 pp+@c@CZAۗ/Iv<=>:wֽ,k !&&pz(6UٞlS/?j/Å*mڂuoXM/Åzl_hi'uo?X-^;C+41㣃oznW8::|O_~n;O7vO]_Xyz+~9:|LLu0JãӐ[bm3e9= $36(ҡ[tGd:F #}2}sRݚN7!d6Z5{nl?65?e~IL4NQ)4:jWьqpuB+[o>b(W rhq-Io^B{OEiMh)T-MV^xeƘr.#ؕt:h}GIgϭU ZF2DN"ZPA[0tM񒗴'/%RTZKKdg w^;N: U cp.NWeKD/ʌr5 /<OmjL`,r'&hb(v%b&K; w<),⽵w;b"XȞE#MbLZi,!Z鰬gT"%%CvŖ5cMƏ&ܦ}&aJQZr#$:M ax.F4ޖ'ǶO\>ؑ};(}JnjZ0ZJvmiDt&Lx~ / ԧ*d,;)i]&{[By*6?~R*o,%5PSdgԩm٩ZHĔ0M>32~~x;pk<]1D>8"ym,N$Sω_T41<[Z0=Ͱ7.((`9@ R%%Ԯ- O}ɨHc.|\LAQ?_)q[Ɂ;qBzNEAn!k=~Uh6hbX'G&0"ArW=E8px=`6 M rNmzxM hQgg^3f Җncn5MZIjF4(rt㜧GbAfA{s#;xqAR 4h!'GĖ'Ƕ$&hQ`mfS ?o| yΫ:lf)`tA#$ %ί/& /^MnZ`ɜA ii_86V-rrZfC& y3"hh&<;:SLM_:YO '8&4c,čeᴧjXX+Hn M[.!(,ɑӖ/Srʁ&= wvd̙a< ,f`P$kD3JW&(elӼn)5/N]៸f&<:4U݉r*ٺCbZNIN 0 '%`On_D kdc:ZU6ķ)HTZq&;QPd)m}9+KRSܽJE*;_sj@xf@W+8N||ZA9qs;v$hSA\s'yH!hɸp'qc~r4DSY{ @PL,_0tL2^r{*<͏n8 ϐ/Y~Zdۍ/r#;UxGx%?i}UV4+-mT;͍x; 4y6_ A\QX3(C>rJP-^0 Q%_ԯhɮ<4u&]j nn> b1ZgUZ`Wd($vb\&0eLM)VRՌK^=&u|9[עdܶ8P=O+ [χջuh%;F`'׫P[}7=U:,?9; z@Rjk]JuNrl75ɿ[+0U6?Be6(wP",IfB9׋,kpgĊ_J0S.te.v1[zK=XYH(3-[eHN_)f _,;2@oqt%l.C݇2Jt]BfɤX܃"_7~'VHB&n(2gZ ,)qjCJYk18yNW{[`^ls,NbG:v .H0³61@:YDBEŸޮPl !=En!Mr^Thb41M!3Ή8m{WDý+/{6?~rsvR Ě$5j*e? 
.O8V4kWg:g^X6Sf;;\ᔞ޵&|㧇 ӬL.lm{m'>@>7ya8U.[ICӋ +7 bZRek"XȎCnSYdskA5t!MgK'ԍxUM6{sųF+Dp~ϊ<m7#EѾjk@:MuәmT♥ HI.Ћl.6tKn ̻-H9OV\JV }\vbEXϖ^OB M;iWW?)}>=XϤ]Pɶ![S-^65Ԡ'rKZ Ug[N(~x*dbb%g;f? /X\MZaޏ<.qk$jX_iv@8E5)7BGlJf@ҍsr͛и"vL.ylkԭlRN_5*-i\ A:ڻe|J)D,MLw9˖ɁgLыS{rku;Y'e d:փa9 1CWNaK:v bqQyF-v[K$).7BU&n'/z5h}SۋH<߳9]7X U @(5^B8k"JkEKjnųtcʹ`WҦuD:Ξ[* d9a2=heԕ<ܹp9^T-&=;I*\94V/H3pJc|:eps(m`M74n䈬H@:DL'}SyY}8qڼ:@I~kK<@Zz"kqST ڪkFu?_HApg})#y[•P^{I'Ϗ~h:^Lw"LSY^y4OY Ԗ&NDg-^O!TXvS~C2_*j4n6@)-uYOu7}ݓھOnmML?wag gֳk!SRW䯮"4 hOBOlFTX"#VYq4z{Gd Mew9c{toΙ``j/`a]8}a-cfѫ0ÒE@CAYlOLlK4E41@ظӲ=_bQmKfBHKj釛ⴾYt"yV LM]0i3CV\L#.70hb(oOn_d~ &cCT56SesHE$DF(K(p3Q83U8u S*&v8zt'gzgh2Xlm ^3G܎%x?\呿rn8@ ZL-sn5hbg#P`ߘ ˍtBbXB;&*&QJ#2? Y3x)@ƴ.SռYY&xMOvi˓cL0*MEm]ɇ OU(XB_s% &l9i7H,d!ԛ1ٰ'NƔz:~?h6RR4qIOvcwd1a7-vkÂU4l L]G y1јc`0]+ADocʯ*5# pr!RYPi >H#qc?Mc嬨y{uXb81uFV9OcM]~Og- *jnQr#/HX;҄$fl;PvFW?s#znl{vb:Nj\a@4YeZ}*˟$@K%#{UgY•C7,~upqPͺ]kQ /gݬ^{VĒKM_.^yqb9p@Eɵ+A#YKj)Cۦ3X3Qf  o7aN4_[) ySb\cuM9iۡNh=M†&z6PG" (B~b4@X՘$veHv[{pĜ$IQ'P*i/1CAvԹ.0ꚸk8oܿ8͐OQҶ&NѷJ.>ѥ #I!olұrW-bj5x-*#;5!YO\@}qJ'pZ5 }ͨs'B j\uSxԍ)q%;Z(oxP=FBw0jբPY"]ήlbZ:\'o/!m5QY|[&l*QDX࿥!DP"˼r9,⣉0wq4n^+(|Mթ O1їW}G[dbs)^L%֯(ER2G:E5q4ήp[tD`g*?R?N {b?h@?p; Fh. Bڶ;jbmfNM NN*_x4΃X޴,6*y:TV<[lyn8mJH[wh!rŢ&޶GO~fT;B5`q⌧zc6?~rg^+,+&,+9\?8ݨYGʦv9#e6 ܃GdG",/ge [v/ܽJS Mc57=3&!n8JrisJbpW%L R<ͪqvZ>76{eD Tf(#-b׮ӭU0'' eVlXbK9IPff>Fyv8}4Fijr۳:l;M%,m҉B6F.(&,3z?8z18 Sieu \[WcըF+d)N*414Fi[=YiZ4rfcN6c/h|5%jîJ:*U` e T8,NY [բ|MSzy%ǐ"KfPgwy سL^&)%&V)cYtmFBPOd^S+URI:+QK%_r"7LD܌-yR4h▟ r@c Yw1[L'6Ήhݲ(hTjD^qv#4--ZK;EdW'Ѭ+ֹpakC!wէӑ:/>C@"lM*@%gNvU;94Do7`)&?mod YNEXqm݌9mCf̕ ^6z~"UT+7*O}ލ/rtr\KnY*/[M: ~ء T ]0 :* 'NͻJVWԳ,ybA4Al|Z7XA,mhԵôRN"\ۢ[͕N(h#M{LE̴Omhb+ƒ+ 4} b&Ch O׹Ѻs ^"`͵')IZ3<+R=lq I4OkByg^i6C%N[k֔P`ơe= ?94]zpLZ$ĪW5EL$Z0qZ E%-0ph<½< !-bGtXx ã8iĻ˳IH p4eۆkKXR'4t. oaфB:exV u#TRoM||t8qSª(%LHCNT?=DOYɹj,ɒLϸzN, >cζu (p.GHX*x~RE/Wfo/8*8A0ꭉ7_԰ bBs{mz+~"%ԮQZBEԞ?c\_9vp:"nK%RN4%XS|fv)xh|22ۻColJ"ScM|nʩ$SpCPR,<MjjhsjPpdXWT|zͷ{\uW˿yzQ@ߨ&?lm>N1:>=uEϷ!׼/"T&\v*hͦ',0SrzJ<8eOW[6ԞNR*Lt2f8W/@ߩ&^qzC.VHi_FԤ_FG+N'NmKW.#N^Y.rG]~y~ҹݚUG*9_LU6IT4key3כj2Myn['G8D]+<,\PT*T80XjM'qՍjIm2<\hp Uvu"O({\/\P`7S;ovVTCU*1댸f3<{IE+b^@A*S?UUx#5ytnJIR'MlB%w$X~?Dj#C=LdsZl~J7 Mp``XC2mVEx[Shb!v;3E++3=%?Mh9q2-{,Am4 ޚJBѣIbq*u|܋l{Cc'lm rՁc v|A̕1^5"@ Չ' \ݠ"[*0Q-mya$8@f$ER*9j18drZ.'1ˋHG/ĺxdFꦵ$RWY+΢Iʝ<ϖNo}hL]ݘEŴZ5d2Ms*JO*mf8|Y&rRNdU:B+d,kT*r)MfqWXbܰb0c'?ć{[ WNR 7s-{L*M(uN=,ovvXrb۱q]K֏mE#&2w\v މpwţCh3w?i?`[jLvo.ĴRv=NNaW"vdjfƓQhq5qL4WU2IP?$?\hb'5U_CW@;>&QM &^Ǝ3` d+ MZ 3x&gכLR/v;M 'Yr!qUmhѿF5e§*#X*a~ڔ\^`4!Tѵq x{P!&-V:QR/] [K&&"7kY 5޲6 i^_0Χ4˺|Df} {zĄh=Fc=b1 czIKnŌhOf+kW/Hf<2n1pze|$fԇ'Mh?( c8Yu9PF }?s3m3Y xq|cAWZ%1f":L;vA|X&)^@̃&W dxaFBBGZUhb~"Kd"4hضґsc3n1 N%Nm>+0Kpr;ۅRa-\T1;x1c9p-ͮq֢0X|gN%,/΂뺊VH V MA69v\s 8d/!h(u=iݓb&${o`k"jhLk' f F>qETn||+_+<ܰ2䁓b\+ hb.p7iŐ^K]9nl[HgdHQIF^T33$ 3 ,tR2W m*Im EѬBg0Y$M Pab: } /ЙAG _yM Pa"fPt0/X ,# kDإ`2`p6u:Oͨ?F]?d`~]Yn`aeC7ҢU၃u bƀ& R_IY.")bQPƗ>s-Fdʒ D(~,D,*Յf3VFoRup `QbSq_Vxܖ p:FCBђ]u%z3 :g}o&RKu Q%E>76cu$ᙟpFv^ԑ:By\e A T@ejtO&#mCDVXG05ٖ –6衇M @: J"%7=rn/'0lCX@|{{e41F41:hbu0ꠉ`A&QM ƛWtOU.)mABGԡ byx<&q%Bc֐}ebzUAfsAfd\!9ҲbΉ೑t6ki;_Qቓ G,Jc]-g-IkVT,ǔs$t`pT7XY)NeK zZWKp#\TaYZ! :tngv)wKVT4njųQت@A砉F-y[[R8q"NXvb8t6+MO,`d /=41! 
uF TTbAZi'ŗᖊuVݜOA̭G)b1I4~4Gh.vk:M 0RztapvЉ[7܋P@̕&'K+ı7:h|41hcV3VU2 iDRI#c4i+X$G ?ϖH$R87SDD)4]{a["n`*щ ~0ɞs4mP*E0Q`&\g11<بH0_ضR6\6 o{AuWsx]c۶-$d;1{=fgY-Q|>RCCBЪh+D gO&˛%Hat59I&yVXS'N`r';ԛM޺q#14/S' `hpP |D KRm#h'Rf{|<~~OZd+< tpӹb^A4ʹD"dﱘ0=zqGŤIvoLY["2Mr E<%+Haw&[SS, Ttf|pa9J ^}-GvfmՙYbq.?GW&''Y(IBZ*r#CDhuv:KkkKs"@ gʪ&7ޞb'>2PЪh*e|tTɎ87ofFjl>sKnqٳ|3\ejvR%B01Y"*즨a\&9:E LLP{\~N#oD"ɣ .X*ƈ+ D#ezi\>/P#30OT$:4EEa`bmquf'oD"O,+Ih2 3ei`>avy`*#Aƭ؎#oD"Α/7!Oz=LNOj5J\"x`$rp'/U%=6Jr[T"dgtLϣA`J<fz(D4<$ט[X HaL޾MuH l"%ŵ7hX&C7{>4KjUD" k,Wd76rJɖͫWCŢ2#M$)LcR]fY.bFE$**GJQhfׯ|D %N=čí),ۖ)?t]Zğcxw2SU(j,e2RH$Rl qz: "()w;ܼ}[&J>qvsmM&}%a0,,H$R<B2ѣ<;!# J%i2n#cQõDz|6K8DQﭑ\EpBT]Gx<ܜ";y{lYXğG]m& \-Har͙i*$.֮_l)O8t:>{ss/,pI9J| L$3ݦj1=Le`_4D~+Had [UAbFfB5jB,JH'Z%dWVW+'NCcFfG2~E9QnJ>fhXTٳh$)LޛVlf 4vtbxs$;sN|2r 0@"XI=b.`xliKrרV;;E!=2L.%1x6J$]YF-,.S6%Jz,MD§Jn]\.#0(w>DT;mih=[6t:4 i4sgշVqx^/z0rV&J$RܛJJv@4+Uts#>yVz r7T*1XLrtMSZ!n)䉦Ө??7YEH ;F.wZ)ޑh_ZC&#ŽD"ɏ `~q  9-'1kQ¡/JַoP0gCa^6);a*p׻Mu=>ТD}sj%L$RK$R|q\L/}GHc2-qN-ORpחTz; 9n߾ͅem 0 EBuzϓ lSWp݉T KQ19O>)'Cܼuk9X ͪnBZ-BjdZ~exqI$Ur5 ]TQ !yԁɭF]RfE8|jT{&}'N`LDK299/Jq/Av 8 'gP16nWDrI\aSruMkNV[u?uA?ۄжmfBH0qy<{vli=zq]Eqgn}jxjEБH0Ԫ{&~9;XʨlRHJveyuE{u[\BxdS9Pdqu%#J]]H07uۻs"Wgq9[}L !p]{?µiVɾNH*ɲ,} iw:R?ҵ;d_rF*X{±J$.L,rZ^,FI._KJV/^oۺkRus_c^WԺ]yHKQ(ҒBGHxW'ƻ^u73e閿ԭ봘 ^yP#Q =`iyGpH ;Q5Mrk"}]\EhFHdmTp4+l M a1& ɳ PU:&h-^37 2; 8J!?=4XnG?-U,BÄChR "ig7G\{Z">=3ֱtjd"!OBjht;NelD35UA4|^/!Ϗ' B;"L kk@G:>۝%Wok:c,2u/XTP5 6'?ѣTKSYͽE8re'.q:$d0$(ҳk"nFin^[ =b/pOn]sr&G818LJQo( SI ac_nO1CO]|Lt$JuvK .]aqoNJO8CXgN_OCS)qd|\dGBPVɮ+q4@,J*x3/PuK0u]z.k+ngp`>8$/; K34 ¥VK\,Aս]MQF3;C\ר1#ۗt*ܭK ceuU }JvyG7s~~ <(D>E  x'x$^?Ñ5V Q;4'~,Y96FmR.8a K/Q o(--DcKǑG%[ƶm^|!H0LS@?uYko160=#NM~Ucx|73].-~л^RP~^{?fR%,pz.ȃ2-R{ىv!o:?t޶n|/K4a xq]b%:{Urq?=wM{<qqQJrEǏmN^/0qܹ=!ʷ-bV;zu3}L[iu:zb@80|>Jrt2%g}D\2ރ?M[9?'8%QP| Dܚr 0rľHV`4gyq뭷8y],T^A~mP""3ڷ Jj 8-O⪦ YY~,Z]^ ![%9&x8IX)Ũk8#Vr_l_c.J0"N;?=:ys]}l赻]HTZ*mN=9Br!WiB8RIapbJ4[p[%:?U.uV'OIN˛׮\' [V5 crv]{<0B(h.-DI fdMNRcy`<(QwDI =EU+F~%Nޠi?1w.൭B,ƛ30 ~ba_2)W{(J'~jtDۆedĺp *D2GyzDI&(v#%WoܠbI'D@* C nK%GT{е*CG-N?5F?FK)7؎-{c6z]n<(YEg>?T:e9ܒ,ׯSv=G˃(DC!(8>ce(.aHSOKL7SχjuR{ N=n Lsǭ|IZ(JOq3mt=긼B_:-duy:`4B0E4FG0q W(|EXNQh:cD;(( Ϸ]N{|ڪ_@p"}O,`;ξ*Aa,+*㏝9d`KK;_vzU2D4!jtK %'~\5LӔҳ,ߺEWC??>J6Kl9Ȏ-{[ƴ-bm+ o^| <Gn)@8DZ2vm#;&Jo (V\]t(Y(Ɠ} qiluU G;HB f<"R̿I[`Lt[i8JZ% J=d!jv#H2D@_- bcG2{7VhDn IDAT|4mz׋@ސdQo6p6|LX\D3y}}tfxBDzc Pr"I/| !Pw.buTnQo8x{b R*+})({(Dh1rn٧[&algi%(i]Tj" 72}}rbCKetom>-!0 3tȷ3yQ?x{=  NLO1w&g~Q/.S,7 >lpXYKKy,qzC"L\L.J#_\ıc:mItMZah,X9^E>RCD9_0HRdžcSmcT._'lK$xK=NNO!HOFq6j$ Bx~u4 XO4BeS*6g|}?uץnKarHBp{jm186?U'YQ_L n b)) Nl>O:">ޒ0i68Blku .;${RnW =Bӥiۺm:0wmr H$O$v@7Hap Jcg"x(bV38즠Z_y@b'<ѡm%oP,qa`gQՈcÇrlH|佷u*s lA>R1c{~ʪO=0ܒ0)U+h CۖE yT`uQrR;'TCTpX:{z`L7|/9}U:*0@>Epa)sdz2P[SOMo\~;xTbDb۾5]1L*" ՝nFM4aMt:GuC,6m7Mkk6rJ(YXʲP^q]m(z؂k֖:ç.}P.b/0|`(5VRg!y> G^EKLXD {fwECxk#uoЖy&vrHrd%9C=Mѷ8~| M;DLNj1G<8F.=ПOV-CfU[h)x÷ r !F훻'VˢݞmZIYVKa9KSl44W Ggg7_3^<Duq&"N ~ôy;_gBp5fnw?8.Bd@4CZW&oݤ$"j}eN(bB@> ם%Xc5PL*x¤TyRĵUT50~wXy@7 \U^Ka4 mK"@Ӓxo+ICFgZ]_e Vz}D*9|Rg.㿄~XVM[ޅ`J ցlRn!\}pD@z[\qfI&z~`ť%CmUvтj/ƁpW4S㱋jU oG/ZkXUF o^9ķ> i4x_Bmno>'=ae 3F湽 Ez>5,˒܂"rdo2R}@i yם#Zz`M%),.QTH>D䁄iht:D2k5y`WW#Z/7V$K7{QuQVO63ZypE6_y L~qWC1tG>n DD2s&>&CͶiw:D09P f%~,}01~Pt^]4s -lO^<3ss$"j@¤`]RK`v r@vc5v`WI.tWHђkς' jrbf=,p՗u[/!"3'0lu[E:5Wa TF#A$-{&Z@Uی>Qn#֗&73K\&Ln=(\Qu7P GYOv]v5"0QUrC:NH`ڙS'Nln+/qJ)±x!gkWŽS?@uc$rBۡotD6jhY\wk=H5< ؇(j59D^C3i|j9t^_9* xԝ37>L?Y$lr_wʯlUvU//\/'90ђ%a]m4nbDAܑ}BZ ¥\mr!\NfcVX\Vnf#hzPTڑ4=4N}}fX"ǾAAա7+(D^/mK41fDX{B!=i6MX`H5)<~M\E<<&~!lF/'X4 $ b:(Ds{ryK4 |}ݍڬ}7˙ cEݘ4ӡk8~7yqğ217 k5An?,; >o^k ީׄ}r2OuEݑ˹x=^LԤ"fewĤn(r1-՘ӵ;vӬc 
lǾgm!e?<֏٘dfk>SNyՅ/Y[BnеOHztOKUG eI 꺈 E4*&W^X8XS"'^tߥݭΆ+fy dw(غrA4UhJmVEB`ގ,eʶGLW?@DUש(F rg~ϋٸ[_ޮ#_Gט_LD肣OoT=_oY fgLg=KATT6@@z#lbUÇi^U5>'[vc`'96zşz8A|80z.B Ω&I>A kp4n( gčz@fQi;Z*QtTAwwo*phz]4\]|@Ws}Ӡ-L=*ԧX^Nk+B˶a噾Ԕ8}Э S7S:IV@ pu ,f^(3X(#bt(Op#M ޤ﫳:zt*%gGeBl6Q cOV¤BJrd 7/S\[= %),~ NBe'G?sln70y=|7(*#9at% r+@h}s/ JU>qr5?l1~SOXlD}K !0@c=/l)D|->,OۻxTcc; Lj5|^0+*+nZ%c~0`Unnе?zZ%p}>|Q}FgJ 25 E?j:O,|iCCvEFVP-\F< яUAS֙~'\/9Q$/xzquN^ 2PM /o0|^ڽ~N鶞a䐁PXcm]t O>~vk,du8\ď ٻ/#a++ w)q0-(̥3xQ11e*q.VKZۧ,Fwu[SO_O2pxvWbjɋxuW~w40640ug9j$k e} fIO{f\Q4TUG}[(fB|hlKtz5T-*"‰vzSg6&{R1&cO٧%a6yv 완P]U1SX;ˉ[莃a[ OJ#\@C-,W{@D>SC^e5?6I4=-VP7*~F/"ߦ5o35msdh^g{b<噊*Z-<V%ij p KFY6E,N1]L}xxF-14"@HdK|ӼfWK"תptnn3chz$.Th6Q}j.kRթfO@5||s4VC|?s  &c A6`|bJ;"Ov= QZt+ F1zI4hw~鹗vBp hZPho-EA pd9||qx4C2CyÍ49N6n?⣨*C&.#&7K BU@~cK A #nMDU8+ Feo9(:O8O[{O w|[_Z*p7ǸV.1jo*|6.%RPz,2N&B0HHsɘU~Z(Ptx"YVOCH$Lm`ZSqPv3g1<3OQ ęJHM`cM ;}M=`>w8I%I,5O _215%I9ӹ|-1%#oR,ci+ѱ{꽍M*"(^:l4yrcl4FIKWêon:l>v9 f.\6E(H.*2w0Lg2HB $ -I$]N ́p08oN.0 CǴl,ƒ:N'})ۥ0< ܿEEI IDATT}BA0X{jeۘl{}*@]8VZw8I2eS)\><_55# dpeEɁ~0%|^ΊaL) (ߦqmi헬(H>wQxO`Lf-&I /Wu2F۶\ޚm12k$+ 4]'L =G%J1 n}7!$ ccb?PT=,-IX{= L"'ЮO.X"[ΚG,2RC1b{ Jzd,[&|εi01nd4B]u N/LݕW?m*U۲6W/ Z8EH$^Z!I֜ж!6'HoH⪕a0dU%Ls՜ D,}# dGu[H <[QUҖElj/L22 }L*hZhL@zzߊ|fd8- UJ-4JF6X 3yt.>@1%u? "Wd+at:>ϲ+Xbw1i-NrI]"EBui&Y+|2,arȃ۹AiDF1͢yP*&,Ҧ QT <fD[@*E&'皤it] >^#$&NRZz IZ:Zu:c]5:ɀ$!-sReZ|za/Vdy}MmHccXV zXV^}1, EYm@8 oA#V4ӌ~D4ŶKW ,VM#O 9IӘ^ W>īO[(N1ɓAbqJK/!K;=nfq,vpwWL92m̈PhO,H8ccv% !EA q(4IJ,edLl.FWԡ(uP4SD"D\it&sLfgD"_\+Y1w7{PL0Q,[`yoɀeaY6k^;Iln3L}D"uc$-cO`3#HLT$Hܛ1l }Nme}-DIM$YSw.tH\`8b @A|,c}Դ%$&p1݃ɲ-|t=[h_L+ynSܯϒԑwKar͉>"7CI65fPրɚFr6if&Y 6я_Xpa׮`?[[tK^%El&f:aڔ e5_= @  j~3p=熻I[ϣJ0'8JIQ5p ݃hMVLzٙA)t !5ϘdY1 +L6l(Gr{oaL8?{Ӥ_/ō0[I.ɉ7ZE]TVT!w4^L,zCGm~QW傤3idE3et{EU-lԐrG- Jk֥q!GN(W B[j2Y0qq6WZ]-{t:*̜gMdYB]C9RرyyjQ>D 4<`QL3aT4 FNrn]guͿJ^m ŏh73/W϶} ~M^dU>F@,\%5MPp my'uƣv񯈧ӴU$M…u CF)G_9&V1 9q6e2 yf2 _;TפS*o}{ MC] Ty!}n9BaR!o&KP:oMtڤCN,$/)2}3q4M YY=L4Ճ&٤aJEQ .:U_%9ss?C;̛ZOnUmt05oYK(Yimh9]xUA0ƙߍSN0p0rw`{"SBšy~hT0Hfp.C$EQINH2Z9x/[-ɮ'!>x}p)Wmãz(h K\pMQz2ͩB9"p9%17ž$y܋1L3K852sgPR^izYJM#V9ˮf:vP9w~C4b?$SQ FbT/6k4$yMǹ6|{)Gdx}uEdZ us/oPUAR$v3kԶO9ifӦTw ;yQtr+3<ږFNЍ$ [H9ʘxuNg@!E4&Wzޡ@~Ǒ]L]%ϜGT\6ZP|#4AWd$ w>ɵO8=[4UoZQAQW^V߯먺G[2s3DR[Px[YVC4v. ږoSXrx d7`3^ AsoRlr$ Ytl)対pHhv>$4#l,iU,]Ε>+}' a6zQ$AiI)jJ{Ov;q߰YGR- eM-Ӓt>7ȿӳ^/CExyg6TbqKk p줗MijᅣtK?;ڹ9M4us:d5D?DndIcKS6+Ȋey»o.eST$ ~j@R3IvP\u +6(%VU9Ĵm^© [:; s۶:XIR)*xr.t||;Iq,Upۨ-z먎BYF+Bq%uԡ9^؅/k78u,B6NNR7rL@Ws?`Te_un*NBeTs'8! s'"ŷ./ݮʤLD /Q$-]T5W+D- ˵恉:Wu JMZwQT_ob ڣTda%Pon3#Xu)$W8ڐ(75,3/:mnW6G7 V~}N)L6*Gbfձ3!l٩i,!1(..Ǘk655ŋuA} Ź {p02Zm6+]qEz7Ԓ<{r&''222O?O:::(,,'Hqx sU;pXsF)o( |%N{vWP]Sd>x( lKWc4sQ_t>K4yC"PT$f=fF/Őari>9}WR}< L')Ԕ:bv.CՊl%9||*Cqq[=|VY'N;mmmݻgy]v E,kBEaDdG1ξFq]Q}j|G|ޡ ,K.,瞣ʗk?etډbPܱm6utճ٧) 48ewy??%PYYISSv-[PXXa?Ӱ$6Fќ֖d,er066M6~{1:;;;IsQ706cRb1KPwGa`dP+W 5tRyיr7w #>b|‹Ļo3nك8?{PYVY$<ÎcN7Q][Ihۣlj'. +MC/1>Aӵ=}_|)**{w^#$͎TuˣQ# sdSl /M1v=m[ΌSV_=[V IDAT1tzѷ$db7 3gpCOd2`W!, Ӵo:&F0$gi=~BY!Jڋ{xU}/f6rr~q8OΈU J|'.2<$X}OMA&cMEmbX3gꫯxj뿶2m19¡ K ULD6/+!9B^4S*VР@?8ڶͥKn+;PRR®]kjd /|Fq?f/*z ożegTMo4IGru{SXBvqb7ۄ&cQmTW"J\;!7좼T3NEӣH:TIYI~-*.+bF T~yaL&L j] yUvn'~xu;vZFQ:ġCBl۶Vyr ;Y.ɯ]r% ^g욁݂YAm3:zٙi+ݦR)&o]/;^?~3g'{_eq~n藨.zZ}~/ !1._1<`&\RcWu!I:%lсfPN梳 LS/#/?H=ztѿwvm$|.6r,-rT:$$'p@V$+6a#<+vAqff4ǪKNׇ{~Nm۸;J~?.J0i-)*HڵkVΊ*B+K|f{02c.g6*D0h҆`G0rul%$h*ί* ,laJesNCcn㮷&/ L<kt:| T0: $FWF-/@]|AN`"IRv*^2v5]^$A(FW;N>]}W_}/*^MӨ۷SL*B&x~oJ4D2gx}|VYloQirQ$;osꍿ瘭RƮP[Zpikk[ 8}4LM(**"sAmSO=WB|/~@@92 Fq%4L!I2ΜDaۘlۏ\bY&)(BGu'? 
p8۷uV۹64H_*w/V')߱E|psuT Y9ۿmiφe,#%84BUA*X,~9;/CCXi+?͹)w۶3{,(n{!k.:;;ٹs'TׯXPSC@4Mi;)?,4A&K.EIq7lPrlIZqYV:S餽Jhiia۶mrBi߶m3+ P_s-)R\DB%`屗~ݩ9Ʈ|cS8jPϮIaa!@]ٲe {'sCFA]?w*~'σi!>?tQ-kQ?IK$ 85E 3DAoVFyQ%԰ bl4(]F6U'{b>sٲe ;w\ЗɯҼx'2Qjlz\ VEVROsB C S_(#f8I|ж /@I'OQ_;~^XFm&FL}A_Y!TY;~cS9ݟcx*I}.suu5?0]]]tuuyfBX OK$5 tؙ3g φ0Ȳ=I,2B0x9蕍J26֏e} Ţ^ )++B1BTJ%TSHMC%9զ*WN}ĴZ8=_'m`f"ne7nll_f۶m߿j:^؂QU}=G J'(G]bVT$# dܸ^%!M!{s塧wpX2Oֲ,"D"CXV {ޠ~|Jn+SVɔ\OE؃h?-uDhh:0{:Goxkh(C@v(.vS> TK AfHxM;9r}tV8). #'Lqf楤tȣ9{ ^ R\죬"OedΝ?`0w$ #4 f]5=$ꥸxˆ0JU ^gjOaJ10XtnW^y完ʗ%zt$I!TF PTME#>z(_`I m g.\/MdGqY G>>ƵO"2@-ٌ>6Kgg'`,ll:~}=zwnI`r&JIBUGLtG|1HnF:/P,SrHdph;̇$IQVVvןC,Iũt]M&'JkyZR  ACRYRy虯STFm>E>rP+͖'^dmϽD$p]KEE( 4[ P":6e]A~Lŋ-oP_TF pj2O|WcZ6KAIxWl2R𓔶 6$gKoWQxӰ0Ԡ@Eqs 2wm(,WqH.(d`HfyjU(p3 i>x $W^ ܗ `|f.gW])NEYsќ;̽AI/OǨlCry<܀* |y>4 &ĉ/xCݒ^|[MǤtyq46z_C]UhYB-7DّCxx4Yfg>\4iY<+J/(Zslbt0]ޭJUn/Gا&H  xw+?J6^qb_%;(w4vOi|/8~@OK;gD3xjќ;Jڐ0+wqiD?+ZNuTƕN1l}ɇTKe\"OsG`E=W]ڏdx$-;yk/u[=C+>E7ԁTUC$'>> "x<ƺ%>fg(.AV D&,ˋrdYVF\?U "1L~\HLr 'e9{%H1t;SD2y~oUƟqmW`fS/jUb2YHzUX'$+:v0E(BIŰ,KUwI#t6@Ԩeee8zwÚ{&#cj(/< ;LJ}yfCa':n,T,VZlBQ (֎֍_ttC^>N[WO͢zK+!,$ QWd+Î#oN$n'tc2QJ:P k0.Dzp͌`&²k.,$#eiD5 /tec v" wǤOS/_$ ׻Gw(=z_]Bߠ略q !r]ۖs~%!čj#}?|a9˩y=t?v BS"v&\9M_%*YHv곧Xۉe8\v8MO}PкۇQ>C}eó2 ݄(" HS5l3t'\!A&塾i(`g|z(D&:1w5j 6V[<>ۜ7M4dFZT,ؓe$N)+Xfif H29KtKiԅJOkϿ$"c Hi,6.gX*{o,qA^۠ضlT5Ϙ0q:&V11U~:[Go?9_現:}_%vF(>Cm;񳟆9gQHm(80TEtM>_iƧF TJ7qG(O }P]"я0 rqlc{S2u-!LJGKuw/m7),0аm\\UF{΄ǚwIG.9BqVP>IofkV4v:Lhj hA N51…LDf/g j$}ym1,E3 ]ǶU ږblקub3*6p`M2`fB'6bKa&zDZ”UUiʽ7fi}?@ڑ&ZlBe{3&"g 3?PmkhTp+,?ٿPp~o)"m'g3>xoPq_gl$c$';Mn} }8DٶO%Es)* at"a$p7OZ(D}S8'M==?qBLJ0R^9DhNh`YI.:9le1?{;HFdK)^xe}Nj?Ϳ'$ %}ϱ )LQ#p*.ό К4Ewi+W/)ۈ27)@&BH(:o%EòSXDI+l+hb,r׹~Uש /~g Ʀ']ʕO1'JP5 mtעض=C- Н^ 40B*~,#1$*H2%5/xl?') ϜY'W{ +K;*+( =LwsczQCJ"G(i!AzXmXQ!Zy!9oFWIon{>T5h0e?@<|CnSN0\X#|-fc`l7xd@ $E emrsAe`?0U伏/{c,q$kow5{7# {)C<)AKg=nԍdf( Ma(2X/%.vt%=~[\ff.` 񐎷PUV+R 8Ns]w1/1( ]wݾ]!QV5T8@+\Gk >~:G_g-}ccoq5/Gk+4 4$&o5rIX\S*`p{EITO%ǂdkHkbYyï. o}Δ/`0\V1M}L>8eYzp/m{#%k%:{O*s{UQ5C ق05i0XH~t'OpSeloNLяe+MQDEHdDy$w2])]ܑ_=@WqKWEdIE_˗IPu]76.:E ]S?o??Ӛ$qU nsutM["jM{xOkޠ<j2+uR. |>S_x/H*_4vY x-HX|钉0I#~;FV"kgY]|t}t,ͽF4{_GסgW\'&M}eK%%nɲm$Pǫ_Qg GZ #Y"u&{!S='8j}?zh?#{ zWEҙ5u$1/UڂD}wn'q?G<\W\B'uO%*ZH4 m}}>]I}7:_u:лݷyu/x9Z[@Bq(ĻKFX  IDAT N-4~.nU**zY\#6dn\0ks[u3@s$A>%^3ᰕp]$a~1eTeca3."X#I{6ᮃ(1DUׇmqf/r{p҇h́?d\;-)n`kG3Q&_MJ})6{Hn,\EV-p jӞ5PQ" :@k&?No, /Xa{ ۅexk+DCXGxBW2wsM~۹9Q]b˛^,3zٵ=9ML6\Ѯ k<))MWXfp\uo@QAB_mͺ~z]ՉG J 6T%YޗY9XǤ$O567Aj(U](,]ſpć ^'V .\ɒLWzTw)K:0-${8NL$ 4!I hpJj!dE`(a4p7i2[_ro05=&$7 iHб4=Ǯޜz4 \{xްp+h8./Y]fP '@W(/iMR l$LJ_:-e9O[pd A5TI±׍.FJ寓{?XYd3k岷3 !%q'7$'0P$YB¡b2G= 15}5e~In A=z\N /`Ƕeɋ7( 4$Au?C==!"N$=s.v $Cy0MU- lSS];@TtM{ 7VDK xs¶mk2m&kON$`9{,KzY`&T0t­sίP:҃۔g*­$&,lt΄cۨ^cc`y-Ȭw ` rYJaPY7!$6'/e gJaٵgCh; kW0MʥϚJh]MJ,#[pɉg.!DUjh!-QӧjͳkEVfpaujMj Z 4`嬵K䧊?w lY0٪BЬ>Rнyħ딪QjWZ0͖I M#&eab^9]7}8M"Ŭ`,DvbgX\XB߄|Tf3 ۢ&b+C!\FtCLʹmГI0}&+WqJi"Յ kkT*~`ژ[*äqG4!+3T2Adyg@rSSD0׭ڵ,LaD|CpJNɺɦ@rYfv.4W-{>T~X\I~bpxwrU$) LoڳmiL]Gb}&s2w.ߡ tR"&A D2gǐD+2'?p7_2yuZpxgBͭ!қ+yĤQeEQ7ArNe5H&%B2;^P"`U+q#!;y&;{."}OJ,…0 Hu9iX$>c3Hjо/02Ѷwj6$y &&o 1.ݢBw !fo37wbBMyZ 4UJ^qkOJj5 ",vnVT1!}|4 Y5RaV!N(o'ꭓKBhF|ҪVuKk E!QVRVhGvRR*͗"(Hƛu]nݺ|@%(fur3DġCԶm.\J*uYq]Zd2aP,>]ש52'Rb,v1I(Է YXDPEۂI!I8bRǓT~Ν{$ gnnRhVaZ]oNy q??YXXOID"Z,>_e{9G47,b) dE}Eah#ѵGw $tK ٥MAX >c"I@ju9+DB!l1B s qZiU̹s瘟_^?H$9w$Ittt0:3cۨ-fM@,?c"IHسv V yn!&D0hlL0yK6uͫUΝ;4_}>~? ###~O) Z5_%|,챊# kk[⫔j߽sͅKԺ׼P..æ U^Vf =} w+K|=:eJ,@!©(W"ie±w7KW*9s۷osIP/I*iB(\r`+Ȯ R A\FIib 7˭je }m*7.X-h+)ǿmVf Eƹ#j1.wԍìH'2H-2f%:?z]^Vpv óyJ~mn޼ɉ'9p`'d0p>$. 
Wd.dy~ZYA22t/\[bb45fp$ǞMӝKX"OML>a[Κsc "aS*\)u2CE?y":vs uG@%e[ZLR0!~d@$Iv^r/G,w}kׯ:tnݞjc dZ?SvP=:ˁ=U|ĮhBc٧:NU;KK\>r#\K?n?~/J*hy/_IۜY~-ylb-2f-mA9w Q@u_e_CMq ~acd䯣LL~Zn0^R_{\.eG)w Io8m}(,ǤZ* =ͷH( P,mw2]]dO8} Fk:FBoۚ&pŵ[^' ^}ؽeG\hR"g >G+ l ʕ+9|ߖJI4'WH07`jAšH{A֝ ~u\Y*ҕ ZPsWvK>('?C(Tj0>3OML(+hvҗy$&YX}+%z+qm He(&9x94n#jm5†(%]8/oX}BTSmdAm? zZqƾ=ڵר1bmjxk2Ƨ72YS-a9{JWJ+%Q4 %Ok`jncGn},o0>C.]ċ/#G,ӖLrknp<4J0b# W}/:ȮBvg>j3=ӟ, M,:|qȝA,_X$qREz<1Bfq]ւJz7yxL9 v [Șf`M Èc;\ξ_d!7c7t$q^ZMѤG=py>C9‘#GFigg(^-qm#s_|/N[A.?^~v.CL]!3;CH|>V^7~i_x|7E|ǟ| )cR,)s|[3MMϵE8'L4~P8t7$(v7"?/VEXVy5PrE"b_-Gv27o|[>c(U DL|DU k\?uQ(AH I$4=z8pAO~ .222±c3MIfV@L-m;C'du|ܬW_8Tħj? eq4?]y 뱰= -aj\|3{=6uzb;1V]@;Ph dM3|D˗IyGKWp73@ީ"QQ,LS:.ttd'X̗iUlb˥Hіnߺ RK*ib4L{Dq"l⸳ ]V,}SBӨ̵mzre~m/7x$I1DͲ>ǪcoCVNRWBHUXѯOړGA~ʗ9kp C%>OVEi1,,bS%? ( >lznqUL%lto3._@B@(@ɟ4c$IB[2-5ȝ?FS ^(ɄZ^}ۿ- Ii~ܖcP$&03mT(,.iߑd_T`z?2 t$aJ? $IRi-vjbx+WNk$(z ]=oP\<|  p$aZ4~i&8ÕW9[  ˯`lG1Y[\sӾs8T#HL鸽6X 3sD*qfg_P̘9'Ԝ?]%z EFۨMإC}(OѻLD o#} Y3xmȒL,uV !if}K&jUd4=a (P=-$ݻȊGkHK+# j#rH[ӆMB(B8vSguDZ #B 9JЧ!K-:kخ-CCq]dŇ鋡2F)^>ُ#d$n9A*:11z=+r,4}8`cIP#u%%ׯ_睷fhpǏo)$=]]]pg94f1 T^jpV.-=G =To:$"GwIG+iyJA*q8фeF)h$zI:X1|(fЌ|x#IZ R4*MtކBɊpye5@!y})Izkj\?HBPTne.nwa__'Nh'*YB-]+)+._]To41F'H9ʼn(|᫋gܹBR\;SkL_|s_q~hT|7$ IDATɵ;~Po/Zus L "G#C)SyKd!Dk/'hD1IJDFZl~@;6:Gt9ɻ5:ʹsM ɲL|p m?sŜ:|Wgw_+i Z _ 3` |/zWy^ßm-Yܐ=Ah#J"5vOd[~UxU']0pqVEޝx4JX>tYP#%{SռV %nqi9qĦoh8BXәAR8-Ajב$ŵ ===ϔGDD$ Rۭ:@Q#}b>Z,FEo2bvb<3)Phzck @#ψQN>M[*ʼn'ѓ$Øc&} TWi G{KVViDH>Ag&&X|RcJJ9$5=,\Qz{MG0@ٰ=<);dtey0663gH&>Ιgb:ujyo%Ibd~N C%ZzA06[\"=Ѱ%>>]Zu,i3Իty #ݜD:\!cn, G- ŞBEa8]vGQ{c4hU芊kTdfvg 9u$:\\1Lӳ}{Qvq.o'_:_!/e2h={ң鉗^4" k"dt靿H"]QnR,%}bL.-"\j[ BKU+/lz 4<>bR.зpJJ8{ sIɭ;I$dIn$I\IQnU-bɲ=]vO 8u`پxj=y NU[`Y5tX_c BHFd6__ґnbiyg.[כusU*?BȔ-K;= V嵓JNTB*=4"A4"Px$Tf{ؐRU%O5N>M.^{}>VΟHUoSFdZZDa1( <7C2gba\\[5*)Ey[)h^~c!p2 'Ow[~]ɵ;>ϰ4'0m?1$D[7{ȗXX*vF[VH}룤@Yώ`0*I ]Ԙ1I)g=U0pldLb=q<#|>[o*'Nd޽z:yIxg^Y+)*6$)D"0ԪUfV4S@YX8O2W7bR-r0 H H>o1)djL^M "I;kN< #ľ|> .9w mOJdIf`> ̏޳h/-հ&yOML S֙dVATzvFs,.^T҉R?a DC! %D2`yj!+ Ts~dygWr,-MP.0 y{N2Νcff'Nׇ, "i#|p"+S]9q;tbtwv6l-(B2/ԗ,m]CJ/Q(Q?кHK" (u>G1k3qej.D(;H  Vbu6؉ӌ/3߿cH' #JelyR#4^yԚLe!0LAn%)wt}昛D>"D}kZ %[>aP ?P9c*g- -0ss I0H<7<%[_?2Y*XB&բÝcׂD"C;!f'Eןb CRCZٳ\~'O;z n q%Ax/>IRd$Jny'2 !(L]_O B-0?a!(Z[l=<Z[qguR2z:ŕA$igLf1Ezo>Y2p *ɕJQE! !%t /塪*H%L.B2zksΖj5, Dzk:q/?R˼KS0Ёt;ݑ *UJ+qnw|fb( HERO(ڎ>B@qX۪Y"DLfǯU:M,-D<^9</^9|xWt]A.|1wzTlD===. LL$Iُ/qͱm ]@USA,Xh)0kP]hnĢэSe|'KT:+q\`RX"{<.^?Gz#irdd׮49Euxs˸"twu5Dn`U*OI1ݠn#ȲLku]n޼;'N7)Ý9H1?vb6eY)++tR/$l&$qst5Y"nC$]J\+ؗn]Ӷl.\hK&)e bsﰘ>[KX,OkN-sc õN )[Bf` *ikm6G[*!I `ە[Ǯ#er) ­P,̮ \ܾ.4νB$V~8m^{2ԻJm $K)kI$wm۷9s4D'O=a%+ĢQ?zCCC$Mi'&)5$IVܪEqq +txa0t}KSMOQ.o‡^kA2nۏ&Kﳖ_B j 7L̝'("L{޻\4U:A89o3 hhA9_ `vyNi,dd,~uERH(nk/+  IAo2YV+ܙx  [E.^ 7iWhJ֖7[J$Ϝ!q)O4,ňEE_ZbyiI0CA DxUUy\^$RհbTFX8DwXl[(V|e2 #&]Rl{\;斸}0Je?Jm[M8 c+3SGqLUϓU i{M1hWl 5O>Gl)lT7 uG J1{ 5Bk2qj&F %l~ޗ~" h?? &)abm{Z(^d>[BeUeh+MD.SaI~]C j_̾ЖcλqYB NmΖ$BaÞ. 
+kkQp@V$MEuT]C%$IBmlkp㠫`doh4ۖ^+3K R6 [UIuUez2s{#Y_}A*;8j;{L0)q4Ag|m?'Ū䰊P-M19[SM%'R vHyaDKkخȸ +LSD d+ص5׾bہ.IU@ 8@mnE82}wΜ>MZ7:zzY>8LbJn18aX]T-ΰZIߦՕq,W !V >"F!˷ɕVZ,7k  s׳Clk#O։`bO{ڕs@!rI00(lݶ3gŹfu 37f9gmٖ%YJRb H9* @)R%Q DU~]:̇X<=8-?"c%|_E,رcdYKmmt &6jNmwjdb5ΤN#(9P>D6qW!Q".hltrژ=Fg4#" `R?俓Zes;muP,jmCA U'x[>lɎӊ x~BYan0+s*)HY]gaO̩ N;jo-e&3>>)@FAU=&p't64>2McǏ'No^`w?>_(0t ꪻ}'|Y7SY?IDᦃX;FٱZM͓LhBDC %Eca, osS~ :qz: ݧ`J(tPQ IQP*@nWMzmhl h43Xy<ǎcl|;w٩]N]dRMV}J:7ܓ(P9\+͡><"8vp\f 7:0F#ussXvjZBxˠ=^`1ޠIZ؅dI=;{GgkGd-VSr8MMҷDUUΞ=˥KشM6'ttau5XHFxA N݉HpqKyL&UAbu80Yz|2ښF4Ν;ǹпdY/@w%$I4QN)A׾B&/Р؈j%Y +(ѹv PwqCCC477k.LG&Iۋ]4KX# Z@VU ]ug6HOWN;- qq;?`Ϟ="G~IAho`yyũijZ b89Mۍ[=a2uGt5ũiNU~&LNNr1<{q9_x1=΃_%f諂,g/](KḢK6l#|-4M( 1iHI6isss9ryCC <ַ~t c16OsFZC!.^'gIc|?ݝ|}Sٻw/`Ptta@ <&zz!8ĉI2. IzKԹ@CC=5b1 ̺y7@drdih4'e Rtt֝0llŦ̺/7K"SX }}zFPs "͡UA+ه X$M"T"`hht* PHo:0yq:l!DT-l 89E߿<\iij!PEjaq5C*e ''qM lܸ.DI:fhhh4ݻijj>n= t8ӋKYCyOH.꠪[JtOqJLdT,YrIQ5|6_ȩgqèFrOR"bk_[-g fRZv=\6'XP  }}C? f&爗JUE02%'HM_OﺨUU9uΟB˗?_iw>` ~x.jKm=@sO,}V;] ^ÖZ3_aFo9E'r,]mXoQ%e?q5aMdru6'h54׿Vaa}x{XT4jX63N:z(4b"j'P-;F#/_fnb_}=f5MJ[x]+N٬NLi5bH}0H:9F#2<<(YTUEeZ۶5MswhNS+D秉,'D#FDA@Si" T`O>|bWީ!-![67awLzJmK+NfIOE`͈F1evjBYSd4J.bK]S=4b:tBIU@mQPL]h\`5 KϬi(<rE fuML"s't[8/Lckm3@.a~nRuMTW94 j+sVBujdfsux3@us3M2)ml] ! ˳FuM8FBѱ%LO,M]k7Jfk3iBnsT6N3ODGmԆjY,Mg8n|c2Y&Py@xr (@cC>3<<^ntp%"SӸXA`z.uR$&2R|N| wR>x'/bv8%9^%j<6o~xӎaaf‹=sD;tpH)PË<^x۽&O'&4ME/op@! [ֵy,X*Ze45<Nzap xcTοo8qg2W=-UV8'Mr`?vt5I qpOi8˿;\16 c0}p`[+3XO7dx1-/$ll>0xFfDj7lED~4ȹW.>im=M¾[)Μضʱ1KFlJ7㧶BnUHE4Bco?vtP\;331\K;y_̫ho3رe cT :@e\A(4Bz,u.67Ssu3l6^, 2EThmn!XU'F0O H[FiRLx5T&'Vj$@\Sd|#yy;hhi!L bihbWHMFsO/vcd>b%LFxBoh41VgP A" 3"ڱj'r6TkcN:z;0&03#ND#|խ"wT 2r Wrh"I6[*.k~A(Ik @#MM̗A2 ᵙ|Wau/rĵ F4R2,/V"P Qj`0yhhk`|nMihE$LvRʥ.a Qc0 tl;4ƃ;f{bܘ :=0 kHge#NJP|ɶ͛YX\drfh:vw܆%A@-IEc("~PɤYZ{~fjWQO}DPUU>$I(к ?k f.l~%u|6yk^erbOV^R*i*!kοFM[A6mNšŧRQe&+H ď _efj~q+O<эX6,k T-}h3sD6zeQӈKp! 
ckGkX߾` EA`4ߟn'FvXVd1Ӫ[ d zF*"44* hvsa=t)Q= a $S\N,NwvzG+)3R)&3!UU"#f=$jk [\`9t`u8M7f(r94K<[Zq\zeMCT(d (84Vzhyzb2iaqiiEfn7F`@D{I wTRT"I!a2xVr: xI M|i>]*qqGb9`6Zp` L `Q_&8cw:nkC;~;eL\Yboh /#m۸9u>ӗh@Gb ھ~n39._5ݛ7q'kj0bF?sKc\>/V%Ź#2o)[ k_&۲Ʌp'l۾^9{bUcbx[nxVV #W9w.zZrW(;QaƦl|9Z,d0PэQu^I ҷ[K//qvrAD& RfT Ycd S4 `@+(Ӥ#2(rYFn%YWnCY'VB\FDXݎQUEAUUDm5PP)q2(Zb6G¹dllT.GMSn'M+n6毠 | O}AˋK{پ(rS'Y /ɕ=[۬DHlYYd%C5ٴom5QT&HIrq66P*&Ҁ( V x-3>n v_Q#0O,Gvְ47u-pշe6|6|3@}(sHV?M mkX<,FeJ6(8&#PU^V I˓ΖԆhixK[woa(ʸCT{iy̞ uU(v ~JT) H"NDX* 6.χu94M#ϓNHe234Je"˹uas_'?1N'wP )FN}gtmV{R[_S'/I,3ա$h"WOsN[9:::::k3FP,#"F ,!T bC-)UT F3g.R|&HT*%R]#hB!\QDf UUJyUCMXfDbLلI(RT$ qN>F#ь erl 99یN-R.k~{j >;,%ؼ̞C+L}{70q4W͢\exdi :sQEgh+̬ȘA|j9 =]qxO7 yEhz| D3 Pr({xrGqu0۩ zɦ{ 4ȵp6D<ߺ{9u+WG(`Sűz2/DKݷ oU2̍:y"{C>>wU";ĘQ5Gx$Ŧ'# $w%/Fg9I̭x~==YX0yGNNӾi^Ugo3C+F4y R7Ws|>O-oR<ť#0bcKdߎv2❪I+Fpm.Cǖ-47:Kmm{\oH Ox*q#o&Wf1a6o촊`"S,1fq.Lx93=9I"S6r7bsog}("tǎ~n^A&+\![P((Z\x'ǙJtR̖ikCEWYIh|CbgFQ5ɍ[~"_}I Nlm$aq)EYdP5 D+-ݴm&h%XqWS;==Դ׆zE1he, tٿ Bl!P@KǦ]TXDAbr])!R(xk(,M07 }tR߻fw3'?cC>"Sc)bH<$ \.3v A0z In _QSERPd<3ĒY@0Ȧ}ܷpUWfP, Y FH5rh΀*H1j-&%!ݳ _M;6R,μꥥ+-:v*dYNx<%se%CkK-F-,ƩEFm餷4ߟEVZ9CrZFvrT^]q#X,U@Elt sf8{Êd3b~ʩ8o`mNRFZ`0 | VSK$y*S4A 2Z IDAT%Sqykl.3qědb ꗖI3֞AQ$7]lGiS ̱Djs(gy)hFLFW\h0bW3@6gjVfQV!XY%YBe3,px]LGRDGGGG/L?V I~%X-j6] r,JrS^@!gP3dz P)d("O` w'F\N35;_[6jbIŠ2 S_A#ȣ)4Q]X򿱩Mvӗ/}ɱ duٳ_>uM')"NC} 3׮0!ĩݾj%FEԚ[{?#h\݁X]IlQ2W2)rFb{ƗZR6vZ#H*-[9:::::$ri+\>yxRlLQab _bqb1;'dx#T ߇lP4n3c\# sl$#ydF s,D'OȔdC5^̍F2>1M|$ l"I|adW"ʷ[0є{ASW ėp WWdDP ,dʽ5Du[ O̲861RsX`j`^Ν9WTZ ]6-z͋ "miuN/ | zD.y*]UMs7]ѷ@29܏i Z5yl76O-!7?z–BxyEE2qÆǞ"!X=lx[ٺ23Sػ b@^@>h|'%[y3oElÇS6}5o b!!Z44jD6\Q%Vٰ)%2 eEC6[۬jDN㱭h3)2&IɊݱMMې8l"ɔ=ކry4рDP R9,n/j*a7F> _TX"j@&f6Q(ct|8ţ08($cn QXɧӫe$ˉ,>G6CX2sӨf.1:G Is}`qz0"t\AVR$Q@-YkFb bEJ+S=MA!Hmwo #|mM&:::::_rea> dReSʱw͔Q*fg|^\/)NLEGu{}[Naf|2Vj9Ùf#5<&pe+eUk!NT'B*/0[xsÕ#qP1cG]o& `*2vJ*2Kmߌ]Hr" P|v?͵.O正6Q+,)߽ ;G=ܱaM|F. 
c>-s7ʢ0~,JUΗK )YjFDU}=M8~jC 2ScXT\ CSW 9SZ4VjE@ziuR҄îQ'L#H˓LϡFI /nz6j!$fftHO1>|XTI]@v0\<~=:ЊbEEdad(ttdHv6-}=hy?rTqՊ`tكZ SMKgQbv~|jL X~ZlkBh fLFJ7_LήP߷{b󽢔VϢ "첒,!h*L.Za|$e~ k(۰e@T*QJ'I̍SZ2[kо~{h{Aov 4t`?|:CA2 |h6Z[J%2-w[>{o|Pc5b YXݒLiV#+\ƩP?%.Lttttt6AB@)VbŸc-xn,wғ %_6`]lA6lht*tdlħi_ @6Zpݸc}kϐ!_1b:PʐHdR$/b 8z5\_^'82qӺe'~3 G)Uʹ5:| U;$/O07 VF8 ܚ5JǮ6VO_$`ʊvo'4,5r=;iۺll7_̱ٴ dm1LP:;f&N @Oҩc߾mwX>q"unK#op [|L4 9%ukd--;l2 3C̋UdjRSI 07l#ɒe[[k}m˒7 J{Yd 6&34$F |R_-|0y,x|3M"ŗG4\L18REuyxmC捴q U节FM&A͛T١Aw /aJ/GIX}htgpdoXqz?;Elۿ"|+_۫ !SOO8루v_e(Ir+KmٌGo1h ,L2nHu] j|m\ԍ+a}D.b /CD1D%̎y+3izx cm7HKcir 4CP'5BePBlmtwhڿBi43q7.m N~>J^~1eE~zqѩ[wkt]˽ye{)J 1sIY-q̏c[*0V|vgcmCM$ ةE&ձisQ;/q-w"b,3=(jXkLN-SRH>c \e!ePcN399OA afGzJ>`9{~<㾟qtnم{o]^Zx84Y'd2i+,,+〪h(8E&c,rq`Y6*:zg3 :V0͕r2a]rmX[IdY S'ܲ~qu(膱2kmeZznd"ɢᆗC&Q4;mZ;cYd- б25}U3T,e֧k <'}͠&ʹ};'WnqRO]w.,[iCӔQ]XQuU}3UMc jtKk$ݸ[ Ρwۆ5%**˼s]Pu|}\麽CN[J1:0@ƾǹm?5<ʧob2x9ˣ|HOǔzi?ʒ奪8Ń<+-&B!TrώaxlT>`-q ^_O팶|̙(Uٱq $H&6P }ŭj;˩"x jƱ:'p|a=zM5(8,^ L,y;ͱ!} WO~@y?'8ߓd3pK$R69@}4@穣Ϛ) [3' kAh]5͇g$'N-r<͵8wCS؊Nqu?FЄӴ^ba) RmuI}>3~=8p=fimc.~$~.AЫxPٶ"5IBצdt0%Nfo4~Q}ZgM{tNQ-h6j7puJ%,ݟ7ࣣ=0} mR"e4ys-ApWi»DZN0˒f6ǨzM:zn(*팍/db \n2CcsVKeXlm!r ٍ;67et.MrO?GJPPD ZZnˮ}dfz%eTnffd9d|de8[\aÎ473}^N!@=NeXB+RťWVP#%k˦6<7hp/o_>*:mŘJ_h"-&Bgv?\Z5ȖT(u`ybWf9bl{M&ϣʆm83a(jI_|Oev+&!-,„`sORp-p,d雰9x) 6w]gSuCUreV.FGFt3PS]µa^)߱ю#^*a_|kJ=>Aģ՝"Kqtu,7y^IHrI|fEǪpcgcgrU{bv+]#T%voBq2X9rH%x yTT@Cb4@SlҩL6cN&A mT?U8;he}zXFqk4] L_r;Nu5MPE-{2\udp>κ:>;r^leU%?FoPZbȭ1OlI ]^~3XʁkLO9/e[cۤSIIL6s$O @^0Lby7ohA:6T2NÇزى1[rwF\/Vf(.@i׃ǟI[(B!% LIHb(IF(0=A$4n>gj'cI;hX,-.Jgqn c1‡m硧^a}> m OĨl,pR\C+$[AUTe=ҊNUד] e; + +Mӏ_лSm:L㥼ζoc]ХN0e!V +Io2AjHՖ]'i8cK$iL?[=֯y\mH 18yx}--AgpIDAT{ m%XZWW 7|n'COG'Hle`"?ӅwyP;__86;ʮ0 j~m.0pUk ˧/>~G= hmH^No8X(N]l^쑷0^7rZ?<{([`K1]3G=86p%MƦ(04gQ{D´\ꡢ]Ui(3G8oPU ˂R.v9jbw#.CSl#ͣ-snU y[( hy%4pQ9}($JdYOU O̯^%HdeB2뽑N2UҪ)/a}+tS 3Wa|5SLN3JXF$@j3ZӉḑ||5M8rYbǺ =W:C|:6rRRg/^cD<'Ȳ#ի,sgnԛ2WqWM!e>182nje9(+LyEhT ߒ$ϋQHqEluyE~z/wvVfgqY˺q(;O p|hX%TV>ri6K\=L$Djk;!ydnH\f| `*iv\Vd`| =p P.15_5T,2=9Y2[X ?iR(qHB@ ݰ4LRD_k%*!c'T/pذ{'GTӾ"! !aw:qy#rL²#f|bW`w) [ d373KX|r9g)&0 S¥`bbPhx$V a:jki#0KDd_\>i^,L 6_hpejS<NWS+9IGVȲLպ]Ӷ]'{ח˜kp@3ǚ/ c1|UPɅ$6Uyu4Klؐ@UWWFenNef&nUAnv#|̆ mؤwvjk9u mUYgn}93͉C'ԷS_6F(eDU$Lv,L q*S Dkh,݇æaqoy*jWrBMuJcE@]eIMxa-xoB9{~sIWWiR']@S6v ]?|vhtk sG(f7t)>ݧ bUr?g}[![Bő뢔Gob6f#W]峩3y6GXXXBxLrd"A:Y{ʊ-Rv B $DT$ *{y$csДM(6:-d)&p:QdXkndy<'rE o:>0 29rӧEH:ot\F&[ вi3tChi*=PSz&rn݊X KJ1=éO?gRDRT{ؾcroR$ Bw( $Esқp@`dxuFXyK @$.^Nбe#z~sOQ> l!D/]]kp`s D* 6~YS2Jnd 9v<%te >3پMZ=?jXS#37C"DH pǁ$QaxP) íSP%ISLM{H~~+!9GOuk)fsQzִRBǣgD=FyS!JN?5LR6; (]i~:R{dE IBOQb bzдAS/t#Ǚc[oîB0;pw~iﹱ ;IR vC+wAN=tGi#0|Ffm}n(5.>B＀^sΟ"sptGȦS PDiZd 2sTD"N[@R =TI,9:#xjS8zFbCe2, Y2vz&2s\< |Au:*Z%a(0ä.eyE+Rꢘҡ|?vUba_6o 0u^&p8RfZ#l]$XJs'͟U_B>1ų}4As 9ui?%!0y~ΩǙYLs3 [D&_>5nmMgOxblW1 zO vR"I>fmR.(/wrn A:1,2p%>8L"ZSc$* ꢁŀ"Q&&W07~zs?FϋkK,ӓD5-E%U!.M5\x#u56Ѵ"Nu. 
}Nը h0 :Cl U5( .#)GSIcsg/rĎgh:O&ʉ1~rO};uU>TMC@Ul5-D+/0,)] w;œ?\DOcvx|1M|tt~;>@?璉Y`rK2.4B:m^ir$Wh̓Oj LTtF47d͙lz-68o97eIĖfC2STh 3RW$z38\ϋa068\dmgiۖӒwgx`q(^$myo\^!ݓGHkhC꒤]ӈy7Q "m)M3;4nx5?٬2kΎ 9Ĺ[8KxE`2pz额}ol|I&ۚqh[30q f'uYN}) _};ֳ=pϽ.HU( MCP, KibIMe/ȩ vYL b~D!cw<3`tHS]YpJM=>}0\;ѓ-ff4t?΂ś,?ryc<#5THC 5+ F}qGx7bkȩ9]ᢳ,p<=7_#$0D5R?Nv4|xpL, <%IJub-0֚ˊ,-.=av-,>i"-;KeuqԱUZvH I]T Ӡ\.i[ Ɍ NZa05y3O} LΕX']*}2g\>~,Q*̏?ͮ7gfhqVԲo.?YV"mN*>BhR ?U$joL3iik xػՉzlb'R]l9siJV=ف$OcLO܆eCTjb\8IYwʱ35вai,_U檝/JGN_9H]HHb ~?FD Yͮ9.q96r>C]z9پ%UC Ȳ+w.Iߥ)hK_.[B8* .xy7E5(ʒnjP0I:(ӓ=TS, 囩y6/U!4FCkK ?VlzH.J1_@ɋuGUeYι|p?>D/<1^<$ ݋sfSY>bMƆK[]2 ktV$*sRߓylaPddn($#Yq!Me !'ZC3`Rpxd*mK_$Fe8_>ծaWFX?9BI#dIc;oDG(h ہ=K*MWxP;>3\juZ2n2_tNv|[߃XùdK+x(B0VCǖU !cc+%)nrc҇ N~)#Su"m~a>OBȁG[yis3^s6}1k}aRGh1)v3 غ?H]g9.M)t W/rT9~9M!Y;ߛD#x ʔT| ,d1ۖ D>_ _("L\.G^l7O[~6l&V(6/.B%1ȸB5ktոVVe\16B3-u4"Ɏ=>j2ϒ,MdI"5M4ͧ"%$r[wmi͎r%tM,\O$&U]Փ'8 kF&8|8)f |kd;< Ӵ6xa6w`'YCI_b䗧F29fyͳ۞k֠*X=jkȀ&q1>bRP$4lz9_`ϼ$9p84|55_Mo VNbsHfwk7p2._ЇP_{oqd, FNsSN2=?Ğv)KYX?bdd.1DOj1Ջh0ϼ2 UK<剐l{)I`J*/̖4&#&]Wku~O4zك.!)21T́X0 Iz0GzK-/ƦܸcCN4WPIbfO(x(6'HnۧQ=KP$IgoSK<.݆{^o{)>PR_Srv$Iƚȅ|s a bo3K{x?eޠ.6?{C:3@$g( E/;qmCmYv=(+oah_Vyo~L -eiha█!9=t>L/oAv7!wW G_#A!2Ay:7Cl\ivkĪd wC( 4L4KMkFx<$YCx:be<3KQ >vocs1_F~kv;? IOa^3|uk x8[C0Vw_:6C5ި1nK^'STClZ :T$B6\r|:<*k<]bBMÝuYs42:̳546I,@}P ش'V@,ӹv-2Ϙ\htj3mTH IDAT)Ef㮝t]ev*AE:lY*Vo߼` `u~j.) '_Jҫâ?X߁"?^IhX˳7F{$\w]@m6Y2_?+KOl^!2x4.=zWg{)7o& Z22_B&WYRfcSg'M-,,I07m"zg#sIZL@WYB6L&: y9y$Ѩbxry@,*(a.-jW=Lɟ (6$ݱ,wMS0<:FX>kD]MgAQhNk7dfff1a19yhtuʼkIJ=c/#y@ݪ96+o]0 LX'#+ʊGlH1d7E4K&U5 ˏ^ 9؈$=@&-<̻ȣU4ڜ̿33;82e˻x!VlNV̇nJnBPLc*4lه)'?a 01JtY3&s16a[۷8fLPFuڰZQN|B"ܕkmZ腏܏VYG] `#D(I)=0|E bYYy 9sH,cV2+<3Y/LI7ʗ9GxWӜ8ߍacSRUӨ3ar¾beY"9v5N$IB"?K@Z/~ݿ%<z9R;p5*٬`Pӳl k.CLP.`*njCڎ{d!^" X7FLLqg C w[ ȥg&sDbK>'vmo>o26DW3y-$ʲ|b N^$Lʹ,݋򥴝$)H&5~ bWL2M!JӣD[i޵KbޏeD.* j-G[VfrHE]srrrh+_3<Fn!1tXj^24_݊L3OS6!X#q(g?#xm}ӆ}B *o";p\~l2HTUVa)sBGWЅJUXM3+ᱤ{}')[& RRD)Gb ŒIU+:\9櫧mwn0z`y@|b̥d6 ` PM{H.[® cӸQJ)/fk?Ė"Ρ*& }3lv(N\ 3F 몄|QUt J@H*BZ[? ˱rkn%jjVKaxX&gqM$IѰ;Fn%i1-h:k5kס ~X5R,86R,$@V%S>)fP+_X+QU9 JJQOH-x2Wto&;|lNU0Bj<D"# r3̌\%J|Z Uף WRfGy!M??LkDcBipV%=>BcwY iRvSv`won%*sWQaɧBBsxt^~$-~&IHHw#!K#f/ Wls#hj}%Bc$/1UfΜ`jl/܅GrLӴmEH*~4!eQNo(6h;m_+ \:N2^s>J !Dt9p|>. gt|nf=^'ZU&uW?$2 l4?t)k{ _m)z][$0@ו_+[H{*$0v.Wbfxmf@!ZN=@&\RR#hC =F`Kn oz:۩EASfca<ϙ2v$ņ'$f_ _.1}A$Eż< f1"ֽ'?Aߡ_#;Bؗ2NJL͏$k*.&;:ַc8~d46R.EY4SDZ0 I ulW_K dbCblWV8Tkۅ.'vߘ4Md-@ 2B{=+FPzTN #t%OFvҶ%f3[i~{uY{Puw3#f,~Cҙ A5P۹ۍr1{"ؔ"l BLMq+ ݳx ʗX};n 6i=Km4hnűTxV? 11l"*{]xNRgp%Vӊz&z/9-Ԉ!^:Q* 6ʅI)lؗ\"P%N2oHRiٛQa %p*G l6r+k6mg1u;B諽I`d'HfRD;~j0IG,iTG$Pb>%B * GN}JocɥC&/3u?࡟34:xAFSjpc}GhM15n88Y|9Ix{×&2JK۾H=sJ~ GZ$1#޽Und'Rv|r/O=5S`&+ɩO8GM~i>$EcÙij<=qAEV;w8qiҍל u2צ]잎$hvt3BӘ,x&"K!5u\,=;cd8WIQQBU[3v[ EQ@w~)s:Nϱkvf]7yb5Eu=ڗ31q `'_yg=Cyc=9f3`3)QEJZn~ؿakzwז%]Y-ʦ(sAAf0{BzD!J%4'<< [3ߦ}h5>S%?~"W]VHj?<7q$ϡ @3.hv$ Z8|k9k1/?=w=i%O25sj Q$;;'܇?Ck',}G!wu#|߿U:+R n݋G~ muRW m_zI m'|4␟u:n@%zi~_9*P4U4~U23+ANsF)k5I%u:o>'񛢞vK?Wx_$M߀ ފM7PtnҸ4_Z>'agcřI\ӓ#lA08MRCLs~w0R1+Fo7C/S4NOT0 ]T!pleeBPDpTJ TL̫C& &{It<}Q\A|;;f|v@ #Lώ D=l| J5s<)U1kY`Fr7G*5O9:bzW*o"Qzm@d躬s%ݒPSp!E{G"{ Tb cS5,WInMS?Q%[p1|>=EU"GW$5.A>m&Ex[@QVT͕4n| h+hcmE7jj@j6BW]D{\pv}0B<{ֵy'ݖ~+q*tw !07n\{/1_@LrF3*VahL='ރ7Ǜ~ѐ&>}Ftpre E'ԾP8U֤tvnZaZ]B{C:;=SR+3juǕA0ar^w&>|{ض\OzCXsvoZWw߲gk"qƮw\ I݅~ɗ"9رC^CѮC=C$>FlՃ7ECմEk|aa[@ao`u7t֔W~ٶkJQ(<ϠE^k_4ʔ(ִM|E`~zkPm s<6aޠ^]כ%ekiK5a\0x24wm(ȃغ 4)+p|w(X ] 7Ჰ@T{ [7iZ(Mi psK4KƬau;іE0LNVO b5k. 
XwvtvpS#餷 41X\3 7Җ2˼xh --Z[w,>:xkɦkI*m6Sw&ZG,Rc>::4K{}c5a.oz_[8,Ո[6wA͹Chh] UiaT״c+O5oUaM?m#Kv8vvB- d-)c}Je˪6k07o(;=5ɋ'DcĻ0IW/{ r]Mi\y?2jpfZ2UXCk~Bcl}5&/37'ش[ 5~xzp!t ΞX[s7[*z9p%R3!6`~$?;Wc$ 5KLh4.~WϨ|b ZZfik t)aq́yl[uoRC̏e8hQi̞"7qӎ^qH"y2(k)ulcvãTV!4~Bf[O*#&̯Τ9%K+\,adeS./J|;+yt27( s)g9(;(ӴLA9̌")qV\@m׃Z\D`"8R>@\KOS7*̏M3U+vYg|Ƃy˼X\$,P.@w̏97"O.OAʻn?'O.hp@%b~Cl^#̬ێpǨ@J; G SԂIXɊ\]@h)~6ڄ \K` UgRb R[%{eciꭍs%0{9z6f,NR~BڲM|O/q{)yBcGFbZJ$q i#_ZX֟iJf{/-Xsnc4a~0;l&12fl\'w,0[)h Acܽs^T g79?D x(to=Rb#|[(s&Bxə`'UddgYX&ЗEjGN #,V!8v694rw;R~QN8*gfSyi'9Xg*6p$GU*TH uvR.q)Q*skPKD*:q)w? <~7H;څ7 F2c^M,Ny7Z`Pt 1 lIYV' T*;莄$^ 08'd 1l3/ffǧ9o[!I bfHLmڋ&H}*Z"VeOs 1̞@'~La1R^yrn4ˏAR*Y_{s0QP ss9l,~|faBWδЁbX3.%KT;;McVesmIX-LoO}TIN!86B8BNQC1{cZ~J,܊++J\Kא4ٹ< -@)5Dv~ O 0}Cr<5&\=T/Y-kb_epBww9YrhIjʝ#Av7)mƗCll_R?v*v-8A~@'(: z)&;n;]Rl.ӯKA !O(>U nݏw)uzPʑ9Hpe @S>Wd_8Om`ٴ)Mi)_E-fdgQ S @] }t-\qW{wSWwTgIz'bc(-yJ2w{M]u?2װsU\b8ΊtӴ;wpȳsE4;~nJMin<"`\UA803۟V_W͍+xo梞^mLb=R%*uI5!m>RRn!0 ,bX i,!O?UYӡ;|mx]BG-3MQ{6EQ.xO@o)ylG_`y ~<)v!(Z)%:q5p*uU}XZ/p@jJ1Mx4ULfh;kNJp8MfgylK5އ-q([x>w7xy,r3[B90yr"jԌ Di!R8zra[H͸\;5БX(E wot~i11zJXm{;P#kS*A+.Pyma{!z >B]j!QōuS\]Q̃]|TЪgA1<~<2=ϒ<>o5d }5XRg/Vd 9ҥu@@rH濝{RC% |~NqqKHaא9Uk!\Μ*:ykQc28 yyʅ4OX"TOB @m۸K)ql+&.ZMb D`YW 4@#sփ0Y %I|+Zs*pUÃ9 ɣ-rX{wBݵB۩e֮lÖߩ7Lg|&J8k?~F1*Yza;6<ֆHMaߊ 8Ɗ )R\hꄺ(DNE5E$F{cZ~yls)k8!nG-;J>%%+?y>=׋T\Ppm/6$'Kd"Od0[Բ<1qRSt^-`_J0q ;G_z61vQsIge|[/7"DT3Dhz U3QjɈ`y)4GPI?ϣ}5Oml!EQ ω[/,0| ~޳!}į_)k۵B)%tۮ_܆RbWy5˾#U UI_Sos&KWTb1@:PH kb_V؎D(9|{a傺y?+-^_Xך^S=6[]xϽM>j WuPːp/NJ{h')FͯY\u*my=<%⣿A7B8~OD)XrU\0]gCjF?{A'uYP$ ۇCfap'5? @0Bbس bOj,fvIq5"`&r :=^TJzjbUR"U$d׳ _U`:^)GoJZ0) !N'Xȏc*.먙E`MOQ*RH x`DLBz^A=w1ff6_%qnwCRIg!`e񏾅Q-z~S?~ݺ_k&ɷ_f;vf0)+S^C]uvȧgPk5 !(BVT}rȭ_X`䬍-!4yk VlQ 8F aڣn?~|lZk. s\0.>٥N _v5i j g}bgO*FDU` 1*l a{,bP%v?ho2 (}~oPlK y-ad>Ne^"ܺ3Z*,m\֪m:&0} O-BeWJ5e?_G9مTÛPiu c, 1v) ][niM(bY\+2ëշ]\U7K[|mK9 +Y>%N0vx1 }lů]<$ɉO ptV!(z5oq/PFb̟Wybngϵͮl[z}&p)S*fYckOOR°}&G^6z SGk]3G1dQ P8MtҚ1)1?Y&~bB8TTé+cv xNDh+j4;U~l P@Jc~(u6:,ȷ.qFB!&''I8"$07kq`^r֢P#e⫂0wȿ𢡄x< tc:C~քU~NX#V?"XCGOlv?f!QNxV!4Co͇hEg7%ô|(ߧFVmc,sfݯ+u;Jw/0ss(1CTC ^fw)L7J5EPxjj=zB `y} wr6zJsuJH=ڈh"scQYS,+Fc5SD+&(V 糥`| ?"68oO2Qޙ#T?H"kLakZnn ɖb'϶eF4=N\MRAA vS WJh"H!"L7նs=3?kֻ T*DAO0{ϟ1!C{?!yu cS"8Za\gs%'{ erTpjCo/{}_"عl*uSSb&x)(JB(A#9Ͽ_%>e)Wl,Mg&i g;OՊ<y9^"Sw+j)JE#Ep2F>O1A)QA-?JS2ӣ-Fy뙣zg%bB-HhVZ({cWߒjU\V+ُTTsKJ̩8Dfx+QZbG'}OIx~xY`،`):uVSEuCP*I[// YTܦ8ǮCb=< [~t+PE̪YY9St4`Kz۳^j8*7`:tL& E-Et *5Bv?'%#szg)g3\ek h|0~F"oQoU%VmI_ҽxf֒OzG(ue4נY ~M{<_QW9S y ݟ~wVq5J D kD2(2P>[b74ұyPW~@ IDATS=C%5RF[c,ae1JEjXYT%?G.S'۶s-@fqVйi~=F瀄|5*~|J[c9`r2y˭]`ݫɫc(Rо)6C_`!Mսv$sEuQK@8Ohz@sqUrN28Vd[Fxr bjq! 
Ywީ^ RIޔƴk(s)uȮ{|9* 6[//+B@T0K#;#"1*E*HlVͭH[BT<Ǐs֭[nw!$h U n#aZC ]]a8|J&8۷У!>_&=caFL{6aJFJőkwGơcU5Gl[SE&B`{zȭyfncwݟW{Ȍ0ՏH$Yܒ[b74oam3 vI%MC8 0D^u ® PB`%7pA c/f*!q)7RN| ܳ@'#BDvf!9)mv#@ 2Az}Rr|l~G,VG]bHL;yl}o)'ra#L_'{T6_KR 8r$|CO\\(Z5_`HmH?U]u tj_5U,yӲW/tytxb>C^/:u.MC<K0~`xK-˼ ^/_^V5ZA뀏%{Bb[ۄPHc >SK7;_Pbs?ss0SCW)>콛"#yDGw~T:!euwMB玑K ݏbJG_$Bj:Z+(/`G˰6Uj!2񡫯!#LvW(t;(vbY0 SuCsq޵n\^s_oW$?"5?OR\ezftz-_bcЅJaddn^ﲭ9/S&`ĭ.y07>{s)2{<G6 av!@_f( i*_Ğ:*mo='曤Wo|oW1TQ47z T%9!ZWy+?Hҕh:=gnɮZDu\#7XLvb1̙32<b]f >(~*ֹKAu:|^;MiJSҔ@%9t) b1줵P(</4}MiJS*q\ɹyJ9(U7hAONNP,ܹcFs0@v`WԋDhM|SrC0gd9MKc;w$‵jʾ} k.6lpѥtd&)j 9ΊHtTZ͖HFƁp" #$*?Γm75MSr0فEKins"xEɖRJFGG9~8>UV144tK`~>rwh D.)abؘ˶m?bxxm5ONgk5O̸kabAaUPr)M~-mʗ¼F0}INq T*:trLgg'|7WuΞG>6E9I)(Q,fzUB„/rrֻ!a>7=H- {hJSK%̛eH cTiΞ=(tttqSouB.6}} Ç(JX3vxa~r35PyڄySr0 yk ds'OR,I$ ޔuFG?!{ƃy/n 0HAl830ѬxՔ KM/a-Hq/|ER߱@./?F*7 ̏`\w,f9v; cÁfI֦\SscLWY|| sd9fǩ^q)l&HJXX)0)4a@OPPRцTFeLᎃÖ %¸ֺ" 4ΑB[JTq-Wb\7j!7\ .ر7sLM 8w.adDQJH|dw2B5\/N3V5̛RE\FVL-AFK)%`cNj"l)%Pb) 7鴋m[Re#]Z6U4)ŭ?4mc{[)P&nt*lOeFgybp]e~~|>%L(7*b1Tj1,eiA5e)8sS'|Xo0HS[R/~Jj(ߡfKP y㻦}f H.s2?7W?`KuYMY ;DR,, O4,UIMY]IM~MR&y귘YtaZ Bu;"f蔣kɇF_!9vh#ݽpW Ӑw hVBfJH:Q>x^SK.Ckn)t ¿L&ܥ5?BH Q9CjmI|S`t0^~MHg+"eqPg"TpJbU)VKY\`1bF(c%_}`T_8RJ0i6 4еt_r# H/+<y4WH!@.b{[p=X:q4ȦUSS2W掳V]P 8|Xty25/vK8X4SVl})ԭafWd(QitKzhi2<Y F)QjsCş,]j͒T%1(94G<7RJza,M/YVFQL2THEІAT0U6B=R!].%z% F B`pe öo{$f2=koz9fא 5(D'8b!P&)\#k>k爟>e5Uz}#rnh7t}E.9Ep-8S_NԱF.#"'(2̫MӾ!l8o){nw[#0&3`V[*Q c%uST"eYH)Qeŀ.$7gQVZ5X.SvC )%z:. s6RIDH5O}H7ޙ0DljUv&VT9pWЩDgZe!޶4v7l]Eq@C(h8k0B|BFXG-<(k# \XH_?ԝ2(M6[k%Aա$&bbꘋ1KP;Tg(PvFu$'NSEqvOa! j?{\ו{~I7 0(Ԓ(R'wkeTMux5So]kV؊T$E9D7sΞ. R$VD\޻k{BA*Q4NRsO~@!;|YFxh/0[zKŦhOG-D+#9!gI7&8W)F:)%e!ehv7~f@uF?uigmuE,[p QxkMTg_|i\{y=d['>Oxi?oɀT 9]~ߒ͙Xvc_YL4~4EmJf6K!դVLeDZtV'<)([عV b=t!-m-,d%¶.ZNRHf|4Jx_HL`*ªQ$E7*7Rllx G(wv-̯:mXDwG~G\eo+A0&M'OШƷiހUR&2>UohgK-Y &7 |?* #7ulO'IU+tU–ڟZ?M(}A!+rhzǬa.RYM$Ѯ L7Gȵ>g7Ӕ-ɶwx\Dɣ?#$*תDΑ^EOlc2h%]"XO)-zhG1/Q UeD^PT5/Gj̅l=E WuJݎviK, LuYhs@PGk΁W3*.6^?(;YbZ8uKK̋GkHΖ( ֶ$'07NWejeYY%<?h>7ćߧ}ȓtߏV[2[)FQi]\Iϒfh3騋] t&7t2S X{*=vHaA-`/UG`k^{=/(L̥Yq S wp>JPpqb{_aAD4]~LtjAkD Oɇ[" J|gvs /JI(|e&P 9%<AFaOniT5B! mM%1s`a{(HӦj%h\W]V 6˾Z\nrX8#{IRޏD,CS@Mg:G(gkL%Ҳ9NEa8oYđ|l6.k$~vųsdn}B֤X;6]t)TjM ٞƼQc+P2ދB#! 2H l}lS-I Z~_fLt?B^bnkY S=pS!qT]C1shZօhRgF/S\`6Ã5=a:*5ʊBj>:k{ݬ%]U+!]y(c^rlw9@_n37-%ihP.hLZ!=ʉg BtÛ䬌݇#qPcxΓ8{=OPzK| 1e_vB pȚ|sXeWvjabOϜ%q`iOP(KF?F6[y!vmЌ/Uý^SirTU!-T-,VvKdLwifqj^k/p*uވ.Aەb{VÆ(+X%,5/*loSч֧K al8*j A*M!ecAzöqcA\3l(z6Fۺ 'O90qWOh撼_>ur$_QuɅ|lOPqPb)rR*Z&"z|>bgDfP B"})Lm~i>UAl V_z-hʗ 7 Lw %Lb6SqC +:@h[$8-GKUPke ϐ6аjx5a_jȅ-=m[H` ,kS0@<҂d[w_t Z`Fi@GRh% {v?wq.~tM=a\ӔWh :g.T6D]šeb<祿;&B@fS$LWͫ=qON p!wEoK?5m|ǣNC1ۻSwc`nzr/O{/tǽ"'|󎋓0l")r~,}qZ6&4=C:$i+ T2 /I H-\3bkQ:"9:],c6IEZ%:6r䒏RsŨx\-j$w{.َރo:حmٞݬmFבMC hLL% ͎Z˄JVd۞Zbct\PojKBPnadSϏlgj+ zĶ;Nz(Q"#"63=^Ltr|3/tɻ75 mO+zawRj|ejnNץ>``\bM-"$ڶJDHPFz|ޑm&mzae?[zBb19IEG꼸}-m;7E]U@Aʱ 6ZN-S}!,{1`7مY-xz6Wڶh$n\fG bz|/b2{nذtEASO{46n@Ԡ?j\5 ƚW#ټ6jnX\lڶJ:F"+[`GxxCsF́z6KCHRLyiB]awW,K%i#*70nYs~\hw83 ĠN/֭YK}wjB U}0wu0O$JN\.M?d>nהj_=ˎ*tmt*YSmw|)aw.^vw:3|oW/2ͥzGqVK38>vJCOSRХ/]"lkdЭ<=29BNVyt;蓓S_3.m?J|svT1Ў>o/65(qKY8Yfݮ%,νdj:U5-09 Хc/]̶f%+=Ie|C|8дfI/aUyfc}}!68@ҁ12FyDu?.\tw t}[Rԩ"릱w̥ٖIS ,&!gؑyzb3(r͕0Yy0?ztx<|FDzХ/ sIOU.i2ҳ( R] OtaHUu8$ _gU{mX,Il~%]̶ZQ0wN` ff3 6\ߓ\*RQ@BeS@)N#rft+,-'8L90A;ȑ33KR‘\_ 垝:)/ӎG+<]Խ.4k"'Q,AX(lMDO˂QQ*#qx;|: ;'ƚe(^*Y&G`s``,.%-V3gi[mw\/>Sqi ?JqP{;#VT M'3ELQ}>ֽKB% us %T>3^!) 0(Ym$lj/cKʡc>,O(_‘Is49N~8AO@2L;irH|YFF998D= ;ēe^A15N=cvBcJ L18NǛ3}M`@p؆WYK"UdSnTH[v۴ڽʉ.cY*^)9csY]s)8rdei֙(~k`<=t oÁUI!\9{}VBkzKQjiB>d\:~u|ܚbʕwurU\w}d$`i/^)um[_xlLط W;+nRBYC):6Ε)E| K}7gy78xx)Q*'YU*_qr@yъ%n02hr=j10MlOɵ^6%Zad hZZB[ELD`!/qkL>ѻ{{aZ9vӚ}. 
crnK/N/ T)%G2jqȿev>"w]]k%7AQ\2'BafʱuGHA5CСIcG<Cg ^dze!d:L x0EyqBJjYN(ڲ 1|ɋG\>ƗK Lsª!@* Vz"T?/ c":X֫ bWtۏoxjfDv #W ZK]{헄F&PL˗dɧOOhRNBLyloW*L< 4?3;^8ǰڻP.9j$RrZq+WnۘցDH [U^ 3N* rkyyvf-8U*u[lwyŹ:ѽ*k7z VkcDZ4wc,l*6v/~O\:;h⮈ i`xEZ_s%MoQچghg"喝VA5FŌa5bYA#;}wխ`-&W릟cے @(l'#(>E)˜8Eajm5>$xO~(NotZshJc=""u ƚ>]W@=C+cc#IS37uu|S3T0(^$N07(FTo3}s(i< q?" {_)2`Ь8_%mM%0Ҫ5/nE9r xqZZ]{(/f -?k>ŗJŲ.oѳ2ZRb=4z>M^YTS!I݃\CV1)-l6m.@^rP9=D o?34y4ajɡ"Ll(nK@BbQ7mN,`Yd5.;]tmvW}/i mO`"t~?I48bh\[ қBR?8W4u.p `̎8?{DB<[۸ݮw1?$HoQl!kv3ajncĎT"573?O1C1ݓm˄p蘁s۾BzӦHkt`3a^h ֢^3"}qqb4UpU\dVǞ: EQGZ"x9X˜@̀Z:^_8۰J-knJvw#~j!=_JX}$jf(_6C_$] =Žz鉒~\j&fu_ۚ'40* *kIwU%Se9&ʗ},F6%XJykdyxAf+ぇD<0hjՙzBlk6L tfoho<r.SM 3O+,桒hNMXQ*鹗0Oj#?5.u]7loZЇ>v߈~PBu_EPl_4_?Z7]~~A*OY&I&ׯU䛿IlBT <`m -AՑz0XE-ost7|.vێGw}v9Kgʁ?E%D:~`ĊuQ Wk $Z1e#]EV`{OvD E~u!nMTZСw̆:ۼRR\(uS"=PM$`桯Qha[ FD3:Z}i6n`V|:.H+İ.G%ª#a[WVInjg+byT:z39F>@B*MM:~a팆*MfvّTBm15Q{9^ X3c P5a}XzwR}SۼQ8X g|/5TV*V堯%%9~3k#pռdMy<X?2V[%2SU4ߓ.G#|t?v}96ع% +"Q,n- gj|j4X/=~Ɋq+F90lGRi*LB*: *1B K1"1=i ΃rE$-wY"o/=A%[hT2Gwݏ4gszw0sg f~"Y&>V{_KǨb2"t9tn OVY}oæ^5=.(R7%*ȤLM:*Bl[r|Lz:LLwDKI[:)7\66DagH9Bۯa^fy|KUkB8L7 Q|ڙ۱|W׍ E# ĺ"ȯF GK0=5ՄBLՋJ}T%?NQkIVC5\n{3M>嚺(FiͶk,lB6&wg)UΞ5xqjB4?shsI/X<>^(A~ 6й9b(=H(UE0|>aC4tc"W/ľƂ@|.U!3 Z P ζRZž{6Rs튢 0o~hÏ[/In]?M_#D)lE$rszw\V4sl$lwwp-™vlea0\Lտ%~[h {wXk^!;>G5@OQiSDIxDS~g.0_|Gwt!,]X.׊;Œxzؼ EqV$! ( 75;!mY}0K{`9cxfgA1&;(72~e+r kSz  vOU UNTl6)R;fmeԽR^C{V_v2mQliE9&voY{ݨ\.E/Nމ<ʳ[;rhi`^+̻ gat}k>E2ٺ e\Y&e6lYhjg#!Ys-ڸaU?-I6RsC5k<~vBM07MͲ٬Ph=I"Ѻh.cTJS+ÔS[/@Ydzi UfS2 ?Pv.-ڳ\ afײ,ZZX[0rU[2WX"%]p]E&oK6n4 nhN )%1\WzM2?O{eϞ=N\wXVWh+{Ѵ[WGdY.ErWG'Ӝ !P,MABNCM@wȑaX'_qׯܚ$,*GпTzlHv&#G7hFf 8vbߗi3BfxʘL6UU{n*A[iuplFN/Mui-:8>sTyvrɑ'0kc'+1/ٹtCd9:0?>ٿ4mfO156LM 53 04<P.< 186ǎD,M^J-Ϟm^t̡R>U̿O^ ۠lv<[\/[K)O_s+tv.2R>\b̡P躏{,Zb,V57ϸ4JgKw(uj14Z(̎5n#̥r 6`I8$*/KK Y>E5뢚ϬIU29>LI垹eY| ,+-]tv3p9'3S թQAJ?hoϼTر7qFYfӧ\:05=|-<=rVJaJ\7l)(ҜrX{1&7!|f)A\Z73넺cruv9w߈:ι(oEw (62b3;+9UR.#W GA,mkfEjS|gX?]|~/S{ M*5)uI R 9@Jvk̜yO I8>2v=FgŁ#GI>%PT4WOտ:W]g%ڶ89FضkcA [>+c{a]T'q|o1mdncfƆ"lǩ"p}#`2Q◥)-kYd;rcV6M`8yPS'Djvb.h !)'LTO@4E=s{Q[xBEmRAw .օP%{II[ :IUmO_%ս{ق#>@ULXe7k~ z: H a'|[W*39rhIg6lFxaz}# obժ{'03zHj`%rU&(ΎSc:V{\D:yBL,rSX{uau*AB[#(5EOijh(=:KQ( 2&Β=WT-wQb&Yf)-_!}iRAUω H>B)DVc*Mݸus-lF]hOP%2A|jIe(3oba߂ɹTe(U4[lFX[װ籧Qk[p?oC^B&"M]cPĚQ,1XZnBm Bo Mk[ dn(GKϣ1(FukH΁,vm^M2\q=WqH{:,{{\xH"%JTRuwm5eoوnb7bwg{H*e { E$5'ss;svd,2e2 gx",$Q _`ϰ>OmKe#oPR깇TgS^zϴ?`Cj;ZQ5ljv2o41@:h9,f{\J`3tmlQ'1yj~ i ܶ\Y%J=R#=*Y>Yc5]wZdzhʓvZ9[%ڰ9wGBB|^R$Fj,;UoE* |x(T+,Aܥ%̇pWT:;r x+[շ35=JQAVF1H :ܵGG.Hɗ-3L~[.j^t~b*$'Qt.lf!Ҳʞ)ob˘G.0:tF@~%.ΐ= z1`Pl}L,╶3xoQXai6NDzsЋ*~0Ȼz)܈(>uQPdQ-sw@jmorYUr#~; J*KٜeOHSU !|Ԟx 9ėFT5ƾ)0>UYD8oâ۱[ >"4tmӡHЯ˶+Eqd Pc嗝GvO?2vuwzS}C0# HQrUUv|Ir$9:;L2o|]+ٚge:H5v7fcFL)f,Ϡۍ@oUF08?)Y$tq=~OMzν+]$ID*:ʵ?E-_:8~6pZoҁQ:]d,5UR@їZ 1?K&A29.3c=f*Dn"Sc3t?M*Z#v:%[ܻ.bKskDU@ͬ:=0Uح0ҿ@.ߍB> cpQ 3@"\Vy.:J;^Y}ŢQ`4 t/1Jqʑ$ҶW06@cTbx=oYPS8\@M㋢AX"ldfifg5^'ea7a}FdsRG)ѭu<-g]#aHd37_,NK}3~YGɆ&R3q,/(w> E;N&$x{koVHfޑ3.ژCV,^Ϥ1:_۬~]*j5ݕj_觶+AUHE6Q .N `tU/H$O(?@J>[O )(r#/%:ӯ zD1;"~cY0PRVl=z.xyJv9KrE0gUқӬ]޽&ZGVTPe6cvlb^IJǑv${NjZZu_U#UJ<ŢҕB&ARseBw/O/EAR*f,ŗ ͂CރN[ez=\X86??i*R~ #츫:Mwu޻eV&HDB,k"uФ* G噚HgP$.}6?'Y%9I#CqS 7QUVJxOdd=6Q/`u8XZ%{ޜ']re.2#;-pSu)7dbI<$ tw4թL>#a<2)>$7TbQ,U/Q~!&w" PHH'`b21V;#,mZf#&/yM g3Jo I$@g)p" st6A2b-l1#?^XPЙ]8| /{J61ʉ'n˗q՟IȇYM^;v3G.$ Q;P9UEz&seuLSQG[[Ӝi9AgHkY`{>b*^R(gjym`Kb~)+8;ևYa>nrznYuQޙ&H:qg9dnh:Iajnl6x-!kv;U_UJkl̐O'-McR%9,G,7`kYBV!,N( F;/bU~?!vNHNJl:6vn=5tAUt8{ 7Wl& uxU_p퓏Qt}QNP4-7~ϼ,{c?F"ɂ[Eu z 18k҃ Qoh߳1{#+TYAOj]I!Ml}T"`U( I?|5G-dQ>n=9/kv{/J-,# * >#hK C<3MPDgr| }|o0SYװ "N.`=/^xs?3@$fěDsMFFYռlpc6JDxW:%+Z$M]R/P2:QA.$]J1%4G8PځeǶ(,فQɱ2:%K\H 6Lhns<N:уmVFOGdwUZBw']"+Rx/җ 
)<Դ_~4[FVuWn|ˁ~fj肨CvZ 1;D[Ȋ@L,B"O;u/=$#1z"iۻDW+0$ d|&k祫}Aɓ+L%uQo"]TzV3 |r UXɇ|dXH#tyFj 꿰stWy&.}¼=@mޯ052B[TUWn8AiEAf;iCKXdP5ӕ䝪k쾷fnUݔei-L$Lj*^GxzXZOqG0/b`5|H'ꏾYyـl4ci_qXV(iy;GSoSS "QIBr E='/6,p_I b9|m$ ;]v#U NT.`JD"ʯ&+ _ޏsN/=kU\,~!n0wTz7 : n3}݄$mg я7PF*`s31X_'PY$ (25=gm9>s%bIRFx@N6!+S$]8 IDATzK#Ʉ Wlx|=eob1PU\6/>.2: (t|aaWV}cgRķWfAImLs)i~݋NDcv1 {*gI9TFmz߶SwePrfި?CWuCwn*z^A\++)gXJ8 m߇ 8ЮX$HEszr{ fo3_fu~DT]G kS4wHIID[R ,[wsa^(<5L<*zcwwVS18+h9h@4Yiz/XJb{[ig0;J ?rQDhz+}xC]GX2~!Zo Ya6,"}Si8jmxVRc/H2X\z̘uZ=ǰSyɈl@G_Q҃^'Y%81^?RycC!%.$c | ~?yLfC?o$]xAw"@a'"ekeW%>ߎ׬w<$&46_5d, ^?P1q!-bHN]'{ %I:P,x뢈^'ax;nvF@o#K225q*KP yr;7(҈J_Ǯ (i@C[پ>[cܿ΢3!#6^6uZs4j:%&ʺ&p i 5v~π4;$HJ_ (mRަ( xY_[K0}&nEM{#;}#{ \.xͦSYJ3;ԲS:P 9~8&g 2,,[ʞ]*+SSJ܈ Ȫ,v"&k$>E"  {s~+0p.O_׆_s ν #I6gHE#XˏRtJ_B!}{s+-B f4 ΄;<˲,;s&&PȅŲCNgN#M9lrjŠ?؏KQzH5;=zI5fCI Kh %yZ`45Cp\UUTUE$Fy=.2Usm8Ez{{tl$i ho!K֗C4XrU<2 Ջ "ԴbPBu&/_ z#UݯasVV2GyTq$)ZDA3, zh(zL&&ć Wq8\L5B?JB한ix'9r9nܸ˗IR8 ԜkʻbyID{<¡&r hsLR4ߐM%P`dYOe*E~2uw^ @t@NGQVMx>%Ҝ;NGso2$͢o"W케~$H(OJ ̅mNu5p`_OR\vk׮pNs^E ̓Igr39qDicс͸XGj)[o$PLMESIqȃUNj+hETl6dnUUmyy9GL&^KsB1 1VAIiണaAf`0xtbX~RYYO4 LKHR;~z333CYYX dz qfDO})L|߾VQ5<<<׮]chd|B`ZvS(Ķ' 3u5i =11^ t:|Ȅ$ 65kJU~ ?EGnܸ+Wp?;-}DØL&1hm.]DKs MMMY{'J,#NkVd佰:n)))ٗ Lݯ42e~._S鞚]N"d2{o@<ҥK,,.7&>M311ǟ~\z֑dsk+4ؤ!.]D2uB>"M\tA&&ӳ7OJ67ob2q\5OOOH&9qz^ ӏoh4clu:, :Q B,--!I)qn޼,ugrʠIW-nܸ ,SS[X@?kƢ SWWGYY& C2("" ",1yPb.`0o!)q׮]X,3r=jzzk׮1;;l6gA#51>>JG AhkkpQdTK4errUU|= JUU&''z*\OM&'tUU?&jkd$:;gbb@ @C}myFUUBjjjhll|lIp:b6f=$tAA&AQ|>fY${e9y6U!"/vw?5h4ʵkr |mmm78KZIC6erj`0Hmm-2Z-:נ 2x<S᠄B!._CQWW@>gjj χݜEOEaxx-Ξ=Kuu5L&dPÜ{EP>}UUIӌ099$@GGc*;v9$Iz9@$ehkkA{d20+*4I 8l'^W666ꫯd2X,9rn@zffM xGt~^/OLLdhmmdAÏ  HOOS#fKkk+---.UUfeuǎ=sXxBeb(ϟjf"? jrrf|>k#$333NA׿d2aT,&Ȅa}}p:@4h3A,QG ݸyX,K6pOV+'yp:  4l *h~r]UU!JJJOr_\\dmmv_A?/KKK34weB*A>IfMQ L***AÏ 1>1AEy8(SZZx=ׇ(h;"LPԠaemmh$X?+d;EMO ߺDWWׁ 311ACCv@^/嚾!dYfjrJyy&'777c CQFGGm [[[lnnԤeHeZ'г,}}->iZZZpݚ#Aß'&&ZPk;4;;$TUUiyVAOw6}0f-:נ"ƕ{d2,8y:~:0TUUim<ڮs  BE&>p|ں:M Og2q\Ԗ#r##~ D" }O+*(z5t ֎h4J,ta}mvm׹ 3yѣ{.뵵5(Z),,,0:6F{{;ھD:fllmk 3Ǽj:weIZ@說H&Ѯ]wJnh:D<^+ 1C!666hllԮtBWҙ {zl"444PZZ E?|> >OF*Z&[۩i>eEޮeF4hx֖8KK464h}UУ(}}}+*+03==Mss3C bI|>Z^ۨٹ9v-zPEQ:ari5hx@lmoJCCVcxt:5TBWUɩ)8|0^W{,yiooRY4<nu[;N5'xm2x:x }{{~j먯cfL3L4< ( kͻ{B4~L\zNOOF҆4<Earj ^mlnnI}]vÇq:b38###TWW4kHRRWW9cj(cbbEQв#4<` 25=MPQ1ba45DBOܼyơC42y Fik{{gLc#Gs4hxdYn} :&''tڌJ芢022v(-eelllgysehX&-هoqqQ+mTBu`ll6*}(-,3?7G[[v] B,Uh)РSx^::::CW F6wh"3;;KMu5n[OeAϟ}wSUUM?CȷMΞ=lAگi]k;5-ikk?M.=#[Ȧ&' A(S,'`0H QQQA0AbcceN9s׼B BA1&`0xeN*d Ed=F*++osO]UU8WTz^ j߇"7Zۨ*SSSi%UU Br9M4v~W#2333\{v.--G75PK(byi鶝|{2fM)) qD'.2r9kJ>G$КP}H$ W&I>3"8:m!gTC &#mCAo4[ZZ[od Lʖ|UP5<Ә$UUL&ޅQamx<x6I3 UUu:rH$MK3AEmQ##uw<}}}@ww6T1X,244bAS `PUU d2RUU8H*,..ݍ҈1ť%VWW9t萶\$)*++q'ڪMMMtP]UU677IK|Xk"y뼶 -:נ!B8E#=ƭ^1 ??kMUUVVVJTUX,"2*fY{` r9Z[[5AC֘WC vRbEQX, {B,..rQFm޸5Ad2ioBfQ, IF$INSNx4iﱂLMM҂P( 2`45נG /,Ex<N:ua?"* z<B!677Fp1f'J,cssbɴ3_6vp8G###D2 W^q $cGR]] J36G"#JvNss3>gVUkI#-u%U)0?8@觥%(( SCMTD /1^mN6^*T lEzLck43lnmS(X].Z[[)//32WU: K$TQ sPUt*jA c~l]GeH@dBRzM z=f^ &LjnxI%Rvb\>df}cłd@s]Ȧ hdY1!a1o,BTVf3$eSUl*h{<ܧI&2X6Dn&b $2vL2lǠ6_bXX,X{l&U5||DlT5NDjko%l2F4$_PK2+cWfE]s;b1_8rRjJD}]`ߏnc4Eq셪X~DVO2g.ss~*[o`4 |Ʀ_,wRs̭dVR.qNU%8'\U-Ι ӧNQڏ+}10EBpPYYsq__c/^z*T-_oNz UCɄ<ǩ ?3+4sS6*1>ޢyl.BIrÌ߸8ٚCvG|gi=Al }_e 磬;MזH%&4ry҂Rr-&%Ե41,X*zI$brdd?D&PUb~bFƶ6npU9\Rى,J=6L&?AʬMy?njZ2q2QA!_TH01FAg_@UsyJl 6{~P2;1E$`PҌaFƙ#bwd { 喎lMwuT[++eP 9R4  i#WUl2A._DXvbK߲yp[ E`j|/bO{$<*,~g]8c LU#LTE)dI&Ө-6LFݎL2A # X6lo&jYaI'e`j3G*\T*jhYcrbo9*+}*N rQN:Sa67M;ٝTQonJgI(nǠGF*5,x`90u-H |T9b6-Lpӯ48imFL*Λߑ1݆$J."Ɂ(a;BxJbt:3bsXU$ 2΀aCv҆Y"zl;AmK38k!DflR zSwQLCpTE&LP\eD"W j,a}qSu%V֩U߿G`Wbg~t xE+\z 
.bʚ2\敿ﰢ5;̬_@R,\D(6պbQQ I})3enXHkc7-5|5>,uWeQg9nxH)H¢dܸ_ aGY* qFOYgPUJ쵋\ Dy ]Y{IYÔ]HEf7eIo+T|EYT̮2=fzY]"l03t9,Օx &~>M oKGLxK^d#BQ̫/S0Eꪪ8te8ֲF^y5~ +;x8v5*yֶ(I"#} BY%vU'VwΛ@8Swd|ѧn%@? O]Ͼ%- rO^Ӌcx}>L d蛤 ڞ3<Ѿ$qyqh5#.# 4얛-\̈́O]`cDnL0!z'7fmgEn ReQGiΜ@Dꘀ즪:@;aj~tO_cz!_ Agl9PLh_;?\׌,|R_PfS7>W5BcW%fytcD2fY.(k=87/2us,./fڏƣK0;F᷸p 5,.0t_x:ia8]m\Kx޿t-3#S]d֦X{fsjWRb"(E~bWy~կx7o {:yQk3,ljjב9U(bBQ^9x4ZLN@)LnHίqIIvmEWyW 8JjV>r~őfäA^J!WYyWob\p>ESC. ]~*.8r(mm9y̕g&h3G5-.!7yn;j!b29tݗF5#E^}]-׾ddbzӿk c1lz'(e;/8dmzX1C 7U.0̄\k9]ae)V‰֧/nkbqqi-ct $YjNX/r2mXB:3_ec+j!DpzGb{[A*J7) ɍY.y9T| P߾?F5, HNQA9WE8` V`@!W(Q-O> /k`5[c [ۀA#[m]ˣV_ ^FG]@ yv/;z 1>kʏOaPl6694eKc7MBޚ!;9O2:4.lf^}6~~@o#} ܼF4/|Vi4Eپ˲Vȣ")m(IŨVSPfDA\%HONQbnBGl zq[˪q&ygv?`Nh5j;c!μs}xՓq :s;<\hElc=ʭ"o)F$PJٷax:ͣ H*l D݂E4 2:9A,^Bgz{rMV^}ED) ۸M7pYd%BipoᶩRRsX{lJfsy9F{۫{d "Yqg>*JW&UA,J#{\]؋gHD}h5&xx#%~&a&@_bf@M[)U uW GӔkH 1>^O(fɑ^ x[*Y29 0;&䙙'\=Rt jhBgaS7jT7Vq:"-A p.N/etfl+qxKP43Kq&]޾j iٍes4|Yznl b+YrTcŅy&LCC7z .+6x9Y,+od17;C&a ,cNu &qԃI6;<.n=`Al^ y&[hA.s_""3/`s< K s`sC:҆+gRR;K:_,R zn%rE+/()n^^#Ԅo&[X'ٱeJNp8w.{YN&6ky0 EDu ,TՔ#޲}<[ӁZ)o;&:D}XUb ᶸq`q)0u4@OM$EIfQP$E]x[ 8Q4P?.7Li [X}8A8\ Xy3 ݈ƃA=_'~oI.b8u[X"w<W$jB4v)(4yāg?0[m8ʏKu\N?ơʉFvR "!M'_ZSq8TWNjs564QV;ߩOw7ƁtѪ$Xlv,j1W+('!H26Yq6W. xʚx(E3E\B$}5Di#P-8&K$IWWW%R^j|m ZlKJ ۑВIH85E Kt4D%X* (͆Ţn]PV3r:]74?@P`vSuKhIfCjnיzdYa_C6=N4 j-j".`ٶ ۮ%u qX-۹w\%Պ,5){l)ۦ`M ><-OB ɥ%w?&H|)+++^9-T;E~=;xe_xY~>AۙSX?(b5F)+*=Kvje7O~|d0dhfǶ-?{I]?6==|?Ay)-Ze 8?G={_Y ?ʏ* l6AIRxy֯!4Mt@?Ǒ\1&:g|Mf'1,>)LOLዕ}f$-+ⷋMBM&Z7xյa~K I2 M3"nk躱:Zȷ_ >¿X_IMov.WIrww)|Ȭr?u{A>ܾE?51]9)w /;j^k 4Mpmf>k75INa8؍e.}rʃg*s703t~z {AZKePdf9s=% LANWHy8qj?F!Გ.k:KJٜǍ; Yw&5; qZQ5 iz_!mЁ2_u]m4 ''GX,w^mJ +膀+%o,19*$B69@YI/+K HY)VV7yCbg}gyvZQqw8XSj:9Hg1p"^`enl!/㴂27jr$@ FأaJ&w75ޯrtRJpeL +ǥݼh]%M 榊iVhIme{Ab%OX8L L5%z`Y\ZD90H-α7REW(?HsK=W]$="0s{av3I3'J)2`#"vU$Q$q֥#b;AD% ]0LDYFz"BϦcôY@qByǴb۸QzY444]@&Aox4bD[7IE0SC9} hhjȊk I!454[a2,b:Emo.P@Vԭ$&ZACV"4E>$y;vMqÏQrHc~7"HE/ ؚ,o=9Q&aKd+D 9Hve{4$^VFƦ|pj#ŚeDڣK1M1%lħ/lxl "Rlc&Yl&Iאb|o&ّLA˯^++]iOlUGk2?1(/ N0/o&9m 4F]wFMp9}&I..Ǧp{sӛH4瘹 IDAT Xn\7_0;4=,̌2 0?WȂN&/p7h|aM[\L+zxOau80t['ZYe~Ac~vRkQ@6?2a^Ki>FkeR$Wk M^Ƥ]cmq)R|X~V7Q ){-1>*Չ v:8Q;U,2d9jNcjKzn X@Lgh4BGCܿ~tC)S#\vNnc&S]_ K+8 ~GEիLLJ(k9-ȂLm}f+@3T%;Й%6͵$Txí^76UGhi6KW_Bu9s?ǽ4 ܾ@:*9h  @Y-GNyJ C g0S'ɍwс2Qm1hec2Eg~pU]t-r~`#r q߶@,q((ϜDX=R9ʶ#4,6xu]lzTG ZKHͧ_6exyiOCgOp4_1 i]=\ƣ4[|ULQ%^¡-H>e|zǏE)+qs=NDEL?W vEB,/_ejvHu#GOc v;L:[Q(7;:/q4@JX7;ﲶAR픷!L,XkP_cƇɩ!~pYFGTU3 :Eܦ@ǒ72/7?7hj[jj+QJiz*4+kxl w/o`xVy?ѵjO~sB1ܑֆ:"Sv?ɩe?Z`ej~nE[C`Yrյ6"ًKP][x:nOPzHts-un$#i9rcyO/tPg*s;eI&)o"Ya=Qvʨ]J>=ȅ.'3qj\xcVӅݮHr;+ƁSǰ䦹~*\|Crj#uO/r?AñӴ61u?W BC'hkfez #KT74Q<5K \=%=Y䏴Fnm{~(br{XC=yE`zb%/+"wΓ*X 'h Ex.Ajé d*"xoz}dJJQg8ߥLnx 㖫B6R`e /VTxM9UXMmj@Mί'W.Fqp9,HXH2*J?$~dwQRV7`MO勷~NuK߱nkY_tȲB)"#nI SxY%vCu)UV఩.?XD-!K8Ur VnY*y_y0T =Q[.A@uz8mX~BA(h9vh@e fh8##V}M$)Eqx5QQ[`m3cb(xvYuR޸ƖF4"7,0i Yaiu DBZV&Fa6 'OSYWKۉXL224MIah8rʨqr A#8B}1l.dwu&G ns4`5VV1 O0SD(o_`rjlj*jYb~5)9curcAV"!6秙^i>uNVXfv~i ~B0*8}A:Bmcu 6ג䳛'CŔ*SClhJ!Jޏ\g "hX>:C#L bVzTƶj?{c7W?a Ta|p"`zNibUU<n$$+4C8{ J}5T4`azt~5 ӆ!Zpl bb`OM3mZ`"p9-S{١AL_w,?kTWDW2k2h )l^?F[݆4G"?By]#Zؘgu#'q *}, Y 0x8iWGWï81\Rko[7G 0,2Pti4MT4WND-pwvXwOq8@ԧ{=>:9ik](edIoT:Ams8(ibbQQՊ$niVBR-8fݚA&e#d]e0WSL$[ݯTCϰŐdvbG ӛdswl lVi|ՍE0k!jEt#E%յ]2UVe0 Sg#Z]DEi aEV`31?A0 H/Q|@;ULm1F2ź4Mdqkq]n}.RuGۂ>,Nc.v^epu~Bbe;U!ϰZ;]L2kd6sXvDb/̡HRV0Qƃ |kDM >ggi!]JQZNв:@V][.5Y0uoy#g~.[K۹Whi(yz\ҨoG͹uDm&AgfjB9E˻,-R0@pJL<&bgsjMP8WBnfy@g8-GL=C~aŵenv9Դ$gZr R(dHӤǘ\Шom ^QFz~٥~Z;}d-!jZPR`k+IܢV$P^$r p@t Ahɡ!-e(fVU54K2k8 aDL20@M[3J+ܟ]$nX. 
[binary PNG image data omitted]
././@PaxHeader ... nova-32.0.0/doc/source/_static/images/live-migration.dia
seqdiag {
    Conductor; Source; Destination;
    edge_length = 300;
    span_height = 15;
    activation = none;
    default_note_color = white;

    Conductor -> Destination [label = "call", note = "check_can_live_migrate_destination"];
    Source <- Destination [label = "call", leftnote = "check_can_live_migrate_source"];
    Source --> Destination;
    Conductor <-- Destination;

    Conductor ->> Source [label = "cast", note = "live_migrate"];

    Source -> Destination [label = "call", note = "pre_live_migration (set up dest)"];
    Source <-- Destination;

    === driver.live_migration (success) ===

    Source -> Source [leftnote = "post_live_migration (clean up source)"];
    Source -> Destination [label = "call", note = "post_live_migration_at_destination (finish dest)"];
    Source <-- Destination;

    === driver.live_migration (failure) ===

    Source -> Source [leftnote = "_rollback_live_migration"];
    Source -> Destination [label = "call", note = "remove_volume_connections"];
    Source <-- Destination;
    Source ->> Destination [label = "cast", note = "rollback_live_migration_at_destination"];
}
././@PaxHeader ... nova-32.0.0/doc/source/_static/images/live-migration.svg
[SVG rendering of the live-migration sequence diagram above (seqdiag output); vector markup omitted. Recoverable text: actors Conductor, Source, Destination; steps check_can_live_migrate_destination, check_can_live_migrate_source, live_migrate, pre_live_migration (set up dest), post_live_migration (clean up source), post_live_migration_at_destination (finish dest), _rollback_live_migration, remove_volume_connections, rollback_live_migration_at_destination; edge labels call/cast; sections "driver.live_migration (success)" and "driver.live_migration (failure)". A plain-Python sketch of this call flow appears after the nova-spec-process.graphml entry below.]
[A further diagram fragment with no surviving file header follows: "call, pre_live_migration (failure), _rollback_live_migration, Exception, pre_live_migration (set up dest), except Exception: disconnect volumes, delete new attachments", i.e. the rollback taken when pre_live_migration itself fails; a hypothetical sketch of that error path appears at the end of this section.]
././@PaxHeader ... nova-32.0.0/doc/source/_static/images/nova-spec-process.graphml
[GraphML diagram source; XML markup and embedded icon SVG omitted. Recoverable node labels: API Cell, Folder 1, nova-cells, nova-cells, rabbit, cell slots, cell slots, nova-api, child cell, Folder 2, rabbit, nova-scheduler, mysql - hoststate, hoststate, nova-cells, nova-compute.]
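The two live-migration image entries above encode the conductor/source/destination call flow for a live migration. The following is a minimal, self-contained plain-Python sketch of that flow, added here purely for readability: it is not the real nova RPC code, and the classes, signatures, return values and print statements are invented; only the step names are taken from the diagram.

"""Illustrative sketch of the live-migration call flow from live-migration.dia.

Not the real nova implementation: class layout, signatures and return values
are invented; only the step names come from the diagram above.
"""


class Destination:
    """Stands in for the destination compute host."""

    def __init__(self):
        self.source = None  # wired up once both ends exist

    def check_can_live_migrate_destination(self, instance):
        # "call" from Conductor; the destination then calls back into the source
        print(f"dest:   check_can_live_migrate_destination({instance})")
        dest_check_data = {"dest": "ok"}
        return self.source.check_can_live_migrate_source(instance, dest_check_data)

    def pre_live_migration(self, instance, migrate_data):
        print(f"dest:   pre_live_migration({instance})  # set up dest")

    def post_live_migration_at_destination(self, instance):
        print(f"dest:   post_live_migration_at_destination({instance})  # finish dest")

    def remove_volume_connections(self, instance):
        print(f"dest:   remove_volume_connections({instance})")

    def rollback_live_migration_at_destination(self, instance):
        print(f"dest:   rollback_live_migration_at_destination({instance})")


class Source:
    """Stands in for the source compute host."""

    def __init__(self, destination):
        self.destination = destination

    def check_can_live_migrate_source(self, instance, dest_check_data):
        # "call" from Destination back to Source
        print(f"source: check_can_live_migrate_source({instance})")
        return dest_check_data

    def live_migrate(self, instance, migrate_data, succeed=True):
        # "cast" from Conductor; the source drives the remaining steps
        self.destination.pre_live_migration(instance, migrate_data)
        if succeed:
            # === driver.live_migration (success) ===
            print(f"source: post_live_migration({instance})  # clean up source")
            self.destination.post_live_migration_at_destination(instance)
        else:
            # === driver.live_migration (failure) ===
            print(f"source: _rollback_live_migration({instance})")
            self.destination.remove_volume_connections(instance)
            self.destination.rollback_live_migration_at_destination(instance)


class Conductor:
    """Stands in for the conductor service that starts the whole flow."""

    def live_migrate_instance(self, instance, source, destination, succeed=True):
        migrate_data = destination.check_can_live_migrate_destination(instance)
        source.live_migrate(instance, migrate_data, succeed=succeed)


if __name__ == "__main__":
    dest = Destination()
    src = Source(dest)
    dest.source = src
    Conductor().live_migrate_instance("vm-1", src, dest, succeed=True)
    Conductor().live_migrate_instance("vm-2", src, dest, succeed=False)

Running the sketch prints the success sequence for "vm-1" and the failure/rollback sequence for "vm-2", in the same order as the diagram edges.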
style="stop-color:#D7D7D7"/> <stop offset="0.4168" style="stop-color:#EBEBEB"/> <stop offset="0.5394" style="stop-color:#F2F2F2"/> <stop offset="0.6579" style="stop-color:#EEEEEE"/> <stop offset="0.7724" style="stop-color:#E3E3E3"/> <stop offset="0.8853" style="stop-color:#CFCFCF"/> <stop offset="0.9965" style="stop-color:#B4B4B4"/> <stop offset="1" style="stop-color:#B3B3B3"/> </linearGradient> <path fill="url(#SVGID_2_)" d="M19.625,37.613c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.927-18.396,3.927 c-9.481,0-17.396-1.959-18.396-3.927l-1.229,2C0,35.738,8.787,37.613,19.625,37.613z"/> <linearGradient id="SVGID_3_" gradientUnits="userSpaceOnUse" x1="371.4297" y1="646" x2="408.2217" y2="646" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#C9C9C9"/> <stop offset="1" style="stop-color:#808080"/> </linearGradient> <ellipse fill="url(#SVGID_3_)" cx="19.625" cy="31.425" rx="18.396" ry="3.926"/> <linearGradient id="SVGID_4_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="641.0938" x2="409.4502" y2="641.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#4D4D4D"/> <stop offset="0.0558" style="stop-color:#5F5F5F"/> <stop offset="0.2103" style="stop-color:#8D8D8D"/> <stop offset="0.3479" style="stop-color:#AEAEAE"/> <stop offset="0.4623" style="stop-color:#C2C2C2"/> <stop offset="0.5394" style="stop-color:#C9C9C9"/> <stop offset="0.6247" style="stop-color:#C5C5C5"/> <stop offset="0.7072" style="stop-color:#BABABA"/> <stop offset="0.7885" style="stop-color:#A6A6A6"/> <stop offset="0.869" style="stop-color:#8B8B8B"/> <stop offset="0.9484" style="stop-color:#686868"/> <stop offset="1" style="stop-color:#4D4D4D"/> </linearGradient> <path fill="url(#SVGID_4_)" d="M19.625,23.613C8.787,23.613,0,21.738,0,19.425v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,21.738,30.464,23.613,19.625,23.613z"/> <linearGradient id="SVGID_5_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="635.0938" x2="409.4502" y2="635.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#B3B3B3"/> <stop offset="0.0171" style="stop-color:#B6B6B6"/> <stop offset="0.235" style="stop-color:#D7D7D7"/> <stop offset="0.4168" style="stop-color:#EBEBEB"/> <stop offset="0.5394" style="stop-color:#F2F2F2"/> <stop offset="0.6579" style="stop-color:#EEEEEE"/> <stop offset="0.7724" style="stop-color:#E3E3E3"/> <stop offset="0.8853" style="stop-color:#CFCFCF"/> <stop offset="0.9965" style="stop-color:#B4B4B4"/> <stop offset="1" style="stop-color:#B3B3B3"/> </linearGradient> <path fill="url(#SVGID_5_)" d="M19.625,23.613c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926l-1.229,2C0,21.738,8.787,23.613,19.625,23.613z"/> <linearGradient id="SVGID_6_" gradientUnits="userSpaceOnUse" x1="371.4297" y1="632" x2="408.2217" y2="632" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#C9C9C9"/> <stop offset="1" style="stop-color:#808080"/> </linearGradient> <ellipse fill="url(#SVGID_6_)" cx="19.625" cy="17.426" rx="18.396" ry="3.926"/> <linearGradient id="SVGID_7_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="627.5938" x2="409.4502" y2="627.5938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#4D4D4D"/> <stop offset="0.0558" style="stop-color:#5F5F5F"/> <stop offset="0.2103" style="stop-color:#8D8D8D"/> <stop offset="0.3479" 
style="stop-color:#AEAEAE"/> <stop offset="0.4623" style="stop-color:#C2C2C2"/> <stop offset="0.5394" style="stop-color:#C9C9C9"/> <stop offset="0.6247" style="stop-color:#C5C5C5"/> <stop offset="0.7072" style="stop-color:#BABABA"/> <stop offset="0.7885" style="stop-color:#A6A6A6"/> <stop offset="0.869" style="stop-color:#8B8B8B"/> <stop offset="0.9484" style="stop-color:#686868"/> <stop offset="1" style="stop-color:#4D4D4D"/> </linearGradient> <path fill="url(#SVGID_7_)" d="M19.625,10.113C8.787,10.113,0,8.238,0,5.925v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,8.238,30.464,10.113,19.625,10.113z"/> <linearGradient id="SVGID_8_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="621.5938" x2="409.4502" y2="621.5938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#B3B3B3"/> <stop offset="0.0171" style="stop-color:#B6B6B6"/> <stop offset="0.235" style="stop-color:#D7D7D7"/> <stop offset="0.4168" style="stop-color:#EBEBEB"/> <stop offset="0.5394" style="stop-color:#F2F2F2"/> <stop offset="0.6579" style="stop-color:#EEEEEE"/> <stop offset="0.7724" style="stop-color:#E3E3E3"/> <stop offset="0.8853" style="stop-color:#CFCFCF"/> <stop offset="0.9965" style="stop-color:#B4B4B4"/> <stop offset="1" style="stop-color:#B3B3B3"/> </linearGradient> <path fill="url(#SVGID_8_)" d="M19.625,10.113c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926L0,5.925C0,8.238,8.787,10.113,19.625,10.113z"/> <linearGradient id="SVGID_9_" gradientUnits="userSpaceOnUse" x1="371.4297" y1="618.5" x2="408.2217" y2="618.5" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#C9C9C9"/> <stop offset="1" style="stop-color:#808080"/> </linearGradient> <ellipse fill="url(#SVGID_9_)" cx="19.625" cy="3.926" rx="18.396" ry="3.926"/> <path opacity="0.24" fill="#FFFFFF" enable-background="new " d="M31.291,46.792c0,0-4.313,0.578-7.249,0.694 C20.917,47.613,15,47.613,15,47.613l-2.443-10.279l-0.119-2.283l-1.231-1.842L9.789,23.024l-0.082-0.119L9.3,20.715l-1.45-1.44 L5.329,8.793c0,0,5.296,0.882,7.234,1.07s8.375,0.25,8.375,0.25l3,9.875l-0.25,1.313l1.063,2.168l2.312,9.644l-0.375,1.875 l1.627,2.193L31.291,46.792z"/> </svg> <?xml version="1.0" encoding="utf-8"?> <svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" width="41px" height="48px" viewBox="-0.875 -0.887 41 48" enable-background="new -0.875 -0.887 41 48" xml:space="preserve"> <defs> </defs> <linearGradient id="SVGID_1_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-979.1445" x2="682.0508" y2="-979.1445" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#3C89C9"/> <stop offset="0.1482" style="stop-color:#60A6DD"/> <stop offset="0.3113" style="stop-color:#81C1F0"/> <stop offset="0.4476" style="stop-color:#95D1FB"/> <stop offset="0.5394" style="stop-color:#9CD7FF"/> <stop offset="0.636" style="stop-color:#98D4FD"/> <stop offset="0.7293" style="stop-color:#8DCAF6"/> <stop offset="0.8214" style="stop-color:#79BBEB"/> <stop offset="0.912" style="stop-color:#5EA5DC"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_1_)" d="M19.625,36.763C8.787,36.763,0,34.888,0,32.575v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,34.888,30.464,36.763,19.625,36.763z"/> <linearGradient id="SVGID_2_" gradientUnits="userSpaceOnUse" x1="642.8008" 
y1="-973.1445" x2="682.0508" y2="-973.1445" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="0.0039" style="stop-color:#9DD7FF"/> <stop offset="0.2273" style="stop-color:#BDE5FF"/> <stop offset="0.4138" style="stop-color:#D1EEFF"/> <stop offset="0.5394" style="stop-color:#D9F1FF"/> <stop offset="0.6155" style="stop-color:#D5EFFE"/> <stop offset="0.6891" style="stop-color:#C9E7FA"/> <stop offset="0.7617" style="stop-color:#B6DAF3"/> <stop offset="0.8337" style="stop-color:#9AC8EA"/> <stop offset="0.9052" style="stop-color:#77B0DD"/> <stop offset="0.9754" style="stop-color:#4D94CF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_2_)" d="M19.625,36.763c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.927-18.396,3.927 c-9.481,0-17.396-1.959-18.396-3.927l-1.229,2C0,34.888,8.787,36.763,19.625,36.763z"/> <path fill="#3C89C9" d="M19.625,26.468c10.16,0,19.625,2.775,19.625,2.775c-0.375,2.721-5.367,5.438-19.554,5.438 c-12.125,0-18.467-2.484-19.541-4.918C-0.127,29.125,9.465,26.468,19.625,26.468z"/> <linearGradient id="SVGID_3_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-965.6948" x2="682.0508" y2="-965.6948" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#3C89C9"/> <stop offset="0.1482" style="stop-color:#60A6DD"/> <stop offset="0.3113" style="stop-color:#81C1F0"/> <stop offset="0.4476" style="stop-color:#95D1FB"/> <stop offset="0.5394" style="stop-color:#9CD7FF"/> <stop offset="0.636" style="stop-color:#98D4FD"/> <stop offset="0.7293" style="stop-color:#8DCAF6"/> <stop offset="0.8214" style="stop-color:#79BBEB"/> <stop offset="0.912" style="stop-color:#5EA5DC"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_3_)" d="M19.625,23.313C8.787,23.313,0,21.438,0,19.125v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,21.438,30.464,23.313,19.625,23.313z"/> <linearGradient id="SVGID_4_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-959.6948" x2="682.0508" y2="-959.6948" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="0.0039" style="stop-color:#9DD7FF"/> <stop offset="0.2273" style="stop-color:#BDE5FF"/> <stop offset="0.4138" style="stop-color:#D1EEFF"/> <stop offset="0.5394" style="stop-color:#D9F1FF"/> <stop offset="0.6155" style="stop-color:#D5EFFE"/> <stop offset="0.6891" style="stop-color:#C9E7FA"/> <stop offset="0.7617" style="stop-color:#B6DAF3"/> <stop offset="0.8337" style="stop-color:#9AC8EA"/> <stop offset="0.9052" style="stop-color:#77B0DD"/> <stop offset="0.9754" style="stop-color:#4D94CF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_4_)" d="M19.625,23.313c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926l-1.229,2C0,21.438,8.787,23.313,19.625,23.313z"/> <path fill="#3C89C9" d="M19.476,13.019c10.161,0,19.625,2.775,19.625,2.775c-0.375,2.721-5.367,5.438-19.555,5.438 c-12.125,0-18.467-2.485-19.541-4.918C-0.277,15.674,9.316,13.019,19.476,13.019z"/> <linearGradient id="SVGID_5_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-952.4946" x2="682.0508" y2="-952.4946" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#3C89C9"/> <stop offset="0.1482" style="stop-color:#60A6DD"/> <stop offset="0.3113" style="stop-color:#81C1F0"/> <stop 
offset="0.4476" style="stop-color:#95D1FB"/> <stop offset="0.5394" style="stop-color:#9CD7FF"/> <stop offset="0.636" style="stop-color:#98D4FD"/> <stop offset="0.7293" style="stop-color:#8DCAF6"/> <stop offset="0.8214" style="stop-color:#79BBEB"/> <stop offset="0.912" style="stop-color:#5EA5DC"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_5_)" d="M19.625,10.113C8.787,10.113,0,8.238,0,5.925v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,8.238,30.464,10.113,19.625,10.113z"/> <linearGradient id="SVGID_6_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-946.4946" x2="682.0508" y2="-946.4946" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="0.0039" style="stop-color:#9DD7FF"/> <stop offset="0.2273" style="stop-color:#BDE5FF"/> <stop offset="0.4138" style="stop-color:#D1EEFF"/> <stop offset="0.5394" style="stop-color:#D9F1FF"/> <stop offset="0.6155" style="stop-color:#D5EFFE"/> <stop offset="0.6891" style="stop-color:#C9E7FA"/> <stop offset="0.7617" style="stop-color:#B6DAF3"/> <stop offset="0.8337" style="stop-color:#9AC8EA"/> <stop offset="0.9052" style="stop-color:#77B0DD"/> <stop offset="0.9754" style="stop-color:#4D94CF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_6_)" d="M19.625,10.113c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926L0,5.925C0,8.238,8.787,10.113,19.625,10.113z"/> <linearGradient id="SVGID_7_" gradientUnits="userSpaceOnUse" x1="644.0293" y1="-943.4014" x2="680.8223" y2="-943.4014" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <ellipse fill="url(#SVGID_7_)" cx="19.625" cy="3.926" rx="18.396" ry="3.926"/> <path opacity="0.24" fill="#FFFFFF" enable-background="new " d="M31.04,45.982c0,0-4.354,0.664-7.29,0.781 c-3.125,0.125-8.952,0-8.952,0l-2.384-10.292l0.044-2.108l-1.251-1.154L9.789,23.024l-0.082-0.119L9.5,20.529l-1.65-1.254 L5.329,8.793c0,0,4.213,0.903,7.234,1.07s8.375,0.25,8.375,0.25l3,9.875l-0.25,1.313l1.063,2.168l2.312,9.645l-0.521,1.416 l1.46,1.834L31.04,45.982z"/> </svg> ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/images/nova-spec-process.svg0000664000175000017500000013200100000000000023607 0ustar00zuulzuul00000000000000 launchpad create a bug create a blueprint create a blueprint End states out of scope code merged bug fix? idea REST API change? submit spec for review a feature? spec merged blueprint approved for release spec required? 
add link on nova meeting agenda blueprint hit by feature freeze re-submit for next release blueprint unapproved apply procedural -2 upload code for review remove procedural -2 review blueprint in nova meeting no yes ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/images/nova-weighting-hosts.png0000664000175000017500000022303000000000000024314 0ustar00zuulzuul00000000000000PNG  IHDRwSsRGBbKGD pHYs  tIME   tEXtCommentCreated with GIMPW IDATxw\םX9PD QD,rXso0O3ffwpQ9}($JdYOU O̯^%HdeB2뽑N2UҪ)/a}+tS 3Wa|5SLN3JXF$@j3ZӉḑ||5M8rYbǺ =W:C|:6rRRg/^cD<'Ȳ#ի,sgnԛ2WqWM!e>182nje9(+LyEhT ߒ$ϋQHqEluyE~z/wvVfgqY˺q(;O p|hX%TV>ri6K\=L$Djk;!ydnH\f| `*iv\Vd`| =p P.15_5T,2=9Y2[X ?iR(qHB@ ݰ4LRD_k%*!c'T/pذ{'GTӾ"! !aw:qy#rL²#f|bW`w) [ d373KX|r9g)&0 S¥`bbPhx$V a:jki#0KDd_\>i^,L 6_hpejS<NWS+9IGVȲLպ]Ӷ]'{ח˜kp@3ǚ/ c1|UPɅ$6Uyu4Klؐ@UWWFenNef&nUAnv#|̆ mؤwvjk9u mUYgn}93͉C'ԷS_6F(eDU$Lv,L q*S Dkh,݇æaqoy*jWrBMuJcE@]eIMxa-xoB9{~sIWWiR']@S6v ]?|vhtk sG(f7t)>ݧ bUr?g}[![Bő뢔Gob6f#W]峩3y6GXXXBxLrd"A:Y{ʊ-Rv B $DT$ *{y$csДM(6:-d)&p:QdXkndy<'rE o:>0 29rӧEH:ot\F&[ вi3tChi*=PSz&rn݊X KJ1=éO?gRDRT{ؾcroR$ Bw( $Esқp@`dxuFXyK @$.^Nбe#z~sOQ> l!D/]]kp`s D* 6~YS2Jnd 9v<%te >3پMZ=?jXS#37C"DH pǁ$QaxP) íSP%ISLM{H~~+!9GOuk)fsQzִRBǣgD=FyS!JN?5LR6; (]i~:R{dE IBOQb bzдAS/t#Ǚc[oîB0;pw~iﹱ ;IR vC+wAN=tGi#0|Ffm}n(5.>B＀^sΟ"sptGȦS PDiZd 2sTD"N[@R =TI,9:#xjS8zFbCe2, Y2vz&2s\< |Au:*Z%a(0ä.eyE+Rꢘҡ|?vUba_6o 0u^&p8RfZ#l]$XJs'͟U_B>1ų}4As 9ui?%!0y~ΩǙYLs3 [D&_>5nmMgOxblW1 zO vR"I>fmR.(/wrn A:1,2p%>8L"ZSc$* ꢁŀ"Q&&W07~zs?FϋkK,ӓD5-E%U!.M5\x#u56Ѵ"Nu. }Nը h0 :Cl U5( .#)GSIcsg/rĎgh:O&ʉ1~rO};uU>TMC@Ul5-D+/0,)] w;œ?\DOcvx|1M|tt~;>@?璉Y`rK2.4B:m^ir$Wh̓Oj LTtF47d͙lz-68o97eIĖfC2STh 3RW$z38\ϋa068\dmgiۖӒwgx`q(^$myo\^!ݓGHkhC꒤]ӈy7Q "m)M3;4nx5?٬2kΎ 9Ĺ[8KxE`2pz额}ol|I&ۚqh[30q f'uYN}) _};ֳ=pϽ.HU( MCP, KibIMe/ȩ vYL b~D!cw<3`tHS]YpJM=>}0\;ѓ-ff4t?΂ś,?ryc<#5THC 5+ F}qGx7bkȩ9]ᢳ,p<=7_#$0D5R?Nv4|xpL, <%IJub-0֚ˊ,-.=av-,>i"-;KeuqԱUZvH I]T Ӡ\.i[ Ɍ NZa05y3O} LΕX']*}2g\>~,Q*̏?ͮ7gfhqVԲo.?YV"mN*>BhR ?U$joL3iik xػՉzlb'R]l9siJV=ف$OcLO܆eCTjb\8IYwʱ35вai,_U檝/JGN_9H]HHb ~?FD Yͮ9.q96r>C]z9پ%UC Ȳ+w.Iߥ)hK_.[B8* .xy7E5(ʒnjP0I:(ӓ=TS, 囩y6/U!4FCkK ?VlzH.J1_@ɋuGUeYι|p?>D/<1^<$ ݋sfSY>bMƆK[]2 ktV$*sRߓylaPddn($#Yq!Me !'ZC3`Rpxd*mK_$Fe8_>ծaWFX?9BI#dIc;oDG(h ہ=K*MWxP;>3\juZ2n2_tNv|[߃XùdK+x(B0VCǖU !cc+%)nrc҇ N~)#Su"m~a>OBȁG[yis3^s6}1k}aRGh1)v3 غ?H]g9.M)t W/rT9~9M!Y;ߛD#x ʔT| ,d1ۖ D>_ _("L\.G^l7O[~6l&V(6/.B%1ȸB5ktոVVe\16B3-u4"Ɏ=>j2ϒ,MdI"5M4ͧ"%$r[wmi͎r%tM,\O$&U]Փ'8 kF&8|8)f |kd;< Ӵ6xa6w`'YCI_b䗧F29fyͳ۞k֠*X=jkȀ&q1>bRP$4lz9_`ϼ$9p84|55_Mo VNbsHfwk7p2._ЇP_{oqd, FNsSN2=?Ğv)KYX?bdd.1DOj1Ջh0ϼ2 UK<剐l{)I`J*/̖4&#&]Wku~O4zك.!)21T́X0 Iz0GzK-/ƦܸcCN4WPIbfO(x(6'HnۧQ=KP$IgoSK<.݆{^o{)>PR_Srv$Iƚȅ|s a bo3K{x?eޠ.6?{C:3@$g( E/;qmCmYv=(+oah_Vyo~L -eiha█!9=t>L/oAv7!wW G_#A!2Ay:7Cl\ivkĪd wC( 4L4KMkFx<$YCx:be<3KQ >vocs1_F~kv;? IOa^3|uk x8[C0Vw_:6C5ި1nK^'STClZ :T$B6\r|:<*k<]bBMÝuYs42:̳546I,@}P ش'V@,ӹv-2Ϙ\htj3mTH IDAT)Ef㮝t]ev*AE:lY*Vo߼` `u~j.) '_Jҫâ?X߁"?^IhX˳7F{$\w]@m6Y2_?+KOl^!2x4.=zWg{)7o& Z22_B&WYRfcSg'M-,,I07m"zg#sIZL@WYB6L&: y9y$Ѩbxry@,*(a.-jW=Lɟ (6$ݱ,wMS0<:FX>kD]MgAQhNk7dfff1a19yhtuʼkIJ=c/#y@ݪ96+o]0 LX'#+ʊGlH1d7E4K&U5 ˏ^ 9؈$=@&-<̻ȣU4ڜ̿33;82e˻x!VlNV̇nJnBPLc*4lه)'?a 01JtY3&s16a[۷8fLPFuڰZQN|B"ܕkmZ腏܏VYG] `#D(I)=0|E bYYy 9sH,cV2+<3Y/LI7ʗ9GxWӜ8ߍacSRUӨ3ar¾beY"9v5N$IB"?K@Z/~ݿ%<z9R;p5*٬`Pӳl k.CLP.`*njCڎ{d!^" X7FLLqg C w[ ȥg&sDbK>'vmo>o26DW3y-$ʲ|b N^$Lʹ,݋򥴝$)H&5~ bWL2M!JӣD[i޵KbޏeD.* j-G[VfrHE]srrrh+_3<Fn!1tXj^24_݊L3OS6!X#q(g?#xm}ӆ}B *o";p\~l2HTUVa)sBGWЅJUXM3+ᱤ{}')[& RRD)Gb ŒIU+:\9櫧mwn0z`y@|b̥d6 ` PM{H.[® cӸQJ)/fk?Ė"Ρ*& }3lv(N\ 3F 몄|QUt J@H*BZ[? 
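The unnamed diagram fragment above (between the live-migration.svg and nova-spec-process.graphml entries) shows the rollback taken when pre_live_migration itself fails: the source disconnects volumes and deletes the attachments created for the aborted migration, then re-raises. Below is a minimal, hypothetical sketch of that error path; the class and helper names (disconnect_volumes, delete_attachment) are invented for illustration and are not the real nova internals.

"""Hypothetical sketch of the pre_live_migration failure path described in
the diagram fragment above; everything here is invented for illustration."""


class FakeDestination:
    def pre_live_migration(self, instance):
        # simulate destination setup failing part-way through
        raise RuntimeError(f"destination setup failed for {instance}")


class FakeSource:
    def disconnect_volumes(self, instance):
        print(f"source: disconnect volumes for {instance}")

    def delete_attachment(self, attachment_id):
        print(f"source: delete new attachment {attachment_id}")


def start_live_migration(source, destination, instance, new_attachment_ids):
    try:
        destination.pre_live_migration(instance)  # set up dest
    except Exception:
        # rollback from the fragment: disconnect volumes, then delete the
        # volume attachments created for the migration, then re-raise
        source.disconnect_volumes(instance)
        for attachment_id in new_attachment_ids:
            source.delete_attachment(attachment_id)
        raise


if __name__ == "__main__":
    try:
        start_live_migration(FakeSource(), FakeDestination(), "vm-1", ["att-1"])
    except RuntimeError as exc:
        print(f"rolled back: {exc}")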
,SS[X@?kƢ SWWGYY& C2("" ",1yPb.`0o!)q׮]X,3r=jzzk׮1;;l6gA#51>>JG AhkkpQdTK4errUU|= JUU&''z*\OM&'tUU?&jkd$:;gbb@ @C}myFUUBjjjhll|lIp:b6f=$tAA&AQ|>fY${e9y6U!"/vw?5h4ʵkr |mmm78KZIC6erj`0Hmm-2Z-:נ 2x<S᠄B!._CQWW@>gjj χݜEOEaxx-Ξ=Kuu5L&dPÜ{EP>}UUIӌ099$@GGc*;v9$Iz9@$ehkkA{d20+*4I 8l'^W666ꫯd2X,9rn@zffM xGt~^/OLLdhmmdAÏ  HOOS#fKkk+---.UUfeuǎ=sXxBeb(ϟjf"? jrrf|>k#$333NA׿d2aT,&Ȅa}}p:@4h3A,QG ݸyX,K6pOV+'yp:  4l *h~r]UU!JJJOr_\\dmmv_A?/KKK34weB*A>IfMQ L***AÏ 1>1AEy8(SZZx=ׇ(h;"LPԠaemmh$X?+d;EMO ߺDWWׁ 311ACCv@^/嚾!dYfjrJyy&'777c CQFGGm [[[lnnԤeHeZ'г,}}->iZZZpݚ#Aß'&&ZPk;4;;$TUUiyVAOw6}0f-:נ"ƕ{d2,8y:~:0TUUim<ڮs  BE&>p|ں:M Og2q\Ԗ#r##~ D" }O+*(z5t ֎h4J,ta}mvm׹ 3yѣ{.뵵5(Z),,,0:6F{{;ھD:fllmk 3Ǽj:weIZ@說H&Ѯ]wJnh:D<^+ 1C!666hllԮtBWҙ {zl"444PZZ E?|> >OF*Z&[۩i>eEޮeF4hx֖8KK464h}UУ(}}}+*+03==Mss3C bI|>Z^ۨٹ9v-zPEQ:ari5hx@lmoJCCVcxt:5TBWUɩ)8|0^W{,yiooRY4<nu[;N5'xm2x:x }{{~j먯cfL3L4< ( kͻ{B4~L\zNOOF҆4<Earj ^mlnnI}]vÇq:b38###TWW4kHRRWW9cj(cbbEQв#4<` 25=MPQ1ba45DBOܼyơC42y Fik{{gLc#Gs4hxdYn} :&''tڌJ芢022v(-eelllgysehX&-هoqqQ+mTBu`ll6*}(-,3?7G[[v] B,Uh)РSx^::::CW F6wh"3;;KMu5n[OeAϟ}wSUUM?CȷMΞ=lAگi]k;5-ikk?M.=#[Ȧ&' A(S,'`0H QQQA0AbcceN9s׼B BA1&`0xeN*d Ed=F*++osO]UU8WTz^ j߇"7Zۨ*SSSi%UU Br9M4v~W#2333\{v.--G75PK(byi鶝|{2fM)) qD'.2r9kJ>G$КP}H$ W&I>3"8:m!gTC &#mCAo4[ZZ[od Lʖ|UP5<Ә$UUL&ޅQamx<x6I3 UUu:rH$MK3AEmQ##uw<}}}@ww6T1X,244bAS `PUU d2RUU8H*,..ݍ҈1ť%VWW9t萶\$)*++q'ڪMMMtP]UU677IK|Xk"y뼶 -:נ!B8E#=ƭ^1 ??kMUUVVVJTUX,"2*fY{` r9Z[[5AC֘WC vRbEQX, {B,..rQFm޸5Ad2ioBfQ, IF$INSNx4iﱂLMM҂P( 2`45נG /,Ex<N:ua?"* z<B!677Fp1f'J,cssbɴ3_6vp8G###D2 W^q $cGR]] J36G"#JvNss3>gVUkI#-u%U)0?8@觥%(( SCMTD /1^mN6^*T lEzLck43lnmS(X].Z[[)//32WU: K$TQ sPUt*jA c~l]GeH@dBRzM z=f^ &LjnxI%Rvb\>df}cłd@s]Ȧ hdY1!a1o,BTVf3$eSUl*h{<ܧI&2X6Dn&b $2vL2lǠ6_bXX,X{l&U5||DlT5NDjko%l2F4$_PK2+cWfE]s;b1_8rRjJD}]`ߏnc4Eq셪X~DVO2g.ss~*[o`4 |Ʀ_,wRs̭dVR.qNU%8'\U-Ι ӧNQڏ+}10EBpPYYsq__c/^z*T-_oNz UCɄ<ǩ ?3+4sS6*1>ޢyl.BIrÌ߸8ٚCvG|gi=Al }_e 磬;MזH%&4ry҂Rr-&%Ե41,X*zI$brdd?D&PUb~bFƶ6npU9\Rى,J=6L&?AʬMy?njZ2q2QA!_TH01FAg_@UsyJl 6{~P2;1E$`PҌaFƙ#bwd { 喎lMwuT[++eP 9R4  i#WUl2A._DXvbK߲yp[ E`j|/bO{$<*,~g]8c LU#LTE)dI&Ө-6LFݎL2A # X6lo&jYaI'e`j3G*\T*jhYcrbo9*+}*N rQN:Sa67M;ٝTQonJgI(nǠGF*5,x`90u-H |T9b6-Lpӯ48imFL*Λߑ1݆$J."Ɂ(a;BxJbt:3bsXU$ 2΀aCv҆Y"zl;AmK38k!DflR zSwQLCpTE&LP\eD"W j,a}qSu%V֩U߿G`Wbg~t xE+\z .bʚ2\敿ﰢ5;̬_@R,\D(6պbQQ I})3enXHkc7-5|5>,uWeQg9nxH)H¢dܸ_ aGY* qFOYgPUJ쵋\ Dy ]Y{IYÔ]HEf7eIo+T|EYT̮2=fzY]"l03t9,Օx &~>M oKGLxK^d#BQ̫/S0Eꪪ8te8ֲF^y5~ +;x8v5*yֶ(I"#} BY%vU'VwΛ@8Swd|ѧn%@? O]Ͼ%- rO^Ӌcx}>L d蛤 ڞ3<Ѿ$qyqh5#.# 4얛-\̈́O]`cDnL0!z'7fmgEn ReQGiΜ@Dꘀ즪:@;aj~tO_cz!_ Agl9PLh_;?\׌,|R_PfS7>W5BcW%fytcD2fY.(k=87/2us,./fڏƣK0;F᷸p 5,.0t_x:ia8]m\Kx޿t-3#S]d֦X{fsjWRb"(E~bWy~կx7o {:yQk3,ljjב9U(bBQ^9x4ZLN@)LnHίqIIvmEWyW 8JjV>r~őfäA^J!WYyWob\p>ESC. ]~*.8r(mm9y̕g&h3G5-.!7yn;j!b29tݗF5#E^}]-׾ddbzӿk c1lz'(e;/8dmzX1C 7U.0̄\k9]ae)V‰֧/nkbqqi-ct $YjNX/r2mXB:3_ec+j!DpzGb{[A*J7) ɍY.y9T| P߾?F5, HNQA9WE8` V`@!W(Q-O> /k`5[c [ۀA#[m]ˣV_ ^FG]@ yv/;z 1>kʏOaPl6694eKc7MBޚ!;9O2:4.lf^}6~~@o#} ܼF4/|Vi4Eپ˲Vȣ")m(IŨVSPfDA\%HONQbnBGl zq[˪q&ygv?`Nh5j;c!μs}xՓq :s;<\hElc=ʭ"o)F$PJٷax:ͣ H*l D݂E4 2:9A,^Bgz{rMV^}ED) ۸M7pYd%BipoᶩRRsX{lJfsy9F{۫{d "Yqg>*JW&UA,J#{\]؋gHD}h5&xx#%~&a&@_bf@M[)U uW GӔkH 1>^O(fɑ^ x[*Y29 0;&䙙'\=Rt jhBgaS7jT7Vq:"-A p.N/etfl+qxKP43Kq&]޾j iٍes4|Yznl b+YrTcŅy&LCC7z .+6x9Y,+od17;C&a ,cNu &qԃI6;<.n=`Al^ y&[hA.s_""3/`s< K s`sC:҆+gRR;K:_,R zn%rE+/()n^^#Ԅo&[X'ٱeJNp8w.{YN&6ky0 EDu ,TՔ#޲}<[ӁZ)o;&:D}XUb ᶸq`q)0u4@OM$EIfQP$E]x[ 8Q4P?.7Li [X}8A8\ Xy3 ݈ƃA=_'~oI.b8u[X"w<W$jB4v)(4yāg?0[m8ʏKu\N?ơʉFvR "!M'_ZSq8TWNjs564QV;ߩOw7ƁtѪ$Xlv,j1W+('!H26Yq6W. 
xʚx(E3E\B$}5Di#P-8&K$IWWW%R^j|m ZlKJ ۑВIH85E Kt4D%X* (͆Ţn]PV3r:]74?@P`vSuKhIfCjnיzdYa_C6=N4 j-j".`ٶ ۮ%u qX-۹w\%Պ,5){l)ۦ`M ><-OB ɥ%w?&H|)+++^9-T;E~=;xe_xY~>AۙSX?(b5F)+*=Kvje7O~|d0dhfǶ-?{I]?6==|?Ay)-Ze 8?G={_Y ?ʏ* l6AIRxy֯!4Mt@?Ǒ\1&:g|Mf'1,>)LOLዕ}f$-+ⷋMBM&Z7xյa~K I2 M3"nk躱:Zȷ_ >¿X_IMov.WIrww)|Ȭr?u{A>ܾE?51]9)w /;j^k 4Mpmf>k75INa8؍e.}rʃg*s703t~z {AZKePdf9s=% LANWHy8qj?F!Გ.k:KJٜǍ; Yw&5; qZQ5 iz_!mЁ2_u]m4 ''GX,w^mJ +膀+%o,19*$B69@YI/+K HY)VV7yCbg}gyvZQqw8XSj:9Hg1p"^`enl!/㴂27jr$@ FأaJ&w75ޯrtRJpeL +ǥݼh]%M 榊iVhIme{Ab%OX8L L5%z`Y\ZD90H-α7REW(?HsK=W]$="0s{av3I3'J)2`#"vU$Q$q֥#b;AD% ]0LDYFz"BϦcôY@qByǴb۸QzY444]@&Aox4bD[7IE0SC9} hhjȊk I!454[a2,b:Emo.P@Vԭ$&ZACV"4E>$y;vMqÏQrHc~7"HE/ ؚ,o=9Q&aKd+D 9Hve{4$^VFƦ|pj#ŚeDڣK1M1%lħ/lxl "Rlc&Yl&Iאb|o&ّLA˯^++]iOlUGk2?1(/ N0/o&9m 4F]wFMp9}&I..Ǧp{sӛH4瘹 IDAT Xn\7_0;4=,̌2 0?WȂN&/p7h|aM[\L+zxOau80t['ZYe~Ac~vRkQ@6?2a^Ki>FkeR$Wk M^Ƥ]cmq)R|X~V7Q ){-1>*Չ v:8Q;U,2d9jNcjKzn X@Lgh4BGCܿ~tC)S#\vNnc&S]_ K+8 ~GEիLLJ(k9-ȂLm}f+@3T%;Й%6͵$Txí^76UGhi6KW_Bu9s?ǽ4 ܾ@:*9h  @Y-GNyJ C g0S'ɍwс2Qm1hec2Eg~pU]t-r~`#r q߶@,q((ϜDX=R9ʶ#4,6xu]lzTG ZKHͧ_6exyiOCgOp4_1 i]=\ƣ4[|ULQ%^¡-H>e|zǏE)+qs=NDEL?W vEB,/_ejvHu#GOc v;L:[Q(7;:/q4@JX7;ﲶAR픷!L,XkP_cƇɩ!~pYFGTU3 :Eܦ@ǒ72/7?7hj[jj+QJiz*4+kxl w/o`xVy?ѵjO~sB1ܑֆ:"Sv?ɩe?Z`ej~nE[C`Yrյ6"ًKP][x:nOPzHts-un$#i9rcyO/tPg*s;eI&)o"Ya=Qvʨ]J>=ȅ.'3qj\xcVӅݮHr;+ƁSǰ䦹~*\|Crj#uO/r?AñӴ61u?W BC'hkfez #KT74Q<5K \=%=Y䏴Fnm{~(br{XC=yE`zb%/+"wΓ*X 'h Ex.Ajé d*"xoz}dJJQg8ߥLnx 㖫B6R`e /VTxM9UXMmj@Mί'W.Fqp9,HXH2*J?$~dwQRV7`MO勷~NuK߱nkY_tȲB)"#nI SxY%vCu)UV఩.?XD-!K8Ur VnY*y_y0T =Q[.A@uz8mX~BA(h9vh@e fh8##V}M$)Eqx5QQ[`m3cb(xvYuR޸ƖF4"7,0i Yaiu DBZV&Fa6 'OSYWKۉXL224MIah8rʨqr A#8B}1l.dwu&G ns4`5VV1 O0SD(o_`rjlj*jYb~5)9curcAV"!6秙^i>uNVXfv~i ~B0*8}A:Bmcu 6ג䳛'CŔ*SClhJ!Jޏ\g "hX>:C#L bVzTƶj?{c7W?a Ta|p"`zNibUU<n$$+4C8{ J}5T4`azt~5 ӆ!Zpl bb`OM3mZ`"p9-S{١AL_w,?kTWDW2k2h )l^?F[݆4G"?By]#Zؘgu#'q *}, Y 0x8iWGWï81\Rko[7G 0,2Pti4MT4WND-pwvXwOq8@ԧ{=>:9ik](edIoT:Ams8(ibbQQՊ$niVBR-8fݚA&e#d]e0WSL$[ݯTCϰŐdvbG ӛdswl lVi|ՍE0k!jEt#E%յ]2UVe0 Sg#Z]DEi aEV`31?A0 H/Q|@;ULm1F2ź4Mdqkq]n}.RuGۂ>,Nc.v^epu~Bbe;U!ϰZ;]L2kd6sXvDb/̡HRV0Qƃ |kDM >ggi!]JQZNв:@V][.5Y0uoy#g~.[K۹Whi(yz\ҨoG͹uDm&AgfjB9E˻,-R0@pJL<&bgsjMP8WBnfy@g8-GL=C~aŵenv9Դ$gZr R(dHӤǘ\Шom ^QFz~٥~Z;}d-!jZPR`k+IܢV$P^$r p@t Ahɡ!-e(fVU54K2k8 aDL20@M[3J+ܟ]$nX. )vƗ QX[]Eh<դ/A _EiЎ$* {BϬsH'W-n^'cm-CG%JNd*U A˲Sş DIc[yS("l$Aux@gvh%›o5R],$':Y1p;.R3~+Zv `fnC6%Ә> U8mg.fQ{vU$Z!kKE ?+!:k+Ng2FA7+KJIDĠoX^lG Y ko=FIm [s3T_zF>&S %X_[`(Z C^[mUҚ#ؙ{ <ē3ɸr+:V`\`/?KEON0zҮ4'#H5Y "\T8T kjlClqލ{İLs7७HHǵw&5p܎Ds7؝$WCkS q笲J8x(7owljF%lNcw<]5Ni>u6`*ۈ >IG]~L]SFkK w/sF78vwXb^`zhk5ځUE$CGe\{蹦":D- %̵Uνt׊ k[imHhgL[XYWpl"dӧH~rL[ɑ#{qbBg |,6>*0x.$k: 8_DaAR\`%f_fAjK8Z6;Da U5OX46H֖˧:_]b@#QQr K8ԇ7na!n3.z[&Y4bg.x#3T:CB18}7o2K9ҹIaJ߿ho04`"?_a$ : a(8*8 <:+ms{ݽoLPUGh,G VPQxD(ېIs*[K JES3vs%^auG8vU~MܹsNR(ƦR{hM tsOqiƦf(v/!훨aGY(uMtvq]a7R-K{JXE2Z8l}wHL-y +Xn, : cCܸp9dZ*T'߹O/ؘ]yǛhg=.|``/12Ɖ7~V]Q&^߀2-I_!΀qz:;Xc=oІE*N7[ۖT"Vۈ#:?D~f?C[_VgzV&r jx5XB2s!?ۿey%dq#n{o FD!~ikqr71\Q|NW ӹ95ͬ-v EY* 9է? VWU;h"h?I&b aUe'_$ZfQRB8L ~@lDc!J+1ޯLm~N#Cøm{µͬod%w0a%AAsAm^ Kdr6$@Q%ڱ_TJ 7LA$\$Viߢ,&\G0B,v?1TYMuH.,3)k>āM(h肕r6B6"^A0u4C TVeid!XU O˺PRQ?& M"~.&5Yg*iOrqG#g'"6,/':s` T`e~R-M &x9'`,Vȡ:CG%_M`uaFHU-(hr"0b!Cre ?Fvd=%@q{<!T,`Uތ":}c2k,/#4l'ۉHPȐCR˫((Ϝ4yfņF? *  %47)OhGX܏$ɄʱIP__/ũ.-a.\NI%qVPt5 ӍM+kX|1:I,Ǫl6DBFDbTH.i:~C=wQ}T#Y_^deewvode~Mvt$ua+ r" v|+HuZ[ }hIRjbrxD66628av Hba+$8zVm$PP[Qɱ<>/vNjr;Q-vn;(xP{OR,icxĦţ}W:1S`M)OxmG|, 3ny3;BO~ԊE'_<^5v6[(}=k> zsˬ5GtDq+{<ءccxj @1(K,aל嫅o}sg HMa%Jծ _h2T;n OD|;S?OxA.|'뮿ŧÒϢ{\|ϵlEbLDRxrrgy9QylB^]w(>9'_uOL|Wa9W [}{,%@KUlʟ!X<[k{{/ &<~mwV!3gb3[gA-w 4I/MrwK6)G&}SklT@»c ͮ_OϮqS>;]>ss'lczk~b2|or7M! 
)`umkC]ֳ.BM7o)M q}Mx] MMwɩE dU4֣eё$*y·GO&?78ٜ@;ug{{{7o3:r9َ xQF2r)ƧD u;>.L }IDATqw`/ v3:BXb@j}rVw8Gϝtc```9sF[xRq/)NITה> ^duM|0#GV}g_9bVy0@Z"3Tvǚ\55Fz#t_5-;w?߃=.3=ekCp Dأ/n X YRo!(v|Ve- Z>Cj-%,!3NE/{}.S1WqQݻ;:p48=؃Df}]H^B!G/"AE9]== yB$6 f.K.a:$ MtAƪHd]ɢ(ͦ](4u24GVm4 c%Ï,m9Ae1V#:_%x[Fjɴ=؃5O~%q)D8?ʭ{֐ft:#] UхYe-,r~c":⠼{/a/32m?Kx8-XNe^#bbd ͡q>%M=bM6b=؃1l wS̑\!>  N=ξNO &8{!r8N\>]TTWVm7$ vϼL+<켌L#O35y!Vje,QSqN&2t2 DZN|{OvKKc)#{ $# w)F%{MoSs/ i膉()(JQM,jEr,oZ0 ?1f% ?kAjB4b5u`70gC;MsqLҦ/%#4f$MRnRٯdX$ݵ&mק6{ gf&__?(p{kڔŐcrb8{=5ί,إݯ<{xqvK)v>c: : i㔛 6i|~WUwTky`ZlLzm\.s>K: : :: : : G rIENDB`././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.2176075 nova-32.0.0/doc/source/_static/images/resize/0000775000175000017500000000000000000000000021023 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.2216074 nova-32.0.0/doc/source/_static/images/resize/cross-cell/0000775000175000017500000000000000000000000023071 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/images/resize/cross-cell/resize.dia0000664000175000017500000000145300000000000025054 0ustar00zuulzuul00000000000000seqdiag { API; Conductor; Scheduler; Source; Destination; edge_length = 300; span_height = 15; activation = none; default_note_color = white; API ->> Conductor [label = "cast", note = "resize_instance/migrate_server"]; Conductor => Scheduler [label = "MigrationTask", note = "select_destinations"]; Conductor -> Conductor [label = "TargetDBSetupTask"]; Conductor => Destination [label = "PrepResizeAtDestTask", note = "prep_snapshot_based_resize_at_dest"]; Conductor => Source [label = "PrepResizeAtSourceTask", note = "prep_snapshot_based_resize_at_source"]; Conductor => Destination [label = "FinishResizeAtDestTask", note = "finish_snapshot_based_resize_at_dest"]; Conductor -> Conductor [label = "FinishResizeAtDestTask", note = "update instance mapping"]; }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/images/resize/cross-cell/resize.svg0000664000175000017500000002560200000000000025120 0ustar00zuulzuul00000000000000 blockdiag seqdiag { API; Conductor; Scheduler; Source; Destination; edge_length = 300; span_height = 15; activation = none; default_note_color = white; API ->> Conductor [label = "cast", note = "resize_instance/migrate_server"]; Conductor => Scheduler [label = "MigrationTask", note = "select_destinations"]; Conductor -> Conductor [label = "TargetDBSetupTask"]; Conductor => Destination [label = "PrepResizeAtDestTask", note = "prep_snapshot_based_resize_at_dest"]; Conductor => Source [label = "PrepResizeAtSourceTask", note = "prep_snapshot_based_resize_at_source"]; Conductor => Destination [label = "FinishResizeAtDestTask", note = "finish_snapshot_based_resize_at_dest"]; Conductor -> Conductor [label = "FinishResizeAtDestTask", note = "update instance mapping"]; } API Conductor Scheduler Source Destination resize_instance/migrate_server select_destinations prep_snapshot_based_resize_at_dest prep_snapshot_based_resize_at_source finish_snapshot_based_resize_at_dest update instance mapping cast MigrationTask TargetDBSetupTask PrepResizeAtDestTask PrepResizeAtSourceTask FinishResizeAtDestTask FinishResizeAtDestTask ././@PaxHeader0000000000000000000000000000002600000000000011453 
[tar entry: nova-32.0.0/doc/source/_static/images/resize/cross-cell/resize_confirm.dia]
seqdiag {
  API; Conductor; Source;
  edge_length = 300;
  span_height = 15;
  activation = none;
  default_note_color = white;
  API ->> Conductor [label = "cast (or call if deleting)", note = "confirm_snapshot_based_resize"];
  // separator to indicate everything after this is driven by ConfirmResizeTask
  === ConfirmResizeTask ===
  Conductor => Source [label = "call", note = "confirm_snapshot_based_resize_at_source"];
  Conductor -> Conductor [note = "hard delete source cell instance"];
  Conductor -> Conductor [note = "update target cell instance status"];
}
[tar entry: nova-32.0.0/doc/source/_static/images/resize/cross-cell/resize_confirm.svg: SVG rendering of the cross-cell resize-confirm sequence diagram above; markup lost in extraction]
[tar entry: nova-32.0.0/doc/source/_static/images/resize/cross-cell/resize_revert.dia]
seqdiag {
  API; Conductor; Source; Destination;
  edge_length = 300;
  span_height = 15;
  activation = none;
  default_note_color = white;
  API ->> Conductor [label = "cast", note = "revert_snapshot_based_resize"];
  // separator to indicate everything after this is driven by RevertResizeTask
  === RevertResizeTask ===
  Conductor -> Conductor [note = "update records from target to source cell"];
  Conductor -> Conductor [note = "update instance mapping"];
  Conductor => Destination [label = "call", note = "revert_snapshot_based_resize_at_dest"];
  Conductor -> Conductor [note = "hard delete target cell instance"];
  Conductor => Source [label = "call", note = "finish_revert_snapshot_based_resize_at_source"];
}
[tar entry: nova-32.0.0/doc/source/_static/images/resize/cross-cell/resize_revert.svg: SVG rendering of the cross-cell resize-revert sequence diagram above; markup lost in extraction]
[tar entry: nova-32.0.0/doc/source/_static/images/resize/resize.dia]
seqdiag {
  API; Conductor; Scheduler; Source; Destination;
  edge_length = 300;
  span_height = 15;
  activation = none;
  default_note_color = white;
  API -> Conductor [label = "cast", note = "resize_instance/migrate_server"];
  Conductor => Scheduler [label = "call", note = "select_destinations"];
  Conductor -> Destination [label = "cast", note = "prep_resize"];
  Source <- Destination [label = "cast", leftnote = "resize_instance"];
  Source -> Destination [label = "cast", note = "finish_resize"];
}
[tar entry: nova-32.0.0/doc/source/_static/images/resize/resize.svg: SVG rendering of the resize sequence diagram above; markup lost in extraction]
[tar entry: nova-32.0.0/doc/source/_static/images/resize/resize_confirm.dia]
seqdiag {
  API; Source;
  edge_length = 300;
  span_height = 15;
  activation = none;
  default_note_color = white;
  API -> Source [label = "cast (or call if deleting)", note = "confirm_resize"];
}
[tar entry: nova-32.0.0/doc/source/_static/images/resize/resize_confirm.svg: SVG rendering of the resize-confirm sequence diagram above; markup lost in extraction]
[tar entry: nova-32.0.0/doc/source/_static/images/resize/resize_revert.dia]
seqdiag {
  API; Source; Destination;
  edge_length = 300;
  span_height = 15;
  activation = none;
  default_note_color = white;
  API -> Destination [label = "cast", note = "revert_resize"];
  Source <- Destination [label = "cast", leftnote = "finish_revert_resize"];
}
[tar entry: nova-32.0.0/doc/source/_static/images/resize/resize_revert.svg: SVG rendering of the resize-revert sequence diagram above; markup lost in extraction]
[tar entry: nova-32.0.0/doc/source/_static/images/rpc-arch.png: binary PNG image data omitted]
VM instanceSecurity groupVolumeSnapshotVM imageIP addressSSH keyAvailability zone Box.20 Network Controller Network Controller Box.5 Storage Controller Storage Controller Dot & arrow Dot & arrow.14 Dynamic connector.13 Sheet.22 AMQP AMQP Sheet.23 AMQP AMQP Sheet.24 AMQP AMQP Sheet.25 REST REST Sheet.26 local method local method Sheet.27 local method local method Sheet.28 local method local method ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/images/rpc-flow-1.png0000664000175000017500000012002600000000000022120 0ustar00zuulzuul00000000000000PNG  IHDRYiásRGBgAMA a cHRMz&u0`:pQ< pHYs&?IDATx^ ?pEPWJHb"nqGçDTB *"ʧĨ QED%cHL&1Im=3=<̽oWw_SU P*@T P*@T P*@T P*@T P*@T P*@T g}N۷oFm3Q̶xn+[)G!{84T P* mm}馛}|?1r;Z5e @v7Q^=ˇy7h555QCe\< *@PdžnD1WKW>  <6o~;wf 7VzMxT P*@@eP-|166P zٹ|Դv5T P*@@;v\cF Zllllh+WG;v9ϝ ύ P*@Ȏ=6d?~`6666.ḩb鮭1;kJT MMѥ-ו!llllͷM4bf |T dCF6Gh6666.]RU&]Ґ-gWebӿoZ8+C[m:.6666 r;O|VUOl ^,2fMNj0NȓC1X.&7S| _ ŹeI&fT P*Z'4<h}Fw33~*+@e T P*V7^8e N"iRmQ`.yT T@۶>XtE$FKoY=@lllnnMuhYLړrdTfW?}4m0 L9[mFKjXT P*@,W @dmQ=aKT P hhM'mmQ=]u.%?T $@> CғњGT `llllDlϮ=OQ1eOZK.grDe*@@h磾O`Nx# jyE!F"ˠT  `u ( kdlvEl F@> `"md ,V8z`x4*@@F =2DyxjX^ u{(A@ 6666i :u9N9 Ϩ^DQ6;eP*P4r)DѱKߵ[=աCL뮻s#)_9/QF5L= N/`25휅8 %YW SF$}Pkk/]w\>ǏWs{E]䔿K.jΜ9Ny0uSܦ͜g5֩'` N 3Q*@Z+)Pf>B@K&+ ` X> U(@e66H ` ^lp)JoXa&&I,6,` l޻5uVmm 6@%ֹK |`vW>T dBD פ==<lkXl&z*I%`x$*@r(@"mKQũ`Ū\zj~΢k>;>v!euh%&G lb}|/-_#a(@"mKc'Z9`[n%!X Ib/e=8B)r8hօa>C9U&_Y"GM{QxoXlnXl;ĄG+)i3g{ `3FG\_LwNG9KYiC u A CJ,oll,Pk`y[=a/+y&b?nf7JVs`zX,(˶g/׏KB`c!OSS oL}77*E9/P AN{@lj/ +R᥽aONNXl̨O)2Eaq)@KYK7y x Xv(edzYhzQj^as"'Ӌm⽕j6{" `pX*@GD׎e$K8'B&$N,!L4<#9ݳiEVU"*ٸSz# ɉ‰qd\ d ` 97pl,@ R:N{B'b+ rK[!¶E(|}ӌ~YS T " m/ 66H `F&²(1+[ <6USF` Kا=\XP: .ix^,>TV!mvnցrecK Ј1kk7m#*eZQ&ʐ|?VQlJ#̱ $0Ɉ NW>y|',[x/ {Y7yX7 4'X*W9A@,ʇAl/^&pF{PyL$ vɉ`vǝv"U_W>װ?`5g&&2[[ "l(pP~0><sHX,; vQCd=@Գ>I! !NJLµ|靧1B\h.8[Bm20dX.~/VC&sL@_^?")OnMuܹsziV%c5l Xly`^X+}b?٬+Kk/_'Qs6b{YbЉ2n:_f5Gƽ^BlN |f9~^W_Z.L 0H%zt.%p}?|RZ`4hP)OϭVSΈ^Y̋PO,Ao"jVg_y]_{냊 Wypd?˟^K^foCHfyvR/y>\]|W(,-7So}OX)C`cMyx] JPڵkWuM7zl{U޽K eYVWГK%֧e,<GVB!M@yڰ}k2-+Ʒ*;h&c.s\,ݲu+6N!,:KSz_:tu/{7Ϩ,k3:6g!<6 J^Q `voB_%E߉Gɱfn VR|ئ7ooг> S]` -mn=hQ8W,#کS'u 7իWGLݾ8~5$IL]u&` 9E?,Ҍv9cccc`mlgYs|,l[ظDf]}!`kj@ݙ@Xޠ8#F7xC^5Gj|숑VYr,Ko2E4+<ƕ:GOP9Sc3ks=!K؄*!:ֆ ^Wm~{Yj˱#fh8fxxe^!,:d,s;ҋ/=Xc|lguf jKKZyd`Ye#|a2k^ g dR&.Z֞1uAqƈVhpFaÖ EA5 ` Q.eCYW6lO'xBׯ@` oT;w.,r|l4,_NMMMS'L㐠8`j6o|7~CB6M<[>Kyr2eNuvkypw&e(?ͺ0-028i9,h}G1z~59c&o=8;Z $` X,Ee <럸\uH6QQ9@Lo@10ȋQvC2E7)PjzPy emB|^f} ,Va"(,G?zմin<@Ϫ?uMX?~SuMx,˞8J0 pg{FB|@s9$m(6Ӌ[ 4weJXt%- 8f5!=>SG/H\pBˠ6c)t^NH]pZh0aѣ6lZjU]!=sLէOj)Y, ` 6a_`ˁOMϪYF_ޠyoxXMFJ%vvi'/aaE(gl_ji9ۗS%وfmƎ>裺qƵ{ҩUz\QD%`s֓t6nJքz,f*L!)komӠR]v)o7=ǥL S0zY_ kN%X?)i{+;6'NrYM(A:czV2i V~McA~G{,pPE<&@+chM>+p6]l6M+B,8_Bul^}ݞVZt:=k_׺_޹Ոd٢l{k`c ?|?8qBh&7L m*Ui}>W{fcmw rqb@(<+~V&C۱ iXl-RvK{G=?\}]۶׿ZmV\,XShNX l~(seHHr%؉[US$Ne#ٳgP_)Mjv*+xq;?\T|I]ɠDXg紟 6fVZGuO\+C3^a3Vj>GGqSj-(XLd{LBًLdzq,n֘{Kl{'ڴU+Z1;0v[O<PI,E6fgHu FDѮ(^BӜI[/azZ,,$N2iI1&b(Ij'OP͜;B"c`1h=uu oxٳ:KCM:U}駉>LV?O?.Rl (le=f[Y2 Nv{>)[`3i륇x4.vj,.e`ᕑL)3 與՜^F.Ky8n~XoȌwy%胔rP1Ly*q#ۥI]r%&q1B,X,f&~ bR(f ?k^Ń>H `+[l}bAUk5x| )clSk6h S4@Sk ԬԼhaRX3,Nt CSR͚+[miُ[sT@dW]oT2lX`1 6mxa]ӏ5vpCue)BZz>}ݻ:՛oQ%_y['pLM'zȞQ=.oGNGc+@@v4b,jf?26.2`Y6PȎ! p^s5j 6 v9 ;n8oJX?O>PW]uٳ3fn,B}MqG^x {I 7<Փ|؉nYróJasGʳ}69?֩&:L `M`v\xmLM,Ʊ9 8R+m'zrFS%mYaEK#;!wɘ+Iخ`-ZcȧB|g-,oHou6 @f><.k{c]|X'S$39* `fyϣv"9x6lvI9! 
p9?mzBWP R gV:&w}j;=&MZ}քZl^HJ]ClcKmaQB<^rG km7l!xTk{zD0Dy_]#bRW=.5%wXFtjTsϩoQ=%,}: /r `}% 7:l0Ld/\O>K%BI7ݦ `ǃYW h.Ab;|E8&62W`su9~2mSH\xM]#t11BI{uPA,p}ꩧu1qSW\qE}#lP t uh:I{EݫJ8!IB o~ҞY<[wʖ !~]wݥ0$,cilKՏVڴ|=Iױǝ:q&K-7U+-3 ^Z$OUM+9~vjY!ZR9x~ͬ?fEVj`]ղN=Ƿf`1#0f6w3br0h E~|W_uޝ[~s`TuI{@8kv;YDL{:PCnnԬY2q輪.eK,\PaJXĉ` ̩ |ro+&YXշ~[]ry-e= u 7WꤣVWn-z"\x^7*[D8`zi>a귖jr^(/x~/d+o_Wc`csqw眣~zjwb‹uy]?6xaO<(;h5iXz<_l /&r lNeoNBXmN@"+0o9H"#)a~+!dIʖr\lC9&dC sT'z`. ZWj/j,ׄ}/}%@I~!P`'( aHXJ `S٧  cõSl6 T  d`1k5& ʨ: 96hmT*`cK 1ʕ+ը#=//'NPC[MK:~D߫N;[>yd V`|Me,,a  P/ꕵ6@fT d@,`PBY]֞؄Jke,$-C1#>*`c^ucZW{3'ք׫/l-{ԀexPuܸq(V1v"v;o,6.ǡ%`'% d`-&0(o VJPl{Rs0m/ z'[Y1<:ýz=X@묣~{UWy XK8&f ɘAy޼y^3^r= ɢ v@dwM 41@ DK:hq\mu_y&lnze+ȏ0`^w9AXWgl.2KXQu`_"bҦFH0†ixⷻᄏٓĽvU=kR`Xjc`W ^I[Hx)ݧErA2> `V,Ac`1v/`++kڳ'BmsgKXV:D&:L `T6,KƸժ !~%/:u^a}hu\U.S/f̘+ 0+/y`A)\/sZ½D6U];Lu : lX,[-RE0rh=t=>o_o&cdXٰc1GP \Cu\"D&:L `T6,KƐ,wq5g!;ݛmsgϮ'N&r_x)`Lt]j ܧ5$:X'QPF%` _c[;"P8eo A`u]^X,V\.o̘1jСjN.`"zT\>`l"<6,K0F :먣:J=c>sjᄏ6hyI{_&zgJK<}-D,Bh"ZTL>׏X'K:2m׮/G1eOQ9j="K%Vkxbc?d{b %KZ,ւuI#>[ >'O5ar.ӳ uM`v?lzEkn׎dFMMMSӭ}IN & 6 #,c.Z#`[&kvt-UW d{s56hmVs;kޚ=&[nEmmCLX3&lz lzE <Յd%`Cy?s`a'% DFAmQ Ɣ! pj0%m2O VсN:Cm.%Wbvߠ5Yz"P=O#}xc|kBMu5^H[/Ν;7t"`ueOMj{Z\"φn赕]o K6m8CYaA,֩s< I\JuW2JgDlo'~gtJ~V<4uV؇5 n>NQ=PݻP.D1%go ]tS~9s>vs᭭@~d-lgd$FMRmh d(Hz?~Ho\,:F=gyt7zc`` 7n,`#|Pa2(oϽD"R##a$x'&kJ`wd$FMRmh \bL:В7vM7U^{4N_.אq~4G?<$XeKM!`-ZW`jP>lF=K'S{>%; 1t\zm"4rԩjžiҤIb9z)-:vTM Ξ=[U>1mOd;HH{ ` 삣TKmma)9d}hS`U,ŀF$@$BnMB{O~ /԰ ̘FpҩgK\yN%`Gb'% `Sr!rR l IpI(,vv:rHhѢRz衇=T] E%lw@: dVXlj;O=JXO?* >LX;s/Ǟc|~H%`jBDZ9QuWp+ !:q-c3KYi/&Fq ΪVTKM=ar]wۣ| kΟ?K;m4/r-jРAxj< ٝxkqݻW/cܔ' vE͙3ǩh0uS{0ԠO;UhW=E[|X~;C(„G7 ӳ{SRVJ%AXlf ALԱc|jƌva "ZtjQk2si4(=HQZ $bx_x,`+Ʒa/+y6p N 3Q+@%f .Tc۴i*+\GH.Xl6v V?Pe,[Z?~fr#QH `3҉b#:vUzt^&))!m!` Y-[%xXl߬9TK4aR=[n{52}.iˤC%`sh]?%X{<+æU6'r2&[yG\O\=oAKHm\C%` ɛihXCXթGG="VEd D/PI֐E>7'63<#9ՠ)WK%`Xl򖂦%I7H)X ե@<+$Q,'t,"ٳrb˕W uyh*!غK:O'Ym XlV ~av%_f! 
u<+[McOTMiއ[;/C6 x|+p>=+5&6p ` ~kyeWk& i:6Qq 6.?)Q$%W"hiDv%ʎeep!` a˵WvmX+∣쁕tE޻͐{ĥBǥ.-X,@mK~۶m} yy>6ɲjT[ܝ TP^玘gT>)l6V}s٩.Cs?^)/2.]9s8 a6Ե}GB`szE]̉ -ey3؟`k`{myfW^:"최cfdĒZ41/]wA 1k-t~_tHtqM'l&|1t`R(ס [XdQXg#} l=`kR;6U˼؄'ݢ7mC؆e;;%lΩeCg/vcer,f~۰eoX,n..f9ucP-ͲN;_ٯ^(H6#z߾aa ?,]gu`hhKCP=IpTlI}&W= s`/6;jO$[l Ap.+0[I󵽺c;ز]gMn ݐl uu ZkOkz!c 3ܳ(9r|Am/7kn󋜲˗')yUR>g-l\~R6%"'@ ?_(;?fg 5y3T:U̐S@]FGǬ ?`K^U RtGlkQ F*aQ@K&ZONvq ն4\sáܾ.לyJ` JQ n zI6/4 `!*/ZK.04엲Ug_AHC4\ԁh1;ZVe9:yclϘ҉TW4e˸]s*3#|MPm28xq,bs68$`iģ@oB' z,x`74J$,4$sOTlB~@3ʸF|WX 5={fDž}e ~G2UfD.!ɘYׄF\(KB]rm85˖I!eH'.0.,Vf/tRGݾ9I$L`^fX˯ I_]Ji)ZJ`LrQ6fXVū߄Dk8ZZ~%dva6K9fÝέs˭Ejau~E9] F=>7:(:Ko3%%cP+"\p#{d#P)@%:r"`g;$ű`\Tnrbx[t%`sfOz:Xbu\1Hok.@~${"'&Ƕqc>R&kdt=Fܟ C,67KJuZ` 032gaBSCŒT ` XF#$B[k<}78Yɤ`ՇP*X,F@a_ux%.x<<*X,F@#` ʤ`3uϲTn ` 9K$݁xŞL#ueM%֭O Xd@ `[,f11@YTZ>hnO?T&yYK=uz^$Z8@IMX4st#G}~_%ݡO?vX`;wD͙3'0~j뭷'eW577;tM"ϥa}3w ` Q,l[ظDf]Ϊj?_*Ph!`Kr xw}4 3X{q^Sw+0ͷU;찓S{mvmwoS>#Lw=(+A%F htqnd2`љG)26X4bsf}#7ܷ9N: vxlXR&'6zV*V xl@%`nlf"fPk~cNMw/W3$Ya%`i2 0WYGʳё~-y9),i1YtuL1ј \j:9ԒqˀϢma)9d}hS|@6wQq/}&q\sj~͸K6/\t`MR`ibP /%.yd:+!1CMϤ{Z ͨ^:q@cO0Y;Y=H$ʶL6U~qQ>c}<灜,F>\ruvꀲ$ïiX,*@^JUIxuV4`Ő4biR?㽖o1 ^-QmUvh]Q"yt%4CʳNj#(7^W.+k2K=L Ica_9>`O֖{qKϢ䥔]5^]WK-9-F.O04=koڍ 6RX{oԣ\x1 6_Au[!ߘSR=96/H.HƫC%`Mxq= ^Q3$_/h3Q,3x|À[,7vHzuʥR¤;ӝr2VQ9xA;#` oXۀ@WՄVs `즷Ƌ0»k$n[ףv'<X'[j5)'3Q`K%>ZDȃY,#$i$X" !MViV_ tۃtl}7@lS(@MϵCM&@ hh*xƷt\/"ꫯzU3g]ۭ[w5GAW `[_@p1pq,X6C4Yg^{5oWLSNU{.l>+U˸ΛqKϢ䥔]5^]WKJhg`Xu=|b\a;*][nXa fRby)eW$e$!` /qypX.=0&`b؃7!I'Kw}j*u:@NTeRjrɌTjL]WKqJe@9JfS 0}Yzʕ+= 6C,„;d&*y)XDs$*wF%^2/UY3_2Jչ&%p=蠃ŋ{epbJ(ǁ>qyQdK Թ8d&*y)XDs4Mkt8s^6<":L `T6=ȠqnC]:aʤwkA#`ix^n{`:u}<Л?[sN뙐Ȝ`LQLe3Pbʮ4:,KeHi0駟V .T\sѣ+\5g;73$c)L<_,Z!:L `T6؅{vS45Ël,4kQi<=묳fmƎ>裺yۯݺuWnӖ)֩ `4х.DTtmrj13`#1uE.Ԋ+26[mm 5m>sjܹꨣb5k5UmŸ]}WfNzCu#Xm`tav [N-L+]h'zЪf;k5 ^-YD=jwVV/_Rt7頃Ju#'uFf(Ѡ}KuenZL%!<$ڞra ,H0Bq6 /Zl0a|~3[o?n kՎ=ZuС=ղ9Ku"ا Ff(Ѡqz`aFNhY0NM,ߝ gXԘeԅ#Gag=20uOv;1/gaPIjX@ !~ǔ,b|,@kbq7|Svi%mll"MDu2Ψ F NS[<Zn6D'LQm߷袋7tQC 'ekN ><0_|(wwT{vʋaZ_S~8~66P `^_1V3g_Z>CJ ۩&jk#+@` l[}L؂XBSAFcgm{ɓ'do[#SO;ep jСNy)L]s"to7tF>|~3^O4D+WT/~;?p[5=j/=~{iso` n`f"'zސr1 _0PrĕXxQキj?MJ' W_}kVu]v'|RkظKKX~[ptd6786{BLK%L9sǏ~aQb<[n^L:T&y"ڟeMtZiYF8eol?` Q֞|VXlZcZO:HuzyW}wۭbx`o7… ՠAڣh"駟ƞx'|R͘1|#`26߷'K'` )5{ yT]!K\s52ط~[&@;{ァ>0ٳլYc.K- 5סɞyG;)PCc;Sc1[cShhvN1]w.zj/+TݺuSƍSΨqߘkV ) 0ŘvKH\pA Vy7|qEhzTK,=9PnB h$NmUuNKXz&"ƼV DOzիzƪV>s\.>uXXlKCm ({x^ u<*#bO?_(õ^(z`遭g+{`yu 7Uf!~)Z|~ k9XLGycbw_o,_sp>Zdzg }w|YfLsFl;L!Ee3fFKI٢# osuJSƍᑕ Dxb<|)o.6K(a|4郺`c Klj<}SCetnf_/-YX\=?ϥY{F>80A(!/"^2:ۭ O3¾])+v[+մcAasǩ<lrI=THm-7xL rcFv#첪ڇmõ^(z`遭g㱃=X,fuZUӞX٦1|x(|wG,^dL淀+f2Lflٻ ۙXۙ;4ɨ?ؾvWÖKM>Ǒ LxEZm6 n{ W\qECѬSsL,=@%s=Տ=2:V&x /k^577:,^]V,b -f6&/;EulM{`ќ? U28[8kL`0Űe~, $lk޸>lN |Zv{ pXz`xl7,bM^xA-[[FcFbxa%wNo&sXYFǜ * [wL t"\?BXn{Υ'ծE@?TVV6Ӌ+b3c;I^8`kki߻kߜ.+y!9'qj}`遥<sO5.IJp[l `NА -JmW[4̚:^D, IO$? %)tOʀVs.:>qi5oݶ>B}skghK߄hbtv=MiF='% 4znB6?G)iڕP▥qZ2dp l|XlF}[jNag)lg>Fcam G(cB죈j8>V{m{oo~*/#J'tTD: lqEt&͂ծcLضE$oYuZ]{e>,FkBME7 m4zc^g=:va)ST{n OcoyeBrwV#< >"D\`1YӱǝzO']ho:c8#0-}K.JqXLi̩v( uYF{TwrK 53ACpe{`Q@<*D5 %KI5V@cnO}Z/E慓& 8&hv]A G՞W=#ӽf7\P0!$f͊ev{ Hf~@_xD;oك%Nb Vxִ` +Mg7X؟~a?w`+٥vbNJV&?=Ys~Z34ﭔyCg־pӸae4es;i_]+C'`[Ծ}o pBi έ^y$NǏW[n:g!AŋYzꙇpS6gĉLĘ燂XXx\`ًὝ={5k1cׯ"%e=Qwbf' v 0'rǹXN䚑vx1% RM\,l[ظDL`1 )7,b? ! 
~RDʉ` )~P b,{+.\P=3 PxUWJ+c#9D;K0]OtO:نyF3&rI!xW A@ .kqgf9@+ٹ)!ftvmL /EB[b$D0Wj%DA7Frùx`˅d70|(țrӘgKm,!K,=yqԱ%/G=iU<2:AK4^1 0 5L%` IXaksTMY`a,6ٹoRzL6^JX\$Vbm+Ltxk"dr,}mLcr>oܲy RcӻE,=l<ݕSR bs9`L^vB,to[{cefG}v}`j!WQb} SnY\z`km?sT/qޠ3\።z;3xL픩37t->{챪I-K&[`m]tZ+a'qbX~*}4I8U ٕhC`ـل%"obs GtɜX %M}o V2$|юAWܟK,=9pɘ+Ucz~Ue=Sp^Y%whƛ]WmoW]Lb|]?c0K 3 lP Dl#eiQȋy[f16㶷 ڷu/QנZ`W6yU78O tKlP`oi-[2'yj;vl(y晪^J:dNgFe|9ڸ5XFyD%`T>6V6` --Sy(FXlLjHcRXV[mtRc~U 0/h;Hy=M\66dLgIl3s%tgX,{G` !fq&oWuةw=$O.aHWRƬY<;wnD͓%`uoܐ S+݇Cc3iFɢبˋרgb|l3֎x8u^;b`u]>j&Ϙ1A}rbKUsF3hYg@!?yRKrF!u`(z1>vq7V]wK$N:g)ƾXC m5'k2s!` R9,K%`+}A]!Cr bCwqGջwoή9uF}矌UYS|w dqw} kVJ&b6>#S}Ob3gz Om5IVÄ|X'CK%:*D%` S#FrJ?>'jOG8E5յl׼g ?['冩kpNUNxF}=&tС~xb$o;,Vnu_zKM%`ng"` بdoŋj|j.z=.:Z_kƮu"$q+m` n,td3S/]AVKmiC{C*8q-Ԥ̬zz2Y3/]7[nnP-*kj&?b9t饗M7ݴt!8ݰϣ':Y Xl 6.?)QYףYؔU0! [lYXMs`u˞y?X҆'^i3g{7H>}7cq<X$~_]v٥t c\n֩'`XFL)@- 2:ɵlpU%C{;`asakU۶m=\GzQGy橅 iӦyr K`r7N;+n^4p%:؂۷)9"عbʦ' sX~e6ئ6Šk{I%8K.D-X`t{~SOm5#/X'"R}_Wn)CN-2@Tۨa0eR5nȲ L`4Cf# a=0.CAn P/c6'.ƴBlp)yin-UW db u7~ f8מ={= m|IՍdD td9[-LlN)@c5V`RP&y„F:3hWO F H3 ]2R/9:<:u2^q<(Ko1ҠvS%w-^{ ϟ?_M:X#M4Il06W N}xIdtd994bZ~r6g*ht ~0} 7r[ hխE& ˱2BM/pjlݬ,o3KzmڴQ'|2ed;cK\gϘqlyV P:(@5h0cʱ /H.HƫCm} si)7=W*2Cih+PNJ]q%pXewv1,rΙ]㵋 `3n)YT ` BLa2Қ3%']p6P6@͸gQRʮ/H.HƫC%~F4o+Xs-3)'YW`^NuuN9 +@^ ( ʝ` ,Bl1:Hc`)wr,μf :Ru9N9 +@^ (CnHSX'3$39*@u*lMIʵX,֘6mm 6@uN21XG dQ,Ke`HUu{t*(#:GX'QPF*5Jeѓ666&G:`swIzBغσSx ` d `5P:ؔ]> Z: ~P sF=L0a&z-c&/m嫔:[vx)]ʽΩ6$g7kac P:YeYi<\!N&F/Giװ*Q60 P*v +cy'ּAmIaʼnn)=?tw'a'e|Mx8'*uR''`e ^*6 ]*Hȧ;%xZNOx-\͹s*{~cebxi!T7X/a_%| r̝uǐmm,)rpa_M;2D&v"ʅ}o!#яaP*PAj7\M `0@,XM煰RLXuugoːh9[xd v9AфvDo=&'8,d3IW@5+q02nլ<0dxQ y@`{zz`ۇKe``` `g DPcQS  ՜c QeWʰ!:SgT TP/A`|$ DEB 1`"5Z荠Gmmmm[ ,%RPwt4Zֶmq~\jjp#rOT P)9(jcxmmm5asFgc;)ed ъ%iU&]1<|t.y&0cԕֻ ء7nTo12~Z(cN~cBfI0;1Ca=rN86cHPWoy|f⑮5 s| 6IBRh]0#QNj,5a9;hf#n3Gō$J(׬ B5Ȣ79K Ģa% It9{$@&x@E{醲r^ԡ>2q<_<6aĊ l/}>(+M}ӡ Ûd,Va^Sl͡t0ZU)KhfJ'8 85!֮jY lOL^s"ۀx#]`v:c[KYI{ҩ>С=HPA] V+`2ZE%I|J]cx6OaS˛3, רa-;fWUv_2txzA{v",&J7ؠßuXUĝR\$gGuŎ =,;,E 3F}ze(f<8Dx-iFFN%F+I~KC&bpxH(`M[ۨT_$V }f>^|^J(5GnfWZ?M{fE7جH;E.Cx(@ `Qaإ=-"uOM;a|6l70m\q,F7AkB,SA y`f|['Vn80BM&aĵ< ޱc/ḩi1X| ++@~~]vd+Z< M"=S#foa`gͺӨ vʝK8Gi㧍YI[hwUNnP;ҩc yÌY{ 9aT9?wh⍓E3?&%%@mQ;l5ss6ӛ +6XIJy~}.1:[5+ǐ}Źoh2=4bsbxX&+_2y Y$jM1dB͘YWd\1qƝ:=rɧsL,'|B8hI8cH \s?cI1ܜ0Eډw]רQ f 7n@y[d{tÔ)y,n^?Tmvxd̕0l#7@=u#?2k:\-|1{| =T> 4gd JԞ v;V Gyrؖ@,uَ`˹͖lf8/V43Vǽ³ A`ޤaD #j˱7IvFzZWa΃ym^ml\Z!?S}zcD 2\кm.GƳ LaQdHoaʌ+ohŸ(˥T D@S 8E'.`, P*P'z6:UY*VsfcEwC[1ݧ {ъ^1GT P*@Q L;X!axLbn$?8TǢT P*@T P*@T P*@T P*@T P*@T P*@T P*@T P*@T P*@T P*@fĬXi\ẏSODT P*@@)%,uy֟eO ([9 ,>AL,K1d|rJ'\ʋݨT P*@FF@+jKj!ؕM]SΥ`fQOW~f+=ʍ. o, P*@T P0=Â7@(ҮJ+N~y ƒP,M^m 6]rKuQyT P*@ȼBvb]O$EpXxŻX),X~}P%@#1`d*@T P*@@%d Wp B~h,N~gy`_6 8Ts8=h@.QGW(:+?`ECeV5BrQ~pHnT P*@T +P `l(ȘN'߆#6!$^H? 3cL˘ScHMhϻ̺PNyhtuL5.Wwk?Zü[`%4Zݢ{1AT P*@T P_,\+J .M57˒ c?+k1pi?J1 jX_{WlzM-5?~!2|u2CŇ%T P*@TYs s< RBfz<()Pm' c6t1ߣXYmmv9WW>B)c'Ow:.۠\Q*@T P*@JKRUni9L`]ARH3+^a36 m ٨6hvg?5V4)w [ m% njT P*@T Psd9,V/i{* r". 
e{/XZ a~\",`yZG8f {рl5CT P*@(M$ڢ شZ[ !A;JwY:*E9rΑ۩T P*@ Txx[.&,03VF偅h8o$ X5k^.;78@2O /Pgc%5F@ J=b3]onkkyubM P*@T P(UZRԘ=gSpr8rk"O]}ʕe ǖ8 Page-1 Rounded rectangle ATM switch name: control_exchange (type: topic) Sheet.3 Sheet.4 Sheet.5 Sheet.6 Sheet.7 Sheet.8 name: control_exchange(type: topic) Sheet.9 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.17 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.25 Sheet.26 key: topic key: topic Sheet.27 key: topic.host key: topic.host Sheet.28 Rectangle Topic Consumer Topic Consumer Rectangle.30 Topic Consumer Topic Consumer Sheet.31 Sheet.32 Sheet.33 Rectangle.34 Rectangle.35 Direct Publisher DirectPublisher Sheet.36 Worker (e.g. compute) Worker(e.g. compute) ATM switch.37 name: msg_id (type: direct) Sheet.38 Sheet.39 Sheet.40 Sheet.41 Sheet.42 Sheet.43 name: msg_id(type: direct) Sheet.44 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.52 key: msg_id key: msg_id Sheet.53 Sheet.54 Rectangle.57 Rectangle.56 Direct Consumer DirectConsumer Sheet.57 Invoker (e.g. api) Invoker(e.g. api) Rectangle.55 Topic Publisher Topic Publisher Sheet.59 Sheet.60 Sheet.61 RabbitMQ Node RabbitMQ Node Sheet.62 Sheet.64 rpc.call (topic.host) rpc.call(topic.host) Sheet.63 Sheet.66 Sheet.67 Sheet.68 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/images/rpc-flow-2.png0000664000175000017500000007367200000000000022137 0ustar00zuulzuul00000000000000PNG  IHDRWcsRGBgAMA a cHRMz&u0`:pQ< pHYs&?w#IDATx^ $E!CfoA@<("Ȣ0(=,.l31 #8 ( "*SdVFUeUETu]y9OƲj|PPPPPPPPPPPPPPPPPPPPPPPP`p ,v۩fϞ}Zkuc6 `fͺqbbҩ}+(((+ h=rbo1cpЋyq'.3o64l ?8 3>?%l-~~|xu{Ofzq(((c y|!/`eClF;X+ˆX, dfMܿWF c60~6pg*}xkfdۢQp@@@U9*oxv m݀GY:wqΜq-m3@@@3g/y٢ݿx^lmn)6xӥ8 h*~]ț?n<;6 ܳb-|ێfK] d̙kj_+v>_z /3w]6EV!T2o[[_7wW\3MiPJa^[džM45C`["MNmFܒݖoo]hGIPP 3"?&pU2^U̞=mHHADǵ)Ǖ|%l`«ޓ@.(0j 8@6 `M;P/`epD`q1@+6帒 `@u`q1@$`q8l`6p+[bK.B_>eWDw6K<;Ц`؁>r\ P Xe5b{>Q۬Yw޹6;Xtݍ6ڨX*n`ATz7lT7޸2lb酗<;AvM; } @ Š[3޶N*N9"+V&9sԦS#8XhQTZ%dMe˖Ew/tg}.Dh!f# =>D(xCPIͰOWl5 =>D(,3(:mk,a"gm"o@Woh!&l(Xvn`d*SNCB @& G |;X6fqP`XvPOA-XW0|l(XvZϞ`+5ڳE |i^m`siTΑأYRn!.ndV}_p@w0ox Ālˠ @ 0o9~vizɔbw(M`;NA>~i#y Mj`1j (Q?tC1 Q"# {{[+[AW`, _σ*|~jš{rקl,pѯ|-X6نp8{=-|t7cGTZ}ݏC0T!v8ݰ*HP<,uX #rm 0Hi*ES} VitLii=,<]y8*$}g6ƶX4}.Xi9vݔS|'6nU;@`w4zv  6]lٸ:`:;zQ) Kl0u+.UX%iS6_{hR3,X`'EءLg,s[ه5xVj ֑lj4UZyO|譥O-i >5ԒO VJE`>Nv@!r]/G寫y]\+SVOՌ,6DN,XF 2gDOڎcT 5gt`l`V`ʩ}3U.#f*^XEPe~MGbYlp/KZ >`dׇK< [_:]" l]\w;gyYl1yl)lhXucRk)  *{3ܾmU* .mvetF`#}#P  o,60@`a1ljeC4f-a @gm0|Ɇy_Y__Ua=XϏ8relv8߉-i މ(#ƪO]NX`bor;Ϻ*VXQ/̙SN 8bѢEQihM6)-[^u zCGߨ}*)\ݡGXk?y+vgT+Cl6/n[~<#?O[m3=度iS66v"O}vD`31 ;>u=;`؁l҆d6&NYD`g}WݾG֛= y}F.^Hw^*kƆz^5 9`}ē Mz+ !ύ!{ݒ73fhu=jm 1֡`N1u뢬ZNb\ډޯ둼Z7Thk]8QH$j `OVTl5~Xv /)nA \\sbM7?ˮX 2uescu[cWv*j==-|* ^}i+ՑM܂{s&ٲ`pb= XtMSN)f͚:xU' `XV, `Zu w/mU6JuȡizI|_]wÖS?:`&s-?ox+_9 4-GpL, ڄwv{n: ejaw(惮޹уtLinU_oDQ^_5-?-|[9²*d xfLquQl,.LaVVk;裋}{ŏ~MKt(:k;W\}]_LO lmY9&?un7Si xRZ㣎YJ ,|yj1;m:_u;SkJcmʧ!6>.2/7MDqmʍ\@FN̄]xw,?qvy9ldzK dI^u,H{&lD@A`؝X(4X] X]5A _@qܪ,~&Uy4 ;@PXo C w]3(~t?(N8UǪ;φn`XV)0[ 9}cfl_R׺[:=_wN5 8y= k9{a$?y[|[-Y5. 
-HK`ս7˲U.U9eY䶮 q-:\ Gݟ`XVAjۮ?_owvֳv߳[f~#i, ^6\VﶺI..mX$O!]+['[ [x;v$;IKZ~F\6`]x֬Pjos9f31,;j-iW2:;16&Mc,݊V۹#Jg+t g6ەIw[w>vu`;4l=j-r _ַg۴ϛ鷿t, ¶,Vhg ~Վ2q:ơ3[uuMOs4I6ѓ/6 qjIOwQp~s;NvVBu Ewu #29t v ^9`#ql{yu5,N??6Nôjqn*_~%}tܺ+?c!6Dޮue~[K^奁mMR3q`;؛okE.6+z;W}g:G, {ۺ6wtϷa=ٓ8 b|"1cF.vdIe][@VbӪ}86ŕFF&J`bwܾ/ ܟԧ7߼ws[v gS`o[o-.]W{zhh㪼ہ>;5fW݌>w:*>6t}0n~~T|h97Yy>bw.MnZ;׿6Eciv)lT&1nxcxWF{+/8*]e^9,ԁ`h`HiIoV݄} {&8뮻Z׼5nşgGyd_z.myyqt`_ [#f]M>[B\fH&%wp1wpM:Iu`6*@m6p0vHť`08䠃M]zqM&&JV3^[<.絢oiO<5.VewM9/n'ظT,+SB<*k]czΚv݇u}v `È߭8URe)Yzأr9:sTj{^Z?я[ne񖷼noۡlńoA,ASN| mM`؞V]dqf7Ty1]ݬdW`4~bw,ji:E`؎i'vbm`⁑Qn|[Zu+V(4_l%\2pXmFm`7Z1q`v~;wSq*]jbNj徏w1Vi3 ,>ZV3 c`-??}<i2gIfoWn۟r-a⧻gf`w~_nyI4ֺ[V{}?я ->/_PT>C5\Ӛ-,5՚_-|H`q¿wSC>lt3}KMȿk~gdK'm#n}®7tf# ft/v[-zscƦfaB(5X,˷*mlo`WU/V3^tgr$C'>Qlu!=_M^xÑG7Z\V]xӟ]vYV[{xxG_mV|K_j;ëM` 6on2`iy`Ѥˇ-O E[ [@:}mKj4$t7usbx zEakF9uM'`<a-[FcXΗ&)m=Z:lc^׬FH?m s=7`?ϊw-G>҂^7{ō7ؚL (E~tS}AD`}o+v;mfޝ-?uX]}n1񲚨hǝvx٘u`ΟJ1B:V5+sY8d}v!>؊r!Ů/fʕ+[}kl"cx=}_p*?|i]vݭ}㗶)ʩ:"׹گs۵_O۵_ ԔʢoS:uR2~,04- k`6b|MdZ |/6hbxn:[r@;Z6 `52:6LGfwON27*vi? MOj!7Dl}ۏ WL;QA F-iw4 @:fI+[WS׳4VVQY<5+_K n{ylt; FKE¾&=rɋݶ,mE'fo3`:viy:hV_vQIEF{ꢐݼC2wⷯ=-;8̍ύu[o-^W/]sU^VVbeV}of ifWu[3+2f{!ROG>kp((RTZT p-@Wf8.BoOgye SNFlT$QbwUGqr>9+M|x57UvBO}W<د녺oS,,;lE{f3gƝF`Ս8t" .lu"<#ٍMu'7`Bܐ558֚+/d ^ Bu{EOSVSN9xH O(:vZ˖-ku+^hQUXU:E^SX?~KwQHgPhm|G7?ܰp6lsvFlT$D6 t@OL|;GL$, d.jfb-7)JNU׬XM5]-RuוºunVd_6m/6[o?>;;wnZU)f~>ǭ 1U&cy i5]ֈ tnl8fVleyB>jRzXur:ֆ>sXո;Vڰe0)ϘtLdlWI?mMk9_\wKtI'x`5{Y)Q*žzVf$ֶ/(]}n<7tbDhS2k[+8 x ǜ*u-MdyҔN_WQ\g]O׳Q_>JdTMR nhHԲ9ro[D '|r曯 o^Ʀn]sܶk 2<lOy`&Nr`vmw `nbqد~n@ws7Io|cq}Cnp X߾Jt6VE[n&J6R+-IcqT;G"oݷ2/cݖ_x-) 2:ͷgfVa7Q ֻzm.== UV?(x`uYlf42`nߟ uS Xt~O8a.7u\<\o\lRnO&UeA0Vp&4QS]v#A㨕l~cٸ~\'25 ZgncnW]y{c]S\W`Ea7sT= 4r a57֕=5=96S+2NX=~Գ;I .ݒRU`$V0qT9yw6??:BtڧS0ky:}]YMj]>lpۉS^+_Jk)+~-=(~mwn|yrJCه)uh5~]_?k6lzl DIiTR'lt; FKEBWXBlQXQi`} yj_Z 뀳 >R^{)Y`m?]ϋ\4nWp@kb[dϫ9ն&X[sun8[ Q>Ѿ- k8'IPl"6X &(*Hֵp AuÞA6|بνVպoU]vokp*nU Vwviuk_Z+n,PPWU Ѯ%ڼ$Dj+dQbJԠ]Vk]` f{6;1لu`5A&qRv1JǻtT5Qԗ_k'ԧ*elu댓-l=?;J^6*`Җu 2,4е_E`mlٌ51P9k]٦b +n//|akYR?<ȖEQ_ᆱj&6ؠַ؛nmw AZ+Z{7^{m/}>WlF;HG]{Zcy]UohF|;M(P$Zw`fIlV&`R:jZ:LON4udPY-}Cn=\w`EJ7qcZ"׺I5לؽ]w^qnҪf0~'":GalK` LTomm&G`󵁑XE誺m&`עekhׅ؟MX{[ɚ`zs=*vw^zn?U]˺ھ<;ZGV3 ˶Tv酗T.ޠQ'|;M(kYl,Yv=m׭^"^Lbtl0[;TMU `b+UXiok.g} `5V٪m]Y@+zЋ^RVv1c8mW\}ݦz)l49 4 vr> ;y>~,"\=8j&}[u.Nq9裋]mXgDQU۠Wo^w->ymvs6s95|;M(kB8.J=l}[dAt>v[kVagQ?kt^wZ=uZ_lٲnÚIsλ4ŽM7%;Qxvp7WB))aS`؂lO(9^wuŖ믿J`8@aZ{v밺 enmM'yҾ7߼&W׿^̝95{~vk__r1Tw5Io}G݂rb"\NFYWO6>åNMBVllPޙZL t\7uccWz3ֈdKW@VݎMTvL{5״R\v~g5ɷ*`ϟl"uC9>4WݹM/^?);U|A]Sk]WnV@V/| {\p \WGث;j7 lZ1ȺkѮ- k`D|'`Bzg `o𗽬x/1~Ӌ\%X8)I&\vL{UW~{ M>Yp-6`"! 1 ;.`iMZFon*馛9._ܚj;5i-^vemӆy`Fl_ZݘΗֹ6ڨ5C0˺ASN| vhHblciG-e%aݠz;\xqq׷V2}+635U*iVmt\u39G AgU>+U w +l:uhע pW;-[ &}BFmCyM %m\D`_Yubu7I@v]7~Oo~X`떷{k2'U>E{]K`nD`Qqnɛ9s>N*e={[ D*2ΜnY^O?[O84/u?uMU/:4 >c\g`~O,t奁mMB0۝3K 4vaf![5<Κ՚xG|}nkwL `?϶Ƶv1nA`_M'yl .q>m lMQœ6U%o = `\N`sW]uUkv[^gZ `/B0/~>Gz6 2+<.J Mnʌն9e y.!S Fba:1c,[G]h X7qYkPsNum^}]mx`q콐7`N67xwxXY87am'a4[Mx FA>[o}&umOkNjcER;ݎ=BKLklq. cy4lQ\!Tk&rР튫+ g>Ph9q(*pXZ|WFoZvivQ]`t,5 ,&pZ؄+'â@;XM̤H⨣j-6E]T\q_nZgT0`X6C!"' ߼kEqWn*nFڟsF-6@%ՏRPu|`${Vk= 1C:3U,;Gw=*jYV阺+w?VaEzu[´lOB WNE`dN4jN;q{_~yk[ti{NfK/${L͙dyXkHYUn~b~}JouUzKc)w/`|Q*o,ls_Fmb#"߀O|-/~X'!؄+'âl6wy&`Rbm/vK̝;zYZ)7ڔ` |rlتqTE[ `n:n`X6`Um V0 fړa}' :0v, &dx۴ H ƥPFO`kK^V;m.X;[3'SPP`krlFS鸓Fb;GFnn;.^]uםUX6__̙36qE'&˖-۽SuZ?g4Nfp۩lm5XـN͝7w{{'C?v|p_4>˝``ky׊kmQo^xI60RDKx\qK_(S"1}ф^Ko`pnPAV GyvAk9. 
r)lQ[\yُ&J> ҅XA} ]k`O]sT\'| ,׺wڅ`8Ku $N` j0#@V [#$cepgt^x*?k)NAVٹo}ؾ `>Uÿ0ϪkuU~G>ƏVՃPVŮ>l&`~`ؑmHnևW]u /8GfIoM&dzk-2}n TemP; /mU`Ď۠ EkX#mZٷq5*.ku҅Xl۷ozƿײzu5xViUvl~`5  d +m/k]15 NP +bM*  2 |4ТzR??5>)x;Tf+lS:ǠJe+Ҕu!6k*]̵bGAu>CmοgrڏUꃿiaew; CgI6g*-Pr>XFw&zԤ|mIaQCȮe=бAaYuOv_M i 2gG=IOŽiT`k]2'aΠz;ԢeN5дb}K;ԶVUCiPH(E VW~i`)q>{`17y}ز/|t;~({MُC˗.kFjPaztY]~|,M>r/ ZwRWo3`bB`AߘAY;X ZUECMFۢcPU^NʉkʁPՏd???mwV^Н-(, 8$XlDmMLv=n,nXo`^?vZ U >M}*'a4[6 E-c%"a~4vzviҮZN" Eu~8 l *Ch?xCbC?~@<o?l?`ӪK&x^#@ ?.Fp.l\8~ +ˢ\-I_m4c[5 %5'F:f~:'x`˟:2wu{X{&h#P$v*1[`XcѶ^#l6&,_hCX4~7ܘ?35e͵? v_VkUVUׅαz`.V^~7eHit`؄|Q)J_"6PSIMh?HcN"zYuQ׻.ģ`~ZW? ҇il=аQjf1,kFlLmx{W~LVk5y9az+[/@?tB` (E; t`gRu@~-nz s{z6Yc˺c`;pO#ilT$PQ:"XcT4L>X6m&Ig `.g^1#c7=| g' ׉gج{ {Όq q:*N]e2rf]H3RX6y&U `Vl΁Nz =xgY)jȩ]STf^YI:̓W`b`ؑ&R>܍Clb~s+ 10w 3:ͨ2(* WA6\k` |Aq|^>lۋwnz~wL o6p j36(0dXc<.q8RC]`5qCn.l$,{0R Y2dCy @ pZ3ywu4R,Q[|H[.-ǁv6mҿ 6;suBJ(hk*}MS~GP̎#_H.^XlYԶ_|qmڳ:M~^{V7p3ΈJީQ:-Oq&>{כ=|ZZX)56 H)0{9w֥su|Ӡ n酗g[m>s7 oYN~…SJ哷*vQ7|tYW\}]6",P<`.6n֦Ȭ"8:ǠN\EJu҇cG혎oWޚ!tct]+gg]}6&Tcڴ . я QP(8 paw[1HU_ȭR4-s$ʣtSl׷%G4lYذˮҰ۱`1P*EU}ؕ"ԇ)lyX޺2PZֆݬ꒺l`%߄]AhP]Ɯ[ !$[ Sӵ+U?6 p elt loB6ߺ +>~2g #uk -16 btk?ln'm#֋z3尽]g`|]%Wt>Nq* g!dʢÏj Nu S12 %+SԺmCz ]`]&vެYn9s iPu||o};w-a&qH>eXyjMo$Q~e'n~7L!iL??S31TY{yfMh=?{beh  GZ+-;،+/k\""-'3)1GflE;met옮<:gxGX~2:ue}O)_Rs k7ۏ@gGyT}6U6FgѩI ,s&(M4%RS57XBiCgvX6U&ӧrp^t `5X{Lo'6Q`<6ɰh,J0Y8u 짳ډvkAup|կInQ fe}vnLԢ3*Iܰ;-{T6]콡rW/ ܦ )hdz fQ`A6ɰh,;AV`Թq퇓XgSe*sqֺǺ]m4'Ͷ=sַƆzaxn\DMΦ6a^mMe`݆6cߧ6Bg#ȹ+&\l•aXvHK?:ol8qA'󗪒f7F-YgYb7}ʹU+ʆTMtoTpv #ȹ+&\l•aXESsG<6X89} d2-k" #Uaڰ|׆ȾVm06C"@`oO=Ɂ, @lecam~7\ְpWl8N $Y}r 4 C`gP؄MMr2, /}:w6 -Yڃut* ' `OD`Iʺ{? fqPdl)w'8l 2:u<:#}$q>+E^Vllb#wjkα ,kYT\U?) ֵíRqf@3 ;p/kEz%K k@je4ialB y~fkww6ͱZ~~gX:P5.3lxzd*{^tj@l֡GR`>K 4elĵ^UKTuNUxS?i:N֕]^M^M`.3|PW^}ΟG`Xl 1nƃZW9MA `)6Z*(b"(0& , ZiFiU_k.F{QlT$`@), `@6F;YlT$`@)p*2<%¹)6 h5 3!f(0> 8rt. - QPPS=(Nl`X6FlT$D@@qo {u5l -`] 6Z* 6 h(3\яϷT.iE/&Q6 m=- Q _滢a(Р,r2e~lnhH*[wեNMBV@;@i*Y/ 96-gm-+&\yl•aX5 dΎ7em#ȹ+&\l•aX`lh ="l5&\9 `q\v\^BC4 ="l5&\9 `Xm#ȹ+&\l•aXǵa5' nlE]6` , `@6fQ`A6ɰh,ڰJa6F{D)I`6ɰh, 6 4llGtK9/:5 QZ6aC~Ghy)8 ;G(?Olw`.3|PW^3?ُ, 6 4ll#FKEXLDqmq%z{ 0w`(6Z* `Xla`,6Z* X )0s:޶nܣ(H 6 tclwFKEr'P PF''sl(6}`"! x 86 `MbRPPXLe>bRPPX>8MEpȇh 6 .- Q _&\ȷT.:86 flt[FKEBWz 4LyIl _`h6Z*@ %OX6_G'lnhH*[w)\)Slh5 &l"l•aъ ܗ":86 fltS FKE㓨lkة`qSs)6 kl[tK9#:5 QZ6a` ,R:6 4llE]6` 8 ;DQw]S6fQ`A6ɰh, 6 4llE]6` 8 ;MEpȇh 6 zD9w؄kMr2, ,6 ` GDsWM؄+'â,kÎ+Q|fuה zD9w؄kMr2, ,6 ` GDsWM؄+'â,kÎkS! klG4/:% Ql&\9 `Xmr)A^`{UOUA|P XǵaǕYQ3ꎺkhe b(N`Xl6`"aD`1Xǵaǵ 6ڋ`"! H`Xl6`"! H1f\ۖߍװG)uGa]RF}(c۽v6 jlFKEB@@`qqlhh(((e.6 h(((qm*C>D|mv1hH*0ኾ4SrHS(Sw6 m=- Q _滢a(Р,pj0&|mnhH*[wD|mvhHb(g3gm9lmvhHb(R`S7Al.hHX TB@QN<$6 - QPPSQɧl 5`] 6Z* 8ԜF`@ 6FlT$D@@el`lFKEBW W->%G4 1Q6;F(]AhPv4FR6`"! 
oQ`qzSpz)v >- Q _|.ŒbQ&v4FR6%`"aVL*2-w`qzSpz)v 4- kXOR iK`j zlvEKEB6_`K,ч%4Rp) ]4 M؄+'â, b6а zD9w؄kMr2, 6^ >`3(r pdX4`Xl6C"p  WNE`X׆T"@h$60<`3(r pdX4`Xl6C"p  WNE`X׆W^Ëz=ڧblE]6` , `@6F{D)I`6ɰh,ڰJrglG¥ozUU>?mnk6_e*`Xla`=a]W巶_9^MF[ SJt6^Qga=E3yw5^ ѮN}YK/C5}((0: spF@@HRzhglM=B@zJvW60j6 ((0,Nw6ω"B\#?XQsl PbP *Zt=lQv yrk\ P`$`qǴ~il`x6&Ѵm\P= ˂4>v r@UVSq{!og%#o\Š|reg`d=6 # ݊~j+eiTlT `OFouG6|9h>֥€X?_kܣmRz][ūAH%+zhgl*!U]TS;Ǣa[j+ >762,)_23.4J wa _ayv^av{q spFرs-kA,ł$,jc>~1x<}P1cǮaU2E t=p/{X(塏;Em-=O *@`lMo,j(U~da@kUBǂ/CJgk t ?+Շb[&M+)ڋ@5XZʻW;eׂؕ2\nVtp̬e%%;M7K$@is |~tR Ee_0M@^CVŁ5;F}xZ^f蛆KD3ކ<-~WfS"6Gvx6•QP9z8h `fl3msRգ/ z*Wt{6uXa6FL " @[XQsl ct(Yz >ۯway>`T93LKul͓G)fMLu8|sl`Tl)A@@9s؇pGŁ>el`x6S"@@5?%Çh `b8H>((((7pЋl yZkpZ+2F@@@[oJ8|60*6pu?eE@&ݭ a6н lrB[@@Q@ XUwª@ M:R`a7C;lgm݅!P0A_t '?Y>>9&+[]rBVT־cؿ?]4?JcW3ohƬYg{@l̘1M Kdռl*(چލ=t(eЧ_d^,UWS1u_9:麖Fy?je4q5&_5 v{p޺s ݰl`\m@_gF-'[}nq*~oY ܉Q4Y) '?xE#vUH~A_re+*i #5:a7 `@6:qoC&&TO@h`%(0{Ծگsѷ҄ gst;u$P!K^,UXh'k%å `>zX(=9ݩY)p-}bl>rE3gu5fq r (h60>/zr@>o|_Wi /-;ς;~9]ߘon `~ G=P;׺ۋ?K#g6k֬/Bt@34qq*'dO@P)Pϕ#*>\Tn:CW Au!u` EqrĸWxœrk'քQr {VE&gVrW/u2<^~9{DRbi]oAu/Gk;"?ֺI H ÞN;v!w l wa޼ ַ f>5ȞXom `AZ(Pu~[7a_pv'u$Y{u=<&FS`bb6b.U@,clso>G8g-j"h+R >6螀6/ZY4պ6QAm,-fX$@(, tjz0zUVČՃ]"GSmgϞ}Q#6 `g[̚xx?k4p.{*Njd0:[ClKP镶]/&ݧV\6l?z䍍-ln6Ef=s:akq^ D +GFva@.6pGO͞Vrkۄ}(i'`0,CO ,rփ&<ձ0O|529ul FO͝q+PS(ؼl79y+pZ᭷vMǝTh{;=`3V6 `y#^{[{u~"wxϻ٢S~3zukw=SwͲj_y(( * :-vߓlh `@v6`%SuG2zϞMS6LWA5PPPPPFu޴~WON.a&/k=? G3=AJsf2|PPPPPP` N­Bi=miXtLZBB+L^`*AYݢ~(n;qjoaܮAꤓ:V9PPPPPd Mĺ{X^Gi5T ([*u<:VIaewv F5A@@@@h\XEa=40w/~RFR ؘA,oyZDW=2Vb(((((zeQH[UUs= NFi@Ri>eZ:++#ۃqr* @^ vq:WA}WMQ(F+#k{:Yqm:f+ʤtK*ZG]ں!D|UNXi2_YX=>Wi.~:LU#0QPPPPW `}5ˢ|u  ,iװ|>U3UZ2{SY>v~̤Nu?Wj`rZ^~ުVki.u _QPPPPPT*U,<c?!G] ȧcS+'^VkyYe,Y>e[$Q_O!l[}?B\hGM&&E@@@@@U0@3X#ief@,i08a@Vآ&V)ӲX,[qP76.jݮ1 t@ZӪe윲n"M>`}.ezuQ=\~t8N].' ehv5u1FԕE"{ʫ@cN@[ǫ`} jϱ"޷p|o: q(((((+PUi죁:+P궚 J8+Ֆ rݏEЯ: ӖMskPPPPPPU[5VҺѵnWuޕ]4xentK;QU7]vV_X+lB\C@@@@@(PXժbl/j5*j]ҲTU*w6҄P\UUePz®nY]lvi/Fa];yY]@PPPPPLv[Yl7pXc(\ZzKmUe͇*ѪL++ge5mMTf@Դ@418!DZM[іQ xPPPPPPQ.d>뎪og0vXAct MYwU,bە߇[rVu:Oԅ״y*+}+:gj̮3YQ5u:_*CcUzv6tW:+WF:ڪ4`UnHxkn((((((*Gہa\((((((c@UW1E@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@1Pߎő>7IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/images/rpc-flow-2.svg0000664000175000017500000005775000000000000022151 0ustar00zuulzuul00000000000000 Page-1 Rounded rectangle ATM switch name: control_exchange (type: topic) Sheet.3 Sheet.4 Sheet.5 Sheet.6 Sheet.7 Sheet.8 name: control_exchange(type: topic) Sheet.9 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.17 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.25 Sheet.26 key: topic key: topic Sheet.27 key: topic.host key: topic.host Sheet.28 Rectangle Topic Consumer Topic Consumer Rectangle.30 Topic Consumer Topic Consumer Sheet.31 Sheet.32 Sheet.33 Rectangle.34 Sheet.36 Worker (e.g. compute) Worker(e.g. compute) Rectangle.57 Sheet.57 Invoker (e.g. api) Invoker(e.g. api) Rectangle.55 Topic Publisher Topic Publisher Sheet.59 Sheet.61 RabbitMQ Node RabbitMQ Node Sheet.62 Sheet.63 rpc.cast(topic) rpc.cast(topic) Sheet.64 Sheet.65 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/images/rpc-rabt.png0000664000175000017500000012764400000000000021760 0ustar00zuulzuul00000000000000PNG  IHDR6sRGBgAMA a cHRMz&u0`:pQ< pHYs&? 
IDATx^ U H dWV5"AЈ#,FAAq (O"@%&D@v1 (*:3sm{9TuuuuZ6 `m`g͡޳f. @M2:qU_7M/*}șa]Fv lvs?sne +|ml*@@@J+tJ+'9_z{/l6Y\ykV]uo## `66[OwG +jk|ȏ~wXh?N76 `؀o;=5*GC`~МGl@6p#n׽2;,WÄQ~{'ˆȂ( $6y WslhВpZ׼4PP[e}͑ BAl`l`:!yo5OPPS/?w?@qs<l/M\eئ(((0 ?<8=qN:6 `aZO9~4IIW@ ,W^`yl`(m '>\uH`7؆^+@E٨a;606e4*@`t5h T89`6P9xWl;(0 GZԏ e, v՛Y7)}q5> n7M*l_a`9+ RRmVU~UWn3aC$ 0?0O<֒ t z[uUc_ٳg)SDh~\/n޸fx׻nA|tzӒoP! ,kcVHלl>9s8CKb4G2~} 6%S`zRU?5|0\{/ǕW-T O=;s_pTiӦnɝ}nݾF/iMӪu],[w-CT|) n7{xv;(; pp쬊).BS%̹7y: .M?;0o׷.ziU^:`X:,.Km{)[Y?rV4akֵr<i5cm[Qv7OSCE3 B-*8>ә?{׻5zݑ3q.~* ;X-VLLJR7*?"8#*UV{xcοR7[ەG)#_ByL dԵ@c6Y-qڧj{PX;70V;_,ɏ`Î3r{{gUW]z2&igD=p;i;g'j l-;d.++jXEXP!8'UbEfW}6.xZ몬w=׭mJMmqX~ץunVNu< U`ql1G&aǹnnpկJ}knҤI 5qq\=vP, ؂CvXA--bdw. jnVjbVGPUL+cpmuYR  leŃ_[) z[1MSY\~awGeYf˲;Cޥ`bleNED=AM9a *3ܦE|cV.Jk( ufYjuQH~`Ǎ3+IbN쏱&YsGRyGŋg}Ҵ`{RW2>V3k;v ]X-v?99} :Tɚq'z6݌.1]3zlݡc|u`%eVdj>ZĐi'RY%*Q-x 5&Y1vӧOww{纚.b֫5q^_ ZO`leRE'w= `ET~bAm]Mgkd(`֜<`yVwvpr98MdfsOf 1xc~ǝݍCҵ`ble~F+x Oj݃f*N돉MB![bOei~ ;WQFP[C蘴zÉXs[Nϱ^|U vhKll1G'G{UVi⪫9O^xQw'O 7ڸ'n<(s ,[wt L)vTW/6ߴ#slde`#`;6=ݡYg/Q+~g'>1Lid͘\r:N5`X}F: !kT,)LƤ1On`9:"_W>G?rozVF-ɌP/Z& e2/`K`XJ|r`9:1;|ww;mܣ>~5*]nʔ)L>̠;*5`X= V*li`ӟ/<3Ug?ӀleR"{]w uk;[oT:j?QY3+W Uչjl ->}f=[c,UB[ovmJˬ}E`+{p*-{?^׻|;_lTz'̙3nj41٘Zgόb6ݏz [o}ckSN9e еh|:\u^p}l? `;(,6.Ċw}wsql;C/FA.sm(Ns%8}m7|IgqK~򪼢sᇻ78MEME9zklm` usW~3~]F]V謺ݎ}@~ۧϣJYӶ=1[]S7=[kܩ?6.|n„ [vw-Xj&foOq;6J`167oϯrvS&oxct$7xjScN9M`5eMmFrUm`V&4@4UF7c) Zv`}+;kX؇~)R;cƌVD+tѨ㏻~v4ԩ`;U}6YSoQ"lYXEDZXu/#\?낫nFIYvvz޼QB;& neq'pB*zǜy{VF%iƌeٝj'm5S^me tjrqymVPv 63MS v޼y$bk^K^0%7{r"OOg}nTַ56̹`;m6ӭ.D` -F`($S`L9*F(h8c[ ._#p#}=[^ v~OQooh7 `jlYc`yc`:1]*a w˝Rȫ"XŋSO=4AN~򓟸Y/??}7v,l3K75eVsF5,U`f_u 'cRuX,XHCusjewi lW} `;wҺns>&c`?k]};<[{'kؗ^z}ݭ|`Ts}3'Ы h`ZuI/m՛^$ t!:8C"K-ԍU>[n'\VDžj)l٭2>8`v6mydE`\~y[fg!I~+sϹ:˭ڭ߀_| .׿oG?Tlg /^[1X>`+C&&%.[R6 i5تmTl6S) vnAlW_=<7+դN'reN}O?>O 7]veNYu2pUwaD>H/[bek#li"ۧ)߱ۨ60luDž^p!leRRP] lش:3ßYs=+nYmwfƬCu;KYt, b-ۿ23hM9m $}| Wy Nm'l8V)z8[A_Vmm!@Tum:^AS`k ^VIIA( 6W  `/H7[ɎH*hS#Xnn9Gs.pmove;Fg!3g"[EaՕ(o¤YMyM6q?Zլ$pGg5 Kt ׫WNr] f ~0) KD[ ԮyX}/n;5 *?Z-k6 vNuk`\lTdAIli5y+J*jWYj~&2"Vss4_IօVKwK^\1=, 뮻m?׿R=eVr9kO^ nMpsM CPմhQ"El# `6ܞ5a楃PLJ @in=]B<+1|%>9 ЅgϷ@ēN-Cy{=C_\0f/M_B~z{+b6wۚku5VWa?-x<O=b+B[v]V.9"DW44Sd3**p?cUWܢq^Bg2W LJ @(T$ai$ɭkU*7H7Vsϔ-}Qm"mtubV"i5ցtM~ j=guV4tŭɔ` YMn+GWCUa.[> <~>2T^O(E҅;9&sǿS=et> >㏻6޸>*ɚQ=s[?Y;[oeb9?dkl}݌ګnl{`+*P {>QS'cq!/86:U"/@Uweଲ p'X+S[0Zx["Ν*sΘ5ӓg$*{T!>?w?.ܘ|žs9j+XMKZGmx衇ZhsAo7~뮻57 =<&a 'b<|P  , |P,,*"yȫpb16u-6Ej<Ϫ`{{;\UwъYf̤MXE^ ^H@v{p5BEa-i 4!Ԏ]R `m`fU 2:>E*iKt 6V6`:l ꏱXK~6]ƖVuה+XE/^ ~|Aett(+{r0Uz=UDu7q x7:Fp߶ŋ&s;^O` W `Umnx̚؟ *` :k]  jl۹Õ̜VL"\7ovfEO=XEc7JgΞ:ooTR?v!2HZW20[AU(PMqi4)d6{+ykWegc ږQY~TyOOQa;Ͻa5Qǻt 'L/'|m2ՄNZSVsm~C8+ jYO:ia*1 qv<9c7W RYwfCZ\NdPk@),<~],MiPo]`# ;p!{mtXdGbTr4a2ɢieذMlS9Wdh_ZbǯVKc`N,xlNkMN_j݀#r_K"}XT0qj߾{>EO~Ҋ>mSS_"gҤRբD`;a![i ; U@e<2f=vz%`;wB=]* 8P$bkzH2v~Z3x≎ֆ6^șr7j?i}\{ CC"CzD;mǍ`;wBk -ksHY=_%u '-s„Qx=)9vV[fVM_cWۍk}rK `y jmYszʶQ0KWXθá![?Q l)`{yU8i8QXomn…|u;(^c{[K쬝@ dw*7/5`r#Ofswedҙvv tUE/ [21)iNeQ Ԡ 5hUWAsg7 `%I$v5pW_}ܔd Uex6r<@k_kMXMn7܊j4쬱D`c leźII++PXF s/kN>:k t >͓I`8yV[}ݣ_ynqۮK Ә.7[`ͮ|u׵h$T}{νpPЫsϪltӮ)5`+,ʤ`Xz,XX-GկvR,UUo׶0%["Io],Y tMf)I8K/=] leN[T,$̺`;wԲVݭ(m9`7yw(DOLn=k_Kf͚4@W+}{oT`;^Aۿ׮W63y1leRRP Mg.M4+1X%>9 ;V,+ Or8'p,ĚTdM%pdg@7cn7Xil.x?=] le[T fN*lw[E;j7zwƐjǍ1ZvK-x*r$w)<_jml`;^۟׭BB\V,(uwՌlG7g;;nܹcV /LG`yo}[nɜ4+7X+Cm c۹mn`;v"-U4~DԤdj>7'$pZk3eK`;wvNW%vm-X{'ft\ph"Z6Y+V3+ ;)鎬oiYPl+(-> 188a t!.A,q\7v檫Nzi"i5XկKerAQ㫪=Xhتr9Ze7qd'$3tI-UwauZؑGcً5Q {cʘ&nUWmՕV_ߊ |$: 56'6rCQY>%$O4VV) vhL(VPigItؽ op}\uX'Fd78]tEe`L"~qhj:85D`ÆMS2W =cߏ؟#Hߒ8K*|yd_jd:xmS޵^yJl}vkOYkÎt;sT~vMʫ2/s7ol]tWˮ,vjUF: XoD`r۩ 9}3>}o+2Zb@v$.X͓w `e]뷕29} 
g$%u+-Z6NƻǶh~[;A26^1lu~xH>Ul`{%0mFAvsG'h)ik5Ubu5r4nUXM"U6";`]GcWMͯr!SLmRkB*1s`#d+ 2Iw @`#{(9 +2 믿; Af!V qvZoѕMlm%dX(Nթ (^u 8`#mmh|cd2ҵ;쮺*wwm7h F dW9 B;I6 f!nSluG: 6YBHߧJ+`TjQ`zR Ol1GȏqZf5 J@tƌ[.\ `uxd<컦MsZՎ1]'.s{+)|9W,[`*lYD'زr PStͽz8 hiIG}Úk;oC{x(ܦ=ߴ_-}0w9 V|P l1G?EW};`ۯTޱo 9s渝vi8#g4h[ [`XؽV#U"nK[g98mjyMԏ>y{ح/(UHybN??hp\k`lÐ XUY LqX1`|O `͚(Pll1GD ~H!?;T>3Nr;Zp'V~Mn}l FWl1Gq) ~H!?[ZAͿ1 - {`X} F: :IC,;s6>c`#1[Ы6l)6a)b`8ϝ*`|k st 4 ,[7tU!vybNiגetۀϭV]uh~\ٳg)SDh~\9s8(C~ևoa}79_6@ϰ>,=y;tz7+|~l>/n5,|-d_XIipkߋj8/)}dP5U鸓m`#//;/֛aIyMkO'Ҵc]r \V _&~|[IFj&OѣjoWn>(ȫ>6mJpAyrͿ,]#dU`bQ|97_Vo 4A6*z0[AeͺU]W)~]X6a lPOMj'iHu,蔵Ɂs`b O'GMpFtm5P68Zt<+82X*x`˚`b? 4P.u8ݾVq۷"íϴOIHU:VyE4G#G-[y/^1]vn{Y)+OWS;Vm QI+-lWYUv>vLzk;ZK?_Otva'kb?Eg.Yc[D`쟴dbКZV=#[Ѥg ς߯}%P:QCX(=0Pҏ`P "zog0npp]ʷvXUiPC4hdD*G49(lN_ڦ:^ Ͽ F*OUP2iH~meZyt W=>N@ nZIwQ9a8.w\Q[!y sK%Ex6V`? u4bWI zBPAy  6i~\( X3PkAV,2@+S[C- :mR@hD0صs#jsxja~0:c|K!эwr:`l^ygKϠg2np2`*3fu[[:ҶOoվ:m}HPY hVd~-|aTYHgvQ, nkiE#,4xjږuJ{=tGdW럧{ܐ l|Kڌ=#%ې%늬m>K[+6*|Y4eX%ް~àzL`# @AjXb(`+ FCS՛_!kcpp986F:VLxu`XĤY_ y/ד={e{>c~$4`{fÉV+ga(?ígz 0+$+Ut Ml^vSV6`BBdaN7-*:"k!6@ KkgҔ;`XϺI+yN3%|akz{ļ\ {v aOzf+IWlwš +$ 4UD`j61gq YD2|V6`}nNcl`Nb $ۛ_ [jpx]C 6ζ]u|XvyQN`XM"ÅbVeY6[k޷AoޱlWM GRIkRߺv:և:+;i?En>?ipipo]Ah8Em5+Xp[ڟ!l= cu'/3u]®u{ZP`X>Ll+Rƴb5P=*ǯ#X/+wl;H#YƖ j\3k̨a6ɢWGtL(ZSO`-ZW4sÞ#ip8 ~xU]۽W]gY$y؏5)0+GOCN'0vy"uONQyQVy`XnQ_=,gtZ.U /3VZ>Z^VmɵɝT oT,NwyJlr$o3VYgJɸWSJ{,k9iCГp6 l@S|_T蘴zGacTn/fs9< XPll3.аkw\ `H\X~{+*rQ(Pll~yP=6P ,&cH]/As tG6RW-(tXte`8j`#'N85 Al, w8 Z stXte`XܽQ(pk `x,[xE"w5-.ot: `H\6`;D`kQ7K˔!I=Ui~e΁ Yvʔ)={+r̜9s1'Og l`8zFeNeR tU"ِC\/&E8~D^ iuN0QRtZjWEUE=9ԑÎ,66H!?$ 嘥; } 2l`mT7XmJM}5}8H(/6F: ITWn>(ȫf;r9:n oYAc8Ӻ> .{Mw׎ #H;aY-뗫\EdlЛl,[f$v!օneq(FH &k M ®Ƃ:ZY֝V 4Vx[F}ǛZ9Y#eO[d,mЪUVZ0kqEA63`ill,[ زq\Ԭ'G6`& J+8 ZC8A^15߰I|NsڧrԾ"MC.ayc\^6 46U`ز:59м)~hl#~$3tvFj{Yc] os6\i݊<~1p'`@7mmsJX vU^ G(;YKdM ljuMV~?qX"B^x/**u;ܺF@/+B"3 4C6~ l)i w'B6cʬx[+.6n291`7u $[%gBDS' t? *M.OzjNdT5VU?n&vjkVTl4,VT5d`\Jgb@N ݛuou卣󩢽l@+ţë`IlI4P5NΕE! .ѱHԍ_`.ѓ6ɱ>i G٘p4) ngif(.~@r-6 B `+"Pnl h׭7ʁLX|E@TvΤOmiW_iݪcl@VዯN)L^%;nۋ.BH_ U-ǕvM `mWjZ>) ڂo+Seile[Tӓ2f):TeF!YN wQ>h͈mk3iy=2Һ[ `pDǪ fZL$Iu=ء<Ir'`y톶Ql'LG@`mo%HԒ"@ ]4ov@=M~WkVJ4x`# kgҶ-$:C[l^.>a]AZltӏfu !`MXn:.l]~>(*9+,i@)F^N^Aol{6`$U&R5ӘkTy!Ԫ,"! ӺEt!ni_u#lxwMOl5`L8Al:HvԆp^[)-kc`K4c+еq9Z+h7p__$bΌNӮv$NC~66_#rԧ[Tllus4u#a4j]ӖQ9f!ƂFVݟXkcy?߀7mc.vmȯ_ߦ4}flÐ ׈)֧55U)$ 4͙=dY熓9 e%`Fk>#`}W6/ǏZ7mU]H[(m[ɖ*J[c6OC~66_#rԧ[Tl,PH}\m \fat>`#`aw^9l)#lt>`#`{4Nh 49n*+o{lÐ ׈)֧55U)$<z؝I:mtMLܜ(Pl-2SI j8u:ԅa@ltȆ@ۀ 4A6*8Ms&i6 `u0 K(rkȫ ((R `M6a  l]/Z(0}$!F,dӜIڃMb@6*@*059CΌlBG*((R `M MB`kjPJXɦ9:mҫ,/^Z 8u:ԅa@l!BUkϙ,dӜIڃMb@6CQCp,b"uaofs'ݰ?/[11)iNeQ Ԡ[TU+`Fly Zf &:SϽ96 VY(jPAd@`DN{l`IX7}}ǀ{6`mV[͝u@lC ̳`+RKiA ` :uƀf}ln„ yf[w ,[SV&%ե[$W4v?~(n /tO?t/2te|llU`+*P`zRr<_ʔD7zz瘣}tך̶ s9gMrcjw/JleRRP HȝT @iL6RJ"q t4p]{ٳݯ~ctx0g`5"G} iMM)F  pݰmēNu"&Mr'p[x{g;J*GYٛm<\_g6akDح\Q_ *F}y[{:r8G|IsU}QwGeYf=zPlt>P +kv)D[lm`΅8EB-**vÍ6v_z% [DOlÐ ׈)֧55U)$6I^iWw?яo~FuQc֛-,d;Y6akD`Ӛ*R&o01c[c5ܧ?8/B#OS}LÎ`|l F: |Q[ƍIC7>ll; `կo}l9soƤ+M2śyc|l9[`#llFO6 :h\*i6RL4606ϟ﮽Z]vzw]c7M6g}C~IĝP';MWtwqi5uZ/*_>O;-d#C%t` , `M* @CmG! 
wl-[Z(vrk @;sOk\_zw'Ig1cymlQϡ LLX%ih4zӒ +6ު%`[ DJJ|@+p/bcfOfmƌ=s_pO=" l 硆C47ޘX s*4-(pK;=(CtqxEi2wb?4&{cNf|l*t|}?7pёUIJͳn"lX.lD6ޱ |( ~o[y駟v~_;nحl뮽aؑh,L^~}Q ގQW2qg>N;GmW_wm]|W{&Nt$3s}ZN1{'>~aO}gqƤܽ3>v>2>6خ P, F,M9ƓY};i k*_9FtrT4Sٳg 蠌8;F|E]Zw]7UjM>hv[m?5&m;(.3~| y|,$䕶l lwZu~ŋ]GpYS4.r&YuUT4 .z?>V"wt˻ɽk =SꫯvSNuz׻C=P:~GvHV;`+v,**`+2,[ǝVqlc /gK r!~ss믴;}֨b'p/g_}~u _Q`+s,&&%U6  Vfm `X;:8`تfʋ)7n\O>>^{mw/=M_ֵ`*~cq&'`تl]9, qU\Dl E}qf:o~MI `z{oK'?qYkz饗vvR;,[kXY1RSf몎a%`d(/ޫ[1n‹^L~2\Bg.>>3WkI[p+vש]{uB\{H= CJq2`L9]ؙO8'`)ꋻ7) `V\ѽA;+[nqN,>swzp I'?8u:)*pڴwq馛Xo*V*pH)NO,{vuebcјK AS槁ne_J V[̚,[5(Q^o5Oet4 MWt+~'I&KT}#i[`߸n̙nM6q]vJ">h ^mk{le .IJs'Jjzu!@j`y;nSUleRsX6|w/5],UwMWY=72 60)b>n-,/~-Zk/{x#8WM$@O.֋vr`Rl8XsVyE1.rL/=8J9,t}qrNY+뷢ߨ-;lS>{`Ѕ.nY"28?':֌PoTȪ.ĭY#_A-srX6|w/5]<n<}~޿+#Y(q va7=LJjVݙX-C2 8zKS 6) %t ,H5ձ0x˜]v؆i. 3,'4-(`m:xѾ{9O ,hu!u`F (UW]8W?]q-{'ƺn FtNl.i`32+Oízr*rh CULe)جR@f7 <`MQ3p0o|l-(ƍ`^KM)`5)f Vz$عs3L;n緿=u}nu`0g#pHؕ7 2pjZoK`ZM0B2I]F'߬ 1z7< {%jXl9=,t}qrN?ywH2KևDcN_=䓭O?{gZR9'4͙3=i6s܍78fX\`_6aklͣ#Zq0 ɴYp8X xװkf&y`Eohl&q"t3l?^jN{v_ߚXi˱&waMdۓ1&-# #-gle^llЧ3l8!eD``t7P{BϢ~x?*+.kڊXX6&Z&6`ئ틻t`~a{>k-raӦ^5^֒'MrIO؞wyg4V-[\IIVI Z>Pz?nr0Yc`>zl2#,"D`0~H,$<5}#ہ7k lթm9З-sxX6|w/5]NVW\]vYO}j pq ~I#YEf6A`[_ذ1qDZMlaهz9 ha%~[U7a.'` +Dz8`^Ծ9^┺qΝ$ԇպ'Nt>hk 췓hwXDb'IVC}_ó҂SK[gtvfz+aoLlHX-B6Ӱe8iw.V 4Mn)) l7`2P85Tfeƻr$H%Q`9(>iw-VYŽAIÇzgZZU@)͟?5& [ܦHl~o̬>Ԇ&cubz\j~ڞIּN.~ «~bn:6`*2< `~~ǝ݉'n[4VՒnXmlfkيZ?(YVK~cJ VӒ`k֩UCMtw'nrZs{y_waG?] `oJS>4ϺϥkZ%C~Uy~O6Ρ`تfʋtXf7pH]{üB0U :]1%0{^2+{&\"m7&q,=+UVC^U/m[>Rʯn iG6ʰXԍ&]1yP:nܳ#X`GIZ7rt5X5q/Χr|WEфjtWEJ+qI6&FbRAoL7<[}o[}׊pg?s>,r8ց-]K9]R˛&qzܸ/<<4OM q6 v\w(Hs/~1;mD@6&5`w˾uXa7« VuGS Rۥ:&qzvSwuRHzSz `E{#0U:.oʸq9o4Oʃ?qn) `ӢGZFG>æk]آ%:ޫ {z_k[dzRkXuSJZzdv+ӂb #9C/U γJ}مXF"y!#1Cq_(wm֛˧aWdڮr|hqͯ'{cfEfkKu]:Nos¶uƻUVHlSmQi[ITEXLmwv%pjzTi=dօ1jIk *-<@n`>\XEmk3so6ƸdOݍ̸}c.ȂLIm3hQ(4pRmKoz_A)Oگm*˺Lox87Oћ9ZO6`~tisn'u;ݷѱ[f7!I X&~reG^4idccNf~馛ZyiL qN@Gs{nPV{`+3 `ȦE}ƈɓkC l@LFi@6?<FoVWo Exa4эrS(-r<[كTAl ygE`oI"I8}pK u$/#cmR3fMWܵ5b3]hIlyv2zD=1}?To}^.?2~'tNaO_6oGE`J,9]Sgz;nO{olZ" aY7rCݏpYC,r[=SMCSVcYSuq63l`/`/V֟+t;o-Y";3ۂʴفoF7yd⣒ΛI8{.3&`zK7ÝA|ߐȾ!K}!~ HYP^i=?TY~Pcɩ"~}ʬ_ 1VUFaƣrt!. v34B\:9)\ q6/F;;v…(jwOtw,gq[1 RAM6iɞ6Hf2`XPش^El[;3Ap+36;qT'>ֱlYmvolҥP0z7#3ó7 ~;Xzԃp҅8?K6Ζf'ƍ7؝}٭%o4V([C}sђ7'tR+L_WUw}wt`|Xc~#1p' {m+&`m VVNhe%w%.N/oT\9l ^ܽ2,:unWٵ^{kllt9,_j jD5~XM&U&tMNK=sk']> %N4u5K<l`rVXK/= o~[{e7%@֢GS[nT`^S~HVl}pK.QO,ꖨW|s7wHq6NIcx^d|Ig}{ĉ< bOO"h9\sM `uI, vhS+,\ y8l>qNsm7d>ڱEMd<,7pC?N&)BmKuX& ui9, x+igV^yd79s渻;*mF; b2:j ؗmر8ȔlwZuqNmny]ҫ_|VRIز[VkMF_5/6=f-royt!Nho4xtșǸWo}[_ >jyE_Їw1`$]|NcvJ+ ;۔΃q{f9 ==06]DU^Q,cqI6=Fci~N:f|2:Z'- `$@˻7߼5[+lE`e}ahE~HV,[e%K;:ph?v6pWu_(0&SOM½.?|%uh={_.-{Mwͪ9Cm3[)ܯ!jiy|_GUuնSկo"r&ʗ6VTgǨ qEf:>4r( ӜYZۧ6% ScjOUKlFtWtsUW~~5]yDQFmC~Jv$.d&V(GnyEbɌSsp4_?lhہeFhU":΢VZykt8!Z?1l-Fw gs[׿͚5=tzʶNCatUU2XC&)F^ cQİk ч\ߏpg]8eV ~ 5^U6i9 `CX`Z `k񱂬^z4"?aD\K.QF: |Ql}ZSSE B_H~wa`$Er8)F+!Zqi6-fk1g4]3>vpIpt>`#`9$N6Iuni3a*hL^EA5f.ayy2-Ӵ(.c`p^Y6^b' 0g`5"G} iMM)F s~et6yVPiqx3Ӵ樖Zڤ[Mڤr-8!ZdSN x`#llFO>"H!x'G **@󣗊k"Ӣayiδe(!XDmipE:~[M(6d?B/lz`#llFO>"H!ꝡ( ƔY6=Yup\o ȷ6akD`Ӛ*Rwn Hn7-ӺZg-Yx67v Ԛ9ydoәÓ<@#`qhDH&v.TF˱`ͱzmtȆ@۫Z6:J8 `@ltȆ@N@]o-yf9R8\l6a  l]/Z(0}$!Fl)zc6,`qP` աygF6O!@`Hr=li6ڤ21]^[l5)$4ՠ@ 8E=c3؀&tK9K+ڀfVמ;`*( KE8LNlh (\S(Џ l7tY9'=1"g>x4lEw6P-4tn ou߰m5StQzZ}V-irl[VVvֹ 6&OpSOZ(PlygcJ~kM}䈙NTJߏjm<_tOu18c i_nx=_<[ePV}nCqY'( a~+bM:ԁ((C]`A_l>0pɄ!|k#AS&v "'-qgSg l6``UvL+ׇTw+׏>}PC}7}LCrG@@:`scEqMr6 i|} (PH\dX[N!b[uۀ͟WoxQ}#0ni.lA[FGyնp=oEd:16˲AͻN?WϟY՞Prom<0iܔll#e7!0jD2bҩIQ)P#COt},šVX+W7}6?ڦK琶LhI;ElZj\f"A6le.Š$[=-O3`ºt~! 
`Nʀm)h <i TujeZ;غ%(`uܰll@{3ݚ.ƊZd6 kҘ6ʟPj@ d=rC~\jw{ .}PaXArVt5m x/%o ɹa@6 Uw_n̶̎`ֶ M` (U/ T'-H ,, bqY&C CtY|my@,aFb}F5! B-[~ V0ya+606tU_?mFufaP*V+|zMoZln*P`}6Enu!#~u, :v `6 T )`2U %g4_>-7}60‚߳X34lYiՅXMi݊n.[AǨm&oC}=mk5jYA'̆[c]~O4-xdG Hj{-F-c Hֈ&>doUB1og-].e`qb؆KfZ& 3jGJG:^ـ` ix)]3T[liUyCUg@RSX`닗aD7,ۢ*dQ``Aæd뭿{hImkjڼJkJײ68v枚! +*P2P &>p#41=6;H-E B˦iMݨKtWYe~#4 PPP`KU{CF\OlVҏTۭ g Vٴq~7Y*{Ë"Cueu,] ڡV鐥v.0M8a@@@>P`Vx,SĹ&//6`jpi[ x!*Ϗ1MꯒU>BiWhuvn~bMOU;i, :oCDZ;\x[inG6MD@@Xy-%gaۀ`,:u5aTpçAOI58w֧V( / 4`UO[emh9B܏]4?Sr-5<@@@>P`+_{9Ѕ.60P6`p.^| !ASDg9 D6@LJ0kX#G[>4PPP Q`OD?5 >XF- `|lu7v5iZ]g+oo(FobT<@SS^5Ŷև0{_/r606ۅXiMռl^43沠 f}ȓne0>9g@T@k( TfWGpE }Q08XǟuHvi,3I kc` f/mcHjXwf^Ӵ6뱵V2ӗN;í &r"{s)r*e?~g;/ 9^6`6^˰?q 8#uXeڌ!̽;IFƝZYUi+˖Q9~j[Vȟ9 7Mp~n̳c4 BZoV)U ]rSԎ)zmǥuoTKqӶ17/ߩ+U:!h"jKɜ}Lij1Im֡2~mWڕ!VYvNa{*29US-p'N? r+(lh.SdVIfiхcWoD72}>_# SeH!nAl(jYfc3 O\=NtY3w^=w+ 0<:Y-8HC0{ .aRӇGƬɐ"O+pou -h_OhYT+ڧn[(,˩`؀u.xRETc-zVw; rZ?% $m8RowX/P9w+Rc.pS7lzE4¥tK5R}p9VN.3yJx> 7[I1[5z5vb],Xf]3M>h_v3}ԆAGui^3\_Lߠڱu3ѥEvNV[TUF*ol׾ct(6 `ՋqϺ[I}P 4m(`?g*_umJ ;CȠiӼ|LشYOSQi|v-xFiŰLJݼ12Lu|,ݝZꪓO^8ԉS `6- Zmⳗ6u0teUӱ]sgi-YŏԆ+X7c7s줭R:6 X4+CLZЀޒ0(-bc-'rqPlE^ߺ~k~yٜшaSY=CùflioO+#wYuL6l$*҂tlo˴'v2DXq&`ONg"f~7F۲?Pzn[qi{t3.gzp̱lh ,\H<҆]aj³6tMHC?SVC woMM+cV:Z]3ea\GY^3lUŶ~ւd8v /4&aպ+s- m &Zflk3u:`6o[uҤgVC O&ZD:N?3p8C擫>}:u!qVIM6Z/BP A?7ʪ1MOVT9G|rmɋ"8QN6 `W<ۭj/N8=qӜaжav0d+)Y-Xծ/oijpgeZy~WN_ZS ֵڇh) @]G<vf!- ^"MhԺ b,}8VaSW^y{Xc?~șq, `6PO8//_p4yp{g跇˾KP>oj@kvk3 P~Lruc#i5?3\/GF̎umq *c4՝#\1vۑꍌ9)Iq 9 ows?snvl6`=sv:Ə_)<Mf};_qr-{9@@@@@X>)GEn_Tt2kФE?lB:h=BW/5츲8+K[t~e{ɋ(((((0 &YIQBXK?. ԥAky;gvv_yŜ{<l r< @KXA4m 1S'vEt v1GR"WTo)Xдv|1W<(((((Sذq^l0TENm't,>Eu!NXt&1݆4PPPPP Fvu~Eo"vݍCTF'-ke+Ϗ8eGcifg2&im^<4λiۂE۵ʻ1L@@@@@V `Ɍ|P ZwP6*Auv]~V7+ kQSՍZ2mjV9k;k_uUNkw]fZ~Ktژ`ٕ_Kyɩ X+ XZɴҢMб>M^P~g &X #ifAFuOx]Z4S~0rx6TyO(l 1~lY^]GficZ鬺 ,× iv⃯-¬sL%;@@@@@h)``֭VpQfЬm*fz> U )+j],=L7^K:㳺 !녅I>67k4]eQPPPP\?2h]j53P6oW+ˠ&EL>٘6mتtg>6"t6i^0OLd9\Hk ۖG@@@@T @;5 R$UaFqCP[+*X#UldNֵ8'eq-pM!=9]@@@@@_]OТ(((((}mσr]G6 _ce{U10N+W/j† @* pM8v%#x ۮV_nj. YPcF ڭYݙ^aLC;6jjf9_BMUF Ĵ!.Tڷ3ߞAu>mZ:ԝ(((((mH*smR%e߾ U *Q!ژU}Y3qaQW>JjYGtvܬˡsY9w V~pib6L.Ŵ!.o߀6\Z&,˺lG@@@@P@PNdvx3yT`Ӡ %BX lڸ߬(i AݱX@^0&FK6(iFѭ6X=ۆłoi܇UE#n 4Am18оp,mI!LBuv3e bISUWeHkC<]p[xiggڤ`^;cڐVFڶMɿo:<6Mա (((((PR0;ƕX Ni|Ӝ Ӗ`U+tlܱ̃S]> 9 PPPPU!OF% %`Mjp)pI2 ` " D?cam 1]v.YjB _b!ͮm>Q~Ft=qj?XV3 qv@@@@l_|Jn3kG- -QP^nYT"t?Vg8mW.;,2=m[x}ٙv/\{}YHp;_}^b#u#,_ٔl(C|V&(ە& duLZT-pF[ՙ,_EwϬxڢ㲎-]^Ӯm;߼6䝃owl[ Ŷ|((((( P mңe3}hNA4PPPPPPP4T~7>wFꖜ PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP KEmWIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/images/rpc-rabt.svg0000664000175000017500000010200700000000000021755 0ustar00zuulzuul00000000000000 Page-1 Rounded rectangle ATM switch name: control_exchange (type: topic) Sheet.3 Sheet.4 Sheet.5 Sheet.6 Sheet.7 Sheet.8 name: control_exchange(type: topic) Sheet.17 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.9 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.25 Sheet.27 key: topic key: topic Sheet.28 key: topic.host key: topic.host Sheet.26 Rectangle Topic Consumer Topic Consumer Rectangle.30 Topic Consumer Topic Consumer Sheet.31 Sheet.32 Sheet.33 Rectangle.34 Rectangle.35 Direct Publisher DirectPublisher Sheet.36 Worker (e.g. compute) Worker(e.g. 
compute) ATM switch.37 name: msg_id (type: direct) Sheet.38 Sheet.39 Sheet.40 Sheet.41 Sheet.42 Sheet.43 name: msg_id(type: direct) Sheet.44 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.52 key: msg_id key: msg_id Sheet.53 Sheet.54 Rectangle.57 Rectangle.58 Direct Consumer DirectConsumer Sheet.59 Invoker (e.g. api) Invoker(e.g. api) Rectangle.55 Topic Publisher Topic Publisher Sheet.56 Sheet.60 Sheet.62 RabbitMQ Node (single virtual host context) RabbitMQ Node(single virtual host context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/images/rpc-state.png0000664000175000017500000011321700000000000022137 0ustar00zuulzuul00000000000000PNG  IHDR `JmsRGBgAMA a cHRMz&u0`:pQ< pHYsodIDATx^E]`vDfB[ IAN$$\qAeq]Yv{[շns={zoΩsРAbۦѶ 9snWڟ%S Г!K6nLcB_|} /ұ/9ߏ/5)հ9?>T>?D~ۧYSO @>i}mӇGA!m ۶lm+mnlMClfĶ16ц~F n}]ښ6,Žn&vAEF>li&>ͬ[K[ne }?|L}(sk39?t)Fxy?5l94zXeƴMF[ShtmoM?]3&QF(׍Y=w75#oEWgzq׮kk3<bX~wĘ>V+v5v#)cߎ]~յvوkvW{vW+lg]z%]xrve.(οȱ.Dٹ);*;sΣNy|tg*;ؓNcN(4dh{CC [ـ!{*?x07h(ͱ>PgAzۍH Pֵҫ?uWY}u֛g$X.=]؍`%*kݮZDtTֲMjQҞSyyλAW[ԝu҃5ӓӫ 6Di?knko=iX{>VB/tǾRàvrv0C>tңÏ>^ݢyvv '+;S{ҩܾc'5p{⩧ٞIɧ)[3)ʞy9f-*5Xfs9~.Ɩ?ٚu ΠV)+[3aZ;ͱ l]ʷ>6!l&1ߔ3Glme]ک)isOEソn.Zkt+\ά^N /q@R4T-`{ X*0^wF]u3l,0z,8`nY6 q46F>&00GmHk +lVA9W1.`R%W_ye^@ K'=L:3踓j"=/=8@ `MW ^w 뀕/ll~>₋ o" E m۩gLCV,̈́l@pIL Ӱ?1^d b`A[3vi f۵9×MW5/ tx1x+*@JZxF`]{3p={z1 \Bl{Сt^ ߃x yv G27ߢv>SlO+s [I_q% QyhGծϷk' 淚lٶNr#]o8~]}̏iʶmqh=Z5V'}ッ 8CswPC{-\7$V32] `U@s <6}k9~R~'Z~ʣ $6;ȴ18Fz;[|twn]÷f nM&F0dzǀM6@:Y_v`}5]^3ep籷{.Cs3q˰==ۣr'/eؚb'BV{/6'뇬b kzړ5AkBV{ jS ,ɺ^dK2,@M{a- 7j.gUQു5aukl5p;<^Xt=]Cw@.]7 .`k][ׁ [m^oe֬/TVgD^|޼Neﲭf{;i Z I6muwMϜI}wmx.Z˶lﲽŶ%t^C<W^}fd`;c `OkM@ [GkMk\V+yw;﹏~1{ࡌ[v}RÏ w?^Ox{)nRvݞƃ @X(dNcn875B8o_zT<~C{ O'qs<[!cj"dgÓY+T Ț&dŚ.=(ȶC-ȓ(u@ 0nlüX&tnkCtyvY{ 5ˀ^!f xݟY0*/k E}hcK|豥[lڒe1mK,[qr λtڋiOdٲ'd?DvM<8Ц =ԣiջlouzm5mw mw4}Ǯ > <M8J~eςl6u}]}Xz-c -c ʂm:*4T?<$֭bxa7r^*3w}>L?=8~=$?q2=2qJ0l{x$Jb=2#pax~<`zp/lӃsf0hO-| 9ƜE,.sp=l5lp37cY.dMXnXXd="dֆDzs6l[sf1?f,ȳ5iGk ޸ڰyۤsIA fw3$e ݘ]g^+^t t1\3&)rZtB .p۶;Л|?%pl&MAN${M9 \Ņ)i^-|` Țs6dMldh1?Bքo۳pM64at7jqUetܤsnfN3UxܨTAfhYCrETвb.7h>!e հ}i*ze&ΊQdע|MmҔ|-eiNVDvO|Ӧ@^;Sg /;ߏ_zmk+V]w1ue6;l,/ul]D]N:=xYg:흑[lo6zmյKȷTLҾ=`P2vT mпW^yƎufx&W/&3*xQFbu-,{$ٜf=1r1jY1m6C6>9 h),>{<~,kx;gϟ ~pD^U>۝X<2V-c2V>q̉l0Ze06zu1FdBldl`moVV\iz [ӫ5C札 %yUYBDVM CZlyA lo7B*żnkjOW /eXl*ؾlqO=۱qCCDm"ClyI' # sAϰ7x5<2=‹t!{{ӳ9V\y>|pe1b8Ui5W;޵u|{-]Y{չ=k>V߾uyk#Σl/t"lޚ1Օ=5WkZju\رd¶nC' Є,B],XX"ڳEwa ߣm>BB[ѱ|gT]_@} R{mle(tc"T Of" yn;45wER7|:ldZ17 U Y6+u؅ 0ȚaQa m H6(t5ʞ. [Y <ݠ&t5p|oXhٞ5P[^eضiA-TN}̓imX s/3oE0`9SO?CaVݻ}klT0_~Q1]{LCO]tgؖ=Yg Ul]r֥g;lﱽ ZkN^jeg*{v+I/B۳lO1:{apl5hYh1g. 
ڛwyG>O/]2{ D&sinKQ,2uoKQkLan wscs͞ac7q7xhg7<^\| »=a{1ǫQTs&kX2b9 jV-fM/Vd]bKhhL}miA6 -PBO ,g؞g{%n~sc9_ag[nm %GyQEv4_{6mr+ijMͳY e-kz;W=.V-Re?+ m{M3+H?\AvMsyknʏ{V92]~:Y"sv@lx71¦uCM%3h/e::m E{c:#ɼp *e=2:osEH^/>`coO66ŚmȚՐfMzgyۈ Zu Z:6XosLf[{mV 9)lu)ŭ`V!*l/mB̼p~]&3W.csXr ]rIYD6CIkS핗~*5`Dx2{P%j>Aאg|[LGvtỐAK$UA7?lŋ@iVz[+zmBV4mی-gK׳-ٜjNK{7ؖ=>7ۘTiXG/rg؞b[ƶw-UD{9Zhj4m"ۄ驫/Ailuv$, Ѕ>ز bjN,¹$'_2q;v=4/St΃ Ychٞ Ai)/Y%ʳ~f؞})[ʞ2Y{ ~HׄZF_23QiTr1 öm;ݓ#M{ll3O+: 4dE+6ۅQ r+]H 駟 s0Y!vMh{zh|bX1ì,l3z^nAKe}[Slٞg{0'wN8/Yؖ-\ػ%݊.iZ6g c =} [^yAk5zBvj0$gu@810C{Y:YfٓF:6|Yc1 ߇i8Cw Zt!{8=]{ҹsw=`˞xȁ-cH/a(/yA+D=Ʃigxι3i԰?mϡ{?^{vֽ|tjB&^Oןv'Mvfo>s)O\,zLa%=AŶ-zK!=Jy{{{:i*S5]0}wЄh4m ی:Ah.|ElKg{rpzvp{znH{zqH;p\~9&vt xq/0=-g{fHzmh "w9lfHSwHS&={Õ4h[ Z@@V-?(*HM7:l7\s<9dIcp:!}q'!cǟOLȿOƚf϶8'mv&ٛՇ`VWo϶p^h gAϲ=ۋ>q>}t/c{mkOϟ϶m1w}hLlSٖaGh[:CyZg1ӣ^-Gye 3y3psryr^χ-{R~_x ~:ۇ}x\e4 c˿܇<"Ǘ7<,* xj~x<}Jә|L2lG!| j!` 4 Nɏ Ex#';٣ݽ`f'|6mJVے*gwkpgóN^KNqQ|GP@ ~\@Ffb52rќlUݍS-P@X!kjj!i.~A Z X_ ڪm]mu@[Bo^5W-AUAs|ўϵ3Q \ˀ-l'M Loئw#Eiǜ|wNŽ)X#]N<:vi񚛉4g=iji|z ε~a]ď;v;2fm=2FvhF&?HJ3f6*v0ZؑCi)lO3=fp± V۴}[ ӏÖ;DSO5-f[t0Z6=t ;jz[iX]*[s~lǦk" lu4v<<@v*%`oOx鰱.5z02sJ:QI k/|2sa2NYg^4@׆nzAۀ] r `7*հl"${q.raruC̺PVV*-U:|5QU tJt<^x | @rb3xwi S6õs.<h_,m/Hed: /Wz c#"~ܵ0]cQ&MM#3'ܓylOҢSؖ=~ҳlϟy0p9˰sW\{om8UJE,gJٞa{Ciibz8 ?kJ9Amc<=]XX3q2onCW#c ]yNY}O I='t&@ a:@f8 Uyz 醂g!j{Y億ϺO/rW'z1ەHuX;' Ggw`F*3`O*:U{xVm2:T;=\G5؝Fw@v4exrhЈ-?ziU*$~BIBXUkyqY1B!9!#UEC8 #Z\$P\X.ٚ>6Ɓ ɅBZBY■lH6aq;[Tls]'5ݠ4f!{P|nV!"y-tY]3@ι+3`<7ܵ||+a$%*:c +y?uTvYv1KE]:vU7rͬX@7TfҢNEL"-DzɋN}'֓腋O='˗H.;^eo\~i[|?ˮ5~+NWV^Οɉv-w?uI vi e.ͼU]T70Eh5l*csձ.ȃ\w57I[*k %߮` bEnb١i:«d=w3M9gq$؞z#o9x;xOA9gU^'oQɾe@ {5[zyWx SPrVϻLwLOf*oN96cKC=ƞ;FO.؀\j[J_˫٫ŖdR9Z\uQ  -W{ _a$VQb{@kʚ{eò=%ʢ̼l[%к^NkBH%lmjO׮kg V&c\8->vC<ݨUne U +JM?^]bG<4~);/[ {-Zߖ>\ڞ⿟i0ǖ,{1\AjǞx֞ۓ|ߵǟ\ΎQ=ŏSמ[n߽mz%zszл3{hl6lf׶mmdzl.?`[l]w@kg6w-6ƍQ J [f^Nla(hN6h5i6'JjrJw;LGHnu(^V>M%}So5 Y~R 'HgoN--mQZQ[Z1Z1].%t\Wnmm]@'3 xz ǟq\tlvtL׶t4QlGv*#:-ճ=bZ \4nh5tV6(rl1.'y[7[0`=\qaS6R.: Hȣy0ާK 0zD7j@esK8ͼɘC^W/?2ICv\HQwVqޮ^,m=6r伮 [7 t3j tc"tѷZ-%0kF T\^Qz_0$9Syiʰ`0LJ! #r٪l' {mCm2sh[amiH1k*%1$ iAVLz&hMFC5#-Wy7ߢLmw xd`¾Pdebery%0ib4Y&G<0(ETu=N]ORaH24501Z΅afZUVGqym*|ԯEEZ^uPhXo9 ^SNixAV^T@/2Kus Tfa ݪеAcaЭpߪN mcj~*ǮzJ{7 7UV8s;@[rP ȣ.cp!Ze gc̹b`hn|]ʋP> .<Q^.<\n[f l/q[U{ ]DG3\</N(垜Ш< 7 ;3V+m=m|2im|ln1GYtA T Rs0 H`&V;t ]zjlW2+Չ(k='U^թǝgO- &P{8՝! Cjo֞)Ak9$#hyq T 2AFӞ͚ݥ)Cq!CuHݠ𲙍L6$U@٧]aa^nQ^nU<\xu!l h&1l ,O.x^ĐhΜ.6;'x.o|3aR W'0CAsI\3l{GI3ܸyܠ2`[a$!e&Wsڦa#jm_$Y ۠[ ] 8=^/8 _ /x|ks=@<BաT iouz$8a?9[2eS d/4X{mDE l"8]^'+ag ;E-YP@q O@!v-DRp^ ,vbpbTрEc4d{m=.dmІA^K\N8+qxYI0W)NV>lޓ[Eky|vٞ^Dce2knJ*,&,l`{$^VNg?hy-y:4Q8|e[f+oðO[J{%ezA {K={a^xQrop~aO/~ֱYɧUpqlRT-.| !C*r s#S^WdrmU*ϲQ^m2s-E (s0&ajy)js2LciV׈q9ӲWs2ma{&}qƋGY+}꾻@߆dn#6_Myw Az='`/6Gm=DC?DT6Er?!}i.c])_}e||y~^Sk+{E&4 o_}1 V0+kvXآy=5nZ$&m  D @$@V˔ DuS/o~~g/>nM_~~vm\y¶w [MHg& *4ak/ J`[% DuSin6lXq,.M4  TNN[lV ܼmi9X±2OFX2(/܎e+|VV4 CUb{M^-(VTPYw0+4lU(@'}}V-ڣ&jq{|=^[ ZaU?jD9ɮs{ݪklZ6#AgTl*ʩ4a|Jzg5ϋ{5- "$Wt\ IuՎI?ykjC9poCZ 괏addX mY2l?kPo68CW^6nM﫴\_K}|f<)Yﱎ Q[އ7.7?xG' #wzbგxU"{#|~J7H.+Ѿ)/uh#_!G;TaOBJ(,0e<Bm_5\Wk7})}cT?%w(bk@۫W,[Lr}~/|:agOH|_B=Os^5y [/|lymf;tlOUL;:v̅|ls>3{@c \*9ʌ8 NfTn?o@Nq|q AB?AOh1^8m=7': Ʀ^جVu<[;2' n3χ۞x"7קW}vo|<տ?:?$j]zMl5hKJJhvN;8>~[N{5g o]FD[{҄U0a-B_}I:ΔpöP؆7ƴ _噃Vվ8v{7A `w{y|Dog^gk3}sοm%NW[ vѡJFqtwxis׮>Ta+m\6+^w`Q AfJ {>tK3W*h4vvX5o>OKҾU=O wqПl b0JpNlu990rgƄBMYO| mپЕ"*sЪ{q4'5՟H~%/PRo^ՃWl#o2C3YaʠOھ~y1 A}yaH&d'i k$_ק , >'*`/@LOr|\s1GؚFvl͂l?grt S>Ysil)tA9R,픬jEjxr?I`۸2-JW+Eڿ^2KIǬ^ǴH{V=Ju~ = lJG㬿Yν{@~kfa{ϫwʺي[r~ 6(M QتNKV3ad҉EрhH Ͽ}E >\V% lEUO# % ۳ϻnjd*MYRgcDU@ZE)sO>ϽYleTc.(, s/8"FC0_٪8TG/יJrщᚬ1[S.Pr>|RBlQK. .L‚l>TF#E 7JG! m sϿek`뀶*5;UN=ͨ<ðzIU -R3RN\25! 
҄0F7 ^ti`GvaiǫZO3 իճtrJθv O_!j -ؾel_1_=[3|'W=S4c0rlCAիi%):e%y^4"; [=_ fKռ-lI-9۠y XO3-آIh!ΐ0)LuP&u!j Wul0l8nْ, l{LROJ k׫P*hי"W"'9yMyH}@Z]tEy]Y`BkxpIA&W'0iJN4rӰ=ZԲulEh@4PH /q)Ub$`f<[ #K96bG$ ԮRUn@VR϶vOt>iрh@4P3H ZS _ХW\E.p2l*lE5#tigigрh65lᤂ`9[W%WPad7Yy ez*-рh@4H pR5KS_ p򫮡F\MFY~ZrB y|9 ԾR-;'X- W\u+IWXI+TTP9?y3*{*25/^5+r9T#}AIs  5:lqe'/l׌$W*;p+5VՇUpQu f*'$hUeVY>LC}|_1^[^umSoUK/ jSiܼFun7Mf^h@4  d4l_c؎!Q0˯ wF4F8ڳs[6I/ D` n^op0l΄qcil7Qoq)emRW' ҄ eʄ-4cxo-?RG4  ҂q,W_lk=6Iۘ[ͷJ'+NVh#t=u*`[=L` F+]ٛo lEh@4P4lMP7g5~'~]lųV4  ҂[M| #ڝ|{'jtdum.+^h } [8KQa-~nsw'l2Ih@4PH f7-{莻UVγ!kCwD`<*2WLyC,f=ۄb3ǹ^xqpdvY>鈅y R{)4S5a;ݯlkmֳuNtg믗R6r/;g/+dtr~ Siv5;s8wg;=qX Ԏ҂-[VF?-% ҂~D2?Pŝ˞%l-}[8} VtZqϛ$_&Z$=?;9/@Zx2(qgǟ|iZƷKx>'[9.EI:C> K75E`1;K1C=>E?3_i؈qSV= R4Tjrb/\K'L2ߋxqg[ՁB*F<9Fۭѝ4 a~)+3k#H]4 ȥ҂Iu4=ɷO2S|瞧gÞSO?ͻz^nd hly+¨z^^c# P`-z>̲UjP0KVn b0 IzmV.pgEiJ<=ru ˯°}i/({|mԳB[ -`׻u:Md]c­}>^,l|B/B9b5lqCa/;/V}wla OпeМ߳N6 VGB $8\w|Dii -I}4Kg+啯K/rl*/m(l0a[o֚ #d!vj{ ,Vϫsq~g J4 (P [8+^ZI+^v \7|g+*{y+>\9/1dѕlŝW_{^yuzUkŗ~ئP5[6u9\_'9K b~Kߙ0o^-z7ߤW_ꫜzuM >RW.uf@Z˯ `wxm|^gꫯs4G>C:h@4  ` '+ 3-;[oޡ7پm.HCr  EiN*c;wWl|#-!h@4 @je'2(x|}=~يg+[D@k -~N0d]\d {l5CoJ'+N&}D`7:Nk`` ;tB鄢рh5l9"p\c[vZn5l}}^zm1\g,;c9Ǣ@H rDxuc`Z{^[l2I- * ND}V>Գ*SʴLKRisNsZ0Cu(_R dhĵ#銫M2 V/ nD=[ʱS 麻zwcɪ}u dP" ҂UҥW^MF\x z?#ci^{lW6ʳg3NGW5^@?lSɅOΑh@4`k -[ ٓ6t/f龴3ŝ{07h(7<|gۘD+AVyۼ ɅT.@e5l[@쮬nCOlAP3p͝hճ m|=[tv@yhF4P?4lS~Q ]nK l|-` Ж;͂mB ~[K[JD0 ] [npٺzXb5VU*7Գ˅O4  Ҁm]vܝڏZsBYU!d v]r |i`9рh@4&l[wmOj٦{2׶f¦r&\y"nh@4  ҀmQ4m֣ԪloԢa #7RnQMG^!V: le9~/ϗ(!P Ҁm}=+[ l[p|m+j[DSg+;_94llܜK͋{PV]Nnֺ#vc&ņW3hÄ؊g'޽h@4 HMДiYK,{Zu u}ݤ`ۺ}Z3eζLF2 ҀmiԙԤE{jܢ ߶|nL=[,MְEY{/}@4P4 lIS+ڼXYlآ,B JHzрh@4PO4ls.\GYV VٰEYa4#TЩvP&+]b&UB/6S#2縞|UןQk9ע@*mZD&Md-3adlUe-` !86#UƮ窀k&⏬7['B:h@4 (\ ۿ6nAm҂vjڂaۂ֮ ltZ [O6MiY_Y' NTYf%% D9@:mΞk"mQluA:ޱU^.lXF;Rs+V4 Ha=f{jq9[F6K0$Uq jr{>jEрh l5p aˏ[{6Q=لa`Y!z9hणyE@j MejlEd+29rnE`ɔjV#!_.Hрh056lwܵYgS5 l SPr* 5&lwܥ)sFF(V<[E@}@ڰa @j6/n>5VЈD4a *u RjGdrAs- w n(lZtK  ?n֬ l5hGdrAs- w Ŝx{Ǔ/}@4P4+[H%iDрh5 A4  rmXBE+T$[ηh@4Z$1I4  ' lųh@4  X5 [I(#4*z ҂ ZB֖لʅH4   [[ұKǒ)Z L 8l%P.Bрhi m鯍 RV:[}l{EYG`[E'9@}@` 64]쳕V:^Ѽh~k@`Uw/_4 jVD'рh@4P5 V2LjD@5 q'u/+VPyzN"  lSmYE9V3\\W S-sUt@JJ4 ȵ ><=꾂Y=@7HZ ߀mzs?'ʳU-/g~t\w|јh@4+ lix>ju=X.Ϋ5a!ޫ -?X|[-@i ->21SGl F'򼰪^0d4)l|)G_g+]s[ZZ4_`4úګ䷋w# G zUrg[аߚ4 , A lmبZӂ=ېz^Vyen0*,>/^˨>z7E`;y*I 5gmMF4ڇ;B-`~_TY^mvY`+¾+@öYvFl`A+-рh@4PHzE2[g~YmaG.rE@2 (ljK0fjdleQh@4  Ԓ҄NM[Nw|-!}׸-2(= ,*ߞff@qf)y٪\Zč@!K>6+V/*q6{|}L`w1Xd}I_WۧVE/-}N҂- uLe6˳ؖUwwV:p pAh fw瓴_mq%/}T;&9$A^ps?G5 [7qfÝȱgNueF~8՛'6<皩S*f<s-3f`|Lcz {+t%"XGI:ޣeǞs~;R'H.+ѾQt|mm êx6TJQP6ώ,vCD?cOlTRoVivWL˺u2"y2LC׆|_wȸzq;pV [s!1ޯk{ʥTjt:|ywhqg{ύo6KVjq0%X|l"?AOh2C|zqwG~t~qBMc/9z5!1خa…D^U6,tWo6yrI`u#quVl>?-B?{P'gΚpP,➯6l"B7_p<ϟ7A I6qP{PX-$x|Wzӑ7Qk@l77yQs!ZwV:|\yƟ[{ǿlsaϪG[:iE=[4COx u8Z`ƭk?Ō&`#MW/IE1:7a]s]h5p l:\ X8pƇKמ~kzrrl4(ݹ[g[G`&Z#rSگr%JͳMviFg+}Q9s  ҆mSTs,M=>21`gk / nK I#_(]hS\OF{ij @mֺ5y[x,V`+MSY'рhhFaFމ| 0tQ8E@u4P mV$J D@ ֟]أm<&Z8 lUJ&`1יPr{et- ҂3yq{j޺=b۰QFlQ'h#R|*'O8*|fwR#9 Dta-v%cbu&%4atjZ:1lNa9NSO3sb/1me;߭SuJϯ/C·hf5lhYTTܑێ Z@#W`um{.N&Nɰ-I [-DUQ |%lju~yf;h4*lK:S[bvanmmǰm.;`M2hlU _.IQ 7Po3zO^t֢@:mljѦ+[7jٶ;lד6lL 6oo{ҮE=غ.EԼeHfb=ʹ`E_CA-z!9 [^PFBM[&r[Bdz:\ӌ$&z=[uf]]ƽ_."5}͉5l4l0hQqAԦjuO퇴v [Ynp[IK*m/AD`;m\h*mm?ڸ#v;*6/DM[x#guaUVuFz@:mEfsڡԮ>ԡǁi ۝L³mHjYĦsbE@h -Ngض;GlFB͊ERWK[t>L{]΅ рh4lٖtİa佨C9 ϖjd,^m/j=AI-j!{D@4l[ry.t6Jދa.rnyʣUI-8Ԅ35J' D@m̫jj" @v8mc΍<-Am5|tBрh@4 ȕҁ-'Pl{QkNhQ[J: R# k+RUԹqqDрh 4 l jlũ 83UZ;viA;pm[_=ۄ%j?q r DU@:-t8/r׺*߸a˅Sv]n?܌OL)6I!\1(OtU/"Wh } [kQ҉:ߝ?(v{;7ul9Ϊ'rzDWA7{N *+3N֋S)Յ|UT"Sc,/+ UNKBl=$kGJ/7+m*m*;H]϶\i[T6l:eoզ<ۦVt@d߬^tޠ4P=q (4plt7諯 (׮JW_ Cݹ0ȹs%HWցl nxg[N0&\ k!06km^"N-`M.>ou]:l_u_:oWSS4Pw4 ph9h¤u Js`Qa+ 
,׷Zpa(؆_zk0+:4Mra;9WrDjV`W$ r}r´|~_e~6ݎ*))7MlǗ-[/ riVLNNрh } l%Md%/ҦҦkFa}YjzV{AIKDl lųV4  rmXFx2 DVFрh@4c lQzf!>*=RVFiI>K$Y lkk.SH=ۚr1 @6jPF3pL=V/iQƫk|?]?~Lic԰NNh@g+:N-#Z zIņzqUkܼYyJ>:7~)ەz׉&D4 @vzK"{ml#$]܁_U5(^v\nEBν{@5 q֐"zsҶI[D5mO|\lD@i@`+рh@4c ls2rh@4 leD+ D9ր6 ,9.D@i@`+h@4  X5۵MZxem͍d*m- jWJfj^t0@U4w5vVz"rT\KhTA*2 3znd}^ŘzTOUNG:h@4 o l2w3@hCͺS"fپ$b\A~Ks-\P%'r9?U@uma|ڲAxx(计mS`~,{Lrr=E@~k֪&lەBΏрh QԴ"9۳ W딣Ӟgdل5=Ux<^Uap8ir1 DuKy[ǮW ,RRϛc] rgbyb!z2'sрhi `њԋ[#\jA>[  ԤRYԲm'jѦ#tP~Fڦa#jmZąså^t\K>[% DiV`LR RE. R:h@4 _܉ vDрh QܬJ϶~d/[4 l1o ˚JgϝM~_4P5P+B)DMC7rq79rI[ Eрh QҼ5Prڞ]#'{Ue c@Ҍ|$~Ȩ66   lq² T{S!}i} u'ؤoz竾\wf*QUVS7k=_^ml[~46o`; Fǫ줳4scKR7+Cg(  6X ջڟgkכEج FNROC-(4rvY^Y8kP <6hP]zBT"9rn ][&NferƇt#ayy`eҬ$zJR63цZ,zp=+5Oq6E[4 G l-a$nȉV{!9n^/$m~벅v99Couׅ%(~рh HyLYl|X}7ݨnj>fH7ެ@h/mwko~|ml&i5յq IޯkCU\"& U lj#qK^8D H^1NjeF!W!Dmx,Rmr j x5r%ފdl#/ j@`+T DkV`IS[dt$DmG3Ngs%J4 ȕjMZmlӯZXls% \8Dx  lqӯ9HdZV:D|66 rԳue#&g qgEG4  Գ5r Wtt:h@4jo 4^,l?|'^>O.&рh4g~ԳoZ\4  jM[%"9ȳzNY<ۚm^Z4  lS7clcDǐDij `ϒ" Dm䋘8& 5P+ż-lrjdU4   O[leh@4  X5 fۑ^,md4.T4  AY`+Q:h@4 6ǡ(*r1 D~ l2W# D9@fPEsET]F2 D e[.+dGK"fD@vFj^ܞ")۳[ˍt8QPt^Rϓ.-w"㫧zzHG(HΑh57@ed׷Fz-`lV <L}E|U\!1<ެYDί_@2 l} 8!5FsCU `DSA2 DB@Z6cjיZD-tT#4lD MCZF֞m(lM0F̃FbK o7E:pj oDU@)jdaۀiy 5=P1< e/n2<_][J̪t4y\E[y[sAQsտ)`,R-ަ ̭?+'_οh~j o`+.]4 Hm9[m7n #ׇ(рh@4P4*lR[Yрh@4 04*lų59 DH7RH4  ? ]6'V<' h9 Dl{=Zd M2%˦Níi/k?@` tQTAHM>7+?"xIIDQ-@W__짟~#}7g}NoK*6֞mxJ'K0VBрh@4Pl'Mo~W_/f~A3ӆ7ަMЄ ##g#nNs-kIdn a\K/8,а՞͠W׿~'?>~v+o*6"gΖ=[6}4ܖ)tA%SqnʦϜM͊Z> cwE^4 M _@?GB[|H6l5k6oG[![Ocnk{kl6ڎ6nL]P;eiԼUqJX^ ۰zծ+,m-m- Ҁm-hTܙN]=۰3H5ږ4v{ڸy uu5YsS5$ ;rv9mvksd%-#{рh@4P;H-Z9ԦGn73p[6mz Fn((jӾf`Tn%FV%JXoW^;Bvv jSiiV4c\jׇzQlϴyg>{נ=}t1TMXo6O|\lD@h 6/)Y SA ummܺm=`mV8 ݏWټ.lu)U}*gFgF*'Nvv ԼҀmQ4{Bא96}O`?`;`Ra+h P)rj2𸐼JjzL$#Gŭ/na V&⋯BmW#Ԟ*,EtvӀ=b;PK`+'ȱE@5`hС4s#={,ضґ<*ݗm?ۗ6!m~gjVJ'6 D ^pT1{6=䓞˴z@ض։>q9o{ۿ0lysnCg +JS4   ZildGnuH `gHjdrA YF?s$>k 0lXeBʞg֟C>ngbjݶ@@ėDuXi[v@s=F=އzh;ho^ !m#/jԢ=bJjѲ (V@k 0rhc >2P!Fkܴy+u3(BƢV%"Xb lH? #7oۆf=:vߍ6m}N!-ԱG?U>4 BINWU#mey\Q=*5Ҟ@logʕ\gCjf4sY嚶]#;V`+ Ҁmcz9qkQVQ4Tw]ضlۉ`Sx"!Kb\ٸ{^lWwp\Jy&wOshlsc~u6 \r- m]^h76eΜEw8FضnEAEʦNgm. o]wߗ^s&5Nm>? zq߬z=o<<3 $2Qg?O`^' hk 6WaDӊ[[lU|~0Xc>Qya'L %'^,x N\?U#¶mت02^TV_zN #jzea'K@ KӳE d ?IV}+K`+؊ XlC`Il0rg Ƅa´zN1yY= " -I[5՟ߘɎ߿HInXs!Z[>؊h64`5lW?~{U u6{ﭥ7zVzo '~`^ad-`!&߂98ilD+x5$ hhU4믿enQ`}۫ߢW D-W l!dE2K)鷩\MEyv…W_ѯJ~ʻ??>oV.޳{4|س,r`@ki#͛ZƂ-R1 '5~mIǮI-8,Zl B#[GވC.r Dh &#k*M1;;V`+ D4`FN0m:#&yJ;D@gy- r~D@Yت{]TA4.UB"t+)) E (lv \ԵdԳmҬfœ$,рh@4P5P3:{7m]{S=3f-e!xрh@4P-? 5u46lmڼ:@Rn}hԬEn`Ѩ?9@-@۷o_ԩ]{A\p7NA8fy="hiV0u;ٍ:@̧V%[ъD@Ak ʳ=謳΢QFԌVR lCa-[?ރ^RGEqۂn4FD2 Du[as:|ҢE@LR>϶S'iG^р=CUbjզVFрh@4Pа|)“]dAEPni9{0 @h X ,Ѻ='O4 HC?ʁ 4K5 BFO;>oGѰ!*.XJxm?D>C:h@4  4l0@gmrWku ܣi G{p|m9.鸢рh@44l,X X6g(4xRR(MDgA4  4lÀ1 K[#qБh톑ų^D׀mLO[@jѠjUN"w-F΍hf4a hVbHm}|HOxO!?'MƯ׷v> e%tP[qǂSݢ55-jM6iBxnܴ +XӢV 1w?|F+zj31iрh@4 5p ']h?q:k=a*EC .L=y h4k|YmͪLg[~^?Sm}ߋp ~Y*a1cONϚl cQV1f*2M>L6&O irMl ?cL#M16aTMB {N&NGblWe֦Sy]v|Go. ԃ+u]5o[BmV{-JÆV9~Mq~--=num:Ryq{j֎ִU[&-ېi[PZTLҼ5EZm;5mIq&-ȴ4.8qTYafg޹)USmgg~tyyњҷJoA`]‚|<ߒ~P?N}/%7A}%f?I'*zX5mav? 
OP_ҿP4[tljܤ)APYou  ;T R`m:u15^t>6@M43fBV54.J mٶ k Y-r ZZynħ6=۫\CՃ-h-{ϏΧÎ:+" U@ k@"̫}!3 TM]tJN`0h0]\,\b*Tر$&" @ąɹmXXW6N0:3imTYآ/ա\Ml send-spec -> send-reqs -> query -> return-rps -> create -> filter -> claim -> return-hosts -> send-hosts; lane conductor { label = "Conductor"; build-spec [label = "Build request spec object", height = 38]; send-spec [label = "Submit request spec to scheduler", height = 38]; send-hosts [label = "Submit list of suitable hosts to target cell", height = 51]; } lane scheduler { label = "Scheduler"; send-reqs [label = "Submit resource requirements to placement", height = 64]; create [label = "Create a HostState object for each RP returned from Placement", height = 64]; filter [label = "Filter and weigh results", height = 38]; return-hosts [label = "Return a list of selected host & alternates, along with their allocations, to the conductor", height = 89]; } lane placement { label = "Placement"; query [label = "Query to determine the RPs representing compute nodes to satisfy requirements", height = 64]; return-rps [label = "Return list of resource providers and their corresponding allocations to scheduler", height = 89]; claim [label = "Create allocations against selected compute node", height = 64]; } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/images/scheduling.svg0000664000175000017500000003177700000000000022407 0ustar00zuulzuul00000000000000 blockdiag actdiag { build-spec -> send-spec -> send-reqs -> query -> return-rps -> create -> filter -> claim -> return-hosts -> send-hosts; lane conductor { label = "Conductor"; build-spec [label = "Build request spec object", height = 38]; send-spec [label = "Submit request spec to scheduler", height = 38]; send-hosts [label = "Submit list of suitable hosts to target cell", height = 51]; } lane scheduler { label = "Scheduler"; send-reqs [label = "Submit resource requirements to placement", height = 64]; create [label = "Create a HostState object for each RP returned from Placement", height = 64]; filter [label = "Filter and weigh results", height = 38]; return-hosts [label = "Return a list of selected host & alternates, along with their allocations, to the conductor", height = 89]; } lane placement { label = "Placement"; query [label = "Query to determine the RPs representing compute nodes to satisfy requirements", height = 64]; return-rps [label = "Return list of resource providers and their corresponding allocations to scheduler", height = 89]; claim [label = "Create allocations against selected compute node", height = 64]; } } Conductor Scheduler Placement Build request spec ob ject Submit request spec t o scheduler Submit resource requi rements to placement Query to determine th e RPs representing co mpute nodes to satisf y requirements Return list of resour ce providers and thei r corresponding alloc ations to scheduler Create a HostState ob ject for each RP retu rned from Placement Filter and weigh resu lts Create allocations ag ainst selected comput e node Return a list of sele cted host & alternate s, along with their a llocations, to the co nductor Submit list of suitab le hosts to target ce ll ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/_static/images/traits-taxonomy.svg0000664000175000017500000074250000000000000023435 0ustar00zuulzuul00000000000000 image/svg+xml ././@PaxHeader0000000000000000000000000000002600000000000011453 
nova-32.0.0/doc/source/_static/images/vmware-nova-driver-architecture.jpg
    [binary JPEG image data (JFIF with Exif/XMP metadata dated 2015-10-12); not recoverable as text]
?Mq=|si@:7yV3esq!NuXxy М| RFC֦RV&>^X4q團D5gZS hBqbK:{mO.4bɮGNuoY Nʀ)tQR/""")! 22@DD$Sz%"""""""^DDDDDDDԋ+""""6PYa%""7Pzs6!""""JEDR@:`'S%""߀r7ϢQDD}@d<?BDDDDԋ.'@sBDDDDԋ>?DDDDU3P ] Xh♝k_wy٘6mƍffQػMb\9n`t?p2_N Kr$6'XVllllll6^Gkќ{{8&62220<~b0yIiY)]!%<3&<9ttYxz]􋤹/rlLw5o?'{J$H{OU[CZV)J[+A"BqNHy^yss7ywI+ e<%B<^m*(!˜JKz!BIB܌&)![!B!IBSA:DK`!BHR/TkyIB!$Bazֵ'$B! !09>v\ O'/WWo.68rhyT^ou%HB!^!'6vvt sjCP,I4~-Q$BIq#]D) ~"^!+=7+y"rnk}*ICk|YZBIB<֕ zrϛÖqS[s:g_!gY&}Uigܱ\Ty+J7^b B<_=!x YX*8-B!IBStB!^!I*nE,O!Bz!&ɂ9!BIBgsR%B!^!ɱw 51W!B!IBSdA !BHR/9I{!BIB$' ҤB!$B!L !BHR/YZ)d!BIB\ssE!B!IBS f!BIB`Rb&-B! !0>OLz!BIB^R#B!$BaI}tB!^!IJO^B!$Barnf\R!BxX;Ne}YԀB'玤| a:R(p-T,gX-k2H1SdBܯHVAuooǛ͎|!0kgQ?6|Ofi{ԛʡ˼B?xa{^^㸛KJB3} &c,dc$0B<Ϣ ^`X*H`#(\ !pI B^!_ !BIB'8-B!^!iiAU!BHR/„% B! !0=$B!^!iz!BIBj!B!^!i 0!BHR/4e!BIB_ !Bǟ Tg3gBW{TZ)v͚Y&MQ}S>`D8@n/;>,syzVȸv-2((JܯǼw*WaϻIRƣPƔ)LK;lٴ^^SryKN]\Lڵ$'+ZW !BǟDjк[iРN=;zI aUY-fccoC GZ0M@d%|'mK2VD#ARݧO!@[g͚YRmB,,,0Հ}܁^@[UlWttv@}BȐ^!HT]ף_|0[]Avom Bxd[U5ѣԫWIIq/>`D=ǿx> -d{!B'ʣRoQz!?],U('&! !BHRY 9s&b93{̘IslB!$h7k׮f&U# lsO kDZ%&-B!ICQKNkM{Sv5֮ݍz7n$녽vvt⮷/JXiYsssʕsm;nӣG{ 鋛[Y>`fۧZRSә6m<ժyҩSKaEkK\Х_UU}oߜ+3r@7l˟h?|wr4k֐E~'9===+ЬYxaF} jժʧAŊn4n#Yjqq UaXO7z%&&|]0x.f͖ێgΜ|4}_MyҜ>QhBX l?]B!"Soknnp.b]oPtIssssZԮ]r\9}<Ўla st*u2JĪT?|_޽:GhdV=ɫ~¶m{pv.ARR2/j'Oў6m[Fnn.M[̜=ųjJ34l؍F܅Am>ҥ)S:d`:thQ8񵳳jJ9AÆu&S>…3g)cLܹpu-yۺ|M^yJm?wd:E˖,]:cS|툏7sZ?~)bK3[-7lXv۶ժU*kx|ѫvL #F7[^mFy۶56koܸM11W{耳cԴG0=E@"PB!=q=O4| \~^xk}6ʕ=ڵaܺ#ѣHH67;vqc_~ 'W)M UU ݵ---pv.qdk'Oe>T批9+}}dÆ;W}̘ma{Y?|Cagg՛\/\zhQòk%ڴi?ί~ʕ&~~ޜ;e4botu-׋;CiРm~o~7ݘ8Q0}}ص^{ ;MhQVAdNr\?|5U=>@( * B!()t7MFXڷoNZZ: dݺm\Q2neeg@rr*J93f$iN=~uWaSԬYgؽ _}]Νx WunLbӦط?9l?!11eKӪUzhǴi R"Fpoذ.=zrYzhϥKWHKKOF[/akkCǎXz[7c Νkל% KRR ].r}{{;5k1j Q}HO`Μtю}ȑ9s&]M7n23g.յ7}(/=пoԩID6o?|R6a͚-< dذXw y-[2zH/^Cn/2p`Ⱥu[:u]K@mok̞[ Mlڴਢx>jfLԓ>-!B('V5hsp|,Y{{;|}8th-իW6Z>ȋ4mڀĐըQÇ_{_NJF]6cw \\JUZp*.\UTdym&N|K3rʗ/CVMQL4ޤBrL.kIŊ&?~ +Vl`_{ig#Fގ*U*(˫:ژҥ]8r$ ;;,]:"_yGUY&2bGeʔbƌEDD\lҴkK/=gؾQzFy jlt!pؾ7&NRNM zVRLb駻˖-;zMԿ-^V59|0?Drr*իcfٲ|hC 9fMdȐwٹ3@YOdܸܼD ޽azjSQի{R3.]79cބH jC= |}(UE  M@^!B;R =ȪU3ޯXC#Rۏ>N\!$ @z!B_092#FB<0yyyĚE:6)`% !Bao޼t]Frrc]YY̝&::ŋ'!:p#"K:2w2ߗcشiM<56^@ ڸw&;\7ڰ$B!sRug:8;ť.ժ#9w.X]f ׮]+=55A""]oz#?Wݿ(}|E~g兆Og!F|˖|ےA8?~fMZ8'h?0(pkB!~G]@kk99$&&nʲzm>ω2BBrJwrYG*^{m0Yy'Oc%=]Z Xf 6s Klv_=` 'ty !BIoՠO?˫:D G}v$'##ss3ڶ 4ZݺaooKXi駅tư^={YA6Xc.BQ͛w`kkCeXb))i݉uk{.ر#>}:d:ΟS4k֐dVĩSԩ%AAaʍ\]0:E~Szٵ6,~Kg`,Ò%8zJ9ӧOgTXQQ KRR!~zZ"##-Ѯ]~}3pw/G>`Tʼnn%[XA&6VȆ mm};kG}âE I鋈'""[7%,4_|17$U:k}ܹǨ |OL%K:W_sС8E\]KѲe/}7Z5#Xj}vcӦ7xy FIKW>CVdRB۶sezΝ|d֯Cn[?99+W6͛D߾;wGs-ط'cI7vٹS™3!DrYn+Nys~a!!{Xэ V!43f|aHJ.\eԇW?1$7ng֬%ZAff&'ļyЯ_/-[ٳQtd(Ss=|9w-[x7[Y6mc_wNVի{rF7fԨϘ57lYXdbr,ABQYzÇȈ2S{KZnJǎA^x7v- q4hc({Ҥ9rm9s1s.("^>0G}: x{PGX9-]:U'wUuIe;={7EQ۵k~۾ukkX6{Wj7}aݺ9_s  6iR7ݺ5<SΩ` ߛ꫃ԺukϝZ:akIu„w n8Xs~Sd}$%P-,,Խ{WMСZVUUU}UU Sw3Mf ۤΝ{(s^W- ǪZ'W8G:ͷ"~z󤤔[fVԠ^DF^dɒqu-f_ Θ=|O@,,, m߾ZZx\xh֭P|Vp'6777jm++Kz`t<.\ƍDwN+ѤI}n m{P< @Qwo+nݺhј;T#'ȼrBCN߾ ˺uk-۶)r13oݾ}*gX܅xN<[ qs+k}Æuhժ …w1qcۖ6$77˗c kIڷo^=ӴLbb2{ocZ_~Yqݱ>oP^9OM=T64jT?܅lݺ?~K\寿ceee{s= u]Wג9Ǐf߾#A7@!B>&n~\*T(ORΔ.m|gy||sl$8pŋ'U`eeIVV6qq ?|90zF8; 33uqq2 #(Q2eJhoXht,_xv  6>>5^S濮K( _|1hyIII5J4$.:6F1vj&ŗ/beew1e߱׺wWZh%(oɩ! 
nWm˜JܶߩRQ;gcQ4芃Ԯ] {֮Ll׫1c"J9.Mf.ukC^J@͚5d9r-;7Ý  !B!Ih䳀?ٸjJ N>}iikh4[,^-۳GkTݽa *T(PUЊz̨{{[2Lx; __/~ۻ>^sc^ $~rp*U@VV6vEFEĥ 2:UGZ|툍7Zw;+( ۶);|9]:ffmQwhӦ'ڵ[iݺ)#GNp7f*xyU# !Bf8;w bذ^kzOevچ֮T_Obg~_i2E~'!!y=t(=If:ujI||kl ==ŋ7*Vdddi_6` ۷˗aԝS.]%99հ, R\1cYY٨̙ͣk6EӨQ=*Urgڴ_HLL6,/k6ƳlzEsQzeZsÇOpE~ :ujɂ+tm1pu-Enn.⮳pB:GƾxzVofz!EiҤ>fff|Lڴi @۶Xv+ǎӦM;/Μ|}lڴ ?Z@y 4xI(BHR`X<fw ի#fҽ{[ڶ $++Wvb߾#wUfέ>u{xz6#4(ӧO0L</SR37M{ѧOg{ʐK1͟J渺2*Z5OFϞ/ѼӸ7NZw;^:?3:u:з+nSƍ}qwoDz;w%J80}[Nj-[#o9 ޡVEcccͼy ً[#zJfyՏmQ^$8x $$d/3g~itEFhшFzLʁ4o˝2Srss }~oߏ`>֭O{zJ*iժ^^iҤ>nnZ}̙;;[/ž=(SݻC~52Pʒ@]K0܆~}rssqu-y9V%pLpH.E`bui7 K&rs9@˾뭍ՅB<=ȪU3F>|ӞgY1DF^^ٲgϡݱc?5ƻ:ѣpqqysJr1vY,--GE]"11[]ٳ4mʕ8c&ʕ+qF]R}YcN IDAT] ˫: s4cdž ۉqc_*Ur'4 (=űcӳ-Z4۱ #D!xtdR/^rLQ#xmذ!Cs3 |\Ir7=]_ t0Zkb}e8ȟ1SѓbZ-@(֋.Zw=Tc7-p\X,D`Vxz*4Be+z5P:~~KBI~L- zL.}|} ʼnUF} CS|p~*WwX0sI%B?Z q$$ܤf֤wc$BQ">z◠'_4(ígNOF :'{| ~ӓ E7ńfz"_V>KO+k \`ȷKOPR26yhRRל}L}n|ǒ-<$o1~,*d}@+ q /l/*/1Fh쀡B <1Ib]hɒ\vS ԲHPPc> '`h-$'{|z;J>[O!h-z z?}T M?uK`B~x}(^VZ+~̀wf{^ $MPC5>_DɿϬ|tg\/H~ʠZ 7G^O.un7"~m9 FY>[>VB!Roy#Feddr5<=+LSRRILLݽ#{qqױ_u\\faشiC;D.Wƚo@AڸQ_D= Th ==ֹ'~!y\=qg{7_;Z7кO/d1sC0}ZWv Da}.nguxmBV1璅6ߪ4I:FbYP|eo@;HY`|}d6l*U*vjR6t4&n_3jVߦk֬a֜9K?蛾OJ \zXf_22)gZ/$ ȏfcj}?蜜ƍƍ۱%8+Æ=wmlIꕩTݰl|O=ȾA:wH:z?~{zz6c7096!aaIMMqcd'Rq6.;\OxuWѺ,A1`DwC(#nέrk<9ГZ'Juo=.Zu_dzNbbTx yO@*RK? Cr$?[/~v^i]2E6Z#z/( OH g_smh.rZ/qHm.DU:{Ќ±w2~_4ݾn9x3?sĉ3/^յ$ éSSiܼXi۶g}' o[*UFVV6[2&LM߾}qK+~s>[Y8vg> /%11Q>e葼Nիwǐ!}~ɒ1% )YjcsN{f7wtRO|R? Uܶ,&%W0FO֬eDu26g$n_M %8zi3$ڤvwHNKꫠn6@nM,<$P~RdM` 0m@l=c|K=$Kx)&'E`'g=_?K Nz^m֝:|1##kדos!0UUQФI3W΄m;awj'n|Y.]zs8}:*U*R݆ΟյfwO T LO`ϞC.]ukmd:BB~E $331c&kcʕ8ϣ( 5kV8Ϟ~}ʖ5t2>>L*T(;E׮݋9t( sqp7lOnn.nn:AܼčT|kbVV6CUU둘LFF*W8NFF&~X4bbb#<\mp..}붂{[[5Y;vԫWe]Qؿ(UT4 HKKgϞC-[Feddr ]Q=Ғ%8x0 7h __"*;Źs4hP''G"".PZ%,,n}]KѓXSvvr`kkM8p {{;իm( ϴt=ERR SN#::*bx=!!4C}zx7!~o]<#g=ד1gu[|ߢx^mB8=)m]_E~@+qqƫԓz"?s~JZ]ғMP[O6Z/! c.zȭ!Yh-M4XZxq\p۵4 8S]S8suX~| ,[6L~#b kvzwc>2wܙ9.V"ԲFGG,PX1}$`zh׃zSH :+7SF݋qO&2h$<))hkkf.s,&<7?w?\A),>: ڵ[TjIzz# xHO޽pT魘g;+W嗿]9ȑ3rUN!A895С@…+3(*T(K``0 |C_ug0wseΜI8;;:2eJsv0sL⫯es'[X[[˥KWqvvdAQ_S!~DGvʕ+qM'1ll~Y3nnm8q?jլ cҤO3g1g7O~q_~JI %ؾ} `)TTS.ўN=U:t?Tҥѿkul~kk 9MJ!q4nlطIId6]2zn_943?ݬž{턲nؿ { rKxYyr8‹ģ_u|g/kh&{ٗ|椺Uum|+Nuw)vJ9#33SV< E/&)V]~o>`x᱇wrzsV6lة̢E+طGl0?f֜>wݺ>㏵ϝ̛7;wNœ'z_}5s8/0|iiFÇOOپ}7oi~g<߱wJs[7e?f6k8~'[ѱ;K8:lه7leg4G "Xp:#F|IpXԯ_{z 0wrxn8;;ЇFгI1 ([ky[, xD96^ J~]Nm^B }}mx⩂cb[`Ӧ=8:6zjg ]\FlqskÆ 4ٳ5kVͭ-iӜjլ՟LlE-YZZ/^L!O'@KɓGPDqtѣ==0a;޽GO`ڴQ(QR 4i8QxznO߾];vtEiS{ڵS&X.[ ggG\{޽+vvE:x1r7Tf_wNTn^'-- WזTZY}s'+g7ر 'Nή: `KN)S֭r _9r-[ciYy۶Y*]*[--+Ec˗o}V(.^c O#nժ1͛+OףbrV?qrj:ǖ-4tuuP( ܇ŋi\'%JgRDqvmq"__Ξi%D2y^Ck'6!+ᠾ 3A:SNLH)(ڛ455[(ŋKJAAYd됐0 ̧ҥx4Q4:8<dll)U>>^9ܔh=55csʪʙ_[ZV !)iiihk+nذ0Uk^^Mu܈%66Gdž;:6֭;ҲtŸߖEybcEIIIrBGD|JsVLi5ޫXQYW144m?&,>j`iY#5Zs>\Y0TT5s YNOJgjZ{ILLRaGo,L=cdd@b{3e_nʘkbRb ;koe$99Hou?\o#<~3/V=æM{P&`.[֌Kj|O Ra˖?^zyVMLJaff½{gyk]SFj׶9!B66VlڴDz! }}=Z[ć62]r6ߤI9CjjOd[fceٻ͚x.]ڐŽ]kk 5ll*s^D>C>MBw r劜>}Q6oާ :95a$$<;?gccETT4O&ifIMMq\f_Ⱶ裣cٿXy޾s;nok\Lvّ]q~dSml-,-8p!,##Ϯ#(:*##rA쿠[.^Z"kP fŊMԨњ?dٖ>37F =q*)SdĈ/rE?? 
_ٶm _ݻ01)O?}epp㏻ӣӧ_qsՑ/X@i۶[lY3qOmۏ\Mu ^0igWR i~vvuIeU0~,v8HN%pIL۞쉋K?ƍA,_zjEӦue?XONZz\љ/Ӻrxz8q8w{RvzhOus'sW>FGdžL յm6'* {?Sg/m4gѣ,-+2rG<B!xm ^NQdϟZ;oy;msYt e%GHLL`wweᘚ$'Dv,]::u[l:/BE  jD=^MR__3g:~i4ik+P(j,g eԉT{T)CQ(( ,2ueECcL0\:&,>3=Sb9<ʙ3`@O*WHUX <|Rjeոpe}g!ʄyof'Ȁ{0TfР^)cFDn޼!cR'1TSm\{ْ1]88]IZZ:nl逎2gCʕ4kcffС}#11 ;9NgX\苫kkak[Cޅ;z„aف={Vpgjժ1gO 5!g5@ ([;3A=Qvo 4S- pJjm([· Fzպ`5CUF9hP =7hP\aY4Q+Vxд=5kV%&&w1dw1ܻΈ_ҨU[{7w1С$$<} wjl/9s+m8 tcA^HFƍ2d/A&ҨzС9|:J>iذ: 1V͛2pD^6ݞt0~?9.setgϑu߳~l >M{ΜlڴGmWQ4hБΝwgII̚ ..hѢ73f,$))9K>[J۶ӸqW>snLk{/h}A&> bXh: Q#7ùsWG>F:&&3ڟ ;ѣ;2r-ZSq60(Q-##7PS(֢_U7lY3ukiݺ)%KJ)`ŋcޱRB|RR`+}w.p H(Q TPۮ g*_>7eτ""qA'!CvOİ} 6nMt\dd7Khkk1eԩNÆu166 ww7llHNNao?@ppŋۅ ֶGOgٲy^FͨQy^iViذ?~(4 [hi)5kn帽O>{sv>R Κ5[/ť|XLiCԳfΩSo#GhLy<{"Gř;w2ݻr@9ť۶K߿;gz/\}G1sNaúDE~4_ h,v_c&o)Wo㏻hҁ(""y"!)7&..Ga#G~̬YvoXR9)o޼ÂPF޸q_Aw7eF _oMzz:K"P1%ؠz46l ۪ >S((?#/͇P8__zji5ؐ+7gZ~'?o;H.\H|0"+Wm$'_><OϞ;w2nnm03k5[6=۲z~~]k~3{whL)3g>?~[q&~ѣ=-ݽ ehN=alҥЭ[;0gbv{1s/=Nj\\QRl Ϝ9)L8<6YNoF4hP\ή6۶_-P6mu~ƌȇ*cٝrڂ_~ݹ X {z~7nT_/˖P4T~͏CKP_ !'6l-LB:eF<ʙbP_"t@K͈Vlo?I FX޽;bhXfz:qK;@rrGUd MMͥKWs\?ڵkkKnOBS]IBSwyA&͙3^B:TΜI)(B~F eƌg%''seʪZ5fM4nlGbbYdN|)7[:{ևYLs.׮ή6}tbȐw=Ӯ] {ľ};3e<ݕ.]\}2yJ||Bi˕3'##*uWn`Pظ,ojj*I91Sfφ%KPg V/7xpo6lERR2;vx͇8ttt4h5W>3gNvj\6oXzE͚vk]Y?Сhi):t 9y|_z yF|l':8pEN>׸t*;v 99};_sytP7ǰvٳ>9rF Q00(@jBJGGH.s@v+n' \@٭t!>8,a\ɍ۽RCÒݑ+7b&UE _%g l K׏G_m6nnmiٲ~~/C%o;zKsJ(^%6mφٳiqիзo/~kԨJjj*խUIegu˵nݔ%ʔ1VxW#!cNqKvXhȖ\^?55۷_޽Gѣ=']Ԫe`07,-+б3?\O=r}y :O[PcÇQuDL& ;CjBOdyPf_Tʦ Aq? tw /xdvx6e__!0Pu."8]c!Cpe-[V4oވxyyI )hkkӠAVDDC1cFƍ ƍRSSY~ _92qq Iݳ6̝篰dؾ~꠿Sr9LZZIIuGKEfпZÕ+~/,=ټy/ 99?Xq#@KKj<Ǔ'1,[d={5YzxSRRWF*dmƍ}g޼%a y1jǬY OS'T$ߟ9s#$ 'Еc͖޽pF&Mѡ@֭{~Ec- e(՛r @{qU`_ nPvӷtC?gS]Pfrs֢ժYȀ=^ij0n I}[9矧s/ԯߞm[`ff>ꆃCW0{"f͚Je;v..(_ށEVfٳfM`YԧGoƪYWK5T?*Gٝ #prjB(]ڎU63n eO#1`bdT[\}Рޜ;wU-qtlzY~1e˚ڟҥ05e۶吃pUjnyCJًHKK#55SSd-,,Z%ժYo~yOT^MLɨQ_ahX>ԩ#57o FFup}}}&MD}cѢ,YSS[:u1`TbE*Uj̱c^67\][RLi46{*u HǶ {+ew3tj֬[J6a?hժ W@ppvvhժ1~膯?ٳ'2zN@qvvK62릫7cΜIbjjKbbׯ ݌5^BKKCqnƎHŊ7o ff&\mmm<>l*--0,,ʣݽ{ ʾu||EPF]OĐqtmmm*T(zˋxS| }q4GbnnJ&=z'TZ-g/ɩ/AA(U8x,-+^>V?dk#ضO@TDDb!GY(UʐzjjdtqiƎ3g_؈ky+<=O숖wrf-Hݺزezj̘9={vvnݡCAnݔ笑BYV([F([}rlT(eSYG^!WꑕCs3rk? ^f{)ջwBY8]]VeJ,/ QyJ⧫-?*͍mm24,01)I?+]$[3sDl)YUXo<x*,-+Я_MAŋ{\%J H |2KaFanԺuSbb3B-ޱ.BE-Z8ТE?m1d5z#OmٲO_A֯_| <14,!izzʟew*U,)VժY3oTtu_|u14,ɱc^xzb0wׯܹrNwiӦOvlY3865-0}}=ty0|?R5P͹ !!t0H=ydaQ|-Z8DKPFW^Me/FRTڴj՘]<9s:Q?ʕ+X[[pum (ǯ\Ukk 6o~$&&tw."kk Ba%! 6kAT"?xyɧobSĉs,ZB=m]TZp#:Y>fZ+ʕ3GPкuSsF7z[lmk1wIRDq>0w:V8zԋ&烼z'ߖTPXH5!Bֱ @ Ǎa&۹ +VY(_ff (WvT/3`@ q=A}rrFߦMl) <<mq m[J2s]VK ?5UNDZcg_:݄?q(O !zڴ Kabbyߏ6O $$\:::^޽p[Ǜ3G=WyS322 D*T(Ʀ{0O_8퓰;8ZIdJ;lJMMiWx9MLiWsР^GVP?ٸq7|{8РA͛u֭رrB3r_&śצGw==O |J;J6ɓŦo殅BAJq(~S B85Z5x' Cz)VIok_'@=9Ba_|Ag!oŊqM*WH4-߹I)ܹh`N޾s- NXYUʶLRR2WAժSx{_ѣ>˭ \#22ƍ}^fU,,4{&'p%QJq4Gbmm1EMaI]N-/I5!(j6l*?~6뷧U܆?SCNE޴hћ\Hݩ_=&ں9O"56N>kMD!M/44"ϑ{z]͛G|y:u~l[W4f/_~p||}Vn/W]zBϞ#qrjBHOo%<<~EOoȑ3Z%eX|Gzq6n5Y''0s/\#>>8}z+8;c?* BHMֺҥ'O."==]!3{#G~%rB*_C+oڴGdžt@z5۷36!##C\..4j`Z5k뗇wNԯ_[ѬlؠL{'%5[j {br%wBa~IBo2⍸? voBQ^VMx4QcJ:GdžXZV$&&.Wx8x4'FprjcccEժsF`]Wwť?'۷KTߏݣyF=Z#b8qԗ[0* CNatV.(Ƹ4 R KPODßK@&lxCjB!D!Ӱadjj̭[w4޻uZZZdIu~03SdnnJHHqq9œ910(G>< ~bry>nLv-8~'.U&|gl~@c¼yK-Wxcf͚^?@VDGK"_].^XRB! cEu&MpI5' IDATs{z+/֦QsX^rr Ph@rr ;wqOiذBDC?ck2sx6[c>}:ab՚c"44BNQSNASĤ!!k17yx쉗BQM: =TڒCk!n`߾UѾ4H׮믍hii`k[?BmtL@]WO3p`OzR/^+7ck[+OeW*뼛B[)3:rǎ=TjDTg6"VJ@{`+xxcvR= w_m+J. J0p`O222u+;:ll( 5P@:թV۶_rEww%>)AAh׮?uR<WV|A=?6ʙӳg{LMV '&\B`4k66Vԭ[C [ZTbcyLLJFlla߃O>ehZYUή6%)^n]m~k (SƌMZ2Ύ\|Zl00((pvvP!7ywaaoawc@`UB/##(."/RRRYZ:Ԇ@y;CB}+w:M^a݅ 1x902ˢߴST_!!=vv/_avW M߾}==O|wB `͚G !(bcSx7Mַ?^! 
Uuk4ڳg1.cB@FFFB0`tBQ?t7Ԅ=_>GaCP(j8i)p466Ҟ;wrdf6Ι3?xHGWW7ca]Ç`˖}6: &6~mv˶cA6dEf|`m?Й3g{KXDd?n Nh O!D0xpu IKKC[[gaS[(%^{Ł̬I.eU?6|cTنll#emCa2Hy cś"cꋎ=1ddiedd+2k$Ļc ׬xK{z!Bz!AE*t7 ^Y~xwTt' [ܥ'B!ۗOjBwDppXrFFʷ_ B!(w޳E ! ~ %AB!iF}2?hĎ%B!(V8E6 vQ($B!=_}m6EHEѸ qAAz!B! Prrr!)!DvHhhD/B!B>|>88KjC"cY:]zd!$B!8s7-==}뷤B(8֨Y*ˣ#D!BAJ[XTggWѢE3ZYU>|?6IZB!(\bBBº?MIIR3Be<ӉVj A9] -B!BN-7oɂ #{;֬Y%mߢrwuHֶvZ:лwe2-۠A=S Ւm_8zKUnD '}F^+g DwqǏ}$fCaC+)9!U!Y{վ PW Ucsno7`d!;@e_ KǀǪ;@p@n~6Qy߷Q,BAHEMWּP;Uc@y~fP2^!B!($B!xu@u^ZW\7gW1KV!AB!\YՇ 4fR4nf?!^!8v% YkQ&rWV$`4€TA}ur !$B!Y_M`UFr)({ ު\zGU}(:"6|-p~ Dɥ*xɔvB!2x@c6P\Qq %UYQꨞU!͇7e2콐f9n(G棚FuC"6 vBOG@!"߅?gy TA*X!X- 3dh?PS|Xu Z/Jn6d =?I9rbƝBw$B!xҀ[Η,ܶj& (WW{d<ħ# !B`6=:!"$QB!B!AB!B!$B!B!B!B!B!B!^!B! !B! !B!Bz!B!B:RB!@7!D4*BHP/B` .. rubric:: Password injection on libvirt-based hypervisors For hypervisors that use the libvirt back end (such as KVM, QEMU, and LXC), admin password injection is disabled by default. To enable it, set this option in ``/etc/nova/nova.conf``: .. code-block:: ini [libvirt] inject_password=true When enabled, Compute will modify the password of the admin account by editing the ``/etc/shadow`` file inside the virtual machine instance. .. note:: Linux distribution guest only. .. note:: Users can only use :command:`ssh` to access the instance by using the admin password if the virtual machine image is a Linux distribution, and it has been configured to allow users to use :command:`ssh` as the root user with password authorization. This is not the case for `Ubuntu cloud images `_ which, by default, does not allow users to use :command:`ssh` to access the root account, or `CentOS cloud images `_ which, by default, does not allow :command:`ssh` access to the instance with password. .. rubric:: Password injection and Windows images (all hypervisors) For Windows virtual machines, configure the Windows image to retrieve the admin password on boot by installing an agent such as `cloudbase-init `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/aggregates.rst0000664000175000017500000004476300000000000020600 0ustar00zuulzuul00000000000000=============== Host aggregates =============== Host aggregates are a mechanism for partitioning hosts in an OpenStack cloud, or a region of an OpenStack cloud, based on arbitrary characteristics. Examples where an administrator may want to do this include where a group of hosts have additional hardware or performance characteristics. Host aggregates started out as a way to use Xen hypervisor resource pools, but have been generalized to provide a mechanism to allow administrators to assign key-value pairs to groups of machines. Each node can have multiple aggregates, each aggregate can have multiple key-value pairs, and the same key-value pair can be assigned to multiple aggregates. This information can be used in the scheduler to enable advanced scheduling, to set up Xen hypervisor resource pools or to define logical groups for migration. Host aggregates are not explicitly exposed to users. Instead administrators map flavors to host aggregates. Administrators do this by setting metadata on a host aggregate, and matching flavor extra specifications. The scheduler then endeavors to match user requests for instances of the given flavor to a host aggregate with the same key-value pair in its metadata. Compute nodes can be in more than one host aggregate. 
Weight multipliers can be controlled on a per-aggregate basis by setting the desired ``xxx_weight_multiplier`` aggregate metadata. Administrators are able to optionally expose a host aggregate as an :term:`Availability Zone`. Availability zones are different from host aggregates in that they are explicitly exposed to the user, and hosts can only be in a single availability zone. Administrators can configure a default availability zone where instances will be scheduled when the user fails to specify one. For more information on how to do this, refer to :doc:`/admin/availability-zones`. .. note:: It is not allowed to move instances between Availability Zones. If adding a host to an aggregate or removing a host from an aggregate would cause an instance to move between Availability Zones, including moving from or moving to the default AZ, then the operation will be rejected. The administrator should drain the instances from the host first then the host can be moved. .. _config-sch-for-aggs: Configure scheduler to support host aggregates ---------------------------------------------- One common use case for host aggregates is when you want to support scheduling instances to a subset of compute hosts because they have a specific capability. For example, you may want to allow users to request compute hosts that have SSD drives if they need access to faster disk I/O, or access to compute hosts that have GPU cards to take advantage of GPU-accelerated code. To configure the scheduler to support host aggregates, the :oslo.config:option:`filter_scheduler.enabled_filters` configuration option must contain the ``AggregateInstanceExtraSpecsFilter`` in addition to the other filters used by the scheduler. Add the following line to ``nova.conf`` on the host that runs the ``nova-scheduler`` service to enable host aggregates filtering, as well as the other filters that are typically enabled: .. code-block:: ini [filter_scheduler] enabled_filters=...,AggregateInstanceExtraSpecsFilter Example: Specify compute hosts with SSDs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This example configures the Compute service to enable users to request nodes that have solid-state drives (SSDs). You create a ``fast-io`` host aggregate in the ``nova`` availability zone and you add the ``ssd=true`` key-value pair to the aggregate. Then, you add the ``node1``, and ``node2`` compute nodes to it. .. 
code-block:: console $ openstack aggregate create --zone nova fast-io +-------------------+----------------------------+ | Field | Value | +-------------------+----------------------------+ | availability_zone | nova | | created_at | 2016-12-22T07:31:13.013466 | | deleted | False | | deleted_at | None | | id | 1 | | name | fast-io | | updated_at | None | +-------------------+----------------------------+ $ openstack aggregate set --property ssd=true 1 +-------------------+----------------------------+ | Field | Value | +-------------------+----------------------------+ | availability_zone | nova | | created_at | 2016-12-22T07:31:13.000000 | | deleted | False | | deleted_at | None | | hosts | [] | | id | 1 | | name | fast-io | | properties | ssd='true' | | updated_at | None | +-------------------+----------------------------+ $ openstack aggregate add host 1 node1 +-------------------+--------------------------------------------------+ | Field | Value | +-------------------+--------------------------------------------------+ | availability_zone | nova | | created_at | 2016-12-22T07:31:13.000000 | | deleted | False | | deleted_at | None | | hosts | [u'node1'] | | id | 1 | | metadata | {u'ssd': u'true', u'availability_zone': u'nova'} | | name | fast-io | | updated_at | None | +-------------------+--------------------------------------------------+ $ openstack aggregate add host 1 node2 +-------------------+--------------------------------------------------+ | Field | Value | +-------------------+--------------------------------------------------+ | availability_zone | nova | | created_at | 2016-12-22T07:31:13.000000 | | deleted | False | | deleted_at | None | | hosts | [u'node1', u'node2'] | | id | 1 | | metadata | {u'ssd': u'true', u'availability_zone': u'nova'} | | name | fast-io | | updated_at | None | +-------------------+--------------------------------------------------+ Use the :command:`openstack flavor create` command to create the ``ssd.large`` flavor called with an ID of 6, 8 GB of RAM, 80 GB root disk, and 4 vCPUs. .. code-block:: console $ openstack flavor create --id 6 --ram 8192 --disk 80 --vcpus 4 ssd.large +----------------------------+-----------+ | Field | Value | +----------------------------+-----------+ | OS-FLV-DISABLED:disabled | False | | OS-FLV-EXT-DATA:ephemeral | 0 | | disk | 80 | | id | 6 | | name | ssd.large | | os-flavor-access:is_public | True | | ram | 8192 | | rxtx_factor | 1.0 | | swap | | | vcpus | 4 | +----------------------------+-----------+ Once the flavor is created, specify one or more key-value pairs that match the key-value pairs on the host aggregates with scope ``aggregate_instance_extra_specs``. In this case, that is the ``aggregate_instance_extra_specs:ssd=true`` key-value pair. Setting a key-value pair on a flavor is done using the :command:`openstack flavor set` command. .. code-block:: console $ openstack flavor set \ --property aggregate_instance_extra_specs:ssd=true ssd.large Once it is set, you should see the ``extra_specs`` property of the ``ssd.large`` flavor populated with a key of ``ssd`` and a corresponding value of ``true``. .. 
code-block:: console $ openstack flavor show ssd.large +----------------------------+-------------------------------------------+ | Field | Value | +----------------------------+-------------------------------------------+ | OS-FLV-DISABLED:disabled | False | | OS-FLV-EXT-DATA:ephemeral | 0 | | disk | 80 | | id | 6 | | name | ssd.large | | os-flavor-access:is_public | True | | properties | aggregate_instance_extra_specs:ssd='true' | | ram | 8192 | | rxtx_factor | 1.0 | | swap | | | vcpus | 4 | +----------------------------+-------------------------------------------+ Now, when a user requests an instance with the ``ssd.large`` flavor, the scheduler only considers hosts with the ``ssd=true`` key-value pair. In this example, these are ``node1`` and ``node2``. Aggregates in Placement ----------------------- Aggregates also exist in placement and are not the same thing as host aggregates in nova. These aggregates are defined (purely) as groupings of related resource providers. Since compute nodes in nova are represented in placement as resource providers, they can be added to a placement aggregate as well. For example, get the UUID of the compute node using :command:`openstack hypervisor list` and add it to an aggregate in placement using :command:`openstack resource provider aggregate set`. .. code-block:: console $ openstack --os-compute-api-version=2.53 hypervisor list +--------------------------------------+---------------------+-----------------+-----------------+-------+ | ID | Hypervisor Hostname | Hypervisor Type | Host IP | State | +--------------------------------------+---------------------+-----------------+-----------------+-------+ | 815a5634-86fb-4e1e-8824-8a631fee3e06 | node1 | QEMU | 192.168.1.123 | up | +--------------------------------------+---------------------+-----------------+-----------------+-------+ $ openstack --os-placement-api-version=1.2 resource provider aggregate set \ --aggregate df4c74f3-d2c4-4991-b461-f1a678e1d161 \ 815a5634-86fb-4e1e-8824-8a631fee3e06 Some scheduling filter operations can be performed by placement for increased speed and efficiency. .. note:: The nova-api service attempts (as of nova 18.0.0) to automatically mirror the association of a compute host with an aggregate when an administrator adds or removes a host to/from a nova host aggregate. This should alleviate the need to manually create those association records in the placement API using the ``openstack resource provider aggregate set`` CLI invocation. .. _tenant-isolation-with-placement: Tenant Isolation with Placement ------------------------------- In order to use placement to isolate tenants, there must be placement aggregates that match the membership and UUID of nova host aggregates that you want to use for isolation. The same key pattern in aggregate metadata used by the :ref:`AggregateMultiTenancyIsolation` filter controls this function, and is enabled by setting :oslo.config:option:`scheduler.limit_tenants_to_placement_aggregate` to ``True``. .. 
code-block:: console $ openstack --os-compute-api-version=2.53 aggregate create myagg +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | availability_zone | None | | created_at | 2018-03-29T16:22:23.175884 | | deleted | False | | deleted_at | None | | id | 4 | | name | myagg | | updated_at | None | | uuid | 019e2189-31b3-49e1-aff2-b220ebd91c24 | +-------------------+--------------------------------------+ $ openstack --os-compute-api-version=2.53 aggregate add host myagg node1 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | availability_zone | None | | created_at | 2018-03-29T16:22:23.175884 | | deleted | False | | deleted_at | None | | hosts | [u'node1'] | | id | 4 | | name | myagg | | updated_at | None | | uuid | 019e2189-31b3-49e1-aff2-b220ebd91c24 | +-------------------+--------------------------------------+ $ openstack project list -f value | grep 'demo' 9691591f913949818a514f95286a6b90 demo $ openstack aggregate set \ --property filter_tenant_id=9691591f913949818a514f95286a6b90 myagg $ openstack --os-placement-api-version=1.2 resource provider aggregate set \ --aggregate 019e2189-31b3-49e1-aff2-b220ebd91c24 \ 815a5634-86fb-4e1e-8824-8a631fee3e06 Note that the ``filter_tenant_id`` metadata key can be optionally suffixed with any string for multiple tenants, such as ``filter_tenant_id3=$tenantid``. Usage ----- Much of the configuration of host aggregates is driven from the API or command-line clients. For example, to create a new aggregate and add hosts to it using the :command:`openstack` client, run: .. code-block:: console $ openstack aggregate create my-aggregate $ openstack aggregate add host my-aggregate my-host To list all aggregates and show information about a specific aggregate, run: .. code-block:: console $ openstack aggregate list $ openstack aggregate show my-aggregate To set and unset a property on the aggregate, run: .. code-block:: console $ openstack aggregate set --property pinned=true my-aggregrate $ openstack aggregate unset --property pinned my-aggregate To rename the aggregate, run: .. code-block:: console $ openstack aggregate set --name my-awesome-aggregate my-aggregate To remove a host from an aggregate and delete the aggregate, run: .. code-block:: console $ openstack aggregate remove host my-aggregate my-host $ openstack aggregate delete my-aggregate For more information, refer to the :python-openstackclient-doc:`OpenStack Client documentation `. Configuration ------------- In addition to CRUD operations enabled by the API and clients, the following configuration options can be used to configure how host aggregates and the related availability zones feature operate under the hood: - :oslo.config:option:`default_schedule_zone` - :oslo.config:option:`scheduler.limit_tenants_to_placement_aggregate` - :oslo.config:option:`cinder.cross_az_attach` Finally, as discussed previously, there are a number of host aggregate-specific scheduler filters. 
These are: - :ref:`AggregateImagePropertiesIsolation` - :ref:`AggregateInstanceExtraSpecsFilter` - :ref:`AggregateIoOpsFilter` - :ref:`AggregateMultiTenancyIsolation` - :ref:`AggregateNumInstancesFilter` - :ref:`AggregateTypeAffinityFilter` The following configuration options are applicable to the scheduler configuration: - :oslo.config:option:`cpu_allocation_ratio` - :oslo.config:option:`ram_allocation_ratio` - :oslo.config:option:`filter_scheduler.max_instances_per_host` - :oslo.config:option:`filter_scheduler.aggregate_image_properties_isolation_separator` - :oslo.config:option:`filter_scheduler.aggregate_image_properties_isolation_namespace` .. _image-caching-aggregates: Image Caching ------------- Aggregates can be used as a way to target multiple compute nodes for the purpose of requesting that images be pre-cached for performance reasons. .. note:: `Some of the virt drivers`_ provide image caching support, which improves performance of second-and-later boots of the same image by keeping the base image in an on-disk cache. This avoids the need to re-download the image from Glance, which reduces network utilization and time-to-boot latency. Image pre-caching is the act of priming that cache with images ahead of time to improve performance of the first boot. .. _Some of the virt drivers: https://docs.openstack.org/nova/latest/user/support-matrix.html#operation_cache_images Assuming an aggregate called ``my-aggregate`` where two images should be pre-cached, running the following command will initiate the request: .. code-block:: console $ nova aggregate-cache-images my-aggregate image1 image2 Note that image pre-caching happens asynchronously in a best-effort manner. The images and aggregate provided are checked by the server when the command is run, but the compute nodes are not checked to see if they support image caching until the process runs. Progress and results are logged by each compute, and the process sends ``aggregate.cache_images.start``, ``aggregate.cache_images.progress``, and ``aggregate.cache_images.end`` notifications, which may be useful for monitoring the operation externally. References ---------- - `Curse your bones, Availability Zones! (Openstack Summit Vancouver 2018) `__ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/architecture.rst0000664000175000017500000003247600000000000021147 0ustar00zuulzuul00000000000000======================== Nova System Architecture ======================== Nova comprises multiple server processes, each performing different functions. The user-facing interface is a REST API, while internally Nova components communicate via an RPC message passing mechanism. The API servers process REST requests, which typically involve database reads/writes, optionally sending RPC messages to other Nova services, and generating responses to the REST calls. RPC messaging is done via the **oslo.messaging** library, an abstraction on top of message queues. Nova uses a messaging-based, "shared nothing" architecture and most of the major nova components can be run on multiple servers, and have a manager that is listening for RPC messages. The one major exception is the compute service, where a single process runs on the hypervisor it is managing (except when using the VMware or Ironic drivers). The manager also, optionally, has periodic tasks. For more details on our RPC system, refer to :doc:`/reference/rpc`. Nova uses traditional SQL databases to store information. 
These are (logically) shared between multiple components. To aid upgrade, the database is accessed through an object layer that ensures an upgraded control plane can still communicate with compute nodes running the previous release. To make this possible, services running on the compute node proxy database requests over RPC to a central manager called the conductor.

To horizontally expand Nova deployments, we have a deployment sharding concept called :term:`cells `. All deployments contain at least one cell. For more information, refer to :doc:`/admin/cells`.

Components
----------

Below you will find a helpful explanation of the key components of a typical Nova deployment.

.. image:: /_static/images/architecture.svg
   :width: 100%

* **DB**: SQL database for data storage.
* **API**: Component that receives HTTP requests, converts commands and communicates with other components via the **oslo.messaging** queue or HTTP.
* **Scheduler**: Decides which host gets each instance.
* **Compute**: Manages communication with hypervisor and virtual machines.
* **Conductor**: Handles requests that need coordination (build/resize), acts as a database proxy, or handles object conversions.
* **:placement-doc:`Placement <>`**: Tracks resource provider inventories and usages.

While all services are designed to be horizontally scalable, you should have significantly more computes than anything else.

Hypervisors
-----------

Nova controls hypervisors through an API server. Selecting the best hypervisor to use can be difficult, and you must take budget, resource constraints, supported features, and required technical specifications into account. However, the majority of OpenStack development is done on systems using KVM-based hypervisors. For a detailed list of features and support across different hypervisors, see :doc:`/user/support-matrix`. You can also orchestrate clouds using multiple hypervisors in different availability zones. Nova supports the following hypervisors:

- :ironic-doc:`Baremetal <>`
- `Kernel-based Virtual Machine (KVM) `__
- `Linux Containers (LXC) `__
- `Quick Emulator (QEMU) `__
- `Virtuozzo `__
- `VMware vSphere `__
- `zVM `__

For more information about hypervisors, see the :doc:`/admin/configuration/hypervisors` section in the Nova Configuration Reference.

Projects, users, and roles
--------------------------

To begin using Nova, you must create a user with the :keystone-doc:`Identity service <>`. The Nova system is designed to be used by different consumers in the form of projects on a shared system, with role-based access assignments. Roles control the actions that a user is allowed to perform. Projects are isolated resource containers that form the principal organizational structure within the Nova service. They typically consist of networks, volumes, instances, images, keys, and users. A user can specify the project by appending ``project_id`` to their access key.

For projects, you can use quota controls to limit the number of processor cores and the amount of RAM that can be allocated. Other projects also allow quotas on their own resources. For example, :neutron-doc:`neutron
` allows you to manage the amount of networks that can be created within a project. Roles control the actions a user is allowed to perform. By default, most actions do not require a particular role, but you can configure them by editing the ``policy.yaml`` file for user roles. For example, a rule can be defined so that a user must have the ``admin`` role in order to be able to allocate a public IP address. A project limits users' access to particular images. Each user is assigned a user name and password. Keypairs granting access to an instance are enabled for each user, but quotas are set, so that each project can control resource consumption across available hardware resources. .. note:: Earlier versions of OpenStack used the term ``tenant`` instead of ``project``. Because of this legacy terminology, some command-line tools use ``--tenant_id`` where you would normally expect to enter a project ID. Block storage ------------- OpenStack provides two classes of block storage: storage that is provisioned by Nova itself, and storage that is managed by the block storage service, Cinder. .. rubric:: Nova-provisioned block storage Nova provides the ability to create a root disk and an optional "ephemeral" volume. The root disk will always be present unless the instance is a :term:`Boot From Volume` instance. The root disk is associated with an instance, and exists only for the life of this very instance. Generally, it is used to store an instance's root file system, persists across the guest operating system reboots, and is removed on an instance deletion. The amount of the root ephemeral volume is defined by the flavor of an instance. In addition to the root volume, flavors can provide an additional ephemeral block device. It is represented as a raw block device with no partition table or file system. A cloud-aware operating system can discover, format, and mount such a storage device. Nova defines the default file system for different operating systems as ext4 for Linux distributions, VFAT for non-Linux and non-Windows operating systems, and NTFS for Windows. However, it is possible to configure other filesystem types. .. note:: For example, the ``cloud-init`` package included into an Ubuntu's stock cloud image, by default, formats this space as an ext4 file system and mounts it on ``/mnt``. This is a cloud-init feature, and is not an OpenStack mechanism. OpenStack only provisions the raw storage. .. rubric:: Cinder-provisioned block storage The OpenStack Block Storage service, Cinder, provides persistent volumes that are represented by a persistent virtualized block device independent of any particular instance. Persistent volumes can be accessed by a single instance or attached to multiple instances. This type of configuration requires a traditional network file system to allow multiple instances accessing the persistent volume. It also requires a traditional network file system like NFS, CIFS, or a cluster file system such as Ceph. These systems can be built within an OpenStack cluster, or provisioned outside of it, but OpenStack software does not provide these features. You can configure a persistent volume as bootable and use it to provide a persistent virtual instance similar to the traditional non-cloud-based virtualization system. It is still possible for the resulting instance to keep ephemeral storage, depending on the flavor selected. In this case, the root file system can be on the persistent volume, and its state is maintained, even if the instance is shut down. 
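As a hedged illustration of this boot-from-volume pattern (the image, volume, flavor, network, and server names below are placeholders, not values from this document):

.. code-block:: console

   # Create a bootable volume from an image in the Image service.
   $ openstack volume create --size 20 --image ubuntu-22.04 boot-vol

   # Boot a server whose root disk is the persistent Cinder volume.
   $ openstack server create --flavor m1.small --volume boot-vol \
       --network private bfv-server

Deleting such a server leaves ``boot-vol`` and its data in place unless deletion of the volume is explicitly requested, which is what makes the instance's root file system persistent.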
For more information about this type of configuration, see :cinder-doc:`Introduction to the Block Storage service `. Building blocks --------------- In OpenStack the base operating system is usually copied from an image stored in the OpenStack Image service, glance. This is the most common case and results in an ephemeral instance that starts from a known template state and loses all accumulated states on virtual machine deletion. It is also possible to put an operating system on a persistent volume in the OpenStack Block Storage service. This gives a more traditional persistent system that accumulates states which are preserved on the OpenStack Block Storage volume across the deletion and re-creation of the virtual machine. To get a list of available images on your system, run: .. code-block:: console $ openstack image list +--------------------------------------+-----------------------------+--------+ | ID | Name | Status | +--------------------------------------+-----------------------------+--------+ | aee1d242-730f-431f-88c1-87630c0f07ba | Ubuntu 14.04 cloudimg amd64 | active | | 0b27baa1-0ca6-49a7-b3f4-48388e440245 | Ubuntu 14.10 cloudimg amd64 | active | | df8d56fc-9cea-4dfd-a8d3-28764de3cb08 | jenkins | active | +--------------------------------------+-----------------------------+--------+ The displayed image attributes are: ``ID`` Automatically generated UUID of the image ``Name`` Free form, human-readable name for image ``Status`` The status of the image. Images marked ``ACTIVE`` are available for use. ``Server`` For images that are created as snapshots of running instances, this is the UUID of the instance the snapshot derives from. For uploaded images, this field is blank. Virtual hardware templates are called ``flavors``. By default, these are configurable by admin users, however, that behavior can be changed by redefining the access controls ``policy.yaml`` on the ``nova-api`` server. For more information, refer to :doc:`/configuration/policy`. For a list of flavors that are available on your system: .. code-block:: console $ openstack flavor list +-----+-----------+-------+------+-----------+-------+-----------+ | ID | Name | RAM | Disk | Ephemeral | VCPUs | Is_Public | +-----+-----------+-------+------+-----------+-------+-----------+ | 1 | m1.tiny | 512 | 1 | 0 | 1 | True | | 2 | m1.small | 2048 | 20 | 0 | 1 | True | | 3 | m1.medium | 4096 | 40 | 0 | 2 | True | | 4 | m1.large | 8192 | 80 | 0 | 4 | True | | 5 | m1.xlarge | 16384 | 160 | 0 | 8 | True | +-----+-----------+-------+------+-----------+-------+-----------+ Nova service architecture ------------------------- These basic categories describe the service architecture and information about the cloud controller. .. rubric:: API server At the heart of the cloud framework is an API server, which makes command and control of the hypervisor, storage, and networking programmatically available to users. The API endpoints are basic HTTP web services which handle authentication, authorization, and basic command and control functions using various API interfaces under the Amazon, Rackspace, and related models. This enables API compatibility with multiple existing tool sets created for interaction with offerings from other vendors. This broad compatibility prevents vendor lock-in. .. 
rubric:: Message queue A messaging queue brokers the interaction between compute nodes (processing), the networking controllers (software which controls network infrastructure), API endpoints, the scheduler (determines which physical hardware to allocate to a virtual resource), and similar components. Communication to and from the cloud controller is handled by HTTP requests through multiple API endpoints. A typical message passing event begins with the API server receiving a request from a user. The API server authenticates the user and ensures that they are permitted to issue the subject command. The availability of objects implicated in the request is evaluated and, if available, the request is routed to the queuing engine for the relevant workers. Workers continually listen to the queue based on their role, and occasionally their type host name. When an applicable work request arrives on the queue, the worker takes assignment of the task and begins executing it. Upon completion, a response is dispatched to the queue which is received by the API server and relayed to the originating user. Database entries are queried, added, or removed as necessary during the process. .. rubric:: Compute worker Compute workers manage computing instances on host machines. The API dispatches commands to compute workers to complete these tasks: - Run instances - Delete instances (Terminate instances) - Reboot instances - Attach volumes - Detach volumes - Get console output .. rubric:: Network Controller The Network Controller manages the networking resources on host machines. The API server dispatches commands through the message queue, which are subsequently processed by Network Controllers. Specific operations include: - Allocating fixed IP addresses - Configuring VLANs for projects - Configuring networks for compute nodes ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/availability-zones.rst0000664000175000017500000003150500000000000022263 0ustar00zuulzuul00000000000000================== Availability Zones ================== .. note:: This section provides deployment and admin-user usage information about the availability zone feature. For end-user information about availability zones, refer to the :doc:`user guide
`. Availability Zones are an end-user visible logical abstraction for partitioning a cloud without knowing the physical infrastructure. They can be used to partition a cloud on arbitrary factors, such as location (country, datacenter, rack), network layout and/or power source.

.. note:: Availability Zones should not be assumed to map to fault domains and provide no intrinsic HA benefit by themselves.

Availability zones are not modeled in the database; rather, they are defined by attaching specific metadata information to an :doc:`aggregate `. The addition of this specific metadata to an aggregate makes the aggregate visible from an end-user perspective and consequently allows users to schedule instances to a specific set of hosts, the ones belonging to the aggregate.

There are a few additional differences to note when comparing availability zones and host aggregates:

- A host can be part of multiple aggregates but it can only be in one availability zone.
- By default a host is part of a default availability zone even if it doesn't belong to an aggregate. The name of this default availability zone can be configured using the :oslo.config:option:`default_availability_zone` config option.

.. warning:: The use of the default availability zone name in requests can be very error-prone. Since the user can see the list of availability zones, they have no way to know whether the default availability zone name (currently ``nova``) is provided because a host belongs to an aggregate whose AZ metadata key is set to ``nova``, or because there is at least one host not belonging to any aggregate. Consequently, it is highly recommended that users never boot an instance by specifying an explicit AZ named ``nova``, and that operators never set the AZ metadata for an aggregate to ``nova``. This can result in some problems due to the fact that the instance AZ information is explicitly attached to ``nova``, which could break further move operations when either the host is moved to another aggregate or when the user would like to migrate the instance.

.. note:: Availability zone names must NOT contain ``:`` since it is used by admin users to specify hosts where instances are launched in server creation. See `Using availability zones to select hosts`_ for more information.

.. note:: It is not allowed to move instances between Availability Zones. If adding a host to an aggregate or removing a host from an aggregate would cause an instance to move between Availability Zones, including moving from or moving to the default AZ, then the operation will be rejected. The administrator should drain the instances from the host first; then the host can be moved.

In addition, other services, such as the :neutron-doc:`networking service <>` and the :cinder-doc:`block storage service <>`, also provide an availability zone feature. However, the implementation of these features differs vastly between these different services. Consult the documentation for these other services for more information on their implementation of this feature.

.. _availability-zones-with-placement:

Availability Zones with Placement
---------------------------------

In order to use placement to honor availability zone requests, there must be placement aggregates that match the membership and UUID of nova host aggregates that you assign as availability zones. An aggregate metadata key is used to control this function. As of 28.0.0 (Bobcat), this is the only way to schedule instances to availability zones.
.. code-block:: console

   $ openstack --os-compute-api-version=2.53 aggregate create myaz
   +-------------------+--------------------------------------+
   | Field             | Value                                |
   +-------------------+--------------------------------------+
   | availability_zone | None                                 |
   | created_at        | 2018-03-29T16:22:23.175884           |
   | deleted           | False                                |
   | deleted_at        | None                                 |
   | id                | 4                                    |
   | name              | myaz                                 |
   | updated_at        | None                                 |
   | uuid              | 019e2189-31b3-49e1-aff2-b220ebd91c24 |
   +-------------------+--------------------------------------+

   $ openstack --os-compute-api-version=2.53 aggregate add host myaz node1
   +-------------------+--------------------------------------+
   | Field             | Value                                |
   +-------------------+--------------------------------------+
   | availability_zone | None                                 |
   | created_at        | 2018-03-29T16:22:23.175884           |
   | deleted           | False                                |
   | deleted_at        | None                                 |
   | hosts             | [u'node1']                           |
   | id                | 4                                    |
   | name              | myaz                                 |
   | updated_at        | None                                 |
   | uuid              | 019e2189-31b3-49e1-aff2-b220ebd91c24 |
   +-------------------+--------------------------------------+

   $ openstack aggregate set --property availability_zone=az002 myaz

Implications for moving servers
-------------------------------

There are several ways to move a server to another host: evacuate, resize, cold migrate, live migrate, and unshelve. Move operations typically go through the scheduler to pick the target host. Prior to API microversion 2.68, using older openstackclient (pre-5.5.0) and novaclient, it was possible to specify a target host, and the request would force the server to that host by bypassing the scheduler. Only evacuate and live migrate can forcefully bypass the scheduler and move a server to a specified host, and even then it is highly recommended *not* to force the host and bypass the scheduler.

- live migrate with force host (works with older openstackclient, pre-5.5.0):

  .. code-block:: console

     $ openstack server migrate --live

- live migrate without forcing:

  .. code-block:: console

     $ openstack server migrate --live-migration --host

Support for the ``server evacuate`` command was added to openstackclient in 5.5.3, but it never exposed the ability to force an evacuation; that was previously possible with novaclient.

- evacuate with force host:

  .. code-block:: console

     $ nova evacuate --force

- evacuate without forcing using novaclient:

  .. code-block:: console

     $ nova evacuate

- evacuate without forcing using openstackclient:

  .. code-block:: console

     $ openstack server evacuate --host

With respect to availability zones, a server is restricted to a zone if:

1. The server was created in a specific zone with the ``POST /servers`` request containing the ``availability_zone`` parameter.
2. If the server create request did not contain the ``availability_zone`` parameter but the API service is configured for :oslo.config:option:`default_schedule_zone`, then by default the server will be scheduled to that zone.
3. The shelved offloaded server was unshelved by specifying the ``availability_zone`` with the ``POST /servers/{server_id}/action`` request using microversion 2.77 or greater.
4. :oslo.config:option:`cinder.cross_az_attach` is False, :oslo.config:option:`default_schedule_zone` is None, the server is created without an explicit zone but with pre-existing volume block device mappings. In that case the server will be created in the same zone as the volume(s) if the volume zone is not the same as :oslo.config:option:`default_availability_zone`. See `Resource affinity`_ for details.

If the server was not created in a specific zone then it is free to be moved to other zones.
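As a hedged, minimal illustration of case 1 above (the flavor, image, network, and server names are placeholders):

.. code-block:: console

   # Create a server pinned to a specific availability zone.
   $ openstack server create --availability-zone az002 \
       --flavor m1.small --image cirros --network private az-pinned-server

   # The zone the server landed in is visible in the server details.
   $ openstack server show az-pinned-server -c OS-EXT-AZ:availability_zone

Subsequent move operations for such a server are expected to keep it within ``az002``.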
Resource affinity ~~~~~~~~~~~~~~~~~ The :oslo.config:option:`cinder.cross_az_attach` configuration option can be used to restrict servers and the volumes attached to servers to the same availability zone. A typical use case for setting ``cross_az_attach=False`` is to enforce compute and block storage affinity, for example in a High Performance Compute cluster. By default ``cross_az_attach`` is True meaning that the volumes attached to a server can be in a different availability zone than the server. If set to False, then when creating a server with pre-existing volumes or attaching a volume to a server, the server and volume zone must match otherwise the request will fail. In addition, if the nova-compute service creates the volumes to attach to the server during server create, it will request that those volumes are created in the same availability zone as the server, which must exist in the block storage (cinder) service. As noted in the `Implications for moving servers`_ section, forcefully moving a server to another zone could also break affinity with attached volumes. .. note:: ``cross_az_attach=False`` is not widely used nor tested extensively and thus suffers from some known issues: * `Bug 1694844 `_. This is fixed in the 21.0.0 (Ussuri) release by using the volume zone for the server being created if the server is created without an explicit zone, :oslo.config:option:`default_schedule_zone` is None, and the volume zone does not match the value of :oslo.config:option:`default_availability_zone`. * `Bug 1781421 `_ .. _using-availability-zones-to-select-hosts: Using availability zones to select hosts ---------------------------------------- We can combine availability zones with a specific host and/or node to select where an instance is launched. For example: .. code-block:: console $ openstack server create --availability-zone ZONE:HOST:NODE ... SERVER .. note:: It is possible to use ``ZONE``, ``ZONE:HOST``, and ``ZONE::NODE``. .. note:: This is an admin-only operation by default, though you can modify this behavior using the ``os_compute_api:servers:create:forced_host`` rule in ``policy.yaml``. However, as discussed `previously `_, when launching instances in this manner the scheduler filters are not run. For this reason, this behavior is considered legacy behavior and, starting with the 2.74 microversion, it is now possible to specify a host or node explicitly. For example: .. code-block:: console $ openstack --os-compute-api-version 2.74 server create \ --host HOST --hypervisor-hostname HYPERVISOR ... SERVER .. note:: This is an admin-only operation by default, though you can modify this behavior using the ``compute:servers:create:requested_destination`` rule in ``policy.yaml``. This avoids the need to explicitly select an availability zone and ensures the scheduler filters are not bypassed. Usage ----- Creating an availability zone (AZ) is done by associating metadata with a :doc:`host aggregate `. For this reason, the :command:`openstack` client provides the ability to create a host aggregate and associate it with an AZ in one command. For example, to create a new aggregate, associating it with an AZ in the process, and add host to it using the :command:`openstack` client, run: .. code-block:: console $ openstack aggregate create --zone my-availability-zone my-aggregate $ openstack aggregate add host my-aggregate my-host .. note:: While it is possible to add a host to multiple host aggregates, it is not possible to add them to multiple availability zones. 
Attempting to add a host to multiple host aggregates associated with differing availability zones will result in a failure. Alternatively, you can set this metadata manually for an existing host aggregate. For example:

.. code-block:: console

   $ openstack aggregate set \
       --property availability_zone=my-availability-zone my-aggregate

To list all host aggregates and show information about a specific aggregate, in order to determine which AZ the host aggregate(s) belong to, run:

.. code-block:: console

   $ openstack aggregate list --long
   $ openstack aggregate show my-aggregate

Finally, to disassociate a host aggregate from an availability zone, run:

.. code-block:: console

   $ openstack aggregate unset --property availability_zone my-aggregate

Configuration
-------------

Refer to :doc:`/admin/aggregates` for information on configuring both host aggregates and availability zones.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/cells.rst0000664000175000017500000014152000000000000017556 0ustar00zuulzuul00000000000000==========
Cells (v2)
==========

.. versionadded:: 16.0.0 (Pike)

This document describes the layout of a deployment with cells v2, including deployment considerations for security and scale, and recommended practices and tips for running and maintaining cells v2 for admins and operators. It is focused on code present in Pike and later, and while it is geared towards people who want to have multiple cells for whatever reason, the nature of the cells v2 support in Nova means that it applies in some way to all deployments.

Before reading any further, there is a nice overview presentation_ that Andrew Laski gave at the Austin (Newton) summit which may be worth watching.

.. _presentation: https://www.openstack.org/videos/summits/austin-2016/nova-cells-v2-whats-going-on

.. note:: Cells v2 is different from the cells feature found in earlier versions of nova, also known as cells v1. Cells v1 was deprecated in 16.0.0 (Pike) and removed entirely in 20.0.0 (Train).

Overview
--------

The purpose of the cells functionality in nova is to allow larger deployments to shard their many compute nodes into cells. All nova deployments are by definition cells deployments, even if most will only ever have a single cell. This means a multi-cell deployment will not be radically different from a "standard" nova deployment.

Consider such a deployment. It will consist of the following components:

- The :program:`nova-api-wsgi` service which provides the external REST API to users.
- The :program:`nova-scheduler` and ``placement`` services which are responsible for tracking resources and deciding which compute node instances should be on.
- An "API database" that is used primarily by :program:`nova-api-wsgi` and :program:`nova-scheduler` (called *API-level services* below) to track location information about instances, as well as a temporary location for instances being built but not yet scheduled.
- The :program:`nova-conductor` service which offloads long-running tasks for the API-level services and insulates compute nodes from direct database access.
- The :program:`nova-compute` service which manages the virt driver and hypervisor host.
- A "cell database" which is used by API, conductor and compute services, and which houses the majority of the information about instances.
- A "cell0 database" which is just like the cell database, but contains only instances that failed to be scheduled.
This database mimics a regular cell, but has no compute nodes and is used only as a place to put instances that fail to land on a real compute node (and thus a real cell). - A message queue which allows the services to communicate with each other via RPC. In smaller deployments, there will typically be a single message queue that all services share and a single database server which hosts the API database, a single cell database, as well as the required cell0 database. Because we only have one "real" cell, we consider this a "single-cell deployment". In larger deployments, we can opt to shard the deployment using multiple cells. In this configuration there will still only be one global API database but there will be a cell database (where the bulk of the instance information lives) for each cell, each containing a portion of the instances for the entire deployment within, as well as per-cell message queues and per-cell :program:`nova-conductor` instances. There will also be an additional :program:`nova-conductor` instance, known as a *super conductor*, to handle API-level operations. In these larger deployments, each of the nova services will use a cell-specific configuration file, all of which will at a minimum specify a message queue endpoint (i.e. :oslo.config:option:`transport_url`). Most of the services will also contain database connection configuration information (i.e. :oslo.config:option:`database.connection`), while API-level services that need access to the global routing and placement information will also be configured to reach the API database (i.e. :oslo.config:option:`api_database.connection`). .. note:: The pair of :oslo.config:option:`transport_url` and :oslo.config:option:`database.connection` configured for a service defines what cell a service lives in. API-level services need to be able to contact other services in all of the cells. Since they only have one configured :oslo.config:option:`transport_url` and :oslo.config:option:`database.connection`, they look up the information for the other cells in the API database, with records called *cell mappings*. .. note:: The API database must have cell mapping records that match the :oslo.config:option:`transport_url` and :oslo.config:option:`database.connection` configuration options of the lower-level services. See the ``nova-manage`` :ref:`man-page-cells-v2` commands for more information about how to create and examine these records. The following section goes into more detail about the difference between single-cell and multi-cell deployments. Service layout -------------- The services generally have a well-defined communication pattern that dictates their layout in a deployment. In a small/simple scenario, the rules do not have much of an impact as all the services can communicate with each other on a single message bus and in a single cell database. However, as the deployment grows, scaling and security concerns may drive separation and isolation of the services. Single cell ~~~~~~~~~~~ This is a diagram of the basic services that a simple (single-cell) deployment would have, as well as the relationships (i.e. communication paths) between them: .. 
graphviz:: digraph services { graph [pad="0.35", ranksep="0.65", nodesep="0.55", concentrate=true]; node [fontsize=10 fontname="Monospace"]; edge [arrowhead="normal", arrowsize="0.8"]; labelloc=bottom; labeljust=left; { rank=same api [label="nova-api"] apidb [label="API Database" shape="box"] scheduler [label="nova-scheduler"] } { rank=same mq [label="MQ" shape="diamond"] conductor [label="nova-conductor"] } { rank=same cell0db [label="Cell0 Database" shape="box"] celldb [label="Cell Database" shape="box"] compute [label="nova-compute"] } api -> mq -> compute conductor -> mq -> scheduler api -> apidb api -> cell0db api -> celldb conductor -> apidb conductor -> cell0db conductor -> celldb } All of the services are configured to talk to each other over the same message bus, and there is only one cell database where live instance data resides. The cell0 database is present (and required) but as no compute nodes are connected to it, this is still a "single cell" deployment. Multiple cells ~~~~~~~~~~~~~~ In order to shard the services into multiple cells, a number of things must happen. First, the message bus must be split into pieces along the same lines as the cell database. Second, a dedicated conductor must be run for the API-level services, with access to the API database and a dedicated message queue. We call this *super conductor* to distinguish its place and purpose from the per-cell conductor nodes. .. graphviz:: digraph services2 { graph [pad="0.35", ranksep="0.65", nodesep="0.55", concentrate=true]; node [fontsize=10 fontname="Monospace"]; edge [arrowhead="normal", arrowsize="0.8"]; labelloc=bottom; labeljust=left; subgraph api { api [label="nova-api"] scheduler [label="nova-scheduler"] conductor [label="super conductor"] { rank=same apimq [label="API MQ" shape="diamond"] apidb [label="API Database" shape="box"] } api -> apimq -> conductor api -> apidb conductor -> apimq -> scheduler conductor -> apidb } subgraph clustercell0 { label="Cell 0" color=green cell0db [label="Cell Database" shape="box"] } subgraph clustercell1 { label="Cell 1" color=blue mq1 [label="Cell MQ" shape="diamond"] cell1db [label="Cell Database" shape="box"] conductor1 [label="nova-conductor"] compute1 [label="nova-compute"] conductor1 -> mq1 -> compute1 conductor1 -> cell1db } subgraph clustercell2 { label="Cell 2" color=red mq2 [label="Cell MQ" shape="diamond"] cell2db [label="Cell Database" shape="box"] conductor2 [label="nova-conductor"] compute2 [label="nova-compute"] conductor2 -> mq2 -> compute2 conductor2 -> cell2db } api -> mq1 -> conductor1 api -> mq2 -> conductor2 api -> cell0db api -> cell1db api -> cell2db conductor -> cell0db conductor -> cell1db conductor -> mq1 conductor -> cell2db conductor -> mq2 } It is important to note that services in the lower cell boxes only have the ability to call back to the placement API but cannot access any other API-layer services via RPC, nor do they have access to the API database for global visibility of resources across the cloud. This is intentional and provides security and failure domain isolation benefits, but also has impacts on some things that would otherwise require this any-to-any communication style. Check :ref:`upcall` below for the most up-to-date information about any caveats that may be present due to this limitation. Database layout --------------- As mentioned previously, there is a split between global data and data that is local to a cell. These databases schema are referred to as the *API* and *main* database schemas, respectively. 
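Each schema has its own group of management commands, as described in the
subsections below. As a quick orientation (the full workflow is covered in the
Usage section later in this document), the two schemas are synced separately,
for example:

.. code-block:: console

   $ nova-manage api_db sync   # apply the API database schema
   $ nova-manage db sync       # apply the main (cell-level) database schema
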
API database ~~~~~~~~~~~~ The API database is the database used for API-level services, such as :program:`nova-api-wsgi` and, in a multi-cell deployment, the superconductor. The models and migrations related to this database can be found in ``nova.db.api``, and the database can be managed using the :program:`nova-manage api_db` commands. Main (cell-level) database ~~~~~~~~~~~~~~~~~~~~~~~~~~ The main database is the database used for cell-level :program:`nova-conductor` instances. The models and migrations related to this database can be found in ``nova.db.main``, and the database can be managed using the :program:`nova-manage db` commands. Usage ----- As noted previously, all deployments are in effect now cells v2 deployments. As a result, setup of any nova deployment - even those that intend to only have one cell - will involve some level of cells configuration. These changes are configuration-related, both in the main nova configuration file as well as some extra records in the databases. All nova deployments must now have the following databases available and configured: 1. The "API" database 2. One special "cell" database called "cell0" 3. One (or eventually more) "cell" databases Thus, a small nova deployment will have an API database, a cell0, and what we will call here a "cell1" database. High-level tracking information is kept in the API database. Instances that are never scheduled are relegated to the cell0 database, which is effectively a graveyard of instances that failed to start. All successful/running instances are stored in "cell1". .. note:: Since Nova services make use of both configuration file and some databases records, starting or restarting those services with an incomplete configuration could lead to an incorrect deployment. Only restart the services once you are done with the described steps below. .. note:: The following examples show the full expanded command line usage of the setup commands. This is to make it easier to visualize which of the various URLs are used by each of the commands. However, you should be able to put all of that in the config file and :program:`nova-manage` will use those values. If need be, you can create separate config files and pass them as ``nova-manage --config-file foo.conf`` to control the behavior without specifying things on the command lines. Configuring a new deployment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you are installing Nova for the first time and have no compute hosts in the database yet then it will be necessary to configure cell0 and at least one additional "real" cell. To begin, ensure your API database schema has been populated using the :program:`nova-manage api_db sync` command. Ensure the connection information for this database is stored in the ``nova.conf`` file using the :oslo.config:option:`api_database.connection` config option: .. code-block:: ini [api_database] connection = mysql+pymysql://root:secretmysql@dbserver/nova_api?charset=utf8 Since there may be multiple "cell" databases (and in fact everyone will have cell0 and cell1 at a minimum), connection info for these is stored in the API database. Thus, the API database must exist and must provide information on how to connect to it before continuing to the steps below, so that :program:`nova-manage` can find your other databases. Next, we will create the necessary records for the cell0 database. To do that we will first use :program:`nova-manage cell_v2 map_cell0` to create and map cell0. For example: .. 
code-block:: bash $ nova-manage cell_v2 map_cell0 \ --database_connection mysql+pymysql://root:secretmysql@dbserver/nova_cell0?charset=utf8 .. note:: If you don't specify ``--database_connection`` then the commands will use the :oslo.config:option:`database.connection` value from your config file and mangle the database name to have a ``_cell0`` suffix .. warning:: If your databases are on separate hosts then you should specify ``--database_connection`` or make certain that the :file:`nova.conf` being used has the :oslo.config:option:`database.connection` value pointing to the same user/password/host that will work for the cell0 database. If the cell0 mapping was created incorrectly, it can be deleted using the :program:`nova-manage cell_v2 delete_cell` command before running :program:`nova-manage cell_v2 map_cell0` again with the proper database connection value. We will then use :program:`nova-manage db sync` to apply the database schema to this new database. For example: .. code-block:: bash $ nova-manage db sync \ --database_connection mysql+pymysql://root:secretmysql@dbserver/nova_cell0?charset=utf8 Since no hosts are ever in cell0, nothing further is required for its setup. Note that all deployments only ever have one cell0, as it is special, so once you have done this step you never need to do it again, even if you add more regular cells. Now, we must create another cell which will be our first "regular" cell, which has actual compute hosts in it, and to which instances can actually be scheduled. First, we create the cell record using :program:`nova-manage cell_v2 create_cell`. For example: .. code-block:: bash $ nova-manage cell_v2 create_cell \ --name cell1 \ --database_connection mysql+pymysql://root:secretmysql@127.0.0.1/nova?charset=utf8 \ --transport-url rabbit://stackrabbit:secretrabbit@mqserver:5672/ .. note:: If you don't specify the database and transport urls then :program:`nova-manage` will use the :oslo.config:option:`transport_url` and :oslo.config:option:`database.connection` values from the config file. .. note:: It is a good idea to specify a name for the new cell you create so you can easily look up cell UUIDs with the :program:`nova-manage cell_v2 list_cells` command later if needed. .. note:: The :program:`nova-manage cell_v2 create_cell` command will print the UUID of the newly-created cell if ``--verbose`` is passed, which is useful if you need to run commands like :program:`nova-manage cell_v2 discover_hosts` targeted at a specific cell. At this point, the API database can now find the cell database, and further commands will attempt to look inside. If this is a completely fresh database (such as if you're adding a cell, or if this is a new deployment), then you will need to run :program:`nova-manage db sync` on it to initialize the schema. Now we have a cell, but no hosts are in it which means the scheduler will never actually place instances there. The next step is to scan the database for compute node records and add them into the cell we just created. For this step, you must have had a compute node started such that it registers itself as a running service. You can identify this using the :program:`openstack compute service list` command: .. code-block:: bash $ openstack compute service list --service nova-compute Once that has happened, you can scan and add it to the cell using the :program:`nova-manage cell_v2 discover_hosts` command: .. 
code-block:: bash $ nova-manage cell_v2 discover_hosts This command will connect to any databases for which you have created cells (as above), look for hosts that have registered themselves there, and map those hosts in the API database so that they are visible to the scheduler as available targets for instances. Any time you add more compute hosts to a cell, you need to re-run this command to map them from the top-level so they can be utilized. You can also configure a periodic task to have Nova discover new hosts automatically by setting the :oslo.config:option:`scheduler.discover_hosts_in_cells_interval` to a time interval in seconds. The periodic task is run by the :program:`nova-scheduler` service, so you must be sure to configure it on all of your :program:`nova-scheduler` hosts. .. note:: In the future, whenever you add new compute hosts, you will need to run the :program:`nova-manage cell_v2 discover_hosts` command after starting them to map them to the cell if you did not configure automatic host discovery using :oslo.config:option:`scheduler.discover_hosts_in_cells_interval`. Adding a new cell to an existing deployment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can add additional cells to your deployment using the same steps used above to create your first cell. We can create a new cell record using :program:`nova-manage cell_v2 create_cell`. For example: .. code-block:: bash $ nova-manage cell_v2 create_cell \ --name cell2 \ --database_connection mysql+pymysql://root:secretmysql@127.0.0.1/nova?charset=utf8 \ --transport-url rabbit://stackrabbit:secretrabbit@mqserver:5672/ .. note:: If you don't specify the database and transport urls then :program:`nova-manage` will use the :oslo.config:option:`transport_url` and :oslo.config:option:`database.connection` values from the config file. .. note:: It is a good idea to specify a name for the new cell you create so you can easily look up cell UUIDs with the :program:`nova-manage cell_v2 list_cells` command later if needed. .. note:: The :program:`nova-manage cell_v2 create_cell` command will print the UUID of the newly-created cell if ``--verbose`` is passed, which is useful if you need to run commands like :program:`nova-manage cell_v2 discover_hosts` targeted at a specific cell. You can repeat this step for each cell you wish to add to your deployment. Your existing cell database will be reused - this simply informs the top-level API database about your existing cell databases. Once you've created your new cell, use :program:`nova-manage cell_v2 discover_hosts` to map compute hosts to cells. This is only necessary if you haven't enabled automatic discovery using the :oslo.config:option:`scheduler.discover_hosts_in_cells_interval` option. For example: .. code-block:: bash $ nova-manage cell_v2 discover_hosts .. note:: This command will search for compute hosts in each cell database and map them to the corresponding cell. This can be slow, particularly for larger deployments. You may wish to specify the ``--cell_uuid`` option, which will limit the search to a specific cell. You can use the :program:`nova-manage cell_v2 list_cells` command to look up cell UUIDs if you are going to specify ``--cell_uuid``. Finally, run the :program:`nova-manage cell_v2 map_instances` command to map existing instances to the new cell(s). For example: .. code-block:: bash $ nova-manage cell_v2 map_instances .. note:: This command will search for instances in each cell database and map them to the correct cell. 
This can be slow, particularly for larger deployments. You may wish to specify the ``--cell_uuid`` option, which will limit the search to a specific cell. You can use the :program:`nova-manage cell_v2 list_cells` command to look up cell UUIDs if you are going to specify ``--cell_uuid``. .. note:: The ``--max-count`` option can be specified if you would like to limit the number of instances to map in a single run. If ``--max-count`` is not specified, all instances will be mapped. Repeated runs of the command will start from where the last run finished so it is not necessary to increase ``--max-count`` to finish. An exit code of 0 indicates that all instances have been mapped. An exit code of 1 indicates that there are remaining instances that need to be mapped. Template URLs in Cell Mappings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Starting in the 18.0.0 (Rocky) release, the URLs provided in the cell mappings for ``--database_connection`` and ``--transport-url`` can contain variables which are evaluated each time they are loaded from the database, and the values of which are taken from the corresponding base options in the host's configuration file. The base URL is parsed and the following elements may be substituted into the cell mapping URL (using ``rabbit://bob:s3kret@myhost:123/nova?sync=true#extra``): .. list-table:: Cell Mapping URL Variables :header-rows: 1 :widths: 15, 50, 15 * - Variable - Meaning - Part of example URL * - ``scheme`` - The part before the ``://`` - ``rabbit`` * - ``username`` - The username part of the credentials - ``bob`` * - ``password`` - The password part of the credentials - ``s3kret`` * - ``hostname`` - The hostname or address - ``myhost`` * - ``port`` - The port number (must be specified) - ``123`` * - ``path`` - The "path" part of the URL (without leading slash) - ``nova`` * - ``query`` - The full query string arguments (without leading question mark) - ``sync=true`` * - ``fragment`` - Everything after the first hash mark - ``extra`` Variables are provided in curly brackets, like ``{username}``. A simple template of ``rabbit://{username}:{password}@otherhost/{path}`` will generate a full URL of ``rabbit://bob:s3kret@otherhost/nova`` when used with the above example. .. note:: The :oslo.config:option:`database.connection` and :oslo.config:option:`transport_url` values are not reloaded from the configuration file during a SIGHUP, which means that a full service restart will be required to notice changes in a cell mapping record if variables are changed. .. note:: The :oslo.config:option:`transport_url` option can contain an extended syntax for the "netloc" part of the URL (i.e. ``userA:passwordA@hostA:portA,userB:passwordB@hostB:portB``). In this case, substitutions of the form ``username1``, ``username2``, etc will be honored and can be used in the template URL. The templating of these URLs may be helpful in order to provide each service host with its own credentials for, say, the database. Without templating, all hosts will use the same URL (and thus credentials) for accessing services like the database and message queue. By using a URL with a template that results in the credentials being taken from the host-local configuration file, each host will use different values for those connections. Assuming you have two service hosts that are normally configured with the cell0 database as their primary connection, their (abbreviated) configurations would look like this: .. code-block:: ini [database] connection = mysql+pymysql://service1:foo@myapidbhost/nova_cell0 and: .. 
code-block:: ini [database] connection = mysql+pymysql://service2:bar@myapidbhost/nova_cell0 Without cell mapping template URLs, they would still use the same credentials (as stored in the mapping) to connect to the cell databases. However, consider template URLs like the following:: mysql+pymysql://{username}:{password}@mycell1dbhost/nova and:: mysql+pymysql://{username}:{password}@mycell2dbhost/nova Using the first service and cell1 mapping, the calculated URL that will actually be used for connecting to that database will be:: mysql+pymysql://service1:foo@mycell1dbhost/nova Design ------ Prior to the introduction of cells v2, when a request hit the Nova API for a particular instance, the instance information was fetched from the database. The information contained the hostname of the compute node on which the instance was currently located. If the request needed to take action on the instance (which it generally would), the hostname was used to calculate the name of a queue and a message was written there which would eventually find its way to the proper compute node. The meat of the cells v2 feature was to split this hostname lookup into two parts that yielded three pieces of information instead of one. Basically, instead of merely looking up the *name* of the compute node on which an instance was located, we also started obtaining database and queue connection information. Thus, when asked to take action on instance $foo, we now: 1. Lookup the three-tuple of (database, queue, hostname) for that instance 2. Connect to that database and fetch the instance record 3. Connect to the queue and send the message to the proper hostname queue The above differs from the previous organization in two ways. First, we now need to do two database lookups before we know where the instance lives. Second, we need to demand-connect to the appropriate database and queue. Both of these changes had performance implications, but it was possible to mitigate them through the use of things like a memcache of instance mapping information and pooling of connections to database and queue systems. The number of cells will always be much smaller than the number of instances. There were also availability implications with the new feature since something like a instance list which might query multiple cells could end up with a partial result if there is a database failure in a cell. These issues can be mitigated, as discussed in :ref:`handling-cell-failures`. A database failure within a cell would cause larger issues than a partial list result so the expectation is that it would be addressed quickly and cells v2 will handle it by indicating in the response that the data may not be complete. Comparison with cells v1 ------------------------ Prior to the introduction of cells v2, nova had a very similar feature, also called cells or referred to as cells v1 for disambiguation. Cells v2 was an effort to address many of the perceived shortcomings of the cell v1 feature. Benefits of the cells v2 feature over the previous cells v1 feature include: - Native sharding of the database and queue as a first-class-feature in nova. All of the code paths will go through the lookup procedure and thus we won't have the same feature parity issues as we do with current cells. - No high-level replication of all the cell databases at the top. The API will need a database of its own for things like the instance index, but it will not need to replicate all the data at the top level. 
- It draws a clear line between global and local data elements. Things like flavors and keypairs are clearly global concepts that need only live at the top level. Providing this separation allows compute nodes to become even more stateless and insulated from things like deleted/changed global data. - Existing non-cells users will suddenly gain the ability to spawn a new "cell" from their existing deployment without changing their architecture. Simply adding information about the new database and queue systems to the new index will allow them to consume those resources. - Existing cells users will need to fill out the cells mapping index, shutdown their existing cells synchronization service, and ultimately clean up their top level database. However, since the high-level organization is not substantially different, they will not have to re-architect their systems to move to cells v2. - Adding new sets of hosts as a new "cell" allows them to be plugged into a deployment and tested before allowing builds to be scheduled to them. .. _cells-v2-caveats: Caveats ------- .. note:: Many of these caveats have been addressed since the introduction of cells v2 in the 16.0.0 (Pike) release. These are called out below. Cross-cell move operations ~~~~~~~~~~~~~~~~~~~~~~~~~~ Support for cross-cell cold migration and resize was introduced in the 21.0.0 (Ussuri) release. This is documented in :doc:`/admin/configuration/cross-cell-resize`. Prior to this release, it was not possible to cold migrate or resize an instance from a host in one cell to a host in another cell. It is not currently possible to live migrate, evacuate or unshelve an instance from a host in one cell to a host in another cell. Quota-related quirks ~~~~~~~~~~~~~~~~~~~~ Quotas are now calculated live at the point at which an operation would consume more resource, instead of being kept statically in the database. This means that a multi-cell environment may incorrectly calculate the usage of a tenant if one of the cells is unreachable, as those resources cannot be counted. In this case, the tenant may be able to consume more resource from one of the available cells, putting them far over quota when the unreachable cell returns. .. note:: Starting in the Train (20.0.0) release, it is possible to configure counting of quota usage from the placement service and API database to make quota usage calculations resilient to down or poor-performing cells in a multi-cell environment. See the :doc:`quotas documentation ` for more details. Starting in the 2023.2 Bobcat (28.0.0) release, it is possible to configure unified limits quotas, which stores quota limits as Keystone unified limits and counts quota usage from the placement service and API database. See the :doc:`unified limits documentation ` for more details. Performance of listing instances ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Prior to the 17.0.0 (Queens) release, the instance list operation may not sort and paginate results properly when crossing multiple cell boundaries. Further, the performance of a sorted list operation across multiple cells was considerably slower than with a single cell. This was resolved as part of the `efficient-multi-cell-instance-list-and-sort`__ spec. .. __: https://blueprints.launchpad.net/nova/+spec/efficient-multi-cell-instance-list-and-sort Notifications ~~~~~~~~~~~~~ With a multi-cell environment with multiple message queues, it is likely that operators will want to configure a separate connection to a unified queue for notifications. 
This can be done in the configuration file of all nodes. Refer to the :oslo.messaging-doc:`oslo.messaging configuration documentation ` for more details. .. _cells-v2-layout-metadata-api: Nova Metadata API service ~~~~~~~~~~~~~~~~~~~~~~~~~ Starting from the 19.0.0 (Stein) release, the :doc:`nova metadata API service ` can be run either globally or per cell using the :oslo.config:option:`api.local_metadata_per_cell` configuration option. .. rubric:: Global If you have networks that span cells, you might need to run Nova metadata API globally. When running globally, it should be configured as an API-level service with access to the :oslo.config:option:`api_database.connection` information. The nova metadata API service **must not** be run as a standalone service, using the :program:`nova-metadata-wsgi` service, in this case. .. rubric:: Local per cell Running Nova metadata API per cell can have better performance and data isolation in a multi-cell deployment. If your networks are segmented along cell boundaries, then you can run Nova metadata API service per cell. If you choose to run it per cell, you should also configure each :neutron-doc:`neutron-metadata-agent ` service to point to the corresponding :program:`nova-metadata-wsgi`. The nova metadata API service **must** be run as a standalone service, using the :program:`nova-metadata-wsgi` service, in this case. Console proxies ~~~~~~~~~~~~~~~ Starting from the 18.0.0 (Rocky) release, console proxies must be run per cell because console token authorizations are stored in cell databases. This means that each console proxy server must have access to the :oslo.config:option:`database.connection` information for the cell database containing the instances for which it is proxying console access. This functionality was added as part of the `convert-consoles-to-objects`__ spec. .. __: https://specs.openstack.org/openstack/nova-specs/specs/rocky/implemented/convert-consoles-to-objects.html .. _upcall: Operations requiring upcalls ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you deploy multiple cells with a superconductor as described above, computes and cell-based conductors will not have the ability to speak to the scheduler as they are not connected to the same MQ. This is by design for isolation, but currently the processes are not in place to implement some features without such connectivity. Thus, anything that requires a so-called "upcall" will not function. This impacts the following: #. Instance reschedules during boot and resize (part 1) .. note:: This has been resolved in the `Queens release`__. .. __: https://specs.openstack.org/openstack/nova-specs/specs/queens/approved/return-alternate-hosts.html #. Instance affinity reporting from the compute nodes to scheduler #. The late anti-affinity check during server create and evacuate #. Querying host aggregates from the cell .. note:: This has been resolved in the `Rocky release`__. .. __: https://blueprints.launchpad.net/nova/+spec/live-migration-in-xapi-pool #. Attaching a volume and ``[cinder] cross_az_attach = False`` #. Instance reschedules during boot and resize (part 2) .. note:: This has been resolved in the `Ussuri release`__. .. __: https://review.opendev.org/q/topic:bug/1781286 The first is simple: if you boot an instance, it gets scheduled to a compute node, fails, it would normally be re-scheduled to another node. That requires scheduler intervention and thus it will not work in Pike with a multi-cell layout. 
If you do not rely on reschedules for covering up transient compute-node failures, then this will not affect you. To ensure you do not make futile attempts at rescheduling, you should set :oslo.config:option:`scheduler.max_attempts` to ``1`` in ``nova.conf``. The second two are related. The summary is that some of the facilities that Nova has for ensuring that affinity/anti-affinity is preserved between instances does not function in Pike with a multi-cell layout. If you don't use affinity operations, then this will not affect you. To make sure you don't make futile attempts at the affinity check, you should set :oslo.config:option:`workarounds.disable_group_policy_check_upcall` to ``True`` and :oslo.config:option:`filter_scheduler.track_instance_changes` to ``False`` in ``nova.conf``. The fourth was previously only a problem when performing live migrations using the since-removed XenAPI driver and not specifying ``--block-migrate``. The driver would attempt to figure out if block migration should be performed based on source and destination hosts being in the same aggregate. Since aggregates data had migrated to the API database, the cell conductor would not be able to access the aggregate information and would fail. The fifth is a problem because when a volume is attached to an instance in the *nova-compute* service, and ``[cinder]/cross_az_attach=False`` in nova.conf, we attempt to look up the availability zone that the instance is in which includes getting any host aggregates that the ``instance.host`` is in. Since the aggregates are in the API database and the cell conductor cannot access that information, so this will fail. In the future this check could be moved to the *nova-api* service such that the availability zone between the instance and the volume is checked before we reach the cell, except in the case of :term:`boot from volume ` where the *nova-compute* service itself creates the volume and must tell Cinder in which availability zone to create the volume. Long-term, volume creation during boot from volume should be moved to the top-level superconductor which would eliminate this AZ up-call check problem. The sixth is detailed in `bug 1781286`__ and is similar to the first issue. The issue is that servers created without a specific availability zone will have their AZ calculated during a reschedule based on the alternate host selected. Determining the AZ for the alternate host requires an "up call" to the API DB. .. __: https://bugs.launchpad.net/nova/+bug/1781286 .. _handling-cell-failures: Handling cell failures ---------------------- For an explanation on how ``nova-api`` handles cell failures please see the `Handling Down Cells`__ section of the Compute API guide. Below, you can find some recommended practices and considerations for effectively tolerating cell failure situations. .. __: https://docs.openstack.org/api-guide/compute/down_cells.html Configuration considerations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Since a cell being reachable or not is determined through timeouts, it is suggested to provide suitable values for the following settings based on your requirements. #. :oslo.config:option:`database.max_retries` is 10 by default meaning every time a cell becomes unreachable, it would retry 10 times before nova can declare the cell as a "down" cell. #. 
:oslo.config:option:`database.retry_interval` is 10 seconds and :oslo.config:option:`oslo_messaging_rabbit.rabbit_retry_interval` is 1 second by default meaning every time a cell becomes unreachable it would retry every 10 seconds or 1 second depending on if it's a database or a message queue problem. #. Nova also has a timeout value called ``CELL_TIMEOUT`` which is hardcoded to 60 seconds and that is the total time the nova-api would wait before returning partial results for the "down" cells. The values of the above settings will affect the time required for nova to decide if a cell is unreachable and then take the necessary actions like returning partial results. The operator can also control the results of certain actions like listing servers and services depending on the value of the :oslo.config:option:`api.list_records_by_skipping_down_cells` config option. If this is true, the results from the unreachable cells will be skipped and if it is false, the request will just fail with an API error in situations where partial constructs cannot be computed. Disabling down cells ~~~~~~~~~~~~~~~~~~~~ While the temporary outage in the infrastructure is being fixed, the affected cells can be disabled so that they are removed from being scheduling candidates. To enable or disable a cell, use :command:`nova-manage cell_v2 update_cell --cell_uuid --disable`. See the :ref:`man-page-cells-v2` man page for details on command usage. Known issues ~~~~~~~~~~~~ 1. **Services and Performance:** In case a cell is down during the startup of nova services, there is the chance that the services hang because of not being able to connect to all the cell databases that might be required for certain calculations and initializations. An example scenario of this situation is if :oslo.config:option:`upgrade_levels.compute` is set to ``auto`` then the ``nova-api`` service hangs on startup if there is at least one unreachable cell. This is because it needs to connect to all the cells to gather information on each of the compute service's version to determine the compute version cap to use. The current workaround is to pin the :oslo.config:option:`upgrade_levels.compute` to a particular version like "rocky" and get the service up under such situations. See `bug 1815697`__ for more details. Also note that in general during situations where cells are not reachable certain "slowness" may be experienced in operations requiring hitting all the cells because of the aforementioned configurable timeout/retry values. .. _cells-counting-quotas: 2. **Counting Quotas:** Another known issue is in the current approach of counting quotas where we query each cell database to get the used resources and aggregate them which makes it sensitive to temporary cell outages. While the cell is unavailable, we cannot count resource usage residing in that cell database and things would behave as though more quota is available than should be. That is, if a tenant has used all of their quota and part of it is in cell A and cell A goes offline temporarily, that tenant will suddenly be able to allocate more resources than their limit (assuming cell A returns, the tenant will have more resources allocated than their allowed quota). .. note:: Starting in the Train (20.0.0) release, it is possible to configure counting of quota usage from the placement service and API database to make quota usage calculations resilient to down or poor-performing cells in a multi-cell environment. See the :doc:`quotas documentation
` for more details. .. __: https://bugs.launchpad.net/nova/+bug/1815697 FAQs ---- - How do I find out which hosts are bound to which cell? There are a couple of ways to do this. #. Run :program:`nova-manage cell_v2 discover_hosts --verbose`. This does not produce a report but if you are trying to determine if a host is in a cell you can run this and it will report any hosts that are not yet mapped to a cell and map them. This command is idempotent. #. Run :program:`nova-manage cell_v2 list_hosts`. This will list hosts in all cells. If you want to list hosts in a specific cell, you can use the ``--cell_uuid`` option. - I updated the ``database_connection`` and/or ``transport_url`` in a cell using the ``nova-manage cell_v2 update_cell`` command but the API is still trying to use the old settings. The cell mappings are cached in the :program:`nova-api-wsgi` service worker so you will need to restart the worker process to rebuild the cache. Note that there is another global cache tied to request contexts, which is used in the nova-conductor and nova-scheduler services, so you might need to do the same if you are having the same issue in those services. As of the 16.0.0 (Pike) release there is no timer on the cache or hook to refresh the cache using a SIGHUP to the service. - I have upgraded from Newton to Ocata and I can list instances but I get a HTTP 404 (NotFound) error when I try to get details on a specific instance. Instances need to be mapped to cells so the API knows which cell an instance lives in. When upgrading, the :program:`nova-manage cell_v2 simple_cell_setup` command will automatically map the instances to the single cell which is backed by the existing nova database. If you have already upgraded and did not use the :program:`nova-manage cell_v2 simple_cell_setup` command, you can run the :program:`nova-manage cell_v2 map_instances` command with the ``--cell_uuid`` option to map all instances in the given cell. See the :ref:`man-page-cells-v2` man page for details on command usage. - Can I create a cell but have it disabled from scheduling? Yes. It is possible to create a pre-disabled cell such that it does not become a candidate for scheduling new VMs. This can be done by running the :program:`nova-manage cell_v2 create_cell` command with the ``--disabled`` option. - How can I disable a cell so that the new server create requests do not go to it while I perform maintenance? Existing cells can be disabled by running :program:`nova-manage cell_v2 update_cell` with the ``--disable`` option and can be re-enabled once the maintenance period is over by running this command with the ``--enable`` option. - I disabled (or enabled) a cell using the :program:`nova-manage cell_v2 update_cell` or I created a new (pre-disabled) cell(mapping) using the :program:`nova-manage cell_v2 create_cell` command but the scheduler is still using the old settings. The cell mappings are cached in the scheduler worker so you will either need to restart the scheduler process to refresh the cache, or send a SIGHUP signal to the scheduler by which it will automatically refresh the cells cache and the changes will take effect. - Why was the cells REST API not implemented for cells v2? Why are there no CRUD operations for cells in the API? One of the deployment challenges that cells v1 had was the requirement for the API and control services to be up before a new cell could be deployed. 
This was not a problem for large-scale public clouds that never shut down, but is not a reasonable requirement for smaller clouds that do offline upgrades and/or clouds which could be taken completely offline by something like a power outage. Initial devstack and gate testing for cells v1 was delayed by the need to engineer a solution for bringing the services partially online in order to deploy the rest, and this continues to be a gap for other deployment tools. Consider also the FFU case where the control plane needs to be down for a multi-release upgrade window where changes to cell records have to be made. This would be quite a bit harder if the way those changes are made is via the API, which must remain down during the process. Further, there is a long-term goal to move cell configuration (i.e. cell_mappings and the associated URLs and credentials) into config and get away from the need to store and provision those things in the database. Obviously a CRUD interface in the API would prevent us from making that move. - Why are cells not exposed as a grouping mechanism in the API for listing services, instances, and other resources? Early in the design of cells v2 we set a goal to not let the cell concept leak out of the API, even for operators. Aggregates are the way nova supports grouping of hosts for a variety of reasons, and aggregates can cut across cells, and/or be aligned with them if desired. If we were to support cells as another grouping mechanism, we would likely end up having to implement many of the same features for them as aggregates, such as scheduler features, metadata, and other searching/filtering operations. Since aggregates are how Nova supports grouping, we expect operators to use aggregates any time they need to refer to a cell as a group of hosts from the API, and leave actual cells as a purely architectural detail. The need to filter instances by cell in the API can and should be solved by adding a generic by-aggregate filter, which would allow listing instances on hosts contained within any aggregate, including one that matches the cell boundaries if so desired. - Why are the API responses for ``GET /servers``, ``GET /servers/detail``, ``GET /servers/{server_id}`` and ``GET /os-services`` missing some information for certain cells at certain times? Why do I see the status as "UNKNOWN" for the servers in those cells at those times when I run ``openstack server list`` or ``openstack server show``? Starting from microversion 2.69 the API responses of ``GET /servers``, ``GET /servers/detail``, ``GET /servers/{server_id}`` and ``GET /os-services`` may contain missing keys during down cell situations. See the `Handling Down Cells`__ section of the Compute API guide for more information on the partial constructs. For administrative considerations, see :ref:`handling-cell-failures`. .. __: https://docs.openstack.org/api-guide/compute/down_cells.html References ---------- A large number of cells v2-related presentations have been given at various OpenStack and OpenInfra Summits over the years. These provide an excellent reference on the history and development of the feature along with details from real-world users of the feature. 
- `Newton Summit Video - Nova Cells V2: What's Going On?`__ - `Pike Summit Video - Scaling Nova: How CellsV2 Affects Your Deployment`__ - `Queens Summit Video - Add Cellsv2 to your existing Nova deployment`__ - `Rocky Summit Video - Moving from CellsV1 to CellsV2 at CERN`__ - `Stein Summit Video - Scaling Nova with CellsV2: The Nova Developer and the CERN Operator perspective`__ - `Train Summit Video - What's new in Nova Cellsv2?`__ .. __: https://www.openstack.org/videos/austin-2016/nova-cells-v2-whats-going-on .. __: https://www.openstack.org/videos/boston-2017/scaling-nova-how-cellsv2-affects-your-deployment .. __: https://www.openstack.org/videos/sydney-2017/adding-cellsv2-to-your-existing-nova-deployment .. __: https://www.openstack.org/videos/summits/vancouver-2018/moving-from-cellsv1-to-cellsv2-at-cern .. __: https://www.openstack.org/videos/summits/berlin-2018/scaling-nova-with-cellsv2-the-nova-developer-and-the-cern-operator-perspective .. __: https://www.openstack.org/videos/summits/denver-2019/whats-new-in-nova-cellsv2 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.2336075 nova-32.0.0/doc/source/admin/common/0000775000175000017500000000000000000000000017207 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/common/nova-show-usage-statistics-for-hosts-instances.rst0000664000175000017500000002014400000000000031064 0ustar00zuulzuul00000000000000============================================= Show usage statistics for hosts and instances ============================================= You can show basic statistics on resource usage for hosts and instances. .. note:: For more sophisticated monitoring, see the `Ceilometer `__ project. You can also use tools, such as `Ganglia `__ or `Graphite `__, to gather more detailed data. Show host usage statistics ~~~~~~~~~~~~~~~~~~~~~~~~~~ The following examples show the host usage statistics for a host called ``devstack``. * List the hosts and the nova-related services that run on them: .. code-block:: console $ openstack host list +-----------+-------------+----------+ | Host Name | Service | Zone | +-----------+-------------+----------+ | devstack | conductor | internal | | devstack | compute | nova | | devstack | network | internal | | devstack | scheduler | internal | +-----------+-------------+----------+ * Get a summary of resource usage of all of the instances running on the host: .. code-block:: console $ openstack host show devstack +----------+----------------------------------+-----+-----------+---------+ | Host | Project | CPU | MEMORY MB | DISK GB | +----------+----------------------------------+-----+-----------+---------+ | devstack | (total) | 2 | 4003 | 157 | | devstack | (used_now) | 3 | 5120 | 40 | | devstack | (used_max) | 3 | 4608 | 40 | | devstack | b70d90d65e464582b6b2161cf3603ced | 1 | 512 | 0 | | devstack | 66265572db174a7aa66eba661f58eb9e | 2 | 4096 | 40 | +----------+----------------------------------+-----+-----------+---------+ The ``CPU`` column shows the sum of the virtual CPUs for instances running on the host. The ``MEMORY MB`` column shows the sum of the memory (in MB) allocated to the instances that run on the host. The ``DISK GB`` column shows the sum of the root and ephemeral disk sizes (in GB) of the instances that run on the host. 
The row that has the value ``used_now`` in the ``PROJECT`` column shows the sum of the resources allocated to the instances that run on the host, plus the resources allocated to the host itself. The row that has the value ``used_max`` in the ``PROJECT`` column shows the sum of the resources allocated to the instances that run on the host. .. note:: These values are computed by using information about the flavors of the instances that run on the hosts. This command does not query the CPU usage, memory usage, or hard disk usage of the physical host. Show instance usage statistics ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Get CPU, memory, I/O, and network statistics for an instance. #. List instances: .. code-block:: console $ openstack server list +----------+----------------------+--------+------------------+--------+----------+ | ID | Name | Status | Networks | Image | Flavor | +----------+----------------------+--------+------------------+--------+----------+ | 84c6e... | myCirrosServer | ACTIVE | private=10.0.0.3 | cirros | m1.tiny | | 8a995... | myInstanceFromVolume | ACTIVE | private=10.0.0.4 | ubuntu | m1.small | +----------+----------------------+--------+------------------+--------+----------+ #. Get diagnostic statistics: .. note:: As of microversion v2.48, diagnostics information for all virt drivers will have a standard format as below. Before microversion 2.48, each hypervisor had its own format. For more details on diagnostics response message see `server diagnostics api `__ documentation. .. code-block:: console $ nova diagnostics myCirrosServer +----------------+------------------------------------------------------------------------+ | Property | Value | +----------------+------------------------------------------------------------------------+ | config_drive | False | | cpu_details | [] | | disk_details | [{"read_requests": 887, "errors_count": -1, "read_bytes": 20273152, | | | "write_requests": 89, "write_bytes": 303104}] | | driver | libvirt | | hypervisor | qemu | | hypervisor_os | linux | | memory_details | {"used": 0, "maximum": 0} | | nic_details | [{"rx_packets": 9, "rx_drop": 0, "tx_octets": 1464, "tx_errors": 0, | | | "mac_address": "fa:16:3e:fa:db:d3", "rx_octets": 958, "rx_rate": null, | | | "rx_errors": 0, "tx_drop": 0, "tx_packets": 9, "tx_rate": null}] | | num_cpus | 0 | | num_disks | 1 | | num_nics | 1 | | state | running | | uptime | 5528 | +----------------+------------------------------------------------------------------------+ ``config_drive`` indicates if the config drive is supported on the instance. ``cpu_details`` contains a list of details per vCPU. ``disk_details`` contains a list of details per disk. ``driver`` indicates the current driver on which the VM is running. ``hypervisor`` indicates the current hypervisor on which the VM is running. ``nic_details`` contains a list of details per vNIC. ``uptime`` is the amount of time in seconds that the VM has been running. | Diagnostics prior to v2.48: .. 
code-block:: console $ nova diagnostics myCirrosServer +---------------------------+--------+ | Property | Value | +---------------------------+--------+ | memory | 524288 | | memory-actual | 524288 | | memory-rss | 6444 | | tap1fec8fb8-7a_rx | 22137 | | tap1fec8fb8-7a_rx_drop | 0 | | tap1fec8fb8-7a_rx_errors | 0 | | tap1fec8fb8-7a_rx_packets | 166 | | tap1fec8fb8-7a_tx | 18032 | | tap1fec8fb8-7a_tx_drop | 0 | | tap1fec8fb8-7a_tx_errors | 0 | | tap1fec8fb8-7a_tx_packets | 130 | | vda_errors | -1 | | vda_read | 2048 | | vda_read_req | 2 | | vda_write | 182272 | | vda_write_req | 74 | +---------------------------+--------+ * Get summary statistics for each project: .. code-block:: console $ openstack usage list Usage from 2013-06-25 to 2013-07-24: +---------+---------+--------------+-----------+---------------+ | Project | Servers | RAM MB-Hours | CPU Hours | Disk GB-Hours | +---------+---------+--------------+-----------+---------------+ | demo | 1 | 344064.44 | 672.00 | 0.00 | | stack | 3 | 671626.76 | 327.94 | 6558.86 | +---------+---------+--------------+-----------+---------------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/compute-node-identification.rst0000664000175000017500000000723300000000000024044 0ustar00zuulzuul00000000000000=========================== Compute Node Identification =========================== Nova requires that compute nodes maintain a constant and consistent identity during their lifecycle. With the exception of the ironic driver, starting in the 2023.1 release, this is achieved by use of a file containing the node unique identifier that is persisted on disk. Prior to 2023.1, a combination of the compute node's hostname and the :oslo.config:option:`host` value in the configuration file were used. The 2023.1 and later compute node identification file must remain unchanged during the lifecycle of the compute node. Changing the value or removing the file will result in a failure to start and may require advanced techniques for recovery. The file is read once at ``nova-compute`` startup, at which point it is validated for formatting and the corresponding node is located or created in the database. .. note:: Even after 2023.1, the compute node's hostname may not be changed after the initial registration with the controller nodes, it is just not used as the primary method for identification. The behavior of ``nova-compute`` is different when using the ironic driver, as the (UUID-based) identity and mapping of compute nodes to compute manager service hosts is dynamic. In that case, no single node identity is maintained by the compute host and thus no identity file is read or written. Thus none of the sections below apply to hosts with :oslo.config:option:`compute_driver` set to ``ironic``. Self-provisioning of the node identity -------------------------------------- By default, ``nova-compute`` will automatically generate and write a UUID to disk the first time it starts up, and will use that going forward as its stable identity. Using the :oslo.config:option:`state_path` (which is ``/var/lib/nova`` on most systems), a ``compute_id`` file will be created with a generated UUID. Since this file (and its parent directory) is writable by nova, it may be desirable to move this to one of the other locations that nova looks for the identification file. 
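For example, on a host using the default :oslo.config:option:`state_path`, the
self-provisioned identity can be inspected directly; the UUID shown below is
purely illustrative:

.. code-block:: console

   $ cat /var/lib/nova/compute_id
   153a4275-9bfe-4dd1-b6c9-2f9b9cbf4a60
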
Deployment provisioning of the node identity -------------------------------------------- In addition to the location mentioned above, nova will also search the parent directories of any config file in use (either the defaults or provided on the command line) for a ``compute_id`` file. Thus, a deployment tool may, on most systems, pre-provision the node's UUID by writing one to ``/etc/nova/compute_id``. The contents of the file should be a single UUID in canonical textual representation with no additional whitespace or other characters. The following should work on most Linux systems: .. code-block:: shell $ uuidgen > /etc/nova/compute_id .. note:: **Do not** execute the above command blindly in every run of a deployment tool, as that will result in overwriting the ``compute_id`` file each time, which *will* prevent nova from working properly. Upgrading from pre-2023.1 ------------------------- Before release 2023.1, ``nova-compute`` only used the hostname (combined with :oslo.config:option:`host`, if set) to identify its compute node objects in the database. When upgrading from a prior release, the compute node will perform a one-time migration of the hostname-matched compute node UUID to the ``compute_id`` file in the :oslo.config:option:`state_path` location. .. note:: It is imperative that you allow the above migration to run and complete on compute nodes that are being upgraded. Skipping this step by pre-provisioning a ``compute_id`` file before the upgrade will **not** work and will be equivalent to changing the compute node UUID after it has already been created once. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/concurrency.rst0000664000175000017500000001173200000000000021007 0ustar00zuulzuul00000000000000Nova service concurrency ======================== For a long time nova services relied almost exclusively on the Eventlet library for processing multiple API requests, RPC requests and other tasks that needed concurrency. Since Eventlet is not expected to support the next major cPython version the OpenStack TC set a `goal`__ to replace Eventlet and therefore Nova has started transitioning its concurrency model to native threads. During this transition Nova maintains the Eventlet based concurrency mode while building up support for the native threading mode. .. __: https://governance.openstack.org/tc/goals/selected/remove-eventlet.html .. note:: The native threading mode is experimental. Do not use it in production without first testing it in pre-production. If you do so please let us now how it went on the mailing list openstack-discuss@lists.openstack.org. Selecting concurrency mode for a service ---------------------------------------- Nova still uses Eventlet by default, but allows switching services to native threading mode at service startup via setting the environment variable ``OS_NOVA_DISABLE_EVENTLET_PATCHING=true``. .. note:: Since nova 32.0.0 (2025.2 Flamingo) the nova-scheduler, nova-metadata, and nova-api can be switched to native threading mode. Tunables for the native threading mode -------------------------------------- As native threads are more expensive resources than greenthreads Nova provides a set of configuration options to allow fine tuning the deployment based on load and resource constraints. The default values are selected to support a basic, small deployment without consuming substantially more memory resources, than the legacy Eventlet mode. 
Increasing the size of the below thread pools means that the given service will consume more memory but will also allow more tasks to be executed concurrently. * :oslo.config:option:`cell_worker_thread_pool_size`: Used to execute tasks across all the cells within the deployment. For example, to generate the result of the ``openstack server list`` CLI command, the nova-api service will use one native thread for each cell to load the nova instances from the related cell database. So if the deployment has many cells, the size of this pool probably needs to be increased. This option is only relevant for nova-api, nova-metadata, nova-scheduler, and nova-conductor as these are the services doing cross-cell operations. * :oslo.config:option:`executor_thread_pool_size`: Used to handle incoming RPC requests. Services with many more inbound requests will need larger pools. For example, a single conductor serves requests from many computes as well as the scheduler. A compute node only serves requests from the API for lifecycle operations and other computes during migrations. This option is only relevant for nova-scheduler, nova-conductor, and nova-compute as these are the services acting as RPC servers. * :oslo.config:option:`default_thread_pool_size`: Used by various concurrent tasks in the service that are not categorized into the above pools. This option is relevant to every nova service using ``nova.utils.spawn()``. Seeing the usage of the pools ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When new work is submitted to any of these pools, in both concurrency modes, Nova logs the statistics of the pool (work executed, threads available, work queued, etc.). This can be useful when fine tuning of the pool size is needed. The parameter :oslo.config:option:`thread_pool_statistic_period` defines, in seconds, how frequently such logging happens for a specific pool. A value of 60 seconds means that stats will be logged from a pool at most once every 60 seconds. The value 0 means that logging happens every time work is submitted to the pool. The default value is -1, meaning that the stats logging is disabled. Preventing hanging threads ~~~~~~~~~~~~~~~~~~~~~~~~~~ Threads from a pool are not cancellable once they are executing a task; it is therefore important to ensure that external dependencies cannot hold up a task indefinitely, as that would leave fewer threads in the pool available for incoming work and therefore reduce overall capacity. Nova's RPC interface already uses proper timeout handling to avoid hanging threads. However, adding timeout handling to Nova's database interface depends on the database server and the database client library. For mysql-server the `max_execution_time`__ configuration option can be used to limit the execution time of a database query on the server side. Similar options exist for other database servers. .. __: https://dev.mysql.com/doc/refman/8.4/en/server-system-variables.html#sysvar_max_execution_time For the pymysql database client, a client-side timeout can be implemented by adding the `read_timeout`__ connection parameter to the connection string. .. __: https://pymysql.readthedocs.io/en/latest/modules/connections.html#module-pymysql.connections We recommend using both in deployments where Nova services are running in native threading mode.
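For illustration, the two recommendations above might be combined as follows; the file locations and timeout values are examples only and should be tuned to the deployment (note that MySQL's ``max_execution_time`` is expressed in milliseconds and applies only to ``SELECT`` statements):

.. code-block:: ini

   # MySQL server side (for example in my.cnf): abort SELECT statements
   # that run longer than 30 seconds.
   [mysqld]
   max_execution_time = 30000

.. code-block:: ini

   # nova.conf client side: pass a 60 second read timeout to pymysql via
   # the connection string.
   [database]
   connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova?read_timeout=60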
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/config-drive.rst0000664000175000017500000000773700000000000021043 0ustar00zuulzuul00000000000000============= Config drives ============= .. note:: This section provides deployment information about the config drive feature. For end-user information about the config drive feature and instance metadata in general, refer to the :doc:`user guide
`. Config drives are special drives that are attached to an instance when it boots. The instance can mount this drive and read files from it to get information that is normally available through :doc:`the metadata service `. There are many use cases for the config drive. One such use case is to pass a networking configuration when you do not use DHCP to assign IP addresses to instances. For example, you might pass the IP address configuration for the instance through the config drive, which the instance can mount and access before you configure the network settings for the instance. Another common reason to use config drives is load. If running something like the OpenStack puppet providers in your instances, they can hit the :doc:`metadata servers ` every fifteen minutes, simultaneously for every instance you have. They are just checking in, and building facts, but it's not insignificant load. With a config drive, that becomes a local (cached) disk read. Finally, using a config drive means you're not dependent on the metadata service being up, reachable, or performing well to do things like reboot your instance that runs `cloud-init`_ at the beginning. Any modern guest operating system that is capable of mounting an ISO 9660 or VFAT file system can use the config drive. Requirements and guidelines --------------------------- To use the config drive, you must follow the following requirements for the compute host and image. .. rubric:: Compute host requirements The following virt drivers support the config drive: libvirt and VMware. The Bare Metal service also supports the config drive. - To use config drives with libvirt or VMware, you must first install the :command:`genisoimage` package on each compute host. Use the :oslo.config:option:`mkisofs_cmd` config option to set the path where you install the :command:`genisoimage` program. If :command:`genisoimage` is in the same path as the :program:`nova-compute` service, you do not need to set this flag. - To use config drives with the Bare Metal service, you do not need to prepare anything. .. rubric:: Image requirements An image built with a recent version of the `cloud-init`_ package can automatically access metadata passed through the config drive. The cloud-init package version 0.7.1 works with Ubuntu, Fedora based images (such as Red Hat Enterprise Linux) and openSUSE based images (such as SUSE Linux Enterprise Server). If an image does not have the cloud-init package installed, you must customize the image to run a script that mounts the config drive on boot, reads the data from the drive, and takes appropriate action such as adding the public key to an account. For more details about how data is organized on the config drive, refer to the :ref:`user guide `. Configuration ------------- The :program:`nova-compute` service accepts the following config drive-related options: - :oslo.config:option:`api.config_drive_skip_versions` - :oslo.config:option:`force_config_drive` - :oslo.config:option:`config_drive_format` For example, to ensure nova always provides a config drive to instances but versions ``2018-08-27`` (Rocky) and ``2017-02-22`` (Ocata) are skipped, add the following to :file:`nova.conf`: .. code-block:: ini [DEFAULT] force_config_drive = True [api] config_drive_skip_versions = 2018-08-27 2017-02-22 .. note:: The ``img_config_drive`` image metadata property can be used to force enable the config drive. In addition, users can explicitly request a config drive when booting instances. 
For more information, refer to the :ref:`user guide `. .. _cloud-init: https://cloudinit.readthedocs.io/en/latest/ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.2376077 nova-32.0.0/doc/source/admin/configuration/0000775000175000017500000000000000000000000020566 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/api.rst0000664000175000017500000000225500000000000022075 0ustar00zuulzuul00000000000000========================= Compute API configuration ========================= The Compute API, is the component of OpenStack Compute that receives and responds to user requests, whether they be direct API calls, or via the CLI tools or dashboard. Configure Compute API password handling --------------------------------------- The OpenStack Compute API enables users to specify an administrative password when they create, rebuild, rescue or evacuate a server instance. If the user does not specify a password, a random password is generated and returned in the API response. In practice, how the admin password is handled depends on the hypervisor in use and might require additional configuration of the instance. For example, you might have to install an agent to handle the password setting. If the hypervisor and instance configuration do not support setting a password at server create time, the password that is returned by the create API call is misleading because it was ignored. To prevent this confusion, set the ``enable_instance_password`` configuration to ``False`` to disable the return of the admin password for installations that do not support setting instance passwords. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/cross-cell-resize.rst0000664000175000017500000002635000000000000024673 0ustar00zuulzuul00000000000000================= Cross-cell resize ================= .. note:: This document describes how to configure nova for cross-cell resize. For information on :term:`same-cell resize `, refer to :doc:`/admin/configuration/resize`. For information on the cells v2 feature, refer to :doc:`/admin/cells`. Historically resizing and cold migrating a server has been explicitly `restricted`__ to within the same cell in which the server already exists. The cross-cell resize feature allows configuring nova to allow resizing and cold migrating servers across cells. The full design details are in the `Ussuri spec`__ and there is a `video`__ from a summit talk with a high-level overview. .. __: https://opendev.org/openstack/nova/src/tag/20.0.0/nova/conductor/tasks/migrate.py#L164 .. __: https://specs.openstack.org/openstack/nova-specs/specs/ussuri/approved/cross-cell-resize.html .. __: https://www.openstack.org/videos/summits/denver-2019/whats-new-in-nova-cellsv2 Use case -------- There are many reasons to use multiple cells in a nova deployment beyond just scaling the database and message queue. Cells can also be used to shard a deployment by hardware generation and feature functionality. When sharding by hardware generation, it would be natural to setup a host aggregate for each cell and map flavors to the aggregate. 
Then when it comes time to decommission old hardware the deployer could provide new flavors and request that users resize to the new flavors, before some deadline, which under the covers will migrate their servers to the new cell with newer hardware. Administrators could also just cold migrate the servers during a maintenance window to the new cell. Requirements ------------ To enable cross-cell resize functionality the following conditions must be met. Minimum compute versions ~~~~~~~~~~~~~~~~~~~~~~~~ All compute services must be upgraded to 21.0.0 (Ussuri) or later and not be pinned to older RPC API versions in :oslo.config:option:`upgrade_levels.compute`. Policy configuration ~~~~~~~~~~~~~~~~~~~~ The policy rule ``compute:servers:resize:cross_cell`` controls who can perform a cross-cell resize or cold migrate operation. By default the policy disables the functionality for *all* users. A microversion is not required to opt into the behavior, just passing the policy check. As such, it is recommended to start by allowing only certain users to be able to perform a cross-cell resize or cold migration, for example by setting the rule to ``rule:admin_api`` or some other rule for test teams but not normal users until you are comfortable supporting the feature. Compute driver ~~~~~~~~~~~~~~ There are no special compute driver implementations required to support the feature, it is built on existing driver interfaces used during resize and shelve/unshelve. However, only the libvirt compute driver has integration testing in the ``nova-multi-cell`` CI job. Networking ~~~~~~~~~~ The networking API must expose the ``Port Bindings Extended`` API extension which was added in the 13.0.0 (Rocky) release for Neutron. Notifications ------------- The types of events and their payloads remain unchanged. The major difference from same-cell resize is the *publisher_id* may be different in some cases since some events are sent from the conductor service rather than a compute service. For example, with same-cell resize the ``instance.resize_revert.start`` notification is sent from the source compute host in the `finish_revert_resize`__ method but with cross-cell resize that same notification is sent from the conductor service. Obviously the actual message queue sending the notifications would be different for the source and target cells assuming they use separate transports. .. __: https://opendev.org/openstack/nova/src/tag/20.0.0/nova/compute/manager.py#L4326 Instance actions ---------------- The overall instance actions named ``resize``, ``confirmResize`` and ``revertResize`` are the same as same-cell resize. However, the *events* which make up those actions will be different for cross-cell resize since the event names are generated based on the compute service methods involved in the operation and there are different methods involved in a cross-cell resize. This is important for triage when a cross-cell resize operation fails. Scheduling ---------- The :ref:`CrossCellWeigher ` is enabled by default. When a scheduling request allows selecting compute nodes from another cell the weigher will by default *prefer* hosts within the source cell over hosts from another cell. However, this behavior is configurable using the :oslo.config:option:`filter_scheduler.cross_cell_move_weight_multiplier` configuration option if, for example, you want to drain old cells when resizing or cold migrating. Code flow --------- The end user experience is meant to not change, i.e. status transitions. 
A successfully cross-cell resized server will go to ``VERIFY_RESIZE`` status and from there the user can either confirm or revert the resized server using the normal `confirmResize`__ and `revertResize`__ server action APIs. Under the covers there are some differences from a traditional same-cell resize: * There is no inter-compute interaction. Everything is synchronously `orchestrated`__ from the (super)conductor service. This uses the :oslo.config:option:`long_rpc_timeout` configuration option. * The orchestration tasks in the (super)conductor service are in charge of creating a copy of the instance and its related records in the target cell database at the beginning of the operation, deleting them in case of rollback or when the resize is confirmed/reverted, and updating the ``instance_mappings`` table record in the API database. * Non-volume-backed servers will have their root disk uploaded to the image service as a temporary snapshot image just like during the `shelveOffload`__ operation. When finishing the resize on the destination host in the target cell that snapshot image will be used to spawn the guest and then the snapshot image will be deleted. .. __: https://docs.openstack.org/api-ref/compute/#confirm-resized-server-confirmresize-action .. __: https://docs.openstack.org/api-ref/compute/#revert-resized-server-revertresize-action .. __: https://opendev.org/openstack/nova/src/branch/master/nova/conductor/tasks/cross_cell_migrate.py .. __: https://docs.openstack.org/api-ref/compute/#shelf-offload-remove-server-shelveoffload-action Sequence diagram ---------------- The following diagrams are current as of the 21.0.0 (Ussuri) release. .. NOTE(mriedem): These diagrams could be more detailed, for example breaking down the individual parts of the conductor tasks and the calls made on the source and dest compute to the virt driver, cinder and neutron, but the diagrams could (1) get really complex and (2) become inaccurate with changes over time. If there are particular sub-sequences that should have diagrams I would suggest putting those into separate focused diagrams. Resize ~~~~~~ This is the sequence of calls to get the server to ``VERIFY_RESIZE`` status. .. image:: /_static/images/resize/cross-cell/resize.svg :alt: Resize standard workflow Confirm resize ~~~~~~~~~~~~~~ This is the sequence of calls when confirming `or deleting`_ a server in ``VERIFY_RESIZE`` status. .. image:: /_static/images/resize/cross-cell/resize_confirm.svg :alt: Resize confirm workflow .. _or deleting: https://opendev.org/openstack/nova/src/tag/20.0.0/nova/compute/api.py#L2171 Revert resize ~~~~~~~~~~~~~ This is the sequence of calls when reverting a server in ``VERIFY_RESIZE`` status. .. image:: /_static/images/resize/cross-cell/resize_revert.svg :alt: Resize revert workflow Limitations ----------- These are known to not yet be supported in the code: * Instances with ports attached that have :doc:`bandwidth-aware ` resource provider allocations. Nova falls back to same-cell resize if the server has such ports. * Rescheduling to alternative hosts within the same target cell in case the primary selected host fails the ``prep_snapshot_based_resize_at_dest`` call. These may not work since they have not been validated by integration testing: * Instances with PCI devices attached. * Instances with a NUMA topology. Other limitations: * The config drive associated with the server, if there is one, will be re-generated on the destination host in the target cell. 
Therefore if the server was created with `personality files`__ they will be lost. However, this is no worse than `evacuating`__ a server that had a config drive when the source and destination compute host are not on shared storage or when shelve offloading and unshelving a server with a config drive. If necessary, the resized server can be rebuilt to regain the personality files. * The ``_poll_unconfirmed_resizes`` periodic task, which can be :oslo.config:option:`configured ` to automatically confirm pending resizes on the target host, *might* not support cross-cell resizes because doing so would require an :ref:`up-call ` to the API to confirm the resize and cleanup the source cell database. .. __: https://docs.openstack.org/api-guide/compute/server_concepts.html#server-personality .. __: https://docs.openstack.org/api-ref/compute/#evacuate-server-evacuate-action Troubleshooting --------------- Timeouts ~~~~~~~~ Configure a :ref:`service user ` in case the user token times out, e.g. during the snapshot and download of a large server image. If RPC calls are timing out with a ``MessagingTimeout`` error in the logs, check the :oslo.config:option:`long_rpc_timeout` option to see if it is high enough though the default value (30 minutes) should be sufficient. Recovering from failure ~~~~~~~~~~~~~~~~~~~~~~~ The orchestration tasks in conductor that drive the operation are built with rollbacks so each part of the operation can be rolled back in order if a subsequent task fails. The thing to keep in mind is the ``instance_mappings`` record in the API DB is the authority on where the instance "lives" and that is where the API will go to show the instance in a ``GET /servers/{server_id}`` call or any action performed on the server, including deleting it. So if the resize fails and there is a copy of the instance and its related records in the target cell, the tasks should automatically delete them but if not you can hard-delete the records from whichever cell is *not* the one in the ``instance_mappings`` table. If the instance is in ``ERROR`` status, check the logs in both the source and destination compute service to see if there is anything that needs to be manually recovered, for example volume attachments or port bindings, and also check the (super)conductor service logs. Assuming volume attachments and port bindings are OK (current and pointing at the correct host), then try hard rebooting the server to get it back to ``ACTIVE`` status. If that fails, you may need to `rebuild`__ the server on the source host. Note that the guest's disks on the source host are not deleted until the resize is confirmed so if there is an issue prior to confirm or confirm itself fails, the guest disks should still be available for rebuilding the instance if necessary. .. __: https://docs.openstack.org/api-ref/compute/#rebuild-server-rebuild-action ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/fibre-channel.rst0000664000175000017500000000154200000000000024017 0ustar00zuulzuul00000000000000================================= Configuring Fibre Channel Support ================================= Fibre Channel support in OpenStack Compute is remote block storage attached to compute nodes for VMs. .. todo:: This below statement needs to be verified for current release Fibre Channel supported only the KVM hypervisor. Compute and Block Storage support Fibre Channel automatic zoning on Brocade and Cisco switches. 
On other hardware Fibre Channel arrays must be pre-zoned or directly attached to the KVM hosts. KVM host requirements ~~~~~~~~~~~~~~~~~~~~~ You must install these packages on the KVM host: ``sysfsutils`` Nova uses the ``systool`` application in this package. ``sg3-utils`` or ``sg3_utils`` Nova uses the ``sg_scan`` and ``sginfo`` applications. Installing the ``multipath-tools`` or ``device-mapper-multipath`` package is optional. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/hypervisor-ironic.rst0000664000175000017500000000571100000000000025017 0ustar00zuulzuul00000000000000====== Ironic ====== Introduction ------------ The ironic hypervisor driver wraps the Bare Metal (ironic) API, enabling Nova to provision baremetal resources using the same user-facing API as for server management. This is the only driver in nova where one compute service can map to many hosts, meaning a ``nova-compute`` service can manage multiple ``ComputeNodes``. An ironic driver managed compute service uses the ironic ``node uuid`` for the compute node ``hypervisor_hostname`` (nodename) and ``uuid`` fields. The relationship of ``instance:compute node:ironic node`` is 1:1:1. Scheduling of bare metal nodes is based on custom resource classes, specified via the ``resource_class`` property on a node and a corresponding resource property on a flavor (see the :ironic-doc:`flavor documentation `). The RAM and CPU settings on a flavor are ignored, and the disk is only used to determine the root partition size when a partition image is used (see the :ironic-doc:`image documentation `). Configuration ------------- - :ironic-doc:`Configure the Compute service to use the Bare Metal service `. - :ironic-doc:`Create flavors for use with the Bare Metal service `. - :ironic-doc:`Conductors Groups `. Scaling and performance issues ------------------------------ - It is typical for a single nova-compute process to support several hundred Ironic nodes. There are known issues when you attempt to support more than 1000 Ironic nodes associated with a single nova-compute process, even though Ironic is able to scale out a single conductor group to much larger sizes. There are many other factors that can affect what is the maximum practical size of a conductor group within your deployment. - The ``update_available_resource`` periodic task reports all the resources managed by Ironic. Depending the number of nodes, it can take a lot of time. The nova-compute will not perform any other operations when this task is running. You can use conductor groups to help shard your deployment between multiple nova-compute processes by setting :oslo.config:option:`ironic.conductor_group`. - The nova-compute process using the Ironic driver can be moved between different physical servers using active/passive failover. But when doing this failover, you must ensure :oslo.config:option:`host` is the same no matter where the nova-compute process is running. Similarly you must ensure there are at most one nova-compute processes running for each conductor group. - Running multiple nova-compute processes that point at the same conductor group is now deprecated. 
Please never have more than one host in the peer list: :oslo.config:option:`ironic.peer_list` Known limitations / Missing features ------------------------------------ * Migrate * Resize * Snapshot * Pause * Shelve * Evacuate ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/hypervisor-kvm.rst0000664000175000017500000003150400000000000024330 0ustar00zuulzuul00000000000000=== KVM === KVM is configured as the default hypervisor for Compute. .. note:: This document contains several sections about hypervisor selection. If you are reading this document linearly, you do not want to load the KVM module before you install ``nova-compute``. The ``nova-compute`` service depends on qemu-kvm, which installs ``/lib/udev/rules.d/45-qemu-kvm.rules``, which sets the correct permissions on the ``/dev/kvm`` device node. The KVM hypervisor supports the following virtual machine image formats: * Raw * QEMU Copy-on-write (QCOW2) * QED Qemu Enhanced Disk * VMware virtual machine disk format (vmdk) This section describes how to enable KVM on your system. For more information, see the following distribution-specific documentation: * `Fedora: Virtualization Getting Started Guide`__ * `Ubuntu: KVM/Installation`__ * `Debian: KVM Guide`__ * `Red Hat Enterprise Linux (RHEL): Getting started with virtualization`__ * `openSUSE: Setting Up a KVM VM Host Server`__ * `SLES: Virtualization with KVM`__. .. __: https://docs.fedoraproject.org/en-US/quick-docs/getting-started-with-virtualization/ .. __: https://help.ubuntu.com/community/KVM/Installation .. __: https://wiki.debian.org/KVM .. __: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/configuring_and_managing_virtualization/getting-started-with-virtualization-in-rhel-8_configuring-and-managing-virtualization .. __: https://doc.opensuse.org/documentation/leap/virtualization/html/book-virt/cha-qemu-host.html .. __: https://documentation.suse.com/sles/11-SP4/html/SLES-all/book-kvm.html Configuration ------------- To enable KVM explicitly, add the following configuration options to the ``/etc/nova/nova.conf`` file: .. code-block:: ini [DEFAULT] compute_driver = libvirt.LibvirtDriver [libvirt] virt_type = kvm .. _enable-kvm: Enable KVM ---------- The following sections outline how to enable KVM based hardware virtualization on different architectures and platforms. To perform these steps, you must be logged in as the ``root`` user. For x86-based systems ~~~~~~~~~~~~~~~~~~~~~ #. To determine whether the ``svm`` or ``vmx`` CPU extensions are present, run this command: .. code-block:: console # grep -E 'svm|vmx' /proc/cpuinfo This command generates output if the CPU is capable of hardware-virtualization. Even if output is shown, you might still need to enable virtualization in the system BIOS for full support. If no output appears, consult your system documentation to ensure that your CPU and motherboard support hardware virtualization. Verify that any relevant hardware virtualization options are enabled in the system BIOS. The BIOS for each manufacturer is different. If you must enable virtualization in the BIOS, look for an option containing the words ``virtualization``, ``VT``, ``VMX``, or ``SVM``. #. To list the loaded kernel modules and verify that the ``kvm`` modules are loaded, run this command: .. 
code-block:: console # lsmod | grep kvm If the output includes ``kvm_intel`` or ``kvm_amd``, the ``kvm`` hardware virtualization modules are loaded and your kernel meets the module requirements for OpenStack Compute. If the output does not show that the ``kvm`` module is loaded, run this command to load it: .. code-block:: console # modprobe -a kvm Run the command for your CPU. For Intel, run this command: .. code-block:: console # modprobe -a kvm-intel For AMD, run this command: .. code-block:: console # modprobe -a kvm-amd Because a KVM installation can change user group membership, you might need to log in again for changes to take effect. If the kernel modules do not load automatically, use the procedures listed in these subsections. If the checks indicate that required hardware virtualization support or kernel modules are disabled or unavailable, you must either enable this support on the system or find a system with this support. .. note:: Some systems require that you enable VT support in the system BIOS. If you believe your processor supports hardware acceleration but the previous command did not produce output, reboot your machine, enter the system BIOS, and enable the VT option. If KVM acceleration is not supported, configure Compute to use a different hypervisor, such as :ref:`QEMU `. These procedures help you load the kernel modules for Intel-based and AMD-based processors if they do not load automatically during KVM installation. .. rubric:: Intel-based processors If your compute host is Intel-based, run these commands as root to load the kernel modules: .. code-block:: console # modprobe kvm # modprobe kvm-intel Add these lines to the ``/etc/modules`` file so that these modules load on reboot: .. code-block:: console kvm kvm-intel .. rubric:: AMD-based processors If your compute host is AMD-based, run these commands as root to load the kernel modules: .. code-block:: console # modprobe kvm # modprobe kvm-amd Add these lines to ``/etc/modules`` file so that these modules load on reboot: .. code-block:: console kvm kvm-amd For POWER-based systems ~~~~~~~~~~~~~~~~~~~~~~~ KVM as a hypervisor is supported on POWER system's PowerNV platform. #. To determine if your POWER platform supports KVM based virtualization run the following command: .. code-block:: console # cat /proc/cpuinfo | grep PowerNV If the previous command generates the following output, then CPU supports KVM based virtualization. .. code-block:: console platform: PowerNV If no output is displayed, then your POWER platform does not support KVM based hardware virtualization. #. To list the loaded kernel modules and verify that the ``kvm`` modules are loaded, run the following command: .. code-block:: console # lsmod | grep kvm If the output includes ``kvm_hv``, the ``kvm`` hardware virtualization modules are loaded and your kernel meets the module requirements for OpenStack Compute. If the output does not show that the ``kvm`` module is loaded, run the following command to load it: .. code-block:: console # modprobe -a kvm For PowerNV platform, run the following command: .. code-block:: console # modprobe -a kvm-hv Because a KVM installation can change user group membership, you might need to log in again for changes to take effect. For AArch64-based systems ~~~~~~~~~~~~~~~~~~~~~~~~~ .. todo:: Populate this section. Configure Compute backing storage --------------------------------- Backing Storage is the storage used to provide the expanded operating system image, and any ephemeral storage. 
Inside the virtual machine, this is normally presented as two virtual hard disks (for example, ``/dev/vda`` and ``/dev/vdb`` respectively). However, inside OpenStack, this can be derived from one of these methods: ``lvm``, ``qcow``, ``rbd`` or ``flat``, chosen using the :oslo.config:option:`libvirt.images_type` option in ``nova.conf`` on the compute node. .. note:: The option ``raw`` is acceptable but deprecated in favor of ``flat``. The Flat back end uses either raw or QCOW2 storage. It never uses a backing store, so when using QCOW2 it copies an image rather than creating an overlay. By default, it creates raw files but will use QCOW2 when creating a disk from a QCOW2 if :oslo.config:option:`force_raw_images` is not set in configuration. QCOW is the default backing store. It uses a copy-on-write philosophy to delay allocation of storage until it is actually needed. This means that the space required for the backing of an image can be significantly less on the real disk than what seems available in the virtual machine operating system. Flat creates files without any sort of file formatting, effectively creating files with the plain binary one would normally see on a real disk. This can increase performance, but means that the entire size of the virtual disk is reserved on the physical disk. Local `LVM volumes `__ can also be used. Set the :oslo.config:option:`libvirt.images_volume_group` configuration option to the name of the LVM group you have created. Direct download of images from Ceph ----------------------------------- When the Glance image service is set up with the Ceph backend and Nova is using a local ephemeral store (``[libvirt]/images_type!=rbd``), it is possible to configure Nova to download images directly into the local compute image cache. With the following configuration, images are downloaded using the RBD export command instead of using the Glance HTTP API. In some situations, especially for very large images, this could be substantially faster and can improve the boot times of instances. On the Glance API node in ``glance-api.conf``: .. code-block:: ini [DEFAULT] show_image_direct_url=true On the Nova compute node in nova.conf: .. code-block:: ini [glance] enable_rbd_download=true rbd_user=glance rbd_pool=images rbd_ceph_conf=/etc/ceph/ceph.conf rbd_connect_timeout=5 Nested guest support -------------------- You may choose to enable support for nested guests --- that is, allow your Nova instances to themselves run hardware-accelerated virtual machines with KVM. Doing so requires a module parameter on your KVM kernel module, and corresponding ``nova.conf`` settings. Host configuration ~~~~~~~~~~~~~~~~~~ To enable nested KVM guests, your compute node must load the ``kvm_intel`` or ``kvm_amd`` module with ``nested=1``. You can enable the ``nested`` parameter permanently, by creating a file named ``/etc/modprobe.d/kvm.conf`` and populating it with the following content: .. code-block:: none options kvm_intel nested=1 options kvm_amd nested=1 A reboot may be required for the change to become effective. Nova configuration ~~~~~~~~~~~~~~~~~~ To support nested guests, you must set your :oslo.config:option:`libvirt.cpu_mode` configuration to one of the following options: Host passthrough (``host-passthrough``) In this mode, nested virtualization is automatically enabled once the KVM kernel module is loaded with nesting support. .. 
code-block:: ini [libvirt] cpu_mode = host-passthrough However, do consider the other implications that :doc:`host passthrough ` mode has on compute functionality. Host model (``host-model``) In this mode, nested virtualization is automatically enabled once the KVM kernel module is loaded with nesting support, **if** the matching CPU model exposes the ``vmx`` feature flag to guests by default (you can verify this with ``virsh capabilities`` on your compute node). If your CPU model does not pass in the ``vmx`` flag, you can force it with :oslo.config:option:`libvirt.cpu_model_extra_flags`: .. code-block:: ini [libvirt] cpu_mode = host-model cpu_model_extra_flags = vmx Again, consider the other implications that apply to the :doc:`host model ` mode. Custom (``custom``) In custom mode, the same considerations apply as in host-model mode, but you may *additionally* want to ensure that libvirt passes not only the ``vmx``, but also the ``pcid`` flag to its guests: .. code-block:: ini [libvirt] cpu_mode = custom cpu_models = IvyBridge cpu_model_extra_flags = vmx,pcid More information on CPU models can be found in :doc:`/admin/cpu-models`. Limitations ~~~~~~~~~~~~ When enabling nested guests, you should be aware of (and inform your users about) certain limitations that are currently inherent to nested KVM virtualization. Most importantly, guests using nested virtualization will, *while nested guests are running*, * fail to complete live migration; * fail to resume from suspend. See `the KVM documentation `_ for more information on these limitations. KVM performance tweaks ---------------------- The `VHostNet `_ kernel module improves network performance. To load the kernel module, run the following command as root: .. code-block:: console # modprobe vhost_net Troubleshooting --------------- Trying to launch a new virtual machine instance fails with the ``ERROR`` state, and the following error appears in the ``/var/log/nova/nova-compute.log`` file: .. code-block:: console libvirtError: internal error no supported architecture for os type 'hvm' This message indicates that the KVM kernel modules were not loaded. If you cannot start VMs after installation without rebooting, the permissions might not be set correctly. This can happen if you load the KVM module before you install ``nova-compute``. To check whether the group is set to ``kvm``, run: .. code-block:: console # ls -l /dev/kvm If it is not set to ``kvm``, run: .. code-block:: console # udevadm trigger ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/hypervisor-lxc.rst0000664000175000017500000000321700000000000024321 0ustar00zuulzuul00000000000000====================== LXC (Linux containers) ====================== LXC (also known as Linux containers) is a virtualization technology that works at the operating system level. This is different from hardware virtualization, the approach used by other hypervisors such as KVM, Xen, and VMware. LXC (as currently implemented using libvirt in the Compute service) is not a secure virtualization technology for multi-tenant environments (specifically, containers may affect resource quotas for other containers hosted on the same machine). Additional containment technologies, such as AppArmor, may be used to provide better isolation between containers, although this is not the case by default. For all these reasons, the choice of this virtualization technology is not recommended in production. 
If your compute hosts do not have hardware support for virtualization, LXC will likely provide better performance than QEMU. In addition, if your guests must access specialized hardware, such as GPUs, this might be easier to achieve with LXC than with other hypervisors. .. note:: Some OpenStack Compute features might be missing when running with LXC as the hypervisor. See the `hypervisor support matrix `_ for details. Configuration ------------- To enable LXC, configure :oslo.config:option:`DEFAULT.compute_driver` = ``libvirt.LibvirtDriver`` and :oslo.config:option:`libvirt.virt_type` = ``lxc``. For example: .. code-block:: ini [DEFAULT] compute_driver = libvirt.LibvirtDriver [libvirt] virt_type = lxc On Ubuntu, enable LXC support in OpenStack by installing the ``nova-compute-lxc`` package. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/hypervisor-qemu.rst0000664000175000017500000000301600000000000024477 0ustar00zuulzuul00000000000000.. _compute_qemu: ==== QEMU ==== From the perspective of the Compute service, the QEMU hypervisor is very similar to the KVM hypervisor. Both are controlled through libvirt, both support the same feature set, and all virtual machine images that are compatible with KVM are also compatible with QEMU. The main difference is that QEMU does not support native virtualization. Consequently, QEMU has worse performance than KVM and is a poor choice for a production deployment. The typical use cases for QEMU are: * Running on older hardware that lacks virtualization support. * Running the Compute service inside of a virtual machine for development or testing purposes, where the hypervisor does not support native virtualization for guests. Configuration ------------- To enable QEMU, configure :oslo.config:option:`DEFAULT.compute_driver` = ``libvirt.LibvirtDriver`` and :oslo.config:option:`libvirt.virt_type` = ``qemu``. For example: .. code-block:: ini [DEFAULT] compute_driver = libvirt.LibvirtDriver [libvirt] virt_type = qemu For some operations you may also have to install the :command:`guestmount` utility: On Ubuntu: .. code-block:: console # apt-get install guestmount On Red Hat Enterprise Linux, Fedora, or CentOS: .. code-block:: console # dnf install libguestfs-tools On openSUSE: .. code-block:: console # zypper install guestfs-tools The QEMU hypervisor supports the following virtual machine image formats: * Raw * QEMU Copy-on-write (qcow2) * VMware virtual machine disk format (vmdk) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/hypervisor-virtuozzo.rst0000664000175000017500000000223600000000000025626 0ustar00zuulzuul00000000000000========= Virtuozzo ========= Virtuozzo 7.0.0 (or newer), or its community edition OpenVZ, provides both types of virtualization: Kernel Virtual Machines and OS Containers. The type of instance to spawn is chosen depending on the ``hw_vm_type`` property of an image. .. note:: Some OpenStack Compute features may be missing when running with Virtuozzo as the hypervisor. See :doc:`/user/support-matrix` for details. Configuration ------------- To enable Virtuozzo Containers, configure :oslo.config:option:`DEFAULT.compute_driver` = ``libvirt.LibvirtDriver`` and :oslo.config:option:`libvirt.virt_type` = ``parallels``. For example: ..
code-block:: ini [DEFAULT] compute_driver = libvirt.LibvirtDriver force_raw_images = False [libvirt] virt_type = parallels images_type = ploop connection_uri = parallels:///system inject_partition = -2 To enable Virtuozzo Virtual Machines, set the following options in ``/etc/nova/nova.conf`` on all hosts running the ``nova-compute`` service. .. code-block:: ini [DEFAULT] compute_driver = libvirt.LibvirtDriver [libvirt] virt_type = parallels images_type = qcow2 connection_uri = parallels:///system ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/hypervisor-vmware.rst0000664000175000017500000007045400000000000025043 0ustar00zuulzuul00000000000000============== VMware vSphere ============== Introduction ------------ OpenStack Compute supports the VMware vSphere product family and enables access to advanced features such as vMotion, High Availability, and Dynamic Resource Scheduling (DRS). This section describes how to configure VMware-based virtual machine images for launch. The VMware driver supports vCenter version 5.5.0 and later. The VMware vCenter driver enables the ``nova-compute`` service to communicate with a VMware vCenter server that manages one or more ESX host clusters. The driver aggregates the ESX hosts in each cluster to present one large hypervisor entity for each cluster to the Compute scheduler. Because individual ESX hosts are not exposed to the scheduler, Compute schedules to the granularity of clusters and vCenter uses DRS to select the actual ESX host within the cluster. When a virtual machine makes its way into a vCenter cluster, it can use all vSphere features. The following sections describe how to configure the VMware vCenter driver. High-level architecture ----------------------- The following diagram shows a high-level view of the VMware driver architecture: .. rubric:: VMware driver architecture .. figure:: /_static/images/vmware-nova-driver-architecture.jpg :width: 100% As the figure shows, the OpenStack Compute Scheduler sees three hypervisors that each correspond to a cluster in vCenter. ``nova-compute`` contains the VMware driver. You can run with multiple ``nova-compute`` services. It is recommended to run with one ``nova-compute`` service per ESX cluster thus ensuring that while Compute schedules at the granularity of the ``nova-compute`` service it is also in effect able to schedule at the cluster level. In turn the VMware driver inside ``nova-compute`` interacts with the vCenter APIs to select an appropriate ESX host within the cluster. Internally, vCenter uses DRS for placement. The VMware vCenter driver also interacts with the Image service to copy VMDK images from the Image service back-end store. The dotted line in the figure represents VMDK images being copied from the OpenStack Image service to the vSphere data store. VMDK images are cached in the data store so the copy operation is only required the first time that the VMDK image is used. After OpenStack boots a VM into a vSphere cluster, the VM becomes visible in vCenter and can access vSphere advanced features. At the same time, the VM is visible in the OpenStack dashboard and you can manage it as you would any other OpenStack VM. You can perform advanced vSphere operations in vCenter while you configure OpenStack resources such as VMs through the OpenStack dashboard. The figure does not show how networking fits into the architecture. For details, see :ref:`vmware-networking`. 
Configuration overview ---------------------- To get started with the VMware vCenter driver, complete the following high-level steps: #. Configure vCenter. See :ref:`vmware-prereqs`. #. Configure the VMware vCenter driver in the ``nova.conf`` file. See :ref:`vmware-vcdriver`. #. Load desired VMDK images into the Image service. See :ref:`vmware-images`. #. Configure the Networking service (neutron). See :ref:`vmware-networking`. .. _vmware-prereqs: Prerequisites and limitations ----------------------------- Use the following list to prepare a vSphere environment that runs with the VMware vCenter driver: Copying VMDK files In vSphere 5.1, copying large image files (for example, 12 GB and greater) from the Image service can take a long time. To improve performance, VMware recommends that you upgrade to VMware vCenter Server 5.1 Update 1 or later. For more information, see the `Release Notes `_. DRS For any cluster that contains multiple ESX hosts, enable DRS and enable fully automated placement. Shared storage Only shared storage is supported and data stores must be shared among all hosts in a cluster. It is recommended to remove data stores not intended for OpenStack from clusters being configured for OpenStack. Clusters and data stores Do not use OpenStack clusters and data stores for other purposes. If you do, OpenStack displays incorrect usage information. Networking The networking configuration depends on the desired networking model. See :ref:`vmware-networking`. Security groups If you use the VMware driver with OpenStack Networking and the NSX plug-in, security groups are supported. .. note:: The NSX plug-in is the only plug-in that is validated for vSphere. VNC The port range 5900 - 6105 (inclusive) is automatically enabled for VNC connections on every ESX host in all clusters under OpenStack control. .. note:: In addition to the default VNC port numbers (5900 to 6000) specified in the above document, the following ports are also used: 6101, 6102, and 6105. You must modify the ESXi firewall configuration to allow the VNC ports. Additionally, for the firewall modifications to persist after a reboot, you must create a custom vSphere Installation Bundle (VIB) which is then installed onto the running ESXi host or added to a custom image profile used to install ESXi hosts. For details about how to create a VIB for persisting the firewall configuration modifications, see `Knowledge Base `_. .. note:: The VIB can be downloaded from `openstack-vmwareapi-team/Tools `_. To use multiple vCenter installations with OpenStack, each vCenter must be assigned to a separate availability zone. This is required as the OpenStack Block Storage VMDK driver does not currently work across multiple vCenter installations. VMware vCenter service account ------------------------------ OpenStack integration requires a vCenter service account with the following minimum permissions. Apply the permissions to the ``Datacenter`` root object, and select the :guilabel:`Propagate to Child Objects` option. .. 
list-table:: vCenter permissions tree :header-rows: 1 :widths: 12, 12, 40, 36 * - All Privileges - - - * - - Datastore - - * - - - Allocate space - * - - - Browse datastore - * - - - Low level file operation - * - - - Remove file - * - - Extension - - * - - - Register extension - * - - Folder - - * - - - Create folder - * - - Host - - * - - - Configuration - * - - - - Maintenance * - - - - Network configuration * - - - - Storage partition configuration * - - Network - - * - - - Assign network - * - - Resource - - * - - - Assign virtual machine to resource pool - * - - - Migrate powered off virtual machine - * - - - Migrate powered on virtual machine - * - - Virtual Machine - - * - - - Configuration - * - - - - Add existing disk * - - - - Add new disk * - - - - Add or remove device * - - - - Advanced * - - - - CPU count * - - - - Change resource * - - - - Disk change tracking * - - - - Host USB device * - - - - Memory * - - - - Modify device settings * - - - - Raw device * - - - - Remove disk * - - - - Rename * - - - - Set annotation * - - - - Swapfile placement * - - - Interaction - * - - - - Configure CD media * - - - - Power Off * - - - - Power On * - - - - Reset * - - - - Suspend * - - - Inventory - * - - - - Create from existing * - - - - Create new * - - - - Move * - - - - Remove * - - - - Unregister * - - - Provisioning - * - - - - Clone virtual machine * - - - - Customize * - - - - Create template from virtual machine * - - - Snapshot management - * - - - - Create snapshot * - - - - Remove snapshot * - - Profile-driven storage - - * - - - Profile-driven storage view - * - - Sessions - - * - - - - Validate session * - - - - View and stop sessions * - - vApp - - * - - - Export - * - - - Import - .. _vmware-vcdriver: VMware vCenter driver --------------------- Use the VMware vCenter driver (VMwareVCDriver) to connect OpenStack Compute with vCenter. This recommended configuration enables access through vCenter to advanced vSphere features like vMotion, High Availability, and Dynamic Resource Scheduling (DRS). VMwareVCDriver configuration options ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Add the following VMware-specific configuration options to the ``nova.conf`` file: .. code-block:: ini [DEFAULT] compute_driver = vmwareapi.VMwareVCDriver [vmware] host_ip = host_username = host_password = cluster_name = datastore_regex = .. note:: * Clusters: The vCenter driver can support only a single cluster. Clusters and data stores used by the vCenter driver should not contain any VMs other than those created by the driver. * Data stores: The ``datastore_regex`` setting specifies the data stores to use with Compute. For example, ``datastore_regex="nas.*"`` selects all the data stores that have a name starting with "nas". If this line is omitted, Compute uses the first data store returned by the vSphere API. It is recommended not to use this field and instead remove data stores that are not intended for OpenStack. * Reserved host memory: The ``reserved_host_memory_mb`` option value is 512 MB by default. However, VMware recommends that you set this option to 0 MB because the vCenter driver reports the effective memory available to the virtual machines. * The vCenter driver generates instance name by instance ID. Instance name template is ignored. * The minimum supported vCenter version is 5.5.0. Starting in the OpenStack Ocata release any version lower than 5.5.0 will be logged as a warning. In the OpenStack Pike release this will be enforced. 
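For example, applying the reserved host memory recommendation from the notes above would look like this in ``nova.conf`` (shown here purely as an illustration):

.. code-block:: ini

   [DEFAULT]
   reserved_host_memory_mb = 0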
A ``nova-compute`` service can control one or more clusters containing multiple ESXi hosts, making ``nova-compute`` a critical service from a high availability perspective. Because the host that runs ``nova-compute`` can fail while the vCenter and ESX still run, you must protect the ``nova-compute`` service against host failures. .. note:: Many ``nova.conf`` options are relevant to libvirt but do not apply to this driver. .. _vmware-images: Images with VMware vSphere -------------------------- The vCenter driver supports images in the VMDK format. Disks in this format can be obtained from VMware Fusion or from an ESX environment. It is also possible to convert other formats, such as qcow2, to the VMDK format using the ``qemu-img`` utility. After a VMDK disk is available, load it into the Image service. Then, you can use it with the VMware vCenter driver. The following sections provide additional details on the supported disks and the commands used for conversion and upload. Supported image types ~~~~~~~~~~~~~~~~~~~~~ Upload images to the OpenStack Image service in VMDK format. The following VMDK disk types are supported: * ``VMFS Flat Disks`` (includes thin, thick, zeroedthick, and eagerzeroedthick). Note that once a VMFS thin disk is exported from VMFS to a non-VMFS location, like the OpenStack Image service, it becomes a preallocated flat disk. This impacts the transfer time from the Image service to the data store when the full preallocated flat disk, rather than the thin disk, must be transferred. * ``Monolithic Sparse disks``. Sparse disks get imported from the Image service into ESXi as thin provisioned disks. Monolithic Sparse disks can be obtained from VMware Fusion or can be created by converting from other virtual disk formats using the ``qemu-img`` utility. * ``Stream-optimized disks``. Stream-optimized disks are compressed sparse disks. They can be obtained from VMware vCenter/ESXi when exporting vm to ovf/ova template. The following table shows the ``vmware_disktype`` property that applies to each of the supported VMDK disk types: .. list-table:: OpenStack Image service disk type settings :header-rows: 1 * - vmware_disktype property - VMDK disk type * - sparse - Monolithic Sparse * - thin - VMFS flat, thin provisioned * - preallocated (default) - VMFS flat, thick/zeroedthick/eagerzeroedthick * - streamOptimized - Compressed Sparse The ``vmware_disktype`` property is set when an image is loaded into the Image service. For example, the following command creates a Monolithic Sparse image by setting ``vmware_disktype`` to ``sparse``: .. code-block:: console $ openstack image create \ --disk-format vmdk \ --container-format bare \ --property vmware_disktype="sparse" \ --property vmware_ostype="ubuntu64Guest" \ ubuntu-sparse < ubuntuLTS-sparse.vmdk .. note:: Specifying ``thin`` does not provide any advantage over ``preallocated`` with the current version of the driver. Future versions might restore the thin properties of the disk after it is downloaded to a vSphere data store. The following table shows the ``vmware_ostype`` property that applies to each of the supported guest OS: .. note:: If a glance image has a ``vmware_ostype`` property which does not correspond to a valid VMware guestId, VM creation will fail, and a warning will be logged. .. 
list-table:: OpenStack Image service OS type settings :header-rows: 1 * - vmware_ostype property - Retail Name * - asianux3_64Guest - Asianux Server 3 (64 bit) * - asianux3Guest - Asianux Server 3 * - asianux4_64Guest - Asianux Server 4 (64 bit) * - asianux4Guest - Asianux Server 4 * - darwin64Guest - Darwin 64 bit * - darwinGuest - Darwin * - debian4_64Guest - Debian GNU/Linux 4 (64 bit) * - debian4Guest - Debian GNU/Linux 4 * - debian5_64Guest - Debian GNU/Linux 5 (64 bit) * - debian5Guest - Debian GNU/Linux 5 * - dosGuest - MS-DOS * - freebsd64Guest - FreeBSD x64 * - freebsdGuest - FreeBSD * - mandrivaGuest - Mandriva Linux * - netware4Guest - Novell NetWare 4 * - netware5Guest - Novell NetWare 5.1 * - netware6Guest - Novell NetWare 6.x * - nld9Guest - Novell Linux Desktop 9 * - oesGuest - Open Enterprise Server * - openServer5Guest - SCO OpenServer 5 * - openServer6Guest - SCO OpenServer 6 * - opensuse64Guest - openSUSE (64 bit) * - opensuseGuest - openSUSE * - os2Guest - OS/2 * - other24xLinux64Guest - Linux 2.4x Kernel (64 bit) (experimental) * - other24xLinuxGuest - Linux 2.4x Kernel * - other26xLinux64Guest - Linux 2.6x Kernel (64 bit) (experimental) * - other26xLinuxGuest - Linux 2.6x Kernel (experimental) * - otherGuest - Other Operating System * - otherGuest64 - Other Operating System (64 bit) (experimental) * - otherLinux64Guest - Linux (64 bit) (experimental) * - otherLinuxGuest - Other Linux * - redhatGuest - Red Hat Linux 2.1 * - rhel2Guest - Red Hat Enterprise Linux 2 * - rhel3_64Guest - Red Hat Enterprise Linux 3 (64 bit) * - rhel3Guest - Red Hat Enterprise Linux 3 * - rhel4_64Guest - Red Hat Enterprise Linux 4 (64 bit) * - rhel4Guest - Red Hat Enterprise Linux 4 * - rhel5_64Guest - Red Hat Enterprise Linux 5 (64 bit) (experimental) * - rhel5Guest - Red Hat Enterprise Linux 5 * - rhel6_64Guest - Red Hat Enterprise Linux 6 (64 bit) * - rhel6Guest - Red Hat Enterprise Linux 6 * - sjdsGuest - Sun Java Desktop System * - sles10_64Guest - SUSE Linux Enterprise Server 10 (64 bit) (experimental) * - sles10Guest - SUSE Linux Enterprise Server 10 * - sles11_64Guest - SUSE Linux Enterprise Server 11 (64 bit) * - sles11Guest - SUSE Linux Enterprise Server 11 * - sles64Guest - SUSE Linux Enterprise Server 9 (64 bit) * - slesGuest - SUSE Linux Enterprise Server 9 * - solaris10_64Guest - Solaris 10 (64 bit) (experimental) * - solaris10Guest - Solaris 10 (32 bit) (experimental) * - solaris6Guest - Solaris 6 * - solaris7Guest - Solaris 7 * - solaris8Guest - Solaris 8 * - solaris9Guest - Solaris 9 * - suse64Guest - SUSE Linux (64 bit) * - suseGuest - SUSE Linux * - turboLinux64Guest - Turbolinux (64 bit) * - turboLinuxGuest - Turbolinux * - ubuntu64Guest - Ubuntu Linux (64 bit) * - ubuntuGuest - Ubuntu Linux * - unixWare7Guest - SCO UnixWare 7 * - win2000AdvServGuest - Windows 2000 Advanced Server * - win2000ProGuest - Windows 2000 Professional * - win2000ServGuest - Windows 2000 Server * - win31Guest - Windows 3.1 * - win95Guest - Windows 95 * - win98Guest - Windows 98 * - windows7_64Guest - Windows 7 (64 bit) * - windows7Guest - Windows 7 * - windows7Server64Guest - Windows Server 2008 R2 (64 bit) * - winLonghorn64Guest - Windows Longhorn (64 bit) (experimental) * - winLonghornGuest - Windows Longhorn (experimental) * - winMeGuest - Windows Millennium Edition * - winNetBusinessGuest - Windows Small Business Server 2003 * - winNetDatacenter64Guest - Windows Server 2003, Datacenter Edition (64 bit) (experimental) * - winNetDatacenterGuest - Windows Server 2003, Datacenter Edition * - 
winNetEnterprise64Guest - Windows Server 2003, Enterprise Edition (64 bit) * - winNetEnterpriseGuest - Windows Server 2003, Enterprise Edition * - winNetStandard64Guest - Windows Server 2003, Standard Edition (64 bit) * - winNetEnterpriseGuest - Windows Server 2003, Enterprise Edition * - winNetStandard64Guest - Windows Server 2003, Standard Edition (64 bit) * - winNetStandardGuest - Windows Server 2003, Standard Edition * - winNetWebGuest - Windows Server 2003, Web Edition * - winNTGuest - Windows NT 4 * - winVista64Guest - Windows Vista (64 bit) * - winVistaGuest - Windows Vista * - winXPHomeGuest - Windows XP Home Edition * - winXPPro64Guest - Windows XP Professional Edition (64 bit) * - winXPProGuest - Windows XP Professional Convert and load images ~~~~~~~~~~~~~~~~~~~~~~~ Using the ``qemu-img`` utility, disk images in several formats (such as, qcow2) can be converted to the VMDK format. For example, the following command can be used to convert a `qcow2 Ubuntu Trusty cloud image `_: .. code-block:: console $ qemu-img convert -f qcow2 ~/Downloads/trusty-server-cloudimg-amd64-disk1.img \ -O vmdk trusty-server-cloudimg-amd64-disk1.vmdk VMDK disks converted through ``qemu-img`` are ``always`` monolithic sparse VMDK disks with an IDE adapter type. Using the previous example of the Ubuntu Trusty image after the ``qemu-img`` conversion, the command to upload the VMDK disk should be something like: .. code-block:: console $ openstack image create \ --container-format bare --disk-format vmdk \ --property vmware_disktype="sparse" \ --property vmware_adaptertype="ide" \ trusty-cloud < trusty-server-cloudimg-amd64-disk1.vmdk Note that the ``vmware_disktype`` is set to ``sparse`` and the ``vmware_adaptertype`` is set to ``ide`` in the previous command. If the image did not come from the ``qemu-img`` utility, the ``vmware_disktype`` and ``vmware_adaptertype`` might be different. To determine the image adapter type from an image file, use the following command and look for the ``ddb.adapterType=`` line: .. code-block:: console $ head -20 Assuming a preallocated disk type and an iSCSI lsiLogic adapter type, the following command uploads the VMDK disk: .. code-block:: console $ openstack image create \ --disk-format vmdk \ --container-format bare \ --property vmware_adaptertype="lsiLogic" \ --property vmware_disktype="preallocated" \ --property vmware_ostype="ubuntu64Guest" \ ubuntu-thick-scsi < ubuntuLTS-flat.vmdk Currently, OS boot VMDK disks with an IDE adapter type cannot be attached to a virtual SCSI controller and likewise disks with one of the SCSI adapter types (such as, busLogic, lsiLogic, lsiLogicsas, paraVirtual) cannot be attached to the IDE controller. Therefore, as the previous examples show, it is important to set the ``vmware_adaptertype`` property correctly. The default adapter type is lsiLogic, which is SCSI, so you can omit the ``vmware_adaptertype`` property if you are certain that the image adapter type is lsiLogic. Tag VMware images ~~~~~~~~~~~~~~~~~ In a mixed hypervisor environment, OpenStack Compute uses the ``hypervisor_type`` tag to match images to the correct hypervisor type. For VMware images, set the hypervisor type to ``vmware``. Other valid hypervisor types include: ``ironic``, ``lxc``, and ``qemu``. Note that ``qemu`` is used for both QEMU and KVM hypervisor types. .. 
code-block:: console $ openstack image create \ --disk-format vmdk \ --container-format bare \ --property vmware_adaptertype="lsiLogic" \ --property vmware_disktype="preallocated" \ --property hypervisor_type="vmware" \ --property vmware_ostype="ubuntu64Guest" \ ubuntu-thick-scsi < ubuntuLTS-flat.vmdk Optimize images ~~~~~~~~~~~~~~~ Monolithic Sparse disks are considerably faster to download but have the overhead of an additional conversion step. When imported into ESX, sparse disks get converted to VMFS flat thin provisioned disks. The download and conversion steps only affect the first launched instance that uses the sparse disk image. The converted disk image is cached, so subsequent instances that use this disk image can simply use the cached version. To avoid the conversion step (at the cost of longer download times) consider converting sparse disks to thin provisioned or preallocated disks before loading them into the Image service. Use one of the following tools to pre-convert sparse disks. vSphere CLI tools Sometimes called the remote CLI or rCLI. Assuming that the sparse disk is made available on a data store accessible by an ESX host, the following command converts it to preallocated format: .. code-block:: console vmkfstools --server=ip_of_some_ESX_host -i \ /vmfs/volumes/datastore1/sparse.vmdk \ /vmfs/volumes/datastore1/converted.vmdk Note that the vifs tool from the same CLI package can be used to upload the disk to be converted. The vifs tool can also be used to download the converted disk if necessary. ``vmkfstools`` directly on the ESX host If the SSH service is enabled on an ESX host, the sparse disk can be uploaded to the ESX data store through scp and the vmkfstools local to the ESX host can use used to perform the conversion. After you log in to the host through ssh, run this command: .. code-block:: console vmkfstools -i /vmfs/volumes/datastore1/sparse.vmdk /vmfs/volumes/datastore1/converted.vmdk ``vmware-vdiskmanager`` ``vmware-vdiskmanager`` is a utility that comes bundled with VMware Fusion and VMware Workstation. The following example converts a sparse disk to preallocated format: .. code-block:: console '/Applications/VMware Fusion.app/Contents/Library/vmware-vdiskmanager' -r sparse.vmdk -t 4 converted.vmdk In the previous cases, the converted vmdk is actually a pair of files: * The descriptor file ``converted.vmdk``. * The actual virtual disk data file ``converted-flat.vmdk``. The file to be uploaded to the Image service is ``converted-flat.vmdk``. Image handling ~~~~~~~~~~~~~~ The ESX hypervisor requires a copy of the VMDK file in order to boot up a virtual machine. As a result, the vCenter OpenStack Compute driver must download the VMDK via HTTP from the Image service to a data store that is visible to the hypervisor. To optimize this process, the first time a VMDK file is used, it gets cached in the data store. A cached image is stored in a folder named after the image ID. Subsequent virtual machines that need the VMDK use the cached version and don't have to copy the file again from the Image service. Even with a cached VMDK, there is still a copy operation from the cache location to the hypervisor file directory in the shared data store. To avoid this copy, boot the image in linked_clone mode. To learn how to enable this mode, see :oslo.config:option:`vmware.use_linked_clone`. .. note:: You can also use the ``img_linked_clone`` property (or legacy property ``vmware_linked_clone``) in the Image service to override the linked_clone mode on a per-image basis. 
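For example, the per-image override could be applied to an existing image roughly as follows (a sketch; the image name reuses the example above and the property takes the usual true/false string value):

.. code-block:: console

   $ openstack image set \
     --property img_linked_clone="true" \
     ubuntu-thick-scsi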
If spawning a virtual machine image from ISO with a VMDK disk, the image is created and attached to the virtual machine as a blank disk. In that case ``img_linked_clone`` property for the image is just ignored. If multiple compute nodes are running on the same host, or have a shared file system, you can enable them to use the same cache folder on the back-end data store. To configure this action, set the ``cache_prefix`` option in the ``nova.conf`` file. Its value stands for the name prefix of the folder where cached images are stored. .. note:: This can take effect only if compute nodes are running on the same host, or have a shared file system. You can automatically purge unused images after a specified period of time. To configure this action, set these options in the :oslo.config:group:`image_cache` section in the ``nova.conf`` file: * :oslo.config:option:`image_cache.remove_unused_base_images` * :oslo.config:option:`image_cache.remove_unused_original_minimum_age_seconds` .. _vmware-networking: Networking with VMware vSphere ------------------------------ The VMware driver supports networking with the Networking Service (neutron). Depending on your installation, complete these configuration steps before you provision VMs: #. Before provisioning VMs, create a port group with the same name as the ``vmware.integration_bridge`` value in ``nova.conf`` (default is ``br-int``). All VM NICs are attached to this port group for management by the OpenStack Networking plug-in. Volumes with VMware vSphere --------------------------- The VMware driver supports attaching volumes from the Block Storage service. The VMware VMDK driver for OpenStack Block Storage is recommended and should be used for managing volumes based on vSphere data stores. For more information about the VMware VMDK driver, see Cinder's manual on the VMDK Driver (TODO: this has not yet been imported and published). Also an iSCSI volume driver provides limited support and can be used only for attachments. Troubleshooting --------------- Operators can troubleshoot VMware specific failures by correlating OpenStack logs to vCenter logs. Every RPC call which is made by an OpenStack driver has an ``opID`` which can be traced in the vCenter logs. For example consider the following excerpt from a ``nova-compute`` log: .. code-block:: console Aug 15 07:31:09 localhost nova-compute[16683]: DEBUG oslo_vmware.service [-] Invoking Folder.CreateVM_Task with opID=oslo.vmware-debb6064-690e-45ac-b0ae-1b94a9638d1f {{(pid=16683) request_handler /opt/stack/oslo.vmware/oslo_vmware/service.py:355}} In this case the ``opID`` is ``oslo.vmware-debb6064-690e-45ac-b0ae-1b94a9638d1f`` and we can grep the vCenter log (usually ``/var/log/vmware/vpxd/vpxd.log``) for it to find if anything went wrong with the ``CreateVM`` operation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/hypervisor-zvm.rst0000664000175000017500000001414600000000000024352 0ustar00zuulzuul00000000000000=== zVM === z/VM System Requirements ------------------------ * The appropriate APARs installed, the current list of which can be found: z/VM OpenStack Cloud Information (http://www.vm.ibm.com/sysman/osmntlvl.html). .. note:: IBM z Systems hardware requirements are based on both the applications and the load on the system. Active Engine Guide ------------------- Active engine is used as an initial configuration and management tool during deployed machine startup. 
Currently the z/VM driver uses ``zvmguestconfigure`` and ``cloud-init`` as a two stage active engine. Installation and Configuration of zvmguestconfigure ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Cloudlib4zvm supports initiating changes to a Linux on z Systems virtual machine while Linux is shut down or the virtual machine is logged off. The changes to Linux are implemented using an activation engine (AE) that is run when Linux is booted the next time. The first active engine, ``zvmguestconfigure``, must be installed in the Linux on z Systems virtual server so it can process change request files transmitted by the cloudlib4zvm service to the reader of the virtual machine as a class X file. .. note:: An additional activation engine, cloud-init, should be installed to handle OpenStack related tailoring of the system. The cloud-init AE relies on tailoring performed by ``zvmguestconfigure``. Installation and Configuration of cloud-init ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OpenStack uses cloud-init as its activation engine. Some Linux distributions include cloud-init either already installed or available to be installed. If your distribution does not include cloud-init, you can download the code from https://launchpad.net/cloud-init/+download. After installation, if you issue the following shell command and no errors occur, cloud-init is installed correctly:: cloud-init init --local Installation and configuration of cloud-init differs among different Linux distributions, and cloud-init source code may change. This section provides general information, but you may have to tailor cloud-init to meet the needs of your Linux distribution. You can find a community-maintained list of dependencies at http://ibm.biz/cloudinitLoZ. As of the Rocky release, the z/VM OpenStack support has been tested with cloud-init 0.7.4 and 0.7.5 for RHEL6.x and SLES11.x, 0.7.6 for RHEL7.x and SLES12.x, and 0.7.8 for Ubuntu 16.04. During cloud-init installation, some dependency packages may be required. You can use zypper and python setuptools to easily resolve these dependencies. See https://pypi.python.org/pypi/setuptools for more information. Image guide ----------- This guideline will describe the requirements and steps to create and configure images for use with z/VM. Image Requirements ~~~~~~~~~~~~~~~~~~ * The following Linux distributions are supported for deploy: * RHEL 6.2, 6.3, 6.4, 6.5, 6.6, and 6.7 * RHEL 7.0, 7.1 and 7.2 * SLES 11.2, 11.3, and 11.4 * SLES 12 and SLES 12.1 * Ubuntu 16.04 * A supported root disk type for snapshot/spawn. The following are supported: * FBA * ECKD * An image deployed on a compute node must match the disk type supported by that compute node, as configured by the ``zvm_diskpool_type`` property in the `zvmsdk.conf`_ configuration file in `zvm cloud connector`_ A compute node supports deployment on either an ECKD or FBA image, but not both at the same time. If you wish to switch image types, you need to change the ``zvm_diskpool_type`` and ``zvm_diskpool`` properties in the `zvmsdk.conf`_ file, accordingly. Then restart the nova-compute service to make the changes take effect. * If you deploy an instance with an ephemeral disk, both the root disk and the ephemeral disk will be created with the disk type that was specified by ``zvm_diskpool_type`` property in the `zvmsdk.conf`_ file. That property can specify either ECKD or FBA. * The network interfaces must be IPv4 interfaces. 
* Image names should be restricted to the UTF-8 subset, which corresponds to the ASCII character set. In addition, special characters such as ``/``, ``\``, ``$``, ``%``, ``@`` should not be used. For the FBA disk type "vm", capture and deploy is supported only for an FBA disk with a single partition. Capture and deploy is not supported for the FBA disk type "vm" on a CMS formatted FBA disk. * The virtual server/Linux instance used as the source of the new image should meet the following criteria: 1. The root filesystem must not be on a logical volume. 2. The minidisk on which the root filesystem resides should be a minidisk of the same type as desired for a subsequent deploy (for example, an ECKD disk image should be captured for a subsequent deploy to an ECKD disk). 3. The minidisks should not be a full-pack minidisk, since cylinder 0 on full-pack minidisks is reserved, and should be defined with virtual address 0100. 4. The root disk should have a single partition. 5. The image being captured should not have any network interface cards (NICs) defined below virtual address 1100. In addition to the specified criteria, the following recommendations allow for efficient use of the image: * The minidisk on which the root filesystem resides should be defined as a multiple of full gigabytes in size (for example, 1GB or 2GB). OpenStack specifies disk sizes in full gigabyte values, whereas z/VM handles disk sizes in other ways (cylinders for ECKD disks, blocks for FBA disks, and so on). See the appropriate online information if you need to convert cylinders or blocks to gigabytes; for example: http://www.mvsforums.com/helpboards/viewtopic.php?t=8316. * During subsequent deploys of the image, the OpenStack code will ensure that a disk image is not copied to a disk smaller than the source disk, as this would result in loss of data. The disk specified in the flavor should therefore be equal to or slightly larger than the source virtual machine's root disk. .. _zvmsdk.conf: https://cloudlib4zvm.readthedocs.io/en/latest/configuration.html#configuration-options .. _zvm cloud connector: https://cloudlib4zvm.readthedocs.io/en/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/hypervisors.rst0000664000175000017500000000510600000000000023717 0ustar00zuulzuul00000000000000=========== Hypervisors =========== .. toctree:: :maxdepth: 1 hypervisor-kvm hypervisor-qemu hypervisor-lxc hypervisor-vmware hypervisor-virtuozzo hypervisor-zvm hypervisor-ironic OpenStack Compute supports many hypervisors, which might make it difficult for you to choose one. Most installations use only one hypervisor. However, you can use :ref:`ComputeFilter` and :ref:`ImagePropertiesFilter` to schedule different hypervisors within the same installation. The following links help you choose a hypervisor. See :doc:`/user/support-matrix` for a detailed list of features and support across the hypervisors. The following hypervisors are supported: * `KVM`_ - Kernel-based Virtual Machine. The virtual disk formats that it supports is inherited from QEMU since it uses a modified QEMU program to launch the virtual machine. The supported formats include raw images, the qcow2, and VMware formats. * `LXC`_ - Linux Containers (through libvirt), used to run Linux-based virtual machines. * `QEMU`_ - Quick EMUlator, generally only used for development purposes. 
* `VMware vSphere`_ 5.1.0 and newer - Runs VMware-based Linux and Windows images through a connection with a vCenter server. * `Virtuozzo`_ 7.0.0 and newer - OS Containers and Kernel-based Virtual Machines supported. The supported formats include ploop and qcow2 images. * `zVM`_ - Server virtualization on z Systems and IBM LinuxONE, it can run Linux, z/OS and more. * `Ironic`_ - OpenStack project which provisions bare metal (as opposed to virtual) machines. Nova supports hypervisors via virt drivers. Nova has the following in tree virt drivers: * :oslo.config:option:`compute_driver` = ``libvirt.LibvirtDriver`` This driver runs on Linux and supports multiple hypervisor backends, which can be configured via the :oslo.config:option:`libvirt.virt_type` config option. * :oslo.config:option:`compute_driver` = ``ironic.IronicDriver`` * :oslo.config:option:`compute_driver` = ``vmwareapi.VMwareVCDriver`` * :oslo.config:option:`compute_driver` = ``zvm.ZVMDriver`` * :oslo.config:option:`compute_driver` = ``fake.FakeDriver`` This driver does not spawn any virtual machines and therefore should only be used during testing. .. _KVM: https://www.linux-kvm.org/page/Main_Page .. _LXC: https://linuxcontainers.org .. _QEMU: https://wiki.qemu.org/Manual .. _VMware vSphere: https://www.vmware.com/support/vsphere-hypervisor.html .. _Virtuozzo: https://www.virtuozzo.com/products/vz7.html .. _zVM: https://www.ibm.com/it-infrastructure/z/zvm .. _Ironic: https://docs.openstack.org/ironic/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/index.rst0000664000175000017500000000152700000000000022434 0ustar00zuulzuul00000000000000============= Configuration ============= To configure your Compute installation, you must define configuration options in these files: * ``nova.conf`` contains most of the Compute configuration options and resides in the ``/etc/nova`` directory. * ``api-paste.ini`` defines Compute limits and resides in the ``/etc/nova`` directory. * Configuration files for related services, such as the Image and Identity services. A list of config options based on different topics can be found below: .. toctree:: :maxdepth: 1 /admin/configuration/service-user-token /admin/configuration/api /admin/configuration/resize /admin/configuration/cross-cell-resize /admin/configuration/fibre-channel /admin/configuration/iscsi-offload /admin/configuration/hypervisors /admin/configuration/logs /admin/configuration/samples/index ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/iscsi-offload.rst0000664000175000017500000000563600000000000024054 0ustar00zuulzuul00000000000000=============================================== Configuring iSCSI interface and offload support =============================================== Compute supports open-iscsi iSCSI interfaces for offload cards. Offload hardware must be present and configured on every compute node where offload is desired. Once an open-iscsi interface is configured, the iface name (``iface.iscsi_ifacename``) should be passed to libvirt via the ``iscsi_iface`` parameter for use. All iSCSI sessions will be bound to this iSCSI interface. Currently supported transports (``iface.transport_name``) are ``be2iscsi``, ``bnx2i``, ``cxgb3i``, ``cxgb4i``, ``qla4xxx``, ``ocs``, ``tcp``. Configuration changes are required on the compute node only. 
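As a minimal sketch, assuming an iface named ``cxgb4i.00:07:43:28:b2:58`` has already been created with ``iscsiadm`` (see the example output below), the compute node configuration would carry something like:

.. code-block:: ini

   [libvirt]
   # Bind all iSCSI volume attachments on this host to this open-iscsi iface
   iscsi_iface = cxgb4i.00:07:43:28:b2:58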
iSER is supported using the separate iSER LibvirtISERVolumeDriver and will be rejected if used via the ``iscsi_iface`` parameter. iSCSI iface configuration ~~~~~~~~~~~~~~~~~~~~~~~~~ * Note the distinction between the transport name (``iface.transport_name``) and iface name (``iface.iscsi_ifacename``). The actual iface name must be specified via the iscsi_iface parameter to libvirt for offload to work. * The default name for an iSCSI iface (open-iscsi parameter ``iface.iscsi_ifacename``) is in the format transport_name.hwaddress when generated by ``iscsiadm``. * ``iscsiadm`` can be used to view and generate current iface configuration. Every network interface that supports an open-iscsi transport can have one or more iscsi ifaces associated with it. If no ifaces have been configured for a network interface supported by an open-iscsi transport, this command will create a default iface configuration for that network interface. For example : .. code-block:: console # iscsiadm -m iface default tcp,,,, iser iser,,,, bnx2i.00:05:b5:d2:a0:c2 bnx2i,00:05:b5:d2:a0:c2,5.10.10.20,, The output is in the format:: iface_name transport_name,hwaddress,ipaddress,net_ifacename,initiatorname * Individual iface configuration can be viewed via .. code-block:: console # iscsiadm -m iface -I IFACE_NAME # BEGIN RECORD 2.0-873 iface.iscsi_ifacename = cxgb4i.00:07:43:28:b2:58 iface.net_ifacename = iface.ipaddress = 102.50.50.80 iface.hwaddress = 00:07:43:28:b2:58 iface.transport_name = cxgb4i iface.initiatorname = # END RECORD Configuration can be updated as desired via .. code-block:: console # iscsiadm -m iface-I IFACE_NAME--op=update -n iface.SETTING -v VALUE * All iface configurations need a minimum of ``iface.iface_name``, ``iface.transport_name`` and ``iface.hwaddress`` to be correctly configured to work. Some transports may require ``iface.ipaddress`` and ``iface.net_ifacename`` as well to bind correctly. Detailed configuration instructions can be found at: https://github.com/open-iscsi/open-iscsi/blob/master/README ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/logs.rst0000664000175000017500000000152500000000000022267 0ustar00zuulzuul00000000000000================= Compute log files ================= The corresponding log file of each Compute service is stored in the ``/var/log/nova/`` directory of the host on which each service runs. .. list-table:: Log files used by Compute services :widths: 35 35 30 :header-rows: 1 * - Log file - Service name (CentOS/Fedora/openSUSE/Red Hat Enterprise Linux/SUSE Linux Enterprise) - Service name (Ubuntu/Debian) * - ``nova-api.log`` - ``openstack-nova-api`` - ``nova-api`` * - ``nova-compute.log`` - ``openstack-nova-compute`` - ``nova-compute`` * - ``nova-conductor.log`` - ``openstack-nova-conductor`` - ``nova-conductor`` * - ``nova-manage.log`` - ``nova-manage`` - ``nova-manage`` * - ``nova-scheduler.log`` - ``openstack-nova-scheduler`` - ``nova-scheduler`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/resize.rst0000664000175000017500000000252200000000000022622 0ustar00zuulzuul00000000000000====== Resize ====== Resize (or Server resize) is the ability to change the flavor of a server, thus allowing it to upscale or downscale according to user needs. For this feature to work properly, you might need to configure some underlying virt layers. 
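For context, the operation itself is requested by the end user against the API; a sketch with a placeholder flavor and server name:

.. code-block:: console

   $ openstack server resize --flavor m1.large myserver

Once the server reaches the ``VERIFY_RESIZE`` state, the resize must be confirmed or reverted, either by the user or automatically as described under `Automatic confirm`_ below.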
This document describes how to configure hosts for standard resize. For information on :term:`cross-cell resize `, refer to :doc:`/admin/configuration/cross-cell-resize`. Virt drivers ------------ .. todo:: This section needs to be updated for other virt drivers, shared storage considerations, etc. KVM ~~~ Resize on KVM is implemented currently by transferring the images between compute nodes over ssh. For KVM you need hostnames to resolve properly and passwordless ssh access between your compute hosts. Direct access from one compute host to another is needed to copy the VM file across. Cloud end users can find out how to resize a server by reading :doc:`/user/resize`. Automatic confirm ----------------- There is a periodic task configured by configuration option :oslo.config:option:`resize_confirm_window` (in seconds). If this value is not 0, the ``nova-compute`` service will check whether servers are in a resized state longer than the value of :oslo.config:option:`resize_confirm_window` and if so will automatically confirm the resize of the servers. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.2376077 nova-32.0.0/doc/source/admin/configuration/samples/0000775000175000017500000000000000000000000022232 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/samples/api-paste.ini.rst0000664000175000017500000000026700000000000025432 0ustar00zuulzuul00000000000000============= api-paste.ini ============= The Compute service stores its API configuration settings in the ``api-paste.ini`` file. .. literalinclude:: /../../etc/nova/api-paste.ini ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/samples/index.rst0000664000175000017500000000075600000000000024103 0ustar00zuulzuul00000000000000========================================== Compute service sample configuration files ========================================== Files in this section can be found in ``/etc/nova``. .. toctree:: :maxdepth: 2 api-paste.ini rootwrap.conf .. # NOTE(gmann): Keep policy sample file for HTML only. # Sample file are too large and cause TeX memeor issue. # ref bug# https://bugs.launchpad.net/nova/+bug/1883200 .. only:: html .. toctree:: :maxdepth: 2 policy.yaml ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/samples/policy.yaml.rst0000664000175000017500000000031500000000000025223 0ustar00zuulzuul00000000000000=========== policy.yaml =========== The ``policy.yaml`` file defines additional access controls that apply to the Compute service. .. literalinclude:: /_static/nova.policy.yaml.sample :language: yaml ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/samples/rootwrap.conf.rst0000664000175000017500000000070600000000000025570 0ustar00zuulzuul00000000000000============= rootwrap.conf ============= The ``rootwrap.conf`` file defines configuration values used by the rootwrap script when the Compute service needs to escalate its privileges to those of the root user. It is also possible to disable the root wrapper, and default to sudo only. 
Configure the ``disable_rootwrap`` option in the ``[workaround]`` section of the ``nova.conf`` configuration file. .. literalinclude:: /../../etc/nova/rootwrap.conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuration/service-user-token.rst0000664000175000017500000000407100000000000025054 0ustar00zuulzuul00000000000000.. _service_user_token: =================== Service User Tokens =================== .. warning:: Configuration of service user tokens is **required** for every Nova service for security reasons. See https://bugs.launchpad.net/nova/+bug/2004555 for details. Configure Nova to send service user tokens alongside regular user tokens when making REST API calls to other services. The identity service (Keystone) will authenticate a request using the service user token if the regular user token has expired. This is important when long-running operations such as live migration or snapshot take long enough to exceed the expiry of the user token. Without the service token, if a long-running operation exceeds the expiry of the user token, post operations such as cleanup after a live migration could fail when Nova calls other service APIs like block-storage (Cinder) or networking (Neutron). The service token is also used by services to validate whether the API caller is a service. Some service APIs are restricted to service users only. To set up service tokens, create a ``nova`` service user and ``service`` role in the identity service (Keystone) and assign the ``service`` role to the ``nova`` service user. Then, configure the :oslo.config:group:`service_user` section of the Nova configuration file, for example: .. code-block:: ini [service_user] send_service_user_token = true auth_url = $AUTH_URL auth_type = password project_domain_name = $PROJECT_DOMAIN_NAME project_name = service user_domain_name = $USER_DOMAIN_NAME username = nova password = $SERVICE_USER_PASSWORD ... And configure the other identity options as necessary for the service user, much like you would configure nova to work with the image service (Glance) or networking service (Neutron). .. note:: Please note that the role assigned to the :oslo.config:group:`service_user` needs to be in the configured :oslo.config:option:`keystone_authtoken.service_token_roles` of other services such as block-storage (Cinder), image (Glance), and networking (Neutron). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/configuring-migrations.rst0000664000175000017500000003122500000000000023140 0ustar00zuulzuul00000000000000.. _section_configuring-compute-migrations: ========================= Configure live migrations ========================= Migration enables an administrator to move a virtual machine instance from one compute host to another. A typical scenario is planned maintenance on the source host, but migration can also be useful to redistribute the load when many VM instances are running on a specific physical machine. This document covers live migrations using the :ref:`configuring-migrations-kvm-libvirt` and VMWare hypervisors .. :ref:`_configuring-migrations-kvm-libvirt` .. note:: Not all Compute service hypervisor drivers support live-migration, or support all live-migration features. Similarly not all compute service features are supported. Consult :doc:`/user/support-matrix` to determine which hypervisors support live-migration. 
See the :doc:`/configuration/index` for details on hypervisor configuration settings. The migration types are: - **Non-live migration**, also known as cold migration or simply migration. The instance is shut down, then moved to another hypervisor and restarted. The instance recognizes that it was rebooted, and the application running on the instance is disrupted. This section does not cover cold migration. - **Live migration** The instance keeps running throughout the migration. This is useful when it is not possible or desirable to stop the application running on the instance. Live migrations can be classified further by the way they treat instance storage: - **Shared storage-based live migration**. The instance has ephemeral disks that are located on storage shared between the source and destination hosts. - **Block live migration**, or simply block migration. The instance has ephemeral disks that are not shared between the source and destination hosts. Block migration is incompatible with read-only devices such as CD-ROMs and Configuration Drive (config\_drive). - **Volume-backed live migration**. Instances use volumes rather than ephemeral disks. Block live migration requires copying disks from the source to the destination host. It takes more time and puts more load on the network. Shared-storage and volume-backed live migration does not copy disks. .. note:: In a multi-cell cloud, instances can be live migrated to a different host in the same cell, but not across cells. Refer to the :ref:`cells v2 documentation `. for more information. The following sections describe how to configure your hosts for live migrations using the libvirt virt driver and KVM hypervisor. .. _configuring-migrations-kvm-libvirt: Libvirt ------- .. _configuring-migrations-kvm-general: General configuration ~~~~~~~~~~~~~~~~~~~~~ To enable any type of live migration, configure the compute hosts according to the instructions below: #. Set the following parameters in ``nova.conf`` on all compute hosts: - ``server_listen=0.0.0.0`` You must not make the VNC server listen to the IP address of its compute host, since that addresses changes when the instance is migrated. .. important:: Since this setting allows VNC clients from any IP address to connect to instance consoles, you must take additional measures like secure networks or firewalls to prevent potential attackers from gaining access to instances. - ``instances_path`` must have the same value for all compute hosts. In this guide, the value ``/var/lib/nova/instances`` is assumed. #. Ensure that name resolution on all compute hosts is identical, so that they can connect each other through their hostnames. If you use ``/etc/hosts`` for name resolution and enable SELinux, ensure that ``/etc/hosts`` has the correct SELinux context: .. code-block:: console # restorecon /etc/hosts #. Enable password-less SSH so that root on one compute host can log on to any other compute host without providing a password. The ``libvirtd`` daemon, which runs as root, uses the SSH protocol to copy the instance to the destination and can't know the passwords of all compute hosts. You may, for example, compile root's public SSH keys on all compute hosts into an ``authorized_keys`` file and deploy that file to the compute hosts. #. Configure the firewalls to allow libvirt to communicate between compute hosts. By default, libvirt uses the TCP port range from 49152 to 49261 for copying memory and disk contents. Compute hosts must accept connections in this range. 
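   For example, on hosts managed with firewalld, the range quoted above might be opened with commands along these lines (a sketch; adjust the port range and zone to your deployment):

   .. code-block:: console

      # firewall-cmd --permanent --add-port=49152-49261/tcp
      # firewall-cmd --reload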
For information about ports used by libvirt, see the `libvirt documentation `_. .. important:: Be mindful of the security risks introduced by opening ports. .. _`configuring-migrations-securing-live-migration-streams`: Securing live migration streams ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If your compute nodes have at least libvirt 4.4.0 and QEMU 2.11.0, it is strongly recommended to secure all your live migration streams by taking advantage of the "QEMU-native TLS" feature. This requires a pre-existing PKI (Public Key Infrastructure) setup. For further details on how to set this all up, refer to the :doc:`secure-live-migration-with-qemu-native-tls` document. .. _configuring-migrations-kvm-block-and-volume-migration: Block migration, volume-based live migration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If your environment satisfies the requirements for "QEMU-native TLS", then block migration requires some setup; refer to the above section, `Securing live migration streams`_, for details. Otherwise, no additional configuration is required for block migration and volume-backed live migration. Be aware that block migration adds load to the network and storage subsystems. .. _configuring-migrations-kvm-shared-storage: Shared storage ~~~~~~~~~~~~~~ Compute hosts have many options for sharing storage, for example NFS, shared disk array LUNs, Ceph or GlusterFS. The next steps show how a regular Linux system might be configured as an NFS v4 server for live migration. For detailed information and alternative ways to configure NFS on Linux, see instructions for `Ubuntu`_, `RHEL and derivatives`_ or `SLES and OpenSUSE`_. .. _`Ubuntu`: https://help.ubuntu.com/community/SettingUpNFSHowTo .. _`RHEL and derivatives`: https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Storage_Administration_Guide/nfs-serverconfig.html .. _`SLES and OpenSUSE`: https://www.suse.com/documentation/sles-12/book_sle_admin/data/sec_nfs_configuring-nfs-server.html #. Ensure that UID and GID of the nova user are identical on the compute hosts and the NFS server. #. Create a directory with enough disk space for all instances in the cloud, owned by user nova. In this guide, we assume ``/var/lib/nova/instances``. #. Set the execute/search bit on the ``instances`` directory: .. code-block:: console $ chmod o+x /var/lib/nova/instances This allows qemu to access the ``instances`` directory tree. #. Export ``/var/lib/nova/instances`` to the compute hosts. For example, add the following line to ``/etc/exports``: .. code-block:: ini /var/lib/nova/instances *(rw,sync,fsid=0,no_root_squash) The asterisk permits access to any NFS client. The option ``fsid=0`` exports the instances directory as the NFS root. After setting up the NFS server, mount the remote filesystem on all compute hosts. #. Assuming the NFS server's hostname is ``nfs-server``, add this line to ``/etc/fstab`` to mount the NFS root: .. code-block:: console nfs-server:/ /var/lib/nova/instances nfs4 defaults 0 0 #. Test NFS by mounting the instances directory and check access permissions for the nova user: .. code-block:: console $ sudo mount -a -v $ ls -ld /var/lib/nova/instances/ drwxr-xr-x. 2 nova nova 6 Mar 14 21:30 /var/lib/nova/instances/ .. _configuring-migrations-kvm-advanced: Advanced configuration for KVM and QEMU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Live migration copies the instance's memory from the source to the destination compute host. 
After a memory page has been copied, the instance may write to it again, so that it has to be copied again. Instances that frequently write to different memory pages can overwhelm the memory copy process and prevent the live migration from completing. This section covers configuration settings that can help live migration of memory-intensive instances succeed. #. **Live migration completion timeout** The Compute service will either abort or force complete a migration when it has been running too long. This behavior is configurable using the :oslo.config:option:`libvirt.live_migration_timeout_action` config option. The timeout is calculated based on the instance size, which is the instance's memory size in GiB. In the case of block migration, the size of ephemeral storage in GiB is added. The timeout in seconds is the instance size multiplied by the configurable parameter :oslo.config:option:`libvirt.live_migration_completion_timeout`, whose default is 800. For example, shared-storage live migration of an instance with 8GiB memory will time out after 6400 seconds. #. **Instance downtime** Near the end of the memory copy, the instance is paused for a short time so that the remaining few pages can be copied without interference from instance memory writes. The Compute service initializes this time to a small value that depends on the instance size, typically around 50 milliseconds. When it notices that the memory copy does not make sufficient progress, it increases the time gradually. You can influence the instance downtime algorithm with the help of three configuration variables on the compute hosts: .. code-block:: ini live_migration_downtime = 500 live_migration_downtime_steps = 10 live_migration_downtime_delay = 75 ``live_migration_downtime`` sets the target maximum period of time Nova will try to keep the instance paused during the last part of the memory copy, in *milliseconds*. This value may be exceeded if there is any reduction on the transfer rate after the VM is paused. The default is 500. ``live_migration_downtime_steps`` sets the total number of adjustment steps until ``live_migration_downtime`` is reached. The default is 10 steps. ``live_migration_downtime_delay`` sets the time interval between two adjustment steps in *seconds*. The default is 75. #. **Auto-convergence** One strategy for a successful live migration of a memory-intensive instance is slowing the instance down. This is called auto-convergence. Both libvirt and QEMU implement this feature by automatically throttling the instance's CPU when memory copy delays are detected. Auto-convergence is disabled by default. You can enable it by setting ``live_migration_permit_auto_converge=true``. .. caution:: Before enabling auto-convergence, make sure that the instance's application tolerates a slow-down. Be aware that auto-convergence does not guarantee live migration success. #. **Post-copy** Live migration of a memory-intensive instance is certain to succeed when you enable post-copy. This feature, implemented by libvirt and QEMU, activates the virtual machine on the destination host before all of its memory has been copied. When the virtual machine accesses a page that is missing on the destination host, the resulting page fault is resolved by copying the page from the source host. Post-copy is disabled by default. You can enable it by setting ``live_migration_permit_post_copy=true``. When you enable both auto-convergence and post-copy, auto-convergence remains disabled. .. 
caution:: The page faults introduced by post-copy can slow the instance down. When the network connection between source and destination host is interrupted, page faults cannot be resolved anymore and the instance is rebooted. .. important:: You may need to enable ``unprivileged_userfaultfd`` on newer kernels in order for post-copy to work. .. code-block:: console sysctl vm.unprivileged_userfaultfd=1 .. TODO Bernd: I *believe* that it is certain to succeed, .. but perhaps I am missing something. The full list of live migration configuration parameters is documented in the :doc:`Nova Configuration Options ` VMware ------ .. :ref:`_configuring-migrations-vmware` .. _configuring-migrations-vmware: vSphere configuration ~~~~~~~~~~~~~~~~~~~~~ Enable vMotion on all ESX hosts which are managed by Nova by following the instructions in `this `_ KB article. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/cpu-models.rst0000664000175000017500000004073200000000000020527 0ustar00zuulzuul00000000000000========== CPU models ========== Nova allows you to configure features of the virtual CPU that are exposed to instances. The combined set of CPU features is collectively referred to as the *CPU model*. Use cases include: * To maximize performance of instances by exposing new host CPU features to the guest * To ensure a consistent default behavior across all machines, removing reliance on system defaults. To configure the virtual CPU, you can configure a :ref:`CPU mode `, configure one or more :ref:`named CPU models `, and explicitly request :ref:`cpu-feature-flags`. The `Effective Virtual CPU configuration in Nova`__ presentation from the 2018 Berlin Summit provides a good overview of this topic. .. note:: It is also possible to configure the topology of the CPU. This is discussed in :doc:`cpu-topologies`. .. important:: The functionality described below is currently only supported by the libvirt driver. .. __: https://www.openstack.org/videos/summits/berlin-2018/effective-virtual-cpu-configuration-in-nova .. _cpu-modes: CPU modes --------- The first step in configuring the guest CPU is configuring the CPU *mode*. The CPU mode determines whether the CPU model is configured manually based on admin configuration or is automatically configured based on the host CPU. The CPU mode is configured using the :oslo.config:option:`libvirt.cpu_mode` config option. This option can accepts one of the following values: ``none``, ``host-passthrough``, ``host-model``, and ``custom``. Host model ~~~~~~~~~~ If :oslo.config:option:`cpu_mode=host-model `, libvirt requests the :ref:`named CPU model ` that most closely matches the host and requests additional CPU flags to complete the match. This CPU model has a number of advantages: * It provides almost all of the host CPU features to the guest, thus providing close to the maximum functionality and performance possible. * It auto-adds critical guest CPU flags for mitigation from certain security flaws, *provided* the CPU microcode, kernel, QEMU, and libvirt are all updated. * It computes live migration compatibility, with the caveat that live migration in both directions is not always possible. In general, using ``host-model`` is a safe choice if your compute node CPUs are largely identical. However, if your compute nodes span multiple processor generations, you may be better advised to select a ``custom`` CPU model. 
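As a minimal sketch, selecting this mode explicitly on the compute nodes looks like:

.. code-block:: ini

   [libvirt]
   cpu_mode = host-model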
The ``host-model`` CPU mode is the effective default for the KVM & QEMU hypervisors (:oslo.config:option:`libvirt.virt_type`\ =\ ``kvm``/``qemu``) on x86-64 hosts. This default is provided by libvirt itself. .. note:: As noted above, live migration is not always possible in both directions when using ``host-model``. During live migration, the source CPU model definition is transferred to the destination host as-is. This results in the migrated guest on the destination seeing exactly the same CPU model as on source even if the destination compute host is capable of providing more CPU features. However, shutting down and restarting the guest may result in a different hardware configuration for the guest, as per the new capabilities of the destination compute. Host passthrough ~~~~~~~~~~~~~~~~ If :oslo.config:option:`cpu_mode=host-passthrough `, libvirt tells KVM to pass through the host CPU with no modifications. In comparison to ``host-model`` which simply matches feature flags, ``host-passthrough`` ensures every last detail of the host CPU is matched. This gives the best performance, and can be important to some apps which check low level CPU details, but it comes at a cost with respect to migration. In ``host-passthrough`` mode, the guest can only be live-migrated to a target host that matches the source host extremely closely. This includes the physical CPU model and running microcode, and may even include the running kernel. Use this mode only if your compute nodes have a very large degree of homogeneity (i.e. substantially all of your compute nodes use the exact same CPU generation and model), and you make sure to only live-migrate between hosts with exactly matching kernel versions. Failure to do so will result in an inability to support any form of live migration. .. note:: The reason for that it is necessary for the CPU microcode versions to match is that hardware performance counters are exposed to an instance and it is likely that they may vary between different CPU models. There may also be other reasons due to security fixes for some hardware security flaws being included in CPU microcode. Custom ~~~~~~ If :oslo.config:option:`cpu_mode=custom `, you can explicitly specify an ordered list of one or more supported named CPU models using the :oslo.config:option:`libvirt.cpu_models` configuration option. This accepts any named CPU model that is valid for the given host, as discussed in :ref:`cpu-models` below. When more than one CPU model is provided, it is expected that the list will be ordered so that the more common and less advanced CPU models are listed first. In selecting the ``custom`` mode, along with a named CPU model that matches the oldest of your compute node CPUs, you can ensure that live migration between compute nodes will always be possible. However, you should ensure that the CPU model you select passes the correct CPU feature flags to the guest. If you need to further tweak your CPU feature flags in the ``custom`` mode, see :ref:`cpu-feature-flags`. .. note:: If :oslo.config:option:`libvirt.cpu_models` is configured, the CPU models in the list needs to be compatible with the host CPU. Also, if :oslo.config:option:`libvirt.cpu_model_extra_flags` is configured, all flags needs to be compatible with the host CPU. If incompatible CPU models or flags are specified, nova service will raise an error and fail to start. None ~~~~ If :oslo.config:option:`cpu_mode=none `, libvirt does not specify a CPU model. Instead, the hypervisor chooses the default model. 
The ``none`` CPU model is the default for all non-KVM/QEMU hypervisors. (:oslo.config:option:`libvirt.virt_type`\ != ``kvm`` / ``qemu``) .. _cpu-models: CPU models ---------- When :oslo.config:option:`libvirt.cpu_mode` is set to ``custom``, it is possible to configure one or more explicit CPU models that should be used. These CPU model names are shorthand for a set of feature flags. The libvirt KVM driver provides a number of standard CPU model names. These models are defined in ``/usr/share/libvirt/cpu_map/*.xml``. You can inspect these files to determine which models are supported by your local installation. For example, consider a host that provides the following (incomplete) set of CPU models: .. code-block:: bash $ ls /usr/share/libvirt/cpu_map/x86_*.xml -1 ... /usr/share/libvirt/cpu_map/x86_Broadwell-IBRS.xml /usr/share/libvirt/cpu_map/x86_Broadwell-noTSX-IBRS.xml /usr/share/libvirt/cpu_map/x86_Broadwell-noTSX.xml /usr/share/libvirt/cpu_map/x86_Broadwell.xml /usr/share/libvirt/cpu_map/x86_Haswell-IBRS.xml /usr/share/libvirt/cpu_map/x86_Haswell-noTSX-IBRS.xml /usr/share/libvirt/cpu_map/x86_Haswell-noTSX.xml /usr/share/libvirt/cpu_map/x86_Haswell.xml /usr/share/libvirt/cpu_map/x86_Icelake-Client-noTSX.xml /usr/share/libvirt/cpu_map/x86_Icelake-Client.xml /usr/share/libvirt/cpu_map/x86_Icelake-Server-noTSX.xml /usr/share/libvirt/cpu_map/x86_Icelake-Server.xml /usr/share/libvirt/cpu_map/x86_IvyBridge-IBRS.xml /usr/share/libvirt/cpu_map/x86_IvyBridge.xml /usr/share/libvirt/cpu_map/x86_SandyBridge-IBRS.xml /usr/share/libvirt/cpu_map/x86_SandyBridge.xml /usr/share/libvirt/cpu_map/x86_Skylake-Client-IBRS.xml /usr/share/libvirt/cpu_map/x86_Skylake-Client-noTSX-IBRS.xml /usr/share/libvirt/cpu_map/x86_Skylake-Client.xml /usr/share/libvirt/cpu_map/x86_Skylake-Server-IBRS.xml /usr/share/libvirt/cpu_map/x86_Skylake-Server-noTSX-IBRS.xml /usr/share/libvirt/cpu_map/x86_Skylake-Server.xml ... Each of these files contains information about the feature set provided by the CPU model. For example: .. code-block:: bash $ cat /usr/share/libvirt/cpu_map/x86_SandyBridge-IBRS.xml ... You can also list these CPU models using ``virsh cpu-models ARCH``. For example: .. code-block:: bash $ virsh cpu-models x86_64 ... SandyBridge SandyBridge-IBRS IvyBridge IvyBridge-IBRS Haswell-noTSX Haswell-noTSX-IBRS Haswell Haswell-IBRS Broadwell-noTSX Broadwell-noTSX-IBRS Broadwell Broadwell-IBRS Skylake-Client Skylake-Client-IBRS Skylake-Client-noTSX-IBRS Skylake-Server Skylake-Server-IBRS Skylake-Server-noTSX-IBRS Icelake-Client Icelake-Client-noTSX Icelake-Server Icelake-Server-noTSX ... By settings :oslo.config:option:`cpu_mode=custom `, it is possible to list one or more of these CPU models in the :oslo.config:option:`libvirt.cpu_models` config option in ``nova.conf``. For example: .. code-block:: ini [libvirt] cpu_mode = custom cpu_models = IvyBridge Typically you will only need to list a single model here, but it can be useful to list multiple CPU models to support requesting CPU feature flags via traits. To do this, simply list the additional CPU models in order of oldest (and therefore most widely supported) to newest. For example: .. code-block:: ini [libvirt] cpu_mode = custom cpu_models = Penryn,IvyBridge,Haswell,Broadwell,Skylake-Client More details on how to request CPU feature flags and why you might wish to specify multiple CPU models are provided in :ref:`cpu-feature-flags` below. .. _cpu-feature-flags: CPU feature flags ----------------- .. 
versionadded:: 18.0.0 (Rocky) Regardless of your configured :oslo.config:option:`libvirt.cpu_mode`, it is also possible to selectively enable additional feature flags. This can be accomplished using the :oslo.config:option:`libvirt.cpu_model_extra_flags` config option. For example, suppose you have configured a custom CPU model of ``IvyBridge``, which normally does not enable the ``pcid`` feature flag, but you do want to pass ``pcid`` into your guest instances. In this case, you could configure the following in ``nova.conf`` to enable this flag. .. code-block:: ini [libvirt] cpu_mode = custom cpu_models = IvyBridge cpu_model_extra_flags = pcid An end user can also specify required CPU features through traits. When specified, the libvirt driver will select the first CPU model in the :oslo.config:option:`libvirt.cpu_models` list that can provide the requested feature traits. If no CPU feature traits are specified then the instance will be configured with the first CPU model in the list. Consider the following ``nova.conf``: .. code-block:: ini [libvirt] cpu_mode = custom cpu_models = Penryn,IvyBridge,Haswell,Broadwell,Skylake-Client These different CPU models support different feature flags and are correctly configured in order of oldest (and therefore most widely supported) to newest. If the user explicitly required the ``avx`` and ``avx2`` CPU features, the latter of which is only found of Haswell-generation processors or newer, then they could request them using the :nova:extra-spec:`trait{group}:HW_CPU_X86_AVX` and :nova:extra-spec:`trait{group}:HW_CPU_X86_AVX2` flavor extra specs. For example: .. code-block:: console $ openstack flavor set $FLAVOR \ --property trait:HW_CPU_X86_AVX=required \ --property trait:HW_CPU_X86_AVX2=required As ``Haswell`` is the first CPU model supporting both of these CPU features, the instance would be configured with this model. .. _mitigation-for-Intel-MDS-security-flaws: Mitigation for MDS ("Microarchitectural Data Sampling") Security Flaws ---------------------------------------------------------------------- In May 2019, four new microprocessor flaws, known as `MDS`__ and also referred to as `RIDL and Fallout`__ or `ZombieLoad`__, were discovered. These flaws affect unpatched Nova compute nodes and instances running on Intel x86_64 CPUs. .. __: https://access.redhat.com/security/vulnerabilities/mds .. __: https://mdsattacks.com/ .. __: https://zombieloadattack.com Resolution ~~~~~~~~~~ To get mitigation for the said MDS security flaws, a new CPU flag, ``md-clear``, needs to be exposed to the Nova instances. This can be done as follows. #. Update the following components to the versions from your Linux distribution that have fixes for the MDS flaws, on all compute nodes with Intel x86_64 CPUs: - ``microcode_ctl`` - ``kernel`` - ``qemu-system-x86`` - ``libvirt`` #. When using the libvirt driver, ensure that the CPU flag ``md-clear`` is exposed to the Nova instances. This can be done in one of three ways, depending on your configured CPU mode: #. :oslo.config:option:`libvirt.cpu_mode`\ =host-model When using the ``host-model`` CPU mode, the ``md-clear`` CPU flag will be passed through to the Nova guests automatically. This mode is the default, when :oslo.config:option:`libvirt.virt_type`\ =kvm|qemu is set in ``/etc/nova/nova-cpu.conf`` on compute nodes. #. :oslo.config:option:`libvirt.cpu_mode`\ =host-passthrough When using the ``host-passthrough`` CPU mode, the ``md-clear`` CPU flag will be passed through to the Nova guests automatically. #. 
:oslo.config:option:`libvirt.cpu_mode`\ =custom When using the ``custom`` CPU mode, you must *explicitly* enable the CPU flag ``md-clear`` to the Nova instances, in addition to the flags required for previous vulnerabilities, using the :oslo.config:option:`libvirt.cpu_model_extra_flags`. For example: .. code-block:: ini [libvirt] cpu_mode = custom cpu_models = IvyBridge cpu_model_extra_flags = spec-ctrl,ssbd,md-clear #. Reboot the compute node for the fixes to take effect. To minimize workload downtime, you may wish to live migrate all guests to another compute node first. Once the above steps have been taken on every vulnerable compute node in the deployment, each running guest in the cluster must be fully powered down, and cold-booted (i.e. an explicit stop followed by a start), in order to activate the new CPU models. This can be done by the guest administrators at a time of their choosing. Validation ~~~~~~~~~~ After applying relevant updates, administrators can check the kernel's ``sysfs`` interface to see what mitigation is in place, by running the following command on the host: .. code-block:: bash # cat /sys/devices/system/cpu/vulnerabilities/mds Mitigation: Clear CPU buffers; SMT vulnerable To unpack the message "Mitigation: Clear CPU buffers; SMT vulnerable": - ``Mitigation: Clear CPU buffers`` means you have the "CPU buffer clearing" mitigation enabled, which is mechanism to invoke a flush of various exploitable CPU buffers by invoking a CPU instruction called "VERW". - ``SMT vulnerable`` means, depending on your workload, you may still be vulnerable to SMT-related problems. You need to evaluate whether your workloads need SMT (also called "Hyper-Threading") to be disabled or not. Refer to the guidance from your Linux distribution and processor vendor. To see the other possible values for ``/sys/devices/system/cpu/vulnerabilities/mds``, refer to the `MDS system information`__ section in Linux kernel's documentation for MDS. On the host, validate that KVM is capable of exposing the ``md-clear`` flag to guests: .. code-block:: bash # virsh domcapabilities kvm | grep md-clear More information can be found on the 'Diagnosis' tab of `this security notice document`__. .. __: https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html#mds-system-information .. __: https://access.redhat.com/security/vulnerabilities/mds Performance Impact ~~~~~~~~~~~~~~~~~~ Refer to this section titled "Performance Impact and Disabling MDS" from `this security notice document`__, under the *Resolve* tab. .. note:: Although the article referred to is from Red Hat, the findings and recommendations about performance impact apply for other distributions also. .. __: https://access.redhat.com/security/vulnerabilities/mds ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/cpu-topologies.rst0000664000175000017500000010410200000000000021420 0ustar00zuulzuul00000000000000============== CPU topologies ============== The NUMA topology and CPU pinning features in OpenStack provide high-level control over how instances run on hypervisor CPUs and the topology of virtual CPUs available to instances. These features help minimize latency and maximize performance. .. include:: /common/numa-live-migration-warning.txt SMP, NUMA, and SMT ------------------ Symmetric multiprocessing (SMP) SMP is a design found in many modern multi-core systems. 
In an SMP system, there are two or more CPUs and these CPUs are connected by some interconnect. This provides CPUs with equal access to system resources like memory and input/output ports. Non-uniform memory access (NUMA) NUMA is a derivative of the SMP design that is found in many multi-socket systems. In a NUMA system, system memory is divided into cells or nodes that are associated with particular CPUs. Requests for memory on other nodes are possible through an interconnect bus. However, bandwidth across this shared bus is limited. As a result, competition for this resource can incur performance penalties. Simultaneous Multi-Threading (SMT) SMT is a design complementary to SMP. Whereas CPUs in SMP systems share a bus and some memory, CPUs in SMT systems share many more components. CPUs that share components are known as thread siblings. All CPUs appear as usable CPUs on the system and can execute workloads in parallel. However, as with NUMA, threads compete for shared resources. Non-Uniform I/O Access (NUMA I/O) In a NUMA system, I/O to a device mapped to a local memory region is more efficient than I/O to a remote device. A device connected to the same socket providing the CPU and memory offers lower latencies for I/O operations due to its physical proximity. This generally manifests itself in devices connected to the PCIe bus, such as NICs or vGPUs, but applies to any device support memory-mapped I/O. In OpenStack, SMP CPUs are known as *cores*, NUMA cells or nodes are known as *sockets*, and SMT CPUs are known as *threads*. For example, a quad-socket, eight core system with Hyper-Threading would have four sockets, eight cores per socket and two threads per core, for a total of 64 CPUs. PCPU and VCPU ------------- PCPU Resource class representing an amount of dedicated CPUs for a single guest. VCPU Resource class representing a unit of CPU resources for a single guest approximating the processing power of a single physical processor. .. _numa-topologies: Customizing instance NUMA placement policies -------------------------------------------- .. important:: The functionality described below is currently only supported by the libvirt/KVM driver. When running workloads on NUMA hosts, it is important that the vCPUs executing processes are on the same NUMA node as the memory used by these processes. This ensures all memory accesses are local to the node and thus do not consume the limited cross-node memory bandwidth, adding latency to memory accesses. Similarly, large pages are assigned from memory and benefit from the same performance improvements as memory allocated using standard pages. Thus, they also should be local. Finally, PCI devices are directly associated with specific NUMA nodes for the purposes of DMA. Instances that use PCI or SR-IOV devices should be placed on the NUMA node associated with these devices. NUMA topology can exist on both the physical hardware of the host and the virtual hardware of the instance. In OpenStack, when booting a process, the hypervisor driver looks at the NUMA topology field of both the instance and the host it is being booted on, and uses that information to generate an appropriate configuration. By default, an instance floats across all NUMA nodes on a host. NUMA awareness can be enabled implicitly through the use of huge pages or pinned CPUs or explicitly through the use of flavor extra specs or image metadata. 
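Before choosing a NUMA policy, it can be useful to see how the host's CPUs and memory are divided between NUMA nodes. The following is only an illustrative check on a Linux compute node, assuming the ``numactl`` utility is installed (``lscpu`` reports similar information); the output shown is for a hypothetical two-node host and will differ on real hardware:

.. code-block:: console

   # numactl --hardware
   available: 2 nodes (0-1)
   node 0 cpus: 0 1 2 3 8 9 10 11
   node 0 size: 65430 MB
   node 1 cpus: 4 5 6 7 12 13 14 15
   node 1 size: 65536 MB

The node-to-CPU and node-to-memory mapping shown here is the host topology that guest NUMA cells are ultimately fitted onto.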
If the instance has requested a specific NUMA topology, compute will try to pin
the vCPUs of different NUMA cells on the instance to the corresponding NUMA
cells on the host. It will also expose the NUMA topology of the instance to the
guest OS.

In all cases where NUMA awareness is used, the ``NUMATopologyFilter`` filter
must be enabled. Details on this filter are provided in
:doc:`/admin/scheduling`.

The host NUMA node(s) used are chosen according to the
``packing_host_numa_cells_allocation_strategy`` configuration option in
``nova.conf``. By default this option is set to ``True``, which selects the
NUMA node(s) with the least amount of free resources (in other words, the
**most used** NUMA nodes) first. This is the so-called "pack" strategy: as much
load as possible is placed on the most used host NUMA node until it is
completely exhausted, and only then is the next most used node chosen from the
remaining nodes on the host.

The "spread" strategy is the reverse of the "pack" strategy: the NUMA node(s)
with the **most free** resources are used first. The "spread" strategy
therefore balances load between all NUMA nodes, keeping the amount of free
resources on each node as equal as possible.

.. caution::

   The host's NUMA nodes are placed in a list that is sorted based on the
   chosen strategy and the resources available in each node. The sorts are
   performed on the same list one after another, so the last sort applied has
   the highest priority. Python uses a stable sort: a sort only changes the
   order of list items whose sort key differs, so items with equal keys keep
   their relative order. The sorts are applied to the host's NUMA node list in
   the following order:

   * sort based on available memory on the node (first sort, lowest priority)
   * sort based on CPU usage (if the guest requested shared CPUs) or on free
     pinned CPUs otherwise
   * sort based on the number of free PCI devices on the node (last sort,
     highest priority)

   The highest sorting priority therefore goes to host NUMA nodes with PCI
   devices attached. If the VM requested PCI device(s), the logic **always**
   puts the host NUMA nodes with more PCI devices at the beginning of the
   list. If no PCI devices were requested, the NUMA nodes with no (or fewer)
   available PCI devices are placed at the beginning of the list.

.. caution::

   The described logic for PCI devices is used for **both** the "pack" and
   "spread" strategies, to preserve backward compatibility with previous nova
   versions.

With the "pack" strategy, the remaining two sorts order the list so that NUMA
nodes with more available resources (CPUs and memory) are moved to the END of
the host's NUMA node list. With the "spread" strategy, they are moved to the
BEGINNING of the list. In both cases the memory-based sort is applied first and
has the lowest priority.

Finally, the resulting list (after all sorts) is used for placement: attempts
to place each of the VM's NUMA nodes on a host NUMA node are performed starting
from the first host NUMA node in the list.

.. caution::

   Inadequate per-node resources will result in scheduling failures. Resources
   that are specific to a node include not only CPUs and memory, but also PCI
   and SR-IOV resources.
It is not possible to use multiple resources from different nodes without requesting a multi-node layout. As such, it may be necessary to ensure PCI or SR-IOV resources are associated with the same NUMA node or force a multi-node layout. When used, NUMA awareness allows the operating system of the instance to intelligently schedule the workloads that it runs and minimize cross-node memory bandwidth. To configure guest NUMA nodes, you can use the :nova:extra-spec:`hw:numa_nodes` flavor extra spec. For example, to restrict an instance's vCPUs to a single host NUMA node, run: .. code-block:: console $ openstack flavor set $FLAVOR --property hw:numa_nodes=1 Some workloads have very demanding requirements for memory access latency or bandwidth that exceed the memory bandwidth available from a single NUMA node. For such workloads, it is beneficial to spread the instance across multiple host NUMA nodes, even if the instance's RAM/vCPUs could theoretically fit on a single NUMA node. To force an instance's vCPUs to spread across two host NUMA nodes, run: .. code-block:: console $ openstack flavor set $FLAVOR --property hw:numa_nodes=2 The allocation of instance vCPUs and memory from different host NUMA nodes can be configured. This allows for asymmetric allocation of vCPUs and memory, which can be important for some workloads. You can configure the allocation of instance vCPUs and memory across each **guest** NUMA node using the :nova:extra-spec:`hw:numa_cpus.{num}` and :nova:extra-spec:`hw:numa_mem.{num}` extra specs respectively. For example, to spread the 6 vCPUs and 6 GB of memory of an instance across two NUMA nodes and create an asymmetric 1:2 vCPU and memory mapping between the two nodes, run: .. code-block:: console $ openstack flavor set $FLAVOR --property hw:numa_nodes=2 # configure guest node 0 $ openstack flavor set $FLAVOR \ --property hw:numa_cpus.0=0,1 \ --property hw:numa_mem.0=2048 # configure guest node 1 $ openstack flavor set $FLAVOR \ --property hw:numa_cpus.1=2,3,4,5 \ --property hw:numa_mem.1=4096 .. note:: The ``{num}`` parameter is an index of *guest* NUMA nodes and may not correspond to *host* NUMA nodes. For example, on a platform with two NUMA nodes, the scheduler may opt to place guest NUMA node 0, as referenced in ``hw:numa_mem.0`` on host NUMA node 1 and vice versa. Similarly, the CPUs bitmask specified in the value for ``hw:numa_cpus.{num}`` refer to *guest* vCPUs and may not correspond to *host* CPUs. As such, this feature cannot be used to constrain instances to specific host CPUs or NUMA nodes. .. warning:: If the combined values of ``hw:numa_cpus.{num}`` or ``hw:numa_mem.{num}`` are greater than the available number of CPUs or memory respectively, an exception will be raised. For more information about the syntax for ``hw:numa_nodes``, ``hw:numa_cpus.N`` and ``hw:num_mem.N``, refer to :doc:`/configuration/extra-specs`. .. _cpu-pinning-policies: Customizing instance CPU pinning policies ----------------------------------------- .. important:: The functionality described below is currently only supported by the libvirt/KVM driver and requires :ref:`some host configuration ` for this to work. .. note:: There is no correlation required between the NUMA topology exposed in the instance and how the instance is actually pinned on the host. This is by design. See this `invalid bug `_ for more information. By default, instance vCPU processes are not assigned to any particular host CPU, instead, they float across host CPUs like any other process. 
This allows for features like overcommitting of CPUs. In heavily contended systems, this provides optimal system performance at the expense of performance and latency for individual instances. Some workloads require real-time or near real-time behavior, which is not possible with the latency introduced by the default CPU policy. For such workloads, it is beneficial to control which host CPUs are bound to an instance's vCPUs. This process is known as pinning. No instance with pinned CPUs can use the CPUs of another pinned instance, thus preventing resource contention between instances. CPU pinning policies can be used to determine whether an instance should be pinned or not. They can be configured using the :nova:extra-spec:`hw:cpu_policy` extra spec and equivalent image metadata property. There are three policies: ``dedicated``, ``mixed`` and ``shared`` (the default). The ``dedicated`` CPU policy is used to specify that all CPUs of an instance should use pinned CPUs. To configure a flavor to use the ``dedicated`` CPU policy, run: .. code-block:: console $ openstack flavor set $FLAVOR --property hw:cpu_policy=dedicated This works by ensuring ``PCPU`` allocations are used instead of ``VCPU`` allocations. As such, it is also possible to request this resource type explicitly. To configure this, run: .. code-block:: console $ openstack flavor set $FLAVOR --property resources:PCPU=N (where ``N`` is the number of vCPUs defined in the flavor). .. note:: It is not currently possible to request ``PCPU`` and ``VCPU`` resources in the same instance. The ``shared`` CPU policy is used to specify that an instance **should not** use pinned CPUs. To configure a flavor to use the ``shared`` CPU policy, run: .. code-block:: console $ openstack flavor set $FLAVOR --property hw:cpu_policy=shared The ``mixed`` CPU policy is used to specify that an instance use pinned CPUs along with unpinned CPUs. The instance pinned CPU could be specified in the :nova:extra-spec:`hw:cpu_dedicated_mask` or, if :doc:`real-time ` is enabled, in the :nova:extra-spec:`hw:cpu_realtime_mask` extra spec. For example, to configure a flavor to use the ``mixed`` CPU policy with 4 vCPUs in total and the first 2 vCPUs as pinned CPUs, run: .. code-block:: console $ openstack flavor set $FLAVOR \ --vcpus=4 \ --property hw:cpu_policy=mixed \ --property hw:cpu_dedicated_mask=0-1 To configure a flavor to use the ``mixed`` CPU policy with 4 vCPUs in total and the first 2 vCPUs as pinned **real-time** CPUs, run: .. code-block:: console $ openstack flavor set $FLAVOR \ --vcpus=4 \ --property hw:cpu_policy=mixed \ --property hw:cpu_realtime=yes \ --property hw:cpu_realtime_mask=0-1 .. note:: For more information about the syntax for ``hw:cpu_policy``, ``hw:cpu_dedicated_mask``, ``hw:realtime_cpu`` and ``hw:cpu_realtime_mask``, refer to :doc:`/configuration/extra-specs` .. note:: For more information about real-time functionality, refer to the :doc:`documentation `. It is also possible to configure the CPU policy via image metadata. This can be useful when packaging applications that require real-time or near real-time behavior by ensuring instances created with a given image are always pinned regardless of flavor. To configure an image to use the ``dedicated`` CPU policy, run: .. code-block:: console $ openstack image set $IMAGE --property hw_cpu_policy=dedicated Likewise, to configure an image to use the ``shared`` CPU policy, run: .. code-block:: console $ openstack image set $IMAGE --property hw_cpu_policy=shared .. 
note:: For more information about image metadata, refer to the `Image metadata`_ guide. .. important:: Flavor-based policies take precedence over image-based policies. For example, if a flavor specifies a CPU policy of ``dedicated`` then that policy will be used. If the flavor specifies a CPU policy of ``shared`` and the image specifies no policy or a policy of ``shared`` then the ``shared`` policy will be used. However, the flavor specifies a CPU policy of ``shared`` and the image specifies a policy of ``dedicated``, or vice versa, an exception will be raised. This is by design. Image metadata is often configurable by non-admin users, while flavors are only configurable by admins. By setting a ``shared`` policy through flavor extra-specs, administrators can prevent users configuring CPU policies in images and impacting resource utilization. Customizing instance CPU thread pinning policies ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. important:: The functionality described below requires the use of pinned instances and is therefore currently only supported by the libvirt/KVM driver and requires :ref:`some host configuration ` for this to work. When running pinned instances on SMT hosts, it may also be necessary to consider the impact that thread siblings can have on the instance workload. The presence of an SMT implementation like Intel Hyper-Threading can boost performance `by up to 30%`__ for some workloads. However, thread siblings share a number of components and contention on these components can diminish performance for other workloads. For this reason, it is also possible to explicitly request hosts with or without SMT. __ https://software.intel.com/en-us/articles/how-to-determine-the-effectiveness-of-hyper-threading-technology-with-an-application To configure whether an instance should be placed on a host with SMT or not, a CPU thread policy may be specified. For workloads where sharing benefits performance, you can request hosts **with** SMT. To configure this, run: .. code-block:: console $ openstack flavor set $FLAVOR \ --property hw:cpu_policy=dedicated \ --property hw:cpu_thread_policy=require This will ensure the instance gets scheduled to a host with SMT by requesting hosts that report the ``HW_CPU_HYPERTHREADING`` trait. It is also possible to request this trait explicitly. To configure this, run: .. code-block:: console $ openstack flavor set $FLAVOR \ --property resources:PCPU=N \ --property trait:HW_CPU_HYPERTHREADING=required For other workloads where performance is impacted by contention for resources, you can request hosts **without** SMT. To configure this, run: .. code-block:: console $ openstack flavor set $FLAVOR \ --property hw:cpu_policy=dedicated \ --property hw:cpu_thread_policy=isolate This will ensure the instance gets scheduled to a host without SMT by requesting hosts that **do not** report the ``HW_CPU_HYPERTHREADING`` trait. It is also possible to request this trait explicitly. To configure this, run: .. code-block:: console $ openstack flavor set $FLAVOR \ --property resources:PCPU=N \ --property trait:HW_CPU_HYPERTHREADING=forbidden Finally, for workloads where performance is minimally impacted, you may use thread siblings if available and fallback to not using them if necessary. This is the default, but it can be set explicitly: .. code-block:: console $ openstack flavor set $FLAVOR \ --property hw:cpu_policy=dedicated \ --property hw:cpu_thread_policy=prefer This does not utilize traits and, as such, there is no trait-based equivalent. .. 
note:: For more information about the syntax for ``hw:cpu_thread_policy``, refer to :doc:`/configuration/extra-specs`. As with CPU policies, it also possible to configure the CPU thread policy via image metadata. This can be useful when packaging applications that require real-time or near real-time behavior by ensuring instances created with a given image are always pinned regardless of flavor. To configure an image to use the ``require`` CPU policy, run: .. code-block:: console $ openstack image set $IMAGE \ --property hw_cpu_policy=dedicated \ --property hw_cpu_thread_policy=require Likewise, to configure an image to use the ``isolate`` CPU thread policy, run: .. code-block:: console $ openstack image set $IMAGE \ --property hw_cpu_policy=dedicated \ --property hw_cpu_thread_policy=isolate Finally, to configure an image to use the ``prefer`` CPU thread policy, run: .. code-block:: console $ openstack image set $IMAGE \ --property hw_cpu_policy=dedicated \ --property hw_cpu_thread_policy=prefer If the flavor does not specify a CPU thread policy then the CPU thread policy specified by the image (if any) will be used. If both the flavor and image specify a CPU thread policy then they must specify the same policy, otherwise an exception will be raised. .. note:: For more information about image metadata, refer to the `Image metadata`_ guide. .. _emulator-thread-pinning-policies: Customizing instance emulator thread pinning policies ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. important:: The functionality described below requires the use of pinned instances and is therefore currently only supported by the libvirt/KVM driver and requires :ref:`some host configuration ` for this to work. In addition to the work of the guest OS and applications running in an instance, there is a small amount of overhead associated with the underlying hypervisor. By default, these overhead tasks - known collectively as emulator threads - run on the same host CPUs as the instance itself and will result in a minor performance penalty for the instance. This is not usually an issue, however, for things like real-time instances, it may not be acceptable for emulator thread to steal time from instance CPUs. Emulator thread policies can be used to ensure emulator threads are run on cores separate from those used by the instance. There are two policies: ``isolate`` and ``share``. The default is to run the emulator threads on the same core. The ``isolate`` emulator thread policy is used to specify that emulator threads for a given instance should be run on their own unique core, chosen from one of the host cores listed in :oslo.config:option:`compute.cpu_dedicated_set`. To configure a flavor to use the ``isolate`` emulator thread policy, run: .. code-block:: console $ openstack flavor set $FLAVOR \ --property hw:cpu_policy=dedicated \ --property hw:emulator_threads_policy=isolate The ``share`` policy is used to specify that emulator threads from a given instance should be run on the pool of host cores listed in :oslo.config:option:`compute.cpu_shared_set` if configured, else across all pCPUs of the instance. To configure a flavor to use the ``share`` emulator thread policy, run: .. code-block:: console $ openstack flavor set $FLAVOR \ --property hw:cpu_policy=dedicated \ --property hw:emulator_threads_policy=share The above behavior can be summarized in this helpful table: .. 
list-table::
   :header-rows: 1
   :stub-columns: 1

   * -
     - :oslo.config:option:`compute.cpu_shared_set` set
     - :oslo.config:option:`compute.cpu_shared_set` unset
   * - ``hw:emulator_threads_policy`` unset (default)
     - Pinned to all of the instance's pCPUs
     - Pinned to all of the instance's pCPUs
   * - ``hw:emulator_threads_policy`` = ``share``
     - Pinned to :oslo.config:option:`compute.cpu_shared_set`
     - Pinned to all of the instance's pCPUs
   * - ``hw:emulator_threads_policy`` = ``isolate``
     - Pinned to a single pCPU distinct from the instance's pCPUs
     - Pinned to a single pCPU distinct from the instance's pCPUs

.. note::

   For more information about the syntax for ``hw:emulator_threads_policy``,
   refer to :nova:extra-spec:`the documentation `.

Customizing instance CPU topologies
-----------------------------------

.. important::

   The functionality described below is currently only supported by the
   libvirt/KVM driver.

.. note::

   Currently this also works with the libvirt/QEMU driver, but we don't
   recommend it for production use cases. This is because all vCPUs actually
   run in a single host thread under QEMU TCG (Tiny Code Generator), the
   backend used by the libvirt/QEMU driver. Work to enable full
   multi-threading support for TCG (a.k.a. MTTCG) is ongoing in the QEMU
   community. Please see the `MTTCG project`_ page for details.

In addition to configuring how an instance is scheduled on host CPUs, it is
possible to configure how CPUs are represented in the instance itself. By
default, when instance NUMA placement is not specified, a topology of N
sockets, each with one core and one thread, is used for an instance, where N
corresponds to the number of instance vCPUs requested. When instance NUMA
placement is specified, the number of sockets is fixed to the number of host
NUMA nodes to use and the total number of instance CPUs is split over these
sockets.

Some workloads benefit from a custom topology. For example, in some operating
systems, a different license may be needed depending on the number of CPU
sockets. To configure a flavor to use two sockets, run:

.. code-block:: console

   $ openstack flavor set $FLAVOR --property hw:cpu_sockets=2

Similarly, to configure a flavor to use one core and one thread, run:

.. code-block:: console

   $ openstack flavor set $FLAVOR \
       --property hw:cpu_cores=1 \
       --property hw:cpu_threads=1

.. caution::

   If specifying all values, the product of sockets multiplied by cores
   multiplied by threads must equal the number of instance vCPUs. If
   specifying any one of these values, or the product of two of them, the
   values must be a factor of the number of instance vCPUs to prevent an
   exception. For example, specifying ``hw:cpu_sockets=2`` for an instance
   with an odd number of vCPUs fails. Similarly, specifying ``hw:cpu_cores=2``
   and ``hw:cpu_threads=4`` for an instance with ten vCPUs fails.

For more information about the syntax for ``hw:cpu_sockets``, ``hw:cpu_cores``
and ``hw:cpu_threads``, refer to :doc:`/configuration/extra-specs`.

It is also possible to set upper limits on the number of sockets, cores, and
threads used. Unlike the hard values above, it is not necessary for this exact
number to be used because it only provides a limit. This can be used to
provide some flexibility in scheduling, while ensuring certain limits are not
exceeded. For example, to ensure no more than two sockets, eight cores and one
thread are defined in the instance topology, run:

..
code-block:: console $ openstack flavor set $FLAVOR \ --property hw:cpu_max_sockets=2 \ --property hw:cpu_max_cores=8 \ --property hw:cpu_max_threads=1 For more information about the syntax for ``hw:cpu_max_sockets``, ``hw:cpu_max_cores``, and ``hw:cpu_max_threads``, refer to :doc:`/configuration/extra-specs`. Applications are frequently packaged as images. For applications that prefer certain CPU topologies, configure image metadata to hint that created instances should have a given topology regardless of flavor. To configure an image to request a two-socket, four-core per socket topology, run: .. code-block:: console $ openstack image set $IMAGE \ --property hw_cpu_sockets=2 \ --property hw_cpu_cores=4 To constrain instances to a given limit of sockets, cores or threads, use the ``max_`` variants. To configure an image to have a maximum of two sockets and a maximum of one thread, run: .. code-block:: console $ openstack image set $IMAGE \ --property hw_cpu_max_sockets=2 \ --property hw_cpu_max_threads=1 The value specified in the flavor is treated as the absolute limit. The image limits are not permitted to exceed the flavor limits, they can only be equal to or lower than what the flavor defines. By setting a ``max`` value for sockets, cores, or threads, administrators can prevent users configuring topologies that might, for example, incur an additional licensing fees. For more information about image metadata, refer to the `Image metadata`_ guide. .. _configure-libvirt-pinning: Configuring libvirt compute nodes for CPU pinning ------------------------------------------------- .. versionchanged:: 20.0.0 Prior to 20.0.0 (Train), it was not necessary to explicitly configure hosts for pinned instances. However, it was not possible to place pinned instances on the same host as unpinned CPUs, which typically meant hosts had to be grouped into host aggregates. If this was not done, unpinned instances would continue floating across all enabled host CPUs, even those that some instance CPUs were pinned to. Starting in 20.0.0, it is necessary to explicitly identify the host cores that should be used for pinned instances. Nova treats host CPUs used for unpinned instances differently from those used by pinned instances. The former are tracked in placement using the ``VCPU`` resource type and can be overallocated, while the latter are tracked using the ``PCPU`` resource type. By default, nova will report all host CPUs as ``VCPU`` inventory, however, this can be configured using the :oslo.config:option:`compute.cpu_shared_set` config option, to specify which host CPUs should be used for ``VCPU`` inventory, and the :oslo.config:option:`compute.cpu_dedicated_set` config option, to specify which host CPUs should be used for ``PCPU`` inventory. Consider a compute node with a total of 24 host physical CPU cores with hyperthreading enabled. The operator wishes to reserve 1 physical CPU core and its thread sibling for host processing (not for guest instance use). Furthermore, the operator wishes to use 8 host physical CPU cores and their thread siblings for dedicated guest CPU resources. The remaining 15 host physical CPU cores and their thread siblings will be used for shared guest vCPU usage, with an 8:1 allocation ratio for those physical processors used for shared guest CPU resources. 
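The exact CPU numbers to place in :oslo.config:option:`compute.cpu_dedicated_set` and :oslo.config:option:`compute.cpu_shared_set` depend on how the host kernel enumerates thread siblings, so it is worth confirming the mapping before writing the configuration. A quick, illustrative check on a Linux host (the CPU IDs shown are hypothetical; on many systems siblings are instead offset by half the total CPU count) might be:

.. code-block:: console

   # lscpu --extended=CPU,CORE,SOCKET,NODE
   # cat /sys/devices/system/cpu/cpu2/topology/thread_siblings_list
   2-3

Here CPUs 2 and 3 would be thread siblings of the same physical core, so both belong in the same set, as in the example configuration that follows.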
The operator could configure ``nova.conf`` like so::

    [DEFAULT]
    cpu_allocation_ratio=8.0

    [compute]
    cpu_dedicated_set=2-17
    cpu_shared_set=18-47

The virt driver will construct a provider tree containing a single resource
provider representing the compute node and report inventory of ``PCPU`` and
``VCPU`` for this single provider accordingly::

    COMPUTE NODE provider
        PCPU:
            total: 16
            reserved: 0
            min_unit: 1
            max_unit: 16
            step_size: 1
            allocation_ratio: 1.0
        VCPU:
            total: 30
            reserved: 0
            min_unit: 1
            max_unit: 30
            step_size: 1
            allocation_ratio: 8.0

For instances using the ``dedicated`` CPU policy or an explicit ``PCPU``
resource request, ``PCPU`` inventory will be consumed. Instances using the
``shared`` CPU policy, meanwhile, will consume ``VCPU`` inventory.

.. note::

   ``PCPU`` and ``VCPU`` allocations are currently combined to calculate the
   value for the ``cores`` quota class.

.. _configure-hyperv-numa:

Configuring CPU power management for dedicated cores
----------------------------------------------------

.. versionchanged:: 27.0.0

   This feature was introduced in the 2023.1 Antelope release.

.. important::

   The functionality described below is currently only supported by the
   libvirt/KVM driver.

For power saving reasons, operators can decide to turn down the power usage of
CPU cores, whether they are in use or not. For obvious reasons, Nova only
allows changing the power consumption of dedicated CPU cores, not shared ones.
Accordingly, this feature relies on the
:oslo.config:option:`compute.cpu_dedicated_set` config option to know which
CPU cores to handle.

The main action to enable power management of dedicated cores is to set the
:oslo.config:option:`libvirt.cpu_power_management` config option to ``True``.
If this option is enabled, Nova will look up the dedicated cores and power
them down at compute service startup. Then, when an instance is assigned one
or more dedicated cores, those cores are powered up right before the libvirt
guest starts. Conversely, once an instance is stopped, migrated or deleted,
the corresponding dedicated cores are powered down again.

There are two distinct strategies for powering cores up or down:

- the default is to offline the CPU core and online it when needed.
- an alternative strategy is to use two distinct CPU governors for the up
  state and the down state.

The strategy can be chosen using the
:oslo.config:option:`libvirt.cpu_power_management_strategy` config option.
``cpu_state`` selects the first, online/offline strategy, while ``governor``
selects the alternative strategy. We default to turning off the cores as it
provides the best power savings, and there may be other tools outside Nova,
like tuned, to manage the governor. That being said, we also provide a way to
automatically change the governors on the fly, as explained below.

.. important::

   Some OS platforms don't support ``cpufreq`` resources in sysfs, so the
   ``governor`` strategy may not be available. Please verify that your OS
   supports scaling governors before modifying this configuration option.

If the strategy is set to ``governor``, a couple of config options are
provided to define which exact CPU governor to use for each of the up and down
states:

- :oslo.config:option:`libvirt.cpu_power_governor_low` defines the governor to
  use for the powerdown state (defaults to ``powersave``)
- :oslo.config:option:`libvirt.cpu_power_governor_high` defines the governor
  to use for the powerup state (defaults to ``performance``)

.. important::
   It is the operator's responsibility to ensure that the governors defined by
   these configuration options are supported by the underlying OS kernel that
   runs the compute service.

As a side note, we recommend the ``schedutil`` governor as an alternative for
the high-power state (if the kernel supports it), as the CPU frequency is then
set dynamically based on CPU task states. Other governors may be worth
testing, including ``conservative`` and ``ondemand``, which consume quite a
bit more power than ``schedutil`` but are more efficient than ``performance``.
See `Linux kernel docs`_ for further explanations.

.. _`Linux kernel docs`: https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt

As an example, the relevant part of a ``nova.conf`` configuration would look
like::

    [compute]
    cpu_dedicated_set=2-17

    [libvirt]
    cpu_power_management=True
    cpu_power_management_strategy=cpu_state

.. warning::

   CPU core #0 has a special meaning in most recent Linux kernels. Using it
   for CPU pinning is always highly discouraged, and you should definitely
   refrain from having it power managed, or you could be in for surprises if
   Nova turns it off!

One last important note: you may decide to change the CPU management strategy
during the compute node's lifecycle, or you may already be managing the CPU
states by other means. To ensure that Nova can correctly manage CPU
performance, a couple of checks are performed at startup that refuse to start
the nova-compute service if the following rules aren't met:

- if the operator opts for the ``cpu_state`` strategy, then all dedicated CPU
  governors *MUST* be identical.
- if the operator opts for the ``governor`` strategy, then all dedicated CPU
  cores *MUST* be online.

.. Links
.. _`Image metadata`: https://docs.openstack.org/image-guide/introduction.html#image-metadata
.. _`MTTCG project`: http://wiki.qemu.org/Features/tcg-multithread

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/default-ports.rst0000664000175000017500000000210400000000000021237 0ustar00zuulzuul00000000000000

==========================================
Compute service node firewall requirements
==========================================

Console connections for virtual machines, whether direct or through a proxy,
are received on ports ``5900`` to ``5999``. The firewall on each Compute
service node must allow network traffic on these ports.

This procedure modifies the iptables firewall to allow incoming connections to
the Compute services.

**Configuring the service-node firewall**

#. Log in to the server that hosts the Compute service, as root.

#. Edit the ``/etc/sysconfig/iptables`` file, to add an INPUT rule that allows
   TCP traffic on ports from ``5900`` to ``5999``. Make sure the new rule
   appears before any INPUT rules that REJECT traffic:

   .. code-block:: console

      -A INPUT -p tcp -m multiport --dports 5900:5999 -j ACCEPT

#. Save the changes to the ``/etc/sysconfig/iptables`` file, and restart the
   ``iptables`` service to pick up the changes:

   .. code-block:: console

      $ service iptables restart

#. Repeat this process for each Compute service node.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/emulated-tpm.rst0000664000175000017500000001140600000000000021051 0ustar00zuulzuul00000000000000

=======================================
Emulated Trusted Platform Module (vTPM)
=======================================

..
versionadded:: 22.0.0 (Victoria) Starting in the 22.0.0 (Victoria) release, Nova supports adding an emulated virtual `Trusted Platform Module`__ (vTPM) to guests. .. __: https://en.wikipedia.org/wiki/Trusted_Platform_Module Enabling vTPM ------------- The following are required on each compute host wishing to support the vTPM feature: * Currently vTPM is only supported when using the libvirt compute driver with a :oslo.config:option:`libvirt.virt_type` of ``kvm`` or ``qemu``. * A `key manager service`__, such as `barbican`__, must be configured to store secrets used to encrypt the virtual device files at rest. * The swtpm__ binary and associated libraries__. * Set the :oslo.config:option:`libvirt.swtpm_enabled` config option to ``True``. This will enable support for both TPM version 1.2 and 2.0. With the above requirements satisfied, verify vTPM support by inspecting the traits on the compute node's resource provider: .. code:: bash $ COMPUTE_UUID=$(openstack resource provider list --name $HOST -f value -c uuid) $ openstack resource provider trait list $COMPUTE_UUID | grep SECURITY_TPM | COMPUTE_SECURITY_TPM_1_2 | | COMPUTE_SECURITY_TPM_2_0 | .. __: https://docs.openstack.org/api-guide/key-manager/ .. __: https://docs.openstack.org/barbican/latest/ .. __: https://github.com/stefanberger/swtpm/wiki .. __: https://github.com/stefanberger/libtpms/ Configuring a flavor or image ----------------------------- A vTPM can be requested on a server via flavor extra specs or image metadata properties. There are two versions supported - 1.2 and 2.0 - and two models - TPM Interface Specification (TIS) and Command-Response Buffer (CRB). The CRB model is only supported with version 2.0. .. list-table:: :header-rows: 1 * - Flavor extra_specs - Image metadata - Description * - ``hw:tpm_version`` - ``hw_tpm_version`` - Specify the TPM version, ``1.2`` or ``2.0``. Required if requesting a vTPM. * - ``hw:tpm_model`` - ``hw_tpm_model`` - Specify the TPM model, ``tpm-tis`` (the default) or ``tpm-crb`` (only valid with version ``2.0``. For example, to configure a flavor to use the TPM 2.0 with the CRB model: .. code-block:: console $ openstack flavor set $FLAVOR \ --property hw:tpm_version=2.0 \ --property hw:tpm_model=tpm-crb Scheduling will fail if flavor and image supply conflicting values, or if model ``tpm-crb`` is requested with version ``1.2``. Upon successful boot, the server should see a TPM device such as ``/dev/tpm0`` which can be used in the same manner as a hardware TPM. Limitations ----------- * Only server operations performed by the server owner are supported, as the user's credentials are required to unlock the virtual device files on the host. Thus the admin may need to decide whether to grant the user additional policy roles; if not, those operations are effectively disabled. * Live migration, evacuation, shelving and rescuing of servers with vTPMs is not currently supported. Security -------- With a hardware TPM, the root of trust is a secret known only to the TPM user. In contrast, an emulated TPM comprises a file on disk which the libvirt daemon must be able to present to the guest. At rest, this file is encrypted using a passphrase stored in a key manager service. The passphrase in the key manager is associated with the credentials of the owner of the server (the user who initially created it). The passphrase is retrieved and used by libvirt to unlock the emulated TPM data any time the server is booted. 
Although the above mechanism uses a libvirt secret__ that is both ``private`` (can't be displayed via the libvirt API or ``virsh``) and ``ephemeral`` (exists only in memory, never on disk), it is theoretically possible for a sufficiently privileged user to retrieve the secret and/or vTPM data from memory. A full analysis and discussion of security issues related to emulated TPM is beyond the scope of this document. .. __: https://libvirt.org/formatsecret.html#SecretAttributes References ---------- * `TCG PC Client Specific TPM Interface Specification (TIS)`__ * `TCG PC Client Platform TPM Profile (PTP) Specification`__ * `QEMU docs on tpm`__ * `Libvirt XML to request emulated TPM device`__ * `Libvirt secret for usage type ``vtpm```__ .. __: https://trustedcomputinggroup.org/resource/pc-client-work-group-pc-client-specific-tpm-interface-specification-tis/ .. __: https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/ .. __: https://qemu.readthedocs.io/en/latest/specs/tpm.html .. __: https://libvirt.org/formatdomain.html#elementsTpm .. __: https://libvirt.org/formatsecret.html#vTPMUsageType ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/evacuate.rst0000664000175000017500000001040600000000000020247 0ustar00zuulzuul00000000000000================== Evacuate instances ================== If a hardware malfunction or other error causes a cloud compute node to fail, you can evacuate instances to make them available again. To preserve user data on the server disk, configure shared storage on the target host. When you evacuate the instance, Compute detects whether shared storage is available on the target host. Also, you must validate that the current VM host is not operational. Otherwise, the evacuation fails. There are two different ways to evacuate instances from a failed compute node. The first one using the :command:`nova evacuate` command can be used to evacuate a single instance from a failed node. In some cases where the node in question hosted many instances it might be easier to use :command:`nova host-evacuate` to evacuate them all in one shot. Evacuate a single instance ~~~~~~~~~~~~~~~~~~~~~~~~~~ The procedure below explains how to evacuate a single instance from a failed compute node. Please be aware that these steps describe a post failure scenario and should not be used if the instance is still up and running. #. To find a host for the evacuated instance, list all hosts: .. code-block:: console $ openstack host list #. Evacuate the instance. You can use the ``--password PWD`` option to pass the instance password to the command. If you do not specify a password, the command generates and prints one after it finishes successfully. The following command evacuates a server from a failed host to ``HOST_B``. .. code-block:: console $ nova evacuate EVACUATED_SERVER_NAME HOST_B The command rebuilds the instance from the original image or volume and returns a password. The command preserves the original configuration, which includes the instance ID, name, uid, IP address, and so on. .. code-block:: console +-----------+--------------+ | Property | Value | +-----------+--------------+ | adminPass | kRAJpErnT4xZ | +-----------+--------------+ Optionally you can omit the ``HOST_B`` parameter and let the scheduler choose a new target host. #. To preserve the user disk data on the evacuated server, deploy Compute with a shared file system. 
   To configure your system, see
   :ref:`section_configuring-compute-migrations`. The following example does
   not change the password.

   .. code-block:: console

      $ nova evacuate EVACUATED_SERVER_NAME HOST_B --on-shared-storage

   .. note::

      Starting with the 2.14 compute API version, one no longer needs to
      specify ``--on-shared-storage`` even if the server is on a compute host
      which is using shared storage. The compute service will automatically
      detect if it is running on shared storage.

Evacuate all instances
~~~~~~~~~~~~~~~~~~~~~~

The procedure below explains how to evacuate all instances from a failed
compute node. Please note that this method should not be used if the host
still has instances up and running.

#. To find a host for the evacuated instances, list all hosts:

   .. code-block:: console

      $ openstack host list

#. Evacuate all instances from ``FAILED_HOST`` to ``TARGET_HOST``:

   .. code-block:: console

      $ nova host-evacuate --target_host TARGET_HOST FAILED_HOST

   The option ``--target_host`` is optional and can be omitted to let the
   scheduler decide where to place the instances.

   The above argument ``FAILED_HOST`` can also be a pattern to search for
   instead of an exact hypervisor hostname, but it is recommended to use a
   fully qualified domain name to make sure no hypervisor host is evacuated by
   mistake. As long as you are not using a pattern, you might want to use the
   ``--strict`` flag, introduced in version 10.2.0, to make sure nova matches
   the ``FAILED_HOST`` exactly.

.. note::

   .. code-block:: bash

      +------+--------+--------------+
      | Name | Status | Task State   |
      +------+--------+--------------+
      | vm_1 | ACTIVE | powering-off |
      +------+--------+--------------+

   If the instance task state is not None, evacuation will be possible.
   However, depending on the ongoing operation, there may be clean up required
   in other services which the instance was using, such as neutron, cinder,
   glance, or the storage backend.

././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.2456076 nova-32.0.0/doc/source/admin/figures/0000775000175000017500000000000000000000000017363 5ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/figures/SCH_5007_V00_NUAC-multi_nic_OpenStack-Flat-DHCP-manager.jpg0000664000175000017500000054140400000000000031360 0ustar00zuulzuul00000000000000

.. (binary JPEG image data omitted)
@%)w<]ifKJc[ʰO^ @־xo~:N@02F-2^4xh<^ETn*4u;aդҺj0]7P_k;PU1lsqk7i٧u/Z8fzv7[8P<3vku5I^ofl|?]BEׁ?h!x֮(Xάw91Tu\|:]JEW%Ҟ掊Z?C'y?hwd?Qp3T,OZ o఺.5/Lw*jך'_4f#!aJ߲{v^% ~5Ωk%y ga%) Fq/ji-~~/|Jl~ Sii ͭY# #!ץ~`>ފCǯWF|-~d߀7Ƨ.v/ucU=MT\*(u,IcWrQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@յ=Fdy,xI5  \cݜ?|R[]U6 [=6mCCJ, ^v.Wc>F'bOHWw6}?xCo~]wH{W鎃h~𥞅#NдkDmean }Mk_AL^߫<_U3:~qivM.&l(O ( -ľKqp-|[Ң+}Ao*(>O"?k9u.ۤ;eeD2ߍ_A+}!4xqSI ʫ^YZE֟[_\\[\$daFV2H k֊_F{9y&28̺Uҋ}.Toc S,+K%u[`ZU({1 +[E|:&/Góü~h/fp4cӖ]/95?Nj/又.lCxcH*B MFt%7kioo#+*Bg[ ̰MbiUIʋnX^/T_JZ;~6mRQkK+WG/ӓ_A sL6HyWdPAp.8P-@vK$\[iEsO~~gx 8f \oguuϐe? 43IZDҴۛ(Wp魏o͟|H_{It2km~mRJMo|: ?[-ƚnѨqMѷr9TVa^S3+/&m^̈Q 澓s[U߯o7ˆ"S/m5示/ zo| >"lu=..-] k~+}v?]ھ@RVF#w $oTW lb߆?.KxzQS"["@%wg}*(<2Jx< ӳWOɤEUEPEPEPEPEPE`^W5x_/>#-%zBN+w)s V2C! K0 >@Wmw >=xoxznuu+ XMc ӗތALմ t/mvL{A'IDlI 51d[4R++ƮeX=ijyW߂,ff~UWɎW ٯfឯuo.ȥdyLRh,FW̴t|/fV* -I_&>t|  ~c ?f5 AWe?~׫i|]&\&fe9$,cC~վo'mLZ 7vwoV]]2Q1ڧRzW[z:.f~Q^-ÿz'Ae*nD";㒣8GqԅHE幦0NQVw—4;]Ѯe͕ ЫߌO/g|7Zn Чx7K !=(Rh+4p-Z:ѯG#!CW%SNkwZ]ZIl-_5uhDmBM :HHĄOB=</ĺ$%Ք¶2QGVp+֠P~u.яE28~_?f|ASEBZ{+ebsc˼NsDkׁ3lN|q^k[O89ĿjK]7[%&~_^]gN;kfg܃.+BN IC<2IVG <6cН5GkxϪgM-$T=Q^EPEPEPEPEPEP-&뿶_tMb-/RF.61aeH`p>|S׼ , mlmu;)׳*[6!>_ݟsd+?7WiV0<]glηq < G$#Ne׃]5_Z{sG?00N_T9-a+iOɿuH>e3eTipRmԋJJrZJ)R榭zj(,O |vjږmxqA =uhsciqj P;4,7;m^0+mf_q=|B $_fK=]NOkZ$j%c7KupNS(+Ut{)٪' iETUQתڏ {V6Q{uMue<}`+p1һk+u&,j}Ury3#yڵ}(9>y7~GW[XMVK#y({ڃ&#O⋝cM:TƙvZg1<`8q_xZ&& | {E@p=2#ⷵ_ >M^/C?Z1a30`d3-iڗijV=#o;xsq50vtV oMxx3ޱEC_485"IqW޵%WSyI# 6'ҿ^:q4k(ZcsP5KXi-IJ0*H?ZE1 Qҗ.G*Thʟً #wSi)soQBSB ]`na,֫KK[ .%G(ªTV(t ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (' jĖl!kM6/cE+*уI(C+ʱ*\)T7e&ߒZ_(|t|J{/F L>Ls_ xj/o xU˺eϔn-Ԏv!_F |G)#xq xU9WNYwIiQÜJ88~׎R{I'ji=VҺzYrq{/!? L`ZzslǜWw7G{}CK?%p]阢 X(4 qUE8%zo PW.֧qqiZ*4Mh[=ھEWџQ@Q@Q@U=CQRo>=Q,@$u4r^hͻɻYkm"EPEP_~l>4.%iK"_x晨%5Ȧ:+7Ph[?7H*kk^4:+6UB02d9 8CÍOEqx:wnq |+P< )  >7/|?k 4^mu=p\~6N2 ?UW7E+K^z?|RMN򏡣>\஫epri+-үdgo񶖣Vǯxd25L_ii3/Q{U.fҎhQVnow} Q5?\x{Z_@M Q>1&#l~[P_aoL>Ow7J/K~IB4Ep(aou יS/=?-̼8xlDo{zSzzE}Dg<# I;^4Đ"4mKĿ.|coL>qH6Z~}΢ɦ?uؤ_Uܸ9x[=H9YrW\G wGg?8ϧzuwҊaXj5^j:}bK{YHSѕGu1M;QE1|q~|qFf1NO8l Z_M madYg!Q,G2cSQSG4!I#C+ v̸j"*u?hslS;WT; t+|3S-45i4[s_VO ~; &qLbD?wO/60Tc^NkO ߴWk_|}֯ +l}ϖn.vHKt}+3/R:Z[xƔ|WQ97jy~Q^Qoo'D wK2xİFx2zւ9)'jkj_eO>&mQ$VA2Kv汯¾!s|1R/4? IoG66 z{AẔL-S3|Y4ӎ墂kP W漇C ۴Mcp8̀08F+_: d1ۼ~=F K x;熾].%c*V./n?U##޾'/-dӷ+l-Ӫ?]þἾ~BsɾZ8,.iR t%4VI)6gks5O x:?ɎR{fǗ,R? Ѿ0Ϣht+}#vi4Þ>s $p+_VA'%O 钾jZvQ@$~^e|AkZE}١OU4>{>JsG9o 8N|)Xjг4Q+IY?敬}Iᗌ>*kqGZyo@y&?*2G9`O7<h=o0˔M!۟2Al3_Eύ-bŪh)0o̎ETH2I>b6?$#O֍\MJRm|Q^i[ |iWz7<=U&|bPdLs5f8/ sG/>~ּ\nD+iмx&;(?- U$>zti$m%ݶGWucŨkƩ#7 -'Z˒L[vrVLWW|M3@ 01t| %M:h. -5(>`__Uޣ{OΤǭ<'éZL#[8 _y\ό|6ռ%:-OD`1\B㑞a M'0E>;5ׁ5^;u'18o2#aM###S5ˢh~ oN*}BHO'%x!q;5ox.qubK#K֗89+[r.]<PI OR's\SpwtѺ[H2ϊbSkY𮙩}~nr p8&i,tiUhyC(_Gt$g׾%<-#p!15HpUO3eG|$W׈BhB{>y'ˏ07!kbي&n4oSsivr$;D,yUmsI,^c4%Ѿ}S6 iN>|]5Fג W>?m<9 Ll?sgcy9czkHB0U?qVNRmݶm[ (((((+?)yu+?)yu`Og7?"_|;FX多T2y09ʅ#9W~,1o~4˲Giw"Xî7Fde5goL?|9xĞ'y59WF `Qn!u+HY pg {x CJ S+{ E,U2L @2? ]7/bĦO.Uw*$Fd)#(7'bld3r_,ki, K$嫰<a|N_Z(ϫ\xrHbnJ'f g>$g߷?:fgA ic-ԳEs7:ݍ= v~~ܟG액~XFGm/-o.mԎe⺉eT+P'knOյ9Vl:HI%U$kॆ1AeR  _F? 
<ȑMU,8tە6(>տ'LïhGQu[c )WvH9e᰺tIcS(lffom0=Qw6F޵Ɵ?-uj2xS%ĖX+`)G8#1^}l*\4V~mxxb |=]~c o;_亱ž(}k!k;Da\7o |AvL4ow%n$ bnX-rWoo|+]x'Ѿ%?t/,e`hm+*/B>?iYᛣ)+C2نG]˂evaΚKONkO?Nq}}X`[NqIKmO~_Z/ \I 6^laո`1ǁ|i|~=ܗ^E‰ 2wcw>'|=>-?lxŞ+%ЇO~~9.%ůju 5fmL@݈=5r2,:켽l"ץ*QW z]7{j}m}WSG=M|?gqdxvk-?#a[ƣ-`7GDOZwZ]8#`G >F9VВz/O[Mw3g͎xrL׬5.vmyRB_9jk5x ^~Xei̙P-e-'' K Ya[ğux4 "OF7Bsa~^/4]F, iVH_[XiƜ`ꩴ-!hŷuѹCp8,1eꔱx|UHAʥ5)(pr'UJ/⦶?JAM9Fυe@ekgݨ^d}gNGMq^ rĜk%V܏KYZZgUoz#5fuS:F)%ъbE`t5epH2#fĺoh}ry<($QF.X2zns&H4~iCi,'ֿ[P;ؕih< ~ڿQ_x7᦯SN,mNEa24(lm]͊MH߃xMѴ-2;M>ơ4Aª ,JF[^|h vѾvIuӢ+?E ( (O< u6Kb2Xhjz2~vo8OeWǟ?~m͟|RV= LH!mҲdlb!#@k_'% ';.uU17e"+8F Qj0M'eftcҜKt$BI\sXկ {9<~%-we0^u%VR*s&k o|Ea㸱 mJ.fujI|ųB!=#Ct Yhh=B+; d t#@TzhuVNz- I#""44P @aOU..MnSQ]]roҍߥ\ {~l*GG(\uh~(ML;،W~j·]a7 [WX߳_k%0hW2*\l}l7GZSœ$a W^?a_>8_/ /H/0 3՘<F1_?iς_?~xMק <3DχP ۼ~7Z|J}wOx[DwnGlu;UޱJnl?wͻYjS,2R62ZҢ)E>~DW^+!ӅtMnvuDP4yxG@˳j 4zkmZGQE}W; xc -3zw 3Xg 36*ƭ)8.f٦Lx,2Um.`[]doJA#9v*o_?<5 tui}g~אY_y{ۘЭoogcnBzupl}bt|B~5`s,28nVx8K\s^ 53:% 6t*yh/I~^1j^.h>eeqǒo_?%-ȌO4.|0scmx{U_g^ Y c{OR]W),)$nFNCЃSOZQEQEq:ெ~ľ;&>R|Ҷ34yTT qr]YӃ1uC9F)dx7Cg_f= LUN k4W:{V v'SR&Z>m#7ßZ-=YxZͱx#$VԆV+>8&l֏C<$.'Ԥ97c2~&i1lcN=M>7ωYh^(%"u]6' |3h֕Կ(~/w.:uV8E?ҏɧG(ܟunU\2Pzbt(绺>Aыj>A3;9r: 5KO CLjmeY^`n#cIp$c'{ƶ~6u^_|}W!}4]NY# c\/RgG%%lgs(qΣrOj%N7oܾjW;]oBÃ7UBT xW~ x.Z7/  <7_gڋ /M_ i1pe۾A< ׃Lf?0h뭾잯w?ZȾ7\Ff:٧-kiԜ/N7M?v@ wUG?4q/TqU})B ɻtKc̾ |I*~@u˩jڞ~ Kwu3P"@H +OxW_1mQ5?ppr#CH8ۯ|ox&Y \VALYcy'9#?| + ?~" 3JX]?CW+8(Ui+ EnOD.g +'~cڄdʆvuB5|it hUFA&aR9l '+1ylCeuĕ?ڪr꺿7+W0]mu76ҡIbC# _eM2Q"^ Q]~$-~|_ƚOt]G칱r3@ؒu-O5_ ɣxF4$FznFdo_?fxQ,6W]iǗu1ѫ_]p'Foˇm?&Iiʊ@௞Th?>Ɵ+1wo%竮I"%R~:=߂E,na YY ^j_/dZj ]nmo_/-iZ)hXZ6;nmԟC:BG+Q@Q@Q@Q@Q@Q@Q@Q@V~iS3~ ޓiOjC #-p%W ]~ǿ~[A}aKsmj_Ϛ[2YeBw(w?7kýsNbiwBv6o)uS(?R zov 5u 6Dr\Kp5]dd3;Qz(ibχ4u(<7'vPVIWT+g/oͮ[дs& $W;w#g9%(5/ /^9XĐ0~JiP#odIG>jiޜZ+wd}C߄A:¾-LWiSWVC?7X#[Laa|[X<|D7m"sqqP ¾Ҵ/CiZVl--!X}T+bq?|~d7M]K)^=Oym_ĩ|~mߛa_)ڷtZ[_銱:$݌MI&)23pO?og~ŗچ7DRE}ŧCunS/ЉC|6ХԙT{ ߍcU3lq-o[2q ;?޺1Gz⭀R\YZ332LҿgMӭ?)7_L^AݛKa!R9 NƷ^xSšXkq]n&_/a x$U^l  >M/qFs׌q~>= IuxxgZ]@:!`88f}Zq/֤d.?(< de-|]໔Q=Cwn#$ 7Z/x? <[=jZ h+EL.;.ߔp{jW\ź*ӣ,F9[8 P͜ xu_(T| J mg([[;IF!NQrMp:5W[l?=.?#ΰ䄪CTzS;)o[sЬw@ʊWiZl kDh{|ڃ^ #Ѽ*"bc-<ס|jZw>2j:¯[iuKBWqe`JR1!W*|coyuS◰{_ݏOY[L?~Ӿ$m:|C/kk` 1iǯ޾2׈~$~'ԭ>s>.Pw`{yPm#z~~ g[Zj"|IWP6r Hˌ$`B }OL{<OY_+Ï)aaχ<{1?K&]8f\1 4dj͎qVI>-:W'YY%oG|/~&AgM+H4V}fXȈ. 󐊤 ć1ZEU ?bwW:pծ82[ڍ |K$DEU-WrlO"(Ww{^ߖ$!'bQҿ[> S#e_O elݔl9[>lݔl9[>lݔl9[>lݔl9[>lݕzGl~5Fů{XY}0H}oxg]𦷧T,nê0cᧂ~&xT3Э5hTFˋb9̇CAW>(1| mǍ>xU4xvK6\\睃.lN~?3}aW_Ƃ_m.m#{/B?7u \pw-,:Wv]uk<76Ҡx# ;Z7NWa궒i^&+sG*.mGs&[sJv?<,}qErcp|]'Nh} ^orD]b]KiWjw^O-[? MK0<98FI$~O۟/ho%VV~诡fCȯnm,&ÝE#YOaw_Yz)X᫡ LHdTG(205T$zKzz8sqVXlW\F$ԤyFGzo mHMg2zrGc\ߊ>3|&]?4o,aV *_J:_mB-2)Ͼ?*&O[|J~1PY_irS?k>#8$pӌ9Gk +ۿsZv8Uށsycm"~Ҿ,4[ Ӱ:-"Ǘds_ g um_V-sz8BYO{]L8wKhF ׫&r^ BRV&t{/SϾ,/?wwOtc|Ҷ^Fbq`q^EB K|v"xUGRܤۓ}z+C((+~2M@|/Z^(nkYgBV.Fwj(Cqhׄq[} n8쌌_vhXYiZ]+ 0F `U( 8?zLgH'W[x&;MmwƑm 6s+&Ǩ]i_?fWAKN^lI>y)$*?v?H=n+ϱBS$A=:97 nM5⡦[k^w &餖k$%ЅliqvU&ݥ毢?XC2uꗺJK+z3i~-moiz\[`3;Gjk3|$Ǫ6>՞}[º'tREBpd>NO6T # {:u#+Uև0q6N]OѳEG99o$Qǘo>OOky"$QY<#wSy5;LC.S7} |yHl]O_ C ÌInI4E7JVE5dp0FA`sm/xc<J^ւr{/ӟ2|HWៅ:40|ռk~֮ǩ~U%\$*3_|Y|3s"Kx@o_Dy8dk{ Q|>j>DVW%fL̻1WsJ={=[ (jNZIwI/8?hovM[IK[.뀪?ue9RY5蟤Q@Q@Q@Q@U{nHG$($ߊqM!6,QUnteY\C;U$v`j(C [j뮋mY>Hg?ŏl/{[{>kK{yWkt#~ݗ~[ qf?p(NТ((((((((ٶXv*x.XW7:׿~G~~~?iڴ{-㿝Gȷt^f;K q \[RGe4^?uy Ւ Ӄ{{X|5Rxno3Jʅx$B4p}һUaR pwOT~Wjaq$M;4((((((((lP;V:dwŞ-+L1 zO/+ >\sYU Q曲<;eXgT຿-WgїWV:t0YBy$jK3 hh%Sd2ԝXF#QNcjf^#Dڴw"L,c;\sTҾM;|:Ik ֕l&%5>qϔ>Kׅj#%?3X8=ܢհ_M{_zg>.nl5-_G2ÍW?s'Ï5ѼZh<7Mrp3讬6 V<$n4VRomw ( ( ( ( ()vW^cgp7Vp 2:0!A`w? #&k^m>6OЪǒv ?9e&tVW? 
-RsgjK$DHum~QEQEQE~qͿo xwO?^#~)N_K畋HW ]xÿ|N/-gLveۘ#nKRT ]~̪FA`Lyw6-ô*ŧRI8Xn\;ӛF\C||<8J<^]牵BZ9A;s})y>~7?gVKc_^-xbO7[QtuPdHd`_'|M4|Cŏ ;Z Oi֓yѺ J*][<:n홪Sfϼٷ'^o_FmzV-wP1Dg >C~fϼSgOA;C տMd-M>0p 7#`+?^oWýO3W ,Pw/ $(X| >뿳o?Oҏ.uVV J42{ui p2GH=86}+wY~_ m xV÷pjk]Kp],Q5bRoۑGa_7a;KڇFh)cVHcN0j˿ٷ'GmW. \a?ݷ]G!7 M~i?ot!l}iz[G)KL+ݤѼ2ShCڳ6}+ w*{h2TΝyxRn:lV19W*5Y|XyZ/Z\|KtXGCXd-uD/tCn#>y>~:cgxeTQ,95|[s@3Vj"D(4:4jf>2B}y>j>>Agy$$9|'=x~otK_)cOtERHT`ZF]Sτu'I5+?zp,[icweJk䐮饎t֑Gi7?#kϵTuv)Ё26C_|_-{~Ė촕u  fkf #HcTHkNaFs:a02mڞ:9Z?%dZ/Mw_v*A5"pk߳V o~l>+4(((?_ïV6^(e޹ Iݔt68 D?O쑫r|6ͧOVOk!Rq:d,UCw_ ˞/];]M+jZ)'׫m~+C-pѵ@20~\s_FWc_xu6_쨇>O|^?goxCá[\ɺ,+2$Sbh>\B>DžKlj6U_[S_<Oß mD sc'}x25q] Ql<-wO悊(:((((+_~Ӟ.V~!h>-={ڭ% J$fW,(ļz}_8?nऺċ-jܵcM0h_l,[ |3|7OL[.fh6"k2pA'+W]-wgo i<5c|@؎O %lzN6#9 i/P7 O|q>Ԓ{hfyX*e$Xfe w:g.~+~Ӿn-M7썠DE-pM~I|M|&Ԧy\|`GHM^:fC3\[pi FUxic?>|zм3a};xz<]~qYȭdҞ~m@դWťk,iwM)Y6cNz:K IVS Wʟ FOnekm7gV6w60=~[ \БXOk57|u۹/!=O!:Z\iI$p $O=ji?g5KoH!x=rGtP1cm߈ ׇ5FèirЧ[ˍ³n=3x^d;iCGj,> @T aD.P` H 0?cw45-m)r]jvv> ݱ&K) +;7VZ'Z*)5gI__I4٦@mJK* @pÂCc W_x_K׾ש35sj$m/pۋ7猒[j?xB<_3ox|{HFsqn>N*#yVg:WQћD19fQ4dT(= _qY_ x]ZYzs28jQùk%'{-Tw?žm!#E3 9u>O|'_ ze>ߧE{ۏ\5™ (|2jſ¯aҾ?PK\` HI&{?VMʤҳj 䓖Km' K N>OO|'8=o.)@x$f) W|:kۄw ƸEڶ &,2H)*`M%ۧf|//TcR)%{=S(+jNDWp4כriGc"?~ ~8ioC% ( ( ( ( ?J?_֥E<\m:XH#*FiJj3R}"S̾[᧹hϑEϚ㌧:z`*dqP$Q"G(TD @aOط5k,*T{Q\XQEQEQEQEQEQEQEQEQEQEW +*|O I֦& pCG2gXv#"**҅H8M]=:8F OH5(i5枧⇀<}/|C^[LgGLom6HF? τ"=WWCw6O̎30 ;>Wzɖ5r _*ǿGxjm-c.DC'eէ)po@Xrc_c&~NӘO W8MJM:O]>~~͗'mDKx컷0'}+ Y^5tetae9z4{J_]8π}O6úSޱ-$;1QEzQ@Q@Q@V'KxK—:牵{ I~``3Տe'JέXRV^lۯ4w? =kNZNw?DqqŽW_l/x51GIde |9Ujoγ?g^u+ˤEzeSqq}zc:W<|K~!s:po5-)Cus׾6uQu/0N˩'x0G>Z`G^8_ZԐxīKKۯs)jNh~𵮉&Em#E8IO$MlU7=g/z#'ޗV;9X_| T‘ƋjUT`((D4((((>~t?KUk+ZŠYEq0,ۘI,`&ؘgq9#{?]ৃyA}mxDaEY@+@f-oHy^K*ßGᎏ/?< i/|auy-᳂9T\2S#u9_OJo~Ծu+ z\Ayg-KP)"rUh_ |=uip..5mhܪ?iAU_5M3Dj6FeOy}{po $yor?o x}bmM4˛a)qlved| 'x5ǃ46ƫ~3wa2gY0Tqj$[r26DjXB~}2((( )K? ~芯9C)@Anw̟ NM4i-zY$ip{&k=-?zF6g#lh,_ac߁u_xa=/Mjn'.]~x7E-m3[s6 wJd-yn+Vʜ 'M#Yh~.m}pVVPC)־b)?r~k\6BYd,׆)숾'׊_ix9IOUMك'<&~^=}G[v4 ~:dML( Q-"*. ywo;||Wᾙ[5ZżL0 K!0Ἵ"5!-%>W?Hku~^6^PgpԘScךgd! _Hh6ŏ? ~9x/Ji2I5+.?"dV#89 7̳EA-GupE+C~Go'_3,x^_3ϕGwsS##~~Ng᎕iZA4QI xy^C\~l]y~!Ҭ.u3~NicNrǕGv ==nGk=ީqa htrr9'_z/ï{㏍>$[ ^"Vi3f*|_ |:/;xb4X7H7n߸䜜ϷIv?5|Ex6c"{|]7ǐ4zUޭnRvXFnˑ$W[?@4h!o2s_/~lnti <ftH#g&oʞmH+Wry`) <7-~>Au;M{L7Zi^ᵖV2bi2GM,kE˯Zj旿Ѭ-(?y <^s]R_,|)'Ew6+;E3DHbi88澻%OZH>u/5 .%V! 쪤+E 8_!j>.sOA~In%Cca밍w7SA_z }_>"+v'i \VҺ7#fy{F>`p?5׶?i #CF˥\Md! *g$VpF}լzƉ BĂhTE|biOh7u=,Zt?*dZ/Mw_v*A5"pk߳V [_袊3 ( ( ( ( xVD.Lg0Gc[tRi5fgV*i4i꟪?:~"Ǟ 4 7eIs#pB8]퍭xsg+;Qq VF)>!_U ⯇~ɑ j;_(<9Rz2x S|wt~WV/+ ]7)[lv:t?Z[a7 n#zt V~]~:~'o=wP/Au [9ů"@H?l_G|JA<ľ|71d|Y䬹e?Fu$RXi<.#/8Oo~I}E5$$HC+)`z})蟦Q@Q@Q@~PCc*~:Z>Ro ү7tP2t?W3#oٟ_ e=+`觕#4&8?h8'i5Y;jn.n&Oʈ 8QKO(ME{S ó[5GOlLƼ|WKVjWh~~D\rKɤXѩw'BIžzwv? oL=eдHn]9N3J~D|\|{~>|TTњuɼ[˃YOYX(q>U|y5-]hLsyAx{W1#wM ]R:ߴpȗw3]Ÿ>\Hߏ>۠r|6B%NQwdw? 8|ֿxcqŢxMG¾'>Vy<!X1>|WoZhn?0 Ƕ޼pO|>=;oiq\A͞9 R:A!xڷck1F1tj^|v<ӧ*>FQ D-~/wH~?S}xo/m/5 in@ w|y2/&PRBOݜ kԡ4`, &ppy7:6d,Dϗ:XKY6'bQ[%-]🎾&9mdl_o?Cw/S֓|HB[OJ3)> O5\+U+ KFI?a٠ÿâw=SVM^(J}ɯ R2< |mu: 3EB;.HO0bA,@xrg}a[隅QcYg*YUvJ9𕬗2ƟQ{c[ 9;?!'ZեT{5ws:P.(a9k<<ސ$QM٧_挾FG"7?WyʺuْkR ~Py5}]B=k?>yq8ߣ[KQjKɅQ]Q@2I#I"@OA^}7⇅>6Ğ*Ӕ\dG>E~vͫ|}|Q= l$xc8>P05ʼnƓPKO>x ֎7_?7+ڟB|]>Eh쒲_$QEQQEQEQEQEQE~3l Go /6y|\GOh sv>|Wshx xuuHb-e볻'|#xo-|*+S7Z )y#ncu,ԔnHʟ|ZAoζyheZ?}Z𗆭?9񵾍cu/\i׺-%DVV˒s}_^-j:^-R+=wR^<`@#@Hs7e}Sᅿ7~$Y]FM$X4~lyEgFۇ8ƿ"?ړi_>Ckطů xFmG:=Cq#J4m 9xE|]o ]xo񷇧*Ri{f%KYLZȢ{ izmΫqigܜrȁLѠ(( )K? }*?x'>(Ҭ<'"cC1 2B&RfOC_6|zҫM}C+G_ZGʼng? 
|gCWc,wr6vڃo|%&D?V~X/ۯ+0X>3|Tv~(mZEƭd`3;n#iW *+8\>kno2Zum":8 EiZ*sY͚5`6zFEE >(?|WCir印>;UA]D!ivc7~!l~po65܃TALoL[S[6=ʇREpݪ=_Զkw+'ẽ:uk.OYBb.4i9Bo}cZλ{&ufy#FD|'VZI^Qgu/k+,>q,=>rwUʇy22'cI"܍ d\eS9<ѪF%5?V5?WC&-bbןT艇qֱ>񭛎qyNkmuKT: ~}D?ȴ_-i?aoCbUko !&EkuI go?_p͟;}_EWўhQEQEQEQEQEQEWf~ө"--Dg͎++*aVQ^WrUk.mmt-໴3MtHVSv5şؿ̾!Ux k}G F2zם1mi>h{\/e>_oފs>梿2<)I|]!|ڶy6>ܨ76v]'l+|G_į clS{f?,g Q VJG}w qWJtG54_;Q]مQ@Q@Q@TS73EooY\*"I<=hi+ZQ x/S+ִ Ih'{*Oa_!|Z1ɧ>_1 A4= x~2x|3WBp1\睇96`(i~ ՟f#Ʈ!r:?Zkp'Ls/#u7&5ͼt-\Ks">~w: =j[dͣxoy>tq_eែ>xTi> Э4\c}$cׂp3)C/s>!>êqoTZ-)C?k=F|?]#Clt.6ig cTb$GpEF*lW//x;/|+ >0Husc ÐyWQE2_FW׺?;KE+i矖Yf>я@++"M6m5BI;fy7G?F7=EtRi=ȝ8ZJ\>-8k٫χ_t?6YnaA,BF>$>gM_H~ GJ3xsTq$ڷlMj?OM˦_%RSŚ\?B\V;d [ N7p_FNQVm%(5$l?c>__>w\EK3;87+aZG]tx}ᇇ'Å5i{m*sȸۺw%c/5~F'RkkdA^e'傈:1WƉ𖟠xsH4  ZcG+YmUue~dxLҮg:~Ҵyjww٣Wd| Ew;d]gZ_XH9ͥ,AH^@FTq_Q]AEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPE^ s~>V ZԵ`Rs 2X4Q_Sb(<[&$Tk-=s+ds_V>*BzOh[ϝV" .m;p21 ?xw?ž8&}qnv#O yjS{Ljk(&m.4]NH43$ͳ2ÎYtGdA-N[4Nޤ+xF4Kkw?~=UngsJOhEY;;+$믑?3]+UsJO}_k_QočMW\o(My᱐m`_O+}N {Ϳnt.igOYdu3**)^=eqI5v՟jf=/^oMRq 9Ue`A h2X`2 C WAxQujoFߦH25PB?og,OiR<7 dHp;Bz@>> ='U:!V5"D@|W5~B^O9$"|_DgtY?,6~_#B"aVizhD}#d9OT.'?9$ܒWm~ۡs'E:v85{#/Z,vau+EyG+c *ʵG+Dɽ">Ե=7En]cPtX̗70£;G5_(O? uImZi<?ıZgïڣ[a-g9#}oG93}4 Z }PcD«L $I90/2Ѫj/u.LA^ʯ#0+ = uK!N^ 5υ_$u7:gRqUF w}3'ߍotZ 5Y_3PaqaBWzv7:m鷖se)PVRC |k;#bi5oY5Ė=.\w Uo(YL8}8yiȇnp7J晦Xa)<Q#| |op%౏l>"WjUuײ}@g+=x2ڤܹm? '=sH0X>2BjK? ~8w,i^^qmyQ]g΅Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@R2&:('&5ٓ0M!~9O_36zOq߭ݥ[JLwAUgU&EW>qWĿS|E|iiR V|7@Av6U;;|CwF|I*[h.|6$eiGR$1.,qb*m!H֛TAK9 mڨQF?Q(C\RXnI/kYW_| yO;ڷb<9ehO `+yR+TI_zs־O&RM7@na-2:b? ekUtI\N4g9(xd8e 8:|"tkx~-xN[mճ}PBX& P&I;rkjoI`;3KI| kl~O𧈯M˥u&]}w? SM?WJ^+5hB!W91=yQg/}.#`5*3%oAmWnX=nW\iůvF|n}rڴxR'k{}rź?/qb%Z`;Z7m``њK9YRğn~MbOUP&-bbןT艇qֱ>񭛎qyNkmuKT: ~}D?ȴ_-i?aoCbUko !&EkuI go?_p͟;}_EWўhQEQEQEQEQEQEQEQExτм[k\a%MFe Z㯇+o|[}˾zY1_W.'Jĵ'=JXrԏR>];myj4j8ž7F}J2~JմsjeoeUVRA4GŽWTHkDEyze5v+|SD7ܷ3< W`P#zqG9b05Tkt(d(9c_ >1l%d-oa.}|Ǟ7(E| /?ho?ރxsv\\۾ۏ,煅HjǑW'N]q7e<ZiAsM4>L;QƟ-76(-y9Xsš-hVY_nW!ߺz=5?\48!oO;6 iHQp: |N,~U|%{*5 *RG_cƽ+ҥJd~e,JJD}vaEVQEQEQ^OO_ &|_/^woE'BF42 X y+_?}+1R:pԯ|? o1i8VRw.~`/WaԒ\6wvI,ۨ?0'4^aOzl^9×)mc݋́uN*L$(((((((((((((((no?Hu L'j4O iɃ vv( ; WK>=x%iJZþ2i@7ATz#^ah>kMko)q{8#xQ}/al¾XT'*qJ-zWVt º^C:uwS$ƥ*OҿfOi.< g{%<8R;{`H&qȣ|~AڂTd/6^!'tUӴȶW w Gɰ~ f4y}Y?N$!p~bgs_O]]bxWGOܪx_>34BRBEkᎄ7@~;> x3)yό5L֟Ot9.etDurdDu?.ܲ,pT~OR 5rKwemuۧ]u?1 հ}l 8šyVw#'to_]焾h~ z_ʽᢄ!r0G¿ 5GYom9 < }hZ/+e{JM1 J;*Vkqjl*[SKԩI7!@:_?Q WfҥUڄ'W?hⲝtkz߆Jsoy]ƞ1dVy~QHe  ;צ~UxUцXdiP`?M޿|6[C˷¿XIF5ׇ>߲ڇ˲18ʓ$ G_5h-}M[qenE Vdu;mZфRkq/`J !_9h%\$Ο#{Mk3? <ˁ_ZWv"D}3?Xk^2<kOg;UWK_[ qRg' [VTAqvikt0*((((((((( )K? 
~tw̟ NJm W/du}sM[MYfU.?m?{kxwBZL`JFup_viY\ Z x;Dž/RM_~?~?[Q֯lؤ#m-k%2Eyr${6VzD!ۯ9kB(u\9!TW>$hm e"]+P}8 RFzm=+ொOF>7m LQoH!H n2Q]ԣைIxź-m?$<8p+_υ/ᾳmSߴ~ŬeKX@ah/9*o?f#~*|+xw\/.|3O-on[[{6$+uCe@ Jz~S[j?/ok#?/k/p覵 G}Z{{9ahT|z DfaD{ 9%rIqaϊ;PX% mk׿W.9sF}ևdžyF3xw,5gKU?4KK =^U߷$PyEqow k 7ڿ;dh6Q-޸ViEgQ*ұ?s*s]òj//Ow?^.ׯq$RHmu'}Pց/ [h~,4M&[HB =rOs[tWVJϩ7FE:VOY?Y=~J(>(((+O}*:ï|>KZ{|.6K"poVrs_U?_st߇/L3Uyѧ't@ax*O?e#w>!5OZn]H3;9J'\_xoksg^dJn.KLI9| O_Y`_Zx~ XULޅTW Ce6Gjtn"iw|a_G-u?ǯ@ fGтesҿ]<~iJ{i_9\xz-؀a0]\&L vfYo>@M_ > ?-@IKkxg#0I$I$$~S?&?R_*V_4QEQEQEQEQEQEQEQEQEQEQEQER 1 dx[_-~kcĺuQyXP)+V  ;VRGiv<)xY|8Y,3 0g8.RvHچzJ.RI]%gQoڗkn= ѵmg?,<ąpy < ˸]mK$ =v7g9GNzW~~=If4&+ybGW#Û5 mfĉ&rh9 Ydr*5SZGU@gx6Zelka%t\0 ڇ|gI} v񯵛˴XR~TE iQV|mL|~k_'_xv3Oso41zcCbUH<1F=]}L;Zӆos>p__< aŔ84SqK˚MEmQOǦhv1YۨL}n*A5k>yg޿9ԛ`ᩨSRI.-Ɵh>0<9}N2G_=̿~-xVRR5 2QYi^J^aG$vB_N.k$׾ 7nzj:JRxhJV]jݫj<(EW'ڜ\371b4_gW$ڮ]?j18դ:'R]bUmbjbkEuWL0YMYOPq]W$5ȻvXS8\5fygŽupa涷d'*|Cƾ&x]3IY±jѩ?/cp= K>9+żG7ydǖtQq:s݃U~SxFuTSyuoF|$dnvMoQE(Twe3.*u $qoC~7}g_^;.-0RF~NkPxk_ Yu/.=%F>j ~9~ȿ ~7u] @JxIVI#g2N|^_'S,iXGW?,Ҏ0jǷV-P<7VPMż)bpFC8 A-~+nal d[YJs셏~| >pM??uiUeϑ' 8 (%^J˯ٟ)ƞ fNW,vZCYA[6tQE}!QEQEQEQEQEQEQEQE])'`ӡқn5ٓ0MA_3t n[v~۵tݫʤt-VN6QCwjNLtϮC0k۵kֽ:Rh暹?;q/lns~Z{N1]e{|FE[F \BWohʧ!oS_cэ]ֹ?>> k?F5sbe|/x L Kq޶:%z*?>dZ/Mw_v*A5"pk߳V и[ϝ>+4(((((((((((((((((+Pjτчn~?[-HU|LN/c|!of,|7w%F4NP|sm.N3^w#i=FHRxW$Zͥ oGo,I濜@·vtP_?fh>I8]V_vv-N4q a?|^qq=/o_$[b?ߴpuW𥮮ڦ+u{$s?@Q@}{k<rx@51/V6rV0yTiN(+&} ˶I5@g*.NN5j(((((((((((+8b?c?AiYog?,6ZK!THodhϺ-I|E r_ܯ::h}.~fxǟk3WnrE?,Q\嘒Iff?Zx;4vh;~xq< j3÷Yc[ec<ğ@qc^մUqPwoV?ӯ8Nfi+JkN2WQfӼ|-k>OV6ʍ򯌲?+_1^zH{76bEEv:}0%K Q`VQUSZ8ӜیvM#eLe\e ZےX"plF@w{!4HI0rj'HSjOcsWZGY5z5\GZ_;⡉yַ< *qSbJVժ"GV*8W?jDV3.#ۧN=~m>taF?ϥam>eR;Rj}O9&=x5fs<#$y?_-W>7η] 4cFisp&XvoOŤv3ZAʹRX@F A k^:)ksb?Ԓ=2}/Ms=vYG9uzY[R2T)ԛm٦ڲkm?o<එ ᧉl+=O h-c_yGQW9T`Jj ngT8du#!Gq^kqun 5x# 3jGOc_$j=ſ ĶՓ}F +64{]g_?63R7[tǂ?LgֲK.xv^^륙ڏ|mi׉:%ӆY)tź W>1;g%"|2{K |@lm8;ISSߌ 3Ě!=OTqٔ??'^ь_y7`ɂ@g'6z+kCF9Nc/R_ޏoչ=:+>% V'`)"o{e=D;OAgl nݦk))##A׽xL7-Vת'c\W% $wח2/m,>)ovoxÒbK i iYVhnQȁYP|oxGozOC-# Ke88 = 7Nc-ΛpL;:،H WeN]pM̰aaҽD!x y'Fkkk?}7{{=Ѽz>Ӕ{#IF~ѫ=Mr'>!F5sb(/0y>a>0y>a>s){>aj}]/k6~A)'ڦN%qֱ>񭛎qqU: b\uKqֹRNno k x̓\J#$f$k̞>ɑh8Z5~~īOOҵ/xBE^5Y7+'QW-(~JM~qY;sF{QE}Q@Q@Q@Q@Q@|+x?o3_ |0.xRyj~Lb 9el3?[W/+[ٺt}]/_ ~S<;wz/m?64qK42.9~ fگχb_KahnEk, ATo|x 폍?฿σ3w_bKĒ>$O|YxB ]GX2zFU5BǾf;_xNڝZiZ ky<1Jd3TOf3|2 c|J#ƾ.׃~;>kF]\ĪTƟ'+D~ IcGh񟅴aXIF @&n+sm/WísQkz 3X>UɄ 0kG1<6ςUi ;[{ʸ-&e 22' xWCao^&M[}qǵn#IG+ mnU=.Ou?`7&`[hbBYݻ@/O$'Y[\^F- 9p|*Lj$;tŞ>_D)_[GE~LxIb:G[/u/^nokI&,8d8rItg${jhOߴ?>|>xŖڔPxb+x6Z+ETyh[82@?DUoxBT񿋼12i뚤6P!+(,pxW<⸥v\Ih9XyDW_OĿ;Lbtַf/ > Ir@ vpÜKS/Y|OQY&vԩ%;Iz+ϳ ƒ*7NJy5{E]>XF0JvV-Ɏ q|k8YЧ:k)~ ƽcya2jdž :ZMwP@VH9aghĚ&ˏ=E !8;W~WGuGk-K$宿y#MUSŧvr:W tIDM|WDˏ>DGk]N$uJNJ:+0c4Nu%NUorSmmō;DYTO*XJݟ5i^[R 65Ċ:{X4}w]IE6[4?W1uunnJC?~5-lmJxK .f9~T|ܜU8p*|b>ykBΖ6ejėiZiR ,', Ox~(KߵhpOn$GpsӜ׉k(JBվo$%b"|p<0'K}ce~:x2_ܭ#Iy,F:z^F֫P[tZ}h|8 V<Ŭ@&89?ۂ$zuKxV.661RqIk!P^~?Cc昹(SXB;M75e\\/ĭRxnYn/,icd*U;XwSK>gq&t_~ ]՚D;fMv[c6 (NkɞGIS?[ڨ:vM(T˼ݒfF\HKT~OlmnWP0x[?LѴԱI0u2*Mխӳ?82G/?k^I%cI{|;o _+^xk»n|I;MIdGw7C_vgکK[XWUB a8`h{r?Ɵq8\Gf5prU4+5y ( d5KWG\Eϋ.ƾyIHIsj'>Qׇ̜x{všj>#{7OⷺfP9t*xVD.Lg0Gc^m\ss|?17Kd^חs6[6r,x⟇?<ȈOI eObkѫ/y xOg}lX/ W0Aqv!g,Kּ9#ߏZ8]^+#[|1C֦S&!r͂W{ E)e=cJ2Z_P[_K|= d{+/~?=sL#2콖c9;W|+|yȯ*==W};;z%inQeWʏj+_Ÿzl0xsXObBcl?cp\⾊idfy&6x<ƄUOWO (((((xٓ0MA_WJ_`?k۠:?rFj-W?mڷ5R:dp+32>-r ٯ26cAM# )hƊTtv[֍ZuA't.oѿTh;7kL|MjkcKģ#B5/֊0Q sƇ(gQQG8X`0z<( `."yQQG8Xc\}^wwOk=$:C^^0>f(s_m? | 5xEIegrXr2P]< ͵-H J&0znIF1Mɾ-O>>~? 
> ]<дs7+g`*|i Gy a.4prLq'8*8 hOj~d>=5D@ 0 L၏<[/'E f;TT`9#w|g!.<9u6`UZFٷ98#=Z?ট |1>]v4/,&rHAt1NB!4c 1H[ccjۇG^} עvH xCiap>j?}8=Q77;zŏc?o(xvoukwOX0Ff_?Q=c>"'xU4VP%*lm*o?/c̿ /_|Amns'ؾj97ơx\k&Kϋ>3o|K;e=GOP[I 1fCIGlZ#Kj6q]ڻ!RȁАypkJ>Z_9SVNjpjNzLDi5-K$hLh0eW 6W$@\DwM>Xؠv\L3H[-k(((((((((((((((((((((_- )j2ZP]=B?O"?#sU_;[Sy:GtXԱ*7s9 vϝ€<#e$6>7+,XF{E\4DEIRyjX$mZnCQ'<}1+Ol)d9˰I sdM;Gē.\?}"W>ws&8z|hRkGQ-H\nccm&3=tj:{Uө8FQOIh4|f FHT]ލPoݔZ{w߇~xC&8 @-601i> > X,ij$rhʙap=8t_]㸼QmvOkp[l {{Ea@O ӊډY-$O__e{vn>3*?|~o{4Myյ#xc1mɍSs9yx8AoOg-ofX1w]8E/mB !A&m׼kKA{ jDhیGHӥuWRj+|g;t:Tbw+Is%iZjg)[O׼h Q۴~b2tpz#MvWW?n"{/aq8'1!4+kA_$8;NTץS8Fq咺9q>2b n&~]k~:~>(g=sP/Au [9ů"@L+޾~ h;A<xߋÿsl<'2t#7p}~Wc|;^.Ux'79Zt*;OXW:I IVS S cuh[K>b =Los@Rrv3_n&~|[J?LW2z8?vUJm!R\\{|DVz).kEWyQEUK WFuK+MKNqku x*ЊE&VeBr j^?b=4iqe#%{wOy_?lO_wÿcšg=JdQG8Lwq9̍ҿ_ï|N4߆i/Gc|n0ѿHApϳS_?]C* /i1Sw'VЫ⏀+#~~-֯[ g&5<}V7D~V K\ZzqIT%c=0-6eppvz,x'~*m<$6ֶ-XBNS!FIWџ xb  Vy.px8 5f(eTJu$K^:xSV3q1J:u$f ݫx?1VKMFkB(zW:Fh:$ZvaoYGb03{O&_/G3\Kl|S?&[C{%Iz/(\yZyJO=h(r2EFV\" 4Zg<#2wX B2-1gx_HJS0E|/ %Z|CwBlدLTp@xpCI|M}w/yk:9y+x ^[~+i?XjeW ?z+Wſ&U,[Q#2B  >' :oj0u=..-HM5t*dѻEx~yE_i"ﮤEPII׉|]|љ_žC}ʯ+ooď2o߳wلв')o<0s"mi@|)g???੖~ÿ5w?j:q592D8Xb#yj)l]| <@?q7?"<(bY>=ᐮ͞V;nˎHa>#GX~ڟ >Al߅l$ԵM7Gwq!i A8jߋU;|I¿7tznX > i׾ lOWƏw#L{{ e 1I[HJ+BYw4uK$ߎg/kZ˪,v-XnGʂY0 o^)?>24CKmϊ~! r=#ٙfX l?08n1_|~<[?7^WU'2lQG2s< DQu ٛ~~-xsljI]d%[ xci[pP|?^f ʚ[x ؚ_6˫jpvS˶ $),ҝv'p U&m?I>=?Iz^ke"[ITbG˟i"Ǩf}}wB#9t$`Nr:Y\zkuss0qgߪ}OUju^rk|.u#sq_˚Q|̩O)ׇ57qʹ5eVW4WҬ*Ыh9XTUsh:*~2fLeG0?ZDRQ_Y5528IdhTK۸IIn*{Y SH;0GQG^s[[p_>BOCD᳾V=8W?՚#ҜR9hhG\HHe^<^)"^J nD1]#گG +B ʥw._Y[@}+4-]Aim@bzZ(UخaTG5k$/nLE7_qMwSG1K#d'Ҵ{>Cc$ZfejNmm O_S˹yVzGKW~-#1֗:ۍԳ@Z#oi~epmC>(Uk ( ( ( ( ( ( ӧd)4$R0UA5şؿ@«n殛#7ؤq1@^=# Nm5s縋rRRiG5T ~x|siۑ;.ܶO Ax7^-|Jvld0q#g|/͡xDtzsJ# 27)zd|4;y2f,/^ca8V008]+}aGh/'Ҋ+⟀l>3ס' g1a7C|c6)Zu~üKEϚ;5%ë+s ( Og¿z3hkm4аC/$A0y{h)ubўIf9>2̾Uҋi=SsUgR~>%Z|2okBh[c8_g [z$ F}8RI菵p7#_|v>Un4׍˛~rI!o&叟.4?ܟ|w?|qCQ֒]UTw>ߢ_c{ Qn|iK=4 BȆQqs-7E,, ב5}ރb\ǒG,A<ع^Uew>{ .8 z/KtQE}QE])'`ӡ#$imڮ6h`2qdҿ5k=nf%W ؖbxI5Ye"Ko:IrA:?2GҞ? } vk[KR2U#͏Æn5Ź>ExB,g ĽHW|T1ߋZ_h*ҟiyǪ6Q .ᄔCc_!ԭ/e.H0<5NjJIYi?i\c'\=Oh Om < R1\7<#qr\7rno?>|Lm>+|8E߀ww>&(z 2$͟__LE;6|#g/SNvIrw4;'~V`Wkk3/^-:OTBclșNF}-aiowz:u)WYI{^O>;vmHb/<7D\(`9Ldk/C>x?݇~x7bMη=Yg쪽2j6z~MEh>hG^W9^Eh>hGG8G*=:<+Հeid]X<}2wMg]hPغ ymPN^iS?j/C{yoi%vW ? ޿pa}wmiw}Ƴx~P/ip*̇ 5(NYml\ѫ9wxjx8¤=/&RH5R(Q~)=k! fUEIJӯ}zl2ƽtA5~>"xWMm|3wYܵaayfG4G%@9Pz\­f^֥ygҴ|h򏡯ۙY>CG} ,QJ򏡣>Pw}(ϥhGCG({s|k¾Sd u=*XLQ?V:28ih>9Ih7o-RMkf\<Ǥn@NGO#ևKGc998خs,~=|B1?Qxg^lxcFیt'ˎ䆯J {ӟ޳/M?QUf1>7P1 5QXnx~҇J7X?)i}?_ xÖ:Ht,?Y[,x08gkE&"MG:}ZǦKI܀<Ǔ⾘x(((((*)k ,@bO&>9>ֿzx#_Nмe]kPe.Tu;QuU;>+. %_ xcŷ4_{JO,$o!;9rk˾l"/ׁ,hGϜHXͤM#|!?WhoѦ~V߳|15 OO k{GK{k9PhW$\)8\6|a։i>`柩xrMFQ@pcdP!h+Yo|sstu7q[J-+4\ܓ#8Z,+xh_?ᮕ{Hz>Ijk4ID"yFRE|_OG~!{&sjWa7K[(oGGπ^  g⿉ 7zR^[uLG#dn0j~$|KGwk߄zt= sIվMtbH%ddfXn!1ʾ:?lKm.3OկQ7r-QR; d)$pH QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQECUsZjFk%BHnaJIᕔAj~O_'ŭO=7Y)OԴ$>pǫ~~|gg=7+O|@2#c lQ^6GP d_7?g⧂4EQzfYlp,8ٕ>^kpt>hPS VR%kWVfR)"Yڣa\χo"?f~S8=;T#Z:o|UiڄܹwL.ݰ+99C"6u# :;%l#)EvӹMBuR^3,${s$cϛDLц9j:WF}.*Ο)Sia,/TsƁh۟YIaCӥ%FW.gFv4̤D9$$1ʘqCU J]G[+|*dUF}OjT%RvL6 SZv7E+FHvc@$+ka'Oh6S~鉿kuOxmO_LFw8Aw_]ā_]CҝW{?O? 
ùaiԫ>Q_6۲MB|o4۟6;05mJJϓÃq#?cxjo{65Z5l%ߕ<|YT &5((Dࢊ((((((((Oڏv߄|}+dbA(+ulm9v~.>'%Z}.'׶"9l}yjο/5|i׶m/{HײN89D'֍hqF<52ZJx7RntQEzaEPEPEP^h# +O{wVW43)> m|u׮|)It)5Y9\q_WdLv{5}<#ur1S(=_I%GoW_?5ajej0/@G\DA#'2_~$xľMnIc;$CupzVWğ-%j^imţU:`8gOOnVq|iH Ǘy]?_?ϛeW_~~W?9u/'.)s!Ljƽ:?x$Wi[/lr Y:W轝힣_v7--G*AYno;]W?o8VͰ xh3g/}|;+AgɎ'p}y .H˟_?/G5z~Dt3I)?#3غ鸨W_"[doxNloGWcJs̄ m`x<+Chu% ]]jv-[G,M.a,3*He#AZb8T)$r.u#x 1_-{P36|B{θ鷠[)?gKISS~ {i]Gy,HJW{,9`c nx=zT^g:|SyٻŰ+.d3jk jzW<2ܯNMC+x=/u|1;k+~᫖<Dk[=d[}IHnl巸+yPHԌ Ab#W󹯳á>$a׆-5c>_di: a uSw|B.|x"ڇ=OlokԄ|nzq~]^F֍Ra'.h5ch:{iљ..XVgbM~}b-^ᯃz_,DzOI K0TҺ:rҍ/Ƶjt}_xs3-ח+k2O,{(' ~q|Mux/~|W;~g˦ ~^)~1ȒRj_= 8ov^bԩMII|G_F?ٯ a'KQ&i75𴓦 Z(;3O,YkkŵC{j!i$ 3(u'5w^YQo鱜jY7'tNO9xgt?H5Cci@>U!:bχ&`_+[hHGq~8_^XWݔ$vҵ~vg^#%q58ZPޯ4rtKJqq|Q^iuꆆ9\m?>{iW^<]WډYnDA<&QG\`69;B ,V,Q۬ז7715ͨnb7~2=\\w_$dvWuOC&9\5 ~ͨPy#*^뙸e ̡ }~n2\|]|7 u;/*t yj,V3Bs^7¿ȵğ^%1 mxjKx[;Xdr+_?y.j o< m o`4UC76JČ $Ը{:ƴ0ɧ)lho~dAøyB+:h˚J اII]{$?M;YYa6Ȍ2pA?/ؿe 4((((+2xVoSº uz~neXiji,q!W,dy"">oi~x{gIxJx_aya۵Z[#&6X~mλ~߄o| xe<[K4(]fRۮ.#]%7~Va,|ZTsq"4I0!O̪@&ψ߶_G' Du4YMŊM@n ZxPqqiE|=9>3~Ӻ4l<_B]o 7Vf!ġ/ 8o?gsdsR-KR4>&^$:5%leӑ*@$2xg1x rp#sL=\p N`|̗O?sʙliIRk5ouJFEXQ?x+QV6{~&OᏇ!ٹOxmm`4ҰT@f'__W|fV iڮ"iӬkdJ 3䐠nQڵ\k %OZ|QWt<>dE ݺ09f!>ˆxnXVމv_6x]( ,Ma&[ջh3O- 7)Cis1Ѳw }~R+?w {ċIs5nC( 1'ʟi/>#|έ2jv\iw7nsK9bi==td&7-es-gr-2*{q}aS\m3𷃬e o0FgXAO]MQEQEQEQEQEQEQ_ I-+.?"}im]=6r.$Ieg V$w)P\7_06nN?oAr8LL9#QZ3"oxaAw,88vpAE^'C>$6Uw];Bsc9Nj|/+wt>}:Mg? ʏe ?+^#3sX|j\;0սy_? + <~5_@~/Aɡkk B!4nC1)8 w-tPQE}XC |0a/S|^zͿP}Cƿ %h;g(Xs{}V_/YAp)Kwo^x~7jѯq$60@ZX&7Lgk ǩA*z1?KEO{+QL((((㝤ךt/l>"ҕc F6̽8q8VZ?js\C>8L%1HM*O&XsYx˩ I{QL8XJ8`x s2>&:>|k\'^>qWIӹ;5I}O~? 45#2N6? /_Ɂ|=nAɐ g./0qEbs_3xkdY#㟇u?<.{|[VU*L0?w^|sf\38^=*GoytW 9b#jּuUe65+1_!|7|+ Lh&^VpC  A' oػڤxJS6R쐳'o*bhy Kƿ K^"4 y\&?src^_[F,E5:rRGk㲼\RV2N-z|0x~/SwD(աͭ OvL?]s-|U =X&ejOKOſFRQx:;Cm=WgO#ɟs[:~|I3嚼VGJ|":qZee~:__?챩&D''Qլ<bԭC[}ȑA''Q|SIui5' ~%1Tחs2+㗌;w޸ ~e/+xRZfvׂ~ybxK^liM|y Cۗib0Òm~]:},q4:(%&(r|4q|YPX[?O\ aYMx_k@R_嘤ڍIcl0eqU(`2#kGbe߳ӼEiĨ6|CjAVyh s$JJĻ/qbsfgÿO|Bo \Mks=Fz5"K)U JzW~Ͽ 47oGO3TK{GpG9hNx={ܬu Sp_w4S{_c0UwFoIKM~j?԰y)SQ0xN5* {+k:ma(UV/jF;zMjLl1-?ͤ/hJy9T`q[/_GRZis2WnF57[1RRxYlhg~\nu\=<%wB+х9Qj46֗+e+$5~s&xo'|Atn<%6"6fgnabJI)!:Xizkؿ2TyOUopAEcvtZ]Ԯɫ|-ŴK2I-*+Qwjڎ1AmXdpvhy).M7>P:W&.ukn.@:*ʠWq5UJ.X{?OElaR8ӺT"{Z5KLBP{i1{o su;K 6{,"R4'_|^z?mI-d–rlcA7< -j:q+>k?9^f x­UH6Vm-m׽/4O-xMͣvVEAź|뗐r _?೟(Ͷ%{xŭڥHWkң^XA_a'UT&OٟQEjzEPEPX"?|Cznijۣ06YPXEc k>,xkczi<{4kQ2F\\%2MϟΑN׎[5m=RI6 tKey3o!Pq)`~#f ǯnm״+{`Y?$W@Oq+ܿ>|16^{}WB4X,mgV- %f)Ѧjo6YZIK{ͣS~ĉVVh&𖡠xGA_aӵ+TI#pUE)$ՙtN֩CsMcO))rI+ޢzm<\]xGg/?|D-ƷKll HUf U|wsA_OjRM=J`pnx-@d_IN,ʍe SO$W}Yk_Zeŷ-nMtHPx/_?mtyS{V4 ۓN9i_"­P7gBt2r>GrM"#lsO aMg mte%=vijI'r̓ _??x>T֪V}vmt?Q;}F'_ smhWPL㨒+6 z?C*-__]m+_:iygڿ{\_A_@O:W羃ɽ'4n<9EN;2ĤIY@lvh|bxD %,'HZ~k_Nn.dtku+cRGQ"7.ʊ\UP 3"4Ȣf֣w#3XbZ+xpoՓW`kն{տ-־B_6^5 3M,V7NRcv~󪌂+ks5kFE6,pv&5lg>@<5}q_5;}K̏$VZ;%9l)voB5|*oE8YcE:!ʣ 8P9=I'Eƚi|ycrz|ZגKU}qUBqӵRӮ,o P2H0U;U双sJE_3{ #zV!n 7I4}F sWJI1(r+yC+FAz'UuG: k70IymuMLnv1*\N܂F ~PP;_򵈞\xZ|R7)Te[m _/_.m [8]2}7Io__h:IT !Qg]ӯ+MIS߭3OE_5ώ4'fGr_a%Ua]6B4S\FqmiZ`H6MR85C?DŽkOXxc⇇m 9XPynX۾%l6;0ǚ.aqx0-SNϸ [nf̒#TP2XrM~ZxsVF\Em췟kC6-rbMC[/?g &mE.yǮPg'0- ?eH7g/yGFx'Fn-׭t2#xywr S+e%-춹gk-~1},OFP{מy|.|)gwWKC%XׇY|n'lx>#χ6l9k-\#(Qꌥm ?Gu ᛉں͋F/԰i΋6::u)ſ7/m;?/.KQu_fr Y̺&l2Ha7$ KbV*K#I#^to[w?R(؞ɶѼlxM/)VOdƾVW]]eYNACNy~&[azuQEl{EPEPEPX>%'{ Ok,߅+1_"?*-;~M 76rEIbC+*A8|}G.|ƕK:F R=S,sA"H2AGz"oMT/?3V=^V6U}G>↣/oC(-OOzE:$&gd<-H~Q@-mm{++x-,XG(ª( (((A__OOxLޏkpҷ]H'ڿ/#>0~~֭Sp]drH9׍LI^Ohd_~^U0t(CQR['ݭڶh/qx/[r!ݚ-ڰ2vBy"8ӭ>Ѿ l'kk:v6Lhda6<`~ ?!}m[IB:BMbBQWA"/\ў>Sb7,?y@A5<Bi o=&ӟlW#mFK麷uL1۳e8G#O^$ |# Fl<~w &lʅyN\ӍZ~ο7,bGMr1Z)^#oxq-b 0=9VtGqna 
L-._,Ƽ~9YCă8Kᇆ幋PZA=v]}A>C+eRqO*v)j}u\G^XJ05)Μc~M'{PI c!'8WN?5/'o֭qcZIi22dG+#x?ڳĺr'5@m`5PfXyi6#E42G1T]')$I[;%h+(ɮYQaJQ^ҕ)TE)J-ŸJ5x^.׌>Ɵ< je@D)sXW q]g?ֿ:\¼bg)~QdJXzj䋴RJ)eս_viZ' xEOk6*r2?2xZ|LĿxAt i3^5ڬ1\3p~E띸suWo>ͫx _vL?xOS_aj4!{ꢿW_y ye hU)ݶnz$ڟ  /~-?| [|3OHv3lq_o:5zjMVM`Iu/݌"rybtU{qPT.EM׊8Ra,ޱn^Et3|Qʴ$|x"EoiLz_G[PT.h;382q+gy4|C?bwNG->¨Ckg]c|obP1 5οڃ ?jNWGߧӁ>yvaˇңSnvzu?j+17~ԯٖ +P]eŵ3Bs_{J:_Gm!~'jh:]>`0p$@N a?ŐE}i.XT5k3𯀼q/oth>A=GWcT{ k7_^蘓W{?:qд02Lq{?_7v<mgÚ#YܨdjՔ6y~|6K/ƍ_2suWd98xqLO߰MhmzԂm 敤sWYXoGm 2Vbog.,c6r"'l&J('C]'}'Ol'<&m1\/>+ UC~oLIVTd,F j^ ~Z<5G4bLkL^,sw#'¾:Q2X\W$Ȥ+{MvP'x  🆮cӣJ u d( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( +_/|)5/+jp^B߱aS6oN1!Ls|b>$*b@$+𿌼!k/^~otMN3f\57l~wojc$]\Ml儑1R<Ek? f(}{Yf<{tseo(NT>c8I _Z;.y|Ie:vw !P{~Uf>t%+M~KS@zFx"4 qUE8zWF|Wks&6X%V4nOUmu$ߞ>lzz'Ŀ@M m5:;PTA?m?WYQjO ]x[ESi-&QbYch 䲝˹ ~t?VDii );0 ƿ'7"OYW袽jIcu׿?$xHaR%վP0";9OJ,Ei߻?8խc%Y^we亍]EU 1r:LX5UHGRR]b^ߝZXerH+hE* p#2IKR)\/h;yxkscr0<."̧H˟j'ZKY-аք ;Q$8eC}gxe->"xOpĩe%(e'ܬ'{&#/?|&%PḲsFO_?f>ލ՚umݦqkUGPrT;*+W oFhv3x~qgpXk4!J7+un+}Rˡ,#īו$-{|-_/{|Gx?_GMFnel1Qyi?|]~;sWլc|7t=+(jQ|o-Ex[[ndSnZٳ00־W72ʵKI=We~lqq|OE`qlU}S%bъ+w٫Vh k$P避Xx.s'͜.1_ 0|:[j al:0zG5e\C7ZxI/_?#[ ?'JI/ҵ7QE{E?;OKߍʍIR;\O35ɍtJQ^ֿ#xc"Z{Eyɥ}e{}e:wkaaoU( fb:_ࠞ5|yy$W'6;@<\q_>Zcu?ޟ|'2IAh`8Pr7J%,(c ׇl#Ue$a^ |giZLpwx#wڵNuI3߲P|D|WviI'>ZC.?v1Ï~Md<5~Ã;ĻasJy݉Az YysM)k'#^=w81ךBVw@+?/wx={813N\;ҹ/ ᯉ5jdngrrPvVC|Dּ9MKǏ_隥g!Դֆ2zZo=s|x-Gw._9!CT+!a/d%NzzOn8,գ/ƕ)-)cpUHtp>eˢ ]A阔;J4 o37`x&I?xK]~;qebs쑀Dh%۠kdxQP薽Ev~\J:PqjK[G.u]ZMu{-?yH#94;߈? O_ [TGO6tcq*k> xi_豙S۾t\\#y;xls_!#xVZԖGΉq=;IVT`J[[V|k=4RQ惴WvWN.3Oi/T).lttYj{Btg( z_um6Wp{/xUxH^[*=F - e[pIP@4/Lu[ᕆyAAWJ{==o|;<7~J{_F(PmyJ򽫇ۙ_ҏ/Z^WW/G/+ڏ+ڎ`w%xw?Ś<E>9=y>5|UKçHe Iq*GI=Yi#z d/_WWz8,־Gxpp9-ci*Y|[:w7aw b]FY!+3ڒbPqN+x%x$@WR2#_̟ŏٛΧ +|clMI 88QK>iߓؘ">2YHyca$1:'ivs[b8F9*OWZ?z+_ً Ri~džnn"C~ Aۘ'tdYS8?W ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( r?Zs^K4|RfռC:t^/K9Onyv^ᦵcWKCJuP*ʆgOD6SG3 {vrJ KX|N& nsz=O_d9}RoK|MoKlE,QXĶ|R!77~-1_:u[iub8- J:**_'[k^I?+BMs;yܑ&|1B1Ú|snGTԤH#AOd|gG3Y!K/Xr!8F*mex ȻԌA;W~cղг@;{v`C4q8$g8 |3sxI\iG}*Nr#k]gƺxW^?ȗWՑ7PFOQ_~א|Qސ}!b=͖"orp`gIao'#WcrpdޮKӬ_Q:fZjZr&Yb}UGүW喧xTŞdHai`*;}Wko|FO$,|(}~cyִE˒嗟G$W97,y{4}QEz!EPEPEPEP+/G|5oi%'n :7G8u!b+C~o@o 7qv ^.EyEǤC[5_3^W8JR]_Rsӗ{VȾ;~_ 6Xm-ث]_xmЪIeBp ēךOA}LIz| :{Ø{K&9xb[ *\%:q~8i?P/)M#վ^q+¶ UJTr=~#|2mwkxT藖4g딀t"ʊ ¸JuUjufދox]ҧǑS[DHXUHNsQEQEQEQEQE|AʿicT E4XUӳn'ny4dC_+gֵooc_xCfnsbXw 08ꢊӌդ6*7jOUg~G 8.<~tuM kwQ a"aHza)A_?gF/NRQa_ ?{˿9!rrv+hF *XUlMGVܤm < ݟKGk F Hp pA{_Y oat$xgTp" լo18 [E'z_|7ᗉRXCRvs|͔Y # ͧ;x)y,<'|GA*I1^gJQ_(eeগ2nkD(NVv[q=sǍ%6>ZWe%z[4<2UYSt;pHYg|ǪCýN [IdBv+'̙n[|<>3)M]Y4>oEkNvk)$4~$읬gy^+Q6ZΘo4{o0s(b: d{֗} vBg(|%IΜfFf^+V} Q5w#ۙWCG} nf^«^i֚qcik}e:1$r)꬧[[ynfpCyP9$½& _ ?N%կ(g,9 oBJעEeTo vmr{$gKTbgt4(IWXՁG89 `85|AixL?@/eG,,)-2RylHr.u~56UCHX:5+&zK15I!Cd}ށ4*v#9\F ;c^_#P䶶~c,ʘUHdVdgSpn36*m? X~$hl|9ijS_d,O%f0|0;8# 2[_i(veLa3IaOqNJI95mVcK58WEp0p7}=25\ωtOOd{ֵ In8T<@{~5i~+}Ο(Q}I{$p Ei  !*F7-9r׾ֺoƵz5\ºCZfo4m> ,$/$oatzF7,w'QtcEea-t*C\=upn\vX֮z6>,/3?Nߐ%}n==*wx>'IKk+vߙ9N=kNvyf1ӕOf7n?۹q\H̼o+A\Nujǜ}bRC;a9A'+w`/u0/۴NKKW1APHg?.X/m-~[s{nz@Y$rU.p q֜ `O Qlu .H6+W|#}*+/xMmb,D0˞#e+H;3kƭIliFG#:txm_ a,Cbp&68_HoC 1Pj?d [_5)u/;!\byfDO|I?M-Awvqx|m%R>{4O4-7+붺ɼW1 晥(;[]c1#p}ϋzR5uXXh׸S8uBÑ:Z֗YjmleUe`AZGa՛_5i5@tzLNo>wDށ?yEyggK]7¯u7ȯK]W?]Ꮛ>sUhC;藠f^?R 9s_|yu'3k#jciV/^sc28П-XTg]EQ]gׅQ@Q@Q@Q@Q@Q@Q@Q@Q@m cCOȻJ7KEyAn=+rOťꚭYws,PxX;@NoOX+o5LaA$31n٠(/?] o^>灣֮ZJ:ȌȠ:'2yP|/v3y'NykwyMUShY ]{4M7(jρ+*}C&&if($3$rH.6 أX~_oJ-M_Wy0K#A=v)a~QEx^Q|5x7ɲLl GbUPÐy>Yj_?do蚤 n#f2mnN,C3_Pɜ#8]3~1xWG~/CW? 
j^ZZK0$Nj^١xd=W3V8@?n@Cc>x7ƿaj-?J;3nUȒ| p\~=|6[p7<%:,~?9]QFxxQf<JT`[5?arqgMI%5%>Rz%6g~hΪ4zqF-o"p?xC~ kHK(er` G ;Xx:7:PZVR*oJ߈wtJ4~FpI&opŴz?(5g-Uq<ҭՌZUae(Rz^t2~2 ⿁v.-/JĒNy䓞{cq ǿn8CXzZK[[`f9p`5r_y!ڮ!ɭ/5Ko<*3il/u?Q> Ry1NBk|S?[ j5tMt+,'\*H8cM! ^^Pǎy!&r:?lFk a'nKw%zׄkZ& 헁4֭ &3)=ؓ|3|yߋ%/7o|&{iZ}ԋ̯" A,@ SsW"":uFPOv=|>'5_ G Su̷^z<1tZUYƝ)?f$JԢԔi^j#}#ஈ?LsYdDdb5GU|j֣yW k9'{5zxԲz%ly$*45 Pir>]{Of5MXǟl5藲$pG`I$w-djV WLNеmȆXHaku?i'I}iXNDO袊((((((((((((((((((((((((((((((tU1YZ4DrJ.f&C!`0YYM/AZ~Ѿ!{ZG;`cPUP t\?h_,*MGD𽨓1 L&W:AQ_m7ދ)Q}.vI[ݛ88̞QsO eI+_Il޺|&'r]V{u}$B34U~Uڠ(d9JVwW ^}^YWʈkd|3MNS9Zjߦ#B[[3=Gr>&յo7M{UGÚd~lwmܥs3tBNǷՄtMy>euMᏍ"ھVax2otmsX~혃"Idx Q2u(\~-iuu%{ܪyn  98 W%a\~ayg/Ib BLw>,]'.f_r?^ELG*>IgR)5'ec.[nd×>;OxÚMy.•hP9=x?4o]~Ė:׎gLMu^[v*BA+mzWCW|aiWVțm۾]:`=Nw[=ZX9ÖT/qT4(kזo<7EE=]K,WtWU Y{dl֧EK:y6 :4 =CI#ǵ}5C㿅%m5m D$kyE Twm?e~]_{o.-C43HfٻZB0޻?c#A֟SV򓜝ۍص'#3܉^1a%B6qq"fy19vWDP}j[ bRʖlpUz\M¿xZU<-=h}s$.&I! zS:CY[ Ŏ2u F58R䔔eP5QWY^sk7\H'nn׭d_ >o'_=W^^5rCE$n6zkt_z7tk7h.FDy6MAjP<ctӾIMo{Y6Lwwq _:X}UrTdc w,~ x~/ڬ#nir;wKg:VU2zO2s[h11cA1߭fxW^&NºٺZ oǒx=\Rq% ٯ,~)Jb-7oݳWY x~OԵUUmn'EPuG#>^N:u_k_|BF]g@zdkh*!Xr6q]ߌ<Ş4oGGB{ӅIƱT _jAmH|grt[G?9XԱ. Ǧ4ZJ+zٮݿC pcBgMO]Ԝ⓷廋25{&c[pnob8C`:2WQ}J&z>dY\q+ Ƥe V2IlyXB 8IV~ g\ʉLM"[v? gȯ~> x_|YŖ" cGI9zɉR[ mU jA>|+5k9j|#- 7ppDr6z+ԒP qo 9eaW|X>\ZGx܌bU@V%xȐ#yJ,9l2@\Ҫu#|/x2^W<ͷE|wHZw7 I&'s xY=s־FhU4fM`3\2`}W;0+S ( ( ( ( ( ( (?_k.O?bjR8Y(%cLw8N1ZT߁k[[jsβȱy\/#rd}3k(—kvZufݞXr 1[$BR@7~Uol*Q qq(8uff =M|1[t<7O>,d%5YacjN1Ӣ8|F*y82QyK1Zuh>9<%& vBӗ!^y[eoT_5~#Us5kE:E l$)|a|^7=[M 8kNce~A־W<Ix7A,F ̳)$9i݉\?8G|7Vض5oZ4,y&y3g\. ~H~ݟdߴO37F𾥦[^?<7dcnv?T-7N|?}Z{Y^@Cq H vѥgexP\Vs{ݒ?:xs^uCWf''ʃ|Ԯ~/|?6k_ : ?X-՜hn+kڧ #,1g셪AMm%72Y:'$e)2vȀk_^''[hk<7=8=wyTvz񗄘V[5?g^>JN5ιd{Mςz6=&ˉ59^q޾zou/]xJvտr2vʯ쐖kW#QbU9\!h?|CN\M6?x?ۇwx~Oٮ;~ NkҼo:y4>YO[eozoPc_U':k:mnulZzy6_XO#5w?,uգk' D Q#"澯yZIH(<iЌd:M^*v{;wSF4$qwcu$~>Z_cAsb">8)%ksH# K:HAoF>9O_g{< y.5}_ݎ>eOYjrMwl*ka ڿU_o>1#ĞwZ/LFIAm)}?7?az;ٲG}O X|w wͬ'w'/$ ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( u4<2.,fer?JլcOM[z#KI-ي:'d>$%s\]_򜴒Ȯ}ͭWmĩme-ħƹ<ggQoiҥޑtqcG_Zͼr@q$֣}6 }'UO 4v߁Wơr#?$@'@ VFYYY]NX`jE_μ?(cN)Eh NҪu:KfS29VUlA:=VUlB02T:P: L#juOƧXj[9Tƽ5<[8'@fw9f$&sMviFE<?޹ӥoM\MRqcڮG:4ѥ6Zćڭ>4q6rAVVگ$^m jboJf8sZ[~:R%cy$M//reW\|oxOӔM>'[ˑ4;l{iUa/X8SS `>(S_L(Š(((((((ʯ#0+ = :w}YI'|Hc@o|u%X'o_-|+_E?lmPw8|0>JFsA|K᛭U\.٭o!Y#PyU7='/<Ê/=Ww/)gik?>o2:޴RsG_S{_% JلRjfbѦ8Q|Kķ?*GNl WPwm#/ߵO߆iҬf_z8Jdc%׵@k|>(+7|)_ ˡn/?YӢ~*ʺ(3bojs]|{_s>_F䅆)%8/$akvd¾5Ԑ<vT=+Ί{i/$g\|FK_ bfCQjYo.]F ~pN5~ӾԴ_rc9X|¾md߁>)xF ^Lzo4[R<9L|O%s\]VrۖUH3}$;3_?|8wT׾ bL[ VyDRL2}W-^{|9|Jqo9~x=?>"ao7c:qԯ9+3$@t&0H1E?Ï_ <'7o-46[%sB̒31k9ei'=?mWGmVK.U(߲X? 5?hB⯌'-rAGj?IЬ*L-!Xmm-!X5TDPrOҰQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEIs_^jg||SW "ChK6$־A3IGKF$+|PCY(ޘ'rA?^kg_왨x\h4Zb]㖆@6H|ʄ)+wҷ/4Ks$&m+\OD30Icd#7>M[I-w? PR{4O[5ٳ@e# ^^pUΝt*?$~V: {/?^L0"2sau_SPGC2ȱp8Ztݜ[kTiaEzwmߕ {Aq40$6ϸ0(Fp;ri}aN68%\U_δCE mݍ`=}qS.ln,˕ʟ֫w:JNIrj­1FXQ&g!꽪1%H8%V|r Ee 0G\Ԫ-#}ؘ0osj9STWɑHc1n?kKp~cڤm2PrS,nZ"U*%ֳ1Ajv})vU*EGM#E,}ymD8:kKe*\aW)4(%58\tle+{q^HH칮]txTB ny6h"8IBC?((((((((((=|1- Qؤ[^'I^y*q5TT͌ateF-WO>x?SZەGdrv3^BDl,ovn`>Jھ֯~-?,Gq\Y7oYe   ͇z+ ~nTmb{SEqkKH6 dGޤʱ'$jadş 2^}??~}x~_-E#*ah%ïL6Jچa ˒k]{=. 2 [Ӟ}6o4QEwQ@Q@Q@TS 3EooY*I<zJ얹xÞ ƻjBarUYePI+?w3=ƃ/xq6Ns0~3{m7xUtvP.geV睍כW0N\W4s_S9W[8y{|kox|SOr\g:DK{rBi^%ukVq4D7#iX+J/ <^ mt`>s7$wS78 C.|C}# U*XT֔fݮi:>l`c1죏ǽiEzI$Ӎ8Y- (aEPEPE7K7O77i~8 [=>l엉g';;fI\A'}sEPE?h?A?>*x;:&k[F 0q\Rx @()bXd@񺜆R2I@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@x/w{/(XV'N !o3QnB1p?_WҘ4KQ-.?1FߥW;lmnbVvzXoҦ2ۻe}mQEW(*ߥ~\9X_JA#R ж)eg۫6o{<[oiَ0?9U)YV~f? 
i\7f ha%Pcs,$M{<NcR ˫?x3r= ~^rjեZlkɿ~ ~?K)|?uAFӁo3 R?X_oIxEַs}.Smoi6hrR-s@'h[KK[ 2 +k{+8$0AHQUQU!Z* 4 좒K-QEG@QEQEQEQEQEQEQEQEQEQEQEQECsg-1\[ʅ%E `ھ)x[ .aGrd|m @s2 _mXWSM\8qԔG^iW `o,7 ،qpsza7]@E;.m$G_L]+xJBNa3 ̧A> ~~.??6J;f2&~U#Xf/mGrq\7_~yn)]h7¯]ZWO.Iׇ:w.l7c*{w _y^ O}gúS]YNޣ*x#<ۇҮ} qUrռzN>zיEWIEy/^_O2ȇFxp?>|lUpq;Ϊ}_7_}|Y ?f_ `F0ҩ݄u`౯(*TNfyXK ]^ *K.oCo<2AU_ 7(%Q~vƾյñ7%=$1ɷP3*d1^M: ;>A‡+:j\%:cݏo>^8oZٻ~(-j.3A&r`) C@~_olt|G'uIIJ^v 𣒻`o(?+W/[R_zeXUIO.]pWoooCmk k0ā4Qt.O_~k@^y:Xy:@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@;VاōQk~m#N OQB pAEz/CI 6g/,KP .xF`:3ܓ^_^5]HSqt/@5wvI7;}fGæzfW߳U|761Xf1|sxGxW,,i^` k) :c`AcI(%Ϳxn=,M*88F8E.dN5z-Sxi^/к[O2,dee7xivV̓Owg=J1W꿵7M#K{~!iV&Ϡ {{_mWM߇>Y_xgI3Ƴy"Ub!,Erar|]yYA| 5*4JgR.K7k_(-B]{Z-e=sAi'xݚE%px<~q'v1K_Oc?"kɿcO&?h_|Bk sn؇5iNbmpd8ZL,?h"[=߹kHh 35k9;?!U7 ֭Η(+vpK[w{ۧᶏtg")&8 Ϩ/U'|)2>elZIKyXoß"oOH]Ht{Y&iJ$??>i^}7Neq'pȓrI쏞G B.<  ,K yܾMr&<MZZj,j:OmlH޿JO|6nOz;'rFa4 vn8"@72I_&]r;W28/3B=Jy_j_AxOÞ ,Z=7DӢV #P  2qɮ.[;Ӆ[~ Uur"ޜRdoNc>P|tWӣu<иe1*$g!QT,|RhniQ^K(klsWK!N3_巄C Y4-.q`oslMvtQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@_'8iS>UfA.{Wĺwf> |DT-}>IQ븨?ݯҪ+W՟u>+|?sj|VӃ޷~g.xQ >m/SJS]D?RI+mc˞(@x goVCdгfxہ((((((((((((((((((((((((((((((((((((((4~?hO\"F[;c1^ؖ`B% 6G ۭF_[G.LgD'nax?⢀?7Mn5> Ӌ>k?O*} ca_h?:o6I犼t4Q#S)S P0AEPEPEPEPEPEPEPEPEPEPEPEPEPEPEnj! $u Ũoe_Ij;V(ѐAbv@ JOg gO?~1Gmị=g:H[kR7'L~Ο>$>!i,56 2ķɣFiga[5CF/9Pb㸴~;XEf–T@|/ٍx++]v ./ ih[|j7 0ݵHt?*+'Ck>8[3njCr>{%Yψ4HHX7޾|_Xx#<#1h :e#k;MSźı$$8PIE~~_6>0aҾ {UӬFʹ&daE 29 Z+VIk4o^3tK="N X[QI]%U {ᶭ_%ދk%֩ ]HٝR-[?+h^2[7~ 4HqbAҠ(((((((((((((((((((((((((((((((((((((((((((((((?ෟj?lZO-.<|:W?ī&ni %gv?.=\~:g.i4𼶱+ m1&q:<߲s iW?xxPR-٥hRCYkV xwpkSMqrI'nǦ@YVO_?YVO@=A|T#Nx]G0&۲#|F>4q:2=3D=9cпf/]Kux岴bV ;ƾä~Ku0׬|/\[φbM5ɬ;ºgeĀsk㟸+կ\,,~^<9ծ4k5h nD1}+~1ǟ&ŸNgnd%[f b:x̘< |UOxoNZ\6$u_(:Y?,{Rt"*Q#A΀?X(L򰗂o2?&%k񿎼ᮧG4KGRX@yg "x(3~4 $V|-mQ$qڿX?__Q#}~K\]ݟ_tK]YxMyl{PEmO8_?[/G(?6d)V%7? l~i8mzV:#k6hO/sҏ U֟Z^Ն9ͺp|st$Ÿ7Y:>x~:/uZN,#*32lSK=;W_oCVV=[9&eEbTzPgcOxNZ}~+~v#3 ՟^+#x̌4[p{WOQ=O):iyjW \~"j !iRv9쾊𯊼7㏇z?!$Ϋl:va8 ۣ+AV |+|a?4_V[Ka*EJ"?b cSwExzi4z}:I O@+ sZ?|G4[]oCmH$̟P [s/Fq?:_O8_ 5 #?.ۛxb&x+.F{Wou6]m&]r(mnlmrA]jiIH村 eAְ<+_ x/Z_3ۋ}?Lӭ-TQ$NI&)|_w^a[-Bᶚv wéV\L/^[???OK.~3^ 7|#CNe4й>\?Ҁ>4?r_}ǑjIl:_ ڏc{}_'  i'ۮt߆Zau22j73LnX)N.kiëGG?5wǮgexG5Z5T6iv Aʀ??_sVҖ*# '[K Ƶf:yG I5՗̇ 5BbrX__࡞ ᯴jc5niuMC˓bc>O@}= $ wǖ,uό q"`|'c k'/ZLski(6<F;_Jcg_,K_hyQ 2ǒY$I$}txwg_ G⍷}͔Vkvrk@PvqOk}A_lOîh  /-FH#Vi)M6Yy8͙IT:1H;G$qLZm^>[7#Ry)4Z:*no6̐2k$=R~|0ƽuo6S#̬ '?<3I xf b[;Z;aBp8G_\f :gEpG`89u4P4}?kw@Өf+&>| Tcrڭϙs,Gۏ7zfh?b|KOoڨma(E3+?zu}b[kmHTQ9QϯZ( JpyO࿉_ tĞ#XTݦa#+AW,V9 smEuoE6'9~5Pr|hAei ^`m-$Eˌ]+Ll4o >k cmC QTEPU(۳[b#:=?[4=CWW񲖎dwZdrʙ=eg W|}Ra4.ne,? {Cʅ$d>@/К(/e_ < c@-|YmsK2@b&PbFL@ی@l~W?eO؂ᗎ5 k'E·$n#(W2ƍ?.9ڴPEP?i];>/|eg.s&ziz n*rUr2H?i%~Eޛ);GTU&~ӮDϙux$eu^ ?d'~ xzS66ˉ&$9P?;+'>W;o7m?~ۊ(((OqKx+ ~?Yj$𞣤Yv̰-ͬHT3 8^E~S24nwx>yy%D\rc+ _o|<h6:~>$v$ 9|E~s[~ɟ/ߛ/-=kpI;dfH6Ģ,**C[}_᾵NƉCFCg(oGUѿk_Y~0EwLL`k` 1~,͟{~>wi:Fy V: K<@aU@ ~@?oO/Oφ dӭc }*F&S1| -'$ [X!5b@(U:TP왨?. 
^мoj:NV<,#œiӭf?f⿆sY]3lk  { :  h ;  n^VV_p?nD-~*: !_""#$Y%%&'v(C))*+,f-A../0123{4h5W6H7<829+:%;"<"=$>(?.@7ABBOC_DqEFGHIKL3MXNOPRS5TiUVXYOZ[]^\_`b?cdf8ghjGkmnloqsY]3lk  { :  h ;  n^VV_p?nD-~*: !_""#$Y%%&'v(C))*+,f-A../0123{4h5W6H7<829+:%;"<"=$>(?.@7ABBOC_DqEFGHIKL3MXNOPRS5TiUVXYOZ[]^\_`b?cdf8ghjGkmnloqsY]3lk  { :  h ;  n^VV_p?nD-~*: !_""#$Y%%&'v(C))*+,f-A../0123{4h5W6H7<829+:%;"<"=$>(?.@7ABBOC_DqEFGHIKL3MXNOPRS5TiUVXYOZ[]^\_`b?cdf8ghjGkmnloq?@ABCEFGH!I+J4K=LFMONXOaPjQsR|STUVWXYZ[\]^_abcd'e3f@gMh[ihjwklmnopqrt uv.w@xSyezw{|}~Ҁ(8HWftÑϒܓ!0@Qcv̢8VtѬ (C\tĸ˹κ̻ƼyX/ÕWƆ9ȘDʘA̒<Α>ОQҾx4ձr4ؼقHܠi2W r4j!FN?_,P\Ag  N 1FPSOF9* !"#y$g%R&8''()*+d,;--./0j1@22345o6F7789:};W<0= =>?@zAWB6CCDEFGHfILJ3KLLMNOPQ{ReSOT8U"V VWXYZ[w\\]@^$__`abcldLe*ffghizjUk0l lmnojpAqqrstiu8V9m:;<=>?ABC/DCEUFhGzHIJKLMNOPQSTU VWXYZ[ \ ]]^_`abcdefgzhgiTj@k+lmmnopqrvs^tFu-vvwxyz{|i}Q~9! ܂ŃhQ9!֌lP3הuR/ ›wP*۠c;nF˫yO%ͰrAܴn4v2뺠T[I!S~ģ4SnȆɝ(ʳ?Xw Ξ5nѲVӝ <"Luminance" = "0"> <"Gamma" = "2.2"> <"Whitepoint" = "6500 K"> <"ChromaticAdaptation" = "2"> <"BlackLuminance" = "0"> } descDell G2410.iccDell G2410.icctextCopyright by ColorLogictExifMM*>F(iNHHnC     C  n" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?w)|-fײ׋~<_Jv_%kiAER(((((((((((((((((((((((((((((((((((((((((((((((((((((((g0)蕯j?~اeVQE ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (ԊL~)`wg\lKWZ願iQ|a?P//0=Oe2w0[c(+.kpg{Hx=3>VmuHmIYwxY;OBqY(ϭ袊Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@k<#xĺΗ5]c#_Q|>~:|D4k5v2G$ꧬbxm3Cv/ď|)i{-мQVaB$"pp 7@ ~A|D|]CV'Rb؅OXBe^܏'5/?n:׎^IE8+UQ5kYx[|)A}?LX"S݈QI(to xr?2SoiO ?xپ~̞,\mN 02쎤C#?_;߱UCW_?NwܢGhE"׼?3V)OX:lpPguE e\oX>&_g.<7k~ah^U ۬Q 'Ï?~|9ohb0XCd9y_]٘}W[9i Inr< OA|7YGim[ * s7|)/'Ɖ=󍜸шI`$ّTנ1jǪQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEG4[< y$($w }"s)tا3Ƭ,:2!oYĿ+oU x¿I8uMoO w+]j"Fuܢo$%I<~O-,wKs_s@1zim?;_ccV,No3 `69V(pkO~Go %![G$O}?vSn=|SQ''#lzayʝhG )8^g@Vau+o54;IZ9H,.6?򃟄_j1ؑPs;^WWbׅG7~FY(qQ]((((((((((((?~اeVWp ?bZlQE((((((((?io۫4/xknu$|w!-wJHUr1_ y?e_پX o[v"TJ*i6}IK>|n|!YW<>Ha.2nLq$ A־)ݩ럴n|E4F W}lgدO٧u lxGŚ[E06XF7<_*i.>Ͼ _?DH_j?P93B)'jJ/گ~Ə/Mt(y~y~4y~Ɨ s=iI7)e*85eԫcJ ww#P)S ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (? mZgcwk^.׺ 07Wk[ |9Gb`+LNҍv׼=D?? 
_kV$|{s%ƭ[+[QO(TTzt*'^Μv=Sҏ9$s~="OC2D%,vmörzc{f}.#p" _+kb.1O^Iwڔ.PN@8ݾ\]VW)t;(A#*WqU#o LM_J+??Xӵ|Aɓb#U)_%g}v/_f}(Š(((((((((((?~اeVWp ?bZlQE((((((𯏿/٣4^/umn]-3jr+"]̹5Ϣ~~~#CqNf0pnTn L 6kk gm~_YTxf:Hu++)A 0kߌl*wijЈc17Q 1(J1oNI|XK"oZJ sCd1裿޸n>鯗Sw=JqQI#WtWtR,1vGEW'ʅQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@31;J5>xxV`\k wĨ4)p堜2<^ah?]Ѝ1쥴(ngwG(#U JM]46zh7M۹W'j ijC<_@H->}bj q ;վ/U}q\'tofq\>_J/}CGߴ?/5_"V~ޱk'bFSWҿJA/?y_E_é#(QE}iQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@+8ɄND{Ux&;/T )QEQEQEQEQEQEloԿC&U˩7(ÐXA"A~?7_Wv־_?$= cfHtLd$cA8|!d-6\ALUkF# OisiwG}rDp) w?yTs%Fм\%NzJ*.KCfOCs1(~wG5ݮOA 0$m%s|V9kh^h-Oq,d慎C:W__ k??m\ϨxO\|;y=w]]S#к11Lngwy|Zu0e) ~8BG*+轜{o3?e})U{8k28峊VCjHg d;xğ k7ΗVuσm?}A89lRoxe U}=Vu{mCyw{d` 5 9{8f~6-:JDrTّWWbo o5׈/Y.0{p󮬮yr2Kgk~d蟳|HqI|qr; @vvqG`gpassq 3m8Vh<068ٵ)lԭ弌fHU2 㒠u~3 ?Ƶ~ֺ=bf]Yu J@x̪Ud9?x:65H`_mG>'~$x /WZ%/=6]4`!4C, Z>iះ<:I.$񧈯udM!S#D"m 4{8f~jv7+Cxt R䜳?*ᎌλ0' fqo\g<>#пf WIk~Ѻ'Žkj+5PSn6*2wm }Ww&?d(-GWFu]E#?q1rI< pQQq8Ϸ2 ~|giMN8gbn3 Sd` z{EH{xE~Ә^>xLIԦ?hB}+S̈́Z_Od3)+:*w_[~t/?/ÏjP54OxB5Dw;n3 J[߂:*vQ7[4ѕ X`־Q9i63CѾx][Lde Ѳ IR>$QkӨ3sիGRM JN:M}sիGRM JN:My\?#*_?%y_?*(Q>T(((((((((((((Ӯ//.!6yp(31$ W !'AcP͇\Hk?Xs_Ө#1XB:t} |ynǤSm9eUi G7-t<-uEWp|ۚ۟P rCEYQEQEQEQE~d(kLh?]m3Wg?Y`I%/>&xBEnuҼm}@ 1\We/L_Ox?_tZ6K(C 2#9k|#x]i}}s-#b0Gj`7p=iQ\ԫV2u!kt28!s 5bv!W}^n.Jt7~?A`a}G5My5jO2N+?\8szh'bFSWҿJA/?y_5~_dHjW Y(9E_z+Ku=WyٿC>(>(>4 'R[o_c 𿂿/_> Nuk"CjZ !oPvȤ3 2M*10ϷUS?w44χSxGLDŽ Mhc"MVT!Wہm9ɪSiÁ|^wohAfv]m 9:3_[ǾYi17$H_id0Ezej5g&C ^>:e >8u9'@]|'_6x#[Y\jzI+ģ I7[p A |{%Սƅw#n}[_e^Z#SWM;F՛P|5үQQk? C!GpYcRVO 6&+H<˙{gz5_XK|Eoȃp#V8rzƚ[̳z6j猲R w"<9)|;7u]AvZ\ ۱#Ċ\g1*>=kgm%;nzG=Zxi4D)C a?*/>UQ(lJjŹXնeƜ{8|*ំxɼA+N|Dcׄ}"[扚ZC!Z w\d?Ϥ?xiS?ozR&Uo-p~Q*Z*ck;:#F {5v/=\Kqgee,ŷl kѮ>Zx ? n| jۭ|9/_M3e-yjrrxk ૘bRtGKQxx. x;þ )vm?DᲃsVm]3=+̮>돾k1m&NUK_?oST?qk _?oST?qk^/ݥ!QEEPEPEPEPEPEPEPEPEPEPEPEgC^.opxRm ӡ4LWqYcq1Y0 Oڃ.i:o5Vm|Ȳ#7 r9sG+O MY_$m5:=pp^[yYS u ˃t*˙7 о%B˖$ȹ}y qiŻ<x+ AV/<,zӧ߯_\?gVpgo=$-?ςtC6fOӠSgxN7*Cyh{j#)f]JTGG&3Vs]Ѵdۻup s\8?Yӵ}+W3:B:irI?|->j:o6x~߫=,#7}Ohi/E!=(Eg=Eyd8?g(~ X5w?kѮg/7Yv`:\I3ӵޟ諚}:5_ Zu^eּm_yz~ G>a}>qS~Cs5?J!xgQƺH/--S\&ЫlAkC^𿀾^cUNu t8U%I$I56VTrg{y&tTQE{QEQEQEQEQEQEQEQEQEQEQE⿳OJ׵Wa?S+^M (Q@Q@Q@Q@Q@Q@V;'KJ:K@6Xw;Kr@#=zd}E|[loԿCYkn .Z~1 v]Hw|t}Wv]r|Omsgi^ 3x{zT+.o0[nͷ|"0; g3{cLJu4C]+mv^"A۾ئWY^3 e _|I<PfI-l$?=dT |Q㧇MMꋶ wH2 5 zJ&.[3Mj~̟xKφJuNv]FpJȸ=+Ծ |MΓkZB5{1v #+.YIRHj!7}QvNj\jWWQvLѱ{&^:)-̣̪ggs̙sޗmW@qְn>뎵qMp:"b\}+kva\}^mm| 5q`}^uc&%ViI7)e*85ViI7)e*852Yc/ (O ( ( ( ( ( ( ( ( ( ( (  ((p_>i|`|#K i/Q4OigYn% 4# 4~3nˆWKfm-[pY>XU,BkMZF4"qYRa`Gf Ve|¾!4 Xu)#% ],˔)aH=cd U_ڳ79PVD 1~UM:u)S]Rٮg=Q2L%~$)jC1m/2yUo\r->-.W qKoj5ltk}Fkh|K30猌MZ( <$l1Fk)%Q.Mo~?||UƵUZR\nnWR~ٕ7fJ>}+IQqz Y*+I-NRFtJe,{^Y |XFO/OkB}w|GJ(8'!ԁ^c^࿃^"_Fux'틿k ßy>V#_&cٟ=-Ğ\ M֭-.gY_V5_jVO4;' KG7+ Ẳh-@KGR2rx< ^"x#sSxn,A#=%_ibIiO8_乤qht~&pn\ܕ"C(E9E7fWQEz__w? l$V\i]u 1ŽG5:Ck񿂼9n.jPwgk)tou` r >\|~薟 ? eٻKw0;rL/1A`TO=SR|ҼmbMt;)RZh!F2 Bcؗ\>(ηw,̱.$nT爜RpQž,?uxiDZZ?o>Y+Q røl 1yEH_G:_h1@׮5H+RCkV rT\(}Ư>_7+d8M9,ZWmV~#X8钛)}$h0`60p͞kzh|ۘZz-}"0v JP;WVUhT^zpmק1?5f? y_w ,vs؏{_lo 7I EПE*K0+x֯V0~N &{m湹{xQ $ko?߇l|i>sQ&S y{: qW ׿ݶE' \Kl)ǜe<85]!5'#?4X_wCg]ZLTm,CfwҚ?hi:>!j'{@FB]qR2kC?o~E> (_5,a3f>WS qQxI խ˿H?z3sҾ]C2Ir/FS+ ]ƕiTqAm ڗ i=^"b'&8.aa0^jF1}w 'w>[}ʛHIC`pR;Xz1-_ׅy޹yyKgF>a>s(¸]-c\}XT&%?Z¸n>5V鉋q`}[| 5V:bb\j?~ŸRӎ_x\j?~ŸRӎ]|?#*_?%?~hDP(((((((((((߈<'"OtgdQ.^FPqsYի PsI.c؈aJvQnMIjboZ=; ?mH00D?眡dyO}_d|a_ψ{ vvN(f),A4y< 6Uj^9w᱐yMQ\2#o PN8_J xM^/.~?Y;Z]xԟ.)##r7 frpMzE~?]"qY60Ƨ^-Fݡw'{5O-,me$#V:Z]6,ṃex * ?Q+o #ѭ4#Y݉5Ot[Rm鲺JC~[}18g8d4WJ~)˙vQM赔^Ŭomo R_jGN/cu[sI 6^񗈭~~4>'+,'P%랇{֙斏|Qb? 51B> KHXÏwC'x~~ڋO;|GuzEpcnr3D+UM|=^yXeR=m?&|Ah)N~ OEmeȌ0GW?=ƣϊ_Kyimxϥ 0 ;w&m7A𾣭7iM^]NRK3&Ə_. 
M&mpG<= [/ğ 07CΌrcVI '#|Oozx?0LEoj#y\zg>M&NI |2_kj.}R5G3Y_k'w g[mgكNGW[Ƕ.7$am-Fؕr1h)7 (((((((((>DxKxk m֩.FTd,z|o/syZY7ٖ7@.Yb9K/V 9MOǟZ 9?e +ȗڭAkA<'A,G?D|:u~^;{}WA Zh80|p[NXQ/^O kG|=S$Jگ+k}؏↩xyF39P'8Cʼ,M}/ >4 i? ~ΟK$zT$ 1mJ*+œ88Oj*妡[_][XIJ[$TauaAb ((_L'v_%kګg0)蕯jQH((((((ď+gc~z}k}k+gc~z}k}k3Cў_譻WAmڹn[v6=ڎgwף) =Okl]^uEx,/c۹ q tgzv9.+}aitf>^uSGZI~.XI )v%%Tolea#c'9ǙGZGbyj9j>*2k7_]J!;pGfVQU0Q 6/yyGyp/yJyFi9] ɸXWt?Z¸klo>kάtĹ#)&?C&#)&?C&FTK1ݥ!QEEPEPEPEPEPEPEPEPEPEPYWλi6ia>Di|>w| m7o[:#`cY%@ۛ?0͑x7vMeڇŗJ'62e 1l N+9]GGkQoo?%v~xQV8f|E]`pLⷎ&!O/dPF5~x+េa߁<9oG4o^Gi>Rȥ^j>~KhV?ZVxü)+JjXJVK*Xz=}b;{khV8@0U@ j+IYrgB ŒwFbsYb`dws]"<iX?C- `~QJP(b*КJN2]S @2mxsVӥ2 帕 $!ֿ*+=sԓMJRniI$z~=k:V$}F|Z/]jEGLʹ A/>2`~*iVBWIԦ!_K ];,6o>Z/otOxnE yļd9&ۂ;8 %%ZZ=:^4(#<| 7F6ow<lj昇i%hE>H6Nэ|MɦvSZo?JK kX6B^**yF@"0Ax?|~5+5<;j@VV7R<D)^Oho_Oi*es0#q u>S5tiEM-}Wk?͟x-#E4j? -զMH/Qj/= y?[0^+ww X[PT"7@;7hhR4dSep59 lד>C%O3ÞaL~4kuM;6ebw\eRڵ ?]п|Kk -[HEՓ[ #s XdkzNӧd)4$SVS_/st|AJf,GU@o3 +0E 5~燿I<5 z*O-\_ ?+/.~(VS5~ +i]ځ I1Ju 4nH)4?%%uQE (+~/\ozOOo;kYZ)x"5/Q%t;PTWoƟۻ Q mWm#^ݥ=Gvc"] $-_|w?o׎uo|c;uodRϹp>?cH׭|mX-'E{(Ѥb̡X:cWo_ iZ_%ukſԮo.5E,P#9TbDaqǍʾ%RMIkCu_v2<1͉U6T8S@W֞c/2Uu3^ͯN.,%i` 3$[5~5$m5i~ҧ8Rh1F$ 2Fە$:zg/?|oU]{?QXi|d׋oLuE7Cq˧k A$,Ȫ_.t [w/ȥ3]|>#3O dn޲Z4r1b`1_k̿R_[mcZvbK+hm،GP"+[}J>#/W MVՆE+) N+ hmbX-`b5芣`OV2l; ī&W^L==Ꟙ}rWʋOWy4Ҙkͼ]˓הSy\x{Y`C>c~R(P==R(P==RIy;kY+M5]q`}^1U'L 2<GuS_ď:ZxSկbM?vޭ_ _x;@Ҽ7[glrp2c$׊fNo~R![唩w`7O(K[!"ÓQM|@ž0AnC}覱}~ukj's'Ko` c'ѿލBL,ƚ桍7i$46xx{ۆOPԏ]I1>^w\uя a#9Jo??~<fj݋ǤI~-NJў90H&LA~Կ xDcK7Mk!@dSCg̩92fXlN1cin趣(;sJZ4WwQ/My폎5Iݷѕ]$Uk.h?ڃ_|8/5̂##  kh ҊWD~C繎q3ZUjyIZ-QEQ@c^!<7amw-sGmnn㴷Q%CVrHDžQ=xWo=>'~@ִé#Ǝ0#2Xq O;Mφ!|GߍvZ_˯:n; VOMr 2PĿ_ kO(Þ5b6)r@;AFq[9m­+6)[7v"Z\C2F!p95Hx]nFO-/K,G3\>fBx/^+~hƏº&;~VW.LXXU ت@Rҋ+-OI4]B1$6ҬJ+) +; 9 Hc A-_:t[Q;2I }4e%*6W,o::m'6=?^ЖRֺDH-nK.9$l@H)4"8ԃџֹW^/Oi)w[UVk+ ͲyHTǚhبr{º~6tZ?cbp5 - a&~ akx㽣F|+yZ$Zt*[I8#wbNV)?>)i ku{f VXa`LL?vV K>cp{?2E *sT;˕JKuJcN_uz<&eӬ.?4K顆iT0 d$n.3.=V+ς״n5 .fNtSa O)\'8dzsg̋ ))9:@, |G{k"tK؝N3ʴ e9W׵ޖ?1"K*|[&U(-z/Uw x^Aơ@T%eٗ+>+|3緹۠ :aє[1[d=0A:/AMxW5:p?* ѧ%o7g0{?+:ӥ5{$Vno䟌wWy.1mf<}MB53?oiz|.}48>(јp ue>SrE&A>?n RaUZJ4 ( ( {x.Z !-NGЀjj(2]-tnIf`~>aG=>_MiS^#b2|Z(f6kͧ˪[F< f\@<⠲д=6YtJ[6yXa%@ˑnh KgonKoo  '1rI5q/N:ЇfOf7mlh -ïc_ۦ/(?dO癍}}kj TInX ##ԓZTPEP_qD>2?i\z`dr/J|%ybN09d?̋$OXdKzk΍xtta|5i xv?.MT@'<9׋(w] p}l=?QOח8Z[мGd&k,glȍ!oȇ j)Gڭ)SJ>}+kҏJHǜ9ҵg=mċvS!=\[vsyk;U5XmQE} ,y?)+"@2ÚlF+ͧxtKRO֍ZטRO֍Z9pBxӌUy^p@ӄWo^Y_ָqz G_(*}Z<9bGT<_֎p̣̬BL3$8 f<Rk?Eӵ~8gW(L= &0}ViNVwzӧ:#N\'dmI-[>Ŀ/ lj}m/,Yk0Ҷ34y_?{oFg|]([GBc=7Ђ"2M;xQ`s-BUs\{56:|Vv.ء0#X\/A{I _k?|5-fy&/fwq] )4m^Mw]gq,d&C0 I,O;9P/GqʞҼܟo/p 8<4H[&o ~'Z\࿷39S xW ~,9n|sxĐk^yr8!H}+(Cˤ]_5卥^Uakviĵlu&}D3wH>𵗄@-O|-狥̏ѰxuKU`Av"f<-P2C(ݬe9 xN+6|W>txsyCvKXRl'A!L=*[˽ݺ%Żan l5EPE>83+/OxOD-goq"dc{a#s UӣRbeYN73C *&~jz|UMmLL+#p 5oڇO-#Cu 'Pytqp[\Ba(9zWkcy3;?ߵ틩[k_u$ik]'Nَ~MΣR/$|eьp(4).Z~R]\޺QE{gEP\< Þ7mSm{m7}pC k8.2WO:0ZѭBni?C?Ŀ1 ." L { d46j &'Yk?67MMnmG2yy%lY=/u8¼~_y]V8wVߥ.Jݤϡ(˂((O. |5Mr//^ Zhtv+m#*% ܪOBsU/whc_~=;Og"%;r@fn~@ aѵ.mZΑc-V[3I PdxXO&ş NЮ/4SMK_C$ye!H%q#^%+qX=&˛ɂ&{{(=|]~ѿ>-xo&OwF(>9L`篊KG>k?̿)i9UN+reO?k_ x+Xޟ?fImkp1;IH4|uE;$73x6_ ;_V\^Gq#?,{S__KTfq4 ZG'ɘIL"⾶DXTETE*v#e9c7%O*-tѳ~~?u&sQQ%':DxQ:?m'eO?c-r;@>\rn,FQQ#d}%`pTьiܩh߭~<_XǛ)4Mak&i>s:WC2v1^|?I{mMlJu D'.4hVN1=hJ=?nc?2&> <%i p1¢*]>7$N/4ԄdEc.q Vm[|J]\m ;|mɣ>ؓ:}`l9=uVG?f%߆Z:#FqxT IKV6o=kL/-vӭ;g7ֳG%j׶7 ݜq$,CʿlDӆ_h -1hm-̤Ij|hBIs+~Oi$''xZ wORz dstW$ w ɎC"#! 
Q3 gWvriKxs$lcKg d2@ ˼XےN@5AaJڤ(/ޫ,y$4H5j}yy ϙ?Pj>qz1'}#;3)+C"BLJa?)PHr?S,w_?(J>`(_L'v_%kګg0)蕯jQH((((((ď+gc~z}EI{WgćrG>,$JwW7 ʹ~RFqf/iR+Yt+N{1/c#qϗ&W4YDy8ty>Mcsk}+FLOHƉ5cOZſ~'i177kfo7HYkr@VfPu2;[Uxу )#֝^ E,)۵ވޢ [,`c}|;"XyAx_ @O`zow;oMei6u嶡c:U9T*H#UZϙbſY|Wo^Sν.wyGgpGgF:?_Ψyo^GG=:{{8AygҲ=ngy^+V}(ϥ+S$X)bX!ZY>(R-"| vVd-2td! 8+3kl? 5"ΕhZ1#>xoF.&鶺eSꧪpAx:?eϊm=l:P^$𶷤"kKLKkFIa WG<3^xs5= 2X5|6?&_u/ K/x#Y&Ԙ(f|f9p kO߃mOڔX{{(om` "% YKWZ<'.gR PG[_okI87gooÏk<5ȷFs.$]QCja?k%_Vtf/˰xG VKU~O~JvIͦVӴk >%!p**U(I$~#)96ۻaES$((((+ߍ_Ï:)2Kí,4ЩP[8?{&ރs)Ky9~+< S^$ӼE6-p?,ҺKLӵJ,]23ͥ ,3!ꮌ`}~ Z|s/w a\r`Qؤr {.u{K~e611^A7M^T[h^~Q_^5O x/'257#87mRe''5M7Zj:uudu$0l5cb)jOFS|3V1Ҵ'*ESqd]R/QExş;txևyk}؇O`)ڵaNi{ጎ \]'Qc>)' ;:WX$;Fvzwڙ k_ܦJ}O;5xomiHSN_ \O;'ʃbL|?_'t I\Ckg58IIMlQ]40jV}.A™vQ*r/r|ӗ+/ +@(|K_~'k?kKrX[Z噎]wm0gsW?ߎ_z=חb̖No!pHJD62 U_%Y kSi}n?a5tٍCNIvYc(((((((((+Vɦ|A%EU+?> ޼C?Ğ ďxoEODVSp*Â=6?7cxsgQT,A- $,I4G}mψ|Qh5Q5ԛT`31 ,H@^'|S?4sDB׶S bTpA"ytV\ϖ~BljK,={as=CVҵY#[+:Vh/*ֱ alēo#^|BW{+k-.vglrA#hxp1q]-:xʩBϴjOjHiޯ3ڝ\vCuJ$%w| u.<] B/ ӵ.>}*&%L(%yf|5n~xAC%ݻ߅h5F]= )(+$lvJ=~1&]&YYAgˑyY̒)]He< |g{p7]v[O4 蒤8 \ӫ>8弘l|J᷻Fkky!O3kҲ~7.p~*xY9]7WX5U7N<-~A%v֮!"~)u,cb Fۈy"2G5woW%_6qxr3%<|SL"!?9N8xSKYx>3q>mo-V-m A;88kMh?>ş $^RBi -)|ш°xE񎹡x'Etя66QgvJS Υ]l;-ό9l_[m^E{רm/E ~/yC֍ZΎRGT7h9OCE?R+F%в*(Rr:+ߍ xtȎΰ5:A9x$0 ~ ɱ&g8$w\M5J|J)9-,/tߪڛAqn[¾N3ЏA^Ţv 𖝭/`Yn2Gb=+ɾ:U߃ djIyq5Rѧ{eYG,MGSl7'jxo/Y>|_ ϙo_Zy2F|1[t1|Mciy:%?ko^Þ=ޝ=-1bHoAB ~jxįط^)Xj~o|#KC1ޖ#na]z% m7ߎ Hw؆ty;} /ll>&"JJQ}Q9&?) *Ucdkv{>EVQEWѮ@{-g_5X¸񎟮xHd-oFDW"$P,7ƾծ{y -ܓ4%YLA@OS7/A}mowݷo%?iѰy^v88LԴQEQE2I#Ie"@OA@C^?O/~ D,7:tħ qE1kG|/cxE?(0ldy>HNޠʄbs?;L^8O3?*qG+%Wjq}mۜ{8N??>>^X;ƌ> |Yf6-X*'$‡O}iQd%f(HQEQEQEy_~S|u+-ԃx]\]03rHLcKzQE~:>hw>\?=l&+} ̤LmLh'U5MM-Xd{m:p-mlr 8†$X?!wMO:}ܞ\7r3-mb0!9%2t53;3)+C"BGz=G&l]>WӾ#>e ¼֫.fE)c8W*?ɓ~h袊((g0)蕯j?~اeVQE ( ( ( ( ( ( _~ 4i)AeK1H\~hX9$ml8v @l>~oOo G\(},yjd"d&DF|--#[1д'`!U<siM<| 8-$s Yn23"8* ?> j2vyj$}6X3ϙ3HgJ.*P@p+ra){#f5*hG}?jzaYi2\{Xe-C\֬w0Ƨo'%(¿Ġz_w+| 97-;źC }RA%WYcʮsدZm{P\Zd >{|2KIRp<~&jV nxϕp?A6|ֽڴ8.'8,F<<<%e?8nSvCWi煭owzmț7\pA.GNmb whW, 8$SDx]m8k|;MzΝ~dH!'ͬZM`903H0l rw>ƴ8ѻ?OGݥ`'Ir82x ;zo]qvxШJrUHJQII^Zڛ~PZ^_ҏ/\{s7P4-[#4-2Xn[lp4qA'`+''ÿ}. Pn>VX`\)Ȯ89N[Enމ]ߑ|KTy՗N rK)5Ho5Mz[ _%ス,%晰NN"1jݝ_^i6ϥxz#,]J 8fThi}"n7{Gj x_]SW] t,TH%F&Lk'\tOVBk5{.lohe_FV_jFBQMo_5f~x:HeIX,tjKU_.h;%d|oOxK.7 qg.퍌u8hwVq]m~sx6\|E|g}oxZu-L/s\\[ mkOn#{t ;nBe FMTfc𗤺z;>#b*Mri_[sO>^Zj]սbH.-G*ÂV+ӹԢjQA!EPEPEPEP_/)O_k+m=tƨqciq#̹ #$ FRoSD?/$@K I*%px$޹h OhF*Rۙ.J;?`<ǀ%ǂO[mVF8ea!vJ㗅Y*j C}᫋KNu{y,|,,吡rdVޗ/cweFXaف-t:nwzt[O<;9 q e.XZP$c\endQwեWҏJ7fg_]< -&Z :zdlLW|?i^.GǯcXogo*n@1dXTǒj?{RPAA5/rE>h+OKxgTTĿ{vE$~R٧?-|M_X/fj oF@Ʒl*5mn>;ⷴWHCXNCJv/,iYq sci2 )qHG5O*ozo'7wy"?g`?YK1;ky|p}G,U>zҬ.~~Rg ޟ|k)7l=t 8ai_Ƌ[&oY@ꨳ<{<3_sx>'4h~1;]1e q ½E+_#}58.CiPk%g(u_kLz&r%^I@7cퟪ]Ftei:fg֩:+u"O;*n9$~rrG'|u׿+>뚜|V{ {,=ӵ|⯀5׿? N׌t|?:Il X,-jD'$ɯ q C 6LZ#ݷ`fȦ)9$l]H #55o;?jIĚ_xsǖgrXRG*~,~`U?3)j*Y(FMQm^^&Ӽj?`O|T j1WyӏݏW_E[w%Sh|:1skQ($ n$s`cῲ6~8TҼA<v,oXآF&s_4_=;8`h^)|;wF/2;*zG3?8k麭ums\M3TB,ceF$\dd;q]#C?aPWTɝs֏\O'#ᗃ_߉ti"6QkɄ6Uik+|{c[^>=;kM2_]8W#QD*I mX\ߴ_< wdZL[ neV"V2$ ]|k-_=[S7g,v@2~>eK; ,_wJOتz?2'6W=ʸy@G_A}O+ ݊l44!ORy$ŸaT[k=%j:\>>"'&VYC[9yvl~nzVaeiv6vp,0*"( Whu$Š( o|74$ψ h{pdlgdh2?,}**T8M.`F9N1M$oƞ /i /W(:4>LZ;dh#HC)r~Jx7Vѿҵ8-{∳4Z}Xc_//K|3^M?]gʒ TC5dfq$!T=ξ#?hTy ş'6O6)a)(uJw]aq4h5R&\ҧtiUOih ?H!m߈JdS Hq/ Wiڝ^L=Ѿq^Ke W7 >tT](rG9LwZO%K=V&TyjNʼʜ q/ V/ƑX+, i?, k*-%^4_nmP|rLZ#M/yjX%JhC+/%HkpGgcjy,rOL.k֯S-XiT"N0wO4YpY-%U߼[ZUl\lk7w_Z+_ L]ޢyިCSˋj$d_8] rO, ;tI5:?]CY#zeS pgߌ?$W3|9! 
[binary image data omitted]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/figures/SCH_5007_V00_NUAC-multi_nic_OpenStack-VLAN-manager.jpg0000664000175000017500000051267400000000000030525 0ustar00zuulzuul00000000000000[binary JPEG data omitted: rendered network diagram figure SCH_5007_V00_NUAC-multi_nic_OpenStack-VLAN-manager.jpg]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/figures/SCH_5007_V00_NUAC-multi_nic_OpenStack.vsd0000664000175000017500000614100000000000000026356 0ustar00zuulzuul00000000000000[binary Microsoft Visio drawing omitted: document titled "Schéma Réseau" ("Network Diagram"), author Razique, created from the Visio DTLNET_M.VST template; it contains pages Page-1 and Background-1 and network shape masters such as Switch, Server, Database server, Cloud, Dynamic connector, Hub, Router, Firewall, and various server types, and appears to be the Visio source for the SCH_5007_V00_NUAC-multi_nic_OpenStack*.jpg figure exports]
DFXGdG_'2WUA;FJ!$a dFD$&#l,&}>Uh ETh]]9 MTIAU@?@9 EU?@ ͂?@SSfhr~?Pps-8R?>$> n JuM{` ?e ;  Su*Jt'  $mCLmta{_3.Jv  q?#cwn?v(A[}Kѭ>JU2N贁N[?@M#J&A@bI(bW)U+U+TU&Jy#?!J ?%P?AO"d+KbW+$u/%B#A1A15 `?Cop rigTt (c)x02`09x0Mp0cn0osh0fv2g1rj01av0ih0n.x0 l0 n8s2e0en0v0d0:1Y>4#b-#M3 M7A#(p0bP59 DFXG~dG'2WUA);F!$a dF$&#l,&}>Uh ETh]]9 MTIAU@?@,?@ ͂?@SSfhr~?Pps-8R?>$> n JuM{` ?e ;  Su*Jt'  H-mta{X_ 3.Jv  q?#cwn?v(A[}Kѭ>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>Uh ETh]]9 MTIAU@B?@-r-.?@ ͂?@SSfhr~?Pps-8R?>$> n JuM{` ?e ;  Su*Jt'  m@mta{_ :3.Jv  q?#cwn?v(A[}Kѭ>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&*Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>Uh ETh]]9 MTIAU@eZԩ?@ƍu?@ ͂?@SSfhr~?Pps-8R?>$> n JuM{` ?e ;  Su*Jt'  }$~mta{_D3.Jv  q?#cwn?v(A[}Kѭ>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&*Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>Uh ETh]]9 MTIAU@h!I?@O2?@ ͂?@SSfhr~?Pps-8R?>$> n JuM{` ?e ;  Su*Jt'  XƿHmta{p_'F3.Jv  q?#cwn?v(A[}Kѭ>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>Uh ET]]9 M JU@]u?@x?@,ww'?@d?Pn6 >JuM` ?u#t  fIt=W紁NIRkgS|lWxI#>2zGz;?@MJA@eb #zb(b%)2+2&J[/.;?&L ?A$4/U%B!!5 g`?CopyrigTt (c)0W20%090M0]c0os 0f2R1r 0D1a0i 0n.0 AUl_0 8sc2e70e0vu0dW0!$ę-# '&?6iie59 6X7}'0UvBiŇ&!$0 -FAJlJ8>UhAA@ 11!T2 $<cV] &WR~VJ3kq?@BP( ]S m၏jy#mS zjaֱQE^y`0__ lOA/X5:Fh4 XYePYv81sX5aZjoohY}aUU߭?@,bl'4/\K)aX2>_;"rA #jsHD: # h0>Th]]9 MTIAU@sLGv?@_?@ ͂?@SSfhr~?Pps-8R?>$> n JuM{` ?e ;  Su*Jt'  [Rw͖mta{_3.Jv  q?#cwn?~v(A[}Kѭ>JU2N贁N[?@M#&A@bI(bUW)U+U+U&Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>Uh ETh]]9 MTIAU@?@*y>{?@ ͂?@SSfhr~?Pps-8R?>$>  uM` ? ; E)u*Jt'  $mCLmta{)4G3.Jv  q?#cwn_?v(A[}Kѭ>JU2N贁N[R?@M#J&A@bI(b*W)U+U+U&Jy#c?!J ?%P?AO"d+bRW+$u/%B#A1A15 `?Cop rigTt (c)x02`09x0Mp0cn0osh0fv2g1rj01av0ih0n.x0 l0 n8s2e0ejn0v0d0:1Y>4#-X#M3 M7#(p0b59 DFXGdG_'2WUA;FJ!$a dFD$&#l,&}>Uh ET]]9 M JU@L&d2?@\.?tP6 >JuM` ?uJt ASt=WJRb~li>t2U?=@MJA@b + &J[N#?0a& ?AT)B!!5 g`?CopyrigTtZ(c)Z20 9ZM c. 
os f"!r 1ai n.Z Al90 (s=2e0ej vO0d10@!$b-# 'Ya&?6iiek59 6jX7^'0a&Z!|$0 FJlJ UhqMAA ]A !4Ja-'Z@?@  <0贁N#A OR|VJ:6]G+|M__Zw?/#UEF`?0?@ aQS xQ#`Y{GzYk5^ro!o :mX5U'h?gkT:&#dT[RUU4F?@|?BP(flwaw%l5p!Xnc 3dXAYEh4\`Yb/XAdx̼_ZWnQXAYL&tgkRwpQlQE26_HU"]rA HD: # h0>Th]]9 MTIAU@?@?]0?@ ͂?@SSfhr~?Pps-8R?>$hA n JuM{` ?e ;  Su*Jt'  H-mta{AJ_ap3.Jv  q?#cwn?v(A[}Kѭ>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>Uh ETh]]9 MTIAU@B?@d;]?@ ͂?@SSfhr~?Pps-8R?>$> n JuM{` ?e ;  Su*Jt'  m@mta{d_v3.Jv  q?#cwn?v(A[}Kѭ>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&*Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>Uh ETh]]9 MTIAU@eZԩ?@>6#TTĶ?@ ͂?@SSfhr~?Pps-8R?>$> n JuM{` ?e ;  u*t'  }$~mta{%$苵3.Jv  q?#cwn?v(A[}KI>JU2N贁NI[?@M#J&A@bI(bW)U+U+TU&Jy#?!J ?%P?AO"d+KbW+$u/%B#A1A15 `?Cop rigTt (c)x02`09x0Mp0cn0osh0fv2g1rj01av0ih0n.x0 l0 n8s2e0en0v0d0:1Y>4#b-#M3 M7A#(p0bP59 DFXG~dG'2WUA);F!$a dF$&#l,&}>Uh ETh]]9 MTIAU@h!I?@Ž?@ ͂?@SSfhr~?Pps-8R?>$> n JuM{` ?e ;  Su*Jt'  XƿHmta{_v3.Jv  q?#cwn?v(A[}Kѭ>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>Uh ETh]]9 MTIAU@sLGv?@c?@ ͂?@SSfhr~?Pps-8R?>$> n JuM{` ?e ;  Su*Jt'  [Rw͖mta{_] 3.Jv  q?#cwn?~v(A[}Kѭ>JU2N贁N[R?@M#J&A@bI(b*W)U+U+U&ՆJy#a?!J ?%P?AO"d+bRW+$u/%B#A1A15 `?Cop rigTt (c)x02`09x0Mp0cn0osh0fv2g1rj01av0ih0n.x0 l0 n8s2e0ejn0v0d0:1Y>4#-X#M3 M7#(p0g59 DFXGdG_'2WUA;FJ!$a dFD$&#l,&}>Uh ET9 M3JU@L&d2?@\.?ҷP6 m>JuM` ^?5ulbSD_Jt bstE>2zGz;?@MJA@u+b+&K !JS"j& 1"A.b+-,-/*B!!5 `?CopyrwigTt `wc)5020A0950M-0c+0o%s%0f32$1r'0`1ua30i%0n.50_ Al{0 +8Us2eS0e+0v0ds0ib =L!XY'0U Bj&%0 :lJ Uh*M[A[A 9A Z1:'h? Y T:&Qi@J4F?@|BP( IQ!i pP!!U31^ `0,I_[_ {uXEEh4Ẍ TbYb/uX _dx_aZW贁NuXAIL&tKRPQ\%QAI@@ HlQbY 0hauXAo G+|oaZw/(uXAY RU:q Ib_`yA\ppr׌ :muXEOO_\_dH >Z(+s .ṿrAA'Fhu7aa#b!OB 9 c~dڮ w7d@+ 9BBGe P?m&P qYXct߻( Bxzج{ Xҿ} g:P~ZU (X Zb|]ޚ_HRo8 ~ 8 Z ߮ iV]!_;eIL UMU'UW4YAU BN [U h u R.8UEU_l y!񷪆"A#K$X%&'ª()Ū*+$,$-"$./$/Ъ<$0I$1V$2c$3p$4}$5#6 !7( !8 !9 !:X !$<$=UFD  h(^TYYBBUFjZ?F~??x<F BP(?P } kX` BW66 W TTT%TTȅ5H?? 
(?B@L&d2?-(\.g1(.sUW!AU&w/~  0QB`#serv",comput$dWis r0b$ud n tw 'rk!eA^| (SG|& b?,>?TTwpp pqqppqwqpqwqwq+p pppwwwznw6;5wqpwwwhghw>Drag onut hepe.Rih-c]lckaUdcos UPo etQeAvUwreP 5aabjZֿ~??cN׿ mF1??SbM1{V↑T*J?@ ?UG DF P# h @T(PYY# 3U@jZ?@~??F?Pn} u{` ?Su#΍>>HX HlHYHH E.Oč>UDDXXUllBa'9-",'tU"J%q4 Q?"'4b0{[`:#26Q79$"t3&9"OL1" ^!'܈ >7@ÿ@q@@?@8B?@!ABP5F02u@K`-1xBu@rCC53A 3Bo2гC9uv`u `"[ۢ b!-u`23kQQ1Q7B`?CopyrigPtf0(c])f020P9f0uMPcPosPIfRQrPQaPiPn f0AUlP XsRePePvPd/`VPs_SBNcPm!#5JP83`5a Q1#d:4%B @8=3R6#d\6P\6 X2$#3+0Ub\3]Jxge8^ E U#".5m Hvq%t{.5tpw3t{Et{t{^y {|tYwmkdaIB'BT1aa"0ք.2132B3z1z1"|+q+qEQQX116Q;r<A>Ab\1EF`rAJ͔!hh)1baA (dT6vQ1oUkQ]b` MPnuPaPtUQra1Q` E50uPp -`ePt=e.5Q`!3]t1LPRdU\ %`u-`bcu30D-!I PPr֯H^-]L˭DRQqU`r\BAPaPrB`LDž5^,B%I SbiPQXj feTϟB LLPcXςPC5%Bl `iPgCų@o߰DLRPo}߄|AqZ@@K ӱpKDQ`;BNQwRkax`2`녨cϣ[I$`Qd `SsPo8SUbPRPsU}AxBuLDmatVa IaXAfBLT R `QQVhP+IQUm-`u-tP !`4줒ӿU챒VBALC`<1B` W2: /XؑLMPmRAy;/M/˒pq//$OPaSDy1!//%ؒ7VI]BPlRgRTP訰y??SPa2CP򿃪X??B` 7T+01I@"`a^%{/MPCAD vOOHKdQ>C2Z>6?H?qgtԁ/tufvop#U@jZ?F~?vvIC-pˑup` s?bujp`&T0a,Stԁ/p]e-2q?@< o5pmH!q."Tԁ.dԂxAƒ@T@>$pÿ^$gqa OvcrSr`Rno>㳫1e&BIrSrOvcv=ss"u~$bb pmp sty:b{bráe@6Cu&P`?hi@k""oűRT h]]9  5AU@jZ?F͂T??Fx?SF?P6 jt`  o?tk[>(A Vu aSuo.]&A]  *ԮJU2zGzt?@L#&A@bA#z7 5#1bO(b])j+$j&[ "*(w "?& ?AU$l/%B115 `?CopyrigTt (cu)Q02`09Q0uMI0cG0osA0IfO2@1rC0|1aO0iA0n.Q0 WAl0 G8s2Ueo0eG0v0d0"1E]4MŤ-&3 &7&l>]Uhz811U!J!u6?F+\.>djIIa? >"(EǑ I%K9E 5EU . K ;;5QIWEEEF _KnSA?8@@Bu?FDR> ?Nk"\6`bmMM\j=am {:e P> #? D"? 5-g?j4B7!B{oogr57!ibDAbE9 MuX7_'0U~rN!$a)5 5vIzHD # =hj0>T h]]9  qAU@;?FI5K?@xEL?6ѿP6 t`  bm&Qo?t@@L]WI,#u aiuoL> JU2zGzt;?@L?&A@b}#zs q#1b(b)+$&[9#H",ɺ (wH"?& ?A$ /%9#BV1V15 `?CopyrigTt (cu)02`090uM0c0os}0If2|1r01a0i}0n.0 WAl0 8s2Ue0e0v0d0"O1E]S4MŤ-9#b3 b7&l>]Uhzt1.A!JN!m.%-+0djV1ax߃S(FJ-&?F mO"> ?Nk"L[Majnha_:e P> #? BK=o? n-?j4Bs!B{__Wr5s!D8E6V 6?wj_|_ ^XE.U@z T6?F @' d_mOoT \VyWxRkrVo_bVh*g:n"'UaEjz36>CN>?Bn}]zGvN7V iDAbE9 MX+G'0UJPN!4ae5 MaHD # =hj0>T h]]9  U@jpiH?F^?@$?F]F?P6 t`  !a&ao?t,F=۳=u;u auo>J2zGzwt?@ʐLAw@b#z1b](b!).+I.&[-m(?0& ?A$I0/Q%!!5 `?CopyrigTt (c)02`090M 0c 0os0f21r0@1a0i0n.0 Al[0 8s_2e30e 0vq0dS0T"!E]$M-# %'&l>]Uhz!1!JF% djc~YZ(_N9zOMGE%_EH$SwO"K ңHE ^&_8_WE_OqOHOOi&DAbPE9 MX7'0UUfb>!$a % f1jHD # Uh4>T#]]9 M5AU@jZ?FL|+?@?F({~?P6 JuM` ?u JYt )t7zEA2&L_D?. > >tJU2q K?=@MJ&A@-b9(+bG)T+T&J[#"#a?& ?AH?/`)#115 {`?CopyrigTt (c);020G09;0M30c10os+0f92*1r-0f1ai+0n.;0 Al0 18s2eY0e10v0dy0z"!4B-#3K 7&)? b 0K8>U# ]A]A "1!ъ!PzQ <%OYAJEU . K;;\%W%EF($L[.FX5Eai%?F 6?wL;] W-\TB⤲8?F@&SA?F @ ?Nk"L'`bmMW_Ye[ٯ7 VK=F: [jr ? D"? XZvD?ta@Boogr5;!QEx]SiLO MLGXa~WOTZiAle59 XG"'0Uc>!$05 .HD # =hj0>T h]]9  IAU@K?FOgVb?FY%?Fwᳬ?PJW?>t`  ^Z?t#ə.E @x?r?/{4܂ G _Z+u u$ >  J JpU?43f<!.&P?b3bbfz@1bAe&bq$d 2zGzt.#@LT"&r(~&i$q)(+}'i"~*#81815 `?Co yrigTt (c)o02`09o0Mg0ce0os_0fm2^1r 1am0i_0n.o0 Al0 e8s2e0ee0v0d0"11E]54M-#D3 %D7.&lUh#7 V1#Ai!J# F/?B1B  VRiAAbE9 MX G{W'0U*R2N!I$a {VZUHPD  # #>h0JTpERI MU@u6?Fjc{g&?@%r`5B?Fm_F:Is?HNtQ`  II?t#]  _ii$!`kNu  iu%wj>M JQ T       "@&&JNU2"?H@Qs#N&A@b(bU)++&RNp}#>H1#19 ?#bbbz| R&/5R}#11`?Co0yrig\t (c)02h090M0c0os0f21r0Aa0i0n.0 Al!@ 8s%Be0e0v7@d@21a4t#-}#3 %746lJ#$JUp5 3 M19 1!q(!FnElrkRt# 8$kcD?@숒@)?FP?P"EkU |1\iOT/irvs}i>`_,of :m ,M ?? S&d2? *b?r4B!B oo1gr #5!>%(EQOOOO__FAc1_C_ D4ևS]_o_yRX+{_,__ #R__ (YQoSo+o=osd {oEeooooooFyUy{* hDVW(I X"c7I!pg bf'iMjSE t%XyG|'0UŢŞN!O4i |UGD # hz0TdYYBU@(%)?Fxg^?@0߯H?Fs7Uݹ?P} t` 87?t#_)TeΦu ]ukv  ̝L&UUU&I&3 U!!`?CopyrigPt _(c) 2\W09 M c os f"!r 1a i n}. 
Al00U (s42e0e uvF0d(0'HS$#-## 'I?k=#ǒ 2j$B##0U'BM3@&@F0Yk} #HA%DKb5DK3DK5DKED$!GiAZG5(|IDFX7yW'&Dŭ6!4YA yVZHD  # =hj4>T!]]>#AU@0߯H?Fz)e??F˞9q?P6 t`z  ?5t# rXѮu)Nu;):O]ע0 &> # " "+"+"+"+" $+"T-J[U7 ??& bA@ 2b4-#2zGzt#+@ Li68!6 ?}; 72U!:B#AA5 5`?CopyrigTt(c)20F@9M2@c.0@os*@f8B)Ar,@eAa8@i*@n. Al@ 0HsBeX@ej0@v@dx@)21"DM-,;?C G& l>]UhzAE!A#!ְ1J`#@{&pz?FUy{?NT,dKj&b&R&M/8T9K-q?F@F?FdG0 >Nk"?YE*]\LThKjfGD&Qhj ?iU3UGe&Y`?@/Jb)h5oWKoWo;èKod}l$ro`nE2K?Fˌ%4G{?F -q? eZR^{|\?6?vodmP}l>MBe 9 N 1iS? 1?grl#51:PO"4ga@Sq(UE^@O|ǐ[`ߡa v2rbA @3ijD&}laqOz o?#WHZ'Qu=*[^N?F]%P?@O&@ c?FаM<n ?` eb0)\(Pfodm}nv}l?V5(?q$Ix[cN#W{ #iA@&QeE9 MX''0UjdN!0 /HD  # =hj0T h]]9 #]AU@}%?FR$,?@6#V?Fc_3֡w?P6 u` ?u#t  3zM%t3ws%.<%H._'B> #PJU2N贁Nw[?@ʐL+&A@wbu](i+bk)%+&p%#4">9"<5!?& ?M#bbb͓z$ p&"/%T%#L1L15 w`?Co yrigTt (c)02`090M{0cy0oKss0f2r1r 1a0is0n.0 Al0 y8s2e0ey0v0d0"E1E(]I4M-%#X3 X7&l*>(UhE j1#$Ac!J`%#@5e$_F??!foA3nBM( V?FJ-R?@HQ??FS!R?P| llHL6VLD5E+v;*AůʙH: 8snP%$? k?Q? \?4_!__WrA5_! R,U3OC@IL?F4~Kz?P)Oy޾ T!]]>#IAU@܋?F2Oa>?@#r`5B?F*ݷ?P6 t`  >B?\oEt# ŖoZu 0Nu;Y:|8>$# JpU>##<?2& ?5bbbz@b#ANi&bu$'h 2N贁N[2$@ LX"D&v(&m$ :;T'm"*B#d1d15 5`?Co yrigTt (c)020090M0c0os0f21r 1a0i0n.0 Al0 8s2e0e0v0d0"]1"a4M-/p3 p72&l>0>Uhh= 1#!eJ`#&FNTM\jh1B,RBM(!JoUy{O_ e@ So4(D@^Tw?F]֝ä ?Nk"?w_KT\T+\_(Kjӷjmplhj -`\TPQE\Uui,St_o u~!ՠXEO9= T?FL?;uQ_Pm"rA @3sjQ1lğ=GoYniAleE9 MX|'w'0Ur^N!M$|A vzHD  # =hj4>T!]]>#IAU@܋?Fj?@2r`5B?F^ݷ?P6 t`  >B?\oEt# 6ΠZu 0Nu;Y~:#&}>0m8>  PJU2N贁Nwk?@ eL&A( 5bM(b[)h+h+$h&#[-M$#?& ?b$Z*'*B#O1O15 5`?CopyrigTt (c)02U0090M~0c|0osv0f2u1rx01a0iv0n}.0 Al0U |8s2e0e|05v0d0"H1P"L4M-/[3 [7&l>0>Uhh= m1#!S!J`#*&FPUy{_?NTMjC1gc RVMʣ(D#@ ![?F"$. ?Nk"W?IKTLTcKjRlhj p=' (*!)i,SO2Kes9T;QEGUFw6qX}__TM7fTZT\Dž}_P^52XOES"rA 0S#Es3iAleE9 MX|$Gxw'0UrIN!4gA xvzHD  # =hj4>T!]]>#!AU@܋?Fc_v?@#r`5B?F*ݷ?P6 t`  >B?\oEt# ap'EZu 0Nu;Y:|$># BJpU>#͙;? & ?5bbbz@}bAA& K&@ 2zGzt?"K@ L0"&N(bM)+TK/8Z*B<1<15 5`?CoyrigTt (c)s02009s0Mk0ci0osc0fq2b1r1aq0ic0n.s0 Al0 i8s2e0ei0v0d0b"5194M-t/H3 H7T &l>(>UhE=3Z1#E!J!FENTMjN@1h(NoUy{OO e@ o)4(D@^Tw?F]֝ä ?Nk"?'_KD3\T+\(Kj?ӷjmlhj P\ DiAlePsI=MXsG'0UzbZ6N!%$0 sFEjHD  # =hj8>T! ]]>#!AU@܋?F*?@#r`5BW?Fw?P6 t`  >B? \Et#տ|u l4RQu?]>I$>I# JpU>͙#<?& ?9bbbz@b#AE&bQ$$D 2zG[zt#@ L4"&R(^&I$Q)+]'I"^*Bh{.M#To1o15 9`?CoyrigTtk(c)k2009kM0c0os0f21r1a0i0n.k Al0 8s2e0e0v@d0f"h1l4M-:?{3 %{7&l>(>UhE= `51#I!J49fAn^ R)X f"B!Jui,SG?RSlOS u~!N]8-19= T?FL;uQ ?Nk"?YVTQRXQOnğ/=i n (\ wTiAaePE9 MXLg'0UbZiN!)$0 Lf`jUHLuD" # I>h(J TEI 3qMU@jZ?F({~+?P >uA` ?u#VMJ*  )8*A )LG#P`G.$*G8BV6j>t   t|zpb"#"J>U2zGz?@9 +#>;&A@b]m(b{)+Y&^" !l$jN5#",#C"&p "Ms"b{);x'^",^"V/5#113#`?CopyrigXwt dc)0W20090M0]c0os0f2R1r01a0i0n.0 AUl@ 8sBe0e0v@d0R#"lJ8JUlC -(#s!)(J!O?zQ0 # # %YA NEU . K ;;LGE3EF)$OK.FHE\Vai%?F 6?wt\[ W-L/D5Dꤲ8?F@@SA?F @ ?Nk"?wY2`bmM3_Yw[7 K=ދF  #j1H|AIx]Sit\1IMLHDEnPOoDJ1AAeCBp OOG|u?F9 mO"t\7=L驣/XQ eel.%-+?@llփ?FU~> @oVoɽ7y2VzdOfшlA=$o`"jQ3 ex}@4SSa[?o4ui'e!Oa#Əa$jiMiyI,%!Xtz'0U &`50HF vؚ_`H>;s `Dt![x_H=ߚFXu!#ȱw&OB H9 Ȼ~dۮ F@+ J[zsGUfȓ4 "v" X/ ߻O @֛ ȝ r6?  ( k` 7e޿ IN]UFD  h(^TYYB UFjZ?F~??x<F BPG(?P } X ]B66]  TTUTTTB9T9ȅHBU?? ?B@L&d2?6-G(\.Y(7.sU!}&/)Ѥ&)  0cB`,dat 0b 0se,2rv2,0ompu 0#3di0t0i013d0n0tw*0rk!e ^| (S G& iz ?!3E?? ?  
wpp  pqw~ qwqwwpqwqqpppw'wwwqqbwqpwww7{{w^Drag onut hepe.Rih-c]lckaUdcos UPo etQeAvUwreP 5aabjZֿ~??cN׿ mF1??SbM1{V↑T*J?@ ?UG DF P# h @T(PYY# [U@jZ?@~?tP} u` ?u#2>D>HX DHlHYHH (E9.EUO؍:DUDXXlUlH"' &T",'U)2&5qq/4 ?""'4b0{[`:#26%Q_9 $7&9"L1" 2^!'>~_@ÿ@qh@o?@`B?gD]F0#Bu)@`-1֠Bu@Ur#CC5CAB""Ch9u`u `"[ b!u`2ւ/3Qy1y7B`?CopyrigPt0(c)020P90MPcPosPfRQrPQaPiPn 0Al` XsbePejPv&`d/`VPs__SBNcPm!#5P8[`CQ2y1&3db4&5=Bh8=/3z6&3d66 Ԁ2&4#/30U!r3]ge<^1E U#&5m Hq&5twV5t{3t{FEt{t{y {et !{|tYwmAdaуIOBT.1:a:a&20#V21032=FB3J112I5db6q 7~C|hhE) *^Q_`!CQCQA11B=B;8ARrSqe a> !b1AE4FAނ.1#8dT7`XQ`]rw` MPnuP%aPt%ar6aYQ_` E]0uPpU`e`tee^V5Qn]1PRRd M`uU`b¯Ԧ30Z! PPr#5GCHhz]3 D&bQe^\޿𿏫AP)aB`Ҩ75^gyϏ-% S*biPaϷZeTϏ-LPcX&ߠϫk5N`rBDl 2`i`g̨)@`ߑRPoʂѩ|(ZP\@ pKD`ĖUSPaPEePlP6?ЪX9` TP7`?>` /aZPa_qSbI`zy`DRa0"ZʒT=NwRkqa<.2`s @ҨגcOaI` ad2`%c& d nĝAs Nx+P=鑙mrDatV/a I͢@+aNfP󨑙 R `Qa// '=/KM`C/P⯺/+QUmU`utP I`/ %N4$?6?s 2v? 2Nv??;Hd2QC_3` Wz%?NV%O7OCt aOO#ޅO`M&`mR{OOӧY"Np_1_E;OUe1ySQ]_o_M qt!Tw*foSU@jZ?@?/v/vUpSp2u8p` ?bup` B=ݝ`M,{t!Wp5'm-ׁ<3ohm!Qb8q׏XݥBT!.ct!xAZg>pÿ=$ bUfq vU֒rrrvvvrsY"`ERon_10 бeCZ~bb W}Wbr@":tbWuP`TfnMq1o$6zK}_obK1K2\#b^ºaaaaFҴ_AaY6P`u$`bTs`ݡs`ap䑹x^tݥ5f_b&bׄЂF3s͵N?SYDs\rV@(#e"88G^80G8ɐIFZ,@rb4 2V}uauaxuYV{nx` % P&S.1ua`NEW_ORKd2H IPհs!R۰RЕIհSÑQȑTi|o&ohXAG'0U?8!0oHD # =hj0>T h]]9  5AU@jZ?@͂T??@x?SF?P6 jt`  o?tk[>(A Vu aSuo.]&A]  *ԮJU2zGzt?@L#&A@bA#z7 5#1bO(b])j+$j&[ "*(w "?& ?AU$l/%B115 `?CopyrigTt (cu)Q02`09Q0uMI0cG0osA0IfO2@1rC0|1aO0iA0n.Q0 WAl0 G8s2Ueo0eG0v0d0"1E]4MŤ-&3 &7&l>]Uhz811U!J!u6?@+\.>djIIa? >"(EǑ I%K9E 5EU . K ;;5QIWEEE@ _KnSA?8@VBu?@DR> ?Nk"\6`ObmMM\j=am {:e 뇿P> #? D"? 5-gg?j47!B{oogrK57!ibDAbE9 MX7'0U~rN!R$a)5 5vIzHD # =hj0>T h]]9  qAU@;?@I5K?@xEL?6ѿP6 t`  bm&Qo?t@@L]WI,#u aiuoL> JU2zGzt;?@L?&A@b}#zs q#1b(b)+$&[9#H",ɺ (wH"?& ?A$ /%9#BV1V15 `?CopyrigTt (cu)02`090uM0c0os}0If2|1r01a0i}0n.0 WAl0 8s2Ue0e0v0d0"O1E]S4MŤ-9#b3 b7&l>]Uhzt1.A!JN!m.%-+0djV1ax߃S(@J-&?@ mO"> ?Nk"L[Majnha_:e P> #? BK=o? n-?j4Bs!B{__Wr5s!D8E6V 6?wj_|_ ^XE.U6Vz T6 @ d_mOoT ݬ\VߦyWx?RkrVo_bVh*g:n"'UaEjz}36>CN>?Bn]z/GvN7ViDAAbE9 MX+G'W0UPN!4)ae5 MaHD # =hj0>T h]]9  U@jpiH?@^?@$?@]F?P6 t`  !a&ao?t,F=۳=u;u auo>J2zGzwt?@ʐLAw@b#z1b](b!).+I.&[-m(?0& ?A$I0/Q%!!5 `?CopyrigTt (c)02`090M 0c 0os0f21r0@1a0i0n.0 Al[0 8s_2e30e 0vq0dS0T"!E]$M-# %'&l>]Uhz!1!J@% djc~YZ(_N9zOMGE%_EH$SwO"K ңHE ^&_8_WE_OqOHOOi&DAbPE9 MX7'0UUfb>!$a % f1jHD # Uh4>T#]]9 M5AU@jZ?@L|+?@?@({~?P6 JuM` ?u JYt )t7zEA2&L_D?. > >tJU2q K?=@MJ&A@-b9(+bG)T+T&J[#"#a?& ?AH?/`)#115 {`?CopyrigTt (c);020G09;0M30c10os+0f92*1r-0f1ai+0n.;0 Al0 18s2eY0e10v0dy0z"!4B#e-?3 7)& b 0K8>U# ]A]A "1!ъ!PzQ <%OYAJEU . K;;\%W%E@($L[.FX5Eai%?@ 6?wL;] W-\TB⤲8?W&SA?@ @ ?Nk"L'`bmMW_Ye[7 VK=F: [jr ? D"? X?ZvD?a@Boogr5;!QEؿx]SiLO MLXa~WOTZiAle59 XG"'0Ujc&N!$05 .HD # =hj0>T h]]9  IAU@K?@OgVb?@Y%?@wᳬ?PJW?>t`  ^Z?t#ə.E @x?r?/{4܂ G _Z+u u$ >  J JpU?43f<!.&P?b3bbfz@1bAe&bq$d 2zGzt.#@LT"&r(~&i$q)(+}'i"~*#81815 `?Co yrigTt (c)o02`09o0Mg0ce0os_0fm2^1r 1am0i_0n.o0 Al0 e8s2e0ee0v0d0"11E]54M-#D3 %D7.&lUh#7 V1#Ai!J# F/?B1B  VRiAAbE9 MX G{W'0U*R2N!I$a {VZUHPD  # #>h0JTpERI MU@u6?@jc{g&?@%r`5B?@m_F:Is?HNtQ`  II?t#]  _ii$!`kNu  iu%wj>M JQ T       "@&&JNU2"?H@Qs#N&A@b(bU)++&RNp}#>H1#19 ?#bbbz| R&/5R}#11`?Co0yrig\t (c)02h090M0c0os0f21r0Aa0i0n.0 Al!@ 8s%Be0e0v7@d@21a4t#-}#3 %746lJ#$JUp5 3 M19 1!q(!@nElrkRt# 8$kcD?@숒@)?@P?P"EkU |1\iOT/irvs}i>`_,of :m ,M ?? S&d2? *b?r4B!B oo1gr #5!>%(EQOOOO__@Ac1_C_ D4ևS]_o_yRX+{_,__ #R__ (YQoSo+o=osd {oEeoooooo@yUy{* hDV!VV(I X"c7I!pg bf'iMjSE t%XyG|'0UŢŞN!O4i |UGD # hz0TdYYBU@(%)?@xg^?@0߯H?@s7Uݹ?P} t` 87?t#_)TeΦu ]ukv  ̝L&UUU&I&3 U!!`?CopyrigPt _(c) 2\W09 M c os f"!r 1a i n}. 
Al00U (s42e0e uvF0d(0'HS$#-## 'I?k=#ǒ 2j$B##0U'BM3@&@F0Yk} #HA%DKb5DK3DK5DKED$!GiAZG5(|IDFX7yW'&Dŭ6!4YA yVZHD  # =hj4>T!]]>#AU@0߯H?@z)e??@˞9q?P6 t`z  ?5t# rXѮu)Nu;):O]ע0 &> # " "+"+"+"+" $+"T-J[U7 ??& bA@ 2b4-#2zGzt#+@ Li68!6 ?}; 72U!:B#AA5 5`?CopyrigTt(c)20F@9M2@c.0@os*@f8B)Ar,@eAa8@i*@n. Al@ 0HsBeX@ej0@v@dx@)21"DM-,;?C G& l>]UhzAE!A#!ְ1J`#@{&pz?@Uy{?NT,dKj&b&R&M/8T9K-q?x6@F?@dG0 >Nk"?YE*]\LT_hKjfGD&Qhj iU3UGe&Y_`?@/b%)h5oWKoWo;KTod}l$roҥ`nE2K?@ˌ%4G{?@ -q eZ̿R^{|\6?vodmP}l>MBe 9 N 1iS? 1?g]rl#51:O("4ga@Sq(UE^@O|ǐ[`Ρa g v2rA @3ijD&}l迲aqOz o?#WHZ'Qu=*[^N?@]%P?@O&@ c?@аM<n ?` eb0)\(Pfodm}nv}l?V5(?q$Ix[cN#W{#iA&QeE9 MX|''0UdN!0 /HD  # =hj0T h]]9 #]AU@}%?@R$,?@6#V?@c_3֡w?P6 u` ?u#t  3zM%t3ws%.<%H._'B> #PJU2N贁Nw[?@ʐL+&A@wbu](i+bk)%+&p%#4">9"<5!?& ?M#bbb͓z$ p&"/%T%#L1L15 w`?Co yrigTt (c)02`090M{0cy0oKss0f2r1r 1a0is0n.0 Al0 y8s2e0ey0v0d0"E1E(]I4M-%#X3 X7&l*>(UhE j1#$Ac!J`%#@5e$_F??!foA3nBM( V?@J-RHQ??@S!R?P| llHL6VLD5E+v;*AʙH: 8snP%$? k?Q? \?f4_!_t_Wr5_! R,U3OC@IL?@4~Kz?P)Oy޾ T!]]>#IAU@܋?@2Oa>?@#r`5B?@*ݷ?P6 t`  >B?\oEt# ŖoZu 0Nu;Y:|8>$# JpU>##<?2& ?5bbbz@b#ANi&bu$'h 2N贁N[2$@ LX"D&v(&m$ :;T'm"*B#d1d15 5`?Co yrigTt (c)020090M0c0os0f21r 1a0i0n.0 Al0 8s2e0e0v0d0"]1"a4M-/p3 p72&l>0>Uhh= 1#!eJ`#&@NTM\jh1B,RBM(!JoUy{O_ e@ So4(DF^Tw?@]֝ä ?Nk"?w_KT\T+\(Kj?ӷjmlhj -`\TPQE\Uu?i,St_o u~!XEO9= T?@L;uQ_Pm"rA @3sjQ1lğ= GoYniAlePE9 MX'w'0UrJ^N!M$|A vzHD  # =hj4>T!]]>#IAU@܋?@j?@2r`5B?@^ݷ?P6 t`  >B?\oEt# 6ΠZu 0Nu;Y~:#&}>0m8>  PJU2N贁Nwk?@ eL&A( 5bM(b[)h+h+$h&#[-M$#?& ?b$Z*'*B#O1O15 5`?CopyrigTt (c)02U0090M~0c|0osv0f2u1rx01a0iv0n}.0 Al0U |8s2e0e|05v0d0"H1P"L4M-/[3 [7&l>0>Uhh= m1#!S!J`#*&@PUy{_?NTMjC1gc RVMʣ(D#F ![?@"$. ?Nk"?+IKTLTcKjRlhj p= (*!)i,SO2Kes9T;QEGUFw6qX}__TM7fTZT\Dž}I_P^52O,ES"rA 0S# Es3iAlePE9 MX$Gxw'0UrJIN!4gA xvzHD  # =hj4>T!]]>#!AU@܋?@c_v?@#r`5B?@*ݷ?P6 t`  >B?\oEt# ap'EZu 0Nu;Y:|$># BJpU>#͙;? & ?5bbbz@}bAA& K&@ 2zGzt?"K@ L0"&N(bM)+TK/8Z*B<1<15 5`?CoyrigTt (c)s02009s0Mk0ci0osc0fq2b1r1aq0ic0n.s0 Al0 i8s2e0ei0v0d0b"5194M-t/H3 H7T &l>(>UhE=3Z1#E!J!@ENTMjN@1h(NoUy{OO e@ o)4(DF^Tw?@]֝ ?Nk_"?'_KD3\T+\(Kjjmlhj P\DiAlesI(=MXsG_'0Uzb6N-!%$0 sFEjHD  # =hj8>T! ]]>#!AU@܋?@*?@#r`5BW?@w?P6 t`  >B? \Et#տ|u l4RQu?]>I$>I# JpU>͙#<?& ?9bbbz@b#AE&bQ$$D 2zG[zt#@ L4"&R(^&I$Q)+]'I"^*Bh{.M#To1o15 9`?CoyrigTtk(c)k2009kM0c0os0f21r1a0i0n.k Al0 8s2e0e0v@d0f"h1l4M-:?{3 %{7&l>(>UhE= `51#I!J49fAn^ R)X f"B!Jui,SG?RSlOS u~!N]8-19= T?@L;uQ ?Nk"?YVTQRXQOnğ/=i n (\ wTiAaePE9 MXLg'0UbZiN!)$0 Lf`jUGD # hz8T YYT#BqU@5?@`0 ?@ t2?@?P} ։u`h?u# Lv3<@PT=d,2<<Pddt m۶YtjfUh!NUdI,@=#B!!u+`?CopyrigP}tg(c)gW20 9gM ]c os f"R!r 1a i n.g AUl0 (s#2e e v50d0+"!$4#bB-m)|# '?h==$&$#66 2j4$#=#0UB%3#3&50$^ #Q9 =#HAQ5D<KE%DdGg*Fj#aOb'*TATA4"1(3&Sa4% `2HAEBOiSHdiSi^PQ5 3FXm'.W'DŜ6-!40 VZUHLuD" # I>h(J TEI 3MU@jZ?@({~+?P >uA` ?u#jJ* )8* 9LGA )`G#dtG.$*G8BVj~>t  e t>"z "p{b+#7"J>U2zGwz?@9 S#>c&A@b(b)+,&" !$5N]#2T#k"6 "M"b)HF;'","/]#11[#`?CopyrigXt dc)020090M0c.0os0f21r0Aa0i0n.0 Al+@ 8s/Be@ej0vA@d#@K"ulJ Ul,aAaAC 5LU(#!Q(r!wW?@s80&9<01# 0IK A# ѥ,Og NEx]SiL5[ML&X39^PzQQ_c_ ?%YAQ)UEEU . 
K ;;\W)U I@)$_[.F&XA:jai%?@ 6?wL^ W-\/&T5Dꤲ8?Eg@SA?@ @ ?Nk"?I5`bmMohiy[7 K=F  %?j1&XIOD?@u@rLu7j T_ʹhLi,KV@8$Ȯ?@1x}b} yy_@){yEZJ]%8O@G \t;8j|c1}p?i @'WE"NO_T ZYAAekBp OOLJ}u?@9 mO"9=\/hQeel.%-+@mlփ?@U> "8?ɽ7yf\tOfj|A=$䚏Bp$za3ex@4Sa!6Wy'eU!a)#kq&ziM2i?IT%!Xh<'0Ub6502 ?}HD # =hj0>T h]]9 !AU@+3]q?@8i?@.!'?@\S۳r?P6 t`  C=?t#9sxZlRJ)u duo$>  J JU?'N<?& ?AbA@bNbbz87 5#,"b4$,"'b4$2zG7z?@L&5/G/Y 3%,"j&d,,"Rj*/1/15 `?CopyrigTt (c)f02`09f0M^0c\0osV0fd2U1rX01ad0iV0n.f0 Al0 \8s2e0e\0v0d0r"(1E],4Mb-;3 ;7&loUh#7 M1D hp  o%J F( dj!1B2 bdV.RiAbE9 MX|GW'0UR)N!!$a VZHD: # =h4>TB#]]9 TIAU@1q?@!9?@aww'?@տԌU?P6 t`  9F׿=?t ڰ#slgW ª%?)Zu Lhu!s8hAh hJU'N!?2& ?AbA@bNbbzc a#X"b`$X"b`$#2N贁Nk? "@L&a/s/ _%X"&,X"*T#[1[15 `?CopyrigTt (c)020090M0c.0os0f21r01a0i0n.0 Al0 8s2e0ej0v0d0"T1X4M!EQ#-0Og3 g72&l*>0>U#3 A y1vXD #p %J`#@.T'oo>hn56>fcRM(! Zך4_F_ b/OSϏHT >ɪ#f:?@*S)Y=> yy_@YjeeRT;Gq!Qh~n~OkzEMsZJPl -iU@SQEU"pL._qw;m;aU_ BnhX#rA 0X"IsTj߿kHhl!Qdok!(Npd 0Ale E9 MXXGB'W0U*}N!M$)Aj5 vzHD: # =h4>TB#]]9 BIAU@b?@;h?@ ?@F_a4w?P6 t`  /?t {/_  g?xRJu hu!s8&> JtU2]q ?=@ʐL&A@5b]M(b[)h+Ih&[#$"#?& ?AS/t)T#115 `?CopyrigTt (c)O020[09O0MG0c.E0os?0fM2>1rA0z1a i?0n.O0 Al0 E8s2em0ejE0v0d0"@14M#B-,?$3 $7&l>0>U#3 CE761$!; #p _%J*!ԌSU hneNw3󛍩(D#2q>@t|M?@a^!v:1 yy_@I56>[ŀQhnO:@K0hd7l aV@S8E8Vؚ㱫L{_pZ5X50U-0f:?@D?cHe-1>[W9k]geϺQ Y" KaĘ\Bqj]GRׂP_FR}LY5E:/U=uOqsR Y2JZ8 b G0Ale59 MXGB'0Ur:N!R$XA'5 vz_Hp>,ÿ;s ZH(GBF𸮏t# EOB 9 V~|d8ۮ @+W [sGU0O3;O >OߦOO}OrOMFXO 8O)#5O^(,R_,R0?8R36hR6NRH:x8O<T9ROB˾XS`K(S7IUFD  h$^T YYBBUF\.?x<F BP(?43^P? Vs V6gH?R\d ?8EB G 09gB`.Cloud,netwrskcaUins t,pa !i sa ! %e^ M%J 1Gg YR 1H?_^p Qwwx0w x3wxwx0wxwww xQww ww0wp wwwwwwwz pww/ p7^LDrag thuesapWonodwipe.b~߿~?? XAP7d??O\.ߗr3r\.CUG DF P# h @T(PYYUEUF\.?~?F~׻?P} <Y ^.B" N'O'*Q'R'S'T' *^ Y&Y u` ? "s'_":s"Ds"Ns"Xs"bs"ls"vs"s"s"s"s"'&&:&D"uN)*2<7)VU2^L[b1j2 ^`s" H3z3'N‚??lF ?ma[Z<` Ac@entColn@r3z[_b[[bNbbzI1dq9E 8AЇEO 24bYP{[`:BeR&nVB#32Q?]q ؂3Q8A*W1`Vis_MRXY%cPm!P5`12`݉`?Ap]y@igPt?Pu(@)?P20L`U9?PMPc@o`'of>bAr2`AUa@i@n% ?PA@l?P7gsbe*`e@v@d%Bd455uO#4[%B+@q~9p?@IBp7u@usPRD"J-D"t=2u0`B-1D"֝ruL &@BGm3ry=_"n4O N)9u`u `"AA b1O$u`B, ^aepcT<@o?Pm@sĴ@a>5SRTJAn@e@?aT]N@twBk?PD@vVbe)EяFE*UM_q|U۟e"4Fpd(aC l &&2::)BDD BRXX|bb2llbvv TNA5 rT&_Qj~}8pK!bdzӲ=`pM0 : D N X b l v%‡'_"D_"N_"X_"b_"l_"v%™2:2D2N2X2b2Hl2v2BS&2NC󨕃FkZV?F'IOғp~կˍW 2r033(FE9<QߐlzBO$ߪ߼߮(U9|>}>MR,AZl~(|9x uSV!+=  (UǎoC  (ָe.rMi>3LÔ  eD2(dv  Q!Uh!%5QU}4< ?VV$`fBh}Ȳ7O!<?F/*Ȳ/2sU~1p%)E+D>L?/Q<,6FE%mEB?T?f411%U\+:-yO?H1BdhPUifb}WSȊOt6XQ4X @ pp` k*`n`RPn`dxQdЯd79@#AUUiB+T.(T;)dH*d db$"Tt ,@AaiE)khp:j%56_:9c&ЩD3s͵NSY2a)k8la)k(8l|)k 8hm8_ZI۩Qy`Uee8^6JVAAD @^R` WShpCꥁs!`L ctsPzTq`ss wTyr"^RS` pu EqC#5#s SzbCRsqozA`BiEOO\_xX#Nsqe'2q[[!c0_ZˑU#|7t:}#/'_x # fHavU# 1H !eU iEQ % UH duD " # J U> hzi@T(iiJ\ UF~?F~?tP @Yu`?(.8uBbSA{szat? Cteb"0#$YE['?a&?.tb'|z酱2Q?AG}$&}qam{c8`Con0ect0rz 1l2z{{+` Li2?15-tzYaU#F131 ?WPa0t0r0)4;1Z7}`T5M151?UR0u0dB2g6Rz8%"Q-!c_C~#J *$&#FH-B;"2MC &BS{%ZMCQ-!dgG 1`?1py0i@h0 c)20RP9MB0c0os0fDDR1r8P11i2]. AL0lJ=WsRedP1v0dP]@>ֵDCz\MC2qP?_f[!?\.? ~:o -D~% IKMC&&UgyQh @ 1_`S0aP2BiEfoxoyXMGw'0cia&!|$0 vjlAUEu P-QyB:MCz?bkp Tbb-.(},8>8dup*ar%@68K]ou ÏՏ ?|>R9=PbtuYȟڟ x<w(LA7BUgyu]ͯ߯"}>%7GZl~uAҿ '%OHL_qσϧϠue kZV?F'IOғ %NMQdvߚ߬ߠui+.Q7Vi{Q"QucwHD: # h8>T ]]9 M UF~?F~?JP6 7@ZAAM ,'@' T'h'|']"' ' "  " '""'"'0"`''JuM`l? 'y""y"6y"Jy"^y"ry"y"y"y"y"y"y"y",y"&,y":/L/^"uh)!b_A@36Jte! i(Bt1 LJFb4D"FEB>fCJ[oC'N(?F ?"b96BhE#fCtoC2Q͞C@MEMJ3V8E@8;G2JoCQuAuA5 `B`?CopyrigTtk(c])k20 `9kuMPcPosPIfRQrP+aa+PiPn.k WAlF` XsJbUe`ePv\`d>`PoCQ! 
\l-OS WFfEP b$U9 fuXG_'0UHrũFZ!D0 fzlJ\>Uhqq]!!0!PQAcHQ@1S?Fȓ$,_q?^"d#e4Eq ^"3d]q 0!0"/"Q!0"fBJ4?F;W?FjlJ?FH E(>?NpKv7atuXad#hX\a3g6a0e!2 4#q E6?Fv9?Fb4!?FЩԏ挍maVv]2:{(?-yjI>P!Xj?FT꞊j?F'K?F;(嗟éT.kvd*ĮVq#(]HXEAr ?FK(umB)?F.~rYYH_amAáF+x(~*=į֯uhY*?FY.(v0zkd)D6GdSZpB{(i]"/M͐?Fߧ78ET+Lk ]D am++Tz&{+69[Ϟuf?FP$?F [13nٶ7gۣ挠Y,Idkim0H@eQY"ߤİuӧ?Fl?|f ͲKlKBG]fkϐߵAZV?F:?F* ?P$ 1EK3 3i{߮Gz~}Ԗ0>_NO5q :_" s3t? ט P&d2g? 41z3B2_qr5E~\.儐?FQ( BP >5 w pmD0VY l j٩ 3 UIPY?F3>Dx?Ptk= `K0z߂3S0kg; z0f=kVN?PX#Z3d1H-Z?F)$F\;X})挗J hG~W*fU4~%?FVFKL/GaH)r~(H  N/?t4ZA rGg?F4W eſ?FH?Ýi`)6¿l#kpc'??4FF\>FI(1 O怘Rrq SCH>t v|RuONHD: # h8>T ]]9 M UFx<?FAj?F~?FwQuu?P6 7@FA]A]M ],'@' T'h'|']"' ' "' ' " '""'"'0"JuM`^l? 'e""e"6e"Je"^e"re"e"e"e"e"e"e"e",e"&,e":/KuT)!bA@*36JtQ! 2l] 2t12zG57RQ57)\(1>RJCJ[SC'N'(?F ?^"b'96BtSC2B??088r0MICJF8;;G2J2SC@?JE=26S0#CSCQYAYA5 `2`?CopyrigTtk(c)k20 `9kMPc.PosPfRQrP)aa@iPn.k AlD` XsHbe`ePvZ`d<`P񾔝mT!l-pHS %WF{Vi AbdU9 {YXpG'0UFrōF!D0 fzl*J\>Uhqq!!0!QsAGHQCf֩?F1Qd?J"P#Oe4E] J"3d]] b0!\0"/",&JBJ2(B@?F,/k?F]?F8{# \*?LpKv7_tuX_P#_hX\_3?g6a0Q!2 4#?DEh?F~E ?F_pLVwMҏ䌍m_Vv]$A2:{&-yjI-H+ŕ' YbҖ.K?uAc9z^̰f5U?FF/`_`P \!b@ir_f|Ƣn y(Tf>rpd6ENj/-g?FRr?Fn^\K0z߀3S0k~mr{~.*{ S4K" =b™? C? "@wD#<2tr5EOK4n?F{׀?FBi䌗J hGW*fUÝ)k4Il RuF?FB5r?Fn?$Xp ?G'gH᷷?r~.H  N//4;KGۨI} yFa%?FO"Z/g`)l#&Ƚkpc%//2DF7?d>FM?MRrq LS3H>{v|R؇?>HD: # h8>T ]]9 M UF`0 ?F M  ,'@' T'h'|'] ' ' "' ' " '""'"'0"JuM`^l? 'e""e"6e"Je"^e"re"e"e"e"e"e"e"e",e"&,e":/KuT)!bA@*36JtQ!  \(\] 2t12 _p= 57]@ף57%)2] >JCJ[SC'N'?(?F ?"b'96BtSC2Q?0G88r0MICQJF8;;G2J2SC@?JE=Z26S0#RCSCQYAYA5 `2`?CopyrigTtk(wc)k20 `9kMPcPo%sPfRQrP)aua@iPn.k_ AlD` XUsHbe`ePvZ`d<`PRmT!l-pHS W$F{Vi bdU9 {YXpG'0UFrōF!D0R fzlJ\>Uhqq]!!0!QsAGHQ'n-?F1Qd?J"P#e4E] J"3d]] 0!0"/",&JBJ2G2,?F,/k?F}X?F8{# < ?LpKv7_tuX_P#hX\_3g6a0Q!2 4#?DEU: Z[?F~E ɀ;9C5 ?F?wMҏm_Vv]2:{&-yjIAim.H@eQW °A{?FH 6I_^ d ͲKlK~BG]fkߵy X0?Fjd?F*OzE=M2'-H+ŕ' YbҖ.KuAc!N\?FZf5U?FO/(Sÿ?P? \!b@ir_f|Ƣn? y(Tf>rp^N/-ُg?F6+:n^ºK0z߀3S0kmr{.*{ S4K" =b ? _C? @wD#<2:r5E1f.]n룠?FF[PBi䌗J ~hG~W*fU)k4R9uRuFpZ?Fn$Xp G'gHr~.H  N//4[/I} KKbv"Z/Ýg`)l#&Ƚkp!c%//2DF %>FM?MRrq kLS3H>{v|R?>HD H# h @>T(]]9 MT  UF|>?Fd2L&??FVR?P6 @ZfA]A] >M /]8HC8\C 8pC8C8C8C8C||C 5C 8" 5"C5$""C58"C5L"JuM` ?  "4'C"R"f"z""""""",",".,"B,"V/Kup)8!bSA_36Jtm! q$y BYt1B5A!EM!E*G[~By >zCJ[C'N'(?F ?7"b'96tC2Q??088r_HEyCJV8;Q;G2JB2C@?zE= BFS_#CCF~F$u?FNR?F~Sߪe f"l#ͽU>aſf"J3RRy f"l#>b@Ed3̀ͅap`L!  f$#p)bq65q p)CaAA5 `%B`?CopyrigTt@(c)@20:p9@M&pc$poKspf,rqr pYqaPipn.@ Altp $xsxreLpe$pv"pdlppT$!q$-Hc gFV$VibE9 YXG{'0>bUŽF!D0 UilJ"\>Uh"" !$!8!L!q'ArwHqI}^?Fwo?0je4EZeLdd?]e\DL"K""L"zBJT" Y?F]?Fo?F5? Ѭ:?1ipKv7tuXZlhX\Zexdg6gJ0m!2 4?#oD Um?Fڼt?Fɤ_i?F0>m* 9?F?eD;]D m++Zl?Tz&{si?69Vu9,"f?FXev!Jx W?F8+y٨HZ ͲKlK~ZlBG]Lfk&8}H ?F$j #?F?B?FH+g'Zl Yb*xdKuAc>j ?F-}JӦ?Fզ.ki]7aB< x!Gi@ir_Zlf|Ƣ y($M-wF ?d2huON{v|RONqNwHD: # h0>Th]]9 MP UF~?F~_?P6 dnA >IM .)B) V)]j ~   ) )) ) ) ")")2"e)~)JuM` ?E){"${"8{"L{"`{"t{"{"{"{"{"{"{",{",{"(,{"JU2Q?@M?CJOFA@!@HbʏIKFrB !RDJICB'?Vp BABbI2[GrBLrBOB-ICR SC9 _VICQQW;HB`?CopyrigTt (c)=`2`09=`M5`c3`os-`f;b,ar/`haa;`i-`n.=` Al` 3hsbe[`e3`v`d{`Pi @bU9 VHXWXA'2qpVtUIa fjlJ Q\>UhQQ]~ !!2!DEQ!=H^A@1S?FO$,q(P`"3 e4Es `"C ?]s Jt{Z rGg?F;mx>F9h E(>?NÝ*`)*f#퐲>+3cݛs a0g!2 4#@ES~%?FV?F4W eſ?F}G*H᷷Ļl#*لȽkpcXSH?F-Z?FF?FPL߯@J *⁈hGr~لH  NᵪS UI?FY?F섰a?F@\ K0z~**3S0k~Ä&~ԉ~$ARm}A\.?F:?F3?FDٳx?P+k= 1`竪3*{Gzޟg; z?ԉf=ks :a" F f 94FQ2! 
3B2:9rt5EG~[ZVn-C4?F4?Pu>5 Οw ð}H 1唜2fi0ƱVX 23@0Bk{̒[mY*?Fuӧ?F?F* ?P% t1EC6G*ͲKlK~}>NO5 RԋS2Z#@cukǜ{̦Ouf~$l?F|ƯY,*kBG]fk0BO"/M͐ŧ78E [13nٶ7g"]D *m++仌imIلH@eQOY.(vT+Lɇk h!˿S컌Tz&{u69Rd~OEAr K(F0z?Fkd?YH_2mA B{لi]I'p1OjT꞊j?FmB)k.~rkéT.kܶĮ~+x~ل~*=u1O6?Fv9m'K;(-/m*Vv]BÄVq#ޔ]H//rP;W?Fb]/?F?J/pKv7*tuX滌 &ה`Ypl?GuSFjlJ?FH?#5~hX\~ g61!O3K_H8>X"s D')K'g F(uN1#āOXB CK @Q<>dlۮ |o@+H ~oMsG7K o PK ~["a `thi jI ]sOx |۩K :UFDf h-TYYU?~@x]@L'} V6lX-1u. Bj2u2Z2j2#u9r#ULH/MZ1+B#AD5 60`Vis_SE.cTm!#20AD%`;Copy<0wigTt ?DU1f@M)@c<0o+@'ofdBUArX@AUad@iV@n3@ f@WAl@ \HsBUe+@e<0v@d3@J@=## A G?9#"n"Ea&444 70#AHB53_lj#SV6U8l>(UhnE / J$&9"(Fn_g5Fe#pheo'r#T X!! Jc0xo)vR2'"U7A%O3W_7rr5-6 `F2 Vrqu\@|Q:0b)`R@As)@E %T@xd@Di  ei=G]U6Tg'0"q)V!DHAn 6SWaH>w* O`EW )F8u#7B ` Ȓed- @x PUFD  h(^TYYB UF\.?x<F BP(?P?Xv B6u6  T^ThWZTTȅHBU?? ?B@L&d2?J-[(m(K.sU!&/")ҍx;(  0KB` Bridge,n&0two0k(0p&01pP=1al(0d&0v 0c@&0 [1^P| ( G&e WPW"*U---?-. wwwowwwp~wwp!wxxp}wwhd:lww w pvw>ÿ@qZ@E?DFp6@rR$Sn0!u@`i-FAD"BRuL @GCD$]QO(gR_"|i& 4O N)9u`u `"[bO$]u`J425)3Ga171`Vis_PRXYcPm!#5n`46B.R`?CopWyrn`gPt@u(~`)@20`U9@Mn`c`op`'ofbar`aUa`i`n @WAl` hsbUep`e`v pdR13d45*8=363@tFF B4B#30UrC]Vw,7%$T!nLPLsT<`o@m`s`q>g d(aq;_=@TATA2hhbB)3*̈́b+ڄ8bq q200Dr81@a6s2+66u\L %BWQ(A.A[2BB]P@RbESTO$@6r23| e|8e5=_4&F3s͵NSYFețG@DugQ̗,@Ga cupgbE@*28ABTOopD"%J-.RRO(R$[3VÁ`R pasn`e T px``B| 0Ls'_"x#PG $UJ"bk B3U2brZTCfRr ïը8=vv_<H!U"D"=m0!D0!g!@2ÅbBЅ3/xԂb08beaea22Dr3 AAE|66v7 |8Ԑ`b-A-A;hr#zr{qV W!rA8T@GaKk ]]P@` USPa`e`lLsaQi` E0u n`p`e`TbEXew` —T``8` D pvbT3^r` MSb Bbd`5ed;YRmM`nf`c"`u r88eQTAPbd Nm@Te@h=O! P`rxAx8eb`m +sbeXsuĨ9KuAe;S`uJB Sri`a /TTh//A/L`ch{/Ҡ///Blpi`[/@?#?FR`oS?a??uNwbkl"aXb`K?[It`adp s*OT]]9 M JU@x2zGz?@MJA@eb#z(b),+,&J[(.?& I?A$./O%B!!5 g`?CopyrigTtZ(c)Z2009ZM 0c 0os0f21r0>1a0i0n.Z AlY0 8s]2e10e 0vo0dQ0!$$-# '&%?6iie59 6X7'K0UpBŁ&!$K0 'F;JlJAh h7!1!Z@vnǣ f, <AA#  IRvVJ:'BP(?OS spBlU^TURUEUr\.G\ZY?PuPիQ,5U҄Q[pB.aYU520_BU"rA 5#cHD: # h4>T]]9 M JU@1&d2?@Vj?@҄BP(P6 ]AJuM` ?ju#t  k]It=W(\ZIRUIl">2zGzw?@MJAw@eb #zb](b%)2+)2&J[/.w?& ?A$T4/U%B!!5 g`?CopyrigTt (c)020%090M0c0os 0f21r 0D1a0i 0n.0 Al_0 8sc2e70e0vu0dW0!$-# e'& ?6iie59 6X7{'0UŇ&!$0R -FAJlJ8>UhAA 11!T2 $S<cV{ WR~VJd2L&?@ubX,X ]S fR#mS [ÐڱQE^Fh4__ K&oeX5:f'l6 XYePY?ۭsHMQ5aZjoohY}aUVeEVx_;"rA #dsHD: # h4>T]]9 M JU@|BP(?@h4WFP6 v>JuM` ?)uJt  U]It=W{GzUIR]I%l#>m2zw?3S@MJA@eb #z.b( (2+2&J[ (p"?& I?A$ 4/U%B!!5 g`?CopyrigTt (c)020%090M0c0oKs 0f21r 0D1a0i 0n.0 Al_0 8sc2e70e0vu0dW0!$-X# '&?6iie59 6X73'0]UŇ&!$0 -FAJlJ8>Uh 1Z1!Z@o.rɩ  <uCB] WRVJ@ ?@bx ]S ؒQ#hY))QE^jϵZ__ CaX5a9XYePXRmScRU5:]mxc_PU"]rA #dsHD: H ih(>T9 M3JU@xJuM` ^?5ulbSD_Jt bstE>2zGz;?@MJA@u+b+&K !JS"j& 1"A.b+-,-/*B!!5 `?CopyrwigTt `wc)5020A0950M-0c+0o%s%0f32$1r'0`1ua30i%0n.50_ Al{0 +8Us2eS0e+0v0ds0ib =L!XY'0U Bj&%0 :lJ Uh*M[A[A 9A Z1:'BP(?X Y s BQi@JSv?@bX, IYwYQ!i )\(!!U31^Fh4I_[_ p= ףuXEfl6 TbYHuX _Vgj_aZ(oQ !UAI҄AK B\%QAI@h4GFHlQbY{uXAo ].Oroa_(miY"@ _?@-BQȠ\ p_\oR\uXAppD`Zsƌ uXEOO_\HD: H h0>Th]]9 M JU@$?@-#?@%G֖g?@q;Ƙ?P6  n>JuM{` ?5 uJt  -|ߥ_Et9S8-zENسא\Eh)Fx%[v>2N_Nk?<@MJA@cb]b)++&J"< u) ?AHb$z@J+%/F%B!!5 c`?CopyrigTt (c)302`0930M+0c)0oKs#0f12"1r%0^1a10i#0n.30 Aly0 )8s}2eQ0e)0v0dq0!(Y$-3 7x&+0b59 6X7G'K0UhBx&!$Ia F3JlJ]Uhz11Z@S3   8gF/_ M ARnVJ!49BYOPBRWSMRUE:]|`X?\QRYl>Q5U/V@_YQĈU2(_:UrA cHD: H h0>Th]]9 M JU@!\?@JI?@8HL?@VG?P6  n>JuM{` ?5 uJt  4 ^sEt9SPDEN׈]Ehoþ>2N_Nk?<@MJA@cb]b)++&Jޱ< u) Y?AHb$z@+%/F%*B!!5 c`?CopyrigTt _(c)302`W0930M+0c)0os#0f12"1r%0^1a10i#0n}.30 Aly0U )8s}2eQ0e)05v0dq0!PY$-,3 7x&+0b59 6X7G'0UhBx&!$a F3JlJ]Uhz1h1Z@Pg   8gF__  ARnVBJ49BYOPBRWSMRUE:]?{^{?\QRYl>Q5U/V@_YQU2(_:UrA cHD: H h0>Th]]9 M JU@F5x3?@5yF_?@%G֖g?@q;Ƙ?P6 n>JuM{` ?5 
uJt  a8~4Et9SuEJENسא\Eh)Fx%[v>2N_Nk?<@MJA@cb]b)++&J:3f< u) ?AHbʣ$z@bb3?bfb)$%/F%BB115 c`?CopyrigTt (cu)>02`09>0uM60c40os.0If<2-1r00i1a<0i.0n.>0 WAl0 48s2Ue\0e40v0d|01Y4Ť-3 7x&60b59 FX|7*G'0UsB)x&!$aI *F>JlJ]Uhz%11Z@S݆ f  8gF_  LR)yVJ49MYZPMRbSXRUE:]|`XJ\Q]Yl>Q5U:VK_YQU23_EUrA cHD: H h0>Th]]9 M JU@ ~?@gŬs7.?@%G֖g?@q;Ƙ?P6 n>JuM{` ?5 uJt  YoDt9~S-/ENس\Eh)Fx%'[v>2N贁Nk?<@MJA@bb)+R+&J"< u) ?AHb$z@J+%/F%B!!5 c`?CopyrigTt (c)302`0930M+0c)0oKs#0f12"1r%0^1a10i#0n.30 Aly0 )8s}2eQ0e)0v0dq0!(Y$-3 7x&+0b59 6X7G'K0UhBx&!$Ia F3JlJ]Uhz11Z@S3   8gF/_ E ARRrRJ!49BYOPBRWSMRUE:]|`X?\QRYl>Q5U/V@_YQĈU2(_:UrA cHD: H h0>Th]]9 M JU@ ôOS?@S !p?@%G֖g?@q;Ƙ?]P6 nAJuM{` ?4 uJt  M:AEt9SENسא\Eh)Fx%[v>2N_Nk?<@MJA@cb]b)++&J~"< u) Y?AHb$z@+%/F%*B!!5 c`?CopyrigTt _(c)302`W0930M+0c)0os#0f12"1r%0^1a10i#0n}.30 Aly0U )8s}2eQ0e)05v0dq0!PY$-,3 7x&+0b59 6X7G'0UhBx&!$a F3JlJ]Uhz1h1Z@Sg݆   8gF_AR M ARnVJ!49BYOPBRWSMRUE:]|`X?\QRYl>Q5U/V@_YQĈU2(_:UrA cHD: H h0>Th]]9 M JU@]C}?@8?@%G֖g?@q;Ƙ?P6 n>JuM{` ?5 uJt  [J}$Et9S=d ENسא\Eh)Fx%[v>2N_Nk?<@MJA@cb]b)++&J"< u) ?AHb$z@J+%/F%B!!5 c`?CopyrigTt (c)302`0930M+0c)0oKs#0f12"1r%0^1a10i#0n.30 Aly0 )8s}2eQ0e)0v0dq0!(Y$-3 7x&+0b59 6X7G'K0UhBx&!$Ia F3JlJ]Uhz11Z@S3   8gF/_ M ARnVJ!49BYOPBRWSMRUE:]|`X?\QRYl>Q5U/V@_YQĈU2(_:UrA cHD: H h0>Th]]9 M JU@Y ?@fJ7%G֖g?@q;u?P6 >JuM` ^?M uJt  (N7Et9Sp%6ENس\Eh)Fx%'[v>2N贁Nk?<@MJAw@cbWb)++&J:3f< u) ?AHb$z@bb3bOfb)$%/F%B115 c`?CopyrigTt (c)>0]2`09>0M60]c40os.0f<2R-1r00i1a<0i.0n.>0 AUl0 48s2e\0e40v0d|0@1Y4-3 7 x&60b5(9 FX7*G_'0UsBx&J!$a *F>JlJ]Uhz%11劵Z@S݆ Y  8gF_i  LRyV J49MYZPMRbSXRUE:]|`XJ\Q]Yl>Q5U:V K_YQU23_EUvrA cHD: H h0>Th]]9 M JU@$䣹?@R?@%G֖g?@q;Ƙ?P6 n>JuM{` ?5 uJt  >b3Et9SWENسא\Eh)Fx%[v>2N_Nk?<@MJA@cb]b)++&J~"< u) Y?AHb$z@+%/F%*B!!5 c`?CopyrigTt _(c)302`W0930M+0c)0os#0f12"1r%0^1a10i#0n}.30 Aly0U )8s}2eQ0e)05v0dq0!PY$-,3 7x&+0b59 6X7G'0UhBx&!$a F3JlJ]Uhz1h1Z@Sg݆   8gF__  ARnVBJ49BYOPBRWSMRUE:]?|`X?\QRYl>Q5U/V@_YQU2(_:UrA cHD: H h0>Th]]9 M JU@֤M?@ yo?@%G֖g?@q;Ƙ?P6 n>JuM{` ?5 uJt  ߸ Et9S)hBDNس\Eh)FxO%[v>2N/Nk?<@MJA@cbb)+R+&J:3f< u) f?AHb$z@bb3bfb)$T%/F%B115 c`?CopyrigTt (c)>02`09>0M60c40os.0f<2-1r00i1a<0i.0n.>0 Al0 48s2e\0e40v0d|01Y4b-3 7Ax&60bP59 FX7*G'0UsBŔx&!$a $*F>JlJ]Uhz@%11Z@?S݆   8gF_  LRyVJ49MYZPMRbSXRUE:]|`XJ\Q]Yl>Q5U:V@K_YQU23_EUrA cHD: # h4>T]]9 M JU@xJuM` ?uJt ASt=WJRb~li>t2U?=@MA@+b + &J[N#a?a& ?A)BB!!5 g`?CopyrigT}tZ(c)ZW20 9ZM ]c os f"R!r 1ai n.Z AUl90 (s=2e0e vO0d10!$ę-# 'a&?6iiek59 Կ6X7'0Ŵa&!|$0 FJlJ UhqMAA ])A !4Ja-'Z@h4FӃ f <{Gz#A OR|VJ:6]].rM__Z)\("UEC@ ?@-bȃ S 1 `#`YR\Xk5^jZso!o ̌aX5U'BOP(?gks#dT[RUUSv?@n aX,flYwq%lSXn?Fh4  p= ףXAYfl6\`YHzmPXAV``ʼ_Z(QYAY҄agklQbE26_HU"rA _H<>gI+s nsMg@q3JFXu#ǁ"7OB  9 Y~dۮs o@+_ oKsGh$'fX K j\ ƫNؗ ~ ~8 $ ~$xKZ$(ʐXUFD  h(^TYYBBUF\.ߗ?x<F BP(?hP?XV 55r55 Jbggah5Zg\ȅH?8?f)B@L&d2U?$-5(G(%.sUm!k&/|(  0O~B`"EtPern t],4wo k0p"i0#al*0d v0c 91^| ( 5G&u  * AAS ~pni'TDrag onut he]pe,nUdu /p64lies9Lt]wrkcQm tOfTly lGo_"imdO.bL&d2~?K?=tp'I??C\.r3?r\.CUG D  # h L^T4YY  BUF\.?@L&d2?@~?PYU^f6.NGOGPGQGRGSGTG\G]G^G_G`GaGbGcGdGeGfGgGQJiGjGkGlGmGnGoGpGqGrGsGtGuGvGwGxGyGzG{G|G}G~GGGGGGGGGGGGGGGGGGGGG736363u` o?%$7G-7B<7BF_G#BP7BZ7Bd7Bn7Bx7B7B7B7B7B7B7B7B7B7B7B7B7B7B"7B"7B"7B""7B,"7B6"7B@"7BJ"7BT"7B^"7Bh"7Br"7B|"7B"7B"7B"7B"7B"7B"7B"7B"7B"7B"7B"7B"7B"7B27B27B27B&27B027B:27BD27BN27BX27Bb27Bl27Bv27B27B27B27B27B27BH27B26g6U6666MBuIU$FrL|Lw&1~rW]@Es  ^7B( ( ~s!v̨ q?@h!;p`:1P IFPr,s~rD1''@A3{:18E9 'N( d?pxh` Acn CS lS r3!ۦpڈ٦p1bbbz0q@!PZY<^pbbNbb™qb"b7+d||u@+@Q Q?@G}x^?@IH‘BMB6}Vu]`-qZBuFgw#/s;}#B(0t@I9u`u`"TYYbzFqDuK`EWAαq1W`V sPwXYEcPm!#528gX1`U?ԑp r gPt1()12U0B91M cq Eo{ o5ԑr] ua iS nE J1A5 l1-sUe{ eq vdE*E WN~`~u01 l2T'sFt5ӤӨὭ 
w?gߏBq&W,s^KVVTK~u5TԑnI e5TJ Nt uk1DvLN'9K]utc担c8E&cNI[mc}c濵c0BcewcFլcc晱6HZB_z\\Fq ////_ qR/d/v/_///\`//?c+?=?O?cn???@_???_? OO\VCOUOgO_OOO_qOOO__'_9_\\_n__\픱____ꌑ__o_3oEoWo^;t~ooob!ooo\폰,^#M_q\$\%@\%7I_'i{\(ԏ\!) \*>Pb\+\ϟ_,>@c]o_/ů\0 \ 2DV_2v\3Ͽ\ +=5K]oB\6Ϥ϶\7\8 2D\!9gyߋ\:߄\;<x?#5m`3ACTE5[ɳaj1,.tu[񼱼[[_[[([ [CZ==2ffyѭ";;N"""@#2WWj2d`e U@s` M;aiuf,pueQ` Eq0ipeith 'UPouoN:mb>Pb btq' P,r9! / u'EsorK"t=h;/M/$\l/~/A s}-.1 '7`1^/?!' S6"i,l 0?B?`>Th?z?X[Q'Lc,(?`K???B*"liigOP<XLHOZO' RoO_eZOCA8wa' BEp&$/qf`Y4$_8]Sra e\l,)1@#` fi!bty_l_~_W"` WTPQ@^2` Eh6"*lt\a4 l.c`_O|N(,TQw`#|`p/xo6_IPsAd<Told?o/S:bEd +sc*lAxTf;'qmBi tCv9 I[7!3oW='/) fsPt@$:PftMpCnp㏊gTˑ'fmuQ SF l!M_(|/@"ha  c%>UcAXE_`>Z ** [j$w1tt>,3K̗?nؑ`tQgctRm#ZC̗,@_Cc%SuK`rϔ? h!b M^b!bC%_5Ÿ(56c_t&YF3sNSYF??Eq! $U dkV@~?>? ijTMwu `b|tϔ?b+cU@L&d2FX ~u~ߐk7լj(ߎp.AɯD c0I.NO Pl$u1S>GAGAKaaX]8eԁԁrݡݡiaiaa lc/a/aQQAAIIA0,j=al$BAe"!A#Mwx!g%9!@QRQ@ϒ^^!:q"v}#rq01RQz3{| +}*~t36 /*&03!@2M3Z4x|Ut6񡊁7񡕋8񡌛O:񡎵q=>?R0i[8s]&?-ܑ-!lVőncb[r ]#d%+`[ ob Q'eu"$I7 | "#####!#2";#ԁ#ݡ#ia#o#|###/a#Q#A#I###a##A##%#x##L#Q#f#s###q#Q##########)#6CCCPC]C|CwCCCCCCCC`CC "&eR"eR2eR 2eR2eR 2eR*2eR2eR>2eRH2eRR2eR\2eRf2eRp2eRz2eR2eR2eR2eR2eR2eR2eR2eR2eR2eR2eR2eR2eR2eRBeRBeRBeR$BeR.BeR8BeRBBeRLBeRVBeR`BeRjBeRtBeR~BeRBeRBeRBeRBeRBeRBeRBeRBeRBeRBeRBeRBeRReR ReRReRReR(ReR2ReR2(ݤ>2&̶Pl~3+CkߏH2ݤH2&̶ 1CU4}XRoݤR& ̶ F!R8JݤR*&̶R(ݤR&̶TRݤR&̶Xj4!X/|/R//ݤR ̶/0/B/!?j/E?b\?n?ݤb*̶// ?1?3?O b%O7Oݤ b̶???xAO?ObO_ݤb̶yOOOAQ|_O_g__{b ̶B_T_f_ aEo_ioh|ooDr ̶'qo o 4aWo2iEW ̶oooq j Sւc ̶eďk׏&! !̶fx.il٨h ""ڡ2D 42{Vmi{1## Dn2Dk{$$ӯ寉Ŀ o 4D% 9%Rֿp،* &9&Se wVߟzqߟߨU'9'.@hCrVh(9(1 s1Xh) 9)!t!*1*9*"w ?zuy+9+#@RdCgvz(B"&,9,$ - /U0/wC/U/|8 26- 9-%!///x ??EH2*UF.9.&// /c1?/?y??XBV/L/'d?v??,AgO?OzOOXfRV0L0(-O?OQOA0_yOT_{g_y_h/bf1 \1)O__Q_B_o|0oBoixb*yv2\2*__ _ao oo}o 2rB3\3+oooPqo~ 4\4,QcuTxĘSԖ5 \5-,>fATf 6\6Gv: 謫/ /Vf7\7k/ПӯȮ/8\8k0u=xw9 \9k1>PbAϊeτxϊϱ@*:\:k2 + S.߅ASߨz ;\;3Ϙ߆ CS<\<4߽߫a߂ = \=5bt*ed*>\>6+= O.wRew-?\?7@.@g(w&@\@8  /08"@6A \A9N!////82* FB\B:O/a/ s/1R?/v???HQBFC\C;?*?s__ _;avo_ooo(xurvGGK>oPo@q?ocv>HɌH@)qQ,?QxIIdTtsTssSrU} @ `EtherntnrAnkssAs%t sΑϐΑor#r` %P&o]ptis ߢ``N `TWORKÐS{AEÐPOR2I `Q2 ia1ṃ'0Uyu1!1 0tbų)zw tر)1 ۲eʡ0(&100@w뷤H1wLĭGX+L_W8LWGEDrDC  @   !"#$%&'()*+,-./0123456789:;<=>?>A*BCheATe(eJ\U@L&d2?@0~{??@?PYmUU%.u`o?&UH&T"""u,)ct)! q-tm!ʇ"L5 .&b$&"} U}!Uɤa/ '19 b {V?`%2H6"\>3c6?c82\.?A?9[B>30/D?P?y A@lBw0uC񁠙A>9D@SVCA'1X7i`"`?CopyrigPt(Uc 09uM PcPosPIfRQrP=QaPiPn AUlXP Xs\Re0PePvnPd"Z,AuL-$cA2T%c3C#lBF{L u,G"aOCTOU_2sOU!OOOOO_"_4_F_SO*^x_b._o_p__ ooARodovoooo_j_oC.@+k@asx@j`D.mѐBǏُx!0BTfx"ja.ӟ7 -?Qgx:A̯ޯjbu._qxl jeB.k!xґ.@Rdvjf ".5  ;+=Oe>s8jg/s2)N@7/N?[/0p/K///Ns!//@ ??0?B?O/jht?B^?O?P?B[? OO1nsNO`OrOOOO?"jiO?bn_o'_g`<_R{]_o__~sj_____o_6j@or[ ioopo ooo s,>Pbtok !3!);Mc!s6!ȏڏ@q'"5 LYn"ɾ"s1 .@Mhmrײ#ԯ!#/#sQL^pnؿ=$%e:P$[mϕ$shaψ 4o>ߣY%@g~߶%%sq*@Kf:rp">N!/=(<(//-N(?HJ/\/n////Js/;BN)/O#?c@8?N[)Y?k?}?^)XfѰ????? OD?2jt38b/.J~8/2SN4a/x?/0/K4///N4X?$?6?H?Z?l?y/Z?R^5@?O?+POk5!O3OEO[n5mh.xOO@OOOO?j_ib~6-_DoQ_`f_B|{6___~6x__oo&o8oE_"`jor7oopo7oo'79DVhzoƚ5랁8]2H8Sew8ʑΏ,6Q9_và9˟ݟ91"4FXjDwʄ·:ů ܿ):1CY:k,Qv пݯڅg;+BOϏdz!;ϗϩϿ;a$6C^h<ߨ<%<7qBTfx 3=@=Qcu=^@**4O.>]tB+>.>8ġ 2DVhu :!<">?/'0KF /1/C/YL?iH*t//////J3A\eB^@)?@OM?@b?x[@???^@X???O"O4OA?\jAmlRnAO_OPOkAOO _#~A5x@_R_d_v___DOz_1r~B_ oYp.oDBOoaosoB\ooo oo o(2 Ma^uC!CC 0BTfsD؟%D-?UDg(1rُ̟cE@'>K`vEEȎAد@ 2?ZڐdFϱƿBF !F3Q>PbtφϘϥ"/G W,BGM_qGZq߶ & 0KHYp} HHj .@RdqI#+I+=S.Ie$# TUHL5D" # =h @^T(aaJB [U@L&d2?@~?P e.N Q  m>uA` _?`N  Y c$ A.Y8>uQH>tEt#bH ">U.%>R.#N-7/de!#?A. 
\|#&/,?\.5?IA@/B[|# 'N(#AxT`?CopyrigXt(wc)20(@9M@c@o%s @fB Ar@GAua@i @n._ Alb@ HUsfBe:@e@vx@dZ@ V4 ?)o32TUA??)77se<-#v26ZbhafYv2r|cVqQN "QQQdl5C/oAe+(dVd>lrd "!s(8r4."%S(QVouodC~w.N}" (-DT!I@[0v2rzCsc "oP%3}`wȄՏyE|?v2>E *P&ai7HD H# h @>T(]]9 MT  )AU@L&d2?@W~?P6 $LA n  JuM` ?8* M^ K*0u:Jt7 StJb)>JU5 JD-//!X#V? .B\F#k&/k,?\.?;IA /[F#Y!3/ g"Ai @@2'bH42F/ -o31/!`'; J`U!`?CopyrigTt(c)2009M0c0os0f21r0Aa0i0n. Al,@ 8s0Be@e0vB@dH$@V o4 x?)eno32TUA?2IaH@2U6TO<@2U6X7ibE9 &Xo72'0BUik&!d$0 V1ZlJQUhhf5`'1@1Kdo30C^*Va&JQ&dfl64iAc7FlUnbi:(}m (-DT!;@-0@2rdUj"Uc60eE3&m9GtZTe%|&ov+=~u @2stktbqvu@<&]ew7UHLD  Jh0TlE>M U@L&d2?@W~?JP jJ %A Q"2  .>uA` ?E_  (< _Z&?uMt>p/b ">tq bo>t6  }配>U2TmUg!?#@9 U#!"/ Aw@"b)+&> !v?(t($#>_#2( m"6 G"\"b)H;'"bu3z=&b3~8/D_V_8E"lJaUl~X'4M1!S(t!?@9j# p}j# K$(°b`0w8-DT! lq0$#qj{jr}4k (Mt6 P pp@U<r(5%8:dfl6/wkrvQ uC EW`#0!y0!yD#|ud)<$ooooQdQAuddT8 %9oeS(<@EYeCw<ȏڄؙhjsu qyoNK$ i 8_\H>+s !SFO^? F؜Zu#2K <qB9h4K ]aۮ 6@+p 8NaG𘡁PxPh x u? RUFD  h(^TYYBCUF\.?x<F BP(?P?\Xt3 D5S?>?5%6qW{qk66b6HCI6CJ6CN6!C&aS6/!C9&R6M!CW&L6k!Cu&16!C&a26!C&36!C&46!C&561C 6a661C)676=1CG6 6[1Ce6 6y1C6a 61C6 61C6 61C661C6!6ACFh9hC7F6KACUF6iACsFa6ACF6ACF6ACF6ACF!6AC Vd9dC'V6;QCEV6YQCcVa6wQCV6QCV6QCV6QCVa6QCV 6 aCf!6+aC5f"6IaCSfa#6gaCqf$60aCfȅU?of?Z0Bc@L&d2?"}3xEx#~sUkqiviҍrx  0bEB`Hub,netwopkpppri hal*pdpvcpp-Qa| xy3U5Gv\ NP?U*U---?-ppwpwwppw׆%p6ppw pwwww }σw ^?w wpwp pd_&wpwwfDrag onut hepe.Rih-c]lckaUdcos UPo etQeAvUwreP 5aabx<οL&d2??9GΑsп=t??\.r3r\.CUG D # h L^T4YYT UF\.?@x2" $9u`un `"0Yb!u`$"%؂#UQ!'1`Vis_PRXY'cPm!#5P46ޖPg da +_Pb1b1"(Ɣp2)Ӕ2*R+FR"((Rb(!SaDc"76u&`~PG1"1 0&2k@c4p5,CT@Db"5 UaAFUQS_$|lF3s͵NSYF'U;0Re>1,@UQP)epzp51S(("(AT?sR !U/!/keF/X/Ae`'e^//,U SbiPa??g TT]]9 M JU@x2zGz?@MJA@eb#z(b),+,&J[(.?& I?A$./O%B!!5 g`?CopyrigTtZ(c)Z2009ZM 0c 0os0f21r0>1a0i0n.Z AlY0 8s]2e10e 0vo0dQ0!$$-# '&%?6iie59 6X7'K0UpBŁ&!$K0 'F;JlJAh h7!1!Z@vnǣ f, <AA#  IRvVJ:'BP(?OS spBlU^TURUEUr\.G\ZY?PuPիQ,5U҄Q[pB.aYU520_BU"rA 5#cHD: # h4>T]]9 M JU@1&d2?@Vj?@҄BP(P6 ]AJuM` ?ju#t  k]It=W(\ZIRUIl">2zGzw?@MJAw@eb #zb](b%)2+)2&J[/.w?& ?A$T4/U%B!!5 g`?CopyrigTt (c)020%090M0c0os 0f21r 0D1a0i 0n.0 Al_0 8sc2e70e0vu0dW0!$-# e'& ?6iie59 6X7{'0UŇ&!$0R -FAJlJ8>UhAA 11!T2 $S<cV{ WR~VJd2L&?@ubX,X ]S fR#mS [ÐڱQE^Fh4__ K&oeX5:f'l6 XYePY?ۭsHMQ5aZjoohY}aUVeEVx_;"rA #dsHD: # h4>T]]9 M JU@҄BP(?@h4FtP6 >JuM` ?uJt  kU]It=W{GzتIRIl#>2mzw?3@MJAw@eb #zb( (2+T2&J[ (p"?0& ?A$ 4/U%BB!!5 g`?CopyrigTt (c])020%090uM0c0os 0If21r 0D1a0i 0n.0 WAl_0 8sc2Ue70e0vu0dW0!$-# '&?6iieP59 6X7{3'0]UŇ&!$0 -FAJlJ8>Uh@ 11!Z@o?.rɩ  ~<uhB] WRVJ@ ?@bȩ ]S 9Q#hY?))QE^jZ__ CaX5a9XY@ePXRmScRU5:]mx_PU"rA k#dsHD:@ # h4>T]]9 MTJU@aܹ?@&cM?@5?@$D"?P6 v >JuM` ?qu#t  @@|It=Wn5_6uIRFIlI#>2zGzK? HJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A/.7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6#)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ# $<Ef4B 9b`fJ x۰Z @]0?c Q#Oc Q\}ȱ~aE:Õ)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD:@ # h4>T]]9 MTJU@ (?@hZ?@>TC?@ݿJWc^?P6 v >JuM` ?)uJt  It=WX IR?8IlRv\#>2zGz? HJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A(.?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!Z!?v"rA # sHD:@ # h4>T]]9 MTJU@?!Qw?@k?@BZ?@ib+iF?P6 v >JuM` ?qu#t  1It=W_OIRUo}WIlTy6ވI#>2zGzK? HJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A/.7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6#)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ# $<Ef4B 9b`fJ:O@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? 
mh~EA#o>"rA k#sHD:@ # h4>T]]9 MTJU@P4K?@l?@u'?@ݿJWc^?P6 v >JuM` ?)uJt  {d5Z\It=W2;wEIRkzIlRv\#>2zGz? HJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A(.?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+Z lJ]UhzЎ!A!Z!?&sA#<# $BK jfJ 'j `00>oPaYc l]]beE:Ͷc0TbCcba`iK8K8h<5eg۰Zk(qQlqe~Ee@||`ai !hA'o>v"rA # sHD:@ # h4>T]]9 MTJU@#zUv?@f s9?@ә!?@8laxy?P6 v >JuM` ?)uJt  V"(It=W7:LIRE! YBIl p#>2zGz? HJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A(.?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!19b $<# $B 9b`fJ: muۘ]0:mOc ?i'A~aEesDͅ?c ? V#NeFaze<5e@|lVaiU3Lh`~EA#o>"rA #sHD:@ # h4>T]]9 MTJU@"z>?@^i2VS?@ˋ>WB?@=E?P6 v>JuM` ?)uJt  7It=WܯHIRݮIl$dvĤ#>2zGz?(3 HJsAeb #z (#+#+#&*Jp!p!5 g`?CopyrigTt (c) 2U0 9 M c os f"!rԙ !a i n}. Al U (s"e e 5v0d i!Em$}! (K2?b6 ?ALb4($I3/ 4/U%b-|# |'Yb6?Fiie<59 FjX/G3_'0UBb6Z!}40 aFuJlJ8>UhAA Ў!1!Z}'[- E|zo}#<b$,] VJ:@8Qw0RS S IBf>QE !?@\BU--# i[F @ h<53n:CU9fKo]o ڵ h5a9YUU aUUwiǤ]Q işeq5 h2r_U"rA 5#sHD:@ # h8>T ]]9 MTJU@IKY?@#v\?@(w)٫?@Q`m ^?P6 >JuM`lW? SuJt  o|MtA[_"oRMVYBMpry2VI'>\??\.?lFA8 J2llf?3 HRJB& @i7b#zv t# b( (+&B(ݙ" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XG3'K0U!R&!4K0 FJlJI  ( sQsQ] 1heA Zl0|z3 0 @?/n' QE bR;bJ:]~^PsojRփUaE !?@q`0 0,c s!R'i e:}ɟh~5aef9oa|b c gT]]9 MTJU@5^?@hP?@5?@$D"?P6 rAJuM` ? qu#t  .9BWyIt=Wn_q8IRFIlI#>2zGzK? HJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A/.7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6#>F O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !1C# $I<EfB C`fJ x۰Z @]0?c Q#Oc Q\G}ֱ~aE:Õ)0:iG`iw*h<5a#j8oJiqze~EA#o>"rA #sHD:@ # h4>T]]9 MTJU@H?@6͍~>TC?@JWc^?P6 >JuM` ?juJt  HIt=WByIR?8Il?Rv\#>2zGz?) HJsAeb #zbU(#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$+A(.?b6 ?AgJaT6` F !!l"zbAR +6ig5V181 ?2?;:C?'$4/U%G|# |'b6L3FL ie<59 FXG'K0U`Rb6!}4K0 V+ZlJQI0h|G!A!Z!?"]rA #sHD:@ # h4>T]]9 MTJU@Sw?@K?@BZ?@ib+iF?P6 v>JuM` ?qu#t  fd>It=WN6sgHRUo}WIlTy6ވ#>2zGz? HJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A/.?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼#FO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!1lL# $<EfB 9b`fJ:'@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij mh~EA#o>"rA 5#sHD:@ # h4>T]]9 MTJU@t)H׾?@Ȑ?@u'?@ݿJWc^?P6 v>JuM` ?)uJt  ُuߓIt=Wί IRkzIlRv\#>2zGz? HJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigT}tt(c)tW20 9tM ]c os f"R!r !a i n.t AUl (s"e e v0d @i!Em$+A(.?b6 ?AgJaT6` F !!l"zbAR +6ig5V181 ?2?;:C?'$4/U%G|# |'b6L3FO ie<59 FXG'K0U`Rb6!}4K0 V+ZlJ]Uhz!A!Z!刭?&sA#<# $B jfJ 'j `00>oPaYc l]]aE:Ͷ10TbCcba`iK8K8h<5eg۰Zk(qQlqe~Ee@||`ai !hA'o>"rA #sHD:@ # h4>T]]9 MTJU@?^oO?@^?@ә!?@8laxy?P6 v>JuM` ?)uJt  L+It=WݑHIRE! YBIl p#>2zGz? HJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A(.?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!19b $<# $B 9b`fJ: muۘ]0:mOc ?i'A~aEesDͅ?c ? V#NeFaze<5e@|lVaiU3Lh`~EA#o>"rA #sHD:@ # h4>T]]9 MTJU@tr,?@Uw?@ˋ>WB?@=E?P6 v>JuM` ?)uJt  6j>It=WNFFIRݮIl$dvĤ#>2zGz?3 HAeb& #z *(#+#+#&UJp!p!5 g`?CopyrigTt (c) 20 9 M c. os f"!r !a i n. Al (s"e ej v0d  i!Em$}! 
(K2?b6 ?ALb4($3/ 4/U%ę-|# |'b6?Fiie<59 FX/G3'0UBŴb6!}40 TaFuJlJ8>UhAA !1!Z}'- E|zo}v#<b$,] VJ:@8Qw0RS S IBf>QE !?@\BU--# i[F @ h<53n:CU9fKo]o ڵ h5a9YUU aUUwiǤ]Q işeq5 h2r_U"rA 5#sHD:@ # h8>T ]]9 MTJU@^5=~?@,=Vݼ?@(w)٫?@Q`m ^?P6 > uM`lW? SuJt  ?aV_MtA[_=p'MVYBMpry2VI'>\??\.?lFA8 J2llf?3 HRJB& @i7b#zv t#]b( J(+&B(" ?APb)4(z$!D3//&115 k`?CopyrigTtk(c)k2009kM0c0os0f21r01a0i0n.k Al @ 8sBe0e0v @d@1$4-3 7bO&&i b~59 &&XG3'0U!R&!40 FJlJ $  (sQsQ] 1eA!Zl0|z 0 @?n' Q b7fJ:]~^PsojR֯UaE !?@q?`0 0c s!R'i ?e:}ɟh~5aef9oa|b c ؠgT]]9 MTJU@?έ?@=?@5?@$D"?P6 v>JuM` ?qu#t  &lIt=WA_I߃IRFIlI#>2zGzK? HJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A/.7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6#)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ# $<Ef4B 9b`fJ x۰Z @]0?c Q#Oc Q\}ȱ~aE:Õ)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD:@ # h4>T]]9 MTJU@Iq?@q֌?@>TC?@ݿJWc^?P6 rhAJuM` ?)uJt  0@hIt=WmiIR?8IlRv\#>2zGz? HJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A(.?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LhFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!Z!?v"rA # sHD:@ # h4>T]]9 MTJU@ڮ?@7 ?@BZ?@ib+iF?P6 v>JuM` ?qu#t  JJ&qIt=Wr_hIRUo}WIlTy6ވI#>2zGzK? HJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A/.7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6#)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ# $<Ef4B 9b`fJ:O@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD:@ # h4>T]]9 MTJU@=9_?@o(Ѿ?@u'?@ݿJWc^?P6 v>JuM` ?qut  V6>nIt=WgD_~baIRkzIlRv\I#>2zGzK? HJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A(.7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6B>F O @ie<59 FXG'0U`Rb6!}40 V+ZlJ]Uhz!hA!Z![?&sA#<# $B %jfJ 'j `00>oPaYc l]]|eE:Ͷ10TbCcba`iK8K8h<5eg۰Zk(qQlqe~Ee@||`ai !hA'o>"rA #sHD:@ # h4>T]]9 MTJU@BP(?@xQ8?@ә!?@8laxy?P6 v>JuM` ?)uJt  9KIt=WYGJIRE! YBIl p#>2zGz? HJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A(.?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXGB'0U`Rb6!}40 V+ZlJQI0h|G!A !19b$<# $BM 9b`fJ: muۘ]ᇄ0:mOc i'A~aEesD?c  V#NeFaze<5e@|lVaiU?3Lh~EA#o>"rA k#sHD:@ # h4>T]]9 MTJU@W?@M6N?@ˋ>WB?@=E?P6 rlAJuM` ?)uJt  1|It=WƯ:IRݮIl$dvĤ#>2zGz?(3 HJsAeb #z (#+#+#&*Jp!p!5 g`?CopyrigTt (c) 2U0 9 M c os f"!rԙ !a i n}. Al U (s"e e 5v0d i!Em$}! (K2?b6 ?ALb4($I3/ 4/U%b-|# |'Yb6?Fiie<59 FjX/G3_'0UBb6Z!}40 aFuJlJ8>UhAA Ў!1!Z}'[- E|zo}#;<b$k] VJ:@8Qw0RS S IBf>QE !?@\BU--| i[F @ h<53n:CU9fKo]o ڵ h5a9YUU aUUwi]Q ieq'5 h2r_Uv"rA # sHD:@ # h8>T ]]9 MTJU@&J?@_5+:?@(w)٫?@Q`m ^?P6 >JuM`lW? SuJt  y SksMtA[_eMVYBMpry2VI'>\??\.?lFA8 J2llf?3 HRJB& @i7b#zv t#]b( J(+&B(" ?APb)4(z$!D3//&115 k`?CopyrigTtk(c)k2009kM0c0os0f21r01a0i0n.k Al @ 8sBe0e0v @d@1$4-3 7bO&&i b~59 &&XG3'0U!R&!40 FJlJ $  (sQsQ] 1eA!Zl0|z 0 @?n' Q b7fJ:]~^PsojR֯UaE !?@q?`0 0c s!R'i ?e:}ɟh~5aef9oa|b c ؠgT]]9 MTJU@B?@-8?@5?@$D"?P6 v>JuM` ?qu#t  4E$p͡It=W_Lx4IRFIlI#>2zGzK? HJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A/.7?b6 ?AgJaT6` F !!l"zbAJ +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼#FO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!1lL# $<EfB 9b`fJ x۰Z @]0?c Q菌#Oc Q\}ֱ~aE:ÕO)0:iG`iw*h<5a#j8oJiqze~EA#o>"rA #sHD:@ # h4>T]]9 MTJU@*ӏ?@c?@>TC?@ݿJWc^?P6 v>JuM` ?)uJt  uIt=Wt]aIR?8IlRv\#>2zGz? HJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. 
WAl (s"Ue e v0d i!Em$+A(.?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!Z!?v"rA # sHD:@ # h4>T]]9 MTJU@"?@ "S/?@BZ?@ib+iF?P6 v >JuM`?qu#t  B?ͳIt=WF_p\cIRUo}WIlTy6ވI#>2zGzK? HJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A/.7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6#( DO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ# $<Ef4B 9b`fJ:O@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD:@ # h4>T]]9 MTJU@d?@ 3\D?@u'異JWc^?P6 !>JuM` ?juJt  rs"It=WVIRkzIl?Rv\#>2zGz?) HJsAeb #zbU(#+#+#&Jp!p!5 g`?Copy_rigTtZ(c)Z2U0 9ZM c os f"!rԙ !a i n}.Z Al T[&s"e e 5v0d i!Em$+A(.a?b6 ?AgJaT6` F !!l"z}bA +6ig5V*181 ?2?;#:C?'4/U%G|# |'b6LLFO ie<59 FXG'0U`Rb610 V+ZlJ]Uhz!A!Z!?&osA#<# $B jfJ 'j `00>oPaYc l]]#aE:Ͷ0TbCcba`iK8K8h<5eg۰Zk(qQlqe~Ee@?||`ai !hA'o>"rA #sHD:@ # h4>T]]9 MTJU@69 Ğ?@l_n.?@ә!?@8laxy?P6 v">JuM` ?)uJt  sd It=W> IRE! YBIl p#>2zGz? HJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A(.?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!19b $<# $B 9b`fJ: muۘ]0:mOc ?i'A~aEesDͅ?c ? V#NeFaze<5e@|lVaiU3Lh`~EA#o>"rA #sHD:@ # h4>T]]9 MTJU@#&\?@ԋZ|?@ˋ>WB?@=E?P6 v#>JuM` ?)uJt  [It=Wq.IRݮIl$dvĤ#>2zGz?(3 HJsAeb #z (#+#+#&*Jp!p!5 g`?CopyrigTt (c) 2U0 9 M c os f"!rԙ !a i n}. Al U (s"e e 5v0d i!Em$}! (K2?b6 ?ALb4($I3/ 4/U%b-|# |'Yb6?Fiie<59 FjX/G3_'0UBb6Z!}40 aFuJlJ8>UhAA Ў!1!Z}'[- E|zo}#<b$,] VJ:@8Qw0RS S IBf>QE !?@\BU--# i[F @ h<53n:CU9fKo]o ڵ h5a9YUU aUUwiǤ]Q işeq5 h2r_U"rA 5#sHD:@ # h8>T ]]9 MTJU@\W>?@47 ?@(w)٫?@Q`m ^?P6 $>JuM`lW? SuJt  /êQPͰMtA[_Y#MVYBMpry2VI'>\??\.?lFA8 J2llf?3 HRJB& @i7b#zv t#]b( J(+&B(" ?APb)4(z$!D3//&115 k`?CopyrigTtk(c)k2009kM0c0os0f21r01a0i0n.k Al @ 8sBe0e0v @d@1$4-3 7bO&&i b~59 &&XG3'0U!R&!40 FJlJ $  (sQsQ] 1eA!Zl0|z 0 @?n' Q b7fJ:]~^PsojR֯UaE !?@q?`0 0c s!R'i ?e:}ɟh~5aef9oa|b c ؠgTh]]9 M JU@xJuM` ?Uu Jb[LgJt b{tM>2zGz?)77Os@MJA@b7sz}"b)+,& !5Jp"( ?& N"A"b)+ 'b##b#(J,J/B-  Q3M#9 \/??\.?!?f=Q1Q1Y7;m`?CopyrigTt (c)3@2`093@M+@c)@oKs#@f1B"Ar%@^Aa1@i#@n.3@ Aly@ )Hs}BeQ@e)@v@dq@O0i bz59 m6m?7/'0URś65a FTR#l:J Uh(] nQnQ]FQ AB1!:'BP(Ա?a sRYqJSv?@bX,, iYwla)q )\()4eEDnFh4\ono ?p= ףЈhz5Ufl6a \uiHшhnVgj tj(a(4eU҄Q kR!l8aQY@h4GF[|0aui{hFT]1rto(yQiC@ ?@-uUaȳl1rlb\߈hQ:WpZƟ ܈hU_oo)lHD:@ # h4>T]]9 MTJU@D//?@w 8?@5?@$D"?P6 v1>JuM` ?qu#t  z7It=W_iIRFIlI#>2zGzK? HJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A/.7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6#)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ# $<Ef4B 9b`fJ x۰Z @]0?c Q#Oc Q\}ȱ~aE:Õ)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD:@ # h4>T]]9 MTJU@qK?@>,Λ?@>TC?@ݿJWc^?P6 v2>JuM` ?)uJt  jDFIt=W-M3}IR?8IlRv\#>2zGz? HJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A(.?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 0??8:C?'4/U%GX|# |'b6LLBBO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!Z!?v"rA # sHD:@ # h4>T]]9 MTJU@QL ?@L+?@BZ?@ib+iF?P6 v3>JuM` ?qu#t  Ç\UIt=W_ @ٶIRUo}WIlTy6ވI#>2zGzK? HJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A/.7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6#)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ# $<Ef4B 9b`fJ:O@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD:@ # h4>T]]9 MTJU@#li[?@e>?@u'?@ݿJWc^?P6 a?JuM` ?uJt  ICIt=WN3אsIRk_zIlRv\#>2zGz?R HJAeb #zb(#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A(. 
?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6&La`B O @ie<59 FXG'0U`Rb6!}40 V+ZlJ]Uhz!hA!Z![?&sA#<# $` %jfJ 'j `00>oPaYc l]]aE:Ͷc0TbCcba`iK8K8h<5eg۰Zk(qQlqe~Ee@||`ai !hA'o>v"rA # sHD:@ # h4>T]]9 MTJU@ 3?@8.G?@ә!?@8laxy?P6 v5>JuM` ?)uJt  ͧILIt=Wf IRE! YBIl p#>2zGz? HJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A(.?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!19b $<# $B 9b`fJ: muۘ]0:mOc ?i'A~aEesDͅ?c ? V#NeFaze<5e@|lVaiU3Lh`~EA#o>"rA #sHD:@ # h4>T]]9 MTJU@'HY?@pN?@ˋ>WB?@=E?P6 v6>JuM` ?)uJt  PrIt=WoDIRݮIl$dvĤ#>2zGz?(3 HJsAeb #z (#+#+#&*Jp!p!5 g`?CopyrigTt (c) 2U0 9 M c os f"!rԙ !a i n}. Al U (s"e e 5v0d i!Em$}! (K2?b6 ?ALb4($I3/ 4/U%b-|# |'Yb6?Fiie<59 FjX/G3_'0UBb6Z!}40 aFuJlJ8>UhAA Ў!1!Z}'[- E|zo}#<b$,] VJ:@8Qw0RS S IBf>QE !?@\BU--# i[F @ h<53n:CU9fKo]o ڵ h5a9YUU aUUwiǤ]Q işeq5 h2r_U"rA 5#sHD:@ # h8>T ]]9 MTJU@mڷ?@WZgL?@(w)٫?@Q`m ^?P6 7>JuM`lW? SuJt  NKMtA[X_*`lMVYBMpry2VI'>\??\.?lFA8 J2llf?3 HRJB& @i7b#zv t#]b( J(+&B(" ?APb)4(z$!D3//&115 k`?CopyrigTtk(c)k2009kM0c0os0f21r01a0i0n.k Al @ 8sBe0e0v @d@1$4-3 7bO&&i b~59 &&XG3'0U!R&!40 FJlJ $  (sQsQ] 1eA!Zl0|z 0 @?n' Q b7fJ:]~^PsojR֯UaE !?@q?`0 0c s!R'i ?e:}ɟh~5aef9oa|b c ؠgTh]]9 MTIAU@?@|be?@X͂?@SSfhr~?Pps-8R?>$9> n JuM{` ?e ;  Su*Jt'  $mCLmta{x_&3.Jv  q?D=:%z?v(A[|Jq>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&*Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>UhETh]]9 MTIAU@?@2R} ?@X͂?@SSfhr~?Pps-8R?>$:> n JuM{` ?e ;  Su*Jt'  H-mta{_mLL}3.Jv  q?D=:%z?v(A[|Jq>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>UhETh]]9 MTIAU@B?@ K?@X͂?@SSfhr~?Pps-8R?>$;> n JuM{` ?e ;  Su*Jt'  m@mta{ɀ_13.Jv  q?D=:%z?v(A[|Jq>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&*Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>UhETh]]9 MTIAU@Zԩ?@j4ʼn?@X͂?@SSfhr~?Pps-8R?>$<> n JuM{` ?e ;  Su*Jt'  }$~mta{_M {3.Jv  q?D=:%z?v(A[|Jq>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&*Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>UhETh]]9 MTIAU@h!I?@u\?@X͂?@SSfhr~?Pps-8R?>$=> n JuM{` ?e ;  Su*Jt'  XƿHmta{"_}{3.Jv  q?D=:%z?v(A[|Jq>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>UhETh]]9 MTIAU@?@d\0?@X͂?@SSfhr~?Pps-8R?>$>> n JuM{` ?e ;  Su*Jt'  H-mta{pu_3.Jv  q?D=:%z?v(A[|Jq>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>UhETh]]9 MTIAU@sLGv?@m\?@X͂?@SSfhr~?Pps-8R?>$R>  JuM` ? ;  )u*Jt'  [Rwmta{ēd3.Jv  q?D=:%_z?v(A[|?Jq>JU2N?贁N[?)@M#J&A@wbI(bW)U+U+U&Jy#0?!J ?%P?tAO"d+bW+$u/%B#BA1A15 `?Cop rigTt (c)x0]2`09x0Mp0]cn0osh0fv2Rg1rj01av0ih0Wn.x0 l0U n8s2e0en05v0d0:1PY>4#-#,M3 M7#(p0b59 DFXGdG'/2WUA;F%!$a dF$&#l,&}>UhEuwγ u~w;'bAUSSr`?CopyrigPt (cu) 2\09 uMcoszIfyr|aizn. WAlЀ sԂUeevdȀ;LSPDB-M_ M_?[D =M`DrMM ID1+0UǒM"C $cPc X(.'ƔM$!I] Ue*^Y30DXl ! !dH!\!p!!!!!!!!1$1%&'*()11pD113Ac M?  F4 K &E& 4&4&U R&R&u` W?I$0BN`l~$I/( /2(>/2P(\/n$uz)tw!  
IQEtABOJ-EG14%F4GaO ۃ ZUAA/B`?CopyrigPt (c)@2\0 M@c@os@fBAr@Aa@i@n.@ AlP HsRe@e@v&PdPA{BCSDCB-CC "G{$FCTVV RD#C05UbS@qF@ ^PU $fXhW.Dg'dōV%!T] DfXje H^maPaYqT  2qa8wEJtU{BUJtU{3Jt!U{UJtU{JtR!U{|JtU{&qFx&U{tSHvDU{.qFxbU{2qFxU{6uJtUwHD:B # h0>Th]]9 MTIAU@?@&*y>{?@X͂?@SSfhr~?Pps-8R?>$A n JuM{` ?e ;  Su*Jt'  $mCLmta{_n3.Jv  q?D=:%z?v(A[|Jq>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&*Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>UhET]]9 M JU@xJuM` ?uJt ASt=WJRb~li>t2U?=@MJA@b + &J[N#?0a& ?AT)B!!5 g`?CopyrigTtZ(c)Z20 9ZM c. os f"!r 1ai n.Z Al90 (s=2e0ej vO0d10@!$b-# 'Ya&?6iiek59 6jX7^'0a&Z!|$0 FJlJ UhqMAA ]A !4Ja-'Z@?h4FӃ  <{Gz#A OR|VJ:6]].rM__Z)\("UEC@ ?@-bȃ S 1 `#`YR\Xk5^jZso!o ̌aX5U'BOP(?gks#dT[RUUSv?@n aX,flYwq%lSXn?Fh4  p= ףXAYfl6\`YHzmPXAV``ʼ_Z(QYAY҄agklQbE26_HU"rA HD:B # h0>Th]]9 MTIAU@B?@Dd;]?@X͂?@SSfhr~?Pps-8R?>$F> n JuM{` ?e ;  Su*Jt'  m@mta{_dA3.Jv  q?D=:%z?v(A[|Jq>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&*Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>UhET]]9 MTJU@F6&?@}%4+ޱ?@5?@$D"?P6 rAJuM` ?qu#t  (LbIt=W_,MyIRFIlI#>2zGzK? HJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A/.7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6#(BO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ# $<Ef4 9b`fJ x۰Z @]0?c Q#Oc Q\}|zeE:ÕO)0:iG`iw*h<5a#j8oJiqze~EA#o>"rA #sHD:@ # h4>T]]9 MTJU@)?@Uj?@>TC?@ݿJWc^?P6 vI>JuM` ?)uJt  6|.It=WL\} IR?8IlRv\#>2zGz? HJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A(.?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!Z!?v"rA # sHD:@ # h4>T]]9 MTJU@#{5?@ř?@BZ?@ib+iF?P6 vJ>JuM` ?qu#t  DqIt=W_{*IRUo}WIlTy6ވI#>2zGzK? HJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A/.7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6#)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ# $<Ef4B 9b`fJ:O@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD:B # h0>Th]]9 MTIAU@Zԩ?@6#TTĶ?@X͂?@SSfhr~?Pps-8R?>$K> n JuM{` ?e ;  Su*Jt'  }$~mta{=g2.Jv  q?D=:%_z?v(A[|Jq>JU2N贁N[R?@M#J&A@bI(b*W)U+U+U&Jy#c?!J ?%P?AO"d+bRW+$u/%B#A1A15 `?Cop rigTt (c)x02`09x0Mp0cn0osh0fv2g1rj01av0ih0n.x0 l0 n8s2e0ejn0v0d0:1Y>4#-X#M3 M7#(p0b59 DFXGdG_'2WUA;FJ!$a dFD$&#l,&}>UhET ]]9 MTJU@fl6?@eb,)?@(w)٫?@Q`m ^?P6 L>JuM`lW? SuJt  LUMtA[_ʓWMVYBMpry2VI'>\??\.?lFA8 J2llf?3 HRJB& @i7b#zv t#]b( J(+&B(" ?APb)4(z$!D3//&115 k`?CopyrigTtk(c)k2009kM0c0os0f21r01a0i0n.k Al @ 8sBe0e0v @d@1$4-3 7bO&&i b~59 &&XG3'0U!R&!40 FJlJ $  (sQsQ] 1eA!Zl0|z 0 @?n' Q b7fJ:]~^PsojR֯UaE !?@q?`0 0c s!R'i ?e:}ɟh~5aef9oa|b c ؠgT]]9 MTJU@S?@ަƢ?@u'?@ݿJWc^?P6 vN>JuM` ?)uJt  cIt=W(?IRkzIlRv\#>2zGz? HJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. 
WAl (s"Ue e v0d i!Em$+A(.?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+Z lJ]UhzЎ!A!Z!?&sA#<# $BK jfJ 'j `00>oPaYc l?]]aE:Ͷǝ0TbCcba`iK8K8h<5eg۰Zk(qQlqe~Ee@||`ai !hA'o>"rA #sHD:B # h0>Th]]9 MTIAU@h!I?@Ž?@X͂?@SSfhr~?Pps-8R?>$P> n JuM{` ?e ;  Su*Jt'  XƿHmta{0_Y3.Jv  q?D=:%z?v(A[|Jq>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&Jy#?!,J ?%?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>UhET]]9 MTJU@ةA!z?@B]?@ˋ>WB?@=E?P6 vR>JuM` ?)uJt  wT vIt=W?mIRݮIl$dvĤ#>2zGz?(3 HJsAeb #z (#+#+#&*Jp!p!5 g`?CopyrigTtt(wc)t20 9tM c o%s f"!r !ua i n.t_ Al (Us"e e v0 d i!Em$}! (K2?0b6 ?ALcb4($3/ 4/U%-X|# |'b6?Fiie<59 FX/G3'0UBb6!}40 aFuJl*J8>UhAA !1!Z}'- E|zo}#.<b$]K VJ:@8Qw0RS S IBf>QE !?@\BU--# i[F @ h<53n:CU9fKo]o ڵ h5a9YUU aUUwi]Q ieq'5 h2r_Uv"rA # sHD:@ # h4>T]]9 MTJU@?@zų[?@ә!?@8laxy?P6 vS>JuM` ?)uJt  {It=W ůIRE! YBIl p#>2zGz? HJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A(.?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!19b $<# $B 9b`fJ: muۘ]0:mOc ?i'A~aEesDͅ?c ? V#NeFaze<5e@|lVaiU3Lh`~EA#o>"rA #sHD:B # h0>Th]]9 MTIAU@sLGv?@c?@X͂?@SSfhr~?Pps-8R?>$U> n JuM{` ?e ;  Su*Jt'  [Rw͖mta{ۡ_TL3.Jv  q?D=:%z?~v(A[|Jq>JU2N贁N[R?@M#J&A@bI(b*W)U+U+U&ՆJy#a?!J ?%P?AO"d+bRW+$u/%B#A1A15 `?Cop rigTt (c)x02`09x0Mp0cn0osh0fv2g1rj01av0ih0n.x0 l0 n8s2e0ejn0v0d0:1Y>4#-X#M3 M7#(p0b59 DFXGdG_'2WUA;FJ!$a dFD$&#l,&}>UhE9_%O+s u8+DOʅVFxZI#>B U 9? ,~d,ܮ o@+ht oKsG8ofHVz OxX#\Z&Lo>*Ͽ-1H58j<Ͽ?C~Gxҿ&KN$اkR6VH8Yhy] aKdMh lo.sw]0 z82 ^~Hh4 G'6 68 CH: ΌTO//?#?5?G?Y?k?}???X` bNdf߁i@6HkܦHxmg1ObNϮ4!NINJǶ`JNIxmNJ?@QB.OB_ J-O6UQF\)'xNE?_"P(T~X?xߺEW*O9>%_"|PU_D)Z6)oEMaUFD  h(^TYYBXCUF~?x<F BP(?P?  B+66 J] 6qa{6a!6ah9ha6a6!a&`8%!a/&6C!aM&6a!ak&6!a&`d9da&6!a&6!a&6!a6a61a6631a=66Q1a[66o1ay6a 61a6!61a6"61a6#61a6a$6AaF%6#Aa-F&6AAaKF'6_AaiF`]9]aF)6AaF*6AaF+6AaFa,6AaF-6QaV.61Qa;V/6OQaYVa06mQawV16QaV26QaV36QaVa46QaV56aa f66!aa+f86?aaIfa96]aagf:6{aaf;6aaf<6aafa=6aaf>6aaf9av@6/qa9vaA6MqaWvB6kqauvC6qav9av!E6qav9avG6a H6a)aI6=aGJ6[aeK6yaL6aaM6aN6Ӂa݆O6a9aQ06-a7R6K aUS6iasT6aU06aV6Ñ a͖W6aX6a Y06a'Z6; aE[6Yac\6wa]06a^6 a_6ѡaۦ`6aa06 ac6+ a5d6IaSe6gaqf06ag6aQ9Qa˶i6߱aj06ak6 a%l69aCm6Waan06uao6 ap6aq6ar06as6 at6)a3u6GaQv06eaow6 ax6ay6az06a{6 a|6a#}67aA~06Ua_6s a}6a6a06a6 a6 a6'a106EaO6c am6a6a06a6 a6a6a!065a?6S a]6qa{ra0rar arara0r%a/rC MraQkrQ0rQr QrQrQ&0r!Q&r3! Q=&rQ!Q[&ro!Qy&0r!Q&r! Q&r!Q&r!Q&0r1Q6r#1 Q-6rA1QK6r_1Qi60r}1Q6r1 Q6r1Q6r1Q60r1Q6rA QFr1AQ;FrOAQYF0rmAQwFrA QFrAQFrAQFrAQFJȖ SUqJ?_,V S=V Q'  T UrQS S@L&ɯd2?]ɓX?\.ҥX^ sUQV T_\ UJ%Q xS T -b@Y T`&P t>h,aeL,dbtok^`eupjet^`h r\wbgQgQ*QJ_i( RU SJWS> 0V]!? ?wwww{np0wzZp~uwpwwppw^w|{pdp^ZDrag onut hepe.Rih-c]lckaUdcos UPo etQeAvUwreP 5aab~߿~??)j࿔S5E?0FJ?nɶؿ3@ ?DHD: # h4>T]]9 MTJUF~?F͓??FtQ+/?P6 AmJuM` ?uJt At=Wrb#JRbJli:O#>[a? ?AbAw@"b $J2zGz?)@MJ7& #fbu#z$ ( (&,"*BB!!5 g`?CopyrigT}tZ(c)ZW2009ZM 0]c 0os0f2R1r0>1a0i0n.Z AUlY0 8s]2e10e 0vo0dQ0!$ę-# '?6iie59 6X7}'0UpBi!0 'F;JlJAh7!1!ZFTE <EgQ# IRSvVJ:M&d2?6VOSe\^TUR UEU@SG\BZ_ɫQ5[@_,cYU520_BU"rA #cHD: # h4>T]]9 MTJUFM&d2?FDx??Fl5?P6 AmJuM` ?uJt\  ]It=WIW$IRIl ڜI#>[ ? ?Ab A@"b$J2zGz?R=@MJ`=&#f {#z .(b)&,"*BB!!5 g`?CopyrigTt (c])020%090uM0c0os 0If21r 0D1a0i 0n.0 WAl_0 8sc2Ue70e0vu0dW0!$-# '?6iieP59 6X7F"'0UvB!0 -FAJlJ $L!(AA ]A1h1!ZFhmg  <!q}"##M _RVJL!%t?FTE\QeS , φ+#pYYSUKQUE^,n__ "+^HX5:?LV&k?SN#tTkRU5Ulf܄BP(%lgpY?BeO#X(dec]\m\cX ~.K$oZhLYUiXQaIZ^_i3qU32F_XU"rA 5#HD: # h#4>T#3 AMJUFbX?FϏ ?FM&d2?F _gw?P6 #AJuM` ?uJt  k]It=~W.*IRIl̆Rkפ#>[a? 
?AbAw@"b$J2zGz?)@MJ=&#fb{#z\ (b)&T,"*B!!5 g`?CopyrigTt (c)020%090M0c0os 0f21r 0D1a0i 0n.0 Al_0 8sc2e70e0vu0dW0!$$-# '%?6iie59 6X7'K0UvB!K0 -FAJlJAh711T?ҿP mVN  Q  . *>uA` ?Gm$m8JuQ\>XbK>tY  wetVy 5Ie>U2zGz?@9 /#>?&Aw@bq(Wb)+&b" !p$4>9#"?& "w"b)";|'b"i,b"/N9#0%B 9#11;8"`?CopyrigXt (wc)020090M0c0o%s0f21r0Aua0i0n.0_ Al/@ 8Us3Be@e0vE@d'@'"i fE 6(X7H!'_2q@&,d50 tFJul>4Ul8QQCaR 1(Q1w!-(N!R?FJ?@ `0R QyUeR /ge%$(U⤖侇_X?,_XN Uωr?F r?FOtO?P"ZO %o1~Y.$!$iA4Y!eX]?C!ebRҡe:S |we"d  1X461Boo wr$5%>aEJe_~?F.ԵN?FaV-u"ؾ?PWRrR~|`%s mK)m >'ʲl} fdjg tsC uL+t-VwfxQUv`q7&ҹ _XRQb%d2[i֥TWVQFd50_#jS aUTf imp#jpm0"XY֘t?FTEY`Qj;!lЙXbQ?,n$ xXfQYJhbPnZ\>]!h"bXjQV6W_QUnU_o_)oTQaRp v__N-aZ?Fah<b}煔M_Ophz y7vPQP]BP({ύ 贁NPE 0BXЕHD: # h4>T]]9 MTJUF~?FS BP??Fx<?P6 #AmJuM` ?uJt At=W5_v@"J]RbJl>t2U.?=@MJA@+b)+&J[P'?g& ?A)*B!!5 g`?CopyrigTtZ(wc)Z2009ZM c o%s f"!r $1uai n.Z_ Al?0 (UsC2e0e vU0 d70!H$-,# 'g&K?6i@ieq59 6X7'K0g&!$K0 F!JlJ\#Uh( #3 ]At!"3'ZF>h f <YDc#BA NR{VJ:5]dGL_^Zh?.2\QE]t?FgPg܉ S )?w#_YXq5^(l]o o e]ǂX5U%?D"?;Vfk(\#cTZRUUM\_Y7ۨ Xno ^Z~#b%{XAYM&d2fkBqlUE25_tGUSrA   UGD  3 h0TdYYBjUFțau?F73X?FGV^bd?FZ^Zp?P} X  f  f0 D fX l f  f  bd  f  & f & 4& fH& \& fp& &! f&" &# f&$ &% f&& &' f6( $6) f86* L6+ f`6, t6- f6. 6/ b6]_61 f62 63 fF4 F5 (F6 T#3 AMJUF^]P'?Fݱ?F#6?F{_ "w?P6  >JuM` ?uJt  (LbIt=W,MyIR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ %ޜ\0?c Q#Oc Q\}|zeE:pOϫ0:iG`iw*h<5a#j8oJiqze~EA#o>"rA #sHD:  # h4>T]]9 MTJUFH[b?F:%lj?FݡD?F,6^?P6 v >JuM` ?)uJt  6|.It=WL\} IR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!ZV\槈?F9"rA #sHD:  # h#4>T#3 AMJUFiW>E?FTb?F %?FP_EyFw?P6 >JuM` ?uJt  DqIt=W{*IRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:%O\0:iG`Oc qy"zeEajoJiaze<5e'fÏ 0mFaij? mh~EA#o>"rA k#sHD:  # h4>T]]9 MTJUFF ?FƢ?F%9.?F,6^?P6 v>JuM` ?)uJt  cIt=W(?IRkzIlRv\#>2zGz?@M2JAe7b #zb(J#+#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c oKs f"!r !a i n. Al (s"e e v0d i!EHm$+A?b6 ?AgJaTv6` F !!l"z۾bZA +J6ig5V181 ?2?;ߑ:C?'4/U%ęG|# b|'b6LFO ie<59 FXG}'0U`Rib6!}40I V+ZlJ]Uhz!AZֺV\?F9&sA#<# $ B bnbJ'jWĞ 00>oPaYc l]]ȱaE:P^0TbCcba`iK8K8h<5e %k?(qQlqe~EeF$||`ai? !hA'oFrA k#sHD:  # h4>T]]9 MTJUFw??FxOe?FV\?F. xy?P6 rAJuM` ? )uJt  {It=W ůIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6L>FO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!19b $<# $BJ C`fJ: m$D]ᇄ0:mOc i'A~aEeTE?c  V#NeFaze<5eF$|lVaiU?3Lh~EA#o>"rA k#sHD:  # h4>T]]9 MTJUF% !(@{?FmN,']?F~ tC?F7܄E?P6 v>JuM` ?)uJt  wT vIt=W?mIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?Copy_rigTtt(c)t2U0 9tM c os f"!rԙ !a i n}.t Al U (s"e e 5v0d i!Em$}! K2?b6 ?ALb4($I3/ 4/U%b-|# |'b6<Fiie<59 FjX/G'0UBib6!}40 aFuJlJ8>UhAA@ !1!Zo(. E|zo}#<bX$-B] VʼJ:F,?0ȒS S I?Bf>QEҺV\?Ft@K\BU--# i[ÏF @ h<53n?I9fKo]o ڵ h5a9YUU aUU 3yk]Q i?eq5 h2r_U"rA k#sHD:  # h8>T ]]9 MTJUFG"7?F~8)?F AE?Fzl]?P6 >JuM`lW? 
SuJt  LUMtA[_ʓWMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!Zsꈐ43 0 @?/n' #M b7fJ:]n#vca71xp]] 8usUQPUQS^%S+ %R&bpGba8DaP`Q]`NE_TWOPKJ0WSH+`Ps RUORuIsSk1J0@^e_ erP}28\usvvP62<%-g ۏ'9_us1125q5q2A+Bhh@R)pR*) 2hqhq|b(8 1xpa[3!1Qq6`:zGQ`1f1[j2B@@7RF5VCT @nb2FE5us_4Σ&F3s͵NSYFB@UͬpU ͬQUHͬ|eA§,@Q_Ɔp׏(ɟ 8A B<F5$lAVB\KkF`RC`asPe uexPP2!"! %(P%PORJ1 '2qa` P֒ȑ8(S˱AȕE{8H5AALA˱LܥAL(ARGMAWЛtA5ЛA͞/%AELHD:  # h#4>T#3 AMJUF͹30?FY˟8?F#6?F{_ "w?P6 >JuM` ?uJt  z7It=WiIR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ %ޜ\0?c Q#Oc Q\}ȱ~aE:pϫ0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD:  # h4>T]]9 MTJUF,2?F/LΛ?FݡD?F,6^?P6 v>JuM` ?)uJt  jDFIt=W-M3}IR?8IlRv\#>2zGz?@MlAneb #zb(J#+#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c oKs f"!r !a i n. Al (s"e e v0d i!EHm$+A?b6 ?AgJaTv6` F !!l"z۾bZA +J6ig5V181 ?2?;ߑ:C?'4/U%ęG|# b|'b6LBO ie<59 FXG}'0U`Rib6!}40 V+ZlJQI0h|GЎ!A!ZV\O?F9"rA 5#sHD:  # h#4>T#3 AMJUF *.?F)s*?F %?FP_EyFw?P6 >uM` ?uJt  Ç\UIt=W @ٶIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:%O\0:iG`Oc qy"zeEajoJiaze<5e'fÏ 0mFaij? mh~EA#o>"rA k#sHD:  # h4>T]]9 MTJUFJ.?Fh ?F%9.?F,6^?P6 v>JuM` ?)uJt  ICIt=WN3sIRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 $V+ZlJ]Uhz@!A!ZֺV?\?F9&sA #<# $B jfJ'jWĞ 00>oPaYc l]]aE:P^0TbCcba`iK8K8h<5e %k(qQlqe~EeF$ǧ||`ai !hA'o>"rA 5#sHD:  # h4>T]]9 MTJUFPY?F3pG?FV\?F. xy?P6 rhAJuM` ?)uJt  ͧILIt=Wf IRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LhSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m$D]0:mOc i'A~aEeT7E?c  V#NeFaze<5eF$|lVaiU3Lh~EA#o>"]rA #sHD:  # h4>T]]9 MTJUF]cZ?FVIܳ?F~ tC?F7܄E?P6 v>JuM` ?)uJt  PrIt=WoDIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z(. E|zo}#<b$] RVJ:F,a0RS S IBf>QEҺV\?Ft@K\B?U--# i[F @ h<53nI9fKo]o ڵ h5a9YUU aUU~ 3yk]Q ieq5 h2r_U"]rA #sHD:  # h8>T ]]9 MTJUFyh ?F*nK?F AE?Fzl]?P6 >JuM`lW? u t  NKMtA[X*`lMVYBMpry2V'>\¹??\.?lA8 J2l_lf?=@JMJB& @ib#ztv t#b() (+&ՆB3" 3?APb)4(z$D3//&T115 k`?CopyrigTtk(c)k2009kM0c0oKs0f21r01a0i0n.k Al @ 8sBe0e0v @d@14-X3 7JbO&&i b~59 &&XGK"/'0U!R&-!40 FJ%lJ Q! (sQsQ]3Ы1eA!Zsꈐg4 0֚ @?n'#2#M b7fJ:]T#3 AMJUF~S5e?Fk]?F#6?F{_ "w?P6 >JuM` ?uJt  @@|It=Wn56uIR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FXGB/'0U`Rb6-!}40 V+ZlJQI0h|G!A!1l&fL# $<EfB 9b`fJ %ޜ\0?c Q菌#Oc Q\}ֱ~aE:pOϫ0:iG`iw*h<5a#j8oJiqze~EA#o>"rA #sHD:  # h4>T]]9 MTJUF3w?Fr(IY?FݡD?F,6^?P6 rlAJuM` ?)uJt  It=WX IR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. 
_ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LlQkBO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!ZV\槈?F9"rA #sHD:  # h#4>T#3 AMJUFZ)x?F~m?F %?FP_EyFw?P6 >JuM` ?uJt  1It=WOIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:%O\0:iG`Oc qy"zeEajoJiaze<5e'fÏ 0mFaij? mh~EA#o>"rA k#sHD:  # h4>T]]9 MTJUF:N[%?F ɇ?F%9.?F,6^?P6 v>JuM` ?)uJt  {d5Z\It=W2;wEIRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V+ZlJ]Uhz!A!ZֺV\?F9&sA#<# $BK jfJ'jWĞ 00>oPaYc l]]beE:P^c0TbCcba`iK8K8h<5e %k(qQlqe~EeF$||`ai !hA'o>v"rA # sHD:  # h4>T]]9 MTJUFydY?F?FV\?F. xy?P6 v>JuM` ?)uJt  V"(It=W7:LIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m$D]0:mOc i'A~aEeT7E?c  V#NeFaze<5eF$|lVaiU3Lh~EA#o>"]rA #sHD:  # h4>T]]9 MTJUFEtJuM`?)uJt  7It=WܯHIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z(. E|zo}#<bc$ R] )VJ:F,00RS S IBf>QEҺV\?Ft@K\BU--# i?[F @ h<53nI9fKo]o ڵ h5a9YUU aUU? 3yk]Q ieq5 h2r_U"rA #sHD:  # h8>T ]]9 MTJUF̉?F7|?F AE?Fzl]?P6 !>JuM`lW? SuJt  o|MtA[_"oRMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R& 10 FJlJ Q! (sQsQ]31heA!Zsꈐ43 0 @?ҏn'8R# b7fJ:]T#3 AMJUFJuM` ?uJt  .9BWyIt=Wnq׏8IR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ %ޜ\0?c Q#Oc Q\}ȱ~aE:pϫ0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD:  # h4>T]]9 MTJUF%?FG>'~ݡD?F,6^?P6 #>JuM` ?juJt  HIt=WByIR?8Il?Rv\#>2zGz?@MJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!ZV\?F9"rA #sHD:  # h#4>T#3 AMJUFQK݆x?F-1?F %?FP_EyFw?P6 $>JuM` ?uJt  fd>It=WN6sgHRUo}WIlT?y6ވ#>2zGz?@MJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!1l&fL# $<EfB 9b`fJ:%ޜ'\0:iG`Oc qy"zeEajoJiaze<5e'fÞ 0mFaij mh~EA#o>"rA 5#sHD:  # h4>T]]9 MTJUF|k~ؾ?F;?F%9.?F,6^?P6 v%>JuM` ?)uJt  ُuߓIt=Wί IRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTtt(c])t20 9tuM c os If"!r !a i n.t WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+Z lJ]UhzЎ!A!ZֺV\O?F9&sA#<# $B %jfJ'jWĞ 00>oPaYc l]]aE:P^c0TbCcba`iK8K8h<5e %k(qQlqe~EeF$||`ai !hA'o>v"rA # sHD:  # h4>T]]9 MTJUFFJuM` ?)uJt  L+It=WݑHIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. 
_ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m$D]0:mOc i'A~aEeT7E?c  V#NeFaze<5eF$|lVaiU3Lh~EA#o>"]rA #sHD:  # h4>T]]9 MTJUFQt?FRLxw?F~ tC?F7܄E?P6 v'>JuM` ?)uJt  6j>It=WNFFIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z(. E|zo}#<b$] RVJ:F,a0RS S IBf>QEҺV\?Ft@K\B?U--# i[F @ h<53nI9fKo]o ڵ h5a9YUU aUU~ 3yk]Q ieq5 h2r_U"]rA #sHD:  # h8>T ]]9 MTJUFXf?Fݼ?F AE?Fzl]?P6 (>JuM`lW? SuJt  ?aV_MtA[_=p'MVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! sQsQ]31heA!Zsꈐ43 0 @?ҏn'R# b7fJ:]T#3 AMJUF;Э?Fn?F#6?F{_ "w?P6 )>JuM` ?uJt  &lIt=WAI׃IR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L( DO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ %ޜ\0?c Q#Oc Q\}ȱ~aE:pϫ0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD:  # h4>T]]9 MTJUFAYr?F L?FݡD?F,6^?P6 v*>JuM` ?)uJt  0@hIt=WmiIR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!ZV\槈?F9"rA #sHD:  # h#4>T#3 AMJUFZ?F!ќ?F %?FP_EyFw?P6 +>JuM` ?uJt  JJ&qIt=WrhIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:%O\0:iG`Oc qy"zeEajoJiaze<5e'fÏ 0mFaij? mh~EA#o>"rA k#sHD:  # h4>T]]9 MTJUF9 `?F2{ о?F%9.?F,6^?P6 v,>JuM` ?)uJt  V6>nIt=WgD~baIRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 $V+ZlJ]Uhz@!A!ZֺV?\?F9&sA #<# $B jfJ'jWĞ 00>oPaYc ?l]]|eE:P^0TbCcba`iK8K8h<5e %k(qQlqe~EeF$||`ai !hA'o>"rA #sHD:  # h4>T]]9 MTJUFq[')?F9i7?FV\?F. xy?P6 v->JuM` ?)uJt  9KIt=WYGJIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m$D]0:mOc i'A~aEeT7E?c  V#NeFaze<5eF$|lVaiU3Lh~EA#o>"]rA #sHD:  # h4>T]]9 MTJUFӅN?FZDM?F~ tC?F7܄E?P6 v.>JuM` ?)uJt  1|It=WƯ:IRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z(. E|zo}#<bc$2] )VJ:F,00RS S IBf>QEҺV\?Ft@K\BU--| i[F @ h<53nI9fKo]o  h5a9YUU aUU 3yk]Q ieq5 hb2r_U"rA #sHD:  # h8>T ]]9 MTJUFv?Fz?F AE?Fzl]?P6 />JuM`lW? SuJt  y SksMtA[_eMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!Zsꈐ43 0 @?/n' #M b7fJ:]T#3 AMJUF`;?F:7?F#6?F{_ "w?P6 0>JuM` ?uJt  4E$pIt=WLx4IR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. 
Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L( DO ie<59 FuXG_'0U`Rb6J!}4 V"+ZlJUhh|G!A!1l&fL# $<EfB 9b`fJ %ޜ\0?c Q菌#Oc Q\}ֱ~aE:pOϫ0:iG`iw*h<5a#j8oJiqze~EA#o>"rA #sHD:  # h4>T]]9 MTJUF [kԏ?F4 c?FݡD?F,6^?P6 v1>JuM` ?)uJt  uIt=Wt]aIR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!ZV\槈?F9"rA #sHD:  # h#4>T#3 AMJUFYļ#?FŬ /?F %?FP_EyFw?P6 2>JuM` ?uJt  B?˳It=WFp\cIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181a ??8:C$?'4/U%G|# |'b6L(BBO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:%O\0:iG`Oc qy"zeEajoJiaze<5e'fÏ 0mFaij? mh~E#o>"rA k#sHD:  # h4>T]]9 MTJUF-!4?FL$$?F%9.,6^?P6 3>JuM` ?juJt  rs"It=WVIRkzIl?Rv\#>2zGz?@MJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigT}tZ(c)ZW20 9ZM ]c os f"R!r !a i n.Z AQl [&s"e e v0d @i!Em$+A?b6 ?AgJaT6` F !!l"zbAR +6ig5V181 ?2?;:C?'$4/U%G|# |'b6L3FO ie<59 FXG'K0U`Rb6!}4K0 V+ZlJ]Uhz!A!ZֺV\?F9&߮sA#`<# $B jfJ'jW?Ğ 00>oPaYc l]G]aE:P^0TbCcba`i?K8K8h<5e %k(qׁQlqe~EeF$||`ai !hA'o>"]rA #sHD:  # h4>T]]9 MTJUFX?Ş?F).?FV\?F. xy?P6 a?JuM` ?uJt  sd It=W> IRE!_ YBIl p#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6La(`BO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!19b$<# $4` 9b`fJ: m$D]0:mOc i'A~aEeTE?c  V#NeFaze<5eF?$|lVaiU3Lh~EA#o>"rA #sHD:  # h4>T]]9 MTJUFQC]?FD?b.?F~ tC?F7܄E?P6 v5>JuM` ?)uJt  [It=Wq.IRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z(. E|zo}#<b$] RVJ:F,a0RS S IBf>QEҺV\?Ft@K\B?U--# i[F @ h<53nI9fKo]o ڵ h5a9YUU aUU~ 3yk]Q ieq5 h2r_U"]rA #sHD:  # h8>T ]]9 MTJUF:??Fq12?F AE?Fzl]?P6 6>JuM`lW? SuJt  /êQPͰMtA[_Y#MVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!Zsꈐ43 0 @?/n' #M b7fJ:] f? @ fA B &C  fE  &F b & 4&H fH&I \&J fp&K &L f&M &N &&O & f&Q &R f6S $6T f86U L6V f`6W t6X f6Y 6Z f6[ 6\ f6] 6^ fF_ F` f(Fa ~L1B~`1F~t1[D1e1R~1~1Z~1^~1b~Af~Aj~(AnT#3 AMJUF6٫&?Fh%4+ޱ?F*5?F$_D"w?P6 8>JuM` ?uJt  (LbIt=W,MyIR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6ig5V1`e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ۰Z@]0?c Q#Oc Q\}|zeE:O)0:iG`iw*h<5a#j8oJiqze~EA#o>"rA #sHD:7 # h4>T]]9 MTJUFI)?FUj?FeTC?FǿJWc^?P6 v9>JuM` ?)uJt  6|.It=WL\} IR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LQ DO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!Z!姈?F9"rA #sHD:7 # h#4>T#3 AMJUFe{5?F?Fs۰Z?FNb_+iFw?P6 :>JuM` ?uJt  DqIt=W{*IRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJrO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD:7 # h4>T]]9 MTJUFS?FަƢ?F'?FǿJWc^?P6 v;>JuM` ?)uJt  cIt=W(?IRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. 
_ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 $V+ZlJ]Uhz@!A!Z5?!?F9&sA #<# $B jfJ'j`00>oPaYc l]]aE:Ͷ0TbCcba`iK8K8h<5e۰ϿZk(qQlqe~EeFͭ||`ai !hA'o>"rA 5#sHD:7 # h4>T]]9 MTJUF蕯?Fcų[?F!?Flaxy?P6 v<>JuM` ?)uJt  {It=W ůIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUFA!z?FA]?F>WB?Ft=E?P6 v=>JuM` ?)uJt  wT vIt=W?mIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?Copy_rigTtt(c)t2U0 9tM c os f"!rԙ !a i n}.t Al U (s"e e 5v0d i!Em$}! K2?b6 ?ALb4($I3/ 4/U%b-|# |'Yb6?Fiie<59 FjX/G'0UBib6!}40 aFuJlJ8>UhAA@ !1!Z}o'- E|zo}#<b$] VJ:FQw0RS S IBf>QE2!?Fp\BU--# i[F @ h<53n CU9fKo]o  h5a9YUU aUUwi]Q ieq5 hb2r_U"rA #sHD:7 # h8>T ]]9 MTJUF gl6?FDb,)?FUw)٫?FQ`m ^?P6 >>JuM`lW? SuJt  LUMtA[_ʓWMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?/n' #M b7fJ:]h^PsojRփUaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUFE//?F_ 8?F*5?F$_D"w?P6 ?>JuM` ?uJt  z7It=WiIR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+Ab6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L(a2BO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4a2 9b`fJ۰Z@]0?c Q#Oc Q\}ȱ~aE:)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUFK?Fc>,Λ?FeTC?FǿJWc^?P6 v@>JuM` ?)uJt  jDFIt=W-M3}IR?8IlRv\#>2zGz?MHJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L(BO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!A!Z!?F9v"rA # sHD:7 # h#4>T#3 AMJUFQL ?F7ԩ+?Fs۰Z?FNb_+iFw?P6 A>JuM` ?uJt  Ç\UIt=W @ٶIRUo_}WIlTy6ވ#>2zGz?N@MJ@eb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L( DO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:rO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD:7 # h4>T]]9 MTJUFLli[?FQ>?F'?FǿJWc^?P6 vB>JuM` ?)uJt  ICIt=WN3sIRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 $V+ZlJ]Uhz@!A!Z5?!?F9&sA #<# $B jfJ'j`00>oPaYc l]]aE:Ͷ0TbCcba`iK8K8h<5e۰ϿZk(qQlqe~EeFͭ||`ai !hA'o>"rA 5#sHD:7 # h4>T]]9 MTJUF63?F8.G?F!?Flaxy?P6 vC>JuM` ?)uJt  ͧILIt=Wf IRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LQ DO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUFPHY?FpN?F>WB?Ft=E?P6 rAJuM` ?)uJt  PrIt=WoDIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<bc$] )VJ:FQw00RS S IBf>QE2!?Fp\BU--# i?[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU?wi]Q ieq5 h2r_U"rA #sHD:7 # h8>T ]]9 MTJUFǦmڷ?FWZgL?FUw)٫?FQ`m ^?P6 E>JuM`lW? 
SuJt  NKMtA[X_*`lMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?/n' #M b7fJ:]h^PsojRփUaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUFܹ?F cM?F*5?F$_D"w?P6 F>JuM` ?uJt  @@|It=Wn56uIR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ۰Z@]0?c Q#Oc Q\}ȱ~aE:)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUF (?FthZ?FeTC?FǿJWc^?P6 vG>JuM` ?)uJt  It=WX IR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%`|# |'1b6LQBBO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!Z!姈?F9"rA #sHD:7 # h#4>T#3 AMJUFr!Qw?Fvk?Fs۰Z?FNb_+iFw?P6 AJuM` ?uJt  1It=WOIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L(BO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4 9b`fJ:rO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD:7 # h4>T]]9 MTJUF!P4K?F l?F'?FǿJWc^?P6 vI>JuM` ?)uJt  {d5Z\It=W2;wEIRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 $V+ZlJ]Uhz@!A!Z5?!?F9&sA #<# $B jfJ'j`00>oPaYc ?l]]beE:Ͷǝ0TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>"rA #sHD:7 # h4>T]]9 MTJUFYzUv?FS s9?F!?Flaxy?P6 vJ>JuM` ?)uJt  V"(It=W7:LIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUF)"z>?F^i2VS?F>WB?Ft=E?P6 vK>JuM` ?)uJt  7It=WܯHIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<b$] RVJ:FQwa0RS S IBf>QE2!?Fp\B?U--# i[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU~wi]Q ieq5 h2r_U"]rA #sHD:7 # h8>T ]]9 MTJUF{KY?F#v\?FUw)٫?FQ`m ^?P6 L>JuM`lW? SuJt  o|MtA[_"oRMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?/n' #M b7fJ:]h^PsojRփUaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUF<5^?FUP?F*5?F$_D"w?P6 M>JuM` ?uJt  .9BWyIt=Wnq׏8IR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L( DO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ۰Z@]0?c Q#Oc Q\}ȱ~aE:)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUF?F͍~eTC?FJWc^?P6 N>JuM` ?juJt  HIt=WByIR?8Il?Rv\#>2zGz?@MJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!Z!?F9"rA #sHD:7 # h#4>T#3 AMJUFSw?FK?Fs۰Z?FNb_+iFw?P6 O>JuM` ?uJt  fd>It=WN6sgHRUo}WIlT?y6ވ#>2zGz?@MJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. 
WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!1l&fL# $<EfB 9b`fJ:r'@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij mh~EA#o>"rA 5#sHD:7 # h4>T]]9 MTJUF%u)H׾?FȐ?F'?FǿJWc^?P6 rAJuM` ?)uJt  ُuߓIt=Wί IRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTtt(c])t20 9tuM c os If"!r !a i n.t WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6LLs2BO ie<59 FXG/'0U`Rb6-!}40 V+Z lJ]UhzЎ!A!Z5!O?F9&sA#<# $s2 %jfJ'j`00>oPaYc l]]aE:Ͷc0TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>v"rA # sHD:7 # h4>T]]9 MTJUFf^oO?F]?F!?Flaxy?P6 vQ>JuM` ?)uJt  L+It=WݑHIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUFr,?F4w?F>WB?Ft=E?P6 vR>JuM` ?)uJt  6j>It=WNFFIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<b$] RVJ:FQwa0RS S IBf>QE2!?Fp\B?U--# i[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU~wi]Q ieq5 h2r_U"]rA #sHD:7 # h8>T ]]9 MTJUF_5=~?Fe,=Vݼ?FUw)٫?FQ`m ^?P6 S>JuM`lW? SuJt  ?aV_MtA[_=p'MVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?/n' #M b7fJ:]h^PsojRփUaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUF<@έ?F&¬?F*5?F$_D"w?P6 T>JuM` ?uJt  &lIt=WAI׃IR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJa6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6 LeE O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !6&f# &$<EfBM 9b`fJ۰Z@]0?c Q#Oc ?Q\}ֱ~aE:)'0:iG`iw*h<5a#j8oJiqze`~EA#o>"rA #sHD:7 # h4>T]]9 MTJUFpq?Fq֌?FeTC?FǿJWc^?P6 vU>JuM` ?)uJt  0@hIt=WmiIR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LQBBO ie<59 FXG'0U`RŴb6!}40 V+ZlJQ@hh|G!A!Z!?F9v"rA # sHD:7 # h#4>T#3 AMJUF߳ڮ?F$ ?Fs۰Z?FNb_+iFw?P6 V>JuM` ?uJt  JJ&qIt=WrhIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6big581e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:rO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD:7 # h4>T]]9 MTJUF>=9_?FL(Ѿ?F'?FǿJWc^?P6 vW>JuM` ?)uJt  V6>nIt=WgD~baIRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 $V+ZlJ]Uhz@!A!Z5?!?F9&sA #<# $B jfJ'j`00>oPaYc ?l]]|eE:Ͷǝ0TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>"rA #sHD:7 # h4>T]]9 MTJUFBP(?FxQ8?F!?Flaxy?P6 vX>JuM` ?)uJt  9KIt=WYGJIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FD'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUFMW?FM6N?F>WB?Ft=E?P6 vY>JuM` ?)uJt  1|It=WƯ:IRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! 
K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<b$] RVJ:FQwa0RS S IBf>QE2!?Fp\BU--| i?[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU?wi]Q ieq5 h2r_U"rA #sHD:7 # h8>T ]]9 MTJUF('J?FK5+:?FUw)٫?FQ`m ^?P6 Z>JuM`lW? SuJt  y SksMtA[_eMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA! U0|z3 0 @?ҏn'R# b7fJ:]h^PsojR֯UaEQ!?ߟ!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUFB?F̒-8?F*5?F$_D"w?P6 [>JuM` ?uJt  4E$pIt=WLx4IR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ۰Z@]0?c Q#Oc Q\}ȱ~aE:)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUFJ*ӏ?Fc?FeTC?FǿJWc^?P6 v\>JuM` ?)uJt  uIt=Wt]aIR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!Z!姈?F9"rA #sHD:7 # h#4>T#3 AMJUF"?F!S/?Fs۰Z?FNb_+iFw?P6 ]>JuM` ?uJt  B?˳It=WFp\cIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:rO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD:7 # h4>T]]9 MTJUF?F2\D?F'異JWc^?P6 ^>JuM` ?juJt  rs"It=WVIRkzIl?Rv\#>2zGz?@MJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigT}tZ(c)ZW20 9ZM ]c os f"R!r !a i n.Z AQl [&s"e e v0d @i!Em$+A?b6 ?AgJaT6` F !!l"zbAR +6ig5V181 ?2?;:C?'$4/U%G|# |'b6L3FO ie<59 FXG'K0U`Rb6!}4K0 V+ZlJ]Uhz!A!Z5!?F9&߮sA#`<# $B jfJ'j?`00>oPaYc l]G]aE:Ͷ0TbCcba`i?K8K8h<5e۰Zk(qׁQlqe~EeFͭ||`ai !hA'o>"]rA #sHD:7 # h4>T]]9 MTJUF79 Ğ?Fl_n.?F!?Flaxy?P6 v_>JuM` ?)uJt  sd It=W> IRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUF-#&\?FZ|?F>WB?Ft=E?P6 v`>JuM` ?)uJt  [It=Wq.IRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<b$] RVJ:FQwa0RS S IBf>QE2!?Fp\B?U--# i[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU~wi]Q ieq5 h2r_U"]rA #sHD:7 # h8>T ]]9 MTJUFW>?F7 ?FUw)٫?FQ`m ^?P6 a>JuM`lW? SuJt  /êQPͰMtA[_Y#MVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?ҏn'0S b7fJ:]h^PsojR֯UaEQ!?ߟ!`0 0c s!R'i e:}ɟh~5aef9oa|b c g~L1B~`1F~t1[D1e1R~1~1Z~1^~1b~Af~Aj~(AnT#3 AMJUF6٫&?Fh%4+ޱ?F*5?F$_D"w?P6 c>JuM` ?uJt  (LbIt=W,MyIR_FIl#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6&L D O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<EfBM 9b`fJ۰Z@]0?c Q#Oc Q\}|zeE:)0:iG`i?w*h<5a#j8oJiqze~EA#o>"rA k#sHD:b # h4>T]]9 MTJUFI)?FUj?FeTC?FǿJWc^?P6 vd>JuM` ?)uJt  6|.It=WL\} IR?8IlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. 
WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6LL0DO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!Z!?F9"rA #sHD:b # h#4>T#3 AMJUFe{5?F?Fs۰Z?FNb_+iFw?P6 e>JuM` ?uJt  DqIt=W{*IRUo_}WIlTy6ވ#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6&L0D O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<EfBM 9b`fJ:r@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij mh~EA#o>"rA #sHD:b # h4>T]]9 MTJUFS?FަƢ?F'?FǿJWc^?P6 vf>JuM` ?)uJt  cIt=W(?IRkzIlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6LL DO ie<59 FXG/'0U`Rb6-!}40 V+Z lJ]UhzЎ!A!Z5!O?F9&sA#< $B %jfJ'j`00>oPaYc l]]aE:Ͷc0TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>v"rA # sHD:b # h4>T]]9 MTJUF蕯?Fcų[?F!?Flaxy?P6 vg>JuM` ?)uJt  {It=W ůIRE! YBIl p#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?A mT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6&L2B O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !19b$< $2M 9b`fJ: m٠uۘ]ᇄ0:mOc i'A~aEeD?c  V#NeFaze<5eF|lVaiU?3Lh~EA#o>"rA k#sHD:b # h4>T]]9 MTJUFA!z?FA]?F>WB?Ft=E?P6 rUAJuM` ?)uJt  wT vIt=W?mIRݮIl$dvĤ#>2zGz?@M JAe #z (#+#+#&*Jp!p!5 g`?CopyrigTtt(wc)t20 9tM c o%s f"!r !ua i n.t_ Al (Us"e e v0 d i!Em$}! K2?0b6 ?ALcb4($3/ 4/U%-X|# |'b6?Fiie<59 FX/G_'0UBb6Z!}40 aFuJlJ8>UhAA Ў!1!Z}'[- E|zo}#;<b$T] VJ:FQw0RS S IBf>QE2!?Fp\BU--# i[F @ h<53n CU9fKo]o ?ڵ h5a9YUU aUUwi㼕]Q ieOq5 h2r_U"rA #sHD:b # h8>T ]]9 MTJUF gl6?FDb,)?FUw)٫?FQ`m ^?P6 i>JuM`lW? SuJt  LUMtA[_ʓWMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @i #zv t#.b( (+&B"f ?APb)4(z$D3//& 115 k`?CopyrigTtk(c])k2009kuM0c0os0If21r01a0i0n.k WAl @ 8sBUe0e0v @d@14-3K 7!bO&& b~59 &&XGK"'0U!R&!40R FJlJ Q! (sQsQ]31eA-!ZU0|z 0f @?n'B# b7fJ:]h^Psoj?R֯UaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUFE//?F_ 8?F*5?F$_D"w?P6 j>JuM` ?uJt  z7It=WiIR_FIl#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6fLF O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<EfBM 9b`fJ۰Z@]0?c Q#Oc ?Q\}ֱ~aE:)'0:iG`iw*h<5a#j8oJiqze`~EA#o>"rA #sHD:b # h4>T]]9 MTJUFK?Fc>,Λ?FeTC?FǿJWc^?P6 vk>JuM` ?)uJt  jDFIt=W-M3}IR?8IlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!Z!?F9"rA #sHD:b # h#4>T#3 AMJUFQL ?F7ԩ+?Fs۰Z?FNb_+iFw?P6 l>JuM` ?uJt  Ç\UIt=W @ٶIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6&L D O @ie<59 FXG'0U`Rb6!}40 V+ZMQI0h|G!A !16&f &$<EfBM 9b`fJ:r@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij mh~EA#o>"rA #sHD:b # h4>T]]9 MTJUFLli[?FQ>?F'?FǿJWc^?P6 vm>JuM` ?)uJt  ICIt=WN3sIRkzIlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+Z lJ]UhzЎ!A!Z5!O?F9&sA#< $B %jfJ'j`00>oPaYc l]]aE:Ͷc0TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>v"rA # sHD:b # h4>T]]9 MTJUF63?F8.G?F!?Flaxy?P6 vn>JuM` ?)uJt  ͧILIt=Wf IRE! YBIl p#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. 
WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6LL DO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!19b $< $B 9b`fJ: m٠uۘ]0:mOc ?i'A~aEeDͅ?c ? V#NeFaze<5eF|lVaiU3Lh`~EA#o>"rA #sHD:b # h4>T]]9 MTJUFPHY?FpN?F>WB?Ft=E?P6 vo>JuM` ?)uJt  PrIt=WoDIRݮIl$dvĤ#>2zGz?@M JAe #z (#+#+#&*Jp!p!5 g`?CopyrigTt (c) 2U0 9 M c os f"!rԙ !a i n}. Al U (s"e e 5v0d i!Em$}! K2?b6 ?ALb4($I3/ 4/U%b-|# |'Yb6?Fiie<59 FjX/G'0UBib6!}40 aFuJlJ8>UhAA@ !1!Z}o'- E|zo}#<bX$ [ VʼJ:F?Qw0ȒS S I?Bf>QE2!?Fp\BU--# i[ÏF @ h<53n ?CU9fKo]o ڵ h5a9YUU aUUwi]Q i?eq5 h2r_U"rA k#sHD:b # h8>T ]]9 MTJUFǦmڷ?FWZgL?FUw)٫?FQ`m ^?P6 p>JuM`lW? SuJt  NKMtA[X_*`lMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @i #zv t#.b( (+&B"f ?APb)4(z$D3//& 115 k`?CopyrigTtk(c])k2009kuM0c0os0If21r01a0i0n.k WAl @ 8sBUe0e0v @d@14-3K 7IbO&&i bP~59 &&XGK"'0U!R&!40 FJlJ $Q! (sQsQ]31ZeA!ZU0|z 0 @?n'0S b7fJ:]h?^PsojR֯UaEQ!?!`c0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUFܹ?F cM?F*5?F$_D"w?P6 q>JuM` ?uJt  @@|It=Wn56uIR_FIl#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6fLF O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<EfBM 9b`fJ۰Z@]0?c Q#Oc ?Q\}ֱ~aE:)'0:iG`iw*h<5a#j8oJiqze`~EA#o>"rA #sHD:b # h4>T]]9 MTJUF (?FthZ?FeTC?FǿJWc^?P6 vr>JuM` ?)uJt  It=WX IR?8IlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6LL2BO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!Z!?F9"rA #sHD:b # h#4>T#3 AMJUFr!Qw?Fvk?Fs۰Z?FNb_+iFw?P6 s>JuM` ?uJt  1It=WOIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6&L D O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<EfBM 9b`fJ:r@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij mh~EA#o>"rA #sHD:b # h4>T]]9 MTJUF!P4K?F l?F'?FǿJWc^?P6 vt>JuM` ?)uJt  {d5Z\It=W2;wEIRkzIlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6LL DO ie<59 FXG/'0U`Rb6-!}40 V+Z lJ]UhzЎ!A!Z5!O?F9&sA#< $B %jfJ'j`00>oPaYc l]]beE:Ͷ10TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>"rA #sHD:b # h4>T]]9 MTJUFYzUv?FS s9?F!?Flaxy?P6 vu>JuM` ?)uJt  V"(It=W7:LIRE! YBIl p#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!19b $< $B 9b`fJ: m٠uۘ]0:mOc ?i'A~aEeDͅ?c ? V#NeFaze<5eF|lVaiU3Lh`~EA#o>"rA #sHD:b # h4>T]]9 MTJUF)"z>?F^i2VS?F>WB?Ft=E?P6 vv>JuM` ?)uJt  7It=WܯHIRݮIl$dvĤ#>2zGz?@M JAe #z (#+#+#&*Jp!p!5 g`?CopyrigTt (c) 2U0 9 M c os f"!rԙ !a i n}. Al U (s"e e 5v0d i!Em$}! K2?b6 ?ALb4($I3/ 4/U%b-|# |'Yb6?Fiie<59 FjX/G'0UBib6!}40 aFuJlJ8>UhAA@ !1!Z}o'- E|zo}#<bX$ 0[ VʼJ:F?Qw0ȒS S I?Bf>QE2!?Fp\BU--# i[ÏF @ h<53n ?CU9fKo]o ڵ h5a9YUU aUUwi]Q i?eq5 h2r_U"rA k#sHD:b # h8>T ]]9 MTJUF{KY?F#v\?FUw)٫?FQ`m ^?P6 w>JuM`lW? SuJt  o|MtA[_"oRMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @i #zv t#.b( (+&B"f ?APb)4(z$D3//& 115 k`?CopyrigTtk(c])k2009kuM0c0os0If21r01a0i0n.k WAl @ 8sBUe0e0v @d@14-3K 7IbO&&i bP~59 &&XGK"'0U!R&!40 FJlJ $Q! (sQsQ]31ZeA!ZU0|z 0 @?nK' # bS7fJ:]h^PsojR֯UaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUF<5^?FUP?F*5?F$_D"w?P6 x>JuM` ?uJt  .9BWyIt=Wnq׏8IR_FIl#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. 
Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6fLF O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<EfBM 9b`fJ۰Z@]0?c Q#Oc ?Q\}ֱ~aE:)'0:iG`iw*h<5a#j8oJiqze`~EA#o>"rA #sHD:b # h4>T]]9 MTJUF?F͍~eTC?FJWc^?P6 y>JuM` ?juJt  HIt=WByIR?8Il?Rv\#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$+A?b6 ?AgJaT6` F !!l"zbAR +6ig5V181 ?2?;:C?'$4/U%G|# |'b6L DO ie<59 FXG'K0U`Rb6!}4K0 V+ZlJQI0h|G!A!Z!?F9"rA #sHD:b # h#4>T#3 AMJUFSw?FK?Fs۰Z?FNb_+iFw?P6 z>JuM` ?uJt  fd>It=WN6sgHRUo}WIlT?y6ވ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$+A?b6 ?AgJaT6` F !!l"zbAR +6ig5V181 ?2?;:C?'$4/U%G|# |'b6L3FO ie<59 FXG'K0U`Rb6!}4K0 V+ZlJQI0h|G!A!1&f $<EfB 9b`fJ:r@]0:iG`Oc ?qy"zeEajoJiaze<5e'f`0mFaij mh~EA#o>v"rA # sHD:b # h4>T]]9 MTJUF%u)H׾?FȐ?F'?FǿJWc^?P6 v{>JuM` ?)uJt  ُuߓIt=Wί IRkzIlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigT}tt(c)tW20 9tM ]c os f"R!r !a i n.t AUl (s"e e v0d @i!Em$+A?b6 ?AgJaT6` F !!l"zbAR +6ig5V181 ?2?;:C?'$4/U%G|# |'b6L3FO ie<59 FXG'K0U`Rb6!}4K0 V+ZlJ]Uhz!A!Z5!?F9&߮sA#`< $B jfJ'j?`00>oPaYc l]G]aE:Ͷ0TbCcba`i?K8K8h<5e۰Zk(qׁQlqe~EeFͭ||`ai !hA'o>"]rA #sHD:b # h4>T]]9 MTJUFf^oO?F]?F!?Flaxy?P6 v|>JuM` ?)uJt  L+It=WݑHIRE! YBIl p#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!19b $< $B 9b`fJ: m٠uۘ]0:mOc ?i'A~aEeDͅ?c ? V#NeFaze<5eF|lVaiU3Lh`~EA#o>"rA #sHD:b # h4>T]]9 MTJUFr,?F4w?F>WB?Ft=E?P6 v}>JuM` ?)uJt  6j>It=WNFFIRݮIl$dvĤ#>2zGz?@M JAe #z (#+#+#&*Jp!p!5 g`?CopyrigTt (c) 2U0 9 M c os f"!rԙ !a i n}. Al U (s"e e 5v0d i!Em$}! K2?b6 ?ALb4($I3/ 4/U%b-|# |'Yb6?Fiie<59 FjX/G'0UBib6!}40 aFuJlJ8>UhAA@ !1!Z}o'- E|zo}#<b$] VJ:FQw0RS S IBf>QE2!?Fp\BU--# i[F @ h<53n CU9fKo]o  h5a9YUU aUUwi]Q ieq5 hb2r_U"rA #sHD:b # h8>T ]]9 MTJUF_5=~?Fe,=Vݼ?FUw)٫?FQ`m ^?P6 ~>JuM`lW? SuJt  ?aV_MtA[_=p'MVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @i #zv t#.b( (+&B"f ?APb)4(z$D3//& 115 k`?CopyrigTtk(c])k2009kuM0c0os0If21r01a0i0n.k WAl @ 8sBUe0e0v @d@14-3K 7IbO&&i bP~59 &&XGK"'0U!R&!40 FJlJ $Q! (sQsQ]31ZeA!ZU0|z 0 @?nK' # bS7fJ:]h^PsojR֯UaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUF<@έ?F&¬?F*5?F$_D"w?P6 >JuM` ?uJt  &lIt=WAI׃IR_FIl#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6fLF O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<EfBM 9b`fJ۰Z@]0?c Q#Oc ?Q\}ֱ~aE:)'0:iG`iw*h<5a#j8oJiqze`~EA#o>"rA #sHD:b # h4>T]]9 MTJUFpq?Fq֌?FeTC?FǿJWc^?P6 v>JuM` ?)uJt  0@hIt=WmiIR?8IlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!Z!?F9"rA #sHD:b # h#4>T#3 AMJUF߳ڮ?F$ ?Fs۰Z?FNb_+iFw?P6 >JuM` ?uJt  JJ&qIt=WrhIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6fLF O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<EfBM 9b`fJ:r@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij mh~EA#o>"rA #sHD:b # h4>T]]9 MTJUF>=9_?FL(Ѿ?F'?FǿJWc^?P6 v>JuM` ?)uJt  V6>nIt=WgD~baIRkzIlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. 
WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+Z lJ]UhzЎ!A!Z5!O?F9&sA#< $B %jfJ'j`00>oPaYc l]]|eE:Ͷ10TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>"rA #sHD:b # h4>T]]9 MTJUFBP(?FxQ8?F!?Flaxy?P6 v>JuM` ?)uJt  9KIt=WYGJIRE! YBIl p#>2zGz?@M2JAeWb #z@Q (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!19b $< $B 9b`fJ: m٠uۘ]0:mOc ?i'A~aEeDͅ?c ? V#NeFaze<5eF|lVaiU3Lh`~EA#o>"rA #sHD:b # h4>T]]9 MTJUFMW?FM6N?F>WB?Ft=E?P6 v>JuM` ?)uJt  1|It=WƯ:IRݮIl$dvĤ#>2zGz?@M JAe #z (#+#+#&*Jp!p!5 g`?CopyrigTt (c) 2U0 9 M c os f"!rԙ !a i n}. Al U (s"e e 5v0d i!Em$}! K2?b6 ?ALb4($I3/ 4/U%b-|# |'Ib6?Fiie<59 FjX/G'0UBib6!}40 aFuJlJ8>UhAA@ !1!Z}o'- E|zo}#<bX$!B] VʼJ:F?Qw0ȒS S I?Bf>QE2!?Fp\BU--| i[F @ h<53n CU9fKo]o ڵ h5a9YUU aUUwiǤ]Q işeq5 h2r_U"rA 5#sHD:b # h8>T ]]9 MTJUF('J?FK5+:?FUw)٫?FQ`m ^?P6 >JuM`lW? SuJt  y SksMtA[_eMVYBMpry2VI'>\??\.?lA8 M2ll/f?=@MJB& @i #zv t#.b( (+&B"f ?APb)4(z$D3//& 115 k`?CopyrigTtk(c])k2009kuM0c0os0If21r01a0i0n.k WAl @ 8sBUe0e0v @d@14-3K 7IbO&&i bP~59 &&XGK"'0U!R&!40 FJlJ $Q! (sQsQ]31ZeA!ZU0|z 0 @?n'."# b7fJ:]h?^PsojR֯UaEQ!?!`c0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUFB?F̒-8?F*5?F$_D"w?P6 >JuM` ?uJt  4E$pIt=WLx4IR_FIl#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6&L72B O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<Ef72M 9b`fJ۰Z@]0?c Q#Oc ?Q\}ֱ~aE:)'0:iG`iw*h<5a#j8oJiqze`~EA#o>"rA #sHD:b # h4>T]]9 MTJUFJ*ӏ?Fc?FeTC?FǿJWc^?P6 v>JuM` ?)uJt  uIt=Wt]aIR?8IlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!Z!?F9"rA #sHD:b # h#4>T#3 AMJUF"?F!S/?Fs۰Z?FNb_+iFw?P6 >JuM` ?uJt  B?˳It=WFp\cIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6fLF O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<EfBM 9b`fJ:r@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij mh~EA#o>"rA #sHD:b # h4>T]]9 MTJUF?F2\D?F'異JWc^?P6 ݉>JuM` ?juJt  rs"It=WVIRkzIl?Rv\#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 `?CopyrigTtZ(c)Z20 9ZM c. os f"!r !a i n.Z Al [&s"e ej v0d  i!Em$+A?b6 ?zAgJaۢT6` F !!l"zbjA) +6ig5UV181 ?2?F;:C?'4/U%G|# |'b6LA#BO ie<59 FXG'0U`Rb6!}40 V+ZlJ]UhzA!Z5!?F9&s7A#< X$" jfJ'j`00>oPaYc l]]aE:?Ͷ0TbCcba`iK8K8h<5e?۰Zk(qQlqe~EeFͭ||`ai !h`A'o>"rA #sHD:b # h4>T]]9 MTJUF79 Ğ?Fl_n.?F!?Flaxy?P6 v>JuM` ?)uJt  sd It=W> IRE! YBIl p#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!B19b$< $BM 9b`fJ: m٠uۘ]ᇄ0:mOc i'A~aEeD?c  V#NeFaze<5eF|lVaiU?3Lh~EA#o>"rA k#sHD:b # h4>T]]9 MTJUF-#&\?FZ|?F>WB?Ft=E?P6 v>JuM` ?)uJt  [It=Wq.IRݮIl$dvĤ#>2zGz?@M JAe #z (#+#+#&*Jp!p!5 g`?CopyrigTt (c) 2U0 9 M c os f"!rԙ !a i n}. Al U (s"e e 5v0d i!Em$}! K2?b6 ?ALb4($I3/ 4/U%b-|# |'Yb6?Fiie<59 FjX/G'0UBib6!}40 aFuJlJ8>UhAA@ !1!Z}o'- E|zo}#<b$] VM:FQw0RS S IBf>E2!?Fp\BU--# i[F @ h<53n CU9fKo]o  h5a9YUU aUUwi]Q ieq5 hb2r_U"rA #sHD:b # h8>T ]]9 MTJUFW>?F7 ?FUw)٫?FQ`m ^?P6 >JuM`lW? 
SuJt  /êQPͰMtA[_Y#MVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @i #zv t#.b( (+&B"f ?APb)4(z$D3//& 115 k`?CopyrigTtk(c])k2009kuM0c0os0If21r01a0i0n.k WAl @ 8sBUe0e0v @d@14-3K 7IbO&&i bP~59 &&XGK"'0U!R&!40 FJlJ $Q! (sQsQ]31ZeA!ZU0|z 0 @?nK' # bS7fJ:]h^PsojR֯UaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c g~L1B~`1F~t1[D1e1R~1~1Z~1^~1b~Af~Aj~(AnT#3 AMJUF6٫&?Fh%4+ޱ?F*5?F$_D"w?P6 >JuM` ?uJt  (LbIt=W,MyIR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L(BO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4 9b`fJ۰Z@]0?c Q#Oc Q\}|zeE:O)0:iG`iw*h<5a#j8oJiqze~EA#o>"rA #sHD: # h4>T]]9 MTJUFI)?FUj?FeTC?FǿJWc^?P6 v>JuM` ?)uJt  6|.It=WL\} IR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!Z!姈?F9"rA #sHD: # h#4>T#3 AMJUFe{5?F?Fs۰Z?FNb_+iFw?P6 >JuM` ?uJt  DqIt=W{*IRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:rO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD: # h4>T]]9 MTJUFS?FަƢ?F'?FǿJWc^?P6 v>JuM` ?)uJt  cIt=W(?IRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 $V+ZlJ]Uhz@!A!Z5?!?F9&sA #<# $B jfJ'j`00>oPaYc l]]aE:Ͷ0TbCcba`iK8K8h<5e۰ϿZk(qQlqe~EeFͭ||`ai !hA'o>"rA 5#sHD: # h4>T]]9 MTJUF蕯?Fcų[?F!?Flaxy?P6 v>JuM` ?)uJt  {It=W ůIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD: # h4>T]]9 MTJUFA!z?FA]?F>WB?Ft=E?P6 v>JuM` ?)uJt  wT vIt=W?mIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?Copy_rigTtt(c)t2U0 9tM c os f"!rԙ !a i n}.t Al U (s"e e 5v0d i!Em$}! K2?b6 ?ALb4($I3/ 4/U%b-|# |'Yb6?Fiie<59 FjX/G'0UBib6!}40 aFuJlJ8>UhAA@ !1!Z}o'- E|zo}#<b$] VJ:FQw0RS S IBf>QE2!?Fp\BU--# i[F @ h<53n CU9fKo]o  h5a9YUU aUUwi]Q ieq5 hb2r_U"rA #sHD: # h8>T ]]9 MTJUF gl6?FDb,)?FUw)٫?FQ`m ^?P6 >JuM`lW? SuJt  LUMtA[_ʓWMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i b~59 && XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?ҏn'B# b7fJ:]h^PsojR֯UaEQ!?ߟ!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUFE//?F_ 8?F*5?F$_D"w?P6 >JuM` ?uJt  z7It=WiIR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ۰Z@]0?c Q#Oc Q\}ȱ~aE:)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD: # h4>T]]9 MTJUFK?Fc>,Λ?FeTC?FǿJWc^?P6 v>JuM` ?)uJt  jDFIt=W-M3}IR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!Z!姈?F9"rA #sHD: # h#4>T#3 AMJUFQL ?F7ԩ+?Fs۰Z?FNb_+iFw?P6 >JuM` ?uJt  Ç\UIt=W @ٶIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. 
Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:rO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD: # h4>T]]9 MTJUFLli[?FQ>?F'?FǿJWc^?P6 v>JuM` ?)uJt  ICIt=WN3sIRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 $V+ZlJ]Uhz@!A!Z5?!?F9&sA #<# $B jfJ'j`00>oPaYc l]]aE:Ͷ0TbCcba`iK8K8h<5e۰ϿZk(qQlqe~EeFͭ||`ai !hA'o>"rA 5#sHD: # h4>T]]9 MTJUF63?F8.G?F!?Flaxy?P6 v>JuM` ?)uJt  ͧILIt=Wf IRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD: # h4>T]]9 MTJUFPHY?FpN?F>WB?Ft=E?P6 v>JuM` ?)uJt  PrIt=WoDIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<b$] RVJ:FQwa0RS S IBf>QE2!?Fp\B?U--# i[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU~wi]Q ieq5 h2r_U"]rA #sHD: # h8>T ]]9 MTJUFǦmڷ?FWZgL?FUw)٫?FQ`m ^?P6 AJuM`lW? SuJt  NKMtA[X_*`lMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?ҏn'# b7fJ:]h^PsojR֯UaEQ!?ߟ!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUFܹ?F cM?F*5?F$_D"w?P6 >JuM` ?uJt  @@|It=Wn56uIR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ۰Z@]0?c Q#Oc Q\}ȱ~aE:)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD: # h4>T]]9 MTJUF (?FthZ?FeTC?FǿJWc^?P6 v>JuM` ?)uJt  It=WX IR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!Z!姈?F9"rA #sHD: # h#4>T#3 AMJUFr!Qw?Fvk?Fs۰Z?FNb_+iFw?P6 >JuM` ?uJt  1It=WOIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:rO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD: # h4>T]]9 MTJUF!P4K?F l?F'?FǿJWc^?P6 v>JuM` ?)uJt  {d5Z\It=W2;wEIRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 $V+ZlJ]Uhz@!A!Z5?!?F9&sA #<# $B jfJ'j`00>oPaYc ?l]]beE:Ͷǝ0TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>"rA #sHD: # h4>T]]9 MTJUFYzUv?FS s9?F!?Flaxy?P6 v>JuM` ?)uJt  V"(It=W7:LIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD: # h4>T]]9 MTJUF)"z>?F^i2VS?F>WB?Ft=E?P6 v>JuM` ?)uJt  7It=WܯHIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! 
K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<b$] RVJ:FQwa0RS S IBf>QE2!?Fp\B?U--# i[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU~wi]Q ieq5 h2r_U"]rA #sHD: # h8>T ]]9 MTJUF{KY?F#v\?FUw)٫?FQ`m ^?P6 >JuM`lW? SuJt  o|MtA[_"oRMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?/n' #M b7fJ:]h^PsojRփUaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUF<5^?FUP?F*5?F$_D"w?P6 >JuM` ?uJt  .9BWyIt=Wnq׏8IR_FIl#>2zGz?N@MJAeb #zb(#+B#+#&Mp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L(\"BO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4\" 9b`fJ۰Z@]0?c Q#Oc Q\}ȱ~aE:)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD: # h4>T]]9 MTJUF?F͍~eTC?FJWc^?P6 ݤ>JuM` ?juJt  HIt=WByIR?8Il?Rv\#>2zGz?@MJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6LLyBBO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!Z!?F9"]rA #sHD: # h#4>T#3 AMJUFSw?FK?Fs۰Z?FNb_+iFw?P6 >JuM` ?uJt  fd>It=WN6sgHRUo}WIlT?y6ވ#>2zGz?@MJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!1l&fL# $<EfB 9b`fJ:r'@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij mh~EA#o>"rA 5#sHD: # h4>T]]9 MTJUF%u)H׾?FȐ?F'?FǿJWc^?P6 v>JuM` ?)uJt  ُuߓIt=Wί IRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTtt(c])t20 9tuM c os If"!r !a i n.t WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+Z lJ]UhzЎ!A!Z5!O?F9&sA#<# $B %jfJ'j`00>oPaYc l]]aE:Ͷc0TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>v"rA # sHD: # h4>T]]9 MTJUFf^oO?F]?F!?Flaxy?P6 v>JuM` ?)uJt  L+It=WݑHIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+%#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'b6L5*6O ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!19b$<# $4h" 9b`fJ: m٠uۇ]0:mOc i'A~aEeכD?c  V#NeFaze<5eF?|lVaiU3Lh~EA#o>"rA #sHD: # h4>T]]9 MTJUFr,?F4w?F>WB?Ft=E?P6 v>JuM` ?)uJt  6j>It=WNFFIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<b$] RVJ:FQwa0RS S IBf>QE2!?Fp\B?U--# i[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU~wi]Q ieq5 h2r_U"]rA #sHD: # h8>T ]]9 MTJUF_5=~?Fe,=Vݼ?FUw)٫?FQ`m ^?P6 >JuM`lW? SuJt  ?aV_MtA[_=p'MVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?/n' #M b7fJ:]h^PsojRփUaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUF<@έ?F&¬?F*5?F$_D"w?P6 >JuM` ?uJt  &lIt=WAI׃IR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ۰Z@]0?c Q#Oc Q\}ȱ~aE:)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD: # h4>T]]9 MTJUFpq?Fq֌?FeTC?FǿJWc^?P6 v>JuM` ?)uJt  0@hIt=WmiIR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. 
_ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!Z!姈?F9"rA #sHD: # h#4>T#3 AMJUF߳ڮ?F$ ?Fs۰Z?FNb_+iFw?P6 >JuM` ?uJt  JJ&qIt=WrhIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:rO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD: # h4>T]]9 MTJUF>=9_?FL(Ѿ?F'?FǿJWc^?P6 v>JuM` ?)uJt  V6>nIt=WgD~baIRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 $V+ZlJ]Uhz@!A!Z5?!?F9&sA #<# $B jfJ'j`00>oPaYc ?l]]|eE:Ͷǝ0TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>"rA #sHD: # h4>T]]9 MTJUFBP(?FxQ8?F!?Flaxy?P6 v>JuM` ?)uJt  9KIt=WYGJIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD: # h4>T]]9 MTJUFMW?FM6N?F>WB?Ft=E?P6 v>JuM` ?)uJt  1|It=WƯ:IRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<b$] RVJ:FQwa0RS S IBf>QE2!?Fp\BU--| i?[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU?wi]Q ieq5 h2r_U"rA #sHD: # h8>T ]]9 MTJUF('J?FK5+:?FUw)٫?FQ`m ^?P6 >JuM`lW? SuJt  y SksMtA[_eMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?/n' #M b7fJ:]h^PsojRփUaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUFB?F̒-8?F*5?F$_D"w?P6 >JuM` ?uJt  4E$pIt=WLx4IR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ۰Z@]0?c Q#Oc Q\}ȱ~aE:)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD: # h4>T]]9 MTJUFJ*ӏ?Fc?FeTC?FǿJWc^?P6 v>JuM` ?)uJt  uIt=Wt]aIR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!Z!姈?F9"rA #sHD: # h#4>T#3 AMJUF"?F!S/?Fs۰Z?FNb_+iFw?P6 >JuM` ?uJt  B?˳It=WFp\cIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:rO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD: # h4>T]]9 MTJUF?F2\D?F'異JWc^?P6 ݴ>JuM` ?juJt  rs"It=WVIRkzIl?Rv\#>2zGz?@MJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigT}tZ(c)ZW20 9ZM ]c os f"R!r !a i n.Z AQl [&s"e e v0d @i!Em$+A?b6 ?AgJaT6` F !!l"zbAR +6ig5V181 ?2?;:C?'$4/U%G|# |'b6L3FO ie<59 FXG'K0U`Rb6!}4K0 V+ZlJ]Uhz!A!Z5!?F9&߮sA#`<# $B jfJ'j?`00>oPaYc l]G]aE:Ͷ0TbCcba`i?K8K8h<5e۰Zk(qׁQlqe~EeFͭ||`ai !hA'o>"]rA #sHD: # h4>T]]9 MTJUF79 Ğ?Fl_n.?F!?Flaxy?P6 v>JuM` ?)uJt  sd It=W> IRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. 
_ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD: # h4>T]]9 MTJUF-#&\?FZ|?F>WB?Ft=E?P6 v>JuM` ?)uJt  [It=Wq.IRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<b$] RVJ:FQwa0RS S IBf>QE2!?Fp\B?U--# i[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU~wi]Q ieq5 h2r_U"]rA #sHD: # h8>T ]]9 MTJUFW>?F7 ?FUw)٫?FQ`m ^?P6 >JuM`lW? SuJt  /êQPͰMtA[_Y#MVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?/n' #M b7fJ:]h^PsojRփUaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c g_+s +EiHG%>F8Z#B9? ~d`ܮ :@+H| <Ss񨚳 P x O   oG  ߫( L!X $ ~( >,諶 / h3(= P7 : A D] WHH膸 K OH >Sϸ V Zq X^t bHHv ex Ii#'z l)| wp `tH x {#' 2߇ z) )8 h #'@ 6B QHE ژ)8G |'hI 6>~K 00"'M 0 N882 74 )6 @8 ߹): oj  W&? \$H1 3 0%%5 17 *:  7<  )h> #' @:" RbG% 8' h) :)+ :- ެ; T82 : : H {(8 }J v((= _/8X? ( ( 2[ "Y( &YX *!( .% 17 R5I( 8nYX < 8@H C"i G QKFiXNXiR~nYAVYYYo]"eHWa(Tg0;j~FinXiȳ&ru'y9}K}4jY=aH(NjjjY#y߰o4i7jPࡾjRijT'yVĬyYQ"iH[94ix]ڷY_o1 i(4~yH6cƎix88:4i<zY?"YHxg܊yy~(xY8YhYoKnYmY85yh-FH#/8x 7o~{(HU!) n$Z(lH+~x_/2)6ߚu:Ʃ8>ZhAl@IE)BIEL 8G5P(hITHKW(Mj[~( ^z8"bxh$Mf &i(mƩ*gqZ(-ulX/xz1W|~35(8lƩX:Z<l>@z(~ X,1UFD  h(^TYYBBUF~?x<F BP(?P? V B666 6 B66h9ȅ2H??\IB@L&d2?-(\.Y'(.sUM!K&Pm/:  0oB`2Fiber], pt c t ansm t #n tw rk equ p e t h rd 0-1e!91A^| (gUGr& wr  .@ wvwpppopp wwpwxwwwwpx=pp pwvLDrag thuesapWonodwipe.bL&d2ɿ~?P?=t˿)"g??)j3@ ?DHD: # h4>T]]9 MTAUF}Da 0?FH7r?F_/++?FXϸ?P6 `A$M (#<#lPJuM{` ?#{{20{F{ZujJtg  ɟ߇{tј` 'ɭ?s>JU2zGz?@M[#|Jk&A@$7b#z #!"b(b)+&J[e#t"t"?'6 I?A$/%Be#115 "`?CopyrigTt (c)020090M0c0oKs0f21r01a0i0n.0 Al0 8sBe0e0v@d0S"{14\#-Xe#3 7'6\% ZLFiie1E9 FXG/'0UR'6-!B40 FJlJ7Q0hLG1ZA!Y(}=K?Fl@I0`fS?Ks`ZsLbL\"JTy+HlPrW?F腺Z>FbyC ua)H0Ynǿl_Vef[U>dxԷErlg uVcQ$@aELe|}?FCʞ*Nc`vna Oc>wok&O8XWleBPmlfVArieoo.HLe+pQ?FYR&`O]vol``}; PaiEz6!iDF`ޣKH_SFn.ki`"rA k#ڃjnw5}6!i{HD: # h4>T]]9 MTUF)Q~ R?F\L#?Fx?F=:?P6 AA M (<Pdx#  $<PdxmJuM` /?9"9"29"F9"Z9"n9"9"9"9"/(Su()Jt%!  _lcd6tY1s2˟ke5n7rs=e57Fq1 >JU2zGzK?=@M3J6A@n4b%Cz@C2b3H%@3HNKNFJ[3 B?F &?A9D POqEUB3AA5 2`?CopyrigTt (c)5P20AP95PM-Pc.+Pos%Pf3R$Qr'P`Qa3Pi%Pn.5P Al{P +XsReSPej+PvPdsP2@AB3b-3 S  WF5 \fiieU9 fjXg2'0UbŴF!D0 TIf]jlJ @maa ]xQQ9A8`3F+G?F| @"$#әF1 "X3]/Lֲ1r2JAn?FkS?FΘtpb N ?_)Ay*t^(|CN7ȗu$#D[DP1X3aT1 :" $z 2? I? uK?$"X2a@s(BS2}:r5qEqEtFdo?F[K ۃ?FJ(OL?P$DT!  bqt%@{rfJHG^"f~Bt2)Џ⏪32Uu5*&?Fl5?F)b&&~?Fcle?P,A&!٘UީȽ|;!GmH﹡^@pw*oAΏ ՟4ޯFUuu #?F{+.W?,b26I>P-nx:aG|ZÉ-DuH rε50!au>uM"D$.@4#ZqTmz?FD?Fb.\i ]aJ`?@~{bcEt/^Vu1 a{&DFT?@l4ϳ$3<@ƒ>Pn}11WBx~|kz}~tesDޠux)LBk(a\6 `G $#Ppztt~Pz+ݿ]s~aϔb-4쇂3~uy0cفMk?Fǒ?F 6_?P(AT~' 6>Ľ?e+P?Toa?CB1wa1 83쇖y&Nfn~bS9xvD<$Q$#QcuNI|" 0)^VUıwm2G:LB3 .쇪;utq~F>`7bt?di д9Brq 8CJ &*짦f4omGL3A/S/#HD: # h4>T]]9 MTMUF؋@?Fce?F5svL*?F7= ?P6 ]A]M (<P]d_]x#_(_<s__JuM` ?_""2"F"Z"n"""uJt  oUxJ5t!21l]5 7,ȃ5$7ǁO>JU2zG/z?=@Ms3J6A@4b3z郷0392bR808;6J[}3 w(B??F ?A4 T? EB}3AA5 2`?CopyrigTt (c)@20@9@M@c@os@fBAr@Aa@i@n.@ AlP HsRe@e@v-PdPk2ADt3-}3C eG?Ft5 r\ViieIU9 VXW2'0U.b?F!ZD0 VZl1 &@(aa 3ArQ1q8Z}3F{و>r/2"0,s\>rt2JA%)?F+?FvWZk>Fy4 wa)`Ayf/!\3s M_lu|a<1u#Ca aܵ0 lkj? 
&_8)?"a@=s(<" !rÀ5 EbqEnu3.h?Fn`?F=/*lq?FF|oĵWA|GK|Wu|V#H1CRW%7#I3Et2~IUnuj "?FBĥج|+zw~>{Iσ]X41yu,A1BaNycЍ@2Wa4y}K6#I@ZltF~fUds!'Hɇ?FQy~<?F$f㷖꿈xywF,(|pgF|'c7|,X&l x קi4bqJ+ o N9z)e~zy0l44t;Ff4t/p1znuf.ʐ Lˮ?F I`V'0'|֠{yl7#y%m"gs7#It~jyozY_ƀ%çM,fFI|^t-,捇&F9~lFV6#I.@t~3ot~F+}|$+p?F?~a@v2rA 3z9Xt|L;W-yG7IGYt#~HD: # h4>T]]9 MTMUF}W?F?F5svL*?F7= ?P6 >M (<Pd_]x#_(_<s__JuM` ?_""2"F"Z"n"""uJt  wCHݛ5t!2-8Lc5 7,ȃ5$7ǁO>JU2zG/z?=@Ms3J6A@4b3z郷0392bR808;6J[}3 w(B??F ?A4 T? EB}3AA5 2`?CopyrigTt (c)@20@9@M@c@os@fBAr@Aa@i@n.@ AlP HsRe@e@v-PdPk2ADt3-}3C eG?Ft5 r\ViieIU9 VXW2'0U.b?F!ZD0 VZl1 &@(aa ]ArQ1q8Z}3F{و>r_2h"0,sf >r)t2JA%)?F+?FvWZk>Fy4 wa)`Ayf/!\3s ޿Mlu|a<1u#Ca aܵ0 lkj? &_8S?"a@=sB(<" !r5 EbqEnu3.h?Fn`?F=/*lq?FF|oĵWA|GK|WuǢ|V#HCRW%7#I3Et2~IUnuj "?FBĥج|ﲖ+zw~>{I]X41yu,A1BϠaNycߍ@2Wa4y}K6#IZltF~fU~ds!'Hɇ?FQy~<?F$fՖxywF,(|pgF|'c7|,X&l x ק1^uJ+ o N9z)e~zy0l44t;Ff4t/p1znuf.ʐ Lˮ?F I`V'0'|֠ yl7#y%m"gs7#It~jyozY_ƀ%çM,fFI|^t-,捇&F9~lFV6#I.@t~3ot~F+}|$+p?F?~a@v2rA 3z9Xt|L;W-yG7IGYt#~HD H# h4>T]]9 MMUF:TJ3?F ?FW7")?F(GP@?P6 f@#A#M (<Pd]x#$<PdxmJuM` /?""2"F"Z"n"""u )Jt!  ߆,ߑ'7t172DJM)527HB)5L7r Dɸ!>JU2zGz?=)@M3J6߀A@E4bM3z03a2b808RKFJ[3 PB?gF ?$A4 O5E3AA5 G2`?CopyrigTt (c)@20P9@M@c@oKs@fBAr@$Qa@i@n.@ Al?P HsCRePe@vUPd7P2AED3 5 -_C %GgFViieE9 VXW2'K0UVbgF!DK0 f!jl$1N@(aa ]3AQ18`3FPʞ>FQ?͉@"#] >"3n3v1lr2JA_[3nB?F#lNa?F ?Fe a)A@y?2oW Vytu#|_ӄ 5u3?\1a u׿@ ]h? d?#t?"2%a@ks(<29tK1r55EqEu}?FG?F U?F(k=|S@ܱ|ŗ-_`ǿmgV0ÖpITf#was2Eu2* ?Fd|ڀ >{$*Jr| ZȘ*'vY9!=a,z0WJ⅏ze#wFUuUfNڿ?F6 +C?F}ۛ)NQv1Vy>ﴺkcY︉0h ӯe#wZq~zd! nS@3(}|{gu2lqZt@>% rgrܥg/?B~fȿe#wϢnuex9?Fqp1C5bb=|=I"~;Jt^E>~KAOA/0ƛOu=e#w yJG?Fާ;נpIZ=gE܎=|grE2`~%-Bg0L0;e3w0䬌3t-~F`3FgG ߈A2rA 3+j[K~@])nATGf@B7e3w7I#Ha`dgHD H# h4>T]]9 MUFQ|3?Fͪ'?FH(])?F?\s?P6 A > (<@P]dx#D $<PdxJuM` ?9"9"29"F9"Z9"n9"9"9"9"/()u()Jt%!  [4c7tY1s2&뒺e5n7-V;e57n?ͷqJ1 >JU2zGz?R=@M3J6A@4b%Cz@C.2b3H%@3HNKNFJ[3 B?F I?A9D POqE*3AA5 2`?CopyrigTt (c)5P2U0AP95PM-Pc+Pos%Pf3R$Qr'P`Qa3Pi%Pn}.5P Al{PU +XsReSPe+P5vPdsP2AED3 (5 -_ SK  WFVi@ieE9 VXW2'0UbţF!D0 If]jl J @maa ]xQQ9A8Z3F꺷S"$#P8hB1 2X2?@s6vK r2J:jtFEZ qrs%@"s )1HA\HG@HA"lNa?F|/(?Fb) a)AyF7U1 "fWmA}$# c6n>}X3?$1 a }x ?@ %? z?M?$"X2%a@s(}{^<[}̈ιƋ/90!3'x+;m3; 3)1F;+x(Nڿ?FT&B?Fb{f,-JQ1;Ejv㓌 +ߌK$ؿLrZ\b03>P1Z;~+p•%ʲSpFZ>ppd-Tfs1u񶛄D𷱄K 瓌)h:GrKr߿?#4B31;;uSq~F7ސ?FF;Kf@9Brq 8CysZLs뿇* %L3 /1#;H4B`gHD: # h4>T]]9 MTMUFM&d2?F}{?P6  >M (< Pd_#x_tt_ ___(_<JuM` ?2FZnuJt -t!"J&b$=>tJU2Uj2?R=@MY3Ji6A@%2308b9R;6J[c3r27a? F ?A?9Bc3BfAfA5 2`?CopyrigT}t (c) W20@9 M@]c@os@fBRAr@Aaa0i@n. AUl@ HsBe@e@v@d@Q2_AcDZ3ę-c3rC 2rG FZ5 >\iViieU9 iVXW\r2'0j3 FZ!&D0 VZlJ`>UhLaLa ]Rd u 1A A1AW8Pa o|2U?F,%-@Nx[Jk#b?o--hi  6vZ2JAp?F}l^?F +ْ?F G&S- a),Ayx|S<ݻuz}קu#qSBa y= 30 4*?? ?"2a@/s<"t!r55TqE`ud ?F /wpc*uGs>{T#|f{}b*yZ>voigjJ*#;%7f2pU`uTK`j0rr?F);L^fFp/Xx1ѣ?V|%O4Tqde㼕?FHyt߲Xzΐ_3]ؘ#yԄT|qTS|t]<8l j m?@P3ɡեZ:+a{c3~|+2|}ZliL~|d($7! C]{-m;BP4~ΐKjgVhߩ~D8V-l|G2y)PlƳÿտa:dBl#ꃋ-@t32y ?c$p3~,xEr l|0?FS$^ΐWqp>hmN.Ey?'<(+t^+}m܀3}Tq(*wmnp:~qмZBT<i}~۸73q1&3–(Ko|>F}ѣ ?Fs?FUޒ[g$,y~vZ؆t=<}!uɡaѩYK&h0?FL#|ߘ 1D|\ ťa8 ?Fd\Ў!ݯ|? ﮉ2ɡbҨg7+?FBl;|qyS ~L](MB-eS?F@J9F:?FrIÅ hߏ¢h|@b$yoSk9r|hͦ |ɡe2oe1Br{a 53Q2HD: H h0>Th]]9 MIAUFd^V?F߫C?F>?F?_j|w?P6 8 >M $mJuM` /?`OO.u>Jt;  sxЕt=M4&ٕQ䣯+? 
G>yJU$"#?.& ?JbA@T"Z+bR\)Z&J#2N贁Nk.#@M# J&]/o/T"Z+'T"*B#BA1A15 `?Cop rigTt (c)x0]2`09x0Mp0]cn0osh0fv2Rg1rj01av0ih0n.x0 AUl0 n8s2e0en0v0d0@:1Y>4#-'M3 M7 .&%p0b5(9 DFX'dG_'0UB.&J!I$a dFxJlJ]Uhz_1!T!Պ(Z#FF-D}ؔ?cJ:Z 4qgjGQ RV"JmTtV,Щ=?FG с aW)O YPRSW:fOtG4Pi3GYA; l'z&@B!@C?F[,P4S *?j0[OeT?UƖaU5mTV.b9 Nj?FZp^9o`isoT0e(a~Th]]9 MIAUFHm?Fʋ?FqZzS̩?Fo_ w?P6 8 >M $mJuM` /?`OO.u>Jt;  zt/ Wٕᆝ?a%?uG>yJU$"#?.& ?JbA@T"Z+bR\)Z&J#2N贁Nk.#@M# J&]/o/T"Z+'T"*B#BA1A15 `?Cop rigTt (c)x0]2`09x0Mp0]cn0osh0fv2Rg1rj01av0ih0n.x0 AUl0 n8s2e0en0v0d0@:1Y>4#-'M3 M7 .&%p0b5(9 DFX'dG_'0UB.&J!I$a dFxJlJ]Uhz_1!T!(Z#Fb?d ?F94:,X,TGZ SY RV"JmTig/?F6o?FşY] `)O 1RSx 4S x$G4:74C͐GYA; ^^s¯&@B:~mTn%y2[9)A\(aU5UF O 2Wα?FG?8LolQ,i33((?l+dUl1"Canoo2m_;T#rA $sHD: H h0>Th]]9 MIAUFk"?Fwc?Fymh?F_!sw?P6 8>M $mJuM` /?`OO.u>Jt;  aMtl#/ו1`ng n}_GT>yJU"#?.& ~?JbA@uT"Z+b\)Z&J#2N贁Nk.#S@M#J&]/o/T"Z+'T"U*B#A1A15 `?Cop rigTt (c)x02`09x0Mp0c.n0osh0fv2g1rj01av0ih0n.x0 Al0 n8s2e0ejn0v0d0:1Y>4#-X'M3 M7.&%p0b59 DFX'dG/'0UB.&%!I$a dFxJ lJ]Uhz_1!T!(Z#FFút[?J:Z 4=iLGQ RSV"JmTVoEXNx A`?F&nƜ?FӪr#> `)O 4S }#GYkhkn/e~:KF4}3>YA; ΣW$@B4mT[PRSbU5U2{6E,?F72*ʶ'?FD;6o;^}p/l@i.fMdӟ2Z[lxg҇.atoo2m_UT#rA $sHD: # h4>T]]9 MTAUF\f?FQ)Xd?Fv?FZW?P6 `>$M (#<#lPJuM{` ?#{{20{F{ZujJtg  ntL_פ| 'RGU˲s>JU2zGz?@M[#|Jk&A@$7b#z #b(b)+&J[e#t"t"?'6 I?A$/%Be#115 "`?CopyrigTt (c)020090M0c0oKs0f21r01a0i0n.0 Al0 8sBe0e0v@d0S"{14\#-Xe#3 7'6\% ZLFiie1E9 FXG/'0UR'6-!B40 FJlJ7Q0hLG1ZAY(э6?FξRI0`fS?Ks`ZsLbRP\"JT6mPD2n ?FHOX\>F->wW Wa)H0YnǿlᜯVef[U>dxԷErlg u?VcQ$@aELeANS?FZ ͉?Fޓ{lʤm>wok&O8XWleBPmlfVArieoo.HLec=}pLwS?FXʍ`ˏvol``}; PaiEz|DF`ޣKH_SF:x~ALrA k#ڃjnw5}Oi{HD: # h4>T]]9 MTAUF8XV?Fp#enD?Fv?FZW?P6 `>$M (#<#lPJuM{` ?#{{20{F{ZujJtg  ґߝ t$wOפ| 'RGU˲s>JU2zGz?@M[#|Jk&A@$7b#z #!"b(b)+&J[e#t"t"?'6 I?A$/%Be#115 "`?CopyrigTt (c)020090M0c0oKs0f21r01a0i0n.0 Al0 8sBe0e0v@d0S"{14\#-Xe#3 7'6\% ZLFiie1E9 FXG/'0UR'6-!B40 FJlJ7Q0hLG1ZA!Y(э6?FξRI0`fS?Ks`?Zs$LL!bL\"JT6mPD2n ?FHOX\>F->wW Wa)H0YnǿlVef[U>d~xԷErlg uVcQ$@aELeANS?FZ ?F{lʤm>wok&O8XWleBPmlfVArieoo.HLec=}pLwS?FXʍ`ˏvol``}; PaiEz|DF`ޣKH_SF:x}~AL"rA k#ڃjnw5}Oi{UGDF P# h @T(PYYU UF~?M&Wd2P} (-. +0@ 0T0h0|Y03 - -3 --3--& &#:u` ? T,_'@@TUThh|U|UU&&K"&5&&0"u:)!z2<7q:U? ?; 4b0{[`+:B Fy3U"'ĉQ9" L A$6 K",G'@I@@q@E?DF@[r&BCkBuq@`-A0"Ruu8 @GC0$5Q;(?RK"U& d4; :)9u `u`["[b!;$wu`񉾑4?R5ػ3a171`Vis_PRXYcPm!#5``08``R`?CopyrF`gPt0(V`)02d`090MF`c:}`oH`ofbvary`aa`iw`n 0Al` }hsbeH`e}`v`dRh13d45!B=31ZrA66 24#30Ur?P].w,-^q^$P$sT<`o0mR`s`a>rv "saI8G28\vvk`2 hY,TYYRJ )eUF,,?F6_v?P fQ#"I#M !:#Q>NIU>bIY>vI]>I M ? I ? I ? I"+M?+Q#*"I*S%&S]V#f"+YV#z"V#"IaV#"IeV#"IAV#"IqV#" IV#"I S&2"IS&2IS&.2IS&B2I V#V2 I>2!>2IS&~2 IV#2+S&2"IS&2+S&2+V#2&?PI3 B&?I("C2B?IJCZB%&"9'a'77@I2I2GUG1uM` ?"I&6BDBXBlBBVVVVVV(V$(V8(VL(V`(/Vt(CV(WV(B,B,B,B,B,B};Āi 1a` Q|Ϳs2e8-ߕq`R~,y\1BTC\Zݼc6[?F[hsF߱~Q>qˁ&t ?FgMuN?Dv` ?,?6 EPU-?-+" Ԍ/UFz[aDv>tJ~M^3;0?gt{s4)/~Z!WJ=˧ب 5Z=fO/߱&////??{?q?;d?*V-?F`xM0P01W=͝4F??=ӎC !]=@/V54, OOS9qW@0kM9~0iZ <ȝ0*DT! SU7:w$ ]UZNOW~/mà' ]: e9T ,,? 
6v?G@B1B$Gr65߱O__,_>_P_Fs[Q?F/e[p؟?M ?Sl ʟ_tfS$lߕ름Fx1cn)@aCo?20 6-Ԭ,65vٝJ w(-_q9KrUuw;y+OUF!R>7~n字/䉄5 _fL6&lLXqzݍMJ13LIo%]f Fz^(ʲS | u6)|LXX'} Ĥԭ67,M'5LmmFs]mrtoɊ!3EٿEݍv LSB@ d@}U00|Wdby#ANU 0 0 0 qH3ۓ] _WbB'P.>3]_A0hx%>Hލ?1;>s,}O+NڿW >gEn(F_>3 ]Q3}2bu$ˣSfQNFe'-$Sد|;~*YQoU(_HԪb~xBß;6H~ TEbݍ}nO*/<9n-@P,e'}eݴآm'viޞMTQދP?F/VJ$CճѵȌܷݍ1+OOĮY"ؑX:fDx~@Wt)L[~3x _AVW~IA]m_jݍX;AQq__s*~pm.@a)sI_8`}^ߔfЄ_j]~V^1P0oBoTzyE|N8^~{*ϳ}8CތҢ&s%X>F @4ύ?F*棕 L4?n~g&vn˹5so L~<@D~ew,[{ܿc=(SHǑo ,AU@y2ѝrͲs!?F N?F3?FcT(}03@ya?B᳠{j@1y8bvw ??a醼nMk?FCZώek/Ê?F@ٕ:?P(ܴ'B1/?{!K @-X;L_ @1aѶߘ>3C?FqFLc0/?Fd?<+v7D?6.zY Uco غ9JȆ߈ѿrTߣDqV\s"7G\DN,9FslD7Q-r|O╣'YQF{ F@) l$u(U)}}bQr)g|}1p1<G7t}bHA8sah77@y5?F]}H 0NXUVMFQ¡/.}=!8՞%د-+1pQ4ADF*.}K 0BoaKVWg>t}x=$GB=bvaA Q/R"i L1}/9/K/]')uw?F%ly?F*껩/ѵP﹈b@D>3bSSmlGAȔ|#h`*b5%F[ A-@zjaDi~b.x4dM?O+zOOu (?:?L?^?p??d??Fb2*.Q_ DCY?ˉFY] ) ~ zM?F!d]CZXjĞ82Dҥ4Y 0YLD|^,DYA_^F{aE`?`?Nd,/__b6mʝǧNAIY/////?r6FH?> _$s 1+/,5B/Zf#ȸB HK ~??vLl⿤愈?[G=u?aK&d23L4CUG DF P# h @T(PYY#EψUFL&d2?|>?F~?]P} . B!D:J :^:rY: Au` ?"66JJ^^rrju# ^Jb"h,h'U ?L?4b {N[`:!#"&a#'b"^DQ)3 z$2l!L1$l" l"3'@>>ÿ@q05?B46 r&23c2ui0`҈-1Bu:@ l"3-A7Bl"F L$9u`u `m"[ bb!wu`)$"%أ#Q!'1`Vis_PRXYcPm!#5XP1XP02`?Co_pyr>PgPUt (NP) 2`PW09 M>PcuPo@Pof}RnQrTqPQa}PioPn] AlP uXUsRe@PeuPvP dB!#d$%b!(=#&#Rd&& "$B##0Ub#]&g,7^aNX@cTg@ daA+_ s11"hh22)t3*tBaaR+t"qqb(!pacb"!6u\%G!!["-@@Bq253T@b"q3L [ELP@[UL0]E5 s_$\&F3s͵NSYFue7A6P,@QjPUn725^}@$1p]]( sUQŅUQbN9P %BR&RpRQQcPFL]`NETWOFPK SHPP5 DROIR7IR5S!:0@ L*FrLr]@o"( qffs_PU 7ex}PcPn2l"! 0n<@5~b -"ER"^br36 Bm Ak Adzau"u22u3/BaQaQR5Q5Q"2b3 !!E|7l!!8ԲAP! !1; 8b,/$1JbKa'q'qne!b($pǕW ]-@$ SPaCPqA9P _Equ>PpRP!ePk`25Xq\` TsPxcP`DP!vR*`3^5b` Sb8C.SwXOQEdeyMPnfPc}PuRU`1%PRd$  N$m $`E I[!" PPe5R'`y 7sRU dCeEWAe C`eV^` SRiP@Q//` T;/M/"LoPcX/Ҭ///B lPiP+q/ ?/?R% RoPo_?Pm??NwRkx"adB`K?g IDPQdPS6O@HL7/nOnsICOHL8dhOPO#%AmV2irUQ ]Q 1O+_6 fH_Z_\G` fM#AtO_BO_OM@CO(oFNRCbogq%C2mQtBQW6oHL_Roof MHHiAאe2U w'0U W0 /ix4*є"%ݩ܅uuY>2 H4ֹhńЋńЋNńЋTńЋ>ńЇHD: %# h4>T]]9 M JUF/\r?F.U?F~?F_w?P6 AJuM` ?uJt  ;uAIt=WzIRUIl7O],#>2zGz?)@MJ߀A@ebM #zwb(b%)2+2&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$[w?b6 ?A$$4/U%-|# |'b6%?6iie<59 6X7'M0Ub6!}4K0 -FAJlJAI0h7!1!ZF9|$0f2 <ڒ#  OR|VJa9Z9P] PReS[RUE:dHܣM\]\ɱQ<5U oYaU526_HU"]rA #cHD: "# h4>T]]9 M JUF ?F"]?F|>?Fuvw?P6 ]AJuM` ?uJt  ].It=W`ĆW #JRbJlLXz#>2zGz?S@MJA@eb#z(b)R,+,&Jj!j!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v d c!Eg$[;?\6L ?A$./O%-v# v'\6?6iieP659 6X7'0UpB\6!w4%0 'F;JlJAC0h@7!1!ZF?Hܣ~0, <pB#\  IRvVJ:~?6VOS sgZ^TURUEUEBQG\ZYpBaիQ65UQ[fS.aYUb520_BU"rA #cHD: ## h4>T]]9 M JUFyE+?F0]я?F~?F@?d{w?P6 >JuM` ?uJt  vr It=WP|IRsUIlO.a#>2zG_z?=@MJAw@eb #zb( (2+T2&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$[}! K2?b6 &?A$ 4/U%-|# e|'b6 ?6iie<59 6X7y'0Ub6!}40 -FAJlJAI0h7!1!効ZFHܣۄ0Y <? 6ybi  OR|VJa9PY]PPReS[RUE:]|$M\Q`YdC4U<5U=VN_YQU526_HU"rA #cHD: $# h4>T]]9 M JUF ?FDL?F|>?F~w?P6  >JuM` ?uJt  ].It=W,t@ "JRb~l>t2UK?=@MJA@bJ + &Ji[J'?a& ?A)B !!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r 1uai n. _ Al90 (Us=2e0e vO0 d10!H$-,# 'a&K?6i@iek59 6X7'K0a&!|$K0 FJlJ8>UhAA !!ZF@?d3{  </.a#]M 1R^VJ:]ɜHܣ/_AZ2Ey#xUEUVS s#FT=RxUk5U؝Qˠ\BY3QX5*n?֬BoAZ7],XU[Bl|Q2_*U"rA >sHD: ## h4>T]]9 M qAUFA[?FFw:?FE12Oz_:w?P6 L[ >  (m$JuM{` ?Agg2DuVJtS  .!tw|3s,kIɸ _>JU2N贁/Nk?=@M3#JC&A@b#zw u#bR( (+&jJ=#%'?& ?A"b${$:3/*T'B=#115 `?CopyrigTt (c)020090M0c0os0f21r01a0i0n.0 Al@ 8sBe0e0v@d0+"|144#-=#3 7&4%"XOFiie2E9 ԆFXG|L"'0URi&!40 FJlJ" >UhiQiQD ]GQ N1DJc$LBhL!l%,W2[AN11(Z=#FC[RRTFQ ucR/Q Rf4"J:GdFX_!0wbfc L|c :D)$aER!Jj^opdiUjh2E~V! 
b# 2 ?߮.hOEu<ɛ?F#Pw|hؾ6|ެ^hu?F/Lw|iU6|^^e~?EYo< thQ[T`vg2w 0M6weQT`Dgwӏ u5XPo`Iϣ4oj7::eQT`Bvu*< f|hQT`r :{T_t Pa?F3SXk͖o?F vT?P);N 90 ,)Ri0m6uR. 6u`yyb !` /K!? -hO.?Ra @cB)r5%aQ K)XiyBG\h5b-Ob?Fԇ*sR6|FsaeQ Q~B?F)qqiQhAeQ9dϢqwiaeQe񯁜ai|hQEWXYWuwZl a++hf! l?FQD0)ˑ6|P?U/hL t9?Fz2)v<>KU6|*hQyո ?ghhWyh= A=}!eQW%|6^T?`NAA_owA j;[6te UI:vdv =KylI?ε~T_Ţr+a W  /~nIX(dur|?F 4?F?PَKK :d?Ltؤ?yl!?(!@[l Xj2GtaCpzm&褯 "4gyC?); 1ecDhe2GoYe%3rQ M3#k2HD: H ih ,>T  9 MJUF ?F~?F|>һP6 m >JuM` ^?5uJHbcFFmtulyt b#tI>2zGwz?@MJA@Wb)+& !Jg"~& E"\Ab)+'A,A/UB115 `?CopyrigTt (c)I020U09I0MA0c.?0os90fG281r;0t1aG0i90n.I0 Al0 ?8s2eg0ej?0v0d0ŤM3 b7~&XXt5i b9 `!X78G'0UB~&%0R 8FLJlJ8>UhAA( 1:9m EsBU[JHU؝Hܣˠ Xm] 3Ey%U3^֬_Z7]G,XHU)a[BUQHUF@?d{lQY?.aXQ,o`ɜQۤoZ 2QXUH_Z_l_~\_H8>t"s ûdGC, FȘZߓ#'wfB C ~dܮ @+C SsGX7`t PhIK)M" 6%?8u( G00UFD  h(^TYYBBUFL&d2?x<F BP(?P? 8$ B66ȅH?j|;? 5JB@?\.ϗsU!&;/ҍ  0RB`=Bridge,lo ica ,n tw rk sy t m t p"r pPy"i"s c n"cB i2%z e^| R(* G@& ߃l?"488Q ]frsww ww ~ww w!%} Iceqs^QZQDrag onut hepe.Rih-c]lckaUdcos UPo etQeAvUwreP 5aab|>~??[q㿦)j?&Iz?&d23L4CUG DF P# h @T(PYY#EUFL&d2?@F|>?@_~w?P} Y.  !:J :^:rYDu` ?~"66JJ^^rru# *^:"@,@'酅#U2N贁Nk?H]b{# ?#^4Wb {['`:!#"h69#':"UY^"Q)+3 62D!L1$D" D"&3ɜ'>>ÿ@kq05?460r&BCku2u{0`i-1$Bu@ D"3?AIBD" $$9u`u `"[ b:!𿻐u`$i"&){#)Q!'1`Vis_PRXYcPm!y 5jP1jPAB`?CopyrPPgPt (`P) 20P9 MPPcPoRPofRQrPQaPiPn AlP XsReRPeJPvPdB !r#(d$r%:!B={#&r#dd&& t"r$,7^aN.@.cT<*Po mPsPQ>g da +_c&1&1r"hh")ht3* ut2aaB+ti"aa bo(!`ac:"!6uM\%$G!![2?@@Bq%CT@ br"q3$ 35$6Ep05E5c_r$4&F3s͵N?SYFu eIAt,@)Q ySUn%6}61`]]o(c-U)Q)U)QNqKP % TR&RpRQQuP$]`NETWuOXPK SHPUP VRO!RI S1L0@T$J$r5GYkr"o(pa&&s_<*-pq o(A֨57IABKFkq`RPQsPPU exPuP2D"! 0<@Eb Z"E*"br&3BmACAfa^ur"ku"xu3/\Ԭ20 iBGQGQi"2 b3 !!E0|qq7D!C8ArRA!!A;Jb61\b]aaae!bo(q`軕1W c]?@ SPaCPaAKP EquPPpBdPePC8%XI[m` {TPuP`DBPvR83^xmz"` SQbBRdP5d=QMPnfPcPuRiEQ`]&1PRd Nm 8E !3o! PP_q eiGR'`Q sRU<&eФ/YAeC`Ue.^Ԓ` SRiP@Q8T/%/mLPcX_/҄///B }lPiPq/p/?o* RPo7?PEd?v?YNwRkP"a =Yko HiԿAhǖ꥔ Uはw'0Ur/lAx ;,u;uM1, صpH ֍@噄ѤѤ&Ѥ,ѤHD: %# h4>T]]9 M JUFX^?@ ׬?F~?@?P.6 AJuM` ?urJt  ]It=W_7],IRUIlw#>2zGzw?@MJAw@eb #zb](b%)2+2&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$Ɇ[?b6 &?A$4/U%-|# e|'b6 ?6iie<59 6X7{'0Ub6!}40 -FAJlJAI0h7!1!効Z@9|$0Y2 <ڒ#4  OR|VJa9Z9P]PReS[RUE:d_HܣM\]\ɱQ<5U oYaUb526_HU"rA #cHD: "# h4>T]]9 M CAUF|>?@:x??@V|+q?P6 8]AI]M ] (JuM` ?#SS2uBJt? tzEUQؕKJbtU]OK>JU2zGz;?@M#J&A@bS#zI G#a(+bo)|+|&UJ#!!5 `?CopyrigTt(c)20 9M c oKs f"!r 1a i n. Al70 (s;2e0e vM0d/0!EH$#[#""?06 ?Ag$I~/%-#,# '6%KO/Fi@ie59 /FXEG'0UBŬ6!40 wFJlJ]Uhz!hAg!(Z#F@H/DžB0J>| 8Ene&KQ4\ RV"JT~?@gf_?F)?@ $C?8S sBKYbFe>J^R,UFeTGK!Q? ӧ|+@B:TPܛ\YܨMJe5$!JP/.`*7 `3iK=Dn鷯hZbU5TQ`R?@[uo0lBqaRSgdd*lxrl{!Joo2_Uvg"rA f# =HD: ## h4>T]]9 M !AUFnx?@?d{?Fg݅?@@?P6 $&> JuM` ^?1?u.Jt+  Rqtes.aqzWWqI7>JU2zGz?=@MJA@b1#zt' %#b?()1 ?(Z+Z&UJ!!5 `?CopyrigTt (c) 20 9 M c. os f"!r !a i n. Al0 (s2e ej v+0d 0 !E$[! s2?6 I?AE$ \/}%b-# 'Y6? Fiied59 FjX#G'0UBX640 UF"iJlJAq0h7!1E!`Fmr?@Hӣ۬0$*fq`7$d 6yb7  R VJa9Q|TK ST1 QE:]|$0]Y_dC4a8^] 7sP@EU < KB?x_QYFf*ɴ%Ud,Z7"_+dK lV}?? g52^_pUE"rA kD#TsHD: $# h4>T]]9 M AUF|>?@~?P6 n# >] JuM` ??Su.Jt+L iteJ]zb>tJU2U?=@MJ?A@(b')4+4&J[vr'?& ?A/@)B!!5 `?Copy_rigTt(c)2U0'09M0c0os 0f2 1r 0F1ai 0n}. Ala0U 8se2e90e05vw0dY0! $-# ',&?6iie59 65X7/'0ʼn&-!$0 /FCJ)lJ<>Uh3 C ]1!ZF@@_?d{ J*4 $d?.a7^QiBQ ]RVJ:D]ɜHܣ[_mZ2Ey7UEfU$S sTrTiRU5U؝QX\nY3QX5Vn֬nomZ7],Xf8:`ϋ iؚ~Gc2anY_Wt T}զDTk 0Q: %C?J i3l}f*i&udWl˔alb+d ?ܲVXU2D_VU"rA k#-UHLuD" # ;3h , T  JEuUF'_?@~?F|>ếP VNM A #$Ae  >uA` ?KG `mm. 
RSu#\>b]XFOmrߞuZptY b7#t&>U2N贁Nk?@9 3#>C&A@q" u(b)e+&f" !It$>=##"4#K"& "{"b)&;'f",f"/N =#11;#`?CopyrigXt (c)020090M0c0oKs0f21r01a0i0n.0 Al @ 8sBe0e0vZ!@d@+"BM=#3 7&4"Ht1EiIfE 4%!X|FGG'0UB&h50 TFJl>(fUlI 8%#IA{!1(R!gG?@c\;0R dUeR ?!c e(T@|$?Fn-?@Z %_C?YsB\Ӌ$UX?uj3dR-=wnQAY ܲVX3 e^Fx*aqRS3jIBFm_[viH{goRJdo~b KYeTGQ# #s sFR-Dn?% FZߝ4# $6wYB H }7~dܮ o@+W oMsGXJ8^ PxADHا@Lx?OPUFD # hVTYYzU?@ ?IF?h PaR"|ou]ofu2/uU ` C&nect&r` e-?{U H  ^   -}|5 p`I`V?}`xa833ސ]3σ3u 33?, ?Gxpx^& This coUnetrWaumtUcl#yrebtwe h *hp3is.HD # =h @>T(]]9 T 2AYUa? uP6 u]`u buA_ u  >3.TAuV ` lxMui"a)J@-?x;'6"wE- xo'6"y( _rq?o@I ?$m%? @horVA$"38*EL2-_br @P' Z6\11u .$n2y2^2n2u9ȑ#ULH/MM^1 # A D5 :0`Vis_SE.cTm!#20ED)`?Copy@0igTt; kc)j@DCU9j@M-@c@0o/@'ofhBYAr\@AUah@iZ@n7@ j@WAl@ `HsBUe/@e@0v@d7@N@B= #x  A G{?9#r22&0444 MG4'ALB5?_$k&(SZ6Y8# #2"qlSF,C`l>lUhr5 QA+/J *&%(og5k#p f3e'r#TA(11MJEsooR6'2Y7E5S3Wc7r516Ũ_aJ2_quU\DQ>0]b;)`R@As-@E T@xh@DiA eJi=S]UBTJg'0SV!@LA,SrWg 1Cy$A$AMJ6!Hp,@R/TAEb?RQP? _H>Ff"s O`E )FZ߂U#HUV7Bk WeHdo@XP+U \"sUFD  h(^TYYBBUF\.ߗ?x<F BP(?hP?X8 ]B66 TbH?~? ?B@L&dW2?Y (sU/!-&PO/v:  0eIB`Modemw,n tw rk p rui h"al !v c !^| f(U GT&( F?_""U*++'?MwpwtwpwDwwp tGwww G@wwDGwD@w p@wyww[wp|`npXDrag onut hepe.Rih-c]lckaUdcos UPo etQeAvUwreP 5aabi4FѿL&d2?j?oҿ=t??\.r3r\.CUG DF P# h @T(PYY#EUF\.?@i4F?@M&d2ɻ?P} č,.B ! :J :^:rYD 7 7l7u` ?"66JJ^^rr)u#",'񤔅qU ??4^b,0{][`:#A82A6#'%Q)U" ^/LV1$  d7'>>ÿ@qZ05?46F0r.B4CZ2u0`-V1ZRBu@ 3mAwB h$9u`u `"[bz!u`R$"% #WQ 1 71`Vis_PRXYcPm!#5P4o67>B`?CopWyr~PgPt"0u(P)"020PU9"0M~PcPoP'ofRQrPQUaPiPn "0WAl` Xs bUePePv`dR 1#d$%!*(=#6#@d66 2$B##0Ub3]fg,7^q+^\@V\cTg Pda+_Msd1d1"hhr2)$t3*tR+tHR@ q q"@q@qTb(!PpaFc"+m6u\%RG81>1[B2m@@Rr5.CT@Fb"31 UFHUM5Ms_$&ׄF3s͵NSYF)UW0TewA,@WQ S+epwr5}1PpP"(A2yT?+>BB).B4KCFq`R`Qs\~PU T`xPP2 0<#@4Eb_ &2CE"brd3vRrӟ(Mff.F_<1-mTwNcPpu"ur2u3/R0HRuQ$uQ"2Tb3 &1&1E|FF0RQ(dPpWQ,[[ ]m@0` USPaPePl\rsQAyP E u ~PpPePr5X 1V` ?TPEѣPH` D`vR3𯴊ߜb` SabߣPLB PMBPd`rUdKMPnafPcPubHU\nHUQi{]d1;PRd Nm.0Š)U,03!8 PPr0BQhHpcu]uR`sRUѠeANe>`pe^bt1R; SbiPa¢&T18LPcX!mFTXjB?l'`iPkqo&0Z38RPoO,d/?c dDψ(ȱȱ"(Ms3) &&&WUՁ:p/` %R&RE"ibȱ`NEWOPKPSUH`PV0 ROj2URIV0S$AQxR7 i q'U,ygm5xbb7'dũfɵR3D-҃aa8')uyqarL?aTe5x`H7vAտD?aKDKDaKЅDKDK7eDAKTeDJHD: # h4>T]]9 M JU@i4F?@??@hĻ?P6 ܠAJuM` ?juJt At=W ףp= #JRbJlQO#>2zGz?)@MJ߀A@ebM#z\(b),+),&J[(.w?& ?A$T./O%B!!5 g`?CopyrigTtZ(c)Z2009ZM 0c. 0os0f21r0>1a0i0n.Z AlY0 8s]2e10ej 0vo0dQ0@!$b-# 'Y&?6iie59 6X7'0UpBŴ&!$0 D'F;JlJAh h7!h1!YZ@x f, <#  IRvVJ:ǿxT]]9 M JU@"&d2?@ `?@BP(ĺP6 ]AJuM` ?ju#t  m۶mIt=WwIR$I$+IIl#>2zGz?@M|JA@e7b #zb(b%)2+2&J[/.?& I?A$4/U%B!!5 g`?CopyrigTt (c)020%090M0c0oKs 0f21r 0D1a0i 0n.0 Al_0 8sc2e70e0vu0dW0!$-X# '&?6iie59 6X7/'0UvBŇ&-!$0 -FAJUlJ8>UhAA 1h1!T12 $<cV5{ WR~VJd2L&?@bX,Ʃ ]S fR#mS ?ZZQ4E^0__ %zzX5:U XYePY؉X5aZjoohY}aUVeEVŠRldQY)+R+X2>_;"rA 5#dsHD: # h4>T]]9 M JU@xmJuM` ?uJt  Am۶mIt=WHzGIR]Il#>2nz?3@MJAw@eb #zb( (2+T2&J[ (p"?0& ?A$ 4/U%BB!!5 g`?CopyrigTt (c])020%090uM0c0os 0If21r 0D1a0i 0n.0 WAl_0 8sc2Ue70e0vu0dW0!$-# '&?6iieP59 6X73'0UvBŇ&!$0 -FAJlJ8>Uh@ 11!Z@?t:Né  <qqh#B] WRVJ.@ ?@L&d© ]S )Q#hY8?Kh/QE^ `__ ^BX5a9XYePXRmScRU5:]md2L&U\ahY*PX\eEVV_iaU2>_PU"rA #dsHD: # h4>T]]9 M JU@i4F?@M&ɯd2?tP6  >JuM` ?uJt ASt=WJRb~li>t2U?=@MJA@b + &J[N#?0a& ?AT)B!!5 g`?CopyrigTtZ(c)Z20 9ZM c. 
os f"!r 1ai n.Z Al90 (s=2e0ej vO0d10@!$b-# 'Ya&?6iiek59 6jX7^'0a&Z!|$0 FJlJ UhqMAA ]A !4Ja-'Z@?fl6˃  <HzGh#BA OR|VJ:6]t:ϝNM__Z(\#UEL@ _?@Lƒ S AA#`YQ(\Xk5^ s`o!o ̌aX5UxT9 M3JU@i4F?@M&d2?ҷP6 m >JuM` ^?5ulbSD_Jt bstE>2zGz;?@MJA@u+b+&K !JS"j& 1"A.b+-,-/*B!!5 `?CopyrwigTt `wc)5020A0950M-0c+0o%s%0f32$1r'0`1ua30i%0n.50_ Al{0 +8Us2eS0e+0v0ds0ib =L!XY'0U Bj&%0 :lJ Uh*M[A[A 9A 1:xTh]]9 M JU@w[2?@2?@?뇤?@bf?P6  n>JuM{` ?5 uJt  hxvEt9SYEN^Ӟ~ӞEh!D#<>>2N_Nk?<@MJA@cb]b)++&J"< u) ?AHb$z@J+%/F%B!!5 c`?CopyrigTt (c)302`0930M+0c)0oKs#0f12"1r%0^1a10i#0n.30 Aly0 )8s}2eQ0e)0v0dq0!(Y$-3 7x&+0b59 6X7G'K0UhBx&!$Ia F3JlJ]Uhz11Z@`Y3   8gF/_ M ARnVJ!49BYOPBRWSMRUE:]R.?\QRYl>Q5U/V@_YQĈU2(_:UrA cHD: H h0>Th]]9 M JU@p8?@d2L&?@αB?@.?P6  n>JuM{` ?5 uJt  YEt9SRQEN߆$1Eh'>2N贁NKk?<@MJA@cbb)++&J[ < u)| ?$%/F%BB!!5 c`?CopyrigTt (cu) 02`09 0uM0c0os If2!r 51a0i n. 0 WAlP0 8sT2Ue(0e0vf0dH0!Y$Ť-['# 'x&0b59 6X|['7'0U?B)x&!$aI 6 JlJ]Uhz!^!Z@mN/?  8gF_  REVJ49Y&PR.S$R_UE:p]"`3  \Q)Yl?>cQ5oUV_YQ_Ub2OUrAg wcHD: H h0>Th]]9 M JU@&6?@0ސ?@?뇤?@bf?P6 n>JuM{` ?5 uJt  lUUEt9S;EN^Ӟ~ӞEh!D#<>>2N_Nk?<@MJA@cb]b)++&J~"< u) Y?AHb$z@+%/F%*B!!5 c`?CopyrigTt _(c)302`W0930M+0c)0os#0f12"1r%0^1a10i#0n}.30 Aly0U )8s}2eQ0e)05v0dq0!PY$-,3 7x&+0b59 6X7G'0UhBx&!$a F3JlJ]Uhz1h1Z@`Yg   8gF__  ARnVBJ49BYOPBRWSMRUE:]?R.?\QRYl>Q5U/V@_YQU2(_:UrA c_H>vQ+s #X8L(r FXZ\f#ͱ 4^!B- U_~|dTݮ o@+( oLsG!`mP%h*8'kUh) oT+ar(#vz-yX0|2{$OUFD  h,^TYYBBUF\.?x<F BP(c?Ps?m. 9c:M:O @z0 BH ?? V@L&dW2?F-W(Yi(G.sU!&P/)7(:  0 `WB`&Ring\network ?p$0rr0pPE1al\d$0v0c$0 c1^ (J YG& r+?!) +)!q q ǿ~~|p~~ṗ ~p~;ކ~pp2AAqDrag onut he]pe,nUdu /p64lies9Lt]wrkcQm tOfTly lGo_"imdO.b\.L&d2??;IOғEMQS?=?mar3r\.CUG D  # h T,YYEUF\.?@?FL&d2?P} ϛ% ^.&N+O+.Q+R+S+T+\+]+^+_+`+a+b+c+d*+e+f+g+Q.i+j+k+l+m+n+o+p+q+r+s+t+u+v+w+x+y+z+{+|+}+~++++++++++++++++++*++++73636 3 l_Fu{` ? &CG/B>CBHCBRCB\CBfCBpCBzCBCBCBCBCBCBCBCBCBCBCBCBCBCB"CB"CB"CB$"CB."CB8"CBB"CBL"CBV"CB`"CBj"CBt"CB~"CB"CB"CB"CB"CB"CB"CB"CB"CB"CB"CB"CB"CB2CB 2CB2CB2CB(2CB22CB<2CBF2CBP2CBZ2CBd2CBn2CBx2CB2CB2CB2CB2CB2CB2T6g666U66FFMBuIUHrN|Nw1r L@Gs5 ^CB0  $sV!v q)?@4v!p`:!@ K*RGs Br(1=s0s)) %Rt؉sq1`V sPeXY)cPm!#52862F1`?UCA p r gPt3()32U0Z93M c_ Eoi oM=rK yas iA n) J3A# l3EsUei e_ vd)21wrޟҀs?@A3{d|R$Bƕ~u +@@âɸ!BMBj8}rup`i-qBu *@rw1s!8=}/B 2t@I9u`un`"p2] HqDou`t1E[B2TU뱥)8s5ä  r}vh^T`%AT<7 o3mss >XxĀu7T=n7 eMTNtT yk3Dvd{uu ƃ@Rdư5ߙ߫ƕ9'9h\nE|1CU t@ʴ#FXjHqʶq-?bt[Ű//A:/L/^/~///A///p?!?3?ʼqV?h?z????z?? O@ɯ+O=OOOoOOO9OOO =t__'_!D_V_h_ͪ___#___$o+o=oB%`oroo&ooo'o!(5GY)|*+ .HTfx{şÏƹ/&8J!0m1Ɵ؟2 3BTf͐B_5Я6);7^p ̿_9:3EW;zόϞ(Oas?ߨߺDqm4# 1T 5h86%8llc౓#"AAш]]Ѥ C){q{qpqq22EyyNNaѕѨHr/d~CU|@c` MaЧufu e nA` EqipeBtm/h~4qP o|uNmb /b XQ Pr6HZ/{+ srt_q/\A6s6!P/^za Sil/T1 LcAHR\fpz$.8BLV`jt~""0":"D"N"X"b"|"v""""""""""""""" "" "*"4">"H"R"\"f"p"z"""""""""""""""""$"."8"B"L"V"`"j"t"~""AaXT@b FTP%జJKTltφT0ƐĮbPQwr SS#TS”GLF2DFbXO%—ȩP_(_:_S…LF?Hd}OƁLQ____KFLpXO( B}KoЁ?_T_o(ooS L FY2ణ3oaoЁ !eo}cGYkS L Fft]eҼ–B" E L FsxLfO͏ߏ*H L F l_go}<*A]LF џrmБ Є!7I[EʟLF `0 毯ʯ_&tEULF OT~N򤢿ƿTؿFLF O!H$6 Hϱa6ϗ*FLF ?ZبP6HZߙBLFR5xqy?o%L%9F//"q8?onT&L&9F?ᏼJ`_-?*e'L'r?UO{O$(L(roy:uoڟ) L)r!_@4 oj|*L*r"oݿF\);a+L+r#xQwχP ,L,r$|[6qȟϨ߹-L-r%Ϗ0 fTx.L.r&BX%7*] /L/r'7Msߙ0 0r(3 W2m1  1r)˿@,bt-2 2r*>T!/3/Y=3 3r+p/Iߔ/o!/P//M4 4r,tS?.1i?/??M5 5r-O1(O/^OpO]6 6r;Q?\AO:OP_/_*Um7 7r//?E_Q_{?__}8 8r0+oOo*aeo/_oo}9  9r1oa@$ _Zl: :v2/q6LO+Q; ;v3hA?gwoP?؏<  >v6~_ɯ߯2H'*M? 
?v7'=ocsoԿ @ @v8#oGπ"]ϴ¿ϥA  Av9ϻ@RdߊB Bv:zߠߠ.D#IC Cv;`9_oP D Dvv*@  E-GV,Gv !7-[k=HLB,N5>LB_r:?C&2F3s͵NSYQFEB,N>HdDq7ME5qD68W#"U%&EfW@@cX @ S1` Token?_RiP1nD?[Q?##qh1ab /X##zQ?AzQgVD6e]r##"` %P&rPpPrtPe+sA D2`A!_`NTMPUOPKfPS A``UEfPPPObR`5ISi0QAqD_!_\X\OT.U_C0?ni4oo\XXOSw'K0Ur%V!0^ v2Zei1DAT\1sEPuQ1MEX OS%HVl2UxAUx1'ExAMEx HCDrDC  @   !"#$%&'()*+,-./0123456789:;<=>?*>ABhiATiiJ\ 3U@\.?FL&d2?JP}@YIYqX"u`W?"&&5&,"u6)~}bE@{#&t03! 7("tm!,v&b$&"qK!Y$30^?2-,"l-2""2!l"6 #",$=F#"f$6)![0$%J[LD0D=F?|" (&3AA1A1 ;m`"`?CopyrigPt(c 09M-Pc+Pos%Pf3R$Qr'P`Qa3Pi%Pn Al{P +XsRe*SPe+PvPd"!ZOAy 7$^ ?P?b?[!H${eQnB? ???O %O.O@@OROdOvO6CkaOROOOO_ %!_3_E_W_i_{_Vx<kb_b____ o %&o8oJo\onooy_ocoroooo %_.@@Rdvwd %0BTfxyheҏ %5GYk}~/mfĢן  % =Oas~_?gůɲܯ $ %?Q@cuOwhʿ) %RGYk}ϏϜ_i . %I[mߑߣoj!3 %N`r k &8 %Sew+= %Xj|)m 0BU]o9n"/#/5/G/Ub/t/////Io/2? ?(?:?L?Ug?y?@????ϟYp?B OO-O?OQOeGQoOOOOO?ߧfqOR_ _2_D_V_"u!q______yr_ro%o7oIo[o'"voooooos *<N`,#{@yt /ASe1ˏݏu "4FXj6%͟ߟ/yv'9K]o;&ү?yw, >Pbt@'@ſ׿Oyxπ1CUgyE(Ϧϸ_yy#6HZl~J)߽߫oyz$(;M_qO*y{)-@ RdvT+@y|.2EWi{Y%,y}37"J\n^5-/y~8/<2O/a/s///cE.///// ?y=?ABT? f?x???hU/??@???OyBOFRYOkO}OOOme0OOOO__yG_Kb^_p____ru1____ oo yLoPrcouoooow2oooo QUh z|3@%VZm4Џ*/[_r5՟ /? Ɇ`dw6گ"4#Oهei| Ŀ7߿@'9(_jπnҁϓϥϷϕ8,>-o',sߘߪ߼9 1C2! F1 xR'9K:)6} ;@);M<+)~%< .@RA09"5=/!/3/E/W/F5I/2/////E>??&?8?J?\?K:Y?B? ????U?OO@+O=OOOaOP?iOROOOOOeQ_!_3_E_W_dOXGv_b_____uAo#o5oGoYokoZIoroooooDžB(:L^p_NɁHD: H h <>T$]]9 MT kAU@\.?FL&d2=??F?P6 LA P>  + 4DJuM` ?"?o`0?oNu^Jt[ tgJbg>6#J?#,b/ @TZT"Z""P& T/@n_^B% 'N< ?m6 ?AeJa@ۢR4` Ac0entCol0r3z۝0Aٝ0bbbz90“2???9Nbb?1"bAYFbeDG?#E![C39 , B6"hLJ?#QE!TG; J`A`?Apy$@igTt(@)20mP9MYPc$@os@f_RArSP#Aa@i@n. AR @lXWsRePe$@v0dP@ᾡ`1O_o?#2TUam3 FD5#]BrFlL]BrF 0PuGiAbE9 6%yXGg'0bUm6!40 fjlJ qUhhUGHQA3(Z\*{ ٠0"#")b5DrD6"J Az~%"#"BA}/t?PSQ< E!?yET#-#tgaD[#,E:g@qVE`0w>]BrA \Cď'$$+HD  hj @>TA(]]9 M  eAU@\.?FL&d2+?]P6 LA > M /8HJuM` ^? C s4CLsRubJ1t_ tuJb$">JU5 JI4#=#/ @X^X"^"B"6 X/r cb!Bh@,G-v31k!k!5 J``?CopyrigTt(c)2009M0c.0os0f21r0Aa0i0n. Al3@ 8s7Be @e0vI@d+@0tv3?c- =< ?F ?AeJa[_Rf14W` A0cI@n0j1lB3z_ՁA_AbbbzY_)mEZf1T  9 MSAU@\.?Fr??F~q?P6 B >%M a*JuM` ? %U U4:uDJ@bEF&d2ɿuBltA bJt EIM>JoU2TU!?#S@M #~A@@"WbH)U+U&mJ !?v(GtI#J#"# %"& "A@"bH);E'|"/-<*B#h1h15 "`?CopyrigTt(wc)2009M0c0o%s0f21r01ua0i0n._ Al0 8Us2e0e0v0d0"MX#t3 t7&"tt Ei b@9 %!X GގG'0"UX&-50 FʢJlJAUhh)Q)Q!#A@! (:#`RB: 3R$JTV  h1?[_R: @AABUVeLٮ\2maXUe.o_jLo^m2aroo__7\HD H# h4>T]]9 MT CAU@\.?F??F~߻?P6 8M >M (JuM` ?#0SS2uBJt? tTKJWbK>[JU#?,& ?AA@R"bZ$J#B!!5 `?CopyrigT}t(c)W20 9M ]c os f"R!r !a i n. AUl0 (s2e e v0d !$#bB-## ',&%en#2TU1?"@M#R"g&a,R"g&j'\?6iieE9 6XG"7'02U,&-!G$0 NFbJqlJAUhh7!lR!(Q:#N J>83R JWTfV  !?qY_R8>qRl?38VUVL?&d2n\] aٔXEUoe6._x_j omQ2oDo5V_h_o\056 b_HR>+s 1ǡY-MSMלC FZ# !B (6K )|dݮ o@+H oMsGU5PXF:*ٮ 8'=7,UFD  h ^TYY>UFD"H$'@F @FxT]]9 #4AUFD"H$@F@ @&@F?x0>Uhhn1n1`!#!J`F6 [>A{ (FMBa!? 
OABE3RNkO}OGBERE6 O?OBE2?;.1rA _0rRxS`3HD: ##=h0>Th]]9 (3q #UFGJg:#@F@ @FWtyz@F.???Pv> u{` ?u3>(.t  ( y?_A@HNpΈ_-t#`aU"p [`JڍU5 ILҹc0U~ "?AG}cTLIb` LineCoklK rzc+"1&Z/#nFFF"?&?#+"1&&` STadK wI/#/N+$;# FC lM 72zS&eacۢR4` uA cG nt)?@#cb3zs94#+$3$I!8<>3 Q3#8.f.30FP=4B/G3B11G;;#?1pyQ i}gTt ( u)@2`09@uMC cQ osK IfB1r@P!a`0iK n.@ UA%2 HsBe@eQ vG d@0 l>]UhzЍA3+!`J!VQAU__T3^_odgU5^No_roU __ooj'EAb59 M{7/v!3rQav44CK7%iOHBTg7D'$~vv I+"688e @ Q:(F|@v5?=+$cz,Z#b~-(T{ HD: ##=h0>Th]]9 3#UF u` ?6u3`b>"6(t  <,?A@U"p [ZBH|гYYt [ՄJU5 CLT0U"?AG}eC]` LineColZI rz۷!#nF;FF{#?&?#),)"/&&` STaKdI wG/#/ZL+$9# FA lK r72Q&a` ?RR) 3#B2((236727kpvB1o31P17;9#?L1wpyO igTt (cu)J@2`09J@uMA cO osM0IfHBL1r<@N!aH@iM0n.J@ UA#2 @HsBeh@eO vE d@0 l>]Uhz1A3)!BJ@o39DYX3UD_d_vY3^__gzUf53^_V_ozU3__H\o_j5bPf59 M{o7l/5v!3HraPv4!$ 5v|mai6oozo7]'$t~kvPv v8%TE xH@L?c8 eaD@eJ@ l@Qo:0@3 9r)']z,Z#(T5-sTAcL@0P@HD: ##=h4>T]]9 P3#UFpo@F@ @F18Q[ @F.q??P> u` ?u3 >,2t  eX?A@LR[ A\c2t#`e"p [NdJU5 MLc0U "?AG}gLMb` LineColO rz$g/"5&0mddd#nFF;F"?&L?#/"5&&` STa%dO wM/#/R+-$?# FG lQ 72W&eagRa` uA cK nt 85W%#gbd3zRw98#/$h(Ph9BB3 (C#8>239FA=B8G3APAG;?#? 1wpyU igT_t ( )@W20@9@MG ]cU osO fBR 1r@T!ad0iO wn.@ A)2U HsBe@eU EvK d@@l> 0>UhhCA3/!J!VCQU__T3^oomgU5^Wo_{oUE_oo%obTG t>Q e jXHle59 M{7/v!3rX/Qz#=!$K 7%inug7D/'2 #vtx J/":UGDF P# h>,/T6D # UFD"H$@]F@&u@P} jih,D@Su`} ?.U@Nu#hQgtSftܸtRtA@3db#t%T/M/"j/+.d#%@?/"t`Make B 0c"0ground#3%~ؒ314+1`VIS_PRXY0CHM!#60z070"w`?C40py20]i00ht&0(,0u)&02009&0UM0c22s40f2R1r0Aa0i40un0 &0Al:@U 8s>Be@e20%v$0d2"e (^q5 J% 3hHA%DK%D,K3DGg $aI_9SFTv4y13TU-T[Te ZTATU J8>wSa%F1Q01$DtybgURwSi@^UR-n,?oUR1j?RRTuKrQi? eb #! >baes?,3QRoX0cD&q1cQ(QR=wS1VZi#3 D40N40UtBn0a0n`Eq#?_^9jYazPtt"(wS117eU5`>Xcy1`I!&` H0d$2&F40qUAw0޸q` SPo]wRR+="v6 ~!R)RRLf] Th 'Q0d_HV> ל ߪJAC BFʿt#( i;B= Uݮ ]U@eko+C/ ]Ka_)]m*<N`r%` "m*>GC/$UFD  h ^TYYUFD"H$'@F @FxT h]]9 # AUFD"H$@Fcѿ?&@F5 L?sFC?P6 u` ?iu#t A@t   q?7Q.7)CWb7]ARMJqmdddpnFFF? ?Agam9T6` Fil> wCo> orz9b2j9A9obdk#zY)א9S|uX9/K&` /2L%!X!#\#w&` STWadD w/" /;)#82#b5"oU y3#B&(23"I&3cg30U2?AGQ}9L"|w` L ne%?(75N582M:Bhj<:AgCy1y17;C?C!pyJ i! htj(0)j2`09jM cJ osD fBC!r@I!a@iD n.j A" HsRe@eJ v#@dPw0l>]UhzAh#j181Ja`gCVQ'  2 bM[8A_^#e33nLo^og#eE3eVo_o#e!_>82rRA w082YsASi6bE9 MzgG]'4Ō;J!$a vvET#@x@&OnAHD # =hj0>T h]]9 # AUFD"H$@F@ ?&@Fq)?sFC?P6 u` ?iu#t A@t  ^)?7.7C_(\T[>JS/qnFFFa? ?Aga9T6` Fil* Con* orz9_b9A9bW#zE)9S|u%/7&k` /28%ߵ!D!o##&` STaUd0 w/"/+oUz G3#B&( 3"&3iE530U2?AG}x "|` L n e?#2Bh8<:*ACG1G1O7;3?/!py6 i htj(c)j2`09jM c6 os0 fB/!ru@5!a@i0 n.j A" yHsBe@e6 v0d@E0l>]UhzjADJ`XC66Q'  2RM)8@ mT}_^U3^_oagUEUtVKo_ooUp!p_>[QrA E0r sC)84Aeio6AbE9 MzG'4Z;!$Da Wvkv5T0x@?AHD # =hj0>T h]]9 # AUFD"H$@Fɻ1V?&@F&7 ?sFC?P6 u` ?iu#t A@t  Biq?7.7C!_lV}T[>JS/qnFFF? ?ga9T6` Fil* Co* orz}9b 9A9bW#zE)k9S|u%/7&` /28%!ߵ!D!o##w&` STWad0 w/"/+oU QG3#B&(3"&3E530U2?AG}u "|` L 'ne?#2BhP8<:A2CBG1G1O7;3]?/!py6 i htj(cu)j2`09juM c6 os0 IfB/!ru@5!a@i0 n.j UA" yHsBe@e6 v0d@E0 l>]UhzXjADJ`C66Q' C 2RM)8A@mT}_^U3^_oagUEUtVKo_ooU!p_>[QrRA E0r sCio6bE9 MzG]'4Z;J!$a T h]]9 #  AUFR_!@F;(kY?F̅$@F_fw?P6 u` ?u#t  \ Ac?oA@'t3(?.Zd;O?M&S?- >JS/qnFFF? ?{gaCۢT6` Fil4 Co4 orz۾Cb CACbۖa#zO)5CSu//A&` /2B%ߐ!N!y##&` STad: w/"H/+oU (Q3#B&(23"&3E?30U2?AG}u "` L 'ne?-2BhB<:A!CQ1Q1Y7;3?9!py@ i ht (c)@2`09@M c@ oKs: fB9!r@?!a@i: n.@ A" HsBe@e@ v0dH@O0l>]UhztA#B1J`!C660Q'  2RM38@wT_^U3^ookgUEU~VUo_yoU!z_>eQrA O0 rsCiy6bE9 Mz!G'R4d;! $a FvZv5T0x@?(AHD # =hj0>T h]]9 #  AUFR_!@F6??F̅$@F_q)w?P6 u` ?u#t  \ Ac?oA@'t3^)?.Zd;O?Mɯ??- >JS/qnFFF? 
?AgލaCT6` Fwil4 Co4 orzCbCACbva#zO)CSu//A&`5 /2B%߿!N!y##&` STad*: w/"/+RoU= Q3# B&(!23"&3iE?30U2?AG}x "` L n e?-2BhB<:*A!CQ1Q1Y7;3?9!py@ i ht (c)@2`09@M c@ os: fB9!r@?!a@i: n.@ A" HsBe*@e@ v0d@O0l>]Uhz`tA#B1ŊJ`!C66"Q '  2RM38@wT_^U3^ookgUEU~VUo_yoU!z_>eQrA rs:Ciy6bE9 Mz!G'4Ŕd;! $a (FvZv5T0x@?(AHD # =hj0>T h]]9 #  AUF^!@Fp>?FKg@FmЁ??P6 u` 7?u#t  ?߀A@'t3;On?.Dioׅ?M@_߾?[ >JUonFFF? ?ob"ozCC]b,#&` STad_owCP lP -rz=&G/Y+[ ##e.2"#&3iE0U2?AG}x "` Li'nez/"Bh,:A!!';:3?I1py\ igTt (c)02`090MB0c\ oKsv f2I1r0[!a0iv n J0AX l07s"BUe0e\ vF0d""l>]Uhz1#!2!ŊJ`%6%6"A2  "RM(@DON(U38^Q_c_W(UE8UF_O_(U2O;2"rA 2#^cFCi%bE9 Mzl7]'4+J! $a ff95TF0x0I?s1HD # =hj0>T h]]9 #  AUF+ $@FmЁ?Fh_IŒ??P6 mu` o?u#t  $C?A@'t3;On?.N@?M? >RJUonFFF? ~?{b"zCCb,#&` STadowCjP lP rz=&G/Y+[ (##."#I&3E0U2?AG}""` LiOnez/"BPh,:A !!';u:3?I1py\ igTt _(c)02`W090MB0c\ osv f2I1rT0[!a0iv n 0AX l07s"Be0e\ vF0d$""l>]Uhz@1#!2!J`%6%6EdA  ("RM(@DON(U38^Q_c_W(UE8UF _O_(U2O;2"rA 2#^c%FCi%bE9 Mzl7'4Ŕ+! $a (ff95TF0x0I?s1HD # =hj0>T h]]9 #  AUFP0&@F'ǖ~rl?FMO?Fq_Ww?P6 u` ?u#t  ~jtx?oA@'t3ʡE?.Cl]M/$A >JUffnFF;F? ?b$zCuCb,#&` STadowCP lP rz=&HG/Y+[z ##."#&3E0U2?AG} "` Linez/"Bh,:A!!';:3?I1py\ igTt (c)02`090MB0c.\ osv f2I1r0[!a0iv n* 0AX l07Us"Be0e\ vF0Id""l>]Uhz1#!2!J`%6%6A  P"RM(@DON(U38^Q_c_W(UE8U@F_O_(U2O,;2"rA 2#J^cFCi%bPE9 Mzl7w'4)+! $aQ ff95TF0x0I?s1HD # =hj0>T h]]9 # AUF€#%@FL&d2?FM_O?ݐ?P6 mu` o?u#t  ~jth?A@t b@(.eX'?(MA>RJUffnFFF? ?b$zCCb&#&` STadowCJ lJ rz7&A/S+I[ # #.2"#&3E0U2?AG} "` Linet/"Bh,T:AB!!';43?C1pyV igTt(cu)2`09uM<0cV osp If2C1r0U!a0Uip n AR l7sBe0eJV v@0d""l>]Uhz1#!,!J`X66DA  " RMB(@DON"U32^K_]_W"UE2UF_O_"Ub2O;,"rAQ ,#Xc@Ci%bE9 Mzf7K'4Ž+!$a ff35T@0x0C?m1HD # =hj0>T h]]9 #  AUFb%@F'ǖ~rl?F2wa?Fq_W}w?P6 u` ?u#t  _LU?oA@'t3ʡE?.j+]M/$A>JUffnFF;F? ?b$zCCb,#&` STadowCjP lP rz=&G/Y+[ (##.2">"#&3E0U2?AG} "` Linez/"Bh,:A!!';:3?I1py\ igTt (c)02`090MB0c.\ osv f2I1r0[!a0iv n* 0AX l07Us"Be0e\ vF0Id""l>]Uhz1#!"J`%6%6EdA  ("RM(@DON(U38^Q_c_W(UE8UF _O_(U2O;#rA 2#^cFCi%bE9 Mzl7]'4+J! $a ff95TF0x0I?s1HD # =hj0>T h]]9 #  AUFu %@F'ǖ~rl?FMO?Fq_Ww?P6 u` ?u#t  ~jth?oA@'t3ʡE?.٬\m]M/S$AAJUnFF;F? ?b$zCuCb,#&` STadowCP lP rz=&HG/Y+[z ##. "#>3E0U2?AG} "` LiOnez/"BPh,:A !!';u:3?I1py\ igTt _(c)02`W090MB0c\ osv f2I1rT0[!a0iv n 0AX l07s"Be0e\ vF0d$""l>]Uhz@1#!2!J`%6%6EA#  R#R M(@DON(U38^Q_c_W(UE8UF_O_(U2O;2"ErA 2#^cFC i%bE9 Mzl7.'4+%! $a ff 95TF0x0I?s1HD # =hj0>T h]]9 #  AUF;%@F'ǖ~rl?FOr?Fq_Ww?P6 u` ?u#t  _vO~?oA@'t3ʡE?.]K=]M/$A>JUnFF;F? ?b$zCuCb,#&` STadowCP lP rz=&HG/Y+[z ##."#&3E0U2?AG} "` Linez/"Bh,:A!!';:3?I1py\ igTt (c)02`090MB0c.\ osv f2I1r0[!a0iv n* 0AX l07Us"Be0e\ vF0Id""l>]Uhz1#!2!J`%6%6A  P"RM(@DON(U38^Q_c_W(UE8U@F_O_(U2O,;2"rA 2#J^cFCi%bPE9 Mzl7w'4)+! $aQ ff95TF0x0I?s1HD # =hj0>T h]]9 #  AUF`9d[%@F?F{?Fߢ_-Jkw?P6 u` ?u#t  a2U0*C?oA@'t3 +?.ڊe]M~jtA>JUnFF;F? ?b$zCuCb,#&` STadowCP lP rz=&HG/Y+[z ##."#&3E0U2?AG} "` Linez/"Bh,:A!!';:3?I1py\ igTt (c)02`090MB0c.\ osv f2I1r0[!a0iv n* 0AX l07Us"Be0e\ vF0Id""l>]Uhz1#!2!J`%6%6A  P"RM(@DON(U38^Q_c_W(UE8U@F_O_(U2O,;2"rA 2#J^cFCi%bPE9 Mzl7w'4)+! 
$aQ ff95TF0x0I?s1HD: ##=h0>Th]]9 3$#UFt>!@Fp@FU^+m@F BP(??Pv> u{` ?u3>.6.t  ?A@RXqhki)t bk"p [SjJmU5 SLiT0U'"?AG}yFSb` LineColi rz!#nFFF#?&?#I,I"O&&` STadi wg/#/l+$Y# Fa lk &72q&` @?r)o 3#B..36=27.JM3117;Y#?l1pyo igTt (c)R@2`09R@Ma co osm0fPBl1rD@n!aP@im0n.R@ AC2 HHsBep@eo ve d@0l>]Uhz9Ah3I!J@35&5&hQ (;UL_q_T3^__gU5;^_^_oU;__do_j5b59 M{؏7/=v!3PrhaXv4!$A;7ai6oo74'&$|~svXv v_am A` Te xP@l>#mbzR#p8!e t@PQ:(F|'@f5ɔ?{I$m."z,Z#!b~-(T{<\B2>2 HD: ##=h0>Th]]9 3$#UFfG#@F BP(?FUiԄUV?P> u` ?iu3>W..t  x&1?A@RXʡE5i)t bk"p [TjJU5 [ SLT0U'"?AG}F^S` LineColi rz!#nFFF#?&?#I,I"O&&` STadi wg/#/l+$Y# %Fa lk &72q&'` @?r) 3#hR@BL..236=27$1311ԩ7;Y#?l1pyo igTt (c)j@]2`09j@Ma ]co osm0fhBRl1r\@n!ah@im0wn.j@ AC2U `HsBe@eo Eve d@0l>]UhzQA3I!J@X35&5&DQ(SUd__T3^__(gU5S^ov_6oUS__H|o_j5bP59 M{7l/Uv!3hrapv4!$ Uv}aiHz7]'&$Ŕ~vpv vX%Te xh@l?8 e'ad@ej@ @Q:1@3 9rI'm"z,Z#.T5-TAcl@0p@HD: ##=h4>T]]9 P3$#UFl6@Fp@Flf^J @F BP(q??P> u` ?u3 ]>2m2t  %䃞?A@V\m)t bo"p: [nՄJU5 WLT0U+"?AG}FWWb` LineCoklm rz醑!#nFFF#?0&?#M,M"S&&` S.Tadm wk/#i/p+$]# Fe lo *72u&` D?v)h9I!Bo? 3#eB223FA BGT2M3B117;]#?p1pys igTt(c])20@9uMe cs osq0IfBp1rw@r!a@iq0n. UAG2 {HsBe@es vi d@0)l>0>UhhClA3ZM!J@3,9&9&C"Q(nU__T3^__CgU5n^-o_QoUEn__o_(bTe ts0e j$Fje59 Mb{7/v!3rQv5!$;7aiPDV74'2B+#~vx _akq A` Ti x@p>#qb[zމV#qUGDF P# h4TPYY# %UFD"H$@F@&@P} j6   L 4 Hn[u` _?BU 4H9 u#p3lbtntQtRA%@> tS%tK/]'4( tTi%i/](/z.R&U@?/".g`Make BJ0cL0ground #d.#4#30U2)1_FB3!5~*؀3A$D;1`VIS_PRXY@0CHM!060<@78݉"`?C^0puy\0iZ0htP0(V0)P02b@0U9P0M@c\2s^0IfBvAry@Aa@i^0n@0 P0AUl@ }HsBe@e\0vN0d@2"e (^E 5 3r ca%A oCjA$A13 f3ba!5 cq@~n^e3jU?~bhKr;aq? |ub #h bauA1*4H f5ohc&Ac.abaEkQ,o^e= cAzf y3` D^0N^0tBn@a@nEi#_9Y@a `A1A1"( c'AAezeE>Xs A pIp1!` H@dN2&F^0YA @ޮ1` SPo]w:bb+"v 6 ~!bbbf,4T4?Had3E_HV>ל ߪJAC BFw #,ߩ6B} ޮ ]@e8ko+U ]BBG/)Ph{ q|(5&1$5.c/x6X789 (-JUFD  h ^TYYRUFD"H$'@F @FxH5@@@0ۀ HD:  # ;h4>T]]9 #U,AUFD"H$@FjZV@&@@P -u `u bu  @")lt A)0t#` bFv_n?p [_eAu` ?jurJ:26W&$##D./@%W(l"r/tvel JYO8?$"'D{(/$B}U2q(0?'?96?\.?fd?F 7`BackgroundC0l0rz\)z 5G!3M1 3?l"M@h<:A酙!30O贁Nk?mdddclak)2w` A0ce0t?A)n1bdCzI7!3117;l`?1py0i0h@ (0)2P20>P92PM*PcJ2s0f0R1r$P1a@i0n..2P A0l2P)WUs|RePPe0v@dpP0l>#$!Uh#3 pA QYŊJ*!3"Be(o1HD:  # ;h4>T]]9 #U,AUFD"H$@FVj?&@@P -u `u bu  @")lt A)0t#` bԚp [euv` ?SurAJ\tt&!$)'$"3/E X/j$=(tvel gsS?$"'*Q/$BU2q(0?'?96?\.?Efd?F 7`BackgroundC05l0rz)  5Gt!3M1 3&?l"(MPh<:A!30U_B??mdddLcla)2` ]A0ce0t?A)1b[dCzI7%!3117;l`?1py0i0h@ (0)2P20>P92PM*Pc2s0f0R1r$P1a@i0n.2P KA0l2P)Ws|RUePPe0v@dpP0l>#$!Uh#3 A Q!3 (BHA 3J*!32B'e@o1VcHD:  # ;h4>T]]9 #U,AUFD"H$@Fr߹\.@&@@P -u `u bu  @")lt A)0t#` bF_xP92PM*Pc2s0f0R1r$P1a@i0n.2P KA0l2P)Ws|RUePPe0v@dpP0l>#$!Uh#3 A Q\YJ*!32Be(o1>cHD:  # ;h4>T]]9 #U,AUFD"H$@F~߿?&@@P -u `u bu  @")lt A)0t#` bԚp [euv` ?ur>JvSȱ&$)'$"3/ E X/j$=(Etvel 37?$"'*/$BUo2q(0?'?96?\.?fd?F 7`BackgroundC0l0rz) K 5G!3:M1 3?l"Mh0<:A!300b/?mdddzcla)2` A0ce0t?A)1bdCzI7!3117;l`?1py0i0h@ u(0)2P20>PU92PM*Pc2s0If0R1r$P1a@i0n.2P A0l2P)Ws|Re*PPe0v@dpP0l>#$!Uh#3 A Q]!3 (BA 3J*X!32B'e@o1VcUGDF P# h>,/T6D # UFD"H$@]F@&u@P}  ih0D *@3-T-hc-|u` ?W΍U,@T'*hh||-"| 'u#QtSt% "{tR$t/'A@#KtT%/(/-3b/(08"A2?X>d.3@??2.`Make B0c0ground 335~J3hAmD;1`VIS_PRXY0CHM!#60@79݉j2`?C@puy@i @ht@U(@)@2@0@ @i@As@fBAr@Aa@i@n0 @AlP HsRe@eJ@v@d22e h4^hh P T3 3hHQ5T[5T[3T@[TT[ETh[TX|WndPao7T1128cpAhAhEhATh]]9 3#UFԚ?F BP(?F @F߅d\?P>u` ?iu3>k(.t  9#J{?Ao@HtSݓ_N׳_ambԚp [aJڍU5 ILc0U~!"?AG}cLIc"` LineColc rz$cC"I&nFFF#?&^?B#I&&` STa%dc wa/#/f/G(iF" d3#$..36hH1$d1d1l7;S#? 
1pyi igTt ( )@2`09@M[ ci os!0f B 1r@h!a @i!0n.@ AR#0l@GsYBe-@ei v_ dM@b0 l>]Uhz13C!`J/!F%QA(E _._@T3O^h_z_W?UI5N___?U Oa_!o_j5AbI59 M{R7/f!3 raavb5!$fAm/%Q12ooXR7w ' $9~0vx] vF88e 1@tQR:(F|@{f5Q?=C$cz,Z#bS~-(T{<B2 >, HD:  ##=h0>Th]]9 3#UFGZت$@F BP(?Fu?Fa]?P>u` ?iu3>k(.t  aTR'?Ao@HtSˡE_N4@_ambԚp [aJڍU5 ILb0U!"?@2LhcC"II&ynFFF#($(?#R,C"I&&` STadowC l rz&/+_y" QB3#.3i6h H1q$B1B1J7;v`? 1py igTt _(c)02`W090M0c os0f2 1r0!a0i0nU.0  l 8Us-Be@e v?@d!@@0l>]Uhz13 C!J/!FA(EO_T3#^<_N_WU'5N_O_UO5__Y_jD#b'59 M{07/f!3baf-A4!$fm/%ij5|oorX07Ww' $ ~vf Wv $8e'ua0e0 @PHZQ0:^2@3 9R'cz,Z#.T.'~-qT@Ac000HD:  ##=h4>T]]9 P3#UFD"H$@F@Fq!j@FC1[z??P>u` ?u3 >,2t  0*?A7@LtWzGcQR e0qbFL&d2?p [>eՄJU5 MLT0U%"?AG}FWMbg"` LineColg rzR#nwFFF#?x&?F#uM&&` STadg we/#/j+$W# F_ lDi 72o&G,`h9C!Bt! 3#.2236A2JM3117;W#?1pym igTt(c)20U@9M_ c.m osg fGB1r;@l!aG@ig n0 A22 ?HsBeg@em vc d2l>UhhC0A3G!bJ@33&3&C_QA(2VC_h_zT3^__gyUb52^_U_oyUE2__[o_(bT_ ti e j5ieb59 M{7/[v!3nrvv-4!$;7aiX7w'2%#՚~vvv vw_H<>SGל ߪJAC BFXwI|#(- T]]9 3#UFD"H$@Fh4@Fj~L$@FZV{?@F?P>mu` o?u3 #,2t  K46?A@LR e,֍t#`F BP(?pz [e"ejJU5 MKLT0U$"?AG}Mb` LineColf -rz!#nFFF#?&F?#F,F"L&&` STa%df wd/#/i+-$V# F^ lh #792n&` =?o))eCQ3#8&327A22\ތ32q@?O FM$? "fKO:F!%311Ԧ7;V#?i1p]yl igTty(c)y2U0@9yM^ cl osj0fBi1rԪ@k!a@ij0n].y A@2 HUsRe@el vb d@0l>0>Uhh7A3gBbJ@32&2&QA(U_ _T3^ o%oxeU5^`o_oU5_ oo.oi3 eB!3)m!#1'/28pȱ;!$8Q 52xbszgO#u%w#vj#lei:M{!/!3wxAJ dQ`$U8!e fQ:(F|@$v9K?F$g"z]#b?M-,T{{/ Ck\tq ĂHD  # =hj0>T h]]9 # AUFD"H$@&@FxitI%"p: [kՆJsnFFF? ?Abm$zOOSsu` FilA ConA orzOk` >/2O%(!(&\#&` STad*G wE/"/+ReU= 3#J8'32H23"&3E#0Ut2??AG}ΐ` L? n /Bh,:A311 7;3?F!pyM igTta(c)a2`09aM? cM oKsG f3BF!r'@L!a3@iG n.a AA" +HsBeS@eM v dHs@0l>]UhzA#!J`366KaDX' (32oRM(@T/_O_KQU3^__gUEU&V_A_!oUk!"_> QrA *bcCi%AbE9 Mz7's4;! $Da fv5T x3@?1HD  # =hj8>T! ]]9 #,AUFD"H$@Fx@&@@P6 -u `u `bu  @e" - A-0t#` b ̮?p [iuu`l?SuvAJ`rJ &$-'("7/ I \/n$A(tzip Q?$"'(/$񓾍U2q,0?+?=6?\.?ljh?F 7`BackgroundCj0l0rz-.~5e%3Q1 3?a t##FGB%3!#rٿnFFFf6AۚsA,? ("bCzy-lB-3&` STa0ow?(!` Fi0lrH2F1` OE=%:?Q1&gp!K7h<:AJ%3117;XQ@?1py0i0htk(0)k20P9kM@c2s0fR1rP1aPi0n.k AB Xs=be*`e0v0d1`0l>#(!Uh #3 XQ\iJQ*%3'928e&o i1HSeE9 MzW'0URr;!4o ]vqv1` T0xP_QH D:  ##=h4>T]]9 3#UFD"H$@F~@Ff>$@FZV{?@F?P>mu` o?u3 >,2t  Q|a?A@LR e,֍t#`Fi4F?pz [e"ejJU5 MKLT0U$"?AGq}2Mb` LineColf rz!#nFFF#?&?#F,F"L&&` STadf wd/#/i+$V# F^ lh #72n&` =?o)eC(3#82A"36A27\327q@?O FM$? HfKO:F! 3117;uV#?i1pyl igTty(wc)y20@9yM^ cl o%sj0fBi1r@k!ua@ij0n.yW A@2 HsRUe@el vb d@0l>0>Uhh7A3gBJ@X32&2&DQ(U__T3^ o%oxeU5^`o_oU5 _ oo.oi3 e23)pm!4/'2$#ձ;!$8Q 52xbszgO#u%t#xj#lei:M{!/!3wxAJ VdQ`$U8bT^ tl0e @he}}k\tq ĂHD:  ##=h0>Th]]9 3#UF3钿Y"@FFە @FNPQkӭ??Pn>u` ?6u3`b >"6(t  z6?A@BHK=kUYt ["p: [ZՄJU5 CLT0U"?AG}eC` LineColZI rz۷!#nF;FF{#?&?#),)"/&&` STaKdI wG/#/ZL+$9# FA lK r72Q&` ?RR) 3#B2((236727kB1o31P17;9#?L1wpyO igTt (cu)J@2`09J@uMA cO osM0IfHBL1r<@N!aH@iM0n.J@ UA#2 @HsBeh@eO vE d@0 l>]Uhz1A3)!BJ@o39DYX3UD_d_vY3^__gzUf53^_V_ozU3__H\o_j5bPf59 M{o7l/5v!3HraPv4!$ 5v|mai6oozo7]'$t~kvPv v8%TE xH@L?c8eaD@e J@ fQo:3@e3 9)']z,Z#bK~-\sTAcL@ 0P@UGDF P# h>,/T6D # CUFD"H$@]F@&u@P} ~ k ,D@Tu` ?LR,ru#|QΉtSzttRt 'A@tT$/ (L/+-3xOb ((A"i/.d#@3)5~??".s`Make B0c0ground#b314 ;1`VIS_PRXYx0CHM!#6008!"`?C0py0i0ht0(0)02@090M@cJ2s0f$BAr@QAa$@i0nx0 0All@ HspBeD@e0v0dx2"eYk} !HA%DK)5D,K3DKDTGnDAD_'7BTAy1y1 "(S11}51<>@XBr1I!&` H@d.2&F0o$@Az0#` SPowRo +<"v8d7e7 J@cbbg bg__0R11"Sa%1+bb$y1r,QHEq)5Se@~|u3{,|uVz?rxK2cxfq<`}? rjb #! 
bIgaj?@TT,3qEGxsD& 'M1sYq(q{r=SeS,fa0 D0NZbAn$@a@Q"AC@M7LqtKo]e_vH>ל @(T6D UFY,b'@F#H$ @FxT ]]9 TaAUFY,b@F#H$@&@WFF"W@P6 u`bt A@u)΍tStS<tTJ(S3d i)Ob  ADREJ#U0U8"?@$ t0 ?$( ?U kaX:`B c groundC l rzAb#z.)s$%B//#b w/bn3H? K1#5 CL.`M kek2?dQ5.1#A D5 v$`?!py;0]i90htk(50])k20S@9kUM?@c;2s=0fEBR!r9@!aE@i=0wn.k A lk>GsBee@e;0v0d@B1#lhA@=^M G5C G&dRMl>0>UhhE p.A!1 $R0SPJf l#b(.hD Ob"YbM_(F!o+g?7o3coe5noogoeV%efo+o oeE2 o;rAsCghe[+TQQM- &EtJsTl@S6V%B,QE=BAA!D)B0#` D=0N=0t*E2nE@a?@nE,15eq3FB(OqiAbV%9 M!4.'7$ň&!$Q2 $H>%1x (z)ub"`\NWFN#ô jPeB ״ ]Pa, Q@+Y #BXV+k!UFDfP h>,/T6D ؉UFY,b'@F#H$ @Fx%B2 6gdaO{'TT'T= S5Ҧ13C#C D)@N)@tC)@nsPaiyPr$^ Ph(:LV?DOowpx`,p|rag ont] hep ad ful--> siz "]bckru"im.bY,b#H$@@5pm;μ@@3F" HD  ?hZ8>T H]]9 UUFY,b@F#H$W@' P6 lA u`l?U  u(tS&A@ctTl$bsTJh=M/ m20U?@LebTqBG H#$\( ?A3`BackgrounWdC l rzuu!$)(=~ H" #{#9"Mj%7 M`M kek/dpQu` EvNO0nt!p'!~u eJع1!';1#Vis_o RXY. hm!#6050 v`?!py ui hx0 ( u)k2009kUM0c"s fBR!r0!ax0i Un0 kA lk*GsdBe0e s1Qd0 &+?"A %l>0>Uhh5@ 7A!!!JnGV lfX 2 3VM@ !Q_&1q_mQU3^__5gU%UHVoc_CoЧU"5G_ #rRA !"cCgThe'+RT!!M-Qt^t JcTl!nqz{,@{3#T811 C!## D N t"w1a0nEH>Or S-"GqS@w`F8)V"#3 KXTB ]^ax߮s ]@+a Y}BزVIPUFD  h ^TYYUFD"H$'@F @FxT]]9 #U/P%g(|"./t  {Pk?e"p _o*'6#4!Uh#3 A AYAJ*=3F"+2BU_ ASHD: # ;h4>T]]9 #U#AUF@F\.?Fl6 @@P Vt`  ?"u u u " '@l0+bA80&t# bp [5Ru7?u_eAYJUFL8~i/6a房D!@ '!"*(E,{Pko"] U?N@*(~k *Q/Bo2q0??6??S??F 7`BackgroundCq0lq0rz~8b%I=(1g 3?"&Ph<:AZ20U0B3 Lb+ 7117;Y`]?z1pyo0im0Wht(i0)W20@9M@co2sq0fBz1rԡ@1a@iq0n]. A}0lGsBe@eo0v" Pd@0l>#$>Uh,#3 A AUE 2teA}o #J*A @"BU_1SHD:  # ;ih(>TA9 35AUFi4F? <Ѕ2P $>  u` ^?3 u4#"t  q?A@g#tYsn$"Jh?hB="5 +#?A&\ ?~!~!5 `?CopyrigTt(c)20 9M c os f"!r !a i n. Al (s"e e v0d l>#UhEI !#J?Xl]b4X6WBHD:  # ;h0>Th]]9 #AUFi4F?k?@P -u `u bu  @",% AGh[ >u`] ?u#J" ѿ$ #p/$.?/ ")Z/l)t )baht Da&(/Ԯ$BU27q 0??16?\._?b\?F 7`Backgrou_ndC0l0rz%b5=t3E1 3&? ?9!J31h17;h`?1py0i0hUt (0) 2`W09 M{@c2%s0fB1ru@1ua@i0n. A0l zGsBe@e0v@d@0l># !Uh#3 A jAYbJ*3B`Uy_1HD:  # ;h0>Th]]9 #AUFi4F;? ?@P'-DT!>-u `u bu  @"E% AG[mu` 7?u#e@)J"9$' "/-%Y'%-(c/- t aht aR"/$&QbaBU2q 0??16?\.?Eb\?F 7`BackgroundC0l0rz%b_5=3E1 3? ?9!3117;h`?1py0i0ht (0) 2`09 M{@cJ2s0fB1ru@1a@i0n.. A0l zGUsBe@e0v@d@0l># !Uh#3 pA jAYŊJ*3&"2B`Uy_1SUGD  # h#$T #3 BUFL8?Fc?@Fi4F? uP} VV    $ u` ?ULV){$$ASu#L"p [A@)HtI  {Pk5?0t bF,/|񁠅dyBM#!!Z* "`?CopyrigPt (c) 20 9 M c os f"!r !a i n. Al (s2e e v0d ;"eYk}$Hp1D%|4;E|4$7UGD  # h#$T #3 BUFibAA%@Fc?@Fi4F? P}  u` ?u# VrVh -(^<c=PLVU((<<PP"p [A@Kt  6TA9 35AUFi4F? <Ѕ2P u` ?Mu# t  q?A7@t+"=&9Enm$ >  , Jh?B="5 +#?A&\ ?~!~!5 ~`?CopyrigTtl(c)l20 9lM c os f"!r !a i n.l Al (s"e e v0d. l>UhE`I !#J?l]2b6BHD:  # ;h,>T]]9 #AUFi4Fu?k?@P -vu `u bu  @"E! ACWmu` 7?~u#m>)J" ѻt b[]dt ]&( '/$$y'!t"/%'-/ BU2q0??-6?\.?^X?F 7`BackgroundCj0l0rz!?b!I=3A1g 3?3117;vd`?1py0]i0ht/ (0])/ 20\@9/ UMH@c2s0fNBR1rB@1aN@i0wn./ A0l/ GGsBen@e0v@d@0l8>#t!Uh #3 A 7AI+J*3[2B-UF_1\SHD:  # ;h,>T]]9 #AUFi4F? ?@P'-DT!ݿ>-u `u b_u  @"! ACWu` ?~u#->J"9Bt ]d!t ]"/ $-&b[]$y't"/%'-/ BU2q0??-6?\.?^X?F 7`BackgroundC0l0rz!b!=t3A1 3&?J31h17;d`?1py0i0ht (0) 2U0\@9 MH@c2%s0fNB1rB@1uaN@i0n. A0l GGsBen@e0v@d@0l>#t!Uh #3 A 7AI#J*X3[2B-UF_1\SHD: # ;h4>TB]]#AAUFD"H$@Fc?@F%@FCHy5?P6 Amu` ?A@ u#`%t  ŏ1w-#LRtA`%"p [S$J=#UG ?| ?釉\2q- ?,/>&?\.+?&i/: -4 # #20U" @ #Mb Bh,:*A!!';`?CopyrigTty(c)y2009yM0c.0os0f21r01a0i0n.y Al0 8s2e0e0v0d0 l*>,>UhE=%y1# 1J! rBA?(wN9OOEE3wEOKOY% wESO#R] JiAle%(9 MX&7WW'$ž.! 
$0 V #x"j__$f!32bGgHf_1 `5kyoobq %?1HD: ##=h0>Th]]9 3#UF( x"@FFxQ}H( @Fy0խ??Pn>u` ?A@u3`b>,2t  ?sFLR +5t ""p [NJU5 MLc0U"?AG}(LM"` LineColS rz$3"9&nFFF#?&^?2#9&&` STa%dS wQ/#/V/7(iF" T3#$2236hL1$T1T1\7;C#?1pyY igTt ( )02`090MK cY os0f21r0X!a0i0n.0 AR0l07sIBe@eY vO d=@R0 l>]Uhz133!J!9 RXEO?_)[3?^X_j_W/U95N_ __/U OQ_ou_j|5Ab959 M{B7/f!3bѿavS4!$f1m%A12(ooXB7sw'$)~ vv: sv688e !@dvQB:(F|@kf9A?=3$z,Z#bԁC~-,T{< B2 > HD: ##=h4>T]]9 P3!#UFD"H$@F<@F ҵ #@Fx萢5?@FN@?Pv> u{` ?{u3` #>,2t  ]?A@LR e0t bF@ ?p [e"eJU5 - MLT0U("?A?G}FMb` LineColj rzR#nwFFF#?x&?I#uP&&` STadj wh/#/m+$Z# Fb lDl 72r&J,`h9F!Bt! 3#.2236A2JM3117;Z#?1pyp igTty(c)y20X@9yMb c.p osj fJB1r>@o!aJ@ij n0 yA52 BHsBej@ep vf d2l>UhhC3A3J!bJ@36&6&C\WA(5UF_k_}T3^__ g|Ue55^_X_o|UE5__^o_(bTb tl e j5iee59 M{7/^v!3qryv-4!$;7ai X7w'2(#՝~vyv vwUGDF P# h>,/T6D # UFD"H$@]F@&u@P}  D 3,@+  h?u` ?U~@@T^,u#Q3tStB%: D"tRB$tK/]'A@3bI/[(#:"tTB%/](/{/A*""/.dJ3@S3y5~h?S?J2.`Make B0c0ground: @3ز3AD[;1`VIS_PRXY0CHM!#60+@892`?]C0py0i0Wht0(0)0U2Q@0W@ I@i0J1s0ftBeArh@Aat@i0n0 0Al@ lHsBe@e0v0dT2A2e4^AA @Jy5 3H=QJ5ITT[y5IT,T[3IT@T[ITT7WEIT^[@EXhTWnda:oTw7T(11J2G8hcAAEA>Xr1IC1&` Hn@d2&F0ot@A0#` SPowoTD!+"vde* @"s)r#r%_w)rgA0o^So=@ib@1$1J2ty2-t3I[t ZtE "bG8TQk`aJ5AK#r)r1{1Qg ga$y5hce@6D3,φ+?6ם ߪJAC BFg]b#i ^CB ߮ _U@eko}+j oBBVoP?nbXqeO8KgxiGClO[n:prtX?#w@zo}drUFDfP h,TPYYCUFY,b'@F"H$ @Fx siz "]bckru"im.bY,b"H$@@\L[/f&.{@ͺl@;μ3D UGD" # h <^T$(YY#UFY,b@F"H$+@' ]P} j&(<XP"u` ? ((<<PjP nu#:xtSv߀A@#tT /tb"(9'do `%  `"Mo`Make B c ground!}Q ` Ev nt!pb ~  "%JT%-%'W+3S?$B#i#k0Ud2>3F&=i$46`#4r6r6 :2g"\i#2q0??F`#F\.?y( .O@i#dA+137]D"`?C"y ui h ( ]) 20@9 UM@c"s fBRAr Aa i n. AUl@ HsBe@Ie !d@)01w`V@s_0_RXY@cPWm![06@2.P QW"&/H:L e(^!6D _] i#HZZ`%T[%T@[3TYWgda^oC +T# 1 1`"13Rd3](cvydA C08Ua%k,@odL33TdAfx\3` D N tB!a@n8UA^E `%{MW?r6!3r0v*4*P83r69MTHD # =hj0>T h]]#A UFf0!? c@F"H$@FL( jKb' P6 ]Au` ^?M u#t  ]oEt9SENQ,>ChJ#0O迴Nk?{F 2q ?/&F\.?A8@73`Backgrou_ndCp lp rz@b Q Q#t$]A -]&MB±~<# u;c/u/!O2݇ #Iob< @P2V?h?!ܔ6!P!';3?1py0i0hUtp(0)p2`W09pM@c2%s0f!B1r@!ua!@i0n.p A| lpGsmBeA@e0v@dHa@ l> ,>Uh =E A#!1ŊJ`+6R  8x\ UVMY!9#_x9_5QoU3U+>_3__E2 _U2KrA 2NcCiA1eE(9 MX'E'0UbŔ+!ra .fjvoo{'_ !3rgfc`#fI-pHD # =hj0>T h]]#A UFL( jb@F"H$+@' P6 n>u{` ?5 u#t  ]EQt9SENEEhJ#0O贁Nk?F  ޱ2q ?/&F\.?MA8@73`BackgroundCp lp rz@bI Q:'! #t*$ -&MB±}_'!<# u;c/u/!<݇ #Ib-< @P2V?h?!܂6!!';3?1py0i0htp(0)p2`09pM@c2s0f!B1r@!a!@i0n.p AR| lpGsmBeA@e0v@da@ l> ,>Uh =E A#!1J`X+6  8x" UVMY!!_v9_xoU3^9__1UoUbE2 _U2rA 2NcCiA1eE9 MŔX'E'0Ub+!ra fjvoob{'*?!3rgfc`#fI-pHD # ;hj0>T h]]#A!AUF04qy#@FǮX?Fqo#@uP6 $> lu{` ? ;u4#*t' ‡tfmta{mvm[W"m3J#U0O贁Nk?F 2\2q* ?)/;&F\.?+A,8 iG #f?pH&M<# ";`BackgroundCJ0lJ0rz@ba0a072=?O?a4i6qbWd @B !!';us3?1py0i0ht (}0) @2`0U9 @M@c2s0IfB1r0Y1a@i0n. @ A0l @GsTBe*(@e0vf@dH@ l>,>UhE=%1#!1J`Fr  $&M3 `Q 6RFxAgߊ |? YF_W&d>2 ~`_21^'` _}A_(@M9 __Ue%2OU2rA T 2cxCiA1e%9 MX'g'0EUrŻ+$a f#xB"joo!$m_!3r =w>v12y q-> Ako(#HD # =hj0>T h]]#A UFOvE%@FǮX?FU1@?P6  >u` ?j u#t  ΈEt9SsENCп+EhJ"A|7<F ?ka@X:` Ac entCol* rz} b A bQ#z?)ۀ Ib'Ak!" //.*?/b!#Z/l 񽓱2q0??6F\.?Ey =?:@73`B cwkg0 ou d/b'Q Q3$A -6MB80UB2  !?'117;`?x1py0 ik0h ( )@2`0U9@M@cm2s* IfBx1r@/!a i* n.@ A, l@GsBe*@e0 v d@0Rl> ,>Uh =EAh#1o!J`z*`1  86n8[2 VM(X1_[gU3^9o!oUUE2~_Uo"KrA 0o"c%SiAbE9 M!4E.'D;;Da f jooŕ{7v!3r_w`v`Wvv+H=R_\H>` r =T"?KF(#6 وTB9괿 -^aд o@{+C oQaGUoPxxUH͐IéٗUFDfP h>(/T6D UFY,b'@F#H$ @FxT P]]9 %AUFY,b@F#H$@'R P6 u`bA@utS!WtT6 WWb; Z RADEJ#U0U?t":#a?L& ? 
[Binary data omitted: Microsoft Visio drawing/stencil content embedded in the archive. The only recoverable fragments are network-diagram shape names (City - network location; Server - network peripheral; Firewall; Laptop / portable / notebook computer; PC - personal computer; Mainframe) and the string "Copyright (c) 2009 Microsoft Corporation". The original bytes are not representable as text.]
?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FO#۲?Fo2zP dj|a{ޓ8cUF:&x0?F?F?F'qoO 0Yifull4ajK?ON~f0c?+ fEeޖe?F<'?F@]?FsF%ޕol}dQ3ll BXyR귺m|bBhaUe```?FV ?F?Th ofl?PI3qoiGkeðl?2` ,yFT?heFnF*OĒ?F o* leowk~yf~tGg˂a ,8b1;MHD: )&# T=h0>Th]]9 Q BUFL&d?F`0?F(\µ^S?P Bt`  {Go?tNV yAP FBub ,duo7%>RJ[0U?<ALv@tt7 }b6*D&tD  td*JBı) ?A>"b7)D&a4D  V3 D 8/td/tRy.x1x1`?CopyrigTt (c)02`090M0c0os0f21r01a0i0n.0 Al0 8s2e0e0v @d0"h0!<M#l#/MlP> @?UhLAA]R 1 P1"7Ж1bA>!J`oO2z[ dja_q:z׍ Q 6VMB@챝/P\`_m;E,Ui5Uޖ?FQ[5lf\VU'EUY2Ruݯ?FU]˖7sf\pdѣSYUWfe _zx]݅om{ZcXUFRP\g[)JXAp| rhQ#5 IXAYaLPւ?FتcP\T f\3$sXAp"۲ ElU1YA-k?FFoZP\fyf\!XAYX%?F8K7P\{ 䘷f\&SAY&1l?Fr>|]P\-]qiYip~ X1#g~jt-? XAY?F ףp=P\' f\?P=Rw㹥X'RXT}7vP\__/vnTpljlX7U/?$?vQ[E7Ժf\wUQY|>qh|P\4GN"MDX U29_KU#rA0$@CHD: )&# T=h0>Th]]9 Q BUF8?Fā43?F7??FtYOȻ?P Bt`  1=o2?!88;D;68?5u;D; tj>)8LALA1 `?CopyrigTt (c)@]2`09@M{@]cy@oss@fBRrAru@Aa@is@Wn.@ 0l@U yHsBe@ey@v@d@v2h<M#l#A M@?4>UhkQkQjA6Q>1| L`Fdj]JK}vM41R] 6)fMB@Wa\ k_程avCe=E~SeR9Y? lgESeFeabkod i?SQޗh~?5@+ c;ꉞi2_U3rA/04sCHD: )&# T=h0>Th]]9 Q BUF2B9?FZ&?F'l?F{Cƻ?P Bt`  ,i?t('̺㯝4\YDrVBu auo7'>JAUVJp? ?Awz@bbb! ( " '+ ' qq?z A 9 +/.&68))E! '+ g?W-sU"!n*a4 8) "u#{ u#D!t?8tM:Bav L62?!08;<;60?5u;<; tb>){8DADA1 `?CopyrigTt (c){@]2`09{@Ms@]cq@osk@fyBRjArm@Aay@ik@Wn.{@ 0l@U qHsBe@eq@v@d@n2h<M#l# M@?]UhzbA.Q61b L` djM"aQ  6fM{"@Ftnq\[3K_,7e5EGe<\ֽE?Nf[ N_qd7eEGec6i\m jOf܋h2_U3rA'04msCHD: )&# T=h0>Th]]9 Q BUF] ?F t5?F!/$?Fᱝ̻?P Bt`  "?t4@+ϛ׊T, H_0Bu aquo7(>KJTA0f88v?WYgAL5v]@z bbob!'݉ 6 ,% )! ! ,+ <?' o?z #a47% =) J"E#K#{% (+ ( t#BXY?A> ?A "+(/+/=/O/a!y:5r=l/@ sU10( /???? "0<2(?M{!b 1//*%,"st:"t>0QQ'@`?CopyrigTt (cu)FP2`09FPuM>PcTh]]9 PB IAUFR?F}Y?FF(e?F=PFo?P t`  (\#?tƯ.5q+ tЖZu Mauo8)>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 p l l w+ g?-#U!׷!zme Ae [+j/|//-g#+ s"?(a4\0 9 *2!#{0#t8tϻ:񧂍Ua PL2$18K;QF?VEK; t>B4AA0`?CopyrigTt (c)@2`09@M@c@o%s@fBAr@Qua@i@n.@U @l/P Hs3RUePe@vEPd'P 2h<M#l# M/P?]UhzPAQ1 L@3FO#۲?FoO2zP dj|aW{8cUF:&x0?F?F?F_?'qoO 0Yigull4aj?N~f0co+ fEeޖe?F<'?F@]?FwF%ޕol}dQ3ll BXlﷺm|bBhaUeߔ`ba`?FV ?FTh oflPIϮ3qoikeðl8` |GT?heFnF$OĒ?F o* leowky{ftGg˂a,8b1;MHD: )&# ;h0>Th]]9 PB IAUFpX?F&U?F+\(?F~@ t?P t`  (TJ?t[uᯜ(=u -ZrqKZu Mauo8*>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FMbX9?F4ҫP dj}ah8cUF[~K?F|D?F+Xc?F&PRcGoO 0YiM+FllZaj}0%|f0c̭ fEe.V?Fȿ̅)`oSt?F?(X:olGGTll .Xl{e{|rww@haUe:&x0?F%Clw?FAz?F 1vņflux1qoiFA(]`t߼ |~Th]]9 PB IAUFpX?F*g?F+\(?F~@ t?P t`  (TJ?t4%u -ZrqKZu Mauo8+>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FMbX9?F4ҫP dj}ah8cUF[~K?F|D?F+Xc?F&PRcGoO 0YiM+FllZaj}0%|f0c̭ fEe.V?Fȿ̅)`oSt?F?(X:olGGTll .Xl{e{|rww@haUe:&x0?F%Clw?FAz?F 1vņflux1qoiFA(]`t߼ |~Th]]9 PB IAUF$?Fy}?F+\(?F-ku?P t`  p&k?tl    PEJڍU[\/(?&. ?AbbY!z@Ae bbCt c(a4b  # b(h#{ h#΍t(tI*#0f88vb.#i L5<1v(&$7/(D%"6/5;0( t.*B#11`?CopyrigTt] c).@2`W09.@M&@c$@os@f,BAr @YAa,@i@n}..@ Alt@U $HsxBeL@e$@v@dl@"h<M#l# Mt@?]UhzAA!J@#FMbX9?F$x0OԛP dj}תa4(SUF[~K?FE?FSc?F?#oO 0YM+Fӱ\*aj,.}a$?# XEU"n?F +?FbEd?F v<olTBJ\LMYBltavXlp@XEUv/?Fpvw;pg?F $hއ?FLvo~l/AD0qYJg ַBl)$ԞXlDB:XU^F.fّPKk^Rl_[Ei?1GAf`d?Sq+abvHD: )&# ;h0>Th]]9 PB IAUF$?FQ?F+\(?F-ku?P t`  p&k?t u -+CmoLZu Mauo8->    PEJڍU[\/(?&. 
?AbbY!z@Ae bbCt c(a4b  # b(h#{ h#΍t(tI*#0f88vb.#i L5<1v(&$7/(D%"6/5;0( t.*B#11`?CopyrigTt] c).@2`W09.@M&@c$@os@f,BAr @YAa,@i@n}..@ Alt@U $HsxBeL@e$@v@dl@"h<M#l# Mt@?]UhzAA!J@#FMbX9?F%x0OԛP dj}תa4(SUF[~K?FE?Fdc?F,?#oO 0YM+Fӱ\*aj ,.}a?# XEU"n?F{ +?F(%/Nd?F<olTBJ\MYBlU{XlpDc@XEUv/vw;pg?F^$hއ?Fvol/AD0qYf ַBls$ԞXl+?B:XU^F.fّPyKk^Rl_[Ei1GAf`dSOq+a,(bvHD: )&# T=h0>Th]]9 Q BUFt[@#?FX J?F\HRq?FƯK7?P Bt`  =0[?t@'#ll$nZBu duo7.>JA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZFهgϰ 7j bL\GB] 6S-fMB:MbPdc mdb`GeGEWe$oˏZ\ai?bvIGeU@Vϰ+@_?F23Th]]9 Q BUF偃?F?F\HRq?FƯK7?P Bt`  Xkw?t[I2#ll$nZVBu auo7/>JA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZFهgϰ 7j bL\] 6-fMB:MbPdc Ɵmdb`GeGEWe$oZ\aibvIGeUWeVϰ+@?Vpk_xu j aGe@We_ooh2Kt]u3rA 4s2HD: )&# T=h0>Th]]9 Q BUF͜?Fil8?F\HRq?FƯK7?P Bt`  W׎?tt^ #ll$nZVBu acuo7aJ?JA]^'?X ?Az@bu ( 6  )b= + ? ' 88?z A  +=  /)/;/M$[+["-j!Q U1[!x(a4 ) ."'}#{ :}#BtQ8t_:B@a~ LH2!B8;N;6B?5;N; Ktt>8aVAVA `?CopyrigTt (c)@2`09@M@c@os}@fB|Ar@Aa@i}@n.@ 0l@ HsBe@e@v@d@2hX<M#l # M@?4>UhuQuQtA@QH1j LZFه3 7j bL\`] 6-fMB:MbPdc gmdb`GeGEWenZ\aiavIGeUWeVϰ+@?Vpk_xu j aGeWe_ooh2K]u3rA 4s2HD: )&# T=h0>Th]]9 Q BUF[@#"?F`>?Fo?FƯK7?P Bt`  ?tS'8Xj$nZVBu auo71>JA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt>(cu)>2`09>uM@c@os}@IfB|Ar@Aa@]i}@n.> 0Ul@ HsBe@e@v@d@B2h,<M#l# M@?U4>UhuQuQtAJ@QH1j LZFهϰ 7j bL\] 6-fMB:ʿMbPdc 2Ngodb`GeGEWe$oZ\aibvIGeUWe4?Vpku j aGeWe_ooh2K]u3rA 4s2HD: )&# ;h0>Th]]9 PB IAUFv?Fo]j?FQ?F?P t`  Smo?tT.qČ( 뾅@l* 9u duo82>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFb7@c 6{fM8cUFnDdTh]]9 PB IAUFSQ=?F{3?FQ?F?P t`  4o?tPǭ\"Č( 뾅@l* 9u auo83>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU K1(a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFbL7a 6{fM8(a?nDFF!@~qWoO 0dTc I\rdcd bjUkgaPbeEef"v?Fr6d?F?kĀqo`OivddlXr`tF0sK|٢jr3t!KU:G}?{]rLl} ]|q3xnTrg?F<ے7pooZgtfio,V|=] 5ve&1?Fpn?FTh]]9 PB IAUF!?F?FQ?F?P t`  + {o?tn&Č( 뾅@l* 9u auo84>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFbL7a 6{fM8cUFnDFK!@?FDqWoO 0dTc I\addbjUWkgabeE(aFjv?Fr6d?FkĀAqo`Oi^j^r`t0sK#|pr9t!KU:M}{]rYLl}]|9xLufTrg?F<7vodObdcZgtfi,V#|=c ;vLu&1?Fpn?FTh]]9 PB IAUFĊ?F)lIJ?FQ?F?P t`  o?t\Č( 뾅@l* 9u auo85>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFbL7a 6{fM8(a?D +F2|>?FW@WoO 0dTc rdcd bj{^ea͂beEef"v?Fddd?F܃q o`OivddlXr`tuqEÊ|:jr3t!KU:G}{]r,Ll}]|q3xngqrg?F~pooZgtML|\ 5ve&1?FWf?F#0bĂ|[m낪 z'ǵ(|F$$!ayTh]]9 Q BUFt[@#?Fyوe??F\HRq?F#o˯Z?P Bt`  =0[?t&0N#ll#Bu duo76>JTA0f88v?WYϳAjL@z bbb!'  ,% )! ! ,+ <?' 
o?z #a47% =) J"E#K#{% (+ ( t#B2?A> &?A"&"I&$/*%,"t:"Kt> A A6 `?CopyrigTt (c)D@2`09D@M<@c:@oKs4@fBB3Ar6@oAaB@i4@n.D@ Al@ :HsBeb@e:@vZ@d@2h/1<M#bl#7? M@?(>Uh7E +A(A!{ L:F A<6j% bRzg9ǩ<2 "\MbPdS mT W5U6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF偃?F?P?F\HRq?Fn˯Z?P Bt`  Xkw?tGj#ll#VBu auo77>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? M@?(>Uh7E P+AA!{ L:F<6j% bRzg9ǩ<2 "\MbPdS mT W5UO6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF͜?F:'?F\HRq?Fn˯Z?P Bt`  W׎?ts aU#ll#VBu auo78>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? M@?(>Uh7E P+AA!{ L:F6j% bT ü2 "\MbPdS mT W5UO6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF[@#"?F.޺?Fo?Fn˯Z?P Bt`  ?tchE 8Xj#VBu auo79>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt>(c)>2`09>M<@c:@o%s4@fBB3Ar6@oAuaB@i4@n.>_ Al@ :HUsBeb@e:@v@-d@2`h/1<M#l1#7? PM@?(>Uh7E +AA!{ L:F<F6j% bRzg9<2 "\ʩ7MbPdS 2NoT W5UO?6ik0YaYvq 2UHT5D B &# T>h0JTtiiM ENUF|u(?F-?FPaW?F ru?P WNtQ` 786R7?tU溁i8;(\I2-nNuj mu){:E0f88wv?AVQ Ӎ!@#b#m@b  0#  #W(3 (o+&T" #t#NĽ/?. ?M"&$a%$$,/>*L%i,T"tΞ*T"tR.R11#`?Copyrig`t (c)02l090M0c0os0f21r01a0i0n.0 Al@ 8sBe0e0v0@d@"Vt!X#xB #/ @T,UtE1UA-!@nZ pvm?S׮(%UQG@Y9"S\FkV5%UЗn@‹Pهϰ=\WQS\OXbiTQY%_7_I_[_DQOOOO__^̔ԍ?f Q́)f@vVP|?5^BlӿIY:6Xl?Ib ʏ܏EABTfxF\HRqCk?-q_WaW?ϰ+@"Bl!H,BXQnB)fFޖBoT[inJ\ۙgyAHD: )&# T=h0>Th]]9 Q BUF4@ ,?FYوe?^?F&1?F3(e?P Bt`  N!?tx/m4tMuVBu auo7;>JAS`? ?Az@bbb! ( 6 )  '+ ߈?/I-Z#mg!z A " ././@/R+,g/y'sU1,ar4 )] "#{t #BtNg8tu:Ba PL^2!X8;d;Q6X?E;d; t͊>8lAlA `?CopyrigTt (c)@2`09@M@c@o%s@fBAr@Aua@i@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhQQA(VQ^1| LZFX} g 7j b2ɶO] 6CfMB:ZMbXdc $I$I܍+d 2aa]Eme'la'iL.]eU@9/$Ew?FslAka,dHޓּgmeoo(oh2gyu3rA 4s2HD: )&# T=h0>Th]]9 Q BUFnټk-?F]u)?F&1?F3(e?P Bt`  3?tg[ m4tMuVBu auo7<>JAS`? ?Az@bbb! ( 6 )  '+ ߈?/I-Z#z A* " ././@/R+Yg/y'sU1,a4 ) "#{ #BtΜg8tu:Ba L^2!X8;d;6X?E;.d; t͊>8lAlA `?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Aa@i@n.@ 0l@ HsBe@e@v@d@2hb<M#l1# M@?4>UhQQPAVQ^1| LZFX}  7j b2/O]M 6CfMB:ZMbX͉dc ?$I$I܍+d 2aa]Emekla'i.]eU@;0$E?FTh]]9 Q BUFܲ?FH7H?F!?FTUUUɺ?P Bt`  &?tQ3ņ= ףc?ckBu5 auon7=>)JAUVJp? ?Awz@b_bb! (݉ " '+ ' qq?z A  +S/.&68))E! '+ g?uW-sUUn*a4 8) "u#{ u#D!tΜ?8tM:Bav L62?!08;<;60?5;.<; tb>{8DADA1 `?CopyrigTt (c){@2`09{@Ms@cq@oKsk@fyBjArm@Aay@ik@n.{@ 0l@ qHsBe@eq@v@d@n2hb<M#l1# M@? Uh,cQcQ] '1PbA.Q61b L`w"?FMDA dj? |m cuۡ'1 6!1fM{":F$3<6d kS=ѣ<n2{!5E[nLa vogaKeE[eV_@cbc g@]amr\GDž}iNu?FS%f|]tElFTh]]9 Q BUF:C<?FY!)^?F!?FTUUUɺ?P Bt`  Ȼ?ޫd?ttyI5= ףc?ckBu5 auon7>>)JAUVJp? ?Awz@b_bb! (݉ " '+ ' qq?z A  +S/.&68))E! '+ g?uW-sUUn*a4 8) "u#{ u#D!tΜ?8tM:Bav L62?!08;<;60?5;.<; tb>{8DADA1 `?CopyrigTt (c){@2`09{@Ms@cq@oKsk@fyBjArm@Aay@ik@n.{@ 0l@ qHsBe@eq@v@d@n2hb<M#l1# M@? Uh,cQcQ] '1PbA.Q61b L`w"?FMDA dj? |m cuۡ'1 6!1fM{":F$3<6d kS=ѣ<n2{!5E[nLa vogaKeE[eV_@cbc g@]amr\GDž}iNu?FS%f|]tElFTh]]9 Q BUFx]?FTy4߮?FU?F3z[j˺?P Bt`  Q`7lg?t-xW&XRkBu5 auon7?>)JAa^! ?Az@b_bb! (݉ 6 , )  '+ g~X?ZI-Z#!g![z A " ,/,/>/P-[?+g/y'Eي,a4 ) "!#{ #Btg8t)u:Ba L^2!*X8;d;6X?E;d; tR>8lAlA `?CopyrigTt (c)@2`09@M@c@os@fBAr@Aa@i@n.@ 0l@ HsBe@ej@v@d@2!h<M#l# M@? 8>UhoQQ]AVQ^1| L@?F҄! dj O٨l,d6ztJB: 7d /k?yԷ&Dh]EWemkp+ɉ 5d}bGeUWeFBϏ+@ol0ju?MDhnMbX0j`QDhnC<o+o"Th]]9 Q BUFT1#E2?FLΉ?FU?F3z[j˺?P Bt`  S|.?tп}Qm&XRkBu5 auon7@>)JAa^!? ?Az@b_bb! 
(݉ 6 , )  '+ g~X?ZI-Z#!g![z A " ,/,/>/P-[?+g/y'Eي,a4 ) "!#{ #Btg8t)u:Ba L^2!*X8;d;6X?E;d; tR>8lAlA `?CopyrigTt (c)@2`09@M@c@os@fBAr@Aa@i@n.@ 0l@ HsBe@ej@v@d@2!h<M#l# M@? 8>UhoQQ]AVQ^1| L?F҄! dj _٨la6ztJB: 7d /k?yԷ&Dh]EWemkp+ɉ 5d}bGeUWeFBϏ+@ol0ju?MDhnHMbX0j`DhWef9o-ai qGeUoo$o6o3UHLD )&# R>h 0JTlaaM>UF+1#E2?F* 9?F0U?F_gw?P >tA`  9|4?t +5f1,_Œ X">u eu!s7-AJɅJUU?H<j@7tt7 b:*H&tH  th*Nĵ) ?MB"b;)H&e8H m 3 H -US4o?FPjj?6ֈ@!QpoĬگ eoĩ́ F o 01G}}϶@b@mp?#nʼPV]O߸uy\l2_+ʦE}@p5"iРo~e*al|ٍşןHD: )&# ;h0>Th]]9 PB IAUF~Q?F?FF(e?F=PFo?P t`  IGr?tn 5q+ tЖZu Mauo8B>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ As"(a40 9 *2!#{0#΍t8tIϻ:AUa L2$18K;F?VEK; Kt>B4aAA0`?CopyrigTt (c)@2`09@M@c@os@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hX<M#l # M/P?]UhzA(Q1 L@3FO#?Fo2zP dj|aW{8cUF:&x0?F?Fώ?FX'qoO 0Yifull4ajS?N~dh+ fEeޖe?F<'?F@]?F\F%ޕol}dQ3ll BXlm|ԇbBhaUe```?FV ?FTh oflPIϮ3qoijeðl.` |=T?heFnF$OĒ?F o* leowky{ftGg˂a,8b1;MHD: )&# ;h0>Th]]9 PB IAUF~Q?Fe?FF(e?F=PFo?P t`  IGr?t8n5q+ tЖZu Mauo8C>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ As"(a40 9 *2!#{0#΍t8tIϻ:AUa L2$18K;F?VEK; Kt>B4aAA0`?CopyrigTt (c)@2`09@M@c@os@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hX<M#l # M/P?]UhzA(Q1 L@3FO#?Fo2zP dj|a{8cUF:&x0?F?FȎ?F'qoO 0Yifull4ajK?N~d+ fEeޖe?F<'?F@]?FsF%ޕol}dQ3ll BXlRm|bBhaUe`Ԗ``?FV ?FTh oflPIϮ3qoiGkeðl2` |FT?heFnF*OĒ?F o* leowkyftGg˂a,8b1;MHD: )&# T=h0>Th]]9 Q BUF&d29?F `0?F(\µ?F^Sӻ?P Bt`  o?tՐs] yAP FBuj auo7ARJ[0U?<ALv@tt7 }b6*D&tD  td*JBı) ?A>"b7)D&a4D  V3 D 8/td/tRy.x1x1`?CopyrigTt (c)02`090M0c0os0f21r01a0i0n.0 Al0 8s2e0e0v @d0"h0!<M#l#/MlP> @?UhLAA]R 1 P1"7Ж1bA>!J`oO2z[ djaq:z׍ QM 6VMB@챝P\`_m;?E,Ui5Uޖ?FQ[_5lf\VU'EUY2Ruݯ?FU]˖7sf\pdѿSYUWfe /zx]݅om{ZcXXUFRP\g[)JXAp| rhQ#5 ?IXAYaLPւ?FتcP\T f\3$sXAp" ElX1YA-k?FFoZP\fyf\?!,UAYX%?F8K7P\{ f\&SAY&1l?Fr>|]P\-]qiYip~ X1#g~jt-? ѰXAY?F ףp=P\' f\?P=RwX'RXT}7vP\__/vnTpډlX7U/$9SYE7Ժf\wUQY|>îqh|P\4GN?"MX U29_KU#rA0$@CHD: )&# T=h0>Th]]9 Q BUFܷqP?F@?F7??FtYOȻ?P Bt`  XM6 ?ti|9O__Bu aquo7E>KJASTfw?X ?Az@bbb! ( 6f )  '+ ߈?I-Z#g!z A Qu +%/,8)E!'+ k(?r+o,(a4 8) E"#{ #BtΜG8tU:Ba L>2?!88;D;68?5;.D; tj>8LALA1 `?CopyrigTt (c)@2`09@M{@cy@oKss@fBrAru@Aa@is@n.@ 0l@ yHsBe@ey@v@d@v2hb<M#l1# M@?4>UhkQkQPjA6Q>1| L`Fd~j]J}v)M]M 6)fMB@Wa\ k_稇avCe=ESeR9Y? l`gESeFeabkod iSQޗh~??5@+ c;Ꞗi"2_U3rA/04sCHD: )&# T=h0>Th]]9 Q BUFdHJAUVJp? ?Awz@bbb! ( " '+ ' qq?z A 9 +/.&68))E! '+ g?W-sU"!n*a4 8) "u#{ u#D!t?8tM:Bav L62?!08;<;60?5u;<; tb>){8DADA1 `?CopyrigTt (c){@]2`09{@Ms@]cq@osk@fyBRjArm@Aay@ik@Wn.{@ 0l@U qHsBe@eq@v@d@n2h<M#l# M@?]UhzbA.Q61b L` djM"aQ  6fM{"@Ftnq\[3K_,7e5EGe<\ֽE?Nf[ N_qd7eEGec6i\m jOf܋h2_U3rA'04msCHD: )&# T=h0>Th]]9 Q BUF)8/?FFT?F!/$?Fᱝ̻?P Bt`  vNC?tG(ϛT H0Bu auo7G>JTA0f88v?WYALv@z bbb!'n 6 % )! ! ,+ <?' o?z #a47% =) J""#K#{% (+ ( tQ#BXYщ?A> ?A "+(/+/=/O/a!y:5rl/@ sU10( /???? "?0<2?M{!b 1/Ĝ/*%,"t:"t>TQQ'@`?CopyrigTt (c)FP2`09FPM>PcTh]]9 PB IAUFJ1>?Fl?FF(e?F=PFo?P t`  0!?trj5q+ tЖZu Mauo8A    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FO#۲?Fo2zP dj|aW{ޓ8cUF:&x0?F?F?F_'qoO 0Yigull4aj?ON~f0c?o+ fEeޖe?F<'?F@]?FwF%ޕol}dQ3ll BXl귺m|bBhaUe`ba`?FV ?FTh oflPI3qoiؿkeðl8` |G?T?heFnF$O?F o* leowky?{ft?Gg˂a,8b1;MHD: )&# ;h0>Th]]9 PB IAUFGF?FEs?F+\(?F~@ t?P t`  9`Ik?t)6^u -ZrqKZu Mauo8I>    PEJƱZVJp!?&. 
?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FMbX9?F4ҫP dj}ah8cUF[~K?F|D?F+Xc?F&PRcGoO 0YiM+FllZaj}0%|f0c̭ fEe.V?Fȿ̅)`oSt?F?(X:olGGTll .Xl{e{|?rww@,eaUe:&x0?F%Clw?FAz?F 1vņflux1,oiFA(]`t߼ |~Th]]9 PB IAUFGF?FKDֳ?F+\(?F~@ t?P t`  9`Ik?t┯4;u -ZrqKZu Mauo8J>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FMbX9?F4ҫP dj}ah8cUF[~K?F|D?F+Xc?F&PRcGoO 0YiM+FllZaj}0%|f0c̭ fEe.V?Fȿ̅)`oSt?F?(X:olGGTll .Xl{e{|rww@haUe:&x0?F%Clw?FAz?F 1vņflux1qoiFA(]`t߼ |~Th]]9 PB IAUFXIn?F    PEJڍU[\/(?&. ?AbbY!z@Ae bbCt c(a4b  # b(h#{ h#΍t(tI*#0f88vb.#i L5<1v(&$7/(D%"6/5;0( t.*B#11`?CopyrigTt] c).@2`W09.@M&@c$@os@f,BAr @YAa,@i@n}..@ Alt@U $HsxBeL@e$@v@dl@"h<M#l# Mt@?]UhzAA!J@#FMbX9?F$x0OԛP dj}תa4(SUF[~K?FE?FSc?F?#oO 0YM+Fӱ\*aj,.}a$?# XEU"n?F +?FbEd?F v<olTBJ\LMYBltavXlp@,UEUv/?Fpvw;pg?F $hއ?FLvo~l/AD0,YJg ַBl)$ԞXlDB:XU^F.fّPKk^Rl_[Ei?1GAf`d?Sq+a,(bvHD: )&# ;h0>Th]]9 PB IAUFXIn?F(?F+\(?F-ku?P t`  -k?tEeoEu -+CmoLZu Mauo8L>    PEJڍU[\/(?&. ?AbbY!z@Ae bbCt c(a4b  # b(h#{ h#΍t(tI*#0f88vb.#i L5<1v(&$7/(D%"6/5;0( t.*B#11`?CopyrigTt] c).@2`W09.@M&@c$@os@f,BAr @YAa,@i@n}..@ Alt@U $HsxBeL@e$@v@dl@"h<M#l# Mt@?]UhzAA!J@#FMbX9?F%x0OԛP dj}תa4(SUF[~K?FE?Fdc?F,?#oO 0YM+Fӱ\*aj ,.}a?# XEU"n?F{ +?F(%/Nd?F<olTBJ\MYBlU{XlpDc@XEUv/vw;pg?F^$hއ?Fvol/AD0qYf ַBls$ԞXl+?B:XU^F.fّPyKk^Rl_[Ei1GAf`dSOq+a,(bvHD: )&# T=h0>Th]]9 Q BUF贷F$?Ft\@?F\HRq?FƯK7?P Bt`  x#?t#ll$nZVBu auo7M>JA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZFهgϰ 7j bL\G@[ 6S-fMB:MbPdc mdb`GeGEWe$oˏZ\ai?bvIGeU@Vϰ+@_?F23Th]]9 Q BUF#/?FKGY?F\HRq?FƯK7?P Bt`  ~B?tl#ll$nZVBu auo7N>JA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZFهgϰ 7j bL\] 6-fMB:MbPdc Ɵmdb`GeGEWe$oZ\aibvIGeUWeVϰ+@?Vpk_xu j aGe@We_ooh2Kt]u3rA 4s2HD: )&# T=h0>Th]]9 Q BUF/ ֛9?F46Hr?F\HRq?FƯK7?P Bt`   a1?t 3JA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZFهgϰ 7j bL\] 6-fMB:MbPdc Ɵmdb`GeGEWenZ\aiavIGeUWeVϰ+@?Vpk_xu j aGe@We_ooh2Kt]u3rA 4s2HD: )&# T=h0>Th]]9 Q BUF[@#?FLO?Fo?FƯK7?P Bt`  ?tSӈEw8Xj$nZVBu acuo7AJA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt>(cu)>2`09>uM@c@os}@IfB|Ar@Aa@]i}@n.> 0Ul@ HsBe@e@v@d@B2h,<M#l# M@?U4>UhuQuQtAJ@QH1j LZFهϰ 7j bL\ѻ] 6-fMB:ʩMbPdc 2N3odb`GeGEWe$oZ\aibvIGeUWe4?Vpku j aGeWe_ooh2K]u3rA 4s2HD: )&# ;h0>Th]]9 PB IAUF?FtË.5?FQ?F?P t`  ?{R   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFbL7a 6{fM8cUFnDdTh]]9 PB IAUF4{"?F=N?FQ?F?P t`  ^}W;o?t9pČ( 뾅@l* 9u auo8R>   ] EJ]_DRb(?&. 
?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFbL7a 6{fM8(a?nDFF!@~qWoO 0dTc I\rdcd bjUkgaPbeEef"v?Fr6d?F?kĀqo`OivddlXr`tF0sK|٢jr3t!KU:G}?{]rLl} ]|q3xnTrg?F<ے7pooZgtfio,V|=] 5ve&1?Fpn?FTh]]9 PB IAUFGC<-?Ff?FQ?F?P t`  L?RD)o?t? )Č( 뾅@l* 9u auo8S>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFbL7a 6{fM8cUFnDFK!@?FDqWoO 0dTc I\addbjUWkgabeE(aFjv?Fr6d?FkĀAqo`Oi^j^r`t0sK#|pr9t!KU:M}{]rYLl}]|9xLufTrg?F<7vodObdcZgtfi,V#|=c ;vLu&1?Fpn?FTh]]9 PB IAUFĊ?Fn6b?FQ?F?P t`  ro?tpwZČ( 뾅@l* 9u auo8T>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFbL7a 6{fM8(a?D +F2|>?FW@WoO 0dTc rdcd bj{^ea͂beEef"v?Fddd?F܃q o`OivddlXr`tuqEÊ|:jr3t!KU:G}{]r,Ll}]|q3xngqrg?F~pooZgtML|\ 5ve&1?FWf?F#0bĂ|[m낪 z'ǵ(|F$$!ayTh]]9 Q BUF贷F$?FlIJ?F\HRq?F#o˯Z?P Bt`  x#?tx#ll#VBu auo7U>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? M@?(>Uh7E P+AA!{ L:F A7<6j% bRzg9<2 "\MbPdS mT W5U6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF#/?F ?F\HRq?Fn˯Z?P Bt`  ~B?t AF#ll#VBu auo7V>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? M@?(>Uh7E P+AA!{ L:F<6j% bRzg9ǩ<2 "\MbPdS mT W5UO6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF/ ֛9?FD?F\HRq?Fn˯Z?P Bt`   a1?t(9Sj#ll#VBu auo7W>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? M@?(>Uh7E P+AA!{ L:F6j% bT ü2 "\MbPdS mT W5UO6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF[@#?F\o]?Fo?Fn˯Z?P Bt`  ?tF]8Xj#VBu auo7X>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt>(c)>2`09>M<@c:@o%s4@fBB3Ar6@oAuaB@i4@n.>_ Al@ :HUsBeb@e:@v@-d@2`h/1<M#l1#7? PM@?(>Uh7E +AA!{ L:F<F6j% bRzg9<2 "\ʩ7MbPdS 2NoT W5UO?6ik0YaYvq 2UHT5D B &# T>h0JTtiiM ENUFlQ?Fڋ?FPaW?F ru?P WNtQ` c͕.:7?tUr=Ki8;(\I2-nNuj mu){YE0f88wv?AVQ Ӎ!@#b#m@b  0#  #W(3 (o+&T" #t#NĽ/?. ?M"&$a%$$,/>*L%i,T"tΞ*T"tR.R11#`?Copyrig`t (c)02l090M0c0os0f21r01a0i0n.0 Al@ 8sBe0e0v0@d@"Vt!X#xB #/ @T,UtE1UA-!@nZ pvm?S׮(%UQG@Y9"S\FkV5%UЗn@‹Pهϰ=\WQS\OXbiTQY%_7_I_[_DQOOOO__^̔ԍ?f Q́)f@vVP|?5^BlӿIY:6Xl?Ib ʏ܏EABTfxF\HRqCk?-q_WaW?ϰ+@"Bl!H,BXQnB)fFޖBoT[inJ\ۙgyAHD: )&# T=h0>Th]]9 Q BUFhY?FlIJ?F&1?F3(e?P Bt`  6n?t:m4tMuVBu auo7Z>JAS`? ?Az@bbb! ( 6 )  '+ ߈?/I-Z#,z A* " ././@/R+Yg/y'sU1,a4 ) "#{ #BtΜg8tu:Ba L^2!X8;d;6X?E;.d; t͊>8lAlA `?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Aa@i@n.@ 0l@ HsBe@e@v@d@2hb<M#l1# M@?4>UhQQPAVQ^1| LFX}  7j b2ɏOR] 6CfMB:ZMbXdc $I$I܍+d 2aa]Eme'la'iL.]eU@9/$Ew?FslAka,dHޓּgmeoo(oh2gyu3rA 4s2HD: )&# T=h0>Th]]9 Q BUFy?Flo]9?F&1?F3(e?P Bt`  7 ?t)!m4tMuBu ,duo7[>JAS`? ?Az@bbb! 
( 6 )  '+ ߈?/I-Z#mg!z A " ././@/R+,g/y'sU1,ar4 )] "#{t #BtNg8tu:Ba PL^2!X8;d;Q6X?E;d; t͊>8lAlA `?CopyrigTt (c)@2`09@M@c@o%s@fBAr@Aua@i@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhQQA(VQ^1| LZFX} g 7j b2ɶO] 6CfMB:ZMbXdc $I$I܍+d A2aa]Emekla'i.G]eU@;0$E?[FTh]]9 Q BUF߹e?F$jߤ?F!?FTUUUɺ?P Bt`  ?t&S= ףc?ckBu5 auon7\>)JAUVJp? ?Awz@b_bb! (݉ " '+ ' qq?z A  +S/.&68))E! '+ g?5W-sUU,p(a4 8) "u#{ u#D!tΜ?8tM:Bav L62?!08;<;60?5;.<; tb>{8DADA1 `?CopyrigTt (c){@2`09{@Ms@cq@oKsk@fyBjArm@Aay@ik@n.{@ 0l@ qHsBe@eq@v@d@n2hb<M#l1# M@? Uh,cQcQ] '1PbA.Q61b L`w"?FMDA dj? |m cuۡ'1 6!1fM{":F$3<6d kS=ѣ<n2{!5E[nLa vogaKeE[eV_@cbc g@]amr\GDž}iNu?FS%f|]tElFTh]]9 Q BUFtx?F/?F!?FTUUUɺ?P Bt`  K_?t= ףc?ckBu5 auon7]>)JAUVJp? ?Awz@b_bb! (݉ " '+ ' qq?z A  +S/.&68))E! '+ g?uW-sUUn*a4 8) "u#{ u#D!tΜ?8tM:Bav L62?!08;<;60?5;.<; tb>{8DADA1 `?CopyrigTt (c){@2`09{@Ms@cq@oKsk@fyBjArm@Aay@ik@n.{@ 0l@ qHsBe@eq@v@d@n2hb<M#l1# M@? Uh,cQcQ] '1PbA.Q61b L`w"?FMDA dj? |m cuۡ'1 6!1fM{":F$3<6d kS=ѣ<n2{!5E[nLa vogaKeE[eV_@cbc g@]amr\GDž}iNu?FS%f|]tElFTh]]9 Q BUFiv?F{<?FU?F3z[j˺?P Bt`  y]?tHIΧ&XRBu auo7^>JAa^!? ?Az@bbb! (n 6  )  '+ g~X?I-͊Z#!,[z A " ,/,/>/P-[?+g/y'Eي,a4 ) "!#{ #Btg8t)u:Ba L^2!*X8;d;6X?E;d; tR>8lAlA `?CopyrigTt (c)@2`09@M@c@os@fBAr@Aa@i@n.@ 0l@ HsBe@ej@v@d@2!h<M#l# M@? 8>UhoQQ]AVQ^1| L@?F҄! dj _٨la6ztJB: 7d /k?yԷ&Dh]EWemkp+,rb6c}bGeUWeFBϏ+@ol0ju?MDhnMbX0j`QDhnC<o+o"Th]]9 Q BUFT1#E2H?FK&Ua?FU?F3z[j˺?P Bt`  ?t &XRkBu5 auon7_>)JAa^!? ?Az@b_bb! (݉ 6 , )  '+ g~X?ZI-Z#!g![z A " ,/,/>/P-[?+g/y'Eي,a4 ) "!#{ #Btg8t)u:Ba L^2!*X8;d;6X?E;d; tR>8lAlA `?CopyrigTt (c)@2`09@M@c@os@fBAr@Aa@i@n.@ 0l@ HsBe@ej@v@d@2!h<M#l# M@? 8>UhoQQ]AVQ^1| L@?F҄! dj _٨la6ztJB: 7d /k?yԷ&Dh]EWemkp+ɉ 5d}bGeUWeFBϏ+@ol0ju?MDhnHMbX0j`DhWef9o-ai qGeUoo$o6o3UHLD )&# R>h 0JTlaaM>UFObFd0?F gj?F0U?F_gw?P >tA`  N+ru?t Tլ1,_Œ X">u eu!s7-`JɅJUU?HA9 j@7tt7 b:*H&tH  th*Nĵ) ?MB"b;)H&e8H m 3 H -US4o?FPjj?6ֈ@!QpoĬگ eoĩ́ F o 01G}}϶@b@mp?#nʼPV]O߸uy\l2_+ʦE}@p5"iРo~e*al|ٍşןHD: )&# ;h0>Th]]9 PB IAUF\6:?F}?FF(e?F=PFo?P t`  @?t^$h5q+ tЖZu Mauo8a>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FO#۲?Fo2zP dj|aW{ޓ8cUF:&x0?F?F?FX'qoO 0Yifull4ajS?ON~f0c?h+ fEeޖe?F<'?F@]?F\F%ޕol}dQ3ll BXy귺m|ԇbBhaUe```?FV ?F?Th ofl?PI3qoijeðl.` |=T?heFnF$OĒ?F o* leowk~y{f~tGg˂a ,8b1;MHD: )&# ;h0>Th]]9 PB IAUF\6:?F}?FF(e?F=PFo?P t`  @?t8ȍ5q+ tЖZu Mauo8b>    PEJƱZVJp!?&. ?Az@b/b@Z'݉e 6 ,)l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a4 9 .*2!#{:#t8'tϻ:Ua !L2$18K8F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FO#۲?Fo2zP dj|a{ޓ8cUF:&x0?F?F?F'qoO 0Yifull4ajK?ON~f0c?+ fEeޖe?F<'?F@]?FsF%ޕol}dQ3ll BXyR귺m|bBhaUe```?FV ?F?Th ofl?PI3qoiGkeðl2` |FT?heFnF*OĒ?F o* leowk~yf~tGg˂a ,8b1;MHD: )&# T=h0>Th]]9 Q BUFL&d?F`0?F(\µ?F^Sӻ?P Bt`  {G?t7F0 yA_P FBu aquo7c>KJm0U?<AL@tMt7( }b6*D&stD  td**Bıa) ?A.>"b7)D&ar4D [ 3 D 8/td/Kty.x1x1`?CopyrigTt (c)02`090M0c0oKs0f21r01a0i0n.0 Al0 8s2e0e0vZ @d0"h!<M#l#/MBl> @?UhLAA]J 1 A 1"7A1bA>!J`?o2z[ dUjaq:z׍40S 6VMՋB@_P\`_m;E,Ui5Uޖ??FQ[5lf\VU'EUY2Ruݯ?FU]˖7sf\pdGѿSYUWfe zx]om{Zc`,UUFRP\g[)JXAp|? rhQ#5 IXAYaLPւ?FتcP\T f\3$sXAp"۲ ElU1YA-k?FFoZP\fyf\!XAYX%?F8K7P\{ 䘷f\&SAY&1l?Fr>|]P\-]qiY?ip~ X1#g~jt-? GXAY?F ףp=P\' f\?P=RwX'RXT}7vP\__/vnTpډlX7U/$?vQ[E7Ժf\wUQY|>îqh|P\4GN"MX U29_KU#rA0$@CHD: )&# T=h0>Th]]9 Q BUF8?F43?F7??FtYOȻ?P Bt`  1=oJASTfw? ?Az@bbb! 
( 6 )  '+ ߈?/I-Z#mg!z A u +%/,8)E!'+ k(?r+o#?(a4 8) E"E#{ #BtG8tU:Ba L>2?!88;D;68?5u;D; tj>)8LALA1 `?CopyrigTt (c)@]2`09@M{@]cy@oss@fBRrAru@Aa@is@Wn.@ 0l@U yHsBe@ey@v@d@v2h<M#l#A M@?4>UhkQkQjA6Q>1| L`Fdj]JK}vM4@[ 6)fMB@Wa\ k_程avCe=E~SeR9Y? lgESeFeabkod i?SQޗh~?5@+ c;ꉞi2_U3rA/04sCHD: )&# T=h0>Th]]9 Q BUF2B9?FW]i\?F'l?F{Cƻ?P Bt`  ,i?t^̺㯝4\YDrVBu auo7e>JAUVJp? ?Awz@bbb! ( " '+ ' qq?z A 9 +/.&68))E! '+ g?W-sU"!n*a4 8) "u#{ u#D!t?8tM:Bav L62?!08;<;60?5u;<; tb>){8DADA1 `?CopyrigTt (c){@]2`09{@Ms@]cq@osk@fyBRjArm@Aay@ik@Wn.{@ 0l@U qHsBe@eq@v@d@n2h<M#l# M@?]UhzbA.Q61b L` djMaQ@ 6fM{"@Ftnq\[3K_,7e5EGe<\ֽE?Nf[ NA_qd7eEGec6i\m j#f,7e2_UB3rA'04msCHD: )&# T=h0>Th]]9 Q BUF] ?FtF?F!/$?Fᱝ̻?P Bt`  "?tEQTϛT H0VBu auo7f>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#BXY'?A> ?A "+(/+/=/O/a!y:5r{l/@ sU10( /???? "0aQQ'@`?CopyrigTt (c)FP2`09FPM>PcTh]]9 PB IAUFR?F|}Y?FF(e?F=PFo?P t`  (\#?tN뻫5q+ tЖZu Mauo8g>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FO#۲?Fo2zP dj|aW{ޓ8cUF:&x0?F?F?F_'qoO 0Yigull4aj?ON~f0c?o+ fEeޖe?F<'?F@]?FwF%ޕol}dQ3ll BXl귺m|bBhaUe`ba`?FV ?FTh oflPI3qoiؿkeðl8` |G?T?heFnF$O?F o* leowky?{ft?Gg˂a,8b1;MHD: )&# ;h0>Th]]9 PB IAUFpX?FEsR?F+\(?F~@ t?P t`  (TJ?th/3u -ZrqKZu L,duo8UAU    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FMbX9?F4ҫP dj}ah8cUF[~K?F|D?F+Xc?F&PRcGoO 0YiM+FllZaj}0%|f0c̭ fEe.V?Fȿ̅)`oSt?F?(X:olGGTll .Xl{e{|rww@haUe:&x0?F%Clw?FAz?F 1vņflux1qoiFA(]`t߼ |~Th]]9 PB IAUFpX?F<Dֳ`?F+\(?F~@ t?P t`  (TJ?tA u -ZrqKZu Mauo8i>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FMbX9?F4ҫP dj}ah8cUF[~K?F|D?F+Xc?F&PRcGoO 0YiM+FllZaj}0%|f0c̭ fEe.V?Fȿ̅)`oSt?F?(X:olGGTll .Xl{e{|rww@haUe:&x0?F%Clw?FAz?F 1vņflux1qoiFA(]`t߼ |~Th]]9 PB IAUF$?F,w¾`?F+\(?F-ku?P t`  p&k?t?̈n=u -CmoLZu L,duo8j>    PEJڍU[\/(?&. ?AbbY!z@Ae bbCt c(a4b  # b(h#{ h#΍t(tI*#0f88vb.#i L5<1,(&$7/(D%"6/5;0( t.*B#11`?CopyrigTt] c).@2`W09.@M&@c$@os@f,BAr @YAa,@i@n}..@ Alt@U $HsxBeL@e$@v@dl@"h<M#l# Mt@?]UhzAA!J@#FMbX9?F$x0OԛP dj}תa4(SUF[~K?FE?FSc?F?#oO 0YM+Fӱ\*aj,.}a$?# XEU"n?F +?FbEd?F v<olTBJ\LMYBltavXlp@XEUv/?Fpvw;pg?F $hއ?FLvo~l/AD0qYJg ַBl)$ԞXlDB:XU^F.fّPKk^Rl_[Ei?1GAf`d?Sq+a,(bvHD: )&# ;h0>Th]]9 PB IAUF$?F(n?F+\(?F-ku?P t`  p&k?t A4u -+CmoLZu Mauo8k>    PEJڍU[\/(?&. ?AbbY!z@Ae bbCt c(a4b  # b(h#{ h#΍t(tI*#0f88vb.#i L5<1v(&$7/(D%"6/5;0( t.*B#11`?Copy_rigTt$_(c)$2`W09$M&@c$@os@f,BAr @YAa,@i@n}.$ Alt@U $HsxBeL@e$@v@dl@"h<M#l# Mt@?]UhzAA!J@#FMbX9?F%x0OԛP dj}תa4(SUF[~K?FE?Fdc?F,?#oO 0YM+Fӱ\*aj ,.}a?# XEU"n?F{ +?F(%/Nd?F<olTBJ\MYBlU{XlpDc@XEUv/vw;pg?F^$hއ?Fvol/AD0qYf ַBls$ԞXl+?B:XU^F.fّPyKk^Rl_[Ei1GAf`dSOq+a,(bvHD: )&# T=h0>Th]]9 Q BUFt[@#?Ft\?F\HRq?FƯK7?P Bt`  =0[?tRwq#ll$nZBu duo7l>JA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZFهgϰ 7j bL\GlR] 6S-fMB:MbPdc mdb`GeGEWe$oˏZ\ai?bvIGeU@Vϰ+@_?F23Th]]9 Q BUF偃?FKG?F\HRq?FƯK7?P Bt`  Xkw?tO [#ll$nZVBu auo7m>JA]^'? ?Az@b ( 6f  )ϓb= + _? 
' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZFهgϰ 7j bL\] 6-fMB:MbPdc Ɵmdb`GeGEWe$oZ\aibvIGeUWeVϰ+@?Vpk_xu j aGe@We_ooh2Kt]u3rA 4s2HD: )&# T=h0>Th]]9 Q BUF͜?F$6H?F\HRq?FƯK7?P Bt`  W׎?t #ll$nZVBu auo7n>JA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZFهgϰ 7j bL\G@[ 6S-fMB:MbPdc mdb`GeGEWenˏZ\ai?avIGeUWeVϰ+@?Vpk_xu j aGeWe _ooh2K]u:3rA 4s2HD: )&# T=h0>Th]]9 Q BUF[@#"?FJA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt>(cu)>2`09>uM@c@os}@IfB|Ar@Aa@]i}@n.> 0Ul@ HsBe@e@v@d@B2h,<M#l# M@?U4>UhuQuQtAJ@QH1j LZFهϰ 7j bL\ѻ}@[ 6-fMB:ʩMbPdc 2N3odb`GeGEWe$oZ\aibvIGeUWe4?Vpku j aGeWe_ooh2K]u3rA 4s2HD: )&# ;h0>Th]]9 PB IAUFv?FdË.?FQ?F?P t`  Smo?tϰոČ( 뾅@l* 9u duo8p>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFb7@c 6{fM8cUFnDdTh]]9 PB IAUFSQ=?F|=?FQ?F?P t`  4o?taVpRKČ( 뾅@l* 9u auo8q>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU K1(a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFbL7a 6{fM8(a?nDFF!@~qWoO 0dTc I\rdcd bjUkgaPbeEef"v?Fr6d?F?kĀqo`OivddlXr`tF0sK|٢jr3t!KU:G}?{]rLl} ]|q3xnTrg?F<ے7pooZgtfio,V|=] 5ve&1?Fpn?FTh]]9 PB IAUF!?F?FQ?F?P t`  + {o?tHUČ( 뾅@l* 9u auo8r>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFb7@c 6{fM8cUFnDFK!@?FDqWoO 0dTc I\addǼbjUkgabeE(aFjv?Fr6d?FkĀq o`Oi^j^r`t0sKÊ#|pr9t!KU:M}{]r,Ll}]|9xLufTrg?F<ے7vodObdcZgtfi,V #|=c ;vLu&1?Fpn?FTh]]9 PB IAUFĊ?Fn6b?FQ?F?P t`  o?t]eČ( 뾅@l* 9u auo8s>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFb7@c 6{fM8(aD +F2|>?FW@WoO 0dTc ?rdcd bj{^WeabeEef"v?Fddd?F܃qo`OivddlXr`tuqE|E:jr3t!KU:G}{]rLl}]|q3xngqrg?F~pooZgt_ML|\ 5ve&1?FWf?F?#0b|[m낪 z'oǵ(|F$$!ayTh]]9 Q BUFt[@#?FlIJ?F\HRq?F#o˯Z?P Bt`  =0[?t7c`C#ll#Bu duo7t>JTA0f88v?WYϳAjL@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? M@?(>Uh7E P+AA!{ L:F A7<6j% bRzg9<2 "\MbPdS mT W5U6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF偃?F (?F\HRq?Fn˯Z?P Bt`  Xkw?tɣ#ll#VBu auo7u>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? M@?(>Uh7E P+AA!{ L:F<6j% bRzg9ǩ<2 "\MbPdS mT W5UO6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF͜?F4@?F\HRq?Fn˯Z?P Bt`  W׎?tϘsW~#ll#VBu auo7v>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? 
M@?(>Uh7E P+AA!{ L:F6j% bT ü2 "\MbPdS mT W5UO6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF[@#"?FLo]Y?Fo?Fn˯Z?P Bt`  ?tC2.8Xj#VBu auo7w>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt>(c)>2`09>M<@c:@o%s4@fBB3Ar6@oAuaB@i4@n.>_ Al@ :HUsBeb@e:@v@-d@2`h/1<M#l1#7? PM@?(>Uh7E +AA!{ L:F<F6j% bRzg9<2 "\ʩ7MbPdS 2NoT W5UO?6ik0YaYvq 2UHT5D B &# T>h0JTtiiM ENUF|u(?Fڋ?FPaW?F ru?P WNtQ` 786R7?tUau}wAi8;(\I2-nNuj mu){xE0f88wv?AVQ Ӎ!@#b#m@b  0#  #W(3 (o+&T" #t#NĽ/?. ?M"&$a%$$,/>*L%i,T"tΞ*T"tR.R11#`?Copyrig`t (c)02l090M0c0os0f21r01a0i0n.0 Al@ 8sBe0e0v0@d@"Vt!X#xB #/ @T,UtE1UA-!@nZ pvm?S׮(%UQG@Y9"S\FkV5%UЗn@‹Pهϰ=\WQS\OXbiTQY%_7_I_[_DQOOOO__^̔ԍ?f Q́)f@vVP|?5^BlӿIY:6Xl?Ib ʏ܏EABTfxF\HRqCk?-q_WaW?ϰ+@"Bl!H,BXQnB)fFޖBoT[inJ\ۙgyAHD: )&# T=h0>Th]]9 Q BUF4@ ,?FlIJ/?F&1?F3(e?P Bt`  N!?tw;%m4tMuVBu auo7y>JAS`? ?Az@bbb! ( 6 )  '+ ߈?/I-Z#mg!z A " ././@/R+,g/y'sU1,ar4 )] "#{t #BtNg8tu:Ba PL^2!X8;d;Q6X?E;d; t͊>8lAlA `?CopyrigTt (c)@2`09@M@c@o%s@fBAr@Aua@i@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhQQA(VQ^1| LZFX} g 7j b2ɶGO@[ 6SCfMB:ZMbXdc $I$I܍+d `2aa]Eme'la'iL.G]eU@9/$E?;FslAka,dHּgmeoo(oh2gyu3rA 4s2HD: )&# T=h0>Th]]9 Q BUFnټk-?F.޺s?F&1?F3(e?P Bt`  3?tʼnm4tMuVBu auo7z>JAS`? ?Az@bbb! ( 6 )  '+ ߈?/I-Z#z A* " ././@/R+Yg/y'sU1,a4 ) "#{ #BtΜg8tu:Ba L^2!X8;d;6X?E;.d; t͊>8lAlA `?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Aa@i@n.@ 0l@ HsBe@e@v@d@2hb<M#l1# M@?4>UhQQPAVQ^1| LZFX}  7j b2/O]M 6CfMB:ZMbX͉dc ?$I$I܍+d 2aa]Emekla'i.]eU@;0$E?FTh]]9 Q BUFܲ?Fj$?F!?FTUUUɺ?P Bt`  &?t§= ףc?ckBu5 auon7{>)JAUVJp? ?Awz@b_bb! (݉ " '+ ' qq?z A  +S/.&68))E! '+ g?uW-sUUn*a4 8) "u#{ u#D!tΜ?8tM:Bav L62?!08;<;60?5;.<; tb>{8DADA1 `?CopyrigTt (c){@2`09{@Ms@cq@oKsk@fyBjArm@Aay@ik@n.{@ 0l@ qHsBe@eq@v@d@n2hb<M#l1# M@? Uh,cQcQ] '1PbA.Q61b L`w"?FMDA dj? |m cuۡ'1 6!1fM{":F$3<6d kS=ѣ<n2{!5E[nLa vogaKeE[eV_@cbc g@]amr\GDž}iNu?FS%f|]tElFTh]]9 Q BUF:C<?F8!)^?F!?FTUUUɺ?P Bt`  Ȼ?ޫd?tI= ףc?ckBu5 auon7|>)JAUVJp? ?Awz@b_bb! (݉ " '+ ' qq?z A  +S/.&68))E! '+ g?uW-sUUn*a4 8) "u#{ u#D!tΜ?8tM:Bav L62?!08;<;60?5;.<; tb>{8DADA1 `?CopyrigTt (c){@2`09{@Ms@cq@oKsk@fyBjArm@Aay@ik@n.{@ 0l@ qHsBe@eq@v@d@n2hb<M#l1# M@? Uh,cQcQ] '1PbA.Q61b L`w"?FMDA dj? |m cuۡ'1 6!1fM{":F$3<6d kS=ѣ<n2{!5E[nLa vogaKeE[eV_@cbc g@]amr\GDž}iNu?FS%f|]tElFTh]]9 Q BUFx]?F{<W?FU?F3z[j˺?P Bt`  Q`7lg?t M&XRkBu5 auon7}>)JAa^!? ?Az@b_bb! (݉ 6 , )  '+ g~X?ZI-Z#!g![z A " ,/,/>/P-[?+g/y'Eي,a4 ) "!#{ #Btg8t)u:Ba L^2!*X8;d;6X?E;d; tR>8lAlA `?CopyrigTt (c)@2`09@M@c@os@fBAr@Aa@i@n.@ 0l@ HsBe@ej@v@d@2!h<M#l# M@? 8>UhoQQ]AVQ^1| L@?F҄! dj _٨la6ztJB: 7d /k?yԷ&Dh]EWemkp+ɉ 5d}bGeUWeFBϏ+@ol0ju?MDhnMbX0j`QDhnC<o+o"Th]]9 Q BUFT1#E2?FxLΉ?FU?F3z[j˺?P Bt`  S|.?tk, &XRkBu5 auon7~>)JAa^!? ?Az@b_bb! (݉ 6 , )  '+ g~X?ZI-Z#!g![z A " ,/,/>/P-[?+g/y'Eي,a4 ) "!#{ #Btg8t)u:Ba L^2!*X8;d;6X?E;d; tR>8lAlA `?CopyrigTt (c)@2`09@M@c@os@fBAr@Aa@i@n.@ 0l@ HsBe@ej@v@d@2!h<M#l# M@? 8>UhoQQ]AVQ^1| L@?F҄! dj _٨la6ztJB: 7d /k?yԷ&Dh]EWemkp+ɉ 5d}bGeUWeFBϏ+@ol0ju?MDhnHMbX0j`DhWef9o-ai qGeUoo$o6o3UHLD )&# R>h 0JTlaaM>UF+1#E2?Fgj?F0U?F_gw?P >tA`  9|4?t <'Ŵ\1,_Œ X">u eu!s7-JɅJUU?HA9 j@7tt7 b:*H&tH  th*Nĵ) ?MB"b;)H&e8H m 3 H -US4o?FPjj?6ֈ@!QpoĬگ eoĩ́ F o 01G}}϶@b@mp?#nʼPV]O߸uy\l2_+ʦE}@p5"iРo~e*al|ٍşןHD: )&# ;h0>Th]]9 PB IAUF~Q?F|}Y?FF(e?F=PFo?P t`  IGr?ttƯ.5q+ ~Жu auo8M> `  EJƱZVJp!?&. 
?Az@bbbe!Y(ne 6 p )l l w+ g?-#U!׷!zme Ae [+j/|//-g#+ s "(a40 9 *2!E#{0#t8tϤ:U a L2T$18K;FԞ?VEK; t>B40AA0`?CopyrigTt (cu)@2`09@uM@c@os@IfBAr@Qa@]i@n.@ @Ul/P Hs3RePe@vEPd'PB2h,<M#l#_9M/P?]UhzAQ1 L@3FO#۲?Fo2zP dj|_aW{8cUF:&x0?F?Fώ?FX'qoO 0Yifull4ajS?N~dh+ fEeޖe?F<'?F@]?F\F%ol}dQ3ll BXl귺m|ԇbBhaUew```?FV ?FTh oflPI3qoijeðl.` |=T?heFnF$OĒ?F o* leowky{ftGOg˂a,8b1;MHD: )&# ;h0>Th]]9 PB IAUF~Q?F8?FF(e?F=PFo?P t`  IGr?t+>`5q+ tЖZu Mauo8>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ As"(a40 9 *2!#{0#΍t8tIϻ:AUa L2$18K;F?VEK; Kt>B4aAA0`?CopyrigTt (c)@2`09@M@c@os@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hX<M#l # M/P?]UhzA(Q1 L@3FO#?Fo2zP dj|a{8cUF:&x0?F?FȎ?F'qoO 0Yifu,oi4ajK?N~d+ fEeޖe?F<'?F@]?FsF%ޕol}dQ3ll BXlRm|bBhaUe`Ԗ``?FV ?FTh oflPIϮ3qoiGkeðl2` |FT?heFnF*OĒ?F o* leowkyftGg˂,,8b1;MHD: )&# T=h0>Th]]9 Q BUF&d29?Ft`0L?F(\µ?F^Sӻ?P Bt`  o?tbS yAP FBuj auo7>RJ[0U?<ALv@tt7 }b6*D&tD  td*JBı) ?A>"b7)D&a4D  V3 D 8/td/tRy.x1x1`?CopyrigTt (c)02`090M0c0os0f21r01a0i0n.0 Al0 8s2e0e0v @d0"h0!<M#l#/MlP> @?UhLAA]R 1 P1"7Ж1bA>!J`oO2z[ dja_q:z׍ Q 6VMB@챝/P\`_m;E,Ui5Uޖ?FQ[5lf\VU'EUY2Ruݯ?FU]˖7sf\pdѣSYUWfe _zx]݅om{ZcXUFRP\g[)JXAp| rhQ#5 IXAYaLPւ?FتcP\T f\3$sXAp"۲ ElX1YA-k?FFoZP\fyf\!XAYX%?F8K7P\{ 䘷f\&SAY&1l?Fr>|]P\-]qiYip~ X1#g~jt-? XAY?F ףp=P\' f\?P=Rw㹥X'RXT}7vP\__/vnTpljlX7U/?$?vQ[E7Ժf\wUQY|>qh|P\4GN"MDX U29_KU#rA0$@C_ HN>Lo3s rFYd߫" EMFhP #ȘT B `g+V @elk-o+(/o9BGohoP~rp { п m 8 }OX^ Dc6ߧ W  ! x#@ K& *ҿ 9-+ HĽ B @hF Aʐiϐ ]ҿ  pސrn t &uL ~y 8|I 1&}z X o'@ #'B2 {'E H/ @9K E5'O C'99 m! -%9( (x-  2 $69'T; !()> ~*A*I8,E ?]-@H0L98P9:T 'KXI8\\9ȸ_ 'cv0&8_h |@mIq 0&w z 88~ Ș ׿: 5'XP@98r?d9Xu~v9xxǗJYXzu98|9~9 ]9x &PK 9X H~9 IX "*I b*Io NIB``I ~9x &ά9X  i( ՖIx  ~W  (? )*@& i (  9 g ~(X 0G( 1 n~  [ OI8  ~ O x  e8  w8 Y 8P mR hTM hHV# wY( (]/- 1 p6 9 =I<ȿ2@ OHXqCHKVO IgSxWX[ (^t'c Hth l /"wUFD  h(^TYYBBUFjZ?F~??x<F BP(?P } kX` BW66 W TTT%TTȅ5H?? (?B@L&d2?-(\.g1(.sUW!AU&w/t  0cB`,web,s rv",T P comput$d+i t i 3ud n tw0'rk!eA^| (SG|& f/?,v>? ?T wp  ̠ p ʔ  pqwpqwqnwqpqqbqpppw wwwqqEwqpw߄wwrvrrwDrag onut hepe.Rih-c]lckaUdcos UPo etQeAvUwreP 5aabjZֿ~??cN׿ mF1??SbM1{V↑T*J?@ ?UG DF P# h @T(PYY# QU@jZ?@~??F?Pn} u{` ?Su#؍>>HX HlHYHH EO.LO4:UDDXXUllk'"&R",I'U25ĉq%4 ?"u'4b0{[`:#(26 U9$"3&x9"L1" ^!'>U@ÿ@ǽq^@?@VBӅ?]DSF0ZBu@`-1ZBuW@rCC!8GAB"2C9u`u `"[ b![u`2 %3Qo1o7B`?CopyrigPt0(c)020P90MPcPosPfRQrPQaPiPn 0Al` Xs bePePv`d/`VPs_SBNcPm!#5Qb41,a@o13dX453B^8=%3p63dz6z6 v24#V%30Urz3]ge<^1E U#5m Hq5twL5t{3t{ez1A+E*F7ԂX$18dT-`XQj2y`]r` MPWnuPaPtar,aOQ` UES0uPpK`ePt[eTL5Qdv],1PRd C`uK`bʦ30ޯ𯇡! PPr+=9H^p]ZaDbQUT\Կ濅APaB`Ȩ-5^]oυ% S biP aϭPeTυLPcXūa5DVߪhB:l(`iPg߸̨@߇RPoǩ|Z\@E pAD`~YxČUSPaPePlP,5ƪXp/` ATP-`54`%aPK]狥%Sb?`.A4`W`ZʦT3NwRk qa2$2`_ @Ȩ͒Y;MI`ad(`cz d ϴ兵nēAs DxP)ߑmr:atV%a Iâ@!auoDfP㞑 R `Qa/)/7M`Cl/Pد/M+QUmK`utP ?`/ D*?"?_ (b? (Dv??;Hd2QC K3` Wf}%5DVO#O `C` akO}O#ԅO` M`mRgOOɧDODp __1;OUQ1y?QI_[_CqtJw fo SU@jZ?Fű?%v%vKpIp(u.p` s?vbup` ? B=݉`C,qtMp5mQ-́<)oTm}QX.q͏Pߊӥ T.OtxFf@>|pÿruob쁙JRq KN̒rr$QUvvvrsE"`RoZK1& be9P bb OObr@muP`TaC]1eߊ_e71721bKa aXaaehAaYip P`u`T_`_`Qhڑx4ӥ5f_dP&b鄞pCwF3s͵NSY:3R rV@0e18TR <շqW,@rB* B}aaaahʾYVԐ;n` %̀P&²S$1aa`NEW_ORKP2H IP_!RRЕISQTҷiroohA'0UQ?8!Ԑ0oHD # =hj0>T h]]9  5AU@jZ?F͂T??Fx?SF?P6 jt`  o?tk[>(A Vu aSuo.]&A]  *ԮJU2zGzt?@L#&A@bA#z7 5#1bO(b])j+$j&[ "*(w "?& ?AU$l/%B115 `?CopyrigTt (cu)Q02`09Q0uMI0cG0osA0IfO2@1rC0|1aO0iA0n.Q0 WAl0 G8s2Ueo0eG0v0d0"1E]4MŤ-&3 &7&l>]Uhz811U!J!u6?F+\.>djIIa? >"(EǑ I%K9E 5EU . K ;;5QIWEEEF _KnSA?8@@Bu?FDR> ?Nk"\6`bmMM\j=am {:e P> #? D"? 
5-g?j4B7!B{oogr57!ibDAbE9 MuX7_'0U~rN!$a)5 5vIzHD # =hj0>T h]]9  qAU@;?FI5K?@xEL?6ѿP6 t`  bm&Qo?t@@L]WI,#u aiuoL> JU2zGzt;?@L?&A@b}#zs q#1b(b)+$&[9#H",ɺ (wH"?& ?A$ /%9#BV1V15 `?CopyrigTt (cu)02`090uM0c0os}0If2|1r01a0i}0n.0 WAl0 8s2Ue0e0v0d0"O1E]S4MŤ-9#b3 b7&l>]Uhzt1.A!JN!m.%-+0djV1ax߃S(FJ-&?F mO"> ?Nk"L[Majnha_:e P> #? BK=o? n-?j4Bs!B{__Wr5s!D8E6V 6?wj_|_ ^XE.U@z T6?F @' d_mOoT \VyWxRkrVo_bVh*g:n"'UaEjz36>CN>?Bn}]zGvN7V iDAbE9 MX+G'0UJPN!4ae5 MaHD # =hj0>T h]]9  U@jpiH?F^?@$?F]F?P6 t`  !a&ao?t,F=۳=u;u auo>J2zGzwt?@ʐLAw@b#z1b](b!).+I.&[-m(?0& ?A$I0/Q%!!5 `?CopyrigTt (c)02`090M 0c 0os0f21r0@1a0i0n.0 Al[0 8s_2e30e 0vq0dS0T"!E]$M-# %'&l>]Uhz!1!JF% djc~YZ(_N9zOMGE%_EH$SwO"K ңHE ^&_8_WE_OqOHOOi&DAbPE9 MX7'0UUfb>!$a % f1jHD # Uh4>T#]]9 M5AU@jZ?FL|+?@?F({~?P6 JuM` ?u JYt )t7zEA2&L_D?. > >tJU2q K?=@MJ&A@-b9(+bG)T+T&J[#"#a?& ?AH?/`)#115 {`?CopyrigTt (c);020G09;0M30c10os+0f92*1r-0f1ai+0n.;0 Al0 18s2eY0e10v0dy0z"!4B#e-?3 7)& b 0K8>U# ]A]A "1!ъ!PzQ <%OYAJEU . K;;\%W%EF($L[.FX5Eai%?F 6?wL;] W-\TB⤲8?F@&SA?F @ ?Nk"L'`bmMW_Ye[ٯ7 VK=F: [jr ? D"? XZvD?ta@Boogr5;!QEx]SiLO MLGXa~WOTZiAle59 XG"'0Uc&N!$05 .HD # =hj0>T h]]9  IAU@K?FOgVb?FY%?Fwᳬ?PJW?>t`  ^Z?t#ə.E @x?r?/{4܂ G _Z+u u$ >  J JpU?43f<!.&P?b3bbfz@1bAe&bq$d 2zGzt.#@LT"&r(~&i$q)(+}'i"~*#81815 `?Co yrigTt (c)o02`09o0Mg0ce0os_0fm2^1r 1am0i_0n.o0 Al0 e8s2e0ee0v0d0"11E]54M-#D3 %D7.&lUh#7 V1#Ai!J# F/?B1B  VRiAAbE9 MX G{W'0U*R2N!I$a {VZUHPD  # #>h0JTpERI MU@u6?Fjc{g&?@%r`5B?Fm_F:Is?HNtQ`  II?t#]  _ii$!`kNu  iu%wj>M JQ T       "@&&JNU2"?H@Qs#N&A@b(bU)++&RNp}#>H1#19 ?#bbbz| R&/5R}#11`?Co0yrig\t (c)02h090M0c0os0f21r0Aa0i0n.0 Al!@ 8s%Be0e0v7@d@21a4t#-}#3 %746lJ#$JUp5 3 M19 1!q(!FnElrkRt# 8$kcD?@숒@)?FP?P"EkU |1\iOT/irvs}i>`_,of :m ,M ?? S&d2? *b?r4B!B oo1gr #5!>%(EQOOOO__FAc1_C_ D4ևS]_o_yRX+{_,__ #R__ (YQoSo+o=osd {oEeooooooFyUy{* hDVW(I X"c7I!pg bf'iMjSE t%XyG|'0UŢŞN!O4i |UGD # hz0TdYYBU@(%)?Fxg^?@0߯H?Fs7Uݹ?P} t` 87?t#_)TeΦu ]ukv  ̝L&UUU&I&3 U!!`?CopyrigPt _(c) 2\W09 M c os f"!r 1a i n}. Al00U (s42e0e uvF0d(0'HS$#-## 'I?k=#ǒ 2j$B##0U'BM3@&@F0Yk} #HA%DKb5DK3DK5DKED$!GiAZG5(|IDFX7yW'&Dŭ6!4YA yVZHD  # =hj4>T!]]>#AU@0߯H?Fz)e??F˞9q?P6 t`z  ?5t# rXѮu)Nu;):O]ע0 &> # " "+"+"+"+" $+"T-J[U7 ??& bA@ 2b4-#2zGzt#+@ Li68!6 ?}; 72U!:B#AA5 5`?CopyrigTt(c)20F@9M2@c.0@os*@f8B)Ar,@eAa8@i*@n. Al@ 0HsBeX@ej0@v@dx@)21"DM-,;?C G& l>]UhzAE!A#!ְ1J`#@{&pz?FUy{?NT,dKj&b&R&M/8T9K-q?F@F?FdG0 >Nk"?YE*]\LThKjfGD&Qhj ?iU3UGe&Y`?@/Jb)h5oWKoWo;èKod}l$ro`nE2K?Fˌ%4G{?F -q? eZR^{|\?6?vodmP}l>MBe 9 N 1iS? 1?grl#51:PO"4ga@Sq(UE^@O|ǐ[`ߡa v2rbA @3ijD&}laqOz o?#WHZ'Qu=*[^N?F]%P?@O&@ c?FаM<n ?` eb0)\(Pfodm}nv}l?V5(?q$Ix[cN#W{ #iA@&QeE9 MX''0UjdN!0 /HD  # =hj0T h]]9 #]AU@}%?FR$,?@6#V?Fc_3֡w?P6 u` ?u#t  3zM%t3ws%.<%H._'B> #PJU2N贁Nw[?@ʐL+&A@wbu](i+bk)%+&p%#4">9"<5!?& ?M#bbb͓z$ p&"/%T%#L1L15 w`?Co yrigTt (c)02`090M{0cy0oKss0f2r1r 1a0is0n.0 Al0 y8s2e0ey0v0d0"E1E(]I4M-%#X3 X7&l*>(UhE j1#$Ac!J`%#@5e$_F??!foA3nBM( V?FJ-R?@HQ??FS!R?P| llHL6VLD5E+v;*AůʙH: 8snP%$? k?Q? \?4_!__WrA5_! 
R,U3OC@IL?F4~Kz?P)Oy޾ T!]]>#IAU@܋?F2Oa>?@#r`5B?F*ݷ?P6 t`  >B?\oEt# ŖoZu 0Nu;Y:|8>$# JpU>##<?2& ?5bbbz@b#ANi&bu$'h 2N贁N[2$@ LX"D&v(&m$ :;T'm"*B#d1d15 5`?Co yrigTt (c)020090M0c0os0f21r 1a0i0n.0 Al0 8s2e0e0v0d0"]1"a4M-/p3 p72&l>0>Uhh= 1#!eJ`#&FNTM\jh1B,RBM(!JoUy{O_ e@ So4(D@^Tw?F]֝ä ?Nk"?w_KT\T+\_(Kjӷjmplhj -`\TPQE\Uui,St_o u~!ՠXEO9= T?FL?;uQ_Pm"rA @3sjQ1lğ=GoYniAleE9 MX|'w'0Ur^N!M$|A vzHD  # =hj4>T!]]>#IAU@܋?Fj?@2r`5B?F^ݷ?P6 t`  >B?\oEt# 6ΠZu 0Nu;Y~:#&}>0m8>  PJU2N贁Nwk?@ eL&A( 5bM(b[)h+h+$h&#[-M$#?& ?b$Z*'*B#O1O15 5`?CopyrigTt (c)02U0090M~0c|0osv0f2u1rx01a0iv0n}.0 Al0U |8s2e0e|05v0d0"H1P"L4M-/[3 [7&l>0>Uhh= m1#!S!J`#*&FPUy{_?NTMjC1gc RVMʣ(D#@ ![?F"$. ?Nk"W?IKTLTcKjRlhj p=' (*!)i,SO2Kes9T;QEGUFw6qX}__TM7fTZT\Dž}_P^52XOES"rA 0S#Es3iAleE9 MX|$Gxw'0UrIN!4gA xvzHD  # =hj4>T!]]>#!AU@܋?Fc_v?@#r`5B?F*ݷ?P6 t`  >B?\oEt# ap'EZu 0Nu;Y:|$># BJpU>#͙;? & ?5bbbz@}bAA& K&@ 2zGzt?"K@ L0"&N(bM)+TK/8Z*B<1<15 5`?CoyrigTt (c)s02009s0Mk0ci0osc0fq2b1r1aq0ic0n.s0 Al0 i8s2e0ei0v0d0b"5194M-t/H3 H7T &l>(>UhE=3Z1#E!J!FENTMjN@1h(NoUy{OO e@ o)4(D@^Tw?F]֝ä ?Nk"?'_KD3\T+\(Kj?ӷjmlhj P\ DiAlePsI=MXsG'0UzbZ6N!%$0 sFEjHD  # =hj8>T! ]]>#!AU@܋?F*?@#r`5BW?Fw?P6 t`  >B? \Et#տ|u l4RQu?]>I$>I# JpU>͙#<?& ?9bbbz@b#AE&bQ$$D 2zG[zt#@ L4"&R(^&I$Q)+]'I"^*Bh{.M#To1o15 9`?CoyrigTtk(c)k2009kM0c0os0f21r1a0i0n.k Al0 8s2e0e0v@d0f"h1l4M-:?{3 %{7&l>(>UhE= `51#I!J49fAn^ R)X f"B!Jui,SG?RSlOS u~!N]8-19= T?FL;uQ ?Nk"?YVTQRXQOnğ/=i n (\ wTiAaePE9 MXLg'0UbZiN!)$0 Lf`jUGD # hz4TYYT#BIU@Sj'ో?FL]&?@j>(?FeTot7Ѻ?P} u` ?u#8k/8CU(.88Lt `lm3tϭ^}ѯnAf'wyU~##0UH"?@&#!!*`?CopyrigPt (c) 20 9 M c os f"!r !a i n. Al (s2e e v0d z!~$#B-#,# 'O#^ =$&#4 {6{6 w2$e#ab3W) #LG,%,Dj8Hg\&dPaxO6'TXAXA"(2Ca%?28(B,"iha05( e&XV7 W'G${6!t40 VZHD: # =h4>TB#]]9 T!AU@%?FYp5?@_RP6 u` ?Mu t  !o)t~7RnB25"!)Lv$>   EJtU2q?=@dLA@-wb%(b3)%@+@&[w#?& ?A+/L)R!!5 {`?CopyrigTt (c)'020309'0M0c0os0f%21r0R1ai0n.'0 Alm0 8sq2eE0e0v0de0f"!$MEBY-?# '&rl U# #7 1* #p| '&)1&JH FFѷ |-B1 @V&Rl( b 0Ale59 MX7'0URN!$0% VZUGD D# h0T@dYY]BIU@j>(?F8??F/I6?Pn} 8WdT t`  7?ht#rL&~n[f1uowuwU!!`?CopyrigPtv(cu)v2\09vuMB c@ os: IfH"9!r< u!aH i: n.v WAl @(s"Ueh e@ v d  #S$#$- ## '?w= # &#24 6 6 $B# #0U2 3@& wa#3~  #dH1%4Gi^#5 6X'EGK'4 6! ] EFYJHD # =h4>TB]]9 \#!AU@#.?F/I6?@P t`  gjo?t#T U $YhAn m7 u)hnx Fu!)J hI 97eyh?#!?9& aAbA@_"bg)b$g)bIg$2zGz9#@L@&h*%}$f*+T'_"*BL1L1`?Co}p rigT}t((c)(W2009(M{0]cy0oss0f2Rr1ru01a0is0n.( AUl0 y8s2e0ey0v0d0E1I4Mb-X3 X79&l)Uh# j1#_!J FF$[ 7#e T$V3RiAlPeE9 XW'0URZFN!^0 VHD # =h0>T@h]]9 #U@딃r?F/(?@2?FkF_w?P t`  ;0K?t# y_h:.A>Dj# ixsj||yyh|j|Ay"s"y,"s,"yJ"y^" yr""y"/y"a|"1y"8y":y";y"|`AUhMq@q] y a7T^1 hjP!,!J!^!o$!$U#$%&U'()*+,t|q!a!U234567$9$!1&1>74Ua#[aJ`S@ɞd?F9^2d3Qqs^2tY7N26J6M#hN1y-loG?gn`bDPu6(āThq 6p?F!_T?@R h^*B?PQi e>amabƫWSuMD,Xud3, utSқJ! \&\xZ? $Zas? V =? @S?d2tR@B8BoRį֧r3Cc EH ]&H[Bѻ?@lML6~ ;m`~b&¨r?+J^t/qO@? [,!rPצ 2XQqH zg?F,53lܿu  ?FM9(֜2Ar}JND7UØB]ٿ?@h߾#?Fd~5ȿMuPu`~dg}-S/9u}DѺ f <-@uušJAV+ǥ8xP?\ q|?FKyhQ~2vTPf=?PoyGV(na~u3vu;*G1+u~ݏֻA_ ^Gϯᬅ?oQo!|b0QJX^?Fsm?@@޻Pg?POWLKz Fw^=g%fԧ!6&]JVbGᬤ? o[oQr!rmz@zg?@f(?FVɱC OPKMgBlVܖVVwVsS3O_ܘ=w?FKL?@jP0A`QʱH=lTG5OxOeuF|Fv=/Fꞏ ,-<60 ]Cs~?Mv1E GZ}v -# &<"ݝK-'>TAFd|6δ̙2{# ,',v #cX#JREB'~5-O$ ,RI~  `ܝ-C-X-_]bd% ,Wl' 4%ŀ>-խ@ײc& , Ǡ4 \lb\c>-R땳J ' ,+E :{(b\s"=]& a:U( ,/?Fb\.bLOOMևQ:U) ,!`3?F k?%6fSԿr`* ,Oڿ?F&Kba,e~Lʴ2ͷ+ ,b d?FjFŋݸML, ,Z׺ı?F bҢu W%r;z- ,TUI8?Fs%:v ~LSa@ . 
,3Pވ?Fb^vO$)yϊ&Lؾ1/*mCe%f(~5JOAjuLīt~eq>aT~.j&],_10y՚?Fr`QSٵ?F艿O:McfdilFU1i;}m&R F$w|!l~1`-?F~J~ 03y<$-t9BIL{HiG,LDd|jph:OE 4b\{~-K 6,Ku20e%+~L-r; 7,kh}6`E׻L?S)` {A8 $Fٞ@`o?F%&0MuPu9KdfǸ;}-UL󥶚!s@+C9,*}3?F:ט26-*NY'| ݑU8am?FYe~Kp?@ŮM?F@_O`P jBނ> l'xk超VSOB;6{.f9;?@ v%e??Fh1*o/m_~!Wx ~)j |ALyUƅ?PbO__<J+t?F"e?@'IȂl:xl@׭9,&͓S2&n&Yۙo> =~vU^4Q?@,z"TEo-<[?|8q&?FCv?@s&Ŕck>\ :A:z5,7ح-idsjѦD;gcZT#n$!$ɞd?Fх@Z f<x.Ϗ:"r 23ED'佴9Ki PyT A$))X#!('0Uqŝ)) (h(J TEI 3aAU@jZ?F({~+?P >uA` ?u#ΕMJ*A )83*#<VQ 9jQ 9~Q 9Q 93(QA = Q(Q.$* BT`t &h)&4t   4t"z"pbE#"J>U2zGz?@9 3>+6A@"b]8bk9x;x6N2 !\4N%323326 2Mc2b"k9Kh7N2ׯa : R;l   4"ABoogr#5"Ap(U3TH P/?@ yjPvf MuPu@YF]ul9;VmMgHr f 9LTBUx]Si\QE[?MLx/r?FPzQ- %YAxlQY?U . [ ;;\upQYF)$r[.Fxai%?F 6?w\^ W-\Xj ~ɽ7yކ}dOflA=$'p$`4U@x@4S>aai6$xi'e!aS#8&`iMiI51Xԥ'0U6PE0 H?>_;s 枹\$qG"Fw #My 5OB Jz ~dk{ @+z[sG_fx ʼn 8  H xq F Mߞ 6q? ,s vF x 7z$ N(}r ، Xĺ^H ' ( &UFD  h(^TYYBUFjZ?F~??x<F BPG(?P } X ]B66]  TTUTTT"9Td9TȼHBU?F? ?B@L&d2?@-Q(\.,c(A.sU!&(/)1(  0%YB`'FT,serv2,comput"4di0t0ib04d0n0t;w*0rk!ea| (SG& i?!3E?? ?| wp  p | qw qwqw{pqpqwqqpqvppww'wwwnbwqpwoww6zwxvDrag onut hepe.Rih-c]lckaUdcos UPo etQeAvUwreP 5aabjZֿ~??cN׿ mF1??*t {V↑T*J?@ ?UG DF P# h @T(PYY# QU@jZ?F~?tP} u` ?u#2>D>HX DHlHYHH EOe.O4:DDXXll*k'"&J",':U2%5%4 ?"'{4b0u{[`:Q1#26"*!^ U9$"3&x9"L61 2!'@>e@ÿ@qn@?@fB?BmDcF0)B-u/@`-1Bu@rCC5GAB"2C9uv`u `"[ۢ b!]u` %3Qo1o7B`?CopyrigPt0(c)020P90MPcPosPfRQrPQaPiPn 0Al` XsbePePv,`d/`VPs_SBNcPm!#E5ab3P=` o13dX45CB ^8=%3p63dz6Pz6 v24#%3+0U'rz3]Jge<^AE U#D5m Hq5twL5t{3t{LEt{t{y {et!{|tYwmAdaT׃IUBT$1H@a@a20)L21632CLB3P1$125jb6w  7C|hh2E)*dQ 8ŔIQIQA11CB;1XrbYqea>!bz1AE:FG$18dT=`XQj2`]r` M`n+uPaPt+arp` ?bup` ? B=ݙ`S,t']p5#mQ-݁<9odm'Qh>qݏPT'._t'xVf@>pÿЅobZbq [Nܒrr$ aUvvvrsU"`Roj[16 beI`bb __br@muP`TaSm1u _u'G1G2Ab[a ahaauhAaYypP` u*`To`o` axx%D5f_d`&b鄮pSF3s͵NSYJCb rV@̓@e8d=`LqW,@r B:р.ǏR}qaqahYV䐷Kn ` %݀P&S41qa`NEWORK`2H Po!RRIJSQ*io"ohAo'0Ua?!0oHD # =hj0>T h]]9  5AU@jZ?F͂T??Fx?SF?P6 jt`  o?tk[>(A Vu aSuo.]&A]  *ԮJU2zGzt?@L#&A@bA#z7 5#1bO(b])j+$j&[ "*(w "?& ?AU$l/%B115 `?CopyrigTt (cu)Q02`09Q0uMI0cG0osA0IfO2@1rC0|1aO0iA0n.Q0 WAl0 G8s2Ueo0eG0v0d0"1E]4MŤ-&3 &7&l>]Uhz811U!J!u6?F+\.>djIIa? >"(EǑ I%K9E 5EU . K ;;5QIWEEEF _KnSA?8@@Bu?FDR> ?Nk"\6`bmMM\j=am {:e P> #? D"? 5-g?j4B7!B{oogr57!ibDAbE9 MuX7_'0U~rN!$a)5 5vIzHD # =hj0>T h]]9  qAU@;?FI5K?@xEL?6ѿP6 t`  bm&Qo?t@@L]WI,#u aiuoL> JU2zGzt;?@L?&A@b}#zs q#1b(b)+$&[9#H",ɺ (wH"?& ?A$ /%9#BV1V15 `?CopyrigTt (cu)02`090uM0c0os}0If2|1r01a0i}0n.0 WAl0 8s2Ue0e0v0d0"O1E]S4MŤ-9#b3 b7&l>]Uhzt1.A!JN!m.%-+0djV1ax߃S(FJ-&?F mO"> ?Nk"L[Majnha_:e P> #? BK=o? n-?j4Bs!B{__Wr5s!D8E6V 6?wj_|_ ^XE.U@z T6?F @' d_mOoT \VyWxRkrVo_bVh*g:n"'UaEjz36>CN>?Bn}]zGvN7V iDAbE9 MX+G'0UJPN!4ae5 MaHD # =hj0>T h]]9  U@jpiH?F^?@$?F]F?P6 t`  !a&ao?t,F=۳=u;u auo>J2zGzwt?@ʐLAw@b#z1b](b!).+I.&[-m(?0& ?A$I0/Q%!!5 `?CopyrigTt (c)02`090M 0c 0os0f21r0@1a0i0n.0 Al[0 8s_2e30e 0vq0dS0T"!E]$M-# %'&l>]Uhz!1!JF% djc~YZ(_N9zOMGE%_EH$SwO"K ңHE ^&_8_WE_OqOHOOi&DAbPE9 MX7'0UUfb>!$a % f1jHD # Uh4>T#]]9 M5AU@jZ?FL|+?@?F({~?P6 JuM` ?u JYt )t7zEA2&L_D?. > >tJU2q K?=@MJ&A@-b9(+bG)T+T&J[#"#a?& ?AH?/`)#115 {`?CopyrigTt (c);020G09;0M30c10os+0f92*1r-0f1ai+0n.;0 Al0 18s2eY0e10v0dy0z"!4B#e-?3 7)& b 0K8>U# ]A]A "1!ъ!PzQ <%OYAJEU . K;;\%W%EF($L[.FX5Eai%?F 6?wL;] W-\TB⤲8?F@&SA?F @ ?Nk"L'`bmMW_Ye[ٯ7 VK=F: [jr ? D"? 
[Binary data omitted: embedded Microsoft Visio drawing/stencil content (network-diagram shapes such as email server, management server, e-commerce server, terminal, file server, public/private key server, comm-link, and wireframe UI elements); not representable as text.]
.vV B2% 2 2ȅH?p G?`1 Bt  0XGB`User,n tWwo k p"ui h"al d v# c >!^ K%OU# r?/%?IHPNL@DD@@D@tDDtDDGpDtpG ttttG_GOOGGW D@DDGD@%DD@@zD@DGGDDM_iDpicZR?Drag onut hepe.Rih-c]lckaUdcos UPo etQeAvUwreP 5aab\.ҿL&d2??s׿4?@?p2эr3rCUG DF P# h @T(PYY#EU@\.?@L&d2?]P} .B"H:J "bbrY:f 7 Amu` _?U"66UJJ^^UrrUSu#*",'񩤅MU 02#?T"'#^Ax)"L[$ $3' >>ÿ@q05ӥ?46Zr&23G2uM0`-N12uu&@ f3AB@3t$9u`u `m"[ b!wu`)#A!'1`Vis_PRXY%cPm!#52840!2`?Copyr@gPt (P) 2P09 M@c:)Po@of1R"Qr%P^Qa1Pi#Pn% AlyP )Xs}Re@e)PvPd%BH!#$U~%d$%! =#d&P& "$B##+0Ub#]  gmAdaoc_Td"5 tR6$t 7#tEC0t|hh9PKs(&q`#L ]G2`NgPmPTZqU!hj|]sL#PcgXx'e|Bu@lPioPgyEf|R#Pobv|3qdv]@Vq DPpgPr1PBqn1PxHrfcQA!эSPUa%Pe[PlgPs@ c` "QnoPePjQv@t'PxUrT sAp ;T'PA r`UTTq1PaE m\#ff%<z-q#o&5m1`P]](cEAE5:Z%C %B&MRAiR p!]`NEWO@K SH-0Pr BORIrS1QN1^d^dpЭ&&"(AT܍?<$aylA2;6Mq`CRPQllS exR2" 0R<@5b :::4u"5z":br3@@BK ڿUmRc11"@u&2Mu3+ԼB,@"Q(!`a#b!qo+`9R7[d[#uҳB&53@R"3阼E q`_$&F3s͵NSYFO0UAC,@d_"PUi(X''d&E! EQ7 jXq!Ia2ܬوW.Hƒ%Q &5f ~5! E8! U. UHD: # h0>Th]]9 MP MU@˙?@[ bg?@ƒ?@?]P6 ]&A]M  $ 8A] LGD`G t  DG G GG$oJuM` ?.BVj~uJt  &7&t!"Mg7%'tU% 762>[JU 'N((?6 ?A!2b(A@2b4Jte32zGz?)77s@M[3J68b96<2:Be3hAhA5 2`?CopyrigTt (c)@]2`09@M@]c@os@fBRAr@Aa0i@n.@ AUl@ HsBe@e@v@d@@S2aAYeD\3-e3tC tG 6\5@bU9 kVX=W'0URRł6!4aR VZlJ4Uh8&a&a\qA@Q1Y8`e3@omi?@o˟+00f'##Rܗ'/Rb\2JAL)?@}Ɯ?@FGy?@?Ҿi ǒ$Y1iM/JCl;~e*e<8e#K4 @ J? 7ͫ?"p@c <"!r55qE*uzf7?@|ݨ?@ړ]?@,_oTl%rl |}}Ya|tqw(Kx#0.:Ud:Q5>[}W*??@#Y8'&WSyirlE_Ţ|g;||8Q  cGkfɑudpJ?@P"|*u]ad@k!iNŒ|\(|ݙÚw }:xɒ|)3@*ȡyQ5l~eoc@q(Y?@6qﴐ2rA 3zƟZJJ=ay*/AHD H# h4>T]]9 MT !AU@ `h?@ E?@( y?@b|?P6 $]A n JuM{` ?e? $Su.Jt+  SnpteGVBqzE0E>qچۤ7>[JU 'N(( ? & ?Ab(A@0"Sb8$J2zGz?CUUU@MJe&9#ވ"bU#z.;&b8)E&?,J0"E*115 ^"`?CopyrigTt (c)<020H09<0M40c20os,0f:2+1r.0g1a:0i,0n.<0 Al0 28s2eZ0e20v0dz0!E45 e-?3 7 &6iieE9 6XG{'0o bo &%$0 PFdJlz&>UhE=^!Z10! _V, $*$d* Td$VcR( bHD: # h4>T]]9 M 6AU@ù?@C?@V_#?@ zrC?@+>_$6>M JuM` ??u` noD<Ju ,u ,bsu *| -Jz@"JL>Cјղ?@~T]]9 M 0AU@H}^Wm?@,71?@[>(?@Cn9`P B0wR?>$ >M lJuM{` ?E?Ju`bJu ,u ~,bmWu v {-J)yt@e"J>>?@zc?@c$Û?@s?-'7/I%u'A-(?/I Jt+  >Nkt7"te"^_'n3%':4%'~7JJU2zGz?CUUU@MJ76yAZ2bUu3 zii32Wb9;6B13115 02`?CopyrigTt (c)@W20@9@M@]c@os0f BR1r@9Aa @i0n.@ AUlT@ HsXBe,@e@vj@dL@14[13 'N((3FP% 4(d?U3 7F@T%jeE9 LVXW''0A0A0F-!D0 lVZ9lUhzE ,5i 1a^*0^dJid:SYwh>F R e?ijm$*D@@y46$d tNe%7lb+d hc٢4HD: # h0>Th]]9 MP !AU@܄?@?@oEݒ?@fc?P6 $6 >M JuM` ? ;)u*Jt'  sZlta{Jxlmv _=m)>d3>JU2zGz?CUUU@MJA@"bU-#z#!bs!#<"bD)Q+Q&J[ 'N((?& &?A<$(S/t%UB115 `?CopyrigTt (c)802`0980M00c..0os(0f62'1r*0c1a60i(0n.80 Al~0 .8s2eV0ej.0v0dv0!Y$-X 3  7&00b59 FX7='0Ŧ&$a $F8JlUh E ITh]]9 MP MU@\gr?@KeF{2?@0_'LھP6 @M >M   $ 83]L3` t  3 3  3[3tJuM` ?""."B"V"j"~""""u)Jt!  
N7F$6t132_y:G#7.7w%5%H=!>}tJU2q0?=@M3J6Aw@]2b8Wb9;6JJ[326C?IF ?PA?IB3AA5 C2`?CopyrigTt (c)@2`09@M@c@os@fBAr@Qa0i@n.@ Al!P Hs%Re@e@v7PdP2AYD3-3C GIF5@AbSU9 VXyW2'0UbIF!dDa VZlTJoAT>Uh<oAoA]`t AVb8`3@X?@ bgk@#O 3G$ ):r2JXq@|KC8 uV_Itu4$9K]bkt)Y?@>W?@t |jdEJC!|v|#t0\|`jՉw?0GuN.#?ɿۿjjt^qt'f7?@?@cVB?@-~̍{U |S r!|ÿAD:|S|v|8O/w#L,3?j~;̈މ Q5?@]}W*??@ׅK0M='T!x\.!|[2jq|jm|Äމ}ƒpJ?@Ggo: xυAf%r1$yA5pt| D(ǓI |!TJψ' 55)n?@dѓ`iDF_ὐ#ǮD"ϑ<)tyϏ6qy-8n|r!t@Ikݟ -?jta~TUdQ ;f |I ([%6qZueos@xk.?@U.foB]ra 3z׳]yqʰ-;bq2"HD H# ih ,>T  9 M#EMUFuj?@&d2?@ƒUһP6  > M    4/ H/ \/ p/ / // / mJuM` /?*>Rfzu#J"bo1FI AcﰿuJt  ?t,2t!72B1>JU2zGz?@Mg3Jw6A@-2b8b9;62 !4JFq3Bh32.F 2A2b9ZK72i<2?*q3AA5 p2`?CopyrigTt (c)@2U0P9@M@c@os@fBAr@$Qa@i@n}.@ Al?PU HsCRePe@vUPd7P_2bBMq3C G1.Fh2!"t%2eUibE9 h5AXzWW'0Ub1b.FE0I VZlJ1T>Uh<11@ H\p A l5#}Qbe81@;X?@ bgP@3 ggq3 6MgW7]4JAq@dgLjv2xT?@&e ?>Ȓ$YOA"4[Dzx=Buگ+Bu#տ(: (-DT!o Ut  %"4nARB"&r55Hu™0PL?@dl2,|?U6XB|$e6XxEt\s綡?@kZO?@-9b?@B(® Ǣt/y1MB|sf| +6l|?MQ ' K +Xx}uƻ?@3W?@tM/JCB|w|RDDk|& mzf7?@ݨ?@cVB?@w̍{?PFk%rB|:|F,r8|?P)̿޿:Q5?@P[}W*??@ׅK0mM='a3urB|(q|̔mn|{iϡϕtZpJ?@pgo: x+y|xutyw1B HRs?~dso@+ho-BGD@P_GZhNQJUX#8\ѩc+UFD  h> T6D BU????Fx<F B/P(?3P?|C (^-  0B`WWire]fam,\wUIus}rintUfcpoymckpApUlctoWbed*ac"sbe^ F%B --H{ mj? ~?|?<8M;G3?n333?3? k?I3 {33 383?33^|??|nr?3);3bxBP(״??g*+0KҶ?*?.U5 ÓI&5?<$UH TD &# J>hZRMR"Uu!]SAGLD VMA,lWg7GRql Ut(%EaUQsgH ~(ày8?F~p"Z ao4O eA_DHtO3?FIX]?@fs?Fv+ K7AHPyߖ䰜OC.ő"#*tu尕~D72SőMC!RB _&x_DXP'u[*jB!e#dM׉Yb藛A4+=ɕteٕqC ?FD@?FI][h aӰDW-k}̒cuٕ86 ?F ڜ?@9HBV?FtQM_3LA!uWir"޿xٕ- [?@dpՉ?FJqy7ï!lz(AhiW `_Mϳϱٕg{\?F  ߩ ﰜap` ?%%N`ߢrEٕ{@ٛ?#F1ߨ!^D'DqW4Q8 $0BXj]1G nCgל۴ƟؖýIgcpg?Fm1K V 5ҹ_ :WuC'Kx2Uٕy`*?F!X p?FztRvȖ `ҨTui As8ЧW`tO -uÃ뭤 .) ʉte(Hs 5~si(ovհ?FQ~ksm^9`Dj~UWc&.7Iϐ?@˜%V?FL/DAbWwppX0 YXcu@rgdve'?KssG?F]K?@Ke?ӿSɨ*Ԑ7AN @Y釼if!Џ]ZAJlFbҗ ~=A>7b=Ag[k}oŘJ1*LUsqgQq Tr}Ty)TIr-x~Tq k"`p`IcGonk$,@qqU7_yu5)\(J܂dmwB ,snU@e8ko+_؄oTaX -o:UFD  h> T6D jBU???Fx<F BP(?R3P| (^-J G 0B``Wirefam,\UIusrintfcpoymckpAplctRofrsdaog$sb$ ma e^ X%% -Hu |8' ώw@Hww_ >?nDrawg nd]dopi]t sa]u brBa*iem;.bڿoxTB$]]9 #zAU@??Fx<?P6 mu` o?u#Qrr<;>A@]@aa. A  > MJ\}<?& ?A|baUa` FilR CoR orz0>UhhECGQ\#/J`NJF9O Or-B  ,HrMY8m@Kqu3~pEGu E{pTxu2v;'2rRA ]P'2cg8t heDCGU88rTyM;;\2CC3-d BJXcTQQ S\Q&YcSPau`d\5!u@r?̞B#uL@[@x\23*gE̒Ҙ: EQuD, @jqV3e E9 MdxOz6z6]UaO HbTD`xP ȯگrXMW'd%r[uC-! qn n)EH9 ٥q4_EunmFzO#<|>B 'z]Qas]@=+C]x }PUFD  h> T6D BU?X}U???Fx<F B_P(?3P?| (t  0B`NWirefam,\UIusrintf*cpoymckpApl*ctocsfrwrdWe^ *% -H ]?   ?.-) ?i33?33?݌e3e3?e33?e3 3ytojb BP()A??/KҶ\H??5ZU=5? K=?HDB &D# ?h <>T$P]]9 UÍU@XU?@Ԫ?F BP(?@)Aº?P6 lAT >u` ?W."G&,u6J2U?YYYFuu4A@bb !xAwwwL?C. ?[G%` LineCol rz-"#zbT[S` F l .` Z/2%߽!ݽ%%![k3h#""+"Ph\4>*l1k1s7;`?!py igTt (c)#@W20/@9#@M ]c os f!BR!r@!a!@i wn.#@ A"U HsmBeA@e v da@i01`V s_\ RXYa@cTm!#53@1O67h1B\q@?HOFMA87AT_>p  ?@CE@>@uL-,v?2BSvQ &RuQSep7z!\]Uhz Aa\12SJrQKQsVO@,S7aYhe"(nYo O+xP5eL2S^PO4\tE?oy5~9}ew.ue?&VT2s+xE~W.u?U~o.uob'ghyaeN7bTaޔMP%"JT11 Ch1CI'@^AD%%!ڐ7K,@q8ab59 < DrW9k3ac>ˏixX;g5'bD~;7di!0% 5jd&8{;gV!36 1>i5_sϊVH_9\s ":,$*_AF(w8#ȈwB sm}@etoo}+qoMa  oUFD)  h> T6D jBU??Fx<F BP(?3P| (^-  0B`hWirwefam,\UIusrinUtfcpIoymWckpAWplctofrsdaogB$sbR %hl Hbe^( h% -Hu p'9K]wtrDag] nddWopitW sau brthe cp4Lp.bxT$]]9 #UmU@?Fx<P6 u` 7?u#eAE+4:DMJ\A}<? ?A|b@Ua` FilCoorz9A,b #zvr6b< /"/4!b(' &񅴍A2U"8A$( $*BA-%3s"zp%#D?`Bh4M:G? !C4#1BM؂XAAG;v9"`?pyigTt (c)@2U0@9@MS cosfBrԁ@a@in.@ V! HsBe@ev@dt@@1`VS s_RXY@cTm!#5@34L!"\$ǍqHP?G_YVM?\.? 
!_;!Rl>0>Uhh5CvA\#/Je`F#f$IaX cbifM( a-oRne3noowe5e$fo?oeE2o;"rA @"s.SgXhe979T$M--"JCTXAXA 4wCASS@Au@-T=%4K,@Y_q.F`n3e59 < @T66Ea\RjiЏX7{'YTK!Qn {xH `9g_DʪFh.W#w6B U']Ua`t@+(1a C PUFD  h ^TYYVUF88??x<F BP(?i3P?~v`:DD\fvv霍UF F~# @Ui$!U$ rR"#)78H"HMHL[0 `workfl-0w`/cmd=3Iy 1Ik1p4;1`Vis_PRXY.A0hC0!_#59042]"`?C-0py/0igPt (A0) 2009 M0c/0o"0o3011r0.1Ua0i-0n0 A50l 7sBe*0e/0v/@d00e7% 3$1S29w3 w7? 7>AhBBBBG&? :7dt5]\nF7FFRFF BbL0A 11R` SPaE081161rzV_k$3 F0A*g2 f"3 DoeBE2N贁Nk?OI}B!^`b` L0n/@tpoi^KE 9E]zoW'2r?qF!D$1z5 feT/@x0oq#a7 i\5QfffE\/uR *5K#[$$L%^vF#2a#M\5鏈ndPa'!T>A>A@Ȑo9Fs? 8#3+ %0&2p3B A0A0]`N`TWO0K SH@P` 2OhRVI`SK1QJP5&X4^CPUeA0k u1D 9m2s D 0`A`ASȐY$!-."+;\2H##KB)Ub)RofR||b "H%vœk1Eբ0.c.AP-0at;X.;; ObSпF-0u@d`H@D%RTC,@²\5fZ]@W3&{01A0U60dI0a00A01-030d0bJ4/@-30e0ubf@480}+KE?!0Pʮ?1(N)U??OfUf]@0+|TƔߔܡNeaUk;_m H*6N秊HD: # ;ih#$>T #3 A UF\.?FL&ɯd2?tP Z>u` ?u4# t  9t-GDB8\J5 `?CopyrigTtJ(c)J209JMcosfr!ain.J Al8 s<"e evN dF0  `0^%l>!Uhh # t /J` ,"  R6MB:?:4b30l53@}:?N6?p1x??=21l5%2?5rA _ BC`#?FJů$?FX~wZ?28Pzu` ?7uu? .1J1i 1h j*}cisځ}$8tTF ]Yj?t84 28?u` ?0u8 1r *;Zbi c28Ɠ?2P@u` w?7u?K= JS9ukuBm  1\2q??\.? ?A7}`?Copyright (c))2059)M!coKsf'rTa'in.) Alo sseGevdg} `0HGgGlGr0d'G_2lM EMFD^i-, +@^`^XLg0@u??@ @K<Lj@w >5Aq=A? UAcA*AA5A(ͪ}@?3p?楟A +VA:#NXJO= Gsss3@U*$B:N@dz*Kɑ*߼- >8g 0wT5> F@> >!b:%U;C(UP        J"<CC(^)C$)^TNPPQwC$J ^!k$r-6(+8g?r(??05w1 0? ?25rGM?w4<); :LUKAJBHCW EV V +U !W?S 3YSRFZgQYYlZ?~^__? -"C$^xg+@`z4W4S4e?g|0*: eD=g1_?4B5"G0|.I%C(e;j%! ׫pVpq @_Vk+p+pn qeezh9fav1V1F%neE $1w?qxA@ˡA'1AqdrGȏAqqs9Tr^'rqP?@F'rquL7WA~@1.A-A6=a=A.JJwAx9f2f"j!rr@=Lzoo '(L.Ci^)V% s r-ATn-pwnC AM} MpwT[ f0 ؍pر >g-pg;X >ɐlR%>(q[,J\i^;d|pw @;I`1 Aj]1Aw)\uMLIKAXIAsh3AffF!A"@Xeg~:erzyȏڍPΠZMbs,1ZEcm̿9\;A6;ҟ;1,UxlzځV A7@}?!A|@)AO@I:=b>~==p.AףA.AZdAX90AA1®G3AA4AB`A4p5A+AtOAlKAvLA%3A!JA($>M-AFU(}?A9&%!EA+FA3ANbF5eFAu6Ҙn8ҤpS-79AS1AA&!@BA@v:y@V0>q@h'rszүP,vV1r-*w WWCr-W1ྍY due]fmf{}w^wEin᧋tr-@}|}}} 0.mS-pe]^ w[q̿޿l{]rj  ]A"@ ߄@8m?8 Au7m@pSe@URM߫A2shAA4uJ )8rTԧʡz!+=f!z! >.>5^>=>C<ߦphca␿?>/>f:/L/^/!Ep`  `Ep *p p p ///340fe!rei!!d  wN%V1N%Oz9 ׯ 0>030[0]..'5?--: 5.4W~?,334:52h?++ 9 4 2 ?77 28,%2=6/ "72 ,%&0 1!/02 345'' /0#$ '}()* +,-.0 !"#$%&'  >@ 4QJ>Z ^<0Yk>GY@!AVA@+D- DA:B`AQBAAj=%?AAV@AA9BA~r V@b@@A"Q= 7)RrZAQ6QA6QA.QX9>BRQQRQQJQU8;^]^ߧ@؞P߱7K܆47 U;`;`7`MWA=` `A`pQo0=Daۛ`M۫=``dgڝ&ံBTfxXLA-@l/Ao@H1_z@RV}AUFp%@,!-r@q+@Ck@A`@@n@-@M@-@~uv]FXjqqvJp~aʡξ}?kjk0OʡbX9_=j|K h<> rM@ :"x[&4@J$> > Z:mS@ S@$@zT@Nb@wWaͅ[@S_@ @Ͽc@+p@T@'19p@Ϫ@Qխ@/ж@&1Az@1JA@\VA/݄@W@@`X.Y-PHQ~y@}?Ypu@ˡG@,!r:Mƛ?Zt@_@~q\?j(a/?%%POAp@y@A_s@Z*Aɰ@@)\@Ƚ@Zd5P@@@@Xqp@"UhrbVAףp7@ÃÖqNb`@_-P@]@r\$H¹^uxN܂Ty]____Q5G~a5f5g 7I8I9IBeMXIlkn\EB55!O`@M?M>}&--d->Q5HЪjqꩉw*a =`!W,eYO`E``aUp;IeRensd~a4*)`=en98f9oko(:L|R2 L2 bu> oHwtDAZA2ff8AZAEE\E1j=Z6ZOwA׉A|ppAw|yZyX9ARaAMq=#ۑAtǩ ʑAwAPw"d;ٰffAWp9P©ûprHˡ̴ >>>T`ݬףj~ ~~wyw נ ܾ"@]P9Tw96J&9 "ˡ@B"uN";jZ"dI6!w\5"=ZI"&!J!!B!! 
&vW|!/*[Z@T{*Z.FF=l?>94??NX00n0 9҉~~aKp 2 M р )@*006d~o,g JFJ%F/F6 +f":AjE>OPObE @C6N5O EB0vI1OH`ICMO]LIV_0_GVINf_IQRvI _`IWvI1_I0B0en҅?Gm0nc рT0a0 d рa }6f0'.xsպaV e0 '@rqjf.n)oo0sA&q"qrq, >Pdvwuqq'$qx.*4FXl~1u/,ʏ܏3u62&C2ADVh|CbQu¡?<ȯگ@`S*FBòL^עl`+4@@Nf!)12B)[G0Ֆ:g߬_cӮ"e2[)Px!J0dE?7dH'33==5No`g!$Ԗ_(.3Х7757Ѕ_odC785?`]JDH}QH`r]JHnDj?O8R`j+]J8O7ZcZмTZVR)G=BM?/Vx?dC]JQdU-5n'`Ko]j2<ê5 'm!B޹ Ufz GJI6DCK%^@,G $%ߓAX̡A'1A| j͆πϪϼȽ`W`zEjЃ!2DVhd`da1){@')1%/B/Ȟ1~A(zEՈD45 $h衒@T} YGoB_(HAs jkQ+w9CSrF>#7\(hB t@eko+ocBG8roPuONWcVUFDfP h>$/T 6D UUF~@xT$N diIh>) e0T9 M B FAU@bX?\.uP6 Aܿu` ?_A@7=5GQu[(fUau`u`bu`u B($>U@f@X'P%7 Az'6-='l(280?@vKk?45?(@X92V* #LA-hbrO@N3mk2u.h(<KF922D8hapK92u\C9?&,<?/M%I#AAFD5 /`Vis_SFT.chm!#5028`?Copy{0igx@t (v@)@2B9@Mh@c{0oj@ofBAr@Aa@i@nt@ @Al@ HsBe*j@e{0vPdt@@u0x#IA MG?9#2B[ DMW`wBk@l@]w`/v@mP=3yh"Kt7";GbA7"J&l_>'8 \#I#2q L`MSg0SF5B\Ej?ofNA?\ZfoG=I#NF DNCMVMV(IR@$iA2E9 mxoTPw/'2rLbMV!FT Pv&ZWg  aT>@"$K"+X"(e2BA)rR'Q'QcR䌄 완M覄$1`Vis_PRXY. h !#59087`?Cpyi}gPt ( U) 2~ 0| Ui rsf"R!r a iUnn A l 's"eb ev" dn ]3' X#!"E# E'? 1*0>2w27 &~ 7dM@޹nFFF}Bq6q6 m2b A2` SPa !!!-rzۺFOKh F` !G2F"X# _EE2NNk?I}YC.R` L` n >_ i5^C5 Muz=G'2rqq6!j4H% VUT x _q#a7 Y5AvV@vVvV\5" !"aR oh  d*%dEN @wu w5,{Vzq !"8]A d lT`"!i e 1m`"s$s da('T 1 1 =)Y6A/#X# %f &"!! YA@}N_PTWOh ]K SH P_PU f"OR`I_PUS!Q%g Vk͏ イ$-*"2A+C2##)84BEcBR,BT@(Hb*%n,}5vI9!Z5[ .Cr!PQt;u*;OSد*Fu d(4bC5f+0WX#&U{ 1 6 d ua0 14u4 4 -bl4 - kez bꌰf 4 2}3n}4En<}cEn?~ H+0 [ 5e aw[ OHĠNŖHD: # ;ih#$>T #3 A UF\.?FL&ɯd2?tP Z>u` ?u4# t  9t-GDB8\J5 `?CopyrigTtJ(c)J209JMcosfr!ain.J Al8 s<"e evN dF0  `0^%l>!Uhh # t /J` 6 `,21"  )R6MB@ ?R6?21l53|>??Ll59 6 ?(?Ol5%2?rA _ BC`#?Fx&?Fx ?28Pzu` ?7uu? .1J1i 1h j*}cisځ}$8tjT t8i 28?u` ?0u8 1r *;Zbi c28Ɠ?2P@u` w?7u?K= JS9ukuBm  1\2q??\.? ?A7}`?Copyright (c))2059)M!coKsf'rTa'in.) Alo sseGevdg} `0dG|gGHmlG0 fG_7l EMFxr@, |+@`^pdg0@u??@ @ @$Q@@AAA!bH=!'G%JVV0g;u{}6{DIjJA.^g/@wv^@@?5AA%q=A"SA!%-"z߈A$9""@:!I&;M"OM""!F!"!>!"%"!'cI6(p'96IcEU(;sq{Y_q_;@1u0Su!95}XL'?5]0 5}/0~x2] 0xa0~a0W/}]0}M6<>j))" $^rj^H4zzX9@{@HAjElC@fAzEjA%3 *BfE@TRAz!*A>#=‘aE?PP0)J0FXM>UoV2EbE!Vl?zJRQZp9%)Eّ__aVjzQQTcpPa__ooD'oaV ChZaTa bpa`oroo֙oobfQ Chaa [rXqoo/IrfQ Chbq^q rqhzӡrfQyqq c`C%7fQ Chjf pϩՂfQChkh~-?YfQChrnxz˱ßݒ fQvFesp8 u5GDufQChzvpqȹ˯fQvFe{x(l=OufQ!Fe~hӿ"`€ 0c_ Vϴ_zϢHIMj1zi+3H@0{4 2Mth7@!щAAHҦґ@(@R%**"`ц/OOAOO>a7=Ϩ`,QYM!J8/_^4G AA^A|?@?)>_///Eϭaޑqz??4p!j % UwGAa i2 ޑց 2DV R{ha@ɆZޑ!! 1CUSx]PLa!!!! /0/B/T/fn!r//R>!!11 / ??0?f)r`?r???RZ!!AA /? O8O0OR~ q/kO@/O1ja>RQ/O __0_fJQN?`Of_O?_iba?___ oBYNOrqOooi)'ѐց@5Aw-A*VARAAwA?S=pAU7Rv/R>I'LACn;n%?+g*y/gpb6HZlgy~!;wUPRk5]L?;?-;-7-EAͪ=55_z4DiR_ўX)Lжՠހހᴑ1A=AףA-A@)J )33AA>>E^A#}QzAz)݁]S]9AQAwy{yߤAǍ`AshA'1uQxyAAjAVwA࣪w̷ ѠݿIݶ"""ޡ"֡"Dlբrݢ\bbblʣEˡ~桢ޡ֡]ijT=)`{y^AAQwA .;jP=_\>? ~Ҁ?g0g >vtהX#"!w ayUy=@Uِ6ה_p"">U Բ"OՂ.@R@NjDZx"*DYxaiJAZ~x"}F} ɁJv)  R  d * m jC~feyzzYI"q"ِ މ}!fq `"4*L^#zz` "!*}z%z!!`"!/*/DpGpJtOU[`Rp`p`tHapa)bGj#A]pGu}{vun[Aui?pdjեbd5dEeJ eQˏ5QERIRMRmR*}M}H;;U6111s51E2 2\A~pߕpN/[fEEAȝp@S5O5K5G@BCNC~ ~qd"A[lFihAh@qYe.vaadaDud޳hq{qi@,p q=AAZmA`mo$x2DVyW{'~ўIIЯjmHAqahva0$h\Fa.A$@p y>–C@&9?}#?"!?5@S@>oTZd?@sh A:w-Aփ.E9A~BAa҅abыl Aå9}R~~ќ6tӦ8nѦf+B]Ҟ,kJԃ3Њe*`QBb@d#Na*S== ~NaNNpY=rSNЁ ancW>_T>!bPbu " QY0 `(\ P±`rqRtpe`,A58z *c e` A )   ~ Y+|x }BV± = ArCAJaa.va~1~TNPPJae(͹VVP daGa|.T?Zcu2^h?f劷 z ! 
!@  @  wwx ff!@UV!D2@423@"2Fjo|oooe?ooo*u~ƿؿZBs0 aI@Q@PwBJ!ArO^VPVPNOVBVOABhAwO@%"mBOJjA-_WV@I AR*Q+_PP!E@V_`IQ_ QRwAQJ!_*@uRVq(:L⩀*2m) ;h1 &3_q[8.11q.1ՈD45 $h8|ߧT Pn+OB_(HL ⠪EBq"F>#]wB uuU@ekwo+77yofBGToPXzpON{M CVUFD  h> T6D BU?XU??@`?Fx<F BP(?3P?|; (^-  0B`]Wirefaum,\UIusriWntfc%poy]mckp]AplctoWbedac"s!ssH$be^ R% --Hw i] IA?m333?3?y?3?_3m'n~ީ}3?o?)}y}3B3 "4> sb BP(:K??VLҶ?Y\?̗K?vMcUUUuDlW&e97?<$UHPD  &# R>h -C5Bb=I !H>CN3BB5?N??[N;Am1@G%` LineCol@mrz1@"ٓ;@ Sz1@bT'1@>B>CQ3 _V>#5D`ERpXI =~WcSCNR30@aQW!B`?Apy@ig\t (c)>`20J`9>`M@c@os@f` AR@l>`5gsbe\`e@v@d|`P1N`V@s_RXY|`c\m!#5N`46J`Pt\4q`#?ov3Al%k  t7J`P sE@J@CuL(E-1A`"29s NQf"_@s=NRZ"ruqMS74k =BnA\W7BlJ4Up] e8&I]NQ*tD`!p%a9 QMs8qfqjmD?@ Hp`"s :vs y#>t4HF<{Uf#R+gtUXa:ǟ T㊘UFk#<o1w \~ʝ$Y}7@UF\ihct̖?@çly)?Fg,d7\UH ~?e˜Ndx˕f#T9"iJt޿5_Ks 3g!2 TEZuFا򺬜Ei ơGEO~9*Υ?F2W-9#5ʯi ebKa5qi ɚK ύF} "ǻ<:GʝtǍL%Fз?@@T5A{+y&%hw񿊘O%+f#}V7t&L!f#>@1Lt@d7?@6δFX.1?@7Ii _-&1˜Jd݃p]؂wϳό쁞;倈46~i n NuT<-?@gİ?FB?@-Pͯ\hzF˜Nkpa-t![1webNQ UVͿh!Nqա93?FX0sg!𸎃ؙ%\܂i i qa?? u,񄣾 ^uCn2`onI yp84{enߜ˜Zm ?Ļ%yp~-=ʡr! A{aǂ:h!`] QxWizT?/ůc/V!ϚؠnrC%[k?@ѻ  ǨWX-q jω0wse˥)& +'?VVvFZT!ҡpKȹ\?p/pd[ Y=ɲSp?g:XQ]ᑄ8崏Ə؏,>@,w/;r~+(_xfZce40$[)Q_N`w$p_Xvd̪+?F[w?@D4\蔜*)? +%qsi %|w?NY1Oâ ;oq6Un?FWZbE/{\,Op9_pt~5iԅkoO<`o >FG߶}%w++Q\nt{\?@_,S6~j˜`?ԆX ~FaC{WEX!',JH = -?, FB{s)" %݌jTrOQ`_7Zu |r#9?fF$!ѿ+?{sQlg guyt;]Ft?>%_d+B̟`:Rf"D_RF)z>~gj+Oe__uyD@S T6D BU?VU??`Fx<F BP(K?3P?| P(^-:  0B`WWirefam,\U]Iusr_intfcpoytmckupAplctoWbedac"sfd$ae^ F% --H  t  % CFs?c?3?x?33?? ]3|3ML^pz3 p*33?bGzBP(״??X%KҶ?`?/_U5T5&mB5?<$UHlD( &#  >hmcz>ePbTePrRrS9a>oPf2@iTU K=gkcS"bA"SdP;qagUR`?'apy.`igxt c)rp20~p9rpM `c.`oKs(`fpr'ardp-aappi(`n..rp AlrpiwUsrepe.`v$`dp`1`V `s_PRXYpcxm/ #+5p46a^\+TqF+?*<"SA5 0TkEbP` ?@4@.SSuL\U-eQ2e Y02 P ]b12- u `oP1"GT0]0!lwGR U _A $ [PTQ1$$10!9aD!YqaXg3?F8jZM2K 0PTX8(\±éЯ⬩eA_~!0T̑e޹zG?F?@DM/`2F K7A`'3[2i&_ pXP1 R d ,u䰵U_.0˥}?F<{nCl?F#L&ѿ'pY7 (>PώM=%ԗQy@?nς3塛Ϧn fS?Qcո`*Ǘq.?F:c@Y>Ѓ?F˿01BS%̿5{"x<A0R1i3ʢ (:VJ= ףq r((3ʢ  s꤂{1 t(niGƯ3ʢղ% 26Yk_,)=С?F[ a= ު17_؜S㥛?FA /&l? Y%Age/w/4F%%=?5؄Ўi|?5^$?lY O!; M?TQE B%WO3=OãȎD†BMbh?F?5O…BĿ5P% rOO#W†D"΀_%RQȭL.U…@ԙS[{^g_㼟R]fl_ Ҡto%oPBgoidT "?Of@I +psooۇ}}f+x"6}%7XvT0A~ 3Ɓ:\}p= ãʢr4`yӅRoF~md3"ЧfC؀=lXoJC(R,u?Qؕjůџ!.RĿxvðRc( C(!51 *^"4͛TAޯ֑B#C7T@ߚ~_ȯ_ҔcVB)qD9w o2bl&R 8orU7aؖdPPeM0a`y؉{Ȣrx78?FVj)=\S\"rEM8IPӐ j/QhT`I7IUWEcFr#߸ aBـ߿E8_Eϥ`Ģ `X?F$#+/0_o!l̬`ѬI4ܹz @`?GM;X80_B_P/X5_D|=_{-V(__L__Ū/// ??0?B?Хr/H //>Ӭ6H.QUg˛H 7"X x4 g@>ah.?H ?li>OPNM=/i/f < ooo fbooooooooFr+oH JoW\/<,COHgrO\ݱxOp9ǗOO>H ׸]o8U@rB@V&kCP?y ٯ4I.,O܏_.oϤϥ(fbO8 SS3EWiΠm4?FP_Ӑ d[\TfůJH2 ~奔vx< _r*HI[zBBϡP( YV0BTgaSP\ D1 ]o!" n$tv!8J 0 tr @_Dds2 // ĶY/>}///&/5DV@_xWL?Fj M<br+IMʄ'sObXMtφDgas {0 A Ao n ?Fy|Db %.0OOP2i"2@]}~V-M CYѱuDl3P@<ײ?Fchw?ܓ%7ԯϚqqVʄ,DqLY?@-iUY__ r/"(\µhoPYoiVeo1A?U@Yb9]NW@Т?Fº__ّxVu^?2GGʼnC%o7o>Ֆ}p= lpOݒv_?_AlpODO>cEO __As0OBOrOOSиcé4l;8RPWq ?=G\7 ?,_U@3Wҹ7@[8p?*w$?1$H(I Kv?@o //oPgRtȝ¶?ݿ`>ߝӟg`DY-T-?2sT;$ĉB- ;GTs/bB`s`IconD;ð,@7=q DQ7IE̥2\s?F P4L0&9mi (ߧ߹ߔ2XR'2qk?!M U2jCUg2{Ḁ:!Q332̥^E0ӶC_vHcs MeP@M< F86B#x}QB  wNU@e,kPo+_nRoPa%7oXPUFD  h> T6D BU?XU??`?Fx<F B/P(?3P?|S (Н:  0B`LWirefam,\U]Iusr_intfcpoytmckupAplctocJssWeB^ &% -H ~\`C||d{as'Ï?g?3333?3h? ?333Z3_? Z!~? 3?|8 3u333ww Q 1DC*b BP(Lͺ۰??M/KҶezxet??VU D'JĤdRU?wAk?UH `D &# R>h -BRBfBzFFFFFFF (F(B.,BB,FZ(Fn(F(F(V(B,F(F(F(F8V"8V68BF<7f^8Bn 6<P-Iv0BxE~F;C|&.Ml I(:L^p߂cÜr-ؠŀ 4M&㪏AlXUQ** ?!rpjb^_y h6r|/ / ?wQ=/O/Xm+vy?@` /i?@jLz/E Mu b/@aY&UC/ ?/XF!H46{/ϥ?` A8/? 
8S );M)dd?@0gHa!"u Y->>'h?@%h ?FB¯?@тAO@`2u xFoRO(5X𯎶۷O?^/FE֊?@upGˮ?F<p?@?o`g_c3>u  @Y?"IzFfU6!No o!?F0,Uh˙?F(?@?-ݠإgoK_i(X3,}qoo g__rM XI΅& s+Lhǣȡ_]߹liX߯:HQ/5m+VoW@1qU?Fډ3?@Ũ̌s zo jXrpoF L<?@-UZ.?@:_ȹտÿJ3$Cu >`%qX:!2ɣ3Eܺ _r$?@Fì3K}y`#^/;_ 4^t'՘Ec- hm*m =OW=eF o?@ f6_ެ38/uD/Uz1` HP/AS tXjF0뷓uHg)9|{F!̯d)GG%?@D?F8]?@f<ɿ+L]mP.VFd߯ PpBcɾA9h8#yQHa/s$!/$?F%Q'?@Nxʎ6/=O ^7ȼ?F߅ CHiPhN ,ᐤEn|XVWm ?I45ѻߊ-ޝ?@6V T6D BU?}??PU?Fx<F B_P(?3P?| (t  0B`NWirefam,\UIusrintf*cpoymckpApl*ctocsntEwrXeB^ *%  -H ws  25?Q ;3w33w3?3 3? 3?x|~??3'@n~bQ BP(??b//KҶ??4俻RU=忬4?[=?UHdD  &# U>hj cBHuR2U?YYYFAuP@[bbF !www?_.?[G%` LineCol rz"#zbTSawS.` F l .k` ?2%!!%%H!w3#"+"@x4Z*h!Ah17`?!wpy igp}t(c)W20K@9M ]c os f=BR!r1@!a=@i wn. A2U 5HsBe]@e v d}@01`V s_x RXY}@cpm!#5O@1O871\q@?O VAT!T8_b  ?@BS @uL-H[N BSQ(BRuQSf*!x(nT";xl5eV\(]Nq$I$||5~,>u5or2dooooooRљ?F\.kV|Ɓ;~h4Fj:Rd= OףpyN%I:q}ɖ$HC,Pbʏ܏Rpʑ?Fx4U@@@@r??n67',?FJӒ~쓘bFt!A8Q)`n _=c?C;nFt T6D BU?zͅ)U??@U??I?3P| (^-҅  0B`WWirefam,\UIusrintfcpoymckpAplctoԸWbeUdac"s"nbe!^ F% --Hu ,33? 3?43 3?O3 ?O:KO3 <l vZWOW?63?b{Gzw?|?]%tN?zS?s*"iJUt"&\-k7U?<$UHPD  &# R>h -PQ("=Y*ሶǪePf̤Ob;aY!]!䟿 x! ˿ïx"ƕ& ߛ:iXh "ϝA8/!F@ϖ˦IQQVxe A!@J?@9 {ޜ:߉TGaƶȸ; n&)א?Af2?@cBڥ>؇apY :؉ͅ10uy嵼?_TШߠ>ǯDqxGߛZ0L7+',,?@.AΙ?AT o/{^\>Y_Od;f܄-؉BSj~}(杻^eƕ ɨZ-F°u?Av 8V Y⻣E腬\)~]D4q&,e`x"ƕ2ߛ!]ybՠl ?A^GP$!~ &3vb'n:?-!l~"™ˀ$'ߛ!?0g/y$zE2o(+dPAwʄ?@^e%~Y!DζM"5Ͻj֧b 92/*™_sKA4ߛusf܊6L5AQdf?@wϸ.Y?Y'9/^.؃-e^??2™J&RE#ߛ#Op.D66VA`(bfC"QO*}sOsOえ_O :?ּOO7™76n?=Whըa%xA*0ڤN1?\w_s#?利(v`>+!<2_ o?ƕyeߛa/j -?Qc d^瓞(Gqמ*_b%H/|F%kJ?= EW(/{"[颙|ӂُ*ds޾@zo` ?G'13E :Z # Q'p蟢OL "ir֟mTf?Fҽ@@u  Kҏ$/#\9&&ꢅ8/DQXs/kG/#/=x~/q  '?@6\ 3ٰ??@59(W?N?j`S6VJ0?_? @TOQoR-O?O@{\D(Q kO#ߩR\RO?nOOO _R\/K_+tV\&K_@` ik _o7o6|mo#.Af ѠoXxho*Dk>k .0QS T6D BU?V=U??PmI?)3P|N (ҝ  0B`MWire]fam,\wUIus}rintUfcpoymckpApUlctocswzNrdWeB^ (%  -H j ?|2.<+&~  33333?3?3?s?33?3y3? ?zWq{3IqHn  zxn3b{Gz|?&X%(?VU5<35U?SEUH TD  &# U>hZ Pth4J*xV1w17G`?!py ig`t (c)/@20;@9/@M c. os f-B!r!@!a-@i n./@ A2 %HsyBeM@e vҜ dm@u01`V s_h RXYm@c`m!#5?@:AAґ\Ǎq@?OFADCT(_J  ?@CJ@uL-8K)> St2RuQWSq|C!hS*"*?A%`< P8S a"YKYU +׏O}:?A.?";=w 1$aJPu)YfyӍґs?D* r9?]Q/Նay/?A~9^I z Y顃ځay&l %_g탒xQ Ws`?A{ɖP`P_qYE[%Dx$/py?quQ *p?AL<l#?5?`aVP{鞁6!Bѷ?0?~?KqyOuq L%u?Ay):?OcP#Q|\QZHTOb^$P?s8? Uy#TOׄqrp!0]o%|I:Mj9?Uu q pG?A W1O`hbQfҌ@vLR?՞߸ xi?Fo& qS"^F?A]|ӮV `^@ߌh%X?Agodm9W̸euvӒ7?AooXHc"R`̓P5~Wo3BtWu{M?-uj|vۅ?A? !YnYX&Lraݮg-t@չg/ ~w?N&9'Uj|[wT?A*@M-[asucۇVI!^1בx3!p?Pq3j|\ʛ?A@Tގ9KMcۍOG}Zсqџś?AןU j|l1/wrCQ#%Vcof"G}qfN߯S`?McсޯU!j|ui/e?2|c;wnQ>!a5ň t"I2rw~_7G?z+P!U#j|2_{fIWjwӿT#Zc_~aӔq^[?7_-Xo ?EPmu$j|u4@p '߫#{5~)^1OEݿXq?!fzޗQ?ՎU%j|H8 `ay;‡i3{A Xqm!S?ӣ>%((~T?Aʿn燡GVDQBlLx)(}W[?A$EdXśGEi7"GqU*j|PkP>ALC{)t]5B$/夈1${M]1K??V+j|Tt<0]UgS{b=J2?6)\H`x?: l`V,j|70?Aj[Υ-/?/S{6AEikyew/2/pA?61?>4eT4/ t-Y_P2?Ar],Ճ `Blq).j| ?A^zަ???c{-`#BlɒGq]?2a?最VaF7?rLPģ?q/Yl&k?A aEi+_)q0Y0?Alab+VX1j|ho?Ay]â4OFOs{ Xx]?2R.ьO2rUԉTM_?~l/w2j|ՌEɼLۿ__gs{%}@q|Zhqo2?:K/ل*}ɑ/w3j|V`V?AA GĔoo?{ [y\ ro~18?pL0~tu1r4Y%(~k?A#x@ٔ \] (Zm~15\Rb?AoÙj|W+Sy\拏 2$r?2lK?gRL6#J{/L4_ \?w؍`ᾼq7Y Y9?AoӟHY \By\&u18\1"?AvǏF ^ט:JT.u2.ؔ8>2K?A-w9\*"*%?`<1 T.a`Yy\U +k!w2d0?iԽ>| ?G"q=!:U]TPåљ`¯T[ZF'B`0?֥\ؕf!K?i ޱt->2??t㕾`?1ޱ:Yk0$?x&啾On?l@ޱ/0Sao?/3QW敾/ݤ?%;%ޱQ@n[?ꬑ~_\?q6ؿ1&ϔy'W?~:1?@}D'z?q^Q?˲Rߢ(hzb2?<}Id!?.R1)B?`X\!O#?b1*N`"K?Zqӏ\?R1+[K0O?ZY2?r5p1,4F}GV?ylqzmr?{qr e-؊P/]?uq~n? ΂Q߁1.,^! ?+ 1#?SF1/׽M?N ~ʛ?y0!?A(5?%+J111sR?paԳ?$ρ12O<-nrO'J1O?uWp݄C/o3Y/k/Hbk?l/i?^?14//w!?҉R(?Q׻?M`e5O15??Q?c8R?' ~S`W?|&I?o6??{]ҹ?IbOL?Oo7%O7O׽/?x~wa?&Oo8OOz(2?UuOR? 
qG_Q9 __׿a?輥j?iJDO3a:~__?op|_73@a?eD-oa;_oͼ]ed7~}o?RNooko}ooz?~~-ޔ?Doo=oo!o?Bh3.s?9@aq>J\׫r?-Aզt(ac?~ fv?pj!?jl@0Bzf?Hauo!?sAڨ.q7?xhzߏ6 Z?䢉{oˑB(U˱?Ӳ+rL?E"Dş>C㜠G?rMo~=j ?og_bDתG*.?+ӱX9?Lg_$Eo\8?)g?P58PakOd1&?KDaO GUgT/?UA?D}HȿڿjTA?lTA?.r?_W|wI;M׉+!u|>5D?U/D?cJ$Q߼?KSܼ|>q ?[v ޗK!3@bo?`Uz~<ؑ?A ILߦ߸qoU?In$} fd?RD ߼Mg"w?@|>^?kd/Nzg>ᩑ?|>ށq?]AIDoOgqoI?V@MBCA?XѓD/P`rgQ? #0"z|>^!-?E2.ӽQ/?1/o?5!0oRFXHӀη7?+ro?|p0VN{?nSgrgv1o? ZQ,>g]A)?:^)4@as?^Dla>1U7>1$>51da΁U U R !%uq6QU !"#LѪ%&'()*+,J-.0%12ф48c7U|!A$1$0@0Bx?A-s?"u`` l?U2u`` h0;4/!uH0%}g"Oxy銎`!2?꾡xΎ?_6-o6`!9irBVp˜.`r#;zNh<R<trqy:?ؑ}5d!9?P F%uC8J#;ك+hPV!V;XO}ףƏ?IaxOeޯ4t!9&pn?AAE_(V!;["1k9$ٻlE"K;?Z=0?Ġ?{6x!9tv28JR!uV!;x1 -͊ϡGCF'=YP?D߀6)28)s?Am2;#;m_ѵAk9o_eEGV?yql{qf2T!69\zNzߠ?A[_{R<|/]Qk9t~8!9m"lxz?A ǟ߱#;!ak9x2func/? }Տ6!9ao+ٱ#;)gak91#E^ ?+AA#m4!9eB@{?AGoV!;"k9J`r(oFM?NQˎp7?yD5!9S?x_?AJ~#;͌YF?+Z7Џ6!9Db?A?"Vht3;рԳh¼Oa?F55FM^?qOV 5;NS0T֨hst5OGO4}=0eO~w1?*OFT\TOPXS0)WxwwfQ=  VFGAy):K_5}?a=@hp]}e_wST eMmS`"oۻ?|_8u GמY"?A;XOݐ=oOo6}ULO@oz{0Aҹ?Ih!xoLuctoPX쎘//>?APf+w/cqdM@N_ ߐf,LI؟JY&k7}h i~uzTwg? V/'t4cGtvBT8}UߠW ̗ `-2?USi/ ~fw]F-9}Nq u8|s?Nq"~j?^Jʖ!~l>'I?a{fI:xH "|G+oBܭ{73[?eHʖ"lȶE 2?Ak0 G˯ݯ;}`ތہR 5e07곾۵jP#XPۿfzFwwQ11a^$lCȓk?A? [j<}2WW5R-~!?N KAϦ%l<-xW"^ω=}ÔA6[&߼^A?oߎA?9q/&l`Y?ASgȩ߻߉>}`E)vap_߼]?-qGtB?ʖ'lBO}x1%AejX+-k??Bʖ(lR}:3?A{GzZ(laX+!hq袕ٮ?H7A`@ϡkA?()\Vc#?A/ \Ej7 h%*la?AkCʯC&AI +oPMbc4{u0r6-0Z?Cl{ ʖ+lNԲWEKh`r&B͡!ƟAa U?l +r$%/64,lqT~ DJ/&C..4!1!/{uz,?nrh~j0?opʖ-lԕ^GBs?"?&D3DO=ao ƪJ4+`4=ؾX9?Lqʖ.lX?AG h"8/?&EMIK0g5 \8A?kfOEUOy/\+AspѸ\`hqe0l'lscz@B5v;oO&Fߩ4 do Umda&?1ʖ1LAܵ|@je_o&GV.mDo T?Uqotto2Lz;i@a sooH4.m@q (Uj@p?lj~t_Wp|ʖ3L0Bx@-sI.m /1!5?UN>lTU@?44 Ĕ(Q0(SUc $ߜ+(_~:\ApkMp h8\٧ p?A@j'sxJέ<1pЏ $0?`S } ?[9v=(Q_M?Ar]hӸ"K΀ QeNh 3?żඓr!?/e0>kqqZLΎ?ᵐ@RU?Ie ?a?"s0j婞ӿMȟڛj?X֨j?ȿQd's <9$DTT[m4t8*5{?A⪲ʙ\LqY CLAF?AϮN`.Y<8?rL rhw?@xI6C1r.iv ?APY`.h6Y`6A(UX?V0R? s!ՊW!%R\U]?A3r]Tl R`.@~Yx&1OF/ǀ?9@+|<Nbh֋h?AӺ+SYjRSBr1vo?ۣp= :/հeu//T/SB)?:Mz4Ws?^8?pRg1(Q?QTCnE`8%T KD-XD_CTp|A `@`IconAdEYDCT,@BHpqԃ G{ԙm`8\Z?A,U|V~}B XМ_=gWpiOOD`82ПZ?'2qPK=!o8eܢV `8jQԓ__D`8{WAmo!W3uvA}CffoMm=U_H\s Y&Uc@F8AA(#^iBw0B xCU@eko+_9o7B!o/PUFD  h> T6D BU?fD}??ޖz?Fx<F BP(?3P?| (^-t  0B`YWirefam,\UIusrintf*cpoymckpApl*ctouWbedac"sa*cPe!^ J% --Hf X?N??w57544;<J3?s3x{w?33w{o3?333?3ل?G3 3}?3u3b zBP(״??Q%.KҶ?ݳ?$UdPitV&U7^-?<$UHLD &# T>h uA` ?2  O"*O">O"RO"fO"zO"O"O"O"c&w&& (O",l*&&4"u>)J>U2U2?YYYF9u>u< M@>b012632b9 Q!3N٣3225?ON ??[>1m0G%` LineCol@mrz0"ٓ0Cz0bT'023A3OF#4 ElI $=*W_!kStCB03!0QhkQsWG2`?Awpy@igXt (c])P20P9PuM@c@os@IfRArPAaPi@n.P A@lPWs4be*`e@v@d(`iP1>`V@s}_h@RXY(`cXm!#5P4}`0hQt\4q`#?of3A@%? doJ1B@ ?@BcJ@3u3-14"29G A:" @ns=BruqCq7|4? =A\\Wg7QlJfD Ul855aQl-Qc89q%)9?Fs2$s .?勦G yU4Nt`o?F3t?@%kzx?Fhg{  K7Ax@݉_/>l:#ρqtt[_1G }3;!2 @r !UP *(C@?@O{%?FT>~CUD`<%Y_m܁O~K5՗h᭟MXAbY?FHm?F^X)ʥUJ|7 ԐMpہTחpI<CsX…Ql?F'D\ ڌ Ie p7Ie?F}?@=>F٢T.^SɯU&5A4[߲e3EB…dbb?F肘3k~gڌ)t'Ȅh^8Z?Fj?@sLA 2?FF>V~v!X<[&_1ʌ>N`ς… { Ȱڌ&*gtO'ç %)=^.?Fi;%?@Wsֿ|U㸘 = 3hCt -.*tttGtʄj>ÞʆH2?@_ ;F?FOe.NU) ~ӌ/*ۅ\An#)gq㸗l= YFy+{pwٓ4Gݗ@|'+:5"=ß/̓?@8/<r f}h큜M1/pe,W\M/_/@|]@/3Â:Ă?F//` x(6@+ 3 5 ??Ut@Z5t?/  ?C/?g&qgil)QTT@:uɁX%ThATRjpRWFSTQ>qQ4PhQ[cIP%aȈd!QCt,@=RCX>qqmaA DZ:udtx:u9v6R<ʡwuiq__ X[g'd?H ~q [f juLo^o {AU@!3\r2UdvKP6SfIv}U_vHtcs MPOL*@ FxMtM#uB 8yv@eHko+(/moZa(ݛo PUFD  h> T6D BU?e ??Fx<F B_P(?3P?| (t  0B`QWirefam,\UIusrintf*cpoymckpApl*ctocsreHXe^( 0% -H o?Ǐ?~?{?_9XXTT3333?3?333s?3兔3 tw  'œ 79D pBxmwo3Q3b?Cz BP(״?@?%/KҶ??I7A`y忌G^9n|? 
?q9BIUH \D &# T>h ?O[W1M0ۢG%` Line_Col@r[zM0"W0&CzM0bT)M0Z2Z3T"S=7` F@l@N` ZoO2E9A9EP5|51oC3"HBNKRBVPD:!ZhQAGښ=2`?Apy@ight (c)PW20P9PM@]c@os@fRRArPAaPi@wn.P ArBU XsRePe@v @dP@1`V@s_0RXYPchm!#5P1#86`@^n1qX`?Woif 3A% do8@ ?@ac@;3uLD5-M1) {0#s-:q :ru!Bq[_s'$-1L`r'2l U8q0D QUAc8af?Fcz`?s e JyM8,%8w9#W3w +G=;ỈEwsh2_cfiZ'$ ;𤁾*UwӥiDf2^P<~zEdqn>oH?@oHWY~aOMf 3_6o~K~)tm41!7Itk ~ =ExGFt7aNdß4 ϴ_ zGy{I+xsHe? 6fn`(?@>1&Zʥ6EW /lewt!ٸy?F5{ɟ} 9oOt`SIw-Хj;-"'hɞ쮿~Z` vC~nx24'>,Q?F1Q}{.i>Jy%~c݊/ Y na.ςt cOtr MAņ2eu]9o.Q¼QmP` 0)t|y/1/cխ7l:N?)[ބ2z'~=ɞ:ƻL|6z ~-u1qyЬ=?@93bq?W8"?ͿL/A>XH ԋ!J~hsɌ??zeb)a ;@eX`OÔO+|SDJyʖ?G~&z>?B_?!Oas5_PwХeƅ%7D輧[ yw?F?(?@z7Ghf`M!{ܾ],d&vigi4"|oRY3it0wqـ" zo;&7Լ?F h${d?$?WgǞ(!&&مp4H֙Է=J պ}5"m(L~Dt0#H0NxFFlA٩6F7bvs"6?F$K){DBQ% Y̎|(8D{ܸPm wFm(ۃ?Fn܅({Y U ;^ ϾyÒٍx,p?F/r L{? T6D BU?VU??PFx<F BP(K?3P?| (ҝ  0B`MWirefaum,\UIusriWntfc%poy]mckp]AplctocsflXe^ (%  -Hm _r?? ??@9 ??33? 31?v?3J33??3F>w3H @b BP(봿?/KҶ?VU5yR55U?[EUH PD  &# >h<JT$EINÕU@VU?@JF BP(HftMru` ?W.> *O.4uHA>J2U?YYYF u<M@-bqb# !Mwwwĕ?K.?O[ۢG%` Line_Col r[z"#zbT)>rAcS` F l .` /2%!%%>!$Cs3p#D"+"RPpd4F*t1s1{7G`?!py ig\t (c)+@W207@9+@M ]c os f)BR!r@!a)@i wn.+@ A"U !HsuBeI@e v di@q01`V s_RXYi@c\m!#5;@1#7@q0\q@?HOFA@?A T$_JP @C@J@uL-4~G: S~Q.Ru!QSmx?!dِSn#?28Oa?MbXCl? rHr= ף?q"E?@GU5G&Ƕs˝o%JS?@嶰} +B-IX9v?qz'>?q]0 l?2o$CS,.  8?kq"vKap*(r!됮|?5?$'w ^?R?Q;޼>Se:0^zڨU3;Us,?7<>/P/qا</$6-˱? w//%1?(s?/naazQ~d(\okoo@wjZ?Fd+JZЫw ua+*C?F-?e_EӺvomϬYpn}GOOcO߇_aDsb kxOb_xaKI OOPbt_jՙui4F!G2oDooKAOSCo_oj C 3EW^:^gxS__07S1UxFďՖ1=O/+=/myl͟WAi1G4A_%??a1lR8%??G JR?F?;(kY*L &6n#ЊVOP֨^'ȰS?*JRi{tKzp館R /- 6X"ӿwe'~ m3aźEL&d2?FϋǶX߄ۧK7I--C\51IR<z/hRx oV9j`_r_*59{WA T6D BU?6T??@U??I?3P| (^-҅  0B`ZWirefam,\UIusrintfcpoymckpAplctoԸWbeUdac"sUsPufea?!^ L%B --H }l??A1*?AJ??Sb?3?3'3u q{3 {1r33{?33?{?3?{?3p qp|U=A}U/Zw3v?be( Gz״?d?bӸX%??΅)PU=E&e?<$UHPD  &# R>h - .BVj~@+&?&&4uAJNU2UU?YYY@Q#NuM Nb 12&&327b%9 !&3N#""%?>?O[N#1 ۢG%` Line_Col0r[z "#03z bT &2&31?F>H4H5RpI =$fGcC3D6BA#0AAGڎ 2`?1py0ig\t (c)&PW202P9&PM0]c0os0f$RR1rP1a$Pi0wn.&P A0l&PWspReDPeJ0v0ddP@1Nw`V0s__RXYdPc\m!#56PG510P@\$qP?_V#AdoJ1@ ?@BSJ@3uL5-12)6AG0c-6Bbua5C'$-BV1LHG'm2lJeUp Q9 iA5c(!6gDio?A`c ?is؉$j8uk-UuI1Paog1EuT߀ٱTr%vPA4ٜ+v+pqaA0v/@"5pw TN/ٜ ~Dqw@dZĸ/.g|qT|q OɽH Tdq@DB`OB2drTCTANA SASI*PaQT]AB,9Cd,@yBHqa1Di "JxTQdYh irBupnBitOOF|JxX>wW'T?E}=qa0D~ VJxj?__F|Jx{>w4&|o!3b2f@rCV4&oBm2U_vHxds ylL~o+@oUB7oPUFD  h(^TYYBBUFL&d2?x<F BP(?P?| 8ȅH?FX? B@?\.,sU(/  07B`:Key,logica ,n tw rk s s em t pJ"r pP#i"Us c n"c !i"%V eA^| .( G&u }%׀EJ߀\<nOikDrag onut hepe.Rih-c]lckaUdcos UPo etQeAvUwreP 5aab BP(Կ?'Կ?&d23L4CUGDF P# h @T(PYY*# UFL&d2?@ BWP(P} . ! :DTSDrYD Amu` _?U"6@@T^pru# ^v"|,|'q:U? ?4b {[`+:#"6u#'򄤉Q)v" ^!L1$" "(7'>>ÿ@q05?B46 0r&23g2um0`҈-1Bu:@ "31A;B"F `$9u `u`m"[ bv!wu`)$"%ط#Q!'1`Vis_PRXYcPm!#5\P167B`?CopyrBPUgPt (RP) W20P9 MBPucyPoDPofRRrQruPQaPisPun AlPU yXsReDPeyP%vPdBМ!#@d$%v!B=#&#Vd&& "$##0Ub#]u*g,A^aN @ cT<*Po mPsPQ>gdaE C _s (1(1"hh62)t3* tBaa R+t"qq (!pa cv"!6uM\%G!1[21@@Bq653T @ b"q3P_EPB_ UP0aEM5s_$`&ׄF3s͵NSYFue;AT,@QT#nPUr;65b}@`1p"(qffs_hU(/TUUJ5UU@ B_P(? [M #(. M  )FV)UFj)FD~) C)CD)C CD C CD C "=  #"#2"e"#F")"#Z"-&()))) ")l"U'JuM{` ?)"$"8&P6d"t""""""",",&,(6@(C6T("d/pv////u#)t"bCBB !yt!bC#tAFJU2zGz?@MCJVA@\@5XbCYP[YPV&R !4TJCRC RV R;Rb"CY[@W&R}\&R}_CBNaNaC`?Copyrig\t hc)`20`9`M}`c{`osu`fbtarw`aa`iu`n.` Al` {hsbe`e{`v`d`uBi j@ EQXC|a'0UYrźV(e0 $$zlJeUpД A9 DqHQ]?@ǒ"ǔAP"C 4?p "C id XH}$^E?@[ )K\>o HrL?+ 1>k P>#IH GCꈢp q@!"BU3&@d>~:j?8-DT! 
d;Q+=O҉䍰&kf :" (Wc ٖ4Q q=HBBǟٟr85Uq#!pw@q!!(SqCq !&Q ogċWƄSO=?@ծj?@.H{[?@?pOg-+'|F+`ˑόV<3= g%L?@ȭ?@<&?@%Y0|D6όTЏ匡?+VRgPI@?@u_T[?@w?@xu+D($Jϣߑ_c|~+~ ό0E'匯̟ɴ΋BGs˄?@9pf߄gA|^x4і |?@[>SfOߢu|ؠ !;?@Xf Ne|$rSDKؤ!PIA& 2بz%/N~4լ'=䊭bg^䲦EDEѰ k;8?@LF߮z fc)V|TzLD*Mc/VNHE?@UN??ѯ ߣ?ޱL`F?q9<ᕚ /`X\(*zϒcFCTD?@_~?@R** R?@@;AQo|Oۈό`D 北9Bc]҃Uƺ-jS ?@&nhS?uC-n|G`ş[~4J dOa'9e*G~ѿ?[CTHYaټXr ͍5LJ45'ek-#LC //63FѺuw ɐ'UECaʔF3TJVA&!BIB?@H8???@#bf?@?$ nTfe[|?)˴1҉?1O}޺(꾟.?@?16fOxO6BEQ̡&7jlP?@M?@13 b ?fgo|nWׄ.ƾ>_OO@O̡O__rRMСt+iu@p` idg?@ -kC%_~~GeJk"ߏ___С_oorrMԡ&oZ= P#?@lV?@M$ύXof]L]R όJ 6老QMסyǼo*ԡr6Y=إ~81JJ&Wd}f+1׬$3-?mFj4Q0^dl Jް PWƦp >L]c@=M>%ϭs)vE?@";r,̡?@x{aхÎT"I|!1߼#y\lǑ4N[U ;M_Mpet*)nOE@ 7@,Хܣɨ z=|Եĝ<ƁTX/$/!s"*?@g9Ų?@bn?@ Vp!iѰ?f|^bܲƶy\dAo >%V@V+幻]>spMy-ui3獛f"A<`[ r9Y9HD: ## h4>T]]9 M 9MU@QB% ?@z%/N?@Ŀ_5P6 ]A| ](# <# P# d# x# #mJuM` /?#2FZnuJt  =Dv-%t}!"4މ%'swJ%->tJU2zGz?=@M#J 6A@"b=8bK9RX;X6J3115 "`?CopyrigTt (c)020090M0c0os0f21r01a0i0n.0 Al@ 8sBe0e0v)@d @"1E4R#[31 wqB?F ?AC4 $Z?{5-33 7F%%O ViiebE9 VX!W2'K0URňF!DK0 SVgZlJTo@6UT $<ax@C11AUb(T3RYI0b|"ҩf\C1 Rf"J:m_!oc ]rte$aE1}HOv?@?Xݪ@c +Hj!yvR,xbE?uf9B?@>SW|'1Tm|pЏA,xE?uӛ4?@%pW|?1Rqyh ,x?uC|?@u_T[W|Ә {m|O$}dI?@ˠȭ?@e?@)Ll rLߊ?Zy!Xm|kb_6mu(mu|#vi8lz |" YꥬM(#??@ծj?@1Nx?@ f4?ŏ׌m\ҋ:m|XAB/A9d_?@Hm9S@?@$V 6E׌ nB6{m|Jqe!{q o.2ZÔQM8^N>@M*)nO?@ j?@*# LK׌?ö}>{} SS~Dxĵǯakz^5/n??@!,?@?qVw׌BZc4r  x̶.jlT0{xZl?@YBN?@9}7(ѿ  tR?ɨV=7;Mω5;P4HE?@'~$:vߔ׌V9t,Bq0oo!ߠF?@qKp ;R?.A֍^m|ߵf BJq! myaUe2؄oeC2rA 5B3"HD: ## h4>T]]9 M IAU@ts ?@A3V?@Ჺ?@Qp?QDJWƿ>m$>  JuM` ?? $u.Jt+   qte]_W~7.Jzx?{ltJU2zGzK?=@M #J&A@bM(+b[)h+h&UJ#!!5 `?CopyrigTt (c) 20 9 M c. os f"!r 1a i n. Al#0 (s'2e ej v90d0" !E$ #[#! 2?6PI?AS$ j/%b-## 'Y6 %?Fiier59 FjX1G$"'0UBŴ6!40 DcFwJl0&>UhEM![S! (# rV0$*$d*Y Td$VcRHD: ## h4>T]]9 M JU@s?@߳g?@Δ ?@=_&ʠ w?P6  >JuM` ?uJt  /vIt=W IR_IlAm#>t2zGz?R=@MJA@bbJ )+&J[ V"?m& ?A$ /;%*B!!5 g`?CopyrigTt (c) 2U0 09 M c os f"!r *1ai n}. AlE0U (sI2e0e 5v[0d=0! $-# ',m&?6iiew59 65X7_'0U\Bm&Z!$0 F"'JlJ}AT h7!1!Z@ iO3  <P[&/~# M 5RbVJ:Dq9?;S U$^E#JTAR|UEUG_L3\C\Ry^Qw5U> [k;aYa|U52_.U"rA 5#cHD: ## h4>T]]9 M MU@9e*G?@뢑??@gt D7q?P6 @ > M (< P]dx $<PdxJuM` ?""2"F"Z"n"""Su )Jt!  ټXr)5t172X_;y)52=)5L7dϘ'!>tJU2zG_z?=@M3J6A@a2b]8b9;6J[3 6Ba?MF ?AR4 ?EB3AA5 G2`?CopyrigTt (c)@20@9@M@c@os@fBAr@ Qa0i@n.@ Al%P Hs)Re@e@v;PdP2A$D3-3C GMF5%}_ViieWU9 VXW2'K0U&¢"x|6>|>E<ti!Z$@m;M#_I[2WUuP',Ap4RޓpIwS?@B?0ڙՏ|& |֧|?i$g#i!>qbM#_pFtUrqa.^vb?@)fMo@Dr3s0{욹nZ|q9a!iZ+|һM#_Z1 ɐ'?@a"s|? m vpt(&4xquBIB?@Ǐg0p%#|n׽\|6 ]yEb# ]S ܿM#_!ߊnxyjlPpv?@0?@F?aܛ|B6yI^a};I1y Yb[sop|Un [)N#_6H𔌌@uhǕ?@A5Pqy hq|o]^})f?^K!NL3_Wiy;oZ=SO?@`T]]9 M 9MU@YH?@t{FYX?@m?@Txݠ?P6 m >M (<7 P7d7]x7mJuM` /?72FZnSuJt  ԥiAPɉ%t}!",_։%'%3庉%'?݈iӻI>tJU2zGz?=@M#J 6Aw@"b=8WbK9X;X6J[3 w2?6 ?AC4 TZ?{5B3AA5 "`?CopyrigTt (c)?@20K@9?@M7@c5@os/@f=B.Ar1@jAa0i/@n.?@ Al@ 5HsBe]@e5@v@d}@"AD#-3C eG6% O ViieE9 VX!W2'0UҜRŭ6!40R SVgZlJ <>Uh$]T ]&AAC1(`3@]f?@y]0On?T|#EA0_;b"Ja@B*?@[Ŗ?@2Q vrL0adX0kvX'qeD_z`Օe|#Pv^a Sm?I0 -? Yz?|"a@ca(Qu}!r5{5aEhde}U*\?@dB2n?@Dِ?@BwQTDx~uik0M5eigJ@|@ V|wv0rlz v uB@#aEuVA?@ G0?@#S?@S);glk~@|ˆV| -mHux@ Wl:,*TȻ?@:&;]lѤq@| yF֖^t&o˿Vhhdh/ fα?@C%Y;<ddI0A{*V|qH_C+}u$>?@e_Y?@]=Q?@Y+r;.DZӕleM@@|PMkqYyK\r֑ٯ9edon@!u]?@F<ǜl0C2rA B3 BzU=V|b Hot2 IGxtXjq\#x_H82&#s xh;Z LS Fx0n#JwB U]I~>a,{o@+o;aGCsYPxx] X0٩8274h9T>PPUFD  h> T6D BU?VU??P?Fx<F B/P(?3P?|S (Ν:  0B`KWirefam,\U]Iusr_intfcpoytmckupAplctocslXe^ $%  -H o?6 ?" K 33??33}3?s3E??O3P33333?s33^3?xw73bp= ף BP(??N贁N/KҶ??]VU53{5U?S5?UH XD  &# U>h 1{17G`?!py igdt (c)3@20?@93@M c. os f1B!r%@!a1@i n.3@ A2 )Hs}BeQ@e vҠ dq@y01`]V s_l RYq@cdm!#5C@174x1Z\q@?OF)AHGT,_J . 
?@CtJ@uL-<O(B SQt6RuQ?SuG!lm?ݣ`PBGpdn:?"rh &賚:l?5#-e0 CU$Hw$s 0g_?ڱc~b'?'W%X9 ڱCU0/B/Vѝ?_/ $&GQ//d6`?+7e /P?!?bd9d?äW_X~?.F?HX??^71—䑾"P#P??x&1Zڱ?O;shO|#EDB͂a@κoOOPk?w_rOäҽ<#OHƣQOOd)UKOģ U+OGQ_c_d3*Yچ? E0YQ\ڱ__'5%o'_äldi.aU(i\rCt!nT vø#Up"Qԅ4?Fx#˥ۥGzwmlsEGk孅 8P\˥ۥ[BP(.ԏ!:r]ﺟvvD{%u/~L^F0-Qan v8g lvvoko ?FXe#Ͻ|㹼KG7vuu!?F `ztFc๼пse}iƢ?FYyÑ,F3UJ`ع0qxr7ߟqIƋ"0BP&d#2tkxԝ>+Ͻ +ē(!Pg:i(!(%aoaaBϗϫ0r?F ) ]͹_%uqbFJur?/œ>Cl/uJ o,BEIu`F ^ur^?ݣm%}ŜhE$_v~ rh ԕj4F5/G/cDVc+/urn?"41 o賚:ldo"tւ!,/m?8iFc?b'vb't//pY$H?Ĕs 0g ?-B?$NGO6-fOxOTfLUMLUX9C/e),1S_,euV5O_6F_Op_{e_(@]|d/aH?л\.sCoUo??Pvom6`?+7ne /DoReάC9~iBo,0rw[ʡ n,obd9ĔW_X~?.ƛi;+ԑ\;DF1CUg-Aqm_. o ?F^VjrΟHm^7ةbJ?x&1⑏s[FĻ /F~9jnƟCe!;sh|}P5pkOw_rӿ崜v<#绯\篨\O _wρ6!jn$^_ť`Ņ_Yo:I#e[ď~菾e~%g~td08} }T6 BP(ě߭2EWp3*Yچ_? pYOQ\ԲDOi%iq&G":o8jn1` 7oi5Ѭe_aiP\aF[`!펒 j1qyrgJ1CT/ħzT1T$-a$2h#T螠!b`c`I?con!m%M#,@"$(qN'-z ҧns?[["A۰G߶r5iR/?=z:'2q0ݼ !0Z". 6zjV??=z{7A싵O!3B2F # FFOVM[_HO\s }t7M:3F>.#lwB U{I@ekKo+8/MoKaʜo{PUFDf h(TPYYUA@ ?IF?P?hj/ bNb>>is wץ @,wOb WA/>,,'OWT / , 'ȉH?/&?"p  0C`User,c_tiv/0,Ui10e72o10y30ObjI4~$e^| UGoPs??h 1 12 5`L/0af! "`I&bW_ObWnFdF^O%nF nA6B XsRe Pe@vPdY ' C G?d ,p(!P^H  cN$ daocJTX1ddL"d2d2 tBaaR$t 1t >tEAAVvaXt et*brt t*یtܙt7Uݦt޳ttAt!RQfTpW0S&UT`r("#'BaK"B&s7F֍A @J)a`wQs. TPxqPEq?`eUT PP s7oSNZmh g.7' DVeU!0IN 6B8B%7qri"2i"2i"bAy "q?jUmEq3`!3`!b3`!b 3`1rQ 'a!R ’лh0gC<s 1. 1^\ L6L?^?2~3 5(,;C6?;y2?:;O(O?KO15+,?>?OOt?~`B⸤q?`}b Hn7eUGD  # h#$T #3 BވU@?Zm/ʪиP} u` ?u4# t 5 t #J8tV53  S UU%3MU!$g`?CopyrigPt@(c)@20 9@M c oKs f"!r !a i n.@ Al (s2e e v0d S`"1B-y##0" 'I?e ,^A   #!H1p%4;E4;34;547HD: # ;ih(>TA3AU@_bZ$1?@v;?@sSx:?@m?Pn6 u{` ?u# t  ~G?t+&@,\`@V>   @JRUĬ&<Q!?b& ?bbbz@B#E#2zGzb#@ 3JE#!$5 o`?CopyrigTt `c)%020109%0M0c0os0f#21r0P1a#0i0n.%0 Alk0 8so2eC0e0v0d^c0 `1l>q$UBh8119 B # 1 1#J1u?@}/?wרll41 kFMыB!zQ?@4\QLIqMqHgLLE3J@LbUӵQLZeHgLswD}{uݢzaʲ{eD?@*N q:QL3" hK"HbcyN6B  ޭO?  ?K`?]r5!:_ og4!B(ESL1@rC#QLߦMNUGNKX Z?@y?@zI@$TXpUTI8$6jI@_A\]Xˉ4VTܿĖTA3AU@vq.?@ ??@v?@?ƛ?P6 u` 7?u# t  _ Qt+&k1@A> #  ""2"2" P"P" n"n" "" "" """"l22B"2"2A2A2A2A2/$A2/0$A2BaUh@SaSa9 l $k$!"1 2!!Q#Q!JBa *Di]?4 |#O tlO"L"2k6MBAb*R/ux ?@ f:|VbBe|O~Fk_XQ /3qB  ly2 =Q s !#1?rZ35@:(4<7r_8Wu3gux TH?@ 0W^ 9P?@l5͒?P-DT! bRG+|4̋|Iv>'vtPϷ_ٸqt~U Q!398Jk}(#Wugu3?@š?@ @ڢ?@I I~ eЏ!y|m%|?fCn| &w?[4g0hC9k}}#WuB?@r[ɯ@?@p}\y-!{oΏ*E|p* %#|k:|xdw]@Ư0C9@ѯ}#$jQ֒?@ٍE|؛|4WmƭWu2!ɞې:Ag]rvpA7?P,Ȅj6{EA~v+m j~y]m`t,&فw| b%ÿ h`rC9(:}#Wuehtn@6fkЄ;??PDžwN!rA z $Zm͌|}Xko?w4TGMs߅r,39M_}G[qn!cydg*?@+?@vw4y=Y4eh"E|?Pyp2|eLU@"@r翨t)C9zd#Wu!cy ¥?@Po*?@/w ?@Ev\?PǏ yf. |dl|6m67 tw=]q@)sHM)3F#Wu!cy:kJ>?@֣e?@.`WC.e?@#[{ s@rH%b~| |y(R~||Bٵ` W),C#Wu!cyy?@;xY]ÀA z}Wi|YnzvtH,M߸q@sM&O8/?+?)33Wuacyhy`'&/p 9>?P%}q AI<bN-[|t'mmRa|@S>'/l?LO^O)ő?Ē6acyZު:?@\\d`ڲh?@Tvϩ?P/ yKDiҊH|N|̏MD`@ЗY?O__)aOb6bʘ. =L?@ݒл?@7g=:s0rN90W|?;[B*9|>0xYea@_z_oo)_Fr6cy~d@=)yΤϊUp!?PņJoWRj 9~Ipwpht+;vVUͫL _o\!{/ɰQؾې JfK;|E R/qwĕHD: # ;ih(>TA3!AU@ *?@8PzvL ?@?@}j_Nw}?P6 u` ?u# t  Pߢ=t+&Xl@$ &> J=UG ? ?MB#2N贁NkC*u!z$5 o`?CopyrigTt `c) 20 9 M c oKs f"!r !a i n. Al (s"e e vz0d I`1l>,>Uh: #!#J"1E 10 l k6MB: z}?@5ё :?9^B:<_ܙ4|DtH ]H79?zF:1E2?51rA 0HRNS%3HD: # ;ih(>TA3IAU@ T?@3g?@@?@?Pn6 u{` ?u# t  [DrXt+&J}:@8 >$ J=U"<?&& ^?B# #2zGz?"B3 #!$5 o`?CopyrigTt `c) 20 9 M c. os f"!r !a i n. Al0 (s2e e v00d0% `@1l>,>Uh: #!# JJ1@ޏ~J? Y0 K {Él!k)MB9}jN}?@n'I?@KǦ :/??4dd]#=̖^/  "H7>zIzzCL7+E?3r$?@"?ieOw@1SrARS{J пPL/~w5ONHD: # ;ih(>TA3AU@]?@_Ң?@0c6?@%#=e?Pn6 u{` ?u# t  Kqt+&P8`@` >$ #J=U <?v& ?B#Y#2zGz?_"@ 3JY#!$5 o`?CopyrigTt `c)$020009$0M0c0os0f"21r0O1a"0i0n.$0 Alj0 8sn2eB0e0v0dNb0l>G!,>UhG!G": # 1\#JV?@+f_n?%㝢l ÷lkMBD"3?@6?@g  ^:?/IBLJjd8_;qIi! 
6AA{E3~ـ@ds?@&_ԍ _E.JS'BFBL6Jzܘ+# \>>[B  B½ɖ{? * ^u?(Wr#5eP:__(W4#a[B({EL@7҇c;n?@7uǔFnx_,@ArA_ bcJH \ 4 BbnbK>>H_)@U " &E '/WBHh/н?J" G 0oB`2user, ndu'n tw wrk sy t m$%1rp i",a60pl:0cB0t:0oH0e^| oUW$3u {? ?k0AICȋeȍ{^zȈ{̝̍Ȍf l` uvDrag thuesapWonodwipe+.miUm9r=ai:bH]tEĿ??$XE ſd:M? ?nUH TD # #Ahj4JTAiiM MU@M?@F]t[Eg?P % -jRMQ Q`#dtoeRQ#mNuQ` _?`RR@o`j~ " "uh#  N+bO%X& X!酕#NU2N贁N{?\N3N"+#R "Q3Rt$V 4i V-)' #% #I0@kq0~5?4~6 !uLrM+0@A0=2b .6u~`bP"o+% A7/I&ց#AF1F7`?Copyrig`t (c)@2U0@9@M@c@os@fBArԻ@Aa@i@n}.@ AlPU HsRe@e@v%PdPO /N`V@s_PE P.@hm! 48B6RwEl#$UtE#3 MA#0 Na.RaSPu(?!:32# (Uo!o3elQQx_______u8`oxo4oFo1tmghUU(g5 Y ooor;/?袋.@ Q.Z?'cK3^~ohUu$%#5?@W鯏Ō(T退hЗ vڟGP23(ta xh0 2"@3ɞ@ZICUyP}өqfAsPbt_Zc0:5*o/e,;D'Q#l%x%Ho˿'9ϗ,hBmQwωβ%\IYQ6Ͼ#č?p"^3Hk}6ѢtqZۻɁTQM+1+13u(z贒A:7k:S0U'R @o@a1Pd @y@t%Pm@u'Ty@ud@a@Am3S%,@>2_H f@s Y"tq_HF8rLxw{#xݡB Va_T}o@+UoBBJ4PUFDfP h>(KT6D UUF~?x<F BP(?E P? g/5> މbnbK>>% 1@ob w!O^#,#,#'owd /> w, #, #'dH?/\&?+2  0(}`9Round,keye0Spria0ge0Bi0as8mi0cPaa0i0ale0ew2v1ei0t7s:2e0at2$ e^ 5Y #UGmdaGO'UTTA(ruC;5d31;1D !P` A0k,Oa0 r]0p~Y&b_`TR%0E)%#A#A" =-7?` 3?37w3y7?pw woc]Drag onut hepe.Rih-clckedtk ywy n9 'eg.bL&d2ɿBP(?P?fОɿ8ݥ??~35CUG DF *A)#h8T P  UUF~?M&d2?F BP(?P} rr(V.BDCfCf&D'LHbbf6 ^։u`h?"u)m bI%%R& R!͜U2'q ?&r#?\.?hmAr"&+'$7q{#.@"`H_eigPt_` UsZ0rG]D\0amZ0tl2~M0`W\0db0Q` i7LbZ0n^032pa ۢb M0a` Evz2tro1R 3H3C<{#HAMD+/`V\0s_TMEGcPm!#5 431H!`?C2yn0]5 u(}@)g20@U9gM\0c2s0IfBAr0Aab0i0nG gAUl@ HsBej41dGi"6{#r% ] r(SVR.4&U~{Qv%"@wIP@VT?]fBu`-M01r[2S@%#Ru H@qrHZV!C?C$2a1,Q&$h)4="u`u `"M0d 1 1`u`i"`07 TC MTG?hec^]0uL"UHi4FF 14WK'%v!t+0. VQ%%2ndaXT7GTHHHr"N%2o(4TLEh PETM&fESz2 &$r2y1nj0CsG:I 0v(`2+'0"3+6T-!\g?y>~?;5i"%5i!uŏ׏*>QP6 KC1d12A51`_R RiMzZ0 11 }pa0d@ Qo&J@mW=\P;41N%5uo(S_QHA"-(a,cZ6bb%","0` Be ߱cQebr32`@{aK(+`mA:E u@exb2YcMigZb2r"mYy@66%2''9B"8$HAHA&P.Q6֝K!!W=K#߱aU0B39ECSe~ߐߢִӛ  i"gaW}as@TQTQ%29B*&R66sRaaR11aaL!"8Sh ?#EcH3@L!a r">9EGZAER ," 50(:&U!P%2>sUKfoG,`x'TS-?ss`i"vUnOZ G7veTa fC[u``ub`fR (:9,@$Zr",ds,waUAi{Bf#pniDHD: E*)#;h ,>T  9 EAUFM&d2?F BPW(?ٻjP t`  ?t Bn>u;.ff af puU b J|U2'q" ?!&M?\.?A4'q'B= $N贁Nk?<@e*%Kb #Z. L@* #*1/45 `G`?CopyrigTt(c)20x09Md0cb0os\0fj2[1r^01aj0i\0n. Al0 b8s2e*0eb0v0d0)l>f0>UhhffS1D! GUaJj #H9pp[  j"FMBVsDjZ"ICOB(`(rMrz$ DEB`BEA!vJVRrFTQJsO@y_ l`Rc c_Hl@s ͿKN?g>FxsLk#Ș!wB h"}ma}uo@+ GyocG_XOo\: *OUFDfP h-,TYTYBɉUA@ ??E P?7 u bCbWY[KX^x @Db LLX%DL9 ew.؉.m(sU!A&/2ȍ2H$??6?@2BW( G 0 `;Taper0d\k0y\S0ring\B0a8m0cPa0i.0al\e21e25s2e0a#t2Ue!^ EUYGmdaOL'3TA}r"Cw5o1w1D !` UA0k0O0 ro0/%b`TUR[ Ew  )?__8/NSe ,̃~ύMώ ̯ Ͽ ^ mϯDrag onut hepe.Rih-clckeKdt:m nss,Ja eVou eEov gba9.mbп?8i6пp:m??@o tڰ4 UG DF *A)#h8T P  U[UA? u?P} wrr*V.B$#F#F$LPi(yBBiF` ^Zu`ho?ul +b)%2& 2!iU2na'qt ?s&R#?\.+?hR""i'#J+''[#.wt@`HeigPt` Us:0r'D<0am:0tL2~.-0`W<0dB0Q+` I7L:0n>0r3e2pa }b -0a` Ev2trGo12 3(@##y,J[#(A-D+/`V<0s_TME'cPm!#5i 4g@7(!."`?C2yN0=5 (]@)g2U0@9gM<0c2%s0fB}Ar0AUaB0i0n' gWAl@ HsBIeJ41d'I"6[#R%~ r3V4U~[QV%f" ?@IPs$U{'UlFBut`q--0a1r["@#VRu(@r(:VC_3a,x$h4"uv`u `"-0d 1 1`u`I"P7 4C 4G?heC^d]uY,"UH!i&F 4dW'r%f!Vd0. 
dVQ%2nda8QW]!T((R"Nz2AOBRO(4T,Eh 0E4M&FESZ2 &$R2Y1nJ0Cs':) v`e2%03%N !VG?Y>xx?;5I"5fɏۏ$J@_TBa0#%b5EI!i.1P0/ C1D1A]51`_RB7iz:0 l1r1 ]pa0d@Ao :g@7=VJj;Uf.:biʗ"f.SW1#AAo0dG<0bgh:0axo0@a`_am01-?a!`A]@EJy[k@]b1`@I" ,^dYap i2g@eR%(@i(cE&M3.R+R"@2BO(f(Ae(Ap@ )-S``` TP P 1ga503(!"#g0R512T$QD`抁; TPAk0A,(Xa!+#jJ El.;͛X1 9I"g@a{]a@g12BR00SR4Q4QRQQb!!1ba] *4 #h5aiaR#?.<E"!1+a "u+a(UbqR@J2SUf_d&54S}# Ss`US1/bc@$a9cr+a+7ex/:atQtRn4%87 G~Ta FC[u``u` VJ 2bSR-4T7,@p@dlq{zU pS_Fi??v3R_h(=Ra cZb)PRGnB`\"e V )c1eV;Wbr3;Na#vJ)iq` p}Kex o%82bIg7pfBf xp}>@$-/HD *")#=h0>Th]]9 P YAoUA?PZ6 t`  ?tF$]AU]A\yMu?.Ej~] \  u?ڍ bJ J|U2''q: ?9&M?\.? e4/''-!#0O_Nk?<@rLb#I!#r. _L@"B"/KA%J!#k1p45 `K`?CopyrigTt (c) 2`09 M0c.0os0f21r01a0i0n.  Al0 8s2e0eJ0v @d0 $l> <>Uhd)A)A]1,4(B0B@$ $bJT!#b&b&kFtAM "FM(FJ?2OA R\S%j*_YBr 7Krݹ dhrx1eSRTVEJڿJ !PY,1Q!?;_(1:rpRS ?YVUqJi>#o+6k9Eo?^`R rSeX]tW-cmVqW!\!_uVr"QpA/O\F32CvtE'!>"zuF2$gYD heW8YBTxxMQQ"A"EJ^A{Gz?5SU+1 `tH~c*vMÁ%Za*MބYBp TS*%-ATTHx$EeЄUsބ)SJ3&o t $|M5aAb@M%tA2_H4 As LoZ4EQ^L4 FuLߘ-*#ȝ/wB C0ax~1@+I:aGHVP8/:bUFDf h(TPYYUF|>@x>[g%U @0 /\pb x0!(O>D,&E&px /'> d, d, d'dH?/\&?+%2  0(`Ucardke_y,}5,6R3p~01a0c0s01l01aUn2,0e0u0Wit2a0tPUo2z~0t0o0,,w01ng21t0o01lJ~2m%e^H 2E*#UUGo$^  h\1\12\5(;`AF 2d,A8 CJ @`z&bxpbxaVWVpaV aQR __+^DEOOSS<9_K_]_o__DJa b oX%_j^%`/SE2UcBi2l,F0Ax01.A$ib oobgVdaV'_Tq(ʄs Qi [\);M en yv  iDrag onut he]pe,nrih-wcl%cks e/if+ymu%yC.@hD KopisebP&d2ɿx35CUG D # h><T$6D U@|>?L&d2?Fx<޺?P} T uv` ?CuN b} ʆL.BXB 'V  ^6#,"2/D$L/zBU0?Oa4{@`: "&U'[)O"6@<&P"!'D23" L^"'Ԃ%$ bւ#1!'`?CopyrigPt (c) 20@9 M0c0os0fB1r0/Aa@i0n AlJ@ 8sNBe"@ej0v`@d/`V0s__TFScPm!#468@8|&7~uC0;1@2E$P!96@U&R2rq$U ?.T$V 3%u`-z rr1b@CgRuuL@rSS,"N7S3DQeP"ZON"mu `u`0@d 0u 0u`2&\#&io(wb\._?OKA2eOaZe59,UHq%t wphda`kw1TP"(syT_C3UJb pp#xU`{br3{P@"WaCJ(FaShZRCJRa)`IR`@3As0;E @Gex@}hP" "P"D7D2&#nRdvx(s9Ohz?%;%uC %@P&BpdB;AaA}Rb H!4}m9a;9a^Esp"\\񺁺2*2 B(esh15s `@n%` M0u@@At005`nP - PTjBaAt8@l@TTr@@)iJ@e@SS.@Q@t11"15a+'y"}"|(Q?٣tQatRE7Z5!va]٣6Gb``֑gW!!Nq "6%^5 S,@٢"^Efz٣WuC&{U55B0k-kW37,@-@7`P8pP9,@A-@3@C4`P3kA@D}E^ U6 @NqT\aPZZe(@3YP@%i T;g5ə6^Zh\7s''.Tf]u0nʕZQ"wR BWh|1`/ۥGd UHD +D# h0T@dYY'4EU@L&d2?Fx(_HE" /%4DU`0;102B$714+ 3`?CopyrigPt (c)@2\09@M@c@o%s @fB Ar @FAua@i @n.@_ Ala@ HUseBe9@e@vw@IdY@4f^P7 &(nh:MQ%%;gz+j )jp` 91 ,^[Q_5:&*TxAc@0!@SQ%7PCT7T UH `uD " ,# J>  h]0JT]]MR/MU@L&d2?FxP0~ϊ߀LUe3EWi{A"?F}Uϰplݶm'm?ܹag\t'qTPTQ{!{!"T&iB$p1TQ-v/u4;u5 &hI_(H*r X: gF9FvL+?#BwkB _H]}aC@+hW SaG]C:"KOh$?`NyPUFDfP h>(/T6D UUA@ ??E P? 
gU/5 މbobK>>W[g%U @ /p b x0!O>D,px /'> d, d, d'ȉH?/&k?%2  0(`Tcardkey,6}5,0e~0d0r2l2r~0nJ0e0a01s0,0e0u0iUt2a0tPo2z~0t0o0,9w01ng21t0o01l~2m%e^ 0ER*#UGo$^  \1\12\5(;`AF 2d:,A8 C Ju @`z&bxpbx_VUVp_V _QB __)^EѲOOSS<7_I_[_m__Hab oX%_^Z#`/SE2cBUi2l,F0x01,A"ib oobgdaT'_Tq(s 2Q0 "`(:Lm?p kDrag onut he]pe,nrih-wcl%cks e/ifymu NndFh%o1oNtMDi.sb?88ֿJ?#543=<7gfMUG D # h <^T$YYU@?@g]f?@uP} U u` ?#u.b]*f fV.V LV ^*# "&/8$L@/zqU  0?/Ya4ff{@`: "&'ĉ[)C"-@"D"@D""#" ^,'v%u$ `Kb^ f #1!'`?CopyrigPt (wc) 20@9 M0c0o%s0fB1r0-AUa@i0n WAlH@ 8sLBUe @e0v^@d/`V0s_TFScPm!#?468 B4駙7~sC0;1@2E$D!9\#&_((R\.?/-A2򥜡6UR2rq-U?TV 3u`- rr1bY@#Ru,@Urc!c' "B7C7$aED":/."u `u`k" d ֦0 0-u`2ehaMy56: Hzv%t@!{5t!wp@da7k1Tv"s(sT_C`'!e~*b #0e@obr#`@"a #*(2a cZb&#*bA:`)k`R^@1As09E @ex@]hD"D"87(n@uxs(9/Z?;%vsC %@P&BpbB9A_A]bb !4] 5m[M'Rv"#2y2H݁݁2*ĤB ԡas(esh1ԕ5 ``` M0u>@At0 40`nP - PThB_At6@l@TJT#r>@YiH@e@SJSlf6@czFJFH@u @h@RJRk@M"JM#IA;C;K)D^BWJW\_CJC^@wi>@g*@HJH0dTR#$@tB 45݂`An 0l0g0*<(%3`|B*JB6@r@oT*g6@nj@;KAx0myaD0t2 ExQv6B-Ao\CDQHmcoD!}m'Cd\ 8UVy5pΐ _@rEB#wd)Cm0Bnx 8` SPUa @l6@s$B5 0c< c\im2mȆ݁E~#D`Sm#\]` b@ǡܢK ̖yܕ !aA ^@S;C41^>`@QP3Tmow(D&AKA @ycv%C`5y5B`5@RcEaE>uc U !< YcA>?RTZ%QcI`22p211y2!!¢e+E$~R`1ƦaxDL5 3r`K. V~S|!5%y5-/(~U'y5A? tQ$@Qt^2'sb("|!5Ua$U 6jP`W(`^1R`"Y'H|! Uc,@ r|!~UfL W^sC&{5԰5@tFA@A-?@37U-@7P8P9A-@3@CU@4P3?@A@D}НE|!]"AP@¡*d0Fj"N(@MF݁Iw"MP@iFǡE i7qgi:1h8s:1GW'T?`8I1 0]핾 Sat;Eia`MLWyڼUHLD ,# h0JTlEDIAN /MU@? P >tA`  ?A@Ft >]b7%M>uoSu&{>[ >b M%J͜>U2'q ?&?\.?eD!M\#c+ ''>7L. ?N@n&!Pg#; U 2280?@r"?4֪5 $?rg#brkA lu`u`.>p |0e03F7#"' pw1"zt't)"l J MSt)"Nu>`0;1c@!"NGTADL+(A^@ `?CopyW0igXt(c)2d09M@cW0oKs@fBAr@Aa@i@n0 AlP HsRe@eW0v$Pd$2"lJaUl~PA$e1@aRUPAkW"pv9Ap9AD]'  l(b( 2_n3e%Co\onog3eQkVo_o!oJ>rM*cris#i>MAfQkYxY$w'205J!deD+ }[$&"(i ;RQG1LT!@%oY_V_^k  3o v gAOQiT'>TMg!g!ՎT&PUBqpr1M-;u4'u5h5UHD +D# h0T@dYY'4EoU@?AsTpP?@*Iv?P} t`bAtuLJ- m0u.;m$e ^dsubC^eosuz ?- % 7U"s1(ed #H`/ "!/"xnC#Ps$" /%$4DwU`0;102S5B714+ 3`?CopyrigPt (c)@2\09@M@c@os @fB Ar @FAa@i @n.@ Ala@ HseBe*9@e@vw@dY@ f^7 &(Xh:Q%%;z+ p$,^EQ_5T:&TxAc@A0!@jSQ%7|PCuT7}T UHD +D# h0T@dYY'4EoU@?@AsTp?@*Iɻ?P} t`b_ACtC WuL-j 0uk.;u u ?^   ڛ$R ^e7U"1( #H`/ oezou!_euaPjzy/"C#_E " /%4DU`0;102B714+ 3`?CopyrigPt (c)@2\09@M@c@os @fB Ar @FAa@i @n.@ Ala@ HseBe*9@e@vw@dY@4 f^7 &(Xh:Q%%;z+  p%2,^EQ_5T:&TxAc@A0!@jSQ%7|PBuT7T _BHhBr v+N@s FxLT#CcvWZ H]a0ͿOX@+LS=aGx(˫D:,aeO/?ihOUFD# Th,/TYTYBBUA@ ??P?7 uB BbCbWY[KX^:ש @6wDb LXJDL9   BЗ.,.(sU!&H/2ȍH$?$?66?`2w(  0 B`@Keywa0,SPaft\k21pring\B0a8m0c2n0c0l\e21e07s2e"0a0Ac$e!^ CEUYG& R?(WeVVlepooe xZ|Drag onut hepe.uy l/owWdimdr s;z k* y5a+.bP&d2ɿH?_fО#eY6/˘EUGD # h8^T YYJEU@eY?@/˘?@L&d2?@J?]P} `. N  $(U(+G*G (e$$y(Bju`h?Fu[ b%"&I "!"yU}  "b8#֒K#!$X*`?CopyrigPtg(c)g20 9gM c os f"!r !a i n  gAl0 (s 2e e v0d /`V sW_T E cPm!#5 '46X07$AK' L@$y'J12h2K$5~1R|K#??FB#?\.?h$p123[B8WB2qZ@E?DF @12u0`Ձ-0rwr[2i 1Bu] @rZS"S2A3Q:Re22ShB@s7|# Җ'Hhe/!^=U"+U Hg!daQ/'c_BTFFB")a{"jjh2PP2AA211 QQP?(ua`hN!St ;!sra@ ;!)v.v-~0W'u/u 9;!11Bho{\s!" 
w9"a{%d@;_r?s@S~jbuvurP'@At_CFeh513Ts p(0se5lr z" ;!1}\slcwt !$x6 >a&lu\sp1fv 6hev5Cj?:R`H7:QtR,tQ”vE`a13Ku4ZQ5F"  (ebSe,@B"pJlh5qmc  B"?(PqxؤN&BQj@[eRJ""Uf p1Uebr3P@j1UT0x"B0o kgP"i& @6y2_gi__8F N$7'F!T0?N 6$9qWF7BHa]DüIA.#HD "-#=h(>T"7 31AU@I&d2?K@J P6 t`  ?Qt# U  l>ƍuQjbb*AluQ b TJU5 L@RGG +$?B& !AbH$5 `C`?CopyrigTt(wc)20 9M c o%s f"!r !ua i n._ Al0 (Us2e e v'0d 0l>4 >UhG1G1@ b!g% Ca4Ը0J}@xRW5 n7o}WSgheof#_TG1M[Q[Q3AARAA2AAj2AA1 JCc8C|@B$%F C0[C,u3F=tReJLsp 61 !@b?2CS,uUd6LsxAXP]pZUN@@ [wM0q5I=t_jZKt* US@5]qH{j5 uKt!X-trc=tDKtC#rC#:S,u+rGfTf A7`G2ciAV1e =MX'2q7B&߁!0% h_HAs )N<r(ORFzLk#gmwB En}]a,1o@+ؔWoBaGoo-PXu PUFD  h> T6D  UA??I43P?<N (^.hh;1 B_1 BsYE V B1 B1VHBU  0B`USB,>\key e^ *%)sU-G/ !L??--B?wpwpp wwpTwp{ xp} wwwwpwwwwwwpww}wOX\Drag onut hep adn"y aex.ubۿk??ؿt 37??UGD" # Q h <T$YY^Q#[U@??@?Pu` 7?u#m )V@@:D:brbYb _ _ _ _l_&7b&&6@^^rr*&&&&&&2S0267E67Jr< >0ÿ@q0?@2?46drD@U238'uL 2-1m2u .FB,@+I2u`u`"V0!u`lB@h4?"?P2HGI'i2)UP@r5B{36{?V ?2bbvbz1`B#p`S`SY`S`S`S`S`S`S02\V/3]`R02S"`R02\V?3\V@3\V02 \V 02 \V 02 \V 02 \V 02 \V 02\V0#`R`R"ccccjBC2Ub%SCvlXsfsf"s7sBBfRsf Rs4v%"zw%2c03>vc%bb|QPAD BS؄LXb0@R=R 58SyTT3rr5705=U2`?CopyrigPt (c)2U09MۀcـosӀfҁrՀaiӀnU. pl) وUs-eeـv?d!31`Vۀs_,PRXY!cPm!#54162ea, U  Yo8cUHEEEYuEn|! 7pf3PalGTDDi2o8T?Ŀ {1lBQ(-DT! @b0Q:!iA>AbsF?G .A;¬sBt7lBh@ %@( ; lƄ& ǚ ,FǏAǏ>>A#@4rj4$4V5 ;5  bXu\)@%iA6T0VQ0#dAbN5Q h=)`R?sۀ T?x3B@5X|Swrg^p @i2o8P1 ,D,@"΀HD: )&# =h ,>T  9 MU@z?@|*?@2z[?@S㯥һ?P Bt`  B֫?t/$*_PcBVBu ]uk&>     7ʡl  K"K"K"K"K"K"K"K"K"K""K""K""K"""K","K"6"K"@"ZJ}BU0@!bA$0:2@;@;@;R@;@6BA"G 4?6L ?A:4B?116 @0 e@0 03lf0Ht6@@Mt`@MthOt}OtВO tO tݼO tO tO tNOtZh1<M#h$@3pxQxQG`?CopyrigTt (c])P20P9PuMPcPosPIfRQrPQaPiPn.P WAlP XsRUePePv `dPK2l>`p>UhX``  !!7"!,!6!l6AQ!J`3@u3J0`f59]|/շY76A 6%vMBCsU@VL ?@2Rq?_,Tk00ie<}󵍕A|]f>^몖v|1s 5E(?u:UOuN$?@i6?@0|]1>î8y~|݋ yN.ݳ*|81|?[!xOu~0O??@o@?@B`r.?@bp` | e1 |17#st ;]tVޟ R?uOu\HI?@w (?@Gd%QǏ~|p p v~t?jTx?8ry?j;R`s?uKy5ZD~|?5^cQT?@mȽHm}W^;ލ~|C('? vKqy?fיKyCD?@_:?@S б?@M|>LiM|I޷|*C|nª̷|95T|sy(Ոft{d"5~jt?@RPFo˴lC`X|?lP6wxKyvn?@- p+?@"?@V*Τ |x|}?8|ۼr|>znȆa}sʡe l6WUh)pU*xKy3?7B)ߋ|hca!~A|!u?(n6x!~KyӜ|F'n}ܔE:(|[J@k~}£>b6!KyaCl]p?@]p&fC|؋k}@|T}# zx!Kyn@e]p,\p}o{z[1}LƦ%ue2h/e3rJQ 4/31RHD: )&# =h ,>T  9 AU@ BҴ?@v?@Ot@?@WB׽-+?P Bt`  gI?ttX̦1P^<(w\5VBu ]uk`&>   J  ֔J}BU02@@}BbAp U"++++&B]##kM?> ?,b#bb zp /K0c!r  i3b/$hb<M#hT{0]#11G`?CopyrigTt (c)@20%@9@M@c@os @fBAr @DAa@i @n.@ Al_@ HscBe7@e@vu@dW@"l>4Uh8AA  v1AƆ!J\HRq?@e ?`f Z<]|̟fW74Bv1 6oVMBSU@$M8{?@ ^c?@)RPv/k/00=YHqoP\:y8)+]fwґdOV|66U5U ?@D;?@IޖN`Œ_,Q_\Qj\1}P\|[~ll7ju:Vdp-hUaWk?@ ?@.PX?@njo\X_P\{ǿlPL ln:1ͳ-hUBҔ?@,et?@+R?@p"ۙ\|5P\p@lfL>&lǟ6.hD:#Tj&:m:P7AT ~Q[Wذr%n%*g>@dZd:\?-'P\ 5܇nQUYo2z[?@I 0Pu?@&1L͸\t~P\kw[OV d@0J~l_>Ʌ)A`?@5^s!):\ѽEaP\Q,\Q-hY+L>î9?@PT7?@G\?OǑSY?k r d? gnBvd ;OU-hU?c/?@! P5n˿PP]:WlpF,d계v-hY*^@US#R`\I_[[i܄f+D̗D"E2$5U#rAg $"HD: )&# =h ,>T  9 IAU@7B?@V?@CDD  )JBU2U"u?{@L@-lA@kGzQ bbbf!['Q Ma q 6!x+ 3"?['!Zd;O?zQ O#b(q (+&B>?8> ?I-\-m/&6))!~0~0x+ n?=!0f21(U/g k/}//%Pkw/1Zd*0@ u/I 2/B4h<M#h8@ #QQ(@I`?CopyrigTtD (wc)D 20DP9D M0Pc.Po%s(Pf6R'Qr*PcQua6Pi(Pn.D _ Al~P .XUsReVPe.PvPdvP2l> *<>Uh$QQ Q! L` #@{֣p= S?`f ڱa]|msQNIZa7R 6zfMB"!|?5n?@qMbX9ElIB2ٗ[lnHa+Q>?@$?@ҹI +W?@x/ka00Hiav@h[l x§?30_fq_J]|;/daeÿg5?@F1ElF4?a[lN,he6i]?@ &h@L?@Uʿm9?@k@;k%7|nS[lU(8gYzc|^fdy|臐T  9 !AU@p$?@&1?@z?@lq?P Bt`  ݸQ?t48F_snϽ@3 Bu ]uk$M > )J 2Uu?g@@-lA_@Gz) bbob>!3'm) a I !P+ ^!?3'}!\(\?'z) '#07\I a) n!/b'#I J(+&B}BU???%478 ?,b?d4zI&"&$/"**'h<M#hj 30 A AZ I`?CopyrigT}t (c) W20P@9 M<@]c:@os4@fBBR3Ar6@oAaB@i4@n. AUl@ :HsBeb@e:@v@d@ 2l>8>UhkAA +A! 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/figures/serial-console-flow.svg0000664000175000017500000007716600000000000024001 0ustar00zuulzuul00000000000000 image/svg+xml nova-serialproxy nova-api nova-compute # nova.conf[DEFAULT]my_ip=192.168.50.104[serial_console]enabled=trueport_range=10000:20000base_url=ws://192.168.50.100:6083proxyclient_address=192.168.50.104 # nova.conf[DEFAULT]my_ip=192.168.50.100[serial_console]enabled=trueserialproxy_host=192.168.50.100serialproxy_port=6083 10000 ... 20000 nova-compute # nova.conf[DEFAULT]my_ip=192.168.50.105[serial_console]enabled=trueport_range=10000:20000base_url=ws://192.168.50.100:6083proxyclient_address=192.168.50.105 10000 ... 20000 Browser/CLI/Client 1. 2. 3. 4. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/file-backed-memory.rst0000664000175000017500000001020200000000000022100 0ustar00zuulzuul00000000000000================== File-backed memory ================== ..
important:: As of the 18.0.0 Rocky release, the functionality described below is only supported by the libvirt/KVM driver. The file-backed memory feature in OpenStack allows a Nova node to serve guest memory from a file backing store. This mechanism uses the libvirt file memory source, causing guest instance memory to be allocated as files within the libvirt memory backing directory. Since instance performance will be related to the speed of the backing store, this feature works best when used with very fast block devices or virtual file systems - such as flash or RAM devices. When configured, ``nova-compute`` will report the capacity configured for file-backed memory to placement in place of the total system memory capacity. This allows the node to run more instances than would normally fit within system memory. When available in libvirt and qemu, instance memory will be discarded by qemu at shutdown by calling ``madvise(MADV_REMOVE)``, to avoid flushing any dirty memory to the backing store on exit. To enable file-backed memory, follow the steps below: #. `Configure the backing store`_ #. `Configure Nova Compute for file-backed memory`_ .. important:: It is not possible to live migrate from a node running a version of OpenStack that does not support file-backed memory to a node with file-backed memory enabled. It is recommended that all Nova compute nodes are upgraded to Rocky before enabling file-backed memory. Prerequisites and Limitations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Libvirt File-backed memory requires libvirt version 4.0.0 or newer. Discard capability requires libvirt version 4.4.0 or newer. Qemu File-backed memory requires qemu version 2.6.0 or newer. Discard capability requires qemu version 2.10.0 or newer. Memory overcommit File-backed memory is not compatible with memory overcommit. :oslo.config:option:`ram_allocation_ratio` must be set to ``1.0`` in ``nova.conf``, and the host must not be added to a :doc:`host aggregate ` with ``ram_allocation_ratio`` set to anything but ``1.0``. Reserved memory When configured, file-backed memory is reported as total system memory to placement, with RAM used as cache. Reserved memory corresponds to disk space not set aside for file-backed memory. :oslo.config:option:`reserved_host_memory_mb` should be set to ``0`` in ``nova.conf``. Huge pages File-backed memory is not compatible with huge pages. Instances with huge pages configured will not start on a host with file-backed memory enabled. It is recommended to use host aggregates to ensure instances configured for huge pages are not placed on hosts with file-backed memory configured. Handling these limitations could be optimized with a scheduler filter in the future. Configure the backing store ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. note:: ``/dev/sdb`` and the ``ext4`` filesystem are used here as an example. This will differ between environments. .. note:: ``/var/lib/libvirt/qemu/ram`` is the default location. The value can be set via ``memory_backing_dir`` in ``/etc/libvirt/qemu.conf``, and the mountpoint must match the value configured there. By default, Libvirt with qemu/KVM allocates memory within ``/var/lib/libvirt/qemu/ram/``. To utilize this, you need to have the backing store mounted at (or above) this location. #. Create a filesystem on the backing device .. code-block:: console # mkfs.ext4 /dev/sdb #. Mount the backing device Add the backing device to ``/etc/fstab`` for automatic mounting to ``/var/lib/libvirt/qemu/ram``.
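For illustration only, and assuming the example ``/dev/sdb`` device and ``ext4`` filesystem used above, the corresponding ``/etc/fstab`` entry could look like the following sketch; adjust the device (or use its UUID), filesystem type and mount options to match your environment: .. code-block:: none /dev/sdb /var/lib/libvirt/qemu/ram ext4 defaults 0 0 Mount the device ..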
code-block:: console # mount /dev/sdb /var/lib/libvirt/qemu/ram Configure Nova Compute for file-backed memory ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Enable File-backed memory in ``nova-compute`` Configure Nova to utilize file-backed memory with the capacity of the backing store in MiB. 1048576 MiB (1 TiB) is used in this example. Edit ``/etc/nova/nova.conf`` .. code-block:: ini [libvirt] file_backed_memory=1048576 #. Restart the ``nova-compute`` service ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/flavors.rst0000664000175000017500000001323400000000000020130 0ustar00zuulzuul00000000000000============== Manage Flavors ============== Admin users can use the :command:`openstack flavor` command to customize and manage flavors. To see information for this command, run: .. code-block:: console $ openstack flavor --help Command "flavor" matches: flavor create flavor delete flavor list flavor set flavor show flavor unset .. note:: Configuration rights can be delegated to additional users by redefining the access controls for ``os_compute_api:os-flavor-manage:create``, ``os_compute_api:os-flavor-manage:update`` and ``os_compute_api:os-flavor-manage:delete`` in ``/etc/nova/policy.yaml`` on the ``nova-api`` server. .. note:: Flavor customization can be limited by the hypervisor in use. For example the libvirt driver enables quotas on CPUs available to a VM, disk tuning, bandwidth I/O, watchdog behavior, random number generator device control, and instance VIF traffic control. For information on the flavors and flavor extra specs, refer to :doc:`/user/flavors`. Create a flavor --------------- #. List flavors to show the ID and name, the amount of memory, the amount of disk space for the root partition and for the ephemeral partition, the swap, and the number of virtual CPUs for each flavor: .. code-block:: console $ openstack flavor list #. To create a flavor, specify a name, ID, RAM size, disk size, and the number of vCPUs for the flavor, as follows: .. code-block:: console $ openstack flavor create FLAVOR_NAME --id FLAVOR_ID \ --ram RAM_IN_MB --disk ROOT_DISK_IN_GB --vcpus NUMBER_OF_VCPUS .. note:: Unique ID (integer or UUID) for the new flavor. If specifying 'auto', a UUID will be automatically generated. Here is an example that creates a public ``m1.extra_tiny`` flavor that automatically gets an ID assigned, with 256 MB memory, no disk space, and one VCPU. .. code-block:: console $ openstack flavor create --public m1.extra_tiny --id auto \ --ram 256 --disk 0 --vcpus 1 #. If an individual user or group of users needs a custom flavor that you do not want other projects to have access to, you can create a private flavor. .. code-block:: console $ openstack flavor create --private m1.extra_tiny --id auto \ --ram 256 --disk 0 --vcpus 1 After you create a flavor, assign it to a project by specifying the flavor name or ID and the project ID: .. code-block:: console $ openstack flavor set --project PROJECT_ID m1.extra_tiny For a list of optional parameters, run this command: .. code-block:: console $ openstack help flavor create #. In addition, you can set or unset properties, commonly referred to as "extra specs", for the existing flavor. The ``extra_specs`` metadata keys can influence the instance directly when it is launched. If a flavor sets the ``quota:vif_outbound_peak=65536`` extra spec, the instance's outbound peak bandwidth I/O should be less than or equal to 512 Mbps. 
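For example, such a limit could be applied with a command along the following lines; the flavor name and the peak value (in kilobytes per second) are only illustrative and should be chosen to suit your environment: .. code-block:: console $ openstack flavor set m1.extra_tiny --property quota:vif_outbound_peak=65536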
There are several aspects that can work for an instance including *CPU limits*, *Disk tuning*, *Bandwidth I/O*, *Watchdog behavior*, and *Random-number generator*. For information about available metadata keys, see :doc:`/user/flavors`. For a list of optional parameters, run this command: .. code-block:: console $ openstack flavor set --help Modify a flavor --------------- Only the description of flavors can be modified (starting from microversion 2.55). To modify the description of a flavor, specify the flavor name or ID and a new description as follows: .. code-block:: console $ openstack --os-compute-api-version 2.55 flavor set --description .. note:: The only field that can be updated is the description field. Nova has historically intentionally not included an API to update a flavor because that would be confusing for instances already created with that flavor. Needing to change any other aspect of a flavor requires deleting and/or creating a new flavor. Nova stores a serialized version of the flavor associated with an instance record in the ``instance_extra`` table. While nova supports `updating flavor extra_specs`_ it does not update the embedded flavor in existing instances. Nova does not update the embedded flavor as the extra_specs change may invalidate the current placement of the instance or alter the compute context that has been created for the instance by the virt driver. For this reason admins should avoid updating extra_specs for flavors used by existing instances. A resize can be used to update existing instances if required but as a resize performs a cold migration it is not transparent to a tenant. .. _updating flavor extra_specs: https://docs.openstack.org/api-ref/compute/?expanded=#update-an-extra-spec-for-a-flavor Delete a flavor --------------- To delete a flavor, specify the flavor name or ID as follows: .. code-block:: console $ openstack flavor delete FLAVOR Default Flavors --------------- Previous versions of nova typically deployed with default flavors. This was removed from Newton. The following table lists the default flavors for Mitaka and earlier. ============ ========= =============== =============== Flavor VCPUs Disk (in GB) RAM (in MB) ============ ========= =============== =============== m1.tiny 1 1 512 m1.small 1 20 2048 m1.medium 2 40 4096 m1.large 4 80 8192 m1.xlarge 8 160 16384 ============ ========= =============== =============== ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/huge-pages.rst0000664000175000017500000002351100000000000020500 0ustar00zuulzuul00000000000000========== Huge pages ========== The huge page feature in OpenStack provides important performance improvements for applications that are highly memory IO-bound. .. note:: Huge pages may also be referred to hugepages or large pages, depending on the source. These terms are synonyms. Pages, the TLB and huge pages ----------------------------- Pages Physical memory is segmented into a series of contiguous regions called pages. Each page contains a number of bytes, referred to as the page size. The system retrieves memory by accessing entire pages, rather than byte by byte. Translation Lookaside Buffer (TLB) A TLB is used to map the virtual addresses of pages to the physical addresses in actual memory. The TLB is a cache and is not limitless, storing only the most recent or frequently accessed pages. 
During normal operation, processes will sometimes attempt to retrieve pages that are not stored in the cache. This is known as a TLB miss and results in a delay as the processor iterates through the pages themselves to find the missing address mapping. Huge Pages The standard page size in x86 systems is 4 kB. This is optimal for general purpose computing but larger page sizes - 2 MB and 1 GB - are also available. These larger page sizes are known as huge pages. Huge pages result in less efficient memory usage as a process will not generally use all memory available in each page. However, use of huge pages will result in fewer overall pages and a reduced risk of TLB misses. For processes that have significant memory requirements or are memory intensive, the benefits of huge pages frequently outweigh the drawbacks. Persistent Huge Pages On Linux hosts, persistent huge pages are huge pages that are reserved upfront. The HugeTLB provides the mechanism for this upfront configuration of huge pages. The HugeTLB allows for the allocation of varying quantities of different huge page sizes. Allocation can be made at boot time or run time. Refer to the `Linux hugetlbfs guide`_ for more information. Transparent Huge Pages (THP) On Linux hosts, transparent huge pages are huge pages that are automatically provisioned based on process requests. Transparent huge pages are provisioned on a best effort basis, attempting to provision 2 MB huge pages if available but falling back to 4 kB small pages if not. However, no upfront configuration is necessary. Refer to the `Linux THP guide`_ for more information. Enabling huge pages on the host ------------------------------- .. important:: Huge pages may not be used on a host configured for file-backed memory. See :doc:`file-backed-memory` for details. Persistent huge pages are required owing to their guaranteed availability. However, persistent huge pages are not enabled by default in most environments. The steps for enabling huge pages differ from platform to platform and only the steps for Linux hosts are described here. On Linux hosts, the number of persistent huge pages on the host can be queried by checking ``/proc/meminfo``: .. code-block:: console $ grep Huge /proc/meminfo AnonHugePages: 0 kB ShmemHugePages: 0 kB HugePages_Total: 0 HugePages_Free: 0 HugePages_Rsvd: 0 HugePages_Surp: 0 Hugepagesize: 2048 kB In this instance, there are 0 persistent huge pages (``HugePages_Total``) and 0 transparent huge pages (``AnonHugePages``) allocated. Huge pages can be allocated at boot time or run time. Huge pages require a contiguous area of memory - memory that gets increasingly fragmented the longer a host is running. Identifying contiguous areas of memory is an issue for all huge page sizes, but it is particularly problematic for larger huge page sizes such as 1 GB huge pages. Allocating huge pages at boot time will ensure the correct number of huge pages is always available, while allocating them at run time can fail if memory has become too fragmented. To allocate huge pages at boot time, the kernel boot parameters must be extended to include some huge page-specific parameters. This can be achieved by modifying ``/etc/default/grub`` and appending the ``hugepagesz``, ``hugepages``, and ``transparent_hugepage=never`` arguments to ``GRUB_CMDLINE_LINUX``. To allocate, for example, 2048 persistent 2 MB huge pages at boot time, run: ..
code-block:: console # echo 'GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX hugepagesz=2M hugepages=2048 transparent_hugepage=never"' >> /etc/default/grub $ grep GRUB_CMDLINE_LINUX /etc/default/grub GRUB_CMDLINE_LINUX="..." GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX hugepagesz=2M hugepages=2048 transparent_hugepage=never" .. important:: Persistent huge pages are not usable by standard host OS processes. Ensure enough free, non-huge page memory is reserved for these processes. Reboot the host, then validate that huge pages are now available: .. code-block:: console $ grep "Huge" /proc/meminfo AnonHugePages: 0 kB ShmemHugePages: 0 kB HugePages_Total: 2048 HugePages_Free: 2048 HugePages_Rsvd: 0 HugePages_Surp: 0 Hugepagesize: 2048 kB There are now 2048 2 MB huge pages totalling 4 GB of huge pages. These huge pages must be mounted. On most platforms, this happens automatically. To verify that the huge pages are mounted, run: .. code-block:: console # mount | grep huge hugetlbfs on /dev/hugepages type hugetlbfs (rw) In this instance, the huge pages are mounted at ``/dev/hugepages``. This mount point varies from platform to platform. If the above command did not return anything, the hugepages must be mounted manually. To mount the huge pages at ``/dev/hugepages``, run: .. code-block:: console # mkdir -p /dev/hugepages # mount -t hugetlbfs hugetlbfs /dev/hugepages There are many more ways to configure huge pages, including allocating huge pages at run time, specifying varying allocations for different huge page sizes, or allocating huge pages from memory affinitized to different NUMA nodes. For more information on configuring huge pages on Linux hosts, refer to the `Linux hugetlbfs guide`_. Customizing instance huge pages allocations ------------------------------------------- .. important:: The functionality described below is currently only supported by the libvirt/KVM driver. .. important:: For performance reasons, configuring huge pages for an instance will implicitly result in a NUMA topology being configured for the instance. Configuring a NUMA topology for an instance requires enablement of ``NUMATopologyFilter``. Refer to :doc:`cpu-topologies` for more information. By default, an instance does not use huge pages for its underlying memory. However, huge pages can bring important or required performance improvements for some workloads. Huge pages must be requested explicitly through the use of flavor extra specs or image metadata. To request an instance use huge pages, you can use the :nova:extra-spec:`hw:mem_page_size` flavor extra spec: .. code-block:: console $ openstack flavor set m1.large --property hw:mem_page_size=large Different platforms offer different huge page sizes. For example: x86-based platforms offer 2 MB and 1 GB huge page sizes. Specific huge page sizes can be also be requested, with or without a unit suffix. The unit suffix must be one of: Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it), KB, KiB, MB, MiB, GB, GiB, TB, TiB. Where a unit suffix is not provided, Kilobytes are assumed. To request an instance to use 2 MB huge pages, run one of: .. code-block:: console $ openstack flavor set m1.large --property hw:mem_page_size=2MB .. code-block:: console $ openstack flavor set m1.large --property hw:mem_page_size=2048 Enabling huge pages for an instance can have negative consequences for other instances by consuming limited huge pages resources. To explicitly request an instance use small pages, run: .. 
code-block:: console $ openstack flavor set m1.large --property hw:mem_page_size=small .. note:: Explicitly requesting any page size will still result in a NUMA topology being applied to the instance, as described earlier in this document. Finally, to leave the decision of huge or small pages to the compute driver, run: .. code-block:: console $ openstack flavor set m1.large --property hw:mem_page_size=any For more information about the syntax for ``hw:mem_page_size``, refer to :nova:extra-spec:`the documentation `. Applications are frequently packaged as images. For applications that require the IO performance improvements that huge pages provides, configure image metadata to ensure instances always request the specific page size regardless of flavor. To configure an image to use 1 GB huge pages, run: .. code-block:: console $ openstack image set [IMAGE_ID] --property hw_mem_page_size=1GB If the flavor specifies a numerical page size or a page size of "small" the image is not allowed to specify a page size and if it does an exception will be raised. If the flavor specifies a page size of ``any`` or ``large`` then any page size specified in the image will be used. By setting a ``small`` page size in the flavor, administrators can prevent users requesting huge pages in flavors and impacting resource utilization. To configure this page size, run: .. code-block:: console $ openstack flavor set m1.large --property hw:mem_page_size=small .. note:: Explicitly requesting any page size will still result in a NUMA topology being applied to the instance, as described earlier in this document. For more information about image metadata, refer to the `Image metadata`_ guide. .. Links .. _`Linux THP guide`: https://www.kernel.org/doc/Documentation/vm/transhuge.txt .. _`Linux hugetlbfs guide`: https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt .. _`Image metadata`: https://docs.openstack.org/image-guide/introduction.html#image-metadata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/hw-emulation-architecture.rst0000664000175000017500000001035500000000000023546 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================ hw_emulation_architecture - Configuring QEMU instance emulation architecture ============================================================================ .. versionadded:: 25.0.0 (Yoga) The libvirt driver now allows for handling of specific cpu architectures when defined within the image metadata properties, to be emulated through QEMU. Added ``hw_emulation_architecture`` as an available image_meta property. .. note:: The following only applies to environments using libvirt compute hosts. and should be considered experimental in its entirety, during its first release as a feature. 
Introduction
------------

This feature fills a need in environments that do not have the ability to
support the various CPU architectures that are present today in physical
hardware. A small subset of the architectures supported by both libvirt and
QEMU has been selected as prime candidates for emulation support. While
support has been added for the base architectures below, this does not
guarantee that every derivative or custom operating system that leverages one
of these architectures will function.

Configure
---------

-------------------
QEMU Binary Support
-------------------

To ensure that libvirt and QEMU can properly handle the level of CPU emulation
desired by the end user, you must install the specific ``qemu-system-XXX``,
``qemu-efi-arm``, and ``qemu-efi-aarch64`` binaries on the compute nodes that
will be providing support.

---------------
Console Support
---------------

Consider which architectures you want to support, as there are limitations on
console support through SPICE, noVNC, and serial. All testing and validation
has been done to ensure that SPICE and serial connections function as
expected.

- ``AARCH64`` - Spice & Serial
- ``S390X`` - Serial
- ``PPC64LE`` - Spice & Serial
- ``MIPSEL`` - untested

--------------------------------
Supported Emulated Architectures
--------------------------------

The supported emulated architectures require specific image metadata
properties to be set in order to trigger the proper settings to be configured
by libvirtd. For end users, the emulation architecture of an instance is
controlled by the selection of an image with the ``hw_emulation_architecture``
image metadata property set.

AARCH64
~~~~~~~

``Tested and Validated as functional``

.. code-block:: shell

   $ openstack image set --property hw_emulation_architecture=aarch64 $IMAGE
   $ openstack image set --property hw_machine_type=virt $IMAGE
   $ openstack image set --property hw_firmware_type=uefi $IMAGE

S390x
~~~~~

``Tested and Validated as functional``

.. code-block:: shell

   $ openstack image set --property hw_emulation_architecture=s390x $IMAGE
   $ openstack image set --property hw_machine_type=s390-ccw-virtio $IMAGE
   $ openstack image set --property hw_video_model=virtio $IMAGE

PPC64LE
~~~~~~~

``Tested and Validated as functional``

.. code-block:: shell

   $ openstack image set --property hw_emulation_architecture=ppc64le $IMAGE
   $ openstack image set --property hw_machine_type=pseries $IMAGE

MIPSEL
~~~~~~

``Testing and validation is ongoing to overcome PCI issues``

.. note::

   Support is currently impacted; one current method for support is manually
   patching and compiling as defined in libvirt bug
   `XML error: No PCI buses available`_.

.. _`XML error: No PCI buses available`: https://bugzilla.redhat.com/show_bug.cgi?id=1432101

.. code-block:: shell

   $ openstack image set --property hw_emulation_architecture=mipsel $IMAGE
   $ openstack image set --property hw_machine_type=virt $IMAGE

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/hw-machine-type.rst0000664000175000017500000001514700000000000021460 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ====================================================================== hw_machine_type - Configuring and updating QEMU instance machine types ====================================================================== .. versionadded:: 12.0.0 (Liberty) .. versionchanged:: 23.0.0 (Wallaby) The libvirt driver now records the machine type of an instance at start up allowing the ``[libvirt]hw_machine_type`` configurable to change over time without impacting existing instances. Added ``nova-manage`` commands to control the machine_type of an instance. .. versionchanged:: 25.0.0 (Yoga) Added ``nova-manage`` commands to set the image properties of an instance. .. note:: The following only applies to environments using libvirt compute hosts. Introduction ------------ QEMU's machine type concept can be thought of as a virtual chipset that provides certain default devices (e.g. PCIe graphics card, Ethernet controller, SATA controller, etc). QEMU supports two main variants of "machine type" for x86 hosts: (a) ``pc``, which corresponds to Intel's I440FX chipset (released in 1996) and (b) ``q35``, which corresponds to Intel's 82Q35 chipset (released in 2007). For AArch64 hosts, the machine type is called: ``virt``. The ``pc`` machine type is considered legacy, and does not support many modern features. Although at this time of writing, upstream QEMU has not reached an agreement to remove new versioned variants of the ``pc`` machine type, some long-term stable Linux distributions (CentOS, RHEL, possibly others) are moving to support ``q35`` only. Configure --------- For end users the machine type of an instance is controlled by the selection of an image with the `hw_machine_type image metadata property`__ set. .. __: https://docs.openstack.org/glance/latest/admin/useful-image-properties.html .. code-block:: shell $ openstack image set --property hw_machine_type=q35 $IMAGE The libvirt virt driver supports the configuration of a per compute host default machine type via the :oslo.config:option:`libvirt.hw_machine_type` option. Providing a default machine type per host architecture to be used when no corresponding ``hw_machine_type`` image property is provided for the instance. When this option is not defined the libvirt driver relies on the following `hardcoded dictionary`__ of default machine types per architecture: .. __: https://github.com/openstack/nova/blob/dc93e3b510f53d5b2198c8edd22528f0c899617e/nova/virt/libvirt/utils.py#L631-L638 .. code-block:: python default_mtypes = { obj_fields.Architecture.ARMV7: "virt", obj_fields.Architecture.AARCH64: "virt", obj_fields.Architecture.S390: "s390-ccw-virtio", obj_fields.Architecture.S390X: "s390-ccw-virtio", obj_fields.Architecture.I686: "pc", obj_fields.Architecture.X86_64: "pc", } Update ------ Prior to the Wallaby (23.0.0) release the :oslo.config:option:`libvirt.hw_machine_type` option had to remain static once set for the lifetime of a deployment. 
This was due to the machine type of instances without a ``hw_machine_type`` image property using the newly configured machine types after a hard reboot or migration This could in turn break the internal ABI of the instance when changing between underlying machine types such as ``pc`` to ``q35``. From the Wallaby (23.0.0) release it is now possible to change the :oslo.config:option:`libvirt.hw_machine_type` config once all instances have a machine type recorded within the system metadata of the instance. To allow this the libvirt driver will now attempt to record the machine type for any instance that doesn't already have it recorded during start up of the compute service or initial spawn of an instance. This should ensure a machine type is recorded for all instances after an upgrade to Wallaby that are not in a ``SHELVED_OFFLOADED`` state. To record a machine type for instances in a ``SHELVED_OFFLOADED`` state after an upgrade to Wallaby a new :program:`nova-manage` command has been introduced to initially record the machine type of an instance. .. code-block:: shell $ nova-manage libvirt update_machine_type $instance $machine_type This command can also be used later to update the specific machine type used by the instance. An additional :program:`nova-manage` command is also available to fetch the machine type of a specific instance: .. code-block:: shell $ nova-manage libvirt get_machine_type $instance To confirm that all instances within an environment or a specific cell have had a machine type recorded another :program:`nova-manage` command can be used: .. code-block:: shell $ nova-manage libvirt list_unset_machine_type The logic behind this command is also used by a new :program:`nova-status` upgrade check that will fail with a warning when instances without a machine type set exist in an environment. .. code-block:: shell $ nova-status upgrade check Once it has been verified that all instances within the environment or specific cell have had a machine type recorded then the :oslo.config:option:`libvirt.hw_machine_type` can be updated without impacting existing instances. Device bus and model image properties ------------------------------------- .. versionadded:: 25.0.0 (Yoga) Device bus and model types defined as image properties associated with an instance are always used when launching instances with the libvirt driver. Support for each device bus and model is dependent on the machine type used and version of QEMU available on the underlying compute host. As such, any changes to the machine type of an instance or version of QEMU on a host might suddenly invalidate the stored device bus or model image properties. It is now possible to change the stored image properties of an instance without having to rebuild the instance. To show the stored image properties of an instance: .. code-block:: shell $ nova-manage image_property show $instance_uuid $property To update the stored image properties of an instance: .. code-block:: shell $ nova-manage image_property set \ $instance_uuid --property $property_name=$property_value ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/image-caching.rst0000664000175000017500000001235200000000000021130 0ustar00zuulzuul00000000000000============= Image Caching ============= Nova supports caching base images on compute nodes when using a `supported virt driver`_. .. 
_supported virt driver: https://docs.openstack.org/nova/latest/user/support-matrix.html#operation_cache_images What is Image Caching? ---------------------- In order to understand what image caching is and why it is beneficial, it helps to be familiar with the process by which an instance is booted from a given base image. When a new instance is created on a compute node, the following general steps are performed by the compute manager in conjunction with the virt driver: #. Download the base image from glance #. Copy or COW the base image to create a new root disk image for the instance #. Boot the instance using the new root disk image The first step involves downloading the entire base image to the local disk on the compute node, which could involve many gigabytes of network traffic, storage, and many minutes of latency between the start of the boot process and actually running the instance. When the virt driver supports image caching, step #1 above may be skipped if the base image is already present on the compute node. This is most often the case when another instance has been booted on that node from the same base image recently. If present, the download operation can be skipped, which greatly reduces the time-to-boot for the second and subsequent instances that use the same base image, as well as avoids load on the glance server and the network connection. By default, the compute node will periodically scan the images it has cached, looking for base images that are not used by any instances on the node that are older than a configured lifetime (24 hours by default). Those unused images are deleted from the cache directory until they are needed again. For more information about configuring image cache behavior, see the documentation for the configuration options in the :oslo.config:group:`image_cache` group. .. note:: Some ephemeral backend drivers may not use or need image caching, or may not behave in the same way as others. For example, when using the ``rbd`` backend with the ``libvirt`` driver and a shared pool with glance, images are COW'd at the storage level and thus need not be downloaded (and thus cached) at the compute node at all. Image Caching Resource Accounting --------------------------------- Generally the size of the image cache is not part of the data Nova includes when reporting available or consumed disk space. This means that when ``nova-compute`` reports 100G of total disk space, the scheduler will assume that 100G of instances may be placed there. Usually disk is the most plentiful resource and thus the last to be exhausted, so this is often not problematic. However, if many instances are booted from distinct images, all of which need to be cached in addition to the disk space used by the instances themselves, Nova may overcommit the disk unintentionally by failing to consider the size of the image cache. There are two approaches to addressing this situation: #. **Mount the image cache as a separate filesystem**. This will cause Nova to report the amount of disk space available purely to instances, independent of how much is consumed by the cache. Nova will continue to disregard the size of the image cache and, if the cache space is exhausted, builds will fail. However, available disk space for instances will be correctly reported by ``nova-compute`` and accurately considered by the scheduler. #. **Enable optional reserved disk amount behavior**. 
The configuration workaround :oslo.config:option:`workarounds.reserve_disk_resource_for_image_cache` will cause ``nova-compute`` to periodically update the reserved disk amount to include the statically configured value, as well as the amount currently consumed by the image cache. This will cause the scheduler to see the available disk space decrease as the image cache grows. This is not updated synchronously and thus is not a perfect solution, but should vastly increase the scheduler's visibility resulting in better decisions. (Note this solution is currently libvirt-specific) As above, not all backends and virt drivers use image caching, and thus a third option may be to consider alternative infrastructure to eliminate this problem altogether. Image pre-caching ----------------- It may be beneficial to pre-cache images on compute nodes in order to achieve low time-to-boot latency for new instances immediately. This is often useful when rolling out a new version of an application where downtime is important and having the new images already available on the compute nodes is critical. Nova provides (since the Ussuri release) a mechanism to request that images be cached without having to boot an actual instance on a node. This best-effort service operates at the host aggregate level in order to provide an efficient way to indicate that a large number of computes should receive a given set of images. If the computes that should pre-cache an image are not already in a defined host aggregate, that must be done first. For information on how to perform aggregate-based image pre-caching, see the :ref:`image-caching-aggregates` section of the Host aggregates documentation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/index.rst0000664000175000017500000001725600000000000017573 0ustar00zuulzuul00000000000000=================== Admin Documentation =================== The OpenStack Compute service allows you to control an Infrastructure-as-a-Service (IaaS) cloud computing platform. It gives you control over instances and networks, and allows you to manage access to the cloud through users and projects. Compute does not include virtualization software. Instead, it defines drivers that interact with underlying virtualization mechanisms that run on your host operating system, and exposes functionality over a web-based API. Overview -------- To effectively administer compute, you must understand how the different installed nodes interact with each other. Compute can be installed in many different ways using multiple servers, but generally multiple compute nodes control the virtual servers and a cloud controller node contains the remaining Compute services. The Compute cloud works using a series of daemon processes named ``nova-*`` that exist persistently on the host machine. These binaries can all run on the same machine or be spread out on multiple boxes in a large deployment. The responsibilities of services and drivers are: .. rubric:: Services :doc:`nova-metadata-wsgi ` A WSGI application that serves the Nova Metadata API. :doc:`nova-api-wsgi ` A WSGI application that serves the Nova OpenStack Compute API. :doc:`nova-compute ` Manages virtual machines. Loads a Service object, and exposes the public methods on ComputeManager through a Remote Procedure Call (RPC). :doc:`nova-conductor ` Provides database-access support for compute nodes (thereby reducing security risks). 
:doc:`nova-scheduler ` Dispatches requests for new virtual machines to the correct node. :doc:`nova-novncproxy ` Provides a VNC proxy for browsers, allowing VNC consoles to access virtual machines. :doc:`nova-spicehtml5proxy ` Provides a SPICE proxy for browsers, allowing SPICE consoles to access virtual machines. :doc:`nova-serialproxy ` Provides a serial console proxy, allowing users to access a virtual machine's serial console. The architecture is covered in much greater detail in :doc:`/admin/architecture`. .. toctree:: :maxdepth: 2 architecture .. note:: Some services have drivers that change how the service implements its core functionality. For example, the ``nova-compute`` service supports drivers that let you choose which hypervisor type it can use. Deployment Considerations ------------------------- There is information you might want to consider before doing your deployment, especially if it is going to be a larger deployment. For smaller deployments the defaults from the :doc:`install guide ` will be sufficient. * **Compute Driver Features Supported**: While the majority of nova deployments use libvirt/kvm, you can use nova with other compute drivers. Nova attempts to provide a unified feature set across these, however, not all features are implemented on all backends, and not all features are equally well tested. * :doc:`Feature Support by Use Case `: A view of what features each driver supports based on what's important to some large use cases (General Purpose Cloud, NFV Cloud, HPC Cloud). * :doc:`Feature Support full list `: A detailed dive through features in each compute driver backend. * :doc:`Cells v2 configuration `: For large deployments, cells v2 cells allow sharding of your compute environment. Upfront planning is key to a successful cells v2 layout. * :doc:`Availability Zones `: Availability Zones are an end-user visible logical abstraction for partitioning a cloud without knowing the physical infrastructure. * :placement-doc:`Placement service <>`: Overview of the placement service, including how it fits in with the rest of nova. * :doc:`Running nova-api on wsgi `: Considerations for using a real WSGI container instead of the baked-in eventlet web server. * :doc:`Nova service concurrency `: Considerations on how to use and tune Nova services in threading mode. .. toctree:: :maxdepth: 2 cells aggregates default-ports availability-zones configuration/index concurrency Basic configuration ------------------- Once you have an OpenStack deployment up and running, you will want to manage it. The below guides cover everything from creating initial flavor and image to log management and live migration of instances. * :doc:`Quotas `: Managing project quotas in nova. * :doc:`Scheduling `: How the scheduler is configured, and how that will impact where compute instances land in your environment. If you are seeing unexpected distribution of compute instances in your hosts, you'll want to dive into this configuration. * :doc:`Exposing custom metadata to compute instances `: How and when you might want to extend the basic metadata exposed to compute instances (either via metadata server or config drive) for your specific purposes. .. 
toctree:: :maxdepth: 2 manage-the-cloud services service-groups manage-logs root-wrap-reference ssh-configuration configuring-migrations live-migration-usage secure-live-migration-with-qemu-native-tls manage-volumes manage-shares flavors admin-password-injection remote-console-access scheduling config-drive image-caching metadata-service unified-limits networking security vendordata notifications Advanced configuration ---------------------- OpenStack clouds run on platforms that differ greatly in the capabilities that they provide. By default, the Compute service seeks to abstract the underlying hardware that it runs on, rather than exposing specifics about the underlying host platforms. This abstraction manifests itself in many ways. For example, rather than exposing the types and topologies of CPUs running on hosts, the service exposes a number of generic CPUs (virtual CPUs, or vCPUs) and allows for overcommitting of these. In a similar manner, rather than exposing the individual types of network devices available on hosts, generic software-powered network ports are provided. These features are designed to allow high resource utilization and allows the service to provide a generic cost-effective and highly scalable cloud upon which to build applications. This abstraction is beneficial for most workloads. However, there are some workloads where determinism and per-instance performance are important, if not vital. In these cases, instances can be expected to deliver near-native performance. The Compute service provides features to improve individual instance for these kind of workloads. .. include:: /common/numa-live-migration-warning.txt .. toctree:: :maxdepth: 2 pci-passthrough cpu-topologies real-time huge-pages virtual-gpu file-backed-memory ports-with-resource-requests vdpa virtual-persistent-memory emulated-tpm uefi secure-boot sev managing-resource-providers compute-node-identification resource-limits cpu-models libvirt-misc Maintenance ----------- Once you are running nova, the following information is extremely useful. * :doc:`Upgrades `: How nova is designed to be upgraded for minimal service impact, and the order you should do them in. .. toctree:: :maxdepth: 2 support-compute evacuate migration migrate-instance-with-snapshot upgrades node-down hw-machine-type hw-emulation-architecture soft-delete-shadow-tables ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/libvirt-misc.rst0000664000175000017500000001361300000000000021061 0ustar00zuulzuul00000000000000====================== Other libvirt features ====================== The libvirt driver supports a large number of additional features that don't warrant their own section. These are gathered here. Guest agent support ------------------- Guest agents enable optional access between compute nodes and guests through a socket, using the QMP protocol. To enable this feature, you must set ``hw_qemu_guest_agent=yes`` as a metadata parameter on the image you wish to use to create the guest-agent-capable instances from. You can explicitly disable the feature by setting ``hw_qemu_guest_agent=no`` in the image metadata. .. _extra-specs-watchdog-behavior: Watchdog behavior ----------------- .. versionchanged:: 15.0.0 (Ocata) Add support for the ``disabled`` option. A virtual watchdog device can be used to keep an eye on the guest server and carry out a configured action if the server hangs. 
The watchdog uses the i6300esb device (emulating a PCI Intel 6300ESB). Watchdog behavior can be configured using the :nova:extra-spec:`hw:watchdog_action` flavor extra spec or equivalent image metadata property. If neither the extra spec not the image metadata property are specified, the watchdog is disabled. For example, to enable the watchdog and configure it to forcefully reset the guest in the event of a hang, run: .. code-block:: console $ openstack flavor set $FLAVOR --property hw:watchdog_action=reset .. note:: Watchdog behavior set using the image metadata property will override behavior set using the flavor extra spec. .. _extra-specs-random-number-generator: Random number generator ----------------------- .. versionchanged:: 21.0.0 (Ussuri) Random number generators are now enabled by default for instances. Operating systems require good sources of entropy for things like cryptographic software. If a random-number generator device has been added to the instance through its image properties, the device can be enabled and configured using the :nova:extra-spec:`hw_rng:allowed`, :nova:extra-spec:`hw_rng:rate_bytes` and :nova:extra-spec:`hw_rng:rate_period` flavor extra specs. To configure for example a byte rate of 5 bytes per period and a period of 1000 mSec (1 second), run: .. code-block:: console $ openstack flavor set $FLAVOR \ --property hw_rng:rate_bytes=5 \ --property hw_rng:rate_period=1000 Alternatively, to disable the random number generator, run: .. code-block:: console $ openstack flavor set $FLAVOR --property hw_rng:allowed=false The presence of separate byte rate and rate period configurables is intentional. As noted in the `QEMU docs`__, a smaller rate and larger period minimizes the opportunity for malicious guests to starve other guests of entropy but at the cost of responsiveness. Conversely, larger rates and smaller periods will increase the burst rate but at the potential cost of warping resource consumption in favour of a greedy guest. .. __: https://wiki.qemu.org/Features/VirtIORNG#Effect_of_the_period_parameter .. _extra-specs-performance-monitoring-unit: Performance Monitoring Unit (vPMU) ---------------------------------- .. versionadded:: 20.0.0 (Train) If nova is deployed with the libvirt virt driver and :oslo.config:option:`libvirt.virt_type` is set to ``qemu`` or ``kvm``, a virtual performance monitoring unit (vPMU) can be enabled or disabled for an instance using the :nova:extra-spec:`hw:pmu` flavor extra spec or ``hw_pmu`` image metadata property. If the vPMU is not explicitly enabled or disabled via the flavor or image, its presence is left to QEMU to decide. For example, to explicitly disable the vPMU, run: .. code-block:: console $ openstack flavor set FLAVOR-NAME --property hw:pmu=false The vPMU is used by tools like ``perf`` in the guest to provide more accurate information for profiling application and monitoring guest performance. For :doc:`real time ` workloads, the emulation of a vPMU can introduce additional latency which would be undesirable. If the telemetry it provides is not required, the vPMU can be disabled. For most workloads the default of unset (enabled) will be correct. .. _extra-specs-hiding-hypervisor-signature: Hiding hypervisor signature --------------------------- .. versionadded:: 18.0.0 (Rocky) .. versionchanged:: 21.0.0 (Ussuri) Prior to the Ussuri release, this was called ``hide_hypervisor_id``. An alias is provided to provide backwards compatibility. Some hypervisors add a signature to their guests. 
While the presence of the signature can enable some paravirtualization features on the guest, it can also have the effect of preventing some drivers from loading. You can hide this signature by setting the :nova:extra-spec:`hw:hide_hypervisor_id` to true. For example, to hide your signature from the guest OS, run: .. code:: console $ openstack flavor set $FLAVOR --property hw:hide_hypervisor_id=true .. _extra-spec-locked_memory: Locked memory allocation ------------------------ .. versionadded:: 26.0.0 (Zed) Locking memory marks the guest memory allocations as unmovable and unswappable. It is implicitly enabled in a number of cases such as SEV or realtime guests but can also be enabled explicitly using the ``hw:locked_memory`` extra spec (or use ``hw_locked_memory`` image property). ``hw:locked_memory`` (also ``hw_locked_memory`` image property) accept boolean values in string format like 'true' or 'false' value. It will raise ``FlavorImageLockedMemoryConflict`` exception if both flavor and image property are specified but with different boolean values. This will only be allowed if you have also set ``hw:mem_page_size``, so we can ensure that the scheduler can actually account for this correctly and prevent out of memory events. Otherwise, will raise ``LockMemoryForbidden`` exception. .. code:: console $ openstack flavor set FLAVOR-NAME \ --property hw:locked_memory=BOOLEAN_VALUE .. note:: This is currently only supported by the libvirt driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/live-migration-usage.rst0000664000175000017500000003255700000000000022515 0ustar00zuulzuul00000000000000====================== Live-migrate instances ====================== Live-migrating an instance means moving its virtual machine to a different OpenStack Compute server while the instance continues running. Before starting a live-migration, review the chapter :ref:`section_configuring-compute-migrations`. It covers the configuration settings required to enable live-migration, but also reasons for migrations and non-live-migration options. The instructions below cover shared-storage and volume-backed migration. To block-migrate instances, add the command-line option ``-block-migrate`` to the :command:`nova live-migration` command, and ``--block-migration`` to the :command:`openstack server migrate` command. .. _section-manual-selection-of-dest: Manual selection of the destination host ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Obtain the ID of the instance you want to migrate: .. code-block:: console $ openstack server list +--------------------------------------+------+--------+-----------------+------------+ | ID | Name | Status | Networks | Image Name | +--------------------------------------+------+--------+-----------------+------------+ | d1df1b5a-70c4-4fed-98b7-423362f2c47c | vm1 | ACTIVE | private=a.b.c.d | ... | | d693db9e-a7cf-45ef-a7c9-b3ecb5f22645 | vm2 | ACTIVE | private=e.f.g.h | ... | +--------------------------------------+------+--------+-----------------+------------+ #. Determine on which host the instance is currently running. In this example, ``vm1`` is running on ``HostB``: .. code-block:: console $ openstack server show d1df1b5a-70c4-4fed-98b7-423362f2c47c +----------------------+--------------------------------------+ | Field | Value | +----------------------+--------------------------------------+ | ... | ... | | OS-EXT-SRV-ATTR:host | HostB | | ... | ... 
| | addresses | a.b.c.d | | flavor | m1.tiny | | id | d1df1b5a-70c4-4fed-98b7-423362f2c47c | | name | vm1 | | status | ACTIVE | | ... | ... | +----------------------+--------------------------------------+ #. Select the compute node the instance will be migrated to. In this example, we will migrate the instance to ``HostC``, because ``nova-compute`` is running on it: .. code-block:: console $ openstack compute service list +----+------------------+-------+----------+---------+-------+----------------------------+ | ID | Binary | Host | Zone | Status | State | Updated At | +----+------------------+-------+----------+---------+-------+----------------------------+ | 3 | nova-conductor | HostA | internal | enabled | up | 2017-02-18T09:42:29.000000 | | 4 | nova-scheduler | HostA | internal | enabled | up | 2017-02-18T09:42:26.000000 | | 5 | nova-compute | HostB | nova | enabled | up | 2017-02-18T09:42:29.000000 | | 6 | nova-compute | HostC | nova | enabled | up | 2017-02-18T09:42:29.000000 | +----+------------------+-------+----------+---------+-------+----------------------------+ #. Check that ``HostC`` has enough resources for migration: .. code-block:: console $ openstack host show HostC +-------+------------+-----+-----------+---------+ | Host | Project | CPU | Memory MB | Disk GB | +-------+------------+-----+-----------+---------+ | HostC | (total) | 16 | 32232 | 878 | | HostC | (used_now) | 22 | 21284 | 422 | | HostC | (used_max) | 22 | 21284 | 422 | | HostC | p1 | 22 | 21284 | 422 | | HostC | p2 | 22 | 21284 | 422 | +-------+------------+-----+-----------+---------+ - ``cpu``: Number of CPUs - ``memory_mb``: Total amount of memory, in MB - ``disk_gb``: Total amount of space for NOVA-INST-DIR/instances, in GB In this table, the first row shows the total amount of resources available on the physical server. The second line shows the currently used resources. The third line shows the maximum used resources. The fourth line and below shows the resources available for each project. #. Migrate the instance: .. code-block:: console $ openstack server migrate d1df1b5a-70c4-4fed-98b7-423362f2c47c --live-migration --host HostC #. Confirm that the instance has been migrated successfully: .. code-block:: console $ openstack server show d1df1b5a-70c4-4fed-98b7-423362f2c47c +----------------------+--------------------------------------+ | Field | Value | +----------------------+--------------------------------------+ | ... | ... | | OS-EXT-SRV-ATTR:host | HostC | | ... | ... | +----------------------+--------------------------------------+ If the instance is still running on ``HostB``, the migration failed. The ``nova-scheduler`` and ``nova-conductor`` log files on the controller and the ``nova-compute`` log file on the source compute host can help pin-point the problem. .. _auto_selection_of_dest: Automatic selection of the destination host ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To leave the selection of the destination host to the Compute service, use the nova command-line client. #. Obtain the instance ID as shown in step 1 of the section :ref:`section-manual-selection-of-dest`. #. Leave out the host selection steps 2, 3, and 4. #. Migrate the instance: .. code-block:: console $ nova live-migration d1df1b5a-70c4-4fed-98b7-423362f2c47c Monitoring the migration ~~~~~~~~~~~~~~~~~~~~~~~~ #. Confirm that the instance is migrating: .. 
code-block:: console $ openstack server show d1df1b5a-70c4-4fed-98b7-423362f2c47c +----------------------+--------------------------------------+ | Field | Value | +----------------------+--------------------------------------+ | ... | ... | | status | MIGRATING | | ... | ... | +----------------------+--------------------------------------+ #. Check progress Use the nova command-line client for nova's migration monitoring feature. First, obtain the migration ID: .. code-block:: console $ nova server-migration-list d1df1b5a-70c4-4fed-98b7-423362f2c47c +----+-------------+----------- (...) | Id | Source Node | Dest Node | (...) +----+-------------+-----------+ (...) | 2 | - | - | (...) +----+-------------+-----------+ (...) For readability, most output columns were removed. Only the first column, **Id**, is relevant. In this example, the migration ID is 2. Use this to get the migration status. .. code-block:: console $ nova server-migration-show d1df1b5a-70c4-4fed-98b7-423362f2c47c 2 +------------------------+--------------------------------------+ | Property | Value | +------------------------+--------------------------------------+ | created_at | 2017-03-08T02:53:06.000000 | | dest_compute | controller | | dest_host | - | | dest_node | - | | disk_processed_bytes | 0 | | disk_remaining_bytes | 0 | | disk_total_bytes | 0 | | id | 2 | | memory_processed_bytes | 65502513 | | memory_remaining_bytes | 786427904 | | memory_total_bytes | 1091379200 | | server_uuid | d1df1b5a-70c4-4fed-98b7-423362f2c47c | | source_compute | compute2 | | source_node | - | | status | running | | updated_at | 2017-03-08T02:53:47.000000 | +------------------------+--------------------------------------+ The output shows that the migration is running. Progress is measured by the number of memory bytes that remain to be copied. If this number is not decreasing over time, the migration may be unable to complete, and it may be aborted by the Compute service. .. note:: The command reports that no disk bytes are processed, even in the event of block migration. What to do when the migration times out ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ During the migration process, the instance may write to a memory page after that page has been copied to the destination. When that happens, the same page has to be copied again. The instance may write to memory pages faster than they can be copied, so that the migration cannot complete. There are two optional actions, controlled by :oslo.config:option:`libvirt.live_migration_timeout_action`, which can be taken against a VM after :oslo.config:option:`libvirt.live_migration_completion_timeout` is reached: 1. ``abort`` (default): The live migration operation will be cancelled after the completion timeout is reached. This is similar to using API ``DELETE /servers/{server_id}/migrations/{migration_id}``. 2. ``force_complete``: The compute service will either pause the VM or trigger post-copy depending on if post copy is enabled and available (:oslo.config:option:`libvirt.live_migration_permit_post_copy` is set to ``True``). This is similar to using API ``POST /servers/{server_id}/migrations/{migration_id}/action (force_complete)``. You can also read the :oslo.config:option:`libvirt.live_migration_timeout_action` configuration option help for more details. The following remarks assume the KVM/Libvirt hypervisor. How to know that the migration timed out ---------------------------------------- To determine that the migration timed out, inspect the ``nova-compute`` log file on the source host. 
The following log entry shows that the migration timed out: .. code-block:: console # grep WARNING.*d1df1b5a-70c4-4fed-98b7-423362f2c47c /var/log/nova/nova-compute.log ... WARNING nova.virt.libvirt.migration [req-...] [instance: ...] live migration not completed after 1800 sec Addressing migration timeouts ----------------------------- To stop the migration from putting load on infrastructure resources like network and disks, you may opt to cancel it manually. .. code-block:: console $ nova live-migration-abort INSTANCE_ID MIGRATION_ID To make live-migration succeed, you have several options: - **Manually force-complete the migration** .. code-block:: console $ nova live-migration-force-complete INSTANCE_ID MIGRATION_ID The instance is paused until memory copy completes. .. caution:: Since the pause impacts time keeping on the instance and not all applications tolerate incorrect time settings, use this approach with caution. - **Enable auto-convergence** Auto-convergence is a Libvirt feature. Libvirt detects that the migration is unlikely to complete and slows down its CPU until the memory copy process is faster than the instance's memory writes. To enable auto-convergence, set ``live_migration_permit_auto_converge=true`` in ``nova.conf`` and restart ``nova-compute``. Do this on all compute hosts. .. caution:: One possible downside of auto-convergence is the slowing down of the instance. - **Enable post-copy** This is a Libvirt feature. Libvirt detects that the migration does not progress and responds by activating the virtual machine on the destination host before all its memory has been copied. Access to missing memory pages result in page faults that are satisfied from the source host. To enable post-copy, set ``live_migration_permit_post_copy=true`` in ``nova.conf`` and restart ``nova-compute``. Do this on all compute hosts. When post-copy is enabled, manual force-completion does not pause the instance but switches to the post-copy process. .. caution:: Possible downsides: - When the network connection between source and destination is interrupted, page faults cannot be resolved anymore, and the virtual machine is rebooted. - Post-copy may lead to an increased page fault rate during migration, which can slow the instance down. If live migrations routinely timeout or fail during cleanup operations due to the user token timing out, consider configuring nova to use :ref:`service user tokens `. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/manage-logs.rst0000664000175000017500000001701400000000000020646 0ustar00zuulzuul00000000000000======= Logging ======= Logging module ~~~~~~~~~~~~~~ Logging behavior can be changed by creating a configuration file. To specify the configuration file, add this line to the ``/etc/nova/nova.conf`` file: .. code-block:: ini log_config_append=/etc/nova/logging.conf To change the logging level, add ``DEBUG``, ``INFO``, ``WARNING``, or ``ERROR`` as a parameter. The logging configuration file is an INI-style configuration file, which must contain a section called ``logger_nova``. This controls the behavior of the logging facility in the ``nova-*`` services. For example: .. code-block:: ini [logger_nova] level = INFO handlers = stderr qualname = nova This example sets the debugging level to ``INFO`` (which is less verbose than the default ``DEBUG`` setting). 
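The ``handlers = stderr`` reference above must point at a handler defined
elsewhere in the same file. The following is a minimal sketch of the
supporting sections such a file typically needs, using the standard Python
``fileConfig`` syntax; the handler and formatter names are examples only:

.. code-block:: ini

   [loggers]
   keys = root, nova

   [handlers]
   keys = stderr

   [formatters]
   keys = default

   [logger_root]
   level = WARNING
   handlers = stderr

   # Write records to standard error; another handler class can be
   # substituted here if logs should go elsewhere.
   [handler_stderr]
   class = StreamHandler
   args = (sys.stderr,)
   formatter = default

   [formatter_default]
   format = %(asctime)s %(levelname)s [%(name)s] %(message)s

With these sections in place, the ``logger_nova`` section shown above routes
Compute log records through the ``stderr`` handler at the chosen level.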
For more about the logging configuration syntax, including the ``handlers`` and ``qualname`` variables, see the `Python documentation `__ on logging configuration files. For an example of the ``logging.conf`` file with various defined handlers, see the :oslo.log-doc:`Example Configuration File for nova `. Syslog ~~~~~~ OpenStack Compute services can send logging information to syslog. This is useful if you want to use rsyslog to forward logs to a remote machine. Separately configure the Compute service (nova), the Identity service (keystone), the Image service (glance), and, if you are using it, the Block Storage service (cinder) to send log messages to syslog. Open these configuration files: - ``/etc/nova/nova.conf`` - ``/etc/keystone/keystone.conf`` - ``/etc/glance/glance-api.conf`` - ``/etc/glance/glance-registry.conf`` - ``/etc/cinder/cinder.conf`` In each configuration file, add these lines: .. code-block:: ini debug = False use_syslog = True syslog_log_facility = LOG_LOCAL0 In addition to enabling syslog, these settings also turn off debugging output from the log. .. note:: Although this example uses the same local facility for each service (``LOG_LOCAL0``, which corresponds to syslog facility ``LOCAL0``), we recommend that you configure a separate local facility for each service, as this provides better isolation and more flexibility. For example, you can capture logging information at different severity levels for different services. syslog allows you to define up to eight local facilities, ``LOCAL0, LOCAL1, ..., LOCAL7``. For more information, see the syslog documentation. Rsyslog ~~~~~~~ rsyslog is useful for setting up a centralized log server across multiple machines. This section briefly describe the configuration to set up an rsyslog server. A full treatment of rsyslog is beyond the scope of this book. This section assumes rsyslog has already been installed on your hosts (it is installed by default on most Linux distributions). This example provides a minimal configuration for ``/etc/rsyslog.conf`` on the log server host, which receives the log files .. code-block:: console # provides TCP syslog reception $ModLoad imtcp $InputTCPServerRun 1024 Add a filter rule to ``/etc/rsyslog.conf`` which looks for a host name. This example uses COMPUTE_01 as the compute host name: .. code-block:: none :hostname, isequal, "COMPUTE_01" /mnt/rsyslog/logs/compute-01.log On each compute host, create a file named ``/etc/rsyslog.d/60-nova.conf``, with the following content: .. code-block:: none # prevent debug from dnsmasq with the daemon.none parameter *.*;auth,authpriv.none,daemon.none,local0.none -/var/log/syslog # Specify a log level of ERROR local0.error @@172.20.1.43:1024 Once you have created the file, restart the ``rsyslog`` service. Error-level log messages on the compute hosts should now be sent to the log server. Serial console ~~~~~~~~~~~~~~ The serial console provides a way to examine kernel output and other system messages during troubleshooting if the instance lacks network connectivity. Read-only access from server serial console is possible using the ``os-GetSerialOutput`` server action. Most cloud images enable this feature by default. For more information, see :ref:`compute-common-errors-and-fixes`. OpenStack Juno and later supports read-write access using the serial console using the ``os-GetSerialConsole`` server action. This feature also requires a websocket client to access the serial console. .. rubric:: Configuring read-write serial console access #. 
On a compute node, edit the ``/etc/nova/nova.conf`` file: In the ``[serial_console]`` section, enable the serial console: .. code-block:: ini [serial_console] # ... enabled = true #. In the ``[serial_console]`` section, configure the serial console proxy similar to graphical console proxies: .. code-block:: ini [serial_console] # ... base_url = ws://controller:6083/ listen = 0.0.0.0 proxyclient_address = MANAGEMENT_INTERFACE_IP_ADDRESS The ``base_url`` option specifies the base URL that clients receive from the API upon requesting a serial console. Typically, this refers to the host name of the controller node. The ``listen`` option specifies the network interface nova-compute should listen on for virtual console connections. Typically, 0.0.0.0 will enable listening on all interfaces. The ``proxyclient_address`` option specifies which network interface the proxy should connect to. Typically, this refers to the IP address of the management interface. When you enable read-write serial console access, Compute will add serial console information to the Libvirt XML file for the instance. For example: .. code-block:: xml .. rubric:: Accessing the serial console on an instance #. Use the :command:`nova get-serial-console` command to retrieve the websocket URL for the serial console on the instance: .. code-block:: console $ nova get-serial-console INSTANCE_NAME Or use the :command:`openstack console url show` command. .. code-block:: console $ openstack console url show --serial INSTANCE_NAME .. list-table:: :header-rows: 0 :widths: 9 65 * - Type - Url * - serial - ws://127.0.0.1:6083/?token=18510769-71ad-4e5a-8348-4218b5613b3d Alternatively, use the API directly: .. code-block:: console $ curl -i 'http://:8774/v2.1//servers//action' \ -X POST \ -H "Accept: application/json" \ -H "Content-Type: application/json" \ -H "X-Auth-Project-Id: " \ -H "X-Auth-Token: " \ -d '{"os-getSerialConsole": {"type": "serial"}}' #. Use Python websocket with the URL to generate ``.send``, ``.recv``, and ``.fileno`` methods for serial console access. For example: .. code-block:: python import websocket ws = websocket.create_connection( 'ws://127.0.0.1:6083/?token=18510769-71ad-4e5a-8348-4218b5613b3d', subprotocols=['binary', 'base64']) Alternatively, use a `Python websocket client `__. .. note:: When you enable the serial console, typical instance logging using the :command:`nova console-log` command is disabled. Kernel output and other system messages will not be visible unless you are actively viewing the serial console. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/manage-shares.rst0000664000175000017500000002656700000000000021204 0ustar00zuulzuul00000000000000============= Manage shares ============= Overview -------- With the Manila share attachment feature, users can easily attach file shares provided by Manila to their instances, and mount them within the instance. This feature eliminates the need for users to manually connect to and mount shares provided by Manila within their instances. Use Cases --------- The Manila share attachment feature can be used in the following scenarios: * As an operator I want the Manila datapath to be separate to any tenant accessible networks. * As a user I want to attach Manila shares directly to my instance and have a simple interface with which to mount them within the instance. As a user I want to detach a directly attached Manila share from my instance. 
* As a user I want to track the Manila shares attached to my instance.

Prerequisites
-------------

To use the Manila share attachment feature, you must have an OpenStack
environment with Manila and Nova installed and configured. Additionally, your
environment must meet the following requirements:

* The compute host running your instance must have ``QEMU`` version 5.0 or
  higher and ``libvirt`` version 6.2 or higher. If these requirements are met,
  the ``COMPUTE_STORAGE_VIRTIO_FS`` trait should be enabled on your compute
  host.

* All compute nodes must be upgraded to a nova version that enables the use of
  virtiofs.

* Additionally, this initial implementation requires that the associated
  instances use `file backed memory`__ or `huge pages`__. This is a
  requirement of `virtio-fs`__.

* Kernel drivers and userland tools to support mounting NFS and CEPHFS shares.

* A kernel version of >= 5.4 within the instance guest OS to support mounting
  virtiofs shares.

.. __: https://docs.openstack.org/nova/latest/admin/file-backed-memory.html
.. __: https://docs.openstack.org/nova/latest/admin/huge-pages.html
.. __: https://virtio-fs.gitlab.io/

Configure instance shared memory
--------------------------------

This can be achieved either by configuring the instance with the
``hw:mem_page_size`` extra spec, or by enabling the
``COMPUTE_MEM_BACKING_FILE`` trait through the file_backed_memory feature in
``libvirt`` for nova-compute, which allows the use of file-backed memory.

``COMPUTE_MEM_BACKING_FILE`` support requires that operators configure one or
more hosts with file-backed memory. Ensuring the instance will land on one of
these hosts can be achieved by creating an availability zone (AZ) containing
these hosts and instructing users to deploy their instances in this AZ.
Alternatively, operators can guide the scheduler to choose a suitable host by
adding ``trait:COMPUTE_MEM_BACKING_FILE=required`` as an extra spec or image
property.

Limitations
-----------

* You must have an instance in the SHUTOFF state to attach or detach a share.

* Due to the current virtiofs implementation in QEMU / libvirt, the following
  Nova features are blocked for VMs with shares attached:

  * evacuate
  * live_migrate
  * rebuild
  * resize(migrate)
  * resume
  * shelve
  * volume snapshot

Known bugs
----------

* Due to a `bug`__, the configuration drive is not refreshed at the
  appropriate time, causing the shares to remain invisible on the
  configuration drive. You can use the metadata service instead.

* The share backup process removes share access and locks because it requires
  exclusive access for backup consistency. This operation `disrupts`__ the
  share attachment, making it inaccessible for the VM. To use this feature
  correctly, follow these steps:

  1. Stop the VM.
  2. Detach the share from the VM (Nova removes the lock).
  3. Perform the backup using the Manila API.
  4. Reattach the share (Nova reapplies the lock).
  5. Start the VM.

* Because Nova `does not send the user's token alongside Nova's service
  token`__, Manila will only recognize Nova as the requester during access
  creation. Consequently, the audit trail log will lack information indicating
  that Nova is acting on behalf of a user request. This is a significant
  limitation of the current implementation.

* The share is `not marked as an error`__ if the VM fails to delete.

.. __: https://bugs.launchpad.net/nova/+bug/2088464
.. __: https://bugs.launchpad.net/nova/+bug/2089007
.. __: https://bugs.launchpad.net/nova/+bug/2089030
..
__: https://bugs.launchpad.net/nova/+bug/2089034 Managing shares --------------- Attaching a Share ~~~~~~~~~~~~~~~~~ To attach a Manila share to an instance, use the ``POST /servers/{server_id}/shares API``, and specify the ``share_id`` of the share you want to attach. The tag parameter is optional and can be used to provide a string used to mount the share within the instance. If you do not provide a tag, the share_id will be used instead. After issuing the attach request, the share's attachment state is recorded in the database and should quickly transition to attaching and then to inactive. The user should verify that the status reaches inactive before proceeding with any new operations; this transition should occur unless an error arises. .. code-block:: shell $ openstack server add share 9736bced-44f6-48fc-b913-f34c3ed95067 3d3aafde-b4cb-45ab-8ac6-31ff93f69536 --tag mytag +-----------------+--------------------------------------------------------------------------------------+ | Field | Value | +-----------------+--------------------------------------------------------------------------------------+ | Export Location | 192.168.122.76:/opt/stack/data/manila/mnt/share-25a777f7-a582-465c-a94c-7293707cc5cb | | Share ID | 3d3aafde-b4cb-45ab-8ac6-31ff93f69536 | | Status | inactive | | Tag | mytag | | UUID | b70403ee-f598-4552-b9e9-173343deff79 | +-----------------+--------------------------------------------------------------------------------------+ Then, when you power on the instance, the required operations will be done to attach the share, and set it as active if there are no errors. If the attach operation fails, the VM start operation will also fail. .. code-block:: shell $ openstack server share show 9736bced-44f6-48fc-b913-f34c3ed95067 3d3aafde-b4cb-45ab-8ac6-31ff93f69536 +-----------------+--------------------------------------------------------------------------------------+ | Field | Value | +-----------------+--------------------------------------------------------------------------------------+ | Export Location | 192.168.122.76:/opt/stack/data/manila/mnt/share-25a777f7-a582-465c-a94c-7293707cc5cb | | Share ID | 3d3aafde-b4cb-45ab-8ac6-31ff93f69536 | | Status | active | | Tag | mytag | | UUID | b70403ee-f598-4552-b9e9-173343deff79 | +-----------------+--------------------------------------------------------------------------------------+ After connecting to the VM, you can retrieve the tags of the attached share by querying the OpenStack metadata service. Note: Here, we can see 2 shares attached to the instance with a defined tag (mytag) and another one with the default tag. Note2: Using this mechanism, shares can be easily mounted automatically when the machine starts up. .. code-block:: shell $ curl -s -H "Metadata-Flavor: OpenStack" http://169.254.169.254/openstack/latest/meta_data.json | jq .devices [ { "type": "share", "share_id": "3d3aafde-b4cb-45ab-8ac6-31ff93f69536", "tag": "mytag", "bus": "none", "address": "none" }, { "type": "share", "share_id": "894a530c-6fa0-4aa1-97c9-4489d205c5ed", "tag": "894a530c-6fa0-4aa1-97c9-4489d205c5ed", "bus": "none", "address": "none" } ] To mount the attached share, use the mount command with the virtiofs file system type, and the tag provided in the response body. .. 
code-block:: shell user@instance $ mount -t virtiofs $tag /mnt/mount/path Detaching a Share ~~~~~~~~~~~~~~~~~ To detach a Manila share, first stop the instance, then use the ``DELETE /servers/{server_id}/shares/{share_id}`` API, specifying the share_id of the share you wish to detach. .. code-block:: shell $ openstack server remove share 9736bced-44f6-48fc-b913-f34c3ed95067 3d3aafde-b4cb-45ab-8ac6-31ff93f69536 Listing Attached Shares ~~~~~~~~~~~~~~~~~~~~~~~ To list all shares attached to an instance, use the ``GET /servers/{server_id}/shares`` API. .. code-block:: shell $ openstack server share list 9736bced-44f6-48fc-b913-f34c3ed95067 +--------------------------------------+----------+--------------------------------------+ | Share ID | Status | Tag | +--------------------------------------+----------+--------------------------------------+ | 3d3aafde-b4cb-45ab-8ac6-31ff93f69536 | inactive | mytag | | 894a530c-6fa0-4aa1-97c9-4489d205c5ed | inactive | 894a530c-6fa0-4aa1-97c9-4489d205c5ed | | 9238fc76-5b21-4b8e-80ef-26d74d192f71 | inactive | 9238fc76-5b21-4b8e-80ef-26d74d192f71 | +--------------------------------------+----------+--------------------------------------+ Showing Details of an Attached Share ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To show the details of a specific share attached to an instance, use the ``GET /servers/{server_id}/shares/{share_id}`` API, and specify the ``share_id`` of the share you want to show. .. code-block:: shell $ openstack server share show 9736bced-44f6-48fc-b913-f34c3ed95067 3d3aafde-b4cb-45ab-8ac6-31ff93f69536 +-----------------+--------------------------------------------------------------------------------------+ | Field | Value | +-----------------+--------------------------------------------------------------------------------------+ | Export Location | 192.168.122.76:/opt/stack/data/manila/mnt/share-25a777f7-a582-465c-a94c-7293707cc5cb | | Share ID | 3d3aafde-b4cb-45ab-8ac6-31ff93f69536 | | Status | inactive | | Tag | mytag | | UUID | 8a8b42f4-7cd5-49f2-b89c-f27b2ed89cd5 | +-----------------+--------------------------------------------------------------------------------------+ Notification of Share Attachment and Detachment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ New notifications will be added for share attach and share detach. You can enable them using ``include_share_mapping`` configuration parameter. Then you can subscribe to these notifications to receive information about share attachment and detachment events. Available versioned notifications: https://docs.openstack.org/nova/latest/reference/notifications.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/manage-the-cloud.rst0000664000175000017500000000432600000000000021570 0ustar00zuulzuul00000000000000.. _section_manage-the-cloud: ================ Manage the cloud ================ .. toctree:: common/nova-show-usage-statistics-for-hosts-instances System administrators can use the :command:`openstack` to manage their clouds. The ``openstack`` client can be used by all users, though specific commands might be restricted by the Identity service. **Managing the cloud with the openstack client** #. The ``python-openstackclient`` package provides an ``openstack`` shell that enables Compute API interactions from the command line. 
Install the client, and provide your user name and password (which can be set as environment variables for convenience), for the ability to administer the cloud from the command line. For more information on ``python-openstackclient``, refer to the :python-openstackclient-doc:`documentation <>`. #. Confirm the installation was successful: .. code-block:: console $ openstack help usage: openstack [--version] [-v | -q] [--log-file LOG_FILE] [-h] [--debug] [--os-cloud ] [--os-region-name ] [--os-cacert ] [--verify | --insecure] [--os-default-domain ] ... Running :command:`openstack help` returns a list of ``openstack`` commands and parameters. To get help for a subcommand, run: .. code-block:: console $ openstack help SUBCOMMAND For a complete list of ``openstack`` commands and parameters, refer to the :python-openstackclient-doc:`OpenStack Command-Line Reference `. #. Set the required parameters as environment variables to make running commands easier. For example, you can add ``--os-username`` as an ``openstack`` option, or set it as an environment variable. To set the user name, password, and project as environment variables, use: .. code-block:: console $ export OS_USERNAME=joecool $ export OS_PASSWORD=coolword $ export OS_TENANT_NAME=coolu #. The Identity service gives you an authentication endpoint, which Compute recognizes as ``OS_AUTH_URL``: .. code-block:: console $ export OS_AUTH_URL=http://hostname:5000/v2.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/manage-volumes.rst0000664000175000017500000003717000000000000021401 0ustar00zuulzuul00000000000000============== Manage volumes ============== Depending on the setup of your cloud provider, they may give you an endpoint to use to manage volumes. You can use the ``openstack`` CLI to manage volumes. For the purposes of the compute service, attaching, detaching and :doc:`creating a server from a volume ` are of primary interest. Refer to the :python-openstackclient-doc:`CLI documentation ` for more information. Volume multi-attach ------------------- Nova `added support for multiattach volumes`_ in the 17.0.0 Queens release. This document covers the nova-specific aspects of this feature. Refer to the :cinder-doc:`block storage admin guide ` for more details about creating multiattach-capable volumes. :term:`Boot from volume ` and attaching a volume to a server that is not SHELVED_OFFLOADED is supported. Ultimately the ability to perform these actions depends on the compute host and hypervisor driver that is being used. There is also a `recorded overview and demo`_ for volume multi-attach. Requirements ~~~~~~~~~~~~ * The minimum required compute API microversion for attaching a multiattach-capable volume to more than one server is :ref:`2.60 `. * Cinder 12.0.0 (Queens) or newer is required. * The ``nova-compute`` service must be running at least Queens release level code (17.0.0) and the hypervisor driver must support attaching block storage devices to more than one guest. Refer to :doc:`/user/support-matrix` for details on which compute drivers support volume multiattach. * When using the libvirt compute driver, the following native package versions determine multiattach support: * libvirt must be greater than or equal to 3.10, or * qemu must be less than 2.10 * Swapping an *in-use* multiattach volume is not supported (this is actually controlled via the block storage volume retype API). 
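As a quick illustration of the microversion requirement above, attaching an already-created multiattach-capable volume to a second server only requires requesting microversion 2.60 or later on the attach request. The following is a sketch; the server names and the volume name are placeholders:

.. code-block:: console

   $ openstack --os-compute-api-version 2.60 server add volume test-server-a multiattach-vol
   $ openstack --os-compute-api-version 2.60 server add volume test-server-b multiattach-vol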
Known issues ~~~~~~~~~~~~ * Creating multiple servers in a single request with a multiattach-capable volume as the root disk is not yet supported: https://bugs.launchpad.net/nova/+bug/1747985 * Subsequent attachments to the same volume are all attached in *read/write* mode by default in the block storage service. A future change either in nova or cinder may address this so that subsequent attachments are made in *read-only* mode, or such that the mode can be specified by the user when attaching the volume to the server. Testing ~~~~~~~ Continuous integration testing of the volume multiattach feature is done via the ``tempest-full`` and ``tempest-slow`` jobs, which, along with the tests themselves, are defined in the `tempest repository`_. .. _added support for multiattach volumes: https://specs.openstack.org/openstack/nova-specs/specs/queens/implemented/multi-attach-volume.html .. _recorded overview and demo: https://www.youtube.com/watch?v=hZg6wqxdEHk .. _tempest repository: http://codesearch.openstack.org/?q=CONF.compute_feature_enabled.volume_multiattach&i=nope&files=&repos=tempest Managing volume attachments --------------------------- During the lifecycle of an instance admins may need to check various aspects of how a given volume is mapped both to an instance and the underlying compute hosting the instance. This could even include refreshing different elements of the attachment to ensure the latest configuration changes within the environment have been applied. .. note:: If you encounter any dangling volume attachments in either the Nova or Cinder databases, a ``hard reboot`` of the affected instance can help resolve the issue. During the instance reboot process, Nova performs a synchronization mechanism that verifies the availability of volume attachments in the Cinder database. Any missing or dangling/stale attachments are detected and deleted from both Nova and Cinder during ``hard reboot`` process. Checking an existing attachment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Existing volume attachments can be checked using the following :python-openstackclient-doc:`OpenStack Client commands `: List all volume attachments for a given instance: .. code-block:: shell $ openstack server volume list 216f9481-4c9d-4530-b865-51cedfa4b8e7 +--------------------------------------+----------+--------------------------------------+--------------------------------------+ | ID | Device | Server ID | Volume ID | +--------------------------------------+----------+--------------------------------------+--------------------------------------+ | 8b9b3491-f083-4485-8374-258372f3db35 | /dev/vdb | 216f9481-4c9d-4530-b865-51cedfa4b8e7 | 8b9b3491-f083-4485-8374-258372f3db35 | +--------------------------------------+----------+--------------------------------------+--------------------------------------+ List all volume attachments for a given instance with the Cinder volume attachment and Block Device Mapping UUIDs also listed with microversion >=2.89: .. code-block:: shell $ openstack --os-compute-api-version 2.89 server volume list 216f9481-4c9d-4530-b865-51cedfa4b8e7 +----------+--------------------------------------+--------------------------------------+------+------------------------+--------------------------------------+--------------------------------------+ | Device | Server ID | Volume ID | Tag | Delete On Termination? 
| Attachment ID | BlockDeviceMapping UUID | +----------+--------------------------------------+--------------------------------------+------+------------------------+--------------------------------------+--------------------------------------+ | /dev/vdb | 216f9481-4c9d-4530-b865-51cedfa4b8e7 | 8b9b3491-f083-4485-8374-258372f3db35 | None | False | d338fb38-cfd5-461f-8753-145dcbdb6c78 | 4e957e6d-52f2-44da-8cf8-3f1ab755e26d | +----------+--------------------------------------+--------------------------------------+------+------------------------+--------------------------------------+--------------------------------------+ List all Cinder volume attachments for a given volume from microversion >= 3.27: .. code-block:: shell $ openstack --os-volume-api-version 3.27 volume attachment list --volume-id 8b9b3491-f083-4485-8374-258372f3db35 +--------------------------------------+--------------------------------------+--------------------------------------+----------+ | ID | Volume ID | Server ID | Status | +--------------------------------------+--------------------------------------+--------------------------------------+----------+ | d338fb38-cfd5-461f-8753-145dcbdb6c78 | 8b9b3491-f083-4485-8374-258372f3db35 | 216f9481-4c9d-4530-b865-51cedfa4b8e7 | attached | +--------------------------------------+--------------------------------------+--------------------------------------+----------+ Show the details of a Cinder volume attachment from microversion >= 3.27: .. code-block:: shell $ openstack --os-volume-api-version 3.27 volume attachment show d338fb38-cfd5-461f-8753-145dcbdb6c78 +-------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Field | Value | +-------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | ID | d338fb38-cfd5-461f-8753-145dcbdb6c78 | | Volume ID | 8b9b3491-f083-4485-8374-258372f3db35 | | Instance ID | 216f9481-4c9d-4530-b865-51cedfa4b8e7 | | Status | attached | | Attach Mode | rw | | Attached At | 2021-09-14T13:03:38.000000 | | Detached At | | | Properties | access_mode='rw', attachment_id='d338fb38-cfd5-461f-8753-145dcbdb6c78', auth_method='CHAP', auth_password='4XyNNFV2TLPhKXoP', auth_username='jsBMQhWZJXupA4eWHLQG', cacheable='False', driver_volume_type='iscsi', encrypted='False', qos_specs=, target_discovered='False', target_iqn='iqn.2010-10.org.openstack:volume-8b9b3491-f083-4485-8374-258372f3db35', target_lun='0', target_portal='192.168.122.99:3260', volume_id='8b9b3491-f083-4485-8374-258372f3db35' | 
+-------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ Refresh a volume attachment with nova-manage ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 24.0.0 (Xena) Admins may also refresh an existing volume attachment using the following :program:`nova-manage` commands. .. note:: Users can also refresh volume attachments by shelving and later unshelving their instances. The following is an alternative to that workflow and useful for admins when having to mass refresh attachments across an environment. .. note:: Future work will look into introducing an os-refresh admin API that will include orchestrating the shutdown of an instance and refreshing volume attachments among other things. To begin the admin can use the ``volume_attachment show`` subcommand to dump existing details of the attachment directly from the Nova database. This includes the stashed ``connection_info`` not shared by the API. .. code-block:: shell $ nova-manage volume_attachment show 216f9481-4c9d-4530-b865-51cedfa4b8e7 8b9b3491-f083-4485-8374-258372f3db35 --json | jq .attachment_id "d338fb38-cfd5-461f-8753-145dcbdb6c78" If the stored ``connection_info`` or ``attachment_id`` are incorrect then the admin may want to refresh the attachment to the compute host entirely by recreating the Cinder volume attachment record(s) and pulling down fresh ``connection_info``. To do this we first need to ensure the instance is stopped: .. code-block:: shell $ openstack server stop 216f9481-4c9d-4530-b865-51cedfa4b8e7 Once stopped the host connector of the compute hosting the instance has to be fetched using the ``volume_attachment get_connector`` subcommand: .. code-block:: shell root@compute $ nova-manage volume_attachment get_connector --json > connector.json .. note:: Future work will remove this requirement and incorporate the gathering of the host connector into the main refresh command. Unfortunately until then it must remain a separate manual step. We can then provide this connector to the ``volume_attachment refresh`` subcommand. This command will connect to the compute, disconnect any host volume connections, delete the existing Cinder volume attachment, recreate the volume attachment and finally update Nova's database. .. code-block:: shell $ nova-manage volume_attachment refresh 216f9481-4c9d-4530-b865-51cedfa4b8e7 8b9b3491-f083-4485-8374-258372f3db35 connector.json The Cinder volume attachment and connection_info stored in the Nova database should now be updated: .. code-block:: shell $ nova-manage volume_attachment show 216f9481-4c9d-4530-b865-51cedfa4b8e7 8b9b3491-f083-4485-8374-258372f3db35 --json | jq .attachment_id "9ce46f49-5cfc-4c6c-b2f0-0287540d3246" The instance can then be restarted and the event list checked .. 
code-block:: shell $ openstack server start $instance ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/managing-resource-providers.rst0000664000175000017500000002045000000000000024073 0ustar00zuulzuul00000000000000============================================== Managing Resource Providers Using Config Files ============================================== In order to facilitate management of resource provider information in the Placement API, Nova provides `a method`__ for admins to add custom inventory and traits to resource providers using YAML files. __ https://specs.openstack.org/openstack/nova-specs/specs/ussuri/approved/provider-config-file.html .. note:: Only ``CUSTOM_*`` resource classes and traits may be managed this way. Placing Files ------------- Nova-compute will search for ``*.yaml`` files in the path specified in :oslo.config:option:`compute.provider_config_location`. These files will be loaded and validated for errors on nova-compute startup. If there are any errors in the files, nova-compute will fail to start up. Administrators should ensure that provider config files have appropriate permissions and ownership. See the `specification`__ and `admin guide`__ for more details. __ https://specs.openstack.org/openstack/nova-specs/specs/ussuri/approved/provider-config-file.html __ https://docs.openstack.org/nova/latest/admin/managing-resource-providers.html .. note:: The files are loaded once at nova-compute startup and any changes or new files will not be recognized until the next nova-compute startup. Examples -------- Resource providers to target can be identified by either UUID or name. In addition, the value ``$COMPUTE_NODE`` can be used in the UUID field to identify all nodes managed by the service. If an entry does not include any additional inventory or traits, it will be logged at load time but otherwise ignored. In the case of a resource provider being identified by both ``$COMPUTE_NODE`` and individual UUID/name, the values in the ``$COMPUTE_NODE`` entry will be ignored for *that provider* only if the explicit entry includes inventory or traits. .. note:: In the case that a resource provider is identified more than once by explicit UUID/name, the nova-compute service will fail to start. This is a global requirement across all supplied ``provider.yaml`` files. .. code-block:: yaml meta: schema_version: '1.0' providers: - identification: name: 'EXAMPLE_RESOURCE_PROVIDER' # Additional valid identification examples: # uuid: '$COMPUTE_NODE' # uuid: '5213b75d-9260-42a6-b236-f39b0fd10561' inventories: additional: - CUSTOM_EXAMPLE_RESOURCE_CLASS: total: 100 reserved: 0 min_unit: 1 max_unit: 10 step_size: 1 allocation_ratio: 1.0 traits: additional: - 'CUSTOM_EXAMPLE_TRAIT' Schema Example -------------- .. code-block:: yaml type: object properties: # This property is used to track where the provider.yaml file originated. # It is reserved for internal use and should never be set in a provider.yaml # file supplied by an end user. __source_file: not: {} meta: type: object properties: # Version ($Major, $minor) of the schema must successfully parse # documents conforming to ($Major, 0..N). Any breaking schema change # (e.g. removing fields, adding new required fields, imposing a stricter # pattern on a value, etc.) must bump $Major. 
schema_version: type: string pattern: '^1\.([0-9]|[1-9][0-9]+)$' required: - schema_version additionalProperties: true providers: type: array items: type: object properties: identification: $ref: '#/provider_definitions/provider_identification' inventories: $ref: '#/provider_definitions/provider_inventories' traits: $ref: '#/provider_definitions/provider_traits' required: - identification additionalProperties: true required: - meta additionalProperties: true provider_definitions: provider_identification: # Identify a single provider to configure. Exactly one identification # method should be used. Currently `uuid` or `name` are supported, but # future versions may support others. # The uuid can be set to the sentinel value `$COMPUTE_NODE` which will # cause the consuming compute service to apply the configuration to # to all compute node root providers it manages that are not otherwise # specified using a uuid or name. type: object properties: uuid: oneOf: # TODO(sean-k-mooney): replace this with type uuid when we can depend # on a version of the jsonschema lib that implements draft 8 or later # of the jsonschema spec. - type: string pattern: '^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{12}$' - type: string const: '$COMPUTE_NODE' name: type: string minLength: 1 # This introduces the possibility of an unsupported key name being used to # get by schema validation, but is necessary to support forward # compatibility with new identification methods. This should be checked # after schema validation. minProperties: 1 maxProperties: 1 additionalProperties: false provider_inventories: # Allows the admin to specify various adjectives to create and manage # providers' inventories. This list of adjectives can be extended in the # future as the schema evolves to meet new use cases. As of v1.0, only one # adjective, `additional`, is supported. type: object properties: additional: type: array items: patternProperties: # Allows any key name matching the resource class pattern, # check to prevent conflicts with virt driver owned resources classes # will be done after schema validation. ^[A-Z0-9_]{1,255}$: type: object properties: # Any optional properties not populated will be given a default value by # placement. If overriding a pre-existing provider values will not be # preserved from the existing inventory. total: type: integer reserved: type: integer min_unit: type: integer max_unit: type: integer step_size: type: integer allocation_ratio: type: number required: - total # The defined properties reflect the current placement data # model. While defining those in the schema and not allowing # additional properties means we will need to bump the schema # version if they change, that is likely to be part of a large # change that may have other impacts anyway. The benefit of # stricter validation of property names outweighs the (small) # chance of having to bump the schema version as described above. additionalProperties: false # This ensures only keys matching the pattern above are allowed additionalProperties: false additionalProperties: true provider_traits: # Allows the admin to specify various adjectives to create and manage # providers' traits. This list of adjectives can be extended in the # future as the schema evolves to meet new use cases. As of v1.0, only one # adjective, `additional`, is supported. type: object properties: additional: type: array items: # Allows any value matching the trait pattern here, additional # validation will be done after schema validation. 
type: string pattern: '^[A-Z0-9_]{1,255}$' additionalProperties: true .. note:: When creating a ``provider.yaml`` config file it is recommended to use the schema provided by nova to validate the config using a simple jsonschema validator rather than starting the nova compute agent to enable faster iteration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/metadata-service.rst0000664000175000017500000001357500000000000021702 0ustar00zuulzuul00000000000000================ Metadata service ================ .. note:: This section provides deployment information about the metadata service. For end-user information about the metadata service and instance metadata in general, refer to the :ref:`user guide `. The metadata service provides a way for instances to retrieve instance-specific data. Instances access the metadata service at ``http://169.254.169.254``. The metadata service supports two sets of APIs - an OpenStack metadata API and an EC2-compatible API - and also exposes vendordata and user data. Both the OpenStack metadata and EC2-compatible APIs are versioned by date. The metadata service can be run globally, or on a per-cell basis. A detailed comparison is provided in the :ref:`cells V2 guide `. .. versionchanged:: 19.0.0 The ability to run the nova metadata API service on a per-cell basis was added in Stein. For versions prior to this release, you should not use the standalone :program:`nova-api-metadata` application for multiple cells. Guests access the service at ``169.254.169.254`` or at ``fe80::a9fe:a9fe``. .. versionchanged:: 22.0.0 Starting with the Victoria release the metadata service is accessible over IPv6 at the link-local address ``fe80::a9fe:a9fe``. The networking service, neutron, is responsible for intercepting these requests and adding HTTP headers which uniquely identify the source of the request before forwarding it to the metadata API server. For the Open vSwitch and Linux Bridge backends provided with neutron, the flow looks something like so: #. Instance sends a HTTP request for metadata to ``169.254.169.254``. #. This request either hits the router or DHCP namespace depending on the route in the instance #. The metadata proxy service in the namespace adds the following info to the request: - Instance IP (``X-Forwarded-For`` header) - Router or Network-ID (``X-Neutron-Network-Id`` or ``X-Neutron-Router-Id`` header) #. The metadata proxy service sends this request to the metadata agent (outside the namespace) via a UNIX domain socket. #. The :program:`neutron-metadata-agent` application forwards the request to the nova metadata API service by adding some new headers (instance ID and Tenant ID) to the request. This flow may vary if a different networking backend is used. Neutron and nova must be configured to communicate together with a shared secret. Neutron uses this secret to sign the Instance-ID header of the metadata request to prevent spoofing. This secret is configured through the :oslo.config:option:`neutron.metadata_proxy_shared_secret` config option in nova and the equivalent ``metadata_proxy_shared_secret`` config option in neutron. 
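For example, once this proxy chain is in place, a guest can confirm that the metadata service is reachable by querying the well-known address directly. A minimal check, run from inside an instance:

.. code-block:: console

   $ curl http://169.254.169.254/openstack/latest/meta_data.json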
Configuration ------------- The :program:`nova-api` application accepts the following metadata service-related options: - :oslo.config:option:`neutron.service_metadata_proxy` - :oslo.config:option:`neutron.metadata_proxy_shared_secret` - :oslo.config:option:`api.metadata_cache_expiration` - :oslo.config:option:`api.local_metadata_per_cell` - :oslo.config:option:`api.dhcp_domain` .. note:: This list excludes configuration options related to the vendordata feature. Refer to :doc:`vendordata feature documentation ` for information on configuring this. For example, to configure the :program:`nova-api` application to serve the metadata API, without SSL, using the ``StaticJSON`` vendordata provider, add the following to a :file:`nova-api.conf` file: .. code-block:: ini [neutron] service_metadata_proxy = True [api] dhcp_domain = metadata_cache_expiration = 15 local_metadata_per_cell = False vendordata_providers = StaticJSON vendordata_jsonfile_path = /etc/nova/vendor_data.json .. note:: This does not include configuration options that are not metadata-specific but are nonetheless required. Configuring the application to use the ``DynamicJSON`` vendordata provider is more involved and is not covered here. The :program:`nova-api-metadata` application accepts almost the same options: - :oslo.config:option:`neutron.service_metadata_proxy` - :oslo.config:option:`neutron.metadata_proxy_shared_secret` - :oslo.config:option:`api.metadata_cache_expiration` - :oslo.config:option:`api.local_metadata_per_cell` - :oslo.config:option:`api.dhcp_domain` .. note:: This list excludes configuration options related to the vendordata feature. Refer to :doc:`vendordata feature documentation ` for information on configuring this. For example, to configure the :program:`nova-api-metadata` application to serve the metadata API, without SSL, add the following to a :file:`nova-api.conf` file: .. code-block:: ini [neutron] service_metadata_proxy = True [api] dhcp_domain = metadata_cache_expiration = 15 local_metadata_per_cell = False .. note:: This does not include configuration options that are not metadata-specific but are nonetheless required. For information about configuring the neutron side of the metadata service, refer to the :neutron-doc:`neutron configuration guide ` Config drives ------------- Config drives are special drives that are attached to an instance when it boots. The instance can mount this drive and read files from it to get information that is normally available through the metadata service. For more information, refer to :doc:`/admin/config-drive` and the :ref:`user guide `. Vendordata ---------- Vendordata provides a way to pass vendor or deployment-specific information to instances. For more information, refer to :doc:`/admin/vendordata` and the :ref:`user guide `. User data --------- User data is a blob of data that the user can specify when they launch an instance. For more information, refer to :ref:`the user guide `. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/migrate-instance-with-snapshot.rst0000664000175000017500000001243200000000000024513 0ustar00zuulzuul00000000000000================================== Use snapshots to migrate instances ================================== This guide can be used to migrate an instance between different clouds. To use snapshots to migrate instances from OpenStack projects to clouds, complete these steps. In the source project: #. 
:ref:`Create_a_snapshot_of_the_instance` #. :ref:`Download_the_snapshot_as_an_image` In the destination project: #. :ref:`Import_the_snapshot_to_the_new_environment` #. :ref:`Boot_a_new_instance_from_the_snapshot` .. note:: Some cloud providers allow only administrators to perform this task. .. _Create_a_snapshot_of_the_instance: Create a snapshot of the instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Shut down the source VM before you take the snapshot to ensure that all data is flushed to disk. If necessary, list the instances to view the instance name: .. code-block:: console $ openstack server list +--------------------------------------+------------+--------+------------------+--------------------+-------------------------+ | ID | Name | Status | Networks | Image | Flavor | +--------------------------------------+------------+--------+------------------+--------------------+-------------------------+ | d0d1b7d9-a6a5-41d3-96ab-07975aadd7fb | myInstance | ACTIVE | private=10.0.0.3 | ubuntu-16.04-amd64 | general.micro.tmp.linux | +--------------------------------------+------------+--------+------------------+--------------------+-------------------------+ #. Use the :command:`openstack server stop` command to shut down the instance: .. code-block:: console $ openstack server stop myInstance #. Use the :command:`openstack server list` command to confirm that the instance shows a ``SHUTOFF`` status: .. code-block:: console $ openstack server list +--------------------------------------+------------+---------+------------------+--------------------+-------------------------+ | ID | Name | Status | Networks | Image | Flavor | +--------------------------------------+------------+---------+------------------+--------------------+-------------------------+ | d0d1b7d9-a6a5-41d3-96ab-07975aadd7fb | myInstance | SHUTOFF | private=10.0.0.3 | ubuntu-16.04-amd64 | general.micro.tmp.linux | +--------------------------------------+------------+---------+------------------+--------------------+-------------------------+ #. Use the :command:`openstack server image create` command to take a snapshot: .. code-block:: console $ openstack server image create --name myInstanceSnapshot myInstance If snapshot operations routinely fail because the user token times out while uploading a large disk image, consider configuring nova to use :ref:`service user tokens `. #. Use the :command:`openstack image list` command to check the status until the status is ``ACTIVE``: .. code-block:: console $ openstack image list +--------------------------------------+---------------------------+--------+ | ID | Name | Status | +--------------------------------------+---------------------------+--------+ | ab567a44-b670-4d22-8ead-80050dfcd280 | myInstanceSnapshot | active | +--------------------------------------+---------------------------+--------+ .. _Download_the_snapshot_as_an_image: Download the snapshot as an image ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Get the image ID: .. code-block:: console $ openstack image list +--------------------------------------+---------------------------+--------+ | ID | Name | Status | +--------------------------------------+---------------------------+--------+ | ab567a44-b670-4d22-8ead-80050dfcd280 | myInstanceSnapshot | active | +--------------------------------------+---------------------------+--------+ #. Download the snapshot by using the image ID that was returned in the previous step: .. 
code-block:: console $ openstack image save --file snapshot.raw ab567a44-b670-4d22-8ead-80050dfcd280 .. note:: The :command:`openstack image save` command requires the image ID and cannot use the image name. Check there is sufficient space on the destination file system for the image file. #. Make the image available to the new environment, either through HTTP or direct upload to a machine (``scp``). .. _Import_the_snapshot_to_the_new_environment: Import the snapshot to the new environment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the new project or cloud environment, import the snapshot: .. code-block:: console $ openstack image create --container-format bare --disk-format qcow2 \ --file snapshot.raw myInstanceSnapshot .. _Boot_a_new_instance_from_the_snapshot: Boot a new instance from the snapshot ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the new project or cloud environment, use the snapshot to create the new instance: .. code-block:: console $ openstack server create --flavor m1.tiny --image myInstanceSnapshot myNewInstance ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/migration.rst0000664000175000017500000000664500000000000020455 0ustar00zuulzuul00000000000000================= Migrate instances ================= .. note:: This documentation is about cold migration. For live migration usage, see :doc:`live-migration-usage`. When you want to move an instance from one compute host to another, you can migrate the instance. The migration operation, which is also known as the cold migration operation to distinguish it from the live migration operation, functions similarly to :doc:`the resize operation ` with the main difference being that a cold migration does not change the flavor of the instance. As with resize, the scheduler chooses the destination compute host based on its settings. This process does not assume that the instance has shared storage available on the target host. If you are using SSH tunneling, you must ensure that each node is configured with SSH key authentication so that the Compute service can use SSH to move disks to other nodes. For more information, see :ref:`cli-os-migrate-cfg-ssh`. To list the VMs you want to migrate, run: .. code-block:: console $ openstack server list Once you have the name or UUID of the server you wish to migrate, migrate it using the :command:`openstack server migrate` command: .. code-block:: console $ openstack server migrate SERVER Once an instance has successfully migrated, you can use the :command:`openstack server migrate confirm` command to confirm it: .. code-block:: console $ openstack server migrate confirm SERVER Alternatively, you can use the :command:`openstack server migrate revert` command to revert the migration and restore the instance to its previous host: .. code-block:: console $ openstack server migrate revert SERVER .. note:: You can configure automatic confirmation of migrations and resizes. Refer to the :oslo.config:option:`resize_confirm_window` option for more information. Example ------- To migrate an instance and watch the status, use this example script: .. 
code-block:: bash #!/bin/bash # Provide usage usage() { echo "Usage: $0 VM_ID" exit 1 } [[ $# -eq 0 ]] && usage VM_ID=$1 # Show the details for the VM echo "Instance details:" openstack server show ${VM_ID} # Migrate the VM to an alternate hypervisor echo -n "Migrating instance to alternate host " openstack server migrate ${VM_ID} while [[ "$(openstack server show ${VM_ID} -f value -c status)" != "VERIFY_RESIZE" ]]; do echo -n "." sleep 2 done openstack server migrate confirm ${VM_ID} echo " instance migrated and resized." # Show the details for the migrated VM echo "Migrated instance details:" openstack server show ${VM_ID} # Pause to allow users to examine VM details read -p "Pausing, press to exit." .. note:: If you see the following error, it means you are either running the command with the wrong credentials, such as a non-admin user, or the ``policy.yaml`` file prevents migration for your user:: Policy doesn't allow os_compute_api:os-migrate-server:migrate to be performed. (HTTP 403) .. note:: If you see the following error, similar to this message, SSH tunneling was not set up between the compute nodes:: ProcessExecutionError: Unexpected error while running command. Stderr: u Host key verification failed.\r\n The instance is booted from a new host, but preserves its configuration including instance ID, name, IP address, any metadata, and other properties. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/networking.rst0000664000175000017500000003273000000000000020645 0ustar00zuulzuul00000000000000======================= Networking with neutron ======================= While nova uses the :neutron-doc:`OpenStack Networking service (neutron) <>` to provide network connectivity for instances, nova itself provides some additional features not possible with neutron alone. These are described below. SR-IOV ------ .. versionchanged:: 2014.2 The feature described below was first introduced in the Juno release. The SR-IOV specification defines a standardized mechanism to virtualize PCIe devices. This mechanism can virtualize a single PCIe Ethernet controller to appear as multiple PCIe devices. Each device can be directly assigned to an instance, bypassing the hypervisor and virtual switch layer. As a result, users are able to achieve low latency and near-line wire speed. A full guide on configuring and using SR-IOV is provided in the :neutron-doc:`OpenStack Networking service documentation ` .. note:: Nova only supports PCI addresses where the fields are restricted to the following maximum value: * domain - 0xFFFF * bus - 0xFF * slot - 0x1F * function - 0x7 Nova will ignore PCI devices reported by the hypervisor if the address is outside of these ranges. .. versionadded:: 25.0.0 For information on creating servers with remotely-managed SR-IOV network interfaces of SmartNIC DPUs, refer to the relevant section in :neutron-doc:`Networking Guide `. **Limitations** * Only VFs are supported and they must be tagged in the Nova Compute configuration in the :oslo.config:option:`pci.device_spec` option as ``remote_managed: "true"``. There is no auto-discovery of this based on vendor and product IDs; * Either VF or its respective PF must expose a PCI VPD capability with a unique card serial number according to the PCI/PCIe specifications (see `the Libvirt docs `_ to get an example of how VPD data is represented and what to expect). 
If this is not the case, those devices will not appear in allocation pools; * Only the Libvirt driver is capable of supporting this feature at the time of writing; * The support for VPD capability handling in Libvirt was added in release `7.9.0 `_ - older versions are not supported by this feature; * All compute nodes must be upgraded to the Yoga release in order for scheduling of nodes with ``VNIC_TYPE_REMOTE_MANAGED`` ports to succeed; * The same limitations apply to operations like live migration as with `legacy SR-IOV ports `_; * Clearing a VLAN by programming VLAN 0 must not result in errors in the VF kernel driver at the compute host. Before v8.1.0 Libvirt clears a VLAN by programming VLAN 0 before passing a VF through to the guest which may result in an error depending on your driver and kernel version (see, for example, `this bug `_ which discusses a case relevant to one driver). As of Libvirt v8.1.0, EPERM errors encountered while programming VLAN 0 are ignored if VLAN clearing is not explicitly requested in the device XML (i.e. VLAN 0 is not specified explicitly). NUMA Affinity ------------- .. versionadded:: 18.0.0 The feature described below was first introduced in the Rocky release. .. important:: The functionality described below is currently only supported by the libvirt/KVM driver. As described in :doc:`cpu-topologies`, NUMA is a computer architecture where memory accesses to certain regions of system memory can have higher latencies than other regions, depending on the CPU(s) your process is running on. This effect extends to devices connected to the PCIe bus, a concept known as NUMA I/O. Many Network Interface Cards (NICs) connect using the PCIe interface, meaning they are susceptible to the ill-effects of poor NUMA affinitization. As a result, NUMA locality must be considered when creating an instance where high dataplane performance is a requirement. Fortunately, nova provides functionality to ensure NUMA affinitization is provided for instances using neutron. How this works depends on the type of port you are trying to use. For SR-IOV ports, virtual functions, which are PCI devices, are attached to the instance. This means the instance can benefit from the NUMA affinity guarantees provided for PCI devices. This happens automatically and is described in detail in :ref:`pci-numa-affinity-policy`. For all other types of ports, some manual configuration is required. #. Identify the type of network(s) you wish to provide NUMA affinity for. - If a network is an L2-type network (``provider:network_type`` of ``flat`` or ``vlan``), affinity of the network to given NUMA node(s) can vary depending on value of the ``provider:physical_network`` attribute of the network, commonly referred to as the *physnet* of the network. This is because most neutron drivers map each *physnet* to a different bridge, to which multiple NICs are attached, or to a different (logical) NIC. - If a network is an L3-type networks (``provider:network_type`` of ``vxlan``, ``gre`` or ``geneve``), all traffic will use the device to which the *endpoint IP* is assigned. This means all L3 networks on a given host will have affinity to the same NUMA node(s). Refer to :neutron-doc:`the neutron documentation ` for more information. #. Determine the NUMA affinity of the NICs attached to the given network(s). How this should be achieved varies depending on the switching solution used and whether the network is a L2-type network or an L3-type networks. 
Consider an L2-type network using the Linux Bridge mechanism driver. As noted in the :neutron-doc:`neutron documentation `, *physnets* are mapped to interfaces using the ``[linux_bridge] physical_interface_mappings`` configuration option. For example: .. code-block:: ini [linux_bridge] physical_interface_mappings = provider:PROVIDER_INTERFACE Once you have the device name, you can query *sysfs* to retrieve the NUMA affinity for this device. For example: .. code-block:: shell $ cat /sys/class/net/PROVIDER_INTERFACE/device/numa_node For an L3-type network using the Linux Bridge mechanism driver, the device used will be configured using protocol-specific endpoint IP configuration option. For VXLAN, this is the ``[vxlan] local_ip`` option. For example: .. code-block:: ini [vxlan] local_ip = OVERLAY_INTERFACE_IP_ADDRESS Once you have the IP address in question, you can use :command:`ip` to identify the device that has been assigned this IP address and from there can query the NUMA affinity using *sysfs* as above. .. note:: The example provided above is merely that: an example. How one should identify this information can vary massively depending on the driver used, whether bonding is used, the type of network used, etc. #. Configure NUMA affinity in ``nova.conf``. Once you have identified the NUMA affinity of the devices used for your networks, you need to configure this in ``nova.conf``. As before, how this should be achieved varies depending on the type of network. For L2-type networks, NUMA affinity is defined based on the ``provider:physical_network`` attribute of the network. There are two configuration options that must be set: ``[neutron] physnets`` This should be set to the list of physnets for which you wish to provide NUMA affinity. Refer to the :oslo.config:option:`documentation ` for more information. ``[neutron_physnet_{physnet}] numa_nodes`` This should be set to the list of NUMA node(s) that networks with the given ``{physnet}`` should be affinitized to. For L3-type networks, NUMA affinity is defined globally for all tunneled networks on a given host. There is only one configuration option that must be set: ``[neutron_tunnel] numa_nodes`` This should be set to a list of one or NUMA nodes to which instances using tunneled networks will be affinitized. #. Configure a NUMA topology for instance flavor(s) For network NUMA affinity to have any effect, the instance must have a NUMA topology itself. This can be configured explicitly, using the ``hw:numa_nodes`` extra spec, or implicitly through the use of CPU pinning (``hw:cpu_policy=dedicated``) or PCI devices. For more information, refer to :doc:`cpu-topologies`. Examples ~~~~~~~~ Take an example for deployment using L2-type networks first. .. code-block:: ini [neutron] physnets = foo,bar [neutron_physnet_foo] numa_nodes = 0 [neutron_physnet_bar] numa_nodes = 2, 3 This configuration will ensure instances using one or more L2-type networks with ``provider:physical_network=foo`` must be scheduled on host cores from NUMA nodes 0, while instances using one or more networks with ``provider:physical_network=bar`` must be scheduled on host cores from both NUMA nodes 2 and 3. For the latter case, it will be necessary to split the guest across two or more host NUMA nodes using the :nova:extra-spec:`hw:numa_nodes` extra spec, as discussed :ref:`here `. Now, take an example for a deployment using L3 networks. .. code-block:: ini [neutron_tunnel] numa_nodes = 0 This is much simpler as all tunneled traffic uses the same logical interface. 
As with the L2-type networks, this configuration will ensure instances using one or more L3-type networks must be scheduled on host cores from NUMA node 0. It is also possible to define more than one NUMA node, in which case the instance must be split across these nodes. virtio-net Multiqueue --------------------- .. versionadded:: 12.0.0 (Liberty) .. versionchanged:: 25.0.0 (Yoga) Support for configuring multiqueue via the ``hw:vif_multiqueue_enabled`` flavor extra spec was introduced in the Yoga (25.0.0) release. .. important:: The functionality described below is currently only supported by the libvirt/KVM driver. Virtual NICs using the virtio-net driver support the multiqueue feature. By default, these vNICs will only use a single virtio-net TX/RX queue pair, meaning guests will not transmit or receive packets in parallel. As a result, the scale of the protocol stack in a guest may be restricted as the network performance will not scale as the number of vCPUs increases and per-queue data processing limits in the underlying vSwitch are encountered. The solution to this issue is to enable virtio-net multiqueue, which can allow the guest instances to increase the total network throughput by scaling the number of receive and transmit queue pairs with CPU count. Multiqueue virtio-net isn't always necessary, but it can provide a significant performance benefit when: - Traffic packets are relatively large. - The guest is active on many connections at the same time, with traffic running between guests, guest to host, or guest to an external system. - The number of queues is equal to the number of vCPUs. This is because multi-queue support optimizes RX interrupt affinity and TX queue selection in order to make a specific queue private to a specific vCPU. However, while the virtio-net multiqueue feature will often provide a welcome performance benefit, it has some limitations and therefore should not be unconditionally enabled: - Enabling virtio-net multiqueue increases the total network throughput, but in parallel it also increases the CPU consumption. - Enabling virtio-net multiqueue in the host QEMU config does not enable the functionality in the guest OS. The guest OS administrator needs to manually turn it on for each guest NIC that requires this feature, using :command:`ethtool`. - In case the number of vNICs in a guest instance is proportional to the number of vCPUs, enabling the multiqueue feature is less important. Having considered these points, multiqueue can be enabled or explicitly disabled using either the :nova:extra-spec:`hw:vif_multiqueue_enabled` flavor extra spec or equivalent ``hw_vif_multiqueue_enabled`` image metadata property. For example, to enable virtio-net multiqueue for a chosen flavor: .. code-block:: bash $ openstack flavor set --property hw:vif_multiqueue_enabled=true $FLAVOR Alternatively, to explicitly disable multiqueue for a chosen image: .. code-block:: bash $ openstack image set --property hw_vif_multiqueue_enabled=false $IMAGE .. note:: If both the flavor extra spec and image metadata property are provided, their values must match or an error will be raised. Once the guest has started, you must enable multiqueue using :command:`ethtool`. For example: .. code-block:: bash $ ethtool -L $devname combined $N where ``$devname`` is the name of the network device, and ``$N`` is the number of TX/RX queue pairs to configure corresponding to the number of instance vCPUs. Alternatively, you can configure this persistently using udev. 
For example, to configure four TX/RX queue pairs for network device ``eth0``: .. code-block:: bash # cat /etc/udev/rules.d/50-ethtool.rules ACTION=="add", SUBSYSTEM=="net", NAME=="eth0", RUN+="/sbin/ethtool -L eth0 combined 4" For more information on this feature, refer to the `original spec`__. .. __: https://specs.openstack.org/openstack/nova-specs/specs/liberty/implemented/libvirt-virtiomq.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/node-down.rst0000664000175000017500000002473300000000000020354 0ustar00zuulzuul00000000000000================================== Recover from a failed compute node ================================== If you deploy Compute with a shared file system, you can use several methods to quickly recover from a node failure. This section discusses manual recovery. .. _node-down-evacuate-instances: Evacuate instances ~~~~~~~~~~~~~~~~~~ If a hardware malfunction or other error causes the cloud compute node to fail, you can use the :command:`nova evacuate` command to evacuate instances. See :doc:`evacuate instances ` for more information on using the command. .. _nova-compute-node-down-manual-recovery: Manual recovery ~~~~~~~~~~~~~~~ To manually recover a failed compute node: #. Identify the VMs on the affected hosts by using a combination of the :command:`openstack server list` and :command:`openstack server show` commands. #. Query the Compute database for the status of the host. This example converts an EC2 API instance ID to an OpenStack ID. If you use the :command:`nova` commands, you can substitute the ID directly. This example output is truncated: .. code-block:: none mysql> SELECT * FROM instances WHERE id = CONV('15b9', 16, 10) \G; *************************** 1. row *************************** created_at: 2012-06-19 00:48:11 updated_at: 2012-07-03 00:35:11 deleted_at: NULL ... id: 5561 ... power_state: 5 vm_state: shutoff ... hostname: at3-ui02 host: np-rcc54 ... uuid: 3f57699a-e773-4650-a443-b4b37eed5a06 ... task_state: NULL ... .. note:: Find the credentials for your database in ``/etc/nova.conf`` file. #. Decide to which compute host to move the affected VM. Run this database command to move the VM to that host: .. code-block:: mysql mysql> UPDATE instances SET host = 'np-rcc46' WHERE uuid = '3f57699a-e773-4650-a443-b4b37eed5a06'; #. If you use a hypervisor that relies on libvirt, such as KVM, update the ``libvirt.xml`` file in ``/var/lib/nova/instances/[instance ID]`` with these changes: - Change the ``DHCPSERVER`` value to the host IP address of the new compute host. - Update the VNC IP to ``0.0.0.0``. #. Reboot the VM: .. code-block:: console $ openstack server reboot 3f57699a-e773-4650-a443-b4b37eed5a06 Typically, the database update and :command:`openstack server reboot` command recover a VM from a failed host. However, if problems persist, try one of these actions: - Use :command:`virsh` to recreate the network filter configuration. - Restart Compute services. - Update the ``vm_state`` and ``power_state`` fields in the Compute database. Recover from a UID/GID mismatch ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sometimes when you run Compute with a shared file system or an automated configuration tool, files on your compute node might use the wrong UID or GID. This UID or GID mismatch can prevent you from running live migrations or starting virtual machines. This procedure runs on ``nova-compute`` hosts, based on the KVM hypervisor: #. 
Set the nova UID to the same number in ``/etc/passwd`` on all hosts. For example, set the UID to ``112``. .. note:: Choose UIDs or GIDs that are not in use for other users or groups. #. Set the ``libvirt-qemu`` UID to the same number in the ``/etc/passwd`` file on all hosts. For example, set the UID to ``119``. #. Set the ``nova`` group to the same number in the ``/etc/group`` file on all hosts. For example, set the group to ``120``. #. Set the ``libvirtd`` group to the same number in the ``/etc/group`` file on all hosts. For example, set the group to ``119``. #. Stop the services on the compute node. #. Change all files that the nova user or group owns. For example: .. code-block:: console # find / -uid 108 -exec chown nova {} \; # note the 108 here is the old nova UID before the change # find / -gid 120 -exec chgrp nova {} \; #. Repeat all steps for the ``libvirt-qemu`` files, if required. #. Restart the services. #. To verify that all files use the correct IDs, run the :command:`find` command. Recover cloud after disaster ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to manage your cloud after a disaster and back up persistent storage volumes. Backups are mandatory, even outside of disaster scenarios. For a definition of a disaster recovery plan (DRP), see `https://en.wikipedia.org/wiki/Disaster\_Recovery\_Plan `_. A disk crash, network loss, or power failure can affect several components in your cloud architecture. The worst disaster for a cloud is a power loss. A power loss affects these components: - A cloud controller (``nova-api``, ``nova-conductor``, ``nova-scheduler``) - A compute node (``nova-compute``) - A storage area network (SAN) used by OpenStack Block Storage (``cinder-volumes``) Before a power loss: - Create an active iSCSI session from the SAN to the cloud controller (used for the ``cinder-volumes`` LVM's VG). - Create an active iSCSI session from the cloud controller to the compute node (managed by ``cinder-volume``). - Create an iSCSI session for every volume (so 14 EBS volumes requires 14 iSCSI sessions). - Create ``iptables`` or ``ebtables`` rules from the cloud controller to the compute node. This allows access from the cloud controller to the running instance. - Save the current state of the database, the current state of the running instances, and the attached volumes (mount point, volume ID, volume status, etc), at least from the cloud controller to the compute node. After power resumes and all hardware components restart: - The iSCSI session from the SAN to the cloud no longer exists. - The iSCSI session from the cloud controller to the compute node no longer exists. - Instances stop running. Instances are not lost because neither ``destroy`` nor ``terminate`` ran. The files for the instances remain on the compute node. - The database does not update. .. rubric:: Begin recovery .. warning:: Do not add any steps or change the order of steps in this procedure. #. Check the current relationship between the volume and its instance, so that you can recreate the attachment. Use the :command:`openstack volume list` command to get this information. Note that the :command:`openstack` client can get volume information from OpenStack Block Storage. #. Update the database to clean the stalled state. Do this for every volume by using these queries: .. 
code-block:: mysql mysql> use cinder; mysql> update volumes set mountpoint=NULL; mysql> update volumes set status="available" where status <>"error_deleting"; mysql> update volumes set attach_status="detached"; mysql> update volumes set instance_id=0; Use :command:`openstack volume list` command to list all volumes. #. Restart the instances by using the :command:`openstack server reboot INSTANCE` command. .. important:: Some instances completely reboot and become reachable, while some might stop at the plymouth stage. This is expected behavior. DO NOT reboot a second time. Instance state at this stage depends on whether you added an ``/etc/fstab`` entry for that volume. Images built with the cloud-init package remain in a ``pending`` state, while others skip the missing volume and start. You perform this step to ask Compute to reboot every instance so that the stored state is preserved. It does not matter if not all instances come up successfully. For more information about cloud-init, see `help.ubuntu.com/community/CloudInit/ `__. #. If required, run the :command:`openstack server add volume` command to reattach the volumes to their respective instances. This example uses a file of listed volumes to reattach them: .. code-block:: bash #!/bin/bash while read line; do volume=`echo $line | $CUT -f 1 -d " "` instance=`echo $line | $CUT -f 2 -d " "` mount_point=`echo $line | $CUT -f 3 -d " "` echo "ATTACHING VOLUME FOR INSTANCE - $instance" openstack server add volume $instance $volume $mount_point sleep 2 done < $volumes_tmp_file Instances that were stopped at the plymouth stage now automatically continue booting and start normally. Instances that previously started successfully can now see the volume. #. Log in to the instances with SSH and reboot them. If some services depend on the volume or if a volume has an entry in fstab, you can now restart the instance. Restart directly from the instance itself and not through :command:`nova`: .. code-block:: console # shutdown -r now When you plan for and complete a disaster recovery, follow these tips: - Use the ``errors=remount`` option in the ``fstab`` file to prevent data corruption. In the event of an I/O error, this option prevents writes to the disk. Add this configuration option into the cinder-volume server that performs the iSCSI connection to the SAN and into the instances' ``fstab`` files. - Do not add the entry for the SAN's disks to the cinder-volume's ``fstab`` file. Some systems hang on that step, which means you could lose access to your cloud-controller. To re-run the session manually, run this command before performing the mount: .. code-block:: console # iscsiadm -m discovery -t st -p $SAN_IP $ iscsiadm -m node --target-name $IQN -p $SAN_IP -l - On your instances, if you have the whole ``/home/`` directory on the disk, leave a user's directory with the user's bash files and the ``authorized_keys`` file instead of emptying the ``/home/`` directory and mapping the disk on it. This action enables you to connect to the instance without the volume attached, if you allow only connections through public keys. To reproduce the power loss, connect to the compute node that runs that instance and close the iSCSI session. Do not detach the volume by using the :command:`openstack server remove volume` command. You must manually close the iSCSI session. This example closes an iSCSI session with the number ``15``: .. code-block:: console # iscsiadm -m session -u -r 15 Do not forget the ``-r`` option. Otherwise, all sessions close. .. 
warning:: There is potential for data loss while running instances during this procedure. If you are using Liberty or earlier, ensure you have the correct patch and set the options appropriately. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/notifications.rst0000664000175000017500000001237400000000000021331 0ustar00zuulzuul00000000000000============= Notifications ============= Like many other OpenStack services, nova emits notifications to the message bus with the ``Notifier`` class provided by :oslo.messaging-doc:`oslo.messaging `. From the notification consumer point of view, a notification consists of two parts: an envelope with a fixed structure defined by oslo.messaging and a payload defined by the service emitting the notification. The envelope format is the following:: { "priority": , "event_type": , "timestamp": , "publisher_id": , "message_id": , "payload": } There are two types of notifications in nova: legacy notifications which have an unversioned payload and newer notifications which have a versioned payload. Legacy (unversioned) notifications ---------------------------------- The unversioned notifications exist from the early days of nova and have mostly grown organically. The structure of the payload of the unversioned notifications is defined in the code that emits the notification and no documentation or enforced backward compatibility contract exists for that format. Nova code uses the ``nova.rpc.get_notifier`` call to get a configured oslo.messaging ``Notifier`` object and it uses the oslo-provided functions on the ``Notifier`` object to emit notifications. The configuration of the returned ``Notifier`` object depends on the parameters of the ``get_notifier`` call and the value of the oslo.messaging configuration options :oslo.config:option:`oslo_messaging_notifications.driver` and :oslo.config:option:`oslo_messaging_notifications.topics`. Versioned notifications ----------------------- The versioned notification concept was created to fix the shortcomings of the unversioned notifications. The envelope structure of the emitted notification is the same as in the unversioned notification case as it is provided by oslo.messaging. However, the payload is not a free-form dictionary but a serialized :oslo.versionedobjects-doc:`oslo versionedobjects object <>`. .. _service.update: For example the wire format of the ``service.update`` notification looks like the following:: { "priority": "INFO", "payload": { "nova_object.namespace": "nova", "nova_object.name": "ServiceStatusPayload", "nova_object.version": "1.0", "nova_object.data": { "host": "host1", "disabled": false, "last_seen_up": null, "binary": "nova-compute", "topic": "compute", "disabled_reason": null, "report_count": 1, "forced_down": false, "version": 2 } }, "event_type": "service.update", "publisher_id": "nova-compute:host1" } The serialized oslo.versionedobject as a payload provides a version number to the consumer so the consumer can detect if the structure of the payload has changed. Nova provides the following contract regarding the versioned notification payload: * The payload version defined by the ``nova_object.version`` field of the payload will be increased if and only if the syntax or the semantics of the ``nova_object.data`` field of the payload is changed. 
* A minor version bump indicates a backward compatible change which means that only new fields are added to the payload so a well written consumer can still consume the new payload without any change. * A major version bump indicates a backward incompatible change of the payload which can mean removed fields, type change, etc in the payload. * There is an additional field, ``nova_object.name``, for every payload alongside the ``nova_object.data`` and ``nova_object.version`` fields. This field contains the name of the nova internal representation of the payload type. Client code should not depend on this name. A `presentation from the Train summit`__ goes over the background and usage of versioned notifications, and provides a demo. .. __: https://www.openstack.org/videos/summits/denver-2019/nova-versioned-notifications-the-result-of-a-3-year-journey Configuration ------------- The :oslo.config:option:`notifications.notification_format` config option can be used to specify which notifications are emitted by nova. The versioned notifications are emitted to a different topic than the legacy notifications. By default they are emitted to ``versioned_notifications`` but this can be configured using the :oslo.config:option:`notifications.versioned_notifications_topics` config option. There are notification configuration options in nova which are specific for certain notification types like :oslo.config:option:`notifications.notify_on_state_change`, :oslo.config:option:`notifications.default_level`, etc. Notifications can be disabled entirely by setting the :oslo.config:option:`oslo_messaging_notifications.driver` config option to ``noop``. Reference --------- A list of all currently supported versioned notifications can be found in :doc:`/reference/notifications`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/pci-passthrough.rst0000664000175000017500000010252300000000000021574 0ustar00zuulzuul00000000000000======================================== Attaching physical PCI devices to guests ======================================== The PCI passthrough feature in OpenStack allows full access and direct control of a physical PCI device in guests. This mechanism is generic for any kind of PCI device, and runs with a Network Interface Card (NIC), Graphics Processing Unit (GPU), or any other devices that can be attached to a PCI bus. Correct driver installation is the only requirement for the guest to properly use the devices. Some PCI devices provide Single Root I/O Virtualization and Sharing (SR-IOV) capabilities. When SR-IOV is used, a physical device is virtualized and appears as multiple PCI devices. Virtual PCI devices are assigned to the same or different guests. In the case of PCI passthrough, the full physical device is assigned to only one guest and cannot be shared. PCI devices are requested through flavor extra specs, specifically via the :nova:extra-spec:`pci_passthrough:alias` flavor extra spec. This guide demonstrates how to enable PCI passthrough for a type of PCI device with a vendor ID of ``8086`` and a product ID of ``154d`` - an Intel X520 Network Adapter - by mapping them to the alias ``a1``. You should adjust the instructions for other devices with potentially different capabilities. .. note:: For information on creating servers with SR-IOV network interfaces, refer to the :neutron-doc:`Networking Guide `. 
**Limitations** * Attaching SR-IOV ports to existing servers was not supported until the 22.0.0 Victoria release. Due to various bugs in libvirt and qemu we recommend to use at least libvirt version 6.0.0 and at least qemu version 4.2. * Cold migration (resize) of servers with SR-IOV devices attached was not supported until the 14.0.0 Newton release, see `bug 1512800 `_ for details. .. note:: Nova only supports PCI addresses where the fields are restricted to the following maximum value: * domain - 0xFFFF * bus - 0xFF * slot - 0x1F * function - 0x7 Nova will ignore PCI devices reported by the hypervisor if the address is outside of these ranges. .. versionchanged:: 26.0.0 (Zed): PCI passthrough device inventories now can be tracked in Placement. For more information, refer to :ref:`pci-tracking-in-placement`. .. versionchanged:: 26.0.0 (Zed): The nova-compute service will refuse to start if both the parent PF and its children VFs are configured in :oslo.config:option:`pci.device_spec`. For more information, refer to :ref:`pci-tracking-in-placement`. .. versionchanged:: 26.0.0 (Zed): The nova-compute service will refuse to start with :oslo.config:option:`pci.device_spec` configuration that uses the ``devname`` field. .. versionchanged:: 27.0.0 (2023.1 Antelope): Nova provides Placement based scheduling support for servers with flavor based PCI requests. This support is disable by default. .. versionchanged:: 31.0.0 (2025.1 Epoxy): * Add managed tag to define if the PCI device is managed (attached/detached from the host) by libvirt. This is required to support SR-IOV devices using the new kernel variant driver interface. * Add a live_migratable tag to define whether a PCI device supports live migration. * Add a live_migratable tag to alias definitions to allow requesting either a live-migratable or non-live-migratable device. Enabling PCI passthrough ------------------------ Configure compute host ~~~~~~~~~~~~~~~~~~~~~~ To enable PCI passthrough on an x86, Linux-based compute node, the following are required: * VT-d enabled in the BIOS * IOMMU enabled on the host OS, e.g. by adding the ``intel_iommu=on`` or ``amd_iommu=on`` parameter to the kernel parameters * Assignable PCIe devices Configure ``nova-compute`` ~~~~~~~~~~~~~~~~~~~~~~~~~~ Once PCI passthrough has been configured for the host, :program:`nova-compute` must be configured to allow the PCI device to pass through to VMs. This is done using the :oslo.config:option:`pci.device_spec` option. For example, assuming our sample PCI device has a PCI address of ``41:00.0`` on each host: .. code-block:: ini [pci] device_spec = { "address": "0000:41:00.0" } Refer to :oslo.config:option:`pci.device_spec` for syntax information. Alternatively, to enable passthrough of all devices with the same product and vendor ID: .. code-block:: ini [pci] device_spec = { "vendor_id": "8086", "product_id": "154d" } If using vendor and product IDs, all PCI devices matching the ``vendor_id`` and ``product_id`` are added to the pool of PCI devices available for passthrough to VMs. In addition, it is necessary to configure the :oslo.config:option:`pci.alias` option, which is a JSON-style configuration option that allows you to map a given device type, identified by the standard PCI ``vendor_id`` and (optional) ``product_id`` fields, to an arbitrary name or *alias*. This alias can then be used to request a PCI device using the :nova:extra-spec:`pci_passthrough:alias` flavor extra spec, as discussed previously. 
For our sample device with a vendor ID of ``0x8086`` and a product ID of ``0x154d``, this would be: .. code-block:: ini [pci] alias = { "vendor_id":"8086", "product_id":"154d", "device_type":"type-PF", "name":"a1" } It's important to note the addition of the ``device_type`` field. This is necessary because this PCI device supports SR-IOV. The ``nova-compute`` service categorizes devices into one of three types, depending on the capabilities the devices report: ``type-PF`` The device supports SR-IOV and is the parent or root device. ``type-VF`` The device is a child device of a device that supports SR-IOV. ``type-PCI`` The device does not support SR-IOV. By default, it is only possible to attach ``type-PCI`` devices using PCI passthrough. If you wish to attach ``type-PF`` or ``type-VF`` devices, you must specify the ``device_type`` field in the config option. If the device was a device that did not support SR-IOV, the ``device_type`` field could be omitted. Refer to :oslo.config:option:`pci.alias` for syntax information. .. important:: This option must also be configured on controller nodes. This is discussed later in this document. Once configured, restart the :program:`nova-compute` service. Special Tags ^^^^^^^^^^^^ When specified in :oslo.config:option:`pci.device_spec` some tags have special meaning: ``physical_network`` Associates a device with a physical network label which corresponds to the ``physical_network`` attribute of a network segment object in Neutron. For virtual networks such as overlays a value of ``null`` should be specified as follows: ``"physical_network": null``. In the case of physical networks, this tag is used to supply the metadata necessary for identifying a switched fabric to which a PCI device belongs and associate the port with the correct network segment in the networking backend. Besides typical SR-IOV scenarios, this tag can be used for remote-managed devices in conjunction with the ``remote_managed`` tag. .. note:: The use of ``"physical_network": null`` is only supported in single segment networks. This is due to Nova not supporting multisegment networks for SR-IOV ports. See `bug 1983570 `_ for details. ``remote_managed`` Used to specify whether a PCI device is managed remotely or not. By default, devices are implicitly tagged as ``"remote_managed": "false"`` but and they must be tagged as ``"remote_managed": "true"`` if ports with ``VNIC_TYPE_REMOTE_MANAGED`` are intended to be used. Once that is done, those PCI devices will not be available for allocation for regular PCI passthrough use. Specifying ``"remote_managed": "true"`` is only valid for SR-IOV VFs and specifying it for PFs is prohibited. .. important:: It is recommended that PCI VFs that are meant to be remote-managed (e.g. the ones provided by SmartNIC DPUs) are tagged as remote-managed in order to prevent them from being allocated for regular PCI passthrough since they have to be programmed accordingly at the host that has access to the NIC switch control plane. If this is not done, instances requesting regular SR-IOV ports may get a device that will not be configured correctly and will not be usable for sending network traffic. .. important:: For the Libvirt virt driver, clearing a VLAN by programming VLAN 0 must not result in errors in the VF kernel driver at the compute host. 
Before v8.1.0 Libvirt clears a VLAN before passing a VF through to the guest which may result in an error depending on your driver and kernel version (see, for example, `this bug `_ which discusses a case relevant to one driver). As of Libvirt v8.1.0, EPERM errors encountered while programming a VLAN are ignored if VLAN clearing is not explicitly requested in the device XML. ``trusted`` If a port is requested to be trusted by specifying an extra option during port creation via ``--binding-profile trusted=true``, only devices tagged as ``trusted: "true"`` will be allocated to instances. Nova will then configure those devices as trusted by the network controller through its PF device driver. The specific set of features allowed by the trusted mode of a VF will differ depending on the network controller itself, its firmware version and what a PF device driver version allows to pass to the NIC. Common features to be affected by this tag are changing the VF MAC address, enabling promiscuous mode or multicast promiscuous mode. .. important:: While the ``trusted tag`` does not directly conflict with the ``remote_managed`` tag, network controllers in SmartNIC DPUs may prohibit setting the ``trusted`` mode on a VF via a PF device driver in the first place. It is recommended to test specific devices, drivers and firmware versions before assuming this feature can be used. ``managed`` Users must specify whether the PCI device is managed by libvirt to allow detachment from the host and assignment to the guest, or vice versa. The managed mode of a device depends on the specific device and the support provided by its driver. - ``managed='yes'`` means that nova will let libvirt to detach the device from the host before attaching it to the guest and re-attach it to the host after the guest is deleted. - ``managed='no'`` means that Nova will not request libvirt to attach or detach the device from the host. Instead, Nova assumes that the operator has pre-configured the host so that the devices are already bound to vfio-pci or an appropriate variant driver. This setup allows the devices to be directly usable by QEMU without requiring any additional operations to enable passthrough. .. note:: If not set, the default value is managed='yes' to preserve the existing behavior, primarily for upgrade purposes. .. warning:: Incorrect configuration of this parameter may result in compute node crashes. Configure ``nova-scheduler`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The :program:`nova-scheduler` service must be configured to enable the ``PciPassthroughFilter``. To do this, add this filter to the list of filters specified in :oslo.config:option:`filter_scheduler.enabled_filters` and set :oslo.config:option:`filter_scheduler.available_filters` to the default of ``nova.scheduler.filters.all_filters``. For example: .. code-block:: ini [filter_scheduler] enabled_filters = ...,PciPassthroughFilter available_filters = nova.scheduler.filters.all_filters Once done, restart the :program:`nova-scheduler` service. Configure ``nova-api`` ~~~~~~~~~~~~~~~~~~~~~~ It is necessary to also configure the :oslo.config:option:`pci.alias` config option on the controller. This configuration should match the configuration found on the compute nodes. For example: .. code-block:: ini [pci] alias = { "vendor_id":"8086", "product_id":"154d", "device_type":"type-PF", "name":"a1", "numa_policy":"preferred" } Refer to :oslo.config:option:`pci.alias` for syntax information. Refer to :ref:`Affinity ` for ``numa_policy`` information. 
Once configured, restart the :program:`nova-api-wsgi` service. Configuring a flavor or image ----------------------------- Once the alias has been configured, it can be used for an flavor extra spec. For example, to request two of the PCI devices referenced by alias ``a1``, run: .. code-block:: console $ openstack flavor set m1.large --property "pci_passthrough:alias"="a1:2" For more information about the syntax for ``pci_passthrough:alias``, refer to :doc:`the documentation `. .. _pci-numa-affinity-policy: PCI-NUMA affinity policies -------------------------- By default, the libvirt driver enforces strict NUMA affinity for PCI devices, be they PCI passthrough devices or neutron SR-IOV interfaces. This means that by default a PCI device must be allocated from the same host NUMA node as at least one of the instance's CPUs. This isn't always necessary, however, and you can configure this policy using the :nova:extra-spec:`hw:pci_numa_affinity_policy` flavor extra spec or equivalent image metadata property. There are three possible values allowed: **required** This policy means that nova will boot instances with PCI devices **only** if at least one of the NUMA nodes of the instance is associated with these PCI devices. It means that if NUMA node info for some PCI devices could not be determined, those PCI devices wouldn't be consumable by the instance. This provides maximum performance. **socket** This policy means that the PCI device must be affined to the same host socket as at least one of the guest NUMA nodes. For example, consider a system with two sockets, each with two NUMA nodes, numbered node 0 and node 1 on socket 0, and node 2 and node 3 on socket 1. There is a PCI device affined to node 0. An PCI instance with two guest NUMA nodes and the ``socket`` policy can be affined to either: * node 0 and node 1 * node 0 and node 2 * node 0 and node 3 * node 1 and node 2 * node 1 and node 3 The instance cannot be affined to node 2 and node 3, as neither of those are on the same socket as the PCI device. If the other nodes are consumed by other instances and only nodes 2 and 3 are available, the instance will not boot. **preferred** This policy means that ``nova-scheduler`` will choose a compute host with minimal consideration for the NUMA affinity of PCI devices. ``nova-compute`` will attempt a best effort selection of PCI devices based on NUMA affinity, however, if this is not possible then ``nova-compute`` will fall back to scheduling on a NUMA node that is not associated with the PCI device. **legacy** This is the default policy and it describes the current nova behavior. Usually we have information about association of PCI devices with NUMA nodes. However, some PCI devices do not provide such information. The ``legacy`` value will mean that nova will boot instances with PCI device if either: * The PCI device is associated with at least one NUMA nodes on which the instance will be booted * There is no information about PCI-NUMA affinity available For example, to configure a flavor to use the ``preferred`` PCI NUMA affinity policy for any neutron SR-IOV interfaces attached by the user: .. code-block:: console $ openstack flavor set $FLAVOR \ --property hw:pci_numa_affinity_policy=preferred You can also configure this for PCI passthrough devices by specifying the policy in the alias configuration via :oslo.config:option:`pci.alias`. For more information, refer to :oslo.config:option:`the documentation `. .. _pci-tracking-in-placement: PCI tracking in Placement ------------------------- .. 
note:: The feature described below are optional and disabled by default in nova 26.0.0. (Zed). The legacy PCI tracker code path is still supported and enabled. The Placement PCI tracking can be enabled via the :oslo.config:option:`pci.report_in_placement` configuration. .. warning:: Please note that once it is enabled on a given compute host **it cannot be disabled there any more**. Since nova 26.0.0 (Zed) PCI passthrough device inventories are tracked in Placement. If a PCI device exists on the hypervisor and matches one of the device specifications configured via :oslo.config:option:`pci.device_spec` then Placement will have a representation of the device. Each PCI device of type ``type-PCI`` and ``type-PF`` will be modeled as a Placement resource provider (RP) with the name ``_``. A devices with type ``type-VF`` is represented by its parent PCI device, the PF, as resource provider. By default nova will use ``CUSTOM_PCI__`` as the resource class in PCI inventories in Placement. However the name of the resource class can be customized via the ``resource_class`` tag in the :oslo.config:option:`pci.device_spec` option. There is also a new ``traits`` tag in that configuration that allows specifying a list of placement traits to be added to the resource provider representing the matching PCI devices. .. note:: In nova 26.0.0 (Zed) the Placement resource tracking of PCI devices does not support SR-IOV devices intended to be consumed via Neutron ports and therefore having ``physical_network`` tag in :oslo.config:option:`pci.device_spec`. Such devices are supported via the legacy PCI tracker code path in Nova. .. note:: Having different resource class or traits configuration for VFs under the same parent PF is not supported and the nova-compute service will refuse to start with such configuration. .. important:: While nova supported configuring both the PF and its children VFs for PCI passthrough in the past, it only allowed consuming either the parent PF or its children VFs. Since 26.0.0. (Zed) the nova-compute service will enforce the same rule for the configuration as well and will refuse to start if both the parent PF and its VFs are configured. .. important:: While nova supported configuring PCI devices by device name via the ``devname`` parameter in :oslo.config:option:`pci.device_spec` in the past, this proved to be problematic as the netdev name of a PCI device could change for multiple reasons during hypervisor reboot. So since nova 26.0.0 (Zed) the nova-compute service will refuse to start with such configuration. It is suggested to use the PCI address of the device instead. .. important:: While nova supported configuring :oslo.config:option:`pci.alias` where an alias name is repeated and therefore associated to multiple alias specifications, such configuration is not supported when PCI tracking in Placement is enabled. The nova-compute service makes sure that existing instances with PCI allocations in the nova DB will have a corresponding PCI allocation in placement. This allocation healing also acts on any new instances regardless of the status of the scheduling part of this feature to make sure that the nova DB and placement are in sync. There is one limitation of the healing logic. It assumes that there is no in-progress migration when the nova-compute service is upgraded. If there is an in-progress migration then the PCI allocation on the source host of the migration will not be healed. The placement view will be consistent after such migration is completed or reverted. 
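As a rough sketch of how the pieces above fit together, a compute host that opts in to Placement tracking and customizes the resource class and traits of a device might use a configuration like the following (the device IDs, the resource class and the trait names are purely illustrative):

.. code-block:: ini

   [pci]
   report_in_placement = True
   device_spec = { "vendor_id": "8086", "product_id": "154d", "resource_class": "CUSTOM_FAST_NIC", "traits": "CUSTOM_TRAIT_A,CUSTOM_TRAIT_B" }
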
Reconfiguring the PCI devices on the hypervisor or changing the :oslo.config:option:`pci.device_spec` configuration option and restarting the nova-compute service is supported in the following cases:

* new devices are added
* devices without allocation are removed

Removing a device that has allocations is not supported. If a device having any allocation is removed then the nova-compute service will keep the device and its allocation in the nova DB and in placement, and will log a warning. If a device with any allocation is reconfigured in a way that an allocated PF is removed and VFs from the same PF are configured (or vice versa) then nova-compute will refuse to start as it would create a situation where both the PF and its VFs are made available for consumption.

Since nova 27.0.0 (2023.1 Antelope) scheduling and allocation of PCI devices in Placement can also be enabled via the :oslo.config:option:`filter_scheduler.pci_in_placement` config option set in the nova-api, nova-scheduler, and nova-conductor configuration. Please note that this should only be enabled after all the computes in the system are configured to report PCI inventory in Placement via enabling :oslo.config:option:`pci.report_in_placement`. In Antelope, flavor-based PCI requests are supported but Neutron port-based PCI requests are not handled in Placement.

If you are upgrading from an earlier version with already existing servers with PCI usage then you must enable :oslo.config:option:`pci.report_in_placement` first on all your computes having PCI allocations and then restart the nova-compute service, before you enable :oslo.config:option:`filter_scheduler.pci_in_placement`. The compute service will heal the missing PCI allocations in placement during startup and will continue healing missing allocations for future servers until the scheduling support is enabled.

If a flavor requests multiple ``type-VF`` devices via :nova:extra-spec:`pci_passthrough:alias` then it is important to consider the value of :nova:extra-spec:`group_policy` as well. The value ``none`` allows nova to select VFs from the same parent PF to fulfill the request. The value ``isolate`` restricts nova to selecting each VF from a different parent PF to fulfill the request. If :nova:extra-spec:`group_policy` is not provided in such a flavor then it defaults to ``none``.

Symmetrically with the ``resource_class`` and ``traits`` fields of :oslo.config:option:`pci.device_spec`, the :oslo.config:option:`pci.alias` configuration option supports requesting devices by Placement resource class name via the ``resource_class`` field and also supports requesting traits to be present on the selected devices via the ``traits`` field in the alias. If the ``resource_class`` field is not specified in the alias then it is defaulted by nova to ``CUSTOM_PCI_<vendor_id>_<product_id>``. Either the ``product_id`` and ``vendor_id`` or the ``resource_class`` field must be provided in each alias.

For deeper technical details please read the `nova specification. `_

Support for multiple types of VFs
---------------------------------

SR-IOV devices, such as GPUs, can be configured to provide VFs with various characteristics under the same vendor ID and product ID. To enable Nova to model this, if you configure the VFs with different resource allocations, you will need to use separate resource classes for each. This can be achieved by following the steps below:

- Enable PCI in Placement: This is necessary to track PCI devices with custom resource classes in the placement service.
- Define Device Specifications: Use a custom resource class to represent a specific VF type and ensure that the VFs existing on the hypervisor are matched via the VF's PCI address.
- Specify Type-Specific Flavors: Define flavors with an alias that matches the resource class to ensure proper allocation.

Examples:

.. note::

   The following example demonstrates device specifications and alias configurations, utilizing resource classes as part of the "PCI in placement" feature.

.. code-block:: shell

   [pci]
   device_spec = { "vendor_id": "10de", "product_id": "25b6", "address": "0000:25:00.4", "resource_class": "CUSTOM_A16_16A", "managed": "no" }
   device_spec = { "vendor_id": "10de", "product_id": "25b6", "address": "0000:25:00.5", "resource_class": "CUSTOM_A16_8A", "managed": "no" }
   alias = { "device_type": "type-VF", "resource_class": "CUSTOM_A16_16A", "name": "A16_16A" }
   alias = { "device_type": "type-VF", "resource_class": "CUSTOM_A16_8A", "name": "A16_8A" }

Configuring Live Migration for PCI devices
------------------------------------------

Live migration of instances with PCI devices requires specific configuration at both the device and alias levels to ensure that the migration can succeed. This section explains how to configure PCI passthrough to support live migration.

Configuring PCI Device Specification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Administrators must explicitly define whether a PCI device supports live migration. This is done by adding the ``live_migratable`` attribute to the device specification in the :oslo.config:option:`pci.device_spec` configuration.

.. note::

   Of course, this requires hardware support, as well as proper system and hypervisor configuration.

Example Configuration:

.. code-block:: ini

   [pci]
   device_spec = { "vendor_id": "8086", "product_id": "1515", "live_migratable": "yes" }
   device_spec = { "vendor_id": "8086", "product_id": "1516", "live_migratable": "no" }

Configuring PCI Aliases for Users
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

PCI devices can be requested through flavor extra specs. To request a live migratable PCI device, the PCI alias definition in the :oslo.config:option:`pci.alias` configuration must include the ``live_migratable`` key.

Example Configuration:

.. code-block:: ini

   [pci]
   alias = { "name": "vf_live", "vendor_id": "8086", "product_id": "1515", "device_type": "type-VF", "live_migratable": "yes" }
   alias = { "name": "vf_no_migrate", "vendor_id": "8086", "product_id": "1516", "device_type": "type-VF", "live_migratable": "no" }

Virtual IOMMU support
---------------------

When the :nova:extra-spec:`hw:viommu_model` flavor extra spec or the equivalent image metadata property ``hw_viommu_model`` is provided, and the guest CPU architecture and OS allow it, the libvirt driver can enable a vIOMMU in the guest.

.. note::

   Enabling a vIOMMU might introduce significant performance overhead. You can see a performance comparison table in the `AMD vIOMMU session on KVM Forum 2021`_. For this reason, a vIOMMU should only be enabled for workloads that require it.

.. _`AMD vIOMMU session on KVM Forum 2021`: https://static.sched.com/hosted_files/kvmforum2021/da/vIOMMU%20KVM%20Forum%202021%20-%20v4.pdf

There are four possible values allowed for ``hw:viommu_model`` (and ``hw_viommu_model``):

**virtio**
  Supported on Libvirt since 8.3.0, for Q35 and ARM virt guests.

**smmuv3**
  Supported on Libvirt since 5.5.0, for ARM virt guests.

**intel**
  Supported for Q35 guests.

**auto**
  This option translates to ``virtio`` if supported by Libvirt, otherwise to ``intel`` on x86 (Q35) and ``smmuv3`` on AArch64.
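For example, to request a vIOMMU for all guests booted with a given flavor and let nova pick a suitable model (the flavor name is illustrative):

.. code-block:: console

   $ openstack flavor set $FLAVOR --property hw:viommu_model=auto
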
For the viommu attributes: * ``intremap``, ``caching_mode``, and ``iotlb`` options for viommu (These attributes are driver attributes defined in `Libvirt IOMMU Domain`_) will directly enabled. * ``eim`` will directly enabled if machine type is Q35. ``eim`` is driver attribute defined in `Libvirt IOMMU Domain`_. .. note:: eim(Extended Interrupt Mode) attribute (with possible values on and off) can be used to configure Extended Interrupt Mode. A q35 domain with split I/O APIC (as described in hypervisor features), and both interrupt remapping and EIM turned on for the IOMMU, will be able to use more than 255 vCPUs. Since 3.4.0 (QEMU/KVM only). * ``aw_bits`` attribute can used to set the address width to allow mapping larger iova addresses in the guest. Since Qemu current supported values are 39 and 48, we directly set this to larger width (48) if Libvirt supported. ``aw_bits`` is driver attribute defined in `Libvirt IOMMU Domain`_. .. _`Libvirt IOMMU Domain`: https://libvirt.org/formatdomain.html#iommu-devices Known Issues ------------ A known issue exists where the ``live_migratable`` flag is ignored for devices that include the ``physical_network`` tag. As a result, instances using such devices do not behave as non-live migratable, and instead, they continue to migrate using the legacy VIF unplug/live migrate/VIF plug procedure. Example configuration where the live_migratable flag is ignored: .. code-block:: ini [pci] device_spec = { "vendor_id":"8086", "product_id":"10ca", "address": "0000:06:", "physical_network": "physnet2", "live_migratable": false} A fix for this issue is planned in a follow-up for the **Epoxy** release. The upstream bug report is `here`__. .. __: https://bugs.launchpad.net/nova/+bug/2102161 One-Time-Use Devices -------------------- Certain devices may need attention after they are released from one user and before they are attached to another. This is especially true of direct passthrough devices because the instance has full control over them while attached, and Nova doesn't know specifics about the device itself, unlike regular more cloudy resources. Examples include: * Securely erasing NVMe devices to ensure data residue is not passed from one user to the other unintentionally * Reinstalling known-good firmware to the device to avoid a hijack attack * Updating firmware to the latest release before each user * Checking a property of the device to determine if it needs repair or replacement before giving it to another user (i.e. NVMe write-wear indicator) * Some custom behavior, reset, etc Nova's scope does not cover the above, but it does support a feature that makes it easier for the operator to orchestrate tasks like this. By marking a device as "one time use" (hereafter referred to as OTU), Nova will allocate a device once, after which it will remain in a "reserved" state to avoid being allocated to another instance. After the operator's workflow is performed and the device should be returned to the pool of available resources, the reserved flag can be dropped and Nova will consider it usable again. .. note:: This feature requires :ref:`pci-tracking-in-placement` in order to work. The compute configuration is required, but the transitional scheduler config is optional (during transition but required for safety). A device can be marked as OTU by adding a tag in the ``device_spec`` like this: .. 
code-block:: shell device_spec = {"address": "0000:00:1.0", "one_time_use": true} By marking the device as such, Nova will set the ``reserved`` inventory value on the placement provider to fully cover the device (i.e. ``reserved=total`` at the point at which the instance is assigned the PCI device on the compute node. When the instance is deleted, the ``used`` value will return to zero but ``reserved`` will remain. It is the operator's responsibility to return the ``reserved`` value to zero when the device is ready for re-assignment. The best way to handle this would be to listen to Nova's notifications for the ``instance.delete.end`` event so that the post-processing workflow can happen immediately. However, since notifications could be dropped or missed, regular polling should be performed. Providers that represent devices that Nova is applying the OTU behavior to will have the ``HW_PCI_ONE_TIME_USE`` trait, making it easier to identify them. For example: .. code-block:: shell $ openstack resource provider list --required HW_PCI_ONE_TIME_USE +--------------------------------------+--------------------+------------+--------------------------------------+--------------------------------------+ | uuid | name | generation | root_provider_uuid | parent_provider_uuid | +--------------------------------------+--------------------+------------+--------------------------------------+--------------------------------------+ | b9e67d7d-43db-49c7-8ce8-803cad08e656 | jammy_0000:00:01.0 | 39 | 2ee402e8-c5c6-4586-9ac7-58e7594d27d1 | 2ee402e8-c5c6-4586-9ac7-58e7594d27d1 | +--------------------------------------+--------------------+------------+--------------------------------------+--------------------------------------+ Will find all such providers. For each of those, checking the inventory to find ones with ``used=0`` and ``reserved=1`` will identify devices in need of processing. To use the above example: .. code-block:: shell $ openstack resource provider inventory list b9e67d7d-43db-49c7-8ce8-803cad08e656 +----------------------+------------------+----------+----------+----------+-----------+-------+------+ | resource_class | allocation_ratio | min_unit | max_unit | reserved | step_size | total | used | +----------------------+------------------+----------+----------+----------+-----------+-------+------+ | CUSTOM_PCI_1B36_0100 | 1.0 | 1 | 1 | 1 | 1 | 1 | 0 | +----------------------+------------------+----------+----------+----------+-----------+-------+------+ To return the above device back to the pool of allocatable resources, we can set the reserved count back to zero: .. 
code-block:: shell $ openstack resource provider inventory set --amend \ --resource CUSTOM_PCI_1B36_0100:reserved=0 \ b9e67d7d-43db-49c7-8ce8-803cad08e656 +----------------------+------------------+----------+----------+----------+-----------+-------+ | resource_class | allocation_ratio | min_unit | max_unit | reserved | step_size | total | +----------------------+------------------+----------+----------+----------+-----------+-------+ | CUSTOM_PCI_1B36_0100 | 1.0 | 1 | 1 | 0 | 1 | 1 | +----------------------+------------------+----------+----------+----------+-----------+-------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/ports-with-resource-requests.rst0000664000175000017500000001065700000000000024300 0ustar00zuulzuul00000000000000================================= Using ports with resource request ================================= Starting from microversion 2.72 nova supports creating servers with neutron ports having resource request visible as a admin-only port attribute ``resource_request``. For example a neutron port has resource request if it has a QoS minimum bandwidth rule attached. The :neutron-doc:`Quality of Service (QoS): Guaranteed Bandwidth ` document describes how to configure neutron to use this feature. Resource allocation ~~~~~~~~~~~~~~~~~~~ Nova collects and combines the resource request from each port in a boot request and sends one allocation candidate request to placement during scheduling so placement will make sure that the resource request of the ports are fulfilled. At the end of the scheduling nova allocates one candidate in placement. Therefore the requested resources for each port from a single boot request will be allocated under the server's allocation in placement. Resource Group policy ~~~~~~~~~~~~~~~~~~~~~ Nova represents the resource request of each neutron port as a separate :placement-doc:`Granular Resource Request group ` when querying placement for allocation candidates. When a server create request includes more than one port with resource requests then more than one group will be used in the allocation candidate query. In this case placement requires to define the ``group_policy``. Today it is only possible via the ``group_policy`` key of the :nova-doc:`flavor extra_spec `. The possible values are ``isolate`` and ``none``. When the policy is set to ``isolate`` then each request group and therefore the resource request of each neutron port will be fulfilled from separate resource providers. In case of neutron ports with ``vnic_type=direct`` or ``vnic_type=macvtap`` this means that each port will use a virtual function from different physical functions. When the policy is set to ``none`` then the resource request of the neutron ports can be fulfilled from overlapping resource providers. In case of neutron ports with ``vnic_type=direct`` or ``vnic_type=macvtap`` this means the ports may use virtual functions from the same physical function. For neutron ports with ``vnic_type=normal`` the group policy defines the collocation policy on OVS bridge level so ``group_policy=none`` is a reasonable default value in this case. If the ``group_policy`` is missing from the flavor then the server create request will fail with 'No valid host was found' and a warning describing the missing policy will be logged. Virt driver support ~~~~~~~~~~~~~~~~~~~ Supporting neutron ports with ``vnic_type=direct`` or ``vnic_type=macvtap`` depends on the capability of the virt driver. 
For the supported virt drivers see the :nova-doc:`Support matrix ` If the virt driver on the compute host does not support the needed capability then the PCI claim will fail on the host and re-schedule will be triggered. It is suggested not to configure bandwidth inventory in the neutron agents on these compute hosts to avoid unnecessary reschedule. Extended resource request ~~~~~~~~~~~~~~~~~~~~~~~~~ It is expected that neutron 20.0.0 (Yoga) will implement an extended resource request format via the ``port-resource-request-groups`` neutron API extension. As of nova 24.0.0 (Xena), nova already supports this extension if every nova-compute service is upgraded to Xena version and the :oslo.config:option:`upgrade_levels.compute` configuration does not prevent the computes from using the latest RPC version. The extended resource request allows a single Neutron port to request resources in more than one request groups. This also means that using just one port in a server create request would require a group policy to be provided in the flavor. Today the only case when a single port generates more than one request groups is when that port has QoS policy with both minimum bandwidth and minimum packet rate rules. Due to the placement resource model of these features in this case the two request groups will always be fulfilled from separate resource providers and therefore neither the ``group_policy=none`` nor the ``group_policy=isolate`` flavor extra specs will result in any additional restriction on the placement of the resources. In the multi port case the Resource Group policy section above still applies. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/quotas.rst0000664000175000017500000003400600000000000017770 0ustar00zuulzuul00000000000000:orphan: ============= Manage quotas ============= .. warning:: As of Nova release 28.0.0 (2023.2 Bobcat), the ``nova.quota.DbQuotaDriver`` has been deprecated and the default quota driver configuration will be changed to the ``nova.quota.UnifiedLimitsDriver`` in the 29.0.0. (2024.1 Caracal) release. See the :doc:`unified limits documentation `. .. note:: This section provides deployment information about the quota feature. For end-user information about quotas, including information about the type of quotas available, refer to the :doc:`user guide `. To prevent system capacities from being exhausted without notification, you can set up quotas. Quotas are operational limits. For example, the number of gigabytes allowed for each project can be controlled so that cloud resources are optimized. Quotas can be enforced at both the project and the project-user level. Starting in the 16.0.0 Pike release, the quota calculation system in nova was overhauled and the old reserve/commit/rollback flow was changed to `count resource usage`__ at the point of whatever operation is being performed, such as creating or resizing a server. A check will be performed by counting current usage for the relevant resource and then, if :oslo.config:option:`quota.recheck_quota` is True, another check will be performed to ensure the initial check is still valid. By default resource usage is counted using the API and cell databases but nova can be configured to count some resource usage without using the cell databases. See `Quota usage from placement`_ for details. Using the command-line interface, you can manage quotas for nova, along with :cinder-doc:`cinder ` and :neutron-doc:`neutron `. 
You would typically change default values because, for example, a project requires more than ten volumes or 1 TB on a compute node. __ https://specs.openstack.org/openstack/nova-specs/specs/pike/implemented/cells-count-resources-to-check-quota-in-api.html Checking quota -------------- When calculating limits for a given resource and project, the following checks are made in order: #. Project-specific limits Depending on the resource, is there a project-specific limit on the resource in either the ``quotas`` or ``project_user_quotas`` tables in the database? If so, use that as the limit. You can create these resources using: .. code-block:: console $ openstack quota set --instances 5 #. Default limits Check to see if there is a hard limit for the given resource in the ``quota_classes`` table in the database for the ``default`` quota class. If so, use that as the limit. You can modify the default quota limit for a resource using: .. code-block:: console $ openstack quota set --instances 5 --class default .. note:: Only the ``default`` class is supported by nova. #. Config-driven limits If the above does not provide a resource limit, then rely on the configuration options in the :oslo.config:group:`quota` config group for the default limits. .. note:: The API sets the limit in the ``quota_classes`` table. Once a default limit is set via the ``default`` quota class, that takes precedence over any changes to that resource limit in the configuration options. In other words, once you've changed things via the API, you either have to keep those synchronized with the configuration values or remove the default limit from the database manually as there is no REST API for removing quota class values from the database. .. _quota-usage-from-placement: Quota usage from placement -------------------------- Starting in the Train (20.0.0) release, it is possible to configure quota usage counting of cores and RAM from the placement service and instances from instance mappings in the API database instead of counting resources from cell databases. This makes quota usage counting resilient in the presence of `down or poor-performing cells`__. Quota usage counting from placement is opt-in via the ::oslo.config:option:`quota.count_usage_from_placement` config option: .. code-block:: ini [quota] count_usage_from_placement = True There are some things to note when opting in to counting quota usage from placement: * Counted usage will not be accurate in an environment where multiple Nova deployments are sharing a placement deployment because currently placement has no way of partitioning resource providers between different Nova deployments. Operators who are running multiple Nova deployments that share a placement deployment should not set the :oslo.config:option:`quota.count_usage_from_placement` configuration option to ``True``. * Behavior will be different for resizes. During a resize, resource allocations are held on both the source and destination (even on the same host, see https://bugs.launchpad.net/nova/+bug/1790204) until the resize is confirmed or reverted. Quota usage will be inflated for servers in this state and operators should weigh the advantages and disadvantages before enabling :oslo.config:option:`quota.count_usage_from_placement`. * The ``populate_queued_for_delete`` and ``populate_user_id`` online data migrations must be completed before usage can be counted from placement. 
Until the data migration is complete, the system will fall back to legacy quota usage counting from cell databases depending on the result of an EXISTS database query during each quota check, if :oslo.config:option:`quota.count_usage_from_placement` is set to ``True``. Operators who want to avoid the performance hit from the EXISTS queries should wait to set the :oslo.config:option:`quota.count_usage_from_placement` configuration option to ``True`` until after they have completed their online data migrations via ``nova-manage db online_data_migrations``. * Behavior will be different for unscheduled servers in ``ERROR`` state. A server in ``ERROR`` state that has never been scheduled to a compute host will not have placement allocations, so it will not consume quota usage for cores and ram. * Behavior will be different for servers in ``SHELVED_OFFLOADED`` state. A server in ``SHELVED_OFFLOADED`` state will not have placement allocations, so it will not consume quota usage for cores and ram. Note that because of this, it will be possible for a request to unshelve a server to be rejected if the user does not have enough quota available to support the cores and ram needed by the server to be unshelved. __ https://docs.openstack.org/api-guide/compute/down_cells.html Known issues ------------ If not :ref:`counting quota usage from placement ` it is possible for down or poor-performing cells to impact quota calculations. See the :ref:`cells documentation ` for details. Future plans ------------ Hierarchical quotas ~~~~~~~~~~~~~~~~~~~ There has long been a desire to support hierarchical or nested quotas leveraging support in the identity service for hierarchical projects. See the `unified limits`__ spec for details. __ https://review.opendev.org/#/c/602201/ Configuration ------------- View and update default quota values ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To list all default quotas for a project, run: .. code-block:: console $ openstack quota show --default .. note:: This lists default quotas for all services and not just nova. To update a default value for a new project, run: .. code-block:: console $ openstack quota set --class --instances 15 default View and update quota values for a project or class ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To list quotas for a project, run: .. code-block:: console $ openstack quota show PROJECT .. note:: This lists project quotas for all services and not just nova. To update quotas for a project, run: .. code-block:: console $ openstack quota set --QUOTA QUOTA_VALUE PROJECT To update quotas for a class, run: .. code-block:: console $ openstack quota set --class --QUOTA QUOTA_VALUE CLASS .. note:: Only the ``default`` class is supported by nova. For example: .. 
code-block:: console $ openstack quota set --instances 12 my-project $ openstack quota show my-project +----------------------+----------------------------------+ | Field | Value | +----------------------+----------------------------------+ | backup-gigabytes | 1000 | | backups | 10 | | cores | 32 | | fixed-ips | -1 | | floating-ips | 10 | | gigabytes | 1000 | | health_monitors | None | | injected-file-size | 10240 | | injected-files | 5 | | injected-path-size | 255 | | instances | 12 | | key-pairs | 100 | | l7_policies | None | | listeners | None | | load_balancers | None | | location | None | | name | None | | networks | 20 | | per-volume-gigabytes | -1 | | pools | None | | ports | 60 | | project | c8156b55ec3b486193e73d2974196993 | | project_name | project | | properties | 128 | | ram | 65536 | | rbac_policies | 10 | | routers | 10 | | secgroup-rules | 50 | | secgroups | 50 | | server-group-members | 10 | | server-groups | 10 | | snapshots | 10 | | subnet_pools | -1 | | subnets | 20 | | volumes | 10 | +----------------------+----------------------------------+ To view a list of options for the :command:`openstack quota show` and :command:`openstack quota set` commands, run: .. code-block:: console $ openstack quota show --help $ openstack quota set --help View and update quota values for a project user ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. note:: User-specific quotas are legacy and will be removed when migration to :keystone-doc:`unified limits ` is complete. User-specific quotas were added as a way to provide two-level hierarchical quotas and this feature is already being offered in unified limits. For this reason, the below commands have not and will not be ported to openstackclient. To show quotas for a specific project user, run: .. code-block:: console $ nova quota-show --user USER PROJECT To update quotas for a specific project user, run: .. code-block:: console $ nova quota-update --user USER --QUOTA QUOTA_VALUE PROJECT For example: .. code-block:: console $ projectUser=$(openstack user show -f value -c id USER) $ project=$(openstack project show -f value -c id PROJECT) $ nova quota-update --user $projectUser --instance 12 $project $ nova quota-show --user $projectUser --tenant $project +-----------------------------+-------+ | Quota | Limit | +-----------------------------+-------+ | instances | 12 | | cores | 20 | | ram | 51200 | | floating_ips | 10 | | fixed_ips | -1 | | metadata_items | 128 | | injected_files | 5 | | injected_file_content_bytes | 10240 | | injected_file_path_bytes | 255 | | key_pairs | 100 | | security_groups | 10 | | security_group_rules | 20 | | server_groups | 10 | | server_group_members | 10 | +-----------------------------+-------+ To view the quota usage for the current user, run: .. code-block:: console $ nova limits --tenant PROJECT For example: .. code-block:: console $ nova limits --tenant my-project +------+-----+-------+--------+------+----------------+ | Verb | URI | Value | Remain | Unit | Next_Available | +------+-----+-------+--------+------+----------------+ +------+-----+-------+--------+------+----------------+ +--------------------+------+-------+ | Name | Used | Max | +--------------------+------+-------+ | Cores | 0 | 20 | | Instances | 0 | 10 | | Keypairs | - | 100 | | Personality | - | 5 | | Personality Size | - | 10240 | | RAM | 0 | 51200 | | Server Meta | - | 128 | | ServerGroupMembers | - | 10 | | ServerGroups | 0 | 10 | +--------------------+------+-------+ .. 
note:: The :command:`nova limits` command generates an empty table as a result of the Compute API, which prints an empty list for backward compatibility purposes. To view a list of options for the :command:`nova quota-show` and :command:`nova quota-update` commands, run: .. code-block:: console $ nova help quota-show $ nova help quota-update ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/real-time.rst0000664000175000017500000001365200000000000020337 0ustar00zuulzuul00000000000000========= Real Time ========= .. versionadded:: 13.0.0 (Mitaka) Nova supports configuring `real-time policies`__ for instances. This builds upon the improved performance offered by :doc:`CPU pinning ` by providing stronger guarantees for worst case scheduler latency for vCPUs. .. __: https://en.wikipedia.org/wiki/Real-time_computing Enabling Real-Time ------------------ Currently the creation of real-time instances is only supported when using the libvirt compute driver with a :oslo.config:option:`libvirt.virt_type` of ``kvm`` or ``qemu``. It requires extensive configuration of the host and this document provides but a rough overview of the changes required. Configuration will vary depending on your hardware, BIOS configuration, host and guest OS' and application. BIOS configuration ~~~~~~~~~~~~~~~~~~ Configure your host BIOS as recommended in the `rt-wiki`__ page. The most important steps are: - Disable power management, including CPU sleep states - Disable SMT (hyper-threading) or any option related to logical processors These are standard steps used in benchmarking as both sets of features can result in non-deterministic behavior. .. __: https://rt.wiki.kernel.org/index.php/HOWTO:_Build_an_RT-application OS configuration ~~~~~~~~~~~~~~~~ This is inherently specific to the distro used, however, there are some common steps: - Install the real-time (preemptible) kernel (``PREEMPT_RT_FULL``) and real-time KVM modules - Configure hugepages - Isolate host cores to be used for instances from the kernel - Disable features like CPU frequency scaling (e.g. P-States on Intel processors) RHEL and RHEL-derived distros like CentOS provide packages in their repositories to accomplish. The ``kernel-rt`` and ``kernel-rt-kvm`` packages will provide the real-time kernel and real-time KVM module, respectively, while the ``tuned-profiles-realtime`` package will provide `tuned`__ profiles to configure the host for real-time workloads. You should refer to your distro documentation for more information. .. __: https://tuned-project.org/ Validation ~~~~~~~~~~ Once your BIOS and the host OS have been configured, you can validate "real-time readiness" using the ``hwlatdetect`` and ``rteval`` utilities. On RHEL and RHEL-derived hosts, you can install these using the ``rt-tests`` package. More information about the ``rteval`` tool can be found `here`__. .. __: https://git.kernel.org/pub/scm/utils/rteval/rteval.git/tree/README Configuring a flavor or image ----------------------------- .. versionchanged:: 22.0.0 (Victoria) Previously, it was necessary to specify :nova:extra-spec:`hw:cpu_realtime_mask` when realtime mode was enabled via :nova:extra-spec:`hw:cpu_realtime`. Starting in Victoria, it is possible to omit this when an emulator thread policy is configured using the :nova:extra-spec:`hw:emulator_threads_policy` extra spec, thus allowing all guest cores to be allocated as real-time cores. .. 
versionchanged:: 22.0.0 (Victoria) Previously, a leading caret was necessary when specifying the value for :nova:extra-spec:`hw:cpu_realtime_mask` and omitting it would be equivalent to not setting the mask, resulting in a failure to spawn the instance. Compared to configuring the host, configuring the guest is relatively trivial and merely requires a combination of flavor extra specs and image metadata properties, along with a suitable real-time guest OS. Enable real-time by setting the :nova:extra-spec:`hw:cpu_realtime` flavor extra spec to ``yes`` or a truthy value. When this is configured, it is necessary to specify where guest overhead processes should be scheduled to. This can be accomplished in one of three ways. Firstly, the :nova:extra-spec:`hw:cpu_realtime_mask` extra spec or equivalent image metadata property can be used to indicate which guest cores should be scheduled as real-time cores, leaving the remainder to be scheduled as non-real-time cores and to handle overhead processes. For example, to allocate the first two cores of an 8 core instance as the non-real-time cores: .. code-block:: console $ openstack flavor set $FLAVOR \ --property hw:cpu_realtime=yes \ --property hw:cpu_realtime_mask=2-7 # so 0,1 are non-real-time In this configuration, any non-real-time cores configured will have an implicit ``dedicated`` :ref:`CPU pinning policy ` applied. It is possible to apply a ``shared`` policy for these non-real-time cores by specifying the ``mixed`` :ref:`CPU pinning policy ` via the :nova:extra-spec:`hw:cpu_policy` extra spec. This can be useful to increase resource utilization of the host. For example: .. code-block:: console $ openstack flavor set $FLAVOR \ --property hw:cpu_policy=mixed \ --property hw:cpu_realtime=yes \ --property hw:cpu_realtime_mask=2-7 # so 0,1 are non-real-time and unpinned Finally, you can explicitly :ref:`offload guest overhead processes to another host core ` using the :nova:extra-spec:`hw:emulator_threads_policy` extra spec. For example: .. code-block:: console $ openstack flavor set $FLAVOR \ --property hw:cpu_realtime=yes \ --property hw:emulator_thread_policy=share .. note:: Emulator thread pinning requires additional host configuration. Refer to :ref:`the documentation ` for more information. In addition to configuring the instance CPUs, it is also likely that you will need to configure guest huge pages. For information on how to configure these, refer to :doc:`the documentation ` References ---------- * `Libvirt real time instances (spec)`__ * `The Real Time Linux collaborative project`__ * `Deploying Real Time OpenStack`__ .. __: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/libvirt-real-time.html .. __: https://wiki.linuxfoundation.org/realtime/start .. __: https://that.guru/blog/deploying-real-time-openstack/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/remote-console-access.rst0000664000175000017500000005363100000000000022653 0ustar00zuulzuul00000000000000=============================== Configure remote console access =============================== OpenStack provides a number of different methods to interact with your guests: VNC, SPICE, Serial, RDP or MKS. If configured, these can be accessed by users through the OpenStack dashboard or the command line. This document outlines how these different technologies can be configured. 
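For example, once a console type is configured, a user can request a connection URL for a server from the command line (a sketch using the noVNC console; the other console types have equivalent flags, and ``my-server`` is a placeholder server name):

.. code-block:: console

   $ openstack console url show --novnc my-server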
Overview -------- It is considered best practice to deploy only one of the consoles types and not all console types are supported by all compute drivers. Regardless of what option is chosen, a console proxy service is required. These proxy services are responsible for the following: - Provide a bridge between the public network where the clients live and the private network where the servers with consoles live. - Mediate token authentication. - Transparently handle hypervisor-specific connection details to provide a uniform client experience. For some combinations of compute driver and console driver, these proxy services are provided by the hypervisor or another service. For all others, nova provides services to handle this proxying. Consider a noVNC-based VNC console connection for example: #. A user connects to the API and gets an ``access_url`` such as, ``http://ip:port/?path=%3Ftoken%3Dxyz``. #. The user pastes the URL in a browser or uses it as a client parameter. #. The browser or client connects to the proxy. #. The proxy authorizes the token for the user, and maps the token to the *private* host and port of the VNC server for an instance. The compute host specifies the address that the proxy should use to connect through the :oslo.config:option:`vnc.server_proxyclient_address` option. In this way, the VNC proxy works as a bridge between the public network and private host network. #. The proxy initiates the connection to VNC server and continues to proxy until the session ends. This means a typical deployment with noVNC-based VNC consoles will have the following components: - One or more :program:`nova-novncproxy` service. Supports browser-based noVNC clients. For simple deployments, this service typically runs on the same machine as :program:`nova-api-wsgi` because it operates as a proxy between the public network and the private compute host network. - One or more :program:`nova-compute` services. Hosts the instances for which consoles are provided. .. todo:: The below diagram references :program:`nova-consoleauth` and needs to be updated. This particular example is illustrated below. .. figure:: figures/SCH_5009_V00_NUAC-VNC_OpenStack.png :alt: noVNC process :width: 95% Consoleauth configuration: -------------------------- The consoleauth accepts following options: - :oslo.config:option:`consoleauth.token_ttl` - :oslo.config:option:`consoleauth.enforce_session_timeout` .. code-block:: ini [consoleauth] token_ttl = 1000 # default value is 600 second enforce_session_timeout = True # default is False Supported consoles: ------------------- * :ref:`vnc-console` * :ref:`spice-console` * :ref:`serial-console` * :ref:`mks-console` .. _vnc-console: noVNC-based VNC console ----------------------- VNC is a graphical console with wide support among many hypervisors and clients. noVNC provides VNC support through a web browser. .. note:: It has `been reported`__ that versions of noVNC older than 0.6 do not work with the :program:`nova-novncproxy` service. If using non-US key mappings, you need at least noVNC 1.0.0 for `a fix`__. If using VMware ESX/ESXi hypervisors, you need at least noVNC 1.1.0 for `a fix`__. __ https://bugs.launchpad.net/nova/+bug/1752896 __ https://github.com/novnc/noVNC/commit/99feba6ba8fee5b3a2b2dc99dc25e9179c560d31 __ https://github.com/novnc/noVNC/commit/2c813a33fe6821f5af737327c50f388052fa963b Configuration ~~~~~~~~~~~~~ To enable the noVNC VNC console service, you must configure both the :program:`nova-novncproxy` service and the :program:`nova-compute` service. 
Most options are defined in the :oslo.config:group:`vnc` group. The :program:`nova-novncproxy` service accepts the following options: - :oslo.config:option:`daemon` - :oslo.config:option:`ssl_only` - :oslo.config:option:`source_is_ipv6` - :oslo.config:option:`cert` - :oslo.config:option:`key` - :oslo.config:option:`web` - :oslo.config:option:`console.ssl_ciphers` - :oslo.config:option:`console.ssl_minimum_version` - :oslo.config:option:`vnc.novncproxy_host` - :oslo.config:option:`vnc.novncproxy_port` If using the libvirt compute driver and enabling :ref:`vnc-security`, the following additional options are supported: - :oslo.config:option:`vnc.auth_schemes` - :oslo.config:option:`vnc.vencrypt_client_key` - :oslo.config:option:`vnc.vencrypt_client_cert` - :oslo.config:option:`vnc.vencrypt_ca_certs` For example, to configure this via a ``nova-novncproxy.conf`` file: .. code-block:: ini [vnc] novncproxy_host = 0.0.0.0 novncproxy_port = 6082 .. note:: This doesn't show configuration with security. For information on how to configure this, refer to :ref:`vnc-security` below. The :program:`nova-compute` service requires the following options to configure noVNC-based VNC console support: - :oslo.config:option:`vnc.enabled` - :oslo.config:option:`vnc.novncproxy_base_url` - :oslo.config:option:`vnc.server_listen` - :oslo.config:option:`vnc.server_proxyclient_address` If using the VMware compute driver, the following additional options are supported: - :oslo.config:option:`vmware.vnc_port` - :oslo.config:option:`vmware.vnc_port_total` For example, to configure this via a ``nova.conf`` file: .. code-block:: ini [vnc] enabled = True novncproxy_base_url = http://IP_ADDRESS:6082/vnc_auto.html server_listen = 127.0.0.1 server_proxyclient_address = 127.0.0.1 Replace ``IP_ADDRESS`` with the IP address from which the proxy is accessible by the outside world. For example, this may be the management interface IP address of the controller or the VIP. .. _vnc-security: VNC proxy security ~~~~~~~~~~~~~~~~~~ Deploy the public-facing interface of the VNC proxy with HTTPS to prevent attacks from malicious parties on the network between the tenant user and proxy server. When using HTTPS, the TLS encryption only applies to data between the tenant user and proxy server. The data between the proxy server and Compute node instance will still be unencrypted. To provide protection for the latter, it is necessary to enable the VeNCrypt authentication scheme for VNC in both the Compute nodes and noVNC proxy server hosts. QEMU/KVM Compute node configuration +++++++++++++++++++++++++++++++++++ Ensure each Compute node running QEMU/KVM with libvirt has a set of certificates issued to it. The following is a list of the required certificates: - :file:`/etc/pki/libvirt-vnc/server-cert.pem` An x509 certificate to be presented **by the VNC server**. The ``CommonName`` should match the **primary hostname of the compute node**. Use of ``subjectAltName`` is also permitted if there is a need to use multiple hostnames or IP addresses to access the same Compute node. - :file:`/etc/pki/libvirt-vnc/server-key.pem` The private key used to generate the ``server-cert.pem`` file. - :file:`/etc/pki/libvirt-vnc/ca-cert.pem` The authority certificate used to sign ``server-cert.pem`` and sign the VNC proxy server certificates. The certificates must have v3 basic constraints [2]_ present to indicate the permitted key use and purpose data. We recommend using a dedicated certificate authority solely for the VNC service. 
This authority may be a child of the master certificate authority used for the OpenStack deployment. This is because libvirt does not currently have a mechanism to restrict what certificates can be presented by the proxy server. For further details on certificate creation, consult the QEMU manual page documentation on VNC server certificate setup [1]_. Configure libvirt to enable the VeNCrypt authentication scheme for the VNC server. In :file:`/etc/libvirt/qemu.conf`, uncomment the following settings: - ``vnc_tls=1`` This instructs libvirt to enable the VeNCrypt authentication scheme when launching QEMU, passing it the certificates shown above. - ``vnc_tls_x509_verify=1`` This instructs QEMU to require that all VNC clients present a valid x509 certificate. Assuming a dedicated certificate authority is used for the VNC service, this ensures that only approved VNC proxy servers can connect to the Compute nodes. Make sure to provide correct permissions to the certificate files for the process which creates instance. Please follow the libvirt wiki page [3]_ for the same. After editing :file:`qemu.conf`, the ``libvirtd`` service must be restarted: .. code-block:: shell $ systemctl restart libvirtd.service Changes will not apply to any existing running guests on the Compute node, so this configuration should be done before launching any instances. noVNC proxy server configuration ++++++++++++++++++++++++++++++++ The noVNC proxy server initially only supports the ``none`` authentication scheme, which does no checking. Therefore, it is necessary to enable the ``vencrypt`` authentication scheme by editing the :file:`nova.conf` file to set. .. code-block:: ini [vnc] auth_schemes=vencrypt,none The :oslo.config:option:`vnc.auth_schemes` values should be listed in order of preference. If enabling VeNCrypt on an existing deployment which already has instances running, the noVNC proxy server must initially be allowed to use ``vencrypt`` and ``none``. Once it is confirmed that all Compute nodes have VeNCrypt enabled for VNC, it is possible to remove the ``none`` option from the list of the :oslo.config:option:`vnc.auth_schemes` values. At that point, the noVNC proxy will refuse to connect to any Compute node that does not offer VeNCrypt. As well as enabling the authentication scheme, it is necessary to provide certificates to the noVNC proxy. - :file:`/etc/pki/nova-novncproxy/client-cert.pem` An x509 certificate to be presented **to the VNC server**. While libvirt/QEMU will not currently do any validation of the ``CommonName`` field, future versions will allow for setting up access controls based on the ``CommonName``. The ``CommonName`` field should match the **primary hostname of the controller node**. If using a HA deployment, the ``Organization`` field can also be configured to a value that is common across all console proxy instances in the deployment. This avoids the need to modify each compute node's whitelist every time a console proxy instance is added or removed. - :file:`/etc/pki/nova-novncproxy/client-key.pem` The private key used to generate the ``client-cert.pem`` file. - :file:`/etc/pki/nova-novncproxy/ca-cert.pem` The certificate authority cert used to sign ``client-cert.pem`` and sign the compute node VNC server certificates. The certificates must have v3 basic constraints [2]_ present to indicate the permitted key use and purpose data. Once the certificates have been created, the noVNC console proxy service must be told where to find them. This requires editing :file:`nova.conf` to set. 
.. code-block:: ini [vnc] vencrypt_client_key=/etc/pki/nova-novncproxy/client-key.pem vencrypt_client_cert=/etc/pki/nova-novncproxy/client-cert.pem vencrypt_ca_certs=/etc/pki/nova-novncproxy/ca-cert.pem .. _spice-console: SPICE console ------------- The VNC protocol is fairly limited, lacking support for multiple monitors, bi-directional audio, reliable cut-and-paste, video streaming and more. SPICE is a new protocol that aims to address the limitations in VNC and provide good remote desktop support. SPICE support in OpenStack Compute shares a similar architecture to the VNC implementation. The OpenStack dashboard uses a SPICE-HTML5 widget in its console tab that communicates with the :program:`nova-spicehtml5proxy` service by using SPICE-over-websockets. The :program:`nova-spicehtml5proxy` service communicates directly with the hypervisor process by using SPICE. Configuration ~~~~~~~~~~~~~ .. important:: VNC must be explicitly disabled to get access to the SPICE console. Set the :oslo.config:option:`vnc.enabled` option to ``False`` to disable the VNC console. To enable the SPICE console service, you must configure both the :program:`nova-spicehtml5proxy` service and the :program:`nova-compute` service. Most options are defined in the :oslo.config:group:`spice` group. The :program:`nova-spicehtml5proxy` service accepts the following options. - :oslo.config:option:`daemon` - :oslo.config:option:`ssl_only` - :oslo.config:option:`source_is_ipv6` - :oslo.config:option:`cert` - :oslo.config:option:`key` - :oslo.config:option:`web` - :oslo.config:option:`console.ssl_ciphers` - :oslo.config:option:`console.ssl_minimum_version` - :oslo.config:option:`spice.html5proxy_host` - :oslo.config:option:`spice.html5proxy_port` For example, to configure this via a ``nova-spicehtml5proxy.conf`` file: .. code-block:: ini [spice] html5proxy_host = 0.0.0.0 html5proxy_port = 6082 The :program:`nova-compute` service requires the following options to configure SPICE console support. - :oslo.config:option:`spice.enabled` - :oslo.config:option:`spice.agent_enabled` - :oslo.config:option:`spice.html5proxy_base_url` - :oslo.config:option:`spice.server_listen` - :oslo.config:option:`spice.server_proxyclient_address` For example, to configure this via a ``nova.conf`` file: .. code-block:: ini [spice] agent_enabled = False enabled = True html5proxy_base_url = http://IP_ADDRESS:6082/spice_auto.html server_listen = 127.0.0.1 server_proxyclient_address = 127.0.0.1 Replace ``IP_ADDRESS`` with the IP address from which the proxy is accessible by the outside world. For example, this may be the management interface IP address of the controller or the VIP. Optionally, the :program:`nova-compute` service supports the following additional options to configure compression settings (algorithms and modes) for SPICE consoles. - :oslo.config:option:`spice.image_compression` - :oslo.config:option:`spice.jpeg_compression` - :oslo.config:option:`spice.zlib_compression` - :oslo.config:option:`spice.playback_compression` - :oslo.config:option:`spice.streaming_mode` As well as the following option to require that connections be protected by TLS: - :oslo.config:option:`spice.require_secure` .. _serial-console: Serial console -------------- Serial consoles provide an alternative to graphical consoles like VNC or SPICE. They work a little differently to graphical consoles so an example is beneficial. 
The example below uses these nodes: * controller node with IP ``192.168.50.100`` * compute node 1 with IP ``192.168.50.104`` * compute node 2 with IP ``192.168.50.105`` Here's the general flow of actions: .. figure:: figures/serial-console-flow.svg :width: 100% :alt: The serial console flow 1. The user requests a serial console connection string for an instance from the REST API. 2. The :program:`nova-api-wsgi` service asks the :program:`nova-compute` service, which manages that instance, to fulfill that request. 3. That connection string gets used by the user to connect to the :program:`nova-serialproxy` service. 4. The :program:`nova-serialproxy` service then proxies the console interaction to the port of the compute node where the instance is running. That port gets forwarded by the hypervisor (or ironic conductor, for ironic) to the guest. Configuration ~~~~~~~~~~~~~ To enable the serial console service, you must configure both the :program:`nova-serialproxy` service and the :program:`nova-compute` service. Most options are defined in the :oslo.config:group:`serial_console` group. The :program:`nova-serialproxy` service accepts the following options. - :oslo.config:option:`daemon` - :oslo.config:option:`ssl_only` - :oslo.config:option:`source_is_ipv6` - :oslo.config:option:`cert` - :oslo.config:option:`key` - :oslo.config:option:`web` - :oslo.config:option:`console.ssl_ciphers` - :oslo.config:option:`console.ssl_minimum_version` - :oslo.config:option:`serial_console.serialproxy_host` - :oslo.config:option:`serial_console.serialproxy_port` For example, to configure this via a ``nova-serialproxy.conf`` file: .. code-block:: ini [serial_console] serialproxy_host = 0.0.0.0 serialproxy_port = 6083 The :program:`nova-compute` service requires the following options to configure serial console support. - :oslo.config:option:`serial_console.enabled` - :oslo.config:option:`serial_console.base_url` - :oslo.config:option:`serial_console.proxyclient_address` - :oslo.config:option:`serial_console.port_range` For example, to configure this via a ``nova.conf`` file: .. code-block:: ini [serial_console] enabled = True base_url = ws://IP_ADDRESS:6083/ proxyclient_address = 127.0.0.1 port_range = 10000:20000 Replace ``IP_ADDRESS`` with the IP address from which the proxy is accessible by the outside world. For example, this may be the management interface IP address of the controller or the VIP. There are some things to keep in mind when configuring these options: * :oslo.config:option:`serial_console.serialproxy_host` is the address the :program:`nova-serialproxy` service listens to for incoming connections. * :oslo.config:option:`serial_console.serialproxy_port` must be the same value as the port in the URI of :oslo.config:option:`serial_console.base_url`. * The URL defined in :oslo.config:option:`serial_console.base_url` will form part of the response the user will get when asking for a serial console connection string. This means it needs to be an URL the user can connect to. * :oslo.config:option:`serial_console.proxyclient_address` will be used by the :program:`nova-serialproxy` service to determine where to connect to for proxying the console interaction. .. _mks-console: MKS console ----------- MKS is the protocol used for accessing the console of a virtual machine running on VMware vSphere. It is very similar to VNC. Due to the architecture of the VMware vSphere hypervisor, it is not necessary to run a console proxy service. 
Configuration ~~~~~~~~~~~~~ To enable the MKS console service, only the :program:`nova-compute` service must be configured. All options are defined in the :oslo.config:group:`mks` group. The :program:`nova-compute` service requires the following options to configure MKS console support. - :oslo.config:option:`mks.enabled` - :oslo.config:option:`mks.mksproxy_base_url` For example, to configure this via a ``nova.conf`` file: .. code-block:: ini [mks] enabled = True mksproxy_base_url = https://127.0.0.1:6090/ .. _about-nova-consoleauth: About ``nova-consoleauth`` -------------------------- The now-removed :program:`nova-consoleauth` service was previously used to provide a shared service to manage token authentication that the client proxies outlined below could leverage. Token authentication was moved to the database in 18.0.0 (Rocky) and the service was removed in 20.0.0 (Train). Frequently Asked Questions -------------------------- - **Q: I want VNC support in the OpenStack dashboard. What services do I need?** A: You need ``nova-novncproxy`` and correctly configured compute hosts. - **Q: My VNC proxy worked fine during my all-in-one test, but now it doesn't work on multi host. Why?** A: The default options work for an all-in-one install, but changes must be made on your compute hosts once you start to build a cluster. As an example, suppose you have two servers: .. code-block:: bash PROXYSERVER (public_ip=172.24.1.1, management_ip=192.168.1.1) COMPUTESERVER (management_ip=192.168.1.2) Your ``nova-compute`` configuration file must set the following values: .. code-block:: ini [vnc] # These flags help construct a connection data structure server_proxyclient_address=192.168.1.2 novncproxy_base_url=http://172.24.1.1:6080/vnc_auto.html # This is the address where the underlying vncserver (not the proxy) # will listen for connections. server_listen=192.168.1.2 .. note:: ``novncproxy_base_url`` uses a public IP; this is the URL that is ultimately returned to clients, which generally do not have access to your private network. Your PROXYSERVER must be able to reach ``server_proxyclient_address``, because that is the address over which the VNC connection is proxied. - **Q: My noVNC does not work with recent versions of web browsers. Why?** A: Make sure you have installed ``python-numpy``, which is required to support a newer version of the WebSocket protocol (HyBi-07+). - **Q: How do I adjust the dimensions of the VNC window image in the OpenStack dashboard?** A: These values are hard-coded in a Django HTML template. To alter them, edit the ``_detail_vnc.html`` template file. The location of this file varies based on Linux distribution. On Ubuntu 14.04, the file is at ``/usr/share/pyshared/horizon/dashboards/nova/instances/templates/instances/_detail_vnc.html``. Modify the ``width`` and ``height`` options, as follows: .. code-block:: ini - **Q: My noVNC connections failed with ValidationError: Origin header protocol does not match. Why?** A: Make sure the ``base_url`` match your TLS setting. If you are using https console connections, make sure that the value of ``novncproxy_base_url`` is set explicitly where the ``nova-novncproxy`` service is running. - **Q: How do I know which nova config file to update to set a particular config option?** A: First, find out which nova-service is responsible for the change you want to make, using ``ps -aux | grep nova``. Once the service is found, check the status of the service via systemctl. 
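For example, a minimal sketch assuming a systemd-based host where the scheduler runs as the ``nova-scheduler`` unit (the unit name varies by distribution):

  .. code-block:: console

     $ ps -aux | grep nova
     $ systemctl status nova-scheduler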
In the status output, associated conf files with respective paths will be listed. References ---------- .. [1] https://qemu.weilnetz.de/doc/4.2/qemu-doc.html#vnc_005fsec_005fcertificate_005fverify .. [2] https://tools.ietf.org/html/rfc3280#section-4.2.1.10 .. [3] https://wiki.libvirt.org/page/VNCTLSSetup#Changes_to_be_made_on_the_virtualisation_host_server ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/resource-limits.rst0000664000175000017500000002341700000000000021606 0ustar00zuulzuul00000000000000=============== Resource Limits =============== Nova supports configuring limits on individual resources including CPU, memory, disk and network. These limits can be used to enforce basic Quality-of-Service (QoS) policies on such resources. .. note:: Hypervisor-enforced resource limits are distinct from API-enforced user and project quotas. For information on the latter, refer to :doc:`quotas`. .. warning:: This feature is poorly tested and poorly maintained. It may no longer work as expected. Where possible, consider using the QoS policies provided by other services, such as :cinder-doc:`Cinder ` and :neutron-doc:`Neutron `. Configuring resource limits --------------------------- Resource quota enforcement support is specific to the virt driver in use on compute hosts. libvirt ~~~~~~~ The libvirt driver supports CPU, disk and VIF limits. Unfortunately all of these work quite differently, as discussed below. CPU limits ^^^^^^^^^^ Libvirt enforces CPU limits in terms of *shares* and *quotas*, configured via :nova:extra-spec:`quota:cpu_shares` and :nova:extra-spec:`quota:cpu_period` / :nova:extra-spec:`quota:cpu_quota`, respectively. Both are implemented using the `cgroups cpu controller`__. Note that allowed values for *shares* are platform dependent. CPU shares are a proportional weighted share of total CPU resources relative to other instances. It does not limit CPU usage if CPUs are not busy. There is no unit and the value is purely relative to other instances, so an instance configured with value of 2048 will get twice as much CPU time as a VM configured with the value 1024. For example, to configure a CPU share of 1024 for a flavor: .. code-block:: console $ openstack flavor set $FLAVOR --property quota:cpu_shares=1024 The CPU quotas require both a period and quota. The CPU period specifies the enforcement interval in microseconds, while the CPU quota specifies the maximum allowed bandwidth in microseconds that the each vCPU of the instance can consume. The CPU period must be in the range 1000 (1mS) to 1,000,000 (1s) or 0 (disabled). The CPU quota must be in the range 1000 (1mS) to 2^64 or 0 (disabled). Where the CPU quota exceeds the CPU period, this means the guest vCPU process is able to consume multiple pCPUs worth of bandwidth. For example, to limit each guest vCPU to 1 pCPU worth of runtime per period: .. code-block:: console $ openstack flavor set $FLAVOR \ --property quota:cpu_period=1000 \ --property quota:cpu_quota=1000 To limit each guest vCPU to 2 pCPUs worth of runtime per period: .. code-block:: console $ openstack flavor set $FLAVOR \ --property quota:cpu_period=1000 \ --property quota:cpu_quota=2000 Finally, to limit each guest vCPU to 0.5 pCPUs worth of runtime per period: .. code-block:: console $ openstack flavor set $FLAVOR \ --property quota:cpu_period=1000 \ --property quota:cpu_quota=500 .. 
note:: Smaller periods will ensure a consistent latency response at the expense of burst capacity. CPU shares and CPU quotas can work hand-in-hand. For example, if two instances were configured with :nova:extra-spec:`quota:cpu_shares`\ =1024 and :nova:extra-spec:`quota:cpu_period`\ =100000 (100mS) for both, then configuring both with a :nova:extra-spec:`quota:cpu_quota`\ =75000 (75mS) will result in them sharing a host CPU equally, with both getting exactly 50mS of CPU time. If instead only one instance gets :nova:extra-spec:`quota:cpu_quota`\ =75000 (75mS) while the other gets :nova:extra-spec:`quota:cpu_quota`\ =25000 (25mS), then the first will get 3/4 of the time per period. .. __: https://man7.org/linux/man-pages/man7/cgroups.7.html Memory Limits ^^^^^^^^^^^^^ The libvirt driver does not support memory limits. Disk I/O Limits ^^^^^^^^^^^^^^^ Libvirt enforces disk limits through maximum disk read, write and total bytes per second, using the :nova:extra-spec:`quota:disk_read_bytes_sec`, :nova:extra-spec:`quota:disk_write_bytes_sec` and :nova:extra-spec:`quota:disk_total_bytes_sec` extra specs, respectively. It can also enforce disk limits through maximum disk read, write and total I/O operations per second, using the :nova:extra-spec:`quota:disk_read_iops_sec`, :nova:extra-spec:`quota:disk_write_iops_sec` and :nova:extra-spec:`quota:disk_total_iops_sec` extra specs, respectively. For example, to set a maximum disk write of 10 MB/sec for a flavor: .. code-block:: console $ openstack flavor set $FLAVOR \ --property quota:disk_write_bytes_sec=10485760 Network bandwidth limits ^^^^^^^^^^^^^^^^^^^^^^^^ .. warning:: These limits are enforced via libvirt and will only work where the network is connect to the instance using a tap interface. It will not work for things like :doc:`SR-IOV VFs `. :neutron-doc:`Neutron's QoS policies ` should be preferred wherever possible. Libvirt enforces network bandwidth limits through inbound and outbound average, using the :nova:extra-spec:`quota:vif_inbound_average` and :nova:extra-spec:`quota:vif_outbound_average` extra specs, respectively. In addition, optional *peak* values, which specifies the maximum rate at which a bridge can send data (kB/s), and *burst* values, which specifies the amount of bytes that can be burst at peak speed (kilobytes), can be specified for both inbound and outbound traffic, using the :nova:extra-spec:`quota:vif_inbound_peak` / :nova:extra-spec:`quota:vif_outbound_peak` and :nova:extra-spec:`quota:vif_inbound_burst` / :nova:extra-spec:`quota:vif_outbound_burst` extra specs, respectively. For example, to configure **outbound** traffic to an average of 262 Mbit/s (32768 kB/s), a peak of 524 Mbit/s, and burst of 65536 kilobytes: .. code-block:: console $ openstack flavor set $FLAVOR \ --property quota:vif_outbound_average=32768 \ --property quota:vif_outbound_peak=65536 \ --property quota:vif_outbound_burst=65536 .. note:: The speed limit values in above example are specified in kilobytes/second, while the burst value is in kilobytes. VMWare ~~~~~~ In contrast to libvirt, the VMWare virt driver enforces resource limits using consistent terminology, specifically through relative allocation levels, hard upper limits and minimum reservations configured via, for example, the :nova:extra-spec:`quota:cpu_shares_level` / :nova:extra-spec:`quota:cpu_shares_share`, :nova:extra-spec:`quota:cpu_limit`, and :nova:extra-spec:`quota:cpu_reservation` extra specs, respectively. 
Allocation levels can be specified using one of ``high``, ``normal``, ``low``, or ``custom``. When ``custom`` is specified, the number of shares must be specified using e.g. :nova:extra-spec:`quota:cpu_shares_share`. There is no unit and the values are relative to other instances on the host. The upper limits and reservations, by comparison, are measure in resource-specific units, such as MHz for CPUs and will ensure that the instance never used more than or gets less than the specified amount of the resource. CPU limits ^^^^^^^^^^ CPU limits are configured via the :nova:extra-spec:`quota:cpu_shares_level` / :nova:extra-spec:`quota:cpu_shares_share`, :nova:extra-spec:`quota:cpu_limit`, and :nova:extra-spec:`quota:cpu_reservation` extra specs. For example, to configure a CPU allocation level of ``custom`` with 1024 shares: .. code-block:: console $ openstack flavor set $FLAVOR \ --quota:cpu_shares_level=custom \ --quota:cpu_shares_share=1024 To configure a minimum CPU allocation of 1024 MHz and a maximum of 2048 MHz: .. code-block:: console $ openstack flavor set $FLAVOR \ --quota:cpu_reservation=1024 \ --quota:cpu_limit=2048 Memory limits ^^^^^^^^^^^^^ Memory limits are configured via the :nova:extra-spec:`quota:memory_shares_level` / :nova:extra-spec:`quota:memory_shares_share`, :nova:extra-spec:`quota:memory_limit`, and :nova:extra-spec:`quota:memory_reservation` extra specs. For example, to configure a memory allocation level of ``custom`` with 1024 shares: .. code-block:: console $ openstack flavor set $FLAVOR \ --quota:memory_shares_level=custom \ --quota:memory_shares_share=1024 To configure a minimum memory allocation of 1024 MB and a maximum of 2048 MB: .. code-block:: console $ openstack flavor set $FLAVOR \ --quota:memory_reservation=1024 \ --quota:memory_limit=2048 Disk I/O limits ^^^^^^^^^^^^^^^ Disk I/O limits are configured via the :nova:extra-spec:`quota:disk_io_shares_level` / :nova:extra-spec:`quota:disk_io_shares_share`, :nova:extra-spec:`quota:disk_io_limit`, and :nova:extra-spec:`quota:disk_io_reservation` extra specs. For example, to configure a disk I/O allocation level of ``custom`` with 1024 shares: .. code-block:: console $ openstack flavor set $FLAVOR \ --quota:disk_io_shares_level=custom \ --quota:disk_io_shares_share=1024 To configure a minimum disk I/O allocation of 1024 MB and a maximum of 2048 MB: .. code-block:: console $ openstack flavor set $FLAVOR \ --quota:disk_io_reservation=1024 \ --quota:disk_io_limit=2048 Network bandwidth limits ^^^^^^^^^^^^^^^^^^^^^^^^ Network bandwidth limits are configured via the :nova:extra-spec:`quota:vif_shares_level` / :nova:extra-spec:`quota:vif_shares_share`, :nova:extra-spec:`quota:vif_limit`, and :nova:extra-spec:`quota:vif_reservation` extra specs. For example, to configure a network bandwidth allocation level of ``custom`` with 1024 shares: .. code-block:: console $ openstack flavor set $FLAVOR \ --quota:vif_shares_level=custom \ --quota:vif_shares_share=1024 To configure a minimum bandwidth allocation of 1024 Mbits/sec and a maximum of 2048 Mbits/sec: .. 
code-block:: console $ openstack flavor set $FLAVOR \ --quota:vif_reservation=1024 \ --quota:vif_limit=2048 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/root-wrap-reference.rst0000664000175000017500000001140100000000000022334 0ustar00zuulzuul00000000000000==================== Secure with rootwrap ==================== Rootwrap allows unprivileged users to safely run Compute actions as the root user. Compute previously used :command:`sudo` for this purpose, but this was difficult to maintain, and did not allow advanced filters. The :command:`rootwrap` command replaces :command:`sudo` for Compute. To use rootwrap, prefix the Compute command with :command:`nova-rootwrap`. For example: .. code-block:: console $ sudo nova-rootwrap /etc/nova/rootwrap.conf command A generic ``sudoers`` entry lets the Compute user run :command:`nova-rootwrap` as root. The :command:`nova-rootwrap` code looks for filter definition directories in its configuration file, and loads command filters from them. It then checks if the command requested by Compute matches one of those filters and, if so, executes the command (as root). If no filter matches, it denies the request. .. note:: Be aware of issues with using NFS and root-owned files. The NFS share must be configured with the ``no_root_squash`` option enabled, in order for rootwrap to work correctly. Rootwrap is fully controlled by the root user. The root user owns the sudoers entry which allows Compute to run a specific rootwrap executable as root, and only with a specific configuration file (which should also be owned by root). The :command:`nova-rootwrap` command imports the Python modules it needs from a cleaned, system-default PYTHONPATH. The root-owned configuration file points to root-owned filter definition directories, which contain root-owned filters definition files. This chain ensures that the Compute user itself is not in control of the configuration or modules used by the :command:`nova-rootwrap` executable. Configure rootwrap ~~~~~~~~~~~~~~~~~~ Configure rootwrap in the ``rootwrap.conf`` file. Because it is in the trusted security path, it must be owned and writable by only the root user. The ``rootwrap_config=entry`` parameter specifies the file's location in the sudoers entry and in the ``nova.conf`` configuration file. The ``rootwrap.conf`` file uses an INI file format with these sections and parameters: .. list-table:: **rootwrap.conf configuration options** :widths: 64 31 * - Configuration option=Default value - (Type) Description * - [DEFAULT] filters\_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap - (ListOpt) Comma-separated list of directories containing filter definition files. Defines where rootwrap filters are stored. Directories defined on this line should all exist, and be owned and writable only by the root user. If the root wrapper is not performing correctly, you can add a workaround option into the ``nova.conf`` configuration file. This workaround re-configures the root wrapper configuration to fall back to running commands as ``sudo``, and is a Kilo release feature. Including this workaround in your configuration file safeguards your environment from issues that can impair root wrapper performance. Tool changes that have impacted `Python Build Reasonableness (PBR) `__ for example, are a known issue that affects root wrapper performance. 
To set up this workaround, configure the ``disable_rootwrap`` option in the ``[workaround]`` section of the ``nova.conf`` configuration file. The filters definition files contain lists of filters that rootwrap will use to allow or deny a specific command. They are generally suffixed by ``.filters`` . Since they are in the trusted security path, they need to be owned and writable only by the root user. Their location is specified in the ``rootwrap.conf`` file. Filter definition files use an INI file format with a ``[Filters]`` section and several lines, each with a unique parameter name, which should be different for each filter you define: .. list-table:: **Filters configuration options** :widths: 72 39 * - Configuration option=Default value - (Type) Description * - [Filters] filter\_name=kpartx: CommandFilter, /sbin/kpartx, root - (ListOpt) Comma-separated list containing the filter class to use, followed by the Filter arguments (which vary depending on the Filter class selected). Configure the rootwrap daemon ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Administrators can use rootwrap daemon support instead of running rootwrap with :command:`sudo`. The rootwrap daemon reduces the overhead and performance loss that results from running ``oslo.rootwrap`` with :command:`sudo`. Each call that needs rootwrap privileges requires a new instance of rootwrap. The daemon prevents overhead from the repeated calls. The daemon does not support long running processes, however. To enable the rootwrap daemon, set ``use_rootwrap_daemon`` to ``True`` in the Compute service configuration file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/scheduling.rst0000664000175000017500000016243500000000000020611 0ustar00zuulzuul00000000000000================== Compute schedulers ================== Compute uses the ``nova-scheduler`` service to determine how to dispatch compute requests. For example, the ``nova-scheduler`` service determines on which host or node a VM should launch. You can configure the scheduler through a variety of options. In the default configuration, this scheduler considers hosts that meet all the following criteria: * Are in the requested :term:`Availability Zone` (``map_az_to_placement_aggregate``) placement pre filter. * Can service the request meaning the nova-compute service handling the target node is available and not disabled (``ComputeFilter``). * Satisfy the extra specs associated with the instance type (``ComputeCapabilitiesFilter``). * Satisfy any architecture, hypervisor type, or virtual machine mode properties specified on the instance's image properties (``ImagePropertiesFilter``). * Are on a different host than other instances of a group (if requested) (``ServerGroupAntiAffinityFilter``). * Are in a set of group hosts (if requested) (``ServerGroupAffinityFilter``). The scheduler chooses a new host when an instance is migrated, resized, evacuated or unshelved after being shelve offloaded. When evacuating instances from a host, the scheduler service honors the target host defined by the administrator on the :command:`nova evacuate` command. If a target is not defined by the administrator, the scheduler determines the target host. For information about instance evacuation, see :ref:`Evacuate instances `. .. _compute-scheduler-filters: Prefilters ---------- As of the Rocky release, the scheduling process includes a prefilter step to increase the efficiency of subsequent stages. 
These *prefilters* are largely optional and serve to augment the request that is sent to placement to reduce the set of candidate compute hosts based on attributes that placement is able to answer for us ahead of time. In addition to the prefilters listed here, also see :ref:`tenant-isolation-with-placement` and :ref:`availability-zones-with-placement`. Compute Image Type Support ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 20.0.0 (Train) Starting in the Train release, there is a prefilter available for excluding compute nodes that do not support the ``disk_format`` of the image used in a boot request. This behavior is enabled by setting :oslo.config:option:`scheduler.query_placement_for_image_type_support` to ``True``. For example, the libvirt driver, when using ceph as an ephemeral backend, does not support ``qcow2`` images (without an expensive conversion step). In this case (and especially if you have a mix of ceph and non-ceph backed computes), enabling this feature will ensure that the scheduler does not send requests to boot a ``qcow2`` image to computes backed by ceph. Compute Disabled Status Support ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 20.0.0 (Train) Starting in the Train release, there is a mandatory `pre-filter `_ which will exclude disabled compute nodes similar to (but does not fully replace) the `ComputeFilter`_. Compute node resource providers with the ``COMPUTE_STATUS_DISABLED`` trait will be excluded as scheduling candidates. The trait is managed by the ``nova-compute`` service and should mirror the ``disabled`` status on the related compute service record in the `os-services`_ API. For example, if a compute service's status is ``disabled``, the related compute node resource provider(s) for that service should have the ``COMPUTE_STATUS_DISABLED`` trait. When the service status is ``enabled`` the ``COMPUTE_STATUS_DISABLED`` trait shall be removed. If the compute service is down when the status is changed, the trait will be synchronized by the compute service when it is restarted. Similarly, if an error occurs when trying to add or remove the trait on a given resource provider, the trait will be synchronized when the ``update_available_resource`` periodic task runs - which is controlled by the :oslo.config:option:`update_resources_interval` configuration option. .. _os-services: https://docs.openstack.org/api-ref/compute/#compute-services-os-services Isolate Aggregates ~~~~~~~~~~~~~~~~~~ .. versionadded:: 20.0.0 (Train) Starting in the Train release, there is an optional placement pre-request filter :doc:`/reference/isolate-aggregates` When enabled, the traits required in the server's flavor and image must be at least those required in an aggregate's metadata in order for the server to be eligible to boot on hosts in that aggregate. The Filter Scheduler -------------------- .. versionchanged:: 23.0.0 (Wallaby) Support for custom scheduler drivers was removed. Only the filter scheduler is now supported by nova. Nova's scheduler, known as the *filter scheduler*, supports filtering and weighting to make informed decisions on where a new instance should be created. When the scheduler receives a request for a resource, it first applies filters to determine which hosts are eligible for consideration when dispatching a resource. Filters are binary: either a host is accepted by the filter, or it is rejected. 
Hosts that are accepted by the filter are then processed by a different algorithm to decide which hosts to use for that request, described in the :ref:`weights` section. **Filtering** .. figure:: /_static/images/filtering-workflow-1.png The :oslo.config:option:`filter_scheduler.available_filters` config option provides the Compute service with the list of the filters that are available for use by the scheduler. The default setting specifies all of the filters that are included with the Compute service. This configuration option can be specified multiple times. For example, if you implemented your own custom filter in Python called ``myfilter.MyFilter`` and you wanted to use both the built-in filters and your custom filter, your :file:`nova.conf` file would contain: .. code-block:: ini [filter_scheduler] available_filters = nova.scheduler.filters.all_filters available_filters = myfilter.MyFilter The :oslo.config:option:`filter_scheduler.enabled_filters` configuration option in ``nova.conf`` defines the list of filters that are applied by the ``nova-scheduler`` service. Filters ------- The following sections describe the available compute filters. Filters are configured using the following config options: - :oslo.config:option:`filter_scheduler.available_filters` - Defines filter classes made available to the scheduler. This setting can be used multiple times. - :oslo.config:option:`filter_scheduler.enabled_filters` - Of the available filters, defines those that the scheduler uses by default. Each filter selects hosts in a different way and has different costs. The order of :oslo.config:option:`filter_scheduler.enabled_filters` affects scheduling performance. The general suggestion is to filter out invalid hosts as soon as possible to avoid unnecessary costs. We can sort :oslo.config:option:`filter_scheduler.enabled_filters` items by their costs in reverse order. For example, ``ComputeFilter`` is better before any resource calculating filters like ``NUMATopologyFilter``. .. _AggregateImagePropertiesIsolation: ``AggregateImagePropertiesIsolation`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionchanged:: 12.0.0 (Liberty) Prior to 12.0.0 Liberty, it was possible to specify and use arbitrary metadata with this filter. Starting in Liberty, nova only parses :glance-doc:`standard metadata `. If you wish to use arbitrary metadata, consider using the :ref:`AggregateInstanceExtraSpecsFilter` filter instead. Matches properties defined in an image's metadata against those of aggregates to determine host matches: * If a host belongs to an aggregate and the aggregate defines one or more metadata that matches an image's properties, that host is a candidate to boot the image's instance. * If a host does not belong to any aggregate, it can boot instances from all images. For example, the following aggregate ``myWinAgg`` has the Windows operating system as metadata (named 'windows'): .. code-block:: console $ openstack aggregate show myWinAgg +-------------------+----------------------------+ | Field | Value | +-------------------+----------------------------+ | availability_zone | zone1 | | created_at | 2017-01-01T15:36:44.000000 | | deleted | False | | deleted_at | None | | hosts | ['sf-devel'] | | id | 1 | | name | myWinAgg | | properties | os_distro='windows' | | updated_at | None | +-------------------+----------------------------+ In this example, because the following Win-2012 image has the ``windows`` property, it boots on the ``sf-devel`` host (all other filters being equal): .. 
code-block:: console $ openstack image show Win-2012 +------------------+------------------------------------------------------+ | Field | Value | +------------------+------------------------------------------------------+ | checksum | ee1eca47dc88f4879d8a229cc70a07c6 | | container_format | bare | | created_at | 2016-12-13T09:30:30Z | | disk_format | qcow2 | | ... | | name | Win-2012 | | ... | | properties | os_distro='windows' | | ... | You can configure the ``AggregateImagePropertiesIsolation`` filter by using the following options in the ``nova.conf`` file: - :oslo.config:option:`filter_scheduler.aggregate_image_properties_isolation_namespace` - :oslo.config:option:`filter_scheduler.aggregate_image_properties_isolation_separator` .. note:: This filter has limitations as described in `bug 1677217 `_ which are addressed in placement :doc:`/reference/isolate-aggregates` request filter. Refer to :doc:`/admin/aggregates` for more information. .. _AggregateInstanceExtraSpecsFilter: ``AggregateInstanceExtraSpecsFilter`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Matches properties defined in extra specs for an instance type against admin-defined properties on a host aggregate. Works with specifications that are scoped with ``aggregate_instance_extra_specs``. Multiple values can be given, as a comma-separated list. For backward compatibility, also works with non-scoped specifications; this action is highly discouraged because it conflicts with :ref:`ComputeCapabilitiesFilter` filter when you enable both filters. Refer to :doc:`/admin/aggregates` for more information. .. _AggregateIoOpsFilter: ``AggregateIoOpsFilter`` ~~~~~~~~~~~~~~~~~~~~~~~~ Filters host by disk allocation with a per-aggregate ``max_io_ops_per_host`` value. If the per-aggregate value is not found, the value falls back to the global setting defined by the :oslo.config:option:`filter_scheduler.max_io_ops_per_host` config option. If the host is in more than one aggregate and more than one value is found, the minimum value will be used. Refer to :doc:`/admin/aggregates` and :ref:`IoOpsFilter` for more information. .. _AggregateMultiTenancyIsolation: ``AggregateMultiTenancyIsolation`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Ensures hosts in tenant-isolated host aggregates will only be available to a specified set of tenants. If a host is in an aggregate that has the ``filter_tenant_id`` metadata key, the host can build instances from only that tenant or comma-separated list of tenants. A host can be in different aggregates. If a host does not belong to an aggregate with the metadata key, the host can build instances from all tenants. This does not restrict the tenant from creating servers on hosts outside the tenant-isolated aggregate. For example, consider there are two available hosts for scheduling, ``HostA`` and ``HostB``. ``HostB`` is in an aggregate isolated to tenant ``X``. A server create request from tenant ``X`` will result in either ``HostA`` *or* ``HostB`` as candidates during scheduling. A server create request from another tenant ``Y`` will result in only ``HostA`` being a scheduling candidate since ``HostA`` is not part of the tenant-isolated aggregate. .. _AggregateNumInstancesFilter: ``AggregateNumInstancesFilter`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Filters host in an aggregate by number of instances with a per-aggregate ``max_instances_per_host`` value. If the per-aggregate value is not found, the value falls back to the global setting defined by the :oslo.config:option:`filter_scheduler.max_instances_per_host` config option. 
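The per-aggregate value is read from the aggregate metadata, so it can be set with the usual aggregate commands. For example, a sketch assuming an aggregate named ``myAggregate``:

.. code-block:: console

   $ openstack aggregate set --property max_instances_per_host=10 myAggregate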
If the host is in more than one aggregate and thus more than one value is found, the minimum value will be used. Refer to :doc:`/admin/aggregates` and :ref:`NumInstancesFilter` for more information. .. _AggregateTypeAffinityFilter: ``AggregateTypeAffinityFilter`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Filters hosts in an aggregate if the name of the instance's flavor matches that of the ``instance_type`` key set in the aggregate's metadata or if the ``instance_type`` key is not set. The value of the ``instance_type`` metadata entry is a string that may contain either a single ``instance_type`` name or a comma-separated list of ``instance_type`` names, such as ``m1.nano`` or ``m1.nano,m1.small``. .. note:: Instance types are a historical name for flavors. Refer to :doc:`/admin/aggregates` for more information. ``AllHostsFilter`` ~~~~~~~~~~~~~~~~~~ This is a no-op filter. It does not eliminate any of the available hosts. .. _ComputeCapabilitiesFilter: ``ComputeCapabilitiesFilter`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Filters hosts by matching properties defined in flavor extra specs against compute capabilities. If an extra specs key contains a colon (``:``), anything before the colon is treated as a namespace and anything after the colon is treated as the key to be matched. If a namespace is present and is not ``capabilities``, the filter ignores the namespace. For example ``capabilities:cpu_info:features`` is a valid scope format. For backward compatibility, the filter also treats the extra specs key as the key to be matched if no namespace is present; this action is highly discouraged because it conflicts with :ref:`AggregateInstanceExtraSpecsFilter` filter when you enable both filters. The extra specifications can have an operator at the beginning of the value string of a key/value pair. If there is no operator specified, then a default operator of ``s==`` is used. Valid operators are: * ``=`` (equal to or greater than as a number; same as vcpus case) * ``==`` (equal to as a number) * ``!=`` (not equal to as a number) * ``>=`` (greater than or equal to as a number) * ``<=`` (less than or equal to as a number) * ``s==`` (equal to as a string) * ``s!=`` (not equal to as a string) * ``s>=`` (greater than or equal to as a string) * ``s>`` (greater than as a string) * ``s<=`` (less than or equal to as a string) * ``s<`` (less than as a string) * ```` (substring) * ```` (all elements contained in collection) * ```` (find one of these) Examples are: ``>= 5``, ``s== 2.1.0``, `` gcc``, `` aes mmx``, and `` fpu gpu`` Some of attributes that can be used as useful key and their values contains: * ``free_ram_mb`` (compared with a number, values like ``>= 4096``) * ``free_disk_mb`` (compared with a number, values like ``>= 10240``) * ``host`` (compared with a string, values like `` compute``, ``s== compute_01``) * ``hypervisor_type`` (compared with a string, values like ``s== QEMU``, ``s== ironic``) * ``hypervisor_version`` (compared with a number, values like ``>= 1005003``, ``== 2000000``) * ``num_instances`` (compared with a number, values like ``<= 10``) * ``num_io_ops`` (compared with a number, values like ``<= 5``) * ``vcpus_total`` (compared with a number, values like ``= 48``, ``>=24``) * ``vcpus_used`` (compared with a number, values like ``= 0``, ``<= 10``) Some virt drivers support reporting CPU traits to the Placement service. 
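For example, an illustrative use of the ``capabilities`` scope to require a QEMU hypervisor for a flavor (which keys are actually reported depends on the virt driver in use):

.. code-block:: console

   $ openstack flavor set $FLAVOR \
       --property capabilities:hypervisor_type='s== QEMU'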
With that feature available, you should consider using traits in flavors instead of ``ComputeCapabilitiesFilter`` because traits provide consistent naming for CPU features in some virt drivers and querying traits is efficient. For more details, refer to :doc:`/user/support-matrix`, :ref:`Required traits `, :ref:`Forbidden traits ` and `Report CPU features to the Placement service `_. Also refer to `Compute capabilities as traits`_. .. _ComputeFilter: ``ComputeFilter`` ----------------- Passes all hosts that are operational and enabled. In general, you should always enable this filter. ``DifferentHostFilter`` ----------------------- Schedules the instance on a different host from a set of instances. To take advantage of this filter, the requester must pass a scheduler hint, using ``different_host`` as the key and a list of instance UUIDs as the value. This filter is the opposite of the ``SameHostFilter``. For example, when using the :command:`openstack server create` command, use the ``--hint`` flag: .. code-block:: console $ openstack server create \ --image cedef40a-ed67-4d10-800e-17455edce175 --flavor 1 \ --hint different_host=a0cf03a5-d921-4877-bb5c-86d26cf818e1 \ --hint different_host=8c19174f-4220-44f0-824a-cd1eeef10287 \ server-1 With the API, use the ``os:scheduler_hints`` key. For example: .. code-block:: json { "server": { "name": "server-1", "imageRef": "cedef40a-ed67-4d10-800e-17455edce175", "flavorRef": "1" }, "os:scheduler_hints": { "different_host": [ "a0cf03a5-d921-4877-bb5c-86d26cf818e1", "8c19174f-4220-44f0-824a-cd1eeef10287" ] } } .. _ImagePropertiesFilter: ``ImagePropertiesFilter`` ~~~~~~~~~~~~~~~~~~~~~~~~~ Filters hosts based on properties defined on the instance's image. It passes hosts that can support the specified image properties contained in the instance. Properties include the architecture, hypervisor type, hypervisor version, and virtual machine mode. For example, an instance might require a host that runs an ARM-based processor, and QEMU as the hypervisor. You can decorate an image with these properties by using: .. code-block:: console $ openstack image set --architecture arm --property img_hv_type=qemu \ img-uuid The image properties that the filter checks for are: ``hw_architecture`` Describes the machine architecture required by the image. Examples are ``i686``, ``x86_64``, ``arm``, and ``ppc64``. .. versionchanged:: 12.0.0 (Liberty) This was previously called ``architecture``. ``img_hv_type`` Describes the hypervisor required by the image. Examples are ``qemu``. .. note:: ``qemu`` is used for both QEMU and KVM hypervisor types. .. versionchanged:: 12.0.0 (Liberty) This was previously called ``hypervisor_type``. ``img_hv_requested_version`` Describes the hypervisor version required by the image. The property is supported for HyperV hypervisor type only. It can be used to enable support for multiple hypervisor versions, and to prevent instances with newer HyperV tools from being provisioned on an older version of a hypervisor. If available, the property value is compared to the hypervisor version of the compute host. To filter the hosts by the hypervisor version, add the ``img_hv_requested_version`` property on the image as metadata and pass an operator and a required hypervisor version as its value: .. code-block:: console $ openstack image set --property hypervisor_type=qemu --property \ hypervisor_version_requires=">=6000" img-uuid .. versionchanged:: 12.0.0 (Liberty) This was previously called ``hypervisor_version_requires``. 
``hw_vm_mode`` describes the hypervisor application binary interface (ABI) required by the image. Examples are ``xen`` for Xen 3.0 paravirtual ABI, ``hvm`` for native ABI, and ``exe`` for container virt executable ABI. .. versionchanged:: 12.0.0 (Liberty) This was previously called ``vm_mode``. ``IsolatedHostsFilter`` ~~~~~~~~~~~~~~~~~~~~~~~ Allows the admin to define a special (isolated) set of images and a special (isolated) set of hosts, such that the isolated images can only run on the isolated hosts, and the isolated hosts can only run isolated images. The flag ``restrict_isolated_hosts_to_isolated_images`` can be used to force isolated hosts to only run isolated images. The logic within the filter depends on the ``restrict_isolated_hosts_to_isolated_images`` config option, which defaults to True. When True, a volume-backed instance will not be put on an isolated host. When False, a volume-backed instance can go on any host, isolated or not. The admin must specify the isolated set of images and hosts using the :oslo.config:option:`filter_scheduler.isolated_hosts` and :oslo.config:option:`filter_scheduler.isolated_images` config options. For example: .. code-block:: ini [filter_scheduler] isolated_hosts = server1, server2 isolated_images = 342b492c-128f-4a42-8d3a-c5088cf27d13, ebd267a6-ca86-4d6c-9a0e-bd132d6b7d09 You can also specify that isolated host only be used for specific isolated images using the :oslo.config:option:`filter_scheduler.restrict_isolated_hosts_to_isolated_images` config option. .. _IoOpsFilter: ``IoOpsFilter`` ~~~~~~~~~~~~~~~ Filters hosts by concurrent I/O operations on it. Hosts with too many concurrent I/O operations will be filtered out. The :oslo.config:option:`filter_scheduler.max_io_ops_per_host` option specifies the maximum number of I/O intensive instances allowed to run on a host. A host will be ignored by the scheduler if more than :oslo.config:option:`filter_scheduler.max_io_ops_per_host` instances in build, resize, snapshot, migrate, rescue or unshelve task states are running on it. ``JsonFilter`` ~~~~~~~~~~~~~~~ .. warning:: This filter is not enabled by default and not comprehensively tested, and thus could fail to work as expected in non-obvious ways. Furthermore, the filter variables are based on attributes of the `HostState`_ class which could change from release to release so usage of this filter is generally not recommended. Consider using other filters such as the :ref:`ImagePropertiesFilter` or :ref:`traits-based scheduling `. Allows a user to construct a custom filter by passing a scheduler hint in JSON format. The following operators are supported: * ``=`` * ``<`` * ``>`` * ``in`` * ``<=`` * ``>=`` * ``not`` * ``or`` * ``and`` Unlike most other filters that rely on information provided via scheduler hints, this filter filters on attributes in the `HostState`_ class such as the following variables: * ``$free_ram_mb`` * ``$free_disk_mb`` * ``$hypervisor_hostname`` * ``$total_usable_ram_mb`` * ``$vcpus_total`` * ``$vcpus_used`` Using the :command:`openstack server create` command, use the ``--hint`` flag: .. code-block:: console $ openstack server create --image 827d564a-e636-4fc4-a376-d36f7ebe1747 \ --flavor 1 --hint query='[">=","$free_ram_mb",1024]' server1 With the API, use the ``os:scheduler_hints`` key: .. code-block:: json { "server": { "name": "server-1", "imageRef": "cedef40a-ed67-4d10-800e-17455edce175", "flavorRef": "1" }, "os:scheduler_hints": { "query": "[\">=\",\"$free_ram_mb\",1024]" } } .. 
_HostState: https://opendev.org/openstack/nova/src/branch/master/nova/scheduler/host_manager.py ``MetricsFilter`` ~~~~~~~~~~~~~~~~~ Use in collaboration with the ``MetricsWeigher`` weigher. Filters hosts that do not report the metrics specified in :oslo.config:option:`metrics.weight_setting`, thus ensuring the metrics weigher will not fail due to these hosts. .. _NUMATopologyFilter: ``NUMATopologyFilter`` ~~~~~~~~~~~~~~~~~~~~~~ Filters hosts based on the NUMA topology that was specified for the instance through the use of flavor ``extra_specs`` in combination with the image properties, as described in detail in :doc:`/admin/cpu-topologies`. The filter will try to match the exact NUMA cells of the instance to those of the host. It will consider the standard over-subscription limits for each host NUMA cell, and provide limits to the compute host accordingly. This filter is essential if using instances with features that rely on NUMA, such as instance NUMA topologies or CPU pinning. .. note:: If instance has no topology defined, it will be considered for any host. If instance has a topology defined, it will be considered only for NUMA capable hosts. .. _NumInstancesFilter: ``NumInstancesFilter`` ~~~~~~~~~~~~~~~~~~~~~~ Filters hosts based on the number of instances running on them. Hosts that have more instances running than specified by the :oslo.config:option:`filter_scheduler.max_instances_per_host` config option are filtered out. .. _PciPassthroughFilter: ``PciPassthroughFilter`` ~~~~~~~~~~~~~~~~~~~~~~~~ The filter schedules instances on a host if the host has devices that meet the device requests in the ``extra_specs`` attribute for the flavor. This filter is essential if using instances with PCI device requests or where SR-IOV-based networking is in use on hosts. ``SameHostFilter`` ~~~~~~~~~~~~~~~~~~ Schedules an instance on the same host as all other instances in a set of instances. To take advantage of this filter, the requester must pass a scheduler hint, using ``same_host`` as the key and a list of instance UUIDs as the value. This filter is the opposite of the ``DifferentHostFilter``. For example, when using the :command:`openstack server create` command, use the ``--hint`` flag: .. code-block:: console $ openstack server create \ --image cedef40a-ed67-4d10-800e-17455edce175 --flavor 1 \ --hint same_host=a0cf03a5-d921-4877-bb5c-86d26cf818e1 \ --hint same_host=8c19174f-4220-44f0-824a-cd1eeef10287 \ server-1 With the API, use the ``os:scheduler_hints`` key: .. code-block:: json { "server": { "name": "server-1", "imageRef": "cedef40a-ed67-4d10-800e-17455edce175", "flavorRef": "1" }, "os:scheduler_hints": { "same_host": [ "a0cf03a5-d921-4877-bb5c-86d26cf818e1", "8c19174f-4220-44f0-824a-cd1eeef10287" ] } } .. _ServerGroupAffinityFilter: ``ServerGroupAffinityFilter`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Restricts instances belonging to a server group to the same host(s). To take advantage of this filter, the requester must create a server group with an ``affinity`` policy, and pass a scheduler hint, using ``group`` as the key and the server group UUID as the value. For example, when using the :command:`openstack server create` command, use the ``--hint`` flag: .. code-block:: console $ openstack server group create --policy affinity group-1 $ openstack server create --image IMAGE_ID --flavor 1 \ --hint group=SERVER_GROUP_UUID server-1 For more information on server groups, refer to :doc:`/user/server-groups`. .. 
_ServerGroupAntiAffinityFilter: ``ServerGroupAntiAffinityFilter`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Restricts instances belonging to a server group to separate hosts. To take advantage of this filter, the requester must create a server group with an ``anti-affinity`` policy, and pass a scheduler hint, using ``group`` as the key and the server group UUID as the value. For example, when using the :command:`openstack server create` command, use the ``--hint`` flag: .. code-block:: console $ openstack server group create --policy anti-affinity group-1 $ openstack server create --image IMAGE_ID --flavor 1 \ --hint group=SERVER_GROUP_UUID server-1 For more information on server groups, refer to :doc:`/user/server-groups`. ``SimpleCIDRAffinityFilter`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. todo:: Does this filter still work with neutron? Schedules the instance based on host IP subnet range. To take advantage of this filter, the requester must specify a range of valid IP address in CIDR format, by passing two scheduler hints: ``build_near_host_ip`` The first IP address in the subnet (for example, ``192.168.1.1``) ``cidr`` The CIDR that corresponds to the subnet (for example, ``/24``) When using the :command:`openstack server create` command, use the ``--hint`` flag. For example, to specify the IP subnet ``192.168.1.1/24``: .. code-block:: console $ openstack server create \ --image cedef40a-ed67-4d10-800e-17455edce175 --flavor 1 \ --hint build_near_host_ip=192.168.1.1 --hint cidr=/24 \ server-1 With the API, use the ``os:scheduler_hints`` key: .. code-block:: json { "server": { "name": "server-1", "imageRef": "cedef40a-ed67-4d10-800e-17455edce175", "flavorRef": "1" }, "os:scheduler_hints": { "build_near_host_ip": "192.168.1.1", "cidr": "24" } } .. _weights: Weights ------- .. figure:: /_static/images/nova-weighting-hosts.png When resourcing instances, the filter scheduler filters and weights each host in the list of acceptable hosts. Each time the scheduler selects a host, it virtually consumes resources on it and subsequent selections are adjusted accordingly. This process is useful when the customer asks for the same large amount of instances because a weight is computed for each requested instance. In order to prioritize one weigher against another, all the weighers have to define a multiplier that will be applied before computing the weight for a node. All the weights are normalized beforehand so that the multiplier can be applied easily.Therefore the final weight for the object will be:: weight = w1_multiplier * norm(w1) + w2_multiplier * norm(w2) + ... Hosts are weighted based on the following config options: - :oslo.config:option:`filter_scheduler.host_subset_size` - :oslo.config:option:`filter_scheduler.weight_classes` ``RAMWeigher`` ~~~~~~~~~~~~~~ Compute weight based on available RAM on the compute node. Sort with the largest weight winning. If the multiplier, :oslo.config:option:`filter_scheduler.ram_weight_multiplier`, is negative, the host with least RAM available will win (useful for stacking hosts, instead of spreading). Starting with the Stein release, if per-aggregate value with the key ``ram_weight_multiplier`` is found, this value would be chosen as the ram weight multiplier. Otherwise, it will fall back to the :oslo.config:option:`filter_scheduler.ram_weight_multiplier`. If more than one value is found for a host in aggregate metadata, the minimum value will be used. ``CPUWeigher`` ~~~~~~~~~~~~~~ Compute weight based on available vCPUs on the compute node. 
Sort with the largest weight winning. If the multiplier, :oslo.config:option:`filter_scheduler.cpu_weight_multiplier`, is negative, the host with least CPUs available will win (useful for stacking hosts, instead of spreading). Starting with the Stein release, if per-aggregate value with the key ``cpu_weight_multiplier`` is found, this value would be chosen as the cpu weight multiplier. Otherwise, it will fall back to the :oslo.config:option:`filter_scheduler.cpu_weight_multiplier`. If more than one value is found for a host in aggregate metadata, the minimum value will be used. ``DiskWeigher`` ~~~~~~~~~~~~~~~ Hosts are weighted and sorted by free disk space with the largest weight winning. If the multiplier is negative, the host with less disk space available will win (useful for stacking hosts, instead of spreading). Starting with the Stein release, if per-aggregate value with the key ``disk_weight_multiplier`` is found, this value would be chosen as the disk weight multiplier. Otherwise, it will fall back to the :oslo.config:option:`filter_scheduler.disk_weight_multiplier`. If more than one value is found for a host in aggregate metadata, the minimum value will be used. ``MetricsWeigher`` ~~~~~~~~~~~~~~~~~~ This weigher can compute the weight based on the compute node host's various metrics. The to-be weighed metrics and their weighing ratio are specified using the :oslo.config:option:`metrics.weight_setting` config option. For example: .. code-block:: ini [metrics] weight_setting = name1=1.0, name2=-1.0 You can specify the metrics that are required, along with the weight of those that are not and are not available using the :oslo.config:option:`metrics.required` and :oslo.config:option:`metrics.weight_of_unavailable` config options, respectively. Starting with the Stein release, if per-aggregate value with the key ``metrics_weight_multiplier`` is found, this value would be chosen as the metrics weight multiplier. Otherwise, it will fall back to the :oslo.config:option:`metrics.weight_multiplier`. If more than one value is found for a host in aggregate metadata, the minimum value will be used. ``IoOpsWeigher`` ~~~~~~~~~~~~~~~~ The weigher can compute the weight based on the compute node host's workload. This is calculated by examining the number of instances in the ``building`` ``vm_state`` or in one of the following ``task_state``\ 's: ``resize_migrating``, ``rebuilding``, ``resize_prep``, ``image_snapshot``, ``image_backup``, ``rescuing``, or ``unshelving``. The default is to preferably choose light workload compute hosts. If the multiplier is positive, the weigher prefers choosing heavy workload compute hosts, the weighing has the opposite effect of the default. Starting with the Stein release, if per-aggregate value with the key ``io_ops_weight_multiplier`` is found, this value would be chosen as the IO ops weight multiplier. Otherwise, it will fall back to the :oslo.config:option:`filter_scheduler.io_ops_weight_multiplier`. If more than one value is found for a host in aggregate metadata, the minimum value will be used. ``PCIWeigher`` ~~~~~~~~~~~~~~ Compute a weighting based on the number of PCI devices on the host and the number of PCI devices requested by the instance. For example, given three hosts - one with a single PCI device, one with many PCI devices, and one with no PCI devices - nova should prioritise these differently based on the demands of the instance. If the instance requests a single PCI device, then the first of the hosts should be preferred. 
Similarly, if the instance requests multiple PCI devices, then the second of these hosts would be preferred. Finally, if the instance does not request a PCI device, then the last of these hosts should be preferred. For this to be of any value, at least one of the :ref:`PciPassthroughFilter` or :ref:`NUMATopologyFilter` filters must be enabled. Starting with the Stein release, if per-aggregate value with the key ``pci_weight_multiplier`` is found, this value would be chosen as the pci weight multiplier. Otherwise, it will fall back to the :oslo.config:option:`filter_scheduler.pci_weight_multiplier`. If more than one value is found for a host in aggregate metadata, the minimum value will be used. .. important:: Only positive values are allowed for the multiplier of this weigher as a negative value would force non-PCI instances away from non-PCI hosts, thus, causing future scheduling issues. ``ServerGroupSoftAffinityWeigher`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The weigher can compute the weight based on the number of instances that run on the same server group. The largest weight defines the preferred host for the new instance. For the multiplier only a positive value is allowed for the calculation. Starting with the Stein release, if per-aggregate value with the key ``soft_affinity_weight_multiplier`` is found, this value would be chosen as the soft affinity weight multiplier. Otherwise, it will fall back to the :oslo.config:option:`filter_scheduler.soft_affinity_weight_multiplier`. If more than one value is found for a host in aggregate metadata, the minimum value will be used. ``ServerGroupSoftAntiAffinityWeigher`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The weigher can compute the weight based on the number of instances that run on the same server group as a negative value. The largest weight defines the preferred host for the new instance. For the multiplier only a positive value is allowed for the calculation. Starting with the Stein release, if per-aggregate value with the key ``soft_anti_affinity_weight_multiplier`` is found, this value would be chosen as the soft anti-affinity weight multiplier. Otherwise, it will fall back to the :oslo.config:option:`filter_scheduler.soft_anti_affinity_weight_multiplier`. If more than one value is found for a host in aggregate metadata, the minimum value will be used. .. _build-failure-weigher: ``BuildFailureWeigher`` ~~~~~~~~~~~~~~~~~~~~~~~ Weigh hosts by the number of recent failed boot attempts. It considers the build failure counter and can negatively weigh hosts with recent failures. This avoids taking computes fully out of rotation. Starting with the Stein release, if per-aggregate value with the key ``build_failure_weight_multiplier`` is found, this value would be chosen as the build failure weight multiplier. Otherwise, it will fall back to the :oslo.config:option:`filter_scheduler.build_failure_weight_multiplier`. If more than one value is found for a host in aggregate metadata, the minimum value will be used. .. important:: The :oslo.config:option:`filter_scheduler.build_failure_weight_multiplier` option defaults to a very high value. This is intended to offset weight given by other enabled weighers due to available resources, giving this weigher priority. However, not all build failures imply a problem with the host itself - it could be user error - but the failure will still be counted. 
If you find hosts are frequently reporting build failures and effectively being excluded during scheduling, you may wish to lower the value of the multiplier. .. _cross-cell-weigher: ``CrossCellWeigher`` ~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 21.0.0 (Ussuri) Weighs hosts based on which cell they are in. "Local" cells are preferred when moving an instance. If per-aggregate value with the key ``cross_cell_move_weight_multiplier`` is found, this value would be chosen as the cross-cell move weight multiplier. Otherwise, it will fall back to the :oslo.config:option:`filter_scheduler.cross_cell_move_weight_multiplier`. If more than one value is found for a host in aggregate metadata, the minimum value will be used. ``HypervisorVersionWeigher`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 28.0.0 (Bobcat) Weigh hosts by their relative hypervisor version reported by the virt driver. While the hypervisor_version filed for all virt drivers is an int, each nova virt driver uses a different algorithm to convert the hypervisor-specific version sequence into an int. As such the values are not directly comparable between hosts with different hypervisors. For example, the ironic virt driver uses the ironic API micro-version as the hypervisor version for a given node. The libvirt driver uses the libvirt version i.e. Libvirt ``7.1.123`` becomes ``700100123`` vs Ironic ``1.82`` becomes ``1``. If you have a mixed virt driver deployment in the ironic vs non-ironic case nothing special needs to be done. ironic nodes are scheduled using custom resource classes so ironic flavors will never match non-ironic compute nodes. If a deployment has multiple non-ironic virt drivers it is recommended to use aggregates to group hosts by virt driver. While this is not strictly required, it is desirable to avoid bias towards one virt driver. see :ref:`filtering_hosts_by_isolating_aggregates` and :ref:`AggregateImagePropertiesIsolation` for more information. The default behavior of the HypervisorVersionWeigher is to select newer hosts. If you prefer to invert the behavior set the :oslo.config:option:`filter_scheduler.hypervisor_version_weight_multiplier` option to a negative number and the weighing has the opposite effect of the default. ``NumInstancesWeigher`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 28.0.0 (Bobcat) This weigher compares hosts and orders them based on their number of instances respectively. By default the weigher is doing nothing but you can change its behaviour by modifying the value of :oslo.config:option:`filter_scheduler.num_instances_weight_multiplier`. A positive value will favor hosts with a larger number of instances (packing strategy) while a negative value will follow a spread strategy that will favor hosts with the lesser number of instances. ``ImagePropertiesWeigher`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 31.0.0 (Epoxy) This weigher compares hosts and orders them based on the existing instances image properties respectively. By default the weigher is doing nothing but you can change its behaviour by modifying the value of :oslo.config:option:`filter_scheduler.image_props_weight_multiplier`. A positive value will favor hosts with the same image properties (packing strategy) while a negative value will follow a spread strategy that will favor hosts not already having instances with those image properties. You can also use :oslo.config:option:`filter_scheduler.image_props_weight_setting` config option for defining the exact properties you would like to weigh. 
For example, if you configure ``os_distro`` to 2 and ``hw_machine_type`` to 0, then the latter property won't be weighed while the former will count twice. Say you configure ``os_distro=10,os_secure_boot=1,os_require_quiesce=0``, then when requesting an instance with an image using those properties, then, for each of the instances already running on the host having an image using at least one of those properties, a match of the same ``os_distro`` value (eg. ``windows`` or ``linux``) would count 10 times more than a match of the same ``os_secure_boot`` value (eg. ``true`` or ``false``), while any matches about the same ``os_require_quiesce`` value wouldn't count. If you define :oslo.config:option:`filter_scheduler.image_props_weight_setting` then any property from the image used for booting wouldn't be counted if not provided in the option value. The resulted host weight would then be multiplied by the value of :oslo.config:option:`filter_scheduler.image_props_weight_multiplier`. Utilization-aware scheduling ---------------------------- .. warning:: This feature is poorly tested and may not work as expected. It may be removed in a future release. Use at your own risk. It is possible to schedule instances using advanced scheduling decisions. These decisions are made based on enhanced usage statistics encompassing data like memory cache utilization, memory bandwidth utilization, or network bandwidth utilization. This is disabled by default. The administrator can configure how the metrics are weighted in the configuration file by using the :oslo.config:option:`metrics.weight_setting` config option. For example to configure ``metric1`` with ``ratio1`` and ``metric2`` with ``ratio2``: .. code-block:: ini [metrics] weight_setting = "metric1=ratio1, metric2=ratio2" Allocation ratios ----------------- Allocation ratios allow for the overcommit of host resources. The following configuration options exist to control allocation ratios per compute node to support this overcommit of resources: * :oslo.config:option:`cpu_allocation_ratio` allows overriding the ``VCPU`` inventory allocation ratio for a compute node * :oslo.config:option:`ram_allocation_ratio` allows overriding the ``MEMORY_MB`` inventory allocation ratio for a compute node * :oslo.config:option:`disk_allocation_ratio` allows overriding the ``DISK_GB`` inventory allocation ratio for a compute node Prior to the 19.0.0 Stein release, if left unset, the ``cpu_allocation_ratio`` defaults to 16.0, the ``ram_allocation_ratio`` defaults to 1.5, and the ``disk_allocation_ratio`` defaults to 1.0. 
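For example, a deployer who wants to pin explicit overcommit values on a
particular compute node can set these options in that node's
:file:`nova.conf`. The sketch below simply restates the pre-Stein defaults
mentioned above; the values themselves are illustrative only:

.. code-block:: ini

   [DEFAULT]
   # Override the allocation ratios reported to placement for this node.
   cpu_allocation_ratio = 16.0
   ram_allocation_ratio = 1.5
   disk_allocation_ratio = 1.0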
Starting with the 19.0.0 Stein release, the following configuration options control the initial allocation ratio values for a compute node: * :oslo.config:option:`initial_cpu_allocation_ratio` the initial VCPU inventory allocation ratio for a new compute node record, defaults to 16.0 * :oslo.config:option:`initial_ram_allocation_ratio` the initial MEMORY_MB inventory allocation ratio for a new compute node record, defaults to 1.5 * :oslo.config:option:`initial_disk_allocation_ratio` the initial DISK_GB inventory allocation ratio for a new compute node record, defaults to 1.0 Starting with the 27.0.0 Antelope release, the following default values are used for the initial allocation ratio values for a compute node: * :oslo.config:option:`initial_cpu_allocation_ratio` the initial VCPU inventory allocation ratio for a new compute node record, defaults to 4.0 * :oslo.config:option:`initial_ram_allocation_ratio` the initial MEMORY_MB inventory allocation ratio for a new compute node record, defaults to 1.0 * :oslo.config:option:`initial_disk_allocation_ratio` the initial DISK_GB inventory allocation ratio for a new compute node record, defaults to 1.0 Scheduling considerations ~~~~~~~~~~~~~~~~~~~~~~~~~ The allocation ratio configuration is used both during reporting of compute node `resource provider inventory`_ to the placement service and during scheduling. .. _resource provider inventory: https://docs.openstack.org/api-ref/placement/?expanded=#resource-provider-inventories Usage scenarios ~~~~~~~~~~~~~~~ Since allocation ratios can be set via nova configuration and the placement API, it can be confusing to know which should be used. This really depends on your scenario. A few common scenarios are detailed here. 1. When the deployer wants to **always** set an override value for a resource on a compute node, the deployer should ensure that the :oslo.config:option:`DEFAULT.cpu_allocation_ratio`, :oslo.config:option:`DEFAULT.ram_allocation_ratio` and :oslo.config:option:`DEFAULT.disk_allocation_ratio` configuration options are set to a non-None value. This will make the ``nova-compute`` service overwrite any externally-set allocation ratio values set via the placement REST API. 2. When the deployer wants to set an **initial** value for a compute node allocation ratio but wants to allow an admin to adjust this afterwards without making any configuration file changes, the deployer should set the :oslo.config:option:`DEFAULT.initial_cpu_allocation_ratio`, :oslo.config:option:`DEFAULT.initial_ram_allocation_ratio` and :oslo.config:option:`DEFAULT.initial_disk_allocation_ratio` configuration options and then manage the allocation ratios using the placement REST API (or `osc-placement`_ command line interface). For example: .. code-block:: console $ openstack resource provider inventory set \ --resource VCPU:allocation_ratio=1.0 \ --amend 815a5634-86fb-4e1e-8824-8a631fee3e06 3. When the deployer wants to **always** use the placement API to set allocation ratios, then the deployer should ensure that the :oslo.config:option:`DEFAULT.cpu_allocation_ratio`, :oslo.config:option:`DEFAULT.ram_allocation_ratio` and :oslo.config:option:`DEFAULT.disk_allocation_ratio` configuration options are set to a None and then manage the allocation ratios using the placement REST API (or `osc-placement`_ command line interface). This scenario is the workaround for `bug 1804125 `_. .. 
versionchanged:: 19.0.0 (Stein) The :oslo.config:option:`DEFAULT.initial_cpu_allocation_ratio`, :oslo.config:option:`DEFAULT.initial_ram_allocation_ratio` and :oslo.config:option:`DEFAULT.initial_disk_allocation_ratio` configuration options were introduced in Stein. Prior to this release, setting any of :oslo.config:option:`DEFAULT.cpu_allocation_ratio`, :oslo.config:option:`DEFAULT.ram_allocation_ratio` or :oslo.config:option:`DEFAULT.disk_allocation_ratio` to a non-null value would ensure the user-configured value was always overridden. .. _osc-placement: https://docs.openstack.org/osc-placement/latest/index.html .. _hypervisor-specific-considerations: Hypervisor-specific considerations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Nova provides three configuration options that can be used to set aside some number of resources that will not be consumed by an instance, whether these resources are overcommitted or not: - :oslo.config:option:`reserved_host_cpus`, - :oslo.config:option:`reserved_host_memory_mb` - :oslo.config:option:`reserved_host_disk_mb` Some virt drivers may benefit from the use of these options to account for hypervisor-specific overhead. Cells considerations -------------------- By default cells are enabled for scheduling new instances but they can be disabled (new schedules to the cell are blocked). This may be useful for users while performing cell maintenance, failures or other interventions. It is to be noted that creating pre-disabled cells and enabling/disabling existing cells should either be followed by a restart or SIGHUP of the nova-scheduler service for the changes to take effect. Command-line interface ~~~~~~~~~~~~~~~~~~~~~~ The :command:`nova-manage` command-line client supports the cell-disable related commands. To enable or disable a cell, use :command:`nova-manage cell_v2 update_cell` and to create pre-disabled cells, use :command:`nova-manage cell_v2 create_cell`. See the :ref:`man-page-cells-v2` man page for details on command usage. .. _compute-capabilities-as-traits: Compute capabilities as traits ------------------------------ .. versionadded:: 19.0.0 (Stein) The ``nova-compute`` service will report certain ``COMPUTE_*`` traits based on its compute driver capabilities to the placement service. The traits will be associated with the resource provider for that compute service. These traits can be used during scheduling by configuring flavors with :ref:`Required traits ` or :ref:`Forbidden traits `. For example, if you have a host aggregate with a set of compute nodes that support multi-attach volumes, you can restrict a flavor to that aggregate by adding the ``trait:COMPUTE_VOLUME_MULTI_ATTACH=required`` extra spec to the flavor and then restrict the flavor to the aggregate :ref:`as normal `. Here is an example of a libvirt compute node resource provider that is exposing some CPU features as traits, driver capabilities as traits, and a custom trait denoted by the ``CUSTOM_`` prefix: .. 
code-block:: console $ openstack --os-placement-api-version 1.6 resource provider trait list \ > d9b3dbc4-50e2-42dd-be98-522f6edaab3f --sort-column name +---------------------------------------+ | name | +---------------------------------------+ | COMPUTE_DEVICE_TAGGING | | COMPUTE_NET_ATTACH_INTERFACE | | COMPUTE_NET_ATTACH_INTERFACE_WITH_TAG | | COMPUTE_TRUSTED_CERTS | | COMPUTE_VOLUME_ATTACH_WITH_TAG | | COMPUTE_VOLUME_EXTEND | | COMPUTE_VOLUME_MULTI_ATTACH | | CUSTOM_IMAGE_TYPE_RBD | | HW_CPU_X86_MMX | | HW_CPU_X86_SSE | | HW_CPU_X86_SSE2 | | HW_CPU_X86_SVM | +---------------------------------------+ **Rules** There are some rules associated with capability-defined traits. 1. The compute service "owns" these traits and will add/remove them when the ``nova-compute`` service starts and when the ``update_available_resource`` periodic task runs, with run intervals controlled by config option :oslo.config:option:`update_resources_interval`. 2. The compute service will not remove any custom traits set on the resource provider externally, such as the ``CUSTOM_IMAGE_TYPE_RBD`` trait in the example above. 3. If compute-owned traits are removed from the resource provider externally, for example by running ``openstack resource provider trait delete ``, the compute service will add its traits again on restart or SIGHUP. 4. If a compute trait is set on the resource provider externally which is not supported by the driver, for example by adding the ``COMPUTE_VOLUME_EXTEND`` trait when the driver does not support that capability, the compute service will automatically remove the unsupported trait on restart or SIGHUP. 5. Compute capability traits are standard traits defined in the `os-traits`_ library. .. _os-traits: https://opendev.org/openstack/os-traits/src/branch/master/os_traits/compute :ref:`Further information on capabilities and traits ` can be found in the :doc:`Technical Reference Deep Dives section `. .. _custom-scheduler-filters: Writing Your Own Filter ----------------------- To create **your own filter**, you must inherit from |BaseHostFilter| and implement one method: ``host_passes``. This method should return ``True`` if a host passes the filter and return ``False`` elsewhere. It takes two parameters: * the ``HostState`` object allows to get attributes of the host * the ``RequestSpec`` object describes the user request, including the flavor, the image and the scheduler hints For further details about each of those objects and their corresponding attributes, refer to the codebase (at least by looking at the other filters code) or ask for help in the ``#openstack-nova`` IRC channel. In addition, if your custom filter uses non-standard extra specs, you must register validators for these extra specs. Examples of validators can be found in the ``nova.api.validation.extra_specs`` module. These should be registered via the ``nova.api.extra_spec_validator`` `entrypoint`__. The module containing your custom filter(s) must be packaged and available in the same environment(s) that the nova controllers, or specifically the :program:`nova-scheduler` and :program:`nova-api-wsgi` services, are available in. As an example, consider the following sample package, which is the `minimal structure`__ for a standard, setuptools-based Python package: .. code-block:: none acmefilter/ acmefilter/ __init__.py validators.py setup.py Where ``__init__.py`` contains: .. 
code-block:: python from oslo_log import log as logging from nova.scheduler import filters LOG = logging.getLogger(__name__) class AcmeFilter(filters.BaseHostFilter): def host_passes(self, host_state, spec_obj): extra_spec = spec_obj.flavor.extra_specs.get('acme:foo') LOG.info("Extra spec value was '%s'", extra_spec) # do meaningful stuff here... return True ``validators.py`` contains: .. code-block:: python from nova.api.validation.extra_specs import base def register(): validators = [ base.ExtraSpecValidator( name='acme:foo', description='My custom extra spec.' value={ 'type': str, 'enum': [ 'bar', 'baz', ], }, ), ] return validators ``setup.py`` contains: .. code-block:: python from setuptools import setup setup( name='acmefilter', version='0.1', description='My custom filter', packages=[ 'acmefilter' ], entry_points={ 'nova.api.extra_spec_validators': [ 'acme = acmefilter.validators', ], }, ) To enable this, you would set the following in :file:`nova.conf`: .. code-block:: ini [filter_scheduler] available_filters = nova.scheduler.filters.all_filters available_filters = acmefilter.AcmeFilter enabled_filters = ComputeFilter,AcmeFilter .. note:: You **must** add custom filters to the list of available filters using the :oslo.config:option:`filter_scheduler.available_filters` config option in addition to enabling them via the :oslo.config:option:`filter_scheduler.enabled_filters` config option. The default ``nova.scheduler.filters.all_filters`` value for the former only includes the filters shipped with nova. With these settings, all of the standard nova filters and the custom ``AcmeFilter`` filter are available to the scheduler, but just the ``ComputeFilter`` and ``AcmeFilter`` will be used on each request. __ https://packaging.python.org/specifications/entry-points/ __ https://python-packaging.readthedocs.io/en/latest/minimal.html Writing your own weigher ------------------------ To create your own weigher, you must inherit from |BaseHostWeigher| A weigher can implement both the ``weight_multiplier`` and ``_weight_object`` methods or just implement the ``weight_objects`` method. ``weight_objects`` method is overridden only if you need access to all objects in order to calculate weights, and it just return a list of weights, and not modify the weight of the object directly, since final weights are normalized and computed by ``weight.BaseWeightHandler``. .. |BaseHostFilter| replace:: :class:`BaseHostFilter ` .. |BaseHostWeigher| replace:: :class:`BaseHostFilter ` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/secure-boot.rst0000664000175000017500000000676400000000000020715 0ustar00zuulzuul00000000000000=========== Secure Boot =========== .. versionadded:: 14.0.0 (Newton) .. versionchanged:: 23.0.0 (Wallaby) Added support for Secure Boot to the libvirt driver. Nova supports configuring `UEFI Secure Boot`__ for guests. Secure Boot aims to ensure no unsigned kernel code runs on a machine. .. __: https://en.wikipedia.org/wiki/Secure_boot Enabling Secure Boot -------------------- Currently the configuration of UEFI guest bootloaders is only supported when using the libvirt compute driver with a :oslo.config:option:`libvirt.virt_type` of ``kvm`` or ``qemu``. In both cases, it requires the guests also be configured with a :doc:`UEFI bootloader `. With these requirements satisfied, you can verify UEFI Secure Boot support by inspecting the traits on the compute node's resource provider: .. 
code:: bash $ COMPUTE_UUID=$(openstack resource provider list --name $HOST -f value -c uuid) $ openstack resource provider trait list $COMPUTE_UUID | grep COMPUTE_SECURITY_UEFI_SECURE_BOOT | COMPUTE_SECURITY_UEFI_SECURE_BOOT | Configuring a flavor or image ----------------------------- Configuring UEFI Secure Boot for guests varies depending on the compute driver in use. In all cases, a :doc:`UEFI guest bootloader ` must be configured for the guest but there are also additional requirements depending on the compute driver in use. .. rubric:: Libvirt As the name would suggest, UEFI Secure Boot requires that a UEFI bootloader be configured for guests. When this is done, UEFI Secure Boot support can be configured using the :nova:extra-spec:`os:secure_boot` extra spec or equivalent image metadata property. For example, to configure an image that meets both of these requirements: .. code-block:: bash $ openstack image set \ --property hw_firmware_type=uefi \ --property os_secure_boot=required \ $IMAGE .. note:: On x86_64 hosts, enabling secure boot also requires configuring use of the Q35 machine type. This can be configured on a per-guest basis using the ``hw_machine_type`` image metadata property or automatically for all guests created on a host using the :oslo.config:option:`libvirt.hw_machine_type` config option. It is also possible to explicitly request that secure boot be disabled. This is the default behavior, so this request is typically useful when an admin wishes to explicitly prevent a user requesting secure boot by uploading their own image with relevant image properties. For example, to disable secure boot via the flavor: .. code-block:: bash $ openstack flavor set --property os:secure_boot=disabled $FLAVOR Finally, it is possible to request that secure boot be enabled if the host supports it. This is only possible via the image metadata property. When this is requested, secure boot will only be enabled if the host supports this feature and the other constraints, namely that a UEFI guest bootloader is configured, are met. For example: .. code-block:: bash $ openstack image set --property os_secure_boot=optional $IMAGE .. note:: If both the image metadata property and flavor extra spec are provided, they must match. If they do not, an error will be raised. References ---------- * `Allow Secure Boot (SB) for QEMU- and KVM-based guests (spec)`__ * `Securing Secure Boot with System Management Mode`__ .. __: https://specs.openstack.org/openstack/nova-specs/specs/wallaby/approved/allow-secure-boot-for-qemu-kvm-guests.html .. __: http://events17.linuxfoundation.org/sites/events/files/slides/kvmforum15-smm.pdf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/secure-live-migration-with-qemu-native-tls.rst0000664000175000017500000002020500000000000026664 0ustar00zuulzuul00000000000000========================================== Secure live migration with QEMU-native TLS ========================================== Context ~~~~~~~ The encryption offered by nova's :oslo.config:option:`libvirt.live_migration_tunnelled` does not secure all the different migration streams of a nova instance, namely: guest RAM, device state, and disks (via NBD) when using non-shared storage. Further, the "tunnelling via libvirtd" has inherent limitations: (a) it cannot handle live migration of disks in a non-shared storage setup (a.k.a. 
"block migration"); and (b) has a huge performance overhead and latency, because it burns more CPU and memory bandwidth due to increased number of data copies on both source and destination hosts. To solve this existing limitation, QEMU and libvirt have gained (refer :ref:`below ` for version details) support for "native TLS", i.e. TLS built into QEMU. This will secure all data transports, including disks that are not on shared storage, without incurring the limitations of the "tunnelled via libvirtd" transport. To take advantage of the "native TLS" support in QEMU and libvirt, nova has introduced new configuration attribute :oslo.config:option:`libvirt.live_migration_with_native_tls`. .. _`Prerequisites`: Prerequisites ~~~~~~~~~~~~~ (1) Version requirement: This feature needs at least libvirt 4.4.0 and QEMU 2.11. (2) A pre-configured TLS environment—i.e. CA, server, and client certificates, their file permissions, et al—must be "correctly" configured (typically by an installer tool) on all relevant compute nodes. To simplify your PKI (Public Key Infrastructure) setup, use deployment tools that take care of handling all the certificate lifecycle management. For example, refer to the "`TLS everywhere `__" guide from the TripleO project. (3) Password-less SSH setup for all relevant compute nodes. (4) On all relevant compute nodes, ensure the TLS-related config attributes in ``/etc/libvirt/qemu.conf`` are in place:: default_tls_x509_cert_dir = "/etc/pki/qemu" default_tls_x509_verify = 1 If it is not already configured, modify ``/etc/sysconfig/libvirtd`` on both (ComputeNode1 & ComputeNode2) to listen for TCP/IP connections:: LIBVIRTD_ARGS="--listen" Then, restart the libvirt daemon (also on both nodes):: $ systemctl restart libvirtd Refer to the "`Related information`_" section on a note about the other TLS-related configuration attributes in ``/etc/libvirt/qemu.conf``. 
Validating your TLS environment on compute nodes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Assuming you have two compute hosts (``ComputeNode1``, and ``ComputeNode2``) run the :command:`virt-pki-validate` tool (comes with the ``libvirt-client`` package on your Linux distribution) on both the nodes to ensure all the necessary PKI files are configured are configured:: [ComputeNode1]$ virt-pki-validate Found /usr/bin/certtool Found CA certificate /etc/pki/CA/cacert.pem for TLS Migration Test Found client certificate /etc/pki/libvirt/clientcert.pem for ComputeNode1 Found client private key /etc/pki/libvirt/private/clientkey.pem Found server certificate /etc/pki/libvirt/servercert.pem for ComputeNode1 Found server private key /etc/pki/libvirt/private/serverkey.pem Make sure /etc/sysconfig/libvirtd is setup to listen to TCP/IP connections and restart the libvirtd service [ComputeNode2]$ virt-pki-validate Found /usr/bin/certtool Found CA certificate /etc/pki/CA/cacert.pem for TLS Migration Test Found client certificate /etc/pki/libvirt/clientcert.pem for ComputeNode2 Found client private key /etc/pki/libvirt/private/clientkey.pem Found server certificate /etc/pki/libvirt/servercert.pem for ComputeNode2 Found server private key /etc/pki/libvirt/private/serverkey.pem Make sure /etc/sysconfig/libvirtd is setup to listen to TCP/IP connections and restart the libvirtd service Other TLS environment related checks on compute nodes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **IMPORTANT**: Ensure that the permissions of certificate files and keys in ``/etc/pki/qemu/*`` directory on both source *and* destination compute nodes to be the following ``0640`` with ``root:qemu`` as the group/user. For example, on a Fedora-based system:: $ ls -lasrtZ /etc/pki/qemu total 32 0 drwxr-xr-x. 10 root root system_u:object_r:cert_t:s0 110 Dec 10 10:39 .. 4 -rw-r-----. 1 root qemu unconfined_u:object_r:cert_t:s0 1464 Dec 10 11:08 ca-cert.pem 4 -rw-r-----. 1 root qemu unconfined_u:object_r:cert_t:s0 1558 Dec 10 11:08 server-cert.pem 4 -rw-r-----. 1 root qemu unconfined_u:object_r:cert_t:s0 1619 Dec 10 11:09 client-cert.pem 8 -rw-r-----. 1 root qemu unconfined_u:object_r:cert_t:s0 8180 Dec 10 11:09 client-key.pem 8 -rw-r-----. 1 root qemu unconfined_u:object_r:cert_t:s0 8177 Dec 11 05:35 server-key.pem 0 drwxr-xr-x. 2 root root unconfined_u:object_r:cert_t:s0 146 Dec 11 06:01 . Performing the migration ~~~~~~~~~~~~~~~~~~~~~~~~ (1) On all relevant compute nodes, enable the :oslo.config:option:`libvirt.live_migration_with_native_tls` configuration attribute and set the :oslo.config:option:`libvirt.live_migration_scheme` configuration attribute to tls:: [libvirt] live_migration_with_native_tls = true live_migration_scheme = tls .. note:: Setting both :oslo.config:option:`libvirt.live_migration_with_native_tls` and :oslo.config:option:`libvirt.live_migration_tunnelled` at the same time is invalid (and disallowed). .. note:: Not setting :oslo.config:option:`libvirt.live_migration_scheme` to ``tls`` will result in libvirt using the unencrypted TCP connection without displaying any error or a warning in the logs. And restart the ``nova-compute`` service:: $ systemctl restart openstack-nova-compute (2) Now that all TLS-related configuration is in place, migrate guests (with or without shared storage) from ``ComputeNode1`` to ``ComputeNode2``. Refer to the :doc:`live-migration-usage` document on details about live migration. .. 
_`Related information`: Related information ~~~~~~~~~~~~~~~~~~~ - If you have the relevant libvirt and QEMU versions (mentioned in the "`Prerequisites`_" section earlier), then using the :oslo.config:option:`libvirt.live_migration_with_native_tls` is strongly recommended over the more limited :oslo.config:option:`libvirt.live_migration_tunnelled` option, which is intended to be deprecated in future. - There are in total *nine* TLS-related config options in ``/etc/libvirt/qemu.conf``:: default_tls_x509_cert_dir default_tls_x509_verify nbd_tls nbd_tls_x509_cert_dir migrate_tls_x509_cert_dir vnc_tls_x509_cert_dir spice_tls_x509_cert_dir vxhs_tls_x509_cert_dir chardev_tls_x509_cert_dir If you set both ``default_tls_x509_cert_dir`` and ``default_tls_x509_verify`` parameters for all certificates, there is no need to specify any of the other ``*_tls*`` config options. The intention (of libvirt) is that you can just use the ``default_tls_x509_*`` config attributes so that you don't need to set any other ``*_tls*`` parameters, _unless_ you need different certificates for some services. The rationale for that is that some services (e.g. migration / NBD) are only exposed to internal infrastructure; while some services (VNC, Spice) might be exposed publicly, so might need different certificates. For OpenStack this does not matter, though, we will stick with the defaults. - If they are not already open, ensure you open up these TCP ports on your firewall: ``16514`` (where the authenticated and encrypted TCP/IP socket will be listening on) and ``49152-49215`` (for regular migration) on all relevant compute nodes. (Otherwise you get ``error: internal error: unable to execute QEMU command 'drive-mirror': Failed to connect socket: No route to host``). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/security.rst0000664000175000017500000000342700000000000020326 0ustar00zuulzuul00000000000000================== Security hardening ================== OpenStack Compute can be integrated with various third-party technologies to increase security. For more information, see the `OpenStack Security Guide `_. Encrypt Compute metadata traffic ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Enabling SSL encryption** OpenStack supports encrypting Compute metadata traffic with HTTPS. Enable SSL encryption in the ``metadata_agent.ini`` file. #. Enable the HTTPS protocol. .. code-block:: ini nova_metadata_protocol = https #. Determine whether insecure SSL connections are accepted for Compute metadata server requests. The default value is ``False``. .. code-block:: ini nova_metadata_insecure = False #. Specify the path to the client certificate. .. code-block:: ini nova_client_cert = PATH_TO_CERT #. Specify the path to the private key. .. code-block:: ini nova_client_priv_key = PATH_TO_KEY Securing live migration streams with QEMU-native TLS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It is strongly recommended to secure all the different live migration streams of a nova instance—i.e. guest RAM, device state, and disks (via NBD) when using non-shared storage. For further details on how to set this up, refer to the :doc:`secure-live-migration-with-qemu-native-tls` document. 
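As a quick reference, the compute node configuration that the linked document
walks through ends up looking like the following in ``nova.conf``; this is
only a sketch, and the prerequisites and validation steps in that document
still apply:

.. code-block:: ini

   [libvirt]
   live_migration_with_native_tls = true
   live_migration_scheme = tls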
Mitigation for MDS (Microarchitectural Data Sampling) security flaws ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It is strongly recommended to patch all compute nodes and nova instances against the processor-related security flaws, such as MDS (and other previous vulnerabilities). For details on applying mitigation for the MDS flaws, refer to :ref:`mitigation-for-Intel-MDS-security-flaws`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/service-groups.rst0000664000175000017500000000555700000000000021442 0ustar00zuulzuul00000000000000================================ Configure Compute service groups ================================ The Compute service must know the status of each compute node to effectively manage and use them. This can include events like a user launching a new VM, the scheduler sending a request to a live node, or a query to the ServiceGroup API to determine if a node is live. When a compute worker running the nova-compute daemon starts, it calls the join API to join the compute group. Any service (such as the scheduler) can query the group's membership and the status of its nodes. Internally, the ServiceGroup client driver automatically updates the compute worker status. .. _database-servicegroup-driver: Database ServiceGroup driver ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ By default, Compute uses the database driver to track if a node is live. In a compute worker, this driver periodically sends a ``db update`` command to the database, saying "I'm OK" with a timestamp. Compute uses a pre-defined timeout (``service_down_time``) to determine if a node is dead. The driver has limitations, which can be problematic depending on your environment. If a lot of compute worker nodes need to be checked, the database can be put under heavy load, which can cause the timeout to trigger, and a live node could incorrectly be considered dead. By default, the timeout is 60 seconds. Reducing the timeout value can help in this situation, but you must also make the database update more frequently, which again increases the database workload. The database contains data that is both transient (such as whether the node is alive) and persistent (such as entries for VM owners). With the ServiceGroup abstraction, Compute can treat each type separately. .. _memcache-servicegroup-driver: Memcache ServiceGroup driver ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The memcache ServiceGroup driver uses memcached, a distributed memory object caching system that is used to increase site performance. For more details, see `memcached.org `_. To use the memcache driver, you must install memcached. You might already have it installed, as the same driver is also used for the OpenStack Object Storage and OpenStack dashboard. To install memcached, see the *Environment -> Memcached* section in the `Installation Tutorials and Guides `_ depending on your distribution. These values in the ``/etc/nova/nova.conf`` file are required on every node for the memcache driver: .. code-block:: ini # Driver for the ServiceGroup service servicegroup_driver = "mc" # Memcached servers. Use either a list of memcached servers to use for caching (list value), # or "" for in-process caching (default). memcached_servers = # Timeout; maximum time since last check-in for up service (integer value). 
# Helps to define whether a node is dead service_down_time = 60 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/services.rst0000664000175000017500000000507400000000000020302 0ustar00zuulzuul00000000000000======================= Manage Compute services ======================= You can enable and disable Compute services. The following examples disable and enable the ``nova-compute`` service. #. List the Compute services: .. code-block:: console $ openstack compute service list +----+----------------+------------+----------+---------+-------+----------------------------+ | ID | Binary | Host | Zone | Status | State | Updated At | +----+----------------+------------+----------+---------+-------+----------------------------+ | 4 | nova-scheduler | controller | internal | enabled | up | 2016-12-20T00:44:48.000000 | | 5 | nova-conductor | controller | internal | enabled | up | 2016-12-20T00:44:54.000000 | | 8 | nova-compute | compute | nova | enabled | up | 2016-10-21T02:35:03.000000 | +----+----------------+------------+----------+---------+-------+----------------------------+ #. Disable a nova service: .. code-block:: console $ openstack compute service set --disable --disable-reason "trial log" compute nova-compute +----------+--------------+----------+-------------------+ | Host | Binary | Status | Disabled Reason | +----------+--------------+----------+-------------------+ | compute | nova-compute | disabled | trial log | +----------+--------------+----------+-------------------+ #. Check the service list: .. code-block:: console $ openstack compute service list +----+----------------+------------+----------+---------+-------+----------------------------+ | ID | Binary | Host | Zone | Status | State | Updated At | +----+----------------+------------+----------+---------+-------+----------------------------+ | 5 | nova-scheduler | controller | internal | enabled | up | 2016-12-20T00:44:48.000000 | | 6 | nova-conductor | controller | internal | enabled | up | 2016-12-20T00:44:54.000000 | | 9 | nova-compute | compute | nova | disabled| up | 2016-10-21T02:35:03.000000 | +----+----------------+------------+----------+---------+-------+----------------------------+ #. Enable the service: .. code-block:: console $ openstack compute service set --enable compute nova-compute +----------+--------------+---------+ | Host | Binary | Status | +----------+--------------+---------+ | compute | nova-compute | enabled | +----------+--------------+---------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/sev.rst0000664000175000017500000002560500000000000017256 0ustar00zuulzuul00000000000000.. _amd-sev: AMD SEV (Secure Encrypted Virtualization) ========================================= .. versionadded:: 20.0.0 (Train) `Secure Encrypted Virtualization (SEV)`__ is a technology from AMD which enables the memory for a VM to be encrypted with a key unique to the VM. SEV is particularly applicable to cloud computing since it can reduce the amount of trust VMs need to place in the hypervisor and administrator of their host system. .. __: https://developer.amd.com/sev/ .. 
_deploying-sev-capable-infrastructure: Enabling SEV ------------ First the operator will need to ensure the following prerequisites are met: - Currently SEV is only supported when using the libvirt compute driver with a :oslo.config:option:`libvirt.virt_type` of ``kvm`` or ``qemu``. - At least one of the Nova compute hosts must be AMD hardware capable of supporting SEV. It is entirely possible for the compute plane to be a mix of hardware which can and cannot support SEV, although as per the section on `Permanent limitations`_ below, the maximum number of simultaneously running guests with SEV will be limited by the quantity and quality of SEV-capable hardware available. In order for users to be able to use SEV, the operator will need to perform the following steps: - Ensure that sufficient memory is reserved on the SEV compute hosts for host-level services to function correctly at all times. This is particularly important when hosting SEV-enabled guests, since they pin pages in RAM, preventing any memory overcommit which may be in normal operation on other compute hosts. It is `recommended`__ to achieve this by configuring an ``rlimit`` at the ``/machine.slice`` top-level ``cgroup`` on the host, with all VMs placed inside that. (For extreme detail, see `this discussion on the spec`__.) __ http://specs.openstack.org/openstack/nova-specs/specs/train/approved/amd-sev-libvirt-support.html#memory-reservation-solutions __ https://review.opendev.org/#/c/641994/2/specs/train/approved/amd-sev-libvirt-support.rst@167 An alternative approach is to configure the :oslo.config:option:`reserved_host_memory_mb` option in the ``[DEFAULT]`` section of :file:`nova.conf`, based on the expected maximum number of SEV guests simultaneously running on the host, and the details provided in `an earlier version of the AMD SEV spec`__ regarding memory region sizes, which cover how to calculate it correctly. __ https://specs.openstack.org/openstack/nova-specs/specs/stein/approved/amd-sev-libvirt-support.html#proposed-change See `the Memory Locking and Accounting section of the AMD SEV spec`__ and `previous discussion for further details`__. __ http://specs.openstack.org/openstack/nova-specs/specs/train/approved/amd-sev-libvirt-support.html#memory-locking-and-accounting __ https://review.opendev.org/#/c/641994/2/specs/train/approved/amd-sev-libvirt-support.rst@167 - A cloud administrator will need to define one or more SEV-enabled flavors :ref:`as described below `, unless it is sufficient for users to define SEV-enabled images. Additionally the cloud operator should consider the following optional steps: .. _num_memory_encrypted_guests: - Configure the :oslo.config:option:`libvirt.num_memory_encrypted_guests` option in :file:`nova.conf` to represent the number of guests an SEV compute node can host concurrently with memory encrypted at the hardware level. For example: .. code-block:: ini [libvirt] num_memory_encrypted_guests = 15 This option exists because on AMD SEV-capable hardware, the memory controller has a fixed number of slots for holding encryption keys, one per guest. For example, at the time of writing, earlier generations of hardware only have 15 slots, thereby limiting the number of SEV guests which can be run concurrently to 15. Nova needs to track how many slots are available and used in order to avoid attempting to exceed that limit in the hardware. 
Since version 8.0.0, libvirt exposes maximum number of SEV guests which can run concurrently in its host, so the limit is automatically detected using this feature. So it is not necessary to configure this option. However in case an older version of libvirt is used, it is not possible for Nova to programmatically detect the correct value and Nova imposes no limit. So this configuration option serves as a stop-gap, allowing the cloud operator the option of providing this value manually. This option has been deprecated and will be removed in a future release. .. note:: If libvirt older than 8.0.0 is used, operators should carefully weigh the benefits vs. the risk when deciding whether to use the default of ``None`` or manually impose a limit. The benefits of using the default are a) immediate convenience since nothing needs to be done now, and b) convenience later when upgrading compute hosts to future versions of libvirt, since again nothing will need to be done for the correct limit to be automatically imposed. However the risk is that until auto-detection is implemented, users may be able to attempt to launch guests with encrypted memory on hosts which have already reached the maximum number of guests simultaneously running with encrypted memory. This risk may be mitigated by other limitations which operators can impose, for example if the smallest RAM footprint of any flavor imposes a maximum number of simultaneously running guests which is less than or equal to the SEV limit. - Configure :oslo.config:option:`ram_allocation_ratio` on all SEV-capable compute hosts to ``1.0``. Use of SEV requires locking guest memory, meaning it is not possible to overcommit host memory. Alternatively, you can explicitly configure small pages for instances using the :nova:extra-spec:`hw:mem_page_size` flavor extra spec and equivalent image metadata property. For more information, see :doc:`huge-pages`. - Configure :oslo.config:option:`libvirt.hw_machine_type` on all SEV-capable compute hosts to include ``x86_64=q35``, so that all x86_64 images use the ``q35`` machine type by default. (Currently Nova defaults to the ``pc`` machine type for the ``x86_64`` architecture, although `it is expected that this will change in the future`__.) Changing the default from ``pc`` to ``q35`` makes the creation and configuration of images by users more convenient by removing the need for the ``hw_machine_type`` property to be set to ``q35`` on every image for which SEV booting is desired. .. caution:: Consider carefully whether to set this option. It is particularly important since a limitation of the implementation prevents the user from receiving an error message with a helpful explanation if they try to boot an SEV guest when neither this configuration option nor the image property are set to select a ``q35`` machine type. On the other hand, setting it to ``q35`` may have other undesirable side-effects on other images which were expecting to be booted with ``pc``, so it is suggested to set it on a single compute node or aggregate, and perform careful testing of typical images before rolling out the setting to all SEV-capable compute hosts. __ https://bugs.launchpad.net/nova/+bug/1780138 .. 
_extra-specs-memory-encryption: Configuring a flavor or image ----------------------------- Once an operator has covered the above steps, users can launch SEV instances either by requesting a flavor for which the operator set the :nova:extra-spec:`hw:mem_encryption` extra spec to ``True``, or by using an image with the ``hw_mem_encryption`` property set to ``True``. For example, to enable SEV for a flavor: .. code-block:: console $ openstack flavor set FLAVOR-NAME \ --property hw:mem_encryption=true It is also possible to use SEV-ES, instead of SEV, by setting the :nova:extra-spec:`hw:mem_encryption_model` extra spec to ``amd-sev-es``, or by using an image with the ``hw_mem_encryption_model`` property set to ``amd-sev-es``. In case the extra spec and the property are unset or set to ``amd-sev`` then SEV is used. In all cases, SEV instances can only be booted from images which have the ``hw_firmware_type`` property set to ``uefi``, and only when the machine type is set to ``q35``. This can be set per image by setting the image property ``hw_machine_type=q35``, or per compute node by the operator via :oslo.config:option:`libvirt.hw_machine_type` as explained above. Limitations ----------- Impermanent limitations ~~~~~~~~~~~~~~~~~~~~~~~ The following limitations may be removed in the future as the hardware, firmware, and various layers of software receive new features: - SEV-encrypted VMs cannot yet be live-migrated or suspended, therefore they will need to be fully shut down before migrating off an SEV host, e.g. if maintenance is required on the host. - SEV-encrypted VMs cannot contain directly accessible host devices (PCI passthrough). So for example mdev vGPU support will not currently work. However technologies based on `vhost-user`__ should work fine. __ https://wiki.qemu.org/Features/VirtioVhostUser - The boot disk of SEV-encrypted VMs can only be ``virtio``. (``virtio-blk`` is typically the default for libvirt disks on x86, but can also be explicitly set e.g. via the image property ``hw_disk_bus=virtio``). Valid alternatives for the disk include using ``hw_disk_bus=scsi`` with ``hw_scsi_model=virtio-scsi`` , or ``hw_disk_bus=sata``. Permanent limitations ~~~~~~~~~~~~~~~~~~~~~ The following limitations are expected long-term: - The number of SEV guests allowed to run concurrently will always be limited. `On the first generation of EPYC machines it will be limited to 15 guests`__; however this limit becomes much higher with the second generation (Rome). __ https://www.redhat.com/archives/libvir-list/2019-January/msg00652.html - The operating system running in an encrypted virtual machine must contain SEV support. Non-limitations ~~~~~~~~~~~~~~~ For the sake of eliminating any doubt, the following actions are *not* expected to be limited when SEV encryption is used: - Cold migration or shelve, since they power off the VM before the operation at which point there is no encrypted memory (although this could change since there is work underway to add support for `PMEM `_) - Snapshot, since it only snapshots the disk - ``nova evacuate`` (despite the name, more akin to resurrection than evacuation), since this is only initiated when the VM is no longer running - Attaching any volumes, as long as they do not require attaching via an IDE bus - Use of spice / VNC / serial / RDP consoles - :doc:`VM guest virtual NUMA ` References ---------- - `libvirt driver launching AMD SEV-encrypted instances (spec)`__ .. 
__: http://specs.openstack.org/openstack/nova-specs/specs/train/approved/amd-sev-libvirt-support.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/soft-delete-shadow-tables.rst0000664000175000017500000000557400000000000023432 0ustar00zuulzuul00000000000000============================= Soft Delete and Shadow Tables ============================= Nova has two unrelated features which are called ``soft delete``: Soft delete instances that can be restored ------------------------------------------ After an instance delete request, the actual delete is delayed by a configurable amount of time (config option :oslo.config:option:`reclaim_instance_interval`). During the delay, the instance is marked to be in state ``SOFT_DELETED`` and can be restored (:command:`openstack server restore`) by an admin in order to gracefully handle human mistakes. If the instance is not restored during the configured delay, a periodic job actually deletes the instance. This feature is optional and by default off. See also: - "Delete, Restore" in `API Guide: Server Concepts `_ - config reference: :oslo.config:option:`reclaim_instance_interval` Soft delete database rows to shadow tables ------------------------------------------ At an actual instance delete, no DB record is deleted. Instead the records are marked as deleted (for example ``instances.deleted`` in Nova cell databases). This preserves historic information for debugging and audit uses. But it also leads to accumulation of data in Nova cell DB tables, which may have an effect on Nova DB performance as documented in `DB prune deleted rows `_. The records marked as deleted can be cleaned up in multiple stages. First you can move them to so-called shadow tables (tables with prefix ``shadow_`` in Nova cell databases). This is called *archiving the deleted rows*. Nova does not query shadow tables, therefore data moved to the shadow tables no longer affect DB performance. However storage space is still consumed. Then you can actually delete the information from the shadow tables. This is called *DB purge*. These operations can be performed by nova-manage: - https://docs.openstack.org/nova/latest/cli/nova-manage.html#db-archive-deleted-rows - https://docs.openstack.org/nova/latest/cli/nova-manage.html#db-purge This feature is not optional. Every long-running deployment should regularly archive and purge the deleted rows. For example via a cron job to regularly call :program:`nova-manage db archive_deleted_rows` and :program:`nova-manage db purge`. The tradeoffs between data retention, DB performance and storage needs should be considered. In the Mitaka release there was an agreement between Nova developers that it's not desirable to provide shadow tables for every table in the Nova database, `documented in a spec `_. Therefore not all information about an instance is preserved in the shadow tables. Since then new shadow tables are not introduced. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/ssh-configuration.rst0000664000175000017500000000440200000000000022113 0ustar00zuulzuul00000000000000.. _cli-os-migrate-cfg-ssh: =================================== Configure SSH between compute nodes =================================== .. 
todo:: Consider merging this into a larger "migration" document or to the installation guide If you are resizing or migrating an instance between hypervisors, you might encounter an SSH (Permission denied) error. Ensure that each node is configured with SSH key authentication so that the Compute service can use SSH to move disks to other nodes. .. note:: It is not necessary that all the compute nodes share the same key pair. However for the ease of the configuration, this document only utilizes a single key pair for communication between compute nodes. To share a key pair between compute nodes, complete the following steps: #. On the first node, obtain a key pair (public key and private key). Use the root key that is in the ``/root/.ssh/id_rsa`` and ``/root/.ssh/id_rsa.pub`` directories or generate a new key pair. #. Run :command:`setenforce 0` to put SELinux into permissive mode. #. Enable login abilities for the nova user: .. code-block:: console # usermod -s /bin/bash nova Ensure you can switch to the nova account: .. code-block:: console # su - nova #. As root, create the folder that is needed by SSH and place the private key that you obtained in step 1 into this folder, and add the pub key to the authorized_keys file: .. code-block:: console mkdir -p /var/lib/nova/.ssh cp /var/lib/nova/.ssh/id_rsa echo 'StrictHostKeyChecking no' >> /var/lib/nova/.ssh/config chmod 600 /var/lib/nova/.ssh/id_rsa /var/lib/nova/.ssh/authorized_keys echo >> /var/lib/nova/.ssh/authorized_keys #. Copy the whole folder created in step 4 to the rest of the nodes: .. code-block:: console # scp -r /var/lib/nova/.ssh remote-host:/var/lib/nova/ #. Ensure that the nova user can now log in to each node without using a password: .. code-block:: console # su - nova $ ssh *computeNodeAddress* $ exit #. As root on each node, restart both libvirt and the Compute services: .. code-block:: console # systemctl restart libvirtd.service # systemctl restart openstack-nova-compute.service ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/support-compute.rst0000664000175000017500000004026600000000000021647 0ustar00zuulzuul00000000000000==================== Troubleshoot Compute ==================== Common problems for Compute typically involve misconfigured networking or credentials that are not sourced properly in the environment. Also, most flat networking configurations do not enable :command:`ping` or :command:`ssh` from a compute node to the instances that run on that node. Another common problem is trying to run 32-bit images on a 64-bit compute node. This section shows you how to troubleshoot Compute. .. todo:: Move the sections below into sub-pages for readability. .. toctree:: :maxdepth: 1 troubleshooting/orphaned-allocations.rst troubleshooting/rebuild-placement-db.rst troubleshooting/affinity-policy-violated.rst Compute service logging ----------------------- Compute stores a log file for each service in ``/var/log/nova``. For example, ``nova-compute.log`` is the log for the ``nova-compute`` service. You can set the following options to format log strings for the ``nova.log`` module in the ``nova.conf`` file: * ``logging_context_format_string`` * ``logging_default_format_string`` If the log level is set to ``debug``, you can also specify ``logging_debug_format_suffix`` to append extra formatting. For information about what variables are available for the formatter, see `Formatter Objects `_. 
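For example, a minimal sketch of overriding these options in the ``[DEFAULT]``
section of ``nova.conf``; the format strings below are illustrative values
rather than the shipped defaults:

.. code-block:: ini

   [DEFAULT]
   logging_default_format_string = %(asctime)s %(process)d %(levelname)s %(name)s [-] %(message)s
   logging_context_format_string = %(asctime)s %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(message)s
   logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d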
You have two logging options for OpenStack Compute based on configuration settings. In ``nova.conf``, include the ``logfile`` option to enable logging. Alternatively you can set ``use_syslog = 1`` so that the nova daemon logs to syslog. Guru Meditation reports ----------------------- A Guru Meditation report is sent by the Compute service upon receipt of the ``SIGUSR2`` signal (``SIGUSR1`` before Mitaka). This report is a general-purpose error report that includes details about the current state of the service. The error report is sent to ``stderr``. For example, if you redirect error output to ``nova-compute-err.log`` using :command:`nova-compute 2>/var/log/nova/nova-compute-err.log`, resulting in the process ID 8675, you can then run: .. code-block:: console # kill -USR2 8675 This command triggers the Guru Meditation report to be printed to ``/var/log/nova/nova-compute-err.log``. For WSGI based services like ``nova-api-wsgi`` and ``nova-metadata-wsgi`` using signals is not trivial due to the web server's own signal handling. An alternative to the signal-based approach that works equally well for freestanding and hosted entry points is to use a file-based trigger. Configure the service to trigger the GMR by the modification time changes of a file or directory. .. code-block:: [oslo_reports] file_event_handler=/var/lib/nova Then the report can be triggered by touching the file or directory. The GMRb will be emitted in the same place where the service logs normally. .. code-block:: console touch /var/lib/nova Note that some web servers freeze the request handler processes when there is no HTTP request to be handled. This prevent the file system monitoring loop to detect the change. So after touching the file make a HTTP request to the given WSGI application. .. code-block:: console openstack compute service list For more details and a complete working example you can check `openstack-must-gather`_. .. _openstack-must-gather: https://github.com/openstack-k8s-operators/openstack-must-gather/pull/34 The report has the following sections: * Package: Displays information about the package to which the process belongs, including version information. * Threads: Displays stack traces and thread IDs for each of the threads within the process. * Green Threads: Displays stack traces for each of the green threads within the process (green threads do not have thread IDs). * Configuration: Lists all configuration options currently accessible through the CONF object for the current process. For more information, see :doc:`/reference/gmr`. .. _compute-common-errors-and-fixes: Common errors and fixes for Compute ----------------------------------- The `ask.openstack.org `_ site offers a place to ask and answer questions, and you can also mark questions as frequently asked questions. This section describes some errors people have posted previously. Bugs are constantly being fixed, so online resources are a great way to get the most up-to-date errors and fixes. Credential errors, 401, and 403 forbidden errors ------------------------------------------------ Problem ~~~~~~~ Missing credentials cause a ``403 forbidden`` error. Solution ~~~~~~~~ To resolve this issue, use one of these methods: #. Manual method Gets the ``novarc`` file from the project ZIP file, saves existing credentials in case of override, and manually sources the ``novarc`` file. #. Script method Generates ``novarc`` from the project ZIP file and sources it for you. 
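In either case, once the credentials file is in place it should be sourced into
the current shell and verified before retrying the failing request. A brief
sketch, assuming the file is named ``novarc``:

.. code-block:: console

   $ source novarc
   $ env | grep OS_
   $ openstack token issue

If :command:`openstack token issue` fails, the credentials themselves are wrong
or incomplete and the ``401``/``403`` errors will persist.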
When you run ``nova-api`` the first time, it generates the certificate authority information, including ``openssl.cnf``. If you start the CA services before this, you might not be able to create your ZIP file. Restart the services. When your CA information is available, create your ZIP file. Also, check your HTTP proxy settings to see whether they cause problems with ``novarc`` creation. Live migration permission issues -------------------------------- Problem ~~~~~~~ When live migrating an instance, you may see errors like the below: .. code-block:: shell libvirtError: operation failed: Failed to connect to remote libvirt URI qemu+ssh://stack@cld6b16/system: Cannot recv data: Host key verification failed.: Connection reset by peer Solution ~~~~~~~~ Ensure you have completed all the steps outlined in :doc:`/admin/ssh-configuration`. In particular, it's important to note that the ``libvirt`` process runs as ``root`` even though it may be connecting to a different user (``stack`` in the above example). You can ensure everything is correctly configured by attempting to connect to the remote host via the ``root`` user. Using the above example once again: .. code-block:: shell $ su - -c 'ssh stack@cld6b16' Instance errors --------------- Problem ~~~~~~~ Sometimes a particular instance shows ``pending`` or you cannot SSH to it. Sometimes the image itself is the problem. For example, when you use flat manager networking, you do not have a DHCP server and certain images do not support interface injection; you cannot connect to them. Solution ~~~~~~~~ To fix instance errors use an image that does support this method, such as Ubuntu, which obtains an IP address correctly with FlatManager network settings. To troubleshoot other possible problems with an instance, such as an instance that stays in a spawning state, check the directory for the particular instance under ``/var/lib/nova/instances`` on the ``nova-compute`` host and make sure that these files are present: * ``libvirt.xml`` * ``disk`` * ``disk-raw`` * ``kernel`` * ``ramdisk`` * ``console.log``, after the instance starts. If any files are missing, empty, or very small, the ``nova-compute`` service did not successfully download the images from the Image service. Also check ``nova-compute.log`` for exceptions. Sometimes they do not appear in the console output. Next, check the log file for the instance in the ``/var/log/libvirt/qemu`` directory to see if it exists and has any useful error messages in it. Finally, from the ``/var/lib/nova/instances`` directory for the instance, see if this command returns an error: .. code-block:: console # virsh create libvirt.xml Empty log output for Linux instances ------------------------------------ Problem ~~~~~~~ You can view the log output of running instances from either the :guilabel:`Log` tab of the dashboard or the output of :command:`nova console-log`. In some cases, the log output of a running Linux instance will be empty or only display a single character (for example, the ``?`` character). This occurs when the Compute service attempts to retrieve the log output of the instance via a serial console while the instance itself is not configured to send output to the console. Solution ~~~~~~~~ To rectify this, append the following parameters to kernel arguments specified in the instance's boot loader: .. code-block:: ini console=tty0 console=ttyS0,115200n8 Upon rebooting, the instance will be configured to send output to the Compute service. 
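As an illustration only: on a guest that boots with GRUB, the kernel arguments
can usually be added through ``/etc/default/grub``; the exact file and the
command that regenerates the boot configuration depend on the distribution:

.. code-block:: console

   $ sudo sed -i 's/^GRUB_CMDLINE_LINUX="/&console=tty0 console=ttyS0,115200n8 /' /etc/default/grub
   $ sudo update-grub    # Debian/Ubuntu; on RHEL-like guests use "grub2-mkconfig -o /boot/grub2/grub.cfg"
   $ sudo reboot

After the reboot, :command:`openstack console log show <SERVER>` should return
the guest's boot messages instead of empty output.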
Reset the state of an instance ------------------------------ Problem ~~~~~~~ Instances can remain in an intermediate state, such as ``deleting``. Solution ~~~~~~~~ You can use the :command:`nova reset-state` command to manually reset the state of an instance to an error state. You can then delete the instance. For example: .. code-block:: console $ nova reset-state c6bbbf26-b40a-47e7-8d5c-eb17bf65c485 $ openstack server delete c6bbbf26-b40a-47e7-8d5c-eb17bf65c485 You can also use the ``--active`` parameter to force the instance back to an active state instead of an error state. For example: .. code-block:: console $ nova reset-state --active c6bbbf26-b40a-47e7-8d5c-eb17bf65c485 Injection problems ------------------ Problem ~~~~~~~ Instances may boot slowly, or do not boot. File injection can cause this problem. Solution ~~~~~~~~ To disable injection in libvirt, set the following in ``nova.conf``: .. code-block:: ini [libvirt] inject_partition = -2 .. note:: If you have not enabled the config drive and you want to make user-specified files available from the metadata server for to improve performance and avoid boot failure if injection fails, you must disable injection. Cannot find suitable emulator for x86_64 ---------------------------------------- Problem ~~~~~~~ When you attempt to create a VM, the error shows the VM is in the ``BUILD`` then ``ERROR`` state. Solution ~~~~~~~~ On the KVM host, run :command:`cat /proc/cpuinfo`. Make sure the ``vmx`` or ``svm`` flags are set. Follow the instructions in the :ref:`enable-kvm` section in the Nova Configuration Reference to enable hardware virtualization support in your BIOS. Failed to attach volume after detaching --------------------------------------- Problem ~~~~~~~ Failed to attach a volume after detaching the same volume. Solution ~~~~~~~~ You must change the device name on the :command:`nova-attach` command. The VM might not clean up after a :command:`nova-detach` command runs. This example shows how the :command:`nova-attach` command fails when you use the ``vdb``, ``vdc``, or ``vdd`` device names: .. code-block:: console # ls -al /dev/disk/by-path/ total 0 drwxr-xr-x 2 root root 200 2012-08-29 17:33 . drwxr-xr-x 5 root root 100 2012-08-29 17:33 .. lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0 -> ../../vda lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0-part1 -> ../../vda1 lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0-part2 -> ../../vda2 lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0-part5 -> ../../vda5 lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:06.0-virtio-pci-virtio2 -> ../../vdb lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:08.0-virtio-pci-virtio3 -> ../../vdc lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:09.0-virtio-pci-virtio4 -> ../../vdd lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:09.0-virtio-pci-virtio4-part1 -> ../../vdd1 You might also have this problem after attaching and detaching the same volume from the same VM with the same mount point multiple times. In this case, restart the KVM host. Failed to attach volume, systool is not installed ------------------------------------------------- Problem ~~~~~~~ This warning and error occurs if you do not have the required ``sysfsutils`` package installed on the compute node: .. 
code-block:: console WARNING nova.virt.libvirt.utils [req-1200f887-c82b-4e7c-a891-fac2e3735dbb\ admin admin|req-1200f887-c82b-4e7c-a891-fac2e3735dbb admin admin] systool\ is not installed ERROR nova.compute.manager [req-1200f887-c82b-4e7c-a891-fac2e3735dbb admin\ admin|req-1200f887-c82b-4e7c-a891-fac2e3735dbb admin admin] [instance: df834b5a-8c3f-477a-be9b-47c97626555c|instance: df834b5a-8c3f-47\ 7a-be9b-47c97626555c] Failed to attach volume 13d5c633-903a-4764-a5a0-3336945b1db1 at /dev/vdk. Solution ~~~~~~~~ Install the ``sysfsutils`` package on the compute node. For example: .. code-block:: console # apt-get install sysfsutils Failed to connect volume in FC SAN ---------------------------------- Problem ~~~~~~~ The compute node failed to connect to a volume in a Fibre Channel (FC) SAN configuration. The WWN may not be zoned correctly in your FC SAN that links the compute host to the storage array: .. code-block:: console ERROR nova.compute.manager [req-2ddd5297-e405-44ab-aed3-152cd2cfb8c2 admin\ demo|req-2ddd5297-e405-44ab-aed3-152cd2cfb8c2 admin demo] [instance: 60ebd\ 6c7-c1e3-4bf0-8ef0-f07aa4c3d5f3|instance: 60ebd6c7-c1e3-4bf0-8ef0-f07aa4c3\ d5f3] Failed to connect to volume 6f6a6a9c-dfcf-4c8d-b1a8-4445ff883200 while\ attaching at /dev/vdjTRACE nova.compute.manager [instance: 60ebd6c7-c1e3-4\ bf0-8ef0-f07aa4c3d5f3|instance: 60ebd6c7-c1e3-4bf0-8ef0-f07aa4c3d5f3] Traceback (most recent call last):...f07aa4c3d5f3\] ClientException: The\ server has either erred or is incapable of performing the requested\ operation.(HTTP 500)(Request-ID: req-71e5132b-21aa-46ee-b3cc-19b5b4ab2f00) Solution ~~~~~~~~ The network administrator must configure the FC SAN fabric by correctly zoning the WWN (port names) from your compute node HBAs. Multipath call failed exit -------------------------- Problem ~~~~~~~ Multipath call failed exit. This warning occurs in the Compute log if you do not have the optional ``multipath-tools`` package installed on the compute node. This is an optional package and the volume attachment does work without the multipath tools installed. If the ``multipath-tools`` package is installed on the compute node, it is used to perform the volume attachment. The IDs in your message are unique to your system. .. code-block:: console WARNING nova.storage.linuxscsi [req-cac861e3-8b29-4143-8f1b-705d0084e571 \ admin admin|req-cac861e3-8b29-4143-8f1b-705d0084e571 admin admin] \ Multipath call failed exit (96) Solution ~~~~~~~~ Install the ``multipath-tools`` package on the compute node. For example: .. code-block:: console # apt-get install multipath-tools Failed to Attach Volume, Missing sg_scan ---------------------------------------- Problem ~~~~~~~ Failed to attach volume to an instance, ``sg_scan`` file not found. This error occurs when the sg3-utils package is not installed on the compute node. The IDs in your message are unique to your system: .. code-block:: console ERROR nova.compute.manager [req-cf2679fd-dd9e-4909-807f-48fe9bda3642 admin admin|req-cf2679fd-dd9e-4909-807f-48fe9bda3642 admin admin] [instance: 7d7c92e0-49fa-4a8e-87c7-73f22a9585d5|instance: 7d7c92e0-49fa-4a8e-87c7-73f22a9585d5] Failed to attach volume 4cc104c4-ac92-4bd6-9b95-c6686746414a at /dev/vdcTRACE nova.compute.manager [instance: 7d7c92e0-49fa-4a8e-87c7-73f22a9585d5|instance: 7d7c92e0-49fa-4a8e-87c7-73f22a9585d5] Stdout: '/usr/local/bin/nova-rootwrap: Executable not found: /usr/bin/sg_scan' Solution ~~~~~~~~ Install the ``sg3-utils`` package on the compute node. For example: .. 
code-block:: console # apt-get install sg3-utils Requested microversions are ignored ----------------------------------- Problem ~~~~~~~ When making a request with a microversion beyond 2.1, for example: .. code-block:: console $ openstack --os-compute-api-version 2.15 server group create \ --policy soft-anti-affinity my-soft-anti-group It fails saying that "soft-anti-affinity" is not a valid policy, even thought it is allowed with the `2.15 microversion`_. .. _2.15 microversion: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id13 Solution ~~~~~~~~ Ensure the ``compute`` endpoint in the identity service catalog is pointing at ``/v2.1`` instead of ``/v2``. The former route supports microversions, while the latter route is considered the legacy v2.0 compatibility-mode route which renders all requests as if they were made on the legacy v2.0 API. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.2456076 nova-32.0.0/doc/source/admin/troubleshooting/0000775000175000017500000000000000000000000021146 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/troubleshooting/affinity-policy-violated.rst0000664000175000017500000000615300000000000026620 0ustar00zuulzuul00000000000000Affinity policy violated with parallel requests =============================================== Problem ------- Parallel server create requests for affinity or anti-affinity land on the same host and servers go to the ``ACTIVE`` state even though the affinity or anti-affinity policy was violated. Solution -------- There are two ways to avoid anti-/affinity policy violations among multiple server create requests. Create multiple servers as a single request ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Use the `multi-create API`_ with the ``min_count`` parameter set or the `multi-create CLI`_ with the ``--min`` option set to the desired number of servers. This works because when the batch of requests is visible to ``nova-scheduler`` at the same time as a group, it will be able to choose compute hosts that satisfy the anti-/affinity constraint and will send them to the same hosts or different hosts accordingly. .. _multi-create API: https://docs.openstack.org/api-ref/compute/#create-multiple-servers .. _multi-create CLI: https://docs.openstack.org/python-openstackclient/latest/cli/command-objects/server.html#server-create Adjust Nova configuration settings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When requests are made separately and the scheduler cannot consider the batch of requests at the same time as a group, anti-/affinity races are handled by what is called the "late affinity check" in ``nova-compute``. Once a server lands on a compute host, if the request involves a server group, ``nova-compute`` contacts the API database (via ``nova-conductor``) to retrieve the server group and then it checks whether the affinity policy has been violated. If the policy has been violated, ``nova-compute`` initiates a reschedule of the server create request. Note that this means the deployment must have :oslo.config:option:`scheduler.max_attempts` set greater than ``1`` (default is ``3``) to handle races. An ideal configuration for multiple cells will minimize :ref:`upcalls ` from the cells to the API database. This is how devstack, for example, is configured in the CI gate. 
The cell conductors do not set :oslo.config:option:`api_database.connection` and ``nova-compute`` sets :oslo.config:option:`workarounds.disable_group_policy_check_upcall` to ``True``. However, if a deployment needs to handle racing affinity requests, it needs to configure cell conductors to have access to the API database, for example: .. code-block:: ini [api_database] connection = mysql+pymysql://root:a@127.0.0.1/nova_api?charset=utf8 The deployment also needs to configure ``nova-compute`` services not to disable the group policy check upcall by either not setting (use the default) :oslo.config:option:`workarounds.disable_group_policy_check_upcall` or setting it to ``False``, for example: .. code-block:: ini [workarounds] disable_group_policy_check_upcall = False With these settings, anti-/affinity policy should not be violated even when parallel server create requests are racing. Future work is needed to add anti-/affinity support to the placement service in order to eliminate the need for the late affinity check in ``nova-compute``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/troubleshooting/orphaned-allocations.rst0000664000175000017500000003553400000000000026020 0ustar00zuulzuul00000000000000Orphaned resource allocations ============================= Problem ------- There are orphaned resource allocations in the placement service which can cause resource providers to: * Appear to the scheduler to be more utilized than they really are * Prevent deletion of compute services One scenario in which this could happen is a compute service host is having problems so the administrator forces it down and evacuates servers from it. Note that in this case "evacuates" refers to the server ``evacuate`` action, not live migrating all servers from the running compute service. Assume the compute host is down and fenced. In this case, the servers have allocations tracked in placement against both the down source compute node and their current destination compute host. For example, here is a server *vm1* which has been evacuated from node *devstack1* to node *devstack2*: .. code-block:: console $ openstack --os-compute-api-version 2.53 compute service list --service nova-compute +--------------------------------------+--------------+-----------+------+---------+-------+----------------------------+ | ID | Binary | Host | Zone | Status | State | Updated At | +--------------------------------------+--------------+-----------+------+---------+-------+----------------------------+ | e3c18c2d-9488-4863-b728-f3f292ec5da8 | nova-compute | devstack1 | nova | enabled | down | 2019-10-25T20:13:51.000000 | | 50a20add-cc49-46bd-af96-9bb4e9247398 | nova-compute | devstack2 | nova | enabled | up | 2019-10-25T20:13:52.000000 | | b92afb2e-cd00-4074-803e-fff9aa379c2f | nova-compute | devstack3 | nova | enabled | up | 2019-10-25T20:13:53.000000 | +--------------------------------------+--------------+-----------+------+---------+-------+----------------------------+ $ vm1=$(openstack server show vm1 -f value -c id) $ openstack server show $vm1 -f value -c OS-EXT-SRV-ATTR:host devstack2 The server now has allocations against both *devstack1* and *devstack2* resource providers in the placement service: .. 
code-block:: console $ devstack1=$(openstack resource provider list --name devstack1 -f value -c uuid) $ devstack2=$(openstack resource provider list --name devstack2 -f value -c uuid) $ openstack resource provider show --allocations $devstack1 +-------------+-----------------------------------------------------------------------------------------------------------+ | Field | Value | +-------------+-----------------------------------------------------------------------------------------------------------+ | uuid | 9546fce4-9fb5-4b35-b277-72ff125ad787 | | name | devstack1 | | generation | 6 | | allocations | {u'a1e6e0b2-9028-4166-b79b-c177ff70fbb7': {u'resources': {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1}}} | +-------------+-----------------------------------------------------------------------------------------------------------+ $ openstack resource provider show --allocations $devstack2 +-------------+-----------------------------------------------------------------------------------------------------------+ | Field | Value | +-------------+-----------------------------------------------------------------------------------------------------------+ | uuid | 52d0182d-d466-4210-8f0d-29466bb54feb | | name | devstack2 | | generation | 3 | | allocations | {u'a1e6e0b2-9028-4166-b79b-c177ff70fbb7': {u'resources': {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1}}} | +-------------+-----------------------------------------------------------------------------------------------------------+ $ openstack --os-placement-api-version 1.12 resource provider allocation show $vm1 +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+ | resource_provider | generation | resources | project_id | user_id | +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+ | 9546fce4-9fb5-4b35-b277-72ff125ad787 | 6 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} | 2f3bffc5db2b47deb40808a4ed2d7c7a | 2206168427c54d92ae2b2572bb0da9af | | 52d0182d-d466-4210-8f0d-29466bb54feb | 3 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} | 2f3bffc5db2b47deb40808a4ed2d7c7a | 2206168427c54d92ae2b2572bb0da9af | +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+ One way to find all servers that were evacuated from *devstack1* is: .. 
code-block:: console $ nova migration-list --source-compute devstack1 --migration-type evacuation +----+--------------------------------------+-------------+-----------+----------------+--------------+-------------+--------+--------------------------------------+------------+------------+----------------------------+----------------------------+------------+ | Id | UUID | Source Node | Dest Node | Source Compute | Dest Compute | Dest Host | Status | Instance UUID | Old Flavor | New Flavor | Created At | Updated At | Type | +----+--------------------------------------+-------------+-----------+----------------+--------------+-------------+--------+--------------------------------------+------------+------------+----------------------------+----------------------------+------------+ | 1 | 8a823ba3-e2e9-4f17-bac5-88ceea496b99 | devstack1 | devstack2 | devstack1 | devstack2 | 192.168.0.1 | done | a1e6e0b2-9028-4166-b79b-c177ff70fbb7 | None | None | 2019-10-25T17:46:35.000000 | 2019-10-25T17:46:37.000000 | evacuation | +----+--------------------------------------+-------------+-----------+----------------+--------------+-------------+--------+--------------------------------------+------------+------------+----------------------------+----------------------------+------------+ Trying to delete the resource provider for *devstack1* will fail while there are allocations against it: .. code-block:: console $ openstack resource provider delete $devstack1 Unable to delete resource provider 9546fce4-9fb5-4b35-b277-72ff125ad787: Resource provider has allocations. (HTTP 409) Solution -------- Using the example resources above, remove the allocation for server *vm1* from the *devstack1* resource provider. If you have `osc-placement `_ 1.8.0 or newer, you can use the :command:`openstack resource provider allocation unset` command to remove the allocations for consumer *vm1* from resource provider *devstack1*: .. code-block:: console $ openstack --os-placement-api-version 1.12 resource provider allocation \ unset --provider $devstack1 $vm1 +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+ | resource_provider | generation | resources | project_id | user_id | +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+ | 52d0182d-d466-4210-8f0d-29466bb54feb | 4 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} | 2f3bffc5db2b47deb40808a4ed2d7c7a | 2206168427c54d92ae2b2572bb0da9af | +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+ If you have *osc-placement* 1.7.x or older, the ``unset`` command is not available and you must instead overwrite the allocations. Note that we do not use :command:`openstack resource provider allocation delete` here because that will remove the allocations for the server from all resource providers, including *devstack2* where it is now running; instead, we use :command:`openstack resource provider allocation set` to overwrite the allocations and only retain the *devstack2* provider allocations. If you do remove all allocations for a given server, you can heal them later. See `Using heal_allocations`_ for details. .. 
code-block:: console $ openstack --os-placement-api-version 1.12 resource provider allocation set $vm1 \ --project-id 2f3bffc5db2b47deb40808a4ed2d7c7a \ --user-id 2206168427c54d92ae2b2572bb0da9af \ --allocation rp=52d0182d-d466-4210-8f0d-29466bb54feb,VCPU=1 \ --allocation rp=52d0182d-d466-4210-8f0d-29466bb54feb,MEMORY_MB=512 \ --allocation rp=52d0182d-d466-4210-8f0d-29466bb54feb,DISK_GB=1 +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+ | resource_provider | generation | resources | project_id | user_id | +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+ | 52d0182d-d466-4210-8f0d-29466bb54feb | 4 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} | 2f3bffc5db2b47deb40808a4ed2d7c7a | 2206168427c54d92ae2b2572bb0da9af | +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+ Once the *devstack1* resource provider allocations have been removed using either of the approaches above, the *devstack1* resource provider can be deleted: .. code-block:: console $ openstack resource provider delete $devstack1 And the related compute service if desired: .. code-block:: console $ openstack --os-compute-api-version 2.53 compute service delete e3c18c2d-9488-4863-b728-f3f292ec5da8 For more details on the resource provider commands used in this guide, refer to the `osc-placement plugin documentation`_. .. _osc-placement plugin documentation: https://docs.openstack.org/osc-placement/latest/ Using heal_allocations ~~~~~~~~~~~~~~~~~~~~~~ If you have a particularly troubling allocation consumer and just want to delete its allocations from all providers, you can use the :command:`openstack resource provider allocation delete` command and then heal the allocations for the consumer using the :ref:`heal_allocations command `. For example: .. code-block:: console $ openstack resource provider allocation delete $vm1 $ nova-manage placement heal_allocations --verbose --instance $vm1 Looking for instances in cell: 04879596-d893-401c-b2a6-3d3aa096089d(cell1) Found 1 candidate instances. Successfully created allocations for instance a1e6e0b2-9028-4166-b79b-c177ff70fbb7. Processed 1 instances. $ openstack resource provider allocation show $vm1 +--------------------------------------+------------+------------------------------------------------+ | resource_provider | generation | resources | +--------------------------------------+------------+------------------------------------------------+ | 52d0182d-d466-4210-8f0d-29466bb54feb | 5 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} | +--------------------------------------+------------+------------------------------------------------+ Note that deleting allocations and then relying on ``heal_allocations`` may not always be the best solution since healing allocations does not account for some things: * `Migration-based allocations`_ would be lost if manually deleted during a resize. These are allocations tracked by the migration resource record on the source compute service during a migration. * Healing allocations only partially support nested allocations. Nested allocations due to Neutron ports having QoS policies are supported since 20.0.0 (Train) release. 
But nested allocations due to vGPU or Cyborg device profile requests in the flavor are not supported. Also if you are using provider.yaml files on compute hosts to define additional resources, if those resources are defined on child resource providers then instances using such resources are not supported. If you do use the ``heal_allocations`` command to cleanup allocations for a specific trouble instance, it is recommended to take note of what the allocations were before you remove them in case you need to reset them manually later. Use the :command:`openstack resource provider allocation show` command to get allocations for a consumer before deleting them, e.g.: .. code-block:: console $ openstack --os-placement-api-version 1.12 resource provider allocation show $vm1 .. _Migration-based allocations: https://specs.openstack.org/openstack/nova-specs/specs/queens/implemented/migration-allocations.html Using placement audit ~~~~~~~~~~~~~~~~~~~~~ If you have a situation where orphaned allocations exist for an instance that was deleted in the past, example log message: .. code-block:: console Instance has allocations against this compute host but is not found in the database. you can use the :ref:`nova-manage placement audit ` tool to have it find and optionally delete orphaned placement allocations. This tool will call the placement API to modify allocations. To list all allocations that are unrelated to an existing instance or migration UUID: .. code-block:: console $ nova-manage placement audit --verbose To delete all allocations on all resource providers that are unrelated to an existing instance or migration UUID: .. code-block:: console $ nova-manage placement audit --verbose --delete To delete all allocations on a specific resource provider that are unrelated to an existing instance or migration UUID: .. code-block:: console $ nova-manage placement audit --verbose --delete --resource-provider ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/troubleshooting/rebuild-placement-db.rst0000664000175000017500000000477500000000000025674 0ustar00zuulzuul00000000000000Rebuild placement DB ==================== Problem ------- You have somehow changed a nova cell database and the ``compute_nodes`` table entries are now reporting different uuids to the placement service but placement already has ``resource_providers`` table entries with the same names as those computes so the resource providers in placement and the compute nodes in the nova database are not synchronized. Maybe this happens as a result of restoring the nova cell database from a backup where the compute hosts have not changed but they are using different uuids. Nova reports compute node inventory to placement using the ``hypervisor_hostname`` and uuid of the ``compute_nodes`` table to the placement ``resource_providers`` table, which has a unique constraint on the name (hostname in this case) and uuid. Trying to create a new resource provider with a new uuid but the same name as an existing provider results in a 409 error from placement, such as in `bug 1817833`_. .. _bug 1817833: https://bugs.launchpad.net/nova/+bug/1817833 Solution -------- .. warning:: This is likely a last resort when *all* computes and resource providers are not synchronized and it is simpler to just rebuild the placement database from the current state of nova. 
This may, however, not work when using placement for more advanced features such as :neutron-doc:`ports with minimum bandwidth guarantees ` or `accelerators `_. Obviously testing first in a pre-production environment is ideal. These are the steps at a high level: #. Make a backup of the existing placement database in case these steps fail and you need to start over. #. Recreate the placement database and run the schema migrations to initialize the placement database. #. Either restart or wait for the :oslo.config:option:`update_resources_interval` on the ``nova-compute`` services to report resource providers and their inventory to placement. #. Run the :ref:`nova-manage placement heal_allocations ` command to report allocations to placement for the existing instances in nova. #. Run the :ref:`nova-manage placement sync_aggregates ` command to synchronize nova host aggregates to placement resource provider aggregates. Once complete, test your deployment as usual, e.g. running Tempest integration and/or Rally tests, creating, migrating and deleting a server, etc. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/uefi.rst0000664000175000017500000000271300000000000017404 0ustar00zuulzuul00000000000000==== UEFI ==== .. versionadded:: 17.0.0 (Queens) Nova supports configuring a `UEFI bootloader`__ for guests. This brings about important advantages over legacy BIOS bootloaders and allows for features such as :doc:`secure-boot`. .. __: https://en.wikipedia.org/wiki/Unified_Extensible_Firmware_Interface Enabling UEFI ------------- Currently the configuration of UEFI guest bootloaders is only supported when using the libvirt compute driver with a :oslo.config:option:`libvirt.virt_type` of ``kvm`` or ``qemu``. When using the libvirt compute driver with AArch64-based guests, UEFI is automatically enabled as AArch64 does not support BIOS. .. todo:: Update this once compute drivers start reporting a trait indicating UEFI bootloader support. Configuring a flavor or image ----------------------------- Configuring a UEFI bootloader varies depending on the compute driver in use. .. rubric:: Libvirt UEFI support is enabled by default on AArch64-based guests. For other guest architectures, you can request UEFI support with libvirt by setting the ``hw_firmware_type`` image property to ``uefi``. For example: .. code-block:: bash $ openstack image set --property hw_firmware_type=uefi $IMAGE References ---------- * `Open Virtual Machine Firmware (OVMF) Status Report`__ * `Anatomy of a boot, a QEMU perspective`__ .. __: http://www.linux-kvm.org/downloads/lersek/ovmf-whitepaper-c770f8c.txt .. __: https://www.qemu.org/2020/07/03/anatomy-of-a-boot/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/unified-limits.rst0000664000175000017500000003331400000000000021377 0ustar00zuulzuul00000000000000============================ Manage Unified Limits Quotas ============================ .. note:: This section provides deployment information about the quota feature. For end-user information about quotas, including information about the type of quotas available, refer to the :doc:`user guide `. Since the **Nova 28.0.0 (2023.2 Bobcat)** release, it is recommended to use `Keystone unified limits`_ for Nova quota limits. For information about legacy quota limits, see the :doc:`legacy quota documentation `. 
Quotas ------ To prevent system capacities from being exhausted without notification, you can set up quotas. Quotas are operational limits. The number of servers allowed for each project can be controlled so that cloud resources are optimized, for example. Quotas can be enforced at both the global (default) level and at the project level. Unified limits -------------- Unified limits is a modern quota system in which quota limits are centralized in the Keystone identity service. There are three steps for quota enforcement in this model: #. Quota limits are retrieved by calling the `Keystone unified limits API`_ #. Quota usage is counted from the `Placement API service`_ #. Quota is enforced locally using the `oslo.limit`_ limit enforcement library In unified limits, the terminology is a bit different from legacy quotas: * A **registered limit** is a global or default limit that applies to all projects * A **limit** is a project-scoped limit that applies to a particular project Cloud operators will need to manage their quota limits in the Keystone service by calling the API directly or by using the OpenStackClient (OSC) `registered limit`_ and `limit`_ commands. .. _Keystone unified limits: https://docs.openstack.org/keystone/latest/admin/unified-limits.html .. _Keystone unified limits API: https://docs.openstack.org/api-ref/identity/v3/index.html#unified-limits .. _Placement API service: https://docs.openstack.org/placement .. _oslo.limit: https://docs.openstack.org/oslo.limit .. _registered limit: https://docs.openstack.org/python-openstackclient/latest/cli/command-objects/registered-limit.html .. _limit: https://docs.openstack.org/python-openstackclient/latest/cli/command-objects/limit.html Roles ~~~~~ By default Keystone API policy, a user must have the following roles and scopes in order to perform actions with unified limits. .. list-table:: :header-rows: 1 :widths: 10 20 20 * - Action - Role - Scope * - List registered limits - ``*`` - ``*`` * - Get registered limit - ``*`` - ``*`` * - Create registered limit - ``admin`` - ``system=all`` * - Update registered limit - ``admin`` - ``system=all`` * - Delete registered limit - ``admin`` - ``system=all`` * - List limits - ``*`` - ``*`` * - Get limit - ``*`` - ``*`` * - Create limit - ``admin`` - ``system=all`` * - Update limit - ``admin`` - ``system=all`` * - Delete limit - ``admin`` - ``system=all`` Configuration ------------- To enable unified limits quotas, some Nova configuration of the :program:`nova-api-wsgi` and :program:`nova-conductor` services is necessary. Set the quota driver to the ``nova.quota.UnifiedLimitsDriver``: .. code-block:: ini [quota] driver = nova.quota.UnifiedLimitsDriver Add a configuration section for oslo.limit: .. code-block:: ini [oslo_limit] username = nova user_domain_name = $SERVICE_DOMAIN_NAME auth_url = $KEYSTONE_SERVICE_URI auth_type = password password = $SERVICE_PASSWORD system_scope = all endpoint_id = $SERVICE_ENDPOINT_ID .. note:: The Nova service endpoint ID can be obtained by ``openstack endpoint list --service nova -f value -c ID`` Ensure that the ``nova`` service user has the ``reader`` role with ``system`` scope: .. code-block:: console openstack role add --user nova --user-domain $SERVICE_DOMAIN_NAME \ --system all reader Setting quota limits on resources --------------------------------- Any resource that can be requested in the cloud must have a registered limit set. 
Quota checks on cloud resources that do not have registered limits will continue to fail until registered limits are set because oslo.limit considers an unregistered resource to have a limit of 0. Types of quota ~~~~~~~~~~~~~~ Unified limit resource names for resources that are tracked as `resource classes`_ in the Placement API service follow the naming pattern of the ``class:`` prefix followed by the name of the resource class. For example: class:VCPU, class:PCPU, class:MEMORY_MB, class:DISK_GB, class:VGPU. .. list-table:: :header-rows: 1 :widths: 10 40 * - Quota Name - Description * - class:VCPU - Number of shared CPU cores (VCPUs) allowed per project * - class:PCPU - Number of dedicated CPU cores (PCPUs) allowed per project * - servers - Number of instances allowed per project * - server_key_pairs - Number of key pairs allowed per user * - server_metadata_items - Number of metadata items allowed per instance * - class:MEMORY_MB - Megabytes of instance ram allowed per project * - server_groups - Number of server groups per project * - server_group_members - Number of servers per server group * - class:DISK_GB - Gigabytes of instance disk allowed per project * - class:$RESOURCE_CLASS - Any resource class in the Placement API service can have a quota limit specified for it (example: class:VGPU) .. _resource classes: https://docs.openstack.org/os-resource-classes/latest OpenStack CLI commands ~~~~~~~~~~~~~~~~~~~~~~ For full OpenStackClient documentation, see https://docs.openstack.org/python-openstackclient/latest/index.html. Registered Limits ^^^^^^^^^^^^^^^^^ To list default limits for Nova: .. code-block:: console openstack registered limit list --service nova To show details about a default limit: .. code-block:: console openstack registered limit show $REGISTERED_LIMIT_ID To create a default limit: .. code-block:: console openstack registered limit create --service nova --default-limit $LIMIT \ $RESOURCE To update a default limit: .. code-block:: console openstack registered limit set --default-limit $LIMIT $REGISTERED_LIMIT_ID To delete a default limit: .. code-block:: console openstack registered limit delete $REGISTERED_LIMIT_ID Limits ^^^^^^ To list project limits for Nova: .. code-block:: console openstack limit list --service nova To list limits for a particular project: .. code-block:: console openstack limit list --service nova --project $PROJECT_ID To show details about a project limit: .. code-block:: console openstack limit show $LIMIT_ID To create a project limit: .. code-block:: console openstack limit create --service nova --project $PROJECT_ID \ --resource-limit $LIMIT $RESOURCE To update a project limit: .. code-block:: console openstack limit set --resource-limit $LIMIT $LIMIT_ID To delete a project limit: .. code-block:: console openstack limit delete $LIMIT_ID Quota enforcement ----------------- When enforcing limits for a given resource and project, the following checks are made in order: #. Limits (project-specific) Depending on the resource, is there a project-specific limit on the resource in Keystone limits? If so, use that as the limit. If not, proceed to check the registered default limit. #. Registered limits (default) Depending on the resource, is there a default limit on the resource in Keystone limits? If so, use that as the limit. If not, oslo.limit will consider the limit as 0, the quota check will fail, and a quota limit exceeded exception will be raised. .. 
warning:: Every resource that can be requested in the cloud **must** at a minimum have a registered limit set. Any resource that does **not** have a registered limit set will fail quota enforcement because oslo.limit considers an unregistered resource to have a limit of **0**. Rechecking quota ~~~~~~~~~~~~~~~~ If :oslo.config:option:`quota.recheck_quota` = True (this is the default), Nova will perform a second quota check after allocating resources. The first quota check is performed before resources are allocated. Rechecking quota ensures that quota limits are strictly enforced and prevents any possibility of resource allocation going over the quota limit in the event of racing parallel API requests. It can be disabled by setting :oslo.config:option:`quota.recheck_quota` = False if strict quota enforcement is not important to the operator. Quota usage from Placement -------------------------- With unified limits quotas, it is required that quota resource usage is counted from the Placement API service. As such, the :oslo.config:option:`quota.count_usage_from_placement` configuration option is ignored when :oslo.config:option:`quota.driver` is set to ``nova.quota.UnifiedLimitsDriver``. There are some things to note when quota resource usage is counted from the Placement API service: * Counted usage will not be accurate in an environment where multiple Nova deployments are sharing a Placement deployment because currently Placement has no way of partitioning resource providers between different Nova deployments. Operators who are running multiple Nova deployments that share a Placement deployment should not use the ``nova.quota.UnifiedLimitsDriver``. * Behavior will be different for resizes. During a resize, resource allocations are held on both the source and destination (even on the same host, see https://bugs.launchpad.net/nova/+bug/1790204) until the resize is confirmed or reverted. Quota usage will be inflated for servers in this state. * The ``populate_queued_for_delete`` and ``populate_user_id`` online data migrations must be completed before usage can be counted from Placement. Until the data migration is complete, the system will fall back to legacy quota usage counting from cell databases depending on the result of an EXISTS database query during each quota check. Use ``nova-manage db online_data_migrations`` to run online data migrations. * Behavior will be different for unscheduled servers in ``ERROR`` state. A server in ``ERROR`` state that has never been scheduled to a compute host will not have Placement allocations, so it will not consume quota usage for cores and ram. * Behavior will be different for servers in ``SHELVED_OFFLOADED`` state. A server in ``SHELVED_OFFLOADED`` state will not have Placement allocations, so it will not consume quota usage for cores and ram. Note that because of this, it will be possible for a request to unshelve a server to be rejected if the user does not have enough quota available to support the cores and ram needed by the server to be unshelved. Migration to unified limits quotas ---------------------------------- There is a `nova-manage`_ command available to help with moving from legacy Nova database quotas to Keystone unified limits quotas. The command will read quota limits from the Nova database and call the Keystone API to create the corresponding unified limits. .. 
code-block:: console $ nova-manage limits migrate_to_unified_limits -h usage: nova-manage limits migrate_to_unified_limits [-h] [--project-id ] [--region-id ] [--verbose] [--dry-run] Copy quota limits from the Nova API database to Keystone. options: -h, --help show this help message and exit --project-id Project ID for which to migrate quota limits --region-id Region ID for which to migrate quota limits --verbose Provide verbose output during execution. --dry-run Show what limits would be created without actually creating them. .. important:: Per-user quota limits will **not** be copied into Keystone because per-user quotas are not supported in unified limits. .. _nova-manage: https://docs.openstack.org/nova/latest/cli/nova-manage.html#limits-migrate-to-unified-limits Require or ignore resources ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The :oslo.config:option:`quota.unified_limits_resource_strategy` and :oslo.config:option:`quota.unified_limits_resource_list` configuration options are available for operators to specify which cloud resources they will require to have registered limits set in Keystone. The default strategy is ``require`` and the default resource list contains the ``servers`` resource. When ``unified_limits_resource_strategy = require``, if a resource in ``unified_limits_resource_list`` is requested and has no registered limit set, the quota limit for that resource will be considered to be 0 and all requests to allocate that resource will be rejected for being over quota. Any resource not in the list will be considered to have unlimited quota. When ``unified_limits_resource_strategy = ignore``, if a resource in ``unified_limits_resource_list`` is requested and has no registered limit set, the quota limit for that resource will be considered to be unlimited and all requests to allocate that resource will be accepted. Any resource not in the list will be considered to have 0 quota. The options should be configured for the :program:`nova-api-wsgi` and :program:`nova-conductor` services. The :program:`nova-conductor` service performs quota enforcement when :oslo.config:option:`quota.recheck_quota` is ``True`` (the default). The ``unified_limits_resource_list`` list can also be set to an empty list. Example configuration values: .. code-block:: ini [quota] unified_limits_resource_strategy = require unified_limits_resource_list = servers,class:VCPU,class:MEMORY_MB,class:DISK_GB ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/upgrades.rst0000664000175000017500000003713200000000000020271 0ustar00zuulzuul00000000000000======== Upgrades ======== Nova aims to provide upgrades with minimal downtime. Firstly, the data plane. There should be no VM downtime when you upgrade Nova. Nova has had this since the early days. Secondly, we want no downtime during upgrades of the Nova control plane. This document is trying to describe how we can achieve that. Once we have introduced the key concepts relating to upgrade, we will introduce the process needed for a no downtime upgrade of nova. .. _minimal_downtime_upgrade: Minimal Downtime Upgrade Process -------------------------------- Plan your upgrade ~~~~~~~~~~~~~~~~~ * Read and ensure you understand the release notes for the next release. * You should ensure all required steps from the previous upgrade have been completed, such as data migrations. * Make a backup of your database. Nova does not support downgrading of the database. 
Hence, in case of upgrade failure, restoring database from backup is the only choice. * During upgrade be aware that there will be additional load on nova-conductor. You may find you need to add extra nova-conductor workers to deal with the additional upgrade related load. Rolling upgrade process ~~~~~~~~~~~~~~~~~~~~~~~ To reduce downtime, the compute services can be upgraded in a rolling fashion. It means upgrading a few services at a time. This results in a condition where both old (N) and new (N+1) nova-compute services co-exist for a certain time period (or even N with N+2 upgraded nova-compute services, see below). Note that, there is no upgrade of the hypervisor here, this is just upgrading the nova services. If reduced downtime is not a concern (or lower complexity is desired), all services may be taken down and restarted at the same time. .. important:: As of OpenStack 2023.1 (Antelope), Nova supports the coexistence of N and N-2 (Yoga) :program:`nova-compute` or :program:`nova-conductor` services in the same deployment. The ``nova-conductor`` service will fail to start when a ``nova-compute`` service that is older than the support envelope is detected. This varies by release and the support envelope will be explained in the release notes. Similarly, in a :doc:`deployment with multiple cells `, neither the super conductor service nor any per-cell conductor service will start if any other conductor service in the deployment is older than the N-2 release. Releases older than 2023.1 will only support rolling upgrades for a single release difference between :program:`nova-compute` and :program:`nova-conductor` services. #. Before maintenance window: * Start the process with the controller node. Install the code for the next version of Nova, either in a venv or a separate control plane node, including all the python dependencies. * Using the newly installed nova code, run the DB sync. First run ``nova-manage api_db sync``, then ``nova-manage db sync``. ``nova-manage db sync`` should be run for all cell databases, including ``cell0``. If necessary, the ``--config-file`` argument can be used to point to the correct ``nova.conf`` file for the given cell. These schema change operations should have minimal or no effect on performance, and should not cause any operations to fail. * At this point, new columns and tables may exist in the database. These DB schema changes are done in a way that both the N and N+1 release can perform operations against the same schema. #. During maintenance window: * Several nova services rely on the external placement service being at the latest level. Therefore, you must upgrade placement before any nova services. See the :placement-doc:`placement upgrade notes ` for more details on upgrading the placement service. * For maximum safety (no failed API operations), gracefully shutdown all the services (i.e. SIG_TERM) except nova-compute. * Before restarting services with new code, perform the release-specific readiness check with ``nova-status upgrade check``. See the :ref:`nova-status upgrade check ` for more details on status check. * Start all services on the new code, with ``[upgrade_levels]compute=auto`` in nova.conf. It is safest to start nova-conductor first and nova-api last. Note that you may use a static alias name instead of ``auto``, such as ``[upgrade_levels]compute=``. Also note that this step is only required if compute services are not upgraded in lock-step with the control services. * If desired, gracefully shutdown nova-compute (i.e. 
SIG_TERM) services in small batches, then start the new version of the code with: ``[upgrade_levels]compute=auto``. If this batch-based approach is used, only a few compute nodes will have any delayed API actions, which helps ensure there is enough capacity online to service any boot requests that happen during this time. #. After maintenance window: * Once all services are running the new code, double-check in the DB that there are no old orphaned service records using ``nova service-list``. * Now that all services are upgraded, we need to send the SIG_HUP signal, so all the services clear any cached service version data. When a new service starts, it automatically detects which version of the compute RPC protocol to use, and it can decide if it is safe to do any online data migrations. Note that if you used a static value for the upgrade_level, such as ``[upgrade_levels]compute=``, you must update nova.conf to remove that configuration value and do a full service restart. * Now that all the services are upgraded and signaled, the system is able to use the latest version of the RPC protocol and can access all of the features in the new release. * Once all the services are running the latest version of the code, and all the services are aware they all have been upgraded, it is safe to transform the data in the database into its new format. While some of this work happens on demand when the system reads a database row that needs updating, we must get all the data transformed into the current version before the next upgrade. Additionally, some data may not be transformed automatically, so performing the data migration is necessary to avoid performance degradation due to compatibility routines. * This process can put significant extra write load on the database. Complete all online data migrations using: ``nova-manage db online_data_migrations --max-count ``. Note that you can use the ``--max-count`` argument to reduce the load this operation will place on the database, which allows you to run a small chunk of the migrations until all of the work is done. The chunk size you should use depends on your infrastructure and how much additional load you can impose on the database. To reduce load, perform smaller batches with delays between chunks. To reduce time to completion, run larger batches. Each time it is run, the command will show a summary of completed and remaining records. If using the ``--max-count`` option, the command should be rerun while it returns exit status 1 (which indicates that some migrations took effect, and more work may remain to be done), even if some migrations produce errors. If all possible migrations have completed and some are still producing errors, exit status 2 will be returned. In this case, the cause of the errors should be investigated and resolved. Migrations should be considered successfully completed only when the command returns exit status 0. * At this point, you must also ensure you update the configuration to stop using any deprecated features or options, and perform any required work to transition to alternative features. All deprecated options are supported for at least one cycle, but should be removed before your next upgrade is performed. Current Database Upgrade Types ------------------------------ Currently Nova has two types of database upgrades that are in use. - Schema Migrations - Data Migrations Nova does not support database downgrades. ..
_schema-migrations: Schema Migrations ~~~~~~~~~~~~~~~~~ Schema migrations are defined in ``nova/db/main/migrations/versions`` and ``nova/db/api/migrations/versions``. They are the routines that transform our database structure, which should be additive and able to be applied to a running system before service code has been upgraded. For information on developing your own schema migrations as part of a feature or bugfix, refer to :doc:`/reference/database-migrations`. .. note:: The API database migrations should be assumed to run before the migrations for the main/cell databases. This is because the former contains information about how to find and connect to the latter. Some management commands that operate on multiple cells will attempt to list and iterate over cell mapping records, which require a functioning API database schema. .. _data-migrations: Data Migrations ~~~~~~~~~~~~~~~ Online data migrations occur in two places: #. Inline migrations that occur as part of normal run-time activity as data is read in the old format and written in the new format #. Background online migrations that are performed using ``nova-manage`` to complete transformations that will not occur incidentally due to normal runtime activity. An example of online data migrations are the flavor migrations done as part of Nova object version 1.18. This included a transient migration of flavor storage from one database location to another. For information on developing your own schema migrations as part of a feature or bugfix, refer to :doc:`/reference/database-migrations`. Migration policy ~~~~~~~~~~~~~~~~ The following guidelines for schema and data migrations are followed in order to ease upgrades: * Additive schema migrations - In general, almost all schema migrations should be additive. Put simply, they should only create elements like columns, indices, and tables. * Subtractive schema migrations - To remove an element like a column or table during the N release cycle: #. The element must be deprecated and retained for backward compatibility. (This allows for graceful upgrade from N to N+1.) #. Data migration, by the objects layer, must completely migrate data from the old version of the schema to the new version. #. The column can then be removed with a migration at the start of N+2. * All schema migrations should be idempotent. For example, a migration should check if an element exists in the schema before attempting to add it. This logic comes for free in the autogenerated workflow of the online migrations. * Constraints - When adding a foreign or unique key constraint, the schema migration code needs to handle possible problems with data before applying the constraint. (Example: A unique constraint must clean up duplicate records before applying said constraint.) * Data migrations - As mentioned above, data migrations will be done in an online fashion by custom code in the object layer that handles moving data between the old and new portions of the schema. In addition, for each type of data migration performed, there should exist a nova-manage option for an operator to manually request that rows be migrated. Concepts -------- Here are the key concepts you need to know before reading the section on the upgrade process: RPC version pinning Through careful RPC versioning, newer nodes are able to talk to older nova-compute nodes. When upgrading control plane nodes, we can pin them at an older version of the compute RPC API, until all the compute nodes are able to be upgraded. 
https://wiki.openstack.org/wiki/RpcMajorVersionUpdates .. note:: The procedure for rolling upgrades with multiple cells (cells v2) is not yet determined. Online Configuration Reload During the upgrade, we pin new services at the older RPC version. When all services are updated to use newer code, we need to unpin them so we are able to use any new functionality. To avoid having to restart the service, we ideally need a way to update the currently running process to use the latest configuration, using the current SIGHUP signal handling or some other mechanism. Graceful service shutdown Many nova services are Python processes listening for messages on an AMQP queue, including nova-compute. When the process is sent SIGTERM, it stops getting new work from its queue, completes any outstanding work, then terminates. During this process, messages can be left on the queue for when the Python process starts back up. This gives us a way to shut down a service using older code, and start up a service using newer code with minimal impact. If it is a service that can have multiple workers, like nova-conductor, you can usually add the new workers before the graceful shutdown of the old workers. In the case of singleton services, like nova-compute, some actions could be delayed during the restart, but ideally no actions should fail due to the restart. .. note:: While this is true for the RabbitMQ RPC backend, we need to confirm what happens for other RPC backends. API load balancer draining When upgrading API nodes, you can make your load balancer only send new connections to the newer API nodes, allowing for a seamless update of your API nodes. Expand/Contract DB Migrations Modern databases are able to make many schema changes while you are still writing to the database. Taking this a step further, we can make all DB changes by first adding the new structures, i.e. expanding. Then you can slowly move all the data into a new location and format. Once that is complete, you can drop the bits of the schema that are no longer needed, i.e. contract. This happens multiple cycles after we have stopped using a particular piece of schema, and can happen in a schema migration without affecting runtime code. Online Data Migrations using objects Since Kilo, we have moved all data migration into the DB objects code. When trying to migrate data in the database from the old format to the new format, this is done in the object code when reading or saving things that are in the old format. For records that are not updated, you need to run a background process to convert those records into the newer format. This process must be completed before you contract the database schema. DB prune deleted rows Currently resources are soft deleted in the main database, so users are able to track instances in the DB that are created and destroyed in production. However, most people have a data retention policy of, say, 30 or 90 days, after which they will want to delete those entries. Not deleting those entries affects DB performance as indices grow very large and data migrations take longer as there is more data to migrate. nova-conductor object backports RPC pinning ensures new services can talk to the older service's method signatures. But many of the parameters are objects that may well be too new for the old service to understand, so you are able to send the object back to the nova-conductor to be downgraded to a version the older service can understand. Testing ------- We use the "grenade" jobs to test upgrades.
The current tests only cover the existing upgrade process where old computes can run with the new control plane but the control plane is turned off for DB migrations. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/vdpa.rst0000664000175000017500000000754400000000000017411 0ustar00zuulzuul00000000000000============================ Using ports vnic_type='vdpa' ============================ .. versionadded:: 23.0.0 (Wallaby) Introduced support for vDPA. .. versionadded:: 26.0.0 (Zed) Added support for all instance move operations, and the interface attach/detach, and suspend/resume operations. .. important:: The functionality described below is only supported by the libvirt/KVM virt driver. The kernel vDPA (virtio Data Path Acceleration) framework provides a vendor-independent framework for offloading data-plane processing to software or hardware virtio device backends. While the kernel vDPA framework supports many types of vDPA devices, at this time nova only supports ``virtio-net`` devices using the ``vhost-vdpa`` front-end driver. Support for ``virtio-blk`` or ``virtio-gpu`` may be added in the future but is not currently planned for any specific release. vDPA device tracking ~~~~~~~~~~~~~~~~~~~~ When implementing support for vDPA-based neutron ports, one of the first decisions nova had to make was how to model the availability of vDPA devices and the capability to virtualize vDPA devices. As the initial use-case for this technology was to offload networking to hardware-offloaded OVS via neutron ports, the decision was made to extend the existing PCI tracker that is used for SR-IOV and pci-passthrough to support vDPA devices. As a result, a simplification was made to assume that the parent device of a vDPA device is an SR-IOV Virtual Function (VF). Consequently, software-only vDPA devices such as those created by the kernel ``vdpa-sim`` sample module are not supported. To make vDPA devices available to be scheduled to guests, the operator should include the device using the PCI address or vendor ID and product ID of the parent VF in the PCI ``device_spec``. See: :nova-doc:`pci-passthrough ` for details. Nova will not create the VFs or vDPA devices automatically. It is expected that the operator will allocate them before starting the nova-compute agent. While no specific mechanism is prescribed to do this, udev rules or systemd service files are generally the recommended approach to ensure the devices are created consistently across reboots. .. note:: As vDPA is an offload only for the data plane and not the control plane, a vDPA control plane is required to properly support vDPA device passthrough. At the time of writing, only hardware-offloaded OVS is supported when using vDPA with nova. Because of this, vDPA devices cannot be requested using the PCI alias. While nova could allow vDPA devices to be requested by the flavor using a PCI alias, we would not be able to correctly configure the device as there would be no suitable control plane. For this reason vDPA devices are currently only consumable via neutron ports. Virt driver support ~~~~~~~~~~~~~~~~~~~ Supporting neutron ports with ``vnic_type=vdpa`` depends on the capability of the virt driver. At this time only the ``libvirt`` virt driver with KVM is fully supported. QEMU may also work but is untested. vDPA support depends on kernel 5.7+, Libvirt 6.9.0+ and QEMU 5.1+.
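As noted in the vDPA device tracking section above, the parent VFs must be listed in the ``[pci] device_spec`` option of the :program:`nova-compute` configuration before vDPA devices can be scheduled. A minimal sketch of such an entry is shown below; the PCI address and physical network name are purely illustrative and must match your own hardware and Neutron configuration:

.. code-block:: ini

   [pci]
   device_spec = { "address": "0000:65:00.2", "physical_network": "physnet1" }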
vDPA lifecycle operations ~~~~~~~~~~~~~~~~~~~~~~~~~ To boot a VM with vDPA ports, the ports must first be created in neutron. To do this, the normal SR-IOV workflow is used, whereby the port is first created in neutron and passed into nova as part of the server create request. .. code-block:: bash openstack port create --network <network> --vnic-type vdpa vdpa-port openstack server create --flavor <flavor> --image <image> --port <vdpa-port> vdpa-vm vDPA live migration ~~~~~~~~~~~~~~~~~~~ At this time QEMU and the ``vhost-vdpa`` kernel module do not support transparent live migration of VMs with vDPA ports. To enable live migration of VMs with vDPA interfaces, the existing SR-IOV hotplug live migration procedure has been extended to include ``vnic_type='vdpa'`` interfaces. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/vendordata.rst0000664000175000017500000001623200000000000020604 0ustar00zuulzuul00000000000000========== Vendordata ========== .. note:: This section provides deployment information about the vendordata feature. For end-user information about the vendordata feature and instance metadata in general, refer to the :doc:`user guide `. The *vendordata* feature provides a way to pass vendor- or deployment-specific information to instances. This can be accessed by users using :doc:`the metadata service ` or with :doc:`config drives `. There are two vendordata modules provided with nova: ``StaticJSON`` and ``DynamicJSON``. ``StaticJSON`` -------------- The ``StaticJSON`` module includes the contents of a static JSON file loaded from disk. This can be used for things which don't change between instances, such as the location of the corporate puppet server. It is the default provider. Configuration ~~~~~~~~~~~~~ The service you must configure to enable the ``StaticJSON`` vendordata module depends on how guests are accessing vendordata. If using the metadata service, configuration applies to either :program:`nova-api-wsgi` or :program:`nova-metadata-wsgi`, depending on the deployment, while if using config drives, configuration applies to :program:`nova-compute`. However, configuration is otherwise the same and the following options apply: - :oslo.config:option:`api.vendordata_providers` - :oslo.config:option:`api.vendordata_jsonfile_path` Refer to the :doc:`metadata service ` and :doc:`config drive ` documentation for more information on how to configure the required services. ``DynamicJSON`` --------------- The ``DynamicJSON`` module can make a request to an external REST service to determine what metadata to add to an instance. This is how we recommend you generate things like Active Directory tokens which change per instance. When used, the ``DynamicJSON`` module will make a request to any REST services listed in the :oslo.config:option:`api.vendordata_dynamic_targets` configuration option. There can be more than one of these, but note that they will be queried once per metadata request from the instance, which can mean a lot of traffic depending on your configuration and the configuration of the instance. The following data is passed to your REST service as a JSON-encoded POST: .. list-table:: :header-rows: 1 * - Key - Description * - ``project-id`` - The ID of the project that owns this instance. * - ``instance-id`` - The UUID of this instance. * - ``image-id`` - The ID of the image used to boot this instance. * - ``user-data`` - As specified by the user at boot time. * - ``hostname`` - The hostname of the instance.
* - ``metadata`` - As specified by the user at boot time. Metadata fetched from the REST service will appear in the metadata service at a new file called ``vendordata2.json``, with a path (either in the metadata service URL or in the config drive) like this:: openstack/latest/vendor_data2.json For each dynamic target, there will be an entry in the JSON file named after that target. For example: .. code-block:: json { "testing": { "value1": 1, "value2": 2, "value3": "three" } } The `novajoin`__ project provides a dynamic vendordata service to manage host instantiation in an IPA server. __ https://opendev.org/x/novajoin Deployment considerations ~~~~~~~~~~~~~~~~~~~~~~~~~ Nova provides authentication to external metadata services in order to provide some level of certainty that the request came from nova. This is done by providing a service token with the request -- you can then just deploy your metadata service with the keystone authentication WSGI middleware. This is configured using the keystone authentication parameters in the :oslo.config:group:`vendordata_dynamic_auth` configuration group. Configuration ~~~~~~~~~~~~~ As with ``StaticJSON``, the service you must configure to enable the ``DynamicJSON`` vendordata module depends on how guests are accessing vendordata. If using the metadata service, configuration applies to either :program:`nova-api-wsgi` or :program:`nova-metadata-wsgi`, depending on the deployment, while if using config drives, configuration applies to :program:`nova-compute`. However, configuration is otherwise the same and the following options apply: - :oslo.config:option:`api.vendordata_providers` - :oslo.config:option:`api.vendordata_dynamic_ssl_certfile` - :oslo.config:option:`api.vendordata_dynamic_connect_timeout` - :oslo.config:option:`api.vendordata_dynamic_read_timeout` - :oslo.config:option:`api.vendordata_dynamic_failure_fatal` - :oslo.config:option:`api.vendordata_dynamic_targets` Refer to the :doc:`metadata service ` and :doc:`config drive ` documentation for more information on how to configure the required services. In addition, there are also many options related to authentication. 
These are provided by :keystone-doc:`keystone <>` but are listed below for completeness: - :oslo.config:option:`vendordata_dynamic_auth.cafile` - :oslo.config:option:`vendordata_dynamic_auth.certfile` - :oslo.config:option:`vendordata_dynamic_auth.keyfile` - :oslo.config:option:`vendordata_dynamic_auth.insecure` - :oslo.config:option:`vendordata_dynamic_auth.timeout` - :oslo.config:option:`vendordata_dynamic_auth.collect_timing` - :oslo.config:option:`vendordata_dynamic_auth.split_loggers` - :oslo.config:option:`vendordata_dynamic_auth.auth_type` - :oslo.config:option:`vendordata_dynamic_auth.auth_section` - :oslo.config:option:`vendordata_dynamic_auth.auth_url` - :oslo.config:option:`vendordata_dynamic_auth.system_scope` - :oslo.config:option:`vendordata_dynamic_auth.domain_id` - :oslo.config:option:`vendordata_dynamic_auth.domain_name` - :oslo.config:option:`vendordata_dynamic_auth.project_id` - :oslo.config:option:`vendordata_dynamic_auth.project_name` - :oslo.config:option:`vendordata_dynamic_auth.project_domain_id` - :oslo.config:option:`vendordata_dynamic_auth.project_domain_name` - :oslo.config:option:`vendordata_dynamic_auth.trust_id` - :oslo.config:option:`vendordata_dynamic_auth.default_domain_id` - :oslo.config:option:`vendordata_dynamic_auth.default_domain_name` - :oslo.config:option:`vendordata_dynamic_auth.user_id` - :oslo.config:option:`vendordata_dynamic_auth.username` - :oslo.config:option:`vendordata_dynamic_auth.user_domain_id` - :oslo.config:option:`vendordata_dynamic_auth.user_domain_name` - :oslo.config:option:`vendordata_dynamic_auth.password` - :oslo.config:option:`vendordata_dynamic_auth.tenant_id` - :oslo.config:option:`vendordata_dynamic_auth.tenant_name` Refer to the :keystone-doc:`keystone documentation ` for information on configuring these. References ---------- * Michael Still's talk from the Queens summit in Sydney, `Metadata, User Data, Vendor Data, oh my!`__ * Michael's blog post on `deploying a simple vendordata service`__ which provides more details and sample code to supplement the documentation above. __ https://www.openstack.org/videos/sydney-2017/metadata-user-data-vendor-data-oh-my __ https://www.madebymikal.com/nova-vendordata-deployment-an-excessively-detailed-guide/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/virtual-gpu.rst0000664000175000017500000005231500000000000020736 0ustar00zuulzuul00000000000000======================================= Attaching virtual GPU devices to guests ======================================= .. important:: The functionality described below is only supported by the libvirt/KVM driver. The virtual GPU feature in Nova allows a deployment to provide specific GPU types for instances using physical GPUs that can provide virtual devices. For example, a single `Intel GVT-g`_ or a `NVIDIA GRID vGPU`_ physical Graphics Processing Unit (pGPU) can be virtualized as multiple virtual Graphics Processing Units (vGPUs) if the hypervisor supports the hardware driver and has the capability to create guests using those virtual devices. This feature is highly dependent on the version of libvirt and the physical devices present on the host. In addition, the vendor's vGPU driver software must be installed and configured on the host at the same time. Caveats are mentioned in the `Caveats`_ section. To enable virtual GPUs, follow the steps below: #. `Enable GPU types (Compute)`_ #. 
`Configure a flavor (Controller)`_ Enable GPU types (Compute) -------------------------- #. For NVIDIA GPUs that support SR-IOV, enable the virtual functions. .. code-block:: bash $ /usr/lib/nvidia/sriov-manage -e slot:bus:domain.function For example, to enable the virtual functions for the GPU with slot ``0000``, bus ``41``, domain ``00``, and function ``0``: .. code-block:: bash $ /usr/lib/nvidia/sriov-manage -e 0000:41:00.0 You may want to automate this process as it has to be done on each boot of the host. Given an example ``systemd`` template unit file named ``nvidia-sriov-manage@.service``: .. code-block:: text [Unit] After = nvidia-vgpu-mgr.service After = nvidia-vgpud.service Description = Enable Nvidia GPU virtual functions [Service] Type = oneshot User = root Group = root ExecStart = /usr/lib/nvidia/sriov-manage -e %i # Give a reasonable amount of time for the server to start up/shut down TimeoutSec = 120 # This creates a specific slice which all services will operate from # The accounting options give us the ability to see resource usage # through the `systemd-cgtop` command. Slice = system.slice # Set Accounting CPUAccounting = True BlockIOAccounting = True MemoryAccounting = True TasksAccounting = True RemainAfterExit = True ExecStartPre = /usr/bin/sleep 30 [Install] WantedBy = multi-user.target To enable the virtual functions for the GPU with slot ``0000``, bus ``41``, domain ``00``, and function ``0``: .. code-block:: bash $ systemctl enable nvidia-sriov-manage@0000:41:00.0.service .. note:: This is only an example and it is important to consult the relevant vendor documentation for the specific devices that you have. #. Specify which specific GPU type(s) the instances would get. Edit :oslo.config:option:`devices.enabled_mdev_types`: .. code-block:: ini [devices] enabled_mdev_types = nvidia-35 If you want to support more than a single GPU type, you need to provide a separate configuration section for each device. For example: .. code-block:: ini [devices] enabled_mdev_types = nvidia-35, nvidia-36 [mdev_nvidia-35] device_addresses = 0000:84:00.0,0000:85:00.0 [mdev_nvidia-36] device_addresses = 0000:86:00.0 where you have to define which physical GPUs are supported per GPU type. If the same PCI address is provided for two different types, nova-compute will refuse to start and issue a specific error in the logs. To know which specific type(s) to mention, please refer to `How to discover a GPU type`_. .. versionchanged:: 21.0.0 Supporting multiple GPU types is only supported by the Ussuri release and later versions. #. Restart the ``nova-compute`` service. .. warning:: Changing the type is possible but since existing physical GPUs can't address multiple guests having different types, that will make Nova return you a NoValidHost if existing instances with the original type still exist. Accordingly, it's highly recommended to instead deploy the new type to new compute nodes that don't already have workloads and rebuild instances on the nodes that need to change types. Configure a flavor (Controller) ------------------------------- Configure a flavor to request one virtual GPU: .. code-block:: console $ openstack flavor set vgpu_1 --property "resources:VGPU=1" .. note:: As of the Queens release, all hypervisors that support virtual GPUs only accept a single virtual GPU per instance. The enabled vGPU types on the compute hosts are not exposed to API users. 
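If the ``vgpu_1`` flavor does not exist yet, it can be created first and then given the vGPU resource property. The vCPU, RAM and disk sizes below are only illustrative:

.. code-block:: console

   $ openstack flavor create --vcpus 4 --ram 8192 --disk 40 vgpu_1
   $ openstack flavor set vgpu_1 --property "resources:VGPU=1"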
Flavors configured for vGPU support can be tied to host aggregates as a means to properly schedule those flavors onto the compute hosts that support them. See :doc:`/admin/aggregates` for more information. Create instances with virtual GPU devices ----------------------------------------- The ``nova-scheduler`` selects a destination host that has vGPU devices available by calling the Placement API for a specific VGPU resource class provided by compute nodes. .. code-block:: console $ openstack server create --flavor vgpu_1 --image cirros-0.3.5-x86_64-uec --wait test-vgpu How to discover a GPU type -------------------------- Virtual GPUs are seen as mediated devices. Physical PCI devices (the graphic card here) supporting virtual GPUs propose mediated device (mdev) types. Since mediated devices are supported by the Linux kernel through sysfs files after installing the vendor's virtual GPUs driver software, you can see the required properties as follows: .. code-block:: console $ ls /sys/class/mdev_bus/*/mdev_supported_types /sys/class/mdev_bus/0000:84:00.0/mdev_supported_types: nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45 /sys/class/mdev_bus/0000:85:00.0/mdev_supported_types: nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45 /sys/class/mdev_bus/0000:86:00.0/mdev_supported_types: nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45 /sys/class/mdev_bus/0000:87:00.0/mdev_supported_types: nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45 Checking allocations and inventories for virtual GPUs ----------------------------------------------------- .. note:: The information below is only valid from the 19.0.0 Stein release. Before this release, inventories and allocations related to a ``VGPU`` resource class are still on the root resource provider related to the compute node. If upgrading from Rocky and using the libvirt driver, ``VGPU`` inventory and allocations are moved to child resource providers that represent actual physical GPUs. The examples you will see are using the `osc-placement plugin`_ for OpenStackClient. For details on specific commands, see its documentation. #. Get the list of resource providers .. code-block:: console $ openstack resource provider list +--------------------------------------+---------------------------------------------------------+------------+ | uuid | name | generation | +--------------------------------------+---------------------------------------------------------+------------+ | 5958a366-3cad-416a-a2c9-cfbb5a472287 | virtlab606.xxxxxxxxxxxxxxxxxxxxxxxxxxx | 7 | | fc9b9287-ef5e-4408-aced-d5577560160c | virtlab606.xxxxxxxxxxxxxxxxxxxxxxxxxxx_pci_0000_86_00_0 | 2 | | e2f8607b-0683-4141-a8af-f5e20682e28c | virtlab606.xxxxxxxxxxxxxxxxxxxxxxxxxxx_pci_0000_85_00_0 | 3 | | 85dd4837-76f9-41f2-9f19-df386017d8a0 | virtlab606.xxxxxxxxxxxxxxxxxxxxxxxxxxx_pci_0000_87_00_0 | 2 | | 7033d860-8d8a-4963-8555-0aa902a08653 | virtlab606.xxxxxxxxxxxxxxxxxxxxxxxxxxx_pci_0000_84_00_0 | 2 | +--------------------------------------+---------------------------------------------------------+------------+ In this example, we see the root resource provider ``5958a366-3cad-416a-a2c9-cfbb5a472287`` with four other resource providers that are its children and where each of them corresponds to a single physical GPU. #. 
Check the inventory of each resource provider to see resource classes .. code-block:: console $ openstack resource provider inventory list 5958a366-3cad-416a-a2c9-cfbb5a472287 +----------------+------------------+----------+----------+-----------+----------+-------+ | resource_class | allocation_ratio | max_unit | reserved | step_size | min_unit | total | +----------------+------------------+----------+----------+-----------+----------+-------+ | VCPU | 16.0 | 48 | 0 | 1 | 1 | 48 | | MEMORY_MB | 1.5 | 65442 | 512 | 1 | 1 | 65442 | | DISK_GB | 1.0 | 49 | 0 | 1 | 1 | 49 | +----------------+------------------+----------+----------+-----------+----------+-------+ $ openstack resource provider inventory list e2f8607b-0683-4141-a8af-f5e20682e28c +----------------+------------------+----------+----------+-----------+----------+-------+ | resource_class | allocation_ratio | max_unit | reserved | step_size | min_unit | total | +----------------+------------------+----------+----------+-----------+----------+-------+ | VGPU | 1.0 | 16 | 0 | 1 | 1 | 16 | +----------------+------------------+----------+----------+-----------+----------+-------+ Here you can see a ``VGPU`` inventory on the child resource provider while other resource class inventories are still located on the root resource provider. #. Check allocations for each server that is using virtual GPUs .. code-block:: console $ openstack server list +--------------------------------------+-------+--------+---------------------------------------------------------+--------------------------+--------+ | ID | Name | Status | Networks | Image | Flavor | +--------------------------------------+-------+--------+---------------------------------------------------------+--------------------------+--------+ | 5294f726-33d5-472a-bef1-9e19bb41626d | vgpu2 | ACTIVE | private=10.0.0.14, fd45:cdad:c431:0:f816:3eff:fe78:a748 | cirros-0.4.0-x86_64-disk | vgpu | | a6811fc2-cec8-4f1d-baea-e2c6339a9697 | vgpu1 | ACTIVE | private=10.0.0.34, fd45:cdad:c431:0:f816:3eff:fe54:cc8f | cirros-0.4.0-x86_64-disk | vgpu | +--------------------------------------+-------+--------+---------------------------------------------------------+--------------------------+--------+ $ openstack resource provider allocation show 5294f726-33d5-472a-bef1-9e19bb41626d +--------------------------------------+------------+------------------------------------------------+ | resource_provider | generation | resources | +--------------------------------------+------------+------------------------------------------------+ | 5958a366-3cad-416a-a2c9-cfbb5a472287 | 8 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} | | 7033d860-8d8a-4963-8555-0aa902a08653 | 3 | {u'VGPU': 1} | +--------------------------------------+------------+------------------------------------------------+ $ openstack resource provider allocation show a6811fc2-cec8-4f1d-baea-e2c6339a9697 +--------------------------------------+------------+------------------------------------------------+ | resource_provider | generation | resources | +--------------------------------------+------------+------------------------------------------------+ | e2f8607b-0683-4141-a8af-f5e20682e28c | 3 | {u'VGPU': 1} | | 5958a366-3cad-416a-a2c9-cfbb5a472287 | 8 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} | +--------------------------------------+------------+------------------------------------------------+ In this example, two servers were created using a flavor asking for 1 ``VGPU``, so when looking at the allocations for each consumer UUID 
(which is the server UUID), you can see that VGPU allocation is against the child resource provider while other allocations are for the root resource provider. Here, that means that the virtual GPU used by ``a6811fc2-cec8-4f1d-baea-e2c6339a9697`` is actually provided by the physical GPU having the PCI ID ``0000:85:00.0``. (Optional) Provide custom traits for multiple GPU types ------------------------------------------------------- Since operators want to support different GPU types per compute, it would be nice to have flavors asking for a specific GPU type. This is now possible using custom traits by decorating child Resource Providers that correspond to physical GPUs. .. note:: Possible improvements in a future release could consist of providing automatic tagging of Resource Providers with standard traits corresponding to versioned mapping of public GPU types. For the moment, this has to be done manually. #. Get the list of resource providers See `Checking allocations and inventories for virtual GPUs`_ first for getting the list of Resource Providers that support a ``VGPU`` resource class. #. Define custom traits that will correspond for each to a GPU type .. code-block:: console $ openstack --os-placement-api-version 1.6 trait create CUSTOM_NVIDIA_11 In this example, we ask to create a custom trait named ``CUSTOM_NVIDIA_11``. #. Add the corresponding trait to the Resource Provider matching the GPU .. code-block:: console $ openstack --os-placement-api-version 1.6 resource provider trait set \ --trait CUSTOM_NVIDIA_11 e2f8607b-0683-4141-a8af-f5e20682e28c In this case, the trait ``CUSTOM_NVIDIA_11`` will be added to the Resource Provider with the UUID ``e2f8607b-0683-4141-a8af-f5e20682e28c`` that corresponds to the PCI address ``0000:85:00:0`` as shown above. #. Amend the flavor to add a requested trait .. code-block:: console $ openstack flavor set --property trait:CUSTOM_NVIDIA_11=required vgpu_1 In this example, we add the ``CUSTOM_NVIDIA_11`` trait as a required information for the ``vgpu_1`` flavor we created earlier. This will allow the Placement service to only return the Resource Providers matching this trait so only the GPUs that were decorated with will be checked for this flavor. Caveats ------- .. note:: This information is correct as of the 17.0.0 Queens release. Where improvements have been made or issues fixed, they are noted per item. * After installing the NVIDIA driver on compute nodes, if ``mdev`` are not visible but VF devices are present under a path like ``/sys/bus/pci/devices/0000:25:00.4/nvidia``, this indicates that the **kernel variant driver** is in use. This most likely occurs on **Ubuntu Noble** or **RHEL 10**. .. versionchanged:: 31.0.0 Please refer to the `PCI passthrough documentation`_ for proper configuration. * When using recent nVidia GPU architectures like Ampere or newer GPUs which have SR-IOV feature, Nova can't know how many vGPUs can be used by a specific type. You then need to create virtual functions and then provide the list of the virtual functions per GPUs that can be used by setting ``device_addresses``. .. versionchanged:: 29.0.0 By the 2024.1 Caracal release, if you use those hardware, you need to provide a new configuration option named ``max_instances`` in the related mdev type group (eg. ``mdev_nvidia-35``) where the value of that option would be the number of vGPUs that the type can create. As an example for the `A40-2Q nVidia GPU type`__ which can create up to 24 vGPUs, please provide the below configuration : .. 
__: https://docs.nvidia.com/vgpu/16.0/grid-vgpu-user-guide/index.html#vgpu-types-nvidia-a40 .. code-block:: ini [devices] enabled_mdev_types = nvidia-558 [mdev_nvidia-558] max_instances = 24 As a side note, you can see that we don't use ``device_addresses`` in the ``mdev_nvidia-558`` section, as we don't need to tell which exact virtual functions we want to use for that type. * When live-migrating an instance using vGPUs, the libvirt guest domain XML isn't updated with the new mediated device UUID to use for the target. .. versionchanged:: 29.0.0 In the 2024.1 Caracal release, Nova now `supports vGPU live-migrations`_. In order to do this, both the source and target compute service need to have minimum versions of libvirt-8.6.0, QEMU-8.1.0 and Linux kernel 5.18.0. You need to ensure that either you use only single common vGPU type between two computes. Where multiple mdev types are configured on the source and destination host, custom traits or custom resource classes must be configured, reported by the host and requested by the instance to make sure that the Placement API correctly returns the supported GPU using the right vGPU type for a migration. Last but not least, if you want to live-migrate nVidia mediated devices, you need to update :oslo.config:option:`libvirt.live_migration_downtime`, :oslo.config:option:`libvirt.live_migration_downtime_steps` and :oslo.config:option:`libvirt.live_migration_downtime_delay`: .. code-block:: ini live_migration_downtime = 500000 live_migration_downtime_steps = 3 live_migration_downtime_delay = 3 You can see an example of a working live-migration `here`__. .. __: http://sbauza.github.io/vgpu/vgpu_live_migration.html * Suspending a guest that has vGPUs doesn't yet work because of a libvirt limitation (it can't hot-unplug mediated devices from a guest). Workarounds using other instance actions (like snapshotting the instance or shelving it) are recommended until libvirt gains mdev hot-unplug support. If a user attempts to suspend the instance, the libvirt driver will raise an exception that will cause the instance to be set back to ACTIVE. The ``suspend`` action in the ``os-instance-actions`` API will have an *Error* state. .. versionchanged:: 25.0.0 This has been resolved in the Yoga release. See `bug 1948705`_. * Resizing an instance with a new flavor that has vGPU resources doesn't allocate those vGPUs to the instance (the instance is created without vGPU resources). The proposed workaround is to rebuild the instance after resizing it. The rebuild operation allocates vGPUS to the instance. .. versionchanged:: 21.0.0 This has been resolved in the Ussuri release. See `bug 1778563`_. * Cold migrating an instance to another host will have the same problem as resize. If you want to migrate an instance, make sure to rebuild it after the migration. .. versionchanged:: 21.0.0 This has been resolved in the Ussuri release. See `bug 1778563`_. * Rescue images do not use vGPUs. An instance being rescued does not keep its vGPUs during rescue. During that time, another instance can receive those vGPUs. This is a known issue. The recommended workaround is to rebuild an instance immediately after rescue. However, rebuilding the rescued instance only helps if there are other free vGPUs on the host. .. versionchanged:: 18.0.0 This has been resolved in the Rocky release. See `bug 1762688`_. For nested vGPUs: .. note:: This information is correct as of the 21.0.0 Ussuri release. Where improvements have been made or issues fixed, they are noted per item. 
* If creating servers with a flavor asking for vGPUs and the user wants multi-create (i.e. say --max 2) then the scheduler could be returning a NoValidHosts exception even if each physical GPU can support at least one specific instance, if the total wanted capacity is not supported by only one physical GPU. (See `bug 1874664 `_.) For example, creating servers with a flavor asking for vGPUs, if two children RPs have 4 vGPU inventories each: - You can ask for a flavor with 2 vGPU with --max 2. - But you can't ask for a flavor with 4 vGPU and --max 2. .. _bug 1778563: https://bugs.launchpad.net/nova/+bug/1778563 .. _bug 1762688: https://bugs.launchpad.net/nova/+bug/1762688 .. _bug 1948705: https://bugs.launchpad.net/nova/+bug/1948705 .. _supports vGPU live-migrations: https://specs.openstack.org/openstack/nova-specs/specs/2024.1/approved/libvirt-mdev-live-migrate.html .. Links .. _Intel GVT-g: https://01.org/igvt-g .. _NVIDIA GRID vGPU: http://docs.nvidia.com/grid/5.0/pdf/grid-vgpu-user-guide.pdf .. _osc-placement plugin: https://docs.openstack.org/osc-placement/latest/index.html .. _PCI passthrough documentation: https://docs.openstack.org/nova/latest/admin/pci-passthrough.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/virtual-persistent-memory.rst0000664000175000017500000002663700000000000023661 0ustar00zuulzuul00000000000000============================================= Attaching virtual persistent memory to guests ============================================= .. versionadded:: 20.0.0 (Train) Starting in the 20.0.0 (Train) release, the virtual persistent memory (vPMEM) feature in Nova allows a deployment using the libvirt compute driver to provide vPMEMs for instances using physical persistent memory (PMEM) that can provide virtual devices. PMEM must be partitioned into `PMEM namespaces`_ for applications to use. This vPMEM feature only uses PMEM namespaces in ``devdax`` mode as QEMU `vPMEM backends`_. If you want to dive into related notions, the document `NVDIMM Linux kernel document`_ is recommended. To enable vPMEMs, follow the steps below. Dependencies ------------ The following are required to support the vPMEM feature: * Persistent Memory Hardware One such product is Intel® Optane™ DC Persistent Memory. `ipmctl`_ is used to configure it. * Linux Kernel version >= 4.18 with the following modules loaded: ``dax_pmem``, ``nd_pmem``, ``device_dax``, ``nd_btt`` .. note:: NVDIMM support is present in the Linux Kernel v4.0 or newer. It is recommended to use Kernel version 4.2 or later since `NVDIMM support `_ is enabled by default. We met some bugs in older versions, and we have done all verification works with OpenStack on 4.18 version, so 4.18 version and newer will probably guarantee its functionality. * QEMU version >= 3.1.0 * Libvirt version >= 5.0.0 * `ndctl`_ version >= 62 * daxio version >= 1.6 The vPMEM feature has been verified under the software and hardware listed above. Configure PMEM namespaces (Compute) ----------------------------------- #. Create PMEM namespaces as `vPMEM backends`_ using the `ndctl`_ utility. For example, to create a 30GiB namespace named ``ns3``: .. 
code-block:: console $ sudo ndctl create-namespace -s 30G -m devdax -M mem -n ns3 { "dev":"namespace1.0", "mode":"devdax", "map":"mem", "size":"30.00 GiB (32.21 GB)", "uuid":"937e9269-512b-4f65-9ac6-b74b61075c11", "raw_uuid":"17760832-a062-4aef-9d3b-95ea32038066", "daxregion":{ "id":1, "size":"30.00 GiB (32.21 GB)", "align":2097152, "devices":[ { "chardev":"dax1.0", "size":"30.00 GiB (32.21 GB)" } ] }, "name":"ns3", "numa_node":1 } Then list the available PMEM namespaces on the host: .. code-block:: console $ ndctl list -X [ { ... "size":6440353792, ... "name":"ns0", ... }, { ... "size":6440353792, ... "name":"ns1", ... }, { ... "size":6440353792, ... "name":"ns2", ... }, { ... "size":32210157568, ... "name":"ns3", ... } ] #. Specify which PMEM namespaces should be available to instances. Edit :oslo.config:option:`libvirt.pmem_namespaces`: .. code-block:: ini [libvirt] # pmem_namespaces=$LABEL:$NSNAME[|$NSNAME][,$LABEL:$NSNAME[|$NSNAME]] pmem_namespaces = 6GB:ns0|ns1|ns2,LARGE:ns3 Configured PMEM namespaces must have already been created on the host as described above. The conf syntax allows the admin to associate one or more namespace ``$NSNAME``\ s with an arbitrary ``$LABEL`` that can subsequently be used in a flavor to request one of those namespaces. It is recommended, but not required, for namespaces under a single ``$LABEL`` to be the same size. #. Restart the ``nova-compute`` service. Nova will invoke `ndctl`_ to identify the configured PMEM namespaces, and report vPMEM resources to placement. Configure a flavor ------------------ Specify a comma-separated list of the ``$LABEL``\ s from :oslo.config:option:`libvirt.pmem_namespaces` to the flavor's ``hw:pmem`` property. Note that multiple instances of the same label are permitted: .. code-block:: console $ openstack flavor set --property hw:pmem='6GB' my_flavor $ openstack flavor set --property hw:pmem='6GB,LARGE' my_flavor_large $ openstack flavor set --property hw:pmem='6GB,6GB' m1.medium .. note:: If a NUMA topology is specified, all vPMEM devices will be put on guest NUMA node 0; otherwise nova will generate one NUMA node automatically for the guest. Based on the above examples, an ``openstack server create`` request with ``my_flavor_large`` will spawn an instance with two vPMEMs. One, corresponding to the ``LARGE`` label, will be ``ns3``; the other, corresponding to the ``6G`` label, will be arbitrarily chosen from ``ns0``, ``ns1``, or ``ns2``. .. note:: Using vPMEM inside a virtual machine requires the following: * Guest kernel version 4.18 or higher; * The ``dax_pmem``, ``nd_pmem``, ``device_dax``, and ``nd_btt`` kernel modules; * The `ndctl`_ utility. .. note:: When resizing an instance with vPMEMs, the vPMEM data won't be migrated. Verify inventories and allocations ---------------------------------- This section describes how to check that: * vPMEM inventories were created correctly in placement, validating the `configuration described above <#configure-pmem-namespaces-compute>`_. * allocations were created correctly in placement for instances spawned from `flavors configured with vPMEMs <#configure-a-flavor>`_. .. note:: Inventories and allocations related to vPMEM resource classes are on the root resource provider related to the compute node. #. Get the list of resource providers .. 
code-block:: console $ openstack resource provider list +--------------------------------------+--------+------------+ | uuid | name | generation | +--------------------------------------+--------+------------+ | 1bc545f9-891f-4930-ab2b-88a56078f4be | host-1 | 47 | | 7d994aef-680d-43d4-9325-a67c807e648e | host-2 | 67 | --------------------------------------+---------+------------+ #. Check the inventory of each resource provider to see resource classes Each ``$LABEL`` configured in :oslo.config:option:`libvirt.pmem_namespaces` is used to generate a resource class named ``CUSTOM_PMEM_NAMESPACE_$LABEL``. Nova will report to Placement the number of vPMEM namespaces configured for each ``$LABEL``. For example, assuming ``host-1`` was configured as described above: .. code-block:: console $ openstack resource provider inventory list 1bc545f9-891f-4930-ab2b-88a56078f4be +-----------------------------+------------------+----------+----------+-----------+----------+--------+ | resource_class | allocation_ratio | max_unit | reserved | step_size | min_unit | total | +-----------------------------+------------------+----------+----------+-----------+----------+--------+ | VCPU | 16.0 | 64 | 0 | 1 | 1 | 64 | | MEMORY_MB | 1.5 | 190604 | 512 | 1 | 1 | 190604 | | CUSTOM_PMEM_NAMESPACE_LARGE | 1.0 | 1 | 0 | 1 | 1 | 1 | | CUSTOM_PMEM_NAMESPACE_6GB | 1.0 | 3 | 0 | 1 | 1 | 3 | | DISK_GB | 1.0 | 439 | 0 | 1 | 1 | 439 | +-----------------------------+------------------+----------+----------+-----------+----------+--------+ Here you can see the vPMEM resource classes prefixed with ``CUSTOM_PMEM_NAMESPACE_``. The ``LARGE`` label was configured with one namespace (``ns3``), so it has an inventory of ``1``. Since the ``6GB`` label was configured with three namespaces (``ns0``, ``ns1``, and ``ns2``), the ``CUSTOM_PMEM_NAMESPACE_6GB`` inventory has a ``total`` and ``max_unit`` of ``3``. #. Check allocations for each server that is using vPMEMs .. 
code-block:: console $ openstack server list +--------------------------------------+----------------------+--------+-------------------+---------------+-----------------+ | ID | Name | Status | Networks | Image | Flavor | +--------------------------------------+----------------------+--------+-------------------+---------------+-----------------+ | 41d3e139-de5c-40fd-9d82-016b72f2ba1d | server-with-2-vpmems | ACTIVE | private=10.0.0.24 | ubuntu-bionic | my_flavor_large | | a616a7f6-b285-4adf-a885-dd8426dd9e6a | server-with-1-vpmem | ACTIVE | private=10.0.0.13 | ubuntu-bionic | my_flavor | +--------------------------------------+----------------------+--------+-------------------+---------------+-----------------+ $ openstack resource provider allocation show 41d3e139-de5c-40fd-9d82-016b72f2ba1d +--------------------------------------+------------+------------------------------------------------------------------------------------------------------------------------+ | resource_provider | generation | resources | +--------------------------------------+------------+------------------------------------------------------------------------------------------------------------------------+ | 1bc545f9-891f-4930-ab2b-88a56078f4be | 49 | {u'MEMORY_MB': 32768, u'VCPU': 16, u'DISK_GB': 20, u'CUSTOM_PMEM_NAMESPACE_6GB': 1, u'CUSTOM_PMEM_NAMESPACE_LARGE': 1} | +--------------------------------------+------------+------------------------------------------------------------------------------------------------------------------------+ $ openstack resource provider allocation show a616a7f6-b285-4adf-a885-dd8426dd9e6a +--------------------------------------+------------+-----------------------------------------------------------------------------------+ | resource_provider | generation | resources | +--------------------------------------+------------+-----------------------------------------------------------------------------------+ | 1bc545f9-891f-4930-ab2b-88a56078f4be | 49 | {u'MEMORY_MB': 8192, u'VCPU': 8, u'DISK_GB': 20, u'CUSTOM_PMEM_NAMESPACE_6GB': 1} | +--------------------------------------+------------+-----------------------------------------------------------------------------------+ In this example, two servers were created. ``server-with-2-vpmems`` used ``my_flavor_large`` asking for one ``6GB`` vPMEM and one ``LARGE`` vPMEM. ``server-with-1-vpmem`` used ``my_flavor`` asking for a single ``6GB`` vPMEM. .. _`PMEM namespaces`: http://pmem.io/ndctl/ndctl-create-namespace.html .. _`vPMEM backends`: https://github.com/qemu/qemu/blob/19b599f7664b2ebfd0f405fb79c14dd241557452/docs/nvdimm.txt#L145 .. _`NVDIMM Linux kernel document`: https://www.kernel.org/doc/Documentation/nvdimm/nvdimm.txt .. _`ipmctl`: https://software.intel.com/en-us/articles/quick-start-guide-configure-intel-optane-dc-persistent-memory-on-linux .. _`ndctl`: http://pmem.io/ndctl/ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.2496078 nova-32.0.0/doc/source/cli/0000775000175000017500000000000000000000000015376 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/cli/index.rst0000664000175000017500000000411400000000000017237 0ustar00zuulzuul00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Command-line Utilities ====================== In this section you will find information on Nova's command line utilities. Nova Management Commands ------------------------ These commands are used to manage existing installations. They are designed to be run by operators in an environment where they have direct access to the nova database. .. toctree:: :maxdepth: 1 nova-manage nova-policy nova-status Service Daemons --------------- The service daemons make up a functioning nova environment. All of these are expected to be started by an init system, expect to read a nova.conf file, and daemonize correctly after starting up. .. toctree:: :maxdepth: 1 nova-compute nova-conductor nova-novncproxy nova-scheduler nova-serialproxy nova-spicehtml5proxy WSGI Services ------------- Starting in the 2025.2 release, the only way to deploy the nova api is in a wsgi container (uwsgi or apache/mod_wsgi). These are the wsgi entry points to do that: * :doc:`nova-api-wsgi ` * :doc:`nova-metadata-wsgi ` Additional Tools ---------------- There are a few additional cli tools which nova services call when appropriate. This should not need to be called directly by operators, but they are documented for completeness and debugging if something goes wrong. .. toctree:: :maxdepth: 1 nova-rootwrap ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/cli/nova-compute.rst0000664000175000017500000000220600000000000020545 0ustar00zuulzuul00000000000000============ nova-compute ============ .. program:: nova-compute Synopsis ======== :: nova-compute [...] Description =========== :program:`nova-compute` is a server daemon that serves the Nova Compute service, which is responsible for building a disk image, launching an instance via the underlying virtualization driver, responding to calls to check the instance's state, attaching persistent storage, and terminating the instance. Options ======= .. rubric:: General options .. include:: opts/common.rst Files ===== .. todo: We shouldn't have policy configuration in this non-API service, but bug #1675486 means we do have one * ``/etc/nova/nova.conf`` * ``/etc/nova/policy.yaml`` * ``/etc/nova/policy.d/`` * ``/etc/nova/rootwrap.conf`` * ``/etc/nova/rootwrap.d/`` * ``/etc/nova/compute_id`` * ``/var/lib/nova/compute_id`` See Also ======== :doc:`nova-conductor(1) `, :doc:`nova-manage(1) `, :doc:`nova-rootwrap(1) `, :doc:`nova-scheduler(1) `, :doc:`nova-status(1) ` Bugs ==== * Nova bugs are managed at `Launchpad `__ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/cli/nova-conductor.rst0000664000175000017500000000132500000000000021072 0ustar00zuulzuul00000000000000============== nova-conductor ============== .. program:: nova-conductor Synopsis ======== :: nova-conductor [...] 
Description =========== :program:`nova-conductor` is a server daemon that serves the Nova Conductor service, which provides coordination and database query support for nova. Options ======= .. rubric:: General options .. include:: opts/common.rst Files ===== * ``/etc/nova/nova.conf`` See Also ======== :doc:`nova-compute(1) `, :doc:`nova-manage(1) `, :doc:`nova-rootwrap(1) `, :doc:`nova-scheduler(1) `, :doc:`nova-status(1) ` Bugs ==== * Nova bugs are managed at `Launchpad `__ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/cli/nova-manage.rst0000664000175000017500000015350500000000000020332 0ustar00zuulzuul00000000000000=========== nova-manage =========== .. program:: nova-manage Synopsis ======== :: nova-manage [ [...]] Description =========== :program:`nova-manage` controls cloud computing instances by managing various admin-only aspects of Nova. The standard pattern for executing a :program:`nova-manage` command is:: nova-manage [] Run without arguments to see a list of available command categories:: nova-manage You can also run with a category argument such as ``db`` to see a list of all commands in that category:: nova-manage db Options ======= These options apply to all commands and may be given in any order, before or after commands. Individual commands may provide additional options. Options without an argument can be combined after a single dash. .. option:: -h, --help Show a help message and exit .. option:: --config-dir

Path to a config directory to pull ``*.conf`` files from. This file set is sorted, so as to provide a predictable parse order if individual options are over-ridden. The set is parsed after the file(s) specified via previous :option:`--config-file`, arguments hence over-ridden options in the directory take precedence. This option must be set from the command-line. .. option:: --config-file Path to a config file to use. Multiple config files can be specified, with values in later files taking precedence. Defaults to None. This option must be set from the command-line. .. option:: --log-config-append , --log-config , --log_config The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging configuration files, see the Python logging module documentation. Note that when logging configuration files are used then all logging configuration is set in the configuration file and other logging configuration options are ignored (for example, :option:`--log-date-format`). .. option:: --log-date-format Defines the format string for ``%(asctime)s`` in log records. Default: None. This option is ignored if :option:`--log-config-append` is set. .. option:: --log-dir , --logdir The base directory used for relative log_file paths. This option is ignored if :option:`--log-config-append` is set. .. option:: --log-file PATH, --logfile Name of log file to send logging output to. If no default is set, logging will go to stderr as defined by use_stderr. This option is ignored if :option:`--log-config-append` is set. .. option:: --syslog-log-facility SYSLOG_LOG_FACILITY Syslog facility to receive log lines. This option is ignored if :option:`--log-config-append` is set. .. option:: --use-journal Enable journald for logging. If running in a systemd environment you may wish to enable journal support. Doing so will use the journal native protocol which includes structured metadata in addition to log messages. This option is ignored if :option:`--log-config-append` is set. .. option:: --nouse-journal The inverse of :option:`--use-journal`. .. option:: --use-json Use JSON formatting for logging. This option is ignored if :option:`--log-config-append` is set. .. option:: --nouse-json The inverse of :option:`--use-json`. .. option:: --use-syslog Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424. This option is ignored if :option:`--log-config-append` is set. .. option:: --nouse-syslog The inverse of :option:`--use-syslog`. .. option:: --watch-log-file Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if :option:`--log-file` option is specified and Linux platform is used. This option is ignored if :option:`--log-config-append` is set. .. option:: --nowatch-log-file The inverse of :option:`--watch-log-file`. .. option:: --debug, -d If enabled, the logging level will be set to ``DEBUG`` instead of the default ``INFO`` level. .. option:: --nodebug The inverse of :option:`--debug`. .. option:: --post-mortem Allow post-mortem debugging. .. option:: --nopost-mortem The inverse of :option:`--post-mortem`. .. option:: --version Show program's version number and exit Database Commands ================= db version ---------- .. program:: nova-manage db version .. code-block:: shell nova-manage db version Print the current main database version. db sync ------- .. 
program:: nova-manage db sync .. code-block:: shell nova-manage db sync [--local_cell] [VERSION] Upgrade the main database schema up to the most recent version or ``VERSION`` if specified. By default, this command will also attempt to upgrade the schema for the cell0 database if it is mapped. If :option:`--local_cell` is specified, then only the main database in the current cell is upgraded. The local database connection is determined by :oslo.config:option:`database.connection` in the configuration file, passed to nova-manage using the ``--config-file`` option(s). Refer to the :program:`nova-manage cells_v2 map_cell0` or :program:`nova-manage cells_v2 simple_cell_setup` commands for more details on mapping the cell0 database. This command should be run **after** :program:`nova-manage api_db sync`. .. rubric:: Options .. option:: --local_cell Only sync db in the local cell: do not attempt to fan-out to all cells. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Successfully synced database schema. * - 1 - Failed to access cell0. .. versionchanged:: 20.0.0 (Train) Removed support for the legacy ``--version `` argument. .. versionchanged:: 24.0.0 (Xena) Migrated versioning engine to alembic. The optional ``VERSION`` argument is now expected to be an alembic-based version. sqlalchemy-migrate-based versions will be rejected. db archive_deleted_rows ----------------------- .. program:: nova-manage db archive_deleted_rows .. code-block:: shell nova-manage db archive_deleted_rows [--max_rows ] [--verbose] [--until-complete] [--before ] [--purge] [--all-cells] [--task-log] [--sleep] Move deleted rows from production tables to shadow tables. Note that the corresponding rows in the ``instance_mappings``, ``request_specs`` and ``instance_group_member`` tables of the API database are purged when instance records are archived and thus, :oslo.config:option:`api_database.connection` is required in the config file. If automating, this should be run continuously while the result is 1, stopping at 0, or use the :option:`--until-complete` option. .. versionchanged:: 24.0.0 (Xena) Added :option:`--task-log`, :option:`--sleep` options. .. rubric:: Options .. option:: --max_rows Maximum number of deleted rows to archive. Defaults to 1000. Note that this number does not include the corresponding rows, if any, that are removed from the API database for deleted instances. .. option:: --before Archive rows that have been deleted before ````. Accepts date strings in the default format output by the ``date`` command, as well as ``YYYY-MM-DD[HH:mm:ss]``. For example:: # Purge shadow table rows older than a specific date nova-manage db archive_deleted_rows --before 2015-10-21 # or nova-manage db archive_deleted_rows --before "Oct 21 2015" # Times are also accepted nova-manage db archive_deleted_rows --before "2015-10-21 12:00" Note that relative dates (such as ``yesterday``) are not supported natively. The ``date`` command can be helpful here:: # Archive deleted rows more than one month old nova-manage db archive_deleted_rows --before "$(date -d 'now - 1 month')" .. option:: --verbose Print how many rows were archived per table. .. option:: --until-complete Run continuously until all deleted rows are archived. Use :option:`--max_rows` as a batch size for each iteration. .. option:: --purge Purge all data from shadow tables after archive completes. .. option:: --all-cells Run command across all cells. .. 
option:: --task-log Also archive ``task_log`` table records. Note that ``task_log`` records are never deleted, so archiving them will move all of the ``task_log`` records up to now into the shadow tables. It is recommended to also specify the :option:`--before` option to avoid races for those consuming ``task_log`` record data via the `/os-instance_usage_audit_log`__ API (example: Telemetry). .. __: https://docs.openstack.org/api-ref/compute/#server-usage-audit-log-os-instance-usage-audit-log .. option:: --sleep The amount of time in seconds to sleep between batches when :option:`--until-complete` is used. Defaults to 0. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Nothing was archived. * - 1 - Some number of rows were archived. * - 2 - Invalid value for :option:`--max_rows`. * - 3 - No connection to the API database could be established using :oslo.config:option:`api_database.connection`. * - 4 - Invalid value for :option:`--before`. * - 255 - An unexpected error occurred. db purge -------- .. program:: nova-manage db purge .. code-block:: shell nova-manage db purge [--all] [--before ] [--verbose] [--all-cells] Delete rows from shadow tables. For :option:`--all-cells` to work, the API database connection information must be configured. .. versionadded:: 18.0.0 (Rocky) .. rubric:: Options .. option:: --all Purge all rows in the shadow tables. .. option:: --before Delete archived rows that were deleted from Nova before ````. Accepts date strings in the default format output by the ``date`` command, as well as ``YYYY-MM-DD[HH:mm:ss]``. For example:: # Purge shadow table rows deleted before specified date nova-manage db purge --before 2015-10-21 # or nova-manage db purge --before "Oct 21 2015" # Times are also accepted nova-manage db purge --before "2015-10-21 12:00" Note that relative dates (such as ``yesterday``) are not supported natively. The ``date`` command can be helpful here:: # Archive deleted rows more than one month old nova-manage db purge --before "$(date -d 'now - 1 month')" .. option:: --verbose Print information about purged records. .. option:: --all-cells Run against all cell databases. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Rows were deleted. * - 1 - Required arguments were not provided. * - 2 - Invalid value for :option:`--before`. * - 3 - Nothing was purged. * - 4 - No connection to the API database could be established using :oslo.config:option:`api_database.connection`. db online_data_migrations ------------------------- .. program:: nova-manage db online_data_migrations .. code-block:: shell nova-manage db online_data_migrations [--max-count ] Perform data migration to update all live data. This command should be called after upgrading database schema and nova services on all controller nodes. If it exits with partial updates (exit status 1) it should be called again, even if some updates initially generated errors, because some updates may depend on others having completed. If it exits with status 2, intervention is required to resolve the issue causing remaining updates to fail. It should be considered successfully completed only when the exit status is 0. 
For example:: $ nova-manage db online_data_migrations Running batches of 50 until complete 2 rows matched query migrate_instances_add_request_spec, 0 migrated 2 rows matched query populate_queued_for_delete, 2 migrated +---------------------------------------------+--------------+-----------+ | Migration | Total Needed | Completed | +---------------------------------------------+--------------+-----------+ | create_incomplete_consumers | 0 | 0 | | migrate_instances_add_request_spec | 2 | 0 | | migrate_quota_classes_to_api_db | 0 | 0 | | migrate_quota_limits_to_api_db | 0 | 0 | | migration_migrate_to_uuid | 0 | 0 | | populate_missing_availability_zones | 0 | 0 | | populate_queued_for_delete | 2 | 2 | | populate_uuids | 0 | 0 | +---------------------------------------------+--------------+-----------+ In the above example, the ``migrate_instances_add_request_spec`` migration found two candidate records but did not need to perform any kind of data migration for either of them. In the case of the ``populate_queued_for_delete`` migration, two candidate records were found which did require a data migration. Since :option:`--max-count` defaults to 50 and only two records were migrated with no more candidates remaining, the command completed successfully with exit code 0. .. versionadded:: 13.0.0 (Mitaka) .. rubric:: Options .. option:: --max-count Controls the maximum number of objects to migrate in a given call. If not specified, migration will occur in batches of 50 until fully complete. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - No (further) updates are possible. * - 1 - Some updates were completed successfully. Note that not all updates may have succeeded. * - 2 - Some updates generated errors and no other migrations were able to take effect in the last batch attempted. * - 127 - Invalid input was provided. db ironic_compute_node_move --------------------------- .. program:: nova-manage db ironic_compute_node_move .. code-block:: shell nova-manage db ironic_compute_node_move --ironic-node-uuid --destination-host Move ironic nodes, along with any associated instances, between nova-compute services. This is useful when migrating away from using peer_list and multiple hash ring balanced nova-compute servers to the new ironic shard system. First you must turn off the nova-compute service that currently manages the Ironic host. Second you mark that nova-compute service as forced down via the Nova API. Third, you ensure the new nova-compute service is correctly configured to target the appropriate shard (and optionally also a conductor group). Finally, most Ironic nodes should now move to the new service, but any Ironic nodes with instances on them will need to be manually moved to their new Ironic service by using this nova-manage command. .. versionadded:: 28.0.0 (2023.2 Bobcat) .. rubric:: Options .. option:: --ironic-node-uuid Ironic node uuid to be moved (which is also the Nova compute node uuid and the uuid of corresponding resource provider in Placement). The Nova compute service that currently manages this Ironic node must first be marked a "forced down" via the Nova API, in a similar way to a down hypervisor that is about to have its VMs evacuated to a replacement hypervisor. .. option:: --destination-host Destination ironic nova-compute service CONF.host. API Database Commands ===================== api_db version -------------- .. program:: nova-manage api_db version .. 
code-block:: shell nova-manage api_db version Print the current API database version. .. versionadded:: 2015.1.0 (Kilo) api_db sync ----------- .. program:: nova-manage api_db sync .. code-block:: shell nova-manage api_db sync [VERSION] Upgrade the API database schema up to the most recent version or ``VERSION`` if specified. This command does not create the API database; it runs schema migration scripts. The API database connection is determined by :oslo.config:option:`api_database.connection` in the configuration file passed to nova-manage. This command should be run before ``nova-manage db sync``. .. versionadded:: 2015.1.0 (Kilo) .. versionchanged:: 18.0.0 (Rocky) Added support for upgrading the optional placement database if ``[placement_database]/connection`` is configured. .. versionchanged:: 20.0.0 (Train) Removed support for upgrading the optional placement database as placement is now a separate project. Removed support for the legacy ``--version `` argument. .. versionchanged:: 24.0.0 (Xena) Migrated versioning engine to alembic. The optional ``VERSION`` argument is now expected to be an alembic-based version. sqlalchemy-migrate-based versions will be rejected. .. _man-page-cells-v2: Cells v2 Commands ================= cell_v2 simple_cell_setup ------------------------- .. program:: nova-manage cell_v2 simple_cell_setup .. code-block:: shell nova-manage cell_v2 simple_cell_setup [--transport-url ] Set up a fresh cells v2 environment. If :option:`--transport-url` is not specified, it will use the one defined by :oslo.config:option:`transport_url` in the configuration file. .. versionadded:: 14.0.0 (Newton) .. rubric:: Options .. option:: --transport-url The transport url for the cell message queue. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Setup is completed. * - 1 - No hosts are reporting, meaning none can be mapped, or the transport URL is missing or invalid. cell_v2 map_cell0 ----------------- .. program:: nova-manage cell_v2 map_cell0 .. code-block:: shell nova-manage cell_v2 map_cell0 [--database_connection ] Create a cell mapping to the database connection for the cell0 database. If a database_connection is not specified, it will use the one defined by :oslo.config:option:`database.connection` in the configuration file passed to nova-manage. The cell0 database is used for instances that have not been scheduled to any cell. This generally applies to instances that have encountered an error before they have been scheduled. .. versionadded:: 14.0.0 (Newton) .. rubric:: Options .. option:: --database_connection The database connection URL for ``cell0``. This is optional. If not provided, a standard database connection will be used based on the main database connection from nova configuration. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - ``cell0`` is created successfully or has already been set up. cell_v2 map_instances --------------------- .. program:: nova-manage cell_v2 map_instances .. code-block:: shell nova-manage cell_v2 map_instances --cell_uuid [--max-count ] [--reset] Map instances to the provided cell. Instances in the nova database will be queried from oldest to newest and mapped to the provided cell. A :option:`--max-count` can be set on the number of instances to map in a single run. Repeated runs of the command will start from where the last run finished, so it is not necessary to increase :option:`--max-count` to finish.
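For example, a minimal sketch of automating this batching behaviour (the ``CELL_UUID`` variable and the batch size of 100 are placeholders, not values from this guide) simply re-runs the command until it no longer reports that instances remain to be mapped:

.. code-block:: shell

   # Map instances to the cell in batches of 100 until nothing is left.
   # CELL_UUID is a placeholder for the target cell's UUID.
   while true; do
       nova-manage cell_v2 map_instances --cell_uuid "$CELL_UUID" --max-count 100
       rc=$?
       # Exit code 1 means more instances remain; anything else stops the loop
       # (0 means everything is mapped, other codes indicate an error).
       [ "$rc" -eq 1 ] || break
   done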
A :option:`--reset` option can be passed, which will reset the marker, thus making the command start from the beginning as opposed to the default behavior of starting from where the last run finished. If :option:`--max-count` is not specified, all instances in the cell will be mapped in batches of 50. If you have a large number of instances, consider specifying a custom value and run the command until it exits with 0. .. versionadded:: 12.0.0 (Liberty) .. rubric:: Options .. option:: --cell_uuid Unmigrated instances will be mapped to the cell with the UUID provided. .. option:: --max-count Maximum number of instances to map. If not set, all instances in the cell will be mapped in batches of 50. If you have a large number of instances, consider specifying a custom value and run the command until it exits with 0. .. option:: --reset The command will start from the beginning as opposed to the default behavior of starting from where the last run finished. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - All instances have been mapped. * - 1 - There are still instances to be mapped. * - 127 - Invalid value for :option:`--max-count`. * - 255 - An unexpected error occurred. cell_v2 map_cell_and_hosts -------------------------- .. program:: nova-manage cell_v2 map_cell_and_hosts .. code-block:: shell nova-manage cell_v2 map_cell_and_hosts [--name ] [--transport-url ] [--verbose] Create a cell mapping to the database connection and message queue transport URL, and map hosts to that cell. The database connection comes from the :oslo.config:option:`database.connection` defined in the configuration file passed to nova-manage. If :option:`--transport-url` is not specified, it will use the one defined by :oslo.config:option:`transport_url` in the configuration file. This command is idempotent (can be run multiple times), and the verbose option will print out the resulting cell mapping UUID. .. versionadded:: 13.0.0 (Mitaka) .. rubric:: Options .. option:: --transport-url The transport url for the cell message queue. .. option:: --name The name of the cell. .. option:: --verbose Output the cell mapping uuid for any newly mapped hosts. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Successful completion. * - 1 - The transport url is missing or invalid. cell_v2 verify_instance ----------------------- .. program:: nova-manage cell_v2 verify_instance .. code-block:: shell nova-manage cell_v2 verify_instance --uuid [--quiet] Verify instance mapping to a cell. This command is useful to determine if the cells v2 environment is properly set up, specifically in terms of the cell, host, and instance mapping records required. .. versionadded:: 14.0.0 (Newton) .. rubric:: Options .. option:: --uuid The instance UUID to verify. .. option:: --quiet Do not print anything. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - The instance was successfully mapped to a cell. * - 1 - The instance is not mapped to a cell. See the ``map_instances`` command. * - 2 - The cell mapping is missing. See the ``map_cell_and_hosts`` command if you are upgrading from a cells v1 environment, and the ``simple_cell_setup`` command if you are upgrading from a non-cells v1 environment. * - 3 - The instance is a deleted instance that still has an instance mapping. * - 4 - The instance is an archived instance that still has an instance mapping.
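For example, a minimal scripted check that a given instance is mapped (``INSTANCE_UUID`` is a placeholder for a real instance UUID) can rely on the exit status alone, where ``0`` means the instance is mapped to a cell:

.. code-block:: console

   $ nova-manage cell_v2 verify_instance --uuid "$INSTANCE_UUID" --quiet
   $ echo $?
   0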
cell_v2 create_cell ------------------- .. program:: nova-manage cell_v2 create_cell .. code-block:: shell nova-manage cell_v2 create_cell [--name ] [--transport-url ] [--database_connection ] [--verbose] [--disabled] Create a cell mapping to the database connection and message queue transport URL. If a database_connection is not specified, it will use the one defined by :oslo.config:option:`database.connection` in the configuration file passed to nova-manage. If :option:`--transport-url` is not specified, it will use the one defined by :oslo.config:option:`transport_url` in the configuration file. The verbose option will print out the resulting cell mapping UUID. All the cells created are by default enabled. However passing the :option:`--disabled` option can create a pre-disabled cell, meaning no scheduling will happen to this cell. .. versionadded:: 15.0.0 (Ocata) .. versionchanged:: 18.0.0 (Rocky) Added :option:`--disabled` option. .. rubric:: Options .. option:: --name The name of the cell. .. option:: --database_connection The database URL for the cell database. .. option:: --transport-url The transport url for the cell message queue. .. option:: --verbose Output the UUID of the created cell. .. option:: --disabled Create a pre-disabled cell. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - The cell mapping was successfully created. * - 1 - The transport URL or database connection was missing or invalid. * - 2 - Another cell is already using the provided transport URL and/or database connection combination. cell_v2 discover_hosts ---------------------- .. program:: nova-manage cell_v2 discover_hosts .. code-block:: shell nova-manage cell_v2 discover_hosts [--cell_uuid ] [--verbose] [--strict] [--by-service] Searches cells, or a single cell, and maps found hosts. This command will check the database for each cell (or a single one if passed in) and map any hosts which are not currently mapped. If a host is already mapped, nothing will be done. You need to re-run this command each time you add a batch of compute hosts to a cell (otherwise the scheduler will never place instances there and the API will not list the new hosts). If :option:`--strict` is specified, the command will only return 0 if an unmapped host was discovered and mapped successfully. If :option:`--by-service` is specified, this command will look in the appropriate cell(s) for any nova-compute services and ensure there are host mappings for them. This is less efficient and is only necessary when using compute drivers that may manage zero or more actual compute nodes at any given time (currently only ironic). This command should be run once after all compute hosts have been deployed and should not be run in parallel. When run in parallel, the commands will collide with each other trying to map the same hosts in the database at the same time. .. versionadded:: 14.0.0 (Newton) .. versionchanged:: 16.0.0 (Pike) Added :option:`--strict` option. .. versionchanged:: 18.0.0 (Rocky) Added :option:`--by-service` option. .. rubric:: Options .. option:: --cell_uuid If provided only this cell will be searched for new hosts to map. .. option:: --verbose Provide detailed output when discovering hosts. .. option:: --strict Considered successful (exit code 0) only when an unmapped host is discovered. Any other outcome will be considered a failure (non-zero exit code). .. option:: --by-service Discover hosts by service instead of compute node. .. rubric:: Return codes .. 
list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Hosts were successfully mapped or no hosts needed to be mapped. If :option:`--strict` is specified, returns 0 only if an unmapped host was discovered and mapped. * - 1 - If :option:`--strict` is specified and no unmapped hosts were found. Also returns 1 if an exception was raised while running. * - 2 - The command was aborted because of a duplicate host mapping found. This means the command collided with another running ``discover_hosts`` command or scheduler periodic task and is safe to retry. cell_v2 list_cells ------------------ .. program:: nova-manage cell_v2 list_cells .. code-block:: shell nova-manage cell_v2 list_cells [--verbose] By default the cell name, UUID, disabled state, masked transport URL and database connection details are shown. Use the :option:`--verbose` option to see transport URL and database connection with their sensitive details. .. versionadded:: 15.0.0 (Ocata) .. versionchanged:: 18.0.0 (Rocky) Added the ``disabled`` column to output. .. rubric:: Options .. option:: --verbose Show sensitive details, such as passwords. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Success. cell_v2 delete_cell ------------------- .. program:: nova-manage cell_v2 delete_cell .. code-block:: shell nova-manage cell_v2 delete_cell [--force] --cell_uuid Delete a cell by the given UUID. .. versionadded:: 15.0.0 (Ocata) .. rubric:: Options .. option:: --force Delete hosts and instance_mappings that belong to the cell as well. .. option:: --cell_uuid The UUID of the cell to delete. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - An empty cell was found and deleted successfully or a cell that has hosts was found and the cell, hosts and the instance_mappings were deleted successfully with :option:`--force` option (this happens if there are no living instances). * - 1 - A cell with the provided UUID could not be found. * - 2 - Host mappings were found for the cell, meaning the cell is not empty, and the :option:`--force` option was not provided. * - 3 - There are active instances mapped to the cell (cell not empty). * - 4 - There are (inactive) instances mapped to the cell and the :option:`--force` option was not provided. cell_v2 list_hosts ------------------ .. program:: nova-manage cell_v2 list_hosts .. code-block:: shell nova-manage cell_v2 list_hosts [--cell_uuid ] Lists the hosts in one or all v2 cells. By default hosts in all v2 cells are listed. Use the :option:`--cell_uuid` option to list hosts in a specific cell. .. versionadded:: 17.0.0 (Queens) .. rubric:: Options .. option:: --cell_uuid The UUID of the cell. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Success. * - 1 - The cell indicated by :option:`--cell_uuid` was not found. cell_v2 update_cell ------------------- .. program:: nova-manage cell_v2 update_cell .. code-block:: shell nova-manage cell_v2 update_cell --cell_uuid [--name ] [--transport-url ] [--database_connection ] [--disable] [--enable] Updates the properties of a cell by the given uuid. If a database_connection is not specified, it will attempt to use the one defined by :oslo.config:option:`database.connection` in the configuration file. If a transport_url is not specified, it will attempt to use the one defined by :oslo.config:option:`transport_url` in the configuration file. .. 
note:: Updating the ``transport_url`` or ``database_connection`` fields on a running system will NOT result in all nodes immediately using the new values. Use caution when changing these values. The scheduler will not notice that a cell has been enabled/disabled until it is restarted or sent the SIGHUP signal. .. versionadded:: 16.0.0 (Pike) .. versionchanged:: 18.0.0 (Rocky) Added :option:`--enable`, :option:`--disable` options. .. rubric:: Options .. option:: --cell_uuid The UUID of the cell to update. .. option:: --name Set the cell name. .. option:: --transport-url Set the cell ``transport_url``. Note that running nodes will not see the change until restarted or the ``SIGHUP`` signal is sent. .. option:: --database_connection Set the cell ``database_connection``. Note that running nodes will not see the change until restarted or the ``SIGHUP`` signal is sent. .. option:: --disable Disables the cell. Note that scheduling to this cell will be blocked until it is enabled and the ``nova-scheduler`` service is restarted or the ``SIGHUP`` signal is sent. .. option:: --enable Enables the cell. Note that the ``nova-scheduler`` service will not see the change until it is restarted or the ``SIGHUP`` signal is sent. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Success. * - 1 - The cell was not found by the provided UUID. * - 2 - The specified properties could not be set. * - 3 - The provided :option:`--transport-url` and/or :option:`--database_connection` parameters were the same as those of another cell. * - 4 - An attempt was made to disable and enable a cell at the same time. * - 5 - An attempt was made to disable or enable cell0. cell_v2 delete_host ------------------- .. program:: nova-manage cell_v2 delete_host .. code-block:: shell nova-manage cell_v2 delete_host --cell_uuid --host Delete a host by the given host name and the given cell UUID. .. versionadded:: 17.0.0 (Queens) .. note:: The scheduler caches host-to-cell mapping information, so when deleting a host the scheduler may need to be restarted or sent the SIGHUP signal. .. rubric:: Options .. option:: --cell_uuid The UUID of the cell. .. option:: --host The host to delete. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - The empty host was found and deleted successfully. * - 1 - A cell with the specified UUID could not be found. * - 2 - A host with the specified name could not be found. * - 3 - The host with the specified name is not in a cell with the specified UUID. * - 4 - The host with the specified name has instances (host not empty). Placement Commands ================== .. _heal_allocations_cli: placement heal_allocations -------------------------- .. program:: nova-manage placement heal_allocations .. code-block:: shell nova-manage placement heal_allocations [--max-count ] [--verbose] [--skip-port-allocations] [--dry-run] [--instance ] [--cell `) but the corresponding allocation is not found then the allocation is created against the network device resource providers according to the resource request of that port. It is possible that the missing allocation cannot be created either due to not having enough resource inventory on the host the instance resides on or because more than one resource provider could fulfill the request. In this case the instance needs to be manually deleted or the port needs to be detached.
When nova `supports migrating instances with guaranteed bandwidth ports`__, migration will heal missing allocations for these instances. .. __: https://specs.openstack.org/openstack/nova-specs/specs/train/approved/support-move-ops-with-qos-ports.html Before the allocations for the ports are persisted in placement nova-manage tries to update each port in neutron to refer to the resource provider UUID which provides the requested resources. If any of the port updates fail in neutron or the allocation update fails in placement the command tries to roll back the partial updates to the ports. If the roll back fails then the process stops with exit code ``7`` and the admin needs to do the rollback in neutron manually according to the description in the exit code section. There is also a special case handled for instances that *do* have allocations created before Placement API microversion 1.8 where project_id and user_id values were required. For those types of allocations, the project_id and user_id are updated using the values from the instance. This command requires that the :oslo.config:option:`api_database.connection` and :oslo.config:group:`placement` configuration options are set. Placement API >= 1.28 is required. .. versionadded:: 18.0.0 (Rocky) .. versionchanged:: 20.0.0 (Train) Added :option:`--dry-run`, :option:`--instance`, and :option:`--skip-port-allocations` options. .. versionchanged:: 21.0.0 (Ussuri) Added :option:`--cell` option. .. versionchanged:: 22.0.0 (Victoria) Added :option:`--force` option. .. versionchanged:: 25.0.0 (Yoga) Added support for healing port allocations if port-resource-request-groups neutron API extension is enabled and therefore ports can request multiple group of resources e.g. by using both guaranteed minimum bandwidth and guaranteed minimum packet rate QoS policy rules. .. rubric:: Options .. option:: --max-count Maximum number of instances to process. If not specified, all instances in each cell will be mapped in batches of 50. If you have a large number of instances, consider specifying a custom value and run the command until it exits with 0 or 4. .. option:: --verbose Provide verbose output during execution. .. option:: --dry-run Runs the command and prints output but does not commit any changes. The return code should be 4. .. option:: --instance UUID of a specific instance to process. If specified :option:`--max-count` has no effect. Mutually exclusive with :option:`--cell`. .. option:: --skip-port-allocations Skip the healing of the resource allocations of bound ports. E.g. healing bandwidth resource allocation for ports having minimum QoS policy rules attached. If your deployment does not use such a feature then the performance impact of querying neutron ports for each instance can be avoided with this flag. .. option:: --cell Heal allocations within a specific cell. Mutually exclusive with :option:`--instance`. .. option:: --force Force heal allocations. Requires the :option:`--instance` argument. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Command completed successfully and allocations were created. * - 1 - :option:`--max-count` was reached and there are more instances to process. * - 2 - Unable to find a compute node record for a given instance. * - 3 - Unable to create (or update) allocations for an instance against its compute node resource provider. * - 4 - Command completed successfully but no allocations were created. 
* - 5 - Unable to query ports from neutron * - 6 - Unable to update ports in neutron * - 7 - Cannot roll back neutron port updates. Manual steps needed. The error message will indicate which neutron ports need to be changed to clean up ``binding:profile`` of the port:: $ openstack port unset --binding-profile allocation * - 127 - Invalid input. * - 255 - An unexpected error occurred. .. _sync_aggregates_cli: placement sync_aggregates ------------------------- .. program:: nova-manage placement sync_aggregates .. code-block:: shell nova-manage placement sync_aggregates [--verbose] Mirrors compute host aggregates to resource provider aggregates in the Placement service. Requires the :oslo.config:group:`api_database` and :oslo.config:group:`placement` sections of the nova configuration file to be populated. Specify :option:`--verbose` to get detailed progress output during execution. .. note:: Depending on the size of your deployment and the number of compute hosts in aggregates, this command could cause a non-negligible amount of traffic to the placement service and therefore is recommended to be run during maintenance windows. .. versionadded:: 18.0.0 (Rocky) .. rubric:: Options .. option:: --verbose Provide verbose output during execution. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Successful run * - 1 - A host was found with more than one matching compute node record * - 2 - An unexpected error occurred while working with the placement API * - 3 - Failed updating provider aggregates in placement * - 4 - Host mappings not found for one or more host aggregate members * - 5 - Compute node records not found for one or more hosts * - 6 - Resource provider not found by uuid for a given host * - 255 - An unexpected error occurred. .. _placement_audit_cli: placement audit --------------- .. program:: nova-manage placement audit .. code-block:: shell nova-manage placement audit [--verbose] [--delete] [--resource_provider ] Iterates over all the Resource Providers (or just one if you provide the UUID) and then verifies if the compute allocations are either related to an existing instance or a migration UUID. If not, it will tell which allocations are orphaned. This command requires that the :oslo.config:option:`api_database.connection` and :oslo.config:group:`placement` configuration options are set. Placement API >= 1.14 is required. .. versionadded:: 21.0.0 (Ussuri) .. rubric:: Options .. option:: --verbose Provide verbose output during execution. .. option:: --resource_provider UUID of a specific resource provider to verify. .. option:: --delete Deletes orphaned allocations that were found. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - No orphaned allocations were found * - 1 - An unexpected error occurred * - 3 - Orphaned allocations were found * - 4 - All found orphaned allocations were deleted * - 127 - Invalid input Volume Attachment Commands ========================== volume_attachment get_connector ------------------------------- .. program:: nova-manage volume_attachment get_connector .. code-block:: shell nova-manage volume_attachment get_connector Show the host connector for this compute host. When called with the ``--json`` switch this dumps a JSON string containing the connector information for the current host, which can be saved to a file and used as input for the :program:`nova-manage volume_attachment refresh` command. .. 
versionadded:: 24.0.0 (Xena) .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Success * - 1 - An unexpected error occurred volume_attachment show ---------------------- .. program:: nova-manage volume_attachment show .. code-block:: shell nova-manage volume_attachment show [INSTANCE_UUID] [VOLUME_ID] Show the details of the volume attachment between ``VOLUME_ID`` and ``INSTANCE_UUID``. .. versionadded:: 24.0.0 (Xena) .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Success * - 1 - An unexpected error occurred * - 2 - Instance not found * - 3 - Instance is not attached to volume volume_attachment refresh ------------------------- .. program:: nova-manage volume_attachment refresh .. code-block:: shell nova-manage volume_attachment refresh [INSTANCE_UUID] [VOLUME_ID] [CONNECTOR_PATH] Refresh the connection info associated with a given volume attachment. The instance must be attached to the volume, have a ``vm_state`` of ``stopped`` and not be ``locked``. ``CONNECTOR_PATH`` should be the path to a JSON-formatted file containing up-to-date connector information for the compute currently hosting the instance as generated using the :program:`nova-manage volume_attachment get_connector` command. .. versionadded:: 24.0.0 (Xena) .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Success * - 1 - An unexpected error occurred * - 2 - Connector path does not exist * - 3 - Failed to open connector path * - 4 - Instance does not exist * - 5 - Instance state invalid (must be stopped and unlocked) * - 6 - Volume is not attached to the instance * - 7 - Connector host is not correct Libvirt Commands ================ libvirt get_machine_type ------------------------ .. program:: nova-manage libvirt get_machine_type .. code-block:: shell nova-manage libvirt get_machine_type [INSTANCE_UUID] Fetch and display the recorded machine type of a libvirt instance identified by ``INSTANCE_UUID``. .. versionadded:: 23.0.0 (Wallaby) .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Successfully completed * - 1 - An unexpected error occurred * - 2 - Unable to find instance or instance mapping * - 3 - No machine type found for instance libvirt update_machine_type --------------------------- .. program:: nova-manage libvirt update_machine_type .. code-block:: shell nova-manage libvirt update_machine_type \ [INSTANCE_UUID] [MACHINE_TYPE] [--force] Set or update the recorded machine type of instance ``INSTANCE_UUID`` to machine type ``MACHINE_TYPE``. The following criteria must be met when using this command: * The instance must have a ``vm_state`` of ``STOPPED``, ``SHELVED`` or ``SHELVED_OFFLOADED``. * The machine type must be supported. The supported list includes alias and versioned types of ``pc``, ``pc-i440fx``, ``pc-q35``, ``q35``, ``virt`` or ``s390-ccw-virtio``. * The update will not move the instance between underlying machine types. For example, ``pc`` to ``q35``. * The update will not move the instance between an alias and versioned machine type or vice versa. For example, ``pc`` to ``pc-1.2.3`` or ``pc-1.2.3`` to ``pc``. A ``--force`` flag is provided to skip the above checks, but caution should be taken as this could easily lead to the underlying ABI of the instance changing when moving between machine types. .. versionadded:: 23.0.0 (Wallaby) .. rubric:: Options ..
option:: --force Skip machine type compatibility checks and force machine type update. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Update completed successfully * - 1 - An unexpected error occurred * - 2 - Unable to find instance or instance mapping * - 3 - The instance has an invalid ``vm_state`` * - 4 - The proposed update of the machine type is invalid * - 5 - The provided machine type is unsupported libvirt list_unset_machine_type ------------------------------- .. program:: nova-manage libvirt list_unset_machine_type .. code-block:: shell nova-manage libvirt list_unset_machine_type [--cell-uuid ] List the UUID of any instance without ``hw_machine_type`` set. This command is useful for operators attempting to determine when it is safe to change the :oslo.config:option:`libvirt.hw_machine_type` option within an environment. .. versionadded:: 23.0.0 (Wallaby) .. rubric:: Options .. option:: --cell_uuid The UUID of the cell to list instances from. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Completed successfully, no instances found without ``hw_machine_type`` set * - 1 - An unexpected error occurred * - 2 - Unable to find cell mapping * - 3 - Instances found without ``hw_machine_type`` set Image Property Commands ======================= image_property show ------------------- .. program:: nova-manage image_property show .. code-block:: shell nova-manage image_property show [INSTANCE_UUID] [IMAGE_PROPERTY] Fetch and display the recorded image property ``IMAGE_PROPERTY`` of an instance identified by ``INSTANCE_UUID``. .. versionadded:: 25.0.0 (Yoga) .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Successfully completed * - 1 - An unexpected error occurred * - 2 - Unable to find instance or instance mapping * - 3 - No image property found for instance image_property set ------------------ .. program:: nova-manage image_property set .. code-block:: shell nova-manage image_property set \ [INSTANCE_UUID] [--property] [IMAGE_PROPERTY]=[VALUE] Set or update the recorded image property ``IMAGE_PROPERTY`` of instance ``INSTANCE_UUID`` to value ``VALUE``. The following criteria must be met when using this command: * The instance must have a ``vm_state`` of ``STOPPED``, ``SHELVED`` or ``SHELVED_OFFLOADED``. This command is useful for operators who need to update stored instance image properties that have become invalidated by a change of instance machine type, for example. .. versionadded:: 25.0.0 (Yoga) .. rubric:: Options .. option:: --property Image property to set using the format name=value. For example: ``--property hw_disk_bus=virtio --property hw_cdrom_bus=sata``. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Update completed successfully * - 1 - An unexpected error occurred * - 2 - Unable to find instance or instance mapping * - 3 - The instance has an invalid ``vm_state`` * - 4 - The provided image property name is invalid * - 5 - The provided image property value is invalid Limits Commands =============== limits migrate_to_unified_limits -------------------------------- .. program:: nova-manage limits migrate_to_unified_limits .. 
code-block:: shell nova-manage limits migrate_to_unified_limits [--project-id ] [--region-id ] [--verbose] [--dry-run] [--quiet] [--no-embedded-flavor-scan] Migrate quota limits from the Nova database to unified limits in Keystone. This command is useful for operators to migrate from legacy quotas to unified limits. Limits are migrated by copying them from the Nova database to Keystone by creating them using the Keystone API. The Nova configuration file used by ``nova-manage`` must have a :oslo.config:group:`keystone_authtoken` section that contains authentication settings in order for the Keystone API calls to succeed. As an example: .. code-block:: ini [keystone_authtoken] project_domain_name = Default project_name = service user_domain_name = Default username = nova auth_url = http://127.0.0.1/identity auth_type = password password = By default `Keystone policy configuration`_, access to create, update, and delete in the `unified limits API`_ is restricted to callers with the ``admin`` role. You will need to ensure that the user configured under :oslo.config:group:`keystone_authtoken` has the necessary role and scope. .. warning:: The ``limits migrate_to_unified_limits`` command will create limits only for resources that exist in the legacy quota system and any resource that does not have a unified limit in Keystone will use a quota limit of **0**. For resource classes that are allocated by the placement service and have no default limit set, you will need to create default limits manually. The most common example is class:DISK_GB. All Nova API requests that need to allocate DISK_GB will fail quota enforcement until a default limit for it is set in Keystone. See the :doc:`unified limits documentation ` about creating limits using the OpenStackClient. .. _Keystone policy configuration: https://docs.openstack.org/keystone/latest/configuration/policy.html .. _unified limits API: https://docs.openstack.org/api-ref/identity/v3/index.html#unified-limits .. versionadded:: 28.0.0 (2023.2 Bobcat) .. versionchanged:: 31.0.0 (2025.1 Epoxy) Added flavor scanning for resource classes missing limits along with the --quiet and --no-embedded-flavor-scan options. .. rubric:: Options .. option:: --project-id The project ID for which to migrate quota limits. .. option:: --region-id The region ID for which to migrate quota limits. .. option:: --verbose Provide verbose output during execution. .. option:: --dry-run Show what limits would be created without actually creating them. Flavors will still be scanned for resource classes missing limits. .. option:: --quiet Do not output anything during execution. .. option:: --no-embedded-flavor-scan Do not scan instances embedded flavors for resource classes missing limits. .. rubric:: Return codes .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - Command completed successfully * - 1 - An unexpected error occurred * - 2 - Failed to connect to the database * - 3 - Missing registered limits were identified See Also ======== :doc:`nova-policy(1) `, :doc:`nova-status(1) ` Bugs ==== * Nova bugs are managed at `Launchpad `__ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/cli/nova-novncproxy.rst0000664000175000017500000000536200000000000021324 0ustar00zuulzuul00000000000000=============== nova-novncproxy =============== .. program:: nova-novncproxy Synopsis ======== :: nova-novncproxy [...] 
Description =========== :program:`nova-novncproxy` is a server daemon that serves the Nova noVNC Websocket Proxy service, which provides a websocket proxy that is compatible with OpenStack Nova noVNC consoles. Options ======= .. rubric:: General options .. include:: opts/common.rst .. rubric:: Websockify options .. include:: opts/websockify.rst .. rubric:: VNC options .. option:: --vnc-auth_schemes VNC_AUTH_SCHEMES The authentication schemes to use with the compute node. Control what RFB authentication schemes are permitted for connections between the proxy and the compute host. If multiple schemes are enabled, the first matching scheme will be used, thus the strongest schemes should be listed first. .. option:: --vnc-novncproxy_host VNC_NOVNCPROXY_HOST IP address that the noVNC console proxy should bind to. The VNC proxy is an OpenStack component that enables compute service users to access their instances through VNC clients. noVNC provides VNC support through a websocket-based client. This option sets the private address to which the noVNC console proxy service should bind to. .. option:: --vnc-novncproxy_port VNC_NOVNCPROXY_PORT Port that the noVNC console proxy should bind to. The VNC proxy is an OpenStack component that enables compute service users to access their instances through VNC clients. noVNC provides VNC support through a websocket-based client. This option sets the private port to which the noVNC console proxy service should bind to. .. option:: --vnc-vencrypt_ca_certs VNC_VENCRYPT_CA_CERTS The path to the CA certificate PEM file The fully qualified path to a PEM file containing one or more x509 certificates for the certificate authorities used by the compute node VNC server. .. option:: --vnc-vencrypt_client_cert VNC_VENCRYPT_CLIENT_CERT The path to the client key file (for x509) The fully qualified path to a PEM file containing the x509 certificate which the VNC proxy server presents to the compute node during VNC authentication. .. option:: --vnc-vencrypt_client_key VNC_VENCRYPT_CLIENT_KEY The path to the client certificate PEM file (for x509) The fully qualified path to a PEM file containing the private key which the VNC proxy server presents to the compute node during VNC authentication. Files ===== * ``/etc/nova/nova.conf`` * ``/etc/nova/rootwrap.conf`` * ``/etc/nova/rootwrap.d/`` See Also ======== :doc:`nova-serialproxy(1) `, :doc:`nova-spicehtml5proxy(1) ` Bugs ==== * Nova bugs are managed at `Launchpad `__ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/cli/nova-policy.rst0000664000175000017500000000372400000000000020376 0ustar00zuulzuul00000000000000=========== nova-policy =========== .. program:: nova-policy Synopsis ======== :: nova-policy [...] Description =========== :program:`nova-policy` is a tool that allows for inspection of policy file configuration. It provides a way to identify the actions available for a user. It does not require a running deployment: validation runs against the policy files typically located at ``/etc/nova/policy.yaml`` and in the ``/etc/nova/policy.d`` directory. These paths are configurable via the ``[oslo_config] policy_file`` and ``[oslo_config] policy_dirs`` configuration options, respectively. Options ======= .. rubric:: General options .. include:: opts/common.rst .. rubric:: User options .. option:: --os-roles Defaults to ``$OS_ROLES``. .. option:: --os-tenant-id Defaults to ``$OS_TENANT_ID``. .. 
option:: --os-user-id Defaults to ``$OS_USER_ID``. Commands ======== policy check ------------ :: nova-policy policy check [-h] [--api-name ] [--target [...] Prints all passing policy rules for the given user. .. rubric:: Options .. option:: --api-name Return only the passing policy rules containing the given API name. If unspecified, all passing policy rules will be returned. .. option:: --target [...] The target(s) against which the policy rule authorization will be tested. The available targets are: ``project_id``, ``user_id``, ``quota_class``, ``availability_zone``, ``instance_id``. When ``instance_id`` is used, the other targets will be overwritten. If unspecified, the given user will be considered as the target. Files ===== * ``/etc/nova/nova.conf`` * ``/etc/nova/policy.yaml`` * ``/etc/nova/policy.d/`` See Also ======== :doc:`nova-manage(1) `, :doc:`nova-status(1) ` Bugs ==== * Nova bugs are managed at `Launchpad `__ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/cli/nova-rootwrap.rst0000664000175000017500000000220000000000000020740 0ustar00zuulzuul00000000000000============= nova-rootwrap ============= .. program:: nova-rootwrap Synopsis ======== :: nova-rootwrap CONFIG_FILE COMMAND Description =========== :program:`nova-rootwrap` is an application that filters which commands nova is allowed to run as another user. To use this, you should set the following in ``nova.conf``:: rootwrap_config=/etc/nova/rootwrap.conf You also need to let the nova user run :program:`nova-rootwrap` as root in ``sudoers``:: nova ALL = (root) NOPASSWD: /usr/bin/nova-rootwrap /etc/nova/rootwrap.conf * To make allowed commands node-specific, your packaging should only install ``{compute,network}.filters`` respectively on compute and network nodes, i.e. :program:`nova-api-wsgi` nodes should not have any of those files installed. .. note:: :program:`nova-rootwrap` is being slowly deprecated and replaced by ``oslo.privsep``, and will eventually be removed. Files ===== * ``/etc/nova/nova.conf`` * ``/etc/nova/rootwrap.conf`` * ``/etc/nova/rootwrap.d/`` See Also ======== :doc:`nova-compute(1) ` Bugs ==== * Nova bugs are managed at `Launchpad `__ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/cli/nova-scheduler.rst0000664000175000017500000000124700000000000021053 0ustar00zuulzuul00000000000000============== nova-scheduler ============== .. program:: nova-scheduler Synopsis ======== :: nova-scheduler [...] Description =========== :program:`nova-scheduler` is a server daemon that serves the Nova Scheduler service, which is responsible for picking a compute node to run a given instance on. Options ======= .. rubric:: General options .. include:: opts/common.rst Files ===== * ``/etc/nova/nova.conf`` * ``/etc/nova/rootwrap.conf`` * ``/etc/nova/rootwrap.d/`` See Also ======== :doc:`nova-compute(1) `, :doc:`nova-conductor(1) ` Bugs ==== * Nova bugs are managed at `Launchpad `__ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/cli/nova-serialproxy.rst0000664000175000017500000000271700000000000021461 0ustar00zuulzuul00000000000000================ nova-serialproxy ================ .. program:: nova-serialproxy Synopsis ======== :: nova-serialproxy [...] 
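For example, an illustrative invocation that enables SSL-only connections using the Websockify options included below (the certificate and key paths are placeholders):

.. code-block:: shell

   # Illustrative only: certificate and key paths are placeholders.
   nova-serialproxy --config-file /etc/nova/nova.conf \
       --ssl_only \
       --cert /etc/nova/ssl/proxy.crt \
       --key /etc/nova/ssl/proxy.key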
Description =========== :program:`nova-serialproxy` is a server daemon that serves the Nova Serial Websocket Proxy service, which provides a websocket proxy that is compatible with OpenStack Nova serial ports. Options ======= .. rubric:: General options .. include:: opts/common.rst .. rubric:: Websockify options .. include:: opts/websockify.rst .. rubric:: Serial options .. option:: --serial_console-serialproxy_host SERIAL_CONSOLE_SERIALPROXY_HOST The IP address which is used by the ``nova-serialproxy`` service to listen for incoming requests. The ``nova-serialproxy`` service listens on this IP address for incoming connection requests to instances which expose serial console. .. option:: --serial_console-serialproxy_port SERIAL_CONSOLE_SERIALPROXY_PORT The port number which is used by the ``nova-serialproxy`` service to listen for incoming requests. The ``nova-serialproxy`` service listens on this port number for incoming connection requests to instances which expose serial console. Files ===== * ``/etc/nova/nova.conf`` * ``/etc/nova/rootwrap.conf`` * ``/etc/nova/rootwrap.d/`` See Also ======== :doc:`nova-novncproxy(1) `, :doc:`nova-spicehtml5proxy(1) ` Bugs ==== * Nova bugs are managed at `Launchpad `__ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/cli/nova-spicehtml5proxy.rst0000664000175000017500000000305100000000000022247 0ustar00zuulzuul00000000000000==================== nova-spicehtml5proxy ==================== .. program:: nova-spicehtml5proxy Synopsis ======== :: nova-spicehtml5proxy [...] Description =========== :program:`nova-spicehtml5proxy` is a server daemon that serves the Nova SPICE HTML5 Websocket Proxy service, which provides a websocket proxy that is compatible with OpenStack Nova SPICE HTML5 consoles. Options ======= .. rubric:: General options .. include:: opts/common.rst .. rubric:: Websockify options .. include:: opts/websockify.rst .. rubric:: Spice options .. option:: --spice-html5proxy_host SPICE_HTML5PROXY_HOST IP address or a hostname on which the ``nova-spicehtml5proxy`` service listens for incoming requests. This option depends on the ``[spice] html5proxy_base_url`` option in ``nova.conf``. The ``nova-spicehtml5proxy`` service must be listening on a host that is accessible from the HTML5 client. .. option:: --spice-html5proxy_port SPICE_HTML5PROXY_PORT Port on which the ``nova-spicehtml5proxy`` service listens for incoming requests. This option depends on the ``[spice] html5proxy_base_url`` option in ``nova.conf``. The ``nova-spicehtml5proxy`` service must be listening on a port that is accessible from the HTML5 client. Files ===== * ``/etc/nova/nova.conf`` * ``/etc/nova/rootwrap.conf`` * ``/etc/nova/rootwrap.d/`` See Also ======== :doc:`nova-novncproxy(1) `, :doc:`nova-serialproxy(1) ` Bugs ==== * Nova bugs are managed at `Launchpad `__ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/cli/nova-status.rst0000664000175000017500000001205100000000000020413 0ustar00zuulzuul00000000000000=========== nova-status =========== .. program:: nova-status Synopsis ======== :: nova-status [ [...]] Description =========== :program:`nova-status` is a tool that provides routines for checking the status of a Nova deployment. 
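For example, a typical pre-upgrade run (output and return code vary by deployment) looks like:

.. code-block:: shell

   # Run the release-specific readiness checks against the configured
   # databases and services.
   nova-status upgrade check

   # The shell exit status maps to the return codes documented below.
   echo $?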
Options ======= The standard pattern for executing a :program:`nova-status` command is:: nova-status [] Run without arguments to see a list of available command categories:: nova-status Categories are: * ``upgrade`` Detailed descriptions are below. You can also run with a category argument such as ``upgrade`` to see a list of all commands in that category:: nova-status upgrade These sections describe the available categories and arguments for :program:`nova-status`. Upgrade ~~~~~~~ .. _nova-status-checks: ``nova-status upgrade check`` Performs a release-specific readiness check before restarting services with new code. This command expects to have complete configuration and access to databases and services within a cell. For example, this check may query the Nova API database and one or more cell databases. It may also make requests to other services such as the Placement REST API via the Keystone service catalog. **Return Codes** .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - All upgrade readiness checks passed successfully and there is nothing to do. * - 1 - At least one check encountered an issue and requires further investigation. This is considered a warning but the upgrade may be OK. * - 2 - There was an upgrade status check failure that needs to be investigated. This should be considered something that stops an upgrade. * - 255 - An unexpected error occurred. **History of Checks** **15.0.0 (Ocata)** * Checks are added for cells v2 so ``nova-status upgrade check`` should be run *after* running the ``nova-manage cell_v2 simple_cell_setup`` command. * Checks are added for the Placement API such that there is an endpoint in the Keystone service catalog, the service is running and the check can make a successful request to the endpoint. The command also checks to see that there are compute node resource providers checking in with the Placement service. More information on the Placement service can be found at :placement-doc:`Placement API <>`. **16.0.0 (Pike)** * Checks for the Placement API are modified to require version 1.4, that is needed in Pike and further for nova-scheduler to work correctly. **17.0.0 (Queens)** * Checks for the Placement API are modified to require version 1.17. **18.0.0 (Rocky)** * Checks for the Placement API are modified to require version 1.28. * Checks that ironic instances have had their embedded flavors migrated to use custom resource classes. * Checks for ``nova-osapi_compute`` service versions that are less than 15 across all cell mappings which might cause issues when querying instances depending on how the **nova-api** service is configured. See https://bugs.launchpad.net/nova/+bug/1759316 for details. * Checks that existing instances have been migrated to have a matching request spec in the API DB. **19.0.0 (Stein)** * Checks for the Placement API are modified to require version 1.30. * Checks are added for the **nova-consoleauth** service to warn and provide additional instructions to set **[workarounds]enable_consoleauth = True** while performing a live/rolling upgrade. * The "Resource Providers" upgrade check was removed since the placement service code is being extracted from nova and the related tables are no longer used in the ``nova_api`` database. * The "API Service Version" upgrade check was removed since the corresponding code for that check was removed in Stein. **20.0.0 (Train)** * Checks for the Placement API are modified to require version 1.32. 
* Checks to ensure block-storage (cinder) API version 3.44 is available in order to support multi-attach volumes. If ``[cinder]/auth_type`` is not configured this is a no-op check. * The "**nova-consoleauth** service" upgrade check was removed since the service was removed in Train. * The ``Request Spec Migration`` check was removed. **21.0.0 (Ussuri)** * Checks for the Placement API are modified to require version 1.35. * Checks for the policy files are not automatically overwritten with new defaults. This check has been dropped in 26.0.0 (Zed) release. **22.0.0 (Victoria)** * Checks for the policy files is not JSON-formatted. **23.0.0 (Wallaby)** * Checks for computes older than the previous major release * Checks for any instances without ``hw_machine_type`` set. **24.0.0 (Xena)** * Checks for the Placement API are modified to require version 1.36. See Also ======== :doc:`nova-manage(1) `, :doc:`nova-policy(1) ` Bugs ==== * Nova bugs are managed at `Launchpad `_ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.2496078 nova-32.0.0/doc/source/cli/opts/0000775000175000017500000000000000000000000016363 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/cli/opts/common.rst0000664000175000017500000000644600000000000020417 0ustar00zuulzuul00000000000000.. option:: --config-dir DIR Path to a config directory to pull `*.conf` files from. This file set is sorted, so as to provide a predictable parse order if individual options are over-ridden. The set is parsed after the file(s) specified via previous --config-file, arguments hence over-ridden options in the directory take precedence. This option must be set from the command-line. .. option:: --config-file PATH Path to a config file to use. Multiple config files can be specified, with values in later files taking precedence. Defaults to None. This option must be set from the command-line. .. option:: --debug, -d Set the logging level to DEBUG instead of the default INFO level. .. option:: --log-config-append PATH, --log-config PATH, --log_config PATH The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging configuration files, see the Python logging module documentation. Note that when logging configuration files are used then all logging configuration is set in the configuration file and other logging configuration options are ignored (for example, log-date-format). .. option:: --log-date-format DATE_FORMAT Defines the format string for %(asctime)s in log records. Default: None . This option is ignored if log_config_append is set. .. option:: --log-dir LOG_DIR, --logdir LOG_DIR (Optional) The base directory used for relative log_file paths. This option is ignored if log_config_append is set. .. option:: --log-file PATH, --logfile PATH (Optional) Name of log file to send logging output to. If no default is set, logging will go to stderr as defined by use_stderr. This option is ignored if log_config_append is set. .. option:: --nodebug The inverse of :option:`--debug`. .. option:: --nouse-journal The inverse of :option:`--use-journal`. .. option:: --nouse-json The inverse of :option:`--use-json`. .. option:: --nouse-syslog The inverse of :option:`--use-syslog`. .. option:: --nowatch-log-file The inverse of :option:`--watch-log-file`. .. 
option:: --syslog-log-facility SYSLOG_LOG_FACILITY Syslog facility to receive log lines. This option is ignored if log_config_append is set. .. option:: --use-journal Enable journald for logging. If running in a systemd environment you may wish to enable journal support. Doing so will use the journal native protocol which includes structured metadata in addition to log messages.This option is ignored if log_config_append is set. .. option:: --use-json Use JSON formatting for logging. This option is ignored if log_config_append is set. .. option:: --use-syslog Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424. This option is ignored if log_config_append is set. .. option:: --version Show program's version number and exit .. option:: --watch-log-file Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log_file option is specified and Linux platform is used. This option is ignored if log_config_append is set. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/cli/opts/websockify.rst0000664000175000017500000000154700000000000021271 0ustar00zuulzuul00000000000000.. option:: --cert CERT Path to SSL certificate file. .. option:: --daemon Run as a background process. .. option:: --key KEY SSL key file (if separate from cert). .. option:: --nodaemon The inverse of :option:`--daemon`. .. option:: --nosource_is_ipv6 The inverse of :option:`--source_is_ipv6`. .. option:: --nossl_only The inverse of :option:`--ssl_only`. .. option:: --record RECORD Filename that will be used for storing websocket frames received and sent by a proxy service (like VNC, spice, serial) running on this host. If this is not set, no recording will be done. .. option:: --source_is_ipv6 Set to True if source host is addressed with IPv6. .. option:: --ssl_only Disallow non-encrypted connections. .. option:: --web WEB Path to directory with content which will be served by a web server. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.2496078 nova-32.0.0/doc/source/common/0000775000175000017500000000000000000000000016117 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/common/numa-live-migration-warning.txt0000664000175000017500000000122600000000000024210 0ustar00zuulzuul00000000000000.. important:: In deployments older than Train, or in mixed Stein/Train deployments with a rolling upgrade in progress, unless :oslo.config:option:`specifically enabled `, live migration is not possible for instances with a NUMA topology when using the libvirt driver. A NUMA topology may be specified explicitly or can be added implicitly due to the use of CPU pinning or huge pages. Refer to `bug #1289064`__ for more information. As of Train, live migration of instances with a NUMA topology when using the libvirt driver is fully supported. 
__ https://bugs.launchpad.net/nova/+bug/1289064 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/conf.py0000664000175000017500000001343700000000000016136 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # nova documentation build configuration file # # Refer to the Sphinx documentation for advice on configuring this file: # # http://www.sphinx-doc.org/en/stable/config.html import os import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.graphviz', 'openstackdocstheme', 'sphinx_feature_classification.support_matrix', 'oslo_config.sphinxconfiggen', 'oslo_config.sphinxext', 'oslo_policy.sphinxpolicygen', 'oslo_policy.sphinxext', 'ext.versioned_notifications', 'ext.feature_matrix', 'ext.extra_specs', 'sphinxcontrib.rsvgconverter', ] config_generator_config_file = '../../etc/nova/nova-config-generator.conf' sample_config_basename = '_static/nova' policy_generator_config_file = [ ('../../etc/nova/nova-policy-generator.conf', '_static/nova'), ] todo_include_todos = True # The master toctree document. master_doc = 'index' # General information about the project. copyright = u'2010-present, OpenStack Foundation' # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # -- Options for man page output ---------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' _man_pages = [ ('nova-api', 'Server for the OpenStack Compute API service.'), ( 'nova-api-metadata', 'Server for the OpenStack Compute metadata API service.', ), ( 'nova-api-os-compute', 'Server for the OpenStack Compute API service.', ), ('nova-compute', 'Server for the OpenStack Compute compute service.'), ('nova-conductor', 'Server for the OpenStack Compute conductor service.'), ('nova-manage', 'Management tool for the OpenStack Compute services.'), ( 'nova-novncproxy', 'Server for the OpenStack Compute VNC console proxy service.' 
), ( 'nova-rootwrap', 'Root wrapper daemon for the OpenStack Compute service.', ), ( 'nova-policy', 'Inspect policy configuration for the OpenStack Compute services.', ), ( 'nova-scheduler', 'Server for the OpenStack Compute scheduler service.', ), ( 'nova-serialproxy', 'Server for the OpenStack Compute serial console proxy service.', ), ( 'nova-spicehtml5proxy', 'Server for the OpenStack Compute SPICE console proxy service.', ), ( 'nova-status', 'Inspect configuration status for the OpenStack Compute services.', ), ] man_pages = [ ('cli/%s' % name, name, description, ['openstack@lists.openstack.org'], 1) for name, description in _man_pages] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'openstackdocs' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any paths that contain "extra" files, such as .htaccess or # robots.txt. html_extra_path = ['_extra'] # -- Options for LaTeX output ------------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'doc-nova.tex', u'Nova Documentation', u'OpenStack Foundation', 'manual'), ] # Allow deeper levels of nesting for \begin...\end stanzas latex_elements = { 'maxlistdepth': 10, 'extraclassoptions': 'openany,oneside', 'preamble': r''' \setcounter{tocdepth}{3} \setcounter{secnumdepth}{3} ''', } # Disable use of xindy since that's another binary dependency that's not # available on all platforms latex_use_xindy = False # -- Options for openstackdocstheme ------------------------------------------- # openstackdocstheme options openstackdocs_repo_name = 'openstack/nova' openstackdocs_bug_project = 'nova' openstackdocs_bug_tag = 'doc' openstackdocs_pdf_link = True # keep this ordered to keep mriedem happy # # NOTE(stephenfin): Projects that don't have a release branch, like TripleO and # reno, should not be included here openstackdocs_projects = [ 'ceilometer', 'cinder', 'cyborg', 'glance', 'horizon', 'ironic', 'keystone', 'neutron', 'nova', 'oslo.log', 'oslo.messaging', 'oslo.i18n', 'oslo.versionedobjects', 'placement', 'python-novaclient', 'python-openstackclient', 'watcher', ] # -- Custom extensions -------------------------------------------------------- ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.2536077 nova-32.0.0/doc/source/configuration/0000775000175000017500000000000000000000000017476 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/configuration/config.rst0000664000175000017500000000045400000000000021500 0ustar00zuulzuul00000000000000===================== Configuration Options ===================== The following is an overview of all available configuration options in Nova. .. only:: html For a sample configuration file, refer to :doc:`sample-config`. .. 
show-options:: :config-file: etc/nova/nova-config-generator.conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/configuration/extra-specs.rst0000664000175000017500000001220300000000000022464 0ustar00zuulzuul00000000000000=========== Extra Specs =========== The following is an overview of all extra specs recognized by nova in its default configuration. .. note:: Other services and virt drivers may provide additional extra specs not listed here. In addition, it is possible to register your own extra specs. For more information on the latter, refer to :doc:`/admin/scheduling`. Placement --------- The following extra specs are used during scheduling to modify the request sent to placement. ``resources`` ~~~~~~~~~~~~~ The following extra specs are used to request an amount of the specified resource from placement when scheduling. All extra specs expect an integer value. .. note:: Not all of the resource types listed below are supported by all virt drivers. .. extra-specs:: resources :summary: ``trait`` ~~~~~~~~~ The following extra specs are used to request a specified trait from placement when scheduling. All extra specs expect one of the following values: - ``required`` - ``forbidden`` .. note:: Not all of the traits listed below are supported by all virt drivers. .. extra-specs:: trait :summary: Scheduler Filters ----------------- The following extra specs are specific to various in-tree scheduler filters. ``aggregate_instance_extra_specs`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following extra specs are used to specify metadata that must be present on the aggregate of a host. If this metadata is not present or does not match the expected value, the aggregate and all hosts within it will be rejected. Requires the ``AggregateInstanceExtraSpecsFilter`` scheduler filter. .. extra-specs:: aggregate_instance_extra_specs ``capabilities`` ~~~~~~~~~~~~~~~~ The following extra specs are used to specify a host capability that must be provided by the host compute service. If this capability is not present or does not match the expected value, the host will be rejected. Requires the ``ComputeCapabilitiesFilter`` scheduler filter. All extra specs expect similar types of values: * ``=`` (equal to or greater than as a number; same as vcpus case) * ``==`` (equal to as a number) * ``!=`` (not equal to as a number) * ``>=`` (greater than or equal to as a number) * ``<=`` (less than or equal to as a number) * ``s==`` (equal to as a string) * ``s!=`` (not equal to as a string) * ``s>=`` (greater than or equal to as a string) * ``s>`` (greater than as a string) * ``s<=`` (less than or equal to as a string) * ``s<`` (less than as a string) * ``<in>`` (substring) * ``<all-in>`` (all elements contained in collection) * ``<or>`` (find one of these) * A specific value, e.g. ``true``, ``123``, ``testing`` Examples are: ``>= 5``, ``s== 2.1.0``, ``<in> gcc``, ``<all-in> aes mmx``, and ``<or> fpu <or> gpu`` .. note:: Not all operators will apply to all types of values. For example, the ``==`` operator should not be used for a string value - use ``s==`` instead. .. extra-specs:: capabilities :summary: Virt driver ----------- The following extra specs are used as hints to configure internals of an instance, from the bus used for paravirtualized devices to the amount of a physical device to passthrough to the instance. Most of these are virt driver-specific. ``quota`` ~~~~~~~~~ The following extra specs are used to configure quotas for various paravirtualized devices. 
Different quotas are supported by different virt drivers, as noted below. .. extra-specs:: quota ``accel`` ~~~~~~~~~ The following extra specs are used to configure attachment of various accelerators to an instance. For more information, refer to :cyborg-doc:`the Cyborg documentation <>`. They are only supported by the libvirt virt driver. .. extra-specs:: accel ``pci_passthrough`` ~~~~~~~~~~~~~~~~~~~ The following extra specs are used to configure passthrough of a host PCI device to an instance. This requires prior host configuration. For more information, refer to :doc:`/admin/pci-passthrough`. They are only supported by the libvirt virt driver. .. extra-specs:: pci_passthrough ``hw`` ~~~~~~ The following extra specs are used to configure various attributes of instances. Some of the extra specs act as feature flags, while others tweak for example the guest-visible CPU topology of the instance. Except where otherwise stated, they are only supported by the libvirt virt driver. .. extra-specs:: hw ``hw_rng`` ~~~~~~~~~~ The following extra specs are used to configure a random number generator for an instance. They are only supported by the libvirt virt driver. .. extra-specs:: hw_rng ``hw_video`` ~~~~~~~~~~~~ The following extra specs are used to configure attributes of the default guest video device. They are only supported by the libvirt virt driver. .. extra-specs:: hw_video ``os`` ~~~~~~ The following extra specs are used to configure secure_boot. They are only supported by the libvirt virt driver. .. extra-specs:: os ``vmware`` ~~~~~~~~~~ The following extra specs are used to configure various attributes of instances when using the VMWare virt driver. They are only supported by the VMWare virt driver. .. extra-specs:: vmware Others (uncategorized) ---------------------- The following extra specs are not part of a group. .. extra-specs:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/configuration/index.rst0000664000175000017500000001167500000000000021351 0ustar00zuulzuul00000000000000=================== Configuration Guide =================== The static configuration for nova lives in two main files: ``nova.conf`` and ``policy.yaml``. These are described below. For a bigger picture view on configuring nova to solve specific problems, refer to the :doc:`Nova Admin Guide `. Configuration ------------- Nova, like most OpenStack projects, uses INI-style configuration files to configure various services and utilities. This functionality is provided by the `oslo.config`__ project. *oslo.config* supports loading configuration from both individual configuration files and a directory of configuration files. By default, nova will search the below directories for two config files - ``nova.conf`` and ``{prog}.conf``, where ``prog`` corresponds to the name of the service or utility being configured such as :program:`nova-compute` - and two config directories - ``nova.conf.d`` and ``{prog}.conf.d``: - ``${HOME}/.nova`` - ``${HOME}`` - ``/etc/nova`` - ``/etc`` - ``${SNAP_COMMON}/etc/nova/`` - ``${SNAP}/etc/nova/`` Where a matching file is found, all other directories will be skipped. This behavior can be overridden by using the ``--config-file`` and ``--config-dir`` options provided for each executable. More information on how you can use the configuration options to configure services and what configuration options are available can be found below. 
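For example, an individual service can be pointed at an explicit configuration file and configuration directory (the paths below are illustrative):

.. code-block:: shell

   # Illustrative paths; by default the directories listed above are searched.
   nova-compute --config-file /etc/nova/nova.conf \
       --config-dir /etc/nova/nova.conf.d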
* :doc:`Configuration Guide `: Detailed configuration guides for various parts of your Nova system. Helpful reference for setting up specific hypervisor backends. * :doc:`Config Reference `: A complete reference of all configuration options available in the ``nova.conf`` file. .. only:: html * :doc:`Sample Config File `: A sample config file with inline documentation. .. # NOTE(mriedem): This is the section where we hide things that we don't # actually want in the table of contents but sphinx build would fail if # they aren't in the toctree somewhere. .. # NOTE(amotoki): toctree needs to be placed at the end of the section to # keep the document structure in the PDF doc. .. toctree:: :hidden: config .. # NOTE(amotoki): Sample files are only available in HTML document. # Inline sample files with literalinclude hit LaTeX processing error # like TeX capacity exceeded and direct links are discouraged in PDF doc. .. only:: html .. toctree:: :hidden: sample-config .. __: https://docs.openstack.org/oslo.config/latest/ Policy ------ Nova, like most OpenStack projects, uses a policy language to restrict permissions on REST API actions. This functionality is provided by the `oslo.policy`__ project. *oslo.policy* supports loading policy configuration from both an individual configuration file, which defaults to ``policy.yaml``, and one or more directories of configuration files, which defaults to ``policy.d``. These must be located in the same directory as the ``nova.conf`` file(s). This behavior can be overridden by setting the :oslo.config:option:`oslo_policy.policy_file` and :oslo.config:option:`oslo_policy.policy_dirs` configuration options. More information on how nova's policy configuration works and about what policies are available can be found below. * :doc:`Policy Concepts `: Starting in the Ussuri release, Nova API policy defines new default roles with system scope capabilities. These new changes improve the security level and manageability of Nova API as they are richer in terms of handling access at system and project level token with 'Read' and 'Write' roles. * :doc:`Policy Reference `: A complete reference of all policy points in nova and what they impact. .. only:: html * :doc:`Sample Policy File `: A sample nova policy file with inline documentation. .. # NOTE(mriedem): This is the section where we hide things that we don't # actually want in the table of contents but sphinx build would fail if # they aren't in the toctree somewhere. .. # NOTE(amotoki): toctree needs to be placed at the end of the section to # keep the document structure in the PDF doc. .. toctree:: :hidden: policy-concepts policy .. # NOTE(amotoki): Sample files are only available in HTML document. # Inline sample files with literalinclude hit LaTeX processing error # like TeX capacity exceeded and direct links are discouraged in PDF doc. .. only:: html .. toctree:: :hidden: sample-policy .. __: https://docs.openstack.org/oslo.policy/latest/ Extra Specs ----------- Nova uses *flavor extra specs* as a way to provide additional information to instances beyond basic information like amount of RAM or disk. This information can range from hints for the scheduler to hypervisor-specific configuration instructions for the instance. * :doc:`Extra Spec Reference `: A complete reference for all extra specs currently recognized and supported by nova. .. 
toctree:: :hidden: extra-specs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/configuration/policy-concepts.rst0000664000175000017500000004055200000000000023351 0ustar00zuulzuul00000000000000Understanding Nova Policies =========================== .. warning:: JSON formatted policy file is deprecated since Nova 22.0.0(Victoria). Use YAML formatted file. Use `oslopolicy-convert-json-to-yaml`__ tool to convert the existing JSON to YAML formatted policy file in backward compatible way. .. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html Nova supports a rich policy system that has evolved significantly over its lifetime. Initially, this took the form of a large, mostly hand-written ``policy.yaml`` file but, starting in the Newton (14.0.0) release, policy defaults have been defined in the codebase, requiring the ``policy.yaml`` file only to override these defaults. In the Ussuri (21.0.0) release, further work was undertaken to address some issues that had been identified: #. No global vs project admin. The ``admin_only`` role is used for the global admin that is able to make almost any change to Nova, and see all details of the Nova system. The rule passes for any user with an admin role, it doesn’t matter which project is used. #. No read-only roles. Since several APIs tend to share a single policy rule for read and write actions, they did not provide the granularity necessary for read-only access roles. #. The ``admin_or_owner`` role did not work as expected. For most APIs with ``admin_or_owner``, the project authentication happened in a separate component than API in Nova that did not honor changes to policy. As a result, policy could not override hard-coded in-project checks. Keystone comes with ``admin``, ``manager``, ``member`` and ``reader`` roles by default. Please refer to :keystone-doc:`this document ` for more information about these new defaults. In addition, keystone supports a new "system scope" concept that makes it easier to protect deployment level resources from project or system level resources. Please refer to :keystone-doc:`this document ` and `system scope specification `_ to understand the scope concept. In the Nova 25.0.0 (Yoga) release, Nova policies implemented the scope concept and default roles provided by keystone (admin, member, and reader). Using common roles from keystone reduces the likelihood of similar, but different, roles implemented across projects or deployments (e.g., a role called ``observer`` versus ``reader`` versus ``auditor``). With the help of the new defaults it is easier to understand who can do what across projects, reduces divergence, and increases interoperability. The below sections explain how these new defaults in the Nova can solve the first two issues mentioned above and extend more functionality to end users in a safe and secure way. More information is provided in the `nova specification `_. Scope ----- OpenStack Keystone supports different scopes in tokens. These are described :keystone-doc:`here `. Token scopes represent the layer of authorization. Policy ``scope_types`` represent the layer of authorization required to access an API. .. note:: The ``scope_type`` of each policy is hardcoded to ``project`` scoped and is not overridable via the policy file. Nova policies have implemented the scope concept by defining the ``scope_type`` for all the policies to ``project`` scoped. 
It means if user tries to access nova APIs with ``system`` scoped token they will get 403 permission denied error. For example, consider the ``POST /os-server-groups`` API. .. code:: # Create a new server group # POST /os-server-groups # Intended scope(s): project #"os_compute_api:os-server-groups:create": "rule:project_member_api" Policy scope is disabled by default to allow operators to migrate from the old policy enforcement system in a graceful way. This can be enabled by configuring the :oslo.config:option:`oslo_policy.enforce_scope` option to ``True``. .. note:: [oslo_policy] enforce_scope=True Roles ----- You can refer to :keystone-doc:`this ` document to know about all available defaults from Keystone. Along with the ``scope_type`` feature, Nova policy defines new defaults for each policy. .. rubric:: ``reader`` This provides read-only access to the resources. Nova policies are defaulted to below rules: .. code-block:: python policy.RuleDefault( name="project_reader", check_str="role:reader and project_id:%(project_id)s", description="Default rule for Project level read only APIs." ) Using it in policy rule (with admin + reader access): (because we want to keep legacy admin behavior the same we need to give access of reader APIs to admin role too.) .. code-block:: python policy.DocumentedRuleDefault( name='os_compute_api:servers:show', check_str='role:admin or (' + 'role:reader and project_id:%(project_id)s)', description="Show a server", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}' } ], scope_types=['project'], ) OR .. code-block:: python policy.RuleDefault( name="admin_api", check_str="role:admin", description="Default rule for administrative APIs." ) policy.DocumentedRuleDefault( name='os_compute_api:servers:show', check_str='rule: admin or rule:project_reader', description='Show a server', operations=[ { 'method': 'GET', 'path': '/servers/{server_id}' } ], scope_types=['project'], ) .. rubric:: ``member`` project-member is denoted by someone with the member role on a project. It is intended to be used by end users who consume resources within a project which requires higher permission than reader role but less than admin role. It inherits all the permissions of a project-reader. project-member persona in the policy check string: .. code-block:: python policy.RuleDefault( name="project_member", check_str="role:member and project_id:%(project_id)s", description="Default rule for Project level non admin APIs." ) Using it in policy rule (with admin + member access): (because we want to keep legacy admin behavior, admin role gets access to the project level member APIs.) .. code-block:: python policy.DocumentedRuleDefault( name='os_compute_api:servers:create', check_str='role:admin or (' + 'role:member and project_id:%(project_id)s)', description='Create a server', operations=[ { 'method': 'POST', 'path': '/servers' } ], scope_types=['project'], ) OR .. code-block:: python policy.RuleDefault( name="admin_api", check_str="role:admin", description="Default rule for administrative APIs." ) policy.DocumentedRuleDefault( name='os_compute_api:servers:create', check_str='rule_admin or rule:project_member', description='Create a server', operations=[ { 'method': 'POST', 'path': '/servers' } ], scope_types=['project'], ) 'project_id:%(project_id)s' in the check_str is important to restrict the access within the requested project. .. rubric:: ``manager`` ``project_manager`` is denoted by someone with the manager role on a project. 
It is intended to be used in project-level management APIs and perform more privileged operations on its project resources than ``project_member``. It inherits all the permissions of a ``project_member`` and ``project_reader``. For example, a ``project_manager`` can migrate (cold or live) their servers without specifying the host. Further, a ``project_manager`` will be able to list migrations related to their own project. ``project_manager`` persona in Nova policy rule (it is defined as ``project_manager_api`` in policy yaml) looks like: 'project_id:%(project_id)s' in the check_str is important to restrict the access within the requested project. .. code-block:: yaml # Default rule for Project level management APIs. "project_manager_api": "role:manager and project_id:%(project_id)s" To keep the legacy ``admin`` behavior unchanged, Nova allow ``admin`` also to access the project level management APIs: .. code-block:: yaml # Default rule for Project level management APIs. "project_manager_or_admin": "rule:project_manager_api or rule:context_is_admin" The above base rule are used for specific API access: .. code-block:: yaml # Cold migrate a server without specifying a host # POST /servers/{server_id}/action (migrate) # Intended scope(s): project "os_compute_api:os-migrate-server:migrate": "rule:project_manager_or_admin" .. rubric:: ``admin`` This role is to perform the admin level write operations. Nova policies are defaulted to below rules: .. code-block:: python policy.DocumentedRuleDefault( name='os_compute_api:os-hypervisors:list', check_str='role:admin', scope_types=['project'] ) With these new defaults, you can solve the problem of: #. Providing the read-only access to the user. Polices are made more granular and defaulted to reader rules. For example: If you need to let someone audit your deployment for security purposes. #. Customize the policy in better way. For example, you will be able to provide access to project level user to perform operations within their project only. .. rubric:: ``service`` The ``service`` role is a special role in Keystone, which is used for the internal service-to-service communication. It is assigned to service users i.e. nova or neutron which model the OpenStack services. Nova defaults its service-to-service APIs to require the ``service`` role so that they cannot be used by any non-service users. Allowing access to service-to-service APIs to non-service users can be destructive to resources and leave the deployment in an invalid state. It's advisable to audit the ``policy.yaml`` files and keystone users to make sure those APIs are not allowed to any non-service users and the service role is not granted to human admin accounts. .. note:: Make sure the configured nova service user in other services has the ``service`` role otherwise communication from the other services to Nova will fail. For example, user configured as ``username`` option in ``neutron.conf`` file under ``[nova]`` section has the ``service`` role. Nova supported scope & Roles ----------------------------- Nova supports the below combination of scopes and roles where roles can be overridden in the policy.yaml file but scope is not override-able. #. ADMIN: ``admin`` role on ``project`` scope. This is an administrator to perform the admin level operations. Example: enable/disable compute service, Live migrate server etc. #. PROJECT_MANAGER: ``manager`` role on ``project`` scope. This is used to perform project management operations within project. For example: migrate a server. #. 
PROJECT_MEMBER: ``member`` role on ``project`` scope. This is used to perform resource owner level operation within project. For example: Pause a server. #. PROJECT_READER: ``reader`` role on ``project`` scope. This is used to perform read-only operation within project. For example: Get server. #. PROJECT_MANAGER_OR_ADMIN: ``admin`` or ``manager`` role on ``project`` scope. Such policy rules are default to project management level APIs and along with ``manager`` role, legacy admin can continue to access those APIs. #. PROJECT_MEMBER_OR_ADMIN: ``admin`` or ``member`` role on ``project`` scope. Such policy rules are default to most of the owner level APIs and align with ``member`` role legacy admin can continue to access those APIs. #. PROJECT_READER_OR_ADMIN: ``admin`` or ``reader`` role on ``project`` scope. Such policy rules are default to most of the read only APIs so that legacy admin can continue to access those APIs. #. SERVICE_ROLE (Internal): ``service`` role on service users with ``project`` scope. Such policy rules are default to the service-to-service APIs (The APIs only meant to be called by the OpenStack services). Backward Compatibility ---------------------- Backward compatibility with versions prior to 21.0.0 (Ussuri) is maintained by supporting the old defaults and disabling the ``scope_type`` feature by default. This means the old defaults and deployments that use them will keep working as-is. However, we encourage every deployment to switch to the new policy. The new defaults will be enabled by default in OpenStack 2023.1 (Nova 27.0.0) release and old defaults will be removed starting in the OpenStack 2023.2 (Nova 28.0.0) release. To implement the new default reader roles, some policies needed to become granular. They have been renamed, with the old names still supported for backwards compatibility. Migration Plan -------------- To have a graceful migration, Nova provides two flags to switch to the new policy completely. You do not need to overwrite the policy file to adopt the new policy defaults. Here is step wise guide for migration: #. Create scoped token: You need to create the new token with scope knowledge via below CLI: - :keystone-doc:`Create Project Scoped Token `. #. Create new default roles in keystone if not done: If you do not have new defaults in Keystone then you can create and re-run the :keystone-doc:`Keystone Bootstrap `. Keystone added this support in 14.0.0 (Rocky) release. #. Enable Scope Checks The :oslo.config:option:`oslo_policy.enforce_scope` flag is to enable the ``scope_type`` features. The scope of the token used in the request is always compared to the ``scope_type`` of the policy. If the scopes do not match, one of two things can happen. If :oslo.config:option:`oslo_policy.enforce_scope` is True, the request will be rejected. If :oslo.config:option:`oslo_policy.enforce_scope` is False, an warning will be logged, but the request will be accepted (assuming the rest of the policy passes). The default value of this flag is False. #. Enable new defaults The :oslo.config:option:`oslo_policy.enforce_new_defaults` flag switches the policy to new defaults-only. This flag controls whether or not to use old deprecated defaults when evaluating policies. If True, the old deprecated defaults are not evaluated. This means if any existing token is allowed for old defaults but is disallowed for new defaults, it will be rejected. The default value of this flag is False. .. 
note:: Before you enable this flag, you need to educate users about the different roles they need to use to continue using Nova APIs. #. Check for deprecated policies A few policies were made more granular to implement the reader roles. New policy names are available to use. If old policy names which are renamed are overwritten in policy file, then warning will be logged. Please migrate those policies to new policy names. NOTE:: We recommend to enable the both scope as well new defaults together otherwise you may experience some late failures with unclear error messages. For example, if you enable new defaults and disable scope check then it will allow system users to access the APIs but fail later due to the project check which can be difficult to debug. Below table show how legacy rules are mapped to new rules: .. list-table:: :widths: 25 45 15 15 :header-rows: 1 * - Legacy Rule - New Rule - Operation - Scope * - RULE_ADMIN_API - ADMIN - Global resource Write & Read - project * - RULE_ADMIN_API - PROJECT_MANAGER_OR_ADMIN - Project management level - project * - RULE_ADMIN_OR_OWNER - PROJECT_MEMBER_OR_ADMIN - Project resource write - project * - RULE_ADMIN_OR_OWNER - PROJECT_READER_OR_ADMIN - Project resource read - project We expect all deployments to migrate to the new policy by OpenStack 2023.1 (Nova 27.0.0) release (``project_manager`` role is available from Nova 32.0.0) so that we can remove the support of old policies. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/configuration/policy.rst0000664000175000017500000000114300000000000021526 0ustar00zuulzuul00000000000000============= Nova Policies ============= The following is an overview of all available policies in Nova. .. warning:: JSON formatted policy file is deprecated since Nova 22.0.0(Victoria). Use YAML formatted file. Use `oslopolicy-convert-json-to-yaml`__ tool to convert the existing JSON to YAML formatted policy file in backward compatible way. .. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html .. only:: html For a sample configuration file, refer to :doc:`sample-policy`. .. show-policy:: :config-file: etc/nova/nova-policy-generator.conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/configuration/sample-config.rst0000664000175000017500000000112300000000000022751 0ustar00zuulzuul00000000000000========================= Sample Configuration File ========================= The following is a sample nova configuration for adaptation and use. For a detailed overview of all available configuration options, refer to :doc:`/configuration/config`. The sample configuration can also be viewed in :download:`file form `. .. important:: The sample configuration file is auto-generated from nova when this documentation is built. You must ensure your version of nova matches the version of this documentation. .. literalinclude:: /_static/nova.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/configuration/sample-policy.rst0000664000175000017500000000074400000000000023013 0ustar00zuulzuul00000000000000======================= Sample Nova Policy File ======================= The following is a sample nova policy file for adaptation and use. The sample policy can also be viewed in :download:`file form `. .. 
important:: The sample policy file is auto-generated from nova when this documentation is built. You must ensure your version of nova matches the version of this documentation. .. literalinclude:: /_static/nova.policy.yaml.sample ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.2576077 nova-32.0.0/doc/source/contributor/0000775000175000017500000000000000000000000017201 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/api-ref-guideline.rst0000664000175000017500000002610300000000000023223 0ustar00zuulzuul00000000000000======================= API reference guideline ======================= The API reference should be updated when compute APIs are modified (microversion is bumped, etc.). This page describes the guideline for updating the API reference. API reference ============= * `Compute API reference `_ The guideline to write the API reference ======================================== The API reference consists of the following files. Compute API reference --------------------- * API reference text: ``api-ref/source/*.inc`` * Parameter definition: ``api-ref/source/parameters.yaml`` * JSON request/response samples: ``doc/api_samples/*`` Structure of inc file --------------------- Each REST API is described in the text file (\*.inc). The structure of inc file is as follows: - Title (Resource name) - Introductory text and context The introductory text and the context for the resource in question should be added. This might include links to the API Concept guide, or building other supporting documents to explain a concept (like versioning). - API Name - REST Method - URL - Description See the `Description`_ section for more details. - Response codes - Request - Parameters - JSON request body example (if exists) - Response - Parameters - JSON response body example (if exists) - API Name (Next) - ... REST Method ----------- The guideline for describing HTTP methods is described in this section. All supported methods by resource should be listed in the API reference. The order of methods ~~~~~~~~~~~~~~~~~~~~ Methods have to be sorted by each URI in the following order: 1. GET 2. POST 3. PUT 4. PATCH (unused by Nova) 5. DELETE And sorted from broadest to narrowest. So for /severs it would be: 1. GET /servers 2. POST /servers 3. GET /servers/details 4. GET /servers/{server_id} 5. PUT /servers/{server_id} 6. DELETE /servers/{server_id} Method titles spelling and case ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The spelling and the case of method names in the title have to match what is in the code. For instance, the title for the section on method "Get VNC Console" should be "Get VNC Console (os-getVNCConsole Action)" NOT "Get VNC Console (Os-Getvncconsole Action)" Description ----------- The following items should be described in each API. Or links to the pages describing them should be added. * The purpose of the API(s) - e.g. Lists, creates, shows details for, updates, and deletes servers. - e.g. Creates a server. 
* Microversion - Deprecated - Warning - Microversion to start deprecation - Alternatives (superseded ways) and their links (if document is available) - Added - Microversion in which the API has been added - Changed behavior - Microversion to change behavior - Explanation of the behavior - Changed HTTP response codes - Microversion to change the response code - Explanation of the response code * Warning if direct use is not recommended - e.g. This is an admin level service API only designed to be used by other OpenStack services. The point of this API is to coordinate between Nova and Neutron, Nova and Cinder (and potentially future services) on activities they both need to be involved in, such as network hotplugging. Unless you are writing Neutron or Cinder code you should not be using this API. * Explanation about statuses of resource in question - e.g. The server status. - ``ACTIVE``. The server is active. * Supplementary explanation for parameters - Examples of query parameters - Parameters that are not specified at the same time - Values that cannot be specified. - e.g. A destination host is the same host. * Behavior - Config options to change the behavior and the effect - Effect to resource status - Ephemeral disks, attached volumes, attached network ports and others - Data loss or preserve contents - Scheduler - Whether the scheduler choose a destination host or not * Sort order of response results - Describe sorting order of response results if the API implements the order (e.g. The response is sorted by ``created_at`` and ``id`` in descending order by default) * Policy - Default policy (the admin only, the admin or the owner) - How to change the policy * Preconditions - Server status - Task state - Policy for locked servers - Quota - Limited support - e.g. Only qcow2 is supported - Compute driver support - If very few compute drivers support the operation, add a warning and a link to the support matrix of virt driver. - Cases that are not supported - e.g. A volume-backed server * Postconditions - If the operation is asynchronous, it should be "Asynchronous postconditions". - Describe what status/state resource in question becomes by the operation - Server status - Task state - Path of output file * Troubleshooting - e.g. If the server status remains ``BUILDING`` or shows another error status, the request failed. Ensure you meet the preconditions then investigate the compute node. * Related operations - Operations to be paired - e.g. Start and stop - Subsequent operation - e.g. "Confirm resize" after "Resize" operation * Performance - e.g. The progress of this operation depends on the location of the requested image, network I/O, host load, selected flavor, and other factors. * Progress - How to get progress of the operation if the operation is asynchronous. * Restrictions - Range that ID is unique - e.g. HostId is unique per account and is not globally unique. * How to avoid errors - e.g. The server to get console log from should set ``export LC_ALL=en_US.UTF-8`` in order to avoid incorrect unicode error. * Reference - Links to the API Concept guide, or building other supporting documents to explain a concept (like versioning). * Other notices Response codes ~~~~~~~~~~~~~~ The normal response codes (20x) and error response codes have to be listed. The order of response codes should be in ascending order. The description of typical error response codes are as follows: .. 
list-table:: Error response codes :header-rows: 1 * - Response codes - Description * - 400 - badRequest(400) * - 401 - unauthorized(401) * - 403 - forbidden(403) * - 404 - itemNotFound(404) * - 409 - conflict(409) * - 410 - gone(410) * - 501 - notImplemented(501) * - 503 - serviceUnavailable(503) In addition, the following explanations should be described. - Conditions under which each normal response code is returned (If there are multiple normal response codes.) - Conditions under which each error response code is returned Parameters ---------- Parameters need to be defined by 2 subsections. The one is in the 'Request' subsection, the other is in the 'Response' subsection. The queries, request headers and attributes go in the 'Request' subsection and response headers and attributes go in the 'Response' subsection. The order of parameters in each API ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The request and response parameters have to be listed in the following order in each API in the text file. 1. Header 2. Path 3. Query 4. Body a. Top level object (i.e. server) b. Required fields c. Optional fields d. Parameters added in microversions (by the microversion they were added) Parameter type ~~~~~~~~~~~~~~ The parameters are defined in the parameter file (``parameters.yaml``). The type of parameters have to be one of following: * ``array`` It is a list. * ``boolean`` * ``float`` * ``integer`` * ``none`` The value is always ``null`` in a response or should be ``null`` in a request. * ``object`` The value is dict. * ``string`` If the value can be specified by multiple types, specify one type in the file and mention the other types in the description. Required or optional ~~~~~~~~~~~~~~~~~~~~ In the parameter file, define the ``required`` field in each parameter. .. list-table:: :widths: 15 85 * - ``true`` - The parameter must be specified in the request, or the parameter always appears in the response. * - ``false`` - It is not always necessary to specify the parameter in the request, or the parameter does not appear in the response in some cases. e.g. A config option defines whether the parameter appears in the response or not. A parameter appears when administrators call but does not appear when non-admin users call. If a parameter must be specified in the request or always appears in the response in the microversion added or later, the parameter must be defined as required (``true``). Microversion ~~~~~~~~~~~~ If a parameter is available starting from a microversion, the microversion must be described by ``min_version`` in the parameter file. However, if the minimum microversion is the same as a microversion that the API itself is added, it is not necessary to describe the microversion. For example:: aggregate_uuid: description: | The UUID of the host aggregate. in: body required: true type: string min_version: 2.41 This example describes that ``aggregate_uuid`` is available starting from microversion 2.41. If a parameter is available up to a microversion, the microversion must be described by ``max_version`` in the parameter file. For example:: security_group_rules: description: | The number of allowed rules for each security group. in: body required: false type: integer max_version: 2.35 This example describes that ``security_group_rules`` is available up to microversion 2.35 (and has been removed since microversion 2.36). The order of parameters in the parameter file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The order of parameters in the parameter file has to be kept as follows: 1. 
By in type a. Header b. Path c. Query d. Body 2. Then alphabetical by name Example ------- One or more examples should be provided for operations whose request and/or response contains a payload. The example should describe what the operation is attempting to do and provide a sample payload for the request and/or response as appropriate. Sample files should be created in the ``doc/api_samples`` directory and inlined by inclusion. When an operation has no payload in the response, a suitable message should be included. For example:: There is no body content for the response of a successful DELETE query. Examples for multiple microversions should be included in ascending microversion order. Reference ========= * `Verifying the Nova API Ref `_ * `The description for Parameters whose values are null `_ * `The definition of "Optional" parameter `_ * `How to document your OpenStack API service `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/api.rst0000664000175000017500000003250400000000000020510 0ustar00zuulzuul00000000000000Extending the API ================= .. note:: This document provides general background information on how one can extend the REST API in nova. For information on the microversion support including how to add new microversions, see :doc:`/contributor/microversions`. For information on how to use the API, refer to the `API guide`__ and `API reference guide`__. .. __: https://docs.openstack.org/api-guide/compute/ .. __: https://docs.openstack.org/api-ref/compute/ Nova's API is a mostly RESTful API. REST stands for *Representational State Transfer* and provides an architecture "style" for distributed systems using HTTP for transport. Figure out a way to express your request and response in terms of resources that are being created, modified, read, or destroyed. Nova has v2.1 API frameworks which supports microversions. This document covers how to add API for the v2.1 API framework. A :doc:`microversions-specific document ` covers the details around what is required for the microversions part. The v2.1 API framework is under ``nova/api`` and each API is implemented in ``nova/api/openstack/compute``. .. note:: Any change to the Nova API to be merged will first require a spec be approved first. See `here `_ for the appropriate repository. For guidance on the design of the API please refer to the `OpenStack API WG `_ Basic API Controller -------------------- API controller includes the implementation of API methods for a resource. A very basic controller of a v2.1 API: .. code-block:: python """Basic Controller""" from nova.api.openstack.compute.schemas import xyz from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.api import validation class BasicController(wsgi.Controller): # Define support for GET on a collection def index(self, req): data = {'param': 'val'} return data # Define support for POST on a collection @extensions.expected_errors((400, 409)) @validation.schema(xyz.create) @wsgi.response(201) def create(self, req, body): write_body_here = ok return response_body # Defining support for other RESTFul methods based on resource. # ... See `servers.py `_ for ref. All of the controller modules should live in the ``nova/api/openstack/compute`` directory. URL Mapping to API ~~~~~~~~~~~~~~~~~~ The URL mapping is based on the plain list which routes the API request to appropriate controller and method. 
Each API needs to add its route information in ``nova/api/openstack/compute/routes.py``. A basic skeleton of URL mapping in ``routes.py``: .. code-block:: python """URL Mapping Router List""" import functools import nova.api.openstack from nova.api.openstack.compute import basic_api # Create a controller object basic_controller = functools.partial( _create_controller, basic_api.BasicController, [], [], ) # Routing list structure: # ( # ('Route path': { # 'HTTP method': [ # 'Controller', # 'The method of controller is used to handle this route' # ], # ... # }), # ... # ) ROUTE_LIST = ( # ... ('/basic', { 'GET': [basic_controller, 'index'], 'POST': [basic_controller, 'create'] }), # ... ) The complete routing list can be found in `routes.py `_. Policy ~~~~~~ For more info about policy, see :doc:`policies `. Also look at the ``context.can(...)`` call in existing API controllers. Modularity ~~~~~~~~~~ The Nova REST API is separated into different controllers in the directory ``nova/api/openstack/compute/``. Because microversions are supported in the Nova REST API, the API can be extended without any new controller. But for code readability, the Nova REST API code still needs modularity. Here are the rules for how to separate modules: * You are adding a new resource The new resource should be in a standalone module. There isn't any reason to put different resources in a single module. * Add sub-resource for existing resource To prevent an existing resource module from becoming over-inflated, the sub-resource should be implemented in a separate module. * Add extended attributes for existing resource Normally, extended attributes are part of the existing resource's data model too, so they can be added to the existing resource module directly. To avoid namespace complexity, we should avoid adding extended attributes to existing extended models. New extended attributes do not need any namespace prefix. Validation ~~~~~~~~~~ .. versionchanged:: 2024.2 (Dalmatian) Added response body schema validation, to complement request body and request query string validation. The v2.1 API uses JSON Schema for validation. Before 2024.2 (Dalmatian), this was used for request body and request query string validation. Since then, it is also used for response body validation. The request body and query string validation is used at runtime and is non-optional, while the response body validation is only used for test and documentation purposes and should not be enabled at runtime. Validation is added for a given resource and method using decorators provided in ``nova.api.validation``, while the schemas themselves can be found in ``nova.api.openstack.compute.schemas``. We use unit tests to ensure all methods are decorated correctly and schemas are defined. The decorators accept optional ``min_version`` and ``max_version`` arguments, allowing you to define different schemas for different microversions. For example: .. code-block:: python @validation.schema(schema.update, '2.1', '2.77') @validation.schema(schema.update_v278, '2.78') @validation.query_schema(schema.update_query) @validation.response_body_schema(schema.update_response, '2.1', '2.6') @validation.response_body_schema(schema.update_response_v27, '2.7', '2.77') @validation.response_body_schema(schema.update_response_v278, '2.78') def update(self, req, id, body): ... In addition to the JSON Schema validation decorator, we provide decorators for indicating expected response and error codes, indicating removed APIs, and indicating "action" APIs.
These can be found in ``nova.api.openstack.wsgi``. For example, to indicate that an API can return ``400 (Bad Request)``, ``404 (Not Found)``, or ``409 (Conflict)`` error codes but returns ``200 (OK)`` in the success case: .. code-block:: python @wsgi.expected_errors((400, 404, 409)) @wsgi.response(201) def update(self, req, id, body): ... To define a new action, ``foo``, for a given resource: .. code-block:: python @wsgi.action('foo') def update(self, req, id, body): ... To indicate that an API has been removed and will now return ``410 (Gone)`` for all requests: .. code-block:: python @wsgi.removed('30.0.0', 'This API was removed because...') def update(self, req, id, body): ... Unit Tests ---------- Unit tests for the API can be found under path ``nova/tests/unit/api/openstack/compute/``. Unit tests for the API are generally negative scenario tests, because the positive scenarios are tested with functional API samples tests. Negative tests would include such things as: * Request schema validation failures, for both the request body and query parameters * ``HTTPNotFound`` or other >=400 response code failures Functional tests and API Samples -------------------------------- All functional API changes, including new microversions - especially if there are new request or response parameters, should have new functional API samples tests. The API samples tests are made of two parts: * The API sample for the reference docs. These are found under path ``doc/api_samples/``. There is typically one directory per API controller with subdirectories per microversion for that API controller. The unversioned samples are used for the base v2.0 / v2.1 APIs. * Corresponding API sample templates found under path ``nova/tests/functional/api_sample_tests/api_samples``. These have a similar structure to the API reference docs samples, except the format of the sample can include substitution variables filled in by the tests where necessary, for example, to substitute things that change per test run, like a server UUID. The actual functional tests are found under path ``nova/tests/functional/api_sample_tests/``. Most, if not all, API samples tests extend the ``ApiSampleTestBaseV21`` class which extends ``ApiSampleTestBase``. These base classes provide the framework for making a request using an API reference doc sample and validating the response using the corresponding template file, along with any variable substitutions that need to be made. Note that it is possible to automatically generate the API reference doc samples using the templates by simply running the tests using ``tox -e api-samples``. This relies, of course, upon the test and templates being correct for the test to pass, which may take some iteration. In general, if you are adding a new microversion to an existing API controller, it is easiest to simply copy an existing test and modify it for the new microversion and the new samples/templates. The functional API samples tests are not the simplest thing in the world to get used to and it can be very frustrating at times when they fail in not obvious ways. If you need help debugging a functional API sample test failure, feel free to post your work-in-progress change for review and ask for help in the ``openstack-nova`` OFTC IRC channel. Documentation ------------- All API changes must also include updates to the compute API reference, which can be found under path ``api-ref/source/``. 
Things to consider here include: * Adding new request and/or response parameters with a new microversion * Marking existing parameters as deprecated in a new microversion More information on the compute API reference format and conventions can be found in the :doc:`/contributor/api-ref-guideline`. For more detailed documentation of certain aspects of the API, consider writing something into the compute API guide found under path ``api-guide/source/``. Deprecating APIs ---------------- Compute REST API routes may be deprecated by capping a method or functionality using microversions. For example, the :ref:`2.36 microversion <2.36 microversion>` deprecated several compute REST API routes which only worked when using the since-removed ``nova-network`` service or are proxies to other external services like cinder, neutron, etc. The point of deprecating with microversions is users can still get the same functionality at a lower microversion but there is at least some way to signal to users that they should stop using the REST API. The general steps for deprecating a REST API are: * Set a maximum allowed microversion for the route. Requests beyond that microversion on that route will result in a ``404 (Not Found)`` error. * Update the Compute API reference documentation to indicate the route is deprecated and move it to the bottom of the list with the other deprecated APIs. * Deprecate, and eventually remove, related CLI / SDK functionality in other projects like *python-novaclient*. Removing deprecated APIs ------------------------ Nova tries to maintain backward compatibility with all REST APIs as much as possible, but when enough time has lapsed, there are few (if any) users or there are supported alternatives, the underlying service code that supports a deprecated REST API, like in the case of ``nova-network``, is removed and the REST API must also be effectively removed. The general steps for removing support for a deprecated REST API are: * The `route mapping`_ will remain but all methods will return a ``410 (Gone)`` error response. This is slightly different to the ``404 (Not Found)`` error response a user will get for trying to use a microversion that does not support a deprecated API. 410 means the resource is gone and not coming back, which is more appropriate when the API is fully removed and will not work at any microversion. * Related configuration options, policy rules, and schema validation are removed. * The API reference documentation should be updated to move the documentation for the removed API to the `Obsolete APIs`_ section and mention in which release the API was removed. * Unit tests can be removed. * API sample functional tests can be changed to assert the 410 response behavior, but can otherwise be mostly gutted. Related \*.tpl files for the API sample functional tests can be deleted since they will not be used. * An "upgrade" :doc:`release note ` should be added to mention the REST API routes that were removed along with any related configuration options that were also removed. Here is an example of the above steps: https://review.opendev.org/567682/ .. _route mapping: https://opendev.org/openstack/nova/src/branch/master/nova/api/openstack/compute/routes.py .. 
_Obsolete APIs: https://docs.openstack.org/api-ref/compute/#obsolete-apis ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/blueprints.rst0000664000175000017500000000570700000000000022133 0ustar00zuulzuul00000000000000================================== Blueprints, Specs and Priorities ================================== Like most OpenStack projects, Nova uses `blueprints`_ and specifications (specs) to track new features, but not all blueprints require a spec. This document covers when a spec is needed. .. note:: Nova's specs live at: `specs.openstack.org`_ .. _`blueprints`: http://docs.openstack.org/infra/manual/developers.html#working-on-specifications-and-blueprints .. _`specs.openstack.org`: http://specs.openstack.org/openstack/nova-specs/ Specs ===== A spec is needed for any feature that requires a design discussion. All features need a blueprint but not all blueprints require a spec. If a new feature is straightforward enough that it doesn't need any design discussion, then no spec is required. In order to provide the sort of documentation that would otherwise be provided via a spec, the commit message should include a ``DocImpact`` flag and a thorough description of the feature from a user/operator perspective. Guidelines for when a feature doesn't need a spec. * Is the feature a single self contained change? * If the feature touches code all over the place, it probably should have a design discussion. * If the feature is big enough that it needs more than one commit, it probably should have a design discussion. * Not an API change. * API changes always require a design discussion. When a blueprint does not require a spec it still needs to be approved before the code which implements the blueprint is merged. Specless blueprints are discussed and potentially approved during the ``Open Discussion`` portion of the weekly `nova IRC meeting`_. See `trivial specifications`_ for more details. Project Priorities =================== * Pick several project priority themes, in the form of use cases, to help us prioritize work * Generate list of improvement blueprints based on the themes * Produce rough draft of list going into summit and finalize the list at the summit * Publish list of project priorities and look for volunteers to work on them * Update spec template to include * Specific use cases * State if the spec is project priority or not * Keep an up to date list of project priority blueprints that need code review in an etherpad. * Consumers of project priority and project priority blueprint lists: * Reviewers looking for direction of where to spend their blueprint review time. If a large subset of nova-core doesn't use the project priorities it means the core team is not aligned properly and should revisit the list of project priorities * The blueprint approval team, to help find the right balance of blueprints * Contributors looking for something to work on * People looking for what they can expect in the next release .. _nova IRC meeting: http://eavesdrop.openstack.org/#Nova_Team_Meeting .. _trivial specifications: https://specs.openstack.org/openstack/nova-specs/readme.html#trivial-specifications ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/code-review.rst0000664000175000017500000002705600000000000022156 0ustar00zuulzuul00000000000000.. 
_code-review: ========================== Code Review Guide for Nova ========================== OpenStack has a general set of code review guidelines: https://docs.openstack.org/infra/manual/developers.html#peer-review What follows is a very terse set of points for reviewers to consider when looking at nova code. These are things that are important for the continued smooth operation of Nova, but that tend to be carried as "tribal knowledge" instead of being written down. It is an attempt to boil down some of those things into nearly checklist format. Further explanation about why some of these things are important belongs elsewhere and should be linked from here. Upgrade-Related Concerns ======================== RPC API Versions ---------------- * If an RPC method is modified, the following needs to happen: * The manager-side (example: compute/manager) needs a version bump * The manager-side method needs to tolerate older calls as well as newer calls * Arguments can be added as long as they are optional. Arguments cannot be removed or changed in an incompatible way. * The RPC client code (example: compute/rpcapi.py) needs to be able to honor a pin for the older version (see self.client.can_send_version() calls). If we are pinned at 1.5, but the version requirement for a method is 1.7, we need to be able to formulate the call at version 1.5. * Methods can drop compatibility with older versions when we bump a major version. * RPC methods can be deprecated by removing the client (example: compute/rpcapi.py) implementation. However, the manager method must continue to exist until the major version of the API is bumped. Object Versions --------------- * If a tracked attribute (i.e. listed in fields) or remotable method is added, or a method is changed, the object version must be bumped. Changes for methods follow the same rules as above for regular RPC methods. We have tests to try to catch these changes, which remind you to bump the version and then correct the version-hash in the tests. * Field types cannot be changed. If absolutely required, create a new attribute and deprecate the old one. Ideally, support converting the old attribute to the new one with an obj_load_attr() handler. There are some exceptional cases where changing the type can be allowed, but care must be taken to ensure it does not affect the wireline API. * New attributes should be removed from the primitive in obj_make_compatible() if the attribute was added after the target version. * Remotable methods should not return unversioned structures wherever possible. They should return objects or simple values as the return types are not (and cannot) be checked by the hash tests. * Remotable methods should not take complex structures as arguments. These cannot be verified by the hash tests, and thus are subject to drift. Either construct an object and pass that, or pass all the simple values required to make the call. * Changes to an object as described above will cause a hash to change in TestObjectVersions. This is a reminder to the developer and the reviewer that the version needs to be bumped. There are times when we need to make a change to an object without bumping its version, but those cases are only where the hash logic detects a change that is not actually a compatibility issue and must be handled carefully. Database Schema --------------- * Changes to the database schema must generally be additive-only. This means you can add columns, but you can't drop or alter a column. 
We have some hacky tests to try to catch these things, but they are fragile. Extreme reviewer attention to non-online alterations to the DB schema will help us avoid disaster. * Dropping things from the schema is a thing we need to be extremely careful about, making sure that the column has not been used (even present in one of our models) for at least a release. * Data migrations must not be present in schema migrations. If data needs to be converted to another format, or moved from one place to another, then that must be done while the database server remains online. Generally, this can and should be hidden within the object layer so that an object can load from either the old or new location, and save to the new one. * Multiple Cells v2 cells are supported started in the Pike release. As such, any online data migrations that move data from a cell database to the API database must be multi-cell aware. REST API ========= When making a change to the nova API, we should always follow `the API WG guidelines `_ rather than going for "local" consistency. Developers and reviewers should read all of the guidelines, but they are very long. So here are some key points: * `Terms `_ * ``project`` should be used in the REST API instead of ``tenant``. * ``server`` should be used in the REST API instead of ``instance``. * ``compute`` should be used in the REST API instead of ``nova``. * `Naming Conventions `_ * URL should not include underscores; use hyphens ('-') instead. * The field names contained in a request/response body should use snake_case style, not CamelCase or Mixed_Case style. * `HTTP Response Codes `_ * Synchronous resource creation: ``201 Created`` * Asynchronous resource creation: ``202 Accepted`` * Synchronous resource deletion: ``204 No Content`` * For all other successful operations: ``200 OK`` Config Options ============== Location -------- The central place where all config options should reside is the ``/nova/conf/`` package. Options that are in named sections of ``nova.conf``, such as ``[serial_console]``, should be in their own module. Options that are in the ``[DEFAULT]`` section should be placed in modules that represent a natural grouping. For example, all of the options that affect the scheduler would be in the ``scheduler.py`` file, and all the networking options would be moved to ``network.py``. Implementation -------------- A config option should be checked for: * A short description which explains what it does. If it is a unit (e.g. timeouts or so) describe the unit which is used (seconds, megabyte, mebibyte, ...). * A long description which explains the impact and scope. The operators should know the expected change in the behavior of Nova if they tweak this. * Descriptions/Validations for the possible values. * If this is an option with numeric values (int, float), describe the edge cases (like the min value, max value, 0, -1). * If this is a DictOpt, describe the allowed keys. * If this is a StrOpt, list any possible regex validations, or provide a list of acceptable and/or prohibited values. 
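The checklist above is easier to follow with a concrete illustration. Below is a minimal, hypothetical sketch of an option definition using ``oslo.config``; the group name, option name and help text are invented purely to show where the short description, unit, impact and possible values go, and do not correspond to a real Nova option.

.. code-block:: python

   from oslo_config import cfg

   # Hypothetical group, used only for this illustration.
   example_group = cfg.OptGroup(
       'example',
       title='Example options',
       help='Options used to illustrate the documentation checklist.')

   example_opts = [
       cfg.IntOpt(
           'connection_retry_interval',
           default=10,
           min=0,
           help="""
   Interval, in seconds, between connection retries (hypothetical option).

   The first line is the short description and names the unit (seconds). This
   longer description explains the impact and scope, so operators know what
   behaviour changes when they tweak the value.

   Possible values:

   * 0 - retry immediately, without waiting (edge case described explicitly)
   * Any positive integer - number of seconds to wait between retries
   """),
   ]


   def register_opts(conf):
       # Modules under nova/conf/ typically expose hooks like these so that
       # options are registered at runtime and picked up by the sample
       # config generator.
       conf.register_group(example_group)
       conf.register_opts(example_opts, group=example_group)


   def list_opts():
       return {example_group: example_opts}

Note how the help text states the unit, documents the ``0`` edge case and lists the possible values, mirroring the points in the checklist.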
Previously used sections which explained which services consume a specific config option and which options are related to each other got dropped because they are too hard to maintain: http://lists.openstack.org/pipermail/openstack-dev/2016-May/095538.html Third Party Tests ================= Any change that is not tested well by the Jenkins check jobs must have a recent +1 vote from an appropriate third party test (or tests) on the latest patchset, before a core reviewer is allowed to make a +2 vote. Virt drivers ------------ At a minimum, we must ensure that any technology specific code has a +1 from the relevant third party test, on the latest patchset, before a +2 vote can be applied. Specifically, changes to nova/virt/driver/ need a +1 vote from the respective third party CI. For example, if you change something in the VMware virt driver, you must wait for a +1 from the VMware CI on the latest patchset, before you can give that patch set a +2 vote. This is important to ensure: * We keep those drivers stable * We don't break that third party CI Notes ----- Please note: * Long term, we should ensure that any patch a third party CI is allowed to vote on, can be blocked from merging by that third party CI. But we need a lot more work to make something like that feasible, hence the proposed compromise. * While its possible to break a virt driver CI system by changing code that is outside the virt drivers, this policy is not focusing on fixing that. A third party test failure should always be investigated, but the failure of a third party test to report in a timely manner should not block others. * We are only talking about the testing of in-tree code. Please note the only public API is our REST API, see: :doc:`policies` Should I run the experimental queue jobs on this change? ======================================================== Because we can't run all CI jobs in the check and gate pipelines, some jobs can be executed on demand, thanks to the experimental pipeline. To run the experimental jobs, you need to comment your Gerrit review with "check experimental". The experimental jobs aim to test specific features, such as LXC containers or DVR with multiple nodes. Also, it might be useful to run them when we want to test backward compatibility with tools that deploy OpenStack outside Devstack (e.g. TripleO, etc). They can produce a non-voting feedback of whether the system continues to work when we deprecate or remove some options or features in Nova. The experimental queue can also be used to test that new CI jobs are correct before making them voting. Database Schema =============== * Use the ``utf8`` charset only where necessary. Some string fields, such as hex-stringified UUID values, MD5 fingerprints, SHA1 hashes or base64-encoded data, are always interpreted using ASCII encoding. A hex-stringified UUID value in ``latin1`` is 1/3 the size of the same field in ``utf8``, impacting performance without bringing any benefit. If there are no string type columns in the table, or the string type columns contain **only** the data described above, then stick with ``latin1``. Microversion API ================ If a new microversion API is added, the following needs to happen: * A new patch for the microversion API change in both python-novaclient and in python-openstackclient should be submitted before the microversion change in Nova is merged. See :python-novaclient-doc:`Adding support for a new microversion ` in python-novaclient for more details. 
See also `Add support for 'server group create --rule' parameter`_ patch as example how to support a new microversion in the openstack client. * If the microversion changes the response schema, a new schema and test for the microversion must be added to Tempest. The microversion change in Nova should not be merged until the Tempest test is submitted and at least passing; it does not need to be merged yet as long as it is testing the Nova change via Depends-On. The Nova microversion change commit message should reference the Change-Id of the Tempest test for reviewers to identify it. .. _`Add support for 'server group create --rule' parameter`: https://review.opendev.org/#/c/761597 Notifications ============= * Every new notification type shall use the new versioned notification infrastructure documented in :doc:`/reference/notifications` Release Notes ============= A release note is required on changes that have upgrade impact, security impact, introduce a new feature, fix Critical bugs, or fix long-standing bugs with high importance. See :doc:`releasenotes` for details on how to create a release note, each available section and the type of content required. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/contributing.rst0000664000175000017500000000370100000000000022443 0ustar00zuulzuul00000000000000============================ So You Want to Contribute... ============================ For general information on contributing to OpenStack, please check out the `contributor guide `_ to get started. It covers all the basics that are common to all OpenStack projects: the accounts you need, the basics of interacting with our Gerrit review system, how we communicate as a community, etc. Below will cover the more project specific information you need to get started with nova. Communication ~~~~~~~~~~~~~ :doc:`how-to-get-involved` Contacting the Core Team ~~~~~~~~~~~~~~~~~~~~~~~~ The overall structure of the Nova team is documented on `the wiki `_. New Feature Planning ~~~~~~~~~~~~~~~~~~~~ If you want to propose a new feature please read the :doc:`blueprints` page. Task Tracking ~~~~~~~~~~~~~ We track our tasks in `Launchpad `__. If you're looking for some smaller, easier work item to pick up and get started on, search for the 'low-hanging-fruit' tag. Reporting a Bug ~~~~~~~~~~~~~~~ You found an issue and want to make sure we are aware of it? You can do so on `Launchpad `__. More info about Launchpad usage can be found on `OpenStack docs page `_. Getting Your Patch Merged ~~~~~~~~~~~~~~~~~~~~~~~~~ All changes proposed to the Nova requires two ``Code-Review +2`` votes from Nova core reviewers before one of the core reviewers can approve patch by giving ``Workflow +1`` vote. More detailed guidelines for reviewers of Nova patches are available at :doc:`code-review`. Project Team Lead Duties ~~~~~~~~~~~~~~~~~~~~~~~~ All common PTL duties are enumerated in the `PTL guide `_. For the Nova specific duties you can read the Nova PTL guide :doc:`ptl-guide` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/development-environment.rst0000664000175000017500000001437200000000000024626 0ustar00zuulzuul00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================= Development Quickstart ======================= This page describes how to setup and use a working Python development environment that can be used in developing nova on Ubuntu, Fedora or Mac OS X. These instructions assume you're already familiar with git. Following these instructions will allow you to build the documentation and run the nova unit tests. If you want to be able to run nova (i.e., launch VM instances), you will also need to --- either manually or by letting DevStack do it for you --- install libvirt and at least one of the `supported hypervisors`_. Running nova is currently only supported on Linux, although you can run the unit tests on Mac OS X. .. _supported hypervisors: http://wiki.openstack.org/HypervisorSupportMatrix .. note:: For how to contribute to Nova, see HowToContribute_. Nova uses the Gerrit code review system, GerritWorkflow_. .. _GerritWorkflow: http://docs.openstack.org/infra/manual/developers.html#development-workflow .. _HowToContribute: http://docs.openstack.org/infra/manual/developers.html .. _`docs.openstack.org`: http://docs.openstack.org Setup ===== There are two ways to create a development environment: using DevStack, or explicitly installing and cloning just what you need. Using DevStack -------------- See `Devstack`_ Documentation. If you would like to use Vagrant, there is a `Vagrant`_ for DevStack. .. _`Devstack`: http://docs.openstack.org/developer/devstack/ .. _`Vagrant`: https://github.com/openstack-dev/devstack-vagrant/blob/master/README.md .. Until the vagrant markdown documents are rendered somewhere on .openstack.org, linking to github Explicit Install/Clone ---------------------- DevStack installs a complete OpenStack environment. Alternatively, you can explicitly install and clone just what you need for Nova development. Getting the code ```````````````` Grab the code from git:: git clone https://opendev.org/openstack/nova cd nova Linux Systems ````````````` The first step of this process is to install the system (not Python) packages that are required. Following are instructions on how to do this on Linux and on the Mac. .. note:: This section is tested for Nova on Ubuntu (14.04-64) and Fedora-based (RHEL 6.1) distributions. Feel free to add notes and change according to your experiences or operating system. Install the prerequisite packages listed in the ``bindep.txt`` file. 
On Debian-based distributions (e.g., Debian/Mint/Ubuntu):: sudo apt-get install python-pip sudo pip install tox tox -e bindep sudo apt-get install On Fedora-based distributions (e.g., Fedora/RHEL/CentOS/Scientific Linux):: sudo dnf install python-pip sudo pip install tox tox -e bindep sudo dnf install On openSUSE-based distributions (SLES, openSUSE Leap / Tumbleweed):: sudo zypper in python-pip sudo pip install tox tox -e bindep sudo zypper in Mac OS X Systems ```````````````` Install virtualenv:: sudo easy_install virtualenv Check the version of OpenSSL you have installed:: openssl version The stock version of OpenSSL that ships with Mac OS X 10.6 (OpenSSL 0.9.8l) or Mac OS X 10.7 (OpenSSL 0.9.8r) or Mac OS X 10.10.3 (OpenSSL 0.9.8zc) works fine with nova. OpenSSL versions from brew like OpenSSL 1.0.1k work fine as well. Brew is very useful for installing dependencies. As a minimum for running tests, install the following:: brew install python3 postgres python3 -mpip install tox Building the Documentation ========================== Install the prerequisite packages: graphviz To do a full documentation build, issue the following command while the nova directory is current. .. code-block:: bash tox -edocs That will create a Python virtual environment, install the needed Python prerequisites in that environment, and build all the documentation in that environment. Running unit tests ================== See `Running Python Unit Tests`_. .. _`Running Python Unit Tests`: https://docs.openstack.org/project-team-guide/project-setup/python.html#running-python-unit-tests Note that some unit and functional tests use a database. See the file ``tools/test-setup.sh`` on how the databases are set up in the OpenStack CI environment and replicate it in your test environment. Using the pre-commit hook ========================= Nova can make use of the `pre-commit framework `__ to allow running of some linters on each commit. This must be enabled locally to function: .. code-block:: shell $ pip install --user pre-commit $ pre-commit install --allow-missing-config As a reminder, the hooks are optional and you are not enforced to run them. You can either not install pre-commit or skip the hooks once by using the ``--no-verify`` flag on ``git commit``. Using fake computes for tests ============================= The number of instances supported by fake computes is not limited by physical constraints. It allows you to perform stress tests on a deployment with few resources (typically a laptop). Take care to avoid using scheduler filters that will limit the number of instances per compute, such as ``NumInstancesFilter``. Fake computes can also be used in multi hypervisor-type deployments in order to take advantage of fake and "real" computes during tests: * create many fake instances for stress tests * create some "real" instances for functional tests Fake computes can be used for testing Nova itself but also applications on top of it. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/documentation.rst0000664000175000017500000000704400000000000022611 0ustar00zuulzuul00000000000000======================== Documentation Guidelines ======================== These are some basic guidelines for contributing to documentation in nova. Review Guidelines ================= Documentation-only patches differ from code patches in a few ways. 
* They are often written by users / operators that aren't plugged into daily cycles of nova or on IRC * Outstanding patches are far more likely to trigger merge conflict in Git than code patches * There may be wide variation on points of view of what the "best" or "clearest" way is to say a thing This all can lead to a large number of practical documentation improvements stalling out because the author submitted the fix, and does not have the time to merge conflict chase or is used to the Gerrit follow up model. As such, documentation patches should be evaluated in the basic context of "does this make things better than the current tree". Patches are cheap, it can always be further enhanced in future patches. Typo / trivial documentation only fixes should get approved with a single +2. How users consume docs ====================== The current primary target for all documentation in nova is the web. While it is theoretically possible to generate PDF versions of the content, the tree is not currently well structured for that, and it's not clear there is an audience for that. The main nova docs tree ``doc/source`` is published per release, so there will be copies of all of this as both the ``latest`` URL (which is master), and for every stable release (e.g. ``pike``). .. note:: This raises an interesting and unexplored question about whether we want all of ``doc/source`` published with stable branches that will be stale and unimproved as we address content in ``latest``. The ``api-ref`` and ``api-guide`` publish only from master to a single site on ``docs.openstack.org``. As such, they are effectively branchless. Guidelines for consumable docs ============================== * Give users context before following a link Most users exploring our documentation will be trying to learn about our software. Entry and subpages that provide links to in depth topics need to provide some basic context about why someone would need to know about a ``filter scheduler`` before following the link named filter scheduler. Providing these summaries helps the new consumer come up to speed more quickly. * Doc moves require ``.htaccess`` redirects If a file is moved in a documentation source tree, we should be aware that it might be linked from external sources, and is now a ``404 Not Found`` error for real users. All doc moves should include an entry in ``doc/source/_extra/.htaccess`` to redirect from the old page to the new page. * Images are good, please provide source An image is worth a 1000 words, but can go stale pretty quickly. We ideally want ``png`` files for display on the web, but that's a non modifiable format. For any new diagram contributions we should also get some kind of source format (``svg`` is ideal as it can be modified with open tools) along with ``png`` formats. Long Term TODOs =============== * Sort out our toctree / sidebar navigation During the bulk import of the install, admin, config guides we started with a unified toctree, which was a ton of entries, and made the navigation sidebar in Nova incredibly confusing. The short term fix was to just make that almost entirely hidden and rely on structured landing and sub pages. Long term it would be good to reconcile the toctree / sidebar into something that feels coherent. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/evacuate-vs-rebuild.rst0000664000175000017500000001272700000000000023613 0ustar00zuulzuul00000000000000=================== Evacuate vs Rebuild =================== The `evacuate API`_ and `rebuild API`_ are commonly confused in nova because the internal `conductor code`_ and `compute code`_ use the same methods called ``rebuild_instance``. This document explains some of the differences in what happens between an evacuate and rebuild operation. High level ~~~~~~~~~~ *Evacuate* is an operation performed by an administrator when a compute service or host is encountering some problem, goes down and needs to be fenced from the network. The servers that were running on that compute host can be rebuilt on a **different** host using the **same** image. If the source and destination hosts are running on shared storage then the root disk image of the servers can be retained otherwise the root disk image (if not using a volume-backed server) will be lost. This is one example of why it is important to attach data volumes to a server to store application data and leave the root disk for the operating system since data volumes will be re-attached to the server as part of the evacuate process. *Rebuild* is an operation which can be performed by a non-administrative owner of the server (the user) performed on the **same** compute host to change certain aspects of the server, most notably using a **different** image. Note that the image does not *have* to change and in the case of volume-backed servers the image `can be changed`_ with microversion `2.93`_. Other attributes of the server can be changed as well such as ``key_name`` and ``user_data``. See the `rebuild API`_ reference for full usage details. When a user rebuilds a server they want to change it which requires re-spawning the guest in the hypervisor but retain the UUID, volumes and ports attached to the server. Scheduling ~~~~~~~~~~ Evacuate always schedules the server to another host and rebuild always occurs on the same host. Note that when `rebuilding with a different image`_, the request is run through the scheduler to ensure the new image is still valid for the current compute host. Image ~~~~~ As noted above, the image that the server uses during an evacuate operation does not change. The image used to rebuild a server *may* change but does not have to. Resource claims ~~~~~~~~~~~~~~~ The compute service ``ResourceTracker`` has a `claims`_ operation which is used to ensure resources are available before building a server on the host. The scheduler performs the initial filtering of hosts to ensure a server can be built on a given host and the compute claim is essentially meant as a secondary check to prevent races when the scheduler has out of date information or when there are concurrent build requests going to the same host. During an evacuate operation there is a `rebuild claim`_ since the server is being re-built on a different host. During a rebuild operation, since the flavor does not change, there is `no claim`_ made since the host does not change. Allocations ~~~~~~~~~~~ Since the 16.0.0 (Pike) release, the scheduler uses the `placement service`_ to filter compute nodes (resource providers) based on information in the flavor and image used to build the server. 
Once the scheduler runs through its filters and weighers and picks a host, resource class `allocations`_ are atomically consumed in placement with the server as the consumer. During an evacuate operation, the allocations held by the server consumer against the source compute node resource provider are left intact since the source compute service is down. Note that `migration-based allocations`_, which were introduced in the 17.0.0 (Queens) release, do not apply to evacuate operations but only resize, cold migrate and live migrate. So once a server is successfully evacuated to a different host, the placement service will track allocations for that server against both the source and destination compute node resource providers. If the source compute service is restarted after being evacuated and fixed, the compute service will `delete the old allocations`_ held by the evacuated servers. During a rebuild operation, since neither the host nor flavor changes, the server allocations remain intact. .. _evacuate API: https://docs.openstack.org/api-ref/compute/#evacuate-server-evacuate-action .. _rebuild API: https://docs.openstack.org/api-ref/compute/#rebuild-server-rebuild-action .. _conductor code: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/conductor/manager.py#L944 .. _compute code: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/manager.py#L3052 .. _can be changed: https://specs.openstack.org/openstack/nova-specs/specs/zed/implemented/volume-backed-server-rebuild.html .. _2.93: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-zed .. _rebuilding with a different image: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/api.py#L3414 .. _claims: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/claims.py .. _rebuild claim: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/manager.py#L3104 .. _no claim: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/manager.py#L3108 .. _placement service: https://docs.openstack.org/placement/latest/ .. _allocations: https://docs.openstack.org/api-ref/placement/#allocations .. _migration-based allocations: https://specs.openstack.org/openstack/nova-specs/specs/queens/implemented/migration-allocations.html .. _delete the old allocations: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/manager.py#L627 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/how-to-get-involved.rst0000664000175000017500000003472400000000000023563 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _getting_involved: ===================================== How to get (more) involved with Nova ===================================== So you want to get more involved with Nova? Or you are new to Nova and wondering where to start? We are working on building easy ways for you to get help and ideas on how to learn more about Nova and how the Nova community works. 
Any questions, please ask! If you are unsure who to ask, then please contact the `PTL`__. __ `Nova People`_ How do I get started? ===================== There are quite a few global docs on this: - https://docs.openstack.org/contributors/ - https://www.openstack.org/community/ - https://www.openstack.org/assets/welcome-guide/OpenStackWelcomeGuide.pdf - https://wiki.openstack.org/wiki/How_To_Contribute There is more general info, non Nova specific info here: - https://wiki.openstack.org/wiki/Mentoring - https://docs.openstack.org/upstream-training/ What should I work on? ~~~~~~~~~~~~~~~~~~~~~~ So you are starting out your Nova journey, where is a good place to start? If you'd like to learn how Nova works before changing anything (good idea!), we recommend looking for reviews with -1s and -2s and seeing why they got downvoted. There is also the :ref:`code-review`. Once you have some understanding, start reviewing patches. It's OK to ask people to explain things you don't understand. It's also OK to see some potential problems but put a +0. Once you're ready to write code, take a look at some of the work already marked as low-hanging fruit: * https://bugs.launchpad.net/nova/+bugs?field.tag=low-hanging-fruit How do I get my feature in? ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The best way of getting your feature in is... well it depends. First concentrate on solving your problem and/or use case, don't fixate on getting the code you have working merged. It's likely things will need significant re-work after you discuss how your needs match up with all the existing ways Nova is currently being used. The good news, is this process should leave you with a feature that's more flexible and doesn't lock you into your current way of thinking. A key part of getting code merged, is helping with reviewing other people's code. Great reviews of others code will help free up more core reviewer time to look at your own patches. In addition, you will understand how the review is thinking when they review your code. Also, work out if any on going efforts are blocking your feature and helping out speeding those up. The spec review process should help with this effort. For more details on our process, please see: :ref:`process`. What is expected of a good contributor? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TODO - need more info on this Top Tips for working with the Nova community ============================================ Here are some top tips around engaging with the Nova community: - IRC - we talk a lot in #openstack-nova - do ask us questions in there, and we will try to help you - not sure about asking questions? feel free to listen in around other people's questions - we recommend you setup an IRC bouncer: https://docs.openstack.org/contributors/common/irc.html - Email - Use the [nova] tag in the mailing lists - Filtering on [nova] and [all] can help tame the list - Be Open - i.e. don't review your teams code in private, do it publicly in gerrit - i.e. be ready to talk about openly about problems you are having, not "theoretical" issues - that way you can start to gain the trust of the wider community - Got a problem? Please ask! - Please raise any problems and ask questions early - we want to help you before you are frustrated or annoyed - unsure who to ask? Just ask in IRC, or check out the list of `Nova people`_. - Talk about problems first, then solutions - Nova is a big project. 
At first, it can be hard to see the big picture - Don't think about "merging your patch", instead think about "solving your problem" - conversations are more productive that way - It's not the decision that's important, it's the reason behind it that's important - Don't like the way the community is going? - Please ask why we were going that way, and please engage with the debate - If you don't, we are unable to learn from what you have to offer - No one will decide, this is stuck, who can help me? - it's rare, but it happens - it's the `Nova PTL`__'s job to help you - ...but if you don't ask, it's hard for them to help you __ `Nova People`_ Process ======= It can feel like you are faced with a wall of process. We are a big community, to make sure the right communication happens, we do use a minimal amount of process. If you find something that doesn't make sense, please: - ask questions to find out \*why\* it happens - if you know of a better way to do it, please speak up - one "better way" might be to remove the process if it no longer helps To learn more about Nova's process, please read :ref:`process`. Why bother with any process? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Why is it worth creating a bug or blueprint to track your code review? This may seem like silly process, but there is usually a good reason behind it. We have lots of code to review, and we have tools to try and get to really important code reviews first. If yours is really important, but not picked up by our tools, it's possible you just get lost in the bottom of a big queue. If you have a bug fix, you have done loads of work to identify the issue, and test out your fix, and submit it. By adding a bug report, you are making it easier for other folks who hit the same problem to find your work, possibly saving them the hours of pain you went through. With any luck that gives all those people the time to fix different bugs, all that might have affected you, if you had not given them the time go fix it. It's similar with blueprints. You have worked out how to scratch your itch, lets tell others about that great new feature you have added, so they can use that. Also, it stops someone with a similar idea going through all the pain of creating a feature only to find you already have that feature ready and up for review, or merged into the latest release. Hopefully this gives you an idea why we have applied a small layer of process to what we are doing. Having said all this, we need to unlearn old habits to move forward, there may be better ways to do things, and we are open to trying them. Please help be part of the solution. .. _why_plus1: Why do code reviews if I am not in nova-core? ============================================= Code reviews are the life blood of the Nova developer community. There is a good discussion on how you do good reviews, and how anyone can be a reviewer: http://docs.openstack.org/infra/manual/developers.html#peer-review In the draft process guide, I discuss how doing reviews can help get your code merged faster: :ref:`process`. Lets look at some of the top reasons why participating with code reviews really helps you: - Doing more reviews, and seeing what other reviewers notice, will help you better understand what is expected of code that gets merged into master. - Having more non-core people do great reviews, leaves less review work for the core reviewers to do, so we are able get more code merged. - Empathy is one of the keys to a happy community. 
If you are used to doing code reviews, you will better understand the comments you get when people review your code. As you do more code reviews, and see what others notice, you will get a better idea of what people are looking for when they apply a +2 to your code. - If you do quality reviews, you'll be noticed and it's more likely you'll get reciprocal eyes on your reviews. What are the most useful types of code review comments? Well, here are a few of the top ones: - Fundamental flaws are the biggest thing to spot. Does the patch break a whole set of existing users, or an existing feature? - Consistency of behaviour is really important. Does this bit of code do things differently to where similar things happen elsewhere in Nova? - Is the code easy to maintain, well tested and easy to read? Code is read an order of magnitude more often than it is written, so optimise for the reader of the code, not the writer. - TODO - what others should go here? Let's look at some problems people hit when starting out doing code reviews: - My +1 doesn't mean anything, why should I bother? - So your +1 really does help. Some really useful -1 votes that later lead to a +1 help get code into a mergeable state. - When to use -1 vs 0 vs +1 - Please see the guidelines here: http://docs.openstack.org/infra/manual/developers.html#peer-review - I have already reviewed this code internally, no point in adding a +1 externally? - Please talk to your company about doing all code reviews in public; that is a much better way to get involved. Showing how the code has evolved upstream is much better than trying to 'perfect' code internally before uploading it for public review. You can use Draft mode, and mark things as WIP if you prefer, but please do the reviews upstream. - Where do I start? What should I review? - There are various tools, but a good place to start is: https://review.opendev.org/q/project:openstack/nova+status:open+label:Review-Priority%253DANY - Depending on the time in the cycle, it's worth looking at NeedsCodeReview blueprints: https://blueprints.launchpad.net/nova/ - Custom Gerrit review dashboards often provide a more manageable view of the outstanding reviews, and help focus your efforts: - Nova Review Inbox: https://goo.gl/1vTS0Z - Small Bug Fixes: http://ow.ly/WAw1J - Maybe take a look at things you want to see merged, bug fixes and features, or little code fixes - Look for things that have been waiting a long time for a review: https://review.opendev.org/#/q/project:openstack/nova+status:open+age:2weeks - If you get through the above lists, try other tools, such as: http://status.openstack.org/reviews How to do great code reviews? ============================= http://docs.openstack.org/infra/manual/developers.html#peer-review For more tips, please see: `Why do code reviews if I am not in nova-core?`_ How do I become nova-core? ========================== You don't have to be nova-core to be a valued member of the Nova community. There are many, many ways you can help. Every quality review that helps someone get their patch closer to being ready to merge helps everyone get their code merged faster. The first step to becoming nova-core is learning how to be an active member of the Nova community, including learning how to do great code reviews.
For more details see: https://wiki.openstack.org/wiki/Nova/CoreTeam#Membership_Expectations If you feel like you have the time to commit to all the nova-core membership expectations, reach out to the Nova PTL who will be able to find you an existing member of nova-core to help mentor you. If all goes well, and you seem like a good candidate, your mentor will contact the rest of the nova-core team to ask them to start looking at your reviews, so they are able to vote for you if you get nominated to join nova-core. We encourage all mentoring, where possible, to occur on #openstack-nova so everyone can learn and benefit from your discussions. The above mentoring is available to everyone who wants to learn how to do better code reviews, even if you don't ever want to commit to becoming nova-core. If you already have a mentor, that's great; the process is only there for folks who are still trying to find one. Being admitted to the mentoring program in no way guarantees you will eventually become a member of nova-core; it's here to help you improve, and to help you have the sort of involvement and conversations that can lead to becoming a member of nova-core. How to do great nova-spec reviews? ================================== https://specs.openstack.org/openstack/nova-specs/specs/2025.2/template.html :doc:`/contributor/blueprints`. Spec reviews are always a step ahead of the normal code reviews. Follow the above links for some great information on specs/reviews. The following could be some important tips: 1. The specs are published as html documents. Ensure that the author has verified that the .rst file renders properly. 2. More often than not, it's important to check that there are no overlaps across multiple specs. 3. Ensure that a proper dependency of the spec is identified. For example, a user-desired feature that requires a proper base enablement should be a dependent spec. 4. Ask for clarity on changes that appear ambiguous to you. 5. Every release, nova gets a huge set of spec proposals, and reviewing them all is a huge task for the limited set of nova cores. Helping the cores with additional reviews is always a great thing. How to do great bug triage? =========================== https://wiki.openstack.org/wiki/Nova/BugTriage Sylvain Bauza and Stephen Finucane gave a nice `presentation`_ on this topic at the Queens summit in Sydney. .. _presentation: https://www.openstack.org/videos/sydney-2017/upstream-bug-triage-the-hidden-gem How to step up into a project leadership role? ============================================== There are many ways to help lead the Nova project: * Mentoring efforts, and getting started tips: https://wiki.openstack.org/wiki/Nova/Mentoring * Info on process, with a focus on how you can go from an idea to getting code merged in Nova: :ref:`process` * Consider leading an existing `Nova subteam`_ or forming a new one. * Consider becoming a `Bug tag owner`_. * Contact the PTL about becoming a Czar: `Nova People`_. .. _`Nova people`: https://wiki.openstack.org/wiki/Nova#People .. _`Nova subteam`: https://wiki.openstack.org/wiki/Nova#Nova_subteams ..
_`Bug tag owner`: https://wiki.openstack.org/wiki/Nova/BugTriage#Tag_Owner_List ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/index.rst0000664000175000017500000001306500000000000021047 0ustar00zuulzuul00000000000000=========================== Contributor Documentation =========================== Contributing to nova gives you the power to help add features, fix bugs, enhance documentation, and increase testing. Contributions of any type are valuable, and part of what keeps the project going. Here are a list of resources to get your started. Basic Information ================= .. toctree:: :maxdepth: 2 contributing Getting Started =============== * :doc:`/contributor/how-to-get-involved`: Overview of engaging in the project * :doc:`/contributor/development-environment`: Get your computer setup to contribute .. # NOTE(amotoki): toctree needs to be placed at the end of the section to # keep the document structure in the PDF doc. .. toctree:: :hidden: how-to-get-involved development-environment Nova Process ============ The nova community is a large community. We have lots of users, and they all have a lot of expectations around upgrade and backwards compatibility. For example, having a good stable API, with discoverable versions and capabilities is important for maintaining the strong ecosystem around nova. Our process is always evolving, just as nova and the community around nova evolves over time. If there are things that seem strange, or you have ideas on how to improve things, please bring them forward on IRC or the openstack-discuss mailing list, so we continue to improve how the nova community operates. This section looks at the processes and why. The main aim behind all the process is to aid communication between all members of the nova community, while keeping users happy and keeping developers productive. * :doc:`/contributor/project-scope`: The focus is on features and bug fixes that make nova work better within this scope * :doc:`/contributor/policies`: General guidelines about what's supported * :doc:`/contributor/process`: The processes we follow around feature and bug submission, including how the release calendar works, and the freezes we go under * :doc:`/contributor/blueprints`: An overview of our tracking artifacts. * :doc:`/contributor/ptl-guide`: A chronological PTL reference guide .. # NOTE(amotoki): toctree needs to be placed at the end of the section to # keep the document structure in the PDF doc. .. toctree:: :hidden: project-scope policies process blueprints ptl-guide Reviewing ========= * :doc:`/contributor/releasenotes`: When we need a release note for a contribution. * :doc:`/contributor/code-review`: Important cheat sheet for what's important when doing code review in Nova, especially some things that are hard to test for, but need human eyes. * :doc:`/reference/i18n`: What we require for i18n in patches * :doc:`/contributor/documentation`: Guidelines for handling documentation contributions .. # NOTE(amotoki): toctree needs to be placed at the end of the section to # keep the document structure in the PDF doc. .. toctree:: :hidden: releasenotes code-review /reference/i18n documentation Testing ======= Because Python is a dynamic language, code that is not tested might not even be Python code. All new code needs to be validated somehow. * :doc:`/contributor/testing`: An overview of our test taxonomy and the kinds of testing we do and expect. 
* **Testing Guides**: There are also specific testing guides for features that are hard to test in our gate. * :doc:`/contributor/testing/libvirt-numa` * :doc:`/contributor/testing/serial-console` * :doc:`/contributor/testing/zero-downtime-upgrade` * :doc:`/contributor/testing/down-cell` * :doc:`/contributor/testing/pci-passthrough-sriov` * **Profiling Guides**: These are guides to profiling nova. * :doc:`/contributor/testing/eventlet-profiling` .. # NOTE(amotoki): toctree needs to be placed at the end of the section to # keep the document structure in the PDF doc. .. toctree:: :hidden: testing testing/libvirt-numa testing/serial-console testing/zero-downtime-upgrade testing/down-cell testing/eventlet-profiling testing/pci-passthrough-sriov The Nova API ============ Because we have many consumers of our API, we're extremely careful about changes done to the API, as the impact can be very wide. * :doc:`/contributor/api`: How the code is structured inside the API layer * :doc:`/contributor/microversions`: How the API is (micro)versioned and what you need to do when adding an API exposed feature that needs a new microversion. * :doc:`/contributor/api-ref-guideline`: The guideline to write the API reference. Nova also provides notifications over the RPC API, which you may wish to extend. * :doc:`/contributor/notifications`: How to add your own notifications .. # NOTE(amotoki): toctree needs to be placed at the end of the section to # keep the document structure in the PDF doc. .. toctree:: :hidden: api microversions api-ref-guideline notifications Nova Major Subsystems ===================== Major subsystems in nova have different needs. If you are contributing to one of these please read the :ref:`reference guide ` before diving in. * Move operations * :doc:`/contributor/evacuate-vs-rebuild`: Describes the differences between the often-confused evacuate and rebuild operations. * :doc:`/contributor/resize-and-cold-migrate`: Describes the differences and similarities between resize and cold migrate operations. .. # NOTE(amotoki): toctree needs to be placed at the end of the section to # keep the document structure in the PDF doc. .. toctree:: :hidden: evacuate-vs-rebuild resize-and-cold-migrate ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/microversions.rst0000664000175000017500000003607300000000000022646 0ustar00zuulzuul00000000000000API Microversions ================= Background ---------- Nova uses a framework we call 'API Microversions' for allowing changes to the API while preserving backward compatibility. The basic idea is that a user has to explicitly ask for their request to be treated with a particular version of the API. So breaking changes can be added to the API without breaking users who don't specifically ask for it. This is done with an HTTP header ``OpenStack-API-Version`` which has as its value a string containing the name of the service, ``compute``, and a monotonically increasing semantic version number starting from ``2.1``. The full form of the header takes the form:: OpenStack-API-Version: compute 2.1 If a user makes a request without specifying a version, they will get the ``DEFAULT_API_VERSION`` as defined in ``nova/api/openstack/wsgi.py``. This value is currently ``2.1`` and is expected to remain so for quite a long time. 
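For illustration only, here is a minimal sketch of passing the microversion header from a raw HTTP client, using the third-party ``requests`` library; the endpoint URL and token are placeholders rather than anything defined by Nova::

    import requests

    # Placeholder endpoint and token, shown only to illustrate the header.
    compute_endpoint = 'http://controller:8774/v2.1'
    token = '<keystone-token>'

    resp = requests.get(
        compute_endpoint + '/servers',
        headers={
            'X-Auth-Token': token,
            # Request microversion 2.26; omit the header to get the
            # DEFAULT_API_VERSION (currently 2.1).
            'OpenStack-API-Version': 'compute 2.26',
        },
    )
    # The negotiated microversion is echoed back in the response headers.
    print(resp.headers.get('OpenStack-API-Version'))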
There is a special value ``latest`` which can be specified, which will allow a client to always receive the most recent version of API responses from the server. .. warning:: The ``latest`` value is mostly meant for integration testing and would be dangerous to rely on in client code since Nova microversions are not following semver and therefore backward compatibility is not guaranteed. Clients, like python-novaclient, should always require a specific microversion but limit what is acceptable to the version range that it understands at the time. .. warning:: To maintain compatibility, an earlier form of the microversion header is acceptable. It takes the form:: X-OpenStack-Nova-API-Version: 2.1 This form will continue to be supported until the ``DEFAULT_API_VERSION`` is raised to version ``2.27`` or higher. Clients accessing deployments of the Nova API which are not yet providing microversion ``2.27`` must use the older form. For full details please read the `Kilo spec for microversions `_ and `Microversion Specification `_. When do I need a new Microversion? ---------------------------------- A microversion is needed when the contract to the user is changed. The user contract covers many kinds of information such as: - the Request - the list of resource urls which exist on the server Example: adding a new servers/{ID}/foo which didn't exist in a previous version of the code - the list of query parameters that are valid on urls Example: adding a new parameter ``is_yellow`` servers/{ID}?is_yellow=True - the list of query parameter values for non free form fields Example: parameter filter_by takes a small set of constants/enums "A", "B", "C". Adding support for new enum "D". - new headers accepted on a request - the list of attributes and data structures accepted. Example: adding a new attribute 'locked': True/False to the request body However, the attribute ``os.scheduler_hints`` of the "create a server" API is an exception to this. A new scheduler which adds a new attribute to ``os:scheduler_hints`` doesn't require a new microversion, because available schedulers depend on cloud environments, and we accept customized schedulers as a rule. - the Response - the list of attributes and data structures returned Example: adding a new attribute 'locked': True/False to the output of servers/{ID} - the allowed values of non free form fields Example: adding a new allowed ``status`` to servers/{ID} - the list of status codes allowed for a particular request Example: an API previously could return 200, 400, 403, 404 and the change would make the API now also be allowed to return 409. See [#f2]_ for the 400, 403, 404 and 415 cases. - changing a status code on a particular response Example: changing the return code of an API from 501 to 400. .. note:: Fixing a bug so that a 400+ code is returned rather than a 500 or 503 does not require a microversion change. It's assumed that clients are not expected to handle a 500 or 503 response and therefore should not need to opt-in to microversion changes that fixes a 500 or 503 response from happening. According to the OpenStack API Working Group, a **500 Internal Server Error** should **not** be returned to the user for failures due to user error that can be fixed by changing the request on the client side. See [#f1]_. - new headers returned on a response The following flow chart attempts to walk through the process of "do we need a microversion". .. graphviz:: digraph states { label="Do I need a microversion?" 
silent_fail[shape="diamond", style="", group=g1, label="Did we silently fail to do what is asked?"]; ret_500[shape="diamond", style="", group=g1, label="Did we return a 500 before?"]; new_error[shape="diamond", style="", group=g1, label="Are we changing what status code is returned?"]; new_attr[shape="diamond", style="", group=g1, label="Did we add or remove an attribute to a payload?"]; new_param[shape="diamond", style="", group=g1, label="Did we add or remove an accepted query string parameter or value?"]; new_resource[shape="diamond", style="", group=g1, label="Did we add or remove a resource url?"]; no[shape="box", style=rounded, label="No microversion needed"]; yes[shape="box", style=rounded, label="Yes, you need a microversion"]; no2[shape="box", style=rounded, label="No microversion needed, it's a bug"]; silent_fail -> ret_500[label=" no"]; silent_fail -> no2[label="yes"]; ret_500 -> no2[label="yes [1]"]; ret_500 -> new_error[label=" no"]; new_error -> new_attr[label=" no"]; new_error -> yes[label="yes"]; new_attr -> new_param[label=" no"]; new_attr -> yes[label="yes"]; new_param -> new_resource[label=" no"]; new_param -> yes[label="yes"]; new_resource -> no[label=" no"]; new_resource -> yes[label="yes"]; {rank=same; yes new_attr} {rank=same; no2 ret_500} {rank=min; silent_fail} } **Footnotes** .. [#f1] When fixing 500 errors that previously caused stack traces, try to map the new error into the existing set of errors that API call could previously return (400 if nothing else is appropriate). Changing the set of allowed status codes from a request is changing the contract, and should be part of a microversion (except in [#f2]_). The reason why we are so strict on contract is that we'd like application writers to be able to know, for sure, what the contract is at every microversion in Nova. If they do not, they will need to write conditional code in their application to handle ambiguities. When in doubt, consider application authors. If it would work with no client side changes on both Nova versions, you probably don't need a microversion. If, on the other hand, there is any ambiguity, a microversion is probably needed. .. [#f2] The exception to not needing a microversion when returning a previously unspecified error code is the 400, 403, 404 and 415 cases. This is considered OK to return even if previously unspecified in the code since it's implied given keystone authentication can fail with a 403 and API validation can fail with a 400 for invalid json request body. Request to url/resource that does not exist always fails with 404. Invalid content types are handled before API methods are called which results in a 415. .. note:: When in doubt about whether or not a microversion is required for changing an error response code, consult the `Nova API subteam`_. .. _Nova API subteam: https://wiki.openstack.org/wiki/Meetings/NovaAPI When a microversion is not needed --------------------------------- A microversion is not needed in the following situation: - the response - Changing the error message without changing the response code does not require a new microversion. - Removing an inapplicable HTTP header, for example, suppose the Retry-After HTTP header is being returned with a 4xx code. This header should only be returned with a 503 or 3xx response, so it may be removed without bumping the microversion. - An obvious regression bug in an admin-only API where the bug can still be fixed upstream on active stable branches. 
Admin-only APIs are less of a concern for interoperability and generally a regression in behavior can be dealt with as a bug fix when the documentation clearly shows the API behavior was unexpectedly regressed. See [#f3]_ for an example. Intentional behavior changes to an admin-only API *do* require a microversion, like the :ref:`2.53 microversion <2.53-microversion>` for example.

**Footnotes**

.. [#f3] https://review.opendev.org/#/c/523194/

In Code
-------

In ``nova/api/openstack/wsgi.py`` we define an ``@api_version`` decorator which is intended to be used on top-level Controller methods. It is not appropriate for lower-level methods. Some examples:

Adding a new API method
~~~~~~~~~~~~~~~~~~~~~~~

In the controller class::

    @wsgi.api_version("2.4")
    def my_api_method(self, req, id):
        ...

This method would only be available if the caller had specified an ``OpenStack-API-Version`` of >= ``2.4``. If they had specified a lower version (or not specified it and received the default of ``2.1``) the server would respond with ``HTTP/404``.

Removing an API method
~~~~~~~~~~~~~~~~~~~~~~

In the controller class::

    @wsgi.api_version("2.1", "2.4")
    def my_api_method(self, req, id):
        ...

This method would only be available if the caller had specified an ``OpenStack-API-Version`` of <= ``2.4``. If ``2.5`` or later is specified the server will respond with ``HTTP/404``.

A change in schema only
~~~~~~~~~~~~~~~~~~~~~~~

If there is no change to the method, only to the schema that is used for validation, you can add a version range to the ``validation.schema`` decorator::

    @wsgi.api_version("2.1")
    @validation.schema(dummy_schema.dummy, "2.3", "2.8")
    @validation.schema(dummy_schema.dummy2, "2.9")
    def update(self, req, id, body):
        ...

This method will be available from version ``2.1``, validated according to ``dummy_schema.dummy`` from ``2.3`` to ``2.8``, and validated according to ``dummy_schema.dummy2`` from ``2.9`` onward.

Other API method changes
~~~~~~~~~~~~~~~~~~~~~~~~

When you want to change more than the API request or response schema, you can directly test for the requested version with a method as long as you have access to the api request object (commonly called ``req``). Every API method has an api_version_request object attached to the req object and that can be used to modify behavior based on its value::

    def index(self, req):
        req_version = req.api_version_request
        req1_min = api_version_request.APIVersionRequest("2.1")
        req1_max = api_version_request.APIVersionRequest("2.5")
        req2_min = api_version_request.APIVersionRequest("2.6")
        req2_max = api_version_request.APIVersionRequest("2.10")

        if req_version.matches(req1_min, req1_max):
            ....stuff....
        elif req_version.matches(req2_min, req2_max):
            ....other stuff....
        elif req_version > api_version_request.APIVersionRequest("2.10"):
            ....more stuff.....

The first argument to the matches method is the minimum acceptable version and the second is the maximum acceptable version. A specified version can be null::

    null_version = APIVersionRequest()

If the minimum version specified is null then there is no restriction on the minimum version, and likewise if the maximum version is null there is no restriction on the maximum version. Alternatively a one sided comparison can be used as in the example above.
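To make the null-version case concrete, here is a small sketch in the same style as the ``index`` example above; it reuses the names from that example and is a sketch rather than code copied from Nova::

    def index(self, req):
        req_version = req.api_version_request

        # A null APIVersionRequest places no bound on that side of the
        # range, so this match accepts microversion 2.6 and anything later.
        min_version = api_version_request.APIVersionRequest("2.6")
        null_version = api_version_request.APIVersionRequest()

        if req_version.matches(min_version, null_version):
            ....stuff only available from 2.6 onward....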
Other necessary changes ----------------------- If you are adding a patch which adds a new microversion, it is necessary to add changes to other places which describe your change: * Update ``REST_API_VERSION_HISTORY`` in ``nova/api/openstack/api_version_request.py`` * Update ``_MAX_API_VERSION`` in ``nova/api/openstack/api_version_request.py`` * Add a verbose description to ``nova/api/openstack/compute/rest_api_version_history.rst``. * Add a :doc:`release note ` with a ``features`` section announcing the new or changed feature and the microversion. * Update the expected versions in affected tests, for example in ``nova/tests/unit/api/openstack/compute/test_versions.py``. * Update the get versions api sample file: ``doc/api_samples/versions/versions-get-resp.json`` and ``doc/api_samples/versions/v21-version-get-resp.json``. * Make a new commit to python-novaclient and update corresponding files to enable the newly added microversion API. See :python-novaclient-doc:`Adding support for a new microversion ` in python-novaclient for more details. * If the microversion changes the response schema, a new schema and test for the microversion must be added to Tempest. * If applicable, add Functional sample tests under ``nova/tests/functional/api_sample_tests``. Also, add JSON examples to ``doc/api_samples`` directory which can be generated automatically via tox env ``api-samples`` or run test with env var ``GENERATE_SAMPLES`` True. * Update the `API Reference`_ documentation as appropriate. The source is located under ``api-ref/source/``. * If the microversion changes servers related APIs, update the ``api-guide/source/server_concepts.rst`` accordingly. .. _API Reference: https://docs.openstack.org/api-ref/compute/ Allocating a microversion ------------------------- If you are adding a patch which adds a new microversion, it is necessary to allocate the next microversion number. Except under extremely unusual circumstances and this would have been mentioned in the nova spec for the change, the minor number of ``_MAX_API_VERSION`` will be incremented. This will also be the new microversion number for the API change. It is possible that multiple microversion patches would be proposed in parallel and the microversions would conflict between patches. This will cause a merge conflict. We don't reserve a microversion for each patch in advance as we don't know the final merge order. Developers may need over time to rebase their patch calculating a new version number as above based on the updated value of ``_MAX_API_VERSION``. Testing Microversioned API Methods ---------------------------------- Testing a microversioned API method is very similar to a normal controller method test, you just need to add the ``OpenStack-API-Version`` header, for example:: req = fakes.HTTPRequest.blank('/testable/url/endpoint') req.headers = {'OpenStack-API-Version': 'compute 2.28'} req.api_version_request = api_version.APIVersionRequest('2.6') controller = controller.TestableController() res = controller.index(req) ... assertions about the response ... For many examples of testing, the canonical examples are in ``nova/tests/unit/api/openstack/compute/test_microversions.py``. 
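Building on the snippet above, a test will often also assert that the method is rejected below its minimum microversion. The following is only a sketch: ``FAKE_UUID`` is an illustrative constant, and the exact exception depends on how the method is decorated, though ``exception.VersionNotFoundForAPIMethod`` is the usual result of invoking a method outside its version range::

    req = fakes.HTTPRequest.blank('/testable/url/endpoint')
    req.headers = {'OpenStack-API-Version': 'compute 2.5'}
    req.api_version_request = api_version.APIVersionRequest('2.5')

    controller = controller.TestableController()
    # Assuming my_api_method is decorated with @wsgi.api_version("2.6"),
    # requesting it at 2.5 should not find a matching versioned method.
    self.assertRaises(exception.VersionNotFoundForAPIMethod,
                      controller.my_api_method, req, FAKE_UUID)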
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/notifications.rst0000664000175000017500000002616100000000000022612 0ustar00zuulzuul00000000000000============= Notifications ============= As discussed in :doc:`/admin/notifications`, nova emits notifications to the message bus. There are two types of notifications provided in nova: legacy (unversioned) notifications and versioned notifications. As a developer, you may choose to add additional notifications or extend existing notifications. .. note:: This section provides information on adding your own notifications in nova. For background information on notifications including usage information, refer to :doc:`/admin/notifications`. For a list of available versioned notifications, refer to :doc:`/reference/notifications`. How to add a new versioned notification --------------------------------------- To provide the versioning for versioned notifications, each notification is modeled with oslo.versionedobjects. Every versioned notification class shall inherit from the ``nova.notifications.objects.base.NotificationBase`` which already defines three mandatory fields of the notification ``event_type``, ``publisher`` and ``priority``. The new notification class shall add a new field ``payload`` with an appropriate payload type. The payload object of the notifications shall inherit from the ``nova.notifications.objects.base.NotificationPayloadBase`` class and shall define the fields of the payload as versionedobject fields. The base classes are described in the following section. .. rubric:: The ``nova.notifications.objects.base`` module .. automodule:: nova.notifications.objects.base :noindex: :members: :show-inheritance: Note that the notification objects must not be registered to the ``NovaObjectRegistry`` to avoid mixing nova-internal objects with the notification objects. Instead, use the ``register_notification`` decorator on every concrete notification object. The following code example defines the necessary model classes for a new notification ``myobject.update``. .. code-block:: python @notification.notification_sample('myobject-update.json') @object_base.NovaObjectRegistry.register.register_notification class MyObjectNotification(notification.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('MyObjectUpdatePayload') } @object_base.NovaObjectRegistry.register.register_notification class MyObjectUpdatePayload(notification.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'some_data': fields.StringField(), 'another_data': fields.StringField(), } After that the notification can be populated and emitted with the following code. .. code-block:: python payload = MyObjectUpdatePayload(some_data="foo", another_data="bar") MyObjectNotification( publisher=notification.NotificationPublisher.from_service_obj( ), event_type=notification.EventType( object='myobject', action=fields.NotificationAction.UPDATE), priority=fields.NotificationPriority.INFO, payload=payload).emit(context) The above code will generate the following notification on the wire. .. 
code-block:: json { "priority":"INFO", "payload":{ "nova_object.namespace":"nova", "nova_object.name":"MyObjectUpdatePayload", "nova_object.version":"1.0", "nova_object.data":{ "some_data":"foo", "another_data":"bar", } }, "event_type":"myobject.update", "publisher_id":":" } There is a possibility to reuse an existing versionedobject as notification payload by adding a ``SCHEMA`` field for the payload class that defines a mapping between the fields of existing objects and the fields of the new payload object. For example the service.status notification reuses the existing ``nova.objects.service.Service`` object when defines the notification's payload. .. code-block:: python @notification.notification_sample('service-update.json') @object_base.NovaObjectRegistry.register.register_notification class ServiceStatusNotification(notification.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('ServiceStatusPayload') } @object_base.NovaObjectRegistry.register.register_notification class ServiceStatusPayload(notification.NotificationPayloadBase): SCHEMA = { 'host': ('service', 'host'), 'binary': ('service', 'binary'), 'topic': ('service', 'topic'), 'report_count': ('service', 'report_count'), 'disabled': ('service', 'disabled'), 'disabled_reason': ('service', 'disabled_reason'), 'availability_zone': ('service', 'availability_zone'), 'last_seen_up': ('service', 'last_seen_up'), 'forced_down': ('service', 'forced_down'), 'version': ('service', 'version') } # Version 1.0: Initial version VERSION = '1.0' fields = { 'host': fields.StringField(nullable=True), 'binary': fields.StringField(nullable=True), 'topic': fields.StringField(nullable=True), 'report_count': fields.IntegerField(), 'disabled': fields.BooleanField(), 'disabled_reason': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'last_seen_up': fields.DateTimeField(nullable=True), 'forced_down': fields.BooleanField(), 'version': fields.IntegerField(), } def populate_schema(self, service): super(ServiceStatusPayload, self).populate_schema(service=service) If the ``SCHEMA`` field is defined then the payload object needs to be populated with the ``populate_schema`` call before it can be emitted. .. code-block:: python payload = ServiceStatusPayload() payload.populate_schema(service=) ServiceStatusNotification( publisher=notification.NotificationPublisher.from_service_obj( ), event_type=notification.EventType( object='service', action=fields.NotificationAction.UPDATE), priority=fields.NotificationPriority.INFO, payload=payload).emit(context) The above code will emit the :ref:`already shown notification ` on the wire. Every item in the ``SCHEMA`` has the syntax of:: : (, ) The mapping defined in the ``SCHEMA`` field has the following semantics. When the ``populate_schema`` function is called the content of the ``SCHEMA`` field is enumerated and the value of the field of the pointed parameter object is copied to the requested payload field. So in the above example the ``host`` field of the payload object is populated from the value of the ``host`` field of the ``service`` object that is passed as a parameter to the ``populate_schema`` call. A notification payload object can reuse fields from multiple existing objects. Also a notification can have both new and reused fields in its payload. Note that the notification's publisher instance can be created two different ways. 
It can be created by instantiating the ``NotificationPublisher`` object with a ``host`` and a ``source`` string parameter or it can be generated from a ``Service`` object by calling ``NotificationPublisher.from_service_obj`` function. Versioned notifications shall have a sample file stored under ``doc/sample_notifications`` directory and the notification object shall be decorated with the ``notification_sample`` decorator. For example the ``service.update`` notification has a sample file stored in ``doc/sample_notifications/service-update.json`` and the ``ServiceUpdateNotification`` class is decorated accordingly. Notification payload classes can use inheritance to avoid duplicating common payload fragments in nova code. However the leaf classes used directly in a notification should be created with care to avoid future needs of adding extra level of inheritance that changes the name of the leaf class as that name is present in the payload class. If this cannot be avoided and the only change is the renaming then the version of the new payload shall be the same as the old payload was before the rename. See [1]_ as an example. If the renaming involves any other changes on the payload (e.g. adding new fields) then the version of the new payload shall be higher than the old payload was. See [2]_ as an example. What should be in the notification payload? ------------------------------------------- This is just a guideline. You should always consider the actual use case that requires the notification. * Always include the identifier (e.g. uuid) of the entity that can be used to query the whole entity over the REST API so that the consumer can get more information about the entity. * You should consider including those fields that are related to the event you are sending the notification about. For example if a change of a field of the entity triggers an update notification then you should include the field to the payload. * An update notification should contain information about what part of the entity is changed. Either by filling the nova_object.changes part of the payload (note that it is not supported by the notification framework currently) or sending both the old state and the new state of the entity in the payload. * You should never include a nova internal object in the payload. Create a new object and use the SCHEMA field to map the internal object to the notification payload. This way the evolution of the internal object model can be decoupled from the evolution of the notification payload. .. important:: This does not mean that every field from internal objects should be mirrored in the notification payload objects. Think about what is actually needed by a consumer before adding it to a payload. When in doubt, if no one is requesting specific information in notifications, then leave it out until someone asks for it. * The delete notification should contain the same information as the create or update notifications. This makes it possible for the consumer to listen only to the delete notifications but still filter on some fields of the entity (e.g. project_id). What should **NOT** be in the notification payload -------------------------------------------------- * Generally anything that contains sensitive information about the internals of the nova deployment, for example fields that contain access credentials to a cell database or message queue (see `bug 1823104`_). .. _bug 1823104: https://bugs.launchpad.net/nova/+bug/1823104 .. references: .. 
[1] https://review.opendev.org/#/c/463001/ .. [2] https://review.opendev.org/#/c/453077/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/policies.rst0000664000175000017500000001645600000000000021556 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Development policies -------------------- Out Of Tree Support =================== While nova has many entrypoints and other places in the code that allow for wiring in out of tree code, upstream doesn't actively make any guarantees about these extensibility points; we don't support them, make any guarantees about compatibility, stability, etc. Furthermore, hooks and extension points in the code impede efforts in Nova to support interoperability between OpenStack clouds. Therefore an effort is being made to systematically deprecate and remove hooks, extension points, and classloading of managers and other services. Public Contractual APIs ======================== Although nova has many internal APIs, they are not all public contractual APIs. Below is a link of our public contractual APIs: * https://docs.openstack.org/api-ref/compute/ Anything not in this list is considered private, not to be used outside of nova, and should not be considered stable. REST APIs ========== Follow the guidelines set in: https://wiki.openstack.org/wiki/APIChangeGuidelines The canonical source for REST API behavior is the code *not* documentation. Documentation is manually generated after the code by folks looking at the code and writing up what they think it does, and it is very easy to get this wrong. This policy is in place to prevent us from making backwards incompatible changes to REST APIs. Patches and Reviews =================== Merging a patch requires a non-trivial amount of reviewer resources. As a patch author, you should try to offset the reviewer resources spent on your patch by reviewing other patches. If no one does this, the review team (cores and otherwise) become spread too thin. For review guidelines see: :doc:`code-review` Reverts for Retrospective Vetos =============================== Sometimes our simple "2 +2s" approval policy will result in errors. These errors might be a bug that was missed, or equally importantly, it might be that other cores feel that there is a need for more discussion on the implementation of a given piece of code. Rather than `an enforced time-based solution`_ - for example, a patch couldn't be merged until it has been up for review for 3 days - we have chosen an honor-based system where core reviewers would not approve potentially contentious patches until the proposal had been sufficiently socialized and everyone had a chance to raise any concerns. Recognising that mistakes can happen, we also have a policy where contentious patches which were quickly approved should be reverted so that the discussion around the proposal can continue as if the patch had never been merged in the first place. In such a situation, the procedure is: 0. 
The commit to be reverted must not have been released. 1. The core team member who has a -2 worthy objection should propose a revert, stating the specific concerns that they feel need addressing. 2. Any subsequent patches depending on the to-be-reverted patch may need to be reverted also. 3. Other core team members should quickly approve the revert. No detailed debate should be needed at this point. A -2 vote on a revert is strongly discouraged, because it effectively blocks the right of cores approving the revert from -2 voting on the original patch. 4. The original patch submitter should re-submit the change, with a reference to the original patch and the revert. 5. The original reviewers of the patch should restore their votes and attempt to summarize their previous reasons for their votes. 6. The patch should not be re-approved until the concerns of the people proposing the revert are worked through. A mailing list discussion or design spec might be the best way to achieve this. .. _`an enforced time-based solution`: https://lists.launchpad.net/openstack/msg08574.html Metrics Gathering ================= Nova currently has a monitor plugin to gather CPU metrics on compute nodes. This feeds into the MetricsFilter and MetricsWeigher in the scheduler. The CPU metrics monitor is only implemented for the libvirt compute driver. External projects like :ceilometer-doc:`Ceilometer <>` and :watcher-doc:`Watcher <>` consume these metrics. Over time people have tried to add new monitor plugins for things like memory bandwidth. There have also been attempts to expose these monitors over CLI, the REST API, and notifications. At the `Newton midcycle`_ it was decided that Nova does a poor job as a metrics gathering tool, especially as it's incomplete, not tested, and there are numerous other tools available to get this information as their primary function. Therefore, there is a freeze on adding new metrics monitoring plugins which also includes exposing existing monitored metrics outside of Nova, like with the nova-manage CLI, the REST API, or the notification bus. Long-term, metrics gathering will likely be deprecated within Nova. Since there is not yet a clear replacement, the deprecation is open-ended, but serves as a signal that new deployments should not rely on the metrics that Nova gathers and should instead focus their efforts on alternative solutions for placement. .. _Newton midcycle: http://lists.openstack.org/pipermail/openstack-dev/2016-August/100600.html Continuous Delivery Mentality ============================= Nova generally tries to subscribe to a philosophy of anything we merge today can be in production today, and people can continuously deliver Nova. In practice this means we should not merge code that will not work until some later change is merged, because that later change may never come, or not come in the same release cycle, or may be substantially different from what was originally intended. For example, if patch A uses code that is not available until patch D later in the series. The strategy for dealing with this in particularly long and complicated series of changes is to start from the "bottom" with code that is no-op until it is "turned on" at the top of the stack, generally with some feature flag, policy rule, API microversion, etc. So in the example above, the code from patch D should come before patch A even if nothing is using it yet, but things will build on it. Realistically this means if you are working on a feature that touches most of the Nova "stack", i.e. 
compute driver/service through to API, you will work on the compute driver/service code first, then conductor and/or scheduler, and finally the API. An extreme example of this can be found by reading the `code review guide for the cross-cell resize feature`_. Even if this philosophy is not the reality of how the vast majority of OpenStack deployments consume Nova, it is a development philosophy to try and avoid merging broken code. .. _code review guide for the cross-cell resize feature: http://lists.openstack.org/pipermail/openstack-discuss/2019-May/006366.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/process.rst0000664000175000017500000010572600000000000021424 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _process: ================= Nova team process ================= Nova is always evolving its processes, but it's important to explain why we have them: so we can all work to ensure that the interactions we need to happen do happen. The process exists to make productive communication between all members of our community easier. OpenStack Wide Patterns ======================= Nova follows most of the generally adopted norms for OpenStack projects. You can get more details here: * https://docs.openstack.org/infra/manual/developers.html * https://docs.openstack.org/project-team-guide/ If you are new to Nova, please read this first: :ref:`getting_involved`. Dates overview ============== For 2025.2 Flamingo, please see: https://wiki.openstack.org/wiki/Nova/2025.2_Release_Schedule .. note:: Throughout this document any link which references the name of a release cycle in the link can usually be changed to the name of the current cycle to get up to date information. Feature Freeze ~~~~~~~~~~~~~~ Feature freeze primarily provides a window of time to help the horizontal teams prepare their items for release, while giving developers time to focus on stabilising what is currently in master, and encouraging users and packagers to perform tests (automated, and manual) on the release, to spot any major bugs. The Nova release process is aligned with the `development cycle schedule `_ used by many OpenStack projects, including the following steps. - Feature Proposal Freeze - make sure all code is up for review - so we can optimise for completed features, not lots of half completed features - Feature Freeze - make sure all feature code is merged - String Freeze - give translators time to translate all our strings .. note:: debug logs are no longer translated - Dependency Freeze - time to coordinate the final list of dependencies, and give packagers time to package them - generally it is also quite destabilising to take upgrades (beyond bug fixes) this late As with all processes here, there are exceptions. The exceptions at this stage need to be discussed with the horizontal teams that might be affected by changes beyond this point, and as such are discussed with one of the OpenStack release managers. 
Spec and Blueprint Approval Freeze ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is a (mostly) Nova specific process. Why we have a Spec Freeze: - specs take a long time to review and reviewing specs throughout the cycle distracts from code reviews - keeping specs "open" and being slow at reviewing them (or just ignoring them) annoys the spec submitters - we generally have more code submitted that we can review, this time bounding is a useful way to limit the number of submissions By the freeze date, we expect all blueprints that will be approved for the cycle to be listed on launchpad and all relevant specs to be merged. For 2025.2 Flamingo, blueprints can be found at https://blueprints.launchpad.net/nova/2025.2 and specs at https://specs.openstack.org/openstack/nova-specs/specs/2025.2/index.html Starting with Liberty, we are keeping a backlog open for submission at all times. .. note:: The focus is on accepting and agreeing problem statements as being in scope, rather than queueing up work items for the next release. We are still working on a new lightweight process to get out of the backlog and approved for a particular release. For more details on backlog specs, please see: http://specs.openstack.org/openstack/nova-specs/specs/backlog/index.html There can be exceptions, usually it's an urgent feature request that comes up after the initial deadline. These will generally be discussed at the weekly Nova meeting, by adding the spec or blueprint to discuss in the appropriate place in the meeting agenda here (ideally make yourself available to discuss the blueprint, or alternatively make your case on the ML before the meeting): https://wiki.openstack.org/wiki/Meetings/Nova#Agenda_for_next_meeting String Freeze ~~~~~~~~~~~~~ String Freeze provides an opportunity for translators to translate user-visible messages to a variety of languages. By not changing strings after the date of the string freeze, the job of the translators is made a bit easier. For more information on string and other OpenStack-wide release processes see `the release management docs `_. How do I get my code merged? ============================ OK, so you are new to Nova, and you have been given a feature to implement. How do I make that happen? You can get most of your questions answered here: - https://docs.openstack.org/infra/manual/developers.html But let's put a Nova specific twist on things... Overview ~~~~~~~~ .. image:: /_static/images/nova-spec-process.svg :alt: Flow chart showing the Nova bug/feature process Where do you track bugs? ~~~~~~~~~~~~~~~~~~~~~~~~ We track bugs here: - https://bugs.launchpad.net/nova If you fix an issue, please raise a bug so others who spot that issue can find the fix you kindly created for them. Also before submitting your patch it's worth checking to see if someone has already fixed it for you (Launchpad helps you with that, at little, when you create the bug report). When do I need a blueprint vs a spec? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For more details refer to :doc:`/contributor/blueprints`. To understand this question, we need to understand why blueprints and specs are useful. But here is the rough idea: - if it needs a spec, it will need a blueprint. - if it's an API change, it needs a spec. - if it's a single small patch that touches a small amount of code, with limited deployer and doc impact, it probably doesn't need a spec. If you are unsure, please ask the `PTL`_ on IRC, or one of the other nova-drivers. How do I get my blueprint approved? 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ So you need your blueprint approved? Here is how: - if you don't need a spec, please add a link to your blueprint to the agenda for the next nova meeting: https://wiki.openstack.org/wiki/Meetings/Nova - be sure your blueprint description has enough context for the review in that meeting. - if you need a spec, then please submit a nova-spec for review, see: https://docs.openstack.org/infra/manual/developers.html Got any more questions? Contact the `PTL`_ or one of the other nova-specs-core who are awake at the same time as you. IRC is best as you will often get an immediate response, if they are too busy send him/her an email. How do I get a procedural -2 removed from my patch? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When feature freeze hits, any patches for blueprints that are still in review get a procedural -2 to stop them merging. In Nova a blueprint is only approved for a single release. To have the -2 removed, you need to get the blueprint approved for the current release (see `How do I get my blueprint approved?`_). Why are the reviewers being mean to me? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Code reviews take intense concentration and a lot of time. This tends to lead to terse responses with very little preamble or nicety. That said, there's no excuse for being actively rude or mean. OpenStack has a Code of Conduct (https://www.openstack.org/legal/community-code-of-conduct/) and if you feel this has been breached please raise the matter privately. Either with the relevant parties, the `PTL`_ or failing those, the OpenStack Foundation. That said, there are many objective reasons for applying a -1 or -2 to a patch: - Firstly and simply, patches must address their intended purpose successfully. - Patches must not have negative side-effects like wiping the database or causing a functional regression. Usually removing anything, however tiny, requires a deprecation warning be issued for a cycle. - Code must be maintainable, that is it must adhere to coding standards and be as readable as possible for an average OpenStack developer (we acknowledge that this person is not easy to define). - Patches must respect the direction of the project, for example they should not make approved specs substantially more difficult to implement. - Release coordinators need the correct process to be followed so scope can be tracked accurately. Bug fixes require bugs, features require blueprints and all but the simplest features require specs. If there is a blueprint, it must be approved for the release/milestone the patch is attempting to merge into. Please particularly bear in mind that a -2 does not mean "never ever" nor does it mean "your idea is bad and you are dumb". It simply means "do not merge today". You may need to wait some time, rethink your approach or even revisit the problem definition but there is almost always some way forward. The core who applied the -2 should tell you what you need to do. My code review seems stuck, what can I do? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ First and foremost - address any -1s and -2s! The review load on Nova is high enough that patches with negative reviews often get filtered out entirely. A few tips: - Be precise. Ensure you're not talking at cross purposes. - Try to understand where the reviewer is coming from. They may have a very different perspective and/or use-case to you. - If you don't understand the problem, ask them to explain - this is common and helpful behaviour. - Be positive. 
Everyone's patches have issues, including core reviewers. No-one cares once the issues are fixed. - Try not to flip-flop. When two reviewers are pulling you in different directions, stop pushing code and negotiate the best way forward. - If the reviewer does not respond to replies left on the patchset, reach out to them on IRC or email. If they still don't respond, you can try to ask their colleagues if they're on holiday (or simply wait). Finally, you can ask for mediation in the Nova meeting by adding it to the agenda (https://wiki.openstack.org/wiki/Meetings/Nova). This is also what you should do if you are unable to negotiate a resolution to an issue. Secondly, Nova is a big project, look for things that have been waiting a long time for a review: https://review.opendev.org/#/q/project:openstack/nova+status:open+age:2weeks Eventually you should get some +1s from people working through the review queue. Expect to get -1s as well. You can ask for reviews within your company, 1-2 are useful (not more), especially if those reviewers are known to give good reviews. You can spend some time while you wait reviewing other people's code - they may reciprocate and you may learn something (:ref:`Why do code reviews when I'm not core? `). If you've waited an appropriate amount of time and you haven't had any +1s, you can ask on IRC for reviews. Please don't ask for core review straight away, especially not directly (IRC or email). Core reviewer time is very valuable and gaining some +1s is a good way to show your patch meets basic quality standards. Once you have a few +1s, be patient. Remember the average wait times. You can ask for reviews each week in IRC, it helps to ask when cores are awake. Bugs ^^^^ It helps to apply correct tracking information. - Put "Closes-Bug", "Partial-Bug" or "Related-Bug" in the commit message tags as necessary. - If you have to raise a bug in Launchpad first, do it - this helps someone else find your fix. - Make sure the bug has the correct `priority`_ and `tag`_ set. .. _priority: https://wiki.openstack.org/wiki/BugTriage#Task_2:_Prioritize_confirmed_bugs_.28bug_supervisors.29 .. _tag: https://wiki.openstack.org/wiki/Nova/BugTriage#Tags Features ^^^^^^^^ Again, it helps to apply correct tracking information. For blueprint-only features: - Put your blueprint in the commit message, EG "blueprint simple-feature". - Mark the blueprint as NeedsCodeReview if you are finished. - Maintain the whiteboard on the blueprint so it's easy to understand which patches need reviews. - Use a single topic for all related patches. All patches for one blueprint should share a topic. For blueprint and spec features, do everything for blueprint-only features and also: - Ensure your spec is approved for the current release cycle. If your code is a project or subteam priority, the cores interested in that priority might not mind a ping after it has sat with +1s for a week. If you abuse this privilege, you'll lose respect. If it's not a priority, your blueprint/spec has been approved for the cycle and you have been patient, you can raise it during the Nova meeting. The outcome may be that your spec gets unapproved for the cycle, so that priority items can take focus. If this happens to you, sorry - it should not have been approved in the first place, Nova team bit off more than they could chew, it is their mistake not yours. You can re-propose it for the next cycle. If it's not a priority and your spec has not been approved, your code will not merge this cycle. 
Please re-propose your spec for the next cycle. Nova Process Mission ==================== This section takes a high level look at the guiding principles behind the Nova process. Open ~~~~ Our mission is to have: - Open Source - Open Design - Open Development - Open Community We have to work out how to keep communication open in all areas. We need to be welcoming and mentor new people, and make it easy for them to pick up the knowledge they need to get involved with OpenStack. For more info on Open, please see: https://wiki.openstack.org/wiki/Open Interoperable API, supporting a vibrant ecosystem ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ An interoperable API that gives users on-demand access to compute resources is at the heart of :ref:`nova's mission `. Nova has a vibrant ecosystem of tools built on top of the current Nova API. All features should be designed to work with all technology combinations, so the feature can be adopted by our ecosystem. If a new feature is not adopted by the ecosystem, it will make it hard for your users to make use of those features, defeating most of the reason to add the feature in the first place. The microversion system allows users to isolate themselves from API changes. This is a very different aim to being "pluggable" or wanting to expose all capabilities to end users. At the same time, it is not just a "lowest common denominator" set of APIs. It should be discoverable which features are available, and while no implementation details should leak to the end users, purely admin concepts may need to understand technology specific details that back the interoperable and more abstract concepts that are exposed to the end user. This is a hard goal, and one area we currently don't do well is isolating image creators from these technology specific details. Smooth Upgrades ~~~~~~~~~~~~~~~ As part of our mission for a vibrant ecosystem around our APIs, we want to make it easy for those deploying Nova to upgrade with minimal impact to their users. Here is the scope of Nova's upgrade support: - upgrade from any commit, to any future commit, within the same major release - only support upgrades between N and N+1 major versions, to reduce technical debt relating to upgrades Here are some of the things we require developers to do, to help with upgrades: - when replacing an existing feature or configuration option, make it clear how to transition to any replacement - deprecate configuration options and features before removing them - i.e. continue to support and test features for at least one release before they are removed - this gives time for operator feedback on any removals - End User API will always be kept backwards compatible Interaction goals ~~~~~~~~~~~~~~~~~ When thinking about the importance of process, we should take a look at: http://agilemanifesto.org With that in mind, let's look at how we want different members of the community to interact. Let's start with looking at issues we have tried to resolve in the past (currently in no particular order). We must: - have a way for everyone to review blueprints and designs, including allowing for input from operators and all types of users (keep it open) - take care to not expand Nova's scope any more than absolutely necessary - ensure we get sufficient focus on the core of Nova so that we can maintain or improve the stability and flexibility of the overall codebase - support any API we release approximately forever.
We currently release every commit, so we're motivated to get the API right the first time - avoid low priority blueprints that slow work on high priority work, without blocking those forever - focus on a consistent experience for our users, rather than ease of development - optimise for completed blueprints, rather than more half completed blueprints, so we get maximum value for our users out of our review bandwidth - focus efforts on a subset of patches to allow our core reviewers to be more productive - set realistic expectations on what can be reviewed in a particular cycle, to avoid sitting in an expensive rebase loop - be aware of users that do not work on the project full time - be aware of users that are only able to work on the project at certain times that may not align with the overall community cadence - discuss designs for non-trivial work before implementing it, to avoid the expense of late-breaking design issues FAQs ==== Why bother with all this process? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We are a large community, spread across multiple timezones, working with several horizontal teams. Good communication is a challenge and the processes we have are mostly there to try and help fix some communication challenges. If you have a problem with a process, please engage with the community, discover the reasons behind our current process, and help fix the issues you are experiencing. Why don't you remove old process? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We do! For example, in Liberty we stopped trying to predict the milestones when a feature will land. As we evolve, it is important to unlearn new habits and explore if things get better if we choose to optimise for a different set of issues. Why are specs useful? ~~~~~~~~~~~~~~~~~~~~~ Spec reviews allow anyone to step up and contribute to reviews, just like with code. Before we used gerrit, it was a very messy review process, that felt very "closed" to most people involved in that process. As Nova has grown in size, it can be hard to work out how to modify Nova to meet your needs. Specs are a great way of having that discussion with the wider Nova community. For Nova to be a success, we need to ensure we don't break our existing users. The spec template helps focus the mind on the impact your change might have on existing users and gives an opportunity to discuss the best way to deal with those issues. However, there are some pitfalls with the process. Here are some top tips to avoid them: - keep it simple. Shorter, simpler, more decomposed specs are quicker to review and merge much quicker (just like code patches). - specs can help with documentation but they are only intended to document the design discussion rather than document the final code. - don't add details that are best reviewed in code, it's better to leave those things for the code review. If we have specs, why still have blueprints? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We use specs to record the design agreement, we use blueprints to track progress on the implementation of the spec. Currently, in Nova, specs are only approved for one release, and must be re-submitted for each release you want to merge the spec, although that is currently under review. Why do we have priorities? ~~~~~~~~~~~~~~~~~~~~~~~~~~ To be clear, there is no "nova dev team manager", we are an open team of professional software developers, that all work for a variety of (mostly competing) companies that collaborate to ensure the Nova project is a success. 
Over time, a lot of technical debt has accumulated, because there was a lack of collective ownership to solve those cross-cutting concerns. Before the Kilo release, it was noted that progress felt much slower, because we were unable to get appropriate attention on the architectural evolution of Nova. This was important, partly for major concerns like upgrades and stability. We agreed it's something we all care about and it needs to be given priority to ensure that these things get fixed. Since Kilo, priorities have been discussed at the summit. This turns into a spec review which eventually means we get a list of priorities here: http://specs.openstack.org/openstack/nova-specs/#priorities Allocating our finite review bandwidth to these efforts means we have to limit the reviews we do on non-priority items. This is mostly why we now have the non-priority Feature Freeze. For more on this, see below. Blocking a priority effort is one of the few widely acceptable reasons to block someone adding a feature. One of the great advantages of being more explicit about that relationship is that people can step up to help review and/or implement the work that is needed to unblock the feature they want to get landed. This is a key part of being an Open community. Why is there a Feature Freeze (and String Freeze) in Nova? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The main reason Nova has a feature freeze is that it allows people working on docs and translations to sync up with the latest code. Traditionally this happens at the same time across multiple projects, so the docs are synced between what used to be called the "integrated release". We also use this time period as an excuse to focus our development efforts on bug fixes, ideally lower risk bug fixes, and improving test coverage. In theory, with a waterfall hat on, this would be a time for testing and stabilisation of the product. In Nova we have a much stronger focus on keeping every commit stable, by making use of extensive continuous testing. In reality, we frequently see the biggest influx of fixes in the few weeks after the release, as distributions do final testing of the released code. It is hoped that the work on Feature Classification will lead us to better understand the levels of testing of different Nova features, so we will be able to reduce the dependency between Feature Freeze and regression testing. It is also likely that the move away from "integrated" releases will help find a more developer friendly approach to keep the docs and translations in sync. Why is there a non-priority Feature Freeze in Nova? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We have already discussed why we have priority features. The rate at which code can be merged to Nova is primarily constrained by the amount of time able to be spent reviewing code. Given this, earmarking review time for priority items means depriving it from non-priority items. The simplest way to make space for the priority features is to stop reviewing and merging non-priority features for a whole milestone. The idea being developers should focus on bug fixes and priority features during that milestone, rather than working on non-priority features. A known limitation of this approach is developer frustration. Many developers are not being given permission to review code, work on bug fixes or work on priority features, and so feel very unproductive upstream.
An alternative approach of "slots" or "runways" has been considered, that uses a kanban style approach to regulate the influx of work onto the review queue. We are yet to get agreement on a more balanced approach, so the existing system is being continued to ensure priority items are more likely to get the attention they require. Why do you still use Launchpad? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We are actively looking for an alternative to Launchpad's bugs and blueprints. Originally the idea was to create Storyboard. However development stalled for a while so interest waned. The project has become more active recently so it may be worth looking again: https://storyboard.openstack.org/#!/page/about When should I submit my spec? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Ideally we want to get all specs for a release merged before the summit. For things that we can't get agreement on, we can then discuss those at the summit. There will always be ideas that come up at the summit and need to be finalised after the summit. This causes a rush which is best avoided. How can you get the current specs and bug fixes tracked? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As there are a lot of incoming changes, both feature implementation and bug fixes, that core reviewers need to keep up with, the Nova team is using a tracking process. The team creates an etherpad for every new release cycle and tracks the status of blueprints, feature implementations and bug fixes. The purpose of this process is to help with managing the core reviewers’ load, while ensuring that things don’t fall through the cracks. If you have a blueprint or bug fix to track during the release cycle, please add it to the correct section in the etherpad. You can follow the format that is already applied in the different sections. Please check the below template to use as guidance for what information to include at the minimum: - [ optional] - repeat as needed - short note of summary of current status. i.e. needs second +2, needs spec approval. How can I get my code merged faster? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ So no-one is coming to review your code, how do you speed up that process? Firstly, make sure you are following the above process. If it's a feature, make sure you have an approved blueprint. If it's a bug, make sure it is triaged, has its priority set correctly, it has the correct bug tag and is marked as in progress. If the blueprint has all the code up for review, change it from Started into NeedsCodeReview so people know only reviews are blocking you, make sure it hasn't accidentally got marked as implemented. Secondly, if you have a negative review (-1 or -2) and you responded to that in a comment or uploading a new change with some updates, but that reviewer hasn't come back for over a week, it's probably a good time to reach out to the reviewer on IRC (or via email) to see if they could look again now you have addressed their comments. If you can't get agreement, and your review gets stuck (i.e. requires mediation), you can raise your patch during the Nova meeting and we will try to resolve any disagreement. Thirdly, is it in merge conflict with master or are any of the CI tests failing? Particularly any third-party CI tests that are relevant to the code you are changing. If you're fixing something that only occasionally failed before, maybe recheck a few times to prove the tests stay passing. Without green tests, reviewers tend to move on and look at the other patches that have the tests passing. 
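For example, clearing a merge conflict usually just means rebasing your change on the current master and re-uploading it. The following is a minimal sketch of that workflow, assuming the standard git-review tooling and a hypothetical topic branch name:

.. code-block:: console

   $ git checkout my-feature-topic    # hypothetical local branch holding your change
   $ git fetch origin
   $ git rebase origin/master         # resolve conflicts, "git add" the files, then "git rebase --continue"
   $ git review                       # upload the rebased patch set to Gerrit

If a CI job failed on something clearly unrelated to your change, leaving a review comment containing just ``recheck`` is the usual way to ask Zuul to re-run the jobs - but read the logs first so you are not rechecking away a real regression.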
OK, so you have followed all the process (i.e. your patches are getting advertised via the project's tracking mechanisms), and your patches either have no reviews, or only positive reviews. Now what? Have you considered reviewing other people's patches? Firstly, participating in the review process is the best way for you to understand what reviewers want to see in the code you are submitting. As you get more practiced at reviewing it will help you to write "merge-ready" code. Secondly, if you help review other people's code and help get their patches ready for the core reviewers to add a +2, it will free up a lot of non-core and core reviewer time, so they are more likely to get time to review your code. For more details, please see: :ref:`Why do code reviews when I'm not core? ` Please note, I am not recommending you go to ask people on IRC or via email for reviews. Please try to get your code reviewed using the above process first. In many cases multiple direct pings generate frustration on both sides and that tends to be counterproductive. Now you have got your code merged, let's make sure you don't need to fix this bug again. The fact the bug exists means there is a gap in our testing. Your patch should have included some good unit tests to stop the bug coming back. But don't stop there, maybe it's time to add tempest tests, to make sure your use case keeps working? Maybe you need to set up a third party CI so your combination of drivers will keep working? Getting that extra testing in place should stop a whole heap of bugs, again giving reviewers more time to get to the issues or features you want to add in the future. Process Evolution Ideas ======================= We are always evolving our process as we try to improve and adapt to the changing shape of the community. Here we discuss some of the ideas, along with their pros and cons. Splitting out the virt drivers (or other bits of code) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Currently, Nova doesn't have strong enough interfaces to split out the virt drivers, scheduler or REST API. This is seen as the key blocker. Let's look at both sides of the debate here. Reasons for the split: - can have separate core teams for each repo - this leads to quicker turn around times, largely due to focused teams - splitting out things from core means less knowledge required to become core in a specific area Reasons against the split: - loss of interoperability between drivers - this is a core part of Nova's mission, to have a single API across all deployments, and a strong ecosystem of tools and apps built on that - we can overcome some of this with stronger interfaces and functional tests - new features often need changes in the API and virt driver anyway - the new "depends-on" can make these cross-repo dependencies easier - loss of code style consistency across the code base - fear of fragmenting the nova community, leaving few to work on the core of the project - could work in subteams within the main tree TODO - need to complete analysis Subteam recommendation as a +2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are groups of people with great knowledge of particular bits of the code base. It may be a good idea to give their recommendation of a merge greater strength. In addition, having the subteam focus review efforts on a subset of patches should help concentrate the nova-core reviews they get, and increase the velocity of getting code merged. Ideally this would be done with gerrit user "tags".
There are some investigations by sdague in how feasible it would be to add tags to gerrit. Stop having to submit a spec for each release ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As mentioned above, we use blueprints for tracking, and specs to record design decisions. Targeting specs to a specific release is a heavyweight solution and blurs the lines between specs and blueprints. At the same time, we don't want to lose the opportunity to revise existing blueprints. Maybe there is a better balance? What about this kind of process: - backlog has these folders: - backlog/incomplete - merge a partial spec - backlog/complete - merge complete specs (remove tracking details, such as assignee part of the template) - ?? backlog/expired - specs are moved here from incomplete or complete when no longer seem to be given attention (after 1 year, by default) - /implemented - when a spec is complete it gets moved into the release directory and possibly updated to reflect what actually happened - there will no longer be a per-release approved spec list To get your blueprint approved: - add it to the next nova meeting - if a spec is required, update the URL to point to the spec merged in a spec to the blueprint - ensure there is an assignee in the blueprint - a day before the meeting, a note is sent to the ML to review the list before the meeting - discuss any final objections in the nova-meeting - this may result in a request to refine the spec, if things have changed since it was merged - trivial cases can be approved in advance by a nova-driver, so not all folks need to go through the meeting This still needs more thought, but should decouple the spec review from the release process. It is also more compatible with a runway style system, that might be less focused on milestones. Replacing Milestones with SemVer Releases ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can deploy any commit of Nova and upgrade to a later commit in that same release. Making our milestones versioned more like an official release would help signal to our users that people can use the milestones in production, and get a level of upgrade support. It could go something like this: - 14.0.0 is milestone 1 - 14.0.1 is milestone 2 (maybe, because we add features, it should be 14.1.0?) - 14.0.2 is milestone 3 - we might do other releases (once a critical bug is fixed?), as it makes sense, but we will always be the time bound ones - 14.0.3 two weeks after milestone 3, adds only bug fixes (and updates to RPC versions?) - maybe a stable branch is created at this point? - 14.1.0 adds updated translations and co-ordinated docs - this is released from the stable branch? - 15.0.0 is the next milestone, in the following cycle - not the bump of the major version to signal an upgrade incompatibility with 13.x We are currently watching Ironic to see how their use of semver goes, and see what lessons need to be learnt before we look to maybe apply this technique during M. Feature Classification ~~~~~~~~~~~~~~~~~~~~~~ This is a look at moving forward the :doc:`support matrix effort `. 
The things we need to cover: - note what is tested, and how often that test passes (via 3rd party CI, or otherwise) - link to current test results for stable and master (time since last pass, recent pass rate, etc) - TODO - sync with jogo on his third party CI audit and getting trends, ask infra - include experimental features (untested feature) - get better at the impact of volume drivers and network drivers on available features (not just hypervisor drivers) Main benefits: - users get a clear picture of what is known to work - be clear about when experimental features are removed, if no tests are added - allows a way to add experimental things into Nova, and track either their removal or maturation .. _PTL: https://governance.openstack.org/tc/reference/projects/nova.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/project-scope.rst0000664000175000017500000003220600000000000022513 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Scope of the Nova project ========================== Nova is focusing on doing an awesome job of its core mission. This document aims to clarify that core mission. This is a living document to help record where we agree about what Nova should and should not be doing, and why. Please treat this as a discussion of interesting, and hopefully useful, examples. It is not intended to be an exhaustive policy statement. .. _nova-mission: Mission ------- Our mission statement starts with: To implement services and associated libraries to provide massively scalable, on demand, self service access to compute resources. Our official mission statement also includes the following examples of compute resources: bare metal, virtual machines, and containers. For the full official mission statement see: https://governance.openstack.org/tc/reference/projects/nova.html#mission This document aims to help clarify what the mission statement means. Compute Resources ------------------ Nova is all about access to compute resources. This section looks at the types of compute resource Nova works with. Virtual Servers **************** Nova was originally focused purely on providing access to virtual servers running on a variety of different hypervisors. The majority of users use Nova only to provide access to virtual servers from a single hypervisor, however, its possible to have a Nova deployment include multiple different types of hypervisors, while at the same time offering containers and bare metal servers. Containers *********** The Nova API is not a good fit for a lot of container use cases. The Magnum project intends to deliver a good container experience built on top of Nova. Nova allows you to use containers in a similar way to how you would use on demand virtual machines. We want to maintain this distinction, so we maintain the integrity and usefulness of the existing Nova API. 
For example, Nova is not designed to spin up new containers for every apache request, nor do we plan to control what goes on inside containers. They get the same metadata provided to them as virtual machines, to do with as they see fit. Bare Metal Servers ******************* The Ironic project has been pioneering the idea of treating physical machines in a similar way to on demand virtual machines. Nova's driver is able to allow a multi-tenant cloud style use of Ironic controlled resources. While currently there are operations that are a fundamental part of our virtual machine abstraction that are not currently available in ironic, such as attaching iSCSI volumes, it does not fundamentally change the semantics of our API, and as such is a suitable Nova driver. Moreover, it is expected that gap will shrink over time. Driver Parity ************** Our goal for the Nova API is to provide a consistent abstraction to access on demand compute resources. We are not aiming to expose all features of all hypervisors. Where the details of the underlying hypervisor leak through our APIs, we have failed in this goal, and we must work towards better abstractions that are more `interoperable`_. This is one reason why we put so much emphasis on the use of Tempest in third party CI systems. The key tenet of driver parity is that if a feature is supported in a driver, it must feel the same to users, as if they were using any of the other drivers that also support that feature. The exception is performance: drivers may have widely different performance characteristics, but the effect of that API call must be identical. Following on from that, should a feature only be added to one of the drivers, we must make every effort to ensure another driver could be implemented to match that behavior. It is important that drivers support enough features, so the API actually provides a consistent abstraction. For example, being unable to create a server or delete a server would severely undermine that goal. In fact, Nova only ever manages resources it creates. .. _interoperable: https://www.openstack.org/brand/interop/ Upgrades --------- Nova is widely used in production. As such we need to respect the needs of our existing users. At the same time we need to evolve the current code base, including both adding and removing features. This section outlines how we expect people to upgrade, and what we do to help existing users that upgrade in the way we expect. Upgrade expectations ********************* Our upgrade plan is to concentrate on upgrades from N-1 to the Nth release. So for someone running juno, they would have to upgrade to kilo before upgrading to liberty. This is designed to balance the need for a smooth upgrade, against having to keep maintaining the compatibility code to make that upgrade possible. We talk about this approach as users consuming the stable branch. In addition, we also support users upgrading from the master branch, technically, between any two commits within the same release cycle. In certain cases, when crossing release boundaries, you must upgrade to the stable branch, before upgrading to the tip of master. This is to support those that are doing some level of "Continuous Deployment" from the tip of master into production. Many of the public cloud providers running OpenStack use this approach so they are able to get access to bug fixes and features they work on into production sooner. This becomes important when you consider reverting a commit that turns out to have been a bad idea.
We have to assume any public API change may have already been deployed into production, and as such cannot be reverted. In a similar way, a database migration may have been deployed. Any commit that will affect an upgrade gets the UpgradeImpact tag added to the commit message, so there is no requirement to wait for release notes. Don't break existing users **************************** As a community we are aiming towards a smooth upgrade process, where users must be unaware you have just upgraded your deployment, except that there might be additional feature available and improved stability and performance of some existing features. We don't ever want to remove features our users rely on. Sometimes we need to migrate users to a new implementation of that feature, which may require extra steps by the deployer, but the end users must be unaffected by such changes. However there are times when some features become a problem to maintain, and fall into disrepair. We aim to be honest with our users and highlight the issues we have, so we are in a position to find help to fix that situation. Ideally we are able to rework the feature so it can be maintained, but in some rare cases, the feature no longer works, is not tested, and no one is stepping forward to maintain that feature, the best option can be to remove that feature. When we remove features, we need to warn users by first marking those features as deprecated, before we finally remove the feature. The idea is to get feedback on how important the feature is to our user base. Where a feature is important we work with the whole community to find a path forward for those users. API Scope ---------- Nova aims to provide a highly interoperable and stable REST API for our users to get self-service access to compute resources. No more API Proxies ******************** Nova API current has some APIs that are now (in kilo) mostly just a proxy to other OpenStack services. If it were possible to remove a public API, these are some we might start with. As such, we don't want to add any more. The first example is the API that is a proxy to the Glance v1 API. As Glance moves to deprecate its v1 API, we need to translate calls from the old v1 API we expose, to Glance's v2 API. The next API to mention is the networking APIs, in particular the security groups API. Most of these APIs exist from when ``nova-network`` existed and the proxies were added during the transition. However, security groups has a much richer Neutron API, and if you use both Nova API and Neutron API, the mismatch can lead to some very unexpected results, in certain cases. Our intention is to avoid adding to the problems we already have in this area. No more Orchestration ********************** Nova is a low level infrastructure API. It is plumbing upon which richer ideas can be built. Heat and Magnum being great examples of that. While we have some APIs that could be considered orchestration, and we must continue to maintain those, we do not intend to add any more APIs that do orchestration. Third Party APIs ***************** Nova aims to focus on making a great API that is highly interoperable across all Nova deployments. We have historically done a very poor job of implementing and maintaining compatibility with third party APIs inside the Nova tree. As such, all new efforts should instead focus on external projects that provide third party compatibility on top of the Nova API. 
Where needed, we will work with those projects to extend the Nova API such that it's possible to add that functionality on top of the Nova API. However, we do not intend to add API calls for those services to persist third party API specific information in the Nova database. Instead we want to focus on additions that enhance the existing Nova API. Scalability ------------ Our mission includes the text "massively scalable". Let's discuss what that means. Nova has three main axes of scale: the number of API requests, the number of compute nodes and the number of active instances. In many cases the number of compute nodes and active instances are so closely related, you rarely need to consider those separately. There are other items, such as the number of tenants, and the number of instances per tenant. But, again, these are very rarely the key scale issue. It's possible to have a small cloud with lots of requests for very short lived VMs, or a large cloud with lots of longer lived VMs. These need to scale out different components of the Nova system to reach their required level of scale. Ideally all Nova components are either scaled out to match the number of API requests and build requests, or scaled out to match the number of running servers. If we create components that have their load increased relative to both of these items, we can run into inefficiencies or resource contention. Although it is possible to make that work in some cases, this should always be considered. We intend Nova to be usable for both small and massive deployments. Where small involves 1-10 hypervisors and massive deployments are single regions with greater than 10,000 hypervisors. That should be seen as our current goal, not an upper limit. There are some features that would not scale well for either the small scale or the very large scale. Ideally we would not accept these features, but if there is a strong case to add such features, we must work hard to ensure you can run without that feature at the scale you are required to run. IaaS not Batch Processing -------------------------- Currently Nova focuses on providing on-demand compute resources in the style of classic Infrastructure-as-a-service clouds. A large pool of compute resources that people can consume in a self-service way. Nova is not currently optimized for dealing with a larger number of requests for compute resources compared with the amount of compute resources currently available. We generally assume that a level of spare capacity is maintained for future requests. This is needed for users who want to quickly scale out, and extra capacity becomes available again as users scale in. While spare capacity is not strictly required, we are not optimizing for a system that aims to run at 100% capacity at all times. As such our quota system is more focused on limiting the current level of resource usage, rather than ensuring a fair balance of resources between all incoming requests. This doesn't exclude adding features to support making a better use of spare capacity, such as "spot instances". There have been discussions around how to change Nova to work better for batch job processing. But the current focus is on how to layer such an abstraction on top of the basic primitives Nova currently provides, possibly adding additional APIs where that makes good sense. Should this turn out to be impractical, we may have to revise our approach. Deployment and Packaging ------------------------- Nova does not plan on creating its own packaging or deployment systems.
Our CI infrastructure is powered by Devstack. This can also be used by developers to test their work on a full deployment of Nova. We do not develop any deployment or packaging for production deployments. Being widely adopted by many distributions and commercial products, we instead choose to work with all those parties to ensure they are able to effectively package and deploy Nova. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/ptl-guide.rst0000664000175000017500000002475600000000000021643 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Chronological PTL guide ======================= This is just a reference guide that a PTL may use as an aid, if they choose. New PTL ------- * Update the nova meeting chair * https://github.com/openstack-infra/irc-meetings/blob/master/meetings/nova-team-meeting.yaml * Update the team wiki * https://wiki.openstack.org/wiki/Nova#People * Get acquainted with the release schedule * Example: https://releases.openstack.org/antelope/schedule.html * Also, note that we usually create a specific wiki page for each cycle like https://wiki.openstack.org/wiki/Nova/2023.1_Release_Schedule but it's preferred to use the main release schedule above. Project Team Gathering ---------------------- * Create PTG planning etherpad, retrospective etherpad and alert about it in nova meeting and dev mailing list * Example: https://etherpad.opendev.org/p/nova-antelope-ptg * Run sessions at the PTG * Do a retro of the previous cycle * Make agreement on the agenda for this release, including but not exhaustively: * Number of review days, for either specs or implementation * Define the Spec approval and Feature freeze dates * Modify the release schedule if needed by adding the new dates. As an example : https://review.opendev.org/c/openstack/releases/+/877094 * Discuss the implications of the `SLURP or non-SLURP`__ current release .. __: https://governance.openstack.org/tc/resolutions/20220210-release-cadence-adjustment.html * Sign up for group photo at the PTG (if applicable) After PTG --------- * Send PTG session summaries to the dev mailing list * Add `RFE bugs`__ if you have action items that are simple to do but without a owner yet. * Update IRC #openstack-nova channel topic to point to new development-planning etherpad. .. __: https://bugs.launchpad.net/nova/+bugs?field.tag=rfe A few weeks before milestone 1 ------------------------------ * Plan a spec review day * Periodically check the series goals others have proposed in the “Set series goals” link: * Example: https://blueprints.launchpad.net/nova/antelope/+setgoals Milestone 1 ----------- * Do milestone release of nova and python-novaclient (in launchpad only, can be optional) * This is launchpad bookkeeping only. With the latest release team changes, projects no longer do milestone releases. 
See: https://releases.openstack.org/reference/release_models.html#cycle-with-milestones-legacy * For nova, set the launchpad milestone release as “released” with the date * Release other libraries if there are significant enough changes since last release. When releasing the first version of a library for the cycle, bump the minor version to leave room for future stable branch releases * os-vif * placement * os-traits / os-resource-classes * Release stable branches of nova * ``git checkout `` * ``git log --no-merges ..`` * Examine commits that will go into the release and use it to decide whether the release is a major, minor, or revision bump according to semver * Then, propose the release with version according to semver x.y.z * X - backward-incompatible changes * Y - features * Z - bug fixes * Use the new-release command to generate the release * https://releases.openstack.org/reference/using.html#using-new-release-command Summit ------ * Prepare the project update presentation. Enlist help of others * Prepare the on-boarding session materials. Enlist help of others * Prepare the operator meet-and-greet session. Enlist help of others A few weeks before milestone 2 ------------------------------ * Plan a spec review day (optional) Milestone 2 ----------- * Spec freeze (if agreed) * Release nova and python-novaclient (if new features were merged) * Release other libraries as needed * Stable branch releases of nova * For nova, set the launchpad milestone release as “released” with the date (can be optional) Shortly after spec freeze ------------------------- * Create a blueprint status etherpad to help track, especially non-priority blueprint work, to help things get done by Feature Freeze (FF). Example: * https://etherpad.opendev.org/p/nova-antelope-blueprint-status * Create or review a patch to add the next release’s specs directory so people can propose specs for next release after spec freeze for current release Non-client library release freeze --------------------------------- * Final release for os-vif * Final release for os-traits * Final release for os-resource-classes Milestone 3 ----------- * Feature freeze day * Client library freeze, release python-novaclient and osc-placement * Close out all blueprints, including “catch all” blueprints like mox, versioned notifications * Stable branch releases of nova * For nova, set the launchpad milestone release as “released” with the date * Start writing the `cycle highlights `__ Week following milestone 3 -------------------------- * Consider announcing the FFE (feature freeze exception process) to have people propose FFE requests to a special etherpad where they will be reviewed and possibly sponsored: * https://docs.openstack.org/nova/latest/contributor/process.html#non-priority-feature-freeze .. 
note:: if there is only a short time between FF and RC1 (lately it’s been 2 weeks), then the only likely candidates will be low-risk things that are almost done * Mark the max microversion for the release in the :doc:`/reference/api-microversion-history`: * Example: https://review.opendev.org/c/openstack/nova/+/719313 A few weeks before RC --------------------- * Make a RC1 todos etherpad and tag bugs as ``-rc-potential`` and keep track of them, example: * https://etherpad.opendev.org/p/nova-antelope-rc-potential * Go through the bug list and identify any rc-potential bugs and tag them RC -- * Do steps described on the release checklist wiki: * https://wiki.openstack.org/wiki/Nova/ReleaseChecklist * If we want to drop backward-compat RPC code, we have to do a major RPC version bump and coordinate it just before the major release: * https://wiki.openstack.org/wiki/RpcMajorVersionUpdates * Example: https://review.opendev.org/541035 * “Merge latest translations" means translation patches * Check for translations with: * https://review.opendev.org/#/q/status:open+project:openstack/nova+branch:master+topic:zanata/translations * Should NOT plan to have more than one RC if possible. RC2 should only happen if there was a mistake and something was missed for RC, or a new regression was discovered * Do the RPC version aliases just before RC1 if no further RCs are planned. Else do them at RC2. In the past, we used to update all service version aliases (example: https://review.opendev.org/230132) but since we really only support compute being backlevel/old during a rolling upgrade, we only need to update the compute service alias, see related IRC discussion: http://eavesdrop.openstack.org/irclogs/%23openstack-nova/%23openstack-nova.2018-08-08.log.html#t2018-08-08T17:13:45 * Example: https://review.opendev.org/642599 * More detail on how version aliases work: https://docs.openstack.org/nova/latest/configuration/config.html#upgrade-levels * Write the reno prelude for the release GA * Example: https://review.opendev.org/644412 * Push the cycle-highlights in marketing-friendly sentences and propose to the openstack/releases repo. 
Usually based on reno prelude but made more readable and friendly * Example: https://review.opendev.org/644697 Immediately after RC -------------------- * Look for bot proposed changes to reno and stable/ * Follow the post-release checklist * https://wiki.openstack.org/wiki/Nova/ReleaseChecklist * Drop old RPC compat code (if there was a RPC major version bump and if agreed on at the PTG) * Example: https://review.opendev.org/543580 * Bump the oldest supported compute service version (if master branch is now on a non-SLURP version) * https://review.opendev.org/#/c/738482/ * Create the launchpad series for the next cycle * Set the development focus of the project to the new cycle series * Set the status of the new series to “active development” * Set the last series status to “current stable branch release” * Set the previous to last series status to “supported” * Repeat launchpad steps ^ for python-novaclient (optional) * Repeat launchpad steps ^ for placement * Register milestones in launchpad for the new cycle based on the new cycle release schedule * Make sure the specs directory for the next cycle gets created so people can start proposing new specs * Make sure to move implemented specs from the previous release * Use ``tox -e move-implemented-specs `` * Also remove template from ``doc/source/specs//index.rst`` * Also delete template file ``doc/source/specs//template.rst`` * Create new release wiki: * Example: https://wiki.openstack.org/wiki/Nova/2023.1_Release_Schedule * Update the contributor guide for the new cycle * Example: https://review.opendev.org/#/c/754427/ Miscellaneous Notes ------------------- How to approve a launchpad blueprint ************************************ * Set the approver as the person who +W the spec, or set to self if it’s specless * Set the Direction => Approved and Definition => Approved and make sure the Series goal is set to the current release. If code is already proposed, set Implementation => Needs Code Review * Add a comment to the Whiteboard explaining the approval, with a date (launchpad does not record approval dates). For example: “We discussed this in the team meeting and agreed to approve this for . -- ” How to complete a launchpad blueprint ************************************* * Set Implementation => Implemented. The completion date will be recorded by launchpad ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/releasenotes.rst0000664000175000017500000000516500000000000022433 0ustar00zuulzuul00000000000000.. _releasenotes: Release Notes ============= What is reno ? -------------- Nova uses `reno `__ for providing release notes in-tree. That means that a patch can include a *reno file* or a series can have a follow-on change containing that file explaining what the impact is. A *reno file* is a YAML file written in the ``releasenotes/notes`` tree which is generated using the *reno* tool this way: .. code-block:: bash $ tox -e venv -- reno new where usually ```` can be ``bp-`` for a blueprint or ``bug-XXXXXX`` for a bugfix. Refer to the `reno documentation `__ for more information. When a release note is needed ----------------------------- A release note is required anytime a reno section is needed. Below are some examples for each section. Any sections that would be blank should be left out of the note file entirely. 
If no section is needed, then you know you don't need to provide a release note :-) * ``upgrade`` * The patch has an `UpgradeImpact `_ tag * A DB change needs some deployer modification (like a migration) * A configuration option change (deprecation, removal or modified default) * some specific changes that have a `DocImpact `_ tag but require further action from an deployer perspective * any patch that requires an action from the deployer in general * ``security`` * If the patch fixes a known vulnerability * ``features`` * If the patch has an `APIImpact `_ tag * For nova-manage and python-novaclient changes, if it adds or changes a new command, including adding new options to existing commands * not all blueprints in general, just the ones impacting a :doc:`/contributor/policies` * a new virt driver is provided or an existing driver impacts the :doc:`HypervisorSupportMatrix ` * ``critical`` * Bugfixes categorized as Critical in Launchpad *impacting users* * ``fixes`` * No clear definition of such bugfixes. Hairy long-standing bugs with high importance that have been fixed are good candidates though. Three sections are left intentionally unexplained (``prelude``, ``issues`` and ``other``). Those are targeted to be filled in close to the release time for providing details about the soon-ish release. Don't use them unless you know exactly what you are doing. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/resize-and-cold-migrate.rst0000664000175000017500000001562600000000000024353 0ustar00zuulzuul00000000000000======================= Resize and cold migrate ======================= The `resize API`_ and `cold migrate API`_ are commonly confused in nova because the internal `API code`_, `conductor code`_ and `compute code`_ use the same methods. This document explains some of the differences in what happens between a resize and cold migrate operation. For the most part this document describes :term:`same-cell resize `. For details on :term:`cross-cell resize `, refer to :doc:`/admin/configuration/cross-cell-resize`. High level ~~~~~~~~~~ :doc:`Cold migrate ` is an operation performed by an administrator to power off and move a server from one host to a **different** host using the **same** flavor. Volumes and network interfaces are disconnected from the source host and connected on the destination host. The type of file system between the hosts and image backend determine if the server files and disks have to be copied. If copy is necessary then root and ephemeral disks are copied and swap disks are re-created. :doc:`Resize ` is an operation which can be performed by a non-administrative owner of the server (the user) with a **different** flavor. The new flavor can change certain aspects of the server such as the number of CPUS, RAM and disk size. Otherwise for the most part the internal details are the same as a cold migration. Scheduling ~~~~~~~~~~ Depending on how the API is configured for :oslo.config:option:`allow_resize_to_same_host`, the server may be able to be resized on the current host. *All* compute drivers support *resizing* to the same host but *only* the vCenter driver supports *cold migrating* to the same host. Enabling resize to the same host is necessary for features such as strict affinity server groups where there are more than one server in the same affinity group. Starting with `microversion 2.56`_ an administrator can specify a destination host for the cold migrate operation. 
Resize does not allow specifying a destination host. Flavor ~~~~~~ As noted above, with resize the flavor *must* change and with cold migrate the flavor *will not* change. Resource claims ~~~~~~~~~~~~~~~ Both resize and cold migration perform a `resize claim`_ on the destination node. Historically the resize claim was meant as a safety check on the selected node to work around race conditions in the scheduler. Since the scheduler started `atomically claiming`_ VCPU, MEMORY_MB and DISK_GB allocations using Placement the role of the resize claim has been reduced to detecting the same conditions but for resources like PCI devices and NUMA topology which, at least as of the 20.0.0 (Train) release, are not modeled in Placement and as such are not atomic. If this claim fails, the operation can be rescheduled to an alternative host, if there are any. The number of possible alternative hosts is determined by the :oslo.config:option:`scheduler.max_attempts` configuration option. Allocations ~~~~~~~~~~~ Since the 16.0.0 (Pike) release, the scheduler uses the `placement service`_ to filter compute nodes (resource providers) based on information in the flavor and image used to build the server. Once the scheduler runs through its filters and weighers and picks a host, resource class `allocations`_ are atomically consumed in placement with the server as the consumer. During both resize and cold migrate operations, the allocations held by the server consumer against the source compute node resource provider are `moved`_ to a `migration record`_ and the scheduler will create allocations, held by the instance consumer, on the selected destination compute node resource provider. This is commonly referred to as `migration-based allocations`_ which were introduced in the 17.0.0 (Queens) release. If the operation is successful and confirmed, the source node allocations held by the migration record are `dropped`_. If the operation fails or is reverted, the source compute node resource provider allocations held by the migration record are `reverted`_ back to the instance consumer and the allocations against the destination compute node resource provider are dropped. Summary of differences ~~~~~~~~~~~~~~~~~~~~~~ .. list-table:: :header-rows: 1 * - - Resize - Cold migrate * - New flavor - Yes - No * - Authorization (default) - Admin or owner (user) Policy rule: ``os_compute_api:servers:resize`` - Admin only Policy rule: ``os_compute_api:os-migrate-server:migrate`` * - Same host - Maybe - Only vCenter * - Can specify target host - No - Yes (microversion >= 2.56) Sequence Diagrams ~~~~~~~~~~~~~~~~~ The following diagrams are current as of the 21.0.0 (Ussuri) release. Resize ------ This is the sequence of calls to get the server to ``VERIFY_RESIZE`` status. .. image:: /_static/images/resize/resize.svg :alt: Resize standard workflow Confirm resize -------------- This is the sequence of calls when confirming `or deleting`_ a server in ``VERIFY_RESIZE`` status. Note that in the below diagram, if confirming a resize while deleting a server the API synchronously calls the source compute service. .. image:: /_static/images/resize/resize_confirm.svg :alt: Resize confirm workflow Revert resize ------------- This is the sequence of calls when reverting a server in ``VERIFY_RESIZE`` status. .. image:: /_static/images/resize/resize_revert.svg :alt: Resize revert workflow .. _resize API: https://docs.openstack.org/api-ref/compute/#resize-server-resize-action .. 
_cold migrate API: https://docs.openstack.org/api-ref/compute/#migrate-server-migrate-action .. _API code: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/api.py#L3568 .. _conductor code: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/conductor/manager.py#L297 .. _compute code: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/manager.py#L4445 .. _microversion 2.56: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id52 .. _resize claim: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/resource_tracker.py#L248 .. _atomically claiming: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/scheduler/filter_scheduler.py#L239 .. _moved: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/conductor/tasks/migrate.py#L28 .. _placement service: https://docs.openstack.org/placement/latest/ .. _allocations: https://docs.openstack.org/api-ref/placement/#allocations .. _migration record: https://docs.openstack.org/api-ref/compute/#migrations-os-migrations .. _migration-based allocations: https://specs.openstack.org/openstack/nova-specs/specs/queens/implemented/migration-allocations.html .. _dropped: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/manager.py#L4048 .. _reverted: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/manager.py#L4233 .. _or deleting: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/api.py#L2135 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.2576077 nova-32.0.0/doc/source/contributor/testing/0000775000175000017500000000000000000000000020656 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/testing/down-cell.rst0000664000175000017500000003114500000000000023300 0ustar00zuulzuul00000000000000================== Testing Down Cells ================== This document describes how to recreate a down-cell scenario in a single-node devstack environment. This can be useful for testing the reliability of the controller services when a cell in the deployment is down. Setup ===== DevStack config --------------- This guide is based on a devstack install from the Train release using an Ubuntu Bionic 18.04 VM with 8 VCPU, 8 GB RAM and 200 GB of disk following the `All-In-One Single Machine`_ guide. The following minimal local.conf was used: .. code-block:: ini [[local|localrc]] # Define passwords OS_PASSWORD=openstack1 SERVICE_TOKEN=$OS_PASSWORD ADMIN_PASSWORD=$OS_PASSWORD MYSQL_PASSWORD=$OS_PASSWORD RABBIT_PASSWORD=$OS_PASSWORD SERVICE_PASSWORD=$OS_PASSWORD # Logging config LOGFILE=$DEST/logs/stack.sh.log LOGDAYS=2 # Disable non-essential services disable_service horizon tempest .. _All-In-One Single Machine: https://docs.openstack.org/devstack/latest/guides/single-machine.html Populate cell1 -------------- Create a test server first so there is something in cell1: .. code-block:: console $ source openrc admin admin $ IMAGE=$(openstack image list -f value -c ID) $ openstack server create --wait --flavor m1.tiny --image $IMAGE cell1-server Take down cell1 =============== Break the connection to the cell1 database by changing the ``database_connection`` URL, in this case with an invalid host IP: .. 
code-block:: console mysql> select database_connection from cell_mappings where name='cell1'; +-------------------------------------------------------------------+ | database_connection | +-------------------------------------------------------------------+ | mysql+pymysql://root:openstack1@127.0.0.1/nova_cell1?charset=utf8 | +-------------------------------------------------------------------+ 1 row in set (0.00 sec) mysql> update cell_mappings set database_connection='mysql+pymysql://root:openstack1@192.0.0.1/nova_cell1?charset=utf8' where name='cell1'; Query OK, 1 row affected (0.01 sec) Rows matched: 1 Changed: 1 Warnings: 0 Update controller services ========================== Prepare the controller services for the down cell. See :ref:`Handling cell failures ` for details. Modify nova.conf ---------------- Configure the API to avoid long timeouts and slow start times due to `bug 1815697`_ by modifying ``/etc/nova/nova.conf``: .. code-block:: ini [database] ... max_retries = 1 retry_interval = 1 [upgrade_levels] ... compute = stein # N-1 from train release, just something other than "auto" .. _bug 1815697: https://bugs.launchpad.net/nova/+bug/1815697 Restart services ---------------- .. note:: It is useful to tail the n-api service logs in another screen to watch for errors / warnings in the logs due to down cells: .. code-block:: console $ sudo journalctl -f -a -u devstack@n-api.service Restart controller services to flush the cell cache: .. code-block:: console $ sudo systemctl restart devstack@n-api.service devstack@n-super-cond.service devstack@n-sch.service Test cases ========== 1. Try to create a server which should fail and go to cell0. .. code-block:: console $ openstack server create --wait --flavor m1.tiny --image $IMAGE cell0-server You can expect to see errors like this in the n-api logs: .. 
code-block:: console Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context [None req-fdaff415-48b9-44a7-b4c3-015214e80b90 None None] Error gathering result from cell 4f495a21-294a-4051-9a3d-8b34a250bbb4: DBConnectionError: (pymysql.err.OperationalError) (2003, "Can't connect to MySQL server on u'192.0.0.1' ([Errno 101] ENETUNREACH)") (Background on this error at: http://sqlalche.me/e/e3q8) Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context Traceback (most recent call last): Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/opt/stack/nova/nova/context.py", line 441, in gather_result Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context result = fn(cctxt, *args, **kwargs) Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/opt/stack/nova/nova/db/sqlalchemy/api.py", line 211, in wrapper Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context with reader_mode.using(context): Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__ Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context return self.gen.next() Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/usr/local/lib/python2.7/dist-packages/oslo_db/sqlalchemy/enginefacade.py", line 1061, in _transaction_scope Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context context=context) as resource: Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__ Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context return self.gen.next() Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/usr/local/lib/python2.7/dist-packages/oslo_db/sqlalchemy/enginefacade.py", line 659, in _session Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context bind=self.connection, mode=self.mode) Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/usr/local/lib/python2.7/dist-packages/oslo_db/sqlalchemy/enginefacade.py", line 418, in _create_session Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context self._start() Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/usr/local/lib/python2.7/dist-packages/oslo_db/sqlalchemy/enginefacade.py", line 510, in _start Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context engine_args, maker_args) Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/usr/local/lib/python2.7/dist-packages/oslo_db/sqlalchemy/enginefacade.py", line 534, in _setup_for_connection Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context sql_connection=sql_connection, **engine_kwargs) Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/usr/local/lib/python2.7/dist-packages/debtcollector/renames.py", line 43, in decorator Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context return wrapped(*args, **kwargs) Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/usr/local/lib/python2.7/dist-packages/oslo_db/sqlalchemy/engines.py", line 201, in create_engine Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context test_conn = _test_connection(engine, max_retries, retry_interval) Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context 
File "/usr/local/lib/python2.7/dist-packages/oslo_db/sqlalchemy/engines.py", line 387, in _test_connection Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context six.reraise(type(de_ref), de_ref) Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "", line 3, in reraise Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context DBConnectionError: (pymysql.err.OperationalError) (2003, "Can't connect to MySQL server on u'192.0.0.1' ([Errno 101] ENETUNREACH)") (Background on this error at: http://sqlalche.me/e/e3q8) Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context Apr 04 20:48:22 train devstack@n-api.service[10884]: WARNING nova.objects.service [None req-1cf4bf5c-2f74-4be0-a18d-51ff81df57dd admin admin] Failed to get minimum service version for cell 4f495a21-294a-4051-9a3d-8b34a250bbb4 2. List servers with the 2.69 microversion for down cells. .. note:: Requires python-openstackclient >= 3.18.0 for v2.69 support. The server in cell1 (which is down) will show up with status UNKNOWN: .. code-block:: console $ openstack --os-compute-api-version 2.69 server list +--------------------------------------+--------------+---------+----------+--------------------------+--------+ | ID | Name | Status | Networks | Image | Flavor | +--------------------------------------+--------------+---------+----------+--------------------------+--------+ | 8e90f1f0-e8dd-4783-8bb3-ec8d594e60f1 | | UNKNOWN | | | | | afd45d84-2bd7-4e49-9dff-93359f742bc1 | cell0-server | ERROR | | cirros-0.4.0-x86_64-disk | | +--------------------------------------+--------------+---------+----------+--------------------------+--------+ 3. Using v2.1 the UNKNOWN server is filtered out by default due to :oslo.config:option:`api.list_records_by_skipping_down_cells`: .. code-block:: console $ openstack --os-compute-api-version 2.1 server list +--------------------------------------+--------------+--------+----------+--------------------------+---------+ | ID | Name | Status | Networks | Image | Flavor | +--------------------------------------+--------------+--------+----------+--------------------------+---------+ | afd45d84-2bd7-4e49-9dff-93359f742bc1 | cell0-server | ERROR | | cirros-0.4.0-x86_64-disk | m1.tiny | +--------------------------------------+--------------+--------+----------+--------------------------+---------+ 4. Configure nova-api with ``list_records_by_skipping_down_cells=False`` .. code-block:: ini [api] list_records_by_skipping_down_cells = False 5. Restart nova-api and then listing servers should fail: .. code-block:: console $ sudo systemctl restart devstack@n-api.service $ openstack --os-compute-api-version 2.1 server list Unexpected API Error. Please report this at http://bugs.launchpad.net/nova/ and attach the Nova API log if possible. (HTTP 500) (Request-ID: req-e2264d67-5b6c-4f17-ae3d-16c7562f1b69) 6. Try listing compute services with a down cell. The services from the down cell are skipped: .. 
code-block:: console $ openstack --os-compute-api-version 2.1 compute service list +----+------------------+-------+----------+---------+-------+----------------------------+ | ID | Binary | Host | Zone | Status | State | Updated At | +----+------------------+-------+----------+---------+-------+----------------------------+ | 2 | nova-scheduler | train | internal | enabled | up | 2019-04-04T21:12:47.000000 | | 6 | nova-consoleauth | train | internal | enabled | up | 2019-04-04T21:12:38.000000 | | 7 | nova-conductor | train | internal | enabled | up | 2019-04-04T21:12:47.000000 | +----+------------------+-------+----------+---------+-------+----------------------------+ With 2.69 the nova-compute service from cell1 is shown with status UNKNOWN: .. code-block:: console $ openstack --os-compute-api-version 2.69 compute service list +--------------------------------------+------------------+-------+----------+---------+-------+----------------------------+ | ID | Binary | Host | Zone | Status | State | Updated At | +--------------------------------------+------------------+-------+----------+---------+-------+----------------------------+ | f68a96d9-d994-4122-a8f9-1b0f68ed69c2 | nova-scheduler | train | internal | enabled | up | 2019-04-04T21:13:47.000000 | | 70cd668a-6d60-4a9a-ad83-f863920d4c44 | nova-consoleauth | train | internal | enabled | up | 2019-04-04T21:13:38.000000 | | ca88f023-1de4-49e0-90b0-581e16bebaed | nova-conductor | train | internal | enabled | up | 2019-04-04T21:13:47.000000 | | | nova-compute | train | | UNKNOWN | | | +--------------------------------------+------------------+-------+----------+---------+-------+----------------------------+ Future ====== This guide could be expanded for having multiple non-cell0 cells where one cell is down while the other is available and go through scenarios where the down cell is marked as disabled to take it out of scheduling consideration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/testing/eventlet-profiling.rst0000664000175000017500000002742700000000000025241 0ustar00zuulzuul00000000000000======================= Profiling With Eventlet ======================= When performance of one of the Nova services is worse than expected, and other sorts of analysis do not lead to candidate fixes, profiling is an excellent tool for producing detailed analysis of what methods in the code are called the most and which consume the most time. Because most Nova services use eventlet_, the standard profiling tool provided with Python, cProfile_, will not work. Something is required to keep track of changing tasks. Thankfully eventlet comes with ``eventlet.green.profile.Profile``, a mostly undocumented class that provides a similar (but not identical) API to the one provided by Python's ``Profile`` while outputting the same format. .. note:: The eventlet Profile outputs the ``prof`` format produced by ``profile``, which is not the same as that output by ``cProfile``. Some analysis tools (for example, SnakeViz_) only read the latter so the options for analyzing eventlet profiling are not always deluxe (see below). Setup ===== This guide assumes the Nova service being profiled is running devstack, but that is not necessary. What is necessary is that the code associated with the service can be changed and the service restarted, in place. 
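To make the shape of the eventlet ``Profile`` API concrete before diving into the full example, here is a minimal, self-contained sketch (not taken from the Nova tree) of wrapping an arbitrary callable with the profiler; the helper name ``profiled`` and the output path are placeholders chosen for illustration:

.. code-block:: python

   from eventlet.green import profile


   def profiled(func, output_path, *args, **kwargs):
       """Run func under the eventlet profiler and dump the stats to a file."""
       pr = profile.Profile()
       pr.start()  # eventlet uses start()/stop() rather than enable()/disable()
       try:
           return func(*args, **kwargs)
       finally:
           pr.stop()
           # The dump uses the 'prof' format and can be read later with pstats.
           pr.dump_stats(output_path)

The same pattern, inlined at the start and end of the method under study, is what the example later in this document applies to ``_select_destinations``.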
Profiling the entire service will produce mostly noise and the output will be confusing because different tasks will operate during the profile run. It is better to begin the process with a candidate task or method *within* the service that can be associated with an identifier. For example, ``select_destinations`` in the ``SchedulerManager`` can be associated with the list of ``instance_uuids`` passed to it and it runs only once for that set of instance UUIDs. The process for profiling is: #. Identify the method to be profiled. #. Populate the environment with sufficient resources to exercise the code. For example you may wish to use the FakeVirtDriver_ to have nova aware of multiple ``nova-compute`` processes. Or you may wish to launch many instances if you are evaluating a method that loops over instances. #. At the start of that method, change the code to instantiate a ``Profile`` object and ``start()`` it. #. At the end of that method, change the code to ``stop()`` profiling and write the data (with ``dump_stats()``) to a reasonable location. #. Restart the service. #. Cause the method being evaluated to run. #. Analyze the profile data with the pstats_ module. .. note:: ``stop()`` and ``start()`` are two of the ways in which the eventlet ``Profile`` API differs from the stdlib. There the methods are ``enable()`` and ``disable()``. Example ======= For this example we will analyze ``select_destinations`` in the ``FilterScheduler``. A known problem is that it does excessive work when presented with too many candidate results from the Placement service. We'd like to know why. We'll configure and run devstack_ with FakeVirtDriver_ so there are several candidate hypervisors (the following ``local.conf`` is also useful for other profiling and benchmarking scenarios so not all changes are relevant here): .. code-block:: ini [[local|localrc]] ADMIN_PASSWORD=secret DATABASE_PASSWORD=$ADMIN_PASSWORD RABBIT_PASSWORD=$ADMIN_PASSWORD SERVICE_PASSWORD=$ADMIN_PASSWORD VIRT_DRIVER=fake # You may use different numbers of fake computes, but be careful: 100 will # completely overwhelm a 16GB, 16VPCU server. In the test profiles below a # value of 50 was used, on a 16GB, 16VCPU server. NUMBER_FAKE_NOVA_COMPUTE=25 disable_service cinder disable_service horizon disable_service dstat disable_service tempest [[post-config|$NOVA_CONF]] rpc_response_timeout = 300 # Disable filtering entirely. For some profiling this will not be what you # want. [filter_scheduler] enabled_filters = '""' # Send only one type of notifications to avoid notification overhead. [notifications] notification_format = unversioned Change the code in ``nova/scheduler/manager.py`` as follows to start the profiler at the start of the ``_select_destinations`` call and to dump the statistics at the end. For example: .. 
code-block:: diff diff --git nova/scheduler/manager.py nova/scheduler/manager.py index 9cee6b3bfc..4859b21fb1 100644 --- nova/scheduler/manager.py +++ nova/scheduler/manager.py @@ -237,6 +237,10 @@ class SchedulerManager(manager.Manager): alloc_reqs_by_rp_uuid, provider_summaries, allocation_request_version=None, return_alternates=False, ): + from eventlet.green import profile + pr = profile.Profile() + pr.start() + self.notifier.info( context, 'scheduler.select_destinations.start', {'request_spec': spec_obj.to_legacy_request_spec_dict()}) @@ -260,6 +264,9 @@ class SchedulerManager(manager.Manager): action=fields_obj.NotificationAction.SELECT_DESTINATIONS, phase=fields_obj.NotificationPhase.END) + pr.stop() + pr.dump_stats('/tmp/select_destinations/%s.prof' % ':'.join(instance_uuids)) + return selections def _schedule( Make a ``/tmp/select_destinations`` directory that is writable by the user nova-scheduler will run as. This is where the profile output will go. Restart the scheduler service. Note that ``systemctl restart`` may not kill things sufficiently dead, so:: sudo systemctl stop devstack@n-sch sleep 5 sudo systemctl start devstack@n-sch Create a server (which will call ``select_destinations``):: openstack server create --image cirros-0.4.0-x86_64-disk --flavor c1 x1 In ``/tmp/select_destinations`` there should be a file with a name using the UUID of the created server with a ``.prof`` extension. Change to that directory and view the profile using the pstats `interactive mode`_:: python3 -m pstats ef044142-f3b8-409d-9af6-c60cea39b273.prof .. note:: The major version of python used to analyze the profile data must be the same as the version used to run the process being profiled. Sort stats by their cumulative time:: ef044142-f3b8-409d-9af6-c60cea39b273.prof% sort cumtime ef044142-f3b8-409d-9af6-c60cea39b273.prof% stats 10 Tue Aug 6 17:17:56 2019 ef044142-f3b8-409d-9af6-c60cea39b273.prof 603477 function calls (587772 primitive calls) in 2.294 seconds Ordered by: cumulative time List reduced from 2484 to 10 due to restriction <10> ncalls tottime percall cumtime percall filename:lineno(function) 1 0.000 0.000 1.957 1.957 profile:0(start) 1 0.000 0.000 1.911 1.911 /mnt/share/opt/stack/nova/nova/scheduler/filter_scheduler.py:113(_schedule) 1 0.000 0.000 1.834 1.834 /mnt/share/opt/stack/nova/nova/scheduler/filter_scheduler.py:485(_get_all_host_states) 1 0.000 0.000 1.834 1.834 /mnt/share/opt/stack/nova/nova/scheduler/host_manager.py:757(get_host_states_by_uuids) 1 0.004 0.004 1.818 1.818 /mnt/share/opt/stack/nova/nova/scheduler/host_manager.py:777(_get_host_states) 104/103 0.001 0.000 1.409 0.014 /usr/local/lib/python3.6/dist-packages/oslo_versionedobjects/base.py:170(wrapper) 50 0.001 0.000 1.290 0.026 /mnt/share/opt/stack/nova/nova/scheduler/host_manager.py:836(_get_instance_info) 50 0.001 0.000 1.289 0.026 /mnt/share/opt/stack/nova/nova/scheduler/host_manager.py:820(_get_instances_by_host) 103 0.001 0.000 0.890 0.009 /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py:3325(__iter__) 50 0.001 0.000 0.776 0.016 /mnt/share/opt/stack/nova/nova/objects/host_mapping.py:99(get_by_host) From this we can make a couple of useful inferences about ``get_by_host``: * It is called once for each of the 50 ``FakeVirtDriver`` hypervisors configured for these tests. * It (and the methods it calls internally) consumes about 40% of the entire time spent running (``0.776 / 1.957``) the ``select_destinations`` method (indicated by ``profile:0(start)``, above). Several other sort modes can be used. 
List those that are available by entering ``sort`` without arguments. Caveats ======= Real world use indicates that the eventlet profiler is not perfect. There are situations where it will not always track switches between greenlets as well as it could. This can result in profile data that does not make sense or random slowdowns in the system being profiled. There is no one size fits all solution to these issues; profiling eventlet services is more an art than science. However, this section tries to provide a (hopefully) growing body of advice on what to do to work around problems. General Advice -------------- * Try to profile chunks of code that operate mostly within one module or class and do not have many collaborators. The more convoluted the path through the code, the more confused the profiler gets. * Similarly, where possible avoid profiling code that will trigger many greenlet context switches; either specific spawns, or multiple types of I/O. Instead, narrow the focus of the profiler. * If possible, avoid RPC. In nova-compute --------------- The creation of this caveat section was inspired by issues experienced while profiling ``nova-compute``. The ``nova-compute`` process is not allowed to speak with a database server directly. Instead communication is mediated through the conductor, communication happening via ``oslo.versionedobjects`` and remote calls. Profiling methods such as ``update_available_resource`` in the ResourceTracker, which needs information from the database, results in profile data that can be analyzed but is incorrect and misleading. This can be worked around by temporarily changing ``nova-compute`` to allow it to speak to the database directly: .. code-block:: diff diff --git a/nova/cmd/compute.py b/nova/cmd/compute.py index 01fd20de2e..655d503158 100644 --- a/nova/cmd/compute.py +++ b/nova/cmd/compute.py @@ -50,8 +50,10 @@ def main(): gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) - cmd_common.block_db_access('nova-compute') - objects_base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI() + # Temporarily allow access to the database. You must update the config file + # used by this process to set [database]/connection to the cell1 database. + # cmd_common.block_db_access('nova-compute') + # objects_base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI() objects.Service.enable_min_version_cache() server = service.Service.create(binary='nova-compute', topic=compute_rpcapi.RPC_TOPIC) The configuration file used by the ``nova-compute`` process must also be updated to ensure that it contains a setting for the relevant database: .. code-block:: ini [database] connection = mysql+pymysql://root:secret@127.0.0.1/nova_cell1?charset=utf8 In a single node devstack setup ``nova_cell1`` is the right choice. The connection string will vary in other setups. Once these changes are made, along with the profiler changes indicated in the example above, ``nova-compute`` can be restarted and with luck some useful profiling data will emerge. .. _eventlet: https://eventlet.net/ .. _cProfile: https://docs.python.org/3/library/profile.html .. _SnakeViz: https://jiffyclub.github.io/snakeviz/ .. _devstack: https://docs.openstack.org/devstack/latest/ .. _FakeVirtDriver: https://docs.openstack.org/devstack/latest/guides/nova.html#fake-virt-driver .. _pstats: https://docs.python.org/3/library/profile.html#pstats.Stats .. 
_interactive mode: https://www.stefaanlippens.net/python_profiling_with_pstats_interactive_mode/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/testing/libvirt-numa.rst0000664000175000017500000007071100000000000024027 0ustar00zuulzuul00000000000000================================================ Testing NUMA related hardware setup with libvirt ================================================ This page describes how to test the libvirt driver's handling of the NUMA placement, large page allocation and CPU pinning features. It relies on setting up a virtual machine as the test environment and requires support for nested virtualization since plain QEMU is not sufficiently functional. The virtual machine will itself be given NUMA topology, so it can then act as a virtual "host" for testing purposes. ------------------------------------------ Provisioning a virtual machine for testing ------------------------------------------ The entire test process will take place inside a large virtual machine running Fedora 24. The instructions should work for any other Linux distribution which includes libvirt >= 1.2.9 and QEMU >= 2.1.2 The tests will require support for nested KVM, which is not enabled by default on hypervisor hosts. It must be explicitly turned on in the host when loading the kvm-intel/kvm-amd kernel modules. On Intel hosts verify it with .. code-block:: bash # cat /sys/module/kvm_intel/parameters/nested N # rmmod kvm-intel # echo "options kvm-intel nested=y" > /etc/modprobe.d/dist.conf # modprobe kvm-intel # cat /sys/module/kvm_intel/parameters/nested Y While on AMD hosts verify it with .. code-block:: bash # cat /sys/module/kvm_amd/parameters/nested 0 # rmmod kvm-amd # echo "options kvm-amd nested=1" > /etc/modprobe.d/dist.conf # modprobe kvm-amd # cat /sys/module/kvm_amd/parameters/nested 1 The virt-install command below shows how to provision a basic Fedora 24 x86_64 guest with 8 virtual CPUs, 8 GB of RAM and 20 GB of disk space: .. code-block:: bash # cd /var/lib/libvirt/images # wget https://download.fedoraproject.org/pub/fedora/linux/releases/29/Server/x86_64/iso/Fedora-Server-netinst-x86_64-29-1.2.iso # virt-install \ --name f29x86_64 \ --ram 8000 \ --vcpus 8 \ --file /var/lib/libvirt/images/f29x86_64.img \ --file-size 20 --cdrom /var/lib/libvirt/images/Fedora-Server-netinst-x86_64-24-1.2.iso \ --os-variant fedora23 When the virt-viewer application displays the installer, follow the defaults for the installation with a couple of exceptions: * The automatic disk partition setup can be optionally tweaked to reduce the swap space allocated. No more than 500MB is required, free'ing up an extra 1.5 GB for the root disk * Select "Minimal install" when asked for the installation type since a desktop environment is not required * When creating a user account be sure to select the option "Make this user administrator" so it gets 'sudo' rights Once the installation process has completed, the virtual machine will reboot into the final operating system. It is now ready to deploy an OpenStack development environment. --------------------------------- Setting up a devstack environment --------------------------------- For later ease of use, copy your SSH public key into the virtual machine: .. code-block:: bash # ssh-copy-id Now login to the virtual machine: .. code-block:: bash # ssh The Fedora minimal install does not contain git. Install git and clone the devstack repo: .. 
code-block:: bash $ sudo dnf install git $ git clone https://opendev.org/openstack/devstack $ cd devstack At this point a fairly standard devstack setup can be done with one exception: we should enable the ``NUMATopologyFilter`` filter, which we will use later. For example: .. code-block:: bash $ cat >>local.conf < select numa_topology from compute_nodes; +----------------------------------------------------------------------------+ | numa_topology | +----------------------------------------------------------------------------+ | { | "nova_object.name": "NUMATopology", | "nova_object.namespace": "nova", | "nova_object.version": "1.2", | "nova_object.data": { | "cells": [{ | "nova_object.name": "NUMACell", | "nova_object.namespace": "nova", | "nova_object.version": "1.4", | "nova_object.data": { | "id": 0, | "cpuset": [0, 1, 2, 3, 4, 5, 6, 7], | "pcpuset": [0, 1, 2, 3, 4, 5, 6, 7], | "memory": 7975, | "cpu_usage": 0, | "memory_usage": 0, | "pinned_cpus": [], | "siblings": [ | [0], | [1], | [2], | [3], | [4], | [5], | [6], | [7] | ], | "mempages": [{ | "nova_object.name": "NUMAPagesTopology", | "nova_object.namespace": "nova", | "nova_object.version": "1.1", | "nova_object.data": { | "size_kb": 4, | "total": 2041795, | "used": 0, | "reserved": 0 | }, | "nova_object.changes": ["size_kb", "total", "reserved", "used"] | }, { | "nova_object.name": "NUMAPagesTopology", | "nova_object.namespace": "nova", | "nova_object.version": "1.1", | "nova_object.data": { | "size_kb": 2048, | "total": 0, | "used": 0, | "reserved": 0 | }, | "nova_object.changes": ["size_kb", "total", "reserved", "used"] | }, { | "nova_object.name": "NUMAPagesTopology", | "nova_object.namespace": "nova", | "nova_object.version": "1.1", | "nova_object.data": { | "size_kb": 1048576, | "total": 0, | "used": 0, | "reserved": 0 | }, | "nova_object.changes": ["size_kb", "total", "reserved", "used"] | }], | "network_metadata": { | "nova_object.name": "NetworkMetadata", | "nova_object.namespace": "nova", | "nova_object.version": "1.0", | "nova_object.data": { | "physnets": [], | "tunneled": false | }, | "nova_object.changes": ["tunneled", "physnets"] | } | }, | "nova_object.changes": ["pinned_cpus", "memory_usage", "siblings", "mempages", "memory", "id", "network_metadata", "cpuset", "cpu_usage", "pcpuset"] | }] | }, | "nova_object.changes": ["cells"] | } +----------------------------------------------------------------------------+ Meanwhile, the guest instance should not have any NUMA configuration recorded: .. code-block:: bash MariaDB [nova]> select numa_topology from instance_extra; +---------------+ | numa_topology | +---------------+ | NULL | +---------------+ ----------------------------------------------------- Reconfiguring the test instance to have NUMA topology ----------------------------------------------------- Now that devstack is proved operational, it is time to configure some NUMA topology for the test VM, so that it can be used to verify the OpenStack NUMA support. To do the changes, the VM instance that is running devstack must be shut down: .. code-block:: bash $ sudo shutdown -h now And now back on the physical host edit the guest config as root: .. code-block:: bash $ sudo virsh edit f29x86_64 The first thing is to change the ```` block to do passthrough of the host CPU. In particular this exposes the "SVM" or "VMX" feature bits to the guest so that "Nested KVM" can work. At the same time we want to define the NUMA topology of the guest. 
To make things interesting we're going to give the guest an asymmetric topology with 4 CPUS and 4 GBs of RAM in the first NUMA node and 2 CPUs and 2 GB of RAM in the second and third NUMA nodes. So modify the guest XML to include the following CPU XML: .. code-block:: xml Now start the guest again: .. code-block:: bash # virsh start f29x86_64 ...and login back in: .. code-block:: bash # ssh Before starting OpenStack services again, it is necessary to explicitly set the libvirt virtualization type to KVM, so that guests can take advantage of nested KVM: .. code-block:: bash $ sudo sed -i 's/virt_type = qemu/virt_type = kvm/g' /etc/nova/nova.conf With that done, OpenStack can be started again: .. code-block:: bash $ cd devstack $ ./stack.sh The first thing is to check that the compute node picked up the new NUMA topology setup for the guest: .. code-block:: bash $ mysql -u root -p123456 nova_cell1 MariaDB [nova]> select numa_topology from compute_nodes; +----------------------------------------------------------------------------+ | numa_topology | +----------------------------------------------------------------------------+ | { | "nova_object.name": "NUMATopology", | "nova_object.namespace": "nova", | "nova_object.version": "1.2", | "nova_object.data": { | "cells": [{ | "nova_object.name": "NUMACell", | "nova_object.namespace": "nova", | "nova_object.version": "1.4", | "nova_object.data": { | "id": 0, | "cpuset": [0, 1, 2, 3], | "pcpuset": [0, 1, 2, 3], | "memory": 3966, | "cpu_usage": 0, | "memory_usage": 0, | "pinned_cpus": [], | "siblings": [ | [2], | [0], | [3], | [1] | ], | "mempages": [{ | "nova_object.name": "NUMAPagesTopology", | "nova_object.namespace": "nova", | "nova_object.version": "1.1", | "nova_object.data": { | "size_kb": 4, | "total": 1015418, | "used": 0, | "reserved": 0 | }, | "nova_object.changes": ["total", "size_kb", "used", "reserved"] | }, { | "nova_object.name": "NUMAPagesTopology", | "nova_object.namespace": "nova", | "nova_object.version": "1.1", | "nova_object.data": { | "size_kb": 2048, | "total": 0, | "used": 0, | "reserved": 0 | }, | "nova_object.changes": ["total", "size_kb", "used", "reserved"] | }, { | "nova_object.name": "NUMAPagesTopology", | "nova_object.namespace": "nova", | "nova_object.version": "1.1", | "nova_object.data": { | "size_kb": 1048576, | "total": 0, | "used": 0, | "reserved": 0 | }, | "nova_object.changes": ["total", "size_kb", "used", "reserved"] | }], | "network_metadata": { | "nova_object.name": "NetworkMetadata", | "nova_object.namespace": "nova", | "nova_object.version": "1.0", | "nova_object.data": { | "physnets": [], | "tunneled": false | }, | "nova_object.changes": ["physnets", "tunneled"] | } | }, | "nova_object.changes": ["pinned_cpus", "siblings", "memory", "id", "cpuset", "network_metadata", "pcpuset", "mempages", "cpu_usage", "memory_usage"] | }, { | "nova_object.name": "NUMACell", | "nova_object.namespace": "nova", | "nova_object.version": "1.4", | "nova_object.data": { | "id": 1, | "cpuset": [4, 5], | "pcpuset": [4, 5], | "memory": 1994, | "cpu_usage": 0, | "memory_usage": 0, | "pinned_cpus": [], | "siblings": [ | [5], | [4] | ], | "mempages": [{ | "nova_object.name": "NUMAPagesTopology", | "nova_object.namespace": "nova", | "nova_object.version": "1.1", | "nova_object.data": { | "size_kb": 4, | "total": 510562, | "used": 0, | "reserved": 0 | }, | "nova_object.changes": ["total", "size_kb", "used", "reserved"] | }, { | "nova_object.name": "NUMAPagesTopology", | "nova_object.namespace": "nova", | "nova_object.version": 
"1.1", | "nova_object.data": { | "size_kb": 2048, | "total": 0, | "used": 0, | "reserved": 0 | }, | "nova_object.changes": ["total", "size_kb", "used", "reserved"] | }, { | "nova_object.name": "NUMAPagesTopology", | "nova_object.namespace": "nova", | "nova_object.version": "1.1", | "nova_object.data": { | "size_kb": 1048576, | "total": 0, | "used": 0, | "reserved": 0 | }, | "nova_object.changes": ["total", "size_kb", "used", "reserved"] | }], | "network_metadata": { | "nova_object.name": "NetworkMetadata", | "nova_object.namespace": "nova", | "nova_object.version": "1.0", | "nova_object.data": { | "physnets": [], | "tunneled": false | }, | "nova_object.changes": ["physnets", "tunneled"] | } | }, | "nova_object.changes": ["pinned_cpus", "siblings", "memory", "id", "cpuset", "network_metadata", "pcpuset", "mempages", "cpu_usage", "memory_usage"] | }, { | "nova_object.name": "NUMACell", | "nova_object.namespace": "nova", | "nova_object.version": "1.4", | "nova_object.data": { | "id": 2, | "cpuset": [6, 7], | "pcpuset": [6, 7], | "memory": 2014, | "cpu_usage": 0, | "memory_usage": 0, | "pinned_cpus": [], | "siblings": [ | [7], | [6] | ], | "mempages": [{ | "nova_object.name": "NUMAPagesTopology", | "nova_object.namespace": "nova", | "nova_object.version": "1.1", | "nova_object.data": { | "size_kb": 4, | "total": 515727, | "used": 0, | "reserved": 0 | }, | "nova_object.changes": ["total", "size_kb", "used", "reserved"] | }, { | "nova_object.name": "NUMAPagesTopology", | "nova_object.namespace": "nova", | "nova_object.version": "1.1", | "nova_object.data": { | "size_kb": 2048, | "total": 0, | "used": 0, | "reserved": 0 | }, | "nova_object.changes": ["total", "size_kb", "used", "reserved"] | }, { | "nova_object.name": "NUMAPagesTopology", | "nova_object.namespace": "nova", | "nova_object.version": "1.1", | "nova_object.data": { | "size_kb": 1048576, | "total": 0, | "used": 0, | "reserved": 0 | }, | "nova_object.changes": ["total", "size_kb", "used", "reserved"] | }], | "network_metadata": { | "nova_object.name": "NetworkMetadata", | "nova_object.namespace": "nova", | "nova_object.version": "1.0", | "nova_object.data": { | "physnets": [], | "tunneled": false | }, | "nova_object.changes": ["physnets", "tunneled"] | } | }, | "nova_object.changes": ["pinned_cpus", "siblings", "memory", "id", "cpuset", "network_metadata", "pcpuset", "mempages", "cpu_usage", "memory_usage"] | }] | }, | "nova_object.changes": ["cells"] +----------------------------------------------------------------------------+ This indeed shows that there are now 3 NUMA nodes for the "host" machine, the first with 4 GB of RAM and 4 CPUs, and others with 2 GB of RAM and 2 CPUs each. ----------------------------------------------------- Testing instance boot with no NUMA topology requested ----------------------------------------------------- For the sake of backwards compatibility, if the NUMA filter is enabled, but the flavor/image does not have any NUMA settings requested, it should be assumed that the guest will have a single NUMA node. The guest should be locked to a single host NUMA node too. Boot a guest with the ``m1.tiny`` flavor to test this condition: .. code-block:: bash $ . openrc admin admin $ openstack server create --image cirros-0.4.0-x86_64-disk --flavor m1.tiny \ cirros1 Now look at the libvirt guest XML: .. code-block:: bash $ sudo virsh list Id Name State ---------------------------------------------------- 1 instance-00000001 running $ sudo virsh dumpxml instance-00000001 ... 1 ... 
This example shows that there is no explicit NUMA topology listed in the guest XML. ------------------------------------------------ Testing instance boot with 1 NUMA cell requested ------------------------------------------------ Moving forward a little, explicitly tell nova that the NUMA topology for the guest should have a single NUMA node. This should operate in an identical manner to the default behavior where no NUMA policy is set. To define the topology we will create a new flavor: .. code-block:: bash $ openstack flavor create --ram 1024 --disk 1 --vcpus 4 m1.numa $ openstack flavor set --property hw:numa_nodes=1 m1.numa $ openstack flavor show m1.numa Now boot the guest using this new flavor: .. code-block:: bash $ openstack server create --image cirros-0.4.0-x86_64-disk --flavor m1.numa \ cirros2 Looking at the resulting guest XML from libvirt: .. code-block:: bash $ sudo virsh list Id Name State ---------------------------------------------------- 1 instance-00000001 running 2 instance-00000002 running $ sudo virsh dumpxml instance-00000002 ... 4 ... ... The XML shows: * Each guest CPU has been pinned to the physical CPUs associated with a particular NUMA node * The emulator threads have been pinned to the union of all physical CPUs in the host NUMA node that the guest is placed on * The guest has been given a virtual NUMA topology with a single node holding all RAM and CPUs * The guest NUMA node has been strictly pinned to a host NUMA node. As a further sanity test, check what nova recorded for the instance in the database. This should match the ```` information: .. code-block:: bash $ mysql -u root -p123456 nova_cell1 MariaDB [nova]> select numa_topology from instance_extra; +----------------------------------------------------------------------------+ | numa_topology | +----------------------------------------------------------------------------+ | { | "nova_object.name": "InstanceNUMATopology", | "nova_object.namespace": "nova", | "nova_object.version": "1.3", | "nova_object.data": { | "cells": [{ | "nova_object.name": "InstanceNUMACell", | "nova_object.namespace": "nova", | "nova_object.version": "1.4", | "nova_object.data": { | "id": 0, | "cpuset": [0, 1, 2, 3], | "memory": 1024, | "pagesize": null, | "cpu_pinning_raw": null, | "cpu_policy": null, | "cpu_thread_policy": null, | "cpuset_reserved": null | }, | "nova_object.changes": ["id"] | }], | "emulator_threads_policy": null | }, | "nova_object.changes": ["cells", "emulator_threads_policy"] | } +----------------------------------------------------------------------------+ Delete this instance: .. code-block:: bash $ openstack server delete cirros2 ------------------------------------------------- Testing instance boot with 2 NUMA cells requested ------------------------------------------------- Now getting more advanced we tell nova that the guest will have two NUMA nodes. To define the topology we will change the previously defined flavor: .. code-block:: bash $ openstack flavor set --property hw:numa_nodes=2 m1.numa $ openstack flavor show m1.numa Now boot the guest using this changed flavor: .. code-block:: bash $ openstack server create --image cirros-0.4.0-x86_64-disk --flavor m1.numa \ cirros2 Looking at the resulting guest XML from libvirt: .. code-block:: bash $ sudo virsh list Id Name State ---------------------------------------------------- 1 instance-00000001 running 3 instance-00000003 running $ sudo virsh dumpxml instance-00000003 ... 4 ... ... 
The XML shows: * Each guest CPU has been pinned to the physical CPUs associated with particular NUMA nodes * The emulator threads have been pinned to the union of all physical CPUs in the host NUMA nodes that the guest is placed on * The guest has been given a virtual NUMA topology with two nodes, each holding half the RAM and CPUs * The guest NUMA nodes have been strictly pinned to different host NUMA node As a further sanity test, check what nova recorded for the instance in the database. This should match the ```` information: .. code-block:: bash MariaDB [nova]> select numa_topology from instance_extra; +----------------------------------------------------------------------------+ | numa_topology | +----------------------------------------------------------------------------+ | { | "nova_object.name": "InstanceNUMATopology", | "nova_object.namespace": "nova", | "nova_object.version": "1.3", | "nova_object.data": { | "cells": [{ | "nova_object.name": "InstanceNUMACell", | "nova_object.namespace": "nova", | "nova_object.version": "1.4", | "nova_object.data": { | "id": 0, | "cpuset": [0, 1], | "memory": 512, | "pagesize": null, | "cpu_pinning_raw": null, | "cpu_policy": null, | "cpu_thread_policy": null, | "cpuset_reserved": null | }, | "nova_object.changes": ["id"] | }, { | "nova_object.name": "InstanceNUMACell", | "nova_object.namespace": "nova", | "nova_object.version": "1.4", | "nova_object.data": { | "id": 1, | "cpuset": [2, 3], | "memory": 512, | "pagesize": null, | "cpu_pinning_raw": null, | "cpu_policy": null, | "cpu_thread_policy": null, | "cpuset_reserved": null | }, | "nova_object.changes": ["id"] | }], | "emulator_threads_policy": null | }, | "nova_object.changes": ["cells", "emulator_threads_policy"] | } +----------------------------------------------------------------------------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/testing/pci-passthrough-sriov.rst0000664000175000017500000000431600000000000025674 0ustar00zuulzuul00000000000000======================================================== Testing PCI passthrough and SR-IOV with emulated PCI NIC ======================================================== Testing nova PCI and SR-IOV device handling in devstack in general requires special hardware available in the devstack machine or VM. Since libvirt 9.3.0 and qemu 8.0.0 the situation is a lot simpler as qemu is now capable of emulating and intel igb NIC that supports SR-IOV. So devstack can be run in a VM that has one or more SR-IOV capable NIC while the host running the devstack VM does not require any special hardware just recent libvirt and qemu installed. .. note:: The emulated igb device used in this doc only useful for testing purposes. While network connectivity will work through both the PF and the VF interfaces the networking performance will be limited by the qemu emulation. Add SR-IOV capable igb NIC to a devstack VM ------------------------------------------- You can add an igb NIC to the devstack VM definition: .. code-block:: bash virsh attach-interface --type network --source --model igb --config The SR-IOV capability also requires an IOMMU device and split APIC defined in the domain as well as the q35 machine type. So make sure you have the following sections: .. code-block:: xml ... .. code-block:: xml .. .. code-block:: xml ... hvm Then enable IOMMU in the devstack VM by adding ``intel_iommu=on`` to the kernel command line. 
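Once the devstack VM has booted with IOMMU enabled, you may want to confirm that the emulated igb PF can actually spawn virtual functions before configuring nova. The following is a small illustrative sketch using the standard Linux sysfs SR-IOV interface; it is not part of Nova or devstack, the interface name ``ens4`` is an assumption that must be replaced with the actual igb interface name in your VM, and it must be run as root:

.. code-block:: python

   from pathlib import Path

   def enable_vfs(interface, count):
       """Enable count VFs on an SR-IOV capable PF and return their PCI addresses."""
       device = Path('/sys/class/net') / interface / 'device'
       # Writing to sriov_numvfs asks the kernel to create the VFs.
       # If VFs already exist, write 0 first before requesting a new count.
       (device / 'sriov_numvfs').write_text(str(count))
       # Each VF appears as a virtfn* symlink pointing at its PCI address.
       return sorted(p.resolve().name for p in device.glob('virtfn*'))

   if __name__ == '__main__':
       print(enable_vfs('ens4', 2))

If the VFs are created successfully they will also show up in ``lspci`` as Virtual Function devices, at which point you can continue with the SR-IOV configuration described in the following sections.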
Using nova to get a VM with SR-IOV capable igb NICs --------------------------------------------------- If you are using nova 2025.1 (Epoxy) or newer then you can use the ``hw_machine_type=q35``, ``hw_vif_model=igb``, and ``hw_iommu_model=intel`` image properties to request a VM with igb NICs. Configuring your devstack VM for SR-IOV --------------------------------------- Follow the `guide `_; ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/testing/serial-console.rst0000664000175000017500000000565200000000000024337 0ustar00zuulzuul00000000000000 ====================== Testing Serial Console ====================== The main aim of this feature is exposing an interactive web-based serial consoles through a web-socket proxy. This page describes how to test it from a devstack environment. --------------------------------- Setting up a devstack environment --------------------------------- For instructions on how to setup devstack with serial console support enabled see `this guide `_. --------------- Testing the API --------------- Starting a new instance. .. code-block:: bash # cd devstack && . openrc # nova boot --flavor 1 --image cirros-0.3.2-x86_64-uec cirros1 Nova provides a command ``nova get-serial-console`` which will returns a URL with a valid token to connect to the serial console of VMs. .. code-block:: bash # nova get-serial-console cirros1 +--------+-----------------------------------------------------------------+ | Type | Url | +--------+-----------------------------------------------------------------+ | serial | ws://127.0.0.1:6083/?token=5f7854b7-bf3a-41eb-857a-43fc33f0b1ec | +--------+-----------------------------------------------------------------+ Currently nova does not provide any client able to connect from an interactive console through a web-socket. A simple client for *test purpose* can be written with few lines of Python. .. code-block:: python # sudo easy_install ws4py || sudo pip install ws4py # cat >> client.py < 0: # websocket data: opcode + length + data word = b[2:b[1]+2] b = b[b[1]+2:] sys.stdout.buffer.write(word) sys.stdout.flush() except: # socket error expected pass finally: self.terminate() if __name__ == '__main__': if len(sys.argv) != 2 or not sys.argv[1].startswith("ws"): print("Usage %s: Please use websocket url") print("Example: ws://127.0.0.1:6083/?token=xxx") exit(1) try: ws = LazyClient(sys.argv[1], protocols=['binary']) ws.connect() while True: # keyboard event... c = sys.stdin.read(1) if c: ws.send(c.encode('utf-8'), binary=True) ws.run_forever() except KeyboardInterrupt: ws.close() EOF .. code-block:: bash # python client.py ws://127.0.0.1:6083/?token=5f7854b7-bf3a-41eb-857a-43fc33f0b1ec cirros1 login ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/testing/zero-downtime-upgrade.rst0000664000175000017500000001367000000000000025647 0ustar00zuulzuul00000000000000===================================== Testing Zero Downtime Upgrade Process ===================================== Zero Downtime upgrade eliminates any disruption to nova API service during upgrade. Nova API services are upgraded at the end. The basic idea of the zero downtime upgrade process is to have the connections drain from the old API before being upgraded. In this process, new connections go to the new API nodes while old connections slowly drain from the old nodes. 
This ensures that the user sees the max_supported API version as a monotonically increasing number. There might be some performance degradation during the process due to slow HTTP responses and delayed request handling, but there is no API downtime. This page describes how to test the zero downtime upgrade process. ----------- Environment ----------- * Multinode devstack environment with 2 nodes: * controller - All services (N release) * compute-api - Only n-cpu and n-api services (N release) * Highly available load balancer (HAProxy) on top of the n-api services. This is required for zero downtime upgrade as it allows one n-api service to run while we upgrade the other. See instructions to setup HAProxy below. ----------------------------- Instructions to setup HAProxy ----------------------------- Install HAProxy and Keepalived on both nodes. .. code-block:: bash # apt-get install haproxy keepalived Let the kernel know that we intend to bind additional IP addresses that won't be defined in the interfaces file. To do this, edit ``/etc/sysctl.conf`` and add the following line: .. code-block:: INI net.ipv4.ip_nonlocal_bind=1 Make this take effect without rebooting. .. code-block:: bash # sysctl -p Configure HAProxy to add backend servers and assign virtual IP to the frontend. On both nodes add the below HAProxy config: .. code-block:: bash # cd /etc/haproxy # cat >> haproxy.cfg <> default_backend nova-api backend nova-api balance roundrobin option tcplog server controller 192.168.0.88:8774 check server apicomp 192.168.0.89:8774 check EOF .. note:: Just change the IP for log in the global section on each node. On both nodes add ``keepalived.conf``: .. code-block:: bash # cd /etc/keepalived # cat >> keepalived.conf <> 0" | sudo socat /var/run/haproxy.sock stdio * OR disable service using: .. code-block:: bash # echo "disable server nova-api/<>" | sudo socat /var/run/haproxy.sock stdio * This allows the current node to complete all the pending requests. When this is being upgraded, other api node serves the requests. This way we can achieve zero downtime. * Restart n-api service and enable n-api using the command: .. code-block:: bash # echo "enable server nova-api/<>" | sudo socat /var/run/haproxy.sock stdio * Drain connections from other old api node in the same way and upgrade. * No tempest tests should fail since there is no API downtime. After maintenance window ''''''''''''''''''''''''' * Follow the steps from general rolling upgrade process to clear any cached service version data and complete all online data migrations. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/contributor/testing.rst0000664000175000017500000001122100000000000021405 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============== Test Strategy ============== A key part of the "four opens" is ensuring the OpenStack delivers well-tested and usable software. 
For more details see: http://docs.openstack.org/project-team-guide/introduction.html#the-four-opens Experience has shown that untested features are frequently broken, in part due to the velocity of upstream changes. As we aim to ensure we keep all features working across upgrades, we must aim to test all features. Reporting Test Coverage ======================= For details on plans to report the current test coverage, refer to :doc:`/user/feature-classification`. Running tests and reporting results =================================== Running tests locally --------------------- Please see https://opendev.org/openstack/nova/src/branch/master/HACKING.rst#running-tests Voting in Gerrit ---------------- On every review in gerrit, check tests are run on very patch set, and are able to report a +1 or -1 vote. For more details, please see: http://docs.openstack.org/infra/manual/developers.html#automated-testing Before merging any code, there is an integrate gate test queue, to ensure master is always passing all tests. For more details, please see: http://docs.openstack.org/infra/zuul/user/gating.html Infra vs Third-Party -------------------- Tests that use fully open source components are generally run by the OpenStack Infra teams. Test setups that use non-open technology must be run outside of that infrastructure, but should still report their results upstream. For more details, please see: http://docs.openstack.org/infra/system-config/third_party.html Ad-hoc testing -------------- It is particularly common for people to run ad-hoc tests on each released milestone, such as RC1, to stop regressions. While these efforts can help stabilize the release, as a community we have a much stronger preference for continuous integration testing. Partly this is because we encourage users to deploy master, and we generally have to assume that any upstream commit may already been deployed in production. Types of tests ============== Unit tests ---------- Unit tests help document and enforce the contract for each component. Without good unit test coverage it is hard to continue to quickly evolve the codebase. The correct level of unit test coverage is very subjective, and as such we are not aiming for a particular percentage of coverage, rather we are aiming for good coverage. Generally, every code change should have a related unit test: https://opendev.org/openstack/nova/src/branch/master/HACKING.rst#creating-unit-tests Integration tests ----------------- Today, our integration tests involve running the Tempest test suite on a variety of Nova deployment scenarios. The integration job setup is defined in the ``.zuul.yaml`` file in the root of the nova repository. Jobs are restricted by queue: * ``check``: jobs in this queue automatically run on all proposed changes even with non-voting jobs * ``gate``: jobs in this queue automatically run on all approved changes (voting jobs only) * ``experimental``: jobs in this queue are non-voting and run on-demand by leaving a review comment on the change of "check experimental" In addition, we have third parties running the tests on their preferred Nova deployment scenario. Functional tests ---------------- Nova has a set of in-tree functional tests that focus on things that are out of scope for tempest testing and unit testing. Tempest tests run against a full live OpenStack deployment, generally deployed using devstack. At the other extreme, unit tests typically use mock to test a unit of code in isolation. 
Functional tests don't run an entire stack, they are isolated to nova code, and have no reliance on external services. They do have a WSGI app, nova services and a database, with minimal stubbing of nova internals. Interoperability tests ----------------------- The Interop Working Group maintains a list that contains a subset of Tempest tests. These are used to verify if a particular Nova deployment's API responds as expected. For more details, see: https://docs.opendev.org/openinfra/interop/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/index.rst0000664000175000017500000002275400000000000016502 0ustar00zuulzuul00000000000000.. Copyright 2010-2012 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================== OpenStack Compute (nova) ======================== What is nova? ============= Nova is the OpenStack project that provides a way to provision compute instances (aka virtual servers). Nova supports creating virtual machines, baremetal servers (through the use of ironic), and has limited support for system containers. Nova runs as a set of daemons on top of existing Linux servers to provide that service. It requires the following additional OpenStack services for basic function: * :keystone-doc:`Keystone <>`: This provides identity and authentication for all OpenStack services. * :glance-doc:`Glance <>`: This provides the compute image repository. All compute instances launch from glance images. * :neutron-doc:`Neutron <>`: This is responsible for provisioning the virtual or physical networks that compute instances connect to on boot. * :placement-doc:`Placement <>`: This is responsible for tracking inventory of resources available in a cloud and assisting in choosing which provider of those resources will be used when creating a virtual machine. It can also integrate with other services to include: persistent block storage, encrypted disks, and baremetal compute instances. For End Users ============= As an end user of nova, you'll use nova to create and manage servers with either tools or the API directly. .. # NOTE(amotoki): toctree needs to be placed at the end of the section to # keep the document structure in the PDF doc. .. toctree:: :hidden: user/index Tools for using Nova -------------------- * :horizon-doc:`Horizon `: The official web UI for the OpenStack Project. * :python-openstackclient-doc:`OpenStack Client <>`: The official CLI for OpenStack Projects. You should use this as your CLI for most things, it includes not just nova commands but also commands for most of the projects in OpenStack. * :python-novaclient-doc:`Nova Client `: For some very advanced features (or administrative commands) of nova you may need to use nova client. It is still supported, but the ``openstack`` cli is recommended. 
Writing to the API ------------------ All end user (and some administrative) features of nova are exposed via a REST API, which can be used to build more complicated logic or automation with nova. This can be consumed directly, or via various SDKs. The following resources will help you get started with consuming the API directly. * `Compute API Guide `_: The concept guide for the API. This helps lay out the concepts behind the API to make consuming the API reference easier. * `Compute API Reference `_: The complete reference for the compute API, including all methods and request / response parameters and their meaning. * :doc:`Compute API Microversion History `: The compute API evolves over time through `Microversions `_. This provides the history of all those changes. Consider it a "what's new" in the compute API. * :doc:`Block Device Mapping `: One of the trickier parts to understand is the Block Device Mapping parameters used to connect specific block devices to computes. This deserves its own deep dive. * :doc:`Metadata `: Provide information to the guest instance when it is created. Nova can be configured to emit notifications over RPC. * :doc:`Versioned Notifications `: This provides information on the notifications emitted by nova. Other end-user guides can be found under :doc:`/user/index`. For Operators ============= Architecture Overview --------------------- * :doc:`Nova architecture `: An overview of how all the parts in nova fit together. .. # NOTE(amotoki): toctree needs to be placed at the end of the section to # keep the document structure in the PDF doc. .. toctree:: :hidden: admin/architecture Installation ------------ .. TODO(sdague): links to all the rest of the install guide pieces. The detailed install guide for nova. A functioning nova will also require having installed :keystone-doc:`keystone `, :glance-doc:`glance `, :neutron-doc:`neutron `, and :placement-doc:`placement `. Ensure that you follow their install guides first. .. # NOTE(amotoki): toctree needs to be placed at the end of the section to # keep the document structure in the PDF doc. .. toctree:: :maxdepth: 2 install/index Deployment Considerations ------------------------- There is information you might want to consider before doing your deployment, especially if it is going to be a larger deployment. For smaller deployments the defaults from the :doc:`install guide ` will be sufficient. * **Compute Driver Features Supported**: While the majority of nova deployments use libvirt/kvm, you can use nova with other compute drivers. Nova attempts to provide a unified feature set across these, however, not all features are implemented on all backends, and not all features are equally well tested. * :doc:`Feature Support by Use Case `: A view of what features each driver supports based on what's important to some large use cases (General Purpose Cloud, NFV Cloud, HPC Cloud). * :doc:`Feature Support full list `: A detailed dive through features in each compute driver backend. * :doc:`Cells v2 configuration `: For large deployments, cells v2 cells allow sharding of your compute environment. Upfront planning is key to a successful cells v2 layout. * :doc:`Running nova-api on wsgi `: Considerations for using a real WSGI container. .. # NOTE(amotoki): toctree needs to be placed at the end of the section to # keep the document structure in the PDF doc. .. 
toctree:: :hidden: user/feature-classification user/support-matrix admin/cells user/wsgi Maintenance ----------- Once you are running nova, the following information is extremely useful. * :doc:`Admin Guide `: A collection of guides for administrating nova. * :doc:`Flavors `: What flavors are and why they are used. * :doc:`Upgrades `: How nova is designed to be upgraded for minimal service impact, and the order you should do them in. * :doc:`Quotas `: Managing project quotas in nova. * :doc:`Aggregates `: Aggregates are a useful way of grouping hosts together for scheduling purposes. * :doc:`Scheduling `: How the scheduler is configured, and how that will impact where compute instances land in your environment. If you are seeing unexpected distribution of compute instances in your hosts, you'll want to dive into this configuration. * :doc:`Exposing custom metadata to compute instances `: How and when you might want to extend the basic metadata exposed to compute instances (either via metadata server or config drive) for your specific purposes. .. # NOTE(amotoki): toctree needs to be placed at the end of the section to # keep the document structure in the PDF doc. .. toctree:: :hidden: admin/index user/flavors admin/upgrades user/unified-limits admin/vendordata Reference Material ------------------ * :doc:`Nova CLI Command References `: the complete command reference for all the daemons and admin tools that come with nova. * :doc:`Configuration Guide `: Information on configuring the system, including role-based access control policy rules. .. # NOTE(amotoki): toctree needs to be placed at the end of the section to # keep the document structure in the PDF doc. .. toctree:: :hidden: cli/index configuration/index For Contributors ================ * :doc:`contributor/contributing`: If you are a new contributor this should help you to start contributing to Nova. * :doc:`contributor/index`: If you are new to Nova, this should help you start to understand what Nova actually does, and why. * :doc:`reference/index`: There are also a number of technical references on both current and future looking parts of our architecture. These are collected here. .. # NOTE(amotoki): toctree needs to be placed at the end of the section to # keep the document structure in the PDF doc. .. toctree:: :hidden: contributor/index contributor/contributing reference/index .. only:: html Search ====== * :ref:`Nova document search `: Search the contents of this document. * `OpenStack wide search `_: Search the wider set of OpenStack documentation, including forums. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.261608 nova-32.0.0/doc/source/install/0000775000175000017500000000000000000000000016275 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/install/compute-install-obs.rst0000664000175000017500000002066200000000000022736 0ustar00zuulzuul00000000000000Install and configure a compute node for openSUSE and SUSE Linux Enterprise ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Compute service on a compute node. The service supports several hypervisors to deploy instances or virtual machines (VMs). 
For simplicity, this configuration uses the Quick EMUlator (QEMU) hypervisor with the kernel-based VM (KVM) extension on compute nodes that support hardware acceleration for virtual machines. On legacy hardware, this configuration uses the generic QEMU hypervisor. You can follow these instructions with minor modifications to horizontally scale your environment with additional compute nodes. .. note:: This section assumes that you are following the instructions in this guide step-by-step to configure the first compute node. If you want to configure additional compute nodes, prepare them in a similar fashion to the first compute node in the :ref:`example architectures ` section. Each additional compute node requires a unique IP address. Install and configure components -------------------------------- .. include:: shared/note_configuration_vary_by_distribution.rst #. Install the packages: .. code-block:: console # zypper install openstack-nova-compute genisoimage qemu-kvm libvirt #. Edit the ``/etc/nova/nova.conf`` file and complete the following actions: * In the ``[DEFAULT]`` section, set the ``compute_driver``: .. path /etc/nova/nova.conf .. code-block:: ini [DEFAULT] # ... compute_driver = libvirt.LibvirtDriver * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. path /etc/nova/nova.conf .. code-block:: ini [DEFAULT] # ... transport_url = rabbit://openstack:RABBIT_PASS@controller Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. * In the ``[service_user]`` section, configure :ref:`service user tokens `: .. path /etc/nova/nova.conf .. code-block:: ini [service_user] send_service_user_token = true auth_url = https://controller/identity auth_type = password project_domain_name = Default project_name = service user_domain_name = Default username = nova password = NOVA_PASS Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in the Identity service. * In the ``[DEFAULT]`` section, configure the ``my_ip`` option: .. path /etc/nova/nova.conf .. code-block:: ini [DEFAULT] # ... my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the management network interface on your compute node, typically ``10.0.0.31`` for the first node in the :ref:`example architecture `. * Configure the ``[neutron]`` section of **/etc/nova/nova.conf**. Refer to the :neutron-doc:`Networking service install guide ` for more details. * In the ``[vnc]`` section, enable and configure remote console access: .. path /etc/nova/nova.conf .. code-block:: ini [vnc] # ... enabled = true server_listen = 0.0.0.0 server_proxyclient_address = $my_ip novncproxy_base_url = http://controller:6080/vnc_auto.html The server component listens on all IP addresses and the proxy component only listens on the management interface IP address of the compute node. The base URL indicates the location where you can use a web browser to access remote consoles of instances on this compute node. .. note:: If the web browser to access remote consoles resides on a host that cannot resolve the ``controller`` hostname, you must replace ``controller`` with the management interface IP address of the controller node. * In the ``[glance]`` section, configure the location of the Image service API: .. path /etc/nova/nova.conf .. code-block:: ini [glance] # ... api_servers = http://controller:9292 * In the ``[oslo_concurrency]`` section, configure the lock path: .. path /etc/nova/nova.conf .. 
code-block:: ini [oslo_concurrency] # ... lock_path = /var/run/nova * In the ``[placement]`` section, configure the Placement API: .. path /etc/nova/nova.conf .. code-block:: ini [placement] # ... region_name = RegionOne project_domain_name = Default project_name = service auth_type = password user_domain_name = Default auth_url = http://controller:5000/v3 username = placement password = PLACEMENT_PASS Replace ``PLACEMENT_PASS`` with the password you choose for the ``placement`` user in the Identity service. Comment out any other options in the ``[placement]`` section. #. Ensure the kernel module ``nbd`` is loaded. .. code-block:: console # modprobe nbd #. Ensure the module loads on every boot by adding ``nbd`` to the ``/etc/modules-load.d/nbd.conf`` file. Finalize installation --------------------- #. Determine whether your compute node supports hardware acceleration for virtual machines: .. code-block:: console $ egrep -c '(vmx|svm)' /proc/cpuinfo If this command returns a value of ``one or greater``, your compute node supports hardware acceleration which typically requires no additional configuration. If this command returns a value of ``zero``, your compute node does not support hardware acceleration and you must configure ``libvirt`` to use QEMU instead of KVM. * Edit the ``[libvirt]`` section in the ``/etc/nova/nova.conf`` file as follows: .. path /etc/nova/nova.conf .. code-block:: ini [libvirt] # ... virt_type = qemu #. Start the Compute service including its dependencies and configure them to start automatically when the system boots: .. code-block:: console # systemctl enable libvirtd.service openstack-nova-compute.service # systemctl start libvirtd.service openstack-nova-compute.service .. note:: If the ``nova-compute`` service fails to start, check ``/var/log/nova/nova-compute.log``. The error message ``AMQP server on controller:5672 is unreachable`` likely indicates that the firewall on the controller node is preventing access to port 5672. Configure the firewall to open port 5672 on the controller node and restart ``nova-compute`` service on the compute node. Add the compute node to the cell database ----------------------------------------- .. important:: Run the following commands on the **controller** node. #. Source the admin credentials to enable admin-only CLI commands, then confirm there are compute hosts in the database: .. code-block:: console $ . admin-openrc $ openstack compute service list --service nova-compute +----+-------+--------------+------+-------+---------+----------------------------+ | ID | Host | Binary | Zone | State | Status | Updated At | +----+-------+--------------+------+-------+---------+----------------------------+ | 1 | node1 | nova-compute | nova | up | enabled | 2017-04-14T15:30:44.000000 | +----+-------+--------------+------+-------+---------+----------------------------+ #. Discover compute hosts: .. code-block:: console # su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova Found 2 cell mappings. Skipping cell0 since it does not contain hosts. Getting compute nodes from cell 'cell1': ad5a5985-a719-4567-98d8-8d148aaae4bc Found 1 computes in cell: ad5a5985-a719-4567-98d8-8d148aaae4bc Checking host mapping for compute host 'compute': fe58ddc1-1d65-4f87-9456-bc040dc106b3 Creating host mapping for compute host 'compute': fe58ddc1-1d65-4f87-9456-bc040dc106b3 .. note:: When you add new compute nodes, you must run ``nova-manage cell_v2 discover_hosts`` on the controller node to register those new compute nodes. 
Alternatively, you can set an appropriate interval in ``/etc/nova/nova.conf``: .. code-block:: ini [scheduler] discover_hosts_in_cells_interval = 300 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/install/compute-install-rdo.rst0000664000175000017500000002010400000000000022726 0ustar00zuulzuul00000000000000Install and configure a compute node for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Compute service on a compute node. The service supports several hypervisors to deploy instances or virtual machines (VMs). For simplicity, this configuration uses the Quick EMUlator (QEMU) hypervisor with the kernel-based VM (KVM) extension on compute nodes that support hardware acceleration for virtual machines. On legacy hardware, this configuration uses the generic QEMU hypervisor. You can follow these instructions with minor modifications to horizontally scale your environment with additional compute nodes. .. note:: This section assumes that you are following the instructions in this guide step-by-step to configure the first compute node. If you want to configure additional compute nodes, prepare them in a similar fashion to the first compute node in the :ref:`example architectures ` section. Each additional compute node requires a unique IP address. Install and configure components -------------------------------- .. include:: shared/note_configuration_vary_by_distribution.rst #. Install the packages: .. code-block:: console # dnf install openstack-nova-compute #. Edit the ``/etc/nova/nova.conf`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. path /etc/nova/nova.conf .. code-block:: ini [DEFAULT] # ... transport_url = rabbit://openstack:RABBIT_PASS@controller Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. * In the ``[service_user]`` section, configure :ref:`service user tokens `: .. path /etc/nova/nova.conf .. code-block:: ini [service_user] send_service_user_token = true auth_url = https://controller/identity auth_type = password project_domain_name = Default project_name = service user_domain_name = Default username = nova password = NOVA_PASS Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in the Identity service. * In the ``[DEFAULT]`` section, configure the ``my_ip`` option: .. path /etc/nova/nova.conf .. code-block:: ini [DEFAULT] # ... my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the management network interface on your compute node, typically 10.0.0.31 for the first node in the :ref:`example architecture `. * Configure the ``[neutron]`` section of **/etc/nova/nova.conf**. Refer to the :neutron-doc:`Networking service install guide ` for more details. * In the ``[vnc]`` section, enable and configure remote console access: .. path /etc/nova/nova.conf .. code-block:: ini [vnc] # ... enabled = true server_listen = 0.0.0.0 server_proxyclient_address = $my_ip novncproxy_base_url = http://controller:6080/vnc_auto.html The server component listens on all IP addresses and the proxy component only listens on the management interface IP address of the compute node. 
The base URL indicates the location where you can use a web browser to access remote consoles of instances on this compute node. .. note:: If the web browser to access remote consoles resides on a host that cannot resolve the ``controller`` hostname, you must replace ``controller`` with the management interface IP address of the controller node. * In the ``[glance]`` section, configure the location of the Image service API: .. path /etc/nova/nova.conf .. code-block:: ini [glance] # ... api_servers = http://controller:9292 * In the ``[oslo_concurrency]`` section, configure the lock path: .. path /etc/nova/nova.conf .. code-block:: ini [oslo_concurrency] # ... lock_path = /var/lib/nova/tmp * In the ``[placement]`` section, configure the Placement API: .. path /etc/nova/nova.conf .. code-block:: ini [placement] # ... region_name = RegionOne project_domain_name = Default project_name = service auth_type = password user_domain_name = Default auth_url = http://controller:5000/v3 username = placement password = PLACEMENT_PASS Replace ``PLACEMENT_PASS`` with the password you choose for the ``placement`` user in the Identity service. Comment out any other options in the ``[placement]`` section. Finalize installation --------------------- #. Determine whether your compute node supports hardware acceleration for virtual machines: .. code-block:: console $ egrep -c '(vmx|svm)' /proc/cpuinfo If this command returns a value of ``one or greater``, your compute node supports hardware acceleration which typically requires no additional configuration. If this command returns a value of ``zero``, your compute node does not support hardware acceleration and you must configure ``libvirt`` to use QEMU instead of KVM. * Edit the ``[libvirt]`` section in the ``/etc/nova/nova.conf`` file as follows: .. path /etc/nova/nova.conf .. code-block:: ini [libvirt] # ... virt_type = qemu #. Start the Compute service including its dependencies and configure them to start automatically when the system boots: .. code-block:: console # systemctl enable libvirtd.service openstack-nova-compute.service # systemctl start libvirtd.service openstack-nova-compute.service .. note:: If the ``nova-compute`` service fails to start, check ``/var/log/nova/nova-compute.log``. The error message ``AMQP server on controller:5672 is unreachable`` likely indicates that the firewall on the controller node is preventing access to port 5672. Configure the firewall to open port 5672 on the controller node and restart ``nova-compute`` service on the compute node. Add the compute node to the cell database ----------------------------------------- .. important:: Run the following commands on the **controller** node. #. Source the admin credentials to enable admin-only CLI commands, then confirm there are compute hosts in the database: .. code-block:: console $ . admin-openrc $ openstack compute service list --service nova-compute +----+-------+--------------+------+-------+---------+----------------------------+ | ID | Host | Binary | Zone | State | Status | Updated At | +----+-------+--------------+------+-------+---------+----------------------------+ | 1 | node1 | nova-compute | nova | up | enabled | 2017-04-14T15:30:44.000000 | +----+-------+--------------+------+-------+---------+----------------------------+ #. Discover compute hosts: .. code-block:: console # su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova Found 2 cell mappings. Skipping cell0 since it does not contain hosts. 
Getting compute nodes from cell 'cell1': ad5a5985-a719-4567-98d8-8d148aaae4bc Found 1 computes in cell: ad5a5985-a719-4567-98d8-8d148aaae4bc Checking host mapping for compute host 'compute': fe58ddc1-1d65-4f87-9456-bc040dc106b3 Creating host mapping for compute host 'compute': fe58ddc1-1d65-4f87-9456-bc040dc106b3 .. note:: When you add new compute nodes, you must run ``nova-manage cell_v2 discover_hosts`` on the controller node to register those new compute nodes. Alternatively, you can set an appropriate interval in ``/etc/nova/nova.conf``: .. code-block:: ini [scheduler] discover_hosts_in_cells_interval = 300 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/install/compute-install-ubuntu.rst0000664000175000017500000001751500000000000023500 0ustar00zuulzuul00000000000000Install and configure a compute node for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Compute service on a compute node. The service supports several hypervisors to deploy instances or virtual machines (VMs). For simplicity, this configuration uses the Quick EMUlator (QEMU) hypervisor with the kernel-based VM (KVM) extension on compute nodes that support hardware acceleration for virtual machines. On legacy hardware, this configuration uses the generic QEMU hypervisor. You can follow these instructions with minor modifications to horizontally scale your environment with additional compute nodes. .. note:: This section assumes that you are following the instructions in this guide step-by-step to configure the first compute node. If you want to configure additional compute nodes, prepare them in a similar fashion to the first compute node in the :ref:`example architectures ` section. Each additional compute node requires a unique IP address. Install and configure components -------------------------------- .. include:: shared/note_configuration_vary_by_distribution.rst #. Install the packages: .. code-block:: console # apt install nova-compute 2. Edit the ``/etc/nova/nova.conf`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. path /etc/nova/nova.conf .. code-block:: ini [DEFAULT] # ... transport_url = rabbit://openstack:RABBIT_PASS@controller Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. * In the ``[service_user]`` section, configure :ref:`service user tokens `: .. path /etc/nova/nova.conf .. code-block:: ini [service_user] send_service_user_token = true auth_url = https://controller/identity auth_type = password project_domain_name = Default project_name = service user_domain_name = Default username = nova password = NOVA_PASS Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in the Identity service. * In the ``[DEFAULT]`` section, configure the ``my_ip`` option: .. path /etc/nova/nova.conf .. code-block:: ini [DEFAULT] # ... my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the management network interface on your compute node, typically 10.0.0.31 for the first node in the :ref:`example architecture `. * Configure the ``[neutron]`` section of **/etc/nova/nova.conf**. Refer to the :neutron-doc:`Networking service install guide ` for more details. * In the ``[vnc]`` section, enable and configure remote console access: .. path /etc/nova/nova.conf .. code-block:: ini [vnc] # ... 
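# The options below enable the noVNC console proxy: the server component
# listens on all addresses, the proxy component uses this node's management
# address ($my_ip), and the base URL is what a web browser uses to reach
# instance consoles.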
enabled = true server_listen = 0.0.0.0 server_proxyclient_address = $my_ip novncproxy_base_url = http://controller:6080/vnc_auto.html The server component listens on all IP addresses and the proxy component only listens on the management interface IP address of the compute node. The base URL indicates the location where you can use a web browser to access remote consoles of instances on this compute node. .. note:: If the web browser to access remote consoles resides on a host that cannot resolve the ``controller`` hostname, you must replace ``controller`` with the management interface IP address of the controller node. * In the ``[glance]`` section, configure the location of the Image service API: .. path /etc/nova/nova.conf .. code-block:: ini [glance] # ... api_servers = http://controller:9292 * In the ``[oslo_concurrency]`` section, configure the lock path: .. path /etc/nova/nova.conf .. code-block:: ini [oslo_concurrency] # ... lock_path = /var/lib/nova/tmp * In the ``[placement]`` section, configure the Placement API: .. path /etc/nova/nova.conf .. code-block:: ini [placement] # ... region_name = RegionOne project_domain_name = Default project_name = service auth_type = password user_domain_name = Default auth_url = http://controller:5000/v3 username = placement password = PLACEMENT_PASS Replace ``PLACEMENT_PASS`` with the password you choose for the ``placement`` user in the Identity service. Comment out any other options in the ``[placement]`` section. Finalize installation --------------------- #. Determine whether your compute node supports hardware acceleration for virtual machines: .. code-block:: console $ egrep -c '(vmx|svm)' /proc/cpuinfo If this command returns a value of ``one or greater``, your compute node supports hardware acceleration which typically requires no additional configuration. If this command returns a value of ``zero``, your compute node does not support hardware acceleration and you must configure ``libvirt`` to use QEMU instead of KVM. * Edit the ``[libvirt]`` section in the ``/etc/nova/nova-compute.conf`` file as follows: .. path /etc/nova/nova-compute.conf .. code-block:: ini [libvirt] # ... virt_type = qemu #. Restart the Compute service: .. code-block:: console # service nova-compute restart .. note:: If the ``nova-compute`` service fails to start, check ``/var/log/nova/nova-compute.log``. The error message ``AMQP server on controller:5672 is unreachable`` likely indicates that the firewall on the controller node is preventing access to port 5672. Configure the firewall to open port 5672 on the controller node and restart ``nova-compute`` service on the compute node. Add the compute node to the cell database ----------------------------------------- .. important:: Run the following commands on the **controller** node. #. Source the admin credentials to enable admin-only CLI commands, then confirm there are compute hosts in the database: .. code-block:: console $ . admin-openrc $ openstack compute service list --service nova-compute +----+-------+--------------+------+-------+---------+----------------------------+ | ID | Host | Binary | Zone | State | Status | Updated At | +----+-------+--------------+------+-------+---------+----------------------------+ | 1 | node1 | nova-compute | nova | up | enabled | 2017-04-14T15:30:44.000000 | +----+-------+--------------+------+-------+---------+----------------------------+ #. Discover compute hosts: .. 
code-block:: console # su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova Found 2 cell mappings. Skipping cell0 since it does not contain hosts. Getting compute nodes from cell 'cell1': ad5a5985-a719-4567-98d8-8d148aaae4bc Found 1 computes in cell: ad5a5985-a719-4567-98d8-8d148aaae4bc Checking host mapping for compute host 'compute': fe58ddc1-1d65-4f87-9456-bc040dc106b3 Creating host mapping for compute host 'compute': fe58ddc1-1d65-4f87-9456-bc040dc106b3 .. note:: When you add new compute nodes, you must run ``nova-manage cell_v2 discover_hosts`` on the controller node to register those new compute nodes. Alternatively, you can set an appropriate interval in ``/etc/nova/nova.conf``: .. code-block:: ini [scheduler] discover_hosts_in_cells_interval = 300 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/install/compute-install.rst0000664000175000017500000000232700000000000022153 0ustar00zuulzuul00000000000000Install and configure a compute node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Compute service on a compute node for Ubuntu, openSUSE and SUSE Linux Enterprise, and Red Hat Enterprise Linux and CentOS. The service supports several hypervisors to deploy instances or virtual machines (VMs). For simplicity, this configuration uses the Quick EMUlator (QEMU) hypervisor with the kernel-based VM (KVM) extension on compute nodes that support hardware acceleration for virtual machines. On legacy hardware, this configuration uses the generic QEMU hypervisor. You can follow these instructions with minor modifications to horizontally scale your environment with additional compute nodes. .. note:: This section assumes that you are following the instructions in this guide step-by-step to configure the first compute node. If you want to configure additional compute nodes, prepare them in a similar fashion to the first compute node in the :ref:`example architectures ` section. Each additional compute node requires a unique IP address. .. toctree:: :glob: compute-install-ubuntu compute-install-rdo compute-install-obs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/install/controller-install-obs.rst0000664000175000017500000003366200000000000023451 0ustar00zuulzuul00000000000000Install and configure controller node for openSUSE and SUSE Linux Enterprise ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Compute service, code-named nova, on the controller node. Prerequisites ------------- Before you install and configure the Compute service, you must create databases, service credentials, and API endpoints. #. To create the databases, complete these steps: * Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console $ mysql -u root -p * Create the ``nova_api``, ``nova``, and ``nova_cell0`` databases: .. code-block:: console MariaDB [(none)]> CREATE DATABASE nova_api; MariaDB [(none)]> CREATE DATABASE nova; MariaDB [(none)]> CREATE DATABASE nova_cell0; * Grant proper access to the databases: .. 
code-block:: console MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \ IDENTIFIED BY 'NOVA_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \ IDENTIFIED BY 'NOVA_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \ IDENTIFIED BY 'NOVA_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \ IDENTIFIED BY 'NOVA_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' \ IDENTIFIED BY 'NOVA_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \ IDENTIFIED BY 'NOVA_DBPASS'; Replace ``NOVA_DBPASS`` with a suitable password. * Exit the database access client. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc #. Create the Compute service credentials: * Create the ``nova`` user: .. code-block:: console $ openstack user create --domain default --password-prompt nova User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | 8a7dbf5279404537b1c7b86c033620fe | | name | nova | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+ * Add the ``admin`` role to the ``nova`` user: .. code-block:: console $ openstack role add --project service --user nova admin .. note:: This command provides no output. * Create the ``nova`` service entity: .. code-block:: console $ openstack service create --name nova \ --description "OpenStack Compute" compute +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Compute | | enabled | True | | id | 060d59eac51b4594815603d75a00aba2 | | name | nova | | type | compute | +-------------+----------------------------------+ #. Create the Compute API service endpoints: .. 
code-block:: console $ openstack endpoint create --region RegionOne \ compute public http://controller:8774/v2.1 +--------------+-------------------------------------------+ | Field | Value | +--------------+-------------------------------------------+ | enabled | True | | id | 3c1caa473bfe4390a11e7177894bcc7b | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 060d59eac51b4594815603d75a00aba2 | | service_name | nova | | service_type | compute | | url | http://controller:8774/v2.1 | +--------------+-------------------------------------------+ $ openstack endpoint create --region RegionOne \ compute internal http://controller:8774/v2.1 +--------------+-------------------------------------------+ | Field | Value | +--------------+-------------------------------------------+ | enabled | True | | id | e3c918de680746a586eac1f2d9bc10ab | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 060d59eac51b4594815603d75a00aba2 | | service_name | nova | | service_type | compute | | url | http://controller:8774/v2.1 | +--------------+-------------------------------------------+ $ openstack endpoint create --region RegionOne \ compute admin http://controller:8774/v2.1 +--------------+-------------------------------------------+ | Field | Value | +--------------+-------------------------------------------+ | enabled | True | | id | 38f7af91666a47cfb97b4dc790b94424 | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 060d59eac51b4594815603d75a00aba2 | | service_name | nova | | service_type | compute | | url | http://controller:8774/v2.1 | +--------------+-------------------------------------------+ #. Install Placement service and configure user and endpoints: * Refer to the :placement-doc:`Placement service install guide ` for more information. Install and configure components -------------------------------- .. include:: shared/note_configuration_vary_by_distribution.rst .. note:: As of the Newton release, SUSE OpenStack packages are shipped with the upstream default configuration files. For example, ``/etc/nova/nova.conf`` has customizations in ``/etc/nova/nova.conf.d/010-nova.conf``. While the following instructions modify the default configuration file, adding a new file in ``/etc/nova/nova.conf.d`` achieves the same result. #. Install the packages: .. code-block:: console # zypper install \ openstack-nova-api \ openstack-nova-scheduler \ openstack-nova-conductor \ openstack-nova-novncproxy \ iptables #. Edit the ``/etc/nova/nova.conf`` file and complete the following actions: * In the ``[api_database]`` and ``[database]`` sections, configure database access: .. path /etc/nova/nova.conf .. code-block:: ini [api_database] # ... connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api [database] # ... connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova Replace ``NOVA_DBPASS`` with the password you chose for the Compute databases. * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. path /etc/nova/nova.conf .. code-block:: ini [DEFAULT] # ... transport_url = rabbit://openstack:RABBIT_PASS@controller:5672/ Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. * In the ``[keystone_authtoken]`` section, configure Identity service access: .. path /etc/nova/nova.conf .. code-block:: ini [keystone_authtoken] # ... 
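# These options tell nova how to validate incoming API tokens against the
# Identity service; the credentials below are those of the nova service
# user created in the Prerequisites section above.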
www_authenticate_uri = http://controller:5000/ auth_url = http://controller:5000/ memcached_servers = controller:11211 auth_type = password project_domain_name = Default user_domain_name = Default project_name = service username = nova password = NOVA_PASS Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in the Identity service. .. note:: Comment out or remove any other options in the ``[keystone_authtoken]`` section. * In the ``[service_user]`` section, configure :ref:`service user tokens `: .. path /etc/nova/nova.conf .. code-block:: ini [service_user] send_service_user_token = true auth_url = https://controller/identity auth_type = password project_domain_name = Default project_name = service user_domain_name = Default username = nova password = NOVA_PASS Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in the Identity service. * In the ``[DEFAULT]`` section, configure the ``my_ip`` option to use the management interface IP address of the controller node: .. path /etc/nova/nova.conf .. code-block:: ini [DEFAULT] # ... my_ip = 10.0.0.11 * Configure the ``[neutron]`` section of **/etc/nova/nova.conf**. Refer to the :neutron-doc:`Networking service install guide ` for more details. * In the ``[vnc]`` section, configure the VNC proxy to use the management interface IP address of the controller node: .. path /etc/nova/nova.conf .. code-block:: ini [vnc] enabled = true # ... server_listen = $my_ip server_proxyclient_address = $my_ip * In the ``[glance]`` section, configure the location of the Image service API: .. path /etc/nova/nova.conf .. code-block:: ini [glance] # ... api_servers = http://controller:9292 * In the ``[oslo_concurrency]`` section, configure the lock path: .. path /etc/nova/nova.conf .. code-block:: ini [oslo_concurrency] # ... lock_path = /var/run/nova * In the ``[placement]`` section, configure access to the Placement service: .. path /etc/nova/nova.conf .. code-block:: ini [placement] # ... region_name = RegionOne project_domain_name = Default project_name = service auth_type = password user_domain_name = Default auth_url = http://controller:5000/v3 username = placement password = PLACEMENT_PASS Replace ``PLACEMENT_PASS`` with the password you choose for the ``placement`` service user created when installing :placement-doc:`Placement `. Comment out or remove any other options in the ``[placement]`` section. #. Populate the ``nova-api`` database: .. code-block:: console # su -s /bin/sh -c "nova-manage api_db sync" nova .. note:: Ignore any deprecation messages in this output. #. Register the ``cell0`` database: .. code-block:: console # su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova #. Create the ``cell1`` cell: .. code-block:: console # su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova #. Populate the nova database: .. code-block:: console # su -s /bin/sh -c "nova-manage db sync" nova #. Verify nova cell0 and cell1 are registered correctly: .. 
code-block:: console # su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova +-------+--------------------------------------+----------------------------------------------------+--------------------------------------------------------------+----------+ | Name | UUID | Transport URL | Database Connection | Disabled | +-------+--------------------------------------+----------------------------------------------------+--------------------------------------------------------------+----------+ | cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@controller/nova_cell0?charset=utf8 | False | | cell1 | f690f4fd-2bc5-4f15-8145-db561a7b9d3d | rabbit://openstack:****@controller:5672/nova_cell1 | mysql+pymysql://nova:****@controller/nova_cell1?charset=utf8 | False | +-------+--------------------------------------+----------------------------------------------------+--------------------------------------------------------------+----------+ Finalize installation --------------------- * Start the Compute services and configure them to start when the system boots: .. code-block:: console # systemctl enable \ openstack-nova-api.service \ openstack-nova-scheduler.service \ openstack-nova-conductor.service \ openstack-nova-novncproxy.service # systemctl start \ openstack-nova-api.service \ openstack-nova-scheduler.service \ openstack-nova-conductor.service \ openstack-nova-novncproxy.service ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/install/controller-install-rdo.rst0000664000175000017500000003307300000000000023446 0ustar00zuulzuul00000000000000Install and configure controller node for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Compute service, code-named nova, on the controller node. Prerequisites ------------- Before you install and configure the Compute service, you must create databases, service credentials, and API endpoints. #. To create the databases, complete these steps: * Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console $ mysql -u root -p * Create the ``nova_api``, ``nova``, and ``nova_cell0`` databases: .. code-block:: console MariaDB [(none)]> CREATE DATABASE nova_api; MariaDB [(none)]> CREATE DATABASE nova; MariaDB [(none)]> CREATE DATABASE nova_cell0; * Grant proper access to the databases: .. code-block:: console MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \ IDENTIFIED BY 'NOVA_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \ IDENTIFIED BY 'NOVA_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \ IDENTIFIED BY 'NOVA_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \ IDENTIFIED BY 'NOVA_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' \ IDENTIFIED BY 'NOVA_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \ IDENTIFIED BY 'NOVA_DBPASS'; Replace ``NOVA_DBPASS`` with a suitable password. * Exit the database access client. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc #. Create the Compute service credentials: * Create the ``nova`` user: .. 
code-block:: console $ openstack user create --domain default --password-prompt nova User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | 8a7dbf5279404537b1c7b86c033620fe | | name | nova | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+ * Add the ``admin`` role to the ``nova`` user: .. code-block:: console $ openstack role add --project service --user nova admin .. note:: This command provides no output. * Create the ``nova`` service entity: .. code-block:: console $ openstack service create --name nova \ --description "OpenStack Compute" compute +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Compute | | enabled | True | | id | 060d59eac51b4594815603d75a00aba2 | | name | nova | | type | compute | +-------------+----------------------------------+ #. Create the Compute API service endpoints: .. code-block:: console $ openstack endpoint create --region RegionOne \ compute public http://controller:8774/v2.1 +--------------+-------------------------------------------+ | Field | Value | +--------------+-------------------------------------------+ | enabled | True | | id | 3c1caa473bfe4390a11e7177894bcc7b | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 060d59eac51b4594815603d75a00aba2 | | service_name | nova | | service_type | compute | | url | http://controller:8774/v2.1 | +--------------+-------------------------------------------+ $ openstack endpoint create --region RegionOne \ compute internal http://controller:8774/v2.1 +--------------+-------------------------------------------+ | Field | Value | +--------------+-------------------------------------------+ | enabled | True | | id | e3c918de680746a586eac1f2d9bc10ab | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 060d59eac51b4594815603d75a00aba2 | | service_name | nova | | service_type | compute | | url | http://controller:8774/v2.1 | +--------------+-------------------------------------------+ $ openstack endpoint create --region RegionOne \ compute admin http://controller:8774/v2.1 +--------------+-------------------------------------------+ | Field | Value | +--------------+-------------------------------------------+ | enabled | True | | id | 38f7af91666a47cfb97b4dc790b94424 | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 060d59eac51b4594815603d75a00aba2 | | service_name | nova | | service_type | compute | | url | http://controller:8774/v2.1 | +--------------+-------------------------------------------+ #. Install Placement service and configure user and endpoints: * Refer to the :placement-doc:`Placement service install guide ` for more information. Install and configure components -------------------------------- .. include:: shared/note_configuration_vary_by_distribution.rst #. Install the packages: .. code-block:: console # dnf install openstack-nova-api openstack-nova-conductor \ openstack-nova-novncproxy openstack-nova-scheduler #. Edit the ``/etc/nova/nova.conf`` file and complete the following actions: * In the ``[api_database]`` and ``[database]`` sections, configure database access: .. path /etc/nova/nova.conf .. code-block:: ini [api_database] # ... 
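# Each connection value below is a SQLAlchemy database URL of the form
# driver://user:password@host/database (shown here only as an illustrative
# breakdown, not an extra option to set).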
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api [database] # ... connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova Replace ``NOVA_DBPASS`` with the password you chose for the Compute databases. * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access (RabbitMQ should be already `installed and configured `_): .. path /etc/nova/nova.conf .. code-block:: ini [DEFAULT] # ... transport_url = rabbit://openstack:RABBIT_PASS@controller:5672/ Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. * In the ``[keystone_authtoken]`` section, configure Identity service access: .. path /etc/nova/nova.conf .. code-block:: ini [keystone_authtoken] # ... www_authenticate_uri = http://controller:5000/ auth_url = http://controller:5000/ memcached_servers = controller:11211 auth_type = password project_domain_name = Default user_domain_name = Default project_name = service username = nova password = NOVA_PASS Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in the Identity service. .. note:: Comment out or remove any other options in the ``[keystone_authtoken]`` section. * In the ``[service_user]`` section, configure :ref:`service user tokens `: .. path /etc/nova/nova.conf .. code-block:: ini [service_user] send_service_user_token = true auth_url = https://controller/identity auth_type = password project_domain_name = Default project_name = service user_domain_name = Default username = nova password = NOVA_PASS Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in the Identity service. * In the ``[DEFAULT]`` section, configure the ``my_ip`` option to use the management interface IP address of the controller node: .. path /etc/nova/nova.conf .. code-block:: ini [DEFAULT] # ... my_ip = 10.0.0.11 * Configure the ``[neutron]`` section of **/etc/nova/nova.conf**. Refer to the :neutron-doc:`Networking service install guide ` for more details. * In the ``[vnc]`` section, configure the VNC proxy to use the management interface IP address of the controller node: .. path /etc/nova/nova.conf .. code-block:: ini [vnc] enabled = true # ... server_listen = $my_ip server_proxyclient_address = $my_ip * In the ``[glance]`` section, configure the location of the Image service API: .. path /etc/nova/nova.conf .. code-block:: ini [glance] # ... api_servers = http://controller:9292 * In the ``[oslo_concurrency]`` section, configure the lock path: .. path /etc/nova/nova.conf .. code-block:: ini [oslo_concurrency] # ... lock_path = /var/lib/nova/tmp * In the ``[placement]`` section, configure access to the Placement service: .. path /etc/nova/nova.conf .. code-block:: ini [placement] # ... region_name = RegionOne project_domain_name = Default project_name = service auth_type = password user_domain_name = Default auth_url = http://controller:5000/v3 username = placement password = PLACEMENT_PASS Replace ``PLACEMENT_PASS`` with the password you choose for the ``placement`` service user created when installing :placement-doc:`Placement `. Comment out or remove any other options in the ``[placement]`` section. #. Populate the ``nova-api`` database: .. code-block:: console # su -s /bin/sh -c "nova-manage api_db sync" nova .. note:: Ignore any deprecation messages in this output. #. Register the ``cell0`` database: .. code-block:: console # su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova #. Create the ``cell1`` cell: .. 
code-block:: console # su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova #. Populate the nova database: .. code-block:: console # su -s /bin/sh -c "nova-manage db sync" nova #. Verify nova cell0 and cell1 are registered correctly: .. code-block:: console # su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova +-------+--------------------------------------+----------------------------------------------------+--------------------------------------------------------------+----------+ | Name | UUID | Transport URL | Database Connection | Disabled | +-------+--------------------------------------+----------------------------------------------------+--------------------------------------------------------------+----------+ | cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@controller/nova_cell0?charset=utf8 | False | | cell1 | f690f4fd-2bc5-4f15-8145-db561a7b9d3d | rabbit://openstack:****@controller:5672/nova_cell1 | mysql+pymysql://nova:****@controller/nova_cell1?charset=utf8 | False | +-------+--------------------------------------+----------------------------------------------------+--------------------------------------------------------------+----------+ Finalize installation --------------------- * Start the Compute services and configure them to start when the system boots: .. code-block:: console # systemctl enable \ openstack-nova-api.service \ openstack-nova-scheduler.service \ openstack-nova-conductor.service \ openstack-nova-novncproxy.service # systemctl start \ openstack-nova-api.service \ openstack-nova-scheduler.service \ openstack-nova-conductor.service \ openstack-nova-novncproxy.service ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/install/controller-install-ubuntu.rst0000664000175000017500000003252400000000000024204 0ustar00zuulzuul00000000000000Install and configure controller node for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Compute service, code-named nova, on the controller node. Prerequisites ------------- Before you install and configure the Compute service, you must create databases, service credentials, and API endpoints. #. To create the databases, complete these steps: * Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console # mysql * Create the ``nova_api``, ``nova``, and ``nova_cell0`` databases: .. code-block:: console MariaDB [(none)]> CREATE DATABASE nova_api; MariaDB [(none)]> CREATE DATABASE nova; MariaDB [(none)]> CREATE DATABASE nova_cell0; * Grant proper access to the databases: .. code-block:: console MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \ IDENTIFIED BY 'NOVA_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \ IDENTIFIED BY 'NOVA_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \ IDENTIFIED BY 'NOVA_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \ IDENTIFIED BY 'NOVA_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' \ IDENTIFIED BY 'NOVA_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \ IDENTIFIED BY 'NOVA_DBPASS'; Replace ``NOVA_DBPASS`` with a suitable password. * Exit the database access client. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. 
code-block:: console $ . admin-openrc #. Create the Compute service credentials: * Create the ``nova`` user: .. code-block:: console $ openstack user create --domain default --password-prompt nova User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | 8a7dbf5279404537b1c7b86c033620fe | | name | nova | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+ * Add the ``admin`` role to the ``nova`` user: .. code-block:: console $ openstack role add --project service --user nova admin .. note:: This command provides no output. * Create the ``nova`` service entity: .. code-block:: console $ openstack service create --name nova \ --description "OpenStack Compute" compute +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Compute | | enabled | True | | id | 060d59eac51b4594815603d75a00aba2 | | name | nova | | type | compute | +-------------+----------------------------------+ #. Create the Compute API service endpoints: .. code-block:: console $ openstack endpoint create --region RegionOne \ compute public http://controller:8774/v2.1 +--------------+-------------------------------------------+ | Field | Value | +--------------+-------------------------------------------+ | enabled | True | | id | 3c1caa473bfe4390a11e7177894bcc7b | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 060d59eac51b4594815603d75a00aba2 | | service_name | nova | | service_type | compute | | url | http://controller:8774/v2.1 | +--------------+-------------------------------------------+ $ openstack endpoint create --region RegionOne \ compute internal http://controller:8774/v2.1 +--------------+-------------------------------------------+ | Field | Value | +--------------+-------------------------------------------+ | enabled | True | | id | e3c918de680746a586eac1f2d9bc10ab | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 060d59eac51b4594815603d75a00aba2 | | service_name | nova | | service_type | compute | | url | http://controller:8774/v2.1 | +--------------+-------------------------------------------+ $ openstack endpoint create --region RegionOne \ compute admin http://controller:8774/v2.1 +--------------+-------------------------------------------+ | Field | Value | +--------------+-------------------------------------------+ | enabled | True | | id | 38f7af91666a47cfb97b4dc790b94424 | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 060d59eac51b4594815603d75a00aba2 | | service_name | nova | | service_type | compute | | url | http://controller:8774/v2.1 | +--------------+-------------------------------------------+ #. Install Placement service and configure user and endpoints: * Refer to the :placement-doc:`Placement service install guide ` for more information. Install and configure components -------------------------------- .. include:: shared/note_configuration_vary_by_distribution.rst #. Install the packages: .. code-block:: console # apt install nova-api nova-conductor nova-novncproxy nova-scheduler #. Edit the ``/etc/nova/nova.conf`` file and complete the following actions: * In the ``[api_database]`` and ``[database]`` sections, configure database access: .. path /etc/nova/nova.conf .. 
code-block:: ini [api_database] # ... connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api [database] # ... connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova Replace ``NOVA_DBPASS`` with the password you chose for the Compute databases. * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access (RabbitMQ should be already `installed and configured `_): .. path /etc/nova/nova.conf .. code-block:: ini [DEFAULT] # ... transport_url = rabbit://openstack:RABBIT_PASS@controller:5672/ Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. * In the ``[keystone_authtoken]`` section, configure Identity service access: .. path /etc/nova/nova.conf .. code-block:: ini [keystone_authtoken] # ... www_authenticate_uri = http://controller:5000/ auth_url = http://controller:5000/ memcached_servers = controller:11211 auth_type = password project_domain_name = Default user_domain_name = Default project_name = service username = nova password = NOVA_PASS Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in the Identity service. .. note:: Comment out or remove any other options in the ``[keystone_authtoken]`` section. * In the ``[service_user]`` section, configure :ref:`service user tokens `: .. path /etc/nova/nova.conf .. code-block:: ini [service_user] send_service_user_token = true auth_url = https://controller/identity auth_type = password project_domain_name = Default project_name = service user_domain_name = Default username = nova password = NOVA_PASS Replace ``NOVA_PASS`` with the password you chose for the ``nova`` user in the Identity service. * In the ``[DEFAULT]`` section, configure the ``my_ip`` option to use the management interface IP address of the controller node: .. path /etc/nova/nova.conf .. code-block:: ini [DEFAULT] # ... my_ip = 10.0.0.11 * Configure the ``[neutron]`` section of **/etc/nova/nova.conf**. Refer to the :neutron-doc:`Networking service install guide ` for more information. * In the ``[vnc]`` section, configure the VNC proxy to use the management interface IP address of the controller node: .. path /etc/nova/nova.conf .. code-block:: ini [vnc] enabled = true # ... server_listen = $my_ip server_proxyclient_address = $my_ip * In the ``[glance]`` section, configure the location of the Image service API: .. path /etc/nova/nova.conf .. code-block:: ini [glance] # ... api_servers = http://controller:9292 * In the ``[oslo_concurrency]`` section, configure the lock path: .. path /etc/nova/nova.conf .. code-block:: ini [oslo_concurrency] # ... lock_path = /var/lib/nova/tmp * Due to a packaging bug, remove the ``log_dir`` option from the ``[DEFAULT]`` section. * In the ``[placement]`` section, configure access to the Placement service: .. path /etc/nova/nova.conf .. code-block:: ini [placement] # ... region_name = RegionOne project_domain_name = Default project_name = service auth_type = password user_domain_name = Default auth_url = http://controller:5000/v3 username = placement password = PLACEMENT_PASS Replace ``PLACEMENT_PASS`` with the password you choose for the ``placement`` service user created when installing :placement-doc:`Placement `. Comment out or remove any other options in the ``[placement]`` section. #. Populate the ``nova-api`` database: .. code-block:: console # su -s /bin/sh -c "nova-manage api_db sync" nova .. note:: Ignore any deprecation messages in this output. #. Register the ``cell0`` database: .. 
#. Populate the ``nova-api`` database:

   .. code-block:: console

      # su -s /bin/sh -c "nova-manage api_db sync" nova

   .. note::

      Ignore any deprecation messages in this output.

#. Register the ``cell0`` database:

   .. code-block:: console

      # su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova

#. Create the ``cell1`` cell:

   .. code-block:: console

      # su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova

#. Populate the nova database:

   .. code-block:: console

      # su -s /bin/sh -c "nova-manage db sync" nova

#. Verify nova cell0 and cell1 are registered correctly:

   .. code-block:: console

      # su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
      +-------+--------------------------------------+----------------------------------------------------+--------------------------------------------------------------+----------+
      | Name  | UUID                                 | Transport URL                                      | Database Connection                                          | Disabled |
      +-------+--------------------------------------+----------------------------------------------------+--------------------------------------------------------------+----------+
      | cell0 | 00000000-0000-0000-0000-000000000000 | none:/                                             | mysql+pymysql://nova:****@controller/nova_cell0?charset=utf8 | False    |
      | cell1 | f690f4fd-2bc5-4f15-8145-db561a7b9d3d | rabbit://openstack:****@controller:5672/nova_cell1 | mysql+pymysql://nova:****@controller/nova_cell1?charset=utf8 | False    |
      +-------+--------------------------------------+----------------------------------------------------+--------------------------------------------------------------+----------+

Finalize installation
---------------------

* Restart the Compute services:

  .. code-block:: console

     # service nova-api restart
     # service nova-scheduler restart
     # service nova-conductor restart
     # service nova-novncproxy restart

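Once the services have been restarted, you can optionally confirm that they came up cleanly and registered themselves. This check is not part of the upstream steps; it assumes the Ubuntu service names used above and the ``admin-openrc`` credentials (a ``nova-compute`` entry only appears after a compute node has been installed):

.. code-block:: console

   # service nova-api status
   $ openstack compute service list

The listing should show ``nova-scheduler`` and ``nova-conductor`` reported as ``up`` on the controller node.
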
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/install/controller-install.rst0000664000175000017500000000054500000000000022662 0ustar00zuulzuul00000000000000
Install and configure controller node
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This section describes how to install and configure the Compute service on
the controller node for Ubuntu, openSUSE and SUSE Linux Enterprise, and Red
Hat Enterprise Linux and CentOS.

.. toctree::

   controller-install-ubuntu
   controller-install-obs
   controller-install-rdo

nova-32.0.0/doc/source/install/figures/ (directory entry)
nova-32.0.0/doc/source/install/figures/hwreqs.graffle, hwreqs.png, hwreqs.svg
   [Figure: "Hardware Requirements" diagram listing suggested CPU, RAM, storage, and NIC counts for the controller, compute, block storage, and object storage nodes, with core and optional components distinguished. Binary image data omitted.]
nova-32.0.0/doc/source/install/figures/network1-services.graffle, network1-services.png
   [Figure sources for the networking option 1 services diagram; binary image data omitted.]
LC ;g(2u_l+*.WCx[sϾrJܫd?i_ ?F=ܱN#RLgѭ=4Jzpj†7ң˕J;4W0ĞS@2gWag.n( vnQ?-83 MldJcPd!_1\e5,{/F.~^.!p ԙX, )mꬅx6;^Ъx[j[Ș}`&*W!T W@EpG5=W؍tUڨ ju@C %qjdwA Rd4EML3Z6AWj@VvJczM5%&!եK؅czvm)}VZPfiߝ}ۋXx NɭۤFZJM:W8qH+.4h BLݱF>`B HquTI]k^"4X;V)PĄu@ GS"MMfͫŁUń<n\Y?LڻL1?7kS~|Y.:QV_j>r.> PTyfT&c22or C5W;iB`vi.㣥fNo{XtXб'-5!d^-ʵ0 5%=/ +&8'j+̝rܦ<$PϥegL?MW9aOi axj*T4ys:f7|?hfCY+=״SKC74,6FS3 Y[h9sìP~S@eA8lRKx䊚= 0+֩C{7H߇ CQx BP@M}p3U8YS@Q+w4ɹ i+cjr2w'ҰSTP>0 3rO\$r@K2k>+{a>Ld~&`g[ F~e' $+M_\wfRe+NX]֣ ëܥ3J(i(p@b:0$mF:e`A|Vȸ,@I}pFm5F.XnQ^j$~00y^ GIm'J ey> `c84ih&km{o9ZaƂrpPՂ!7}~ԪiAiɬ[[Ε;Oiڦ}՘=qxnЕ)Y$U^97/\-W|"~{[X<ǔ4X*{h/=.^F'MLLnj[$ tZ;[i/BN{󗉁]ZB;[/<'/ejX ,†OŊ,N[hiclx[3&\!lX*tm9KK{}do<&[F"-{BLb`L&tiYO8x Z-dŨ#L|l-|G<(s}Fj&H;>n#U؟Ln0w;px0R#gE= >6)ymED5@&ݴSlK&92Rsmӄo;vX"#j`[0̂ܣv!foΚ0@Iw0p5Z X7+춅4 aHU5H胓zR >>c@ cEzH1 Yo:k?|yN4'5) -QE +K[ SN2D(pXоix`FE*F)$Z ]ԌC0駥=oUjѱkO9 AztG&OEv˕+&n25,mB4Xu x_*1!pt0_-وU` =6i`u ]NBqJrR|5KN-_ a*'hmLc#D}C#PH5r'̶8?oo}hNm$(tj95"h흜q<8yn جk7pg}2?{:?(Gs8N ᛱ ΰ4Mf2XWNr`@nh*6>>ն3J̉X:#I2E}I9(v Z&Njpt%p[k;eK UPQ*wfX;}̍ÇB*yʎVFRZ= o2??^#!}JAA"(>X@[c[~Zg/13`F,cT _^M )f}C?{vOP`iJa Gѥ3L<*W9*X`v}:61O39}L'pX0v`g-ҷBÔ 6h&Fh^GA\{K'}éc1[QV0Gqv}d(9r+K h0ݰ@3Т sđFвkq,  =+z(UZPmݳ]$.?4燖hDQj[$bO WiPs[x~F \ipL0 i}'}ˋg"6!+!aƆ2Z#M]Z! 44gi A4eRw(lFY@ҸEiwvl)ɝ$H:jNܸ](d*mYuмж ( >4šs[˹cc®MGrlGi D WS3WMxf]Ԣiͥ=~p?'Pw[EN6ErvT2Z26 4fJf9J!(*'ͧ80𱡉[cWժq+CB{u@h^2dͼ`e( ӼU3o6Ws)krҎ{K~PY>ҹ S٩ڎq_}KAC zmwtUѳؓ'DԪ0ip,C:m f5c:Cf(\3楚y/0WOeT⥤WU/&25OHj :< _\D.34ԼR*J 7:cLh:/V!e 0T*䉌#+2 i <j$]!,CS2ޅ<4*DvW]fY&He俗0r~qUha_+zX%),F$W!-KKG>{w%t[0-M; S`1t ѷR]^HAv0j1 Mw6% c¨皽Y(ESBgw4}1k<YA f%=00TyQ PG_Mىa-Y*aC lN6A hRŭ="7JRg✅?&[ ;Q`"{pssݭ^줍Ƭp!GaC \5IkL9.3`M9y^^yxd6[Vs@6Cf_Go,)):^5 0PpAiFJJ};zRX'ܔ@=`ئ!b}O-^j# 6~j}vV ۹'We N#s]f8w&[@1SH1P|QhN i CfǼ@s}x4`lfn1/މ :jګʹni¹ϴ}/q839w7%5T*JMZj?i|ґIun j&SN) 4rX^oOmO:o@.Qg_L^[ y> Z2 اXٿJ78~T, zt KH:tp2&j" l0gr@!2Rvz7HcĐ `Prt['㇢޴-#s_.l(ՙR30 (YVeoH{A1hB2dyh3\rMf'-W #{%v: aEC'VN|Z &S†l&>[AmRc~e*-'` CQ10w[8pG\.^8ekaeNLoS.}&Rܧy@isq!BA70͈ԫ}Ue꜌p:=$94 u-\!f ye/6orT0 N@"P@z%ݧ2Cn"UZgd,~Wngo4JpHa +K>߯ u dh [򲐇 Ԛ< _ʨBtuRĖm$@}<7%l(3*E:}ZϹ npXň76Yvqo`jv"@9)0a57E>/޼%<jvA8tkpmέ €*l-1b%-#lN*f}Cxmkз2d5 ,1&d́Ct0ctioe<(J` GӰ5H ֚e !u.Ms,V9euAZfڹamXF FsS% &f\-DHlc -3ĬL؇A݀>L:~۪eSRsE:tndA:$b> HJn%p[FꋒGjbЯXT׀0&;d4etZOzf)z }"ǏK߼7s  6hIaCM8ԡ XfgyF[+,c-1=kadvh6~m 0ͼ^Yb[n_WF@2ƈhi:6w{i"+M1Kڣ͝=s9 MpCvHө0#Awa^n7ݾf9Zś 2 TpU"pd\Ͽfm;6 iܴ_Py^yⶀ,驩ׯ_9}$&*&28Wd(P1]i5HZC؁ApӚSQ8~h־s 5 :Si3Hݷ\ūZzzzV`q$def'S'VJ8iR']8}ȁ#{*(hpAs?؏+}c[+# hFaʗ }>:zJ,#y-+hFaF.^c%hxA9ΰd}hڦݓ66_&C#IRj"מ$o>l8&@?<ESnJdߒ) s *u3u*:@k^=g^lQÅ {YR|\¹~<:):V}zyh7 9np ͱEj : cb4˗Vu1{+7 W Vi1{]q+Ёw"k&D ޚ1Yd a&@MJD0tT|y~up/ c S:~%f9-F?v4}#F7~sW*TB5sp(PqA-RpP%dt1`LnI?m6.(JIW!{k@fKϊ>$WlصJ$Cf 6 j:di(pcS*a-ecghziB;8`'c WMҨbfy/sp (ܫqcP _5vh5O8ĀvނE9ʁU) tqڡ*afIǜX8P4cK:7Q`*&NLp&}(g8~X.P(C H`Cy7E<◠G7[ E;p ̊ gz_~_/ukԤ2EE rgn\fķ[ o틬Z`"gQV9P}k%UN%d N({sǪ}ڽԱ^N{[yUuNݧ` |e> od{^5X;!P8'E&13}\0ƈQɧeVhVFY`mסHNݸ$6ޓa9;tߎ37#VG,*4QRԢp,b I[$6H9aTo2 i)Qz Ӆ jN2qQ!⎡w H-Ƙ)rHJEy[?KKP!g?1v0d25DMu%U@߻iI&Lfi̞:}3fM»ݗrC;/sqzy w-Мb7B_tH01!E{AMDo[QtoHܳcWlO3@˳ߋL^pZ 1(%JxE  ?  "NQH=[g_!=< ;zZϛ=%+ا P+idBHfoh7 +FqR:&uLN%FaU x2S̬Gy)HլNW)gR79\+[V50Fv3-b̕0krBw;q*?>7DMLr8lI6%#]FUgSC{GΈ Ƣ 0-Rr3%D}:6S=8$+?俨A7gqj@jW"ŹK hC 4'Y̜GkRwLGm?4=~81k 0WEJbw #װvuѯsN["ő3E&!FUx0 ^mqٳ}zyr~h[Q 5ݻLGϜ'0OS&sT)}=7V(oǏBVYf5aC' t >w1a,&cb>uR9%Ǽ <.f#}%dƭgAR@0V' XԂF@m0wb+·Q,D`Bƽz`̘+#:,>t\% ZLd2g⻿bEuDDߢ N;LA ELNE)M97%38f4 גMWPQQ?&97\mߐ~-Lar:4X 54sX`f~vCj/ 3?s fa.  _fkx66R0Lwj^4aTOPn @h/O0>jP $ :}`-@P#0q=D/c7%ꑗ_769sXgJKInLLqسQN&)TUaT+-ʭh !@fCoI-KjmB2id]j*.^/l_s4mQWEx OtW_i , 7r~j,ӒG"IjTܴL_x%Y~XoL:ŏ;?EK :>;>}ZY[Eg3RS҅{}qʌYY3YMRcHMIrMMˀFJDz4b!  
"Ѷڣ pѭՑ*Va l OS{mɲ{w\yorrϝ8Z59\%lZ5F.ze o NxaZwyz>J\~ʕkAR?\a.~' 0 )&_OQnN}ˉ}7rş#E˅p#2g2߄2z):(y&L|1[kihl*@IDAT>_1{5?K IjۧLf`zR8{IZ<#r񝑊((am[Y٬'o8d(lYWcvnE#Ō&ÌZij63cլ[:Y3x@Y휐PZ/];ƺD#˽70 YnP6I3˷ V> ԔP;HD i]`fGtHVB.[v m'}Q!c舚lax}$|UzeAcL8&oy}#n[Ex8K}!awŘUы: MzGx߾mɭ>B}ێ>>m'ppX zV3T{ND3\uo.E]YͼvULD(U#lP ꜽ=UuMF]5-MXuoKnv rǖcˑO*iKdHAp-Ё;dQ>%nxs>bj}E1y{ Ԡ1ًeVYN3cYI`[3&sw'n,;"(Wi]d^h 1>?翿_\c˩bÒQ%K#7":u߁ľiCz~}vq9j)COrY^9>m4%aDi{-#Pˈ@D'̿X/1j" nifؓ'D$ `✜'n,ZY6Vp kD9MKo>Ln#Q|1R`eC>ex{tB?mIn;;p7Ğ8%p#8FJ9.Y׳4m0Ynf_+x{OEi\ϯђt(c> ;v|l4n:8z=n?_;~SݸTMAC [/]mJ@L#͙ne C֪Q-5/J*h(\ր#<}1qYؿgג%h6HS.ͽ6fnYEfffzi6f1FjS5PfI:}7]}|-=V8^&skе0r\?udMLfagS8_sdϒvi4Aa_Khۘ-oɏ=p'M+>EAZyhDζ5;[ń\6ּҥ$INQgQqb,]q0<e{s:M|]  Z""U%b,K8[4#(K1 C^^}@*&ǏeHŤc\6x9~8m2t^6Q1qg=Un%S-RР`$$x?ׯcbY 9Ո 'mDKO̜(<}>}%3 i"C|b#G-`UWbC!8AcH$hnnAt KHJ?~ib|dlZ,DU9tնzݑ']`MXi@)1|S* }@h gUE~.9_/љZJ-Z*|dQt$_t >˛2RSŴeG?slS1]u]i._5C7+68GShu h>E}9Dh `R.C-<_ Z:Ug~c{~͎ռ{`gDgV'h"[.?V*:|ck)0ǰ9s`KYn Xod_r%(PVpv1 ʼn%vDذ׶eew{\+><1YVEb?4b/\KT81; fS1ܣ؊_Cb52K?8vq7!;d4pZ}s;?3FSBj(xP60< o˨St7fe&El8OpR)aTiY3^U-RB07 [֮]ԱW?Yj@ ͐ u(v]+(Ӹ'^x}1Lbq6E\7>5}v ))rhR"pA%bïbr-=bQeu'm }0cq?z֪x qǘ^ݿs ċoЍOH̴? Yxki"!"$͚uw~1x=4/'qIWAFݑۉN-rNlaːO;@WO~Y쨵*T*\fQ}pDD8#6m@/UVg⯈[#Ec:1}ABQ={񂛧A|{yVo=ct&޴' j?¢+|o~hnn0Vy$S\MtlVO7j%8t ٶqD$C;Rk뚝H~v}D$Cce[4.{AG;~*q!qBF7 ίj4o[X{t2ovvVo045]T>Wcn i&fg>h 迤S1뷭Z:%zMGp77D:媶ոkط=lw{ 90xL/ 8m~lgb eV^^ajO''9-tG23 ^/}{Sx<}fP)io\1ym$`&Tb^I l9 +by&]v F0ejc6E+YG)MCDP/&n tR8wY$r keIa+߽:4ɓQ|`-V4ˋ rFqf2 +{Dѫ`d?]VTd<| Nq{-9#.HfnWQR(0Y3@- vhV¢BڑZQؼ(,O$C-3ۢi&ls7cDZB%m#G#T .-*FN]JMM΄c9NiҺ]M5XKWqSnYY驩ׯ_AƨȈh`b &JN`uqU6kҢfݦMTFȴ`M Uk?z=yiBtVffg9)1xEUL,2N<7 Ҧڨ *|c`g?yOg⃳ (xy-)p8n݀>n6wvG-0T$7:a3IАZ9R9~3++DJЉX)0yQӺ53B$375{cNG%gOϳ*̶&=3~]s;>' IwꍁPѶIϋuGГ5f0GqVzm+ړ}*JAw1.el6HR@s-+߾YlD<>x,V&a`זRF9Sؠ|%:4dɢK,٣is!796ؑobtd.ժeu;WU8oG)؇ܔ0~$b2p\1枌b\s{)4;^# ^+<Xn^S81\KZ8qN}GQ8S9xVp4~z{֫|ǔA:U~V]H@oBMt:իHPJV4Iօ盳E`<$۵)"FtkH/6C6{g3Vۊܫ0[jR7X޶Y+:jʂCU@DVlawZ`["aUh="Ѵzfi])glUʮ4RgrbVLoNJ!MF^n| !>d_r}KACi5J̤oqvW*w/)2 $߇1U~Jy~x][efy(h4>e)^+d(eS<k\/1psS β!<&}E%6+4S۱YnP ,K!D)CEO7|Fyk#XK G` 5-4ǰ>m̺;nZp+(,n(үb|Ie$/6*LFc%lp— qSz7%J LC2iܔf+jy3˩{qX@l1++ꯡGc(d 7ߏʕ %lPAߥ _|bh71GG=qNat>c%kD%.kMcA+E:΍ IT7Հ7LlZ4LJh7%0҄-^quX$g<@MfGePڷӗbSD4VPf`07 yWYL˷BcxY1~>Ҧμ!x)@h39!̓ݒ^Bs)ە+OXWϢ?qv9(լ&n$޾?3,醂yߑӢ=>)9EfRҞ?h nC G!%U+I)4.Ѵ,^X*ݫ6ZI\12lT.E (Y8؟ܴB`J >sߏO= ڍo㲶d觏p$Ȥja˨Sߦl F9xp~o$S jLR}Sa왟?[36r[Uv̨ ޓrJׄ ZI ~W%WK6^_%v9{B苁,hZ|iRD!>Ѩjw^T' .vAd4bo,^w)ٛO  ^I8aXan1!-Y[F tm~@&$UZ|_jh%U`6>Hmktc<Ҳɔ4:}i~<k`U  1e `KGӲ{SڻW췆mV;Ndʨ] V;@GnCnSŸqs0'Od42v2csgܠi,ۮ}O)UKWc4W0T}s;?K6^(SѪ.ۻ@pٻ-ۼGTC(L%lɳ|/3/f } &%C"3ĸRπ=|EQ^yxP> :wytж3')Gfh)jѼkLkঀ]=F(1gXZc,l檽Ć? pj]L>;т5W^=kֲF( !ΐ,٣iU9AIR1CnԼB5ELb̴LPܖ}֌<>sF6dXfi8߉UE dTvSe{'h˩cm9uSZ/O)h-اyw Hξ+hӔap]ذLFiVQHd1ʶ~jɜ4-=2K2rqHu a4XO:!;݄ ' %y6wER֬ *}Jc܌AT)dɤY{qA,n=mU8C.s@F& u=o݄⊦yn *O:ۺfUN\s!w[grY'k)vD2F|/lƟoCcޘӹyw&~qͨ/ .BCi73J'C߫0"OBrY> T㯾_ ,Mu }W97,9 oJ`|/4[}XǔYҏ䡓†ltcX$Y4L: S`rMH6GkQ~r3Yx[:2ri(" z$,WHwG,T?ױXI(4ӊG$eS+iXT-~^W+tE l*3 {#9]xCXϷbv Zl晹 aop4~v#L8~f~9lB}8odj >~[{{{s46^!F0 6A1<3m\90^FYgFP8Vtdq)]ذN:]^D`RCmFan6X*}F\Aa DR!\B?6D1YK 5ɿE2??7F(%s 1K77|5_#;\ު;Uo14CzH˴ؚ@ؒWMZxh!>lv1^pUS?4@%U`VY zy*?ˣZZ Am1 ?h8{G^}cs^߈2 })YTYǏ+U]ذ(<U WF$^Oo?s@ ? 
!f+.*0Dvca֥1c@@(h(-]g'pą[֧i`'2q7cdEyc19SoWI@1 6Ph]J.B`Ok.TP^j i Ƒнm#`vQ é |^7b(M71@\ڠAEKOiߢUSRĭbb_M=1?~̯ 0 ^BzMz/RE)TEwذLSBbGt{!+$6r!{S޼̾7}h _Ph Gofb{ ^&R(VT@M*¤j>rl0\<}>N0Fr bC{zt|dFq>?ilфV#97î T\N-C^@,W9 PvW)hu honA=/I'wњׯ&r z9Z+@n_Y~][_Fͨǂ| yy3P{ Q -${eIgf^nhYAXK}Zuhui-CҪqLƵHb>1فGNyj#h;˶ YxL yWv20@ˆ /aw]ٝ,/]sdBP399iS*EcΈLGı`s I%^Odg F{W+Dbd7x\ȴ+4ڴ^wש]CS uCܿtÞ㇏:'PNө?US0L5ɑ \`BݮmX4IC8;psd&)ld$nfAs]c֡suM+\ιc8wLȃ"[:҃}Odf2&eSo -m$,yk0Uxna-M{ɳL,;7A(έ/U3v'n yjO -\`wis:U˥ヌӗF|v\hCYUFy=O6Z86u_ש*!D6wk-M^EjjzBJ(ڳisW ɲ6r NZIqFS@Đxhݟ2M_i*(Z4cTC} mʤ:*9DaE&9h:C};g0nQllbR#D#mӏc bi!0=FHW u1h jjY O !kG>u.c *KVЁe2|!\/CGB) (y[.1Ӳ@o;̀8{J(XfDzNIrLX|8ۑsq/HI-B do&n-0>}s$ _Kr$ 4i K[@gzV0||??Z&7F.QJ;`RHzC ^Pu~S.r^tju Lަh.שC0Bn/mK /Ůf{qdO92hd]I0N A aA1{} 꽵M/!LT'qUiTK=zRFDu@Q|)_|Qʇb.9OPFEi:q0}j)У})0P5c^z~][I#Q QYݍ͑s:v8-瘍֥_;bēi`XQ =AXMl<%x@z%sIɐ%sOK(O׵k!KĜwΜS/`6OНI6?\M0bNG|f7 hdO`{r gܥ&$$ȳtGcFwlLbFƐ@dޣAskD>{\^| ?C2nhg4[7J + "͢hh&g6њ2kw(AjTy3O ? ZPy- +T ?4׫B& |w'ƛFa/sBnKI9nw%>Tǵ-˖ &J[[C0Lm<͛:; O(Qf5-ܲ fcؽҵ)y8,!*:wa#U|F5aNR1k2VmGO ~0rlK &{epW,9fAA*T<.͡h&뷱\^^ٺ?AVvmva#J*C)UeS l42F\NuVxТE-Lu7O2 g,k{K%'G%&kf4ClAsιOIL8gI4>-N1g rP=FqP`9pey?@#A"A8(gw"ąBXZV%BӫiβeFPc.x٘>+C^AA&M584tY)Y,&Ixs])Q,̚EgPѼ^ʹޠB|ԆPӀ"d}1 ]jPB>~Fua2F"Tʁ~=Qf_j]0OgQiQVWfhB|ƵQ7 .{.d"A Hs?\ gdt{tjgŸlԵp|R7׷j(}V hxAUe^kh&]Б"MשdYv#Sl \;N"q(|D5:$puasι?jܸ+kBpEDy0[o m{ݱ̧旿1հlz/P14wiCS*j]&sB,Ƙ}:Mx4s9~N|P^P8q(Li} ɊaUtY]]"|qo;ŗrsUTi>_ͱk7h+jV*+>2W<=YZ &@#Yt ҍvh߮(hG.5[PN7ꋟxmnz< 06)EvLL?++dSVnN85,p`%їȗ¯j3ϔ+^L +| C\[6oyfͶe7}tYpw6מ^~wYRI]ӯyL a)ڽ9syshd!{}B\p3.?'(Xs aS{a,;Ou<ݻ}&! ;N=fܕ6*r\ɃJFCfbQ7A )@Cⓟ/eyW㒐Ϡw&cߥ؄ah|BŁt@o5L 6%cުi0Bqݰ̚{j}ͪQLP҇ʍ <̼9ȬVB3#C]`~돷g;q$|32pWg~s4Qe|WsllC]SС] dcS1tQdy]L%oAd Rq~zZ&<AL6~!D ˕/=G/QFV42 [,Bx9 q>`tm'm4EaC 9:U)y)`kY`yo Ne;֘Ե=Ail_ \L.-;ې^ _a𕰡[~ lΝsF0=J/7sie_ŝVH8@AI@7DmwjMQ/ p{qӚoT-ե9wߏ9~ԗ\eںş&ڤo}Mv!|]3d0u##'(SԨD*` :AA+;} K 2O}2bb ?1+ac+fM_ZtoqCY35F!H\vpΣc8=O)l0A1 &}Wd>Ȱ^%as8زnnPa8&zp7V;XdoNv,QEaw%џU0K;8K+\.?]HgMntf\xu¨0ÈqvȨKCPhv@`иsS=cΜbphO)j4ߧ|Z &S†lp&} W \}n>_?| )]a}P v*Fv)ko/0t=ޮU| aA9F_uѓ6[.]*q!-~үhv-:W]ۙz ZѻEkF{\HW\}(A,5Gm_jgu8JV/* 2}:=?$MgVx&c aܡ];3cnBEHƦdA:p]lMR "}(aw4{a΃1忟ZE讕)QL{^(MESHyd]?1AǜnK>{0+c\7P& ŸotJ"o;Z;d`UbYcuy+SCϛ7W=u*wL h Z @5"w!Q s_qofmרנiHXEB 0#dANNNLMx=ڼ~?:Wd(P5]i5HF0ÈBrAm4veS~jتmju6 +Z>|ذ)qѐ*U)qwfKL+O,#9%9BŸٺg̀I8h:ȵ"}po RF`Zg7CqphмS6[]ƥٹ.9##UIu(8b8Bp `MJp>rj((I#S@@1 <+p%l(LHGEPAF(`pC\C~eZKxVOzQs|*s]❚ \7x u -Pyn%NH28*=kZ4 W><[lspw >B4k74glaΤGŤ v- 5S!2APd ,`҇:Q44GR gJ@Y1\OSuG Y>BN\;(d{ [{n_*s;i-E>|נY?@^Qan?Pu&^1 32 Q9,ť[0-Z M5Ǿ"Bш@P3{iHe]wY,6~պs ίZ;8/jnpY`@~n?qk\ƀ `\wHL \u+ab<5?,Pࠐa4c&搌ΥQfQ0+K =Um~ %l35+zStoC?d svb}(z#{H(gs>#>&o5H7˧bJ惵(skvyA#qd7/exs}D2<D*LW@ѽ:!C01} 䵰z>*䢬>Cfa*LQUL`T]&}gիtBAW<+0sV3=g,pbo}y,͝[g, pY1{f}4@ܯ^`cŏ3#'Έ eJཎNzo.ċ6 k~ZE:_$%S@VA;1h/0mڴ[t gm3ʟ[Ŭ4>r$@5Xםk~]<@1j|u7aj1۱g,E^YC:KeRW6yBdqHAjEZI\0* xy1AbnoXb΄#K7쮈XZu҈ 厭6 ;t XSX;jzӣK 1mQ YUA2QE36haڌTȟ#=*Z6!uxX@UbbP.DhH}Œu;ϳWo^?Z&n앢kMj=;eKW|:HcC7͡:Ni x_[fyCTEGAу#Io㱃9xcߕĊ۽v8U֭mRGkV*3`(yd||ўWGx ;S6Z/^qEh|Fdhipkխ^QآFF&, 0o{Р%=4Fs%ߟP&2 98կsߚln9u'>E<9FѬn57-P6ɪ}GqMDuM{įֈ_O'&? GwQ|Iq&6^|1;Eē[`w3DţnRkFָ;y6ҿ[-n[-Su+ `O \1a߶loxqDGmNy,O>W}믳?X^ԫQ@.ľT/Mи5uΪN7~o}paiHIC ;>.U"kU0׏,תR^Mڎ:T9u~[`{;G~tCKIN>}66_ѵ;'L]Q^Y<]HHfNtn\Xv,14d.Ba\Q7Zpy)lwM8+(ZT$zUx ))F8]``Iv1x燴#g|^ ڠw^kV6& @h=7Q:>R<>ܪ}=Jdo> )E.*)9&wlh5nulTx&Zү{͐o}gN-@IDATbc^Ԕ솦<XJ8e?)zgzM[@솟c`ر%DZoțh +†/}]b)AFܬo19Z6kۋ>v&}ӱzaٳeYm!?ȋ577k"nJ*{#c >u^#԰&ip OSj:,$XtmWQ#S4Xv4َ-7c;ιo@gp0=fU^CQt:gaX@J%Y`TسpB t* lǜ{zp54yF, .M\W٤nNVYy, 3$v! u1¡vi3-KX\;7cڷ:(7-u:/xlfb4B;9&&H,۸G h}1;O)`eFd2nݻ{ I)؁bE&pv?OSn@(r7G({ԓ(< ^<*Nq2s F!\ͤ.i&w_VyPcXXm?}n6-y۲9. 
`SKRt?}Z&3B%A5}: $Y>+TMx:51E\N,@/\ܨXv3EDJ ok|`^~ pα]hEs> ?d:B/ҤKg.$I_U|?\U_l2'S`VSIo(Vg?Sܸ]Ep oGMөjFخqXzesqE>8w4P(pR$msRؠ Eq=ꣵkW/}ٮPν;6zw{g ,#Q0˕(7Ǡ;P kŰ{{ʄeø?@!x2>dMT6|ybB% lEX oB p-BN . eQBn&%߽mc Iydʬ"hʋIlb;ҬIeJ! 6=r gbx`Qx{CKqmڲV //v1~,U*T2gMv+bgzmCqI8r|ؾ?N"+Mc|#z#raEI+~UʧA1'zŲB ONJg.GNOGQj= }?-v4{$LlLG~][!A)Y{̨͒ש"qS˰O#-1 Q\iC:'n"ȡ`):(-+X؝Q'#Mh FjSmZn5;a}|kGBBҿdj'XHO.#0Ic``xZfy !(6~R?b5M%o[8}̒*j3(d '2z%@i ALwhՠ([2 a+Ha[uwݎR8}F#1ՖGn60׷n' !DăbQt uB|iTb![Je/NY|| AAdywNP1z80F ML1فxP.!u&#^Yf&_ Q` W5 ԳЇwb'Ck$lm@-AŲf&ݱ4>}^ƕK !ZY&q^='٣5+!Iqu_xoj46:,cF岲d| ' v1^;:;9:wwAhQ:'$#K^~\FjВ7תRCͻ)T">C,:4+>i JM<1+wې}>h,(>#C>& oZ7)"ԴdЊa JS8#d-K?%Yq"l yi a¨%ΰoJ -+zf>@AFq5vxtܴ1pgz,Zr()AZ {|2nAw_Ag?4~2X֧ˀQJ`NVBeqcHGf 02|潜cW{҂qy h@i"m~E&)EuqL i2vV^}GNpo{LSURvcWPzl쥵Zo[jbhJ j -R7F 4S*w~qq~B&!'~?ou4AX ,Ϲ(AG :B-R : uNß*U?K`^xCFD49}PDa*߶ZRk"nM?,Zh[gF2ѣ2-OYAuyPkd;#@4r4V6: Ԃ^?~a0 J`@ڦaZV[X ݁c2xo'>!!$VKF:~B]q]R00E-4 (T} aOĘխX^3҇_х7(,yD0S)>+zrwW>U}qXVg׺Q)m'rFvh-UQ#']qύKraBRldEMNʨ&~ L4an>~_uR蟱j>h0;m;6ӗb܅x)Tpab\7ː Aeb,*Äk뾣2/O< }{x3K3.f:3:{׷jIWV{i]ye@(*5bFQ鉞To4[q uuWPP+W2ӂQsֺa-/]%oeZãVh6s|YwŝG2pIaְbP՘f /A,_'~Y7MsHg28}\^j4@nf^8rKO}w54bdlGXV|[3̡[{S3[MlN?8Z{;̵7a%9'a⨈ ֏K%{Vkz Rތ0fx[E 8 iDIfsK疢uM:bZWQ&#;Chհhm̕ HO̕2dhOA _!hTijڋ?KDH)%ԊegxߥBgofnEmp%n2XZNFY}-Lj>>ijbkY7M!wv,B`D&C_'zUGdujiDB/qSpo5ulQOܲO܆~͉##>A+4A}k2dTh_gERO1*kOuN9CpVsˌ }4`:MRjЂmCát}~>*␱%;L F,80i ߱Q44bt_p-tMʀW[dyZ?-l0 ᰻ԫڅy "F#@x[<\iMf窎~a6<,Vn']d9r"{AK~-hxFR>>{g'(kOLNvJN?zPxLJ?ܞ70AdR0:L=w#џvl}b4Bq~_$YPUg~-7`' ~ (*~CXw/iB` Z8|լB&7]ѣ8D )$MԳmS¬kݪ99@F=uUW1r;:5)Ń#"gC%2#-R(N0,_6˧ FZANiW%63h38(ps^i4;Hn? EًϢ<;sq5Z@[9t@-L6Q-Vժ9;d>ƟuO6(?Oup lŨ#lsaꭉ2P|'jz(| Z$<|ᏟwW Ja#!L"ْ=" $<+Y 4f b#q9}JG`C pWYAf 63qIwoX0nMgJ?1að`wMFN14>Q[kDS.g3b%\]ug'N:;r8<#,K굞 O$dx*-S˟\O2a0S cb7ٶ{zk̉FߔoݔImcc\9VMN؃h4R/G:dIǿ `\M쒫[g<GKd=cb. ayE)q_3qǜeMN' '&"~M̀*V B&B,7L PݮBt{b|f h?19'|Φ|0c15.o4mI4J됈[u+9;߻ڀ;+Of"LXX2VS2V8GZ@'$ձԝr۩eRP1;'Bkq9f6dF3ujc[oh%S&_qx羣's&c(Si0$wߙbB2;έ#MjqyœyL07O`p!|K}#Y?MG`B2Y }HᯅEVmoJI">CQ=x-dt2h2<,u]5*˳gO <{t;t@z0 HפJ?婘&|!k9b3Sv g/݂-IKP)PQ׵N==[t6͎ @&a b;ĪOQ &S^; T/%Ҳzq/+K)!Bw?ōA$sG IfiwMvepni5jEGu26~aoedݟ^}1ޞ>w4d^:zCxǠ]-]c Nl#b6ةum_w윉?DhrTV 8CʵB7 $<.XRX.õ q-|,:R6OV"]y74IJ%Q!Wvƈ mz"Zu;!óS6ǣ6̓h5ҾÀًqp 969+bÝ74jz%?]#Bu`v2\jL|4̨HHeOyLaÈ)Ā?_{?-&r ϑv=RI)EtrJ kiaMB e=vhXq,!@~Ajzl,WXkHns|5|fŜo  < "Pt l `QBvc8e}8,*F,2Bu`_VlsT]o|,p=sL AkfW 6i"f \X{4UJ]|:hzyM)Aqx-MWN.o/4Ͳ[gJM9sg^{->Yg Mv$Q15F φa|Y^Me>7 M{׈klb1igdV]s^h>\Θ[ʇBg ߇p>"L_A:U:0 .(,,}U6c9?rHvB 3i2pb;$8 kL `_EQ,l`ߒp8h1vtu쁵/x *BCw>~a\ݿ- kO*yS=knGKZ߯@3+>)>( ]-qja`D?'%B?q͊@ i^-#e[*AF{ؼ]F,~9QajBZ[Jf[oSf/oCf,Կ4,V(@[),{8U^5B4a9{] LѮ$Rclxj,'o^* n@:5䴿^BgrrҦZU1!7ͧh]F'?۠?jL (YoEۜB M6 y%V0ꀰocǮA-I{|2a9T A){ӱ |,AOw 'AYDoȈz&hD:lOrGK!#FF(~.c{xb3k45 䙌C|j2ނv$kV Y{Y x0?kHLOQuLf\*01F*~sk+  whn?T3u/y@H_Q)85Wfesr?Pi!)H2,7sʧ;y' <eK Gl9}31 u]TVq+=! j4P0rucûݴ/rR}dם߃-pqÆy@hs/oAq.%MAD5#_2x<Ľ6c[nh!LZ"^Ҝ 7zAxXvG!*tPW0^Qd=(Unu0Q @#dXU0>xz<]tȢ1Z}p&T4.O qlO#;&BF6LDKWGǨyVn ̭G:>b=f?}xhݟhszĀ #(fdRx!6ZsFUL HRiOgcKj-,aiI:36qp }p^-40 Mz'w}hddUar_61#'}4 {>>D ˕8ʗ(lx|hI ﻩ/Z7 15R0 1" 9Iپ o{ʖ #(Lx߃̛[#5ӻ`lv'HiPvC2r}kݎ+*͠jO`8f!h1ft[ )",S|+*9&4k4cP6BCAC i!Z{CS-{9 Oa$3Zhg]dΐ90@43zөYz{)6;賃_K3D+M4aØ'6>Ae~41 #6k^bYt|^Ř<ݎ/b.9κ*o臵Wc_LIȌy}J 4^֥4`K M<~M@/R>k%$\W󯺢JSu<(hpZ7G_3B ڟi&[yUFH,HI6?dL0bN%}AdBL4iQ(] 6G eY6ɐʤIQ`bEQ0C``)V縷܅sfzl&e4y$Ϳ(`4SvH#cQ xʷRIcHs_zaG;$޺Q-UjЏ7ba4$d6jTQ^%$KM^lOQ/;M$>iNg pe>21p4P )7t&Ȱߠ *bpq|l)lp /붔.ݕ}pG'0i '<)XFڕ+, Y}/* )8 S$iBԣΞq-L*c)dY;ORO\| 'wjwh |+@L胓%*:wa#$Ɋ~Wwx49  jzz *khٖz`HL;ȢC \5ʽȹd-!ɣj7sS>\SUFM\S NL'[:^m;5rK֪"PKS:+u XюQ !촣d.>/[MUFd ӭ[5,'V\կ잳)p\݊6F.\IӼzjڰhi^OmgWfvӣ7nӾ3PapdyĀYb` _h Y*W J #IO=eoIoN09ݿY=&>}ቋ! 
|h3+'}:MS4sڦL/2UaLx飩Z,Kter((Da6YQhD=v;(=Fc;GhÄ❉W0&D:/yj{1U&׮xaĮH͉G7}G6pGVi^=fFKƼ>~7C>n)>yWO{>5aɔ1?&BwE3Pо](hӇ[&L 01[е],mKsH!9IWӀB i^7މB2#o >ee_yt[4A.5_wPCI)Š5 MMmA"C1'ێ:6IcTב|~!3QJ MT`BvC!7}+Rxه_yei=x`[?<ͫ'%pƼ>Ct/3zUϽ=S! ]{=߅of?ӷ9ط8 R0%dQͳU=~5x\Sλȝ'yޣÉpu`s9?v3mR smi ]W1Gfg&hYfe8~aeO(ddW5g;q$hoi^=53ZRy}:O0d>Re;9oBx0OYhׇ/@[>tbm4,] @fL  Uf' 5|vFi5;w΍kZ(]//'`e_0MIaä лcS!A4%!!5ߨZK;~_nT^|뀟4b'i$Yjk-&01`b@€ ,5k|BΚDM($I>uyTƚHY0ȸ 4_ cplYRm~ W=+Ig[`e7 _i= [а^y%ń\4IUW@FI/\?MZ*rݷ>#O= l6[>l* <{&01 iD mi{.GcW`t?TmZ ǝXwoÐ7#ou:t>42%A#(ݩ0C& J>43N&FkS,zq_ Y JaSm1 pRMV65[UMʹ'Klۯ6q-~Cb2 :C ʌtA0#\ἊK-:]ckU)ؾ5Z5O`pGT,g=i /eƅ3j7~ b`h,;>rĂށ$}O&t..j?fBpOo|Vد5]6x&|}Ka犜+x@8J(fY4?jU;;ԷaFz`T0hP(fә swh΅+̘p; QɃ 6&T-BtEPp+#hv;*Mc?]pYTVD1~\$ H (&X0$Ì(bfOͮcNa% 1 \?(t '`^bkiE6PnBxm⨈xmǻʹ`;COKxq[|OJu՜X<%%דojÁZA: ;sg||so?3hSFMCŠ-Rd`i- rrrbblŋgٵ}Y"@FB9J,6E jLSQu2)\?f5lնA:u - W>lؔXCAE+WJ>5|$6[bZy2fy f\Oq p8S.$\;yx{6o܃)TPuM~pV &014i, jVw!#Fw?EZ;hC6=Ww-~-Rhz7u5mP~DK0zt:zeK3t ƂuKX`ڴi~+ rvc߹/9y^;gVg5;_M1Z8놎|U~z=`OĦ?Jc8|Lׄe;UmЈ}Vq\x} tC]p4*Z}L Q|}s¹+>{$ܴn&#B A& @8< ɹ%S@@1 <{+iUH:Q5 ҧc5ql3\/,xt|:y(:V{sa65j &5NׅIn~(2yKpbI\eY24Du89QO58WVM< 8P_3S0Q?`;>842c,ߖoWMk'ʼMP>Kpr웈Guy&}[7}1dDd=\3 >/;dL})#a 8 }{ s] D<G{cv{#~1*Ƞ7G_xyXQmt ^_ atI@L~hBqٓ?À?)Hj-(ANr^((s&}JAL(!sBanP  Tc}PuAmRpPƵL01`bG9mc0m;vݐ*U(t@{a1z+jO-̼J`՛`P~jb^C7ACC~ٓ Kg}M8c&>>s%/G_k8CKߢ :?װ@"yJv[Aaؐ,pqPDg׋(;8G9}. ˒ %z6,YF\g0ݞ)[OL.i:=9>d.##^ayuZè=39/@}ٖV~d,мWaC1xdB>a@}y_1{FA1 <>Իt &P]U~{S1ϹM  Zv5 ZuθI3Xu/anG^\+8J&01+ F:bԖC CG1~dw޴ .Rh:5Q RWƺ?.ٗσ`ܩix A)DkEKo=  +-^.5L-Ϻ$ &'|iX|6qSj  (C$˕!j? ܉uB8aS܏9X47fYS֖2˼6A?<k b?€?5d xp.yȜ_ayooe|I'ҖjuB JxP )M)KPGUj(n9o,czȈ10qfCw~ ߌ0哑Y~ipjye,gp$qѭɝuc\ܛea|I: aRT tI>D!]-&b1r%n]LܚGOj]yP>U0+/emu/ɇ`_۴/7r-̹hV\,lG+eKeYz%-=HeM& }d%z ǢkCb \tTKd,;R`8n4 M pq9JԁlIB_+PtGFըPk0>WH#MڣUS1pԻYG+skn &L L-77v?C  mI*+{-b7l N˅;:-Α-U<:XUO-v1}m ^u<^Qۄvjb{L=;4iv;S@~!zy<Imz1;ɗx@]jxT=` t=ƇMj ue@Mw ܵn/{~9=/ W-W}T~dԇK}Ȍ&5w\xytiT&}~^GF: ^Ϟ3SfKxW813r?%d{=lb@1mīWZ4os&qZF-hlUμ'mH%$^@BBmpJ R FRMYƿ>>%֧/bG?ZʰS^ǁd`|C&ȕ-EJ:E'TA ]d;f٭6 FKO.뮻rS0 @_hGtܛr_† 1-R*`@)lb8È0{¨>ꥤ?0)#S&ǀgu۽u1tLoJy7CC'~01yE:˹AM_Ht'&2 3c%K5r at$%h@Qi!o;+B$2tV>OrtbJrtڃ/DASc^{ 6htƒ.͢H +6.2Q#^:N<܏J*u P=f; E^zТR%BԏH<!AKH8u}bjEZ Z槨1Gxy1AbnoXb΄#K7쮸z~Zun/]ڰ[VYY| )K>c}|#F-6`^01'H tsv34vط`Z XhՑ#ozꅚF(>!AaHwGDZ%mz0qAg ͺ~(61d)ꈷquVoC{!Мy"/#_灁E_r+dl4An;Y K- ųNF[u=9(F? WUj75\BJw{\ssi9< YݭYZxD.>u~Z%L|"/k<AWs`7#w15 `TBRؖ}BnXל"Ǐ~ W]PTdXŦ?ٕŭp'{:~`s|vgƢ.I x=6~H\i%co׫C/F3Z_KfD f<6-cM (֞y#f|7M- 4wOs057z2sqIx&P@/$RL5L~yb{e}_w0@АQclh㦼fDo͓u U 4#"' U뀀VZ qnuxz@U:?}R}xk2ף| UפR\hq+xK '+j?U bKzF!Prx,BNjdtmgM]X ME\Tz &:d[u`TE(S! ƆItn4g& eBR? fý^o1\bW߯_H.tXq=U]US@@x9n>T#mL<^n8>OzfըptSׯV+Dv%`Ѷi0ƲNa"@q:wv}lj@!w_Ï>(PRP( k;r up\֌Z=$vSb(kk]Ec@9k\\S{'$gqLǪ\Uz[2WvZ)UL[ԹB@!p2,9qK7OA8e-Sqp0e6GU /nJW_1~27\\\䩅Й.RS?G=ҢSǸĊ(?ͦJ:l`S8SJPqpdpW {[ f#F5Nj_7a/>q֮N'9SSs}$(7C(AQ9}}}9VYg'a${Ԝޫu\BƂ5zb v8by7Xz{Y ApMB@!p 8KHn+ ceҝT u@%$^ZN`0^xd0xyѡgNHR-4sJzeT˫wz~h .@3;!`"}\fww_2g!b9샷`] >nH0UӒcr+Q{Ac16眩\qg1beD&o++]}WNEQÛs IMtl}n.t?jUԹtYU q -\l }gTzUs4u? Cݑ׉zgG/%_/oPwq5(>iediKdiOi6=B凜 ԡ̫H"%J5* cpb#ne:!94WhD-H'PjԸnHX˲+$QiP"]I<`o@ Z%$к]Gb8]eNwR7₏' ,C9YAA|<7 dMK{E.gyuҚw#vr11fb+kBxdl7}Nme ]' uEjI0ɮzcFzācqkFP կLɩvg ֊ڃ~\^;jߌڋ~Y숁 X^ 5䜘a7ms_FI)KIE60}kg '%]I AgjX'D0Dky0W;c6.sZ$װj\t . rC.c'M|h0c&w(ڝiߑ38чi_ccKIƕƁۺ;n kwQ~KjZVˁqԮi(YX3fG`( %%hhuA!E5=ܯIOzx? '?Z4k+hȹylMLNӯeLny.+3Ä؏ L6%Q[>XY̆(󗒨{v9r:.  
j S% `6@tm9҈'ݼ2ѓ X`0i>TJ\c1VJ$6j~ԩeq.fiWIe6x^>  7dAWZffކu("KJM K /e)!52ij,j 3g_qq޸2ddZ(ah G;NjTGs6&9C* L͌o -8bpV ZEsUkԭF;RZFXڶXͨ#onM^팷mOҡ *fPa(Љ]|(56)zz5X␒X~qǛUsEޭ-H3 >^tfR D&/^M( ?WG /HbVa"Ə((f?xdc Zi|iXtK3>Ka~H>9ِ_+g ƔPg2RH ]Jӻ> mrKm4tH4l'YgfQݐԥ2YJ&(E73*`X"dv ܻZUyB;Ys ʓ|ʌٖB5cM-d AqǕҌye[hcl@ QPz0g̨ j^VnЄ bQo :8 ]Z5njm/Hs]d屷}`N}40p8s? l43o5d <;KЅ/-EdvR;V(-A?d6#&Y5=ԶR3UB^+X McU L\:4'Tz2,R#G\oU끧JTh\P\aG**3?J1PR*j7nTZ6)jW)({\  ߓd4ztP_h<6 ǀ*=Ѯh_Oi?P.tY FU=}m-O}ғ*}җROwwj5=[yyՅh6@c|P-hɆ}"s?UPxifw=tՉd.*xiZ/y)~kJwZ2g""7g.83~4|1o3adwؕmX&| l=` ^*j7nwhC^ainu)\z@@c+&!^7Q'tFݚABQ!j*aXiq/<%_۶oڒ 1B4̆rMG( {tӄ+J1DNwp=@*Qdbo~0oG&9+½t5!BJ!oI>JG+xK6 ={iϮ{3ۣX@!P>a^hP-CXt9elAT#$n.+|iaĽ($N {/9t\)j7l-#ӝ=\`Ć DhQU0Rcذê  9(P,1h_y=ƻ_OB*=!aԡ0rTnv\Y2QX{Ϩk r@TfOA՘pdm#qq4J'.+wЭ#=\N]7۶riM~]< XLz0U Oj+c֌+/N> +Nv%/[?U.TyzxW^X!pc$6})I}+!)lHiԺcq\60rtCB@!pmrlFDDJ5orGԪ{[5A1Tm1]N(|9}=CNl b! ?zr_T!йUCSvQ Wn4U6A`vԗFPA;nh?C ;iv q==Ȍwp8g<[2qjpQTȸ1A!!wDGB%mSsl1^!P(n^FYS`6==C|tpf7U.: G6sc8m ]fc?;|z_{馫&jX!fx1Mi._:p{DZ5!M8hd\?|5+]sp!Tv B\{-uXIDȸQI;voHJ6:R /V\iЅڳS #gw]LOÒZ6/@E!c6vEQGsAHF3/ً3-A[;QKq3A+ (B1F9%q46~XIHc|0ߍȸϻTrxU0+m ƾqmoUhv#4IYRUiHFÚaN{3u!lZ|m_fSm/睯aĜ?>KXM$KW_N!PJ&Efxɼc8"ORUn?$(8CpxL% ,Rx)g C.N1J0 Aҁ$5χ:rIcRvm&c32SG\7VOq`|twVFP@sRIl1-/099w%GBO@ڗ>|,/eᕸbukk1by6LVg|,zZ:[X}թfp e·J~ ]ت| @kal #K;M*S {[()<voۘnҒ*XI؃^ZPhߔ HHBGxS2j`ֶ H*$ ,]ܽږ#DX}3yosǪ~Č \Uxﺇ iƘo&=l{D22-X Zh@_>n%M %xNbN jB+4_SQYDE3E<`id>ep`W*3r5+87z1W,Fha-êwԱ3ԤnSmX'DTrhZ?lq= +_hXtMiwE 8YI2sQ団 DiZq@[ؠ Ψ(k]f?B'rH5f|ӣD~BYQQ.hSq{## YyQ vK6^?V欄'GIh!(Ɯ[c=5qilu\H"+.]nU8K @ a)&9Wv$:<U&<H1' F.cHAEjTޘp7}Iy ;5rj%C91ʖ}ci=tRP uKEnݸ6~h)w(ӹ`أ sh8"6b<-֯L k=7wyCbY?Q\nؾsĢpV.} NfA}obG<9T_ v$ӆY9wk1x?Vsv|bRuc6 Ngl5АO>$L0?K.<{QQ~o4X}S3xg$rǛE>ul5^5VPdAja$0Ck6>Tݜuߣ&FC05jӲ-3.];PAz1"^7?e RdގA=# n1"80GҀ<%ÓlX5RYDlX@(%\27=m&+*DqW*5Hſ7%jJw$.y":. Eb~,@bq}b_fIY KH\f|ٻ*A5Zw3H7W-*BPv v[FöPrR TϷ?&ˌu<9yAE2[FCþpٖ;ikb[~<͘˼0y$T5̌1]y8|_cR ǞZ6vWp+Vjlݟ4 c{@}iDƼC@X dAƇcJј\F#Wm4U.B`vԗ\@, ԊTp[sl[p۟$]wfy]/a;u?s Y^}c}f=}2/7L=Z/Kʴ_GaXUF|_cOlj]b4_, A*+?l?b`/]Y} }ߡSMn ̂`6`>)! OπLs/Rㇽۻ9Խ‹F9LcsgH)3iK(w=VEv8=}k;\G>y^Kv7l Z& @~0Ua~dKwhݬ ʄ{.}~3c#\ F,8F'lPZ.1HvN֦&*N6|&Bgm7;*Z2 l;h Ҋ`lFm6?s\TAJ7$PƏ+N=ty%g̍b~Dȅz#v1-TriFjڅqJ!S uD`BxdqSY,fPF5[ ?.lJ>_VlXdYmֳ/k|d6$QPu-rZyMm= ?-v]=3 /0>`?[j=_,hwpȢw+ JLojevTѻu7 돍[.!6\N7UhcD8޻eçWo9"!' JQ{A*(0YiiI){m|nվY>w"I ^ðlwؚX$"^;B! aȩ+7PSq.GY>y'-k1aleסkC2g³׶S9Ld/XTK>?G5g)D" Fch'bu[Xـ " m{NԴV) ǵ*%YO{UQ=*p vb2 }Ēnum^*Ե231"Vd3u_8Gg. aR @р$ǣ⷟>IfCJ6*@UR9~`u}Q~O}eP_Gfs[n&mod64ak{+D!A@1R`W;&+4u(c6wi (б 9ƒ5+2$MAN|P`9Y`6x~oYk'%U6 #p` 8pom1c}e5_ZHIfCQ)Fyi)ʃ7_~}U/69uBܻkaɥE\pEl+|VO=c/ [xq^QƏ| |sS-5=E  w{9FϞc7M{p`< n]o+N iWP:r@`gkQ4ɑ֘Llf UX2ehR7ҁТ{h}M{Ģkً"p!1!'C&1`X :DTQXIh f(_yvBf[ .zi>a9WffbLYs7& P}lkgg[D5|U4oDz6ReC lɘU.\IĆ d6 DHfV%d߇~5祦6}l 85V-_睷sM . 
jږncUO٢'&b3/~Ym0pu fC;.B"+$yr֍&Rl ^ Ox9r<QuLhX I ;m)B1iǵd1vg]1kw꾁"٪\DիlľA:5{yCibu>F:2G@x炵nvfl$yQVx(މ"b8g; +4bqa<|jlUJaNaܙzź|:7߬E[?`O*nJ=qqY왙I))N9w1.^׏ݥT#d+R\ ,Xթ eZ?صEh㦭|}1O~$%AVVw;-G|ʋ|]lNMIJTO/HNK{w7 I5*ym0V,A8WPX@'1\ (Tnj@L .q/gql˗v}Ou`6"鹄$Ȓ0!h<i@boQGN3AQVz `f@s4,o[&D uKw2=4Hk%5BmIZ3Q,mƘ[8ZL7̆x})}fM [kz|$9nk dFٱh>DAg>vQTvM2h[L 09 ]{A}N30ƱA0!ѻ!^x..|{ybXgM*a7 yR@oU9 6+R(J@VFeh*ek%qgy@|X ƻmBSXuZ>FŶwGSYhSQ]IgK0%xz%TxVDY*m4gjatS!~̚s4pIZugfOfr@,,`Obr R2g*y {DRKz.ia㋺_{a6z8Usvx|,$Qc9zd@ ƞrR2늀l?)R L AN]1QR,H!P"%xO=D d ##d3*m܁7N.R`dȱ Z>©:u@*R(#Me^J4lj7 ;H0['Ґ\[y{{!yy(J$^B*mL1QDl|9#\vaۉ f8CunҕβX^V\@jӂ@#[BV+3tӗvJO;卯%ɣ$il?xX8~"2_;0ǒǒG70J7$!|[ B@'ɞq ' :|H!P*CA10rc6y |}}TNfs*߸qcP2h_9v]d!7<ȍBb=WPA`vԗ\oH?u(z_FM{dӝ:=~ZW3%n|ԙU:ϪC ޖm%vŜ8Ah=mۤ.#_*t"0(a e4fr{褰ɀ)glTc4_BQى /`xWF;!~>t1 /o{ #қ_&/C ;ǧFK3c@4 *>cÖѐȅ'lhKDoE@;L~1 |qDJ*p Ge?ügTz6a\@; ݐ%sS`DޫgGL])GH֨훏v8$J2!9J&=թeCl@ѯs+NMFbHKݱ {+3VzQY;6x}UDdW7tļ1YAiƇGcrLL'+qpJ7& q|Hh}ćm4kvDPk#& jVc=ߨe{^=Į4q(YY`6]&ʖpO?HJJC}=b#usYɖ`#7~fmCbe[ ɑC\4=AvO|*`k ł&GvvDL&f/?̥ygXjߜ Yթ״&ъlR6~#6x\7Fc&n<׸/9c:y_^bbt3$!q4rDcl48HB$`ڟ7{[M__>[C^yt( z3,RRbj(x,)+psA6t{ /[1yEY[=jBgqQ5~?E.$bu.*BGM;JQWQﻬ7+,,}`ePcpe~ #0!,?Rό|h#`d1cX{ӭMfz}ՇH7 8eI~w:3 xDxHN\Ǩ@uA!P+*};~Yv 288E( ޓ 1*yJvkBjw<nj) QA 0׎GGݾ\5=OKNc6XKFPԱB@!P( g!PQ%0X M8G8U]*xfل{`cx+$G읿_sh{ ua6RrB@!P( @I l@&/?[pl|~Gq`ۦz6vM9GI Җ р{[x:e[);֮qϦha΀d6d/9xLU B@!P(\^ը0Ѹ jڷe{v1h6ڀUQN?F~|9ޟ?y1DW҃T= B@!P\E`+bGAޫ~A5[1Ъ оuBRjk۶{`np:/}*HRP( B@! رvrlܰ" ~p^ǍgQ OE771ms?8Nھ-p]7TSR( B@!PR oWgۦ(-Y"~rS%Z(ECG B@!PC0V0 e]xY:uYqEF{:r讹}}@mHS&RFS+ B@!(7`އ%TCC> ,O \Dž,3E8k/fz2k>Q'M͚=Ȩ8eEP=P( BE9uhXO0>^:e(mxT`.oK @z5=IچltJ]Z7*k=D_Bf*K/D{-C }ujՀ,tYfZ๊pq8> 򓦗m.SB@!P( Ur/ 0R!Ԩ=}>/q4 l{|¥|(cno/AŲP-]Swζf45W\h;fr[i.WHU B@!P(?^@- kap3=<l@b~ө%ڵ5(?=vwo~aߵbZw$jUG˒s43'['F ȹIݢt|<8'5"8{1~\NMN{mc-/Oڴ=0kvURq"T ֍sT$Cr46~XIHc|<4K4\BO;|>7Id>f5JHLuS`p[&3<:4~]r|%ZR,Rzi¨[z:ݤ9;i;t6=nJƅEX-I#8j$,< NUOB@!P( l "Oj(:OC(.2 jL>IDe6SBR /,px{ćS]Vyq&.;JLVA;6իPfNj:h@cMV_Ly?VAUs2SmyGzFECkg)f<@ɩ`.,ҁjW@?ز!M3:O.F]`2?s/3;uĵq#}a7OG@o q$Aq9\iKl5-t=\|b ku8>?wgq5tߦrT( B"I`Jf)oO?oJJM{/Qs]IMWy7sR3ؙ ߟ|GKp1)U0%>x[B>҅jsKbrӔiF`iƯ^cW2Ub>#pT̶VR( @ dRN(径Y\;ypU?jh,u`u%Ie<[ʖ}E͌pp}60~KO] wbrRO+ B@!(9v{9HFغgIq0.lϞ҃)(ѐao@ؔ@ZŶneWZP傔q:TT=yԹUTS止Gcu]v?+*v0C%iޣhUQLsMb a?Ѕ֫)Y0=9LHkW7.ڝtߠ).\|%(3>h]:rZH^jZի0uv#_"N2:_K-"n)faPVX6|t=j#GHZvWZV̚<|vdmvV B@!H22/'&󦴌,EId ^ ī곍$_/fBt4䒍Tݜ#9p6æM"yv4eaЌr[JR'na$Xj* Jf^ed iCq1RM0p;۲a|x?ǫt=LmR7/TKo9@%t`xE>a?oo/O bFSVs7=+= r969L얝ωVNHP+:Y_᥅Fc4 TqBXĸl]{o4ᅁ:9aȄ"?#-Jϰ*VtDx z5X4gEI>k0uzG5Mpȩ#ݓnwq8P[ßn1Ƈ9Ҫ[ҿkP0lkHVE:, #^ ϱh.Oa5Oܞ3 00Ü6K4.;MfϿOJBnt'Y3'ݝrؾw_6o\bfNиnWI3S\ xƔ=m@vk.cqS"#Owixf+R( #%6~P{ޜdԢzzQ#V^yoRPZ)JrGCTjFšZUa A 9?OfrҋLӡy=ڰp_Sl m!d:{f.l=tB JowVZ#Zխ=΃'MP *dg{eQa޴~M$fIvoyXFb20b)x)@<fszkUǧF>͌F3%_x{x$^;%G*"kS 5i# ^e<=[t9N˟ (4!|zlŒŲOMȓ1hwf4қYRG4jmt2{}žÏ] f4ښi6_Ӝf)z03\Wݼ|8T?ZM_Wl5f9l^!P(* (yL_bھ5;;ic!yPw`D[M~<>o:ywŪ?xR^</ڟ>w{{qRxw4[ V#ȟkvz_h"zlҒ{鵙-fV}D۹UC$ԫL`l:6(b¾A`z?5zldf/{޺zyǺ`A_/9i;qQkeΠ$/Si]sޅǏLXPbD3sDL~Ni~<_ޓA{ըRg͔g5mƬWOÙIaSO igaq[ِJ9{KCE1y+80uڜl榍F*q-,CfV<^>ҁHY[OL}5çSD>q\XYfd>c&xgV/"r%Igrwo23| $?%2Ҫk8]5O'ONDRRDg_ZҏT+sxR4 Br"`յܚ4 ܰd}p =hmx!Tw\Dѷ${:N![|eju$V=~/> Nh9*yӣwJgk,XMG\&'|{X56#x6t0_ifGzwyKWO@2rȩyV~YvvV2o;ҍ:fd N%5W^|.|ګB fk8ֳoLZ2srXu*g)H p۱$#j| qaխqMϥ8hIR[YJ B@!3ğ0I73/Xa~\ L%%Ly0FFxc<# ΖѐK`wx*)–0Q|yþE2{PQ|Ѿhg=;!'d0!J+F&,líFv=O%5gBw3UMtUƱw$`bXw^}졻  UX4+H̿^@XVx}5ߛ(~|WI̐/lpXruvWYӼXL#-zjڕs(F|yjP( b"  ldX{ӭM` }s?AQ%+7}.$M>KIIœB2N!Ä)L!n/f5ټ>N]HbFEl H4Csvakf˥ݜ~AZgfܪ{ӈ;<rZR[,슖)WpjI=JMl_rswr-޻l1Ӳfv-I2LfU_90jC9K5^`fmͯs0y_aeg,V=+&[ן({gJ&<=>%2On/P()ِTiRx>}ڼbW.%+-Q8^h7 )[W.Ì%!%8{Zxl.%XMg5^xapt}ZZχ M6f b\n{fgXE&z7C?ht.y.|FU-PϜ LjժOYOm8|zQ^S{_E3 @E 6tU] * I7 
Ec)*HoC{3s&dRHyw?s{w3{O5Hpx*ϫBs^?ɾqBTԉʔGO*ވߡ,Q]}9rK }`nZm2 6Vx{;r|v:5^6c-v9޵bfee2 uc Gk4)G|a*31:R0` y`Dnib"f"ph"lȹrE]{jӮsNABBBlVC5,\ܜ̬#{ٿcXbK>ĊK 6 ACDАuAҸ-8"x/Zdu(oUz@@@j ÔJ|,e*‡D rO&r/}8s]yBz5g  LN6q| >׵|!p@`>1q5SB4FCj ' (yɰw*v†b]R]j*|Z.!'Ȼp[lA@@j^}Bޙ6djlaKɗ!,Q4VìÈ.,|ZBPnP\Br%!˝>IFL:Gkp@@@eУR! !/=rEQp\ .'Ţ<K!@m!P|pGi-ăԆdqr A\MN2-љnFk3+9<*nt3!>:*yaC:'p?Yx>A4sP,"YV6tsQ㣇uj˞[k}{tP؊qxMyW2shaAmDj$9ο?ͱ r4aC/p>%\۲g ؇H[V{%F'K   PU2&\2AK/ZZ7 کmm_'Ib:ɤiMXq\j!p|pyio#j͓j7;)Zw$]ԥoI= e 4y%Q%v+{6e5!z?OcwR쒌P:]UuC(a11Ss[9 -#!uᜅ46Gd#DQF}9H>?"aYP65vn*U-'+_zIPB@@@@F} -MxVh4JUKD1OI>g˖RHG\?۪͍*@U,<2{|SYsрQ"-}VEz<* NyFIyK}@I    "cJ,2ͧ)NR0IY?0P ;GyJ~Uޓk!\t @@@ US%_1 d{0Y޶|Hdyf<!/B?Z"[   u*_ NhԴͲaQA}PV30my3Yu'JDeKA[=A}VnTQeH]@@@j0>lƏcfk}^hpꭴt.J&B4Gn6#ܰRkyқ/>Lv[妫ϧVMCUe>[;ˢatǠnvӮC'誎 Acі#B.GEqk@@@@j*d VY^4VNloC:,rz'?RS)]QϿW^А6b3`SyQZ=Ԡ^;v ߎI4ooV6 YΜ?߂O]prPHW#Q)%& Bә`{]mOvkM}lܠ7}4o7= m"5oR-{p^3R׊,4N_(HfLC#PjF5 rN:3߼'?;{Vj&O/\sUzX8a4ԢIթ 5]ѨQVMƏ^ې)+ iF2 +lՔnbȭ=#+IquhݔrGY𐐚Mo{S ~p ] mԻK[j G2>'vZiGA?O'…>{#$V*j(    p H4L,% ճ Q*LI?(%# I=;M}ޣ' 'Y9ttu5:hDHϚJևRט5 6.4bA{l{x8egGG4jL,hȞREi:Ϻ)%q &Rv~u!͔.\FXv۞#sog4c!$XDeA^= aC 2Qy)*K !.fB -IQZL% &<72Iaᓺ + ex:rķ]etkߒ^%۩^p гX<ᨍmyS;E"&[Fض/~\NO$v鷻whAK7h=$7gc y;pe;GAب@@@|L,%&d3/Ջ?7 ֿ8H\y}B* !f9XpHct9Dmtt_IIQ$E.D@wh]{q54x1pSUO0yuC 1#5Yi h׿#'nb͇YP'VCj6>E    P dҘPGZ̗dxv(tofJ{w*x ZNI5lԙM䋿9thT7Z1}18gFԹT^6Iw;!k!vh$<_J<}^0s.=1m-Xҗ_x9䴌"Eʦb ّ\|Wf~7^Eݖd*E*E(=otԾ <6Z%ۃb    ' ǟcz yXy$oNaeu;uA@Yqظ0]Ѳ 忳у5Ig.GAugjq^ūXIWme!⬾8Z7oܐoޣ/#Z}?;doWʭ Ձ3t]tkBb7n}:ΦZ׳Թj>MJD;1zuYĉw֯BΈpp66j= F6ҵ9"ejo|̗EEJW    B 2:vev\/ḿeG=u7˸ oީ Y-Q %O`lwcҷF_׸' k $],^*W*)H}%%y3Y!m2[$bzUɿʆFز(}K4v^k\|5'l( c%O^d!m}poL W!j=G.kb!rlFeɡ(USf-dBg#k#%fMw0O%oYzRg4AbTNc.R\*pF5iW!lԄQBA@@@"C"ML^BY޼I-a>,7:׶F \N\/gٷ^ߓ$֕PhDVWz~\l\x Ƙ׹U h-ĭYu0 Wh=bN3kT~MIERG^b:    +.Vdmi쯱i/q;WUvHݼI#Z>12<]>xF= yU5Nҟ4+j+ )@Ǣ{WJ)+[W¦SwXS,Mt-뫡 Wq tl m!cuz]_7> 2 o&L Uk͛][_nVB73BUv\zj%[#qS~[7kҤ ((_㍆Y̻TM{eA=%{#L܍Ke]g|DodvoQCèǕRyV?qm+ՎI4W*MؐUdIOQ-^4UCyyj1'- t/Cm{|2?x@1{vCލ{]HvՖp~|[ac{4 5ZpՕz,Z_kzM;Gn^G#f z7S5]ѿy=V07C Cxzrv nxfH4v2ڮEa<[$S~kH̟$fdSZllMkJtM'P}|^!Ӎulӌ7cSxu禾WwnClkm'?4[UX(}p~;dӬ$dV9X;]sse릴!#0r}[Ázڲ֨;o,QVߌ:/1ot*ovF7wƧkw׆Oր8@@@kx YTҮ,2k5t-k"=3m"5bA5bIOؼoNsizIߓ{nhل&F&$luX׮XDUc dƏ{(oTuEuOC}֌XjKrdx͡R;e?גv:E(:#.9{SeԌbYS!np2_@?:, hh?&s].7JP y&&WjŢRG=xꂾ hR\ "iб5!A; Stz$'h.]e:5v(FE+23}+MrzZ#; ͉pV]P/ǿ@nm 1Lcf&ރ@ jJt~OK6tFnv΅䔌j6ZĮIvdl?p\pGT"0LrrlBh*g. Q"m8K'zl$ṇST͵$û9q%N֨ԣf}9Z]'Ox&SY}+ ɩ37Wlw UZEȗ2PiK,L-Jԏb I/֊   'KtIDATϫĢA&R8df>-V746ݯt9efa[V}P"~.,j Q͊ܗ_q͘~^rr]Zdn[ݗB|EOR3t(=1rr6wEЪ)nNٔIe~=>33WngrZ-YΧZv_YGUees2{QꋕIFcJW龂C⊵ '`QT}@ '`L&JE?\ѢyM_MA6k+>Eթ/WKNσ8?0?B[^+YVį}M3Ĕi,Ul՞}/6dD7>a5E^DLzD4; YhǁD 9}I$b"&>"#"\ӥhUzJ 2޼QxzFQK.Z|$KwiSy@Vl5xW\ /0E   P B[)lT / /%3nݯ­ ^t)%`ealr%]Jn%*UR`h¨;t>!R/=qe÷]/#6<q*eH. K"f=,ɑDy;Fi}3ziq2'4zDzXyAӜ ~U>t_lLD湆` @@@d.Q&`ú߻xDeѓ|Y} yϧ߅!h{P+<{ OB8$a%Lhp%5U9ede>=>;aE4eIb(z+Eq# 2"!rѱˤ9r9cGY6cӠ~ZhR'ȄG|&RwZPBjrdڇ'ϥ7LKKư,Q 9B@A&eR1'33-}eX F'>Dݾ%"xoҙdںn,4ҹfy] z7>6~[tD"HH|p'ǎ4lDVB ' B۸rx/V,xѧpe :DV_69"~"lϮ!pa;{gbM,ptٺ2"N5huϑth~G"hHL(S/FlȮ}mqC:Xi#57r`eBa2v95jyC,Ԥ4]ͻ}V3_$ hl}TlۊsW/gnaj)Q/ڥWMw޾{%r'}6.e;[ N 2ckJY&],w:I@Led5*1ѵG}-kVn}z5W./~!ZJ. v:5^6쓅xҎٽt͢gfah4̂ ̧~޶,iHumU>)leUB@*4֑/V>~zl鼎VRޜ7a7G}yN\toW:҃7)/SYe l81 yEM7;gqGp2|UW_Ѯs׮!۬6NGA.+''++%+=pYlo&ľ(6,W;ve>4I$g3g;SڡB-6?-6$c~.wn\?7W7x.1q<6p;rmkFvb@E!>)cӣk@@HpR_? 3ϒ2^ p2ILPuEؐ)l@`(>d$QF?/c,E08K>,-! 
[binary PNG image data omitted]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/install/figures/network1-services.svg0000664000175000017500000014032300000000000024060 0ustar00zuulzuul00000000000000
[SVG figure (text content only); produced by OmniGraffle 6.5.2, 2016-04-26. Titles: "Networking Option 1: Provider Networks", "Service Layout". Node panels: Controller Node, Compute Nodes, Block Storage Nodes, Object Storage Nodes. Legend: Core component, Optional component. Service labels: SQL Database Service, NoSQL Database Service, Message Queue, Identity, Image Service, Compute Management, Networking Management, Networking ML2 Plug-in, Networking Linux Bridge Agent, Networking DHCP Agent, Networking Metadata Agent, Block Storage Management, Block Storage Volume Service, Shared File System Management, Shared File System Service, Orchestration, Telemetry Management, Telemetry Agent(s), Object Storage Proxy Service, Object Storage Account Service, Object Storage Container Service, Object Storage Object Service, KVM Hypervisor, Compute, iSCSI Target Service, Linux Network Utilities, Network Time Service, Database Management.]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/install/figures/network2-services.graffle0000664000175000017500000000770100000000000024672 0ustar00zuulzuul00000000000000
[binary OmniGraffle data omitted]
B )i1?q BϦ݇Eqb$qOًįv)pR^?85/[F{|0Kedf˨SJu.Qj%1ad}C$3,!0-\a`MR"nz 2`0Zk]j͚sS[V[N(ȕVw422!$?4`?UۢDx)|m:~Ja6qU!Di& ,Vn/]O{vHuH )j߳'){dXw1~̃E՘= & $ >3c7eWY!/)X1ЏIrjnSӽ0Ϝ8}yJVZϘ8MJ :}n܌O2,-&!Wժ$SϤm{Vk`zϝgZp5+a@//j.c숞岐+s*>3}[V&NY9ޤkƒ[![g; AcԆɅ20-Rr3%D8'z6S==8$|ˋAQ)&f_) ϼ ^d)ԪVI/\$ 9ɂf"]d?j+aHyMT+D8b} kU};`Z%(5KjT g˓?EKqcV/Ǩ)jհj57w^+~Y!5]O-{^|ˊ,U'׮&݀ .VU;D="Q = m}}CEpiKI١ORxk LꌉB[7i@`FVORLMπ$# {ӿ>'zwY@b͓nr.}@a4&.Ē|!L6P#dMVXdc!l]DM|tx7v6;s W| :*t|& zb!esc д)L?|?to%ͨ(bF.V}+hE m:bh1chAʋ;4b8El; _{iZ H*y!vړ{zwm#jмB[rޝ'e}|A~%~3ӔmfxO4iSg #9%]>yN<;?XohjSCJߊ-ډ A$hh~\-,1]Ӻ0Jt4j\@ jJ/CaC+p8u\-l`W8+0PJ@IDATjD wN^)ˉ.Pc3P_:ڹK>^rΛE8̃Z!PG!TZ8>3gM6[1{{a.}(-ajT?pd<]!/'ĂD,XGVSr̻q-1)%lW@fzV$qHcuEMh6/o-"|uKBt޿f*dܫU}Ss CǕ=O$ ZOAXTqL|9ѱy]y> ѷhNk{'GSP#AafQ:4#z~#@iJӯSN,e, g) Y2FS͛ (eo6oq%tN09M:c9/WG3?~;ˡPA5u󗅙9DQ„ 3t;Azzu_J Ns])lwl^4~dPn @hMݲ0>2;3(p 3q{շp}^CA@N$:h^x/üX-WO|{.i0qNuԔij(d}BU&N!̽Bd\]<6nPH>tVw+%Fݤ50Wy?K%8p5ډi[G TOGbu/< ѳij27-ɂ;rAjIn#6>%}BI1px@GzAy[m!U*noʹԋta^G_4mf?9w uTRSni7QұtMcj.6C.uڲ\r9_$ȓu^[oݗR5$9ΪVa}9W V mGAѽs^1@QbA_ޡ޻ C7+_`2L2-1H+: w0])j)_Q{9q>fԔWYVS;uK``WnPPLp@[[#EY^{ZԞdO~Y+um-RK8fG{Zcx!iaUSx ~WӨ1htDM0<$|UZEAcL8&_?y}#n[Ex:z >Ɛ0b*E&w{3c?ǯ6DVKKK!>`8 , W=W+hT{^D3\uk.E]YͼvULD(U#lP ꜽ=UuMF[&,:%7;31;jԑnS;My 3):pK-ȘSV.}#&M&G_NSy9, B@  L4*myaimkG|kj>7{Dn.ĭe"X^ej}ՋqxU!uC[N*IpXN>5Ԯ. =$mN ӧ,Ps;>Fjc}d>vdXєN)˹ oMlH@7/#.+#az1>шUapgŝm4_҇'SsrvEZקX'mޣf 8׈usN}\Qj|R`eC>exwB?mI;;p7;%p#8FJ9.]K4m0Ynf_NFi\ϯђt(e> ޯ_[j^Gc Ti7n-U{n9aSP6Hs;ȐjTS&l  O_ n\v5 lξҥh6HSv^ N3Wm~Ά9ٌ4ƈg2E'`?-Pȸ%hh?OGi4 F1;aǺ*r /\_ zGJSu- ՗Os6UQh4GP{[Uj{p>qǪKdbHWJ΀')QLǖ|#2l4` Ojb~,99j' H6$iŇȝ2~B˷-XT1R{wMecڹi׮\Iƛ(4u'ﲾxṂQzGoyq0>Ӕ?(pߍMu("RUr,&˲HE30bBL;zX[L:ƕ; ~;hODlZO7M+6*&1To[鮻~}3HmhFa4F/عq-dI n[4OtJAykvׯ_cof禃@X4#2x9SF)_>Qx]|KCg DFD(]Fm%~#RR!9𒠹y}1,19E}3@9Ut9kUWuGdt 5acٖI9N0 Y@l\8s*{>UzΤU+U@h߾P#B7e"Z;N+_t3-WL[ QvqEc6ԍYmXhq z4npW/ݬ(@ NQnѺ|;^1r h `R.CM<_ Z:Ug~czyθUk?Wa3i+׏=4y-W/^G+t E5۔IS؊ Μ9p|\l,7/'$j0#Q<>!&#UQxt= kЇ7&*BSƹK2$:t"^Ln D'r!):%;ȸqbN2d8-Ǿ KzŎC є> : 02Yɑ 7Y 7%lf?9M+X) .s8ynО}d*-74CޖJpסt` v`LިʛK`*wHIخcStʅ|cSgսi2AZ%l`IX (_ pb}{ThICLjX*40^_!;jM YT8Έ{bDƵĚ⯬dU*ՙ bն( ƘnA_zPmTp}pm>>[1KDo` aQqbh 1[&LU^d1?Dhz!]PC- p2gƵ Hݢ<HL˶hZ=Se˂ Ahvp*q!qBF7 ίj4{X}{vWc 陙&fg>h S1^&5F"MG47D:媶ոkؿ#lO]G9 9^>a);~ygb eV^^ajO''%dff|2m\ΖTQA!CKBqG 法`JR{I>&'ಁ;4@8tc背6VEBNyTdMK$ Aw&[&b6=OJa#.HI*$ʒW{vh'd:hA@ .$̪ tAbx϶WQt:Q -D}D*22$ 2v;2T0VgȧGVջ|o> YO! !8nag$ݬэ*J &+caQBX4+)l1)0ˈ¸"2>9-f6O0v+Q҆'2pD?BҢbԵLh9V&իۤY3_ x*b!+;+##--)ƍ$l흗v!DgefMyş' z\D8?>>e[bfJrbbc1QGEATSeޞoL5LOO~-ә,aC1 ^ GpGc@ 5N[{l+oM EyN /L^uE3{Դnuq%P'LMƾb[18Ybs 1k/d\>Idݎzc!TmR]ѸN5\oICQ`mE{r_y)(.fs߹ަQmws.AK7ʷoVG6"y}dm"?'@Aa0KKI#Mщ)lPpNH Yܪ,h\C, 5v=lKjhYΕD_l-|/{pJ5!7%Lɬq W'cgs<^J iHäqGS-k '+CS&Qeo|>^*1%hNQd3Iom5<Y"O.4&S5z}1ҽXOtj &Sis^ kU걏8~沈x'ԅU"7cřڅD&D]d UԚhd_r}KACi5J̤i_8;اL{7y2 $߇1U~Jy~x][efy(h4>e)^+d(eS<k\/1psS ҅!<&x}Eo-6+4S,7[(p)]ElO/|p(5F,%Dkh[he a (T}uw ޹5V=QtQXP _P00  0s{_lVUA+ x\jEs}D8s*`c w+U18fz:5v4՛Eisen8xdXw1~̃ѤE#>Fc%lp— qSz7%܈ńqO!4nJb<Խ8t37[ʺhƌ#g6*Pɶoi+Jăz7)KK,o cƏ#{}HJK\ւiׂVtl)R6 ns=LlZ4rnKe* G6oIψ8t̎.\IYCiN_-1XMNBW܄A\ef2?mdbIz:<~Vaзg. l`| loٮ|\y:}~ì+GQFUѵu#A ux}͎yaI7,)i/.n0"AKPwK_j84#Q[RbLBrYyMOҳh5ө -`)h(aFui7b@1Ϫ2X7럙:ih7 >eCáޓt",O}JA&2qBQ&IC N)d1I!Nyl8SmU1Za{ʟ58( \JEᯖna^l0 Eo /J m|iOIcIA>aІLxQՂ# -O*(\|wjijŲUoRx73F"!8t\ttfb3`$ҵ;҇WWju]>=ܽTٿӳBz6N۪9^y׹eCk௓)iuz#x^2*ր-ťk428@"+(>`e&Eo ۬&w2pQw":x؏؇اq^#ai'Nl42u4msgMߤi,ۮ}O)UKWc4W0T}s;?K6^(SѪ.ۻ@pٻ-ߺWTE(L%lɳ|/5/a KxyNvlg)ؿ߄=|yQ^|P> :wG"YPU{9 Xw ~= 1LV(<}v诣ktczpeGQBy!uY^GӪ^sTbzJڈ)/gXzelXt qYRXGae71`bĀbB? 
JK4ݺDέ4?mYи 9s0u^K>W}2 %qL TSE=9 !ej< 53!06̺L C>-ň2_hAnzr;lTqk6}`WFԴ|R}C>$B8z5ǀ倫ZU+r!jRq&Ucj0Di鈩Adqgا##0+iMS$d&t=ə vL 0BU~-=+?Ky~͏u)' av4vo/~,A=xc:Uγ?j>Gk31pi0\vsbG?Ce7f &D6Ί'Np}Le/~O>ffҵMcW^b䄯!dh6mJSo,ZXkoL/zB"?%h8RcԪR$Ps{#2dv9Wd'LsYWف0$)Z\ȴ+8)ht'loۨaɦQTSr^H߹ͨhtZh2*𬭏#ޝdd5훉mBᐉ}uʶGbR  sx(h߼Lޖ&C1/,]qF&/,@_ߨ]L :MW̮2#4qpOj9ヷޘ>ÞMnuѧ 1΃q29*vہUu)NixFp"~bmr f J( ш4~P]?̵~&\ -B[Q5Lm܁#31 FH"Sͭ wZz]ׄs!ZAFeVv}7"^iRWd%z;|쌘|؆Le!ZD]$zTvr8-̻t:1@(:bTӇ2I3k#'.n~J3?E4ukGO;6}ABvs;,rϳ9Nc;z]~2dgDYM--t&A}\QAg-X AYw疢s Zi٩Eq33I{3@Đ:x/,A<ظN P 0vO35|./v̻91"|d*71:G6S.v~D-cNK :xj3ȐCaf?뤐CdrH;lPOY9ˤlA$BOGmHr>GFN*7[ pqM=k!ێ`'|m':6̓">^{5[#4Y>a:LsCqdg}|=x-QjA)h>=:4#ׇ$ oKr$ i KE7^AZϙ绿{v 36Ap5$]E2µo*Ue+NIS(Nmu47sEW86;0 ?^)cb4n.9#1) if}QѾE}([fy4xzxKGvh>gߎz^DKƐӘ^ja[3bH?\sv怦KZh9:l ͫӵh U*2=j| h~wmUhdt߱ eäy^*Ƽ>#fsSbꉫ;5M$v=Ie V))0'$O&k!܅aNE8Bd!a ňg4.#w]M0i۳ힸ2>,GN٥`1ijG?ǑU^A} ʅ4W\]b6N3 4H6q)M?(ןvojt).o0랷/NI~9iݾ'_rsj<AV:L w'LaH3?Ϡ6 BI\ա(;񲆵!&T]ZA@{\:0 *|Y bs5 'wH`BĀ792---61%M3q>sSg^҄_Y yIq Y 9ijfs~ty?k&0ǡ\&@Шv58ҧ3򾄁=&3s,]CjQȃ>q>1aKpX3%\lp8Y}Oe0uy 5mֻ j0%%Km;YE/A}. kW0 {]2o'@[MVWE}мX3j&0btZ*b< ]cӻr_Ύ.ײS66R;R8F INJ<ֲa-6*@ƃJR!s  9#%.$V>H/hWs8:aZdvDEᏰ?˳0WU=m);x⵩ޟ)Dāȱ'XD tDZfԑۿrV8BY#<~LiS} BDӺ)_a 7okg)OXo?D1gޝ)Gs-Ws@W/fKӧq` c#ޞ&7VjDYhN.zo0׏{w.GFA#>.joF;8~]{X 06 9 :`c-" Ќp_E'h)\(Xy/R=/3ϔ-^8fibf ?Ҥm0ܵu#)0. sKAZ0/K^/ wmꖇ+pwG k#ʝ{XObt~ABe)M?yYYhcgNZ\Q25jSKx~A8 Qz,urdIS } <_ L'>\H 2懡pC !v3za,;~<ѿk&!zdžu~ݨZ jíy:'N*M>ƀ C{s  ~!80#2}ޝ>Ojh.# |we jUŘDk7tZm&սaFPÆ É@ZA: M &o SqoѴǹ ˬ1nv?ڬdJ}a+K^sD4Rp9|8.(!#\\-w~#==viN#5Pt)\` W $3¥1)P((Ow1ɸX:(AC}W .n1Z^T R[\L((%B6@'VBfؘ-IkL Z xK BP/3o][Yn>؄BaÑoaJPoa1!МbC(q͈ E?=zhʭ6JD|ߗJa!wBf6J$QE@"Kn;Of %pJH0@QgO)-jYUmVGl}X&쒙{qϐ^ Qa3lbvl\w g&` p9כV-411<b(h0H$n+^9Hf/ɂ+׀|1ktԱO)d|*<8Pvx֞QD6Aؼy:.ߏRPTmRp}{r}zMV;aA `_`0 pѴ`@Sok~_F}.r0ŁؓDf9AwRWmTX3^4n6jb1M^,W%}=4SQ.AAy%;\1>)t,d&( }a#0Kq}7(|eQRJarms-]}7eοZ;|Yzhڮ`FW|:Ātf1)\5j ?bi_?4" |1}r=aVlAq߉Hhkh\gpTjE|Zѫ _Eo C5?d6w6;}VGX;l >McPÙ %zj^2)l(fzP)l0KV̟ 9%6Aqc0gn!!/e9W.{:_J;&6,P])~HB, Gܛۢ]f6o^6LhH<xY@䴴ڽsMJ{E^sޕV`>(!bh9\, ~:7Өeaaqa cF@2Ze )67p_fK,O,Ox?!ĿHKOK=|>ġ=ބ2B \7(ptkE֜@ :< O7#nqdh޶6[_]ƥٹ.aS Iur8ϑ8Bq `Jp]><`oȤ) ss6x&}d/z> \?vkf;C%<'O[=WY>.N68|a7^+afèPM( bgd3sYNK`҇[M5ޠ"Bш@Pg1{HU|e8߮p C%`p~yQsK3d܍~kƀ `\wHL : A*fA}l!㇅ 2b p€?2Fsid I>4Ʌ7DWAqP†:sQ ^7E1{?/Aͣ8ԺB鉠hK3̽P0!`c2EYb( "Q}y6~p0w8]|#AfMi0.zVQP^EwdT]w%8l|gҨMf L\*~|wΡ0Чњk}5~ o.whVrA*J(W0$s& 6T3?@.î>d”oU9‚ˤbx/ H'pz=k?9ԵYNO1zΪjLuPԩ^Y޲†Z,^\oqW=Xr0I~;& 6T@;z{jc}|j!*#zЅHSZp,˼)"z&}x ެC6Z1:v#" 6>=ワ;Od,Ba?1׭֌wVPę'Ҕ:]r'v7c}޸X{Y.p`?cgļĶ}((qnN%o.*{DU[,"Z#ΞKZ47_QX,ÙDjM7c m?\] |{跢u¬Yl)tIh:%XwM3dĸ uy6 ѵs ̣q~H \[b6/Cڎ>nhO۝TX>I2V3oڬP@6gd=19M{uj}&q> b(QemHޢn*gɇS۰:v`ݮk]Z7zˇ?5"&z.ckBx?$֎m>fMxUԩ#hV@88xnt_ fkZWn\G ؼh߼CO ?,X%v<& )1fXh1by+؇nN'L\we; !y+E"[6[5^IDTWb:LcE1`sQFN@IDATiP_[f\q[t_M:kӢ^#aU|+zKtnݤ޹U#M\5S9J{^Y5Ll>d]{I`%^}@9gAmФnu[&_`>AKzi=ExKxC9\/+z~xYFЍz#1)U!׊6MiA t7w_wֶX[pxٻERJxpVMTPV<2kѣC3q/v o|5GTTN…5_LCM'ȣ^1|\xbp˃`Nשfỹ/?nPxvu:Ԍ;C/ZG`kGqtCKOK;u&!_ө?nm&OS[6)K M,]S\1i.hp-8r"^ ] ʅ4]Jh8(Z.HvmnςY⒃us$֧7GA_/P6ac_{ :w.>Z@P20QpofGHV \l#pQ2H(<@WQp\ѷVBոWiyYZ>`گRK/M|IߤPRΜOIRREh4BRB!bb{b@S8s|+˅gOOԫ!X$k!99mLZfɸ8ү|%~χ/tff&\1E(@nUo^~YCA=MA(^mPxr-7>=f@2=>xBkI;f[[+MA8&m V32Mk9?%=w:,wsҤi@6^q (կ r8lMTWuh.j)(3GDYh+.E!=/Й]Д'K 'zG ~Qi%4|Ef7|'M?#^@S@@Q}=y5Ybs\Z%=4,|j^EGs8LٙN Jee |(, ܬ5O `gmk㧯noZla;i!d)1tq V$OX8(fxmg #n%bOƋ<5b3. 
:@YTݎMنs)pLcbٰaV 8*ҝN',G*Ze3x-hSy`<ٿk;լߩưҤLpH ]@D`5'A0Cbp0 l^ݢ{(;\Lo9]*ƒZرa46F9co@[У}3#ښkMKt?bA)^{dHIKU+32ML{j=[{wBGN}GOjG (-63ζqy+:f M01+9 O >7  x#T&p4Nq2t`s9 }#VfG FO:uotj.ļQ1P6>1I?C`Ǚ檈&r:5AE\ؿ /?2XvuFD ok|`^| pα]͕L(dAV=8}.}GOx{ʱM*֬t^UFr3,foU1hp;Rxo٪cw`.7n7kVNm#>QvHmĵ]Z of/OMWN 'O"&vE˭,y7n6h3aXoubƛfTd,ʄ"#+Yؾ8;s^|?tYƹ˅פ nYe .zsҙҟb>,=A*z~~Ar`hnB1{{wo? T IL\G 4[!GЫ]۰a=Jܿ[k!%87ʯ0ڮLGa ҍ#^Y{ uެ'00e|䕴P)lg%lpa`!AAmB)"$Nٱ9"lq+4vjrX;ĊGfBGVݱ$Za|J(_V`R56wQr1r5rdFM\=?wZT*&^^\޲,zT cw\zG?,B *~Jfz̮u;DۦuĂ[]F$>vF[Il '@Һ5"!/ȁ>^\x@ҊO@fP I[=B(AةВw &ծzQs'3ϡc}qfqx-fpskPQֻb17zj{&iÙdfA4uEFȇo"ȡU`]):(-+X۝ d'#M}k FjSŬ^mݚ]\ٶhy5Yd2,C'V$Al00<„I=tlv Si[~}ich2iwnkq*aK€' 6]R/r m=bӒ,8 }цq*rK rL"!)YLsм0դpWDV̺n~)l=-j Ka`+:ɓS|f">d?_QRyu\RiT↫ډvɬ~&2o:/"ԴdЊa JS8#d-O?%Yq"l yiou K aߔAZV"|Fhȁ]ےp5vXl1pgz,P-qz9wA=>cZ ҋSϡ |l?@ ,{cEDt|P+Nw,¸1"rBM?vc[{҂q-|4 49Ik ֍pL i2vہש*'8wF]PC=JZؕ&5D'[8pah& ƖR3ixZ6PeNƸ8!]7'}=_~y4AfQ^~I |'<4&Sm̰- `jsoO+MuoDߓ{!poҫ@bmJ*ݫTX'puJ@L'A wCqkQO >X ,Ϟq~CF!C)aP͏9m'?ğءZ?z`^~#Fӓ54Y}Xd!Z^ZoWk".M?$:XOThB2NX22qYՑ֨.G>m5i 7@}muԂ^?~a0/J`@թEfV[X 2o`w>!$VU+eD:v\]3]Rp0Dh[hP41iR]g "$3X֓`4S=|(eu]Tešcqc[y]Ftiq( ܃ |hh'胻2 +l'SϞ³_L>6j,vY%hCGѿ#7z\i Emt=7R/ŏ H95-&#{+$\02҄'Mo8Ly1֝ ]ƪ{%숶dhcO_w璤P k"YLwpBh++B0=&\[!x)@з4p1}>3|wUМ~W6ծ&͟.C\xԈGfnLewaл\ڤnuN r)+'-5g[4"9iUPO?Z+ Ej>M㢞՝{#"'p4F!;dR)h5,T5f KPe(l E0g!_M|SR ·j W0YS]/Q{ sh/$m>6nR+vž2rMx$I0uB\ *&ےBVUkGToFu|&~1[Ѝ}} GFs}cRУxY}uNǟxٻv=V*ʁQ>}dZ!턹`Tm)H዆6ḋ?"Uĝ}벢?Q@p&RdJ 4b/ORx73nDpⷥ١畬2X~[NFY}-Lj1>ijbky7M,B`DV#]rntQ\nmZ:?DžsG ZZ̜R]9ԭ]Sr^qQP̂:T7|0'&TɬQU}5>K >ܪY>ׅs35…:[ .р4"+pZdfCA7B M"Qix[G7:+xA4۹ ^N,k`1j'ْ0:6.Іn|o 6+>Vus%oÎCb喽!N=؋iA#w$ nLPdȵtiG47H? {Lwʈ[7f;̪N>Sryx]HGgࡀ]?1AMLD(T:.s7|Hb6Twՙwwם@_ _MGIP9elndGjD!lQmE"XI?Dw'Ԥ0Z*@NBWk(AfO>U)pD߆NMs QYfVL)dzP@Z'{Ux/#nx4֫Hـy42k!4Yd$v~|cYTşXnO~m|nn}e^(9A?&P vVj՜2ѿNO8tl7.;~jɄlstnꭏ2P|jɦxZ$<OR%R(hr1Ll@x7̌MH"ob\:px?^"ySv+RL#(;,kB>0tBؐpBi"rxk/ H."rc $Եkb5}B*=%J>t }Ii:cb7\.^? > ڒ&G_?-j\oWڀ;+%Of"LXX2V2V8Gz!21wHկ[k1SbT艳1]-l x7^A 'asM?-r/@νGLP`^I348_!zthh69h O s.0ta;}h pZ>\䁏 ^~e@0/oK֋: Cߔvç2&D n%r8.edjyX,]ꞻj3Ugu,!$\*>y8ϳ94rXntMӄ~!Dr-Y,OFs5ُg`Ke Ea Ծuzj ym^c 21XR1ĪO &S; TjW(R3^VMSF#BW_ŵA$sG INiwMvek2kЊ0>dlߚɺ/ Lc]})|[_yq ÇA}ه]c Nl#b6ةξumOw윉DhrT9V 8CʵB7 $<NXRX.۵ q9|,:2!OV2uy7,Բ9Q WvzbD%Voz"Zu"óS6ǣ!̓hӢ5ҾOɀqp9b[DYDNr^Z70 ⟮m PҷL*_=83*>y!<aĔymbĀb얉€Eud]F'\LӢqm:-L5C״p&s!ɍ c=vhXq,9t~Njzf,WRk'ƒ=QO>XNy7K%p\(G `h6FO0(1|+B2s^^䔿Z#KF^+6>"2>׌BC8n߹sL A}kfW 6i"f `f6 1D5Rmc$o~x^CzC\4^FӕSYz7yK MҙCӃ`5x /$'4D4G*SiDDlƗ噄dYɱS|S`O~ڴwkX\H-fǐP1`Zu{pfjf_:cn{T:+ ov~gõffx3 l7Up@1&¶)pY #sxtPʁ̤Pr"D!C&0GQ,l`_p`HkU(c|@~Q4}YZo)֞U9{/ݎS9UfV|SP鿁@͍e@2j8T~NM!3L_2.},hZŤW/)ƽzq&l3"]%h/6E?9ۯKT.0̐Ɩ2ٸƫŌy+ )rJ`>OAw7(q2[6@D ^@^ן!Nѝ(om_aߤu~ŬwP{?:u1> d-gWOU(dp4U *_Oٰ`;{E@^ }v`cL46o'&J8mEva&BCގl : <`+*po䰱Q,I{.>vu Vw#w;{с=gXEg ا;aY =b\TS/[ZSA\c60ānn*8`vܯv&ڎ|Z,^Q^MN9vL͆mob1p;ЭxJ!d4AQba^3I1tOz׮;Ʋ!V!1rA{X}ڈ+.v hb7"QmM&S>vVCd"ϡ~{_>Fuq8lQ.%Bظq_4\!ld}cܡu]|sGFGkQ.~{xl\Q+ 013+ )p[W#dƵ&KrDo 27sj07 ٶ_ +b;_!4FLj <=>1ڛD 6#վt7;]Z{uhCYԠBojCޮ _&|)scԼ+!@_uLW~}Ԣu1 |o|pwvFCݴY>)Q>Gw>gSUkRx6Z AngI*h-,SI:6uKpS|^',0{֧ M{-gydTTmaM>wW61ק0}4\"|}6h\U+e6r|G4B f=ǞS?Ϻyutmu/ &ZypPX=Gţ!0P2֞2$/L.r02aȜ/9Coe y!HS@iRW%dec^SPBG6+ pՕ&j}ctH;PSsgU 84yoAKNMd79,M1S)߉ 1KKFrS9fG! oK3A+7h#S0IHJf&f2iɠ5ڹeCdo(v:&srDTE\F la9Q1ƲرݽwTh"OJiSPB)"o)؝sޟy4>L="'xC:dj吟Fmpg1q`9FT2T1$}$^>?*e`Sv{8ͪ 09_+b©$;|N/}S4WH;\svYR؅͡RKaLjBh^IV\^vAhj$ ,Ak\&M7BЛc1h$,sStH&LBZ6!ɼGa09_ըMjhFC&\胝p=`dxYU 4kY:f$J5Z3Z)9a09J69 @ OI&Z#SW\N޳;j*f͜wF9+1G4W\:eӜ:k"vN_'`@w:gp4SA 2-`pq">!6<0YXE$ݖNEDd*Wm/dR#*4ߡ\X{ZC#3t2wPlf&T 9 )lDT1pU"BÜh:U0{6qi`i7]Կ20CrjqYÚ^$ fYAPPF fv1Aߋ뷪kKn޶C6Ģm%pu7L (aC4gA֗vp.yrʖ%V͚|s}zJ4A4=%f!)zù7z.fRqx+A5rRKMǺW/S#R:2"[QB! [pE O|DS5Me`gp8? 
jCiXص_ޯgv ]ԌPf ľ]eԩ0j8ٴY%SϞ%Q4}֛gU_!C#"FokW:APj09CٷY=&;}0h#*C-W8@S|M֐O_F S7MRuƣ9_g ɷC?s஼'BjǏQ&̕NBRMO]]"ޚ2M1ѥ#Z~[KѧK+qlk~~Htn&ѳS3X΂sQnn\J~VÐ̽@/3 lOC0 8/m2rpo$MeԽܠNH>ʖ)S.{p>~0$S'D7Xa@s $`$QͳRTOO —γ`O~eaM(\sΏL. (\  E1*QX 4iq6GzzP^cxW_K48jXo<}&-(ҧۂvD|k'b cj5g^?hUŊo 4p/31۝ +jBs;g7V J L(0K̈ØkΗG6F'Z4 틊>i,+ &c'pԼkb0M 2@Ξ=qnƵoiLfv>gQt(/-qoӪe&&ӳ7tC1 >BIRpp>fjWڜMN˕9. rVl<^[GII L6)fPcn&L %l(HffWR/qECoy[L(B pG;\1>3l)l0A1 &} Hd,I/ cphߤFM7ZRD aҹo9.޾qٿ2_i=pUbXx`=c,7kV#^Ao QcG)[#Q]}1k Ӑ7 sͤ3~N2]t;4V 6f2X(%p Aaۤ2 ~wxh```PB0`&twVaH8vp~`bĀ70DŽm)5ioFcg`tQmZKǝXoÈnt:t2G>:&N%C1eة/+cA:98mb&?ŢOaҬ6p7:zwϫغIsFZ&E?`/]uNgpX'_%S<̨H>2Pb}(U`V^oSMyA]47-;ʙ`mpGT,g=uSϼV1y׍sgnq05@ &򋁑{qz#L?͚ж~Hӵn[hh`|=w< =YYIt}q:%}r|9#p24P \ϢmkV~PBµ ӧ#TMgVx&c a;?gAryH2D%2 J0P>&Jك VGмyrW%)q~=BU._V{dDV$,C>av )#E4]?cNaͨ)1 \?(t0- N&EfS@, Aㅩ"ժ[oт =-I{mI}#:V97 y*`S"Zj_ȁz[A: ;sgRRsOs[Ԭ^CF `G/> ȂrCwn۹i>tVi Qkλj2 &}9% &€``uAUժ.F1nb$[S:u6dS)z1rlQBӯZK>]jUM߹BqMꢡP 4Cv;8X{zx 끶^^6̚5+`]arPΎ{;S=&U2]x)#'Gp6?NʑG_ `ʦJc8 |քe;Umȉ Vvq\d tތJhU"T|1u8Ugo ?'RCnǏHڎu18vd %]#KP$L(]I>ޠ֩qm>;6وcqzaSͲ9% zuޞ< eT H9ǠaY\|GJl;-LDS3#EAyߩ6M 2ϱ:L@=.2( aQvv#Cf 6x # c(cS--~uSͱ7胈P4BCAP2FA\K\70MҨbfYu3p (ܫukP _vp^&#M>yj &U~~;ˁaN )4U{ec]O xcƜ|kԀVVB0hԴ?|ʔ q$5iJxOElq*nAbu }cL vϺ$ [ #֨/>FtW.} ^sNGN~[*F_JАoM w֍!sqoG7rCH`+?`! :h1u۰+q}72 3I)xg'kmAm!oN~vFEM~#ÜfeYC@IDATP|Urv:_ZeUB͏T6}/X#+A9-0_^)0 ZAWBԻ9,`DN r( sAla0&*AC}N yO,Q tdᢢEwdTq]3v\!4j7uø30p~\;P V`b@1ĺ|70c7m0tϩ3XaK@vX.TЉ?Xqm hnV։-6rt}Hl Yks`îFzhVԨD`=؆3 ;gIy /ܴq`Mj#ԋGHRãxW/]8-6>B nRAͻhypU}髴Ou}aj}B#>\Cf0[bCͣua5飰,>82 \^xZx=c9umw9'*SB3&L F<UPb럽@!$ޛ""X'=lD=)곋 QD; @HϽ$6777&)ΙئKNOEhO_gDMuf]|.XS:~cH0zlk~'ZG͎%03q+yBP>6J#JRy5fZ;I٦DoWxG K1Xk(zOh{mțܶbfϱxpR~,ԇM M܆3W{0Ǣs`0/ 愻wEpΝb6dk0`̏Mvj^1 X/@&PgC obXa̩Q[TK5>3񡏛Ly;Ot~X…< %t :@](@fˢ þPgއh#*`dEfLye/XOxq0l:|<&q~hTL;LIW}//{{D|56lo iH<zWOLr@/ļl_}epfbJ^Fzy"hǵxʒc?eM6슗3SH/y,3fٌ NeS]F&oDQn?Nc*c gӄ]b1M[^8WۿG,nmb;6?_q1936 ׻kJk8OE`3hӴeܧKx Gj|7z˹%K2Z7 {wjN6o(-*}8=p?47l~Թ٦ {9oGϭ>멞*@RsׇQ& /bb_D/pXf:\2ʶ7crwESD'ZppFzWf0o̅@s.2y./kuqmr==Kǟ蔫{ۢe4dd1рrY@1\N\< |a}L)',yz=z,99ě[؍̩t|n薻Qɍ^$zn%`AҕP1bNr%r`v%e݈ wpOKxE=:y,d>Oz'p@IRCU:h|ozZ~,R% jTb>s>Y_r;SQUUTV\x! w#ky5:3Ķ3'&Z'dȜȐJ,7i؀^Ϗf^X,_̈f<55cM H֞y#n|1:Z7]ȍi{SDw,c:G^߉pHp ^"!xvTg`˳Kmr6uOTbc+Mz9խM}̈'@(9dkiygӢp%`G@3lyjMhWCYY=ΡٿYJss7gpK2VOTrdRtO] aɣ)/8xcYG΁53郂L8j,5*knmտLNR+0ow~ ßVqƇ ]eTd:g]CbF2ʀAaVywyfbm`n)u^ebN e'5t:2.qZ_ɣSfl4+ts>oHO76L kԵݓ\˽L{Fao3\b\o:Bܡ+͊V91*cB/C 9vtCVs_vRUAjT@yx?Sl ^vyqa`߾yi0ЗdYYk\u{{bH5m?| DP櫦*D`FTiNC5::{H{ɉRmQުfy7:􎿍Uck=f}b|7)5ͣ=W9i*/ {ggO, Ba}{0%9FYVFRˬ酡~[CHZWm[t.)^Hef|37(ME/&Jc@෵{jե#|K1\ԥBlt8)gQ n] ]˷ёDlDԫCw؅XJ;Xx38.U7W焾HK HP6Pc~iwR:y&`<{KzԦIjk6Ӂcg8(eDd˾c"mqdX䙫(`u@_3RCƬyı {]Kzhx"qiT7(q4θY] #ޞzvFFcnhPP?{$BRp;lL JJJLYJ`0hxygAHPtXy9oHRRRs6* (RTVs/" Hٜq)vMdplAяxT gR$Km"}g/Ёg~Z4v `N{4dtA)k>bu~ػ#6 ώD[M^tGNmќeCӿ[L&M}rHaasnR- {T¸+]Z-hxL~\vĝ ww7ؽ- ^H0>5 +Ow.vߜ֩EF(zcfnra};8"BPRZ =qMze~2!cWMĶQbwmնX"l1OЭ5vNHGÝܹc5twwfAHd"J@ah03Pct\KixHOHXym mS:6s7}\M|<=yhe1F'U]EUB&"W~1Ͼcf2E&pi~,ޒ9a1)۹3qH&5lvgOyeK_Y@uupٸ5k`X!.3>#MԖզHbF43?QV?7'ѱ#gћ6=vqMԥW>>/$ 4k66ml6 =aNuiHf#2.1Q]`|-t76Zy?K- \{Y 48@6C…d+WPkfVM>p`^ğgs#Ҍ̬\y|!$7f}-KrH3$k9:vx䜡eeenܠn[JS #؜v1K. 
1{ J;؛lzLa>N`s%wVLb6ܡUVD`BԔKh.O080+ a =4W-mc1OFiڲhoulf/z;NҡI1=C!$MˀrQJfahm8\)H,?*S vX-+w$mA@aՖc0Gx'2JM|d.2ƀ\av5̬udմB|8غxdE;9ubp;;B}&_ wKQ#fԶiz@ 9,J߼Ϸ|ԣUYҔwPQl9 1OW3X>l{3;TWW@ښ !9JU=.Eע+g`21{A 1c#u&E*6 xSӺ>oQ~ϘXV &pg2*g_Ó)dy^L-л>w2g|IЭod:y>6 P{}FV6I-,H=>'՞a ǃ\0.QWaym *еP[+HݱӉb{}] a&Cz*B~<>_WR@58=-S ;fns m%{R4@x;z=Iɧ|)yYTyC.5(6i:vp* Tb\Je)StUU8&.Qsa]䚯J`icv/-ͥ4a{VVM boݸ>%$sx*T  ~ 6q)Xn37tnҸZXfKc-ɹ5 'ԟ\Kxjx{[| 9ҺpHl(z ɹ¥'U*R!P$ ;`4@z?ѯ_O>-\KxsOjTl w?aE=/@z|@ ŧD} /;&q;,:z`O~VٳgP]e7gڊ#ʁ:v ig ξZY0#ؕo0y)WR1ٵG֑Ԫq`,$$#ٳn`|0Xeilp5cҋ_9v5=1UjÓ?`(=ab 6DU8>p-gnRB5nx'nan]E޴7$s<`k(.ޝZ=GT})**^!P#܊EWddDL-$5u6`pKo{7pB̭p8rhԔx 9-Ĩ6oC3<[ n[\blC?/qM^H)$b]/݆aۣUJsO_M!`(j3>dfv>z,@&<_[ii-KD^;\de_V Wѷ8,]1# G}oyﵧX]c,{do<{ֳ c\Ki*esAW#7߆7rq|`|öb?XB@!``쳵_|iىB"9Oro?x=niXd1~tRL 1kjxK (o*Q Z: pfoqW?ߘv0rl+{_u. TNH.VѽFhO@:1%1S{I)yy^d?=p7rWd4'ز|qWB%@)RT ̱XW+٫+juy %{ߑ \TædLĤZ(V׳pUʪkTkPW\C0bk0|G[Wk!&q]պ> !AWy+S]qhC1YNf#a!R`q8V=~okMu톒lT€^?E W@`;F$?zUULL XpNSusXE&1tѰaӳ 577VvHMܮ-|@X GEۮ"W( @Lb4*iUFMFL~p9.Ù l,\.: G$p-1alc9[Ew2o9(&?7ЮC~Ԁ! {uiHD@h<8THnD^0.'4X1NUePT e^`!d<بݪQoHJ6:Ti.ްFԍ: /ó{_/.nD_(?<A!b6vĞ{y e8LH[2tnLB:!:Ϟ &l:B6s!J[۩s;3jdˋcJ\0LxUV2\H8.9q@BerR\{k? 6`k0#wEؽ nd=~@z,䠣o>vB$ C{]>s!_c%r ų- ov*+ (B_ۦy%q$>Qı4f?[pz1g908Q#! ^E8KWh͎Ľ71nZap ONi@6:{g?KoÝ> 9kAMɟcYr--v|PeQ$2RUe/`X3~tMtpZ$OL 5үϾcv J6. UokwKS^ET,_2]-kU2#l&|铟bS[o^).}!b@\2_tUD`BԔ8 oG2X`z3$Ć\od+sˋq%f5@oh/JMˢXxWpC(U_Adžs*Th[EC[5&ᅿ%ٖLgXEf_p-?VGA~)BL*PS)u(_Ce~C'7χ:nL9V'v-&cSi 8NcT `308czf{yZp{yYxCиAhm|~؊-wd9];_hXߎtu-i?Rda$𮃧h Qҍ{J Dh횊AzQ.~2W};AsjLn1OtNs`-A'.t`Mv5lԑldo`7W QX]$,пD QQve-G);9@*ۉ @Ѻb)tT H8lS̿hnaQmv{sYD=4sn!̥};fs%厬ܗ]4v\eqY~B`l T;AWVo?H#u* S?_LBa)s.&/_N').005ҍ=lN9Esb5cA?EZ4f݄7u~ݢBs֐*ZxzoE#F@:AE [$dCG?7^ ~_C}b'vk `݊.IiCbhoI2pi?O$O x鉶2YrCe.}Pd ' @h;qO1`6տuΒM3˒+>  _VlؘTP!L+ƛԫ]-{عn \=p`MU̳N#|woa*8`-[x#\5W s]I6{o^IwPeC^|81/<`.eLwԃqol--&$.3YҝCٰNGegMK='++rF&}N_yq4Tx˘bJ! <pk񇱂]،}Dp`yK'ذ^k:eذ(Ơ(%u(mz9>;#]6T:>0և 5k ղ'mݸ/OxpLcpI8";ˁCRYˑAۈ@-vql&lB"^Gh[S Ǖ){oI\\Ua- $<(%ҹ4^-ѴhXD)?ldO La "ag7~_y6G;Jד/k9"Co3cш1n$8A$\1/[׶+?җtLZ*)Ϫx^UaKbx"as{;W ap% w5\U ޽08hd4 Ƈ]K! ̊+ԣuZ.qkArmKaB|lѲ5=Vx4jJ$A=|mVW\aO~'-}` +->\b|84^Eܾ7 e-Khefr"Z84{ĒT3 o'i\ !q,Ro"Hf"wQn=ߝvIm~T{ę~V.@ LP1X `v3/ߩm6Ϗ?]KE'O{۝DԨ;ع$|pvF'*M6֌|'Bgm79"2 l;*h `lF}>?}\U0X H\㣸Ʃbo⨬3E5[Ҏ9N5UWrf G[4[ۥ pk$II5dgB@!D&D7yZKb6`4j9Œx@|loŁ{ŁH׺|iq%!q͍ܫ~8g#]8>\QLd&rxc Lq(Osq>7Xrs?r ( kKY*^g)J>9.%֥[__?rWjQp9y%%Ljp{ӺO._nB \0qMnoq"a|9NOOkWÃb{Xߙ$N̕) @w7mU@/7)0gۢH!( D1"C@*-2C⼓Vx8AksEu>™0GA,v5>fD_٪<6n9Hb!1ql^y:`6A.`0%*%-ϼm1ՑḖm光 ^~`U_E ]Wկ+/P=@)ZT~Je6vrP?iګ5$Opbֳ**B%.B!LCXv \ө̆kpP0` ,6}&~ߐ^ɚyt:Ʊ}?-eԧ|$!%EpP9Q9`wcӿyⓏ}hfNNN) NslB @ZHwn\Gj` p 8B,pt\ tl 䮆3Saf+u4=ij+ f^|CXz^?__/ jφ]Ybы:(@̅duӘP&,]1g#tz"͒{i+[EDK m&pkpQ;p|Hfv%h_-~sҚ =~&| ~ \S# ։`ËJ_ydWu1d]?cf1 ?l- o/@L *. *E`=m6L@/rD. FX|#Ѱy/0 ;mw&R F$0#I)WĢR9q"vf@Ix{8[+z5AlĹQXP(^3ĴZd;,ʜz<``Nƌd66>1CW_E+ _^y*?*l|2m㢦An5ܽb¸3-eĿlݩk{xrwSG`HɸrCqvo?ʕ cw)(Xj|058> R:LK~֝hڼ_?`=,bwmG,_c!3L _;QTqC2[, 8 ݕgXPC"R}S30ƹA0!;c??!Yp6!|yXTzð1o02yTs2m"?WP̬KT+Jcq'">Of#00Ѿ]LªSu 1>ƪFZVO3k5ӓ$_/ě\~M˟y`0meOK+i߆AO$f2k:%Aja֓ߙ9v<Xڟ Β1|bdDRxGw<>/@M \" 9w Zr녓 @*R(l#őLO;htJ0sdM?;jeFr3J,Ϙ,o{vF9~`~-KeIf#:G_6Z2ZJ;b `6 l3?VTE3.BgG#|/mx( ޔ}-YZ9w`AygyOq&=j:70W%sE _rdz_%*gE  fL~m ' o^8{KVasбE$>K 9؜y>Z6 nG]Ys:ԨVbGIȁj19k"2좶}pZ-N/k4"%XJӔgc&`ia<2ĄK0<[[N@?|"U1:rXz3 fA1-t>-F=>,Xf4€)r!d>ҸP@Z(o{|%c"Vb̙_Y |yDJ*p rq9_Gro; *4ܬ)pzE @se֍Gz^%ِy?)NI};zKƂـc@6B~Y}y1݈%!Mm޿opO"zD8lkCT.Yi`6¨Pv~ggz=Ԣ uNEx̂TLH&a C,օ ʄ20a&j޷"U g+{"[,,fo~B=+gu ǂi6x}y]Dy凿A`k,ְ@g ZyeH帐gLrA*k|%S`Z9MF>rr .R2ƏOyp! 
\yX&6De;D5%}@PB`0hޠ￷Iw0zMiNP=1l' ,=˕5 u(+A21e͍fu&kj  jH,q8:rEsˁ*`k ł&D0S^"l}J`~0;2թ״*Ee}픺PEFŁg`=&cEG|*s|`=kY?{tZx.ܗ0gH&Cޓ8ymy16pԅB@!P0be#?;",$G87rEQs>>ʕxnZ" '?`60 ztR2\;(X| AtZ9$aժ4+}R&hGA(rax'At3TZ-4+\PU*mkثJ.c/o!O,,o}aePcpeQ(WE5e ΈL>p@2Yƛ'|5]+PTy' k>eF}tc0iCdRfy? <+׍hDxEH.\ǨHuF@k>ƇK?6DG>G~ ZT3l` J7dg\zozm41#jJhDǴc{޺4=:lPkB@!P(B@ӡzlM`8pp>+~y9,ag. e+FF-hٿ{_ZG100&06Bl8*SB@!P( %ـ*MA"ܙd##jW8GY*V р{[x{8^3\ٶz6D}s%!%| crT( B@!`ڨF'e`TӞMڵXîEX @/=l:\gN99:"#`Kq,K\`2u d6l0( B@!P(*T®66!ծ,敿|ﴪy l?o;E39YWRO;oI&XP8xHfx*]t&%٨0*B@!P(Jm,Bd0|'yqx#|kQR  $G0`, 7FK!vy;rPF9@S( B@!pq6۩\#d =~-_੒)#E.TB␪rG)030$HTRũ B@!P$XQ`. /_ 9-vd0\TQ y!Q[!%p*WB@!P(X5Xbjl!f6IfgT̄l@J~FwyU9+ B@!puz lBw*62nqQ̆58F&iJd4&D0S^R6+FES* B@!`)w5է$6]я `GZ`ɡ<נRPL"lP( B"0!j@}Fl\TRX3OqƳ;OpH&eJP( B@!L,i yɉ|}n&(̤i6UEׯ0K[,'-_{*W@1  B@!P(CQ S߾^_D"xY-KT -ɩLGs6;+äsx RWQv B@!P(j  A˻am55_o/S)rm#E{-=VXǦOnU('p5B@!P(0^F?>~o6;'a3cH=7š:y,Tﴮbԫ_3n4/G9Pr^Q( B@!P\z?ш֦nucF7OU_{'=˷;c̤ B*z]!P( Bx4jJ:+fq㇆4 GֆV`Ք]o=pLO4U)Qĕd B@!P(W̗8lPlY^1#*FZ5AH O=ǀyeSCjB@!P( +␿ gh BߐG?;d]L`"f"f6G4H߳i&n4wӹa] B@!P(!^@͇_:opp"el;p?V>ndeP369fvBO\;F [1Q'[<Ȩ8EPP( B5e 4#frCհ{`6==#|tPdWN.\,\Vl9@W3 ҃&ie1u ukۤH&gVx<˷\Mxrrs)^"뼵hסS\ eM^G]4\; #Wn =-=ۃᙪ0iz/P( B@!PZT~%BJ6GHP-?3(5;i=4nԱe$\ɠl^H1Mױ]Z7/@A!ln{6=BH}ЩsIy+Xfؠ.򥌹PT7+B{"9;&Ojp1tMo8H˞GeMb'Ln?6B@!P(QJ~8u#v<&z󱻄Tt\?75 yL/H'$r şH[XEӃ6:L EZ\TO8O!AԶi dH'w 7i̤vƜMJq.v/ G֡+fAzޛ\7qfE eO4[^OHX곟7`YӰ~0My#khC4?g'ǃҜCjgItMǸ)r-[Iӧp|Y%V~#.8]+v* B@!P*T /h&xѝKm^}HJHdvS7eo,QePRfhz:gB "l,c UzQ-)n-آ!xQVhm}Ix]^ױeCٯI,o3^((Ow#3EfK^Y94zxjQ>N2JMˤwо4NT?P֍i[SHi m1Q4_kxt? 9>^ đ0'Fj-bvi'ruV8MVےn廌S8 B@!`Q);RYe [tǛR 1rPãiBaEZF=}wR؜v`. o.RYP[n}a?kM,s`W h$0#"ߋ;qCCUB3$}"ÂEqY6LHR z7vP챳&$9"o/.#K^;Qs4Q2rL8_`6,dž{M)hTLoߌӏm>?̚~5mB@!P(C@J1 Lt\ 9a; D`Aݠ$Pe;h)Y6$Kf $9>12ŭFa%2s& wҟkw1ipE^I54Իahmh'!PْP<-\]Ljak6 [#''?{3_e+̩Qٛr[)fjB@!P(ʈrA)e̢A~b~ߑxJy}R* gǁ' Ò&ϲ$BJcz6Yi`؍J#|PoA tP9t՞9$yz\P#azdRڧ#}r:vȋjtqQP¬Q ~Tb%N 33c&;K1+?XU"B@!P(j !8l %,6ZRKp#5 'TdI+c"  /OwZRem0CNNV"wHO!H8vĞa}; cjxc/VHpnf".N0Dw {\$ ,oZy V$i;`nBu${ԶEGΣle-04>ވ/G9翭4c4%?6B@!P(5 QS3b&/N-qy#W*Cz'.O>%% ~#ҵs*`c:&KB@!P(.EעppUծ±8+]=^1*R( B#鮺\ӑWB%"Q8|Uϲ!w4>%zwLx]Hc8f }p۽mGe8g\ B@!P(Bi?ulM ~R'TF֍ g4 Aqfح { :Dn&q E_0t`J*Aۊp*B@!P(GiSа>~P0K6$MJ9p3IT;nwu߭C ,iQЉ#h[A6:̒^Ԧi}5`u)/;hHw &]?.Do>vx_hp.F][7oRQW3m-E ~Ask"FC60#`~^N8 Eq|8(y+G",lKz÷3riv~^vr@!qfwBؽ- VF7.iQƨ B@!P(g/<`&x2=ȡPS^TC rPjZ&aő,XK<Ȍܪ/5 Kp4U|{q>;'Rph6΄ocuft{.bFAvnAF P!nzڦ`*ăթsWо#GvE[A xypmPIڸ3ZuE@?+'&FpKSFԍ7 ~f,ш~i_[%v]R *Yt r~]93:jkULP( @3̜ˤȒZBBa]St#c?Pjըh;IYETt!9m'R0ujQ9Գ?Ew4Ԣ:fTdulgq':4QT@GЎ4O;w-h!"pjӤ=Č9vn/̬\y| \c=)b*%D ѦGiBîXç%48pԯ[#s4;IH1dt:w5CM3qH i\55%F,usljB@!P8̬K)ief6)=3fUYjh0nZ.򮽤H%TL@]ȚRa#?_VJtw| $\\ɠiE[8e?`RnsW էmް^{kB3suyI+8(@H-~IfV"#NxgDq%]dg牨 rr΅b6kѨ,i=?yh46cQʥ승FRT(8bevrF% $<`vuBw훲={j, TJKmO&t`x鉶v_'fFw>TJ"؞\dcTIzz5v:% x&w޳Vr3Zـ΁]_+o7=;#r9V9La6鳧Q~2e$t[ ?R( QSƙuI~Ќ7qOfOL8.*S2ihފ,bHW,O݃UMs{aW7w&Olx ώ|/===<,[Qoe-_uxI5(n4yxErYS^ @:Ob5Hp٣kn螊003'Ҡ ]&w(͞}$FRdXxJAjɆ{/8$_Nlt]\\t;3Bnv!"(44Z6u; }kI0ȶBucKlBSHgM摡,E0&Oi ͒ԑR58. 
[eʠPD[-y bNy)̔|њL4G<|S#b3yk#D0:Y{{bR>YhBf=gޙ /n M/A:xjT?OLz!穮=n\ڶf94X0dskwUߟ=%=@l`׆^U+ PԜxkܿ(`iқBIB %=d=''1Ng>|g̨̳q,JZ+8op8#hRA;76Z#([|0&¶y潭 ?q^~@L++I3&}Kit!D򓸄kpkR0p''EX|oߋ8 m]H[HxBzk,?שHfiĜ7m+4[LP^9_o/',Vi<;)OdB Y\N3YqXM}gۣ_jG'q#4)1 e_nLh.zq}8YJN:?ܦw@X+V<++5fH"nWF{hx_Ri64KъG:KwJ{vS&hS)B1ş!/Mi8S{ޥ.IH3Rz '.& qN%i'(lmVϡ tG POUQZbT-ٱ eѵSu߆<_%"t%$A ҈RSd] L=б8]kbi"`L@3ҀalTʈ:e2Evd|hB+2U5$(h˓p`4*:2s4CN^U5ap?']0#.<—ƗYu:/ߏ$lhN1^9g͆W0֏J,p0zGW f9诗2Y-W$Jm{'H@Ț3poU5 ?~H*[!KBqiIR m~ Gʖ5<Q+ M %/8֋y?(WJ9P^Jj$h6$i奀QxG1o+: ԡ 0&@% 肆.lfz嗦[o *T9[!@㛚._avv)taz7j%&dR?WZKQ˲f?d#n)܎V͒<N(숄7m Vyٵty졞2MPFo}ZGa/,ӫu中kE`"f^`h!ަǽ \m#{$'ןs!-\ދ -&=ڣW^Yc:!| 6Bs& 3P&d\}ݪa>Pw侊qHAƖ}IRn׭ =.;AV$x#IvW` &K l1*?'zf9gn]"Xb4&3Ԩ"8Lû<& YJA??xqf>"6¯#?Hc. t*K¡Fd~JuvU>V oEL7/pfHy*XlS[pmi:%O,IkFϫLZF%sbVVgdL 0B&44$b]_dvuTGLHqd4Ɯƞz(+h8mj\WP $Va ~(T<_?a" Rcm`/3ĉ 4!^gp҈T%lM pΫ4IQ8M榞 Ύ;{} (jG}F-B6>,O;%Gxk`UJVk x +ͱ6W5 yX>goI:2'Li7Ә`L5A4وt[w+6,i"1ԍb?ΡqqPD.h HR5 44Mp"ePZ/""@FQ9$}Sܜ 0&@"OF髶.@i%IAZ +"^ 92( $H8Z 6ȌN*hw:؉' 8n[2%̸Y{ Ggko(*uL`L 0F&F}8M> j54_4>4v$XfC$|Z ]PAkPbT uS+6&bY\Vj]#v6Y_3&`ABRQua&F͆Q 9Ѕ:fCj5$ti)^AJIa;>`aRCY~&$k`L 0&P_`t[q_6.d'$pa4of]h2(Oł†?UzM&sTSTuL26~s_`L 4hA_e F'SPyh]AG0xI[FkYrK*ދ'ܔh" C[H{ӝhmi3 E9+`L 0&аQ&F.訟a>6g7HPY(iQTP\l&8r`L 0&@EhE†&p}zt*xJR{,VtU&i,|bRP#c^Qu^o5|k8 && /Iz51L 0&@}!P|H{E(ă&hqrpAA\IUrM)S֯{@LEդ>/lP/(6vcه!|4x9zI&: ;L 0&+jԈ;:u.m~:IWui#x]S=G݇%ɔp8T.i/8!a/Bd(ZV}0q1Rq`L 0&̛ޢ.c.0NsA4NxclڶE7oE8tzFgtN'Yq\ꌰAiHyT+ب*H6iHoI4=iDG{&ǻ\_uYCb}eL 0&i)ˈq1V>u@JBS|`e_(αMlxuJ0yt̔뜊Nu 5o6 li#DG!: 0&W$ c/:,Io!hH SzJ=#C-˽,BW//T=~kL 0&@Dի_޷uDXlfZ$4>TZk\r>Y+pw:؉kW =.`L 0Gn}{$ 4@V4y=p]߁re!&KsJU)oӘ`L 0&|([ZJs?| Q;{ UhiIذ"һYT k'!\`L 0& El=Lh0GNzI7V/mQ4.QO_$`L 0&Ѐ H*VDt %Hh6ڰѨ@>(r;&MrO֮ bSy;|Rqn܈е&pX5Ґ`L 0:L2C 0Y,CUl& )Vl'sl]ڵOuhHCyO~P=;j{&yqaTozBpȲhfZ;Ǘ}bǡh_h&xBv:žS.m˔ Ac(viлPHWH]=`L 0&@"@B4alh8b]ۮ}ybޒM_"HY+^ۣ ԆWol< veh$<}A,g/7$tYiײ𭢢~jwu.ڻo<ݖurL 0&`G}D2D<(d=G=-55=cK^|t  G2;v? peޅx[Ś>L53&`L \vz%E]Q;edBHPv4 9%I$h<wۏcaVrL 0&)4``j͢T/l$XjH s%=;bzCS蜾7f᨞cth"ڷjf#4_JI.H@xp4Ʋi3`ЃquC! 2#ݚ5YŰ.C  Otլ#^} =`L 0&@'@I}Bk2_IŒ !PdiQ߯̔Z 9b-JKa K:̠BS@S/#)Z0,b1ϜĒs8$ rχa Y|+N %?IDATC/GϹ{dDZ Zj ԗX7=+X7i02a*仲d.m|}rMwVK~TୋR3:{QR({\1S$TThlcL 0&>Lv>A!}npWf7a$o0Cu^M=5V\'=&h\Ua*%ģwޠi6R.m՜{-V| !✶9ZlDv@[jw CxVkCVm+ZI[L$(t2T-t_7jsC߶C+!ĠzhQ?h'̭]ΐph.x5Q6 Vn/~}?õ6O7K{NM~`w3;6 yWG>N0j|𧻓jWѱpA&`L 0_!0;2'.fyqs "G3ݾ=^ړpY{!d宱0>ЖEa OiK麏Fi^@a)P: 0yX*+Sy T(o.&cIKBW˲S+KaK,;a=p^4. ߭$P<}@=[/Jym+X͓?_$v?S*hY-g#dcH[J\ culTcL 0&PTV4]po%l"_yhE(2kڴhMqNy+|`*@BAy M FXFRtN}}8HKCKCE4 ,lԅQ62&`L*C*MN^ӂ ZM4|K~^ ,͇![}jIX(A'L 0&`9۞r6֟}MņJJL 0&`L \ٸ$!L 0&`LPhm6FLΩ8ǭ+15 #!0u**eb,upBӉz?`L 0_!e(.6_c,q;˪Ozi rnټT+GjpWփ.U^x[~^c6"c& TYaEHEt۾&TNYں?~ #T:92:*̍S 0&j*Whffq\ m&ر:kqVmmMÂU>.ߪԵtbSإǷ˶hy͒вDc8{+Դj媧 0B2nү`fOx5.ڎg4sާʽBRQ`L ,0r7.˺jsIN٫s_)*zu.][sW=GR|nUAB.[S*hPkڇӗ9a#!vP/uޱg~V/yL 0&(CvnݸG7PЮ_>]wkLoƫ:kxf~{n֮i;;7_Z'4vlGPzv (oo6q7$qq3vo%wвs⋅ymwIt.;vr=l}N~?NʾR,$w)ӇEv<ѾE3]Ճ`y}+iP=)yn^7׉a4&`L :ЪDxeV̧#w&_.\/BfNʥFx^F !ն=ڛ6|ۮ]ڵ'XxnwnŒ m?V1;E0բBYd6B cp>I>=̵F{ AyqGLf'>-lD[#>p}x^ԩq8ևp`L 032s]D~erkĵD+U*xou٫XggL 0&9Z SzSw8Hp{^T<3Llh*hmNC[ dv'0H@GS>4-e'L DMK+yF&Wjq]+׋7WF> \V"M!@Ž"6҄mhjZ'.9^̆TSz$i'5 *s*QRKŒJ%?zwSk_T] QQҰK2ׄS]xJ^34kDL 0&El=$lhZ9&lľcT\jLH66ًpMh+(RY!Bi*^xXT;'EnhD69OxL(}f.nrx7~:~ JhѬH0ާhu>,*[^HHQd˦ ^*JG,"@\2^TssҲ bջvsS?&N>/hYrئUץi/kw2=ھ} s0cu^QP(Lh9ck.|)Wd)lX3}Fi?Ȕ5;Q?L60  2")* {]>dw߻=3=+G,ݸG\i+L 0&@'O&P^Hzx~EE4 _E A5N['MڪS@qBͻYKY^į~HL|mȔi@c,L:≻o9V&#׉ӊTQeX!s$YD\>%? 
[binary PNG image data]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/install/figures/network2-services.svg0000664000175000017500000014221000000000000024056 0ustar00zuulzuul00000000000000 [Figure: "Networking Option 2: Self-Service Networks" service layout diagram (produced by OmniGraffle 6.5.2, 2016-04-26), showing the core and optional service components placed on the Controller Node, Compute Nodes, Block Storage Nodes and Object Storage Nodes.]
././@PaxHeader0000000000000000000000000000002600000000000011453
xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/install/get-started-compute.rst0000664000175000017500000000602000000000000022722 0ustar00zuulzuul00000000000000======================== Compute service overview ======================== .. todo:: Update a lot of the links in here. Use OpenStack Compute to host and manage cloud computing systems. OpenStack Compute is a major part of an Infrastructure-as-a-Service (IaaS) system. The main modules are implemented in Python. OpenStack Compute interacts with OpenStack Identity for authentication, OpenStack Placement for resource inventory tracking and selection, OpenStack Image service for disk and server images, and OpenStack Dashboard for the user and administrative interface. Image access is limited by projects, and by users; quotas are limited per project (the number of instances, for example). OpenStack Compute can scale horizontally on standard hardware, and download images to launch instances. OpenStack Compute consists of the following areas and their components: ``nova-api-wsgi`` service Accepts and responds to end user compute API calls. The service supports the OpenStack Compute API. It enforces some policies and initiates most orchestration activities, such as running an instance. ``nova-metadata-wsgi`` service Accepts metadata requests from instances. For more information, refer to :doc:`/admin/metadata-service`. ``nova-compute`` service A worker daemon that creates and terminates virtual machine instances through hypervisor APIs. For example: - libvirt for KVM or QEMU - VMwareAPI for VMware Processing is fairly complex. Basically, the daemon accepts actions from the queue and performs a series of system commands such as launching a KVM instance and updating its state in the database. ``nova-scheduler`` service Takes a virtual machine instance request from the queue and determines on which compute server host it runs. ``nova-conductor`` module Mediates interactions between the ``nova-compute`` service and the database. It eliminates direct accesses to the cloud database made by the ``nova-compute`` service. The ``nova-conductor`` module scales horizontally. However, do not deploy it on nodes where the ``nova-compute`` service runs. For more information, see the ``conductor`` section in the :doc:`/configuration/config`. ``nova-novncproxy`` daemon Provides a proxy for accessing running instances through a VNC connection. Supports browser-based novnc clients. ``nova-spicehtml5proxy`` daemon Provides a proxy for accessing running instances through a SPICE connection. Supports browser-based HTML5 client. The queue A central hub for passing messages between daemons. Usually implemented with `RabbitMQ `__ but :oslo.messaging-doc:`other options are available `. SQL database Stores most build-time and run-time states for a cloud infrastructure, including: - Available instance types - Instances in use - Available networks - Projects Theoretically, OpenStack Compute can support any database that SQLAlchemy supports. Common databases are SQLite3 for test and development work, MySQL, MariaDB, and PostgreSQL. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/install/index.rst0000664000175000017500000000022500000000000020135 0ustar00zuulzuul00000000000000=============== Compute service =============== .. 
toctree:: overview get-started-compute controller-install compute-install verify ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/install/overview.rst0000664000175000017500000001605100000000000020700 0ustar00zuulzuul00000000000000======== Overview ======== The OpenStack project is an open source cloud computing platform that supports all types of cloud environments. The project aims for simple implementation, massive scalability, and a rich set of features. Cloud computing experts from around the world contribute to the project. OpenStack provides an Infrastructure-as-a-Service (IaaS) solution through a variety of complementary services. Each service offers an Application Programming Interface (API) that facilitates this integration. This guide covers step-by-step deployment of the major OpenStack services using a functional example architecture suitable for new users of OpenStack with sufficient Linux experience. This guide is not intended to be used for production system installations, but to create a minimum proof-of-concept for the purpose of learning about OpenStack. After becoming familiar with basic installation, configuration, operation, and troubleshooting of these OpenStack services, you should consider the following steps toward deployment using a production architecture: * Determine and implement the necessary core and optional services to meet performance and redundancy requirements. * Increase security using methods such as firewalls, encryption, and service policies. * Implement a deployment tool such as Ansible, Chef, Puppet, or Salt to automate deployment and management of the production environment. .. _overview-example-architectures: Example architecture ~~~~~~~~~~~~~~~~~~~~ The example architecture requires at least two nodes (hosts) to launch a basic virtual machine (VM) or instance. Optional services such as Block Storage and Object Storage require additional nodes. .. important:: The example architecture used in this guide is a minimum configuration, and is not intended for production system installations. It is designed to provide a minimum proof-of-concept for the purpose of learning about OpenStack. For information on creating architectures for specific use cases, or how to determine which architecture is required, see the `Architecture Design Guide `_. .. warning:: Once a cloud has been deployed, changing the host name of *any* node in the deployment is not supported. In some cases, it may be possible to remove a node from the deployment, and add it again under a different host name. Renaming a node in situ will result in problems that will require multiple manual fixes. This example architecture differs from a minimal production architecture as follows: * Networking agents reside on the controller node instead of one or more dedicated network nodes. * Overlay (tunnel) traffic for self-service networks traverses the management network instead of a dedicated network. For more information on production architectures, see the `Architecture Design Guide `_, `OpenStack Operations Guide `_, and `OpenStack Networking Guide `_. .. _figure-hwreqs: .. figure:: figures/hwreqs.png :alt: Hardware requirements **Hardware requirements** Controller ---------- The controller node runs the Identity service, Image service, management portions of Compute, management portion of Networking, various Networking agents, and the Dashboard. 
It also includes supporting services such as an SQL database, message queue, and Network Time Protocol (NTP). Optionally, the controller node runs portions of the Block Storage, Object Storage, Orchestration, and Telemetry services. The controller node requires a minimum of two network interfaces. Compute ------- The compute node runs the hypervisor portion of Compute that operates instances. By default, Compute uses the kernel-based VM (KVM) hypervisor. The compute node also runs a Networking service agent that connects instances to virtual networks and provides firewalling services to instances via security groups. You can deploy more than one compute node. Each node requires a minimum of two network interfaces. Block Storage ------------- The optional Block Storage node contains the disks that the Block Storage and Shared File System services provision for instances. For simplicity, service traffic between compute nodes and this node uses the management network. Production environments should implement a separate storage network to increase performance and security. You can deploy more than one block storage node. Each node requires a minimum of one network interface. Object Storage -------------- The optional Object Storage node contain the disks that the Object Storage service uses for storing accounts, containers, and objects. For simplicity, service traffic between compute nodes and this node uses the management network. Production environments should implement a separate storage network to increase performance and security. This service requires two nodes. Each node requires a minimum of one network interface. You can deploy more than two object storage nodes. Networking ~~~~~~~~~~ Choose one of the following virtual networking options. .. _network1: Networking Option 1: Provider networks -------------------------------------- The provider networks option deploys the OpenStack Networking service in the simplest way possible with primarily layer-2 (bridging/switching) services and VLAN segmentation of networks. Essentially, it bridges virtual networks to physical networks and relies on physical network infrastructure for layer-3 (routing) services. Additionally, a DHCP` there are also several internal data structures within Nova that map out how block devices are attached to instances. This document aims to outline the two general data structures and two additional specific data structures used by the libvirt virt driver. .. note:: This document is based on an email to the openstack-dev mailing list by Matthew Booth below provided as a primer for developers working on virt drivers and interacting with these data structures. http://lists.openstack.org/pipermail/openstack-dev/2016-June/097529.html .. note:: References to local disks in the following document refer to any disk directly managed by nova compute. If nova is configured to use RBD or NFS for instance disks then these disks won't actually be local, but they are still managed locally and referred to as local disks. As opposed to RBD volumes provided by Cinder that are not considered local. Generic BDM data structures =========================== ``BlockDeviceMapping`` ---------------------- The 'top level' data structure is the ``BlockDeviceMapping`` (BDM) object. It is a ``NovaObject``, persisted in the DB. Current code creates a BDM object for every disk associated with an instance, whether it is a volume or not. The BDM object describes properties of each disk as specified by the user. 
It is initially from a user request, for more details on the format of these requests please see the :doc:`Block Device Mapping in Nova <../user/block-device-mapping>` document. The Compute API transforms and consolidates all BDMs to ensure that all disks, explicit or implicit, have a BDM, and then persists them. Look in ``nova.objects.block_device`` for all BDM fields, but in essence they contain information like (source_type='image', destination_type='local', image_id='), or equivalents describing ephemeral disks, swap disks or volumes, and some associated data. .. note:: BDM objects are typically stored in variables called ``bdm`` with lists in ``bdms``, although this is obviously not guaranteed (and unfortunately not always true: ``bdm`` in ``libvirt.block_device`` is usually a ``DriverBlockDevice`` object). This is a useful reading aid (except when it's proactively confounding), as there is also something else typically called ``block_device_mapping`` which is not a ``BlockDeviceMapping`` object. ``block_device_info`` --------------------- .. versionchanged:: 24.0.0 (Xena) The legacy block_device_info format is no longer supported. Drivers do not directly use BDM objects. Instead, they are transformed into a different driver-specific representation. This representation is normally called ``block_device_info``, and is generated by ``virt.driver.get_block_device_info()``. Its output is based on data in BDMs. ``block_device_info`` is a dict containing: ``root_device_name`` Hypervisor's notion of the root device's name ``image`` An image backed disk if used ``ephemerals`` A list of all ephemeral disks ``block_device_mapping`` A list of all cinder volumes ``swap`` A swap disk, or None if there is no swap disk .. note:: The disks were previously represented in one of two ways, depending on the specific driver in use. A legacy plain dict format or the currently used DriverBlockDevice format discussed below. Support for the legacy format was removed in Xena. Disks are represented by subclasses of ``nova.block_device.DriverBlockDevice``. These subclasses retain a reference to the underlying BDM object. This means that by manipulating the ``DriverBlockDevice`` object, the driver is able to persist data to the BDM object in the DB. .. note:: Common usage is to pull ``block_device_mapping`` out of this dict into a variable called ``block_device_mapping``. This is not a ``BlockDeviceMapping`` object, or a list of them. .. note:: If ``block_device_info`` was passed to the driver by compute manager, it was probably generated by ``_get_instance_block_device_info()``. By default, this function filters out all cinder volumes from ``block_device_mapping`` which don't currently have ``connection_info``. In other contexts this filtering will not have happened, and ``block_device_mapping`` will contain all volumes. libvirt driver specific BDM data structures =========================================== ``instance_disk_info`` ---------------------- The virt driver API defines a method ``get_instance_disk_info``, which returns a JSON blob. The compute manager calls this and passes the data over RPC between calls without ever looking at it. This is driver-specific opaque data. It is also only used by the libvirt driver, despite being part of the API for all drivers. Other drivers do not return any data. The most interesting aspect of ``instance_disk_info`` is that it is generated from the libvirt XML, not from nova's state. .. 
note:: ``instance_disk_info`` is often named ``disk_info`` in code, which is unfortunate as this clashes with the normal naming of the next structure. Occasionally the two are used in the same block of code. .. note:: RBD disks (including non-volume disks) and cinder volumes are not included in ``instance_disk_info``. ``instance_disk_info`` is a list of dicts for some of an instance's disks. Each dict contains the following: ``type`` libvirt's notion of the disk's type ``path`` libvirt's notion of the disk's path ``virt_disk_size`` The disk's virtual size in bytes (the size the guest OS sees) ``backing_file`` libvirt's notion of the backing file path ``disk_size`` The file size of path, in bytes. ``over_committed_disk_size`` As-yet-unallocated disk size, in bytes. ``disk_info`` ------------- .. note:: As opposed to ``instance_disk_info``, which is frequently called ``disk_info``. This data structure is actually described pretty well in the comment block at the top of ``nova.virt.libvirt.blockinfo``. It is internal to the libvirt driver. It contains: ``disk_bus`` The default bus used by disks ``cdrom_bus`` The default bus used by cdrom drives ``mapping`` Defined below ``mapping`` is a dict which maps disk names to a dict describing how that disk should be passed to libvirt. This mapping contains every disk connected to the instance, both local and volumes. First, a note on disk naming. Local disk names used by the libvirt driver are well defined. They are: ``disk`` The root disk ``disk.local`` The flavor-defined ephemeral disk ``disk.ephX`` Where X is a zero-based index for BDM defined ephemeral disks ``disk.swap`` The swap disk ``disk.config`` The config disk These names are hardcoded, reliable, and used in lots of places. In ``disk_info``, volumes are keyed by device name, eg 'vda', 'vdb'. Different buses will be named differently, approximately according to legacy Linux device naming. Additionally, ``disk_info`` will contain a mapping for 'root', which is the root disk. This will duplicate one of the other entries, either 'disk' or a volume mapping. Each dict within the ``mapping`` dict contains the following 3 required fields of bus, dev and type with two optional fields of format and ``boot_index``: ``bus``: The guest bus type ('ide', 'virtio', 'scsi', etc) ``dev``: The device name 'vda', 'hdc', 'sdf', 'xvde' etc ``type``: Type of device eg 'disk', 'cdrom', 'floppy' ``format`` Which format to apply to the device if applicable ``boot_index`` Number designating the boot order of the device .. note:: ``BlockDeviceMapping`` and ``DriverBlockDevice`` store boot index zero-based. However, libvirt's boot index is 1-based, so the value stored here is 1-based. .. todo:: Add a section for the per disk ``disk.info`` file within instance directory when using the libvirt driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/conductor.rst0000664000175000017500000000572700000000000021332 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. Conductor as a place for orchestrating tasks ============================================ In addition to its roles as a database proxy and object backporter the conductor service also serves as a centralized place to manage the execution of workflows which involve the scheduler. Rebuild, resize/migrate, and building an instance are managed here. This was done in order to have a better separation of responsibilities between what compute nodes should handle and what the scheduler should handle, and to clean up the path of execution. Conductor was chosen because in order to query the scheduler in a synchronous manner it needed to happen after the API had returned a response, otherwise API response times would increase. And changing the scheduler call from asynchronous to synchronous helped to clean up the code. To illustrate this the old process for building an instance was: * API receives request to build an instance. * API sends an RPC cast to the scheduler to pick a compute. * Scheduler sends an RPC cast to the compute to build the instance, which means the scheduler needs to be able to communicate with all computes. * If the build succeeds it stops here. * If the build fails then the compute decides if the max number of scheduler retries has been hit. If so the build stops there. * If the build should be rescheduled the compute sends an RPC cast to the scheduler in order to pick another compute. This was overly complicated and meant that the logic for scheduling/rescheduling was distributed throughout the code. The answer to this was to change the process to be the following: * API receives request to build an instance. * API sends an RPC cast to the conductor to build an instance. (or runs locally if conductor is configured to use local_mode) * Conductor sends an RPC call to the scheduler to pick a compute and waits for the response. If there is a scheduler failure it stops the build at the conductor. * Conductor sends an RPC cast to the compute to build the instance. * If the build succeeds it stops here. * If the build fails then the compute sends an RPC cast to the conductor to build an instance. This is the same RPC message that was sent by the API. This new process means the scheduler only deals with scheduling, the compute only deals with building an instance, and the conductor manages the workflow. The code is now cleaner in the scheduler and computes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/database-migrations.rst0000664000175000017500000001744600000000000023245 0ustar00zuulzuul00000000000000=================== Database migrations =================== .. note:: This document details how to generate database migrations as part of a new feature or bugfix. For info on how to apply existing database migrations, refer to the documentation for the :program:`nova-manage db sync` and :program:`nova-manage api_db sync` commands in :doc:`/cli/nova-manage`. For info on the general upgrade process for a nova deployment, refer to :doc:`/admin/upgrades`. A typical nova deployment consists of an "API" database and one or more cell-specific "main" databases. Occasionally these databases will require schema or data migrations. Schema migrations ----------------- .. versionchanged:: 24.0.0 (Xena) The database migration engine was changed from ``sqlalchemy-migrate`` to ``alembic``. ..
versionchanged:: 28.0.0 (Bobcat) The legacy ``sqlalchemy-migrate``-based database migrations were removed. The `alembic`__ database migration tool is used to manage schema migrations in nova. The migration files and related metadata can be found in ``nova/db/api/migrations`` (for the API database) and ``nova/db/main/migrations`` (for the main database(s)). As discussed in :doc:`/admin/upgrades`, these can be run by end users using the :program:`nova-manage api_db sync` and :program:`nova-manage db sync` commands, respectively. .. __: https://alembic.sqlalchemy.org/en/latest/ .. note:: There were also legacy migrations provided in the ``legacy_migrations`` subdirectory for both the API and main databases. These were provided to facilitate upgrades from pre-Xena (24.0.0) deployments. They were removed in the 28.0.0 (Bobcat) release. The best reference for alembic is the `alembic documentation`__, but a small example is provided here. You can create the migration either manually or automatically. Manual generation might be necessary for some corner cases such as renamed tables, but auto-generation will typically handle your issues. Examples of both are provided below. In both examples, we're going to demonstrate how you could add a new model, ``Foo``, to the main database. .. __: https://alembic.sqlalchemy.org/en/latest/ .. code-block:: diff diff --git nova/db/main/models.py nova/db/main/models.py index 7eab643e14..8f70bcdaca 100644 --- nova/db/main/models.py +++ nova/db/main/models.py @@ -73,6 +73,16 @@ def MediumText(): sqlalchemy.dialects.mysql.MEDIUMTEXT(), 'mysql') +class Foo(BASE, models.SoftDeleteMixin): + """A test-only model.""" + + __tablename__ = 'foo' + + id = sa.Column(sa.Integer, primary_key=True) + uuid = sa.Column(sa.String(36), nullable=True) + bar = sa.Column(sa.String(255)) + + class Service(BASE, models.SoftDeleteMixin): """Represents a running service on a host.""" (you might not be able to apply the diff above cleanly - this is just a demo). .. rubric:: Auto-generating migration scripts In order for alembic to compare the migrations with the underlying models, it requires a database that it can inspect and compare the models against. As such, we first need to create a working database. We'll bypass ``nova-manage`` for this and go straight to the :program:`alembic` CLI. The ``alembic.ini`` file provided in the ``migrations`` directories for both databases is helpfully configured to use an SQLite database by default (``nova.db`` for the main database and ``nova_api.db`` for the API database). Create this database and apply the current schema, as dictated by the current migration scripts: .. code-block:: bash $ tox -e venv -- alembic -c nova/db/main/alembic.ini \ upgrade head Once done, you should notice the new ``nova.db`` file in the root of the repo. Now, let's generate the new revision: .. code-block:: bash $ tox -e venv -- alembic -c nova/db/main/alembic.ini \ revision -m "Add foo model" --autogenerate This will create a new file in ``nova/db/main/migrations`` with ``add_foo_model`` in the name, including (hopefully!) the necessary changes to add the new ``Foo`` model. You **must** inspect this file once created, since there's a chance you'll be missing imports or something else which will need to be manually corrected. Once you've inspected this file and made any required changes, you can apply the migration and make sure it works: .. code-block:: bash $ tox -e venv -- alembic -c nova/db/main/alembic.ini \ upgrade head ..
rubric:: Manually generating migration scripts For trickier migrations or things that alembic doesn't understand, you may need to manually create a migration script. This is very similar to the auto-generation step, with the exception being that you don't need to have a database in place beforehand. As such, you can simply run: .. code-block:: bash $ tox -e venv -- alembic -c nova/db/main/alembic.ini \ revision -m "Add foo model" As before, this will create a new file in ``nova/db/main/migrations`` with ``add_foo_model`` in the name. You can simply modify this to make whatever changes are necessary. Once done, you can apply the migration and make sure it works: .. code-block:: bash $ tox -e venv -- alembic -c nova/db/main/alembic.ini \ upgrade head Data migrations --------------- As discussed in :doc:`/admin/upgrades`, online data migrations occur in two places: - Inline migrations that occur as part of normal run-time activity as data is read in the old format and written in the new format. - Background online migrations that are performed using ``nova-manage`` to complete transformations that will not occur incidentally due to normal runtime activity. .. rubric:: Inline data migrations Inline data migrations are arguably the easier of the two to implement. Almost all of nova's database models correspond to an oslo.versionedobject (o.vo) or part of one. These o.vos load their data from the underlying database by implementing the ``obj_load_attr`` method. By modifying this method, it's possible to detect missing changes to the data - for example, a missing field - modify the data, save it back to the database, and finally return an object with the newly updated data. Change I6cd206542fdd28f3ef551dcc727f4cb35a53f6a3 provides a fully worked example of this approach. The main advantage of these is that they are completely transparent to the operator who does not have to take any additional steps to upgrade their deployment: the database updates should happen at runtime as data is pulled from the database. The main disadvantage of this approach is that some records may not be frequently pulled from the database, meaning they never have a chance to get updated. This can prevent the eventual removal of the inline migration in a future release. To avoid this issue, you should inspect the object to see if it's something that will be loaded as part of a standard runtime operation - for example, on startup or as part of a background task - and if necessary add a blocking online migration in a later release to catch and migrate the laggards. .. rubric:: Online data migrations Unlike inline data migrations, online data migrations require operator involvement. They are run using the ``nova-manage db online_data_migrations`` command which, as noted in :doc:`/cli/nova-manage`, this should be run straight after upgrading to a new release once the database schema migrations have been applied and the code updated. Online migrations can be blocking, in that it will be necessary to apply given migrations while running N code before upgrading to N+1. Change I44919422c48570f2647f2325ff895255fc2adf27 provides a fully worked example of this approach. The advantages and disadvantages of this approach are the inverse of those of the inline data migrations approach. While they can be used to ensure an data migration is actually applied, they require operator involvement and can prevent upgrades until fully applied. 
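To make the shape of such a migration concrete, below is a minimal sketch (not
code taken from nova) of an online data migration that backfills the ``uuid``
column of the example ``Foo`` model used in the schema migration examples
above. It assumes the ``(context, max_count)`` calling convention used by the
migrations wired into ``nova-manage db online_data_migrations``, where each
batch returns a ``(found, done)`` tuple; the ``_get_foos_missing_uuid`` and
``_save_foo`` helpers are placeholders for real database API calls and do not
exist in nova.

.. code-block:: python

   # Illustrative sketch only: the helpers below are hypothetical
   # placeholders, not real nova database API functions.
   from oslo_utils import uuidutils


   def migrate_foo_uuids(context, max_count):
       """Backfill Foo.uuid for rows created before the column existed."""
       # Hypothetical helper: fetch up to max_count rows still missing a uuid.
       foos = _get_foos_missing_uuid(context, limit=max_count)
       found = len(foos)
       done = 0
       for foo in foos:
           foo.uuid = uuidutils.generate_uuid()
           # Hypothetical helper: persist the updated row.
           _save_foo(context, foo)
           done += 1
       # found = rows needing migration in this batch, done = rows migrated.
       return found, done

Operators would then run ``nova-manage db online_data_migrations`` (optionally
with ``--max-count``) repeatedly until it reports that no records remain to be
migrated.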
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/glossary.rst0000664000175000017500000000515600000000000021171 0ustar00zuulzuul00000000000000======== Glossary ======== .. glossary:: Availability Zone Availability zones are a logical subdivision of cloud block storage, compute and network services. They provide a way for cloud operators to logically segment their compute based on arbitrary factors like location (country, datacenter, rack), network layout and/or power source. For more information, refer to :doc:`/admin/aggregates`. Boot From Volume A server that is created with a :doc:`Block Device Mapping ` with ``boot_index=0`` and ``destination_type=volume``. The root volume can already exist when the server is created or be created by the compute service as part of the server creation. Note that a server can have volumes attached and not be boot-from-volume. A boot from volume server has an empty ("") ``image`` parameter in ``GET /servers/{server_id}`` responses. Cell A cell is a shard or horizontal partition in a nova deployment. A cell mostly consists of a database, queue, and set of compute nodes. All deployments will have at least one cell (and one "fake" cell). Larger deployments can have many. For more information, refer to :doc:`/admin/cells`. Cross-Cell Resize A resize (or cold migrate) operation where the source and destination compute hosts are mapped to different cells. By default, resize and cold migrate operations occur within the same cell. For more information, refer to :doc:`/admin/configuration/cross-cell-resize`. Host Aggregate Host aggregates can be regarded as a mechanism to further partition an :term:`Availability Zone`; while availability zones are visible to users, host aggregates are only visible to administrators. Host aggregates provide a mechanism to allow administrators to assign key-value pairs to groups of machines. Each node can have multiple aggregates, each aggregate can have multiple key-value pairs, and the same key-value pair can be assigned to multiple aggregates. For more information, refer to :doc:`/admin/aggregates`. Same-Cell Resize A resize (or cold migrate) operation where the source and destination compute hosts are mapped to the same cell. Also commonly referred to as "standard resize" or simply "resize". By default, resize and cold migrate operations occur within the same cell. For more information, refer to :doc:`/contributor/resize-and-cold-migrate`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/gmr.rst0000664000175000017500000000752500000000000020115 0ustar00zuulzuul00000000000000.. Copyright (c) 2014 OpenStack Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Guru Meditation Reports ======================= Nova contains a mechanism whereby developers and system administrators can generate a report about the state of a running Nova executable. 
This report is called a *Guru Meditation Report* (*GMR* for short). Generating a GMR ---------------- A *GMR* can be generated by sending the *USR2* signal to any Nova process with support (see below). The *GMR* will then be outputted standard error for that particular process. For example, suppose that ``nova-compute`` has process id ``8675``, and was run with ``2>/var/log/nova/nova-compute-err.log``. Then, ``kill -USR2 8675`` will trigger the Guru Meditation report to be printed to ``/var/log/nova/nova-compute-err.log``. Nova API is commonly run under uWSGI, which intercepts ``SIGUSR2`` signals. In this case, a file trigger may be used instead: .. code-block:: ini [oslo_reports] log_dir = /var/log/nova file_event_handler = /var/log/nova/gmr_trigger Whenever the trigger file is modified, a *GMR* will be generated. To get a report, one may use ``touch /var/log/nova/gmr_trigger``. Note that the configured file trigger must exist when Nova starts. If a log dir is specified, the report will be written to a file within that directory instead of ``stderr``. The report file will be named ``${serviceName}_gurumeditation_${timestamp}``. Structure of a GMR ------------------ The *GMR* is designed to be extensible; any particular executable may add its own sections. However, the base *GMR* consists of several sections: Package Shows information about the package to which this process belongs, including version information Threads Shows stack traces and thread ids for each of the threads within this process Green Threads Shows stack traces for each of the green threads within this process (green threads don't have thread ids) Configuration Lists all the configuration options currently accessible via the CONF object for the current process Adding Support for GMRs to New Executables ------------------------------------------ Adding support for a *GMR* to a given executable is fairly easy. First import the module, as well as the Nova version module: .. code-block:: python from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from nova import version Then, register any additional sections (optional): .. code-block:: python gmr.TextGuruMeditation.register_section('Some Special Section', some_section_generator) Finally (under main), before running the "main loop" of the executable (usually ``service.server(server)`` or something similar), register the *GMR* hook: .. code-block:: python gmr_opts.set_defaults(CONF) gmr.TextGuruMeditation.setup_autorun( version, conf=CONF, service_name=service_name) The service name is used when generating report files. If unspecified, *GMR* tries to automatically detect the binary name using the stack trace but usually ends up with ``thread.py``. Extending the GMR ----------------- As mentioned above, additional sections can be added to the GMR for a particular executable. For more information, see the inline documentation under :mod:`oslo.reports` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/i18n.rst0000664000175000017500000000275400000000000020106 0ustar00zuulzuul00000000000000Internationalization ==================== Nova uses the :oslo.i18n-doc:`oslo.i18n library <>` to support internationalization. The oslo.i18n library is built on top of `gettext `_ and provides functions that are used to enable user-facing strings such as log messages to appear in the appropriate language in different locales. 
Nova exposes the oslo.i18n library support via the ``nova/i18n.py`` integration module. This module provides the functions needed to wrap translatable strings. It provides the ``_()`` wrapper for general user-facing messages (such as ones that end up in command line responses, or responses over the network). One upon a time there was an effort to translate log messages in OpenStack projects. But starting with the Ocata release these are no longer being supported. Log messages **should not** be translated. You should use the basic wrapper ``_()`` for strings which are not log messages that are expected to get to an end user:: raise nova.SomeException(_('Invalid service catalogue')) Do not use ``locals()`` for formatting messages because: 1. It is not as clear as using explicit dicts. 2. It could produce hidden errors during refactoring. 3. Changing the name of a variable causes a change in the message. 4. It creates a lot of otherwise unused variables. If you do not follow the project conventions, your code may cause hacking checks to fail. The ``_()`` function can be imported with :: from nova.i18n import _ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/index.rst0000664000175000017500000001001000000000000020416 0ustar00zuulzuul00000000000000================================ Technical Reference Deep Dives ================================ The nova project is large, and there are lots of complicated parts in it where it helps to have an overview to understand how the internals of a particular part work. .. _reference-internals: Internals ========= The following is a dive into some of the internals in nova. * :doc:`/reference/rpc`: How nova uses AMQP as an RPC transport * :doc:`/reference/scheduling`: The workflow through the scheduling process * :doc:`/reference/scheduler-hints-vs-flavor-extra-specs`: The similarities and differences between flavor extra specs and scheduler hints. * :doc:`/reference/live-migration`: The live migration flow * :doc:`/reference/services`: Module descriptions for some of the key modules used in starting / running services * :doc:`/reference/vm-states`: Cheat sheet for understanding the life cycle of compute instances * :doc:`/reference/threading`: The concurrency model used in nova, which is based on eventlet, and may not be familiar to everyone. * :doc:`/reference/notifications`: The notifications available in nova. * :doc:`/reference/update-provider-tree`: A detailed explanation of the ``ComputeDriver.update_provider_tree`` method. * :doc:`/reference/upgrade-checks`: A guide to writing automated upgrade checks. * :doc:`/reference/database-migrations`: A guide to writing database migrations, be they online or offline. * :doc:`/reference/conductor` .. todo:: Need something about versioned objects and how they fit in with conductor as an object backporter during upgrades. * :doc:`/reference/isolate-aggregates`: Describes how the placement filter works in nova to isolate groups of hosts. * :doc:`/reference/attach-volume`: Describes the attach volume flow, using the libvirt virt driver as an example. * :doc:`/reference/block-device-structs`: Block Device Data Structures * :doc:`/reference/libvirt-distro-support-matrix`: Libvirt virt driver OS distribution support matrix .. # NOTE(amotoki): toctree needs to be placed at the end of the section to # keep the document structure in the PDF doc. .. 
toctree:: :hidden: rpc scheduling scheduler-hints-vs-flavor-extra-specs live-migration services vm-states threading notifications database-migrations update-provider-tree upgrade-checks conductor isolate-aggregates api-microversion-history attach-volume block-device-structs libvirt-distro-support-matrix Debugging ========= * :doc:`/reference/gmr`: Inspired by Amiga, a way to trigger a very comprehensive dump of a running service for deep debugging. .. # NOTE(amotoki): toctree needs to be placed at the end of the section to # keep the document structure in the PDF doc. .. toctree:: :hidden: gmr Forward Looking Plans ===================== The following section includes documents that describe the overall plan behind groups of nova-specs. Most of these cover items relating to the evolution of various parts of nova's architecture. Once the work is complete, these documents will move into the "Internals" section. If you want to get involved in shaping the future of nova's architecture, these are a great place to start reading up on the current plans. * :doc:`/reference/policy-enforcement`: How we want policy checks on API actions to work in the future * :doc:`/reference/stable-api`: What stable API means to nova * :doc:`/reference/scheduler-evolution`: Motivation behind the scheduler / placement evolution .. # NOTE(amotoki): toctree needs to be placed at the end of the section to # keep the document structure in the PDF doc. .. toctree:: :hidden: policy-enforcement stable-api scheduler-evolution Additional Information ====================== * :doc:`/reference/glossary`: A quick reference guide to some of the terms you might encounter working on or using nova. .. # NOTE(amotoki): toctree needs to be placed at the end of the section to # keep the document structure in the PDF doc. .. toctree:: :hidden: glossary ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/isolate-aggregates.rst0000664000175000017500000001021000000000000023060 0ustar00zuulzuul00000000000000.. Copyright 2019 NTT DATA Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _filtering_hosts_by_isolating_aggregates: Filtering hosts by isolating aggregates ======================================= Background ----------- I want to set up an aggregate ``ABC`` with hosts that allow you to run only certain licensed images. I could tag the aggregate with metadata such as ````. Then if I boot an instance with an image containing the property ````, it will land on one of the hosts in aggregate ``ABC``. But if the user creates a new image which does not include ```` metadata, an instance booted with that image could still land on a host in aggregate ``ABC`` as reported in launchpad bug `1677217`_. The :ref:`AggregateImagePropertiesIsolation` scheduler filter passes even though the aggregate metadata ```` is not present in the image properties. .. 
_1677217: https://bugs.launchpad.net/nova/+bug/1677217 Solution -------- The above problem is addressed by blueprint `placement-req-filter-forbidden-aggregates`_ which was implemented in the 20.0.0 Train release. The following example assumes you have configured aggregate ``ABC`` and added hosts ``HOST1`` and ``HOST2`` to it in Nova, and that you want to isolate those hosts to run only instances requiring Windows licensing. #. Set the :oslo.config:option:`scheduler.enable_isolated_aggregate_filtering` config option to ``true`` in nova.conf and restart the nova-scheduler service. #. Add trait ``CUSTOM_LICENSED_WINDOWS`` to the resource providers for ``HOST1`` and ``HOST2`` in the Placement service. First create the ``CUSTOM_LICENSED_WINDOWS`` trait .. code-block:: console # openstack --os-placement-api-version 1.6 trait create CUSTOM_LICENSED_WINDOWS Assume ```` is the UUID of ``HOST1``, which is the same as its resource provider UUID. Start to build the command line by first collecting existing traits for ``HOST1`` .. code-block:: console # traits=$(openstack --os-placement-api-version 1.6 resource provider trait list -f value | sed 's/^/--trait /') Replace ``HOST1``\ 's traits, adding ``CUSTOM_LICENSED_WINDOWS`` .. code-block:: console # openstack --os-placement-api-version 1.6 resource provider trait set $traits --trait CUSTOM_LICENSED_WINDOWS Repeat the above steps for ``HOST2``. #. Add the ``trait:CUSTOM_LICENSED_WINDOWS=required`` metadata property to aggregate ``ABC``. .. code-block:: console # openstack --os-compute-api-version 2.53 aggregate set --property trait:CUSTOM_LICENSED_WINDOWS=required ABC As before, any instance spawned with a flavor or image containing ``trait:CUSTOM_LICENSED_WINDOWS=required`` will land on ``HOST1`` or ``HOST2`` because those hosts expose that trait. However, now that the ``isolate_aggregates`` request filter is configured, any instance whose flavor or image **does not** contain ``trait:CUSTOM_LICENSED_WINDOWS=required`` will **not** land on ``HOST1`` or ``HOST2`` because aggregate ``ABC`` requires that trait. The above example uses a ``CUSTOM_LICENSED_WINDOWS`` trait, but you can use any custom or `standard trait`_ in a similar fashion. The filter supports the use of multiple traits across multiple aggregates. The combination of flavor and image metadata must require **all** of the traits configured on the aggregate in order to pass. .. _placement-req-filter-forbidden-aggregates: https://specs.openstack.org/openstack/nova-specs/specs/train/approved/placement-req-filter-forbidden-aggregates.html .. _standard trait: https://docs.openstack.org/os-traits/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/libvirt-distro-support-matrix.rst0000664000175000017500000001564600000000000025324 0ustar00zuulzuul00000000000000Libvirt virt driver OS distribution support matrix ================================================== This page documents the libvirt versions present in the various distro versions that OpenStack Nova aims to be deployable with. .. note:: This document was previously hosted on the OpenStack wiki: https://wiki.openstack.org/wiki/LibvirtDistroSupportMatrix Libvirt minimum version change policy ------------------------------------- At the start of each Nova development cycle this matrix will be consulted to determine if it is viable to drop support for any end-of-life or otherwise undesired distro versions. 
Based on this distro evaluation, it may be possible to increase the minimum required version of libvirt in Nova, and thus drop some compatibility code for older versions. When a decision to update the minimum required libvirt version is made, there must be a warning issued for one cycle. This is achieved by editing ``nova/virt/libvirt/driver.py`` to set ``NEXT_MIN_LIBVIRT_VERSION``. For example: .. code:: NEXT_MIN_LIBVIRT_VERSION = (X, Y, Z) This causes a deprecation warning to be emitted when Nova starts up warning the admin that the version of libvirt in use on the host will no longer be supported in the subsequent release. After a version has been listed in ``NEXT_MIN_LIBVIRT_VERSION`` for one release cycle, the corresponding actual minimum required libvirt can be updated by setting .. code:: MIN_LIBVIRT_VERSION = (X, Y, Z) At this point of course, an even newer version might be set in ``NEXT_MIN_LIBVIRT_VERSION`` to repeat the process.... An email should also be sent at this point to the ``openstack-discuss@lists.openstack.org`` mailing list as a courtesy raising awareness of the change in minimum version requirements in the upcoming release, for example: http://lists.openstack.org/pipermail/openstack-discuss/2021-January/019849.html There is more background on the rationale used for picking minimum versions in the operators mailing list thread here: http://lists.openstack.org/pipermail/openstack-operators/2015-May/007012.html QEMU minimum version change policy ---------------------------------- After choosing a minimum libvirt version, the minimum QEMU version is determined by looking for the lowest QEMU version from all the distros that support the decided libvirt version. ``MIN_{LIBVIRT,QEMU}_VERSION`` and ``NEXT_MIN_{LIBVIRT,QEMU}_VERSION`` table ---------------------------------------------------------------------------- .. list-table:: OpenStack Nova libvirt/QEMU Support Matrix * - OpenStack Release - Nova Release - ``MIN_LIBVIRT_VERSION`` - ``NEXT_MIN_LIBVIRT_VERSION`` - ``MIN_QEMU_VERSION`` - ``NEXT_MIN_QEMU_VERSION`` * - Havana - 2013.2.0 - 0.9.6 - 0.9.6 - - * - Icehouse - 2014.1 - 0.9.6 - 0.9.11 - - * - Juno - 2014.2.0 - 0.9.11 - 0.9.11 - - * - Kilo - 2015.1.0 - 0.9.11 - 0.9.11 - - * - Liberty - 12.0.0 - 0.9.11 - 0.10.2 - - * - Mitaka - 13.0.0 - 0.10.2 - 1.2.1 - - * - Newton - 14.0.0 - 1.2.1 - 1.2.1 - 1.5.3 - 1.5.3 * - Ocata - 15.0.0 - 1.2.1 - 1.2.9 - 1.5.3 - 2.1.0 * - Pike - 16.0.0 - 1.2.9 - 1.3.1 - 2.1.0 - 2.5.0 * - Queens - 17.0.0 - 1.2.9 - 1.3.1 - 2.1.0 - 2.5.0 * - Rocky - 18.0.0 - 1.3.1 - 3.0.0 - 2.5.0 - 2.8.0 * - Stein - 19.0.0 - 3.0.0 - 4.0.0 - 2.8.0 - 2.11.0 * - Train - 20.0.0 - 3.0.0 - 4.0.0 - 2.8.0 - 2.11.0 * - Ussuri - 21.0.0 - 4.0.0 - 5.0.0 - 2.11.0 - 4.0.0 * - Victoria - 22.0.0 - 5.0.0 - 6.0.0 - 4.0.0 - 4.2.0 * - Wallaby - 23.0.0 - 6.0.0 - 7.0.0 - 4.2.0 - 5.2.0 * - Xena - 24.0.0 - 6.0.0 - 7.0.0 - 4.2.0 - 5.2.0 * - Yoga - 25.0.0 - 6.0.0 - 7.0.0 - 4.2.0 - 5.2.0 * - Zed - 26.0.0 - 6.0.0 - 7.0.0 - 4.2.0 - 5.2.0 * - 2023.1 Antelope - 27.0.0 - 6.0.0 - 7.0.0 - 4.2.0 - 5.2.0 * - 2023.2 Bobcat - 28.0.0 - 7.0.0 - 8.0.0 - 5.2.0 - 6.2.0 * - 2024.1 Caracal - 29.0.0 - 7.0.0 - 8.0.0 - 5.2.0 - 6.2.0 * - 2024.2 Dalmatian - 30.0.0 - 7.0.0 - 8.0.0 - 5.2.0 - 6.2.0 * - 2025.1 Epoxy - 31.0.0 - 8.0.0 - 10.0.0 - 6.2.0 - 8.2.2 * - 2025.2 Flamingo - 31.0.0 - 8.0.0 - 10.0.0 - 6.2.0 - 8.2.2 OS distribution versions ------------------------ .. warning:: This section may become a bit outdated. 
This table provides information on a representative sample of OS distros and the version of libvirt/QEMU/libguestfs that they ship. This is **NOT** intended to be an exhaustive list of distros where OpenStack Nova can run - it is intended to run on any Linux distro that can satisfy the minimum required software versions. This table merely aims to help identify when minimum required versions can be reasonably updated without losing support for important OS distros. .. list-table:: Distro libvirt/QEMU Support Table * - OS Distro - GA date - Libvirt - QEMU/KVM - libguestfs * - **CentOS Stream** - - - - * - 9 - As of 2025-02-27. - 10.10.0 - 9.1.0 - 1.50.1 * - **Debian** - - - - * - 12.x (Bookworm) ("oldstable") - 2023-06-10 - 9.0.0 - 7.2 - 1.48. * - 13.x (Trixie) ("stable") - 2025-08-09 - 10.7.0 - 9.1.0 - 1.52.2 * - **RHEL** - - - - * - 8.2 - 2020-04-28 - 6.0.0-17.2 - 4.2.0-19 - 1.40.2-22 * - 8.3 - 2020-10-29 - 6.0.0-25.5 - 4.2.0-29 - 1.40.2-24 * - 8.4 - 2021-05-18 - 7.0.0-8 - 5.2.0-10 - 1.44.0-2 * - 8.5 - 2021-11-09 - 7.6.0-6 - 6.0.0-33 - 1.44.0-3 * - **SLES** - - - - * - 15 (SP2) - 2020 - 6.0.0 - 4.2.1 - 1.38.0 * - 15 (SP3) - 2021 - 7.1.0 - 5.2.0 - 1.38.0 * - **Ubuntu** - - - - * - 20.04 (Focal Fossa LTS) - 2020-04-23 - 6.0.0 - 4.2 - 1.40.2 * - 21.04 (Hirsute Hippo) - 2021-04-22 - 7.0.0 - 5.2 - 1.44.1 * - 22.04 (Jammy Jellyfish) - 2022-04-21 - 8.0.0 - 6.2 - 1.44.2 * - 24.04 (Noble Numbat) - 2024-04-25 - 10.0.0 - 8.2.2 - 1.52.0 .. NB: maintain alphabetical ordering of distros, followed by oldest released versions first ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/live-migration.rst0000664000175000017500000000131000000000000022240 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================ Live Migration ================ .. image:: /_static/images/live-migration.svg :alt: Live migration workflow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/notifications.rst0000664000175000017500000000075200000000000022174 0ustar00zuulzuul00000000000000================================= Available versioned notifications ================================= .. note:: Versioned notifications are added in each release, so the samples represented below may not necessarily be in an older version of nova. Ensure you are looking at the correct version of the documentation for the release you are using. .. This is a reference anchor used in the main index page. .. _versioned_notification_samples: .. versioned_notifications:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/policy-enforcement.rst0000664000175000017500000002131300000000000023121 0ustar00zuulzuul00000000000000.. Copyright 2014 Intel All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. REST API Policy Enforcement =========================== The following describes some of the shortcomings in how policy is used and enforced in nova, along with some benefits of fixing those issues. Each issue has a section dedicated to describing the underlying cause and historical context in greater detail. Problems with current system ---------------------------- The following is a list of issues with the existing policy enforcement system: * `Testing default policies`_ * `Mismatched authorization`_ * `Inconsistent naming`_ * `Incorporating default roles`_ * `Compartmentalized policy enforcement`_ * `Refactoring hard-coded permission checks`_ * `Granular policy checks`_ Addressing the list above helps operators by: 1. Providing them with flexible and useful defaults 2. Reducing the likelihood of writing and maintaining custom policies 3. Improving interoperability between deployments 4. Increasing RBAC confidence through first-class testing and verification 5. Reducing complexity by using consistent policy naming conventions 6. Exposing more functionality to end-users, safely, making the entire nova API more self-serviceable resulting in less operational overhead for operators to do things on behalf of users Additionally, the following is a list of benefits to contributors: 1. Reduce developer maintenance and cost by isolating policy enforcement into a single layer 2. Reduce complexity by using consistent policy naming conventions 3. Increased confidence in RBAC refactoring through exhaustive testing that prevents regressions before they merge Testing default policies ------------------------ Testing default policies is important in protecting against authoritative regression. Authoritative regression is when a change accidentally allows someone to do something or see something they shouldn't. It can also be when a change accidentally restricts a user from doing something they used to have the authorization to perform. This testing is especially useful prior to refactoring large parts of the policy system. For example, this level of testing would be invaluable prior to pulling policy enforcement logic from the database layer up to the API layer. `Testing documentation`_ exists that describes the process for developing these types of tests. .. _Testing documentation: https://docs.openstack.org/keystone/latest/contributor/services.html#ruthless-testing Mismatched authorization ------------------------ The compute API is rich in functionality and has grown to manage both physical and virtual hardware. Some APIs were meant to assist operators while others were specific to end users. Historically, nova used project-scoped tokens to protect almost every API, regardless of the intended user. Using project-scoped tokens to authorize requests for system-level APIs makes for undesirable user-experience and is prone to overloading roles. For example, to prevent every user from accessing hardware level APIs that would otherwise violate tenancy requires operators to create a ``system-admin`` or ``super-admin`` role, then rewrite those system-level policies to incorporate that role. 
This means users with that special role on a project could access system-level resources that aren't even tracked against projects (hypervisor information is an example of system-specific information.) As of the Queens release, keystone supports a scope type dedicated to easing this problem, called system scope. Consuming system scope across the compute API results in fewer overloaded roles, less specialized authorization logic in code, and simpler policies that expose more functionality to users without violating tenancy. Please refer to keystone's `authorization scopes documentation`_ to learn more about scopes and how to use them effectively. .. _authorization scopes documentation: https://docs.openstack.org/keystone/latest/contributor/services.html#authorization-scopes Inconsistent naming ------------------- Inconsistent conventions for policy names are scattered across most OpenStack services, nova included. Recently, there was an effort that introduced a convention that factored in service names, resources, and use cases. This new convention is applicable to nova policy names. The convention is formally `documented`_ in oslo.policy and we can use policy `deprecation tooling`_ to gracefully rename policies. .. _documented: https://docs.openstack.org/oslo.policy/latest/user/usage.html#naming-policies .. _deprecation tooling: https://docs.openstack.org/oslo.policy/latest/reference/api/oslo_policy.policy.html#oslo_policy.policy.DeprecatedRule Incorporating default roles --------------------------- Up until the Rocky release, keystone only ensured a single role called ``admin`` was available to the deployment upon installation. In Rocky, this support was expanded to include ``member`` and ``reader`` roles as first-class citizens during keystone's installation. This allows service developers to rely on these roles and include them in their default policy definitions. Standardizing on a set of role names for default policies increases interoperability between deployments and decreases operator overhead. You can find more information on default roles in the keystone `specification`_ or `developer documentation`_. .. _specification: http://specs.openstack.org/openstack/keystone-specs/specs/keystone/rocky/define-default-roles.html .. _developer documentation: https://docs.openstack.org/keystone/latest/contributor/services.html#reusable-default-roles Compartmentalized policy enforcement ------------------------------------ Policy logic and processing is inherently sensitive and often complicated. It is sensitive in that coding mistakes can lead to security vulnerabilities. It is complicated in the resources and APIs it needs to protect and the vast number of use cases it needs to support. These reasons make a case for isolating policy enforcement and processing into a compartmentalized space, as opposed to policy logic bleeding through to different layers of nova. Not having all policy logic in a single place makes evolving the policy enforcement system arduous and makes the policy system itself fragile. Currently, the database and API components of nova contain policy logic. At some point, we should refactor these systems into a single component that is easier to maintain. Before we do this, we should consider approaches for bolstering testing coverage, which ensures we are aware of or prevent policy regressions. There are examples and documentation in API protection `testing guides`_. .. 
_testing guides: https://docs.openstack.org/keystone/latest/contributor/services.html#ruthless-testing Refactoring hard-coded permission checks ---------------------------------------- The policy system in nova is designed to be configurable. Despite this design, there are some APIs that have hard-coded checks for specific roles. This makes configuration impossible, misleading, and frustrating for operators. Instead, we can remove hard-coded policies and ensure a configuration-driven approach, which reduces technical debt, increases consistency, and provides better user-experience for operators. Additionally, moving hard-coded checks into first-class policy rules let us use existing policy tooling to deprecate, document, and evolve policies. Granular policy checks ---------------------- Policies should be as granular as possible to ensure consistency and reasonable defaults. Using a single policy to protect CRUD for an entire API is restrictive because it prevents us from using default roles to make delegation to that API flexible. For example, a policy for ``compute:foobar`` could be broken into ``compute:foobar:create``, ``compute:foobar:update``, ``compute:foobar:list``, ``compute:foobar:get``, and ``compute:foobar:delete``. Breaking policies down this way allows us to set read-only policies for readable operations or use another default role for creation and management of ``foobar`` resources. The oslo.policy library has `examples`_ that show how to do this using deprecated policy rules. .. _examples: https://docs.openstack.org/oslo.policy/latest/reference/api/oslo_policy.policy.html#oslo_policy.policy.DeprecatedRule ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/rpc.rst0000664000175000017500000003234300000000000020110 0ustar00zuulzuul00000000000000.. Copyright (c) 2010 Citrix Systems, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. AMQP and Nova ============= AMQP is the messaging technology chosen by the OpenStack cloud. The AMQP broker, default to Rabbitmq, sits between any two Nova components and allows them to communicate in a loosely coupled fashion. More precisely, Nova components (the compute fabric of OpenStack) use Remote Procedure Calls (RPC hereinafter) to communicate to one another; however such a paradigm is built atop the publish/subscribe paradigm so that the following benefits can be achieved: * Decoupling between client and servant (such as the client does not need to know where the servant's reference is). * Full a-synchronism between client and servant (such as the client does not need the servant to run at the same time of the remote call). * Random balancing of remote calls (such as if more servants are up and running, one-way calls are transparently dispatched to the first available servant). Nova uses direct, fanout, and topic-based exchanges. The architecture looks like the one depicted in the figure below: .. 
image:: /_static/images/rpc-arch.png :width: 60% Nova implements RPC (both request+response, and one-way, respectively nicknamed ``rpc.call`` and ``rpc.cast``) over AMQP by providing an adapter class which take cares of marshaling and unmarshaling of messages into function calls. Each Nova service (for example Compute, Scheduler, etc.) create two queues at the initialization time, one which accepts messages with routing keys ``NODE-TYPE.NODE-ID`` (for example ``compute.hostname``) and another, which accepts messages with routing keys as generic ``NODE-TYPE`` (for example ``compute``). The former is used specifically when Nova-API needs to redirect commands to a specific node like ``openstack server delete $instance``. In this case, only the compute node whose host's hypervisor is running the virtual machine can kill the instance. The API acts as a consumer when RPC calls are request/response, otherwise it acts as a publisher only. Nova RPC Mappings ----------------- The figure below shows the internals of a message broker node (referred to as a RabbitMQ node in the diagrams) when a single instance is deployed and shared in an OpenStack cloud. Every Nova component connects to the message broker and, depending on its personality (for example a compute node or a network node), may use the queue either as an Invoker (such as API or Scheduler) or a Worker (such as Compute or Network). Invokers and Workers do not actually exist in the Nova object model, but we are going to use them as an abstraction for sake of clarity. An Invoker is a component that sends messages in the queuing system via two operations: 1) ``rpc.call`` and ii) ``rpc.cast``; a Worker is a component that receives messages from the queuing system and reply accordingly to ``rpc.call`` operations. Figure 2 shows the following internal elements: Topic Publisher A Topic Publisher comes to life when an ``rpc.call`` or an ``rpc.cast`` operation is executed; this object is instantiated and used to push a message to the queuing system. Every publisher connects always to the same topic-based exchange; its life-cycle is limited to the message delivery. Direct Consumer A Direct Consumer comes to life if (and only if) an ``rpc.call`` operation is executed; this object is instantiated and used to receive a response message from the queuing system. Every consumer connects to a unique direct-based exchange via a unique exclusive queue; its life-cycle is limited to the message delivery; the exchange and queue identifiers are determined by a UUID generator, and are marshaled in the message sent by the Topic Publisher (only ``rpc.call`` operations). Topic Consumer A Topic Consumer comes to life as soon as a Worker is instantiated and exists throughout its life-cycle; this object is used to receive messages from the queue and it invokes the appropriate action as defined by the Worker role. A Topic Consumer connects to the same topic-based exchange either via a shared queue or via a unique exclusive queue. Every Worker has two topic consumers, one that is addressed only during ``rpc.cast`` operations (and it connects to a shared queue whose exchange key is ``topic``) and the other that is addressed only during ``rpc.call`` operations (and it connects to a unique queue whose exchange key is ``topic.host``). Direct Publisher A Direct Publisher comes to life only during ``rpc.call`` operations and it is instantiated to return the message required by the request/response operation. 
The object connects to a direct-based exchange whose identity is dictated by the incoming message. Topic Exchange The Exchange is a routing table that exists in the context of a virtual host (the multi-tenancy mechanism provided by RabbitMQ etc); its type (such as topic vs. direct) determines the routing policy; a message broker node will have only one topic-based exchange for every topic in Nova. Direct Exchange This is a routing table that is created during ``rpc.call`` operations; there are many instances of this kind of exchange throughout the life-cycle of a message broker node, one for each ``rpc.call`` invoked. Queue Element A Queue is a message bucket. Messages are kept in the queue until a Consumer (either Topic or Direct Consumer) connects to the queue and fetch it. Queues can be shared or can be exclusive. Queues whose routing key is ``topic`` are shared amongst Workers of the same personality. .. image:: /_static/images/rpc-rabt.png :width: 60% RPC Calls --------- The diagram below shows the message flow during an ``rpc.call`` operation: 1. A Topic Publisher is instantiated to send the message request to the queuing system; immediately before the publishing operation, a Direct Consumer is instantiated to wait for the response message. 2. Once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic.host') and passed to the Worker in charge of the task. 3. Once the task is completed, a Direct Publisher is allocated to send the response message to the queuing system. 4. Once the message is dispatched by the exchange, it is fetched by the Direct Consumer dictated by the routing key (such as ``msg_id``) and passed to the Invoker. .. image:: /_static/images/rpc-flow-1.png :width: 60% RPC Casts --------- The diagram below shows the message flow during an ``rpc.cast`` operation: 1. A Topic Publisher is instantiated to send the message request to the queuing system. 2. Once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task. .. image:: /_static/images/rpc-flow-2.png :width: 60% AMQP Broker Load ---------------- At any given time the load of a message broker node running RabbitMQ etc is function of the following parameters: Throughput of API calls The number of API calls (more precisely ``rpc.call`` ops) being served by the OpenStack cloud dictates the number of direct-based exchanges, related queues and direct consumers connected to them. Number of Workers There is one queue shared amongst workers with the same personality; however there are as many exclusive queues as the number of workers; the number of workers dictates also the number of routing keys within the topic-based exchange, which is shared amongst all workers. The figure below shows the status of a RabbitMQ node after Nova components' bootstrap in a test environment. Exchanges and queues being created by Nova components are: * Exchanges 1. nova (topic exchange) * Queues 1. ``compute.phantom`` (``phantom`` is hostname) 2. ``compute`` 3. ``network.phantom`` (``phantom`` is hostname) 4. ``network`` 5. ``scheduler.phantom`` (``phantom`` is hostname) 6. ``scheduler`` .. image:: /_static/images/rpc-state.png :width: 60% RabbitMQ Gotchas ---------------- Nova uses Kombu to connect to the RabbitMQ environment. Kombu is a Python library that in turn uses AMQPLib, a library that implements the standard AMQP 0.8 at the time of writing. 
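Those connection parameters are easiest to understand with a small, purely
illustrative example. The sketch below is not how nova itself talks to
RabbitMQ (nova goes through oslo.messaging rather than instantiating kombu
objects directly); it simply shows a kombu ``Connection``, a topic exchange
and a per-host queue built from the kind of parameters described below, using
the ``nova`` exchange and ``compute.phantom`` queue names from the bootstrap
example above. All host names and credentials are example values.

.. code-block:: python

   # Illustrative only: nova uses oslo.messaging on top of kombu rather than
   # constructing these objects by hand. All values here are examples.
   from kombu import Connection, Exchange, Queue

   connection = Connection(
       hostname='rabbit.example.com',
       userid='nova',
       password='secret',
       virtual_host='/',
       port=5672,
   )

   # Topic exchange shared by all workers, plus the per-host compute queue.
   nova_exchange = Exchange('nova', type='topic', durable=False)
   compute_queue = Queue(
       'compute.phantom',
       exchange=nova_exchange,
       routing_key='compute.phantom',
       durable=False,
       auto_delete=False,
   )

   with connection as conn:
       # Publish a message the way an Invoker performing an rpc.cast to a
       # specific host would; declare=[...] creates the queue if needed.
       producer = conn.Producer()
       producer.publish(
           {'method': 'example_method', 'args': {}},
           exchange=nova_exchange,
           routing_key='compute.phantom',
           declare=[compute_queue],
       )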
When using Kombu, Invokers and Workers need the following parameters in order to instantiate a Connection object that connects to the RabbitMQ server (please note that most of the following material can be also found in the Kombu documentation; it has been summarized and revised here for sake of clarity): ``hostname`` The hostname to the AMQP server. ``userid`` A valid username used to authenticate to the server. ``password`` The password used to authenticate to the server. ``virtual_host`` The name of the virtual host to work with. This virtual host must exist on the server, and the user must have access to it. Default is "/". ``port`` The port of the AMQP server. Default is ``5672`` (amqp). The following parameters are default: ``insist`` Insist on connecting to a server. In a configuration with multiple load-sharing servers, the Insist option tells the server that the client is insisting on a connection to the specified server. Default is False. ``connect_timeout`` The timeout in seconds before the client gives up connecting to the server. The default is no timeout. ``ssl`` Use SSL to connect to the server. The default is False. More precisely Consumers need the following parameters: ``connection`` The above mentioned Connection object. ``queue`` Name of the queue. ``exchange`` Name of the exchange the queue binds to. ``routing_key`` The interpretation of the routing key depends on the value of the ``exchange_type`` attribute. Direct exchange If the routing key property of the message and the ``routing_key`` attribute of the queue are identical, then the message is forwarded to the queue. Fanout exchange Messages are forwarded to the queues bound the exchange, even if the binding does not have a key. Topic exchange If the routing key property of the message matches the routing key of the key according to a primitive pattern matching scheme, then the message is forwarded to the queue. The message routing key then consists of words separated by dots (``.``, like domain names), and two special characters are available; star (``*``) and hash (``#``). The star matches any word, and the hash matches zero or more words. For example ``.stock.#`` matches the routing keys ``usd.stock`` and ``eur.stock.db`` but not ``stock.nasdaq``. ``durable`` This flag determines the durability of both exchanges and queues; durable exchanges and queues remain active when a RabbitMQ server restarts. Non-durable exchanges/queues (transient exchanges/queues) are purged when a server restarts. It is worth noting that AMQP specifies that durable queues cannot bind to transient exchanges. Default is True. ``auto_delete`` If set, the exchange is deleted when all queues have finished using it. Default is False. ``exclusive`` Exclusive queues (such as non-shared) may only be consumed from by the current connection. When exclusive is on, this also implies ``auto_delete``. Default is False. ``exchange_type`` AMQP defines several default exchange types (routing algorithms) that covers most of the common messaging use cases. ``auto_ack`` Acknowledgment is handled automatically once messages are received. By default ``auto_ack`` is set to False, and the receiver is required to manually handle acknowledgment. ``no_ack`` It disable acknowledgment on the server-side. This is different from ``auto_ack`` in that acknowledgment is turned off altogether. This functionality increases performance but at the cost of reliability. Messages can get lost if a client dies before it can deliver them to the application. 
``auto_declare`` If this is True and the exchange name is set, the exchange will be automatically declared at instantiation. Auto declare is on by default. Publishers specify most the parameters of Consumers (such as they do not specify a queue name), but they can also specify the following: ``delivery_mode`` The default delivery mode used for messages. The value is an integer. The following delivery modes are supported by RabbitMQ: ``1`` (transient) The message is transient. Which means it is stored in memory only, and is lost if the server dies or restarts. ``2`` (persistent) The message is persistent. Which means the message is stored both in-memory, and on disk, and therefore preserved if the server dies or restarts. The default value is ``2`` (persistent). During a send operation, Publishers can override the delivery mode of messages so that, for example, transient messages can be sent over a durable queue. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/scheduler-evolution.rst0000664000175000017500000001327300000000000023325 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =================== Scheduler Evolution =================== Evolving the scheduler has been a priority item over several releases: http://specs.openstack.org/openstack/nova-specs/#priorities The scheduler has become tightly coupled with the rest of nova, limiting its capabilities, accuracy, flexibility and maintainability. The goal of scheduler evolution is to bring about a better separation of concerns between scheduling functionality and the rest of nova. Once this effort has completed, its conceivable that the nova-scheduler could become a separate git repo, outside of nova but within the compute project. This is not the current focus. Problem Use Cases ================== Many users are wanting to do more advanced things with the scheduler, but the current architecture is not ready to support those use cases in a maintainable way. A few examples will help to illustrate where the scheduler falls short: Cross Project Affinity ----------------------- It can be desirable, when booting from a volume, to use a compute node that is close to the shared storage where that volume is. Similarly, for the sake of performance, it can be desirable to use a compute node that is in a particular location in relation to a pre-created port. Filter Scheduler Alternatives ------------------------------ For certain use cases, radically different schedulers may perform much better than the filter scheduler. We should not block this innovation. It is unreasonable to assume a single scheduler will work for all use cases. However, to enable this kind of innovation in a maintainable way, a single strong scheduler interface is required. Project Scale issues --------------------- There are many interesting ideas for new schedulers, like the solver scheduler, and frequent requests to add new filters and weights to the scheduling system. 
The current nova team does not have the bandwidth to deal with all these requests. A dedicated scheduler team could work on these items independently of the rest of nova. The tight coupling that currently exists makes it impossible to work on the scheduler in isolation. A stable interface is required before the code can be split out. Key areas we are evolving ========================== Here we discuss, at a high level, areas that are being addressed as part of the scheduler evolution work. Versioning Scheduler Placement Interfaces ------------------------------------------ At the start of kilo, the scheduler is passed a set of dictionaries across a versioned RPC interface. The dictionaries can create problems with the backwards compatibility needed for live-upgrades. Luckily we already have the oslo.versionedobjects infrastructure we can use to model this data in a way that can be versioned across releases. This effort is mostly focusing around the request_spec. See, for example, `this spec`_. Sending host and node stats to the scheduler --------------------------------------------- Periodically nova-compute updates the scheduler state stored in the database. We need a good way to model the data that is being sent from the compute nodes into the scheduler, so over time, the scheduler can move to having its own database. This is linked to the work on the resource tracker. Updating the Scheduler about other data ---------------------------------------- Over time, its possible that we need to send cinder and neutron data, so the scheduler can use that data to help pick a nova-compute host. Resource Tracker ----------------- The recent work to add support for NUMA and PCI pass through have shown we have no good pattern to extend the resource tracker. Ideally we want to keep the innovation inside the nova tree, but we also need it to be easier. This is very related to the effort to re-think how we model resources, as covered by discussion about `resource providers`_. Parallelism and Concurrency ---------------------------- The current design of the nova-scheduler is very racy, and can lead to excessive numbers of build retries before the correct host is found. The recent NUMA features are particularly impacted by how the scheduler works. All this has lead to many people running only a single nova-scheduler process configured to use a very small greenthread pool. The work on cells v2 will mean that we soon need the scheduler to scale for much larger problems. The current scheduler works best with less than 1k nodes but we will need the scheduler to work with at least 10k nodes. Various ideas have been discussed to reduce races when running multiple nova-scheduler processes. One idea is to use two-phase commit "style" resource tracker claims. Another idea involves using incremental updates so it is more efficient to keep the scheduler's state up to date, potentially using Kafka. For more details, see the `backlog spec`_ that describes more of the details around this problem. .. _this spec: http://specs.openstack.org/openstack/nova-specs/specs/kilo/approved/sched-select-destinations-use-request-spec-object.html .. _resource providers: https://blueprints.launchpad.net/nova/+spec/resource-providers .. 
_backlog spec: http://specs.openstack.org/openstack/nova-specs/specs/backlog/approved/parallel-scheduler.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/scheduler-hints-vs-flavor-extra-specs.rst0000664000175000017500000001751300000000000026600 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========================================= Scheduler hints versus flavor extra specs ========================================= People deploying and working on Nova often have questions about flavor extra specs and scheduler hints and what role they play in scheduling decisions, and which is a better choice for exposing capability to an end user of the cloud. There are several things to consider and it can get complicated. This document attempts to explain at a high level some of the major differences and drawbacks with both flavor extra specs and scheduler hints. Extra Specs ----------- In general flavor extra specs are specific to the cloud and how it is organized for capabilities, and should be abstracted from the end user. Extra specs are tied to :doc:`host aggregates ` and a lot of them also define how a guest is created in the hypervisor, for example what the watchdog action is for a VM. Extra specs are also generally interchangeable with `image properties`_ when it comes to VM behavior, like the watchdog example. How that is presented to the user is via the name of the flavor, or documentation specifically for that deployment, e.g. instructions telling a user how to setup a baremetal instance. .. _image properties: https://docs.openstack.org/glance/latest/admin/useful-image-properties.html Scheduler Hints --------------- Scheduler hints, also known simply as "hints", can be specified during server creation to influence the placement of the server by the scheduler depending on which scheduler filters are enabled. Hints are mapped to specific filters. For example, the ``ServerGroupAntiAffinityFilter`` scheduler filter is used with the ``group`` scheduler hint to indicate that the server being created should be a member of the specified anti-affinity group and the filter should place that server on a compute host which is different from all other current members of the group. Hints are not more "dynamic" than flavor extra specs. The end user specifies a flavor and optionally a hint when creating a server, but ultimately what they can specify is static and defined by the deployment. Similarities ------------ * Both scheduler hints and flavor extra specs can be used by :doc:`scheduler filters `. * Both are totally customizable, meaning there is no whitelist within Nova of acceptable hints or extra specs, unlike image properties [1]_. * An end user cannot achieve a new behavior without deployer consent, i.e. even if the end user specifies the ``group`` hint, if the deployer did not configure the ``ServerGroupAntiAffinityFilter`` the end user cannot have the ``anti-affinity`` behavior. 
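To make the last point above concrete, the following console session (with
placeholder flavor, image and server group names) shows the two mechanisms
side by side. The first command is a deployer action that attaches an extra
spec to a flavor; the remaining commands are end-user actions that create an
anti-affinity server group and pass its UUID via the ``group`` hint at boot
time. The hint only has an effect if the deployer has enabled the
``ServerGroupAntiAffinityFilter``.

.. code-block:: console

   $ openstack flavor set --property hw:cpu_policy=dedicated m1.large
   $ openstack server group create --policy anti-affinity my-group
   $ GROUP_ID=$(openstack server group show my-group -f value -c id)
   $ openstack server create --flavor m1.large --image my-image \
       --hint group=$GROUP_ID my-server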
Differences ----------- * A server's host location and/or behavior can change when resized with a flavor that has different extra specs from those used to create the server. Scheduler hints can only be specified during server creation, not during resize or any other "move" operation, but the original hints are still applied during the move operation. * The flavor extra specs used to create (or resize) a server can be retrieved from the compute API using the `2.47 microversion`_. As of the 19.0.0 Stein release, there is currently no way from the compute API to retrieve the scheduler hints used to create a server. .. note:: Exposing the hints used to create a server has been proposed [2]_. Without this, it is possible to workaround the limitation by doing things such as including the scheduler hint in the server metadata so it can be retrieved via server metadata later. * In the case of hints the end user can decide not to include a hint. On the other hand the end user cannot create a new flavor (by default policy) to avoid passing a flavor with an extra spec - the deployer controls the flavors. .. _2.47 microversion: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id42 Discoverability --------------- When it comes to discoverability, by the default ``os_compute_api:os-flavor-extra-specs:index`` policy rule, flavor extra specs are more "discoverable" by the end user since they can list them for a flavor. However, one should not expect an average end user to understand what different extra specs mean as they are just a key/value pair. There is some documentation for some "standard" extra specs though [3]_. However, that is not an exhaustive list and it does not include anything that different deployments would define for things like linking a flavor to a set of :doc:`host aggregates `, for example, when creating flavors for baremetal instances, or what the chosen :doc:`hypervisor driver ` might support for flavor extra specs. Scheduler hints are less discoverable from an end user perspective than extra specs. There are some standard hints defined in the API request schema [4]_. However: 1. Those hints are tied to scheduler filters and the scheduler filters are configurable per deployment, so for example the ``JsonFilter`` might not be enabled (it is not enabled by default), so the ``query`` hint would not do anything. 2. Scheduler hints are not restricted to just what is in that schema in the upstream nova code because of the ``additionalProperties: True`` entry in the schema. This allows deployments to define their own hints outside of that API request schema for their own :ref:`custom scheduler filters ` which are not part of the upstream nova code. Interoperability ---------------- The only way an end user can really use scheduler hints is based on documentation (or GUIs/SDKs) that a specific cloud deployment provides for their setup. So if **CloudA** defines a custom scheduler filter X and a hint for that filter in their documentation, an end user application can only run with that hint on that cloud and expect it to work as documented. If the user moves their application to **CloudB** which does not have that scheduler filter or hint, they will get different behavior. So obviously both flavor extra specs and scheduler hints are not interoperable. Which to use? ------------- When it comes to defining a custom scheduler filter, you could use a hint or an extra spec. 
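As a rough illustration of that choice, the sketch below shows a custom filter that accepts the request either way. The filter name, hint name, extra spec key, and the ``capabilities`` attribute on the host state are all hypothetical and exist only for this example; the ``BaseHostFilter``/``host_passes`` interface is the standard one used for custom scheduler filters.

.. code-block:: python

   from nova.scheduler import filters


   class ExampleCapabilityFilter(filters.BaseHostFilter):
       """Illustrative filter: pass hosts that offer a requested capability.

       The capability can be requested either via a scheduler hint
       ('example_capability') or via a flavor extra spec
       ('custom:example_capability'); both names are made up for this sketch.
       """

       def host_passes(self, host_state, spec_obj):
           # End-user controlled: scheduler hint supplied with the boot request.
           hint = spec_obj.get_scheduler_hint('example_capability')

           # Deployer controlled: extra spec baked into the chosen flavor.
           extra_spec = spec_obj.flavor.extra_specs.get(
               'custom:example_capability')

           wanted = hint or extra_spec
           if not wanted:
               # Nothing was requested, so every host is acceptable.
               return True

           # How a host advertises the capability is deployment specific;
           # a hypothetical 'capabilities' list is checked here.
           return wanted in getattr(host_state, 'capabilities', [])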
If you need a flavor extra spec anyway for some behavior in the hypervisor when creating the guest, or to be able to retrieve the original flavor extra specs used to create a guest later, then you might as well just use the extra spec. If you do not need that, then a scheduler hint may be an obvious choice, from an end user perspective, for exposing a certain scheduling behavior but it must be well documented and the end user should realize that hint might not be available in other clouds, and they do not have a good way of finding that out either. Long-term, flavor extra specs are likely to be more standardized than hints so ultimately extra specs are the recommended choice. Footnotes --------- .. [1] https://opendev.org/openstack/nova/src/commit/fbe6f77bc1cb41f5d6cfc24ece54d3413f997aab/nova/objects/image_meta.py#L225 .. [2] https://review.opendev.org/#/c/440580/ .. [3] https://docs.openstack.org/nova/latest/user/flavors.html#extra-specs .. [4] https://opendev.org/openstack/nova/src/commit/fbe6f77bc1cb41f5d6cfc24ece54d3413f997aab/nova/api/openstack/compute/schemas/scheduler_hints.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/scheduling.rst0000664000175000017500000001224300000000000021446 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============ Scheduling ============ This is an overview of how scheduling works in nova from Pike onwards. For information on the scheduler itself, refer to :doc:`/admin/scheduling`. For an overview of why we've changed how the scheduler works, refer to :doc:`/reference/scheduler-evolution`. Overview -------- The scheduling process is described below. .. note:: This is current as of the 16.0.0 Pike release. Any mention of alternative hosts passed between the scheduler and conductor(s) is future work. .. image:: /_static/images/scheduling.svg :alt: Scheduling workflow As the above diagram illustrates, scheduling works like so: #. Scheduler gets a request spec from the "super conductor", containing resource requirements. The "super conductor" operates at the top level of a deployment, as contrasted with the "cell conductor", which operates within a particular cell. #. Scheduler sends those requirements to placement. #. Placement runs a query to determine the resource providers (in this case, compute nodes) that can satisfy those requirements. #. Placement then constructs a data structure for each compute node as documented in the `spec`__. The data structure contains summaries of the matching resource provider information for each compute node, along with the AllocationRequest that will be used to claim the requested resources if that compute node is selected. #. Placement returns this data structure to the Scheduler. #. The Scheduler creates HostState objects for each compute node contained in the provider summaries. These HostState objects contain the information about the host that will be used for subsequent filtering and weighing. #. 
Since the request spec can specify one or more instances to be scheduled. The Scheduler repeats the next several steps for each requested instance. #. Scheduler runs these HostState objects through the filters and weighers to further refine and rank the hosts to match the request. #. Scheduler then selects the HostState at the top of the ranked list, and determines its matching AllocationRequest from the data returned by Placement. It uses that AllocationRequest as the body of the request sent to Placement to claim the resources. #. If the claim is not successful, that indicates that another process has consumed those resources, and the host is no longer able to satisfy the request. In that event, the Scheduler moves on to the next host in the list, repeating the process until it is able to successfully claim the resources. #. Once the Scheduler has found a host for which a successful claim has been made, it needs to select a number of "alternate" hosts. These are hosts from the ranked list that are in the same cell as the selected host, which can be used by the cell conductor in the event that the build on the selected host fails for some reason. The number of alternates is determined by the configuration option ``scheduler.max_attempts``. #. Scheduler creates two list structures for each requested instance: one for the hosts (selected + alternates), and the other for their matching AllocationRequests. #. To create the alternates, Scheduler determines the cell of the selected host. It then iterates through the ranked list of HostState objects to find a number of additional hosts in that same cell. It adds those hosts to the host list, and their AllocationRequest to the allocation list. #. Once those lists are created, the Scheduler has completed what it needs to do for a requested instance. #. Scheduler repeats this process for any additional requested instances. When all instances have been scheduled, it creates a 2-tuple to return to the super conductor, with the first element of the tuple being a list of lists of hosts, and the second being a list of lists of the AllocationRequests. #. Scheduler returns that 2-tuple to the super conductor. #. For each requested instance, the super conductor determines the cell of the selected host. It then sends a 2-tuple of ([hosts], [AllocationRequests]) for that instance to the target cell conductor. #. Target cell conductor tries to build the instance on the selected host. If it fails, it uses the AllocationRequest data for that host to unclaim the resources for the selected host. It then iterates through the list of alternates by first attempting to claim the resources, and if successful, building the instance on that host. Only when all alternates fail does the build request fail. __ https://specs.openstack.org/openstack/nova-specs/specs/pike/approved/placement-allocation-requests.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/services.rst0000664000175000017500000000446200000000000021150 0ustar00zuulzuul00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _service_manager_driver: Services, Managers and Drivers ============================== The responsibilities of Services, Managers, and Drivers, can be a bit confusing to people that are new to nova. This document attempts to outline the division of responsibilities to make understanding the system a little bit easier. Currently, Managers and Drivers are specified by flags and loaded using utils.load_object(). This method allows for them to be implemented as singletons, classes, modules or objects. As long as the path specified by the flag leads to an object (or a callable that returns an object) that responds to getattr, it should work as a manager or driver. The :mod:`nova.service` Module ------------------------------ .. automodule:: nova.service :noindex: :members: :undoc-members: :show-inheritance: The :mod:`nova.manager` Module ------------------------------ .. automodule:: nova.manager :noindex: :members: :undoc-members: :show-inheritance: Implementation-Specific Drivers ------------------------------- A manager will generally load a driver for some of its tasks. The driver is responsible for specific implementation details. Anything running shell commands on a host, or dealing with other non-python code should probably be happening in a driver. Drivers should not touch the database as the database management is done inside ``nova-conductor``. It usually makes sense to define an Abstract Base Class for the specific driver (i.e. VolumeDriver), to define the methods that a different driver would need to implement. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/stable-api.rst0000664000175000017500000001326300000000000021345 0ustar00zuulzuul00000000000000.. Copyright 2015 Intel All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Nova Stable REST API ==================== This document describes both the current state of the Nova REST API -- as of the Pike release -- and also attempts to describe how the Nova team evolved the REST API's implementation over time and removed some of the cruft that has crept in over the years. Background ---------- Nova used to include two distinct frameworks for exposing REST API functionality. Older code is called the "v2 API" and existed in the /nova/api/openstack/compute/legacy_v2/ directory. This code tree was totally removed during Newton release time frame (14.0.0 and later). Newer code is called the "v2.1 API" and exists in the /nova/api/openstack/compute directory. The v2 API is the old Nova REST API. It is mostly replaced by v2.1 API. 
The v2.1 API is the new Nova REST API with a set of improvements which includes `Microversion `_ and standardized validation of inputs using JSON-Schema. Also the v2.1 API is totally backwards compatible with the v2 API (That is the reason we call it as v2.1 API). Current Stable API ------------------ * Nova v2.1 API + Microversion (v2.1 APIs are backward-compatible with v2 API, but more strict validation) * /v2 & /v2.1 endpoint supported * v2 compatible mode for old v2 users Evolution of Nova REST API -------------------------- .. image:: /_static/images/evolution-of-api.png Nova v2 API + Extensions ************************ Nova used to have v2 API. In v2 API, there was a concept called 'extension'. An operator can use it to enable/disable part of Nova REST API based on requirements. An end user may query the '/extensions' API to discover what *API functionality* is supported by the Nova deployment. Unfortunately, because v2 API extensions could be enabled or disabled from one deployment to another -- as well as custom API extensions added to one deployment and not another -- it was impossible for an end user to know what the OpenStack Compute API actually included. No two OpenStack deployments were consistent, which made cloud interoperability impossible. In the Newton release, stevedore loading of API extension plugins was deprecated and marked for removal. In the Newton release, v2 API code base has been removed and /v2 endpoints were directed to v2.1 code base. v2 API compatibility mode based on v2.1 API ******************************************* v2.1 API is exactly same as v2 API except strong input validation with no additional request parameter allowed and Microversion feature. Since Newton, '/v2' endpoint also started using v2.1 API implementation. But to keep the backward compatibility of v2 API, '/v2' endpoint should not return error on additional request parameter or any new headers for Microversion. v2 API must be same as it has been since starting. To achieve that behavior legacy v2 compatibility mode has been introduced. v2 compatibility mode is based on v2.1 implementation with below difference: * Skip additionalProperties checks in request body * Ignore Microversion headers in request * No Microversion headers in response Nova v2.1 API + Microversion **************************** In the Kilo release, nova v2.1 API has been released. v2.1 API is supposed to be backward compatible with v2 API with strong input validation using JSON Schema. v2.1 API comes up with microversion concept which is a way to version the API changes. Each new feature or modification in API has to done via microversion bump. API extensions concept was deprecated from the v2.1 API, are no longer needed to evolve the REST API, and no new API functionality should use the API extension classes to implement new functionality. Instead, new API functionality should be added via microversion concept and use the microversioning decorators to add or change the REST API. v2.1 API had plugin framework which was using stevedore to load Nova REST API extensions instead of old V2 handcrafted extension load mechanism. There was an argument that the plugin framework supported extensibility in the Nova API to allow deployers to publish custom API resources. In the Newton release, config options of blacklist and whitelist extensions and stevedore things were deprecated and marked for removal. In Pike, stevedore based plugin framework has been removed and url mapping is done with plain router list. 
There is no more dynamic magic of detecting API implementation for url. See :doc:`Extending the API ` for more information. The '/extensions' API exposed the list of enabled API functions to users by GET method. However as the above, new API extensions should not be added to the list of this API. The '/extensions' API is frozen in Nova V2.1 API and is `deprecated `_. Things which are History now **************************** As of the Pike release, many deprecated things have been removed and became history in Nova API world: * v2 legacy framework * API extensions concept * stevedore magic to load the extension/plugin dynamically * Configurable way to enable/disable APIs extensions ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/threading.rst0000664000175000017500000000671100000000000021271 0ustar00zuulzuul00000000000000Threading model =============== Eventlet -------- Before the Flamingo release all OpenStack services used the *green thread* model of threading, implemented through using the Python `eventlet `_ and `greenlet `_ libraries. Green threads use a cooperative model of threading: thread context switches can only occur when specific eventlet or greenlet library calls are made (e.g., sleep, certain I/O calls). From the operating system's point of view, each OpenStack service runs in a single thread. The use of green threads reduces the likelihood of race conditions, but does not completely eliminate them. In some cases, you may need to use the ``@lockutils.synchronized(...)`` decorator to avoid races. In addition, since there is only one operating system thread, a call that blocks that main thread will block the entire process. Yielding the thread in long-running tasks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If a code path takes a long time to execute and does not contain any methods that trigger an eventlet context switch, the long-running thread will block any pending threads. This scenario can be avoided by adding calls to the eventlet sleep method in the long-running code path. The sleep call will trigger a context switch if there are pending threads, and using an argument of 0 will avoid introducing delays in the case that there is only a single green thread:: from eventlet import greenthread ... greenthread.sleep(0) In current code, time.sleep(0) does the same thing as greenthread.sleep(0) if time module is patched through eventlet.monkey_patch(). To be explicit, we recommend contributors use ``greenthread.sleep()`` instead of ``time.sleep()``. MySQL access and eventlet ~~~~~~~~~~~~~~~~~~~~~~~~~ There are some MySQL DB API drivers for oslo.db, like `PyMySQL`_, MySQL-python etc. PyMySQL is the default MySQL DB API driver for oslo.db, and it works well with eventlet. MySQL-python uses an external C library for accessing the MySQL database. Since eventlet cannot use monkey-patching to intercept blocking calls in a C library, so queries to the MySQL database will block the main thread of a service. The Diablo release contained a thread-pooling implementation that did not block, but this implementation resulted in a `bug`_ and was removed. See this `mailing list thread`_ for a discussion of this issue, including a discussion of the `impact on performance`_. .. _bug: https://bugs.launchpad.net/nova/+bug/838581 .. _mailing list thread: https://lists.launchpad.net/openstack/msg08118.html .. _impact on performance: https://lists.launchpad.net/openstack/msg08217.html .. 
_PyMySQL: https://wiki.openstack.org/wiki/PyMySQL_evaluation Native threading ---------------- Since the Flamingo release OpenStack started to transition away form ``eventlet``. During this transition Nova maintains support for running services with ``eventlet`` while working to add support for running services with ``native threading``. To support both modes with the same codebase Nova started using the `futurist`_ library. In native threading mode ``futurist.ThreadPoolsExecutors`` are used to run concurrent tasks and both the oslo.service and the oslo.messaging libraries are configured to use native threads to execute tasks like periodics and RPC message handlers. .. _futurist: https://docs.openstack.org/futurist/latest/ To see how to configure and tune the native threading mode read the :doc:`/admin/concurrency` guide. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/update-provider-tree.rst0000664000175000017500000002377000000000000023377 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==================================== ComputeDriver.update_provider_tree ==================================== This provides details on the ``ComputeDriver`` abstract method ``update_provider_tree`` for developers implementing this method in their own virt drivers. Background ---------- In the movement towards using placement for scheduling and resource management, the virt driver method ``get_available_resource`` was initially superseded by ``get_inventory`` (now gone), whereby the driver could specify its inventory in terms understood by placement. In Queens, a ``get_traits`` driver method was added. But ``get_inventory`` was limited to expressing only inventory (not traits or aggregates). And both of these methods were limited to the resource provider corresponding to the compute node. Developments such as Nested Resource Providers necessitate the ability for the virt driver to have deeper control over what the resource tracker configures in placement on behalf of the compute node. This need is filled by the virt driver method ``update_provider_tree`` and its consumption by the resource tracker, allowing full control over the placement representation of the compute node and its associated providers. The Method ---------- ``update_provider_tree`` accepts the following parameters: * A ``nova.compute.provider_tree.ProviderTree`` object representing all the providers in the tree associated with the compute node, and any sharing providers (those with the ``MISC_SHARES_VIA_AGGREGATE`` trait) associated via aggregate with any of those providers (but not *their* tree- or aggregate-associated providers), as currently known by placement. This object is fully owned by the ``update_provider_tree`` method, and can therefore be modified without locking/concurrency considerations. In other words, the parameter is passed *by reference* with the expectation that the virt driver will modify the object. 
Note, however, that it may contain providers not directly owned/controlled by the compute host. Care must be taken not to remove or modify such providers inadvertently. In addition, providers may be associated with traits and/or aggregates maintained by outside agents. The ``update_provider_tree`` method must therefore also be careful only to add/remove traits/aggregates it explicitly controls. * String name of the compute node (i.e. ``ComputeNode.hypervisor_hostname``) for which the caller is requesting updated provider information. Drivers may use this to help identify the compute node provider in the ProviderTree. Drivers managing more than one node (e.g. ironic) may also use it as a cue to indicate which node is being processed by the caller. * Dictionary of ``allocations`` data of the form: .. code:: { $CONSUMER_UUID: { # The shape of each "allocations" dict below is identical # to the return from GET /allocations/{consumer_uuid} "allocations": { $RP_UUID: { "generation": $RP_GEN, "resources": { $RESOURCE_CLASS: $AMOUNT, ... }, }, ... }, "project_id": $PROJ_ID, "user_id": $USER_ID, "consumer_generation": $CONSUMER_GEN, }, ... } If ``None``, and the method determines that any inventory needs to be moved (from one provider to another and/or to a different resource class), the ``ReshapeNeeded`` exception must be raised. Otherwise, this dict must be edited in place to indicate the desired final state of allocations. Drivers should *only* edit allocation records for providers whose inventories are being affected by the reshape operation. For more information about the reshape operation, refer to the `spec `_. The virt driver is expected to update the ProviderTree object with current resource provider and inventory information. When the method returns, the ProviderTree should represent the correct hierarchy of nested resource providers associated with this compute node, as well as the inventory, aggregates, and traits associated with those resource providers. .. note:: Despite the name, a ProviderTree instance may in fact contain more than one tree. For purposes of this specification, the ProviderTree passed to ``update_provider_tree`` will contain: * the entire tree associated with the compute node; and * any sharing providers (those with the ``MISC_SHARES_VIA_AGGREGATE`` trait) which are associated via aggregate with any of the providers in the compute node's tree. The sharing providers will be presented as lone roots in the ProviderTree, even if they happen to be part of a tree themselves. Consider the example below. ``SSP`` is a shared storage provider and ``BW1`` and ``BW2`` are shared bandwidth providers; all three have the ``MISC_SHARES_VIA_AGGREGATE`` trait:: CN1 SHR_ROOT CN2 / \ agg1 / /\ agg1 / \ NUMA1 NUMA2--------SSP--/--\-----------NUMA1 NUMA2 / \ / \ / \ / \ / \ PF1 PF2 PF3 PF4--------BW1 BW2------PF1 PF2 PF3 PF4 agg2 agg3 When ``update_provider_tree`` is invoked for ``CN1``, it is passed a ProviderTree containing:: CN1 (root) / \ agg1 NUMA1 NUMA2-------SSP (root) / \ / \ PF1 PF2 PF3 PF4------BW1 (root) agg2 Driver implementations of ``update_provider_tree`` are expected to use public ``ProviderTree`` methods to effect changes to the provider tree passed in. Some of the methods which may be useful are as follows: * ``new_root``: Add a new root provider to the tree. * ``new_child``: Add a new child under an existing provider. * ``data``: Access information (name, UUID, parent, inventory, traits, aggregates) about a provider in the tree. 
* ``remove``: Remove a provider **and its descendants** from the tree. Use caution in multiple-ownership scenarios. * ``update_inventory``: Set the inventory for a provider. * ``add_traits``, ``remove_traits``: Set/unset virt-owned traits for a provider. * ``add_aggregates``, ``remove_aggregates``: Set/unset virt-owned aggregate associations for a provider. .. note:: There is no supported mechanism for ``update_provider_tree`` to effect changes to allocations. This is intentional: in Nova, allocations are managed exclusively outside of virt. (Usually by the scheduler; sometimes - e.g. for migrations - by the conductor.) Porting from get_inventory ~~~~~~~~~~~~~~~~~~~~~~~~~~ Virt driver developers wishing to move from ``get_inventory`` to ``update_provider_tree`` should use the ``ProviderTree.update_inventory`` method, specifying the compute node as the provider and the same inventory as returned by ``get_inventory``. For example: .. code:: def get_inventory(self, nodename): inv_data = { 'VCPU': { ... }, 'MEMORY_MB': { ... }, 'DISK_GB': { ... }, } return inv_data would become: .. code:: def update_provider_tree(self, provider_tree, nodename, allocations=None): inv_data = { 'VCPU': { ... }, 'MEMORY_MB': { ... }, 'DISK_GB': { ... }, } provider_tree.update_inventory(nodename, inv_data) When reporting inventory for the standard resource classes ``VCPU``, ``MEMORY_MB`` and ``DISK_GB``, implementers of ``update_provider_tree`` may need to set the ``allocation_ratio`` and ``reserved`` values in the ``inv_data`` dict based on configuration to reflect changes on the compute for allocation ratios and reserved resource amounts back to the placement service. Porting from get_traits ~~~~~~~~~~~~~~~~~~~~~~~ To replace ``get_traits``, developers should use the ``ProviderTree.add_traits`` method, specifying the compute node as the provider and the same traits as returned by ``get_traits``. For example: .. code:: def get_traits(self, nodename): traits = ['HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2', 'CUSTOM_GOLD'] return traits would become: .. code:: def update_provider_tree(self, provider_tree, nodename, allocations=None): provider_tree.add_traits( nodename, 'HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2', 'CUSTOM_GOLD') .. _taxonomy_of_traits_and_capabilities: Taxonomy of traits and capabilities ----------------------------------- There are various types of traits: - Some are standard (registered in `os-traits `_); others are custom. - Some are owned by the compute service; others can be managed by operators. - Some come from driver-supported capabilities, via a mechanism which was `introduced `_ to convert them to standard traits on the compute node resource provider. This mechanism is :ref:`documented in the configuration guide `. This diagram may shed further light on how these traits relate to each other and how they are managed. .. figure:: /_static/images/traits-taxonomy.svg :width: 800 :alt: Venn diagram showing taxonomy of traits and capabilities ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/upgrade-checks.rst0000664000175000017500000003025400000000000022210 0ustar00zuulzuul00000000000000============== Upgrade checks ============== .. note:: This document details how to generate upgrade checks as part of a new feature or bugfix. For info on how to apply existing upgrade checks, refer to the documentation for the :program:`nova-status` command in :doc:`/cli/nova-status`. 
For info on the general upgrade process for a nova deployment, refer to :doc:`/admin/upgrades`. Nova provides automated :ref:`upgrade check tooling ` to assist deployment tools in verifying critical parts of the deployment, especially when it comes to major changes during upgrades that require operator intervention. This guide covers the background on nova's upgrade check tooling, how it is used, and what to look for in writing new checks. Background ---------- Nova has historically supported offline database schema migrations (:program:`nova-manage db sync` and :program:`nova-manage api_db sync`) and online data migrations (:program:`nova-manage db online_data_migrations`) during upgrades, as discussed in :doc:`/reference/database-migrations`. The :program:`nova-status upgrade check` command was introduced in the 15.0.0 (Ocata) release to aid in the verification of two major required changes in that release, namely Placement and Cells v2. Integration with the Placement service and deploying Cells v2 was optional starting in the 14.0.0 Newton release and made required in the Ocata release. The nova team working on these changes knew that there were required deployment changes to successfully upgrade to Ocata. In addition, the required deployment changes were not things that could simply be verified in a database migration script, e.g. a migration script should not make REST API calls to Placement. So ``nova-status upgrade check`` was written to provide an automated "pre-flight" check to verify that required deployment steps were performed prior to upgrading to Ocata. Reference the `Ocata changes`_ for implementation details. .. _Ocata changes: https://review.opendev.org/#/q/topic:bp/resource-providers-scheduler-db-filters+status:merged+file:%255Enova/cmd/status.py Guidelines ---------- * The checks should be able to run within a virtual environment or container. All that is required is a full configuration file, similar to running other ``nova-manage`` type administration commands. In the case of nova, this means having :oslo.config:group:`api_database`, :oslo.config:group:`placement`, etc sections configured. * Candidates for automated upgrade checks are things in a project's upgrade release notes which can be verified via the database. For example, when upgrading to Cells v2 in Ocata, one required step was creating "cell mappings" for ``cell0`` and ``cell1``. This can easily be verified by checking the contents of the ``cell_mappings`` table in the ``nova_api`` database. * Checks will query the database(s) and potentially REST APIs (depending on the check) but should not expect to run RPC calls. For example, a check should not require that the ``nova-compute`` service is running on a particular host. * Checks are typically meant to be run before re-starting and upgrading to new service code, which is how `grenade uses them`__, but they can also be run as a :ref:`post-install verify step ` which is how `openstack-ansible`__ also uses them. The high-level set of upgrade steps for upgrading nova in grenade is: * Install new code * Sync the database schema for new models (``nova-manage api_db sync``; ``nova-manage db sync``) * Run the online data migrations (``nova-manage db online_data_migrations``) * Run the upgrade check (``nova-status upgrade check``) * Restart services with new code .. __: https://github.com/openstack-dev/grenade/blob/dc7f4a4ba/projects/60_nova/upgrade.sh#L96 .. 
__: https://review.opendev.org/#/c/575125/ * Checks must be idempotent so they can be run repeatedly and the results are always based on the latest data. This allows an operator to run the checks, fix any issues reported, and then iterate until the status check no longer reports any issues. * Checks which cannot easily, or should not, be run within offline database migrations are a good candidate for these CLI-driven checks. For example, ``instances`` records are in the cell database and for each instance there should be a corresponding ``request_specs`` table entry in the ``nova_api`` database. A ``nova-manage db online_data_migrations`` routine was added in the Newton release to back-fill request specs for existing instances, and `in Rocky`__ an upgrade check was added to make sure all non-deleted instances have a request spec so compatibility code can be removed in Stein. In older releases of nova we would have added a `blocker migration`__ as part of the database schema migrations to make sure the online data migrations had been completed before the upgrade could proceed. .. note:: Usage of ``nova-status upgrade check`` does not preclude the need for blocker migrations within a given database, but in the case of request specs the check spans multiple databases and was a better fit for the ``nova-status`` tooling. .. __: https://review.opendev.org/#/c/581813/ .. __: https://review.opendev.org/#/c/289450/ * All checks should have an accompanying upgrade release note. Structure --------- There is no graph logic for checks, meaning each check is meant to be run independently of other checks in the same set. For example, a project could have five checks which run serially but that does not mean the second check in the set depends on the results of the first check in the set, or the third check depends on the second, and so on. The base framework is fairly simple as can be seen from the `initial change`_. Each check is registered in the ``_upgrade_checks`` variable and the ``check`` method executes each check and records the result. The most severe result is recorded for the final return code. There are one of three possible results per check: * ``Success``: All upgrade readiness checks passed successfully and there is nothing to do. * ``Warning``: At least one check encountered an issue and requires further investigation. This is considered a warning but the upgrade may be OK. * ``Failure``: There was an upgrade status check failure that needs to be investigated. This should be considered something that stops an upgrade. The ``UpgradeCheckResult`` object provides for adding details when there is a warning or failure result which generally should refer to how to resolve the failure, e.g. maybe ``nova-manage db online_data_migrations`` is incomplete and needs to be run again. Using the `cells v2 check`_ as an example, there are really two checks involved: 1. Do the cell0 and cell1 mappings exist? 2. Do host mappings exist in the API database if there are compute node records in the cell database? Failing either check results in a ``Failure`` status for that check and return code of ``2`` for the overall run. The initial `placement check`_ provides an example of a warning response. In that check, if there are fewer resource providers in Placement than there are compute nodes in the cell database(s), the deployment may be underutilized because the ``nova-scheduler`` is using the Placement service to determine candidate hosts for scheduling. 
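A heavily simplified sketch of a check in this style is shown below, using the ``oslo.upgradecheck`` library's ``Result``/``Code`` semantics, which mirror the ``UpgradeCheckResult`` behavior described above. The class name, check name, and counting helpers are hypothetical; the real checks live in ``nova/cmd/status.py``.

.. code-block:: python

   from oslo_upgradecheck import upgradecheck


   class ExampleUpgradeCommands(upgradecheck.UpgradeCommands):

       def _check_resource_providers(self):
           # Hypothetical helpers; a real check would query the cell
           # database(s) and the placement REST API.
           num_computes = self._count_compute_nodes()
           num_providers = self._count_compute_resource_providers()
           if num_providers < num_computes:
               return upgradecheck.Result(
                   upgradecheck.Code.WARNING,
                   'Fewer resource providers than compute nodes were found; '
                   'the deployment may be underutilized.')
           return upgradecheck.Result(upgradecheck.Code.SUCCESS)

       # Each check is registered as a (name, method) pair; the most severe
       # result across all checks determines the command's return code.
       _upgrade_checks = (
           ('Resource Providers', _check_resource_providers),
       )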
Warning results are good for cases where scenarios are known to run through a rolling upgrade process, e.g. ``nova-compute`` being configured to report resource provider information into the Placement service. These are things that should be investigated and completed at some point, but might not cause any immediate failures. The results feed into a standard output for the checks: .. code-block:: console $ nova-status upgrade check +----------------------------------------------------+ | Upgrade Check Results | +----------------------------------------------------+ | Check: Cells v2 | | Result: Success | | Details: None | +----------------------------------------------------+ | Check: Placement API | | Result: Failure | | Details: There is no placement-api endpoint in the | | service catalog. | +----------------------------------------------------+ .. _initial change: https://review.opendev.org/#/c/411517/ .. _cells v2 check: https://review.opendev.org/#/c/411525/ .. _placement check: https://review.opendev.org/#/c/413250/ FAQs ---- - How is the ``nova-status`` upgrade script packaged and deployed? There is a ``console_scripts`` entry for ``nova-status`` in the ``setup.cfg`` file. - Why are there multiple parts to the command structure, i.e. "upgrade" and "check"? This is an artifact of how the ``nova-manage`` command is structured which has categories of sub-commands, like ``nova-manage db`` is a sub-category made up of other sub-commands like ``nova-manage db sync``. The ``nova-status upgrade check`` command was written in the same way for consistency and extensibility if other sub-commands need to be added later. - Why is the upgrade check command not part of the standard python-\*client CLIs? The ``nova-status`` command was modeled after the ``nova-manage`` command which is meant to be admin-only and has direct access to the database, unlike other CLI packages like python-novaclient which requires a token and communicates with nova over the REST API. Because of this, it is also possible to write commands in ``nova-manage`` and ``nova-status`` that can work while the API service is down for maintenance. - How should the checks be documented? Each check should be documented in the :ref:`history section ` of the CLI guide and have a release note. This is important since the checks can be run in an isolated environment apart from the actual deployed version of the code and since the checks should be idempotent, the history / change log is good for knowing what is being validated. - Do other projects support upgrade checks? A community-wide `goal for the Stein release`__ is adding the same type of ``$PROJECT-status upgrade check`` tooling to other projects to ease in upgrading OpenStack across the board. So while the guidelines in this document are primarily specific to nova, they should apply generically to other projects wishing to incorporate the same tooling. .. __: https://governance.openstack.org/tc/goals/stein/upgrade-checkers.html - Where should the documentation live for projects other than nova? As part of the standard OpenStack project `documentation guidelines`__ the command should be documented under ``doc/source/cli`` in each project repo. .. __: https://docs.openstack.org/doc-contrib-guide/project-guides.html - Can upgrade checks be backported? Sometimes upgrade checks can be backported to aid in preempting bugs on stable branches. 
For example, a check was added for `bug 1759316`__ in Rocky which was also backported to stable/queens in case anyone upgrading from Pike to Queens would hit the same issue. Backportable checks are generally only made for latent bugs since someone who has already passed checks and upgraded to a given stable branch should not start failing after a patch release on that same branch. For this reason, any check being backported should have a release note with it. .. __: https://bugs.launchpad.net/nova/+bug/1759316 - Can upgrade checks only be for N-1 to N version upgrades? No, not necessarily. The upgrade checks are also an essential part of `fast-forward upgrades`__ to make sure that as you roll through each release performing schema (data model) updates and data migrations that you are also completing all of the necessary changes. For example, if you are fast forward upgrading from Ocata to Rocky, something could have been added, deprecated or removed in Pike or Queens and a pre-upgrade check is a way to make sure the necessary steps were taking while upgrading through those releases before restarting the Rocky code at the end. .. __: https://wiki.openstack.org/wiki/Fast_forward_upgrades ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/reference/vm-states.rst0000664000175000017500000001213600000000000021245 0ustar00zuulzuul00000000000000Virtual Machine States and Transitions ======================================= The following diagrams and tables show the required virtual machine (VM) states and task states for various commands issued by the user. Allowed State Transitions -------------------------- .. graphviz:: digraph states { graph [pad=".35", ranksep="0.65", nodesep="0.55", concentrate=true]; node [fontsize=10 fontname="Monospace"]; edge [arrowhead="normal", arrowsize="0.8"]; label="All states are allowed to transition to DELETED and ERROR."; forcelabels=true; labelloc=bottom; labeljust=left; /* states */ building [label="BUILDING"] active [label="ACTIVE"] paused [label="PAUSED"] suspended [label="SUSPENDED"] stopped [label="STOPPED"] rescued [label="RESCUED"] resized [label="RESIZED"] soft_deleted [label="SOFT_DELETED"] shelved [label="SHELVED"] shelved_offloaded [label="SHELVED_OFFLOADED"] deleted [label="DELETED", color="red"] error [label="ERROR", color="red"] /* transitions [action] */ building -> active active -> active [headport=nw, tailport=ne] // manual layout active -> soft_deleted [tailport=e] // prevent arrowhead overlap active -> suspended active -> paused [tailport=w] // prevent arrowhead overlap active -> stopped active -> shelved active -> shelved_offloaded active -> rescued active -> resized soft_deleted -> active [headport=e] // prevent arrowhead overlap suspended -> active suspended -> shelved suspended -> shelved_offloaded paused -> active paused -> shelved paused -> shelved_offloaded stopped -> active stopped -> stopped [headport=nw, tailport=ne] // manual layout stopped -> resized stopped -> rescued stopped -> shelved stopped -> shelved_offloaded resized -> active rescued -> active shelved -> shelved_offloaded shelved -> active shelved_offloaded -> active } Requirements for Commands ------------------------- ================== ================== ==================== ================ Command Req'd VM States Req'd Task States Target State ================== ================== ==================== ================ pause Active, Shutoff, Resize Verify, unset Paused Rescued unpause Paused 
N/A Active suspend Active, Shutoff N/A Suspended resume Suspended N/A Active rescue Active, Shutoff Resize Verify, unset Rescued unrescue Rescued N/A Active set admin password Active N/A Active rebuild Active, Shutoff Resize Verify, unset Active, Shutoff force delete Soft Deleted N/A Deleted restore Soft Deleted N/A Active soft delete Active, Shutoff, N/A Soft Deleted Error delete Active, Shutoff, N/A Deleted Building, Rescued, Error backup Active, Shutoff N/A Active, Shutoff snapshot Active, Shutoff N/A Active, Shutoff start Shutoff, Stopped N/A Active stop Active, Shutoff, Resize Verify, unset Stopped Rescued reboot Active, Shutoff, Resize Verify, unset Active Rescued resize Active, Shutoff Resize Verify, unset Resized revert resize Active, Shutoff Resize Verify, unset Active confirm resize Active, Shutoff Resize Verify, unset Active ================== ================== ==================== ================ VM states and Possible Commands ------------------------------- ============ ================================================================= VM State Commands ============ ================================================================= Paused unpause Suspended resume Active set admin password, suspend, pause, rescue, rebuild, soft delete, delete, backup, snapshot, stop, reboot, resize, revert resize, confirm resize Shutoff suspend, pause, rescue, rebuild, soft delete, delete, backup, start, snapshot, stop, reboot, resize, revert resize, confirm resize Rescued unrescue, pause Stopped rescue, delete, start Soft Deleted force delete, restore Error soft delete, delete Building delete Rescued delete, stop, reboot ============ ================================================================= Create Instance States ---------------------- The following diagram shows the sequence of VM states, task states, and power states when a new VM instance is created. .. image:: /_static/images/create-vm-states.svg :alt: Sequence of VM states, task states, and power states when a new VM instance is created. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.273608 nova-32.0.0/doc/source/user/0000775000175000017500000000000000000000000015605 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/availability-zones.rst0000664000175000017500000000270600000000000022152 0ustar00zuulzuul00000000000000================== Availability zones ================== Availability Zones are an end-user visible logical abstraction for partitioning a cloud without knowing the physical infrastructure. Availability zones can be used to partition a cloud on arbitrary factors, such as location (country, datacenter, rack), network layout and/or power source. Because of the flexibility, the names and purposes of availability zones can vary massively between clouds. In addition, other services, such as the :neutron-doc:`networking service <>` and the :cinder-doc:`block storage service <>`, also provide an availability zone feature. However, the implementation of these features differs vastly between these different services. Consult the documentation for these other services for more information on their implementation of this feature. .. tip:: Server Groups provide another mechanism for configuring the colocation of instances during scheduling. For more information, refer to :doc:`/user/server-groups`. 
Usage ----- Availability zones can only be created and configured by an admin but they can be used by an end-user when creating an instance. For example: .. code-block:: console $ openstack server create --availability-zone ZONE ... SERVER It is also possible to specify a destination host and/or node using this command; however, this is an admin-only operation by default. For more information, see :ref:`using-availability-zones-to-select-hosts`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/block-device-mapping.rst0000664000175000017500000002771400000000000022332 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Block Device Mapping in Nova ============================ Nova has a concept of block devices that can be exposed to cloud instances. There are several types of block devices an instance can have (we will go into more details about this later in this document), and which ones are available depends on a particular deployment and the usage limitations set for tenants and users. Block device mapping is a way to organize and keep data about all of the block devices an instance has. When we talk about block device mapping, we usually refer to one of two things 1. API/CLI structure and syntax for specifying block devices for an instance boot request 2. The data structure internal to Nova that is used for recording and keeping, which is ultimately persisted in the block_device_mapping table. However, Nova internally has several "slightly" different formats for representing the same data. All of them are documented in the code and or presented by a distinct set of classes, but not knowing that they exist might trip up people reading the code. So in addition to BlockDeviceMapping [1]_ objects that mirror the database schema, we have: 2.1 The API format - this is the set of raw key-value pairs received from the API client, and is almost immediately transformed into the object; however, some validations are done using this format. We will refer to this format as the 'API BDMs' from now on. 2.2 The virt driver format - this is the format defined by the classes in :mod:`nova.virt.block_device`. This format is used and expected by the code in the various virt drivers. These classes, in addition to exposing a different format (mimicking the Python dict interface), also provide a place to bundle some functionality common to certain types of block devices (for example attaching volumes which has to interact with both Cinder and the virt driver code). We will refer to this format as 'Driver BDMs' from now on. For more details on this please refer to the :doc:`Driver BDM Data Structures <../reference/block-device-structs>` reference document. .. note:: The maximum limit on the number of disk devices allowed to attach to a single server is configurable with the option :oslo.config:option:`compute.max_disk_devices_to_attach`. 
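To give an early feel for the API format before walking through its history, the following is an illustrative single entry from the ``block_device_mapping_v2`` list of a boot request. The UUID and size are placeholders, and the individual fields are explained in the sections that follow.

.. code-block:: python

   # One entry of the "block_device_mapping_v2" list in a server create
   # request: boot from a volume created out of a Glance image.
   bdm = {
       "uuid": "889c9a8a-0a0a-4a9b-9e5a-0f5a3f7e8f11",  # image UUID (placeholder)
       "source_type": "image",
       "destination_type": "volume",
       "boot_index": 0,
       "volume_size": 20,                 # size of the new volume, in GiB
       "delete_on_termination": True,
       # "device_name" is deliberately omitted; see the discussion below.
   }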
API BDM data format and its history ----------------------------------- In the early days of Nova, block device mapping general structure closely mirrored that of the EC2 API. During the Havana release of Nova, block device handling code, and in turn the block device mapping structure, had work done on improving the generality and usefulness. These improvements included exposing additional details and features in the API. In order to facilitate this, a new extension was added to the v2 API called ``BlockDeviceMappingV2Boot`` [2]_, that added an additional ``block_device_mapping_v2`` field to the instance boot API request. Block device mapping v1 (aka legacy) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This was the original format that supported only cinder volumes (similar to how EC2 block devices support only EBS volumes). Every entry was keyed by device name (we will discuss why this was problematic in its own section later on this page), and would accept only: * UUID of the Cinder volume or snapshot * Type field - used only to distinguish between volumes and Cinder volume snapshots * Optional size field * Optional ``delete_on_termination`` flag While all of Nova internal code only uses and stores the new data structure, we still need to handle API requests that use the legacy format. This is handled by the Nova API service on every request. As we will see later, since block device mapping information can also be stored in the image metadata in Glance, this is another place where we need to handle the v1 format. The code to handle legacy conversions is part of the :mod:`nova.block_device` module. Intermezzo - problem with device names ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Using device names as the primary per-instance identifier, and exposing them in the API, is problematic for Nova mostly because several hypervisors Nova supports with its drivers can't guarantee that the device names the guest OS assigns are the ones the user requested from Nova. Exposing such a detail in the public API of Nova is obviously not ideal, but it needed to stay for backwards compatibility. It is also required for some (slightly obscure) features around overloading a block device in a Glance image when booting an instance [3]_. The plan for fixing this was to allow users to not specify the device name of a block device, and Nova will determine it (with the help of the virt driver), so that it can still be discovered through the API and used when necessary, like for the features mentioned above (and preferably only then). Another use for specifying the device name was to allow the "boot from volume" functionality, by specifying a device name that matches the root device name for the instance (usually ``/dev/vda``). Currently (mid Liberty) users are discouraged from specifying device names for all calls requiring or allowing block device mapping, except when trying to override the image block device mapping on instance boot, and it will likely remain like that in the future. Libvirt device driver will outright override any device names passed with it's own values. Block device mapping v2 ^^^^^^^^^^^^^^^^^^^^^^^ New format was introduced in an attempt to solve issues with the original block device mapping format discussed above, and also to allow for more flexibility and addition of features that were not possible with the simple format we had. 
New block device mapping is a list of dictionaries containing the following fields (in addition to the ones that were already there): * source_type - this can have one of the following values: * ``image`` * ``volume`` * ``snapshot`` * ``blank`` * destination_type - this can have one of the following values: * ``local`` * ``volume`` * guest_format - Tells Nova how/if to format the device prior to attaching, should be only used with blank local images. Denotes a swap disk if the value is ``swap``. * device_name - See the previous section for a more in depth explanation of this - currently best left empty (not specified that is), unless the user wants to override the existing device specified in the image metadata. In case of Libvirt, even when passed in with the purpose of overriding the existing image metadata, final set of device names for the instance may still get changed by the driver. * disk_bus and device_type - low level details that some hypervisors (currently only libvirt) may support. Some example disk_bus values can be: ``ide``, ``usb``, ``virtio``, ``scsi``, while device_type may be ``disk``, ``cdrom``, ``floppy``, ``lun``. This is not an exhaustive list as it depends on the virtualization driver, and may change as more support is added. Leaving these empty is the most common thing to do. * boot_index - Defines the order in which a hypervisor will try devices when attempting to boot the guest from storage. Each device which is capable of being used as boot device should be given a unique boot index, starting from 0 in ascending order. Some hypervisors may not support booting from multiple devices, so will only consider the device with boot index of 0. Some hypervisors will support booting from multiple devices, but only if they are of different types - eg a disk and CD-ROM. Setting a negative value or None indicates that the device should not be used for booting. The simplest usage is to set it to 0 for the boot device and leave it as None for any other devices. * volume_type - Added in microversion 2.67 to the servers create API to support specifying volume type when booting instances. When we snapshot a volume-backed server, the block_device_mapping_v2 image metadata will include the volume_type from the BDM record so if the user then creates another server from that snapshot, the volume that nova creates from that snapshot will use the same volume_type. If a user wishes to change that volume type in the image metadata, they can do so via the image API. Valid source / destination combinations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Combination of the ``source_type`` and ``destination_type`` will define the kind of block device the entry is referring to. The following combinations are supported: * ``image`` -> ``local`` - this is only currently reserved for the entry referring to the Glance image that the instance is being booted with (it should also be marked as a boot device). It is also worth noting that an API request that specifies this, also has to provide the same Glance uuid as the ``image_ref`` parameter to the boot request (this is done for backwards compatibility and may be changed in the future). This functionality might be extended to specify additional Glance images to be attached to an instance after boot (similar to kernel/ramdisk images) but this functionality is not supported by any of the current drivers. * ``volume`` -> ``volume`` - this is just a Cinder volume to be attached to the instance. It can be marked as a boot device. 
* ``snapshot`` -> ``volume`` - this works exactly as passing ``type=snap``
  does. It would create a volume from a Cinder volume snapshot and attach that
  volume to the instance. Can be marked bootable.

* ``image`` -> ``volume`` - As one would imagine, this would download a Glance
  image to a cinder volume and attach it to an instance. Can also be marked as
  bootable. This is really only a shortcut for creating a volume out of an
  image before booting an instance with the newly created volume.

* ``blank`` -> ``volume`` - Creates a blank Cinder volume and attaches it. This
  will also require the volume size to be set.

* ``blank`` -> ``local`` - Depending on the guest_format field (see below),
  this will either mean an ephemeral blank disk on hypervisor local storage, or
  a swap disk (instances can have only one of those).

Nova will not allow mixing of BDMv1 and BDMv2 in a single request, and will do
basic validation to make sure that the requested block device mapping is valid
before accepting a boot request.

.. [1] In addition to the BlockDeviceMapping Nova object, we also have the
   BlockDeviceDict class in the :mod:`nova.block_device` module. This class
   handles transforming and validating the API BDM format.
.. [2] This work predates API microversions and thus the only way to add it was
   by means of an API extension.
.. [3] This is a feature that the EC2 API offers as well and has been in Nova
   for a long time, although it has been broken in several releases. More info
   can be found on `this bug `_

FAQs
----

1. Is it possible to configure nova to automatically use cinder to back all
   root disks with volumes?

   No, there is nothing automatic within nova that converts a
   non-:term:`boot-from-volume ` request into one that uses the image as a root
   volume. Several ideas have been discussed over time, which are captured in
   the spec for `volume-backed flavors`_.

   However, if you wish to force users to always create volume-backed servers,
   you can configure the API service by setting
   :oslo.config:option:`max_local_block_devices` to 0. This will result in any
   non-boot-from-volume server create request failing with a 400 response.

.. _volume-backed flavors: https://review.opendev.org/511965/

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/boot-instance-using-PXE.rst0000664000175000017500000000314200000000000022661 0ustar00zuulzuul00000000000000==========================
Boot an instance using PXE
==========================

Follow the steps below to boot an existing instance using PXE.

Create an image with iPXE
=========================

iPXE is open source boot firmware. See the documentation for more details:
https://ipxe.org/docs

Use the iPXE image as a rescue image
====================================

Boot the instance from the iPXE image using rescue.

Legacy instance rescue
----------------------

The ordering of disks is not guaranteed to be consistent.

.. code-block:: console

   $ openstack server rescue --image IPXE_IMAGE INSTANCE_NAME

Stable device instance rescue
-----------------------------

To preserve the ordering of disks when booting, use `stable device rescue`_.

#. Ensure that the ``hw_rescue_device`` (``cdrom`` | ``disk`` | ``floppy``)
   and/or the ``hw_rescue_bus`` (``scsi`` | ``virtio`` | ``ide`` | ``usb``)
   image properties are set on the image. For example:

   .. code-block:: console

      $ openstack image set --property hw_rescue_device=disk IPXE_IMAGE

   or:

   ..
code-block:: console $ openstack image set --property hw_rescue_bus=virtio IPXE_IMAGE or: .. code-block:: console $ openstack image set --property hw_rescue_device=disk \ --property hw_rescue_bus=virtio IPXE_IMAGE #. Run the rescue using the API microversion 2.87 or later: .. code-block:: console $ openstack --os-compute-api-version 2.87 server rescue \ --image IPXE_IMAGE INSTANCE_NAME .. _stable device rescue: https://docs.openstack.org/nova/latest/user/rescue.html#stable-device-instance-rescue ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/certificate-validation.rst0000664000175000017500000006612600000000000022764 0ustar00zuulzuul00000000000000Image Signature Certificate Validation ====================================== Nova can determine if the certificate used to generate and verify the signature of a signed image (see `Glance Image Signature Verification documentation`_) is trusted by the user. This feature is called certificate validation and can be applied to the creation or rebuild of an instance. Certificate validation is meant to be performed jointly with image signature verification but each feature has its own Nova configuration option, to be specified in the ``[glance]`` section of the ``nova.conf`` configuration file. To enable certificate validation, set :oslo.config:option:`glance.enable_certificate_validation` to True. To enable signature validation, set :oslo.config:option:`glance.verify_glance_signatures` to True. Conversely, to disable either of these features, set their option to False or do not include the option in the Nova configurations at all. Certificate validation operates in concert with signature validation in `Cursive`_. It takes in a list of trusted certificate IDs and verifies that the certificate used to sign the image being booted is cryptographically linked to at least one of the provided trusted certificates. This provides the user with confidence in the identity and integrity of the image being booted. Certificate validation will only be performed if image signature validation is enabled. However, the presence of trusted certificate IDs overrides the ``enable_certificate_validation`` and ``verify_glance_signatures`` settings. In other words, if a list of trusted certificate IDs is provided to the instance create or rebuild commands, signature verification and certificate validation will be performed, regardless of their settings in the Nova configurations. See `Using Signature Verification`_ for details. .. _Cursive: http://opendev.org/x/cursive/ .. _Glance Image Signature Verification documentation: https://docs.openstack.org/glance/latest/user/signature.html .. note:: Certificate validation configuration options must be specified in the Nova configuration file that controls the ``nova-osapi_compute`` and ``nova-compute`` services, as opposed to other Nova services (conductor, scheduler, etc.). Requirements ------------ Key manager that is a backend to the `Castellan Interface`_. Possible key managers are: * `Barbican`_ * `Vault`_ .. _Castellan Interface: https://docs.openstack.org/castellan/latest/ .. _Barbican: https://docs.openstack.org/barbican/latest/contributor/devstack.html .. _Vault: https://www.vaultproject.io/ Limitations ----------- * As of the 18.0.0 Rocky release, only the libvirt compute driver supports trusted image certification validation. 
The feature is not, however, driver specific so other drivers should be able to support this feature over time. See the `feature support matrix`_ for information on which drivers support the feature at any given release. * As of the 18.0.0 Rocky release, image signature and trusted image certification validation is not supported with the Libvirt compute driver when using the ``rbd`` image backend (``[libvirt]/images_type=rbd``) and ``RAW`` formatted images. This is due to the images being cloned directly in the ``RBD`` backend avoiding calls to download and verify on the compute. * As of the 18.0.0 Rocky release, trusted image certification validation is not supported with volume-backed (:term:`boot from volume `) instances. The block storage service support may be available in a future release: https://blueprints.launchpad.net/cinder/+spec/certificate-validate * Trusted image certification support can be controlled via `policy configuration`_ if it needs to be disabled. See the ``os_compute_api:servers:create:trusted_certs`` and ``os_compute_api:servers:rebuild:trusted_certs`` policy rules. .. _feature support matrix: https://docs.openstack.org/nova/latest/user/support-matrix.html#operation_trusted_certs .. _policy configuration: https://docs.openstack.org/nova/latest/configuration/policy.html Configuration ------------- Nova will use the key manager defined by the Castellan key manager interface, which is the Barbican key manager by default. To use a different key manager, update the ``backend`` value in the ``[key_manager]`` group of the nova configuration file. For example:: [key_manager] backend = barbican .. note:: If these lines do not exist, then simply add them to the end of the file. Using Signature Verification ---------------------------- An image will need a few properties for signature verification to be enabled: ``img_signature`` Signature of your image. Signature restrictions are: * 255 character limit ``img_signature_hash_method`` Method used to hash your signature. Possible hash methods are: * SHA-224 * SHA-256 * SHA-384 * SHA-512 ``img_signature_key_type`` Key type used for your image. Possible key types are: * RSA-PSS * DSA * ECC-CURVES * SECT571K1 * SECT409K1 * SECT571R1 * SECT409R1 * SECP521R1 * SECP384R1 ``img_signature_certificate_uuid`` UUID of the certificate that you uploaded to the key manager. Possible certificate types are: * X_509 Using Certificate Validation ---------------------------- Certificate validation is triggered by one of two ways: 1. The Nova configuration options ``verify_glance_signatures`` and ``enable_certificate_validation`` are both set to True:: [glance] verify_glance_signatures = True enable_certificate_validation = True 2. A list of trusted certificate IDs is provided by one of three ways: .. note:: The command line support is pending changes https://review.opendev.org/#/c/500396/ and https://review.opendev.org/#/c/501926/ to python-novaclient and python-openstackclient, respectively. Environment Variable Use the environment variable ``OS_TRUSTED_IMAGE_CERTIFICATE_IDS`` to define a comma-delimited list of trusted certificate IDs. For example: .. code-block:: console $ export OS_TRUSTED_IMAGE_CERTIFICATE_IDS=79a6ad17-3298-4e55-8b3a-1672dd93c40f,b20f5600-3c9d-4af5-8f37-3110df3533a0 Command-Line Flag If booting or rebuilding an instance using the :command:`nova` commands, use the ``--trusted-image-certificate-id`` flag to define a single trusted certificate ID. The flag may be used multiple times to specify multiple trusted certificate IDs. 
For example: .. code-block:: console $ nova boot myInstanceName \ --flavor 1 \ --image myImageId \ --trusted-image-certificate-id 79a6ad17-3298-4e55-8b3a-1672dd93c40f \ --trusted-image-certificate-id b20f5600-3c9d-4af5-8f37-3110df3533a0 If booting or rebuilding an instance using the :command:`openstack server` commands, use the ``--trusted-image-certificate-id`` flag to define a single trusted certificate ID. The flag may be used multiple times to specify multiple trusted certificate IDs. For example: .. code-block:: console $ openstack --os-compute-api-version=2.63 server create myInstanceName \ --flavor 1 \ --image myImageId \ --nic net-id=fd25c0b2-b36b-45a8-82e4-ab52516289e5 \ --trusted-image-certificate-id 79a6ad17-3298-4e55-8b3a-1672dd93c40f \ --trusted-image-certificate-id b20f5600-3c9d-4af5-8f37-3110df3533a0 Nova Configuration Option Use the Nova configuration option :oslo.config:option:`glance.default_trusted_certificate_ids` to define a comma-delimited list of trusted certificate IDs. This configuration value is only used if ``verify_glance_signatures`` and ``enable_certificate_validation`` options are set to True, and the trusted certificate IDs are not specified anywhere else. For example:: [glance] default_trusted_certificate_ids=79a6ad17-3298-4e55-8b3a-1672dd93c40f,b20f5600-3c9d-4af5-8f37-3110df3533a0 Example Usage ------------- For these instructions, we will construct a 4-certificate chain to illustrate that it is possible to have a single trusted root certificate. We will upload all four certificates to Barbican. Then, we will sign an image and upload it to Glance, which will illustrate image signature verification. Finally, we will boot the signed image from Glance to show that certificate validation is enforced. Enable certificate validation ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Enable image signature verification and certificate validation by setting both of their Nova configuration options to True:: [glance] verify_glance_signatures = True enable_certificate_validation = True Create a certificate chain ^^^^^^^^^^^^^^^^^^^^^^^^^^ As mentioned above, we will construct a 4-certificate chain to illustrate that it is possible to have a single trusted root certificate. Before we begin to build our certificate chain, we must first create files for OpenSSL to use for indexing and serial number tracking: .. code-block:: console $ touch index.txt $ echo '01' > serial.txt Create a certificate configuration file """"""""""""""""""""""""""""""""""""""" For these instructions, we will create a single configuration file called ``ca.conf``, which contains various sections that we can specify for use on the command-line during certificate requests and generation. Note that this certificate will be able to sign other certificates because it is a certificate authority. Also note the root CA's unique common name ("root"). The intermediate certificates' common names will be specified on the command-line when generating the corresponding certificate requests. ``ca.conf``:: [ req ] prompt = no distinguished_name = dn-param x509_extensions = ca_cert_extensions [ ca ] default_ca = ca_default [ dn-param ] C = US CN = Root CA [ ca_cert_extensions ] keyUsage = keyCertSign, digitalSignature basicConstraints = CA:TRUE, pathlen:2 [ ca_default ] new_certs_dir = . 
# Location for new certs after signing database = ./index.txt # Database index file serial = ./serial.txt # The current serial number default_days = 1000 default_md = sha256 policy = signing_policy email_in_dn = no [ intermediate_cert_extensions ] keyUsage = keyCertSign, digitalSignature basicConstraints = CA:TRUE, pathlen:1 [client_cert_extensions] keyUsage = keyCertSign, digitalSignature basicConstraints = CA:FALSE [ signing_policy ] countryName = optional stateOrProvinceName = optional localityName = optional organizationName = optional organizationalUnitName = optional commonName = supplied emailAddress = optional Generate the certificate authority (CA) and corresponding private key """"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" For these instructions, we will save the certificate as ``cert_ca.pem`` and the private key as ``key_ca.pem``. This certificate will be a self-signed root certificate authority (CA) that can sign other CAs and non-CA certificates. .. code-block:: console $ openssl req \ -x509 \ -nodes \ -newkey rsa:1024 \ -config ca.conf \ -keyout key_ca.pem \ -out cert_ca.pem Generating a 1024 bit RSA private key ............................++++++ ...++++++ writing new private key to 'key_ca.pem' ----- Create the first intermediate certificate """"""""""""""""""""""""""""""""""""""""" Create a certificate request for the first intermediate certificate. For these instructions, we will save the certificate request as ``cert_intermediate_a.csr`` and the private key as ``key_intermediate_a.pem``. .. code-block:: console $ openssl req \ -nodes \ -newkey rsa:2048 \ -subj '/CN=First Intermediate Certificate' \ -keyout key_intermediate_a.pem \ -out cert_intermediate_a.csr Generating a 2048 bit RSA private key .............................................................................................................+++ .....+++ writing new private key to 'key_intermediate_a.pem' ----- Generate the first intermediate certificate by signing its certificate request with the CA. For these instructions we will save the certificate as ``cert_intermediate_a.pem``. .. code-block:: console $ openssl ca \ -config ca.conf \ -extensions intermediate_cert_extensions \ -cert cert_ca.pem \ -keyfile key_ca.pem \ -out cert_intermediate_a.pem \ -infiles cert_intermediate_a.csr Using configuration from ca.conf Check that the request matches the signature Signature ok The Subject's Distinguished Name is as follows commonName :ASN.1 12:'First Intermediate Certificate' Certificate is to be certified until Nov 15 16:24:21 2020 GMT (1000 days) Sign the certificate? [y/n]:y 1 out of 1 certificate requests certified, commit? [y/n]y Write out database with 1 new entries Data Base Updated Create the second intermediate certificate """""""""""""""""""""""""""""""""""""""""" Create a certificate request for the second intermediate certificate. For these instructions, we will save the certificate request as ``cert_intermediate_b.csr`` and the private key as ``key_intermediate_b.pem``. .. code-block:: console $ openssl req \ -nodes \ -newkey rsa:2048 \ -subj '/CN=Second Intermediate Certificate' \ -keyout key_intermediate_b.pem \ -out cert_intermediate_b.csr Generating a 2048 bit RSA private key ..........+++ ............................................+++ writing new private key to 'key_intermediate_b.pem' ----- Generate the second intermediate certificate by signing its certificate request with the first intermediate certificate. 
For these instructions we will save the certificate as ``cert_intermediate_b.pem``. .. code-block:: console $ openssl ca \ -config ca.conf \ -extensions intermediate_cert_extensions \ -cert cert_intermediate_a.pem \ -keyfile key_intermediate_a.pem \ -out cert_intermediate_b.pem \ -infiles cert_intermediate_b.csr Using configuration from ca.conf Check that the request matches the signature Signature ok The Subject's Distinguished Name is as follows commonName :ASN.1 12:'Second Intermediate Certificate' Certificate is to be certified until Nov 15 16:25:42 2020 GMT (1000 days) Sign the certificate? [y/n]:y 1 out of 1 certificate requests certified, commit? [y/n]y Write out database with 1 new entries Data Base Updated Create the client certificate """"""""""""""""""""""""""""" Create a certificate request for the client certificate. For these instructions, we will save the certificate request as ``cert_client.csr`` and the private key as ``key_client.pem``. .. code-block:: console $ openssl req \ -nodes \ -newkey rsa:2048 \ -subj '/CN=Client Certificate' \ -keyout key_client.pem \ -out cert_client.csr Generating a 2048 bit RSA private key .............................................................................................................................+++ ..............................................................................................+++ writing new private key to 'key_client.pem' ----- Generate the client certificate by signing its certificate request with the second intermediate certificate. For these instructions we will save the certificate as ``cert_client.pem``. .. code-block:: console $ openssl ca \ -config ca.conf \ -extensions client_cert_extensions \ -cert cert_intermediate_b.pem \ -keyfile key_intermediate_b.pem \ -out cert_client.pem \ -infiles cert_client.csr Using configuration from ca.conf Check that the request matches the signature Signature ok The Subject's Distinguished Name is as follows commonName :ASN.1 12:'Client Certificate' Certificate is to be certified until Nov 15 16:26:46 2020 GMT (1000 days) Sign the certificate? [y/n]:y 1 out of 1 certificate requests certified, commit? [y/n]y Write out database with 1 new entries Data Base Updated Upload the generated certificates to the key manager ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In order interact with the key manager, the user needs to have a ``creator`` role. To list all users with a ``creator`` role, run the following command as an admin: .. code-block:: console $ openstack role assignment list --role creator --names +---------+-----------------------------+-------+-------------------+--------+-----------+ | Role | User | Group | Project | Domain | Inherited | +---------+-----------------------------+-------+-------------------+--------+-----------+ | creator | project_a_creator_2@Default | | project_a@Default | | False | | creator | project_b_creator@Default | | project_b@Default | | False | | creator | project_a_creator@Default | | project_a@Default | | False | +---------+-----------------------------+-------+-------------------+--------+-----------+ To give the ``demo`` user a ``creator`` role in the ``demo`` project, run the following command as an admin: .. code-block:: console $ openstack role add --user demo --project demo creator .. note:: This command provides no output. If the command fails, the user will see a "4xx Client error" indicating that "Secret creation attempt not allowed" and to "please review your user/project privileges". .. 
note:: The following "openstack secret" commands require that the `python-barbicanclient `_ package is installed. .. code-block:: console $ openstack secret store \ --name CA \ --algorithm RSA \ --expiration 2018-06-29 \ --secret-type certificate \ --payload-content-type "application/octet-stream" \ --payload-content-encoding base64 \ --payload "$(base64 cert_ca.pem)" $ openstack secret store \ --name IntermediateA \ --algorithm RSA \ --expiration 2018-06-29 \ --secret-type certificate \ --payload-content-type "application/octet-stream" \ --payload-content-encoding base64 \ --payload "$(base64 cert_intermediate_a.pem)" $ openstack secret store \ --name IntermediateB \ --algorithm RSA \ --expiration 2018-06-29 \ --secret-type certificate \ --payload-content-type "application/octet-stream" \ --payload-content-encoding base64 \ --payload "$(base64 cert_intermediate_b.pem)" $ openstack secret store \ --name Client \ --algorithm RSA \ --expiration 2018-06-29 \ --secret-type certificate \ --payload-content-type "application/octet-stream" \ --payload-content-encoding base64 \ --payload "$(base64 cert_client.pem)" The responses should look something like this: .. code-block:: console +---------------+------------------------------------------------------------------------------+ | Field | Value | +---------------+------------------------------------------------------------------------------+ | Secret href | http://127.0.0.1/key-manager/v1/secrets/8fbcce5d-d646-4295-ba8a-269fc9451eeb | | Name | CA | | Created | None | | Status | None | | Content types | {u'default': u'application/octet-stream'} | | Algorithm | RSA | | Bit length | 256 | | Secret type | certificate | | Mode | cbc | | Expiration | 2018-06-29T00:00:00+00:00 | +---------------+------------------------------------------------------------------------------+ Save off the certificate UUIDs (found in the secret href): .. code-block:: console $ cert_ca_uuid=8fbcce5d-d646-4295-ba8a-269fc9451eeb $ cert_intermediate_a_uuid=0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8 $ cert_intermediate_b_uuid=674736e3-f25c-405c-8362-bbf991e0ce0a $ cert_client_uuid=125e6199-2de4-46e3-b091-8e2401ef0d63 Create a signed image ^^^^^^^^^^^^^^^^^^^^^ For these instructions, we will download a small CirrOS image: .. code-block:: console $ wget -nc -O cirros.tar.gz http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-source.tar.gz --2018-02-19 11:37:52-- http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-source.tar.gz Resolving download.cirros-cloud.net (download.cirros-cloud.net)... 64.90.42.85 Connecting to download.cirros-cloud.net (download.cirros-cloud.net)|64.90.42.85|:80... connected. HTTP request sent, awaiting response... 200 OK Length: 434333 (424K) [application/x-tar] Saving to: ‘cirros.tar.gz’ cirros.tar.gz 100%[===================>] 424.15K --.-KB/s in 0.1s 2018-02-19 11:37:54 (3.79 MB/s) - ‘cirros.tar.gz’ saved [434333/434333] Sign the image with the generated client private key: .. code-block:: console $ openssl dgst \ -sha256 \ -sign key_client.pem \ -sigopt rsa_padding_mode:pss \ -out cirros.self_signed.signature \ cirros.tar.gz .. note:: This command provides no output. Save off the base64 encoded signature: .. code-block:: console $ base64_signature=$(base64 -w 0 cirros.self_signed.signature) Upload the signed image to Glance: .. 
code-block:: console $ openstack image create \ --public \ --container-format bare \ --disk-format qcow2 \ --property img_signature="$base64_signature" \ --property img_signature_certificate_uuid="$cert_client_uuid" \ --property img_signature_hash_method='SHA-256' \ --property img_signature_key_type='RSA-PSS' \ --file cirros.tar.gz \ cirros_client_signedImage +------------------+------------------------------------------------------------------------+ | Field | Value | +------------------+------------------------------------------------------------------------+ | checksum | d41d8cd98f00b204e9800998ecf8427e | | container_format | bare | | created_at | 2019-02-06T06:29:56Z | | disk_format | qcow2 | | file | /v2/images/17f48a6c-e592-446e-9c91-00fbc436d47e/file | | id | 17f48a6c-e592-446e-9c91-00fbc436d47e | | min_disk | 0 | | min_ram | 0 | | name | cirros_client_signedImage | | owner | 45e13e63606f40d6b23275c3cd91aec2 | | properties | img_signature='swA/hZi3WaNh35VMGlnfGnBWuXMlUbdO8h306uG7W3nwOyZP6dGRJ3 | | | Xoi/07Bo2dMUB9saFowqVhdlW5EywQAK6vgDsi9O5aItHM4u7zUPw+2e8eeaIoHlGhTks | | | kmW9isLy0mYA9nAfs3coChOIPXW4V8VgVXEfb6VYGHWm0nShiAP1e0do9WwitsE/TVKoS | | | QnWjhggIYij5hmUZ628KAygPnXklxVhqPpY/dFzL+tTzNRD0nWAtsc5wrl6/8HcNzZsaP | | | oexAysXJtcFzDrf6UQu66D3UvFBVucRYL8S3W56It3Xqu0+InLGaXJJpNagVQBb476zB2 | | | ZzZ5RJ/4Zyxw==', | | | img_signature_certificate_uuid='125e6199-2de4-46e3-b091-8e2401ef0d63', | | | img_signature_hash_method='SHA-256', | | | img_signature_key_type='RSA-PSS', | | | os_hash_algo='sha512', | | | os_hash_value='cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a92 | | | 1d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927d | | | a3e', | | | os_hidden='False' | | protected | False | | schema | /v2/schemas/image | | size | 0 | | status | active | | tags | | | updated_at | 2019-02-06T06:29:56Z | | virtual_size | None | | visibility | public | +------------------+------------------------------------------------------------------------+ .. note:: Creating the image can fail if validation does not succeed. This will cause the image to be deleted and the Glance log to report that "Signature verification failed" for the given image ID. Boot the signed image ^^^^^^^^^^^^^^^^^^^^^ Boot the signed image without specifying trusted certificate IDs: .. code-block:: console $ nova boot myInstance \ --flavor m1.tiny \ --image cirros_client_signedImage .. note:: The instance should fail to boot because certificate validation fails when the feature is enabled but no trusted image certificates are provided. The Nova log output should indicate that "Image signature certificate validation failed" because "Certificate chain building failed". Boot the signed image with trusted certificate IDs: .. code-block:: console $ nova boot myInstance \ --flavor m1.tiny \ --image cirros_client_signedImage \ --trusted-image-certificate-id $cert_ca_uuid,$cert_intermediate_a_uuid \ --trusted-image-certificate-id $cert_intermediate_b_uuid .. note:: The instance should successfully boot and certificate validation should succeed. The Nova log output should indicate that "Image signature certificate validation succeeded". 
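Verify the chain and signature locally (optional)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If certificate validation fails and you want to rule out problems with the
certificates or the signature themselves, you can sanity-check them locally
with OpenSSL before digging into the Nova and Cursive logs. This is an
optional, illustrative check only; it is not performed or required by Nova, and
it assumes the file names used earlier in this example. On success the two
verification commands should report ``OK`` and ``Verified OK`` respectively:

.. code-block:: console

   $ cat cert_intermediate_a.pem cert_intermediate_b.pem > intermediates.pem
   $ openssl verify -CAfile cert_ca.pem -untrusted intermediates.pem cert_client.pem
   cert_client.pem: OK
   $ openssl x509 -in cert_client.pem -pubkey -noout > pubkey_client.pem
   $ openssl dgst \
       -sha256 \
       -verify pubkey_client.pem \
       -sigopt rsa_padding_mode:pss \
       -signature cirros.self_signed.signature \
       cirros.tar.gz
   Verified OK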
Other Links
-----------

* https://etherpad.openstack.org/p/mitaka-glance-image-signing-instructions
* https://etherpad.openstack.org/p/queens-nova-certificate-validation
* https://wiki.openstack.org/wiki/OpsGuide/User-Facing_Operations
* http://specs.openstack.org/openstack/nova-specs/specs/rocky/approved/nova-validate-certificates.html
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/feature-classification.rst0000664000175000017500000001422600000000000022770 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not
   use this file except in compliance with the License. You may obtain a copy
   of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
   by applicable law or agreed to in writing, software distributed under the
   License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
   OF ANY KIND, either express or implied. See the License for the specific
   language governing permissions and limitations under the License.

======================
Feature Classification
======================

This document presents a matrix that describes which features are ready to be
used and which features are works in progress. It includes links to relevant
documentation and functional tests.

.. warning:: Please note: this is a work in progress!

Aims
====

Users want reliable, long-term solutions for their use cases. The feature
classification matrix identifies which features are complete and ready to use,
and which should be used with caution.

The matrix also benefits developers by providing a list of features that
require further work to be considered complete.

Below is a matrix for a selection of important verticals:

* :ref:`matrix-gp`
* :ref:`matrix-nfv`
* :ref:`matrix-hpc`

For more details on the concepts in each matrix, please see
:ref:`notes-on-concepts`.

.. _matrix-gp:

General Purpose Cloud Features
===============================

This is a summary of the key features that dev/test clouds and other similar
general purpose clouds need, and it describes their current state.

Below there are sections on NFV and HPC specific features. These look at
specific features and scenarios that are important to those more specific sets
of use cases.

.. feature_matrix:: feature-matrix-gp.ini

.. _matrix-nfv:

NFV Cloud Features
==================

Network Function Virtualization (NFV) is about virtualizing network node
functions into building blocks that may connect, or chain together, to create
a particular service. These workloads commonly need bare-metal-like
performance, i.e. low latency and close to line-speed performance.

.. include:: /common/numa-live-migration-warning.txt

.. feature_matrix:: feature-matrix-nfv.ini

.. _matrix-hpc:

HPC Cloud Features
==================

High Performance Compute (HPC) clouds have some specific needs that are
covered in this set of features.

.. feature_matrix:: feature-matrix-hpc.ini

.. _notes-on-concepts:

Notes on Concepts
=================

This document uses the following terminology.

Users
-----

These are the users we talk about in this document:

application deployer
  creates and deletes servers, directly or indirectly using an API

application developer
  creates images and apps that run on the cloud

cloud operator
  administers the cloud

self service administrator
  runs and uses the cloud

.. note::

   This is not an exhaustive list of personas, but rather an indicative set of
   users.
Feature Group ------------- To reduce the size of the matrix, we organize the features into groups. Each group maps to a set of user stories that can be validated by a set of scenarios and tests. Typically, this means a set of tempest tests. This list focuses on API concepts like attach and detach volumes, rather than deployment specific concepts like attach an iSCSI volume to a KVM based VM. Deployment ---------- A deployment maps to a specific test environment. We provide a full description of the environment, so it is possible to reproduce the reported test results for each of the Feature Groups. This description includes all aspects of the deployment, for example the hypervisor, number of nova-compute services, storage, network driver, and types of images being tested. Feature Group Maturity ----------------------- The Feature Group Maturity rating is specific to the API concepts, rather than specific to a particular deployment. That detail is covered in the deployment rating for each feature group. .. note:: Although having some similarities, this list is not directly related to the Interop effort. **Feature Group ratings:** Incomplete Incomplete features are those that do not have enough functionality to satisfy real world use cases. Experimental Experimental features should be used with extreme caution. They are likely to have little or no upstream testing, and are therefore likely to contain bugs. Complete For a feature to be considered complete, it must have: * complete API docs (concept and REST call definition) * complete Administrator docs * tempest tests that define if the feature works correctly * sufficient functionality and reliability to be useful in real world scenarios * a reasonable expectation that the feature will be supported long-term Complete and Required There are various reasons why a complete feature may be required, but generally it is when all drivers support that feature. New drivers need to prove they support all required features before they are allowed in upstream Nova. Required features are those that any new technology must support before being allowed into tree. The larger the list, the more features are available on all Nova based clouds. Deprecated Deprecated features are those that are scheduled to be removed in a future major release of Nova. If a feature is marked as complete, it should never be deprecated. If a feature is incomplete or experimental for several releases, it runs the risk of being deprecated and later removed from the code base. Deployment Rating for a Feature Group -------------------------------------- The deployment rating refers to the state of the tests for each Feature Group on a particular deployment. **Deployment ratings:** Unknown No data is available. Not Implemented No tests exist. Implemented Self declared that the tempest tests pass. Regularly Tested Tested by third party CI. Checked Tested as part of the check or gate queue. The eventual goal is to automate this list from a third party CI reporting system, but currently we document manual inspections in an ini file. Ideally, we will review the list at every milestone. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/feature-matrix-gp.ini0000664000175000017500000002564500000000000021663 0ustar00zuulzuul00000000000000# # Lists all the CI jobs as targets # [target.libvirt-kvm] title=libvirt+kvm (x86 & ppc64) link=http://docs.openstack.org/infra/manual/developers.html#project-gating [target.libvirt-kvm-s390] title=libvirt+kvm (s390x) link=http://docs.openstack.org/infra/manual/developers.html#project-gating [target.libvirt-virtuozzo-ct] title=libvirt+virtuozzo CT link=https://wiki.openstack.org/wiki/ThirdPartySystems/Virtuozzo_CI [target.libvirt-virtuozzo-vm] title=libvirt+virtuozzo VM link=https://wiki.openstack.org/wiki/ThirdPartySystems/Virtuozzo_Storage_CI [target.vmware] title=VMware CI link=https://wiki.openstack.org/wiki/NovaVMware/Minesweeper [target.zvm] title=IBM zVM CI link=https://wiki.openstack.org/wiki/ThirdPartySystems/IBM_z/VM_CI [target.ironic] title=Ironic CI link= # # Lists all features # # Includes information on the feature, its maturity, status, # links to admin docs, api docs and tempest test uuids. # # It then lists the current state for each of the about CI jobs. # It is hoped this mapping will eventually be automated. # # This doesn't include things like Server metadata, Server tagging, # or Lock Server, or Keypair CRUD as they can all be tested independently # of the nova virt driver used. # [operation.create-delete-server] title=Create Server and Delete Server notes=This includes creating a server, and deleting a server. Specifically this is about booting a server from a glance image using the default disk and network configuration. maturity=complete api_doc_link=https://docs.openstack.org/api-ref/compute/#servers-servers admin_doc_link=https://docs.openstack.org/nova/latest/user/launch-instances.html tempest_test_uuids=9a438d88-10c6-4bcd-8b5b-5b6e25e1346f;585e934c-448e-43c4-acbf-d06a9b899997 libvirt-kvm=complete libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=partial driver-notes-libvirt-virtuozzo-ct=This is not tested in a CI system, but it is implemented. libvirt-virtuozzo-vm=partial driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is implemented. vmware=complete ironic=unknown zvm=complete [operation.snapshot-server] title=Snapshot Server notes=This is creating a glance image from the currently running server. maturity=complete api_doc_link=https://docs.openstack.org/api-ref/compute/?expanded=#servers-run-an-action-servers-action admin_doc_link=https://docs.openstack.org/glance/latest/admin/troubleshooting.html tempest_test_uuids=aaacd1d0-55a2-4ce8-818a-b5439df8adc9 cli= libvirt-kvm=complete libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=partial driver-notes-libvirt-virtuozzo-ct=This is not tested in a CI system, but it is implemented. libvirt-virtuozzo-vm=partial driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is implemented. vmware=unknown ironic=unknown zvm=complete [operation.power-ops] title=Server power ops notes=This includes reboot, shutdown and start. maturity=complete api_doc_link=https://docs.openstack.org/api-ref/compute/?expanded=#servers-run-an-action-servers-action tempest_test_uuids=2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32;af8eafd4-38a7-4a4b-bdbc-75145a580560 cli= libvirt-kvm=complete libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=partial driver-notes-libvirt-virtuozzo-ct=This is not tested in a CI system, but it is implemented. 
libvirt-virtuozzo-vm=partial driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is implemented. vmware=complete ironic=unknown zvm=complete [operation.rebuild-server] title=Rebuild Server notes=You can rebuild a server, optionally specifying the glance image to use. maturity=complete api_doc_link=https://docs.openstack.org/api-ref/compute/?expanded=#servers-run-an-action-servers-action tempest_test_uuids=aaa6cdf3-55a7-461a-add9-1c8596b9a07c cli= libvirt-kvm=complete libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=partial driver-notes-libvirt-virtuozzo-ct=This is not tested in a CI system, but it is implemented. libvirt-virtuozzo-vm=partial driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is implemented. vmware=complete ironic=unknown zvm=missing [operation.resize-server] title=Resize Server notes=You resize a server to a new flavor, then confirm or revert that operation. maturity=complete api_doc_link=https://docs.openstack.org/api-ref/compute/?expanded=#servers-run-an-action-servers-action tempest_test_uuids=1499262a-9328-4eda-9068-db1ac57498d2 cli= libvirt-kvm=complete libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=complete libvirt-virtuozzo-vm=partial driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is implemented. vmware=complete ironic=unknown zvm=missing [operation.server-volume-ops] title=Volume Operations notes=This is about attaching volumes, detaching volumes. maturity=complete api_doc_link=https://docs.openstack.org/api-ref/compute/#servers-with-volume-attachments-servers-os-volume-attachments admin_doc_link=https://docs.openstack.org/cinder/latest/admin/blockstorage-manage-volumes.html tempest_test_uuids=fff42874-7db5-4487-a8e1-ddda5fb5288d cli= libvirt-kvm=complete libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=complete libvirt-virtuozzo-vm=complete vmware=complete ironic=missing zvm=missing [operation.server-bdm] title=Custom disk configurations on boot notes=This is about supporting all the features of BDMv2. This includes booting from a volume, in various ways, and specifying a custom set of ephemeral disks. Note some drivers only supports part of what the API allows. maturity=complete api_doc_link=https://docs.openstack.org/api-ref/compute/?expanded=create-image-createimage-action-detail#create-server admin_doc_link=https://docs.openstack.org/nova/latest/user/block-device-mapping.html tempest_test_uuids=557cd2c2-4eb8-4dce-98be-f86765ff311b, 36c34c67-7b54-4b59-b188-02a2f458a63b cli= libvirt-kvm=complete libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=missing libvirt-virtuozzo-vm=complete vmware=partial driver-notes-vmware=This is not tested in a CI system, but it is implemented. ironic=missing zvm=missing [operation.server-neutron] title=Custom neutron configurations on boot notes=This is about supporting booting from one or more neutron ports, or all the related short cuts such as booting a specified network. This does not include SR-IOV or similar, just simple neutron ports. maturity=complete api_doc_link=https://docs.openstack.org/api-ref/compute/?&expanded=create-server-detail admin_doc_link= tempest_test_uuids=2f3a0127-95c7-4977-92d2-bc5aec602fb4 cli= libvirt-kvm=complete libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=unknown libvirt-virtuozzo-vm=unknown vmware=partial driver-notes-vmware=This is not tested in a CI system, but it is implemented. ironic=missing zvm=partial driver-notes-zvm=This is not tested in a CI system, but it is implemented. 
[operation.server-pause] title=Pause a Server notes=This is pause and unpause a server, where the state is held in memory. maturity=complete api_doc_link=https://docs.openstack.org/api-ref/compute/?#pause-server-pause-action admin_doc_link= tempest_test_uuids=bd61a9fd-062f-4670-972b-2d6c3e3b9e73 cli= libvirt-kvm=complete libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=missing libvirt-virtuozzo-vm=partial driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is implemented. vmware=partial driver-notes-vmware=This is not tested in a CI system, but it is implemented. ironic=missing zvm=complete [operation.server-suspend] title=Suspend a Server notes=This suspend and resume a server, where the state is held on disk. maturity=complete api_doc_link=https://docs.openstack.org/api-ref/compute/?expanded=suspend-server-suspend-action-detail admin_doc_link= tempest_test_uuids=0d8ee21e-b749-462d-83da-b85b41c86c7f cli= libvirt-kvm=complete libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=partial driver-notes-libvirt-virtuozzo-ct=This is not tested in a CI system, but it is implemented. libvirt-virtuozzo-vm=partial driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is implemented. vmware=complete ironic=missing zvm=missing [operation.server-consoleoutput] title=Server console output notes=This gets the current server console output. maturity=complete api_doc_link=https://docs.openstack.org/api-ref/compute/#show-console-output-os-getconsoleoutput-action admin_doc_link= tempest_test_uuids=4b8867e6-fffa-4d54-b1d1-6fdda57be2f3 cli= libvirt-kvm=complete libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=unknown libvirt-virtuozzo-vm=unknown vmware=partial driver-notes-vmware=This is not tested in a CI system, but it is implemented. ironic=missing zvm=complete [operation.server-rescue] title=Server Rescue notes=This boots a server with a new root disk from the specified glance image to allow a user to fix a boot partition configuration, or similar. maturity=complete api_doc_link=https://docs.openstack.org/api-ref/compute/#rescue-server-rescue-action admin_doc_link= tempest_test_uuids=fd032140-714c-42e4-a8fd-adcd8df06be6;70cdb8a1-89f8-437d-9448-8844fd82bf46 cli= libvirt-kvm=complete libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=partial driver-notes-libvirt-virtuozzo-ct=This is not tested in a CI system, but it is implemented. libvirt-virtuozzo-vm=complete vmware=complete ironic=missing zvm=missing [operation.server-configdrive] title=Server Config Drive notes=This ensures the user data provided by the user when booting a server is available in one of the expected config drive locations. maturity=complete api_doc_link=https://docs.openstack.org/api-ref/compute/#create-server admin_doc_link=https://docs.openstack.org/nova/latest/admin/config-drive.html tempest_test_uuids=7fff3fb3-91d8-4fd0-bd7d-0204f1f180ba cli= libvirt-kvm=complete libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=missing libvirt-virtuozzo-vm=partial driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is implemented. vmware=complete ironic=partial driver-notes-ironic=This is not tested in a CI system, but it is implemented. zvm=complete [operation.server-changepassword] title=Server Change Password notes=The ability to reset the password of a user within the server. 
maturity=experimental api_doc_link=https://docs.openstack.org/api-ref/compute/#change-administrative-password-changepassword-action admin_doc_link= tempest_test_uuids=6158df09-4b82-4ab3-af6d-29cf36af858d cli= libvirt-kvm=partial driver-notes-libvirt-kvm=This is not tested in a CI system, but it is implemented. libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=missing libvirt-virtuozzo-vm=missing vmware=missing ironic=missing zvm=missing [operation.server-shelve] title=Server Shelve and Unshelve notes=The ability to keep a server logically alive, but not using any cloud resources. For local disk based instances, this involves taking a snapshot, called offloading. maturity=complete api_doc_link=https://docs.openstack.org/api-ref/compute/#shelve-server-shelve-action admin_doc_link= tempest_test_uuids=1164e700-0af0-4a4c-8792-35909a88743c,c1b6318c-b9da-490b-9c67-9339b627271f cli= libvirt-kvm=complete libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=missing libvirt-virtuozzo-vm=complete vmware=missing ironic=missing zvm=missing ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/feature-matrix-hpc.ini0000664000175000017500000000405300000000000022015 0ustar00zuulzuul00000000000000[target.libvirt-kvm] title=libvirt+kvm (x86 & ppc64) link=http://docs.openstack.org/infra/manual/developers.html#project-gating [target.libvirt-kvm-s390] title=libvirt+kvm (s390x) link=http://docs.openstack.org/infra/manual/developers.html#project-gating [target.libvirt-virtuozzo-ct] title=libvirt+virtuozzo CT link=https://wiki.openstack.org/wiki/ThirdPartySystems/Virtuozzo_CI [target.libvirt-virtuozzo-vm] title=libvirt+virtuozzo VM link=https://wiki.openstack.org/wiki/ThirdPartySystems/Virtuozzo_Storage_CI [target.vmware] title=VMware CI link=https://wiki.openstack.org/wiki/NovaVMware/Minesweeper [target.ironic] title=Ironic link=http://docs.openstack.org/infra/manual/developers.html#project-gating [operation.gpu-passthrough] title=GPU Passthrough notes=The PCI passthrough feature in OpenStack allows full access and direct control of a physical PCI device in guests. This mechanism is generic for any devices that can be attached to a PCI bus. Correct driver installation is the only requirement for the guest to properly use the devices. maturity=experimental api_doc_link=https://docs.openstack.org/api-ref/compute/#create-server admin_doc_link=https://docs.openstack.org/nova/latest/admin/pci-passthrough.html tempest_test_uuids=9a438d88-10c6-4bcd-8b5b-5b6e25e1346f;585e934c-448e-43c4-acbf-d06a9b899997 libvirt-kvm=complete:l libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=partial driver-notes-libvirt-virtuozzo-ct=This is not tested in a CI system, but it is implemented. libvirt-virtuozzo-vm=partial driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is implemented. 
vmware=missing ironic=unknown [operation.virtual-gpu] title=Virtual GPUs notes=Attach a virtual GPU to an instance at server creation time maturity=experimental api_doc_link=https://docs.openstack.org/api-ref/compute/#create-server admin_doc_link=https://docs.openstack.org/nova/latest/admin/virtual-gpu.html libvirt-kvm=partial:queens libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=unknown libvirt-virtuozzo-vm=unknown vmware=missing ironic=missing ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/feature-matrix-nfv.ini0000664000175000017500000000343500000000000022037 0ustar00zuulzuul00000000000000# # Lists all the CI jobs as targets # [target.libvirt-kvm] title=libvirt+kvm (x86 & ppc64) link=http://docs.openstack.org/infra/manual/developers.html#project-gating [target.libvirt-kvm-s390] title=libvirt+kvm (s390x) link=http://docs.openstack.org/infra/manual/developers.html#project-gating # # Lists all features # # Includes information on the feature, its maturity, status, # links to admin docs, api docs and tempest test uuids. # # It then lists the current state for each of the about CI jobs. # It is hoped this mapping will eventually be automated. # [operation.numa-placement] title=NUMA Placement notes=Configure placement of instance vCPUs and memory across host NUMA nodes maturity=experimental api_doc_link=https://docs.openstack.org/api-ref/compute/#create-server admin_doc_link=https://docs.openstack.org/nova/latest/admin/cpu-topologies.html#customizing-instance-cpu-pinning-policies tempest_test_uuids=9a438d88-10c6-4bcd-8b5b-5b6e25e1346f;585e934c-448e-43c4-acbf-d06a9b899997 libvirt-kvm=partial libvirt-kvm-s390=unknown [operation.cpu-pinning-policy] title=CPU Pinning Policy notes=Enable/disable binding of instance vCPUs to host CPUs maturity=experimental api_doc_link=https://docs.openstack.org/api-ref/compute/#create-server admin_doc_link=https://docs.openstack.org/nova/latest/admin/cpu-topologies.html#customizing-instance-cpu-pinning-policies libvirt-kvm=partial libvirt-kvm-s390=unknown [operation.cpu-pinning-thread-policy] title=CPU Pinning Thread Policy notes=Configure usage of host hardware threads when pinning is used maturity=experimental api_doc_link=https://docs.openstack.org/api-ref/compute/#create-server admin_doc_link=https://docs.openstack.org/nova/latest/admin/cpu-topologies.html#customizing-instance-cpu-pinning-policies libvirt-kvm=partial libvirt-kvm-s390=unknown ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/flavors.rst0000664000175000017500000002310400000000000020013 0ustar00zuulzuul00000000000000======= Flavors ======= In OpenStack, flavors define the compute, memory, and storage capacity of nova computing instances. To put it simply, a flavor is an available hardware configuration for a server. It defines the *size* of a virtual server that can be launched. .. note:: Flavors can also determine on which compute host a flavor can be used to launch an instance. For information about customizing flavors, refer to :doc:`/admin/flavors`. Overview -------- A flavor consists of the following parameters: Flavor ID Unique ID (integer or UUID) for the new flavor. This property is required. If specifying 'auto', a UUID will be automatically generated. Name Name for the new flavor. This property is required. Historically, names were given a format ``XX.SIZE_NAME``. 
This format is typically not required, though some third party tools may rely
on it.

VCPUs
  Number of virtual CPUs to use. This property is required.

Memory MB
  Amount of RAM to use (in megabytes). This property is required.

Root Disk GB
  Amount of disk space (in gigabytes) to use for the root (``/``) partition.
  This property is required.

  The root disk is an ephemeral disk that the base image is copied into. When
  booting from a persistent volume it is not used. The ``0`` size is a special
  case which uses the native base image size as the size of the ephemeral root
  volume. However, in this case the scheduler cannot select the compute host
  based on the virtual image size. As a result, ``0`` should only be used for
  volume-booted instances or for testing purposes.

  Volume-backed instances can be enforced for flavors with zero root disk via
  the ``os_compute_api:servers:create:zero_disk_flavor`` policy rule.

Ephemeral Disk GB
  Amount of disk space (in gigabytes) to use for the ephemeral partition. This
  property is optional. If unspecified, the value is ``0`` by default.

  Ephemeral disks offer machine local disk storage linked to the lifecycle of a
  VM instance. When a VM is terminated, all data on the ephemeral disk is lost.
  Ephemeral disks are not included in any snapshots.

Swap
  Amount of swap space (in megabytes) to use. This property is optional. If
  unspecified, the value is ``0`` by default.

RXTX Factor (DEPRECATED)
  This value was only applicable when using the ``xen`` compute driver with the
  ``nova-network`` network driver. Since ``nova-network`` has been removed,
  this no longer applies and should not be specified. It will likely be removed
  in a future release. ``neutron`` users should refer to the
  :neutron-doc:`neutron QoS documentation `

Is Public
  Boolean value that defines whether the flavor is available to all users or
  private to the project it was created in. This property is optional. If
  unspecified, the value is ``True`` by default.

  By default, a flavor is public and available to all projects. Private flavors
  are only accessible to those on the access list for a given project and are
  invisible to other projects.

Extra Specs
  Key and value pairs that define on which compute nodes a flavor can run.
  These are optional. Extra specs are generally used as scheduler hints for
  more advanced instance configuration. The key-value pairs used must
  correspond to well-known options. For more information on the standardized
  extra specs available, :ref:`see below `

Description
  A free form description of the flavor. Limited to 65535 characters in length.
  Only printable characters are allowed. Available starting in microversion
  2.55.

.. _flavors-extra-specs:

Extra Specs
~~~~~~~~~~~

.. todo::

   This is now documented in :doc:`/configuration/extra-specs`, so this should
   be removed and the documentation moved to those specs.

.. _extra-specs-hardware-video-ram:

Hardware video RAM
  Specify ``hw_video:ram_max_mb`` to control the maximum RAM for the video
  image. Used in conjunction with the ``hw_video_ram`` image property.
  ``hw_video_ram`` must be less than or equal to ``hw_video:ram_max_mb``.

  This is currently supported by the libvirt and the vmware drivers.

  See https://libvirt.org/formatdomain.html#elementsVideo for more information
  on how this is used to set the ``vram`` attribute with the libvirt driver.

  See
  https://pubs.vmware.com/vi-sdk/visdk250/ReferenceGuide/vim.vm.device.VirtualVideoCard.html
  for more information on how this is used to set the ``videoRamSizeInKB``
  attribute with the vmware driver.

..
_extra-specs-secure-boot: Secure Boot :doc:`Secure Boot ` can help ensure the bootloader used for your instances is trusted, preventing a possible attack vector. .. code:: console $ openstack flavor set FLAVOR-NAME \ --property os:secure_boot=SECURE_BOOT_OPTION Valid ``SECURE_BOOT_OPTION`` values are: - ``required``: Enable Secure Boot for instances running with this flavor. - ``disabled`` or ``optional``: (default) Disable Secure Boot for instances running with this flavor. .. note:: Supported by the libvirt driver. .. versionchanged:: 23.0.0 (Wallaby) Added support for secure boot to the libvirt driver. .. _extra-specs-required-resources: Custom resource classes and standard resource classes to override Specify custom resource classes to require or override quantity values of standard resource classes. The syntax of the extra spec is ``resources:=VALUE`` (``VALUE`` is integer). The name of custom resource classes must start with ``CUSTOM_``. Standard resource classes to override are ``VCPU``, ``MEMORY_MB`` or ``DISK_GB``. In this case, you can disable scheduling based on standard resource classes by setting the value to ``0``. For example: - ``resources:CUSTOM_BAREMETAL_SMALL=1`` - ``resources:VCPU=0`` See :ironic-doc:`Create flavors for use with the Bare Metal service ` for more examples. .. versionadded:: 16.0.0 (Pike) .. _extra-specs-required-traits: Required traits Required traits allow specifying a server to build on a compute node with the set of traits specified in the flavor. The traits are associated with the resource provider that represents the compute node in the Placement API. See the resource provider traits API reference for more details: https://docs.openstack.org/api-ref/placement/#resource-provider-traits The syntax of the extra spec is ``trait:=required``, for example: - ``trait:HW_CPU_X86_AVX2=required`` - ``trait:STORAGE_DISK_SSD=required`` The scheduler will pass required traits to the ``GET /allocation_candidates`` endpoint in the Placement API to include only resource providers that can satisfy the required traits. In 17.0.0 the only valid value is ``required``. In 18.0.0 ``forbidden`` is added (see below). Any other value will be considered invalid. Traits can be managed using the `osc-placement plugin`__. __ https://docs.openstack.org/osc-placement/latest/index.html .. versionadded:: 17.0.0 (Queens) .. _extra-specs-forbidden-traits: Forbidden traits Forbidden traits are similar to required traits, described above, but instead of specifying the set of traits that must be satisfied by a compute node, forbidden traits must **not** be present. The syntax of the extra spec is ``trait:=forbidden``, for example: - ``trait:HW_CPU_X86_AVX2=forbidden`` - ``trait:STORAGE_DISK_SSD=forbidden`` Traits can be managed using the `osc-placement plugin`__. __ https://docs.openstack.org/osc-placement/latest/index.html .. versionadded:: 18.0.0 (Rocky) .. _extra-specs-numbered-resource-groupings: Numbered groupings of resource classes and traits Specify numbered groupings of resource classes and traits. The syntax is as follows (``N`` and ``VALUE`` are integers): .. parsed-literal:: resources\ *N*:**\ =\ *VALUE* trait\ *N*:**\ =required A given numbered ``resources`` or ``trait`` key may be repeated to specify multiple resources/traits in the same grouping, just as with the un-numbered syntax. 
Specify inter-group affinity policy via the ``group_policy`` key, which may have the following values: * ``isolate``: Different numbered request groups will be satisfied by *different* providers. * ``none``: Different numbered request groups may be satisfied by different providers *or* common providers. .. note:: If more than one group is specified then the ``group_policy`` is mandatory in the request. However such groups might come from other sources than flavor extra_spec (e.g. from Neutron ports with QoS minimum bandwidth policy). If the flavor does not specify any groups and ``group_policy`` but more than one group is coming from other sources then nova will default the ``group_policy`` to ``none`` to avoid scheduler failure. For example, to create a server with the following VFs: * One SR-IOV virtual function (VF) on NET1 with bandwidth 10000 bytes/sec * One SR-IOV virtual function (VF) on NET2 with bandwidth 20000 bytes/sec on a *different* NIC with SSL acceleration It is specified in the extra specs as follows:: resources1:SRIOV_NET_VF=1 resources1:NET_EGRESS_BYTES_SEC=10000 trait1:CUSTOM_PHYSNET_NET1=required resources2:SRIOV_NET_VF=1 resources2:NET_EGRESS_BYTES_SEC:20000 trait2:CUSTOM_PHYSNET_NET2=required trait2:HW_NIC_ACCEL_SSL=required group_policy=isolate See `Granular Resource Request Syntax`__ for more details. __ https://specs.openstack.org/openstack/nova-specs/specs/rocky/implemented/granular-resource-requests.html .. versionadded:: 18.0.0 (Rocky) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/index.rst0000664000175000017500000000145700000000000017455 0ustar00zuulzuul00000000000000================== User Documentation ================== The OpenStack Compute service allows you to control an Infrastructure-as-a-Service (IaaS) cloud computing platform. It gives you control over instances and networks, and allows you to manage access to the cloud through users and projects. Compute does not include virtualization software. Instead, it defines drivers that interact with underlying virtualization mechanisms that run on your host operating system, and exposes functionality over a web-based API. End user guide -------------- .. toctree:: :maxdepth: 1 availability-zones security-groups launch-instances server-groups metadata manage-ip-addresses certificate-validation resize reboot rescue block-device-mapping /reference/api-microversion-history ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/launch-instance-from-image.rst0000664000175000017500000001562500000000000023445 0ustar00zuulzuul00000000000000================================ Launch an instance from an image ================================ Follow the steps below to launch an instance from an image. #. After you gather required parameters, run the following command to launch an instance. Specify the server name, flavor ID, and image ID. .. code-block:: console $ openstack server create --flavor FLAVOR_ID --image IMAGE_ID --key-name KEY_NAME \ --user-data USER_DATA_FILE --security-group SEC_GROUP_NAME --property KEY=VALUE \ INSTANCE_NAME Optionally, you can provide a key name for access control and a security group for security. You can also include metadata key and value pairs. For example, you can add a description for your server by providing the ``--property description="My Server"`` parameter. 
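For illustration only, a request that sets such metadata pairs might look like the following (the ``m1.small`` flavor, ``cirros`` image, and ``demo-instance`` name are placeholder values, not values used elsewhere in this guide):

.. code-block:: console

   $ openstack server create --flavor m1.small --image cirros \
     --property description="My Server" --property role=webserver \
     demo-instance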
You can pass :ref:`user data ` in a local file at instance launch by using the ``--user-data USER-DATA-FILE`` parameter. .. important:: If you boot an instance with an INSTANCE_NAME greater than 63 characters, Compute truncates it automatically when turning it into a host name to ensure the correct work of dnsmasq. The corresponding warning is written into the ``neutron-dnsmasq.log`` file. The following command launches the ``MyCirrosServer`` instance with the ``m1.small`` flavor (ID of ``1``), ``cirros-0.3.2-x86_64-uec`` image (ID of ``397e713c-b95b-4186-ad46-6126863ea0a9``), ``default`` security group, ``KeyPair01`` key, and a user data file called ``cloudinit.file``: .. code-block:: console $ openstack server create --flavor 1 --image 397e713c-b95b-4186-ad46-6126863ea0a9 \ --security-group default --key-name KeyPair01 --user-data cloudinit.file \ myCirrosServer Depending on the parameters that you provide, the command returns a list of server properties. .. code-block:: console +--------------------------------------+-----------------------------------------------+ | Field | Value | +--------------------------------------+-----------------------------------------------+ | OS-DCF:diskConfig | MANUAL | | OS-EXT-AZ:availability_zone | | | OS-EXT-SRV-ATTR:host | None | | OS-EXT-SRV-ATTR:hypervisor_hostname | None | | OS-EXT-SRV-ATTR:instance_name | | | OS-EXT-STS:power_state | NOSTATE | | OS-EXT-STS:task_state | scheduling | | OS-EXT-STS:vm_state | building | | OS-SRV-USG:launched_at | None | | OS-SRV-USG:terminated_at | None | | accessIPv4 | | | accessIPv6 | | | addresses | | | adminPass | E4Ksozt4Efi8 | | config_drive | | | created | 2016-11-30T14:48:05Z | | flavor | m1.tiny | | hostId | | | id | 89015cc9-bdf1-458a-8518-fdca2b4a5785 | | image | cirros (397e713c-b95b-4186-ad46-6126863ea0a9) | | key_name | KeyPair01 | | name | myCirrosServer | | os-extended-volumes:volumes_attached | [] | | progress | 0 | | project_id | 5669caad86a04256994cdf755df4d3c1 | | properties | | | security_groups | [{u'name': u'default'}] | | status | BUILD | | updated | 2016-11-30T14:48:05Z | | user_id | c36cec73b0e44876a4478b1e6cd749bb | | metadata | {u'KEY': u'VALUE'} | +--------------------------------------+-----------------------------------------------+ A status of ``BUILD`` indicates that the instance has started, but is not yet online. A status of ``ACTIVE`` indicates that the instance is active. #. Copy the server ID value from the ``id`` field in the output. Use the ID to get server details or to delete your server. #. Copy the administrative password value from the ``adminPass`` field. Use the password to log in to your server. #. Check if the instance is online. .. code-block:: console $ openstack server list The list shows the ID, name, status, and private (and if assigned, public) IP addresses for all instances in the project to which you belong: .. code-block:: console +-------------+----------------------+--------+------------+-------------+------------------+------------+ | ID | Name | Status | Task State | Power State | Networks | Image Name | +-------------+----------------------+--------+------------+-------------+------------------+------------+ | 84c6e57d... | myCirrosServer | ACTIVE | None | Running | private=10.0.0.3 | cirros | | 8a99547e... | myInstanceFromVolume | ACTIVE | None | Running | private=10.0.0.4 | centos | +-------------+----------------------+--------+------------+-------------+------------------+------------+ If the status for the instance is ACTIVE, the instance is online. 
#. To view the available options for the :command:`openstack server list` command, run the following command: .. code-block:: console $ openstack help server list .. note:: If you did not provide a key pair, security groups, or rules, you can access the instance only from inside the cloud through VNC. Even pinging the instance is not possible. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/launch-instance-from-volume.rst0000664000175000017500000006245300000000000023673 0ustar00zuulzuul00000000000000================================ Launch an instance from a volume ================================ You can boot instances from a volume instead of an image. To complete these tasks, use these parameters on the :command:`openstack server create` command: .. tabularcolumns:: |p{0.3\textwidth}|p{0.25\textwidth}|p{0.4\textwidth}| .. list-table:: :header-rows: 1 :widths: 30 15 30 * - Task - openstack server create parameter(s) - Information * - Boot an instance from an image and attach a non-bootable volume. - ``--block-device`` - :ref:`Boot_instance_from_image_and_attach_non-bootable_volume` * - Create a volume from an image and boot an instance from that volume. - ``--boot-from-volume`` and ``--image``; ``--block-device`` - :ref:`Create_volume_from_image_and_boot_instance` * - Boot from an existing source image, volume, or snapshot. - ``--volume`` or ``--snapshot``; ``--block-device`` - :ref:`Create_volume_from_image_and_boot_instance` * - Attach a swap disk to an instance. - ``--swap`` - :ref:`Attach_swap_or_ephemeral_disk_to_an_instance` * - Attach an ephemeral disk to an instance. - ``--ephemeral`` - :ref:`Attach_swap_or_ephemeral_disk_to_an_instance` .. note:: To attach a volume to a running instance, refer to the :cinder-doc:`Cinder documentation `. .. note:: The maximum limit on the number of disk devices allowed to attach to a single server is configurable with the option :oslo.config:option:`compute.max_disk_devices_to_attach`. .. _Boot_instance_from_image_and_attach_non-bootable_volume: Boot instance from image and attach non-bootable volume ------------------------------------------------------- You can create a non-bootable volume and attach that volume to an instance that you boot from an image. To create a non-bootable volume, do not create it from an image. The volume must be entirely empty with no partition table and no file system. #. Create a non-bootable volume. .. code-block:: console $ openstack volume create --size 8 test-volume +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | attachments | [] | | availability_zone | nova | | bootable | false | | consistencygroup_id | None | | created_at | 2021-06-01T15:01:31.000000 | | description | None | | encrypted | False | | id | 006efd7a-48a8-4c75-bafb-6b483199d284 | | migration_status | None | | multiattach | False | | name | test-volume | | properties | | | replication_status | None | | size | 8 | | snapshot_id | None | | source_volid | None | | status | creating | | type | lvmdriver-1 | | updated_at | None | | user_id | 0a4d2edb9042412ba4f719a547d42f79 | +---------------------+--------------------------------------+ #. List volumes and confirm that it is in the ``available`` state. .. 
code-block:: console $ openstack volume list +--------------------------------------+-------------+-----------+------+-------------+ | ID | Name | Status | Size | Attached to | +--------------------------------------+-------------+-----------+------+-------------+ | 006efd7a-48a8-4c75-bafb-6b483199d284 | test-volume | available | 8 | | +--------------------------------------+-------------+-----------+------+-------------+ #. Create an instance, specifying the volume as a block device to attach. .. code-block:: console $ openstack server create \ --flavor $FLAVOR --image $IMAGE --network $NETWORK \ --block-device uuid=006efd7a-48a8-4c75-bafb-6b483199d284,source_type=volume,destination_type=volume \ --wait test-server +-------------------------------------+-----------------------------------------------------------------+ | Field | Value | +-------------------------------------+-----------------------------------------------------------------+ | OS-DCF:diskConfig | MANUAL | | OS-EXT-AZ:availability_zone | nova | | OS-EXT-SRV-ATTR:host | devstack-ubuntu2004 | | OS-EXT-SRV-ATTR:hypervisor_hostname | devstack-ubuntu2004 | | OS-EXT-SRV-ATTR:instance_name | instance-00000008 | | OS-EXT-STS:power_state | Running | | OS-EXT-STS:task_state | None | | OS-EXT-STS:vm_state | active | | OS-SRV-USG:launched_at | 2021-06-01T15:13:48.000000 | | OS-SRV-USG:terminated_at | None | | accessIPv4 | | | accessIPv6 | | | addresses | private=10.0.0.55, fde3:4790:906b:0:f816:3eff:fed5:ebd9 | | adminPass | CZ76LZ9pNXzt | | config_drive | | | created | 2021-06-01T15:13:37Z | | flavor | m1.tiny (1) | | hostId | 425d65fe75c1e53cecbd32d3e686314235507b6edebbeaa56ff341c7 | | id | 446d1b00-b729-49b3-9dab-40a3fbe190cf | | image | cirros-0.5.1-x86_64-disk (44d317a3-6183-4063-868b-aa0728576f5f) | | key_name | None | | name | test-server | | progress | 0 | | project_id | ae93f388f934458c8e6583f8ab0dba2d | | properties | | | security_groups | name='default' | | status | ACTIVE | | updated | 2021-06-01T15:13:49Z | | user_id | 0a4d2edb9042412ba4f719a547d42f79 | | volumes_attached | id='006efd7a-48a8-4c75-bafb-6b483199d284' | +-------------------------------------+-----------------------------------------------------------------+ #. List volumes once again to ensure the status has changed to ``in-use`` and the volume is correctly reporting the attachment. .. code-block:: console $ openstack volume list +--------------------------------------+-------------+--------+------+--------------------------------------+ | ID | Name | Status | Size | Attached to | +--------------------------------------+-------------+--------+------+--------------------------------------+ | 006efd7a-48a8-4c75-bafb-6b483199d284 | test-volume | in-use | 1 | Attached to test-server on /dev/vdb | +--------------------------------------+-------------+--------+------+--------------------------------------+ .. _Create_volume_from_image_and_boot_instance: Boot instance from volume ------------------------- You can create a bootable volume from an existing image, volume, or snapshot. This procedure shows you how to create a volume from an image and use the volume to boot an instance. #. List available images, noting the ID of the image that you wish to use. .. 
code-block:: console $ openstack image list +--------------------------------------+--------------------------+--------+ | ID | Name | Status | +--------------------------------------+--------------------------+--------+ | 44d317a3-6183-4063-868b-aa0728576f5f | cirros-0.5.1-x86_64-disk | active | +--------------------------------------+--------------------------+--------+ #. Create an instance, using the chosen image and requesting "boot from volume" behavior. .. code-block:: console $ openstack server create \ --flavor $FLAVOR --network $NETWORK \ --image 44d317a3-6183-4063-868b-aa0728576f5f --boot-from-volume 10 \ --wait test-server +-------------------------------------+----------------------------------------------------------+ | Field | Value | +-------------------------------------+----------------------------------------------------------+ | OS-DCF:diskConfig | MANUAL | | OS-EXT-AZ:availability_zone | nova | | OS-EXT-SRV-ATTR:host | devstack-ubuntu2004 | | OS-EXT-SRV-ATTR:hypervisor_hostname | devstack-ubuntu2004 | | OS-EXT-SRV-ATTR:instance_name | instance-0000000c | | OS-EXT-STS:power_state | Running | | OS-EXT-STS:task_state | None | | OS-EXT-STS:vm_state | active | | OS-SRV-USG:launched_at | 2021-06-01T16:02:06.000000 | | OS-SRV-USG:terminated_at | None | | accessIPv4 | | | accessIPv6 | | | addresses | private=10.0.0.3, fde3:4790:906b:0:f816:3eff:fe40:bdd | | adminPass | rqT3RUYYa5H5 | | config_drive | | | created | 2021-06-01T16:01:55Z | | flavor | m1.tiny (1) | | hostId | 425d65fe75c1e53cecbd32d3e686314235507b6edebbeaa56ff341c7 | | id | 69b09fa0-6f24-4924-8311-c9bcdeb90dcb | | image | N/A (booted from volume) | | key_name | None | | name | test-server | | progress | 0 | | project_id | ae93f388f934458c8e6583f8ab0dba2d | | properties | | | security_groups | name='default' | | status | ACTIVE | | updated | 2021-06-01T16:02:07Z | | user_id | 0a4d2edb9042412ba4f719a547d42f79 | | volumes_attached | id='673cbfcb-351c-42cb-9659-bca5b2a0361c' | +-------------------------------------+----------------------------------------------------------+ .. note:: Volumes created in this manner will not be deleted when the server is deleted and will need to be manually deleted afterwards. If you wish to change this behavior, you will need to pre-create the volume manually as discussed below. #. List volumes to ensure a new volume has been created and that its status is ``in-use`` and the volume is correctly reporting the attachment. .. 
code-block:: console $ openstack volume list +--------------------------------------+------+--------+------+--------------------------------------+ | ID | Name | Status | Size | Attached to | +--------------------------------------+------+--------+------+--------------------------------------+ | 673cbfcb-351c-42cb-9659-bca5b2a0361c | | in-use | 1 | Attached to test-server on /dev/vda | +--------------------------------------+------+--------+------+--------------------------------------+ $ openstack server volume list test-server +--------------------------------------+----------+--------------------------------------+--------------------------------------+ | ID | Device | Server ID | Volume ID | +--------------------------------------+----------+--------------------------------------+--------------------------------------+ | 673cbfcb-351c-42cb-9659-bca5b2a0361c | /dev/vda | 9c7f68d4-4d84-4c1e-83af-b8c6a56ad005 | 673cbfcb-351c-42cb-9659-bca5b2a0361c | +--------------------------------------+----------+--------------------------------------+--------------------------------------+ Rather than relying on nova to create the volume from the image, it is also possible to pre-create the volume before creating the instance. This can be useful when you want more control over the created volume, such as enabling encryption. #. List available images, noting the ID of the image that you wish to use. .. code-block:: console $ openstack image list +--------------------------------------+--------------------------+--------+ | ID | Name | Status | +--------------------------------------+--------------------------+--------+ | 44d317a3-6183-4063-868b-aa0728576f5f | cirros-0.5.1-x86_64-disk | active | +--------------------------------------+--------------------------+--------+ #. Create a bootable volume from the chosen image. Cinder makes a volume bootable when ``--image`` parameter is passed. .. code-block:: console $ openstack volume create \ --image 44d317a3-6183-4063-868b-aa0728576f5f --size 10 \ test-volume +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | attachments | [] | | availability_zone | nova | | bootable | false | | consistencygroup_id | None | | created_at | 2021-06-01T15:40:56.000000 | | description | None | | encrypted | False | | id | 9c7f68d4-4d84-4c1e-83af-b8c6a56ad005 | | migration_status | None | | multiattach | False | | name | test-volume | | properties | | | replication_status | None | | size | 10 | | snapshot_id | None | | source_volid | None | | status | creating | | type | lvmdriver-1 | | updated_at | None | | user_id | 0a4d2edb9042412ba4f719a547d42f79 | +---------------------+--------------------------------------+ .. note:: If you want to create a volume to a specific storage backend, you need to use an image which has the ``cinder_img_volume_type`` property. For more information, refer to the :cinder-doc:`cinder docs `. .. note:: A bootable encrypted volume can also be created by adding the ``--type ENCRYPTED_VOLUME_TYPE`` parameter to the volume create command. For example: .. code-block:: console $ openstack volume create \ --type ENCRYPTED_VOLUME_TYPE --image IMAGE --size SIZE \ test-volume This requires an encrypted volume type which must be created ahead of time by an admin. Refer to :horizon-doc:`the horizon documentation `. for more information. #. Create an instance, specifying the volume as the boot device. .. 
code-block:: console $ openstack server create \ --flavor $FLAVOR --network $NETWORK \ --volume 9c7f68d4-4d84-4c1e-83af-b8c6a56ad005\ --wait test-server +-------------------------------------+----------------------------------------------------------+ | Field | Value | +-------------------------------------+----------------------------------------------------------+ | OS-DCF:diskConfig | MANUAL | | OS-EXT-AZ:availability_zone | nova | | OS-EXT-SRV-ATTR:host | devstack-ubuntu2004 | | OS-EXT-SRV-ATTR:hypervisor_hostname | devstack-ubuntu2004 | | OS-EXT-SRV-ATTR:instance_name | instance-0000000a | | OS-EXT-STS:power_state | Running | | OS-EXT-STS:task_state | None | | OS-EXT-STS:vm_state | active | | OS-SRV-USG:launched_at | 2021-06-01T15:43:21.000000 | | OS-SRV-USG:terminated_at | None | | accessIPv4 | | | accessIPv6 | | | addresses | private=10.0.0.47, fde3:4790:906b:0:f816:3eff:fe89:b004 | | adminPass | ueX74zzHWqL4 | | config_drive | | | created | 2021-06-01T15:43:13Z | | flavor | m1.tiny (1) | | hostId | 425d65fe75c1e53cecbd32d3e686314235507b6edebbeaa56ff341c7 | | id | 367b7d42-627c-4d10-a2a0-f759501499a6 | | image | N/A (booted from volume) | | key_name | None | | name | test-server | | progress | 0 | | project_id | ae93f388f934458c8e6583f8ab0dba2d | | properties | | | security_groups | name='default' | | status | ACTIVE | | updated | 2021-06-01T15:43:22Z | | user_id | 0a4d2edb9042412ba4f719a547d42f79 | | volumes_attached | id='9c7f68d4-4d84-4c1e-83af-b8c6a56ad005' | +-------------------------------------+----------------------------------------------------------+ .. note:: The example here uses the ``--volume`` option for simplicity. The ``--block-device`` option could also be used for more granular control over the parameters. See the `openstack server create`__ documentation for details. .. __: https://docs.openstack.org/python-openstackclient/latest/cli/command-objects/server.html#server-create #. List volumes once again to ensure the status has changed to ``in-use`` and the volume is correctly reporting the attachment. .. code-block:: console $ openstack volume list +--------------------------------------+-------------+--------+------+--------------------------------------+ | ID | Name | Status | Size | Attached to | +--------------------------------------+-------------+--------+------+--------------------------------------+ | 9c7f68d4-4d84-4c1e-83af-b8c6a56ad005 | test-volume | in-use | 10 | Attached to test-server on /dev/vda | +--------------------------------------+-------------+--------+------+--------------------------------------+ $ openstack server volume list test-server +--------------------------------------+----------+--------------------------------------+--------------------------------------+ | ID | Device | Server ID | Volume ID | +--------------------------------------+----------+--------------------------------------+--------------------------------------+ | 9c7f68d4-4d84-4c1e-83af-b8c6a56ad005 | /dev/vda | c2368c38-6a7d-4fe8-bc4e-483e90e7608b | 9c7f68d4-4d84-4c1e-83af-b8c6a56ad005 | +--------------------------------------+----------+--------------------------------------+--------------------------------------+ .. _Attach_swap_or_ephemeral_disk_to_an_instance: Attach swap or ephemeral disk to an instance -------------------------------------------- Use the ``--swap`` option of the ``openstack server`` command to attach a swap disk on boot or the ``--ephemeral`` option to attach an ephemeral disk on boot. The latter can be specified multiple times. 
When you terminate the instance, both disks are deleted. Boot an instance with a 512 MB swap disk and 2 GB ephemeral disk. .. code-block:: console $ openstack server create \ --flavor FLAVOR --image IMAGE --network NETWORK \ --ephemeral size=2 --swap 512 --wait test-server .. note:: The flavor defines the maximum swap and ephemeral disk size. You cannot exceed these maximum values. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/launch-instance-using-ISO-image.rst0000664000175000017500000001601100000000000024245 0ustar00zuulzuul00000000000000================================== Launch an instance using ISO image ================================== .. _Boot_instance_from_ISO_image: Boot an instance from an ISO image ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OpenStack supports booting instances using ISO images. But before you make such instances functional, use the :command:`openstack server create` command with the following parameters to boot an instance: .. code-block:: console $ openstack server create --image ubuntu-14.04.2-server-amd64.iso \ --nic net-id = NETWORK_UUID \ --flavor 2 INSTANCE_NAME +--------------------------------------+--------------------------------------------+ | Field | Value | +--------------------------------------+--------------------------------------------+ | OS-DCF:diskConfig | MANUAL | | OS-EXT-AZ:availability_zone | nova | | OS-EXT-SRV-ATTR:host | - | | OS-EXT-SRV-ATTR:hypervisor_hostname | - | | OS-EXT-SRV-ATTR:instance_name | instance-00000004 | | OS-EXT-STS:power_state | 0 | | OS-EXT-STS:task_state | scheduling | | OS-EXT-STS:vm_state | building | | OS-SRV-USG:launched_at | - | | OS-SRV-USG:terminated_at | - | | accessIPv4 | | | accessIPv6 | | | adminPass | ZaiYeC8iucgU | | config_drive | | | created | 2015-06-01T16:34:50Z | | flavor | m1.small (2) | | hostId | | | id | 1e1797f3-1662-49ff-ae8c-a77e82ee1571 | | image | ubuntu-14.04.2-server-amd64.iso | | key_name | - | | metadata | {} | | name | INSTANCE_NAME | | os-extended-volumes:volumes_attached | [] | | progress | 0 | | security_groups | default | | status | BUILD | | tenant_id | ccef9e62b1e645df98728fb2b3076f27 | | updated | 2014-05-09T16:34:51Z | | user_id | fef060ae7bfd4024b3edb97dff59017a | +--------------------------------------+--------------------------------------------+ In this command, ``ubuntu-14.04.2-server-amd64.iso`` is the ISO image, and ``INSTANCE_NAME`` is the name of the new instance. ``NETWORK_UUID`` is a valid network id in your system. Create a bootable volume for the instance to reside on after shutdown. #. Create the volume: .. code-block:: console $ openstack volume create \ --size \ --bootable VOLUME_NAME #. Attach the instance to the volume: .. code-block:: console $ openstack server add volume INSTANCE_NAME \ VOLUME_NAME \ --device /dev/vda .. note:: You need the Block Storage service to preserve the instance after shutdown. The ``--block-device`` argument, used with the legacy :command:`nova boot`, will not work with the OpenStack :command:`openstack server create` command. Instead, the :command:`openstack volume create` and :command:`openstack server add volume` commands create persistent storage. After the instance is successfully launched, connect to the instance using a remote console and follow the instructions to install the system as using ISO images on regular computers. 
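One way to reach that remote console, assuming the deployment exposes a VNC or similar graphical console for the instance, is to request a console URL and open it in a browser:

.. code-block:: console

   $ openstack console url show INSTANCE_NAME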
When the installation is finished and system is rebooted, the instance asks you again to install the operating system, which means your instance is not usable. If you have problems with image creation, please check the `Virtual Machine Image Guide `_ for reference. .. _Make_instance_booted_from_ISO_image_functional: Make the instances booted from ISO image functional ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Now complete the following steps to make your instances created using ISO image actually functional. #. Delete the instance using the following command. .. code-block:: console $ openstack server delete INSTANCE_NAME #. After you delete the instance, the system you have just installed using your ISO image remains, because the parameter ``shutdown=preserve`` was set, so run the following command. .. code-block:: console $ openstack volume list +--------------------------+-------------------------+-----------+------+-------------+ | ID | Name | Status | Size | Attached to | +--------------------------+-------------------------+-----------+------+-------------+ | 8edd7c97-1276-47a5-9563- |dc01d873-d0f1-40b6-bfcc- | available | 10 | | | 1025f4264e4f | 26a8d955a1d9-blank-vol | | | | +--------------------------+-------------------------+-----------+------+-------------+ You get a list with all the volumes in your system. In this list, you can find the volume that is attached to your ISO created instance, with the false bootable property. #. Upload the volume to glance. .. code-block:: console $ openstack image create --volume SOURCE_VOLUME IMAGE_NAME $ openstack image list +-------------------+------------+--------+ | ID | Name | Status | +-------------------+------------+--------+ | 74303284-f802-... | IMAGE_NAME | active | +-------------------+------------+--------+ The ``SOURCE_VOLUME`` is the UUID or a name of the volume that is attached to your ISO created instance, and the ``IMAGE_NAME`` is the name that you give to your new image. #. After the image is successfully uploaded, you can use the new image to boot instances. The instances launched using this image contain the system that you have just installed using the ISO image. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/launch-instances.rst0000664000175000017500000001474700000000000021613 0ustar00zuulzuul00000000000000================ Launch instances ================ Instances are virtual machines that run inside the cloud. Before you can launch an instance, gather the following parameters: - The **instance source** can be an image, snapshot, or block storage volume that contains an image or snapshot. - A **name** for your instance. - The **flavor** for your instance, which defines the compute, memory, and storage capacity of nova computing instances. A flavor is an available hardware configuration for a server. It defines the size of a virtual server that can be launched. - Any **user data** files. A :ref:`user data ` file is a special key in the metadata service that holds a file that cloud-aware applications in the guest instance can access. For example, one application that uses user data is the `cloud-init `__ system, which is an open-source package from Ubuntu that is available on various Linux distributions and that handles early initialization of a cloud instance. 
- Access and security credentials, which include one or both of the following credentials: - A **key pair** for your instance, which are SSH credentials that are injected into images when they are launched. For the key pair to be successfully injected, the image must contain the ``cloud-init`` package. Create at least one key pair for each project. If you already have generated a key pair with an external tool, you can import it into OpenStack. You can use the key pair for multiple instances that belong to that project. - A **security group** that defines which incoming network traffic is forwarded to instances. Security groups hold a set of firewall policies, known as *security group rules*. - If needed, you can assign a **floating (public) IP address** to a running instance to make it accessible from outside the cloud. See :doc:`manage-ip-addresses`. - You can also attach a block storage device, or **volume**, for persistent storage. .. note:: Instances that use the default security group cannot, by default, be accessed from any IP address outside of the cloud. If you want those IP addresses to access the instances, you must modify the rules for the default security group. After you gather the parameters that you need to launch an instance, you can launch it from an :doc:`image` or a :doc:`volume`. You can launch an instance directly from one of the available OpenStack images or from an image that you have copied to a persistent volume. The OpenStack Image service provides a pool of images that are accessible to members of different projects. Gather parameters to launch an instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Before you begin, source the OpenStack RC file. #. Create a flavor. Creating a flavor is typically only available to administrators of a cloud because this has implications for scheduling efficiently in the cloud. .. code-block:: console $ openstack flavor create --ram 512 --disk 1 --vcpus 1 m1.tiny #. List the available flavors. .. code-block:: console $ openstack flavor list Note the ID of the flavor that you want to use for your instance:: +-----+-----------+-------+------+-----------+-------+-----------+ | ID | Name | RAM | Disk | Ephemeral | VCPUs | Is_Public | +-----+-----------+-------+------+-----------+-------+-----------+ | 1 | m1.tiny | 512 | 1 | 0 | 1 | True | | 2 | m1.small | 2048 | 20 | 0 | 1 | True | | 3 | m1.medium | 4096 | 40 | 0 | 2 | True | | 4 | m1.large | 8192 | 80 | 0 | 4 | True | | 5 | m1.xlarge | 16384 | 160 | 0 | 8 | True | +-----+-----------+-------+------+-----------+-------+-----------+ #. List the available images. .. code-block:: console $ openstack image list Note the ID of the image from which you want to boot your instance:: +--------------------------------------+---------------------------------+--------+ | ID | Name | Status | +--------------------------------------+---------------------------------+--------+ | 397e713c-b95b-4186-ad46-6126863ea0a9 | cirros-0.3.5-x86_64-uec | active | | df430cc2-3406-4061-b635-a51c16e488ac | cirros-0.3.5-x86_64-uec-kernel | active | | 3cf852bd-2332-48f4-9ae4-7d926d50945e | cirros-0.3.5-x86_64-uec-ramdisk | active | +--------------------------------------+---------------------------------+--------+ You can also filter the image list by using :command:`grep` to find a specific image, as follows: .. code-block:: console $ openstack image list | grep 'kernel' | df430cc2-3406-4061-b635-a51c16e488ac | cirros-0.3.5-x86_64-uec-kernel | active | #. List the available security groups. .. 
code-block:: console $ openstack security group list .. note:: If you are an admin user, this command will list groups for all tenants. Note the ID of the security group that you want to use for your instance:: +--------------------------------------+---------+------------------------+----------------------------------+ | ID | Name | Description | Project | +--------------------------------------+---------+------------------------+----------------------------------+ | b0d78827-0981-45ef-8561-93aee39bbd9f | default | Default security group | 5669caad86a04256994cdf755df4d3c1 | | ec02e79e-83e1-48a5-86ad-14ab9a8c375f | default | Default security group | 1eaaf6ede7a24e78859591444abf314a | +--------------------------------------+---------+------------------------+----------------------------------+ If you have not created any security groups, you can assign the instance to only the default security group. You can view rules for a specified security group: .. code-block:: console $ openstack security group rule list default #. List the available key pairs, and note the key pair name that you use for SSH access. .. code-block:: console $ openstack keypair list Launch an instance ~~~~~~~~~~~~~~~~~~ You can launch an instance from various sources. .. toctree:: :maxdepth: 2 launch-instance-from-image launch-instance-from-volume launch-instance-using-ISO-image boot-instance-using-PXE ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/manage-ip-addresses.rst0000664000175000017500000002004000000000000022144 0ustar00zuulzuul00000000000000=================== Manage IP addresses =================== Each instance has a private, fixed IP address and can also have a public, or floating IP address. Private IP addresses are used for communication between instances, and public addresses are used for communication with networks outside the cloud, including the Internet. When you launch an instance, it is automatically assigned a private IP address that stays the same until you explicitly terminate the instance. Rebooting an instance has no effect on the private IP address. A pool of floating IP addresses, configured by the cloud administrator, is available in OpenStack Compute. The project quota defines the maximum number of floating IP addresses that you can allocate to the project. After you allocate a floating IP address to a project, you can: - Associate the floating IP address with an instance of the project. - Disassociate a floating IP address from an instance in the project. - Delete a floating IP from the project which automatically deletes that IP's associations. Use the :command:`openstack` commands to manage floating IP addresses. List floating IP address information ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To list all pools that provide floating IP addresses, run: .. code-block:: console $ openstack floating ip pool list +--------+ | name | +--------+ | public | | test | +--------+ .. note:: If this list is empty, the cloud administrator must configure a pool of floating IP addresses. To list all floating IP addresses that are allocated to the current project, run: .. 
code-block:: console $ openstack floating ip list +--------------------------------------+---------------------+------------------+------+ | ID | Floating IP Address | Fixed IP Address | Port | +--------------------------------------+---------------------+------------------+------+ | 760963b2-779c-4a49-a50d-f073c1ca5b9e | 172.24.4.228 | None | None | | 89532684-13e1-4af3-bd79-f434c9920cc3 | 172.24.4.235 | None | None | | ea3ebc6d-a146-47cd-aaa8-35f06e1e8c3d | 172.24.4.229 | None | None | +--------------------------------------+---------------------+------------------+------+ For each floating IP address that is allocated to the current project, the command outputs the floating IP address, the ID for the instance to which the floating IP address is assigned, the associated fixed IP address, and the pool from which the floating IP address was allocated. Associate floating IP addresses ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can assign a floating IP address to a project and to an instance. #. Run the following command to allocate a floating IP address to the current project. By default, the floating IP address is allocated from the public pool. The command outputs the allocated IP address: .. code-block:: console $ openstack floating ip create public +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | created_at | 2016-11-30T15:02:05Z | | description | | | fixed_ip_address | None | | floating_ip_address | 172.24.4.236 | | floating_network_id | 0bf90de6-fc0f-4dba-b80d-96670dfb331a | | headers | | | id | c70ad74b-2f64-4e60-965e-f24fc12b3194 | | port_id | None | | project_id | 5669caad86a04256994cdf755df4d3c1 | | project_id | 5669caad86a04256994cdf755df4d3c1 | | revision_number | 1 | | router_id | None | | status | DOWN | | updated_at | 2016-11-30T15:02:05Z | +---------------------+--------------------------------------+ #. List all project instances with which a floating IP address could be associated. .. code-block:: console $ openstack server list +---------------------+------+---------+------------+-------------+------------------+------------+ | ID | Name | Status | Task State | Power State | Networks | Image Name | +---------------------+------+---------+------------+-------------+------------------+------------+ | d5c854f9-d3e5-4f... | VM1 | ACTIVE | - | Running | private=10.0.0.3 | cirros | | 42290b01-0968-43... | VM2 | SHUTOFF | - | Shutdown | private=10.0.0.4 | centos | +---------------------+------+---------+------------+-------------+------------------+------------+ Note the server ID to use. #. List ports associated with the selected server. .. code-block:: console $ openstack port list --device-id SERVER_ID +--------------------------------------+------+-------------------+--------------------------------------------------------------+--------+ | ID | Name | MAC Address | Fixed IP Addresses | Status | +--------------------------------------+------+-------------------+--------------------------------------------------------------+--------+ | 40e9dea9-f457-458f-bc46-6f4ebea3c268 | | fa:16:3e:00:57:3e | ip_address='10.0.0.4', subnet_id='23ee9de7-362e- | ACTIVE | | | | | 49e2-a3b0-0de1c14930cb' | | | | | | ip_address='fd22:4c4c:81c2:0:f816:3eff:fe00:573e', subnet_id | | | | | | ='a2b3acbe-fbeb-40d3-b21f-121268c21b55' | | +--------------------------------------+------+-------------------+--------------------------------------------------------------+--------+ Note the port ID to use. #. 
Associate an IP address with an instance in the project, as follows: .. code-block:: console $ openstack floating ip set --port PORT_ID FLOATING_IP_ADDRESS For example: .. code-block:: console $ openstack floating ip set --port 40e9dea9-f457-458f-bc46-6f4ebea3c268 172.24.4.225 The instance is now associated with two IP addresses: .. code-block:: console $ openstack server list +------------------+------+--------+------------+-------------+-------------------------------+------------+ | ID | Name | Status | Task State | Power State | Networks | Image Name | +------------------+------+--------+------------+-------------+-------------------------------+------------+ | d5c854f9-d3e5... | VM1 | ACTIVE | - | Running | private=10.0.0.3, 172.24.4.225| cirros | | 42290b01-0968... | VM2 | SHUTOFF| - | Shutdown | private=10.0.0.4 | centos | +------------------+------+--------+------------+-------------+-------------------------------+------------+ After you associate the IP address and configure security group rules for the instance, the instance is publicly available at the floating IP address. Disassociate floating IP addresses ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To disassociate a floating IP address from an instance: .. code-block:: console $ openstack floating ip unset --port FLOATING_IP_ADDRESS To remove the floating IP address from a project: .. code-block:: console $ openstack floating ip delete FLOATING_IP_ADDRESS The IP address is returned to the pool of IP addresses that is available for all projects. If the IP address is still associated with a running instance, it is automatically disassociated from that instance. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/metadata.rst0000664000175000017500000003742300000000000020130 0ustar00zuulzuul00000000000000======== Metadata ======== Nova presents configuration information to instances it starts via a mechanism called metadata. These mechanisms are widely used via helpers such as `cloud-init`_ to specify things like the root password the instance should use. This metadata is made available via either a *config drive* or the *metadata service* and can be somewhat customised by the user using the *user data* feature. This guide provides an overview of these features along with a summary of the types of metadata available. .. _cloud-init: https://cloudinit.readthedocs.io/en/latest/ Types of metadata ----------------- There are three separate groups of users who need to be able to specify metadata for an instance. User provided data ~~~~~~~~~~~~~~~~~~ The user who booted the instance can pass metadata to the instance in several ways. For authentication keypairs, the keypairs functionality of the nova API can be used to upload a key and then specify that key during the nova boot API request. For less structured data, a small opaque blob of data may be passed via the :ref:`user data ` feature of the nova API. Examples of such unstructured data would be the puppet role that the instance should use, or the HTTP address of a server from which to fetch post-boot configuration information. Nova provided data ~~~~~~~~~~~~~~~~~~ Nova itself needs to pass information to the instance via its internal implementation of the metadata system. Such information includes the requested hostname for the instance and the availability zone the instance is in. This happens by default and requires no configuration by the user or deployer. 
Nova provides both an :ref:`OpenStack metadata API ` and an :ref:`EC2-compatible API `. Both the OpenStack metadata and EC2-compatible APIs are versioned by date. These are described later. Deployer provided data ~~~~~~~~~~~~~~~~~~~~~~ A deployer of OpenStack may need to pass data to an instance. It is also possible that this data is not known to the user starting the instance. An example might be a cryptographic token to be used to register the instance with Active Directory post boot -- the user starting the instance should not have access to Active Directory to create this token, but the nova deployment might have permissions to generate the token on the user's behalf. This is possible using the :ref:`vendordata ` feature, which must be configured by your cloud operator. .. _metadata-service: The metadata service -------------------- .. note:: This section provides end user information about the metadata service. For deployment information about the metadata service, refer to the :doc:`admin guide `. The *metadata service* provides a way for instances to retrieve instance-specific data via a REST API. Instances access this service at ``169.254.169.254`` or at ``fe80::a9fe:a9fe``. All types of metadata, be it user-, nova- or vendor-provided, can be accessed via this service. .. versionchanged:: 22.0.0 Starting with the Victoria release the metadata service is accessible over IPv6 at the link-local address ``fe80::a9fe:a9fe``. .. note:: As with all IPv6 link-local addresses, the metadata IPv6 address is not complete without a zone identifier (in a Linux guest that is usually the interface name concatenated after a percent sign). Please also note that in URLs you should URL-encode the percent sign itself. For example, assuming that the primary network interface in the guest is ``ens2`` substitute ``http://[fe80::a9fe:a9fe%25ens2]:80/...`` for ``http://169.254.169.254/...``. Using the metadata service ~~~~~~~~~~~~~~~~~~~~~~~~~~ To retrieve a list of supported versions for the :ref:`OpenStack metadata API `, make a GET request to ``http://169.254.169.254/openstack``, which will return a list of directories: .. code-block:: console $ curl http://169.254.169.254/openstack 2012-08-10 2013-04-04 2013-10-17 2015-10-15 2016-06-30 2016-10-06 2017-02-22 2018-08-27 latest Refer to :ref:`OpenStack format metadata ` for information on the contents and structure of these directories. To list supported versions for the :ref:`EC2-compatible metadata API `, make a GET request to ``http://169.254.169.254``, which will, once again, return a list of directories: .. code-block:: console $ curl http://169.254.169.254 1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01 2009-04-04 latest Refer to :ref:`EC2-compatible metadata ` for information on the contents and structure of these directories. .. _metadata-config-drive: Config drives ------------- .. note:: This section provides end user information about config drives. For deployment information about the config drive feature, refer to the :doc:`admin guide `. *Config drives* are special drives that are attached to an instance when it boots. The instance can mount this drive and read files from it to get information that is normally available through the metadata service. One use case for using the config drive is to pass a networking configuration when you do not use DHCP to assign IP addresses to instances. 
For example, you might pass the IP address configuration for the instance through the config drive, which the instance can mount and access before you configure the network settings for the instance. Using the config drive ~~~~~~~~~~~~~~~~~~~~~~ To enable the config drive for an instance, pass the ``--config-drive true`` parameter to the :command:`openstack server create` command. The following example enables the config drive and passes a user data file and two key/value metadata pairs, all of which are accessible from the config drive: .. code-block:: console $ openstack server create --config-drive true --image my-image-name \ --flavor 1 --key-name mykey --user-data ./my-user-data.txt \ --property role=webservers --property essential=false MYINSTANCE .. note:: The Compute service can be configured to always create a config drive. For more information, refer to :doc:`the admin guide `. If your guest operating system supports accessing disk by label, you can mount the config drive as the ``/dev/disk/by-label/configurationDriveVolumeLabel`` device. In the following example, the config drive has the ``config-2`` volume label: .. code-block:: console # mkdir -p /mnt/config # mount /dev/disk/by-label/config-2 /mnt/config If your guest operating system does not use ``udev``, the ``/dev/disk/by-label`` directory is not present. You can use the :command:`blkid` command to identify the block device that corresponds to the config drive. For example: .. code-block:: console # blkid -t LABEL="config-2" -odevice /dev/vdb Once identified, you can mount the device: .. code-block:: console # mkdir -p /mnt/config # mount /dev/vdb /mnt/config Once mounted, you can examine the contents of the config drive: .. code-block:: console $ cd /mnt/config $ find . -maxdepth 2 . ./ec2 ./ec2/2009-04-04 ./ec2/latest ./openstack ./openstack/2012-08-10 ./openstack/2013-04-04 ./openstack/2013-10-17 ./openstack/2015-10-15 ./openstack/2016-06-30 ./openstack/2016-10-06 ./openstack/2017-02-22 ./openstack/latest The files that appear on the config drive depend on the arguments that you pass to the :command:`openstack server create` command. The format of this directory is the same as that provided by the :ref:`metadata service `, with the exception that the EC2-compatible metadata is now located in the ``ec2`` directory instead of the root (``/``) directory. Refer to the :ref:`metadata-openstack-format` and :ref:`metadata-ec2-format` sections for information about the format of the files and subdirectories within these directories. Setting in image ~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: console $ openstack image set IMG-UUID --property img_config_drive=mandatory The img_config_drive image metadata property can be used to force enable the config drive. Setting img_config_drive specifies whether the image needs a config drive. Nova metadata ------------- As noted previously, nova provides its metadata in two formats: OpenStack format and EC2-compatible format. .. _metadata-openstack-format: OpenStack format metadata ~~~~~~~~~~~~~~~~~~~~~~~~~ .. versionchanged:: 12.0.0 Support for network metadata was added in the Liberty release. Metadata from the OpenStack API is distributed in JSON format. There are two files provided for each version: ``meta_data.json`` and ``network_data.json``. The ``meta_data.json`` file contains nova-specific information, while the ``network_data.json`` file contains information retrieved from neutron. For example: .. 
code-block:: console $ curl http://169.254.169.254/openstack/2018-08-27/meta_data.json .. code-block:: json { "random_seed": "yu5ZnkqF2CqnDZVAfZgarG...", "availability_zone": "nova", "keys": [ { "data": "ssh-rsa AAAAB3NzaC1y...== Generated by Nova\n", "type": "ssh", "name": "mykey" } ], "hostname": "test.novalocal", "launch_index": 0, "meta": { "priority": "low", "role": "webserver" }, "devices": [ { "type": "nic", "bus": "pci", "address": "0000:00:02.0", "mac": "00:11:22:33:44:55", "tags": ["trusted"] }, { "type": "disk", "bus": "ide", "address": "0:0", "serial": "disk-vol-2352423", "path": "/dev/sda", "tags": ["baz"] } ], "project_id": "f7ac731cc11f40efbc03a9f9e1d1d21f", "public_keys": { "mykey": "ssh-rsa AAAAB3NzaC1y...== Generated by Nova\n" }, "name": "test" } .. code-block:: console $ curl http://169.254.169.254/openstack/2018-08-27/network_data.json .. code-block:: json { "links": [ { "ethernet_mac_address": "fa:16:3e:9c:bf:3d", "id": "tapcd9f6d46-4a", "mtu": null, "type": "bridge", "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc" } ], "networks": [ { "id": "network0", "link": "tapcd9f6d46-4a", "network_id": "99e88329-f20d-4741-9593-25bf07847b16", "type": "ipv4_dhcp" } ], "services": [ { "address": "8.8.8.8", "type": "dns" } ] } ::download:`Download` network_data.json JSON schema. .. _metadata-ec2-format: EC2-compatible metadata ~~~~~~~~~~~~~~~~~~~~~~~ The EC2-compatible API is compatible with version 2009-04-04 of the `Amazon EC2 metadata service`__ This means that virtual machine images designed for EC2 will work properly with OpenStack. The EC2 API exposes a separate URL for each metadata element. Retrieve a listing of these elements by making a GET query to ``http://169.254.169.254/2009-04-04/meta-data/``. For example: .. code-block:: console $ curl http://169.254.169.254/2009-04-04/meta-data/ ami-id ami-launch-index ami-manifest-path block-device-mapping/ hostname instance-action instance-id instance-type kernel-id local-hostname local-ipv4 placement/ public-hostname public-ipv4 public-keys/ ramdisk-id reservation-id security-groups .. code-block:: console $ curl http://169.254.169.254/2009-04-04/meta-data/block-device-mapping/ ami .. code-block:: console $ curl http://169.254.169.254/2009-04-04/meta-data/placement/ availability-zone .. code-block:: console $ curl http://169.254.169.254/2009-04-04/meta-data/public-keys/ 0=mykey Instances can retrieve the public SSH key (identified by keypair name when a user requests a new instance) by making a GET request to ``http://169.254.169.254/2009-04-04/meta-data/public-keys/0/openssh-key``: .. code-block:: console $ curl http://169.254.169.254/2009-04-04/meta-data/public-keys/0/openssh-key ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDYVEprvtYJXVOBN0XNKVVRNCRX6BlnNbI+US\ LGais1sUWPwtSg7z9K9vhbYAPUZcq8c/s5S9dg5vTHbsiyPCIDOKyeHba4MUJq8Oh5b2i71/3B\ ISpyxTBH/uZDHdslW2a+SrPDCeuMMoss9NFhBdKtDkdG9zyi0ibmCP6yMdEX8Q== Generated\ by Nova __ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html .. _metadata-userdata: User data --------- *User data* is a blob of data that the user can specify when they launch an instance. The instance can access this data through the metadata service or config drive. Commonly used to pass a shell script that the instance runs on boot. For example, one application that uses user data is the `cloud-init `__ system, which is an open-source package from Ubuntu that is available on various Linux distributions and which handles early initialization of a cloud instance. 
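As a minimal sketch (the file name matches the example below, while the script contents are arbitrary and assume the guest image runs ``cloud-init`` or an equivalent agent), such a script could be prepared on the client before boot:

.. code-block:: console

   $ cat > mydata.file <<'EOF'
   #!/bin/sh
   # Executed in the guest at first boot when user data is processed
   echo 'user data was processed' > /var/tmp/user-data-marker
   EOF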
You can place user data in a local file and pass it through the ``--user-data `` parameter at instance creation. .. code-block:: console $ openstack server create --image ubuntu-cloudimage --flavor 1 \ --user-data mydata.file VM_INSTANCE .. note:: The provided user data should not be base64-encoded, as it will be automatically encoded in order to pass valid input to the REST API, which has a limit of 65535 bytes after encoding. Once booted, you can access this data from the instance using either the metadata service or the config drive. To access it via the metadata service, make a GET request to either ``http://169.254.169.254/openstack/{version}/user_data`` (OpenStack API) or ``http://169.254.169.254/{version}/user-data`` (EC2-compatible API). For example: .. code-block:: console $ curl http://169.254.169.254/openstack/2018-08-27/user_data .. code-block:: shell #!/bin/bash echo 'Extra user data here' .. _metadata-vendordata: Vendordata ---------- .. note:: This section provides end user information about the vendordata feature. For deployment information about this feature, refer to the :doc:`admin guide `. .. versionchanged:: 14.0.0 Support for dynamic vendor data was added in the Newton release. **Where configured**, instances can retrieve vendor-specific data from the metadata service or config drive. To access it via the metadata service, make a GET request to either ``http://169.254.169.254/openstack/{version}/vendor_data.json`` or ``http://169.254.169.254/openstack/{version}/vendor_data2.json``, depending on the deployment. For example: .. code-block:: console $ curl http://169.254.169.254/openstack/2018-08-27/vendor_data2.json .. code-block:: json { "testing": { "value1": 1, "value2": 2, "value3": "three" } } .. note:: The presence and contents of this file will vary from deployment to deployment. General guidelines ------------------ - Do not rely on the presence of the EC2 metadata in the metadata API or config drive, because this content might be removed in a future release. For example, do not rely on files in the ``ec2`` directory. - When you create images that access metadata service or config drive data and multiple directories are under the ``openstack`` directory, always select the highest API version by date that your consumer supports. For example, if your guest image supports the ``2012-03-05``, ``2012-08-05``, and ``2013-04-13`` versions, try ``2013-04-13`` first and fall back to a previous version if ``2013-04-13`` is not present. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/quotas.rst0000664000175000017500000002070700000000000017661 0ustar00zuulzuul00000000000000:orphan: ====== Quotas ====== .. warning:: As of Nova release 28.0.0 (2023.2 Bobcat), the ``nova.quota.DbQuotaDriver`` has been deprecated and the default quota driver configuration will be changed to the ``nova.quota.UnifiedLimitsDriver`` in the 29.0.0 (2024.1 Caracal) release. See the :doc:`unified limits documentation `. Nova uses a quota system for setting limits on resources such as number of instances or amount of CPU that a specific project or user can use. Quota limits and usage can be retrieved using the command-line interface. Types of quota -------------- .. list-table:: :header-rows: 1 :widths: 10 40 * - Quota name - Description * - cores - Number of instance cores (VCPUs) allowed per project. * - instances - Number of instances allowed per project. * - key_pairs - Number of key pairs allowed per user. 
* - metadata_items - Number of metadata items allowed per instance. * - ram - Megabytes of instance ram allowed per project. * - server_groups - Number of server groups per project. * - server_group_members - Number of servers per server group. The following quotas were previously available but were removed in microversion 2.36 as they proxied information available from the networking service. .. list-table:: :header-rows: 1 :widths: 10 40 * - Quota name - Description * - fixed_ips - Number of fixed IP addresses allowed per project. This number must be equal to or greater than the number of allowed instances. * - floating_ips - Number of floating IP addresses allowed per project. * - networks - Number of networks allowed per project (no longer used). * - security_groups - Number of security groups per project. * - security_group_rules - Number of security group rules per project. Similarly, the following quotas were previously available but were removed in microversion 2.57 as the personality files feature was deprecated. .. list-table:: :header-rows: 1 :widths: 10 40 * - Quota name - Description * - injected_files - Number of injected files allowed per project. * - injected_file_content_bytes - Number of content bytes allowed per injected file. * - injected_file_path_bytes - Length of injected file path. Usage ----- Project quotas ~~~~~~~~~~~~~~ To list all default quotas for projects, run: .. code-block:: console $ openstack quota show --default .. note:: This lists default quotas for all services and not just nova. For example: .. code-block:: console $ openstack quota show --default +----------------------+----------+ | Field | Value | +----------------------+----------+ | backup-gigabytes | 1000 | | backups | 10 | | cores | 20 | | fixed-ips | -1 | | floating-ips | 50 | | gigabytes | 1000 | | health_monitors | None | | injected-file-size | 10240 | | injected-files | 5 | | injected-path-size | 255 | | instances | 10 | | key-pairs | 100 | | l7_policies | None | | listeners | None | | load_balancers | None | | location | None | | name | None | | networks | 10 | | per-volume-gigabytes | -1 | | pools | None | | ports | 50 | | project | None | | project_name | project | | properties | 128 | | ram | 51200 | | rbac_policies | 10 | | routers | 10 | | secgroup-rules | 100 | | secgroups | 10 | | server-group-members | 10 | | server-groups | 10 | | snapshots | 10 | | subnet_pools | -1 | | subnets | 10 | | volumes | 10 | +----------------------+----------+ To list the currently set quota values for your project, run: .. code-block:: console $ openstack quota show PROJECT where ``PROJECT`` is the ID or name of your project. For example: .. 
code-block:: console $ openstack quota show $OS_PROJECT_ID +----------------------+----------------------------------+ | Field | Value | +----------------------+----------------------------------+ | backup-gigabytes | 1000 | | backups | 10 | | cores | 32 | | fixed-ips | -1 | | floating-ips | 10 | | gigabytes | 1000 | | health_monitors | None | | injected-file-size | 10240 | | injected-files | 5 | | injected-path-size | 255 | | instances | 10 | | key-pairs | 100 | | l7_policies | None | | listeners | None | | load_balancers | None | | location | None | | name | None | | networks | 20 | | per-volume-gigabytes | -1 | | pools | None | | ports | 60 | | project | c8156b55ec3b486193e73d2974196993 | | project_name | project | | properties | 128 | | ram | 65536 | | rbac_policies | 10 | | routers | 10 | | secgroup-rules | 50 | | secgroups | 50 | | server-group-members | 10 | | server-groups | 10 | | snapshots | 10 | | subnet_pools | -1 | | subnets | 20 | | volumes | 10 | +----------------------+----------------------------------+ To view a list of options for the :command:`openstack quota show` command, run: .. code-block:: console $ openstack quota show --help User quotas ~~~~~~~~~~~ .. note:: User-specific quotas are legacy and will be removed when migration to :keystone-doc:`unified limits ` is complete. User-specific quotas were added as a way to provide two-level hierarchical quotas and this feature is already being offered in unified limits. For this reason, the below commands have not and will not be ported to openstackclient. To list the quotas for your user, run: .. code-block:: console $ nova quota-show --user USER --tenant PROJECT where ``USER`` is the ID or name of your user and ``PROJECT`` is the ID or name of your project. For example: .. code-block:: console $ nova quota-show --user $OS_USERNAME --tenant $OS_PROJECT_ID +-----------------------------+-------+ | Quota | Limit | +-----------------------------+-------+ | instances | 10 | | cores | 32 | | ram | 65536 | | metadata_items | 128 | | injected_files | 5 | | injected_file_content_bytes | 10240 | | injected_file_path_bytes | 255 | | key_pairs | 100 | | server_groups | 10 | | server_group_members | 10 | +-----------------------------+-------+ To view a list of options for the :command:`nova quota-show` command, run: .. code-block:: console $ nova help quota-show ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/reboot.rst0000664000175000017500000000140300000000000017627 0ustar00zuulzuul00000000000000================== Reboot an instance ================== You can soft or hard reboot a running instance. A soft reboot attempts a graceful shut down and restart of the instance. A hard reboot power cycles the instance. To reboot a server, use the :command:`openstack server reboot` command: .. code-block:: console $ openstack server reboot SERVER By default, when you reboot an instance it is a soft reboot. To perform a hard reboot, pass the ``--hard`` parameter as follows: .. code-block:: console $ openstack server reboot --hard SERVER It is also possible to reboot a running instance into rescue mode. For example, this operation may be required if a filesystem of an instance becomes corrupted with prolonged use. See :doc:`rescue` for more details. 
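Reboot requests are asynchronous: the command returns before the guest has
actually restarted. As a rough sketch (``SERVER`` is a placeholder, and the
transitional status you observe depends on the reboot type), you can poll the
server status until it returns to ``ACTIVE``:

.. code-block:: console

   $ openstack server reboot --hard SERVER
   $ openstack server show -f value -c status SERVER
   HARD_REBOOT
   $ openstack server show -f value -c status SERVER
   ACTIVE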
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/rescue.rst0000664000175000017500000000664000000000000017633 0ustar00zuulzuul00000000000000================== Rescue an instance ================== Instance rescue provides a mechanism for access, even if an image renders the instance inaccessible. Two rescue modes are currently provided. Instance rescue --------------- By default the instance is booted from the provided rescue image or a fresh copy of the original instance image if a rescue image is not provided. The root disk and optional regenerated config drive are also attached to the instance for data recovery. .. note:: Rescuing a volume-backed instance is not supported with this mode. Stable device instance rescue ----------------------------- As of 21.0.0 (Ussuri) an additional stable device rescue mode is available. This mode now supports the rescue of volume-backed instances. This mode keeps all devices both local and remote attached in their original order to the instance during the rescue while booting from the provided rescue image. This mode is enabled and controlled by the presence of ``hw_rescue_device`` or ``hw_rescue_bus`` image properties on the provided rescue image. As their names suggest these properties control the rescue device type (``cdrom``, ``disk`` or ``floppy``) and bus type (``scsi``, ``virtio``, ``ide``, or ``usb``) used when attaching the rescue image to the instance. Support for each combination of the ``hw_rescue_device`` and ``hw_rescue_bus`` image properties is dependent on the underlying hypervisor and platform being used. For example the ``IDE`` bus is not available on POWER KVM based compute hosts. .. note:: This mode is only supported when using the Libvirt virt driver. This mode is not supported when using the LXC hypervisor as enabled by the :oslo.config:option:`libvirt.virt_type` configurable on the computes. Usage ----- .. note:: Pause, suspend, and stop operations are not allowed when an instance is running in rescue mode, as triggering these actions causes the loss of the original instance state and makes it impossible to unrescue the instance. To perform an instance rescue, use the :command:`openstack server rescue` command: .. code-block:: console $ openstack server rescue SERVER .. note:: On running the :command:`openstack server rescue` command, an instance performs a soft shutdown first. This means that the guest operating system has a chance to perform a controlled shutdown before the instance is powered off. The shutdown behavior is configured by the :oslo.config:option:`shutdown_timeout` parameter that can be set in the ``nova.conf`` file. Its value stands for the overall period (in seconds) a guest operating system is allowed to complete the shutdown. The timeout value can be overridden on a per image basis by means of ``os_shutdown_timeout`` that is an image metadata setting allowing different types of operating systems to specify how much time they need to shut down cleanly. To rescue an instance that boots from a volume you need to use the :ref:`2.87 microversion or later `. .. code-block:: console $ openstack --os-compute-api-version 2.87 server rescue SERVER If you want to rescue an instance with a specific image, rather than the default one, use the ``--image`` parameter: .. code-block:: console $ openstack server rescue --image IMAGE_ID SERVER To restart the instance from the normal boot disk, run the following command: .. 
code-block:: console $ openstack server unrescue SERVER ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/resize.rst0000664000175000017500000000432600000000000017645 0ustar00zuulzuul00000000000000================== Resize an instance ================== You can change the size of an instance by changing its flavor. This rebuilds the instance and therefore results in a restart. To list the VMs you want to resize, run: .. code-block:: console $ openstack server list Once you have the name or UUID of the server you wish to resize, resize it using the :command:`openstack server resize` command: .. code-block:: console $ openstack server resize --flavor FLAVOR SERVER .. note:: By default, the :command:`openstack server resize` command gives the guest operating system a chance to perform a controlled shutdown before the instance is powered off and the instance is resized. This behavior can be configured by the administrator but it can also be overridden on a per image basis using the ``os_shutdown_timeout`` image metadata setting. This allows different types of operating systems to specify how much time they need to shut down cleanly. See :glance-doc:`Useful image properties ` for details. Resizing can take some time. During this time, the instance status will be ``RESIZE``: .. code-block:: console $ openstack server list +----------------------+----------------+--------+-----------------------------------------+ | ID | Name | Status | Networks | +----------------------+----------------+--------+-----------------------------------------+ | 67bc9a9a-5928-47c... | myCirrosServer | RESIZE | admin_internal_net=192.168.111.139 | +----------------------+----------------+--------+-----------------------------------------+ When the resize completes, the instance status will be ``VERIFY_RESIZE``. You can now confirm the resize to change the status to ``ACTIVE``: .. code-block:: console $ openstack server resize confirm SERVER .. note:: The resized server may be automatically confirmed based on the administrator's configuration of the deployment. If the resize does not work as expected, you can revert the resize. This will revert the instance to the old flavor and change the status to ``ACTIVE``: .. code-block:: console $ openstack server resize revert SERVER ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/security-groups.rst0000664000175000017500000004036100000000000021527 0ustar00zuulzuul00000000000000=============== Security Groups =============== Security groups are sets of IP filter rules that are applied to all servers, which define networking access to the server. Group rules are project-specific; project members can edit the default rules for their group and add new rule sets. All projects have a ``default`` security group which is applied to any port that has no other security group defined. Unless you change the default, this security group denies all incoming traffic and allows only outgoing traffic from your instance. It's important to note early on that security groups and their quota are resources of :neutron-doc:`the networking service, Neutron `. They are modelled as an attribute of ports rather than servers. With this said, Nova provides utility APIs that allow users to add and remove security groups from all ports attached to a server. 
In addition, it is possible to specify security groups to configure for newly created ports when creating a new server, and to retrieve the combined set of security groups for all ports attached to a server. .. note:: Nova previously provided its own security group APIs. These were proxy APIs for Neutron APIs and have been deprecated since microversion 2.36. Usage ----- Security group-related operations can be broken down into three categories: operations on security groups and security group rules themselves, operations on ports, and operations on servers. .. rubric:: Security group and security group rule operations By default, security groups can be created by any project member. For example: .. code-block:: console $ openstack security group create --description ... .. tip:: When adding a new security group, you should pick a descriptive but brief name. This name shows up in brief descriptions of the servers that use it where the longer description field often does not. For example, seeing that a server is using security group ``http`` is much easier to understand than ``bobs_group`` or ``secgrp1``. Security groups are really only containers for rules. Security group rules define the actual IP filter rules that will be applied. Security groups deny everything by default, so rules indicate what is allowed. Typically, a security group rule will be configured with the following attributes: an IP protocol (one of ICMP, TCP, or UDP), a destination port or port range, and a remote IP range (in CIDR format). You create security group rules by specifying these attributes and the security group to which the rules should be added. For example: .. code-block:: console $ openstack security group rule create \ --protocol --dst-port \ --remote-ip \ .. note:: The ```` argument takes the form of ``port`` or ``from-port:to-port``. This specifies the range of local ports that connections are allowed to access, **not** the source and destination ports of the connection. Alternatively, rather than specifying a remote IP range, you can specify a remote security group. A remote group will allow requests with the specified protocol(s) and port(s) from any server with said port. If you create a security group rule with remote group ``foo`` and apply the security group to server ``bar``, ``bar`` will be able to receive matching traffic from any other server with security group ``foo``. Security group rules with remote security groups are created in much the same way as security group rules with remote IPs. For example: .. code-block:: console $ openstack security group rule create \ --protocol --dst-port \ --remote-group \ Once created, both security groups and security group rules can be listed. For example: .. code-block:: console $ openstack security group list $ openstack security group rule list Likewise, you can inspect an individual security group or security group rule. For example: .. code-block:: console $ openstack security group show $ openstack security group rule show Finally, you can delete security groups. This will delete both the security group and associated security group rules. For example: .. code-block:: console $ openstack security group delete Alternatively, you can delete individual rules from an existing group. For example: .. code-block:: console $ openstack security group rule delete .. rubric:: Port operations Security groups are an attribute of ports. By default, Neutron will assign the ``default`` security group to all newly created ports. 
It is possible to disable this behavior. For example: .. code-block:: console $ openstack port create --no-security-group ... It is possible to specify different security groups when creating a new port. For example: .. code-block:: console $ openstack port create --security-group ... .. note:: If you specify a security group when creating the port, the ``default`` security group **will not** be added to the port. If you wish to add the ``default`` security group, you will need to specify this also. Additional security groups can also be added or removed from existing ports. For example: .. code-block:: console $ openstack port set --security-group ... $ openstack port unset --security-group ... It is also possible to remove all security groups from a port. For example: .. code-block:: console $ openstack port set --no-security-group .. rubric:: Server operations It is possible to manipulate and configure security groups on an server-wide basis. When you create a new server, networks can be either automatically allocated (a feature known as ":neutron-doc:`Get me a network `") or manually configured. In both cases, attaching a network to a server results in the creation of a port. It is possible to specify one or more security groups to assign to these ports. For example: .. code-block:: console $ openstack server create --security-group ... .. important:: These security groups will only apply to automatically created ports. They will not apply to any pre-created ports attached to the server at boot. If no security group is specified, the ``default`` security group for the current project will be used. It is not possible to specify that no security group should be applied to automatically created ports. If you wish to remove the ``default`` security group from a server's ports, you will need to use pre-created ports or remove the security group after the server has been created. Once a server has been created, it is possible to add or remove a security group from all ports attached to the server. For example: .. code-block:: console $ openstack server add security group $ openstack server remove security group .. note:: Unless customised, the ``default`` security group allows egress traffic from the server. If you remove this group and do not allow egress traffic via another security group, your server will no longer be able to communicate with the :ref:`metadata service `. It is also possible to view the security groups associated with a server. For example: .. code-block:: console $ openstack server show -f value -c security_groups .. important:: As security groups are an attribute of ports rather than servers, this value is the combined set of security groups assigned to all ports. Different ports may have different sets of security groups. You can inspect the port with ``openstack port show`` to see the exact security groups assigned to an individual port. Example ------- Let's look through a worked example of creating security groups for a deployment of 3 web server hosts and 2 database hosts. First, we'll configure the security group that will allow HTTP traffic to the web server hosts. .. code-block:: console $ openstack security group create \ --description "Allows Web traffic anywhere on the Internet." 
\ web +-----------------+--------------------------------------------------------------------------------------------------------------------------+ | Field | Value | +-----------------+--------------------------------------------------------------------------------------------------------------------------+ | created_at | 2016-11-03T13:50:53Z | | description | Allows Web traffic anywhere on the Internet. | | headers | | | id | c0b92b20-4575-432a-b4a9-eaf2ad53f696 | | name | web | | project_id | 5669caad86a04256994cdf755df4d3c1 | | project_id | 5669caad86a04256994cdf755df4d3c1 | | revision_number | 1 | | rules | created_at='2016-11-03T13:50:53Z', direction='egress', ethertype='IPv4', id='4d8cec94-e0ee-4c20-9f56-8fb67c21e4df', | | | project_id='5669caad86a04256994cdf755df4d3c1', revision_number='1', updated_at='2016-11-03T13:50:53Z' | | | created_at='2016-11-03T13:50:53Z', direction='egress', ethertype='IPv6', id='31be2ad1-be14-4aef-9492-ecebede2cf12', | | | project_id='5669caad86a04256994cdf755df4d3c1', revision_number='1', updated_at='2016-11-03T13:50:53Z' | | updated_at | 2016-11-03T13:50:53Z | +-----------------+--------------------------------------------------------------------------------------------------------------------------+ Once created, we can add a new group rule to allow ingress HTTP traffic on port 80: .. code-block:: console $ openstack security group rule create \ --protocol tcp --dst-port 80:80 --remote-ip 0.0.0.0/0 \ web +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | created_at | 2016-11-06T14:02:00Z | | description | | | direction | ingress | | ethertype | IPv4 | | headers | | | id | 2ba06233-d5c8-43eb-93a9-8eaa94bc9eb5 | | port_range_max | 80 | | port_range_min | 80 | | project_id | 5669caad86a04256994cdf755df4d3c1 | | project_id | 5669caad86a04256994cdf755df4d3c1 | | protocol | tcp | | remote_group_id | None | | remote_ip_prefix | 0.0.0.0/0 | | revision_number | 1 | | security_group_id | c0b92b20-4575-432a-b4a9-eaf2ad53f696 | | updated_at | 2016-11-06T14:02:00Z | +-------------------+--------------------------------------+ You can create complex rule sets by creating additional rules. In this instance we want to pass both HTTP and HTTPS traffic so we'll add an additional rule: .. code-block:: console $ openstack security group rule create \ --protocol tcp --dst-port 443:443 --remote-ip 0.0.0.0/0 \ web +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | created_at | 2016-11-06T14:09:20Z | | description | | | direction | ingress | | ethertype | IPv4 | | headers | | | id | 821c3ef6-9b21-426b-be5b-c8a94c2a839c | | port_range_max | 443 | | port_range_min | 443 | | project_id | 5669caad86a04256994cdf755df4d3c1 | | project_id | 5669caad86a04256994cdf755df4d3c1 | | protocol | tcp | | remote_group_id | None | | remote_ip_prefix | 0.0.0.0/0 | | revision_number | 1 | | security_group_id | c0b92b20-4575-432a-b4a9-eaf2ad53f696 | | updated_at | 2016-11-06T14:09:20Z | +-------------------+--------------------------------------+ .. note:: Despite only outputting the newly added rule, this operation is additive (both rules are created and enforced). That's one security group wrapped up. Next, the database hosts. These are running MySQL and we would like to both restrict traffic to the relevant port (``3306`` in this case) **and** to restrict ingress traffic to requests from the web server hosts. 
While we could specify a CIDR for the IP addresses of the web servers, a preferred solution is to configure a source group. This will allow us to dynamically add and remove web server hosts with the ``web`` security group applied without needing to modify the security group for the database hosts. Let's create the security group and the necessary rule: .. code-block:: console $ openstack security group create database $ openstack security group rule create \ --protocol tcp --dst-port 3306 --remote-group web \ database The ``database`` rule will now allows access to MySQL's default port from any server that uses the ``web`` group. Now that we've created the security group and rules, let's list them to verify everything: .. code-block:: console $ openstack security group list +--------------------------------------+----------+-------------+ | Id | Name | Description | +--------------------------------------+----------+-------------+ | 73580272-d8fa-4927-bd55-c85e43bc4877 | default | default | | c0b92b20-4575-432a-b4a9-eaf2ad53f696 | web | web server | | 40e1e336-e207-494f-a3ec-a3c222336b22 | database | database | +--------------------------------------+----------+-------------+ We can also inspect the rules for the security group. Let's look at the ``web`` security group: .. code-block:: console $ openstack security group rule list web +--------------------------------------+-------------+-----------+-----------------+-----------------------+ | ID | IP Protocol | IP Range | Port Range | Remote Security Group | +--------------------------------------+-------------+-----------+-----------------+-----------------------+ | 2ba06233-d5c8-43eb-93a9-8eaa94bc9eb5 | tcp | 0.0.0.0/0 | 80:80 | None | | 821c3ef6-9b21-426b-be5b-c8a94c2a839c | tcp | 0.0.0.0/0 | 443:443 | None | +--------------------------------------+-------------+-----------+-----------------+-----------------------+ Assuming everything looks correct, you can now use these security groups when creating your new servers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/server-groups.rst0000664000175000017500000000516300000000000021167 0ustar00zuulzuul00000000000000============= Server Groups ============= Server groups provide a mechanism for indicating the locality of servers relative to other servers. They allow you to indicate whether servers should run on the same host (affinity) or different hosts (anti-affinity). Affinity is advantageous if you wish to minimise network latency, while anti-affinity can improve fault-tolerance and load distribution. .. note:: Server groups are useful for separating or grouping workloads but should not generally be relied on to provide HA. Instead, consider using availability zones. Unlike server groups, availability zones can only be configured by admins but they are often used to model failure domains, particularly in larger deployments. For more information, refer to :doc:`/user/availability-zones`. Server groups can be configured with a policy and rules. There are currently four policies supported: ``affinity`` Restricts instances belonging to the server group to the same host. ``anti-affinity`` Restricts instances belonging to the server group to separate hosts. ``soft-affinity`` Attempts to restrict instances belonging to the server group to the same host. Where it is not possible to schedule all instances on one host, they will be scheduled together on as few hosts as possible. .. 
note:: Requires API microversion 2.15 or later. ``soft-anti-affinity`` Attempts to restrict instances belonging to the server group to separate hosts. Where it is not possible to schedule all instances to separate hosts, they will be scheduled on as many separate hosts as possible. .. note:: Requires API microversion 2.15 or later. There is currently one rule supported: ``max_server_per_host`` Indicates the max number of instances that can be scheduled to any given host when using the ``anti-affinity`` policy. This rule is not compatible with other policies. .. note:: Requires API microversion 2.64 or later. Usage ----- Server groups can be configured and used by end-users. For example: .. code-block:: console $ openstack --os-compute-api-version 2.64 server group create \ --policy POLICY --rule RULE NAME Once a server group has been created, you can use it when creating a server. This is achieved using the ``--hint`` option. For example: .. code-block:: console $ openstack server create \ --hint group=SERVER_GROUP_UUID ... NAME Once created, a server group cannot be modified. In addition, a server cannot move between server groups. In both cases, this is because doing so would require potentially moving the server to satisfy the server group policy. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/support-matrix.ini0000664000175000017500000014670300000000000021337 0ustar00zuulzuul00000000000000# Copyright (C) 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # # # ========================================= # Nova Hypervisor Feature Capability Matrix # ========================================= # # This obsoletes the information previously at # # https://wiki.openstack.org/wiki/HypervisorSupportMatrix # # This file contains a specification of what feature capabilities each # hypervisor driver in Nova is able to support. Feature capabilities include # what API operations are supported, what storage / networking features can be # used and what aspects of the guest machine can be configured. The capabilities # can be considered to be structured into nested groups, but in this file they # have been flattened for ease of representation. The section names represent # the group structure. At the top level there are the following groups defined # # - operation - public API operations # - storage - host storage configuration options # - networking - host networking configuration options # - guest - guest hardware configuration options # # When considering which capabilities should be marked as mandatory, # consider the general guiding principles listed in the support-matrix.rst # file # # The 'status' field takes possible values # # - mandatory - unconditionally required to be implemented # - optional - optional to support, nice to have # - choice(group) - at least one of the options within the named group # must be implemented # - conditional(cond) - required, if the referenced condition is met. 
# # The value against each 'driver.XXXX' entry refers to the level of the # implementation of the feature in that driver # # - complete - fully implemented, expected to work at all times # - partial - implemented, but with caveats about when it will work # eg some configurations or hardware or guest OS may not # support it # - missing - not implemented at all # # In the case of the driver being marked as 'partial', then # 'driver-notes.XXX' entry should be used to explain the caveats # around the implementation. # # The 'cli' field takes a list of nova client commands, separated by semicolon. # These CLi commands are related to that feature. # Example: # cli=nova list;nova show # List of driver impls we are going to record info for later # This list only covers drivers that are in the Nova source # tree. Out of tree drivers should maintain their own equivalent # document, and merge it with this when their code merges into # Nova core. [driver.libvirt-kvm-x86] title=Libvirt KVM (x86) [driver.libvirt-kvm-aarch64] title=Libvirt KVM (aarch64) [driver.libvirt-kvm-ppc64] title=Libvirt KVM (ppc64) [driver.libvirt-kvm-s390x] title=Libvirt KVM (s390x) [driver.libvirt-qemu-x86] title=Libvirt QEMU (x86) [driver.libvirt-lxc] title=Libvirt LXC [driver.libvirt-vz-vm] title=Libvirt Virtuozzo VM [driver.libvirt-vz-ct] title=Libvirt Virtuozzo CT [driver.vmware] title=VMware vCenter [driver.ironic] title=Ironic [driver.zvm] title=zVM [operation.attach-volume] title=Attach block volume to instance status=optional notes=The attach volume operation provides a means to hotplug additional block storage to a running instance. This allows storage capabilities to be expanded without interruption of service. In a cloud model it would be more typical to just spin up a new instance with large storage, so the ability to hotplug extra storage is for those cases where the instance is considered to be more of a pet than cattle. Therefore this operation is not considered to be mandatory to support. cli=nova volume-attach driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=complete driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=missing driver.zvm=missing [operation.attach-tagged-volume] title=Attach tagged block device to instance status=optional notes=Attach a block device with a tag to an existing server instance. See "Device tags" for more information. cli=nova volume-attach [--tag ] driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=missing driver.zvm=missing [operation.detach-volume] title=Detach block volume from instance status=optional notes=See notes for attach volume operation. cli=nova volume-detach driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=complete driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=missing driver.zvm=missing [operation.extend-volume] title=Extend block volume attached to instance status=optional notes=The extend volume operation provides a means to extend the size of an attached volume. 
This allows volume size to be expanded without interruption of service. In a cloud model it would be more typical to just spin up a new instance with large storage, so the ability to extend the size of an attached volume is for those cases where the instance is considered to be more of a pet than cattle. Therefore this operation is not considered to be mandatory to support. cli=cinder extend driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=unknown driver.libvirt-kvm-s390x=unknown driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=unknown driver.libvirt-vz-ct=missing driver.zvm=missing [operation.attach-interface] title=Attach virtual network interface to instance status=optional notes=The attach interface operation provides a means to hotplug additional interfaces to a running instance. Hotplug support varies between guest OSes and some guests require a reboot for new interfaces to be detected. This operation allows interface capabilities to be expanded without interruption of service. In a cloud model it would be more typical to just spin up a new instance with more interfaces. cli=nova interface-attach driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.zvm=missing [operation.attach-tagged-interface] title=Attach tagged virtual network interface to instance status=optional notes=Attach a virtual network interface with a tag to an existing server instance. See "Device tags" for more information. cli=nova interface-attach [--tag ] driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=missing driver.zvm=missing [operation.detach-interface] title=Detach virtual network interface from instance status=optional notes=See notes for attach-interface operation. cli=nova interface-detach driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.zvm=missing [operation.maintenance-mode] title=Set the host in a maintenance mode status=optional notes=This operation allows a host to be placed into maintenance mode, automatically triggering migration of any running instances to an alternative host and preventing new instances from being launched. This is not considered to be a mandatory operation to support. The driver methods to implement are "host_maintenance_mode" and "set_host_enabled". 
cli=nova host-update driver.libvirt-kvm-x86=missing driver.libvirt-kvm-aarch64=missing driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=missing driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=missing [operation.evacuate] title=Evacuate instances from a host status=optional notes=A possible failure scenario in a cloud environment is the outage of one of the compute nodes. In such a case the instances of the down host can be evacuated to another host. It is assumed that the old host is unlikely ever to be powered back on, otherwise the evacuation attempt will be rejected. When the instances get moved to the new host, their volumes get re-attached and the locally stored data is dropped. That happens in the same way as a rebuild. This is not considered to be a mandatory operation to support. cli=nova evacuate ;nova host-evacuate driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=unknown driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=unknown driver.libvirt-lxc=unknown driver.vmware=unknown driver.ironic=unknown driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=unknown [operation.rebuild] title=Rebuild instance status=optional notes=A possible use case is additional attributes need to be set to the instance, nova will purge all existing data from the system and remakes the VM with given information such as 'metadata' and 'personalities'. Though this is not considered to be a mandatory operation to support. cli=nova rebuild driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete driver.vmware=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.zvm=unknown [operation.rebuild-volume-backed] title=Rebuild volume backed instance status=optional notes=This will wipe out all existing data in the root volume of a volume backed instance. This is available from microversion 2.93 and onwards. cli=openstack server rebuild --reimage-boot-volume --image driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=unknown driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=missing [operation.get-guest-info] title=Guest instance status status=mandatory notes=Provides realtime information about the power state of the guest instance. Since the power state is used by the compute manager for tracking changes in guests, this operation is considered mandatory to support. cli= driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete driver.vmware=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.zvm=complete [operation.get-host-uptime] title=Guest host uptime status=optional notes=Returns the result of host uptime since power on, it's used to report hypervisor status. 
cli= driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.zvm=complete [operation.get-host-ip] title=Guest host ip status=optional notes=Returns the ip of this host, it's used when doing resize and migration. cli= driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete driver.vmware=complete driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.zvm=complete [operation.live-migrate] title=Live migrate instance across hosts status=optional notes=Live migration provides a way to move an instance off one compute host, to another compute host. Administrators may use this to evacuate instances from a host that needs to undergo maintenance tasks, though of course this may not help if the host is already suffering a failure. In general instances are considered cattle rather than pets, so it is expected that an instance is liable to be killed if host maintenance is required. It is technically challenging for some hypervisors to provide support for the live migration operation, particularly those built on the container based virtualization. Therefore this operation is not considered mandatory to support. cli=nova live-migration ;nova host-evacuate-live driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=missing driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=complete driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.zvm=missing [operation.force-live-migration-to-complete] title=Force live migration to complete status=optional notes=Live migration provides a way to move a running instance to another compute host. But it can sometimes fail to complete if an instance has a high rate of memory or disk page access. This operation provides the user with an option to assist the progress of the live migration. The mechanism used to complete the live migration depends on the underlying virtualization subsystem capabilities. If libvirt/qemu is used and the post-copy feature is available and enabled then the force complete operation will cause a switch to post-copy mode. Otherwise the instance will be suspended until the migration is completed or aborted. cli=nova live-migration-force-complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=missing driver-notes.libvirt-kvm-x86=Requires libvirt>=1.3.3, qemu>=2.5.0 driver.libvirt-kvm-ppc64=complete driver-notes.libvirt-kvm-ppc64=Requires libvirt>=1.3.3, qemu>=2.5.0 driver.libvirt-kvm-s390x=complete driver-notes.libvirt-kvm-s390x=Requires libvirt>=1.3.3, qemu>=2.5.0 driver.libvirt-qemu-x86=complete driver-notes.libvirt-qemu-x86=Requires libvirt>=1.3.3, qemu>=2.5.0 driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=missing [operation.abort-in-progress-live-migration] title=Abort an in-progress or queued live migration status=optional notes=Live migration provides a way to move a running instance to another compute host. 
But it can sometimes need a large amount of time to complete if an instance has a high rate of memory or disk page access or is stuck in queued status if there are too many in-progress live migration jobs in the queue. This operation provides the user with an option to abort in-progress live migrations. When the live migration job is still in "queued" or "preparing" status, it can be aborted regardless of the type of underneath hypervisor, but once the job status changes to "running", only some of the hypervisors support this feature. cli=nova live-migration-abort driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=missing driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=unknown driver.libvirt-vz-ct=unknown driver.zvm=missing [operation.launch] title=Launch instance status=mandatory notes=Importing pre-existing running virtual machines on a host is considered out of scope of the cloud paradigm. Therefore this operation is mandatory to support in drivers. cli= driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete driver.vmware=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.zvm=complete [operation.pause] title=Stop instance CPUs (pause) status=optional notes=Stopping an instances CPUs can be thought of as roughly equivalent to suspend-to-RAM. The instance is still present in memory, but execution has stopped. The problem, however, is that there is no mechanism to inform the guest OS that this takes place, so upon unpausing, its clocks will no longer report correct time. For this reason hypervisor vendors generally discourage use of this feature and some do not even implement it. Therefore this operation is considered optional to support in drivers. cli=nova pause driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=missing driver.zvm=complete [operation.reboot] title=Reboot instance status=optional notes=It is reasonable for a guest OS administrator to trigger a graceful reboot from inside the instance. A host initiated graceful reboot requires guest co-operation and a non-graceful reboot can be achieved by a combination of stop+start. Therefore this operation is considered optional. cli=nova reboot driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete driver.vmware=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.zvm=complete [operation.rescue] title=Rescue instance status=optional notes=The rescue operation starts an instance in a special configuration whereby it is booted from an special root disk image. The goal is to allow an administrator to recover the state of a broken virtual machine. In general the cloud model considers instances to be cattle, so if an instance breaks the general expectation is that it be thrown away and a new instance created. Therefore this operation is considered optional to support in drivers. 
cli=nova rescue driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.zvm=missing [operation.resize] title=Resize instance status=optional notes=The resize operation allows the user to change a running instance to match the size of a different flavor from the one it was initially launched with. There are many different flavor attributes that potentially need to be updated. In general it is technically challenging for a hypervisor to support the alteration of all relevant config settings for a running instance. Therefore this operation is considered optional to support in drivers. cli=nova resize driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=complete driver.ironic=missing driver.libvirt-vz-vm=complete driver-notes.vz-vm=Resizing Virtuozzo instances implies guest filesystem resize also driver.libvirt-vz-ct=complete driver-notes.vz-ct=Resizing Virtuozzo instances implies guest filesystem resize also driver.zvm=missing [operation.resume] title=Restore instance status=optional notes=See notes for the suspend operation cli=nova resume driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=complete driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.zvm=missing [operation.set-admin-password] title=Set instance admin password status=optional notes=Provides a mechanism to (re)set the password of the administrator account inside the instance operating system. This requires that the hypervisor has a way to communicate with the running guest operating system. Given the wide range of operating systems in existence it is unreasonable to expect this to be practical in the general case. The configdrive and metadata service both provide a mechanism for setting the administrator password at initial boot time. In the case where this operation were not available, the administrator would simply have to login to the guest and change the password in the normal manner, so this is just a convenient optimization. Therefore this operation is not considered mandatory for drivers to support. cli=nova set-password driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver-notes.libvirt-kvm-x86=Requires libvirt>=1.2.16 and hw_qemu_guest_agent. driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=complete driver-notes.libvirt-qemu-x86=Requires libvirt>=1.2.16 and hw_qemu_guest_agent. driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=complete driver-notes.libvirt-vz-vm=Requires libvirt>=2.0.0 driver.libvirt-vz-ct=complete driver-notes.libvirt-vz-ct=Requires libvirt>=2.0.0 driver.zvm=missing [operation.snapshot] title=Save snapshot of instance disk status=optional notes=The snapshot operation allows the current state of the instance root disk to be saved and uploaded back into the glance image repository. The instance can later be booted again using this saved image. 
This is in effect making the ephemeral instance root disk into a semi-persistent storage, in so much as it is preserved even though the guest is no longer running. In general though, the expectation is that the root disks are ephemeral so the ability to take a snapshot cannot be assumed. Therefore this operation is not considered mandatory to support. cli=nova image-create driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=complete driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.zvm=complete [operation.suspend] title=Suspend instance status=optional notes=Suspending an instance can be thought of as roughly equivalent to suspend-to-disk. The instance no longer consumes any RAM or CPUs, with its live running state having been preserved in a file on disk. It can later be restored, at which point it should continue execution where it left off. As with stopping instance CPUs, it suffers from the fact that the guest OS will typically be left with a clock that is no longer telling correct time. For container based virtualization solutions, this operation is particularly technically challenging to implement and is an area of active research. This operation tends to make more sense when thinking of instances as pets, rather than cattle, since with cattle it would be simpler to just terminate the instance instead of suspending. Therefore this operation is considered optional to support. cli=nova suspend driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=complete driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.zvm=missing [operation.swap-volume] title=Swap block volumes status=optional notes=The swap volume operation is a mechanism for changing a running instance so that its attached volume(s) are backed by different storage in the host. An alternative to this would be to simply terminate the existing instance and spawn a new instance with the new storage. In other words this operation is primarily targeted towards the pet use case rather than cattle, however, it is required for volume migration to work in the volume service. This is considered optional to support. cli=nova volume-update driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=missing driver.zvm=missing [operation.terminate] title=Shutdown instance status=mandatory notes=The ability to terminate a virtual machine is required in order for a cloud user to stop utilizing resources and thus avoid indefinitely ongoing billing. Therefore this operation is mandatory to support in drivers. cli=nova delete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete driver-notes.libvirt-lxc=Fails in latest Ubuntu Trusty kernel from security repository (3.13.0-76-generic), but works in upstream 3.13.x kernels as well as default Ubuntu Trusty latest kernel (3.13.0-58-generic). 
driver.vmware=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.zvm=complete [operation.trigger-crash-dump] title=Trigger crash dump status=optional notes=The trigger crash dump operation is a mechanism for triggering a crash dump in an instance. The feature is typically implemented by injecting an NMI (Non-maskable Interrupt) into the instance. It provides a means to dump the production memory image as a dump file which is useful for users. Therefore this operation is considered optional to support. cli=nova trigger-crash-dump driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=complete driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=missing [operation.unpause] title=Resume instance CPUs (unpause) status=optional notes=See notes for the "Stop instance CPUs" operation cli=nova unpause driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.zvm=complete [guest.disk.autoconfig] title=Auto configure disk status=optional notes=Partition and resize FS to match the size specified by flavors.root_gb, As this is hypervisor specific feature. Therefore this operation is considered optional to support. cli= driver.libvirt-kvm-x86=missing driver.libvirt-kvm-aarch64=missing driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=missing driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=complete [guest.disk.rate-limit] title=Instance disk I/O limits status=optional notes=The ability to set rate limits on virtual disks allows for greater performance isolation between instances running on the same host storage. It is valid to delegate scheduling of I/O operations to the hypervisor with its default settings, instead of doing fine grained tuning. Therefore this is not considered to be an mandatory configuration to support. cli=nova limits driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=missing [guest.setup.configdrive] title=Config drive support status=choice(guest.setup) notes=The config drive provides an information channel into the guest operating system, to enable configuration of the administrator password, file injection, registration of SSH keys, etc. Since cloud images typically ship with all login methods locked, a mechanism to set the administrator password or keys is required to get login access. Alternatives include the metadata service and disk injection. At least one of the guest setup mechanisms is required to be supported by drivers, in order to enable login access. 
cli= driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver-notes.libvirt-kvm-aarch64=Requires kernel with proper config (oldest known: Ubuntu 4.13 HWE) driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete driver.vmware=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=missing driver.zvm=complete [guest.setup.inject.file] title=Inject files into disk image status=optional notes=This allows for the end user to provide data for multiple files to be injected into the root filesystem before an instance is booted. This requires that the compute node understand the format of the filesystem and any partitioning scheme it might use on the block device. This is a non-trivial problem considering the vast number of filesystems in existence. The problem of injecting files to a guest OS is better solved by obtaining via the metadata service or config drive. Therefore this operation is considered optional to support. cli= driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=missing [guest.setup.inject.networking] title=Inject guest networking config status=optional notes=This allows for static networking configuration (IP address, netmask, gateway and routes) to be injected directly into the root filesystem before an instance is booted. This requires that the compute node understand how networking is configured in the guest OS which is a non-trivial problem considering the vast number of operating system types. The problem of configuring networking is better solved by DHCP or by obtaining static config via config drive. Therefore this operation is considered optional to support. cli= driver.libvirt-kvm-x86=partial driver-notes.libvirt-kvm-x86=Only for Debian derived guests driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=partial driver-notes.libvirt-qemu-x86=Only for Debian derived guests driver.libvirt-lxc=missing driver.vmware=partial driver-notes.vmware=requires vmware tools installed driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=missing [console.rdp] title=Remote desktop over RDP status=choice(console) notes=This allows the administrator to interact with the graphical console of the guest OS via RDP. This provides a way to see boot up messages and login to the instance when networking configuration has failed, thus preventing a network based login. Some operating systems may prefer to emit messages via the serial console for easier consumption. Therefore support for this operation is not mandatory, however, a driver is required to support at least one of the listed console access operations. 
cli=nova get-rdp-console driver.libvirt-kvm-x86=missing driver.libvirt-kvm-aarch64=missing driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=missing driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=missing [console.serial.log] title=View serial console logs status=choice(console) notes=This allows the administrator to query the logs of data emitted by the guest OS on its virtualized serial port. For UNIX guests this typically includes all boot up messages and so is useful for diagnosing problems when an instance fails to successfully boot. Not all guest operating systems will be able to emit boot information on a serial console, others may only support graphical consoles. Therefore support for this operation is not mandatory, however, a driver is required to support at least one of the listed console access operations. cli=nova console-log driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=complete driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=complete [console.serial.interactive] title=Remote interactive serial console status=choice(console) notes=This allows the administrator to interact with the serial console of the guest OS. This provides a way to see boot up messages and login to the instance when networking configuration has failed, thus preventing a network based login. Not all guest operating systems will be able to emit boot information on a serial console, others may only support graphical consoles. Therefore support for this operation is not mandatory, however, a driver is required to support at least one of the listed console access operations. This feature was introduced in the Juno release with blueprint https://blueprints.launchpad.net/nova/+spec/serial-ports cli=nova get-serial-console driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=unknown driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=unknown driver.libvirt-lxc=unknown driver.vmware=missing driver.ironic=complete driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=missing [console.spice] title=Remote desktop over SPICE status=choice(console) notes=This allows the administrator to interact with the graphical console of the guest OS via SPICE. This provides a way to see boot up messages and login to the instance when networking configuration has failed, thus preventing a network based login. Some operating systems may prefer to emit messages via the serial console for easier consumption. Therefore support for this operation is not mandatory, however, a driver is required to support at least one of the listed console access operations. cli=nova get-spice-console driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=missing [console.vnc] title=Remote desktop over VNC status=choice(console) notes=This allows the administrator to interact with the graphical console of the guest OS via VNC. 
This provides a way to see boot up messages and login to the instance when networking configuration has failed, thus preventing a network based login. Some operating systems may prefer to emit messages via the serial console for easier consumption. Therefore support for this operation is not mandatory, however, a driver is required to support at least one of the listed console access operations. cli=nova get-vnc-console driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=complete driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.zvm=missing [storage.block] title=Block storage support status=optional notes=Block storage provides instances with direct attached virtual disks that can be used for persistent storage of data. As an alternative to direct attached disks, an instance may choose to use network based persistent storage. OpenStack provides object storage via the Swift service, or a traditional filesystem such as NFS may be used. Some types of instances may not require persistent storage at all, being simple transaction processing systems reading requests & sending results to and from the network. Therefore support for this configuration is not considered mandatory for drivers to support. cli= driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete driver.vmware=complete driver.ironic=complete driver.libvirt-vz-vm=partial driver.libvirt-vz-ct=missing driver.zvm=missing [storage.block.backend.fibrechannel] title=Block storage over fibre channel status=optional notes=To maximise performance of the block storage, it may be desirable to directly access fibre channel LUNs from the underlying storage technology on the compute hosts. Since this is just a performance optimization of the I/O path it is not considered mandatory to support. cli= driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=missing driver.zvm=missing [storage.block.backend.iscsi] title=Block storage over iSCSI status=condition(storage.block==complete) notes=If the driver wishes to support block storage, it is common to provide an iSCSI based backend to access the storage from cinder. This isolates the compute layer for knowledge of the specific storage technology used by Cinder, albeit at a potential performance cost due to the longer I/O path involved. If the driver chooses to support block storage, then this is considered mandatory to support, otherwise it is considered optional. cli= driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete driver.vmware=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=missing driver.zvm=missing [storage.block.backend.iscsi.auth.chap] title=CHAP authentication for iSCSI status=optional notes=If accessing the cinder iSCSI service over an untrusted LAN it is desirable to be able to enable authentication for the iSCSI protocol. 
CHAP is the commonly used authentication protocol for iSCSI. This is not considered mandatory to support. (?) cli= driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete driver.vmware=missing driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=missing driver.zvm=missing [storage.image] title=Image storage support status=mandatory notes=This refers to the ability to boot an instance from an image stored in the glance image repository. Without this feature it would not be possible to bootstrap from a clean environment, since there would be no way to get block volumes populated and reliance on external PXE servers is out of scope. Therefore this is considered a mandatory storage feature to support. cli=nova boot --image driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete driver.vmware=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.zvm=complete [operation.uefi-boot] title=uefi boot status=optional notes=This allows users to boot a guest with uefi firmware. cli= driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=complete driver.ironic=partial driver-notes.ironic=depends on hardware support driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=missing [operation.device-tags] title=Device tags status=optional notes=This allows users to set tags on virtual devices when creating a server instance. Device tags are used to identify virtual device metadata, as exposed in the metadata API and on the config drive. For example, a network interface tagged with "nic1" will appear in the metadata along with its bus (ex: PCI), bus address (ex: 0000:00:02.0), MAC address, and tag (nic1). If multiple networks are defined, the order in which they appear in the guest operating system will not necessarily reflect the order in which they are given in the server boot request. Guests should therefore not depend on device order to deduce any information about their network devices. Instead, device role tags should be used. Device tags can be applied to virtual network interfaces and block devices. cli=nova boot driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=unknown driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=unknown driver.zvm=missing [operation.quiesce] title=quiesce status=optional notes=Quiesce the specified instance to prepare for snapshots. For libvirt, guest filesystems will be frozen through qemu agent. 
cli= driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=missing [operation.unquiesce] title=unquiesce status=optional notes=See notes for the quiesce operation cli= driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=missing [operation.multiattach-volume] title=Attach block volume to multiple instances status=optional notes=The multiattach volume operation is an extension to the attach volume operation. It allows a single volume to be attached to multiple instances. This operation is not considered to be mandatory to support. Note that for the libvirt driver, this is only supported if qemu<2.10 or libvirt>=3.10. cli=nova volume-attach driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=missing driver.zvm=missing [operation.encrypted-volume] title=Attach encrypted block volume to server status=optional notes=This is the same as the attach volume operation except with an encrypted block device. Encrypted volumes are controlled via admin-configured volume types in the block storage service. Since attach volume is optional, this feature is also optional for compute drivers to support. cli=nova volume-attach driver.libvirt-kvm-x86=complete driver-notes.libvirt-kvm-x86=For native QEMU decryption of the encrypted volume (and rbd support), QEMU>=2.6.0 and libvirt>=2.2.0 are required and only the "luks" type provider is supported. Otherwise both "luks" and "cryptsetup" types are supported but not natively, i.e. not all volume types are supported. driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=unknown driver.libvirt-kvm-s390x=unknown driver.libvirt-qemu-x86=complete driver-notes.libvirt-qemu-x86=The same restrictions apply as KVM x86. driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=unknown driver.libvirt-vz-ct=missing driver.zvm=missing [operation.trusted-certs] title=Validate image with trusted certificates status=optional notes=Since trusted image certification validation is configurable by the cloud deployer it is considered optional. However, it is a virt-agnostic feature so there is no good reason that all virt drivers cannot support the feature since it is mostly just plumbing user requests through the virt driver when downloading images. cli=nova boot --trusted-image-certificate-id ... driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.zvm=missing [operation.file-backed-memory] title=File backed memory status=optional notes=The file backed memory feature in OpenStack allows a Nova node to serve guest memory from a file backing store.
This mechanism uses the libvirt file memory source, causing guest instance memory to be allocated as files within the libvirt memory backing directory. This is only supported if qemu>2.6 and libvirt>4.0.0 cli= driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=unknown driver.libvirt-kvm-s390x=unknown driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=missing [operation.report-cpu-traits] title=Report CPU traits status=optional notes=The report CPU traits feature in OpenStack allows a Nova node to report its CPU traits according to CPU mode configuration. This gives users the ability to boot instances based on desired CPU traits. cli= driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=missing [operation.port-with-resource-request] title=SR-IOV ports with resource request status=optional notes=To support neutron SR-IOV ports (vnic_type=direct or vnic_type=macvtap) with resource request the virt driver needs to include the 'parent_ifname' key in each subdict which represents a VF under the 'pci_passthrough_devices' key in the dict returned from the ComputeDriver.get_available_resource() call. cli=nova boot --nic port-id ... driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=missing driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=missing [operation.boot-encrypted-vm] title=Boot instance with secure encrypted memory status=optional notes=The feature allows VMs to be booted with their memory hardware-encrypted with a key specific to the VM, to help protect the data residing in the VM against access from anyone other than the user of the VM. The Configuration and Security Guides specify usage of this feature. cli=openstack server create driver.libvirt-kvm-x86=partial driver-notes.libvirt-kvm-x86=This feature is currently only available with hosts which support the SEV (Secure Encrypted Virtualization) technology from AMD. driver.libvirt-kvm-aarch64=missing driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=missing driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=missing [operation.cache-images] title=Cache base images for faster instance boot status=optional notes=Drivers supporting this feature cache base images on the compute host so that subsequent boots need not incur the expense of downloading them. Partial support entails caching an image after the first boot that uses it. Complete support allows priming the cache so that the first boot also benefits. Image caching support is tunable via config options in the [image_cache] group. 
cli=openstack server create driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=unknown driver.vmware=partial driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.zvm=missing [operation.boot-emulated-tpm] title=Boot instance with an emulated trusted platform module (TPM) status=optional notes=Allows VMs to be booted with an emulated trusted platform module (TPM) device. Only lifecycle operations performed by the VM owner are supported, as the user's credentials are required to unlock the virtual device files on the host. cli=openstack server create driver.libvirt-kvm-x86=partial driver-notes.libvirt-kvm-x86=Move operations are not yet supported. driver.libvirt-kvm-aarch64=missing driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=partial driver-notes.libvirt-qemu-x86=Move operations are not yet supported. driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=missing [operation.boot-stateless-firmware] title=Boot instance with stateless firmware status=optional notes=The feature allows VMs to be booted with a read-only firmware image without an NVRAM file. This feature is especially useful for the confidential computing use case because it allows more complete measurement of elements involved in the boot chain and reduces the potential attack surface from hypervisors. cli=openstack server create driver.libvirt-kvm-x86=partial driver-notes.libvirt-kvm-x86=This feature is supported only with UEFI firmware driver.libvirt-kvm-aarch64=missing driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=missing driver.libvirt-lxc=missing driver.vmware=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.zvm=missing ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/support-matrix.rst0000664000175000017500000000326100000000000021357 0ustar00zuulzuul00000000000000Feature Support Matrix ====================== When considering which capabilities should be marked as mandatory, the following general guiding principles were applied: * **Inclusivity** - people have shown the ability to make effective use of a wide range of virtualization technologies with broadly varying feature sets. Aiming to keep the requirements as inclusive as possible avoids second-guessing what a user may wish to use the cloud compute service for. * **Bootstrapping** - a practical use case test is to consider that the starting point for the compute deployment is an empty data center with new machines and network connectivity. Then look at the minimum features required of a compute service in order to get user instances running and processing work over the network. * **Competition** - an early leader in the cloud compute service space was Amazon EC2. A sanity check for whether a feature should be mandatory is to consider whether it was available in the first public release of EC2. This had quite a narrow feature set, but nonetheless found very high usage in many use cases. So it serves to illustrate that many features need not be considered mandatory in order to get useful work done. * **Reality** - there are many virt drivers currently shipped with Nova, each with their own supported feature set.
Any feature which is missing in at least one virt driver that is already in-tree, must by inference be considered optional until all in-tree drivers support it. This does not rule out the possibility of a currently optional feature becoming mandatory at a later date, based on other principles above. .. support_matrix:: support-matrix.ini ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/unified-limits.rst0000664000175000017500000001543000000000000021264 0ustar00zuulzuul00000000000000===================== Unified Limits Quotas ===================== Since the **Nova 28.0.0 (2023.2 Bobcat)** release, it is recommended to use `Keystone unified limits`_ for Nova quota limits. For information about legacy quota limits, see the :doc:`legacy quota documentation `. Nova uses a quota system for setting limits on resources such as number of instances or amount of CPU that a specific project or user can use. Quota limits are set by admin and retrieved for enforcement using the `Keystone unified limits API`_. .. _Keystone unified limits: https://docs.openstack.org/keystone/latest/admin/unified-limits.html .. _Keystone unified limits API: https://docs.openstack.org/api-ref/identity/v3/index.html#unified-limits Types of quota -------------- Unified limit resource names for resources that are tracked as `resource classes`_ in the Placement API service follow the naming pattern of the ``class:`` prefix followed by the name of the resource class. For example: class:VCPU, class:PCPU, class:MEMORY_MB, class:DISK_GB, class:VGPU. .. list-table:: :header-rows: 1 :widths: 10 40 * - Quota name - Description * - class:VCPU - Number of shared CPU cores (VCPUs) allowed per project * - class:PCPU - Number of dedicated CPU cores (PCPUs) allowed per project * - servers - Number of instances allowed per project * - server_key_pairs - Number of key pairs allowed per user * - server_metadata_items - Number of metadata items allowed per instance * - class:MEMORY_MB - Megabytes of instance ram allowed per project * - server_groups - Number of server groups per project * - server_group_members - Number of servers per server group * - class:DISK_GB - Gigabytes of instance disk allowed per project * - class:$RESOURCE_CLASS - Any resource class in the Placement API service can have a quota limit specified for it (example: class:VGPU) .. _resource classes: https://docs.openstack.org/os-resource-classes/latest OpenStack CLI commands ---------------------- For full OpenStackClient documentation, see https://docs.openstack.org/python-openstackclient/latest/index.html. To list default limits for Nova: .. code-block:: console openstack registered limit list --service nova For example: .. 
code-block:: console $ openstack registered limit list --service nova +----------------------------------+----------------------------------+------------------------------------+---------------+-------------+-----------+ | ID | Service ID | Resource Name | Default Limit | Description | Region ID | +----------------------------------+----------------------------------+------------------------------------+---------------+-------------+-----------+ | be6dfeebb7c340e8b93b602d41fbff9b | 8b22bf8a66fa4524a522b2a21865bbf2 | servers | 10 | None | None | | 8a658096236549788e61f4fcbd5a4a12 | 8b22bf8a66fa4524a522b2a21865bbf2 | class:VCPU | 20 | None | None | | 63890db7d6a14401ba55e7f7022b95d0 | 8b22bf8a66fa4524a522b2a21865bbf2 | class:MEMORY_MB | 51200 | None | None | | 221ba1c19d2c4272952663828d659013 | 8b22bf8a66fa4524a522b2a21865bbf2 | server_metadata_items | 128 | None | None | | a32a9080be6b4a5481c16a91fe329e6f | 8b22bf8a66fa4524a522b2a21865bbf2 | server_key_pairs | 100 | None | None | | 86408bb7a0e542b18404ec7d348da820 | 8b22bf8a66fa4524a522b2a21865bbf2 | server_groups | 10 | None | None | | 17c4552c5aad4afca4813f37530fc897 | 8b22bf8a66fa4524a522b2a21865bbf2 | server_group_members | 10 | None | None | +----------------------------------+----------------------------------+------------------------------------+---------------+-------------+-----------+ To show details about a default limit: .. code-block:: console openstack registered limit show $REGISTERED_LIMIT_ID For example: .. code-block:: console $ openstack registered limit show 8a658096236549788e61f4fcbd5a4a12 +---------------+----------------------------------+ | Field | Value | +---------------+----------------------------------+ | default_limit | 20 | | description | None | | id | 8a658096236549788e61f4fcbd5a4a12 | | region_id | None | | resource_name | class:VCPU | | service_id | 8b22bf8a66fa4524a522b2a21865bbf2 | +---------------+----------------------------------+ To list project limits for Nova: .. code-block:: console openstack limit list --service nova For example: .. code-block:: console $ openstack limit list --service nova +----------------------------------+----------------------------------+----------------------------------+---------------+----------------+-------------+-----------+ | ID | Project ID | Service ID | Resource Name | Resource Limit | Description | Region ID | +----------------------------------+----------------------------------+----------------------------------+---------------+----------------+-------------+-----------+ | 8b3364b2241e4090aaaa49355c7a5b56 | 5cd3281595a9497ba87209701cd9f3f2 | 8b22bf8a66fa4524a522b2a21865bbf2 | class:VCPU | 5 | None | None | +----------------------------------+----------------------------------+----------------------------------+---------------+----------------+-------------+-----------+ To list limits for a particular project: .. code-block:: console openstack limit list --service nova --project $PROJECT_ID To show details about a project limit: .. code-block:: console openstack limit show $LIMIT_ID For example: .. 
code-block:: console $ openstack limit show 8b3364b2241e4090aaaa49355c7a5b56 +----------------+----------------------------------+ | Field | Value | +----------------+----------------------------------+ | description | None | | domain_id | None | | id | 8b3364b2241e4090aaaa49355c7a5b56 | | project_id | 5cd3281595a9497ba87209701cd9f3f2 | | region_id | None | | resource_limit | 5 | | resource_name | class:VCPU | | service_id | 8b22bf8a66fa4524a522b2a21865bbf2 | +----------------+----------------------------------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/user/wsgi.rst0000664000175000017500000000363100000000000017313 0ustar00zuulzuul00000000000000Using WSGI with Nova ==================== Since the version 2025.2 the only way to run the compute API and metadata API is using a generic HTTP server that supports WSGI_ (such as Apache_ or nginx_). The nova project provides two automatically generated entry points that support this: ``nova-api-wsgi`` and ``nova-metadata-wsgi``. These read ``nova.conf`` and ``api-paste.ini`` by default and generate the required module-level ``application`` that most WSGI servers require. If nova is installed using pip, these two scripts will be installed into whatever the expected ``bin`` directory is for the environment. The config files and config directory can be overridden via the ``OS_NOVA_CONFIG_FILES`` and ``OS_NOVA_CONFIG_DIR`` environment variables. File paths listed in ``OS_NOVA_CONFIG_FILES`` are relative to ``OS_NOVA_CONFIG_DIR`` and delimited by ``;``. The new scripts replace older experimental scripts that could be found in the ``nova/wsgi`` directory of the code repository. The new scripts are *not* experimental. When running the compute and metadata services with WSGI, sharing the compute and metadata service in the same process is not supported (as it is in the eventlet-based scripts). In devstack as of May 2017, the compute and metadata APIs are hosted by a Apache communicating with uwsgi_ via mod_proxy_uwsgi_. Inspecting the configuration created there can provide some guidance on one option for managing the WSGI scripts. It is important to remember, however, that one of the major features of using WSGI is that there are many different ways to host a WSGI application. Different servers make different choices about performance and configurability. .. _WSGI: https://www.python.org/dev/peps/pep-3333/ .. _apache: http://httpd.apache.org/ .. _nginx: http://nginx.org/en/ .. _uwsgi: https://uwsgi-docs.readthedocs.io/ .. 
_mod_proxy_uwsgi: http://uwsgi-docs.readthedocs.io/en/latest/Apache.html#mod-proxy-uwsgi ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.273608 nova-32.0.0/doc/test/0000775000175000017500000000000000000000000014306 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/test/redirect-tests.txt0000664000175000017500000001515700000000000020021 0ustar00zuulzuul00000000000000/nova/latest/addmethod.openstackapi.html 301 /nova/latest/contributor/api.html /nova/latest/admin/arch.html 301 /nova/latest/admin/architecture.html /nova/latest/admin/flavors2.html 301 /nova/latest/admin/flavors.html /nova/latest/admin/quotas2.html 301 /nova/latest/admin/quotas.html /nova/latest/admin/numa.html 301 /nova/latest/admin/cpu-topologies.html /nova/latest/aggregates.html 301 /nova/latest/user/aggregates.html /nova/latest/api_microversion_dev.html 301 /nova/latest/contributor/microversions.html /nova/latest/api_microversion_history.html 301 /nova/latest/reference/api-microversion-history.html /nova/latest/api_plugins.html 301 /nova/latest/contributor/api.html /nova/latest/architecture.html 301 /nova/latest/admin/architecture.html /nova/latest/block_device_mapping.html 301 /nova/latest/user/block-device-mapping.html /nova/latest/blueprints.html 301 /nova/latest/contributor/blueprints.html /nova/latest/cells.html 301 /nova/latest/admin/cells.html /nova/latest/code-review.html 301 /nova/latest/contributor/code-review.html /nova/latest/conductor.html 301 /nova/latest/user/conductor.html /nova/latest/development.environment.html 301 /nova/latest/contributor/development-environment.html /nova/latest/devref/api.html 301 /nova/latest/contributor/api.html /nova/latest/devref/cells.html 301 /nova/latest/admin/cells.html /nova/latest/devref/filter_scheduler.html 301 /nova/latest/admin/scheduling.html # catch all, if we hit something in devref assume it moved to # reference unless we have already triggered a hit above. 
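# (Editor's note, assumed detail: 'any-page.html' in the next entry is just a representative placeholder path used to exercise the catch-all rule above; each non-comment line in this file is a redirect test case of the form: request path, expected status, expected target.)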
/nova/latest/devref/any-page.html 301 /nova/latest/reference/any-page.html /nova/latest/feature_classification.html 301 /nova/latest/user/feature-classification.html /nova/latest/filter_scheduler.html 301 /nova/latest/admin/scheduling.html /nova/latest/gmr.html 301 /nova/latest/reference/gmr.html /nova/latest/how_to_get_involved.html 301 /nova/latest/contributor/how-to-get-involved.html /nova/latest/i18n.html 301 /nova/latest/reference/i18n.html /nova/latest/man/index.html 301 /nova/latest/cli/index.html /nova/latest/man/nova-api-metadata.html 301 /nova/latest/cli/nova-api-metadata.html /nova/latest/man/nova-api-os-compute.html 301 /nova/latest/cli/nova-api-os-compute.html /nova/latest/man/nova-api.html 301 /nova/latest/cli/nova-api.html # this is gone and never coming back, indicate that to the end users /nova/latest/man/nova-cells.html 301 /nova/latest/cli/nova-cells.html /nova/latest/man/nova-compute.html 301 /nova/latest/cli/nova-compute.html /nova/latest/man/nova-conductor.html 301 /nova/latest/cli/nova-conductor.html /nova/latest/man/nova-dhcpbridge.html 301 /nova/latest/cli/nova-dhcpbridge.html /nova/latest/man/nova-manage.html 301 /nova/latest/cli/nova-manage.html /nova/latest/man/nova-network.html 301 /nova/latest/cli/nova-network.html /nova/latest/man/nova-novncproxy.html 301 /nova/latest/cli/nova-novncproxy.html /nova/latest/man/nova-rootwrap.html 301 /nova/latest/cli/nova-rootwrap.html /nova/latest/man/nova-scheduler.html 301 /nova/latest/cli/nova-scheduler.html /nova/latest/man/nova-serialproxy.html 301 /nova/latest/cli/nova-serialproxy.html /nova/latest/man/nova-spicehtml5proxy.html 301 /nova/latest/cli/nova-spicehtml5proxy.html /nova/latest/man/nova-status.html 301 /nova/latest/cli/nova-status.html /nova/latest/notifications.html 301 /nova/latest/reference/notifications.html /nova/latest/placement.html 301 /nova/latest/user/placement.html /nova/latest/placement_dev.html 301 /nova/latest/contributor/placement.html /nova/latest/policies.html 301 /nova/latest/contributor/policies.html /nova/latest/policy_enforcement.html 301 /nova/latest/reference/policy-enforcement.html /nova/latest/process.html 301 /nova/latest/contributor/process.html /nova/latest/project_scope.html 301 /nova/latest/contributor/project-scope.html /nova/latest/quotas.html 301 /nova/latest/user/quotas.html /nova/latest/releasenotes.html 301 /nova/latest/contributor/releasenotes.html /nova/latest/rpc.html 301 /nova/latest/reference/rpc.html /nova/latest/sample_config.html 301 /nova/latest/configuration/sample-config.html /nova/latest/sample_policy.html 301 /nova/latest/configuration/sample-policy.html /nova/latest/scheduler_evolution.html 301 /nova/latest/reference/scheduler-evolution.html /nova/latest/services.html 301 /nova/latest/reference/services.html /nova/latest/stable_api.html 301 /nova/latest/reference/stable-api.html /nova/latest/support-matrix.html 301 /nova/latest/user/support-matrix.html /nova/latest/test_strategy.html 301 /nova/latest/contributor/testing.html /nova/latest/testing/libvirt-numa.html 301 /nova/latest/contributor/testing/libvirt-numa.html /nova/latest/testing/serial-console.html 301 /nova/latest/contributor/testing/serial-console.html /nova/latest/testing/zero-downtime-upgrade.html 301 /nova/latest/contributor/testing/zero-downtime-upgrade.html /nova/latest/threading.html 301 /nova/latest/reference/threading.html /nova/latest/upgrade.html 301 /nova/latest/admin/upgrades.html /nova/latest/user/aggregates.html 301 /nova/latest/admin/aggregates.html /nova/latest/user/architecture.html 
301 /nova/latest/admin/architecture.html /nova/latest/user/cells.html 301 /nova/latest/admin/cells.html /nova/latest/user/cellsv2_layout.html 301 /nova/latest/admin/cells.html /nova/latest/user/cellsv2-layout.html 301 /nova/latest/admin/cells.html /nova/latest/user/config-drive.html 301 /nova/latest/user/metadata.html /nova/latest/user/filter-scheduler.html 301 /nova/latest/admin/scheduling.html /nova/latest/user/metadata-service.html 301 /nova/latest/user/metadata.html /nova/latest/user/placement.html 301 /placement/latest/ /nova/latest/user/user-data.html 301 /nova/latest/user/metadata.html /nova/latest/user/upgrade.html 301 /nova/latest/admin/upgrades.html /nova/latest/user/vendordata.html 301 /nova/latest/user/metadata.html /nova/latest/vendordata.html 301 /nova/latest/user/metadata.html /nova/latest/vmstates.html 301 /nova/latest/reference/vm-states.html /nova/latest/wsgi.html 301 /nova/latest/user/wsgi.html /nova/latest/admin/arch.html 301 /nova/latest/admin/architecture.html /nova/latest/admin/adv-config.html 301 /nova/latest/admin/index.html /nova/latest/admin/configuration/schedulers.html 301 /nova/latest/admin/scheduling.html /nova/latest/admin/system-admin.html 301 /nova/latest/admin/index.html /nova/latest/admin/port_with_resource_request.html 301 /nova/latest/admin/ports-with-resource-requests.html /nova/latest/admin/manage-users.html 301 /nova/latest/admin/architecture.html /nova/latest/admin/mitigation-for-Intel-MDS-security-flaws.html 301 /nova/latest/admin/cpu-models.html /nova/latest/contributor/api-2.html 301 /nova/latest/contributor/api.html /nova/latest/admin/security-groups.html 301 /nova/latest/user/security-groups.html ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315688.881605 nova-32.0.0/etc/0000775000175000017500000000000000000000000013335 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.277608 nova-32.0.0/etc/nova/0000775000175000017500000000000000000000000014300 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/etc/nova/README-nova.conf.txt0000664000175000017500000000041000000000000017656 0ustar00zuulzuul00000000000000To generate the sample nova.conf file, run the following command from the top level of the nova directory: tox -egenconfig For a pre-generated example of the latest nova.conf, see: https://docs.openstack.org/nova/latest/configuration/sample-config.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/etc/nova/README-policy.yaml.txt0000664000175000017500000000044100000000000020233 0ustar00zuulzuul00000000000000Nova ==== To generate the sample nova policy.yaml file, run the following command from the top level of the nova directory: tox -egenpolicy For a pre-generated example of the latest nova policy.yaml, see: https://docs.openstack.org/nova/latest/configuration/sample-policy.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/etc/nova/api-paste.ini0000664000175000017500000000573500000000000016676 0ustar00zuulzuul00000000000000############ # Metadata # ############ [composite:metadata] use = egg:Paste#urlmap /: meta [pipeline:meta] pipeline = cors http_proxy_to_wsgi metaapp [app:metaapp] paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory 
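# The metadata pipeline defined above is what the nova-metadata-wsgi
# application serves: requests enter through the urlmap composite, pass the
# cors and http_proxy_to_wsgi middleware, and are handled by
# MetadataRequestHandler.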
############# # OpenStack # ############# [composite:osapi_compute] use = call:nova.api.openstack.urlmap:urlmap_factory /: oscomputeversions /v2: oscomputeversion_legacy_v2 /v2.1: oscomputeversion_v2 # v21 is an exactly feature match for v2, except it has more stringent # input validation on the wsgi surface (prevents fuzzing early on the # API). It also provides new features via API microversions which are # opt into for clients. Unaware clients will receive the same frozen # v2 API feature set, but with some relaxed validation /v2/+: openstack_compute_api_v21_legacy_v2_compatible /v2.1/+: openstack_compute_api_v21 [composite:openstack_compute_api_v21] use = call:nova.api.auth:pipeline_factory_v21 keystone = cors http_proxy_to_wsgi compute_req_id faultwrap request_log sizelimit osprofiler authtoken keystonecontext osapi_compute_app_v21 [composite:openstack_compute_api_v21_legacy_v2_compatible] use = call:nova.api.auth:pipeline_factory_v21 keystone = cors http_proxy_to_wsgi compute_req_id faultwrap request_log sizelimit osprofiler authtoken keystonecontext legacy_v2_compatible osapi_compute_app_v21 [filter:request_log] paste.filter_factory = nova.api.openstack.requestlog:RequestLog.factory [filter:compute_req_id] paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory [filter:faultwrap] paste.filter_factory = nova.api.openstack:FaultWrapper.factory [filter:osprofiler] paste.filter_factory = nova.profiler:WsgiMiddleware.factory [filter:sizelimit] paste.filter_factory = oslo_middleware:RequestBodySizeLimiter.factory [filter:http_proxy_to_wsgi] paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory [filter:legacy_v2_compatible] paste.filter_factory = nova.api.openstack:LegacyV2CompatibleWrapper.factory [app:osapi_compute_app_v21] paste.app_factory = nova.api.openstack.compute:APIRouterV21.factory [pipeline:oscomputeversions] pipeline = cors faultwrap request_log http_proxy_to_wsgi oscomputeversionapp [pipeline:oscomputeversion_v2] pipeline = cors compute_req_id faultwrap request_log http_proxy_to_wsgi oscomputeversionapp_v2 [pipeline:oscomputeversion_legacy_v2] pipeline = cors compute_req_id faultwrap request_log http_proxy_to_wsgi legacy_v2_compatible oscomputeversionapp_v2 [app:oscomputeversionapp] paste.app_factory = nova.api.openstack.compute.versions:Versions.factory [app:oscomputeversionapp_v2] paste.app_factory = nova.api.openstack.compute.versions:VersionsV2.factory ########## # Shared # ########## [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = nova [filter:keystonecontext] paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/etc/nova/logging_sample.conf0000664000175000017500000000305100000000000020135 0ustar00zuulzuul00000000000000[loggers] keys = root, nova [handlers] keys = stderr, stdout, watchedfile, syslog, null [formatters] keys = context, default [logger_root] level = WARNING handlers = null [logger_nova] level = INFO handlers = stderr qualname = nova [logger_amqp] level = WARNING handlers = stderr qualname = amqp [logger_amqplib] level = WARNING handlers = stderr qualname = amqplib [logger_sqlalchemy] level = WARNING handlers = stderr qualname = sqlalchemy # "level = INFO" logs SQL queries. # "level = DEBUG" logs SQL queries and results. 
# "level = WARNING" logs neither. (Recommended for production systems.) [logger_boto] level = WARNING handlers = stderr qualname = boto # NOTE(mikal): suds is used by the vmware driver, removing this will # cause many extraneous log lines for their tempest runs. Refer to # https://review.opendev.org/#/c/219225/ for details. [logger_suds] level = INFO handlers = stderr qualname = suds [logger_eventletwsgi] level = WARNING handlers = stderr qualname = eventlet.wsgi.server [handler_stderr] class = StreamHandler args = (sys.stderr,) formatter = context [handler_stdout] class = StreamHandler args = (sys.stdout,) formatter = context [handler_watchedfile] class = handlers.WatchedFileHandler args = ('nova.log',) formatter = context [handler_syslog] class = handlers.SysLogHandler args = ('/dev/log', handlers.SysLogHandler.LOG_USER) formatter = context [handler_null] class = logging.NullHandler formatter = default args = () [formatter_context] class = oslo_log.formatters.ContextFormatter [formatter_default] format = %(message)s ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/etc/nova/nova-config-generator.conf0000664000175000017500000000112100000000000021334 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/nova/nova.conf.sample wrap_width = 80 summarize = true namespace = nova.conf # we intentionally exclude oslo.db since we manage these options ourselves namespace = oslo.limit namespace = oslo.log namespace = oslo.messaging namespace = oslo.policy namespace = oslo.privsep namespace = oslo.service.periodic_task namespace = oslo.service.service namespace = oslo.middleware namespace = oslo.concurrency namespace = oslo.reports namespace = oslo.versionedobjects namespace = keystonemiddleware.auth_token namespace = osprofiler namespace = os_vif namespace = os_brick ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/etc/nova/nova-policy-generator.conf0000664000175000017500000000010500000000000021367 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/nova/policy.yaml.sample namespace = nova ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/etc/nova/release.sample0000664000175000017500000000011100000000000017114 0ustar00zuulzuul00000000000000[Nova] vendor = Fedora Project product = OpenStack Nova package = 1.fc18 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/etc/nova/rootwrap.conf0000664000175000017500000000216200000000000017025 0ustar00zuulzuul00000000000000# Configuration for nova-rootwrap # This file should be owned by (and only-writeable by) the root user [DEFAULT] # List of directories to load filter definitions from (separated by ','). # These directories MUST all be only writeable by root ! filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap # List of directories to search executables in, in case filters do not # explicitly specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writeable by root ! exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/sbin,/usr/local/bin # Enable logging to syslog # Default value is False use_syslog=False # Which syslog facility to use. # Valid values include auth, authpriv, syslog, local0, local1... # Default value is 'syslog' syslog_log_facility=syslog # Which messages to log. 
# INFO means log all usage # ERROR means only log unsuccessful attempts syslog_log_level=ERROR # Rootwrap daemon exits after this seconds of inactivity daemon_timeout=600 # Rootwrap daemon limits itself to that many file descriptors (Linux only) rlimit_nofile=1024 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.277608 nova-32.0.0/etc/nova/rootwrap.d/0000775000175000017500000000000000000000000016377 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/etc/nova/rootwrap.d/compute.filters0000664000175000017500000000114400000000000021445 0ustar00zuulzuul00000000000000# nova-rootwrap command filters for compute nodes # This file should be owned by (and only-writeable by) the root user [Filters] # os_brick.privileged.default oslo.privsep context privsep-rootwrap-os_brick: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.* # nova.privsep.sys_admin_pctxt oslo.privsep context privsep-rootwrap-sys_admin: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, nova.privsep.sys_admin_pctxt, --privsep_sock_path, /tmp/.* ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.277608 nova-32.0.0/gate/0000775000175000017500000000000000000000000013502 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/gate/README0000664000175000017500000000043600000000000014365 0ustar00zuulzuul00000000000000These are hooks to be used by the OpenStack infra test system. These scripts may be called by certain jobs at important times to do extra testing, setup, etc. They are really only relevant within the scope of the OpenStack infra system and are not expected to be useful to anyone else. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/gate/post_test_hook.sh0000775000175000017500000003246300000000000017115 0ustar00zuulzuul00000000000000#!/bin/bash -x MANAGE="/usr/local/bin/nova-manage" function archive_deleted_rows { # NOTE(danms): Run this a few times to make sure that we end # up with nothing more to archive if ! $MANAGE db archive_deleted_rows --verbose --before "$(date -d yesterday)" 2>&1 | grep 'Nothing was archived'; then echo "Archiving yesterday data should have done nothing" return 1 fi for i in `seq 30`; do if [[ $i -eq 1 ]]; then # This is just a test wrinkle to make sure we're covering the # non-all-cells (cell0) case, as we're not passing in the cell1 # config. $MANAGE db archive_deleted_rows --verbose --max_rows 50 --before "$(date -d tomorrow)" else $MANAGE db archive_deleted_rows --verbose --max_rows 1000 --before "$(date -d tomorrow)" --all-cells fi RET=$? if [[ $RET -gt 1 ]]; then echo Archiving failed with result $RET return $RET # When i = 1, we only archive cell0 (without --all-cells), so run at # least twice to ensure --all-cells are archived before considering # archiving complete. elif [[ $RET -eq 0 && $i -gt 1 ]]; then echo Archiving Complete break; fi done } function purge_db { $MANAGE db purge --all --verbose --all-cells RET=$? 
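    # 'nova-manage db purge' removes previously archived rows from the shadow
    # tables; any non-zero exit code is treated as a failure below and
    # propagated to the caller.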
if [[ $RET -eq 0 ]]; then echo Purge successful else echo Purge failed with result $RET return $RET fi } BASE=${BASE:-/opt/stack} source ${BASE}/devstack/functions-common source ${BASE}/devstack/lib/nova # This needs to go before 'set -e' because otherwise the intermediate runs of # 'nova-manage db archive_deleted_rows' returning 1 (normal and expected) would # cause this script to exit and fail. archive_deleted_rows set -e # This needs to go after 'set -e' because otherwise a failure to purge the # database would not cause this script to exit and fail. purge_db # We need to get the admin credentials to run the OSC CLIs for Placement. export OS_CLOUD=devstack-admin # Verify whether instances were archived from all cells. Admin credentials are # needed to list deleted instances across all projects. echo "Verifying that instances were archived from all cells" deleted_servers=$(openstack server list --deleted --all-projects -c ID -f value) # Fail if any deleted servers were found. if [[ -n "$deleted_servers" ]]; then echo "There were unarchived instances found after archiving; failing." exit 1 fi # TODO(mriedem): Consider checking for instances in ERROR state because # if there are any, we would expect them to retain allocations in Placement # and therefore we don't really need to check for leaked allocations. # Check for orphaned instance allocations in Placement which could mean # something failed during a test run and isn't getting cleaned up properly. echo "Looking for leaked resource provider allocations in Placement" LEAKED_ALLOCATIONS=0 for provider in $(openstack resource provider list -c uuid -f value); do echo "Looking for allocations for provider $provider" allocations=$(openstack resource provider show --allocations $provider \ -c allocations -f value) if [[ "$allocations" != "{}" ]]; then echo "Resource provider has allocations:" openstack resource provider show --allocations $provider LEAKED_ALLOCATIONS=1 fi done # Fail if there were any leaked allocations. if [[ $LEAKED_ALLOCATIONS -eq 1 ]]; then echo "There were leaked allocations; failing." exit 1 fi echo "Resource provider allocations were cleaned up properly." # Test "nova-manage placement heal_allocations" by creating a server, deleting # its allocations in placement, and then running heal_allocations and assert # the allocations were healed as expected. function get_binding_profile_value { # Returns the value of the key in the binding profile if exists or return # empty. 
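    # (The helper shells out to 'openstack port show -f json' and extracts a
    # single key from the port's binding_profile with an inline python3
    # snippet, printing an empty string when the key or the profile is absent.)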
local port=${1} local key=${2} local print_value='import sys, json; print(json.load(sys.stdin).get("binding_profile", {}).get("'${key}'", ""))' openstack port show ${port} -f json -c binding_profile \ | /usr/bin/env python3 -c "${print_value}" } echo "Creating port with bandwidth request for heal_allocations testing" openstack network create net0 \ --provider-network-type vlan \ --provider-physical-network public \ --provider-segment 100 openstack subnet create subnet0 \ --network net0 \ --subnet-range 10.0.4.0/24 \ openstack network qos policy create qp0 openstack network qos rule create qp0 \ --type minimum-bandwidth \ --min-kbps 1000 \ --egress openstack network qos rule create qp0 \ --type minimum-bandwidth \ --min-kbps 1000 \ --ingress openstack port create port-normal-qos \ --network net0 \ --vnic-type normal \ --qos-policy qp0 image_id=$(openstack image list -f value -c ID | awk 'NR==1{print $1}') flavor_id=$(openstack flavor list -f value -c ID | awk 'NR==1{print $1}') network_id=$(openstack network list --no-share -f value -c ID | awk 'NR==1{print $1}') echo "Creating server for heal_allocations testing" # microversion 2.72 introduced the support for bandwidth aware ports openstack --os-compute-api-version 2.72 \ server create --image ${image_id} --flavor ${flavor_id} \ --nic net-id=${network_id} --nic port-id=port-normal-qos \ --wait heal-allocations-test server_id=$(openstack server show heal-allocations-test -f value -c id) # Make sure there are allocations for the consumer. allocations=$(openstack resource provider allocation show ${server_id} \ -c resources -f value) if [[ "$allocations" == "" ]]; then echo "No allocations found for the server." exit 2 fi echo "Deleting allocations in placement for the server" openstack resource provider allocation delete ${server_id} # Make sure the allocations are gone. allocations=$(openstack resource provider allocation show ${server_id} \ -c resources -f value) if [[ "$allocations" != "" ]]; then echo "Server allocations were not deleted." exit 2 fi echo "Healing allocations" # First test with the --dry-run over all instances in all cells. set +e nova-manage placement heal_allocations --verbose --dry-run rc=$? set -e # Since we did not create allocations because of --dry-run the rc should be 4. if [[ ${rc} -ne 4 ]]; then echo "Expected return code 4 from heal_allocations with --dry-run" exit 2 fi # Now test with just the single instance and actually perform the heal. nova-manage placement heal_allocations --verbose --instance ${server_id} # Make sure there are allocations for the consumer. allocations=$(openstack resource provider allocation show ${server_id} \ -c resources -f value) if [[ "$allocations" == "" ]]; then echo "Failed to heal allocations." exit 2 fi echo "Verifying online_data_migrations idempotence" # We will reuse the server created earlier for this test. (A server needs to # be present during the run of online_data_migrations and archiving). # Run the online data migrations before archiving. $MANAGE db online_data_migrations # We need to archive the deleted marker instance used by the # fill_virtual_interface_list online data migration in order to trigger # creation of a new deleted marker instance. set +e archive_deleted_rows set -e # Verify whether online data migrations run after archiving will succeed. # See for more details: https://bugs.launchpad.net/nova/+bug/1824435 $MANAGE db online_data_migrations # Test global registered unified limits by updating registered limits and # attempting to create resources. 
echo "Creating port with bandwidth request for heal_allocations testing"
openstack network create net0 \
    --provider-network-type vlan \
    --provider-physical-network public \
    --provider-segment 100

openstack subnet create subnet0 \
    --network net0 \
    --subnet-range 10.0.4.0/24

openstack network qos policy create qp0
openstack network qos rule create qp0 \
    --type minimum-bandwidth \
    --min-kbps 1000 \
    --egress
openstack network qos rule create qp0 \
    --type minimum-bandwidth \
    --min-kbps 1000 \
    --ingress
openstack port create port-normal-qos \
    --network net0 \
    --vnic-type normal \
    --qos-policy qp0

image_id=$(openstack image list -f value -c ID | awk 'NR==1{print $1}')
flavor_id=$(openstack flavor list -f value -c ID | awk 'NR==1{print $1}')
network_id=$(openstack network list --no-share -f value -c ID | awk 'NR==1{print $1}')

echo "Creating server for heal_allocations testing"
# microversion 2.72 introduced the support for bandwidth aware ports
openstack --os-compute-api-version 2.72 \
    server create --image ${image_id} --flavor ${flavor_id} \
    --nic net-id=${network_id} --nic port-id=port-normal-qos \
    --wait heal-allocations-test
server_id=$(openstack server show heal-allocations-test -f value -c id)

# Make sure there are allocations for the consumer.
allocations=$(openstack resource provider allocation show ${server_id} \
    -c resources -f value)
if [[ "$allocations" == "" ]]; then
    echo "No allocations found for the server."
    exit 2
fi

echo "Deleting allocations in placement for the server"
openstack resource provider allocation delete ${server_id}

# Make sure the allocations are gone.
allocations=$(openstack resource provider allocation show ${server_id} \
    -c resources -f value)
if [[ "$allocations" != "" ]]; then
    echo "Server allocations were not deleted."
    exit 2
fi

echo "Healing allocations"
# First test with the --dry-run over all instances in all cells.
set +e
nova-manage placement heal_allocations --verbose --dry-run
rc=$?
set -e
# Since we did not create allocations because of --dry-run the rc should be 4.
if [[ ${rc} -ne 4 ]]; then
    echo "Expected return code 4 from heal_allocations with --dry-run"
    exit 2
fi

# Now test with just the single instance and actually perform the heal.
nova-manage placement heal_allocations --verbose --instance ${server_id}
# Make sure there are allocations for the consumer.
allocations=$(openstack resource provider allocation show ${server_id} \
    -c resources -f value)
if [[ "$allocations" == "" ]]; then
    echo "Failed to heal allocations."
    exit 2
fi

echo "Verifying online_data_migrations idempotence"
# We will reuse the server created earlier for this test. (A server needs to
# be present during the run of online_data_migrations and archiving.)
# Run the online data migrations before archiving.
$MANAGE db online_data_migrations

# We need to archive the deleted marker instance used by the
# fill_virtual_interface_list online data migration in order to trigger
# creation of a new deleted marker instance.
set +e
archive_deleted_rows
set -e

# Verify whether online data migrations run after archiving will succeed.
# See for more details: https://bugs.launchpad.net/nova/+bug/1824435
$MANAGE db online_data_migrations

# Test global registered unified limits by updating registered limits and
# attempting to create resources. Because these quota limits are global, we
# can't test them in tempest because modifying global limits can cause other
# tests running in parallel to fail.
echo "Testing unified limits registered limits"

# Get the registered limits IDs.
reglimit_ids_names=$(openstack registered limit list -f value -c "ID" -c "Resource Name")

# Put them in a map to look up ID from name for subsequent limit set commands.
# Requires Bash 4.
declare -A id_name_map
while read id name
do
    id_name_map["$name"]="$id"
done <<< "$reglimit_ids_names"

# Server metadata items
#
# Set the quota to 1.
metadata_items_id="${id_name_map["server_metadata_items"]}"
bash -c "unset OS_USERNAME OS_TENANT_NAME OS_PROJECT_NAME;
    openstack --os-cloud devstack-system-admin registered limit set \
        --default-limit 1 $metadata_items_id"

# Create a server. Should succeed with one metadata item.
openstack --os-compute-api-version 2.37 \
    server create --image ${image_id} --flavor ${flavor_id} --nic none \
    --property cool=true --wait metadata-items-test1

# Try to create another server with two metadata items. This should fail.
# Capture stderr as well so the client's quota error text ends up in $output.
set +e
output=$(openstack --os-compute-api-version 2.37 \
    server create --image ${image_id} --flavor ${flavor_id} --nic none \
    --property cool=true --property location=fridge \
    --wait metadata-items-test2 2>&1)
rc=$?
set -e
# Return code should be 1 if server create failed.
if [[ ${rc} -ne 1 ]]; then
    echo "Expected return code 1 from server create with two metadata items"
    exit 2
fi
# Verify it's a quota error.
if [[ ! "$output" =~ "HTTP 403" ]]; then
    echo "Expected HTTP 403 from server create with two metadata items"
    exit 2
fi

# Increase the quota limit to two.
bash -c "unset OS_USERNAME OS_TENANT_NAME OS_PROJECT_NAME;
    openstack --os-cloud devstack-system-admin registered limit set \
        --default-limit 2 $metadata_items_id"

# Second server create should succeed now.
openstack --os-compute-api-version 2.37 \
    server create --image ${image_id} --flavor ${flavor_id} --nic none \
    --property cool=true --property location=fridge --wait metadata-items-test2

# Delete the servers.
openstack server delete metadata-items-test1 metadata-items-test2

# Test 'nova-manage limits migrate_to_unified_limits' by creating a test region
# with no registered limits in it, running the nova-manage command, and
# verifying that the expected limits were created and warned about.
echo "Testing nova-manage limits migrate_to_unified_limits"

ul_test_region=RegionTestNovaUnifiedLimits
openstack --os-cloud devstack-admin region create $ul_test_region

# Verify there are no registered limits in the test region.
registered_limits=$(openstack --os-cloud devstack registered limit list \
    --region $ul_test_region -f value)
if [[ "$registered_limits" != "" ]]; then
    echo "There should be no registered limits in the test region; failing"
    exit 2
fi

# Get existing legacy quota limits to use for verification.
legacy_limits=$(openstack --os-cloud devstack quota show --compute -f value -c "Resource" -c "Limit")
# Requires Bash 4.
declare -A legacy_name_limit_map
while read name limit
do
    legacy_name_limit_map["$name"]="$limit"
done <<< "$legacy_limits"

set +e
set -o pipefail
$MANAGE limits migrate_to_unified_limits --region-id $ul_test_region --verbose | tee /tmp/output
rc=$?
set +o pipefail
set -e
if [[ ${rc} -eq 0 ]]; then
    echo "nova-manage should have warned about unset registered limits; failing"
    exit 2
fi

# Verify there are now registered limits in the test region.
registered_limits=$(openstack --os-cloud devstack registered limit list \ --region $ul_test_region -f value -c "Resource Name" -c "Default Limit") if [[ "$registered_limits" == "" ]]; then echo "There should be registered limits in the test region now; failing" exit 2 fi # Get the new unified limits to use for verification. declare -A unified_name_limit_map while read name limit do unified_name_limit_map["$name"]="$limit" done <<< "$registered_limits" declare -A old_to_new_name_map old_to_new_name_map["instances"]="servers" old_to_new_name_map["cores"]="class:VCPU" old_to_new_name_map["ram"]="class:MEMORY_MB" old_to_new_name_map["properties"]="server_metadata_items" old_to_new_name_map["injected-files"]="server_injected_files" old_to_new_name_map["injected-file-size"]="server_injected_file_content_bytes" old_to_new_name_map["injected-path-size"]="server_injected_file_path_bytes" old_to_new_name_map["key-pairs"]="server_key_pairs" old_to_new_name_map["server-groups"]="server_groups" old_to_new_name_map["server-group-members"]="server_group_members" for old_name in "${!old_to_new_name_map[@]}"; do new_name="${old_to_new_name_map[$old_name]}" if [[ "${legacy_name_limit_map[$old_name]}" != "${unified_name_limit_map[$new_name]}" ]]; then echo "Legacy limit value does not match unified limit value; failing" exit 2 fi done # Create the missing registered limits that were warned about earlier. missing_limits=$(grep missing /tmp/output | awk '{print $2}') while read limit do openstack --os-cloud devstack-system-admin registered limit create \ --region $ul_test_region --service nova --default-limit 5 $limit done <<< "$missing_limits" # Run migrate_to_unified_limits again. There should be a success message in the # output because there should be no resources found that are missing registered # limits. $MANAGE limits migrate_to_unified_limits --region-id $ul_test_region --verbose rc=$? if [[ ${rc} -ne 0 ]]; then echo "nova-manage should have output a success message; failing" exit 2 fi registered_limit_ids=$(openstack --os-cloud devstack registered limit list \ --region $ul_test_region -f value -c "ID") openstack --os-cloud devstack-system-admin registered limit delete $registered_limit_ids openstack --os-cloud devstack-admin region delete $ul_test_region ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.285608 nova-32.0.0/nova/0000775000175000017500000000000000000000000013525 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/__init__.py0000664000175000017500000000161600000000000015642 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`nova` -- Cloud IaaS Platform =================================== .. 
automodule:: nova :platform: Unix :synopsis: Infrastructure-as-a-Service Cloud platform. """ ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.285608 nova-32.0.0/nova/accelerator/0000775000175000017500000000000000000000000016011 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/accelerator/__init__.py0000664000175000017500000000000000000000000020110 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/accelerator/cyborg.py0000664000175000017500000003612400000000000017656 0ustar00zuulzuul00000000000000# Copyright 2019 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Note on object relationships: 1 device profile (DP) has D >= 1 request groups (just as a flavor has many request groups). Each DP request group corresponds to exactly 1 numbered request group (RG) in the request spec. Each numbered RG corresponds to exactly one resource provider (RP). A DP request group may request A >= 1 accelerators, and so result in the creation of A ARQs. Each ARQ corresponds to exactly 1 DP request group. A device profile is a dictionary: { "name": "mydpname", "uuid": , "groups": [ ] } A device profile group is a dictionary too: { "resources:CUSTOM_ACCELERATOR_FPGA": "2", "resources:CUSTOM_LOCAL_MEMORY": "1", "trait:CUSTOM_INTEL_PAC_ARRIA10": "required", "trait:CUSTOM_FUNCTION_NAME_FALCON_GZIP_1_1": "required", # 0 or more Cyborg properties "accel:bitstream_id": "FB021995_BF21_4463_936A_02D49D4DB5E5" } See cyborg/cyborg/objects/device_profile.py for more details. """ from oslo_log import log as logging from keystoneauth1 import exceptions as ks_exc from nova import exception from nova.i18n import _ from nova import objects from nova.scheduler import utils as schedutils from nova import service_auth from nova import utils LOG = logging.getLogger(__name__) def get_client(context): return _CyborgClient(context) def get_device_profile_group_requester_id(dp_group_id, owner): """Return the value to use in objects.RequestGroup.requester_id. The requester_id is used to match device profile groups from Cyborg to the request groups in request spec. The request group id should be unique for each dp in the flavor and in the port. :param dp_group_id: The index of the request group in the device profile. :param owner: The port UUID if the dp requested by port. 
""" req_id = ("device_profile_" + str(dp_group_id) + (str(owner) if owner else '')) return req_id def get_arq_pci_device_profile(arq): """Extracting pci device info from ARQ """ pci_info = arq['attach_handle_info'] return { 'physical_network': pci_info["physical_network"], 'pci_slot': "%s:%s:%s.%s" % ( pci_info["domain"], pci_info["bus"], pci_info["device"], pci_info["function"]), 'arq_uuid': arq['uuid'] } def get_device_profile_request_groups(context, dp_name, owner=None): cyclient = get_client(context) dp_groups = cyclient.get_device_profile_groups(dp_name) return cyclient.get_device_request_groups(dp_groups, owner) def get_device_amount_of_dp_groups(dp_groups): """Get requested devices amount for the groups of a device_profile. :param dp_groups: list of request groups in a device profile. """ devices_amount = 0 for _ignore, dp_group in enumerate(dp_groups): for key, val in dp_group.items(): match = schedutils.ResourceRequest.XS_KEYPAT.match(key) if not match: continue # could be 'accel:foo=bar', skip it prefix, _, _ = match.groups() if prefix == schedutils.ResourceRequest.XS_RES_PREFIX: devices_amount += int(val) return devices_amount class _CyborgClient(object): DEVICE_PROFILE_URL = "/device_profiles" ARQ_URL = "/accelerator_requests" def __init__(self, context): auth = service_auth.get_auth_plugin(context) self._client = utils.get_ksa_adapter('accelerator', ksa_auth=auth) def _call_cyborg(self, func, *args, **kwargs): resp = err_msg = None try: resp = func(*args, **kwargs) if not resp: msg = _('Invalid response from Cyborg: ') err_msg = msg + str(resp) except ks_exc.ClientException as exc: err_msg = _('Could not communicate with Cyborg.') LOG.exception('%s: %s', err_msg, str(exc)) return resp, err_msg def _get_device_profile_list(self, dp_name): query = {"name": dp_name} err_msg = None resp, err_msg = self._call_cyborg(self._client.get, self.DEVICE_PROFILE_URL, params=query) if err_msg: raise exception.DeviceProfileError(name=dp_name, msg=err_msg) return resp.json().get('device_profiles') def get_device_profile_groups(self, dp_name): """Get device groups from a device profile. :param dp_name: string: device profile name Expected to be valid, not None or ''. :returns: [device profile group dict] :raises: DeviceProfileError Expected to be valid, not None or ''. """ dp_list = self._get_device_profile_list(dp_name) if not dp_list: msg = _('Expected 1 device profile but got nothing.') raise exception.DeviceProfileError(name=dp_name, msg=msg) if len(dp_list) != 1: err = _('Expected 1 device profile but got %s.') % len(dp_list) raise exception.DeviceProfileError(name=dp_name, msg=err) return dp_list[0]['groups'] def get_device_request_groups(self, dp_groups, owner): """Get list of profile group objects from the device profile. :param dp_groups: device groups of a device profile. :param owner: The port UUID if the dp requested by port. 
:returns: [objects.RequestGroup] :raises: DeviceProfileError """ request_groups = [] for dp_group_id, dp_group in enumerate(dp_groups): req_id = get_device_profile_group_requester_id(dp_group_id, owner) rg = objects.RequestGroup(requester_id=req_id) for key, val in dp_group.items(): match = schedutils.ResourceRequest.XS_KEYPAT.match(key) if not match: continue # could be 'accel:foo=bar', skip it prefix, _ignore, name = match.groups() if prefix == schedutils.ResourceRequest.XS_RES_PREFIX: rg.add_resource(rclass=name, amount=val) elif prefix == schedutils.ResourceRequest.XS_TRAIT_PREFIX: rg.add_trait(trait_name=name, trait_type=val) request_groups.append(rg) return request_groups def _create_arqs(self, dp_name): data = {"device_profile_name": dp_name} resp, err_msg = self._call_cyborg(self._client.post, self.ARQ_URL, json=data) if err_msg: raise exception.AcceleratorRequestOpFailed( op=_('create'), msg=err_msg) return resp.json().get('arqs') def create_arqs(self, dp_name): """Create ARQs by dp_name.""" LOG.info('Creating ARQs for device profile %s', dp_name) arqs = self._create_arqs(dp_name) if not arqs: msg = _('device profile name %s') % dp_name raise exception.AcceleratorRequestOpFailed(op=_('create'), msg=msg) return arqs def create_arqs_and_match_resource_providers(self, dp_name, rg_rp_map): """Create ARQs and match them with request groups and thereby determine their corresponding RPs. :param dp_name: Device profile name :param rg_rp_map: Request group - Resource Provider map {requester_id: [resource_provider_uuid]} :returns: [arq], with each ARQ associated with an RP :raises: DeviceProfileError, AcceleratorRequestOpFailed """ arqs = self.create_arqs(dp_name) for arq in arqs: dp_group_id = arq['device_profile_group_id'] arq['device_rp_uuid'] = None requester_id = ( get_device_profile_group_requester_id(dp_group_id, owner=None)) arq['device_rp_uuid'] = rg_rp_map[requester_id][0] return arqs def get_arq_device_rp_uuid(self, arq, rg_rp_map, owner): """Query the ARQ by uuid saved in request_net. """ dp_group_id = arq['device_profile_group_id'] requester_id = ( get_device_profile_group_requester_id(dp_group_id, owner)) # ARQ and rp is 1:1 mapping # One arq always associated with one placement request group and # in placement one prefixed request group is always mapped to one RP. return rg_rp_map[requester_id][0] def bind_arqs(self, bindings): """Initiate Cyborg bindings. Handles RFC 6902-compliant JSON patching, sparing calling Nova code from those details. :param bindings: { "$arq_uuid": { "hostname": STRING "device_rp_uuid": UUID "instance_uuid": UUID }, ... } :returns: nothing :raises: AcceleratorRequestOpFailed """ LOG.info('Binding ARQs.') # Create a JSON patch in RFC 6902 format patch_list = {} for arq_uuid, binding in bindings.items(): patch = [{"path": "/" + field, "op": "add", "value": value } for field, value in binding.items()] patch_list[arq_uuid] = patch resp, err_msg = self._call_cyborg(self._client.patch, self.ARQ_URL, json=patch_list) if err_msg: arq_uuids = bindings.keys() msg = _(' Binding failed for ARQ UUIDs: ') err_msg = err_msg + msg + ','.join(arq_uuids) raise exception.AcceleratorRequestBindingFailed( arqs=arq_uuids, msg=err_msg) def get_arqs_for_instance(self, instance_uuid, only_resolved=False): """Get ARQs for the instance. 
:param instance_uuid: Instance UUID :param only_resolved: flag to return only resolved ARQs :returns: List of ARQs for the instance: if only_resolved: only those ARQs which have completed binding else: all ARQs The format of the returned data structure is as below: [ {'uuid': $arq_uuid, 'device_profile_name': $dp_name, 'device_profile_group_id': $dp_request_group_index, 'state': 'Bound', 'device_rp_uuid': $resource_provider_uuid, 'hostname': $host_nodename, 'instance_uuid': $instance_uuid, 'attach_handle_info': { # PCI bdf 'bus': '0c', 'device': '0', 'domain': '0000', 'function': '0'}, 'attach_handle_type': 'PCI' # or 'TEST_PCI' for Cyborg fake driver } ] :raises: AcceleratorRequestOpFailed """ query = {"instance": instance_uuid} resp, err_msg = self._call_cyborg(self._client.get, self.ARQ_URL, params=query) if err_msg: err_msg = err_msg + _(' Instance %s') % instance_uuid raise exception.AcceleratorRequestOpFailed( op=_('get'), msg=err_msg) arqs = resp.json().get('arqs') if not arqs: err_msg = _('Cyborg returned no accelerator requests for ' 'instance %s') % instance_uuid raise exception.AcceleratorRequestOpFailed( op=_('get'), msg=err_msg) if only_resolved: arqs = [arq for arq in arqs if arq['state'] in ['Bound', 'BindFailed', 'Deleting']] return arqs def get_arq_by_uuid(self, arq_uuid): """Get ARQs by uuid. The format of the returned data structure is as below: {'uuid': $arq_uuid, 'device_profile_name': $dp_name, 'device_profile_group_id': $dp_request_group_index, 'state': 'Bound', 'device_rp_uuid': $resource_provider_uuid, 'hostname': $host_nodename, 'instance_uuid': $instance_uuid, 'attach_handle_info': { # PCI bdf 'bus': '0c', 'device': '0', 'domain': '0000', 'function': '0'}, 'attach_handle_type': 'PCI' # or 'TEST_PCI' for Cyborg fake driver } :raises: AcceleratorRequestOpFailed """ resp, err_msg = self._call_cyborg(self._client.get, "/".join([self.ARQ_URL, arq_uuid])) if err_msg: err_msg = err_msg + _(' ARQ: %s') % arq_uuid raise exception.AcceleratorRequestOpFailed( op=_('get'), msg=err_msg) arq = resp.json() return arq def delete_arqs_for_instance(self, instance_uuid): """Delete ARQs for instance, after unbinding if needed. :param instance_uuid: Instance UUID :raises: AcceleratorRequestOpFailed """ # Unbind and delete the ARQs params = {"instance": instance_uuid} resp, err_msg = self._call_cyborg(self._client.delete, self.ARQ_URL, params=params) if err_msg: msg = err_msg + _(' Instance %s') % instance_uuid raise exception.AcceleratorRequestOpFailed( op=_('delete'), msg=msg) def delete_arqs_by_uuid(self, arq_uuids): """Delete the specified ARQs, unbinding them if needed. This is meant to be used to clean up ARQs that have failed to bind to an instance. So delete_arqs_for_instance() is not applicable. This Cyborg API call is NOT idempotent, i.e., if called more than once, the 2nd and later calls will throw errors. Cyborg deletes the ARQs without error, or returns 404 if there is ARQ which already deleted. In either way, existed ARQs in arq_uuids will be deleted. Such 404 error can be ignored safely. If this fails, an error is logged but no exception is raised because this cleans up Cyborg resources, but should otherwise not affect instance spawn. :params arq_uuids: dict_keys() of ARQ UUIDs """ arq_uuid_str = ','.join(arq_uuids) params = {'arqs': arq_uuid_str} resp, err_msg = self._call_cyborg(self._client.delete, self.ARQ_URL, params=params) if err_msg: # No point raising an exception. 
LOG.error('Failed to delete ARQs %s', arq_uuid_str) def get_arq_uuids_for_instance(self, instance): """Get ARQ UUIDs for the instance. :param instance: Instance Object :return: ARQ UUIDs. """ return [arq['uuid'] for arq in self.get_arqs_for_instance(instance.uuid)] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.289608 nova-32.0.0/nova/api/0000775000175000017500000000000000000000000014276 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/__init__.py0000664000175000017500000000000000000000000016375 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/auth.py0000664000175000017500000000626000000000000015615 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Common Auth Middleware. """ from oslo_log import log as logging from oslo_serialization import jsonutils import webob.dec import webob.exc from nova.api import wsgi import nova.conf from nova import context from nova.i18n import _ CONF = nova.conf.CONF LOG = logging.getLogger(__name__) def _load_pipeline(loader, pipeline): filters = [loader.get_filter(n) for n in pipeline[:-1]] app = loader.get_app(pipeline[-1]) filters.reverse() for filter in filters: app = filter(app) return app def pipeline_factory_v21(loader, global_conf, **local_conf): """A paste pipeline replica that keys off of auth_strategy.""" return _load_pipeline(loader, local_conf['keystone'].split()) class InjectContext(wsgi.Middleware): """Add a 'nova.context' to WSGI environ.""" def __init__(self, context, *args, **kwargs): self.context = context super(InjectContext, self).__init__(*args, **kwargs) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): req.environ['nova.context'] = self.context return self.application class NovaKeystoneContext(wsgi.Middleware): """Make a request context from keystone headers.""" @staticmethod def _create_context(env, **kwargs): """Create a context from a request environ. This exists to make test stubbing easier. """ return context.RequestContext.from_environ(env, **kwargs) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): # Build a context, including the auth_token... remote_address = req.remote_addr service_catalog = None if req.headers.get('X_SERVICE_CATALOG') is not None: try: catalog_header = req.headers.get('X_SERVICE_CATALOG') service_catalog = jsonutils.loads(catalog_header) except ValueError: raise webob.exc.HTTPInternalServerError( _('Invalid service catalog json.')) # NOTE(jamielennox): This is a full auth plugin set by auth_token # middleware in newer versions. 
user_auth_plugin = req.environ.get('keystone.token_auth') ctx = self._create_context( req.environ, user_auth_plugin=user_auth_plugin, remote_address=remote_address, service_catalog=service_catalog) if ctx.user_id is None: LOG.debug("Neither X_USER_ID nor X_USER found in request") return webob.exc.HTTPUnauthorized() req.environ['nova.context'] = ctx return self.application ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/compute_req_id.py0000664000175000017500000000237600000000000017657 0ustar00zuulzuul00000000000000# Copyright (c) 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Middleware that ensures x-compute-request-id Nova's notion of request-id tracking predates any common idea, so the original version of this header in OpenStack was x-compute-request-id. Eventually we got oslo, and all other projects implemented this with x-openstack-request-id. However, x-compute-request-id was always part of our contract. The following migrates us to use x-openstack-request-id as well, by using the common middleware. """ from oslo_middleware import request_id HTTP_RESP_HEADER_REQUEST_ID = 'x-compute-request-id' class ComputeReqIdMiddleware(request_id.RequestId): compat_headers = [HTTP_RESP_HEADER_REQUEST_ID] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.289608 nova-32.0.0/nova/api/metadata/0000775000175000017500000000000000000000000016056 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/metadata/__init__.py0000664000175000017500000000145400000000000020173 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`nova.api.metadata` -- Nova Metadata Server ================================================ .. automodule:: nova.api.metadata :platform: Unix :synopsis: Metadata Server for Nova """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/metadata/base.py0000664000175000017500000006743600000000000017362 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Instance Metadata information.""" import itertools import os import posixpath from oslo_log import log as logging from oslo_serialization import base64 from oslo_serialization import jsonutils from oslo_utils import timeutils from nova.api.metadata import password from nova.api.metadata import vendordata_dynamic from nova.api.metadata import vendordata_json from nova import block_device import nova.conf from nova import context from nova import exception from nova.network import neutron from nova.network import security_group_api from nova import objects from nova.objects import virt_device_metadata as metadata_obj from nova import utils from nova.virt import netutils CONF = nova.conf.CONF VERSIONS = [ '1.0', '2007-01-19', '2007-03-01', '2007-08-29', '2007-10-10', '2007-12-15', '2008-02-01', '2008-09-01', '2009-04-04', ] # NOTE(mikal): think of these strings as version numbers. They traditionally # correlate with OpenStack release dates, with all the changes for a given # release bundled into a single version. Note that versions in the future are # hidden from the listing, but can still be requested explicitly, which is # required for testing purposes. We know this isn't great, but its inherited # from EC2, which this needs to be compatible with. # NOTE(jichen): please update doc/source/user/metadata.rst on the metadata # output when new version is created in order to make doc up-to-date. FOLSOM = '2012-08-10' GRIZZLY = '2013-04-04' HAVANA = '2013-10-17' LIBERTY = '2015-10-15' NEWTON_ONE = '2016-06-30' NEWTON_TWO = '2016-10-06' OCATA = '2017-02-22' ROCKY = '2018-08-27' VICTORIA = '2020-10-14' EPOXY = '2025-04-04' OPENSTACK_VERSIONS = [ FOLSOM, GRIZZLY, HAVANA, LIBERTY, NEWTON_ONE, NEWTON_TWO, OCATA, ROCKY, VICTORIA, EPOXY, ] VERSION = "version" CONTENT = "content" CONTENT_DIR = "content" MD_JSON_NAME = "meta_data.json" VD_JSON_NAME = "vendor_data.json" VD2_JSON_NAME = "vendor_data2.json" NW_JSON_NAME = "network_data.json" UD_NAME = "user_data" PASS_NAME = "password" MIME_TYPE_TEXT_PLAIN = "text/plain" MIME_TYPE_APPLICATION_JSON = "application/json" LOG = logging.getLogger(__name__) class InvalidMetadataVersion(Exception): pass class InvalidMetadataPath(Exception): pass class InstanceMetadata(object): """Instance metadata.""" def __init__(self, instance, address=None, content=None, extra_md=None, network_info=None, network_metadata=None): """Creation of this object should basically cover all time consuming collection. Methods after that should not cause time delays due to network operations or lengthy cpu operations. The user should then get a single instance and make multiple method calls on it. """ if not content: content = [] # NOTE(gibi): this is not a cell targeted context even if we are called # in a situation when the instance is in a different cell than the # metadata service itself. 
ctxt = context.get_admin_context() self.mappings = _format_instance_mapping(instance) # NOTE(danms): Sanitize the instance to limit the amount of stuff # inside that may not pickle well (i.e. context). We also touch # some of the things we'll lazy load later to make sure we keep their # values in what we cache. instance.ec2_ids instance.keypairs instance.device_metadata instance.numa_topology instance = objects.Instance.obj_from_primitive( instance.obj_to_primitive()) # The default value of mimeType is set to MIME_TYPE_TEXT_PLAIN self.set_mimetype(MIME_TYPE_TEXT_PLAIN) self.instance = instance self.extra_md = extra_md self.availability_zone = instance.get('availability_zone') self.security_groups = security_group_api.get_instance_security_groups( ctxt, instance) if instance.user_data is not None: self.userdata_raw = base64.decode_as_bytes(instance.user_data) else: self.userdata_raw = None self.address = address # expose instance metadata. self.launch_metadata = utils.instance_meta(instance) self.password = password.extract_password(instance) self.uuid = instance.uuid self.content = {} self.files = [] # get network info, and the rendered network template if network_info is None: network_info = instance.info_cache.network_info # expose network metadata if network_metadata is None: self.network_metadata = netutils.get_network_metadata(network_info) else: self.network_metadata = network_metadata self.ip_info = netutils.get_ec2_ip_info(network_info) self.network_config = None cfg = netutils.get_injected_network_template(network_info) if cfg: key = "%04i" % len(self.content) self.content[key] = cfg self.network_config = {"name": "network_config", 'content_path': "/%s/%s" % (CONTENT_DIR, key)} # 'content' is passed in from the configdrive code in # nova/virt/libvirt/driver.py. That's how we get the injected files # (personalities) in. AFAIK they're not stored in the db at all, # so are not available later (web service metadata time). for (path, contents) in content: key = "%04i" % len(self.content) self.files.append({'path': path, 'content_path': "/%s/%s" % (CONTENT_DIR, key)}) self.content[key] = contents self.route_configuration = None # NOTE(mikal): the decision to not pass extra_md here like we # do to the StaticJSON driver is deliberate. extra_md will # contain the admin password for the instance, and we shouldn't # pass that to external services. 
self.vendordata_providers = { 'StaticJSON': vendordata_json.JsonFileVendorData(), 'DynamicJSON': vendordata_dynamic.DynamicVendorData( instance=instance) } def _route_configuration(self): if self.route_configuration: return self.route_configuration path_handlers = {UD_NAME: self._user_data, PASS_NAME: self._password, VD_JSON_NAME: self._vendor_data, VD2_JSON_NAME: self._vendor_data2, MD_JSON_NAME: self._metadata_as_json, NW_JSON_NAME: self._network_data, VERSION: self._handle_version, CONTENT: self._handle_content} self.route_configuration = RouteConfiguration(path_handlers) return self.route_configuration def set_mimetype(self, mime_type): self.md_mimetype = mime_type def get_mimetype(self): return self.md_mimetype def get_ec2_metadata(self, version): if version == "latest": version = VERSIONS[-1] if version not in VERSIONS: raise InvalidMetadataVersion(version) hostname = self._get_hostname() floating_ips = self.ip_info['floating_ips'] floating_ip = floating_ips and floating_ips[0] or '' fixed_ips = self.ip_info['fixed_ips'] fixed_ip = fixed_ips and fixed_ips[0] or '' fmt_sgroups = [x['name'] for x in self.security_groups] meta_data = { 'ami-id': self.instance.ec2_ids.ami_id, 'ami-launch-index': self.instance.launch_index, 'ami-manifest-path': 'FIXME', 'instance-id': self.instance.ec2_ids.instance_id, 'hostname': hostname, 'local-ipv4': fixed_ip or self.address, 'reservation-id': self.instance.reservation_id, 'security-groups': fmt_sgroups} # public keys are strangely rendered in ec2 metadata service # meta-data/public-keys/ returns '0=keyname' (with no trailing /) # and only if there is a public key given. # '0=keyname' means there is a normally rendered dict at # meta-data/public-keys/0 # # meta-data/public-keys/ : '0=%s' % keyname # meta-data/public-keys/0/ : 'openssh-key' # meta-data/public-keys/0/openssh-key : '%s' % publickey if self.instance.key_name: meta_data['public-keys'] = { '0': {'_name': "0=" + self.instance.key_name, 'openssh-key': self.instance.key_data}} if self._check_version('2007-01-19', version): meta_data['local-hostname'] = hostname meta_data['public-hostname'] = hostname meta_data['public-ipv4'] = floating_ip if self._check_version('2007-08-29', version): flavor = self.instance.get_flavor() meta_data['instance-type'] = flavor['name'] if self._check_version('2007-12-15', version): meta_data['block-device-mapping'] = self.mappings if self.instance.ec2_ids.kernel_id: meta_data['kernel-id'] = self.instance.ec2_ids.kernel_id if self.instance.ec2_ids.ramdisk_id: meta_data['ramdisk-id'] = self.instance.ec2_ids.ramdisk_id if self._check_version('2008-02-01', version): meta_data['placement'] = {'availability-zone': self.availability_zone} if self._check_version('2008-09-01', version): meta_data['instance-action'] = 'none' data = {'meta-data': meta_data} if self.userdata_raw is not None: data['user-data'] = self.userdata_raw return data def get_ec2_item(self, path_tokens): # get_ec2_metadata returns dict without top level version data = self.get_ec2_metadata(path_tokens[0]) return find_path_in_tree(data, path_tokens[1:]) def get_openstack_item(self, path_tokens): if path_tokens[0] == CONTENT_DIR: return self._handle_content(path_tokens) return self._route_configuration().handle_path(path_tokens) def _metadata_as_json(self, version, path): metadata = {'uuid': self.uuid} if self.launch_metadata: metadata['meta'] = self.launch_metadata if self.files: metadata['files'] = self.files if self.extra_md: metadata.update(self.extra_md) if self.network_config: 
metadata['network_config'] = self.network_config if self.instance.key_name: keypairs = self.instance.keypairs # NOTE(mriedem): It's possible for the keypair to be deleted # before it was migrated to the instance_extra table, in which # case lazy-loading instance.keypairs will handle the 404 and # just set an empty KeyPairList object on the instance. keypair = keypairs[0] if keypairs else None if keypair: metadata['public_keys'] = { keypair.name: keypair.public_key, } metadata['keys'] = [ {'name': keypair.name, 'type': keypair.type, 'data': keypair.public_key} ] else: LOG.debug("Unable to find keypair for instance with " "key name '%s'.", self.instance.key_name, instance=self.instance) metadata['hostname'] = self._get_hostname() metadata['name'] = self.instance.display_name metadata['launch_index'] = self.instance.launch_index metadata['availability_zone'] = self.availability_zone if self._check_os_version(GRIZZLY, version): metadata['random_seed'] = base64.encode_as_text(os.urandom(512)) if self._check_os_version(LIBERTY, version): metadata['project_id'] = self.instance.project_id if self._check_os_version(NEWTON_ONE, version): metadata['devices'] = self._get_device_metadata(version) if self._check_os_version(VICTORIA, version): metadata['dedicated_cpus'] = self._get_instance_dedicated_cpus() self.set_mimetype(MIME_TYPE_APPLICATION_JSON) return jsonutils.dump_as_bytes(metadata) def _get_device_metadata(self, version): """Build a device metadata dict based on the metadata objects. This is done here in the metadata API as opposed to in the objects themselves because the metadata dict is part of the guest API and thus must be controlled. """ device_metadata_list = [] vif_vlans_supported = self._check_os_version(OCATA, version) vif_vfs_trusted_supported = self._check_os_version(ROCKY, version) if self.instance.device_metadata is not None: for device in self.instance.device_metadata.devices: device_metadata = {} bus = 'none' address = 'none' if 'bus' in device: # TODO(artom/mriedem) It would be nice if we had something # more generic, like a type identifier or something, built # into these types of objects, like a get_meta_type() # abstract method on the base DeviceBus class. 
if isinstance(device.bus, metadata_obj.PCIDeviceBus): bus = 'pci' elif isinstance(device.bus, metadata_obj.USBDeviceBus): bus = 'usb' elif isinstance(device.bus, metadata_obj.SCSIDeviceBus): bus = 'scsi' elif isinstance(device.bus, metadata_obj.IDEDeviceBus): bus = 'ide' elif isinstance(device.bus, metadata_obj.XenDeviceBus): bus = 'xen' else: LOG.debug('Metadata for device with unknown bus %s ' 'has not been included in the ' 'output', device.bus.__class__.__name__) continue if 'address' in device.bus: address = device.bus.address if isinstance(device, metadata_obj.NetworkInterfaceMetadata): vlan = device.vlan if 'vlan' in device else None if vif_vlans_supported and vlan is not None: device_metadata['vlan'] = vlan if vif_vfs_trusted_supported: vf_trusted = (device.vf_trusted if 'vf_trusted' in device else False) device_metadata['vf_trusted'] = vf_trusted device_metadata['type'] = 'nic' device_metadata['mac'] = device.mac # NOTE(artom) If a device has neither tags, vlan or # vf_trusted, don't expose it if not ('tags' in device or 'vlan' in device_metadata or 'vf_trusted' in device_metadata): continue elif isinstance(device, metadata_obj.DiskMetadata): device_metadata['type'] = 'disk' # serial and path are optional parameters if 'serial' in device: device_metadata['serial'] = device.serial if 'path' in device: device_metadata['path'] = device.path elif self._check_os_version(EPOXY, version) and isinstance( device, metadata_obj.ShareMetadata ): device_metadata['type'] = 'share' device_metadata['share_id'] = device.share_id device_metadata['tag'] = device.tag else: LOG.debug('Metadata for device of unknown type %s has not ' 'been included in the ' 'output', device.__class__.__name__) continue device_metadata['bus'] = bus device_metadata['address'] = address if 'tags' in device: device_metadata['tags'] = device.tags device_metadata_list.append(device_metadata) return device_metadata_list def _get_instance_dedicated_cpus(self): dedicated_cpus = [] if self.instance.numa_topology: dedicated_cpus = sorted(list(itertools.chain.from_iterable([ cell.pcpuset for cell in self.instance.numa_topology.cells ]))) return dedicated_cpus def _handle_content(self, path_tokens): if len(path_tokens) == 1: raise KeyError("no listing for %s" % "/".join(path_tokens)) if len(path_tokens) != 2: raise KeyError("Too many tokens for /%s" % CONTENT_DIR) return self.content[path_tokens[1]] def _handle_version(self, version, path): # request for /version, give a list of what is available ret = [MD_JSON_NAME] if self.userdata_raw is not None: ret.append(UD_NAME) if self._check_os_version(GRIZZLY, version): ret.append(PASS_NAME) if self._check_os_version(HAVANA, version): ret.append(VD_JSON_NAME) if self._check_os_version(LIBERTY, version): ret.append(NW_JSON_NAME) if self._check_os_version(NEWTON_TWO, version): ret.append(VD2_JSON_NAME) return ret def _user_data(self, version, path): if self.userdata_raw is None: raise KeyError(path) return self.userdata_raw def _network_data(self, version, path): if self.network_metadata is None: return jsonutils.dump_as_bytes({}) return jsonutils.dump_as_bytes(self.network_metadata) def _password(self, version, path): if self._check_os_version(GRIZZLY, version): return password.handle_password raise KeyError(path) def _vendor_data(self, version, path): if self._check_os_version(HAVANA, version): self.set_mimetype(MIME_TYPE_APPLICATION_JSON) if (CONF.api.vendordata_providers and 'StaticJSON' in CONF.api.vendordata_providers): return jsonutils.dump_as_bytes( 
self.vendordata_providers['StaticJSON'].get()) raise KeyError(path) def _vendor_data2(self, version, path): if self._check_os_version(NEWTON_TWO, version): self.set_mimetype(MIME_TYPE_APPLICATION_JSON) j = {} for provider in CONF.api.vendordata_providers: if provider == 'StaticJSON': j['static'] = self.vendordata_providers['StaticJSON'].get() else: values = self.vendordata_providers[provider].get() for key in list(values): if key in j: LOG.warning('Removing duplicate metadata key: %s', key, instance=self.instance) del values[key] j.update(values) return jsonutils.dump_as_bytes(j) raise KeyError(path) def _check_version(self, required, requested, versions=VERSIONS): return versions.index(requested) >= versions.index(required) def _check_os_version(self, required, requested): return self._check_version(required, requested, OPENSTACK_VERSIONS) def _get_hostname(self): # TODO(stephenfin): At some point in the future, we may wish to # retrieve this information from neutron. if CONF.api.dhcp_domain: return '.'.join([self.instance.hostname, CONF.api.dhcp_domain]) return self.instance.hostname def lookup(self, path): if path == "" or path[0] != "/": path = posixpath.normpath("/" + path) else: path = posixpath.normpath(path) # Set default mimeType. It will be modified only if there is a change self.set_mimetype(MIME_TYPE_TEXT_PLAIN) # fix up requests, prepending /ec2 to anything that does not match path_tokens = path.split('/')[1:] if path_tokens[0] not in ("ec2", "openstack"): if path_tokens[0] == "": # request for / path_tokens = ["ec2"] else: path_tokens = ["ec2"] + path_tokens path = "/" + "/".join(path_tokens) # all values of 'path' input starts with '/' and have no trailing / # specifically handle the top level request if len(path_tokens) == 1: if path_tokens[0] == "openstack": # NOTE(vish): don't show versions that are in the future today = timeutils.utcnow().strftime("%Y-%m-%d") versions = [v for v in OPENSTACK_VERSIONS if v <= today] if OPENSTACK_VERSIONS != versions: LOG.debug("future versions %s hidden in version list", [v for v in OPENSTACK_VERSIONS if v not in versions], instance=self.instance) versions += ["latest"] else: versions = VERSIONS + ["latest"] return versions try: if path_tokens[0] == "openstack": data = self.get_openstack_item(path_tokens[1:]) else: data = self.get_ec2_item(path_tokens[1:]) except (InvalidMetadataVersion, KeyError): raise InvalidMetadataPath(path) return data def metadata_for_config_drive(self): """Yields (path, value) tuples for metadata elements.""" # EC2 style metadata for version in VERSIONS + ["latest"]: if version in CONF.api.config_drive_skip_versions.split(' '): continue data = self.get_ec2_metadata(version) if 'user-data' in data: filepath = os.path.join('ec2', version, 'user-data') yield (filepath, data['user-data']) del data['user-data'] try: del data['public-keys']['0']['_name'] except KeyError: pass filepath = os.path.join('ec2', version, 'meta-data.json') yield (filepath, jsonutils.dump_as_bytes(data['meta-data'])) ALL_OPENSTACK_VERSIONS = OPENSTACK_VERSIONS + ["latest"] for version in ALL_OPENSTACK_VERSIONS: path = 'openstack/%s/%s' % (version, MD_JSON_NAME) yield (path, self.lookup(path)) path = 'openstack/%s/%s' % (version, UD_NAME) if self.userdata_raw is not None: yield (path, self.lookup(path)) if self._check_version(HAVANA, version, ALL_OPENSTACK_VERSIONS): path = 'openstack/%s/%s' % (version, VD_JSON_NAME) yield (path, self.lookup(path)) if self._check_version(LIBERTY, version, ALL_OPENSTACK_VERSIONS): path = 'openstack/%s/%s' % 
(version, NW_JSON_NAME) yield (path, self.lookup(path)) if self._check_version(NEWTON_TWO, version, ALL_OPENSTACK_VERSIONS): path = 'openstack/%s/%s' % (version, VD2_JSON_NAME) yield (path, self.lookup(path)) for (cid, content) in self.content.items(): yield ('%s/%s/%s' % ("openstack", CONTENT_DIR, cid), content) class RouteConfiguration(object): """Routes metadata paths to request handlers.""" def __init__(self, path_handler): self.path_handlers = path_handler def _version(self, version): if version == "latest": version = OPENSTACK_VERSIONS[-1] if version not in OPENSTACK_VERSIONS: raise InvalidMetadataVersion(version) return version def handle_path(self, path_tokens): version = self._version(path_tokens[0]) if len(path_tokens) == 1: path = VERSION else: path = '/'.join(path_tokens[1:]) path_handler = self.path_handlers[path] if path_handler is None: raise KeyError(path) return path_handler(version, path) def get_metadata_by_address(address): ctxt = context.get_admin_context() fixed_ip = neutron.API().get_fixed_ip_by_address(ctxt, address) LOG.info('Fixed IP %(ip)s translates to instance UUID %(uuid)s', {'ip': address, 'uuid': fixed_ip['instance_uuid']}) return get_metadata_by_instance_id(fixed_ip['instance_uuid'], address, ctxt) def get_metadata_by_instance_id(instance_id, address, ctxt=None): ctxt = ctxt or context.get_admin_context() attrs = ['ec2_ids', 'flavor', 'info_cache', 'metadata', 'system_metadata', 'security_groups', 'keypairs', 'device_metadata', 'numa_topology'] if CONF.api.local_metadata_per_cell: instance = objects.Instance.get_by_uuid(ctxt, instance_id, expected_attrs=attrs) return InstanceMetadata(instance, address) try: im = objects.InstanceMapping.get_by_instance_uuid(ctxt, instance_id) except exception.InstanceMappingNotFound: LOG.warning('Instance mapping for %(uuid)s not found; ' 'cell setup is incomplete', {'uuid': instance_id}) instance = objects.Instance.get_by_uuid(ctxt, instance_id, expected_attrs=attrs) return InstanceMetadata(instance, address) with context.target_cell(ctxt, im.cell_mapping) as cctxt: instance = objects.Instance.get_by_uuid(cctxt, instance_id, expected_attrs=attrs) return InstanceMetadata(instance, address) def _format_instance_mapping(instance): bdms = instance.get_bdms() return block_device.instance_block_mapping(instance, bdms) def ec2_md_print(data): if isinstance(data, dict): output = '' for key in sorted(data.keys()): if key == '_name': continue if isinstance(data[key], dict): if '_name' in data[key]: output += str(data[key]['_name']) else: output += key + '/' else: output += key output += '\n' return output[:-1] elif isinstance(data, list): return '\n'.join(data) elif isinstance(data, (bytes, str)): return data else: return str(data) def find_path_in_tree(data, path_tokens): # given a dict/list tree, and a path in that tree, return data found there. for i in range(0, len(path_tokens)): if isinstance(data, dict) or isinstance(data, list): if path_tokens[i] in data: data = data[path_tokens[i]] else: raise KeyError("/".join(path_tokens[0:i])) else: if i != len(path_tokens) - 1: raise KeyError("/".join(path_tokens[0:i])) data = data[path_tokens[i]] return data ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/metadata/handler.py0000664000175000017500000003351000000000000020047 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Metadata request handler.""" import hashlib import hmac import os from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import strutils import webob.dec import webob.exc from nova.api.metadata import base from nova.api import wsgi from nova import cache_utils import nova.conf from nova import context as nova_context from nova import exception from nova.i18n import _ from nova.network import neutron as neutronapi CONF = nova.conf.CONF LOG = logging.getLogger(__name__) # 160 networks is large enough to satisfy most cases. # Yet while reaching 182 networks Neutron server will break as URL length # exceeds the maximum. Left this at 160 to allow additional parameters when # they're needed. MAX_QUERY_NETWORKS = 160 class MetadataRequestHandler(wsgi.Application): """Serve metadata.""" def __init__(self): self._cache = cache_utils.get_client( expiration_time=CONF.api.metadata_cache_expiration) if (CONF.neutron.service_metadata_proxy and not CONF.neutron.metadata_proxy_shared_secret): LOG.warning("metadata_proxy_shared_secret is not configured, " "the metadata information returned by the proxy " "cannot be trusted") def get_metadata_by_remote_address(self, address): if not address: raise exception.FixedIpNotFoundForAddress(address=address) cache_key = 'metadata-%s' % address data = self._cache.get(cache_key) if data: LOG.debug("Using cached metadata for %s", address) return data try: data = base.get_metadata_by_address(address) except exception.NotFound: LOG.exception('Failed to get metadata for IP %s', address) return None if CONF.api.metadata_cache_expiration > 0: self._cache.set(cache_key, data) return data def get_metadata_by_instance_id(self, instance_id, address): cache_key = 'metadata-%s' % instance_id data = self._cache.get(cache_key) if data: LOG.debug("Using cached metadata for instance %s", instance_id) return data try: data = base.get_metadata_by_instance_id(instance_id, address) except exception.NotFound: return None if CONF.api.metadata_cache_expiration > 0: self._cache.set(cache_key, data) return data @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): if os.path.normpath(req.path_info) == "/": resp = base.ec2_md_print(base.VERSIONS + ["latest"]) req.response.body = encodeutils.to_utf8(resp) req.response.content_type = base.MIME_TYPE_TEXT_PLAIN return req.response # Convert webob.headers.EnvironHeaders to a dict and mask any sensitive # details from the logs. if CONF.debug: headers = {k: req.headers[k] for k in req.headers} LOG.debug('Metadata request headers: %s', strutils.mask_dict_password(headers)) if CONF.neutron.service_metadata_proxy: if req.headers.get('X-Metadata-Provider'): meta_data = self._handle_instance_id_request_from_lb(req) else: meta_data = self._handle_instance_id_request(req) else: if req.headers.get('X-Instance-ID'): LOG.warning( "X-Instance-ID present in request headers. 
The " "'service_metadata_proxy' option must be " "enabled to process this header.") meta_data = self._handle_remote_ip_request(req) if meta_data is None: raise webob.exc.HTTPNotFound() try: data = meta_data.lookup(req.path_info) except base.InvalidMetadataPath: raise webob.exc.HTTPNotFound() if callable(data): return data(req, meta_data) resp = base.ec2_md_print(data) req.response.body = encodeutils.to_utf8(resp) req.response.content_type = meta_data.get_mimetype() return req.response def _handle_remote_ip_request(self, req): remote_address = req.remote_addr try: meta_data = self.get_metadata_by_remote_address(remote_address) except Exception: LOG.exception('Failed to get metadata for IP %s', remote_address) msg = _('An unknown error has occurred. ' 'Please try your request again.') raise webob.exc.HTTPInternalServerError(explanation=str(msg)) if meta_data is None: LOG.error('Failed to get metadata for IP %s: no metadata', remote_address) return meta_data def _handle_instance_id_request(self, req): instance_id = req.headers.get('X-Instance-ID') tenant_id = req.headers.get('X-Tenant-ID') signature = req.headers.get('X-Instance-ID-Signature') remote_address = req.headers.get('X-Forwarded-For') # Ensure that only one header was passed if instance_id is None: msg = _('X-Instance-ID header is missing from request.') elif signature is None: msg = _('X-Instance-ID-Signature header is missing from request.') elif tenant_id is None: msg = _('X-Tenant-ID header is missing from request.') elif not isinstance(instance_id, str): msg = _('Multiple X-Instance-ID headers found within request.') elif not isinstance(tenant_id, str): msg = _('Multiple X-Tenant-ID headers found within request.') else: msg = None if msg: raise webob.exc.HTTPBadRequest(explanation=msg) self._validate_shared_secret(instance_id, signature, remote_address) return self._get_meta_by_instance_id(instance_id, tenant_id, remote_address) def _get_instance_id_from_lb(self, provider_id, instance_address): # We use admin context, admin=True to lookup the # inter-Edge network port context = nova_context.get_admin_context() neutron = neutronapi.get_client(context, admin=True) # Tenant, instance ids are found in the following method: # X-Metadata-Provider contains id of the metadata provider, and since # overlapping networks cannot be connected to the same metadata # provider, the combo of tenant's instance IP and the metadata # provider has to be unique. # # The networks which are connected to the metadata provider are # retrieved in the 1st call to neutron.list_subnets() # In the 2nd call we read the ports which belong to any of the # networks retrieved above, and have the X-Forwarded-For IP address. # This combination has to be unique as explained above, and we can # read the instance_id, tenant_id from that port entry. 
# Retrieve networks which are connected to metadata provider md_subnets = neutron.list_subnets( context, advanced_service_providers=[provider_id], fields=['network_id']) if not md_subnets or not md_subnets.get('subnets'): msg = _('Could not find any subnets for provider %s') % provider_id LOG.error(msg) raise webob.exc.HTTPBadRequest(explanation=msg) md_networks = [subnet['network_id'] for subnet in md_subnets['subnets']] try: # Retrieve the instance data from the instance's port ports = [] while md_networks: ports.extend(neutron.list_ports( context, fixed_ips='ip_address=' + instance_address, network_id=md_networks[:MAX_QUERY_NETWORKS], fields=['device_id', 'tenant_id'])['ports']) md_networks = md_networks[MAX_QUERY_NETWORKS:] except Exception as e: LOG.error('Failed to get instance id for metadata ' 'request, provider %(provider)s ' 'networks %(networks)s ' 'requester %(requester)s. Error: %(error)s', {'provider': provider_id, 'networks': md_networks, 'requester': instance_address, 'error': e}) msg = _('An unknown error has occurred. ' 'Please try your request again.') raise webob.exc.HTTPBadRequest(explanation=msg) if len(ports) != 1: msg = _('Expected a single port matching provider %(pr)s ' 'and IP %(ip)s. Found %(count)d.') % { 'pr': provider_id, 'ip': instance_address, 'count': len(ports)} LOG.error(msg) raise webob.exc.HTTPBadRequest(explanation=msg) instance_data = ports[0] instance_id = instance_data['device_id'] tenant_id = instance_data['tenant_id'] # instance_data is unicode-encoded, while cache_utils doesn't like # that. Therefore we convert to str if isinstance(instance_id, str): instance_id = instance_id.encode('utf-8') return instance_id, tenant_id def _handle_instance_id_request_from_lb(self, req): remote_address = req.headers.get('X-Forwarded-For') if remote_address is None: msg = _('X-Forwarded-For is missing from request.') raise webob.exc.HTTPBadRequest(explanation=msg) provider_id = req.headers.get('X-Metadata-Provider') if provider_id is None: msg = _('X-Metadata-Provider is missing from request.') raise webob.exc.HTTPBadRequest(explanation=msg) instance_address = remote_address.split(',')[0] # If authentication token is set, authenticate if CONF.neutron.metadata_proxy_shared_secret: signature = req.headers.get('X-Metadata-Provider-Signature') self._validate_shared_secret(provider_id, signature, instance_address) cache_key = 'provider-%s-%s' % (provider_id, instance_address) data = self._cache.get(cache_key) if data: LOG.debug("Using cached metadata for %s for %s", provider_id, instance_address) instance_id, tenant_id = data else: instance_id, tenant_id = self._get_instance_id_from_lb( provider_id, instance_address) if CONF.api.metadata_cache_expiration > 0: self._cache.set(cache_key, (instance_id, tenant_id)) LOG.debug('Instance %s with address %s matches provider %s', instance_id, remote_address, provider_id) return self._get_meta_by_instance_id(instance_id, tenant_id, instance_address) def _validate_shared_secret(self, requestor_id, signature, requestor_address): expected_signature = hmac.new( encodeutils.to_utf8(CONF.neutron.metadata_proxy_shared_secret), encodeutils.to_utf8(requestor_id), hashlib.sha256).hexdigest() if (not signature or not hmac.compare_digest(expected_signature, signature)): if requestor_id: LOG.warning('X-Instance-ID-Signature: %(signature)s does ' 'not match the expected value: ' '%(expected_signature)s for id: ' '%(requestor_id)s. 
Request From: ' '%(requestor_address)s', {'signature': signature, 'expected_signature': expected_signature, 'requestor_id': requestor_id, 'requestor_address': requestor_address}) msg = _('Invalid proxy request signature.') raise webob.exc.HTTPForbidden(explanation=msg) def _get_meta_by_instance_id(self, instance_id, tenant_id, remote_address): try: meta_data = self.get_metadata_by_instance_id(instance_id, remote_address) except Exception: LOG.exception('Failed to get metadata for instance id: %s', instance_id) msg = _('An unknown error has occurred. ' 'Please try your request again.') raise webob.exc.HTTPInternalServerError(explanation=str(msg)) if meta_data is None: LOG.error('Failed to get metadata for instance id: %s', instance_id) elif meta_data.instance.project_id != tenant_id: LOG.warning("Tenant_id %(tenant_id)s does not match tenant_id " "of instance %(instance_id)s.", {'tenant_id': tenant_id, 'instance_id': instance_id}) # causes a 404 to be raised meta_data = None return meta_data ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/metadata/password.py0000664000175000017500000000557300000000000020304 0ustar00zuulzuul00000000000000# Copyright 2012 Nebula, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc import nova.conf from nova import context from nova import exception from nova.i18n import _ from nova import objects from nova import utils CONF = nova.conf.CONF CHUNKS = 4 CHUNK_LENGTH = 255 MAX_SIZE = CHUNKS * CHUNK_LENGTH def extract_password(instance): result = '' sys_meta = utils.instance_sys_meta(instance) for key in sorted(sys_meta.keys()): if key.startswith('password_'): result += sys_meta[key] return result or None def convert_password(context, password): """Stores password as system_metadata items. Password is stored with the keys 'password_0' -> 'password_3'. """ password = password or '' if isinstance(password, bytes): password = password.decode('utf-8') meta = {} for i in range(CHUNKS): meta['password_%d' % i] = password[:CHUNK_LENGTH] password = password[CHUNK_LENGTH:] return meta def handle_password(req, meta_data): ctxt = context.get_admin_context() if req.method == 'GET': return meta_data.password elif req.method == 'POST': # NOTE(vish): The conflict will only happen once the metadata cache # updates, but it isn't a huge issue if it can be set for # a short window. 
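A minimal sketch, separate from the module above, of the round trip implied by convert_password(): the password is spread across the system_metadata keys password_0..password_3 in 255-character chunks, and extract_password() reassembles them in sorted key order. The 300-character value is arbitrary.

    meta = convert_password(None, 'x' * 300)   # the context argument is unused
    assert sorted(meta) == ['password_0', 'password_1', 'password_2', 'password_3']
    assert len(meta['password_0']) == 255 and len(meta['password_1']) == 45

    # Reassemble the chunks the same way extract_password() does.
    rebuilt = ''.join(meta['password_%d' % i] for i in range(CHUNKS))
    assert rebuilt == 'x' * 300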
if meta_data.password: raise exc.HTTPConflict() if (req.content_length > MAX_SIZE or len(req.body) > MAX_SIZE): msg = _("Request is too large.") raise exc.HTTPBadRequest(explanation=msg) if CONF.api.local_metadata_per_cell: instance = objects.Instance.get_by_uuid(ctxt, meta_data.uuid) else: im = objects.InstanceMapping.get_by_instance_uuid( ctxt, meta_data.uuid) with context.target_cell(ctxt, im.cell_mapping) as cctxt: try: instance = objects.Instance.get_by_uuid( cctxt, meta_data.uuid) except exception.InstanceNotFound as e: raise exc.HTTPBadRequest(explanation=e.format_message()) instance.system_metadata.update(convert_password(ctxt, req.body)) instance.save() else: msg = _("GET and POST only are supported.") raise exc.HTTPBadRequest(explanation=msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/metadata/vendordata.py0000664000175000017500000000215000000000000020555 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class VendorDataDriver(object): """The base VendorData Drivers should inherit from.""" def __init__(self, *args, **kwargs): """Init method should do all expensive operations.""" self._data = {} def get(self): """Return a dictionary of primitives to be rendered in metadata :return: A dictionary of primitives. """ return self._data ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/metadata/vendordata_dynamic.py0000664000175000017500000001207700000000000022272 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace Australia # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Render vendordata as stored fetched from REST microservices.""" import sys from keystoneauth1 import exceptions as ks_exceptions from keystoneauth1 import loading as ks_loading from oslo_log import log as logging from oslo_serialization import jsonutils from nova.api.metadata import vendordata import nova.conf CONF = nova.conf.CONF LOG = logging.getLogger(__name__) _SESSION = None _ADMIN_AUTH = None def _load_ks_session(conf): """Load session. This is either an authenticated session or a requests session, depending on what's configured. 
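For orientation, a hypothetical nova.conf fragment wiring this driver up. The [api] option names are the ones read in this module; the auth section (conventionally [vendordata_dynamic_auth], which _load_ks_session() loads via ks_loading) and all concrete values are placeholders, not a definitive configuration.

    [api]
    vendordata_providers = StaticJSON,DynamicJSON
    vendordata_dynamic_targets = testing@http://127.0.0.1:9000/vendordata
    vendordata_dynamic_connect_timeout = 5
    vendordata_dynamic_read_timeout = 30

    [vendordata_dynamic_auth]
    auth_type = password
    auth_url = http://keystone.example.com/identity
    username = nova
    password = placeholder
    project_name = service
    user_domain_name = Default
    project_domain_name = Default

Each target's JSON response is exposed to the instance under the target's name (here "testing") via the metadata API's vendor_data2.json document.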
""" global _ADMIN_AUTH global _SESSION if not _ADMIN_AUTH: _ADMIN_AUTH = ks_loading.load_auth_from_conf_options( conf, nova.conf.vendordata.vendordata_group.name) if not _ADMIN_AUTH: LOG.warning('Passing insecure dynamic vendordata requests ' 'because of missing or incorrect service account ' 'configuration.') if not _SESSION: _SESSION = ks_loading.load_session_from_conf_options( conf, nova.conf.vendordata.vendordata_group.name, auth=_ADMIN_AUTH) return _SESSION class DynamicVendorData(vendordata.VendorDataDriver): def __init__(self, instance): self.instance = instance # We only create the session if we make a request. self.session = None def _do_request(self, service_name, url): if self.session is None: self.session = _load_ks_session(CONF) try: body = {'project-id': self.instance.project_id, 'instance-id': self.instance.uuid, 'image-id': self.instance.image_ref, 'user-data': self.instance.user_data, 'hostname': self.instance.hostname, 'metadata': self.instance.metadata, 'boot-roles': self.instance.system_metadata.get( 'boot_roles', '')} headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'User-Agent': 'openstack-nova-vendordata'} # SSL verification verify = url.startswith('https://') if verify and CONF.api.vendordata_dynamic_ssl_certfile: verify = CONF.api.vendordata_dynamic_ssl_certfile timeout = (CONF.api.vendordata_dynamic_connect_timeout, CONF.api.vendordata_dynamic_read_timeout) res = self.session.request(url, 'POST', data=jsonutils.dumps(body), verify=verify, headers=headers, timeout=timeout) if res and res.text: # TODO(mikal): Use the Cache-Control response header to do some # sensible form of caching here. return jsonutils.loads(res.text) return {} except (TypeError, ValueError, ks_exceptions.connection.ConnectionError, ks_exceptions.http.HttpError) as e: LOG.warning('Error from dynamic vendordata service ' '%(service_name)s at %(url)s: %(error)s', {'service_name': service_name, 'url': url, 'error': e}, instance=self.instance) if CONF.api.vendordata_dynamic_failure_fatal: raise e.with_traceback(sys.exc_info()[2]) return {} def get(self): j = {} for target in CONF.api.vendordata_dynamic_targets: # NOTE(mikal): a target is composed of the following: # name@url # where name is the name to use in the metadata handed to # instances, and url is the URL to fetch it from if target.find('@') == -1: LOG.warning('Vendordata target %(target)s lacks a name. ' 'Skipping', {'target': target}, instance=self.instance) continue tokens = target.split('@') name = tokens[0] url = '@'.join(tokens[1:]) if name in j: LOG.warning('Vendordata already contains an entry named ' '%(target)s. Skipping', {'target': target}, instance=self.instance) continue j[name] = self._do_request(name, url) return j ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/metadata/vendordata_json.py0000664000175000017500000000363100000000000021613 0ustar00zuulzuul00000000000000# Copyright 2013 Canonical Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Render Vendordata as stored in configured file.""" import errno from oslo_log import log as logging from oslo_serialization import jsonutils from nova.api.metadata import vendordata import nova.conf CONF = nova.conf.CONF LOG = logging.getLogger(__name__) class JsonFileVendorData(vendordata.VendorDataDriver): def __init__(self, *args, **kwargs): super(JsonFileVendorData, self).__init__(*args, **kwargs) data = {} fpath = CONF.api.vendordata_jsonfile_path logprefix = "vendordata_jsonfile_path[%s]:" % fpath if fpath: try: with open(fpath, "rb") as fp: data = jsonutils.load(fp) except IOError as e: if e.errno == errno.ENOENT: LOG.warning("%(logprefix)s file does not exist", {'logprefix': logprefix}) else: LOG.warning("%(logprefix)s unexpected IOError when " "reading", {'logprefix': logprefix}) raise except ValueError: LOG.warning("%(logprefix)s failed to load json", {'logprefix': logprefix}) raise self._data = data def get(self): return self._data ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/metadata/wsgi.py0000664000175000017500000000141100000000000017376 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """WSGI application entry-point for Nova Metadata API, installed by pbr.""" from nova.api.openstack import wsgi_app NAME = "metadata" def init_application(): return wsgi_app.init_application(NAME) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.289608 nova-32.0.0/nova/api/openstack/0000775000175000017500000000000000000000000016265 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/__init__.py0000664000175000017500000002136000000000000020400 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ WSGI middleware for OpenStack API controllers. 
""" # autopep8: off from nova import monkey_patch ; monkey_patch.patch() # noqa # autopep8: on from oslo_log import log as logging import routes import webob.dec import webob.exc from nova.api.openstack import wsgi from nova.api import wsgi as base_wsgi import nova.conf from nova.i18n import translate LOG = logging.getLogger(__name__) CONF = nova.conf.CONF def walk_class_hierarchy(clazz, encountered=None): """Walk class hierarchy, yielding most derived classes first.""" if not encountered: encountered = [] for subclass in clazz.__subclasses__(): if subclass not in encountered: encountered.append(subclass) # drill down to leaves first for subsubclass in walk_class_hierarchy(subclass, encountered): yield subsubclass yield subclass class FaultWrapper(base_wsgi.Middleware): """Calls down the middleware stack, making exceptions into faults.""" _status_to_type = {} @staticmethod def status_to_type(status): if not FaultWrapper._status_to_type: for clazz in walk_class_hierarchy(webob.exc.HTTPError): FaultWrapper._status_to_type[clazz.code] = clazz return FaultWrapper._status_to_type.get( status, webob.exc.HTTPInternalServerError)() def _error(self, inner, req): LOG.exception("Caught error: %s", inner) safe = getattr(inner, 'safe', False) headers = getattr(inner, 'headers', None) status = getattr(inner, 'code', 500) if status is None: status = 500 msg_dict = dict(url=req.url, status=status) LOG.info("%(url)s returned with HTTP %(status)d", msg_dict) outer = self.status_to_type(status) if headers: outer.headers = headers # NOTE(johannes): We leave the explanation empty here on # purpose. It could possibly have sensitive information # that should not be returned back to the user. See # bugs 868360 and 874472 # NOTE(eglynn): However, it would be over-conservative and # inconsistent with the EC2 API to hide every exception, # including those that are safe to expose, see bug 1021373 if safe: user_locale = req.best_match_language() inner_msg = translate(inner.message, user_locale) outer.explanation = '%s: %s' % (inner.__class__.__name__, inner_msg) return wsgi.Fault(outer) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): try: return req.get_response(self.application) except Exception as ex: return self._error(ex, req) class LegacyV2CompatibleWrapper(base_wsgi.Middleware): def _filter_request_headers(self, req): """For keeping same behavior with v2 API, ignores microversions HTTP headers X-OpenStack-Nova-API-Version and OpenStack-API-Version in the request. """ if wsgi.API_VERSION_REQUEST_HEADER in req.headers: del req.headers[wsgi.API_VERSION_REQUEST_HEADER] if wsgi.LEGACY_API_VERSION_REQUEST_HEADER in req.headers: del req.headers[wsgi.LEGACY_API_VERSION_REQUEST_HEADER] return req def _filter_response_headers(self, response): """For keeping same behavior with v2 API, filter out microversions HTTP header and microversions field in header 'Vary'. 
""" if wsgi.API_VERSION_REQUEST_HEADER in response.headers: del response.headers[wsgi.API_VERSION_REQUEST_HEADER] if wsgi.LEGACY_API_VERSION_REQUEST_HEADER in response.headers: del response.headers[wsgi.LEGACY_API_VERSION_REQUEST_HEADER] if 'Vary' in response.headers: vary_headers = response.headers['Vary'].split(',') filtered_vary = [] for vary in vary_headers: vary = vary.strip() if (vary == wsgi.API_VERSION_REQUEST_HEADER or vary == wsgi.LEGACY_API_VERSION_REQUEST_HEADER): continue filtered_vary.append(vary) if filtered_vary: response.headers['Vary'] = ','.join(filtered_vary) else: del response.headers['Vary'] return response @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): req.set_legacy_v2() req = self._filter_request_headers(req) response = req.get_response(self.application) return self._filter_response_headers(response) class APIMapper(routes.Mapper): def routematch(self, url=None, environ=None): if url == "": result = self._match("", environ) return result[0], result[1] return routes.Mapper.routematch(self, url, environ) def connect(self, *args, **kargs): # NOTE(vish): Default the format part of a route to only accept json # and xml so it doesn't eat all characters after a '.' # in the url. kargs.setdefault('requirements', {}) if not kargs['requirements'].get('format'): kargs['requirements']['format'] = 'json|xml' return routes.Mapper.connect(self, *args, **kargs) class ProjectMapper(APIMapper): def _get_project_id_token(self): # NOTE(sdague): project_id parameter is only valid if its hex # or hex + dashes (note, integers are a subset of this). This # is required to hand our overlapping routes issues. return '{project_id:[0-9a-f-]+}' def resource(self, member_name, collection_name, **kwargs): project_id_token = self._get_project_id_token() if 'parent_resource' not in kwargs: kwargs['path_prefix'] = '%s/' % project_id_token else: parent_resource = kwargs['parent_resource'] p_collection = parent_resource['collection_name'] p_member = parent_resource['member_name'] kwargs['path_prefix'] = '%s/%s/:%s_id' % ( project_id_token, p_collection, p_member) routes.Mapper.resource( self, member_name, collection_name, **kwargs) # while we are in transition mode, create additional routes # for the resource that do not include project_id. 
if 'parent_resource' not in kwargs: del kwargs['path_prefix'] else: parent_resource = kwargs['parent_resource'] p_collection = parent_resource['collection_name'] p_member = parent_resource['member_name'] kwargs['path_prefix'] = '%s/:%s_id' % (p_collection, p_member) routes.Mapper.resource(self, member_name, collection_name, **kwargs) def create_route(self, path, method, controller, action): project_id_token = self._get_project_id_token() # while we transition away from project IDs in the API URIs, create # additional routes that include the project_id self.connect('/%s%s' % (project_id_token, path), conditions=dict(method=[method]), controller=controller, action=action) self.connect(path, conditions=dict(method=[method]), controller=controller, action=action) class PlainMapper(APIMapper): def resource(self, member_name, collection_name, **kwargs): if 'parent_resource' in kwargs: parent_resource = kwargs['parent_resource'] p_collection = parent_resource['collection_name'] p_member = parent_resource['member_name'] kwargs['path_prefix'] = '%s/:%s_id' % (p_collection, p_member) routes.Mapper.resource(self, member_name, collection_name, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/api_version_request.py0000664000175000017500000005245700000000000022742 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import typing as ty from nova import exception from nova.i18n import _ if ty.TYPE_CHECKING: from nova.api.openstack import wsgi # Define the minimum and maximum version of the API across all of the # REST API. The format of the version is: # X.Y where: # # - X will only be changed if a significant backwards incompatible API # change is made which affects the API as whole. That is, something # that is only very very rarely incremented. # # - Y when you make any change to the API. Note that this includes # semantic changes which may not affect the input or output formats or # even originate in the API code layer. We are not distinguishing # between backwards compatible and backwards incompatible changes in # the versioning system. It must be made clear in the documentation as # to what is a backwards compatible change and what is a backwards # incompatible one. # # You must update the API version history string below with a one or # two line description as well as update rest_api_version_history.rst REST_API_VERSION_HISTORY = """REST API Version History: * 2.1 - Initial version. Equivalent to v2.0 code * 2.2 - Adds (keypair) type parameter for os-keypairs plugin Fixes success status code for create/delete a keypair method * 2.3 - Exposes additional os-extended-server-attributes Exposes delete_on_termination for os-extended-volumes * 2.4 - Exposes reserved field in os-fixed-ips. * 2.5 - Allow server search option ip6 for non-admin * 2.6 - Consolidate the APIs for getting remote consoles * 2.7 - Check flavor type before add tenant access. 
* 2.8 - Add new protocol for VM console (mks) * 2.9 - Exposes lock information in server details. * 2.10 - Allow admins to query, create and delete keypairs owned by any user. * 2.11 - Exposes forced_down attribute for os-services * 2.12 - Exposes VIF net_id in os-virtual-interfaces * 2.13 - Add project id and user id information for os-server-groups API * 2.14 - Remove onSharedStorage from evacuate request body and remove adminPass from the response body * 2.15 - Add soft-affinity and soft-anti-affinity policies * 2.16 - Exposes host_status for servers/detail and servers/{server_id} * 2.17 - Add trigger_crash_dump to server actions * 2.18 - Makes project_id optional in v2.1 * 2.19 - Allow user to set and get the server description * 2.20 - Add attach and detach volume operations for instances in shelved and shelved_offloaded state * 2.21 - Make os-instance-actions read deleted instances * 2.22 - Add API to force live migration to complete * 2.23 - Add index/show API for server migrations. Also add migration_type for /os-migrations and add ref link for it when the migration is an in progress live migration. * 2.24 - Add API to cancel a running live migration * 2.25 - Make block_migration support 'auto' and remove disk_over_commit for os-migrateLive. * 2.26 - Adds support of server tags * 2.27 - Adds support for new-style microversion headers while keeping support for the original style. * 2.28 - Changes compute_node.cpu_info from string to object * 2.29 - Add a force flag in evacuate request body and change the behaviour for the host flag by calling the scheduler. * 2.30 - Add a force flag in live-migrate request body and change the behaviour for the host flag by calling the scheduler. * 2.31 - Fix os-console-auth-tokens to work for all console types. * 2.32 - Add tag to networks and block_device_mapping_v2 in server boot request body. * 2.33 - Add pagination support for hypervisors. * 2.34 - Checks before live-migration are made in asynchronous way. os-Migratelive Action does not throw badRequest in case of pre-checks failure. Verification result is available over instance-actions. * 2.35 - Adds keypairs pagination support. * 2.36 - Deprecates all the API which proxy to another service and fping API. * 2.37 - Adds support for auto-allocating networking, otherwise known as "Get me a Network". Also enforces server.networks.uuid to be in UUID format. * 2.38 - Add a condition to return HTTPBadRequest if invalid status is provided for listing servers. * 2.39 - Deprecates image-metadata proxy API * 2.40 - Adds simple tenant usage pagination support. * 2.41 - Return uuid attribute for aggregates. * 2.42 - In the context of device tagging at instance boot time, re-introduce the tag attribute that, due to bugs, was lost starting with version 2.33 for block devices and starting with version 2.37 for network interfaces. * 2.43 - Deprecate os-hosts API * 2.44 - The servers action addFixedIp, removeFixedIp, addFloatingIp, removeFloatingIp and os-virtual-interfaces APIs are deprecated. * 2.45 - The createImage and createBackup APIs no longer return a Location header in the response for the snapshot image, they now return a json dict in the response body with an image_id key and uuid value. * 2.46 - Return ``X-OpenStack-Request-ID`` header on requests. * 2.47 - When displaying server details, display the flavor as a dict rather than a link. If the user is prevented from retrieving the flavor extra-specs by policy, simply omit the field from the output. * 2.48 - Standardize VM diagnostics info. 
* 2.49 - Support tagged attachment of network interfaces and block devices. * 2.50 - Exposes ``server_groups`` and ``server_group_members`` keys in GET & PUT ``os-quota-class-sets`` APIs response. Also filter out Network related quotas from ``os-quota-class-sets`` API * 2.51 - Adds new event name to external-events (volume-extended). Also, non-admins can see instance action event details except for the traceback field. * 2.52 - Adds support for applying tags when creating a server. * 2.53 - Service and compute node (hypervisor) database ids are hidden. The os-services and os-hypervisors APIs now return a uuid in the id field, and takes a uuid in requests. PUT and GET requests and responses are also changed. * 2.54 - Enable reset key pair while rebuilding instance. * 2.55 - Added flavor.description to GET/POST/PUT flavors APIs. * 2.56 - Add a host parameter in migrate request body in order to enable users to specify a target host in cold migration. The target host is checked by the scheduler. * 2.57 - Deprecated personality files from POST /servers and the rebuild server action APIs. Added the ability to pass new user_data to the rebuild server action API. Personality / file injection related limits and quota resources are also removed. * 2.58 - Add pagination support and changes-since filter for os-instance-actions API. * 2.59 - Add pagination support and changes-since filter for os-migrations API. And the os-migrations API now returns both the id and the uuid in response. * 2.60 - Add support for attaching a single volume to multiple instances. * 2.61 - Exposes flavor extra_specs in the flavor representation. Flavor extra_specs will be included in Response body of GET, POST, PUT /flavors APIs. * 2.62 - Add ``host`` and ``hostId`` fields to instance action detail API responses. * 2.63 - Add support for applying trusted certificates when creating or rebuilding a server. * 2.64 - Add support for the "max_server_per_host" policy rule for ``anti-affinity`` server group policy, the ``policies`` and ``metadata`` fields are removed and the ``policy`` (required) and ``rules`` (optional) fields are added in response body of GET, POST /os-server-groups APIs and GET /os-server-groups/{group_id} API. * 2.65 - Add support for abort live migrations in ``queued`` and ``preparing`` status. * 2.66 - Add ``changes-before`` to support users to specify the ``updated_at`` time to filter nova resources, the resources include the servers API, os-instance-action API and os-migrations API. * 2.67 - Adds the optional ``volume_type`` field to the ``block_device_mapping_v2`` parameter when creating a server. * 2.68 - Remove support for forced live migration and evacuate server actions. * 2.69 - Add support for returning minimal constructs for ``GET /servers``, ``GET /servers/detail``, ``GET /servers/{server_id}`` and ``GET /os-services`` when there is a transient unavailability condition in the deployment like an infrastructure failure. * 2.70 - Exposes virtual device tags in the response of the ``os-volume_attachments`` and ``os-interface`` APIs. * 2.71 - Adds the ``server_groups`` field to ``GET /servers/{id}``, ``PUT /servers/{server_id}`` and ``POST /servers/{server_id}/action`` (rebuild) responses. * 2.72 - Add support for neutron ports with resource request during server create. Server move operations are not yet supported for servers with such ports. 
* 2.73 - Adds support for specifying a reason when locking the server and exposes this via the response from ``GET /servers/detail``, ``GET /servers/{server_id}``, ``PUT servers/{server_id}`` and ``POST /servers/{server_id}/action`` where the action is rebuild. It also supports ``locked`` as a filter/sort parameter for ``GET /servers/detail`` and ``GET /servers``. * 2.74 - Add support for specifying ``host`` and/or ``hypervisor_hostname`` in request body to ``POST /servers``. Allow users to specify which host/node they want their servers to land on and still be validated by the scheduler. * 2.75 - Multiple API cleanup listed below: - 400 for unknown param for query param and for request body. - Making server representation always consistent among GET, PUT and Rebuild serevr APIs response. - Change the default return value of swap field from the empty string to 0 (integer) in flavor APIs. - Return ``servers`` field always in the response of GET hypervisors API even there are no servers on hypervisor. * 2.76 - Adds ``power-update`` event to ``os-server-external-events`` API. The changes to the power state of an instance caused by this event can be viewed through ``GET /servers/{server_id}/os-instance-actions`` and ``GET /servers/{server_id}/os-instance-actions/{request_id}``. * 2.77 - Add support for specifying ``availability_zone`` to unshelve of a shelved offload server. * 2.78 - Adds new API ``GET /servers/{server_id}/topology`` which shows NUMA topology of a given server. * 2.79 - Adds support for specifying ``delete_on_termination`` field in the request body to ``POST /servers/{server_id}/os-volume_attachments`` and exposes this via the response from ``POST /servers/{server_id}/os-volume_attachments``, ``GET /servers/{server_id}/os-volume_attachments`` and ``GET /servers/{server_id}/os-volume_attachments/{volume_id}``. * 2.80 - Adds support for optional query parameters ``user_id`` and ``project_id`` to the ``GET /os-migrations`` API and exposes ``user_id`` and ``project_id`` via the response from ``GET /os-migrations``, ``GET /servers/{server_id}/migrations``, and ``GET /servers/{server_id}/migrations/{migration_id}``. * 2.81 - Adds support for image cache management by aggregate by adding ``POST /os-aggregates/{aggregate_id}/images``. * 2.82 - Adds ``accelerator-request-bound`` event to ``os-server-external-events`` API. This event is sent by Cyborg to indicate completion of ARQ binding. The ARQs can be obtained from Cyborg with ``GET /v2/accelerator_requests?instance={uuid}`` * 2.83 - Allow more filter parameters for ``GET /servers/detail`` and ``GET /servers`` for non-admin. * 2.84 - Adds ``details`` field to instance action events. * 2.85 - Add support for ``PUT /servers/{server_id}/os-volume_attachments/{volume_id}`` which supports specifying the ``delete_on_termination`` field in the request body to change the attached volume's flag. * 2.86 - Add support for validation of known extra specs to the ``POST /flavors/{flavor_id}/os-extra_specs`` and ``PUT /flavors/{flavor_id}/os-extra_specs/{id}`` APIs. * 2.87 - Adds support for rescuing boot from volume instances when the compute host reports the COMPUTE_RESCUE_BFV capability trait. * 2.88 - Drop statistics-style fields from the ``/os-hypervisors/detail`` and ``/os-hypervisors/{hypervisor_id}`` APIs, and remove the ``/os-hypervisors/statistics`` and ``/os-hypervisors/{hypervisor_id}/uptime`` APIs entirely. 
* 2.89 - Add ``attachment_id``, ``bdm_uuid`` and remove ``id`` from the responses of ``GET /servers/{server_id}/os-volume_attachments`` and ``GET /servers/{server_id}/os-volume_attachments/{volume_id}`` * 2.90 - Add support for requesting a specific hostname when creating, updating or rebuilding an instance. The ``OS-EXT-SRV-ATTR:hostname`` attribute is now returned in various server responses regardless of policy configuration. * 2.91 - Add support to unshelve instance to a specific host and to pin/unpin AZ. * 2.92 - Drop generation of keypair, add keypair name validation on ``POST /os-keypairs`` and allow including @ and dot (.) characters in keypair name. * 2.93 - Add support for volume backed server rebuild. * 2.94 - Allow FQDN in server hostname. * 2.95 - Evacuate will now stop instance at destination. * 2.96 - Add support for returning pinned_availability_zone in ``server show``, ``server list --long``, ``server update``, and ``server rebuild`` responses. * 2.97 - Adds new API ``GET /servers/{server_id}/shares`` which shows shares attachments of a given server. ``GET /servers/{server_id}/shares/{share_id} which gives details about a share attachment. ``POST /servers/{server_id}/shares/{share_id} which create an attachment. ``DELETE /servers/{server_id}/shares/{share_id} which delete an attachment. * 2.98 - Add support for returning embedded image properties in ``server show``, ``server list --long``, `server update``, and in the ``server rebuild`` responses. * 2.99 - Add the spice-direct console type to the spice console protocol. * 2.100 - Add support for returning associated scheduler_hints in ``GET /servers/{server_id}``, ``GET /servers/detail``, ``PUT /servers/{server_id}`` and ``POST /server/{server_id}/action`` (rebuild) responses. """ # The minimum and maximum versions of the API supported # The default api version request is defined to be the # minimum version of the API supported. # Note(cyeoh): This only applies for the v2.1 API once microversions # support is fully merged. It does not affect the V2 API. _MIN_API_VERSION = '2.1' _MAX_API_VERSION = '2.100' DEFAULT_API_VERSION = _MIN_API_VERSION # Almost all proxy APIs which are related to network, images and baremetal # were deprecated from 2.36. MAX_PROXY_API_SUPPORT_VERSION = '2.35' MIN_WITHOUT_PROXY_API_SUPPORT_VERSION = '2.36' # Starting from microversion 2.39 also image-metadata proxy API is deprecated. MAX_IMAGE_META_PROXY_API_VERSION = '2.38' MIN_WITHOUT_IMAGE_META_PROXY_API_VERSION = '2.39' # NOTE(cyeoh): min and max versions declared as functions so we can # mock them for unittests. Do not use the constants directly anywhere # else. def min_api_version(): return APIVersionRequest(_MIN_API_VERSION) def max_api_version(): return APIVersionRequest(_MAX_API_VERSION) def is_supported(req: 'wsgi.Request', version: str) -> bool: """Check if API request version satisfies version restrictions. :param req: request object :param version: minimal version of API needed for correct request processing :returns: True if request satisfies minimal and maximum API version requirements. False in other case. 
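A small sketch, separate from the module, of why the APIVersionRequest class defined below compares versions numerically rather than lexically; the version strings are arbitrary.

    # Plain string comparison gets microversions wrong: '2.100' sorts
    # before '2.60'.
    assert '2.100' < '2.60'

    # APIVersionRequest compares (major, minor) tuples instead.
    assert APIVersionRequest('2.100') > APIVersionRequest('2.60')
    assert APIVersionRequest('2.60').matches(
        APIVersionRequest('2.1'), APIVersionRequest('2.100'))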
""" # TODO(stephenfin): This should be an error since it highlights bugs in # either our middleware or (more likely) an incomplete test if req.is_legacy_v2(): # legacy requests will not pass microversion info return False return req.api_version_request >= APIVersionRequest(version) class APIVersionRequest(object): """This class represents an API Version Request with convenience methods for manipulation and comparison of version numbers that we need to do to implement microversions. """ def __init__(self, version_string=None): """Create an API version request object. :param version_string: String representation of APIVersionRequest. Correct format is 'X.Y', where 'X' and 'Y' are int values. None value should be used to create Null APIVersionRequest, which is equal to 0.0 """ self.ver_major = 0 self.ver_minor = 0 if version_string is not None: match = re.match(r"^([1-9]\d*)\.([1-9]\d*|0)$", version_string) if match: self.ver_major = int(match.group(1)) self.ver_minor = int(match.group(2)) else: raise exception.InvalidAPIVersionString(version=version_string) def __str__(self): """Debug/Logging representation of object.""" return ("API Version Request Major: %s, Minor: %s" % (self.ver_major, self.ver_minor)) def is_null(self): return self.ver_major == 0 and self.ver_minor == 0 def _format_type_error(self, other): return TypeError(_("'%(other)s' should be an instance of '%(cls)s'") % {"other": other, "cls": self.__class__}) def __lt__(self, other): if not isinstance(other, APIVersionRequest): raise self._format_type_error(other) return ((self.ver_major, self.ver_minor) < (other.ver_major, other.ver_minor)) def __eq__(self, other): if not isinstance(other, APIVersionRequest): raise self._format_type_error(other) return ((self.ver_major, self.ver_minor) == (other.ver_major, other.ver_minor)) def __gt__(self, other): if not isinstance(other, APIVersionRequest): raise self._format_type_error(other) return ((self.ver_major, self.ver_minor) > (other.ver_major, other.ver_minor)) def __le__(self, other): return self < other or self == other def __ne__(self, other): return not self.__eq__(other) def __ge__(self, other): return self > other or self == other def matches(self, min_version, max_version): """Returns whether the version object represents a version greater than or equal to the minimum version and less than or equal to the maximum version. @param min_version: Minimum acceptable version. @param max_version: Maximum acceptable version. @returns: boolean If min_version is null then there is no minimum limit. If max_version is null then there is no maximum limit. If self is null then raise ValueError """ if max_version.is_null() and min_version.is_null(): return True elif self.is_null(): raise ValueError elif max_version.is_null(): return min_version <= self elif min_version.is_null(): return self <= max_version else: return min_version <= self <= max_version def get_string(self): """Converts object to string representation which if used to create an APIVersionRequest object results in the same version request. """ if self.is_null(): raise ValueError return "%s.%s" % (self.ver_major, self.ver_minor) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/common.py0000664000175000017500000005035100000000000020133 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import itertools import re from urllib import parse as urlparse from oslo_log import log as logging from oslo_utils import strutils import webob from webob import exc from nova.api.openstack import api_version_request from nova.compute import task_states from nova.compute import vm_states import nova.conf from nova import exception from nova.i18n import _ from nova import objects from nova import quota from nova import utils CONF = nova.conf.CONF LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS POWER_ON = 'POWER_ON' POWER_OFF = 'POWER_OFF' _STATE_MAP = { vm_states.ACTIVE: { 'default': 'ACTIVE', task_states.REBOOTING: 'REBOOT', task_states.REBOOT_PENDING: 'REBOOT', task_states.REBOOT_STARTED: 'REBOOT', task_states.REBOOTING_HARD: 'HARD_REBOOT', task_states.REBOOT_PENDING_HARD: 'HARD_REBOOT', task_states.REBOOT_STARTED_HARD: 'HARD_REBOOT', task_states.UPDATING_PASSWORD: 'PASSWORD', task_states.REBUILDING: 'REBUILD', task_states.REBUILD_BLOCK_DEVICE_MAPPING: 'REBUILD', task_states.REBUILD_SPAWNING: 'REBUILD', task_states.MIGRATING: 'MIGRATING', task_states.RESIZE_PREP: 'RESIZE', task_states.RESIZE_MIGRATING: 'RESIZE', task_states.RESIZE_MIGRATED: 'RESIZE', task_states.RESIZE_FINISH: 'RESIZE', }, vm_states.BUILDING: { 'default': 'BUILD', }, vm_states.STOPPED: { 'default': 'SHUTOFF', task_states.RESIZE_PREP: 'RESIZE', task_states.RESIZE_MIGRATING: 'RESIZE', task_states.RESIZE_MIGRATED: 'RESIZE', task_states.RESIZE_FINISH: 'RESIZE', task_states.REBUILDING: 'REBUILD', task_states.REBUILD_BLOCK_DEVICE_MAPPING: 'REBUILD', task_states.REBUILD_SPAWNING: 'REBUILD', }, vm_states.RESIZED: { 'default': 'VERIFY_RESIZE', # Note(maoy): the OS API spec 1.1 doesn't have CONFIRMING_RESIZE # state so we comment that out for future reference only. # task_states.RESIZE_CONFIRMING: 'CONFIRMING_RESIZE', task_states.RESIZE_REVERTING: 'REVERT_RESIZE', }, vm_states.PAUSED: { 'default': 'PAUSED', task_states.MIGRATING: 'MIGRATING', }, vm_states.SUSPENDED: { 'default': 'SUSPENDED', }, vm_states.RESCUED: { 'default': 'RESCUE', }, vm_states.ERROR: { 'default': 'ERROR', task_states.REBUILDING: 'REBUILD', task_states.REBUILD_BLOCK_DEVICE_MAPPING: 'REBUILD', task_states.REBUILD_SPAWNING: 'REBUILD', }, vm_states.DELETED: { 'default': 'DELETED', }, vm_states.SOFT_DELETED: { 'default': 'SOFT_DELETED', }, vm_states.SHELVED: { 'default': 'SHELVED', }, vm_states.SHELVED_OFFLOADED: { 'default': 'SHELVED_OFFLOADED', }, } def status_from_state(vm_state, task_state='default'): """Given vm_state and task_state, return a status string.""" task_map = _STATE_MAP.get(vm_state, dict(default='UNKNOWN')) status = task_map.get(task_state, task_map['default']) if status == "UNKNOWN": LOG.error("status is UNKNOWN from vm_state=%(vm_state)s " "task_state=%(task_state)s. 
Bad upgrade or db " "corrupted?", {'vm_state': vm_state, 'task_state': task_state}) return status def task_and_vm_state_from_status(statuses): """Map the server's multiple status strings to list of vm states and list of task states. """ vm_states = set() task_states = set() lower_statuses = [status.lower() for status in statuses] for state, task_map in _STATE_MAP.items(): for task_state, mapped_state in task_map.items(): status_string = mapped_state if status_string.lower() in lower_statuses: vm_states.add(state) task_states.add(task_state) # Add sort to avoid different order on set in Python 3 return sorted(vm_states), sorted(task_states) def get_sort_params(input_params, default_key='created_at', default_dir='desc'): """Retrieves sort keys/directions parameters. Processes the parameters to create a list of sort keys and sort directions that correspond to the 'sort_key' and 'sort_dir' parameter values. These sorting parameters can be specified multiple times in order to generate the list of sort keys and directions. The input parameters are not modified. :param input_params: webob.multidict of request parameters (from nova.api.wsgi.Request.params) :param default_key: default sort key value, added to the list if no 'sort_key' parameters are supplied :param default_dir: default sort dir value, added to the list if no 'sort_dir' parameters are supplied :returns: list of sort keys, list of sort dirs """ params = input_params.copy() sort_keys = [] sort_dirs = [] while 'sort_key' in params: sort_keys.append(params.pop('sort_key').strip()) while 'sort_dir' in params: sort_dirs.append(params.pop('sort_dir').strip()) if len(sort_keys) == 0 and default_key: sort_keys.append(default_key) if len(sort_dirs) == 0 and default_dir: sort_dirs.append(default_dir) return sort_keys, sort_dirs def get_pagination_params(request): """Return marker, limit tuple from request. :param request: `wsgi.Request` possibly containing 'marker' and 'limit' GET variables. 'marker' is the id of the last element the client has seen, and 'limit' is the maximum number of items to return. If 'limit' is not specified, 0, or > max_limit, we default to max_limit. Negative values for either marker or limit will cause exc.HTTPBadRequest() exceptions to be raised. """ params = {} if 'limit' in request.GET: params['limit'] = _get_int_param(request, 'limit') if 'page_size' in request.GET: params['page_size'] = _get_int_param(request, 'page_size') if 'marker' in request.GET: params['marker'] = _get_marker_param(request) if 'offset' in request.GET: params['offset'] = _get_int_param(request, 'offset') return params def _get_int_param(request, param): """Extract integer param from request or fail.""" try: int_param = utils.validate_integer(request.GET[param], param, min_value=0) except exception.InvalidInput as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) return int_param def _get_marker_param(request): """Extract marker id from request or fail.""" return request.GET['marker'] def limited(items, request): """Return a slice of items according to requested offset and limit. :param items: A sliceable entity :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit' GET variables. 'offset' is where to start in the list, and 'limit' is the maximum number of items to return. If 'limit' is not specified, 0, or > max_limit, we default to max_limit. Negative values for either offset or limit will cause exc.HTTPBadRequest() exceptions to be raised. 
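A short illustration of get_sort_params() above, assuming webob's MultiDict semantics in which each pop() removes a single occurrence of the key; the parameter values are arbitrary.

    # GET /servers?sort_key=display_name&sort_dir=asc&sort_key=uuid&sort_dir=desc
    # get_sort_params(req.params) would return:
    #     (['display_name', 'uuid'], ['asc', 'desc'])
    #
    # GET /servers  (no sorting parameters supplied)
    # get_sort_params(req.params) would return the defaults:
    #     (['created_at'], ['desc'])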
""" params = get_pagination_params(request) offset = params.get('offset', 0) limit = CONF.api.max_limit limit = min(limit, params.get('limit') or limit) return items[offset:(offset + limit)] def get_limit_and_marker(request): """Get limited parameter from request.""" params = get_pagination_params(request) limit = CONF.api.max_limit limit = min(limit, params.get('limit', limit)) marker = params.get('marker', None) return limit, marker def get_id_from_href(href): """Return the id or uuid portion of a url. Given: 'http://www.foo.com/bar/123?q=4' Returns: '123' Given: 'http://www.foo.com/bar/abc123?q=4' Returns: 'abc123' """ return urlparse.urlsplit("%s" % href).path.split('/')[-1] def remove_trailing_version_from_href(href): """Removes the api version from the href. Given: 'http://www.nova.com/compute/v1.1' Returns: 'http://www.nova.com/compute' Given: 'http://www.nova.com/v1.1' Returns: 'http://www.nova.com' """ parsed_url = urlparse.urlsplit(href) url_parts = parsed_url.path.rsplit('/', 1) # NOTE: this should match vX.X or vX expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)') if not expression.match(url_parts.pop()): LOG.debug('href %s does not contain version', href) raise ValueError(_('href %s does not contain version') % href) new_path = url_join(*url_parts) parsed_url = list(parsed_url) parsed_url[2] = new_path return urlparse.urlunsplit(parsed_url) def check_img_metadata_properties_quota(context, metadata): if not metadata: return try: QUOTAS.limit_check(context, metadata_items=len(metadata)) except exception.OverQuota: expl = _("Image metadata limit exceeded") raise webob.exc.HTTPForbidden(explanation=expl) def get_networks_for_instance_from_nw_info(nw_info): networks = collections.OrderedDict() for vif in nw_info: ips = vif.fixed_ips() floaters = vif.floating_ips() label = vif['network']['label'] if label not in networks: networks[label] = {'ips': [], 'floating_ips': []} for ip in itertools.chain(ips, floaters): ip['mac_address'] = vif['address'] networks[label]['ips'].extend(ips) networks[label]['floating_ips'].extend(floaters) return networks def get_networks_for_instance(context, instance): """Returns a prepared nw_info list for passing into the view builders We end up with a data structure like:: {'public': {'ips': [{'address': '10.0.0.1', 'version': 4, 'mac_address': 'aa:aa:aa:aa:aa:aa'}, {'address': '2001::1', 'version': 6, 'mac_address': 'aa:aa:aa:aa:aa:aa'}], 'floating_ips': [{'address': '172.16.0.1', 'version': 4, 'mac_address': 'aa:aa:aa:aa:aa:aa'}, {'address': '172.16.2.1', 'version': 4, 'mac_address': 'aa:aa:aa:aa:aa:aa'}]}, ...} """ nw_info = instance.get_network_info() return get_networks_for_instance_from_nw_info(nw_info) def raise_http_conflict_for_instance_invalid_state(exc, action, server_id): """Raises a webob.exc.HTTPConflict instance containing a message appropriate to return via the API based on the original InstanceInvalidState exception. 
""" attr = exc.kwargs.get('attr') state = exc.kwargs.get('state') if attr is not None and state is not None: msg = _("Cannot '%(action)s' instance %(server_id)s while it is in " "%(attr)s %(state)s") % {'action': action, 'attr': attr, 'state': state, 'server_id': server_id} else: # At least give some meaningful message msg = _("Instance %(server_id)s is in an invalid state for " "'%(action)s'") % {'action': action, 'server_id': server_id} raise webob.exc.HTTPConflict(explanation=msg) def url_join(*parts): """Convenience method for joining parts of a URL Any leading and trailing '/' characters are removed, and the parts joined together with '/' as a separator. If last element of 'parts' is an empty string, the returned URL will have a trailing slash. """ parts = parts or [""] clean_parts = [part.strip("/") for part in parts if part] if not parts[-1]: # Empty last element should add a trailing slash clean_parts.append("") return "/".join(clean_parts) class ViewBuilder(object): """Model API responses as dictionaries.""" def _get_project_id(self, request): """Get project id from request url if present or empty string otherwise """ project_id = request.environ["nova.context"].project_id if project_id and project_id in request.url: return project_id return '' def _get_links(self, request, identifier, collection_name): return [{ "rel": "self", "href": self._get_href_link(request, identifier, collection_name), }, { "rel": "bookmark", "href": self._get_bookmark_link(request, identifier, collection_name), }] def _get_next_link(self, request, identifier, collection_name): """Return href string with proper limit and marker params.""" params = collections.OrderedDict(sorted(request.params.items())) params["marker"] = identifier prefix = self._update_compute_link_prefix(request.application_url) url = url_join(prefix, self._get_project_id(request), collection_name) return "%s?%s" % (url, urlparse.urlencode(params)) def _get_href_link(self, request, identifier, collection_name): """Return an href string pointing to this object.""" prefix = self._update_compute_link_prefix(request.application_url) return url_join(prefix, self._get_project_id(request), collection_name, str(identifier)) def _get_bookmark_link(self, request, identifier, collection_name): """Create a URL that refers to a specific resource.""" base_url = remove_trailing_version_from_href(request.application_url) base_url = self._update_compute_link_prefix(base_url) return url_join(base_url, self._get_project_id(request), collection_name, str(identifier)) def _get_collection_links(self, request, items, collection_name, id_key="uuid"): """Retrieve 'next' link, if applicable. This is included if: 1) 'limit' param is specified and equals the number of items. 2) 'limit' param is specified but it exceeds CONF.api.max_limit, in this case the number of items is CONF.api.max_limit. 3) 'limit' param is NOT specified but the number of items is CONF.api.max_limit. 
""" links = [] max_items = min( int(request.params.get("limit", CONF.api.max_limit)), CONF.api.max_limit) if max_items and max_items == len(items): last_item = items[-1] if id_key in last_item: last_item_id = last_item[id_key] elif 'id' in last_item: last_item_id = last_item["id"] else: last_item_id = last_item["flavorid"] links.append({ "rel": "next", "href": self._get_next_link(request, last_item_id, collection_name), }) return links def _update_link_prefix(self, orig_url, prefix): if not prefix: return orig_url url_parts = list(urlparse.urlsplit(orig_url)) prefix_parts = list(urlparse.urlsplit(prefix)) url_parts[0:2] = prefix_parts[0:2] url_parts[2] = prefix_parts[2] + url_parts[2] return urlparse.urlunsplit(url_parts).rstrip('/') def _update_glance_link_prefix(self, orig_url): return self._update_link_prefix(orig_url, CONF.api.glance_link_prefix) def _update_compute_link_prefix(self, orig_url): return self._update_link_prefix(orig_url, CONF.api.compute_link_prefix) def get_instance(compute_api, context, instance_id, expected_attrs=None, cell_down_support=False): """Fetch an instance from the compute API, handling error checking.""" try: return compute_api.get(context, instance_id, expected_attrs=expected_attrs, cell_down_support=cell_down_support) except exception.InstanceNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) def normalize_name(name): # NOTE(alex_xu): This method is used by v2.1 legacy v2 compat mode. # In the legacy v2 API, some of APIs strip the spaces and some of APIs not. # The v2.1 disallow leading/trailing, for compatible v2 API and consistent, # we enable leading/trailing spaces and strip spaces in legacy v2 compat # mode. Althrough in legacy v2 API there are some APIs didn't strip spaces, # but actually leading/trailing spaces(that means user depend on leading/ # trailing spaces distinguish different instance) is pointless usecase. return name.strip() def raise_feature_not_supported(msg=None): if msg is None: msg = _("The requested functionality is not supported.") raise webob.exc.HTTPNotImplemented(explanation=msg) def get_flavor(context, flavor_id): try: return objects.Flavor.get_by_flavor_id(context, flavor_id) except exception.FlavorNotFound as error: raise exc.HTTPNotFound(explanation=error.format_message()) def is_all_tenants(search_opts): """Checks to see if the all_tenants flag is in search_opts :param dict search_opts: The search options for a request :returns: boolean indicating if all_tenants are being requested or not """ all_tenants = search_opts.get('all_tenants') if all_tenants: try: all_tenants = strutils.bool_from_string(all_tenants, True) except ValueError as err: raise exception.InvalidInput(str(err)) else: # The empty string is considered enabling all_tenants all_tenants = 'all_tenants' in search_opts return all_tenants def is_locked(search_opts): """Converts the value of the locked parameter to a boolean. Note that this function will be called only if locked exists in search_opts. :param dict search_opts: The search options for a request :returns: boolean indicating if locked is being requested or not """ locked = search_opts.get('locked') try: locked = strutils.bool_from_string(locked, strict=True) except ValueError as err: raise exception.InvalidInput(str(err)) return locked def supports_multiattach_volume(req): """Check to see if the requested API version is high enough for multiattach Microversion 2.60 adds support for booting from a multiattach volume. 
The actual validation for a multiattach volume is done in the compute API code, this is just checking the version so we can tell the API code if the request version is high enough to even support it. :param req: The incoming API request :returns: True if the requested API microversion is high enough for volume multiattach support, False otherwise. """ return api_version_request.is_supported(req, '2.60') def supports_port_resource_request(req): """Check to see if the requested API version is high enough for resource request :param req: The incoming API request :returns: True if the requested API microversion is high enough for port resource request support, False otherwise. """ return api_version_request.is_supported(req, '2.72') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3096082 nova-32.0.0/nova/api/openstack/compute/0000775000175000017500000000000000000000000017741 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/__init__.py0000664000175000017500000000206700000000000022057 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # The APIRouterV21 moves down to the 'nova.api.openstack.compute.routes' for # circle reference problem. Import the APIRouterV21 is for the api-paste.ini # works correctly without modification. We still looking for a chance to move # the APIRouterV21 back to here after cleanups. from nova.api.openstack.compute.routes import APIRouterV21 # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/admin_actions.py0000664000175000017500000000703700000000000023132 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from webob import exc from nova.api.openstack import common from nova.api.openstack.compute.schemas import admin_actions as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova.compute import instance_actions from nova.compute import vm_states from nova import exception from nova import objects from nova.policies import admin_actions as aa_policies _removal_reason = """\ This action only works with the Xen virt driver, which was deprecated in the 20.0.0 (Train) release. It was removed in the 23.0.0 (Wallaby) release. """ @validation.validated class AdminActionsController(wsgi.Controller): def __init__(self): super(AdminActionsController, self).__init__() self.compute_api = compute.API() @wsgi.expected_errors(410) @wsgi.action('resetNetwork') @wsgi.removed('23.0.0', _removal_reason) @validation.schema(schema.reset_network) @validation.response_body_schema(schema.reset_network_response) def _reset_network(self, req, id, body): """(Removed) Permit admins to reset networking on a server.""" raise exc.HTTPGone() @wsgi.response(202) @wsgi.expected_errors((404, 409)) @wsgi.action('injectNetworkInfo') @validation.schema(schema.inject_network_info) @validation.response_body_schema(schema.inject_network_info_response) def _inject_network_info(self, req, id, body): """Permit admins to inject network info into a server.""" context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, id) context.can(aa_policies.POLICY_ROOT % 'inject_network_info', target={'project_id': instance.project_id}) try: self.compute_api.inject_network_info(context, instance) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) @wsgi.response(202) @wsgi.expected_errors(404) @wsgi.action('os-resetState') @validation.schema(schema.reset_state) @validation.response_body_schema(schema.reset_state_response) def _reset_state(self, req, id, body): """Permit admins to reset the state of a server.""" context = req.environ["nova.context"] instance = common.get_instance(self.compute_api, context, id) context.can(aa_policies.POLICY_ROOT % 'reset_state', target={'project_id': instance.project_id}) # Log os-resetState as an instance action instance_action = objects.InstanceAction.action_start( context, instance.uuid, instance_actions.RESET_STATE) # Identify the desired state from the body state = { 'active': vm_states.ACTIVE, 'error': vm_states.ERROR, }[body["os-resetState"]["state"]] instance.vm_state = state instance.task_state = None instance.save(admin_state_reset=True) instance_action.finish() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/admin_password.py0000664000175000017500000000477500000000000023342 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
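# Illustrative sketch (hypothetical helper, not part of the upstream module):
# the os-resetState action above translates the requested state string into a
# vm_states constant before saving the instance with its task_state cleared.
# A standalone restatement of that mapping, accepting the same two values
# allowed by the API schema, might look like this.
from nova.compute import vm_states


def _example_map_reset_state(requested_state):
    # Only 'active' and 'error' are valid; any other key raises KeyError,
    # mirroring the dict lookup in AdminActionsController._reset_state().
    return {
        'active': vm_states.ACTIVE,
        'error': vm_states.ERROR,
    }[requested_state]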
from webob import exc from nova.api.openstack import common from nova.api.openstack.compute.schemas import admin_password as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.i18n import _ from nova.policies import admin_password as ap_policies @validation.validated class AdminPasswordController(wsgi.Controller): def __init__(self): super(AdminPasswordController, self).__init__() self.compute_api = compute.API() # TODO(eliqiao): Here should be 204(No content) instead of 202 by v2.1+ # microversions because the password has been changed when returning # a response. @wsgi.action('changePassword') @wsgi.response(202) @wsgi.expected_errors((404, 409, 501)) @validation.schema(schema.change_password) @validation.response_body_schema(schema.change_password_response) def change_password(self, req, id, body): context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, id) context.can(ap_policies.BASE_POLICY_NAME, target={'user_id': instance.user_id, 'project_id': instance.project_id}) password = body['changePassword']['adminPass'] try: self.compute_api.set_admin_password(context, instance, password) except (exception.InstancePasswordSetFailed, exception.SetAdminPasswdNotSupported, exception.InstanceAgentNotEnabled) as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as e: raise common.raise_http_conflict_for_instance_invalid_state( e, 'changePassword', id) except NotImplementedError: msg = _("Unable to set password on instance") common.raise_feature_not_supported(msg=msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/agents.py0000664000175000017500000000400300000000000021571 0ustar00zuulzuul00000000000000# Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc from nova.api.openstack.compute.schemas import agents as schema from nova.api.openstack import wsgi from nova.api import validation _removal_reason = """\ This API only works with the Xen virt driver, which was deprecated in the 20.0.0 (Train) release. It was removed in the 23.0.0 (Wallaby) release. """ @validation.validated class AgentController(wsgi.Controller): """(Removed) Controller for agent resources. This was removed during the Victoria release along with the XenAPI driver. 
""" @wsgi.expected_errors(410) @wsgi.removed('22.0.0', _removal_reason) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('22.0.0', _removal_reason) @validation.schema(schema.update) @validation.response_body_schema(schema.update_response) def update(self, req, id, body): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('22.0.0', _removal_reason) @validation.response_body_schema(schema.delete_response) def delete(self, req, id): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('22.0.0', _removal_reason) @validation.schema(schema.create) @validation.response_body_schema(schema.create_response) def create(self, req, body): raise exc.HTTPGone() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/aggregates.py0000664000175000017500000003334300000000000022432 0ustar00zuulzuul00000000000000# Copyright (c) 2012 Citrix Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The Aggregate admin API extension.""" import datetime from oslo_log import log as logging from webob import exc from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import aggregate_images from nova.api.openstack.compute.schemas import aggregates as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova.conductor import api as conductor from nova import exception from nova.i18n import _ from nova.policies import aggregates as aggr_policies from nova import utils LOG = logging.getLogger(__name__) def _get_context(req): return req.environ['nova.context'] @validation.validated class AggregateController(wsgi.Controller): """The Host Aggregates API controller for the OpenStack API.""" def __init__(self): super(AggregateController, self).__init__() self.api = compute.AggregateAPI() self.conductor_tasks = conductor.ComputeTaskAPI() @wsgi.expected_errors(()) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response, '2.1', '2.40') @validation.response_body_schema(schema.index_response_v241, '2.41') def index(self, req): """Returns a list a host aggregate's id, name, availability_zone.""" context = _get_context(req) context.can(aggr_policies.POLICY_ROOT % 'index', target={}) aggregates = self.api.get_aggregate_list(context) return {'aggregates': [self._marshall_aggregate(req, a)['aggregate'] for a in aggregates]} # NOTE(gmann): Returns 200 for backwards compatibility but should be 201 # as this operation complete the creation of aggregates resource. 
@wsgi.expected_errors((400, 409)) @validation.schema(schema.create_v20, '2.0', '2.0') @validation.schema(schema.create, '2.1') @validation.response_body_schema(schema.create_response, '2.1', '2.40') @validation.response_body_schema(schema.create_response_v241, '2.41') def create(self, req, body): """Creates an aggregate, given its name and optional availability zone. """ context = _get_context(req) context.can(aggr_policies.POLICY_ROOT % 'create', target={}) host_aggregate = body["aggregate"] name = common.normalize_name(host_aggregate["name"]) avail_zone = host_aggregate.get("availability_zone") if avail_zone: avail_zone = common.normalize_name(avail_zone) try: aggregate = self.api.create_aggregate(context, name, avail_zone) except exception.AggregateNameExists as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.ObjectActionError: raise exc.HTTPConflict(explanation=_( 'Not all aggregates have been migrated to the API database')) except exception.InvalidAggregateAction as e: raise exc.HTTPBadRequest(explanation=e.format_message()) agg = self._marshall_aggregate(req, aggregate) # To maintain the same API result as before the changes for returning # nova objects were made. del agg['aggregate']['hosts'] del agg['aggregate']['metadata'] return agg @wsgi.expected_errors((400, 404)) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response, '2.1', '2.40') @validation.response_body_schema(schema.show_response_v241, '2.41') def show(self, req, id): """Shows the details of an aggregate, hosts and metadata included.""" context = _get_context(req) context.can(aggr_policies.POLICY_ROOT % 'show', target={}) try: utils.validate_integer(id, 'id') except exception.InvalidInput as e: raise exc.HTTPBadRequest(explanation=e.format_message()) try: aggregate = self.api.get_aggregate(context, id) except exception.AggregateNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) return self._marshall_aggregate(req, aggregate) @wsgi.expected_errors((400, 404, 409)) @validation.schema(schema.update_v20, '2.0', '2.0') @validation.schema(schema.update, '2.1') @validation.response_body_schema(schema.update_response, '2.1', '2.40') @validation.response_body_schema(schema.update_response_v241, '2.41') def update(self, req, id, body): """Updates the name and/or availability_zone of given aggregate.""" context = _get_context(req) context.can(aggr_policies.POLICY_ROOT % 'update', target={}) updates = body["aggregate"] if 'name' in updates: updates['name'] = common.normalize_name(updates['name']) try: utils.validate_integer(id, 'id') except exception.InvalidInput as e: raise exc.HTTPBadRequest(explanation=e.format_message()) try: aggregate = self.api.update_aggregate(context, id, updates) except exception.AggregateNameExists as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.AggregateNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) except exception.InvalidAggregateAction as e: raise exc.HTTPBadRequest(explanation=e.format_message()) return self._marshall_aggregate(req, aggregate) # NOTE(gmann): Returns 200 for backwards compatibility but should be 204 # as this operation complete the deletion of aggregate resource and return # no response body. 
@wsgi.expected_errors((400, 404)) @validation.response_body_schema(schema.delete_response) def delete(self, req, id): """Removes an aggregate by id.""" context = _get_context(req) context.can(aggr_policies.POLICY_ROOT % 'delete', target={}) try: utils.validate_integer(id, 'id') except exception.InvalidInput as e: raise exc.HTTPBadRequest(explanation=e.format_message()) try: self.api.delete_aggregate(context, id) except exception.AggregateNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) except exception.InvalidAggregateAction as e: raise exc.HTTPBadRequest(explanation=e.format_message()) # NOTE(gmann): Returns 200 for backwards compatibility but should be 202 # for representing async API as this API just accepts the request and # request hypervisor driver to complete the same in async mode. @wsgi.expected_errors((400, 404, 409)) @wsgi.action('add_host') @validation.schema(schema.add_host) @validation.response_body_schema(schema.add_host_response, '2.1', '2.40') # noqa: E501 @validation.response_body_schema(schema.add_host_response_v241, '2.41') def _add_host(self, req, id, body): """Adds a host to the specified aggregate.""" host = body['add_host']['host'] context = _get_context(req) context.can(aggr_policies.POLICY_ROOT % 'add_host', target={}) try: utils.validate_integer(id, 'id') except exception.InvalidInput as e: raise exc.HTTPBadRequest(explanation=e.format_message()) try: aggregate = self.api.add_host_to_aggregate(context, id, host) except (exception.AggregateNotFound, exception.HostMappingNotFound, exception.ComputeHostNotFound) as e: raise exc.HTTPNotFound(explanation=e.format_message()) except (exception.AggregateHostExists, exception.InvalidAggregateAction) as e: raise exc.HTTPConflict(explanation=e.format_message()) return self._marshall_aggregate(req, aggregate) # NOTE(gmann): Returns 200 for backwards compatibility but should be 202 # for representing async API as this API just accepts the request and # request hypervisor driver to complete the same in async mode. @wsgi.expected_errors((400, 404, 409)) @wsgi.action('remove_host') @validation.schema(schema.remove_host) @validation.response_body_schema(schema.remove_host_response, '2.1', '2.40') # noqa: E501 @validation.response_body_schema(schema.remove_host_response_v241, '2.41') def _remove_host(self, req, id, body): """Removes a host from the specified aggregate.""" host = body['remove_host']['host'] context = _get_context(req) context.can(aggr_policies.POLICY_ROOT % 'remove_host', target={}) try: utils.validate_integer(id, 'id') except exception.InvalidInput as e: raise exc.HTTPBadRequest(explanation=e.format_message()) try: aggregate = self.api.remove_host_from_aggregate(context, id, host) except (exception.AggregateNotFound, exception.AggregateHostNotFound, exception.ComputeHostNotFound) as e: LOG.error('Failed to remove host %s from aggregate %s. Error: %s', host, id, str(e)) msg = _('Cannot remove host %(host)s in aggregate %(id)s') % { 'host': host, 'id': id} raise exc.HTTPNotFound(explanation=msg) except (exception.InvalidAggregateAction, exception.ResourceProviderUpdateConflict) as e: LOG.error('Failed to remove host %s from aggregate %s. 
Error: %s', host, id, str(e)) raise exc.HTTPConflict(explanation=e.format_message()) return self._marshall_aggregate(req, aggregate) @wsgi.expected_errors((400, 404)) @wsgi.action('set_metadata') @validation.schema(schema.set_metadata) @validation.response_body_schema(schema.set_metadata_response, '2.1', '2.40') # noqa: E501 @validation.response_body_schema(schema.set_metadata_response_v241, '2.41') def _set_metadata(self, req, id, body): """Replaces the aggregate's existing metadata with new metadata.""" context = _get_context(req) context.can(aggr_policies.POLICY_ROOT % 'set_metadata', target={}) try: utils.validate_integer(id, 'id') except exception.InvalidInput as e: raise exc.HTTPBadRequest(explanation=e.format_message()) metadata = body["set_metadata"]["metadata"] try: aggregate = self.api.update_aggregate_metadata(context, id, metadata) except exception.AggregateNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) except (exception.InvalidAggregateAction, exception.AggregateMetadataKeyExists) as e: raise exc.HTTPBadRequest(explanation=e.format_message()) return self._marshall_aggregate(req, aggregate) def _marshall_aggregate(self, req, aggregate): _aggregate = {} for key, value in self._build_aggregate_items(req, aggregate): # NOTE(danms): The original API specified non-TZ-aware timestamps if isinstance(value, datetime.datetime): value = value.replace(tzinfo=None) _aggregate[key] = value return {"aggregate": _aggregate} def _build_aggregate_items(self, req, aggregate): show_uuid = api_version_request.is_supported(req, "2.41") keys = aggregate.obj_fields # NOTE(rlrossit): Within the compute API, metadata will always be # set on the aggregate object (at a minimum to {}). Because of this, # we can freely use getattr() on keys in obj_extra_fields (in this # case it is only ['availability_zone']) without worrying about # lazy-loading an unset variable for key in keys: if ((aggregate.obj_attr_is_set(key) or key in aggregate.obj_extra_fields) and (show_uuid or key != 'uuid')): yield key, getattr(aggregate, key) @wsgi.api_version('2.81') @wsgi.response(202) @wsgi.expected_errors((400, 404)) @validation.schema(aggregate_images.aggregate_images) @validation.response_body_schema( aggregate_images.aggregate_images_response) def images(self, req, id, body): """Allows image cache management requests.""" context = _get_context(req) context.can(aggr_policies.NEW_POLICY_ROOT % 'images', target={}) try: utils.validate_integer(id, 'id') except exception.InvalidInput as e: raise exc.HTTPBadRequest(explanation=e.format_message()) image_ids = [] for image_req in body.get('cache'): image_ids.append(image_req['id']) if sorted(image_ids) != sorted(list(set(image_ids))): raise exc.HTTPBadRequest( explanation=_('Duplicate images in request')) try: aggregate = self.api.get_aggregate(context, id) except exception.AggregateNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) try: self.conductor_tasks.cache_images(context, aggregate, image_ids) except exception.NovaException as e: raise exc.HTTPBadRequest(explanation=e.format_message()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/assisted_volume_snapshots.py0000664000175000017500000001021300000000000025620 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # Copyright 2014 IBM Corp. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The Assisted volume snapshots extension.""" from oslo_serialization import jsonutils from webob import exc from nova.api.openstack.compute.schemas import assisted_volume_snapshots as schema # noqa: E501 from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.policies import assisted_volume_snapshots as avs_policies @validation.validated class AssistedVolumeSnapshotsController(wsgi.Controller): """The Assisted volume snapshots API controller for the OpenStack API.""" def __init__(self): super(AssistedVolumeSnapshotsController, self).__init__() self.compute_api = compute.API() @wsgi.expected_errors(400) @validation.schema(schema.create) @validation.response_body_schema(schema.create_response) def create(self, req, body): """Creates a new snapshot.""" context = req.environ['nova.context'] context.can(avs_policies.POLICY_ROOT % 'create', target={}) snapshot = body['snapshot'] create_info = snapshot['create_info'] volume_id = snapshot['volume_id'] try: return self.compute_api.volume_snapshot_create(context, volume_id, create_info) except (exception.VolumeBDMNotFound, exception.VolumeBDMIsMultiAttach, exception.InvalidVolume) as error: raise exc.HTTPBadRequest(explanation=error.format_message()) except (exception.InstanceInvalidState, exception.InstanceNotReady) as e: # TODO(mriedem) InstanceInvalidState and InstanceNotReady would # normally result in a 409 but that would require bumping the # microversion, which we should just do in a single microversion # across all APIs when we fix status code wrinkles. raise exc.HTTPBadRequest(explanation=e.format_message()) @wsgi.response(204) @wsgi.expected_errors((400, 404)) @validation.query_schema(schema.delete_query, '2.0', '2.74') @validation.query_schema(schema.delete_query_275, '2.75') @validation.response_body_schema(schema.delete_response) def delete(self, req, id): """Delete a snapshot.""" context = req.environ['nova.context'] context.can(avs_policies.POLICY_ROOT % 'delete', target={}) delete_metadata = {} delete_metadata.update(req.GET) try: delete_info = jsonutils.loads(delete_metadata['delete_info']) volume_id = delete_info['volume_id'] except (KeyError, ValueError) as e: raise exc.HTTPBadRequest(explanation=str(e)) try: self.compute_api.volume_snapshot_delete( context, volume_id, id, delete_info) except (exception.VolumeBDMNotFound, exception.InvalidVolume) as error: raise exc.HTTPBadRequest(explanation=error.format_message()) except exception.NotFound as e: return exc.HTTPNotFound(explanation=e.format_message()) except (exception.InstanceInvalidState, exception.InstanceNotReady) as e: # TODO(mriedem) InstanceInvalidState and InstanceNotReady would # normally result in a 409 but that would require bumping the # microversion, which we should just do in a single microversion # across all APIs when we fix status code wrinkles. 
raise exc.HTTPBadRequest(explanation=e.format_message()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/attach_interfaces.py0000664000175000017500000002447600000000000023777 0ustar00zuulzuul00000000000000# Copyright 2012 SINA Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The instance interfaces extension.""" import webob from webob import exc from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import attach_interfaces as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.i18n import _ from nova.network import neutron from nova import objects from nova.policies import attach_interfaces as ai_policies def _translate_interface_attachment_view(context, port_info, show_tag=False): """Maps keys for interface attachment details view. :param port_info: dict of port details from the networking service :param show_tag: If True, includes the "tag" key in the returned dict, else the "tag" entry is omitted (default: False) :returns: dict of a subset of details about the port and optionally the tag associated with the VirtualInterface record in the nova database """ info = { 'net_id': port_info['network_id'], 'port_id': port_info['id'], 'mac_addr': port_info['mac_address'], 'port_state': port_info['status'], 'fixed_ips': port_info.get('fixed_ips', None), } if show_tag: # Get the VIF for this port (if one exists - VirtualInterface records # did not exist for neutron ports until the Newton release). vif = objects.VirtualInterface.get_by_uuid(context, port_info['id']) info['tag'] = vif.tag if vif else None return info @validation.validated class InterfaceAttachmentController(wsgi.Controller): """The interface attachment API controller for the OpenStack API.""" def __init__(self): super(InterfaceAttachmentController, self).__init__() self.compute_api = compute.API() self.network_api = neutron.API() @wsgi.expected_errors((404, 501)) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response, '2.1', '2.69') @validation.response_body_schema(schema.index_response_v270, '2.70') def index(self, req, server_id): """Returns the list of interface attachments for a given instance.""" context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, server_id) context.can(ai_policies.POLICY_ROOT % 'list', target={'project_id': instance.project_id}) search_opts = {'device_id': instance.uuid} try: data = self.network_api.list_ports(context, **search_opts) except exception.NotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) except NotImplementedError: common.raise_feature_not_supported() # If showing tags, get the VirtualInterfaceList for the server and # map VIFs by port ID. 
Note that VirtualInterface records did not # exist for neutron ports until the Newton release so it's OK if we # are missing records for old servers. show_tag = api_version_request.is_supported(req, '2.70') tag_per_port_id = {} if show_tag: vifs = objects.VirtualInterfaceList.get_by_instance_uuid( context, server_id) tag_per_port_id = {vif.uuid: vif.tag for vif in vifs} results = [] ports = data.get('ports', []) for port in ports: # Note that we do not pass show_tag=show_tag to # _translate_interface_attachment_view because we are handling it # ourselves here since we have the list of VIFs which is better # for performance than doing a DB query per port. info = _translate_interface_attachment_view(context, port) if show_tag: info['tag'] = tag_per_port_id.get(port['id']) results.append(info) return {'interfaceAttachments': results} @wsgi.expected_errors((403, 404)) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response, '2.1', '2.69') @validation.response_body_schema(schema.show_response_v270, '2.70') def show(self, req, server_id, id): """Return data about the given interface attachment.""" context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, server_id) context.can(ai_policies.POLICY_ROOT % 'show', target={'project_id': instance.project_id}) port_id = id try: port_info = self.network_api.show_port(context, port_id) except exception.PortNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) except exception.Forbidden as e: raise exc.HTTPForbidden(explanation=e.format_message()) if port_info['port']['device_id'] != server_id: msg = _("Instance %(instance)s does not have a port with id " "%(port)s") % {'instance': server_id, 'port': port_id} raise exc.HTTPNotFound(explanation=msg) return {'interfaceAttachment': _translate_interface_attachment_view( context, port_info['port'], show_tag=api_version_request.is_supported(req, '2.70'))} @wsgi.expected_errors((400, 403, 404, 409, 500, 501)) @validation.schema(schema.create, '2.0', '2.48') @validation.schema(schema.create_v249, '2.49') @validation.response_body_schema(schema.create_response, '2.1', '2.69') @validation.response_body_schema(schema.create_response_v270, '2.70') def create(self, req, server_id, body): """Attach an interface to an instance.""" context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, server_id) context.can(ai_policies.POLICY_ROOT % 'create', target={'project_id': instance.project_id}) network_id = None port_id = None req_ip = None tag = None if body: attachment = body['interfaceAttachment'] network_id = attachment.get('net_id', None) port_id = attachment.get('port_id', None) tag = attachment.get('tag', None) try: req_ip = attachment['fixed_ips'][0]['ip_address'] except Exception: pass if network_id and port_id: msg = _("Must not input both network_id and port_id") raise exc.HTTPBadRequest(explanation=msg) if req_ip and not network_id: msg = _("Must input network_id when request IP address") raise exc.HTTPBadRequest(explanation=msg) try: vif = self.compute_api.attach_interface(context, instance, network_id, port_id, req_ip, tag=tag) except (exception.InterfaceAttachFailedNoNetwork, exception.NetworkAmbiguous, exception.NoMoreFixedIps, exception.PortNotUsable, exception.AttachInterfaceNotSupported, exception.SecurityGroupCannotBeApplied, exception.NetworkInterfaceTaggedAttachNotSupported, exception.NetworksWithQoSPolicyNotSupported, exception.InterfaceAttachPciClaimFailed, 
exception.InterfaceAttachResourceAllocationFailed, exception.ForbiddenPortsWithAccelerator, exception.ForbiddenWithRemoteManagedPorts, exception.ExtendedResourceRequestOldCompute, ) as e: raise exc.HTTPBadRequest(explanation=e.format_message()) except ( exception.InstanceIsLocked, exception.FixedIpAlreadyInUse, exception.PortInUse, ) as e: raise exc.HTTPConflict(explanation=e.format_message()) except (exception.PortNotFound, exception.NetworkNotFound) as e: raise exc.HTTPNotFound(explanation=e.format_message()) except exception.PortLimitExceeded as e: raise exc.HTTPForbidden(explanation=e.format_message()) except exception.InterfaceAttachFailed as e: raise webob.exc.HTTPInternalServerError( explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'attach_interface', server_id) return self.show(req, server_id, vif['id']) @wsgi.response(202) @wsgi.expected_errors((400, 404, 409, 501)) @validation.response_body_schema(schema.delete_response) def delete(self, req, server_id, id): """Detach an interface from an instance.""" context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, server_id, expected_attrs=['device_metadata']) context.can(ai_policies.POLICY_ROOT % 'delete', target={'project_id': instance.project_id}) port_id = id try: self.compute_api.detach_interface(context, instance, port_id=port_id) except exception.PortNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except NotImplementedError: common.raise_feature_not_supported() except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'detach_interface', server_id) except exception.ForbiddenPortsWithAccelerator as e: raise exc.HTTPBadRequest(explanation=e.format_message()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/availability_zone.py0000664000175000017500000001215500000000000024024 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
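# Illustrative sketch (hypothetical helper, not part of the upstream module):
# the os-interface create() method above rejects a body that names both a
# network and a port, and requires a network whenever a fixed IP is
# requested. A standalone restatement of those checks over an example
# attachment dict might look like this.
def _example_validate_attach_body(attachment):
    network_id = attachment.get('net_id')
    port_id = attachment.get('port_id')
    try:
        req_ip = attachment['fixed_ips'][0]['ip_address']
    except (KeyError, IndexError, TypeError):
        req_ip = None
    if network_id and port_id:
        raise ValueError('Must not input both network_id and port_id')
    if req_ip and not network_id:
        raise ValueError('Must input network_id when request IP address')
    return network_id, port_id, req_ip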
from nova.api.openstack.compute.schemas import availability_zone as schema from nova.api.openstack import wsgi from nova.api import validation from nova import availability_zones from nova.compute import api as compute import nova.conf from nova.policies import availability_zone as az_policies from nova import servicegroup CONF = nova.conf.CONF ATTRIBUTE_NAME = "availability_zone" @validation.validated class AvailabilityZoneController(wsgi.Controller): """The Availability Zone API controller for the OpenStack API.""" def __init__(self): super(AvailabilityZoneController, self).__init__() self.servicegroup_api = servicegroup.API() self.host_api = compute.HostAPI() def _get_filtered_availability_zones(self, zones, is_available): result = [] for zone in zones: # Hide internal_service_availability_zone if zone == CONF.internal_service_availability_zone: continue result.append({'zoneName': zone, 'zoneState': {'available': is_available}, "hosts": None}) return result def _describe_availability_zones(self, context, **kwargs): ctxt = context.elevated() available_zones, not_available_zones = ( availability_zones.get_availability_zones( ctxt, self.host_api)) filtered_available_zones = \ self._get_filtered_availability_zones(available_zones, True) filtered_not_available_zones = \ self._get_filtered_availability_zones(not_available_zones, False) return {'availabilityZoneInfo': filtered_available_zones + filtered_not_available_zones} def _describe_availability_zones_verbose(self, context, **kwargs): ctxt = context.elevated() services = self.host_api.service_get_all( context, set_zones=True, all_cells=True) available_zones, not_available_zones = ( availability_zones.get_availability_zones( ctxt, self.host_api, services=services)) zone_hosts = {} host_services = {} api_services = ('nova-osapi_compute', 'nova-metadata') for service in filter(lambda x: not x.disabled, services): if service.binary in api_services: # Skip API services in the listing since they are not # maintained in the same way as other services continue zone_hosts.setdefault(service['availability_zone'], set()) zone_hosts[service['availability_zone']].add(service['host']) host_services.setdefault(service['availability_zone'] + service['host'], []) host_services[service['availability_zone'] + service['host']].\ append(service) result = [] for zone in available_zones: hosts = {} for host in zone_hosts.get(zone, []): hosts[host] = {} for service in host_services[zone + host]: alive = self.servicegroup_api.service_is_up(service) hosts[host][service['binary']] = { 'available': alive, 'active': service['disabled'] is not True, 'updated_at': service['updated_at'] } result.append({'zoneName': zone, 'zoneState': {'available': True}, "hosts": hosts}) for zone in not_available_zones: result.append({'zoneName': zone, 'zoneState': {'available': False}, "hosts": None}) return {'availabilityZoneInfo': result} @wsgi.expected_errors(()) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req): """Returns a summary list of availability zone.""" context = req.environ['nova.context'] context.can(az_policies.POLICY_ROOT % 'list', target={}) return self._describe_availability_zones(context) @wsgi.expected_errors(()) @validation.query_schema(schema.detail_query) @validation.response_body_schema(schema.detail_response) def detail(self, req): """Returns a detailed list of availability zone.""" context = req.environ['nova.context'] context.can(az_policies.POLICY_ROOT % 'detail', target={}) return 
self._describe_availability_zones_verbose(context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/baremetal_nodes.py0000664000175000017500000001246200000000000023444 0ustar00zuulzuul00000000000000# Copyright (c) 2013 NTT DOCOMO, INC. # Copyright 2014 IBM Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The baremetal admin extension.""" from openstack import exceptions as sdk_exc import webob from nova.api.openstack.api_version_request \ import MAX_PROXY_API_SUPPORT_VERSION from nova.api.openstack.compute.schemas import baremetal_nodes as schema from nova.api.openstack import wsgi from nova.api import validation import nova.conf from nova.i18n import _ from nova.policies import baremetal_nodes as bn_policies from nova import utils CONF = nova.conf.CONF def _no_ironic_proxy(cmd): msg = _( "Command not supported. Please use Ironic " "command %(cmd)s to perform this action." ) raise webob.exc.HTTPBadRequest(explanation=msg % {'cmd': cmd}) @validation.validated class BareMetalNodeController(wsgi.Controller): """The Bare-Metal Node API controller for the OpenStack API.""" def __init__(self): super().__init__() self._ironic_connection = None @property def ironic_connection(self): if self._ironic_connection is None: # Ask get_sdk_adapter to raise ServiceUnavailable if the baremetal # service isn't ready yet. Consumers of ironic_connection are set # up to handle this and raise VirtDriverNotReady as appropriate. 
self._ironic_connection = utils.get_sdk_adapter( 'baremetal', admin=True, check_service=True, ) return self._ironic_connection @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors((404, 501)) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req): context = req.environ['nova.context'] context.can(bn_policies.BASE_POLICY_NAME % 'list', target={}) nodes = [] # proxy command to Ironic inodes = self.ironic_connection.nodes(details=True) for inode in inodes: node = { 'id': inode.id, 'interfaces': [], 'host': 'IRONIC MANAGED', 'task_state': inode.provision_state, 'cpus': inode.properties.get('cpus', str(0)), 'memory_mb': inode.properties.get('memory_mb', str(0)), 'disk_gb': inode.properties.get('local_gb', str(0)), } nodes.append(node) return {'nodes': nodes} @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors((404, 501)) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response) def show(self, req, id): context = req.environ['nova.context'] context.can(bn_policies.BASE_POLICY_NAME % 'show', target={}) # proxy command to Ironic try: inode = self.ironic_connection.get_node(id) except sdk_exc.NotFoundException: msg = _("Node %s could not be found.") % id raise webob.exc.HTTPNotFound(explanation=msg) iports = self.ironic_connection.ports(node=id) node = { 'id': inode.id, 'interfaces': [], 'host': 'IRONIC MANAGED', 'task_state': inode.provision_state, 'cpus': inode.properties.get('cpus', str(0)), 'memory_mb': inode.properties.get('memory_mb', str(0)), 'disk_gb': inode.properties.get('local_gb', str(0)), 'instance_uuid': inode.instance_id, } for port in iports: node['interfaces'].append({'address': port.address}) return {'node': node} @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(400) @validation.schema(schema.create) @validation.response_body_schema(schema.create_response) def create(self, req, body): _no_ironic_proxy("node-create") @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(400) @validation.response_body_schema(schema.delete_response) def delete(self, req, id): _no_ironic_proxy("node-delete") @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.action('add_interface') @wsgi.expected_errors(400) @validation.schema(schema.add_interface) @validation.response_body_schema(schema.add_interface_response) def _add_interface(self, req, id, body): _no_ironic_proxy("port-create") @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.action('remove_interface') @wsgi.expected_errors(400) @validation.schema(schema.remove_interface) @validation.response_body_schema(schema.remove_interface_response) def _remove_interface(self, req, id, body): _no_ironic_proxy("port-delete") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/cells.py0000664000175000017500000000656600000000000021432 0ustar00zuulzuul00000000000000# Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc from nova.api.openstack.compute.schemas import cells as schema from nova.api.openstack import wsgi from nova.api import validation _removal_reason = """\ This API only works in a Cells v1 deployment, which was deprecated in the 16.0.0 (Pike) release. It is not used with Cells v2, which is required beginning in the 15.0.0 (Ocata) release. It was removed in the 20.0.0 (Train) release. """ @validation.validated class CellsController(wsgi.Controller): """(Removed) Controller for Cell resources. This was removed during the Train release in favour of cells v2. """ @wsgi.expected_errors(410) @wsgi.removed('20.0.0', _removal_reason) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('20.0.0', _removal_reason) @validation.query_schema(schema.detail_query) @validation.response_body_schema(schema.detail_response) def detail(self, req): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('20.0.0', _removal_reason) @validation.query_schema(schema.info_query) @validation.response_body_schema(schema.info_response) def info(self, req): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('20.0.0', _removal_reason) @validation.query_schema(schema.capacities_query) @validation.response_body_schema(schema.capacities_response) def capacities(self, req, id=None): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('20.0.0', _removal_reason) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response) def show(self, req, id): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('20.0.0', _removal_reason) @validation.response_body_schema(schema.delete_response) def delete(self, req, id): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('20.0.0', _removal_reason) @validation.schema(schema.create) @validation.response_body_schema(schema.create_response) def create(self, req, body): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('20.0.0', _removal_reason) @validation.schema(schema.update) @validation.response_body_schema(schema.update_response) def update(self, req, id, body): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('20.0.0', _removal_reason) @validation.schema(schema.sync_instances) @validation.response_body_schema(schema.sync_instances_response) def sync_instances(self, req, body): raise exc.HTTPGone() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/certificates.py0000664000175000017500000000330700000000000022763 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob.exc from nova.api.openstack.compute.schemas import certificates as schema from nova.api.openstack import wsgi from nova.api import validation _removal_reason = """\ This API was only used to build euca bundles when Nova had an in-tree EC2 API. It no longer interacts with any parts of the system besides its own certificate daemon. It was removed in the 16.0.0 (Pike) release. """ @validation.validated class CertificatesController(wsgi.Controller): """The x509 Certificates API controller for the OpenStack API.""" @wsgi.expected_errors(410) @wsgi.removed('16.0.0', _removal_reason) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response) def show(self, req, id): """Return certificate information.""" raise webob.exc.HTTPGone() @wsgi.expected_errors((410)) @wsgi.removed('16.0.0', _removal_reason) @validation.schema(schema.create) @validation.response_body_schema(schema.create_response) def create(self, req, body=None): """Create a certificate.""" raise webob.exc.HTTPGone() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/cloudpipe.py0000664000175000017500000000371200000000000022302 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Connect your vlan to the world.""" from webob import exc from nova.api.openstack.compute.schemas import cloudpipe as schema from nova.api.openstack import wsgi from nova.api import validation _removal_reason = """\ This API only works with *nova-network*, which was deprecated in the 14.0.0 (Newton) release. It was removed in the 16.0.0 (Pike) release. 
""" @validation.validated class CloudpipeController(wsgi.Controller): """Handle creating and listing cloudpipe instances.""" @wsgi.expected_errors((410)) @wsgi.removed('16.0.0', _removal_reason) @validation.schema(schema.create) @validation.response_body_schema(schema.create_response) def create(self, req, body): """Create a new cloudpipe instance, if none exists.""" raise exc.HTTPGone() @wsgi.expected_errors((410)) @wsgi.removed('16.0.0', _removal_reason) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req): """List running cloudpipe instances.""" raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('16.0.0', _removal_reason) @validation.schema(schema.update) @validation.response_body_schema(schema.update_response) def update(self, req, id, body): """Configure cloudpipe parameters for the project.""" raise exc.HTTPGone() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/console_auth_tokens.py0000664000175000017500000001015000000000000024356 0ustar00zuulzuul00000000000000# Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from nova.api.openstack import api_version_request from nova.api.openstack.compute.schemas import console_auth_tokens as schema from nova.api.openstack import wsgi from nova.api import validation import nova.conf from nova import context as nova_context from nova.i18n import _ from nova import objects from nova.policies import console_auth_tokens as cat_policies CONF = nova.conf.CONF @validation.validated class ConsoleAuthTokensController(wsgi.Controller): @wsgi.expected_errors((400, 401, 404), '2.1', '2.30') @wsgi.expected_errors((400, 404), '2.31', '2.98') @wsgi.expected_errors((400, 404), '2.99') @validation.query_schema(schema.show_query, '2.1', '2.98') @validation.query_schema(schema.show_query_v299, '2.99') # NOTE(stephenfin): Technically this will never return a response now for # microversion <= 2.30, (as an exception will be raised instead) but we use # the same schema for documentation purposes @validation.response_body_schema(schema.show_response, '2.1', '2.98') @validation.response_body_schema(schema.show_response_v299, '2.99') def show(self, req, id): """Show console auth token. Until microversion 2.30, this API was available only for the rdp-html5 console type which has been removed along with the HyperV driver in the Nova 29.0.0 (Caracal) release. As a result, we now return a HTTP 400 error for microversion <= 2.30. Starting from 2.31 microversion, this API works for all the other supported console types. 
""" if not api_version_request.is_supported(req, '2.31'): raise webob.exc.HTTPBadRequest() include_tls_port = False if api_version_request.is_supported(req, '2.99'): include_tls_port = True return self._show(req, id, include_tls_port=include_tls_port) def _show(self, req, id, include_tls_port=False): """Checks a console auth token and returns the related connect info.""" context = req.environ['nova.context'] context.can(cat_policies.BASE_POLICY_NAME) token = id if not token: msg = _("token not provided") raise webob.exc.HTTPBadRequest(explanation=msg) connect_info = None results = nova_context.scatter_gather_skip_cell0( context, objects.ConsoleAuthToken.validate, token) # NOTE(melwitt): Console token auths are stored in cell databases, # but with only the token as a request param, we can't know which # cell database contains the token's corresponding connection info. # So, we must query all cells for the info and we can break the # loop as soon as we find a result because the token is associated # with one instance, which can only be in one cell. for result in results.values(): if not nova_context.is_cell_failure_sentinel(result): connect_info = result break if not connect_info: raise webob.exc.HTTPNotFound(explanation=_("Token not found")) retval = { 'console': { 'instance_uuid': connect_info.instance_uuid, 'host': connect_info.host, 'port': connect_info.port, 'internal_access_path': connect_info.internal_access_path, } } if connect_info.console_type == 'spice-direct' and include_tls_port: retval['console']['tls_port'] = connect_info.tls_port return retval ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/console_output.py0000664000175000017500000000602000000000000023373 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2011 Grid Dynamics # Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re import webob from nova.api.openstack import common from nova.api.openstack.compute.schemas import console_output as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.policies import console_output as co_policies @validation.validated class ConsoleOutputController(wsgi.Controller): def __init__(self): super(ConsoleOutputController, self).__init__() self.compute_api = compute.API() @wsgi.expected_errors((404, 409, 501)) @wsgi.action('os-getConsoleOutput') @validation.schema(schema.get_console_output) @validation.response_body_schema(schema.get_console_output_response) def get_console_output(self, req, id, body): """Get text console output.""" context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, id) context.can(co_policies.BASE_POLICY_NAME, target={'project_id': instance.project_id}) length = body['os-getConsoleOutput'].get('length') # TODO(cyeoh): In a future API update accept a length of -1 # as meaning unlimited length (convert to None) try: output = self.compute_api.get_console_output(context, instance, length) # NOTE(cyeoh): This covers race conditions where the instance is # deleted between common.get_instance and get_console_output # being called except (exception.InstanceNotFound, exception.ConsoleNotAvailable) as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceNotReady as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) except NotImplementedError: common.raise_feature_not_supported() # XML output is not correctly escaped, so remove invalid characters # NOTE(cyeoh): We don't support XML output with V2.1, but for # backwards compatibility reasons we continue to filter the output # We should remove this in the future remove_re = re.compile('[\x00-\x08\x0B-\x1F]') output = remove_re.sub('', output) return {'output': output} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/consoles.py0000664000175000017500000000415500000000000022145 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc from nova.api.openstack.compute.schemas import consoles as schema from nova.api.openstack import wsgi from nova.api import validation _removal_reason = """\ This API only works with the Xen virt driver, which was deprecated in the 20.0.0 (Train) release. It was removed in the 21.0.0 (Ussuri) release. """ @validation.validated class ConsolesController(wsgi.Controller): """(Removed) The Consoles controller for the OpenStack API. This was removed during the Ussuri release along with the nova-console service. 
""" @wsgi.expected_errors(410) @wsgi.removed('21.0.0', _removal_reason) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req, server_id): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('21.0.0', _removal_reason) @validation.schema(schema.create) @validation.response_body_schema(schema.create_response) def create(self, req, server_id, body): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('21.0.0', _removal_reason) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response) def show(self, req, server_id, id): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('21.0.0', _removal_reason) @validation.response_body_schema(schema.delete_response) def delete(self, req, server_id, id): raise exc.HTTPGone() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/create_backup.py0000664000175000017500000000737200000000000023114 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import create_backup as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.policies import create_backup as cb_policies @validation.validated class CreateBackupController(wsgi.Controller): def __init__(self): super(CreateBackupController, self).__init__() self.compute_api = compute.API() @wsgi.response(202) @wsgi.expected_errors((400, 403, 404, 409)) @wsgi.action('createBackup') @validation.schema(schema.create_backup_v20, '2.0', '2.0') @validation.schema(schema.create_backup, '2.1') @validation.response_body_schema( schema.create_backup_response, '2.1', '2.44', ) @validation.response_body_schema( schema.create_backup_response_v245, '2.45' ) def _create_backup(self, req, id, body): """Backup a server instance. Images now have an `image_type` associated with them, which can be 'snapshot' or the backup type, like 'daily' or 'weekly'. If the image_type is backup-like, then the rotation factor can be included and that will cause the oldest backups that exceed the rotation factor to be deleted. 
""" context = req.environ["nova.context"] instance = common.get_instance(self.compute_api, context, id) context.can(cb_policies.BASE_POLICY_NAME, target={'project_id': instance.project_id}) entity = body["createBackup"] image_name = common.normalize_name(entity["name"]) backup_type = entity["backup_type"] rotation = int(entity["rotation"]) props = {} metadata = entity.get('metadata', {}) # Starting from microversion 2.39 we don't check quotas on createBackup if not api_version_request.is_supported(req, '2.39'): common.check_img_metadata_properties_quota(context, metadata) props.update(metadata) try: image = self.compute_api.backup(context, instance, image_name, backup_type, rotation, extra_properties=props) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'createBackup', id) except exception.InvalidRequest as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) # Starting with microversion 2.45 we return a response body containing # the snapshot image id without the Location header. if api_version_request.is_supported(req, '2.45'): return {'image_id': image['id']} resp = webob.Response(status_int=202) # build location of newly-created image entity if rotation is not zero if rotation > 0: image_id = str(image['id']) image_ref = common.url_join(req.application_url, 'images', image_id) resp.headers['Location'] = image_ref return resp ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/deferred_delete.py0000664000175000017500000000571500000000000023425 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""The deferred instance delete extension.""" import webob from nova.api.openstack import common from nova.api.openstack.compute.schemas import deferred_delete as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.policies import deferred_delete as dd_policies @validation.validated class DeferredDeleteController(wsgi.Controller): def __init__(self): super(DeferredDeleteController, self).__init__() self.compute_api = compute.API() @wsgi.response(202) @wsgi.expected_errors((403, 404, 409)) @wsgi.action('restore') @validation.schema(schema.restore) @validation.response_body_schema(schema.restore_response) def _restore(self, req, id, body): """Restore a previously deleted instance.""" context = req.environ["nova.context"] instance = common.get_instance(self.compute_api, context, id) context.can(dd_policies.BASE_POLICY_NAME % 'restore', target={'project_id': instance.project_id}) try: self.compute_api.restore(context, instance) except exception.OverQuota as error: raise webob.exc.HTTPForbidden(explanation=error.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'restore', id) @wsgi.response(202) @wsgi.expected_errors((404, 409)) @wsgi.action('forceDelete') @validation.schema(schema.force_delete) @validation.response_body_schema(schema.force_delete_response) def _force_delete(self, req, id, body): """Force delete of instance before deferred cleanup.""" context = req.environ["nova.context"] instance = common.get_instance(self.compute_api, context, id) context.can(dd_policies.BASE_POLICY_NAME % 'force', target={'user_id': instance.user_id, 'project_id': instance.project_id}) try: self.compute_api.force_delete(context, instance) except exception.InstanceNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceIsLocked as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/evacuate.py0000664000175000017500000001576200000000000022123 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from oslo_utils import strutils from webob import exc from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import evacuate from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova.compute import vm_states import nova.conf from nova import exception from nova.i18n import _ from nova import objects from nova.policies import evacuate as evac_policies from nova import utils CONF = nova.conf.CONF LOG = logging.getLogger(__name__) MIN_VER_NOVA_COMPUTE_EVACUATE_STOPPED = 62 @validation.validated class EvacuateController(wsgi.Controller): def __init__(self): super(EvacuateController, self).__init__() self.compute_api = compute.API() self.host_api = compute.HostAPI() def _get_on_shared_storage(self, req, evacuate_body): if api_version_request.is_supported(req, '2.14'): return None else: return strutils.bool_from_string(evacuate_body["onSharedStorage"]) def _get_password(self, req, evacuate_body, on_shared_storage): password = None if 'adminPass' in evacuate_body: # check that if requested to evacuate server on shared storage # password not specified if on_shared_storage: msg = _("admin password can't be changed on existing disk") raise exc.HTTPBadRequest(explanation=msg) password = evacuate_body['adminPass'] elif not on_shared_storage: password = utils.generate_password() return password def _get_password_v214(self, req, evacuate_body): if 'adminPass' in evacuate_body: password = evacuate_body['adminPass'] else: password = utils.generate_password() return password # TODO(eliqiao): Should be responding here with 202 Accept # because evacuate is an async call, but keep to 200 for # backwards compatibility reasons. @wsgi.expected_errors((400, 403, 404, 409)) @wsgi.action('evacuate') @validation.schema(evacuate.evacuate, "2.0", "2.13") @validation.schema(evacuate.evacuate_v214, "2.14", "2.28") @validation.schema(evacuate.evacuate_v229, "2.29", "2.67") @validation.schema(evacuate.evacuate_v268, "2.68", "2.94") @validation.schema(evacuate.evacuate_v295, "2.95") @validation.response_body_schema( evacuate.evacuate_response, "2.0", "2.13" ) @validation.response_body_schema(evacuate.evacuate_response_v214, "2.14") def _evacuate(self, req, id, body): """Permit admins to evacuate a server from a failed host to a new one. """ context = req.environ["nova.context"] instance = common.get_instance(self.compute_api, context, id, expected_attrs=['trusted_certs', 'pci_requests', 'pci_devices', 'resources', 'migration_context']) context.can(evac_policies.BASE_POLICY_NAME, target={'user_id': instance.user_id, 'project_id': instance.project_id}) evacuate_body = body["evacuate"] host = evacuate_body.get("host") force = None target_state = None if api_version_request.is_supported(req, '2.95'): min_ver = objects.service.get_minimum_version_all_cells( context, ['nova-compute']) if min_ver < MIN_VER_NOVA_COMPUTE_EVACUATE_STOPPED: raise exception.NotSupportedComputeForEvacuateV295( {'currently': min_ver, 'expected': MIN_VER_NOVA_COMPUTE_EVACUATE_STOPPED}) # Starts to 2.95 any evacuated instances will be stopped at # destination. Previously an active or stopped instance would have # kept its state. 
target_state = vm_states.STOPPED on_shared_storage = self._get_on_shared_storage(req, evacuate_body) if api_version_request.is_supported(req, '2.29'): force = body["evacuate"].get("force", False) force = strutils.bool_from_string(force, strict=True) if force is True and not host: message = _("Can't force to a non-provided destination") raise exc.HTTPBadRequest(explanation=message) if api_version_request.is_supported(req, '2.14'): password = self._get_password_v214(req, evacuate_body) else: password = self._get_password(req, evacuate_body, on_shared_storage) if host is not None: try: self.host_api.service_get_by_compute_host(context, host) except (exception.ComputeHostNotFound, exception.HostMappingNotFound): msg = _("Compute host %s not found.") % host raise exc.HTTPNotFound(explanation=msg) if instance.host == host: msg = _("The target host can't be the same one.") raise exc.HTTPBadRequest(explanation=msg) try: self.compute_api.evacuate(context, instance, host, on_shared_storage, password, force, target_state) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state( state_error, 'evacuate', id) except ( exception.ComputeServiceInUse, exception.ForbiddenPortsWithAccelerator, exception.ExtendedResourceRequestOldCompute, ) as e: raise exc.HTTPBadRequest(explanation=e.format_message()) except exception.UnsupportedRPCVersion as e: raise exc.HTTPConflict(explanation=e.format_message()) except ( exception.ForbiddenSharesNotSupported, exception.ForbiddenWithShare, ) as e: raise exc.HTTPConflict(explanation=e.format_message()) if ( not api_version_request.is_supported(req, '2.14') and CONF.api.enable_instance_password ): return {'adminPass': password} else: return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/extension_info.py0000664000175000017500000007575100000000000023361 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
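# NOTE: illustrative sketch, not part of the upstream module. The static
# EXTENSION_LIST below backs the legacy ``GET /extensions`` API:
# ``index()`` returns ``{"extensions": [...]}`` while ``show()`` returns
# ``{"extension": {...}}`` for the entry whose ``alias`` matches the id.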
import webob.exc from nova.api.openstack.compute.schemas import extension_info as schema from nova.api.openstack import wsgi from nova.api import validation from nova.policies import extensions as ext_policies EXTENSION_LIST = [ { "alias": "NMN", "description": "Multiple network support.", "links": [], "name": "Multinic", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-DCF", "description": "Disk Management Extension.", "links": [], "name": "DiskConfig", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-AZ", "description": "Extended Availability Zone support.", "links": [], "name": "ExtendedAvailabilityZone", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IMG-SIZE", "description": "Adds image size to image listings.", "links": [], "name": "ImageSize", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IPS", "description": "Adds type parameter to the ip list.", "links": [], "name": "ExtendedIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-IPS-MAC", "description": "Adds mac address parameter to the ip list.", "links": [], "name": "ExtendedIpsMac", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-SRV-ATTR", "description": "Extended Server Attributes support.", "links": [], "name": "ExtendedServerAttributes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-EXT-STS", "description": "Extended Status support.", "links": [], "name": "ExtendedStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-FLV-DISABLED", "description": "Support to show the disabled status of a flavor.", "links": [], "name": "FlavorDisabled", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-FLV-EXT-DATA", "description": "Provide additional data for flavors.", "links": [], "name": "FlavorExtraData", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-SCH-HNT", "description": "Pass arbitrary key/value pairs to the scheduler.", "links": [], "name": "SchedulerHints", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "OS-SRV-USG", "description": "Adds launched_at and terminated_at on Servers.", "links": [], "name": "ServerUsage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-access-ips", "description": "Access IPs support.", "links": [], "name": "AccessIPs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-admin-actions", "description": "Enable admin-only server actions\n\n " "Actions include: resetNetwork, injectNetworkInfo, " "os-resetState\n ", "links": [], "name": "AdminActions", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-admin-password", "description": "Admin password management support.", "links": [], "name": "AdminPassword", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", 
"updated": "2014-12-03T00:00:00Z" }, { "alias": "os-agents", "description": "Agents support.", "links": [], "name": "Agents", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-aggregates", "description": "Admin-only aggregate administration.", "links": [], "name": "Aggregates", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-assisted-volume-snapshots", "description": "Assisted volume snapshots.", "links": [], "name": "AssistedVolumeSnapshots", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-attach-interfaces", "description": "Attach interface support.", "links": [], "name": "AttachInterfaces", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-availability-zone", "description": "1. Add availability_zone to the Create Server " "API.\n 2. Add availability zones " "describing.\n ", "links": [], "name": "AvailabilityZone", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-baremetal-ext-status", "description": "Add extended status in Baremetal Nodes v2 API.", "links": [], "name": "BareMetalExtStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-baremetal-nodes", "description": "Admin-only bare-metal node administration.", "links": [], "name": "BareMetalNodes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-block-device-mapping", "description": "Block device mapping boot support.", "links": [], "name": "BlockDeviceMapping", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-block-device-mapping-v2-boot", "description": "Allow boot with the new BDM data format.", "links": [], "name": "BlockDeviceMappingV2Boot", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cell-capacities", "description": "Adding functionality to get cell capacities.", "links": [], "name": "CellCapacities", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cells", "description": "Enables cells-related functionality such as adding " "neighbor cells,\n listing neighbor cells, " "and getting the capabilities of the local cell.\n ", "links": [], "name": "Cells", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-certificates", "description": "Certificates support.", "links": [], "name": "Certificates", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cloudpipe", "description": "Adds actions to create cloudpipe instances.\n\n " "When running with the Vlan network mode, you need a " "mechanism to route\n from the public Internet to " "your vlans. This mechanism is known as a\n " "cloudpipe.\n\n At the time of creating this class, " "only OpenVPN is supported. 
Support for\n a SSH " "Bastion host is forthcoming.\n ", "links": [], "name": "Cloudpipe", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-cloudpipe-update", "description": "Adds the ability to set the vpn ip/port for cloudpipe " "instances.", "links": [], "name": "CloudpipeUpdate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-config-drive", "description": "Config Drive Extension.", "links": [], "name": "ConfigDrive", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-console-auth-tokens", "description": "Console token authentication support.", "links": [], "name": "ConsoleAuthTokens", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-console-output", "description": "Console log output support, with tailing ability.", "links": [], "name": "ConsoleOutput", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-consoles", "description": "Interactive Console support.", "links": [], "name": "Consoles", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-create-backup", "description": "Create a backup of a server.", "links": [], "name": "CreateBackup", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-create-server-ext", "description": "Extended support to the Create Server v1.1 API.", "links": [], "name": "Createserverext", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-deferred-delete", "description": "Instance deferred delete.", "links": [], "name": "DeferredDelete", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-evacuate", "description": "Enables server evacuation.", "links": [], "name": "Evacuate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-evacuate-find-host", "description": "Enables server evacuation without target host. 
" "Scheduler will select one to target.", "links": [], "name": "ExtendedEvacuateFindHost", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-floating-ips", "description": "Adds optional fixed_address to the add floating IP " "command.", "links": [], "name": "ExtendedFloatingIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-hypervisors", "description": "Extended hypervisors support.", "links": [], "name": "ExtendedHypervisors", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-networks", "description": "Adds additional fields to networks.", "links": [], "name": "ExtendedNetworks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-quotas", "description": "Adds ability for admins to delete quota and " "optionally force the update Quota command.", "links": [], "name": "ExtendedQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-rescue-with-image", "description": "Allow the user to specify the image to use for " "rescue.", "links": [], "name": "ExtendedRescueWithImage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-services", "description": "Extended services support.", "links": [], "name": "ExtendedServices", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-services-delete", "description": "Extended services deletion support.", "links": [], "name": "ExtendedServicesDelete", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-status", "description": "Extended Status support.", "links": [], "name": "ExtendedStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-extended-volumes", "description": "Extended Volumes support.", "links": [], "name": "ExtendedVolumes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-fixed-ips", "description": "Fixed IPs support.", "links": [], "name": "FixedIPs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-access", "description": "Flavor access support.", "links": [], "name": "FlavorAccess", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-extra-specs", "description": "Flavors extra specs support.", "links": [], "name": "FlavorExtraSpecs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-manage", "description": "Flavor create/delete API support.", "links": [], "name": "FlavorManage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-rxtx", "description": "Support to show the rxtx status of a flavor.", "links": [], "name": "FlavorRxtx", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-flavor-swap", "description": "Support to show the swap status of a flavor.", "links": [], "name": 
"FlavorSwap", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ip-dns", "description": "Floating IP DNS support.", "links": [], "name": "FloatingIpDns", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ip-pools", "description": "Floating IPs support.", "links": [], "name": "FloatingIpPools", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ips", "description": "Floating IPs support.", "links": [], "name": "FloatingIps", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-floating-ips-bulk", "description": "Bulk handling of Floating IPs.", "links": [], "name": "FloatingIpsBulk", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-fping", "description": "Fping Management Extension.", "links": [], "name": "Fping", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hide-server-addresses", "description": "Support hiding server addresses in certain states.", "links": [], "name": "HideServerAddresses", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hosts", "description": "Admin-only host administration.", "links": [], "name": "Hosts", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hypervisor-status", "description": "Show hypervisor status.", "links": [], "name": "HypervisorStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-hypervisors", "description": "Admin-only hypervisor administration.", "links": [], "name": "Hypervisors", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-instance-actions", "description": "View a log of actions and events taken on an " "instance.", "links": [], "name": "InstanceActions", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-instance_usage_audit_log", "description": "Admin-only Task Log Monitoring.", "links": [], "name": "OSInstanceUsageAuditLog", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-keypairs", "description": "Keypair Support.", "links": [], "name": "Keypairs", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-lock-server", "description": "Enable lock/unlock server actions.", "links": [], "name": "LockServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-migrate-server", "description": "Enable migrate and live-migrate server actions.", "links": [], "name": "MigrateServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-migrations", "description": "Provide data on migrations.", "links": [], "name": "Migrations", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-multiple-create", "description": "Allow multiple create in the Create Server v2.1 API.", "links": [], "name": "MultipleCreate", 
"namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-networks", "description": "Admin-only Network Management Extension.", "links": [], "name": "Networks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-networks-associate", "description": "Network association support.", "links": [], "name": "NetworkAssociationSupport", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-pause-server", "description": "Enable pause/unpause server actions.", "links": [], "name": "PauseServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-personality", "description": "Personality support.", "links": [], "name": "Personality", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-preserve-ephemeral-rebuild", "description": "Allow preservation of the ephemeral partition on " "rebuild.", "links": [], "name": "PreserveEphemeralOnRebuild", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-quota-class-sets", "description": "Quota classes management support.", "links": [], "name": "QuotaClasses", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-quota-sets", "description": "Quotas management support.", "links": [], "name": "Quotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-rescue", "description": "Instance rescue mode.", "links": [], "name": "Rescue", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-security-group-default-rules", "description": "Default rules for security group support.", "links": [], "name": "SecurityGroupDefaultRules", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-security-groups", "description": "Security group support.", "links": [], "name": "SecurityGroups", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-diagnostics", "description": "Allow Admins to view server diagnostics through " "server action.", "links": [], "name": "ServerDiagnostics", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-external-events", "description": "Server External Event Triggers.", "links": [], "name": "ServerExternalEvents", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-group-quotas", "description": "Adds quota support to server groups.", "links": [], "name": "ServerGroupQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-groups", "description": "Server group support.", "links": [], "name": "ServerGroups", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-list-multi-status", "description": "Allow to filter the servers by a set of status " "values.", "links": [], "name": "ServerListMultiStatus", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { 
"alias": "os-server-password", "description": "Server password support.", "links": [], "name": "ServerPassword", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-sort-keys", "description": "Add sorting support in get Server v2 API.", "links": [], "name": "ServerSortKeys", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-server-start-stop", "description": "Start/Stop instance compute API support.", "links": [], "name": "ServerStartStop", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-services", "description": "Services support.", "links": [], "name": "Services", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-shelve", "description": "Instance shelve mode.", "links": [], "name": "Shelve", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-simple-tenant-usage", "description": "Simple tenant usage extension.", "links": [], "name": "SimpleTenantUsage", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-suspend-server", "description": "Enable suspend/resume server actions.", "links": [], "name": "SuspendServer", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-tenant-networks", "description": "Tenant-based Network Management Extension.", "links": [], "name": "OSTenantNetworks", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-used-limits", "description": "Provide data on limited resources that are being " "used.", "links": [], "name": "UsedLimits", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-used-limits-for-admin", "description": "Provide data to admin on limited resources used by " "other tenants.", "links": [], "name": "UsedLimitsForAdmin", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-user-data", "description": "Add user_data to the Create Server API.", "links": [], "name": "UserData", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-user-quotas", "description": "Project user quota support.", "links": [], "name": "UserQuotas", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-virtual-interfaces", "description": "Virtual interface support.", "links": [], "name": "VirtualInterfaces", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-volume-attachment-update", "description": "Support for updating a volume attachment.", "links": [], "name": "VolumeAttachmentUpdate", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" }, { "alias": "os-volumes", "description": "Volumes support.", "links": [], "name": "Volumes", "namespace": "http://docs.openstack.org/compute/ext/fake_xml", "updated": "2014-12-03T00:00:00Z" } ] EXTENSION_LIST_LEGACY_V2_COMPATIBLE = EXTENSION_LIST[:] EXTENSION_LIST_LEGACY_V2_COMPATIBLE.append({ 'alias': 'OS-EXT-VIF-NET', 'description': 'Adds network id parameter to the virtual 
interface list.', 'links': [], 'name': 'ExtendedVIFNet', 'namespace': 'http://docs.openstack.org/compute/ext/fake_xml', "updated": "2014-12-03T00:00:00Z" }) EXTENSION_LIST_LEGACY_V2_COMPATIBLE = sorted( EXTENSION_LIST_LEGACY_V2_COMPATIBLE, key=lambda x: x['alias']) @validation.validated class ExtensionInfoController(wsgi.Controller): @wsgi.expected_errors(()) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req): context = req.environ['nova.context'] context.can(ext_policies.BASE_POLICY_NAME, target={}) # NOTE(gmann): This is for v2.1 compatible mode where # extension list should show all extensions as shown by v2. if req.is_legacy_v2(): return dict(extensions=EXTENSION_LIST_LEGACY_V2_COMPATIBLE) return dict(extensions=EXTENSION_LIST) @wsgi.expected_errors(404) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response) def show(self, req, id): context = req.environ['nova.context'] context.can(ext_policies.BASE_POLICY_NAME, target={}) all_exts = EXTENSION_LIST # NOTE(gmann): This is for v2.1 compatible mode where # extension list should show all extensions as shown by v2. if req.is_legacy_v2(): all_exts = EXTENSION_LIST_LEGACY_V2_COMPATIBLE # NOTE(dprince): the extensions alias is used as the 'id' for show for ext in all_exts: if ext['alias'] == id: return dict(extension=ext) raise webob.exc.HTTPNotFound() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/fixed_ips.py0000664000175000017500000000347400000000000022275 0ustar00zuulzuul00000000000000# Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc from nova.api.openstack.compute.schemas import fixed_ips as schema from nova.api.openstack import wsgi from nova.api import validation _removal_reason = """\ This API only works with *nova-network*, which was deprecated in the 14.0.0 (Newton) release. It fails with HTTP 404 starting from microversion 2.36. It was removed in the 18.0.0 (Rocky) release. 
""" @validation.validated class FixedIPController(wsgi.Controller): @wsgi.expected_errors((410)) @wsgi.removed('18.0.0', _removal_reason) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response) def show(self, req, id): raise exc.HTTPGone() @wsgi.expected_errors((410)) @wsgi.action('reserve') @wsgi.removed('18.0.0', _removal_reason) @validation.schema(schema.reserve) @validation.response_body_schema(schema.reserve_response) def reserve(self, req, id, body): raise exc.HTTPGone() @wsgi.expected_errors((410)) @wsgi.action('unreserve') @wsgi.removed('18.0.0', _removal_reason) @validation.schema(schema.unreserve) @validation.response_body_schema(schema.unreserve_response) def unreserve(self, req, id, body): raise exc.HTTPGone() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/flavor_access.py0000664000175000017500000001114400000000000023126 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The flavor access extension.""" import webob from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import flavor_access as schema from nova.api.openstack import identity from nova.api.openstack import wsgi from nova.api import validation from nova import exception from nova.i18n import _ from nova.policies import flavor_access as fa_policies def _marshall_flavor_access(flavor): rval = [] for project_id in flavor.projects: rval.append({'flavor_id': flavor.flavorid, 'tenant_id': project_id}) return {'flavor_access': rval} @validation.validated class FlavorAccessController(wsgi.Controller): """The flavor access API controller for the OpenStack API.""" @wsgi.expected_errors(404) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req, flavor_id): context = req.environ['nova.context'] context.can(fa_policies.BASE_POLICY_NAME) flavor = common.get_flavor(context, flavor_id) # public flavor to all projects if flavor.is_public: explanation = _("Access list not available for public flavors.") raise webob.exc.HTTPNotFound(explanation=explanation) # private flavor to listed projects only return _marshall_flavor_access(flavor) @validation.validated class FlavorActionController(wsgi.Controller): """The flavor access API controller for the OpenStack API.""" @wsgi.expected_errors((400, 403, 404, 409)) @wsgi.action("addTenantAccess") @validation.schema(schema.add_tenant_access) @validation.response_body_schema(schema.add_tenant_access_response) def _add_tenant_access(self, req, id, body): context = req.environ['nova.context'] context.can(fa_policies.POLICY_ROOT % "add_tenant_access", target={}) vals = body['addTenantAccess'] tenant = vals['tenant'] identity.verify_project_id(context, tenant) flavor = common.get_flavor(context, id) try: if 
api_version_request.is_supported(req, '2.7'): if flavor.is_public: exp = _("Can not add access to a public flavor.") raise webob.exc.HTTPConflict(explanation=exp) flavor.add_access(tenant) except exception.FlavorNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) except exception.FlavorAccessExists as err: raise webob.exc.HTTPConflict(explanation=err.format_message()) return _marshall_flavor_access(flavor) @wsgi.expected_errors((400, 403, 404)) @wsgi.action("removeTenantAccess") @validation.schema(schema.remove_tenant_access) @validation.response_body_schema(schema.remove_tenant_access_response) def _remove_tenant_access(self, req, id, body): context = req.environ['nova.context'] context.can( fa_policies.POLICY_ROOT % "remove_tenant_access", target={}) vals = body['removeTenantAccess'] tenant = vals['tenant'] # It doesn't really matter if project exists or not: we can delete # it from flavor's access list in both cases. try: identity.verify_project_id(context, tenant) except webob.exc.HTTPBadRequest as identity_exc: msg = "Project ID %s is not a valid project." % tenant if msg not in identity_exc.explanation: raise # NOTE(gibi): We have to load a flavor from the db here as # flavor.remove_access() will try to emit a notification and that needs # a fully loaded flavor. flavor = common.get_flavor(context, id) try: flavor.remove_access(tenant) except (exception.FlavorAccessNotFound, exception.FlavorNotFound) as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) return _marshall_flavor_access(flavor) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/flavors.py0000664000175000017500000002523100000000000021772 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import strutils import webob from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import flavors as schema from nova.api.openstack.compute.views import flavors as flavors_view from nova.api.openstack import wsgi from nova.api import validation from nova.compute import flavors from nova import exception from nova.i18n import _ from nova import objects from nova.policies import flavor_extra_specs as fes_policies from nova.policies import flavor_manage as fm_policies from nova import utils @validation.validated class FlavorsController(wsgi.Controller): """Flavor controller for the OpenStack API.""" _view_builder_class = flavors_view.ViewBuilder # NOTE(oomichi): Return 202 for backwards compatibility but should be # 204 as this operation complete the deletion of aggregate resource and # return no response body. 
@wsgi.response(202) @wsgi.expected_errors(404) @validation.response_body_schema(schema.delete_response) def delete(self, req, id): context = req.environ['nova.context'] context.can(fm_policies.POLICY_ROOT % 'delete', target={}) flavor = objects.Flavor(context=context, flavorid=id) try: flavor.destroy() except exception.FlavorNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) # NOTE(oomichi): Return 200 for backwards compatibility but should be 201 # as this operation complete the creation of flavor resource. @wsgi.expected_errors((400, 409)) @validation.schema(schema.create_v20, '2.0', '2.0') @validation.schema(schema.create, '2.1', '2.54') @validation.schema(schema.create_v255, '2.55') @validation.response_body_schema(schema.create_response, '2.0', '2.54') @validation.response_body_schema(schema.create_response_v255, '2.55', '2.60') # noqa: E501 @validation.response_body_schema(schema.create_response_v261, '2.61', '2.74') # noqa: E501 @validation.response_body_schema(schema.create_response_v275, '2.75') def create(self, req, body): context = req.environ['nova.context'] context.can(fm_policies.POLICY_ROOT % 'create', target={}) vals = body['flavor'] name = vals['name'] flavorid = vals.get('id') memory = vals['ram'] vcpus = vals['vcpus'] root_gb = vals['disk'] ephemeral_gb = vals.get('OS-FLV-EXT-DATA:ephemeral', 0) swap = vals.get('swap', 0) rxtx_factor = vals.get('rxtx_factor', 1.0) is_public = vals.get('os-flavor-access:is_public', True) # The user can specify a description starting with microversion 2.55. include_description = api_version_request.is_supported(req, '2.55') description = vals.get('description') if include_description else None try: flavor = flavors.create(name, memory, vcpus, root_gb, ephemeral_gb=ephemeral_gb, flavorid=flavorid, swap=swap, rxtx_factor=rxtx_factor, is_public=is_public, description=description) # NOTE(gmann): For backward compatibility, non public flavor # access is not being added for created tenant. Ref -bug/1209101 except (exception.FlavorExists, exception.FlavorIdExists) as err: raise webob.exc.HTTPConflict(explanation=err.format_message()) include_extra_specs = False if api_version_request.is_supported(req, '2.61'): include_extra_specs = context.can( fes_policies.POLICY_ROOT % 'index', fatal=False) # NOTE(yikun): This empty extra_specs only for keeping consistent # with PUT and GET flavor APIs. extra_specs in flavor is added # after creating the flavor so to avoid the error in _view_builder # flavor.extra_specs is populated with the empty string. flavor.extra_specs = {} return self._view_builder.show(req, flavor, include_description, include_extra_specs=include_extra_specs) @wsgi.api_version('2.55') @wsgi.expected_errors((400, 404)) @validation.schema(schema.update, '2.55') @validation.response_body_schema(schema.update_response, '2.55', '2.60') @validation.response_body_schema(schema.update_response_v261, '2.61', '2.74') # noqa: E501 @validation.response_body_schema(schema.update_response_v275, '2.75') def update(self, req, id, body): # Validate the policy. context = req.environ['nova.context'] context.can(fm_policies.POLICY_ROOT % 'update', target={}) # Get the flavor and update the description. 
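        # Illustrative note, not part of the upstream source: with the
        # v2.55+ schema the request body carries only the new description,
        # e.g. {"flavor": {"description": "tiny flavor for CI jobs"}}.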
try: flavor = objects.Flavor.get_by_flavor_id(context, id) flavor.description = body['flavor']['description'] flavor.save() except exception.FlavorNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) include_extra_specs = False if api_version_request.is_supported(req, '2.61'): include_extra_specs = context.can( fes_policies.POLICY_ROOT % 'index', fatal=False) return self._view_builder.show(req, flavor, include_description=True, include_extra_specs=include_extra_specs) @wsgi.expected_errors(400) @validation.query_schema(schema.index_query, '2.0', '2.74') @validation.query_schema(schema.index_query_275, '2.75') @validation.response_body_schema(schema.index_response, '2.0', '2.54') @validation.response_body_schema(schema.index_response_v255, '2.55') def index(self, req): """Return all flavors in brief.""" limited_flavors = self._get_flavors(req) return self._view_builder.index(req, limited_flavors) @wsgi.expected_errors(400) @validation.query_schema(schema.index_query, '2.0', '2.74') @validation.query_schema(schema.index_query_275, '2.75') @validation.response_body_schema(schema.detail_response, '2.0', '2.54') @validation.response_body_schema(schema.detail_response_v255, '2.55', '2.60') # noqa: E501 @validation.response_body_schema(schema.detail_response_v261, '2.61') def detail(self, req): """Return all flavors in detail.""" context = req.environ['nova.context'] limited_flavors = self._get_flavors(req) include_extra_specs = False if api_version_request.is_supported(req, '2.61'): include_extra_specs = context.can( fes_policies.POLICY_ROOT % 'index', fatal=False) return self._view_builder.detail( req, limited_flavors, include_extra_specs=include_extra_specs) @wsgi.expected_errors(404) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response, '2.0', '2.54') @validation.response_body_schema(schema.show_response_v255, '2.55', '2.60') @validation.response_body_schema(schema.show_response_v261, '2.61', '2.74') @validation.response_body_schema(schema.show_response_v275, '2.75') def show(self, req, id): """Return data about the given flavor id.""" context = req.environ['nova.context'] try: flavor = flavors.get_flavor_by_flavor_id(id, ctxt=context) except exception.FlavorNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) include_extra_specs = False if api_version_request.is_supported(req, '2.61'): include_extra_specs = context.can( fes_policies.POLICY_ROOT % 'index', fatal=False) include_description = api_version_request.is_supported(req, '2.55') return self._view_builder.show( req, flavor, include_description=include_description, include_extra_specs=include_extra_specs) def _parse_is_public(self, is_public): """Parse is_public into something usable.""" if is_public is None: # preserve default value of showing only public flavors return True elif utils.is_none_string(is_public): return None else: try: return strutils.bool_from_string(is_public, strict=True) except ValueError: msg = _('Invalid is_public filter [%s]') % is_public raise webob.exc.HTTPBadRequest(explanation=msg) def _get_flavors(self, req): """Helper function that returns a list of flavor dicts.""" filters = {} sort_key = req.params.get('sort_key') or 'flavorid' sort_dir = req.params.get('sort_dir') or 'asc' limit, marker = common.get_limit_and_marker(req) context = req.environ['nova.context'] if context.is_admin: # Only admin has query access to all flavor types filters['is_public'] = self._parse_is_public( req.params.get('is_public', None)) else: 
filters['is_public'] = True filters['disabled'] = False if 'minRam' in req.params: try: filters['min_memory_mb'] = int(req.params['minRam']) except ValueError: msg = _('Invalid minRam filter [%s]') % req.params['minRam'] raise webob.exc.HTTPBadRequest(explanation=msg) if 'minDisk' in req.params: try: filters['min_root_gb'] = int(req.params['minDisk']) except ValueError: msg = (_('Invalid minDisk filter [%s]') % req.params['minDisk']) raise webob.exc.HTTPBadRequest(explanation=msg) try: limited_flavors = objects.FlavorList.get_all( context, filters=filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker) except exception.MarkerNotFound: msg = _('marker [%s] not found') % marker raise webob.exc.HTTPBadRequest(explanation=msg) return limited_flavors ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/flavors_extraspecs.py0000664000175000017500000001446400000000000024241 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import flavors_extraspecs as schema from nova.api.openstack import wsgi from nova.api import validation from nova.api.validation.extra_specs import validators from nova import exception from nova.i18n import _ from nova.policies import flavor_extra_specs as fes_policies from nova import utils @validation.validated class FlavorExtraSpecsController(wsgi.Controller): """The flavor extra specs API controller for the OpenStack API.""" def _get_extra_specs(self, context, flavor_id): flavor = common.get_flavor(context, flavor_id) return dict(extra_specs=flavor.extra_specs) def _check_extra_specs_value(self, req, specs): validation_supported = api_version_request.is_supported(req, '2.86') for name, value in specs.items(): # NOTE(gmann): Max length for numeric value is being checked # explicitly as json schema cannot have max length check for # numeric value if isinstance(value, (int, float)): value = str(value) try: utils.check_string_length(value, 'extra_specs value', max_length=255) except exception.InvalidInput as error: raise webob.exc.HTTPBadRequest( explanation=error.format_message()) if validation_supported: validators.validate(name, value) @wsgi.expected_errors(404) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req, flavor_id): """Returns the list of extra specs for a given flavor.""" context = req.environ['nova.context'] context.can(fes_policies.POLICY_ROOT % 'index', target={'project_id': context.project_id}) return self._get_extra_specs(context, flavor_id) # NOTE(gmann): Here should be 201 instead of 200 by v2.1 # +microversions because the flavor extra specs has been created # completely when returning a response. 
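# NOTE: illustrative sketch, not part of the upstream module; the keys in
# the sample are common extra specs, not a fixed list. The controller
# below manipulates plain key/value pairs, e.g. a create request body of
#
#     {"extra_specs": {"hw:cpu_policy": "dedicated",
#                      "hw:mem_page_size": "large"}}
#
# Values are capped at 255 characters and, from microversion 2.86 on,
# known namespaces are validated via nova.api.validation.extra_specs.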
@wsgi.expected_errors((400, 404, 409)) @validation.schema(schema.create) @validation.response_body_schema(schema.create_response) def create(self, req, flavor_id, body): context = req.environ['nova.context'] context.can(fes_policies.POLICY_ROOT % 'create', target={}) specs = body['extra_specs'] self._check_extra_specs_value(req, specs) flavor = common.get_flavor(context, flavor_id) try: flavor.extra_specs = dict(flavor.extra_specs, **specs) flavor.save() except exception.FlavorExtraSpecUpdateCreateFailed as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) except exception.FlavorNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) return body @wsgi.expected_errors((400, 404, 409)) @validation.schema(schema.update) @validation.response_body_schema(schema.update_response) def update(self, req, flavor_id, id, body): context = req.environ['nova.context'] context.can(fes_policies.POLICY_ROOT % 'update', target={}) self._check_extra_specs_value(req, body) if id not in body: expl = _('Request body and URI mismatch') raise webob.exc.HTTPBadRequest(explanation=expl) flavor = common.get_flavor(context, flavor_id) try: flavor.extra_specs = dict(flavor.extra_specs, **body) flavor.save() except exception.FlavorExtraSpecUpdateCreateFailed as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) except exception.FlavorNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) return body @wsgi.expected_errors(404) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response) def show(self, req, flavor_id, id): """Return a single extra spec item.""" context = req.environ['nova.context'] context.can(fes_policies.POLICY_ROOT % 'show', target={'project_id': context.project_id}) flavor = common.get_flavor(context, flavor_id) try: return {id: flavor.extra_specs[id]} except KeyError: msg = _("Flavor %(flavor_id)s has no extra specs with " "key %(key)s.") % dict(flavor_id=flavor_id, key=id) raise webob.exc.HTTPNotFound(explanation=msg) # NOTE(gmann): Here should be 204(No Content) instead of 200 by v2.1 # +microversions because the flavor extra specs has been deleted # completely when returning a response. @wsgi.expected_errors(404) @validation.response_body_schema(schema.delete_response) def delete(self, req, flavor_id, id): """Deletes an existing extra spec.""" context = req.environ['nova.context'] context.can(fes_policies.POLICY_ROOT % 'delete', target={}) flavor = common.get_flavor(context, flavor_id) try: del flavor.extra_specs[id] flavor.save() except (exception.FlavorExtraSpecsNotFound, exception.FlavorNotFound) as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) except KeyError: msg = _("Flavor %(flavor_id)s has no extra specs with " "key %(key)s.") % dict(flavor_id=flavor_id, key=id) raise webob.exc.HTTPNotFound(explanation=msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/floating_ip_dns.py0000664000175000017500000000520700000000000023456 0ustar00zuulzuul00000000000000# Copyright 2011 Andrew Bogott for the Wikimedia Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc from nova.api.openstack.compute.schemas import floating_ip_dns as schema from nova.api.openstack import wsgi from nova.api import validation _removal_reason = """\ This API only works with *nova-network*, which was deprecated in the 14.0.0 (Newton) release. It fails with HTTP 404 starting from microversion 2.36. It was removed in the 18.0.0 (Rocky) release. """ @validation.validated class FloatingIPDNSDomainController(wsgi.Controller): """(Removed) DNS domain controller for OpenStack API.""" @wsgi.expected_errors(410) @wsgi.removed('18.0.0', _removal_reason) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('18.0.0', _removal_reason) @validation.schema(schema.update) @validation.response_body_schema(schema.update_response) def update(self, req, id, body): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('18.0.0', _removal_reason) @validation.response_body_schema(schema.delete_response) def delete(self, req, id): raise exc.HTTPGone() @validation.validated class FloatingIPDNSEntryController(wsgi.Controller): """(Removed) DNS Entry controller for OpenStack API.""" @wsgi.expected_errors(410) @wsgi.removed('18.0.0', _removal_reason) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response) def show(self, req, domain_id, id): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('18.0.0', _removal_reason) @validation.schema(schema.update) @validation.response_body_schema(schema.update_response) def update(self, req, domain_id, id, body): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('18.0.0', _removal_reason) @validation.response_body_schema(schema.delete_response) def delete(self, req, domain_id, id): raise exc.HTTPGone() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/floating_ip_pools.py0000664000175000017500000000372300000000000024027 0ustar00zuulzuul00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
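# A minimal sketch of the response shape this controller builds (example
# values are assumed, not from the API reference):
#   {"floating_ip_pools": [{"name": "public"}, {"name": "private"}]}
# where "name" falls back to the Neutron network id when the network has no
# name (see _translate_floating_ip_view below).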
from nova.api.openstack.api_version_request \ import MAX_PROXY_API_SUPPORT_VERSION from nova.api.openstack.compute.schemas import floating_ip_pools as schema from nova.api.openstack import wsgi from nova.api import validation from nova.network import neutron from nova.policies import floating_ip_pools as fip_policies def _translate_floating_ip_view(pool): return { 'name': pool['name'] or pool['id'], } def _translate_floating_ip_pools_view(pools): return { 'floating_ip_pools': [_translate_floating_ip_view(pool) for pool in pools] } @validation.validated class FloatingIPPoolsController(wsgi.Controller): """The Floating IP Pool API controller for the OpenStack API.""" def __init__(self): super(FloatingIPPoolsController, self).__init__() self.network_api = neutron.API() @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(()) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req): """Return a list of pools.""" context = req.environ['nova.context'] context.can(fip_policies.BASE_POLICY_NAME, target={}) pools = self.network_api.get_floating_ip_pools(context) return _translate_floating_ip_pools_view(pools) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/floating_ips.py0000664000175000017500000003115300000000000022774 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2011 Grid Dynamics # Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
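# A rough sketch of the per-IP view assembled by _translate_floating_ip_view
# below (example values are assumed):
#   {"floating_ip": {"id": "<uuid>", "ip": "203.0.113.10", "pool": "public",
#                    "fixed_ip": "10.0.0.5", "instance_id": "<server uuid>"}}
# "pool" falls back to the Neutron network id when the network has no name,
# and "instance_id" is None when the floating IP has no associated port.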
from oslo_log import log as logging from oslo_utils import netutils import webob from nova.api.openstack.api_version_request \ import MAX_PROXY_API_SUPPORT_VERSION from nova.api.openstack import common from nova.api.openstack.compute.schemas import floating_ips as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.i18n import _ from nova.network import neutron from nova.policies import floating_ips as fi_policies LOG = logging.getLogger(__name__) def _translate_floating_ip_view(floating_ip): instance_id = None if floating_ip['port_details']: instance_id = floating_ip['port_details']['device_id'] return { 'floating_ip': { 'id': floating_ip['id'], 'ip': floating_ip['floating_ip_address'], 'pool': floating_ip['network_details']['name'] or ( floating_ip['network_details']['id']), 'fixed_ip': floating_ip['fixed_ip_address'], 'instance_id': instance_id, } } def get_instance_by_floating_ip_addr(self, context, address): try: instance_id =\ self.network_api.get_instance_id_by_floating_address( context, address) except exception.FloatingIpNotFoundForAddress as ex: raise webob.exc.HTTPNotFound(explanation=ex.format_message()) except exception.FloatingIpMultipleFoundForAddress as ex: raise webob.exc.HTTPConflict(explanation=ex.format_message()) if instance_id: return common.get_instance(self.compute_api, context, instance_id, expected_attrs=['flavor']) def disassociate_floating_ip(self, context, instance, address): try: self.network_api.disassociate_floating_ip(context, instance, address) except exception.Forbidden: raise webob.exc.HTTPForbidden() class FloatingIPController(wsgi.Controller): """The Floating IPs API controller for the OpenStack API.""" def __init__(self): super(FloatingIPController, self).__init__() self.compute_api = compute.API() self.network_api = neutron.API() @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors((400, 404)) @validation.query_schema(schema.show_query) def show(self, req, id): """Return data about the given floating IP.""" context = req.environ['nova.context'] context.can(fi_policies.BASE_POLICY_NAME % 'show', target={'project_id': context.project_id}) try: floating_ip = self.network_api.get_floating_ip(context, id) except (exception.NotFound, exception.FloatingIpNotFound): msg = _("Floating IP not found for ID %s") % id raise webob.exc.HTTPNotFound(explanation=msg) except exception.InvalidID as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) return _translate_floating_ip_view(floating_ip) @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(()) @validation.query_schema(schema.index_query) def index(self, req): """Return a list of floating IPs allocated to a project.""" context = req.environ['nova.context'] context.can(fi_policies.BASE_POLICY_NAME % 'list', target={'project_id': context.project_id}) floating_ips = self.network_api.get_floating_ips_by_project(context) return {'floating_ips': [_translate_floating_ip_view(ip)['floating_ip'] for ip in floating_ips]} @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors((400, 403, 404)) @validation.schema(schema.create) def create(self, req, body=None): context = req.environ['nova.context'] context.can(fi_policies.BASE_POLICY_NAME % 'create', target={'project_id': context.project_id}) pool = None if body and 'pool' in body: pool = body['pool'] try: address = self.network_api.allocate_floating_ip(context, pool) ip = 
self.network_api.get_floating_ip_by_address(context, address) except exception.NoMoreFloatingIps: if pool: msg = _("No more floating IPs in pool %s.") % pool else: msg = _("No more floating IPs available.") raise webob.exc.HTTPNotFound(explanation=msg) except exception.FloatingIpLimitExceeded: if pool: msg = _("IP allocation over quota in pool %s.") % pool else: msg = _("IP allocation over quota.") raise webob.exc.HTTPForbidden(explanation=msg) except exception.FloatingIpPoolNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) except exception.FloatingIpBadRequest as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) return _translate_floating_ip_view(ip) @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.response(202) @wsgi.expected_errors((400, 403, 404, 409)) def delete(self, req, id): context = req.environ['nova.context'] context.can(fi_policies.BASE_POLICY_NAME % 'delete', target={'project_id': context.project_id}) # get the floating ip object try: floating_ip = self.network_api.get_floating_ip(context, id) except (exception.NotFound, exception.FloatingIpNotFound): msg = _("Floating IP not found for ID %s") % id raise webob.exc.HTTPNotFound(explanation=msg) except exception.InvalidID as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) address = floating_ip['floating_ip_address'] # get the associated instance object (if any) instance = get_instance_by_floating_ip_addr(self, context, address) try: self.network_api.disassociate_and_release_floating_ip( context, instance, floating_ip) except exception.Forbidden: raise webob.exc.HTTPForbidden() except exception.FloatingIpNotFoundForAddress as exc: raise webob.exc.HTTPNotFound(explanation=exc.format_message()) @validation.validated class FloatingIPActionController(wsgi.Controller): """This API is deprecated from the Microversion '2.44'.""" def __init__(self): super(FloatingIPActionController, self).__init__() self.compute_api = compute.API() self.network_api = neutron.API() @wsgi.api_version("2.1", "2.43") @wsgi.expected_errors((400, 403, 404)) @wsgi.action('addFloatingIp') @validation.schema(schema.add_floating_ip) @validation.response_body_schema(schema.add_floating_ip_response) def _add_floating_ip(self, req, id, body): """Associate floating_ip to an instance.""" context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, id, expected_attrs=['flavor']) context.can(fi_policies.BASE_POLICY_NAME % 'add', target={'project_id': instance.project_id}) address = body['addFloatingIp']['address'] cached_nwinfo = instance.get_network_info() if not cached_nwinfo: LOG.warning( 'Info cache is %r during associate with no nw_info cache', instance.info_cache, instance=instance) msg = _('Instance network is not ready yet') raise webob.exc.HTTPBadRequest(explanation=msg) fixed_ips = cached_nwinfo.fixed_ips() if not fixed_ips: msg = _('No fixed IPs associated to instance') raise webob.exc.HTTPBadRequest(explanation=msg) fixed_address = None if 'fixed_address' in body['addFloatingIp']: fixed_address = body['addFloatingIp']['fixed_address'] for fixed in fixed_ips: if fixed['address'] == fixed_address: break else: msg = _('Specified fixed address not assigned to instance') raise webob.exc.HTTPBadRequest(explanation=msg) if not fixed_address: try: fixed_address = next(ip['address'] for ip in fixed_ips if netutils.is_valid_ipv4(ip['address'])) except StopIteration: msg = _('Unable to associate floating IP %(address)s ' 'to any fixed IPs for instance %(id)s. 
' 'Instance has no fixed IPv4 addresses to ' 'associate.') % ( {'address': address, 'id': id}) raise webob.exc.HTTPBadRequest(explanation=msg) if len(fixed_ips) > 1: LOG.warning('multiple fixed_ips exist, using the first ' 'IPv4 fixed_ip: %s', fixed_address) try: self.network_api.associate_floating_ip(context, instance, floating_address=address, fixed_address=fixed_address) except exception.FloatingIpAssociated: msg = _('floating IP is already associated') raise webob.exc.HTTPBadRequest(explanation=msg) except exception.FloatingIpAssociateFailed as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) except exception.NoFloatingIpInterface: msg = _('l3driver call to add floating IP failed') raise webob.exc.HTTPBadRequest(explanation=msg) except exception.FloatingIpNotFoundForAddress: msg = _('floating IP not found') raise webob.exc.HTTPNotFound(explanation=msg) except exception.Forbidden as e: raise webob.exc.HTTPForbidden(explanation=e.format_message()) except Exception as e: msg = _('Unable to associate floating IP %(address)s to ' 'fixed IP %(fixed_address)s for instance %(id)s. ' 'Error: %(error)s') % ( {'address': address, 'fixed_address': fixed_address, 'id': id, 'error': e}) LOG.exception(msg) raise webob.exc.HTTPBadRequest(explanation=msg) return webob.Response(status_int=202) @wsgi.api_version("2.1", "2.43") @wsgi.expected_errors((400, 403, 404, 409)) @wsgi.action('removeFloatingIp') @validation.schema(schema.remove_floating_ip) @validation.response_body_schema(schema.remove_floating_ip_response) def _remove_floating_ip(self, req, id, body): """Dissociate floating_ip from an instance.""" context = req.environ['nova.context'] address = body['removeFloatingIp']['address'] # get the associated instance object (if any) instance = get_instance_by_floating_ip_addr(self, context, address) target = {} if instance: target = {'project_id': instance.project_id} context.can(fi_policies.BASE_POLICY_NAME % 'remove', target=target) # get the floating ip object try: floating_ip = self.network_api.get_floating_ip_by_address(context, address) except exception.FloatingIpNotFoundForAddress: msg = _("floating IP not found") raise webob.exc.HTTPNotFound(explanation=msg) # disassociate if associated if instance and floating_ip['port_id'] and instance.uuid == id: disassociate_floating_ip(self, context, instance, address) return webob.Response(status_int=202) else: msg = _("Floating IP %(address)s is not associated with instance " "%(id)s.") % {'address': address, 'id': id} raise webob.exc.HTTPConflict(explanation=msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/floating_ips_bulk.py0000664000175000017500000000375400000000000024017 0ustar00zuulzuul00000000000000# Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from webob import exc from nova.api.openstack.compute.schemas import floating_ips_bulk as schema from nova.api.openstack import wsgi from nova.api import validation _removal_reason = """\ This API only works with *nova-network*, which was deprecated in the 14.0.0 (Newton) release. It fails with HTTP 404 starting from microversion 2.36. It was removed in the 18.0.0 (Rocky) release. """ @validation.validated class FloatingIPBulkController(wsgi.Controller): @wsgi.expected_errors(410) @wsgi.removed('18.0.0', _removal_reason) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('18.0.0', _removal_reason) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response) def show(self, req, id): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('18.0.0', _removal_reason) @validation.schema(schema.create) @validation.response_body_schema(schema.create_response) def create(self, req, body): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('18.0.0', _removal_reason) @validation.schema(schema.update) @validation.response_body_schema(schema.update_response) def update(self, req, id, body): raise exc.HTTPGone() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/fping.py0000664000175000017500000000307000000000000021416 0ustar00zuulzuul00000000000000# Copyright 2011 Grid Dynamics # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc from nova.api.openstack.compute.schemas import fping as schema from nova.api.openstack import wsgi from nova.api import validation _removal_reason = """\ This API only works with *nova-network*, which was deprecated in the 14.0.0 (Newton) release. It fails with HTTP 404 starting from microversion 2.36. It was removed in the 18.0.0 (Rocky) release. """ @validation.validated class FpingController(wsgi.Controller): @wsgi.expected_errors(410) @wsgi.removed('18.0.0', _removal_reason) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('18.0.0', _removal_reason) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response) def show(self, req, id): raise exc.HTTPGone() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/helpers.py0000664000175000017500000001054300000000000021760 0ustar00zuulzuul00000000000000# Copyright 2016 HPE, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import strutils from webob import exc from nova.i18n import _ API_DISK_CONFIG = "OS-DCF:diskConfig" API_ACCESS_V4 = "accessIPv4" API_ACCESS_V6 = "accessIPv6" # possible ops CREATE = 'create' UPDATE = 'update' REBUILD = 'rebuild' RESIZE = 'resize' def disk_config_from_api(value): if value == 'AUTO': return True elif value == 'MANUAL': return False else: msg = _("%s must be either 'MANUAL' or 'AUTO'.") % API_DISK_CONFIG raise exc.HTTPBadRequest(explanation=msg) def get_injected_files(personality): """Create a list of injected files from the personality attribute. At this time, injected_files must be formatted as a list of (file_path, file_content) pairs for compatibility with the underlying compute service. """ injected_files = [] for item in personality: injected_files.append((item['path'], item['contents'])) return injected_files def translate_attributes(op, server_dict, operation_kwargs): """Translate REST attributes on create to server object kwargs. Our REST API is relatively fixed, but internal representations change over time, this is a translator for inbound REST request attributes that modifies the server dict that we get and adds appropriate attributes to ``operation_kwargs`` that will be passed down to instance objects later. It's done in a common function as this is used for create / resize / rebuild / update The ``op`` is the operation that we are transforming, because there are times when we translate differently for different operations. (Yes, it's a little nuts, but legacy... ) The ``server_dict`` is a representation of the server in question. During ``create`` and ``update`` operations this will actually be the ``server`` element of the request body. During actions, such as ``rebuild`` and ``resize`` this will be the attributes passed to the action object during the operation. This is equivalent to the ``server`` object. Not all operations support all attributes listed here. Which is why it's important to only set operation_kwargs if there is something to set. Input validation will ensure that we are only operating on appropriate attributes for each operation. """ # Disk config auto_disk_config_raw = server_dict.pop(API_DISK_CONFIG, None) if auto_disk_config_raw is not None: auto_disk_config = disk_config_from_api(auto_disk_config_raw) operation_kwargs['auto_disk_config'] = auto_disk_config if API_ACCESS_V4 in server_dict: operation_kwargs['access_ip_v4'] = server_dict.pop(API_ACCESS_V4) if API_ACCESS_V6 in server_dict: operation_kwargs['access_ip_v6'] = server_dict.pop(API_ACCESS_V6) # This is only ever expected during rebuild operations, and only # does anything with Ironic driver. It also demonstrates the lack # of understanding of the word ephemeral. if 'preserve_ephemeral' in server_dict and op == REBUILD: preserve = strutils.bool_from_string( server_dict.pop('preserve_ephemeral'), strict=True) operation_kwargs['preserve_ephemeral'] = preserve # yes, we use different kwargs, this goes all the way back to # commit cebc98176926f57016a508d5c59b11f55dfcf2b3. 
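    # Roughly, as an illustrative sketch (attribute names assumed from the
    # code below, not exhaustive): a rebuild body containing
    #   {"personality": [{"path": "/etc/motd", "contents": "..."}]}
    # ends up as operation_kwargs['files_to_inject'] = [("/etc/motd", "...")],
    # while the same attribute on create populates 'injected_files' instead.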
if 'personality' in server_dict: if op == REBUILD: operation_kwargs['files_to_inject'] = get_injected_files( server_dict.pop('personality')) # NOTE(sdague): the deprecated hooks infrastructure doesn't # function if injected files is not defined as a list. Once hooks # are removed, this should go back inside the personality # conditional above. if op == CREATE: operation_kwargs['injected_files'] = get_injected_files( server_dict.pop('personality', [])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/hosts.py0000664000175000017500000003127200000000000021460 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The hosts admin extension.""" from oslo_log import log as logging import webob.exc from nova.api.openstack import common from nova.api.openstack.compute.schemas import hosts as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import context as nova_context from nova import exception from nova import objects from nova.policies import hosts as hosts_policies LOG = logging.getLogger(__name__) @validation.validated class HostController(wsgi.Controller): """The Hosts API controller for the OpenStack API.""" def __init__(self): super(HostController, self).__init__() self.api = compute.HostAPI() @wsgi.api_version("2.1", "2.42") @wsgi.expected_errors(()) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req): """Returns a dict in the format | {'hosts': [{'host_name': 'some.host.name', | 'service': 'cells', | 'zone': 'internal'}, | {'host_name': 'some.other.host.name', | 'service': 'cells', | 'zone': 'internal'}, | {'host_name': 'some.celly.host.name', | 'service': 'cells', | 'zone': 'internal'}, | {'host_name': 'compute1.host.com', | 'service': 'compute', | 'zone': 'nova'}, | {'host_name': 'compute2.host.com', | 'service': 'compute', | 'zone': 'nova'}, | {'host_name': 'sched1.host.com', | 'service': 'scheduler', | 'zone': 'internal'}, | {'host_name': 'sched2.host.com', | 'service': 'scheduler', | 'zone': 'internal'}, | {'host_name': 'vol1.host.com', | 'service': 'volume', | 'zone': 'internal'}]} """ context = req.environ['nova.context'] context.can(hosts_policies.POLICY_NAME % 'list', target={}) filters = {'disabled': False} zone = req.GET.get('zone', None) if zone: filters['availability_zone'] = zone services = self.api.service_get_all(context, filters=filters, set_zones=True, all_cells=True) hosts = [] api_services = ('nova-osapi_compute', 'nova-metadata') for service in services: if service.binary not in api_services: hosts.append({'host_name': service['host'], 'service': service['topic'], 'zone': service['availability_zone']}) return {'hosts': hosts} @wsgi.api_version("2.1", "2.42") @wsgi.expected_errors((400, 404, 501)) @validation.schema(schema.update) 
@validation.response_body_schema(schema.update_response) def update(self, req, id, body): """Return booleanized version of body dict. :param Request req: The request object (containing 'nova-context' env var). :param str id: The host name. :param dict body: example format {'host': {'status': 'enable', 'maintenance_mode': 'enable'}} :return: Same dict as body but 'enable' strings for 'status' and 'maintenance_mode' are converted into True, else False. :rtype: dict """ def read_enabled(orig_val): # Convert enable/disable str to a bool. val = orig_val.strip().lower() return val == "enable" context = req.environ['nova.context'] context.can(hosts_policies.POLICY_NAME % 'update', target={}) # See what the user wants to 'update' status = body.get('status') maint_mode = body.get('maintenance_mode') if status is not None: status = read_enabled(status) if maint_mode is not None: maint_mode = read_enabled(maint_mode) # Make the calls and merge the results result = {'host': id} if status is not None: result['status'] = self._set_enabled_status(context, id, status) if maint_mode is not None: result['maintenance_mode'] = self._set_host_maintenance(context, id, maint_mode) return result def _set_host_maintenance(self, context, host_name, mode=True): """Start/Stop host maintenance window. On start, it triggers guest VMs evacuation. """ LOG.info("Putting host %(host_name)s in maintenance mode %(mode)s.", {'host_name': host_name, 'mode': mode}) try: result = self.api.set_host_maintenance(context, host_name, mode) except NotImplementedError: common.raise_feature_not_supported() except (exception.HostNotFound, exception.HostMappingNotFound) as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) except exception.ComputeServiceUnavailable as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) if result not in ("on_maintenance", "off_maintenance"): raise webob.exc.HTTPBadRequest(explanation=result) return result def _set_enabled_status(self, context, host_name, enabled): """Sets the specified host's ability to accept new instances. :param enabled: a boolean - if False no new VMs will be able to start on the host. 
""" if enabled: LOG.info("Enabling host %s.", host_name) else: LOG.info("Disabling host %s.", host_name) try: result = self.api.set_host_enabled(context, host_name, enabled) except NotImplementedError: common.raise_feature_not_supported() except (exception.HostNotFound, exception.HostMappingNotFound) as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) except exception.ComputeServiceUnavailable as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) if result not in ("enabled", "disabled"): raise webob.exc.HTTPBadRequest(explanation=result) return result def _host_power_action(self, req, host_name, action): """Reboots, shuts down or powers up the host.""" context = req.environ['nova.context'] try: result = self.api.host_power_action(context, host_name, action) except NotImplementedError: common.raise_feature_not_supported() except (exception.HostNotFound, exception.HostMappingNotFound) as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) except exception.ComputeServiceUnavailable as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) return {"host": host_name, "power_action": result} @wsgi.api_version("2.1", "2.42") @wsgi.expected_errors((400, 404, 501)) @validation.query_schema(schema.startup_query) @validation.response_body_schema(schema.startup_response) def startup(self, req, id): context = req.environ['nova.context'] context.can(hosts_policies.POLICY_NAME % 'start', target={}) return self._host_power_action(req, host_name=id, action="startup") @wsgi.api_version("2.1", "2.42") @wsgi.expected_errors((400, 404, 501)) @validation.query_schema(schema.shutdown_query) @validation.response_body_schema(schema.shutdown_response) def shutdown(self, req, id): context = req.environ['nova.context'] context.can(hosts_policies.POLICY_NAME % 'shutdown', target={}) return self._host_power_action(req, host_name=id, action="shutdown") @wsgi.api_version("2.1", "2.42") @wsgi.expected_errors((400, 404, 501)) @validation.query_schema(schema.reboot_query) @validation.response_body_schema(schema.reboot_response) def reboot(self, req, id): context = req.environ['nova.context'] context.can(hosts_policies.POLICY_NAME % 'reboot', target={}) return self._host_power_action(req, host_name=id, action="reboot") @staticmethod def _get_total_resources(host_name, compute_node): return {'resource': {'host': host_name, 'project': '(total)', 'cpu': compute_node.vcpus, 'memory_mb': compute_node.memory_mb, 'disk_gb': compute_node.local_gb}} @staticmethod def _get_used_now_resources(host_name, compute_node): return {'resource': {'host': host_name, 'project': '(used_now)', 'cpu': compute_node.vcpus_used, 'memory_mb': compute_node.memory_mb_used, 'disk_gb': compute_node.local_gb_used}} @staticmethod def _get_resource_totals_from_instances(host_name, instances): cpu_sum = 0 mem_sum = 0 hdd_sum = 0 for instance in instances: cpu_sum += instance['vcpus'] mem_sum += instance['memory_mb'] hdd_sum += instance['root_gb'] + instance['ephemeral_gb'] return {'resource': {'host': host_name, 'project': '(used_max)', 'cpu': cpu_sum, 'memory_mb': mem_sum, 'disk_gb': hdd_sum}} @staticmethod def _get_resources_by_project(host_name, instances): # Getting usage resource per project project_map = {} for instance in instances: resource = project_map.setdefault(instance['project_id'], {'host': host_name, 'project': instance['project_id'], 'cpu': 0, 'memory_mb': 0, 'disk_gb': 0}) resource['cpu'] += instance['vcpus'] resource['memory_mb'] += instance['memory_mb'] resource['disk_gb'] += 
(instance['root_gb'] + instance['ephemeral_gb']) return project_map @wsgi.api_version("2.1", "2.42") @wsgi.expected_errors(404) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response) def show(self, req, id): """Shows the physical/usage resource given by hosts. :param id: hostname :returns: expected to use HostShowTemplate. ex.:: {'host': {'resource':D},..} D: {'host': 'hostname','project': 'admin', 'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30} """ context = req.environ['nova.context'] context.can(hosts_policies.POLICY_NAME % 'show', target={}) host_name = id try: mapping = objects.HostMapping.get_by_host(context, host_name) nova_context.set_target_cell(context, mapping.cell_mapping) compute_node = ( objects.ComputeNode.get_first_node_by_host_for_old_compat( context, host_name)) instances = self.api.instance_get_all_by_host(context, host_name) except (exception.ComputeHostNotFound, exception.HostMappingNotFound) as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) resources = [self._get_total_resources(host_name, compute_node)] resources.append(self._get_used_now_resources(host_name, compute_node)) resources.append(self._get_resource_totals_from_instances(host_name, instances)) by_proj_resources = self._get_resources_by_project(host_name, instances) for resource in by_proj_resources.values(): resources.append({'resource': resource}) return {'host': resources} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/hypervisors.py0000664000175000017500000005626300000000000022724 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The hypervisors admin extension.""" from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import strutils from oslo_utils import uuidutils import webob.exc from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import hypervisors as schema from nova.api.openstack.compute.views import hypervisors as hyper_view from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.i18n import _ from nova.policies import hypervisors as hv_policies from nova import servicegroup from nova import utils LOG = logging.getLogger(__name__) @validation.validated class HypervisorsController(wsgi.Controller): """The Hypervisors API controller for the OpenStack API.""" _view_builder_class = hyper_view.ViewBuilder def __init__(self): super(HypervisorsController, self).__init__() self.host_api = compute.HostAPI() self.servicegroup_api = servicegroup.API() def _view_hypervisor( self, hypervisor, service, detail, req, servers=None, with_servers=False, ): alive = self.servicegroup_api.service_is_up(service) # The 2.53 microversion returns the compute node uuid rather than id. 
uuid_for_id = api_version_request.is_supported(req, "2.53") hyp_dict = { 'id': hypervisor.uuid if uuid_for_id else hypervisor.id, 'hypervisor_hostname': hypervisor.hypervisor_hostname, 'state': 'up' if alive else 'down', 'status': 'disabled' if service.disabled else 'enabled', } if detail: for field in ( 'hypervisor_type', 'hypervisor_version', 'host_ip', ): hyp_dict[field] = getattr(hypervisor, field) hyp_dict['service'] = { 'id': service.uuid if uuid_for_id else service.id, 'host': hypervisor.host, 'disabled_reason': service.disabled_reason, } # The 2.88 microversion removed these fields, so only add them on older # microversions if detail and not api_version_request.is_supported(req, '2.88'): for field in ( 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used', 'memory_mb_used', 'local_gb_used', 'free_ram_mb', 'free_disk_gb', 'current_workload', 'running_vms', 'disk_available_least', ): hyp_dict[field] = getattr(hypervisor, field) if api_version_request.is_supported(req, '2.28'): if hypervisor.cpu_info: hyp_dict['cpu_info'] = jsonutils.loads(hypervisor.cpu_info) else: hyp_dict['cpu_info'] = {} else: hyp_dict['cpu_info'] = hypervisor.cpu_info # The 2.88 microversion also *added* the 'uptime' field to the response if detail and api_version_request.is_supported(req, '2.88'): uptime = None if "stats" in hypervisor and "uptime" in hypervisor.stats: uptime = hypervisor.stats.get("uptime") else: try: uptime = self.host_api.get_host_uptime( req.environ['nova.context'], hypervisor.host) except ( NotImplementedError, # only raised in tests exception.ComputeServiceUnavailable, exception.HostMappingNotFound, exception.HostNotFound, ): # Only libvirt and ZVM drivers support this, and it's # not generally possible to get uptime for a down host pass hyp_dict['uptime'] = uptime if servers: hyp_dict['servers'] = [ {'name': serv['name'], 'uuid': serv['uuid']} for serv in servers ] # The 2.75 microversion adds 'servers' field always in response. # Empty list if there are no servers on hypervisors and it is # requested in request. elif with_servers and api_version_request.is_supported(req, '2.75'): hyp_dict['servers'] = [] return hyp_dict def _get_compute_nodes_by_name_pattern(self, context, hostname_match): compute_nodes = self.host_api.compute_node_search_by_hypervisor( context, hostname_match) if not compute_nodes: msg = (_("No hypervisor matching '%s' could be found.") % hostname_match) raise webob.exc.HTTPNotFound(explanation=msg) return compute_nodes def _get_hypervisors(self, req, detail=False, limit=None, marker=None, links=False): """Get hypervisors for the given request. :param req: nova.api.openstack.wsgi.Request for the GET request :param detail: If True, return a detailed response. :param limit: An optional user-supplied page limit. :param marker: An optional user-supplied marker for paging. :param links: If True, return links in the response for paging. """ context = req.environ['nova.context'] # The 2.53 microversion moves the search and servers routes into # GET /os-hypervisors and GET /os-hypervisors/detail with query # parameters. if api_version_request.is_supported(req, "2.53"): hypervisor_match = req.GET.get('hypervisor_hostname_pattern') with_servers = strutils.bool_from_string( req.GET.get('with_servers', False), strict=True) else: hypervisor_match = None with_servers = False if hypervisor_match is not None: # We have to check for 'limit' in the request itself because # the limit passed in is CONF.api.max_limit by default. 
if 'limit' in req.GET or marker: # Paging with hostname pattern isn't supported. raise webob.exc.HTTPBadRequest( _('Paging over hypervisors with the ' 'hypervisor_hostname_pattern query parameter is not ' 'supported.')) # Explicitly do not try to generate links when querying with the # hostname pattern since the request in the link would fail the # check above. links = False # Get all compute nodes with a hypervisor_hostname that matches # the given pattern. If none are found then it's a 404 error. compute_nodes = self._get_compute_nodes_by_name_pattern( context, hypervisor_match) else: # Get all compute nodes. try: compute_nodes = self.host_api.compute_node_get_all( context, limit=limit, marker=marker) except exception.MarkerNotFound: msg = _('marker [%s] not found') % marker raise webob.exc.HTTPBadRequest(explanation=msg) hypervisors_list = [] for hyp in compute_nodes: try: instances = None if with_servers: instances = self.host_api.instance_get_all_by_host( context, hyp.host) service = self.host_api.service_get_by_compute_host( context, hyp.host) except ( exception.ComputeHostNotFound, exception.HostMappingNotFound, ): # The compute service could be deleted which doesn't delete # the compute node record, that has to be manually removed # from the database so we just ignore it when listing nodes. LOG.debug('Unable to find service for compute node %s. The ' 'service may be deleted and compute nodes need to ' 'be manually cleaned up.', hyp.host) continue hypervisor = self._view_hypervisor( hyp, service, detail, req, servers=instances, with_servers=with_servers, ) hypervisors_list.append(hypervisor) hypervisors_dict = dict(hypervisors=hypervisors_list) if links: hypervisors_links = self._view_builder.get_links( req, hypervisors_list, detail) if hypervisors_links: hypervisors_dict['hypervisors_links'] = hypervisors_links return hypervisors_dict @wsgi.expected_errors((), '2.1', '2.32') @wsgi.expected_errors(400, '2.33', '2.52') @wsgi.expected_errors((400, 404), '2.53') @validation.query_schema(schema.index_query, '2.1', '2.32') @validation.query_schema(schema.index_query_v233, '2.33', '2.52') @validation.query_schema(schema.index_query_v253, '2.53') @validation.response_body_schema(schema.index_response, '2.1', '2.32') @validation.response_body_schema(schema.index_response_v233, '2.33', '2.52') # noqa: E501 @validation.response_body_schema(schema.index_response_v253, '2.53') def index(self, req): """List hypervisors. Starting with the 2.53 microversion, the id field in the response is the compute_nodes.uuid value. Also, the search and servers routes are superseded and replaced with query parameters for listing hypervisors by a hostname pattern and whether or not to include hosted servers in the response. 
""" limit = None marker = None links = False if api_version_request.is_supported(req, '2.33'): limit, marker = common.get_limit_and_marker(req) links = True return self._index(req, limit=limit, marker=marker, links=links) def _index(self, req, limit=None, marker=None, links=False): context = req.environ['nova.context'] context.can(hv_policies.BASE_POLICY_NAME % 'list', target={}) return self._get_hypervisors(req, detail=False, limit=limit, marker=marker, links=links) @wsgi.expected_errors((), '2.1', '2.32') @wsgi.expected_errors((400), '2.33', '2.52') @wsgi.expected_errors((400, 404), '2.53') @validation.query_schema(schema.index_query, '2.1', '2.32') @validation.query_schema(schema.index_query_v233, '2.33', '2.52') @validation.query_schema(schema.index_query_v253, '2.53') @validation.response_body_schema(schema.detail_response, '2.1', '2.27') @validation.response_body_schema(schema.detail_response_v228, '2.28', '2.32') # noqa: E501 @validation.response_body_schema(schema.detail_response_v233, '2.33', '2.52') # noqa: E501 @validation.response_body_schema(schema.detail_response_v253, '2.53', '2.87') # noqa: E501 @validation.response_body_schema(schema.detail_response_v288, '2.88') def detail(self, req): """List hypervisors with extra details. Starting with the 2.53 microversion, the id field in the response is the compute_nodes.uuid value. Also, the search and servers routes are superseded and replaced with query parameters for listing hypervisors by a hostname pattern and whether or not to include hosted servers in the response. """ limit = None marker = None links = False if api_version_request.is_supported(req, '2.33'): limit, marker = common.get_limit_and_marker(req) links = True return self._detail(req, limit=limit, marker=marker, links=links) def _detail(self, req, limit=None, marker=None, links=False): context = req.environ['nova.context'] context.can(hv_policies.BASE_POLICY_NAME % 'list-detail', target={}) return self._get_hypervisors(req, detail=True, limit=limit, marker=marker, links=links) @staticmethod def _validate_id(req, hypervisor_id): """Validates that the id is a uuid for microversions that require it. :param req: The HTTP request object which contains the requested microversion information. :param hypervisor_id: The provided hypervisor id. :raises: webob.exc.HTTPBadRequest if the requested microversion is greater than or equal to 2.53 and the id is not a uuid. :raises: webob.exc.HTTPNotFound if the requested microversion is less than 2.53 and the id is not an integer. """ if api_version_request.is_supported(req, "2.53"): if not uuidutils.is_uuid_like(hypervisor_id): msg = _('Invalid uuid %s') % hypervisor_id raise webob.exc.HTTPBadRequest(explanation=msg) else: try: utils.validate_integer(hypervisor_id, 'id') except exception.InvalidInput: msg = (_("Hypervisor with ID '%s' could not be found.") % hypervisor_id) raise webob.exc.HTTPNotFound(explanation=msg) @wsgi.expected_errors(404, '2.1', '2.52') @wsgi.expected_errors((400, 404), '2.53') @validation.query_schema(schema.show_query, '2.1', '2.52') @validation.query_schema(schema.show_query_v253, '2.53') @validation.response_body_schema(schema.show_response, '2.1', '2.27') @validation.response_body_schema(schema.show_response_v228, '2.28', '2.52') @validation.response_body_schema(schema.show_response_v253, '2.53', '2.87') @validation.response_body_schema(schema.show_response_v288, '2.88') def show(self, req, id): """Show a hypervisor. 
The 2.53 microversion requires that the id is a uuid and as a result it can also return a 400 response if an invalid uuid is passed. The 2.53 microversion also supports the with_servers query parameter to include a list of servers on the given hypervisor if requested. """ with_servers = False if api_version_request.is_supported(req, '2.53'): with_servers = strutils.bool_from_string( req.GET.get('with_servers', False), strict=True) return self._show(req, id, with_servers=with_servers) def _show(self, req, id, with_servers=False): context = req.environ['nova.context'] context.can(hv_policies.BASE_POLICY_NAME % 'show', target={}) self._validate_id(req, id) try: hyp = self.host_api.compute_node_get(context, id) except exception.ComputeHostNotFound: # If the ComputeNode is missing, that's a straight up 404 msg = _("Hypervisor with ID '%s' could not be found.") % id raise webob.exc.HTTPNotFound(explanation=msg) instances = None if with_servers: try: instances = self.host_api.instance_get_all_by_host( context, hyp.host) except exception.HostMappingNotFound: msg = _("Hypervisor with ID '%s' could not be found.") % id raise webob.exc.HTTPNotFound(explanation=msg) try: service = self.host_api.service_get_by_compute_host( context, hyp.host) except ( exception.ComputeHostNotFound, exception.HostMappingNotFound, ): msg = _("Hypervisor with ID '%s' could not be found.") % id raise webob.exc.HTTPNotFound(explanation=msg) return { 'hypervisor': self._view_hypervisor( hyp, service, detail=True, req=req, servers=instances, with_servers=with_servers, ), } @wsgi.api_version('2.1', '2.87') @wsgi.expected_errors((400, 404, 501)) @validation.query_schema(schema.uptime_query) @validation.response_body_schema(schema.uptime_response, '2.1', '2.52') @validation.response_body_schema(schema.uptime_response_v253, '2.53', '2.87') # noqa: E501 def uptime(self, req, id): """Prior to microversion 2.88, you could retrieve a special version of the hypervisor detail view that included uptime. Starting in 2.88, this field is now included in the standard detail view, making this API unnecessary. """ context = req.environ['nova.context'] context.can(hv_policies.BASE_POLICY_NAME % 'uptime', target={}) self._validate_id(req, id) try: hyp = self.host_api.compute_node_get(context, id) except exception.ComputeHostNotFound: # If the ComputeNode is missing, that's a straight up 404 msg = _("Hypervisor with ID '%s' could not be found.") % id raise webob.exc.HTTPNotFound(explanation=msg) try: service = self.host_api.service_get_by_compute_host( context, hyp.host) except ( exception.ComputeHostNotFound, exception.HostMappingNotFound, ): msg = _("Hypervisor with ID '%s' could not be found.") % id raise webob.exc.HTTPNotFound(explanation=msg) # Get the uptime try: uptime = self.host_api.get_host_uptime(context, hyp.host) except NotImplementedError: common.raise_feature_not_supported() except ( exception.ComputeServiceUnavailable, exception.HostNotFound, ) as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) except exception.HostMappingNotFound: # NOTE(danms): This mirrors the compute_node_get() behavior # where the node is missing, resulting in NotFound instead of # BadRequest if we fail on the map lookup. 
msg = _("Hypervisor with ID '%s' could not be found.") % id raise webob.exc.HTTPNotFound(explanation=msg) hypervisor = self._view_hypervisor(hyp, service, False, req) hypervisor['uptime'] = uptime return {'hypervisor': hypervisor} @wsgi.api_version('2.1', '2.52') @wsgi.expected_errors(404) @validation.query_schema(schema.search_query) @validation.response_body_schema(schema.search_response, '2.1', '2.52') def search(self, req, id): """Prior to microversion 2.53 you could search for hypervisors by a hostname pattern on a dedicated route. Starting with 2.53, searching by a hostname pattern is a query parameter in the GET /os-hypervisors index and detail methods. """ context = req.environ['nova.context'] context.can(hv_policies.BASE_POLICY_NAME % 'search', target={}) # Get all compute nodes with a hypervisor_hostname that matches # the given pattern. If none are found then it's a 404 error. compute_nodes = self._get_compute_nodes_by_name_pattern(context, id) hypervisors = [] for compute_node in compute_nodes: try: service = self.host_api.service_get_by_compute_host( context, compute_node.host) except exception.ComputeHostNotFound: # The compute service could be deleted which doesn't delete # the compute node record, that has to be manually removed # from the database so we just ignore it when listing nodes. LOG.debug( 'Unable to find service for compute node %s. The ' 'service may be deleted and compute nodes need to ' 'be manually cleaned up.', compute_node.host) continue except exception.HostMappingNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) hypervisor = self._view_hypervisor( compute_node, service, False, req) hypervisors.append(hypervisor) return {'hypervisors': hypervisors} @wsgi.api_version('2.1', '2.52') @wsgi.expected_errors(404) @validation.query_schema(schema.servers_query) @validation.response_body_schema(schema.servers_response, '2.1', '2.52') def servers(self, req, id): """Prior to microversion 2.53 you could search for hypervisors by a hostname pattern and include servers on those hosts in the response on a dedicated route. Starting with 2.53, searching by a hostname pattern and including hosted servers is a query parameter in the GET /os-hypervisors index and detail methods. """ context = req.environ['nova.context'] context.can(hv_policies.BASE_POLICY_NAME % 'servers', target={}) # Get all compute nodes with a hypervisor_hostname that matches # the given pattern. If none are found then it's a 404 error. compute_nodes = self._get_compute_nodes_by_name_pattern(context, id) hypervisors = [] for compute_node in compute_nodes: try: instances = self.host_api.instance_get_all_by_host( context, compute_node.host) except exception.HostMappingNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) try: service = self.host_api.service_get_by_compute_host( context, compute_node.host) except exception.ComputeHostNotFound: # The compute service could be deleted which doesn't delete # the compute node record, that has to be manually removed # from the database so we just ignore it when listing nodes. LOG.debug( 'Unable to find service for compute node %s. 
The ' 'service may be deleted and compute nodes need to ' 'be manually cleaned up.', compute_node.host) continue except exception.HostMappingNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) hypervisor = self._view_hypervisor( compute_node, service, False, req, instances) hypervisors.append(hypervisor) return {'hypervisors': hypervisors} @wsgi.api_version('2.1', '2.87') @wsgi.expected_errors(()) @validation.query_schema(schema.statistics_query) @validation.response_body_schema(schema.statistics_response, '2.1', '2.87') def statistics(self, req): """Prior to microversion 2.88, you could get statistics for the hypervisor. Most of these are now accessible from placement and the few that aren't as misleading and frequently misunderstood. """ context = req.environ['nova.context'] context.can(hv_policies.BASE_POLICY_NAME % 'statistics', target={}) stats = self.host_api.compute_node_statistics(context) return {'hypervisor_statistics': stats} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/image_metadata.py0000664000175000017500000001314000000000000023234 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
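# Body shapes handled by this controller, sketched with assumed example keys:
#   index / update_all: {"metadata": {"kernel_id": "...", "ramdisk_id": "..."}}
#   show / update:      {"meta": {"kernel_id": "..."}}
# The keys live in the image's Glance "properties" dict and are subject to
# the quota enforced via common.check_img_metadata_properties_quota().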
from webob import exc from nova.api.openstack import common from nova.api.openstack.compute.schemas import image_metadata as schema from nova.api.openstack import wsgi from nova.api import validation from nova import exception from nova.i18n import _ from nova.image import glance @validation.validated class ImageMetadataController(wsgi.Controller): """The image metadata API controller for the OpenStack API.""" def __init__(self): super().__init__() self.image_api = glance.API() def _get_image(self, context, image_id): try: return self.image_api.get(context, image_id) except exception.ImageNotAuthorized as e: raise exc.HTTPForbidden(explanation=e.format_message()) except exception.ImageNotFound: msg = _("Image not found.") raise exc.HTTPNotFound(explanation=msg) @wsgi.api_version('2.1', '2.38') @wsgi.expected_errors((403, 404)) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req, image_id): """Returns the list of metadata for a given instance.""" context = req.environ['nova.context'] metadata = self._get_image(context, image_id)['properties'] return {'metadata': metadata} @wsgi.api_version('2.1', '2.38') @wsgi.expected_errors((403, 404)) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response) def show(self, req, image_id, id): context = req.environ['nova.context'] metadata = self._get_image(context, image_id)['properties'] if id in metadata: return {'meta': {id: metadata[id]}} else: raise exc.HTTPNotFound() @wsgi.api_version('2.1', '2.38') @wsgi.expected_errors((400, 403, 404)) @validation.schema(schema.create) @validation.response_body_schema(schema.create_response) def create(self, req, image_id, body): context = req.environ['nova.context'] image = self._get_image(context, image_id) for key, value in body['metadata'].items(): image['properties'][key] = value common.check_img_metadata_properties_quota(context, image['properties']) try: image = self.image_api.update(context, image_id, image, data=None, purge_props=True) except exception.ImageNotAuthorized as e: raise exc.HTTPForbidden(explanation=e.format_message()) return {'metadata': image['properties']} @wsgi.api_version('2.1', '2.38') @wsgi.expected_errors((400, 403, 404)) @validation.schema(schema.update) @validation.response_body_schema(schema.update_response) def update(self, req, image_id, id, body): context = req.environ['nova.context'] meta = body['meta'] if id not in meta: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) image = self._get_image(context, image_id) image['properties'][id] = meta[id] common.check_img_metadata_properties_quota(context, image['properties']) try: self.image_api.update(context, image_id, image, data=None, purge_props=True) except exception.ImageNotAuthorized as e: raise exc.HTTPForbidden(explanation=e.format_message()) return {'meta': meta} @wsgi.api_version('2.1', '2.38') @wsgi.expected_errors((400, 403, 404)) @validation.schema(schema.update_all) @validation.response_body_schema(schema.update_all_response) def update_all(self, req, image_id, body): context = req.environ['nova.context'] image = self._get_image(context, image_id) metadata = body['metadata'] common.check_img_metadata_properties_quota(context, metadata) image['properties'] = metadata try: self.image_api.update(context, image_id, image, data=None, purge_props=True) except exception.ImageNotAuthorized as e: raise exc.HTTPForbidden(explanation=e.format_message()) return {'metadata': metadata} 
@wsgi.api_version('2.1', '2.38') @wsgi.expected_errors((403, 404)) @wsgi.response(204) @validation.response_body_schema(schema.delete_response) def delete(self, req, image_id, id): context = req.environ['nova.context'] image = self._get_image(context, image_id) if id not in image['properties']: msg = _("Invalid metadata key") raise exc.HTTPNotFound(explanation=msg) image['properties'].pop(id) try: self.image_api.update(context, image_id, image, data=None, purge_props=True) except exception.ImageNotAuthorized as e: raise exc.HTTPForbidden(explanation=e.format_message()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/images.py0000664000175000017500000001337100000000000021565 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob.exc from nova.api.openstack.api_version_request \ import MAX_PROXY_API_SUPPORT_VERSION from nova.api.openstack import common from nova.api.openstack.compute.schemas import images as schema from nova.api.openstack.compute.views import images as views_images from nova.api.openstack import wsgi from nova.api import validation from nova import exception from nova.i18n import _ from nova.image import glance SUPPORTED_FILTERS = { 'name': 'name', 'status': 'status', 'changes-since': 'changes-since', 'server': 'property-instance_uuid', 'type': 'property-image_type', 'minRam': 'min_ram', 'minDisk': 'min_disk', } @validation.validated class ImagesController(wsgi.Controller): """Base controller for retrieving/displaying images.""" _view_builder_class = views_images.ViewBuilder def __init__(self): super().__init__() self._image_api = glance.API() def _get_filters(self, req): """Return a dictionary of query param filters from the request. :param req: the Request object coming from the wsgi layer :returns: a dict of key/value filters """ filters = {} for param in req.params: if param in SUPPORTED_FILTERS or param.startswith('property-'): # map filter name or carry through if property-* filter_name = SUPPORTED_FILTERS.get(param, param) filters[filter_name] = req.params.get(param) # ensure server filter is the instance uuid filter_name = 'property-instance_uuid' try: filters[filter_name] = filters[filter_name].rsplit('/', 1)[1] except (AttributeError, IndexError, KeyError): pass filter_name = 'status' if filter_name in filters: # The Image API expects us to use lowercase strings for status filters[filter_name] = filters[filter_name].lower() return filters @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(404) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response) def show(self, req, id): """Return detailed information about a specific image. 
:param req: `wsgi.Request` object :param id: Image identifier """ context = req.environ['nova.context'] try: image = self._image_api.get(context, id) except (exception.ImageNotFound, exception.InvalidImageRef): explanation = _("Image not found.") raise webob.exc.HTTPNotFound(explanation=explanation) return self._view_builder.show(req, image) @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors((403, 404)) @wsgi.response(204) @validation.response_body_schema(schema.delete_response) def delete(self, req, id): """Delete an image, if allowed. :param req: `wsgi.Request` object :param id: Image identifier (integer) """ context = req.environ['nova.context'] try: self._image_api.delete(context, id) except exception.ImageNotFound: explanation = _("Image not found.") raise webob.exc.HTTPNotFound(explanation=explanation) except exception.ImageNotAuthorized: # The image service raises this exception on delete if glanceclient # raises HTTPForbidden. explanation = _("You are not allowed to delete the image.") raise webob.exc.HTTPForbidden(explanation=explanation) @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(400) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req): """Return an index listing of images available to the request. :param req: `wsgi.Request` object """ context = req.environ['nova.context'] filters = self._get_filters(req) page_params = common.get_pagination_params(req) try: images = self._image_api.get_all(context, filters=filters, **page_params) except exception.Invalid as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) return self._view_builder.index(req, images) @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(400) @validation.query_schema(schema.detail_query) @validation.response_body_schema(schema.detail_response) def detail(self, req): """Return a detailed index listing of images available to the request. :param req: `wsgi.Request` object. """ context = req.environ['nova.context'] filters = self._get_filters(req) page_params = common.get_pagination_params(req) try: images = self._image_api.get_all(context, filters=filters, **page_params) except exception.Invalid as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) return self._view_builder.detail(req, images) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/instance_actions.py0000664000175000017500000002307200000000000023643 0ustar00zuulzuul00000000000000# Copyright 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
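# NOTE: illustrative example only; the UUID placeholder is invented. The image
# index/detail handlers above translate nova query parameters into glance
# filters via SUPPORTED_FILTERS and _get_filters(). A request such as
#
#   GET /images?server=http://nova/servers/<uuid>&status=ACTIVE&minRam=512
#
# would be forwarded to glance with filters roughly like:
_example_glance_filters = {
    'property-instance_uuid': '<uuid>',  # 'server', reduced to the trailing UUID
    'status': 'active',                  # lowercased by _get_filters()
    'min_ram': '512',                    # 'minRam' mapped to 'min_ram'
}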
from webob import exc from oslo_utils import timeutils from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import instance_actions as schema from nova.api.openstack.compute.views \ import instance_actions as instance_actions_view from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.i18n import _ from nova.policies import instance_actions as ia_policies from nova import utils ACTION_KEYS = ['action', 'instance_uuid', 'request_id', 'user_id', 'project_id', 'start_time', 'message'] ACTION_KEYS_V258 = ['action', 'instance_uuid', 'request_id', 'user_id', 'project_id', 'start_time', 'message', 'updated_at'] EVENT_KEYS = ['event', 'start_time', 'finish_time', 'result', 'traceback'] @validation.validated class InstanceActionsController(wsgi.Controller): _view_builder_class = instance_actions_view.ViewBuilder def __init__(self): super(InstanceActionsController, self).__init__() self.compute_api = compute.API() self.action_api = compute.InstanceActionAPI() def _format_action(self, action_raw, action_keys): action = {} for key in action_keys: action[key] = action_raw.get(key) return action @staticmethod def _format_event(event_raw, project_id, show_traceback=False, show_host=False, show_hostid=False, show_details=False): event = {} for key in EVENT_KEYS: # By default, non-admins are not allowed to see traceback details. if key == 'traceback' and not show_traceback: continue event[key] = event_raw.get(key) # By default, non-admins are not allowed to see host. if show_host: event['host'] = event_raw['host'] if show_hostid: event['hostId'] = utils.generate_hostid(event_raw['host'], project_id) if show_details: event['details'] = event_raw['details'] return event def _get_instance(self, req, context, server_id): if not api_version_request.is_supported(req, '2.21'): return common.get_instance(self.compute_api, context, server_id) with utils.temporary_mutation(context, read_deleted='yes'): return common.get_instance(self.compute_api, context, server_id) @wsgi.expected_errors(404, "2.1", "2.57") @wsgi.expected_errors((400, 404), "2.58") @validation.query_schema(schema.index_query, "2.1", "2.57") @validation.query_schema(schema.index_query_v258, "2.58", "2.65") @validation.query_schema(schema.index_query_v266, "2.66") @validation.response_body_schema(schema.index_response, "2.1", "2.57") @validation.response_body_schema(schema.index_response_v258, "2.58") def index(self, req, server_id): """Returns the list of actions recorded for a given instance.""" context = req.environ["nova.context"] instance = self._get_instance(req, context, server_id) context.can(ia_policies.BASE_POLICY_NAME % 'list', target={'project_id': instance.project_id}) if api_version_request.is_supported(req, '2.58'): search_opts = {} search_opts.update(req.GET) if 'changes-since' in search_opts: search_opts['changes-since'] = timeutils.parse_isotime( search_opts['changes-since']) if 'changes-before' in search_opts: search_opts['changes-before'] = timeutils.parse_isotime( search_opts['changes-before']) changes_since = search_opts.get('changes-since') if (changes_since and search_opts['changes-before'] < search_opts['changes-since']): msg = _('The value of changes-since must be less than ' 'or equal to changes-before.') raise exc.HTTPBadRequest(explanation=msg) limit, marker = common.get_limit_and_marker(req) else: limit, marker, search_opts = None, None, None try: actions_raw 
= self.action_api.actions_get(context, instance, limit=limit, marker=marker, filters=search_opts) except exception.MarkerNotFound as e: raise exc.HTTPBadRequest(explanation=e.format_message()) if api_version_request.is_supported(req, '2.58'): actions = [self._format_action(action, ACTION_KEYS_V258) for action in actions_raw] else: actions = [self._format_action(action, ACTION_KEYS) for action in actions_raw] actions_dict = {'instanceActions': actions} if api_version_request.is_supported(req, '2.58'): if actions_links := self._view_builder.get_links( req, server_id, actions ): actions_dict['links'] = actions_links return actions_dict @wsgi.expected_errors(404) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response, "2.1", "2.50") @validation.response_body_schema(schema.show_response_v251, "2.51", "2.57") @validation.response_body_schema(schema.show_response_v258, "2.58", "2.61") @validation.response_body_schema(schema.show_response_v262, "2.62", "2.83") @validation.response_body_schema(schema.show_response_v284, "2.84") def show(self, req, server_id, id): """Return data about the given instance action.""" context = req.environ['nova.context'] instance = self._get_instance(req, context, server_id) context.can(ia_policies.BASE_POLICY_NAME % 'show', target={'project_id': instance.project_id}) action = self.action_api.action_get_by_request_id(context, instance, id) if action is None: msg = _("Action %s not found") % id raise exc.HTTPNotFound(explanation=msg) action_id = action['id'] if api_version_request.is_supported(req, '2.58'): action = self._format_action(action, ACTION_KEYS_V258) else: action = self._format_action(action, ACTION_KEYS) # Prior to microversion 2.51, events would only be returned in the # response for admins by default policy rules. Starting in # microversion 2.51, events are returned for admin_or_owner (of the # instance) but the "traceback" field is only shown for admin users # by default. show_events = False show_traceback = False show_host = False if context.can(ia_policies.BASE_POLICY_NAME % 'events', target={'project_id': instance.project_id}, fatal=False): # For all microversions, the user can see all event details # including the traceback. show_events = True show_traceback = True show_host = api_version_request.is_supported(req, '2.62') elif api_version_request.is_supported(req, '2.51'): # The user is not able to see all event details, but they can at # least see the non-traceback event details. show_events = True # An obfuscated hashed host id is returned since microversion 2.62 # for all users. show_hostid = api_version_request.is_supported(req, '2.62') if show_events: # NOTE(brinzhang): Event details are shown since microversion # 2.84. show_details = False if api_version_request.is_supported(req, '2.84'): show_details = context.can( ia_policies.BASE_POLICY_NAME % 'events:details', target={'project_id': instance.project_id}, fatal=False) events_raw = self.action_api.action_events_get(context, instance, action_id) # NOTE(takashin): The project IDs of instance action events # become null (None) when instance action events are created # by periodic tasks. If the project ID is null (None), # it causes an error when 'hostId' is generated. # If the project ID is null (None), pass the project ID of # the server instead of that of instance action events. 
action['events'] = [self._format_event( evt, action['project_id'] or instance.project_id, show_traceback=show_traceback, show_host=show_host, show_hostid=show_hostid, show_details=show_details ) for evt in events_raw] return {'instanceAction': action} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/instance_usage_audit_log.py0000664000175000017500000001142500000000000025335 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import webob.exc from nova.api.openstack.compute.schemas import instance_usage_audit_log as schema # noqa: E501 from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova.compute import rpcapi as compute_rpcapi from nova.i18n import _ from nova.policies import instance_usage_audit_log as iual_policies from nova import utils class InstanceUsageAuditLogController(wsgi.Controller): def __init__(self): super(InstanceUsageAuditLogController, self).__init__() self.host_api = compute.HostAPI() @wsgi.expected_errors(()) @validation.query_schema(schema.index_query) def index(self, req): context = req.environ['nova.context'] context.can(iual_policies.BASE_POLICY_NAME % 'list', target={}) task_log = self._get_audit_task_logs(context) return {'instance_usage_audit_logs': task_log} @wsgi.expected_errors(400) @validation.query_schema(schema.show_query) def show(self, req, id): context = req.environ['nova.context'] context.can(iual_policies.BASE_POLICY_NAME % 'show', target={}) try: if '.' in id: before_date = datetime.datetime.strptime(str(id), "%Y-%m-%d %H:%M:%S.%f") else: before_date = datetime.datetime.strptime(str(id), "%Y-%m-%d %H:%M:%S") except ValueError: msg = _("Invalid timestamp for date %s") % id raise webob.exc.HTTPBadRequest(explanation=msg) task_log = self._get_audit_task_logs(context, before=before_date) return {'instance_usage_audit_log': task_log} def _get_audit_task_logs(self, context, before=None): """Returns a full log for all instance usage audit tasks on all computes. :param context: Nova request context. :param before: By default we look for the audit period most recently completed before this datetime. Has no effect if both begin and end are specified. """ begin, end = utils.last_completed_audit_period(before=before) task_logs = self.host_api.task_log_get_all(context, "instance_usage_audit", begin, end) # We do this in this way to include disabled compute services, # which can have instances on them. 
(mdragon) filters = {'topic': compute_rpcapi.RPC_TOPIC} services = self.host_api.service_get_all(context, filters=filters) hosts = set(serv['host'] for serv in services) seen_hosts = set() done_hosts = set() running_hosts = set() total_errors = 0 total_items = 0 for tlog in task_logs: seen_hosts.add(tlog['host']) if tlog['state'] == "DONE": done_hosts.add(tlog['host']) if tlog['state'] == "RUNNING": running_hosts.add(tlog['host']) total_errors += tlog['errors'] total_items += tlog['task_items'] log = {tl['host']: dict(state=tl['state'], instances=tl['task_items'], errors=tl['errors'], message=tl['message']) for tl in task_logs} missing_hosts = hosts - seen_hosts overall_status = "%s hosts done. %s errors." % ( 'ALL' if len(done_hosts) == len(hosts) else "%s of %s" % (len(done_hosts), len(hosts)), total_errors) return dict(period_beginning=str(begin), period_ending=str(end), num_hosts=len(hosts), num_hosts_done=len(done_hosts), num_hosts_running=len(running_hosts), num_hosts_not_run=len(missing_hosts), hosts_not_run=list(missing_hosts), total_instances=total_items, total_errors=total_errors, overall_status=overall_status, log=log) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/ips.py0000664000175000017500000000475300000000000021117 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
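# NOTE: illustrative example only; host names and counts are invented. The
# audit-log helper above reduces the per-host task logs to a single
# "overall_status" string. The same formatting, reproduced standalone:
done_hosts = {'compute-1', 'compute-2'}
hosts = {'compute-1', 'compute-2', 'compute-3'}
total_errors = 1
overall_status = "%s hosts done. %s errors." % (
    'ALL' if len(done_hosts) == len(hosts)
    else "%s of %s" % (len(done_hosts), len(hosts)),
    total_errors)
# overall_status == "2 of 3 hosts done. 1 errors."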
from webob import exc from nova.api.openstack import common from nova.api.openstack.compute.schemas import ips as schema from nova.api.openstack.compute.views import ips as views from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova.i18n import _ from nova.policies import ips as ips_policies @validation.validated class IPsController(wsgi.Controller): """The servers addresses API controller for the OpenStack API.""" _view_builder_class = views.ViewBuilder def __init__(self): super(IPsController, self).__init__() self._compute_api = compute.API() @wsgi.expected_errors(404) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req, server_id): context = req.environ["nova.context"] instance = common.get_instance(self._compute_api, context, server_id) context.can(ips_policies.POLICY_ROOT % 'index', target={'project_id': instance.project_id}) networks = common.get_networks_for_instance(context, instance) return self._view_builder.index(req, networks) @wsgi.expected_errors(404) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response) def show(self, req, server_id, id): context = req.environ["nova.context"] instance = common.get_instance(self._compute_api, context, server_id) context.can(ips_policies.POLICY_ROOT % 'show', target={'project_id': instance.project_id}) networks = common.get_networks_for_instance(context, instance) if id not in networks: msg = _("Instance is not a member of specified network") raise exc.HTTPNotFound(explanation=msg) return self._view_builder.show(req, networks[id], id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/keypairs.py0000664000175000017500000002222600000000000022146 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
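# NOTE: illustrative example only; the network name and address are invented
# and the exact payload comes from a view builder that is not shown here.
# Based on the server-addresses API the IPs controller above implements, an
# index() response is shaped roughly like:
_example_addresses_response = {
    'addresses': {
        'private': [
            {'version': 4, 'addr': '192.168.0.3'},
        ],
    },
}
# show() returns a single network keyed by its name, e.g. {'private': [...]},
# and answers 404 when that network is not attached to the server.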
"""Keypair management extension.""" import webob import webob.exc from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import keypairs as schema from nova.api.openstack.compute.views import keypairs as keypairs_view from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute_api from nova import exception from nova.objects import keypair as keypair_obj from nova.policies import keypairs as kp_policies @validation.validated class KeypairController(wsgi.Controller): """Keypair API controller for the OpenStack API.""" _view_builder_class = keypairs_view.ViewBuilder def __init__(self): super(KeypairController, self).__init__() self.api = compute_api.KeypairAPI() @wsgi.response(200, "2.0", "2.1") @wsgi.response(201, "2.2") @wsgi.expected_errors((400, 403, 409)) @validation.schema(schema.create_v20, "2.0", "2.0") @validation.schema(schema.create, "2.1", "2.1") @validation.schema(schema.create_v22, "2.2", "2.9") @validation.schema(schema.create_v210, "2.10", "2.91") @validation.schema(schema.create_v292, "2.92") @validation.response_body_schema(schema.create_response, "2.0", "2.1") @validation.response_body_schema(schema.create_response_v22, "2.2", "2.91") @validation.response_body_schema(schema.create_response_v292, "2.92") def create(self, req, body): """Create or import keypair. Sending name will generate a key and return private_key and fingerprint. You can send a public_key to add an existing ssh key. Starting in API microversion 2.2, keypairs will have the type ssh or x509, specified by type. Starting in API microversion 2.10, you can request a user if you are an admin. Starting in API microversion 2.91, keypair generation is no longer permitted. """ key_type = False if api_version_request.is_supported(req, '2.2'): key_type = True user_id = None if api_version_request.is_supported(req, '2.10'): # handle optional user-id for admin only user_id = body['keypair'].get('user_id') return self._create(req, body, key_type=key_type, user_id=user_id) def _create(self, req, body, user_id=None, key_type=False): context = req.environ['nova.context'] params = body['keypair'] name = common.normalize_name(params['name']) key_type_value = params.get('type', keypair_obj.KEYPAIR_TYPE_SSH) user_id = user_id or context.user_id context.can(kp_policies.POLICY_ROOT % 'create', target={'user_id': user_id}) try: if 'public_key' in params: keypair = self.api.import_key_pair( context, user_id, name, params['public_key'], key_type_value) return_priv_key = False else: # public_key is a required field starting with 2.92 so this # generation should only happen with older versions. 
keypair, private_key = self.api.create_key_pair( context, user_id, name, key_type_value) keypair['private_key'] = private_key return_priv_key = True except exception.KeypairLimitExceeded as e: raise webob.exc.HTTPForbidden(explanation=str(e)) except exception.InvalidKeypair as exc: raise webob.exc.HTTPBadRequest(explanation=exc.format_message()) except exception.KeyPairExists as exc: raise webob.exc.HTTPConflict(explanation=exc.format_message()) return self._view_builder.create(keypair, private_key=return_priv_key, key_type=key_type) def _get_user_id(self, req): if 'user_id' in req.GET.keys(): user_id = req.GET.getall('user_id')[0] return user_id @wsgi.response(202, '2.0', '2.1') @wsgi.response(204, '2.2') @validation.query_schema(schema.delete_query_schema_v20, '2.0', '2.9') @validation.query_schema(schema.delete_query_schema_v210, '2.10', '2.74') @validation.query_schema(schema.delete_query_schema_v275, '2.75') @validation.response_body_schema(schema.delete_response) @wsgi.expected_errors(404) def delete(self, req, id): user_id = None if api_version_request.is_supported(req, '2.10'): # handle optional user-id for admin only user_id = self._get_user_id(req) self._delete(req, id, user_id=user_id) def _delete(self, req, id, user_id=None): """Delete a keypair with a given name.""" context = req.environ['nova.context'] # handle optional user-id for admin only user_id = user_id or context.user_id context.can(kp_policies.POLICY_ROOT % 'delete', target={'user_id': user_id}) try: self.api.delete_key_pair(context, user_id, id) except exception.KeypairNotFound as exc: raise webob.exc.HTTPNotFound(explanation=exc.format_message()) @validation.query_schema(schema.show_query_schema_v20, '2.0', '2.9') @validation.query_schema(schema.show_query_schema_v210, '2.10', '2.74') @validation.query_schema(schema.show_query_schema_v275, '2.75') @validation.response_body_schema(schema.show_response, '2.0', '2.1') @validation.response_body_schema(schema.show_response_v22, '2.2') @wsgi.expected_errors(404) def show(self, req, id): key_type = False if api_version_request.is_supported(req, '2.2'): key_type = True user_id = None if api_version_request.is_supported(req, '2.10'): # handle optional user-id for admin only user_id = self._get_user_id(req) return self._show(req, id, key_type=key_type, user_id=user_id) def _show(self, req, id, key_type=False, user_id=None): """Return data for the given key name.""" context = req.environ['nova.context'] user_id = user_id or context.user_id context.can(kp_policies.POLICY_ROOT % 'show', target={'user_id': user_id}) try: keypair = self.api.get_key_pair(context, user_id, id) except exception.KeypairNotFound as exc: raise webob.exc.HTTPNotFound(explanation=exc.format_message()) return self._view_builder.show(keypair, key_type=key_type) @validation.query_schema(schema.index_query_schema_v20, '2.0', '2.9') @validation.query_schema(schema.index_query_schema_v210, '2.10', '2.34') @validation.query_schema(schema.index_query_schema_v235, '2.35', '2.74') @validation.query_schema(schema.index_query_schema_v275, '2.75') @validation.response_body_schema(schema.index_response, '2.0', '2.1') @validation.response_body_schema(schema.index_response_v22, '2.2', '2.34') @validation.response_body_schema(schema.index_response_v235, '2.35') @wsgi.expected_errors((), '2.0', '2.34') @wsgi.expected_errors(400, '2.35') def index(self, req): key_type = False if api_version_request.is_supported(req, '2.2'): key_type = True user_id = None if api_version_request.is_supported(req, '2.10'): # handle optional 
user-id for admin only user_id = self._get_user_id(req) links = False if api_version_request.is_supported(req, '2.35'): links = True # TODO(stephenfin): Remove deleted, deleted_at, and updated_at from # response in future microversion. return self._index( req, key_type=key_type, user_id=user_id, links=links ) def _index(self, req, key_type=False, user_id=None, links=False): """List of keypairs for a user.""" context = req.environ['nova.context'] user_id = user_id or context.user_id context.can(kp_policies.POLICY_ROOT % 'index', target={'user_id': user_id}) if api_version_request.is_supported(req, '2.35'): limit, marker = common.get_limit_and_marker(req) else: limit = marker = None try: key_pairs = self.api.get_key_pairs( context, user_id, limit=limit, marker=marker) except exception.MarkerNotFound as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) return self._view_builder.index(req, key_pairs, key_type=key_type, links=links) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/limits.py0000664000175000017500000000563700000000000021627 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.openstack import api_version_request from nova.api.openstack.compute.schemas import limits from nova.api.openstack.compute.views import limits as limits_views from nova.api.openstack import wsgi from nova.api import validation from nova.policies import limits as limits_policies from nova import quota QUOTAS = quota.QUOTAS # This is a list of limits which needs to filter out from the API response. # This is due to the deprecation of network related proxy APIs, the related # limit should be removed from the API also. 
FILTERED_LIMITS_2_36 = ['floating_ips', 'security_groups', 'security_group_rules'] FILTERED_LIMITS_2_57 = list(FILTERED_LIMITS_2_36) FILTERED_LIMITS_2_57.extend(['injected_files', 'injected_file_content_bytes']) class LimitsController(wsgi.Controller): """Controller for accessing limits in the OpenStack API.""" @wsgi.expected_errors(()) @validation.query_schema(limits.limits_query_schema, '2.1', '2.56') @validation.query_schema(limits.limits_query_schema, '2.57', '2.74') @validation.query_schema(limits.limits_query_schema_275, '2.75') def index(self, req): filtered_limits = [] if api_version_request.is_supported(req, '2.57'): filtered_limits = FILTERED_LIMITS_2_57 elif api_version_request.is_supported(req, '2.36'): filtered_limits = FILTERED_LIMITS_2_36 max_image_meta = True if api_version_request.is_supported(req, '2.39'): max_image_meta = False return self._index(req, filtered_limits=filtered_limits, max_image_meta=max_image_meta) def _index(self, req, filtered_limits=None, max_image_meta=True): """Return all global limit information.""" context = req.environ['nova.context'] context.can(limits_policies.BASE_POLICY_NAME, target={}) project_id = context.project_id if 'tenant_id' in req.GET: project_id = req.GET.get('tenant_id') context.can(limits_policies.OTHER_PROJECT_LIMIT_POLICY_NAME) quotas = QUOTAS.get_project_quotas(context, project_id, usages=True) builder = limits_views.ViewBuilder() return builder.build(req, quotas, filtered_limits=filtered_limits, max_image_meta=max_image_meta) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/lock_server.py0000664000175000017500000000534200000000000022635 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
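# NOTE: illustrative example only; the microversion handling is simplified to
# a (major, minor) tuple comparison. It mirrors how the limits index above
# selects which deprecated quotas to hide from the response:
def _pick_filtered_limits(version):
    filtered_2_36 = ['floating_ips', 'security_groups', 'security_group_rules']
    filtered_2_57 = filtered_2_36 + ['injected_files',
                                     'injected_file_content_bytes']
    if version >= (2, 57):
        return filtered_2_57
    if version >= (2, 36):
        return filtered_2_36
    return []

assert _pick_filtered_limits((2, 1)) == []
assert 'floating_ips' in _pick_filtered_limits((2, 40))
assert 'injected_files' in _pick_filtered_limits((2, 60))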
from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import lock_server as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova.policies import lock_server as ls_policies @validation.validated class LockServerController(wsgi.Controller): def __init__(self): super(LockServerController, self).__init__() self.compute_api = compute.API() @wsgi.response(202) @wsgi.expected_errors(404) @wsgi.action('lock') @validation.schema(schema.lock, "2.1", "2.72") @validation.schema(schema.lock_v273, "2.73") @validation.response_body_schema(schema.lock_response) def _lock(self, req, id, body): """Lock a server instance.""" context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, id) context.can(ls_policies.POLICY_ROOT % 'lock', target={'user_id': instance.user_id, 'project_id': instance.project_id}) reason = None if ( api_version_request.is_supported(req, '2.73') and body['lock'] is not None ): reason = body['lock'].get('locked_reason') self.compute_api.lock(context, instance, reason=reason) @wsgi.response(202) @wsgi.expected_errors(404) @wsgi.action('unlock') @validation.schema(schema.unlock, "2.1", "2.72") @validation.response_body_schema(schema.unlock_response) def _unlock(self, req, id, body): """Unlock a server instance.""" context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, id) context.can(ls_policies.POLICY_ROOT % 'unlock', target={'project_id': instance.project_id}) if not self.compute_api.is_expected_locked_by(context, instance): context.can(ls_policies.POLICY_ROOT % 'unlock:unlock_override', target={'project_id': instance.project_id}) self.compute_api.unlock(context, instance) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/migrate_server.py0000664000175000017500000001760200000000000023337 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
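# NOTE: illustrative example only; the reason text is invented. The lock
# action above accepts a null body value on every microversion and only reads
# 'locked_reason' from 2.73 onwards, so valid request bodies look like:
_example_lock_body = {'lock': None}                      # any microversion
_example_lock_body_v273 = {'lock': {'locked_reason': 'incident triage'}}
_example_unlock_body = {'unlock': None}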
from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import strutils from webob import exc from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import migrate_server as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.i18n import _ from nova.policies import migrate_server as ms_policies LOG = logging.getLogger(__name__) @validation.validated class MigrateServerController(wsgi.Controller): def __init__(self): super(MigrateServerController, self).__init__() self.compute_api = compute.API() @wsgi.response(202) @wsgi.expected_errors((400, 403, 404, 409)) @wsgi.action('migrate') @validation.schema(schema.migrate, "2.56") @validation.response_body_schema(schema.migrate_response) def _migrate(self, req, id, body): """Permit admins to migrate a server to a new host.""" context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, id, expected_attrs=['flavor', 'services']) host_name = None if ( api_version_request.is_supported(req, '2.56') and body['migrate'] is not None ): host_name = body['migrate'].get('host') if host_name: context.can(ms_policies.POLICY_ROOT % 'migrate:host', target={'project_id': instance.project_id}) else: context.can(ms_policies.POLICY_ROOT % 'migrate', target={'project_id': instance.project_id}) try: self.compute_api.resize(req.environ['nova.context'], instance, host_name=host_name) except exception.OverQuota as e: raise exc.HTTPForbidden(explanation=e.format_message()) except ( exception.InstanceIsLocked, exception.InstanceNotReady, exception.ServiceUnavailable, ) as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'migrate', id) except exception.InstanceNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) except ( exception.ComputeHostNotFound, exception.CannotMigrateToSameHost, exception.ForbiddenPortsWithAccelerator, exception.ExtendedResourceRequestOldCompute, ) as e: raise exc.HTTPBadRequest(explanation=e.format_message()) except ( exception.ForbiddenSharesNotSupported, exception.ForbiddenWithShare, ) as e: raise exc.HTTPConflict(explanation=e.format_message()) @wsgi.response(202) @wsgi.expected_errors((400, 403, 404, 409)) @wsgi.action('os-migrateLive') @validation.schema(schema.migrate_live, "2.0", "2.24") @validation.schema(schema.migrate_live_v2_25, "2.25", "2.29") @validation.schema(schema.migrate_live_v2_30, "2.30", "2.67") @validation.schema(schema.migrate_live_v2_68, "2.68") @validation.response_body_schema(schema.migrate_live_response) def _migrate_live(self, req, id, body): """Permit admins to (live) migrate a server to a new host.""" context = req.environ["nova.context"] # NOTE(stephenfin): we need 'numa_topology' because of the # 'LiveMigrationTask._check_instance_has_no_numa' check in the # conductor instance = common.get_instance(self.compute_api, context, id, expected_attrs=['numa_topology']) host = body["os-migrateLive"]["host"] if host: context.can(ms_policies.POLICY_ROOT % 'migrate_live:host', target={'project_id': instance.project_id}) else: context.can(ms_policies.POLICY_ROOT % 'migrate_live', target={'project_id': instance.project_id}) block_migration = body["os-migrateLive"]["block_migration"] force = None async_ = api_version_request.is_supported(req, '2.34') if 
api_version_request.is_supported(req, '2.30'): force = self._get_force_param_for_live_migration(body, host) if api_version_request.is_supported(req, '2.25'): if block_migration == 'auto': block_migration = None else: block_migration = strutils.bool_from_string(block_migration, strict=True) disk_over_commit = None else: disk_over_commit = body["os-migrateLive"]["disk_over_commit"] block_migration = strutils.bool_from_string(block_migration, strict=True) disk_over_commit = strutils.bool_from_string(disk_over_commit, strict=True) try: self.compute_api.live_migrate(context, instance, block_migration, disk_over_commit, host, force, async_) except (exception.NoValidHost, exception.ComputeServiceUnavailable, exception.InvalidHypervisorType, exception.InvalidCPUInfo, exception.UnableToMigrateToSelf, exception.DestinationHypervisorTooOld, exception.InvalidLocalStorage, exception.InvalidSharedStorage, exception.HypervisorUnavailable, exception.MigrationPreCheckError, exception.ForbiddenPortsWithAccelerator) as ex: if async_: with excutils.save_and_reraise_exception(): LOG.error("Unexpected exception received from " "conductor during pre-live-migration checks " "'%(ex)s'", {'ex': ex}) else: raise exc.HTTPBadRequest(explanation=ex.format_message()) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except ( exception.ComputeHostNotFound, exception.ExtendedResourceRequestOldCompute, )as e: raise exc.HTTPBadRequest(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'os-migrateLive', id) except ( exception.ForbiddenSharesNotSupported, exception.ForbiddenWithShare, ) as e: raise exc.HTTPConflict(explanation=e.format_message()) def _get_force_param_for_live_migration(self, body, host): force = body["os-migrateLive"].get("force", False) force = strutils.bool_from_string(force, strict=True) if force is True and not host: message = _("Can't force to a non-provided destination") raise exc.HTTPBadRequest(explanation=message) return force ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/migrations.py0000664000175000017500000002367300000000000022502 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
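# NOTE: illustrative example only; the destination host is invented. The
# live-migration handler above interprets the request body differently by
# microversion:
#   < 2.25  block_migration and disk_over_commit are read as booleans
#           (coerced with strutils.bool_from_string)
#   >= 2.25 block_migration may be 'auto' (mapped to None) and
#           disk_over_commit is no longer read
#   >= 2.30 an optional 'force' flag is accepted, but forcing without a
#           destination host is rejected with HTTP 400
_example_live_migration_body = {
    'os-migrateLive': {
        'host': 'compute-7',
        'block_migration': 'auto',
        'force': False,
    },
}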
from oslo_utils import timeutils from webob import exc from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import migrations as schema from nova.api.openstack.compute.views import migrations as migrations_view from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.i18n import _ from nova.objects import base as obj_base from nova.objects import fields from nova.policies import migrations as migrations_policies class MigrationsController(wsgi.Controller): """Controller for accessing migrations in OpenStack API.""" _view_builder_class = migrations_view.ViewBuilder _collection_name = "servers/%s/migrations" def __init__(self): super(MigrationsController, self).__init__() self.compute_api = compute.API() def _output(self, req, migrations_obj, add_link=False, add_uuid=False, add_user_project=False, add_host=True): """Returns the desired output of the API from an object. From a MigrationsList's object this method returns a list of primitive objects with the only necessary fields. """ detail_keys = ['memory_total', 'memory_processed', 'memory_remaining', 'disk_total', 'disk_processed', 'disk_remaining'] # TODO(Shaohe Feng) we should share the in-progress list. live_migration_in_progress = ['queued', 'preparing', 'running', 'post-migrating'] # Note(Shaohe Feng): We need to leverage the oslo.versionedobjects. # Then we can pass the target version to it's obj_to_primitive. objects = obj_base.obj_to_primitive(migrations_obj) objects = [x for x in objects if not x['hidden']] for obj in objects: del obj['deleted'] del obj['deleted_at'] del obj['hidden'] del obj['cross_cell_move'] del obj['dest_compute_id'] if not add_uuid: del obj['uuid'] if 'memory_total' in obj: for key in detail_keys: del obj[key] if not add_user_project: if 'user_id' in obj: del obj['user_id'] if 'project_id' in obj: del obj['project_id'] # TODO(gmaan): This API (and list/show server migrations) does not # return the 'server_host', which is strange and not consistent # with the info returned for the destination host. This needs to # be fixed with microversion bump. When we do that, there are some # more improvement can be done in this and list/show server # migrations API. It makes sense to do all those improvements # in a single microversion: # - Non-admin user can get their own project migrations if the # policy does not permit listing all projects migrations (for more # details, refer to the comment in _index() method). # - Check the comments in the file # api/openstack/compute/server_migrations.py for more possible # improvement in the list server migration API. if not add_host: obj['dest_compute'] = None obj['dest_host'] = None obj['dest_node'] = None obj['source_compute'] = None obj['source_node'] = None # NOTE(Shaohe Feng) above version 2.23, add migration_type for all # kinds of migration, but we only add links just for in-progress # live-migration. 
if (add_link and obj['migration_type'] == fields.MigrationType.LIVE_MIGRATION and obj["status"] in live_migration_in_progress): obj["links"] = self._view_builder._get_links( req, obj["id"], self._collection_name % obj['instance_uuid']) elif add_link is False: del obj['migration_type'] return objects def _index(self, req, add_link=False, next_link=False, add_uuid=False, sort_dirs=None, sort_keys=None, limit=None, marker=None, allow_changes_since=False, allow_changes_before=False): context = req.environ['nova.context'] context.can(migrations_policies.POLICY_ROOT % 'index') search_opts = {} search_opts.update(req.GET) project_id = search_opts.get('project_id') # TODO(gmaan): If the user request all or cross project migrations # (passing other project id or not passing project id itself) then # policy needs to permit the same otherwise it will raise 403 error. # This behavior can be improved by returning their own project # migrations if the policy does not permit to list all or cross # project migrations but that will be API behavior change and # needs to be done with microversion bump. if not project_id or project_id != context.project_id: context.can(migrations_policies.POLICY_ROOT % 'index:all_projects') add_host = context.can(migrations_policies.POLICY_ROOT % 'index:host', fatal=False) if 'changes-since' in search_opts: if allow_changes_since: search_opts['changes-since'] = timeutils.parse_isotime( search_opts['changes-since']) else: # Before microversion 2.59, the changes-since filter was not # supported in the DB API. However, the schema allowed # additionalProperties=True, so a user could pass it before # 2.59 and filter by the updated_at field if we don't remove # it from search_opts. del search_opts['changes-since'] if 'changes-before' in search_opts: if allow_changes_before: search_opts['changes-before'] = timeutils.parse_isotime( search_opts['changes-before']) changes_since = search_opts.get('changes-since') if (changes_since and search_opts['changes-before'] < search_opts['changes-since']): msg = _('The value of changes-since must be less than ' 'or equal to changes-before.') raise exc.HTTPBadRequest(explanation=msg) else: # Before microversion 2.59 the schema allowed # additionalProperties=True, so a user could pass # changes-before before 2.59 and filter by the updated_at # field if we don't remove it from search_opts. 
del search_opts['changes-before'] if sort_keys: try: migrations = self.compute_api.get_migrations_sorted( context, search_opts, sort_dirs=sort_dirs, sort_keys=sort_keys, limit=limit, marker=marker) except exception.MarkerNotFound as e: raise exc.HTTPBadRequest(explanation=e.format_message()) else: migrations = self.compute_api.get_migrations( context, search_opts) add_user_project = api_version_request.is_supported(req, '2.80') migrations = self._output(req, migrations, add_link, add_uuid, add_user_project, add_host) migrations_dict = {'migrations': migrations} if next_link: migrations_links = self._view_builder.get_links(req, migrations) if migrations_links: migrations_dict['migrations_links'] = migrations_links return migrations_dict @wsgi.expected_errors((), "2.1", "2.58") @wsgi.expected_errors(400, "2.59") @validation.query_schema(schema.list_query_schema_v20, "2.0", "2.22") @validation.query_schema(schema.list_query_schema_v20, "2.23", "2.58") @validation.query_schema(schema.list_query_params_v259, "2.59", "2.65") @validation.query_schema(schema.list_query_params_v266, "2.66", "2.79") @validation.query_schema(schema.list_query_params_v280, "2.80") def index(self, req): """Return all migrations using the query parameters as filters.""" add_link = False if api_version_request.is_supported(req, '2.23'): add_link = True next_link = False add_uuid = False sort_keys = None sort_dirs = None limit = None marker = None allow_changes_since = False if api_version_request.is_supported(req, '2.59'): next_link = True add_uuid = True sort_keys = ['created_at', 'id'] # FIXME(stephenfin): This looks like a typo? sort_dirs = ['desc', 'desc'] limit, marker = common.get_limit_and_marker(req) allow_changes_since = True allow_changes_before = False if api_version_request.is_supported(req, '2.66'): allow_changes_before = True return self._index( req, add_link=add_link, next_link=next_link, add_uuid=add_uuid, sort_keys=sort_keys, sort_dirs=sort_dirs, limit=limit, marker=marker, allow_changes_since=allow_changes_since, allow_changes_before=allow_changes_before) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/multinic.py0000664000175000017500000000556200000000000022147 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
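# NOTE: illustrative example only; timestamps are invented. From microversion
# 2.59 the migrations index above paginates with 'limit'/'marker', sorts on
# ('created_at', 'id') and accepts 'changes-since'; 2.66 adds
# 'changes-before', which must not be earlier than 'changes-since'. A typical
# filtered listing:
_example_migrations_query = (
    '/os-migrations'
    '?limit=50'
    '&changes-since=2024-01-01T00:00:00Z'
    '&changes-before=2024-02-01T00:00:00Z'
)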
"""The multinic extension.""" from webob import exc from nova.api.openstack import common from nova.api.openstack.compute.schemas import multinic as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.policies import multinic as multinic_policies @validation.validated class MultinicController(wsgi.Controller): """This API is deprecated from Microversion '2.44'.""" def __init__(self): super(MultinicController, self).__init__() self.compute_api = compute.API() @wsgi.api_version("2.1", "2.43") @wsgi.response(202) @wsgi.action('addFixedIp') @wsgi.expected_errors((400, 404)) @validation.schema(schema.add_fixed_ip) @validation.response_body_schema(schema.add_fixed_ip_response) def _add_fixed_ip(self, req, id, body): """Adds an IP on a given network to an instance.""" context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, id) context.can(multinic_policies.BASE_POLICY_NAME % 'add', target={'project_id': instance.project_id}) network_id = body['addFixedIp']['networkId'] try: self.compute_api.add_fixed_ip(context, instance, network_id) except exception.NoMoreFixedIps as e: raise exc.HTTPBadRequest(explanation=e.format_message()) @wsgi.api_version("2.1", "2.43") @wsgi.response(202) @wsgi.action('removeFixedIp') @wsgi.expected_errors((400, 404)) @validation.schema(schema.remove_fixed_ip) @validation.response_body_schema(schema.remove_fixed_ip_response) def _remove_fixed_ip(self, req, id, body): """Removes an IP from an instance.""" context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, id) context.can(multinic_policies.BASE_POLICY_NAME % 'remove', target={'project_id': instance.project_id}) address = body['removeFixedIp']['address'] try: self.compute_api.remove_fixed_ip(context, instance, address) except exception.FixedIpNotFoundForInstance as e: raise exc.HTTPBadRequest(explanation=e.format_message()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/networks.py0000664000175000017500000001171600000000000022175 0ustar00zuulzuul00000000000000# Copyright 2011 Grid Dynamics # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc from nova.api.openstack.api_version_request \ import MAX_PROXY_API_SUPPORT_VERSION from nova.api.openstack.compute.schemas import networks as schema from nova.api.openstack import wsgi from nova.api import validation from nova import exception from nova.i18n import _ from nova.network import neutron from nova.policies import networks as net_policies _removal_reason = """\ This %s only works with *nova-network*, which was deprecated in the 14.0.0 (Newton) release. It fails with HTTP 404 starting from microversion 2.36. It was removed in the 21.0.0 (Ussuri) release. 
""" _removal_reason_action = _removal_reason % 'action' _removal_reason_api = _removal_reason % 'API' def network_dict(context, network): fields = ('id', 'cidr', 'netmask', 'gateway', 'broadcast', 'dns1', 'dns2', 'cidr_v6', 'gateway_v6', 'label', 'netmask_v6') admin_fields = ('created_at', 'updated_at', 'deleted_at', 'deleted', 'injected', 'bridge', 'vlan', 'vpn_public_address', 'vpn_public_port', 'vpn_private_address', 'dhcp_start', 'project_id', 'host', 'bridge_interface', 'multi_host', 'priority', 'rxtx_base', 'mtu', 'dhcp_server', 'enable_dhcp', 'share_address') # NOTE(mnaser): We display a limited set of fields so users can know what # networks are available, extra system-only fields are only visible if they # are an admin. if context.is_admin: fields += admin_fields result = {} for field in fields: # we only provide a limited number of fields now that nova-network is # gone (yes, two fields of thirty) if field == 'id': result[field] = network['id'] elif field == 'label': result[field] = network['name'] else: result[field] = None return result @validation.validated class NetworkController(wsgi.Controller): def __init__(self, network_api=None): super(NetworkController, self).__init__() # NOTE(stephenfin): 'network_api' is only being passed for use by tests self.network_api = network_api or neutron.API() @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(()) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req): context = req.environ['nova.context'] context.can(net_policies.POLICY_ROOT % 'list', target={'project_id': context.project_id}) networks = self.network_api.get_all(context) result = [network_dict(context, net_ref) for net_ref in networks] return {'networks': result} @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(404) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response) def show(self, req, id): context = req.environ['nova.context'] context.can(net_policies.POLICY_ROOT % 'show', target={'project_id': context.project_id}) try: network = self.network_api.get(context, id) except exception.NetworkNotFound: msg = _("Network not found") raise exc.HTTPNotFound(explanation=msg) return {'network': network_dict(context, network)} @wsgi.expected_errors(410) @wsgi.action("disassociate") @wsgi.removed('21.0.0', _removal_reason_action) @validation.schema(schema.disassociate) @validation.response_body_schema(schema.disassociate_response) def _disassociate_host_and_project(self, req, id, body): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('21.0.0', _removal_reason_api) @validation.response_body_schema(schema.delete_response) def delete(self, req, id): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('21.0.0', _removal_reason_api) @validation.schema(schema.create) @validation.response_body_schema(schema.create_response) def create(self, req, body): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('21.0.0', _removal_reason_api) @validation.schema(schema.add) @validation.response_body_schema(schema.add_response) def add(self, req, body): raise exc.HTTPGone() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/networks_associate.py0000664000175000017500000000400000000000000024214 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this 
file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc from nova.api.openstack.compute.schemas import networks_associate as schema from nova.api.openstack import wsgi from nova.api import validation _removal_reason = """\ This action only works with *nova-network*, which was deprecated in the 14.0.0 (Newton) release. It fails with HTTP 404 starting from microversion 2.36. It was removed in the 21.0.0 (Ussuri) release. """ @validation.validated class NetworkAssociateActionController(wsgi.Controller): """Network Association API Controller.""" @wsgi.action("disassociate_host") @wsgi.expected_errors(410) @wsgi.removed('18.0.0', _removal_reason) @validation.schema(schema.disassociate_host) @validation.response_body_schema(schema.disassociate_host_response) def _disassociate_host_only(self, req, id, body): raise exc.HTTPGone() @wsgi.action("disassociate_project") @wsgi.expected_errors(410) @wsgi.removed('18.0.0', _removal_reason) @validation.schema(schema.disassociate_project) @validation.response_body_schema(schema.disassociate_project_response) def _disassociate_project_only(self, req, id, body): raise exc.HTTPGone() @wsgi.action("associate_host") @wsgi.expected_errors(410) @wsgi.removed('18.0.0', _removal_reason) @validation.schema(schema.associate_host) @validation.response_body_schema(schema.associate_host_response) def _associate_host(self, req, id, body): raise exc.HTTPGone() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/pause_server.py0000664000175000017500000000634500000000000023026 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
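# NOTE: illustrative example only; the host name is invented. Every action in
# the network-associate controller above was removed along with nova-network,
# so any request body, for example:
_example_associate_host_body = {'associate_host': 'compute-2'}
# is answered with HTTP 410 (Gone) without further processing; network
# management is handled by Neutron instead.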
from webob import exc from nova.api.openstack import common from nova.api.openstack.compute.schemas import pause_server as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.policies import pause_server as ps_policies @validation.validated class PauseServerController(wsgi.Controller): def __init__(self): super(PauseServerController, self).__init__() self.compute_api = compute.API() @wsgi.response(202) @wsgi.expected_errors((404, 409, 501)) @wsgi.action('pause') @validation.schema(schema.pause) @validation.response_body_schema(schema.pause_response) def _pause(self, req, id, body): """Permit Admins to pause the server.""" ctxt = req.environ['nova.context'] server = common.get_instance(self.compute_api, ctxt, id) ctxt.can(ps_policies.POLICY_ROOT % 'pause', target={'user_id': server.user_id, 'project_id': server.project_id}) try: self.compute_api.pause(ctxt, server) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'pause', id) except exception.InstanceNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) except NotImplementedError: common.raise_feature_not_supported() @wsgi.response(202) @wsgi.expected_errors((404, 409, 501)) @wsgi.action('unpause') @validation.schema(schema.unpause) @validation.response_body_schema(schema.unpause_response) def _unpause(self, req, id, body): """Permit Admins to unpause the server.""" ctxt = req.environ['nova.context'] server = common.get_instance(self.compute_api, ctxt, id) ctxt.can(ps_policies.POLICY_ROOT % 'unpause', target={'project_id': server.project_id}) try: self.compute_api.unpause(ctxt, server) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'unpause', id) except exception.InstanceNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) except NotImplementedError: common.raise_feature_not_supported() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/quota_classes.py0000664000175000017500000001401600000000000023163 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import webob from nova.api.openstack import api_version_request from nova.api.openstack.compute.schemas import quota_classes from nova.api.openstack import wsgi from nova.api import validation from nova import exception from nova.limit import utils as limit_utils from nova import objects from nova.policies import quota_class_sets as qcs_policies from nova import quota from nova import utils QUOTAS = quota.QUOTAS # NOTE(gmann): Quotas which were returned in v2 but in v2.1 those # were not returned. Fixed in microversion 2.50. Bug#1693168. EXTENDED_QUOTAS = ['server_groups', 'server_group_members'] # NOTE(gmann): Network related quotas are filter out in # microversion 2.50. Bug#1701211. FILTERED_QUOTAS_2_50 = ["fixed_ips", "floating_ips", "security_group_rules", "security_groups"] # Microversion 2.57 removes personality (injected) files from the API. FILTERED_QUOTAS_2_57 = list(FILTERED_QUOTAS_2_50) FILTERED_QUOTAS_2_57.extend(['injected_files', 'injected_file_content_bytes', 'injected_file_path_bytes']) class QuotaClassSetsController(wsgi.Controller): supported_quotas = [] def __init__(self): super(QuotaClassSetsController, self).__init__() self.supported_quotas = QUOTAS.resources def _format_quota_set(self, quota_class, quota_set, filtered_quotas=None, exclude_server_groups=False): """Convert the quota object to a result dict.""" if quota_class: result = dict(id=str(quota_class)) else: result = {} original_quotas = copy.deepcopy(self.supported_quotas) if filtered_quotas: original_quotas = [resource for resource in original_quotas if resource not in filtered_quotas] # NOTE(gmann): Before microversion v2.50, v2.1 API does not return the # 'server_groups' & 'server_group_members' key in quota class API # response. if exclude_server_groups: for resource in EXTENDED_QUOTAS: original_quotas.remove(resource) for resource in original_quotas: if resource in quota_set: result[resource] = quota_set[resource] return dict(quota_class_set=result) def _get_filtered_quotas(self, req): if api_version_request.is_supported(req, '2.57'): return FILTERED_QUOTAS_2_57 elif api_version_request.is_supported(req, '2.50'): return FILTERED_QUOTAS_2_50 else: return [] @wsgi.expected_errors(()) @validation.query_schema(quota_classes.show_query) def show(self, req, id): filtered_quotas = self._get_filtered_quotas(req) exclude_server_groups = True if api_version_request.is_supported(req, '2.50'): exclude_server_groups = False return self._show( req, id, filtered_quotas=filtered_quotas, exclude_server_groups=exclude_server_groups, ) def _show(self, req, id, filtered_quotas=None, exclude_server_groups=False): context = req.environ['nova.context'] context.can(qcs_policies.POLICY_ROOT % 'show', target={}) values = QUOTAS.get_class_quotas(context, id) return self._format_quota_set(id, values, filtered_quotas, exclude_server_groups) @wsgi.expected_errors(400) @validation.schema(quota_classes.update, '2.1', '2.49') @validation.schema(quota_classes.update_v250, '2.50', '2.56') @validation.schema(quota_classes.update_v257, '2.57') def update(self, req, id, body): filtered_quotas = self._get_filtered_quotas(req) exclude_server_groups = True if api_version_request.is_supported(req, '2.50'): exclude_server_groups = False return self._update( req, id, body, filtered_quotas=filtered_quotas, exclude_server_groups=exclude_server_groups, ) def _update(self, req, id, body, filtered_quotas=None, exclude_server_groups=False): context = req.environ['nova.context'] context.can(qcs_policies.POLICY_ROOT % 'update', target={}) try: 
utils.check_string_length(id, 'quota_class_name', min_length=1, max_length=255) except exception.InvalidInput as e: raise webob.exc.HTTPBadRequest( explanation=e.format_message()) quota_class = id quota_updates = body['quota_class_set'].items() # TODO(johngarbutt) eventually cores, ram and instances changes will # get sent to keystone when using unified limits, but only when the # quota_class == "default". if not limit_utils.use_unified_limits(): # When not unified limits, keep updating the database, even though # the noop driver doesn't read these values for key, value in quota_updates: try: objects.Quotas.update_class( context, quota_class, key, value) except exception.QuotaClassNotFound: objects.Quotas.create_class( context, quota_class, key, value) values = QUOTAS.get_class_quotas(context, quota_class) return self._format_quota_set(None, values, filtered_quotas, exclude_server_groups) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/quota_sets.py0000664000175000017500000002525000000000000022506 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from urllib import parse as urlparse from oslo_utils import strutils import webob from nova.api.openstack import api_version_request from nova.api.openstack.compute.schemas import quota_sets from nova.api.openstack import identity from nova.api.openstack import wsgi from nova.api import validation import nova.conf from nova import exception from nova.i18n import _ from nova.limit import utils as limit_utils from nova import objects from nova.policies import quota_sets as qs_policies from nova import quota CONF = nova.conf.CONF QUOTAS = quota.QUOTAS FILTERED_QUOTAS_2_36 = ["fixed_ips", "floating_ips", "security_group_rules", "security_groups"] FILTERED_QUOTAS_2_57 = list(FILTERED_QUOTAS_2_36) FILTERED_QUOTAS_2_57.extend(['injected_files', 'injected_file_content_bytes', 'injected_file_path_bytes']) class QuotaSetsController(wsgi.Controller): def _format_quota_set(self, project_id, quota_set, filtered_quotas): """Convert the quota object to a result dict.""" if project_id: result = dict(id=str(project_id)) else: result = {} for resource in QUOTAS.resources: if (resource not in filtered_quotas and resource in quota_set): result[resource] = quota_set[resource] return dict(quota_set=result) def _validate_quota_limit(self, resource, limit, minimum, maximum): def conv_inf(value): return float("inf") if value == -1 else value if conv_inf(limit) < conv_inf(minimum): msg = (_("Quota limit %(limit)s for %(resource)s must " "be greater than or equal to already used and " "reserved %(minimum)s.") % {'limit': limit, 'resource': resource, 'minimum': minimum}) raise webob.exc.HTTPBadRequest(explanation=msg) if conv_inf(limit) > conv_inf(maximum): msg = (_("Quota limit %(limit)s for %(resource)s must be " "less than or equal to %(maximum)s.") % {'limit': limit, 'resource': resource, 'maximum': 
maximum}) raise webob.exc.HTTPBadRequest(explanation=msg) def _get_quotas(self, context, id, user_id=None, usages=False): if user_id: values = QUOTAS.get_user_quotas(context, id, user_id, usages=usages) else: values = QUOTAS.get_project_quotas(context, id, usages=usages) if usages: # NOTE(melwitt): For the detailed quota view with usages, the API # returns a response in the format: # { # "quota_set": { # "cores": { # "in_use": 0, # "limit": 20, # "reserved": 0 # }, # ... # We've re-architected quotas to eliminate reservations, so we no # longer have a 'reserved' key returned from get_*_quotas, so set # it here to satisfy the REST API response contract. reserved = QUOTAS.get_reserved() for v in values.values(): v['reserved'] = reserved return values else: return {k: v['limit'] for k, v in values.items()} def _get_filtered_quotas(self, req): if api_version_request.is_supported(req, '2.57'): return FILTERED_QUOTAS_2_57 elif api_version_request.is_supported(req, '2.36'): return FILTERED_QUOTAS_2_36 else: return [] @wsgi.expected_errors(400) @validation.query_schema(quota_sets.show_query, '2.0', '2.74') @validation.query_schema(quota_sets.show_query_v275, '2.75') def show(self, req, id): filtered_quotas = self._get_filtered_quotas(req) return self._show(req, id, filtered_quotas) def _show(self, req, id, filtered_quotas): context = req.environ['nova.context'] context.can(qs_policies.POLICY_ROOT % 'show', {'project_id': id}) identity.verify_project_id(context, id) params = urlparse.parse_qs(req.environ.get('QUERY_STRING', '')) user_id = params.get('user_id', [None])[0] return self._format_quota_set( id, self._get_quotas(context, id, user_id=user_id), filtered_quotas=filtered_quotas) @wsgi.expected_errors(400) @validation.query_schema(quota_sets.show_query, '2.0', '2.74') @validation.query_schema(quota_sets.show_query_v275, '2.75') def detail(self, req, id): filtered_quotas = self._get_filtered_quotas(req) return self._detail(req, id, filtered_quotas) def _detail(self, req, id, filtered_quotas): context = req.environ['nova.context'] context.can(qs_policies.POLICY_ROOT % 'detail', {'project_id': id}) identity.verify_project_id(context, id) user_id = req.GET.get('user_id', None) return self._format_quota_set( id, self._get_quotas(context, id, user_id=user_id, usages=True), filtered_quotas=filtered_quotas) @wsgi.expected_errors(400) @validation.schema(quota_sets.update, '2.0', '2.35') @validation.schema(quota_sets.update_v236, '2.36', '2.56') @validation.schema(quota_sets.update_v257, '2.57') @validation.query_schema(quota_sets.show_query, '2.0', '2.74') @validation.query_schema(quota_sets.show_query_v275, '2.75') def update(self, req, id, body): filtered_quotas = self._get_filtered_quotas(req) return self._update(req, id, body, filtered_quotas) def _update(self, req, id, body, filtered_quotas): context = req.environ['nova.context'] context.can(qs_policies.POLICY_ROOT % 'update', {'project_id': id}) identity.verify_project_id(context, id) project_id = id params = urlparse.parse_qs(req.environ.get('QUERY_STRING', '')) user_id = params.get('user_id', [None])[0] quota_set = body['quota_set'] # NOTE(stephenfin): network quotas were only used by nova-network and # therefore should be explicitly rejected if 'networks' in quota_set: raise webob.exc.HTTPBadRequest( explanation=_('The networks quota has been removed')) force_update = strutils.bool_from_string(quota_set.get('force', 'False')) settable_quotas = QUOTAS.get_settable_quotas(context, project_id, user_id=user_id) requested_quotas = 
body['quota_set'].items() if limit_utils.use_unified_limits(): # NOTE(johngarbutt) currently all info comes from keystone # we don't update the database. requested_quotas = [] # NOTE(dims): Pass #1 - In this loop for quota_set.items(), we validate # min/max values and bail out if any of the items in the set is bad. valid_quotas = {} for key, value in requested_quotas: if key == 'force' or (not value and value != 0): continue # validate whether already used and reserved exceeds the new # quota, this check will be ignored if admin want to force # update value = int(value) if not force_update: minimum = settable_quotas[key]['minimum'] maximum = settable_quotas[key]['maximum'] self._validate_quota_limit(key, value, minimum, maximum) valid_quotas[key] = value # NOTE(dims): Pass #2 - At this point we know that all the # values are correct and we can iterate and update them all in one # shot without having to worry about rolling back etc as we have done # the validation up front in the loop above. for key, value in valid_quotas.items(): try: objects.Quotas.create_limit(context, project_id, key, value, user_id=user_id) except exception.QuotaExists: objects.Quotas.update_limit(context, project_id, key, value, user_id=user_id) # Note(gmann): Removed 'id' from update's response to make it same # as V2. If needed it can be added with microversion. return self._format_quota_set( None, self._get_quotas(context, id, user_id=user_id), filtered_quotas=filtered_quotas) @wsgi.api_version('2.0') @wsgi.expected_errors(400) @validation.query_schema(quota_sets.defaults_query) def defaults(self, req, id): filtered_quotas = self._get_filtered_quotas(req) return self._defaults(req, id, filtered_quotas) def _defaults(self, req, id, filtered_quotas): context = req.environ['nova.context'] context.can(qs_policies.POLICY_ROOT % 'defaults', {'project_id': id}) identity.verify_project_id(context, id) values = QUOTAS.get_defaults(context) return self._format_quota_set(id, values, filtered_quotas=filtered_quotas) # TODO(oomichi): Here should be 204(No Content) instead of 202 by v2.1 # +microversions because the resource quota-set has been deleted completely # when returning a response. @wsgi.expected_errors(()) @validation.query_schema(quota_sets.show_query_v275, '2.75') @validation.query_schema(quota_sets.show_query, '2.0', '2.74') @wsgi.response(202) def delete(self, req, id): context = req.environ['nova.context'] context.can(qs_policies.POLICY_ROOT % 'delete', {'project_id': id}) params = urlparse.parse_qs(req.environ.get('QUERY_STRING', '')) user_id = params.get('user_id', [None])[0] # NOTE(johngarbutt) with unified limits we only use keystone, not the # db if not limit_utils.use_unified_limits(): if user_id: objects.Quotas.destroy_all_by_project_and_user( context, id, user_id) else: objects.Quotas.destroy_all_by_project(context, id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/remote_consoles.py0000664000175000017500000002110700000000000023514 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from nova.api.openstack import common from nova.api.openstack.compute.schemas import remote_consoles as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.policies import remote_consoles as rc_policies _rdp_console_removal_reason = """\ RDP consoles are only available when using the Hyper-V driver, which was removed from Nova in the 29.0.0 (Caracal) release. """ @validation.validated class RemoteConsolesController(wsgi.Controller): def __init__(self): super(RemoteConsolesController, self).__init__() self.compute_api = compute.API() self.handlers = {'vnc': self.compute_api.get_vnc_console, 'spice': self.compute_api.get_spice_console, 'serial': self.compute_api.get_serial_console, 'mks': self.compute_api.get_mks_console} @wsgi.api_version("2.1", "2.5") @wsgi.expected_errors((400, 404, 409, 501)) @wsgi.action('os-getVNCConsole') @validation.schema(schema.get_vnc_console) @validation.response_body_schema(schema.get_vnc_console_response) def get_vnc_console(self, req, id, body): """Get text console output.""" context = req.environ['nova.context'] context.can(rc_policies.BASE_POLICY_NAME) # If type is not supplied or unknown, get_vnc_console below will cope console_type = body['os-getVNCConsole'].get('type') instance = common.get_instance(self.compute_api, context, id) try: output = self.compute_api.get_vnc_console(context, instance, console_type) except exception.ConsoleTypeUnavailable as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) except exception.InstanceNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceNotReady as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as e: common.raise_http_conflict_for_instance_invalid_state( e, 'get_vnc_console', id) except NotImplementedError: common.raise_feature_not_supported() return {'console': {'type': console_type, 'url': output['url']}} @wsgi.api_version("2.1", "2.5") @wsgi.expected_errors((400, 404, 409, 501)) @wsgi.action('os-getSPICEConsole') @validation.schema(schema.get_spice_console) @validation.response_body_schema(schema.get_spice_console_response) def get_spice_console(self, req, id, body): """Get text console output.""" context = req.environ['nova.context'] context.can(rc_policies.BASE_POLICY_NAME) # If type is not supplied or unknown, get_spice_console below will cope console_type = body['os-getSPICEConsole'].get('type') instance = common.get_instance(self.compute_api, context, id) try: output = self.compute_api.get_spice_console(context, instance, console_type) except exception.ConsoleTypeUnavailable as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) except exception.InstanceNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceNotReady as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) except NotImplementedError: common.raise_feature_not_supported() return {'console': {'type': console_type, 'url': output['url']}} 
@wsgi.api_version("2.1", "2.5") @wsgi.expected_errors((400, 404, 409, 501)) @wsgi.action('os-getRDPConsole') @wsgi.removed('29.0.0', _rdp_console_removal_reason) @validation.schema(schema.get_rdp_console) @validation.response_body_schema(schema.get_rdp_console_response) def get_rdp_console(self, req, id, body): """RDP console was available only for HyperV driver which has been removed from Nova in 29.0.0 (Caracal) release. """ raise webob.exc.HTTPBadRequest() @wsgi.api_version("2.1", "2.5") @wsgi.expected_errors((400, 404, 409, 501)) @wsgi.action('os-getSerialConsole') @validation.schema(schema.get_serial_console) @validation.response_body_schema(schema.get_serial_console_response) def get_serial_console(self, req, id, body): """Get connection to a serial console.""" context = req.environ['nova.context'] context.can(rc_policies.BASE_POLICY_NAME) # If type is not supplied or unknown get_serial_console below will cope console_type = body['os-getSerialConsole'].get('type') instance = common.get_instance(self.compute_api, context, id) try: output = self.compute_api.get_serial_console(context, instance, console_type) except exception.InstanceNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceNotReady as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) except (exception.ConsoleTypeUnavailable, exception.ImageSerialPortNumberInvalid, exception.ImageSerialPortNumberExceedFlavorValue, exception.SocketPortRangeExhaustedException) as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) except NotImplementedError: common.raise_feature_not_supported() return {'console': {'type': console_type, 'url': output['url']}} @wsgi.api_version("2.6") @wsgi.expected_errors((400, 404, 409, 501)) @validation.schema(schema.create_v26, "2.6", "2.7") @validation.schema(schema.create_v28, "2.8", "2.98") @validation.schema(schema.create_v299, "2.99") @validation.response_body_schema(schema.create_response, "2.6", "2.7") @validation.response_body_schema(schema.create_response_v28, "2.8", "2.98") @validation.response_body_schema(schema.create_response_v299, "2.99") def create(self, req, server_id, body): context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, server_id) context.can(rc_policies.BASE_POLICY_NAME, target={'project_id': instance.project_id}) protocol = body['remote_console']['protocol'] console_type = body['remote_console']['type'] # handle removed console types if protocol in ('rdp',): raise webob.exc.HTTPBadRequest( 'Unavailable console type %s.' 
% protocol ) try: # this should never fail in the real world since our schema # prevents unsupported types getting through handler = self.handlers[protocol] output = handler(context, instance, console_type) return {'remote_console': {'protocol': protocol, 'type': console_type, 'url': output['url']}} except exception.InstanceNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceNotReady as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) except (exception.ConsoleTypeInvalid, exception.ConsoleTypeUnavailable, exception.ImageSerialPortNumberInvalid, exception.ImageSerialPortNumberExceedFlavorValue, exception.SocketPortRangeExhaustedException) as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) except (NotImplementedError, KeyError): common.raise_feature_not_supported() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/rescue.py0000664000175000017500000001020300000000000021575 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The rescue mode extension.""" from webob import exc from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import rescue as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute import nova.conf from nova import exception from nova.policies import rescue as rescue_policies from nova import utils CONF = nova.conf.CONF @validation.validated class RescueController(wsgi.Controller): def __init__(self): super(RescueController, self).__init__() self.compute_api = compute.API() # TODO(cyeoh): Should be responding here with 202 Accept # because rescue is an async call, but keep to 200 # for backwards compatibility reasons. 
@wsgi.expected_errors((400, 404, 409, 501)) @wsgi.action('rescue') @validation.schema(schema.rescue) @validation.response_body_schema(schema.rescue_response) def _rescue(self, req, id, body): """Rescue an instance.""" context = req.environ["nova.context"] if body['rescue'] and 'adminPass' in body['rescue']: password = body['rescue']['adminPass'] else: password = utils.generate_password() instance = common.get_instance(self.compute_api, context, id) context.can(rescue_policies.BASE_POLICY_NAME, target={'user_id': instance.user_id, 'project_id': instance.project_id}) rescue_image_ref = None if body['rescue']: rescue_image_ref = body['rescue'].get('rescue_image_ref') allow_bfv_rescue = api_version_request.is_supported(req, '2.87') try: self.compute_api.rescue(context, instance, rescue_password=password, rescue_image_ref=rescue_image_ref, allow_bfv_rescue=allow_bfv_rescue) except ( exception.InstanceIsLocked, exception.InvalidVolume, ) as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as e: common.raise_http_conflict_for_instance_invalid_state( e, 'rescue', id) except ( exception.InstanceNotRescuable, exception.UnsupportedRescueImage, ) as e: raise exc.HTTPBadRequest(explanation=e.format_message()) if CONF.api.enable_instance_password: return {'adminPass': password} else: return {} @wsgi.response(202) @wsgi.expected_errors((404, 409, 501)) @wsgi.action('unrescue') @validation.schema(schema.unrescue) @validation.response_body_schema(schema.unrescue_response) def _unrescue(self, req, id, body): """Unrescue an instance.""" context = req.environ["nova.context"] instance = common.get_instance(self.compute_api, context, id) context.can(rescue_policies.UNRESCUE_POLICY_NAME, target={'project_id': instance.project_id}) try: self.compute_api.unrescue(context, instance) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'unrescue', id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/rest_api_version_history.rst0000664000175000017500000012412300000000000025632 0ustar00zuulzuul00000000000000REST API Version History ======================== This documents the changes made to the REST API with every microversion change. The description for each version should be a verbose one which has enough information to be suitable for use in user documentation. 2.1 --- This is the initial version of the v2.1 API which supports microversions. The V2.1 API is from the REST API users' point of view exactly the same as v2.0 except with strong input validation. A user can specify a header in the API request:: X-OpenStack-Nova-API-Version: where ```` is any valid api version for this API. If no version is specified then the API will behave as if a version request of v2.1 was requested. 2.2 --- Added Keypair type. A user can request the creation of a certain 'type' of keypair (``ssh`` or ``x509``) in the ``os-keypairs`` plugin If no keypair type is specified, then the default ``ssh`` type of keypair is created. 
Fixes status code for ``os-keypairs`` create method from 200 to 201 Fixes status code for ``os-keypairs`` delete method from 202 to 204 2.3 (Maximum in Kilo) --------------------- Exposed additional attributes in ``os-extended-server-attributes``: ``reservation_id``, ``launch_index``, ``ramdisk_id``, ``kernel_id``, ``hostname``, ``root_device_name``, ``userdata``. Exposed ``delete_on_termination`` for ``volumes_attached`` in ``os-extended-volumes``. This change is required for the extraction of EC2 API into a standalone service. It exposes necessary properties absent in public nova APIs yet. Add info for Standalone EC2 API to cut access to Nova DB. 2.4 --- Show the ``reserved`` status on a ``FixedIP`` object in the ``os-fixed-ips`` API extension. The extension allows one to ``reserve`` and ``unreserve`` a fixed IP but the show method does not report the current status. 2.5 --- Before version 2.5, the command ``nova list --ip6 xxx`` returns all servers for non-admins, as the filter option is silently discarded. There is no reason to treat ip6 different from ip, though, so we just add this option to the allowed list. 2.6 --- A new API for getting remote console is added:: POST /servers//remote-consoles { "remote_console": { "protocol": ["vnc"|"rdp"|"serial"|"spice"], "type": ["novnc"|"xpvnc"|"rdp-html5"|"spice-html5"|"serial"] } } Example response:: { "remote_console": { "protocol": "vnc", "type": "novnc", "url": "http://example.com:6080/vnc_auto.html?path=%3Ftoken%3DXYZ" } } The old APIs ``os-getVNCConsole``, ``os-getSPICEConsole``, ``os-getSerialConsole`` and ``os-getRDPConsole`` are removed. 2.7 --- Check the ``is_public`` attribute of a flavor before adding tenant access to it. Reject the request with ``HTTPConflict`` error. 2.8 --- Add ``mks`` protocol and ``webmks`` type for remote consoles. 2.9 --- Add a new ``locked`` attribute to the detailed view, update, and rebuild action. ``locked`` will be ``true`` if anyone is currently holding a lock on the server, ``false`` otherwise. 2.10 ---- Added ``user_id`` parameter to ``os-keypairs`` plugin, as well as a new property in the request body, for the create operation. Administrators will be able to list, get details and delete keypairs owned by users other than themselves and to create new keypairs on behalf of their users. 2.11 ---- Exposed attribute ``forced_down`` for ``os-services``. Added ability to change the ``forced_down`` attribute by calling an update. 2.12 (Maximum in Liberty) ------------------------- Exposes VIF ``net_id`` attribute in ``os-virtual-interfaces``. User will be able to get Virtual Interfaces ``net_id`` in Virtual Interfaces list and can determine in which network a Virtual Interface is plugged into. 2.13 ---- Add information ``project_id`` and ``user_id`` to ``os-server-groups`` API response data. 2.14 ---- Remove ``onSharedStorage`` parameter from server's evacuate action. Nova will automatically detect if the instance is on shared storage. ``adminPass`` is removed from the response body. The user can get the password with the server's ``os-server-password`` action. 2.15 ---- From this version of the API users can choose 'soft-affinity' and 'soft-anti-affinity' rules too for server-groups. 2.16 ---- Exposes new ``host_status`` attribute for servers/detail and servers/{server_id}. Ability to get nova-compute status when querying servers. By default, this is only exposed to cloud administrators. 2.17 ---- Add a new API for triggering crash dump in an instance. 
Different operating systems in the instance may need different configurations to trigger a crash dump. 2.18 ---- Establishes a set of routes that makes project_id an optional construct in v2.1. 2.19 ---- Allow the user to set and get the server description. The user will be able to set the description when creating, rebuilding, or updating a server, and get the description as part of the server details. 2.20 ---- From this version of the API user can call detach and attach volumes for instances which are in ``shelved`` and ``shelved_offloaded`` state. 2.21 ---- The ``os-instance-actions`` API now returns information from deleted instances. 2.22 ---- A new resource, ``servers:migrations``, is added. A new API to force live migration to complete added:: POST /servers/<uuid>/migrations/<id>/action { "force_complete": null } 2.23 ---- From this version of the API users can get the migration summary list by index API or the information of a specific migration by get API. Add ``migration_type`` for old ``/os-migrations`` API, also add ``ref`` link to the ``/servers/{uuid}/migrations/{id}`` for it when the migration is an in-progress live-migration. 2.24 ---- A new API call to cancel a running live migration:: DELETE /servers/<uuid>/migrations/<id> 2.25 (Maximum in Mitaka) ------------------------ Modify input parameter for ``os-migrateLive``. The ``block_migration`` field now supports an ``auto`` value and the ``disk_over_commit`` flag is removed. 2.26 ---- Added support of server tags. A user can create, update, delete or check existence of simple string tags for servers by the ``os-server-tags`` plugin. Tags have the following schema restrictions: * Tag is a Unicode bytestring no longer than 60 characters. * Tag is a non-empty string. * '/' is not allowed to be in a tag name * Comma is not allowed to be in a tag name in order to simplify requests that specify lists of tags * All other characters are allowed to be in a tag name * Each server can have up to 50 tags. The resource point for these operations is ``/servers/<server_id>/tags``. A user can add a single tag to the server by making a ``PUT`` request to ``/servers/<server_id>/tags/<tag>``, where ``<tag>`` is any valid tag name. A user can replace **all** current server tags with a new set of tags by making a ``PUT`` request to ``/servers/<server_id>/tags``. The new set of tags must be specified in the request body, in a list named ``tags``. A user can remove a specified tag from the server by making a ``DELETE`` request to ``/servers/<server_id>/tags/<tag>``, where ``<tag>`` is the name of the tag the user wants to remove. A user can remove **all** tags from the server by making a ``DELETE`` request to ``/servers/<server_id>/tags``. A user can get the set of server tags together with information about the server by making a ``GET`` request to ``/servers/<server_id>``. The request returns a dictionary with information about the specified server, including the list ``tags``:: { 'id': {server_id}, ... 'tags': ['foo', 'bar', 'baz'] } A user can get **only** the set of server tags by making a ``GET`` request to ``/servers/<server_id>/tags``. Response :: { 'tags': ['foo', 'bar', 'baz'] } A user can check if a tag exists on a server by making a ``GET`` request to ``/servers/{server_id}/tags/{tag}``. The request returns ``204 No Content`` if the tag exists on the server or ``404 Not Found`` if it does not. A user can filter servers in a ``GET /servers`` request by new filters: * ``tags`` * ``tags-any`` * ``not-tags`` * ``not-tags-any`` These filters can be combined. Also a user can use more than one string tag for each filter.
In this case string tags for each filter must be separated by comma. For example:: GET /servers?tags=red&tags-any=green,orange 2.27 ---- Added support for the new form of microversion headers described in the `Microversion Specification `_. Both the original form of header and the new form is supported. 2.28 ---- Nova API ``hypervisor.cpu_info`` change from string to JSON object. From this version of the API the hypervisor's ``cpu_info`` field will be returned as JSON object (not string) by sending GET request to the ``/v2.1/os-hypervisors/{hypervisor_id}``. 2.29 ---- Updates the POST request body for the ``evacuate`` action to include the optional ``force`` boolean field defaulted to False. Also changes the evacuate action behaviour when providing a ``host`` string field by calling the nova scheduler to verify the provided host unless the ``force`` attribute is set. 2.30 ---- Updates the POST request body for the ``live-migrate`` action to include the optional ``force`` boolean field defaulted to False. Also changes the live-migrate action behaviour when providing a ``host`` string field by calling the nova scheduler to verify the provided host unless the ``force`` attribute is set. 2.31 ---- Fix ``os-console-auth-tokens`` to return connection info for all types of tokens, not just RDP. 2.32 ---- Adds an optional, arbitrary 'tag' item to the 'networks' item in the server boot request body. In addition, every item in the block_device_mapping_v2 array can also have an optional, arbitrary 'tag' item. These tags are used to identify virtual device metadata, as exposed in the metadata API and on the config drive. For example, a network interface on the virtual PCI bus tagged with 'nic1' will appear in the metadata along with its bus (PCI), bus address (ex: 0000:00:02.0), MAC address, and tag ('nic1'). .. note:: A bug has caused the tag attribute to no longer be accepted for networks starting with version 2.37 and for block_device_mapping_v2 starting with version 2.33. In other words, networks could only be tagged between versions 2.32 and 2.36 inclusively and block devices only in version 2.32. As of version 2.42 the tag attribute has been restored and both networks and block devices can be tagged again. 2.33 ---- Support pagination for hypervisor by accepting limit and marker from the GET API request:: GET /v2.1/{tenant_id}/os-hypervisors?marker={hypervisor_id}&limit={limit} In the context of device tagging at server create time, 2.33 also removes the tag attribute from block_device_mapping_v2. This is a bug that is fixed in 2.42, in which the tag attribute is reintroduced. 2.34 ---- Checks in ``os-migrateLive`` before live-migration actually starts are now made in background. ``os-migrateLive`` is not throwing `400 Bad Request` if pre-live-migration checks fail. 2.35 ---- Added pagination support for keypairs. Optional parameters 'limit' and 'marker' were added to GET /os-keypairs request, the default sort_key was changed to 'name' field as ASC order, the generic request format is:: GET /os-keypairs?limit={limit}&marker={kp_name} .. _2.36 microversion: 2.36 ---- All the APIs which proxy to another service were deprecated in this version, also the fping API. Those APIs will return 404 with Microversion 2.36. The network related quotas and limits are removed from API also. 
The deprecated API endpoints as below:: '/images' '/os-networks' '/os-tenant-networks' '/os-fixed-ips' '/os-floating-ips' '/os-floating-ips-bulk' '/os-floating-ip-pools' '/os-floating-ip-dns' '/os-security-groups' '/os-security-group-rules' '/os-security-group-default-rules' '/os-volumes' '/os-snapshots' '/os-baremetal-nodes' '/os-fping' .. note:: A `regression`__ was introduced in this microversion which broke the ``force`` parameter in the ``PUT /os-quota-sets`` API. The fix will have to be applied to restore this functionality. __ https://bugs.launchpad.net/nova/+bug/1733886 .. versionchanged:: 18.0.0 The ``os-fping`` API was completely removed in the 18.0.0 (Rocky) release. On deployments newer than this, the API will return HTTP 410 (Gone) regardless of the requested microversion. .. versionchanged:: 21.0.0 The ``os-security-group-default-rules`` API was completely removed in the 21.0.0 (Ussuri) release. On deployments newer than this, the APIs will return HTTP 410 (Gone) regardless of the requested microversion. .. versionchanged:: 21.0.0 The ``os-networks`` API was partially removed in the 21.0.0 (Ussuri) release. On deployments newer than this, some endpoints of the API will return HTTP 410 (Gone) regardless of the requested microversion. .. versionchanged:: 21.0.0 The ``os-tenant-networks`` API was partially removed in the 21.0.0 (Ussuri) release. On deployments newer than this, some endpoints of the API will return HTTP 410 (Gone) regardless of the requested microversion. 2.37 ---- Added support for automatic allocation of networking, also known as "Get Me a Network". With this microversion, when requesting the creation of a new server (or servers) the ``networks`` entry in the ``server`` portion of the request body is required. The ``networks`` object in the request can either be a list or an enum with values: #. *none* which means no networking will be allocated for the created server(s). #. *auto* which means either a network that is already available to the project will be used, or if one does not exist, will be automatically created for the project. Automatic network allocation for a project only happens once for a project. Subsequent requests using *auto* for the same project will reuse the network that was previously allocated. Also, the ``uuid`` field in the ``networks`` object in the server create request is now strictly enforced to be in UUID format. In the context of device tagging at server create time, 2.37 also removes the tag attribute from networks. This is a bug that is fixed in 2.42, in which the tag attribute is reintroduced. 2.38 (Maximum in Newton) ------------------------ Before version 2.38, the command ``nova list --status invalid_status`` was returning empty list for non admin user and 500 InternalServerError for admin user. As there are sufficient statuses defined already, any invalid status should not be accepted. From this version of the API admin as well as non admin user will get 400 HTTPBadRequest if invalid status is passed to nova list command. 2.39 ---- Deprecates image-metadata proxy API that is just a proxy for Glance API to operate the image metadata. Also removes the extra quota enforcement with Nova `metadata` quota (quota checks for 'createImage' and 'createBackup' actions in Nova were removed). After this version Glance configuration option `image_property_quota` should be used to control the quota of image metadatas. Also, removes the `maxImageMeta` field from `os-limits` API response. 
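For illustration only (the header shown is the standard microversion header described earlier; authentication details are omitted), the change can be observed by requesting the limits API at different microversions::

    GET /limits
    X-OpenStack-Nova-API-Version: 2.39

With microversion 2.39 or later the response no longer contains the `maxImageMeta` field; requests made at microversion 2.38 or earlier still include it.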
2.40 ---- Optional query parameters ``limit`` and ``marker`` were added to the ``os-simple-tenant-usage`` endpoints for pagination. If a limit isn't provided, the configurable ``max_limit`` will be used which currently defaults to 1000. :: GET /os-simple-tenant-usage?limit={limit}&marker={instance_uuid} GET /os-simple-tenant-usage/{tenant_id}?limit={limit}&marker={instance_uuid} A tenant's usage statistics may span multiple pages when the number of instances exceeds limit, and API consumers will need to stitch together the aggregate results if they still want totals for all instances in a specific time window, grouped by tenant. Older versions of the ``os-simple-tenant-usage`` endpoints will not accept these new paging query parameters, but they will start to silently limit by ``max_limit`` to encourage the adoption of this new microversion, and circumvent the existing possibility of DoS-like usage requests when there are thousands of instances. 2.41 ---- The 'uuid' attribute of an aggregate is now returned from calls to the `/os-aggregates` endpoint. This attribute is auto-generated upon creation of an aggregate. The `os-aggregates` API resource endpoint remains an administrator-only API. 2.42 (Maximum in Ocata) ----------------------- In the context of device tagging at server create time, a bug has caused the tag attribute to no longer be accepted for networks starting with version 2.37 and for block_device_mapping_v2 starting with version 2.33. Microversion 2.42 restores the tag parameter to both networks and block_device_mapping_v2, allowing networks and block devices to be tagged again. 2.43 ---- The ``os-hosts`` API is deprecated as of the 2.43 microversion. Requests made with microversion >= 2.43 will result in a 404 error. To list and show host details, use the ``os-hypervisors`` API. To enable or disable a service, use the ``os-services`` API. There is no replacement for the `shutdown`, `startup`, `reboot`, or `maintenance_mode` actions as those are system-level operations which should be outside of the control of the compute service. 2.44 ---- The following APIs which are considered as proxies of Neutron networking API, are deprecated and will result in a 404 error response in new Microversion:: POST /servers/{server_uuid}/action { "addFixedIp": {...} } POST /servers/{server_uuid}/action { "removeFixedIp": {...} } POST /servers/{server_uuid}/action { "addFloatingIp": {...} } POST /servers/{server_uuid}/action { "removeFloatingIp": {...} } Those server actions can be replaced by calling the Neutron API directly. The nova-network specific API to query the server's interfaces is deprecated:: GET /servers/{server_uuid}/os-virtual-interfaces To query attached neutron interfaces for a specific server, the API `GET /servers/{server_uuid}/os-interface` can be used. 2.45 ---- The ``createImage`` and ``createBackup`` server action APIs no longer return a ``Location`` header in the response for the snapshot image, they now return a json dict in the response body with an ``image_id`` key and uuid value. 2.46 ---- The request_id created for every inbound request is now returned in ``X-OpenStack-Request-ID`` in addition to ``X-Compute-Request-ID`` to be consistent with the rest of OpenStack. This is a signaling only microversion, as these header settings happen well before microversion processing. 2.47 ---- Replace the ``flavor`` name/ref with the actual flavor details from the embedded flavor object when displaying server details. 
Requests made with microversion >= 2.47 will no longer return the flavor ID/link but instead will return a subset of the flavor details. If the user is prevented by policy from indexing extra-specs, then the ``extra_specs`` field will not be included in the flavor information. 2.48 ---- Before version 2.48, VM diagnostics response was just a 'blob' of data returned by each hypervisor. From this version VM diagnostics response is standardized. It has a set of fields which each hypervisor will try to fill. If a hypervisor driver is unable to provide a specific field then this field will be reported as 'None'. 2.49 ---- Continuing from device role tagging at server create time introduced in version 2.32 and later fixed in 2.42, microversion 2.49 allows the attachment of network interfaces and volumes with an optional ``tag`` parameter. This tag is used to identify the virtual devices in the guest and is exposed in the metadata API. Because the config drive cannot be updated while the guest is running, it will only contain metadata of devices that were tagged at boot time. Any changes made to devices while the instance is running - be it detaching a tagged device or performing a tagged device attachment - will not be reflected in the config drive. Tagged volume attachment is not supported for shelved-offloaded instances. 2.50 ---- The ``server_groups`` and ``server_group_members`` keys are exposed in GET & PUT ``os-quota-class-sets`` APIs Response body. Networks related quotas have been filtered out from os-quota-class. Below quotas are filtered out and not available in ``os-quota-class-sets`` APIs from this microversion onwards. - "fixed_ips" - "floating_ips" - "networks", - "security_group_rules" - "security_groups" 2.51 ---- There are two changes for the 2.51 microversion: * Add ``volume-extended`` event name to the ``os-server-external-events`` API. This will be used by the Block Storage service when extending the size of an attached volume. This signals the Compute service to perform any necessary actions on the compute host or hypervisor to adjust for the new volume block device size. * Expose the ``events`` field in the response body for the ``GET /servers/{server_id}/os-instance-actions/{request_id}`` API. This is useful for API users to monitor when a volume extend operation completes for the given server instance. By default only users with the administrator role will be able to see event ``traceback`` details. 2.52 ---- Adds support for applying tags when creating a server. The tag schema is the same as in the `2.26`_ microversion. .. _2.53-microversion: 2.53 (Maximum in Pike) ---------------------- **os-services** Services are now identified by uuid instead of database id to ensure uniqueness across cells. 
This microversion brings the following changes: * ``GET /os-services`` returns a uuid in the ``id`` field of the response * ``DELETE /os-services/{service_uuid}`` requires a service uuid in the path * The following APIs have been superseded by ``PUT /os-services/{service_uuid}/``: * ``PUT /os-services/disable`` * ``PUT /os-services/disable-log-reason`` * ``PUT /os-services/enable`` * ``PUT /os-services/force-down`` ``PUT /os-services/{service_uuid}`` takes the following fields in the body: * ``status`` - can be either "enabled" or "disabled" to enable or disable the given service * ``disabled_reason`` - specify with status="disabled" to log a reason for why the service is disabled * ``forced_down`` - boolean indicating if the service was forced down by an external service * ``PUT /os-services/{service_uuid}`` will now return a full service resource representation like in a ``GET`` response **os-hypervisors** Hypervisors are now identified by uuid instead of database id to ensure uniqueness across cells. This microversion brings the following changes: * ``GET /os-hypervisors/{hypervisor_hostname_pattern}/search`` is deprecated and replaced with the ``hypervisor_hostname_pattern`` query parameter on the ``GET /os-hypervisors`` and ``GET /os-hypervisors/detail`` APIs. Paging with ``hypervisor_hostname_pattern`` is not supported. * ``GET /os-hypervisors/{hypervisor_hostname_pattern}/servers`` is deprecated and replaced with the ``with_servers`` query parameter on the ``GET /os-hypervisors`` and ``GET /os-hypervisors/detail`` APIs. * ``GET /os-hypervisors/{hypervisor_id}`` supports the ``with_servers`` query parameter to include hosted server details in the response. * ``GET /os-hypervisors/{hypervisor_id}`` and ``GET /os-hypervisors/{hypervisor_id}/uptime`` APIs now take a uuid value for the ``{hypervisor_id}`` path parameter. * The ``GET /os-hypervisors`` and ``GET /os-hypervisors/detail`` APIs will now use a uuid marker for paging across cells. * The following APIs will now return a uuid value for the hypervisor id and optionally service id fields in the response: * ``GET /os-hypervisors`` * ``GET /os-hypervisors/detail`` * ``GET /os-hypervisors/{hypervisor_id}`` * ``GET /os-hypervisors/{hypervisor_id}/uptime`` 2.54 ---- Allow the user to set the server key pair while rebuilding. 2.55 ---- Adds a ``description`` field to the flavor resource in the following APIs: * ``GET /flavors`` * ``GET /flavors/detail`` * ``GET /flavors/{flavor_id}`` * ``POST /flavors`` * ``PUT /flavors/{flavor_id}`` The embedded flavor description will not be included in server representations. 2.56 ---- Updates the POST request body for the ``migrate`` action to include the optional ``host`` string field defaulted to ``null``. If ``host`` is set the migrate action verifies the provided host with the nova scheduler and uses it as the destination for the migration. 2.57 ---- The 2.57 microversion makes the following changes: * The ``personality`` parameter is removed from the server create and rebuild APIs. * The ``user_data`` parameter is added to the server rebuild API. * The ``maxPersonality`` and ``maxPersonalitySize`` limits are excluded from the ``GET /limits`` API response. * The ``injected_files``, ``injected_file_content_bytes`` and ``injected_file_path_bytes`` quotas are removed from the ``os-quota-sets`` and ``os-quota-class-sets`` APIs. 2.58 ---- Add pagination support and ``changes-since`` filter for os-instance-actions API. 
Users can now use ``limit`` and ``marker`` to perform paginated query when listing instance actions. Users can also use ``changes-since`` filter to filter the results based on the last time the instance action was updated. 2.59 ---- Added pagination support for migrations, there are four changes: * Add pagination support and ``changes-since`` filter for os-migrations API. Users can now use ``limit`` and ``marker`` to perform paginate query when listing migrations. * Users can also use ``changes-since`` filter to filter the results based on the last time the migration record was updated. * ``GET /os-migrations``, ``GET /servers/{server_id}/migrations/{migration_id}`` and ``GET /servers/{server_id}/migrations`` will now return a uuid value in addition to the migrations id in the response. * The query parameter schema of the ``GET /os-migrations`` API no longer allows additional properties. .. _api-microversion-queens: 2.60 (Maximum in Queens) ------------------------ From this version of the API users can attach a ``multiattach`` capable volume to multiple instances. The API request for creating the additional attachments is the same. The chosen virt driver and the volume back end has to support the functionality as well. 2.61 ---- Exposes flavor extra_specs in the flavor representation. Now users can see the flavor extra-specs in flavor APIs response and do not need to call ``GET /flavors/{flavor_id}/os-extra_specs`` API. If the user is prevented by policy from indexing extra-specs, then the ``extra_specs`` field will not be included in the flavor information. Flavor extra_specs will be included in Response body of the following APIs: * ``GET /flavors/detail`` * ``GET /flavors/{flavor_id}`` * ``POST /flavors`` * ``PUT /flavors/{flavor_id}`` 2.62 ---- Adds ``host`` (hostname) and ``hostId`` (an obfuscated hashed host id string) fields to the instance action ``GET /servers/{server_id}/os-instance-actions/{req_id}`` API. The display of the newly added ``host`` field will be controlled via policy rule ``os_compute_api:os-instance-actions:events``, which is the same policy used for the ``events.traceback`` field. If the user is prevented by policy, only ``hostId`` will be displayed. 2.63 ---- Adds support for the ``trusted_image_certificates`` parameter, which is used to define a list of trusted certificate IDs that can be used during image signature verification and certificate validation. The list is restricted to a maximum of 50 IDs. Note that ``trusted_image_certificates`` is not supported with volume-backed servers. The ``trusted_image_certificates`` request parameter can be passed to the server create and rebuild APIs: * ``POST /servers`` * ``POST /servers/{server_id}/action (rebuild)`` The ``trusted_image_certificates`` parameter will be in the response body of the following APIs: * ``GET /servers/detail`` * ``GET /servers/{server_id}`` * ``PUT /servers/{server_id}`` * ``POST /servers/{server_id}/action (rebuild)`` 2.64 ---- Enable users to define the policy rules on server group policy to meet more advanced policy requirement. This microversion brings the following changes in server group APIs: * Add ``policy`` and ``rules`` fields in the request of POST ``/os-server-groups``. The ``policy`` represents the name of policy. The ``rules`` field, which is a dict, can be applied to the policy, which currently only support ``max_server_per_host`` for ``anti-affinity`` policy. 
* The ``policy`` and ``rules`` fields will be returned in response body of POST, GET ``/os-server-groups`` API and GET ``/os-server-groups/{server_group_id}`` API. * The ``policies`` and ``metadata`` fields have been removed from the response body of POST, GET ``/os-server-groups`` API and GET ``/os-server-groups/{server_group_id}`` API. 2.65 (Maximum in Rocky) ----------------------- Add support for abort live migrations in ``queued`` and ``preparing`` status for API ``DELETE /servers/{server_id}/migrations/{migration_id}``. 2.66 ---- The ``changes-before`` filter can be included as a request parameter of the following APIs to filter by changes before or equal to the resource ``updated_at`` time: * ``GET /servers`` * ``GET /servers/detail`` * ``GET /servers/{server_id}/os-instance-actions`` * ``GET /os-migrations`` 2.67 ---- Adds the ``volume_type`` parameter to ``block_device_mapping_v2``, which can be used to specify cinder ``volume_type`` when creating a server. 2.68 ---- Remove support for forced live migration and evacuate server actions. 2.69 ---- Add support for returning minimal constructs for ``GET /servers``, ``GET /servers/detail``, ``GET /servers/{server_id}`` and ``GET /os-services`` when there is a transient unavailability condition in the deployment like an infrastructure failure. Starting from this microversion, the responses from the down part of the infrastructure for the above four requests will have missing key values to make it more resilient. The response body will only have a minimal set of information obtained from the available information in the API database for the down cells. See `handling down cells `__ for more information. 2.70 ---- Exposes virtual device tags for volume attachments and virtual interfaces (ports). A ``tag`` parameter is added to the response body for the following APIs: **Volumes** * GET /servers/{server_id}/os-volume_attachments (list) * GET /servers/{server_id}/os-volume_attachments/{volume_id} (show) * POST /servers/{server_id}/os-volume_attachments (attach) **Ports** * GET /servers/{server_id}/os-interface (list) * GET /servers/{server_id}/os-interface/{port_id} (show) * POST /servers/{server_id}/os-interface (attach) 2.71 ---- The ``server_groups`` parameter will be in the response body of the following APIs to list the server groups to which the server belongs: * ``GET /servers/{server_id}`` * ``PUT /servers/{server_id}`` * ``POST /servers/{server_id}/action (rebuild)`` 2.72 (Maximum in Stein) ----------------------- API microversion 2.72 adds support for creating servers with neutron ports that has resource request, e.g. neutron ports with `QoS minimum bandwidth rule`_. Deleting servers with such ports have already been handled properly as well as detaching these type of ports. API limitations: * Creating servers with Neutron networks having QoS minimum bandwidth rule is not supported. * Attaching Neutron ports and networks having QoS minimum bandwidth rule is not supported. * Moving (resizing, migrating, live-migrating, evacuating, unshelving after shelve offload) servers with ports having resource request is not yet supported. .. _QoS minimum bandwidth rule: https://docs.openstack.org/neutron/latest/admin/config-qos-min-bw.html 2.73 ---- API microversion 2.73 adds support for specifying a reason when locking the server and exposes this information via ``GET /servers/detail``, ``GET /servers/{server_id}``, ``PUT servers/{server_id}`` and ``POST /servers/{server_id}/action`` where the action is rebuild. 
2.74
----

API microversion 2.74 adds support for specifying optional ``host`` and/or
``hypervisor_hostname`` parameters in the request body of ``POST /servers``.
These request a specific destination host/node to boot the requested server.
These parameters are mutually exclusive with the special
``availability_zone`` format of ``zone:host:node``. Unlike
``zone:host:node``, the ``host`` and/or ``hypervisor_hostname`` parameters
still allow scheduler filters to be run. If the requested host/node is
unavailable or otherwise unsuitable, an earlier failure will be raised. There
is also a new policy named
``compute:servers:create:requested_destination``. By default, the destination
can be specified by administrators only.

2.75
----

Multiple API cleanups are done in API microversion 2.75:

* 400 error response for an unknown parameter in the querystring or request
  body.
* Make the server representation consistent among GET, PUT and rebuild server
  API responses. ``PUT /servers/{server_id}`` and
  ``POST /servers/{server_id}/action {rebuild}`` API responses are modified
  to add all the missing fields which are returned by
  ``GET /servers/{server_id}``.
* Change the default return value of the ``swap`` field from the empty string
  to 0 (integer) in flavor APIs.
* Always return the ``servers`` field in the response of the
  ``GET /os-hypervisors``, ``GET /os-hypervisors/detail`` and
  ``GET /os-hypervisors/{hypervisor_id}`` APIs even when there are no servers
  on a hypervisor.

2.76
----

Adds the ``power-update`` event name to the ``os-server-external-events``
API. The changes to the power state of an instance caused by this event can
be viewed through ``GET /servers/{server_id}/os-instance-actions`` and
``GET /servers/{server_id}/os-instance-actions/{request_id}``.

2.77
----

API microversion 2.77 adds support for specifying an availability zone when
unshelving a shelved offloaded server.

2.78
----

Add server sub-resource ``topology`` to show server NUMA information.

* ``GET /servers/{server_id}/topology``

The default behavior is configurable using two new policies:

* ``compute:server:topology:index``
* ``compute:server:topology:host:index``

.. Keep a reference for python-novaclient releasenotes
.. _id71:

2.79 (Maximum in Train)
-----------------------

API microversion 2.79 adds support for specifying the
``delete_on_termination`` field in the request body when attaching a volume
to a server, to support configuring whether to delete the data volume when
the server is destroyed. Also, ``delete_on_termination`` is added to the GET
responses when showing attached volumes, and the ``delete_on_termination``
field is contained in the POST API response body when attaching a volume.

The affected APIs are as follows:

* ``POST /servers/{server_id}/os-volume_attachments``
* ``GET /servers/{server_id}/os-volume_attachments``
* ``GET /servers/{server_id}/os-volume_attachments/{volume_id}``
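For illustration, a minimal sketch of attaching a volume with
``delete_on_termination`` at microversion 2.79, using the ``requests``
library; the endpoint, token and UUIDs are hypothetical placeholders.

.. code-block:: python

   import requests

   NOVA = "http://controller:8774/v2.1"  # hypothetical endpoint
   HEADERS = {
       "X-Auth-Token": "<token>",
       "X-OpenStack-Nova-API-Version": "2.79",
   }

   # Attach a volume that should be deleted together with the server.
   body = {
       "volumeAttachment": {
           "volumeId": "<volume-uuid>",
           "delete_on_termination": True,
       }
   }
   resp = requests.post(
       f"{NOVA}/servers/<server-uuid>/os-volume_attachments",
       json=body, headers=HEADERS)
   resp.raise_for_status()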
2.80
----

Microversion 2.80 changes the list migrations APIs and the os-migrations API.

Expose the ``user_id`` and ``project_id`` fields in the following APIs:

* ``GET /os-migrations``
* ``GET /servers/{server_id}/migrations``
* ``GET /servers/{server_id}/migrations/{migration_id}``

The ``GET /os-migrations`` API will also have optional ``user_id`` and
``project_id`` query parameters for filtering migrations by user and/or
project, for example:

* ``GET /os-migrations?user_id=ef9d34b4-45d0-4530-871b-3fb535988394``
* ``GET /os-migrations?project_id=011ee9f4-8f16-4c38-8633-a254d420fd54``
* ``GET /os-migrations?user_id=ef9d34b4-45d0-4530-871b-3fb535988394&project_id=011ee9f4-8f16-4c38-8633-a254d420fd54``

2.81
----

Adds support for image cache management by aggregate by adding
``POST /os-aggregates/{aggregate_id}/images``.

2.82
----

Adds the ``accelerator-request-bound`` event to the
``os-server-external-events`` API. This event is sent by Cyborg to indicate
completion of the binding event for one accelerator request (ARQ) associated
with an instance.

2.83
----

Allow the following filter parameters for ``GET /servers/detail`` and
``GET /servers`` for non-admin users:

* ``availability_zone``
* ``config_drive``
* ``key_name``
* ``created_at``
* ``launched_at``
* ``terminated_at``
* ``power_state``
* ``task_state``
* ``vm_state``
* ``progress``
* ``user_id``

2.84
----

The ``GET /servers/{server_id}/os-instance-actions/{request_id}`` API returns
a ``details`` parameter for each failed event with a fault message, similar
to the server ``fault.message`` parameter in ``GET /servers/{server_id}`` for
a server with status ``ERROR``.

2.85
----

Adds the ability to specify ``delete_on_termination`` in the
``PUT /servers/{server_id}/os-volume_attachments/{volume_id}`` API, which
allows changing the behavior of volume deletion on instance deletion.

2.86
----

Add support for validation of known extra specs. This is enabled by default
for the following APIs:

* ``POST /flavors/{flavor_id}/os-extra_specs``
* ``PUT /flavors/{flavor_id}/os-extra_specs/{id}``

Validation is only used for recognized extra spec namespaces, currently:
``accel``, ``aggregate_instance_extra_specs``, ``capabilities``, ``hw``,
``hw_rng``, ``hw_video``, ``os``, ``pci_passthrough``, ``powervm``,
``quota``, ``resources``, ``trait``, and ``vmware``.

.. _microversion 2.87:

2.87 (Maximum in Ussuri and Victoria)
-------------------------------------

Adds support for rescuing boot-from-volume instances when the compute host
reports the ``COMPUTE_RESCUE_BFV`` capability trait.

.. _microversion 2.88:

2.88 (Maximum in Wallaby)
-------------------------

The following fields are no longer included in responses for the
``GET /os-hypervisors/detail`` and ``GET /os-hypervisors/{hypervisor_id}``
APIs:

- ``current_workload``
- ``cpu_info``
- ``vcpus``
- ``vcpus_used``
- ``free_disk_gb``
- ``local_gb``
- ``local_gb_used``
- ``disk_available_least``
- ``free_ram_mb``
- ``memory_mb``
- ``memory_mb_used``
- ``running_vms``

These fields were removed because the information they provided was
frequently misleading or outright wrong, and much of it can be better queried
from placement. In addition, the ``GET /os-hypervisors/statistics`` API,
which provided a summary view with just the fields listed above, has been
removed entirely and will now raise an HTTP 404 with microversion 2.88 or
greater.

Finally, the ``GET /os-hypervisors/{hypervisor}/uptime`` API, which provided
a similar response to the ``GET /os-hypervisors/detail`` and
``GET /os-hypervisors/{hypervisor_id}`` APIs but with an additional
``uptime`` field, has been removed in favour of including this field in the
primary ``GET /os-hypervisors/detail`` and
``GET /os-hypervisors/{hypervisor_id}`` APIs.
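For illustration, a minimal sketch of reading the equivalent capacity and
usage data from placement instead, using the ``requests`` library; the
placement endpoint, token and resource provider UUID are hypothetical
placeholders.

.. code-block:: python

   import requests

   PLACEMENT = "http://controller:8778"  # hypothetical placement endpoint
   HEADERS = {"X-Auth-Token": "<token>"}

   # Each compute node is a resource provider; its inventories and usages
   # cover much of what the removed os-hypervisors fields used to report.
   rp = "<compute-node-rp-uuid>"
   inventories = requests.get(
       f"{PLACEMENT}/resource_providers/{rp}/inventories", headers=HEADERS)
   usages = requests.get(
       f"{PLACEMENT}/resource_providers/{rp}/usages", headers=HEADERS)
   print(inventories.json(), usages.json())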
.. _microversion 2.89:

2.89
----

``attachment_id`` and ``bdm_uuid`` are now included in the responses for
``GET /servers/{server_id}/os-volume_attachments`` and
``GET /servers/{server_id}/os-volume_attachments/{volume_id}``. Additionally,
the ``id`` field is dropped from the response as it duplicates the
``volumeId`` field.

.. _microversion 2.90:

2.90 (Maximum in Xena and Yoga)
-------------------------------

The ``POST /servers`` (create server), ``PUT /servers/{id}`` (update server)
and ``POST /servers/{server_id}/action (rebuild)`` (rebuild server) APIs now
accept a ``hostname`` parameter, allowing users to configure a hostname when
creating the instance. When specified, this will replace the auto-generated
hostname based on the display name.

In addition, the ``OS-EXT-SRV-ATTR:hostname`` field for all server responses
is now visible to all users. Previously this was an admin-only field.

.. _microversion 2.91:

2.91
----

Add support to unshelve an instance to a specific host. Add support to pin a
server to an availability zone or unpin a server from any availability zone.

.. _microversion 2.92:

2.92
----

The ``POST /os-keypairs`` API now forbids generating a keypair and allows new
safe characters, specifically '@' and '.' (dot character).

.. _microversion 2.93:

2.93 (Maximum in Zed)
---------------------

Add support for volume-backed server rebuild. The end user provides the image
with the rebuild command and the volume is rebuilt with the new image,
similar to the result of rebuilding an ephemeral disk.

2.94
----

The ``hostname`` parameter to the ``POST /servers`` (create server),
``PUT /servers/{id}`` (update server) and
``POST /servers/{server_id}/action (rebuild)`` (rebuild server) APIs is now
allowed to be a Fully Qualified Domain Name (FQDN).

.. _microversion 2.95:

2.95 (Maximum in 2023.1 Antelope and 2023.2 Bobcat)
---------------------------------------------------

Any evacuated instances will now be stopped at the destination. This requires
a minimum nova release of 27.0.0, OpenStack release 2023.1 Antelope.
Operators can still use the previous microversion for the older behavior.

.. _microversion 2.96:

2.96 (Maximum in 2024.1 Caracal and 2024.2 Dalmatian)
-----------------------------------------------------

The ``server show``, ``server list --long``, ``server update``, and
``server rebuild`` responses now include the pinned availability zone as
well.

.. _microversion 2.97:

2.97
----

This microversion introduces the new Manila Share Attachment feature,
streamlining the process of attaching and mounting Manila file shares to
instances. It includes a new set of APIs to easily add, remove, list, and
display shares. For detailed insights and usage instructions, please refer to
the `manage-shares documentation`_.

.. _manage-shares documentation: https://docs.openstack.org/nova/latest/admin/manage-shares.html

..
_microversion 2.98: 2.98 ---- Add support for including image properties as new ``properties`` subkey under the struct at the existing ``image`` key in the response for ``GET /servers/{server_id}`` (server show), ``GET /servers/detail`` (list server --long), ``PUT /servers/{server_id}`` (server update), and in the rebuild case of ``POST /server/{server_id}/action`` (server rebuild) API response. .. _microversion 2.99: 2.99 ---- Add the ``spice-direct`` console type to the spice console protocol. Also add a ``tls_port`` field to the return value from ``GET /os-console-auth-tokens/{console_token}`` and no longer allow random query string parameters. .. _microversion 2.100: 2.100 (Maximum in 2025.1 Epoxy and 2025.2 Flamingo) ----------------------------------------------------- The ``GET /servers/{server_id}``, ``GET /servers/detail`` ``PUT /servers/{server_id}`` and ``POST /server/{server_id}/action`` (rebuild) responses now include the scheduler hints provided during server creation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/routes.py0000664000175000017500000007506200000000000021646 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
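# This module builds the compute API v2.1 routing table: ROUTE_LIST below
# maps URL paths to per-method (controller factory, action) pairs, and
# APIRouterV21 turns that list into the WSGI router used by the API service.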
import functools import nova.api.openstack from nova.api.openstack.compute import admin_actions from nova.api.openstack.compute import admin_password from nova.api.openstack.compute import agents from nova.api.openstack.compute import aggregates from nova.api.openstack.compute import assisted_volume_snapshots from nova.api.openstack.compute import attach_interfaces from nova.api.openstack.compute import availability_zone from nova.api.openstack.compute import baremetal_nodes from nova.api.openstack.compute import cells from nova.api.openstack.compute import certificates from nova.api.openstack.compute import cloudpipe from nova.api.openstack.compute import console_auth_tokens from nova.api.openstack.compute import console_output from nova.api.openstack.compute import consoles from nova.api.openstack.compute import create_backup from nova.api.openstack.compute import deferred_delete from nova.api.openstack.compute import evacuate from nova.api.openstack.compute import extension_info from nova.api.openstack.compute import fixed_ips from nova.api.openstack.compute import flavor_access from nova.api.openstack.compute import flavors from nova.api.openstack.compute import flavors_extraspecs from nova.api.openstack.compute import floating_ip_dns from nova.api.openstack.compute import floating_ip_pools from nova.api.openstack.compute import floating_ips from nova.api.openstack.compute import floating_ips_bulk from nova.api.openstack.compute import fping from nova.api.openstack.compute import hosts from nova.api.openstack.compute import hypervisors from nova.api.openstack.compute import image_metadata from nova.api.openstack.compute import images from nova.api.openstack.compute import instance_actions from nova.api.openstack.compute import instance_usage_audit_log from nova.api.openstack.compute import ips from nova.api.openstack.compute import keypairs from nova.api.openstack.compute import limits from nova.api.openstack.compute import lock_server from nova.api.openstack.compute import migrate_server from nova.api.openstack.compute import migrations from nova.api.openstack.compute import multinic from nova.api.openstack.compute import networks from nova.api.openstack.compute import networks_associate from nova.api.openstack.compute import pause_server from nova.api.openstack.compute import quota_classes from nova.api.openstack.compute import quota_sets from nova.api.openstack.compute import remote_consoles from nova.api.openstack.compute import rescue from nova.api.openstack.compute import security_group_default_rules from nova.api.openstack.compute import security_groups from nova.api.openstack.compute import server_diagnostics from nova.api.openstack.compute import server_external_events from nova.api.openstack.compute import server_groups from nova.api.openstack.compute import server_metadata from nova.api.openstack.compute import server_migrations from nova.api.openstack.compute import server_password from nova.api.openstack.compute import server_shares from nova.api.openstack.compute import server_tags from nova.api.openstack.compute import server_topology from nova.api.openstack.compute import servers from nova.api.openstack.compute import services from nova.api.openstack.compute import shelve from nova.api.openstack.compute import simple_tenant_usage from nova.api.openstack.compute import snapshots from nova.api.openstack.compute import suspend_server from nova.api.openstack.compute import tenant_networks from nova.api.openstack.compute import versionsV21 from nova.api.openstack.compute 
import virtual_interfaces from nova.api.openstack.compute import volume_attachments from nova.api.openstack.compute import volumes from nova.api.openstack import wsgi from nova.api import wsgi as base_wsgi def _create_controller(main_controller, action_controller_list): """This is a helper method to create controller with a list of action controller. """ controller = wsgi.Resource(main_controller()) for ctl in action_controller_list: controller.register_subcontroller_actions(ctl()) return controller agents_controller = functools.partial( _create_controller, agents.AgentController, []) aggregates_controller = functools.partial( _create_controller, aggregates.AggregateController, []) assisted_volume_snapshots_controller = functools.partial( _create_controller, assisted_volume_snapshots.AssistedVolumeSnapshotsController, []) availability_zone_controller = functools.partial( _create_controller, availability_zone.AvailabilityZoneController, []) baremetal_nodes_controller = functools.partial( _create_controller, baremetal_nodes.BareMetalNodeController, []) cells_controller = functools.partial( _create_controller, cells.CellsController, []) certificates_controller = functools.partial( _create_controller, certificates.CertificatesController, []) cloudpipe_controller = functools.partial( _create_controller, cloudpipe.CloudpipeController, []) extensions_controller = functools.partial( _create_controller, extension_info.ExtensionInfoController, []) fixed_ips_controller = functools.partial(_create_controller, fixed_ips.FixedIPController, []) flavor_controller = functools.partial(_create_controller, flavors.FlavorsController, [ flavor_access.FlavorActionController ] ) flavor_access_controller = functools.partial(_create_controller, flavor_access.FlavorAccessController, []) flavor_extraspec_controller = functools.partial(_create_controller, flavors_extraspecs.FlavorExtraSpecsController, []) floating_ip_dns_controller = functools.partial(_create_controller, floating_ip_dns.FloatingIPDNSDomainController, []) floating_ip_dnsentry_controller = functools.partial(_create_controller, floating_ip_dns.FloatingIPDNSEntryController, []) floating_ip_pools_controller = functools.partial(_create_controller, floating_ip_pools.FloatingIPPoolsController, []) floating_ips_controller = functools.partial(_create_controller, floating_ips.FloatingIPController, []) floating_ips_bulk_controller = functools.partial(_create_controller, floating_ips_bulk.FloatingIPBulkController, []) fping_controller = functools.partial(_create_controller, fping.FpingController, []) hosts_controller = functools.partial( _create_controller, hosts.HostController, []) hypervisors_controller = functools.partial( _create_controller, hypervisors.HypervisorsController, []) images_controller = functools.partial( _create_controller, images.ImagesController, []) image_metadata_controller = functools.partial( _create_controller, image_metadata.ImageMetadataController, []) instance_actions_controller = functools.partial(_create_controller, instance_actions.InstanceActionsController, []) instance_usage_audit_log_controller = functools.partial(_create_controller, instance_usage_audit_log.InstanceUsageAuditLogController, []) ips_controller = functools.partial(_create_controller, ips.IPsController, []) keypairs_controller = functools.partial( _create_controller, keypairs.KeypairController, []) limits_controller = functools.partial( _create_controller, limits.LimitsController, []) migrations_controller = functools.partial(_create_controller, 
migrations.MigrationsController, []) networks_controller = functools.partial(_create_controller, networks.NetworkController, [networks_associate.NetworkAssociateActionController]) quota_classes_controller = functools.partial(_create_controller, quota_classes.QuotaClassSetsController, []) quota_set_controller = functools.partial(_create_controller, quota_sets.QuotaSetsController, []) security_group_controller = functools.partial(_create_controller, security_groups.SecurityGroupController, []) security_group_default_rules_controller = functools.partial(_create_controller, security_group_default_rules.SecurityGroupDefaultRulesController, []) security_group_rules_controller = functools.partial(_create_controller, security_groups.SecurityGroupRulesController, []) server_controller = functools.partial(_create_controller, servers.ServersController, [ admin_actions.AdminActionsController, admin_password.AdminPasswordController, console_output.ConsoleOutputController, create_backup.CreateBackupController, deferred_delete.DeferredDeleteController, evacuate.EvacuateController, floating_ips.FloatingIPActionController, lock_server.LockServerController, migrate_server.MigrateServerController, multinic.MultinicController, pause_server.PauseServerController, remote_consoles.RemoteConsolesController, rescue.RescueController, security_groups.SecurityGroupActionController, shelve.ShelveController, suspend_server.SuspendServerController ] ) console_auth_tokens_controller = functools.partial(_create_controller, console_auth_tokens.ConsoleAuthTokensController, []) consoles_controller = functools.partial(_create_controller, consoles.ConsolesController, []) server_diagnostics_controller = functools.partial(_create_controller, server_diagnostics.ServerDiagnosticsController, []) server_external_events_controller = functools.partial(_create_controller, server_external_events.ServerExternalEventsController, []) server_groups_controller = functools.partial(_create_controller, server_groups.ServerGroupController, []) server_metadata_controller = functools.partial(_create_controller, server_metadata.ServerMetadataController, []) server_migrations_controller = functools.partial(_create_controller, server_migrations.ServerMigrationsController, []) server_os_interface_controller = functools.partial(_create_controller, attach_interfaces.InterfaceAttachmentController, []) server_password_controller = functools.partial(_create_controller, server_password.ServerPasswordController, []) server_remote_consoles_controller = functools.partial(_create_controller, remote_consoles.RemoteConsolesController, []) server_security_groups_controller = functools.partial(_create_controller, security_groups.ServerSecurityGroupController, []) server_shares_controller = functools.partial(_create_controller, server_shares.ServerSharesController, []) server_tags_controller = functools.partial(_create_controller, server_tags.ServerTagsController, []) server_topology_controller = functools.partial(_create_controller, server_topology.ServerTopologyController, []) server_volume_attachments_controller = functools.partial(_create_controller, volume_attachments.VolumeAttachmentController, []) services_controller = functools.partial(_create_controller, services.ServiceController, []) simple_tenant_usage_controller = functools.partial(_create_controller, simple_tenant_usage.SimpleTenantUsageController, []) snapshots_controller = functools.partial(_create_controller, snapshots.SnapshotController, []) tenant_networks_controller = 
functools.partial(_create_controller, tenant_networks.TenantNetworkController, []) version_controller = functools.partial(_create_controller, versionsV21.VersionsController, []) virtual_interfaces_controller = functools.partial(_create_controller, virtual_interfaces.ServerVirtualInterfaceController, []) volumes_controller = functools.partial(_create_controller, volumes.VolumeController, []) # NOTE(alex_xu): This is structure of this route list as below: # ( # ('Route path', { # 'HTTP method: [ # 'Controller', # 'The method of controller is used to handle this route' # ], # ... # }), # ... # ) # # Also note that this is ordered tuple. For example, the '/servers/detail' # should be in the front of '/servers/{id}', otherwise the request to # '/servers/detail' always matches to '/servers/{id}' as the id is 'detail'. ROUTE_LIST = ( # NOTE: This is a redirection from '' to '/'. The request to the '/v2.1' # or '/2.0' without the ending '/' will get a response with status code # '302' returned. ('', '/'), ('/', { 'GET': [version_controller, 'show'] }), ('/versions/{id}', { 'GET': [version_controller, 'show'] }), ('/extensions', { 'GET': [extensions_controller, 'index'], }), ('/extensions/{id}', { 'GET': [extensions_controller, 'show'], }), ('/flavors', { 'GET': [flavor_controller, 'index'], 'POST': [flavor_controller, 'create'] }), ('/flavors/detail', { 'GET': [flavor_controller, 'detail'] }), ('/flavors/{id}', { 'GET': [flavor_controller, 'show'], 'PUT': [flavor_controller, 'update'], 'DELETE': [flavor_controller, 'delete'] }), ('/flavors/{id}/action', { 'POST': [flavor_controller, 'action'] }), ('/flavors/{flavor_id}/os-extra_specs', { 'GET': [flavor_extraspec_controller, 'index'], 'POST': [flavor_extraspec_controller, 'create'] }), ('/flavors/{flavor_id}/os-extra_specs/{id}', { 'GET': [flavor_extraspec_controller, 'show'], 'PUT': [flavor_extraspec_controller, 'update'], 'DELETE': [flavor_extraspec_controller, 'delete'] }), ('/flavors/{flavor_id}/os-flavor-access', { 'GET': [flavor_access_controller, 'index'] }), ('/images', { 'GET': [images_controller, 'index'] }), ('/images/detail', { 'GET': [images_controller, 'detail'], }), ('/images/{id}', { 'GET': [images_controller, 'show'], 'DELETE': [images_controller, 'delete'] }), ('/images/{image_id}/metadata', { 'GET': [image_metadata_controller, 'index'], 'POST': [image_metadata_controller, 'create'], 'PUT': [image_metadata_controller, 'update_all'] }), ('/images/{image_id}/metadata/{id}', { 'GET': [image_metadata_controller, 'show'], 'PUT': [image_metadata_controller, 'update'], 'DELETE': [image_metadata_controller, 'delete'] }), ('/limits', { 'GET': [limits_controller, 'index'] }), ('/os-agents', { 'GET': [agents_controller, 'index'], 'POST': [agents_controller, 'create'] }), ('/os-agents/{id}', { 'PUT': [agents_controller, 'update'], 'DELETE': [agents_controller, 'delete'] }), ('/os-aggregates', { 'GET': [aggregates_controller, 'index'], 'POST': [aggregates_controller, 'create'] }), ('/os-aggregates/{id}', { 'GET': [aggregates_controller, 'show'], 'PUT': [aggregates_controller, 'update'], 'DELETE': [aggregates_controller, 'delete'] }), ('/os-aggregates/{id}/action', { 'POST': [aggregates_controller, 'action'], }), ('/os-aggregates/{id}/images', { 'POST': [aggregates_controller, 'images'], }), ('/os-assisted-volume-snapshots', { 'POST': [assisted_volume_snapshots_controller, 'create'] }), ('/os-assisted-volume-snapshots/{id}', { 'DELETE': [assisted_volume_snapshots_controller, 'delete'] }), ('/os-availability-zone', { 'GET': 
[availability_zone_controller, 'index'] }), ('/os-availability-zone/detail', { 'GET': [availability_zone_controller, 'detail'], }), ('/os-baremetal-nodes', { 'GET': [baremetal_nodes_controller, 'index'], 'POST': [baremetal_nodes_controller, 'create'] }), ('/os-baremetal-nodes/{id}', { 'GET': [baremetal_nodes_controller, 'show'], 'DELETE': [baremetal_nodes_controller, 'delete'] }), ('/os-baremetal-nodes/{id}/action', { 'POST': [baremetal_nodes_controller, 'action'] }), ('/os-cells', { 'POST': [cells_controller, 'create'], 'GET': [cells_controller, 'index'], }), ('/os-cells/capacities', { 'GET': [cells_controller, 'capacities'] }), ('/os-cells/detail', { 'GET': [cells_controller, 'detail'] }), ('/os-cells/info', { 'GET': [cells_controller, 'info'] }), ('/os-cells/sync_instances', { 'POST': [cells_controller, 'sync_instances'] }), ('/os-cells/{id}', { 'GET': [cells_controller, 'show'], 'PUT': [cells_controller, 'update'], 'DELETE': [cells_controller, 'delete'] }), ('/os-cells/{id}/capacities', { 'GET': [cells_controller, 'capacities'] }), ('/os-certificates', { 'POST': [certificates_controller, 'create'] }), ('/os-certificates/{id}', { 'GET': [certificates_controller, 'show'] }), ('/os-cloudpipe', { 'GET': [cloudpipe_controller, 'index'], 'POST': [cloudpipe_controller, 'create'] }), ('/os-cloudpipe/{id}', { 'PUT': [cloudpipe_controller, 'update'] }), ('/os-console-auth-tokens/{id}', { 'GET': [console_auth_tokens_controller, 'show'] }), ('/os-fixed-ips/{id}', { 'GET': [fixed_ips_controller, 'show'] }), ('/os-fixed-ips/{id}/action', { 'POST': [fixed_ips_controller, 'action'], }), ('/os-floating-ip-dns', { 'GET': [floating_ip_dns_controller, 'index'] }), ('/os-floating-ip-dns/{id}', { 'PUT': [floating_ip_dns_controller, 'update'], 'DELETE': [floating_ip_dns_controller, 'delete'] }), ('/os-floating-ip-dns/{domain_id}/entries/{id}', { 'GET': [floating_ip_dnsentry_controller, 'show'], 'PUT': [floating_ip_dnsentry_controller, 'update'], 'DELETE': [floating_ip_dnsentry_controller, 'delete'] }), ('/os-floating-ip-pools', { 'GET': [floating_ip_pools_controller, 'index'], }), ('/os-floating-ips', { 'GET': [floating_ips_controller, 'index'], 'POST': [floating_ips_controller, 'create'] }), ('/os-floating-ips/{id}', { 'GET': [floating_ips_controller, 'show'], 'DELETE': [floating_ips_controller, 'delete'] }), ('/os-floating-ips-bulk', { 'GET': [floating_ips_bulk_controller, 'index'], 'POST': [floating_ips_bulk_controller, 'create'] }), ('/os-floating-ips-bulk/{id}', { 'GET': [floating_ips_bulk_controller, 'show'], 'PUT': [floating_ips_bulk_controller, 'update'] }), ('/os-fping', { 'GET': [fping_controller, 'index'] }), ('/os-fping/{id}', { 'GET': [fping_controller, 'show'] }), ('/os-hosts', { 'GET': [hosts_controller, 'index'] }), ('/os-hosts/{id}', { 'GET': [hosts_controller, 'show'], 'PUT': [hosts_controller, 'update'] }), ('/os-hosts/{id}/reboot', { 'GET': [hosts_controller, 'reboot'] }), ('/os-hosts/{id}/shutdown', { 'GET': [hosts_controller, 'shutdown'] }), ('/os-hosts/{id}/startup', { 'GET': [hosts_controller, 'startup'] }), ('/os-hypervisors', { 'GET': [hypervisors_controller, 'index'] }), ('/os-hypervisors/detail', { 'GET': [hypervisors_controller, 'detail'] }), ('/os-hypervisors/statistics', { 'GET': [hypervisors_controller, 'statistics'] }), ('/os-hypervisors/{id}', { 'GET': [hypervisors_controller, 'show'] }), ('/os-hypervisors/{id}/search', { 'GET': [hypervisors_controller, 'search'] }), ('/os-hypervisors/{id}/servers', { 'GET': [hypervisors_controller, 'servers'] }), 
('/os-hypervisors/{id}/uptime', { 'GET': [hypervisors_controller, 'uptime'] }), ('/os-instance_usage_audit_log', { 'GET': [instance_usage_audit_log_controller, 'index'] }), ('/os-instance_usage_audit_log/{id}', { 'GET': [instance_usage_audit_log_controller, 'show'] }), ('/os-keypairs', { 'GET': [keypairs_controller, 'index'], 'POST': [keypairs_controller, 'create'] }), ('/os-keypairs/{id}', { 'GET': [keypairs_controller, 'show'], 'DELETE': [keypairs_controller, 'delete'] }), ('/os-migrations', { 'GET': [migrations_controller, 'index'] }), ('/os-networks', { 'GET': [networks_controller, 'index'], 'POST': [networks_controller, 'create'] }), ('/os-networks/add', { 'POST': [networks_controller, 'add'] }), ('/os-networks/{id}', { 'GET': [networks_controller, 'show'], 'DELETE': [networks_controller, 'delete'] }), ('/os-networks/{id}/action', { 'POST': [networks_controller, 'action'], }), ('/os-quota-class-sets/{id}', { 'GET': [quota_classes_controller, 'show'], 'PUT': [quota_classes_controller, 'update'] }), ('/os-quota-sets/{id}', { 'GET': [quota_set_controller, 'show'], 'PUT': [quota_set_controller, 'update'], 'DELETE': [quota_set_controller, 'delete'] }), ('/os-quota-sets/{id}/detail', { 'GET': [quota_set_controller, 'detail'] }), ('/os-quota-sets/{id}/defaults', { 'GET': [quota_set_controller, 'defaults'] }), ('/os-security-group-default-rules', { 'GET': [security_group_default_rules_controller, 'index'], 'POST': [security_group_default_rules_controller, 'create'] }), ('/os-security-group-default-rules/{id}', { 'GET': [security_group_default_rules_controller, 'show'], 'DELETE': [security_group_default_rules_controller, 'delete'] }), ('/os-security-group-rules', { 'POST': [security_group_rules_controller, 'create'] }), ('/os-security-group-rules/{id}', { 'DELETE': [security_group_rules_controller, 'delete'] }), ('/os-security-groups', { 'GET': [security_group_controller, 'index'], 'POST': [security_group_controller, 'create'] }), ('/os-security-groups/{id}', { 'GET': [security_group_controller, 'show'], 'PUT': [security_group_controller, 'update'], 'DELETE': [security_group_controller, 'delete'] }), ('/os-server-external-events', { 'POST': [server_external_events_controller, 'create'] }), ('/os-server-groups', { 'GET': [server_groups_controller, 'index'], 'POST': [server_groups_controller, 'create'] }), ('/os-server-groups/{id}', { 'GET': [server_groups_controller, 'show'], 'DELETE': [server_groups_controller, 'delete'] }), ('/os-services', { 'GET': [services_controller, 'index'] }), ('/os-services/{id}', { 'PUT': [services_controller, 'update'], 'DELETE': [services_controller, 'delete'] }), ('/os-simple-tenant-usage', { 'GET': [simple_tenant_usage_controller, 'index'] }), ('/os-simple-tenant-usage/{id}', { 'GET': [simple_tenant_usage_controller, 'show'] }), ('/os-snapshots', { 'GET': [snapshots_controller, 'index'], 'POST': [snapshots_controller, 'create'] }), ('/os-snapshots/detail', { 'GET': [snapshots_controller, 'detail'] }), ('/os-snapshots/{id}', { 'GET': [snapshots_controller, 'show'], 'DELETE': [snapshots_controller, 'delete'] }), ('/os-tenant-networks', { 'GET': [tenant_networks_controller, 'index'], 'POST': [tenant_networks_controller, 'create'] }), ('/os-tenant-networks/{id}', { 'GET': [tenant_networks_controller, 'show'], 'DELETE': [tenant_networks_controller, 'delete'] }), ('/os-volumes', { 'GET': [volumes_controller, 'index'], 'POST': [volumes_controller, 'create'], }), ('/os-volumes/detail', { 'GET': [volumes_controller, 'detail'], }), ('/os-volumes/{id}', { 'GET': 
[volumes_controller, 'show'], 'DELETE': [volumes_controller, 'delete'] }), # NOTE: '/os-volumes_boot' is a clone of '/servers'. We may want to # deprecate it in the future. ('/os-volumes_boot', { 'GET': [server_controller, 'index'], 'POST': [server_controller, 'create'] }), ('/os-volumes_boot/detail', { 'GET': [server_controller, 'detail'] }), ('/os-volumes_boot/{id}', { 'GET': [server_controller, 'show'], 'PUT': [server_controller, 'update'], 'DELETE': [server_controller, 'delete'] }), ('/os-volumes_boot/{id}/action', { 'POST': [server_controller, 'action'] }), ('/servers', { 'GET': [server_controller, 'index'], 'POST': [server_controller, 'create'] }), ('/servers/detail', { 'GET': [server_controller, 'detail'] }), ('/servers/{id}', { 'GET': [server_controller, 'show'], 'PUT': [server_controller, 'update'], 'DELETE': [server_controller, 'delete'] }), ('/servers/{id}/action', { 'POST': [server_controller, 'action'] }), ('/servers/{server_id}/consoles', { 'GET': [consoles_controller, 'index'], 'POST': [consoles_controller, 'create'] }), ('/servers/{server_id}/consoles/{id}', { 'GET': [consoles_controller, 'show'], 'DELETE': [consoles_controller, 'delete'] }), ('/servers/{server_id}/diagnostics', { 'GET': [server_diagnostics_controller, 'index'] }), ('/servers/{server_id}/ips', { 'GET': [ips_controller, 'index'] }), ('/servers/{server_id}/ips/{id}', { 'GET': [ips_controller, 'show'] }), ('/servers/{server_id}/metadata', { 'GET': [server_metadata_controller, 'index'], 'POST': [server_metadata_controller, 'create'], 'PUT': [server_metadata_controller, 'update_all'], }), ('/servers/{server_id}/metadata/{id}', { 'GET': [server_metadata_controller, 'show'], 'PUT': [server_metadata_controller, 'update'], 'DELETE': [server_metadata_controller, 'delete'], }), ('/servers/{server_id}/migrations', { 'GET': [server_migrations_controller, 'index'] }), ('/servers/{server_id}/migrations/{id}', { 'GET': [server_migrations_controller, 'show'], 'DELETE': [server_migrations_controller, 'delete'] }), ('/servers/{server_id}/migrations/{id}/action', { 'POST': [server_migrations_controller, 'action'] }), ('/servers/{server_id}/os-instance-actions', { 'GET': [instance_actions_controller, 'index'] }), ('/servers/{server_id}/os-instance-actions/{id}', { 'GET': [instance_actions_controller, 'show'] }), ('/servers/{server_id}/os-interface', { 'GET': [server_os_interface_controller, 'index'], 'POST': [server_os_interface_controller, 'create'] }), ('/servers/{server_id}/os-interface/{id}', { 'GET': [server_os_interface_controller, 'show'], 'DELETE': [server_os_interface_controller, 'delete'] }), ('/servers/{server_id}/os-server-password', { 'GET': [server_password_controller, 'index'], 'DELETE': [server_password_controller, 'clear'] }), ('/servers/{server_id}/os-virtual-interfaces', { 'GET': [virtual_interfaces_controller, 'index'] }), ('/servers/{server_id}/os-volume_attachments', { 'GET': [server_volume_attachments_controller, 'index'], 'POST': [server_volume_attachments_controller, 'create'], }), ('/servers/{server_id}/os-volume_attachments/{id}', { 'GET': [server_volume_attachments_controller, 'show'], 'PUT': [server_volume_attachments_controller, 'update'], 'DELETE': [server_volume_attachments_controller, 'delete'] }), ('/servers/{server_id}/remote-consoles', { 'POST': [server_remote_consoles_controller, 'create'] }), ('/servers/{server_id}/os-security-groups', { 'GET': [server_security_groups_controller, 'index'] }), ('/servers/{server_id}/shares', { 'GET': [server_shares_controller, 'index'], 'POST': 
[server_shares_controller, 'create'], }), ('/servers/{server_id}/shares/{id}', { 'GET': [server_shares_controller, 'show'], 'DELETE': [server_shares_controller, 'delete'], }), ('/servers/{server_id}/tags', { 'GET': [server_tags_controller, 'index'], 'PUT': [server_tags_controller, 'update_all'], 'DELETE': [server_tags_controller, 'delete_all'], }), ('/servers/{server_id}/tags/{id}', { 'GET': [server_tags_controller, 'show'], 'PUT': [server_tags_controller, 'update'], 'DELETE': [server_tags_controller, 'delete'] }), ('/servers/{server_id}/topology', { 'GET': [server_topology_controller, 'index'] }), ) class APIRouterV21(base_wsgi.Router): """Routes requests on the OpenStack API to the appropriate controller and method. The URL mapping based on the plain list `ROUTE_LIST` is built at here. """ def __init__(self, custom_routes=None): """:param custom_routes: the additional routes can be added by this parameter. This parameter is used to test on some fake routes primarily. """ super(APIRouterV21, self).__init__(nova.api.openstack.ProjectMapper()) if custom_routes is None: custom_routes = tuple() for path, methods in ROUTE_LIST + custom_routes: # NOTE(alex_xu): The variable 'methods' is a dict in normal, since # the dict includes all the methods supported in the path. But # if the variable 'method' is a string, it means a redirection. # For example, the request to the '' will be redirect to the '/' in # the Nova API. To indicate that, using the target path instead of # a dict. The route entry just writes as "('', '/)". if isinstance(methods, str): self.map.redirect(path, methods) continue for method, controller_info in methods.items(): # TODO(alex_xu): In the end, I want to create single controller # instance instead of create controller instance for each # route. controller = controller_info[0]() action = controller_info[1] self.map.create_route(path, method, controller, action) @classmethod def factory(cls, global_config, **local_config): """Simple paste factory. :class:`nova.api.wsgi.Router` doesn't have one. """ return cls() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3256083 nova-32.0.0/nova/api/openstack/compute/schemas/0000775000175000017500000000000000000000000021364 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/__init__.py0000664000175000017500000000000000000000000023463 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/admin_actions.py0000664000175000017500000000322100000000000024544 0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
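# JSON Schema definitions used by the API request/response validation layer
# for the admin server actions (resetNetwork, injectNetworkInfo and
# os-resetState).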
# NOTE(stephenfin): This schema is intentionally empty since the action has # been removed reset_network = {} # TODO(stephenfin): Restrict the value to 'null' in a future API version inject_network_info = { 'type': 'object', 'properties': { 'injectNetworkInfo': {}, }, 'required': ['injectNetworkInfo'], 'additionalProperties': False, } reset_state = { 'type': 'object', 'properties': { 'os-resetState': { 'type': 'object', 'properties': { 'state': { 'type': 'string', 'enum': ['active', 'error'], }, }, 'required': ['state'], 'additionalProperties': False, }, }, 'required': ['os-resetState'], 'additionalProperties': False, } # NOTE(stephenfin): This schema is intentionally empty since the action has # been removed reset_network_response = {} inject_network_info_response = { 'type': 'null', } reset_state_response = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/admin_password.py0000664000175000017500000000215200000000000024750 0ustar00zuulzuul00000000000000# Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.validation import parameter_types change_password = { 'type': 'object', 'properties': { 'changePassword': { 'type': 'object', 'properties': { 'adminPass': parameter_types.admin_password, }, 'required': ['adminPass'], 'additionalProperties': False, }, }, 'required': ['changePassword'], 'additionalProperties': False, } change_password_response = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/agents.py0000664000175000017500000000137700000000000023227 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(stephenfin): These schemas are intentionally empty since these APIs have # been removed create = {} update = {} index_query = {} create_response = {} delete_response = {} index_response = {} update_response = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/aggregate_images.py0000664000175000017500000000220400000000000025207 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.validation import parameter_types aggregate_images = { 'type': 'object', 'properties': { 'cache': { 'type': ['array'], 'minItems': 1, 'items': { 'type': 'object', 'properties': { 'id': parameter_types.image_id, }, 'additionalProperties': False, 'required': ['id'], }, }, }, 'required': ['cache'], 'additionalProperties': False, } aggregate_images_response = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/aggregates.py0000664000175000017500000001626500000000000024061 0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.validation import parameter_types _availability_zone = {'oneOf': [parameter_types.az_name, {'type': 'null'}]} _availability_zone_with_leading_trailing_spaces = { 'oneOf': [ parameter_types.az_name_with_leading_trailing_spaces, {'type': 'null'}, ], } create = { 'type': 'object', 'properties': { 'aggregate': { 'type': 'object', 'properties': { 'name': parameter_types.name, 'availability_zone': _availability_zone, }, 'required': ['name'], 'additionalProperties': False, }, }, 'required': ['aggregate'], 'additionalProperties': False, } create_v20 = copy.deepcopy(create) create_v20['properties']['aggregate']['properties']['name'] = ( parameter_types.name_with_leading_trailing_spaces) create_v20['properties']['aggregate']['properties']['availability_zone'] = ( _availability_zone_with_leading_trailing_spaces) update = { 'type': 'object', 'properties': { 'aggregate': { 'type': 'object', 'properties': { 'name': parameter_types.name_with_leading_trailing_spaces, 'availability_zone': _availability_zone }, 'additionalProperties': False, 'anyOf': [ {'required': ['name']}, {'required': ['availability_zone']} ] }, }, 'required': ['aggregate'], 'additionalProperties': False, } update_v20 = copy.deepcopy(update) update_v20['properties']['aggregate']['properties']['name'] = ( parameter_types.name_with_leading_trailing_spaces) update_v20['properties']['aggregate']['properties']['availability_zone'] = ( _availability_zone_with_leading_trailing_spaces) add_host = { 'type': 'object', 'properties': { 'add_host': { 'type': 'object', 'properties': { 'host': parameter_types.fqdn, }, 'required': ['host'], 'additionalProperties': False, }, }, 'required': ['add_host'], 'additionalProperties': False, } remove_host = { 'type': 'object', 'properties': { 'remove_host': { 'type': 'object', 'properties': { 'host': parameter_types.fqdn, }, 'required': ['host'], 'additionalProperties': 
False, }, }, 'required': ['remove_host'], 'additionalProperties': False, } set_metadata = { 'type': 'object', 'properties': { 'set_metadata': { 'type': 'object', 'properties': { 'metadata': parameter_types.metadata_with_null }, 'required': ['metadata'], 'additionalProperties': False, }, }, 'required': ['set_metadata'], 'additionalProperties': False, } # TODO(stephenfin): Remove additionalProperties in a future API version index_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } # TODO(stephenfin): Remove additionalProperties in a future API version show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } _aggregate_response = { 'type': 'object', 'properties': { 'availability_zone': {'type': ['null', 'string']}, 'created_at': {'type': 'string', 'format': 'date-time'}, 'deleted': {'type': 'boolean'}, 'deleted_at': {'type': ['string', 'null'], 'format': 'date-time'}, 'hosts': { 'type': ['array', 'null'], 'items': { 'type': 'string', }, }, 'id': {'type': 'integer'}, # TODO(stephenfin): This could be stricter 'metadata': { 'type': ['null', 'object'], 'properties': {}, 'additionalProperties': True, }, 'name': {'type': 'string'}, 'updated_at': {'type': ['string', 'null'], 'format': 'date-time'}, }, 'required': [ 'availability_zone', 'created_at', 'deleted', 'deleted_at', 'hosts', 'id', 'metadata', 'name', 'updated_at', ], 'additionalProperties': False, } _aggregate_response_v241 = copy.deepcopy(_aggregate_response) _aggregate_response_v241['properties'].update( {'uuid': {'type': 'string', 'format': 'uuid'}}, ) _aggregate_response_v241['required'].append('uuid') index_response = { 'type': 'object', 'properties': { 'aggregates': { 'type': 'array', 'items': copy.deepcopy(_aggregate_response), }, }, 'required': ['aggregates'], 'additionalProperties': False, } index_response_v241 = copy.deepcopy(index_response) index_response_v241['properties']['aggregates']['items'] = copy.deepcopy( _aggregate_response_v241 ) create_response = { 'type': 'object', 'properties': { 'aggregate': copy.deepcopy(_aggregate_response), }, 'required': ['aggregate'], 'additionalProperties': False, } del create_response['properties']['aggregate']['properties']['hosts'] del create_response['properties']['aggregate']['properties']['metadata'] create_response['properties']['aggregate']['required'].remove('hosts') create_response['properties']['aggregate']['required'].remove('metadata') create_response_v241 = copy.deepcopy(create_response) create_response_v241['properties']['aggregate'] = copy.deepcopy( _aggregate_response_v241 ) del create_response_v241['properties']['aggregate']['properties']['hosts'] del create_response_v241['properties']['aggregate']['properties']['metadata'] create_response_v241['properties']['aggregate']['required'].remove('hosts') create_response_v241['properties']['aggregate']['required'].remove('metadata') show_response = { 'type': 'object', 'properties': { 'aggregate': copy.deepcopy(_aggregate_response), }, 'required': ['aggregate'], 'additionalProperties': False, } show_response_v241 = copy.deepcopy(show_response) show_response_v241['properties']['aggregate'] = copy.deepcopy( _aggregate_response_v241 ) update_response = copy.deepcopy(show_response) update_response_v241 = copy.deepcopy(show_response_v241) delete_response = {'type': 'null'} add_host_response = copy.deepcopy(show_response) add_host_response_v241 = copy.deepcopy(show_response_v241) remove_host_response = copy.deepcopy(show_response) remove_host_response_v241 = 
copy.deepcopy(show_response_v241) set_metadata_response = copy.deepcopy(show_response) set_metadata_response_v241 = copy.deepcopy(show_response_v241) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/assisted_volume_snapshots.py0000664000175000017500000000555300000000000027256 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'snapshot': { 'type': 'object', 'properties': { 'volume_id': { 'type': 'string', 'minLength': 1, }, 'create_info': { 'type': 'object', 'properties': { 'snapshot_id': { 'type': 'string', 'minLength': 1, }, 'type': { 'type': 'string', 'enum': ['qcow2'], }, 'new_file': { 'type': 'string', 'minLength': 1, }, 'id': { 'type': 'string', 'minLength': 1, }, }, 'required': ['snapshot_id', 'type', 'new_file'], 'additionalProperties': False, }, }, 'required': ['volume_id', 'create_info'], 'additionalProperties': False, } }, 'required': ['snapshot'], 'additionalProperties': False, } delete_query = { 'type': 'object', 'properties': { 'delete_info': parameter_types.multi_params({'type': 'string'}) }, # NOTE(gmann): This is kept True to keep backward compatibility. # As of now Schema validation stripped out the additional parameters and # does not raise 400. In microversion 2.75, we have blocked the additional # parameters. 'additionalProperties': True } delete_query_275 = copy.deepcopy(delete_query) delete_query_275['additionalProperties'] = False create_response = { 'type': 'object', 'properties': { 'snapshot': { 'type': 'object', 'properties': { 'id': {'type': ['null', 'string'], 'format': 'uuid'}, 'volumeId': {'type': 'string', 'format': 'uuid'}, }, 'required': ['id', 'volumeId'], 'additionalProperties': False, }, }, 'required': ['snapshot'], 'additionalProperties': False, } delete_response = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/attach_interfaces.py0000664000175000017500000001070500000000000025410 0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from nova.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'interfaceAttachment': { 'type': 'object', 'properties': { # NOTE: This parameter is passed to the search_opts of # Neutron list_network API: search_opts = {'id': net_id} 'net_id': parameter_types.network_id, # NOTE: This parameter is passed to Neutron show_port API # as a port id. 'port_id': parameter_types.network_port_id, 'fixed_ips': { 'type': 'array', 'minItems': 1, 'maxItems': 1, 'items': { 'type': 'object', 'properties': { 'ip_address': parameter_types.ip_address }, 'required': ['ip_address'], 'additionalProperties': False, }, }, }, 'additionalProperties': False, }, }, 'additionalProperties': False, } create_v249 = copy.deepcopy(create) create_v249['properties']['interfaceAttachment']['properties']['tag'] = parameter_types.tag # noqa: E501 # TODO(stephenfin): Remove additionalProperties in a future API version index_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } # TODO(stephenfin): Remove additionalProperties in a future API version show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } _interface_attachment = { 'type': 'object', 'properties': { 'fixed_ips': { 'type': ['null', 'array'], 'items': { 'type': 'object', 'properties': { 'ip_address': { 'type': 'string', 'anyOf': [ {'format': 'ipv4'}, {'format': 'ipv6'}, ], }, 'subnet_id': {'type': 'string', 'format': 'uuid'}, }, 'required': ['ip_address', 'subnet_id'], 'additionalProperties': False, }, }, 'mac_addr': {'type': 'string', 'format': 'mac-address'}, 'net_id': {'type': 'string', 'format': 'uuid'}, 'port_id': {'type': 'string', 'format': 'uuid'}, 'port_state': {'type': 'string'}, }, 'required': ['fixed_ips', 'mac_addr', 'net_id', 'port_id', 'port_state'], 'additionalProperties': False, } _interface_attachment_v270 = copy.deepcopy(_interface_attachment) _interface_attachment_v270['properties']['tag'] = { 'type': ['null', 'string'], } _interface_attachment_v270['required'].append('tag') index_response = { 'type': 'object', 'properties': { 'interfaceAttachments': { 'type': 'array', 'items': copy.deepcopy(_interface_attachment), }, }, 'required': ['interfaceAttachments'], 'additionalProperties': False, } index_response_v270 = copy.deepcopy(index_response) index_response_v270['properties']['interfaceAttachments']['items'] = copy.deepcopy( # noqa: E501 _interface_attachment_v270 ) show_response = { 'type': 'object', 'properties': { 'interfaceAttachment': copy.deepcopy(_interface_attachment), }, 'required': ['interfaceAttachment'], 'additionalProperties': False, } show_response_v270 = copy.deepcopy(show_response) show_response_v270['properties']['interfaceAttachment'] = copy.deepcopy( _interface_attachment_v270 ) # create responses are identical to show, including microversions create_response = copy.deepcopy(show_response) create_response_v270 = copy.deepcopy(show_response_v270) delete_response = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/availability_zone.py0000664000175000017500000000477400000000000025457 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy # TODO(stephenfin): Remove additionalProperties in a future API version index_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } detail_query = index_query index_response = { 'type': 'object', 'properties': { 'availabilityZoneInfo': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'hosts': {'type': 'null'}, 'zoneName': {'type': 'string'}, 'zoneState': { 'type': 'object', 'properties': { 'available': {'type': 'boolean'}, }, }, }, 'required': ['hosts', 'zoneName', 'zoneState'], 'additionalProperties': False, }, }, }, 'required': ['availabilityZoneInfo'], 'additionalProperties': False, } detail_response = copy.deepcopy(index_response) detail_response['properties']['availabilityZoneInfo']['items']['properties']['hosts'] = { # noqa: E501 'type': ['null', 'object'], 'patternProperties': { '^.+$': { 'type': 'object', 'patternProperties': { '^.+$': { 'type': 'object', 'properties': { 'active': {'type': 'boolean'}, 'available': {'type': 'boolean'}, 'updated_at': { 'type': ['string', 'null'], 'format': 'date-time', }, }, 'required': ['active', 'available', 'updated_at'], 'additionalProperties': False, }, }, 'additionalProperties': False, }, }, 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/baremetal_nodes.py0000664000175000017500000000433400000000000025066 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
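# [Editor's sketch; not part of the upstream module.] The availability-zone
# detail response above describes its ``hosts`` mapping with
# ``patternProperties`` because host and service names are free-form keys.
# A standalone illustration of that keyword, assuming only ``jsonschema``:
def _editor_demo_pattern_properties():
    import jsonschema

    schema = {
        'type': 'object',
        'patternProperties': {
            '^.+$': {
                'type': 'object',
                'properties': {'available': {'type': 'boolean'}},
                'required': ['available'],
                'additionalProperties': False,
            },
        },
        'additionalProperties': False,
    }
    validator = jsonschema.Draft4Validator(schema)
    # any host name is accepted as a key, but every value must match the
    # nested schema
    assert validator.is_valid({'compute-0': {'available': True}})
    assert not validator.is_valid({'compute-0': {'available': 'yes'}})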
import copy # NOTE(stephenfin): These schemas are intentionally empty since these APIs are # deprecated proxy APIs create = {} add_interface = {} remove_interface = {} index_query = {} show_query = {} _node = { 'type': 'object', 'properties': { 'cpus': {'type': 'string'}, 'disk_gb': {'type': 'string'}, 'host': {'const': 'IRONIC MANAGED'}, 'id': {'type': 'string', 'format': 'uuid'}, 'interfaces': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'address': {'type': 'string'}, }, }, }, 'memory_mb': {'type': 'string'}, 'task_state': {'type': 'string'}, }, 'required': [ 'cpus', 'disk_gb', 'host', 'id', 'interfaces', 'memory_mb', 'task_state', ], 'additionalProperties': False, } _node_detail = copy.deepcopy(_node) _node_detail['properties']['instance_uuid'] = { 'type': 'string', 'format': 'uuid' } _node_detail['required'].append('instance_uuid') index_response = { 'type': 'object', 'properties': { 'nodes': { 'type': 'array', 'items': copy.deepcopy(_node), }, }, 'required': ['nodes'], 'additionalProperties': False, } show_response = { 'type': 'object', 'properties': { 'node': copy.deepcopy(_node_detail), }, 'required': ['node'], 'additionalProperties': False, } create_response = { 'type': 'null', } delete_response = { 'type': 'null', } add_interface_response = { 'type': 'null', } remove_interface_response = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/cells.py0000664000175000017500000000171400000000000023043 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(stephenfin): These schemas are intentionally empty since these APIs have # been removed create = {} update = {} sync_instances = {} index_query = {} detail_query = {} info_query = {} capacities_query = {} show_query = {} index_response = {} detail_response = {} info_response = {} capacities_response = {} show_response = {} delete_response = {} create_response = {} update_response = {} sync_instances_response = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/certificates.py0000664000175000017500000000130700000000000024404 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
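# [Editor's sketch; not part of the upstream module.] Response schemas in
# this package are typically derived from a base document with
# ``copy.deepcopy`` and then extended, as ``_node_detail`` above extends
# ``_node`` with ``instance_uuid``. A standalone illustration of the pattern
# (the names below are invented for the example):
def _editor_demo_derived_schema():
    import copy

    base = {
        'type': 'object',
        'properties': {'id': {'type': 'string'}},
        'required': ['id'],
        'additionalProperties': False,
    }
    detail = copy.deepcopy(base)
    detail['properties']['instance_uuid'] = {'type': 'string'}
    detail['required'].append('instance_uuid')
    # the base document is untouched because deepcopy copied the nested
    # dicts and lists as well
    assert 'instance_uuid' not in base['properties']
    assert base['required'] == ['id']
    return base, detail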
# NOTE(stephenfin): These schemas are intentionally empty since these APIs have # been removed create = {} show_query = {} show_response = {} create_response = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/cloudpipe.py0000664000175000017500000000135200000000000023723 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(stephenfin): These schemas are intentionally empty since these APIs have # been removed create = {} update = {} index_query = {} create_response = {} index_response = {} update_response = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/console_auth_tokens.py0000664000175000017500000000310700000000000026005 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } show_query_v299 = copy.deepcopy(show_query) show_query_v299['additionalProperties'] = False show_response = { 'type': 'object', 'properties': { 'console': { 'type': 'object', 'properties': { 'instance_uuid': {'type': 'string', 'format': 'uuid'}, 'host': {'type': ['string', 'null']}, 'port': {'type': 'integer'}, 'internal_access_path': { 'type': ['string', 'null'], 'format': 'uuid', }, }, 'required': [ 'instance_uuid', 'host', 'port', 'internal_access_path', ], 'additionalProperties': False, }, }, 'required': ['console'], 'additionalProperties': False, } show_response_v299 = copy.deepcopy(show_response) show_response_v299['properties']['console']['properties'].update({ 'tls_port': {'type': ['integer', 'null']}, }) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/console_output.py0000664000175000017500000000305200000000000025020 0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. get_console_output = { 'type': 'object', 'properties': { 'os-getConsoleOutput': { 'type': 'object', 'properties': { 'length': { 'type': ['integer', 'string', 'null'], 'pattern': '^-?[0-9]+$', # NOTE: -1 means an unlimited length. # TODO(cyeoh): None also means unlimited length # and is supported for v2 backwards compatibility # Should remove in the future with a microversion 'minimum': -1, }, }, 'additionalProperties': False, }, }, 'required': ['os-getConsoleOutput'], 'additionalProperties': False, } get_console_output_response = { 'type': 'object', 'properties': { 'output': {'type': 'string'}, }, 'required': ['output'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/consoles.py0000664000175000017500000000140100000000000023557 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(stephenfin): These schemas are intentionally empty since these APIs have # been removed create = {} index_query = {} show_query = {} index_response = {} create_response = {} show_response = {} delete_response = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/create_backup.py0000664000175000017500000000332500000000000024531 0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
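# [Editor's sketch; not part of the upstream module.] The
# ``os-getConsoleOutput`` schema above constrains ``length`` with a union
# type plus ``pattern`` and ``minimum``; in JSON Schema, ``pattern`` only
# applies to string instances and ``minimum`` only to numeric ones. A
# standalone check of that behaviour with ``jsonschema``:
def _editor_demo_union_length():
    import jsonschema

    schema = {
        'type': ['integer', 'string', 'null'],
        'pattern': '^-?[0-9]+$',
        'minimum': -1,
    }
    validator = jsonschema.Draft4Validator(schema)
    assert validator.is_valid(10)         # integer, >= -1
    assert validator.is_valid('10')       # numeric string
    assert validator.is_valid(None)       # null means "unlimited" here
    assert not validator.is_valid('ten')  # fails the pattern
    assert not validator.is_valid(-2)     # below the minimum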
import copy from nova.api.validation import parameter_types create_backup = { 'type': 'object', 'properties': { 'createBackup': { 'type': 'object', 'properties': { 'name': parameter_types.name, 'backup_type': { 'type': 'string', }, 'rotation': parameter_types.non_negative_integer, 'metadata': parameter_types.metadata, }, 'required': ['name', 'backup_type', 'rotation'], 'additionalProperties': False, }, }, 'required': ['createBackup'], 'additionalProperties': False, } create_backup_v20 = copy.deepcopy(create_backup) create_backup_v20['properties'][ 'createBackup']['properties']['name'] = ( parameter_types.name_with_leading_trailing_spaces) create_backup_response = { 'type': 'null', } create_backup_response_v245 = { 'type': 'object', 'properties': { 'image_id': {'type': 'string', 'format': 'uuid'}, }, 'required': ['image_id'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/deferred_delete.py0000664000175000017500000000211200000000000025034 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # TODO(stephenfin): Restrict the value to 'null' in a future API version restore = { 'type': 'object', 'properties': { 'restore': {}, }, 'required': ['restore'], 'additionalProperties': False, } # TODO(stephenfin): Restrict the value to 'null' in a future API version force_delete = { 'type': 'object', 'properties': { 'forceDelete': {}, }, 'required': ['forceDelete'], 'additionalProperties': False, } restore_response = { 'type': 'null', } force_delete_response = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/evacuate.py0000664000175000017500000000421100000000000023531 0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from nova.api.validation import parameter_types evacuate = { 'type': 'object', 'properties': { 'evacuate': { 'type': 'object', 'properties': { 'host': parameter_types.fqdn, 'onSharedStorage': parameter_types.boolean, 'adminPass': parameter_types.admin_password, }, 'required': ['onSharedStorage'], 'additionalProperties': False, }, }, 'required': ['evacuate'], 'additionalProperties': False, } evacuate_v214 = copy.deepcopy(evacuate) del evacuate_v214['properties']['evacuate']['properties']['onSharedStorage'] del evacuate_v214['properties']['evacuate']['required'] evacuate_v229 = copy.deepcopy(evacuate_v214) evacuate_v229['properties']['evacuate']['properties'][ 'force'] = parameter_types.boolean # v2.68 removes the 'force' parameter added in v2.29, meaning it is identical # to v2.14 evacuate_v268 = copy.deepcopy(evacuate_v214) # v2.95 keeps the same schema, evacuating an instance will now result its state # to be stopped at destination. evacuate_v295 = copy.deepcopy(evacuate_v268) evacuate_response = { 'type': ['object', 'null'], 'properties': { 'adminPass': { 'type': ['null', 'string'], } }, # adminPass is a rare-example of configuration-driven API behavior: the # value depends on '[api] enable_instance_password' 'required': [], 'additionalProperties': False, } evacuate_response_v214 = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/extension_info.py0000664000175000017500000000415000000000000024765 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
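# [Editor's sketch; not part of the upstream module.] Microversioned request
# schemas, such as the evacuate variants above, are built by deep-copying an
# earlier version and deleting or adding individual properties; because the
# bodies set ``additionalProperties: False``, a parameter dropped in a newer
# microversion is rejected rather than silently ignored. A standalone
# illustration (names invented for the example):
def _editor_demo_microversion_removal():
    import copy

    import jsonschema

    v21 = {
        'type': 'object',
        'properties': {
            'host': {'type': 'string'},
            'force': {'type': 'boolean'},
        },
        'additionalProperties': False,
    }
    v268 = copy.deepcopy(v21)
    del v268['properties']['force']

    assert jsonschema.Draft4Validator(v21).is_valid({'force': True})
    assert not jsonschema.Draft4Validator(v268).is_valid({'force': True})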
# TODO(stephenfin): Remove additionalProperties in a future API version index_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } # TODO(stephenfin): Remove additionalProperties in a future API version show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } _extension_obj = { 'type': 'object', 'properties': { 'alias': {'type': 'string'}, 'description': {'type': 'string'}, 'links': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'href': { 'type': 'string', 'format': 'url' }, 'rel': { 'type': 'string' }, } }, }, 'name': {'type': 'string'}, 'namespace': {'type': 'string'}, 'updated': {'type': 'string', 'format': 'date-time'}, }, 'required': [ 'alias', 'description', 'links', 'name', 'namespace', 'updated' ], 'additionalProperties': False, } index_response = { 'type': 'object', 'properties': { 'extensions': { 'type': 'array', 'items': _extension_obj, } }, 'required': ['extensions'], 'additionalProperties': False, } show_response = { 'type': 'object', 'properties': { 'extension': _extension_obj, }, 'required': ['extension'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/fixed_ips.py0000664000175000017500000000136000000000000023710 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(stephenfin): These schemas are intentionally empty since these APIs have # been removed reserve = {} unreserve = {} show_query = {} show_response = {} reserve_response = {} unreserve_response = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/flavor_access.py0000664000175000017500000000515200000000000024553 0ustar00zuulzuul00000000000000# Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
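# [Editor's sketch; not part of the upstream module.] Several modules in
# this package (cells, certificates, fixed_ips, ...) define their schemas as
# bare ``{}`` with a note that the API has been removed. In JSON Schema an
# empty document places no constraints at all, so every instance validates;
# the error returned for these removed endpoints comes from the controller
# code, not from schema validation. A standalone check of that property:
def _editor_demo_empty_schema():
    import jsonschema

    validator = jsonschema.Draft4Validator({})
    assert validator.is_valid({'anything': ['goes', 1, None]})
    assert validator.is_valid(None)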
import copy from nova.api.validation import parameter_types add_tenant_access = { 'type': 'object', 'properties': { 'addTenantAccess': { 'type': 'object', 'properties': { 'tenant': { # defined from project_id in instance_type_projects table 'type': 'string', 'minLength': 1, 'maxLength': 255, }, }, 'required': ['tenant'], 'additionalProperties': False, }, }, 'required': ['addTenantAccess'], 'additionalProperties': False, } remove_tenant_access = { 'type': 'object', 'properties': { 'removeTenantAccess': { 'type': 'object', 'properties': { 'tenant': { # defined from project_id in instance_type_projects table 'type': 'string', 'minLength': 1, 'maxLength': 255, }, }, 'required': ['tenant'], 'additionalProperties': False, }, }, 'required': ['removeTenantAccess'], 'additionalProperties': False, } # TODO(stephenfin): Remove additionalProperties in a future API version index_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } _common_response = { 'type': 'object', 'properties': { 'flavor_access': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'flavor_id': {'type': 'string'}, 'tenant_id': parameter_types.project_id, }, 'required': ['flavor_id', 'tenant_id'], 'additionalProperties': True, }, }, }, 'required': ['flavor_access'], 'additionalProperties': True, } index_response = copy.deepcopy(_common_response) add_tenant_access_response = copy.deepcopy(_common_response) remove_tenant_access_response = copy.deepcopy(_common_response) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/flavors.py0000664000175000017500000002641000000000000023415 0ustar00zuulzuul00000000000000# Copyright 2017 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.validation import parameter_types # NOTE(takashin): The following sort keys are defined for backward # compatibility. If they are changed, the API microversion should be bumped. VALID_SORT_KEYS = [ 'created_at', 'description', 'disabled', 'ephemeral_gb', 'flavorid', 'id', 'is_public', 'memory_mb', 'name', 'root_gb', 'rxtx_factor', 'swap', 'updated_at', 'vcpu_weight', 'vcpus' ] VALID_SORT_DIR = ['asc', 'desc'] create = { 'type': 'object', 'properties': { 'flavor': { 'type': 'object', 'properties': { # in nova/flavors.py, name with all white spaces is forbidden. 'name': parameter_types.name, # forbid leading/trailing whitespaces 'id': { 'type': ['string', 'number', 'null'], 'minLength': 1, 'maxLength': 255, 'pattern': '^(?! )[a-zA-Z0-9. _-]+(? 0) float 'rxtx_factor': { 'type': ['number', 'string'], 'pattern': r'^[0-9]+(\.[0-9]+)?$', # this is a float, so we want to allow everything close to # 0 (e.g. 
0.1) but not 0 itself, hence exclusiveMinimum # rather than the usual minimum 'exclusiveMinimum': 0, # maximum's value is limited to db constant's # SQL_SP_FLOAT_MAX (in nova/db/constants.py) 'maximum': 3.40282e+38 }, 'os-flavor-access:is_public': parameter_types.boolean, }, # TODO(oomichi): 'id' should be required with v2.1+microversions. # On v2.0 API, nova-api generates a flavor-id automatically if # specifying null as 'id' or not specifying 'id'. Ideally a client # should specify null as 'id' for requesting auto-generated id # exactly. However, this strict limitation causes a backwards # incompatible issue on v2.1. So now here relaxes the requirement # of 'id'. 'required': ['name', 'ram', 'vcpus', 'disk'], 'additionalProperties': False, }, }, 'required': ['flavor'], 'additionalProperties': False, } create_v20 = copy.deepcopy(create) create_v20['properties']['flavor']['properties']['name'] = ( parameter_types.name_with_leading_trailing_spaces ) # 2.55 adds an optional description field with a max length of 65535 since the # backing database column is a TEXT column which is 64KiB. _flavor_description = { 'type': ['string', 'null'], 'minLength': 0, 'maxLength': 65535, 'pattern': parameter_types.valid_description_regex, } create_v255 = copy.deepcopy(create) create_v255['properties']['flavor']['properties']['description'] = ( _flavor_description) update = { 'type': 'object', 'properties': { 'flavor': { 'type': 'object', 'properties': { 'description': _flavor_description }, # Since the only property that can be specified on update is the # description field, it is required. If we allow updating other # flavor attributes in a later microversion, we should reconsider # what is required. 'required': ['description'], 'additionalProperties': False, }, }, 'required': ['flavor'], 'additionalProperties': False, } index_query = { 'type': 'object', 'properties': { 'limit': parameter_types.multi_params( parameter_types.non_negative_integer), 'marker': parameter_types.multi_params({'type': 'string'}), 'is_public': parameter_types.multi_params({'type': 'string'}), 'minRam': parameter_types.multi_params({'type': 'string'}), 'minDisk': parameter_types.multi_params({'type': 'string'}), 'sort_key': parameter_types.multi_params({'type': 'string', 'enum': VALID_SORT_KEYS}), 'sort_dir': parameter_types.multi_params({'type': 'string', 'enum': VALID_SORT_DIR}) }, # NOTE(gmann): This is kept True to keep backward compatibility. # As of now Schema validation stripped out the additional parameters and # does not raise 400. In microversion 2.75, we have blocked the additional # parameters. 
'additionalProperties': True } index_query_275 = copy.deepcopy(index_query) index_query_275['additionalProperties'] = False # TODO(stephenfin): Remove additionalProperties in a future API version show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } _flavor_basic = { 'type': 'object', 'properties': { 'id': {'type': 'string'}, 'links': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'href': {'type': 'string', 'format': 'uri'}, 'rel': {'type': 'string'}, }, 'required': ['href', 'rel'], 'additionalProperties': False, }, }, 'name': {'type': 'string'}, }, 'required': ['id', 'links', 'name'], 'additionalProperties': False, } _flavor_basic_v255 = copy.deepcopy(_flavor_basic) _flavor_basic_v255['properties']['description'] = {'type': ['string', 'null']} _flavor_basic_v255['required'].append('description') _flavor = { 'type': 'object', 'properties': { 'disk': {'type': 'integer'}, 'id': {'type': 'string'}, 'links': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'href': {'type': 'string', 'format': 'uri'}, 'rel': {'type': 'string'}, }, 'required': ['href', 'rel'], }, }, 'name': {'type': 'string'}, 'os-flavor-access:is_public': {}, 'ram': {'type': 'integer'}, 'rxtx_factor': {}, 'swap': { 'anyOf': [ {'type': 'integer'}, {'const': ''}, ], }, 'vcpus': {'type': 'integer'}, 'OS-FLV-EXT-DATA:ephemeral': {'type': 'integer'}, 'OS-FLV-DISABLED:disabled': {'type': 'boolean'}, }, 'required': [ 'disk', 'id', 'links', 'name', 'os-flavor-access:is_public', 'ram', 'rxtx_factor', 'swap', 'vcpus', 'OS-FLV-DISABLED:disabled', 'OS-FLV-EXT-DATA:ephemeral', ], 'additionalProperties': False, } _flavor_v255 = copy.deepcopy(_flavor) _flavor_v255['properties']['description'] = {'type': ['string', 'null']} _flavor_v255['required'].append('description') _flavor_v261 = copy.deepcopy(_flavor_v255) _flavor_v261['properties']['extra_specs'] = { 'type': 'object', 'patternProperties': { '^[a-zA-Z0-9-_:. 
]{1,255}$': {'type': 'string', 'maxLength': 255}, }, 'additionalProperties': False, } _flavor_v275 = copy.deepcopy(_flavor_v261) # we completely overwrite this since the new variant is much simpler _flavor_v275['properties']['swap'] = {'type': 'integer'} _flavors_links = { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'href': {'type': 'string', 'format': 'uri'}, 'rel': {'type': 'string'}, }, 'required': ['href', 'rel'], 'additionalProperties': False, }, } delete_response = { 'type': 'null', } create_response = { 'type': 'object', 'properties': { 'flavor': copy.deepcopy(_flavor), }, 'required': ['flavor'], 'additionalProperties': False, } create_response_v255 = copy.deepcopy(create_response) create_response_v255['properties']['flavor'] = copy.deepcopy(_flavor_v255) create_response_v261 = copy.deepcopy(create_response_v255) create_response_v261['properties']['flavor'] = copy.deepcopy(_flavor_v261) create_response_v275 = copy.deepcopy(create_response_v261) create_response_v275['properties']['flavor'] = copy.deepcopy(_flavor_v275) # NOTE(stephenfin): update is only available from 2.55 and the response is # identical to the create and show response from that point forward update_response = { 'type': 'object', 'properties': { 'flavor': copy.deepcopy(_flavor_v255), }, 'required': ['flavor'], 'additionalProperties': False, } update_response_v261 = copy.deepcopy(update_response) update_response_v261['properties']['flavor'] = copy.deepcopy(_flavor_v261) update_response_v275 = copy.deepcopy(update_response_v261) update_response_v275['properties']['flavor'] = copy.deepcopy(_flavor_v275) index_response = { 'type': 'object', 'properties': { 'flavors': { 'type': 'array', 'items': _flavor_basic, }, 'flavors_links': _flavors_links, }, 'required': ['flavors'], 'additionalProperties': False, } index_response_v255 = copy.deepcopy(index_response) index_response_v255['properties']['flavors']['items'] = _flavor_basic_v255 detail_response = { 'type': 'object', 'properties': { 'flavors': { 'type': 'array', 'items': _flavor, }, 'flavors_links': _flavors_links, }, 'required': ['flavors'], 'additionalProperties': False, } detail_response_v255 = copy.deepcopy(detail_response) detail_response_v255['properties']['flavors']['items'] = _flavor_v255 detail_response_v261 = copy.deepcopy(detail_response_v255) detail_response_v261['properties']['flavors']['items'] = _flavor_v261 detail_response_v275 = copy.deepcopy(detail_response_v261) detail_response_v275['properties']['flavors']['items'] = _flavor_v275 show_response = { 'type': 'object', 'properties': { 'flavor': copy.deepcopy(_flavor), }, 'required': ['flavor'], 'additionalProperties': False, } show_response_v255 = copy.deepcopy(show_response) show_response_v255['properties']['flavor'] = copy.deepcopy(_flavor_v255) show_response_v261 = copy.deepcopy(show_response_v255) show_response_v261['properties']['flavor'] = copy.deepcopy(_flavor_v261) show_response_v275 = copy.deepcopy(show_response_v261) show_response_v275['properties']['flavor'] = copy.deepcopy(_flavor_v275) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/flavors_extraspecs.py0000664000175000017500000000446600000000000025665 0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.validation import parameter_types _metadata_key = '^[a-zA-Z0-9-_:. ]{1,255}$' # NOTE(oomichi): The metadata of flavor_extraspecs should accept numbers # as its values. _metadata = copy.deepcopy(parameter_types.metadata) _metadata['patternProperties'][_metadata_key]['type'] = [ 'string', 'number' ] create = { 'type': 'object', 'properties': { 'extra_specs': _metadata }, 'required': ['extra_specs'], 'additionalProperties': False, } update = copy.deepcopy(_metadata) update.update({ 'minProperties': 1, 'maxProperties': 1 }) # TODO(stephenfin): Remove additionalProperties in a future API version index_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } # TODO(stephenfin): Remove additionalProperties in a future API version show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } index_response = { 'type': 'object', 'properties': { # NOTE(stephenfin): While we accept numbers as values, we always return # strings 'extra_specs': parameter_types.metadata, }, 'required': ['extra_specs'], 'additionalProperties': False, } # NOTE(stephenfin): We return the request back to the user unmodified, meaning # if the user provided a number key then they'll get a number key back, even # though later requests will return a string create_response = copy.deepcopy(create) # NOTE(stephenfin): As above update_response = copy.deepcopy(update) # NOTE(stephenfin): Since we are retrieving here, we always return string keys # (like index) show_response = copy.deepcopy(parameter_types.metadata) delete_response = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/floating_ip_dns.py0000664000175000017500000000140100000000000025071 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(stephenfin): These schemas are intentionally empty since these APIs have # been removed update = {} index_query = {} show_query = {} index_response = {} show_response = {} update_response = {} delete_response = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/floating_ip_pools.py0000664000175000017500000000223700000000000025451 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # TODO(stephenfin): Remove additionalProperties in a future API version index_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } index_response = { 'type': 'object', 'properties': { 'floating_ip_pools': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'name': {'type': 'string'} }, 'required': ['name'], 'additionalProperties': False, }, }, }, 'required': ['floating_ip_pools'], 'additionalProperties': True, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/floating_ips.py0000664000175000017500000000355700000000000024426 0ustar00zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.validation import parameter_types create = { 'type': ['object', 'null'], 'properties': { 'pool': { 'type': ['string', 'null'], }, }, } add_floating_ip = { 'type': 'object', 'properties': { 'addFloatingIp': { 'type': 'object', 'properties': { 'address': parameter_types.ip_address, 'fixed_address': parameter_types.ip_address }, 'required': ['address'], 'additionalProperties': False } }, 'required': ['addFloatingIp'], 'additionalProperties': False } remove_floating_ip = { 'type': 'object', 'properties': { 'removeFloatingIp': { 'type': 'object', 'properties': { 'address': parameter_types.ip_address }, 'required': ['address'], 'additionalProperties': False } }, 'required': ['removeFloatingIp'], 'additionalProperties': False } # NOTE(stephenfin): These schemas are intentionally empty since these APIs are # deprecated proxy APIs show_query = {} index_query = {} add_floating_ip_response = { 'type': 'null', } remove_floating_ip_response = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/floating_ips_bulk.py0000664000175000017500000000141500000000000025432 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
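# [Editor's sketch; not part of the upstream module.] Most of the query
# schemas in this package keep ``additionalProperties: True`` for backward
# compatibility, so unknown query parameters do not trigger a 400, and a
# stricter ``*_275``-style copy flips the flag where microversion 2.75
# started rejecting them. A standalone illustration of the difference (the
# real query schemas wrap their values in multi-parameter helpers; this is
# deliberately simplified):
def _editor_demo_query_strictness():
    import copy

    import jsonschema

    lax = {
        'type': 'object',
        'properties': {'limit': {'type': 'string'}},
        'additionalProperties': True,
    }
    strict = copy.deepcopy(lax)
    strict['additionalProperties'] = False

    query = {'limit': '10', 'typo_parameter': 'x'}
    assert jsonschema.Draft4Validator(lax).is_valid(query)
    assert not jsonschema.Draft4Validator(strict).is_valid(query)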
# NOTE(stephenfin): These schemas are intentionally empty since these APIs have # been removed create = {} update = {} index_query = {} show_query = {} index_response = {} show_response = {} create_response = {} update_response = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/fping.py0000664000175000017500000000131200000000000023036 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(stephenfin): These schemas are intentionally empty since these APIs have # been removed index_query = {} show_query = {} index_response = {} show_response = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/hosts.py0000664000175000017500000001047100000000000023101 0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.validation import parameter_types update = { 'type': 'object', 'properties': { 'status': { 'type': 'string', 'enum': ['enable', 'disable', 'Enable', 'Disable', 'ENABLE', 'DISABLE'], }, 'maintenance_mode': { 'type': 'string', 'enum': ['enable', 'disable', 'Enable', 'Disable', 'ENABLE', 'DISABLE'], }, }, 'anyOf': [ {'required': ['status']}, {'required': ['maintenance_mode']} ], 'additionalProperties': False, } index_query = { 'type': 'object', 'properties': { 'zone': parameter_types.multi_params({'type': 'string'}) }, # NOTE(gmann): This is kept True to keep backward compatibility. # As of now Schema validation stripped out the additional parameters and # does not raise 400. This API is deprecated in microversion 2.43 so we # do not to update the additionalProperties to False. 
'additionalProperties': True } # NOTE(stephenfin): These schemas are intentionally empty since these APIs are # deprecated proxy APIs startup_query = {} shutdown_query = {} reboot_query = {} show_query = {} index_response = { 'type': 'object', 'properties': { 'hosts': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'host_name': {'type': 'string'}, # TODO(stephenfin): This should be an enum 'service': {'type': 'string'}, 'zone': {'type': 'string'}, }, 'required': ['host_name', 'service', 'zone'], 'additionalProperties': False, }, }, }, 'required': ['hosts'], 'additionalProperties': False, } show_response = { 'type': 'object', 'properties': { 'host': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'resource': { 'type': 'object', 'properties': { 'cpu': {'type': 'integer'}, 'disk_gb': {'type': 'integer'}, 'host': {'type': 'string'}, 'memory_mb': {'type': 'integer'}, 'project': {'type': 'string'}, }, 'required': [ 'cpu', 'disk_gb', 'host', 'memory_mb', 'project' ], 'additionalProperties': False, }, }, 'required': ['resource'], 'additionalProperties': False, }, }, }, 'required': ['host'], 'additionalProperties': False, } update_response = { 'type': 'object', 'properties': { 'host': {'type': 'string'}, 'maintenance_mode': {'enum': ['on_maintenance', 'off_maintenance']}, 'status': {'enum': ['enabled', 'disabled']}, }, 'required': ['host'], 'additionalProperties': False, } _power_action_response = { 'type': 'object', 'properties': { 'host': {'type': 'string'}, # NOTE(stephenfin): This is virt driver specific and the API is # deprecated, so this is left empty 'power_action': {}, }, 'required': ['host', 'power_action'], 'additionalProperties': False, } startup_response = copy.deepcopy(_power_action_response) shutdown_response = copy.deepcopy(_power_action_response) reboot_response = copy.deepcopy(_power_action_response) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/hypervisors.py0000664000175000017500000002732600000000000024345 0ustar00zuulzuul00000000000000# Copyright 2017 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.validation import parameter_types from nova.api.validation import response_types index_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } index_query_v233 = { 'type': 'object', 'properties': parameter_types.pagination_parameters, # NOTE(gmann): This is kept True to keep backward compatibility. # As of now Schema validation stripped out the additional parameters and # does not raise 400. In microversion 2.53, the additional parameters # are not allowed. 'additionalProperties': True } index_query_v253 = { 'type': 'object', 'properties': { # The 2.33 microversion added support for paging by limit and marker. 
'limit': parameter_types.single_param( parameter_types.non_negative_integer), 'marker': parameter_types.single_param({'type': 'string'}), # The 2.53 microversion adds support for filtering by hostname pattern # and requesting hosted servers in the GET /os-hypervisors and # GET /os-hypervisors/detail response. 'hypervisor_hostname_pattern': parameter_types.single_param( parameter_types.fqdn), 'with_servers': parameter_types.single_param( parameter_types.boolean) }, 'additionalProperties': False } show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } show_query_v253 = { 'type': 'object', 'properties': { 'with_servers': parameter_types.single_param( parameter_types.boolean) }, 'additionalProperties': False } # NOTE(stephenfin): These schemas are intentionally empty since these APIs are # deprecated statistics_query = {} search_query = {} servers_query = {} uptime_query = {} _servers_response = { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'name': {'type': 'string'}, 'uuid': {'type': 'string', 'format': 'uuid'}, }, 'required': ['name', 'uuid'], 'additionalProperties': False, }, } _hypervisor_response = { 'type': 'object', 'properties': { 'id': {'type': 'integer'}, 'hypervisor_hostname': {'type': 'string'}, 'state': {'enum': ['up', 'down']}, 'status': {'enum': ['enabled', 'disabled']}, }, 'required': ['id', 'hypervisor_hostname', 'state', 'status'], 'additionalProperties': False, } _hypervisor_response_v253 = copy.deepcopy(_hypervisor_response) _hypervisor_response_v253['properties'].update({ 'id': {'type': 'string', 'format': 'uuid'}, 'servers': copy.deepcopy(_servers_response), }) _hypervisor_detail_response = { 'type': 'object', 'properties': { 'cpu_info': {'type': 'string'}, 'current_workload': {'type': ['null', 'integer']}, 'disk_available_least': {'type': ['null', 'integer']}, 'free_disk_gb': {'type': ['null', 'integer']}, 'free_ram_mb': {'type': ['null', 'integer']}, 'host_ip': {'type': ['null', 'string']}, 'hypervisor_hostname': {'type': 'string'}, 'hypervisor_type': {'type': 'string'}, 'hypervisor_version': {'type': ['string', 'integer']}, 'id': {'type': 'integer'}, 'local_gb': {'type': 'integer'}, 'local_gb_used': {'type': 'integer'}, 'memory_mb': {'type': 'integer'}, 'memory_mb_used': {'type': 'integer'}, 'running_vms': {'type': ['null', 'integer']}, 'service': { 'type': 'object', 'properties': { 'disabled_reason': {'type': ['null', 'string']}, 'host': {'type': 'string'}, 'id': {'type': 'integer'}, }, 'required': ['disabled_reason', 'host', 'id'], }, 'state': {'enum': ['up', 'down']}, 'status': {'enum': ['enabled', 'disabled']}, 'vcpus': {'type': 'integer'}, 'vcpus_used': {'type': 'integer'}, }, 'required': [ 'cpu_info', 'current_workload', 'free_disk_gb', 'free_ram_mb', 'host_ip', 'hypervisor_hostname', 'hypervisor_type', 'hypervisor_version', 'id', 'local_gb', 'local_gb_used', 'memory_mb', 'memory_mb_used', 'running_vms', 'service', 'state', 'status', 'vcpus', 'vcpus_used', ], 'additionalProperties': False, } _hypervisor_detail_response_v228 = copy.deepcopy(_hypervisor_detail_response) _hypervisor_detail_response_v228['properties'].update({ 'cpu_info': { # NOTE(stephenfin): This is virt-driver specific hence no schema 'type': 'object', 'properties': {}, 'required': [], 'additionalProperties': True, }, }) _hypervisor_detail_response_v253 = copy.deepcopy( _hypervisor_detail_response_v228 ) _hypervisor_detail_response_v253['properties'].update({ 'id': {'type': 'string', 'format': 'uuid'}, 'servers': copy.deepcopy(_servers_response), }) 
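# [Editor's aside; not part of the upstream module.] As the notes on the
# v2.53 response schemas in this module explain, whether ``servers`` appears
# depends on the ``with_servers`` query parameter, which plain property and
# required definitions cannot express. Purely as an illustration (this is
# not how nova does it), newer JSON Schema drafts can encode such a
# conditional with ``if``/``then``; the toy document below folds the flag
# into the same object just to show the keyword:
def _editor_demo_conditional_schema():
    import jsonschema

    schema = {
        'type': 'object',
        'properties': {
            'with_servers': {'type': 'boolean'},
            'servers': {'type': 'array'},
        },
        # draft-07 conditional: if with_servers is true, servers must exist
        'if': {
            'properties': {'with_servers': {'const': True}},
            'required': ['with_servers'],
        },
        'then': {'required': ['servers']},
    }
    validator = jsonschema.Draft7Validator(schema)
    assert validator.is_valid({'with_servers': True, 'servers': []})
    assert not validator.is_valid({'with_servers': True})
    assert validator.is_valid({'with_servers': False})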
_hypervisor_detail_response_v253['properties']['service'][ 'properties' ].update({ 'id': {'type': 'string', 'format': 'uuid'}, }) _hypervisor_detail_response_v288 = copy.deepcopy( _hypervisor_detail_response_v253 ) for field in { 'cpu_info', 'current_workload', 'free_disk_gb', 'free_ram_mb', 'local_gb', 'local_gb_used', 'memory_mb', 'memory_mb_used', 'running_vms', 'vcpus', 'vcpus_used', }: del _hypervisor_detail_response_v288['properties'][field] _hypervisor_detail_response_v288['required'].remove(field) _hypervisor_detail_response_v288['properties'].update({ 'uptime': {'type': ['string', 'null']} }) _hypervisor_detail_response_v288['required'].append('uptime') index_response = { 'type': 'object', 'properties': { 'hypervisors': { 'type': 'array', 'items': copy.deepcopy(_hypervisor_response), }, }, 'required': ['hypervisors'], 'additionalProperties': False, } # v2.33 adds the hypervisors_links field index_response_v233 = copy.deepcopy(index_response) index_response_v233['properties'].update({ 'hypervisors_links': response_types.collection_links, }) # v2.53 adds the 'servers' field but only if a user requests it via the # 'with_servers' query arg. It also changes the 'id' field to a UUID. Note that # v2.75 makes the 'servers' property always present even if empty *unless* the # 'with_servers' query parameter is 'false'. This dependency between a query # parameter and a response parameter is not something we can capture with # jsonschema and we can't update 'required' as a result index_response_v253 = copy.deepcopy(index_response_v233) index_response_v253['properties']['hypervisors']['items'] = ( _hypervisor_response_v253 ) search_response = copy.deepcopy(index_response) servers_response = copy.deepcopy(index_response) servers_response['properties']['hypervisors']['items']['properties'].update({ 'servers': copy.deepcopy(_servers_response), }) detail_response = copy.deepcopy(index_response) detail_response['properties']['hypervisors'][ 'items' ] = _hypervisor_detail_response # v2.28 changes the 'cpu_info' field from a stringified object to a real object detail_response_v228 = copy.deepcopy(detail_response) detail_response_v228['properties']['hypervisors'][ 'items' ] = _hypervisor_detail_response_v228 # v2.33 adds the hypervisors_links field detail_response_v233 = copy.deepcopy(detail_response_v228) detail_response_v233['properties'].update({ 'hypervisors_links': response_types.collection_links, }) # v2.53 adds the 'servers' field but only if a user requests it via the # 'with_servers' query arg. It also changes the 'id' field to a UUID. Note that # v2.75 makes the 'servers' property always present even if empty *unless* the # 'with_servers' query parameter is 'false'. This dependency between a query # parameter and a response parameter is not something we can capture with # jsonschema and we can't update 'required' as a result detail_response_v253 = copy.deepcopy(detail_response_v233) detail_response_v253['properties']['hypervisors'][ 'items' ] = _hypervisor_detail_response_v253 # v2.88 drops a whole lot of fields that were now duplicated in placement. 
It # also adds the uptime field into the response rather than a separate API detail_response_v288 = copy.deepcopy(detail_response_v253) detail_response_v288['properties']['hypervisors'][ 'items' ] = _hypervisor_detail_response_v288 show_response = { 'type': 'object', 'properties': { 'hypervisor': copy.deepcopy(_hypervisor_detail_response), }, 'required': ['hypervisor'], 'additionalProperties': False, } show_response_v228 = copy.deepcopy(show_response) show_response_v228['properties']['hypervisor'] = copy.deepcopy( _hypervisor_detail_response_v228 ) show_response_v253 = copy.deepcopy(show_response_v228) show_response_v253['properties']['hypervisor'] = copy.deepcopy( _hypervisor_detail_response_v253 ) show_response_v288 = copy.deepcopy(show_response_v253) show_response_v288['properties']['hypervisor'] = copy.deepcopy( _hypervisor_detail_response_v288 ) uptime_response = { 'type': 'object', 'properties': { 'hypervisor': copy.deepcopy(_hypervisor_response), }, 'required': ['hypervisor'], 'additionalProperties': False, } uptime_response['properties']['hypervisor']['properties'].update({ 'uptime': {'type': ['string', 'null']} }) uptime_response['properties']['hypervisor']['required'].append('uptime') uptime_response_v253 = copy.deepcopy(uptime_response) uptime_response_v253['properties']['hypervisor'] = copy.deepcopy( _hypervisor_response_v253 ) uptime_response_v253['properties']['hypervisor']['properties'].update({ 'uptime': {'type': ['string', 'null']} }) uptime_response_v253['properties']['hypervisor']['required'].append('uptime') statistics_response = { 'type': 'object', 'properties': { 'hypervisor_statistics': { 'type': 'object', 'properties': { 'count': {'type': 'integer'}, 'current_workload': {'type': ['null', 'integer']}, 'disk_available_least': {'type': ['null', 'integer']}, 'free_disk_gb': {'type': ['null', 'integer']}, 'free_ram_mb': {'type': ['null', 'integer']}, 'local_gb': {'type': 'integer'}, 'local_gb_used': {'type': 'integer'}, 'memory_mb': {'type': 'integer'}, 'memory_mb_used': {'type': 'integer'}, 'running_vms': {'type': ['null', 'integer']}, 'vcpus': {'type': 'integer'}, 'vcpus_used': {'type': 'integer'}, }, 'required': [ 'count', 'current_workload', 'disk_available_least', 'free_disk_gb', 'free_ram_mb', 'local_gb', 'local_gb_used', 'memory_mb', 'memory_mb_used', 'running_vms', 'vcpus', 'vcpus_used', ], 'additionalProperties': False, }, }, 'required': ['hypervisor_statistics'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/image_metadata.py0000664000175000017500000000354100000000000024663 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
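# [Editor's sketch; not part of the upstream module.] ``single_metadata``
# below constrains the ``meta`` body of a metadata-item update to exactly
# one key by combining ``minProperties`` and ``maxProperties``. A standalone
# illustration of those keywords:
def _editor_demo_single_item_metadata():
    import jsonschema

    schema = {
        'type': 'object',
        'minProperties': 1,
        'maxProperties': 1,
    }
    validator = jsonschema.Draft4Validator(schema)
    assert validator.is_valid({'key': 'value'})
    assert not validator.is_valid({})                    # too few keys
    assert not validator.is_valid({'a': '1', 'b': '2'})  # too many keys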
import copy from nova.api.validation import parameter_types from nova.api.validation import response_types create = { 'type': 'object', 'properties': { 'metadata': parameter_types.metadata }, 'required': ['metadata'], 'additionalProperties': False, } single_metadata = copy.deepcopy(parameter_types.metadata) single_metadata.update({ 'minProperties': 1, 'maxProperties': 1 }) update = { 'type': 'object', 'properties': { 'meta': single_metadata }, 'required': ['meta'], 'additionalProperties': False, } update_all = create # NOTE(stephenfin): These schemas are intentionally empty since these APIs have # been removed index_query = {} show_query = {} index_response = { 'type': 'object', 'properties': { 'metadata': response_types.metadata, }, 'required': ['metadata'], 'additionalProperties': False, } show_response = { 'type': 'object', 'properties': { 'meta': response_types.meta, }, 'required': ['meta'], 'additionalProperties': False, } create_response = copy.deepcopy(index_response) update_response = copy.deepcopy(show_response) update_all_response = copy.deepcopy(index_response) delete_response = {'type': 'null'} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/images.py0000664000175000017500000001623300000000000023210 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.validation import parameter_types from nova.api.validation import response_types # NOTE(stephenfin): These schemas are incomplete but won't be enhanced further # since these APIs have been removed show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } index_query = { 'type': 'object', 'properties': { # NOTE(stephenfin): We never validated these and we're not going to add # that validation now. 
# field filters 'name': {}, 'status': {}, 'changes-since': {}, 'server': {}, 'type': {}, 'minRam': {}, 'minDisk': {}, # pagination filters 'limit': parameter_types.multi_params( parameter_types.positive_integer), 'page_size': parameter_types.multi_params( parameter_types.positive_integer), 'marker': {}, 'offset': parameter_types.multi_params( parameter_types.positive_integer), }, 'patternProperties': { '^property-.*$': {}, }, 'additionalProperties': True, } detail_query = index_query _links_response = { 'type': 'array', 'prefixItems': [ { 'type': 'object', 'properties': { 'href': {'type': 'string', 'format': 'uri'}, 'rel': {'const': 'self'}, }, 'required': ['href', 'rel'], 'additionalProperties': False, }, { 'type': 'object', 'properties': { 'href': {'type': 'string', 'format': 'uri'}, 'rel': {'const': 'bookmark'}, }, 'required': ['href', 'rel'], 'additionalProperties': False, }, { 'type': 'object', 'properties': { 'href': {'type': 'string', 'format': 'uri'}, 'rel': {'const': 'alternate'}, 'type': {'const': 'application/vnd.openstack.image'}, }, 'required': ['href', 'rel', 'type'], 'additionalProperties': False, }, ], 'minItems': 3, 'maxItems': 3, } _image_response = { 'type': 'object', 'properties': { 'created': {'type': 'string', 'format': 'date-time'}, 'id': {'type': 'string', 'format': 'uuid'}, 'links': _links_response, 'metadata': { 'type': 'object', 'patternProperties': { # unlike nova's metadata, glance doesn't have a maximum length # on property values. Also, while glance serializes all # non-null values as strings, nova's image API deserializes # these again, so we can expected practically any primitive # type here. Listing all these is effectively the same as # providing an empty schema so we're mainly doing it for the # benefit of tooling. '^[a-zA-Z0-9-_:. 
]{1,255}$': { 'type': [ 'array', 'boolean', 'integer', 'number', 'object', 'string', 'null', ] }, }, 'additionalProperties': False, }, 'minDisk': {'type': 'integer', 'minimum': 0}, 'minRam': {'type': 'integer', 'minimum': 0}, 'name': {'type': ['string', 'null']}, 'progress': { 'type': 'integer', 'enum': [0, 25, 50, 100], }, 'server': { 'type': 'object', 'properties': { 'id': {'type': 'string', 'format': 'uuid'}, 'links': { 'type': 'array', 'prefixItems': [ { 'type': 'object', 'properties': { 'href': {'type': 'string', 'format': 'uri'}, 'rel': {'const': 'self'}, }, 'required': ['href', 'rel'], 'additionalProperties': False, }, { 'type': 'object', 'properties': { 'href': {'type': 'string', 'format': 'uri'}, 'rel': {'const': 'bookmark'}, }, 'required': ['href', 'rel'], 'additionalProperties': False, }, ], 'minItems': 2, 'maxItems': 2, }, }, 'required': ['id', 'links'], 'additionalProperties': False, }, 'status': { 'type': 'string', 'enum': ['ACTIVE', 'SAVING', 'DELETED', 'ERROR', 'UNKNOWN'], }, 'updated': {'type': ['string', 'null'], 'format': 'date-time'}, 'OS-DCF:diskConfig': {'type': 'string', 'enum': ['AUTO', 'MANUAL']}, 'OS-EXT-IMG-SIZE:size': {'type': 'integer'}, }, 'required': [ 'created', 'id', 'links', 'metadata', 'minDisk', 'minRam', 'name', 'progress', 'status', 'updated', 'OS-EXT-IMG-SIZE:size', ], 'additionalProperties': False, } show_response = { 'type': 'object', 'properties': { 'image': copy.deepcopy(_image_response), }, 'required': ['image'], 'additionalProperties': False, } delete_response = {'type': 'null'} index_response = { 'type': 'object', 'properties': { 'images': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'id': {'type': 'string', 'format': 'uuid'}, 'links': _links_response, 'name': {'type': ['string', 'null']}, }, 'required': ['id', 'links', 'name'], 'additionalProperties': False, }, }, 'images_links': response_types.collection_links, }, 'required': ['images'], 'additionalProperties': False, } detail_response = { 'type': 'object', 'properties': { 'images': { 'type': 'array', 'items': copy.deepcopy(_image_response), }, 'images_links': response_types.collection_links, }, 'required': ['images'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/instance_actions.py0000664000175000017500000001574600000000000025277 0ustar00zuulzuul00000000000000# Copyright 2017 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.validation import parameter_types from nova.api.validation import response_types index_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } index_query_v258 = { 'type': 'object', 'properties': { # The 2.58 microversion added support for paging by limit and marker # and filtering by changes-since. 
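        # For illustration only, a paged request might look like
        #   ?limit=100&changes-since=2018-05-01T00:00:00Z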
'limit': parameter_types.single_param( parameter_types.non_negative_integer), 'marker': parameter_types.single_param({'type': 'string'}), 'changes-since': parameter_types.single_param( {'type': 'string', 'format': 'date-time'}), }, 'additionalProperties': False } index_query_v266 = copy.deepcopy(index_query_v258) index_query_v266['properties'].update({ 'changes-before': parameter_types.single_param( {'type': 'string', 'format': 'date-time'}), }) show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } index_response = { 'type': 'object', 'properties': { 'instanceActions': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'action': {'type': 'string'}, 'instance_uuid': {'type': 'string', 'format': 'uuid'}, 'message': {'type': ['null', 'string']}, # NOTE(stephenfin): While this will always be set for # API-initiated actions, it will not be set for e.g. # `nova-manage`-initiated actions 'project_id': { 'type': ['null', 'string'], 'pattern': '^[a-zA-Z0-9-]*$', 'minLength': 1, 'maxLength': 255, }, 'request_id': {'type': 'string'}, 'start_time': {'type': 'string', 'format': 'date-time'}, # NOTE(stephenfin): As with project_id, this can be null # under select circumstances. 'user_id': { 'type': ['null', 'string'], 'pattern': '^[a-zA-Z0-9-]*$', 'minLength': 1, 'maxLength': 255, }, }, 'required': [ 'action', 'instance_uuid', 'message', 'project_id', 'request_id', 'start_time', 'user_id', ], 'additionalProperties': False, }, }, }, 'required': ['instanceActions'], 'additionalProperties': False, } index_response_v258 = copy.deepcopy(index_response) index_response_v258['properties']['instanceActions']['items'][ 'properties' ].update({ 'updated_at': {'type': ['null', 'string'], 'format': 'date-time'}, }) index_response_v258['properties']['instanceActions']['items'][ 'required' ].append('updated_at') index_response_v258['properties']['links'] = response_types.collection_links show_response = { 'type': 'object', 'properties': { 'instanceAction': { 'type': 'object', 'properties': { 'action': {'type': 'string'}, 'instance_uuid': {'type': 'string', 'format': 'uuid'}, 'events': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'event': {'type': 'string'}, 'finish_time': { 'type': ['string', 'null'], 'format': 'date-time', }, 'result': {'type': ['string', 'null']}, 'start_time': { 'type': 'string', 'format': 'date-time', }, 'traceback': {'type': ['null', 'string']}, }, 'required': [ 'event', 'finish_time', 'result', 'start_time', ], 'additionalProperties': False, }, }, 'message': {'type': ['null', 'string']}, 'project_id': { 'type': ['null', 'string'], 'pattern': '^[a-zA-Z0-9-]*$', 'minLength': 1, 'maxLength': 255, }, 'request_id': {'type': 'string'}, 'start_time': {'type': 'string', 'format': 'date-time'}, 'user_id': { 'type': ['null', 'string'], 'pattern': '^[a-zA-Z0-9-]*$', 'minLength': 1, 'maxLength': 255, }, }, 'required': [ 'action', 'instance_uuid', 'message', 'project_id', 'request_id', 'start_time', 'user_id', ], 'additionalProperties': False, }, }, 'required': ['instanceAction'], 'additionalProperties': False, } show_response_v251 = copy.deepcopy(show_response) show_response_v251['properties']['instanceAction']['required'].append( 'events' ) show_response_v258 = copy.deepcopy(show_response_v251) show_response_v258['properties']['instanceAction']['properties'].update({ 'updated_at': {'type': ['null', 'string'], 'format': 'date-time'}, }) show_response_v258['properties']['instanceAction']['required'].append( 'updated_at' ) show_response_v262 = 
copy.deepcopy(show_response_v258) show_response_v262['properties']['instanceAction']['properties']['events'][ 'items' ]['properties'].update({ 'hostId': {'type': 'string'}, 'host': {'type': 'string'}, }) show_response_v262['properties']['instanceAction']['properties']['events'][ 'items' ]['required'].append('hostId') show_response_v284 = copy.deepcopy(show_response_v262) show_response_v284['properties']['instanceAction']['properties']['events'][ 'items' ]['properties'].update({ 'details': {'type': ['string', 'null']}, }) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/instance_usage_audit_log.py0000664000175000017500000000134100000000000026754 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. index_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/ips.py0000664000175000017500000000376100000000000022540 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy # TODO(stephenfin): Remove additionalProperties in a future API version index_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } # TODO(stephenfin): Remove additionalProperties in a future API version show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } _ip_address = { 'type': 'object', 'properties': { 'addr': { 'type': 'string', 'oneOf': [ {'format': 'ipv4'}, {'format': 'ipv6'}, ], }, 'version': { 'enum': [4, 6], }, }, 'required': ['addr', 'version'], 'additionalProperties': False, } index_response = { 'type': 'object', 'properties': { 'addresses': { 'type': 'object', 'patternProperties': { # TODO(stephenfin): Surely there are some limitations on # network names? '^.+$': { 'type': 'array', 'items': copy.deepcopy(_ip_address), }, }, }, }, 'required': ['addresses'], 'additionalProperties': False, } show_response = { 'type': 'object', 'patternProperties': { # TODO(stephenfin): Surely there are some limitations on # network names? 
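        # For illustration only: a body matched by this schema looks like
        #   {"private": [{"addr": "192.168.0.3", "version": 4}]}
        # where "private" is the network label.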
'^.+$': { 'type': 'array', 'items': copy.deepcopy(_ip_address), }, }, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/keypairs.py0000664000175000017500000002002100000000000023560 0ustar00zuulzuul00000000000000# Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.validation import parameter_types from nova.api.validation import response_types create = { 'type': 'object', 'properties': { 'keypair': { 'type': 'object', 'properties': { 'name': parameter_types.keypair_name_special_chars, 'public_key': {'type': 'string'}, }, 'required': ['name'], 'additionalProperties': False, }, }, 'required': ['keypair'], 'additionalProperties': False, } create_v20 = copy.deepcopy(create) create_v20['properties']['keypair']['properties']['name'] = ( parameter_types.name_with_leading_trailing_spaces) create_v22 = { 'type': 'object', 'properties': { 'keypair': { 'type': 'object', 'properties': { 'name': parameter_types.keypair_name_special_chars, 'type': { 'type': 'string', 'enum': ['ssh', 'x509'] }, 'public_key': {'type': 'string'}, }, 'required': ['name'], 'additionalProperties': False, }, }, 'required': ['keypair'], 'additionalProperties': False, } create_v210 = { 'type': 'object', 'properties': { 'keypair': { 'type': 'object', 'properties': { 'name': parameter_types.keypair_name_special_chars, 'type': { 'type': 'string', 'enum': ['ssh', 'x509'] }, 'public_key': {'type': 'string'}, 'user_id': {'type': 'string'}, }, 'required': ['name'], 'additionalProperties': False, }, }, 'required': ['keypair'], 'additionalProperties': False, } create_v292 = copy.deepcopy(create_v210) create_v292['properties']['keypair']['properties']['name'] = ( parameter_types.keypair_name_special_chars_v292) create_v292['properties']['keypair']['required'] = ['name', 'public_key'] index_query_schema_v20 = { 'type': 'object', 'properties': {}, 'additionalProperties': True } index_query_schema_v210 = { 'type': 'object', 'properties': { 'user_id': parameter_types.multi_params({'type': 'string'}) }, 'additionalProperties': True } index_query_schema_v235 = copy.deepcopy(index_query_schema_v210) index_query_schema_v235['properties'].update( parameter_types.pagination_parameters) show_query_schema_v20 = index_query_schema_v20 show_query_schema_v210 = index_query_schema_v210 delete_query_schema_v20 = index_query_schema_v20 delete_query_schema_v210 = index_query_schema_v210 index_query_schema_v275 = copy.deepcopy(index_query_schema_v235) index_query_schema_v275['additionalProperties'] = False show_query_schema_v275 = copy.deepcopy(show_query_schema_v210) show_query_schema_v275['additionalProperties'] = False delete_query_schema_v275 = copy.deepcopy(delete_query_schema_v210) delete_query_schema_v275['additionalProperties'] = False create_response = { 'type': 'object', 'properties': { 'keypair': { 'type': 'object', 'properties': { 'fingerprint': {'type': 'string'}, 'name': 
parameter_types.keypair_name_special_chars, 'private_key': {'type': 'string'}, 'public_key': {'type': 'string'}, 'user_id': parameter_types.user_id, }, 'required': ['fingerprint', 'name', 'public_key', 'user_id'], 'additionalProperties': False, } }, 'required': ['keypair'], 'additionalProperties': False, } create_response_v22 = copy.deepcopy(create_response) create_response_v22['properties']['keypair']['properties'].update({ 'type': { 'type': 'string', 'enum': ['ssh', 'x509'] }, }) create_response_v22['properties']['keypair']['required'].append('type') create_response_v292 = copy.deepcopy(create_response_v22) del create_response_v292['properties']['keypair']['properties']['private_key'] create_response_v292['properties']['keypair']['properties']['name'] = ( parameter_types.keypair_name_special_chars_v292 ) delete_response = { 'type': 'null', } show_response = { 'type': 'object', 'properties': { 'keypair': { 'type': 'object', 'properties': { 'created_at': {'type': 'string', 'format': 'date-time'}, # NOTE(stephenfin): While we expose these soft delete fields, # we don't provide a mechanism to show actual soft deleted # entries. As such, they're never populated. 'deleted': {'type': 'boolean', 'const': False}, 'deleted_at': {'type': 'null'}, 'fingerprint': {'type': 'string'}, 'id': {'type': 'integer'}, 'name': parameter_types.keypair_name_special_chars, 'public_key': {'type': 'string'}, # NOTE(stephenfin): There is no way to update an existing # keypair, so this is never populated. 'updated_at': {'type': 'null'}, 'user_id': parameter_types.user_id, }, 'required': [ 'created_at', 'deleted', 'deleted_at', 'fingerprint', 'id', 'name', 'public_key', 'updated_at', 'user_id' ], 'additionalProperties': False, } }, 'required': ['keypair'], 'additionalProperties': False, } show_response_v22 = copy.deepcopy(show_response) show_response_v22['properties']['keypair']['properties'].update({ 'type': { 'type': 'string', 'enum': ['ssh', 'x509'] }, }) show_response_v22['properties']['keypair']['required'].append('type') index_response = { 'type': 'object', 'properties': { 'keypairs': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'keypair': { 'type': 'object', 'items': { 'type': 'object', 'properties': { 'fingerprint': {'type': 'string'}, 'name': parameter_types.keypair_name_special_chars, # noqa: E501 'public_key': {'type': 'string'}, }, 'required': ['fingerprint', 'name', 'public_key'], 'additionalProperties': False, }, }, }, 'required': ['keypair'], 'additionalProperties': False, }, }, }, 'required': ['keypairs'], 'additionalProperties': False, } index_response_v22 = copy.deepcopy(index_response) index_response_v22['properties']['keypairs']['items']['properties'][ 'keypair' ]['items']['properties'].update({ 'type': { 'type': 'string', 'enum': ['ssh', 'x509'] }, }) index_response_v22['properties']['keypairs']['items']['properties'][ 'keypair' ]['items']['required'].append( 'type' ) index_response_v235 = copy.deepcopy(index_response_v22) index_response_v235['properties'].update({ 'keypairs_links': response_types.collection_links, }) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/limits.py0000664000175000017500000000201400000000000023234 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.validation import parameter_types limits_query_schema = { 'type': 'object', 'properties': { 'tenant_id': parameter_types.common_query_param, }, # For backward compatible changes # In microversion 2.75, we have blocked the additional # parameters. 'additionalProperties': True } limits_query_schema_275 = copy.deepcopy(limits_query_schema) limits_query_schema_275['additionalProperties'] = False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/lock_server.py0000664000175000017500000000262300000000000024257 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. lock = { 'type': 'object', 'properties': { 'lock': {}, }, 'required': ['lock'], 'additionalProperties': False, } lock_v273 = { 'type': 'object', 'properties': { 'lock': { 'type': ['object', 'null'], 'properties': { 'locked_reason': { 'type': 'string', 'minLength': 1, 'maxLength': 255, }, }, 'additionalProperties': False, }, }, 'required': ['lock'], 'additionalProperties': False, } # TODO(stephenfin): Restrict the value to 'null' in a future API version unlock = { 'type': 'object', 'properties': { 'unlock': {}, }, 'required': ['unlock'], 'additionalProperties': False, } lock_response = { 'type': 'null', } unlock_response = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/migrate_server.py0000664000175000017500000000463000000000000024757 0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
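# For illustration only: with the 2.25+ schemas defined below, a live
# migration request body may be as small as
#   {"os-migrateLive": {"host": null, "block_migration": "auto"}}
# whereas older microversions must also pass the boolean 'disk_over_commit'
# flag.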
import copy from nova.api.validation import parameter_types host = copy.deepcopy(parameter_types.fqdn) host['type'] = ['string', 'null'] migrate = { 'type': 'object', 'properties': { 'migrate': { 'type': ['object', 'null'], 'properties': { 'host': host, }, 'additionalProperties': False, }, }, 'required': ['migrate'], 'additionalProperties': False, } migrate_live = { 'type': 'object', 'properties': { 'os-migrateLive': { 'type': 'object', 'properties': { 'block_migration': parameter_types.boolean, 'disk_over_commit': parameter_types.boolean, 'host': host }, 'required': ['block_migration', 'disk_over_commit', 'host'], 'additionalProperties': False, }, }, 'required': ['os-migrateLive'], 'additionalProperties': False, } _block_migration = copy.deepcopy(parameter_types.boolean) _block_migration['enum'].append('auto') migrate_live_v2_25 = copy.deepcopy(migrate_live) del migrate_live_v2_25['properties']['os-migrateLive']['properties'][ 'disk_over_commit'] migrate_live_v2_25['properties']['os-migrateLive']['properties'][ 'block_migration'] = _block_migration migrate_live_v2_25['properties']['os-migrateLive']['required'] = ( ['block_migration', 'host']) migrate_live_v2_30 = copy.deepcopy(migrate_live_v2_25) migrate_live_v2_30['properties']['os-migrateLive']['properties'][ 'force'] = parameter_types.boolean # v2.68 removes the 'force' parameter added in v2.30, meaning it is identical # to v2.25 migrate_live_v2_68 = copy.deepcopy(migrate_live_v2_25) migrate_response = { 'type': 'null', } migrate_live_response = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/migrations.py0000664000175000017500000000431100000000000024111 0ustar00zuulzuul00000000000000# Copyright 2017 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.validation import parameter_types list_query_schema_v20 = { 'type': 'object', 'properties': { 'hidden': parameter_types.common_query_param, 'host': parameter_types.common_query_param, 'instance_uuid': parameter_types.common_query_param, 'source_compute': parameter_types.common_query_param, 'status': parameter_types.common_query_param, 'migration_type': parameter_types.common_query_param, }, # For backward compatible changes 'additionalProperties': True } list_query_params_v259 = copy.deepcopy(list_query_schema_v20) list_query_params_v259['properties'].update({ # The 2.59 microversion added support for paging by limit and marker # and filtering by changes-since. 
'limit': parameter_types.single_param( parameter_types.non_negative_integer), 'marker': parameter_types.single_param({'type': 'string'}), 'changes-since': parameter_types.single_param( {'type': 'string', 'format': 'date-time'}), }) list_query_params_v259['additionalProperties'] = False list_query_params_v266 = copy.deepcopy(list_query_params_v259) list_query_params_v266['properties'].update({ 'changes-before': parameter_types.single_param( {'type': 'string', 'format': 'date-time'}), }) list_query_params_v280 = copy.deepcopy(list_query_params_v266) list_query_params_v280['properties'].update({ # The 2.80 microversion added support for filtering migrations # by user_id and/or project_id 'user_id': parameter_types.single_param({'type': 'string'}), 'project_id': parameter_types.single_param({'type': 'string'}), }) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/multinic.py0000664000175000017500000000320700000000000023564 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.validation import parameter_types add_fixed_ip = { 'type': 'object', 'properties': { 'addFixedIp': { 'type': 'object', 'properties': { # The maxLength is from the column 'uuid' of the # table 'networks' 'networkId': { 'type': ['string', 'number'], 'minLength': 1, 'maxLength': 36, }, }, 'required': ['networkId'], 'additionalProperties': False, }, }, 'required': ['addFixedIp'], 'additionalProperties': False, } remove_fixed_ip = { 'type': 'object', 'properties': { 'removeFixedIp': { 'type': 'object', 'properties': { 'address': parameter_types.ip_address }, 'required': ['address'], 'additionalProperties': False, }, }, 'required': ['removeFixedIp'], 'additionalProperties': False, } add_fixed_ip_response = { 'type': 'null', } remove_fixed_ip_response = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/networks.py0000664000175000017500000000652200000000000023617 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(stephenfin): These schemas are intentionally empty since these APIs have # been removed create = {} add = {} disassociate = {} index_query = {} show_query = {} # NOTE(stephenfin): This is a *very* loose schema since this is a deprecated # API and only the id and label fields are populated with non-null values these # days. 
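# For illustration only: a representative entry is little more than
#   {"id": "20c8acc0-f747-4d71-a389-46d078ebf047", "label": "private"}
# with the remaining required fields set to null.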
_network_response = { 'type': 'object', 'properties': { 'bridge': {'type': ['string', 'null']}, 'bridge_interface': {'type': ['string', 'null']}, 'broadcast': {'type': ['string', 'null']}, 'cidr': {'type': ['string', 'null']}, 'cidr_v6': {'type': ['string', 'null']}, 'created_at': {'type': ['string', 'null']}, 'deleted': {'type': ['string', 'null']}, 'deleted_at': {'type': ['string', 'null']}, 'dhcp_server': {'type': ['string', 'null']}, 'dhcp_start': {'type': ['string', 'null']}, 'dns1': {'type': ['string', 'null']}, 'dns2': {'type': ['string', 'null']}, 'enable_dhcp': {'type': ['string', 'null']}, 'gateway': {'type': ['string', 'null']}, 'gateway_v6': {'type': ['string', 'null']}, 'host': {'type': ['string', 'null']}, 'id': {'type': 'string', 'format': 'string'}, 'injected': {'type': ['string', 'null']}, 'label': {'type': 'string'}, 'multi_host': {'type': ['string', 'null']}, 'mtu': {'type': ['integer', 'null']}, 'netmask': {'type': ['string', 'null']}, 'netmask_v6': {'type': ['string', 'null']}, 'priority': {'type': ['string', 'null']}, 'project_id': {'type': ['string', 'null']}, 'rxtx_base': {'type': ['string', 'null']}, 'share_address': {'type': ['string', 'null']}, 'updated_at': {'type': ['string', 'null']}, 'vlan': {'type': ['string', 'null']}, 'vpn_private_address': {'type': ['string', 'null']}, 'vpn_public_address': {'type': ['string', 'null']}, 'vpn_public_port': {'type': ['integer', 'null']}, }, 'required': [ # admin fields are optional, but the rest will always be shown 'broadcast', 'cidr', 'cidr_v6', 'dns1', 'dns2', 'gateway', 'gateway_v6', 'id', 'label', 'netmask', 'netmask_v6', ], 'additionalProperties': False, } index_response = { 'type': 'object', 'properties': { 'networks': { 'type': 'array', 'items': _network_response, }, }, 'required': ['networks'], 'additionalProperties': False, } show_response = { 'type': 'object', 'properties': { 'network': _network_response, }, 'required': ['network'], 'additionalProperties': False, } disassociate_response = {} delete_response = {} create_response = {} add_response = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/networks_associate.py0000664000175000017500000000145200000000000025647 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(stephenfin): These schemas are intentionally empty since these actions # have been removed disassociate_host = {} disassociate_project = {} associate_host = {} disassociate_host_response = {} disassociate_project_response = {} associate_host_response = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/pause_server.py0000664000175000017500000000205600000000000024444 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # TODO(stephenfin): Restrict the value to 'null' in a future API version pause = { 'type': 'object', 'properties': { 'pause': {}, }, 'required': ['pause'], 'additionalProperties': False } # TODO(stephenfin): Restrict the value to 'null' in a future API version unpause = { 'type': 'object', 'properties': { 'unpause': {}, }, 'required': ['unpause'], 'additionalProperties': False } pause_response = { 'type': 'null', } unpause_response = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/quota_classes.py0000664000175000017500000000371500000000000024612 0ustar00zuulzuul00000000000000# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.openstack.compute.schemas import quota_sets update = { 'type': 'object', 'properties': { 'quota_class_set': { 'type': 'object', 'properties': quota_sets.quota_resources, 'additionalProperties': False, }, }, 'required': ['quota_class_set'], 'additionalProperties': False, } update_v250 = copy.deepcopy(update) del update_v250['properties']['quota_class_set']['properties']['fixed_ips'] del update_v250['properties']['quota_class_set']['properties']['floating_ips'] del update_v250['properties']['quota_class_set']['properties'][ 'security_groups'] del update_v250['properties']['quota_class_set']['properties'][ 'security_group_rules'] del update_v250['properties']['quota_class_set']['properties']['networks'] # 2.57 builds on 2.50 and removes injected_file* quotas. update_v257 = copy.deepcopy(update_v250) del update_v257['properties']['quota_class_set']['properties'][ 'injected_files'] del update_v257['properties']['quota_class_set']['properties'][ 'injected_file_content_bytes'] del update_v257['properties']['quota_class_set']['properties'][ 'injected_file_path_bytes'] # TODO(stephenfin): Remove additionalProperties in a future API version show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/quota_sets.py0000664000175000017500000000664400000000000024137 0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.validation import parameter_types common_quota = { 'type': ['integer', 'string'], 'pattern': '^-?[0-9]+$', # -1 is a flag value for unlimited 'minimum': -1, # maximum's value is limited to db constant's MAX_INT # (in nova/db/constants.py) 'maximum': 0x7FFFFFFF } quota_resources = { 'instances': common_quota, 'cores': common_quota, 'ram': common_quota, 'floating_ips': common_quota, 'fixed_ips': common_quota, 'metadata_items': common_quota, 'key_pairs': common_quota, 'security_groups': common_quota, 'security_group_rules': common_quota, 'injected_files': common_quota, 'injected_file_content_bytes': common_quota, 'injected_file_path_bytes': common_quota, 'server_groups': common_quota, 'server_group_members': common_quota, # NOTE(stephenfin): This will always be rejected since it was nova-network # only, but we need to allow users to submit it at a minimum 'networks': common_quota } update_quota_set = copy.deepcopy(quota_resources) update_quota_set.update({'force': parameter_types.boolean}) update_quota_set_v236 = copy.deepcopy(update_quota_set) del update_quota_set_v236['fixed_ips'] del update_quota_set_v236['floating_ips'] del update_quota_set_v236['security_groups'] del update_quota_set_v236['security_group_rules'] del update_quota_set_v236['networks'] update = { 'type': 'object', 'properties': { 'quota_set': { 'type': 'object', 'properties': update_quota_set, 'additionalProperties': False, }, }, 'required': ['quota_set'], 'additionalProperties': False, } update_v236 = copy.deepcopy(update) update_v236['properties']['quota_set']['properties'] = update_quota_set_v236 # 2.57 builds on 2.36 and removes injected_file* quotas. update_quota_set_v257 = copy.deepcopy(update_quota_set_v236) del update_quota_set_v257['injected_files'] del update_quota_set_v257['injected_file_content_bytes'] del update_quota_set_v257['injected_file_path_bytes'] update_v257 = copy.deepcopy(update_v236) update_v257['properties']['quota_set']['properties'] = update_quota_set_v257 show_query = { 'type': 'object', 'properties': { 'user_id': parameter_types.multi_params({'type': 'string'}) }, # NOTE(gmann): This is kept True to keep backward compatibility. # As of now Schema validation stripped out the additional parameters and # does not raise 400. In microversion 2.75, we have blocked the additional # parameters. 'additionalProperties': True } show_query_v275 = copy.deepcopy(show_query) show_query_v275['additionalProperties'] = False # TODO(stephenfin): Remove additionalProperties in a future API version defaults_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/remote_consoles.py0000664000175000017500000001525400000000000025145 0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy get_vnc_console = { 'type': 'object', 'properties': { 'os-getVNCConsole': { 'type': 'object', 'properties': { # NOTE(stephenfin): While we only support novnc nowadays, we # previously supported xvpvnc for the XenServer driver. Since # our generated schemas are unversioned, we need to accept # these old values here and reject them lower down the stack. # Ditto for other schemas in this file. 'type': { 'type': 'string', 'enum': ['novnc', 'xvpvnc'], }, }, 'required': ['type'], 'additionalProperties': False, }, }, 'required': ['os-getVNCConsole'], 'additionalProperties': False, } get_spice_console = { 'type': 'object', 'properties': { 'os-getSPICEConsole': { 'type': 'object', 'properties': { 'type': { 'type': 'string', 'enum': ['spice-html5'], }, }, 'required': ['type'], 'additionalProperties': False, }, }, 'required': ['os-getSPICEConsole'], 'additionalProperties': False, } # NOTE(stephenfin): This schema is intentionally empty since the action has # been removed get_rdp_console = {} get_serial_console = { 'type': 'object', 'properties': { 'os-getSerialConsole': { 'type': 'object', 'properties': { 'type': { 'type': 'string', 'enum': ['serial'], }, }, 'required': ['type'], 'additionalProperties': False, }, }, 'required': ['os-getSerialConsole'], 'additionalProperties': False, } create_v26 = { 'type': 'object', 'properties': { 'remote_console': { 'type': 'object', 'properties': { 'protocol': { 'type': 'string', # While we no longer support the rdp console type, we still # list it here for documentation purposes. It is rejected # at the controller level. 
'enum': ['vnc', 'spice', 'serial', 'rdp'], }, 'type': { 'type': 'string', 'enum': [ 'novnc', 'xvpvnc', 'spice-html5', 'serial', 'rdp-html5' ], }, }, 'required': ['protocol', 'type'], 'additionalProperties': False, }, }, 'required': ['remote_console'], 'additionalProperties': False, } create_v28 = copy.deepcopy(create_v26) create_v28['properties']['remote_console']['properties']['protocol'][ 'enum' ].append('mks') create_v28['properties']['remote_console']['properties']['type'][ 'enum' ].append('webmks') create_v299 = copy.deepcopy(create_v28) create_v299['properties']['remote_console']['properties']['type'][ 'enum' ].append('spice-direct') get_vnc_console_response = { 'type': 'object', 'properties': { 'console': { 'type': 'object', 'properties': { 'type': {'type': 'string', 'enum': ['novnc', 'xvpvnc']}, 'url': {'type': 'string', 'format': 'uri'}, }, 'required': ['type', 'url'], 'additionalProperties': False, }, }, 'required': ['console'], 'additionalProperties': False, } get_spice_console_response = { 'type': 'object', 'properties': { 'console': { 'type': 'object', 'properties': { 'type': {'type': 'string', 'const': 'spice-html5'}, 'url': {'type': 'string', 'format': 'uri'}, }, 'required': ['type', 'url'], 'additionalProperties': False, }, }, 'additionalProperties': False, } get_rdp_console_response = { 'type': 'object', 'properties': { 'console': { 'type': 'object', 'properties': { 'type': {'type': 'string', 'const': 'rdp-html5'}, 'url': {'type': 'string', 'format': 'uri'}, }, 'required': ['type', 'url'], 'additionalProperties': False, }, }, 'required': ['console'], 'additionalProperties': False, } get_serial_console_response = { 'type': 'object', 'properties': { 'console': { 'type': 'object', 'properties': { 'type': {'type': 'string', 'const': 'serial'}, 'url': {'type': 'string', 'format': 'uri'}, }, 'required': ['type', 'url'], 'additionalProperties': False, }, }, 'required': ['console'], 'additionalProperties': False, } create_response = { 'type': 'object', 'properties': { 'remote_console': { 'type': 'object', 'properties': { 'protocol': { 'type': 'string', 'enum': ['vnc', 'spice', 'serial', 'rdp'], }, 'type': { 'type': 'string', 'enum': [ 'novnc', 'xvpvnc', 'spice-html5', 'serial', 'rdp-html5' ], }, 'url': {'type': 'string', 'format': 'uri'}, }, 'required': ['protocol', 'type', 'url'], 'additionalProperties': False, }, }, 'required': ['remote_console'], 'additionalProperties': False, } create_response_v28 = copy.deepcopy(create_response) create_response_v28['properties']['remote_console']['properties'][ 'protocol' ]['enum'].append('mks') create_response_v28['properties']['remote_console']['properties'][ 'type' ]['enum'].append('webmks') create_response_v299 = copy.deepcopy(create_response_v28) create_response_v299['properties']['remote_console']['properties'][ 'type' ]['enum'].append('spice-direct') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/rescue.py0000664000175000017500000000277100000000000023233 0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.validation import parameter_types rescue = { 'type': 'object', 'properties': { 'rescue': { 'type': ['object', 'null'], 'properties': { 'adminPass': parameter_types.admin_password, 'rescue_image_ref': parameter_types.image_id, }, 'additionalProperties': False, }, }, 'required': ['rescue'], 'additionalProperties': False, } # TODO(stephenfin): Restrict the value to 'null' in a future API version unrescue = { 'type': 'object', 'properties': { 'unrescue': {}, }, 'required': ['unrescue'], 'additionalProperties': False, } rescue_response = { 'type': 'object', 'properties': { 'adminPass': { 'type': 'string', }, }, 'additionalProperties': False, } unrescue_response = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/security_group_default_rules.py0000664000175000017500000000140100000000000027733 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(stephenfin): These schemas are intentionally empty since these APIs have # been removed create = {} show_query = {} index_query = {} create_response = {} show_response = {} delete_response = {} index_response = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/security_groups.py0000664000175000017500000001060400000000000025205 0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'security_group': { 'type': 'object', 'properties': { 'name': { 'type': 'string', 'minLength': 0, 'maxLength': 255, }, 'description': { 'type': 'string', 'minLength': 0, 'maxLength': 255, }, }, 'required': ['name', 'description'], # NOTE(stephenfin): Per gmann's note below 'additionalProperties': True, }, }, 'required': ['security_group'], # NOTE(stephenfin): Per gmann's note below 'additionalProperties': True, } update = create create_rules = { 'type': 'object', 'properties': { 'security_group_rule': { 'type': 'object', 'properties': { 'group_id': {'type': ['string', 'null'], 'format': 'uuid'}, 'parent_group_id': {'type': 'string', 'format': 'uuid'}, # NOTE(stephenfin): We never validated these and we're not # going to add that validation now. 'to_port': {}, 'from_port': {}, 'ip_protocol': {}, 'cidr': {}, }, 'required': ['parent_group_id'], # NOTE(stephenfin): Per gmann's note below 'additionalProperties': True, }, }, 'required': ['security_group_rule'], # NOTE(stephenfin): Per gmann's note below 'additionalProperties': True, } # TODO(stephenfin): Remove additionalProperties in a future API version show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } index_query = { 'type': 'object', 'properties': { 'limit': parameter_types.multi_params( parameter_types.non_negative_integer), 'offset': parameter_types.multi_params( parameter_types.non_negative_integer), 'all_tenants': parameter_types.multi_params({'type': 'string'}) }, # NOTE(gmann): This is kept True to keep backward compatibility. # As of now Schema validation stripped out the additional parameters and # does not raise 400. This API is deprecated in microversion 2.36 so we # do not to update the additionalProperties to False. 'additionalProperties': True } # TODO(stephenfin): Remove additionalProperties in a future API version server_sg_index_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } # TODO(stephenfin): Remove additionalProperties in a future API version add_security_group = { 'type': 'object', 'properties': { 'addSecurityGroup': { 'type': 'object', 'properties': { 'name': { 'type': 'string', 'minLength': 1, }, }, 'required': ['name'], 'additionalProperties': False } }, 'required': ['addSecurityGroup'], 'additionalProperties': True, } # TODO(stephenfin): Remove additionalProperties in a future API version remove_security_group = { 'type': 'object', 'properties': { 'removeSecurityGroup': { 'type': 'object', 'properties': { 'name': { 'type': 'string', 'minLength': 1, }, }, 'required': ['name'], 'additionalProperties': False } }, 'required': ['removeSecurityGroup'], 'additionalProperties': True, } add_security_group_response = { 'type': 'null', } remove_security_group_response = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/server_diagnostics.py0000664000175000017500000001224000000000000025632 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # TODO(stephenfin): Remove additionalProperties in a future API version index_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } # NOTE(stephenfin): We could define all available response types for the # various virt drivers, but we'd need to be able to do this (accurately) for # every virt driver including those we once supported but no longer do. Seems # like a lot of work with very little in return. index_response = { 'type': 'object', 'properties': {}, 'required': [], 'additionalProperties': True, } index_response_v248 = { 'type': 'object', 'properties': { 'config_drive': {'type': 'boolean'}, 'cpu_details': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'id': {'type': ['integer', 'null']}, 'time': {'type': ['integer', 'null']}, 'utilisation': {'type': ['integer', 'null']}, }, 'required': ['id', 'time', 'utilisation'], 'additionalProperties': False, }, }, 'disk_details': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'errors_count': {'type': ['integer', 'null']}, 'read_bytes': {'type': ['integer', 'null']}, 'read_requests': {'type': ['integer', 'null']}, 'write_bytes': {'type': ['integer', 'null']}, 'write_requests': {'type': ['integer', 'null']}, }, 'required': [ 'errors_count', 'read_bytes', 'read_requests', 'write_bytes', 'write_requests', ], 'additionalProperties': False, }, }, 'driver': { 'type': 'string', 'enum': [ 'ironic', 'libvirt', 'vmwareapi', 'xenapi', ], }, 'hypervisor': {'type': ['string', 'null']}, 'hypervisor_os': {'type': ['string', 'null']}, 'memory_details': { 'type': 'object', 'properties': { 'maximum': {'type': ['integer', 'null']}, 'used': {'type': ['integer', 'null']}, }, 'required': ['maximum', 'used'], 'additionalProperties': False, }, 'nic_details': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'mac_address': {'type': ['string', 'null']}, 'rx_drop': {'type': ['integer', 'null']}, 'rx_errors': {'type': ['integer', 'null']}, 'rx_octets': {'type': ['integer', 'null']}, 'rx_packets': {'type': ['integer', 'null']}, 'rx_rate': {'type': ['integer', 'null']}, 'tx_drop': {'type': ['integer', 'null']}, 'tx_errors': {'type': ['integer', 'null']}, 'tx_octets': {'type': ['integer', 'null']}, 'tx_packets': {'type': ['integer', 'null']}, 'tx_rate': {'type': ['integer', 'null']}, }, 'required': [ 'mac_address', 'rx_drop', 'rx_errors', 'rx_octets', 'rx_packets', 'rx_rate', 'tx_drop', 'tx_errors', 'tx_octets', 'tx_packets', 'tx_rate', ], 'additionalProperties': False, }, }, 'num_cpus': {'type': ['integer', 'null']}, 'num_disks': {'type': ['integer', 'null']}, 'num_nics': {'type': ['integer', 'null']}, 'state': { 'type': 'string', 'enum': [ 'pending', 'running', 'paused', 'shutdown', 'crashed', 'suspended', ], }, 'uptime': {'type': ['integer', 'null']}, }, 'required': [ 'config_drive', 'cpu_details', 'disk_details', 'driver', 'hypervisor', 'hypervisor_os', 'memory_details', 'nic_details', 'num_cpus', 'num_disks', 'num_nics', 'state', 'uptime', ], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/server_external_events.py0000664000175000017500000001137600000000000026542 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corporation. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.objects import external_event as external_event_obj create = { 'type': 'object', 'properties': { 'events': { 'type': 'array', 'minItems': 1, 'items': { 'type': 'object', 'properties': { 'name': { 'type': 'string', 'enum': [ 'network-changed', 'network-vif-plugged', 'network-vif-unplugged', 'network-vif-deleted', ], }, 'server_uuid': { 'type': 'string', 'format': 'uuid' }, 'status': { 'type': 'string', 'enum': external_event_obj.EVENT_STATUSES, }, 'tag': { 'type': 'string', 'maxLength': 255, }, }, 'required': ['name', 'server_uuid'], 'additionalProperties': False, }, }, }, 'required': ['events'], 'additionalProperties': False, } create_v251 = copy.deepcopy(create) name = create_v251['properties']['events']['items']['properties']['name'] name['enum'].append('volume-extended') create_v276 = copy.deepcopy(create_v251) name = create_v276['properties']['events']['items']['properties']['name'] name['enum'].append('power-update') create_v282 = copy.deepcopy(create_v276) name = create_v282['properties']['events']['items']['properties']['name'] name['enum'].append('accelerator-request-bound') create_v293 = copy.deepcopy(create_v282) name = create_v293['properties']['events']['items']['properties']['name'] name['enum'].append('volume-reimaged') create_response = { 'type': 'object', 'properties': { 'events': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'code': { 'type': 'integer', 'enum': [200, 400, 404, 422], }, 'name': { 'type': 'string', 'enum': [ 'network-changed', 'network-vif-plugged', 'network-vif-unplugged', 'network-vif-deleted', ], }, 'server_uuid': {'type': 'string', 'format': 'uuid'}, 'status': { 'type': 'string', 'enum': external_event_obj.EVENT_STATUSES, }, 'tag': { 'type': 'string', 'maxLength': 255, }, }, 'required': [ 'code', 'name', 'server_uuid', 'status', # tag is not required in responses, although omitting it # from requests will result in a failed event response ], 'additionalProperties': False, }, }, }, 'required': ['events'], 'additionalProperties': False, } create_response_v251 = copy.deepcopy(create_response) name = create_response_v251['properties']['events']['items']['properties'][ 'name' ] name['enum'].append('volume-extended') create_response_v276 = copy.deepcopy(create_response_v251) name = create_response_v276['properties']['events']['items']['properties'][ 'name' ] name['enum'].append('power-update') create_response_v282 = copy.deepcopy(create_response_v276) name = create_response_v282['properties']['events']['items']['properties'][ 'name' ] name['enum'].append('accelerator-request-bound') create_response_v293 = copy.deepcopy(create_response_v282) name = create_response_v293['properties']['events']['items']['properties'][ 'name' ] name['enum'].append('volume-reimaged') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/server_groups.py0000664000175000017500000001632500000000000024652 
0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.validation import parameter_types # NOTE(russellb) There is one other policy, 'legacy', but we don't allow that # being set via the API. It's only used when a group gets automatically # created to support the legacy behavior of the 'group' scheduler hint. create = { 'type': 'object', 'properties': { 'server_group': { 'type': 'object', 'properties': { 'name': parameter_types.name, 'policies': { # This allows only a single item and it must be one of the # enumerated values. It's changed to a single string value # in 2.64. 'type': 'array', 'prefixItems': [ { 'type': 'string', 'enum': ['anti-affinity', 'affinity'], }, ], 'uniqueItems': True, 'minItems': 1, 'maxItems': 1, 'additionalItems': False, } }, 'required': ['name', 'policies'], 'additionalProperties': False, } }, 'required': ['server_group'], 'additionalProperties': False, } create_v215 = copy.deepcopy(create) policies = create_v215['properties']['server_group']['properties']['policies'] policies['prefixItems'][0]['enum'].extend( ['soft-anti-affinity', 'soft-affinity'] ) create_v264 = copy.deepcopy(create_v215) del create_v264['properties']['server_group']['properties']['policies'] create_v264['properties']['server_group']['required'].remove('policies') create_v264['properties']['server_group']['required'].append('policy') create_v264['properties']['server_group']['properties']['policy'] = { 'type': 'string', 'enum': ['anti-affinity', 'affinity', 'soft-anti-affinity', 'soft-affinity'], } create_v264['properties']['server_group']['properties']['rules'] = { 'type': 'object', 'properties': { 'max_server_per_host': parameter_types.positive_integer, }, 'additionalProperties': False, } index_query = { 'type': 'object', 'properties': { 'all_projects': parameter_types.multi_params({'type': 'string'}), 'limit': parameter_types.multi_params( parameter_types.non_negative_integer), 'offset': parameter_types.multi_params( parameter_types.non_negative_integer), }, # For backward compatible changes. In microversion 2.75, we have # blocked the additional parameters. 'additionalProperties': True } index_query_v275 = copy.deepcopy(index_query) index_query_v275['additionalProperties'] = False show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } _server_group_response = { 'type': 'object', 'properties': { 'id': {'type': 'string', 'format': 'uuid'}, 'members': { 'type': 'array', 'items': { 'type': 'string', 'format': 'uuid', }, }, # Why yes, this is a **totally empty object**. 
It's removed later 'metadata': { 'type': 'object', 'properties': {}, 'required': [], 'additionalProperties': False, }, 'name': {'type': 'string'}, 'policies': { 'type': 'array', 'prefixItems': [ { 'type': 'string', 'enum': ['affinity', 'anti-affinity',], }, ], 'minItems': 0, 'maxItems': 1, }, }, 'required': ['id', 'members', 'metadata', 'name', 'policies'], 'additionalProperties': False, } _server_group_response_v213 = copy.deepcopy(_server_group_response) _server_group_response_v213['properties'].update({ 'project_id': parameter_types.project_id, 'user_id': parameter_types.user_id, }) _server_group_response_v213['required'].extend(['project_id', 'user_id']) _server_group_response_v215 = copy.deepcopy(_server_group_response_v213) _server_group_response_v215['properties']['policies']['prefixItems'][0][ 'enum' ].extend(['soft-affinity', 'soft-anti-affinity']) _server_group_response_v264 = copy.deepcopy(_server_group_response_v215) del _server_group_response_v264['properties']['metadata'] del _server_group_response_v264['properties']['policies'] _server_group_response_v264['properties'].update({ 'policy': { 'type': 'string', 'enum': [ 'affinity', 'anti-affinity', 'soft-affinity', 'soft-anti-affinity', ], }, 'rules': { 'type': 'object', 'properties': { 'max_server_per_host': {'type': 'integer'}, }, 'required': [], 'additionalProperties': False, }, }) _server_group_response_v264['required'].remove('metadata') _server_group_response_v264['required'].remove('policies') _server_group_response_v264['required'].extend(['policy', 'rules']) show_response = { 'type': 'object', 'properties': { 'server_group': copy.deepcopy(_server_group_response), }, 'required': ['server_group'], 'additionalProperties': False, } show_response_v213 = copy.deepcopy(show_response) show_response_v213['properties']['server_group'] = _server_group_response_v213 show_response_v215 = copy.deepcopy(show_response_v213) show_response_v215['properties']['server_group'] = _server_group_response_v215 show_response_v264 = copy.deepcopy(show_response_v215) show_response_v264['properties']['server_group'] = _server_group_response_v264 delete_response = {'type': 'null'} index_response = { 'type': 'object', 'properties': { 'server_groups': { 'type': 'array', 'items': copy.deepcopy(_server_group_response), }, }, 'required': ['server_groups'], 'additionalProperties': False, } index_response_v213 = copy.deepcopy(index_response) index_response_v213['properties']['server_groups'][ 'items' ] = _server_group_response_v213 index_response_v215 = copy.deepcopy(index_response_v213) index_response_v215['properties']['server_groups'][ 'items' ] = _server_group_response_v215 index_response_v264 = copy.deepcopy(index_response_v215) index_response_v264['properties']['server_groups'][ 'items' ] = _server_group_response_v264 create_response = copy.deepcopy(show_response) create_response_v213 = copy.deepcopy(show_response_v213) create_response_v215 = copy.deepcopy(show_response_v215) create_response_v264 = copy.deepcopy(show_response_v264) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/server_metadata.py0000664000175000017500000000320600000000000025105 0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'metadata': parameter_types.metadata }, 'required': ['metadata'], 'additionalProperties': False, } metadata_update = copy.deepcopy(parameter_types.metadata) metadata_update.update({ 'minProperties': 1, 'maxProperties': 1 }) update = { 'type': 'object', 'properties': { 'meta': metadata_update }, 'required': ['meta'], 'additionalProperties': False, } update_all = { 'type': 'object', 'properties': { 'metadata': parameter_types.metadata }, 'required': ['metadata'], 'additionalProperties': False, } # TODO(stephenfin): Remove additionalProperties in a future API version index_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } # TODO(stephenfin): Remove additionalProperties in a future API version show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/server_migrations.py0000664000175000017500000000231600000000000025502 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. force_complete = { 'type': 'object', 'properties': { 'force_complete': { 'type': 'null' } }, 'required': ['force_complete'], 'additionalProperties': False, } # TODO(stephenfin): Remove additionalProperties in a future API version index_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } # TODO(stephenfin): Remove additionalProperties in a future API version show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } force_complete_response = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/server_password.py0000664000175000017500000000163400000000000025172 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
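# NOTE: Illustrative example only (values made up) -- a response body that
# the ``index_response`` schema below accepts looks roughly like:
#
#     {"password": "xlozO3wLCBRWAa2yDjCCVx8vwN"}
#
# ``clear_response`` models the empty (null) body returned when the stored
# password is cleared.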
# TODO(stephenfin): Remove additionalProperties in a future API version index_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } index_response = { 'type': 'object', 'properties': { 'password': {'type': 'string'}, }, 'required': ['password'], 'additionalProperties': False, } clear_response = {'type': 'null'} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/server_shares.py0000664000175000017500000000445700000000000024623 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.validation import parameter_types create = { 'title': 'Server shares', 'type': 'object', 'properties': { 'share': { 'type': 'object', 'properties': { 'share_id': parameter_types.share_id, 'tag': parameter_types.share_tag, }, 'required': ['share_id'], 'additionalProperties': False } }, 'required': ['share'], 'additionalProperties': False } index_query = { 'type': 'object', 'properties': {}, 'additionalProperties': False } show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': False } # "share": { # "uuid": "68ba1762-fd6d-4221-8311-f3193dd93404", # "export_location": "10.0.0.50:/mnt/foo", # "share_id": "e8debdc0-447a-4376-a10a-4cd9122d7986", # "status": "inactive", # "tag": "e8debdc0-447a-4376-a10a-4cd9122d7986" # } _share_response = { 'type': 'object', 'properties': { 'export_location': parameter_types.share_export_location, 'share_id': parameter_types.share_id, 'status': parameter_types.share_status, 'tag': parameter_types.share_tag, 'uuid': parameter_types.share_id, }, 'required': ['share_id', 'status', 'tag'], 'additionalProperties': False } show_response = { 'title': 'Server share', 'type': 'object', 'properties': { 'share': _share_response }, 'required': ['share'], 'additionalProperties': False } index_response = { 'title': 'Server shares', 'type': 'object', 'properties': { 'shares': { 'type': 'array', 'items': _share_response }, }, 'required': ['shares'], 'additionalProperties': False } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/server_tags.py0000664000175000017500000000251200000000000024262 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
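# NOTE: Illustrative example only -- a request body accepted by the
# ``update_all`` schema below (replace all tags on a server) looks
# roughly like:
#
#     {"tags": ["db", "web", "prod"]}
#
# Individual tag values are constrained by ``parameter_types.tag`` and the
# list length by ``instance.MAX_TAG_COUNT``.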
from nova.api.validation import parameter_types from nova.objects import instance update_all = { "title": "Server tags", "type": "object", "properties": { "tags": { "type": "array", "items": parameter_types.tag, "maxItems": instance.MAX_TAG_COUNT } }, 'required': ['tags'], 'additionalProperties': False } update = { "title": "Server tag", "type": "null", } # TODO(stephenfin): Remove additionalProperties in a future API version index_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } # TODO(stephenfin): Remove additionalProperties in a future API version show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/server_topology.py0000664000175000017500000000375700000000000025214 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # TODO(stephenfin): Remove additionalProperties in a future API version query_params_v21 = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } index_response = { 'type': 'object', 'properties': { 'nodes': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'cpu_pinning': { 'type': 'object', 'patternProperties': { '^[0-9]+$': {'type': 'integer'}, }, 'additionalProperties': False, }, 'host_node': {'type': 'integer'}, 'memory_mb': {'type': 'integer'}, 'siblings': { 'type': 'array', 'items': { 'type': 'array', 'items': {'type': 'integer'}, }, }, 'vcpu_set': { 'type': 'array', 'items': {'type': 'integer'}, }, }, 'required': ['memory_mb', 'siblings', 'vcpu_set'], 'additionalProperties': False, }, }, 'pagesize_kb': {'type': ['integer', 'null']}, }, 'required': ['nodes', 'pagesize_kb'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/servers.py0000664000175000017500000012634500000000000023442 0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
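# NOTE: Illustrative example only (UUID made up) -- a minimal request body
# accepted by the base ``create`` schema below looks roughly like:
#
#     {"server": {"name": "test-server",
#                 "flavorRef": "1",
#                 "imageRef": "70a599e0-31e7-49b7-b260-868f441e862b"}}
#
# In the controller each schema variant is wired to its microversion range
# with the validation decorators, e.g. roughly
# ``@validation.schema(schemas.create_v263, '2.63', '2.66')``; the
# authoritative ranges live in nova/api/openstack/compute/servers.py.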
import copy from nova.api.validation import parameter_types from nova.api.validation.parameter_types import multi_params from nova.objects import instance _legacy_block_device_mapping = { 'type': 'object', 'properties': { 'virtual_name': { 'type': 'string', 'maxLength': 255, }, 'volume_id': parameter_types.volume_id, 'snapshot_id': parameter_types.image_id, 'volume_size': parameter_types.volume_size, # Do not allow empty device names or spaces in name (defined in # nova/block_device.py:validate_device_name()) 'device_name': { 'type': 'string', 'minLength': 1, 'maxLength': 255, 'pattern': '^[a-zA-Z0-9._/-]*$', }, # Defined as boolean in nova/block_device.py:from_api() 'delete_on_termination': parameter_types.boolean, 'no_device': {}, # Defined as mediumtext in column "connection_info" in table # "block_device_mapping" 'connection_info': { 'type': 'string', 'maxLength': 16777215 }, }, 'additionalProperties': False } _block_device_mapping_v2_new_item = { # defined in nova/block_device.py:from_api() # NOTE: Client can specify the Id with the combination of # source_type and uuid, or a single attribute like volume_id/ # image_id/snapshot_id. 'source_type': { 'type': 'string', 'enum': ['volume', 'image', 'snapshot', 'blank'], }, 'uuid': { 'type': 'string', 'minLength': 1, 'maxLength': 255, 'pattern': '^[a-zA-Z0-9._-]*$', }, 'image_id': parameter_types.image_id, 'destination_type': { 'type': 'string', 'enum': ['local', 'volume'], }, # Defined as varchar(255) in column "guest_format" in table # "block_device_mapping" 'guest_format': { 'type': 'string', 'maxLength': 255, }, # Defined as varchar(255) in column "device_type" in table # "block_device_mapping" 'device_type': { 'type': 'string', 'maxLength': 255, }, # Defined as varchar(255) in column "disk_bus" in table # "block_device_mapping" 'disk_bus': { 'type': 'string', 'maxLength': 255, }, # Defined as integer in nova/block_device.py:from_api() # NOTE(mriedem): boot_index=None is also accepted for backward # compatibility with the legacy v2 API. 'boot_index': { 'type': ['integer', 'string', 'null'], 'pattern': '^-?[0-9]+$', }, } _block_device_mapping_v2 = copy.deepcopy(_legacy_block_device_mapping) _block_device_mapping_v2['properties'].update( _block_device_mapping_v2_new_item ) _hints = { 'type': 'object', 'properties': { 'group': { 'type': 'string', 'format': 'uuid' }, 'different_host': { # NOTE: The value of 'different_host' is the set of server # uuids where a new server is scheduled on a different host. # A user can specify one server as string parameter and should # specify multiple servers as array parameter instead. 'oneOf': [ { 'type': 'string', 'format': 'uuid' }, { 'type': 'array', 'items': parameter_types.server_id } ] }, 'same_host': { # NOTE: The value of 'same_host' is the set of server # uuids where a new server is scheduled on the same host. 'type': ['string', 'array'], 'items': parameter_types.server_id }, 'query': { # NOTE: The value of 'query' is converted to dict data with # jsonutils.loads() and used for filtering hosts. 'type': ['string', 'object'], }, # NOTE: The value of 'target_cell' is the cell name what cell # a new server is scheduled on. 
'target_cell': parameter_types.name, 'different_cell': { 'type': ['string', 'array'], 'items': { 'type': 'string' } }, 'build_near_host_ip': parameter_types.ip_address, 'cidr': { 'type': 'string', 'pattern': '^/[0-9a-f.:]+$' }, }, # NOTE: As this Mail: # http://lists.openstack.org/pipermail/openstack-dev/2015-June/067996.html # pointed out the limit the scheduler-hints in the API is problematic. So # relax it. 'additionalProperties': True } create = { 'type': 'object', 'properties': { 'server': { 'type': 'object', 'properties': { 'name': parameter_types.name, # NOTE(gmann): In case of boot from volume, imageRef was # allowed as the empty string also So keeping the same # behavior and allow empty string in case of boot from # volume only. Python code make sure empty string is # not allowed for other cases. 'imageRef': parameter_types.image_id_or_empty_string, 'flavorRef': parameter_types.flavor_ref, 'adminPass': parameter_types.admin_password, 'metadata': parameter_types.metadata, 'networks': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'fixed_ip': parameter_types.ip_address, 'port': { 'type': ['string', 'null'], 'format': 'uuid', }, 'uuid': {'type': 'string'}, }, 'additionalProperties': False, } }, 'OS-DCF:diskConfig': parameter_types.disk_config, 'accessIPv4': parameter_types.accessIPv4, 'accessIPv6': parameter_types.accessIPv6, 'personality': parameter_types.personality, 'availability_zone': parameter_types.name, 'block_device_mapping': { 'type': 'array', 'items': _legacy_block_device_mapping }, 'block_device_mapping_v2': { 'type': 'array', 'items': _block_device_mapping_v2 }, 'config_drive': parameter_types.boolean, 'key_name': parameter_types.name, 'min_count': parameter_types.positive_integer, 'max_count': parameter_types.positive_integer, 'return_reservation_id': parameter_types.boolean, 'security_groups': { 'type': 'array', 'items': { 'type': 'object', 'properties': { # NOTE(oomichi): allocate_for_instance() of # network/neutron.py gets security_group names # or UUIDs from this parameter. # parameter_types.name allows both format. 
'name': parameter_types.name, }, 'additionalProperties': False, } }, 'user_data': { 'type': 'string', 'format': 'base64', 'maxLength': 65535 } }, 'required': ['name', 'flavorRef'], 'additionalProperties': False, }, 'os:scheduler_hints': _hints, 'OS-SCH-HNT:scheduler_hints': _hints, }, 'required': ['server'], 'additionalProperties': False, } create_v20 = copy.deepcopy(create) create_v20['properties']['server'][ 'properties']['name'] = parameter_types.name_with_leading_trailing_spaces create_v20['properties']['server']['properties'][ 'availability_zone'] = parameter_types.name_with_leading_trailing_spaces create_v20['properties']['server']['properties'][ 'key_name'] = parameter_types.name_with_leading_trailing_spaces create_v20['properties']['server']['properties'][ 'security_groups']['items']['properties']['name'] = ( parameter_types.name_with_leading_trailing_spaces) create_v20['properties']['server']['properties']['user_data'] = { 'type': ['string', 'null'], 'format': 'base64', 'maxLength': 65535, } create_v219 = copy.deepcopy(create) create_v219['properties']['server'][ 'properties']['description'] = parameter_types.description create_v232 = copy.deepcopy(create_v219) create_v232['properties']['server'][ 'properties']['networks']['items'][ 'properties']['tag'] = parameter_types.tag create_v232['properties']['server'][ 'properties']['block_device_mapping_v2']['items'][ 'properties']['tag'] = parameter_types.tag # NOTE(artom) the following conditional was merged as # "if version == '2.32'" The intent all along was to check whether # version was greater than or equal to 2.32. In other words, we wanted # to support tags in versions 2.32 and up, but ended up supporting them # in version 2.32 only. Since we need a new microversion to add request # body attributes, tags have been re-added in version 2.42. # NOTE(gmann) Below schema 'create_v233' is added (builds on 2.19 schema) # to keep the above mentioned behavior while merging the extension schema code # into server schema file. Below is the ref code where BDM tag was originally # got added for 2.32 microversion *only*. # Ref- https://opendev.org/openstack/nova/src/commit/ # 9882a60e69a5ab8da314a199a56defc05098b743/nova/api/ # openstack/compute/block_device_mapping.py#L71 create_v233 = copy.deepcopy(create_v219) create_v233['properties']['server'][ 'properties']['networks']['items'][ 'properties']['tag'] = parameter_types.tag # 2.37 builds on 2.32 and makes the following changes: # 1. server.networks is required # 2. server.networks is now either an enum or a list # 3. server.networks.uuid is now required to be a uuid create_v237 = copy.deepcopy(create_v233) create_v237['properties']['server']['required'].append('networks') create_v237['properties']['server']['properties']['networks'] = { 'oneOf': [ { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'fixed_ip': parameter_types.ip_address, 'port': {'type': ['string', 'null'], 'format': 'uuid'}, 'uuid': {'type': 'string', 'format': 'uuid'}, }, 'additionalProperties': False, }, }, {'type': 'string', 'enum': ['none', 'auto']}, ], } # 2.42 builds on 2.37 and re-introduces the tag field to the list of network # objects. 
create_v242 = copy.deepcopy(create_v237) create_v242['properties']['server']['properties']['networks'] = { 'oneOf': [ { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'fixed_ip': parameter_types.ip_address, 'port': {'type': ['string', 'null'], 'format': 'uuid'}, 'uuid': {'type': 'string', 'format': 'uuid'}, 'tag': parameter_types.tag, }, 'additionalProperties': False, }, }, {'type': 'string', 'enum': ['none', 'auto']}, ], } create_v242['properties']['server'][ 'properties']['block_device_mapping_v2']['items'][ 'properties']['tag'] = parameter_types.tag # 2.52 builds on 2.42 and makes the following changes: # Allowing adding tags to instances when booting create_v252 = copy.deepcopy(create_v242) create_v252['properties']['server']['properties']['tags'] = { "type": "array", "items": parameter_types.tag, "maxItems": instance.MAX_TAG_COUNT } # 2.57 builds on 2.52 and removes the personality parameter. create_v257 = copy.deepcopy(create_v252) create_v257['properties']['server']['properties'].pop('personality') # 2.63 builds on 2.57 and makes the following changes: # Allowing adding trusted certificates to instances when booting create_v263 = copy.deepcopy(create_v257) create_v263['properties']['server']['properties'][ 'trusted_image_certificates'] = parameter_types.trusted_certs # Add volume type in block_device_mapping_v2. create_v267 = copy.deepcopy(create_v263) create_v267['properties']['server']['properties'][ 'block_device_mapping_v2']['items'][ 'properties']['volume_type'] = parameter_types.volume_type # Add host and hypervisor_hostname in server create_v274 = copy.deepcopy(create_v267) create_v274['properties']['server'][ 'properties']['host'] = parameter_types.fqdn create_v274['properties']['server'][ 'properties']['hypervisor_hostname'] = parameter_types.fqdn # Add hostname in server create_v290 = copy.deepcopy(create_v274) create_v290['properties']['server'][ 'properties']['hostname'] = parameter_types.hostname # Support FQDN as hostname create_v294 = copy.deepcopy(create_v290) create_v294['properties']['server'][ 'properties']['hostname'] = parameter_types.fqdn update = { 'type': 'object', 'properties': { 'server': { 'type': 'object', 'properties': { 'name': parameter_types.name, 'OS-DCF:diskConfig': parameter_types.disk_config, 'accessIPv4': parameter_types.accessIPv4, 'accessIPv6': parameter_types.accessIPv6, }, 'additionalProperties': False, }, }, 'required': ['server'], 'additionalProperties': False, } update_v20 = copy.deepcopy(update) update_v20['properties']['server'][ 'properties']['name'] = parameter_types.name_with_leading_trailing_spaces update_v219 = copy.deepcopy(update) update_v219['properties']['server'][ 'properties']['description'] = parameter_types.description update_v290 = copy.deepcopy(update_v219) update_v290['properties']['server'][ 'properties']['hostname'] = parameter_types.hostname update_v294 = copy.deepcopy(update_v290) update_v294['properties']['server'][ 'properties']['hostname'] = parameter_types.fqdn rebuild = { 'type': 'object', 'properties': { 'rebuild': { 'type': 'object', 'properties': { 'name': parameter_types.name, 'imageRef': parameter_types.image_id, 'adminPass': parameter_types.admin_password, 'metadata': parameter_types.metadata, 'preserve_ephemeral': parameter_types.boolean, 'OS-DCF:diskConfig': parameter_types.disk_config, 'accessIPv4': parameter_types.accessIPv4, 'accessIPv6': parameter_types.accessIPv6, 'personality': parameter_types.personality, }, 'required': ['imageRef'], 'additionalProperties': False, }, }, 'required': 
['rebuild'], 'additionalProperties': False, } rebuild_v20 = copy.deepcopy(rebuild) rebuild_v20['properties']['rebuild'][ 'properties']['name'] = parameter_types.name_with_leading_trailing_spaces rebuild_v219 = copy.deepcopy(rebuild) rebuild_v219['properties']['rebuild'][ 'properties']['description'] = parameter_types.description rebuild_v254 = copy.deepcopy(rebuild_v219) rebuild_v254['properties']['rebuild'][ 'properties']['key_name'] = parameter_types.name_or_none # 2.57 builds on 2.54 and makes the following changes: # 1. Remove the personality parameter. # 2. Add the user_data parameter which is nullable so user_data can be reset. rebuild_v257 = copy.deepcopy(rebuild_v254) rebuild_v257['properties']['rebuild']['properties'].pop('personality') rebuild_v257['properties']['rebuild']['properties']['user_data'] = ({ 'type': ['string', 'null'], 'format': 'base64', 'maxLength': 65535, }) # 2.63 builds on 2.57 and makes the following changes: # Allowing adding trusted certificates to instances when rebuilding rebuild_v263 = copy.deepcopy(rebuild_v257) rebuild_v263['properties']['rebuild']['properties'][ 'trusted_image_certificates'] = parameter_types.trusted_certs rebuild_v290 = copy.deepcopy(rebuild_v263) rebuild_v290['properties']['rebuild']['properties'][ 'hostname'] = parameter_types.hostname rebuild_v294 = copy.deepcopy(rebuild_v290) rebuild_v294['properties']['rebuild']['properties'][ 'hostname'] = parameter_types.fqdn resize = { 'type': 'object', 'properties': { 'resize': { 'type': 'object', 'properties': { 'flavorRef': parameter_types.flavor_ref, 'OS-DCF:diskConfig': parameter_types.disk_config, }, 'required': ['flavorRef'], 'additionalProperties': False, }, }, 'required': ['resize'], 'additionalProperties': False, } create_image = { 'type': 'object', 'properties': { 'createImage': { 'type': 'object', 'properties': { 'name': parameter_types.name, 'metadata': parameter_types.metadata }, 'required': ['name'], 'additionalProperties': False } }, 'required': ['createImage'], 'additionalProperties': False } create_image_v20 = copy.deepcopy(create_image) create_image_v20['properties']['createImage'][ 'properties']['name'] = parameter_types.name_with_leading_trailing_spaces # TODO(stephenfin): Restrict the value to 'null' in a future API version confirm_resize = { 'type': 'object', 'properties': { 'confirmResize': {} }, 'required': ['confirmResize'], 'additionalProperties': False } # TODO(stephenfin): Restrict the value to 'null' in a future API version revert_resize = { 'type': 'object', 'properties': { 'revertResize': {}, }, 'required': ['revertResize'], 'additionalProperties': False, } reboot = { 'type': 'object', 'properties': { 'reboot': { 'type': 'object', 'properties': { 'type': { 'type': 'string', 'enum': ['HARD', 'Hard', 'hard', 'SOFT', 'Soft', 'soft'] } }, 'required': ['type'], 'additionalProperties': False } }, 'required': ['reboot'], 'additionalProperties': False } # TODO(stephenfin): Restrict the value to 'null' in a future API version start_server = { 'type': 'object', 'properties': { 'os-start': {}, }, 'required': ['os-start'], 'additionalProperties': False, } # TODO(stephenfin): Restrict the value to 'null' in a future API version stop_server = { 'type': 'object', 'properties': { 'os-stop': {}, }, 'required': ['os-stop'], 'additionalProperties': False, } trigger_crash_dump = { 'type': 'object', 'properties': { 'trigger_crash_dump': { 'type': 'null' } }, 'required': ['trigger_crash_dump'], 'additionalProperties': False } JOINED_TABLE_QUERY_PARAMS_SERVERS = { 
'block_device_mapping': parameter_types.common_query_param, 'services': parameter_types.common_query_param, 'metadata': parameter_types.common_query_param, 'system_metadata': parameter_types.common_query_param, 'info_cache': parameter_types.common_query_param, 'security_groups': parameter_types.common_query_param, 'pci_devices': parameter_types.common_query_param } # These fields are valid values for sort_keys before we start # using schema validation, but are considered to be bad values # and disabled to use. In order to avoid backward incompatibility, # they are ignored instead of return HTTP 400. SERVER_LIST_IGNORE_SORT_KEY = [ 'architecture', 'cell_name', 'cleaned', 'default_ephemeral_device', 'default_swap_device', 'deleted', 'deleted_at', 'disable_terminate', 'ephemeral_gb', 'ephemeral_key_uuid', 'id', 'key_data', 'launched_on', 'locked', 'memory_mb', 'os_type', 'reservation_id', 'root_gb', 'shutdown_terminate', 'user_data', 'vcpus', 'vm_mode' ] # From microversion 2.73 we start offering locked as a valid sort key. SERVER_LIST_IGNORE_SORT_KEY_V273 = list(SERVER_LIST_IGNORE_SORT_KEY) SERVER_LIST_IGNORE_SORT_KEY_V273.remove('locked') VALID_SORT_KEYS = { "type": "string", "enum": ['access_ip_v4', 'access_ip_v6', 'auto_disk_config', 'availability_zone', 'config_drive', 'created_at', 'display_description', 'display_name', 'host', 'hostname', 'image_ref', 'instance_type_id', 'kernel_id', 'key_name', 'launch_index', 'launched_at', 'locked_by', 'node', 'power_state', 'progress', 'project_id', 'ramdisk_id', 'root_device_name', 'task_state', 'terminated_at', 'updated_at', 'user_id', 'uuid', 'vm_state'] + SERVER_LIST_IGNORE_SORT_KEY } # We reuse the existing list and add locked to the list of valid sort keys. VALID_SORT_KEYS_V273 = { "type": "string", "enum": ['locked'] + list( set(VALID_SORT_KEYS["enum"]) - set(SERVER_LIST_IGNORE_SORT_KEY)) + SERVER_LIST_IGNORE_SORT_KEY_V273 } query_params_v21 = { 'type': 'object', 'properties': { 'user_id': parameter_types.common_query_param, 'project_id': parameter_types.common_query_param, # The alias of project_id. It should be removed in the # future with microversion bump. 'tenant_id': parameter_types.common_query_param, 'launch_index': parameter_types.common_query_param, # The alias of image. It should be removed in the # future with microversion bump. 'image_ref': parameter_types.common_query_param, 'image': parameter_types.common_query_param, 'kernel_id': parameter_types.common_query_regex_param, 'ramdisk_id': parameter_types.common_query_regex_param, 'hostname': parameter_types.common_query_regex_param, 'key_name': parameter_types.common_query_regex_param, 'power_state': parameter_types.common_query_regex_param, 'vm_state': parameter_types.common_query_param, 'task_state': parameter_types.common_query_param, 'host': parameter_types.common_query_param, 'node': parameter_types.common_query_regex_param, 'flavor': parameter_types.common_query_regex_param, 'reservation_id': parameter_types.common_query_regex_param, 'launched_at': parameter_types.common_query_regex_param, 'terminated_at': parameter_types.common_query_regex_param, 'availability_zone': parameter_types.common_query_regex_param, # NOTE(alex_xu): This is pattern matching, it didn't get any benefit # from DB index. 'name': parameter_types.common_query_regex_param, # The alias of name. It should be removed in the future # with microversion bump. 'display_name': parameter_types.common_query_regex_param, 'description': parameter_types.common_query_regex_param, # The alias of description. 
It should be removed in the # future with microversion bump. 'display_description': parameter_types.common_query_regex_param, 'locked_by': parameter_types.common_query_regex_param, 'uuid': parameter_types.common_query_param, 'root_device_name': parameter_types.common_query_regex_param, 'config_drive': parameter_types.common_query_regex_param, 'access_ip_v4': parameter_types.common_query_regex_param, 'access_ip_v6': parameter_types.common_query_regex_param, 'auto_disk_config': parameter_types.common_query_regex_param, 'progress': parameter_types.common_query_regex_param, 'sort_key': multi_params(VALID_SORT_KEYS), 'sort_dir': parameter_types.common_query_param, 'all_tenants': parameter_types.common_query_param, 'soft_deleted': parameter_types.common_query_param, 'deleted': parameter_types.common_query_param, 'status': parameter_types.common_query_param, 'changes-since': multi_params({'type': 'string', 'format': 'date-time'}), # NOTE(alex_xu): The ip and ip6 filters are implemented in Python. 'ip': parameter_types.common_query_regex_param, 'ip6': parameter_types.common_query_regex_param, 'created_at': parameter_types.common_query_regex_param, }, # For backward compatibility, additionalProperties is set to True here. # We will either strip the extra params out or raise HTTP 400 # according to the params' values later in processing. # This has been changed to False in microversion 2.75. From # microversion 2.75, no additional unknown parameter will be allowed. 'additionalProperties': True, # Prevent internal attributes that start with an underscore from # being stripped out in schema validation, and raise HTTP 400 in the API. 'patternProperties': {"^_": parameter_types.common_query_param} } # Add the joined-table fields to the list so they are not # stripped out in later processing and can be handled later in the API # to raise HTTP 400. query_params_v21['properties'].update( JOINED_TABLE_QUERY_PARAMS_SERVERS) query_params_v21['properties'].update( parameter_types.pagination_parameters) query_params_v226 = copy.deepcopy(query_params_v21) query_params_v226['properties'].update({ 'tags': parameter_types.common_query_regex_param, 'tags-any': parameter_types.common_query_regex_param, 'not-tags': parameter_types.common_query_regex_param, 'not-tags-any': parameter_types.common_query_regex_param, }) query_params_v266 = copy.deepcopy(query_params_v226) query_params_v266['properties'].update({ 'changes-before': multi_params({'type': 'string', 'format': 'date-time'}), }) query_params_v273 = copy.deepcopy(query_params_v266) query_params_v273['properties'].update({ 'sort_key': multi_params(VALID_SORT_KEYS_V273), 'locked': parameter_types.common_query_param, }) # Microversion 2.75 makes the query schema disallow any invalid or unknown # query parameters (filter or sort keys). # *****Schema updates for microversion 2.75 start here******* query_params_v275 = copy.deepcopy(query_params_v273) # 1. Update sort_keys to allow only valid sort keys: # NOTE(gmann): Remove the ignored sort keys now because 'additionalProperties' # is False for the query schema. Starting from microversion 2.75, the API will # raise 400 for any disallowed sort keys instead of ignoring them. VALID_SORT_KEYS_V275 = copy.deepcopy(VALID_SORT_KEYS_V273) VALID_SORT_KEYS_V275['enum'] = list( set(VALID_SORT_KEYS_V273["enum"]) - set( SERVER_LIST_IGNORE_SORT_KEY_V273)) query_params_v275['properties'].update({ 'sort_key': multi_params(VALID_SORT_KEYS_V275), }) # 2. Make 'additionalProperties' False.
query_params_v275['additionalProperties'] = False # *****Schema updates for microversion 2.75 end here******* show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } resize_response = { 'type': 'null', } confirm_resize_response = { 'type': 'null', } revert_resize_response = { 'type': 'null', } reboot_response = { 'type': 'null', } start_server_response = { 'type': 'null', } stop_server_response = { 'type': 'null', } trigger_crash_dump_response = { 'type': 'null', } create_image_response = { 'type': 'null', } create_image_response_v245 = { 'type': 'object', 'properties': { 'image_id': {'type': 'string', 'format': 'uuid'}, }, 'required': ['image_id'], 'additionalProperties': False, } rebuild_response = { 'type': 'object', 'properties': { 'server': { 'type': 'object', 'properties': { 'accessIPv4': { 'type': 'string', 'oneOf': [ {'format': 'ipv4'}, {'const': ''}, ], }, 'accessIPv6': { 'type': 'string', 'oneOf': [ {'format': 'ipv6'}, {'const': ''}, ], }, 'addresses': { 'type': 'object', 'patternProperties': { '^.+$': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'addr': { 'type': 'string', 'oneOf': [ {'format': 'ipv4'}, {'format': 'ipv6'}, ], }, 'version': { 'type': 'number', 'enum': [4, 6], }, }, 'required': [ 'addr', 'version' ], 'additionalProperties': False, }, }, }, 'additionalProperties': False, }, 'adminPass': {'type': ['null', 'string']}, 'created': {'type': 'string', 'format': 'date-time'}, 'fault': { 'type': 'object', 'properties': { 'code': {'type': 'integer'}, 'created': {'type': 'string', 'format': 'date-time'}, 'details': {'type': 'string'}, 'message': {'type': 'string'}, }, 'required': ['code', 'created', 'message'], 'additionalProperties': False, }, 'flavor': { 'type': 'object', 'properties': { 'id': { 'type': 'string', }, 'links': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'href': { 'type': 'string', 'format': 'uri', }, 'rel': { 'type': 'string', }, }, 'required': [ 'href', 'rel' ], "additionalProperties": False, }, }, }, 'additionalProperties': False, }, 'hostId': {'type': 'string'}, 'id': {'type': 'string'}, 'image': { 'oneOf': [ { 'type': 'string', 'const': '', }, { 'type': 'object', 'properties': { 'id': { 'type': 'string' }, 'links': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'href': { 'type': 'string', 'format': 'uri', }, 'rel': { 'type': 'string', }, }, 'required': [ 'href', 'rel' ], "additionalProperties": False, }, }, }, 'additionalProperties': False, }, ], }, 'links': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'href': { 'type': 'string', 'format': 'uri', }, 'rel': { 'type': 'string', }, }, 'required': [ 'href', 'rel' ], 'additionalProperties': False, }, }, 'metadata': { 'type': 'object', 'patternProperties': { '^.+$': { 'type': 'string' }, }, 'additionalProperties': False, }, 'name': {'type': ['string', 'null']}, 'progress': {'type': ['null', 'number']}, 'status': {'type': 'string'}, 'tenant_id': parameter_types.project_id, 'updated': {'type': 'string', 'format': 'date-time'}, 'user_id': parameter_types.user_id, 'OS-DCF:diskConfig': {'type': 'string'}, }, 'required': [ 'accessIPv4', 'accessIPv6', 'addresses', 'created', 'flavor', 'hostId', 'id', 'image', 'links', 'metadata', 'name', 'progress', 'status', 'tenant_id', 'updated', 'user_id', 'OS-DCF:diskConfig', ], 'additionalProperties': False, }, }, 'required': [ 'server' ], 'additionalProperties': False, } rebuild_response_v29 = copy.deepcopy(rebuild_response) 
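# NOTE: Each microversion-specific rebuild response schema below is built by
# deep-copying the previous version and adding only the fields that the new
# microversion introduced (2.9 adds 'locked', 2.19 'description', 2.26
# 'tags', and so on), which keeps the chain easy to audit.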
rebuild_response_v29['properties']['server']['properties']['locked'] = { 'type': 'boolean', } rebuild_response_v29['properties']['server']['required'].append('locked') rebuild_response_v219 = copy.deepcopy(rebuild_response_v29) rebuild_response_v219['properties']['server']['properties']['description'] = { 'type': ['null', 'string'], } rebuild_response_v219['properties']['server']['required'].append('description') rebuild_response_v226 = copy.deepcopy(rebuild_response_v219) rebuild_response_v226['properties']['server']['properties']['tags'] = { 'type': 'array', 'items': { 'type': 'string', }, 'maxItems': 50, } rebuild_response_v226['properties']['server']['required'].append('tags') # NOTE(stephenfin): We overwrite rather than extend 'flavor', since we now # embed the flavor in this version rebuild_response_v246 = copy.deepcopy(rebuild_response_v226) rebuild_response_v246['properties']['server']['properties']['flavor'] = { 'type': 'object', 'properties': { 'vcpus': { 'type': 'integer', }, 'ram': { 'type': 'integer', }, 'disk': { 'type': 'integer', }, 'ephemeral': { 'type': 'integer', }, 'swap': { 'type': 'integer', }, 'original_name': { 'type': 'string', }, 'extra_specs': { 'type': 'object', 'patternProperties': { '^.+$': { 'type': 'string' }, }, 'additionalProperties': False, }, }, 'required': ['vcpus', 'ram', 'disk', 'ephemeral', 'swap', 'original_name'], 'additionalProperties': False, } rebuild_response_v254 = copy.deepcopy(rebuild_response_v246) rebuild_response_v254['properties']['server']['properties']['key_name'] = { 'type': ['null', 'string'], } rebuild_response_v254['properties']['server']['required'].append('key_name') rebuild_response_v257 = copy.deepcopy(rebuild_response_v254) rebuild_response_v257['properties']['server']['properties']['user_data'] = { 'type': ['string', 'null'], 'format': 'base64', 'maxLength': 65535, } rebuild_response_v257['properties']['server']['required'].append('user_data') rebuild_response_v263 = copy.deepcopy(rebuild_response_v257) rebuild_response_v263['properties']['server']['properties'].update( { 'trusted_image_certificates': { 'type': ['array', 'null'], 'items': { 'type': 'string', }, }, }, ) rebuild_response_v263['properties']['server']['required'].append( 'trusted_image_certificates' ) rebuild_response_v271 = copy.deepcopy(rebuild_response_v263) rebuild_response_v271['properties']['server']['properties'].update( { 'server_groups': { 'type': 'array', 'items': { 'type': 'string', 'format': 'uuid', }, 'maxLength': 1, }, }, ) rebuild_response_v271['properties']['server']['required'].append( 'server_groups' ) rebuild_response_v273 = copy.deepcopy(rebuild_response_v271) rebuild_response_v273['properties']['server']['properties'].update( { 'locked_reason': { 'type': ['null', 'string'], }, }, ) rebuild_response_v273['properties']['server']['required'].append( 'locked_reason' ) rebuild_response_v275 = copy.deepcopy(rebuild_response_v273) rebuild_response_v275['properties']['server']['properties'].update( { 'config_drive': { # TODO(stephenfin): Our tests return null but this shouldn't happen # in practice, apparently? 
'type': ['string', 'boolean', 'null'], }, 'OS-EXT-AZ:availability_zone': { 'type': 'string', }, 'OS-EXT-SRV-ATTR:host': { 'type': ['string', 'null'], }, 'OS-EXT-SRV-ATTR:hypervisor_hostname': { 'type': ['string', 'null'], }, 'OS-EXT-SRV-ATTR:instance_name': { 'type': 'string', }, 'OS-EXT-STS:power_state': { 'type': 'integer', 'enum': [0, 1, 3, 4, 6, 7], }, 'OS-EXT-STS:task_state': { 'type': ['null', 'string'], }, 'OS-EXT-STS:vm_state': { 'type': 'string', }, 'OS-EXT-SRV-ATTR:hostname': { 'type': 'string', }, 'OS-EXT-SRV-ATTR:reservation_id': { 'type': ['string', 'null'], }, 'OS-EXT-SRV-ATTR:launch_index': { 'type': 'integer', }, 'OS-EXT-SRV-ATTR:kernel_id': { 'type': ['string', 'null'], }, 'OS-EXT-SRV-ATTR:ramdisk_id': { 'type': ['string', 'null'], }, 'OS-EXT-SRV-ATTR:root_device_name': { 'type': ['string', 'null'], }, 'os-extended-volumes:volumes_attached': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'id': { 'type': 'string', }, 'delete_on_termination': { 'type': 'boolean', 'default': False, }, }, 'required': ['id', 'delete_on_termination'], 'additionalProperties': False, }, }, 'OS-SRV-USG:launched_at': { 'type': ['string', 'null'], 'format': 'date-time', }, 'OS-SRV-USG:terminated_at': { 'type': ['string', 'null'], 'format': 'date-time', }, 'security_groups': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'name': { 'type': 'string', }, }, 'required': ['name'], 'additionalProperties': False, }, }, 'host_status': { 'type': 'string', }, }, ) rebuild_response_v275['properties']['server']['required'].extend([ 'config_drive', 'OS-EXT-AZ:availability_zone', 'OS-EXT-STS:power_state', 'OS-EXT-STS:task_state', 'OS-EXT-STS:vm_state', 'os-extended-volumes:volumes_attached', 'OS-SRV-USG:launched_at', 'OS-SRV-USG:terminated_at', ]) rebuild_response_v275['properties']['server']['properties']['addresses'][ 'patternProperties' ]['^.+$']['items']['properties'].update({ 'OS-EXT-IPS-MAC:mac_addr': {'type': 'string', 'format': 'mac-address'}, 'OS-EXT-IPS:type': {'type': 'string', 'enum': ['fixed', 'floating']}, }) rebuild_response_v275['properties']['server']['properties']['addresses'][ 'patternProperties' ]['^.+$']['items']['required'].extend([ 'OS-EXT-IPS-MAC:mac_addr', 'OS-EXT-IPS:type' ]) rebuild_response_v296 = copy.deepcopy(rebuild_response_v275) rebuild_response_v296['properties']['server']['properties'].update({ 'pinned_availability_zone': { 'type': ['null', 'string'], }, }) rebuild_response_v296['properties']['server']['required'].append( 'pinned_availability_zone' ) rebuild_response_v298 = copy.deepcopy(rebuild_response_v296) rebuild_response_v298['properties']['server']['properties']['image'][ 'oneOf'][1]['properties'].update({ 'properties': { 'type': 'object', 'patternProperties': { '^[a-zA-Z0-9_:. ]{1,255}$': { 'type': 'string', 'maxLength': 255, }, }, 'additionalProperties': False, }, }) rebuild_response_v2100 = copy.deepcopy(rebuild_response_v298) rebuild_response_v2100['properties']['server']['properties'].update({ 'scheduler_hints': _hints, }) rebuild_response_v2100['properties']['server']['required'].append( 'scheduler_hints' ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/services.py0000664000175000017500000000521200000000000023561 0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.validation import parameter_types service_update = { 'type': 'object', 'properties': { 'host': parameter_types.fqdn, 'binary': { 'type': 'string', 'minLength': 1, 'maxLength': 255, }, 'disabled_reason': { 'type': 'string', 'minLength': 1, 'maxLength': 255, } }, 'required': ['host', 'binary'], 'additionalProperties': False } service_update_v211 = { 'type': 'object', 'properties': { 'host': parameter_types.fqdn, 'binary': { 'type': 'string', 'minLength': 1, 'maxLength': 255, }, 'disabled_reason': { 'type': 'string', 'minLength': 1, 'maxLength': 255, }, 'forced_down': parameter_types.boolean }, 'required': ['host', 'binary'], 'additionalProperties': False } # The 2.53 body is for updating a service's status and/or forced_down fields. # There are no required attributes since the service is identified using a # unique service_id on the request path, and status and/or forced_down can # be specified in the body. If status=='disabled', then 'disabled_reason' is # also checked in the body but is not required. Requesting status='enabled' and # including a 'disabled_reason' results in a 400, but this is checked in code. service_update_v253 = { 'type': 'object', 'properties': { 'status': { 'type': 'string', 'enum': ['enabled', 'disabled'], }, 'disabled_reason': { 'type': 'string', 'minLength': 1, 'maxLength': 255, }, 'forced_down': parameter_types.boolean }, 'additionalProperties': False } index_query_schema = { 'type': 'object', 'properties': { 'host': parameter_types.common_query_param, 'binary': parameter_types.common_query_param, }, # For backward compatible changes 'additionalProperties': True } index_query_schema_275 = copy.deepcopy(index_query_schema) index_query_schema_275['additionalProperties'] = False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/shelve.py0000664000175000017500000000671400000000000023234 0ustar00zuulzuul00000000000000# Copyright 2019 INSPUR Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
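# NOTE: Illustrative example only -- the simplest bodies accepted by the
# action schemas below look roughly like:
#
#     {"shelve": null}
#     {"shelveOffload": null}
#     {"unshelve": null}
#
# The microversion-specific ``unshelve_v277`` and ``unshelve_v291`` variants
# further down document the richer request bodies they allow.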
from nova.api.validation import parameter_types # TODO(stephenfin): Restrict the value to 'null' in a future API version shelve = { 'type': 'object', 'properties': { 'shelve': {}, }, 'required': ['shelve'], 'additionalProperties': False, } # TODO(stephenfin): Restrict the value to 'null' in a future API version shelve_offload = { 'type': 'object', 'properties': { 'shelveOffload': {}, }, 'required': ['shelveOffload'], 'additionalProperties': False, } unshelve = { 'type': 'object', 'properties': { 'unshelve': {}, }, 'required': ['unshelve'], 'additionalProperties': False, } # NOTE(brinzhang): For older microversions there is no change, as this # schema is applied only for versions below 2.91 of the unshelve server API. # Anything that worked in older versions keeps working as it is. unshelve_v277 = { 'type': 'object', 'properties': { 'unshelve': { 'type': ['object', 'null'], 'properties': { 'availability_zone': parameter_types.name }, # NOTE: The allowed request bodies are {'unshelve': null} and # {'unshelve': {'availability_zone': <string>}}; {'unshelve': {}} is # not allowed as the request body for unshelve. 'required': ['availability_zone'], 'additionalProperties': False, }, }, 'required': ['unshelve'], 'additionalProperties': False, } # NOTE(rribaud): # This schema is applied only for version >= 2.91 of the unshelve server API. # It adds the host parameter so a server can be unshelved to a specific host. # # The schema has been redefined for clarity instead of extending 2.77. # # The API can be called with the following bodies: # # - {"unshelve": null} (Keep compatibility with previous microversions) # # or # # - {"unshelve": {"availability_zone": <string>}} # - {"unshelve": {"availability_zone": null}} (Unpin availability zone) # - {"unshelve": {"host": <fqdn>}} # - {"unshelve": {"availability_zone": <string>, "host": <fqdn>}} # - {"unshelve": {"availability_zone": null, "host": <fqdn>}} # # Everything else is not allowed, for example: # # - {"unshelve": {}} # - {"unshelve": {"host": <fqdn>, "host": <fqdn>}} # - {"unshelve": {"foo": <string>}} unshelve_v291 = { "type": "object", "properties": { "unshelve": { "type": ["object", "null"], "properties": { "availability_zone": { "type": ["string", "null"], }, "host": { "type": "string" } }, "additionalProperties": False, } }, "required": ["unshelve"], "additionalProperties": False, } shelve_response = { 'type': 'null', } shelve_offload_response = { 'type': 'null', } unshelve_response = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/simple_tenant_usage.py0000664000175000017500000000410400000000000025763 0ustar00zuulzuul00000000000000# Copyright 2017 NEC Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
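# NOTE: Illustrative example only -- a query string accepted by the
# ``index_query`` schema below looks roughly like:
#
#     ?start=2016-01-01T00:00:00&end=2016-02-01T00:00:00&detailed=1
#
# ``start``, ``end`` and ``detailed`` are validated only as strings here;
# parsing the timestamps is left to the controller code.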
import copy from nova.api.validation import parameter_types index_query = { 'type': 'object', 'properties': { 'start': parameter_types.multi_params({'type': 'string'}), 'end': parameter_types.multi_params({'type': 'string'}), 'detailed': parameter_types.multi_params({'type': 'string'}) }, # NOTE(gmann): This is kept True to keep backward compatibility. # As of now Schema validation stripped out the additional parameters and # does not raise 400. In microversion 2.75, we have blocked the additional # parameters. 'additionalProperties': True } show_query = { 'type': 'object', 'properties': { 'start': parameter_types.multi_params({'type': 'string'}), 'end': parameter_types.multi_params({'type': 'string'}) }, # NOTE(gmann): This is kept True to keep backward compatibility. # As of now Schema validation stripped out the additional parameters and # does not raise 400. In microversion 2.75, we have blocked the additional # parameters. 'additionalProperties': True } index_query_v240 = copy.deepcopy(index_query) index_query_v240['properties'].update( parameter_types.pagination_parameters) show_query_v240 = copy.deepcopy(show_query) show_query_v240['properties'].update( parameter_types.pagination_parameters) index_query_v275 = copy.deepcopy(index_query_v240) index_query_v275['additionalProperties'] = False show_query_v275 = copy.deepcopy(show_query_v240) show_query_v275['additionalProperties'] = False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/snapshots.py0000664000175000017500000000313300000000000023760 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'snapshot': { 'type': 'object', 'properties': { 'volume_id': {'type': 'string'}, 'force': parameter_types.boolean, 'display_name': {'type': 'string'}, 'display_description': {'type': 'string'}, }, 'required': ['volume_id'], 'additionalProperties': False, }, }, 'required': ['snapshot'], 'additionalProperties': False, } index_query = { 'type': 'object', 'properties': { 'limit': parameter_types.multi_params( parameter_types.non_negative_integer), 'offset': parameter_types.multi_params( parameter_types.non_negative_integer) }, 'additionalProperties': True } detail_query = index_query show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/suspend_server.py0000664000175000017500000000206200000000000025005 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # TODO(stephenfin): Restrict the value to 'null' in a future API version suspend = { 'type': 'object', 'properties': { 'suspend': {}, }, 'required': ['suspend'], 'additionalProperties': False } # TODO(stephenfin): Restrict the value to 'null' in a future API version resume = { 'type': 'object', 'properties': { 'resume': {}, }, 'required': ['resume'], 'additionalProperties': False } suspend_response = { 'type': 'null', } resume_response = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/tenant_networks.py0000664000175000017500000000133200000000000025162 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(stephenfin): These schemas are intentionally empty since these APIs have # been removed create = {} index_query = {} show_query = {} delete_response = {} create_response = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/versions.py0000664000175000017500000000740500000000000023614 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
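# NOTE: Illustrative example only (values made up) -- a single entry matching
# the ``_version_obj`` schema below looks roughly like:
#
#     {"id": "v2.1", "status": "CURRENT", "version": "2.100",
#      "min_version": "2.1", "updated": "2013-07-23T11:33:21Z",
#      "links": [{"rel": "self",
#                 "href": "http://openstack.example.com/v2.1/"}]}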
# TODO(stephenfin): Remove additionalProperties in a future API version show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } # TODO(stephenfin): Remove additionalProperties in a future API version multi_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } _version_obj = { 'type': 'object', 'properties': { 'id': {'type': 'string'}, 'status': { 'type': 'string', 'enum': ['CURRENT', 'SUPPORTED', 'DEPRECATED'], }, 'links': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'href': {'type': 'string'}, 'rel': {'type': 'string'}, 'type': {'type': 'string'}, }, 'required': ['rel', 'href'], 'additionalProperties': False, }, }, 'min_version': {'type': 'string'}, 'updated': {'type': 'string', 'format': 'date-time'}, 'version': {'type': 'string'}, }, 'required': ['id', 'status', 'links', 'min_version', 'updated'], 'additionalProperties': False, } index_response = { 'type': 'object', 'properties': { 'versions': {'type': 'array', 'items': _version_obj} }, 'required': ['versions'], 'additionalProperties': False, } _version_obj_with_media_types = _version_obj _version_obj_with_media_types['properties'].update({ 'media-types': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'base': {'type': 'string'}, 'type': {'type': 'string'}, }, 'required': ['base', 'type'], 'additionalProperties': False, }, } }) show_response = { 'type': 'object', 'properties': { 'version': _version_obj_with_media_types }, 'required': ['version'], 'additionalProperties': False, } _legacy_version_obj = { 'type': 'object', 'properties': { 'id': {'type': 'string'}, 'status': {'type': 'string'}, 'links': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'href': {'type': 'string'}, 'rel': {'type': 'string'}, 'type': {'type': 'string'}, }, 'required': ['rel', 'href'], 'additionalProperties': False, }, }, 'media-types': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'base': {'type': 'string'}, 'type': {'type': 'string'}, }, 'required': ['base', 'type'], 'additionalProperties': False, }, }, }, 'required': ['id', 'status', 'links', 'media-types'], 'additionalProperties': False, } multi_response = { 'type': 'object', 'properties': { 'choices': {'type': 'array', 'items': _legacy_version_obj} }, 'required': ['choices'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/virtual_interfaces.py0000664000175000017500000000124700000000000025633 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
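# Illustrative sketch (not part of Nova): a version entry satisfying the
# ``_version_obj`` response schema defined in versions.py above must carry
# ``id``, ``status``, ``links``, ``min_version`` and ``updated``, with
# ``status`` restricted to CURRENT/SUPPORTED/DEPRECATED and each link carrying
# ``rel`` and ``href``.  The concrete values below are placeholders chosen for
# this example only.

_example_version_entry = {
    'id': 'v2.1',
    'status': 'CURRENT',
    'links': [
        {'rel': 'self', 'href': 'http://controller:8774/v2.1/'},  # placeholder
    ],
    'min_version': '2.1',
    'updated': '2013-07-23T11:33:21Z',  # placeholder timestamp
}

_example_index_response = {'versions': [_example_version_entry]}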
# NOTE(stephenfin): These schemas are intentionally empty since these APIs have # been removed index_query = {} index_response = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/volume_attachments.py0000664000175000017500000000704200000000000025643 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'volumeAttachment': { 'type': 'object', 'properties': { 'volumeId': parameter_types.volume_id, 'device': { 'type': ['string', 'null'], # NOTE: The validation pattern from match_device() in # nova/block_device.py. 'pattern': '(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$' }, }, 'required': ['volumeId'], 'additionalProperties': False, }, }, 'required': ['volumeAttachment'], 'additionalProperties': False, } create_v249 = copy.deepcopy(create) create_v249['properties']['volumeAttachment'][ 'properties']['tag'] = parameter_types.tag create_v279 = copy.deepcopy(create_v249) create_v279['properties']['volumeAttachment'][ 'properties']['delete_on_termination'] = parameter_types.boolean update = copy.deepcopy(create) del update['properties']['volumeAttachment']['properties']['device'] # NOTE(brinzhang): Allow attachment_id, serverId, device, tag, and # delete_on_termination (i.e., follow the content of the GET response) # to be specified for RESTfulness, even though we will not allow updating # all of them. update_v285 = { 'type': 'object', 'properties': { 'volumeAttachment': { 'type': 'object', 'properties': { 'volumeId': parameter_types.volume_id, 'device': { 'type': ['string', 'null'], # NOTE: The validation pattern from match_device() in # nova/block_device.py. 'pattern': '(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$' }, 'tag': parameter_types.tag, 'delete_on_termination': parameter_types.boolean, 'serverId': parameter_types.server_id, 'id': parameter_types.attachment_id }, 'required': ['volumeId'], 'additionalProperties': False, }, }, 'required': ['volumeAttachment'], 'additionalProperties': False, } index_query = { 'type': 'object', 'properties': { 'limit': parameter_types.multi_params( parameter_types.non_negative_integer), 'offset': parameter_types.multi_params( parameter_types.non_negative_integer) }, # NOTE(gmann): This is kept True to keep backward compatibility. # As of now Schema validation stripped out the additional parameters and # does not raise 400. In microversion 2.75, we have blocked the additional # parameters. 
'additionalProperties': True } index_query_v275 = copy.deepcopy(index_query) index_query_v275['additionalProperties'] = False # TODO(stephenfin): Remove additionalProperties in a future API version show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/schemas/volumes.py0000664000175000017500000000355300000000000023436 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'volume': { 'type': 'object', 'properties': { 'volume_type': {'type': 'string'}, 'metadata': {'type': 'object'}, 'snapshot_id': {'type': 'string'}, 'size': { 'type': ['integer', 'string'], 'pattern': '^[0-9]+$', 'minimum': 1 }, 'availability_zone': {'type': 'string'}, 'display_name': {'type': 'string'}, 'display_description': {'type': 'string'}, }, 'required': ['size'], 'additionalProperties': False, }, }, 'required': ['volume'], 'additionalProperties': False, } index_query = { 'type': 'object', 'properties': { 'limit': parameter_types.multi_params( parameter_types.non_negative_integer), 'offset': parameter_types.multi_params( parameter_types.non_negative_integer) }, 'additionalProperties': True } detail_query = index_query show_query = { 'type': 'object', 'properties': {}, 'additionalProperties': True } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/security_group_default_rules.py0000664000175000017500000000405600000000000026321 0ustar00zuulzuul00000000000000# Copyright 2013 Metacloud Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc from nova.api.openstack.compute.schemas import security_group_default_rules as schema # noqa: E501 from nova.api.openstack import wsgi from nova.api import validation _removal_reason = """\ This API only works with *nova-network*, which was deprecated in the 14.0.0 (Newton) release. It fails with HTTP 404 starting from microversion 2.36. It was removed in the 21.0.0 (Ussuri) release. 
""" @validation.validated class SecurityGroupDefaultRulesController(wsgi.Controller): """(Removed) Controller for default project security groups.""" @wsgi.expected_errors(410) @wsgi.removed('21.0.0', _removal_reason) @validation.schema(schema.create) @validation.response_body_schema(schema.create_response) def create(self, req, body): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('21.0.0', _removal_reason) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response) def show(self, req, id): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('21.0.0', _removal_reason) @validation.response_body_schema(schema.delete_response) def delete(self, req, id): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('21.0.0', _removal_reason) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req): raise exc.HTTPGone() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/security_groups.py0000664000175000017500000004667500000000000023603 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2012 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The security groups extension.""" from oslo_log import log as logging from webob import exc from nova.api.openstack.api_version_request \ import MAX_PROXY_API_SUPPORT_VERSION from nova.api.openstack import common from nova.api.openstack.compute.schemas import security_groups as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.i18n import _ from nova.network import security_group_api from nova.policies import security_groups as sg_policies from nova.virt import netutils LOG = logging.getLogger(__name__) SG_NOT_FOUND = object() class SecurityGroupControllerBase(object): """Base class for Security Group controllers.""" def __init__(self): super(SecurityGroupControllerBase, self).__init__() self.compute_api = compute.API() def _format_security_group_rule(self, context, rule, group_rule_data=None): """Return a security group rule in desired API response format. If group_rule_data is passed in that is used rather than querying for it. 
""" sg_rule = {} sg_rule['id'] = rule['id'] sg_rule['parent_group_id'] = rule['parent_group_id'] sg_rule['ip_protocol'] = rule['protocol'] sg_rule['from_port'] = rule['from_port'] sg_rule['to_port'] = rule['to_port'] sg_rule['group'] = {} sg_rule['ip_range'] = {} if group_rule_data: sg_rule['group'] = group_rule_data elif rule['group_id']: try: source_group = security_group_api.get( context, id=rule['group_id']) except exception.SecurityGroupNotFound: # NOTE(arosen): There is a possible race condition that can # occur here if two api calls occur concurrently: one that # lists the security groups and another one that deletes a # security group rule that has a group_id before the # group_id is fetched. To handle this if # SecurityGroupNotFound is raised we return None instead # of the rule and the caller should ignore the rule. LOG.debug("Security Group ID %s does not exist", rule['group_id']) return sg_rule['group'] = {'name': source_group.get('name'), 'tenant_id': source_group.get('project_id')} else: sg_rule['ip_range'] = {'cidr': rule['cidr']} return sg_rule def _format_security_group(self, context, group, group_rule_data_by_rule_group_id=None): security_group = {} security_group['id'] = group['id'] security_group['description'] = group['description'] security_group['name'] = group['name'] security_group['tenant_id'] = group['project_id'] security_group['rules'] = [] for rule in group['rules']: group_rule_data = None if rule['group_id'] and group_rule_data_by_rule_group_id: group_rule_data = ( group_rule_data_by_rule_group_id.get(rule['group_id'])) if group_rule_data == SG_NOT_FOUND: # The security group for the rule was not found so skip it. continue formatted_rule = self._format_security_group_rule( context, rule, group_rule_data) if formatted_rule: security_group['rules'] += [formatted_rule] return security_group def _get_group_rule_data_by_rule_group_id(self, context, groups): group_rule_data_by_rule_group_id = {} # Pre-populate with the group information itself in case any of the # rule group IDs are the in-scope groups. for group in groups: group_rule_data_by_rule_group_id[group['id']] = { 'name': group.get('name'), 'tenant_id': group.get('project_id')} for group in groups: for rule in group['rules']: rule_group_id = rule['group_id'] if (rule_group_id and rule_group_id not in group_rule_data_by_rule_group_id): try: source_group = security_group_api.get( context, id=rule['group_id']) group_rule_data_by_rule_group_id[rule_group_id] = { 'name': source_group.get('name'), 'tenant_id': source_group.get('project_id')} except exception.SecurityGroupNotFound: LOG.debug("Security Group %s does not exist", rule_group_id) # Use a sentinel so we don't process this group again. 
group_rule_data_by_rule_group_id[rule_group_id] = ( SG_NOT_FOUND) return group_rule_data_by_rule_group_id class SecurityGroupController(SecurityGroupControllerBase, wsgi.Controller): """The Security group API controller for the OpenStack API.""" @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors((400, 404)) @validation.query_schema(schema.show_query) def show(self, req, id): """Return data about the given security group.""" context = req.environ['nova.context'] context.can(sg_policies.POLICY_NAME % 'show', target={'project_id': context.project_id}) try: id = security_group_api.validate_id(id) security_group = security_group_api.get(context, id) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) except exception.Invalid as exp: raise exc.HTTPBadRequest(explanation=exp.format_message()) return {'security_group': self._format_security_group(context, security_group)} @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors((400, 404)) @wsgi.response(202) def delete(self, req, id): """Delete a security group.""" context = req.environ['nova.context'] context.can(sg_policies.POLICY_NAME % 'delete', target={'project_id': context.project_id}) try: id = security_group_api.validate_id(id) security_group = security_group_api.get(context, id) security_group_api.destroy(context, security_group) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) except exception.Invalid as exp: raise exc.HTTPBadRequest(explanation=exp.format_message()) @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @validation.query_schema(schema.index_query) @wsgi.expected_errors(404) def index(self, req): """Returns a list of security groups.""" context = req.environ['nova.context'] context.can(sg_policies.POLICY_NAME % 'get', target={'project_id': context.project_id}) search_opts = {} search_opts.update(req.GET) project_id = context.project_id raw_groups = security_group_api.list( context, project=project_id, search_opts=search_opts) limited_list = common.limited(raw_groups, req) result = [self._format_security_group(context, group) for group in limited_list] return {'security_groups': list(sorted(result, key=lambda k: (k['tenant_id'], k['name'])))} @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors((400, 403)) @validation.schema(schema.create) def create(self, req, body): """Creates a new security group.""" context = req.environ['nova.context'] context.can(sg_policies.POLICY_NAME % 'create', target={'project_id': context.project_id}) group_name = body['security_group']['name'] group_description = body['security_group']['description'] try: group_ref = security_group_api.create_security_group( context, group_name, group_description) except exception.Invalid as exp: raise exc.HTTPBadRequest(explanation=exp.format_message()) except exception.SecurityGroupLimitExceeded as exp: raise exc.HTTPForbidden(explanation=exp.format_message()) return {'security_group': self._format_security_group(context, group_ref)} @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors((400, 404)) @validation.schema(schema.update) def update(self, req, id, body): """Update a security group.""" context = req.environ['nova.context'] context.can(sg_policies.POLICY_NAME % 'update', target={'project_id': context.project_id}) try: id = security_group_api.validate_id(id) security_group = security_group_api.get(context, id) except exception.SecurityGroupNotFound 
as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) except exception.Invalid as exp: raise exc.HTTPBadRequest(explanation=exp.format_message()) group_name = body['security_group']['name'] group_description = body['security_group']['description'] try: group_ref = security_group_api.update_security_group( context, security_group, group_name, group_description) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) except exception.Invalid as exp: raise exc.HTTPBadRequest(explanation=exp.format_message()) return {'security_group': self._format_security_group(context, group_ref)} class SecurityGroupRulesController(SecurityGroupControllerBase, wsgi.Controller): @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors((400, 403, 404)) @validation.schema(schema.create_rules) def create(self, req, body): context = req.environ['nova.context'] context.can(sg_policies.POLICY_NAME % 'rule:create', target={'project_id': context.project_id}) sg_rule = body['security_group_rule'] group_id = sg_rule.get('group_id') parent_group_id = sg_rule['parent_group_id'] source_group = {} try: security_group = security_group_api.get( context, parent_group_id) if group_id is not None: source_group = security_group_api.get( context, id=group_id) new_rule = self._rule_args_to_dict(context, to_port=sg_rule.get('to_port'), from_port=sg_rule.get('from_port'), ip_protocol=sg_rule.get('ip_protocol'), cidr=sg_rule.get('cidr'), group_id=group_id) except (exception.Invalid, exception.InvalidCidr) as exp: raise exc.HTTPBadRequest(explanation=exp.format_message()) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) if new_rule is None: msg = _("Not enough parameters to build a valid rule.") raise exc.HTTPBadRequest(explanation=msg) new_rule['parent_group_id'] = security_group['id'] if 'cidr' in new_rule: net, prefixlen = netutils.get_net_and_prefixlen(new_rule['cidr']) if net not in ('0.0.0.0', '::') and prefixlen == '0': msg = _("Bad prefix for network in cidr %s") % new_rule['cidr'] raise exc.HTTPBadRequest(explanation=msg) group_rule_data = None try: if group_id: group_rule_data = {'name': source_group.get('name'), 'tenant_id': source_group.get('project_id')} security_group_rule = ( security_group_api.create_security_group_rule( context, security_group, new_rule)) except exception.Invalid as exp: raise exc.HTTPBadRequest(explanation=exp.format_message()) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) except exception.SecurityGroupLimitExceeded as exp: raise exc.HTTPForbidden(explanation=exp.format_message()) formatted_rule = self._format_security_group_rule(context, security_group_rule, group_rule_data) return {"security_group_rule": formatted_rule} def _rule_args_to_dict(self, context, to_port=None, from_port=None, ip_protocol=None, cidr=None, group_id=None): if group_id is not None: return security_group_api.new_group_ingress_rule( group_id, ip_protocol, from_port, to_port) else: cidr = security_group_api.parse_cidr(cidr) return security_group_api.new_cidr_ingress_rule( cidr, ip_protocol, from_port, to_port) @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors((400, 404, 409)) @wsgi.response(202) def delete(self, req, id): context = req.environ['nova.context'] context.can(sg_policies.POLICY_NAME % 'rule:delete', target={'project_id': context.project_id}) try: id = security_group_api.validate_id(id) rule = 
security_group_api.get_rule(context, id) group_id = rule['parent_group_id'] security_group = security_group_api.get(context, group_id) security_group_api.remove_rules( context, security_group, [rule['id']]) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) except exception.NoUniqueMatch as exp: raise exc.HTTPConflict(explanation=exp.format_message()) except exception.Invalid as exp: raise exc.HTTPBadRequest(explanation=exp.format_message()) class ServerSecurityGroupController( SecurityGroupControllerBase, wsgi.Controller ): @wsgi.expected_errors(404) @validation.query_schema(schema.server_sg_index_query) def index(self, req, server_id): """Returns a list of security groups for the given instance.""" context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, server_id) context.can(sg_policies.POLICY_NAME % 'list', target={'project_id': instance.project_id}) try: groups = security_group_api.get_instance_security_groups( context, instance, True) except (exception.SecurityGroupNotFound, exception.InstanceNotFound) as exp: msg = exp.format_message() raise exc.HTTPNotFound(explanation=msg) # Optimize performance here by loading up the group_rule_data per # rule['group_id'] ahead of time so we're not doing redundant # security group lookups for each rule. group_rule_data_by_rule_group_id = ( self._get_group_rule_data_by_rule_group_id(context, groups)) result = [self._format_security_group(context, group, group_rule_data_by_rule_group_id) for group in groups] return {'security_groups': list(sorted(result, key=lambda k: (k['tenant_id'], k['name'])))} @validation.validated class SecurityGroupActionController(wsgi.Controller): def __init__(self): super(SecurityGroupActionController, self).__init__() self.compute_api = compute.API() def _parse(self, body, action): try: body = body[action] group_name = body['name'] except TypeError: msg = _("Missing parameter dict") raise exc.HTTPBadRequest(explanation=msg) except KeyError: msg = _("Security group not specified") raise exc.HTTPBadRequest(explanation=msg) if not group_name or group_name.strip() == '': msg = _("Security group name cannot be empty") raise exc.HTTPBadRequest(explanation=msg) return group_name @wsgi.expected_errors((400, 404, 409)) @wsgi.response(202) @wsgi.action('addSecurityGroup') @validation.schema(schema.add_security_group) @validation.response_body_schema(schema.add_security_group_response) def _addSecurityGroup(self, req, id, body): context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, id) context.can(sg_policies.POLICY_NAME % 'add', target={'project_id': instance.project_id}) group_name = self._parse(body, 'addSecurityGroup') try: security_group_api.add_to_instance( context, instance, group_name) except (exception.SecurityGroupNotFound, exception.InstanceNotFound) as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) except (exception.NoUniqueMatch, exception.SecurityGroupConnectionStateConflict) as exp: raise exc.HTTPConflict(explanation=exp.format_message()) except exception.SecurityGroupCannotBeApplied as exp: raise exc.HTTPBadRequest(explanation=exp.format_message()) @wsgi.expected_errors((400, 404, 409)) @wsgi.response(202) @wsgi.action('removeSecurityGroup') @validation.schema(schema.remove_security_group) @validation.response_body_schema(schema.remove_security_group_response) def _removeSecurityGroup(self, req, id, body): context = req.environ['nova.context'] instance = 
common.get_instance(self.compute_api, context, id) context.can(sg_policies.POLICY_NAME % 'remove', target={'project_id': instance.project_id}) group_name = self._parse(body, 'removeSecurityGroup') try: security_group_api.remove_from_instance( context, instance, group_name) except (exception.SecurityGroupNotFound, exception.InstanceNotFound) as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) except exception.NoUniqueMatch as exp: raise exc.HTTPConflict(explanation=exp.format_message()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/server_diagnostics.py0000664000175000017500000000503200000000000024210 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import server_diagnostics as schema from nova.api.openstack.compute.views import server_diagnostics from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.policies import server_diagnostics as sd_policies @validation.validated class ServerDiagnosticsController(wsgi.Controller): _view_builder_class = server_diagnostics.ViewBuilder def __init__(self): super(ServerDiagnosticsController, self).__init__() self.compute_api = compute.API() @wsgi.expected_errors((400, 404, 409, 501)) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response, '2.1', '2.47') @validation.response_body_schema(schema.index_response_v248, '2.48') def index(self, req, server_id): context = req.environ["nova.context"] instance = common.get_instance(self.compute_api, context, server_id) context.can(sd_policies.BASE_POLICY_NAME, target={'project_id': instance.project_id}) try: if api_version_request.is_supported(req, '2.48'): diagnostics = self.compute_api.get_instance_diagnostics( context, instance) return self._view_builder.instance_diagnostics(diagnostics) return self.compute_api.get_diagnostics(context, instance) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'get_diagnostics', server_id) except exception.InstanceNotReady as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) except NotImplementedError: common.raise_feature_not_supported() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/server_external_events.py0000664000175000017500000001536300000000000025117 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from nova.api.openstack.compute.schemas import server_external_events as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import context as nova_context from nova import objects from nova.policies import server_external_events as see_policies LOG = logging.getLogger(__name__) TAG_REQUIRED = ('volume-extended', 'power-update', 'accelerator-request-bound') @validation.validated class ServerExternalEventsController(wsgi.Controller): def __init__(self): super(ServerExternalEventsController, self).__init__() self.compute_api = compute.API() @staticmethod def _is_event_tag_present_when_required(event): if event.name in TAG_REQUIRED and event.tag is None: return False return True def _get_instances_all_cells(self, context, instance_uuids, instance_mappings): cells = {} instance_uuids_by_cell = {} for im in instance_mappings: if im.cell_mapping.uuid not in cells: cells[im.cell_mapping.uuid] = im.cell_mapping instance_uuids_by_cell.setdefault(im.cell_mapping.uuid, list()) instance_uuids_by_cell[im.cell_mapping.uuid].append( im.instance_uuid) instances = {} for cell_uuid, cell in cells.items(): with nova_context.target_cell(context, cell) as cctxt: instances.update( {inst.uuid: inst for inst in objects.InstanceList.get_by_filters( cctxt, {'uuid': instance_uuids_by_cell[cell_uuid]}, expected_attrs=['migration_context', 'info_cache'])}) return instances @wsgi.expected_errors(403) @wsgi.response(200) @validation.schema(schema.create, '2.0', '2.50') @validation.schema(schema.create_v251, '2.51', '2.75') @validation.schema(schema.create_v276, '2.76', '2.81') @validation.schema(schema.create_v282, '2.82', '2.92') @validation.schema(schema.create_v293, '2.93') @validation.response_body_schema(schema.create_response, '2.0', '2.50') @validation.response_body_schema(schema.create_response_v251, '2.51', '2.75') # noqa: E501 @validation.response_body_schema(schema.create_response_v276, '2.76', '2.81') # noqa: E501 @validation.response_body_schema(schema.create_response_v282, '2.82', '2.92') # noqa: E501 @validation.response_body_schema(schema.create_response_v293, '2.93') def create(self, req, body): """Creates a new instance event.""" context = req.environ['nova.context'] context.can(see_policies.POLICY_ROOT % 'create', target={}) response_events = [] accepted_events = [] accepted_instances = set() result = 200 body_events = body['events'] # Fetch instance objects for all relevant instances instance_uuids = set([event['server_uuid'] for event in body_events]) instance_mappings = objects.InstanceMappingList.get_by_instance_uuids( context, list(instance_uuids)) instances = self._get_instances_all_cells(context, instance_uuids, instance_mappings) for _event in body_events: client_event = dict(_event) event = objects.InstanceExternalEvent(context) event.instance_uuid = client_event.pop('server_uuid') event.name = client_event.pop('name') event.status = client_event.pop('status', 'completed') event.tag = client_event.pop('tag', None) response_events.append(_event) instance = 
instances.get(event.instance_uuid) if not instance: LOG.debug('Dropping event %(name)s:%(tag)s for unknown ' 'instance %(instance_uuid)s', {'name': event.name, 'tag': event.tag, 'instance_uuid': event.instance_uuid}) _event['status'] = 'failed' _event['code'] = 404 result = 207 continue # NOTE: before accepting the event, make sure the instance # for which the event is sent is assigned to a host; otherwise # it will not be possible to dispatch the event if not self._is_event_tag_present_when_required(event): LOG.debug("Event tag is missing for instance " "%(instance)s. Dropping event %(event)s", {'instance': event.instance_uuid, 'event': event.name}) _event['status'] = 'failed' _event['code'] = 400 result = 207 elif instance.host: accepted_events.append(event) accepted_instances.add(instance) LOG.info('Creating event %(name)s:%(tag)s for ' 'instance %(instance_uuid)s on %(host)s', {'name': event.name, 'tag': event.tag, 'instance_uuid': event.instance_uuid, 'host': instance.host}) # NOTE: as the event is processed asynchronously verify # whether 202 is a more suitable response code than 200 _event['status'] = 'completed' _event['code'] = 200 else: LOG.debug("Unable to find a host for instance " "%(instance)s. Dropping event %(event)s", {'instance': event.instance_uuid, 'event': event.name}) _event['status'] = 'failed' _event['code'] = 422 result = 207 if accepted_events: self.compute_api.external_instance_event( context, accepted_instances, accepted_events) # FIXME(cyeoh): This needs some infrastructure support so that # we have a general way to do this robj = wsgi.ResponseObject({'events': response_events}) robj._code = result return robj ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/server_groups.py0000664000175000017500000002763000000000000023230 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Cisco Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
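# Illustrative sketch (not part of Nova): the external-events create() handler
# above accepts a list of events keyed by ``server_uuid`` and ``name``, with an
# optional ``status`` (defaulting to 'completed') and ``tag``.  Events named in
# TAG_REQUIRED ('volume-extended', 'power-update', 'accelerator-request-bound')
# get a per-event 400 when the tag is missing, unknown instances get a
# per-event 404, hostless instances get a per-event 422, and any per-event
# failure turns the overall response into a 207.  The body below is a
# placeholder; the UUID and tag values are invented for the example only.

_example_external_events_body = {
    'events': [
        {
            'server_uuid': '9873dee7-6a5e-4f3e-9c44-0f8e7b6a0000',  # placeholder
            'name': 'power-update',
            # A tag is required here because 'power-update' is in TAG_REQUIRED;
            # the value itself is a placeholder.
            'tag': 'POWER_ON',
        },
    ],
}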
"""The Server Group API Extension.""" import collections from oslo_log import log as logging import webob from webob import exc from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import server_groups as schema from nova.api.openstack import wsgi from nova.api import validation import nova.conf from nova import context as nova_context import nova.exception from nova.i18n import _ from nova.limit import local as local_limit from nova import objects from nova.objects import service from nova.policies import server_groups as sg_policies LOG = logging.getLogger(__name__) CONF = nova.conf.CONF def _get_not_deleted(context, uuids): mappings = objects.InstanceMappingList.get_by_instance_uuids( context, uuids) inst_by_cell = collections.defaultdict(list) cell_mappings = {} found_inst_uuids = [] # Get a master list of cell mappings, and a list of instance # uuids organized by cell for im in mappings: if not im.cell_mapping: # Not scheduled yet, so just throw it in the final list # and move on found_inst_uuids.append(im.instance_uuid) continue if im.cell_mapping.uuid not in cell_mappings: cell_mappings[im.cell_mapping.uuid] = im.cell_mapping inst_by_cell[im.cell_mapping.uuid].append(im.instance_uuid) # Query each cell for the instances that are inside, building # a list of non-deleted instance uuids. for cell_uuid, cell_mapping in cell_mappings.items(): inst_uuids = inst_by_cell[cell_uuid] LOG.debug('Querying cell %(cell)s for %(num)i instances', {'cell': cell_mapping.identity, 'num': len(inst_uuids)}) filters = {'uuid': inst_uuids, 'deleted': False} with nova_context.target_cell(context, cell_mapping) as ctx: found_inst_uuids.extend([ inst.uuid for inst in objects.InstanceList.get_by_filters( ctx, filters=filters)]) return found_inst_uuids def _should_enable_custom_max_server_rules(context, rules): if rules and int(rules.get('max_server_per_host', 1)) > 1: minver = service.get_minimum_version_all_cells( context, ['nova-compute']) if minver < 33: return False return True @validation.validated class ServerGroupController(wsgi.Controller): """The Server group API controller for the OpenStack API.""" def _format_server_group(self, context, group, req): # the id field has its value as the uuid of the server group # There is no 'uuid' key in server_group seen by clients. # In addition, clients see policies as a ["policy-name"] list; # and they see members as a ["server-id"] list. server_group = {} server_group['id'] = group.uuid server_group['name'] = group.name if api_version_request.is_supported(req, '2.64'): server_group['policy'] = group.policy server_group['rules'] = group.rules else: server_group['policies'] = group.policies or [] # NOTE(yikun): Before v2.64, a empty metadata is exposed to the # user, and it is removed since v2.64. server_group['metadata'] = {} members = [] if group.members: # Display the instances that are not deleted. 
members = _get_not_deleted(context, group.members) server_group['members'] = members # Add project id information to the response data for # API version v2.13 if api_version_request.is_supported(req, "2.13"): server_group['project_id'] = group.project_id server_group['user_id'] = group.user_id return server_group @wsgi.expected_errors(404) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response, '2.1', '2.12') @validation.response_body_schema(schema.show_response_v213, '2.13', '2.14') @validation.response_body_schema(schema.show_response_v215, '2.15', '2.63') @validation.response_body_schema(schema.show_response_v264, '2.64') def show(self, req, id): """Return data about the given server group.""" context = req.environ['nova.context'] try: sg = objects.InstanceGroup.get_by_uuid(context, id) except nova.exception.InstanceGroupNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) context.can(sg_policies.POLICY_ROOT % 'show', target={'project_id': sg.project_id}) return {'server_group': self._format_server_group(context, sg, req)} @wsgi.response(204) @wsgi.expected_errors(404) @validation.response_body_schema(schema.delete_response) def delete(self, req, id): """Delete a server group.""" context = req.environ['nova.context'] try: sg = objects.InstanceGroup.get_by_uuid(context, id) except nova.exception.InstanceGroupNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) context.can(sg_policies.POLICY_ROOT % 'delete', target={'project_id': sg.project_id}) try: sg.destroy() except nova.exception.InstanceGroupNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) @wsgi.expected_errors(()) @validation.query_schema(schema.index_query, '2.0', '2.74') @validation.query_schema(schema.index_query_v275, '2.75') @validation.response_body_schema(schema.index_response, '2.1', '2.12') @validation.response_body_schema(schema.index_response_v213, '2.13', '2.14') # noqa: E501 @validation.response_body_schema(schema.index_response_v215, '2.15', '2.63') # noqa: E501 @validation.response_body_schema(schema.index_response_v264, '2.64') def index(self, req): """Returns a list of server groups.""" context = req.environ['nova.context'] project_id = context.project_id # NOTE(gmann): Using context's project_id as target here so # that when we remove the default target from policy class, # it does not fail if user requesting operation on for their # own server group. context.can(sg_policies.POLICY_ROOT % 'index', target={'project_id': project_id}) if 'all_projects' in req.GET and context.is_admin: # TODO(gmann): Remove the is_admin check in the above condition # so that the below policy can raise error if not allowed. # In existing behavior, if non-admin users requesting # all projects server groups they do not get error instead # get their own server groups. Once we switch to policy # new defaults completely then we can remove the above check. # Until then, let's keep the old behaviour. 
context.can(sg_policies.POLICY_ROOT % 'index:all_projects', target={'project_id': project_id}) sgs = objects.InstanceGroupList.get_all(context) else: sgs = objects.InstanceGroupList.get_by_project_id( context, project_id) limited_list = common.limited(sgs.objects, req) result = [self._format_server_group(context, group, req) for group in limited_list] return {'server_groups': result} @wsgi.expected_errors((400, 403, 409)) @validation.schema(schema.create, "2.0", "2.14") @validation.schema(schema.create_v215, "2.15", "2.63") @validation.schema(schema.create_v264, "2.64") @validation.response_body_schema(schema.create_response, '2.1', '2.12') @validation.response_body_schema(schema.create_response_v213, '2.13', '2.14') # noqa: E501 @validation.response_body_schema(schema.create_response_v215, '2.15', '2.63') # noqa: E501 @validation.response_body_schema(schema.create_response_v264, '2.64') def create(self, req, body): """Creates a new server group.""" context = req.environ['nova.context'] project_id = context.project_id context.can(sg_policies.POLICY_ROOT % 'create', target={'project_id': project_id}) try: objects.Quotas.check_deltas(context, {'server_groups': 1}, project_id, context.user_id) local_limit.enforce_db_limit(context, local_limit.SERVER_GROUPS, entity_scope=project_id, delta=1) except nova.exception.ServerGroupLimitExceeded as e: raise exc.HTTPForbidden(explanation=str(e)) except nova.exception.OverQuota: msg = _("Quota exceeded, too many server groups.") raise exc.HTTPForbidden(explanation=msg) vals = body['server_group'] if api_version_request.is_supported(req, "2.64"): policy = vals['policy'] rules = vals.get('rules', {}) if policy != 'anti-affinity' and rules: msg = _("Only anti-affinity policy supports rules.") raise exc.HTTPBadRequest(explanation=msg) # NOTE(yikun): This should be removed in Stein version. if not _should_enable_custom_max_server_rules(context, rules): msg = _("Creating an anti-affinity group with rule " "max_server_per_host > 1 is not yet supported.") raise exc.HTTPConflict(explanation=msg) sg = objects.InstanceGroup(context, policy=policy, rules=rules) else: policies = vals.get('policies') sg = objects.InstanceGroup(context, policy=policies[0]) try: sg.name = vals.get('name') sg.project_id = project_id sg.user_id = context.user_id sg.create() except ValueError as e: raise exc.HTTPBadRequest(explanation=e) # NOTE(melwitt): We recheck the quota after creating the object to # prevent users from allocating more resources than their allowed quota # in the event of a race. This is configurable because it can be # expensive if strict quota limits are not required in a deployment. if CONF.quota.recheck_quota: try: objects.Quotas.check_deltas(context, {'server_groups': 0}, project_id, context.user_id) # TODO(johngarbutt): decide if we need this recheck # The quota rechecking of limits is really just to protect # against denial of service attacks that aim to fill up the # database. Its usefulness could be debated. 
local_limit.enforce_db_limit(context, local_limit.SERVER_GROUPS, project_id, delta=0) except nova.exception.ServerGroupLimitExceeded as e: sg.destroy() raise exc.HTTPForbidden(explanation=str(e)) except nova.exception.OverQuota: sg.destroy() msg = _("Quota exceeded, too many server groups.") raise exc.HTTPForbidden(explanation=msg) return {'server_group': self._format_server_group(context, sg, req)} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/server_metadata.py0000664000175000017500000001572500000000000023473 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc from nova.api.openstack import common from nova.api.openstack.compute.schemas import server_metadata from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.i18n import _ from nova.policies import server_metadata as sm_policies class ServerMetadataController(wsgi.Controller): """The server metadata API controller for the OpenStack API.""" def __init__(self): super(ServerMetadataController, self).__init__() self.compute_api = compute.API() def _get_metadata(self, context, server): try: # NOTE(mikal): get_instance_metadata sometimes returns # InstanceNotFound in unit tests, even though the instance is # fetched on the line above. I blame mocking. meta = self.compute_api.get_instance_metadata(context, server) except exception.InstanceNotFound: msg = _('Server does not exist') raise exc.HTTPNotFound(explanation=msg) meta_dict = {} for key, value in meta.items(): meta_dict[key] = value return meta_dict @wsgi.expected_errors(404) @validation.query_schema(server_metadata.index_query) def index(self, req, server_id): """Returns the list of metadata for a given instance.""" context = req.environ['nova.context'] server = common.get_instance(self.compute_api, context, server_id) context.can(sm_policies.POLICY_ROOT % 'index', target={'project_id': server.project_id}) return {'metadata': self._get_metadata(context, server)} @wsgi.expected_errors((403, 404, 409)) # NOTE(gmann): Returns 200 for backwards compatibility but should be 201 # as this operation complete the creation of metadata. 
@validation.schema(server_metadata.create) def create(self, req, server_id, body): metadata = body['metadata'] context = req.environ['nova.context'] server = common.get_instance(self.compute_api, context, server_id) context.can(sm_policies.POLICY_ROOT % 'create', target={'project_id': server.project_id}) new_metadata = self._update_instance_metadata(context, server, metadata, delete=False) return {'metadata': new_metadata} @wsgi.expected_errors((400, 403, 404, 409)) @validation.schema(server_metadata.update) def update(self, req, server_id, id, body): context = req.environ['nova.context'] server = common.get_instance(self.compute_api, context, server_id) context.can(sm_policies.POLICY_ROOT % 'update', target={'project_id': server.project_id}) meta_item = body['meta'] if id not in meta_item: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) self._update_instance_metadata(context, server, meta_item, delete=False) return {'meta': meta_item} @wsgi.expected_errors((403, 404, 409)) @validation.schema(server_metadata.update_all) def update_all(self, req, server_id, body): context = req.environ['nova.context'] server = common.get_instance(self.compute_api, context, server_id) context.can(sm_policies.POLICY_ROOT % 'update_all', target={'project_id': server.project_id}) metadata = body['metadata'] new_metadata = self._update_instance_metadata(context, server, metadata, delete=True) return {'metadata': new_metadata} def _update_instance_metadata(self, context, server, metadata, delete=False): try: return self.compute_api.update_instance_metadata(context, server, metadata, delete) except exception.OverQuota as error: raise exc.HTTPForbidden(explanation=error.format_message()) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'update metadata', server.uuid) @wsgi.expected_errors(404) @validation.query_schema(server_metadata.show_query) def show(self, req, server_id, id): """Return a single metadata item.""" context = req.environ['nova.context'] server = common.get_instance(self.compute_api, context, server_id) context.can(sm_policies.POLICY_ROOT % 'show', target={'project_id': server.project_id}) data = self._get_metadata(context, server) try: return {'meta': {id: data[id]}} except KeyError: msg = _("Metadata item was not found") raise exc.HTTPNotFound(explanation=msg) @wsgi.expected_errors((404, 409)) @wsgi.response(204) def delete(self, req, server_id, id): """Deletes an existing metadata.""" context = req.environ['nova.context'] server = common.get_instance(self.compute_api, context, server_id) context.can(sm_policies.POLICY_ROOT % 'delete', target={'project_id': server.project_id}) metadata = self._get_metadata(context, server) if id not in metadata: msg = _("Metadata item was not found") raise exc.HTTPNotFound(explanation=msg) try: self.compute_api.delete_instance_metadata(context, server, id) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'delete metadata', server_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/server_migrations.py0000664000175000017500000002200500000000000024054 0ustar00zuulzuul00000000000000# Copyright 2016 
OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import server_migrations as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.i18n import _ from nova.policies import servers_migrations as sm_policies def output( migration, include_uuid=False, include_user_project=False, include_host=True, ): """Returns the desired output of the API from an object. From a Migrations's object this method returns the primitive object with the only necessary and expected fields. """ # TODO(gmaan): We have a separate policy to show the host related info. # For backward compatibility, we always return the host fields in response # but policy controls the value. To be consistent to other APIs, if the # policy does not permit then we should not include these fields in # response but that needs to be done with a new microversion. There # are more related improvements to be done in list migrations APIs, # refer to the comments in api/openstack/compute/migrations.py. result = { "created_at": migration.created_at, "dest_compute": None, "dest_host": None, "dest_node": None, "disk_processed_bytes": migration.disk_processed, "disk_remaining_bytes": migration.disk_remaining, "disk_total_bytes": migration.disk_total, "id": migration.id, "memory_processed_bytes": migration.memory_processed, "memory_remaining_bytes": migration.memory_remaining, "memory_total_bytes": migration.memory_total, "server_uuid": migration.instance_uuid, "source_compute": None, "source_node": None, "status": migration.status, "updated_at": migration.updated_at } if include_uuid: result['uuid'] = migration.uuid if include_user_project: result['user_id'] = migration.user_id result['project_id'] = migration.project_id # TODO(gmaan): This API (and list os-migrations) does not # return the 'server_host', which is strange and not consistent with # the info returned for the destination host. This needs to be fixed # with microversion bump. 
There are more related improvements to be # done in list migrations APIs, refer to the comments in # api/openstack/compute/migrations.py if include_host: result['dest_compute'] = migration.dest_compute result['dest_host'] = migration.dest_host result['dest_node'] = migration.dest_node result['source_compute'] = migration.source_compute result['source_node'] = migration.source_node return result class ServerMigrationsController(wsgi.Controller): """The server migrations API controller for the OpenStack API.""" def __init__(self): super(ServerMigrationsController, self).__init__() self.compute_api = compute.API() @wsgi.api_version("2.22") @wsgi.response(202) @wsgi.expected_errors((400, 403, 404, 409)) @wsgi.action('force_complete') @validation.schema(schema.force_complete) @validation.response_body_schema(schema.force_complete_response) def _force_complete(self, req, id, server_id, body): context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, server_id) context.can(sm_policies.POLICY_ROOT % 'force_complete', target={'project_id': instance.project_id}) try: self.compute_api.live_migrate_force_complete(context, instance, id) except exception.InstanceNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) except (exception.MigrationNotFoundByStatus, exception.InvalidMigrationState, exception.MigrationNotFoundForInstance) as e: raise exc.HTTPBadRequest(explanation=e.format_message()) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state( state_error, 'force_complete', server_id) @wsgi.api_version("2.23") @wsgi.expected_errors(404) @validation.query_schema(schema.index_query) def index(self, req, server_id): """Return all migrations of an instance in progress.""" context = req.environ['nova.context'] # NOTE(Shaohe Feng) just check the instance is available. To keep # consistency with other API, check it before get migrations. instance = common.get_instance(self.compute_api, context, server_id) context.can(sm_policies.POLICY_ROOT % 'index', target={'project_id': instance.project_id}) include_host = context.can(sm_policies.POLICY_ROOT % 'index:host', fatal=False, target={'project_id': instance.project_id}) migrations = self.compute_api.get_migrations_in_progress_by_instance( context, server_id, 'live-migration') include_uuid = api_version_request.is_supported(req, '2.59') include_user_project = api_version_request.is_supported(req, '2.80') return {'migrations': [ output(migration, include_uuid, include_user_project, include_host) for migration in migrations]} @wsgi.api_version("2.23") @wsgi.expected_errors(404) @validation.query_schema(schema.show_query) def show(self, req, server_id, id): """Return the migration of an instance in progress by id.""" context = req.environ['nova.context'] # NOTE(Shaohe Feng) just check the instance is available. To keep # consistency with other API, check it before get migrations. 
instance = common.get_instance(self.compute_api, context, server_id) context.can(sm_policies.POLICY_ROOT % 'show', target={'project_id': instance.project_id}) try: migration = self.compute_api.get_migration_by_id_and_instance( context, id, server_id) except exception.MigrationNotFoundForInstance: msg = _("In-progress live migration %(id)s is not found for" " server %(uuid)s.") % {"id": id, "uuid": server_id} raise exc.HTTPNotFound(explanation=msg) if not migration.is_live_migration: msg = _("Migration %(id)s for server %(uuid)s is not" " live-migration.") % {"id": id, "uuid": server_id} raise exc.HTTPNotFound(explanation=msg) # TODO(Shaohe Feng) we should share the in-progress list. in_progress = ['queued', 'preparing', 'running', 'post-migrating'] if migration.status not in in_progress: msg = _("Live migration %(id)s for server %(uuid)s is not in" " progress.") % {"id": id, "uuid": server_id} raise exc.HTTPNotFound(explanation=msg) include_uuid = api_version_request.is_supported(req, '2.59') include_user_project = api_version_request.is_supported(req, '2.80') return {'migration': output(migration, include_uuid, include_user_project)} @wsgi.api_version("2.24") @wsgi.response(202) @wsgi.expected_errors((400, 404, 409)) def delete(self, req, server_id, id): """Abort an in progress migration of an instance.""" context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, server_id) context.can(sm_policies.POLICY_ROOT % 'delete', target={'project_id': instance.project_id}) support_abort_in_queue = api_version_request.is_supported(req, '2.65') try: self.compute_api.live_migrate_abort( context, instance, id, support_abort_in_queue=support_abort_in_queue) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state( state_error, "abort live migration", server_id) except exception.MigrationNotFoundForInstance as e: raise exc.HTTPNotFound(explanation=e.format_message()) except exception.InvalidMigrationState as e: raise exc.HTTPBadRequest(explanation=e.format_message()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/server_password.py0000664000175000017500000000464300000000000023552 0ustar00zuulzuul00000000000000# Copyright (c) 2012 Nebula, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
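# Illustrative sketch (not part of Nova): the server_migrations show() handler
# above only returns a migration when it is a live migration whose status is
# one of 'queued', 'preparing', 'running' or 'post-migrating'; anything else is
# reported as 404.  A small stand-alone predicate mirroring that filter could
# look like this; the names ``_EXAMPLE_IN_PROGRESS_STATUSES`` and
# ``_example_is_in_progress`` are local to this sketch.

_EXAMPLE_IN_PROGRESS_STATUSES = ('queued', 'preparing', 'running',
                                 'post-migrating')


def _example_is_in_progress(migration_status):
    """Return True when a live migration status counts as in progress."""
    return migration_status in _EXAMPLE_IN_PROGRESS_STATUSES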
"""The server password extension.""" from nova.api.metadata import password from nova.api.openstack import common from nova.api.openstack.compute.schemas import server_password as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova.policies import server_password as sp_policies @validation.validated class ServerPasswordController(wsgi.Controller): """The Server Password API controller for the OpenStack API.""" def __init__(self): super(ServerPasswordController, self).__init__() self.compute_api = compute.API() @wsgi.expected_errors(404) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req, server_id): context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, server_id) context.can(sp_policies.BASE_POLICY_NAME % 'show', target={'project_id': instance.project_id}) passw = password.extract_password(instance) return {'password': passw or ''} @wsgi.expected_errors(404) @wsgi.response(204) @validation.response_body_schema(schema.clear_response) def clear(self, req, server_id): """Removes the encrypted server password from the metadata server Note that this does not actually change the instance server password. """ context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, server_id) context.can(sp_policies.BASE_POLICY_NAME % 'clear', target={'project_id': instance.project_id}) meta = password.convert_password(context, None) instance.system_metadata.update(meta) instance.save() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/server_shares.py0000664000175000017500000002477000000000000023200 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import webob from oslo_db import exception as db_exc from oslo_utils import uuidutils from nova.api.openstack import common from nova.api.openstack.compute.schemas import server_shares as schema from nova.api.openstack.compute.views import server_shares from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova.compute import vm_states from nova import context as nova_context from nova import exception from nova import objects from nova.objects import fields from nova.policies import server_shares as ss_policies from nova.share import manila from nova.virt import hardware as hw def _get_instance_mapping(context, server_id): try: return objects.InstanceMapping.get_by_instance_uuid(context, server_id) except exception.InstanceMappingNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) class ServerSharesController(wsgi.Controller): _view_builder_class = server_shares.ViewBuilder def __init__(self): super(ServerSharesController, self).__init__() self.compute_api = compute.API() self.manila = manila.API() def _get_instance_from_server_uuid(self, context, server_id): instance = common.get_instance(self.compute_api, context, server_id) return instance def _check_instance_in_valid_state(self, context, server_id, action): instance = self._get_instance_from_server_uuid(context, server_id) if ( (action == "create share" and instance.vm_state not in vm_states.STOPPED) or (action == "delete share" and instance.vm_state not in vm_states.STOPPED and instance.vm_state not in vm_states.ERROR) ): exc = exception.InstanceInvalidState( attr="vm_state", instance_uuid=instance.uuid, state=instance.vm_state, method=action, ) common.raise_http_conflict_for_instance_invalid_state( exc, action, server_id ) return instance @wsgi.api_version("2.97") @wsgi.response(200) @wsgi.expected_errors((400, 403, 404)) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req, server_id): context = req.environ["nova.context"] # Get instance mapping to query the required cell database im = _get_instance_mapping(context, server_id) context.can(ss_policies.POLICY_ROOT % 'index', target={'project_id': im.project_id}) with nova_context.target_cell(context, im.cell_mapping) as cctxt: # Ensure the instance exists self._get_instance_from_server_uuid(cctxt, server_id) db_shares = objects.ShareMappingList.get_by_instance_uuid( cctxt, server_id ) return self._view_builder._list_view(db_shares) @wsgi.api_version("2.97") @wsgi.response(201) @wsgi.expected_errors((400, 403, 404, 409)) @validation.schema(schema.create, '2.97') @validation.response_body_schema(schema.show_response) def create(self, req, server_id, body): def _try_create_share_mapping(context, share_mapping): """Block the request if the share is already created. Prevent race conditions of requests that would hit the share_mapping.create() almost at the same time. Prevent user from using the same tag twice on the same instance. 
""" try: objects.ShareMapping.get_by_instance_uuid_and_share_id( context, share_mapping.instance_uuid, share_mapping.share_id ) raise exception.ShareMappingAlreadyExists( share_id=share_mapping.share_id, tag=share_mapping.tag ) except exception.ShareNotFound: pass try: share_mapping.create() except db_exc.DBDuplicateEntry: raise exception.ShareMappingAlreadyExists( share_id=share_mapping.share_id, tag=share_mapping.tag ) def _check_manila_share(manila_share_data): """Check that the targeted share in manila has correct export location, status 'available' and a supported protocol. """ if manila_share_data.status != 'available': raise exception.ShareStatusIncorect( share_id=share_id, status=manila_share_data.status ) if manila_share_data.export_location is None: raise exception.ShareMissingExportLocation(share_id=share_id) if ( manila_share_data.share_proto not in fields.ShareMappingProto.ALL ): raise exception.ShareProtocolNotSupported( share_proto=manila_share_data.share_proto ) context = req.environ["nova.context"] # Get instance mapping to query the required cell database im = _get_instance_mapping(context, server_id) context.can( ss_policies.POLICY_ROOT % 'create', target={'project_id': im.project_id} ) share_dict = body['share'] share_id = share_dict.get('share_id') with nova_context.target_cell(context, im.cell_mapping) as cctxt: instance = self._check_instance_in_valid_state( cctxt, server_id, "create share" ) try: hw.check_shares_supported(cctxt, instance) manila_share_data = self.manila.get(cctxt, share_id) _check_manila_share(manila_share_data) share_mapping = objects.ShareMapping(cctxt) share_mapping.uuid = uuidutils.generate_uuid() share_mapping.instance_uuid = server_id share_mapping.share_id = manila_share_data.id share_mapping.status = fields.ShareMappingStatus.ATTACHING share_mapping.tag = share_dict.get('tag', manila_share_data.id) share_mapping.export_location = ( manila_share_data.export_location) share_mapping.share_proto = manila_share_data.share_proto _try_create_share_mapping(cctxt, share_mapping) self.compute_api.allow_share(cctxt, instance, share_mapping) view = self._view_builder._show_view(cctxt, share_mapping) except (exception.ShareNotFound) as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) except (exception.ShareStatusIncorect) as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) except (exception.ShareMissingExportLocation) as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) except (exception.ShareProtocolNotSupported) as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) except (exception.ShareMappingAlreadyExists) as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) except (exception.ForbiddenSharesNotSupported) as e: raise webob.exc.HTTPForbidden(explanation=e.format_message()) except (exception.ForbiddenSharesNotConfiguredCorrectly) as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) return view @wsgi.api_version("2.97") @wsgi.response(200) @wsgi.expected_errors((400, 403, 404)) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response) def show(self, req, server_id, id): context = req.environ["nova.context"] # Get instance mapping to query the required cell database im = _get_instance_mapping(context, server_id) context.can( ss_policies.POLICY_ROOT % 'show', target={'project_id': im.project_id} ) with nova_context.target_cell(context, im.cell_mapping) as cctxt: try: # Ensure the instance exists 
self._get_instance_from_server_uuid(cctxt, server_id) share = objects.ShareMapping.get_by_instance_uuid_and_share_id( cctxt, server_id, id ) view = self._view_builder._show_view(cctxt, share) except (exception.ShareNotFound) as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) return view @wsgi.api_version("2.97") @wsgi.response(200) @wsgi.expected_errors((400, 403, 404, 409)) def delete(self, req, server_id, id): context = req.environ["nova.context"] # Get instance mapping to query the required cell database im = _get_instance_mapping(context, server_id) context.can( ss_policies.POLICY_ROOT % 'delete', target={'project_id': im.project_id} ) with nova_context.target_cell(context, im.cell_mapping) as cctxt: instance = self._check_instance_in_valid_state( cctxt, server_id, "delete share" ) try: # Ensure the instance exists self._get_instance_from_server_uuid(cctxt, server_id) share_mapping = ( objects.ShareMapping.get_by_instance_uuid_and_share_id( cctxt, server_id, id ) ) share_mapping.status = fields.ShareMappingStatus.DETACHING share_mapping.save() self.compute_api.deny_share(cctxt, instance, share_mapping) except (exception.ShareNotFound) as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/server_tags.py0000664000175000017500000002273000000000000022643 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
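# Overview (illustrative; URL paths assumed, they are not defined in this
# module): the controller below implements the server tags API
# (microversion 2.26):
#   PUT /servers/{server_id}/tags/{tag}  -> 201 (created) or 204 (already set)
#   PUT /servers/{server_id}/tags        -> replaces the whole tag list
#   DELETE removes a single tag or, on the collection, all tags.
# The number of tags per server is capped by objects.instance.MAX_TAG_COUNT.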
import jsonschema import webob from nova.api.openstack import common from nova.api.openstack.compute.schemas import server_tags as schema from nova.api.openstack.compute.views import server_tags from nova.api.openstack import wsgi from nova.api import validation from nova.api.validation import parameter_types from nova.compute import api as compute from nova.compute import vm_states from nova import context as nova_context from nova import exception from nova.i18n import _ from nova.notifications import base as notifications_base from nova import objects from nova.policies import server_tags as st_policies def _get_tags_names(tags): return [t.tag for t in tags] def _get_instance_mapping(context, server_id): try: return objects.InstanceMapping.get_by_instance_uuid(context, server_id) except exception.InstanceMappingNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) class ServerTagsController(wsgi.Controller): _view_builder_class = server_tags.ViewBuilder def __init__(self): super(ServerTagsController, self).__init__() self.compute_api = compute.API() def _check_instance_in_valid_state(self, context, server_id, action): instance = common.get_instance(self.compute_api, context, server_id) if instance.vm_state not in (vm_states.ACTIVE, vm_states.PAUSED, vm_states.SUSPENDED, vm_states.STOPPED): exc = exception.InstanceInvalidState(attr='vm_state', instance_uuid=instance.uuid, state=instance.vm_state, method=action) common.raise_http_conflict_for_instance_invalid_state(exc, action, server_id) return instance @wsgi.api_version("2.26") @wsgi.response(204) @wsgi.expected_errors(404) @validation.query_schema(schema.show_query) def show(self, req, server_id, id): context = req.environ["nova.context"] im = _get_instance_mapping(context, server_id) context.can(st_policies.POLICY_ROOT % 'show', target={'project_id': im.project_id}) try: with nova_context.target_cell(context, im.cell_mapping) as cctxt: exists = objects.Tag.exists(cctxt, server_id, id) except (exception.InstanceNotFound) as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) if not exists: msg = (_("Server %(server_id)s has no tag '%(tag)s'") % {'server_id': server_id, 'tag': id}) raise webob.exc.HTTPNotFound(explanation=msg) @wsgi.api_version("2.26") @wsgi.expected_errors(404) @validation.query_schema(schema.index_query) def index(self, req, server_id): context = req.environ["nova.context"] im = _get_instance_mapping(context, server_id) context.can(st_policies.POLICY_ROOT % 'index', target={'project_id': im.project_id}) try: with nova_context.target_cell(context, im.cell_mapping) as cctxt: tags = objects.TagList.get_by_resource_id(cctxt, server_id) except (exception.InstanceNotFound) as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) return {'tags': _get_tags_names(tags)} @wsgi.api_version("2.26") @wsgi.expected_errors((400, 404, 409)) @validation.schema(schema.update) def update(self, req, server_id, id, body): context = req.environ["nova.context"] im = _get_instance_mapping(context, server_id) context.can(st_policies.POLICY_ROOT % 'update', target={'project_id': im.project_id}) with nova_context.target_cell(context, im.cell_mapping) as cctxt: instance = self._check_instance_in_valid_state( cctxt, server_id, 'update tag') try: jsonschema.validate(id, parameter_types.tag) except jsonschema.ValidationError as e: msg = (_("Tag '%(tag)s' is invalid. It must be a non empty string " "without characters '/' and ','. 
Validation error " "message: %(err)s") % {'tag': id, 'err': e.message}) raise webob.exc.HTTPBadRequest(explanation=msg) try: with nova_context.target_cell(context, im.cell_mapping) as cctxt: tags = objects.TagList.get_by_resource_id(cctxt, server_id) except exception.InstanceNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) if len(tags) >= objects.instance.MAX_TAG_COUNT: msg = (_("The number of tags exceeded the per-server limit %d") % objects.instance.MAX_TAG_COUNT) raise webob.exc.HTTPBadRequest(explanation=msg) if id in _get_tags_names(tags): # NOTE(snikitin): server already has specified tag return webob.Response(status_int=204) try: with nova_context.target_cell(context, im.cell_mapping) as cctxt: tag = objects.Tag(context=cctxt, resource_id=server_id, tag=id) tag.create() instance.tags = objects.TagList.get_by_resource_id(cctxt, server_id) except exception.InstanceNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) notifications_base.send_instance_update_notification( context, instance, service="nova-api") response = webob.Response(status_int=201) response.headers['Location'] = self._view_builder.get_location( req, server_id, id) return response @wsgi.api_version("2.26") @wsgi.expected_errors((404, 409)) @validation.schema(schema.update_all) def update_all(self, req, server_id, body): context = req.environ["nova.context"] im = _get_instance_mapping(context, server_id) context.can(st_policies.POLICY_ROOT % 'update_all', target={'project_id': im.project_id}) with nova_context.target_cell(context, im.cell_mapping) as cctxt: instance = self._check_instance_in_valid_state( cctxt, server_id, 'update tags') try: with nova_context.target_cell(context, im.cell_mapping) as cctxt: tags = objects.TagList.create(cctxt, server_id, body['tags']) instance.tags = tags except exception.InstanceNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) notifications_base.send_instance_update_notification( context, instance, service="nova-api") return {'tags': _get_tags_names(tags)} @wsgi.api_version("2.26") @wsgi.response(204) @wsgi.expected_errors((404, 409)) def delete(self, req, server_id, id): context = req.environ["nova.context"] im = _get_instance_mapping(context, server_id) context.can(st_policies.POLICY_ROOT % 'delete', target={'project_id': im.project_id}) with nova_context.target_cell(context, im.cell_mapping) as cctxt: instance = self._check_instance_in_valid_state( cctxt, server_id, 'delete tag') try: with nova_context.target_cell(context, im.cell_mapping) as cctxt: objects.Tag.destroy(cctxt, server_id, id) instance.tags = objects.TagList.get_by_resource_id(cctxt, server_id) except (exception.InstanceTagNotFound, exception.InstanceNotFound) as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) notifications_base.send_instance_update_notification( context, instance, service="nova-api") @wsgi.api_version("2.26") @wsgi.response(204) @wsgi.expected_errors((404, 409)) def delete_all(self, req, server_id): context = req.environ["nova.context"] im = _get_instance_mapping(context, server_id) context.can(st_policies.POLICY_ROOT % 'delete_all', target={'project_id': im.project_id}) with nova_context.target_cell(context, im.cell_mapping) as cctxt: instance = self._check_instance_in_valid_state( cctxt, server_id, 'delete tags') try: with nova_context.target_cell(context, im.cell_mapping) as cctxt: objects.TagList.destroy(cctxt, server_id) instance.tags = objects.TagList() except exception.InstanceNotFound as e: raise 
webob.exc.HTTPNotFound(explanation=e.format_message()) notifications_base.send_instance_update_notification( context, instance, service="nova-api") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/server_topology.py0000664000175000017500000000547200000000000023565 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.openstack import common from nova.api.openstack.compute.schemas import server_topology as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova.policies import server_topology as st_policies @validation.validated class ServerTopologyController(wsgi.Controller): def __init__(self, *args, **kwargs): super(ServerTopologyController, self).__init__(*args, **kwargs) self.compute_api = compute.API() @wsgi.api_version("2.78") @wsgi.expected_errors(404) @validation.query_schema(schema.query_params_v21) @validation.response_body_schema(schema.index_response) def index(self, req, server_id): context = req.environ["nova.context"] instance = common.get_instance(self.compute_api, context, server_id, expected_attrs=['numa_topology', 'vcpu_model']) context.can(st_policies.BASE_POLICY_NAME % 'index', target={'project_id': instance.project_id}) host_policy = (st_policies.BASE_POLICY_NAME % 'host:index') show_host_info = context.can(host_policy, target={'project_id': instance.project_id}, fatal=False) return self._get_numa_topology(context, instance, show_host_info) def _get_numa_topology(self, context, instance, show_host_info): if instance.numa_topology is None: return { 'nodes': [], 'pagesize_kb': None } topo = {} cells = [] pagesize_kb = None for cell_ in instance.numa_topology.cells: cell = {} cell['vcpu_set'] = cell_.total_cpus cell['siblings'] = cell_.siblings cell['memory_mb'] = cell_.memory if show_host_info: cell['host_node'] = cell_.id if cell_.cpu_pinning is None: cell['cpu_pinning'] = {} else: cell['cpu_pinning'] = cell_.cpu_pinning if cell_.pagesize: pagesize_kb = cell_.pagesize cells.append(cell) topo['nodes'] = cells topo['pagesize_kb'] = pagesize_kb return topo ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/servers.py0000664000175000017500000021533500000000000022015 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack Foundation # Copyright 2011 Piston Cloud Computing, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log as logging import oslo_messaging as messaging from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import uuidutils import webob from webob import exc from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute import helpers from nova.api.openstack.compute.schemas import servers as schema from nova.api.openstack.compute.views import servers as views_servers from nova.api.openstack import wsgi from nova.api import validation from nova import block_device from nova.compute import api as compute from nova.compute import flavors from nova.compute import utils as compute_utils import nova.conf from nova import context as nova_context from nova import exception from nova.i18n import _ from nova.image import glance from nova import objects from nova.policies import servers as server_policies from nova import utils TAG_SEARCH_FILTERS = ('tags', 'tags-any', 'not-tags', 'not-tags-any') PAGING_SORTING_PARAMS = ('sort_key', 'sort_dir', 'limit', 'marker') CONF = nova.conf.CONF LOG = logging.getLogger(__name__) INVALID_FLAVOR_IMAGE_EXCEPTIONS = ( exception.BadRequirementEmulatorThreadsPolicy, exception.CPUThreadPolicyConfigurationInvalid, exception.FlavorImageConflict, exception.FlavorDiskTooSmall, exception.FlavorMemoryTooSmall, exception.ImageCPUPinningForbidden, exception.ImageCPUThreadPolicyForbidden, exception.ImageNUMATopologyAsymmetric, exception.ImageNUMATopologyCPUDuplicates, exception.ImageNUMATopologyCPUOutOfRange, exception.ImageNUMATopologyCPUsUnassigned, exception.ImageNUMATopologyForbidden, exception.ImageNUMATopologyIncomplete, exception.ImageNUMATopologyMemoryOutOfRange, exception.ImageNUMATopologyRebuildConflict, exception.ImageSerialPortNumberExceedFlavorValue, exception.ImageSerialPortNumberInvalid, exception.ImageVCPULimitsRangeExceeded, exception.ImageVCPUTopologyRangeExceeded, exception.InvalidCPUAllocationPolicy, exception.InvalidCPUThreadAllocationPolicy, exception.InvalidEmulatorThreadsPolicy, exception.InvalidMachineType, exception.InvalidNUMANodesNumber, exception.InvalidRequest, exception.MemoryPageSizeForbidden, exception.MemoryPageSizeInvalid, exception.PciInvalidAlias, exception.PciRequestAliasNotDefined, exception.RealtimeConfigurationInvalid, exception.RealtimeMaskNotFoundOrInvalid, exception.RequiredMixedInstancePolicy, exception.RequiredMixedOrRealtimeCPUMask, exception.InvalidMixedInstanceDedicatedMask, ) class ServersController(wsgi.Controller): """The Server API base controller class for the OpenStack API.""" _view_builder_class = views_servers.ViewBuilder @staticmethod def _add_location(robj): # Just in case... 
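        # Illustrative shape of the view output consumed here (values
        # assumed): robj.obj['server']['links'] holds entries such as
        # {"rel": "self", "href": "https://.../v2.1/servers/<uuid>"}; the
        # self href is copied into the Location response header below.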
if 'server' not in robj.obj: return robj link = [link for link in robj.obj['server'][ 'links'] if link['rel'] == 'self'] if link: robj['Location'] = link[0]['href'] # Convenience return return robj def __init__(self): super(ServersController, self).__init__() self.compute_api = compute.API() @wsgi.expected_errors((400, 403)) @validation.query_schema(schema.query_params_v275, '2.75') @validation.query_schema(schema.query_params_v273, '2.73', '2.74') @validation.query_schema(schema.query_params_v266, '2.66', '2.72') @validation.query_schema(schema.query_params_v226, '2.26', '2.65') @validation.query_schema(schema.query_params_v21, '2.1', '2.25') def index(self, req): """Returns a list of server names and ids for a given user.""" context = req.environ['nova.context'] context.can(server_policies.SERVERS % 'index') try: servers = self._get_servers(req, is_detail=False) except exception.Invalid as err: raise exc.HTTPBadRequest(explanation=err.format_message()) return servers @wsgi.expected_errors((400, 403)) @validation.query_schema(schema.query_params_v275, '2.75') @validation.query_schema(schema.query_params_v273, '2.73', '2.74') @validation.query_schema(schema.query_params_v266, '2.66', '2.72') @validation.query_schema(schema.query_params_v226, '2.26', '2.65') @validation.query_schema(schema.query_params_v21, '2.1', '2.25') def detail(self, req): """Returns a list of server details for a given user.""" context = req.environ['nova.context'] context.can(server_policies.SERVERS % 'detail') try: servers = self._get_servers(req, is_detail=True) except exception.Invalid as err: raise exc.HTTPBadRequest(explanation=err.format_message()) return servers @staticmethod def _is_cell_down_supported(req, search_opts): cell_down_support = api_version_request.is_supported(req, '2.69') if cell_down_support: # NOTE(tssurya): Minimal constructs would be returned from the down # cells if cell_down_support is True, however if filtering, sorting # or paging is requested by the user, then cell_down_support should # be made False and the down cells should be skipped (depending on # CONF.api.list_records_by_skipping_down_cells) as there is no # way to return correct results for the down cells in those # situations due to missing keys/information. # NOTE(tssurya): Since there is a chance that # remove_invalid_options function could have removed the paging and # sorting parameters, we add the additional check for that from the # request. pag_sort = any( ps in req.GET.keys() for ps in PAGING_SORTING_PARAMS) # NOTE(tssurya): ``nova list --all_tenants`` is the only # allowed filter exception when handling down cells. filters = list(search_opts.keys()) not in ([u'all_tenants'], []) if pag_sort or filters: cell_down_support = False return cell_down_support def _get_servers(self, req, is_detail): """Returns a list of servers, based on any search options specified.""" search_opts = {} search_opts.update(req.GET) context = req.environ['nova.context'] remove_invalid_options(context, search_opts, self._get_server_search_options(req)) cell_down_support = self._is_cell_down_supported(req, search_opts) for search_opt in search_opts: if (search_opt in schema.JOINED_TABLE_QUERY_PARAMS_SERVERS.keys() or search_opt.startswith('_')): msg = _("Invalid filter field: %s.") % search_opt raise exc.HTTPBadRequest(explanation=msg) # Verify search by 'status' contains a valid status. # Convert it to filter by vm_state or task_state for compute_api. 
# For non-admin user, vm_state and task_state are filtered through # remove_invalid_options function, based on value of status field. # Set value to vm_state and task_state to make search simple. search_opts.pop('status', None) if 'status' in req.GET.keys(): statuses = req.GET.getall('status') states = common.task_and_vm_state_from_status(statuses) vm_state, task_state = states if not vm_state and not task_state: if api_version_request.is_supported(req, '2.38'): msg = _('Invalid status value') raise exc.HTTPBadRequest(explanation=msg) return {'servers': []} search_opts['vm_state'] = vm_state # When we search by vm state, task state will return 'default'. # So we don't need task_state search_opt. if 'default' not in task_state: search_opts['task_state'] = task_state if 'changes-since' in search_opts: try: search_opts['changes-since'] = timeutils.parse_isotime( search_opts['changes-since']) except ValueError: # NOTE: This error handling is for V2.0 API to pass the # experimental jobs at the gate. V2.1 API covers this case # with JSON-Schema and it is a hard burden to apply it to # v2.0 API at this time. msg = _("Invalid filter field: changes-since.") raise exc.HTTPBadRequest(explanation=msg) if 'changes-before' in search_opts: try: search_opts['changes-before'] = timeutils.parse_isotime( search_opts['changes-before']) changes_since = search_opts.get('changes-since') if changes_since and search_opts['changes-before'] < \ search_opts['changes-since']: msg = _('The value of changes-since must be' ' less than or equal to changes-before.') raise exc.HTTPBadRequest(explanation=msg) except ValueError: msg = _("Invalid filter field: changes-before.") raise exc.HTTPBadRequest(explanation=msg) # By default, compute's get_all() will return deleted instances. # If an admin hasn't specified a 'deleted' search option, we need # to filter out deleted instances by setting the filter ourselves. # ... Unless 'changes-since' or 'changes-before' is specified, # because those will return recently deleted instances according to # the API spec. if 'deleted' not in search_opts: if 'changes-since' not in search_opts and \ 'changes-before' not in search_opts: # No 'changes-since' or 'changes-before', so we only # want non-deleted servers search_opts['deleted'] = False else: # Convert deleted filter value to a valid boolean. # Return non-deleted servers if an invalid value # is passed with deleted filter. search_opts['deleted'] = strutils.bool_from_string( search_opts['deleted'], default=False) if search_opts.get("vm_state") == ['deleted']: if context.is_admin: search_opts['deleted'] = True else: msg = _("Only administrators may list deleted instances") raise exc.HTTPForbidden(explanation=msg) if api_version_request.is_supported(req, '2.26'): for tag_filter in TAG_SEARCH_FILTERS: if tag_filter in search_opts: search_opts[tag_filter] = search_opts[ tag_filter].split(',') all_tenants = common.is_all_tenants(search_opts) # use the boolean from here on out so remove the entry from search_opts # if it's present. # NOTE(tssurya): In case we support handling down cells # we need to know further down the stack whether the 'all_tenants' # filter was passed with the true value or not, so we pass the flag # further down the stack. 
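        # Illustrative query (parameter name taken from the check above,
        # value assumed): GET /servers/detail?all_tenants=1 lists servers
        # across projects and is gated by the *:get_all_tenants policies
        # checked below.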
search_opts.pop('all_tenants', None) if 'locked' in search_opts: search_opts['locked'] = common.is_locked(search_opts) elevated = None if all_tenants: if is_detail: context.can(server_policies.SERVERS % 'detail:get_all_tenants') else: context.can(server_policies.SERVERS % 'index:get_all_tenants') elevated = context.elevated() else: # As explained in lp:#1185290, if `all_tenants` is not passed # we must ignore the `tenant_id` search option. search_opts.pop('tenant_id', None) if context.project_id: search_opts['project_id'] = context.project_id else: search_opts['user_id'] = context.user_id limit, marker = common.get_limit_and_marker(req) sort_keys, sort_dirs = common.get_sort_params(req.params) blacklist = schema.SERVER_LIST_IGNORE_SORT_KEY if api_version_request.is_supported(req, '2.73'): blacklist = schema.SERVER_LIST_IGNORE_SORT_KEY_V273 sort_keys, sort_dirs = remove_invalid_sort_keys( context, sort_keys, sort_dirs, blacklist, ('host', 'node')) expected_attrs = [] if is_detail: if api_version_request.is_supported(req, '2.16'): expected_attrs.append('services') if api_version_request.is_supported(req, '2.26'): expected_attrs.append("tags") if api_version_request.is_supported(req, '2.63'): expected_attrs.append("trusted_certs") if api_version_request.is_supported(req, '2.73'): expected_attrs.append("system_metadata") # merge our expected attrs with what the view builder needs for # showing details expected_attrs = self._view_builder.get_show_expected_attrs( expected_attrs) try: instance_list = self.compute_api.get_all(elevated or context, search_opts=search_opts, limit=limit, marker=marker, expected_attrs=expected_attrs, sort_keys=sort_keys, sort_dirs=sort_dirs, cell_down_support=cell_down_support, all_tenants=all_tenants) except exception.MarkerNotFound: msg = _('marker [%s] not found') % marker raise exc.HTTPBadRequest(explanation=msg) except exception.FlavorNotFound: LOG.debug("Flavor '%s' could not be found ", search_opts['flavor']) instance_list = objects.InstanceList() if is_detail: instance_list._context = context instance_list.fill_faults() response = self._view_builder.detail( req, instance_list, cell_down_support=cell_down_support) else: response = self._view_builder.index( req, instance_list, cell_down_support=cell_down_support) return response def _get_server(self, context, req, instance_uuid, is_detail=False, cell_down_support=False, columns_to_join=None): """Utility function for looking up an instance by uuid. :param context: request context for auth :param req: HTTP request. :param instance_uuid: UUID of the server instance to get :param is_detail: True if you plan on showing the details of the instance in the response, False otherwise. :param cell_down_support: True if the API (and caller) support returning a minimal instance construct if the relevant cell is down. :param columns_to_join: optional list of extra fields to join on the Instance object """ expected_attrs = ['flavor', 'numa_topology'] if is_detail: if api_version_request.is_supported(req, '2.26'): expected_attrs.append("tags") if api_version_request.is_supported(req, '2.63'): expected_attrs.append("trusted_certs") expected_attrs = self._view_builder.get_show_expected_attrs( expected_attrs) if columns_to_join: expected_attrs.extend(columns_to_join) instance = common.get_instance(self.compute_api, context, instance_uuid, expected_attrs=expected_attrs, cell_down_support=cell_down_support) return instance @staticmethod def _validate_network_id(net_id, network_uuids): """Validates that a requested network id. 
This method checks that the network id is in the proper UUID format. :param net_id: The network id to validate. :param network_uuids: A running list of requested network IDs that have passed validation already. :raises: webob.exc.HTTPBadRequest if validation fails """ if not uuidutils.is_uuid_like(net_id): msg = _("Bad networks format: network uuid is " "not in proper format (%s)") % net_id raise exc.HTTPBadRequest(explanation=msg) def _get_requested_networks(self, requested_networks): """Create a list of requested networks from the networks attribute.""" # Starting in the 2.37 microversion, requested_networks is either a # list or a string enum with value 'auto' or 'none'. The auto/none # values are verified via jsonschema so we don't check them again here. if isinstance(requested_networks, str): return objects.NetworkRequestList( objects=[objects.NetworkRequest( network_id=requested_networks)]) networks = [] network_uuids = [] port_uuids = [] for network in requested_networks: request = objects.NetworkRequest() try: # fixed IP address is optional # if the fixed IP address is not provided then # it will use one of the available IP address from the network request.address = network.get('fixed_ip', None) request.port_id = network.get('port', None) request.tag = network.get('tag', None) if request.port_id: if request.port_id in port_uuids: msg = _( "Port ID '%(port)s' was specified twice: you " "cannot attach a port multiple times." ) % { "port": request.port_id, } raise exc.HTTPBadRequest(explanation=msg) if request.address is not None: msg = _( "Specified Fixed IP '%(addr)s' cannot be used " "with port '%(port)s': the two cannot be " "specified together." ) % { "addr": request.address, "port": request.port_id, } raise exc.HTTPBadRequest(explanation=msg) request.network_id = None port_uuids.append(request.port_id) else: request.network_id = network['uuid'] self._validate_network_id( request.network_id, network_uuids) network_uuids.append(request.network_id) networks.append(request) except KeyError as key: expl = _('Bad network format: missing %s') % key raise exc.HTTPBadRequest(explanation=expl) except TypeError: expl = _('Bad networks format') raise exc.HTTPBadRequest(explanation=expl) return objects.NetworkRequestList(objects=networks) @wsgi.expected_errors(404) @validation.query_schema(schema.show_query) def show(self, req, id): """Returns server details by server id.""" context = req.environ['nova.context'] cell_down_support = api_version_request.is_supported(req, '2.69') show_server_groups = api_version_request.is_supported(req, '2.71') instance = self._get_server( context, req, id, is_detail=True, columns_to_join=['services'], cell_down_support=cell_down_support) context.can(server_policies.SERVERS % 'show', target={'project_id': instance.project_id}) return self._view_builder.show( req, instance, cell_down_support=cell_down_support, show_server_groups=show_server_groups) @staticmethod def _process_bdms_for_create( context, target, server_dict, create_kwargs): """Processes block_device_mapping(_v2) req parameters for server create :param context: The nova auth request context :param target: The target dict for ``context.can`` policy checks :param server_dict: The POST /servers request body "server" entry :param create_kwargs: dict that gets populated by this method and passed to nova.compute.api.API.create() :raises: webob.exc.HTTPBadRequest if the request parameters are invalid :raises: nova.exception.Forbidden if a policy check fails """ block_device_mapping_legacy = 
server_dict.get('block_device_mapping', []) block_device_mapping_v2 = server_dict.get('block_device_mapping_v2', []) if block_device_mapping_legacy and block_device_mapping_v2: expl = _('Using different block_device_mapping syntaxes ' 'is not allowed in the same request.') raise exc.HTTPBadRequest(explanation=expl) if block_device_mapping_legacy: for bdm in block_device_mapping_legacy: if 'delete_on_termination' in bdm: bdm['delete_on_termination'] = strutils.bool_from_string( bdm['delete_on_termination']) create_kwargs[ 'block_device_mapping'] = block_device_mapping_legacy # Sets the legacy_bdm flag if we got a legacy block device mapping. create_kwargs['legacy_bdm'] = True elif block_device_mapping_v2: # Have to check whether --image is given, see bug 1433609 image_href = server_dict.get('imageRef') image_uuid_specified = image_href is not None try: block_device_mapping = [ block_device.BlockDeviceDict.from_api(bdm_dict, image_uuid_specified) for bdm_dict in block_device_mapping_v2] except exception.InvalidBDMFormat as e: raise exc.HTTPBadRequest(explanation=e.format_message()) create_kwargs['block_device_mapping'] = block_device_mapping # Unset the legacy_bdm flag if we got a block device mapping. create_kwargs['legacy_bdm'] = False block_device_mapping = create_kwargs.get("block_device_mapping") if block_device_mapping: context.can(server_policies.SERVERS % 'create:attach_volume', target) def _process_networks_for_create( self, context, target, server_dict, create_kwargs): """Processes networks request parameter for server create :param context: The nova auth request context :param target: The target dict for ``context.can`` policy checks :param server_dict: The POST /servers request body "server" entry :param create_kwargs: dict that gets populated by this method and passed to nova.compute.api.API.create() :raises: webob.exc.HTTPBadRequest if the request parameters are invalid :raises: nova.exception.Forbidden if a policy check fails """ requested_networks = server_dict.get('networks', None) if requested_networks is not None: requested_networks = self._get_requested_networks( requested_networks) # Skip policy check for 'create:attach_network' if there is no # network allocation request. if requested_networks and len(requested_networks) and \ not requested_networks.no_allocate: context.can(server_policies.SERVERS % 'create:attach_network', target) create_kwargs['requested_networks'] = requested_networks @staticmethod def _validate_host_availability_zone(context, availability_zone, host): """Ensure the host belongs in the availability zone. This is slightly tricky and it's probably worth recapping how host aggregates and availability zones are related before reading. Hosts can belong to zero or more host aggregates, but they will always belong to exactly one availability zone. If the user has set the availability zone key on one of the host aggregates that the host is a member of then the host will belong to this availability zone. If the user has not set the availability zone key on any of the host aggregates that the host is a member of or the host is not a member of any host aggregates, then the host will belong to the default availability zone. Setting the availability zone key on more than one of host aggregates that the host is a member of is an error and will be rejected by the API. Given the above, our host-availability zone check needs to vary behavior based on whether we're requesting the default availability zone or not. 
If we are not, then we simply ask "does this host belong to a host aggregate and, if so, do any of the host aggregates have the requested availability zone metadata set". By comparison, if we *are* requesting the default availability zone then we want to ask the inverse, or "does this host not belong to a host aggregate or, if it does, is the availability zone information unset (or, naughty naughty, set to the default) for each of the host aggregates". If both cases, if the answer is no then we warn about the mismatch and then use the actual availability zone of the host to avoid mismatches. :param context: The nova auth request context :param availability_zone: The name of the requested availability zone :param host: The name of the requested host :returns: The availability zone that should actually be used for the request """ aggregates = objects.AggregateList.get_by_host(context, host=host) if not aggregates: # a host is assigned to the default availability zone if it is not # a member of any host aggregates if availability_zone == CONF.default_availability_zone: return availability_zone LOG.warning( "Requested availability zone '%s' but forced host '%s' " "does not belong to any availability zones; ignoring " "requested availability zone to avoid bug #1934770", availability_zone, host, ) return None # only one host aggregate will have the availability_zone field set so # use the first non-null value host_availability_zone = next( (a.availability_zone for a in aggregates if a.availability_zone), None, ) if availability_zone == host_availability_zone: # if there's an exact match, use what the user requested return availability_zone if ( availability_zone == CONF.default_availability_zone and host_availability_zone is None ): # special case the default availability zone since this won't (or # rather shouldn't) be explicitly stored on any host aggregate return availability_zone # no match, so use the host's availability zone information, if any LOG.warning( "Requested availability zone '%s' but forced host '%s' " "does not belong to this availability zone; overwriting " "requested availability zone to avoid bug #1934770", availability_zone, host, ) return None @staticmethod def _process_hosts_for_create( context, target, server_dict, create_kwargs, host, node): """Processes hosts request parameter for server create :param context: The nova auth request context :param target: The target dict for ``context.can`` policy checks :param server_dict: The POST /servers request body "server" entry :param create_kwargs: dict that gets populated by this method and passed to nova.compute.api.API.create() :param host: Forced host of availability_zone :param node: Forced node of availability_zone :raise: webob.exc.HTTPBadRequest if the request parameters are invalid :raise: nova.exception.Forbidden if a policy check fails """ requested_host = server_dict.get('host') requested_hypervisor_hostname = server_dict.get('hypervisor_hostname') if requested_host or requested_hypervisor_hostname: # If the policy check fails, this will raise Forbidden exception. 
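        # Illustrative request fields handled here (host names assumed):
        #   {"server": {..., "host": "compute-1",
        #               "hypervisor_hostname": "compute-1.example.org"}}
        # These cannot be combined with the zone:host:node availability zone
        # form, as enforced below.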
context.can(server_policies.REQUESTED_DESTINATION, target=target) if host or node: msg = _("One mechanism with host and/or " "hypervisor_hostname and another mechanism " "with zone:host:node are mutually exclusive.") raise exc.HTTPBadRequest(explanation=msg) create_kwargs['requested_host'] = requested_host create_kwargs['requested_hypervisor_hostname'] = ( requested_hypervisor_hostname) @wsgi.response(202) @wsgi.expected_errors((400, 403, 409)) @validation.schema(schema.create_v20, '2.0', '2.0') @validation.schema(schema.create, '2.1', '2.18') @validation.schema(schema.create_v219, '2.19', '2.31') @validation.schema(schema.create_v232, '2.32', '2.32') @validation.schema(schema.create_v233, '2.33', '2.36') @validation.schema(schema.create_v237, '2.37', '2.41') @validation.schema(schema.create_v242, '2.42', '2.51') @validation.schema(schema.create_v252, '2.52', '2.56') @validation.schema(schema.create_v257, '2.57', '2.62') @validation.schema(schema.create_v263, '2.63', '2.66') @validation.schema(schema.create_v267, '2.67', '2.73') @validation.schema(schema.create_v274, '2.74', '2.89') @validation.schema(schema.create_v290, '2.90', '2.93') @validation.schema(schema.create_v294, '2.94') def create(self, req, body): """Creates a new server for a given user.""" context = req.environ['nova.context'] server_dict = body['server'] password = self._get_server_admin_password(server_dict) name = common.normalize_name(server_dict['name']) description = name if api_version_request.is_supported(req, '2.19'): description = server_dict.get('description') hostname = None if api_version_request.is_supported(req, '2.90'): hostname = server_dict.get('hostname') # Arguments to be passed to instance create function create_kwargs = {} create_kwargs['user_data'] = server_dict.get('user_data') # NOTE(alex_xu): The v2.1 API compat mode, we strip the spaces for # keypair create. But we didn't strip spaces at here for # backward-compatible some users already created keypair and name with # leading/trailing spaces by legacy v2 API. create_kwargs['key_name'] = server_dict.get('key_name') create_kwargs['config_drive'] = server_dict.get('config_drive') security_groups = server_dict.get('security_groups') if security_groups is not None: create_kwargs['security_groups'] = [ sg['name'] for sg in security_groups if sg.get('name')] create_kwargs['security_groups'] = list( set(create_kwargs['security_groups'])) scheduler_hints = {} if 'os:scheduler_hints' in body: scheduler_hints = body['os:scheduler_hints'] elif 'OS-SCH-HNT:scheduler_hints' in body: scheduler_hints = body['OS-SCH-HNT:scheduler_hints'] create_kwargs['scheduler_hints'] = scheduler_hints # min_count and max_count are optional. If they exist, they may come # in as strings. Verify that they are valid integers and > 0. # Also, we want to default 'min_count' to 1, and default # 'max_count' to be 'min_count'. 
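        # Illustrative values (assumed): {"min_count": 2, "max_count": 5}
        # boots between two and five servers; a min_count greater than
        # max_count is rejected with 400 below.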
min_count = int(server_dict.get('min_count', 1)) max_count = int(server_dict.get('max_count', min_count)) if min_count > max_count: msg = _('min_count must be <= max_count') raise exc.HTTPBadRequest(explanation=msg) create_kwargs['min_count'] = min_count create_kwargs['max_count'] = max_count availability_zone = server_dict.pop("availability_zone", None) if api_version_request.is_supported(req, '2.52'): create_kwargs['tags'] = server_dict.get('tags') helpers.translate_attributes(helpers.CREATE, server_dict, create_kwargs) target = { 'project_id': context.project_id, 'user_id': context.user_id, 'availability_zone': availability_zone} context.can(server_policies.SERVERS % 'create', target) # Skip policy check for 'create:trusted_certs' if no trusted # certificate IDs were provided. trusted_certs = server_dict.get('trusted_image_certificates', None) if trusted_certs: create_kwargs['trusted_certs'] = trusted_certs context.can(server_policies.SERVERS % 'create:trusted_certs', target=target) parse_az = self.compute_api.parse_availability_zone try: availability_zone, host, node = parse_az(context, availability_zone) except exception.InvalidInput as err: raise exc.HTTPBadRequest(explanation=str(err)) if host or node: context.can(server_policies.SERVERS % 'create:forced_host', target=target) availability_zone = self._validate_host_availability_zone( context, availability_zone, host) if api_version_request.is_supported(req, '2.74'): self._process_hosts_for_create(context, target, server_dict, create_kwargs, host, node) self._process_bdms_for_create( context, target, server_dict, create_kwargs) image_uuid = self._image_from_req_data(server_dict, create_kwargs) self._process_networks_for_create( context, target, server_dict, create_kwargs) flavor_id = self._flavor_id_from_req_data(body) try: flavor = flavors.get_flavor_by_flavor_id( flavor_id, ctxt=context, read_deleted="no") supports_multiattach = common.supports_multiattach_volume(req) supports_port_resource_request = \ common.supports_port_resource_request(req) instances, resv_id = self.compute_api.create( context, flavor, image_uuid, display_name=name, display_description=description, hostname=hostname, availability_zone=availability_zone, forced_host=host, forced_node=node, metadata=server_dict.get('metadata', {}), admin_password=password, check_server_group_quota=True, supports_multiattach=supports_multiattach, supports_port_resource_request=supports_port_resource_request, **create_kwargs) except exception.OverQuota as error: raise exc.HTTPForbidden( explanation=error.format_message()) except exception.ImageNotFound: msg = _("Can not find requested image") raise exc.HTTPBadRequest(explanation=msg) except exception.KeypairNotFound: msg = _("Invalid key_name provided.") raise exc.HTTPBadRequest(explanation=msg) except exception.ConfigDriveInvalidValue: msg = _("Invalid config_drive provided.") raise exc.HTTPBadRequest(explanation=msg) except (exception.BootFromVolumeRequiredForZeroDiskFlavor, exception.ExternalNetworkAttachForbidden) as error: raise exc.HTTPForbidden(explanation=error.format_message()) except messaging.RemoteError as err: msg = "%(err_type)s: %(err_msg)s" % {'err_type': err.exc_type, 'err_msg': err.value} raise exc.HTTPBadRequest(explanation=msg) except UnicodeDecodeError as error: msg = "UnicodeError: %s" % error raise exc.HTTPBadRequest(explanation=msg) except (exception.ImageNotActive, exception.ImageBadRequest, exception.ImageNotAuthorized, exception.ImageUnacceptable, exception.FixedIpNotFoundForAddress, 
exception.FlavorNotFound, exception.InvalidMetadata, exception.InvalidVolume, exception.VolumeNotFound, exception.MismatchVolumeAZException, exception.MultiplePortsNotApplicable, exception.InvalidFixedIpAndMaxCountRequest, exception.AmbiguousHostnameForMultipleInstances, exception.InstanceUserDataMalformed, exception.PortNotFound, exception.FixedIpAlreadyInUse, exception.SecurityGroupNotFound, exception.PortRequiresFixedIP, exception.NetworkRequiresSubnet, exception.NetworkNotFound, exception.InvalidBDM, exception.InvalidBDMSnapshot, exception.InvalidBDMVolume, exception.InvalidBDMImage, exception.InvalidBDMBootSequence, exception.InvalidBDMLocalsLimit, exception.InvalidBDMVolumeNotBootable, exception.InvalidBDMEphemeralSize, exception.InvalidBDMFormat, exception.InvalidBDMSwapSize, exception.InvalidBDMDiskBus, exception.VolumeTypeNotFound, exception.AutoDiskConfigDisabledByImage, exception.InstanceGroupNotFound, exception.SnapshotNotFound, exception.UnableToAutoAllocateNetwork, exception.MultiattachNotSupportedOldMicroversion, exception.CertificateValidationFailed, exception.CreateWithPortResourceRequestOldVersion, exception.DeviceProfileError, exception.ComputeHostNotFound, exception.ForbiddenPortsWithAccelerator, exception.ForbiddenWithRemoteManagedPorts, exception.ExtendedResourceRequestOldCompute, ) as error: raise exc.HTTPBadRequest(explanation=error.format_message()) except INVALID_FLAVOR_IMAGE_EXCEPTIONS as error: raise exc.HTTPBadRequest(explanation=error.format_message()) except (exception.PortInUse, exception.InstanceExists, exception.NetworkAmbiguous, exception.NoUniqueMatch, exception.MixedInstanceNotSupportByComputeService) as error: raise exc.HTTPConflict(explanation=error.format_message()) # If the caller wanted a reservation_id, return it if server_dict.get('return_reservation_id', False): return wsgi.ResponseObject({'reservation_id': resv_id}) server = self._view_builder.create(req, instances[0]) if CONF.api.enable_instance_password: server['server']['adminPass'] = password robj = wsgi.ResponseObject(server) return self._add_location(robj) def _delete(self, context, req, instance_uuid): instance = self._get_server(context, req, instance_uuid) context.can(server_policies.SERVERS % 'delete', target={'user_id': instance.user_id, 'project_id': instance.project_id}) if CONF.reclaim_instance_interval: try: self.compute_api.soft_delete(context, instance) except exception.InstanceInvalidState: # Note(yufang521247): instance which has never been active # is not allowed to be soft_deleted. Thus we have to call # delete() to clean up the instance. 
self.compute_api.delete(context, instance) else: self.compute_api.delete(context, instance) @wsgi.expected_errors(404) @validation.schema(schema.update_v20, '2.0', '2.0') @validation.schema(schema.update, '2.1', '2.18') @validation.schema(schema.update_v219, '2.19', '2.89') @validation.schema(schema.update_v290, '2.90', '2.93') @validation.schema(schema.update_v294, '2.94') def update(self, req, id, body): """Update server then pass on to version-specific controller.""" ctxt = req.environ['nova.context'] update_dict = {} instance = self._get_server(ctxt, req, id, is_detail=True) ctxt.can(server_policies.SERVERS % 'update', target={'user_id': instance.user_id, 'project_id': instance.project_id}) show_server_groups = api_version_request.is_supported(req, '2.71') server = body['server'] if 'name' in server: update_dict['display_name'] = common.normalize_name( server['name']) if 'description' in server: # This is allowed to be None (remove description) update_dict['display_description'] = server['description'] if 'hostname' in server: update_dict['hostname'] = server['hostname'] helpers.translate_attributes(helpers.UPDATE, server, update_dict) try: instance = self.compute_api.update_instance( ctxt, instance, update_dict) # NOTE(gmann): Starting from microversion 2.75, PUT and Rebuild # API response will show all attributes like GET /servers API. show_all_attributes = api_version_request.is_supported(req, '2.75') extend_address = show_all_attributes show_AZ = show_all_attributes show_config_drive = show_all_attributes show_keypair = show_all_attributes show_srv_usg = show_all_attributes show_sec_grp = show_all_attributes show_extended_status = show_all_attributes show_extended_volumes = show_all_attributes # NOTE(gmann): Below attributes need to be added in response # if respective policy allows.So setting these as None # to perform the policy check in view builder. show_extended_attr = None if show_all_attributes else False show_host_status = None if show_all_attributes else False return self._view_builder.show( req, instance, extend_address=extend_address, show_AZ=show_AZ, show_config_drive=show_config_drive, show_extended_attr=show_extended_attr, show_host_status=show_host_status, show_keypair=show_keypair, show_srv_usg=show_srv_usg, show_sec_grp=show_sec_grp, show_extended_status=show_extended_status, show_extended_volumes=show_extended_volumes, show_server_groups=show_server_groups) except exception.InstanceNotFound: msg = _("Instance could not be found") raise exc.HTTPNotFound(explanation=msg) # NOTE(gmann): Returns 204 for backwards compatibility but should be 202 # for representing async API as this API just accepts the request and # request hypervisor driver to complete the same in async mode. 
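    # Illustrative action request handled below (path format assumed):
    #   POST /servers/{server_id}/action with body {"confirmResize": null}
    # returns 204 on success even though the confirmation itself completes
    # asynchronously.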
@wsgi.response(204) @wsgi.expected_errors((400, 404, 409)) @wsgi.action('confirmResize') @validation.schema(schema.confirm_resize) @validation.response_body_schema(schema.confirm_resize_response) def _action_confirm_resize(self, req, id, body): context = req.environ['nova.context'] instance = self._get_server(context, req, id) context.can(server_policies.SERVERS % 'confirm_resize', target={'project_id': instance.project_id}) try: self.compute_api.confirm_resize(context, instance) except exception.MigrationNotFound: msg = _("Instance has not been resized.") raise exc.HTTPBadRequest(explanation=msg) except ( exception.InstanceIsLocked, exception.ServiceUnavailable, ) as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'confirmResize', id) @wsgi.response(202) @wsgi.expected_errors((400, 404, 409)) @wsgi.action('revertResize') @validation.schema(schema.revert_resize) @validation.response_body_schema(schema.revert_resize_response) def _action_revert_resize(self, req, id, body): context = req.environ['nova.context'] instance = self._get_server(context, req, id) context.can(server_policies.SERVERS % 'revert_resize', target={'project_id': instance.project_id}) try: self.compute_api.revert_resize(context, instance) except exception.MigrationNotFound: msg = _("Instance has not been resized.") raise exc.HTTPBadRequest(explanation=msg) except exception.FlavorNotFound: msg = _("Flavor used by the instance could not be found.") raise exc.HTTPBadRequest(explanation=msg) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'revertResize', id) @wsgi.response(202) @wsgi.expected_errors((404, 409)) @wsgi.action('reboot') @validation.schema(schema.reboot) @validation.response_body_schema(schema.reboot_response) def _action_reboot(self, req, id, body): reboot_type = body['reboot']['type'].upper() context = req.environ['nova.context'] instance = self._get_server(context, req, id) context.can(server_policies.SERVERS % 'reboot', target={'project_id': instance.project_id}) try: self.compute_api.reboot(context, instance, reboot_type) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'reboot', id) def _resize(self, req, instance_id, flavor_id, auto_disk_config=None): """Begin the resize process with given instance/flavor.""" context = req.environ["nova.context"] instance = self._get_server(context, req, instance_id, columns_to_join=['services', 'resources', 'pci_requests', 'pci_devices', 'trusted_certs', 'vcpu_model']) context.can(server_policies.SERVERS % 'resize', target={'user_id': instance.user_id, 'project_id': instance.project_id}) try: self.compute_api.resize(context, instance, flavor_id, auto_disk_config=auto_disk_config) except exception.OverQuota as error: raise exc.HTTPForbidden( explanation=error.format_message()) except ( exception.InstanceIsLocked, exception.InstanceNotReady, exception.MixedInstanceNotSupportByComputeService, exception.ServiceUnavailable, ) as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'resize', 
instance_id) except exception.ImageNotAuthorized: msg = _("You are not authorized to access the image " "the instance was started with.") raise exc.HTTPUnauthorized(explanation=msg) except exception.ImageNotFound: msg = _("Image that the instance was started " "with could not be found.") raise exc.HTTPBadRequest(explanation=msg) except ( exception.AutoDiskConfigDisabledByImage, exception.CannotResizeDisk, exception.CannotResizeToSameFlavor, exception.FlavorNotFound, exception.ExtendedResourceRequestOldCompute, ) as e: raise exc.HTTPBadRequest(explanation=e.format_message()) except INVALID_FLAVOR_IMAGE_EXCEPTIONS as e: raise exc.HTTPBadRequest(explanation=e.format_message()) except exception.Invalid: msg = _("Invalid instance image.") raise exc.HTTPBadRequest(explanation=msg) except ( exception.ForbiddenSharesNotSupported, exception.ForbiddenWithShare) as e: raise exc.HTTPConflict(explanation=e.format_message()) @wsgi.response(204) @wsgi.expected_errors((404, 409)) def delete(self, req, id): """Destroys a server.""" try: self._delete(req.environ['nova.context'], req, id) except exception.InstanceNotFound: msg = _("Instance could not be found") raise exc.HTTPNotFound(explanation=msg) except (exception.InstanceIsLocked, exception.AllocationDeleteFailed) as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'delete', id) def _image_from_req_data(self, server_dict, create_kwargs): """Get image data from the request or raise appropriate exceptions. The field imageRef is mandatory when no block devices have been defined and must be a proper uuid when present. """ image_href = server_dict.get('imageRef') if not image_href and create_kwargs.get('block_device_mapping'): return '' elif image_href: return image_href else: msg = _("Missing imageRef attribute") raise exc.HTTPBadRequest(explanation=msg) def _flavor_id_from_req_data(self, data): flavor_ref = data['server']['flavorRef'] return common.get_id_from_href(flavor_ref) @wsgi.response(202) @wsgi.expected_errors((400, 401, 403, 404, 409)) @wsgi.action('resize') @validation.schema(schema.resize) @validation.response_body_schema(schema.resize_response) def _action_resize(self, req, id, body): """Resizes a given instance to the flavor size requested.""" resize_dict = body['resize'] flavor_ref = str(resize_dict["flavorRef"]) kwargs = {} helpers.translate_attributes(helpers.RESIZE, resize_dict, kwargs) self._resize(req, id, flavor_ref, **kwargs) @wsgi.response(202) @wsgi.expected_errors((400, 403, 404, 409)) @wsgi.action('rebuild') @validation.schema(schema.rebuild_v20, '2.0', '2.0') @validation.schema(schema.rebuild, '2.1', '2.18') @validation.schema(schema.rebuild_v219, '2.19', '2.53') @validation.schema(schema.rebuild_v254, '2.54', '2.56') @validation.schema(schema.rebuild_v257, '2.57', '2.62') @validation.schema(schema.rebuild_v263, '2.63', '2.89') @validation.schema(schema.rebuild_v290, '2.90', '2.93') @validation.schema(schema.rebuild_v294, '2.94') @validation.response_body_schema(schema.rebuild_response, '2.0', '2.8') @validation.response_body_schema( schema.rebuild_response_v29, '2.9', '2.18') @validation.response_body_schema( schema.rebuild_response_v219, '2.19', '2.25') @validation.response_body_schema( schema.rebuild_response_v226, '2.26', '2.45') @validation.response_body_schema( schema.rebuild_response_v246, '2.46', '2.53') @validation.response_body_schema( schema.rebuild_response_v254, '2.54', '2.56') 
@validation.response_body_schema( schema.rebuild_response_v257, '2.57', '2.62') @validation.response_body_schema( schema.rebuild_response_v263, '2.63', '2.70') @validation.response_body_schema( schema.rebuild_response_v271, '2.71', '2.72') @validation.response_body_schema( schema.rebuild_response_v273, '2.73', '2.74') @validation.response_body_schema( schema.rebuild_response_v275, '2.75', '2.95') @validation.response_body_schema( schema.rebuild_response_v296, '2.96', '2.97') @validation.response_body_schema( schema.rebuild_response_v298, '2.98', '2.99') @validation.response_body_schema( schema.rebuild_response_v2100, '2.100') def _action_rebuild(self, req, id, body): """Rebuild an instance with the given attributes.""" rebuild_dict = body['rebuild'] image_href = rebuild_dict["imageRef"] password = self._get_server_admin_password(rebuild_dict) context = req.environ['nova.context'] instance = self._get_server(context, req, id, columns_to_join=['trusted_certs', 'pci_requests', 'pci_devices', 'resources', 'migration_context']) target = {'user_id': instance.user_id, 'project_id': instance.project_id} context.can(server_policies.SERVERS % 'rebuild', target=target) attr_map = { 'name': 'display_name', 'description': 'display_description', 'metadata': 'metadata', } kwargs = {} helpers.translate_attributes(helpers.REBUILD, rebuild_dict, kwargs) if ( api_version_request.is_supported(req, '2.54') and 'key_name' in rebuild_dict ): kwargs['key_name'] = rebuild_dict.get('key_name') # If user_data is not specified, we don't include it in kwargs because # we don't want to overwrite the existing user_data. include_user_data = api_version_request.is_supported(req, '2.57') if include_user_data and 'user_data' in rebuild_dict: kwargs['user_data'] = rebuild_dict['user_data'] # Skip policy check for 'rebuild:trusted_certs' if no trusted # certificate IDs were provided. if ( api_version_request.is_supported(req, '2.63') and # Note that this is different from server create since with # rebuild a user can unset/reset the trusted certs by # specifying trusted_image_certificates=None, similar to # key_name. 
'trusted_image_certificates' in rebuild_dict ): kwargs['trusted_certs'] = rebuild_dict.get( 'trusted_image_certificates') context.can(server_policies.SERVERS % 'rebuild:trusted_certs', target=target) if ( api_version_request.is_supported(req, '2.90') and 'hostname' in rebuild_dict ): kwargs['hostname'] = rebuild_dict['hostname'] if api_version_request.is_supported(req, '2.93'): kwargs['reimage_boot_volume'] = True for request_attribute, instance_attribute in attr_map.items(): try: if request_attribute == 'name': kwargs[instance_attribute] = common.normalize_name( rebuild_dict[request_attribute]) else: kwargs[instance_attribute] = rebuild_dict[ request_attribute] except (KeyError, TypeError): pass try: self.compute_api.rebuild(context, instance, image_href, password, **kwargs) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'rebuild', id) except exception.InstanceNotFound: msg = _("Instance could not be found") raise exc.HTTPNotFound(explanation=msg) except exception.ImageNotFound: msg = _("Cannot find image for rebuild") raise exc.HTTPBadRequest(explanation=msg) except exception.KeypairNotFound: msg = _("Invalid key_name provided.") raise exc.HTTPBadRequest(explanation=msg) except exception.OverQuota as error: raise exc.HTTPForbidden(explanation=error.format_message()) except (exception.AutoDiskConfigDisabledByImage, exception.CertificateValidationFailed, exception.ImageNotActive, exception.ImageUnacceptable, exception.InvalidMetadata, exception.InvalidArchitectureName, exception.InvalidVolume, ) as error: raise exc.HTTPBadRequest(explanation=error.format_message()) except INVALID_FLAVOR_IMAGE_EXCEPTIONS as error: raise exc.HTTPBadRequest(explanation=error.format_message()) instance = self._get_server(context, req, id, is_detail=True) # NOTE(liuyulong): set the new key_name for the API response. # from microversion 2.54 onwards. show_keypair = api_version_request.is_supported(req, '2.54') show_server_groups = api_version_request.is_supported(req, '2.71') # NOTE(gmann): Starting from microversion 2.75, PUT and Rebuild # API response will show all attributes like GET /servers API. show_all_attributes = api_version_request.is_supported(req, '2.75') extend_address = show_all_attributes show_AZ = show_all_attributes show_config_drive = show_all_attributes show_srv_usg = show_all_attributes show_sec_grp = show_all_attributes show_extended_status = show_all_attributes show_extended_volumes = show_all_attributes # NOTE(gmann): Below attributes need to be added in response # if respective policy allows.So setting these as None # to perform the policy check in view builder. show_extended_attr = None if show_all_attributes else False show_host_status = None if show_all_attributes else False view = self._view_builder.show( req, instance, extend_address=extend_address, show_AZ=show_AZ, show_config_drive=show_config_drive, show_extended_attr=show_extended_attr, show_host_status=show_host_status, show_keypair=show_keypair, show_srv_usg=show_srv_usg, show_sec_grp=show_sec_grp, show_extended_status=show_extended_status, show_extended_volumes=show_extended_volumes, show_server_groups=show_server_groups, # NOTE(gmann): user_data has been added in response (by code at # the end of this API method) since microversion 2.57 so tell # view builder not to include it. 
show_user_data=False) # Add on the admin_password attribute since the view doesn't do it # unless instance passwords are disabled if CONF.api.enable_instance_password: view['server']['adminPass'] = password if include_user_data: view['server']['user_data'] = instance.user_data robj = wsgi.ResponseObject(view) return self._add_location(robj) @wsgi.response(202) @wsgi.expected_errors((400, 403, 404, 409)) @wsgi.action('createImage') @validation.schema(schema.create_image, '2.0', '2.0') @validation.schema(schema.create_image, '2.1') @validation.response_body_schema( schema.create_image_response, '2.0', '2.44') @validation.response_body_schema(schema.create_image_response_v245, '2.45') def _action_create_image(self, req, id, body): """Snapshot a server instance.""" context = req.environ['nova.context'] instance = self._get_server(context, req, id) target = {'project_id': instance.project_id} context.can(server_policies.SERVERS % 'create_image', target=target) entity = body["createImage"] image_name = common.normalize_name(entity["name"]) metadata = entity.get('metadata', {}) # Starting from microversion 2.39 we don't check quotas on createImage if not api_version_request.is_supported(req, '2.39'): common.check_img_metadata_properties_quota(context, metadata) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) try: if compute_utils.is_volume_backed_instance(context, instance, bdms): context.can(server_policies.SERVERS % 'create_image:allow_volume_backed', target=target) image = self.compute_api.snapshot_volume_backed( context, instance, image_name, extra_properties= metadata) else: image = self.compute_api.snapshot(context, instance, image_name, extra_properties=metadata) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'createImage', id) except exception.InstanceQuiesceFailed as err: raise exc.HTTPConflict(explanation=err.format_message()) except exception.Invalid as err: raise exc.HTTPBadRequest(explanation=err.format_message()) except exception.OverQuota as e: raise exc.HTTPForbidden(explanation=e.format_message()) # Starting with microversion 2.45 we return a response body containing # the snapshot image id without the Location header. if api_version_request.is_supported(req, '2.45'): return {'image_id': image['id']} # build location of newly-created image entity image_id = str(image['id']) image_ref = glance.API().generate_image_url(image_id, context) resp = webob.Response(status_int=202) resp.headers['Location'] = image_ref return resp def _get_server_admin_password(self, server): """Determine the admin password for a server on creation.""" if 'adminPass' in server: password = server['adminPass'] else: password = utils.generate_password() return password def _get_server_search_options(self, req): """Return server search options allowed by non-admin.""" # NOTE(mriedem): all_tenants is admin-only by default but because of # tight-coupling between this method, the remove_invalid_options method # and how _get_servers uses them, we include all_tenants here but it # will be removed later for non-admins. Fixing this would be nice but # probably not trivial. 
opt_list = ('reservation_id', 'name', 'status', 'image', 'flavor', 'ip', 'changes-since', 'all_tenants') if api_version_request.is_supported(req, '2.5'): opt_list += ('ip6',) if api_version_request.is_supported(req, '2.26'): opt_list += TAG_SEARCH_FILTERS if api_version_request.is_supported(req, '2.66'): opt_list += ('changes-before',) if api_version_request.is_supported(req, '2.73'): opt_list += ('locked',) if api_version_request.is_supported(req, '2.83'): opt_list += ('availability_zone', 'config_drive', 'key_name', 'created_at', 'launched_at', 'terminated_at', 'power_state', 'task_state', 'vm_state', 'progress', 'user_id',) if api_version_request.is_supported(req, '2.90'): opt_list += ('hostname',) return opt_list def _get_instance(self, context, instance_uuid): try: attrs = ['system_metadata', 'metadata'] mapping = objects.InstanceMapping.get_by_instance_uuid( context, instance_uuid) nova_context.set_target_cell(context, mapping.cell_mapping) return objects.Instance.get_by_uuid( context, instance_uuid, expected_attrs=attrs) except (exception.InstanceNotFound, exception.InstanceMappingNotFound) as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) @wsgi.response(202) @wsgi.expected_errors((404, 409)) @wsgi.action('os-start') @validation.schema(schema.start_server) @validation.response_body_schema(schema.start_server_response) def _start_server(self, req, id, body): """Start an instance.""" context = req.environ['nova.context'] instance = self._get_instance(context, id) context.can(server_policies.SERVERS % 'start', target={'user_id': instance.user_id, 'project_id': instance.project_id}) try: self.compute_api.start(context, instance) except (exception.InstanceNotReady, exception.InstanceIsLocked) as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'start', id) @wsgi.response(202) @wsgi.expected_errors((404, 409)) @wsgi.action('os-stop') @validation.schema(schema.stop_server) @validation.response_body_schema(schema.stop_server_response) def _stop_server(self, req, id, body): """Stop an instance.""" context = req.environ['nova.context'] instance = self._get_instance(context, id) context.can(server_policies.SERVERS % 'stop', target={'user_id': instance.user_id, 'project_id': instance.project_id}) try: self.compute_api.stop(context, instance) except (exception.InstanceNotReady, exception.InstanceIsLocked) as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state( state_error, 'stop', id ) @wsgi.api_version("2.17") @wsgi.response(202) @wsgi.expected_errors((400, 404, 409)) @wsgi.action('trigger_crash_dump') @validation.schema(schema.trigger_crash_dump) @validation.response_body_schema(schema.trigger_crash_dump_response) def _action_trigger_crash_dump(self, req, id, body): """Trigger crash dump in an instance""" context = req.environ['nova.context'] instance = self._get_instance(context, id) context.can(server_policies.SERVERS % 'trigger_crash_dump', target={'user_id': instance.user_id, 'project_id': instance.project_id}) try: self.compute_api.trigger_crash_dump(context, instance) except (exception.InstanceNotReady, exception.InstanceIsLocked) as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state( state_error, 
'trigger_crash_dump', id ) def remove_invalid_options(context, search_options, allowed_search_options): """Remove search options that are not permitted unless policy allows.""" if context.can(server_policies.SERVERS % 'allow_all_filters', fatal=False): # Only remove parameters for sorting and pagination for key in PAGING_SORTING_PARAMS: search_options.pop(key, None) return # Otherwise, strip out all unknown options unknown_options = [opt for opt in search_options if opt not in allowed_search_options] if unknown_options: LOG.debug("Removing options '%s' from query", ", ".join(unknown_options)) for opt in unknown_options: search_options.pop(opt, None) def remove_invalid_sort_keys(context, sort_keys, sort_dirs, blacklist, admin_only_fields): key_list = copy.deepcopy(sort_keys) for key in key_list: # NOTE(Kevin Zheng): We intend to remove any sort_key found in the # blacklist together with its corresponding sort_dir. Because the # current implementation does not require sort_keys and sort_dirs to # be provided in pairs, there may be fewer sort_dirs than sort_keys; # to avoid an IndexError we only pop a sort_dir when one exists at # the corresponding sort_key index. if key in blacklist: if len(sort_dirs) > sort_keys.index(key): sort_dirs.pop(sort_keys.index(key)) sort_keys.pop(sort_keys.index(key)) elif key in admin_only_fields and not context.is_admin: msg = _("Only administrators can sort servers " "by %s") % key raise exc.HTTPForbidden(explanation=msg) return sort_keys, sort_dirs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/services.py0000664000175000017500000005372600000000000022143 0ustar00zuulzuul00000000000000# Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
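# ---------------------------------------------------------------------------
# Editor's note: a minimal, standalone sketch of the sort-key filtering that
# remove_invalid_sort_keys() in servers.py above performs. The helper below
# is hypothetical (it is not part of Nova) and omits the policy/admin-only
# checks; it only illustrates how a blacklisted sort_key is dropped together
# with its paired sort_dir, even when fewer sort_dirs than sort_keys were
# supplied.
def _drop_blacklisted_sort_keys(sort_keys, sort_dirs, blacklist):
    for key in list(sort_keys):
        if key not in blacklist:
            continue
        idx = sort_keys.index(key)
        # Only pop a direction if one was actually supplied for this key.
        if len(sort_dirs) > idx:
            sort_dirs.pop(idx)
        sort_keys.pop(idx)
    return sort_keys, sort_dirs

# Example: 'host' is blacklisted, so its paired direction 'asc' goes too:
# _drop_blacklisted_sort_keys(['host', 'display_name'], ['asc', 'desc'],
#                             blacklist=['host'])
# -> (['display_name'], ['desc'])
# ---------------------------------------------------------------------------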
from keystoneauth1 import exceptions as ks_exc from oslo_log import log as logging from oslo_utils import strutils from oslo_utils import uuidutils import webob.exc from nova.api.openstack import api_version_request from nova.api.openstack.compute.schemas import services from nova.api.openstack import wsgi from nova.api import validation from nova import availability_zones from nova.compute import api as compute from nova import exception from nova.i18n import _ from nova import objects from nova.policies import services as services_policies from nova.scheduler.client import report from nova import servicegroup from nova import utils LOG = logging.getLogger(__name__) class ServiceController(wsgi.Controller): def __init__(self): super(ServiceController, self).__init__() self.host_api = compute.HostAPI() self.aggregate_api = compute.AggregateAPI() self.servicegroup_api = servicegroup.API() self.actions = {"enable": self._enable, "disable": self._disable, "disable-log-reason": self._disable_log_reason} @property def placementclient(self): return report.report_client_singleton() def _get_services(self, req): # The API services are filtered out since they are not RPC services # and therefore their state is not reported through the service group # API, so they would always be reported as 'down' (see bug 1543625). api_services = ('nova-osapi_compute', 'nova-metadata') context = req.environ['nova.context'] cell_down_support = api_version_request.is_supported(req, '2.69') _services = [ s for s in self.host_api.service_get_all(context, set_zones=True, all_cells=True, cell_down_support=cell_down_support) if s['binary'] not in api_services ] host = '' if 'host' in req.GET: host = req.GET['host'] binary = '' if 'binary' in req.GET: binary = req.GET['binary'] if host: _services = [s for s in _services if s['host'] == host] if binary: _services = [s for s in _services if s['binary'] == binary] return _services def _get_service_detail(self, svc, additional_fields, req, cell_down_support=False): # NOTE(tssurya): The below logic returns a minimal service construct # consisting of only the host, binary and status fields for the compute # services in the down cell. if (cell_down_support and 'uuid' not in svc): return {'binary': svc.binary, 'host': svc.host, 'status': "UNKNOWN"} alive = self.servicegroup_api.service_is_up(svc) state = (alive and "up") or "down" active = 'enabled' if svc['disabled']: active = 'disabled' updated_time = self.servicegroup_api.get_updated_time(svc) uuid_for_id = api_version_request.is_supported(req, '2.53') if 'availability_zone' not in svc: # The service wasn't loaded with the AZ so we need to do it here. # Yes this looks weird, but set_availability_zones makes a copy of # the list passed in and mutates the objects within it, so we have # to pull it back out from the resulting copied list. 
svc.availability_zone = ( availability_zones.set_availability_zones( req.environ['nova.context'], [svc])[0]['availability_zone']) service_detail = {'binary': svc['binary'], 'host': svc['host'], 'id': svc['uuid' if uuid_for_id else 'id'], 'zone': svc['availability_zone'], 'status': active, 'state': state, 'updated_at': updated_time, 'disabled_reason': svc['disabled_reason']} for field in additional_fields: service_detail[field] = svc[field] return service_detail def _get_services_list(self, req, additional_fields=()): _services = self._get_services(req) cell_down_support = api_version_request.is_supported(req, '2.69') return [self._get_service_detail(svc, additional_fields, req, cell_down_support=cell_down_support) for svc in _services] def _enable(self, body, context): """Enable scheduling for a service.""" return self._enable_disable(body, context, "enabled", {'disabled': False, 'disabled_reason': None}) def _disable(self, body, context, reason=None): """Disable scheduling for a service with optional log.""" return self._enable_disable(body, context, "disabled", {'disabled': True, 'disabled_reason': reason}) def _disable_log_reason(self, body, context): """Disable scheduling for a service with a log.""" try: reason = body['disabled_reason'] except KeyError: msg = _('Missing disabled reason field') raise webob.exc.HTTPBadRequest(explanation=msg) return self._disable(body, context, reason) def _enable_disable(self, body, context, status, params_to_update): """Enable/Disable scheduling for a service.""" reason = params_to_update.get('disabled_reason') ret_value = { 'service': { 'host': body['host'], 'binary': body['binary'], 'status': status }, } if reason: ret_value['service']['disabled_reason'] = reason self._update(context, body['host'], body['binary'], params_to_update) return ret_value def _forced_down(self, body, context): """Set or unset forced_down flag for the service""" try: forced_down = strutils.bool_from_string(body["forced_down"]) except KeyError: msg = _('Missing forced_down field') raise webob.exc.HTTPBadRequest(explanation=msg) host = body['host'] binary = body['binary'] if binary == 'nova-compute' and forced_down is False: self._check_for_evacuations(context, host) ret_value = {'service': {'host': host, 'binary': binary, 'forced_down': forced_down}} self._update(context, host, binary, {"forced_down": forced_down}) return ret_value def _update(self, context, host, binary, payload): """Do the actual PUT/update""" # If the user tried to perform an action # (disable/enable/force down) on a non-nova-compute # service, provide a more useful error message. if binary != 'nova-compute': msg = (_( 'Updating a %(binary)s service is not supported. 
Only ' 'nova-compute services can be updated.') % {'binary': binary}) raise webob.exc.HTTPBadRequest(explanation=msg) try: self.host_api.service_update_by_host_and_binary( context, host, binary, payload) except (exception.HostBinaryNotFound, exception.HostMappingNotFound) as exc: raise webob.exc.HTTPNotFound(explanation=exc.format_message()) def _perform_action(self, req, id, body, actions): """Calculate action dictionary dependent on provided fields""" context = req.environ['nova.context'] try: action = actions[id] except KeyError: msg = _("Unknown action") raise webob.exc.HTTPNotFound(explanation=msg) return action(body, context) def _check_for_evacuations(self, context, hostname): # NOTE(lyarwood): When forcing a compute service back up ensure that # there are no evacuation migration records against this host as the # source that are marked as done, suggesting that the compute service # hasn't restarted and moved such records to a completed state. filters = { 'source_compute': hostname, 'status': 'done', 'migration_type': objects.fields.MigrationType.EVACUATION, } if any(objects.MigrationList.get_by_filters(context, filters)): msg = _("Unable to force up host %(host)s as `done` evacuation " "migration records remain associated with the host. " "Ensure the compute service has been restarted, " "allowing these records to move to `completed` before " "retrying this request.") % {'host': hostname} # TODO(lyarwood): Move to 409 HTTPConflict under a new microversion raise webob.exc.HTTPBadRequest(explanation=msg) @wsgi.response(204) @wsgi.expected_errors((400, 404, 409)) def delete(self, req, id): """Deletes the specified service.""" context = req.environ['nova.context'] context.can(services_policies.BASE_POLICY_NAME % 'delete', target={}) if api_version_request.is_supported(req, '2.53'): if not uuidutils.is_uuid_like(id): msg = _('Invalid uuid %s') % id raise webob.exc.HTTPBadRequest(explanation=msg) else: try: utils.validate_integer(id, 'id') except exception.InvalidInput as exc: raise webob.exc.HTTPBadRequest( explanation=exc.format_message()) try: service = self.host_api.service_get_by_id(context, id) # remove the service from all the aggregates in which it's included if service.binary == 'nova-compute': # Check to see if there are any instances on this compute host # because if there are, we need to block the service (and # related compute_nodes record) delete since it will impact # resource accounting in Placement and orphan the compute node # resource provider. num_instances = objects.InstanceList.get_count_by_hosts( context, [service['host']]) if num_instances: raise webob.exc.HTTPConflict( explanation=_('Unable to delete compute service that ' 'is hosting instances. Migrate or ' 'delete the instances first.')) # Similarly, check to see if the are any in-progress migrations # involving this host because if there are we need to block the # service delete since we could orphan resource providers and # break the ability to do things like confirm/revert instances # in VERIFY_RESIZE status. 
compute_nodes = [] try: compute_nodes = objects.ComputeNodeList.get_all_by_host( context, service.host) self._assert_no_in_progress_migrations( context, id, compute_nodes) except exception.ComputeHostNotFound: # NOTE(artom) Consider the following situation: # - Using the Ironic virt driver # - Replacing (so removing and re-adding) all baremetal # nodes associated with a single nova-compute service # The update resources periodic will have destroyed the # compute node records because they're no longer being # reported by the virt driver. If we then attempt to # manually delete the compute service record, # get_all_host() above will raise, as there are no longer # any compute node records for the host. Catch it here and # continue to allow compute service deletion. LOG.info('Deleting compute service with no associated ' 'compute nodes.') aggrs = self.aggregate_api.get_aggregates_by_host(context, service.host) for ag in aggrs: self.aggregate_api.remove_host_from_aggregate(context, ag.id, service.host) # remove the corresponding resource provider record from # placement for the compute nodes managed by this service; # remember that an ironic compute service can manage multiple # nodes for compute_node in compute_nodes: try: self.placementclient.delete_resource_provider( context, compute_node, cascade=True) except ks_exc.ClientException as e: LOG.error( "Failed to delete compute node resource provider " "for compute node %s: %s", compute_node.uuid, str(e)) # Remove the host_mapping of this host. try: hm = objects.HostMapping.get_by_host(context, service.host) hm.destroy() except exception.HostMappingNotFound: # It's possible to startup a nova-compute service and then # delete it (maybe it was accidental?) before mapping it to # a cell using discover_hosts, so we just ignore this. pass service.destroy() except exception.ServiceNotFound: explanation = _("Service %s not found.") % id raise webob.exc.HTTPNotFound(explanation=explanation) except exception.ServiceNotUnique: explanation = _("Service id %s refers to multiple services.") % id raise webob.exc.HTTPBadRequest(explanation=explanation) @staticmethod def _assert_no_in_progress_migrations(context, service_id, compute_nodes): """Ensures there are no in-progress migrations on the given nodes. :param context: nova auth RequestContext :param service_id: id of the Service being deleted :param compute_nodes: ComputeNodeList of nodes on a compute service :raises: HTTPConflict if there are any in-progress migrations on the nodes """ for cn in compute_nodes: migrations = ( objects.MigrationList.get_in_progress_by_host_and_node( context, cn.host, cn.hypervisor_hostname)) if migrations: # Log the migrations for the operator and then raise # a 409 error. LOG.info('Unable to delete compute service with id %s ' 'for host %s. There are %i in-progress ' 'migrations involving the host. Migrations ' '(uuid:status): %s', service_id, cn.host, len(migrations), ','.join(['%s:%s' % (mig.uuid, mig.status) for mig in migrations])) raise webob.exc.HTTPConflict( explanation=_( 'Unable to delete compute service that has ' 'in-progress migrations. Complete the ' 'migrations or delete the instances first.')) @wsgi.expected_errors(()) @validation.query_schema(services.index_query_schema, '2.0', '2.74') @validation.query_schema(services.index_query_schema_275, '2.75') def index(self, req): """Return a list of all running services. 
Filter by host & service name """ context = req.environ['nova.context'] context.can(services_policies.BASE_POLICY_NAME % 'list', target={}) if api_version_request.is_supported(req, '2.11'): _services = self._get_services_list(req, ['forced_down']) else: _services = self._get_services_list(req) return {'services': _services} @wsgi.expected_errors((400, 404)) @validation.schema(services.service_update, '2.0', '2.10') @validation.schema(services.service_update_v211, '2.11', '2.52') @validation.schema(services.service_update_v253, '2.53') def update(self, req, id, body): """Perform service update Before microversion 2.53, the body contains a host and binary value to identify the service on which to perform the action. There is no service ID passed on the path, just the action, for example PUT /os-services/disable. Starting with microversion 2.53, the service uuid is passed in on the path of the request to uniquely identify the service record on which to perform a given update, which is defined in the body of the request. """ if api_version_request.is_supported(req, '2.53'): return self._update_v253(req, id, body) else: return self._update_v21(req, id, body) def _update_v21(self, req, id, body): context = req.environ['nova.context'] context.can(services_policies.BASE_POLICY_NAME % 'update', target={}) if api_version_request.is_supported(req, '2.11'): actions = self.actions.copy() actions["force-down"] = self._forced_down else: actions = self.actions return self._perform_action(req, id, body, actions) def _update_v253(self, req, id, body): service_id = id # Validate that the service ID is a UUID. if not uuidutils.is_uuid_like(service_id): msg = _('Invalid uuid %s') % service_id raise webob.exc.HTTPBadRequest(explanation=msg) # Validate the request context against the policy. context = req.environ['nova.context'] context.can(services_policies.BASE_POLICY_NAME % 'update', target={}) # Get the service by uuid. try: service = self.host_api.service_get_by_id(context, service_id) # At this point the context is targeted to the cell that the # service was found in so we don't need to do any explicit cell # targeting below. except exception.ServiceNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) # Return 400 if service.binary is not nova-compute. # Before the earlier PUT handlers were made cells-aware, you could # technically disable a nova-scheduler service, although that doesn't # really do anything within Nova and is just confusing. Now trying to # do that will fail as a nova-scheduler service won't have a host # mapping so you'll get a 400. In this new microversion, we close that # old gap and make sure you can only enable/disable and set forced_down # on nova-compute services since those are the only ones that make # sense to update for those operations. if service.binary != 'nova-compute': msg = (_('Updating a %(binary)s service is not supported. Only ' 'nova-compute services can be updated.') % {'binary': service.binary}) raise webob.exc.HTTPBadRequest(explanation=msg) # Now determine the update to perform based on the body. We are # intentionally not using _perform_action or the other old-style # action functions. if 'status' in body: # This is a status update for either enabled or disabled. if body['status'] == 'enabled': # Fail if 'disabled_reason' was requested when enabling the # service since those two combined don't make sense. 
if body.get('disabled_reason'): msg = _("Specifying 'disabled_reason' with status " "'enabled' is invalid.") raise webob.exc.HTTPBadRequest(explanation=msg) service.disabled = False service.disabled_reason = None elif body['status'] == 'disabled': service.disabled = True # The disabled reason is optional. service.disabled_reason = body.get('disabled_reason') # This is intentionally not an elif, i.e. it's in addition to the # status update. if 'forced_down' in body: service.forced_down = strutils.bool_from_string( body['forced_down'], strict=True) if service.forced_down is False: self._check_for_evacuations(context, service.host) # Check to see if anything was actually updated since the schema does # not define any required fields. if not service.obj_what_changed(): msg = _("No updates were requested. Fields 'status' or " "'forced_down' should be specified.") raise webob.exc.HTTPBadRequest(explanation=msg) # Now save our updates to the service record in the database. self.host_api.service_update(context, service) # Return the full service record details. additional_fields = ['forced_down'] return {'service': self._get_service_detail( service, additional_fields, req)} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/shelve.py0000664000175000017500000001453400000000000021610 0ustar00zuulzuul00000000000000# Copyright 2013 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
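# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch (not part of this module) of the
# request bodies accepted by the microversion >= 2.53 handler _update_v253()
# in services.py above for PUT /os-services/{service_uuid}. All values are
# examples only; only nova-compute services can be updated this way.
#
# Disable a compute service and record a reason:
_example_disable_body = {
    'status': 'disabled',
    'disabled_reason': 'maintenance window',
}
# Re-enable it (supplying 'disabled_reason' together with status 'enabled'
# is rejected with HTTP 400):
_example_enable_body = {
    'status': 'enabled',
}
# Force the service down; this can be combined with a status update in the
# same request body:
_example_force_down_body = {
    'status': 'disabled',
    'disabled_reason': 'hardware failure',
    'forced_down': True,
}
# A body that changes nothing (neither 'status' nor 'forced_down') results
# in HTTP 400.
# ---------------------------------------------------------------------------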
"""The shelved mode extension.""" from oslo_log import log as logging from webob import exc from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import shelve as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.policies import shelve as shelve_policies LOG = logging.getLogger(__name__) @validation.validated class ShelveController(wsgi.Controller): def __init__(self): super(ShelveController, self).__init__() self.compute_api = compute.API() @wsgi.response(202) @wsgi.expected_errors((404, 403, 409, 400)) @wsgi.action('shelve') @validation.schema(schema.shelve) @validation.response_body_schema(schema.shelve_response) def _shelve(self, req, id, body): """Move an instance into shelved mode.""" context = req.environ["nova.context"] instance = common.get_instance(self.compute_api, context, id) context.can(shelve_policies.POLICY_ROOT % 'shelve', target={'user_id': instance.user_id, 'project_id': instance.project_id}) try: self.compute_api.shelve(context, instance) except ( exception.InstanceIsLocked, exception.UnexpectedTaskStateError, ) as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'shelve', id) except exception.ForbiddenPortsWithAccelerator as e: raise exc.HTTPBadRequest(explanation=e.format_message()) except ( exception.ForbiddenSharesNotSupported, exception.ForbiddenWithShare) as e: raise exc.HTTPConflict(explanation=e.format_message()) @wsgi.response(202) @wsgi.expected_errors((400, 404, 409)) @wsgi.action('shelveOffload') @validation.schema(schema.shelve_offload) @validation.response_body_schema(schema.shelve_offload_response) def _shelve_offload(self, req, id, body): """Force removal of a shelved instance from the compute node.""" context = req.environ["nova.context"] instance = common.get_instance(self.compute_api, context, id) context.can(shelve_policies.POLICY_ROOT % 'shelve_offload', target={'user_id': instance.user_id, 'project_id': instance.project_id}) try: self.compute_api.shelve_offload(context, instance) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'shelveOffload', id) except exception.ForbiddenPortsWithAccelerator as e: raise exc.HTTPBadRequest(explanation=e.format_message()) @wsgi.response(202) @wsgi.expected_errors((400, 403, 404, 409)) @wsgi.action('unshelve') @validation.schema(schema.unshelve, '2.1', '2.76') # In microversion 2.77 we support specifying 'availability_zone' to # unshelve a server. But before 2.77 there is no request body # schema validation (because of body=null). @validation.schema(schema.unshelve_v277, '2.77', '2.90') # In microversion 2.91 we support specifying 'host' to # unshelve an instance to a specific hostself. 
# 'availability_zone' = None is supported as well to unpin the # availability zone of an instance bonded to this availability_zone @validation.schema(schema.unshelve_v291, '2.91') @validation.response_body_schema(schema.unshelve_response) def _unshelve(self, req, id, body): """Restore an instance from shelved mode.""" context = req.environ["nova.context"] instance = common.get_instance(self.compute_api, context, id) context.can( shelve_policies.POLICY_ROOT % 'unshelve', target={'project_id': instance.project_id} ) unshelve_args = {} unshelve_dict = body.get('unshelve') support_az = api_version_request.is_supported( req, '2.77') support_host = api_version_request.is_supported( req, '2.91') if unshelve_dict: if support_az and 'availability_zone' in unshelve_dict: unshelve_args['new_az'] = ( unshelve_dict['availability_zone'] ) if support_host: unshelve_args['host'] = unshelve_dict.get('host') try: self.compute_api.unshelve( context, instance, **unshelve_args, ) except ( exception.InstanceIsLocked, exception.UnshelveInstanceInvalidState, exception.UnshelveHostNotInAZ, exception.MismatchVolumeAZException, ) as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state( state_error, 'unshelve', id) except ( exception.InvalidRequest, exception.ExtendedResourceRequestOldCompute, exception.ComputeHostNotFound, ) as e: raise exc.HTTPBadRequest(explanation=e.format_message()) except exception.OverQuota as e: raise exc.HTTPForbidden(explanation=e.format_message()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/simple_tenant_usage.py0000664000175000017500000003360500000000000024350 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
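# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch (not part of this module) of the
# POST /servers/{server_id}/action bodies handled by the unshelve action in
# shelve.py above, per microversion. Values are examples only.
#
# Any supported microversion -- plain unshelve:
_example_unshelve = {'unshelve': None}
# Microversion >= 2.77 -- request a target availability zone:
_example_unshelve_az = {'unshelve': {'availability_zone': 'az-2'}}
# Microversion >= 2.91 -- target a specific compute host; passing
# availability_zone=None unpins the server from its current zone:
_example_unshelve_host = {
    'unshelve': {'host': 'compute-01', 'availability_zone': None},
}
# ---------------------------------------------------------------------------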
import collections import datetime from urllib import parse as urlparse import iso8601 from oslo_utils import timeutils from webob import exc from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import simple_tenant_usage as schema from nova.api.openstack.compute.views import usages as usages_view from nova.api.openstack import wsgi from nova.api import validation import nova.conf from nova import context as nova_context from nova import exception from nova.i18n import _ from nova import objects from nova.policies import simple_tenant_usage as stu_policies CONF = nova.conf.CONF def parse_strtime(dstr, fmt): try: return timeutils.parse_strtime(dstr, fmt) except (TypeError, ValueError) as e: raise exception.InvalidStrTime(reason=str(e)) class SimpleTenantUsageController(wsgi.Controller): _view_builder_class = usages_view.ViewBuilder def _hours_for(self, instance, period_start, period_stop): launched_at = instance.launched_at terminated_at = instance.terminated_at if terminated_at is not None: if not isinstance(terminated_at, datetime.datetime): # NOTE(mriedem): Instance object DateTime fields are # timezone-aware so convert using isotime. terminated_at = timeutils.parse_isotime(terminated_at) if launched_at is not None: if not isinstance(launched_at, datetime.datetime): launched_at = timeutils.parse_isotime(launched_at) if terminated_at and terminated_at < period_start: return 0 # nothing if it started after the usage report ended if launched_at and launched_at > period_stop: return 0 if launched_at: # if instance launched after period_started, don't charge for first start = max(launched_at, period_start) if terminated_at: # if instance stopped before period_stop, don't charge after stop = min(period_stop, terminated_at) else: # instance is still running, so charge them up to current time stop = period_stop dt = stop - start return dt.total_seconds() / 3600.0 else: # instance hasn't launched, so no charge return 0 def _get_flavor(self, context, instance, flavors_cache): """Get flavor information from the instance object, allowing a fallback to lookup by-id for deleted instances only. 
""" try: return instance.get_flavor() except exception.NotFound: if not instance.deleted: # Only support the fallback mechanism for deleted instances # that would have been skipped by migration #153 raise flavor_type = instance.instance_type_id if flavor_type in flavors_cache: return flavors_cache[flavor_type] try: flavor_ref = objects.Flavor.get_by_id(context, flavor_type) flavors_cache[flavor_type] = flavor_ref except exception.FlavorNotFound: # can't bill if there is no flavor flavor_ref = None return flavor_ref def _get_instances_all_cells(self, context, period_start, period_stop, tenant_id, limit, marker): all_instances = [] cells = objects.CellMappingList.get_all(context) for cell in cells: with nova_context.target_cell(context, cell) as cctxt: try: instances = ( objects.InstanceList.get_active_by_window_joined( cctxt, period_start, period_stop, tenant_id, expected_attrs=['flavor'], limit=limit, marker=marker)) except exception.MarkerNotFound: # NOTE(danms): We need to keep looking through the later # cells to find the marker continue all_instances.extend(instances) # NOTE(danms): We must have found a marker if we had one, # so make sure we don't require a marker in the next cell marker = None if limit: limit -= len(instances) if limit <= 0: break if marker is not None and len(all_instances) == 0: # NOTE(danms): If we did not find the marker in any cell, # mimic the db_api behavior here raise exception.MarkerNotFound(marker=marker) return all_instances def _tenant_usages_for_period(self, context, period_start, period_stop, tenant_id=None, detailed=True, limit=None, marker=None): instances = self._get_instances_all_cells(context, period_start, period_stop, tenant_id, limit, marker) rval = collections.OrderedDict() flavors = {} all_server_usages = [] for instance in instances: info = {} info['hours'] = self._hours_for(instance, period_start, period_stop) flavor = self._get_flavor(context, instance, flavors) if not flavor: info['flavor'] = '' else: info['flavor'] = flavor.name info['instance_id'] = instance.uuid info['name'] = instance.display_name info['tenant_id'] = instance.project_id try: info['memory_mb'] = instance.flavor.memory_mb info['local_gb'] = (instance.flavor.root_gb + instance.flavor.ephemeral_gb) info['vcpus'] = instance.flavor.vcpus except exception.InstanceNotFound: # This is rare case, instance disappear during analysis # As it's just info collection, we can try next one continue # NOTE(mriedem): We need to normalize the start/end times back # to timezone-naive so the response doesn't change after the # conversion to objects. 
info['started_at'] = timeutils.normalize_time(instance.launched_at) info['ended_at'] = ( timeutils.normalize_time(instance.terminated_at) if instance.terminated_at else None) if info['ended_at']: info['state'] = 'terminated' else: info['state'] = instance.vm_state now = timeutils.utcnow() if info['state'] == 'terminated': delta = info['ended_at'] - info['started_at'] else: delta = now - info['started_at'] info['uptime'] = int(delta.total_seconds()) if info['tenant_id'] not in rval: summary = {} summary['tenant_id'] = info['tenant_id'] if detailed: summary['server_usages'] = [] summary['total_local_gb_usage'] = 0 summary['total_vcpus_usage'] = 0 summary['total_memory_mb_usage'] = 0 summary['total_hours'] = 0 summary['start'] = timeutils.normalize_time(period_start) summary['stop'] = timeutils.normalize_time(period_stop) rval[info['tenant_id']] = summary summary = rval[info['tenant_id']] summary['total_local_gb_usage'] += info['local_gb'] * info['hours'] summary['total_vcpus_usage'] += info['vcpus'] * info['hours'] summary['total_memory_mb_usage'] += (info['memory_mb'] * info['hours']) summary['total_hours'] += info['hours'] all_server_usages.append(info) if detailed: summary['server_usages'].append(info) return list(rval.values()), all_server_usages def _parse_datetime(self, dtstr): if not dtstr: value = timeutils.utcnow() elif isinstance(dtstr, datetime.datetime): value = dtstr else: for fmt in ["%Y-%m-%dT%H:%M:%S", "%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%d %H:%M:%S.%f"]: try: value = parse_strtime(dtstr, fmt) break except exception.InvalidStrTime: pass else: msg = _("Datetime is in invalid format") raise exception.InvalidStrTime(reason=msg) # NOTE(mriedem): Instance object DateTime fields are timezone-aware # so we have to force UTC timezone for comparing this datetime against # instance object fields and still maintain backwards compatibility # in the API. if value.utcoffset() is None: value = value.replace(tzinfo=iso8601.UTC) return value def _get_datetime_range(self, req): qs = req.environ.get('QUERY_STRING', '') env = urlparse.parse_qs(qs) # NOTE(lzyeval): env.get() always returns a list period_start = self._parse_datetime(env.get('start', [None])[0]) period_stop = self._parse_datetime(env.get('end', [None])[0]) if not period_start < period_stop: msg = _("Invalid start time. 
The start time cannot occur after " "the end time.") raise exc.HTTPBadRequest(explanation=msg) detailed = env.get('detailed', ['0'])[0] == '1' return (period_start, period_stop, detailed) @validation.query_schema(schema.index_query, '2.1', '2.39') @validation.query_schema(schema.index_query_v240, '2.40', '2.74') @validation.query_schema(schema.index_query_v275, '2.75') @wsgi.expected_errors(400) def index(self, req): """Retrieve tenant_usage for all tenants.""" links = False if api_version_request.is_supported(req, '2.40'): links = True return self._index(req, links=links) def _index(self, req, links=False): context = req.environ['nova.context'] context.can(stu_policies.POLICY_ROOT % 'list') try: (period_start, period_stop, detailed) = self._get_datetime_range( req) except exception.InvalidStrTime as e: raise exc.HTTPBadRequest(explanation=e.format_message()) now = timeutils.parse_isotime(timeutils.utcnow().isoformat()) if period_stop > now: period_stop = now marker = None limit = CONF.api.max_limit if links: limit, marker = common.get_limit_and_marker(req) try: usages, server_usages = self._tenant_usages_for_period( context, period_start, period_stop, detailed=detailed, limit=limit, marker=marker) except exception.MarkerNotFound as e: raise exc.HTTPBadRequest(explanation=e.format_message()) tenant_usages = {'tenant_usages': usages} if links: usages_links = self._view_builder.get_links(req, server_usages) if usages_links: tenant_usages['tenant_usages_links'] = usages_links return tenant_usages @validation.query_schema(schema.show_query, '2.1', '2.39') @validation.query_schema(schema.show_query_v240, '2.40', '2.74') @validation.query_schema(schema.show_query_v275, '2.75') @wsgi.expected_errors(400) def show(self, req, id): """Retrieve tenant_usage for a specified tenant.""" links = False if api_version_request.is_supported(req, '2.40'): links = True return self._show(req, id, links=links) def _show(self, req, id, links=False): tenant_id = id context = req.environ['nova.context'] context.can(stu_policies.POLICY_ROOT % 'show', {'project_id': tenant_id}) try: (period_start, period_stop, ignore) = self._get_datetime_range( req) except exception.InvalidStrTime as e: raise exc.HTTPBadRequest(explanation=e.format_message()) now = timeutils.parse_isotime(timeutils.utcnow().isoformat()) if period_stop > now: period_stop = now marker = None limit = CONF.api.max_limit if links: limit, marker = common.get_limit_and_marker(req) try: usage, server_usages = self._tenant_usages_for_period( context, period_start, period_stop, tenant_id=tenant_id, detailed=True, limit=limit, marker=marker) except exception.MarkerNotFound as e: raise exc.HTTPBadRequest(explanation=e.format_message()) if len(usage): usage = list(usage)[0] else: usage = {} tenant_usage = {'tenant_usage': usage} if links: usages_links = self._view_builder.get_links( req, server_usages, tenant_id=tenant_id) if usages_links: tenant_usage['tenant_usage_links'] = usages_links return tenant_usage ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/snapshots.py0000664000175000017500000001274700000000000022350 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The volume snapshots extension.""" from oslo_utils import strutils from webob import exc from nova.api.openstack.api_version_request \ import MAX_PROXY_API_SUPPORT_VERSION from nova.api.openstack import common from nova.api.openstack.compute.schemas import snapshots as schema from nova.api.openstack import wsgi from nova.api import validation from nova import exception from nova.policies import volumes as vol_policies from nova.volume import cinder def _translate_snapshot_detail_view(context, vol): """Maps keys for snapshots details view.""" return _translate_snapshot_summary_view(context, vol) def _translate_snapshot_summary_view(context, vol): """Maps keys for snapshots summary view.""" d = {} d['id'] = vol['id'] d['volumeId'] = vol['volume_id'] d['status'] = vol['status'] # NOTE(gagupta): We map volume_size as the snapshot size d['size'] = vol['volume_size'] d['createdAt'] = vol['created_at'] d['displayName'] = vol['display_name'] d['displayDescription'] = vol['display_description'] return d class SnapshotController(wsgi.Controller): """The Snapshots API controller for the OpenStack API.""" def __init__(self): super().__init__() self.volume_api = cinder.API() @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(404) @validation.query_schema(schema.show_query) def show(self, req, id): """Return data about the given snapshot.""" context = req.environ['nova.context'] context.can( vol_policies.POLICY_NAME % 'snapshots:show', target={'project_id': context.project_id}) try: vol = self.volume_api.get_snapshot(context, id) except exception.SnapshotNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) return {'snapshot': _translate_snapshot_detail_view(context, vol)} @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.response(202) @wsgi.expected_errors(404) def delete(self, req, id): """Delete a snapshot.""" context = req.environ['nova.context'] context.can( vol_policies.POLICY_NAME % 'snapshots:delete', target={'project_id': context.project_id}) try: self.volume_api.delete_snapshot(context, id) except exception.SnapshotNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(()) @validation.query_schema(schema.index_query) def index(self, req): """Returns a summary list of snapshots.""" context = req.environ['nova.context'] context.can( vol_policies.POLICY_NAME % 'snapshots:list', target={'project_id': context.project_id}) return self._items(req, entity_maker=_translate_snapshot_summary_view) @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(()) @validation.query_schema(schema.detail_query) def detail(self, req): """Returns a detailed list of snapshots.""" context = req.environ['nova.context'] context.can( vol_policies.POLICY_NAME % 'snapshots:detail', target={'project_id': context.project_id}) return self._items(req, entity_maker=_translate_snapshot_detail_view) def _items(self, req, entity_maker): """Returns a list of snapshots, transformed through entity_maker.""" context = req.environ['nova.context'] snapshots = 
self.volume_api.get_all_snapshots(context) limited_list = common.limited(snapshots, req) res = [entity_maker(context, snapshot) for snapshot in limited_list] return {'snapshots': res} @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors((400, 403)) @validation.schema(schema.create) def create(self, req, body): """Creates a new snapshot.""" context = req.environ['nova.context'] context.can( vol_policies.POLICY_NAME % 'snapshots:create', target={'project_id': context.project_id}) snapshot = body['snapshot'] volume_id = snapshot['volume_id'] force = snapshot.get('force', False) force = strutils.bool_from_string(force, strict=True) if force: create_func = self.volume_api.create_snapshot_force else: create_func = self.volume_api.create_snapshot try: new_snapshot = create_func( context, volume_id, snapshot.get('display_name'), snapshot.get('display_description')) except exception.OverQuota as e: raise exc.HTTPForbidden(explanation=e.format_message()) retval = _translate_snapshot_detail_view(context, new_snapshot) return {'snapshot': retval} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/suspend_server.py0000664000175000017500000000626200000000000023370 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
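# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch (not part of this module) of the key
# mapping performed by _translate_snapshot_summary_view() in snapshots.py
# above when a Cinder snapshot is proxied through this API. The values are
# made up for illustration.
_example_cinder_snapshot = {
    'id': 'example-snapshot-id',
    'volume_id': 'example-volume-id',
    'status': 'available',
    'volume_size': 10,  # exposed as 'size' in the API view
    'created_at': '2025-01-01T00:00:00Z',
    'display_name': 'snap1',
    'display_description': 'nightly snapshot',
}
# The summary/detail view renames these keys to the camelCase form used by
# this proxy API: id, volumeId, status, size, createdAt, displayName and
# displayDescription.
# ---------------------------------------------------------------------------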
from webob import exc from nova.api.openstack import common from nova.api.openstack.compute.schemas import suspend_server as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova import exception from nova.policies import suspend_server as ss_policies @validation.validated class SuspendServerController(wsgi.Controller): def __init__(self): super(SuspendServerController, self).__init__() self.compute_api = compute.API() @wsgi.response(202) @wsgi.expected_errors((403, 404, 409, 400)) @wsgi.action('suspend') @validation.schema(schema.suspend) @validation.response_body_schema(schema.suspend_response) def _suspend(self, req, id, body): """Permit admins to suspend the server.""" context = req.environ['nova.context'] server = common.get_instance(self.compute_api, context, id) try: context.can(ss_policies.POLICY_ROOT % 'suspend', target={'user_id': server.user_id, 'project_id': server.project_id}) self.compute_api.suspend(context, server) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'suspend', id) except exception.ForbiddenPortsWithAccelerator as e: raise exc.HTTPBadRequest(explanation=e.format_message()) except ( exception.ForbiddenSharesNotSupported, exception.ForbiddenWithShare) as e: raise exc.HTTPConflict(explanation=e.format_message()) @wsgi.response(202) @wsgi.expected_errors((404, 409)) @wsgi.action('resume') @validation.schema(schema.resume) @validation.response_body_schema(schema.resume_response) def _resume(self, req, id, body): """Permit admins to resume the server from suspend.""" context = req.environ['nova.context'] server = common.get_instance(self.compute_api, context, id) context.can(ss_policies.POLICY_ROOT % 'resume', target={'project_id': server.project_id}) try: self.compute_api.resume(context, server) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'resume', id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/tenant_networks.py0000664000175000017500000001016700000000000023545 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
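# Illustrative only: the index/show handlers below translate neutron
# networks into the legacy nova-network shape via ``network_dict``, e.g.
#
#   {"network": {"id": "<neutron network uuid>",
#                "cidr": "None",
#                "label": "<neutron network name>"}}
#
# The literal string "None" for ``cidr`` is intentional; see the comment in
# ``network_dict`` about preserving the historical response.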
from oslo_log import log as logging from webob import exc from nova.api.openstack.api_version_request \ import MAX_PROXY_API_SUPPORT_VERSION from nova.api.openstack.compute.schemas import tenant_networks as schema from nova.api.openstack import wsgi from nova.api import validation import nova.conf from nova import context as nova_context from nova import exception from nova.i18n import _ from nova.network import neutron from nova.policies import tenant_networks as tn_policies from nova import quota CONF = nova.conf.CONF QUOTAS = quota.QUOTAS LOG = logging.getLogger(__name__) _removal_reason = """\ This API only works with *nova-network*, which was deprecated in the 14.0.0 (Newton) release. It fails with HTTP 404 starting from microversion 2.36. It was removed in the 21.0.0 (Ussuri) release. """ def network_dict(network): # convert from a neutron response to something resembling what we used to # produce with nova-network return { 'id': network.get('id'), # yes, this is bananas, but this is what the API returned historically # when using neutron instead of nova-network, so we keep on returning # that 'cidr': str(None), 'label': network.get('name'), } class TenantNetworkController(wsgi.Controller): def __init__(self): super(TenantNetworkController, self).__init__() self.network_api = neutron.API() self._default_networks = [] def _refresh_default_networks(self): self._default_networks = [] if CONF.api.use_neutron_default_nets: try: self._default_networks = self._get_default_networks() except Exception: LOG.exception("Failed to get default networks") def _get_default_networks(self): project_id = CONF.api.neutron_default_project_id ctx = nova_context.RequestContext(user_id=None, project_id=project_id) return self.network_api.get_all(ctx) @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(()) @validation.query_schema(schema.index_query) def index(self, req): context = req.environ['nova.context'] context.can(tn_policies.POLICY_NAME % 'list', target={'project_id': context.project_id}) networks = list(self.network_api.get_all(context)) if not self._default_networks: self._refresh_default_networks() networks.extend(self._default_networks) return {'networks': [network_dict(n) for n in networks]} @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(404) @validation.query_schema(schema.show_query) def show(self, req, id): context = req.environ['nova.context'] context.can(tn_policies.POLICY_NAME % 'show', target={'project_id': context.project_id}) try: network = self.network_api.get(context, id) except exception.NetworkNotFound: msg = _("Network not found") raise exc.HTTPNotFound(explanation=msg) return {'network': network_dict(network)} @wsgi.expected_errors(410) @wsgi.removed('21.0.0', _removal_reason) @validation.response_body_schema(schema.delete_response) def delete(self, req, id): raise exc.HTTPGone() @wsgi.expected_errors(410) @wsgi.removed('21.0.0', _removal_reason) @validation.schema(schema.create) @validation.response_body_schema(schema.create_response) def create(self, req, body): raise exc.HTTPGone() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/versions.py0000664000175000017500000000762300000000000022173 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.openstack import api_version_request from nova.api.openstack.compute.schemas import versions as schema from nova.api.openstack.compute.views import versions as views_versions from nova.api.openstack import wsgi from nova.api import validation LINKS = { 'v2.0': { 'html': 'http://docs.openstack.org/' }, 'v2.1': { 'html': 'http://docs.openstack.org/' }, } VERSIONS = { "v2.0": { "id": "v2.0", "status": "DEPRECATED", "version": "", "min_version": "", "updated": "2025-07-04T12:00:00Z", "links": [ { "rel": "describedby", "type": "text/html", "href": LINKS['v2.0']['html'], }, ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.compute+json;version=2", } ], }, "v2.1": { "id": "v2.1", "status": "CURRENT", "version": api_version_request._MAX_API_VERSION, "min_version": api_version_request._MIN_API_VERSION, "updated": "2013-07-23T11:33:21Z", "links": [ { "rel": "describedby", "type": "text/html", "href": LINKS['v2.1']['html'], }, ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.compute+json;version=2.1", } ], } } @validation.validated class Versions(wsgi.Resource): # The root version API isn't under the microversion control. support_api_request_version = False def __init__(self): super(Versions, self).__init__(None) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.index_response) def index(self, req, body=None): """Return all versions.""" builder = views_versions.get_view_builder(req) return builder.build_versions(VERSIONS) @wsgi.response(300) @validation.query_schema(schema.multi_query) @validation.response_body_schema(schema.multi_response) def multi(self, req, body=None): """Return multiple choices.""" builder = views_versions.get_view_builder(req) return builder.build_choices(VERSIONS, req) def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" args = {} if request_environment['PATH_INFO'] == '/': args['action'] = 'index' else: args['action'] = 'multi' return args @validation.validated class VersionsV2(wsgi.Resource): def __init__(self): super(VersionsV2, self).__init__(None) # NOTE(stephenfin): Despite being called index, this is actually called as # a show action @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response) def index(self, req, body=None): builder = views_versions.get_view_builder(req) ver = 'v2.0' if req.is_legacy_v2() else 'v2.1' return builder.build_version(VERSIONS[ver]) def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" return {'action': 'index'} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/versionsV21.py0000664000175000017500000000256100000000000022460 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob.exc from nova.api.openstack.compute.schemas import versions as schema from nova.api.openstack.compute import versions from nova.api.openstack.compute.views import versions as views_versions from nova.api.openstack import wsgi from nova.api import validation @validation.validated class VersionsController(wsgi.Controller): @wsgi.expected_errors(404) @validation.query_schema(schema.show_query) @validation.response_body_schema(schema.show_response) def show(self, req, id='v2.1'): builder = views_versions.get_view_builder(req) if req.is_legacy_v2(): id = 'v2.0' if id not in versions.VERSIONS: raise webob.exc.HTTPNotFound() return builder.build_version(versions.VERSIONS[id]) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3296082 nova-32.0.0/nova/api/openstack/compute/views/0000775000175000017500000000000000000000000021076 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/views/__init__.py0000664000175000017500000000000000000000000023175 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/views/flavors.py0000664000175000017500000001164400000000000023132 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.openstack import api_version_request from nova.api.openstack import common class ViewBuilder(common.ViewBuilder): _collection_name = "flavors" def basic(self, request, flavor, include_description=False, include_extra_specs=False): # include_extra_specs is placeholder param which is not used in # this method as basic() method is used by index() (GET /flavors) # which does not return those keys in response. 
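        # For reference (derived from the code in this module): basic()
        # returns only {"flavor": {"id", "name", "links"}}, plus a
        # "description" key when index() sets include_description for
        # microversion >= 2.55, while show() below adds ram, disk, swap,
        # vcpus and the other detailed fields.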
flavor_dict = { "flavor": { "id": flavor["flavorid"], "name": flavor["name"], "links": self._get_links(request, flavor["flavorid"], self._collection_name), }, } if include_description: flavor_dict['flavor']['description'] = flavor.description return flavor_dict def show(self, request, flavor, include_description=False, include_extra_specs=False): flavor_dict = { "flavor": { "id": flavor["flavorid"], "name": flavor["name"], "ram": flavor["memory_mb"], "disk": flavor["root_gb"], "swap": flavor["swap"] or "", "OS-FLV-EXT-DATA:ephemeral": flavor["ephemeral_gb"], "OS-FLV-DISABLED:disabled": flavor["disabled"], "vcpus": flavor["vcpus"], "os-flavor-access:is_public": flavor['is_public'], "rxtx_factor": flavor['rxtx_factor'] or "", "links": self._get_links(request, flavor["flavorid"], self._collection_name), }, } if include_description: flavor_dict['flavor']['description'] = flavor.description if include_extra_specs: flavor_dict['flavor']['extra_specs'] = flavor.extra_specs if api_version_request.is_supported(request, '2.75'): flavor_dict['flavor']['swap'] = flavor["swap"] or 0 return flavor_dict def index(self, request, flavors): """Return the 'index' view of flavors.""" coll_name = self._collection_name include_description = api_version_request.is_supported(request, '2.55') return self._list_view(self.basic, request, flavors, coll_name, include_description=include_description) def detail(self, request, flavors, include_extra_specs=False): """Return the 'detail' view of flavors.""" coll_name = self._collection_name + '/detail' include_description = api_version_request.is_supported(request, '2.55') return self._list_view(self.show, request, flavors, coll_name, include_description=include_description, include_extra_specs=include_extra_specs) def _list_view(self, func, request, flavors, coll_name, include_description=False, include_extra_specs=False): """Provide a view for a list of flavors. :param func: Function used to format the flavor data :param request: API request :param flavors: List of flavors in dictionary format :param coll_name: Name of collection, used to generate the next link for a pagination query :param include_description: If the flavor.description should be included in the response dict. :param include_extra_specs: If the flavor.extra_specs should be included in the response dict. :returns: Flavor reply data in dictionary format """ flavor_list = [func(request, flavor, include_description, include_extra_specs)["flavor"] for flavor in flavors] flavors_links = self._get_collection_links(request, flavors, coll_name, "flavorid") flavors_dict = dict(flavors=flavor_list) if flavors_links: flavors_dict["flavors_links"] = flavors_links return flavors_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/views/hypervisors.py0000664000175000017500000000202500000000000024044 0ustar00zuulzuul00000000000000# Copyright 2016 Kylin Cloud # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova.api.openstack import common class ViewBuilder(common.ViewBuilder): _collection_name = "os-hypervisors" def get_links(self, request, hypervisors, detail=False): coll_name = (self._collection_name + '/detail' if detail else self._collection_name) return self._get_collection_links(request, hypervisors, coll_name, 'id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/views/images.py0000664000175000017500000001377000000000000022725 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import strutils from nova.api.openstack import common from nova.image import glance from nova import utils class ViewBuilder(common.ViewBuilder): _collection_name = "images" def basic(self, request, image): """Return a dictionary with basic image attributes.""" return { "image": { "id": image.get("id"), "name": image.get("name"), "links": self._get_links(request, image["id"], self._collection_name), }, } def show(self, request, image): """Return a dictionary with image details.""" image_dict = { "id": image.get("id"), "name": image.get("name"), "minRam": int(image.get("min_ram") or 0), "minDisk": int(image.get("min_disk") or 0), "metadata": image.get("properties", {}), "created": self._format_date(image.get("created_at")), "updated": self._format_date(image.get("updated_at")), "status": self._get_status(image), "progress": self._get_progress(image), "OS-EXT-IMG-SIZE:size": image.get("size"), "links": self._get_links(request, image["id"], self._collection_name), } instance_uuid = image.get("properties", {}).get("instance_uuid") if instance_uuid is not None: server_ref = self._get_href_link(request, instance_uuid, 'servers') image_dict["server"] = { "id": instance_uuid, "links": [{ "rel": "self", "href": server_ref, }, { "rel": "bookmark", "href": self._get_bookmark_link(request, instance_uuid, 'servers'), }], } auto_disk_config = image_dict['metadata'].get("auto_disk_config", None) if auto_disk_config is not None: value = strutils.bool_from_string(auto_disk_config) image_dict["OS-DCF:diskConfig"] = ( 'AUTO' if value else 'MANUAL') return dict(image=image_dict) def detail(self, request, images): """Show a list of images with details.""" list_func = self.show coll_name = self._collection_name + '/detail' return self._list_view(list_func, request, images, coll_name) def index(self, request, images): """Show a list of images with basic attributes.""" list_func = self.basic coll_name = self._collection_name return self._list_view(list_func, request, images, coll_name) def _list_view(self, list_func, request, images, coll_name): """Provide a view for a list of images. 
:param list_func: Function used to format the image data :param request: API request :param images: List of images in dictionary format :param coll_name: Name of collection, used to generate the next link for a pagination query :returns: Image reply data in dictionary format """ image_list = [list_func(request, image)["image"] for image in images] images_links = self._get_collection_links(request, images, coll_name) images_dict = dict(images=image_list) if images_links: images_dict["images_links"] = images_links return images_dict def _get_links(self, request, identifier, collection_name): """Return a list of links for this image.""" return [{ "rel": "self", "href": self._get_href_link(request, identifier, collection_name), }, { "rel": "bookmark", "href": self._get_bookmark_link(request, identifier, collection_name), }, { "rel": "alternate", "type": "application/vnd.openstack.image", "href": self._get_alternate_link(request, identifier), }] def _get_alternate_link(self, request, identifier): """Create an alternate link for a specific image id.""" glance_url = glance.generate_glance_url( request.environ['nova.context']) glance_url = self._update_glance_link_prefix(glance_url) return '/'.join([glance_url, self._collection_name, str(identifier)]) @staticmethod def _format_date(dt): """Return standard format for a given datetime object.""" if dt is not None: return utils.isotime(dt) @staticmethod def _get_status(image): """Update the status field to standardize format.""" return { 'active': 'ACTIVE', 'queued': 'SAVING', 'saving': 'SAVING', 'deleted': 'DELETED', 'pending_delete': 'DELETED', 'killed': 'ERROR', }.get(image.get("status"), 'UNKNOWN') @staticmethod def _get_progress(image): return { "queued": 25, "saving": 50, "active": 100, }.get(image.get("status"), 0) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/views/instance_actions.py0000664000175000017500000000166600000000000025005 0ustar00zuulzuul00000000000000# Copyright 2017 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.openstack import common class ViewBuilder(common.ViewBuilder): def get_links(self, request, server_id, instance_actions): collection_name = 'servers/%s/os-instance-actions' % server_id return self._get_collection_links(request, instance_actions, collection_name, 'request_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/views/ips.py0000664000175000017500000000365500000000000022254 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import itertools from nova.api.openstack import common class ViewBuilder(common.ViewBuilder): """Models server addresses as a dictionary.""" _collection_name = "addresses" def basic(self, request, ip, extend_address=False): """Return a dictionary describing an IP address.""" address = { "version": ip["version"], "addr": ip["address"], } if extend_address: address.update({ "OS-EXT-IPS:type": ip["type"], "OS-EXT-IPS-MAC:mac_addr": ip['mac_address'], }) return address def show(self, request, network, label, extend_address=False): """Returns a dictionary describing a network.""" all_ips = itertools.chain(network["ips"], network["floating_ips"]) return { label: [self.basic(request, ip, extend_address) for ip in all_ips], } def index(self, request, networks, extend_address=False): """Return a dictionary describing a list of networks.""" addresses = collections.OrderedDict() for label, network in networks.items(): network_dict = self.show(request, network, label, extend_address) addresses[label] = network_dict[label] return {'addresses': addresses} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/views/keypairs.py0000664000175000017500000000534200000000000023303 0ustar00zuulzuul00000000000000# Copyright 2016 Mirantis Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.openstack import common class ViewBuilder(common.ViewBuilder): _collection_name = 'os-keypairs' # TODO(takashin): After v2 and v2.1 is no longer supported, # 'type' can always be included in the response. _index_params = ('name', 'public_key', 'fingerprint') _create_params = _index_params + ('user_id',) _show_params = _create_params + ('created_at', 'deleted', 'deleted_at', 'id', 'updated_at') _index_params_v2_2 = _index_params + ('type',) _show_params_v2_2 = _show_params + ('type',) def get_links(self, request, keypairs): return self._get_collection_links(request, keypairs, self._collection_name, 'name') # TODO(oomichi): It is necessary to filter a response of keypair with # _build_keypair() when v2.1+microversions for implementing consistent # behaviors in this keypair resource. 
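    # Illustrative example (not executed): with the v2.2 index params,
    #
    #   _build_keypair(keypair, ('name', 'public_key', 'fingerprint', 'type'))
    #
    # returns a dict holding only those four keys copied from ``keypair``,
    # which is how the per-microversion attribute filtering above is applied.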
@staticmethod def _build_keypair(keypair, attrs): body = {} for attr in attrs: body[attr] = keypair[attr] return body def create(self, keypair, private_key=False, key_type=False): params = [] if private_key: params.append('private_key') if key_type: params.append('type') params.extend(self._create_params) return {'keypair': self._build_keypair(keypair, params)} def index(self, req, key_pairs, key_type=False, links=False): keypairs_list = [ {'keypair': self._build_keypair( key_pair, self._index_params_v2_2 if key_type else self._index_params)} for key_pair in key_pairs] keypairs_dict = {'keypairs': keypairs_list} if links: keypairs_links = self.get_links(req, key_pairs) if keypairs_links: keypairs_dict['keypairs_links'] = keypairs_links return keypairs_dict def show(self, keypair, key_type=False): return {'keypair': self._build_keypair( keypair, self._show_params_v2_2 if key_type else self._show_params)} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/views/limits.py0000664000175000017500000000667700000000000022771 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class ViewBuilder(object): """OpenStack API base limits view builder.""" limit_names = {} def __init__(self): self.limit_names = { "ram": ["maxTotalRAMSize"], "instances": ["maxTotalInstances"], "cores": ["maxTotalCores"], "key_pairs": ["maxTotalKeypairs"], "floating_ips": ["maxTotalFloatingIps"], "metadata_items": ["maxServerMeta", "maxImageMeta"], "injected_files": ["maxPersonality"], "injected_file_content_bytes": ["maxPersonalitySize"], "security_groups": ["maxSecurityGroups"], "security_group_rules": ["maxSecurityGroupRules"], "server_groups": ["maxServerGroups"], "server_group_members": ["maxServerGroupMembers"] } def build(self, request, quotas, filtered_limits=None, max_image_meta=True): filtered_limits = filtered_limits or [] absolute_limits = self._build_absolute_limits( quotas, filtered_limits, max_image_meta=max_image_meta) used_limits = self._build_used_limits( request, quotas, filtered_limits) absolute_limits.update(used_limits) output = { "limits": { "rate": [], "absolute": absolute_limits, }, } return output def _build_absolute_limits(self, quotas, filtered_limits=None, max_image_meta=True): """Builder for absolute limits absolute_limits should be given as a dict of limits. For example: {"ram": 512, "gigabytes": 1024}. filtered_limits is an optional list of limits to exclude from the result set. 
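        As an illustration of the mapping in ``self.limit_names``, quotas of
        the form {"ram": {"limit": 512, "in_use": 256},
        "metadata_items": {"limit": 128, "in_use": 0}} produce
        {"maxTotalRAMSize": 512, "maxServerMeta": 128, "maxImageMeta": 128};
        the "maxImageMeta" entry is skipped when max_image_meta is False.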
""" absolute_limits = {k: v['limit'] for k, v in quotas.items()} limits = {} for name, value in absolute_limits.items(): if (name in self.limit_names and value is not None and name not in filtered_limits): for limit_name in self.limit_names[name]: if not max_image_meta and limit_name == "maxImageMeta": continue limits[limit_name] = value return limits def _build_used_limits(self, request, quotas, filtered_limits): quota_map = { 'totalRAMUsed': 'ram', 'totalCoresUsed': 'cores', 'totalInstancesUsed': 'instances', 'totalFloatingIpsUsed': 'floating_ips', 'totalSecurityGroupsUsed': 'security_groups', 'totalServerGroupsUsed': 'server_groups', } used_limits = {} for display_name, key in quota_map.items(): if (key in quotas and key not in filtered_limits): used_limits[display_name] = quotas[key]['in_use'] return used_limits ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/views/migrations.py0000664000175000017500000000160000000000000023621 0ustar00zuulzuul00000000000000# Copyright 2017 Huawei Technologies Co.,LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.openstack import common class ViewBuilder(common.ViewBuilder): _collection_name = "os-migrations" def get_links(self, request, migrations): return self._get_collection_links(request, migrations, self._collection_name, 'uuid') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/views/server_diagnostics.py0000664000175000017500000000500000000000000025340 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova.api.openstack import common INSTANCE_DIAGNOSTICS_PRIMITIVE_FIELDS = ( 'state', 'driver', 'hypervisor', 'hypervisor_os', 'uptime', 'config_drive', 'num_cpus', 'num_nics', 'num_disks' ) INSTANCE_DIAGNOSTICS_LIST_FIELDS = { 'disk_details': ('read_bytes', 'read_requests', 'write_bytes', 'write_requests', 'errors_count'), 'cpu_details': ('id', 'time', 'utilisation'), 'nic_details': ('mac_address', 'rx_octets', 'rx_errors', 'rx_drop', 'rx_packets', 'rx_rate', 'tx_octets', 'tx_errors', 'tx_drop', 'tx_packets', 'tx_rate') } INSTANCE_DIAGNOSTICS_OBJECT_FIELDS = {'memory_details': ('maximum', 'used')} class ViewBuilder(common.ViewBuilder): @staticmethod def _get_obj_field(obj, field): if obj and obj.obj_attr_is_set(field): return getattr(obj, field) return None def instance_diagnostics(self, diagnostics): """Return a dictionary with instance diagnostics.""" diagnostics_dict = {} for field in INSTANCE_DIAGNOSTICS_PRIMITIVE_FIELDS: diagnostics_dict[field] = self._get_obj_field(diagnostics, field) for list_field in INSTANCE_DIAGNOSTICS_LIST_FIELDS: diagnostics_dict[list_field] = [] list_obj = getattr(diagnostics, list_field) for obj in list_obj: obj_dict = {} for field in INSTANCE_DIAGNOSTICS_LIST_FIELDS[list_field]: obj_dict[field] = self._get_obj_field(obj, field) diagnostics_dict[list_field].append(obj_dict) for obj_field in INSTANCE_DIAGNOSTICS_OBJECT_FIELDS: diagnostics_dict[obj_field] = {} obj = self._get_obj_field(diagnostics, obj_field) for field in INSTANCE_DIAGNOSTICS_OBJECT_FIELDS[obj_field]: diagnostics_dict[obj_field][field] = self._get_obj_field( obj, field) return diagnostics_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/views/server_shares.py0000664000175000017500000000304000000000000024320 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.openstack import common from nova.api.openstack.compute.views import servers class ViewBuilder(common.ViewBuilder): _collection_name = 'shares' def __init__(self): super(ViewBuilder, self).__init__() self._server_builder = servers.ViewBuilder() def _list_view(self, db_shares): shares = {'shares': []} for db_share in db_shares: share = { 'share_id': db_share.share_id, 'status': db_share.status, 'tag': db_share.tag, } shares['shares'].append(share) return shares def _show_view(self, context, db_share): share = {'share': { 'share_id': db_share.share_id, 'status': db_share.status, 'tag': db_share.tag, }} if context.is_admin: share['share']['export_location'] = db_share.export_location share['share']['uuid'] = db_share.uuid return share ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/views/server_tags.py0000664000175000017500000000220400000000000023772 0ustar00zuulzuul00000000000000# Copyright 2016 Mirantis Inc # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.openstack import common from nova.api.openstack.compute.views import servers class ViewBuilder(common.ViewBuilder): _collection_name = "tags" def __init__(self): super(ViewBuilder, self).__init__() self._server_builder = servers.ViewBuilder() def get_location(self, request, server_id, tag_name): server_location = self._server_builder._get_href_link( request, server_id, "servers") return "%s/%s/%s" % (server_location, self._collection_name, tag_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/views/servers.py0000664000175000017500000011442600000000000023151 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from oslo_log import log as logging from oslo_serialization import jsonutils from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.views import flavors as views_flavors from nova.api.openstack.compute.views import images as views_images from nova.api.openstack.compute.views import ips as views_ips from nova import availability_zones as avail_zone from nova.compute import api as compute from nova.compute import vm_states from nova import context as nova_context from nova import exception from nova.network import security_group_api from nova import objects from nova.objects import fields from nova.objects import virtual_interface from nova.policies import extended_server_attributes as esa_policies from nova.policies import servers as servers_policies from nova import utils LOG = logging.getLogger(__name__) AZ_NOT_IN_REQUEST_SPEC = object() SCHED_HINTS_NOT_IN_REQUEST_SPEC = object() class ViewBuilder(common.ViewBuilder): """Model a server API response as a python dictionary.""" _collection_name = "servers" _progress_statuses = ( "ACTIVE", "BUILD", "REBUILD", "RESIZE", "VERIFY_RESIZE", "MIGRATING", ) _fault_statuses = ( "ERROR", "DELETED" ) # These are the lazy-loadable instance attributes required for showing # details about an instance. Add to this list as new things need to be # shown. 
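    # For example (see get_show_expected_attrs() below): merging a
    # caller-supplied ['fault'] with this list yields the sorted,
    # de-duplicated ['fault', 'flavor', 'info_cache', 'metadata'].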
_show_expected_attrs = ['flavor', 'info_cache', 'metadata'] def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() self._image_builder = views_images.ViewBuilder() self._ips_builder = views_ips.ViewBuilder() self._flavor_builder = views_flavors.ViewBuilder() self.compute_api = compute.API() def create(self, request, instance): """View that should be returned when an instance is created.""" server = { "server": { "id": instance["uuid"], "links": self._get_links(request, instance["uuid"], self._collection_name), # NOTE(sdague): historically this was the # os-disk-config extension, but now that extensions # are gone, we merge these attributes here. "OS-DCF:diskConfig": ( 'AUTO' if instance.get('auto_disk_config') else 'MANUAL'), }, } self._add_security_grps(request, [server["server"]], [instance], create_request=True) return server def basic(self, request, instance, show_extra_specs=False, show_extended_attr=None, show_host_status=None, show_sec_grp=None, bdms=None, cell_down_support=False, show_user_data=False, provided_az=None, provided_sched_hints=None): """Generic, non-detailed view of an instance.""" if cell_down_support and 'display_name' not in instance: # NOTE(tssurya): If the microversion is >= 2.69, this boolean will # be true in which case we check if there are instances from down # cells (by checking if their objects have missing keys like # `display_name`) and return partial constructs based on the # information available from the nova_api database. return { "server": { "id": instance.uuid, "status": "UNKNOWN", "links": self._get_links(request, instance.uuid, self._collection_name), }, } return { "server": { "id": instance["uuid"], "name": instance["display_name"], "links": self._get_links(request, instance["uuid"], self._collection_name), }, } def get_show_expected_attrs(self, expected_attrs=None): """Returns a list of lazy-loadable expected attributes used by show This should be used when getting the instances from the database so that the necessary attributes are pre-loaded before needing to build the show response where lazy-loading can fail if an instance was deleted. :param list expected_attrs: The list of expected attributes that will be requested in addition to what this view builder requires. This method will merge the two lists and return what should be ultimately used when getting an instance from the database. :returns: merged and sorted list of expected attributes """ if expected_attrs is None: expected_attrs = [] # NOTE(mriedem): We sort the list so we can have predictable test # results. return sorted(list(set(self._show_expected_attrs + expected_attrs))) def _show_from_down_cell(self, request, instance, show_extra_specs, show_server_groups): """Function that constructs the partial response for the instance.""" ret = { "server": { "id": instance.uuid, "status": "UNKNOWN", "tenant_id": instance.project_id, "created": utils.isotime(instance.created_at), "links": self._get_links( request, instance.uuid, self._collection_name), }, } if 'flavor' in instance: # If the key 'flavor' is present for an instance from a down cell # it means that the request is ``GET /servers/{server_id}`` and # thus we include the information from the request_spec of the # instance like its flavor, image, avz, and user_id in addition to # the basic information from its instance_mapping. 
# If 'flavor' key is not present for an instance from a down cell # down cell it means the request is ``GET /servers/detail`` and we # do not expose the flavor in the response when listing servers # with details for performance reasons of fetching it from the # request specs table for the whole list of instances. ret["server"]["image"] = self._get_image(request, instance) ret["server"]["flavor"] = self._get_flavor(request, instance, show_extra_specs) # in case availability zone was not requested by the user during # boot time, return UNKNOWN. avz = instance.availability_zone or "UNKNOWN" ret["server"]["OS-EXT-AZ:availability_zone"] = avz ret["server"]["OS-EXT-STS:power_state"] = instance.power_state # in case its an old request spec which doesn't have the user_id # data migrated, return UNKNOWN. ret["server"]["user_id"] = instance.user_id or "UNKNOWN" if show_server_groups: context = request.environ['nova.context'] ret['server']['server_groups'] = self._get_server_groups( context, instance) return ret @staticmethod def _get_host_status_unknown_only(context, instance=None): """We will use the unknown_only variable to tell us what host status we can show, if any: * unknown_only = False means we can show any host status. * unknown_only = True means that we can only show host status: UNKNOWN. If the host status is anything other than UNKNOWN, we will not include the host_status field in the response. * unknown_only = None means we cannot show host status at all and we will not include the host_status field in the response. """ unknown_only = None # Check show:host_status policy first because if it passes, we know we # can show any host status and need not check the more restrictive # show:host_status:unknown-only policy. # Keeping target as None (which means policy will default these target # to context.project_id) for now which is case of 'detail' API which # policy is default to system and project reader. target = None if instance is not None: target = {'project_id': instance.project_id} if context.can( servers_policies.SERVERS % 'show:host_status', fatal=False, target=target): unknown_only = False # If we are not allowed to show any/all host status, check if we can at # least show only the host status: UNKNOWN. elif context.can( servers_policies.SERVERS % 'show:host_status:unknown-only', fatal=False, target=target): unknown_only = True return unknown_only def _get_pinned_az(self, context, instance, provided_az): if provided_az is AZ_NOT_IN_REQUEST_SPEC: # Case the provided_az is pre fetched, but not specified pinned_az = None elif provided_az is not None: # Case the provided_az is pre fetched, and specified pinned_az = provided_az else: # Case the provided_az is not pre fethed. try: req_spec = objects.RequestSpec.get_by_instance_uuid( context, instance.uuid) pinned_az = req_spec.availability_zone except exception.RequestSpecNotFound: pinned_az = None return pinned_az def _get_scheduler_hints(self, context, instance, provided_sched_hints): if provided_sched_hints is SCHED_HINTS_NOT_IN_REQUEST_SPEC: # Case where it was pre fetched, but not specified sched_hints = None elif provided_sched_hints is not None: sched_hints = provided_sched_hints else: # Case the provided_az is not pre fethed. 
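            # As in _get_pinned_az() above, fall back to loading the
            # RequestSpec for this instance when no pre-fetched value was
            # supplied; scheduler hints default to an empty dict when the
            # RequestSpec does not exist.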
try: req_spec = objects.RequestSpec.get_by_instance_uuid( context, instance.uuid) sched_hints = req_spec.scheduler_hints except exception.RequestSpecNotFound: sched_hints = {} return sched_hints def show(self, request, instance, extend_address=True, show_extra_specs=None, show_AZ=True, show_config_drive=True, show_extended_attr=None, show_host_status=None, show_keypair=True, show_srv_usg=True, show_sec_grp=True, show_extended_status=True, show_extended_volumes=True, bdms=None, cell_down_support=False, show_server_groups=False, show_user_data=True, provided_az=None, provided_sched_hints=None): """Detailed view of a single instance.""" if show_extra_specs is None: # detail will pre-calculate this for us. If we're doing show, # then figure it out here. show_extra_specs = False if api_version_request.is_supported(request, '2.47'): context = request.environ['nova.context'] show_extra_specs = context.can( servers_policies.SERVERS % 'show:flavor-extra-specs', fatal=False, target={'project_id': instance.project_id}) if cell_down_support and 'display_name' not in instance: # NOTE(tssurya): If the microversion is >= 2.69, this boolean will # be true in which case we check if there are instances from down # cells (by checking if their objects have missing keys like # `display_name`) and return partial constructs based on the # information available from the nova_api database. return self._show_from_down_cell( request, instance, show_extra_specs, show_server_groups) ip_v4 = instance.get('access_ip_v4') ip_v6 = instance.get('access_ip_v6') server = { "server": { "id": instance["uuid"], "name": instance["display_name"], "status": self._get_vm_status(instance), "tenant_id": instance.get("project_id") or "", "user_id": instance.get("user_id") or "", "metadata": self._get_metadata(instance), "hostId": self._get_host_id(instance), "image": self._get_image(request, instance), "flavor": self._get_flavor(request, instance, show_extra_specs), "created": utils.isotime(instance["created_at"]), "updated": utils.isotime(instance["updated_at"]), "addresses": self._get_addresses(request, instance, extend_address), "accessIPv4": str(ip_v4) if ip_v4 is not None else '', "accessIPv6": str(ip_v6) if ip_v6 is not None else '', "links": self._get_links(request, instance["uuid"], self._collection_name), # NOTE(sdague): historically this was the # os-disk-config extension, but now that extensions # are gone, we merge these attributes here. "OS-DCF:diskConfig": ( 'AUTO' if instance.get('auto_disk_config') else 'MANUAL'), }, } if server["server"]["status"] in self._fault_statuses: _inst_fault = self._get_fault(request, instance) if _inst_fault: server['server']['fault'] = _inst_fault if server["server"]["status"] in self._progress_statuses: server["server"]["progress"] = instance.get("progress", 0) context = request.environ['nova.context'] if show_AZ: az = avail_zone.get_instance_availability_zone(context, instance) # NOTE(mriedem): The OS-EXT-AZ prefix should not be used for new # attributes after v2.1. They are only in v2.1 for backward compat # with v2.0. 
server["server"]["OS-EXT-AZ:availability_zone"] = az or '' if api_version_request.is_supported(request, '2.96'): pinned_az = self._get_pinned_az(context, instance, provided_az) server['server']['pinned_availability_zone'] = pinned_az if api_version_request.is_supported(request, '2.100'): server['server']['scheduler_hints'] = ( self._get_scheduler_hints( context, instance, provided_sched_hints)) if show_config_drive: server["server"]["config_drive"] = instance["config_drive"] if show_keypair: server["server"]["key_name"] = instance["key_name"] if show_srv_usg: for k in ['launched_at', 'terminated_at']: key = "OS-SRV-USG:" + k # NOTE(danms): Historically, this timestamp has been generated # merely by grabbing str(datetime) of a TZ-naive object. The # only way we can keep that with instance objects is to strip # the tzinfo from the stamp and str() it. server["server"][key] = (instance[k].replace(tzinfo=None) if instance[k] else None) if show_sec_grp: self._add_security_grps(request, [server["server"]], [instance]) if show_extended_attr is None: show_extended_attr = context.can( esa_policies.BASE_POLICY_NAME, fatal=False, target={'project_id': instance.project_id}) if show_extended_attr: properties = ['host', 'name', 'node'] if api_version_request.is_supported(request, '2.3'): # NOTE(mriedem): These will use the OS-EXT-SRV-ATTR prefix # below and that's OK for microversion 2.3 which is being # compatible with v2.0 for the ec2 API split out from Nova. # After this, however, new microversions should not be using # the OS-EXT-SRV-ATTR prefix. properties += ['reservation_id', 'launch_index', 'hostname', 'kernel_id', 'ramdisk_id', 'root_device_name'] # NOTE(gmann): Since microversion 2.75, PUT and Rebuild # response include all the server attributes including these # extended attributes also. But microversion 2.57 already # adding the 'user_data' in Rebuild response in API method. # so we will skip adding the user data attribute for rebuild # case. 'show_user_data' is false only in case of rebuild. if show_user_data: properties += ['user_data'] for attr in properties: if attr == 'name': key = "OS-EXT-SRV-ATTR:instance_%s" % attr elif attr == 'node': key = "OS-EXT-SRV-ATTR:hypervisor_hostname" else: # NOTE(mriedem): Nothing after microversion 2.3 should use # the OS-EXT-SRV-ATTR prefix for the attribute key name. key = "OS-EXT-SRV-ATTR:%s" % attr server["server"][key] = getattr(instance, attr) if show_extended_status: # NOTE(gmann): Removed 'locked_by' from extended status # to make it same as V2. If needed it can be added with # microversion. for state in ['task_state', 'vm_state', 'power_state']: # NOTE(mriedem): The OS-EXT-STS prefix should not be used for # new attributes after v2.1. They are only in v2.1 for backward # compat with v2.0. key = "%s:%s" % ('OS-EXT-STS', state) server["server"][key] = instance[state] if show_extended_volumes: # NOTE(mriedem): The os-extended-volumes prefix should not be used # for new attributes after v2.1. They are only in v2.1 for backward # compat with v2.0. 
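            # Each attached volume is rendered by _add_volumes_attachments()
            # later in this module roughly as (illustrative):
            #   {"id": "<volume uuid>"}                                  # < 2.3
            #   {"id": "<volume uuid>", "delete_on_termination": False}  # >= 2.3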
add_delete_on_termination = api_version_request.is_supported( request, '2.3') if bdms is None: bdms = objects.BlockDeviceMappingList.bdms_by_instance_uuid( context, [instance["uuid"]]) self._add_volumes_attachments(server["server"], bdms, add_delete_on_termination) if api_version_request.is_supported(request, '2.16'): if show_host_status is None: unknown_only = self._get_host_status_unknown_only( context, instance) # If we're not allowed by policy to show host status at all, # don't bother requesting instance host status from the compute # API. if unknown_only is not None: host_status = self.compute_api.get_instance_host_status( instance) # If we are allowed to show host status of some kind, set # the host status field only if: # * unknown_only = False, meaning we can show any status # OR # * if unknown_only = True and host_status == UNKNOWN if (not unknown_only or host_status == fields.HostStatus.UNKNOWN): server["server"]['host_status'] = host_status if api_version_request.is_supported(request, "2.9"): server["server"]["locked"] = (True if instance["locked_by"] else False) if api_version_request.is_supported(request, "2.73"): server["server"]["locked_reason"] = (instance.system_metadata.get( "locked_reason")) if api_version_request.is_supported(request, "2.19"): server["server"]["description"] = instance.get( "display_description") if api_version_request.is_supported(request, "2.26"): server["server"]["tags"] = [t.tag for t in instance.tags] if api_version_request.is_supported(request, "2.63"): trusted_certs = None if instance.trusted_certs: trusted_certs = instance.trusted_certs.ids server["server"]["trusted_image_certificates"] = trusted_certs # TODO(stephenfin): Remove this check once we remove the # OS-EXT-SRV-ATTR:hostname policy checks from the policy is Y or later if api_version_request.is_supported(request, '2.90'): # API 2.90 made this field visible to non-admins, but we only show # it if it's not already added if not show_extended_attr: server["server"]["OS-EXT-SRV-ATTR:hostname"] = \ instance.hostname if show_server_groups: server['server']['server_groups'] = self._get_server_groups( context, instance) return server def index(self, request, instances, cell_down_support=False): """Show a list of servers without many details.""" coll_name = self._collection_name return self._list_view(self.basic, request, instances, coll_name, False, cell_down_support=cell_down_support) def detail(self, request, instances, cell_down_support=False): """Detailed view of a list of instance.""" coll_name = self._collection_name + '/detail' context = request.environ['nova.context'] if api_version_request.is_supported(request, '2.47'): # Determine if we should show extra_specs in the inlined flavor # once before we iterate the list of instances show_extra_specs = context.can( servers_policies.SERVERS % 'show:flavor-extra-specs', fatal=False) else: show_extra_specs = False show_extended_attr = context.can( esa_policies.BASE_POLICY_NAME, fatal=False) instance_uuids = [inst['uuid'] for inst in instances] bdms = self._get_instance_bdms_in_multiple_cells(context, instance_uuids) # NOTE(gmann): pass show_sec_grp=False in _list_view() because # security groups for detail method will be added by separate # call to self._add_security_grps by passing the all servers # together. That help to avoid multiple neutron call for each server. servers_dict = self._list_view(self.show, request, instances, coll_name, show_extra_specs, show_extended_attr=show_extended_attr, # We process host_status in aggregate. 
show_host_status=False, show_sec_grp=False, bdms=bdms, cell_down_support=cell_down_support) if api_version_request.is_supported(request, '2.16'): unknown_only = self._get_host_status_unknown_only(context) # If we're not allowed by policy to show host status at all, don't # bother requesting instance host status from the compute API. if unknown_only is not None: self._add_host_status(list(servers_dict["servers"]), instances, unknown_only=unknown_only) self._add_security_grps(request, list(servers_dict["servers"]), instances) return servers_dict def _list_view(self, func, request, servers, coll_name, show_extra_specs, show_extended_attr=None, show_host_status=None, show_sec_grp=False, bdms=None, cell_down_support=False): """Provide a view for a list of servers. :param func: Function used to format the server data :param request: API request :param servers: List of servers in dictionary format :param coll_name: Name of collection, used to generate the next link for a pagination query :param show_extended_attr: If the server extended attributes should be included in the response dict. :param show_host_status: If the host status should be included in the response dict. :param show_sec_grp: If the security group should be included in the response dict. :param bdms: Instances bdms info from multiple cells. :param cell_down_support: True if the API (and caller) support returning a minimal instance construct if the relevant cell is down. :returns: Server data in dictionary format """ req_specs = None req_specs_dict = {} sched_hints_dict = {} if api_version_request.is_supported(request, '2.96'): context = request.environ['nova.context'] instance_uuids = [s.uuid for s in servers] req_specs = objects.RequestSpec.get_by_instance_uuids( context, instance_uuids) req_specs_dict.update({req.instance_uuid: req.availability_zone for req in req_specs if req.availability_zone is not None}) if api_version_request.is_supported(request, '2.100'): sched_hints_dict.update({ req.instance_uuid: req.scheduler_hints for req in req_specs if req.scheduler_hints is not None}) server_list = [ func(request, server, show_extra_specs=show_extra_specs, show_extended_attr=show_extended_attr, show_host_status=show_host_status, show_sec_grp=show_sec_grp, bdms=bdms, cell_down_support=cell_down_support, provided_az=req_specs_dict.get( server.uuid, AZ_NOT_IN_REQUEST_SPEC), provided_sched_hints=sched_hints_dict.get( server.uuid, SCHED_HINTS_NOT_IN_REQUEST_SPEC) )["server"] for server in servers # Filter out the fake marker instance created by the # fill_virtual_interface_list online data migration. if server.uuid != virtual_interface.FAKE_UUID] servers_links = self._get_collection_links(request, servers, coll_name) servers_dict = dict(servers=server_list) if servers_links: servers_dict["servers_links"] = servers_links return servers_dict @staticmethod def _get_metadata(instance): return instance.metadata or {} @staticmethod def _get_vm_status(instance): # If the instance is deleted the vm and task states don't really matter if instance.get("deleted"): return "DELETED" return common.status_from_state(instance.get("vm_state"), instance.get("task_state")) @staticmethod def _get_host_id(instance): host = instance.get("host") project = str(instance.get("project_id")) return utils.generate_hostid(host, project) def _get_addresses(self, request, instance, extend_address=False): # Hide server addresses while the server is building. 
if instance.vm_state == vm_states.BUILDING: return {} context = request.environ["nova.context"] networks = common.get_networks_for_instance(context, instance) return self._ips_builder.index( request, networks, extend_address, )["addresses"] def _get_image(self, request, instance): image_ref = instance["image_ref"] if image_ref: image_id = str(common.get_id_from_href(image_ref)) bookmark = self._image_builder._get_bookmark_link(request, image_id, "images") image = { "id": image_id, "links": [{ "rel": "bookmark", "href": bookmark, }], } if api_version_request.is_supported(request, '2.98'): prefix_len = len(utils.SM_IMAGE_PROP_PREFIX) # allow legacy names if that is what were stored in metadata std_fields = set(itertools.chain( objects.ImageMetaProps.fields.keys(), objects.ImageMetaProps._legacy_property_map.keys() )) image['properties'] = { key[prefix_len:]: value for key, value in instance.system_metadata.items() if (key.startswith(utils.SM_IMAGE_PROP_PREFIX) and key[prefix_len:] in std_fields and value is not None)} return image else: return "" def _get_flavor_dict(self, request, flavor, show_extra_specs): flavordict = { "vcpus": flavor.vcpus, "ram": flavor.memory_mb, "disk": flavor.root_gb, "ephemeral": flavor.ephemeral_gb, "swap": flavor.swap, "original_name": flavor.name } if show_extra_specs: flavordict['extra_specs'] = flavor.extra_specs return flavordict def _get_flavor(self, request, instance, show_extra_specs): flavor = instance.get_flavor() if not flavor: LOG.warning("Instance has had its flavor removed " "from the DB", instance=instance) return {} if api_version_request.is_supported(request, "2.47"): return self._get_flavor_dict(request, flavor, show_extra_specs) flavor_id = flavor["flavorid"] flavor_bookmark = self._flavor_builder._get_bookmark_link( request, flavor_id, "flavors") return { "id": str(flavor_id), "links": [{ "rel": "bookmark", "href": flavor_bookmark, }], } def _load_fault(self, request, instance): try: mapping = objects.InstanceMapping.get_by_instance_uuid( request.environ['nova.context'], instance.uuid) if mapping.cell_mapping is not None: with nova_context.target_cell(instance._context, mapping.cell_mapping): return instance.fault except exception.InstanceMappingNotFound: pass # NOTE(danms): No instance mapping at all, or a mapping with no cell, # which means a legacy environment or instance. return instance.fault def _get_fault(self, request, instance): if 'fault' in instance: fault = instance.fault else: fault = self._load_fault(request, instance) if not fault: return None fault_dict = { "code": fault["code"], "created": utils.isotime(fault["created_at"]), "message": fault["message"], } if fault.get('details', None): is_admin = False context = request.environ["nova.context"] if context: is_admin = getattr(context, 'is_admin', False) if is_admin or fault['code'] != 500: fault_dict['details'] = fault["details"] return fault_dict def _add_host_status(self, servers, instances, unknown_only=False): """Adds the ``host_status`` field to the list of servers This method takes care to filter instances from down cells since they do not have a host set and as such we cannot determine the host status. :param servers: list of detailed server dicts for the API response body; this list is modified by reference by updating the server dicts within the list :param instances: list of Instance objects :param unknown_only: whether to show only UNKNOWN host status """ # Filter out instances from down cells which do not have a host field. 
instances = [instance for instance in instances if 'host' in instance] # Get the dict, keyed by instance.uuid, of host status values. host_statuses = self.compute_api.get_instances_host_statuses(instances) for server in servers: # Filter out anything that is not in the resulting dict because # we had to filter the list of instances above for down cells. if server['id'] in host_statuses: host_status = host_statuses[server['id']] if unknown_only and host_status != fields.HostStatus.UNKNOWN: # Filter servers that are not allowed by policy to see # host_status values other than UNKNOWN. continue server['host_status'] = host_status def _add_security_grps(self, req, servers, instances, create_request=False): if not len(servers): return # If request is a POST create server we get the security groups # intended for an instance from the request. This is necessary because # the requested security groups for the instance have not yet been sent # to neutron. # Starting from microversion 2.75, security groups is returned in # PUT and POST Rebuild response also. if not create_request: context = req.environ['nova.context'] sg_instance_bindings = ( security_group_api.get_instances_security_groups_bindings( context, servers)) for server in servers: groups = sg_instance_bindings.get(server['id']) if groups: server['security_groups'] = groups # This section is for POST create server request. There can be # only one security group for POST create server request. else: # try converting to json req_obj = jsonutils.loads(req.body) # Add security group to server, if no security group was in # request add default since that is the group it is part of servers[0]['security_groups'] = req_obj['server'].get( 'security_groups', [{'name': 'default'}]) @staticmethod def _get_instance_bdms_in_multiple_cells(ctxt, instance_uuids): inst_maps = objects.InstanceMappingList.get_by_instance_uuids( ctxt, instance_uuids) cell_mappings = {} for inst_map in inst_maps: if (inst_map.cell_mapping is not None and inst_map.cell_mapping.uuid not in cell_mappings): cell_mappings.update( {inst_map.cell_mapping.uuid: inst_map.cell_mapping}) bdms = {} results = nova_context.scatter_gather_cells( ctxt, cell_mappings.values(), nova_context.CELL_TIMEOUT, objects.BlockDeviceMappingList.bdms_by_instance_uuid, instance_uuids) for cell_uuid, result in results.items(): if isinstance(result, Exception): LOG.warning('Failed to get block device mappings for cell %s', cell_uuid) elif result is nova_context.did_not_respond_sentinel: LOG.warning('Timeout getting block device mappings for cell ' '%s', cell_uuid) else: bdms.update(result) return bdms def _add_volumes_attachments(self, server, bdms, add_delete_on_termination): # server['id'] is guaranteed to be in the cache due to # the core API adding it in the 'detail' or 'show' method. # If that instance has since been deleted, it won't be in the # 'bdms' dictionary though, so use 'get' to avoid KeyErrors. instance_bdms = bdms.get(server['id'], []) volumes_attached = [] for bdm in instance_bdms: if bdm.get('volume_id'): volume_attached = {'id': bdm['volume_id']} if add_delete_on_termination: volume_attached['delete_on_termination'] = ( bdm['delete_on_termination']) volumes_attached.append(volume_attached) # NOTE(mriedem): The os-extended-volumes prefix should not be used for # new attributes after v2.1. They are only in v2.1 for backward compat # with v2.0. 
key = "os-extended-volumes:volumes_attached" server[key] = volumes_attached @staticmethod def _get_server_groups(context, instance): try: sg = objects.InstanceGroup.get_by_instance_uuid(context, instance.uuid) return [sg.uuid] except exception.InstanceGroupNotFound: return [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/views/usages.py0000664000175000017500000000205700000000000022743 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.openstack import common class ViewBuilder(common.ViewBuilder): _collection_name = "os-simple-tenant-usage" def get_links(self, request, server_usages, tenant_id=None): coll_name = self._collection_name if tenant_id: coll_name = self._collection_name + '/{}'.format(tenant_id) return self._get_collection_links( request, server_usages, coll_name, 'instance_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/views/versions.py0000664000175000017500000000573600000000000023333 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from nova.api.openstack import common def get_view_builder(req): base_url = req.application_url return ViewBuilder(base_url) class ViewBuilder(common.ViewBuilder): def __init__(self, base_url): """:param base_url: url of the root wsgi application.""" self.prefix = self._update_compute_link_prefix(base_url) self.base_url = base_url def build_choices(self, VERSIONS, req): version_objs = [] for version in sorted(VERSIONS): version = VERSIONS[version] version_objs.append({ "id": version['id'], "status": version['status'], "links": [ { "rel": "self", "href": self.generate_href(version['id'], req.path), }, ], "media-types": version['media-types'], }) return dict(choices=version_objs) def build_versions(self, versions): version_objs = [] for version in sorted(versions.keys()): version = versions[version] version_objs.append({ "id": version['id'], "status": version['status'], "version": version['version'], "min_version": version['min_version'], "updated": version['updated'], "links": self._build_links(version), }) return dict(versions=version_objs) def build_version(self, version): reval = copy.deepcopy(version) reval['links'].insert(0, { "rel": "self", "href": self.prefix.rstrip('/') + '/', }) return dict(version=reval) def _build_links(self, version_data): """Generate a container of links that refer to the provided version.""" href = self.generate_href(version_data['id']) links = [ { "rel": "self", "href": href, }, ] return links def generate_href(self, version, path=None): """Create an url that refers to a specific version_number.""" if version.find('v2.1') == 0: version_number = 'v2.1' else: version_number = 'v2' path = path or '' return common.url_join(self.prefix, version_number, path) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/virtual_interfaces.py0000664000175000017500000000256600000000000024215 0ustar00zuulzuul00000000000000# Copyright (C) 2011 Midokura KK # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The virtual interfaces extension.""" from webob import exc from nova.api.openstack.compute.schemas import virtual_interfaces as schema from nova.api.openstack import wsgi from nova.api import validation _removal_reason = """\ This API only works with *nova-network*, which was deprecated in the 14.0.0 (Newton) release. It fails with HTTP 404 starting from microversion 2.44. It was removed in the 18.0.0 (Rocky) release. 
""" @validation.validated class ServerVirtualInterfaceController(wsgi.Controller): @wsgi.expected_errors((410)) @wsgi.removed('18.0.0', _removal_reason) @validation.query_schema(schema.index_query) @validation.response_body_schema(schema.index_response) def index(self, req, server_id): raise exc.HTTPGone() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/volume_attachments.py0000664000175000017500000004035500000000000024224 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The volume attachments extension.""" from oslo_utils import strutils from webob import exc from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import volume_attachments as schema from nova.api.openstack import wsgi from nova.api import validation from nova.compute import api as compute from nova.compute import vm_states from nova import exception from nova.i18n import _ from nova import objects from nova.policies import volumes_attachments as va_policies from nova.volume import cinder def _translate_attachment_detail_view( bdm, show_tag=False, show_delete_on_termination=False, show_attachment_id_bdm_uuid=False, ): """Maps keys for attachment details view. :param bdm: BlockDeviceMapping object for an attached volume :param show_tag: True if the "tag" field should be in the response, False to exclude the "tag" field from the response :param show_delete_on_termination: True if the "delete_on_termination" field should be in the response, False to exclude the "delete_on_termination" field from the response :param show_attachment_id_bdm_uuid: True if the "attachment_id" and "bdm_uuid" fields should be in the response. Also controls when the "id" field is included. """ d = {} if not show_attachment_id_bdm_uuid: d['id'] = bdm.volume_id d['volumeId'] = bdm.volume_id d['serverId'] = bdm.instance_uuid if bdm.device_name: d['device'] = bdm.device_name if show_tag: d['tag'] = bdm.tag if show_delete_on_termination: d['delete_on_termination'] = bdm.delete_on_termination if show_attachment_id_bdm_uuid: d['attachment_id'] = bdm.attachment_id d['bdm_uuid'] = bdm.uuid return d def _check_request_version(req, min_version, method, server_id, server_state): if api_version_request.is_supported(req, min_version): return exc_inv = exception.InstanceInvalidState( attr='vm_state', instance_uuid=server_id, state=server_state, method=method) common.raise_http_conflict_for_instance_invalid_state( exc_inv, method, server_id) class VolumeAttachmentController(wsgi.Controller): """The volume attachment API controller for the OpenStack API. A child resource of the server. 
Note that we use the volume id as the ID of the attachment (though this is not guaranteed externally) """ def __init__(self): super().__init__() self.compute_api = compute.API() self.volume_api = cinder.API() @wsgi.expected_errors(404) @validation.query_schema(schema.index_query, '2.0', '2.74') @validation.query_schema(schema.index_query_v275, '2.75') def index(self, req, server_id): """Returns the list of volume attachments for a given instance.""" context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, server_id) context.can( va_policies.POLICY_ROOT % 'index', target={'project_id': instance.project_id}) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) limited_list = common.limited(bdms, req) results = [] show_tag = api_version_request.is_supported(req, '2.70') show_delete_on_termination = api_version_request.is_supported( req, '2.79') show_attachment_id_bdm_uuid = api_version_request.is_supported( req, '2.89') for bdm in limited_list: if bdm.volume_id: va = _translate_attachment_detail_view( bdm, show_tag=show_tag, show_delete_on_termination=show_delete_on_termination, show_attachment_id_bdm_uuid=show_attachment_id_bdm_uuid, ) results.append(va) return {'volumeAttachments': results} @wsgi.expected_errors(404) @validation.query_schema(schema.show_query) def show(self, req, server_id, id): """Return data about the given volume attachment.""" context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, server_id) context.can( va_policies.POLICY_ROOT % 'show', target={'project_id': instance.project_id}) volume_id = id try: bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( context, volume_id, instance.uuid) except exception.VolumeBDMNotFound: msg = _( "Instance %(instance)s is not attached " "to volume %(volume)s" ) % {'instance': server_id, 'volume': volume_id} raise exc.HTTPNotFound(explanation=msg) show_tag = api_version_request.is_supported(req, '2.70') show_delete_on_termination = api_version_request.is_supported( req, '2.79') show_attachment_id_bdm_uuid = api_version_request.is_supported( req, '2.89') return { 'volumeAttachment': _translate_attachment_detail_view( bdm, show_tag=show_tag, show_delete_on_termination=show_delete_on_termination, show_attachment_id_bdm_uuid=show_attachment_id_bdm_uuid, ) } # TODO(mriedem): This API should return a 202 instead of a 200 response. 
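    # NOTE: a minimal illustrative sketch of how the ``volumeAttachment``
    # entry built by _translate_attachment_detail_view() above grows with the
    # request microversion (field presence only, no concrete values implied):
    #
    #   < 2.70 : id, volumeId, serverId [, device]
    #   >= 2.70: ... plus tag
    #   >= 2.79: ... plus delete_on_termination
    #   >= 2.89: id is dropped; attachment_id and bdm_uuid are added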
@wsgi.expected_errors((400, 403, 404, 409)) @validation.schema(schema.create, '2.0', '2.48') @validation.schema(schema.create_v249, '2.49', '2.78') @validation.schema(schema.create_v279, '2.79') def create(self, req, server_id, body): """Attach a volume to an instance.""" context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, server_id) context.can( va_policies.POLICY_ROOT % 'create', target={'project_id': instance.project_id}) volume_id = body['volumeAttachment']['volumeId'] device = body['volumeAttachment'].get('device') tag = body['volumeAttachment'].get('tag') delete_on_termination = body['volumeAttachment'].get( 'delete_on_termination', False) if instance.vm_state in ( vm_states.SHELVED, vm_states.SHELVED_OFFLOADED, ): _check_request_version( req, '2.20', 'attach_volume', server_id, instance.vm_state) try: supports_multiattach = common.supports_multiattach_volume(req) device = self.compute_api.attach_volume( context, instance, volume_id, device, tag=tag, supports_multiattach=supports_multiattach, delete_on_termination=delete_on_termination) except exception.VolumeNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) except (exception.InstanceIsLocked, exception.DevicePathInUse) as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state( state_error, 'attach_volume', server_id) except ( exception.InvalidVolume, exception.InvalidDevicePath, exception.InvalidInput, exception.VolumeTaggedAttachNotSupported, exception.MultiattachNotSupportedOldMicroversion, exception.MultiattachToShelvedNotSupported, ) as e: raise exc.HTTPBadRequest(explanation=e.format_message()) except exception.TooManyDiskDevices as e: raise exc.HTTPForbidden(explanation=e.format_message()) # The attach is async # NOTE(mriedem): It would be nice to use # _translate_attachment_summary_view here but that does not include # the 'device' key if device is None or the empty string which would # be a backward incompatible change. attachment = {} attachment['id'] = volume_id attachment['serverId'] = server_id attachment['volumeId'] = volume_id attachment['device'] = device if api_version_request.is_supported(req, '2.70'): attachment['tag'] = tag if api_version_request.is_supported(req, '2.79'): attachment['delete_on_termination'] = delete_on_termination return {'volumeAttachment': attachment} def _update_volume_swap(self, req, instance, id, body): context = req.environ['nova.context'] old_volume_id = id try: old_volume = self.volume_api.get(context, old_volume_id) except exception.VolumeNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) if ( 'migration_status' not in old_volume or old_volume['migration_status'] in (None, '') ): message = ( f"volume {old_volume_id} is not migrating; this API " f"should only be called by Cinder") raise exc.HTTPConflict(explanation=message) new_volume_id = body['volumeAttachment']['volumeId'] try: new_volume = self.volume_api.get(context, new_volume_id) except exception.VolumeNotFound as e: # NOTE: This BadRequest is different from the above NotFound even # though the same VolumeNotFound exception. This is intentional # because new_volume_id is specified in a request body and if a # nonexistent resource in the body (not URI) the code should be # 400 Bad Request as API-WG guideline. On the other hand, # old_volume_id is specified with URI. So it is valid to return # NotFound response if that is not existent. 
raise exc.HTTPBadRequest(explanation=e.format_message()) try: self.compute_api.swap_volume( context, instance, old_volume, new_volume) except exception.VolumeBDMNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) except ( exception.InvalidVolume, exception.MultiattachSwapVolumeNotSupported, ) as e: raise exc.HTTPBadRequest(explanation=e.format_message()) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state( state_error, 'swap_volume', instance.uuid) def _update_volume_regular(self, req, instance, id, body): context = req.environ['nova.context'] att = body['volumeAttachment'] # NOTE(danms): We may be doing an update of regular parameters in # the midst of a swap operation, so to find the original BDM, we need # to use the old volume ID, which is the one in the path. volume_id = id try: bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( context, volume_id, instance.uuid) # NOTE(danms): The attachment id is just the (current) volume id if 'id' in att and att['id'] != volume_id: raise exc.HTTPBadRequest( explanation='The id property is not mutable') if 'serverId' in att and att['serverId'] != instance.uuid: raise exc.HTTPBadRequest( explanation='The serverId property is not mutable') if 'device' in att and att['device'] != bdm.device_name: raise exc.HTTPBadRequest( explanation='The device property is not mutable') if 'tag' in att and att['tag'] != bdm.tag: raise exc.HTTPBadRequest( explanation='The tag property is not mutable') if 'delete_on_termination' in att: bdm.delete_on_termination = strutils.bool_from_string( att['delete_on_termination'], strict=True) bdm.save() except exception.VolumeBDMNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) @wsgi.response(202) @wsgi.expected_errors((400, 404, 409)) @validation.schema(schema.update, '2.0', '2.84') @validation.schema(schema.update_v285, '2.85') def update(self, req, server_id, id, body): context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, server_id) attachment = body['volumeAttachment'] volume_id = attachment['volumeId'] only_swap = not api_version_request.is_supported(req, '2.85') # NOTE(brinzhang): If the 'volumeId' requested by the user is # different from the 'id' in the url path, or only swap is allowed by # the microversion, we should check the swap volume policy. # otherwise, check the volume update policy. 
if only_swap or id != volume_id: context.can(va_policies.POLICY_ROOT % 'swap', target={}) else: context.can( va_policies.POLICY_ROOT % 'update', target={'project_id': instance.project_id}) if only_swap: # NOTE(danms): Original behavior is always call swap on PUT self._update_volume_swap(req, instance, id, body) else: # NOTE(danms): New behavior is update any supported attachment # properties first, and then call swap if volumeId differs self._update_volume_regular(req, instance, id, body) if id != volume_id: self._update_volume_swap(req, instance, id, body) @wsgi.response(202) @wsgi.expected_errors((400, 403, 404, 409)) def delete(self, req, server_id, id): """Detach a volume from an instance.""" context = req.environ['nova.context'] instance = common.get_instance( self.compute_api, context, server_id, expected_attrs=['device_metadata']) context.can( va_policies.POLICY_ROOT % 'delete', target={'project_id': instance.project_id}) volume_id = id if instance.vm_state in ( vm_states.SHELVED, vm_states.SHELVED_OFFLOADED ): _check_request_version( req, '2.20', 'detach_volume', server_id, instance.vm_state) try: volume = self.volume_api.get(context, volume_id) except exception.VolumeNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) try: bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( context, volume_id, instance.uuid) except exception.VolumeBDMNotFound: msg = _( "Instance %(instance)s is not attached " "to volume %(volume)s" ) % {'instance': server_id, 'volume': volume_id} raise exc.HTTPNotFound(explanation=msg) if bdm.is_root: msg = _("Cannot detach a root device volume") raise exc.HTTPBadRequest(explanation=msg) try: self.compute_api.detach_volume(context, instance, volume) except exception.InvalidVolume as e: raise exc.HTTPBadRequest(explanation=e.format_message()) except exception.InvalidInput as e: raise exc.HTTPBadRequest(explanation=e.format_message()) except (exception.InstanceIsLocked, exception.ServiceUnavailable) as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state( state_error, 'detach_volume', server_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/volumes.py0000664000175000017500000001717600000000000022021 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
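# NOTE: a minimal illustrative sketch of the listing flow implemented by
# _items() further below; the names are the ones from this module, the
# comments are approximate.  Listings are proxied: every volume is fetched
# from Cinder and the result is sliced in memory by the Nova API layer:
#
#   volumes      = self.volume_api.get_all(context)   # full list from Cinder
#   limited_list = common.limited(volumes, req)       # in-API pagination
#   return {'volumes': [entity_maker(context, vol) for vol in limited_list]}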
"""The volumes extension.""" from webob import exc from nova.api.openstack.api_version_request \ import MAX_PROXY_API_SUPPORT_VERSION from nova.api.openstack import common from nova.api.openstack.compute.schemas import volumes as schema from nova.api.openstack import wsgi from nova.api import validation from nova import exception from nova.policies import volumes as vol_policies from nova.volume import cinder def _translate_volume_detail_view(context, vol): """Maps keys for volumes details view.""" return _translate_volume_summary_view(context, vol) def _translate_volume_summary_view(context, vol): """Maps keys for volumes summary view.""" d = {} d['id'] = vol['id'] d['status'] = vol['status'] d['size'] = vol['size'] d['availabilityZone'] = vol['availability_zone'] d['createdAt'] = vol['created_at'] if vol['attach_status'] == 'attached': # NOTE(ildikov): The attachments field in the volume info that # Cinder sends is converted to an OrderedDict with the # instance_uuid as key to make it easier for the multiattach # feature to check the required information. Multiattach will # be enable in the Nova API in Newton. # The format looks like the following: # attachments = {'instance_uuid': { # 'attachment_id': 'attachment_uuid', # 'mountpoint': '/dev/sda/ # } # } attachment = list(vol['attachments'].items())[0] d['attachments'] = [ { 'id': vol['id'], 'volumeId': vol['id'], 'serverId': attachment[0], } ] mountpoint = attachment[1].get('mountpoint') if mountpoint: d['attachments'][0]['device'] = mountpoint else: d['attachments'] = [{}] d['displayName'] = vol['display_name'] d['displayDescription'] = vol['display_description'] if vol['volume_type_id'] and vol.get('volume_type'): d['volumeType'] = vol['volume_type']['name'] else: d['volumeType'] = vol['volume_type_id'] d['snapshotId'] = vol['snapshot_id'] if vol.get('volume_metadata'): d['metadata'] = vol.get('volume_metadata') else: d['metadata'] = {} return d class VolumeController(wsgi.Controller): """The Volumes API controller for the OpenStack API.""" def __init__(self): super().__init__() self.volume_api = cinder.API() @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(404) @validation.query_schema(schema.show_query) def show(self, req, id): """Return data about the given volume.""" context = req.environ['nova.context'] context.can( vol_policies.POLICY_NAME % 'show', target={'project_id': context.project_id}) try: vol = self.volume_api.get(context, id) except exception.VolumeNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) return {'volume': _translate_volume_detail_view(context, vol)} @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.response(202) @wsgi.expected_errors((400, 404)) def delete(self, req, id): """Delete a volume.""" context = req.environ['nova.context'] context.can( vol_policies.POLICY_NAME % 'delete', target={'project_id': context.project_id}) try: self.volume_api.delete(context, id) except exception.InvalidInput as e: raise exc.HTTPBadRequest(explanation=e.format_message()) except exception.VolumeNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(()) @validation.query_schema(schema.index_query) def index(self, req): """Returns a summary list of volumes.""" context = req.environ['nova.context'] context.can( vol_policies.POLICY_NAME % 'list', target={'project_id': context.project_id}) return self._items(req, entity_maker=_translate_volume_summary_view) 
@wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(()) @validation.query_schema(schema.detail_query) def detail(self, req): """Returns a detailed list of volumes.""" context = req.environ['nova.context'] context.can( vol_policies.POLICY_NAME % 'detail', target={'project_id': context.project_id}) return self._items(req, entity_maker=_translate_volume_detail_view) def _items(self, req, entity_maker): """Returns a list of volumes, transformed through entity_maker.""" context = req.environ['nova.context'] volumes = self.volume_api.get_all(context) limited_list = common.limited(volumes, req) res = [entity_maker(context, vol) for vol in limited_list] return {'volumes': res} @wsgi.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors((400, 403, 404)) @validation.schema(schema.create) def create(self, req, body): """Creates a new volume.""" context = req.environ['nova.context'] context.can(vol_policies.POLICY_NAME % 'create', target={'project_id': context.project_id}) vol = body['volume'] vol_type = vol.get('volume_type') metadata = vol.get('metadata') snapshot_id = vol.get('snapshot_id', None) if snapshot_id is not None: try: snapshot = self.volume_api.get_snapshot(context, snapshot_id) except exception.SnapshotNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) else: snapshot = None size = vol.get('size', None) if size is None and snapshot is not None: size = snapshot['volume_size'] availability_zone = vol.get('availability_zone') try: new_volume = self.volume_api.create( context, size, vol.get('display_name'), vol.get('display_description'), snapshot=snapshot, volume_type=vol_type, metadata=metadata, availability_zone=availability_zone) except exception.InvalidInput as err: raise exc.HTTPBadRequest(explanation=err.format_message()) except exception.OverQuota as err: raise exc.HTTPForbidden(explanation=err.format_message()) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. retval = _translate_volume_detail_view(context, dict(new_volume)) result = {'volume': retval} location = '%s/%s' % (req.url, new_volume['id']) return wsgi.ResponseObject(result, headers=dict(location=location)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/compute/wsgi.py0000664000175000017500000000141500000000000021265 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""WSGI application entry-point for Nova Compute API, installed by pbr.""" from nova.api.openstack import wsgi_app NAME = "osapi_compute" def init_application(): return wsgi_app.init_application(NAME) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/identity.py0000664000175000017500000000615600000000000020500 0ustar00zuulzuul00000000000000# Copyright 2017 IBM # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import exceptions as kse from oslo_log import log as logging import webob from nova.i18n import _ from nova import utils LOG = logging.getLogger(__name__) def verify_project_id(context, project_id): """verify that a project_id exists. This attempts to verify that a project id exists. If it does not, an HTTPBadRequest is emitted. Also HTTPBadRequest is emitted if Keystone identity service version 3.0 is not found. """ adap = utils.get_ksa_adapter( 'identity', ksa_auth=context.get_auth_plugin(), min_version=(3, 0), max_version=(3, 'latest')) try: resp = adap.get('/projects/%s' % project_id) except kse.EndpointNotFound: LOG.error( "Keystone identity service version 3.0 was not found. This " "might be caused by Nova misconfiguration or Keystone " "problems.") msg = _("Nova was unable to find Keystone service endpoint.") # TODO(astupnik). It may be reasonable to switch to HTTP 503 # (HTTP Service Unavailable) instead of HTTP Bad Request here. # If proper Keystone service is inaccessible, then technially # this is a server side error and not an error in Nova. raise webob.exc.HTTPBadRequest(explanation=msg) except kse.ClientException: # something is wrong, like there isn't a keystone v3 endpoint, # or nova isn't configured for the interface to talk to it; # we'll take the pass and default to everything being ok. LOG.info("Unable to contact keystone to verify project_id") return True if resp: # All is good with this 20x status return True elif resp.status_code == 404: # we got access, and we know this project is not there msg = _("Project ID %s is not a valid project.") % project_id raise webob.exc.HTTPBadRequest(explanation=msg) elif resp.status_code == 403: # we don't have enough permission to verify this, so default # to "it's ok". 
LOG.info( "Insufficient permissions for user %(user)s to verify " "existence of project_id %(pid)s", {"user": context.user_id, "pid": project_id}) return True else: LOG.warning( "Unexpected response from keystone trying to " "verify project_id %(pid)s - resp: %(code)s %(content)s", {"pid": project_id, "code": resp.status_code, "content": resp.content}) # realize we did something wrong, but move on with a warning return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/requestlog.py0000664000175000017500000000645600000000000021044 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Simple middleware for request logging.""" import time from oslo_log import log as logging from oslo_utils import excutils import webob.dec import webob.exc from nova.api.openstack import wsgi from nova.api import wsgi as base_wsgi import nova.conf CONF = nova.conf.CONF # TODO(sdague) maybe we can use a better name here for the logger LOG = logging.getLogger(__name__) class RequestLog(base_wsgi.Middleware): """WSGI Middleware to write a simple request log to. Borrowed from Paste Translogger """ # This matches the placement fil _log_format = ('%(REMOTE_ADDR)s "%(REQUEST_METHOD)s %(REQUEST_URI)s" ' 'status: %(status)s len: %(len)s ' 'microversion: %(microversion)s time: %(time).6f') @staticmethod def _get_uri(environ): req_uri = (environ.get('SCRIPT_NAME', '') + environ.get('PATH_INFO', '')) if environ.get('QUERY_STRING'): req_uri += '?' + environ['QUERY_STRING'] return req_uri @staticmethod def _should_emit(req): """Conditions under which we should skip logging. If we detect we are running under eventlet wsgi processing, we already are logging things, let it go. This is broken out as a separate method so that it can be easily adjusted for testing. """ if req.environ.get('eventlet.input', None) is not None: return False return True def _log_req(self, req, res, start): if not self._should_emit(req): return # in the event that things exploded really badly deeper in the # wsgi stack, res is going to be an empty dict for the # fallback logging. So never count on it having attributes. 
status = getattr(res, "status", "500 Error").split(None, 1)[0] remote_address = req.environ.get('REMOTE_ADDR', '-') data = { 'REMOTE_ADDR': remote_address, 'REQUEST_METHOD': req.environ['REQUEST_METHOD'], 'REQUEST_URI': self._get_uri(req.environ), 'status': status, 'len': getattr(res, "content_length", 0), 'time': time.time() - start, 'microversion': '-' } # set microversion if it exists if not req.api_version_request.is_null(): data["microversion"] = req.api_version_request.get_string() LOG.info(self._log_format, data) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): res = {} start = time.time() try: res = req.get_response(self.application) self._log_req(req, res, start) return res except Exception: with excutils.save_and_reraise_exception(): self._log_req(req, res, start) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/urlmap.py0000664000175000017500000002651600000000000020151 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from urllib import request as urllib2 from oslo_log import log as logging import paste.urlmap from nova.api.openstack import wsgi LOG = logging.getLogger(__name__) _quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"' _option_header_piece_re = re.compile(r';\s*([^\s;=]+|%s)\s*' r'(?:=\s*([^;]+|%s))?\s*' % (_quoted_string_re, _quoted_string_re)) def unquote_header_value(value): """Unquotes a header value. This does not use the real unquoting but what browsers are actually using for quoting. :param value: the header value to unquote. """ if value and value[0] == value[-1] == '"': # this is not the real unquoting, but fixing this so that the # RFC is met will result in bugs with internet explorer and # probably some other browsers as well. IE for example is # uploading files with "C:\foo\bar.txt" as filename value = value[1:-1] return value def parse_list_header(value): """Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. The return value is a standard :class:`list`: >>> parse_list_header('token, "quoted value"') ['token', 'quoted value'] :param value: a string with a list header. :return: :class:`list` """ result = [] for item in urllib2.parse_http_list(value): if item[:1] == item[-1:] == '"': item = unquote_header_value(item[1:-1]) result.append(item) return result def parse_options_header(value): """Parse a ``Content-Type`` like header into a tuple with the content type and the options: >>> parse_options_header('Content-Type: text/html; mimetype=text/html') ('Content-Type:', {'mimetype': 'text/html'}) :param value: the header to parse. 
:return: (str, options) """ def _tokenize(string): for match in _option_header_piece_re.finditer(string): key, value = match.groups() key = unquote_header_value(key) if value is not None: value = unquote_header_value(value) yield key, value if not value: return '', {} parts = _tokenize(';' + value) name = next(parts)[0] extra = dict(parts) return name, extra class Accept(object): def __init__(self, value): self._content_types = [parse_options_header(v) for v in parse_list_header(value)] def best_match(self, supported_content_types): # FIXME: Should we have a more sophisticated matching algorithm that # takes into account the version as well? best_quality = -1 best_content_type = None best_params = {} best_match = '*/*' for content_type in supported_content_types: for content_mask, params in self._content_types: try: quality = float(params.get('q', 1)) except ValueError: continue if quality < best_quality: continue elif best_quality == quality: if best_match.count('*') <= content_mask.count('*'): continue if self._match_mask(content_mask, content_type): best_quality = quality best_content_type = content_type best_params = params best_match = content_mask return best_content_type, best_params def _match_mask(self, mask, content_type): if '*' not in mask: return content_type == mask if mask == '*/*': return True mask_major = mask[:-2] content_type_major = content_type.split('/', 1)[0] return content_type_major == mask_major def urlmap_factory(loader, global_conf, **local_conf): if 'not_found_app' in local_conf: not_found_app = local_conf.pop('not_found_app') else: not_found_app = global_conf.get('not_found_app') if not_found_app: not_found_app = loader.get_app(not_found_app, global_conf=global_conf) urlmap = URLMap(not_found_app=not_found_app) for path, app_name in local_conf.items(): path = paste.urlmap.parse_path_expression(path) app = loader.get_app(app_name, global_conf=global_conf) urlmap[path] = app return urlmap class URLMap(paste.urlmap.URLMap): def _match(self, host, port, path_info): """Find longest match for a given URL path.""" for (domain, app_url), app in self.applications: if domain and domain != host and domain != host + ':' + port: continue # Rudimentary "wildcard" support: # By declaring a urlmap path ending in '/+', you're saying the # incoming path must start with everything up to and including the # '/' *and* have something after that as well. For example, path # /foo/bar/+ will match /foo/bar/baz, but not /foo/bar/ or /foo/bar # NOTE(efried): This assumes we'll never need a path URI component # that legitimately starts with '+'. (We could use a # more obscure character/sequence here in that case.) if app_url.endswith('/+'): # Must be requesting at least the path element (including /) if not path_info.startswith(app_url[:-1]): continue # ...but also must be requesting something after that / if len(path_info) < len(app_url): continue # Trim the /+ off the app_url to make it look "normal" for e.g. # proper splitting of SCRIPT_NAME and PATH_INFO. 
return app, app_url[:-2] # Normal (non-wildcarded) prefix match if (path_info == app_url or path_info.startswith(app_url + '/')): return app, app_url return None, None def _set_script_name(self, app, app_url): def wrap(environ, start_response): environ['SCRIPT_NAME'] += app_url return app(environ, start_response) return wrap def _munge_path(self, app, path_info, app_url): def wrap(environ, start_response): environ['SCRIPT_NAME'] += app_url environ['PATH_INFO'] = path_info[len(app_url):] return app(environ, start_response) return wrap def _path_strategy(self, host, port, path_info): """Check path suffix for MIME type and path prefix for API version.""" mime_type = app = app_url = None parts = path_info.rsplit('.', 1) if len(parts) > 1: possible_type = 'application/' + parts[1] if possible_type in wsgi.get_supported_content_types(): mime_type = possible_type parts = path_info.split('/') if len(parts) > 1: possible_app, possible_app_url = self._match(host, port, path_info) # Don't use prefix if it ends up matching default if possible_app and possible_app_url: app_url = possible_app_url app = self._munge_path(possible_app, path_info, app_url) return mime_type, app, app_url def _content_type_strategy(self, host, port, environ): """Check Content-Type header for API version.""" app = None params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1] if 'version' in params: app, app_url = self._match(host, port, '/v' + params['version']) if app: app = self._set_script_name(app, app_url) return app def _accept_strategy(self, host, port, environ, supported_content_types): """Check Accept header for best matching MIME type and API version.""" accept = Accept(environ.get('HTTP_ACCEPT', '')) app = None # Find the best match in the Accept header mime_type, params = accept.best_match(supported_content_types) if 'version' in params: app, app_url = self._match(host, port, '/v' + params['version']) if app: app = self._set_script_name(app, app_url) return mime_type, app def __call__(self, environ, start_response): host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower() if ':' in host: host, port = host.split(':', 1) else: if environ['wsgi.url_scheme'] == 'http': port = '80' else: port = '443' path_info = environ['PATH_INFO'] path_info = self.normalize_url(path_info, False)[1] # The MIME type for the response is determined in one of two ways: # 1) URL path suffix (eg /servers/detail.json) # 2) Accept header (eg application/json;q=0.8, application/xml;q=0.2) # The API version is determined in one of three ways: # 1) URL path prefix (eg /v1.1/tenant/servers/detail) # 2) Content-Type header (eg application/json;version=1.1) # 3) Accept header (eg application/json;q=0.8;version=1.1) supported_content_types = list(wsgi.get_supported_content_types()) mime_type, app, app_url = self._path_strategy(host, port, path_info) # Accept application/atom+xml for the index query of each API # version mount point as well as the root index if (app_url and app_url + '/' == path_info) or path_info == '/': supported_content_types.append('application/atom+xml') if not app: app = self._content_type_strategy(host, port, environ) if not mime_type or not app: possible_mime_type, possible_app = self._accept_strategy( host, port, environ, supported_content_types) if possible_mime_type and not mime_type: mime_type = possible_mime_type if possible_app and not app: app = possible_app if not mime_type: mime_type = 'application/json' if not app: # Didn't match a particular version, probably matches default app, app_url = 
self._match(host, port, path_info) if app: app = self._munge_path(app, path_info, app_url) if app: environ['nova.best_content_type'] = mime_type return app(environ, start_response) LOG.debug('Could not find application for %s', environ['PATH_INFO']) environ['paste.urlmap_object'] = self return self.not_found_application(environ, start_response) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/wsgi.py0000664000175000017500000010210400000000000017606 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import typing as ty import microversion_parse from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import encodeutils from oslo_utils import strutils import webob from nova.api.openstack import api_version_request from nova.api import wsgi from nova import exception from nova import i18n from nova.i18n import _ from nova import version LOG = logging.getLogger(__name__) _SUPPORTED_CONTENT_TYPES = ( 'application/json', 'application/vnd.openstack.compute+json', ) # These are typically automatically created by routes as either defaults # collection or member methods. _ROUTES_METHODS = [ 'create', 'delete', 'show', 'update', ] _METHODS_WITH_BODY = [ 'POST', 'PUT', ] # The default api version request if none is requested in the headers # Note(cyeoh): This only applies for the v2.1 API once microversions # support is fully merged. It does not affect the V2 API. DEFAULT_API_VERSION = "2.1" # Names of headers used by clients to request a specific version # of the REST API API_VERSION_REQUEST_HEADER = 'OpenStack-API-Version' LEGACY_API_VERSION_REQUEST_HEADER = 'X-OpenStack-Nova-API-Version' ENV_LEGACY_V2 = 'openstack.legacy_v2' def get_supported_content_types(): return _SUPPORTED_CONTENT_TYPES class Request(wsgi.Request): """Add some OpenStack API-specific logic to the base webob.Request.""" def __init__(self, *args, **kwargs): super(Request, self).__init__(*args, **kwargs) if not hasattr(self, 'api_version_request'): self.api_version_request = api_version_request.APIVersionRequest() def best_match_content_type(self): """Determine the requested response content-type.""" if 'nova.best_content_type' not in self.environ: # Calculate the best MIME type content_type = None # Check URL path suffix parts = self.path.rsplit('.', 1) if len(parts) > 1: possible_type = 'application/' + parts[1] if possible_type in get_supported_content_types(): content_type = possible_type if not content_type: best_matches = self.accept.acceptable_offers( get_supported_content_types()) if best_matches: content_type = best_matches[0][0] self.environ['nova.best_content_type'] = (content_type or 'application/json') return self.environ['nova.best_content_type'] def get_content_type(self): """Determine content type of the request body. 
Does not do any body introspection, only checks header """ if "Content-Type" not in self.headers: return None content_type = self.content_type # NOTE(markmc): text/plain is the default for eventlet and # other webservers which use mimetools.Message.gettype() # whereas twisted defaults to ''. if not content_type or content_type == 'text/plain': return None if content_type not in get_supported_content_types(): raise exception.InvalidContentType(content_type=content_type) return content_type def best_match_language(self): """Determine the best available language for the request. :returns: the best language match or None if the 'Accept-Language' header was not available in the request. """ if not self.accept_language: return None # NOTE(takashin): To decide the default behavior, 'default' is # preferred over 'default_tag' because that is return as it is when # no match. This is also little tricky that 'default' value cannot be # None. At least one of default_tag or default must be supplied as # an argument to the method, to define the defaulting behavior. # So passing a sentinel value to return None from this function. best_match = self.accept_language.lookup( i18n.get_available_languages(), default='fake_LANG') if best_match == 'fake_LANG': best_match = None return best_match def set_api_version_request(self): """Set API version request based on the request header information.""" hdr_string = microversion_parse.get_version( self.headers, service_type='compute', legacy_headers=[LEGACY_API_VERSION_REQUEST_HEADER]) if hdr_string is None: self.api_version_request = api_version_request.APIVersionRequest( api_version_request.DEFAULT_API_VERSION) elif hdr_string == 'latest': # 'latest' is a special keyword which is equivalent to # requesting the maximum version of the API supported self.api_version_request = api_version_request.max_api_version() else: self.api_version_request = api_version_request.APIVersionRequest( hdr_string) # Check that the version requested is within the global # minimum/maximum of supported API versions if not self.api_version_request.matches( api_version_request.min_api_version(), api_version_request.max_api_version()): raise exception.InvalidGlobalAPIVersion( req_ver=self.api_version_request.get_string(), min_ver=api_version_request.min_api_version().get_string(), max_ver=api_version_request.max_api_version().get_string()) def set_legacy_v2(self): self.environ[ENV_LEGACY_V2] = True def is_legacy_v2(self): return self.environ.get(ENV_LEGACY_V2, False) class ActionDispatcher(object): """Maps method name to local methods through action name.""" def dispatch(self, *args, **kwargs): """Find and call local method.""" action = kwargs.pop('action', 'default') action_method = getattr(self, str(action), self.default) return action_method(*args, **kwargs) def default(self, data): raise NotImplementedError() class JSONDeserializer(ActionDispatcher): def _from_json(self, datastring): try: return jsonutils.loads(datastring) except ValueError: msg = _("cannot understand JSON") raise exception.MalformedRequestBody(reason=msg) def deserialize(self, datastring, action='default'): return self.dispatch(datastring, action=action) def default(self, datastring): return {'body': self._from_json(datastring)} class JSONDictSerializer(ActionDispatcher): """Default JSON request body serialization.""" def serialize(self, data, action='default'): return self.dispatch(data, action=action) def default(self, data): return str(jsonutils.dumps(data)) class WSGICodes: """A microversion-aware WSGI code 
decorator. Allow definition and retrieval of WSGI return codes on a microversion-aware basis. """ def __init__(self) -> None: self._codes: list[tuple[int, ty.Optional[str], ty.Optional[str]]] = [] def add_code( self, code: tuple[int, ty.Optional[str], ty.Optional[str]] ) -> None: self._codes.append(code) def __call__(self, req: Request) -> int: ver = req.api_version_request for code, min_version, max_version in self._codes: min_ver = api_version_request.APIVersionRequest(min_version) max_ver = api_version_request.APIVersionRequest(max_version) if ver.matches(min_ver, max_ver): return code LOG.error("Unknown return code in API method") msg = _("Unknown return code in API method") raise webob.exc.HTTPInternalServerError(explanation=msg) def response( code: int, min_version: ty.Optional[str] = None, max_version: ty.Optional[str] = None, ): """Attaches response code to a method. This decorator associates a response code with a method. Note that the function attributes are directly manipulated; the method is not wrapped. """ def decorator(func): if not hasattr(func, 'wsgi_codes'): func.wsgi_codes = WSGICodes() func.wsgi_codes.add_code((code, min_version, max_version)) return func return decorator class ResponseObject(object): """Bundles a response object Object that app methods may return in order to allow its response to be modified by extensions in the code. Its use is optional (and should only be used if you really know what you are doing). """ def __init__(self, obj, code=None, headers=None): """Builds a response object.""" self.obj = obj self._default_code = 200 self._code = code self._headers = headers or {} self.serializer = JSONDictSerializer() def __getitem__(self, key): """Retrieves a header with the given name.""" return self._headers[key.lower()] def __setitem__(self, key, value): """Sets a header with the given name to the given value.""" self._headers[key.lower()] = value def __delitem__(self, key): """Deletes the header with the given name.""" del self._headers[key.lower()] def serialize(self, request, content_type): """Serializes the wrapped object. Utility method for serializing the wrapped object. Returns a webob.Response object. Header values are set to the appropriate Python type and encoding demanded by PEP 3333: whatever the native str type is. """ serializer = self.serializer body = None if self.obj is not None: body = serializer.serialize(self.obj) response = webob.Response(body=body) response.status_int = self.code for hdr, val in self._headers.items(): # In Py3.X Headers must be a str that was first safely # encoded to UTF-8 (to catch any bad encodings) and then # decoded back to a native str. response.headers[hdr] = encodeutils.safe_decode( encodeutils.safe_encode(val)) # Deal with content_type if not isinstance(content_type, str): content_type = str(content_type) # In Py3.X Headers must be a str. response.headers['Content-Type'] = encodeutils.safe_decode( encodeutils.safe_encode(content_type)) return response @property def code(self): """Retrieve the response status.""" return self._code or self._default_code @property def headers(self): """Retrieve the headers.""" return self._headers.copy() def action_peek(body): """Determine action to invoke. This looks inside the json body and fetches out the action method name. """ try: decoded = jsonutils.loads(body) except ValueError: msg = _("cannot understand JSON") raise exception.MalformedRequestBody(reason=msg) # Make sure there's exactly one key... 
if len(decoded) != 1: msg = _("too many body keys") raise exception.MalformedRequestBody(reason=msg) # Return the action name return list(decoded.keys())[0] class ResourceExceptionHandler(object): """Context manager to handle Resource exceptions. Used when processing exceptions generated by API implementation methods. Converts most exceptions to Fault exceptions, with the appropriate logging. """ def __enter__(self): return None def __exit__(self, ex_type, ex_value, ex_traceback): if not ex_value: return True if isinstance(ex_value, exception.Forbidden): raise Fault(webob.exc.HTTPForbidden( explanation=ex_value.format_message())) elif isinstance(ex_value, exception.VersionNotFoundForAPIMethod): raise elif isinstance(ex_value, exception.Invalid): raise Fault(exception.ConvertedException( code=ex_value.code, explanation=ex_value.format_message())) elif isinstance(ex_value, TypeError): exc_info = (ex_type, ex_value, ex_traceback) LOG.error('Exception handling resource: %s', ex_value, exc_info=exc_info) raise Fault(webob.exc.HTTPBadRequest()) elif isinstance(ex_value, Fault): LOG.info("Fault thrown: %s", ex_value) raise ex_value elif isinstance(ex_value, webob.exc.HTTPException): LOG.info("HTTP exception thrown: %s", ex_value) raise Fault(ex_value) # We didn't handle the exception return False class Resource(wsgi.Application): """WSGI app that handles (de)serialization and controller dispatch. WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method upon its controller. All controller action methods must accept a 'req' argument, which is the incoming wsgi.Request. If the operation is a PUT or POST, the controller method must also accept a 'body' argument (the deserialized request body). They may raise a webob.exc exception or return a dict, which will be serialized by requested content type. Exceptions derived from webob.exc.HTTPException will be automatically wrapped in Fault() to provide API friendly error responses. 
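As a purely illustrative sketch (method result and exception assumed, not
taken from a real controller): an action method returning
``{'server': {'id': '...'}}`` is serialized into the JSON response body by
this class, while one raising ``webob.exc.HTTPNotFound`` is wrapped in
``Fault`` and rendered as a JSON error document with a matching status code.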
""" support_api_request_version = True def __init__(self, controller): """:param controller: object that implement methods created by routes lib """ self.controller = controller self.sub_controllers = [] self.default_serializers = dict(json=JSONDictSerializer) # Copy over the actions dictionary self.wsgi_actions = {} if controller: self.register_actions(controller) def register_actions(self, controller): """Registers controller actions with this resource.""" actions = getattr(controller, 'wsgi_actions', {}) for key, method_name in actions.items(): self.wsgi_actions[key] = getattr(controller, method_name) def register_subcontroller_actions(self, sub_controller): """Registers sub-controller actions with this resource.""" self.sub_controllers.append(sub_controller) actions = getattr(sub_controller, 'wsgi_actions', {}) for key, method_name in actions.items(): self.wsgi_actions[key] = getattr(sub_controller, method_name) def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" # NOTE(Vek): Check for get_action_args() override in the # controller if hasattr(self.controller, 'get_action_args'): return self.controller.get_action_args(request_environment) try: args = request_environment['wsgiorg.routing_args'][1].copy() except (KeyError, IndexError, AttributeError): return {} try: del args['controller'] except KeyError: pass try: del args['format'] except KeyError: pass return args def get_body(self, request): content_type = request.get_content_type() return content_type, request.body def deserialize(self, body): return JSONDeserializer().deserialize(body) def _should_have_body(self, request): return request.method in _METHODS_WITH_BODY @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """WSGI method that controls (de)serialization and method dispatch.""" if self.support_api_request_version: # Set the version of the API requested based on the header try: request.set_api_version_request() except exception.InvalidAPIVersionString as e: return Fault(webob.exc.HTTPBadRequest( explanation=e.format_message())) except exception.InvalidGlobalAPIVersion as e: return Fault(webob.exc.HTTPNotAcceptable( explanation=e.format_message())) # Identify the action, its arguments, and the requested # content type action_args = self.get_action_args(request.environ) action = action_args.pop('action', None) # NOTE(sdague): we filter out InvalidContentTypes early so we # know everything is good from here on out. try: content_type, body = self.get_body(request) accept = request.best_match_content_type() except exception.InvalidContentType: msg = _("Unsupported Content-Type") return Fault(webob.exc.HTTPUnsupportedMediaType(explanation=msg)) # NOTE(Vek): Splitting the function up this way allows for # auditing by external tools that wrap the existing # function. If we try to audit __call__(), we can # run into troubles due to the @webob.dec.wsgify() # decorator. 
return self._process_stack(request, action, action_args, content_type, body, accept) def _process_stack(self, request, action, action_args, content_type, body, accept): """Implement the processing stack.""" # Get the implementing method try: meth = self.get_method(request, action, content_type, body) except (AttributeError, TypeError): return Fault(webob.exc.HTTPNotFound()) except KeyError as ex: msg = _("There is no such action: %s") % ex.args[0] return Fault(webob.exc.HTTPBadRequest(explanation=msg)) except exception.MalformedRequestBody: msg = _("Malformed request body") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) if body: msg = _("Action: '%(action)s', calling method: %(meth)s, body: " "%(body)s") % {'action': action, 'body': str(body, 'utf-8'), 'meth': str(meth)} LOG.debug(strutils.mask_password(msg)) else: LOG.debug("Calling method '%(meth)s'", {'meth': str(meth)}) # Now, deserialize the request body... try: contents = self._get_request_content(body, request) except exception.MalformedRequestBody: msg = _("Malformed request body") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) # Update the action args action_args.update(contents) project_id = action_args.pop("project_id", None) context = request.environ.get('nova.context') if (context and project_id and (project_id != context.project_id)): msg = _("Malformed request URL: URL's project_id '%(project_id)s'" " doesn't match Context's project_id" " '%(context_project_id)s'") % \ {'project_id': project_id, 'context_project_id': context.project_id} return Fault(webob.exc.HTTPBadRequest(explanation=msg)) response = None try: with ResourceExceptionHandler(): action_result = self.dispatch(meth, request, action_args) except Fault as ex: response = ex if not response: # No exceptions; convert action_result into a # ResponseObject resp_obj = None if type(action_result) is dict or action_result is None: resp_obj = ResponseObject(action_result) elif isinstance(action_result, ResponseObject): resp_obj = action_result else: response = action_result # Run post-processing extensions if resp_obj: # Do a preserialize to set up the response object if hasattr(meth, 'wsgi_codes'): resp_obj._default_code = meth.wsgi_codes(request) if resp_obj and not response: response = resp_obj.serialize(request, accept) if hasattr(response, 'headers'): for hdr, val in list(response.headers.items()): if not isinstance(val, str): val = str(val) # In Py3.X Headers must be a string response.headers[hdr] = encodeutils.safe_decode( encodeutils.safe_encode(val)) if not request.api_version_request.is_null(): response.headers[API_VERSION_REQUEST_HEADER] = \ 'compute ' + request.api_version_request.get_string() response.headers[LEGACY_API_VERSION_REQUEST_HEADER] = \ request.api_version_request.get_string() response.headers.add('Vary', API_VERSION_REQUEST_HEADER) response.headers.add('Vary', LEGACY_API_VERSION_REQUEST_HEADER) return response def _get_request_content(self, body, request): contents = {} if self._should_have_body(request): # allow empty body with PUT and POST if request.content_length == 0 or request.content_length is None: contents = {'body': None} else: contents = self.deserialize(body) return contents def get_method(self, request, action, content_type, body): meth = self._get_method(request, action, content_type, body) return meth def _get_method(self, request, action, content_type, body): """Look up the action-specific method.""" # Look up the method try: if not self.controller: meth = getattr(self, action) else: meth = 
getattr(self.controller, action) return meth except AttributeError: if (not self.wsgi_actions or action not in _ROUTES_METHODS + ['action']): # Propagate the error raise if action == 'action': action_name = action_peek(body) else: action_name = action # Look up the action method return (self.wsgi_actions[action_name]) def dispatch(self, method, request, action_args): """Dispatch a call to the action-specific method.""" try: return method(req=request, **action_args) except exception.VersionNotFoundForAPIMethod: # We deliberately don't return any message information # about the exception to the user so it looks as if # the method is simply not implemented. return Fault(webob.exc.HTTPNotFound()) def action(name): """Mark a function as an action. The given name will be taken as the action key in the body. This is also overloaded to allow extensions to provide non-extending definitions of create and delete operations. """ def decorator(func): func.wsgi_action = name return func return decorator def removed(version: str, reason: str): """Mark a function as removed. The given reason will be stored as an attribute of the function. """ def decorator(func): func.removed = True func.removed_version = reason func.removed_reason = reason return func return decorator def api_version( min_version: ty.Optional[str] = None, max_version: ty.Optional[str] = None, ): """Mark an API as supporting lower and upper version bounds. :param min_version: A string of two numerals. X.Y indicating the minimum version of the JSON-Schema to validate against. :param max_version: A string of two numerals. X.Y indicating the maximum version of the JSON-Schema against to. """ def decorator(f): @functools.wraps(f) def wrapped(*args, **kwargs): min_ver = api_version_request.APIVersionRequest(min_version) max_ver = api_version_request.APIVersionRequest(max_version) # The request object is always the second argument. # However numerous unittests pass in the request object # via kwargs instead so we handle that as well. # TODO(cyeoh): cleanup unittests so we don't have to # to do this if 'req' in kwargs: ver = kwargs['req'].api_version_request else: ver = args[1].api_version_request if not ver.matches(min_ver, max_ver): raise exception.VersionNotFoundForAPIMethod(version=ver) return f(*args, **kwargs) wrapped.min_version = min_version wrapped.max_version = max_version return wrapped return decorator def expected_errors( errors: ty.Union[int, tuple[int, ...]], min_version: ty.Optional[str] = None, max_version: ty.Optional[str] = None, ): """Decorator for v2.1 API methods which specifies expected exceptions. Specify which exceptions may occur when an API method is called. If an unexpected exception occurs then return a 500 instead and ask the user of the API to file a bug report. """ def decorator(f): @functools.wraps(f) def wrapped(*args, **kwargs): min_ver = api_version_request.APIVersionRequest(min_version) max_ver = api_version_request.APIVersionRequest(max_version) # The request object is always the second argument. # However numerous unittests pass in the request object # via kwargs instead so we handle that as well. 
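# Illustrative usage of the decorators defined in this module (controller
# method and versions assumed, not taken from a real API): they are
# typically stacked on controller methods, e.g.
#
#     @wsgi.expected_errors(404)
#     @wsgi.api_version('2.1', '2.52')
#     def show(self, req, id):
#         ...
#
# so that requests outside the microversion window get a 404 and only the
# listed HTTP errors pass through unchanged; anything else becomes a 500.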
# TODO(cyeoh): cleanup unittests so we don't have to # to do this if 'req' in kwargs: ver = kwargs['req'].api_version_request else: ver = args[1].api_version_request try: return f(*args, **kwargs) except Exception as exc: # if this instance of the decorator is intended for other # versions, let the exception bubble up as-is if not ver.matches(min_ver, max_ver): raise if isinstance(exc, webob.exc.WSGIHTTPException): if isinstance(errors, int): t_errors = (errors,) else: t_errors = errors if exc.code in t_errors: raise elif isinstance(exc, exception.Forbidden): # Note(cyeoh): Special case to handle # Forbidden exceptions so every # extension method does not need to wrap authorize # calls. ResourceExceptionHandler silently # converts NotAuthorized to HTTPForbidden raise elif isinstance(exc, exception.NotSupported): # Note(gmann): Special case to handle # NotSupported exceptions. We want to raise 400 BadRequest # for the NotSupported exception which is basically used # to raise for not supported features. Converting it here # will avoid converting every NotSupported inherited # exception in API controller. raise webob.exc.HTTPBadRequest( explanation=exc.format_message()) elif isinstance(exc, exception.ValidationError): # Note(oomichi): Handle a validation error, which # happens due to invalid API parameters, as an # expected error. raise elif isinstance(exc, exception.Unauthorized): # Handle an authorized exception, will be # automatically converted to a HTTP 401, clients # like python-novaclient handle this error to # generate new token and do another attempt. raise LOG.exception("Unexpected exception in API method") msg = _("Unexpected API Error. " "{support}\n{exc}").format( support=version.support_string(), exc=type(exc)) raise webob.exc.HTTPInternalServerError(explanation=msg) return wrapped return decorator class ControllerMetaclass(type): """Controller metaclass. This metaclass automates the task of assembling a dictionary mapping action keys to method names. 
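As an illustrative example (method name assumed): a controller defining

    @wsgi.action('confirmResize')
    def _confirm_resize(self, req, id, body):
        ...

ends up with ``wsgi_actions == {'confirmResize': '_confirm_resize'}``, which
``Resource`` later uses to route ``POST .../action`` requests to the method.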
""" def __new__(mcs, name, bases, cls_dict): """Adds the wsgi_actions dictionary to the class.""" # Find all actions actions = {} # start with wsgi actions from base classes for base in bases: actions.update(getattr(base, 'wsgi_actions', {})) for key, value in cls_dict.items(): if not callable(value): continue if getattr(value, 'wsgi_action', None): actions[value.wsgi_action] = key # Add the actions to the class dict cls_dict['wsgi_actions'] = actions return super(ControllerMetaclass, mcs).__new__(mcs, name, bases, cls_dict) class Controller(metaclass=ControllerMetaclass): """Default controller.""" _view_builder_class = None def __init__(self): """Initialize controller with a view builder instance.""" if self._view_builder_class: self._view_builder = self._view_builder_class() else: self._view_builder = None @staticmethod def is_valid_body(body, entity_name): if not (body and entity_name in body): return False def is_dict(d): try: d.get(None) return True except AttributeError: return False return is_dict(body[entity_name]) class Fault(webob.exc.HTTPException): """Wrap webob.exc.HTTPException to provide API friendly response.""" _fault_names = { 400: "badRequest", 401: "unauthorized", 403: "forbidden", 404: "itemNotFound", 405: "badMethod", 409: "conflictingRequest", 413: "overLimit", 415: "badMediaType", 429: "overLimit", 501: "notImplemented", 503: "serviceUnavailable"} def __init__(self, exception): """Create a Fault for the given webob.exc.exception.""" self.wrapped_exc = exception for key, value in list(self.wrapped_exc.headers.items()): self.wrapped_exc.headers[key] = str(value) self.status_int = exception.status_int @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """Generate a WSGI response based on the exception passed to ctor.""" user_locale = req.best_match_language() # Replace the body with fault details. code = self.wrapped_exc.status_int fault_name = self._fault_names.get(code, "computeFault") explanation = self.wrapped_exc.explanation LOG.debug("Returning %(code)s to user: %(explanation)s", {'code': code, 'explanation': explanation}) explanation = i18n.translate(explanation, user_locale) fault_data = { fault_name: { 'code': code, 'message': explanation}} if code == 413 or code == 429: retry = self.wrapped_exc.headers.get('Retry-After', None) if retry: fault_data[fault_name]['retryAfter'] = retry if not req.api_version_request.is_null(): self.wrapped_exc.headers[API_VERSION_REQUEST_HEADER] = \ 'compute ' + req.api_version_request.get_string() self.wrapped_exc.headers[LEGACY_API_VERSION_REQUEST_HEADER] = \ req.api_version_request.get_string() self.wrapped_exc.headers.add('Vary', API_VERSION_REQUEST_HEADER) self.wrapped_exc.headers.add('Vary', LEGACY_API_VERSION_REQUEST_HEADER) self.wrapped_exc.content_type = 'application/json' self.wrapped_exc.charset = 'UTF-8' self.wrapped_exc.text = JSONDictSerializer().serialize(fault_data) return self.wrapped_exc def __str__(self): return self.wrapped_exc.__str__() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/openstack/wsgi_app.py0000664000175000017500000001217000000000000020451 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """WSGI application initialization for Nova APIs.""" import os import sys from oslo_config import cfg from oslo_db import exception as odbe from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from oslo_service import _options as service_opts from paste import deploy from nova import config from nova import context from nova import exception from nova import objects from nova.pci import request from nova import service from nova import utils from nova import version CONF = cfg.CONF CONFIG_FILES = ['api-paste.ini', 'nova.conf'] LOG = logging.getLogger(__name__) objects.register_all() def _get_config_files(env=None): if env is None: env = os.environ dirname = env.get('OS_NOVA_CONFIG_DIR', '/etc/nova').strip() files = env.get('OS_NOVA_CONFIG_FILES', '').split(';') if files == ['']: files = CONFIG_FILES return [os.path.join(dirname, config_file) for config_file in files] def _setup_service(host, name): # NOTE(gibi): validate the [pci]alias config early to avoid late failures # at instance creation due to config errors. request.get_alias_from_config() try: utils.raise_if_old_compute() except exception.TooOldComputeService as e: if CONF.workarounds.disable_compute_service_check_for_ffu: LOG.warning(str(e)) else: raise binary = name if name.startswith('nova-') else "nova-%s" % name ctxt = context.get_admin_context() service_ref = objects.Service.get_by_host_and_binary( ctxt, host, binary) if service_ref: service._update_service_ref(service_ref) else: try: service_obj = objects.Service(ctxt) service_obj.host = host service_obj.binary = binary service_obj.topic = None service_obj.report_count = 0 service_obj.create() except (exception.ServiceTopicExists, exception.ServiceBinaryExists): # If we race to create a record with a sibling, don't # fail here. pass def error_application(exc, name): # TODO(cdent): make this something other than a stub def application(environ, start_response): start_response('500 Internal Server Error', [ ('Content-Type', 'text/plain; charset=UTF-8')]) return ['Out of date %s service %s\n' % (name, exc)] return application @utils.run_once('Global data already initialized, not re-initializing.', LOG.info) def init_global_data(conf_files, service_name): # NOTE(melwitt): parse_args initializes logging and calls global rpc.init() # and db_api.configure(). The db_api.configure() call does not initiate any # connection to the database. # NOTE(gibi): sys.argv is set by the wsgi runner e.g. uwsgi sets it based # on the --pyargv parameter of the uwsgi binary config.parse_args(sys.argv, default_config_files=conf_files) logging.setup(CONF, "nova") gmr_opts.set_defaults(CONF) gmr.TextGuruMeditation.setup_autorun( version, conf=CONF, service_name=service_name) # dump conf at debug (log_options option comes from oslo.service) # FIXME(mriedem): This is gross but we don't have a public hook into # oslo.service to register these options, so we are doing it manually for # now; remove this when we have a hook method into oslo.service. 
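# Illustrative example for the config handling in this module (paths and
# values assumed): with OS_NOVA_CONFIG_DIR=/etc/nova and
# OS_NOVA_CONFIG_FILES='nova.conf;nova-api.conf' in the environment,
# _get_config_files() returns
# ['/etc/nova/nova.conf', '/etc/nova/nova-api.conf']; with neither variable
# set it falls back to api-paste.ini and nova.conf under /etc/nova.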
CONF.register_opts(service_opts.service_opts) if CONF.log_options: CONF.log_opt_values( logging.getLogger(__name__), logging.DEBUG) @utils.latch_error_on_raise(retryable=(odbe.DBConnectionError,)) def init_application(name): conf_files = _get_config_files() # NOTE(melwitt): The init_application method can be called multiple times # within a single python interpreter instance if any exception is raised # during it (example: DBConnectionError while setting up the service) and # apache/mod_wsgi reloads the init_application script. So, we initialize # global data separately and decorate the method to run only once in a # python interpreter instance. init_global_data(conf_files, name) try: _setup_service(CONF.host, name) except exception.ServiceTooOld as exc: return error_application(exc, name) # This global init is safe because if we got here, we already successfully # set up the service and setting up the profile cannot fail. service.setup_profiler(name, CONF.host) conf = conf_files[0] return deploy.loadapp('config:%s' % conf, name=name) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3296082 nova-32.0.0/nova/api/validation/0000775000175000017500000000000000000000000016430 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/validation/__init__.py0000664000175000017500000003426100000000000020547 0ustar00zuulzuul00000000000000# Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Request Body validating middleware. """ import functools import re import typing as ty from oslo_log import log as logging from oslo_serialization import jsonutils import webob from nova.api.openstack import api_version_request from nova.api.openstack import wsgi from nova.api.validation import validators import nova.conf from nova import exception from nova.i18n import _ CONF = nova.conf.CONF LOG = logging.getLogger(__name__) def validated(cls): cls._validated = True return cls class Schemas: """A microversion-aware schema container. Allow definition and retrieval of schemas on a microversion-aware basis. 
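Illustrative example (versions assumed): one request body schema registered
for microversions 2.1 through 2.18 and a second registered from 2.19 onwards
are both stored here; ``__call__`` returns whichever schema matches the
microversion of the incoming request, or ``None`` when nothing matches.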
""" def __init__(self) -> None: self._schemas: list[ tuple[ dict[str, object], api_version_request.APIVersionRequest, api_version_request.APIVersionRequest, ] ] = [] def add_schema( self, schema: tuple[dict[str, object]], min_version: ty.Optional[str], max_version: ty.Optional[str], ) -> None: # we'd like to use bisect.insort but that doesn't accept a 'key' arg # until Python 3.10, so we need to sort after insertion instead :( self._schemas.append( ( schema, api_version_request.APIVersionRequest(min_version), api_version_request.APIVersionRequest(max_version), ) ) self._schemas.sort(key=lambda x: (x[1], x[2])) self.validate_schemas() def validate_schemas(self) -> None: """Ensure there are no overlapping schemas.""" prev_max_version: ty.Optional[ api_version_request.APIVersionRequest ] = None for schema, min_version, max_version in self._schemas: if prev_max_version: # it doesn't make sense to have multiple schemas if one of them # is unversioned (i.e. applies to everything) assert not prev_max_version.is_null() assert not min_version.is_null() # there should not be any gaps in schema coverage assert prev_max_version.ver_minor + 1 == min_version.ver_minor prev_max_version = max_version def __call__(self, req: wsgi.Request) -> ty.Optional[dict[str, object]]: ver = req.api_version_request for schema, min_version, max_version in self._schemas: if ver.matches(min_version, max_version): return schema # TODO(stephenfin): This should be an error in a future release return None def _schema_validation_helper( schema, target, min_version, max_version, args, kwargs, *, relax_additional_properties=False, is_body=True, ): """A helper method to execute JSON-Schema Validation. This method checks the request version whether matches the specified max version and min_version. It also takes a care of legacy v2 request. If the version range matches the request, we validate the schema against the target and a failure will result in a ValidationError being raised. :param schema: A dict, the JSON-Schema is used to validate the target. :param target: A dict, the target is validated by the JSON-Schema. :param min_version: A string of two numerals. X.Y indicating the minimum version of the JSON-Schema to validate against. :param max_version: A string of two numerals. X.Y indicating the maximum version of the JSON-Schema to validate against. :param args: Positional arguments which passed into original method. :param kwargs: Keyword arguments which passed into original method. :param relax_additional_properties: Whether to enable soft additionalProperties validation. This is only enabled for request validation. :param is_body: A boolean. Indicating whether the target is HTTP request body or not. :returns: A boolean. `True` if and only if the version range matches the request AND the schema is successfully validated. `False` if the version range does not match the request and no validation is performed. :raises: ValidationError, when the validation fails. """ min_ver = api_version_request.APIVersionRequest(min_version) max_ver = api_version_request.APIVersionRequest(max_version) # The request object is always the second argument. # However numerous unittests pass in the request object # via kwargs instead so we handle that as well. 
# TODO(cyeoh): cleanup unittests so we don't have to # to do this if 'req' in kwargs: ver = kwargs['req'].api_version_request legacy_v2 = kwargs['req'].is_legacy_v2() else: ver = args[1].api_version_request legacy_v2 = args[1].is_legacy_v2() if legacy_v2: relax_additional_properties = relax_additional_properties and legacy_v2 # NOTE: For v2.0 compatible API, here should work like # client | schema min_version | schema # -----------+--------------------+-------- # legacy_v2 | None | work # legacy_v2 | 2.0 | work # legacy_v2 | 2.1+ | don't if min_version is None or min_version == '2.0': schema_validator = validators._SchemaValidator( schema, relax_additional_properties, is_body ) schema_validator.validate(target) return True elif ver.matches(min_ver, max_ver): # Only validate against the schema if it lies within # the version range specified. Note that if both min # and max are not specified the validator will always # be run. schema_validator = validators._SchemaValidator(schema, False, is_body) schema_validator.validate(target) return True return False # TODO(stephenfin): This decorator should take the five schemas we validate: # request body, request query string, request headers, response body, and # response headers. As things stand, we're going to need five separate # decorators. def schema( request_body_schema: ty.Dict[str, ty.Any], min_version: ty.Optional[str] = None, max_version: ty.Optional[str] = None, ): """Register a schema to validate request body. Registered schema will be used for validating request body just before API method executing. :param dict request_body_schema: a schema to validate request body :param dict response_body_schema: a schema to validate response body :param str min_version: Minimum API microversion that the schema applies to :param str max_version: Maximum API microversion that the schema applies to """ def add_validator(func): @functools.wraps(func) def wrapper(*args, **kwargs): _schema_validation_helper( request_body_schema, kwargs['body'], min_version, max_version, args, kwargs, relax_additional_properties=True, ) return func(*args, **kwargs) if not hasattr(wrapper, 'request_body_schemas'): wrapper.request_body_schemas = Schemas() wrapper.request_body_schemas.add_schema( request_body_schema, min_version, max_version ) return wrapper return add_validator def response_body_schema( response_body_schema: ty.Dict[str, ty.Any], min_version: ty.Optional[str] = None, max_version: ty.Optional[str] = None, ): """Register a schema to validate response body. Registered schema will be used for validating response body just after API method executing. :param dict response_body_schema: a schema to validate response body :param str min_version: Minimum API microversion that the schema applies to :param str max_version: Maximum API microversion that the schema applies to """ def add_validator(func): @functools.wraps(func) def wrapper(*args, **kwargs): response = func(*args, **kwargs) if CONF.api.response_validation == 'ignore': # don't waste our time checking anything if we're ignoring # schema errors return response # NOTE(stephenfin): If our response is an object, we need to # serializer and deserialize to convert e.g. 
date-time to strings if isinstance(response, wsgi.ResponseObject): serializer = wsgi.JSONDictSerializer() _body = serializer.serialize(response.obj) # TODO(stephenfin): We should replace all instances of this with # wsgi.ResponseObject elif isinstance(response, webob.Response): _body = response.body else: serializer = wsgi.JSONDictSerializer() _body = serializer.serialize(response) if _body == b'': body = None else: body = jsonutils.loads(_body) try: _schema_validation_helper( response_body_schema, body, min_version, max_version, args, kwargs ) except exception.ValidationError: if CONF.api.response_validation == 'warn': LOG.exception('Schema failed to validate') else: raise return response if not hasattr(wrapper, 'response_body_schemas'): wrapper.response_body_schemas = Schemas() wrapper.response_body_schemas.add_schema( response_body_schema, min_version, max_version ) return wrapper return add_validator def _strip_additional_query_parameters(schema, req): """Strip the additional properties from the req.GET. This helper method assumes the JSON-Schema is only described as a dict without nesting. This method should be called after query parameters pass the JSON-Schema validation. It also means this method only can be called after _schema_validation_helper return `True`. """ additional_properties = schema.get('additionalProperties', True) pattern_regexes = [] patterns = schema.get('patternProperties', None) if patterns: for regex in patterns: pattern_regexes.append(re.compile(regex)) if additional_properties: # `req.GET.keys` will return duplicated keys for multiple values # parameters. To get rid of duplicated keys, convert it to set. for param in set(req.GET.keys()): if param not in schema['properties'].keys(): # keys that can match the patternProperties will be # retained and handle latter. if not (list(regex for regex in pattern_regexes if regex.match(param))): del req.GET[param] def query_schema(request_query_schema, min_version=None, max_version=None): """Register a schema to validate request query parameters. Registered schema will be used for validating request query params just before API method executing. :param request_query_schema: A dict, the JSON-Schema for validating the query parameters. :param min_version: A string of two numerals. X.Y indicating the minimum version of the JSON-Schema to validate against. :param max_version: A string of two numerals. X.Y indicating the maximum version of the JSON-Schema against to. """ def add_validator(func): @functools.wraps(func) def wrapper(*args, **kwargs): # The request object is always the second argument. # However numerous unittests pass in the request object # via kwargs instead so we handle that as well. # TODO(cyeoh): cleanup unittests so we don't have to # to do this if 'req' in kwargs: req = kwargs['req'] else: req = args[1] # NOTE(Kevin_Zheng): The webob package throws UnicodeError when # param cannot be decoded. Catch this and raise HTTP 400. try: query_dict = req.GET.dict_of_lists() except UnicodeDecodeError: msg = _('Query string is not UTF-8 encoded') raise exception.ValidationError(msg) if _schema_validation_helper( request_query_schema, query_dict, min_version, max_version, args, kwargs, relax_additional_properties=True, is_body=False, ): # NOTE(alex_xu): The additional query parameters were stripped # out when `additionalProperties=True`. This is for backward # compatible with v2.1 API and legacy v2 API. But it makes the # system more safe for no more unexpected parameters pass down # to the system. 
In microversion 2.75, we have blocked all of # those additional parameters. _strip_additional_query_parameters(request_query_schema, req) return func(*args, **kwargs) if not hasattr(wrapper, 'request_query_schemas'): wrapper.request_query_schemas = Schemas() wrapper.request_query_schemas.add_schema( request_query_schema, min_version, max_version ) return wrapper return add_validator ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3336084 nova-32.0.0/nova/api/validation/extra_specs/0000775000175000017500000000000000000000000020750 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/validation/extra_specs/__init__.py0000664000175000017500000000000000000000000023047 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/validation/extra_specs/accel.py0000664000175000017500000000217500000000000022376 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Validators for ``accel`` namespaced extra specs.""" from nova.api.validation.extra_specs import base EXTRA_SPEC_VALIDATORS = [ base.ExtraSpecValidator( name='accel:device_profile', description=( 'The name of a device profile to configure for the instance. ' 'A device profile may be viewed as a "flavor for devices".' ), value={ 'type': str, 'description': 'A name of a device profile.', }, ), ] def register(): return EXTRA_SPEC_VALIDATORS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/validation/extra_specs/aggregate_instance_extra_specs.py0000664000175000017500000000464000000000000027540 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Validators for (preferably) ``aggregate_instance_extra_specs`` namespaced extra specs. These are used by the ``AggregateInstanceExtraSpecsFilter`` scheduler filter. Note that we explicitly do not support the unnamespaced variant of extra specs since these have been deprecated since Havana (commit fbedf60a432). Users that insist on using these can disable extra spec validation. """ from nova.api.validation.extra_specs import base DESCRIPTION = """\ Specify metadata that must be present on the aggregate of a host. 
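For example (the ``ssd`` key is an arbitrary illustration), a flavor with
the extra spec ``aggregate_instance_extra_specs:ssd=true`` will only pass
hosts that belong to an aggregate whose metadata contains ``ssd=true``.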
If this metadata is not present, the host will be rejected. Requires the ``AggregateInstanceExtraSpecsFilter`` scheduler filter. The value can be one of the following: * ``=`` (equal to or greater than as a number; same as vcpus case) * ``==`` (equal to as a number) * ``!=`` (not equal to as a number) * ``>=`` (greater than or equal to as a number) * ``<=`` (less than or equal to as a number) * ``s==`` (equal to as a string) * ``s!=`` (not equal to as a string) * ``s>=`` (greater than or equal to as a string) * ``s>`` (greater than as a string) * ``s<=`` (less than or equal to as a string) * ``s<`` (less than as a string) * ```` (substring) * ```` (all elements contained in collection) * ```` (find one of these) * A specific value, e.g. ``true``, ``123``, ``testing`` """ EXTRA_SPEC_VALIDATORS = [ base.ExtraSpecValidator( name='aggregate_instance_extra_specs:{key}', description=DESCRIPTION, parameters=[ { 'name': 'key', 'description': 'The metadata key to match on', 'pattern': r'.+', }, ], value={ # this is totally arbitrary, since we need to support specific # values 'type': str, }, ), ] def register(): return EXTRA_SPEC_VALIDATORS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/validation/extra_specs/base.py0000664000175000017500000001014000000000000022230 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import dataclasses import re import typing as ty from oslo_utils import strutils from nova import exception @dataclasses.dataclass class ExtraSpecValidator: name: str description: str value: ty.Dict[str, ty.Any] deprecated: bool = False parameters: ty.List[ty.Dict[str, ty.Any]] = dataclasses.field( default_factory=list ) name_regex: str = None value_regex: str = None def __post_init__(self): # generate a regex for the name name_regex = self.name # replace the human-readable patterns with named regex groups; this # will transform e.g. 'hw:numa_cpus.{id}' to 'hw:numa_cpus.(?P\d+)' for param in self.parameters: pattern = f'(?P<{param["name"]}>{param["pattern"]})' name_regex = name_regex.replace(f'{{{param["name"]}}}', pattern) self.name_regex = name_regex # ...and do the same for the value, but only if we're using strings if self.value['type'] not in (int, str, bool): raise ValueError( f"Unsupported parameter type '{self.value['type']}'" ) value_regex = None if self.value['type'] == str and self.value.get('pattern'): value_regex = self.value['pattern'] self.value_regex = value_regex def _validate_str(self, value): if 'pattern' in self.value: value_match = re.fullmatch(self.value_regex, value) if not value_match: raise exception.ValidationError( f"Validation failed; '{value}' is not of the format " f"'{self.value_regex}'." ) elif 'enum' in self.value: if value not in self.value['enum']: values = ', '.join(str(x) for x in self.value['enum']) raise exception.ValidationError( f"Validation failed; '{value}' is not one of: {values}." 
) def _validate_int(self, value): try: value = int(value) except ValueError: raise exception.ValidationError( f"Validation failed; '{value}' is not a valid integer value." ) if 'max' in self.value and self.value['max'] < value: raise exception.ValidationError( f"Validation failed; '{value}' is greater than the max value " f"of '{self.value['max']}'." ) if 'min' in self.value and self.value['min'] > value: raise exception.ValidationError( f"Validation failed; '{value}' is less than the min value " f"of '{self.value['min']}'." ) def _validate_bool(self, value): try: strutils.bool_from_string(value, strict=True) except ValueError: raise exception.ValidationError( f"Validation failed; '{value}' is not a valid boolean-like " f"value." ) def validate(self, name, value): name_match = re.fullmatch(self.name_regex, name) if not name_match: # NOTE(stephenfin): This is mainly here for testing purposes raise exception.ValidationError( f"Validation failed; expected a name of format '{self.name}' " f"but got '{name}'." ) if self.value['type'] == int: self._validate_int(value) elif self.value['type'] == bool: self._validate_bool(value) else: # str self._validate_str(value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/validation/extra_specs/capabilities.py0000664000175000017500000000772300000000000023764 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Validators for (preferably) ``capabilities`` namespaced extra specs. These are used by the ``ComputeCapabilitiesFilter`` scheduler filter. Note that we explicitly do not allow the unnamespaced variant of extra specs since this has been deprecated since Grizzly (commit 8ce8e4b6c0d). Users that insist on using these can disable extra spec validation. For all extra specs, the value can be one of the following: * ``=`` (equal to or greater than as a number; same as vcpus case) * ``==`` (equal to as a number) * ``!=`` (not equal to as a number) * ``>=`` (greater than or equal to as a number) * ``<=`` (less than or equal to as a number) * ``s==`` (equal to as a string) * ``s!=`` (not equal to as a string) * ``s>=`` (greater than or equal to as a string) * ``s>`` (greater than as a string) * ``s<=`` (less than or equal to as a string) * ``s<`` (less than as a string) * ```` (substring) * ```` (all elements contained in collection) * ```` (find one of these) * A specific value, e.g. ``true``, ``123``, ``testing`` Examples are: ``>= 5``, ``s== 2.1.0``, `` gcc``, `` aes mmx``, and `` fpu gpu`` """ from nova.api.validation.extra_specs import base DESCRIPTION = """\ Specify that the '{capability}' capability provided by the host compute service satisfy the provided filter value. Requires the ``ComputeCapabilitiesFilter`` scheduler filter. 
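For example (values assumed), a value of ``s== QEMU`` requires this
capability to equal the string 'QEMU', while ``>= 4`` requires it to be at
least 4 when compared as a number.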
""" EXTRA_SPEC_VALIDATORS = [] # non-nested capabilities (from 'nova.objects.compute_node.ComputeNode' and # nova.scheduler.host_manager.HostState') for capability in ( 'id', 'uuid', 'service_id', 'host', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used', 'memory_mb_used', 'local_gb_used', 'hypervisor_type', 'hypervisor_version', 'hypervisor_hostname', 'free_ram_mb', 'free_disk_gb', 'current_workload', 'running_vms', 'disk_available_least', 'host_ip', 'mapped', 'cpu_allocation_ratio', 'ram_allocation_ratio', 'disk_allocation_ratio', ) + ( 'total_usable_ram_mb', 'total_usable_disk_gb', 'disk_mb_used', 'free_disk_mb', 'vcpus_total', 'vcpus_used', 'num_instances', 'num_io_ops', 'failed_builds', 'aggregates', 'cell_uuid', 'updated', ): EXTRA_SPEC_VALIDATORS.append( base.ExtraSpecValidator( name=f'capabilities:{capability}', description=DESCRIPTION.format(capability=capability), value={ # this is totally arbitrary, since we need to support specific # values 'type': str, }, ), ) # nested capabilities (from 'nova.objects.compute_node.ComputeNode' and # nova.scheduler.host_manager.HostState') for capability in ( 'cpu_info', 'metrics', 'stats', 'numa_topology', 'supported_hv_specs', 'pci_device_pools', ) + ( 'nodename', 'pci_stats', 'supported_instances', 'limits', 'instances', ): EXTRA_SPEC_VALIDATORS.extend([ base.ExtraSpecValidator( name=f'capabilities:{capability}{{filter}}', description=DESCRIPTION.format(capability=capability), parameters=[ { 'name': 'filter', # this is optional, but if it's present it must be preceded # by ':' 'pattern': r'(:\w+)*', } ], value={ 'type': str, }, ), ]) def register(): return EXTRA_SPEC_VALIDATORS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/validation/extra_specs/hw.py0000664000175000017500000005234200000000000021746 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Validators for ``hw`` namespaced extra specs.""" from nova.api.validation.extra_specs import base from nova.objects import fields realtime_validators = [ base.ExtraSpecValidator( name='hw:cpu_realtime', description=( 'Determine whether realtime mode should be enabled for the ' 'instance or not. ' 'Only supported by the libvirt virt driver.' ), value={ 'type': bool, 'description': 'Whether to enable realtime priority.', }, ), base.ExtraSpecValidator( name='hw:cpu_realtime_mask', description=( 'A exclusion mask of CPUs that should not be enabled for ' 'realtime. ' 'Only supported by the libvirt virt driver.' ), value={ 'type': str, 'pattern': r'(\^)?\d+((-\d+)?(,\^?\d+(-\d+)?)?)*', }, ), ] hide_hypervisor_id_validator = [ base.ExtraSpecValidator( name='hw:hide_hypervisor_id', description=( 'Determine whether the hypervisor ID should be hidden from the ' 'guest. ' 'Only supported by the libvirt virt driver.' 
), value={ 'type': bool, 'description': 'Whether to hide the hypervisor ID.', }, ) ] cpu_policy_validators = [ base.ExtraSpecValidator( name='hw:cpu_policy', description=( 'The policy to apply when determining what host CPUs the guest ' 'CPUs can run on. ' 'If ``shared`` (default), guest CPUs can be overallocated but ' 'cannot float across host cores. ' 'If ``dedicated``, guest CPUs cannot be overallocated but are ' 'individually pinned to their own host core. ' 'If ``mixed``, the policy for each instance CPU can be specified ' 'using the ``hw:cpu_dedicated_mask`` or ``hw:cpu_realtime_mask`` ' 'extra specs. ' 'Only supported by the libvirt virt driver.' ), value={ 'type': str, 'description': 'The CPU policy.', 'enum': [ 'dedicated', 'shared', 'mixed', ], }, ), base.ExtraSpecValidator( name='hw:cpu_thread_policy', description=( 'The policy to apply when determining whether the destination ' 'host can have hardware threads enabled or not. ' 'If ``prefer`` (default), hosts with hardware threads will be ' 'preferred. ' 'If ``require``, hosts with hardware threads will be required. ' 'If ``isolate``, hosts with hardware threads will be forbidden. ' 'Only supported by the libvirt virt driver.' ), value={ 'type': str, 'description': 'The CPU thread policy.', 'enum': [ 'prefer', 'isolate', 'require', ], }, ), base.ExtraSpecValidator( name='hw:emulator_threads_policy', description=( 'The policy to apply when determining whether emulator threads ' 'should be offloaded to a separate isolated core or to a pool ' 'of shared cores. ' 'If ``share``, emulator overhead threads will be offloaded to a ' 'pool of shared cores. ' 'If ``isolate``, emulator overhead threads will be offloaded to ' 'their own core. ' 'Only supported by the libvirt virt driver.' ), value={ 'type': str, 'description': 'The emulator thread policy.', 'enum': [ 'isolate', 'share', ], }, ), base.ExtraSpecValidator( name='hw:cpu_dedicated_mask', description=( 'A mapping of **guest** (instance) CPUs to be pinned to **host** ' 'CPUs for an instance with a ``mixed`` CPU policy. ' 'Any **guest** CPUs which are not in this mapping will float ' 'across host cores. ' 'Only supported by the libvirt virt driver.' ), value={ 'type': str, 'description': ( 'The **guest** CPU mapping to be pinned to **host** CPUs for ' 'an instance with a ``mixed`` CPU policy.' ), # This pattern is identical to 'hw:cpu_realtime_mask' pattern. 'pattern': r'\^?\d+((-\d+)?(,\^?\d+(-\d+)?)?)*', }, ), ] hugepage_validators = [ base.ExtraSpecValidator( name='hw:mem_page_size', description=( 'The size of memory pages to allocate to the guest with. ' 'Can be one of the three alias - ``large``, ``small`` or ' '``any``, - or an actual size. ' 'Only supported by the libvirt virt driver.' ), value={ 'type': str, 'description': 'The size of memory page to allocate', 'pattern': r'(large|small|any|\d+([kKMGT]i?)?(b|bit|B)?)', }, ), base.ExtraSpecValidator( name='hw:locked_memory', description=( 'Determine if **guest** (instance) memory should be locked ' 'preventing swapping. This is required in rare cases for device ' 'DMA transfers. Only supported by the libvirt virt driver.' ), value={ 'type': bool, 'description': 'Whether to lock **guest** (instance) memory.', }, ), ] numa_validators = [ base.ExtraSpecValidator( name='hw:numa_nodes', description=( 'The number of virtual NUMA nodes to allocate to configure the ' 'guest with. ' 'Each virtual NUMA node will be mapped to a unique host NUMA ' 'node. ' 'Only supported by the libvirt virt driver.' 
), value={ 'type': int, 'description': 'The number of virtual NUMA nodes to allocate', 'min': 1, }, ), base.ExtraSpecValidator( name='hw:numa_cpus.{num}', description=( 'A mapping of **guest** (instance) CPUs to the **guest** (not ' 'host!) NUMA node identified by ``{num}``. ' 'This can be used to provide asymmetric CPU-NUMA allocation and ' 'is necessary where the number of guest NUMA nodes is not a ' 'factor of the number of guest CPUs. ' 'Only supported by the libvirt virt driver.' ), parameters=[ { 'name': 'num', 'pattern': r'\d+', # positive integers 'description': 'The ID of the **guest** NUMA node.', }, ], value={ 'type': str, 'description': ( 'The guest CPUs, in the form of a CPU map, to allocate to the ' 'guest NUMA node identified by ``{num}``.' ), 'pattern': r'\^?\d+((-\d+)?(,\^?\d+(-\d+)?)?)*', }, ), base.ExtraSpecValidator( name='hw:numa_mem.{num}', description=( 'A mapping of **guest** memory to the **guest** (not host!) NUMA ' 'node identified by ``{num}``. ' 'This can be used to provide asymmetric memory-NUMA allocation ' 'and is necessary where the number of guest NUMA nodes is not a ' 'factor of the total guest memory. ' 'Only supported by the libvirt virt driver.' ), parameters=[ { 'name': 'num', 'pattern': r'\d+', # positive integers 'description': 'The ID of the **guest** NUMA node.', }, ], value={ 'type': int, 'description': ( 'The guest memory, in MB, to allocate to the guest NUMA node ' 'identified by ``{num}``.' ), 'min': 1, }, ), base.ExtraSpecValidator( name='hw:pci_numa_affinity_policy', description=( 'The NUMA affinity policy of any PCI passthrough devices or ' 'SR-IOV network interfaces attached to the instance. ' 'If ``required``, only PCI devices from one of the host NUMA ' 'nodes the instance VCPUs are allocated from can be used by said ' 'instance. ' 'If ``preferred``, any PCI device can be used, though preference ' 'will be given to those from the same NUMA node as the instance ' 'VCPUs. ' 'If ``legacy`` (default), behavior is as with ``required`` unless ' 'the PCI device does not support provide NUMA affinity ' 'information, in which case affinity is ignored. ' 'Only supported by the libvirt virt driver.' ), value={ 'type': str, 'description': 'The PCI NUMA affinity policy', 'enum': [ 'required', 'preferred', 'legacy', 'socket', ], }, ), ] cpu_topology_validators = [ base.ExtraSpecValidator( name='hw:cpu_sockets', description=( 'The number of virtual CPU sockets to emulate in the guest ' 'CPU topology. ' 'Defaults to the number of vCPUs requested. ' 'Only supported by the libvirt virt driver.' ), value={ 'type': int, 'description': 'A number of virtual CPU sockets', 'min': 1, }, ), base.ExtraSpecValidator( name='hw:cpu_cores', description=( 'The number of virtual CPU cores to emulate per socket in the ' 'guest CPU topology. ' 'Defaults to ``1``. ' 'Only supported by the libvirt virt driver. ' ), value={ 'type': int, 'description': 'A number of virtual CPU cores', 'min': 1, }, ), base.ExtraSpecValidator( name='hw:cpu_threads', description=( 'The number of virtual CPU threads to emulate per core in the ' 'guest CPU topology. ' 'Defaults to ``1``. ' 'Only supported by the libvirt virt driver. ' ), value={ 'type': int, 'description': 'A number of virtual CPU threads', 'min': 1, }, ), base.ExtraSpecValidator( name='hw:cpu_max_sockets', description=( 'The max number of virtual CPU sockets to emulate in the ' 'guest CPU topology. 
' 'This is used to limit the topologies that can be requested by ' 'an image and will be used to validate the ``hw_cpu_sockets`` ' 'image metadata property. ' 'Only supported by the libvirt virt driver. ' ), value={ 'type': int, 'description': 'A number of virtual CPU sockets', 'min': 1, }, ), base.ExtraSpecValidator( name='hw:cpu_max_cores', description=( 'The max number of virtual CPU cores to emulate per socket in the ' 'guest CPU topology. ' 'This is used to limit the topologies that can be requested by an ' 'image and will be used to validate the ``hw_cpu_cores`` image ' 'metadata property. ' 'Only supported by the libvirt virt driver. ' ), value={ 'type': int, 'description': 'A number of virtual CPU cores', 'min': 1, }, ), base.ExtraSpecValidator( name='hw:cpu_max_threads', description=( 'The max number of virtual CPU threads to emulate per core in the ' 'guest CPU topology. ' 'This is used to limit the topologies that can be requested by an ' 'image and will be used to validate the ``hw_cpu_threads`` image ' 'metadata property. ' 'Only supported by the libvirt virt driver. ' ), value={ 'type': int, 'description': 'A number of virtual CPU threads', 'min': 1, }, ), ] feature_flag_validators = [ # TODO(stephenfin): Consider deprecating and moving this to the 'os:' # namespace base.ExtraSpecValidator( name='hw:boot_menu', description=( 'Whether to show a boot menu when booting the guest. ' 'Only supported by the libvirt virt driver. ' ), value={ 'type': bool, 'description': 'Whether to enable the boot menu', }, ), base.ExtraSpecValidator( name='hw:vif_multiqueue_enabled', description=( 'Whether to enable the virtio-net multiqueue feature. ' 'When set, the driver sets the number of queues equal to the ' 'number of guest vCPUs. This makes the network performance scale ' 'across a number of vCPUs. This requires guest support and is ' 'only supported by the libvirt driver.' ), value={ 'type': bool, 'description': 'Whether to enable multiqueue', }, ), base.ExtraSpecValidator( name='hw:mem_encryption', description=( 'Whether to enable memory encryption for the guest. ' 'Only supported by the libvirt virt driver on hosts with AMD SEV ' 'support.' ), value={ 'type': bool, 'description': 'Whether to enable memory encryption', }, ), base.ExtraSpecValidator( name='hw:mem_encryption_model', description=( 'CPU feature used for memory encryption of the guest. ' 'This has no effect unless hw:mem_encryption (or equivalent ' 'image property) is set to True.' ), value={ 'type': str, 'description': 'A CPU feature used for memory encryption', 'enum': [ 'amd-sev', 'amd-sev-es', ], }, ), base.ExtraSpecValidator( name='hw:pmem', description=( 'A comma-separated list of ``$LABEL``\\ s defined in config for ' 'vPMEM devices. ' 'Only supported by the libvirt virt driver on hosts with PMEM ' 'devices.' ), value={ 'type': str, 'description': ( 'A comma-separated list of valid resource class names.' ), 'pattern': '([a-zA-Z0-9_]+(,)?)+', }, ), base.ExtraSpecValidator( name='hw:pmu', description=( 'Whether to enable the Performance Monitory Unit (PMU) for the ' 'guest. ' 'If this option is not specified, the presence of the vPMU is ' 'determined by the hypervisor. ' 'The vPMU is used by tools like ``perf`` in the guest to provide ' 'more accurate information for profiling application and ' 'monitoring guest performance. ' 'For realtime workloads, the emulation of a vPMU can introduce ' 'additional latency which may be undesirable. 
' 'If the telemetry it provides is not required, such workloads ' 'should disable this feature. ' 'For most workloads, the default of unset will be correct. ' 'Only supported by the libvirt virt driver.' ), value={ 'type': bool, 'description': 'Whether to enable the PMU', }, ), base.ExtraSpecValidator( name='hw:serial_port_count', description=( 'The number of serial ports to allocate to the guest. ' 'Only supported by the libvirt virt driver.' ), value={ 'type': int, 'min': 0, 'description': 'The number of serial ports to allocate', }, ), base.ExtraSpecValidator( name='hw:tpm_model', description=( 'The model of the attached TPM device. ' 'Only supported by the libvirt virt driver.' ), value={ 'type': str, 'description': 'A TPM model', 'enum': [ 'tpm-tis', 'tpm-crb', ], }, ), base.ExtraSpecValidator( name='hw:tpm_version', description=( "The TPM version. " "Required if requesting a vTPM via the 'hw:tpm_model' extra spec " "or equivalent image metadata property. " "Only supported by the libvirt virt driver." ), value={ 'type': str, 'description': 'A TPM version.', 'enum': [ '1.2', '2.0', ], }, ), base.ExtraSpecValidator( name='hw:watchdog_action', description=( 'The action to take when the watchdog timer is kicked. ' 'Watchdog devices keep an eye on the instance and carry out the ' 'specified action if the server hangs. ' 'The watchdog uses the ``i6300esb`` device, emulating a PCI Intel ' '6300ESB. ' 'Only supported by the libvirt virt driver.' ), value={ 'type': str, 'description': 'The action to take', 'enum': [ 'none', 'pause', 'poweroff', 'reset', 'disabled', ], }, ), base.ExtraSpecValidator( name='hw:viommu_model', description=( 'This can be used to set model for virtual IOMMU device.' ), value={ 'type': str, 'enum': [ 'intel', 'smmuv3', 'virtio', 'auto' ], 'description': 'model for vIOMMU', }, ), base.ExtraSpecValidator( name='hw:virtio_packed_ring', description=( 'Permit guests to negotiate the virtio packed ring format. ' 'This requires guest support and is only supported by ' 'the libvirt driver.' ), value={ 'type': bool, 'description': 'Whether to enable packed virtqueue', }, ), base.ExtraSpecValidator( name='hw:sound_model', description=( 'The model of the attached sound device. ' 'Only supported by the libvirt virt driver. ' 'If unset, no sound device is attached.' ), value={ 'type': str, 'description': 'A sound model', 'enum': fields.SoundModelType.ALL, }, ), base.ExtraSpecValidator( name='hw:usb_model', description=( 'The model of the attached USB controller device. ' 'Only supported by the libvirt virt driver. ' 'If unset, no USB controller device is attached.' ), value={ 'type': str, 'description': 'A USB controller model', 'enum': fields.USBControllerModelType.ALL, }, ), base.ExtraSpecValidator( name='hw:redirected_usb_ports', description=( 'The number of redirected USB ports to add to the virtual ' 'machine. Only supported by the libvirt virt driver. If unset, ' 'no redirected USB ports are added. The maximum value is 15.' ), value={ 'type': int, 'description': 'The number of USB redirection devices to add', 'min': 0, 'max': 15 }, ), ] ephemeral_encryption_validators = [ base.ExtraSpecValidator( name='hw:ephemeral_encryption', description=( 'Whether to enable ephemeral storage encryption.' ), value={ 'type': bool, 'description': 'Whether to enable ephemeral storage encryption.', }, ), base.ExtraSpecValidator( name='hw:ephemeral_encryption_format', description=( 'The encryption format to be used if ephemeral storage ' 'encryption is enabled via hw:ephemeral_encryption.' 
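# Illustrative only: a handful of the boolean/enum feature flags validated
# above. Per the descriptions, 'hw:tpm_version' must accompany
# 'hw:tpm_model', and 'hw:mem_encryption_model' only takes effect when
# 'hw:mem_encryption' is enabled. Values are assumptions.
feature_flag_extra_specs = {
    'hw:boot_menu': 'true',
    'hw:tpm_model': 'tpm-crb',
    'hw:tpm_version': '2.0',
    'hw:mem_encryption': 'true',
    'hw:mem_encryption_model': 'amd-sev',
    'hw:watchdog_action': 'reset',
}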
), value={ 'type': str, 'description': 'The encryption format to be used if enabled.', 'enum': fields.BlockDeviceEncryptionFormatType.ALL, }, ), ] def register(): return ( realtime_validators + hide_hypervisor_id_validator + cpu_policy_validators + hugepage_validators + numa_validators + cpu_topology_validators + feature_flag_validators + ephemeral_encryption_validators ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/validation/extra_specs/hw_rng.py0000664000175000017500000000336700000000000022617 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Validators for ``hw_rng`` namespaced extra specs.""" from nova.api.validation.extra_specs import base # TODO(stephenfin): Move these to the 'hw:' namespace EXTRA_SPEC_VALIDATORS = [ base.ExtraSpecValidator( name='hw_rng:allowed', description=( 'Whether to disable configuration of a random number generator ' 'in their image. Before 21.0.0 (Ussuri), random number generators ' 'were not enabled by default so this was used to determine ' 'whether to **enable** configuration.' ), value={ 'type': bool, }, ), base.ExtraSpecValidator( name='hw_rng:rate_bytes', description=( 'The allowed amount of bytes for the guest to read from the ' 'host\'s entropy per period.' ), value={ 'type': int, 'min': 0, }, ), base.ExtraSpecValidator( name='hw_rng:rate_period', description='The duration of a read period in milliseconds.', value={ 'type': int, 'min': 0, }, ), ] def register(): return EXTRA_SPEC_VALIDATORS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/validation/extra_specs/hw_video.py0000664000175000017500000000237300000000000023133 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Validators for ``hw_video`` namespaced extra specs.""" from nova.api.validation.extra_specs import base # TODO(stephenfin): Move these to the 'hw:' namespace EXTRA_SPEC_VALIDATORS = [ base.ExtraSpecValidator( name='hw_video:ram_max_mb', description=( 'The maximum amount of memory the user can request using the ' '``hw_video_ram`` image metadata property, which represents the ' 'video memory that the guest OS will see. This has no effect for ' 'vGPUs.' 
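# Illustrative only: ephemeral-storage encryption plus the 'hw_rng'
# rate-limiting specs validated around this point. 'luks' is assumed to be
# one of the formats in fields.BlockDeviceEncryptionFormatType.ALL.
encryption_and_rng_extra_specs = {
    'hw:ephemeral_encryption': 'true',
    'hw:ephemeral_encryption_format': 'luks',
    'hw_rng:allowed': 'true',
    'hw_rng:rate_bytes': '2048',     # bytes of host entropy readable per period
    'hw_rng:rate_period': '1000',    # read period length in milliseconds
}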
), value={ 'type': int, 'min': 0, }, ), ] def register(): return EXTRA_SPEC_VALIDATORS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/validation/extra_specs/null.py0000664000175000017500000000335700000000000022304 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Validators for non-namespaced extra specs.""" from nova.api.validation.extra_specs import base EXTRA_SPEC_VALIDATORS = [ base.ExtraSpecValidator( name='hide_hypervisor_id', description=( 'Determine whether the hypervisor ID should be hidden from the ' 'guest. Only supported by the libvirt virt driver. ' 'This extra spec is not compatible with the ' 'AggregateInstanceExtraSpecsFilter scheduler filter. ' 'The ``hw:hide_hypervisor_id`` extra spec should be used instead.' ), value={ 'type': bool, 'description': 'Whether to hide the hypervisor ID.', }, deprecated=True, ), # TODO(stephenfin): This should be moved to a namespace base.ExtraSpecValidator( name='group_policy', description=( 'The group policy to apply when using the granular resource ' 'request syntax.' ), value={ 'type': str, 'enum': [ 'isolate', 'none', ], }, ), ] def register(): return EXTRA_SPEC_VALIDATORS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/validation/extra_specs/os.py0000664000175000017500000000243200000000000021744 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Validators for ``os`` namespaced extra specs.""" from nova.api.validation.extra_specs import base # TODO(stephenfin): Most of these belong in the 'hw:' namespace # and should be moved. EXTRA_SPEC_VALIDATORS = [ base.ExtraSpecValidator( name='os:secure_boot', description=( 'Determine whether secure boot is enabled or not. Only supported ' 'by the libvirt virt drivers.' ), value={ 'type': str, 'description': 'Whether secure boot is required or not', 'enum': [ 'disabled', 'required', ], }, ), ] def register(): return EXTRA_SPEC_VALIDATORS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/validation/extra_specs/pci_passthrough.py0000664000175000017500000000305400000000000024526 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. 
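# Illustrative values for the un-namespaced and 'os:' specs validated above.
# Note that 'hide_hypervisor_id' is flagged deprecated in favour of the
# 'hw:hide_hypervisor_id' spelling.
legacy_and_os_extra_specs = {
    'hide_hypervisor_id': 'true',    # deprecated spelling
    'os:secure_boot': 'required',
}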
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Validators for ``pci_passthrough`` namespaced extra specs.""" from nova.api.validation.extra_specs import base EXTRA_SPEC_VALIDATORS = [ base.ExtraSpecValidator( name='pci_passthrough:alias', description=( 'Specify the number of ``$alias`` PCI device(s) to attach to the ' 'instance. ' 'Must be of format ``$alias:$count``, where ``$alias`` ' 'corresponds to a particular PCI device class (as configured in ' '``nova.conf``) and ``$count`` is the amount of PCI devices of ' 'type ``$alias`` to be assigned to the instance. ' 'Use commas to specify multiple values. ' 'Only supported by the libvirt virt driver.' ), value={ 'type': str, # one or more comma-separated '$alias:$count' values 'pattern': r'[^:]+:\d+(?:\s*,\s*[^:]+:\d+)*', }, ), ] def register(): return EXTRA_SPEC_VALIDATORS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/validation/extra_specs/quota.py0000664000175000017500000001532500000000000022461 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Validators for ``quota`` namespaced extra specs.""" from nova.api.validation.extra_specs import base EXTRA_SPEC_VALIDATORS = [] # CPU, memory, disk IO and VIF quotas (VMWare) for key, name, unit in ( ('cpu', 'CPU', 'MHz'), ('memory', 'memory', 'MB'), ('disk_io', 'disk IO', 'I/O per second'), ('vif', 'virtual interface', 'Mbps'), ): EXTRA_SPEC_VALIDATORS.extend( [ base.ExtraSpecValidator( name=f'quota:{key}_limit', description=( f'The upper limit for {name} allocation in {unit}. ' f'The utilization of an instance will not exceed this ' f'limit, even if there are available resources. ' f'This is typically used to ensure a consistent ' f'performance of instances independent of available ' f'resources. ' f'The value ``0`` indicates that {name} usage is not ' f'limited. ' f'Only supported by the VMWare virt driver.' ), value={ 'type': int, 'min': 0, }, ), base.ExtraSpecValidator( name=f'quota:{key}_reservation', description=( f'The guaranteed minimum {name} reservation in {unit}. ' f'This means the specified amount of {name} that will ' f'be guaranteed for the instance. ' f'Only supported by the VMWare virt driver.' ), value={ 'type': int, }, ), base.ExtraSpecValidator( name=f'quota:{key}_shares_level', description=( f"The allocation level for {name}. If you choose " f"'custom', set the number of {name} shares using " f"'quota:{key}_shares_share'. 
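# A hedged example of the 'pci_passthrough:alias' format described above:
# comma-separated '$alias:$count' pairs. 'gpu' and 'fast-nic' are assumed
# alias names that would have to be defined in nova.conf for the request to
# be satisfiable; the counts are arbitrary.
pci_passthrough_extra_specs = {
    'pci_passthrough:alias': 'gpu:1,fast-nic:2',
}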
" f"Only supported by the VMWare virt driver." ), value={ 'type': str, 'enum': ['custom', 'high', 'normal', 'low'], }, ), base.ExtraSpecValidator( name=f'quota:{key}_shares_share', description=( f"The number of shares of {name} allocated in the " f"event that 'quota:{key}_shares_level=custom' is " f"used. " f"Ignored otherwise. " f"There is no unit for this value: it is a relative " f"measure based on the settings for other instances. " f"Only supported by the VMWare virt driver." ), value={ 'type': int, 'min': 0, }, ), ] ) # CPU quotas (libvirt) EXTRA_SPEC_VALIDATORS.extend( [ base.ExtraSpecValidator( name='quota:cpu_shares', description=( 'The proportional weighted share for the domain. ' 'If this element is omitted, the service defaults to the OS ' 'provided defaults. ' 'There is no unit for the value; it is a relative measure ' 'based on the setting of other VMs. ' 'For example, a VM configured with a value of 2048 gets ' 'twice as much CPU time as a VM configured with value 1024. ' 'Only supported by the libvirt virt driver.' ), value={ 'type': int, 'min': 0, }, ), base.ExtraSpecValidator( name='quota:cpu_period', description=( 'Specifies the enforcement interval in microseconds. ' 'Within a period, each VCPU of the instance is not allowed ' 'to consume more than the quota worth of runtime. ' 'The value should be in range 1,000 - 1,000,000. ' 'A period with a value of 0 means no value. ' 'Only supported by the libvirt virt driver.' ), value={ 'type': int, 'min': 0, }, ), base.ExtraSpecValidator( name='quota:cpu_quota', description=( "The maximum allowed bandwidth in microseconds. " "Can be combined with 'quota:cpu_period' to limit an instance " "to a percentage of capacity of a physical CPU. " "The value should be in range 1,000 - 2^64 or negative. " "A negative value indicates that the instance has infinite " "bandwidth. " "Only supported by the libvirt virt driver." ), value={ 'type': int, }, ), ] ) # Disk quotas (libvirt) for stat in ('read', 'write', 'total'): for metric in ('bytes', 'iops'): EXTRA_SPEC_VALIDATORS.append( base.ExtraSpecValidator( name=f'quota:disk_{stat}_{metric}_sec', description=( f'The quota {stat} {metric} for disk. ' f'Only supported by the libvirt virt driver.' ), value={ 'type': int, 'min': 0, }, ) ) # VIF quotas (libvirt) # TODO(stephenfin): Determine whether this should be deprecated now that # nova-network is dead for stat in ('inbound', 'outbound'): for metric in ('average', 'peak', 'burst'): EXTRA_SPEC_VALIDATORS.append( base.ExtraSpecValidator( name=f'quota:vif_{stat}_{metric}', description=( f'The quota {stat} {metric} for VIF. Only supported ' f'by the libvirt virt driver.' ), value={ 'type': int, 'min': 0, }, ) ) def register(): return EXTRA_SPEC_VALIDATORS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/validation/extra_specs/resources.py0000664000175000017500000000347300000000000023343 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Validators for ``resources`` namespaced extra specs.""" import os_resource_classes from nova.api.validation.extra_specs import base EXTRA_SPEC_VALIDATORS = [] for resource_class in os_resource_classes.STANDARDS: EXTRA_SPEC_VALIDATORS.append( base.ExtraSpecValidator( name=f'resources{{group}}:{resource_class}', description=f'The amount of resource {resource_class} requested.', value={ 'type': int, }, parameters=[ { 'name': 'group', 'pattern': r'([a-zA-Z0-9_-]{1,64})?', }, ], ) ) EXTRA_SPEC_VALIDATORS.append( base.ExtraSpecValidator( name='resources{group}:CUSTOM_{resource}', description=( 'The amount of resource CUSTOM_{resource} requested.' ), value={ 'type': int, }, parameters=[ { 'name': 'group', 'pattern': r'([a-zA-Z0-9_-]{1,64})?', }, { 'name': 'resource', 'pattern': r'[A-Z0-9_]+', }, ], ) ) def register(): return EXTRA_SPEC_VALIDATORS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/validation/extra_specs/traits.py0000664000175000017500000000366500000000000022642 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Validators for ``traits`` namespaced extra specs.""" import os_traits from nova.api.validation.extra_specs import base EXTRA_SPEC_VALIDATORS = [] for trait in os_traits.get_traits(): EXTRA_SPEC_VALIDATORS.append( base.ExtraSpecValidator( name=f'trait{{group}}:{trait}', description=f'Require or forbid trait {trait}.', value={ 'type': str, 'enum': [ 'required', 'forbidden', ], }, parameters=[ { 'name': 'group', 'pattern': r'([a-zA-Z0-9_-]{1,64})?', }, ], ) ) EXTRA_SPEC_VALIDATORS.append( base.ExtraSpecValidator( name='trait{group}:CUSTOM_{trait}', description=( 'Require or forbid trait CUSTOM_{trait}.' ), value={ 'type': str, 'enum': [ 'required', 'forbidden', ], }, parameters=[ { 'name': 'group', 'pattern': r'([a-zA-Z0-9_-]{1,64})?', }, { 'name': 'trait', 'pattern': r'[A-Z0-9_]+', }, ], ) ) def register(): return EXTRA_SPEC_VALIDATORS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/validation/extra_specs/validators.py0000664000175000017500000000516700000000000023503 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
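# Illustrative only: granular resource/trait groups matched by the '{group}'
# parameter above (an optional suffix of 1-64 characters from [a-zA-Z0-9_-]),
# combined with the un-namespaced 'group_policy' spec validated earlier.
# CUSTOM_* names are placeholders for operator-defined classes/traits.
granular_extra_specs = {
    'group_policy': 'isolate',
    'resources:VCPU': '4',
    'resources1:CUSTOM_ACCELERATOR': '1',
    'trait1:HW_CPU_X86_AVX2': 'required',
    'trait1:CUSTOM_SLOW_DISK': 'forbidden',
}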
"""Validators for all extra specs known by nova.""" import re import typing as ty from oslo_log import log as logging from stevedore import extension from nova.api.validation.extra_specs import base from nova import exception LOG = logging.getLogger(__name__) VALIDATORS: ty.Dict[str, base.ExtraSpecValidator] = {} NAMESPACES: ty.Set[str] = set() def validate(name: str, value: str): """Validate a given extra spec. :param name: Extra spec name. :param value: Extra spec value. :raises: exception.ValidationError if validation fails. """ # attempt a basic lookup for extra specs without embedded parameters if name in VALIDATORS: VALIDATORS[name].validate(name, value) return # if that failed, fallback to a linear search through the registry for validator in VALIDATORS.values(): if re.fullmatch(validator.name_regex, name): validator.validate(name, value) return # check if there's a namespace; if not, we've done all we can do if ':' not in name: # no namespace return # if there is, check if it's one we recognize for namespace in NAMESPACES: if re.fullmatch(namespace, name.split(':', 1)[0]): break else: return raise exception.ValidationError( f"Validation failed; extra spec '{name}' does not appear to be a " f"valid extra spec." ) def load_validators(): global VALIDATORS def _report_load_failure(mgr, ep, err): LOG.warning(u'Failed to load %s: %s', ep.module_name, err) mgr = extension.ExtensionManager( 'nova.api.extra_spec_validators', on_load_failure_callback=_report_load_failure, invoke_on_load=False, ) for ext in mgr: # TODO(stephenfin): Make 'register' return a dict rather than a list? for validator in ext.plugin.register(): VALIDATORS[validator.name] = validator if ':' in validator.name_regex: NAMESPACES.add(validator.name_regex.split(':', 1)[0]) load_validators() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/validation/extra_specs/vmware.py0000664000175000017500000000274500000000000022633 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Validators for ``vmware`` namespaced extra specs.""" from nova.api.validation.extra_specs import base EXTRA_SPEC_VALIDATORS = [ base.ExtraSpecValidator( name='vmware:hw_version', description=( 'Specify the hardware version used to create images. In an ' 'environment with different host versions, you can use this ' 'parameter to place instances on the correct hosts.' ), value={ 'type': str, }, ), base.ExtraSpecValidator( name='vmware:storage_policy', description=( 'Specify the storage policy used for new instances.' '\n' 'If Storage Policy-Based Management (SPBM) is not enabled, this ' 'parameter is ignored.' 
), value={ 'type': str, }, ), ] def register(): return EXTRA_SPEC_VALIDATORS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/validation/parameter_types.py0000664000175000017500000003364000000000000022214 0ustar00zuulzuul00000000000000# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Common parameter types for validating request Body. """ import copy import functools import re import unicodedata from nova.i18n import _ from nova.objects import tag _REGEX_RANGE_CACHE = {} def memorize(func): @functools.wraps(func) def memorizer(*args, **kwargs): global _REGEX_RANGE_CACHE key = "%s:%s:%s" % (func.__name__, hash(str(args)), hash(str(kwargs))) value = _REGEX_RANGE_CACHE.get(key) if value is None: value = func(*args, **kwargs) _REGEX_RANGE_CACHE[key] = value return value return memorizer def _reset_cache(): global _REGEX_RANGE_CACHE _REGEX_RANGE_CACHE = {} def single_param(schema): """Macro function for use in JSONSchema to support query parameters that should have only one value. """ ret = multi_params(schema) ret['maxItems'] = 1 return ret def multi_params(schema): """Macro function for use in JSONSchema to support query parameters that may have multiple values. """ return {'type': 'array', 'items': schema} # NOTE: We don't check actual values of queries on params # which are defined as the following common_param. # Please note those are for backward compatible existing # query parameters because previously multiple parameters # might be input and accepted. common_query_param = multi_params({'type': 'string'}) common_query_regex_param = multi_params({'type': 'string', 'format': 'regex'}) class ValidationRegex(object): def __init__(self, regex, reason): self.regex = regex self.reason = reason def _is_printable(char): """determine if a unicode code point is printable. This checks if the character is either "other" (mostly control codes), or a non-horizontal space. All characters that don't match those criteria are considered printable; that is: letters; combining marks; numbers; punctuation; symbols; (horizontal) space separators. """ category = unicodedata.category(char) return (not category.startswith("C") and (not category.startswith("Z") or category == "Zs")) def _get_all_chars(): for i in range(0xFFFF): yield chr(i) # build a regex that matches all printable characters. This allows # spaces in the middle of the name. Also note that the regexp below # deliberately allows the empty string. This is so only the constraint # which enforces a minimum length for the name is triggered when an # empty string is tested. Otherwise it is not deterministic which # constraint fails and this causes issues for some unittests when # PYTHONHASHSEED is set randomly. @memorize def _build_regex_range(ws=True, invert=False, exclude=None): """Build a range regex for a set of characters in utf8. 
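# A hedged sketch (parameter names are placeholders) of how the query
# parameter macros above compose into a JSON Schema fragment: 'limit' may
# appear at most once while 'status' may be repeated, mirroring how query
# strings arrive as lists of values.
example_query_schema = {
    'type': 'object',
    'properties': {
        'limit': single_param({'type': 'string'}),
        'status': multi_params({'type': 'string'}),
    },
}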
This builds a valid range regex for characters in utf8 by iterating the entire space and building up a set of x-y ranges for all the characters we find which are valid. :param ws: should we include whitespace in this range. :param exclude: any characters we want to exclude :param invert: invert the logic The inversion is useful when we want to generate a set of ranges which is everything that's not a certain class. For instance, produce all the non printable characters as a set of ranges. """ if exclude is None: exclude = [] regex = "" # are we currently in a range in_range = False # last character we found, for closing ranges last = None # last character we added to the regex, this lets us know that we # already have B in the range, which means we don't need to close # it out with B-B. While the later seems to work, it's kind of bad form. last_added = None def valid_char(char): if char in exclude: result = False elif ws: result = _is_printable(char) else: # Zs is the unicode class for space characters, of which # there are about 10 in this range. result = _is_printable(char) and unicodedata.category(char) != 'Zs' if invert is True: return not result return result # iterate through the entire character range. in_ for c in _get_all_chars(): if valid_char(c): if not in_range: regex += re.escape(c) last_added = c in_range = True else: if in_range and last != last_added: regex += "-" + re.escape(last) in_range = False last = c else: if in_range: regex += "-" + re.escape(c) return regex valid_name_regex_base = '^(?![%s])[%s]*(? 0: if self.is_body: # NOTE: For whole OpenStack message consistency, this error # message has been written as the similar format of # WSME. detail = _("Invalid input for field/attribute %(path)s. " "Value: %(value)s. %(message)s") % { 'path': ex.path.pop(), 'value': ex.instance, 'message': ex.message} else: # NOTE: Use 'ex.path.popleft()' instead of 'ex.path.pop()', # due to the structure of query parameters is a dict # with key as name and value is list. So the first # item in the 'ex.path' is the key, and second item # is the index of list in the value. We need the # key as the parameter name in the error message. # So pop the first value out of 'ex.path'. detail = _("Invalid input for query parameters %(path)s. " "Value: %(value)s. %(message)s") % { 'path': ex.path.popleft(), 'value': ex.instance, 'message': ex.message} else: detail = ex.message raise exception.ValidationError(detail=detail) except TypeError as ex: # NOTE: If passing non string value to patternProperties parameter, # TypeError happens. Here is for catching the TypeError. 
detail = str(ex) raise exception.ValidationError(detail=detail) def _number_from_str(self, instance): try: value = int(instance) except (ValueError, TypeError): try: value = float(instance) except (ValueError, TypeError): return None return value def _validate_minimum(self, validator, minimum, instance, schema): instance = self._number_from_str(instance) if instance is None: return return self.validator_org.VALIDATORS['minimum'](validator, minimum, instance, schema) def _validate_maximum(self, validator, maximum, instance, schema): instance = self._number_from_str(instance) if instance is None: return return self.validator_org.VALIDATORS['maximum'](validator, maximum, instance, schema) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/api/wsgi.py0000664000175000017500000002100100000000000015613 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """WSGI primitives used throughout the nova WSGI apps.""" import os from oslo_log import log as logging from paste import deploy import routes.middleware import webob import nova.conf from nova import exception from nova.i18n import _ CONF = nova.conf.CONF LOG = logging.getLogger(__name__) class Request(webob.Request): def __init__(self, environ, *args, **kwargs): if CONF.wsgi.secure_proxy_ssl_header: scheme = environ.get(CONF.wsgi.secure_proxy_ssl_header) if scheme: environ['wsgi.url_scheme'] = scheme super(Request, self).__init__(environ, *args, **kwargs) class Application(object): """Base WSGI application wrapper. Subclasses need to implement __call__.""" @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [app:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [app:wadl] latest_version = 1.3 paste.app_factory = nova.api.fancy_api:Wadl.factory which would result in a call to the `Wadl` class as import nova.api.fancy_api fancy_api.Wadl(latest_version='1.3') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. """ return cls(**local_config) def __call__(self, environ, start_response): r"""Subclasses will probably want to implement __call__ like this: @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): # Any of the following objects work as responses: # Option 1: simple string res = 'message\n' # Option 2: a nicely formatted HTTP exception page res = exc.HTTPForbidden(explanation='Nice try') # Option 3: a webob Response object (in case you need to play with # headers, or you want to be treated like an iterable, or ...) 
res = Response() res.app_iter = open('somefile') # Option 4: any wsgi app to be run next res = self.application # Option 5: you can get a Response object for a wsgi app, too, to # play with headers etc res = req.get_response(self.application) # You can then just return your response... return res # ... or set req.response and return None. req.response = res See the end of http://pythonpaste.org/webob/modules/dec.html for more info. """ raise NotImplementedError(_('You must implement __call__')) class Middleware(Application): """Base WSGI middleware. These classes require an application to be initialized that will be called next. By default the middleware will simply call its wrapped app, or you can override __call__ to customize its behavior. """ @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [filter:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [filter:analytics] redis_host = 127.0.0.1 paste.filter_factory = nova.api.analytics:Analytics.factory which would result in a call to the `Analytics` class as import nova.api.analytics analytics.Analytics(app_from_paste, redis_host='127.0.0.1') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. """ def _factory(app): return cls(app, **local_config) return _factory def __init__(self, application): self.application = application def process_request(self, req): """Called on each request. If this returns None, the next application down the stack will be executed. If it returns a response then that response will be returned and execution will stop here. """ return None def process_response(self, response): """Do whatever you'd like to the response.""" return response @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): response = self.process_request(req) if response: return response response = req.get_response(self.application) return self.process_response(response) class Router(object): """WSGI middleware that maps incoming requests to WSGI apps.""" def __init__(self, mapper): """Create a router for the given routes.Mapper. Each route in `mapper` must specify a 'controller', which is a WSGI app to call. You'll probably want to specify an 'action' as well and have your controller be an object that can route the request to the action-specific method. Examples: mapper = routes.Mapper() sc = ServerController() # Explicit mapping of one route to a controller+action mapper.connect(None, '/svrlist', controller=sc, action='list') # Actions are all implicitly defined mapper.resource('server', 'servers', controller=sc) # Pointing to an arbitrary WSGI app. You can specify the # {path_info:.*} parameter so the target app can be handed just that # section of the URL. mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp()) """ self.map = mapper self._router = routes.middleware.RoutesMiddleware(self._dispatch, self.map) @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """Route the incoming request to a controller based on self.map. If no match, return a 404. """ return self._router @staticmethod @webob.dec.wsgify(RequestClass=Request) def _dispatch(req): """Dispatch the request to the appropriate controller. Called by self._router after matching the incoming request to a route and putting the information into req.environ. 
Either returns 404 or the routed WSGI app's response. """ match = req.environ['wsgiorg.routing_args'][1] if not match: return webob.exc.HTTPNotFound() app = match['controller'] return app class Loader(object): """Used to load WSGI applications from paste configurations.""" def __init__(self, config_path=None): """Initialize the loader, and attempt to find the config. :param config_path: Full or relative path to the paste config. :returns: None """ self.config_path = None config_path = config_path or CONF.wsgi.api_paste_config if not os.path.isabs(config_path): self.config_path = CONF.find_file(config_path) elif os.path.exists(config_path): self.config_path = config_path if not self.config_path: raise exception.ConfigNotFound(path=config_path) def load_app(self, name): """Return the paste URLMap wrapped WSGI application. :param name: Name of the application to load. :returns: Paste URLMap object wrapping the requested application. :raises: `nova.exception.PasteAppNotFound` """ try: LOG.debug("Loading app %(name)s from %(path)s", {'name': name, 'path': self.config_path}) return deploy.loadapp("config:%s" % self.config_path, name=name) except LookupError: LOG.exception("Couldn't lookup app: %s", name) raise exception.PasteAppNotFound(name=name, path=self.config_path) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/availability_zones.py0000664000175000017500000001653600000000000020002 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Availability zone helper functions.""" import collections from nova import cache_utils import nova.conf from nova import objects # NOTE(vish): azs don't change that often, so cache them for an hour to # avoid hitting the db multiple times on every request. 
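# A small usage sketch of the Loader defined above. The config path and the
# application name are assumptions (they depend on the deployment's
# api-paste.ini), not values mandated by this module.
loader = Loader(config_path='/etc/nova/api-paste.ini')
app = loader.load_app('osapi_compute')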
AZ_CACHE_SECONDS = 60 * 60 MC = None CONF = nova.conf.CONF def _get_cache(): global MC if MC is None: MC = cache_utils.get_client(expiration_time=AZ_CACHE_SECONDS) return MC def reset_cache(): """Reset the cache, mainly for testing purposes and update availability_zone for host aggregate """ global MC MC = None def _make_cache_key(host): return "azcache-%s" % host def _build_metadata_by_host(aggregates, hosts=None): if hosts and not isinstance(hosts, set): hosts = set(hosts) metadata = collections.defaultdict(set) for aggregate in aggregates: for host in aggregate.hosts: if hosts and host not in hosts: continue metadata[host].add(list(aggregate.metadata.values())[0]) return metadata def set_availability_zones(context, services): # Makes sure services isn't a sqlalchemy object services = [dict(service) for service in services] hosts = set([service['host'] for service in services]) aggregates = objects.AggregateList.get_by_metadata_key(context, 'availability_zone', hosts=hosts) metadata = _build_metadata_by_host(aggregates, hosts=hosts) # gather all of the availability zones associated with a service host for service in services: az = CONF.internal_service_availability_zone if service['topic'] == "compute": if metadata.get(service['host']): az = u','.join(list(metadata[service['host']])) else: az = CONF.default_availability_zone # update the cache update_host_availability_zone_cache(context, service['host'], az) service['availability_zone'] = az return services def get_host_availability_zone(context, host): aggregates = objects.AggregateList.get_by_host(context, host, key='availability_zone') if aggregates: az = aggregates[0].metadata['availability_zone'] else: az = CONF.default_availability_zone return az def update_host_availability_zone_cache(context, host, availability_zone=None): if not availability_zone: availability_zone = get_host_availability_zone(context, host) cache = _get_cache() cache_key = _make_cache_key(host) cache.delete(cache_key) cache.set(cache_key, availability_zone) def get_availability_zones(context, hostapi, get_only_available=False, with_hosts=False, services=None): """Return available and unavailable zones on demand. 
:param context: nova auth RequestContext :param hostapi: nova.compute.api.HostAPI instance :param get_only_available: flag to determine whether to return available zones only, default False indicates return both available zones and not available zones, True indicates return available zones only :param with_hosts: whether to return hosts part of the AZs :type with_hosts: bool :param services: list of services to use; if None, enabled services will be retrieved from all cells with zones set """ if services is None: services = hostapi.service_get_all( context, set_zones=True, all_cells=True) enabled_services = [] disabled_services = [] for service in services: if not service.disabled: enabled_services.append(service) else: disabled_services.append(service) if with_hosts: return _get_availability_zones_with_hosts( enabled_services, disabled_services, get_only_available) else: return _get_availability_zones( enabled_services, disabled_services, get_only_available) def _get_availability_zones( enabled_services, disabled_services, get_only_available=False): available_zones = { service['availability_zone'] for service in enabled_services } if get_only_available: return sorted(available_zones) not_available_zones = { service['availability_zone'] for service in disabled_services if service['availability_zone'] not in available_zones } return sorted(available_zones), sorted(not_available_zones) def _get_availability_zones_with_hosts( enabled_services, disabled_services, get_only_available=False): available_zones = collections.defaultdict(set) for service in enabled_services: available_zones[service['availability_zone']].add(service['host']) if get_only_available: return sorted(available_zones.items()) not_available_zones = collections.defaultdict(set) for service in disabled_services: if service['availability_zone'] in available_zones: continue not_available_zones[service['availability_zone']].add(service['host']) return sorted(available_zones.items()), sorted(not_available_zones.items()) def get_instance_availability_zone(context, instance): """Return availability zone of specified instance.""" host = instance.host if 'host' in instance else None if not host: # Likely hasn't reached a viable compute node yet so give back the # desired availability_zone in the instance record if the boot request # specified one. az = instance.get('availability_zone') return az cache_key = _make_cache_key(host) cache = _get_cache() az = cache.get(cache_key) az_inst = instance.get('availability_zone') if az_inst is not None and az != az_inst: # NOTE(sbauza): Cache is wrong, we need to invalidate it by fetching # again the right AZ related to the aggregate the host belongs to. # As the API is also calling this method for setting the instance # AZ field, we don't need to update the instance.az field. # This case can happen because the cache is populated before the # instance has been assigned to the host so that it would keep the # former reference which was incorrect. Instead of just taking the # instance AZ information for refilling the cache, we prefer to # invalidate the cache and fetch it again because there could be some # corner cases where this method could be called before the instance # has been assigned to the host also. 
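# Sketch of the shapes returned by the private helpers above, using plain
# dicts in place of Service objects (only item access is needed here); zone
# and host names are made up.
enabled = [{'availability_zone': 'az1', 'host': 'cmp1'},
           {'availability_zone': 'az1', 'host': 'cmp2'}]
disabled = [{'availability_zone': 'maintenance', 'host': 'cmp3'}]
assert _get_availability_zones(enabled, disabled) == (['az1'], ['maintenance'])
assert _get_availability_zones(enabled, disabled,
                               get_only_available=True) == ['az1']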
az = None if not az: elevated = context.elevated() az = get_host_availability_zone(elevated, host) cache.set(cache_key, az) return az ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/baserpc.py0000664000175000017500000000450700000000000015524 0ustar00zuulzuul00000000000000# # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Base RPC client and server common to all services. """ import oslo_messaging as messaging from oslo_serialization import jsonutils import nova.conf from nova import rpc CONF = nova.conf.CONF _NAMESPACE = 'baseapi' class BaseAPI(object): """Client side of the base rpc API. API version history: 1.0 - Initial version. 1.1 - Add get_backdoor_port """ VERSION_ALIASES = { # baseapi was added in havana } def __init__(self, topic): super(BaseAPI, self).__init__() target = messaging.Target(topic=topic, namespace=_NAMESPACE, version='1.0') version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.baseapi, CONF.upgrade_levels.baseapi) self.client = rpc.get_client(target, version_cap=version_cap) def ping(self, context, arg, timeout=None): arg_p = jsonutils.to_primitive(arg) cctxt = self.client.prepare(timeout=timeout) return cctxt.call(context, 'ping', arg=arg_p) def get_backdoor_port(self, context, host): cctxt = self.client.prepare(server=host, version='1.1') return cctxt.call(context, 'get_backdoor_port') class BaseRPCAPI(object): """Server side of the base RPC API.""" target = messaging.Target(namespace=_NAMESPACE, version='1.1') def __init__(self, service_name, backdoor_port): self.service_name = service_name self.backdoor_port = backdoor_port def ping(self, context, arg): resp = {'service': self.service_name, 'arg': arg} return jsonutils.to_primitive(resp) def get_backdoor_port(self, context): return self.backdoor_port ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/block_device.py0000664000175000017500000006110300000000000016511 0ustar00zuulzuul00000000000000# Copyright 2011 Isaku Yamahata # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
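# A minimal sketch (requires an initialized RPC transport and a service
# listening on the topic; the topic and resulting service name are
# assumptions) of the ping round trip implemented by baserpc above: the
# client-side BaseAPI.ping() is answered by BaseRPCAPI.ping(), which echoes
# the argument back together with the server's service name.
from nova import baserpc
from nova import context

ctxt = context.get_admin_context()
base_api = baserpc.BaseAPI(topic='compute')
reply = base_api.ping(ctxt, {'hello': 'world'}, timeout=30)
# e.g. {'service': 'compute', 'arg': {'hello': 'world'}}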
import copy import re from oslo_log import log as logging from oslo_utils import strutils from oslo_utils import units import nova.conf from nova import exception from nova.i18n import _ from nova import utils from nova.virt import driver CONF = nova.conf.CONF LOG = logging.getLogger(__name__) DEFAULT_ROOT_DEV_NAME = '/dev/sda1' _DEFAULT_MAPPINGS = {'ami': 'sda1', 'ephemeral0': 'sda2', 'root': DEFAULT_ROOT_DEV_NAME, 'swap': 'sda3'} # Image attributes which Cinder stores in volume image metadata # as regular properties VIM_IMAGE_ATTRIBUTES = ( 'image_id', 'image_name', 'size', 'checksum', 'container_format', 'disk_format', 'min_ram', 'min_disk', ) bdm_legacy_fields = set(['device_name', 'delete_on_termination', 'virtual_name', 'snapshot_id', 'volume_id', 'volume_size', 'no_device', 'connection_info']) bdm_new_fields = set(['source_type', 'destination_type', 'guest_format', 'device_type', 'disk_bus', 'boot_index', 'device_name', 'delete_on_termination', 'snapshot_id', 'volume_id', 'volume_size', 'image_id', 'no_device', 'connection_info', 'tag', 'volume_type', 'encrypted', 'encryption_secret_uuid', 'encryption_format', 'encryption_options']) bdm_db_only_fields = set(['id', 'instance_uuid', 'attachment_id', 'uuid']) bdm_db_inherited_fields = set(['created_at', 'updated_at', 'deleted_at', 'deleted']) class BlockDeviceDict(dict): """Represents a Block Device Mapping in Nova.""" _fields = bdm_new_fields _db_only_fields = (bdm_db_only_fields | bdm_db_inherited_fields) _required_fields = set(['source_type']) def __init__(self, bdm_dict=None, do_not_default=None, **kwargs): super(BlockDeviceDict, self).__init__() bdm_dict = bdm_dict or {} bdm_dict.update(kwargs) do_not_default = do_not_default or set() self._validate(bdm_dict) if bdm_dict.get('device_name'): bdm_dict['device_name'] = prepend_dev(bdm_dict['device_name']) bdm_dict['delete_on_termination'] = bool( bdm_dict.get('delete_on_termination')) # NOTE (ndipanov): Never default db fields self.update({field: None for field in self._fields - do_not_default}) self.update(bdm_dict.items()) def _validate(self, bdm_dict): """Basic data format validations.""" dict_fields = set(key for key, _ in bdm_dict.items()) valid_fields = self._fields | self._db_only_fields # Check that there are no bogus fields if not (dict_fields <= valid_fields): raise exception.InvalidBDMFormat( details=("Following fields are invalid: %s" % " ".join(dict_fields - valid_fields))) if bdm_dict.get('no_device'): return # Check that all required fields are there if (self._required_fields and not ((dict_fields & self._required_fields) == self._required_fields)): raise exception.InvalidBDMFormat( details=_("Some required fields are missing")) if 'delete_on_termination' in bdm_dict: bdm_dict['delete_on_termination'] = strutils.bool_from_string( bdm_dict['delete_on_termination']) if bdm_dict.get('device_name') is not None: validate_device_name(bdm_dict['device_name']) validate_and_default_volume_size(bdm_dict) if bdm_dict.get('boot_index'): try: bdm_dict['boot_index'] = int(bdm_dict['boot_index']) except ValueError: raise exception.InvalidBDMFormat( details=_("Boot index is invalid.")) @classmethod def from_legacy(cls, legacy_bdm): copy_over_fields = bdm_legacy_fields & bdm_new_fields copy_over_fields |= (bdm_db_only_fields | bdm_db_inherited_fields) # NOTE (ndipanov): These fields cannot be computed # from legacy bdm, so do not default them # to avoid overwriting meaningful values in the db non_computable_fields = set(['boot_index', 'disk_bus', 'guest_format', 'device_type']) new_bdm 
= {fld: val for fld, val in legacy_bdm.items() if fld in copy_over_fields} virt_name = legacy_bdm.get('virtual_name') if is_swap_or_ephemeral(virt_name): new_bdm['source_type'] = 'blank' new_bdm['delete_on_termination'] = True new_bdm['destination_type'] = 'local' if virt_name == 'swap': new_bdm['guest_format'] = 'swap' else: new_bdm['guest_format'] = CONF.default_ephemeral_format elif legacy_bdm.get('snapshot_id'): new_bdm['source_type'] = 'snapshot' new_bdm['destination_type'] = 'volume' elif legacy_bdm.get('volume_id'): new_bdm['source_type'] = 'volume' new_bdm['destination_type'] = 'volume' elif legacy_bdm.get('no_device'): # NOTE (ndipanov): Just keep the BDM for now, pass else: raise exception.InvalidBDMFormat( details=_("Unrecognized legacy format.")) return cls(new_bdm, non_computable_fields) @classmethod def from_api(cls, api_dict, image_uuid_specified): """Transform the API format of data to the internally used one. Only validate if the source_type field makes sense. """ if not api_dict.get('no_device'): source_type = api_dict.get('source_type') device_uuid = api_dict.get('uuid') destination_type = api_dict.get('destination_type') volume_type = api_dict.get('volume_type') if source_type == 'blank' and device_uuid: raise exception.InvalidBDMFormat( details=_("Invalid device UUID.")) elif source_type != 'blank': if not device_uuid: raise exception.InvalidBDMFormat( details=_("Missing device UUID.")) api_dict[source_type + '_id'] = device_uuid if source_type == 'image' and destination_type == 'local': # NOTE(mriedem): boot_index can be None so we need to # account for that to avoid a TypeError. boot_index = api_dict.get('boot_index', -1) if boot_index is None: # boot_index=None is equivalent to -1. boot_index = -1 boot_index = int(boot_index) # if this bdm is generated from --image, then # source_type = image and destination_type = local is allowed if not (image_uuid_specified and boot_index == 0): raise exception.InvalidBDMFormat( details=_("Mapping image to local is not supported.")) if destination_type == 'local' and volume_type: raise exception.InvalidBDMFormat( details=_("Specifying a volume_type with destination_type=" "local is not supported.")) # Specifying a volume_type with a pre-existing source volume is # not supported. 
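# Illustrative only: converting a legacy ephemeral mapping with the
# from_legacy() classmethod above. The input dict is made up; the expected
# fields follow directly from the branch taken for swap/ephemeral
# 'virtual_name' values.
legacy_bdm = {
    'device_name': '/dev/sdb',
    'virtual_name': 'ephemeral0',
    'delete_on_termination': True,
}
new_bdm = BlockDeviceDict.from_legacy(legacy_bdm)
# new_bdm['source_type'] == 'blank'
# new_bdm['destination_type'] == 'local'
# new_bdm['guest_format'] == CONF.default_ephemeral_format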
if source_type == 'volume' and volume_type: raise exception.InvalidBDMFormat( details=_("Specifying volume type to existing volume is " "not supported.")) api_dict.pop('uuid', None) return cls(api_dict) def legacy(self): copy_over_fields = bdm_legacy_fields - set(['virtual_name']) copy_over_fields |= (bdm_db_only_fields | bdm_db_inherited_fields) legacy_block_device = {field: self.get(field) for field in copy_over_fields if field in self} source_type = self.get('source_type') destination_type = self.get('destination_type') no_device = self.get('no_device') if source_type == 'blank': if self['guest_format'] == 'swap': legacy_block_device['virtual_name'] = 'swap' else: # NOTE (ndipanov): Always label as 0, it is up to # the calling routine to re-enumerate them legacy_block_device['virtual_name'] = 'ephemeral0' elif source_type in ('volume', 'snapshot') or no_device: legacy_block_device['virtual_name'] = None elif source_type == 'image': if destination_type != 'volume': # NOTE(ndipanov): Image bdms with local destination # have no meaning in the legacy format - raise raise exception.InvalidBDMForLegacy() legacy_block_device['virtual_name'] = None return legacy_block_device def get_image_mapping(self): drop_fields = (set(['connection_info']) | self._db_only_fields) mapping_dict = dict(self) for fld in drop_fields: mapping_dict.pop(fld, None) return mapping_dict def is_safe_for_update(block_device_dict): """Determine if passed dict is a safe subset for update. Safe subset in this case means a safe subset of both legacy and new versions of data, that can be passed to an UPDATE query without any transformation. """ fields = set(block_device_dict.keys()) return fields <= (bdm_new_fields | bdm_db_inherited_fields | bdm_db_only_fields) def create_image_bdm(image_ref, boot_index=0): """Create a block device dict based on the image_ref. This is useful in the API layer to keep the compatibility with having an image_ref as a field in the instance requests """ return BlockDeviceDict( {'source_type': 'image', 'image_id': image_ref, 'delete_on_termination': True, 'boot_index': boot_index, 'device_type': 'disk', 'destination_type': 'local'}) def create_blank_bdm(size, guest_format=None): return BlockDeviceDict( {'source_type': 'blank', 'delete_on_termination': True, 'device_type': 'disk', 'boot_index': -1, 'destination_type': 'local', 'guest_format': guest_format, 'volume_size': size}) def snapshot_from_bdm(snapshot_id, template): """Create a basic volume snapshot BDM from a given template bdm.""" copy_from_template = ('disk_bus', 'device_type', 'boot_index', 'delete_on_termination', 'volume_size', 'device_name') snapshot_dict = {'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': snapshot_id} for key in copy_from_template: snapshot_dict[key] = template.get(key) return BlockDeviceDict(snapshot_dict) def from_legacy_mapping(legacy_block_device_mapping, image_uuid='', root_device_name=None, no_root=False): """Transform a legacy list of block devices to the new data format.""" new_bdms = [BlockDeviceDict.from_legacy(legacy_bdm) for legacy_bdm in legacy_block_device_mapping] # NOTE (ndipanov): We will not decide which device is root here - we assume # that it will be supplied later. This is useful for having the root device # as part of the image defined mappings that are already in the v2 format. 
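# Illustrative use of the factory helpers above: the BDM synthesized for an
# image-backed root disk and for a 2 GiB blank local disk. The image UUID is
# a placeholder.
root_bdm = create_image_bdm('155d900f-4e14-4e4c-a73d-069cbf4541e6')
# root_bdm['source_type'] == 'image', root_bdm['boot_index'] == 0
blank_bdm = create_blank_bdm(2, guest_format='ext4')
# blank_bdm['source_type'] == 'blank', blank_bdm['volume_size'] == 2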
if no_root: for bdm in new_bdms: bdm['boot_index'] = -1 return new_bdms image_bdm = None volume_backed = False # Try to assign boot_device if not root_device_name and not image_uuid: # NOTE (ndipanov): If there is no root_device, pick the first non # blank one. non_blank = [bdm for bdm in new_bdms if bdm['source_type'] != 'blank'] if non_blank: non_blank[0]['boot_index'] = 0 else: for bdm in new_bdms: if (bdm['source_type'] in ('volume', 'snapshot', 'image') and root_device_name is not None and (strip_dev(bdm.get('device_name')) == strip_dev(root_device_name))): bdm['boot_index'] = 0 volume_backed = True elif not bdm['no_device']: bdm['boot_index'] = -1 else: bdm['boot_index'] = None if not volume_backed and image_uuid: image_bdm = create_image_bdm(image_uuid, boot_index=0) return ([image_bdm] if image_bdm else []) + new_bdms def properties_root_device_name(properties): """Get root device name from image meta data. If it isn't specified, return None. """ root_device_name = None # NOTE(yamahata): see image_service.s3.s3create() for bdm in properties.get('mappings', []): if bdm['virtual'] == 'root': root_device_name = bdm['device'] # NOTE(yamahata): register_image's command line can override # .manifest.xml if 'root_device_name' in properties: root_device_name = properties['root_device_name'] return root_device_name def validate_device_name(value): try: # NOTE (ndipanov): Do not allow empty device names # until assigning default values # are supported by nova.compute utils.check_string_length(value, 'Device name', min_length=1, max_length=255) except exception.InvalidInput: raise exception.InvalidBDMFormat( details=_("Device name empty or too long.")) if ' ' in value: raise exception.InvalidBDMFormat( details=_("Device name contains spaces.")) def validate_and_default_volume_size(bdm): if bdm.get('volume_size'): try: bdm['volume_size'] = utils.validate_integer( bdm['volume_size'], 'volume_size', min_value=0) except exception.InvalidInput: # NOTE: We can remove this validation code after removing # Nova v2.0 API code, because v2.1 API validates this case # already at its REST API layer. raise exception.InvalidBDMFormat( details=_("Invalid volume_size.")) _ephemeral = re.compile(r'^ephemeral(\d|[1-9]\d+)$') def is_ephemeral(device_name): return _ephemeral.match(device_name) is not None def ephemeral_num(ephemeral_name): assert is_ephemeral(ephemeral_name) return int(_ephemeral.sub('\\1', ephemeral_name)) def is_swap_or_ephemeral(device_name): return (device_name and (device_name == 'swap' or is_ephemeral(device_name))) def new_format_is_swap(bdm): if (bdm.get('source_type') == 'blank' and bdm.get('destination_type') == 'local' and bdm.get('guest_format') == 'swap'): return True return False def new_format_is_ephemeral(bdm): if (bdm.get('source_type') == 'blank' and bdm.get('destination_type') == 'local' and bdm.get('guest_format') != 'swap'): return True return False def get_root_bdm(bdms): try: return next(bdm for bdm in bdms if bdm.get('boot_index', -1) == 0) except StopIteration: return None def get_bdms_to_connect(bdms, exclude_root_mapping=False): """Will return non-root mappings, when exclude_root_mapping is true. Otherwise all mappings will be returned. 
""" return (bdm for bdm in bdms if bdm.get('boot_index', -1) != 0 or not exclude_root_mapping) def mappings_prepend_dev(mappings): """Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type.""" for m in mappings: virtual = m['virtual'] if (is_swap_or_ephemeral(virtual) and (not m['device'].startswith('/'))): m['device'] = '/dev/' + m['device'] return mappings _dev = re.compile('^/dev/') def strip_dev(device_name): """remove leading '/dev/'.""" return _dev.sub('', device_name) if device_name else device_name def prepend_dev(device_name): """Make sure there is a leading '/dev/'.""" return device_name and '/dev/' + strip_dev(device_name) _pref = re.compile('^((x?v|s|h)d)') def strip_prefix(device_name): """remove both leading /dev/ and xvd or sd or vd or hd.""" device_name = strip_dev(device_name) return _pref.sub('', device_name) if device_name else device_name _nums = re.compile(r'\d+') def get_device_letter(device_name): letter = strip_prefix(device_name) # NOTE(vish): delete numbers in case we have something like # /dev/sda1 return _nums.sub('', letter) if device_name else device_name def generate_device_letter(index): """Returns device letter by index (starts by zero) i.e. index = 0, 1,..., 18277 results = a, b,..., zzz """ base = ord('z') - ord('a') + 1 unit_dev_name = "" while index >= 0: letter = chr(ord('a') + (index % base)) unit_dev_name = letter + unit_dev_name index = int(index / base) - 1 return unit_dev_name def generate_device_name(prefix, index): """Returns device unit name by index (starts by zero) i.e. prefix = vd index = 0, 1,..., 18277 results = vda, vdb,..., vdzzz """ return prefix + generate_device_letter(index) def instance_block_mapping(instance, bdms): root_device_name = instance['root_device_name'] if root_device_name is None: return _DEFAULT_MAPPINGS mappings = {} mappings['ami'] = strip_dev(root_device_name) mappings['root'] = root_device_name default_ephemeral_device = instance.get('default_ephemeral_device') if default_ephemeral_device: mappings['ephemeral0'] = default_ephemeral_device default_swap_device = instance.get('default_swap_device') if default_swap_device: mappings['swap'] = default_swap_device ebs_devices = [] blanks = [] # 'ephemeralN', 'swap' and ebs for bdm in bdms: # ebs volume case if bdm.destination_type == 'volume': ebs_devices.append(bdm.device_name) continue if bdm.source_type == 'blank': blanks.append(bdm) # NOTE(yamahata): I'm not sure how ebs device should be numbered. # Right now sort by device name for deterministic # result. if ebs_devices: # NOTE(claudiub): python2.7 sort places None values first. # this sort will maintain the same behaviour for both py27 and py34. 
ebs_devices = sorted(ebs_devices, key=lambda x: (x is not None, x)) for nebs, ebs in enumerate(ebs_devices): mappings['ebs%d' % nebs] = ebs swap = [bdm for bdm in blanks if bdm.guest_format == 'swap'] if swap: mappings['swap'] = swap.pop().device_name ephemerals = [bdm for bdm in blanks if bdm.guest_format != 'swap'] if ephemerals: for num, eph in enumerate(ephemerals): mappings['ephemeral%d' % num] = eph.device_name return mappings def match_device(device): """Matches device name and returns prefix, suffix.""" match = re.match("(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$", device) if not match: return None return match.groups() def volume_in_mapping(mount_device, block_device_info): block_device_list = [strip_dev(vol['mount_device']) for vol in driver.block_device_info_get_mapping( block_device_info)] swap = driver.block_device_info_get_swap(block_device_info) if driver.swap_is_usable(swap): block_device_list.append(strip_dev(swap['device_name'])) block_device_list += [strip_dev(ephemeral['device_name']) for ephemeral in driver.block_device_info_get_ephemerals( block_device_info)] LOG.debug("block_device_list %s", sorted(filter(None, block_device_list))) return strip_dev(mount_device) in block_device_list def get_bdm_ephemeral_disk_size(block_device_mappings): return sum(bdm.get('volume_size', 0) for bdm in block_device_mappings if new_format_is_ephemeral(bdm)) def get_bdm_swap_list(block_device_mappings): return [bdm for bdm in block_device_mappings if new_format_is_swap(bdm)] def get_bdm_local_disk_num(block_device_mappings): return len([bdm for bdm in block_device_mappings if bdm.get('destination_type') == 'local']) def get_bdm_image_metadata(context, image_api, volume_api, block_device_mapping, legacy_bdm=True): """Attempt to retrieve image metadata from a given block_device_mapping. If we are booting from a volume, we need to get the volume details from Cinder and make sure we pass the metadata back accordingly. :param context: request context :param image_api: Image API :param volume_api: Volume API :param block_device_mapping: :param legacy_bdm: """ if not block_device_mapping: return {} for bdm in block_device_mapping: if (legacy_bdm and get_device_letter( bdm.get('device_name', '')) != 'a'): continue elif not legacy_bdm and bdm.get('boot_index') != 0: continue volume_id = bdm.get('volume_id') snapshot_id = bdm.get('snapshot_id') if snapshot_id: # NOTE(alaski): A volume snapshot inherits metadata from the # originating volume, but the API does not expose metadata # on the snapshot itself. So we query the volume for it below. snapshot = volume_api.get_snapshot(context, snapshot_id) volume_id = snapshot['volume_id'] if bdm.get('image_id'): try: image_id = bdm['image_id'] image_meta = image_api.get(context, image_id) return image_meta except Exception: raise exception.InvalidBDMImage(id=image_id) elif volume_id: try: volume = volume_api.get(context, volume_id) except exception.CinderConnectionFailed: raise except Exception: raise exception.InvalidBDMVolume(id=volume_id) if not volume.get('bootable', True): raise exception.InvalidBDMVolumeNotBootable(id=volume_id) return get_image_metadata_from_volume(volume) return {} def get_image_metadata_from_volume(volume): properties = copy.copy(volume.get('volume_image_metadata', {})) image_meta = {'properties': properties} # Volume size is no longer related to the original image size, # so we take it from the volume directly. Cinder creates # volumes in Gb increments, and stores size in Gb, whereas # glance reports size in bytes. 
As we're returning glance # metadata here, we need to convert it. image_meta['size'] = volume.get('size', 0) * units.Gi # NOTE(yjiang5): restore the basic attributes # NOTE(mdbooth): These values come from volume_glance_metadata # in cinder. This is a simple key/value table, and all values # are strings. We need to convert them to ints to avoid # unexpected type errors. for attr in VIM_IMAGE_ATTRIBUTES: val = properties.pop(attr, None) if attr in ('min_ram', 'min_disk'): image_meta[attr] = int(val or 0) # NOTE(mriedem): Set the status to 'active' as a really old hack # from when this method was in the compute API class and is # needed for _validate_flavor_image which makes sure the image # is 'active'. For volume-backed servers, if the volume is not # available because the image backing the volume is not active, # then the compute API trying to reserve the volume should fail. image_meta['status'] = 'active' return image_meta ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/cache_utils.py0000664000175000017500000000745100000000000016371 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Simple wrapper for oslo_cache.""" from oslo_cache import core as cache from oslo_log import log as logging import nova.conf from nova.i18n import _ CONF = nova.conf.CONF LOG = logging.getLogger(__name__) WEEK = 604800 def _warn_if_null_backend(): if CONF.cache.backend == 'dogpile.cache.null': LOG.warning("Cache enabled with backend dogpile.cache.null.") def get_memcached_client(expiration_time=0): """Used ONLY when memcached is explicitly needed.""" # If the operator has [cache]/enabled flag on then we let oslo_cache # configure the region from the configuration settings if CONF.cache.enabled and CONF.cache.memcache_servers: _warn_if_null_backend() return CacheClient( _get_default_cache_region(expiration_time=expiration_time)) def get_client(expiration_time=0): """Used to get a caching client.""" # If the operator has [cache]/enabled flag on then we let oslo_cache # configure the region from configuration settings. if CONF.cache.enabled: _warn_if_null_backend() return CacheClient( _get_default_cache_region(expiration_time=expiration_time)) # If [cache]/enabled flag is off, we use the dictionary backend return CacheClient( _get_custom_cache_region(expiration_time=expiration_time, backend='oslo_cache.dict')) def _get_default_cache_region(expiration_time): region = cache.create_region() if expiration_time != 0: CONF.cache.expiration_time = expiration_time cache.configure_cache_region(CONF, region) return region def _get_custom_cache_region(expiration_time=WEEK, backend=None, url=None): """Create instance of oslo_cache client. For backends you can pass specific parameters by kwargs. For 'dogpile.cache.memcached' backend 'url' parameter must be specified. 
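    Example calls (illustrative addition, not part of the original
    docstring; the memcached URL is a placeholder)::

        _get_custom_cache_region(backend='oslo_cache.dict',
                                 expiration_time=60)
        _get_custom_cache_region(backend='dogpile.cache.memcached',
                                 url='<memcached url>')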
:param backend: backend name :param expiration_time: interval in seconds to indicate maximum time-to-live value for each key :param url: memcached url(s) """ region = cache.create_region() region_params = {} if expiration_time != 0: region_params['expiration_time'] = expiration_time if backend == 'oslo_cache.dict': region_params['arguments'] = {'expiration_time': expiration_time} elif backend == 'dogpile.cache.memcached': region_params['arguments'] = {'url': url} else: raise RuntimeError(_('old style configuration can use ' 'only dictionary or memcached backends')) region.configure(backend, **region_params) return region class CacheClient(object): """Replicates a tiny subset of memcached client interface.""" def __init__(self, region): self.region = region def get(self, key): value = self.region.get(key) if value == cache.NO_VALUE: return None return value def set(self, key, value): return self.region.set(key, value) def delete(self, key): return self.region.delete(key) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3376083 nova-32.0.0/nova/cmd/0000775000175000017500000000000000000000000014270 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/cmd/__init__.py0000664000175000017500000000122000000000000016374 0ustar00zuulzuul00000000000000# Copyright (c) 2019 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import monkey_patch monkey_patch.patch() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/cmd/baseproxy.py0000664000175000017500000000517300000000000016664 0ustar00zuulzuul00000000000000# # Copyright (C) 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """Base proxy module used to create compatible consoles for OpenStack Nova.""" import os import sys from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts import nova.conf from nova.conf import novnc from nova.console import websocketproxy from nova import objects from nova import version CONF = nova.conf.CONF # NOTE this is required ensure CONF.web # is registered properly novnc.register_cli_opts(CONF) gmr_opts.set_defaults(CONF) objects.register_all() def exit_with_error(msg, errno=-1): sys.stderr.write(msg + '\n') sys.exit(errno) def proxy(host, port, security_proxy=None): """:param host: local address to listen on :param port: local port to listen on :param security_proxy: instance of nova.console.securityproxy.base.SecurityProxy Setup a proxy listening on @host:@port. If the @security_proxy parameter is not None, this instance is used to negotiate security layer with the proxy target """ if CONF.ssl_only and not os.path.exists(CONF.cert): exit_with_error("SSL only and %s not found" % CONF.cert) # Check to see if tty html/js/css files are present if CONF.web and not os.path.exists(CONF.web): exit_with_error("Can not find html/js files at %s." % CONF.web) logging.setup(CONF, "nova") gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) # Create and start the NovaWebSockets proxy websocketproxy.NovaWebSocketProxy( listen_host=host, listen_port=port, source_is_ipv6=CONF.source_is_ipv6, cert=CONF.cert, key=CONF.key, ssl_only=CONF.ssl_only, ssl_ciphers=CONF.console.ssl_ciphers, ssl_minimum_version=CONF.console.ssl_minimum_version, daemon=CONF.daemon, record=CONF.record, traffic=not CONF.daemon, web=CONF.web, file_only=True, RequestHandlerClass=websocketproxy.NovaProxyRequestHandler, security_proxy=security_proxy, ).start_server() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/cmd/common.py0000664000175000017500000001431500000000000016136 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Common functions used by different CLI interfaces. """ import argparse import inspect from oslo_log import log as logging import nova.conf import nova.db.main.api from nova import exception from nova.i18n import _ CONF = nova.conf.CONF LOG = logging.getLogger(__name__) def validate_args(fn, *args, **kwargs): """Check that the supplied args are sufficient for calling a function. >>> validate_args(lambda a: None) Traceback (most recent call last): ... MissingArgs: Missing argument(s): a >>> validate_args(lambda a, b, c, d: None, 0, c=1) Traceback (most recent call last): ... 
MissingArgs: Missing argument(s): b, d :param fn: the function to check :param arg: the positional arguments supplied :param kwargs: the keyword arguments supplied """ argspec = inspect.getfullargspec(fn) num_defaults = len(argspec.defaults or []) required_args = argspec.args[:len(argspec.args) - num_defaults] if fn.__self__ is not None: required_args.pop(0) missing = [arg for arg in required_args if arg not in kwargs] missing = missing[len(args):] return missing # Decorators for actions def args(*args, **kwargs): """Decorator which adds the given args and kwargs to the args list of the desired func's __dict__. """ def _decorator(func): func.__dict__.setdefault('args', []).insert(0, (args, kwargs)) return func return _decorator def methods_of(obj): """Get all callable methods of an object that don't start with underscore returns a list of tuples of the form (method_name, method) """ result = [] for i in dir(obj): if callable(getattr(obj, i)) and not i.startswith('_'): result.append((i, getattr(obj, i))) return result def add_command_parsers(subparsers, categories): """Adds command parsers to the given subparsers. Adds version and bash-completion parsers. Adds a parser with subparsers for each category in the categories dict given. """ parser = subparsers.add_parser('version') parser = subparsers.add_parser('bash-completion') parser.add_argument('query_category', nargs='?') for category in categories: command_object = categories[category]() desc = getattr(command_object, 'description', None) parser = subparsers.add_parser(category, description=desc) parser.set_defaults(command_object=command_object) category_subparsers = parser.add_subparsers(dest='action') category_subparsers.required = True for (action, action_fn) in methods_of(command_object): parser = category_subparsers.add_parser( action, description=getattr(action_fn, 'description', desc)) action_kwargs = [] for args, kwargs in getattr(action_fn, 'args', []): # we must handle positional parameters (ARG) separately from # positional parameters (--opt). Detect this by checking for # the presence of leading '--' if args[0] != args[0].lstrip('-'): kwargs.setdefault('dest', args[0].lstrip('-')) if kwargs['dest'].startswith('action_kwarg_'): action_kwargs.append( kwargs['dest'][len('action_kwarg_'):]) else: action_kwargs.append(kwargs['dest']) kwargs['dest'] = 'action_kwarg_' + kwargs['dest'] else: action_kwargs.append(args[0]) args = ['action_kwarg_' + arg for arg in args] parser.add_argument(*args, **kwargs) parser.set_defaults(action_fn=action_fn) parser.set_defaults(action_kwargs=action_kwargs) parser.add_argument('action_args', nargs='*', help=argparse.SUPPRESS) def print_bash_completion(categories): if not CONF.category.query_category: print(" ".join(categories.keys())) elif CONF.category.query_category in categories: fn = categories[CONF.category.query_category] command_object = fn() actions = methods_of(command_object) print(" ".join([k for (k, v) in actions])) def get_action_fn(): fn = CONF.category.action_fn fn_args = [] for arg in CONF.category.action_args: if isinstance(arg, bytes): arg = arg.decode('utf-8') fn_args.append(arg) fn_kwargs = {} for k in CONF.category.action_kwargs: v = getattr(CONF.category, 'action_kwarg_' + k) if v is None: continue if isinstance(v, bytes): v = v.decode('utf-8') fn_kwargs[k] = v # call the action with the remaining arguments # check arguments missing = validate_args(fn, *fn_args, **fn_kwargs) if missing: # NOTE(mikal): this isn't the most helpful error message ever. 
It is # long, and tells you a lot of things you probably don't want to know # if you just got a single arg wrong. print(fn.__doc__) CONF.print_help() raise exception.Invalid( _("Missing arguments: %s") % ", ".join(missing)) return fn, fn_args, fn_kwargs def action_description(text): """Decorator for adding a description to command action. To display help text on action call instead of common category help text action function can be decorated. command -h will show description and arguments. """ def _decorator(func): func.description = text return func return _decorator ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/cmd/compute.py0000664000175000017500000000407400000000000016323 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for Nova Compute.""" import shlex import sys import os_brick import os_vif from oslo_log import log as logging from oslo_privsep import priv_context from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from nova.compute import rpcapi as compute_rpcapi from nova.conductor import rpcapi as conductor_rpcapi import nova.conf from nova import config import nova.db.main.api from nova import objects from nova.objects import base as objects_base from nova import service from nova import utils from nova import version CONF = nova.conf.CONF def main(): config.parse_args(sys.argv) logging.setup(CONF, 'nova') priv_context.init(root_helper=shlex.split(utils.get_root_helper())) objects.register_all() gmr_opts.set_defaults(CONF) os_brick.setup(CONF) # Ensure os-vif objects are registered and plugins loaded os_vif.initialize() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) # disable database access for this service nova.db.main.api.DISABLE_DB_ACCESS = True objects_base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI() objects.Service.enable_min_version_cache() server = service.Service.create(binary='nova-compute', topic=compute_rpcapi.RPC_TOPIC) service.serve(server) service.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/cmd/conductor.py0000664000175000017500000000274600000000000016653 0ustar00zuulzuul00000000000000# Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Starter script for Nova Conductor.""" import sys from oslo_concurrency import processutils from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from nova.conductor import rpcapi import nova.conf from nova import config from nova import objects from nova import service from nova import version CONF = nova.conf.CONF def main(): config.parse_args(sys.argv) logging.setup(CONF, "nova") objects.register_all() gmr_opts.set_defaults(CONF) objects.Service.enable_min_version_cache() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) server = service.Service.create(binary='nova-conductor', topic=rpcapi.RPC_TOPIC) workers = CONF.conductor.workers or processutils.get_worker_count() service.serve(server, workers=workers) service.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/cmd/manage.py0000664000175000017500000052314600000000000016105 0ustar00zuulzuul00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ CLI interface for nova management. 
""" import collections from contextlib import contextmanager import functools import os import re import sys import textwrap import time import traceback import typing as ty from urllib import parse as urlparse from dateutil import parser as dateutil_parser from keystoneauth1 import exceptions as ks_exc from neutronclient.common import exceptions as neutron_client_exc from os_brick.initiator import connector import os_resource_classes as orc from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging import oslo_messaging as messaging from oslo_serialization import jsonutils from oslo_utils import encodeutils from oslo_utils import uuidutils import prettytable from sqlalchemy.engine import url as sqla_url from nova.cmd import common as cmd_common from nova.compute import api from nova.compute import instance_actions from nova.compute import instance_list as list_instances from nova.compute import rpcapi import nova.conf from nova.conf import utils as conf_utils from nova import config from nova import context from nova.db import constants as db_const from nova.db.main import api as db from nova.db import migration from nova import exception from nova.i18n import _ from nova.limit import local as local_limit from nova.limit import placement as placement_limit from nova.network import constants from nova.network import neutron as neutron_api from nova import objects from nova.objects import block_device as block_device_obj from nova.objects import compute_node as compute_node_obj from nova.objects import fields as obj_fields from nova.objects import host_mapping as host_mapping_obj from nova.objects import instance as instance_obj from nova.objects import instance_mapping as instance_mapping_obj from nova.objects import pci_device as pci_device_obj from nova.objects import quotas as quotas_obj from nova.objects import virtual_interface as virtual_interface_obj import nova.quota from nova import rpc from nova.scheduler.client import report from nova.scheduler import utils as scheduler_utils from nova import utils from nova import version from nova.virt.libvirt import machine_type_utils from nova.volume import cinder CONF = nova.conf.CONF LOG = logging.getLogger(__name__) # Keep this list sorted and one entry per line for readability. _EXTRA_DEFAULT_LOG_LEVELS = [ 'nova=ERROR', 'oslo_concurrency=INFO', 'oslo_db=INFO', 'oslo_policy=INFO', 'oslo.privsep=ERROR', 'os_brick=ERROR', ] # Consts indicating whether allocations need to be healed by creating them or # by updating existing allocations. _CREATE = 'create' _UPDATE = 'update' # Decorators for actions args = cmd_common.args action_description = cmd_common.action_description def mask_passwd_in_url(url): parsed = urlparse.urlparse(url) safe_netloc = re.sub(':.*@', ':****@', parsed.netloc) new_parsed = urlparse.ParseResult( parsed.scheme, safe_netloc, parsed.path, parsed.params, parsed.query, parsed.fragment) return urlparse.urlunparse(new_parsed) def format_dict(dct, dict_property="Property", dict_value='Value', sort_key=None): """Print a `dict` as a table of two columns. :param dct: `dict` to print :param dict_property: name of the first column :param dict_value: header label for the value (second) column :param sort_key: key used for sorting the dict """ pt = prettytable.PrettyTable([dict_property, dict_value]) pt.align = 'l' # starting in PrettyTable 3.4.0 we need to also set the header # as align now only applies to the data. 
if hasattr(pt, 'header_align'): pt.header_align = 'l' for k, v in sorted(dct.items(), key=sort_key): # convert dict to str to check length if isinstance(v, dict): v = str(v) # if value has a newline, add in multiple rows # e.g. fault with stacktrace if v and isinstance(v, str) and r'\n' in v: lines = v.strip().split(r'\n') col1 = k for line in lines: pt.add_row([col1, line]) col1 = '' else: pt.add_row([k, v]) return encodeutils.safe_encode(pt.get_string()).decode() @contextmanager def locked_instance(cell_mapping, instance, reason): """Context manager to lock and unlock instance, lock state will be restored regardless of the success or failure of target functionality. :param cell_mapping: instance-cell-mapping :param instance: instance to be lock and unlock :param reason: reason, why lock is required """ compute_api = api.API() initial_state = 'locked' if instance.locked else 'unlocked' if not instance.locked: with context.target_cell( context.get_admin_context(), cell_mapping) as cctxt: compute_api.lock(cctxt, instance, reason=reason) try: yield finally: if initial_state == 'unlocked': with context.target_cell( context.get_admin_context(), cell_mapping) as cctxt: compute_api.unlock(cctxt, instance) class DbCommands(object): """Class for managing the main database.""" # NOTE(danms): These functions are called with a DB context and a # count, which is the maximum batch size requested by the # user. They must be idempotent. At most $count records should be # migrated. The function must return a tuple of (found, done). The # found value indicates how many unmigrated/candidate records existed in # the database prior to the migration (either total, or up to the # $count limit provided), and a nonzero found value may tell the user # that there is still work to do. The done value indicates whether # or not any records were actually migrated by the function. Thus # if both (found, done) are nonzero, work was done and some work # remains. If found is nonzero and done is zero, some records are # not migratable (or don't need migrating), but all migrations that can # complete have finished. # NOTE(stephenfin): These names must be unique online_migrations = ( # Added in Pike quotas_obj.migrate_quota_limits_to_api_db, # Added in Pike quotas_obj.migrate_quota_classes_to_api_db, # Added in Queens db.migration_migrate_to_uuid, # Added in Queens block_device_obj.BlockDeviceMapping.populate_uuids, # Added in Rocky # NOTE(tssurya): This online migration is going to be backported to # Queens and Pike since instance.avz of instances before Pike # need to be populated if it was not specified during boot time. instance_obj.populate_missing_availability_zones, # Added in Rocky instance_mapping_obj.populate_queued_for_delete, # Added in Stein compute_node_obj.migrate_empty_ratio, # Added in Stein virtual_interface_obj.fill_virtual_interface_list, # Added in Stein instance_mapping_obj.populate_user_id, # Added in Victoria pci_device_obj.PciDevice.populate_dev_uuids, # Added in 2023.2 instance_obj.populate_instance_compute_id, ) @args('--local_cell', action='store_true', help='Only sync db in the local cell: do not attempt to fan-out ' 'to all cells') @args('version', metavar='VERSION', nargs='?', help='Database version') def sync(self, version=None, local_cell=False): """Sync the database up to the most recent version.""" if not local_cell: ctxt = context.RequestContext() # NOTE(mdoff): Multiple cells not yet implemented. Currently # fanout only looks for cell0. 
try: cell_mapping = objects.CellMapping.get_by_uuid( ctxt, objects.CellMapping.CELL0_UUID, ) with context.target_cell(ctxt, cell_mapping) as cctxt: migration.db_sync(version, context=cctxt) except exception.CellMappingNotFound: msg = _( 'WARNING: cell0 mapping not found - not syncing cell0.' ) print(msg) except Exception as e: msg = _( 'ERROR: Could not access cell0.\n' 'Has the nova_api database been created?\n' 'Has the nova_cell0 database been created?\n' 'Has "nova-manage api_db sync" been run?\n' 'Has "nova-manage cell_v2 map_cell0" been run?\n' 'Is [api_database]/connection set in nova.conf?\n' 'Is the cell0 database connection URL correct?\n' 'Error: %s' ) print(msg % str(e)) return 1 return migration.db_sync(version) def version(self): """Print the current database version.""" print(migration.db_version()) @args('--max_rows', type=int, metavar='', dest='max_rows', help='Maximum number of deleted rows to archive per table. Defaults ' 'to 1000. Note that this number is a soft limit and does not ' 'include the corresponding rows, if any, that are removed ' 'from the API database for deleted instances.') @args('--before', metavar='', help=('Archive rows that have been deleted before this date. ' 'Accepts date strings in the default format output by the ' '``date`` command, as well as ``YYYY-MM-DD [HH:mm:ss]``.')) @args('--verbose', action='store_true', dest='verbose', default=False, help='Print how many rows were archived per table.') @args('--until-complete', action='store_true', dest='until_complete', default=False, help=('Run continuously until all deleted rows are archived. Use ' 'max_rows as a batch size for each iteration.')) @args('--purge', action='store_true', dest='purge', default=False, help='Purge all data from shadow tables after archive completes') @args('--all-cells', action='store_true', dest='all_cells', default=False, help='Run command across all cells.') @args('--task-log', action='store_true', dest='task_log', default=False, help=('Also archive ``task_log`` table records. Note that ' '``task_log`` records are never deleted, so archiving them ' 'will move all of the ``task_log`` records up to now into the ' 'shadow tables. It is recommended to also specify the ' '``--before`` option to avoid races for those consuming ' '``task_log`` record data via the ' '``/os-instance_usage_audit_log`` API (example: Telemetry).')) @args('--sleep', type=int, metavar='', dest='sleep', help='The amount of time in seconds to sleep between batches when ' '``--until-complete`` is used. Defaults to 0.') def archive_deleted_rows( self, max_rows=1000, verbose=False, until_complete=False, purge=False, before=None, all_cells=False, task_log=False, sleep=0, ): """Move deleted rows from production tables to shadow tables. Returns 0 if nothing was archived, 1 if some number of rows were archived, 2 if max_rows is invalid, 3 if no connection could be established to the API DB, 4 if before date is invalid. If automating, this should be run continuously while the result is 1, stopping at 0. """ max_rows = int(max_rows) if max_rows < 0: print(_("Must supply a positive value for max_rows")) return 2 if max_rows > db_const.MAX_INT: print(_('max rows must be <= %(max_value)d') % {'max_value': db_const.MAX_INT}) return 2 ctxt = context.get_admin_context() try: # NOTE(tssurya): This check has been added to validate if the API # DB is reachable or not as this is essential for purging the # related API database records of the deleted instances. 
cell_mappings = objects.CellMappingList.get_all(ctxt) except db_exc.CantStartEngineError: print(_('Failed to connect to API DB so aborting this archival ' 'attempt. Please check your config file to make sure that ' '[api_database]/connection is set and run this ' 'command again.')) return 3 if before: try: before_date = dateutil_parser.parse(before, fuzzy=True) except ValueError as e: print(_('Invalid value for --before: %s') % e) return 4 else: before_date = None table_to_rows_archived = {} if until_complete and verbose: sys.stdout.write(_('Archiving') + '..') # noqa interrupt = False if all_cells: # Sort first by cell name, then by table: # +--------------------------------+-------------------------+ # | Table | Number of Rows Archived | # +--------------------------------+-------------------------+ # | cell0.block_device_mapping | 1 | # | cell1.block_device_mapping | 1 | # | cell1.instance_actions | 2 | # | cell1.instance_actions_events | 2 | # | cell2.block_device_mapping | 1 | # | cell2.instance_actions | 2 | # | cell2.instance_actions_events | 2 | # ... def sort_func(item): cell_name, table = item[0].split('.') return cell_name, table print_sort_func = sort_func else: cell_mappings = [None] print_sort_func = None total_rows_archived = 0 for cell_mapping in cell_mappings: # NOTE(Kevin_Zheng): No need to calculate limit for each # cell if until_complete=True. # We need not adjust max rows to avoid exceeding a specified total # limit because with until_complete=True, we have no total limit. if until_complete: max_rows_to_archive = max_rows elif max_rows > total_rows_archived: # We reduce the max rows to archive based on what we've # archived so far to avoid potentially exceeding the specified # total limit. max_rows_to_archive = max_rows - total_rows_archived else: break # If all_cells=False, cell_mapping is None with context.target_cell(ctxt, cell_mapping) as cctxt: cell_name = cell_mapping.name if cell_mapping else None try: rows_archived = self._do_archive( table_to_rows_archived, cctxt, max_rows_to_archive, until_complete, verbose, before_date, cell_name, task_log, sleep) except KeyboardInterrupt: interrupt = True break # TODO(melwitt): Handle skip/warn for unreachable cells. Note # that cell_mappings = [None] if not --all-cells total_rows_archived += rows_archived if until_complete and verbose: if interrupt: print('.' + _('stopped')) # noqa else: print('.' + _('complete')) # noqa if verbose: if table_to_rows_archived: print(format_dict( table_to_rows_archived, dict_property=_('Table'), dict_value=_('Number of Rows Archived'), sort_key=print_sort_func, )) else: print(_('Nothing was archived.')) if table_to_rows_archived and purge: if verbose: print(_('Rows were archived, running purge...')) self.purge(purge_all=True, verbose=verbose, all_cells=all_cells) # NOTE(danms): Return nonzero if we archived something return int(bool(table_to_rows_archived)) def _do_archive( self, table_to_rows_archived, cctxt, max_rows, until_complete, verbose, before_date, cell_name, task_log, sleep, ): """Helper function for archiving deleted rows for a cell. This will archive deleted rows for a cell database and remove the associated API database records for deleted instances. :param table_to_rows_archived: Dict tracking the number of rows archived by .. Example: {'cell0.instances': 2, 'cell1.instances': 5} :param cctxt: Cell-targeted nova.context.RequestContext if archiving across all cells :param max_rows: Maximum number of deleted rows to archive per table. 
Note that this number is a soft limit and does not include the corresponding rows, if any, that are removed from the API database for deleted instances. :param until_complete: Whether to run continuously until all deleted rows are archived :param verbose: Whether to print how many rows were archived per table :param before_date: Archive rows that were deleted before this date :param cell_name: Name of the cell or None if not archiving across all cells :param task_log: Whether to archive task_log table rows :param sleep: The amount of time in seconds to sleep between batches when ``until_complete`` is True. """ ctxt = context.get_admin_context() while True: # table_to_rows = {table_name: number_of_rows_archived} # deleted_instance_uuids = ['uuid1', 'uuid2', ...] table_to_rows, deleted_instance_uuids, total_rows_archived = \ db.archive_deleted_rows( cctxt, max_rows, before=before_date, task_log=task_log) for table_name, rows_archived in table_to_rows.items(): if cell_name: table_name = cell_name + '.' + table_name table_to_rows_archived.setdefault(table_name, 0) table_to_rows_archived[table_name] += rows_archived # deleted_instance_uuids does not necessarily mean that any # instances rows were archived because it is obtained by a query # separate from the archive queries. For example, if a # DBReferenceError was raised while processing the instances table, # we would have skipped the table and had 0 rows archived even # though deleted instances rows were found. instances_archived = table_to_rows.get('instances', 0) if deleted_instance_uuids and instances_archived: table_to_rows_archived.setdefault( 'API_DB.instance_mappings', 0) table_to_rows_archived.setdefault( 'API_DB.request_specs', 0) table_to_rows_archived.setdefault( 'API_DB.instance_group_member', 0) deleted_mappings = objects.InstanceMappingList.destroy_bulk( ctxt, deleted_instance_uuids) table_to_rows_archived[ 'API_DB.instance_mappings'] += deleted_mappings deleted_specs = objects.RequestSpec.destroy_bulk( ctxt, deleted_instance_uuids) table_to_rows_archived[ 'API_DB.request_specs'] += deleted_specs deleted_group_members = ( objects.InstanceGroup.destroy_members_bulk( ctxt, deleted_instance_uuids)) table_to_rows_archived[ 'API_DB.instance_group_member'] += deleted_group_members # If we're not archiving until there is nothing more to archive, we # have reached max_rows in this cell DB or there was nothing to # archive. We check the values() in case we get something like # table_to_rows = {'instances': 0} back somehow. if not until_complete or not any(table_to_rows.values()): break if verbose: sys.stdout.write('.') # Optionally sleep between batches to throttle the archiving. time.sleep(sleep) return total_rows_archived @args('--before', metavar='', dest='before', help='If specified, purge rows from shadow tables that are older ' 'than this. 
Accepts date strings in the default format output ' 'by the ``date`` command, as well as ``YYYY-MM-DD ' '[HH:mm:ss]``.') @args('--all', dest='purge_all', action='store_true', help='Purge all rows in the shadow tables') @args('--verbose', dest='verbose', action='store_true', default=False, help='Print information about purged records') @args('--all-cells', dest='all_cells', action='store_true', default=False, help='Run against all cell databases') def purge(self, before=None, purge_all=False, verbose=False, all_cells=False): if before is None and purge_all is False: print(_('Either --before or --all is required')) return 1 if before: try: before_date = dateutil_parser.parse(before, fuzzy=True) except ValueError as e: print(_('Invalid value for --before: %s') % e) return 2 else: before_date = None def status(msg): if verbose: print('%s: %s' % (identity, msg)) deleted = 0 admin_ctxt = context.get_admin_context() if all_cells: try: cells = objects.CellMappingList.get_all(admin_ctxt) except db_exc.DBError: print(_('Unable to get cell list from API DB. ' 'Is it configured?')) return 4 for cell in cells: identity = _('Cell %s') % cell.identity with context.target_cell(admin_ctxt, cell) as cctxt: deleted += db.purge_shadow_tables( cctxt, before_date, status_fn=status) else: identity = _('DB') deleted = db.purge_shadow_tables( admin_ctxt, before_date, status_fn=status) if deleted: return 0 else: return 3 def _run_migration(self, ctxt, max_count): ran = 0 exceptions = False migrations = {} for migration_meth in self.online_migrations: count = max_count - ran try: found, done = migration_meth(ctxt, count) except Exception: msg = (_("Error attempting to run %(method)s") % dict( method=migration_meth)) print(msg) LOG.exception(msg) exceptions = True found = done = 0 name = migration_meth.__name__ if found: print(_('%(total)i rows matched query %(meth)s, %(done)i ' 'migrated') % {'total': found, 'meth': name, 'done': done}) # This is the per-migration method result for this batch, and # _run_migration will either continue on to the next migration, # or stop if up to this point we've processed max_count of # records across all migration methods. migrations[name] = found, done if max_count is not None: ran += done if ran >= max_count: break return migrations, exceptions @args('--max-count', metavar='', dest='max_count', help='Maximum number of objects to consider') def online_data_migrations(self, max_count=None): ctxt = context.get_admin_context() if max_count is not None: try: max_count = int(max_count) except ValueError: max_count = -1 unlimited = False if max_count < 1: print(_('Must supply a positive value for max_number')) return 127 else: unlimited = True max_count = 50 print(_('Running batches of %i until complete') % max_count) ran = None migration_info = {} exceptions = False while ran is None or ran != 0: migrations, exceptions = self._run_migration(ctxt, max_count) ran = 0 # For each batch of migration method results, build the cumulative # set of results. 
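            # Illustrative example (added commentary): if one batch returns
            # {'populate_user_id': (10, 5)} and the next returns
            # {'populate_user_id': (3, 3)}, the cumulative migration_info
            # built below becomes {'populate_user_id': (13, 8)} - the found
            # and done counts are summed per migration name.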
for name in migrations: migration_info.setdefault(name, (0, 0)) migration_info[name] = ( migration_info[name][0] + migrations[name][0], migration_info[name][1] + migrations[name][1], ) ran += migrations[name][1] if not unlimited: break t = prettytable.PrettyTable([_('Migration'), _('Total Needed'), # Really: Total Found _('Completed')]) for name in sorted(migration_info.keys()): info = migration_info[name] t.add_row([name, info[0], info[1]]) print(t) # NOTE(imacdonn): In the "unlimited" case, the loop above will only # terminate when all possible migrations have been effected. If we're # still getting exceptions, there's a problem that requires # intervention. In the max-count case, exceptions are only considered # fatal if no work was done by any other migrations ("not ran"), # because otherwise work may still remain to be done, and that work # may resolve dependencies for the failing migrations. if exceptions and (unlimited or not ran): print(_("Some migrations failed unexpectedly. Check log for " "details.")) return 2 # TODO(mriedem): Potentially add another return code for # "there are more migrations, but not completable right now" return ran and 1 or 0 @args('--ironic-node-uuid', metavar='', dest='compute_node_uuid', help='UUID of Ironic node to be moved between services') @args('--destination-host', metavar='', dest='destination_service_host', help='Destination ironic nova-compute service CONF.host') def ironic_compute_node_move(self, compute_node_uuid, destination_service_host): ctxt = context.get_admin_context() destination_service = objects.Service.get_by_compute_host( ctxt, destination_service_host) if destination_service.forced_down: raise exception.NovaException( "Destination compute is forced down!") target_compute_node = objects.ComputeNode.get_by_uuid( ctxt, compute_node_uuid) source_service = objects.Service.get_by_id( ctxt, target_compute_node.service_id) if not source_service.forced_down: raise exception.NovaException( "Source service is not yet forced down!") instances = objects.InstanceList.get_by_host_and_node( ctxt, target_compute_node.host, target_compute_node.hypervisor_hostname) if len(instances) > 1: raise exception.NovaException( "Found an ironic host with more than one instance! 
" "Please delete all Nova instances that do not match " "the instance uuid recorded on the Ironic node.") target_compute_node.service_id = destination_service.id target_compute_node.host = destination_service.host target_compute_node.save() for instance in instances: # this is a bit like evacuate, except no need to rebuild instance.host = destination_service.host instance.save() class ApiDbCommands(object): """Class for managing the api database.""" def __init__(self): pass @args('version', metavar='VERSION', nargs='?', help='Database version') def sync(self, version=None): """Sync the database up to the most recent version.""" return migration.db_sync(version, database='api') def version(self): """Print the current database version.""" print(migration.db_version(database='api')) class CellV2Commands(object): """Commands for managing cells v2.""" def _validate_transport_url(self, transport_url, warn_about_none=True): if not transport_url: if not CONF.transport_url: if warn_about_none: print(_( 'Must specify --transport-url if ' '[DEFAULT]/transport_url is not set in the ' 'configuration file.')) return None print(_('--transport-url not provided in the command line, ' 'using the value [DEFAULT]/transport_url from the ' 'configuration file')) transport_url = CONF.transport_url try: messaging.TransportURL.parse(conf=CONF, url=objects.CellMapping.format_mq_url( transport_url)) except (messaging.InvalidTransportURL, ValueError) as e: print(_('Invalid transport URL: %s') % str(e)) return None return transport_url def _validate_database_connection( self, database_connection, warn_about_none=True): if not database_connection: if not CONF.database.connection: if warn_about_none: print(_( 'Must specify --database_connection if ' '[database]/connection is not set in the ' 'configuration file.')) return None print(_('--database_connection not provided in the command line, ' 'using the value [database]/connection from the ' 'configuration file')) return CONF.database.connection return database_connection def _non_unique_transport_url_database_connection_checker(self, ctxt, cell_mapping, transport_url, database_connection): for cell in objects.CellMappingList.get_all(ctxt): if cell_mapping and cell.uuid == cell_mapping.uuid: # If we're looking for a specific cell, then don't check # that one for same-ness to allow idempotent updates continue if (cell.database_connection == database_connection or cell.transport_url == transport_url): print(_('The specified transport_url and/or ' 'database_connection combination already exists ' 'for another cell with uuid %s.') % cell.uuid) return True return False @args('--transport-url', metavar='', dest='transport_url', help='The transport url for the cell message queue') def simple_cell_setup(self, transport_url=None): """Simple cellsv2 setup. This simplified command is for use by existing non-cells users to configure the default environment. Returns 0 if setup is completed (or has already been done) and 1 if no hosts are reporting (and this cannot be mapped). 
""" transport_url = self._validate_transport_url(transport_url) if not transport_url: return 1 ctxt = context.RequestContext() try: cell0_mapping = self._map_cell0() except db_exc.DBDuplicateEntry: print(_('Cell0 is already setup')) cell0_mapping = objects.CellMapping.get_by_uuid( ctxt, objects.CellMapping.CELL0_UUID) # Run migrations so cell0 is usable with context.target_cell(ctxt, cell0_mapping) as cctxt: try: migration.db_sync(None, context=cctxt) except db_exc.DBError as ex: print(_('Unable to sync cell0 schema: %s') % ex) cell_uuid = self._map_cell_and_hosts(transport_url) if cell_uuid is None: # There are no compute hosts which means no cell_mapping was # created. This should also mean that there are no instances. return 1 self.map_instances(cell_uuid) return 0 @args('--database_connection', metavar='', help='The database connection url for cell0. ' 'This is optional. If not provided, a standard database ' 'connection will be used based on the main database connection ' 'from the Nova configuration.' ) def map_cell0(self, database_connection=None): """Create a cell mapping for cell0. cell0 is used for instances that have not been scheduled to any cell. This generally applies to instances that have encountered an error before they have been scheduled. This command creates a cell mapping for this special cell which requires a database to store the instance data. Returns 0 if cell0 created successfully or already setup. """ try: self._map_cell0(database_connection=database_connection) except db_exc.DBDuplicateEntry: print(_('Cell0 is already setup')) return 0 def _map_cell0(self, database_connection=None): """Facilitate creation of a cell mapping for cell0. See map_cell0 for more. """ def cell0_default_connection(): # If no database connection is provided one is generated # based on the database connection url. # The cell0 database will use the same database scheme and # netloc as the main database, with a related path. # NOTE(sbauza): The URL has to be RFC1738 compliant in order to # be usable by sqlalchemy. connection = CONF.database.connection # sqlalchemy has a nice utility for parsing database connection # URLs so we use that here to get the db name so we don't have to # worry about parsing and splitting a URL which could have special # characters in the password, which makes parsing a nightmare. url = sqla_url.make_url(connection) url = url.set(database=url.database + '_cell0') return urlparse.unquote(url.render_as_string(hide_password=False)) dbc = database_connection or cell0_default_connection() ctxt = context.RequestContext() # A transport url of 'none://' is provided for cell0. RPC should not # be used to access cell0 objects. Cells transport switching will # ignore any 'none' transport type. 
cell_mapping = objects.CellMapping( ctxt, uuid=objects.CellMapping.CELL0_UUID, name="cell0", transport_url="none:///", database_connection=dbc) cell_mapping.create() return cell_mapping def _get_and_map_instances(self, ctxt, cell_mapping, limit, marker): filters = {} with context.target_cell(ctxt, cell_mapping) as cctxt: instances = objects.InstanceList.get_by_filters( cctxt.elevated(read_deleted='yes'), filters, sort_key='created_at', sort_dir='asc', limit=limit, marker=marker) for instance in instances: try: mapping = objects.InstanceMapping(ctxt) mapping.instance_uuid = instance.uuid mapping.cell_mapping = cell_mapping mapping.project_id = instance.project_id mapping.user_id = instance.user_id mapping.create() except db_exc.DBDuplicateEntry: continue if len(instances) == 0 or len(instances) < limit: # We've hit the end of the instances table marker = None else: marker = instances[-1].uuid return marker @args('--cell_uuid', metavar='', dest='cell_uuid', required=True, help='Unmigrated instances will be mapped to the cell with the ' 'uuid provided.') @args('--max-count', metavar='', dest='max_count', help='Maximum number of instances to map. If not set, all instances ' 'in the cell will be mapped in batches of 50. If you have a ' 'large number of instances, consider specifying a custom value ' 'and run the command until it exits with 0.') @args('--reset', action='store_true', dest='reset_marker', help='The command will start from the beginning as opposed to the ' 'default behavior of starting from where the last run ' 'finished') def map_instances(self, cell_uuid, max_count=None, reset_marker=None): """Map instances into the provided cell. Instances in the nova database of the provided cell (nova database info is obtained from the nova-api database) will be queried from oldest to newest and if unmapped, will be mapped to the provided cell. A max-count can be set on the number of instance to map in a single run. Repeated runs of the command will start from where the last run finished so it is not necessary to increase max-count to finish. A reset option can be passed which will reset the marker, thus making the command start from the beginning as opposed to the default behavior of starting from where the last run finished. An exit code of 0 indicates that all instances have been mapped. """ # NOTE(stephenfin): The support for batching in this command relies on # a bit of a hack. We initially process N instance-cell mappings, where # N is the value of '--max-count' if provided else 50. To ensure we # can continue from N on the next iteration, we store a instance-cell # mapping object with a special name and the UUID of the last # instance-cell mapping processed (N - 1) in munged form. On the next # iteration, we search for the special name and unmunge the UUID to # pick up where we left off. This is done until all mappings are # processed. The munging is necessary as there's a unique constraint on # the UUID field and we need something reversible. For more # information, see commit 9038738d0. 
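        # Illustrative example (added commentary): a last-processed instance
        # UUID such as '1111aaaa-...' is stored in the marker InstanceMapping
        # with its hyphens replaced by spaces ('1111aaaa ...') so it does not
        # collide with the real mapping's unique UUID, and is converted back
        # with .replace(' ', '-') when the marker is read below.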
if max_count is not None: try: max_count = int(max_count) except ValueError: max_count = -1 map_all = False if max_count < 1: print(_('Must supply a positive value for max-count')) return 127 else: map_all = True max_count = 50 ctxt = context.RequestContext() marker_project_id = 'INSTANCE_MIGRATION_MARKER' # Validate the cell exists, this will raise if not cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid) # Check for a marker from a previous run marker_mapping = objects.InstanceMappingList.get_by_project_id(ctxt, marker_project_id) if len(marker_mapping) == 0: marker = None else: # There should be only one here marker = marker_mapping[0].instance_uuid.replace(' ', '-') if reset_marker: marker = None marker_mapping[0].destroy() next_marker = True while next_marker is not None: next_marker = self._get_and_map_instances(ctxt, cell_mapping, max_count, marker) marker = next_marker if not map_all: break if next_marker: # Don't judge me. There's already an InstanceMapping with this UUID # so the marker needs to be non destructively modified. next_marker = next_marker.replace('-', ' ') # This is just the marker record, so set user_id to the special # marker name as well. objects.InstanceMapping(ctxt, instance_uuid=next_marker, project_id=marker_project_id, user_id=marker_project_id).create() return 1 return 0 def _map_cell_and_hosts(self, transport_url, name=None, verbose=False): ctxt = context.RequestContext() cell_mapping_uuid = cell_mapping = None # First, try to detect if a CellMapping has already been created compute_nodes = objects.ComputeNodeList.get_all(ctxt) if not compute_nodes: print(_('No hosts found to map to cell, exiting.')) return None missing_nodes = set() for compute_node in compute_nodes: try: host_mapping = objects.HostMapping.get_by_host( ctxt, compute_node.host) except exception.HostMappingNotFound: missing_nodes.add(compute_node.host) else: if verbose: print(_( 'Host %(host)s is already mapped to cell %(uuid)s' ) % {'host': host_mapping.host, 'uuid': host_mapping.cell_mapping.uuid}) # Reusing the existing UUID in case there is already a mapping # NOTE(sbauza): There could be possibly multiple CellMappings # if the operator provides another configuration file and moves # the hosts to another cell v2, but that's not really something # we should support. cell_mapping_uuid = host_mapping.cell_mapping.uuid if not missing_nodes: print(_('All hosts are already mapped to cell(s).')) return cell_mapping_uuid # Create the cell mapping in the API database if cell_mapping_uuid is not None: cell_mapping = objects.CellMapping.get_by_uuid( ctxt, cell_mapping_uuid) if cell_mapping is None: cell_mapping_uuid = uuidutils.generate_uuid() cell_mapping = objects.CellMapping( ctxt, uuid=cell_mapping_uuid, name=name, transport_url=transport_url, database_connection=CONF.database.connection) cell_mapping.create() # Pull the hosts from the cell database and create the host mappings for compute_host in missing_nodes: host_mapping = objects.HostMapping( ctxt, host=compute_host, cell_mapping=cell_mapping) host_mapping.create() if verbose: print(cell_mapping_uuid) return cell_mapping_uuid @args('--transport-url', metavar='', dest='transport_url', help='The transport url for the cell message queue') @args('--name', metavar='', help='The name of the cell') @args('--verbose', action='store_true', help='Output the cell mapping uuid for any newly mapped hosts.') def map_cell_and_hosts(self, transport_url=None, name=None, verbose=False): """EXPERIMENTAL. 
Create a cell mapping and host mappings for a cell. Users who do not divide their cloud into multiple cells will have a single cell v2 deployment and should specify: nova-manage cell_v2 map_cell_and_hosts --config-file Users running multiple cells can add a cell v2 by specifying: nova-manage cell_v2 map_cell_and_hosts --config-file """ transport_url = self._validate_transport_url(transport_url) if not transport_url: return 1 self._map_cell_and_hosts(transport_url, name, verbose) # online_data_migrations established a pattern of 0 meaning everything # is done, 1 means run again to do more work. This command doesn't do # partial work so 0 is appropriate. return 0 @args('--uuid', metavar='', dest='uuid', required=True, help=_('The instance UUID to verify')) @args('--quiet', action='store_true', dest='quiet', help=_('Do not print anything')) def verify_instance(self, uuid, quiet=False): """Verify instance mapping to a cell. This command is useful to determine if the cellsv2 environment is properly set up, specifically in terms of the cell, host, and instance mapping records required. This prints one of several strings (and exits with a code) indicating whether the instance is successfully mapped to a cell (0), is unmapped due to an incomplete upgrade (1), unmapped due to normally transient state (2), it is a deleted instance which still has an instance mapping (3), or it is an archived instance which still has an instance mapping (4). """ def say(string): if not quiet: print(string) ctxt = context.get_admin_context() try: mapping = objects.InstanceMapping.get_by_instance_uuid( ctxt, uuid) except exception.InstanceMappingNotFound: say('Instance %s is not mapped to a cell ' '(upgrade is incomplete) or instance ' 'does not exist' % uuid) return 1 if mapping.cell_mapping is None: say('Instance %s is not mapped to a cell' % uuid) return 2 else: with context.target_cell(ctxt, mapping.cell_mapping) as cctxt: try: instance = objects.Instance.get_by_uuid(cctxt, uuid) except exception.InstanceNotFound: try: el_ctx = cctxt.elevated(read_deleted='yes') instance = objects.Instance.get_by_uuid(el_ctx, uuid) # instance is deleted if instance: say('The instance with uuid %s has been deleted.' % uuid) say('Execute ' '`nova-manage db archive_deleted_rows` ' 'command to archive this deleted ' 'instance and remove its instance_mapping.') return 3 except exception.InstanceNotFound: # instance is archived say('The instance with uuid %s has been archived.' % uuid) say('However its instance_mapping remains.') return 4 # instance is alive and mapped to a cell say('Instance %s is in cell: %s (%s)' % ( uuid, mapping.cell_mapping.name, mapping.cell_mapping.uuid)) return 0 @args('--cell_uuid', metavar='', dest='cell_uuid', help='If provided, only this cell will be searched for new hosts to ' 'map.') @args('--verbose', action='store_true', help=_('Provide detailed output when discovering hosts.')) @args('--strict', action='store_true', help=_('Considered successful (exit code 0) only when an unmapped ' 'host is discovered. Any other outcome will be considered a ' 'failure (non-zero exit code).')) @args('--by-service', action='store_true', default=False, dest='by_service', help=_('Discover hosts by service instead of compute node')) def discover_hosts(self, cell_uuid=None, verbose=False, strict=False, by_service=False): """Searches cells, or a single cell, and maps found hosts. When a new host is added to a deployment it will add a service entry to the db it's configured to use.
This command will check the db for each cell, or a single one if passed in, and map any hosts which are not currently mapped. If a host is already mapped nothing will be done. This command should be run once after all compute hosts have been deployed and should not be run in parallel. When run in parallel, the commands will collide with each other trying to map the same hosts in the database at the same time. """ def status_fn(msg): if verbose: print(msg) ctxt = context.RequestContext() try: hosts = host_mapping_obj.discover_hosts(ctxt, cell_uuid, status_fn, by_service) except exception.HostMappingExists as exp: print(_('ERROR: Duplicate host mapping was encountered. This ' 'command should be run once after all compute hosts have ' 'been deployed and should not be run in parallel. When ' 'run in parallel, the commands will collide with each ' 'other trying to map the same hosts in the database at ' 'the same time. Error: %s') % exp) return 2 # discover_hosts will return an empty list if no hosts are discovered if strict: return int(not hosts) @action_description( _("Add a new cell to nova API database. " "DB and MQ urls can be provided directly " "or can be taken from config. The result is cell uuid.")) @args('--name', metavar='', help=_('The name of the cell')) @args('--database_connection', metavar='', dest='database_connection', help=_('The database url for the cell database')) @args('--transport-url', metavar='', dest='transport_url', help=_('The transport url for the cell message queue')) @args('--verbose', action='store_true', help=_('Output the uuid of the created cell')) @args('--disabled', action='store_true', help=_('To create a pre-disabled cell.')) def create_cell(self, name=None, database_connection=None, transport_url=None, verbose=False, disabled=False): ctxt = context.get_context() transport_url = self._validate_transport_url(transport_url) if not transport_url: return 1 database_connection = self._validate_database_connection( database_connection) if not database_connection: return 1 if (self._non_unique_transport_url_database_connection_checker(ctxt, None, transport_url, database_connection)): return 2 cell_mapping_uuid = uuidutils.generate_uuid() cell_mapping = objects.CellMapping( ctxt, uuid=cell_mapping_uuid, name=name, transport_url=transport_url, database_connection=database_connection, disabled=disabled) cell_mapping.create() if verbose: print(cell_mapping_uuid) return 0 @args('--verbose', action='store_true', help=_('Show sensitive details, such as passwords')) def list_cells(self, verbose=False): """Lists the v2 cells in the deployment. By default the cell name, uuid, disabled state, masked transport URL and database connection details are shown. Use the --verbose option to see transport URL and database connection with their sensitive details. 
""" cell_mappings = objects.CellMappingList.get_all( context.get_admin_context()) field_names = [_('Name'), _('UUID'), _('Transport URL'), _('Database Connection'), _('Disabled')] t = prettytable.PrettyTable(field_names) for cell in sorted(cell_mappings, # CellMapping.name is optional key=lambda _cell: _cell.name or ''): fields = [cell.name or '', cell.uuid] if verbose: fields.extend([cell.transport_url, cell.database_connection]) else: fields.extend([ mask_passwd_in_url(cell.transport_url), mask_passwd_in_url(cell.database_connection)]) fields.extend([cell.disabled]) t.add_row(fields) print(t) return 0 @args('--force', action='store_true', default=False, help=_('Delete hosts and instance_mappings that belong ' 'to the cell as well.')) @args('--cell_uuid', metavar='', dest='cell_uuid', required=True, help=_('The uuid of the cell to delete.')) def delete_cell(self, cell_uuid, force=False): """Delete an empty cell by the given uuid. This command will return a non-zero exit code in the following cases. * The cell is not found by uuid. * It has hosts and force is False. * It has instance mappings and force is False. If force is True and the cell has hosts and/or instance_mappings, they are deleted as well (as long as there are no living instances). Returns 0 in the following cases. * The empty cell is found and deleted successfully. * The cell has hosts and force is True then the cell, hosts and instance_mappings are deleted successfully; if there are no living instances. """ ctxt = context.get_admin_context() # Find the CellMapping given the uuid. try: cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid) except exception.CellMappingNotFound: print(_('Cell with uuid %s was not found.') % cell_uuid) return 1 # Check to see if there are any HostMappings for this cell. host_mappings = objects.HostMappingList.get_by_cell_id( ctxt, cell_mapping.id) nodes = [] if host_mappings: if not force: print(_('There are existing hosts mapped to cell with uuid ' '%s.') % cell_uuid) return 2 # We query for the compute nodes in the cell, # so that they can be unmapped. with context.target_cell(ctxt, cell_mapping) as cctxt: nodes = objects.ComputeNodeList.get_all(cctxt) # Check to see if there are any InstanceMappings for this cell. instance_mappings = objects.InstanceMappingList.get_by_cell_id( ctxt, cell_mapping.id) if instance_mappings: with context.target_cell(ctxt, cell_mapping) as cctxt: instances = objects.InstanceList.get_all(cctxt) if instances: # There are instances in the cell. print(_('There are existing instances mapped to cell with ' 'uuid %s.') % cell_uuid) return 3 else: if not force: # There are no instances in the cell but the records remain # in the 'instance_mappings' table. print(_("There are instance mappings to cell with uuid " "%s, but all instances have been deleted " "in the cell.") % cell_uuid) print(_("So execute 'nova-manage db archive_deleted_rows' " "to delete the instance mappings.")) return 4 # Delete instance_mappings of the deleted instances for instance_mapping in instance_mappings: instance_mapping.destroy() # Unmap the compute nodes so that they can be discovered # again in future, if needed. for node in nodes: node.mapped = 0 node.save() # Delete hosts mapped to the cell. for host_mapping in host_mappings: host_mapping.destroy() # There are no hosts or instances mapped to the cell so delete it. 
cell_mapping.destroy() return 0 @args('--cell_uuid', metavar='', dest='cell_uuid', required=True, help=_('The uuid of the cell to update.')) @args('--name', metavar='', dest='name', help=_('Set the cell name.')) @args('--transport-url', metavar='', dest='transport_url', help=_('Set the cell transport_url. NOTE that running nodes ' 'will not see the change until restart!')) @args('--database_connection', metavar='', dest='db_connection', help=_('Set the cell database_connection. NOTE that running nodes ' 'will not see the change until restart!')) @args('--disable', action='store_true', dest='disable', help=_('Disables the cell. Note that the scheduling will be blocked ' 'to this cell until its enabled and followed by a SIGHUP of ' 'nova-scheduler service.')) @args('--enable', action='store_true', dest='enable', help=_('Enables the cell. Note that this makes a disabled cell ' 'available for scheduling after a SIGHUP of the ' 'nova-scheduler service')) def update_cell(self, cell_uuid, name=None, transport_url=None, db_connection=None, disable=False, enable=False): """Updates the properties of a cell by the given uuid. If the cell is not found by uuid, this command will return an exit code of 1. If the provided transport_url or/and database_connection is/are same as another cell, this command will return an exit code of 3. If the properties cannot be set, this will return 2. If an attempt is made to disable and enable a cell at the same time, this command will exit with a return code of 4. If an attempt is made to disable or enable cell0 this command will exit with a return code of 5. Otherwise, the exit code will be 0. NOTE: Updating the transport_url or database_connection fields on a running system will NOT result in all nodes immediately using the new values. Use caution when changing these values. NOTE (tssurya): The scheduler will not notice that a cell has been enabled/disabled until it is restarted or sent the SIGHUP signal. """ ctxt = context.get_admin_context() try: cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid) except exception.CellMappingNotFound: print(_('Cell with uuid %s was not found.') % cell_uuid) return 1 if name: cell_mapping.name = name # Having empty transport_url and db_connection means leaving the # existing values transport_url = self._validate_transport_url( transport_url, warn_about_none=False) db_connection = self._validate_database_connection( db_connection, warn_about_none=False) if (self._non_unique_transport_url_database_connection_checker(ctxt, cell_mapping, transport_url, db_connection)): # We use the return code 3 before 2 to avoid changing the # semantic meanings of return codes. 
return 3 if transport_url: cell_mapping.transport_url = transport_url if db_connection: cell_mapping.database_connection = db_connection if disable and enable: print(_('Cell cannot be disabled and enabled at the same time.')) return 4 if disable or enable: if cell_mapping.is_cell0(): print(_('Cell0 cannot be disabled.')) return 5 elif disable and not cell_mapping.disabled: cell_mapping.disabled = True elif enable and cell_mapping.disabled: cell_mapping.disabled = False elif disable and cell_mapping.disabled: print(_('Cell %s is already disabled') % cell_uuid) elif enable and not cell_mapping.disabled: print(_('Cell %s is already enabled') % cell_uuid) try: cell_mapping.save() except Exception as e: print(_('Unable to update CellMapping: %s') % e) return 2 return 0 @args('--cell_uuid', metavar='', dest='cell_uuid', help=_('The uuid of the cell.')) def list_hosts(self, cell_uuid=None): """Lists the hosts in one or all v2 cells.""" ctxt = context.get_admin_context() if cell_uuid: # Find the CellMapping given the uuid. try: cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid) except exception.CellMappingNotFound: print(_('Cell with uuid %s was not found.') % cell_uuid) return 1 host_mappings = objects.HostMappingList.get_by_cell_id( ctxt, cell_mapping.id) else: host_mappings = objects.HostMappingList.get_all(ctxt) field_names = [_('Cell Name'), _('Cell UUID'), _('Hostname')] t = prettytable.PrettyTable(field_names) for host in sorted(host_mappings, key=lambda _host: _host.host): fields = [host.cell_mapping.name, host.cell_mapping.uuid, host.host] t.add_row(fields) print(t) return 0 @args('--cell_uuid', metavar='', dest='cell_uuid', required=True, help=_('The uuid of the cell.')) @args('--host', metavar='', dest='host', required=True, help=_('The host to delete.')) def delete_host(self, cell_uuid, host): """Delete a host in a cell (host mappings) by the given host name This command will return a non-zero exit code in the following cases. * The cell is not found by uuid. * The host is not found by host name. * The host is not in the cell. * The host has instances. Returns 0 if the host is deleted successfully. NOTE: The scheduler caches host-to-cell mapping information so when deleting a host the scheduler may need to be restarted or sent the SIGHUP signal. """ ctxt = context.get_admin_context() # Find the CellMapping given the uuid. 
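        # Editorial note (not part of the original source): an illustrative
        # invocation of this command is
        #     nova-manage cell_v2 delete_host --cell_uuid <cell uuid> --host <host>
        # and, per the checks below, it only removes the host mapping when the
        # host belongs to the given cell and has no instances on it.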
try: cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid) except exception.CellMappingNotFound: print(_('Cell with uuid %s was not found.') % cell_uuid) return 1 try: host_mapping = objects.HostMapping.get_by_host(ctxt, host) except exception.HostMappingNotFound: print(_('The host %s was not found.') % host) return 2 if host_mapping.cell_mapping.uuid != cell_mapping.uuid: print(_('The host %(host)s was not found ' 'in the cell %(cell_uuid)s.') % {'host': host, 'cell_uuid': cell_uuid}) return 3 with context.target_cell(ctxt, cell_mapping) as cctxt: instances = objects.InstanceList.get_by_host(cctxt, host) try: nodes = objects.ComputeNodeList.get_all_by_host(cctxt, host) except exception.ComputeHostNotFound: nodes = [] if instances: print(_('There are instances on the host %s.') % host) return 4 for node in nodes: node.mapped = 0 node.save() host_mapping.destroy() return 0 class PlacementCommands(object): """Commands for managing placement resources.""" @staticmethod def _get_compute_node_uuid(ctxt, instance, node_cache): """Find the ComputeNode.uuid for the given Instance :param ctxt: cell-targeted nova.context.RequestContext :param instance: the instance to lookup a compute node :param node_cache: dict of Instance.node keys to ComputeNode.uuid values; this cache is updated if a new node is processed. :returns: ComputeNode.uuid for the given instance :raises: nova.exception.ComputeHostNotFound """ if instance.node in node_cache: return node_cache[instance.node] compute_node = objects.ComputeNode.get_by_host_and_nodename( ctxt, instance.host, instance.node) node_uuid = compute_node.uuid node_cache[instance.node] = node_uuid return node_uuid @staticmethod def _get_ports(ctxt, instance, neutron): """Return the ports that are bound to the instance :param ctxt: nova.context.RequestContext :param instance: the instance to return the ports for :param neutron: nova.network.neutron.ClientWrapper to communicate with Neutron :return: a list of neutron port dict objects :raise UnableToQueryPorts: If the neutron list ports query fails. """ try: return neutron.list_ports( ctxt, device_id=instance.uuid, fields=['id', constants.RESOURCE_REQUEST, constants.BINDING_PROFILE] )['ports'] except neutron_client_exc.NeutronClientException as e: raise exception.UnableToQueryPorts( instance_uuid=instance.uuid, error=str(e)) @staticmethod def _has_request_but_no_allocation(port, neutron): has_res_req = neutron_api.API()._has_resource_request( context.get_admin_context(), port, neutron) binding_profile = neutron_api.get_binding_profile(port) allocation = binding_profile.get(constants.ALLOCATION) return has_res_req and not allocation @staticmethod def _merge_allocations(alloc1, alloc2): """Return a new allocation dict that contains the sum of alloc1 and alloc2. 
:param alloc1: a dict in the form of { : {'resources': {: amount, : amount}, : {'resources': {: amount}, } :param alloc2: a dict in the same form as alloc1 :return: the merged allocation of alloc1 and alloc2 in the same format """ allocations = collections.defaultdict( lambda: {'resources': collections.defaultdict(int)}) for alloc in [alloc1, alloc2]: for rp_uuid in alloc: for rc, amount in alloc[rp_uuid]['resources'].items(): allocations[rp_uuid]['resources'][rc] += amount return allocations @staticmethod def _get_resource_request_from_ports( ctxt: context.RequestContext, ports: ty.List[ty.Dict[str, ty.Any]] ) -> ty.Tuple[ ty.Dict[str, ty.List['objects.RequestGroup']], 'objects.RequestLevelParams']: """Collect RequestGroups and RequestLevelParams for all ports :param ctxt: the request context :param ports: a list of port dicts :returns: A two tuple where the first item is a dict mapping port uuids to a list of request groups coming from that port, the second item is a combined RequestLevelParams object from all ports. """ groups = {} request_level_params = objects.RequestLevelParams() extended_res_req = ( neutron_api.API().has_extended_resource_request_extension( ctxt) ) for port in ports: resource_request = port.get(constants.RESOURCE_REQUEST) if extended_res_req: groups[port['id']] = ( objects.RequestGroup.from_extended_port_request( ctxt, resource_request ) ) request_level_params.extend_with( objects.RequestLevelParams.from_port_request( resource_request ) ) else: # This is the legacy format, only one group per port and no # request level param support # TODO(gibi): remove this path once the extended resource # request extension is mandatory in neutron groups[port['id']] = [ objects.RequestGroup.from_port_request( ctxt, port['id'], resource_request ) ] return groups, request_level_params @staticmethod def _get_port_binding_profile_allocation( ctxt: context.RequestContext, neutron: neutron_api.ClientWrapper, port: ty.Dict[str, ty.Any], request_groups: ty.List['objects.RequestGroup'], resource_provider_mapping: ty.Dict[str, ty.List[str]], ) -> ty.Dict[str, str]: """Generate the value of the allocation key of the port binding profile based on the provider mapping returned from placement :param ctxt: the request context :param neutron: the neutron client :param port: the port dict from neutron :param request_groups: the list of RequestGroups object generated from the port resource request :param resource_provider_mapping: The dict of request group to resource provider mapping returned by the Placement allocation candidate query :returns: a dict mapping request group ids to resource provider uuids in the form as Neutron expects in the port binding profile. """ if neutron_api.API().has_extended_resource_request_extension( ctxt, neutron ): # The extended resource request format also means that a # port has more than a one request groups. # Each request group id from the port needs to be mapped to # a single provider id from the provider mappings. Each # group from the port is mapped to a numbered request group # in placement so we can assume that they are mapped to # a single provider and therefore the provider mapping list # has a single provider id. allocation = { group.requester_id: resource_provider_mapping[ group.requester_id][0] for group in request_groups } else: # This is the legacy resource request format where a port # is mapped to a single request group # NOTE(gibi): In the resource provider mapping there can be # more than one RP fulfilling a request group. 
But resource # requests of a Neutron port is always mapped to a # numbered request group that is always fulfilled by one # resource provider. So we only pass that single RP UUID # here. allocation = resource_provider_mapping[ port['id']][0] return allocation def _get_port_allocations_to_heal( self, ctxt, instance, node_cache, placement, neutron, output): """Return the needed extra allocation for the ports of the instance. :param ctxt: nova.context.RequestContext :param instance: instance to get the port allocations for :param node_cache: dict of Instance.node keys to ComputeNode.uuid values; this cache is updated if a new node is processed. :param placement: nova.scheduler.client.report.SchedulerReportClient to communicate with the Placement service API. :param neutron: nova.network.neutron.ClientWrapper to communicate with Neutron :param output: function that takes a single message for verbose output :raise UnableToQueryPorts: If the neutron list ports query fails. :raise nova.exception.ComputeHostNotFound: if compute node of the instance not found in the db. :raise PlacementAPIConnectFailure: if placement API cannot be reached :raise AllocationUpdateFailed: if there is either no allocation candidate returned from placement for the missing port allocations or there are more than one candidates making the healing ambiguous. :return: A two tuple where the first item is a dict of resources keyed by RP uuid to be included in the instance allocation dict. The second item is a list of port dicts to be updated in Neutron. """ # We need to heal port allocations for ports that have resource_request # but do not have an RP uuid in the binding:profile.allocation field. # We cannot use the instance info_cache to check the binding profile # as this code needs to be able to handle ports that were attached # before nova in stein started updating the allocation key in the # binding:profile. # In theory a port can be assigned to an instance without it being # bound to any host (e.g. in case of shelve offload) but # _heal_allocations_for_instance() already filters out instances that # are not on any host. ports_to_heal = [ port for port in self._get_ports(ctxt, instance, neutron) if self._has_request_but_no_allocation(port, neutron)] if not ports_to_heal: # nothing to do, return early return {}, [] node_uuid = self._get_compute_node_uuid( ctxt, instance, node_cache) # NOTE(gibi): We need to handle both legacy and extended resource # request. So we need to handle ports with multiple request groups # allocating from multiple providers. # The logic what we follow here is pretty similar to the logic # implemented in ComputeManager._allocate_port_resource_for_instance # for the interface attach case. We just apply it to more then one # ports here. request_groups_per_port, req_lvl_params = ( self._get_resource_request_from_ports(ctxt, ports_to_heal) ) # flatten the list of list of groups request_groups = [ group for groups in request_groups_per_port.values() for group in groups ] # we can have multiple request groups, it would be enough to restrict # only one of them to the compute tree but for symmetry we restrict # all of them for request_group in request_groups: request_group.in_tree = node_uuid # If there are multiple groups then the group_policy is mandatory in # the allocation candidate query. We can assume that if this instance # booted successfully then we have the policy in the flavor. 
If there # is only one group and therefore no policy then the value of the # policy in the allocation candidate query is ignored, so we simply # default it here. group_policy = instance.flavor.extra_specs.get("group_policy", "none") rr = scheduler_utils.ResourceRequest.from_request_groups( request_groups, req_lvl_params, group_policy) res = placement.get_allocation_candidates(ctxt, rr) # NOTE(gibi): the get_allocation_candidates method has the # @safe_connect decorator applied. Such a decorator will return None # if the connection to Placement fails. So we raise an exception # here. When Placement successfully returns a response, even # if it is a negative or empty response, the method will return a # three-tuple. That case is handled a couple of lines below. if not res: raise exception.PlacementAPIConnectFailure() alloc_reqs, __, __ = res if not alloc_reqs: port_ids = [port['id'] for port in ports_to_heal] raise exception.AllocationUpdateFailed( consumer_uuid=instance.uuid, error=f'Placement returned no allocation candidate to fulfill ' f'the resource request of the port(s) {port_ids}' ) if len(alloc_reqs) > 1: # If there is more than one candidate then it is an ambiguous # situation that we cannot handle here because selecting the right # one might need extra information from the compute node. For # example which PCI PF the VF is allocated from and which RP # represents that PCI PF in placement. # TODO(gibi): One way to get that missing information to resolve # ambiguity would be to load up the InstancePciRequest objects and # try to use the parent_if_name in their spec to find the proper # candidate that allocates for the same port from the PF RP that # has the same name. port_ids = [port['id'] for port in ports_to_heal] raise exception.AllocationUpdateFailed( consumer_uuid=instance.uuid, error=f'Placement returned more than one possible allocation ' f'candidate to fulfill the resource request of the ' f'port(s) {port_ids}. This script does not have enough ' f'information to select the proper candidate to heal the ' f'missing allocations. A possible way to heal the ' f'allocation of this instance is to migrate it to ' f'another compute as the migration process re-creates ' f'the full allocation on the target host.' ) # so we have one candidate, let's use that to get the needed allocations # and the provider mapping for the ports' binding profile alloc_req = alloc_reqs[0] allocations = alloc_req["allocations"] provider_mappings = alloc_req["mappings"] for port in ports_to_heal: # We also need to record the RPs we are allocated from in the # port.
This will be sent back to Neutron before the allocation # is updated in placement profile_allocation = self._get_port_binding_profile_allocation( ctxt, neutron, port, request_groups_per_port[port['id']], provider_mappings ) binding_profile = neutron_api.get_binding_profile(port) binding_profile[constants.ALLOCATION] = profile_allocation port[constants.BINDING_PROFILE] = binding_profile output(_( "Found a request group : resource provider mapping " "%(mapping)s for the port %(port_uuid)s with resource request " "%(request)s attached to the instance %(instance_uuid)s") % {"mapping": profile_allocation, "port_uuid": port['id'], "request": port.get(constants.RESOURCE_REQUEST), "instance_uuid": instance.uuid} ) return allocations, ports_to_heal def _update_ports(self, neutron, ports_to_update, output): succeeded = [] try: for port in ports_to_update: profile = neutron_api.get_binding_profile(port) body = { 'port': { constants.BINDING_PROFILE: profile } } output( _('Updating port %(port_uuid)s with attributes ' '%(attributes)s') % {'port_uuid': port['id'], 'attributes': body['port']}) neutron.update_port(port['id'], body=body) succeeded.append(port) except neutron_client_exc.NeutronClientException as e: output( _('Updating port %(port_uuid)s failed: %(error)s') % {'port_uuid': port['id'], 'error': str(e)}) # one of the port updates failed. We need to roll back the updates # that succeeded before self._rollback_port_updates(neutron, succeeded, output) # we failed to heal so we need to stop but we successfully rolled # back the partial updates so the admin can retry the healing. raise exception.UnableToUpdatePorts(error=str(e)) @staticmethod def _rollback_port_updates(neutron, ports_to_rollback, output): # _update_ports() added the allocation key to these ports, so we need # to remove them during the rollback. manual_rollback_needed = [] last_exc = None for port in ports_to_rollback: profile = neutron_api.get_binding_profile(port) profile.pop(constants.ALLOCATION) body = { 'port': { constants.BINDING_PROFILE: profile } } try: output(_('Rolling back port update for %(port_uuid)s') % {'port_uuid': port['id']}) neutron.update_port(port['id'], body=body) except neutron_client_exc.NeutronClientException as e: output( _('Rolling back update for port %(port_uuid)s failed: ' '%(error)s') % {'port_uuid': port['id'], 'error': str(e)}) # TODO(gibi): We could implement a retry mechanism with # back off. manual_rollback_needed.append(port['id']) last_exc = e if manual_rollback_needed: # At least one of the port operation failed so we failed to roll # back. There are partial updates in neutron. Human intervention # needed. raise exception.UnableToRollbackPortUpdates( error=str(last_exc), port_uuids=manual_rollback_needed) def _heal_missing_alloc(self, ctxt, instance, node_cache): node_uuid = self._get_compute_node_uuid( ctxt, instance, node_cache) # Now get the resource allocations for the instance based # on its embedded flavor. 
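        # Editorial note (not part of the original source): the payload built
        # below takes, with purely illustrative values, a shape such as
        #     {'allocations': {<node_uuid>: {'resources':
        #          {'VCPU': 2, 'MEMORY_MB': 2048, 'DISK_GB': 20}}},
        #      'project_id': <instance project>, 'user_id': <instance user>,
        #      'consumer_generation': None}
        # consumer_generation starts as None (a new consumer); the force-heal
        # caller later overwrites it with the existing consumer generation.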
resources = scheduler_utils.resources_from_flavor( instance, instance.flavor) payload = { 'allocations': { node_uuid: {'resources': resources}, }, 'project_id': instance.project_id, 'user_id': instance.user_id, 'consumer_generation': None } return payload def _heal_missing_project_and_user_id(self, allocations, instance): allocations['project_id'] = instance.project_id allocations['user_id'] = instance.user_id return allocations @staticmethod def ensure_instance_has_no_vgpu_request(instance): if instance.flavor.extra_specs.get("resources:VGPU"): raise exception.HealvGPUAllocationNotSupported( instance_uuid=instance.uuid) @staticmethod def ensure_instance_has_no_cyborg_device_profile_request(instance): if instance.flavor.extra_specs.get("accel:device_profile"): raise exception.HealDeviceProfileAllocationNotSupported( instance_uuid=instance.uuid) def _heal_allocations_for_instance(self, ctxt, instance, node_cache, output, placement, dry_run, heal_port_allocations, neutron, force): """Checks the given instance to see if it needs allocation healing :param ctxt: cell-targeted nova.context.RequestContext :param instance: the instance to check for allocation healing :param node_cache: dict of Instance.node keys to ComputeNode.uuid values; this cache is updated if a new node is processed. :param output: function that takes a single message for verbose output :param placement: nova.scheduler.client.report.SchedulerReportClient to communicate with the Placement service API. :param dry_run: Process instances and print output but do not commit any changes. :param heal_port_allocations: True if healing port allocation is requested, False otherwise. :param neutron: nova.network.neutron.ClientWrapper to communicate with Neutron :param force: True if force healing is requested for particular instance, False otherwise. :return: True if allocations were created or updated for the instance, None if nothing needed to be done :raises: nova.exception.ComputeHostNotFound if a compute node for a given instance cannot be found :raises: AllocationCreateFailed if unable to create allocations for a given instance against a given compute node resource provider :raises: AllocationUpdateFailed if unable to update allocations for a given instance with consumer project/user information :raise UnableToQueryPorts: If the neutron list ports query fails. :raise PlacementAPIConnectFailure: if placement API cannot be reached :raise UnableToUpdatePorts: if a port update failed in neutron but any partial update was rolled back successfully. :raise UnableToRollbackPortUpdates: if a port update failed in neutron and the rollback of the partial updates also failed. 
""" if instance.task_state is not None: output(_('Instance %(instance)s is undergoing a task ' 'state transition: %(task_state)s') % {'instance': instance.uuid, 'task_state': instance.task_state}) return if instance.node is None: output(_('Instance %s is not on a host.') % instance.uuid) return self.ensure_instance_has_no_vgpu_request(instance) self.ensure_instance_has_no_cyborg_device_profile_request(instance) try: allocations = placement.get_allocs_for_consumer( ctxt, instance.uuid) except (ks_exc.ClientException, exception.ConsumerAllocationRetrievalFailed) as e: raise exception.AllocationUpdateFailed( consumer_uuid=instance.uuid, error=_("Allocation retrieval failed: %s") % e) need_healing = False # Placement response can have an empty {'allocations': {}} in it if # there are no allocations for the instance if not allocations.get('allocations'): # This instance doesn't have allocations need_healing = _CREATE allocations = self._heal_missing_alloc(ctxt, instance, node_cache) if (allocations.get('project_id') != instance.project_id or allocations.get('user_id') != instance.user_id): # We have an instance with allocations but not the correct # project_id/user_id, so we want to update the allocations # and re-put them. We don't use put_allocations here # because we don't want to mess up shared or nested # provider allocations. need_healing = _UPDATE allocations = self._heal_missing_project_and_user_id( allocations, instance) if force: output(_('Force flag passed for instance %s') % instance.uuid) need_healing = _UPDATE # get default allocations alloc = self._heal_missing_alloc(ctxt, instance, node_cache) # set consumer generation of existing allocations alloc["consumer_generation"] = allocations["consumer_generation"] # set allocations allocations = alloc if heal_port_allocations: to_heal = self._get_port_allocations_to_heal( ctxt, instance, node_cache, placement, neutron, output) port_allocations, ports_to_update = to_heal else: port_allocations, ports_to_update = {}, [] if port_allocations: need_healing = need_healing or _UPDATE # Merge in any missing port allocations allocations['allocations'] = self._merge_allocations( allocations['allocations'], port_allocations) if need_healing: if dry_run: # json dump the allocation dict as it contains nested default # dicts that is pretty hard to read in the verbose output alloc = jsonutils.dumps(allocations) if need_healing == _CREATE: output(_('[dry-run] Create allocations for instance ' '%(instance)s: %(allocations)s') % {'instance': instance.uuid, 'allocations': alloc}) elif need_healing == _UPDATE: output(_('[dry-run] Update allocations for instance ' '%(instance)s: %(allocations)s') % {'instance': instance.uuid, 'allocations': alloc}) else: # First update ports in neutron. If any of those operations # fail, then roll back the successful part of it and fail the # healing. We do this first because rolling back the port # updates is more straight-forward than rolling back allocation # changes. self._update_ports(neutron, ports_to_update, output) # Now that neutron update succeeded we can try to update # placement. If it fails we need to rollback every neutron port # update done before. 
resp = placement.put_allocations(ctxt, instance.uuid, allocations) if resp: if need_healing == _CREATE: output(_('Successfully created allocations for ' 'instance %(instance)s.') % {'instance': instance.uuid}) elif need_healing == _UPDATE: output(_('Successfully updated allocations for ' 'instance %(instance)s.') % {'instance': instance.uuid}) return True else: # Rollback every neutron update. If we succeed to # roll back then it is safe to stop here and let the admin # retry. If the rollback fails then # _rollback_port_updates() will raise another exception # that instructs the operator how to clean up manually # before the healing can be retried self._rollback_port_updates( neutron, ports_to_update, output) raise exception.AllocationUpdateFailed( consumer_uuid=instance.uuid, error='') else: output(_('The allocation of instance %s is up-to-date. ' 'Nothing to be healed.') % instance.uuid) return def _heal_instances_in_cell(self, ctxt, max_count, unlimited, output, placement, dry_run, instance_uuid, heal_port_allocations, neutron, force): """Checks for instances to heal in a given cell. :param ctxt: cell-targeted nova.context.RequestContext :param max_count: batch size (limit per instance query) :param unlimited: True if all instances in the cell should be processed, else False to just process $max_count instances :param output: function that takes a single message for verbose output :param placement: nova.scheduler.client.report.SchedulerReportClient to communicate with the Placement service API. :param dry_run: Process instances and print output but do not commit any changes. :param instance_uuid: UUID of a specific instance to process. :param heal_port_allocations: True if healing port allocation is requested, False otherwise. :param neutron: nova.network.neutron.ClientWrapper to communicate with Neutron :param force: True if force healing is requested for particular instance, False otherwise. :return: Number of instances that had allocations created. :raises: nova.exception.ComputeHostNotFound if a compute node for a given instance cannot be found :raises: AllocationCreateFailed if unable to create allocations for a given instance against a given compute node resource provider :raises: AllocationUpdateFailed if unable to update allocations for a given instance with consumer project/user information :raise UnableToQueryPorts: If the neutron list ports query fails. :raise PlacementAPIConnectFailure: if placement API cannot be reached :raise UnableToUpdatePorts: if a port update failed in neutron but any partial update was rolled back successfully. :raise UnableToRollbackPortUpdates: if a port update failed in neutron and the rollback of the partial updates also failed. """ # Keep a cache of instance.node to compute node resource provider UUID. # This will save some queries for non-ironic instances to the # compute_nodes table. node_cache = {} # Track the total number of instances that have allocations created # for them in this cell. We return when num_processed equals max_count # and unlimited=True or we exhaust the number of instances to process # in this cell. num_processed = 0 # Get all instances from this cell which have a host and are not # undergoing a task state transition. Go from oldest to newest. # NOTE(mriedem): Unfortunately we don't have a marker to use # between runs where the user is specifying --max-count. # TODO(mriedem): Store a marker in system_metadata so we can # automatically pick up where we left off without the user having # to pass it in (if unlimited is False). 
filters = {'deleted': False} if instance_uuid: filters['uuid'] = instance_uuid instances = objects.InstanceList.get_by_filters( ctxt, filters=filters, sort_key='created_at', sort_dir='asc', limit=max_count, expected_attrs=['flavor']) while instances: output(_('Found %s candidate instances.') % len(instances)) # For each instance in this list, we need to see if it has # allocations in placement and if so, assume it's correct and # continue. for instance in instances: if self._heal_allocations_for_instance( ctxt, instance, node_cache, output, placement, dry_run, heal_port_allocations, neutron, force): num_processed += 1 # Make sure we don't go over the max count. Note that we # don't include instances that already have allocations in the # max_count number, only the number of instances that have # successfully created allocations. # If a specific instance was requested we return here as well. if (not unlimited and num_processed == max_count) or instance_uuid: return num_processed # Use a marker to get the next page of instances in this cell. # Note that InstanceList doesn't support slice notation. marker = instances[len(instances) - 1].uuid instances = objects.InstanceList.get_by_filters( ctxt, filters=filters, sort_key='created_at', sort_dir='asc', limit=max_count, marker=marker, expected_attrs=['flavor']) return num_processed @action_description( _("Iterates over non-cell0 cells looking for instances which do " "not have allocations in the Placement service, or have incomplete " "consumer project_id/user_id values in existing allocations or " "missing allocations for ports having resource request, and " "which are not undergoing a task state transition. For each " "instance found, allocations are created (or updated) against the " "compute node resource provider for that instance based on the " "flavor associated with the instance. This command requires that " "the [api_database]/connection and [placement] configuration " "options are set.")) @args('--max-count', metavar='', dest='max_count', help='Maximum number of instances to process. If not specified, all ' 'instances in each cell will be mapped in batches of 50. ' 'If you have a large number of instances, consider specifying ' 'a custom value and run the command until it exits with ' '0 or 4.') @args('--verbose', action='store_true', dest='verbose', default=False, help='Provide verbose output during execution.') @args('--dry-run', action='store_true', dest='dry_run', default=False, help='Runs the command and prints output but does not commit any ' 'changes. The return code should be 4.') @args('--instance', metavar='', dest='instance_uuid', help='UUID of a specific instance to process. If specified ' '--max-count has no effect. ' 'The --cell and --instance options are mutually exclusive.') @args('--skip-port-allocations', action='store_true', dest='skip_port_allocations', default=False, help='Skip the healing of the resource allocations of bound ports. ' 'E.g. healing bandwidth resource allocation for ports having ' 'minimum QoS policy rules attached. If your deployment does ' 'not use such a feature then the performance impact of ' 'querying neutron ports for each instance can be avoided with ' 'this flag.') @args('--cell', metavar='', dest='cell_uuid', help='Heal allocations within a specific cell. ' 'The --cell and --instance options are mutually exclusive.') @args('--force', action='store_true', dest='force', default=False, help='Force heal allocations. 
Requires the --instance argument.') def heal_allocations(self, max_count=None, verbose=False, dry_run=False, instance_uuid=None, skip_port_allocations=False, cell_uuid=None, force=False): """Heals instance allocations in the Placement service Return codes: * 0: Command completed successfully and allocations were created. * 1: --max-count was reached and there are more instances to process. * 2: Unable to find a compute node record for a given instance. * 3: Unable to create (or update) allocations for an instance against its compute node resource provider. * 4: Command completed successfully but no allocations were created. * 5: Unable to query ports from neutron * 6: Unable to update ports in neutron * 7: Cannot roll back neutron port updates. Manual steps needed. * 8: Cannot heal instance with vGPU or Cyborg resource request * 127: Invalid input. """ # NOTE(mriedem): Thoughts on ways to expand this: # - allow filtering on enabled/disabled cells # - add a force option to force allocations for instances which have # task_state is not None (would get complicated during a migration); # for example, this could cleanup ironic instances that have # allocations on VCPU/MEMORY_MB/DISK_GB but are now using a custom # resource class # - deal with nested resource providers? heal_port_allocations = not skip_port_allocations output = lambda msg: None if verbose: output = lambda msg: print(msg) # If user has provided both cell and instance # Throw an error if instance_uuid and cell_uuid: print(_('The --cell and --instance options ' 'are mutually exclusive.')) return 127 if force and not instance_uuid: print(_('The --instance flag is required ' 'when using --force flag.')) return 127 # TODO(mriedem): Rather than --max-count being both a total and batch # count, should we have separate options to be specific, i.e. --total # and --batch-size? Then --batch-size defaults to 50 and --total # defaults to None to mean unlimited. if instance_uuid: max_count = 1 unlimited = False elif max_count is not None: try: max_count = int(max_count) except ValueError: max_count = -1 unlimited = False if max_count < 1: print(_('Must supply a positive integer for --max-count.')) return 127 else: max_count = 50 unlimited = True output(_('Running batches of %i until complete') % max_count) ctxt = context.get_admin_context() # If we are going to process a specific instance, just get the cell # it is in up front. if instance_uuid: try: im = objects.InstanceMapping.get_by_instance_uuid( ctxt, instance_uuid) cells = objects.CellMappingList(objects=[im.cell_mapping]) except exception.InstanceMappingNotFound: print('Unable to find cell for instance %s, is it mapped? Try ' 'running "nova-manage cell_v2 verify_instance" or ' '"nova-manage cell_v2 map_instances".' % instance_uuid) return 127 elif cell_uuid: try: # validate cell_uuid cell = objects.CellMapping.get_by_uuid(ctxt, cell_uuid) # create CellMappingList cells = objects.CellMappingList(objects=[cell]) except exception.CellMappingNotFound: print(_('Cell with uuid %s was not found.') % cell_uuid) return 127 else: cells = objects.CellMappingList.get_all(ctxt) if not cells: output(_('No cells to process.')) return 4 placement = report.report_client_singleton() neutron = None if heal_port_allocations: neutron = neutron_api.get_client(ctxt, admin=True) num_processed = 0 # TODO(mriedem): Use context.scatter_gather_skip_cell0. for cell in cells: # Skip cell0 since that is where instances go that do not get # scheduled and hence would not have allocations against a host. 
if cell.uuid == objects.CellMapping.CELL0_UUID: continue output(_('Looking for instances in cell: %s') % cell.identity) limit_per_cell = max_count if not unlimited: # Adjust the limit for the next cell. For example, if the user # only wants to process a total of 100 instances and we did # 75 in cell1, then we only need 25 more from cell2 and so on. limit_per_cell = max_count - num_processed with context.target_cell(ctxt, cell) as cctxt: try: num_processed += self._heal_instances_in_cell( cctxt, limit_per_cell, unlimited, output, placement, dry_run, instance_uuid, heal_port_allocations, neutron, force) except exception.ComputeHostNotFound as e: print(e.format_message()) return 2 except ( exception.AllocationCreateFailed, exception.AllocationUpdateFailed, exception.PlacementAPIConnectFailure ) as e: print(e.format_message()) return 3 except exception.UnableToQueryPorts as e: print(e.format_message()) return 5 except exception.UnableToUpdatePorts as e: print(e.format_message()) return 6 except exception.UnableToRollbackPortUpdates as e: print(e.format_message()) return 7 except ( exception.HealvGPUAllocationNotSupported, exception.HealDeviceProfileAllocationNotSupported, ) as e: print(e.format_message()) return 8 # Make sure we don't go over the max count. Note that we # don't include instances that already have allocations in the # max_count number, only the number of instances that have # successfully created allocations. # If a specific instance was provided then we'll just exit # the loop and process it below (either return 4 or 0). if num_processed == max_count and not instance_uuid: output(_('Max count reached. Processed %s instances.') % num_processed) return 1 output(_('Processed %s instances.') % num_processed) if not num_processed: return 4 return 0 @staticmethod def _get_rp_uuid_for_host(ctxt, host): """Finds the resource provider (compute node) UUID for the given host. :param ctxt: cell-targeted nova RequestContext :param host: name of the compute host :returns: The UUID of the resource provider (compute node) for the host :raises: nova.exception.HostMappingNotFound if no host_mappings record is found for the host; indicates "nova-manage cell_v2 discover_hosts" needs to be run on the cell. :raises: nova.exception.ComputeHostNotFound if no compute_nodes record is found in the cell database for the host; indicates the nova-compute service on that host might need to be restarted. :raises: nova.exception.TooManyComputesForHost if there are more than one compute_nodes records in the cell database for the host which is only possible (under normal circumstances) for ironic hosts but ironic hosts are not currently supported with host aggregates so if more than one compute node is found for the host, it is considered an error which the operator will need to resolve manually. """ # Get the host mapping to determine which cell it's in. hm = objects.HostMapping.get_by_host(ctxt, host) # Now get the compute node record for the host from the cell. with context.target_cell(ctxt, hm.cell_mapping) as cctxt: # There should really only be one, since only ironic # hosts can have multiple nodes, and you can't have # ironic hosts in aggregates for that reason. If we # find more than one, it's an error. nodes = objects.ComputeNodeList.get_all_by_host( cctxt, host) if len(nodes) > 1: # This shouldn't happen, so we need to bail since we # won't know which node to use. 
raise exception.TooManyComputesForHost( num_computes=len(nodes), host=host) return nodes[0].uuid @action_description( _("Mirrors compute host aggregates to resource provider aggregates " "in the Placement service. Requires the [api_database] and " "[placement] sections of the nova configuration file to be " "populated.")) @args('--verbose', action='store_true', dest='verbose', default=False, help='Provide verbose output during execution.') # TODO(mriedem): Add an option for the 'remove aggregate' behavior. # We know that we want to mirror hosts aggregate membership to # placement, but regarding removal, what if the operator or some external # tool added the resource provider to an aggregate but there is no matching # host aggregate, e.g. ironic nodes or shared storage provider # relationships? # TODO(mriedem): Probably want an option to pass a specific host instead of # doing all of them. def sync_aggregates(self, verbose=False): """Synchronizes nova host aggregates with resource provider aggregates Adds nodes to missing provider aggregates in Placement. NOTE: Depending on the size of your deployment and the number of compute hosts in aggregates, this command could cause a non-negligible amount of traffic to the placement service and therefore is recommended to be run during maintenance windows. Return codes: * 0: Successful run * 1: A host was found with more than one matching compute node record * 2: An unexpected error occurred while working with the placement API * 3: Failed updating provider aggregates in placement * 4: Host mappings not found for one or more host aggregate members * 5: Compute node records not found for one or more hosts * 6: Resource provider not found by uuid for a given host """ # Start by getting all host aggregates. ctxt = context.get_admin_context() aggregate_api = api.AggregateAPI() placement = aggregate_api.placement_client aggregates = aggregate_api.get_aggregate_list(ctxt) # Now we're going to loop over the existing compute hosts in aggregates # and check to see if their corresponding resource provider, found via # the host's compute node uuid, are in the same aggregate. If not, we # add the resource provider to the aggregate in Placement. output = lambda msg: None if verbose: output = lambda msg: print(msg) output(_('Filling in missing placement aggregates')) # Since hosts can be in more than one aggregate, keep track of the host # to its corresponding resource provider uuid to avoid redundant # lookups. host_to_rp_uuid = {} unmapped_hosts = set() # keep track of any missing host mappings computes_not_found = set() # keep track of missing nodes providers_not_found = {} # map of hostname to missing provider uuid for aggregate in aggregates: output(_('Processing aggregate: %s') % aggregate.name) for host in aggregate.hosts: output(_('Processing host: %s') % host) rp_uuid = host_to_rp_uuid.get(host) if not rp_uuid: try: rp_uuid = self._get_rp_uuid_for_host(ctxt, host) host_to_rp_uuid[host] = rp_uuid except exception.HostMappingNotFound: # Don't fail on this now, we can dump it at the end. unmapped_hosts.add(host) continue except exception.ComputeHostNotFound: # Don't fail on this now, we can dump it at the end. computes_not_found.add(host) continue except exception.TooManyComputesForHost as e: # TODO(mriedem): Should we treat this like the other # errors and not fail immediately but dump at the end? 
print(e.format_message()) return 1 # We've got our compute node record, so now we can ensure that # the matching resource provider, found via compute node uuid, # is in the same aggregate in placement, found via aggregate # uuid. try: placement.aggregate_add_host(ctxt, aggregate.uuid, rp_uuid=rp_uuid) output(_('Successfully added host (%(host)s) and ' 'provider (%(provider)s) to aggregate ' '(%(aggregate)s).') % {'host': host, 'provider': rp_uuid, 'aggregate': aggregate.uuid}) except exception.ResourceProviderNotFound: # The resource provider wasn't found. Store this for later. providers_not_found[host] = rp_uuid except exception.ResourceProviderAggregateRetrievalFailed as e: print(e.message) return 2 except exception.NovaException as e: # The exception message is too generic in this case print(_('Failed updating provider aggregates for ' 'host (%(host)s), provider (%(provider)s) ' 'and aggregate (%(aggregate)s). Error: ' '%(error)s') % {'host': host, 'provider': rp_uuid, 'aggregate': aggregate.uuid, 'error': e.message}) return 3 # Now do our error handling. Note that there is no real priority on # the error code we return. We want to dump all of the issues we hit # so the operator can fix them before re-running the command, but # whether we return 4 or 5 or 6 doesn't matter. return_code = 0 if unmapped_hosts: print(_('The following hosts were found in nova host aggregates ' 'but no host mappings were found in the nova API DB. Run ' '"nova-manage cell_v2 discover_hosts" and then retry. ' 'Missing: %s') % ','.join(unmapped_hosts)) return_code = 4 if computes_not_found: print(_('Unable to find matching compute_nodes record entries in ' 'the cell database for the following hosts; does the ' 'nova-compute service on each host need to be restarted? ' 'Missing: %s') % ','.join(computes_not_found)) return_code = 5 if providers_not_found: print(_('Unable to find matching resource provider record in ' 'placement with uuid for the following hosts: %s. Try ' 'restarting the nova-compute service on each host and ' 'then retry.') % ','.join('(%s=%s)' % (host, providers_not_found[host]) for host in sorted(providers_not_found.keys()))) return_code = 6 return return_code def _get_instances_and_current_migrations(self, ctxt, cn_uuid): if self.cn_uuid_mapping.get(cn_uuid): cell_uuid, cn_host, cn_node = self.cn_uuid_mapping[cn_uuid] else: # We need to find the compute node record from all cells. results = context.scatter_gather_skip_cell0( ctxt, objects.ComputeNode.get_by_uuid, cn_uuid) for result_cell_uuid, result in results.items(): if not context.is_cell_failure_sentinel(result): cn = result cell_uuid = result_cell_uuid break else: return False cn_host, cn_node = (cn.host, cn.hypervisor_hostname) self.cn_uuid_mapping[cn_uuid] = (cell_uuid, cn_host, cn_node) cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid) # Get all the active instances from this compute node if self.instances_mapping.get(cn_uuid): inst_uuids = self.instances_mapping[cn_uuid] else: # Get the instance list record from the cell. 
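            # Editorial note (not part of the original source): the result is
            # cached in self.instances_mapping keyed by compute node UUID (see
            # the assignment just below), so checking several providers rooted
            # at the same compute node does not re-query the cell database;
            # in-progress migrations, by contrast, are deliberately not cached
            # (see the NOTE further down).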
with context.target_cell(ctxt, cell_mapping) as cctxt: instances = objects.InstanceList.get_by_host_and_node( cctxt, cn_host, cn_node, expected_attrs=[]) inst_uuids = [instance.uuid for instance in instances] self.instances_mapping[cn_uuid] = inst_uuids # Get all *active* migrations for this compute node # NOTE(sbauza): Since migrations are transient, it's better to not # cache the results as they could be stale with context.target_cell(ctxt, cell_mapping) as cctxt: migs = objects.MigrationList.get_in_progress_by_host_and_node( cctxt, cn_host, cn_node) mig_uuids = [migration.uuid for migration in migs] return (inst_uuids, mig_uuids) def _delete_allocations_from_consumer(self, ctxt, placement, provider, consumer_uuid, consumer_type): """Deletes allocations from a resource provider with consumer UUID. :param ctxt: nova.context.RequestContext :param placement: nova.scheduler.client.report.SchedulerReportClient to communicate with the Placement service API. :param provider: Resource Provider to look at. :param consumer_uuid: the consumer UUID having allocations. :param consumer_type: the type of consumer, either 'instance' or 'migration' :returns: bool whether the allocations were deleted. """ # We need to be careful and only remove the allocations # against this specific RP or we would delete the # whole instance usage and then it would require some # healing. # TODO(sbauza): Remove this extra check once placement # supports querying allocation delete on both # consumer and resource provider parameters. allocations = placement.get_allocs_for_consumer( ctxt, consumer_uuid) if len(allocations['allocations']) > 1: # This consumer has resources spread among multiple RPs (think # nested or shared for example) # We then need to just update the usage to remove # the orphaned resources on the specific RP del allocations['allocations'][provider['uuid']] try: placement.put_allocations( ctxt, consumer_uuid, allocations) except exception.AllocationUpdateFailed: return False else: try: placement.delete_allocation_for_instance( ctxt, consumer_uuid, consumer_type, force=True) except exception.AllocationDeleteFailed: return False return True def _check_orphaned_allocations_for_provider(self, ctxt, placement, output, provider, delete): """Finds orphaned allocations for a specific resource provider. :param ctxt: nova.context.RequestContext :param placement: nova.scheduler.client.report.SchedulerReportClient to communicate with the Placement service API. :param output: function that takes a single message for verbose output :param provider: Resource Provider to look at. :param delete: deletes the found orphaned allocations. :return: a tuple (, ) """ num_processed = 0 faults = 0 # TODO(sbauza): Are we sure we have all Nova RCs ? # FIXME(sbauza): Possibly use consumer types once Placement API # supports them. # NOTE(sbauza): We check allocations having *any* below RC, not having # *all* of them. NOVA_RCS = [orc.VCPU, orc.MEMORY_MB, orc.DISK_GB, orc.VGPU, orc.NET_BW_EGR_KILOBIT_PER_SEC, orc.NET_BW_IGR_KILOBIT_PER_SEC, orc.PCPU, orc.MEM_ENCRYPTION_CONTEXT] # Since the RP can be a child RP, we need to get the root RP as it's # the compute node UUID # NOTE(sbauza): In case Placement doesn't support 1.14 microversion, # that means we don't have nested RPs. # Since we ask for microversion 1.14, all RPs have a root RP UUID. 
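        # Illustrative provider entry as returned by the placement
        # GET /resource_providers API at microversion 1.14 (only the keys
        # used here are shown; other fields are omitted):
        #   {'uuid': '<rp uuid>', 'name': '<hostname>',
        #    'root_provider_uuid': '<compute node uuid>', ...}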
cn_uuid = provider.get("root_provider_uuid") # Now get all the existing instances and active migrations for this # compute node result = self._get_instances_and_current_migrations(ctxt, cn_uuid) if result is False: # We don't want to hard stop here because the compute service could # have disappear while we could still have orphaned allocations. output(_('The compute node for UUID %s can not be ' 'found') % cn_uuid) inst_uuids, mig_uuids = result or ([], []) try: pallocs = placement.get_allocations_for_resource_provider( ctxt, provider['uuid']) except exception.ResourceProviderAllocationRetrievalFailed: print(_('Not able to find allocations for resource ' 'provider %s.') % provider['uuid']) raise # Verify every allocations for each consumer UUID for consumer_uuid, consumer_resources in pallocs.allocations.items(): consumer_allocs = consumer_resources['resources'] if any(rc in NOVA_RCS for rc in consumer_allocs): # We reset the consumer type for each allocation consumer_type = None # This is an allocation for Nova resources # We need to guess whether the instance was deleted # or if the instance is currently migrating if not (consumer_uuid in inst_uuids or consumer_uuid in mig_uuids): # By default we suspect the orphaned allocation was for a # migration... consumer_type = 'migration' if consumer_uuid not in inst_uuids: # ... but if we can't find it either for an instance, # that means it was for this. consumer_type = 'instance' if consumer_type is not None: output(_('Allocations were set against consumer UUID ' '%(consumer_uuid)s but no existing instances or ' 'active migrations are related. ') % {'consumer_uuid': consumer_uuid}) if delete: deleted = self._delete_allocations_from_consumer( ctxt, placement, provider, consumer_uuid, consumer_type) if not deleted: print(_('Not able to delete allocations ' 'for consumer UUID %s') % consumer_uuid) faults += 1 continue output(_('Deleted allocations for consumer UUID ' '%(consumer_uuid)s on Resource Provider ' '%(rp)s: %(allocations)s') % {'consumer_uuid': consumer_uuid, 'rp': provider['uuid'], 'allocations': consumer_allocs}) else: output(_('Allocations for consumer UUID ' '%(consumer_uuid)s on Resource Provider ' '%(rp)s can be deleted: ' '%(allocations)s') % {'consumer_uuid': consumer_uuid, 'rp': provider['uuid'], 'allocations': consumer_allocs}) num_processed += 1 return (num_processed, faults) # TODO(sbauza): Move this to the scheduler report client ? def _get_resource_provider(self, context, placement, uuid): """Returns a single Resource Provider by its UUID. :param context: The nova.context.RequestContext auth context :param placement: nova.scheduler.client.report.SchedulerReportClient to communicate with the Placement service API. :param uuid: A specific Resource Provider UUID :return: the existing resource provider. :raises: keystoneauth1.exceptions.base.ClientException on failure to communicate with the placement API """ resource_providers = self._get_resource_providers(context, placement, uuid=uuid) if not resource_providers: # The endpoint never returns a 404, it rather returns an empty list raise exception.ResourceProviderNotFound(name_or_uuid=uuid) return resource_providers[0] def _get_resource_providers(self, context, placement, **kwargs): """Returns all resource providers regardless of their relationships. :param context: The nova.context.RequestContext auth context :param placement: nova.scheduler.client.report.SchedulerReportClient to communicate with the Placement service API. 
:param kwargs: extra attributes for the query string :return: list of resource providers. :raises: keystoneauth1.exceptions.base.ClientException on failure to communicate with the placement API """ url = '/resource_providers' if 'uuid' in kwargs: url += '?uuid=%s' % kwargs['uuid'] resp = placement.get(url, global_request_id=context.global_id, version='1.14') if resp is None: raise exception.PlacementAPIConnectFailure() data = resp.json() resource_providers = data.get('resource_providers') return resource_providers @action_description( _("Audits orphaned allocations that are no longer corresponding to " "existing instance resources. This command requires that " "the [api_database]/connection and [placement] configuration " "options are set.")) @args('--verbose', action='store_true', dest='verbose', default=False, help='Provide verbose output during execution.') @args('--resource_provider', metavar='', dest='provider_uuid', help='UUID of a specific resource provider to verify.') @args('--delete', action='store_true', dest='delete', default=False, help='Deletes orphaned allocations that were found.') def audit(self, verbose=False, provider_uuid=None, delete=False): """Provides information about orphaned allocations that can be removed Return codes: * 0: Command completed successfully and no orphaned allocations exist. * 1: An unexpected error happened during run. * 3: Orphaned allocations were detected. * 4: Orphaned allocations were detected and deleted. * 127: Invalid input. """ ctxt = context.get_admin_context() output = lambda msg: None if verbose: output = lambda msg: print(msg) placement = report.report_client_singleton() # Resets two in-memory dicts for knowing instances per compute node self.cn_uuid_mapping = collections.defaultdict(tuple) self.instances_mapping = collections.defaultdict(list) num_processed = 0 faults = 0 if provider_uuid: try: resource_provider = self._get_resource_provider( ctxt, placement, provider_uuid) except exception.ResourceProviderNotFound: print(_('Resource provider with UUID %s does not exist.') % provider_uuid) return 127 resource_providers = [resource_provider] else: resource_providers = self._get_resource_providers(ctxt, placement) for provider in resource_providers: nb_p, faults = self._check_orphaned_allocations_for_provider( ctxt, placement, output, provider, delete) num_processed += nb_p if faults > 0: print(_('The Resource Provider %s had problems when ' 'deleting allocations. Stopping now. Please fix the ' 'problem by hand and run again.') % provider['uuid']) return 1 if num_processed > 0: suffix = 's.' if num_processed > 1 else '.' output(_('Processed %(num)s allocation%(suffix)s') % {'num': num_processed, 'suffix': suffix}) return 4 if delete else 3 return 0 class LibvirtCommands(object): """Commands for managing libvirt instances""" @action_description( _("Fetch the stored machine type of the instance from the database.")) @args('instance_uuid', metavar='', help='UUID of instance to fetch the machine type for') def get_machine_type(self, instance_uuid=None): """Fetch the stored machine type of the instance from the database. Return codes: * 0: Command completed successfully. * 1: An unexpected error happened. * 2: Unable to find instance or instance mapping. * 3: No machine type found for the instance. 
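
        Example (illustrative invocation):
            nova-manage libvirt get_machine_type <instance_uuid>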
""" try: ctxt = context.get_admin_context() mtype = machine_type_utils.get_machine_type(ctxt, instance_uuid) if mtype: print(mtype) return 0 else: print(_('No machine type registered for instance %s') % instance_uuid) return 3 except (exception.InstanceNotFound, exception.InstanceMappingNotFound) as e: print(str(e)) return 2 except Exception as e: print('Unexpected error, see nova-manage.log for the full ' 'trace: %s ' % str(e)) LOG.exception('Unexpected error') return 1 @action_description( _("Set or update the stored machine type of the instance in the " "database. This is only allowed for instances with a STOPPED, " "SHELVED or SHELVED_OFFLOADED vm_state.")) @args('instance_uuid', metavar='', help='UUID of instance to update') @args('machine_type', metavar='', help='Machine type to set') @args('--force', action='store_true', default=False, dest='force', help='Force the update of the stored machine type') def update_machine_type( self, instance_uuid=None, machine_type=None, force=False ): """Set or update the machine type of a given instance. Return codes: * 0: Command completed successfully. * 1: An unexpected error happened. * 2: Unable to find the instance or instance cell mapping. * 3: Invalid instance vm_state. * 4: Unable to move between underlying machine types (pc to q35 etc) or to older versions. * 5: Unsupported machine type. """ ctxt = context.get_admin_context() if force: print(_("Forcing update of machine type.")) try: rtype, ptype = machine_type_utils.update_machine_type( ctxt, instance_uuid, machine_type, force=force) except exception.UnsupportedMachineType as e: print(str(e)) return 5 except exception.InvalidMachineTypeUpdate as e: print(str(e)) return 4 except exception.InstanceInvalidState as e: print(str(e)) return 3 except ( exception.InstanceNotFound, exception.InstanceMappingNotFound, ) as e: print(str(e)) return 2 except Exception as e: print('Unexpected error, see nova-manage.log for the full ' 'trace: %s ' % str(e)) LOG.exception('Unexpected error') return 1 print(_("Updated instance %(instance_uuid)s machine type to " "%(machine_type)s (previously %(previous_type)s)") % {'instance_uuid': instance_uuid, 'machine_type': rtype, 'previous_type': ptype}) return 0 @action_description( _("List the UUIDs of instances that do not have hw_machine_type set " "in their image metadata")) @args('--cell-uuid', metavar='', dest='cell_uuid', required=False, help='UUID of cell from which to list instances') def list_unset_machine_type(self, cell_uuid=None): """List the UUIDs of instances without image_hw_machine_type set Return codes: * 0: Command completed successfully, no instances found. * 1: An unexpected error happened. * 2: Unable to find cell mapping. * 3: Instances found without hw_machine_type set. 
""" try: instance_list = machine_type_utils.get_instances_without_type( context.get_admin_context(), cell_uuid) except exception.CellMappingNotFound as e: print(str(e)) return 2 except Exception as e: print('Unexpected error, see nova-manage.log for the full ' 'trace: %s ' % str(e)) LOG.exception('Unexpected error') return 1 if instance_list: print('\n'.join(i.uuid for i in instance_list)) return 3 else: print(_("No instances found without hw_machine_type set.")) return 0 class VolumeAttachmentCommands(object): @action_description(_("Show the details of a given volume attachment.")) @args( 'instance_uuid', metavar='', help='UUID of the instance') @args( 'volume_id', metavar='', help='UUID of the volume') @args( '--connection_info', action='store_true', default=False, dest='connection_info', required=False, help='Only display the connection_info of the volume attachment.') @args( '--json', action='store_true', default=False, dest='json', required=False, help='Display output as json without a table.') def show( self, instance_uuid=None, volume_id=None, connection_info=False, json=False ): """Show attributes of a given volume attachment. Return codes: * 0: Command completed successfully. * 1: An unexpected error happened. * 2: Instance not found. * 3: Volume is not attached to instance. """ try: ctxt = context.get_admin_context() im = objects.InstanceMapping.get_by_instance_uuid( ctxt, instance_uuid) with context.target_cell(ctxt, im.cell_mapping) as cctxt: bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( cctxt, volume_id, instance_uuid) if connection_info and json: print(bdm.connection_info) elif connection_info: print(format_dict(jsonutils.loads(bdm.connection_info))) elif json: print(jsonutils.dumps(bdm)) else: print(format_dict(bdm)) return 0 except exception.VolumeBDMNotFound as e: print(str(e)) return 3 except ( exception.InstanceNotFound, exception.InstanceMappingNotFound, ) as e: print(str(e)) return 2 except Exception as e: print('Unexpected error, see nova-manage.log for the full ' 'trace: %s ' % str(e)) LOG.exception('Unexpected error') return 1 @action_description(_('Show the host connector for this host')) @args( '--json', action='store_true', default=False, dest='json', required=False, help='Display output as json without a table.') def get_connector(self, json=False): """Show the host connector for this host. Return codes: * 0: Command completed successfully. * 1: An unexpected error happened. """ try: root_helper = utils.get_root_helper() host_connector = connector.get_connector_properties( root_helper, CONF.my_block_storage_ip, CONF.libvirt.volume_use_multipath, enforce_multipath=True, host=CONF.host) if json: print(jsonutils.dumps(host_connector)) else: print(format_dict(host_connector)) return 0 except Exception as e: print('Unexpected error, see nova-manage.log for the full ' 'trace: %s ' % str(e)) LOG.exception('Unexpected error') return 1 def _refresh(self, instance_uuid, volume_id, connector): """Refresh the bdm.connection_info associated with a volume attachment Unlike the current driver BDM implementation under nova.virt.block_device.DriverVolumeBlockDevice.refresh_connection_info that simply GETs an existing volume attachment from cinder this method cleans up any existing volume connections from the host before creating a fresh attachment in cinder and populates the underlying BDM with connection_info from the new attachment. 
We can do that here as the command requires that the instance is stopped, something that isn't always the case with the current driver BDM approach and thus the two are kept separate for the time being. :param instance_uuid: UUID of instance :param volume_id: ID of volume attached to the instance :param connector: Connector with which to create the new attachment :return status_code: volume-refresh status_code 0 on success """ ctxt = context.get_admin_context() im = objects.InstanceMapping.get_by_instance_uuid(ctxt, instance_uuid) with context.target_cell(ctxt, im.cell_mapping) as cctxt: instance = objects.Instance.get_by_uuid(cctxt, instance_uuid) bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( cctxt, volume_id, instance_uuid) if instance.vm_state != obj_fields.InstanceState.STOPPED: raise exception.InstanceInvalidState( instance_uuid=instance_uuid, attr='vm_state', state=instance.vm_state, method='refresh connection_info (must be stopped)') locking_reason = ( f'Refreshing connection_info for BDM {bdm.uuid} ' f'associated with instance {instance_uuid} and volume ' f'{volume_id}.') with locked_instance(im.cell_mapping, instance, locking_reason): return self._do_refresh( cctxt, instance, volume_id, bdm, connector) def _do_refresh(self, cctxt, instance, volume_id, bdm, connector): volume_api = cinder.API() compute_rpcapi = rpcapi.ComputeAPI() new_attachment_id = None try: # Log this as an instance action so operators and users are # aware that this has happened. instance_action = objects.InstanceAction.action_start( cctxt, instance.uuid, instance_actions.NOVA_MANAGE_REFRESH_VOLUME_ATTACHMENT) # Create a blank attachment to keep the volume reserved new_attachment_id = volume_api.attachment_create( cctxt, volume_id, instance.uuid)['id'] # RPC call to the compute to cleanup the connections, which # will in turn unmap the volume from the compute host if instance.host == connector['host']: compute_rpcapi.remove_volume_connection( cctxt, instance, volume_id, instance.host, delete_attachment=True) else: msg = ( f"The compute host '{connector['host']}' in the " f"connector does not match the instance host " f"'{instance.host}'.") raise exception.HostConflict(_(msg)) # Update the attachment with host connector, this regenerates # the connection_info that we can now stash in the bdm. new_connection_info = volume_api.attachment_update( cctxt, new_attachment_id, connector, bdm.device_name)['connection_info'] # Before we save it to the BDM ensure the serial is stashed as # is done in various other codepaths when attaching volumes. if 'serial' not in new_connection_info: new_connection_info['serial'] = bdm.volume_id # Save the new attachment id and connection_info to the DB bdm.attachment_id = new_attachment_id bdm.connection_info = jsonutils.dumps(new_connection_info) bdm.save() # Finally mark the attachment as complete, moving the volume # status from attaching to in-use ahead of the instance # restarting volume_api.attachment_complete(cctxt, new_attachment_id) return 0 finally: # If the bdm.attachment_id wasn't updated make sure we clean # up any attachments created during the run. 
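            # Re-fetch the BDM from the cell database here so the cleanup
            # below compares against what was actually persisted, not the
            # possibly stale in-memory copy updated earlier in the try block.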
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( cctxt, volume_id, instance.uuid) if ( new_attachment_id and bdm.attachment_id != new_attachment_id ): volume_api.attachment_delete(cctxt, new_attachment_id) # If we failed during attachment_update the bdm.attachment_id # has already been deleted so recreate it now to ensure the # volume is still associated with the instance and clear the # now stale connection_info. try: volume_api.attachment_get(cctxt, bdm.attachment_id) except exception.VolumeAttachmentNotFound: bdm.attachment_id = volume_api.attachment_create( cctxt, volume_id, instance.uuid)['id'] bdm.connection_info = None bdm.save() # Finish the instance action if it was created and started # TODO(lyarwood): While not really required we should store # the exec and traceback in here on failure. if instance_action: instance_action.finish() @action_description( _("Refresh the connection info for a given volume attachment")) @args( 'instance_uuid', metavar='', help='UUID of the instance') @args( 'volume_id', metavar='', help='UUID of the volume') @args( 'connector_path', metavar='', help='Path to file containing the host connector in json format.') def refresh(self, instance_uuid=None, volume_id=None, connector_path=None): """Refresh the connection_info associated with a volume attachment Return codes: * 0: Command completed successfully. * 1: An unexpected error happened. * 2: Connector path does not exist. * 3: Failed to open connector path. * 4: Instance does not exist. * 5: Instance state invalid. * 6: Volume is not attached to instance. * 7: Connector host is not correct. """ try: # TODO(lyarwood): Make this optional and provide a rpcapi capable # of pulling this down from the target compute during this flow. if not os.path.exists(connector_path): raise exception.InvalidInput( reason=f'Connector file not found at {connector_path}') # Read in the json connector file with open(connector_path, 'rb') as connector_file: connector = jsonutils.load(connector_file) # Refresh the volume attachment return self._refresh(instance_uuid, volume_id, connector) except exception.HostConflict as e: print( f"The command 'nova-manage volume_attachment get_connector' " f"may have been run on the wrong compute host. Or the " f"instance host may be wrong and in need of repair.\n{e}") return 7 except exception.VolumeBDMNotFound as e: print(str(e)) return 6 except exception.InstanceInvalidState as e: print(str(e)) return 5 except ( exception.InstanceNotFound, exception.InstanceMappingNotFound, ) as e: print(str(e)) return 4 except ValueError as e: print( f'Failed to open {connector_path}. Does it contain valid ' f'connector_info data?\nError: {str(e)}' ) return 3 except OSError as e: print(str(e)) return 3 except exception.InvalidInput as e: print(str(e)) return 2 except Exception as e: print('Unexpected error, see nova-manage.log for the full ' 'trace: %s ' % str(e)) LOG.exception('Unexpected error') return 1 class ImagePropertyCommands: @action_description(_("Show the value of an instance image property.")) @args( 'instance_uuid', metavar='', help='UUID of the instance') @args( 'image_property', metavar='', help='Image property to show') def show(self, instance_uuid=None, image_property=None): """Show value of a given instance image property. Return codes: * 0: Command completed successfully. * 1: An unexpected error happened. * 2: Instance not found. * 3: Image property not found. 
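
        Example (illustrative invocation):
            nova-manage image_property show <instance_uuid> hw_machine_type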
""" try: ctxt = context.get_admin_context() im = objects.InstanceMapping.get_by_instance_uuid( ctxt, instance_uuid) with context.target_cell(ctxt, im.cell_mapping) as cctxt: instance = objects.Instance.get_by_uuid( cctxt, instance_uuid, expected_attrs=['system_metadata']) property_value = instance.system_metadata.get( f'image_{image_property}') if property_value: print(property_value) return 0 else: print(f'Image property {image_property} not found ' f'for instance {instance_uuid}.') return 3 except ( exception.InstanceNotFound, exception.InstanceMappingNotFound, ) as e: print(str(e)) return 2 except Exception as e: print(f'Unexpected error, see nova-manage.log for the full ' f'trace: {str(e)}') LOG.exception('Unexpected error') return 1 def _validate_image_properties(self, image_properties): """Validate the provided image property names and values :param image_properties: List of image property names and values """ # Sanity check the format of the provided properties, this should be # in the format of name=value. if any(x for x in image_properties if '=' not in x): raise exception.InvalidInput( "--property should use the format key=value") # Transform the list of delimited properties to a dict image_properties = dict(prop.split('=') for prop in image_properties) # Validate the names of each property by checking against the o.vo # fields currently listed by ImageProps. We can't use from_dict to # do this as it silently ignores invalid property keys. for image_property_name in image_properties.keys(): if image_property_name.startswith("trait:"): continue if image_property_name not in objects.ImageMetaProps.fields: raise exception.InvalidImagePropertyName( image_property_name=image_property_name) # Validate the values by creating an object from the provided dict. objects.ImageMetaProps.from_dict(image_properties) # Return the dict so we can update the instance system_metadata return image_properties def _update_image_properties(self, ctxt, instance, image_properties): """Update instance image properties :param ctxt: nova.context.RequestContext :param instance: The instance to update :param image_properties: List of image properties and values to update """ # Check the state of the instance allowed_states = [ obj_fields.InstanceState.STOPPED, obj_fields.InstanceState.SHELVED, obj_fields.InstanceState.SHELVED_OFFLOADED, ] if instance.vm_state not in allowed_states: raise exception.InstanceInvalidState( instance_uuid=instance.uuid, attr='vm_state', state=instance.vm_state, method='image_property set (must be STOPPED, SHELVED, OR ' 'SHELVED_OFFLOADED).') # Validate the property names and values image_properties = self._validate_image_properties(image_properties) # Update the image properties and save the instance record for image_property, value in image_properties.items(): instance.system_metadata[f'image_{image_property}'] = value request_spec = objects.RequestSpec.get_by_instance_uuid( ctxt, instance.uuid) request_spec.image = instance.image_meta # Save and return 0 instance.save() request_spec.save() return 0 @action_description(_( "Set the values of instance image properties stored in the database. " "This is only allowed for " "instances with a STOPPED, SHELVED or " "SHELVED_OFFLOADED vm_state.")) @args( 'instance_uuid', metavar='', help='UUID of the instance') @args( '--property', metavar='', action='append', dest='image_properties', help='Image property to set using the format name=value. 
For example: ' '--property hw_disk_bus=virtio --property hw_cdrom_bus=sata') def set(self, instance_uuid=None, image_properties=None): """Set instance image property values Return codes: * 0: Command completed successfully. * 1: An unexpected error happened. * 2: Unable to find instance. * 3: Instance is in an invalid state. * 4: Invalid input format. * 5: Invalid image property name. * 6: Invalid image property value. """ try: ctxt = context.get_admin_context() im = objects.InstanceMapping.get_by_instance_uuid( ctxt, instance_uuid) with context.target_cell(ctxt, im.cell_mapping) as cctxt: instance = objects.Instance.get_by_uuid( cctxt, instance_uuid, expected_attrs=['system_metadata']) return self._update_image_properties( ctxt, instance, image_properties) except ValueError as e: print(str(e)) return 6 except exception.InvalidImagePropertyName as e: print(str(e)) return 5 except exception.InvalidInput as e: print(str(e)) return 4 except exception.InstanceInvalidState as e: print(str(e)) return 3 except ( exception.InstanceNotFound, exception.InstanceMappingNotFound, ) as e: print(str(e)) return 2 except Exception as e: print('Unexpected error, see nova-manage.log for the full ' 'trace: %s ' % str(e)) LOG.exception('Unexpected error') return 1 class LimitsCommands(): def _create_unified_limits(self, ctxt, keystone_api, service_id, legacy_defaults, project_id, region_id, output, dry_run): return_code = 0 # Create registered (default) limits first. unified_to_legacy_names = dict( **local_limit.LEGACY_LIMITS, **placement_limit.LEGACY_LIMITS) legacy_to_unified_names = dict( zip(unified_to_legacy_names.values(), unified_to_legacy_names.keys())) # Handle the special case of PCPU. With legacy quotas, there is no # dedicated quota limit for PCPUs, so they share the quota limit for # VCPUs: 'cores'. With unified limits, class:PCPU has its own dedicated # quota limit, so we will just mirror the limit for class:VCPU and # create a limit with the same value for class:PCPU. if 'cores' in legacy_defaults: # Just make up a dummy legacy resource 'pcores' for this. legacy_defaults['pcores'] = legacy_defaults['cores'] unified_to_legacy_names['class:PCPU'] = 'pcores' legacy_to_unified_names['pcores'] = 'class:PCPU' # Retrieve the existing resource limits from Keystone. registered_limits = keystone_api.registered_limits(region_id=region_id) unified_defaults = { rl.resource_name: rl.default_limit for rl in registered_limits} # f-strings don't seem to work well with the _() translation function. msg = f'Found default limits in Keystone: {unified_defaults} ...' output(_(msg)) # Determine which resource limits are missing in Keystone so that we # can create them. output(_('Creating default limits in Keystone ...')) for resource, rlimit in legacy_defaults.items(): resource_name = legacy_to_unified_names[resource] if resource_name not in unified_defaults: msg = f'Creating default limit: {resource_name} = {rlimit}' if region_id: msg += f' in region {region_id}' output(_(msg)) if not dry_run: try: keystone_api.create_registered_limit( resource_name=resource_name, default_limit=rlimit, region_id=region_id, service_id=service_id) except Exception as e: msg = f'Failed to create default limit: {str(e)}' print(_(msg)) return_code = 1 else: existing_rlimit = unified_defaults[resource_name] msg = (f'A default limit: {resource_name} = {existing_rlimit} ' 'already exists in Keystone, skipping ...') output(_(msg)) # Create project limits if there are any. 
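        # Illustrative only: Quotas.get_all_by_project() returns a dict keyed
        # by resource name plus a 'project_id' entry, e.g.
        #   {'project_id': '<project>', 'instances': 10, 'cores': 20,
        #    'ram': 51200}
        # (values are made up here). The 'project_id' key is dropped below
        # before the limits are mirrored into Keystone.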
if not project_id: return return_code output(_('Reading project limits from the Nova API database ...')) legacy_projects = objects.Quotas.get_all_by_project(ctxt, project_id) legacy_projects.pop('project_id', None) msg = f'Found project limits in the database: {legacy_projects} ...' output(_(msg)) # Handle the special case of PCPU again for project limits. if 'cores' in legacy_projects: # Just make up a dummy legacy resource 'pcores' for this. legacy_projects['pcores'] = legacy_projects['cores'] # Retrieve existing limits from Keystone. project_limits = keystone_api.limits( project_id=project_id, region_id=region_id) unified_projects = { pl.resource_name: pl.resource_limit for pl in project_limits} msg = f'Found project limits in Keystone: {unified_projects} ...' output(_(msg)) output(_('Creating project limits in Keystone ...')) for resource, plimit in legacy_projects.items(): resource_name = legacy_to_unified_names[resource] if resource_name not in unified_projects: msg = ( f'Creating project limit: {resource_name} = {plimit} ' f'for project {project_id}') if region_id: msg += f' in region {region_id}' output(_(msg)) if not dry_run: try: keystone_api.create_limit( resource_name=resource_name, resource_limit=plimit, project_id=project_id, region_id=region_id, service_id=service_id) except Exception as e: msg = f'Failed to create project limit: {str(e)}' print(_(msg)) return_code = 1 else: existing_plimit = unified_projects[resource_name] msg = (f'A project limit: {resource_name} = {existing_plimit} ' 'already exists in Keystone, skipping ...') output(_(msg)) return return_code @staticmethod def _get_resources_from_flavor(flavor, warn_output): resources = set() for spec in [ s for s in flavor.extra_specs if s.startswith('resources:')]: resources.add('class:' + spec.lstrip('resources:')) try: for resource in scheduler_utils.resources_for_limits(flavor, is_bfv=False): resources.add('class:' + resource) except Exception as e: # This is to be resilient about potential extra spec translation # bugs like https://bugs.launchpad.net/nova/+bug/2088831 msg = _('An exception was raised: %s, skipping flavor %s' % (str(e), flavor.flavorid)) warn_output(msg) return resources def _get_resources_from_api_flavors(self, ctxt, output, warn_output): msg = _('Scanning flavors in API database for resource classes ...') output(msg) resources = set() marker = None while True: flavors = objects.FlavorList.get_all(ctxt, limit=500, marker=marker) for flavor in flavors: resources |= self._get_resources_from_flavor( flavor, warn_output) if not flavors: break marker = flavors[-1].flavorid return resources def _get_resources_from_embedded_flavors(self, ctxt, project_id, output, warn_output): project_str = f' project {project_id}' if project_id else '' msg = _('Scanning%s non-deleted instances embedded flavors for ' 'resource classes ...' 
% project_str) output(msg) resources = set() down_cell_uuids = set() marker = None while True: filters = {'deleted': False} if project_id: filters['project_id'] = project_id instances, cells = list_instances.get_instance_objects_sorted( ctxt, filters=filters, limit=500, marker=marker, expected_attrs=['flavor'], sort_keys=None, sort_dirs=None) down_cell_uuids |= set(cells) for instance in instances: resources |= self._get_resources_from_flavor( instance.flavor, warn_output) if not instances: break marker = instances[-1].uuid return resources, down_cell_uuids def _scan_flavors(self, ctxt, keystone_api, service_id, project_id, region_id, output, warn_output, verbose, no_embedded_flavor_scan): return_code = 0 # We already know we need to check class:DISK_GB because it is not a # legacy resource from a quota perspective. flavor_resources = set(['class:DISK_GB']) # Scan existing flavors to check whether any requestable resources are # missing registered limits in Keystone. flavor_resources |= self._get_resources_from_api_flavors( ctxt, output, warn_output) down_cell_uuids = None if not no_embedded_flavor_scan: # Scan the embedded flavors of non-deleted instances. resources, down_cell_uuids = ( self._get_resources_from_embedded_flavors( ctxt, project_id, output, warn_output)) flavor_resources |= resources # Retrieve the existing resource limits from Keystone (we may have # added new ones above). registered_limits = keystone_api.registered_limits( service_id=service_id, region_id=region_id) existing_limits = { li.resource_name: li.default_limit for li in registered_limits} table = prettytable.PrettyTable() table.align = 'l' table.field_names = ['Resource', 'Registered Limit'] table.sortby = 'Resource' found_missing = False for resource in flavor_resources: if resource in existing_limits: if verbose: table.add_row([resource, existing_limits[resource]]) else: found_missing = True table.add_row([resource, 'missing']) if table.rows: msg = _( 'The following resource classes were found during the scan:\n') warn_output(msg) warn_output(table) if down_cell_uuids: msg = _( 'NOTE: Cells %s did not respond and their data is not ' 'included in this table.' % down_cell_uuids) warn_output('\n' + textwrap.fill(msg, width=80)) if found_missing: msg = _( 'WARNING: It is strongly recommended to create registered ' 'limits for resource classes missing limits in Keystone ' 'before proceeding.') warn_output('\n' + textwrap.fill(msg, width=80)) return_code = 3 else: msg = _( 'SUCCESS: All resource classes have registered limits set.') warn_output(msg) return return_code @action_description( _("Copy quota limits from the Nova API database to Keystone.")) @args('--project-id', metavar='', dest='project_id', help='Project ID for which to migrate quota limits') @args('--region-id', metavar='', dest='region_id', help='Region ID for which to migrate quota limits') @args('--verbose', action='store_true', dest='verbose', default=False, help='Provide verbose output during execution.') @args('--dry-run', action='store_true', dest='dry_run', default=False, help='Show what limits would be created without actually ' 'creating them. 
Flavors will still be scanned for resource ' 'classes missing limits.') @args('--quiet', action='store_true', dest='quiet', default=False, help='Do not output anything during execution.') @args('--no-embedded-flavor-scan', action='store_true', dest='no_embedded_flavor_scan', default=False, help='Do not scan instances embedded flavors for resource classes ' 'missing limits.') def migrate_to_unified_limits(self, project_id=None, region_id=None, verbose=False, dry_run=False, quiet=False, no_embedded_flavor_scan=False): """Migrate quota limits from legacy quotas to unified limits. Return codes: * 0: Command completed successfully. * 1: An unexpected error occurred. * 2: Failed to connect to the database. * 3: Missing registered limits were identified. """ if verbose and quiet: print('--verbose and --quiet are mutually exclusive') return 1 ctxt = context.get_admin_context() # Verbose output is optional details. output = lambda msg: print(msg) if verbose else None # In general, we always want to show important warning output (for # example, warning about missing registered limits). Only suppress # warning output if --quiet was specified by the caller. warn_output = lambda msg: None if quiet else print(msg) output(_('Reading default limits from the Nova API database ...')) try: # This will look for limits in the 'default' quota class first and # then fall back to the [quota] config options. legacy_defaults = nova.quota.QUOTAS.get_defaults(ctxt) except db_exc.CantStartEngineError: print(_('Failed to connect to the database so aborting this ' 'migration attempt. Please check your config file to make ' 'sure that [api_database]/connection and ' '[database]/connection are set and run this ' 'command again.')) return 2 # Remove obsolete resource limits. for resource in ('fixed_ips', 'floating_ips', 'security_groups', 'security_group_rules'): if resource in legacy_defaults: msg = f'Skipping obsolete limit for {resource} ...' output(_(msg)) legacy_defaults.pop(resource) msg = ( f'Found default limits in the database: {legacy_defaults} ...') output(_(msg)) # For auth, reuse the [keystone_authtoken] section. if not hasattr(CONF, 'keystone_authtoken'): conf_utils.register_ksa_opts( CONF, 'keystone_authtoken', 'identity') keystone_api = utils.get_sdk_adapter( 'identity', admin=True, conf_group='keystone_authtoken') # Service ID is required in unified limits APIs. service_id = keystone_api.find_service('nova').id try: result = self._create_unified_limits( ctxt, keystone_api, service_id, legacy_defaults, project_id, region_id, output, dry_run) if result: # If there was an error, just return now. return result result = self._scan_flavors( ctxt, keystone_api, service_id, project_id, region_id, output, warn_output, verbose, no_embedded_flavor_scan) return result except db_exc.CantStartEngineError: print(_('Failed to connect to the database so aborting this ' 'migration attempt. 
Please check your config file to make ' 'sure that [api_database]/connection and ' '[database]/connection are set and run this ' 'command again.')) return 2 except Exception as e: msg = (f'Unexpected error, see nova-manage.log for the full ' f'trace: {str(e)}') print(_(msg)) LOG.exception('Unexpected error') return 1 CATEGORIES = { 'api_db': ApiDbCommands, 'cell_v2': CellV2Commands, 'db': DbCommands, 'placement': PlacementCommands, 'libvirt': LibvirtCommands, 'volume_attachment': VolumeAttachmentCommands, 'image_property': ImagePropertyCommands, 'limits': LimitsCommands, } add_command_parsers = functools.partial(cmd_common.add_command_parsers, categories=CATEGORIES) category_opt = cfg.SubCommandOpt('category', title='Command categories', help='Available categories', handler=add_command_parsers) post_mortem_opt = cfg.BoolOpt('post-mortem', default=False, help='Allow post-mortem debugging') def main(): """Parse options and call the appropriate class/method.""" CONF.register_cli_opts([category_opt, post_mortem_opt]) config.parse_args(sys.argv) logging.set_defaults( default_log_levels=logging.get_default_log_levels() + _EXTRA_DEFAULT_LOG_LEVELS) logging.setup(CONF, "nova") objects.register_all() if CONF.category.name == "version": print(version.version_string_with_package()) return 0 if CONF.category.name == "bash-completion": cmd_common.print_bash_completion(CATEGORIES) return 0 try: fn, fn_args, fn_kwargs = cmd_common.get_action_fn() ret = fn(*fn_args, **fn_kwargs) rpc.cleanup() return ret except Exception: if CONF.post_mortem: import pdb pdb.post_mortem() else: print(_("An error has occurred:\n%s") % traceback.format_exc()) return 255 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/cmd/novncproxy.py0000664000175000017500000000233000000000000017065 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Websocket proxy that is compatible with OpenStack Nova noVNC consoles. Leverages websockify.py by Joel Martin """ import sys from nova.cmd import baseproxy import nova.conf from nova.conf import vnc from nova import config from nova.console.securityproxy import rfb CONF = nova.conf.CONF vnc.register_cli_opts(CONF) def main(): # set default web flag option CONF.set_default('web', '/usr/share/novnc') config.parse_args(sys.argv) baseproxy.proxy( host=CONF.vnc.novncproxy_host, port=CONF.vnc.novncproxy_port, security_proxy=rfb.RFBSecurityProxy()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/cmd/policy.py0000664000175000017500000001307000000000000016142 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ CLI interface for nova policy rule commands. """ import functools import os import sys from oslo_config import cfg from nova.cmd import common as cmd_common import nova.conf from nova import config from nova import context as nova_context from nova.db.main import api as db from nova import exception from nova.i18n import _ from nova import policies from nova import version CONF = nova.conf.CONF cli_opts = [ cfg.ListOpt( 'os-roles', metavar='', default=os.environ.get('OS_ROLES'), help=_('Defaults to env[OS_ROLES].')), cfg.StrOpt( 'os-user-id', metavar='', default=os.environ.get('OS_USER_ID'), help=_('Defaults to env[OS_USER_ID].')), cfg.StrOpt( 'os-tenant-id', metavar='', default=os.environ.get('OS_TENANT_ID'), help=_('Defaults to env[OS_TENANT_ID].')), ] class PolicyCommands(object): """Commands for policy rules.""" _ACCEPTABLE_TARGETS = [ 'project_id', 'user_id', 'quota_class', 'availability_zone', 'instance_id'] @cmd_common.args( '--api-name', dest='api_name', metavar='', help=( 'Return only the passing policy rules containing the given API ' 'name. If unspecified, all passing policy rules will be returned.' ), ) @cmd_common.args( '--target', nargs='+', dest='target', metavar='', help=( "The target(s) against which the policy rule authorization will " "be tested. The available targets are: %s. When 'instance_id' is " "used, the other targets will be overwritten. If unspecified, the " "given user will be considered the target." % ', '.join( _ACCEPTABLE_TARGETS ) ), ) def check(self, api_name=None, target=None): """Prints all passing policy rules for the given user.""" context = self._get_context() api_name = api_name or '' target = self._get_target(context, target) allowed_operations = self._filter_rules(context, api_name, target) if allowed_operations: print('\n'.join(allowed_operations)) return 0 else: print('No rules matched or allowed') return 1 def _get_context(self): return nova_context.RequestContext( roles=CONF.os_roles, user_id=CONF.os_user_id, project_id=CONF.os_tenant_id) def _get_target(self, context, target): """Processes and validates the CLI given target and adapts it for policy authorization. :returns: None if the given target is None, otherwise returns a proper authorization target. :raises nova.exception.InvalidAttribute: if a key in the given target is not an acceptable. :raises nova.exception.InstanceNotFound: if 'instance_id' is given, and there is no instance match the id. """ if not target: return None new_target = {} for t in target: key, value = t.split('=') if key not in self._ACCEPTABLE_TARGETS: raise exception.InvalidAttribute(attr=key) new_target[key] = value # if the target is an instance_id, return an instance instead. 
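        # Illustrative only: a CLI target of the form
        #   --target instance_id=<uuid>
        # is resolved below to {'user_id': ..., 'project_id': ...} taken from
        # that instance, replacing any other targets that were supplied.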
instance_id = new_target.get('instance_id') if instance_id: admin_ctxt = nova_context.get_admin_context() instance = db.instance_get_by_uuid(admin_ctxt, instance_id) new_target = {'user_id': instance['user_id'], 'project_id': instance['project_id']} return new_target def _filter_rules(self, context, api_name, target): all_rules = policies.list_rules() return [rule.name for rule in all_rules if api_name in rule.name and context.can(rule.name, target, fatal=False)] CATEGORIES = { 'policy': PolicyCommands, } add_command_parsers = functools.partial(cmd_common.add_command_parsers, categories=CATEGORIES) category_opt = cfg.SubCommandOpt('category', title='Command categories', help='Available categories', handler=add_command_parsers) def main(): """Parse options and call the appropriate class/method.""" CONF.register_cli_opts(cli_opts) CONF.register_cli_opt(category_opt) config.parse_args(sys.argv) if CONF.category.name == "version": print(version.version_string_with_package()) return 0 if CONF.category.name == "bash-completion": cmd_common.print_bash_completion(CATEGORIES) return 0 try: fn, fn_args, fn_kwargs = cmd_common.get_action_fn() ret = fn(*fn_args, **fn_kwargs) return ret except Exception as ex: print(_("error: %s") % ex) return 1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/cmd/scheduler.py0000664000175000017500000000453100000000000016623 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for Nova Scheduler.""" import sys from oslo_concurrency import processutils from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts import nova.conf from nova import config from nova import objects from nova.scheduler import rpcapi from nova import service from nova import utils from nova import version CONF = nova.conf.CONF def main(): config.parse_args(sys.argv) logging.setup(CONF, "nova") objects.register_all() gmr_opts.set_defaults(CONF) objects.Service.enable_min_version_cache() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) server = service.Service.create( binary='nova-scheduler', topic=rpcapi.RPC_TOPIC) # Determine the number of workers; if not specified in config, default # to number of CPUs workers = CONF.scheduler.workers or processutils.get_worker_count() # NOTE(gibi): The oslo.service backend creates the worker processes # via os.fork. As nova already initialized these executor(s) # in the master process, and os.fork's behavior is to copy the state of # parent to the child process, we destroy the executor in the parent # process before the forking, so that the workers initialize new # executor(s) and therefore avoid working with the wrong internal executor # state (i.e. number of workers idle in the pool). 
A long therm solution # would be to use os.spawn instead of os.fork for the workers. utils.destroy_scatter_gather_executor() utils.destroy_default_executor() service.serve(server, workers=workers) service.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/cmd/serialproxy.py0000664000175000017500000000222000000000000017217 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Websocket proxy that is compatible with OpenStack Nova Serial consoles. Leverages websockify.py by Joel Martin. Based on nova-novncproxy. """ import sys from nova.cmd import baseproxy import nova.conf from nova.conf import serial_console as serial from nova import config CONF = nova.conf.CONF serial.register_cli_opts(CONF) def main(): # set default web flag option CONF.set_default('web', None) config.parse_args(sys.argv) baseproxy.proxy( host=CONF.serial_console.serialproxy_host, port=CONF.serial_console.serialproxy_port) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/cmd/spicehtml5proxy.py0000664000175000017500000000207100000000000020021 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Websocket proxy that is compatible with OpenStack Nova SPICE HTML5 consoles. Leverages websockify.py by Joel Martin """ import sys from nova.cmd import baseproxy import nova.conf from nova.conf import spice from nova import config CONF = nova.conf.CONF spice.register_cli_opts(CONF) def main(): config.parse_args(sys.argv) baseproxy.proxy( host=CONF.spice.html5proxy_host, port=CONF.spice.html5proxy_port) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/cmd/status.py0000664000175000017500000003555500000000000016202 0ustar00zuulzuul00000000000000# Copyright 2016 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ CLI interface for nova status commands. """ import functools import sys import traceback from keystoneauth1 import exceptions as ks_exc import microversion_parse from oslo_config import cfg from oslo_upgradecheck import common_checks from oslo_upgradecheck import upgradecheck import sqlalchemy as sa from sqlalchemy import func as sqlfunc from nova.cmd import common as cmd_common import nova.conf from nova import config from nova import context as nova_context from nova.db.api import api as api_db_api from nova.db.main import api as main_db_api from nova import exception from nova.i18n import _ from nova.objects import cell_mapping as cell_mapping_obj # NOTE(lyarwood): The following are imported as machine_type_utils expects them # to be registered under nova.objects when called via _check_machine_type_set from nova.objects import image_meta as image_meta_obj # noqa: F401 from nova.objects import instance as instance_obj # noqa: F401 from nova import utils from nova import version from nova.virt.libvirt import machine_type_utils from nova.volume import cinder CONF = nova.conf.CONF # NOTE(gibi): 1.36 is required by nova-scheduler to support the same_subtree # queryparam to make GET /allocation_candidates require that a list of request # groups are satisfied from the same provider subtree. # NOTE: If you bump this version, remember to update the history # section in the nova-status man page (doc/source/cli/nova-status). MIN_PLACEMENT_MICROVERSION = "1.36" # NOTE(mriedem): 3.44 is needed to work with volume attachment records which # are required for supporting multi-attach capable volumes. MIN_CINDER_MICROVERSION = '3.44' class UpgradeCommands(upgradecheck.UpgradeCommands): """Commands related to upgrades. The subcommands here must not rely on the nova object model since they should be able to run on n-1 data. Any queries to the database should be done through the sqlalchemy query language directly like the database schema migrations. """ def _count_compute_nodes(self, context=None): """Returns the number of compute nodes in the cell database.""" # NOTE(mriedem): This does not filter based on the service status # because a disabled nova-compute service could still be reporting # inventory info to the placement service. There could be an outside # chance that there are compute node records in the database for # disabled nova-compute services that aren't yet upgraded to Ocata or # the nova-compute service was deleted and the service isn't actually # running on the compute host but the operator hasn't cleaned up the # compute_nodes entry in the database yet. We consider those edge cases # here and the worst case scenario is we give a warning that there are # more compute nodes than resource providers. We can tighten this up # later if needed, for example by not including compute nodes that # don't have a corresponding nova-compute service in the services # table, or by only counting compute nodes with a service version of at # least 15 which was the highest service version when Newton was # released. meta = sa.MetaData() engine = main_db_api.get_engine(context=context) compute_nodes = sa.Table('compute_nodes', meta, autoload_with=engine) with engine.connect() as conn: return conn.execute( sa.select(sqlfunc.count()).select_from(compute_nodes).where( compute_nodes.c.deleted == 0 ) ).scalars().first() def _check_cellsv2(self): """Checks to see if cells v2 has been setup. 
These are the same checks performed in the 030_require_cell_setup API DB migration except we expect this to be run AFTER the nova-manage cell_v2 simple_cell_setup command, which would create the cell and host mappings and sync the cell0 database schema, so we don't check for flavors at all because you could create those after doing this on an initial install. This also has to be careful about checking for compute nodes if there are no host mappings on a fresh install. """ meta = sa.MetaData() engine = api_db_api.get_engine() cell_mappings = self._get_cell_mappings() count = len(cell_mappings) # Two mappings are required at a minimum, cell0 and your first cell if count < 2: msg = _('There needs to be at least two cell mappings, one for ' 'cell0 and one for your first cell. Run command ' '\'nova-manage cell_v2 simple_cell_setup\' and then ' 'retry.') return upgradecheck.Result(upgradecheck.Code.FAILURE, msg) cell0 = any(mapping.is_cell0() for mapping in cell_mappings) if not cell0: msg = _('No cell0 mapping found. Run command ' '\'nova-manage cell_v2 simple_cell_setup\' and then ' 'retry.') return upgradecheck.Result(upgradecheck.Code.FAILURE, msg) host_mappings = sa.Table('host_mappings', meta, autoload_with=engine) with engine.connect() as conn: count = conn.execute( sa.select(sqlfunc.count()).select_from(host_mappings) ).scalars().first() if count == 0: # This may be a fresh install in which case there may not be any # compute_nodes in the cell database if the nova-compute service # hasn't started yet to create those records. So let's query the # cell database for compute_nodes records and if we find at least # one it's a failure. num_computes = self._count_compute_nodes() if num_computes > 0: msg = _('No host mappings found but there are compute nodes. ' 'Run command \'nova-manage cell_v2 ' 'simple_cell_setup\' and then retry.') return upgradecheck.Result(upgradecheck.Code.FAILURE, msg) msg = _('No host mappings or compute nodes were found. Remember ' 'to run command \'nova-manage cell_v2 discover_hosts\' ' 'when new compute hosts are deployed.') return upgradecheck.Result(upgradecheck.Code.SUCCESS, msg) return upgradecheck.Result(upgradecheck.Code.SUCCESS) @staticmethod def _placement_get(path): """Do an HTTP get call against placement engine. This is in a dedicated method to make it easier for unit testing purposes. """ client = utils.get_ksa_adapter('placement') return client.get(path, raise_exc=True).json() def _check_placement(self): """Checks to see if the placement API is ready for scheduling. Checks to see that the placement API service is registered in the service catalog and that we can make requests against it. 
""" try: # TODO(efried): Use ksa's version filtering in _placement_get versions = self._placement_get("/") max_version = microversion_parse.parse_version_string( versions["versions"][0]["max_version"]) needs_version = microversion_parse.parse_version_string( MIN_PLACEMENT_MICROVERSION) if max_version < needs_version: msg = (_('Placement API version %(needed)s needed, ' 'you have %(current)s.') % {'needed': needs_version, 'current': max_version}) return upgradecheck.Result(upgradecheck.Code.FAILURE, msg) except ks_exc.MissingAuthPlugin: msg = _('No credentials specified for placement API in nova.conf.') return upgradecheck.Result(upgradecheck.Code.FAILURE, msg) except ks_exc.Unauthorized: msg = _('Placement service credentials do not work.') return upgradecheck.Result(upgradecheck.Code.FAILURE, msg) except ks_exc.EndpointNotFound: msg = _('Placement API endpoint not found.') return upgradecheck.Result(upgradecheck.Code.FAILURE, msg) except ks_exc.DiscoveryFailure: msg = _('Discovery for placement API URI failed.') return upgradecheck.Result(upgradecheck.Code.FAILURE, msg) except ks_exc.NotFound: msg = _('Placement API does not seem to be running.') return upgradecheck.Result(upgradecheck.Code.FAILURE, msg) return upgradecheck.Result(upgradecheck.Code.SUCCESS) @staticmethod def _get_cell_mappings(): """Queries the API database for cell mappings. .. note:: This method is unique in that it queries the database using CellMappingList.get_all() rather than a direct query using the sqlalchemy models. This is because CellMapping.database_connection can be a template and the object takes care of formatting the URL. We cannot use RowProxy objects from sqlalchemy because we cannot set the formatted 'database_connection' value back on those objects (they are read-only). :returns: list of nova.objects.CellMapping objects """ ctxt = nova_context.get_admin_context() cell_mappings = cell_mapping_obj.CellMappingList.get_all(ctxt) return cell_mappings def _check_cinder(self): """Checks to see that the cinder API is available at a given minimum microversion. """ # Check to see if nova is even configured for Cinder yet (fresh install # or maybe not using Cinder at all). if CONF.cinder.auth_type is None: return upgradecheck.Result(upgradecheck.Code.SUCCESS) try: # TODO(mriedem): Eventually use get_ksa_adapter here when it # supports cinder. cinder.is_microversion_supported( nova_context.get_admin_context(), MIN_CINDER_MICROVERSION) except exception.CinderAPIVersionNotAvailable: return upgradecheck.Result( upgradecheck.Code.FAILURE, _('Cinder API %s or greater is required. Deploy at least ' 'Cinder 12.0.0 (Queens).') % MIN_CINDER_MICROVERSION) except Exception as ex: # Anything else trying to connect, like bad config, is out of our # hands so just return a warning. return upgradecheck.Result( upgradecheck.Code.WARNING, _('Unable to determine Cinder API version due to error: %s') % str(ex)) return upgradecheck.Result(upgradecheck.Code.SUCCESS) def _check_old_computes(self): # warn if there are computes in the system older than the previous # major release try: utils.raise_if_old_compute() except exception.TooOldComputeService as e: return upgradecheck.Result(upgradecheck.Code.FAILURE, str(e)) return upgradecheck.Result(upgradecheck.Code.SUCCESS) def _check_machine_type_set(self): ctxt = nova_context.get_admin_context() if machine_type_utils.get_instances_without_type(ctxt): msg = (_(""" Instances found without hw_machine_type set. 
This warning can be ignored if your environment does not contain libvirt based compute hosts. Use the `nova-manage libvirt list_unset_machine_type` command to list these instances. For more details see the following: https://docs.openstack.org/nova/latest/admin/hw-machine-type.html""")) return upgradecheck.Result(upgradecheck.Code.WARNING, msg) return upgradecheck.Result(upgradecheck.Code.SUCCESS) def _check_service_user_token(self): if not CONF.service_user.send_service_user_token: msg = (_(""" Service user token configuration is required for all Nova services. For more details see the following: https://docs.openstack.org/nova/latest/admin/configuration/service-user-token.html""")) # noqa return upgradecheck.Result(upgradecheck.Code.FAILURE, msg) return upgradecheck.Result(upgradecheck.Code.SUCCESS) # The format of the check functions is to return an upgradecheck.Result # object with the appropriate upgradecheck.Code and details set. If the # check hits warnings or failures then those should be stored in the # returned upgradecheck.Result's "details" attribute. The summary will # be rolled up at the end of the check() function. These functions are # intended to be run in order and build on top of each other so order # matters. _upgrade_checks = ( # Added in Ocata (_('Cells v2'), _check_cellsv2), # Added in Ocata (_('Placement API'), _check_placement), # Added in Train (_('Cinder API'), _check_cinder), # Added in Victoria ( _('Policy File JSON to YAML Migration'), (common_checks.check_policy_json, {'conf': CONF}) ), # Added in Wallaby (_('Older than N-1 computes'), _check_old_computes), # Added in Wallaby (_('hw_machine_type unset'), _check_machine_type_set), # Added in Bobcat (_('Service User Token Configuration'), _check_service_user_token), ) CATEGORIES = { 'upgrade': UpgradeCommands, } add_command_parsers = functools.partial(cmd_common.add_command_parsers, categories=CATEGORIES) category_opt = cfg.SubCommandOpt('category', title='Command categories', help='Available categories', handler=add_command_parsers) def main(): """Parse options and call the appropriate class/method.""" CONF.register_cli_opt(category_opt) config.parse_args(sys.argv) if CONF.category.name == "version": print(version.version_string_with_package()) return 0 if CONF.category.name == "bash-completion": cmd_common.print_bash_completion(CATEGORIES) return 0 try: fn, fn_args, fn_kwargs = cmd_common.get_action_fn() ret = fn(*fn_args, **fn_kwargs) return ret except Exception: print(_('Error:\n%s') % traceback.format_exc()) # This is 255 so it's not confused with the upgrade check exit codes. return 255
nova-32.0.0/nova/compute/__init__.py
nova-32.0.0/nova/compute/api.py
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Piston Cloud Computing, Inc. # Copyright 2012-2013 Red Hat, Inc. # All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Handles all requests relating to compute resources (e.g. guest VMs, networking and storage of VMs, and compute hosts on which they run).""" import collections import functools import re import typing as ty from castellan import key_manager import os_traits from oslo_log import log as logging from oslo_messaging import exceptions as oslo_exceptions from oslo_serialization import base64 as base64utils from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import units from oslo_utils import uuidutils from nova.accelerator import cyborg from nova import availability_zones from nova import block_device from nova.compute import flavors from nova.compute import instance_actions from nova.compute import instance_list from nova.compute import migration_list from nova.compute import power_state from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute.utils import wrap_instance_event from nova.compute import vm_states from nova import conductor import nova.conf from nova import context as nova_context from nova import crypto from nova.db.api import api as api_db_api from nova.db.main import api as main_db_api from nova import exception from nova import exception_wrapper from nova.i18n import _ from nova.image import glance from nova.limit import local as local_limit from nova.limit import placement as placement_limits from nova.limit import utils as limit_utils from nova.network import constants from nova.network import model as network_model from nova.network import neutron from nova.network import security_group_api from nova import objects from nova.objects import block_device as block_device_obj from nova.objects import external_event as external_event_obj from nova.objects import fields as fields_obj from nova.objects import image_meta as image_meta_obj from nova.objects import keypair as keypair_obj from nova.objects import quotas as quotas_obj from nova.objects import service as service_obj from nova.pci import request as pci_request from nova.policies import servers as servers_policies from nova.policies import shelve as shelve_policies import nova.policy from nova import profiler from nova import rpc from nova.scheduler.client import query from nova.scheduler.client import report from nova.scheduler import utils as scheduler_utils from nova import servicegroup from nova import utils from nova.virt import hardware from nova.volume import cinder LOG = logging.getLogger(__name__) # NOTE(gibi): legacy notification used compute as a service but these # calls still run on the client side of the compute service which is # nova-api. By setting the binary to nova-api below, we can make sure # that the new versioned notifications has the right publisher_id but the # legacy notifications does not change. 
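# ---------------------------------------------------------------------------
# Illustrative aside (not part of Nova): the ``wrap_exception`` helper defined
# just below is built with ``functools.partial`` so that every call site in
# this module reports ``binary='nova-api'`` without repeating the argument, as
# the note above explains. This self-contained sketch shows the same
# pre-binding pattern with a made-up ``_notify`` function; all names here are
# hypothetical and only demonstrate how partial() fixes keyword arguments.
import functools


def _notify(message, service=None, binary=None):
    """Pretend notifier that just records who emitted the message."""
    return '%s/%s: %s' % (service, binary, message)


# Pre-bind the publisher identity once, then reuse it everywhere.
_api_notify = functools.partial(_notify, service='compute', binary='nova-api')

assert _api_notify('instance created') == 'compute/nova-api: instance created'
# ---------------------------------------------------------------------------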
wrap_exception = functools.partial( exception_wrapper.wrap_exception, service='compute', binary='nova-api') CONF = nova.conf.CONF AGGREGATE_ACTION_UPDATE = 'Update' AGGREGATE_ACTION_UPDATE_META = 'UpdateMeta' AGGREGATE_ACTION_DELETE = 'Delete' AGGREGATE_ACTION_ADD = 'Add' MIN_COMPUTE_SYNC_COMPUTE_STATUS_DISABLED = 38 MIN_COMPUTE_CROSS_CELL_RESIZE = 47 MIN_COMPUTE_SAME_HOST_COLD_MIGRATE = 48 # TODO(huaqiang): Remove in Wallaby MIN_VER_NOVA_COMPUTE_MIXED_POLICY = 52 SUPPORT_ACCELERATOR_SERVICE_FOR_REBUILD = 53 SUPPORT_VNIC_TYPE_ACCELERATOR = 57 MIN_COMPUTE_BOOT_WITH_EXTENDED_RESOURCE_REQUEST = 58 MIN_COMPUTE_MOVE_WITH_EXTENDED_RESOURCE_REQUEST = 59 MIN_COMPUTE_INT_ATTACH_WITH_EXTENDED_RES_REQ = 60 SUPPORT_VNIC_TYPE_REMOTE_MANAGED = 61 MIN_COMPUTE_VDPA_ATTACH_DETACH = 62 MIN_COMPUTE_VDPA_HOTPLUG_LIVE_MIGRATION = 63 SUPPORT_SHARES = 67 MIN_COMPUTE_SOUND_MODEL_TRAITS = 69 MIN_COMPUTE_USB_MODEL_TRAITS = 70 # FIXME(danms): Keep a global cache of the cells we find the # first time we look. This needs to be refreshed on a timer or # trigger. CELLS = [] def check_instance_state(vm_state=None, task_state=(None,), must_have_launched=True): """Decorator to check VM and/or task state before entry to API functions. If the instance is in the wrong state, or has not been successfully started at least once the wrapper will raise an exception. """ if vm_state is not None and not isinstance(vm_state, set): vm_state = set(vm_state) if task_state is not None and not isinstance(task_state, set): task_state = set(task_state) def outer(f): @functools.wraps(f) def inner(self, context, instance, *args, **kw): if vm_state is not None and instance.vm_state not in vm_state: raise exception.InstanceInvalidState( attr='vm_state', instance_uuid=instance.uuid, state=instance.vm_state, method=f.__name__) if (task_state is not None and instance.task_state not in task_state): raise exception.InstanceInvalidState( attr='task_state', instance_uuid=instance.uuid, state=instance.task_state, method=f.__name__) if must_have_launched and not instance.launched_at: raise exception.InstanceInvalidState( attr='launched_at', instance_uuid=instance.uuid, state=instance.launched_at, method=f.__name__) return f(self, context, instance, *args, **kw) return inner return outer def _set_or_none(q): return q if q is None or isinstance(q, set) else set(q) def reject_instance_state(vm_state=None, task_state=None): """Decorator. Raise InstanceInvalidState if instance is in any of the given states. """ vm_state = _set_or_none(vm_state) task_state = _set_or_none(task_state) def outer(f): @functools.wraps(f) def inner(self, context, instance, *args, **kw): _InstanceInvalidState = functools.partial( exception.InstanceInvalidState, instance_uuid=instance.uuid, method=f.__name__) if vm_state is not None and instance.vm_state in vm_state: raise _InstanceInvalidState( attr='vm_state', state=instance.vm_state) if task_state is not None and instance.task_state in task_state: raise _InstanceInvalidState( attr='task_state', state=instance.task_state) return f(self, context, instance, *args, **kw) return inner return outer def check_instance_host(check_is_up=False): """Validate the instance.host before performing the operation. At a minimum this method will check that the instance.host is set. :param check_is_up: If True, check that the instance.host status is UP or MAINTENANCE (disabled but not down). 
:raises: InstanceNotReady if the instance.host is not set :raises: ServiceUnavailable if check_is_up=True and the instance.host compute service status is not UP or MAINTENANCE """ def outer(function): @functools.wraps(function) def wrapped(self, context, instance, *args, **kwargs): if not instance.host: raise exception.InstanceNotReady(instance_id=instance.uuid) if check_is_up: # Make sure the source compute service is not down otherwise we # cannot proceed. service = [ service for service in instance.services if service.binary == 'nova-compute'][0] if not self.servicegroup_api.service_is_up(service): # ComputeServiceUnavailable would make more sense here but # we do not want to leak hostnames to end users. raise exception.ServiceUnavailable() return function(self, context, instance, *args, **kwargs) return wrapped return outer def check_instance_lock(function): @functools.wraps(function) def inner(self, context, instance, *args, **kwargs): if instance.locked and not context.is_admin: raise exception.InstanceIsLocked(instance_uuid=instance.uuid) return function(self, context, instance, *args, **kwargs) return inner def reject_sev_instances(operation): """Reject requests to decorated function if instance has SEV enabled. Raise OperationNotSupportedForSEV if instance has SEV enabled. """ def outer(f): @functools.wraps(f) def inner(self, context, instance, *args, **kw): if hardware.get_mem_encryption_constraint(instance.flavor, instance.image_meta): raise exception.OperationNotSupportedForSEV( instance_uuid=instance.uuid, operation=operation) return f(self, context, instance, *args, **kw) return inner return outer def reject_vtpm_instances(operation): """Reject requests to decorated function if instance has vTPM enabled. Raise OperationNotSupportedForVTPM if instance has vTPM enabled. """ def outer(f): @functools.wraps(f) def inner(self, context, instance, *args, **kw): if hardware.get_vtpm_constraint( instance.flavor, instance.image_meta, ): raise exception.OperationNotSupportedForVTPM( instance_uuid=instance.uuid, operation=operation) return f(self, context, instance, *args, **kw) return inner return outer def reject_vdpa_instances(operation, until=None): """Reject requests to decorated function if instance has vDPA interfaces. Raise OperationNotSupportedForVDPAInterfaces if operations involves one or more vDPA interfaces. """ def outer(f): @functools.wraps(f) def inner(self, context, instance, *args, **kw): if any( vif['vnic_type'] == network_model.VNIC_TYPE_VDPA for vif in instance.get_network_info() ): reject = True if until is not None: min_ver = objects.service.get_minimum_version_all_cells( nova_context.get_admin_context(), ['nova-compute'] ) if min_ver >= until: reject = False if reject: raise exception.OperationNotSupportedForVDPAInterface( instance_uuid=instance.uuid, operation=operation ) return f(self, context, instance, *args, **kw) return inner return outer def load_cells(): global CELLS if not CELLS: CELLS = objects.CellMappingList.get_all( nova_context.get_admin_context()) LOG.debug('Found %(count)i cells: %(cells)s', dict(count=len(CELLS), cells=','.join([c.identity for c in CELLS]))) if not CELLS: LOG.error('No cells are configured, unable to continue') def _get_image_meta_obj(image_meta_dict): try: image_meta = objects.ImageMeta.from_dict(image_meta_dict) except ValueError as e: # there must be invalid values in the image meta properties so # consider this an invalid request msg = _('Invalid image metadata. 
Error: %s') % str(e) raise exception.InvalidRequest(msg) return image_meta def block_accelerators(until_service=None): def inner(func): @functools.wraps(func) def wrapper(self, context, instance, *args, **kwargs): # NOTE(brinzhang): Catch a request operating a mixed instance, # make sure all nova-compute services have been upgraded and # support the accelerators. dp_name = instance.flavor.extra_specs.get('accel:device_profile') service_support = False if not dp_name: service_support = True elif until_service: min_version = objects.service.get_minimum_version_all_cells( nova_context.get_admin_context(), ['nova-compute']) if min_version >= until_service: service_support = True if not service_support: raise exception.ForbiddenWithAccelerators() return func(self, context, instance, *args, **kwargs) return wrapper return inner def block_port_accelerators(): def inner(func): @functools.wraps(func) def wrapper(self, context, instance, *args, **kwargs): # Catch a request operating a instance with accelerators # attach to ports. nw_info = instance.get_network_info() for vif in nw_info: vnic_type = vif['vnic_type'] if vnic_type in (network_model.VNIC_TYPE_ACCELERATOR_DIRECT, network_model.VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL): raise exception.ForbiddenPortsWithAccelerator() return func(self, context, instance, *args, **kwargs) return wrapper return inner def block_shares_not_supported(): """Block actions not allowed if the instance has a share. """ def inner(func): @functools.wraps(func) def wrapper(self, context, instance, *args, **kwargs): # Check if instance has a share mapped if instance_has_share(context, instance): raise exception.ForbiddenWithShare() return func(self, context, instance, *args, **kwargs) return wrapper return inner def instance_has_share(context, instance): im = objects.InstanceMapping.get_by_instance_uuid( context, instance.uuid) with nova_context.target_cell(context, im.cell_mapping) as cctxt: db_shares = ( objects.share_mapping.ShareMappingList.get_by_instance_uuid( cctxt, instance.uuid) ) return db_shares def block_extended_resource_request(function): @functools.wraps(function) def inner(self, context, instance, *args, **kwargs): if self.network_api.instance_has_extended_resource_request( instance.uuid ): version = service_obj.get_minimum_version_all_cells( context, ["nova-compute"]) if version < MIN_COMPUTE_MOVE_WITH_EXTENDED_RESOURCE_REQUEST: raise exception.ExtendedResourceRequestOldCompute() return function(self, context, instance, *args, **kwargs) return inner @profiler.trace_cls("compute_api") class API: """API for interacting with the compute manager.""" _sentinel = object() def __init__(self, image_api=None, network_api=None, volume_api=None): self.image_api = image_api or glance.API() self.network_api = network_api or neutron.API() self.volume_api = volume_api or cinder.API() self.compute_rpcapi = compute_rpcapi.ComputeAPI() self.compute_task_api = conductor.ComputeTaskAPI() self.servicegroup_api = servicegroup.API() self.host_api = HostAPI(self.compute_rpcapi, self.servicegroup_api) self.notifier = rpc.get_notifier('compute') if CONF.ephemeral_storage_encryption.enabled: self.key_manager = key_manager.API() # Help us to record host in EventReporter self.host = CONF.host def _record_action_start(self, context, instance, action): objects.InstanceAction.action_start(context, instance.uuid, action, want_result=False) def _check_injected_file_quota(self, context, injected_files): """Enforce quota limits on injected files. Raises a OverQuota if any limit is exceeded. 
""" if not injected_files: return # Check number of files first try: objects.Quotas.limit_check(context, injected_files=len(injected_files)) local_limit.enforce_api_limit(local_limit.INJECTED_FILES, len(injected_files)) except exception.OnsetFileLimitExceeded: raise except exception.OverQuota: raise exception.OnsetFileLimitExceeded() # OK, now count path and content lengths; we're looking for # the max... max_path = 0 max_content = 0 for path, content in injected_files: max_path = max(max_path, len(path)) max_content = max(max_content, len(content)) try: objects.Quotas.limit_check(context, injected_file_path_bytes=max_path, injected_file_content_bytes=max_content) # TODO(johngarbutt) we can simplify the except clause when # the above legacy quota check is removed. local_limit.enforce_api_limit( local_limit.INJECTED_FILES_PATH, max_path) local_limit.enforce_api_limit( local_limit.INJECTED_FILES_CONTENT, max_content) except exception.OnsetFilePathLimitExceeded: raise except exception.OnsetFileContentLimitExceeded: raise except exception.OverQuota as exc: # Favor path limit over content limit for reporting # purposes if 'injected_file_path_bytes' in exc.kwargs['overs']: raise exception.OnsetFilePathLimitExceeded( allowed=exc.kwargs['quotas']['injected_file_path_bytes']) else: raise exception.OnsetFileContentLimitExceeded( allowed=exc.kwargs['quotas']['injected_file_content_bytes']) def _check_metadata_properties_quota(self, context, metadata=None): """Enforce quota limits on metadata properties.""" if not metadata: return if not isinstance(metadata, dict): msg = (_("Metadata type should be dict.")) raise exception.InvalidMetadata(reason=msg) num_metadata = len(metadata) try: objects.Quotas.limit_check(context, metadata_items=num_metadata) local_limit.enforce_api_limit( local_limit.SERVER_METADATA_ITEMS, num_metadata) except exception.MetadataLimitExceeded: raise except exception.OverQuota as exc: quota_metadata = exc.kwargs['quotas']['metadata_items'] raise exception.MetadataLimitExceeded(allowed=quota_metadata) # Because metadata is stored in the DB, we hard-code the size limits # In future, we may support more variable length strings, so we act # as if this is quota-controlled for forwards compatibility. # Those are only used in V2 API, from V2.1 API, those checks are # validated at API layer schema validation. for k, v in metadata.items(): try: utils.check_string_length(v) utils.check_string_length(k, min_length=1) except exception.InvalidInput as e: raise exception.InvalidMetadata(reason=e.format_message()) if len(k) > 255: msg = _("Metadata property key greater than 255 characters") raise exception.InvalidMetadataSize(reason=msg) if len(v) > 255: msg = _("Metadata property value greater than 255 characters") raise exception.InvalidMetadataSize(reason=msg) def _check_requested_secgroups(self, context, secgroups): """Check if the security group requested exists and belongs to the project. :param context: The nova request context. :type context: nova.context.RequestContext :param secgroups: list of requested security group names :type secgroups: list :returns: list of requested security group UUIDs; note that 'default' is a special case and will be unmodified if it's requested. 
""" security_groups = [] for secgroup in secgroups: # NOTE(sdague): default is handled special if secgroup == "default": security_groups.append(secgroup) continue secgroup_uuid = security_group_api.validate_name(context, secgroup) security_groups.append(secgroup_uuid) return security_groups def _check_requested_networks(self, context, requested_networks, max_count): """Check if the networks requested belongs to the project and the fixed IP address for each network provided is within same the network block """ if requested_networks is not None: if requested_networks.no_allocate: # If the network request was specifically 'none' meaning don't # allocate any networks, we just return the number of requested # instances since quotas don't change at all. return max_count # NOTE(danms): Temporary transition requested_networks = requested_networks.as_tuples() return self.network_api.validate_networks(context, requested_networks, max_count) def _handle_kernel_and_ramdisk(self, context, kernel_id, ramdisk_id, image): """Choose kernel and ramdisk appropriate for the instance. The kernel and ramdisk can be chosen in one of two ways: 1. Passed in with create-instance request. 2. Inherited from image metadata. If inherited from image metadata, and if that image metadata value is set to 'nokernel', both kernel and ramdisk will default to None. """ # Inherit from image if not specified image_properties = image.get('properties', {}) if kernel_id is None: kernel_id = image_properties.get('kernel_id') if ramdisk_id is None: ramdisk_id = image_properties.get('ramdisk_id') # Force to None if kernel_id indicates that a kernel is not to be used if kernel_id == 'nokernel': kernel_id = None ramdisk_id = None # Verify kernel and ramdisk exist (fail-fast) if kernel_id is not None: kernel_image = self.image_api.get(context, kernel_id) # kernel_id could have been a URI, not a UUID, so to keep behaviour # from before, which leaked that implementation detail out to the # caller, we return the image UUID of the kernel image and ramdisk # image (below) and not any image URIs that might have been # supplied. # TODO(jaypipes): Get rid of this silliness once we move to a real # Image object and hide all of that stuff within nova.image.glance kernel_id = kernel_image['id'] if ramdisk_id is not None: ramdisk_image = self.image_api.get(context, ramdisk_id) ramdisk_id = ramdisk_image['id'] return kernel_id, ramdisk_id @staticmethod def parse_availability_zone(context, availability_zone): # NOTE(vish): We have a legacy hack to allow admins to specify hosts # via az using az:host:node. It might be nice to expose an # api to specify specific hosts to force onto, but for # now it just supports this legacy hack. # NOTE(deva): It is also possible to specify az::node, in which case # the host manager will determine the correct host. 
forced_host = None forced_node = None if availability_zone and ':' in availability_zone: c = availability_zone.count(':') if c == 1: availability_zone, forced_host = availability_zone.split(':') elif c == 2: if '::' in availability_zone: availability_zone, forced_node = \ availability_zone.split('::') else: availability_zone, forced_host, forced_node = \ availability_zone.split(':') else: raise exception.InvalidInput( reason="Unable to parse availability_zone") if not availability_zone: availability_zone = CONF.default_schedule_zone return availability_zone, forced_host, forced_node def _ensure_auto_disk_config_is_valid(self, auto_disk_config_img, auto_disk_config, image): auto_disk_config_disabled = \ utils.is_auto_disk_config_disabled(auto_disk_config_img) if auto_disk_config_disabled and auto_disk_config: raise exception.AutoDiskConfigDisabledByImage(image=image) def _inherit_properties_from_image(self, image, auto_disk_config): image_properties = image.get('properties', {}) auto_disk_config_img = \ utils.get_auto_disk_config_from_image_props(image_properties) self._ensure_auto_disk_config_is_valid(auto_disk_config_img, auto_disk_config, image.get("id")) if auto_disk_config is None: auto_disk_config = strutils.bool_from_string(auto_disk_config_img) return { 'os_type': image_properties.get('os_type'), 'architecture': image_properties.get('architecture'), 'vm_mode': image_properties.get('vm_mode'), 'auto_disk_config': auto_disk_config } def _check_config_drive(self, config_drive): if config_drive: try: bool_val = strutils.bool_from_string(config_drive, strict=True) except ValueError: raise exception.ConfigDriveInvalidValue(option=config_drive) else: bool_val = False # FIXME(comstud): Bug ID 1193438 filed for this. This looks silly, # but this is because the config drive column is a String. False # is represented by using an empty string. And for whatever # reason, we rely on the DB to cast True to a String. return True if bool_val else '' def _validate_flavor_image( self, context, image_id, image, flavor, root_bdm, validate_numa=True, ): """Validate the flavor and image. This is called from the API service to ensure that the flavor extra-specs and image properties are self-consistent and compatible with each other. :param context: A context.RequestContext :param image_id: UUID of the image :param image: a dict representation of the image including properties, enforces the image status is active. :param flavor: Flavor object :param root_bdm: BlockDeviceMapping for root disk. Will be None for the resize case. :param validate_numa: Flag to indicate whether or not to validate the NUMA-related metadata. :raises: Many different possible exceptions. See api.openstack.compute.servers.INVALID_FLAVOR_IMAGE_EXCEPTIONS for the full list. """ if image and image['status'] != 'active': raise exception.ImageNotActive(image_id=image_id) self._validate_flavor_image_nostatus( context, image, flavor, root_bdm, validate_numa) @staticmethod def _detect_nonbootable_image_from_properties(image_id, image): """Check image for a property indicating it's nonbootable. This is called from the API service to ensure that there are no known image properties indicating that this image is of a type that we do not support booting from. Currently the only such property is 'cinder_encryption_key_id'. 
:param image_id: UUID of the image :param image: a dict representation of the image including properties :raises: ImageUnacceptable if the image properties indicate that booting this image is not supported """ if not image: return image_properties = image.get('properties', {}) # NOTE(lyarwood) Skip this check when image_id is None indicating that # the instance is booting from a volume that was itself initially # created from an image. As such we don't care if # cinder_encryption_key_id was against the original image as we are now # booting from an encrypted volume. if image_properties.get('cinder_encryption_key_id') and image_id: reason = _('Direct booting of an image uploaded from an ' 'encrypted volume is unsupported.') raise exception.ImageUnacceptable(image_id=image_id, reason=reason) @staticmethod def _validate_flavor_image_nostatus( context, image, flavor, root_bdm, validate_numa=True, validate_pci=False, ): """Validate the flavor and image. This is called from the API service to ensure that the flavor extra-specs and image properties are self-consistent and compatible with each other. :param context: A context.RequestContext :param image: a dict representation of the image including properties :param flavor: Flavor object :param root_bdm: BlockDeviceMapping for root disk. Will be None for the resize case. :param validate_numa: Flag to indicate whether or not to validate the NUMA-related metadata. :param validate_pci: Flag to indicate whether or not to validate the PCI-related metadata. :raises: Many different possible exceptions. See api.openstack.compute.servers.INVALID_FLAVOR_IMAGE_EXCEPTIONS for the full list. """ if not image: return image_properties = image.get('properties', {}) config_drive_option = image_properties.get( 'img_config_drive', 'optional') if config_drive_option not in ['optional', 'mandatory']: raise exception.InvalidImageConfigDrive( config_drive=config_drive_option) if flavor['memory_mb'] < int(image.get('min_ram') or 0): raise exception.FlavorMemoryTooSmall() # Verify flavor/image Virtio Packed Ring configuration conflict. hardware.get_packed_virtqueue_constraint(flavor, image) # Image min_disk is in gb, size is in bytes. For sanity, have them both # in bytes. image_min_disk = int(image.get('min_disk') or 0) * units.Gi image_size = int(image.get('size') or 0) # Target disk is a volume. Don't check flavor disk size because it # doesn't make sense, and check min_disk against the volume size. if root_bdm is not None and root_bdm.is_volume: # There are 2 possibilities here: # # 1. The target volume already exists but bdm.volume_size is not # yet set because this method is called before # _bdm_validate_set_size_and_instance during server create. # 2. The target volume doesn't exist, in which case the bdm will # contain the intended volume size # # Note that rebuild also calls this method with potentially a new # image but you can't rebuild a volume-backed server with a new # image (yet). # # Cinder does its own check against min_disk, so if the target # volume already exists this has already been done and we don't # need to check it again here. In this case, volume_size may not be # set on the bdm. # # If we're going to create the volume, the bdm will contain # volume_size. Therefore we should check it if it exists. This will # still be checked again by cinder when the volume is created, but # that will not happen until the request reaches a host. By # checking it here, the user gets an immediate and useful failure # indication. 
# # The third possibility is that we have failed to consider # something, and there are actually more than 2 possibilities. In # this case cinder will still do the check at volume creation time. # The behaviour will still be correct, but the user will not get an # immediate failure from the api, and will instead have to # determine why the instance is in an error state with a task of # block_device_mapping. # # We could reasonably refactor this check into _validate_bdm at # some future date, as the various size logic is already split out # in there. dest_size = root_bdm.volume_size if dest_size is not None: dest_size *= units.Gi if image_min_disk > dest_size: raise exception.VolumeSmallerThanMinDisk( volume_size=dest_size, image_min_disk=image_min_disk) # Target disk is a local disk whose size is taken from the flavor else: dest_size = flavor['root_gb'] * units.Gi # NOTE(johannes): root_gb is allowed to be 0 for legacy reasons # since libvirt interpreted the value differently than other # drivers. A value of 0 means don't check size. if dest_size != 0: if image_size > dest_size: raise exception.FlavorDiskSmallerThanImage( flavor_size=dest_size, image_size=image_size) if image_min_disk > dest_size: raise exception.FlavorDiskSmallerThanMinDisk( flavor_size=dest_size, image_min_disk=image_min_disk) else: # The user is attempting to create a server with a 0-disk # image-backed flavor, which can lead to issues with a large # image consuming an unexpectedly large amount of local disk # on the compute host. Check to see if the deployment will # allow that. if not context.can( servers_policies.ZERO_DISK_FLAVOR, fatal=False): raise exception.BootFromVolumeRequiredForZeroDiskFlavor() API._validate_flavor_image_numa_pci( image, flavor, validate_numa=validate_numa, validate_pci=validate_pci) # TODO(huaqiang): Remove in Wallaby when there is no nova-compute node # having a version prior to Victoria. @staticmethod def _check_compute_service_for_mixed_instance(numa_topology, min_comp_ver): """Check if the nova-compute service is ready to support mixed instance when the CPU allocation policy is 'mixed'. """ # No need to check the instance with no NUMA topology associated with. if numa_topology is None: return # No need to check if instance CPU policy is not 'mixed' if numa_topology.cpu_policy != fields_obj.CPUAllocationPolicy.MIXED: return # Catch a request creating a mixed instance, make sure all nova-compute # service have been upgraded and support the mixed policy. if min_comp_ver < MIN_VER_NOVA_COMPUTE_MIXED_POLICY: raise exception.MixedInstanceNotSupportByComputeService() @staticmethod def _validate_flavor_image_numa_pci( image, flavor, validate_numa=True, validate_pci=False, ): """Validate the flavor and image NUMA/PCI values. This is called from the API service to ensure that the flavor extra-specs and image properties are self-consistent and compatible with each other. :param image: a dict representation of the image including properties :param flavor: Flavor object :param validate_numa: Flag to indicate whether or not to validate the NUMA-related metadata. :param validate_pci: Flag to indicate whether or not to validate the PCI-related metadata. :raises: Many different possible exceptions. See api.openstack.compute.servers.INVALID_FLAVOR_IMAGE_EXCEPTIONS for the full list. """ image_meta = _get_image_meta_obj(image) # Only validate values of flavor/image so the return results of # following 'get' functions are not used. 
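# ---------------------------------------------------------------------------
# Illustrative aside (not part of Nova): a worked example of the disk-size
# checks performed by _validate_flavor_image_nostatus above for the
# local-disk (non volume-backed) case. Image ``min_disk`` and flavor
# ``root_gb`` are expressed in GiB while image ``size`` is in bytes, so
# everything is normalised to bytes before comparing. The numbers and the
# helper name below are made up for the example.
GiB = 1024 ** 3  # same value as oslo.utils units.Gi


def _check_local_disk(flavor_root_gb, image_size_bytes, image_min_disk_gb):
    dest_size = flavor_root_gb * GiB
    if dest_size == 0:
        # root_gb == 0 historically means "do not check the size".
        return
    if image_size_bytes > dest_size:
        raise ValueError('flavor disk smaller than image')
    if image_min_disk_gb * GiB > dest_size:
        raise ValueError('flavor disk smaller than image min_disk')


# A 20 GiB root disk comfortably fits a 2 GiB image that asks for 10 GiB...
_check_local_disk(20, 2 * GiB, 10)
# ...but a 5 GiB root disk fails the min_disk requirement.
try:
    _check_local_disk(5, 2 * GiB, 10)
except ValueError as exc:
    assert 'min_disk' in str(exc)
# ---------------------------------------------------------------------------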
hardware.get_mem_encryption_constraint(flavor, image_meta) hardware.get_pmu_constraint(flavor, image_meta) hardware.get_number_of_serial_ports(flavor, image_meta) hardware.get_realtime_cpu_constraint(flavor, image_meta) hardware.get_cpu_topology_constraints(flavor, image_meta) hardware.get_vif_multiqueue_constraint(flavor, image_meta) if validate_numa: hardware.numa_get_constraints(flavor, image_meta) if validate_pci: pci_request.get_pci_requests_from_flavor(flavor) def _get_image_defined_bdms(self, flavor, image_meta, root_device_name): image_properties = image_meta.get('properties', {}) # Get the block device mappings defined by the image. image_defined_bdms = image_properties.get('block_device_mapping', []) legacy_image_defined = not image_properties.get('bdm_v2', False) image_mapping = image_properties.get('mappings', []) if legacy_image_defined: image_defined_bdms = block_device.from_legacy_mapping( image_defined_bdms, None, root_device_name) else: image_defined_bdms = list(map(block_device.BlockDeviceDict, image_defined_bdms)) if image_mapping: image_mapping = self._prepare_image_mapping(flavor, image_mapping) image_defined_bdms = self._merge_bdms_lists( image_mapping, image_defined_bdms) return image_defined_bdms def _get_flavor_defined_bdms(self, flavor, block_device_mapping): flavor_defined_bdms = [] have_ephemeral_bdms = any(filter( block_device.new_format_is_ephemeral, block_device_mapping)) have_swap_bdms = any(filter( block_device.new_format_is_swap, block_device_mapping)) if flavor.get('ephemeral_gb') and not have_ephemeral_bdms: flavor_defined_bdms.append( block_device.create_blank_bdm(flavor['ephemeral_gb'])) if flavor.get('swap') and not have_swap_bdms: flavor_defined_bdms.append( block_device.create_blank_bdm(flavor['swap'], 'swap')) return flavor_defined_bdms def _merge_bdms_lists(self, overridable_mappings, overrider_mappings): """Override any block devices from the first list by device name :param overridable_mappings: list which items are overridden :param overrider_mappings: list which items override :returns: A merged list of bdms """ device_names = set(bdm['device_name'] for bdm in overrider_mappings if bdm['device_name']) return (overrider_mappings + [bdm for bdm in overridable_mappings if bdm['device_name'] not in device_names]) def _check_and_transform_bdm( self, context, base_options, flavor, image_meta, min_count, max_count, block_device_mapping, legacy_bdm, ): # NOTE (ndipanov): Assume root dev name is 'vda' if not supplied. # It's needed for legacy conversion to work. 
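# ---------------------------------------------------------------------------
# Illustrative aside (not part of Nova): the merge rule implemented by
# _merge_bdms_lists above, shown with plain dicts. Entries in the "overrider"
# list win whenever they share a device_name with an "overridable" entry;
# everything else from the overridable list is kept.
def _merge_by_device_name(overridable, overrider):
    taken = set(bdm['device_name'] for bdm in overrider if bdm['device_name'])
    return overrider + [bdm for bdm in overridable
                        if bdm['device_name'] not in taken]


_image_bdms = [{'device_name': 'vda', 'source': 'image'},
               {'device_name': 'vdb', 'source': 'blank'}]
_user_bdms = [{'device_name': 'vda', 'source': 'volume'}]

_merged = _merge_by_device_name(_image_bdms, _user_bdms)
# The user-supplied 'vda' replaces the image-defined one; 'vdb' is kept.
assert _merged == [{'device_name': 'vda', 'source': 'volume'},
                   {'device_name': 'vdb', 'source': 'blank'}]
# ---------------------------------------------------------------------------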
root_device_name = (base_options.get('root_device_name') or 'vda') image_ref = base_options.get('image_ref', '') # If the instance is booted by image and has a volume attached, # the volume cannot have the same device name as root_device_name if image_ref: for bdm in block_device_mapping: if (bdm.get('destination_type') == 'volume' and block_device.strip_dev(bdm.get( 'device_name')) == root_device_name): msg = _('The volume cannot be assigned the same device' ' name as the root device %s') % root_device_name raise exception.InvalidRequest(msg) image_defined_bdms = self._get_image_defined_bdms( flavor, image_meta, root_device_name) root_in_image_bdms = ( block_device.get_root_bdm(image_defined_bdms) is not None) if legacy_bdm: block_device_mapping = block_device.from_legacy_mapping( block_device_mapping, image_ref, root_device_name, no_root=root_in_image_bdms) elif root_in_image_bdms: # NOTE (ndipanov): client will insert an image mapping into the v2 # block_device_mapping, but if there is a bootable device in image # mappings - we need to get rid of the inserted image # NOTE (gibi): another case is when a server is booted with an # image to bdm mapping where the image only contains a bdm to a # snapshot. In this case the other image to bdm mapping # contains an unnecessary device with boot_index == 0. # Also in this case the image_ref is None as we are booting from # an image to volume bdm. def not_image_and_root_bdm(bdm): return not (bdm.get('boot_index') == 0 and bdm.get('source_type') == 'image') block_device_mapping = list( filter(not_image_and_root_bdm, block_device_mapping)) block_device_mapping = self._merge_bdms_lists( image_defined_bdms, block_device_mapping) if min_count > 1 or max_count > 1: if any(map(lambda bdm: bdm['source_type'] == 'volume', block_device_mapping)): msg = _('Cannot attach one or more volumes to multiple' ' instances') raise exception.InvalidRequest(msg) block_device_mapping += self._get_flavor_defined_bdms( flavor, block_device_mapping) return block_device_obj.block_device_make_list_from_dicts( context, block_device_mapping) def _get_image(self, context, image_href): if not image_href: return None, {} image = self.image_api.get(context, image_href) return image['id'], image def _checks_for_create_and_rebuild( self, context, image_id, image, flavor, metadata, files_to_inject, root_bdm, min_comp_ver, validate_numa=True, ): self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, files_to_inject) self._detect_nonbootable_image_from_properties(image_id, image) self._validate_flavor_image(context, image_id, image, flavor, root_bdm, validate_numa=validate_numa) # Do we support adding a sound device? 
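# ---------------------------------------------------------------------------
# Illustrative aside (not part of Nova): the multi-instance rule applied in
# _check_and_transform_bdm above, reduced to plain dicts. A request that
# creates more than one server may not reference an existing volume in its
# block device mappings, because a single volume cannot be attached to every
# instance in the batch. The helper name below is made up for the example.
def _reject_shared_volumes(min_count, max_count, bdms):
    if min_count > 1 or max_count > 1:
        if any(bdm['source_type'] == 'volume' for bdm in bdms):
            raise ValueError(
                'Cannot attach one or more volumes to multiple instances')


_reject_shared_volumes(1, 1, [{'source_type': 'volume'}])  # single server: OK
try:
    _reject_shared_volumes(1, 3, [{'source_type': 'volume'}])
except ValueError:
    pass  # multi-create with a volume BDM is rejected
# ---------------------------------------------------------------------------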
image_meta = objects.ImageMeta.from_dict(image) sound_model = hardware.get_sound_model(flavor, image_meta) if sound_model and (min_comp_ver < MIN_COMPUTE_SOUND_MODEL_TRAITS): raise exception.SoundModelRequestOldCompute() def _check_support_vnic_accelerator( self, context, requested_networks, min_comp_ver): if requested_networks: for request_net in requested_networks: if request_net.device_profile: if min_comp_ver < SUPPORT_VNIC_TYPE_ACCELERATOR: msg = ("Port with cyborg profile is not available" " until upgrade finished.") raise exception.ForbiddenPortsWithAccelerator(msg) def _check_vnic_remote_managed_min_version(self, context): min_version = (objects.service.get_minimum_version_all_cells( context, ['nova-compute'])) if min_version < SUPPORT_VNIC_TYPE_REMOTE_MANAGED: msg = ("Remote-managed ports are not supported" " until an upgrade is fully finished.") raise exception.ForbiddenWithRemoteManagedPorts(msg) def _check_support_vnic_remote_managed(self, context, requested_networks): if requested_networks: for request_net in requested_networks: if (request_net.port_id and self.network_api.is_remote_managed_port( context, request_net.port_id)): self._check_vnic_remote_managed_min_version(context) def _validate_and_build_base_options( self, context, flavor, boot_meta, image_href, image_id, kernel_id, ramdisk_id, display_name, display_description, hostname, key_name, key_data, security_groups, availability_zone, user_data, metadata, access_ip_v4, access_ip_v6, requested_networks, config_drive, auto_disk_config, reservation_id, max_count, supports_port_resource_request, min_comp_ver, ): """Verify all the input parameters regardless of the provisioning strategy being performed. """ if flavor['disabled']: raise exception.FlavorNotFound(flavor_id=flavor['id']) if user_data: try: base64utils.decode_as_bytes(user_data) except TypeError: raise exception.InstanceUserDataMalformed() # When using Neutron, _check_requested_secgroups will translate and # return any requested security group names to uuids. security_groups = self._check_requested_secgroups( context, security_groups) # Note: max_count is the number of instances requested by the user, # max_network_count is the maximum number of instances taking into # account any network quotas max_network_count = self._check_requested_networks( context, requested_networks, max_count) kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk( context, kernel_id, ramdisk_id, boot_meta) config_drive = self._check_config_drive(config_drive) if key_data is None and key_name is not None: key_pair = objects.KeyPair.get_by_name(context, context.user_id, key_name) key_data = key_pair.public_key else: key_pair = None root_device_name = block_device.prepend_dev( block_device.properties_root_device_name( boot_meta.get('properties', {}))) image_meta = _get_image_meta_obj(boot_meta) numa_topology = hardware.numa_get_constraints(flavor, image_meta) system_metadata = {} pci_numa_affinity_policy = hardware.get_pci_numa_policy_constraint( flavor, image_meta) # PCI requests come from two sources: instance flavor and # requested_networks. The first call in below returns an # InstancePCIRequests object which is a list of InstancePCIRequest # objects. 
The second call in below creates an InstancePCIRequest # object for each SR-IOV port, and append it to the list in the # InstancePCIRequests object pci_request_info = pci_request.get_pci_requests_from_flavor( flavor, affinity_policy=pci_numa_affinity_policy) result = self.network_api.create_resource_requests( context, requested_networks, pci_request_info, affinity_policy=pci_numa_affinity_policy) network_metadata, port_resource_requests, req_lvl_params = result self._check_support_vnic_accelerator( context, requested_networks, min_comp_ver) self._check_support_vnic_remote_managed(context, requested_networks) # Creating servers with ports that have resource requests, like QoS # minimum bandwidth rules, is only supported in a requested minimum # microversion. if port_resource_requests and not supports_port_resource_request: raise exception.CreateWithPortResourceRequestOldVersion() # TODO(gibi): remove this when Nova does not need to support Wallaby # computes any more. if (port_resource_requests and self.network_api.has_extended_resource_request_extension(context) ): # we only support the extended resource request if the computes are # upgraded to Xena. if min_comp_ver < MIN_COMPUTE_BOOT_WITH_EXTENDED_RESOURCE_REQUEST: raise exception.ExtendedResourceRequestOldCompute() base_options = { 'reservation_id': reservation_id, 'image_ref': image_href, 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', 'power_state': power_state.NOSTATE, 'vm_state': vm_states.BUILDING, 'config_drive': config_drive, 'user_id': context.user_id, 'project_id': context.project_id, 'instance_type_id': flavor['id'], 'memory_mb': flavor['memory_mb'], 'vcpus': flavor['vcpus'], 'root_gb': flavor['root_gb'], 'ephemeral_gb': flavor['ephemeral_gb'], 'display_name': display_name, 'display_description': display_description, 'hostname': hostname, 'user_data': user_data, 'key_name': key_name, 'key_data': key_data, 'locked': False, 'metadata': metadata or {}, 'access_ip_v4': access_ip_v4, 'access_ip_v6': access_ip_v6, 'availability_zone': availability_zone, 'root_device_name': root_device_name, 'progress': 0, 'pci_requests': pci_request_info, 'numa_topology': numa_topology, 'system_metadata': system_metadata, 'port_resource_requests': port_resource_requests, 'request_level_params': req_lvl_params, } options_from_image = self._inherit_properties_from_image( boot_meta, auto_disk_config) base_options.update(options_from_image) # return the validated options and maximum number of instances allowed # by the network quotas return (base_options, max_network_count, key_pair, security_groups, network_metadata) @staticmethod @api_db_api.context_manager.writer def _create_reqspec_buildreq_instmapping(context, rs, br, im): """Create the request spec, build request, and instance mapping in a single database transaction. The RequestContext must be passed in to this method so that the database transaction context manager decorator will nest properly and include each create() into the same transaction context. """ rs.create() br.create() im.create() def _validate_host_or_node(self, context, host, hypervisor_hostname): """Check whether compute nodes exist by validating the host and/or the hypervisor_hostname. There are three cases: 1. If only host is supplied, we can lookup the HostMapping in the API DB. 2. If only node is supplied, we can query a resource provider with that name in placement. 3. If both host and node are supplied, we can get the cell from HostMapping and from that lookup the ComputeNode with the given cell. 
:param context: The API request context. :param host: Target host. :param hypervisor_hostname: Target node. :raises: ComputeHostNotFound if we find no compute nodes with host and/or hypervisor_hostname. """ if host: # When host is specified. try: host_mapping = objects.HostMapping.get_by_host(context, host) except exception.HostMappingNotFound: LOG.warning('No host-to-cell mapping found for host ' '%(host)s.', {'host': host}) raise exception.ComputeHostNotFound(host=host) # When both host and node are specified. if hypervisor_hostname: cell = host_mapping.cell_mapping with nova_context.target_cell(context, cell) as cctxt: # Here we only do an existence check, so we don't # need to store the return value into a variable. objects.ComputeNode.get_by_host_and_nodename( cctxt, host, hypervisor_hostname) elif hypervisor_hostname: # When only node is specified. try: self.placementclient.get_provider_by_name( context, hypervisor_hostname) except exception.ResourceProviderNotFound: raise exception.ComputeHostNotFound(host=hypervisor_hostname) def _get_volumes_for_bdms(self, context, bdms): """Get the pre-existing volumes from cinder for the list of BDMs. :param context: nova auth RequestContext :param bdms: BlockDeviceMappingList which has zero or more BDMs with a pre-existing volume_id specified. :return: dict, keyed by volume id, of volume dicts :raises: VolumeNotFound - if a given volume does not exist :raises: CinderConnectionFailed - if there are problems communicating with the cinder API :raises: Forbidden - if the user token does not have authority to see a volume """ volumes = {} for bdm in bdms: if bdm.volume_id: volumes[bdm.volume_id] = self.volume_api.get( context, bdm.volume_id) return volumes @staticmethod def _validate_vol_az_for_create(instance_az, volumes): """Performs cross_az_attach validation for the instance and volumes. If [cinder]/cross_az_attach=True (default) this method is a no-op. If [cinder]/cross_az_attach=False, this method will validate that: 1. All volumes are in the same availability zone. 2. The volume AZ matches the instance AZ. If the instance is being created without a specific AZ (either via the user request or the [DEFAULT]/default_schedule_zone option), and the volume AZ matches [DEFAULT]/default_availability_zone for compute services, then the method returns the volume AZ so it can be set in the RequestSpec as if the user requested the zone explicitly. :param instance_az: Availability zone for the instance. In this case the host is not yet selected so the instance AZ value should come from one of the following cases: * The user requested availability zone. * [DEFAULT]/default_schedule_zone (defaults to None) if the request does not specify an AZ (see parse_availability_zone). :param volumes: iterable of dicts of cinder volumes to be attached to the server being created :returns: None or volume AZ to set in the RequestSpec for the instance :raises: MismatchVolumeAZException if the instance and volume AZ do not match """ if CONF.cinder.cross_az_attach: return if not volumes: return # First make sure that all of the volumes are in the same zone. 
vol_zones = [vol['availability_zone'] for vol in volumes] if len(set(vol_zones)) > 1: msg = (_("Volumes are in different availability zones: %s") % ','.join(vol_zones)) raise exception.MismatchVolumeAZException(reason=msg) volume_az = vol_zones[0] # In this case the instance.host should not be set so the instance AZ # value should come from instance.availability_zone which will be one # of the following cases: # * The user requested availability zone. # * [DEFAULT]/default_schedule_zone (defaults to None) if the request # does not specify an AZ (see parse_availability_zone). # If the instance is not being created with a specific AZ (the AZ is # input via the API create request *or* [DEFAULT]/default_schedule_zone # is not None), then check to see if we should use the default AZ # (which by default matches the default AZ in Cinder, i.e. 'nova'). if instance_az is None: # Check if the volume AZ is the same as our default AZ for compute # hosts (nova) and if so, assume we are OK because the user did not # request an AZ and will get the same default. If the volume AZ is # not the same as our default, return the volume AZ so the caller # can put it into the request spec so the instance is scheduled # to the same zone as the volume. Note that we are paranoid about # the default here since both nova and cinder's default backend AZ # is "nova" and we do not want to pin the server to that AZ since # it's special, i.e. just like we tell users in the docs to not # specify availability_zone='nova' when creating a server since we # might not be able to migrate it later. if volume_az != CONF.default_availability_zone: return volume_az # indication to set in request spec # The volume AZ is the same as the default nova AZ so we will be OK return if instance_az != volume_az: msg = _("Server and volumes are not in the same availability " "zone. Server is in: %(instance_az)s. Volumes are in: " "%(volume_az)s") % { 'instance_az': instance_az, 'volume_az': volume_az} raise exception.MismatchVolumeAZException(reason=msg) def _provision_instances( self, context, flavor, min_count, max_count, base_options, boot_meta, security_groups, block_device_mapping, shutdown_terminate, instance_group, check_server_group_quota, filter_properties, key_pair, tags, trusted_certs, supports_multiattach, network_metadata=None, requested_host=None, requested_hypervisor_hostname=None, ): # NOTE(boxiang): Check whether compute nodes exist by validating # the host and/or the hypervisor_hostname. Pass the destination # to the scheduler with host and/or hypervisor_hostname(node). 
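# ---------------------------------------------------------------------------
# Illustrative aside (not part of Nova): the availability-zone agreement rule
# enforced by _validate_vol_az_for_create above when cross_az_attach is
# disabled, reduced to plain data. All volume AZs must match each other and
# must match the instance AZ when one was requested; when no instance AZ was
# requested, the volume AZ is returned so the caller can pin the request to
# it. The default zone name and helper name below are assumptions for the
# example.
def _pick_az(instance_az, volume_azs, default_az='nova'):
    if not volume_azs:
        return None
    if len(set(volume_azs)) > 1:
        raise ValueError('volumes are in different availability zones')
    volume_az = volume_azs[0]
    if instance_az is None:
        # No explicit request: only pin to the volume AZ if it is not the
        # shared default zone.
        return volume_az if volume_az != default_az else None
    if instance_az != volume_az:
        raise ValueError('server and volumes are not in the same AZ')
    return None


assert _pick_az(None, ['az1', 'az1']) == 'az1'
assert _pick_az(None, ['nova']) is None
assert _pick_az('az1', ['az1']) is None
try:
    _pick_az('az1', ['az2'])
except ValueError:
    pass  # mismatched zones are rejected
# ---------------------------------------------------------------------------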
destination = None if requested_host or requested_hypervisor_hostname: self._validate_host_or_node(context, requested_host, requested_hypervisor_hostname) destination = objects.Destination() if requested_host: destination.host = requested_host destination.node = requested_hypervisor_hostname # Check quotas num_instances = compute_utils.check_num_instances_quota( context, flavor, min_count, max_count) # Find out whether or not we are a BFV instance if block_device_mapping: root = block_device_mapping.root_bdm() is_bfv = bool(root and root.is_volume) else: # If we have no BDMs, we're clearly not BFV is_bfv = False # NOTE(johngarbutt) when unified limits not used, this just # returns num_instances back again # NOTE: If we want to enforce quota on port or cyborg resources in the # future, this enforce call will need to move after we have populated # the RequestSpec with all of the requested resources and use the real # RequestSpec to get the overall resource usage of the instance. num_instances = placement_limits.enforce_num_instances_and_flavor( context, context.project_id, flavor, is_bfv, min_count, num_instances) security_groups = security_group_api.populate_security_groups( security_groups) port_resource_requests = base_options.pop('port_resource_requests') req_lvl_params = base_options.pop('request_level_params') instances_to_build = [] # We could be iterating over several instances with several BDMs per # instance and those BDMs could be using a lot of the same images so # we want to cache the image API GET results for performance. image_cache = {} # dict of image dicts keyed by image id # Before processing the list of instances get all of the requested # pre-existing volumes so we can do some validation here rather than # down in the bowels of _validate_bdm. volumes = self._get_volumes_for_bdms(context, block_device_mapping) volume_az = self._validate_vol_az_for_create( base_options['availability_zone'], volumes.values()) if volume_az: # This means the instance is not being created in a specific zone # but needs to match the zone that the volumes are in so update # base_options to match the volume zone. base_options['availability_zone'] = volume_az LOG.debug("Going to run %s instances...", num_instances) extra_specs = flavor.extra_specs dp_name = extra_specs.get('accel:device_profile') dp_request_groups = [] if dp_name: dp_request_groups = cyborg.get_device_profile_request_groups( context, dp_name) try: for idx in range(num_instances): # Create a uuid for the instance so we can store the # RequestSpec before the instance is created. instance_uuid = uuidutils.generate_uuid() # Store the RequestSpec that will be used for scheduling. req_spec = objects.RequestSpec.from_components( context, instance_uuid, boot_meta, flavor, base_options['numa_topology'], base_options['pci_requests'], filter_properties, instance_group, base_options['availability_zone'], security_groups=security_groups, port_resource_requests=port_resource_requests, request_level_params=req_lvl_params) req_spec.is_bfv = is_bfv # NOTE(danms): We need to record num_instances on the request # spec as this is how the conductor knows how many were in this # batch. req_spec.num_instances = num_instances # NOTE(stephenfin): The network_metadata field is not persisted # inside RequestSpec object. 
if network_metadata: req_spec.network_metadata = network_metadata if destination: req_spec.requested_destination = destination if dp_request_groups: req_spec.requested_resources.extend(dp_request_groups) # Create an instance object, but do not store in db yet. instance = objects.Instance(context=context) instance.uuid = instance_uuid instance.update(base_options) instance.keypairs = objects.KeyPairList(objects=[]) if key_pair: instance.keypairs.objects.append(key_pair) instance.trusted_certs = self._retrieve_trusted_certs_object( context, trusted_certs) self._populate_instance_for_create( context, instance, boot_meta, idx, security_groups, flavor, num_instances, shutdown_terminate) block_device_mapping = ( self._bdm_validate_set_size_and_instance(context, instance, flavor, block_device_mapping, image_cache, volumes, supports_multiattach)) instance_tags = self._transform_tags(tags, instance.uuid) build_request = objects.BuildRequest(context, instance=instance, instance_uuid=instance.uuid, project_id=instance.project_id, block_device_mappings=block_device_mapping, tags=instance_tags) # Create an instance_mapping. The null cell_mapping indicates # that the instance doesn't yet exist in a cell, and lookups # for it need to instead look for the RequestSpec. # cell_mapping will be populated after scheduling, with a # scheduling failure using the cell_mapping for the special # cell0. inst_mapping = objects.InstanceMapping(context=context) inst_mapping.instance_uuid = instance_uuid inst_mapping.project_id = context.project_id inst_mapping.user_id = context.user_id inst_mapping.cell_mapping = None # Create the request spec, build request, and instance mapping # records in a single transaction so that if a DBError is # raised from any of them, all INSERTs will be rolled back and # no orphaned records will be left behind. self._create_reqspec_buildreq_instmapping(context, req_spec, build_request, inst_mapping) instances_to_build.append( (req_spec, build_request, inst_mapping)) if instance_group: if check_server_group_quota: try: objects.Quotas.check_deltas( context, {'server_group_members': 1}, instance_group, context.user_id) local_limit.enforce_db_limit( context, local_limit.SERVER_GROUP_MEMBERS, entity_scope=instance_group.uuid, delta=1) except exception.GroupMemberLimitExceeded: raise except exception.OverQuota: msg = _("Quota exceeded, too many servers in " "group") raise exception.OverQuota(msg) members = objects.InstanceGroup.add_members( context, instance_group.uuid, [instance.uuid]) # NOTE(melwitt): We recheck the quota after creating the # object to prevent users from allocating more resources # than their allowed quota in the event of a race. This is # configurable because it can be expensive if strict quota # limits are not required in a deployment. if CONF.quota.recheck_quota and check_server_group_quota: try: objects.Quotas.check_deltas( context, {'server_group_members': 0}, instance_group, context.user_id) # TODO(johngarbutt): decide if we need this check # The quota rechecking of limits is really just to # protect against denial of service attacks that # aim to fill up the database. Its usefulness could # be debated. 
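                            # Illustrative example (hypothetical numbers):
                            # with a group limit of 10 members, two concurrent
                            # boots can each pass the earlier delta=1 check at
                            # 9 members; these delta=0 rechecks after
                            # add_members then see 11 and the member that was
                            # just added is removed again in the handlers
                            # below.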
local_limit.enforce_db_limit( context, local_limit.SERVER_GROUP_MEMBERS, entity_scope=instance_group.uuid, delta=0) except exception.GroupMemberLimitExceeded: with excutils.save_and_reraise_exception(): objects.InstanceGroup._remove_members_in_db( context, instance_group.id, [instance.uuid]) except exception.OverQuota: objects.InstanceGroup._remove_members_in_db( context, instance_group.id, [instance.uuid]) msg = _("Quota exceeded, too many servers in " "group") raise exception.OverQuota(msg) # list of members added to servers group in this iteration # is needed to check quota of server group during add next # instance instance_group.members.extend(members) # In the case of any exceptions, attempt DB cleanup except Exception: with excutils.save_and_reraise_exception(): self._cleanup_build_artifacts(None, instances_to_build) return instances_to_build @staticmethod def _retrieve_trusted_certs_object(context, trusted_certs, rebuild=False): """Convert user-requested trusted cert IDs to TrustedCerts object Also validates that the deployment is new enough to support trusted image certification validation. :param context: The user request auth context :param trusted_certs: list of user-specified trusted cert string IDs, may be None :param rebuild: True if rebuilding the server, False if creating a new server :returns: nova.objects.TrustedCerts object or None if no user-specified trusted cert IDs were given and nova is not configured with default trusted cert IDs """ # Retrieve trusted_certs parameter, or use CONF value if certificate # validation is enabled if trusted_certs: certs_to_return = objects.TrustedCerts(ids=trusted_certs) elif (CONF.glance.verify_glance_signatures and CONF.glance.enable_certificate_validation and CONF.glance.default_trusted_certificate_ids): certs_to_return = objects.TrustedCerts( ids=CONF.glance.default_trusted_certificate_ids) else: return None return certs_to_return @staticmethod def _get_requested_instance_group(context, filter_properties): if (not filter_properties or not filter_properties.get('scheduler_hints')): return group_hint = filter_properties.get('scheduler_hints').get('group') if not group_hint: return return objects.InstanceGroup.get_by_uuid(context, group_hint) def _update_ephemeral_encryption_bdms( self, flavor: 'objects.Flavor', image_meta_dict: ty.Dict[str, ty.Any], block_device_mapping: 'objects.BlockDeviceMappingList', ) -> None: """Update local BlockDeviceMappings when ephemeral encryption requested Enable ephemeral encryption in all local BlockDeviceMappings when requested in the flavor or image. Also optionally set the format and options if also provided. :param flavor: The instance flavor for the request :param image_meta_dict: The image metadata for the request :block_device_mapping: The current block_device_mapping for the request """ image_meta = _get_image_meta_obj(image_meta_dict) if not hardware.get_ephemeral_encryption_constraint( flavor, image_meta): return # NOTE(lyarwood): Attempt to find the format in the flavor and image, # if one isn't found then the compute will need to provide and save a # default format during a the initial build. eph_format = hardware.get_ephemeral_encryption_format( flavor, image_meta) # NOTE(lyarwood): The term ephemeral is overloaded in the codebase, # what it actually means in the context of ephemeral encryption is # anything local to the compute host so use the is_local property. 
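        # For example (illustrative, assuming the standard extra spec names):
        # a flavor with hw:ephemeral_encryption=true and
        # hw:ephemeral_encryption_format=luks marks every local BDM below
        # (local root, ephemeral and swap disks) with encrypted=True and
        # encryption_format='luks'.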
# TODO(lyarwood): Add .get_local_devices() to BlockDeviceMappingList for bdm in [b for b in block_device_mapping if b.is_local]: bdm.encrypted = True if eph_format: bdm.encryption_format = eph_format def _create_instance(self, context, flavor, image_href, kernel_id, ramdisk_id, min_count, max_count, display_name, display_description, hostname, key_name, key_data, security_groups, availability_zone, user_data, metadata, injected_files, admin_password, access_ip_v4, access_ip_v6, requested_networks, config_drive, block_device_mapping, auto_disk_config, filter_properties, reservation_id=None, legacy_bdm=True, shutdown_terminate=False, check_server_group_quota=False, tags=None, supports_multiattach=False, trusted_certs=None, supports_port_resource_request=False, requested_host=None, requested_hypervisor_hostname=None): """Verify all the input parameters regardless of the provisioning strategy being performed and schedule the instance(s) for creation. """ # Normalize and setup some parameters if reservation_id is None: reservation_id = utils.generate_uid('r') security_groups = security_groups or ['default'] min_count = min_count or 1 max_count = max_count or min_count block_device_mapping = block_device_mapping or [] tags = tags or [] if image_href: image_id, boot_meta = self._get_image(context, image_href) else: # This is similar to the logic in _retrieve_trusted_certs_object. if (trusted_certs or (CONF.glance.verify_glance_signatures and CONF.glance.enable_certificate_validation and CONF.glance.default_trusted_certificate_ids)): msg = _("Image certificate validation is not supported " "when booting from volume") raise exception.CertificateValidationFailed(message=msg) image_id = None boot_meta = block_device.get_bdm_image_metadata( context, self.image_api, self.volume_api, block_device_mapping, legacy_bdm) # Only lookup the minimum compute version once min_comp_ver = objects.service.get_minimum_version_all_cells( context, ["nova-compute"]) self._check_auto_disk_config(image=boot_meta, auto_disk_config=auto_disk_config) ( base_options, max_net_count, key_pair, security_groups, network_metadata, ) = self._validate_and_build_base_options( context, flavor, boot_meta, image_href, image_id, kernel_id, ramdisk_id, display_name, display_description, hostname, key_name, key_data, security_groups, availability_zone, user_data, metadata, access_ip_v4, access_ip_v6, requested_networks, config_drive, auto_disk_config, reservation_id, max_count, supports_port_resource_request, min_comp_ver ) # TODO(huaqiang): Remove in Wallaby # check nova-compute nodes have been updated to Victoria to support the # mixed CPU policy for creating a new instance. numa_topology = base_options.get('numa_topology') self._check_compute_service_for_mixed_instance( numa_topology, min_comp_ver) # max_net_count is the maximum number of instances requested by the # user adjusted for any network quota constraints, including # consideration of connections to each requested network if max_net_count < min_count: raise exception.PortLimitExceeded() elif max_net_count < max_count: LOG.info("max count reduced from %(max_count)d to " "%(max_net_count)d due to network port quota", {'max_count': max_count, 'max_net_count': max_net_count}) max_count = max_net_count # _check_and_transform_bdm transforms block_device_mapping from API # bdms (dicts) to a BlockDeviceMappingList. 
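        # For example (illustrative request payload), an API bdm dict such as
        # {'uuid': <image id>, 'source_type': 'image',
        #  'destination_type': 'volume', 'volume_size': 20, 'boot_index': 0}
        # becomes a BlockDeviceMapping object in the returned list, combined
        # with any image- or flavor-defined (swap/ephemeral) mappings so the
        # root bdm can be determined below.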
block_device_mapping = self._check_and_transform_bdm(context, base_options, flavor, boot_meta, min_count, max_count, block_device_mapping, legacy_bdm) # Update any local BlockDeviceMapping objects if ephemeral encryption # has been requested though flavor extra specs or image properties self._update_ephemeral_encryption_bdms( flavor, boot_meta, block_device_mapping) # We can't do this check earlier because we need bdms from all sources # to have been merged in order to get the root bdm. # Set validate_numa=False since numa validation is already done by # _validate_and_build_base_options(). self._checks_for_create_and_rebuild(context, image_id, boot_meta, flavor, metadata, injected_files, block_device_mapping.root_bdm(), min_comp_ver, validate_numa=False) instance_group = self._get_requested_instance_group( context, filter_properties) tags = self._create_tag_list_obj(context, tags) instances_to_build = self._provision_instances( context, flavor, min_count, max_count, base_options, boot_meta, security_groups, block_device_mapping, shutdown_terminate, instance_group, check_server_group_quota, filter_properties, key_pair, tags, trusted_certs, supports_multiattach, network_metadata, requested_host, requested_hypervisor_hostname) instances = [] request_specs = [] build_requests = [] for rs, build_request, im in instances_to_build: build_requests.append(build_request) instance = build_request.get_new_instance(context) instances.append(instance) # NOTE(sbauza): Add the requested networks so the related scheduler # pre-filter can verify them if requested_networks is not None: rs.requested_networks = requested_networks request_specs.append(rs) self.compute_task_api.schedule_and_build_instances( context, build_requests=build_requests, request_spec=request_specs, image=boot_meta, admin_password=admin_password, injected_files=injected_files, requested_networks=requested_networks, block_device_mapping=block_device_mapping, tags=tags) return instances, reservation_id @staticmethod def _cleanup_build_artifacts(instances, instances_to_build): # instances_to_build is a list of tuples: # (RequestSpec, BuildRequest, InstanceMapping) # Be paranoid about artifacts being deleted underneath us. 
for instance in instances or []: try: instance.destroy() except exception.InstanceNotFound: pass for rs, build_request, im in instances_to_build or []: try: rs.destroy() except exception.RequestSpecNotFound: pass try: build_request.destroy() except exception.BuildRequestNotFound: pass try: im.destroy() except exception.InstanceMappingNotFound: pass @staticmethod def _volume_size(flavor, bdm): size = bdm.get('volume_size') # NOTE (ndipanov): inherit flavor size only for swap and ephemeral if (size is None and bdm.get('source_type') == 'blank' and bdm.get('destination_type') == 'local'): if bdm.get('guest_format') == 'swap': size = flavor.get('swap', 0) else: size = flavor.get('ephemeral_gb', 0) return size def _prepare_image_mapping(self, flavor, mappings): """Extract and format blank devices from image mappings.""" prepared_mappings = [] for bdm in block_device.mappings_prepend_dev(mappings): LOG.debug("Image bdm %s", bdm) virtual_name = bdm['virtual'] if virtual_name == 'ami' or virtual_name == 'root': continue if not block_device.is_swap_or_ephemeral(virtual_name): continue guest_format = bdm.get('guest_format') if virtual_name == 'swap': guest_format = 'swap' if not guest_format: guest_format = CONF.default_ephemeral_format values = block_device.BlockDeviceDict({ 'device_name': bdm['device'], 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'guest_format': guest_format, 'delete_on_termination': True, 'boot_index': -1}) values['volume_size'] = self._volume_size( flavor, values) if values['volume_size'] == 0: continue prepared_mappings.append(values) return prepared_mappings def _bdm_validate_set_size_and_instance(self, context, instance, flavor, block_device_mapping, image_cache, volumes, supports_multiattach=False): """Ensure the bdms are valid, then set size and associate with instance Because this method can be called multiple times when more than one instance is booted in a single request it makes a copy of the bdm list. :param context: nova auth RequestContext :param instance: Instance object :param flavor: Flavor object - used for swap and ephemeral BDMs :param block_device_mapping: BlockDeviceMappingList object :param image_cache: dict of image dicts keyed by id which is used as a cache in case there are multiple BDMs in the same request using the same image to avoid redundant GET calls to the image service :param volumes: dict, keyed by volume id, of volume dicts from cinder :param supports_multiattach: True if the request supports multiattach volumes, False otherwise """ LOG.debug("block_device_mapping %s", list(block_device_mapping), instance_uuid=instance.uuid) self._validate_bdm( context, instance, flavor, block_device_mapping, image_cache, volumes, supports_multiattach) instance_block_device_mapping = block_device_mapping.obj_clone() for bdm in instance_block_device_mapping: bdm.volume_size = self._volume_size(flavor, bdm) bdm.instance_uuid = instance.uuid return instance_block_device_mapping @staticmethod def _check_requested_volume_type(bdm, volume_type_id_or_name, volume_types): """If we are specifying a volume type, we need to get the volume type details from Cinder and make sure the ``volume_type`` is available. """ # NOTE(brinzhang): Verify that the specified volume type exists. # And save the volume type name internally for consistency in the # BlockDeviceMapping object. 
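        # For example, volume_type_id_or_name may be either the volume type's
        # UUID or its name (e.g. a hypothetical 'fast-ssd'); in both cases the
        # canonical name returned by Cinder is what gets stored on
        # bdm.volume_type below.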
for vol_type in volume_types: if (volume_type_id_or_name == vol_type['id'] or volume_type_id_or_name == vol_type['name']): bdm.volume_type = vol_type['name'] break else: raise exception.VolumeTypeNotFound( id_or_name=volume_type_id_or_name) def _validate_bdm( self, context, instance, flavor, block_device_mappings, image_cache, volumes, supports_multiattach=False, ): """Validate requested block device mappings. :param context: nova auth RequestContext :param instance: Instance object :param flavor: Flavor object - used for swap and ephemeral BDMs :param block_device_mappings: BlockDeviceMappingList object :param image_cache: dict of image dicts keyed by id which is used as a cache in case there are multiple BDMs in the same request using the same image to avoid redundant GET calls to the image service :param volumes: dict, keyed by volume id, of volume dicts from cinder :param supports_multiattach: True if the request supports multiattach volumes, False otherwise """ # Make sure that the boot indexes make sense. # Setting a negative value or None indicates that the device should not # be used for booting. boot_indexes = sorted([bdm.boot_index for bdm in block_device_mappings if bdm.boot_index is not None and bdm.boot_index >= 0]) # Each device which is capable of being used as boot device should # be given a unique boot index, starting from 0 in ascending order, and # there needs to be at least one boot device. if not boot_indexes or any(i != v for i, v in enumerate(boot_indexes)): # Convert the BlockDeviceMappingList to a list for repr details. LOG.debug('Invalid block device mapping boot sequence for ' 'instance: %s', list(block_device_mappings), instance=instance) raise exception.InvalidBDMBootSequence() volume_types = None for bdm in block_device_mappings: volume_type = bdm.volume_type if volume_type: if not volume_types: # In order to reduce the number of hit cinder APIs, # initialize our cache of volume types. volume_types = self.volume_api.get_all_volume_types( context) # NOTE(brinzhang): Ensure the validity of volume_type. self._check_requested_volume_type(bdm, volume_type, volume_types) # NOTE(vish): For now, just make sure the volumes are accessible. # Additionally, check that the volume can be attached to this # instance. snapshot_id = bdm.snapshot_id volume_id = bdm.volume_id image_id = bdm.image_id if image_id is not None: if (image_id != instance.get('image_ref') and image_id not in image_cache): try: # Cache the results of the image GET so we do not make # the same request for the same image if processing # multiple BDMs or multiple servers with the same image image_cache[image_id] = self._get_image( context, image_id) except Exception: raise exception.InvalidBDMImage(id=image_id) if (bdm.source_type == 'image' and bdm.destination_type == 'volume' and not bdm.volume_size): raise exception.InvalidBDM(message=_("Images with " "destination_type 'volume' need to have a non-zero " "size specified")) elif volume_id is not None: try: volume = volumes[volume_id] # We do not validate the instance and volume AZ here # because that is done earlier by _provision_instances. 
self._check_attach_and_reserve_volume( context, volume, instance, bdm, supports_multiattach, validate_az=False) bdm.volume_size = volume.get('size') except (exception.CinderConnectionFailed, exception.InvalidVolume, exception.MultiattachNotSupportedOldMicroversion): raise except exception.InvalidInput as exc: raise exception.InvalidVolume(reason=exc.format_message()) except Exception as e: LOG.info('Failed validating volume %s. Error: %s', volume_id, e) raise exception.InvalidBDMVolume(id=volume_id) elif snapshot_id is not None: try: snap = self.volume_api.get_snapshot(context, snapshot_id) bdm.volume_size = bdm.volume_size or snap.get('size') except exception.CinderConnectionFailed: raise except Exception: raise exception.InvalidBDMSnapshot(id=snapshot_id) elif (bdm.source_type == 'blank' and bdm.destination_type == 'volume' and not bdm.volume_size): raise exception.InvalidBDM(message=_("Blank volumes " "(source: 'blank', dest: 'volume') need to have non-zero " "size")) # NOTE(lyarwood): Ensure the disk_bus is at least known to Nova. # The virt driver may reject this later but for now just ensure # it's listed as an acceptable value of the DiskBus field class. disk_bus = bdm.disk_bus if 'disk_bus' in bdm else None if disk_bus and disk_bus not in fields_obj.DiskBus.ALL: raise exception.InvalidBDMDiskBus(disk_bus=disk_bus) ephemeral_size = sum(bdm.volume_size or flavor['ephemeral_gb'] for bdm in block_device_mappings if block_device.new_format_is_ephemeral(bdm)) if ephemeral_size > flavor['ephemeral_gb']: raise exception.InvalidBDMEphemeralSize() # There should be only one swap swap_list = block_device.get_bdm_swap_list(block_device_mappings) if len(swap_list) > 1: msg = _("More than one swap drive requested.") raise exception.InvalidBDMFormat(details=msg) if swap_list: swap_size = swap_list[0].volume_size or 0 if swap_size > flavor['swap']: raise exception.InvalidBDMSwapSize() max_local = CONF.max_local_block_devices if max_local >= 0: num_local = len([bdm for bdm in block_device_mappings if bdm.destination_type == 'local']) if num_local > max_local: raise exception.InvalidBDMLocalsLimit() def _populate_instance_names(self, instance, num_instances, index): """Populate instance display_name and hostname. :param instance: The instance to set the display_name, hostname for :type instance: nova.objects.Instance :param num_instances: Total number of instances being created in this request :param index: The 0-based index of this particular instance """ # NOTE(mriedem): This is only here for test simplicity since a server # name is required in the REST API. if 'display_name' not in instance or instance.display_name is None: instance.display_name = 'Server %s' % instance.uuid # only set the hostname if the user hasn't already requested one if 'hostname' not in instance or not instance.hostname: # if we're booting multiple instances, we need to add an indexing # suffix to both instance.hostname and instance.display_name. # This is not necessary for a single instance. 
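            # Illustrative example (hypothetical names): booting 3 servers
            # with display_name='web server' gives the first instance the
            # hostname 'web-server-1' (sanitized name plus 1-based index) and
            # the display_name 'web server-1'; a single-server boot gets no
            # suffix.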
hostname = utils.sanitize_hostname(instance.display_name) if not hostname: hostname = f'Server-{instance.uuid}' elif num_instances > 1: hostname = f'{hostname}-{index + 1}' instance.hostname = hostname if num_instances > 1: instance.display_name = f'{instance.display_name}-{index + 1}' def _populate_instance_for_create( self, context, instance, image, index, security_groups, flavor, num_instances, shutdown_terminate, ): """Build the beginning of a new instance.""" instance.launch_index = index instance.vm_state = vm_states.BUILDING instance.task_state = task_states.SCHEDULING info_cache = objects.InstanceInfoCache() info_cache.instance_uuid = instance.uuid info_cache.network_info = network_model.NetworkInfo() instance.info_cache = info_cache instance.flavor = flavor instance.old_flavor = None instance.new_flavor = None if CONF.ephemeral_storage_encryption.enabled: # NOTE(kfarr): dm-crypt expects the cipher in a # hyphenated format: cipher-chainmode-ivmode # (ex: aes-xts-plain64). The algorithm needs # to be parsed out to pass to the key manager (ex: aes). cipher = CONF.ephemeral_storage_encryption.cipher algorithm = cipher.split('-')[0] if cipher else None instance.ephemeral_key_uuid = self.key_manager.create_key( context, algorithm=algorithm, length=CONF.ephemeral_storage_encryption.key_size) else: instance.ephemeral_key_uuid = None # Store image properties so we can use them later # (for notifications, etc). Only store what we can. if not instance.obj_attr_is_set('system_metadata'): instance.system_metadata = {} # Make sure we have the dict form that we need for instance_update. instance.system_metadata = utils.instance_sys_meta(instance) system_meta = utils.get_system_metadata_from_image( image, flavor) # In case we couldn't find any suitable base_image system_meta.setdefault('image_base_image_ref', instance.image_ref) system_meta['owner_user_name'] = context.user_name system_meta['owner_project_name'] = context.project_name instance.system_metadata.update(system_meta) # Since the removal of nova-network, we don't actually store anything # in the database. Instead, we proxy the security groups on the # instance from the ports attached to the instance. instance.security_groups = objects.SecurityGroupList() self._populate_instance_names(instance, num_instances, index) instance.shutdown_terminate = shutdown_terminate return instance def _create_tag_list_obj(self, context, tags): """Create TagList objects from simple string tags. :param context: security context. :param tags: simple string tags from API request. :returns: TagList object. """ tag_list = [objects.Tag(context=context, tag=t) for t in tags] tag_list_obj = objects.TagList(objects=tag_list) return tag_list_obj def _transform_tags(self, tags, resource_id): """Change the resource_id of the tags according to the input param. Because this method can be called multiple times when more than one instance is booted in a single request it makes a copy of the tags list. :param tags: TagList object. :param resource_id: string. :returns: TagList object. """ instance_tags = tags.obj_clone() for tag in instance_tags: tag.resource_id = resource_id return instance_tags def _check_multiple_instances_with_neutron_ports(self, requested_networks): """Check whether multiple instances are created from port id(s).""" for requested_net in requested_networks: if requested_net.port_id: msg = _("Unable to launch multiple instances with" " a single configured port ID. 
Please launch your" " instance one by one with different ports.") raise exception.MultiplePortsNotApplicable(reason=msg) def _check_multiple_instances_with_specified_ip(self, requested_networks): """Check whether multiple instances are created with specified ip.""" for requested_net in requested_networks: if requested_net.network_id and requested_net.address: msg = _("max_count cannot be greater than 1 if an fixed_ip " "is specified.") raise exception.InvalidFixedIpAndMaxCountRequest(reason=msg) def create( self, context, flavor, image_href, kernel_id=None, ramdisk_id=None, min_count=None, max_count=None, display_name=None, display_description=None, hostname=None, key_name=None, key_data=None, security_groups=None, availability_zone=None, forced_host=None, forced_node=None, user_data=None, metadata=None, injected_files=None, admin_password=None, block_device_mapping=None, access_ip_v4=None, access_ip_v6=None, requested_networks=None, config_drive=None, auto_disk_config=None, scheduler_hints=None, legacy_bdm=True, shutdown_terminate=False, check_server_group_quota=False, tags=None, supports_multiattach=False, trusted_certs=None, supports_port_resource_request=False, requested_host=None, requested_hypervisor_hostname=None, ): """Provision instances, sending instance information to the scheduler. The scheduler will determine where the instance(s) go and will handle creating the DB entries. Returns a tuple of (instances, reservation_id) """ if requested_networks and max_count is not None and max_count > 1: self._check_multiple_instances_with_specified_ip( requested_networks) self._check_multiple_instances_with_neutron_ports( requested_networks) if hostname and max_count is not None and max_count > 1: raise exception.AmbiguousHostnameForMultipleInstances() if availability_zone and forced_host is None: azs = availability_zones.get_availability_zones( context.elevated(), self.host_api, get_only_available=True) if availability_zone not in azs: msg = _('The requested availability zone is not available') raise exception.InvalidRequest(msg) filter_properties = scheduler_utils.build_filter_properties( scheduler_hints, forced_host, forced_node, flavor) return self._create_instance( context, flavor, image_href, kernel_id, ramdisk_id, min_count, max_count, display_name, display_description, hostname, key_name, key_data, security_groups, availability_zone, user_data, metadata, injected_files, admin_password, access_ip_v4, access_ip_v6, requested_networks, config_drive, block_device_mapping, auto_disk_config, filter_properties=filter_properties, legacy_bdm=legacy_bdm, shutdown_terminate=shutdown_terminate, check_server_group_quota=check_server_group_quota, tags=tags, supports_multiattach=supports_multiattach, trusted_certs=trusted_certs, supports_port_resource_request=supports_port_resource_request, requested_host=requested_host, requested_hypervisor_hostname=requested_hypervisor_hostname) def _check_auto_disk_config(self, instance=None, image=None, auto_disk_config=None): if auto_disk_config is None: return if not image and not instance: return if image: image_props = image.get("properties", {}) auto_disk_config_img = \ utils.get_auto_disk_config_from_image_props(image_props) image_ref = image.get("id") else: sys_meta = utils.instance_sys_meta(instance) image_ref = sys_meta.get('image_base_image_ref') auto_disk_config_img = \ utils.get_auto_disk_config_from_instance(sys_meta=sys_meta) self._ensure_auto_disk_config_is_valid(auto_disk_config_img, auto_disk_config, image_ref) def _lookup_instance(self, 
context, uuid): '''Helper method for pulling an instance object from a database. During the transition to cellsv2 there is some complexity around retrieving an instance from the database which this method hides. If there is an instance mapping then query the cell for the instance, if no mapping exists then query the configured nova database. Once we are past the point that all deployments can be assumed to be migrated to cellsv2 this method can go away. ''' inst_map = None try: inst_map = objects.InstanceMapping.get_by_instance_uuid( context, uuid) except exception.InstanceMappingNotFound: # TODO(alaski): This exception block can be removed once we're # guaranteed everyone is using cellsv2. pass if inst_map is None or inst_map.cell_mapping is None: # If inst_map is None then the deployment has not migrated to # cellsv2 yet. # If inst_map.cell_mapping is None then the instance is not in a # cell yet. Until instance creation moves to the conductor the # instance can be found in the configured database, so attempt # to look it up. cell = None try: instance = objects.Instance.get_by_uuid(context, uuid) except exception.InstanceNotFound: # If we get here then the conductor is in charge of writing the # instance to the database and hasn't done that yet. It's up to # the caller of this method to determine what to do with that # information. return None, None else: cell = inst_map.cell_mapping with nova_context.target_cell(context, cell) as cctxt: try: instance = objects.Instance.get_by_uuid(cctxt, uuid) except exception.InstanceNotFound: # Since the cell_mapping exists we know the instance is in # the cell, however InstanceNotFound means it's already # deleted. return None, None return cell, instance def _delete_while_booting(self, context, instance): """Handle deletion if the instance has not reached a cell yet Deletion before an instance reaches a cell needs to be handled differently. What we're attempting to do is delete the BuildRequest before the api level conductor does. If we succeed here then the boot request stops before reaching a cell. If not then the instance will need to be looked up in a cell db and the normal delete path taken. """ deleted = self._attempt_delete_of_buildrequest(context, instance) if deleted: # If we've reached this block the successful deletion of the # buildrequest indicates that the build process should be halted by # the conductor. # NOTE(alaski): Though the conductor halts the build process it # does not currently delete the instance record. This is # because in the near future the instance record will not be # created if the buildrequest has been deleted here. For now we # ensure the instance has been set to deleted at this point. # Yes this directly contradicts the comment earlier in this # method, but this is a temporary measure. # Look up the instance because the current instance object was # stashed on the buildrequest and therefore not complete enough # to run .destroy(). try: instance_uuid = instance.uuid cell, instance = self._lookup_instance(context, instance_uuid) if instance is not None: # If instance is None it has already been deleted. 
if cell: with nova_context.target_cell(context, cell) as cctxt: # FIXME: When the instance context is targeted, # we can remove this with compute_utils.notify_about_instance_delete( self.notifier, cctxt, instance): instance.destroy() else: instance.destroy() except exception.InstanceNotFound: pass return True return False def _local_delete_cleanup(self, context, instance_uuid): # NOTE(aarents) Ensure instance allocation is cleared and instance # mapping queued as deleted before _delete() return try: self.placementclient.delete_allocation_for_instance( context, instance_uuid, force=True) except exception.AllocationDeleteFailed: LOG.info("Allocation delete failed during local delete cleanup.", instance_uuid=instance_uuid) try: self._update_queued_for_deletion(context, instance_uuid, True) except exception.InstanceMappingNotFound: LOG.info("Instance Mapping does not exist while attempting " "local delete cleanup.", instance_uuid=instance_uuid) def _attempt_delete_of_buildrequest(self, context, instance): # If there is a BuildRequest then the instance may not have been # written to a cell db yet. Delete the BuildRequest here, which # will indicate that the Instance build should not proceed. try: build_req = objects.BuildRequest.get_by_instance_uuid( context, instance.uuid) build_req.destroy() except exception.BuildRequestNotFound: # This means that conductor has deleted the BuildRequest so the # instance is now in a cell and the delete needs to proceed # normally. return False # We need to detach from any volumes so they aren't orphaned. self._local_cleanup_bdm_volumes( build_req.block_device_mappings, instance, context) return True def _delete(self, context, instance, delete_type, cb, **instance_attrs): if instance.disable_terminate: LOG.info('instance termination disabled', instance=instance) return cell = None # If there is an instance.host (or the instance is shelved-offloaded or # in error state), the instance has been scheduled and sent to a # cell/compute which means it was pulled from the cell db. # Normal delete should be attempted. may_have_ports_or_volumes = compute_utils.may_have_ports_or_volumes( instance) # Save a copy of the instance UUID early, in case # _lookup_instance returns instance = None, to pass to # _local_delete_cleanup if needed. instance_uuid = instance.uuid if not instance.host and not may_have_ports_or_volumes: try: if self._delete_while_booting(context, instance): self._local_delete_cleanup(context, instance.uuid) return # If instance.host was not set it's possible that the Instance # object here was pulled from a BuildRequest object and is not # fully populated. Notably it will be missing an 'id' field # which will prevent instance.destroy from functioning # properly. A lookup is attempted which will either return a # full Instance or None if not found. If not found then it's # acceptable to skip the rest of the delete processing. cell, instance = self._lookup_instance(context, instance.uuid) if cell and instance: try: # Now destroy the instance from the cell it lives in. with compute_utils.notify_about_instance_delete( self.notifier, context, instance): instance.destroy() except exception.InstanceNotFound: pass # The instance was deleted or is already gone. self._local_delete_cleanup(context, instance.uuid) return if not instance: # Instance is already deleted. 
self._local_delete_cleanup(context, instance_uuid) return except exception.ObjectActionError: # NOTE(melwitt): This means the instance.host changed # under us indicating the instance became scheduled # during the destroy(). Refresh the instance from the DB and # continue on with the delete logic for a scheduled instance. # NOTE(danms): If instance.host is set, we should be able to # do the following lookup. If not, there's not much we can # do to recover. cell, instance = self._lookup_instance(context, instance.uuid) if not instance: # Instance is already deleted self._local_delete_cleanup(context, instance_uuid) return bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) # At these states an instance has a snapshot associate. if instance.vm_state in (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED): snapshot_id = instance.system_metadata.get('shelved_image_id') LOG.info("Working on deleting snapshot %s " "from shelved instance...", snapshot_id, instance=instance) try: self.image_api.delete(context, snapshot_id) except (exception.ImageNotFound, exception.ImageNotAuthorized) as exc: LOG.warning("Failed to delete snapshot " "from shelved instance (%s).", exc.format_message(), instance=instance) except Exception: LOG.exception("Something wrong happened when trying to " "delete snapshot from shelved instance.", instance=instance) original_task_state = instance.task_state try: # NOTE(maoy): no expected_task_state needs to be set instance.update(instance_attrs) instance.progress = 0 instance.save() if not instance.host and not may_have_ports_or_volumes: try: with compute_utils.notify_about_instance_delete( self.notifier, context, instance, delete_type if delete_type != 'soft_delete' else 'delete'): instance.destroy() LOG.info('Instance deleted and does not have host ' 'field, its vm_state is %(state)s.', {'state': instance.vm_state}, instance=instance) self._local_delete_cleanup(context, instance.uuid) return except exception.ObjectActionError as ex: # The instance's host likely changed under us as # this instance could be building and has since been # scheduled. Continue with attempts to delete it. LOG.debug('Refreshing instance because: %s', ex, instance=instance) instance.refresh() if instance.vm_state == vm_states.RESIZED: self._confirm_resize_on_deleting(context, instance) # NOTE(neha_alhat): After confirm resize vm_state will become # 'active' and task_state will be set to 'None'. But for soft # deleting a vm, the _do_soft_delete callback requires # task_state in 'SOFT_DELETING' status. So, we need to set # task_state as 'SOFT_DELETING' again for soft_delete case. # After confirm resize and before saving the task_state to # "SOFT_DELETING", during the short window, user can submit # soft delete vm request again and system will accept and # process it without any errors. if delete_type == 'soft_delete': instance.task_state = instance_attrs['task_state'] instance.save() is_local_delete = True try: # instance.host must be set in order to look up the service. 
if instance.host is not None: service = objects.Service.get_by_compute_host( context.elevated(), instance.host) is_local_delete = not self.servicegroup_api.service_is_up( service) if not is_local_delete: if original_task_state in (task_states.DELETING, task_states.SOFT_DELETING): LOG.info('Instance is already in deleting state, ' 'ignoring this request', instance=instance) return self._record_action_start(context, instance, instance_actions.DELETE) cb(context, instance, bdms) except exception.ComputeHostNotFound: LOG.debug('Compute host %s not found during service up check, ' 'going to local delete instance', instance.host, instance=instance) if is_local_delete: # If instance is in shelved_offloaded state or compute node # isn't up, delete instance from db and clean bdms info and # network info if cell is None: # NOTE(danms): If we didn't get our cell from one of the # paths above, look it up now. try: im = objects.InstanceMapping.get_by_instance_uuid( context, instance.uuid) cell = im.cell_mapping except exception.InstanceMappingNotFound: LOG.warning('During local delete, failed to find ' 'instance mapping', instance=instance) return LOG.debug('Doing local delete in cell %s', cell.identity, instance=instance) with nova_context.target_cell(context, cell) as cctxt: self._local_delete(cctxt, instance, bdms, delete_type, cb) self._record_action_start(context, instance, instance_actions.DELETE) except exception.InstanceNotFound: # NOTE(comstud): Race condition. Instance already gone. pass def _confirm_resize_on_deleting(self, context, instance): # If in the middle of a resize, use confirm_resize to # ensure the original instance is cleaned up too along # with its allocations (and migration-based allocations) # in placement. migration = None for status in ('finished', 'confirming'): try: migration = objects.Migration.get_by_instance_and_status( context.elevated(), instance.uuid, status) LOG.info('Found an unconfirmed migration during delete, ' 'id: %(id)s, status: %(status)s', {'id': migration.id, 'status': migration.status}, instance=instance) break except exception.MigrationNotFoundByStatus: pass if not migration: LOG.info('Instance may have been confirmed during delete', instance=instance) return self._record_action_start(context, instance, instance_actions.CONFIRM_RESIZE) # If migration.cross_cell_move, we need to also cleanup the instance # data from the source cell database. if migration.cross_cell_move: self.compute_task_api.confirm_snapshot_based_resize( context, instance, migration, do_cast=False) else: self.compute_rpcapi.confirm_resize(context, instance, migration, migration.source_compute, cast=False) def _local_cleanup_bdm_volumes(self, bdms, instance, context): """The method deletes the bdm records and, if a bdm is a volume, call the terminate connection and the detach volume via the Volume API. """ elevated = context.elevated() for bdm in bdms: if bdm.is_volume: try: if bdm.attachment_id: self.volume_api.attachment_delete(context, bdm.attachment_id) else: connector = compute_utils.get_stashed_volume_connector( bdm, instance) if connector: self.volume_api.terminate_connection(context, bdm.volume_id, connector) else: LOG.debug('Unable to find connector for volume %s,' ' not attempting terminate_connection.', bdm.volume_id, instance=instance) # Attempt to detach the volume. If there was no # connection made in the first place this is just # cleaning up the volume state in the Cinder DB. 
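                    # For example, a volume attached through the newer Cinder
                    # attachment API has bdm.attachment_id set and was already
                    # cleaned up via attachment_delete above; legacy
                    # attachments fall through to terminate_connection and
                    # then this detach call.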
self.volume_api.detach(elevated, bdm.volume_id, instance.uuid) if bdm.delete_on_termination: self.volume_api.delete(context, bdm.volume_id) except Exception as exc: LOG.warning("Ignoring volume cleanup failure due to %s", exc, instance=instance) # If we're cleaning up volumes from an instance that wasn't yet # created in a cell, i.e. the user deleted the server while # the BuildRequest still existed, then the BDM doesn't actually # exist in the DB to destroy it. if 'id' in bdm: bdm.destroy() @property def placementclient(self): return report.report_client_singleton() def _local_delete(self, context, instance, bdms, delete_type, cb): if instance.vm_state == vm_states.SHELVED_OFFLOADED: LOG.info("instance is in SHELVED_OFFLOADED state, cleanup" " the instance's info from database.", instance=instance) else: LOG.warning("instance's host %s is down, deleting from " "database", instance.host, instance=instance) with compute_utils.notify_about_instance_delete( self.notifier, context, instance, delete_type if delete_type != 'soft_delete' else 'delete'): elevated = context.elevated() self.network_api.deallocate_for_instance(elevated, instance) # cleanup volumes self._local_cleanup_bdm_volumes(bdms, instance, context) # cleanup accelerator requests (ARQs) compute_utils.delete_arqs_if_needed(context, instance) # Cleanup allocations in Placement since we can't do it from the # compute service. self.placementclient.delete_allocation_for_instance( context, instance.uuid, force=True) cb(context, instance, bdms, local=True) instance.destroy() @staticmethod def _update_queued_for_deletion(context, instance_uuid, qfd): # NOTE(tssurya): We query the instance_mapping record of this instance # and update the queued_for_delete flag to True (or False according to # the state of the instance). This just means that the instance is # queued for deletion (or is no longer queued for deletion). It does # not guarantee its successful deletion (or restoration). Hence the # value could be stale which is fine, considering its use is only # during down cell (desperate) situation. im = objects.InstanceMapping.get_by_instance_uuid(context, instance_uuid) im.queued_for_delete = qfd im.save() def _do_delete(self, context, instance, bdms, local=False): if local: instance.vm_state = vm_states.DELETED instance.task_state = None instance.terminated_at = timeutils.utcnow() instance.save() else: self.compute_rpcapi.terminate_instance(context, instance, bdms) self._update_queued_for_deletion(context, instance.uuid, True) def _do_soft_delete(self, context, instance, bdms, local=False): if local: instance.vm_state = vm_states.SOFT_DELETED instance.task_state = None instance.terminated_at = timeutils.utcnow() instance.save() else: self.compute_rpcapi.soft_delete_instance(context, instance) self._update_queued_for_deletion(context, instance.uuid, True) # NOTE(maoy): we allow delete to be called no matter what vm_state says. 
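    # For example, soft_delete and delete below pass vm_state=None and
    # task_state=None to check_instance_state, so an instance stuck in ERROR
    # or mid-task can still be deleted; the instance lock check still applies.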
@check_instance_lock @check_instance_state(vm_state=None, task_state=None, must_have_launched=True) def soft_delete(self, context, instance): """Terminate an instance.""" LOG.debug('Going to try to soft delete instance', instance=instance) self._delete(context, instance, 'soft_delete', self._do_soft_delete, task_state=task_states.SOFT_DELETING, deleted_at=timeutils.utcnow()) def _delete_instance(self, context, instance): self._delete(context, instance, 'delete', self._do_delete, task_state=task_states.DELETING) @check_instance_lock @check_instance_state(vm_state=None, task_state=None, must_have_launched=False) def delete(self, context, instance): """Terminate an instance.""" LOG.debug("Going to try to terminate instance", instance=instance) self._delete_instance(context, instance) @check_instance_lock @check_instance_state(vm_state=[vm_states.SOFT_DELETED]) def restore(self, context, instance): """Restore a previously deleted (but not reclaimed) instance.""" # Check quotas flavor = instance.get_flavor() project_id, user_id = quotas_obj.ids_from_instance(context, instance) compute_utils.check_num_instances_quota(context, flavor, 1, 1, project_id=project_id, user_id=user_id) is_bfv = compute_utils.is_volume_backed_instance(context, instance) placement_limits.enforce_num_instances_and_flavor(context, project_id, flavor, is_bfv, 1, 1) self._record_action_start(context, instance, instance_actions.RESTORE) if instance.host: instance.task_state = task_states.RESTORING instance.deleted_at = None instance.save(expected_task_state=[None]) # TODO(melwitt): We're not rechecking for strict quota here to # guard against going over quota during a race at this time because # the resource consumption for this operation is written to the # database by compute. self.compute_rpcapi.restore_instance(context, instance) else: instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.deleted_at = None instance.save(expected_task_state=[None]) self._update_queued_for_deletion(context, instance.uuid, False) @check_instance_lock @check_instance_state(task_state=None, must_have_launched=False) def force_delete(self, context, instance): """Force delete an instance in any vm_state/task_state.""" self._delete(context, instance, 'force_delete', self._do_delete, task_state=task_states.DELETING) def force_stop(self, context, instance, do_cast=True, clean_shutdown=True): LOG.debug("Going to try to stop instance", instance=instance) instance.task_state = task_states.POWERING_OFF instance.progress = 0 instance.save(expected_task_state=[None]) self._record_action_start(context, instance, instance_actions.STOP) self.compute_rpcapi.stop_instance(context, instance, do_cast=do_cast, clean_shutdown=clean_shutdown) @check_instance_lock @check_instance_host() @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.ERROR]) def stop(self, context, instance, do_cast=True, clean_shutdown=True): """Stop an instance.""" self.force_stop(context, instance, do_cast, clean_shutdown) @check_instance_lock @check_instance_host() @check_instance_state(vm_state=[vm_states.STOPPED]) def start(self, context, instance): """Start an instance.""" LOG.debug("Going to try to start instance", instance=instance) instance.task_state = task_states.POWERING_ON instance.save(expected_task_state=[None]) self._record_action_start(context, instance, instance_actions.START) self.compute_rpcapi.start_instance(context, instance) @check_instance_lock @check_instance_host() @check_instance_state(vm_state=vm_states.ALLOW_TRIGGER_CRASH_DUMP) def 
trigger_crash_dump(self, context, instance): """Trigger crash dump in an instance.""" LOG.debug("Try to trigger crash dump", instance=instance) self._record_action_start(context, instance, instance_actions.TRIGGER_CRASH_DUMP) self.compute_rpcapi.trigger_crash_dump(context, instance) def _generate_minimal_construct_for_down_cells(self, context, down_cell_uuids, project, limit): """Generate a list of minimal instance constructs for a given list of cells that did not respond to a list operation. This will list every instance mapping in the affected cells and return a minimal objects.Instance for each (non-queued-for-delete) mapping. :param context: RequestContext :param down_cell_uuids: A list of cell UUIDs that did not respond :param project: A project ID to filter mappings, or None :param limit: A numeric limit on the number of results, or None :returns: An InstanceList() of partial Instance() objects """ unavailable_servers = objects.InstanceList() for cell_uuid in down_cell_uuids: LOG.warning("Cell %s is not responding and hence only " "partial results are available from this " "cell if any.", cell_uuid) instance_mappings = (objects.InstanceMappingList. get_not_deleted_by_cell_and_project(context, cell_uuid, project, limit=limit)) for im in instance_mappings: unavailable_servers.objects.append( objects.Instance( context=context, uuid=im.instance_uuid, project_id=im.project_id, created_at=im.created_at ) ) if limit is not None: limit -= len(instance_mappings) if limit <= 0: break return unavailable_servers def _get_instance_map_or_none(self, context, instance_uuid): try: inst_map = objects.InstanceMapping.get_by_instance_uuid( context, instance_uuid) except exception.InstanceMappingNotFound: # InstanceMapping should always be found generally. This exception # may be raised if a deployment has partially migrated the nova-api # services. inst_map = None return inst_map @staticmethod def _save_user_id_in_instance_mapping(mapping, instance): # TODO(melwitt): We take the opportunity to migrate user_id on the # instance mapping if it's not yet been migrated. This can be removed # in a future release, when all migrations are complete. # If the instance came from a RequestSpec because of a down cell, its # user_id could be None and the InstanceMapping.user_id field is # non-nullable. Avoid trying to set/save the user_id in that case. if 'user_id' not in mapping and instance.user_id is not None: mapping.user_id = instance.user_id mapping.save() def _get_instance_from_cell(self, context, im, expected_attrs, cell_down_support): # NOTE(danms): Even though we're going to scatter/gather to the # right cell, other code depends on this being force targeted when # the get call returns. nova_context.set_target_cell(context, im.cell_mapping) uuid = im.instance_uuid result = nova_context.scatter_gather_single_cell(context, im.cell_mapping, objects.Instance.get_by_uuid, uuid, expected_attrs=expected_attrs) cell_uuid = im.cell_mapping.uuid if not nova_context.is_cell_failure_sentinel(result[cell_uuid]): inst = result[cell_uuid] self._save_user_id_in_instance_mapping(im, inst) return inst elif isinstance(result[cell_uuid], exception.InstanceNotFound): raise exception.InstanceNotFound(instance_id=uuid) elif cell_down_support: if im.queued_for_delete: # should be treated like deleted instance. 
raise exception.InstanceNotFound(instance_id=uuid) # instance in down cell, return a minimal construct LOG.warning("Cell %s is not responding and hence only " "partial results are available from this " "cell.", cell_uuid) try: rs = objects.RequestSpec.get_by_instance_uuid(context, uuid) # For BFV case, we could have rs.image but rs.image.id might # still not be set. So we check the existence of both image # and its id. image_ref = (rs.image.id if rs.image and 'id' in rs.image else None) inst = objects.Instance(context=context, power_state=0, uuid=uuid, project_id=im.project_id, created_at=im.created_at, user_id=rs.user_id, flavor=rs.flavor, image_ref=image_ref, availability_zone=rs.availability_zone) self._save_user_id_in_instance_mapping(im, inst) return inst except exception.RequestSpecNotFound: # could be that a deleted instance whose request # spec has been archived is being queried. raise exception.InstanceNotFound(instance_id=uuid) else: if isinstance(result[cell_uuid], exception.NovaException): LOG.exception(result[cell_uuid]) raise exception.NovaException( _("Cell %s is not responding or returned an exception, " "hence instance info is not available.") % cell_uuid) def _get_instance(self, context, instance_uuid, expected_attrs, cell_down_support=False): inst_map = self._get_instance_map_or_none(context, instance_uuid) if inst_map and (inst_map.cell_mapping is not None): instance = self._get_instance_from_cell(context, inst_map, expected_attrs, cell_down_support) elif inst_map and (inst_map.cell_mapping is None): # This means the instance has not been scheduled and put in # a cell yet. For now it also may mean that the deployer # has not created their cell(s) yet. try: build_req = objects.BuildRequest.get_by_instance_uuid( context, instance_uuid) instance = build_req.instance except exception.BuildRequestNotFound: # Instance was mapped and the BuildRequest was deleted # while fetching. Try again. inst_map = self._get_instance_map_or_none(context, instance_uuid) if inst_map and (inst_map.cell_mapping is not None): instance = self._get_instance_from_cell(context, inst_map, expected_attrs, cell_down_support) else: raise exception.InstanceNotFound(instance_id=instance_uuid) else: # If we got here, we don't have an instance mapping, but we aren't # sure why. The instance mapping might be missing because the # upgrade is incomplete (map_instances wasn't run). Or because the # instance was deleted and the DB was archived at which point the # mapping is deleted. The former case is bad, but because of the # latter case we can't really log any kind of warning/error here # since it might be normal. raise exception.InstanceNotFound(instance_id=instance_uuid) return instance def get(self, context, instance_id, expected_attrs=None, cell_down_support=False): """Get a single instance with the given instance_id. :param cell_down_support: True if the API (and caller) support returning a minimal instance construct if the relevant cell is down. If False, an error is raised since the instance cannot be retrieved due to the cell being down. 
""" if not expected_attrs: expected_attrs = [] expected_attrs.extend(['metadata', 'system_metadata', 'security_groups', 'info_cache']) # NOTE(ameade): we still need to support integer ids for ec2 try: if uuidutils.is_uuid_like(instance_id): LOG.debug("Fetching instance by UUID", instance_uuid=instance_id) instance = self._get_instance(context, instance_id, expected_attrs, cell_down_support=cell_down_support) else: LOG.debug("Failed to fetch instance by id %s", instance_id) raise exception.InstanceNotFound(instance_id=instance_id) except exception.InvalidID: LOG.debug("Invalid instance id %s", instance_id) raise exception.InstanceNotFound(instance_id=instance_id) return instance def get_all(self, context, search_opts=None, limit=None, marker=None, expected_attrs=None, sort_keys=None, sort_dirs=None, cell_down_support=False, all_tenants=False): """Get all instances filtered by one of the given parameters. If there is no filter and the context is an admin, it will retrieve all instances in the system. Deleted instances will be returned by default, unless there is a search option that says otherwise. The results will be sorted based on the list of sort keys in the 'sort_keys' parameter (first value is primary sort key, second value is secondary sort key, etc.). For each sort key, the associated sort direction is based on the list of sort directions in the 'sort_dirs' parameter. :param cell_down_support: True if the API (and caller) support returning a minimal instance construct if the relevant cell is down. If False, instances from unreachable cells will be omitted. :param all_tenants: True if the "all_tenants" filter was passed. """ if search_opts is None: search_opts = {} LOG.debug("Searching by: %s", str(search_opts)) # Fixups for the DB call filters = {} def _remap_flavor_filter(flavor_id): flavor = objects.Flavor.get_by_flavor_id(context, flavor_id) filters['instance_type_id'] = flavor.id def _remap_fixed_ip_filter(fixed_ip): # Turn fixed_ip into a regexp match. Since '.' matches # any character, we need to use regexp escaping for it. filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.') # search_option to filter_name mapping. filter_mapping = { 'image': 'image_ref', 'name': 'display_name', 'tenant_id': 'project_id', 'flavor': _remap_flavor_filter, 'fixed_ip': _remap_fixed_ip_filter} # copy from search_opts, doing various remappings as necessary for opt, value in search_opts.items(): # Do remappings. # Values not in the filter_mapping table are copied as-is. # If remapping is None, option is not copied # If the remapping is a string, it is the filter_name to use try: remap_object = filter_mapping[opt] except KeyError: filters[opt] = value else: # Remaps are strings to translate to, or functions to call # to do the translating as defined by the table above. if isinstance(remap_object, str): filters[remap_object] = value else: try: remap_object(value) # We already know we can't match the filter, so # return an empty list except ValueError: return objects.InstanceList() # IP address filtering cannot be applied at the DB layer, remove any DB # limit so that it can be applied after the IP filter. filter_ip = 'ip6' in filters or 'ip' in filters skip_build_request = False orig_limit = limit if filter_ip: # We cannot skip build requests if there is a marker since the # the marker could be a build request. 
skip_build_request = marker is None if self.network_api.has_substr_port_filtering_extension(context): # We're going to filter by IP using Neutron so set filter_ip # to False so we don't attempt post-DB query filtering in # memory below. filter_ip = False instance_uuids = self._ip_filter_using_neutron(context, filters) if instance_uuids: # Note that 'uuid' is not in the 2.1 GET /servers query # parameter schema, however, we allow additionalProperties # so someone could filter instances by uuid, which doesn't # make a lot of sense but we have to account for it. if 'uuid' in filters and filters['uuid']: filter_uuids = filters['uuid'] if isinstance(filter_uuids, list): instance_uuids.extend(filter_uuids) else: # Assume a string. If it's a dict or tuple or # something, well...that's too bad. This is why # we have query parameter schema definitions. if filter_uuids not in instance_uuids: instance_uuids.append(filter_uuids) filters['uuid'] = instance_uuids else: # No matches on the ip filter(s), return an empty list. return objects.InstanceList() elif limit: LOG.debug('Removing limit for DB query due to IP filter') limit = None # Skip get BuildRequest if filtering by IP address, as building # instances will not have IP addresses. if skip_build_request: build_requests = objects.BuildRequestList() else: # The ordering of instances will be # [sorted instances with no host] + [sorted instances with host]. # This means BuildRequest and cell0 instances first, then cell # instances try: build_requests = objects.BuildRequestList.get_by_filters( context, filters, limit=limit, marker=marker, sort_keys=sort_keys, sort_dirs=sort_dirs) # If we found the marker in we need to set it to None # so we don't expect to find it in the cells below. marker = None except exception.MarkerNotFound: # If we didn't find the marker in the build requests then keep # looking for it in the cells. build_requests = objects.BuildRequestList() build_req_instances = objects.InstanceList( objects=[build_req.instance for build_req in build_requests]) # Only subtract from limit if it is not None limit = (limit - len(build_req_instances)) if limit else limit # We could arguably avoid joining on security_groups if we're using # neutron (which is the default) but if you're using neutron then the # security_group_instance_association table should be empty anyway # and the DB should optimize out that join, making it insignificant. fields = ['metadata', 'info_cache', 'security_groups'] if expected_attrs: fields.extend(expected_attrs) insts, down_cell_uuids = instance_list.get_instance_objects_sorted( context, filters, limit, marker, fields, sort_keys, sort_dirs, cell_down_support=cell_down_support) def _get_unique_filter_method(): seen_uuids = set() def _filter(instance): # During a cross-cell move operation we could have the instance # in more than one cell database so we not only have to filter # duplicates but we want to make sure we only return the # "current" one which should also be the one that the instance # mapping points to, but we don't want to do that expensive # lookup here. The DB API will filter out hidden instances by # default but there is a small window where two copies of an # instance could be hidden=False in separate cell DBs. 
# NOTE(mriedem): We could make this better in the case that we # have duplicate instances that are both hidden=False by # showing the one with the newer updated_at value, but that # could be tricky if the user is filtering on # changes-since/before or updated_at, or sorting on updated_at, # but technically that was already potentially broken with this # _filter method if we return an older BuildRequest.instance, # and given the window should be very small where we have # duplicates, it's probably not worth the complexity. if instance.uuid in seen_uuids: return False seen_uuids.add(instance.uuid) return True return _filter filter_method = _get_unique_filter_method() # Only subtract from limit if it is not None limit = (limit - len(insts)) if limit else limit # TODO(alaski): Clean up the objects concatenation when List objects # support it natively. instances = objects.InstanceList( objects=list(filter(filter_method, build_req_instances.objects + insts.objects))) if filter_ip: instances = self._ip_filter(instances, filters, orig_limit) if cell_down_support: # API and client want minimal construct instances for any cells # that didn't return, so generate and prefix those to the actual # results. project = search_opts.get('project_id', context.project_id) if all_tenants: # NOTE(tssurya): The only scenario where project has to be None # is when using "all_tenants" in which case we do not want # the query to be restricted based on the project_id. project = None limit = (orig_limit - len(instances)) if limit else limit return (self._generate_minimal_construct_for_down_cells(context, down_cell_uuids, project, limit) + instances) return instances @staticmethod def _ip_filter(inst_models, filters, limit): ipv4_f = re.compile(str(filters.get('ip'))) ipv6_f = re.compile(str(filters.get('ip6'))) def _match_instance(instance): nw_info = instance.get_network_info() for vif in nw_info: for fixed_ip in vif.fixed_ips(): address = fixed_ip.get('address') if not address: continue version = fixed_ip.get('version') if ((version == 4 and ipv4_f.match(address)) or (version == 6 and ipv6_f.match(address))): return True return False result_objs = [] for instance in inst_models: if _match_instance(instance): result_objs.append(instance) if limit and len(result_objs) == limit: break return objects.InstanceList(objects=result_objs) def _ip_filter_using_neutron(self, context, filters): ip4_address = filters.get('ip') ip6_address = filters.get('ip6') addresses = [ip4_address, ip6_address] uuids = [] for address in addresses: if address: try: ports = self.network_api.list_ports( context, fixed_ips='ip_address_substr=' + address, fields=['device_id'])['ports'] for port in ports: uuids.append(port['device_id']) except Exception as e: LOG.error('An error occurred while listing ports ' 'with an ip_address filter value of "%s". ' 'Error: %s', address, str(e)) return uuids def update_instance(self, context, instance, updates): """Updates a single Instance object with some updates dict. Returns the updated instance. """ # NOTE(sbauza): Given we only persist the Instance object after we # create the BuildRequest, we are sure that if the Instance object # has an ID field set, then it was persisted in the right Cell DB. 
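        # A set 'id' field therefore means the instance row already exists in
        # a cell database and can be updated and saved directly; otherwise we
        # fall back to updating the copy embedded in the BuildRequest below.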
if instance.obj_attr_is_set('id'): instance.update(updates) instance.save() else: # Instance is not yet mapped to a cell, so we need to update # BuildRequest instead # TODO(sbauza): Fix the possible race conditions where BuildRequest # could be deleted because of either a concurrent instance delete # or because the scheduler just returned a destination right # after we called the instance in the API. try: build_req = objects.BuildRequest.get_by_instance_uuid( context, instance.uuid) instance = build_req.instance instance.update(updates) # FIXME(sbauza): Here we are updating the current # thread-related BuildRequest object. Given that another worker # could have looking up at that BuildRequest in the API, it # means that it could pass it down to the conductor without # making sure that it's not updated, we could have some race # condition where it would missing the updated fields, but # that's something we could discuss once the instance record # is persisted by the conductor. build_req.save() except exception.BuildRequestNotFound: # Instance was mapped and the BuildRequest was deleted # while fetching (and possibly the instance could have been # deleted as well). We need to lookup again the Instance object # in order to correctly update it. # TODO(sbauza): Figure out a good way to know the expected # attributes by checking which fields are set or not. expected_attrs = ['flavor', 'pci_devices', 'numa_topology', 'tags', 'metadata', 'system_metadata', 'security_groups', 'info_cache'] inst_map = self._get_instance_map_or_none(context, instance.uuid) if inst_map and (inst_map.cell_mapping is not None): with nova_context.target_cell( context, inst_map.cell_mapping) as cctxt: instance = objects.Instance.get_by_uuid( cctxt, instance.uuid, expected_attrs=expected_attrs) instance.update(updates) instance.save() else: # Conductor doesn't delete the BuildRequest until after the # InstanceMapping record is created, so if we didn't get # that and the BuildRequest doesn't exist, then the # instance is already gone and we need to just error out. raise exception.InstanceNotFound(instance_id=instance.uuid) return instance # NOTE(melwitt): We don't check instance lock for backup because lock is # intended to prevent accidental change/delete of instances @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED, vm_states.PAUSED, vm_states.SUSPENDED]) def backup(self, context, instance, name, backup_type, rotation, extra_properties=None): """Backup the given instance :param instance: nova.objects.instance.Instance object :param name: name of the backup :param backup_type: 'daily' or 'weekly' :param rotation: int representing how many backups to keep around; None if rotation shouldn't be used (as in the case of snapshots) :param extra_properties: dict of extra image properties to include when creating the image. 
:returns: A dict containing image metadata """ props_copy = dict(extra_properties, backup_type=backup_type) if compute_utils.is_volume_backed_instance(context, instance): LOG.info("It's not supported to backup volume backed " "instance.", instance=instance) raise exception.InvalidRequest( _('Backup is not supported for volume-backed instances.')) else: image_meta = compute_utils.create_image( context, instance, name, 'backup', self.image_api, extra_properties=props_copy) instance.task_state = task_states.IMAGE_BACKUP instance.save(expected_task_state=[None]) self._record_action_start(context, instance, instance_actions.BACKUP) self.compute_rpcapi.backup_instance(context, instance, image_meta['id'], backup_type, rotation) return image_meta # NOTE(melwitt): We don't check instance lock for snapshot because lock is # intended to prevent accidental change/delete of instances @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED, vm_states.PAUSED, vm_states.SUSPENDED]) def snapshot(self, context, instance, name, extra_properties=None): """Snapshot the given instance. :param instance: nova.objects.instance.Instance object :param name: name of the snapshot :param extra_properties: dict of extra image properties to include when creating the image. :returns: A dict containing image metadata """ image_meta = compute_utils.create_image( context, instance, name, 'snapshot', self.image_api, extra_properties=extra_properties) instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING try: instance.save(expected_task_state=[None]) except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError) as ex: # Changing the instance task state to use in raising the # InstanceInvalidException below LOG.debug('Instance disappeared during snapshot.', instance=instance) try: image_id = image_meta['id'] self.image_api.delete(context, image_id) LOG.info('Image %s deleted because instance ' 'deleted before snapshot started.', image_id, instance=instance) except exception.ImageNotFound: pass except Exception as exc: LOG.warning("Error while trying to clean up image %(img_id)s: " "%(error_msg)s", {"img_id": image_meta['id'], "error_msg": str(exc)}) attr = 'task_state' state = task_states.DELETING if type(ex) is exception.InstanceNotFound: attr = 'vm_state' state = vm_states.DELETED raise exception.InstanceInvalidState(attr=attr, instance_uuid=instance.uuid, state=state, method='snapshot') self._record_action_start(context, instance, instance_actions.CREATE_IMAGE) self.compute_rpcapi.snapshot_instance(context, instance, image_meta['id']) return image_meta # NOTE(melwitt): We don't check instance lock for snapshot because lock is # intended to prevent accidental change/delete of instances @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED, vm_states.PAUSED, vm_states.SUSPENDED]) def snapshot_volume_backed(self, context, instance, name, extra_properties=None): """Snapshot the given volume-backed instance. 
:param instance: nova.objects.instance.Instance object :param name: name of the backup or snapshot :param extra_properties: dict of extra image properties to include :returns: the new image metadata """ image_meta = compute_utils.initialize_instance_snapshot_metadata( context, instance, name, extra_properties) # the new image is simply a bucket of properties (particularly the # block device mapping, kernel and ramdisk IDs) with no image data, # hence the zero size image_meta['size'] = 0 for attr in ('container_format', 'disk_format'): image_meta.pop(attr, None) properties = image_meta['properties'] # clean properties before filling for key in ('block_device_mapping', 'bdm_v2', 'root_device_name'): properties.pop(key, None) if instance.root_device_name: properties['root_device_name'] = instance.root_device_name bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) mapping = [] # list of BDM dicts that can go into the image properties # Do some up-front filtering of the list of BDMs from # which we are going to create snapshots. volume_bdms = [] for bdm in bdms: if bdm.no_device: continue if bdm.is_volume: # These will be handled below. volume_bdms.append(bdm) else: mapping.append(bdm.get_image_mapping()) # Check limits in Cinder before creating snapshots to avoid going over # quota in the middle of a list of volumes. This is a best-effort check # but concurrently running snapshot requests from the same project # could still fail to create volume snapshots if they go over limit. if volume_bdms: limits = self.volume_api.get_absolute_limits(context) total_snapshots_used = limits['totalSnapshotsUsed'] max_snapshots = limits['maxTotalSnapshots'] # -1 means there is unlimited quota for snapshots if (max_snapshots > -1 and len(volume_bdms) + total_snapshots_used > max_snapshots): LOG.debug('Unable to create volume snapshots for instance. ' 'Currently has %s snapshots, requesting %s new ' 'snapshots, with a limit of %s.', total_snapshots_used, len(volume_bdms), max_snapshots, instance=instance) raise exception.OverQuota(overs='snapshots') quiesced = False if instance.vm_state == vm_states.ACTIVE: try: LOG.info("Attempting to quiesce instance before volume " "snapshot.", instance=instance) self.compute_rpcapi.quiesce_instance(context, instance) quiesced = True except (exception.InstanceQuiesceNotSupported, exception.QemuGuestAgentNotEnabled, exception.NovaException, NotImplementedError) as err: if strutils.bool_from_string(instance.system_metadata.get( 'image_os_require_quiesce')): raise if isinstance(err, exception.NovaException): LOG.info('Skipping quiescing instance: %(reason)s.', {'reason': err.format_message()}, instance=instance) else: LOG.info('Skipping quiescing instance because the ' 'operation is not supported by the underlying ' 'compute driver.', instance=instance) # NOTE(tasker): discovered that an uncaught exception could occur # after the instance has been frozen. catch and thaw. except Exception as ex: with excutils.save_and_reraise_exception(): LOG.error("An error occurred during quiesce of instance. " "Unquiescing to ensure instance is thawed. " "Error: %s", str(ex), instance=instance) self.compute_rpcapi.unquiesce_instance(context, instance, mapping=None) @wrap_instance_event(prefix='api') def snapshot_instance(self, context, instance, bdms): try: for bdm in volume_bdms: # create snapshot based on volume_id volume = self.volume_api.get(context, bdm.volume_id) # NOTE(yamahata): Should we wait for snapshot creation? 
# Linux LVM snapshot creation completes in # short time, it doesn't matter for now. name = _('snapshot for %s') % image_meta['name'] LOG.debug('Creating snapshot from volume %s.', volume['id'], instance=instance) snapshot = self.volume_api.create_snapshot_force( context, volume['id'], name, volume['display_description']) mapping_dict = block_device.snapshot_from_bdm( snapshot['id'], bdm) mapping_dict = mapping_dict.get_image_mapping() mapping.append(mapping_dict) return mapping # NOTE(tasker): No error handling is done in the above for loop. # This means that if the snapshot fails and throws an exception # the traceback will skip right over the unquiesce needed below. # Here, catch any exception, unquiesce the instance, and raise the # error so that the calling function can do what it needs to in # order to properly treat a failed snap. except Exception: with excutils.save_and_reraise_exception(): if quiesced: LOG.info("Unquiescing instance after volume snapshot " "failure.", instance=instance) self.compute_rpcapi.unquiesce_instance( context, instance, mapping) self._record_action_start(context, instance, instance_actions.CREATE_IMAGE) mapping = snapshot_instance(self, context, instance, bdms) if quiesced: self.compute_rpcapi.unquiesce_instance(context, instance, mapping) if mapping: properties['block_device_mapping'] = mapping properties['bdm_v2'] = True return self.image_api.create(context, image_meta) @check_instance_lock def reboot(self, context, instance, reboot_type): """Reboot the given instance.""" if reboot_type == 'SOFT': self._soft_reboot(context, instance) else: self._hard_reboot(context, instance) @check_instance_state(vm_state=set(vm_states.ALLOW_SOFT_REBOOT), task_state=[None]) def _soft_reboot(self, context, instance): expected_task_state = [None] instance.task_state = task_states.REBOOTING instance.save(expected_task_state=expected_task_state) self._record_action_start(context, instance, instance_actions.REBOOT) self.compute_rpcapi.reboot_instance(context, instance=instance, block_device_info=None, reboot_type='SOFT') @check_instance_state(vm_state=set(vm_states.ALLOW_HARD_REBOOT), task_state=task_states.ALLOW_REBOOT) def _hard_reboot(self, context, instance): instance.task_state = task_states.REBOOTING_HARD instance.save(expected_task_state=task_states.ALLOW_REBOOT) self._record_action_start(context, instance, instance_actions.REBOOT) self.compute_rpcapi.reboot_instance(context, instance=instance, block_device_info=None, reboot_type='HARD') def _check_image_arch(self, image=None): if image: img_arch = image.get("properties", {}).get('hw_architecture') if img_arch: fields_obj.Architecture.canonicalize(img_arch) @block_shares_not_supported() @reject_vtpm_instances(instance_actions.REBUILD) @block_accelerators(until_service=SUPPORT_ACCELERATOR_SERVICE_FOR_REBUILD) # TODO(stephenfin): We should expand kwargs out to named args @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED, vm_states.ERROR]) def rebuild(self, context, instance, image_href, admin_password, files_to_inject=None, reimage_boot_volume=False, **kwargs): """Rebuild the given instance with the provided attributes.""" files_to_inject = files_to_inject or [] metadata = kwargs.get('metadata', {}) preserve_ephemeral = kwargs.get('preserve_ephemeral', False) auto_disk_config = kwargs.get('auto_disk_config') if 'key_name' in kwargs: key_name = kwargs.pop('key_name') if key_name: # NOTE(liuyulong): we are intentionally using the user_id from # the request context rather than the 
instance.user_id because # users own keys but instances are owned by projects, and # another user in the same project can rebuild an instance # even if they didn't create it. key_pair = objects.KeyPair.get_by_name(context, context.user_id, key_name) instance.key_name = key_pair.name instance.key_data = key_pair.public_key instance.keypairs = objects.KeyPairList(objects=[key_pair]) else: instance.key_name = None instance.key_data = None instance.keypairs = objects.KeyPairList(objects=[]) # Only lookup the minimum compute version once min_comp_ver = objects.service.get_minimum_version_all_cells( context, ["nova-compute"]) # Use trusted_certs value from kwargs to create TrustedCerts object trusted_certs = None if 'trusted_certs' in kwargs: # Note that the user can set, change, or unset / reset trusted # certs. If they are explicitly specifying # trusted_image_certificates=None, that means we'll either unset # them on the instance *or* reset to use the defaults (if defaults # are configured). trusted_certs = kwargs.pop('trusted_certs') instance.trusted_certs = self._retrieve_trusted_certs_object( context, trusted_certs, rebuild=True) if 'hostname' in kwargs: instance.hostname = kwargs.pop('hostname') image_id, image = self._get_image(context, image_href) self._check_auto_disk_config(image=image, auto_disk_config=auto_disk_config) self._check_image_arch(image=image) flavor = instance.get_flavor() bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) root_bdm = compute_utils.get_root_bdm(context, instance, bdms) # Check to see if the image is changing and we have a volume-backed # server. The compute doesn't support changing the image in the # root disk of a volume-backed server, so we need to just fail fast. is_volume_backed = compute_utils.is_volume_backed_instance( context, instance, bdms) if is_volume_backed: if trusted_certs: # The only way we can get here is if the user tried to set # trusted certs or specified trusted_image_certificates=None # and default_trusted_certificate_ids is configured. msg = _("Image certificate validation is not supported " "for volume-backed servers.") raise exception.CertificateValidationFailed(message=msg) # For boot from volume, instance.image_ref is empty, so we need to # query the image from the volume. if root_bdm is None: # This shouldn't happen and is an error, we need to fail. This # is not the users fault, it's an internal error. Without a # root BDM we have no way of knowing the backing volume (or # image in that volume) for this instance. raise exception.NovaException( _('Unable to find root block device mapping for ' 'volume-backed instance.')) volume = self.volume_api.get(context, root_bdm.volume_id) volume_image_metadata = volume.get('volume_image_metadata', {}) orig_image_ref = volume_image_metadata.get('image_id') if orig_image_ref != image_href: if not reimage_boot_volume: # Leave a breadcrumb. LOG.debug('Requested to rebuild instance with a new image ' '%s for a volume-backed server with image %s in ' 'its root volume which is not supported.', image_href, orig_image_ref, instance=instance) msg = _('Unable to rebuild with a different image for a ' 'volume-backed server.') raise exception.ImageUnacceptable( image_id=image_href, reason=msg) else: orig_image_ref = instance.image_ref request_spec = objects.RequestSpec.get_by_instance_uuid( context, instance.uuid) self._checks_for_create_and_rebuild(context, image_id, image, flavor, metadata, files_to_inject, root_bdm, min_comp_ver) # Check the state of the volume. 
If it is not in-use, an exception # will occur when creating attachment during reconstruction, # resulting in the failure of reconstruction and the instance # turning into an error state. self._check_volume_status(context, bdms) # NOTE(sean-k-mooney): When we rebuild with a new image we need to # validate that the NUMA topology does not change as we do a NOOP claim # in resource tracker. As such we cannot allow the resource usage or # assignment to change as a result of a new image altering the # numa constraints. if orig_image_ref != image_href: self._validate_numa_rebuild(instance, image, flavor) kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk( context, None, None, image) def _reset_image_metadata(): """Remove old image properties that we're storing as instance system metadata. These properties start with 'image_'. Then add the properties for the new image. """ # FIXME(comstud): There's a race condition here in that if # the system_metadata for this instance is updated after # we do the previous save() and before we update.. those # other updates will be lost. Since this problem exists in # a lot of other places, I think it should be addressed in # a DB layer overhaul. orig_sys_metadata = dict(instance.system_metadata) # Remove the old keys for key in list(instance.system_metadata.keys()): if key.startswith(utils.SM_IMAGE_PROP_PREFIX): del instance.system_metadata[key] # Add the new ones new_sys_metadata = utils.get_system_metadata_from_image( image, flavor) new_sys_metadata.update({'image_base_image_ref': image_id}) instance.system_metadata.update(new_sys_metadata) instance.save() return orig_sys_metadata # Since image might have changed, we may have new values for # os_type, vm_mode, etc options_from_image = self._inherit_properties_from_image( image, auto_disk_config) instance.update(options_from_image) instance.task_state = task_states.REBUILDING # An empty instance.image_ref is currently used as an indication # of BFV. Preserve that over a rebuild to not break users. if not is_volume_backed: instance.image_ref = image_href instance.kernel_id = kernel_id or "" instance.ramdisk_id = ramdisk_id or "" instance.progress = 0 instance.update(kwargs) instance.save(expected_task_state=[None]) # On a rebuild, since we're potentially changing images, we need to # wipe out the old image properties that we're storing as instance # system metadata... and copy in the properties for the new image. orig_sys_metadata = _reset_image_metadata() self._record_action_start(context, instance, instance_actions.REBUILD) # NOTE(sbauza): The migration script we provided in Newton should make # sure that all our instances are currently migrated to have an # attached RequestSpec object but let's consider that the operator only # half migrated all their instances in the meantime. host = instance.host # If a new image is provided on rebuild, we will need to run # through the scheduler again, but we want the instance to be # rebuilt on the same host it's already on. if orig_image_ref != image_href: # We have to modify the request spec that goes to the scheduler # to contain the new image. We persist this since we've already # changed the instance.image_ref above so we're being # consistent. request_spec.image = objects.ImageMeta.from_dict(image) request_spec.save() if 'scheduler_hints' not in request_spec: request_spec.scheduler_hints = {} # Nuke the id on this so we can't accidentally save # this hint hack later del request_spec.id # NOTE(danms): Passing host=None tells conductor to # call the scheduler. 
The _nova_check_type hint # requires that the scheduler returns only the same # host that we are currently on and only checks # rebuild-related filters. request_spec.scheduler_hints['_nova_check_type'] = ['rebuild'] request_spec.force_hosts = [instance.host] request_spec.force_nodes = [instance.node] host = None self.compute_task_api.rebuild_instance(context, instance=instance, new_pass=admin_password, injected_files=files_to_inject, image_ref=image_href, orig_image_ref=orig_image_ref, orig_sys_metadata=orig_sys_metadata, bdms=bdms, preserve_ephemeral=preserve_ephemeral, host=host, request_spec=request_spec, reimage_boot_volume=reimage_boot_volume, target_state=None) def _check_volume_status(self, context, bdms): """Check whether the status of the volume is "in-use". :param context: A context.RequestContext :param bdms: BlockDeviceMappingList of BDMs for the instance """ for bdm in bdms: if bdm.volume_id: vol = self.volume_api.get(context, bdm.volume_id) self.volume_api.check_attached(context, vol) @staticmethod def _validate_numa_rebuild(instance, image, flavor): """validates that the NUMA constraints do not change on rebuild. :param instance: nova.objects.instance.Instance object :param image: the new image the instance will be rebuilt with. :param flavor: the flavor of the current instance. :raises: nova.exception.ImageNUMATopologyRebuildConflict """ # NOTE(sean-k-mooney): currently it is not possible to express # a PCI NUMA affinity policy via flavor or image but that will # change in the future. we pull out the image metadata into # separate variable to make future testing of this easier. old_image_meta = instance.image_meta new_image_meta = objects.ImageMeta.from_dict(image) old_constraints = hardware.numa_get_constraints(flavor, old_image_meta) new_constraints = hardware.numa_get_constraints(flavor, new_image_meta) # early out for non NUMA instances if old_constraints is None and new_constraints is None: return # if only one of the constraints are non-None (or 'set') then the # constraints changed so raise an exception. if old_constraints is None or new_constraints is None: action = "removing" if old_constraints else "introducing" LOG.debug("NUMA rebuild validation failed. The requested image " "would alter the NUMA constraints by %s a NUMA " "topology.", action, instance=instance) raise exception.ImageNUMATopologyRebuildConflict() # otherwise since both the old a new constraints are non none compare # them as dictionaries. old = old_constraints.obj_to_primitive() new = new_constraints.obj_to_primitive() if old != new: LOG.debug("NUMA rebuild validation failed. The requested image " "conflicts with the existing NUMA constraints.", instance=instance) raise exception.ImageNUMATopologyRebuildConflict() # TODO(sean-k-mooney): add PCI NUMA affinity policy check. @staticmethod def _check_quota_for_upsize(context, instance, current_flavor, new_flavor, is_bfv, is_revert): project_id, user_id = quotas_obj.ids_from_instance(context, instance) # NOTE(johngarbutt) for resize, check for sum of existing usage # plus the usage from new flavor, as it will be claimed in # placement that way, even if there is no change in flavor # But for revert resize, we are just removing claims in placement # so we can ignore the quota check if not is_revert: placement_limits.enforce_num_instances_and_flavor(context, project_id, new_flavor, is_bfv, 1, 1) # Old quota system only looks at the change in size. # Deltas will be empty if the resize is not an upsize. 
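        # For example, resizing from a 2 vCPU / 4096 MB flavor to a
        # 4 vCPU / 8192 MB flavor gives deltas of {'cores': 2, 'ram': 4096},
        # while a downsize or a migration with an unchanged flavor gives an
        # empty dict and the quota check below is skipped.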
deltas = compute_utils.upsize_quota_delta(new_flavor, current_flavor) if deltas: try: res_deltas = {'cores': deltas.get('cores', 0), 'ram': deltas.get('ram', 0)} objects.Quotas.check_deltas(context, res_deltas, project_id, user_id=user_id, check_project_id=project_id, check_user_id=user_id) except exception.OverQuota as exc: quotas = exc.kwargs['quotas'] overs = exc.kwargs['overs'] usages = exc.kwargs['usages'] headroom = compute_utils.get_headroom(quotas, usages, deltas) (overs, reqs, total_alloweds, useds) = compute_utils.get_over_quota_detail(headroom, overs, quotas, deltas) LOG.info("%(overs)s quota exceeded for %(pid)s," " tried to resize instance.", {'overs': overs, 'pid': context.project_id}) raise exception.TooManyInstances(overs=overs, req=reqs, used=useds, allowed=total_alloweds) @check_instance_lock @check_instance_state(vm_state=[vm_states.RESIZED]) def revert_resize(self, context, instance): """Reverts a resize or cold migration, deleting the 'new' instance in the process. """ elevated = context.elevated() migration = objects.Migration.get_by_instance_and_status( elevated, instance.uuid, 'finished') # If this is a resize down, a revert might go over quota. reqspec = objects.RequestSpec.get_by_instance_uuid( context, instance.uuid) self._check_quota_for_upsize(context, instance, instance.flavor, instance.old_flavor, reqspec.is_bfv, is_revert=True) # The AZ for the server may have changed when it was migrated so while # we are in the API and have access to the API DB, update the # instance.availability_zone before casting off to the compute service. # Note that we do this in the API to avoid an "up-call" from the # compute service to the API DB. This is not great in case something # fails during revert before the instance.host is updated to the # original source host, but it is good enough for now. Long-term we # could consider passing the AZ down to compute so it can set it when # the instance.host value is set in finish_revert_resize. instance.availability_zone = ( availability_zones.get_host_availability_zone( context, migration.source_compute)) # If this was a resize, the conductor may have updated the # RequestSpec.flavor field (to point at the new flavor) and the # RequestSpec.numa_topology field (to reflect the new flavor's extra # specs) during the initial resize operation, so we need to update the # RequestSpec to point back at the original flavor and reflect the NUMA # settings of this flavor, otherwise subsequent move operations through # the scheduler will be using the wrong values. There's no need to do # this if the flavor hasn't changed though and we're migrating rather # than resizing. if reqspec.flavor['id'] != instance.old_flavor['id']: reqspec.flavor = instance.old_flavor reqspec.numa_topology = hardware.numa_get_constraints( instance.old_flavor, instance.image_meta) reqspec.save() # NOTE(gibi): This is a performance optimization. If the network info # cache does not have ports with allocations in the binding profile # then we can skip reading port resource request from neutron below. # If a port has resource request then that would have already caused # that the finish_resize call put allocation in the binding profile # during the resize. 
if instance.get_network_info().has_port_with_allocation(): # TODO(gibi): do not directly overwrite the # RequestSpec.requested_resources and # RequestSpec.request_level_paramsas others like cyborg might added # to things there already # NOTE(gibi): We need to collect the requested resource again as it # is intentionally not persisted in nova. Note that this needs to # be done here as the nova API code directly calls revert on the # dest compute service skipping the conductor. port_res_req, req_lvl_params = ( self.network_api.get_requested_resource_for_instance( context, instance.uuid)) reqspec.requested_resources = port_res_req reqspec.request_level_params = req_lvl_params instance.task_state = task_states.RESIZE_REVERTING instance.save(expected_task_state=[None]) migration.status = 'reverting' migration.save() self._record_action_start(context, instance, instance_actions.REVERT_RESIZE) if migration.cross_cell_move: # RPC cast to conductor to orchestrate the revert of the cross-cell # resize. self.compute_task_api.revert_snapshot_based_resize( context, instance, migration) else: # TODO(melwitt): We're not rechecking for strict quota here to # guard against going over quota during a race at this time because # the resource consumption for this operation is written to the # database by compute. self.compute_rpcapi.revert_resize(context, instance, migration, migration.dest_compute, reqspec) @staticmethod def _get_source_compute_service(context, migration): """Find the source compute Service object given the Migration. :param context: nova auth RequestContext target at the destination compute cell :param migration: Migration object for the move operation :return: Service object representing the source host nova-compute """ if migration.cross_cell_move: # The source compute could be in another cell so look up the # HostMapping to determine the source cell. hm = objects.HostMapping.get_by_host( context, migration.source_compute) with nova_context.target_cell(context, hm.cell_mapping) as cctxt: return objects.Service.get_by_compute_host( cctxt, migration.source_compute) # Same-cell migration so just use the context we have. return objects.Service.get_by_compute_host( context, migration.source_compute) @check_instance_lock @check_instance_state(vm_state=[vm_states.RESIZED]) def confirm_resize(self, context, instance, migration=None): """Confirms a migration/resize and deletes the 'old' instance. :param context: nova auth RequestContext :param instance: Instance object to confirm the resize :param migration: Migration object; provided if called from the _poll_unconfirmed_resizes periodic task on the dest compute. :raises: MigrationNotFound if migration is not provided and a migration cannot be found for the instance with status "finished". :raises: ServiceUnavailable if the source compute service is down. """ elevated = context.elevated() # NOTE(melwitt): We're not checking quota here because there isn't a # change in resource usage when confirming a resize. Resource # consumption for resizes are written to the database by compute, so # a confirm resize is just a clean up of the migration objects and a # state change in compute. if migration is None: migration = objects.Migration.get_by_instance_and_status( elevated, instance.uuid, 'finished') # Check if the source compute service is up before modifying the # migration record because once we do we cannot come back through this # method since it will be looking for a "finished" status migration. 
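        # If the source service is down we raise ServiceUnavailable before
        # touching the migration record, leaving its status "finished" so the
        # confirm can simply be retried once the service is back up.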
source_svc = self._get_source_compute_service(context, migration) if not self.servicegroup_api.service_is_up(source_svc): raise exception.ServiceUnavailable() migration.status = 'confirming' migration.save() self._record_action_start(context, instance, instance_actions.CONFIRM_RESIZE) # Check to see if this was a cross-cell resize, in which case the # resized instance is in the target cell (the migration and instance # came from the target cell DB in this case), and we need to cleanup # the source host and source cell database records. if migration.cross_cell_move: self.compute_task_api.confirm_snapshot_based_resize( context, instance, migration) else: # It's a traditional resize within a single cell, so RPC cast to # the source compute host to cleanup the host since the instance # is already on the target host. self.compute_rpcapi.confirm_resize(context, instance, migration, migration.source_compute) def _allow_cross_cell_resize(self, context, instance, min_comp_ver): """Determine if the request can perform a cross-cell resize on this instance. :param context: nova auth request context for the resize operation :param instance: Instance object being resized :returns: True if cross-cell resize is allowed, False otherwise """ # First check to see if the requesting project/user is allowed by # policy to perform cross-cell resize. allowed = context.can( servers_policies.CROSS_CELL_RESIZE, target={'user_id': instance.user_id, 'project_id': instance.project_id}, fatal=False) # If the user is allowed by policy, check to make sure the deployment # is upgraded to the point of supporting cross-cell resize on all # compute services. if allowed: # TODO(mriedem): We can remove this minimum compute version check # in the 22.0.0 "V" release. if min_comp_ver < MIN_COMPUTE_CROSS_CELL_RESIZE: LOG.debug('Request is allowed by policy to perform cross-cell ' 'resize but the minimum nova-compute service ' 'version in the deployment %s is less than %s so ' 'cross-cell resize is not allowed at this time.', min_comp_ver, MIN_COMPUTE_CROSS_CELL_RESIZE) return False res_req, req_lvl_params = ( self.network_api.get_requested_resource_for_instance( context, instance.uuid) ) if res_req: LOG.info( 'Request is allowed by policy to perform cross-cell ' 'resize but the instance has ports with resource request ' 'and cross-cell resize is not supported with such ports.', instance=instance) return False return allowed @staticmethod def _validate_host_for_cold_migrate( context, instance, host_name, allow_cross_cell_resize): """Validates a host specified for cold migration. :param context: nova auth request context for the cold migration :param instance: Instance object being cold migrated :param host_name: User-specified compute service hostname for the desired destination of the instance during the cold migration :param allow_cross_cell_resize: If True, cross-cell resize is allowed for this operation and the host could be in a different cell from the one that the instance is currently in. If False, the specified host must be in the same cell as the instance. :returns: ComputeNode object of the requested host :raises: CannotMigrateToSameHost if the host is the same as the current instance.host :raises: ComputeHostNotFound if the specified host cannot be found """ # Cannot migrate to the host where the instance exists # because it is useless. if host_name == instance.host: raise exception.CannotMigrateToSameHost() # Check whether host exists or not. 
If a cross-cell resize is # allowed, the host could be in another cell from the one the # instance is currently in, so we need to lookup the HostMapping # to get the cell and lookup the ComputeNode in that cell. if allow_cross_cell_resize: try: hm = objects.HostMapping.get_by_host(context, host_name) except exception.HostMappingNotFound: LOG.info('HostMapping not found for host: %s', host_name) raise exception.ComputeHostNotFound(host=host_name) with nova_context.target_cell(context, hm.cell_mapping) as cctxt: node = objects.ComputeNode.\ get_first_node_by_host_for_old_compat( cctxt, host_name, use_slave=True) else: node = objects.ComputeNode.get_first_node_by_host_for_old_compat( context, host_name, use_slave=True) return node @block_shares_not_supported() # TODO(stephenfin): This logic would be so much easier to grok if we # finally split resize and cold migration into separate code paths @block_extended_resource_request @block_port_accelerators() @block_accelerators() @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED]) @check_instance_host(check_is_up=True) def resize(self, context, instance, flavor_id=None, clean_shutdown=True, host_name=None, auto_disk_config=None): """Resize (ie, migrate) a running instance. If flavor_id is None, the process is considered a migration, keeping the original flavor_id. If flavor_id is not None, the instance should be migrated to a new host and resized to the new flavor_id. host_name is always None in the resize case. host_name can be set in the cold migration case only. """ # Only lookup the minimum compute version once min_comp_ver = objects.service.get_minimum_version_all_cells( context, ["nova-compute"]) allow_cross_cell_resize = self._allow_cross_cell_resize( context, instance, min_comp_ver) if host_name is not None: node = self._validate_host_for_cold_migrate( context, instance, host_name, allow_cross_cell_resize) self._check_auto_disk_config( instance, auto_disk_config=auto_disk_config) current_flavor = instance.get_flavor() # NOTE(aarents): Ensure image_base_image_ref is present as it will be # needed during finish_resize/cross_cell_resize. Instances upgraded # from an older nova release may not have this property because of # a rebuild bug Bug/1893618. instance.system_metadata.update( {'image_base_image_ref': instance.image_ref} ) # If flavor_id is not provided, only migrate the instance. volume_backed = None if not flavor_id: LOG.debug("flavor_id is None. Assuming migration.", instance=instance) new_flavor = current_flavor else: new_flavor = flavors.get_flavor_by_flavor_id( flavor_id, read_deleted="no") # NOTE(wenping): We use this instead of the 'block_accelerator' # decorator since the operation can differ depending on args, # and for resize we have two flavors to worry about, we should # reject resize with new flavor with accelerator. if new_flavor.extra_specs.get('accel:device_profile'): raise exception.ForbiddenWithAccelerators() # Check to see if we're resizing to a zero-disk flavor which is # only supported with volume-backed servers. 
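            # A root_gb of 0 means the flavor provides no local root disk, so
            # such a resize only makes sense for servers that boot from a
            # Cinder volume; anything else is rejected below.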
if (new_flavor.get('root_gb') == 0 and current_flavor.get('root_gb') != 0): volume_backed = compute_utils.is_volume_backed_instance( context, instance) if not volume_backed: reason = _('Resize to zero disk flavor is not allowed.') raise exception.CannotResizeDisk(reason=reason) current_flavor_name = current_flavor['name'] new_flavor_name = new_flavor['name'] LOG.debug("Old instance type %(current_flavor_name)s, " "new instance type %(new_flavor_name)s", {'current_flavor_name': current_flavor_name, 'new_flavor_name': new_flavor_name}, instance=instance) same_flavor = current_flavor['id'] == new_flavor['id'] # NOTE(sirp): We don't want to force a customer to change their flavor # when Ops is migrating off of a failed host. if not same_flavor and new_flavor.get('disabled'): raise exception.FlavorNotFound(flavor_id=flavor_id) if same_flavor and flavor_id: raise exception.CannotResizeToSameFlavor() # ensure there is sufficient headroom for upsizes if flavor_id: # Figure out if the instance is volume-backed but only if we didn't # already figure that out above (avoid the extra db hit). if volume_backed is None: # TODO(johngarbutt) should we just use the request spec? volume_backed = compute_utils.is_volume_backed_instance( context, instance) self._check_quota_for_upsize(context, instance, current_flavor, new_flavor, volume_backed, is_revert=False) if not same_flavor: image = utils.get_image_from_system_metadata( instance.system_metadata) # Figure out if the instance is volume-backed but only if we didn't # already figure that out above (avoid the extra db hit). if volume_backed is None: volume_backed = compute_utils.is_volume_backed_instance( context, instance) # If the server is volume-backed, we still want to validate numa # and pci information in the new flavor, but we don't call # _validate_flavor_image_nostatus because how it handles checking # disk size validation was not intended for a volume-backed # resize case. if volume_backed: self._validate_flavor_image_numa_pci( image, new_flavor, validate_pci=True) # The server that image-backed already has the verification of # image min_ram when calling _validate_flavor_image_nostatus. # Here, the verification is added for the server that # volume-backed. 
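                # i.e. reject the resize if the new flavor offers less RAM
                # than the min_ram requirement recorded in the instance's
                # image metadata.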
if new_flavor['memory_mb'] < int(image.get('min_ram', 0)): raise exception.FlavorMemoryTooSmall() else: self._validate_flavor_image_nostatus( context, image, new_flavor, root_bdm=None, validate_pci=True) filter_properties = {'ignore_hosts': []} if not self._allow_resize_to_same_host(same_flavor, instance): filter_properties['ignore_hosts'].append(instance.host) request_spec = objects.RequestSpec.get_by_instance_uuid( context, instance.uuid) request_spec.ignore_hosts = filter_properties['ignore_hosts'] # don't recalculate the NUMA topology unless the flavor has changed if not same_flavor: request_spec.numa_topology = hardware.numa_get_constraints( new_flavor, instance.image_meta) # if the flavor is changed then we need to recalculate the # pci_requests as well because the new flavor might request # different pci_aliases new_pci_requests = pci_request.get_pci_requests_from_flavor( new_flavor) new_pci_requests.instance_uuid = instance.uuid # The neutron based InstancePCIRequest cannot change during resize, # so we just need to copy them from the old request for request in request_spec.pci_requests.requests or []: if request.source == objects.InstancePCIRequest.NEUTRON_PORT: new_pci_requests.requests.append(request) request_spec.pci_requests = new_pci_requests # TODO(huaqiang): Remove in Wallaby # check nova-compute nodes have been updated to Victoria to resize # instance to a new mixed instance from a dedicated or shared # instance. self._check_compute_service_for_mixed_instance( request_spec.numa_topology, min_comp_ver) instance.task_state = task_states.RESIZE_PREP instance.progress = 0 instance.auto_disk_config = auto_disk_config or False instance.save(expected_task_state=[None]) if not flavor_id: self._record_action_start(context, instance, instance_actions.MIGRATE) else: self._record_action_start(context, instance, instance_actions.RESIZE) # TODO(melwitt): We're not rechecking for strict quota here to guard # against going over quota during a race at this time because the # resource consumption for this operation is written to the database # by compute. scheduler_hint = {'filter_properties': filter_properties} if host_name is None: # If 'host_name' is not specified, # clear the 'requested_destination' field of the RequestSpec # except set the allow_cross_cell_move flag since conductor uses # it prior to scheduling. request_spec.requested_destination = objects.Destination( allow_cross_cell_move=allow_cross_cell_resize) else: # Set the host and the node so that the scheduler will # validate them. request_spec.requested_destination = objects.Destination( host=node.host, node=node.hypervisor_hostname, allow_cross_cell_move=allow_cross_cell_resize) # Asynchronously RPC cast to conductor so the response is not blocked # during scheduling. If something fails the user can find out via # instance actions. self.compute_task_api.resize_instance( context, instance, scheduler_hint=scheduler_hint, flavor=new_flavor, clean_shutdown=clean_shutdown, request_spec=request_spec, do_cast=True) def _allow_resize_to_same_host(self, cold_migrate, instance): """Contains logic for excluding the instance.host on resize/migrate. If performing a cold migration and the compute node resource provider reports the COMPUTE_SAME_HOST_COLD_MIGRATE trait then same-host cold migration is allowed otherwise it is not and the current instance.host should be excluded as a scheduling candidate. 
:param cold_migrate: true if performing a cold migration, false for resize :param instance: Instance object being resized or cold migrated :returns: True if same-host resize/cold migrate is allowed, False otherwise """ if cold_migrate: # Check to see if the compute node resource provider on which the # instance is running has the COMPUTE_SAME_HOST_COLD_MIGRATE # trait. # Note that we check this here in the API since we cannot # pre-filter allocation candidates in the scheduler using this # trait as it would not work. For example, libvirt nodes will not # report the trait but using it as a forbidden trait filter when # getting allocation candidates would still return libvirt nodes # which means we could attempt to cold migrate to the same libvirt # node, which would fail. ctxt = instance._context cn = objects.ComputeNode.get_by_host_and_nodename( ctxt, instance.host, instance.node) traits = self.placementclient.get_provider_traits( ctxt, cn.uuid).traits # If the provider has the trait it is (1) new enough to report that # trait and (2) supports cold migration on the same host. if os_traits.COMPUTE_SAME_HOST_COLD_MIGRATE in traits: allow_same_host = True else: # TODO(mriedem): Remove this compatibility code after one # release. If the compute is old we will not know if it # supports same-host cold migration so we fallback to config. service = objects.Service.get_by_compute_host(ctxt, cn.host) if service.version >= MIN_COMPUTE_SAME_HOST_COLD_MIGRATE: # The compute is new enough to report the trait but does # not so same-host cold migration is not allowed. allow_same_host = False else: # The compute is not new enough to report the trait so we # fallback to config. allow_same_host = CONF.allow_resize_to_same_host else: allow_same_host = CONF.allow_resize_to_same_host return allow_same_host @block_shares_not_supported() @block_port_accelerators() @reject_vtpm_instances(instance_actions.SHELVE) @block_accelerators(until_service=54) @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED, vm_states.PAUSED, vm_states.SUSPENDED]) def shelve(self, context, instance, clean_shutdown=True): """Shelve an instance. Shuts down an instance and frees it up to be removed from the hypervisor. """ instance.task_state = task_states.SHELVING # NOTE(aarents): Ensure image_base_image_ref is present as it will be # needed during unshelve and instance rebuild done before Bug/1893618 # Fix dropped it. 
instance.system_metadata.update( {'image_base_image_ref': instance.image_ref} ) instance.save(expected_task_state=[None]) self._record_action_start(context, instance, instance_actions.SHELVE) accel_uuids = [] if instance.flavor.extra_specs.get('accel:device_profile'): cyclient = cyborg.get_client(context) accel_uuids = cyclient.get_arq_uuids_for_instance(instance) if not compute_utils.is_volume_backed_instance(context, instance): name = '%s-shelved' % instance.display_name image_meta = compute_utils.create_image( context, instance, name, 'snapshot', self.image_api) image_id = image_meta['id'] self.compute_rpcapi.shelve_instance(context, instance=instance, image_id=image_id, clean_shutdown=clean_shutdown, accel_uuids=accel_uuids) else: self.compute_rpcapi.shelve_offload_instance( context, instance=instance, clean_shutdown=clean_shutdown, accel_uuids=accel_uuids) @block_port_accelerators() @check_instance_lock @check_instance_state(vm_state=[vm_states.SHELVED]) def shelve_offload(self, context, instance, clean_shutdown=True): """Remove a shelved instance from the hypervisor.""" instance.task_state = task_states.SHELVING_OFFLOADING instance.save(expected_task_state=[None]) self._record_action_start(context, instance, instance_actions.SHELVE_OFFLOAD) accel_uuids = [] if instance.flavor.extra_specs.get('accel:device_profile'): cyclient = cyborg.get_client(context) accel_uuids = cyclient.get_arq_uuids_for_instance(instance) self.compute_rpcapi.shelve_offload_instance( context, instance=instance, clean_shutdown=clean_shutdown, accel_uuids=accel_uuids) def _check_offloaded(self, context, instance): """Check if the status of an instance is SHELVE_OFFLOADED, if not raise an exception. """ if instance.vm_state != vm_states.SHELVED_OFFLOADED: # NOTE(brinzhang): If the server status is 'SHELVED', it still # belongs to a host, the availability_zone should not change. # Unshelving a shelved offloaded server will go through the # scheduler to find a new host. raise exception.UnshelveInstanceInvalidState( state=instance.vm_state, instance_uuid=instance.uuid) def _ensure_host_in_az(self, context, host, availability_zone): """Ensure the host provided belongs to the availability zone, if not raise an exception. """ if availability_zone is not None: host_az = availability_zones.get_host_availability_zone( context, host ) if host_az != availability_zone: raise exception.UnshelveHostNotInAZ( host=host, availability_zone=availability_zone) def _validate_unshelve_az(self, context, instance, availability_zone): """Verify the specified availability_zone during unshelve. Verifies the AZ exists and if [cinder]/cross_az_attach=False, that any attached volumes are in the same AZ. :param context: nova auth RequestContext for the unshelve action :param instance: Instance object for the server being unshelved :param availability_zone: The user-requested availability zone in which to unshelve the server. 
:raises: InvalidRequest if the requested AZ does not exist :raises: MismatchVolumeAZException if [cinder]/cross_az_attach=False and any attached volumes are not in the requested AZ """ available_zones = availability_zones.get_availability_zones( context, self.host_api, get_only_available=True) if availability_zone not in available_zones: msg = _('The requested availability zone is not available') raise exception.InvalidRequest(msg) # NOTE(brinzhang): When specifying a availability zone to unshelve # a shelved offloaded server, and conf cross_az_attach=False, need # to determine if attached volume AZ matches the user-specified AZ. if not CONF.cinder.cross_az_attach: bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) for bdm in bdms: if bdm.is_volume and bdm.volume_id: volume = self.volume_api.get(context, bdm.volume_id) if availability_zone != volume['availability_zone']: msg = _("The specified availability zone does not " "match the volume %(vol_id)s attached to the " "server. Specified availability zone is " "%(az)s. Volume is in %(vol_zone)s.") % { "vol_id": volume['id'], "az": availability_zone, "vol_zone": volume['availability_zone']} raise exception.MismatchVolumeAZException(reason=msg) @staticmethod def _check_quota_unshelve_offloaded( context: nova_context.RequestContext, instance: 'objects.Instance', request_spec: 'objects.RequestSpec' ): if not (CONF.quota.count_usage_from_placement or limit_utils.use_unified_limits()): return # TODO(melwitt): This is ugly but we have to do it this way because # instances quota is currently counted from the API database but cores # and ram are counted from placement. That means while an instance is # SHELVED_OFFLOADED, it will still consume instances quota but it will # not consume cores and ram. So we need an instances delta of # 0 but cores and ram deltas from the flavor. # Once instances usage is also being counted from placement, we can # replace this method with a normal check_num_instances_quota() call. vcpus = instance.flavor.vcpus memory_mb = instance.flavor.memory_mb # We are not looking to create a new server, we are unshelving an # existing one. deltas = {'instances': 0, 'cores': vcpus, 'ram': memory_mb} objects.Quotas.check_deltas( context, deltas, context.project_id, user_id=context.user_id, check_project_id=instance.project_id, check_user_id=instance.user_id, ) # Do the same for unified limits. placement_limits.enforce_num_instances_and_flavor( context, context.project_id, instance.flavor, request_spec.is_bfv, 0, 0, delta_updates={'servers': 0}) @block_extended_resource_request @check_instance_lock @check_instance_state( vm_state=[vm_states.SHELVED, vm_states.SHELVED_OFFLOADED]) def unshelve( self, context, instance, new_az=_sentinel, host=None): """Restore a shelved instance. :param context: the nova request context :param instance: nova.objects.instance.Instance object :param new_az: (optional) target AZ. If None is provided then the current AZ restriction will be removed from the instance. If the parameter is not provided then the current AZ restriction will not be changed. :param host: (optional) a host to target """ # Unshelving a shelved offloaded server will go through the # scheduler to pick a new host, so we update the # RequestSpec.availability_zone here. Note that if scheduling # fails the RequestSpec will remain updated, which is not great. 
# Bug open to track this https://bugs.launchpad.net/nova/+bug/1978573 az_passed = new_az is not self._sentinel request_spec = objects.RequestSpec.get_by_instance_uuid( context, instance.uuid) # Check quota before we save any changes to the database, but only if # we are counting quota usage from placement. When an instance is # SHELVED_OFFLOADED, it will not consume cores or ram resources in # placement. This means it is possible that an unshelve would cause the # project/user to go over quota. if instance.vm_state == vm_states.SHELVED_OFFLOADED: self._check_quota_unshelve_offloaded( context, instance, request_spec) # We need to check a list of preconditions and validate inputs first # Ensure instance is shelve offloaded if az_passed or host: self._check_offloaded(context, instance) if az_passed and new_az: # we have to ensure that new AZ is valid self._validate_unshelve_az(context, instance, new_az) # This will be the AZ of the instance after the unshelve. It can be # None indicating that the instance is not pinned to any AZ after the # unshelve expected_az_after_unshelve = ( request_spec.availability_zone if not az_passed else new_az ) # host is requested, so we have to see if it exists and does not # contradict with the AZ of the instance if host: # Make sure only admin can unshelve to a specific host. context.can( shelve_policies.POLICY_ROOT % 'unshelve_to_host', target={ 'user_id': instance.user_id, 'project_id': instance.project_id } ) # Ensure that the requested host exists otherwise raise # a ComputeHostNotFound exception objects.ComputeNode.get_first_node_by_host_for_old_compat( context, host, use_slave=True) # A specific host is requested so we need to make sure that it is # not contradicts with the AZ of the instance self._ensure_host_in_az( context, host, expected_az_after_unshelve) if new_az is None: LOG.debug( 'Unpin instance from AZ "%(old_az)s".', {'old_az': request_spec.availability_zone}, instance=instance ) LOG.debug( 'Unshelving instance with old availability_zone "%(old_az)s" to ' 'new availability_zone "%(new_az)s" and host "%(host)s".', { 'old_az': request_spec.availability_zone, 'new_az': '%s' % new_az if az_passed else 'not provided', 'host': host, }, instance=instance, ) # OK every precondition checks out, we just need to tell the scheduler # where to put the instance # We have the expected AZ already calculated. 
So we just need to # set it in the request_spec to drive the scheduling request_spec.availability_zone = expected_az_after_unshelve # if host is requested we also need to tell the scheduler that if host: request_spec.requested_destination = objects.Destination(host=host) request_spec.save() instance.task_state = task_states.UNSHELVING instance.save(expected_task_state=[None]) self._record_action_start(context, instance, instance_actions.UNSHELVE) self.compute_task_api.unshelve_instance(context, instance, request_spec) @check_instance_lock def add_fixed_ip(self, context, instance, network_id): """Add fixed_ip from specified network to given instance.""" self.compute_rpcapi.add_fixed_ip_to_instance(context, instance=instance, network_id=network_id) @check_instance_lock def remove_fixed_ip(self, context, instance, address): """Remove fixed_ip from specified network to given instance.""" self.compute_rpcapi.remove_fixed_ip_from_instance(context, instance=instance, address=address) @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE]) def pause(self, context, instance): """Pause the given instance.""" instance.task_state = task_states.PAUSING instance.save(expected_task_state=[None]) self._record_action_start(context, instance, instance_actions.PAUSE) self.compute_rpcapi.pause_instance(context, instance) @check_instance_lock @check_instance_state(vm_state=[vm_states.PAUSED]) def unpause(self, context, instance): """Unpause the given instance.""" instance.task_state = task_states.UNPAUSING instance.save(expected_task_state=[None]) self._record_action_start(context, instance, instance_actions.UNPAUSE) self.compute_rpcapi.unpause_instance(context, instance) @check_instance_host() def get_diagnostics(self, context, instance): """Retrieve diagnostics for the given instance.""" return self.compute_rpcapi.get_diagnostics(context, instance=instance) @check_instance_host() def get_instance_diagnostics(self, context, instance): """Retrieve diagnostics for the given instance.""" return self.compute_rpcapi.get_instance_diagnostics(context, instance=instance) @block_shares_not_supported() @block_port_accelerators() @reject_vdpa_instances( instance_actions.SUSPEND, until=MIN_COMPUTE_VDPA_HOTPLUG_LIVE_MIGRATION ) @block_accelerators() @reject_sev_instances(instance_actions.SUSPEND) @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE]) def suspend(self, context, instance): """Suspend the given instance.""" instance.task_state = task_states.SUSPENDING instance.save(expected_task_state=[None]) self._record_action_start(context, instance, instance_actions.SUSPEND) self.compute_rpcapi.suspend_instance(context, instance) @check_instance_lock @reject_vdpa_instances( instance_actions.RESUME, until=MIN_COMPUTE_VDPA_HOTPLUG_LIVE_MIGRATION ) @check_instance_state(vm_state=[vm_states.SUSPENDED]) def resume(self, context, instance): """Resume the given instance.""" instance.task_state = task_states.RESUMING instance.save(expected_task_state=[None]) self._record_action_start(context, instance, instance_actions.RESUME) self.compute_rpcapi.resume_instance(context, instance) @reject_vtpm_instances(instance_actions.RESCUE) @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED, vm_states.ERROR]) def rescue(self, context, instance, rescue_password=None, rescue_image_ref=None, clean_shutdown=True, allow_bfv_rescue=False): """Rescue the given instance.""" image_meta = None if rescue_image_ref: try: image_meta = image_meta_obj.ImageMeta.from_image_ref( context, 
self.image_api, rescue_image_ref) except (exception.ImageNotFound, exception.ImageBadRequest): LOG.warning("Failed to fetch rescue image metadata using " "image_ref %(image_ref)s", {'image_ref': rescue_image_ref}) raise exception.UnsupportedRescueImage( image=rescue_image_ref) # FIXME(lyarwood): There is currently no support for rescuing # instances using a volume snapshot so fail here before we cast to # the compute. if image_meta.properties.get('img_block_device_mapping'): LOG.warning("Unable to rescue an instance using a volume " "snapshot image with img_block_device_mapping " "image properties set") raise exception.UnsupportedRescueImage( image=rescue_image_ref) else: image_meta = instance.image_meta bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) self._check_volume_status(context, bdms) volume_backed = compute_utils.is_volume_backed_instance( context, instance, bdms) allow_bfv_rescue &= 'hw_rescue_bus' in image_meta.properties and \ 'hw_rescue_device' in image_meta.properties if volume_backed and allow_bfv_rescue: cn = objects.ComputeNode.get_by_host_and_nodename( context, instance.host, instance.node) traits = self.placementclient.get_provider_traits( context, cn.uuid).traits if os_traits.COMPUTE_RESCUE_BFV not in traits: reason = _("Host unable to rescue a volume-backed instance") raise exception.InstanceNotRescuable(instance_id=instance.uuid, reason=reason) elif volume_backed: reason = _("Cannot rescue a volume-backed instance") raise exception.InstanceNotRescuable(instance_id=instance.uuid, reason=reason) instance.task_state = task_states.RESCUING instance.save(expected_task_state=[None]) self._record_action_start(context, instance, instance_actions.RESCUE) self.compute_rpcapi.rescue_instance(context, instance=instance, rescue_password=rescue_password, rescue_image_ref=rescue_image_ref, clean_shutdown=clean_shutdown) @check_instance_lock @check_instance_state(vm_state=[vm_states.RESCUED]) def unrescue(self, context, instance): """Unrescue the given instance.""" instance.task_state = task_states.UNRESCUING instance.save(expected_task_state=[None]) self._record_action_start(context, instance, instance_actions.UNRESCUE) self.compute_rpcapi.unrescue_instance(context, instance=instance) @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE]) def set_admin_password(self, context, instance, password): """Set the root/admin password for the given instance. @param context: Nova auth context. @param instance: Nova instance object. @param password: The admin password for the instance. 
""" instance.task_state = task_states.UPDATING_PASSWORD instance.save(expected_task_state=[None]) self._record_action_start(context, instance, instance_actions.CHANGE_PASSWORD) self.compute_rpcapi.set_admin_password(context, instance=instance, new_pass=password) @check_instance_host() @reject_instance_state( task_state=[task_states.DELETING, task_states.MIGRATING]) def get_vnc_console(self, context, instance, console_type): """Get a url to an instance Console.""" connect_info = self.compute_rpcapi.get_vnc_console(context, instance=instance, console_type=console_type) return {'url': connect_info['access_url']} @check_instance_host() @reject_instance_state( task_state=[task_states.DELETING, task_states.MIGRATING]) def get_spice_console(self, context, instance, console_type): """Get a url to an instance Console.""" connect_info = self.compute_rpcapi.get_spice_console(context, instance=instance, console_type=console_type) return {'url': connect_info['access_url']} @check_instance_host() @reject_instance_state( task_state=[task_states.DELETING, task_states.MIGRATING]) def get_serial_console(self, context, instance, console_type): """Get a url to a serial console.""" connect_info = self.compute_rpcapi.get_serial_console(context, instance=instance, console_type=console_type) return {'url': connect_info['access_url']} @check_instance_host() @reject_instance_state( task_state=[task_states.DELETING, task_states.MIGRATING]) def get_mks_console(self, context, instance, console_type): """Get a url to a MKS console.""" connect_info = self.compute_rpcapi.get_mks_console(context, instance=instance, console_type=console_type) return {'url': connect_info['access_url']} @check_instance_host() def get_console_output(self, context, instance, tail_length=None): """Get console output for an instance.""" return self.compute_rpcapi.get_console_output(context, instance=instance, tail_length=tail_length) def lock(self, context, instance, reason=None): """Lock the given instance.""" # Only update the lock if we are an admin (non-owner) is_owner = instance.project_id == context.project_id if instance.locked and is_owner: return context = context.elevated() self._record_action_start(context, instance, instance_actions.LOCK) @wrap_instance_event(prefix='api') def lock(self, context, instance, reason=None): LOG.debug('Locking', instance=instance) instance.locked = True instance.locked_by = 'owner' if is_owner else 'admin' if reason: instance.system_metadata['locked_reason'] = reason instance.save() lock(self, context, instance, reason=reason) compute_utils.notify_about_instance_action( context, instance, CONF.host, action=fields_obj.NotificationAction.LOCK, source=fields_obj.NotificationSource.API) def is_expected_locked_by(self, context, instance): is_owner = instance.project_id == context.project_id expect_locked_by = 'owner' if is_owner else 'admin' locked_by = instance.locked_by if locked_by and locked_by != expect_locked_by: return False return True def unlock(self, context, instance): """Unlock the given instance.""" context = context.elevated() self._record_action_start(context, instance, instance_actions.UNLOCK) @wrap_instance_event(prefix='api') def unlock(self, context, instance): LOG.debug('Unlocking', instance=instance) instance.locked = False instance.locked_by = None instance.system_metadata.pop('locked_reason', None) instance.save() unlock(self, context, instance) compute_utils.notify_about_instance_action( context, instance, CONF.host, action=fields_obj.NotificationAction.UNLOCK, 
source=fields_obj.NotificationSource.API) @check_instance_lock def inject_network_info(self, context, instance): """Inject network info for the instance.""" self.compute_rpcapi.inject_network_info(context, instance=instance) def _create_volume_bdm(self, context, instance, device, volume, disk_bus, device_type, is_local_creation=False, tag=None, delete_on_termination=False): volume_id = volume['id'] if is_local_creation: # when the creation is done locally we can't specify the device # name as we do not have a way to check that the name specified is # a valid one. # We leave the setting of that value to when the actual attach # happens on the compute manager. # NOTE(artom) Local attach (to a shelved-offload instance) cannot # support device tagging because we have no way to call the compute # manager to check that it supports device tagging. In fact, we # don't even know which compute manager the instance will # eventually end up on when it's unshelved. volume_bdm = objects.BlockDeviceMapping( context=context, source_type='volume', destination_type='volume', instance_uuid=instance.uuid, boot_index=None, volume_id=volume_id, device_name=None, guest_format=None, disk_bus=disk_bus, device_type=device_type, delete_on_termination=delete_on_termination) volume_bdm.create() else: # NOTE(vish): This is done on the compute host because we want # to avoid a race where two devices are requested at # the same time. When db access is removed from # compute, the bdm will be created here and we will # have to make sure that they are assigned atomically. volume_bdm = self.compute_rpcapi.reserve_block_device_name( context, instance, device, volume_id, disk_bus=disk_bus, device_type=device_type, tag=tag, multiattach=volume['multiattach']) volume_bdm.delete_on_termination = delete_on_termination volume_bdm.save() return volume_bdm def _check_volume_already_attached( self, context: nova_context.RequestContext, instance: objects.Instance, volume: ty.Mapping[str, ty.Any], ): """Avoid duplicate volume attachments. Since the 3.44 Cinder microversion, Cinder allows us to attach the same volume to the same instance twice. This is ostensibly to enable live migration, but it's not something we want to occur outside of this particular code path. In addition, we also need to ensure that non-multiattached volumes are not attached to multiple instances. This check is also carried out later by c-api itself but it can, however, be circumvented by admins resetting the state of an attached volume to available. As a result we also need to perform a check within Nova before creating a new BDM for the attachment. :param context: nova auth RequestContext :param instance: Instance object :param volume: volume dict from cinder """ # Fetch a list of active bdms for the volume, return if none are found.
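# Illustrative sketch (an editorial addition, not part of the upstream
# code): the ``volume`` argument is the nova-translated volume dict from
# nova.volume.cinder; only a couple of its keys are consulted below
# (values shown are hypothetical):
#
#     volume = {'id': 'a3a1f27e-...', 'multiattach': False}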
try: bdms = objects.BlockDeviceMappingList.get_by_volume( context, volume['id']) except exception.VolumeBDMNotFound: return # Fail if the volume isn't multiattach but BDMs already exist if not volume.get('multiattach'): instance_uuids = ' '.join(f"{b.instance_uuid}" for b in bdms) msg = _( "volume %(volume_id)s is already attached to instances: " "%(instance_uuids)s" ) % { 'volume_id': volume['id'], 'instance_uuids': instance_uuids } raise exception.InvalidVolume(reason=msg) # Fail if the volume is already attached to our instance if any(b for b in bdms if b.instance_uuid == instance.uuid): msg = _("volume %s already attached") % volume['id'] raise exception.InvalidVolume(reason=msg) def _check_attach_and_reserve_volume(self, context, volume, instance, bdm, supports_multiattach=False, validate_az=True): """Perform checks against the instance and volume before attaching. If validation succeeds, the bdm is updated with an attachment_id which effectively reserves it during the attach process in cinder. :param context: nova auth RequestContext :param volume: volume dict from cinder :param instance: Instance object :param bdm: BlockDeviceMapping object :param supports_multiattach: True if the request supports multiattach volumes, i.e. microversion >= 2.60, False otherwise :param validate_az: True if the instance and volume availability zones should be validated for cross_az_attach, False to not validate AZ """ volume_id = volume['id'] if validate_az: self.volume_api.check_availability_zone(context, volume, instance=instance) # If volume.multiattach=True and the microversion to # support multiattach is not used, fail the request. if volume['multiattach'] and not supports_multiattach: raise exception.MultiattachNotSupportedOldMicroversion() attachment_id = self.volume_api.attachment_create( context, volume_id, instance.uuid)['id'] bdm.attachment_id = attachment_id # NOTE(ildikov): In case of boot from volume the BDM at this # point is not yet created in a cell database, so we can't # call save(). When attaching a volume to an existing # instance, the instance is already in a cell and the BDM has # been created in that same cell so updating here in that case # is "ok". if bdm.obj_attr_is_set('id'): bdm.save() # TODO(stephenfin): Fold this back in now that cells v1 no longer needs to # override it. def _attach_volume(self, context, instance, volume, device, disk_bus, device_type, tag=None, supports_multiattach=False, delete_on_termination=False): """Attach an existing volume to an existing instance. This method is separated to make it possible for cells version to override it. """ try: volume_bdm = self._create_volume_bdm( context, instance, device, volume, disk_bus=disk_bus, device_type=device_type, tag=tag, delete_on_termination=delete_on_termination) except oslo_exceptions.MessagingTimeout: # The compute node might have already created the attachment but # we never received the answer. In this case it is safe to delete # the attachment as nobody will ever pick it up again. with excutils.save_and_reraise_exception(): try: objects.BlockDeviceMapping.get_by_volume_and_instance( context, volume['id'], instance.uuid).destroy() LOG.debug("Delete BDM after compute did not respond to " f"attachment request for volume {volume['id']}") except exception.VolumeBDMNotFound: LOG.debug("BDM not found, ignoring removal. 
" f"Error attaching volume {volume['id']}") try: self._check_attach_and_reserve_volume(context, volume, instance, volume_bdm, supports_multiattach) self._record_action_start( context, instance, instance_actions.ATTACH_VOLUME) self.compute_rpcapi.attach_volume(context, instance, volume_bdm) except Exception: with excutils.save_and_reraise_exception(): volume_bdm.destroy() return volume_bdm.device_name def _attach_volume_shelved_offloaded(self, context, instance, volume, device, disk_bus, device_type, delete_on_termination): """Attach an existing volume to an instance in shelved offloaded state. Attaching a volume for an instance in shelved offloaded state requires to perform the regular check to see if we can attach and reserve the volume then we need to call the attach method on the volume API to mark the volume as 'in-use'. The instance at this stage is not managed by a compute manager therefore the actual attachment will be performed once the instance will be unshelved. """ volume_id = volume['id'] @wrap_instance_event(prefix='api') def attach_volume(self, context, v_id, instance, dev, attachment_id): if attachment_id: # Normally we wouldn't complete an attachment without a host # connector, but we do this to make the volume status change # to "in-use" to maintain the API semantics with the old flow. # When unshelving the instance, the compute service will deal # with this disconnected attachment. self.volume_api.attachment_complete(context, attachment_id) else: self.volume_api.attach(context, v_id, instance.uuid, dev) volume_bdm = self._create_volume_bdm( context, instance, device, volume, disk_bus=disk_bus, device_type=device_type, is_local_creation=True, delete_on_termination=delete_on_termination) try: self._check_attach_and_reserve_volume(context, volume, instance, volume_bdm) self._record_action_start( context, instance, instance_actions.ATTACH_VOLUME) attach_volume(self, context, volume_id, instance, device, volume_bdm.attachment_id) except Exception: with excutils.save_and_reraise_exception(): volume_bdm.destroy() return volume_bdm.device_name @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED, vm_states.STOPPED, vm_states.RESIZED, vm_states.SOFT_DELETED, vm_states.SHELVED, vm_states.SHELVED_OFFLOADED]) def attach_volume(self, context, instance, volume_id, device=None, disk_bus=None, device_type=None, tag=None, supports_multiattach=False, delete_on_termination=False): """Attach an existing volume to an existing instance.""" # NOTE(vish): Fail fast if the device is not going to pass. This # will need to be removed along with the test if we # change the logic in the manager for what constitutes # a valid device. if device and not block_device.match_device(device): raise exception.InvalidDevicePath(path=device) # Make sure the volume isn't already attached to this instance # because we'll use the v3.44 attachment flow in # _check_attach_and_reserve_volume and Cinder will allow multiple # attachments between the same volume and instance but the old flow # API semantics don't allow that so we enforce it here. # NOTE(lyarwood): Ensure that non multiattach volumes don't already # have active block device mappings present in Nova. 
volume = self.volume_api.get(context, volume_id) self._check_volume_already_attached(context, instance, volume) is_shelved_offloaded = instance.vm_state == vm_states.SHELVED_OFFLOADED if is_shelved_offloaded: if tag: # NOTE(artom) Local attach (to a shelved-offload instance) # cannot support device tagging because we have no way to call # the compute manager to check that it supports device tagging. # In fact, we don't even know which compute manager the # instance will eventually end up on when it's unshelved. raise exception.VolumeTaggedAttachToShelvedNotSupported() if volume['multiattach']: # NOTE(mriedem): Similar to tagged attach, we don't support # attaching a multiattach volume to shelved offloaded instances # because we can't tell if the compute host (since there isn't # one) supports it. This could possibly be supported in the # future if the scheduler was made aware of which computes # support multiattach volumes. raise exception.MultiattachToShelvedNotSupported() return self._attach_volume_shelved_offloaded(context, instance, volume, device, disk_bus, device_type, delete_on_termination) return self._attach_volume(context, instance, volume, device, disk_bus, device_type, tag=tag, supports_multiattach=supports_multiattach, delete_on_termination=delete_on_termination) def _detach_volume_shelved_offloaded(self, context, instance, volume): """Detach a volume from an instance in shelved offloaded state. If the instance is shelved offloaded we just need to clean up the volume by calling the volume API detach and terminate_connection methods and deleting the BDM record. If the volume has the delete_on_termination option set then we call the volume API delete as well. """ @wrap_instance_event(prefix='api') def detach_volume(self, context, instance, bdms): self._local_cleanup_bdm_volumes(bdms, instance, context) bdms = [objects.BlockDeviceMapping.get_by_volume_id( context, volume['id'], instance.uuid)] # The begin_detaching() call only works with in-use volumes, # which will not be the case for volumes attached to a shelved # offloaded server via the attachments API since those volumes # will have `reserved` status.
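# Hedged summary (an editorial addition, not from the upstream code): for
# an old-style attachment (no attachment_id on the BDM) the volume is
# expected to be 'in-use', so begin_detaching() below should succeed; for
# a new-style, attachment-based attach made while shelved offloaded the
# volume is 'reserved' and the begin_detaching() step is skipped.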
if not bdms[0].attachment_id: try: self.volume_api.begin_detaching(context, volume['id']) except exception.InvalidInput as exc: raise exception.InvalidVolume(reason=exc.format_message()) self._record_action_start( context, instance, instance_actions.DETACH_VOLUME) detach_volume(self, context, instance, bdms) @check_instance_host(check_is_up=True) def _detach_volume(self, context, instance, volume): try: self.volume_api.begin_detaching(context, volume['id']) except exception.InvalidInput as exc: raise exception.InvalidVolume(reason=exc.format_message()) attachments = volume.get('attachments', {}) attachment_id = None if attachments and instance.uuid in attachments: attachment_id = attachments[instance.uuid]['attachment_id'] self._record_action_start( context, instance, instance_actions.DETACH_VOLUME) self.compute_rpcapi.detach_volume(context, instance=instance, volume_id=volume['id'], attachment_id=attachment_id) @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED, vm_states.STOPPED, vm_states.RESIZED, vm_states.SOFT_DELETED, vm_states.SHELVED, vm_states.SHELVED_OFFLOADED]) def detach_volume(self, context, instance, volume): """Detach a volume from an instance.""" if instance.vm_state == vm_states.SHELVED_OFFLOADED: self._detach_volume_shelved_offloaded(context, instance, volume) else: self._detach_volume(context, instance, volume) def _count_attachments_for_swap(self, ctxt, volume): """Counts the number of attachments for a swap-related volume. Attempts to only count read/write attachments if the volume attachment records exist, otherwise simply just counts the number of attachments regardless of attach mode. :param ctxt: nova.context.RequestContext - user request context :param volume: nova-translated volume dict from nova.volume.cinder. :returns: count of attachments for the volume """ # This is a dict, keyed by server ID, to a dict of attachment_id and # mountpoint. attachments = volume.get('attachments', {}) # Multiattach volumes can have more than one attachment, so if there # is more than one attachment, attempt to count the read/write # attachments. if len(attachments) > 1: count = 0 for attachment in attachments.values(): attachment_id = attachment['attachment_id'] # Get the attachment record for this attachment so we can # get the attach_mode. # TODO(mriedem): This could be optimized if we had # GET /attachments/detail?volume_id=volume['id'] in Cinder. try: attachment_record = self.volume_api.attachment_get( ctxt, attachment_id) # Note that the attachment record from Cinder has # attach_mode in the top-level of the resource but the # nova.volume.cinder code translates it and puts the # attach_mode in the connection_info for some legacy # reason... if attachment_record['attach_mode'] == 'rw': count += 1 except exception.VolumeAttachmentNotFound: # attachments are read/write by default so count it count += 1 else: count = len(attachments) return count @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED, vm_states.RESIZED]) def swap_volume(self, context, instance, old_volume, new_volume): """Swap volume attached to an instance.""" # The caller likely got the instance from volume['attachments'] # in the first place, but let's sanity check. 
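# Illustrative sketch (an editorial addition, not part of the upstream
# code): per the comment in _count_attachments_for_swap above, the
# 'attachments' value is keyed by server UUID; hypothetical example:
#
#     old_volume['attachments'] = {
#         instance.uuid: {'attachment_id': '6b9f...',
#                         'mountpoint': '/dev/vdb'},
#     }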
if not old_volume.get('attachments', {}).get(instance.uuid): msg = _("Old volume is attached to a different instance.") raise exception.InvalidVolume(reason=msg) if new_volume['attach_status'] == 'attached': msg = _("New volume must be detached in order to swap.") raise exception.InvalidVolume(reason=msg) if int(new_volume['size']) < int(old_volume['size']): msg = _("New volume must be the same size or larger.") raise exception.InvalidVolume(reason=msg) self.volume_api.check_availability_zone(context, new_volume, instance=instance) try: self.volume_api.begin_detaching(context, old_volume['id']) except exception.InvalidInput as exc: raise exception.InvalidVolume(reason=exc.format_message()) # Disallow swapping from multiattach volumes that have more than one # read/write attachment. We know the old_volume has at least one # attachment since it's attached to this server. The new_volume # can't have any attachments because of the attach_status check above. # We do this count after calling "begin_detaching" to lock against # concurrent attachments being made while we're counting. try: if self._count_attachments_for_swap(context, old_volume) > 1: raise exception.MultiattachSwapVolumeNotSupported() except Exception: # This is generic to handle failures while counting # We need to reset the detaching status before raising. with excutils.save_and_reraise_exception(): self.volume_api.roll_detaching(context, old_volume['id']) # Get the BDM for the attached (old) volume so we can tell if it was # attached with the new-style Cinder 3.44 API. bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( context, old_volume['id'], instance.uuid) new_attachment_id = None if bdm.attachment_id is None: # This is an old-style attachment so reserve the new volume before # we cast to the compute host. self.volume_api.reserve_volume(context, new_volume['id']) else: try: self._check_volume_already_attached( context, instance, new_volume) except exception.InvalidVolume: with excutils.save_and_reraise_exception(): self.volume_api.roll_detaching(context, old_volume['id']) # This is a new-style attachment so for the volume that we are # going to swap to, create a new volume attachment. new_attachment_id = self.volume_api.attachment_create( context, new_volume['id'], instance.uuid)['id'] self._record_action_start( context, instance, instance_actions.SWAP_VOLUME) try: self.compute_rpcapi.swap_volume( context, instance=instance, old_volume_id=old_volume['id'], new_volume_id=new_volume['id'], new_attachment_id=new_attachment_id) except Exception: with excutils.save_and_reraise_exception(): self.volume_api.roll_detaching(context, old_volume['id']) if new_attachment_id is None: self.volume_api.unreserve_volume(context, new_volume['id']) else: self.volume_api.attachment_delete( context, new_attachment_id) def ensure_compute_version_for_resource_request( self, context, instance, port ): """Checks that the compute service version is new enough for the resource request of the port. """ if self.network_api.has_extended_resource_request_extension( context ): # TODO(gibi): Remove this check in Y where we can be sure that # the compute is already upgraded to X. 
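# Illustrative sketch (an editorial addition, not part of the upstream
# code): with the extended resource request extension the port carries a
# payload roughly like the structure below; only the presence of
# 'request_groups' matters here, and the group contents are purely a
# hypothetical example:
#
#     port['resource_request'] = {
#         'request_groups': [
#             {'resources': {'NET_BW_EGR_KILOBIT_PER_SEC': 1000}},
#         ],
#     }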
res_req = port.get(constants.RESOURCE_REQUEST) or {} groups = res_req.get('request_groups', []) if groups: svc = objects.Service.get_by_host_and_binary( context, instance.host, 'nova-compute') if svc.version < MIN_COMPUTE_INT_ATTACH_WITH_EXTENDED_RES_REQ: raise exception.ExtendedResourceRequestOldCompute() else: # NOTE(gibi): Checking if the requested port has resource request # as such ports are only supported if the compute service version # is >= 55. # TODO(gibi): Remove this check in X as there we can be sure # that all computes are new enough. if port.get(constants.RESOURCE_REQUEST): svc = objects.Service.get_by_host_and_binary( context, instance.host, 'nova-compute') if svc.version < 55: raise exception.AttachInterfaceWithQoSPolicyNotSupported( instance_uuid=instance.uuid) @check_instance_lock @reject_vdpa_instances( instance_actions.ATTACH_INTERFACE, until=MIN_COMPUTE_VDPA_ATTACH_DETACH ) @check_instance_state( vm_state=[ vm_states.ACTIVE, vm_states.PAUSED, vm_states.STOPPED ], task_state=[None] ) def attach_interface(self, context, instance, network_id, port_id, requested_ip, tag=None): """Use hotplug to add an network adapter to an instance.""" self._record_action_start( context, instance, instance_actions.ATTACH_INTERFACE) if port_id: # We need to query the port with admin context as # ensure_compute_version_for_resource_request depends on the # port.resource_request field which only returned for admins port = self.network_api.show_port( context.elevated(), port_id)['port'] if port.get('binding:vnic_type', 'normal') in ( network_model.VNIC_TYPE_ACCELERATOR_DIRECT, network_model.VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL): raise exception.ForbiddenPortsWithAccelerator() if port.get('binding:vnic_type', 'normal') == network_model.VNIC_TYPE_REMOTE_MANAGED: self._check_vnic_remote_managed_min_version(context) self.ensure_compute_version_for_resource_request( context, instance, port) return self.compute_rpcapi.attach_interface(context, instance=instance, network_id=network_id, port_id=port_id, requested_ip=requested_ip, tag=tag) @check_instance_lock @reject_vdpa_instances( instance_actions.DETACH_INTERFACE, until=MIN_COMPUTE_VDPA_ATTACH_DETACH ) @check_instance_state( vm_state=[ vm_states.ACTIVE, vm_states.PAUSED, vm_states.STOPPED ], task_state=[None] ) def detach_interface(self, context, instance, port_id): """Detach an network adapter from an instance.""" for vif in instance.get_network_info(): if vif['id'] == port_id: if vif['vnic_type'] in ( network_model.VNIC_TYPE_ACCELERATOR_DIRECT, network_model.VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL): raise exception.ForbiddenPortsWithAccelerator() break self._record_action_start( context, instance, instance_actions.DETACH_INTERFACE) self.compute_rpcapi.detach_interface(context, instance=instance, port_id=port_id) def get_instance_metadata(self, context, instance): """Get all metadata associated with an instance.""" return main_db_api.instance_metadata_get(context, instance.uuid) @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED, vm_states.SUSPENDED, vm_states.STOPPED], task_state=None) def delete_instance_metadata(self, context, instance, key): """Delete the given metadata item from an instance.""" instance.delete_metadata_key(key) @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED, vm_states.SUSPENDED, vm_states.STOPPED], task_state=None) def update_instance_metadata(self, context, instance, metadata, delete=False): """Updates or creates instance metadata. 
If delete is True, metadata items that are not specified in the `metadata` argument will be deleted. """ if delete: _metadata = metadata else: _metadata = dict(instance.metadata) _metadata.update(metadata) self._check_metadata_properties_quota(context, _metadata) instance.metadata = _metadata instance.save() return _metadata @block_shares_not_supported() @block_extended_resource_request @block_port_accelerators() @reject_vdpa_instances( instance_actions.LIVE_MIGRATION, until=MIN_COMPUTE_VDPA_HOTPLUG_LIVE_MIGRATION ) @block_accelerators() @reject_vtpm_instances(instance_actions.LIVE_MIGRATION) @reject_sev_instances(instance_actions.LIVE_MIGRATION) @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED]) def live_migrate(self, context, instance, block_migration, disk_over_commit, host_name, force=None, async_=False): """Migrate a server lively to a new host.""" LOG.debug("Going to try to live migrate instance to %s", host_name or "another host", instance=instance) if host_name: # Validate the specified host before changing the instance task # state. nodes = objects.ComputeNodeList.get_all_by_host(context, host_name) request_spec = objects.RequestSpec.get_by_instance_uuid( context, instance.uuid) instance.task_state = task_states.MIGRATING instance.save(expected_task_state=[None]) self._record_action_start(context, instance, instance_actions.LIVE_MIGRATION) # NOTE(sbauza): Force is a boolean by the new related API version if force is False and host_name: # Unset the host to make sure we call the scheduler # from the conductor LiveMigrationTask. Yes this is tightly-coupled # to behavior in conductor and not great. host_name = None # FIXME(sbauza): Since only Ironic driver uses more than one # compute per service but doesn't support live migrations, # let's provide the first one. target = nodes[0] destination = objects.Destination( host=target.host, node=target.hypervisor_hostname ) # This is essentially a hint to the scheduler to only consider # the specified host but still run it through the filters. request_spec.requested_destination = destination try: self.compute_task_api.live_migrate_instance(context, instance, host_name, block_migration=block_migration, disk_over_commit=disk_over_commit, request_spec=request_spec, async_=async_) except oslo_exceptions.MessagingTimeout as messaging_timeout: with excutils.save_and_reraise_exception(): # NOTE(pkoniszewski): It is possible that MessagingTimeout # occurs, but LM will still be in progress, so write # instance fault to database compute_utils.add_instance_fault_from_exc(context, instance, messaging_timeout) @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE], task_state=[task_states.MIGRATING]) def live_migrate_force_complete(self, context, instance, migration_id): """Force live migration to complete. :param context: Security context :param instance: The instance that is being migrated :param migration_id: ID of ongoing migration """ LOG.debug("Going to try to force live migration to complete", instance=instance) # NOTE(pkoniszewski): Get migration object to check if there is ongoing # live migration for particular instance. Also pass migration id to # compute to double check and avoid possible race condition. 
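# Illustrative sketch (an editorial addition, not part of the upstream
# code): a caller-side view, assuming a hypothetical ``compute_api`` and
# the id of an in-progress live migration; only a migration in 'running'
# status is accepted, anything else raises InvalidMigrationState below:
#
#     compute_api.live_migrate_force_complete(ctxt, instance,
#                                             migration_id=42)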
migration = objects.Migration.get_by_id_and_instance( context, migration_id, instance.uuid) if migration.status != 'running': raise exception.InvalidMigrationState(migration_id=migration_id, instance_uuid=instance.uuid, state=migration.status, method='force complete') self._record_action_start( context, instance, instance_actions.LIVE_MIGRATION_FORCE_COMPLETE) self.compute_rpcapi.live_migration_force_complete( context, instance, migration) @check_instance_lock @check_instance_state(task_state=[task_states.MIGRATING]) def live_migrate_abort(self, context, instance, migration_id, support_abort_in_queue=False): """Abort an in-progress live migration. :param context: Security context :param instance: The instance that is being migrated :param migration_id: ID of in-progress live migration :param support_abort_in_queue: Flag indicating whether we can support aborting migrations in "queued" or "preparing" status. """ migration = objects.Migration.get_by_id_and_instance(context, migration_id, instance.uuid) LOG.debug("Going to cancel live migration %s", migration.id, instance=instance) # If the microversion does not support aborting a migration in the # queue, we are only able to abort migrations with `running` status; # if it is supported, we are able to also abort migrations in # `queued` and `preparing` status. allowed_states = ['running'] queued_states = ['queued', 'preparing'] if support_abort_in_queue: # The user requested a microversion that supports aborting a queued # or preparing live migration. But we need to check that the # compute service hosting the instance is new enough to support # aborting a queued/preparing live migration, so we check the # service version here. allowed_states.extend(queued_states) if migration.status not in allowed_states: raise exception.InvalidMigrationState(migration_id=migration_id, instance_uuid=instance.uuid, state=migration.status, method='abort live migration') self._record_action_start(context, instance, instance_actions.LIVE_MIGRATION_CANCEL) self.compute_rpcapi.live_migration_abort(context, instance, migration.id) @block_shares_not_supported() @block_extended_resource_request @block_port_accelerators() @reject_vtpm_instances(instance_actions.EVACUATE) @block_accelerators(until_service=SUPPORT_ACCELERATOR_SERVICE_FOR_REBUILD) @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED, vm_states.ERROR], task_state=None) def evacuate(self, context, instance, host, on_shared_storage, admin_password=None, force=None, target_state=None): """Evacuate the instance to a target host. Checks the state of the instance's compute host; if that host is not in the expected (down) state, an exception is raised. :param instance: The instance to evacuate :param host: Target host.
if not set, the scheduler will pick up one :param on_shared_storage: True if instance files on shared storage :param admin_password: password to set on rebuilt instance :param force: Force the evacuation to the specific host target :param target_state: Set a target state for the evacuated instance """ LOG.debug('vm evacuation scheduled', instance=instance) inst_host = instance.host service = objects.Service.get_by_compute_host(context, inst_host) if self.servicegroup_api.service_is_up(service): LOG.error('Instance compute service state on %s ' 'expected to be down, but it was up.', inst_host) raise exception.ComputeServiceInUse(host=inst_host) request_spec = objects.RequestSpec.get_by_instance_uuid( context, instance.uuid) instance.task_state = task_states.REBUILDING instance.save(expected_task_state=None) self._record_action_start(context, instance, instance_actions.EVACUATE) # NOTE(danms): Create this as a tombstone for the source compute # to find and cleanup. No need to pass it anywhere else. migration = objects.Migration( context, source_compute=instance.host, source_node=instance.node, instance_uuid=instance.uuid, status='accepted', migration_type=fields_obj.MigrationType.EVACUATION) if host: migration.dest_compute = host migration.create() compute_utils.notify_about_instance_usage( self.notifier, context, instance, "evacuate") compute_utils.notify_about_instance_action( context, instance, CONF.host, action=fields_obj.NotificationAction.EVACUATE, source=fields_obj.NotificationSource.API) # NOTE(sbauza): Force is a boolean by the new related API version # TODO(stephenfin): Any reason we can't use 'not force' here to handle # the pre-v2.29 API microversion, which wouldn't set force if force is False and host: nodes = objects.ComputeNodeList.get_all_by_host(context, host) # NOTE(sbauza): Unset the host to make sure we call the scheduler host = None # FIXME(sbauza): Since only Ironic driver uses more than one # compute per service but doesn't support evacuations, # let's provide the first one. target = nodes[0] destination = objects.Destination( host=target.host, node=target.hypervisor_hostname ) request_spec.requested_destination = destination return self.compute_task_api.rebuild_instance(context, instance=instance, new_pass=admin_password, injected_files=None, image_ref=None, orig_image_ref=None, orig_sys_metadata=None, bdms=None, recreate=True, on_shared_storage=on_shared_storage, host=host, request_spec=request_spec, target_state=target_state) def get_migrations(self, context, filters): """Get all migrations for the given filters.""" load_cells() migrations = [] for cell in CELLS: if cell.uuid == objects.CellMapping.CELL0_UUID: continue with nova_context.target_cell(context, cell) as cctxt: migrations.extend(objects.MigrationList.get_by_filters( cctxt, filters).objects) return objects.MigrationList(objects=migrations) def get_migrations_sorted(self, context, filters, sort_dirs=None, sort_keys=None, limit=None, marker=None): """Get all migrations for the given parameters.""" mig_objs = migration_list.get_migration_objects_sorted( context, filters, limit, marker, sort_keys, sort_dirs) # Due to cross-cell resize, we could have duplicate migration records # while the instance is in VERIFY_RESIZE state in the destination cell # but the original migration record still exists in the source cell. # Filter out duplicate migration records here based on which record # is newer (last updated). def _get_newer_obj(obj1, obj2): # created_at will always be set. 
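# Hedged worked example (an editorial addition) with hypothetical
# timestamps: if obj1.updated_at=10:05 and obj2.updated_at=10:01, obj1
# wins; if only obj1 has updated_at, it wins when that is newer than
# obj2.created_at; if neither has updated_at, the later created_at wins.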
created_at1 = obj1.created_at created_at2 = obj2.created_at # updated_at might be None updated_at1 = obj1.updated_at updated_at2 = obj2.updated_at # If both have updated_at, compare using that field. if updated_at1 and updated_at2: if updated_at1 > updated_at2: return obj1 return obj2 # Compare created_at versus updated_at. if updated_at1: if updated_at1 > created_at2: return obj1 return obj2 if updated_at2: if updated_at2 > created_at1: return obj2 return obj1 # Compare created_at only. if created_at1 > created_at2: return obj1 return obj2 # TODO(mriedem): This could be easier if we leveraged the "hidden" # field on the Migration record and then just did like # _get_unique_filter_method in the get_all() method for instances. migrations_by_uuid = collections.OrderedDict() # maintain sort order for migration in mig_objs: if migration.uuid not in migrations_by_uuid: migrations_by_uuid[migration.uuid] = migration else: # We have a collision, keep the newer record. # Note that using updated_at could be wrong if changes-since or # changes-before filters are being used but we have the same # issue in _get_unique_filter_method for instances. doppelganger = migrations_by_uuid[migration.uuid] newer = _get_newer_obj(doppelganger, migration) migrations_by_uuid[migration.uuid] = newer return objects.MigrationList(objects=list(migrations_by_uuid.values())) def get_migrations_in_progress_by_instance(self, context, instance_uuid, migration_type=None): """Get all migrations of an instance in progress.""" return objects.MigrationList.get_in_progress_by_instance( context, instance_uuid, migration_type) def get_migration_by_id_and_instance(self, context, migration_id, instance_uuid): """Get the migration of an instance by id.""" return objects.Migration.get_by_id_and_instance( context, migration_id, instance_uuid) def _get_bdm_by_volume_id(self, context, volume_id, expected_attrs=None): """Retrieve a BDM without knowing its cell. .. note:: The context will be targeted to the cell in which the BDM is found, if any. :param context: The API request context. :param volume_id: The ID of the volume. :param expected_attrs: list of any additional attributes that should be joined when the BDM is loaded from the database. :raises: nova.exception.VolumeBDMNotFound if not found in any cell """ load_cells() for cell in CELLS: nova_context.set_target_cell(context, cell) try: return objects.BlockDeviceMapping.get_by_volume( context, volume_id, expected_attrs=expected_attrs) except exception.NotFound: continue raise exception.VolumeBDMNotFound(volume_id=volume_id) def volume_snapshot_create(self, context, volume_id, create_info): bdm = self._get_bdm_by_volume_id( context, volume_id, expected_attrs=['instance']) # We allow creating the snapshot in any vm_state as long as there is # no task being performed on the instance and it has a host. 
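# Context note (an editorial assumption, not stated in this module): this
# entry point serves Cinder's assisted volume snapshot flow. The dict
# returned below is intentionally minimal, echoing create_info back as
# {'snapshot': {'id': ..., 'volumeId': ...}}.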
@check_instance_host() @check_instance_state(vm_state=None) def do_volume_snapshot_create(self, context, instance): self.compute_rpcapi.volume_snapshot_create(context, instance, volume_id, create_info) snapshot = { 'snapshot': { 'id': create_info.get('id'), 'volumeId': volume_id } } return snapshot return do_volume_snapshot_create(self, context, bdm.instance) def volume_snapshot_delete(self, context, volume_id, snapshot_id, delete_info): bdm = self._get_bdm_by_volume_id( context, volume_id, expected_attrs=['instance']) @check_instance_host() @check_instance_state(vm_state=None) def do_volume_snapshot_delete(self, context, instance): # FIXME(lyarwood): Avoid bug #1919487 by rejecting the request # to delete an intermediary volume snapshot offline as this isn't # currently implemented within the libvirt driver and will fail. # This should be fixed in a future release but as it is essentially # a new feature wouldn't be something we could backport. As such # reject the request here so n-api can respond correctly to c-vol. if (delete_info.get('merge_target_file') is not None and instance.vm_state != vm_states.ACTIVE ): raise exception.InstanceInvalidState( attr='vm_state', instance_uuid=instance.uuid, state=instance.vm_state, method='volume_snapshot_delete' ) self.compute_rpcapi.volume_snapshot_delete(context, instance, volume_id, snapshot_id, delete_info) do_volume_snapshot_delete(self, context, bdm.instance) def external_instance_event(self, api_context, instances, events): # NOTE(danms): The external API consumer just provides events, # but doesn't know where they go. We need to collate lists # by the host the affected instance is on and dispatch them # according to host instances_by_host = collections.defaultdict(list) events_by_host = collections.defaultdict(list) hosts_by_instance = collections.defaultdict(list) cell_contexts_by_host = {} for instance in instances: # instance._context is used here since it's already targeted to # the cell that the instance lives in, and we need to use that # cell context to lookup any migrations associated to the instance. hosts, cross_cell_move = self._get_relevant_hosts( instance._context, instance) for host in hosts: # NOTE(danms): All instances on a host must have the same # mapping, so just use that if host not in cell_contexts_by_host: # NOTE(mriedem): If the instance is being migrated across # cells then we have to get the host mapping to determine # which cell a given host is in. if cross_cell_move: hm = objects.HostMapping.get_by_host(api_context, host) ctxt = nova_context.get_admin_context() nova_context.set_target_cell(ctxt, hm.cell_mapping) cell_contexts_by_host[host] = ctxt else: # The instance is not migrating across cells so just # use the cell-targeted context already in the # instance since the host has to be in that same cell. cell_contexts_by_host[host] = instance._context instances_by_host[host].append(instance) hosts_by_instance[instance.uuid].append(host) for event in events: if event.name == 'volume-extended': # Volume extend is a user-initiated operation starting in the # Block Storage service API. We record an instance action so # the user can monitor the operation to completion. 
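# Illustrative sketch (an editorial addition, not part of the upstream
# code): a hypothetical incoming event, of which only name, instance_uuid
# and tag are consulted here -- e.g. name='volume-extended' with tag set
# to the extended volume's id, or name='power-update' with tag 'POWER_ON'
# or 'POWER_OFF'.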
host = hosts_by_instance[event.instance_uuid][0] cell_context = cell_contexts_by_host[host] objects.InstanceAction.action_start( cell_context, event.instance_uuid, instance_actions.EXTEND_VOLUME, want_result=False) elif event.name == 'power-update': host = hosts_by_instance[event.instance_uuid][0] cell_context = cell_contexts_by_host[host] if event.tag == external_event_obj.POWER_ON: inst_action = instance_actions.START elif event.tag == external_event_obj.POWER_OFF: inst_action = instance_actions.STOP else: LOG.warning("Invalid power state %s. Cannot process " "the event %s. Skipping it.", event.tag, event) continue objects.InstanceAction.action_start( cell_context, event.instance_uuid, inst_action, want_result=False) for host in hosts_by_instance[event.instance_uuid]: events_by_host[host].append(event) for host in instances_by_host: cell_context = cell_contexts_by_host[host] # TODO(salv-orlando): Handle exceptions raised by the rpc api layer # in order to ensure that a failure in processing events on a host # will not prevent processing events on other hosts self.compute_rpcapi.external_instance_event( cell_context, instances_by_host[host], events_by_host[host], host=host) def _get_relevant_hosts(self, context, instance): """Get the relevant hosts for an external server event on an instance. :param context: nova auth request context targeted at the same cell that the instance lives in :param instance: Instance object which is the target of an external server event :returns: 2-item tuple of: - set of at least one host (the host where the instance lives); if the instance is being migrated the source and dest compute hostnames are in the returned set - boolean indicating if the instance is being migrated across cells """ hosts = set() hosts.add(instance.host) cross_cell_move = False if instance.migration_context is not None: migration_id = instance.migration_context.migration_id migration = objects.Migration.get_by_id(context, migration_id) cross_cell_move = migration.cross_cell_move hosts.add(migration.dest_compute) hosts.add(migration.source_compute) cells_msg = ( 'across cells' if cross_cell_move else 'within the same cell') LOG.debug('Instance %(instance)s is migrating %(cells_msg)s, ' 'copying events to all relevant hosts: ' '%(hosts)s', {'cells_msg': cells_msg, 'instance': instance.uuid, 'hosts': hosts}) return hosts, cross_cell_move def get_instance_host_status(self, instance): if instance.host: try: service = [service for service in instance.services if service.binary == 'nova-compute'][0] if service.forced_down: host_status = fields_obj.HostStatus.DOWN elif service.disabled: host_status = fields_obj.HostStatus.MAINTENANCE else: alive = self.servicegroup_api.service_is_up(service) host_status = ((alive and fields_obj.HostStatus.UP) or fields_obj.HostStatus.UNKNOWN) except IndexError: host_status = fields_obj.HostStatus.NONE else: host_status = fields_obj.HostStatus.NONE return host_status def get_instances_host_statuses(self, instance_list): host_status_dict = dict() host_statuses = dict() for instance in instance_list: if instance.host: if instance.host not in host_status_dict: host_status = self.get_instance_host_status(instance) host_status_dict[instance.host] = host_status else: host_status = host_status_dict[instance.host] else: host_status = fields_obj.HostStatus.NONE host_statuses[instance.uuid] = host_status return host_statuses def allow_share(self, context, instance, share_mapping): self._record_action_start( context, instance, instance_actions.ATTACH_SHARE) 
self.compute_rpcapi.allow_share( context, instance, share_mapping) def deny_share(self, context, instance, share_mapping): self._record_action_start( context, instance, instance_actions.DETACH_SHARE) self.compute_rpcapi.deny_share( context, instance, share_mapping) def target_host_cell(fn): """Target a host-based function to a cell. Expects to wrap a function of signature: func(self, context, host, ...) """ @functools.wraps(fn) def targeted(self, context, host, *args, **kwargs): mapping = objects.HostMapping.get_by_host(context, host) nova_context.set_target_cell(context, mapping.cell_mapping) return fn(self, context, host, *args, **kwargs) return targeted def _get_service_in_cell_by_host(context, host_name): # validates the host; ComputeHostNotFound is raised if invalid try: mapping = objects.HostMapping.get_by_host(context, host_name) nova_context.set_target_cell(context, mapping.cell_mapping) service = objects.Service.get_by_compute_host(context, host_name) except exception.HostMappingNotFound: try: # NOTE(danms): This targets our cell service = _find_service_in_cell(context, service_host=host_name) except exception.NotFound: raise exception.ComputeHostNotFound(host=host_name) return service def _find_service_in_cell(context, service_id=None, service_host=None): """Find a service by id or hostname by searching all cells. If one matching service is found, return it. If none or multiple are found, raise an exception. :param context: A context.RequestContext :param service_id: If not none, the DB ID of the service to find :param service_host: If not None, the hostname of the service to find :returns: An objects.Service :raises: ServiceNotUnique if multiple matching IDs are found :raises: NotFound if no matches are found :raises: NovaException if called with neither search option """ load_cells() service = None found_in_cell = None is_uuid = False if service_id is not None: is_uuid = uuidutils.is_uuid_like(service_id) if is_uuid: lookup_fn = lambda c: objects.Service.get_by_uuid(c, service_id) else: lookup_fn = lambda c: objects.Service.get_by_id(c, service_id) elif service_host is not None: lookup_fn = lambda c: ( objects.Service.get_by_compute_host(c, service_host)) else: LOG.exception('_find_service_in_cell called with no search parameters') # This is intentionally cryptic so we don't leak implementation details # out of the API. 
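# Illustrative sketch (an editorial addition, not part of the upstream
# code): valid invocations, with hypothetical values -- exactly one search
# parameter is expected:
#
#     _find_service_in_cell(ctxt, service_id=17)
#     _find_service_in_cell(ctxt, service_id='0694b7e8-...')  # UUID form
#     _find_service_in_cell(ctxt, service_host='compute-01')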
raise exception.NovaException() for cell in CELLS: # NOTE(danms): Services can be in cell0, so don't skip it here try: with nova_context.target_cell(context, cell) as cctxt: cell_service = lookup_fn(cctxt) except exception.NotFound: # NOTE(danms): Keep looking in other cells continue if service and cell_service: raise exception.ServiceNotUnique() service = cell_service found_in_cell = cell if service and is_uuid: break if service: # NOTE(danms): Set the cell on the context so it remains # when we return to our caller nova_context.set_target_cell(context, found_in_cell) return service else: raise exception.NotFound() class HostAPI: """Sub-set of the Compute Manager API for managing host operations.""" def __init__(self, rpcapi=None, servicegroup_api=None): self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI() self.servicegroup_api = servicegroup_api or servicegroup.API() def _assert_host_exists(self, context, host_name, must_be_up=False): """Raise HostNotFound if compute host doesn't exist.""" service = objects.Service.get_by_compute_host(context, host_name) if not service: raise exception.HostNotFound(host=host_name) if must_be_up and not self.servicegroup_api.service_is_up(service): raise exception.ComputeServiceUnavailable(host=host_name) return service['host'] @wrap_exception() @target_host_cell def set_host_enabled(self, context, host_name, enabled): """Sets the specified host's ability to accept new instances.""" host_name = self._assert_host_exists(context, host_name) payload = {'host_name': host_name, 'enabled': enabled} compute_utils.notify_about_host_update(context, 'set_enabled.start', payload) result = self.rpcapi.set_host_enabled(context, enabled=enabled, host=host_name) compute_utils.notify_about_host_update(context, 'set_enabled.end', payload) return result @target_host_cell def get_host_uptime(self, context, host_name): """Returns the result of calling "uptime" on the target host.""" host_name = self._assert_host_exists(context, host_name, must_be_up=True) return self.rpcapi.get_host_uptime(context, host=host_name) @wrap_exception() @target_host_cell def host_power_action(self, context, host_name, action): """Reboots, shuts down or powers up the host.""" host_name = self._assert_host_exists(context, host_name) payload = {'host_name': host_name, 'action': action} compute_utils.notify_about_host_update(context, 'power_action.start', payload) result = self.rpcapi.host_power_action(context, action=action, host=host_name) compute_utils.notify_about_host_update(context, 'power_action.end', payload) return result @wrap_exception() @target_host_cell def set_host_maintenance(self, context, host_name, mode): """Start/Stop host maintenance window. On start, it triggers guest VMs evacuation. 
""" host_name = self._assert_host_exists(context, host_name) payload = {'host_name': host_name, 'mode': mode} compute_utils.notify_about_host_update(context, 'set_maintenance.start', payload) result = self.rpcapi.host_maintenance_mode(context, host_param=host_name, mode=mode, host=host_name) compute_utils.notify_about_host_update(context, 'set_maintenance.end', payload) return result def _service_get_all_cells(self, context, disabled, set_zones, cell_down_support): services = [] service_dict = nova_context.scatter_gather_all_cells(context, objects.ServiceList.get_all, disabled, set_zones=set_zones) cell0_computes = [ x for x in service_dict.get(objects.CellMapping.CELL0_UUID, []) if x.binary == 'nova-compute'] for cn in cell0_computes: LOG.warning( 'Found compute service %(service)s in cell0; ' 'This should never happen!', {'service': cn.host}) for cell_uuid, cell_services in service_dict.items(): if not nova_context.is_cell_failure_sentinel(cell_services): services.extend(cell_services) elif cell_down_support: unavailable_services = objects.ServiceList() cid = [cm.id for cm in nova_context.CELLS if cm.uuid == cell_uuid] # We know cid[0] is in the list because we are using the # same list that scatter_gather_all_cells used hms = objects.HostMappingList.get_by_cell_id(context, cid[0]) for hm in hms: unavailable_services.objects.append(objects.Service( binary='nova-compute', host=hm.host)) LOG.warning("Cell %s is not responding and hence only " "partial results are available from this " "cell.", cell_uuid) services.extend(unavailable_services) else: LOG.warning("Cell %s is not responding and hence skipped " "from the results.", cell_uuid) return services def service_get_all(self, context, filters=None, set_zones=False, all_cells=False, cell_down_support=False): """Returns a list of services, optionally filtering the results. If specified, 'filters' should be a dictionary containing services attributes and matching values. Ie, to get a list of services for the 'compute' topic, use filters={'topic': 'compute'}. If all_cells=True, then scan all cells and merge the results. If cell_down_support=True then return minimal service records for cells that do not respond based on what we have in the host mappings. These will have only 'binary' and 'host' set. """ if filters is None: filters = {} disabled = filters.pop('disabled', None) if 'availability_zone' in filters: set_zones = True # NOTE(danms): Eventually this all_cells nonsense should go away # and we should always iterate over the cells. However, certain # callers need the legacy behavior for now. if all_cells: services = self._service_get_all_cells(context, disabled, set_zones, cell_down_support) else: services = objects.ServiceList.get_all(context, disabled, set_zones=set_zones) ret_services = [] for service in services: for key, val in filters.items(): if service[key] != val: break else: # All filters matched. ret_services.append(service) return ret_services def service_get_by_id(self, context, service_id): """Get service entry for the given service id or uuid.""" try: return _find_service_in_cell(context, service_id=service_id) except exception.NotFound: raise exception.ServiceNotFound(service_id=service_id) @target_host_cell def service_get_by_compute_host(self, context, host_name): """Get service entry for the given compute hostname.""" return objects.Service.get_by_compute_host(context, host_name) def _update_compute_provider_status(self, context, service): """Calls the compute service to sync the COMPUTE_STATUS_DISABLED trait. 
There are two cases where the API will not call the compute service: * The compute service is down. In this case the trait is synchronized when the compute service is restarted. * The compute service is old. In this case the trait is synchronized when the compute service is upgraded and restarted. :param context: nova auth RequestContext :param service: nova.objects.Service object which has been enabled or disabled (see ``service_update``). """ # Make sure the service is up so we can make the RPC call. if not self.servicegroup_api.service_is_up(service): LOG.info('Compute service on host %s is down. The ' 'COMPUTE_STATUS_DISABLED trait will be synchronized ' 'when the service is restarted.', service.host) return # Make sure the compute service is new enough for the trait sync # behavior. # TODO(mriedem): Remove this compat check in the U release. if service.version < MIN_COMPUTE_SYNC_COMPUTE_STATUS_DISABLED: LOG.info('Compute service on host %s is too old to sync the ' 'COMPUTE_STATUS_DISABLED trait in Placement. The ' 'trait will be synchronized when the service is ' 'upgraded and restarted.', service.host) return enabled = not service.disabled # Avoid leaking errors out of the API. try: LOG.debug('Calling the compute service on host %s to sync the ' 'COMPUTE_STATUS_DISABLED trait.', service.host) self.rpcapi.set_host_enabled(context, service.host, enabled) except Exception: LOG.exception('An error occurred while updating the ' 'COMPUTE_STATUS_DISABLED trait on compute node ' 'resource providers managed by host %s. The trait ' 'will be synchronized automatically by the compute ' 'service when the update_available_resource ' 'periodic task runs.', service.host) def service_update(self, context, service): """Performs the actual service update operation. If the "disabled" field is changed, potentially calls the compute service to sync the COMPUTE_STATUS_DISABLED trait on the compute node resource providers managed by this compute service. :param context: nova auth RequestContext :param service: nova.objects.Service object with changes already set on the object """ # Before persisting changes and resetting the changed fields on the # Service object, determine if the disabled field changed. update_placement = 'disabled' in service.obj_what_changed() # Persist the Service object changes to the database. service.save() # If the disabled field changed, potentially call the compute service # to sync the COMPUTE_STATUS_DISABLED trait. if update_placement: self._update_compute_provider_status(context, service) return service @target_host_cell def service_update_by_host_and_binary(self, context, host_name, binary, params_to_update): """Enable / Disable a service. Determines the cell that the service is in using the HostMapping. For compute services, this stops new builds and migrations going to the host. See also ``service_update``. :param context: nova auth RequestContext :param host_name: hostname of the service :param binary: service binary (really only supports "nova-compute") :param params_to_update: dict of changes to make to the Service object :raises: HostMappingNotFound if the host is not mapped to a cell :raises: HostBinaryNotFound if a services table record is not found with the given host_name and binary """ # TODO(mriedem): Service.get_by_args is deprecated; we should use # get_by_compute_host here (remember to update the "raises" docstring). 
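# --- illustrative aside -----------------------------------------------------
# service_update only triggers the trait sync when the 'disabled' field was
# actually changed, which it detects via obj_what_changed() *before* save()
# clears the dirty set. A toy version of that check-dirty-then-persist flow;
# DirtyTrackingRecord is a simple stand-in, not nova's versioned-object
# machinery.

class DirtyTrackingRecord:
    """Minimal object that remembers which attributes were modified."""
    def __init__(self, **fields):
        self.__dict__['_fields'] = dict(fields)
        self.__dict__['_changed'] = set()

    def __getattr__(self, name):
        return self._fields[name]

    def __setattr__(self, name, value):
        self._fields[name] = value
        self._changed.add(name)

    def what_changed(self):
        return set(self._changed)

    def save(self):
        # Persisting would happen here; afterwards the dirty set is reset.
        self._changed.clear()

service = DirtyTrackingRecord(host='compute1', disabled=False)
service.disabled = True

# Decide *before* save() whether the expensive follow-up is needed.
needs_sync = 'disabled' in service.what_changed()
service.save()
if needs_sync:
    print('would sync COMPUTE_STATUS_DISABLED for', service.host)
# ---------------------------------------------------------------------------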
service = objects.Service.get_by_args(context, host_name, binary) service.update(params_to_update) return self.service_update(context, service) @target_host_cell def instance_get_all_by_host(self, context, host_name): """Return all instances on the given host.""" return objects.InstanceList.get_by_host(context, host_name) def task_log_get_all(self, context, task_name, period_beginning, period_ending, host=None, state=None): """Return the task logs within a given range, optionally filtering by host and/or state. """ return main_db_api.task_log_get_all( context, task_name, period_beginning, period_ending, host=host, state=state) def compute_node_get(self, context, compute_id): """Return compute node entry for particular integer ID or UUID.""" load_cells() # NOTE(danms): Unfortunately this API exposes database identifiers # which means we really can't do something efficient here is_uuid = uuidutils.is_uuid_like(compute_id) for cell in CELLS: if cell.uuid == objects.CellMapping.CELL0_UUID: continue with nova_context.target_cell(context, cell) as cctxt: try: if is_uuid: return objects.ComputeNode.get_by_uuid(cctxt, compute_id) return objects.ComputeNode.get_by_id(cctxt, int(compute_id)) except exception.ComputeHostNotFound: # NOTE(danms): Keep looking in other cells continue raise exception.ComputeHostNotFound(host=compute_id) def compute_node_get_all(self, context, limit=None, marker=None): load_cells() computes = [] uuid_marker = marker and uuidutils.is_uuid_like(marker) for cell in CELLS: if cell.uuid == objects.CellMapping.CELL0_UUID: continue with nova_context.target_cell(context, cell) as cctxt: # If we have a marker and it's a uuid, see if the compute node # is in this cell. if marker and uuid_marker: try: compute_marker = objects.ComputeNode.get_by_uuid( cctxt, marker) # we found the marker compute node, so use it's id # for the actual marker for paging in this cell's db marker = compute_marker.id except exception.ComputeHostNotFound: # The marker node isn't in this cell so keep looking. continue try: cell_computes = objects.ComputeNodeList.get_by_pagination( cctxt, limit=limit, marker=marker) except exception.MarkerNotFound: # NOTE(danms): Keep looking through cells continue computes.extend(cell_computes) # NOTE(danms): We must have found the marker, so continue on # without one marker = None if limit: limit -= len(cell_computes) if limit <= 0: break if marker is not None and len(computes) == 0: # NOTE(danms): If we did not find the marker in any cell, # mimic the db_api behavior here. 
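# --- illustrative aside -----------------------------------------------------
# compute_node_get_all above pages across several cell databases: it first
# locates the cell containing the marker, resumes just after it, then keeps
# shrinking the remaining limit as results accumulate. A compact stand-alone
# sketch of the same marker-and-shrinking-limit walk over plain lists (each
# list plays the role of one cell's database); names are illustrative.

def paged_across_cells(cells, limit, marker=None):
    """Return up to `limit` items across ordered per-cell lists."""
    results = []
    for cell_items in cells:
        if marker is not None:
            if marker not in cell_items:
                # Marker is not in this cell; keep looking.
                continue
            # Resume after the marker, then stop applying it.
            cell_items = cell_items[cell_items.index(marker) + 1:]
            marker = None
        take = cell_items[:limit - len(results)]
        results.extend(take)
        if len(results) >= limit:
            break
    if marker is not None:
        # Mirrors the MarkerNotFound behavior: the client's marker was stale.
        raise ValueError('marker %r not found in any cell' % marker)
    return results

cells = [['a1', 'a2', 'a3'], ['b1', 'b2'], ['c1']]
print(paged_across_cells(cells, limit=3, marker='a2'))  # ['a3', 'b1', 'b2']
# ---------------------------------------------------------------------------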
raise exception.MarkerNotFound(marker=marker) return objects.ComputeNodeList(objects=computes) def compute_node_search_by_hypervisor(self, context, hypervisor_match): load_cells() computes = [] for cell in CELLS: if cell.uuid == objects.CellMapping.CELL0_UUID: continue with nova_context.target_cell(context, cell) as cctxt: cell_computes = objects.ComputeNodeList.get_by_hypervisor( cctxt, hypervisor_match) computes.extend(cell_computes) return objects.ComputeNodeList(objects=computes) def compute_node_statistics(self, context): load_cells() cell_stats = [] for cell in CELLS: if cell.uuid == objects.CellMapping.CELL0_UUID: continue with nova_context.target_cell(context, cell) as cctxt: cell_stats.append(main_db_api.compute_node_statistics(cctxt)) if cell_stats: keys = cell_stats[0].keys() return {k: sum(stats[k] for stats in cell_stats) for k in keys} else: return {} class InstanceActionAPI: """Sub-set of the Compute Manager API for managing instance actions.""" def actions_get(self, context, instance, limit=None, marker=None, filters=None): return objects.InstanceActionList.get_by_instance_uuid( context, instance.uuid, limit, marker, filters) def action_get_by_request_id(self, context, instance, request_id): return objects.InstanceAction.get_by_request_id( context, instance.uuid, request_id) def action_events_get(self, context, instance, action_id): return objects.InstanceActionEventList.get_by_action( context, action_id) class AggregateAPI: """Sub-set of the Compute Manager API for managing host aggregates.""" def __init__(self): self.compute_rpcapi = compute_rpcapi.ComputeAPI() self.query_client = query.SchedulerQueryClient() @property def placement_client(self): return report.report_client_singleton() @wrap_exception() def create_aggregate(self, context, aggregate_name, availability_zone): """Creates the model for the aggregate.""" aggregate = objects.Aggregate(context=context) aggregate.name = aggregate_name if availability_zone: aggregate.metadata = {'availability_zone': availability_zone} aggregate.create() self.query_client.update_aggregates(context, [aggregate]) return aggregate def get_aggregate(self, context, aggregate_id): """Get an aggregate by id.""" return objects.Aggregate.get_by_id(context, aggregate_id) def get_aggregate_list(self, context): """Get all the aggregates.""" return objects.AggregateList.get_all(context) def get_aggregates_by_host(self, context, compute_host): """Get all the aggregates where the given host is presented.""" return objects.AggregateList.get_by_host(context, compute_host) @wrap_exception() def update_aggregate(self, context, aggregate_id, values): """Update the properties of an aggregate.""" aggregate = objects.Aggregate.get_by_id(context, aggregate_id) if 'name' in values: aggregate.name = values.pop('name') aggregate.save() self.is_safe_to_update_az(context, values, aggregate=aggregate, action_name=AGGREGATE_ACTION_UPDATE, check_no_instances_in_az=True) if values: aggregate.update_metadata(values) aggregate.updated_at = timeutils.utcnow() self.query_client.update_aggregates(context, [aggregate]) # If updated values include availability_zones, then the cache # which stored availability_zones and host need to be reset if values.get('availability_zone'): availability_zones.reset_cache() return aggregate @wrap_exception() def update_aggregate_metadata(self, context, aggregate_id, metadata): """Updates the aggregate metadata.""" aggregate = objects.Aggregate.get_by_id(context, aggregate_id) self.is_safe_to_update_az(context, metadata, 
aggregate=aggregate, action_name=AGGREGATE_ACTION_UPDATE_META, check_no_instances_in_az=True) aggregate.update_metadata(metadata) self.query_client.update_aggregates(context, [aggregate]) # If updated metadata include availability_zones, then the cache # which stored availability_zones and host need to be reset if metadata and metadata.get('availability_zone'): availability_zones.reset_cache() aggregate.updated_at = timeutils.utcnow() return aggregate @wrap_exception() def delete_aggregate(self, context, aggregate_id): """Deletes the aggregate.""" aggregate_payload = {'aggregate_id': aggregate_id} compute_utils.notify_about_aggregate_update(context, "delete.start", aggregate_payload) aggregate = objects.Aggregate.get_by_id(context, aggregate_id) compute_utils.notify_about_aggregate_action( context=context, aggregate=aggregate, action=fields_obj.NotificationAction.DELETE, phase=fields_obj.NotificationPhase.START) if len(aggregate.hosts) > 0: msg = _("Host aggregate is not empty") raise exception.InvalidAggregateActionDelete( aggregate_id=aggregate_id, reason=msg) aggregate.destroy() self.query_client.delete_aggregate(context, aggregate) compute_utils.notify_about_aggregate_update(context, "delete.end", aggregate_payload) compute_utils.notify_about_aggregate_action( context=context, aggregate=aggregate, action=fields_obj.NotificationAction.DELETE, phase=fields_obj.NotificationPhase.END) def is_safe_to_update_az(self, context, metadata, aggregate, hosts=None, action_name=AGGREGATE_ACTION_ADD, check_no_instances_in_az=False): """Determine if updates alter an aggregate's availability zone. :param context: local context :param metadata: Target metadata for updating aggregate :param aggregate: Aggregate to update :param hosts: Hosts to check. If None, aggregate.hosts is used :type hosts: list :param action_name: Calling method for logging purposes :param check_no_instances_in_az: if True, it checks there is no instances on any hosts of the aggregate """ if 'availability_zone' in metadata: if not metadata['availability_zone']: msg = _("Aggregate %s does not support empty named " "availability zone") % aggregate.name self._raise_invalid_aggregate_exc(action_name, aggregate.id, msg) _hosts = hosts or aggregate.hosts host_aggregates = objects.AggregateList.get_by_metadata_key( context, 'availability_zone', hosts=_hosts) conflicting_azs = [ agg.availability_zone for agg in host_aggregates if agg.availability_zone != metadata['availability_zone'] and agg.id != aggregate.id] if conflicting_azs: msg = _("One or more hosts already in availability zone(s) " "%s") % conflicting_azs self._raise_invalid_aggregate_exc(action_name, aggregate.id, msg) same_az_name = (aggregate.availability_zone == metadata['availability_zone']) if check_no_instances_in_az and not same_az_name: instance_count_by_cell = ( nova_context.scatter_gather_skip_cell0( context, objects.InstanceList.get_count_by_hosts, _hosts)) if any(cnt for cnt in instance_count_by_cell.values()): msg = _("One or more hosts contain instances in this zone") self._raise_invalid_aggregate_exc( action_name, aggregate.id, msg) def _raise_invalid_aggregate_exc(self, action_name, aggregate_id, reason): if action_name == AGGREGATE_ACTION_ADD: raise exception.InvalidAggregateActionAdd( aggregate_id=aggregate_id, reason=reason) elif action_name == AGGREGATE_ACTION_UPDATE: raise exception.InvalidAggregateActionUpdate( aggregate_id=aggregate_id, reason=reason) elif action_name == AGGREGATE_ACTION_UPDATE_META: raise exception.InvalidAggregateActionUpdateMeta( 
aggregate_id=aggregate_id, reason=reason) elif action_name == AGGREGATE_ACTION_DELETE: raise exception.InvalidAggregateActionDelete( aggregate_id=aggregate_id, reason=reason) raise exception.NovaException( _("Unexpected aggregate action %s") % action_name) def _update_az_cache_for_host(self, context, host_name, aggregate_meta): # Update the availability_zone cache to avoid getting wrong # availability_zone in cache retention time when add/remove # host to/from aggregate. if aggregate_meta and aggregate_meta.get('availability_zone'): availability_zones.update_host_availability_zone_cache(context, host_name) def ensure_no_instances_need_to_move_az_when_host_added( self, context, aggregate, host_name ): instances = objects.InstanceList.get_by_host(context, host_name) if not instances: # if no instance then nothing moves return new_az = aggregate.metadata.get('availability_zone') if not new_az: # if we add a host to an aggregate without AZ that cannot change # existing, effective AZ of the host. The host was either not # in any AZ and will not be in an AZ. Or the host was already in # an AZ but this aggregate does not challenge that as it has no AZ. return # let's gather what is the AZ of the instances on the host before the # host is added to the aggregate aggregates = objects.AggregateList.get_by_host(context, host_name) az = { agg.metadata['availability_zone'] for agg in aggregates if 'availability_zone' in agg.metadata} # There can only be one or zero AZ names. Two different AZ names case # is already rejected by is_safe_to_update_az() old_az = list(az)[0] if az else None # So here we know that the host is being added to a new AZ if it is # different from the existing, effective AZ of the host then the # instances on this host would need to move between AZs, that is not # supported. So reject it. if old_az != new_az: msg = _( "The host cannot be added to the aggregate as the " "availability zone of the host would change from '%s' to '%s' " "but the host already has %d instance(s). Changing the AZ of " "an existing instance is not supported by this action. Move " "the instances away from this host then try again. If you " "need to move the instances between AZs then you can use " "shelve_offload and unshelve to achieve this." ) % (old_az, new_az, len(instances)) self._raise_invalid_aggregate_exc( AGGREGATE_ACTION_ADD, aggregate.id, msg) @wrap_exception() def add_host_to_aggregate(self, context, aggregate_id, host_name): """Adds the host to an aggregate.""" aggregate_payload = {'aggregate_id': aggregate_id, 'host_name': host_name} compute_utils.notify_about_aggregate_update(context, "addhost.start", aggregate_payload) service = _get_service_in_cell_by_host(context, host_name) if service.host != host_name: # NOTE(danms): If we found a service but it is not an # exact match, we may have a case-insensitive backend # database (like mysql) which will end up with us # adding the host-aggregate mapping with a # non-matching hostname. 
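# --- illustrative aside -----------------------------------------------------
# ensure_no_instances_need_to_move_az_when_host_added derives the host's
# current, effective availability zone from the AZ metadata of every
# aggregate the host already belongs to, then rejects the add if the new
# aggregate would change that AZ while instances are present. The core
# derivation is just a set comprehension; a minimal runnable sketch with
# plain dicts standing in for Aggregate objects:

def effective_az(aggregates):
    """Return the single AZ implied by a host's aggregates, or None."""
    azs = {
        agg['metadata']['availability_zone']
        for agg in aggregates
        if 'availability_zone' in agg['metadata']
    }
    # Conflicting AZ memberships are assumed to have been rejected earlier,
    # so there is at most one name in the set.
    return next(iter(azs), None)

host_aggregates = [
    {'id': 1, 'metadata': {'availability_zone': 'az-east'}},
    {'id': 2, 'metadata': {}},  # aggregate without an AZ does not count
]
new_aggregate_az = 'az-west'
instances_on_host = 3

old_az = effective_az(host_aggregates)
if instances_on_host and new_aggregate_az and old_az != new_aggregate_az:
    print('refuse: adding the host would move %d instance(s) from %r to %r'
          % (instances_on_host, old_az, new_aggregate_az))
# ---------------------------------------------------------------------------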
raise exception.ComputeHostNotFound(host=host_name) aggregate = objects.Aggregate.get_by_id(context, aggregate_id) compute_utils.notify_about_aggregate_action( context=context, aggregate=aggregate, action=fields_obj.NotificationAction.ADD_HOST, phase=fields_obj.NotificationPhase.START) self.is_safe_to_update_az(context, aggregate.metadata, hosts=[host_name], aggregate=aggregate) self.ensure_no_instances_need_to_move_az_when_host_added( context, aggregate, host_name) aggregate.add_host(host_name) self.query_client.update_aggregates(context, [aggregate]) nodes = objects.ComputeNodeList.get_all_by_host(context, host_name) node_name = nodes[0].hypervisor_hostname try: self.placement_client.aggregate_add_host( context, aggregate.uuid, host_name=node_name) except (exception.ResourceProviderNotFound, exception.ResourceProviderAggregateRetrievalFailed, exception.ResourceProviderUpdateFailed, exception.ResourceProviderUpdateConflict) as err: # NOTE(jaypipes): We don't want a failure perform the mirroring # action in the placement service to be returned to the user (they # probably don't know anything about the placement service and # would just be confused). So, we just log a warning here, noting # that on the next run of nova-manage placement sync_aggregates # things will go back to normal LOG.warning("Failed to associate %s with a placement " "aggregate: %s. This may be corrected after running " "nova-manage placement sync_aggregates.", node_name, err) self._update_az_cache_for_host(context, host_name, aggregate.metadata) aggregate_payload.update({'name': aggregate.name}) compute_utils.notify_about_aggregate_update(context, "addhost.end", aggregate_payload) compute_utils.notify_about_aggregate_action( context=context, aggregate=aggregate, action=fields_obj.NotificationAction.ADD_HOST, phase=fields_obj.NotificationPhase.END) return aggregate def ensure_no_instances_need_to_move_az_when_host_removed( self, context, aggregate, host_name ): instances = objects.InstanceList.get_by_host(context, host_name) if not instances: # if no instance then nothing moves return current_az = aggregate.metadata.get('availability_zone') if not current_az: # if we remove a host from an aggregate without AZ that cannot # change existing, effective AZ of the host. If the host has an AZ # before the removal then that is due to a different aggregate # membership so that does not change here. If the host has no AZ # before the removal then it won't have either after the removal # from an aggregate without az return # let's gather what would be the AZ of the instances on the host # if we exclude the current aggregate. aggregates = objects.AggregateList.get_by_host(context, host_name) azs = { agg.metadata['availability_zone'] for agg in aggregates if agg.id != aggregate.id and 'availability_zone' in agg.metadata } # There can only be one or zero AZ names. Two different AZ names case # is already rejected by is_safe_to_update_az() new_az = list(azs)[0] if azs else None # So here we know that the host is being removed from an aggregate # that has an AZ. So if the new AZ without this aggregate is different # then, that would mean the instances on this host need to change AZ. # That is not supported. if current_az != new_az: msg = _( "The host cannot be removed from the aggregate as the " "availability zone of the host would change from '%s' to '%s' " "but the host already has %d instance(s). Changing the AZ of " "an existing instance is not supported by this action. Move " "the instances away from this host then try again. 
If you " "need to move the instances between AZs then you can use " "shelve_offload and unshelve to achieve this." ) % (current_az, new_az, len(instances)) self._raise_invalid_aggregate_exc( AGGREGATE_ACTION_DELETE, aggregate.id, msg) @wrap_exception() def remove_host_from_aggregate(self, context, aggregate_id, host_name): """Removes host from the aggregate.""" aggregate_payload = {'aggregate_id': aggregate_id, 'host_name': host_name} compute_utils.notify_about_aggregate_update(context, "removehost.start", aggregate_payload) _get_service_in_cell_by_host(context, host_name) aggregate = objects.Aggregate.get_by_id(context, aggregate_id) compute_utils.notify_about_aggregate_action( context=context, aggregate=aggregate, action=fields_obj.NotificationAction.REMOVE_HOST, phase=fields_obj.NotificationPhase.START) self.ensure_no_instances_need_to_move_az_when_host_removed( context, aggregate, host_name) # Remove the resource provider from the provider aggregate first before # we change anything on the nova side because if we did the nova stuff # first we can't re-attempt this from the compute API if cleaning up # placement fails. nodes = objects.ComputeNodeList.get_all_by_host(context, host_name) node_name = nodes[0].hypervisor_hostname try: # Anything else this raises is handled in the route handler as # either a 409 (ResourceProviderUpdateConflict) or 500. self.placement_client.aggregate_remove_host( context, aggregate.uuid, node_name) except exception.ResourceProviderNotFound as err: # If the resource provider is not found then it's likely not part # of the aggregate anymore anyway since provider aggregates are # not resources themselves with metadata like nova aggregates, they # are just a grouping concept around resource providers. Log and # continue. 
LOG.warning("Failed to remove association of %s with a placement " "aggregate: %s.", node_name, err) aggregate.delete_host(host_name) self.query_client.update_aggregates(context, [aggregate]) self._update_az_cache_for_host(context, host_name, aggregate.metadata) compute_utils.notify_about_aggregate_update(context, "removehost.end", aggregate_payload) compute_utils.notify_about_aggregate_action( context=context, aggregate=aggregate, action=fields_obj.NotificationAction.REMOVE_HOST, phase=fields_obj.NotificationPhase.END) return aggregate class KeypairAPI: """Subset of the Compute Manager API for managing key pairs.""" wrap_exception = functools.partial( exception_wrapper.wrap_exception, service='api', binary='nova-api') def __init__(self): self.notifier = rpc.get_notifier('api') def _notify(self, context, event_suffix, keypair_name): payload = { 'tenant_id': context.project_id, 'user_id': context.user_id, 'key_name': keypair_name, } self.notifier.info(context, 'keypair.%s' % event_suffix, payload) def _check_key_pair_quotas(self, context, user_id, key_name, key_type): try: objects.Quotas.check_deltas(context, {'key_pairs': 1}, user_id) local_limit.enforce_db_limit(context, local_limit.KEY_PAIRS, entity_scope=user_id, delta=1) except exception.KeypairLimitExceeded: raise except exception.OverQuota: raise exception.KeypairLimitExceeded() @wrap_exception() def import_key_pair(self, context, user_id, key_name, public_key, key_type=keypair_obj.KEYPAIR_TYPE_SSH): """Import a key pair using an existing public key.""" self._check_key_pair_quotas(context, user_id, key_name, key_type) self._notify(context, 'import.start', key_name) keypair = objects.KeyPair(context) keypair.user_id = user_id keypair.name = key_name keypair.type = key_type keypair.fingerprint = None keypair.public_key = public_key compute_utils.notify_about_keypair_action( context=context, keypair=keypair, action=fields_obj.NotificationAction.IMPORT, phase=fields_obj.NotificationPhase.START) fingerprint = self._generate_fingerprint(public_key, key_type) keypair.fingerprint = fingerprint keypair.create() compute_utils.notify_about_keypair_action( context=context, keypair=keypair, action=fields_obj.NotificationAction.IMPORT, phase=fields_obj.NotificationPhase.END) self._notify(context, 'import.end', key_name) return keypair @wrap_exception() def create_key_pair(self, context, user_id, key_name, key_type=keypair_obj.KEYPAIR_TYPE_SSH): """Create a new key pair.""" self._check_key_pair_quotas(context, user_id, key_name, key_type) keypair = objects.KeyPair(context) keypair.user_id = user_id keypair.name = key_name keypair.type = key_type keypair.fingerprint = None keypair.public_key = None self._notify(context, 'create.start', key_name) compute_utils.notify_about_keypair_action( context=context, keypair=keypair, action=fields_obj.NotificationAction.CREATE, phase=fields_obj.NotificationPhase.START) private_key, public_key, fingerprint = self._generate_key_pair( user_id, key_type) keypair.fingerprint = fingerprint keypair.public_key = public_key keypair.create() # NOTE(melwitt): We recheck the quota after creating the object to # prevent users from allocating more resources than their allowed quota # in the event of a race. This is configurable because it can be # expensive if strict quota limits are not required in a deployment. if CONF.quota.recheck_quota: try: objects.Quotas.check_deltas(context, {'key_pairs': 0}, user_id) # TODO(johngarbutt) do we really need this recheck? 
# The quota rechecking of limits is really just to protect # against denial of service attacks that aim to fill up the # database. Its usefulness could be debated. local_limit.enforce_db_limit(context, local_limit.KEY_PAIRS, entity_scope=user_id, delta=0) except exception.KeypairLimitExceeded: with excutils.save_and_reraise_exception(): keypair.destroy() except exception.OverQuota: keypair.destroy() raise exception.KeypairLimitExceeded() compute_utils.notify_about_keypair_action( context=context, keypair=keypair, action=fields_obj.NotificationAction.CREATE, phase=fields_obj.NotificationPhase.END) self._notify(context, 'create.end', key_name) return keypair, private_key def _generate_fingerprint(self, public_key, key_type): if key_type == keypair_obj.KEYPAIR_TYPE_SSH: return crypto.generate_fingerprint(public_key) elif key_type == keypair_obj.KEYPAIR_TYPE_X509: return crypto.generate_x509_fingerprint(public_key) def _generate_key_pair(self, user_id, key_type): if key_type == keypair_obj.KEYPAIR_TYPE_SSH: return crypto.generate_key_pair() elif key_type == keypair_obj.KEYPAIR_TYPE_X509: return crypto.generate_winrm_x509_cert(user_id) @wrap_exception() def delete_key_pair(self, context, user_id, key_name): """Delete a keypair by name.""" self._notify(context, 'delete.start', key_name) keypair = self.get_key_pair(context, user_id, key_name) compute_utils.notify_about_keypair_action( context=context, keypair=keypair, action=fields_obj.NotificationAction.DELETE, phase=fields_obj.NotificationPhase.START) objects.KeyPair.destroy_by_name(context, user_id, key_name) compute_utils.notify_about_keypair_action( context=context, keypair=keypair, action=fields_obj.NotificationAction.DELETE, phase=fields_obj.NotificationPhase.END) self._notify(context, 'delete.end', key_name) def get_key_pairs(self, context, user_id, limit=None, marker=None): """List key pairs.""" return objects.KeyPairList.get_by_user( context, user_id, limit=limit, marker=marker) def get_key_pair(self, context, user_id, key_name): """Get a keypair by name.""" return objects.KeyPair.get_by_name(context, user_id, key_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/build_results.py0000664000175000017500000000264300000000000020440 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Possible results from instance build Results represent the ultimate result of an attempt to build an instance. Results describe whether an instance was actually built, failed to build, or was rescheduled. """ ACTIVE = 'active' # Instance is running FAILED = 'failed' # Instance failed to build and was not rescheduled RESCHEDULED = 'rescheduled' # Instance failed to build, but was rescheduled # Instance failed by policy violation (such as affinity or anti-affinity) # and was not rescheduled. In this case, the node's failed count won't be # increased. 
FAILED_BY_POLICY = 'failed_by_policy' # Instance failed by policy violation (such as affinity or anti-affinity) # but was rescheduled. In this case, the node's failed count won't be # increased. RESCHEDULED_BY_POLICY = 'rescheduled_by_policy' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/claims.py0000664000175000017500000002232700000000000017031 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Claim objects for use with resource tracking. """ from oslo_log import log as logging from nova import exception from nova.i18n import _ from nova import objects from nova.virt import hardware LOG = logging.getLogger(__name__) class NopClaim(object): """For use with compute drivers that do not support resource tracking.""" def __init__(self, *args, **kwargs): self.migration = kwargs.pop('migration', None) self.claimed_numa_topology = None def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): if exc_type is not None: self.abort() def abort(self): pass class Claim(NopClaim): """A declaration that a compute host operation will require free resources. Claims serve as marker objects that resources are being held until the update_available_resource audit process runs to do a full reconciliation of resource usage. This information will be used to help keep the local compute hosts's ComputeNode model in sync to aid the scheduler in making efficient / more correct decisions with respect to host selection. """ def __init__( self, context, instance, nodename, tracker, compute_node, pci_requests, migration=None, limits=None, ): super().__init__(migration=migration) # Stash a copy of the instance at the current point of time self.instance = instance.obj_clone() self.instance_ref = instance self.nodename = nodename self.tracker = tracker self._pci_requests = pci_requests self.context = context # Check claim at constructor to avoid mess code # Raise exception ComputeResourcesUnavailable if claim failed self._claim_test(compute_node, limits) @property def numa_topology(self): return self.instance.numa_topology def abort(self): """Compute operation requiring claimed resources has failed or been aborted. """ LOG.debug("Aborting claim: %s", self, instance=self.instance) self.tracker.abort_instance_claim(self.context, self.instance_ref, self.nodename) def _claim_test(self, compute_node, limits=None): """Test if this claim can be satisfied given available resources and optional oversubscription limits This should be called before the compute node actually consumes the resources required to execute the claim. 
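# --- illustrative aside -----------------------------------------------------
# NopClaim (and therefore Claim) doubles as a context manager: entering
# returns the claim and leaving the block with an exception calls abort(),
# so resources held for a failed operation are released automatically. A
# stripped-down, runnable version of that abort-on-error protocol; ToyClaim
# is an illustrative stand-in, not nova's claim object.

class ToyClaim:
    """Holds a resource for the duration of a 'with' block."""
    def __init__(self, name):
        self.name = name
        self.aborted = False

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Only roll back when the block raised; success keeps the claim.
        if exc_type is not None:
            self.abort()
        # Returning None lets the original exception propagate.

    def abort(self):
        self.aborted = True
        print('claim %s aborted, resources released' % self.name)

try:
    with ToyClaim('build-1') as claim:
        raise RuntimeError('instance build failed')
except RuntimeError:
    pass
print(claim.aborted)  # True
# ---------------------------------------------------------------------------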
:param compute_node: available local ComputeNode object :param limits: Optional limits to test, either dict or objects.SchedulerLimits :raises: exception.ComputeResourcesUnavailable if any resource claim fails """ if not limits: limits = {} if isinstance(limits, objects.SchedulerLimits): limits = limits.to_dict() # If an individual limit is None, the resource will be considered # unlimited: numa_topology_limit = limits.get('numa_topology') reasons = [self._test_numa_topology(compute_node, numa_topology_limit), self._test_pci()] reasons = [r for r in reasons if r is not None] if len(reasons) > 0: LOG.info('Failed to claim: %s', '; '.join(reasons), instance=self.instance) raise exception.ComputeResourcesUnavailable(reason= "; ".join(reasons)) LOG.info('Claim successful on node %s', self.nodename, instance=self.instance) def _test_pci(self): pci_requests = self._pci_requests if pci_requests.requests: stats = self.tracker.pci_tracker.stats if not stats.support_requests( pci_requests.requests, # We explicitly signal that we are _after_ the scheduler made # allocations in placement and therefore pci_requests.requests # carry its own placement provider mapping information provider_mapping=None, ): return _('Claim pci failed') def _test_numa_topology(self, compute_node, limit): host_topology = (compute_node.numa_topology if 'numa_topology' in compute_node else None) requested_topology = self.numa_topology if host_topology: host_topology = objects.NUMATopology.obj_from_db_obj( host_topology) pci_requests = self._pci_requests pci_stats = None if pci_requests.requests: pci_stats = self.tracker.pci_tracker.stats instance_topology = hardware.numa_fit_instance_to_host( host_topology, requested_topology, limits=limit, pci_requests=pci_requests.requests, pci_stats=pci_stats, # We explicitly signal that we are _after_ the scheduler made # allocations in placement and therefore pci_requests.requests # carry its own placement provider mapping information provider_mapping=None, ) if requested_topology and not instance_topology: if pci_requests.requests: return (_("Requested instance NUMA topology together with " "requested PCI devices cannot fit the given " "host NUMA topology")) else: return (_("Requested instance NUMA topology cannot fit " "the given host NUMA topology")) elif instance_topology: self.claimed_numa_topology = instance_topology class MoveClaim(Claim): """Claim used for holding resources for an incoming move operation. Move can be either a migrate/resize, live-migrate or an evacuate operation. """ def __init__( self, context, instance, nodename, flavor, image_meta, tracker, compute_node, pci_requests, migration, limits=None, ): self.context = context self.flavor = flavor if isinstance(image_meta, dict): image_meta = objects.ImageMeta.from_dict(image_meta) self.image_meta = image_meta super().__init__( context, instance, nodename, tracker, compute_node, pci_requests, migration=migration, limits=limits, ) @property def numa_topology(self): return hardware.numa_get_constraints(self.flavor, self.image_meta) def abort(self): """Compute operation requiring claimed resources has failed or been aborted. """ LOG.debug("Aborting claim: %s", self, instance=self.instance) self.tracker.drop_move_claim( self.context, self.instance, self.nodename, flavor=self.flavor) self.instance.drop_migration_context() def _test_live_migration_page_size(self): """Tests that the current page size and the requested page size are the same. 
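# --- illustrative aside -----------------------------------------------------
# _claim_test runs each resource check, collects any non-None failure
# messages, and raises a single exception with the joined reasons; on
# success every check returned None. A tiny stand-alone version of that
# collect-then-raise structure (the checks and exception type here are
# illustrative, not nova's):

class ResourcesUnavailable(Exception):
    pass

def _test_memory(free_mb, requested_mb):
    if requested_mb > free_mb:
        return 'memory: requested %d MB but only %d MB free' % (
            requested_mb, free_mb)

def _test_disk(free_gb, requested_gb):
    if requested_gb > free_gb:
        return 'disk: requested %d GB but only %d GB free' % (
            requested_gb, free_gb)

def claim_test(free_mb, free_gb, requested_mb, requested_gb):
    # Each test returns None on success or a human-readable reason string.
    reasons = [_test_memory(free_mb, requested_mb),
               _test_disk(free_gb, requested_gb)]
    reasons = [r for r in reasons if r is not None]
    if reasons:
        raise ResourcesUnavailable('; '.join(reasons))
    return 'claim successful'

print(claim_test(4096, 100, requested_mb=2048, requested_gb=20))
try:
    claim_test(1024, 10, requested_mb=2048, requested_gb=20)
except ResourcesUnavailable as exc:
    print('failed to claim:', exc)
# ---------------------------------------------------------------------------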
Must be called after _test_numa_topology() to make sure self.claimed_numa_topology is set. This only applies for live migrations when the hw:mem_page_size extra spec has been set to a non-numeric value (like 'large'). That would in theory allow an instance to live migrate from a host with a 1M page size to a host with a 2M page size, for example. This is not something we want to support, so fail the claim if the page sizes are different. """ if (self.migration.is_live_migration and self.instance.numa_topology and # NOTE(artom) We only support a single page size across all # cells, checking cell 0 is sufficient. self.claimed_numa_topology.cells[0].pagesize != self.instance.numa_topology.cells[0].pagesize): return (_('Requested page size is different from current ' 'page size.')) def _test_numa_topology(self, resources, limit): """Test whether this host can accept the instance's NUMA topology. The _test methods return None on success, and a string-like Message _() object explaining the reason on failure. So we call up to the normal Claim's _test_numa_topology(), and if we get nothing back we test the page size. """ numa_test_failure = super(MoveClaim, self)._test_numa_topology(resources, limit) if numa_test_failure: return numa_test_failure return self._test_live_migration_page_size() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/flavors.py0000664000175000017500000001555700000000000017244 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright (c) 2010 Citrix Systems, Inc. # Copyright 2011 Ken Pepple # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Built-in instance properties.""" import re from oslo_utils import strutils from oslo_utils import uuidutils import nova.conf from nova import context from nova.db import constants as db_const from nova import exception from nova.i18n import _ from nova import objects from nova import utils CONF = nova.conf.CONF # Validate extra specs key names. 
VALID_EXTRASPEC_NAME_REGEX = re.compile(r"[\w\.\- :]+$", re.UNICODE) def _int_or_none(val): if val is not None: return int(val) system_metadata_flavor_props = { 'id': int, 'name': str, 'memory_mb': int, 'vcpus': int, 'root_gb': int, 'ephemeral_gb': int, 'flavorid': str, 'swap': int, 'rxtx_factor': float, 'vcpu_weight': _int_or_none, } system_metadata_flavor_extra_props = [ 'hw:numa_cpus.', 'hw:numa_mem.', ] def create(name, memory, vcpus, root_gb, ephemeral_gb=0, flavorid=None, swap=0, rxtx_factor=1.0, is_public=True, description=None): """Creates flavors.""" if not flavorid: flavorid = uuidutils.generate_uuid() kwargs = { 'memory_mb': memory, 'vcpus': vcpus, 'root_gb': root_gb, 'ephemeral_gb': ephemeral_gb, 'swap': swap, 'rxtx_factor': rxtx_factor, 'description': description } if isinstance(name, str): name = name.strip() # NOTE(vish): Internally, flavorid is stored as a string but it comes # in through json as an integer, so we convert it here. flavorid = str(flavorid) # NOTE(wangbo): validate attributes of the creating flavor. # ram and vcpus should be positive ( > 0) integers. # disk, ephemeral and swap should be non-negative ( >= 0) integers. flavor_attributes = { 'memory_mb': ('ram', 1), 'vcpus': ('vcpus', 1), 'root_gb': ('disk', 0), 'ephemeral_gb': ('ephemeral', 0), 'swap': ('swap', 0) } for key, value in flavor_attributes.items(): kwargs[key] = utils.validate_integer( kwargs[key], value[0], value[1], db_const.MAX_INT, ) # rxtx_factor should be a positive float try: kwargs['rxtx_factor'] = float(kwargs['rxtx_factor']) if ( kwargs['rxtx_factor'] <= 0 or kwargs['rxtx_factor'] > db_const.SQL_SP_FLOAT_MAX ): raise ValueError() except ValueError: msg = _("'rxtx_factor' argument must be a float between 0 and %g") raise exception.InvalidInput(reason=msg % db_const.SQL_SP_FLOAT_MAX) kwargs['name'] = name kwargs['flavorid'] = flavorid # ensure is_public attribute is boolean try: kwargs['is_public'] = strutils.bool_from_string( is_public, strict=True) except ValueError: raise exception.InvalidInput(reason=_("is_public must be a boolean")) flavor = objects.Flavor(context=context.get_admin_context(), **kwargs) flavor.create() return flavor # TODO(termie): flavor-specific code should probably be in the API that uses # flavors. def get_flavor_by_flavor_id(flavorid, ctxt=None, read_deleted="yes"): """Retrieve flavor by flavorid. :raises: FlavorNotFound """ if ctxt is None: ctxt = context.get_admin_context(read_deleted=read_deleted) return objects.Flavor.get_by_flavor_id(ctxt, flavorid, read_deleted) # NOTE(danms): This method is deprecated, do not use it! # Use instance.{old_,new_,}flavor instead, as instances no longer # have flavor information in system_metadata. def extract_flavor(instance, prefix=''): """Create a Flavor object from instance's system_metadata information. """ flavor = objects.Flavor() sys_meta = utils.instance_sys_meta(instance) if not sys_meta: return None for key in system_metadata_flavor_props.keys(): type_key = '%sinstance_type_%s' % (prefix, key) setattr(flavor, key, sys_meta[type_key]) # NOTE(danms): We do NOT save all of extra_specs, but only the # NUMA-related ones that we need to avoid an uglier alternative. This # should be replaced by a general split-out of flavor information from # system_metadata very soon. 
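# --- illustrative aside -----------------------------------------------------
# create() above validates each numeric flavor attribute against a minimum
# (ram and vcpus must be positive, disk/ephemeral/swap merely non-negative)
# by walking a small attribute table. A self-contained sketch of that
# table-driven validation, with a simple validate_integer in place of nova's
# utils helper; names and the MAX_INT bound are illustrative.

MAX_INT = 2 ** 31 - 1

def validate_integer(value, name, min_value, max_value):
    try:
        value = int(value)
    except (TypeError, ValueError):
        raise ValueError('%s must be an integer' % name)
    if not min_value <= value <= max_value:
        raise ValueError('%s must be between %d and %d'
                         % (name, min_value, max_value))
    return value

def validate_flavor(memory_mb, vcpus, root_gb, ephemeral_gb, swap):
    # attribute -> (value, user-facing name, minimum allowed value)
    attributes = {
        'memory_mb': (memory_mb, 'ram', 1),
        'vcpus': (vcpus, 'vcpus', 1),
        'root_gb': (root_gb, 'disk', 0),
        'ephemeral_gb': (ephemeral_gb, 'ephemeral', 0),
        'swap': (swap, 'swap', 0),
    }
    return {key: validate_integer(value, name, minimum, MAX_INT)
            for key, (value, name, minimum) in attributes.items()}

print(validate_flavor(memory_mb='512', vcpus=1, root_gb=0,
                      ephemeral_gb=0, swap=0))
# ---------------------------------------------------------------------------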
extra_specs = [(k, v) for k, v in sys_meta.items() if k.startswith('%sinstance_type_extra_' % prefix)] if extra_specs: flavor.extra_specs = {} for key, value in extra_specs: extra_key = key[len('%sinstance_type_extra_' % prefix):] flavor.extra_specs[extra_key] = value return flavor # NOTE(danms): This method is deprecated, do not use it! # Use instance.{old_,new_,}flavor instead, as instances no longer # have flavor information in system_metadata. # NOTE(stephenfin): 'prefix' is unused and could be removed def save_flavor_info(metadata, flavor, prefix=''): """Save properties from flavor into instance's system_metadata, in the format of: [prefix]instance_type_[key] This can be used to update system_metadata in place from a type, as well as stash information about another flavor for later use (such as during resize). """ for key in system_metadata_flavor_props.keys(): to_key = '%sinstance_type_%s' % (prefix, key) metadata[to_key] = flavor[key] # NOTE(danms): We do NOT save all of extra_specs here, but only the # NUMA-related ones that we need to avoid an uglier alternative. This # should be replaced by a general split-out of flavor information from # system_metadata very soon. extra_specs = flavor.get('extra_specs', {}) for extra_prefix in system_metadata_flavor_extra_props: for key in extra_specs: if key.startswith(extra_prefix): to_key = '%sinstance_type_extra_%s' % (prefix, key) metadata[to_key] = extra_specs[key] return metadata def validate_extra_spec_keys(key_names_list): for key_name in key_names_list: if not VALID_EXTRASPEC_NAME_REGEX.match(key_name): expl = _('Key Names can only contain alphanumeric characters, ' 'periods, dashes, underscores, colons and spaces.') raise exception.InvalidInput(message=expl) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/instance_actions.py0000664000175000017500000000554500000000000021110 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Possible actions on an instance. Actions should probably match a user intention at the API level. Because they can be user visible that should help to avoid confusion. For that reason they tend to maintain the casing sent to the API. Maintaining a list of actions here should protect against inconsistencies when they are used. The naming style of instance actions should be snake_case, as it will consistent with the API names. Do not modify the old ones because they have been exposed to users. 
""" CREATE = 'create' DELETE = 'delete' EVACUATE = 'evacuate' RESTORE = 'restore' STOP = 'stop' START = 'start' REBOOT = 'reboot' REBUILD = 'rebuild' REVERT_RESIZE = 'revertResize' CONFIRM_RESIZE = 'confirmResize' RESIZE = 'resize' MIGRATE = 'migrate' PAUSE = 'pause' UNPAUSE = 'unpause' SUSPEND = 'suspend' RESUME = 'resume' RESCUE = 'rescue' UNRESCUE = 'unrescue' CHANGE_PASSWORD = 'changePassword' SHELVE = 'shelve' SHELVE_OFFLOAD = 'shelveOffload' UNSHELVE = 'unshelve' LIVE_MIGRATION = 'live-migration' LIVE_MIGRATION_CANCEL = 'live_migration_cancel' LIVE_MIGRATION_FORCE_COMPLETE = 'live_migration_force_complete' TRIGGER_CRASH_DUMP = 'trigger_crash_dump' # The extend_volume action is not like the traditional instance actions which # are driven directly through the compute API. The extend_volume action is # initiated by a Cinder volume extend (resize) action. Cinder will call the # server external events API after the volume extend is performed so that Nova # can perform any updates on the compute side. The instance actions framework # is used for tracking this asynchronous operation so the user/admin can know # when it is done in case they need/want to reboot the guest operating system. EXTEND_VOLUME = 'extend_volume' ATTACH_INTERFACE = 'attach_interface' DETACH_INTERFACE = 'detach_interface' ATTACH_VOLUME = 'attach_volume' DETACH_VOLUME = 'detach_volume' ATTACH_SHARE = 'attach_share' DETACH_SHARE = 'detach_share' SWAP_VOLUME = 'swap_volume' LOCK = 'lock' UNLOCK = 'unlock' BACKUP = 'createBackup' CREATE_IMAGE = 'createImage' RESET_STATE = 'resetState' # nova-manage instance actions logged to allow operators and users alike to # track out of band changes made to their instances. NOVA_MANAGE_REFRESH_VOLUME_ATTACHMENT = 'refresh_volume_attachment' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/instance_list.py0000664000175000017500000001714400000000000020421 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.compute import multi_cell_list import nova.conf from nova import context from nova.db.main import api as db from nova import exception from nova import objects from nova.objects import instance as instance_obj CONF = nova.conf.CONF class InstanceSortContext(multi_cell_list.RecordSortContext): def __init__(self, sort_keys, sort_dirs): if not sort_keys: sort_keys = ['created_at', 'id'] sort_dirs = ['desc', 'desc'] if 'uuid' not in sort_keys: # Historically the default sort includes 'id' (see above), which # should give us a stable ordering. Since we're striping across # cell databases here, many sort_keys arrangements will yield # nothing unique across all the databases to give us a stable # ordering, which can mess up expected client pagination behavior. # So, throw uuid into the sort_keys at the end if it's not already # there to keep us repeatable. 
sort_keys = copy.copy(sort_keys) + ['uuid'] sort_dirs = copy.copy(sort_dirs) + ['asc'] super(InstanceSortContext, self).__init__(sort_keys, sort_dirs) class InstanceLister(multi_cell_list.CrossCellLister): def __init__(self, sort_keys, sort_dirs, cells=None, batch_size=None): super(InstanceLister, self).__init__( InstanceSortContext(sort_keys, sort_dirs), cells=cells, batch_size=batch_size) @property def marker_identifier(self): return 'uuid' def get_marker_record(self, ctx, marker): try: im = objects.InstanceMapping.get_by_instance_uuid(ctx, marker) except exception.InstanceMappingNotFound: raise exception.MarkerNotFound(marker=marker) elevated = ctx.elevated(read_deleted='yes') with context.target_cell(elevated, im.cell_mapping) as cctx: try: # NOTE(danms): We query this with no columns_to_join() # as we're just getting values for the sort keys from # it and none of the valid sort keys are on joined # columns. db_inst = db.instance_get_by_uuid(cctx, marker, columns_to_join=[]) except exception.InstanceNotFound: raise exception.MarkerNotFound(marker=marker) return im.cell_mapping.uuid, db_inst def get_marker_by_values(self, ctx, values): return db.instance_get_by_sort_filters(ctx, self.sort_ctx.sort_keys, self.sort_ctx.sort_dirs, values) def get_by_filters(self, ctx, filters, limit, marker, **kwargs): return db.instance_get_all_by_filters_sort( ctx, filters, limit=limit, marker=marker, sort_keys=self.sort_ctx.sort_keys, sort_dirs=self.sort_ctx.sort_dirs, **kwargs) # NOTE(danms): These methods are here for legacy glue reasons. We should not # replicate these for every data type we implement. def get_instances_sorted(ctx, filters, limit, marker, columns_to_join, sort_keys, sort_dirs, cell_mappings=None, batch_size=None, cell_down_support=False): instance_lister = InstanceLister(sort_keys, sort_dirs, cells=cell_mappings, batch_size=batch_size) instance_generator = instance_lister.get_records_sorted( ctx, filters, limit, marker, columns_to_join=columns_to_join, cell_down_support=cell_down_support) return instance_lister, instance_generator def get_instance_list_cells_batch_size(limit, cells): """Calculate the proper batch size for a list request. This will consider config, request limit, and cells being queried and return an appropriate batch size to use for querying said cells. :param limit: The overall limit specified in the request :param cells: The list of CellMapping objects being queried :returns: An integer batch size """ strategy = CONF.api.instance_list_cells_batch_strategy limit = limit or CONF.api.max_limit if len(cells) <= 1: # If we're limited to one (or no) cell for whatever reason, do # not do any batching and just pull the desired limit from the # single cell in one shot. return limit if strategy == 'fixed': # Fixed strategy, always a static batch size batch_size = CONF.api.instance_list_cells_batch_fixed_size elif strategy == 'distributed': # Distributed strategy, 10% more than even partitioning batch_size = int((limit / len(cells)) * 1.10) # We never query a larger batch than the total requested, and never # smaller than the lower limit of 100. return max(min(batch_size, limit), 100) def get_instance_objects_sorted(ctx, filters, limit, marker, expected_attrs, sort_keys, sort_dirs, cell_down_support=False): """Return a list of instances and information about down cells. This returns a tuple of (objects.InstanceList, list(of down cell uuids) for the requested operation. The instances returned are those that were collected from the cells that responded. 
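# --- illustrative aside -----------------------------------------------------
# get_instance_list_cells_batch_size supports a 'fixed' per-cell batch size
# and a 'distributed' one that asks each cell for roughly an even share plus
# 10%, clamped between 100 and the overall limit. Restating that arithmetic
# in a standalone function makes the clamping easy to see; the defaults
# below are illustrative values, not nova configuration.

def cells_batch_size(limit, num_cells, strategy='distributed',
                     fixed_size=100):
    """Per-cell query size for a cross-cell list of up to `limit` records."""
    if num_cells <= 1:
        # A single cell can just serve the whole request in one query.
        return limit
    if strategy == 'fixed':
        batch_size = fixed_size
    else:
        # Even share per cell, padded by 10% to absorb uneven distribution.
        batch_size = int((limit / num_cells) * 1.10)
    # Never ask for more than the request needs, never less than 100.
    return max(min(batch_size, limit), 100)

print(cells_batch_size(limit=1000, num_cells=5))   # 220
print(cells_batch_size(limit=1000, num_cells=50))  # 100 (floor of 100 applies)
print(cells_batch_size(limit=50, num_cells=5))     # 100 (floor wins over the small limit)
# ---------------------------------------------------------------------------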
The uuids of any cells that did not respond (or raised an error) are included in the list as the second element of the tuple. That list is empty if all cells responded. """ query_cell_subset = CONF.api.instance_list_per_project_cells # NOTE(danms): Replicated in part from instance_get_all_by_sort_filters(), # where if we're not admin we're restricted to our context's project if query_cell_subset and not ctx.is_admin: # We are not admin, and configured to only query the subset of cells # we could possibly have instances in. cell_mappings = objects.CellMappingList.get_by_project_id( ctx, ctx.project_id) else: # Either we are admin, or configured to always hit all cells, # so don't limit the list to a subset. context.load_cells() cell_mappings = context.CELLS batch_size = get_instance_list_cells_batch_size(limit, cell_mappings) columns_to_join = instance_obj._expected_cols(expected_attrs) instance_lister, instance_generator = get_instances_sorted(ctx, filters, limit, marker, columns_to_join, sort_keys, sort_dirs, cell_mappings=cell_mappings, batch_size=batch_size, cell_down_support=cell_down_support) if 'fault' in expected_attrs: # We join fault above, so we need to make sure we don't ask # make_instance_list to do it again for us expected_attrs = copy.copy(expected_attrs) expected_attrs.remove('fault') instance_list = instance_obj._make_instance_list(ctx, objects.InstanceList(), instance_generator, expected_attrs) down_cell_uuids = (instance_lister.cells_failed + instance_lister.cells_timed_out) return instance_list, down_cell_uuids ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315633.0 nova-32.0.0/nova/compute/manager.py0000664000175000017500000220574500000000000017204 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Handles all processes relating to instances (guest vms). The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that handles RPC calls relating to creating instances. It is responsible for building a disk image, launching it via the underlying virtualization driver, responding to calls to check its state, attaching persistent storage, and terminating it. 
""" import base64 import binascii import contextlib import copy import functools import inspect import math import sys import threading import time import traceback import typing as ty from cinderclient import exceptions as cinder_exception from cursive import exception as cursive_exception import futurist from keystoneauth1 import exceptions as keystone_exception from openstack import exceptions as sdk_exc import os_traits from oslo_log import log as logging import oslo_messaging as messaging from oslo_serialization import jsonutils from oslo_service import loopingcall from oslo_service import periodic_task from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import units from nova.accelerator import cyborg from nova import block_device from nova.compute import api as compute from nova.compute import build_results from nova.compute import claims from nova.compute import power_state from nova.compute import resource_tracker from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute.utils import wrap_instance_event from nova.compute import vm_states from nova import conductor import nova.conf import nova.context from nova import exception from nova import exception_wrapper from nova.i18n import _ from nova.image import glance from nova import manager from nova.network import model as network_model from nova.network import neutron from nova import objects from nova.objects import base as obj_base from nova.objects import external_event as external_event_obj from nova.objects import fields from nova.objects import instance as obj_instance from nova.objects import migrate_data as migrate_data_obj from nova.objects import service as service_obj from nova.pci import request as pci_req_module from nova.pci import whitelist from nova import safe_utils from nova.scheduler.client import query from nova.scheduler.client import report from nova.scheduler import utils as scheduler_utils from nova.share import manila from nova import utils from nova.virt import block_device as driver_block_device from nova.virt import configdrive from nova.virt import driver from nova.virt import event as virtevent from nova.virt import hardware import nova.virt.node from nova.virt import storage_users from nova.virt import virtapi from nova.volume import cinder CONF = nova.conf.CONF LOG = logging.getLogger(__name__) wrap_exception = functools.partial( exception_wrapper.wrap_exception, service='compute', binary='nova-compute') @contextlib.contextmanager def errors_out_migration_ctxt(migration): """Context manager to error out migration on failure.""" try: yield except Exception: with excutils.save_and_reraise_exception(): if migration: # We may have been passed None for our migration if we're # receiving from an older client. The migration will be # errored via the legacy path. 
migration.status = 'error' try: migration.save() except Exception: LOG.debug( 'Error setting migration status for instance %s.', migration.instance_uuid, exc_info=True) @utils.expects_func_args('migration') def errors_out_migration(function): """Decorator to error out migration on failure.""" @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): wrapped_func = safe_utils.get_wrapped_function(function) keyed_args = inspect.getcallargs(wrapped_func, self, context, *args, **kwargs) migration = keyed_args['migration'] with errors_out_migration_ctxt(migration): return function(self, context, *args, **kwargs) return decorated_function @utils.expects_func_args('instance') def reverts_task_state(function): """Decorator to revert task_state on failure.""" @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): try: return function(self, context, *args, **kwargs) except exception.UnexpectedTaskStateError as e: # Note(maoy): unexpected task state means the current # task is preempted. Do not clear task state in this # case. with excutils.save_and_reraise_exception(): LOG.info("Task possibly preempted: %s", e.format_message()) except Exception: with excutils.save_and_reraise_exception(): wrapped_func = safe_utils.get_wrapped_function(function) keyed_args = inspect.getcallargs(wrapped_func, self, context, *args, **kwargs) # NOTE(mriedem): 'instance' must be in keyed_args because we # have utils.expects_func_args('instance') decorating this # method. instance = keyed_args['instance'] original_task_state = instance.task_state try: self._instance_update(context, instance, task_state=None) LOG.info("Successfully reverted task state from %s on " "failure for instance.", original_task_state, instance=instance) except exception.InstanceNotFound: # We might delete an instance that failed to build shortly # after it errored out this is an expected case and we # should not trace on it. pass except Exception as e: LOG.warning("Failed to revert task state for instance. " "Error: %s", e, instance=instance) return decorated_function @utils.expects_func_args('instance') def wrap_instance_fault(function): """Wraps a method to catch exceptions related to instances. This decorator wraps a method to catch any exceptions having to do with an instance that may get thrown. It then logs an instance fault in the db. """ @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): try: return function(self, context, *args, **kwargs) except exception.InstanceNotFound: raise except Exception as e: # NOTE(gtt): If argument 'instance' is in args rather than kwargs, # we will get a KeyError exception which will cover up the real # exception. So, we update kwargs with the values from args first. # then, we can get 'instance' from kwargs easily. kwargs.update(dict(zip(function.__code__.co_varnames[2:], args))) with excutils.save_and_reraise_exception(): compute_utils.add_instance_fault_from_exc(context, kwargs['instance'], e, sys.exc_info()) return decorated_function @utils.expects_func_args('image_id', 'instance') def delete_image_on_error(function): """Used for snapshot related method to ensure the image created in compute.api is deleted when an error occurs. 
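    Illustrative usage sketch (the method name below is hypothetical, but
    real callers follow the same (self, context, image_id, instance, ...)
    signature)::

        @delete_image_on_error
        def snapshot_instance(self, context, image_id, instance):
            ...  # any exception raised here triggers deletion of image_id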
""" @functools.wraps(function) def decorated_function(self, context, image_id, instance, *args, **kwargs): try: return function(self, context, image_id, instance, *args, **kwargs) except Exception: with excutils.save_and_reraise_exception(): compute_utils.delete_image( context, instance, self.image_api, image_id, log_exc_info=True) return decorated_function class ThreadingEventWithResult(threading.Event): UNSET_SENTINEL = object() FAILED_SENTINEL = object() def __init__(self): super().__init__() self._result = self.UNSET_SENTINEL self._lock = threading.Lock() def set(self, result=None): with self._lock: if super().is_set() and result != self._result: raise ValueError('Cannot change the result once it is set') self._result = result super().set() def wait(self, timeout=None): succeeded = super().wait(timeout) if succeeded: return self._result else: return self.FAILED_SENTINEL # Each collection of events is a dict of eventlet Events keyed by a tuple of # event name and associated tag _InstanceEvents = ty.Dict[ty.Tuple[str, str], ThreadingEventWithResult] class InstanceEvents(object): def __init__(self): self._events: ty.Optional[ty.Dict[str, _InstanceEvents]] = {} @staticmethod def _lock_name(instance) -> str: return '%s-%s' % (instance.uuid, 'events') def prepare_for_instance_event( self, instance: 'objects.Instance', name: str, tag: str, ) -> ThreadingEventWithResult: """Prepare to receive an event for an instance. This will register an event for the given instance that we will wait on later. This should be called before initiating whatever action will trigger the event. The resulting ThreadingEventWithResult object should be wait()'d on to ensure completion. :param instance: the instance for which the event will be generated :param name: the name of the event we're expecting :param tag: the tag associated with the event we're expecting :returns: an event object that should be wait()'d on """ @utils.synchronized(self._lock_name(instance)) def _create_or_get_event(): if self._events is None: # NOTE(danms): We really should have a more specific error # here, but this is what we use for our default error case raise exception.NovaException( 'In shutdown, no new events can be scheduled') instance_events = self._events.setdefault(instance.uuid, {}) return instance_events.setdefault((name, tag), ThreadingEventWithResult()) LOG.debug('Preparing to wait for external event %(name)s-%(tag)s', {'name': name, 'tag': tag}, instance=instance) return _create_or_get_event() def pop_instance_event(self, instance, event): """Remove a pending event from the wait list. This will remove a pending event from the wait list so that it can be used to signal the waiters to wake up. 
:param instance: the instance for which the event was generated :param event: the nova.objects.external_event.InstanceExternalEvent that describes the event :returns: the ThreadingEventWithResult object on which the waiters are blocked """ no_events_sentinel = object() no_matching_event_sentinel = object() @utils.synchronized(self._lock_name(instance)) def _pop_event(): if self._events is None: LOG.debug('Unexpected attempt to pop events during shutdown', instance=instance) return no_events_sentinel events = self._events.get(instance.uuid) if not events: return no_events_sentinel _event = events.pop((event.name, event.tag), None) if not events: del self._events[instance.uuid] if _event is None: return no_matching_event_sentinel return _event result = _pop_event() if result is no_events_sentinel: LOG.debug('No waiting events found dispatching %(event)s', {'event': event.key}, instance=instance) return None elif result is no_matching_event_sentinel: LOG.debug( 'No event matching %(event)s in %(events)s', { 'event': event.key, # mypy can't identify the none check in _pop_event 'events': self._events.get( # type: ignore instance.uuid, {}).keys(), }, instance=instance, ) return None else: return result def clear_events_for_instance(self, instance): """Remove all pending events for an instance. This will remove all events currently pending for an instance and return them (indexed by event name). :param instance: the instance for which events should be purged :returns: a dictionary of {event_name: ThreadingEventWithResult} """ @utils.synchronized(self._lock_name(instance)) def _clear_events(): if self._events is None: LOG.debug('Unexpected attempt to clear events during shutdown', instance=instance) return dict() # NOTE(danms): We have historically returned the raw internal # format here, which is {event.key: [events, ...])} so just # trivially convert it here. return {'%s-%s' % k: e for k, e in self._events.pop(instance.uuid, {}).items()} return _clear_events() def cancel_all_events(self): if self._events is None: LOG.debug('Unexpected attempt to cancel events during shutdown.') return our_events = self._events # NOTE(danms): Block new events self._events = None for instance_uuid, events in our_events.items(): for (name, tag), eventlet_event in events.items(): LOG.debug('Canceling in-flight event %(name)s-%(tag)s for ' 'instance %(instance_uuid)s', {'name': name, 'tag': tag, 'instance_uuid': instance_uuid}) event = objects.InstanceExternalEvent( instance_uuid=instance_uuid, name=name, status='failed', tag=tag, data={}) eventlet_event.set(event) class ComputeVirtAPI(virtapi.VirtAPI): def __init__(self, compute): super(ComputeVirtAPI, self).__init__() self._compute = compute self.reportclient = compute.reportclient class ExitEarly(Exception): def __init__(self, events): super(Exception, self).__init__() self.events = events self._exit_early_exc = ExitEarly def exit_wait_early(self, events): """Exit a wait_for_instance_event() immediately and avoid waiting for some events. :param: events: A list of (name, tag) tuples for events that we should skip waiting for during a wait_for_instance_event(). 
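        A minimal illustrative sketch of the calling pattern; ``virtapi``,
        ``plug_the_vif``, ``port_id`` and ``vif_already_active`` are
        placeholder names, while 'network-vif-plugged' is an ordinary
        external event name::

            events = [('network-vif-plugged', port_id)]
            with virtapi.wait_for_instance_event(instance, events):
                plug_the_vif()
                if vif_already_active:
                    # nothing to wait for; skip these events on context exit
                    virtapi.exit_wait_early(events)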
""" raise self._exit_early_exc(events=events) def _default_error_callback(self, event_name, instance): raise exception.NovaException(_('Instance event failed')) class _InstanceEvent: EXPECTED = "expected" WAITING = "waiting" RECEIVED = "received" RECEIVED_EARLY = "received early" TIMED_OUT = "timed out" RECEIVED_NOT_PROCESSED = "received but not processed" def __init__(self, name: str, event: ThreadingEventWithResult) -> None: self.name = name self.event = event self.status = self.EXPECTED self.wait_time = None def mark_as_received_early(self) -> None: self.status = self.RECEIVED_EARLY def is_received_early(self) -> bool: return self.status == self.RECEIVED_EARLY def _update_status_no_wait(self): if self.status == self.EXPECTED and self.event.is_set(): self.status = self.RECEIVED_NOT_PROCESSED def wait(self, timeout) -> 'objects.InstanceExternalEvent': self.status = self.WAITING with timeutils.StopWatch() as sw: instance_event = self.event.wait(timeout) if instance_event is ThreadingEventWithResult.FAILED_SENTINEL: self.status = self.TIMED_OUT self.wait_time = sw.elapsed() raise exception.InstanceEventTimeout() self.status = self.RECEIVED self.wait_time = sw.elapsed() return instance_event def __str__(self) -> str: self._update_status_no_wait() if self.status == self.EXPECTED: return f"{self.name}: expected but not received" if self.status == self.RECEIVED: return ( f"{self.name}: received after waiting " f"{self.wait_time:.2f} seconds") if self.status == self.TIMED_OUT: return ( f"{self.name}: timed out after " f"{self.wait_time:.2f} seconds") return f"{self.name}: {self.status}" @staticmethod def _wait_for_instance_events( instance: 'objects.Instance', events: dict, error_callback: ty.Callable, timeout: int, ) -> None: deadline = time.monotonic() + timeout for event_name, event in events.items(): if event.is_received_early(): continue remaining_time = deadline - time.monotonic() if remaining_time <= 0: raise exception.InstanceEventTimeout() actual_event = event.wait(timeout=remaining_time) if actual_event.status == 'completed': continue # If we get here, we have an event that was not completed, # nor skipped via exit_wait_early(). Decide whether to # keep waiting by calling the error_callback() hook. decision = error_callback(event_name, instance) if decision is False: break @contextlib.contextmanager def wait_for_instance_event(self, instance, event_names, deadline=300, error_callback=None): """Plan to wait for some events, run some code, then wait. This context manager will first create plans to wait for the provided event_names, yield, and then wait for all the scheduled events to complete. Note that this uses an InstanceEventTimeout to bound the operation, so callers should be prepared to catch that failure and handle that situation appropriately. If the event is not received by the specified timeout deadline, InstanceEventTimeout is raised. If the event is received but did not have a 'completed' status, a NovaException is raised. If an error_callback is provided, instead of raising an exception as detailed above for the failure case, the callback will be called with the event_name and instance, and can return True to continue waiting for the rest of the events, False to stop processing, or raise an exception which will bubble up to the waiter. 
If the inner code wishes to abort waiting for one or more events because it knows some state to be finished or condition to be satisfied, it can use VirtAPI.exit_wait_early() with a list of event (name,tag) items to avoid waiting for those events upon context exit. Note that exit_wait_early() exits the context immediately and should be used to signal that all work has been completed and provide the unified list of events that need not be waited for. Waiting for the remaining events will begin immediately upon early exit as if the context was exited normally. :param instance: The instance for which an event is expected :param event_names: A list of event names. Each element is a tuple of strings to indicate (name, tag), where name is required, but tag may be None. :param deadline: Maximum number of seconds we should wait for all of the specified events to arrive. :param error_callback: A function to be called if an event arrives """ if error_callback is None: error_callback = self._default_error_callback events = {} for event_name in event_names: name, tag = event_name event_name = objects.InstanceExternalEvent.make_key(name, tag) try: event = ( self._compute.instance_events.prepare_for_instance_event( instance, name, tag)) events[event_name] = self._InstanceEvent(event_name, event) except exception.NovaException: error_callback(event_name, instance) # NOTE(danms): Don't wait for any of the events. They # should all be canceled and fired immediately below, # but don't stick around if not. deadline = 0 try: yield except self._exit_early_exc as e: early_events = set([objects.InstanceExternalEvent.make_key(n, t) for n, t in e.events]) # If there are expected events that received early, mark them, # so they won't be waited for later for early_event_name in early_events: if early_event_name in events: events[early_event_name].mark_as_received_early() sw = timeutils.StopWatch() sw.start() try: self._wait_for_instance_events( instance, events, error_callback, timeout=deadline) except exception.InstanceEventTimeout: LOG.warning( 'Timeout waiting for %(events)s for instance with ' 'vm_state %(vm_state)s and task_state %(task_state)s. ' 'Event states are: %(event_states)s', { 'events': list(events.keys()), 'vm_state': instance.vm_state, 'task_state': instance.task_state, 'event_states': ', '.join([str(event) for event in events.values()]), }, instance=instance) raise LOG.debug('Instance event wait completed in %i seconds for %s', sw.elapsed(), ','.join(x[0] for x in event_names), instance=instance) def update_compute_provider_status(self, context, rp_uuid, enabled): """Used to add/remove the COMPUTE_STATUS_DISABLED trait on the provider :param context: nova auth RequestContext :param rp_uuid: UUID of a compute node resource provider in Placement :param enabled: True if the node is enabled in which case the trait would be removed, False if the node is disabled in which case the trait would be added. :raises: ResourceProviderTraitRetrievalFailed :raises: ResourceProviderUpdateConflict :raises: ResourceProviderUpdateFailed :raises: TraitRetrievalFailed :raises: keystoneauth1.exceptions.ClientException """ trait_name = os_traits.COMPUTE_STATUS_DISABLED # Get the current traits (and generation) for the provider. # TODO(mriedem): Leverage the ProviderTree cache in get_provider_traits trait_info = self.reportclient.get_provider_traits(context, rp_uuid) # If the host is enabled, remove the trait (if set), else add # the trait if it doesn't already exist. 
original_traits = trait_info.traits new_traits = None if enabled and trait_name in original_traits: new_traits = original_traits - {trait_name} LOG.debug('Removing trait %s from compute node resource ' 'provider %s in placement.', trait_name, rp_uuid) elif not enabled and trait_name not in original_traits: new_traits = original_traits | {trait_name} LOG.debug('Adding trait %s to compute node resource ' 'provider %s in placement.', trait_name, rp_uuid) if new_traits is not None: self.reportclient.set_traits_for_provider( context, rp_uuid, new_traits, generation=trait_info.generation) class ComputeManager(manager.Manager): """Manages the running instances from creation to destruction.""" target = messaging.Target(version='6.4') def __init__(self, compute_driver=None, *args, **kwargs): """Load configuration options and connect to the hypervisor.""" # We want the ComputeManager, ResourceTracker and ComputeVirtAPI all # using the same instance of SchedulerReportClient which has the # ProviderTree cache for this compute service. # NOTE(danms): We do not use the global placement client # singleton here, because the above-mentioned stack of objects # maintain local state in the client. Thus, keeping our own # private object for that stack avoids any potential conflict # with other users in our process outside of the above. self.reportclient = report.SchedulerReportClient() self.virtapi = ComputeVirtAPI(self) self.network_api = neutron.API() self.volume_api = cinder.API() self.manila_api = manila.API() self.image_api = glance.API() self._last_bw_usage_poll = 0.0 self.compute_api = compute.API() self.compute_rpcapi = compute_rpcapi.ComputeAPI() self.compute_task_api = conductor.ComputeTaskAPI() self.query_client = query.SchedulerQueryClient() self.instance_events = InstanceEvents() self._sync_power_executor = futurist.GreenThreadPoolExecutor( max_workers=CONF.sync_power_state_pool_size) self._syncs_in_progress = {} self.send_instance_updates = ( CONF.filter_scheduler.track_instance_changes) if CONF.max_concurrent_builds != 0: self._build_semaphore = threading.Semaphore( CONF.max_concurrent_builds) else: self._build_semaphore = compute_utils.UnlimitedSemaphore() if CONF.max_concurrent_snapshots > 0: self._snapshot_semaphore = threading.Semaphore( CONF.max_concurrent_snapshots) else: self._snapshot_semaphore = compute_utils.UnlimitedSemaphore() if CONF.max_concurrent_live_migrations > 0: self._live_migration_executor = futurist.GreenThreadPoolExecutor( max_workers=CONF.max_concurrent_live_migrations) else: # CONF.max_concurrent_live_migrations is 0 (unlimited) self._live_migration_executor = futurist.GreenThreadPoolExecutor() # This is a dict, keyed by instance uuid, to a two-item tuple of # migration object and Future for the queued live migration. self._waiting_live_migrations = {} super(ComputeManager, self).__init__(service_name="compute", *args, **kwargs) # TODO(sbauza): Remove this call once we delete the V5Proxy class self.additional_endpoints.append(_ComputeV5Proxy(self)) # NOTE(russellb) Load the driver last. It may call back into the # compute manager via the virtapi, so we want it to be fully # initialized before that happens. 
self.service_ref = None self.driver = driver.load_compute_driver(self.virtapi, compute_driver) self.rt = resource_tracker.ResourceTracker( self.host, self.driver, reportclient=self.reportclient) def reset(self): LOG.info('Reloading compute RPC API') compute_rpcapi.reset_globals() self.compute_rpcapi = compute_rpcapi.ComputeAPI() self.reportclient.clear_provider_cache() def _update_resource_tracker(self, context, instance): """Let the resource tracker know that an instance has changed state.""" if instance.host == self.host: self.rt.update_usage(context, instance, instance.node) def _instance_update(self, context, instance, **kwargs): """Update an instance in the database using kwargs as value.""" for k, v in kwargs.items(): setattr(instance, k, v) instance.save() self._update_resource_tracker(context, instance) def _nil_out_instance_obj_host_and_node(self, instance): # NOTE(jwcroppe): We don't do instance.save() here for performance # reasons; a call to this is expected to be immediately followed by # another call that does instance.save(), thus avoiding two writes # to the database layer. instance.host = None instance.node = None instance.compute_id = None # ResourceTracker._set_instance_host_and_node also sets launched_on # to the same value as host and is really only ever used by legacy # nova-network code, but we should also null it out to avoid confusion # if there is an instance in the database with no host set but # launched_on is set. Note that we do not care about using launched_on # as some kind of debug helper if diagnosing a build failure, that is # what instance action events are for. instance.launched_on = None # If the instance is not on a host, it's not in an aggregate and # therefore is not in an availability zone. instance.availability_zone = None def _set_instance_obj_error_state(self, instance, clean_task_state=False): try: instance.vm_state = vm_states.ERROR if clean_task_state: instance.task_state = None instance.save() except exception.InstanceNotFound: LOG.debug('Instance has been destroyed from under us while ' 'trying to set it to ERROR', instance=instance) def _get_instances_on_driver(self, context, filters=None): """Return a list of instance records for the instances found on the hypervisor which satisfy the specified filters. If filters=None return a list of instance records for all the instances found on the hypervisor. """ if not filters: filters = {} try: driver_uuids = self.driver.list_instance_uuids() if len(driver_uuids) == 0: # Short circuit, don't waste a DB call return objects.InstanceList() filters['uuid'] = driver_uuids local_instances = objects.InstanceList.get_by_filters( context, filters, use_slave=True) return local_instances except NotImplementedError: pass # The driver doesn't support uuids listing, so we'll have # to brute force. driver_instances = self.driver.list_instances() # NOTE(mjozefcz): In this case we need to apply host filter. # Without this all instance data would be fetched from db. filters['host'] = self.host instances = objects.InstanceList.get_by_filters(context, filters, use_slave=True) name_map = {instance.name: instance for instance in instances} local_instances = [] for driver_instance in driver_instances: instance = name_map.get(driver_instance) if not instance: continue local_instances.append(instance) return local_instances def _destroy_evacuated_instances(self, context, node_cache): """Destroys evacuated instances. While nova-compute was down, the instances running on it could be evacuated to another host. 
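        For example (an illustrative scenario): if an instance was evacuated
        from this host to another compute node while this service was down,
        its guest may still be defined in the local hypervisor; on startup it
        is destroyed here and this host's Placement allocations for the
        instance are removed.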
This method looks for evacuation migration records where this is the source host and which were either started (accepted), in-progress (pre-migrating) or migrated (done). From those migration records, local instances reported by the hypervisor are compared to the instances for the migration records and those local guests are destroyed, along with instance allocation records in Placement for this node. Then allocations are removed from Placement for every instance that is evacuated from this host regardless if the instance is reported by the hypervisor or not. :param context: The request context :param node_cache: A dict of ComputeNode objects keyed by the UUID of the compute node :return: A dict keyed by instance uuid mapped to Migration objects for instances that were migrated away from this host """ filters = { 'source_compute': self.host, # NOTE(mriedem): Migration records that have been accepted are # included in case the source node comes back up while instances # are being evacuated to another host. We don't want the same # instance being reported from multiple hosts. # NOTE(lyarwood): pre-migrating is also included here as the # source compute can come back online shortly after the RT # claims on the destination that in-turn moves the migration to # pre-migrating. If the evacuate fails on the destination host, # the user can rebuild the instance (in ERROR state) on the source # host. 'status': ['accepted', 'pre-migrating', 'done'], 'migration_type': fields.MigrationType.EVACUATION, } with utils.temporary_mutation(context, read_deleted='yes'): evacuations = objects.MigrationList.get_by_filters(context, filters) if not evacuations: return {} evacuations = {mig.instance_uuid: mig for mig in evacuations} # TODO(mriedem): We could optimize by pre-loading the joined fields # we know we'll use, like info_cache and flavor. 
local_instances = self._get_instances_on_driver(context) evacuated_local_instances = {inst.uuid: inst for inst in local_instances if inst.uuid in evacuations} for instance in evacuated_local_instances.values(): LOG.info('Destroying instance as it has been evacuated from ' 'this host but still exists in the hypervisor', instance=instance) try: network_info = self.network_api.get_instance_nw_info( context, instance) bdi = self._get_instance_block_device_info(context, instance) evac = evacuations[instance.uuid] destroy_disks = not (self._is_instance_storage_shared( context, instance, host=evac.dest_compute)) except exception.InstanceNotFound: network_info = network_model.NetworkInfo() bdi = {} LOG.info('Instance has been marked deleted already, ' 'removing it from the hypervisor.', instance=instance) # always destroy disks if the instance was deleted destroy_disks = True self.driver.destroy(context, instance, network_info, bdi, destroy_disks) hostname_to_cn_uuid = { cn.hypervisor_hostname: cn.uuid for cn in node_cache.values()} for instance_uuid, migration in evacuations.items(): try: if instance_uuid in evacuated_local_instances: # Avoid the db call if we already have the instance loaded # above instance = evacuated_local_instances[instance_uuid] else: instance = objects.Instance.get_by_uuid( context, instance_uuid) except exception.InstanceNotFound: # The instance already deleted so we expect that every # allocation of that instance has already been cleaned up continue LOG.info('Cleaning up allocations of the instance as it has been ' 'evacuated from this host', instance=instance) if migration.source_node not in hostname_to_cn_uuid: LOG.error("Failed to clean allocation of evacuated " "instance as the source node %s is not found", migration.source_node, instance=instance) continue cn_uuid = hostname_to_cn_uuid[migration.source_node] # If the instance was deleted in the interim, assume its # allocations were properly cleaned up (either by its hosting # compute service or the API). if (not instance.deleted and not self.reportclient. remove_provider_tree_from_instance_allocation( context, instance.uuid, cn_uuid)): LOG.error("Failed to clean allocation of evacuated instance " "on the source node %s", cn_uuid, instance=instance) migration.status = 'completed' migration.save() return evacuations def _is_instance_storage_shared(self, context, instance, host=None): shared_storage = True data = None try: data = self.driver.check_instance_shared_storage_local(context, instance) if data: shared_storage = (self.compute_rpcapi. 
check_instance_shared_storage(context, data, instance=instance, host=host)) except NotImplementedError: LOG.debug('Hypervisor driver does not support ' 'instance shared storage check, ' 'assuming it\'s not on shared storage', instance=instance) shared_storage = False except Exception: LOG.exception('Failed to check if instance shared', instance=instance) finally: if data: self.driver.check_instance_shared_storage_cleanup(context, data) return shared_storage def _complete_partial_deletion(self, context, instance): """Complete deletion for instances in DELETED status but not marked as deleted in the DB """ instance.destroy() bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) self._complete_deletion(context, instance) self._notify_about_instance_usage(context, instance, "delete.end") compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.DELETE, phase=fields.NotificationPhase.END, bdms=bdms) def _complete_deletion(self, context, instance): self._update_resource_tracker(context, instance) # If we're configured to do deferred deletes, don't force deletion of # allocations if there's a conflict. force = False if CONF.reclaim_instance_interval > 0 else True self.reportclient.delete_allocation_for_instance(context, instance.uuid, force=force) self._clean_instance_console_tokens(context, instance) self._delete_scheduler_instance_info(context, instance.uuid) def _validate_pinning_configuration(self, instances): if not self.driver.capabilities.get('supports_pcpus', False): return for instance in instances: # ignore deleted instances if instance.deleted: continue # if this is an unpinned instance and the host only has # 'cpu_dedicated_set' configured, we need to tell the operator to # correct their configuration if not instance.numa_topology or ( instance.numa_topology.cpu_policy in ( None, fields.CPUAllocationPolicy.SHARED ) ): # we don't need to check 'vcpu_pin_set' since it can't coexist # alongside 'cpu_dedicated_set' if (CONF.compute.cpu_dedicated_set and not CONF.compute.cpu_shared_set): msg = _("This host has unpinned instances but has no CPUs " "set aside for this purpose; configure '[compute] " "cpu_shared_set' instead of, or in addition to, " "'[compute] cpu_dedicated_set'") raise exception.InvalidConfiguration(msg) continue # ditto for pinned instances if only 'cpu_shared_set' is configured if (CONF.compute.cpu_shared_set and not CONF.compute.cpu_dedicated_set and not CONF.vcpu_pin_set): msg = _("This host has pinned instances but has no CPUs " "set aside for this purpose; configure '[compute] " "cpu_dedicated_set' instead of, or in addition to, " "'[compute] cpu_shared_set'.") raise exception.InvalidConfiguration(msg) # if this is a mixed instance with both pinned and unpinned CPUs, # the host must have both 'cpu_dedicated_set' and 'cpu_shared_set' # configured. check if 'cpu_shared_set' is set. if (instance.numa_topology.cpu_policy == fields.CPUAllocationPolicy.MIXED and not CONF.compute.cpu_shared_set): msg = _("This host has mixed instance requesting both pinned " "and unpinned CPUs but hasn't set aside unpinned CPUs " "for this purpose; Configure " "'[compute] cpu_shared_set'.") raise exception.InvalidConfiguration(msg) # for mixed instance check if 'cpu_dedicated_set' is set. 
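            # Illustrative example of a mixed instance (the extra specs are
            # just an example): a flavor with hw:cpu_policy=mixed and
            # hw:cpu_dedicated_mask=0-1 pins vCPUs 0 and 1 and leaves the
            # remaining vCPUs floating, so the host needs both
            # '[compute] cpu_dedicated_set' and '[compute] cpu_shared_set'.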
if (instance.numa_topology.cpu_policy == fields.CPUAllocationPolicy.MIXED and not CONF.compute.cpu_dedicated_set): msg = _("This host has mixed instance requesting both pinned " "and unpinned CPUs but hasn't set aside pinned CPUs " "for this purpose; Configure " "'[compute] cpu_dedicated_set'") raise exception.InvalidConfiguration(msg) # also check to make sure the operator hasn't accidentally # dropped some cores that instances are currently using available_dedicated_cpus = (hardware.get_vcpu_pin_set() or hardware.get_cpu_dedicated_set()) pinned_cpus = instance.numa_topology.cpu_pinning if available_dedicated_cpus and ( pinned_cpus - available_dedicated_cpus): # we can't raise an exception because of bug #1289064, # which meant we didn't recalculate CPU pinning information # when we live migrated a pinned instance LOG.warning( "Instance is pinned to host CPUs %(cpus)s " "but one or more of these CPUs are not included in " "either '[compute] cpu_dedicated_set' or " "'vcpu_pin_set'; you should update these " "configuration options to include the missing CPUs " "or rebuild or cold migrate this instance.", {'cpus': list(pinned_cpus)}, instance=instance) def _validate_vtpm_configuration(self, instances): if self.driver.capabilities.get('supports_vtpm', False): return for instance in instances: if instance.deleted: continue # NOTE(stephenfin): We don't have an attribute on the instance to # check for this, so we need to inspect the flavor/image metadata if hardware.get_vtpm_constraint( instance.flavor, instance.image_meta, ): msg = _( 'This host has instances with the vTPM feature enabled, ' 'but the host is not correctly configured; enable ' 'vTPM support.' ) raise exception.InvalidConfiguration(msg) def _reset_live_migration(self, context, instance): migration = None try: migration = objects.Migration.get_by_instance_and_status( context, instance.uuid, 'running') if migration: self.live_migration_abort(context, instance, migration.id) except Exception: LOG.exception('Failed to abort live-migration', instance=instance) finally: if migration: self._set_migration_status(migration, 'error') LOG.info('Instance found in migrating state during ' 'startup. Resetting task_state', instance=instance) instance.task_state = None instance.save(expected_task_state=[task_states.MIGRATING]) def _init_instance(self, context, instance): """Initialize this instance during service init.""" # NOTE(danms): If the instance appears to not be owned by this # host, it may have been evacuated away, but skipped by the # evacuation cleanup code due to configuration. Thus, if that # is a possibility, don't touch the instance in any way, but # log the concern. This will help avoid potential issues on # startup due to misconfiguration. if instance.host != self.host: LOG.warning('Instance %(uuid)s appears to not be owned ' 'by this host, but by %(host)s. Startup ' 'processing is being skipped.', {'uuid': instance.uuid, 'host': instance.host}) return # Instances that are shut down, or in an error state can not be # initialized and are not attempted to be recovered. The exception # to this are instances that are in RESIZE_MIGRATING or DELETING, # which are dealt with further down. 
if (instance.vm_state == vm_states.SOFT_DELETED or (instance.vm_state == vm_states.ERROR and instance.task_state not in (task_states.RESIZE_MIGRATING, task_states.DELETING))): LOG.debug("Instance is in %s state.", instance.vm_state, instance=instance) return if instance.vm_state == vm_states.DELETED: try: self._complete_partial_deletion(context, instance) except Exception: # we don't want that an exception blocks the init_host LOG.exception('Failed to complete a deletion', instance=instance) return if (instance.vm_state == vm_states.BUILDING or instance.task_state in [task_states.SCHEDULING, task_states.BLOCK_DEVICE_MAPPING, task_states.NETWORKING, task_states.SPAWNING]): # NOTE(dave-mcnally) compute stopped before instance was fully # spawned so set to ERROR state. This is safe to do as the state # may be set by the api but the host is not so if we get here the # instance has already been scheduled to this particular host. LOG.debug("Instance failed to spawn correctly, " "setting to ERROR state", instance=instance) self._set_instance_obj_error_state(instance, clean_task_state=True) return if (instance.vm_state in [vm_states.ACTIVE, vm_states.STOPPED] and instance.task_state in [task_states.REBUILDING, task_states.REBUILD_BLOCK_DEVICE_MAPPING, task_states.REBUILD_SPAWNING]): # NOTE(jichenjc) compute stopped before instance was fully # spawned so set to ERROR state. This is consistent to BUILD LOG.debug("Instance failed to rebuild correctly, " "setting to ERROR state", instance=instance) self._set_instance_obj_error_state(instance, clean_task_state=True) return if (instance.vm_state != vm_states.ERROR and instance.task_state in [task_states.IMAGE_SNAPSHOT_PENDING, task_states.IMAGE_PENDING_UPLOAD, task_states.IMAGE_UPLOADING, task_states.IMAGE_SNAPSHOT]): LOG.debug("Instance in transitional state %s at start-up " "clearing task state", instance.task_state, instance=instance) instance.task_state = None instance.save() if (instance.vm_state != vm_states.ERROR and instance.task_state in [task_states.RESIZE_PREP]): LOG.debug("Instance in transitional state %s at start-up " "clearing task state", instance['task_state'], instance=instance) instance.task_state = None instance.save() if instance.task_state == task_states.DELETING: try: LOG.info('Service started deleting the instance during ' 'the previous run, but did not finish. Restarting' ' the deletion now.', instance=instance) instance.obj_load_attr('metadata') instance.obj_load_attr('system_metadata') bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) self._delete_instance(context, instance, bdms) except Exception: # we don't want that an exception blocks the init_host LOG.exception('Failed to complete a deletion', instance=instance) self._set_instance_obj_error_state(instance) return current_power_state = self._get_power_state(instance) try_reboot, reboot_type = self._retry_reboot( instance, current_power_state) # NOTE(amorin) # If the instance is in power_state_SHUTDOWN, we will try_reboot if try_reboot: LOG.debug("Instance in transitional state (%(task_state)s) at " "start-up and power state is (%(power_state)s), " "triggering reboot", {'task_state': instance.task_state, 'power_state': current_power_state}, instance=instance) # NOTE(mikal): if the instance was doing a soft reboot that got as # far as shutting down the instance but not as far as starting it # again, then we've just become a hard reboot. 
That means the # task state for the instance needs to change so that we're in one # of the expected task states for a hard reboot. if (instance.task_state in task_states.soft_reboot_states and reboot_type == 'HARD'): instance.task_state = task_states.REBOOT_PENDING_HARD instance.save() self.reboot_instance(context, instance, block_device_info=None, reboot_type=reboot_type) return # NOTE(plestang): an instance might be in power_state.RUNNING with a # transient state when a host is brutally shutdown or rebooted while a # reboot/pause/unpause is scheduled on client side elif (current_power_state == power_state.RUNNING and instance.task_state in [task_states.REBOOT_STARTED, task_states.REBOOT_STARTED_HARD, task_states.REBOOTING_HARD, task_states.REBOOTING, task_states.PAUSING, task_states.UNPAUSING]): LOG.warning("Instance in transitional state " "(%(task_state)s) at start-up and power state " "is (%(power_state)s), clearing task state", {'task_state': instance.task_state, 'power_state': current_power_state}, instance=instance) instance.task_state = None instance.vm_state = vm_states.ACTIVE instance.save() elif (current_power_state == power_state.PAUSED and instance.task_state == task_states.UNPAUSING): LOG.warning("Instance in transitional state " "(%(task_state)s) at start-up and power state " "is (%(power_state)s), clearing task state " "and unpausing the instance", {'task_state': instance.task_state, 'power_state': current_power_state}, instance=instance) try: self.unpause_instance(context, instance) except NotImplementedError: # Some virt driver didn't support pause and unpause pass except Exception: LOG.exception('Failed to unpause instance', instance=instance) return if instance.task_state == task_states.POWERING_OFF: try: LOG.debug("Instance in transitional state %s at start-up " "retrying stop request", instance.task_state, instance=instance) self.stop_instance(context, instance, True) except Exception: # we don't want that an exception blocks the init_host LOG.exception('Failed to stop instance', instance=instance) return if instance.task_state == task_states.POWERING_ON: try: LOG.debug("Instance in transitional state %s at start-up " "retrying start request", instance.task_state, instance=instance) self.start_instance(context, instance) except Exception: # we don't want that an exception blocks the init_host LOG.exception('Failed to start instance', instance=instance) return net_info = instance.get_network_info() try: self.driver.plug_vifs(instance, net_info) except NotImplementedError as e: LOG.debug(e, instance=instance) except exception.VirtualInterfacePlugException: # NOTE(mriedem): If we get here, it could be because the vif_type # in the cache is "binding_failed" or "unbound". # The periodic task _heal_instance_info_cache checks for this # condition. It should fix this by binding the ports again when # it gets to this instance. LOG.exception('Virtual interface plugging failed for instance. ' 'The port binding:host_id may need to be manually ' 'updated.', instance=instance) self._set_instance_obj_error_state(instance) return except exception.PciDeviceNotFoundById: # This is bug 1981813 where the bound port vnic_type has changed # from direct to macvtap. Nova does not support that and it # already printed an ERROR when the change is detected during # _heal_instance_info_cache. Now we print an ERROR again and skip # plugging the vifs but let the service startup continue to init # the other instances LOG.exception( 'Virtual interface plugging failed for instance. 
Probably the ' 'vnic_type of the bound port has been changed. Nova does not ' 'support such change.', instance=instance ) return if instance.task_state == task_states.RESIZE_MIGRATING: # We crashed during resize/migration, so roll back for safety try: # NOTE(mriedem): check old_vm_state for STOPPED here, if it's # not in system_metadata we default to True for backwards # compatibility power_on = (instance.system_metadata.get('old_vm_state') != vm_states.STOPPED) block_dev_info = self._get_instance_block_device_info(context, instance) migration = objects.Migration.get_by_id_and_instance( context, instance.migration_context.migration_id, instance.uuid) self.driver.finish_revert_migration(context, instance, net_info, migration, block_dev_info, power_on) except Exception: LOG.exception('Failed to revert crashed migration', instance=instance) finally: LOG.info('Instance found in migrating state during ' 'startup. Resetting task_state', instance=instance) instance.task_state = None instance.save() if instance.task_state == task_states.MIGRATING: # Live migration did not complete, but instance is on this # host. Abort ongoing migration if still running and reset state. self._reset_live_migration(context, instance) db_state = instance.power_state drv_state = self._get_power_state(instance) expect_running = (db_state == power_state.RUNNING and drv_state != db_state) LOG.debug('Current state is %(drv_state)s, state in DB is ' '%(db_state)s.', {'drv_state': drv_state, 'db_state': db_state}, instance=instance) if expect_running and CONF.resume_guests_state_on_host_boot: self._resume_guests_state(context, instance, net_info) def _resume_guests_state(self, context, instance, net_info): LOG.info('Rebooting instance after nova-compute restart.', instance=instance) block_device_info = \ self._get_instance_block_device_info(context, instance) share_info = self._get_share_info(context, instance) self._mount_all_shares(context, instance, share_info) try: self.driver.resume_state_on_host_boot( context, instance, net_info, share_info, block_device_info) except NotImplementedError: LOG.warning('Hypervisor driver does not support ' 'resume guests', instance=instance) except Exception: # NOTE(vish): The instance failed to resume, so we set the # instance to error and attempt to continue. 
LOG.warning('Failed to resume instance', instance=instance) self._set_instance_obj_error_state(instance) def _retry_reboot(self, instance, current_power_state): current_task_state = instance.task_state retry_reboot = False reboot_type = compute_utils.get_reboot_type(current_task_state, current_power_state) pending_soft = ( current_task_state == task_states.REBOOT_PENDING and instance.vm_state in vm_states.ALLOW_SOFT_REBOOT) pending_hard = ( current_task_state == task_states.REBOOT_PENDING_HARD and instance.vm_state in vm_states.ALLOW_HARD_REBOOT) started_not_running = (current_task_state in [task_states.REBOOTING, task_states.REBOOTING_HARD, task_states.REBOOT_STARTED, task_states.REBOOT_STARTED_HARD] and current_power_state != power_state.RUNNING) if pending_soft or pending_hard or started_not_running: retry_reboot = True return retry_reboot, reboot_type def handle_lifecycle_event(self, event): LOG.info("VM %(state)s (Lifecycle Event)", {'state': event.get_name()}, instance_uuid=event.get_instance_uuid()) context = nova.context.get_admin_context(read_deleted='yes') vm_power_state = None event_transition = event.get_transition() if event_transition == virtevent.EVENT_LIFECYCLE_STOPPED: vm_power_state = power_state.SHUTDOWN elif event_transition == virtevent.EVENT_LIFECYCLE_STARTED: vm_power_state = power_state.RUNNING elif event_transition in ( virtevent.EVENT_LIFECYCLE_PAUSED, virtevent.EVENT_LIFECYCLE_POSTCOPY_STARTED, virtevent.EVENT_LIFECYCLE_MIGRATION_COMPLETED): vm_power_state = power_state.PAUSED elif event_transition == virtevent.EVENT_LIFECYCLE_RESUMED: vm_power_state = power_state.RUNNING elif event_transition == virtevent.EVENT_LIFECYCLE_SUSPENDED: vm_power_state = power_state.SUSPENDED else: LOG.warning("Unexpected lifecycle event: %d", event_transition) migrate_finish_statuses = { # This happens on the source node and indicates live migration # entered post-copy mode. virtevent.EVENT_LIFECYCLE_POSTCOPY_STARTED: 'running (post-copy)', # Suspended for offline migration. virtevent.EVENT_LIFECYCLE_MIGRATION_COMPLETED: 'running' } expected_attrs = [] if event_transition in migrate_finish_statuses: # Join on info_cache since that's needed in migrate_instance_start. expected_attrs.append('info_cache') instance = objects.Instance.get_by_uuid(context, event.get_instance_uuid(), expected_attrs=expected_attrs) # Note(lpetrut): The event may be delayed, thus not reflecting # the current instance power state. In that case, ignore the event. current_power_state = self._get_power_state(instance) if current_power_state == vm_power_state: LOG.debug('Synchronizing instance power state after lifecycle ' 'event "%(event)s"; current vm_state: %(vm_state)s, ' 'current task_state: %(task_state)s, current DB ' 'power_state: %(db_power_state)s, VM power_state: ' '%(vm_power_state)s', {'event': event.get_name(), 'vm_state': instance.vm_state, 'task_state': instance.task_state, 'db_power_state': instance.power_state, 'vm_power_state': vm_power_state}, instance_uuid=instance.uuid) self._sync_instance_power_state(context, instance, vm_power_state) # The following checks are for live migration. We want to activate # the port binding for the destination host before the live migration # is resumed on the destination host in order to reduce network # downtime. Otherwise the ports are bound to the destination host # in post_live_migration_at_destination. 
# TODO(danms): Explore options for using a different live migration # specific callback for this instead of piggy-backing on the # handle_lifecycle_event callback. if (instance.task_state == task_states.MIGRATING and event_transition in migrate_finish_statuses): status = migrate_finish_statuses[event_transition] try: migration = objects.Migration.get_by_instance_and_status( context, instance.uuid, status) LOG.debug('Binding ports to destination host: %s', migration.dest_compute, instance=instance) # For neutron, migrate_instance_start will activate the # destination host port bindings, if there are any created by # conductor before live migration started. self.network_api.migrate_instance_start( context, instance, migration) except exception.MigrationNotFoundByStatus: LOG.warning("Unable to find migration record with status " "'%s' for instance. Port binding will happen in " "post live migration.", status, instance=instance) def handle_events(self, event): if isinstance(event, virtevent.LifecycleEvent): try: self.handle_lifecycle_event(event) except exception.InstanceNotFound: LOG.debug("Event %s arrived for non-existent instance. The " "instance was probably deleted.", event) else: LOG.debug("Ignoring event %s", event) def init_virt_events(self): if CONF.workarounds.handle_virt_lifecycle_events: self.driver.register_event_listener(self.handle_events) else: # NOTE(mriedem): If the _sync_power_states periodic task is # disabled we should emit a warning in the logs. if CONF.sync_power_state_interval < 0: LOG.warning('Instance lifecycle events from the compute ' 'driver have been disabled. Note that lifecycle ' 'changes to an instance outside of the compute ' 'service will not be synchronized ' 'automatically since the _sync_power_states ' 'periodic task is also disabled.') else: LOG.info('Instance lifecycle events from the compute ' 'driver have been disabled. Note that lifecycle ' 'changes to an instance outside of the compute ' 'service will only be synchronized by the ' '_sync_power_states periodic task.') def _get_nodes(self, context): """Queried the ComputeNode objects from the DB that are reported by the hypervisor. :param context: the request context :return: a dict of ComputeNode objects keyed by the UUID of the given node. """ try: node_ids = self.driver.get_nodenames_by_uuid() except exception.VirtDriverNotReady: LOG.warning( "Virt driver is not ready. If this is the first time this " "service is starting on this host, then you can ignore " "this warning.") return {} nodes = objects.ComputeNodeList.get_all_by_uuids(context, list(node_ids.keys())) if not nodes: # NOTE(danms): This should only happen if the compute_id is # pre-provisioned on a host that has never started. LOG.warning('Compute nodes %s for host %s were not found in the ' 'database. If this is the first time this service is ' 'starting on this host, then you can ignore this ' 'warning.', list(node_ids.keys()), self.host) return {} for node in nodes: if node.hypervisor_hostname != node_ids.get(node.uuid): raise exception.InvalidConfiguration( ('My compute node %s has hypervisor_hostname %s ' 'but virt driver reports it should be %s. Possible ' 'rename detected, refusing to start!') % ( node.uuid, node.hypervisor_hostname, node_ids.get(node.uuid))) return {n.uuid: n for n in nodes} def _ensure_existing_node_identity(self, service_ref): """If we are upgrading from an older service version, we need to write our node identity uuid (if not already done) based on nodes assigned to us in the database. 
""" if 'ironic' in CONF.compute_driver.lower(): # We do not persist a single local node identity for # ironic return if service_ref.version >= service_obj.NODE_IDENTITY_VERSION: # Already new enough, nothing to do here, but make sure that we # have a UUID file already, as this is not our first time starting. if nova.virt.node.read_local_node_uuid() is None: raise exception.InvalidConfiguration( ('No local node identity found, but this is not our ' 'first startup on this host. Refusing to start after ' 'potentially having lost that state!')) return if nova.virt.node.read_local_node_uuid(): # We already have a local node identity, no migration needed return context = nova.context.get_admin_context() db_nodes = objects.ComputeNodeList.get_all_by_host(context, self.host) if not db_nodes: # This means we have no nodes in the database (that we # know of) and thus have no need to record an existing # UUID. That is probably strange, so log a warning. raise exception.InvalidConfiguration( ('Upgrading from service version %i but found no ' 'nodes in the database for host %s to persist ' 'locally; Possible rename detected, ' 'refusing to start!') % ( service_ref.version, self.host)) if len(db_nodes) > 1: # If this happens we can't do the right thing, so raise an # exception to abort host startup LOG.warning('Multiple nodes found in the database for host %s; ' 'unable to persist local node identity automatically') raise exception.InvalidConfiguration( 'Multiple nodes found in database, manual node uuid ' 'configuration required') nova.virt.node.write_local_node_uuid(db_nodes[0].uuid) def _check_for_host_rename(self, nodes_by_uuid): if 'ironic' in CONF.compute_driver.lower(): # Ironic (currently) rebalances nodes at various times, and as # such, nodes being discovered as assigned to this host with a # different hostname is not surprising. Skip this check for # ironic. return for node in nodes_by_uuid.values(): if node.host != self.host: raise exception.InvalidConfiguration( 'My node %s has host %r but my host is %r; ' 'Possible rename detected, refusing to start!' % ( node.uuid, node.host, self.host)) LOG.debug('Verified node %s matches my host %s', node.uuid, self.host) def _sanity_check_new_host(self): instances_on_hv = self.driver.list_instance_uuids() if len(instances_on_hv) > 0: # This means we have instances on our hypervisor, but we think # we are a new host (i.e. we created a new service record). That # likely means we're pointed at an empty database or the wrong # cell. raise exception.InvalidConfiguration( 'My hypervisor has existing instances, but I appear to be ' 'a new service in this database. Possible database ' 'configuration error, refusing to start!') def init_host(self, service_ref): """Initialization for a standalone compute service.""" if service_ref: # If we are an existing service, check to see if we need # to record a locally-persistent node identity because # we have upgraded from a previous version. self._ensure_existing_node_identity(service_ref) else: # If we are a new service (in the database), make sure we have no # instances on our hypervisor as we would expect. self._sanity_check_new_host() if CONF.pci.device_spec: # Simply loading the PCI passthrough spec will do a bunch of # validation that would otherwise wait until the PciDevTracker is # constructed when updating available resources for the compute # node(s) in the resource tracker, effectively killing that task. 
# So load up the spec when starting the compute service to # flush any invalid configuration early, so we can kill the service # if the configuration is wrong. whitelist.Whitelist(CONF.pci.device_spec) # NOTE(gibi): validate the [pci]alias config early to avoid late # failures at instance lifecycle operations due to config errors. pci_req_module.get_alias_from_config() nova.conf.neutron.register_dynamic_opts(CONF) # Even if only libvirt uses them, make it available for all drivers nova.conf.devices.register_dynamic_opts(CONF) # Override the number of concurrent disk operations allowed if the # user has specified a limit. if CONF.compute.max_concurrent_disk_ops != 0: compute_utils.disk_ops_semaphore = \ threading.BoundedSemaphore( CONF.compute.max_concurrent_disk_ops) if CONF.compute.max_disk_devices_to_attach == 0: msg = _('[compute]max_disk_devices_to_attach has been set to 0, ' 'which will prevent instances from being able to boot. ' 'Set -1 for unlimited or set >= 1 to limit the maximum ' 'number of disk devices.') raise exception.InvalidConfiguration(msg) self.driver.init_host(host=self.host) # NOTE(gibi): At this point the compute_nodes of the resource tracker # has not been populated yet so we cannot rely on the resource tracker # here. context = nova.context.get_admin_context() nodes_by_uuid = self._get_nodes(context) # NOTE(danms): Check for a possible host rename and abort # startup before we start mucking with instances we think are # ours. self._check_for_host_rename(nodes_by_uuid) instances = objects.InstanceList.get_by_host( context, self.host, expected_attrs=['info_cache', 'metadata', 'numa_topology']) self.init_virt_events() self._validate_pinning_configuration(instances) self._validate_vtpm_configuration(instances) # NOTE(gibi): If ironic and vcenter virt driver slow start time # becomes problematic here then we should consider adding a config # option or a driver flag to tell us if we should thread # _destroy_evacuated_instances and # _error_out_instances_whose_build_was_interrupted out in the # background on startup try: # checking that instance was not already evacuated to other host evacuated_instances = self._destroy_evacuated_instances( context, nodes_by_uuid) # Initialise instances on the host that are not evacuating for instance in instances: if instance.uuid not in evacuated_instances: self._init_instance(context, instance) # NOTE(gibi): collect all the instance uuids that is in some way # was already handled above. Either by init_instance or by # _destroy_evacuated_instances. This way we can limit the scope of # the _error_out_instances_whose_build_was_interrupted call to look # only for instances that have allocations on this node and not # handled by the above calls. already_handled = {instance.uuid for instance in instances}.union( evacuated_instances) self._error_out_instances_whose_build_was_interrupted( context, already_handled, nodes_by_uuid.keys()) finally: if instances: # We only send the instance info to the scheduler on startup # if there is anything to send, otherwise this host might # not be mapped yet in a cell and the scheduler may have # issues dealing with the information. Later changes to # instances on this host will update the scheduler, or the # _sync_scheduler_instance_info periodic task will. 
self._update_scheduler_instance_info(context, instances) def _error_out_instances_whose_build_was_interrupted( self, context, already_handled_instances, node_uuids): """If there are instances in BUILDING state that are not assigned to this host but have allocations in placement towards this compute that means the nova-compute service was restarted while those instances waited for the resource claim to finish and the _set_instance_host_and_node() to update the instance.host field. We need to push them to ERROR state here to prevent keeping them in BUILDING state forever. :param context: The request context :param already_handled_instances: The set of instance UUIDs that the host initialization process already handled in some way. :param node_uuids: The list of compute node uuids handled by this service """ # Strategy: # 1) Get the allocations from placement for our compute node(s) # 2) Remove the already handled instances from the consumer list; # they are either already initialized or need to be skipped. # 3) Check which remaining consumer is an instance in BUILDING state # and push it to ERROR state. LOG.info( "Looking for unclaimed instances stuck in BUILDING status for " "nodes managed by this host") for cn_uuid in node_uuids: try: f = self.reportclient.get_allocations_for_resource_provider allocations = f(context, cn_uuid).allocations except (exception.ResourceProviderAllocationRetrievalFailed, keystone_exception.ClientException) as e: LOG.error( "Could not retrieve compute node resource provider %s and " "therefore unable to error out any instances stuck in " "BUILDING state. Error: %s", cn_uuid, str(e)) continue not_handled_consumers = (set(allocations) - already_handled_instances) if not not_handled_consumers: continue filters = { 'vm_state': vm_states.BUILDING, 'uuid': not_handled_consumers } instances = objects.InstanceList.get_by_filters( context, filters, expected_attrs=[]) for instance in instances: LOG.debug( "Instance spawn was interrupted before instance_claim, " "setting instance to ERROR state", instance=instance) self._set_instance_obj_error_state( instance, clean_task_state=True) def cleanup_host(self): self.driver.register_event_listener(None) self.instance_events.cancel_all_events() self.driver.cleanup_host(host=self.host) self._cleanup_live_migrations_in_pool() def _cleanup_live_migrations_in_pool(self): # Shutdown the pool so we don't get new requests. self._live_migration_executor.shutdown(wait=False) # For any queued migrations, cancel the migration and update # its status. for migration, future in self._waiting_live_migrations.values(): # If we got here before the Future was submitted then we need # to move on since there isn't anything we can do. if future is None: continue if future.cancel(): self._set_migration_status(migration, 'cancelled') LOG.info('Successfully cancelled queued live migration.', instance_uuid=migration.instance_uuid) else: LOG.warning('Unable to cancel live migration.', instance_uuid=migration.instance_uuid) self._waiting_live_migrations.clear() def pre_start_hook(self, service_ref): """After the service is initialized, but before we fully bring the service up by listening on RPC queues, make sure to update our available resources (and indirectly our available nodes). 
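
        Calling update_available_resource() with startup=True below also
        ensures that the ComputeNode record(s) and the corresponding
        resource provider inventory in placement exist (or are refreshed)
        before we start accepting RPC requests.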
""" self.service_ref = service_ref self.rt.set_service_ref(service_ref) self.update_available_resource(nova.context.get_admin_context(), startup=True) def _get_power_state(self, instance): """Retrieve the power state for the given instance.""" LOG.debug('Checking state', instance=instance) try: return self.driver.get_info(instance, use_cache=False).state except exception.InstanceNotFound: return power_state.NOSTATE def _await_block_device_map_created(self, context, vol_id): # TODO(yamahata): creating volume simultaneously # reduces creation time? # TODO(yamahata): eliminate dumb polling start = time.time() retries = CONF.block_device_allocate_retries # (1) if the configured value is 0, one attempt should be made # (2) if the configured value is > 0, then the total number attempts # is (retries + 1) attempts = 1 if retries >= 1: attempts = retries + 1 for attempt in range(1, attempts + 1): volume = self.volume_api.get(context, vol_id) volume_status = volume['status'] if volume_status not in ['creating', 'downloading']: if volume_status == 'available': return attempt LOG.warning("Volume id: %(vol_id)s finished being " "created but its status is %(vol_status)s.", {'vol_id': vol_id, 'vol_status': volume_status}) break time.sleep( CONF.block_device_allocate_retries_interval) raise exception.VolumeNotCreated(volume_id=vol_id, seconds=int(time.time() - start), attempts=attempt, volume_status=volume_status) def _decode_files(self, injected_files): """Base64 decode the list of files to inject.""" if not injected_files: return [] def _decode(f): path, contents = f # Py3 raises binascii.Error instead of TypeError as in Py27 try: decoded = base64.b64decode(contents) return path, decoded except (TypeError, binascii.Error): raise exception.Base64Exception(path=path) return [_decode(f) for f in injected_files] def _validate_instance_group_policy(self, context, instance, scheduler_hints=None): if CONF.workarounds.disable_group_policy_check_upcall: return # NOTE(russellb) Instance group policy is enforced by the scheduler. # However, there is a race condition with the enforcement of # the policy. Since more than one instance may be scheduled at the # same time, it's possible that more than one instance with an # anti-affinity policy may end up here. It's also possible that # multiple instances with an affinity policy could end up on different # hosts. This is a validation step to make sure that starting the # instance here doesn't violate the policy. if scheduler_hints is not None: # only go through here if scheduler_hints is provided, # even if it is empty. group_hint = scheduler_hints.get('group') if not group_hint: return else: # The RequestSpec stores scheduler_hints as key=list pairs # so we need to check the type on the value and pull the # single entry out. The API request schema validates that # the 'group' hint is a single value. 
if isinstance(group_hint, list): group_hint = group_hint[0] try: group = objects.InstanceGroup.get_by_hint( context, group_hint ) except exception.InstanceGroupNotFound: return else: # TODO(ganso): a call to DB can be saved by adding request_spec # to rpcapi payload of live_migration, pre_live_migration and # check_can_live_migrate_destination try: group = objects.InstanceGroup.get_by_instance_uuid( context, instance.uuid ) except exception.InstanceGroupNotFound: return @utils.synchronized(group['uuid']) def _do_validation(context, instance, group): if group.policy and 'anti-affinity' == group.policy: # instances on host instances_uuids = objects.InstanceList.get_uuids_by_host( context, self.host) ins_on_host = set(instances_uuids) # instance param is just for logging, the nodename obtained is # not actually related to the instance at all nodename = self._get_nodename(instance) # instances being migrated to host migrations = ( objects.MigrationList.get_in_progress_by_host_and_node( context, self.host, nodename)) migration_vm_uuids = {mig.instance_uuid for mig in migrations} total_instances = migration_vm_uuids | ins_on_host # refresh group to get updated members within locked block group = objects.InstanceGroup.get_by_uuid(context, group['uuid']) members = set(group.members) # Determine the set of instance group members on this host # which are not the instance in question. This is used to # determine how many other members from the same anti-affinity # group can be on this host. members_on_host = (total_instances & members - set([instance.uuid])) rules = group.rules if rules and 'max_server_per_host' in rules: max_server = rules['max_server_per_host'] else: max_server = 1 if len(members_on_host) >= max_server: raise exception.GroupAffinityViolation( instance_uuid=instance.uuid, policy='Anti-affinity') # NOTE(ganso): The check for affinity below does not work and it # can easily be violated because the lock happens in different # compute hosts. # The only fix seems to be a DB lock to perform the check whenever # setting the host field to an instance. elif group.policy and 'affinity' == group.policy: group_hosts = group.get_hosts(exclude=[instance.uuid]) if group_hosts and self.host not in group_hosts: raise exception.GroupAffinityViolation( instance_uuid=instance.uuid, policy='Affinity') _do_validation(context, instance, group) def _log_original_error(self, exc_info, instance_uuid): LOG.error('Error: %s', exc_info[1], instance_uuid=instance_uuid, exc_info=exc_info) @periodic_task.periodic_task def _check_instance_build_time(self, context): """Ensure that instances are not stuck in build.""" timeout = CONF.instance_build_timeout if timeout == 0: return filters = {'vm_state': vm_states.BUILDING, 'host': self.host} building_insts = objects.InstanceList.get_by_filters(context, filters, expected_attrs=[], use_slave=True) for instance in building_insts: if timeutils.is_older_than(instance.created_at, timeout): self._set_instance_obj_error_state(instance) LOG.warning("Instance build timed out. Set to error " "state.", instance=instance) def _check_instance_exists(self, instance): """Ensure an instance with the same name is not already present.""" if self.driver.instance_exists(instance): raise exception.InstanceExists(name=instance.name) def _allocate_network_async(self, context, instance, requested_networks, security_groups, resource_provider_mapping, network_arqs): """Method used to allocate networks in the background. Broken out for testing. 
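
        Allocation is attempted up to CONF.network_allocate_retries + 1
        times in total, sleeping between attempts with an exponential
        backoff that is capped at 30 seconds (see the loop below).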
""" # First check to see if we're specifically not supposed to allocate # networks because if so, we can exit early. if requested_networks and requested_networks.no_allocate: LOG.debug("Not allocating networking since 'none' was specified.", instance=instance) return network_model.NetworkInfo([]) LOG.debug("Allocating IP information in the background.", instance=instance) retries = CONF.network_allocate_retries attempts = retries + 1 retry_time = 1 bind_host_id = self.driver.network_binding_host_id(context, instance) for attempt in range(1, attempts + 1): try: nwinfo = self.network_api.allocate_for_instance( context, instance, requested_networks=requested_networks, security_groups=security_groups, bind_host_id=bind_host_id, resource_provider_mapping=resource_provider_mapping, network_arqs=network_arqs) LOG.debug('Instance network_info: |%s|', nwinfo, instance=instance) instance.system_metadata['network_allocated'] = 'True' # NOTE(JoshNang) do not save the instance here, as it can cause # races. The caller shares a reference to instance and waits # for this async greenthread to finish before calling # instance.save(). return nwinfo except Exception as e: log_info = {'attempt': attempt, 'attempts': attempts} if attempt == attempts: LOG.exception('Instance failed network setup ' 'after %(attempts)d attempt(s)', log_info) raise e LOG.warning('Instance failed network setup ' '(attempt %(attempt)d of %(attempts)d)', log_info, instance=instance) time.sleep(retry_time) retry_time *= 2 if retry_time > 30: retry_time = 30 # Not reached. def _build_networks_for_instance(self, context, instance, requested_networks, security_groups, resource_provider_mapping, network_arqs): # If we're here from a reschedule the network may already be allocated. if strutils.bool_from_string( instance.system_metadata.get('network_allocated', 'False')): # NOTE(alex_xu): The network_allocated is True means the network # resource already allocated at previous scheduling, and the # network setup is cleanup at previous. After rescheduling, the # network resource need setup on the new host. self.network_api.setup_instance_network_on_host( context, instance, instance.host) return self.network_api.get_instance_nw_info(context, instance) network_info = self._allocate_network(context, instance, requested_networks, security_groups, resource_provider_mapping, network_arqs) return network_info def _allocate_network(self, context, instance, requested_networks, security_groups, resource_provider_mapping, network_arqs): """Start network allocation asynchronously. Return an instance of NetworkInfoAsyncWrapper that can be used to retrieve the allocated networks when the operation has finished. """ # NOTE(comstud): Since we're allocating networks asynchronously, # this task state has little meaning, as we won't be in this # state for very long. instance.vm_state = vm_states.BUILDING instance.task_state = task_states.NETWORKING instance.save(expected_task_state=[None]) return network_model.NetworkInfoAsyncWrapper( self._allocate_network_async, context, instance, requested_networks, security_groups, resource_provider_mapping, network_arqs) def _default_root_device_name(self, instance, image_meta, root_bdm): """Gets a default root device name from the driver. :param nova.objects.Instance instance: The instance for which to get the root device name. :param nova.objects.ImageMeta image_meta: The metadata of the image of the instance. :param nova.objects.BlockDeviceMapping root_bdm: The description of the root device. 
:returns: str -- The default root device name. :raises: InternalError, TooManyDiskDevices """ try: return self.driver.default_root_device_name(instance, image_meta, root_bdm) except NotImplementedError: return compute_utils.get_next_device_name(instance, []) def _default_device_names_for_instance(self, instance, root_device_name, *block_device_lists): """Default the missing device names in the BDM from the driver. :param nova.objects.Instance instance: The instance for which to get default device names. :param str root_device_name: The root device name. :param list block_device_lists: List of block device mappings. :returns: None :raises: InternalError, TooManyDiskDevices """ try: self.driver.default_device_names_for_instance(instance, root_device_name, *block_device_lists) except NotImplementedError: compute_utils.default_device_names_for_instance( instance, root_device_name, *block_device_lists) def _get_device_name_for_instance(self, instance, bdms, block_device_obj): """Get the next device name from the driver, based on the BDM. :param nova.objects.Instance instance: The instance whose volume is requesting a device name. :param nova.objects.BlockDeviceMappingList bdms: The block device mappings for the instance. :param nova.objects.BlockDeviceMapping block_device_obj: A block device mapping containing info about the requested block device. :returns: The next device name. :raises: InternalError, TooManyDiskDevices """ # NOTE(ndipanov): Copy obj to avoid changing the original block_device_obj = block_device_obj.obj_clone() try: return self.driver.get_device_name_for_instance( instance, bdms, block_device_obj) except NotImplementedError: return compute_utils.get_device_name_for_instance( instance, bdms, block_device_obj.get("device_name")) def _default_block_device_names(self, instance, image_meta, block_devices): """Verify that all the devices have the device_name set. If not, provide a default name. It also ensures that there is a root_device_name and is set to the first block device in the boot sequence (boot_index=0). 
""" root_bdm = block_device.get_root_bdm(block_devices) if not root_bdm: return # Get the root_device_name from the root BDM or the instance root_device_name = None update_root_bdm = False if root_bdm.device_name: root_device_name = root_bdm.device_name instance.root_device_name = root_device_name elif instance.root_device_name: root_device_name = instance.root_device_name root_bdm.device_name = root_device_name update_root_bdm = True else: root_device_name = self._default_root_device_name(instance, image_meta, root_bdm) instance.root_device_name = root_device_name root_bdm.device_name = root_device_name update_root_bdm = True if update_root_bdm: root_bdm.save() ephemerals = [] swap = [] block_device_mapping = [] image = [] for device in block_devices: if block_device.new_format_is_ephemeral(device): ephemerals.append(device) if block_device.new_format_is_swap(device): swap.append(device) if driver_block_device.is_block_device_mapping(device): block_device_mapping.append(device) if driver_block_device.is_local_image(device): image.append(device) self._default_device_names_for_instance(instance, root_device_name, image, ephemerals, swap, block_device_mapping) def _add_missing_dev_names(self, bdms, instance): for bdm in bdms: if bdm.device_name is not None: continue device_name = self._get_device_name_for_instance(instance, bdms, bdm) values = {'device_name': device_name} bdm.update(values) bdm.save() def _prep_block_device(self, context, instance, bdms): """Set up the block device for an instance with error logging.""" try: self._add_missing_dev_names(bdms, instance) block_device_info = driver.get_block_device_info(instance, bdms) mapping = driver.block_device_info_get_mapping(block_device_info) driver_block_device.attach_block_devices( mapping, context, instance, self.volume_api, self.driver, wait_func=self._await_block_device_map_created) return block_device_info except exception.OverQuota as e: LOG.warning('Failed to create block device for instance due' ' to exceeding volume related resource quota.' ' Error: %s', e.message, instance=instance) raise except Exception as ex: LOG.exception('Instance failed block device setup', instance=instance) # InvalidBDM will eventually result in a BuildAbortException when # booting from volume, and will be recorded as an instance fault. # Maintain the original exception message which most likely has # useful details which the standard InvalidBDM error message lacks. raise exception.InvalidBDM(str(ex)) def _update_instance_after_spawn(self, instance, vm_state=vm_states.ACTIVE): instance.power_state = self._get_power_state(instance) instance.vm_state = vm_state instance.task_state = None # NOTE(sean-k-mooney): configdrive.update_instance checks # instance.launched_at to determine if it is the first or # subsequent spawn of an instance. We need to call update_instance # first before setting instance.launched_at or instance.config_drive # will never be set to true based on the value of force_config_drive. # As a result the config drive will be lost on a hard reboot of the # instance even when force_config_drive=true. see bug #1835822. configdrive.update_instance(instance) instance.launched_at = timeutils.utcnow() def _update_scheduler_instance_info(self, context, instance): """Sends an InstanceList with created or updated Instance objects to the Scheduler client. In the case of init_host, the value passed will already be an InstanceList. Other calls will send individual Instance objects that have been created or resized. 
In this case, we create an InstanceList object containing that Instance. """ if not self.send_instance_updates: return if isinstance(instance, obj_instance.Instance): instance = objects.InstanceList(objects=[instance]) context = context.elevated() self.query_client.update_instance_info(context, self.host, instance) def _delete_scheduler_instance_info(self, context, instance_uuid): """Sends the uuid of the deleted Instance to the Scheduler client.""" if not self.send_instance_updates: return context = context.elevated() self.query_client.delete_instance_info(context, self.host, instance_uuid) @periodic_task.periodic_task(spacing=CONF.scheduler_instance_sync_interval) def _sync_scheduler_instance_info(self, context): if not self.send_instance_updates: return context = context.elevated() instances = objects.InstanceList.get_by_host(context, self.host, expected_attrs=[], use_slave=True) uuids = [instance.uuid for instance in instances] self.query_client.sync_instance_info(context, self.host, uuids) def _notify_about_instance_usage(self, context, instance, event_suffix, network_info=None, extra_usage_info=None, fault=None, best_effort=False): compute_utils.notify_about_instance_usage( self.notifier, context, instance, event_suffix, network_info=network_info, extra_usage_info=extra_usage_info, fault=fault, best_effort=best_effort) def _deallocate_network(self, context, instance, requested_networks=None): # If we were told not to allocate networks let's save ourselves # the trouble of calling the network API. if requested_networks and requested_networks.no_allocate: LOG.debug("Skipping network deallocation for instance since " "networking was not requested.", instance=instance) return LOG.debug('Deallocating network for instance', instance=instance) with timeutils.StopWatch() as timer: self.network_api.deallocate_for_instance( context, instance, requested_networks=requested_networks) # nova-network does an rpc call so we're OK tracking time spent here LOG.info('Took %0.2f seconds to deallocate network for instance.', timer.elapsed(), instance=instance) def _get_instance_block_device_info(self, context, instance, refresh_conn_info=False, bdms=None): """Transform block devices to the driver block_device format.""" if bdms is None: bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) block_device_info = driver.get_block_device_info(instance, bdms) if not refresh_conn_info: # if the block_device_mapping has no value in connection_info # (returned as None), don't include in the mapping block_device_info['block_device_mapping'] = [ bdm for bdm in driver.block_device_info_get_mapping( block_device_info) if bdm.get('connection_info')] else: driver_block_device.refresh_conn_infos( driver.block_device_info_get_mapping(block_device_info), context, instance, self.volume_api, self.driver) return block_device_info def _build_failed(self, node): if CONF.compute.consecutive_build_service_disable_threshold: # NOTE(danms): Update our counter, but wait for the next # update_available_resource() periodic to flush it to the DB self.rt.build_failed(node) def _build_succeeded(self, node): self.rt.build_succeeded(node) # NOTE(gibi): The normal RPC handler decorators are here but please note # that build_and_run_instance immediately spawns and returns. So any # error from the real work is not propagated back to these decorators. 
# All those error needs to be handled by _locked_do_build_and_run_instance # and _do_build_and_run_instance @wrap_exception() @reverts_task_state @wrap_instance_fault def build_and_run_instance(self, context, instance, image, request_spec, filter_properties, accel_uuids, admin_password=None, injected_files=None, requested_networks=None, security_groups=None, block_device_mapping=None, node=None, limits=None, host_list=None): @utils.synchronized(instance.uuid) def _locked_do_build_and_run_instance(*args, **kwargs): # NOTE(danms): We grab the semaphore with the instance uuid # locked because we could wait in line to build this instance # for a while and we want to make sure that nothing else tries # to do anything with this instance while we wait. with self._build_semaphore: try: result = self._do_build_and_run_instance(*args, **kwargs) except Exception: # NOTE(mriedem): This should really only happen if # _decode_files in _do_build_and_run_instance fails, and # that's before a guest is spawned so it's OK to remove # allocations for the instance for this node from Placement # below as there is no guest consuming resources anyway. # The _decode_files case could be handled more specifically # but that's left for another day. result = build_results.FAILED raise finally: if result == build_results.FAILED: # Remove the allocation records from Placement for the # instance if the build failed. The instance.host is # likely set to None in _do_build_and_run_instance # which means if the user deletes the instance, it # will be deleted in the API, not the compute service. # Setting the instance.host to None in # _do_build_and_run_instance means that the # ResourceTracker will no longer consider this instance # to be claiming resources against it, so we want to # reflect that same thing in Placement. No need to # call this for a reschedule, as the allocations will # have already been removed in # self._do_build_and_run_instance(). self.reportclient.delete_allocation_for_instance( context, instance.uuid, force=True) if result in (build_results.FAILED_BY_POLICY, build_results.RESCHEDULED_BY_POLICY): return if result in (build_results.FAILED, build_results.RESCHEDULED): self._build_failed(node) else: self._build_succeeded(node) # NOTE(danms): We spawn here to return the RPC worker thread back to # the pool. Since what follows could take a really long time, we don't # want to tie up RPC workers. 
utils.spawn(_locked_do_build_and_run_instance, context, instance, image, request_spec, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping, node, limits, host_list, accel_uuids) def _check_device_tagging(self, requested_networks, block_device_mapping): tagging_requested = False if requested_networks: for net in requested_networks: if 'tag' in net and net.tag is not None: tagging_requested = True break if block_device_mapping and not tagging_requested: for bdm in block_device_mapping: if 'tag' in bdm and bdm.tag is not None: tagging_requested = True break if (tagging_requested and not self.driver.capabilities.get('supports_device_tagging', False)): raise exception.BuildAbortException('Attempt to boot guest with ' 'tagged devices on host that ' 'does not support tagging.') def _check_trusted_certs(self, instance): if (instance.trusted_certs and not self.driver.capabilities.get('supports_trusted_certs', False)): raise exception.BuildAbortException( 'Trusted image certificates provided on host that does not ' 'support certificate validation.') @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def _do_build_and_run_instance(self, context, instance, image, request_spec, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping, node=None, limits=None, host_list=None, accel_uuids=None): try: LOG.debug('Starting instance...', instance=instance) instance.vm_state = vm_states.BUILDING instance.task_state = None instance.save(expected_task_state= (task_states.SCHEDULING, None)) except exception.InstanceNotFound: msg = 'Instance disappeared before build.' LOG.debug(msg, instance=instance) return build_results.FAILED except exception.UnexpectedTaskStateError as e: LOG.debug(e.format_message(), instance=instance) return build_results.FAILED # b64 decode the files to inject: decoded_files = self._decode_files(injected_files) if limits is None: limits = {} if node is None: node = self._get_nodename(instance, refresh=True) try: with timeutils.StopWatch() as timer: self._build_and_run_instance(context, instance, image, decoded_files, admin_password, requested_networks, security_groups, block_device_mapping, node, limits, filter_properties, request_spec, accel_uuids) LOG.info('Took %0.2f seconds to build instance.', timer.elapsed(), instance=instance) return build_results.ACTIVE except exception.RescheduledException as e: retry = filter_properties.get('retry') if not retry: # no retry information, do not reschedule. 
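                # (The 'retry' dict is normally populated by the scheduler /
                # conductor only when rescheduling is enabled, i.e.
                # [scheduler]max_attempts > 1, so without it this failure is
                # terminal.)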
LOG.debug("Retry info not present, will not reschedule", instance=instance) self._cleanup_allocated_networks(context, instance, requested_networks) compute_utils.add_instance_fault_from_exc(context, instance, e, sys.exc_info(), fault_message=e.kwargs['reason']) self._nil_out_instance_obj_host_and_node(instance) self._set_instance_obj_error_state(instance, clean_task_state=True) if isinstance(e, exception.RescheduledByPolicyException): return build_results.FAILED_BY_POLICY return build_results.FAILED LOG.debug(e.format_message(), instance=instance) # This will be used for logging the exception retry['exc'] = traceback.format_exception(*sys.exc_info()) # This will be used for setting the instance fault message retry['exc_reason'] = e.kwargs['reason'] self._cleanup_allocated_networks(context, instance, requested_networks) self._nil_out_instance_obj_host_and_node(instance) instance.task_state = task_states.SCHEDULING instance.save() # The instance will have already claimed resources from this host # before this build was attempted. Now that it has failed, we need # to unclaim those resources before casting to the conductor, so # that if there are alternate hosts available for a retry, it can # claim resources on that new host for the instance. self.reportclient.delete_allocation_for_instance( context, instance.uuid, force=True) self.compute_task_api.build_instances(context, [instance], image, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping, request_spec=request_spec, host_lists=[host_list]) if isinstance(e, exception.RescheduledByPolicyException): return build_results.RESCHEDULED_BY_POLICY return build_results.RESCHEDULED except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError): msg = 'Instance disappeared during build.' LOG.debug(msg, instance=instance) self._cleanup_allocated_networks(context, instance, requested_networks) return build_results.FAILED except Exception as e: if isinstance(e, exception.BuildAbortException): LOG.error(e.format_message(), instance=instance) else: # Should not reach here. LOG.exception('Unexpected build failure, not rescheduling ' 'build.', instance=instance) self._cleanup_allocated_networks(context, instance, requested_networks) self._cleanup_volumes(context, instance, block_device_mapping, raise_exc=False) compute_utils.add_instance_fault_from_exc(context, instance, e, sys.exc_info()) self._nil_out_instance_obj_host_and_node(instance) self._set_instance_obj_error_state(instance, clean_task_state=True) return build_results.FAILED @staticmethod def _get_scheduler_hints(filter_properties, request_spec=None): """Helper method to get scheduler hints. This method prefers to get the hints out of the request spec, but that might not be provided. Conductor will pass request_spec down to the first compute chosen for a build but older computes will not pass the request_spec to conductor's build_instances method for a a reschedule, so if we're on a host via a retry, request_spec may not be provided so we need to fallback to use the filter_properties to get scheduler hints. """ hints = {} if request_spec is not None and 'scheduler_hints' in request_spec: hints = request_spec.scheduler_hints if not hints: hints = filter_properties.get('scheduler_hints') or {} return hints @staticmethod def _get_request_group_mapping(request_spec): """Return request group resource - provider mapping. 
This is currently used for Neutron ports that have resource request due to the port having QoS minimum bandwidth policy rule attached. :param request_spec: A RequestSpec object or None :returns: A dict keyed by RequestGroup requester_id, currently Neutron port_id, to resource provider UUID that provides resource for that RequestGroup. Or None if the request_spec was None. """ # TODO(sbauza): Remove this conditional once we only support # RPC API 6.0 if request_spec: return request_spec.get_request_group_mapping() else: return None def _build_and_run_instance(self, context, instance, image, injected_files, admin_password, requested_networks, security_groups, block_device_mapping, node, limits, filter_properties, request_spec=None, accel_uuids=None): image_name = image.get('name') self._notify_about_instance_usage(context, instance, 'create.start', extra_usage_info={'image_name': image_name}) compute_utils.notify_about_instance_create( context, instance, self.host, phase=fields.NotificationPhase.START, bdms=block_device_mapping) # NOTE(mikal): cache the keystone roles associated with the instance # at boot time for later reference instance.system_metadata.update( {'boot_roles': ','.join(context.roles)}) self._check_device_tagging(requested_networks, block_device_mapping) self._check_trusted_certs(instance) provider_mapping = self._get_request_group_mapping(request_spec) if provider_mapping: try: compute_utils.update_pci_request_with_placement_allocations( context, self.reportclient, instance.pci_requests.requests, provider_mapping, ) except (exception.AmbiguousResourceProviderForPCIRequest, exception.UnexpectedResourceProviderNameForPCIRequest ) as e: raise exception.BuildAbortException( reason=str(e), instance_uuid=instance.uuid) # TODO(Luyao) cut over to get_allocs_for_consumer allocs = self.reportclient.get_allocations_for_consumer( context, instance.uuid) try: scheduler_hints = self._get_scheduler_hints(filter_properties, request_spec) with self.rt.instance_claim(context, instance, node, allocs, limits): # NOTE(russellb) It's important that this validation be done # *after* the resource tracker instance claim, as that is where # the host is set on the instance. self._validate_instance_group_policy(context, instance, scheduler_hints) image_meta = objects.ImageMeta.from_dict(image) with self._build_resources(context, instance, requested_networks, security_groups, image_meta, block_device_mapping, provider_mapping, accel_uuids) as resources: instance.vm_state = vm_states.BUILDING instance.task_state = task_states.SPAWNING # NOTE(JoshNang) This also saves the changes to the # instance from _allocate_network_async, as they aren't # saved in that function to prevent races. 
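                    # If the instance was deleted (or its task_state was
                    # otherwise changed) while we were claiming resources,
                    # this save() raises and the exception handlers below
                    # take care of notifications and cleanup.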
instance.save(expected_task_state= task_states.BLOCK_DEVICE_MAPPING) block_device_info = resources['block_device_info'] network_info = resources['network_info'] accel_info = resources['accel_info'] LOG.debug('Start spawning the instance on the hypervisor.', instance=instance) with timeutils.StopWatch() as timer: self.driver.spawn(context, instance, image_meta, injected_files, admin_password, allocs, network_info=network_info, block_device_info=block_device_info, accel_info=accel_info) LOG.info('Took %0.2f seconds to spawn the instance on ' 'the hypervisor.', timer.elapsed(), instance=instance) except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError) as e: with excutils.save_and_reraise_exception(): self._notify_about_instance_usage(context, instance, 'create.error', fault=e) compute_utils.notify_about_instance_create( context, instance, self.host, phase=fields.NotificationPhase.ERROR, exception=e, bdms=block_device_mapping) except exception.ComputeResourcesUnavailable as e: LOG.debug(e.format_message(), instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) compute_utils.notify_about_instance_create( context, instance, self.host, phase=fields.NotificationPhase.ERROR, exception=e, bdms=block_device_mapping) raise exception.RescheduledException( instance_uuid=instance.uuid, reason=e.format_message()) except exception.BuildAbortException as e: with excutils.save_and_reraise_exception(): LOG.debug(e.format_message(), instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) compute_utils.notify_about_instance_create( context, instance, self.host, phase=fields.NotificationPhase.ERROR, exception=e, bdms=block_device_mapping) except exception.NoMoreFixedIps as e: LOG.warning('No more fixed IP to be allocated', instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) compute_utils.notify_about_instance_create( context, instance, self.host, phase=fields.NotificationPhase.ERROR, exception=e, bdms=block_device_mapping) msg = _('Failed to allocate the network(s) with error %s, ' 'not rescheduling.') % e.format_message() raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=msg) except (exception.ExternalNetworkAttachForbidden, exception.VirtualInterfaceCreateException, exception.VirtualInterfaceMacAddressException, exception.FixedIpInvalidOnHost, exception.UnableToAutoAllocateNetwork, exception.NetworksWithQoSPolicyNotSupported) as e: LOG.exception('Failed to allocate network(s)', instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) compute_utils.notify_about_instance_create( context, instance, self.host, phase=fields.NotificationPhase.ERROR, exception=e, bdms=block_device_mapping) msg = _('Failed to allocate the network(s), not rescheduling.') raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=msg) except (exception.FlavorDiskTooSmall, exception.FlavorMemoryTooSmall, exception.ImageNotActive, exception.ImageUnacceptable, exception.InvalidDiskInfo, exception.InvalidDiskFormat, cursive_exception.SignatureVerificationError, exception.CertificateValidationFailed, exception.VolumeEncryptionNotSupported, exception.InvalidInput, # TODO(mriedem): We should be validating RequestedVRamTooHigh # in the API during server create and rebuild. 
exception.RequestedVRamTooHigh) as e: self._notify_about_instance_usage(context, instance, 'create.error', fault=e) compute_utils.notify_about_instance_create( context, instance, self.host, phase=fields.NotificationPhase.ERROR, exception=e, bdms=block_device_mapping) raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=e.format_message()) except exception.GroupAffinityViolation as e: LOG.exception('Failed to build and run instance', instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) compute_utils.notify_about_instance_create( context, instance, self.host, phase=fields.NotificationPhase.ERROR, exception=e, bdms=block_device_mapping) raise exception.RescheduledByPolicyException( instance_uuid=instance.uuid, reason=str(e)) except Exception as e: LOG.exception('Failed to build and run instance', instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) compute_utils.notify_about_instance_create( context, instance, self.host, phase=fields.NotificationPhase.ERROR, exception=e, bdms=block_device_mapping) raise exception.RescheduledException( instance_uuid=instance.uuid, reason=str(e)) # NOTE(alaski): This is only useful during reschedules, remove it now. instance.system_metadata.pop('network_allocated', None) # If CONF.default_access_ip_network_name is set, grab the # corresponding network and set the access ip values accordingly. network_name = CONF.default_access_ip_network_name if (network_name and not instance.access_ip_v4 and not instance.access_ip_v6): # Note that when there are multiple ips to choose from, an # arbitrary one will be chosen. for vif in network_info: if vif['network']['label'] == network_name: for ip in vif.fixed_ips(): if not instance.access_ip_v4 and ip['version'] == 4: instance.access_ip_v4 = ip['address'] if not instance.access_ip_v6 and ip['version'] == 6: instance.access_ip_v6 = ip['address'] break self._update_instance_after_spawn(instance) try: instance.save(expected_task_state=task_states.SPAWNING) except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError) as e: with excutils.save_and_reraise_exception(): self._notify_about_instance_usage(context, instance, 'create.error', fault=e) compute_utils.notify_about_instance_create( context, instance, self.host, phase=fields.NotificationPhase.ERROR, exception=e, bdms=block_device_mapping) self._update_scheduler_instance_info(context, instance) self._notify_about_instance_usage(context, instance, 'create.end', extra_usage_info={'message': _('Success')}, network_info=network_info) compute_utils.notify_about_instance_create(context, instance, self.host, phase=fields.NotificationPhase.END, bdms=block_device_mapping) def _build_resources_cleanup(self, instance, network_info): # Make sure the async call finishes if network_info is not None: network_info.wait(do_raise=False) self.driver.clean_networks_preparation(instance, network_info) self.driver.failed_spawn_cleanup(instance) @contextlib.contextmanager def _build_resources(self, context, instance, requested_networks, security_groups, image_meta, block_device_mapping, resource_provider_mapping, accel_uuids): resources = {} network_info = None spec_arqs = {} network_arqs = {} try: if accel_uuids: arqs = self._get_bound_arq_resources( context, instance, accel_uuids) spec_arqs, network_arqs = self._split_network_arqs( arqs, requested_networks) LOG.debug("ARQs for spec:%s, ARQs for network:%s", spec_arqs, network_arqs) except (Exception, 
exception.InstanceEventTimeout) as exc: LOG.exception(exc) # ARQs created for instance or ports. # The port binding isn't done yet. # Unbind port won't clean port ARQs. compute_utils.delete_arqs_if_needed( context, instance, accel_uuids) msg = _('Failure getting accelerator requests.') raise exception.BuildAbortException( reason=msg, instance_uuid=instance.uuid) try: LOG.debug('Start building networks asynchronously for instance.', instance=instance) network_info = self._build_networks_for_instance(context, instance, requested_networks, security_groups, resource_provider_mapping, network_arqs) resources['network_info'] = network_info except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError): raise except exception.UnexpectedTaskStateError as e: raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=e.format_message()) except Exception: # Because this allocation is async any failures are likely to occur # when the driver accesses network_info during spawn(). LOG.exception('Failed to allocate network(s)', instance=instance) msg = _('Failed to allocate the network(s), not rescheduling.') raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=msg) try: # Perform any driver preparation work for the driver. self.driver.prepare_for_spawn(instance) # Depending on a virt driver, some network configuration is # necessary before preparing block devices. self.driver.prepare_networks_before_block_device_mapping( instance, network_info) # Verify that all the BDMs have a device_name set and assign a # default to the ones missing it with the help of the driver. self._default_block_device_names(instance, image_meta, block_device_mapping) LOG.debug('Start building block device mappings for instance.', instance=instance) instance.vm_state = vm_states.BUILDING instance.task_state = task_states.BLOCK_DEVICE_MAPPING instance.save() block_device_info = self._prep_block_device(context, instance, block_device_mapping) resources['block_device_info'] = block_device_info except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError, exception.ComputeResourcesUnavailable): with excutils.save_and_reraise_exception(): self._build_resources_cleanup(instance, network_info) except (exception.UnexpectedTaskStateError, exception.InstanceUnacceptable, exception.OverQuota, exception.InvalidBDM) as e: self._build_resources_cleanup(instance, network_info) raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=e.format_message()) except Exception: LOG.exception('Failure prepping block device', instance=instance) self._build_resources_cleanup(instance, network_info) msg = _('Failure prepping block device.') raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=msg) resources['accel_info'] = list(spec_arqs.values()) try: yield resources except Exception as exc: with excutils.save_and_reraise_exception() as ctxt: if not isinstance(exc, ( exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError)): LOG.exception('Instance failed to spawn', instance=instance) # Make sure the async call finishes if network_info is not None: network_info.wait(do_raise=False) # if network_info is empty we're likely here because of # network allocation failure. 
Since nothing can be reused on # rescheduling it's better to deallocate network to eliminate # the chance of orphaned ports in neutron deallocate_networks = False if network_info else True try: self._shutdown_instance(context, instance, block_device_mapping, requested_networks, try_deallocate_networks=deallocate_networks) except Exception as exc2: ctxt.reraise = False LOG.warning('Could not clean up failed build,' ' not rescheduling. Error: %s', str(exc2)) raise exception.BuildAbortException( instance_uuid=instance.uuid, reason=str(exc)) finally: # Call Cyborg to delete accelerator requests if accel_uuids: # ARQs created for instance or ports. # Port bind may not successful. # unbind port won't clean port ARQs. compute_utils.delete_arqs_if_needed( context, instance, accel_uuids) def _get_bound_arq_resources(self, context, instance, arq_uuids): """Get bound accelerator requests. The ARQ binding was kicked off in the conductor as an async operation. Here we wait for the notification from Cyborg. If the notification arrived before this point, which can happen in many/most cases (see [1]), it will be lost. To handle that, we use exit_wait_early. [1] https://review.opendev.org/#/c/631244/46/nova/compute/ manager.py@2627 :param instance: instance object :param arq_uuids: List of accelerator request (ARQ) UUIDs. :returns: List of ARQs for which bindings have completed, successfully or otherwise """ cyclient = cyborg.get_client(context) if arq_uuids is None: arqs = cyclient.get_arqs_for_instance(instance.uuid) arq_uuids = [arq['uuid'] for arq in arqs] events = [('accelerator-request-bound', arq_uuid) for arq_uuid in arq_uuids] timeout = CONF.arq_binding_timeout with self.virtapi.wait_for_instance_event( instance, events, deadline=timeout): resolved_arqs = cyclient.get_arqs_for_instance( instance.uuid, only_resolved=True) # Events for these resolved ARQs may have already arrived. # Such 'early' events need to be ignored. early_events = [('accelerator-request-bound', arq['uuid']) for arq in resolved_arqs] if early_events: self.virtapi.exit_wait_early(early_events) # Since a timeout in wait_for_instance_event will raise, we get # here only if all binding events have been received. resolved_uuids = [arq['uuid'] for arq in resolved_arqs] if sorted(resolved_uuids) != sorted(arq_uuids): # Query Cyborg to get all. arqs = cyclient.get_arqs_for_instance(instance.uuid) else: arqs = resolved_arqs return arqs def _split_network_arqs(self, arqs, requested_networks): """split arq request by extra spec from ARQ requested by port. Return ARQ groups tuple:(spec_arqs, port_arqs) Each item in the tuple is a dict like: { arq1_uuid: arq1 } """ port_arqs = {} spec_arqs = {} port_arqs_uuids = [req_net.arq_uuid for req_net in requested_networks] for arq in arqs: if arq['uuid'] in port_arqs_uuids: port_arqs.update({arq['uuid']: arq}) else: spec_arqs.update({arq['uuid']: arq}) return spec_arqs, port_arqs def _cleanup_allocated_networks(self, context, instance, requested_networks): """Cleanup networks allocated for instance. :param context: nova request context :param instance: nova.objects.instance.Instance object :param requested_networks: nova.objects.NetworkRequestList """ LOG.debug('Unplugging VIFs for instance', instance=instance) network_info = instance.get_network_info() # NOTE(stephenfin) to avoid nova destroying the instance without # unplugging the interface, refresh network_info if it is empty. 
if not network_info: try: network_info = self.network_api.get_instance_nw_info( context, instance, ) except Exception as exc: LOG.warning( 'Failed to update network info cache when cleaning up ' 'allocated networks. Stale VIFs may be left on this host.' 'Error: %s', str(exc) ) return try: self.driver.unplug_vifs(instance, network_info) except NotImplementedError: # This is an optional method so ignore things if it doesn't exist LOG.debug( 'Virt driver does not provide unplug_vifs method, so it ' 'is not possible determine if VIFs should be unplugged.' ) except Exception as exc: # It's possible that the instance never got as far as plugging # VIFs, in which case we would see an exception which can be # mostly ignored LOG.warning( 'Cleaning up VIFs failed for instance. Error: %s', str(exc), instance=instance, ) else: LOG.debug('Unplugged VIFs for instance', instance=instance) try: self._deallocate_network(context, instance, requested_networks) except Exception: LOG.exception('Failed to deallocate networks', instance=instance) return instance.system_metadata['network_allocated'] = 'False' try: instance.save() except exception.InstanceNotFound: # NOTE(alaski): It's possible that we're cleaning up the networks # because the instance was deleted. If that's the case then this # exception will be raised by instance.save() pass def _try_deallocate_network(self, context, instance, requested_networks=None): # During auto-scale cleanup, we could be deleting a large number # of servers at the same time and overloading parts of the system, # so we retry a few times in case of connection failures to the # networking service. @loopingcall.RetryDecorator( max_retry_count=3, inc_sleep_time=2, max_sleep_time=12, exceptions=(keystone_exception.connection.ConnectFailure,)) def _deallocate_network_with_retries(): try: self._deallocate_network( context, instance, requested_networks) except keystone_exception.connection.ConnectFailure as e: # Provide a warning that something is amiss. with excutils.save_and_reraise_exception(): LOG.warning('Failed to deallocate network for instance; ' 'retrying. Error: %s', str(e), instance=instance) try: # tear down allocated network structure _deallocate_network_with_retries() except Exception as ex: with excutils.save_and_reraise_exception(): LOG.error('Failed to deallocate network for instance. ' 'Error: %s', ex, instance=instance) self._set_instance_obj_error_state(instance) def _get_power_off_values(self, instance, clean_shutdown): """Get the timing configuration for powering down this instance.""" if clean_shutdown: timeout = compute_utils.get_value_from_system_metadata(instance, key='image_os_shutdown_timeout', type=int, default=CONF.shutdown_timeout) retry_interval = CONF.compute.shutdown_retry_interval else: timeout = 0 retry_interval = 0 return timeout, retry_interval def _power_off_instance(self, context, instance, clean_shutdown=True): """Power off an instance on this host.""" share_info = self._get_share_info(context, instance) timeout, retry_interval = self._get_power_off_values( instance, clean_shutdown) self.driver.power_off(instance, timeout, retry_interval) share_info.deactivate_all() self._umount_all_shares(context, instance, share_info) def _shutdown_instance(self, context, instance, bdms, requested_networks=None, notify=True, try_deallocate_networks=True): """Shutdown an instance on this host. 
:param:context: security context :param:instance: a nova.objects.Instance object :param:bdms: the block devices for the instance to be torn down :param:requested_networks: the networks on which the instance has ports :param:notify: true if a final usage notification should be emitted :param:try_deallocate_networks: false if we should avoid trying to teardown networking """ context = context.elevated() LOG.info('Terminating instance', instance=instance) if notify: self._notify_about_instance_usage(context, instance, "shutdown.start") compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.SHUTDOWN, phase=fields.NotificationPhase.START, bdms=bdms) network_info = instance.get_network_info() share_info = self._get_share_info( context, instance, check_status=False ) # NOTE(arnaudmorin) to avoid nova destroying the instance without # unplugging the interface, refresh network_info if it is empty. if not network_info: network_info = self.network_api.get_instance_nw_info( context, instance) # NOTE(vish) get bdms before destroying the instance vol_bdms = [bdm for bdm in bdms if bdm.is_volume] block_device_info = self._get_instance_block_device_info( context, instance, bdms=bdms) # NOTE(melwitt): attempt driver destroy before releasing ip, may # want to keep ip allocated for certain failures try: LOG.debug('Start destroying the instance on the hypervisor.', instance=instance) with timeutils.StopWatch() as timer: self.driver.destroy(context, instance, network_info, block_device_info) LOG.info('Took %0.2f seconds to destroy the instance on the ' 'hypervisor.', timer.elapsed(), instance=instance) except exception.InstancePowerOffFailure: # if the instance can't power off, don't release the ip with excutils.save_and_reraise_exception(): pass except Exception: with excutils.save_and_reraise_exception(): # deallocate ip and fail without proceeding to # volume api calls, preserving current behavior if try_deallocate_networks: self._try_deallocate_network(context, instance, requested_networks) if try_deallocate_networks: self._try_deallocate_network(context, instance, requested_networks) timer.restart() for share in share_info: # If we fail umounting or denying the share we may have a # dangling share_mapping in the DB (share_mapping entry with an # instance that does not exist anymore). try: self._umount_share(context, instance, share) share.deactivate() self.deny_share(context, instance, share) except ( exception.ShareUmountError, exception.ShareNotFound, exception.ShareAccessNotFound, exception.ShareAccessRemovalError, ): LOG.warning("An error occurred while unmounting or " "denying the share '%s'. This error is ignored to " "proceed with instance removal. " "Consequently, there may be a dangling " "share_mapping.", share.share_id) connector = None for bdm in vol_bdms: try: if bdm.attachment_id: self.volume_api.attachment_delete(context, bdm.attachment_id) else: # NOTE(vish): actual driver detach done in driver.destroy, # so just tell cinder that we are done with it. 
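                    # (bdm.attachment_id is only set for volumes attached
                    # with the newer cinder attachment API; legacy
                    # attachments are cleaned up via terminate_connection()
                    # and detach() below.)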
if connector is None: connector = self.driver.get_volume_connector(instance) self.volume_api.terminate_connection(context, bdm.volume_id, connector) self.volume_api.detach(context, bdm.volume_id, instance.uuid) except exception.VolumeAttachmentNotFound as exc: LOG.debug('Ignoring VolumeAttachmentNotFound: %s', exc, instance=instance) except exception.DiskNotFound as exc: LOG.debug('Ignoring DiskNotFound: %s', exc, instance=instance) except exception.VolumeNotFound as exc: LOG.debug('Ignoring VolumeNotFound: %s', exc, instance=instance) except (cinder_exception.EndpointNotFound, keystone_exception.EndpointNotFound) as exc: LOG.warning('Ignoring EndpointNotFound for ' 'volume %(volume_id)s: %(exc)s', {'exc': exc, 'volume_id': bdm.volume_id}, instance=instance) except cinder_exception.ClientException as exc: LOG.warning('Ignoring unknown cinder exception for ' 'volume %(volume_id)s: %(exc)s', {'exc': exc, 'volume_id': bdm.volume_id}, instance=instance) except Exception as exc: LOG.warning('Ignoring unknown exception for ' 'volume %(volume_id)s: %(exc)s', {'exc': exc, 'volume_id': bdm.volume_id}, instance=instance) if vol_bdms: LOG.info('Took %(time).2f seconds to detach %(num)s volumes ' 'for instance.', {'time': timer.elapsed(), 'num': len(vol_bdms)}, instance=instance) if notify: self._notify_about_instance_usage(context, instance, "shutdown.end") compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.SHUTDOWN, phase=fields.NotificationPhase.END, bdms=bdms) def _cleanup_volumes(self, context, instance, bdms, raise_exc=True, detach=True): original_exception = None for bdm in bdms: if detach and bdm.volume_id: try: LOG.debug("Detaching volume: %s", bdm.volume_id, instance_uuid=instance.uuid) destroy = bdm.delete_on_termination self._detach_volume(context, bdm, instance, destroy_bdm=destroy) except Exception as exc: original_exception = exc LOG.warning('Failed to detach volume: %(volume_id)s ' 'due to %(exc)s', {'volume_id': bdm.volume_id, 'exc': exc}) if bdm.volume_id and bdm.delete_on_termination: try: LOG.debug("Deleting volume: %s", bdm.volume_id, instance_uuid=instance.uuid) self.volume_api.delete(context, bdm.volume_id) except Exception as exc: original_exception = exc LOG.warning('Failed to delete volume: %(volume_id)s ' 'due to %(exc)s', {'volume_id': bdm.volume_id, 'exc': exc}) if original_exception is not None and raise_exc: raise original_exception def _delete_instance(self, context, instance, bdms): """Delete an instance on this host. :param context: nova request context :param instance: nova.objects.instance.Instance object :param bdms: nova.objects.block_device.BlockDeviceMappingList object """ events = self.instance_events.clear_events_for_instance(instance) if events: LOG.debug('Events pending at deletion: %(events)s', {'events': ','.join(events.keys())}, instance=instance) self._notify_about_instance_usage(context, instance, "delete.start") compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.DELETE, phase=fields.NotificationPhase.START, bdms=bdms) self._shutdown_instance(context, instance, bdms) # NOTE(vish): We have already deleted the instance, so we have # to ignore problems cleaning up the volumes. It # would be nice to let the user know somehow that # the volume deletion failed, but it is not # acceptable to have an instance that can not be # deleted. 
Perhaps this could be reworked in the # future to set an instance fault the first time # and to only ignore the failure if the instance # is already in ERROR. # NOTE(ameeda): The volumes have already been detached during # the above _shutdown_instance() call and this is # why detach is not requested from # _cleanup_volumes() in this case self._cleanup_volumes(context, instance, bdms, raise_exc=False, detach=False) # Delete Cyborg ARQs if the instance has a device profile. compute_utils.delete_arqs_if_needed(context, instance) # if a delete task succeeded, always update vm state and task # state without expecting task state to be DELETING instance.vm_state = vm_states.DELETED instance.task_state = None instance.power_state = power_state.NOSTATE instance.terminated_at = timeutils.utcnow() instance.save() self._complete_deletion(context, instance) # only destroy the instance in the db if the _complete_deletion # doesn't raise and therefore allocation is successfully # deleted in placement instance.destroy() self._notify_about_instance_usage(context, instance, "delete.end") compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.DELETE, phase=fields.NotificationPhase.END, bdms=bdms) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def terminate_instance(self, context, instance, bdms): """Terminate an instance on this host.""" @utils.synchronized(instance.uuid) def do_terminate_instance(instance, bdms): # NOTE(mriedem): If we are deleting the instance while it was # booting from volume, we could be racing with a database update of # the BDM volume_id. Since the compute API passes the BDMs over RPC # to compute here, the BDMs may be stale at this point. So check # for any volume BDMs that don't have volume_id set and if we # detect that, we need to refresh the BDM list before proceeding. # TODO(mriedem): Move this into _delete_instance and make the bdms # parameter optional. for bdm in list(bdms): if bdm.is_volume and not bdm.volume_id: LOG.debug('There are potentially stale BDMs during ' 'delete, refreshing the BlockDeviceMappingList.', instance=instance) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) break try: self._delete_instance(context, instance, bdms) except exception.InstanceNotFound: LOG.info("Instance disappeared during terminate", instance=instance) except Exception: # As we're trying to delete always go to Error if something # goes wrong that _delete_instance can't handle. with excutils.save_and_reraise_exception(): LOG.exception('Setting instance vm_state to ERROR', instance=instance) self._set_instance_obj_error_state(instance) do_terminate_instance(instance, bdms) # NOTE(johannes): This is probably better named power_off_instance # so it matches the driver method, but because of other issues, we # can't use that name in grizzly. 
@wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def stop_instance(self, context, instance, clean_shutdown): """Stopping an instance on this host.""" @utils.synchronized(instance.uuid) def do_stop_instance(): current_power_state = self._get_power_state(instance) LOG.debug('Stopping instance; current vm_state: %(vm_state)s, ' 'current task_state: %(task_state)s, current DB ' 'power_state: %(db_power_state)s, current VM ' 'power_state: %(current_power_state)s', {'vm_state': instance.vm_state, 'task_state': instance.task_state, 'db_power_state': instance.power_state, 'current_power_state': current_power_state}, instance_uuid=instance.uuid) # NOTE(mriedem): If the instance is already powered off, we are # possibly tearing down and racing with other operations, so we can # expect the task_state to be None if something else updates the # instance and we're not locking it. expected_task_state = [task_states.POWERING_OFF] # The list of power states is from _sync_instance_power_state. if current_power_state in (power_state.NOSTATE, power_state.SHUTDOWN, power_state.CRASHED): LOG.info('Instance is already powered off in the ' 'hypervisor when stop is called.', instance=instance) expected_task_state.append(None) self._notify_about_instance_usage(context, instance, "power_off.start") compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.POWER_OFF, phase=fields.NotificationPhase.START) self._power_off_instance(context, instance, clean_shutdown) instance.power_state = self._get_power_state(instance) instance.vm_state = vm_states.STOPPED instance.task_state = None instance.save(expected_task_state=expected_task_state) self._notify_about_instance_usage(context, instance, "power_off.end") compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.POWER_OFF, phase=fields.NotificationPhase.END) do_stop_instance() def _power_on(self, context, instance): network_info = self.network_api.get_instance_nw_info(context, instance) block_device_info = self._get_instance_block_device_info(context, instance) accel_info = self._get_accel_info(context, instance) share_info = self._get_share_info(context, instance) self._mount_all_shares(context, instance, share_info) self.driver.power_on(context, instance, network_info, block_device_info, accel_info, share_info) share_info.activate_all() def _delete_snapshot_of_shelved_instance(self, context, instance, snapshot_id): """Delete snapshot of shelved instance.""" try: self.image_api.delete(context, snapshot_id) except (exception.ImageNotFound, exception.ImageNotAuthorized) as exc: LOG.warning("Failed to delete snapshot " "from shelved instance (%s).", exc.format_message(), instance=instance) except Exception: LOG.exception("Something wrong happened when trying to " "delete snapshot from shelved instance.", instance=instance) # NOTE(johannes): This is probably better named power_on_instance # so it matches the driver method, but because of other issues, we # can't use that name in grizzly. 
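    # In outline, _power_on above regathers network and block device info,
    # collects any Cyborg accelerator info and Manila share mappings, mounts
    # the shares on this host, powers the guest on through the driver and then
    # flips the share mappings to active; start_instance below wraps this with
    # power_on.start/end notifications and, for a previously shelved instance,
    # deletes the stashed shelve snapshot and its system_metadata keys.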
@wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def start_instance(self, context, instance): """Starting an instance on this host.""" self._notify_about_instance_usage(context, instance, "power_on.start") compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.POWER_ON, phase=fields.NotificationPhase.START) self._power_on(context, instance) instance.power_state = self._get_power_state(instance) instance.vm_state = vm_states.ACTIVE instance.task_state = None # Delete an image(VM snapshot) for a shelved instance snapshot_id = instance.system_metadata.get('shelved_image_id') if snapshot_id: self._delete_snapshot_of_shelved_instance(context, instance, snapshot_id) # Delete system_metadata for a shelved instance compute_utils.remove_shelved_keys_from_system_metadata(instance) instance.save(expected_task_state=task_states.POWERING_ON) self._notify_about_instance_usage(context, instance, "power_on.end") compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.POWER_ON, phase=fields.NotificationPhase.END) @messaging.expected_exceptions(NotImplementedError, exception.TriggerCrashDumpNotSupported, exception.InstanceNotRunning) @wrap_exception() @wrap_instance_event(prefix='compute') @wrap_instance_fault def trigger_crash_dump(self, context, instance): """Trigger crash dump in an instance.""" self._notify_about_instance_usage(context, instance, "trigger_crash_dump.start") compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.TRIGGER_CRASH_DUMP, phase=fields.NotificationPhase.START) # This method does not change task_state and power_state because the # effect of a trigger depends on user's configuration. 
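        # For example, with the libvirt driver this typically results in an
        # NMI being injected into the guest; whether the guest then produces
        # a crash dump (e.g. via kdump) or simply ignores the interrupt is
        # entirely up to how the guest OS is configured.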
self.driver.trigger_crash_dump(instance) self._notify_about_instance_usage(context, instance, "trigger_crash_dump.end") compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.TRIGGER_CRASH_DUMP, phase=fields.NotificationPhase.END) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def soft_delete_instance(self, context, instance): """Soft delete an instance on this host.""" with compute_utils.notify_about_instance_delete( self.notifier, context, instance, 'soft_delete', source=fields.NotificationSource.COMPUTE): try: self.driver.soft_delete(instance) except NotImplementedError: # Fallback to just powering off the instance if the # hypervisor doesn't implement the soft_delete method self._power_off_instance( context, instance, clean_shutdown=False ) instance.power_state = self._get_power_state(instance) instance.vm_state = vm_states.SOFT_DELETED instance.task_state = None instance.save(expected_task_state=[task_states.SOFT_DELETING]) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def restore_instance(self, context, instance): """Restore a soft-deleted instance on this host.""" self._notify_about_instance_usage(context, instance, "restore.start") compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.RESTORE, phase=fields.NotificationPhase.START) try: self.driver.restore(instance) except NotImplementedError: # Fallback to just powering on the instance if the hypervisor # doesn't implement the restore method self._power_on(context, instance) instance.power_state = self._get_power_state(instance) instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.save(expected_task_state=task_states.RESTORING) self._notify_about_instance_usage(context, instance, "restore.end") compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.RESTORE, phase=fields.NotificationPhase.END) @staticmethod def _set_migration_status(migration, status): """Set the status, and guard against a None being passed in. This is useful as some of the compute RPC calls will not pass a migration object in older versions. The check can be removed when we move past 4.x major version of the RPC API. 
""" if migration: migration.status = status migration.save() @staticmethod def _reimage_failed_callback(event_name, instance): msg = ('Cinder reported failure during reimaging ' 'with %(event)s for instance %(uuid)s') msg_args = {'event': event_name, 'uuid': instance.uuid} LOG.error(msg, msg_args) raise exception.ReimageException(msg % msg_args) def _detach_root_volume(self, context, instance, root_bdm): volume_id = root_bdm.volume_id mp = root_bdm.device_name old_connection_info = jsonutils.loads(root_bdm.connection_info) try: self.driver.detach_volume(context, old_connection_info, instance, root_bdm.device_name) except exception.DiskNotFound as err: LOG.warning('Ignoring DiskNotFound exception while ' 'detaching volume %(volume_id)s from ' '%(mp)s : %(err)s', {'volume_id': volume_id, 'mp': mp, 'err': err}, instance=instance) except exception.DeviceDetachFailed: with excutils.save_and_reraise_exception(): LOG.warning('Guest refused to detach volume %(vol)s', {'vol': volume_id}, instance=instance) self.volume_api.roll_detaching(context, volume_id) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to detach volume ' '%(volume_id)s from %(mp)s', {'volume_id': volume_id, 'mp': mp}, instance=instance) self.volume_api.roll_detaching(context, volume_id) def _rebuild_volume_backed_instance(self, context, instance, bdms, image_id): # Get root bdm and attachment ID associated to it root_bdm = compute_utils.get_root_bdm(context, instance, bdms) old_attachment_id = root_bdm.attachment_id # Create a new attachment and delete the previous attachment # We create a new attachment first to keep the volume in # reserved state after old attachment is deleted and avoid any # races in between the attachment create and delete. attachment_id = None try: attachment_id = self.volume_api.attachment_create( context, root_bdm.volume_id, instance.uuid)['id'] self._detach_root_volume(context, instance, root_bdm) root_bdm.attachment_id = attachment_id root_bdm.save() self.volume_api.attachment_delete(context, old_attachment_id) except exception.InstanceNotFound: # This means we failed to save the new attachment because # the instance is deleted, so (try to) delete it and abort. try: self.volume_api.attachment_delete(context, attachment_id) except cinder_exception.ClientException: LOG.error('Failed to delete new attachment %s', attachment_id) msg = _('Failed to rebuild volume backed instance.') raise exception.BuildAbortException( instance_uuid=instance.uuid, reason=msg) except cinder_exception.ClientException: if attachment_id: LOG.error('Failed to delete old attachment %s', old_attachment_id) else: LOG.error('Failed to create new attachment') msg = _('Failed to rebuild volume backed instance.') raise exception.BuildAbortException( instance_uuid=instance.uuid, reason=msg) events = [('volume-reimaged', root_bdm.volume_id)] # Get the image requested for rebuild try: image = self.image_api.get(context, image_id) except exception.ImageNotFound: msg = _('Image %s not found.') % image_id LOG.error(msg) raise exception.BuildAbortException( instance_uuid=instance.uuid, reason=msg) image_size = int(math.ceil(float(image.get('size')) / units.Gi)) deadline = CONF.reimage_timeout_per_gb * image_size error_cb = self._reimage_failed_callback # Call cinder to perform reimage operation and wait until an # external event is triggered. 
try: with self.virtapi.wait_for_instance_event(instance, events, deadline=deadline, error_callback=error_cb): self.volume_api.reimage_volume( context, root_bdm.volume_id, image_id, reimage_reserved=True) except Exception as ex: LOG.error('Failed to rebuild volume backed instance: %s', str(ex), instance=instance) msg = _('Failed to rebuild volume backed instance.') raise exception.BuildAbortException( instance_uuid=instance.uuid, reason=msg) def _rebuild_default_impl( self, context, instance, image_meta, injected_files, admin_password, allocations, bdms, detach_block_devices, attach_block_devices, network_info=None, evacuate=False, block_device_info=None, preserve_ephemeral=False, accel_uuids=None, reimage_boot_volume=False): if preserve_ephemeral: # The default code path does not support preserving ephemeral # partitions. raise exception.PreserveEphemeralNotSupported() accel_info = [] detach_root_bdm = not reimage_boot_volume if evacuate: if instance.flavor.extra_specs.get('accel:device_profile'): try: accel_info = self._get_bound_arq_resources( context, instance, accel_uuids or []) except (Exception, exception.InstanceEventTimeout) as exc: LOG.exception(exc) self._build_resources_cleanup(instance, network_info) msg = _('Failure getting accelerator resources.') raise exception.BuildAbortException( instance_uuid=instance.uuid, reason=msg) detach_block_devices(context, bdms, detach_root_bdm=detach_root_bdm) else: self._power_off_instance(context, instance, clean_shutdown=True) detach_block_devices(context, bdms, detach_root_bdm=detach_root_bdm) if reimage_boot_volume: # Previously, the calls reaching here were for image # backed instance rebuild and didn't have a root bdm # so now we need to handle the case for root bdm. # For the root BDM, we are doing attach/detach operations # manually as we want to maintain a 'reserved' state # throughout the reimage process from the cinder side so # we are excluding the root BDM from certain operations # here i.e. deleting it's mapping before the destroy call. 
block_device_info_copy = copy.deepcopy(block_device_info) root_bdm = compute_utils.get_root_bdm(context, instance, bdms) mapping = block_device_info_copy["block_device_mapping"] # drop root bdm from the mapping mapping = [ bdm for bdm in mapping if bdm["volume_id"] != root_bdm.volume_id ] self.driver.destroy(context, instance, network_info=network_info, block_device_info=block_device_info_copy) else: self.driver.destroy(context, instance, network_info=network_info, block_device_info=block_device_info) try: accel_info = self._get_accel_info(context, instance) except Exception as exc: LOG.exception(exc) self._build_resources_cleanup(instance, network_info) msg = _('Failure getting accelerator resources.') raise exception.BuildAbortException( instance_uuid=instance.uuid, reason=msg) if reimage_boot_volume: is_volume_backed = compute_utils.is_volume_backed_instance( context, instance, bdms) if is_volume_backed: self._rebuild_volume_backed_instance( context, instance, bdms, image_meta.id) instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING instance.save(expected_task_state=[task_states.REBUILDING]) new_block_device_info = attach_block_devices(context, instance, bdms) instance.task_state = task_states.REBUILD_SPAWNING instance.save( expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING]) with instance.mutated_migration_context(): self.driver.spawn(context, instance, image_meta, injected_files, admin_password, allocations, network_info=network_info, block_device_info=new_block_device_info, accel_info=accel_info) def _notify_instance_rebuild_error(self, context, instance, error, bdms): self._notify_about_instance_usage(context, instance, 'rebuild.error', fault=error) compute_utils.notify_about_instance_rebuild( context, instance, self.host, phase=fields.NotificationPhase.ERROR, exception=error, bdms=bdms) @messaging.expected_exceptions(exception.PreserveEphemeralNotSupported, exception.BuildAbortException) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def rebuild_instance(self, context, instance, orig_image_ref, image_ref, injected_files, new_pass, orig_sys_metadata, bdms, recreate, on_shared_storage, preserve_ephemeral, migration, scheduled_node, limits, request_spec, accel_uuids, reimage_boot_volume=None, target_state=None): """Destroy and re-make this instance. A 'rebuild' effectively purges all existing data from the system and remakes the VM with given 'metadata' and 'personalities'. :param context: `nova.RequestContext` object :param instance: Instance object :param orig_image_ref: Original image_ref before rebuild :param image_ref: New image_ref for rebuild :param injected_files: Files to inject :param new_pass: password to set on rebuilt instance :param orig_sys_metadata: instance system metadata from pre-rebuild :param bdms: block-device-mappings to use for rebuild :param recreate: True if the instance is being evacuated (e.g. the hypervisor it was on failed) - cleanup of old state will be skipped. :param on_shared_storage: True if instance files on shared storage. If not provided then information from the driver will be used to decide if the instance files are available or not on the target host :param preserve_ephemeral: True if the default ephemeral storage partition must be preserved on rebuild :param migration: a Migration object if one was created for this rebuild operation (if it's a part of evacuate) :param scheduled_node: A node of the host chosen by the scheduler. 
If a host was specified by the user, this will be None :param limits: Overcommit limits set by the scheduler. If a host was specified by the user, this will be None :param request_spec: a RequestSpec object used to schedule the instance :param accel_uuids: a list of cyborg ARQ uuids :param reimage_boot_volume: Boolean to specify whether the user has explicitly requested to rebuild a boot volume or None if RPC version is <=6.0 :param target_state: Set a target state for the evacuated instance or None if RPC version is <=6.1. """ # recreate=True means the instance is being evacuated from a failed # host to a new destination host (this host). The 'recreate' variable # name is confusing, so rename it to evacuate here at the top, which # is simpler than renaming a parameter in an RPC versioned method. evacuate = recreate context = context.elevated() if evacuate: LOG.info("Evacuating instance", instance=instance) else: LOG.info("Rebuilding instance", instance=instance) if evacuate: # This is an evacuation to a new host, so we need to perform a # resource claim. rebuild_claim = self.rt.rebuild_claim else: # This is a rebuild to the same host, so we don't need to make # a claim since the instance is already on this host. rebuild_claim = claims.NopClaim if image_ref: image_meta = objects.ImageMeta.from_image_ref( context, self.image_api, image_ref) elif evacuate: # For evacuate the API does not send down the image_ref since the # image does not change so just get it from what was stashed in # the instance system_metadata when the instance was created (or # last rebuilt). This also works for volume-backed instances. image_meta = instance.image_meta else: image_meta = objects.ImageMeta() # NOTE(mriedem): On an evacuate, we need to update # the instance's host and node properties to reflect it's # destination node for the evacuate. if not scheduled_node: if evacuate: try: compute_node = self._get_compute_info(context, self.host) scheduled_node = compute_node.hypervisor_hostname except exception.ComputeHostNotFound as e: # This means we were asked to rebuild one of our own # instances, or another instance as a target of an # evacuation, but we are unable to find a matching compute # node. LOG.exception('Failed to get compute_info for %s', self.host) self._set_migration_status(migration, 'failed') self._notify_instance_rebuild_error(context, instance, e, bdms) raise exception.InstanceFaultRollback( inner_exception=exception.BuildAbortException( instance_uuid=instance.uuid, reason=e.format_message())) else: scheduled_node = instance.node allocs = self.reportclient.get_allocations_for_consumer( context, instance.uuid) # If the resource claim or group policy validation fails before we # do anything to the guest or its networking/volumes we want to keep # the current status rather than put the instance into ERROR status. 
instance_state = instance.vm_state with self._error_out_instance_on_exception( context, instance, instance_state=instance_state): try: self._do_rebuild_instance_with_claim( context, instance, orig_image_ref, image_meta, injected_files, new_pass, orig_sys_metadata, bdms, evacuate, on_shared_storage, preserve_ephemeral, migration, request_spec, allocs, rebuild_claim, scheduled_node, limits, accel_uuids, reimage_boot_volume, target_state) except (exception.ComputeResourcesUnavailable, exception.RescheduledException) as e: if isinstance(e, exception.ComputeResourcesUnavailable): LOG.debug("Could not rebuild instance on this host, not " "enough resources available.", instance=instance) else: # RescheduledException is raised by the late server group # policy check during evacuation if a parallel scheduling # violated the policy. # We catch the RescheduledException here but we don't have # the plumbing to do an actual reschedule so we abort the # operation. LOG.debug("Could not rebuild instance on this host, " "late server group check failed.", instance=instance) # NOTE(ndipanov): We just abort the build for now and leave a # migration record for potential cleanup later self._set_migration_status(migration, 'failed') # Since the claim failed, we need to remove the allocation # created against the destination node. Note that we can only # get here when evacuating to a destination node. Rebuilding # on the same host (not evacuate) uses the NopClaim which will # not raise ComputeResourcesUnavailable. self.rt.delete_allocation_for_evacuated_instance( context, instance, scheduled_node, node_type='destination') self._notify_instance_rebuild_error(context, instance, e, bdms) # Wrap this in InstanceFaultRollback so that the # _error_out_instance_on_exception context manager keeps the # vm_state unchanged. raise exception.InstanceFaultRollback( inner_exception=exception.BuildAbortException( instance_uuid=instance.uuid, reason=e.format_message())) except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError) as e: LOG.debug('Instance was deleted while rebuilding', instance=instance) self._set_migration_status(migration, 'failed') self._notify_instance_rebuild_error(context, instance, e, bdms) except Exception as e: self._set_migration_status(migration, 'failed') if evacuate or scheduled_node is not None: self.rt.delete_allocation_for_evacuated_instance( context, instance, scheduled_node, node_type='destination') self._notify_instance_rebuild_error(context, instance, e, bdms) raise else: # NOTE(gibi): Let the resource tracker set the instance # host and drop the migration context as we need to hold the # COMPUTE_RESOURCE_SEMAPHORE to avoid the race with # _update_available_resources. See bug 1896463. 
self.rt.finish_evacuation(instance, scheduled_node, migration) def _do_rebuild_instance_with_claim( self, context, instance, orig_image_ref, image_meta, injected_files, new_pass, orig_sys_metadata, bdms, evacuate, on_shared_storage, preserve_ephemeral, migration, request_spec, allocations, rebuild_claim, scheduled_node, limits, accel_uuids, reimage_boot_volume, target_state): """Helper to avoid deep nesting in the top-level method.""" provider_mapping = None if evacuate: provider_mapping = self._get_request_group_mapping(request_spec) if provider_mapping: compute_utils.update_pci_request_with_placement_allocations( context, self.reportclient, instance.pci_requests.requests, provider_mapping, ) claim_context = rebuild_claim( context, instance, scheduled_node, allocations, limits=limits, image_meta=image_meta, migration=migration) with claim_context: self._do_rebuild_instance( context, instance, orig_image_ref, image_meta, injected_files, new_pass, orig_sys_metadata, bdms, evacuate, on_shared_storage, preserve_ephemeral, migration, request_spec, allocations, provider_mapping, accel_uuids, reimage_boot_volume, target_state) @staticmethod def _get_image_name(image_meta): if image_meta.obj_attr_is_set("name"): return image_meta.name else: return '' def _do_rebuild_instance( self, context, instance, orig_image_ref, image_meta, injected_files, new_pass, orig_sys_metadata, bdms, evacuate, on_shared_storage, preserve_ephemeral, migration, request_spec, allocations, request_group_resource_providers_mapping, accel_uuids, reimage_boot_volume, target_state): orig_vm_state = instance.vm_state if evacuate: if target_state and orig_vm_state != vm_states.ERROR: # This will ensure that at destination the instance will have # the desired state. if target_state not in vm_states.ALLOW_TARGET_STATES: raise exception.InstanceEvacuateNotSupportedTargetState( target_state=target_state) orig_vm_state = target_state if request_spec: # NOTE(gibi): Do a late check of server group policy as # parallel scheduling could violate such policy. This will # cause the evacuate to fail as rebuild does not implement # reschedule. 
hints = self._get_scheduler_hints({}, request_spec) self._validate_instance_group_policy(context, instance, hints) if not self.driver.capabilities.get("supports_evacuate", False): raise exception.InstanceEvacuateNotSupported self._check_instance_exists(instance) if on_shared_storage is None: LOG.debug('on_shared_storage is not provided, using driver ' 'information to decide if the instance needs to ' 'be evacuated') on_shared_storage = self.driver.instance_on_disk(instance) elif (on_shared_storage != self.driver.instance_on_disk(instance)): # To cover case when admin expects that instance files are # on shared storage, but not accessible and vice versa raise exception.InvalidSharedStorage( _("Invalid state of instance files on shared" " storage")) if on_shared_storage: LOG.info('disk on shared storage, evacuating using' ' existing disk') elif instance.image_ref: orig_image_ref = instance.image_ref LOG.info("disk not on shared storage, evacuating from " "image: '%s'", str(orig_image_ref)) else: LOG.info('disk on volume, evacuating using existing ' 'volume') # We check trusted certs capabilities for both evacuate (rebuild on # another host) and rebuild (rebuild on the same host) because for # evacuate we need to make sure an instance with trusted certs can # have the image verified with those certs during rebuild, and for # rebuild we could be rebuilding a server that started out with no # trusted certs on this host, and then was rebuilt with trusted certs # for a new image, in which case we need to validate that new image # with the trusted certs during the rebuild. self._check_trusted_certs(instance) # This instance.exists message should contain the original # image_ref, not the new one. Since the DB has been updated # to point to the new one... we have to override it. orig_image_ref_url = self.image_api.generate_image_url(orig_image_ref, context) extra_usage_info = {'image_ref_url': orig_image_ref_url} compute_utils.notify_usage_exists( self.notifier, context, instance, self.host, current_period=True, system_metadata=orig_sys_metadata, extra_usage_info=extra_usage_info) # This message should contain the new image_ref extra_usage_info = {'image_name': self._get_image_name(image_meta)} self._notify_about_instance_usage(context, instance, "rebuild.start", extra_usage_info=extra_usage_info) # NOTE: image_name is not included in the versioned notification # because we already provide the image_uuid in the notification # payload and the image details can be looked up via the uuid. compute_utils.notify_about_instance_rebuild( context, instance, self.host, phase=fields.NotificationPhase.START, bdms=bdms) instance.power_state = self._get_power_state(instance) instance.task_state = task_states.REBUILDING instance.save(expected_task_state=[task_states.REBUILDING]) if evacuate: self.network_api.setup_networks_on_host( context, instance, self.host) # For nova-network this is needed to move floating IPs # For neutron this updates the host in the port binding # TODO(cfriesen): this network_api call and the one above # are so similar, we should really try to unify them. self.network_api.setup_instance_network_on_host( context, instance, self.host, migration, provider_mappings=request_group_resource_providers_mapping) # TODO(mriedem): Consider decorating setup_instance_network_on_host # with @api.refresh_cache and then we wouldn't need this explicit # call to get_instance_nw_info. 
network_info = self.network_api.get_instance_nw_info(context, instance) else: network_info = instance.get_network_info() if bdms is None: bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) block_device_info = \ self._get_instance_block_device_info( context, instance, bdms=bdms) def detach_block_devices(context, bdms, detach_root_bdm=True): for bdm in bdms: # Previously, the calls made to this method by rebuild # instance operation were for image backed instances which # assumed we only had attached volumes and no root BDM. # Now we need to handle case for root BDM which we are # doing manually so skipping the attachment create/delete # calls from here. # The detach_root_bdm parameter is only passed while # rebuilding the volume backed instance so we don't have # to worry about other callers as they won't satisfy this # condition. # For evacuate case, we have detach_root_bdm always True # since we don't have reimage_boot_volume parameter in # this case so this will not be executed. if not detach_root_bdm and bdm.is_root: continue if bdm.is_volume: # NOTE (ildikov): Having the attachment_id set in the BDM # means that it's the new Cinder attach/detach flow # (available from v3.44). In that case we explicitly # attach and detach the volumes through attachment level # operations. In this scenario _detach_volume will delete # the existing attachment which would make the volume # status change to 'available' if we don't pre-create # another empty attachment before deleting the old one. attachment_id = None if bdm.attachment_id: attachment_id = self.volume_api.attachment_create( context, bdm['volume_id'], instance.uuid)['id'] self._detach_volume(context, bdm, instance, destroy_bdm=False) if attachment_id: bdm.attachment_id = attachment_id bdm.save() files = self._decode_files(injected_files) kwargs = dict( context=context, instance=instance, image_meta=image_meta, injected_files=files, admin_password=new_pass, allocations=allocations, bdms=bdms, detach_block_devices=detach_block_devices, attach_block_devices=self._prep_block_device, block_device_info=block_device_info, network_info=network_info, preserve_ephemeral=preserve_ephemeral, evacuate=evacuate, accel_uuids=accel_uuids, reimage_boot_volume=reimage_boot_volume) try: with instance.mutated_migration_context(): self.driver.rebuild(**kwargs) except NotImplementedError: # NOTE(rpodolyaka): driver doesn't provide specialized version # of rebuild, fall back to the default implementation self._rebuild_default_impl(**kwargs) self._update_instance_after_spawn(instance) instance.save(expected_task_state=[task_states.REBUILD_SPAWNING]) if orig_vm_state == vm_states.STOPPED: LOG.info("bringing vm to original state: '%s'", orig_vm_state, instance=instance) instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.POWERING_OFF instance.progress = 0 instance.save() self.stop_instance(context, instance, False) # TODO(melwitt): We should clean up instance console tokens here in the # case of evacuate. The instance is on a new host and will need to # establish a new console connection. 
self._update_scheduler_instance_info(context, instance) self._notify_about_instance_usage( context, instance, "rebuild.end", network_info=network_info, extra_usage_info=extra_usage_info) compute_utils.notify_about_instance_rebuild( context, instance, self.host, phase=fields.NotificationPhase.END, bdms=bdms) def _handle_bad_volumes_detached(self, context, instance, bad_devices, block_device_info): """Handle cases where the virt-layer had to detach non-working volumes in order to complete an operation. """ for bdm in block_device_info['block_device_mapping']: if bdm.get('mount_device') in bad_devices: try: volume_id = bdm['connection_info']['data']['volume_id'] except KeyError: continue # NOTE(sirp): ideally we'd just call # `compute_api.detach_volume` here but since that hits the # DB directly, that's off limits from within the # compute-manager. # # API-detach LOG.info("Detaching from volume api: %s", volume_id) self.volume_api.begin_detaching(context, volume_id) # Manager-detach self.detach_volume(context, volume_id, instance) def _get_accel_info(self, context, instance): dp_name = instance.flavor.extra_specs.get('accel:device_profile') if dp_name: cyclient = cyborg.get_client(context) accel_info = cyclient.get_arqs_for_instance(instance.uuid) else: accel_info = [] return accel_info def _delete_dangling_bdms(self, context, instance, bdms): """Deletes dangling or stale attachments for volume from Nova and Cinder DB so both service DBs can be in sync. Retrieves volume attachments from the Nova block_device_mapping table and verifies them with the Cinder volume_attachment table. If attachment is not present in any one of the DBs, delete attachments from the other DB. :param context: The nova request context. :param instance: instance object. :param bdms: BlockDeviceMappingList list object. """ try: cinder_attachments = self.volume_api.attachment_get_all( context, instance.uuid) except (keystone_exception.EndpointNotFound, cinder_exception.ClientException): # if cinder is not deployed we never need to check for # attachments as there cannot be dangling bdms. # if we cannot connect to cinder we cannot check for dangling # bdms so we skip the check. Intermittent connection issues # to cinder should not cause instance reboot to fail. return # attachments present in nova DB, ones nova knows about nova_attachments = [] bdms_to_delete = [] for bdm in bdms.objects: if bdm.volume_id and bdm.attachment_id: try: self.volume_api.attachment_get(context, bdm.attachment_id) except exception.VolumeAttachmentNotFound: LOG.info( f"Removing stale volume attachment " f"'{bdm.attachment_id}' from instance for " f"volume '{bdm.volume_id}'.", instance=instance) bdm.destroy() bdms_to_delete.append(bdm) else: nova_attachments.append(bdm.attachment_id) cinder_attachments = [each['id'] for each in cinder_attachments] if len(set(cinder_attachments) - set(nova_attachments)): LOG.info( "Removing stale volume attachments of instance from " "Cinder", instance=instance) for each_attach in set(cinder_attachments) - set(nova_attachments): # delete only cinder known attachments, from cinder DB. 
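            # For example, if Cinder reports attachments {a1, a2, a3} for
            # this instance while the instance's BDMs only reference a1 and
            # a2, then a3 is the dangling attachment removed here.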
LOG.debug( f"Removing attachment '{each_attach}'", instance=instance) self.volume_api.attachment_delete(context, each_attach) # refresh bdms object for bdm in bdms_to_delete: bdms.objects.remove(bdm) def _get_share_info(self, context, instance, check_status=True): share_info = objects.ShareMappingList(context) for share_mapping in objects.ShareMappingList.get_by_instance_uuid( context, instance.uuid ): share_info.objects.append(share_mapping) if check_status: fsm = fields.ShareMappingStatus if ( share_mapping.status == fsm.ATTACHING or share_mapping.status == fsm.DETACHING ): # If the share status is attaching it means we are racing # with the compute node. The mount is not completed yet or # something really bad happened. So we set the instance in # error state. LOG.error( "Share id '%s' attached to server id '%s' is " "still in '%s' state. Setting the instance " "in error.", share_mapping.share_id, instance.id, share_mapping.status, ) self._set_instance_obj_error_state( instance, clean_task_state=True ) raise exception.ShareErrorUnexpectedStatus( share_id=share_mapping.share_id, instance_uuid=instance.id, ) if share_mapping.status == fsm.ERROR: LOG.warning( "Share id '%s' attached to server id '%s' is in " "error state.", share_mapping.share_id, instance.id ) return share_info @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def reboot_instance(self, context, instance, block_device_info, reboot_type): @utils.synchronized(instance.uuid) def do_reboot_instance(context, instance, block_device_info, reboot_type): self._reboot_instance(context, instance, block_device_info, reboot_type) do_reboot_instance(context, instance, block_device_info, reboot_type) def _reboot_instance(self, context, instance, block_device_info, reboot_type): """Reboot an instance on this host.""" # acknowledge the request made it to the manager if reboot_type == "SOFT": instance.task_state = task_states.REBOOT_PENDING expected_states = task_states.soft_reboot_states else: instance.task_state = task_states.REBOOT_PENDING_HARD expected_states = task_states.hard_reboot_states context = context.elevated() LOG.info("Rebooting instance", instance=instance) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) self._delete_dangling_bdms(context, instance, bdms) block_device_info = self._get_instance_block_device_info( context, instance, bdms=bdms) network_info = self.network_api.get_instance_nw_info(context, instance) accel_info = self._get_accel_info(context, instance) share_info = self._get_share_info(context, instance) self._notify_about_instance_usage(context, instance, "reboot.start") compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.REBOOT, phase=fields.NotificationPhase.START, bdms=bdms ) instance.power_state = self._get_power_state(instance) instance.save(expected_task_state=expected_states) if instance.power_state != power_state.RUNNING: state = instance.power_state running = power_state.RUNNING LOG.warning('trying to reboot a non-running instance:' ' (state: %(state)s expected: %(running)s)', {'state': state, 'running': running}, instance=instance) def bad_volumes_callback(bad_devices): self._handle_bad_volumes_detached( context, instance, bad_devices, block_device_info) try: # Don't change it out of rescue mode if instance.vm_state == vm_states.RESCUED: new_vm_state = vm_states.RESCUED else: new_vm_state = vm_states.ACTIVE new_power_state = None if reboot_type == "SOFT": 
instance.task_state = task_states.REBOOT_STARTED expected_state = task_states.REBOOT_PENDING else: instance.task_state = task_states.REBOOT_STARTED_HARD expected_state = task_states.REBOOT_PENDING_HARD instance.save(expected_task_state=expected_state) # Attempt to mount the shares again. # Note: The API ref states that soft reboot can only be # done if the instance is in ACTIVE state. If the instance # is in ACTIVE state it cannot have a share_mapping in ERROR # so it is safe to ignore the re-mounting of the share for # soft reboot. if reboot_type == "HARD": self._mount_all_shares(context, instance, share_info) self.driver.reboot(context, instance, network_info, reboot_type, block_device_info=block_device_info, accel_info=accel_info, share_info=share_info, bad_volumes_callback=bad_volumes_callback) share_info.activate_all() except Exception as error: with excutils.save_and_reraise_exception() as ctxt: exc_info = sys.exc_info() # if the reboot failed but the VM is running don't # put it into an error state new_power_state = self._get_power_state(instance) if new_power_state == power_state.RUNNING: LOG.warning('Reboot failed but instance is running', instance=instance) compute_utils.add_instance_fault_from_exc(context, instance, error, exc_info) self._notify_about_instance_usage(context, instance, 'reboot.error', fault=error) compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.REBOOT, phase=fields.NotificationPhase.ERROR, exception=error, bdms=bdms ) ctxt.reraise = False else: LOG.error('Cannot reboot instance: %s', error, instance=instance) self._set_instance_obj_error_state(instance) if not new_power_state: new_power_state = self._get_power_state(instance) try: instance.power_state = new_power_state instance.vm_state = new_vm_state instance.task_state = None instance.save() except exception.InstanceNotFound: LOG.warning("Instance disappeared during reboot", instance=instance) self._notify_about_instance_usage(context, instance, "reboot.end") compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.REBOOT, phase=fields.NotificationPhase.END, bdms=bdms ) @delete_image_on_error def _do_snapshot_instance(self, context, image_id, instance): self._snapshot_instance(context, image_id, instance, task_states.IMAGE_BACKUP) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def backup_instance(self, context, image_id, instance, backup_type, rotation): """Backup an instance on this host. :param backup_type: daily | weekly :param rotation: int representing how many backups to keep around """ self._do_snapshot_instance(context, image_id, instance) self._rotate_backups(context, instance, backup_type, rotation) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault @delete_image_on_error def snapshot_instance(self, context, image_id, instance): """Snapshot an instance on this host. 
:param context: security context :param image_id: glance.db.sqlalchemy.models.Image.Id :param instance: a nova.objects.instance.Instance object """ # NOTE(dave-mcnally) the task state will already be set by the api # but if the compute manager has crashed/been restarted prior to the # request getting here the task state may have been cleared so we set # it again and things continue normally try: instance.task_state = task_states.IMAGE_SNAPSHOT instance.save( expected_task_state=task_states.IMAGE_SNAPSHOT_PENDING) except exception.InstanceNotFound: # possibility instance no longer exists, no point in continuing LOG.debug("Instance not found, could not set state %s " "for instance.", task_states.IMAGE_SNAPSHOT, instance=instance) return except exception.UnexpectedDeletingTaskStateError: LOG.debug("Instance being deleted, snapshot cannot continue", instance=instance) return with self._snapshot_semaphore: self._snapshot_instance(context, image_id, instance, task_states.IMAGE_SNAPSHOT) def _snapshot_instance(self, context, image_id, instance, expected_task_state): context = context.elevated() instance.power_state = self._get_power_state(instance) try: instance.save() LOG.info('instance snapshotting', instance=instance) if instance.power_state != power_state.RUNNING: state = instance.power_state running = power_state.RUNNING LOG.warning('trying to snapshot a non-running instance: ' '(state: %(state)s expected: %(running)s)', {'state': state, 'running': running}, instance=instance) self._notify_about_instance_usage( context, instance, "snapshot.start") compute_utils.notify_about_instance_snapshot(context, instance, self.host, phase=fields.NotificationPhase.START, snapshot_image_id=image_id) def update_task_state(task_state, expected_state=expected_task_state): instance.task_state = task_state instance.save(expected_task_state=expected_state) with timeutils.StopWatch() as timer: self.driver.snapshot(context, instance, image_id, update_task_state) LOG.info('Took %0.2f seconds to snapshot the instance on ' 'the hypervisor.', timer.elapsed(), instance=instance) instance.task_state = None instance.save(expected_task_state=task_states.IMAGE_UPLOADING) self._notify_about_instance_usage(context, instance, "snapshot.end") compute_utils.notify_about_instance_snapshot(context, instance, self.host, phase=fields.NotificationPhase.END, snapshot_image_id=image_id) except (exception.InstanceNotFound, exception.InstanceNotRunning, exception.UnexpectedDeletingTaskStateError): # the instance got deleted during the snapshot # Quickly bail out of here msg = 'Instance disappeared during snapshot' LOG.debug(msg, instance=instance) try: image = self.image_api.get(context, image_id) if image['status'] != 'active': self.image_api.delete(context, image_id) except exception.ImageNotFound: LOG.debug('Image not found during clean up %s', image_id) except Exception: LOG.warning("Error while trying to clean up image %s", image_id, instance=instance) except exception.ImageNotFound: instance.task_state = None instance.save() LOG.warning("Image not found during snapshot", instance=instance) @messaging.expected_exceptions(NotImplementedError) @wrap_exception() def volume_snapshot_create(self, context, instance, volume_id, create_info): try: self.driver.volume_snapshot_create(context, instance, volume_id, create_info) except exception.InstanceNotRunning: # Libvirt driver can raise this exception LOG.debug('Instance disappeared during volume snapshot create', instance=instance) @messaging.expected_exceptions(NotImplementedError) 
@wrap_exception() def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id, delete_info): try: self.driver.volume_snapshot_delete(context, instance, volume_id, snapshot_id, delete_info) except exception.InstanceNotRunning: # Libvirt driver can raise this exception LOG.debug('Instance disappeared during volume snapshot delete', instance=instance) @messaging.expected_exceptions(NotImplementedError) @wrap_exception() @wrap_instance_event(prefix='compute') @wrap_instance_fault def allow_share(self, context, instance, share_mapping): @utils.synchronized(share_mapping.share_id) def _allow_share(context, instance, share_mapping): def _apply_policy(): # self.manila_api.lock(share_mapping.share_id) # Explicitly locking the share is not needed as # create_access_rule() from the sdk will do it if the # lock_visibility and lock_deletion flags are passed self.manila_api.allow( context, share_mapping.share_id, share_mapping.access_type, share_mapping.access_to, "rw", ) def _wait_policy_to_be_applied(): # Ensure the share policy is updated, this will avoid # a race condition mounting the share if it is not the case. max_retries = CONF.manila.share_apply_policy_timeout attempt_count = 0 while attempt_count < max_retries: if self.manila_api.has_access( context, share_mapping.share_id, share_mapping.access_type, share_mapping.access_to, ): LOG.debug( "Allow policy set on share %s ", share_mapping.share_id, ) break else: LOG.debug( "Waiting policy to be set on share %s ", share_mapping.share_id, ) time.sleep(1) attempt_count += 1 if attempt_count >= max_retries: raise exception.ShareAccessGrantError( share_id=share_mapping.share_id, reason="Failed to set allow policy on share, " "too many retries", ) try: compute_utils.notify_about_share_attach_detach( context, instance, instance.host, action=fields.NotificationAction.SHARE_ATTACH, phase=fields.NotificationPhase.START, share_id=share_mapping.share_id ) share_mapping.set_access_according_to_protocol() if not self.manila_api.has_access( context, share_mapping.share_id, share_mapping.access_type, share_mapping.access_to, ): _apply_policy() _wait_policy_to_be_applied() # Set the share from attaching to inactive self._set_share_mapping_status( share_mapping, fields.ShareMappingStatus.INACTIVE ) compute_utils.notify_about_share_attach_detach( context, instance, instance.host, action=fields.NotificationAction.SHARE_ATTACH, phase=fields.NotificationPhase.END, share_id=share_mapping.share_id ) except ( exception.ShareNotFound, exception.ShareProtocolNotSupported, exception.ShareAccessGrantError, ) as e: self._set_share_mapping_status( share_mapping, fields.ShareMappingStatus.ERROR ) compute_utils.notify_about_share_attach_detach( context, instance, instance.host, action=fields.NotificationAction.SHARE_ATTACH, phase=fields.NotificationPhase.ERROR, share_id=share_mapping.share_id, exception=e ) LOG.error(e.format_message()) raise except ( sdk_exc.BadRequestException, ) as e: self._set_share_mapping_status( share_mapping, fields.ShareMappingStatus.ERROR ) compute_utils.notify_about_share_attach_detach( context, instance, instance.host, action=fields.NotificationAction.SHARE_ATTACH, phase=fields.NotificationPhase.ERROR, share_id=share_mapping.share_id, exception=e ) LOG.error( "%s: %s error from url: %s, %s", e.message, e.source, e.url, e.details, ) raise except keystone_exception.http.Unauthorized as e: self._set_share_mapping_status( share_mapping, fields.ShareMappingStatus.ERROR ) compute_utils.notify_about_share_attach_detach( context, instance, 
instance.host, action=fields.NotificationAction.SHARE_ATTACH, phase=fields.NotificationPhase.ERROR, share_id=share_mapping.share_id, exception=e ) LOG.error(e) raise _allow_share(context, instance, share_mapping) @messaging.expected_exceptions(NotImplementedError) @wrap_exception() @wrap_instance_event(prefix='compute') @wrap_instance_fault def deny_share(self, context, instance, share_mapping): @utils.synchronized(share_mapping.share_id) def _deny_share(context, instance, share_mapping): def check_share_usage(context, instance_uuid): share_mappings_used_by_share = ( objects.share_mapping.ShareMappingList.get_by_share_id( context, share_mapping.share_id ) ) # Logic explanation: # # Warning: Here we have a list of share_mapping using our # share (usually share_mappings is a list of share_mapping used # by an instance). # A share IS NOT used (detachable) if: # - The share status is INACTIVE or ERROR on our instance. # - The share status is DETACHING on all other instances. # +-- reverse the logic as the function check if a share # | IS used. # v return not all( ( ( sm.instance_uuid == instance_uuid and ( sm.status in ( fields.ShareMappingStatus.INACTIVE, fields.ShareMappingStatus.ERROR, ) ) ) or sm.status == fields.ShareMappingStatus.DETACHING ) for sm in share_mappings_used_by_share ) try: compute_utils.notify_about_share_attach_detach( context, instance, instance.host, action=fields.NotificationAction.SHARE_DETACH, phase=fields.NotificationPhase.START, share_id=share_mapping.share_id, ) still_used = check_share_usage(context, instance.uuid) share_mapping.set_access_according_to_protocol() if not still_used: # self.manila_api.unlock(share_mapping.share_id) # Explicit unlocking the share is not needed as # delete_access_rule() from the sdk will do it if the # "unrestrict" parameter is passed self.manila_api.deny( context, share_mapping.share_id, share_mapping.access_type, share_mapping.access_to, ) share_mapping.delete() compute_utils.notify_about_share_attach_detach( context, instance, instance.host, action=fields.NotificationAction.SHARE_DETACH, phase=fields.NotificationPhase.END, share_id=share_mapping.share_id, ) except ( exception.ShareAccessRemovalError, exception.ShareProtocolNotSupported, ) as e: self._set_share_mapping_status( share_mapping, fields.ShareMappingStatus.ERROR ) compute_utils.notify_about_share_attach_detach( context, instance, instance.host, action=fields.NotificationAction.SHARE_DETACH, phase=fields.NotificationPhase.ERROR, share_id=share_mapping.share_id, exception=e ) LOG.error(e.format_message()) raise except keystone_exception.http.Unauthorized as e: self._set_share_mapping_status( share_mapping, fields.ShareMappingStatus.ERROR ) compute_utils.notify_about_share_attach_detach( context, instance, instance.host, action=fields.NotificationAction.SHARE_DETACH, phase=fields.NotificationPhase.ERROR, share_id=share_mapping.share_id, exception=e ) LOG.error(e) raise except (exception.ShareNotFound, exception.ShareAccessNotFound): # Ignore the error if for any reason there is nothing to # remove from manila, so we can still detach the share. 
share_mapping.delete() compute_utils.notify_about_share_attach_detach( context, instance, instance.host, action=fields.NotificationAction.SHARE_DETACH, phase=fields.NotificationPhase.END, share_id=share_mapping.share_id, ) _deny_share(context, instance, share_mapping) @wrap_exception() def _mount_all_shares(self, context, instance, share_info): for share_mapping in share_info: self._mount_share(context, instance, share_mapping) @wrap_exception() def _umount_all_shares(self, context, instance, share_info): for share_mapping in share_info: self._umount_share(context, instance, share_mapping) @wrap_exception() def _mount_share(self, context, instance, share_mapping): @utils.synchronized(share_mapping.share_id) def _mount_share(context, instance, share_mapping): try: share_mapping.set_access_according_to_protocol() if share_mapping.share_proto == ( fields.ShareMappingProto.CEPHFS): share_mapping.enhance_with_ceph_credentials(context) LOG.debug("Mounting share %s", share_mapping.share_id) self.driver.mount_share(context, instance, share_mapping) except ( exception.ShareNotFound, exception.ShareProtocolNotSupported, exception.ShareMountError, ) as e: self._set_share_mapping_and_instance_in_error( instance, share_mapping ) LOG.error(e.format_message()) raise except (sdk_exc.BadRequestException) as e: self._set_share_mapping_and_instance_in_error( instance, share_mapping ) LOG.error("%s: %s error from url: %s, %s", e.message, e.source, e.url, e.details) raise _mount_share(context, instance, share_mapping) @wrap_exception() def _umount_share(self, context, instance, share_mapping): @utils.synchronized(share_mapping.share_id) def _umount_share(context, instance, share_mapping): try: share_mapping.set_access_according_to_protocol() if share_mapping.share_proto == ( fields.ShareMappingProto.CEPHFS): share_mapping.enhance_with_ceph_credentials(context) self.driver.umount_share(context, instance, share_mapping) except ( exception.ShareNotFound, exception.ShareUmountError, exception.ShareProtocolNotSupported, ) as e: self._set_share_mapping_and_instance_in_error( instance, share_mapping ) LOG.error(e.format_message()) raise _umount_share(context, instance, share_mapping) def _set_share_mapping_status(self, share_mapping, status): share_mapping.status = status share_mapping.save() def _set_share_mapping_and_instance_in_error( self, instance, share_mapping ): share_mapping.status = fields.ShareMappingStatus.ERROR share_mapping.save() self._set_instance_obj_error_state( instance, clean_task_state=True ) @wrap_instance_fault def _rotate_backups(self, context, instance, backup_type, rotation): """Delete excess backups associated to an instance. Instances are allowed a fixed number of backups (the rotation number); this method deletes the oldest backups that exceed the rotation threshold. :param context: security context :param instance: Instance dict :param backup_type: a user-defined type, like "daily" or "weekly" etc. 
:param rotation: int representing how many backups to keep around; None if rotation shouldn't be used (as in the case of snapshots) """ filters = {'property-image_type': 'backup', 'property-backup_type': backup_type, 'property-instance_uuid': instance.uuid} images = self.image_api.get_all(context, filters=filters, sort_key='created_at', sort_dir='desc') num_images = len(images) LOG.debug("Found %(num_images)d images (rotation: %(rotation)d)", {'num_images': num_images, 'rotation': rotation}, instance=instance) if num_images > rotation: # NOTE(sirp): this deletes all backups that exceed the rotation # limit excess = len(images) - rotation LOG.debug("Rotating out %d backups", excess, instance=instance) for i in range(excess): image = images.pop() image_id = image['id'] LOG.debug("Deleting image %s", image_id, instance=instance) try: self.image_api.delete(context, image_id) except exception.ImageNotFound: LOG.info("Failed to find image %(image_id)s to " "delete", {'image_id': image_id}, instance=instance) except (exception.ImageDeleteConflict, Exception) as exc: LOG.info("Failed to delete image %(image_id)s during " "deleting excess backups. " "Continuing for next image.. %(exc)s", {'image_id': image_id, 'exc': exc}, instance=instance) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def set_admin_password(self, context, instance, new_pass): """Set the root/admin password for an instance on this host. This is generally only called by API password resets after an image has been built. @param context: Nova auth context. @param instance: Nova instance object. @param new_pass: The admin password for the instance. """ context = context.elevated() current_power_state = self._get_power_state(instance) expected_state = power_state.RUNNING if current_power_state != expected_state: instance.task_state = None instance.save(expected_task_state=task_states.UPDATING_PASSWORD) _msg = _('instance %s is not running') % instance.uuid raise exception.InstancePasswordSetFailed( instance=instance.uuid, reason=_msg) try: self.driver.set_admin_password(instance, new_pass) LOG.info("Admin password set", instance=instance) instance.task_state = None instance.save( expected_task_state=task_states.UPDATING_PASSWORD) except exception.InstanceAgentNotEnabled: with excutils.save_and_reraise_exception(): LOG.debug('Guest agent is not enabled for the instance.', instance=instance) instance.task_state = None instance.save( expected_task_state=task_states.UPDATING_PASSWORD) except exception.SetAdminPasswdNotSupported: with excutils.save_and_reraise_exception(): LOG.info('set_admin_password is not supported ' 'by this driver or guest instance.', instance=instance) instance.task_state = None instance.save( expected_task_state=task_states.UPDATING_PASSWORD) except NotImplementedError: LOG.warning('set_admin_password is not implemented ' 'by this driver or guest instance.', instance=instance) instance.task_state = None instance.save( expected_task_state=task_states.UPDATING_PASSWORD) raise NotImplementedError(_('set_admin_password is not ' 'implemented by this driver or guest ' 'instance.')) except exception.UnexpectedTaskStateError: # interrupted by another (most likely delete) task # do not retry raise except Exception: # Catch all here because this could be anything. LOG.exception('set_admin_password failed', instance=instance) # We create a new exception here so that we won't # potentially reveal password information to the # API caller. 
The real exception is logged above _msg = _('error setting admin password') raise exception.InstancePasswordSetFailed( instance=instance.uuid, reason=_msg) def _get_rescue_image(self, context, instance, rescue_image_ref=None): """Determine what image should be used to boot the rescue VM.""" # 1. If rescue_image_ref is passed in, use that for rescue. # 2. Else, use the base image associated with instance's current image. # The idea here is to provide the customer with a rescue # environment which they are familiar with. # So, if they built their instance off of a Debian image, # their rescue VM will also be Debian. # 3. As a last resort, use instance's current image. if not rescue_image_ref: system_meta = utils.instance_sys_meta(instance) rescue_image_ref = system_meta.get('image_base_image_ref') if not rescue_image_ref: LOG.warning('Unable to find a different image to use for ' 'rescue VM, using instance\'s current image', instance=instance) rescue_image_ref = instance.image_ref return objects.ImageMeta.from_image_ref( context, self.image_api, rescue_image_ref) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def rescue_instance(self, context, instance, rescue_password, rescue_image_ref, clean_shutdown): context = context.elevated() LOG.info('Rescuing', instance=instance) admin_password = (rescue_password if rescue_password else utils.generate_password()) network_info = self.network_api.get_instance_nw_info(context, instance) rescue_image_meta = self._get_rescue_image(context, instance, rescue_image_ref) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) block_device_info = self._get_instance_block_device_info( context, instance, bdms=bdms) share_info = self._get_share_info(context, instance) extra_usage_info = {'rescue_image_name': self._get_image_name(rescue_image_meta)} self._notify_about_instance_usage(context, instance, "rescue.start", extra_usage_info=extra_usage_info, network_info=network_info) compute_utils.notify_about_instance_rescue_action( context, instance, self.host, rescue_image_ref, phase=fields.NotificationPhase.START) try: self._power_off_instance(context, instance, clean_shutdown) self._mount_all_shares(context, instance, share_info) self.driver.rescue(context, instance, network_info, rescue_image_meta, admin_password, block_device_info, share_info) except Exception as e: LOG.exception("Error trying to Rescue Instance", instance=instance) self._set_instance_obj_error_state(instance) raise exception.InstanceNotRescuable( instance_id=instance.uuid, reason=_("Driver Error: %s") % e) compute_utils.notify_usage_exists(self.notifier, context, instance, self.host, current_period=True) instance.vm_state = vm_states.RESCUED instance.task_state = None instance.power_state = self._get_power_state(instance) instance.launched_at = timeutils.utcnow() instance.save(expected_task_state=task_states.RESCUING) self._notify_about_instance_usage(context, instance, "rescue.end", extra_usage_info=extra_usage_info, network_info=network_info) compute_utils.notify_about_instance_rescue_action( context, instance, self.host, rescue_image_ref, phase=fields.NotificationPhase.END) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def unrescue_instance(self, context, instance): orig_context = context context = context.elevated() LOG.info('Unrescuing', instance=instance) network_info = self.network_api.get_instance_nw_info(context, instance) 
self._notify_about_instance_usage(context, instance, "unrescue.start", network_info=network_info) compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.UNRESCUE, phase=fields.NotificationPhase.START) with self._error_out_instance_on_exception(context, instance): self.driver.unrescue(orig_context, instance) instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.power_state = self._get_power_state(instance) instance.save(expected_task_state=task_states.UNRESCUING) self._notify_about_instance_usage(context, instance, "unrescue.end", network_info=network_info) compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.UNRESCUE, phase=fields.NotificationPhase.END) @wrap_exception() @wrap_instance_event(prefix='compute') @errors_out_migration @wrap_instance_fault def confirm_resize(self, context, instance, migration): """Confirms a migration/resize and deletes the 'old' instance. This is called from the API and runs on the source host. Nothing needs to happen on the destination host at this point since the instance is already running there. This routine just cleans up the source host. """ @utils.synchronized(instance.uuid) def do_confirm_resize(context, instance, migration): LOG.debug("Going to confirm migration %s", migration.id, instance=instance) if migration.status == 'confirmed': LOG.info("Migration %s is already confirmed", migration.id, instance=instance) return if migration.status not in ('finished', 'confirming'): LOG.warning("Unexpected confirmation status '%(status)s' " "of migration %(id)s, exit confirmation process", {"status": migration.status, "id": migration.id}, instance=instance) return # NOTE(wangpan): Get the instance from db, if it has been # deleted, we do nothing and return here expected_attrs = ['metadata', 'system_metadata', 'flavor'] try: instance = objects.Instance.get_by_uuid( context, instance.uuid, expected_attrs=expected_attrs) except exception.InstanceNotFound: LOG.info("Instance is not found during confirmation", instance=instance) return with self._error_out_instance_on_exception(context, instance): try: self._confirm_resize( context, instance, migration=migration) except Exception: # Something failed when cleaning up the source host so # log a traceback and leave a hint about hard rebooting # the server to correct its state in the DB. with excutils.save_and_reraise_exception(logger=LOG): LOG.exception( 'Confirm resize failed on source host %s. ' 'Resource allocations in the placement service ' 'will be removed regardless because the instance ' 'is now on the destination host %s. You can try ' 'hard rebooting the instance to correct its ' 'state.', self.host, migration.dest_compute, instance=instance) finally: # Whether an error occurred or not, at this point the # instance is on the dest host. Avoid leaking allocations # in placement by deleting them here... self._delete_allocation_after_move( context, instance, migration) # ...inform the scheduler about the move... self._delete_scheduler_instance_info( context, instance.uuid) # ...and unset the cached flavor information (this is done # last since the resource tracker relies on it for its # periodic tasks) self._delete_stashed_flavor_info(instance) do_confirm_resize(context, instance, migration) def _get_updated_nw_info_with_pci_mapping(self, nw_info, pci_mapping): # NOTE(adrianc): This method returns a copy of nw_info if modifications # are made else it returns the original nw_info. 
updated_nw_info = nw_info if nw_info and pci_mapping: updated_nw_info = copy.deepcopy(nw_info) for vif in updated_nw_info: if vif['vnic_type'] in network_model.VNIC_TYPES_SRIOV: try: vif_pci_addr = vif['profile']['pci_slot'] new_addr = pci_mapping[vif_pci_addr].address vif['profile']['pci_slot'] = new_addr LOG.debug("Updating VIF's PCI address for VIF %(id)s. " "Original value %(orig_val)s, " "new value %(new_val)s", {'id': vif['id'], 'orig_val': vif_pci_addr, 'new_val': new_addr}) except (KeyError, AttributeError): with excutils.save_and_reraise_exception(): # NOTE(adrianc): This should never happen. If we # get here it means there is some inconsistency # with either 'nw_info' or 'pci_mapping'. LOG.error("Unexpected error when updating network " "information with PCI mapping.") return updated_nw_info def _confirm_resize(self, context, instance, migration=None): """Destroys the source instance.""" self._notify_about_instance_usage(context, instance, "resize.confirm.start") compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.RESIZE_CONFIRM, phase=fields.NotificationPhase.START) # NOTE(tr3buchet): tear down networks on source host self.network_api.setup_networks_on_host(context, instance, migration.source_compute, teardown=True) # TODO(stephenfin): These next three calls should be bundled network_info = self.network_api.get_instance_nw_info(context, instance) # NOTE(adrianc): Populate old PCI device in VIF profile # to allow virt driver to properly unplug it from Hypervisor. pci_mapping = (instance.migration_context. get_pci_mapping_for_migration(True)) network_info = self._get_updated_nw_info_with_pci_mapping( network_info, pci_mapping) self.driver.confirm_migration(context, migration, instance, network_info) # Free up the old_flavor usage from the resource tracker for this host. self.rt.drop_move_claim_at_source(context, instance, migration) # NOTE(mriedem): The old_vm_state could be STOPPED but the user # might have manually powered up the instance to confirm the # resize/migrate, so we need to check the current power state # on the instance and set the vm_state appropriately. We default # to ACTIVE because if the power state is not SHUTDOWN, we # assume _sync_instance_power_state will clean it up. p_state = instance.power_state vm_state = None if p_state == power_state.SHUTDOWN: vm_state = vm_states.STOPPED LOG.debug("Resized/migrated instance is powered off. " "Setting vm_state to '%s'.", vm_state, instance=instance) else: vm_state = vm_states.ACTIVE instance.vm_state = vm_state instance.task_state = None instance.save(expected_task_state=[None, task_states.DELETING, task_states.SOFT_DELETING]) self._notify_about_instance_usage( context, instance, "resize.confirm.end", network_info=network_info) compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.RESIZE_CONFIRM, phase=fields.NotificationPhase.END) def _delete_allocation_after_move(self, context, instance, migration): """Deletes resource allocations held by the migration record against the source compute node resource provider after a confirmed cold / successful live migration. 
""" try: # NOTE(danms): We're finishing on the source node, so try # to delete the allocation based on the migration uuid self.reportclient.delete_allocation_for_instance( context, migration.uuid, consumer_type='migration', force=False) except exception.AllocationDeleteFailed: LOG.error('Deleting allocation in placement for migration ' '%(migration_uuid)s failed. The instance ' '%(instance_uuid)s will be put to ERROR state ' 'but the allocation held by the migration is ' 'leaked.', {'instance_uuid': instance.uuid, 'migration_uuid': migration.uuid}) raise def _delete_stashed_flavor_info(self, instance): """Remove information about the flavor change after a resize.""" instance.old_flavor = None instance.new_flavor = None instance.system_metadata.pop('old_vm_state', None) instance.save() @wrap_exception() @wrap_instance_event(prefix='compute') @errors_out_migration @wrap_instance_fault def confirm_snapshot_based_resize_at_source( self, ctxt, instance, migration): """Confirms a snapshot-based resize on the source host. Cleans the guest from the source hypervisor including disks and drops the MoveClaim which will free up "old_flavor" usage from the ResourceTracker. Deletes the allocations held by the migration consumer against the source compute node resource provider. :param ctxt: nova auth request context targeted at the source cell :param instance: Instance object being resized which should have the "old_flavor" attribute set :param migration: Migration object for the resize operation """ @utils.synchronized(instance.uuid) def do_confirm(): LOG.info('Confirming resize on source host.', instance=instance) with self._error_out_instance_on_exception(ctxt, instance): # TODO(mriedem): Could probably make this try/except/finally # a context manager to share with confirm_resize(). try: self._confirm_snapshot_based_resize_at_source( ctxt, instance, migration) except Exception: # Something failed when cleaning up the source host so # log a traceback and leave a hint about hard rebooting # the server to correct its state in the DB. with excutils.save_and_reraise_exception(logger=LOG): LOG.exception( 'Confirm resize failed on source host %s. ' 'Resource allocations in the placement service ' 'will be removed regardless because the instance ' 'is now on the destination host %s. You can try ' 'hard rebooting the instance to correct its ' 'state.', self.host, migration.dest_compute, instance=instance) finally: # Whether an error occurred or not, at this point the # instance is on the dest host so to avoid leaking # allocations in placement, delete them here. # TODO(mriedem): Should we catch and just log # AllocationDeleteFailed? What is the user's recourse if # we got this far but this fails? At this point the # instance is on the target host and the allocations # could just be manually cleaned up by the operator. self._delete_allocation_after_move(ctxt, instance, migration) do_confirm() def _confirm_snapshot_based_resize_at_source( self, ctxt, instance, migration): """Private version of confirm_snapshot_based_resize_at_source This allows the main method to be decorated with error handlers. :param ctxt: nova auth request context targeted at the source cell :param instance: Instance object being resized which should have the "old_flavor" attribute set :param migration: Migration object for the resize operation """ # Cleanup the guest from the hypervisor including local disks. 
network_info = self.network_api.get_instance_nw_info(ctxt, instance) LOG.debug('Cleaning up guest from source hypervisor including disks.', instance=instance) # FIXME(mriedem): Per bug 1809095, _confirm_resize calls # _get_updated_nw_info_with_pci_mapping here prior to unplugging # VIFs on the source, but in our case we have already unplugged # VIFs during prep_snapshot_based_resize_at_source, so what do we # need to do about those kinds of ports? Do we need to wait to unplug # VIFs until confirm like normal resize? # Note that prep_snapshot_based_resize_at_source already destroyed the # guest which disconnected volumes and unplugged VIFs but did not # destroy disks in case something failed during the resize and the # instance needed to be rebooted or rebuilt on the source host. Now # that we are confirming the resize we want to cleanup the disks left # on the source host. We call cleanup() instead of destroy() to avoid # any InstanceNotFound confusion from the driver since the guest was # already destroyed on this host. block_device_info=None and # destroy_vifs=False means cleanup() will not try to disconnect volumes # or unplug VIFs. self.driver.cleanup( ctxt, instance, network_info, block_device_info=None, destroy_disks=True, destroy_vifs=False) # Delete port bindings for the source host. self._confirm_snapshot_based_resize_delete_port_bindings( ctxt, instance) # Delete volume attachments for the source host. self._delete_volume_attachments(ctxt, instance.get_bdms()) # Free up the old_flavor usage from the resource tracker for this host. self.rt.drop_move_claim_at_source(ctxt, instance, migration) def _confirm_snapshot_based_resize_delete_port_bindings( self, ctxt, instance): """Delete port bindings for the source host when confirming snapshot-based resize on the source host." :param ctxt: nova auth RequestContext :param instance: Instance object that was resized/cold migrated """ LOG.debug('Deleting port bindings for source host.', instance=instance) try: self.network_api.cleanup_instance_network_on_host( ctxt, instance, self.host) except exception.PortBindingDeletionFailed as e: # Do not let this stop us from cleaning up since the guest # is already gone. LOG.error('Failed to delete port bindings from source host. ' 'Error: %s', str(e), instance=instance) def _delete_volume_attachments(self, ctxt, bdms): """Deletes volume attachment records for the given bdms. This method will log but not re-raise any exceptions if the volume attachment delete fails. :param ctxt: nova auth request context used to make DELETE /attachments/{attachment_id} requests to cinder. :param bdms: objects.BlockDeviceMappingList representing volume attachments to delete based on BlockDeviceMapping.attachment_id. """ for bdm in bdms: if bdm.attachment_id: try: self.volume_api.attachment_delete(ctxt, bdm.attachment_id) except Exception as e: LOG.error('Failed to delete volume attachment with ID %s. 
' 'Error: %s', bdm.attachment_id, str(e), instance_uuid=bdm.instance_uuid) def _update_bdm_for_swap_to_finish_resize( self, context, instance, confirm=True): """This updates bdm.swap with new swap info""" bdms = instance.get_bdms() if not (instance.old_flavor and instance.new_flavor): return bdms if instance.old_flavor.swap == instance.new_flavor.swap: return bdms old_swap = instance.old_flavor.swap new_swap = instance.new_flavor.swap if not confirm: # revert flavor on _finish_revert_resize old_swap = instance.new_flavor.swap new_swap = instance.old_flavor.swap # add swap if old_swap == 0 and new_swap: # (auniyal)old_swap = 0 means we did not have swap bdm # for this instance. # and as there is a new_swap, its a swap addition new_swap_bdm = block_device.create_blank_bdm(new_swap, 'swap') bdm_obj = objects.BlockDeviceMapping( context, instance_uuid=instance.uuid, **new_swap_bdm) bdm_obj.update_or_create() return instance.get_bdms() # update swap for bdm in bdms: if bdm.guest_format == 'swap' and bdm.device_type == 'disk': if new_swap > 0: LOG.info('Adding swap BDM.', instance=instance) bdm.volume_size = new_swap bdm.save() break elif new_swap == 0: LOG.info('Deleting swap BDM.', instance=instance) bdm.destroy() bdms.objects.remove(bdm) break return bdms @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @errors_out_migration @wrap_instance_fault def revert_snapshot_based_resize_at_dest(self, ctxt, instance, migration): """Reverts a snapshot-based resize at the destination host. Cleans the guest from the destination compute service host hypervisor and related resources (ports, volumes) and frees resource usage from the compute service on that host. :param ctxt: nova auth request context targeted at the target cell :param instance: Instance object whose vm_state is "resized" and task_state is "resize_reverting". :param migration: Migration object whose status is "reverting". """ # A resize revert is essentially a resize back to the old size, so we # need to send a usage event here. compute_utils.notify_usage_exists( self.notifier, ctxt, instance, self.host, current_period=True) @utils.synchronized(instance.uuid) def do_revert(): LOG.info('Reverting resize on destination host.', instance=instance) with self._error_out_instance_on_exception(ctxt, instance): self._revert_snapshot_based_resize_at_dest( ctxt, instance, migration) do_revert() # Broadcast to all schedulers that the instance is no longer on # this host and clear any waiting callback events. This is best effort # so if anything fails just log it. try: self._delete_scheduler_instance_info(ctxt, instance.uuid) self.instance_events.clear_events_for_instance(instance) except Exception as e: LOG.warning('revert_snapshot_based_resize_at_dest failed during ' 'post-processing. Error: %s', e, instance=instance) def _revert_snapshot_based_resize_at_dest( self, ctxt, instance, migration): """Private version of revert_snapshot_based_resize_at_dest. This allows the main method to be decorated with error handlers. :param ctxt: nova auth request context targeted at the target cell :param instance: Instance object whose vm_state is "resized" and task_state is "resize_reverting". :param migration: Migration object whose status is "reverting". """ # Cleanup the guest from the hypervisor including local disks. 
network_info = self.network_api.get_instance_nw_info(ctxt, instance) bdms = instance.get_bdms() block_device_info = self._get_instance_block_device_info( ctxt, instance, bdms=bdms) LOG.debug('Destroying guest from destination hypervisor including ' 'disks.', instance=instance) self.driver.destroy( ctxt, instance, network_info, block_device_info=block_device_info) # Activate source host port bindings. We need to do this before # deleting the (active) dest host port bindings in # setup_networks_on_host otherwise the ports will be unbound and # finish on the source will fail. # migrate_instance_start uses migration.dest_compute for the port # binding host and since we want to activate the source host port # bindings, we need to temporarily mutate the migration object. with utils.temporary_mutation( migration, dest_compute=migration.source_compute): LOG.debug('Activating port bindings for source host %s.', migration.source_compute, instance=instance) # TODO(mriedem): https://review.opendev.org/#/c/594139/ would allow # us to remove this and make setup_networks_on_host do it. # TODO(mriedem): Should we try/except/log any errors but continue? self.network_api.migrate_instance_start( ctxt, instance, migration) # Delete port bindings for the target host. LOG.debug('Deleting port bindings for target host %s.', self.host, instance=instance) try: # Note that deleting the destination host port bindings does # not automatically activate the source host port bindings. self.network_api.cleanup_instance_network_on_host( ctxt, instance, self.host) except exception.PortBindingDeletionFailed as e: # Do not let this stop us from cleaning up since the guest # is already gone. LOG.error('Failed to delete port bindings from target host. ' 'Error: %s', str(e), instance=instance) # Delete any volume attachments remaining for this target host. LOG.debug('Deleting volume attachments for target host.', instance=instance) self._delete_volume_attachments(ctxt, bdms) # Free up the new_flavor usage from the resource tracker for this host. self.rt.drop_move_claim_at_dest(ctxt, instance, migration) def _revert_instance_flavor_host_node(self, instance, migration): """Revert host, node and flavor fields after a resize-revert.""" self._set_instance_info(instance, instance.old_flavor) instance.host = migration.source_compute # NOTE(danms): This could fail if we somehow ended up on the wrong # host without the desired node. That's a big problem, so let the # ComputeHostNotFound raise from here. cn = self.rt.get_node_by_name(migration.source_node) instance.node = cn.hypervisor_hostname instance.compute_id = cn.id instance.save(expected_task_state=[task_states.RESIZE_REVERTING]) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @errors_out_migration @wrap_instance_fault def finish_revert_snapshot_based_resize_at_source( self, ctxt, instance, migration): """Reverts a snapshot-based resize at the source host. Spawn the guest and re-connect volumes/VIFs on the source host and revert the instance to use the old_flavor for resource usage reporting. Updates allocations in the placement service to move the source node allocations, held by the migration record, to the instance and drop the allocations held by the instance on the destination node. :param ctxt: nova auth request context targeted at the target cell :param instance: Instance object whose vm_state is "resized" and task_state is "resize_reverting". :param migration: Migration object whose status is "reverting". 
""" @utils.synchronized(instance.uuid) def do_revert(): LOG.info('Reverting resize on source host.', instance=instance) with self._error_out_instance_on_exception(ctxt, instance): self._finish_revert_snapshot_based_resize_at_source( ctxt, instance, migration) try: do_revert() finally: self._delete_stashed_flavor_info(instance) # Broadcast to all schedulers that the instance is on this host. # This is best effort so if anything fails just log it. try: self._update_scheduler_instance_info(ctxt, instance) except Exception as e: LOG.warning('finish_revert_snapshot_based_resize_at_source failed ' 'during post-processing. Error: %s', e, instance=instance) def _finish_revert_snapshot_based_resize_at_source( self, ctxt, instance, migration): """Private version of finish_revert_snapshot_based_resize_at_source. This allows the main method to be decorated with error handlers. :param ctxt: nova auth request context targeted at the source cell :param instance: Instance object whose vm_state is "resized" and task_state is "resize_reverting". :param migration: Migration object whose status is "reverting". """ # Get stashed old_vm_state information to determine if guest should # be powered on after spawn; we default to ACTIVE for backwards # compatibility if old_vm_state is not set old_vm_state = instance.system_metadata.get( 'old_vm_state', vm_states.ACTIVE) # Revert the flavor and host/node fields to their previous values self._revert_instance_flavor_host_node(instance, migration) # Move the allocations against the source compute node resource # provider, held by the migration, to the instance which will drop # the destination compute node resource provider allocations held by # the instance. This puts the allocations against the source node # back to the old_flavor and owned by the instance. try: self._revert_allocation(ctxt, instance, migration) except exception.AllocationMoveFailed: # Log the error but do not re-raise because we want to continue to # process ports and volumes below. LOG.error('Reverting allocation in placement for migration ' '%(migration_uuid)s failed. You may need to manually ' 'remove the allocations for the migration consumer ' 'against the source node resource provider ' '%(source_provider)s and the allocations for the ' 'instance consumer against the destination node ' 'resource provider %(dest_provider)s and then run the ' '"nova-manage placement heal_allocations" command.', {'instance_uuid': instance.uuid, 'migration_uuid': migration.uuid, 'source_provider': migration.source_node, 'dest_provider': migration.dest_node}, instance=instance) bdms = instance.get_bdms() # prep_snapshot_based_resize_at_source created empty volume attachments # that we need to update here to get the connection_info before calling # driver.finish_revert_migration which will connect the volumes to this # host. LOG.debug('Updating volume attachments for target host %s.', self.host, instance=instance) # TODO(mriedem): We should probably make _update_volume_attachments # (optionally) graceful to errors so we (1) try to process all # attachments and (2) continue to process networking below. self._update_volume_attachments(ctxt, instance, bdms) LOG.debug('Updating port bindings for source host %s.', self.host, instance=instance) # TODO(mriedem): Calculate provider mappings when we support # cross-cell resize/migrate with ports having resource requests. # NOTE(hanrong): we need to change migration.dest_compute to # source host temporarily. 
# "network_api.migrate_instance_finish" will setup the network # for the instance on the destination host. For revert resize, # the instance will back to the source host, the setup of the # network for instance should be on the source host. So set # the migration.dest_compute to source host at here. with utils.temporary_mutation( migration, dest_compute=migration.source_compute ): self.network_api.migrate_instance_finish( ctxt, instance, migration, provider_mappings=None) network_info = self.network_api.get_instance_nw_info(ctxt, instance) # Remember that prep_snapshot_based_resize_at_source destroyed the # guest but left the disks intact so we cannot call spawn() here but # finish_revert_migration should do the job. block_device_info = self._get_instance_block_device_info( ctxt, instance, bdms=bdms) power_on = old_vm_state == vm_states.ACTIVE driver_error = None try: self.driver.finish_revert_migration( ctxt, instance, network_info, migration, block_device_info=block_device_info, power_on=power_on) except Exception as e: driver_error = e # Leave a hint about hard rebooting the guest and reraise so the # instance is put into ERROR state. with excutils.save_and_reraise_exception(logger=LOG): LOG.error('An error occurred during finish_revert_migration. ' 'The instance may need to be hard rebooted. Error: ' '%s', driver_error, instance=instance) else: # Perform final cleanup of the instance in the database. instance.drop_migration_context() # If the original vm_state was STOPPED, set it back to STOPPED. vm_state = vm_states.ACTIVE if power_on else vm_states.STOPPED self._update_instance_after_spawn(instance, vm_state=vm_state) instance.save(expected_task_state=[task_states.RESIZE_REVERTING]) finally: # Complete any volume attachments so the volumes are in-use. We # do this regardless of finish_revert_migration failing because # the instance is back on this host now and we do not want to leave # the volumes in a pending state in case the instance is hard # rebooted. LOG.debug('Completing volume attachments for instance on source ' 'host.', instance=instance) with excutils.save_and_reraise_exception( reraise=driver_error is not None, logger=LOG): self._complete_volume_attachments(ctxt, bdms) migration.status = 'reverted' migration.save() @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @errors_out_migration @wrap_instance_fault def revert_resize(self, context, instance, migration, request_spec): """Destroys the new instance on the destination machine. Reverts the model changes, and powers on the old instance on the source machine. """ # NOTE(comstud): A revert_resize is essentially a resize back to # the old size, so we need to send a usage event here. 
compute_utils.notify_usage_exists(self.notifier, context, instance, self.host, current_period=True) with self._error_out_instance_on_exception(context, instance): # NOTE(tr3buchet): tear down networks on destination host self.network_api.setup_networks_on_host(context, instance, teardown=True) self.network_api.migrate_instance_start(context, instance, migration) network_info = self.network_api.get_instance_nw_info(context, instance) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) block_device_info = self._get_instance_block_device_info( context, instance, bdms=bdms) destroy_disks = not self._is_instance_storage_shared( context, instance, host=migration.source_compute) self.driver.destroy(context, instance, network_info, block_device_info, destroy_disks) self._terminate_volume_connections(context, instance, bdms) # Free up the new_flavor usage from the resource tracker for this # host. self.rt.drop_move_claim_at_dest(context, instance, migration) # RPC cast back to the source host to finish the revert there. self.compute_rpcapi.finish_revert_resize(context, instance, migration, migration.source_compute, request_spec) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @errors_out_migration @wrap_instance_fault def finish_revert_resize(self, context, instance, migration, request_spec): """Finishes the second half of reverting a resize on the source host. Bring the original source instance state back (active/shutoff) and revert the resized attributes in the database. """ try: self._finish_revert_resize( context, instance, migration, request_spec) finally: self._delete_stashed_flavor_info(instance) def _finish_revert_resize( self, context, instance, migration, request_spec=None, ): """Inner version of finish_revert_resize.""" with self._error_out_instance_on_exception(context, instance): bdms = self._update_bdm_for_swap_to_finish_resize( context, instance, confirm=False) self._notify_about_instance_usage( context, instance, "resize.revert.start") compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.RESIZE_REVERT, phase=fields.NotificationPhase.START, bdms=bdms) # Get stashed old_vm_state information to determine if guest should # be powered on after spawn; we default to ACTIVE for backwards # compatibility if old_vm_state is not set old_vm_state = instance.system_metadata.get( 'old_vm_state', vm_states.ACTIVE) # Revert the flavor and host/node fields to their previous values self._revert_instance_flavor_host_node(instance, migration) try: source_allocations = self._revert_allocation( context, instance, migration) except exception.AllocationMoveFailed: LOG.error('Reverting allocation in placement for migration ' '%(migration_uuid)s failed. The instance ' '%(instance_uuid)s will be put into ERROR state but ' 'the allocation held by the migration is leaked.', {'instance_uuid': instance.uuid, 'migration_uuid': migration.uuid}) raise provider_mappings = self._fill_provider_mapping_based_on_allocs( context, source_allocations, request_spec) self.network_api.setup_networks_on_host(context, instance, migration.source_compute) # NOTE(hanrong): we need to change migration.dest_compute to # source host temporarily. "network_api.migrate_instance_finish" # will setup the network for the instance on the destination host. # For revert resize, the instance will back to the source host, the # setup of the network for instance should be on the source host. 
# So set the migration.dest_compute to source host at here. with utils.temporary_mutation( migration, dest_compute=migration.source_compute): self.network_api.migrate_instance_finish(context, instance, migration, provider_mappings) network_info = self.network_api.get_instance_nw_info(context, instance) # revert_resize deleted any volume attachments for the instance # and created new ones to be used on this host, but we # have to update those attachments with the host connector so the # BDM.connection_info will get set in the call to # _get_instance_block_device_info below with refresh_conn_info=True # and then the volumes can be re-connected via the driver on this # host. self._update_volume_attachments(context, instance, bdms) block_device_info = self._get_instance_block_device_info( context, instance, refresh_conn_info=True, bdms=bdms) power_on = old_vm_state != vm_states.STOPPED self.driver.finish_revert_migration( context, instance, network_info, migration, block_device_info, power_on) instance.drop_migration_context() instance.launched_at = timeutils.utcnow() instance.save(expected_task_state=task_states.RESIZE_REVERTING) # Complete any volume attachments so the volumes are in-use. self._complete_volume_attachments(context, bdms) # if the original vm state was STOPPED, set it back to STOPPED LOG.info("Updating instance to original state: '%s'", old_vm_state, instance=instance) if power_on: instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.save() else: instance.task_state = task_states.POWERING_OFF instance.save() self.stop_instance(context, instance=instance, clean_shutdown=True) self._notify_about_instance_usage( context, instance, "resize.revert.end") compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.RESIZE_REVERT, phase=fields.NotificationPhase.END, bdms=bdms) def _fill_provider_mapping_based_on_allocs( self, context, allocations, request_spec): """Fills and returns the request group - resource provider mapping based on the allocation passed in. :param context: The security context :param allocation: allocation dict keyed by RP UUID. :param request_spec: The RequestSpec object associated with the operation :returns: None if the request_spec is None. Otherwise a mapping between RequestGroup requester_id, currently Neutron port_id, and a list of resource provider UUIDs providing resource for that RequestGroup. """ if request_spec: # NOTE(gibi): We need to re-calculate the resource provider - # port mapping as we have to have the neutron ports allocate # from the source compute after revert. scheduler_utils.fill_provider_mapping_based_on_allocation( context, self.reportclient, request_spec, allocations) provider_mappings = self._get_request_group_mapping( request_spec) else: # TODO(sbauza): Remove this conditional once we only support RPC # API 6.0 # NOTE(gibi): The compute RPC is pinned to be older than 5.2 # and therefore request_spec is not sent. We cannot calculate # the provider mappings. If the instance has ports with # resource request then the port update will fail in # _update_port_binding_for_instance() called via # migrate_instance_finish() in finish_revert_resize. 
provider_mappings = None return provider_mappings def _revert_allocation(self, context, instance, migration): """Revert an allocation that is held by migration to our instance.""" # Fetch the original allocation that the instance had on the source # node, which are now held by the migration orig_alloc = self.reportclient.get_allocations_for_consumer( context, migration.uuid) if not orig_alloc: LOG.error('Did not find resource allocations for migration ' '%s on source node %s. Unable to revert source node ' 'allocations back to the instance.', migration.uuid, migration.source_node, instance=instance) return False LOG.info('Swapping old allocation on %(rp_uuids)s held by migration ' '%(mig)s for instance', {'rp_uuids': orig_alloc.keys(), 'mig': migration.uuid}, instance=instance) # FIXME(gibi): This method is flawed in that it does not handle # allocations against sharing providers in any special way. This leads # to duplicate allocations against the sharing provider during # migration. # TODO(cdent): Should we be doing anything with return values here? self.reportclient.move_allocations(context, migration.uuid, instance.uuid) return orig_alloc def _prep_resize( self, context, image, instance, flavor, filter_properties, node, migration, request_spec, clean_shutdown=True, ): if not filter_properties: filter_properties = {} if not instance.host: self._set_instance_obj_error_state(instance) msg = _('Instance has no source host') raise exception.MigrationError(reason=msg) same_host = instance.host == self.host # if the flavor IDs match, it's migrate; otherwise resize if same_host and flavor.id == instance['instance_type_id']: # check driver whether support migrate to same host if not self.driver.capabilities.get( 'supports_migrate_to_same_host', False): # Raise InstanceFaultRollback so that the # _error_out_instance_on_exception context manager in # prep_resize will set the instance.vm_state properly. raise exception.InstanceFaultRollback( inner_exception=exception.UnableToMigrateToSelf( instance_id=instance.uuid, host=self.host)) # NOTE(danms): Stash the new flavor to avoid having to # look it up in the database later instance.new_flavor = flavor # NOTE(mriedem): Stash the old vm_state so we can set the # resized/reverted instance back to the same state later. vm_state = instance.vm_state LOG.debug('Stashing vm_state: %s', vm_state, instance=instance) instance.system_metadata['old_vm_state'] = vm_state instance.save() if not isinstance(request_spec, objects.RequestSpec): # Prior to compute RPC API 5.1 conductor would pass a legacy dict # version of the request spec to compute and since Stein compute # could be sending that back to conductor on reschedule, so if we # got a dict convert it to an object. # TODO(mriedem): We can drop this compat code when we only support # compute RPC API >=6.0. request_spec = objects.RequestSpec.from_primitives( context, request_spec, filter_properties) # We don't have to set the new flavor on the request spec because # if we got here it was due to a reschedule from the compute and # the request spec would already have the new flavor in it from the # else block below. 
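# --- Editor's illustrative sketch (not part of nova/compute/manager.py) ---
# _prep_resize above distinguishes a cold migration from a resize by comparing
# flavor IDs ("if the flavor IDs match, it's migrate; otherwise resize") and
# only allows a same-host cold migration when the driver advertises the
# 'supports_migrate_to_same_host' capability. A minimal sketch of that
# decision, using a hypothetical helper name and plain arguments in place of
# the instance and driver objects, could be:

def same_host_cold_migration_allowed(instance_host, local_host,
                                     current_flavor_id, new_flavor_id,
                                     driver_capabilities):
    """Return True if the requested move can proceed on this host.

    Matching flavor IDs mean a cold migration; a same-host cold migration is
    only allowed when the driver supports migrating to the same host. A
    resize (different flavor) is not restricted by this check.
    """
    same_host = instance_host == local_host
    is_cold_migration = current_flavor_id == new_flavor_id
    if same_host and is_cold_migration:
        return driver_capabilities.get('supports_migrate_to_same_host', False)
    return True

# Example: a same-host resize to a different flavor is allowed even when the
# driver cannot migrate to the same host, but a same-host cold migration is not.
assert same_host_cold_migration_allowed('node1', 'node1', 1, 2, {}) is True
assert same_host_cold_migration_allowed('node1', 'node1', 1, 1, {}) is False
# --- end sketch ---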
provider_mapping = self._get_request_group_mapping(request_spec) if provider_mapping: try: compute_utils.update_pci_request_with_placement_allocations( context, self.reportclient, instance.pci_requests.requests, provider_mapping, ) except (exception.AmbiguousResourceProviderForPCIRequest, exception.UnexpectedResourceProviderNameForPCIRequest ) as e: raise exception.BuildAbortException( reason=str(e), instance_uuid=instance.uuid) limits = filter_properties.get('limits', {}) allocs = self.reportclient.get_allocations_for_consumer( context, instance.uuid) with self.rt.resize_claim( context, instance, flavor, node, migration, allocs, image_meta=image, limits=limits, ) as claim: LOG.info('Migrating', instance=instance) # RPC cast to the source host to start the actual resize/migration. self.compute_rpcapi.resize_instance( context, instance, claim.migration, image, flavor, request_spec, clean_shutdown) def _send_prep_resize_notifications( self, context, instance, phase, flavor): """Send "resize.prep.*" notifications. :param context: nova auth request context :param instance: The instance being resized :param phase: The phase of the action (NotificationPhase enum) :param flavor: The (new) flavor for the resize (same as existing instance.flavor for a cold migration) """ # Only send notify_usage_exists if it's the "start" phase. if phase == fields.NotificationPhase.START: compute_utils.notify_usage_exists( self.notifier, context, instance, self.host, current_period=True) # Send extra usage info about the flavor if it's the "end" phase for # the legacy unversioned notification. extra_usage_info = None if phase == fields.NotificationPhase.END: extra_usage_info = dict( new_instance_type=flavor.name, new_instance_type_id=flavor.id) self._notify_about_instance_usage( context, instance, "resize.prep.%s" % phase, extra_usage_info=extra_usage_info) # Send the versioned notification. compute_utils.notify_about_resize_prep_instance( context, instance, self.host, phase, flavor) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def prep_resize(self, context, image, instance, flavor, request_spec, filter_properties, node, clean_shutdown, migration, host_list): """Initiates the process of moving a running instance to another host. Possibly changes the VCPU, RAM and disk size in the process. This is initiated from conductor and runs on the destination host. The main purpose of this method is performing some checks on the destination host and making a claim for resources. If the claim fails then a reschedule to another host may be attempted which involves calling back to conductor to start the process over again. """ if node is None: node = self._get_nodename(instance, refresh=True) # Pass instance_state=instance.vm_state because we can resize # a STOPPED server and we don't want to set it back to ACTIVE # in case _prep_resize fails. instance_state = instance.vm_state with self._error_out_instance_on_exception( context, instance, instance_state=instance_state), \ errors_out_migration_ctxt(migration): self._send_prep_resize_notifications( context, instance, fields.NotificationPhase.START, flavor) try: scheduler_hints = self._get_scheduler_hints(filter_properties, request_spec) # Error out if this host cannot accept the new instance due # to anti-affinity. At this point the migration is already # in-progress, so this is the definitive moment to abort due to # the policy violation. Also, exploding here is covered by the # cleanup methods in except block. 
try: self._validate_instance_group_policy(context, instance, scheduler_hints) except exception.RescheduledException as e: raise exception.InstanceFaultRollback(inner_exception=e) self._prep_resize(context, image, instance, flavor, filter_properties, node, migration, request_spec, clean_shutdown) except exception.BuildAbortException: # NOTE(gibi): We failed # update_pci_request_with_placement_allocations so # there is no reason to re-schedule. Just revert the allocation # and fail the migration. with excutils.save_and_reraise_exception(): self._revert_allocation(context, instance, migration) except Exception: # Since we hit a failure, we're either rescheduling or dead # and either way we need to cleanup any allocations created # by the scheduler for the destination node. self._revert_allocation(context, instance, migration) # try to re-schedule the resize elsewhere: exc_info = sys.exc_info() self._reschedule_resize_or_reraise(context, instance, exc_info, flavor, request_spec, filter_properties, host_list) finally: self._send_prep_resize_notifications( context, instance, fields.NotificationPhase.END, flavor) def _reschedule_resize_or_reraise(self, context, instance, exc_info, flavor, request_spec, filter_properties, host_list): """Try to re-schedule the resize or re-raise the original error to error out the instance. """ if not filter_properties: filter_properties = {} rescheduled = False instance_uuid = instance.uuid try: retry = filter_properties.get('retry') if retry: LOG.debug('Rescheduling, attempt %d', retry['num_attempts'], instance_uuid=instance_uuid) # reset the task state task_state = task_states.RESIZE_PREP self._instance_update(context, instance, task_state=task_state) if exc_info: # stringify to avoid circular ref problem in json # serialization retry['exc'] = traceback.format_exception_only( exc_info[0], exc_info[1]) scheduler_hint = {'filter_properties': filter_properties} self.compute_task_api.resize_instance( context, instance, scheduler_hint, flavor, request_spec=request_spec, host_list=host_list) rescheduled = True else: # no retry information, do not reschedule. 
LOG.debug('Retry info not present, will not reschedule', instance_uuid=instance_uuid) rescheduled = False except Exception as error: rescheduled = False LOG.exception("Error trying to reschedule", instance_uuid=instance_uuid) compute_utils.add_instance_fault_from_exc(context, instance, error, exc_info=sys.exc_info()) self._notify_about_instance_usage(context, instance, 'resize.error', fault=error) compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.RESIZE, phase=fields.NotificationPhase.ERROR, exception=error, ) if rescheduled: self._log_original_error(exc_info, instance_uuid) compute_utils.add_instance_fault_from_exc(context, instance, exc_info[1], exc_info=exc_info) self._notify_about_instance_usage(context, instance, 'resize.error', fault=exc_info[1]) compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.RESIZE, phase=fields.NotificationPhase.ERROR, exception=exc_info[1], ) else: # not re-scheduling exc = exc_info[1] or exc_info[0]() if exc.__traceback__ is not exc_info[2]: raise exc.with_traceback(exc_info[2]) raise exc @messaging.expected_exceptions(exception.MigrationPreCheckError) @wrap_exception() @wrap_instance_event(prefix='compute') @wrap_instance_fault def prep_snapshot_based_resize_at_dest( self, ctxt, instance, flavor, nodename, migration, limits): """Performs pre-cross-cell resize resource claim on the dest host. This runs on the destination host in a cross-cell resize operation before the resize is actually started. Performs a resize_claim for resources that are not claimed in placement like PCI devices and NUMA topology. Note that this is different from same-cell prep_resize in that this: * Does not RPC cast to the source compute, that is orchestrated from conductor. * This does not reschedule on failure, conductor handles that since conductor is synchronously RPC calling this method. As such, the reverts_task_state decorator is not used on this method. :param ctxt: user auth request context :param instance: the instance being resized :param flavor: the flavor being resized to (unchanged for cold migrate) :param nodename: Name of the target compute node :param migration: nova.objects.Migration object for the operation :param limits: nova.objects.SchedulerLimits object of resource limits :returns: nova.objects.MigrationContext; the migration context created on the destination host during the resize_claim. :raises: nova.exception.MigrationPreCheckError if the pre-check validation fails for the given host selection """ LOG.debug('Checking if we can cross-cell migrate instance to this ' 'host (%s).', self.host, instance=instance) self._send_prep_resize_notifications( ctxt, instance, fields.NotificationPhase.START, flavor) # TODO(mriedem): update_pci_request_with_placement_allocations # should be called here if the request spec has request group mappings, # e.g. for things like QoS ports with resource requests. Do it outside # the try/except so if it raises BuildAbortException we do not attempt # to reschedule. try: # Get the allocations within the try/except block in case we get # an error so MigrationPreCheckError is raised up. allocations = self.reportclient.get_allocs_for_consumer( ctxt, instance.uuid)['allocations'] # Claim resources on this target host using the new flavor which # will create the MigrationContext object. 
Note that in the future # if we want to do other validation here we should do it within # the MoveClaim context so we can drop the claim if anything fails. self.rt.resize_claim( ctxt, instance, flavor, nodename, migration, allocations, image_meta=instance.image_meta, limits=limits) except Exception as ex: err = str(ex) LOG.warning( 'Cross-cell resize pre-checks failed for this host (%s). ' 'Cleaning up. Failure: %s', self.host, err, instance=instance, exc_info=True) raise exception.MigrationPreCheckError( reason=(_("Pre-checks failed on host '%(host)s'. " "Error: %(error)s") % {'host': self.host, 'error': err})) finally: self._send_prep_resize_notifications( ctxt, instance, fields.NotificationPhase.END, flavor) # ResourceTracker.resize_claim() sets instance.migration_context. return instance.migration_context @messaging.expected_exceptions(exception.InstancePowerOffFailure) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @errors_out_migration @wrap_instance_fault def prep_snapshot_based_resize_at_source( self, ctxt, instance, migration, snapshot_id=None): """Prepares the instance at the source host for cross-cell resize Performs actions like powering off the guest, upload snapshot data if the instance is not volume-backed, disconnecting volumes, unplugging VIFs and activating the destination host port bindings. :param ctxt: user auth request context targeted at source cell :param instance: nova.objects.Instance; the instance being resized. The expected instance.task_state is "resize_migrating" when calling this method, and the expected task_state upon successful completion is "resize_migrated". :param migration: nova.objects.Migration object for the operation. The expected migration.status is "pre-migrating" when calling this method and the expected status upon successful completion is "post-migrating". :param snapshot_id: ID of the image snapshot to upload if not a volume-backed instance :raises: nova.exception.InstancePowerOffFailure if stopping the instance fails """ LOG.info('Preparing for snapshot based resize on source host %s.', self.host, instance=instance) # Note that if anything fails here, the migration-based allocations # created in conductor should be reverted by conductor as well, # see MigrationTask.rollback. self._prep_snapshot_based_resize_at_source( ctxt, instance, migration, snapshot_id=snapshot_id) @delete_image_on_error def _snapshot_for_resize(self, ctxt, image_id, instance): """Uploads snapshot for the instance during a snapshot-based resize If the snapshot operation fails the image will be deleted. :param ctxt: the nova auth request context for the resize operation :param image_id: the snapshot image ID :param instance: the instance to snapshot/resize """ LOG.debug('Uploading snapshot data for image %s', image_id, instance=instance) # Note that we do not track the snapshot phase task states # during resize since we do not want to reflect those into the # actual instance.task_state. update_task_state = lambda *args, **kwargs: None with timeutils.StopWatch() as timer: self.driver.snapshot(ctxt, instance, image_id, update_task_state) LOG.debug('Took %0.2f seconds to snapshot the instance on ' 'the hypervisor.', timer.elapsed(), instance=instance) def _prep_snapshot_based_resize_at_source( self, ctxt, instance, migration, snapshot_id=None): """Private method for prep_snapshot_based_resize_at_source so calling code can handle errors and perform rollbacks as necessary. """ # Fetch and update the instance.info_cache. 
network_info = self.network_api.get_instance_nw_info(ctxt, instance) # Get the BDMs attached to this instance on this source host. bdms = instance.get_bdms() # Send the resize.start notification. self._send_resize_instance_notifications( ctxt, instance, bdms, network_info, fields.NotificationPhase.START) # Update the migration status from "pre-migrating" to "migrating". migration.status = 'migrating' migration.save() # Since the instance is going to be left on the source host during the # resize, we need to power it off so we do not have the instance # potentially running in two places. LOG.debug('Stopping instance', instance=instance) try: self._power_off_instance(ctxt, instance) except Exception as e: LOG.exception('Failed to power off instance.', instance=instance) raise exception.InstancePowerOffFailure(reason=str(e)) instance.power_state = self._get_power_state(instance) # If a snapshot image ID was provided, we need to snapshot the guest # disk image and upload it to the image service. if snapshot_id: self._snapshot_for_resize(ctxt, snapshot_id, instance) block_device_info = self._get_instance_block_device_info( ctxt, instance, bdms=bdms) # If something fails at this point the instance must go to ERROR # status for operator intervention or to reboot/rebuild the instance. with self._error_out_instance_on_exception( ctxt, instance, instance_state=vm_states.ERROR): # Destroy the guest on the source host which will disconnect # volumes and unplug VIFs. Note that we DO NOT destroy disks since # we want to leave those on the source host in case of a later # failure and disks are needed to recover the guest or in case the # resize is reverted. LOG.debug('Destroying guest on source host but retaining disks.', instance=instance) self.driver.destroy( ctxt, instance, network_info, block_device_info=block_device_info, destroy_disks=False) # At this point the volumes are disconnected from this source host. # Delete the old volume attachment records and create new empty # ones which will be used later if the resize is reverted. LOG.debug('Deleting volume attachments for the source host.', instance=instance) self._terminate_volume_connections(ctxt, instance, bdms) # At this point the VIFs are unplugged from this source host. # Activate the dest host port bindings created by conductor. self.network_api.migrate_instance_start(ctxt, instance, migration) # Update the migration status from "migrating" to "post-migrating". migration.status = 'post-migrating' migration.save() # At this point, the traditional resize_instance would update the # instance host/node values to point at the dest host/node because # that is where the disk is transferred during resize_instance, but # with cross-cell resize the instance is not yet at the dest host # so we do not make that update here. instance.task_state = task_states.RESIZE_MIGRATED instance.save(expected_task_state=task_states.RESIZE_MIGRATING) self._send_resize_instance_notifications( ctxt, instance, bdms, network_info, fields.NotificationPhase.END) self.instance_events.clear_events_for_instance(instance) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def resize_instance(self, context, instance, image, migration, flavor, clean_shutdown, request_spec): """Starts the migration of a running instance to another host. This is initiated from the destination host's ``prep_resize`` routine and runs on the source host. 
""" try: self._resize_instance(context, instance, image, migration, flavor, clean_shutdown, request_spec) except Exception: with excutils.save_and_reraise_exception(): self._revert_allocation(context, instance, migration) def _resize_instance( self, context, instance, image, migration, flavor, clean_shutdown, request_spec, ): # Pass instance_state=instance.vm_state because we can resize # a STOPPED server and we don't want to set it back to ACTIVE # in case migrate_disk_and_power_off raises InstanceFaultRollback. instance_state = instance.vm_state with self._error_out_instance_on_exception( context, instance, instance_state=instance_state), \ errors_out_migration_ctxt(migration): network_info = self.network_api.get_instance_nw_info(context, instance) migration.status = 'migrating' migration.save() instance.task_state = task_states.RESIZE_MIGRATING instance.save(expected_task_state=task_states.RESIZE_PREP) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) self._send_resize_instance_notifications( context, instance, bdms, network_info, fields.NotificationPhase.START) block_device_info = self._get_instance_block_device_info( context, instance, bdms=bdms) timeout, retry_interval = self._get_power_off_values( instance, clean_shutdown) disk_info = self.driver.migrate_disk_and_power_off( context, instance, migration.dest_host, flavor, network_info, block_device_info, timeout, retry_interval) self._terminate_volume_connections(context, instance, bdms) self.network_api.migrate_instance_start(context, instance, migration) migration.status = 'post-migrating' migration.save() instance.host = migration.dest_compute instance.node = migration.dest_node # NOTE(danms): This could fail if the dest_compute_id was not set # by the sending host *and* migration.dest_node does not point to # a legit node on the dest_compute. Since that would be very bad, # let the ComputeHostNotFound raise from here. instance.compute_id = migration.get_dest_compute_id() # NOTE(gibi): as the instance now tracked on the destination we # have to make sure that the source compute resource track can # track this instance as a migration. For that the resource tracker # needs to see the old_flavor set on the instance. The old_flavor # setting used to be done on the destination host in finish_resize # but that is racy with a source host update_available_resource # periodic run instance.old_flavor = instance.flavor instance.task_state = task_states.RESIZE_MIGRATED instance.save(expected_task_state=task_states.RESIZE_MIGRATING) # RPC cast to the destination host to finish the resize/migration. self.compute_rpcapi.finish_resize(context, instance, migration, image, disk_info, migration.dest_compute, request_spec) self._send_resize_instance_notifications( context, instance, bdms, network_info, fields.NotificationPhase.END) self.instance_events.clear_events_for_instance(instance) def _send_resize_instance_notifications( self, context, instance, bdms, network_info, phase): """Send "resize.(start|end)" notifications. :param context: nova auth request context :param instance: The instance being resized :param bdms: BlockDeviceMappingList for the BDMs associated with the instance :param network_info: NetworkInfo for the instance info cache of ports :param phase: The phase of the action (NotificationPhase enum, either ``start`` or ``end``) """ action = fields.NotificationAction.RESIZE # Send the legacy unversioned notification. 
self._notify_about_instance_usage( context, instance, "%s.%s" % (action, phase), network_info=network_info) # Send the versioned notification. compute_utils.notify_about_instance_action( context, instance, self.host, action=action, phase=phase, bdms=bdms) def _terminate_volume_connections(self, context, instance, bdms): connector = None for bdm in bdms: if bdm.is_volume: if bdm.attachment_id: # NOTE(jdg): So here's the thing, the idea behind the new # attach API's was to have a new code fork/path that we # followed, we're not going to do that so we have to do # some extra work in here to make it *behave* just like the # old code. Cinder doesn't allow disconnect/reconnect (you # just delete the attachment and get a new one) # attachments in the new attach code so we have to do # a delete and create without a connector (reserve), # in other words, beware attachment_id = self.volume_api.attachment_create( context, bdm.volume_id, instance.uuid)['id'] self.volume_api.attachment_delete(context, bdm.attachment_id) bdm.attachment_id = attachment_id bdm.save() else: if connector is None: connector = self.driver.get_volume_connector(instance) self.volume_api.terminate_connection(context, bdm.volume_id, connector) @staticmethod def _set_instance_info(instance, flavor): instance.instance_type_id = flavor.id instance.memory_mb = flavor.memory_mb instance.vcpus = flavor.vcpus instance.root_gb = flavor.root_gb instance.ephemeral_gb = flavor.ephemeral_gb instance.flavor = flavor def _update_volume_attachments(self, context, instance, bdms): """Updates volume attachments using the virt driver host connector. :param context: nova.context.RequestContext - user request context :param instance: nova.objects.Instance :param bdms: nova.objects.BlockDeviceMappingList - the list of block device mappings for the given instance """ if bdms: connector = None for bdm in bdms: if bdm.is_volume and bdm.attachment_id: if connector is None: connector = self.driver.get_volume_connector(instance) self.volume_api.attachment_update( context, bdm.attachment_id, connector, bdm.device_name) def _complete_volume_attachments(self, context, bdms): """Completes volume attachments for the instance :param context: nova.context.RequestContext - user request context :param bdms: nova.objects.BlockDeviceMappingList - the list of block device mappings for the given instance """ if bdms: for bdm in bdms: if bdm.is_volume and bdm.attachment_id: self.volume_api.attachment_complete( context, bdm.attachment_id) def _finish_resize(self, context, instance, migration, disk_info, image_meta, bdms, request_spec): resize_instance = False # indicates disks have been resized old_instance_type_id = migration.old_instance_type_id new_instance_type_id = migration.new_instance_type_id old_flavor = instance.flavor # the current flavor is now old # NOTE(mriedem): Get the old_vm_state so we know if we should # power on the instance. If old_vm_state is not set we need to default # to ACTIVE for backwards compatibility old_vm_state = instance.system_metadata.get('old_vm_state', vm_states.ACTIVE) # NOTE(gibi): this is already set by the resize_instance on the source # node before calling finish_resize on destination but during upgrade # it can be that the source node is not having the fix for bug 1944759 # yet. This assignment can be removed in Z release. 
instance.old_flavor = old_flavor if old_instance_type_id != new_instance_type_id: new_flavor = instance.new_flavor # this is set in _prep_resize # Set the flavor-related fields on the instance object including # making instance.flavor = new_flavor. self._set_instance_info(instance, new_flavor) for key in ('root_gb', 'swap', 'ephemeral_gb'): if old_flavor[key] != new_flavor[key]: resize_instance = True break instance.apply_migration_context() # NOTE(tr3buchet): setup networks on destination host self.network_api.setup_networks_on_host(context, instance, migration.dest_compute) provider_mappings = self._get_request_group_mapping(request_spec) # For neutron, migrate_instance_finish updates port bindings for this # host including any PCI devices claimed for SR-IOV ports. self.network_api.migrate_instance_finish( context, instance, migration, provider_mappings) network_info = self.network_api.get_instance_nw_info(context, instance) instance.task_state = task_states.RESIZE_FINISH instance.save(expected_task_state=task_states.RESIZE_MIGRATED) self._send_finish_resize_notifications( context, instance, bdms, network_info, fields.NotificationPhase.START) # We need to update any volume attachments using the destination # host connector so that we can update the BDM.connection_info # before calling driver.finish_migration otherwise the driver # won't know how to connect the volumes to this host. # Note that _get_instance_block_device_info with # refresh_conn_info=True will update the BDM.connection_info value # in the database so we must do this before calling that method. self._update_volume_attachments(context, instance, bdms) block_device_info = self._get_instance_block_device_info( context, instance, refresh_conn_info=True, bdms=bdms) # NOTE(mriedem): If the original vm_state was STOPPED, we don't # automatically power on the instance after it's migrated power_on = old_vm_state != vm_states.STOPPED # NOTE(sbauza): During a migration, the original allocation is against # the migration UUID while the target allocation (for the destination # node) is related to the instance UUID, so here we need to pass the # new ones. allocations = self.reportclient.get_allocs_for_consumer( context, instance.uuid)['allocations'] try: self.driver.finish_migration(context, migration, instance, disk_info, network_info, image_meta, resize_instance, allocations, block_device_info, power_on) except Exception: # Note that we do not rollback port bindings to the source host # because resize_instance (on the source host) updated the # instance.host to point to *this* host (the destination host) # so the port bindings pointing at this host are correct even # though we failed to create the guest. with excutils.save_and_reraise_exception(): # If we failed to create the guest on this host, reset the # instance flavor-related fields to the old flavor. An # error handler like reverts_task_state will save the changes. if old_instance_type_id != new_instance_type_id: self._set_instance_info(instance, old_flavor) # Now complete any volume attachments that were previously updated. 
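# Completing the attachments is the counterpart of the
# _update_volume_attachments() call made above: the attachments were updated
# with this host's connector before finish_migration, and completing them
# lets Cinder mark the volumes as attached on the destination host.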
self._complete_volume_attachments(context, bdms) migration.status = 'finished' migration.save() instance.vm_state = vm_states.RESIZED instance.task_state = None instance.launched_at = timeutils.utcnow() instance.save(expected_task_state=task_states.RESIZE_FINISH) return network_info @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @errors_out_migration @wrap_instance_fault def finish_resize(self, context, disk_info, image, instance, migration, request_spec): """Completes the migration process. Sets up the newly transferred disk and turns on the instance at its new host machine. """ try: self._finish_resize_helper(context, disk_info, image, instance, migration, request_spec) except Exception: with excutils.save_and_reraise_exception(): # At this point, resize_instance (which runs on the source) has # already updated the instance host/node values to point to # this (the dest) compute, so we need to leave the allocations # against the dest node resource provider intact and drop the # allocations against the source node resource provider. If the # user tries to recover the server by hard rebooting it, it # will happen on this host so that's where the allocations # should go. Note that this is the same method called from # confirm_resize to cleanup the source node allocations held # by the migration record. LOG.info('Deleting allocations for old flavor on source node ' '%s after finish_resize failure. You may be able to ' 'recover the instance by hard rebooting it.', migration.source_compute, instance=instance) self._delete_allocation_after_move( context, instance, migration) def _finish_resize_helper(self, context, disk_info, image, instance, migration, request_spec): """Completes the migration process. The caller must revert the instance's allocations if the migration process failed. """ bdms = self._update_bdm_for_swap_to_finish_resize(context, instance) with self._error_out_instance_on_exception(context, instance): image_meta = objects.ImageMeta.from_dict(image) network_info = self._finish_resize(context, instance, migration, disk_info, image_meta, bdms, request_spec) # TODO(melwitt): We should clean up instance console tokens here. The # instance is on a new host and will need to establish a new console # connection. self._update_scheduler_instance_info(context, instance) self._send_finish_resize_notifications( context, instance, bdms, network_info, fields.NotificationPhase.END) def _send_finish_resize_notifications( self, context, instance, bdms, network_info, phase): """Send notifications for the finish_resize flow. :param context: nova auth request context :param instance: The instance being resized :param bdms: BlockDeviceMappingList for the BDMs associated with the instance :param network_info: NetworkInfo for the instance info cache of ports :param phase: The phase of the action (NotificationPhase enum, either ``start`` or ``end``) """ # Send the legacy unversioned notification. self._notify_about_instance_usage( context, instance, "finish_resize.%s" % phase, network_info=network_info) # Send the versioned notification. compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.RESIZE_FINISH, phase=phase, bdms=bdms) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @errors_out_migration @wrap_instance_fault def finish_snapshot_based_resize_at_dest( self, ctxt, instance, migration, snapshot_id): """Finishes the snapshot-based resize at the destination compute. 
Sets up block devices and networking on the destination compute and spawns the guest. :param ctxt: nova auth request context targeted at the target cell DB :param instance: The Instance object being resized with the ``migration_context`` field set. Upon successful completion of this method the vm_state should be "resized", the task_state should be None, and migration context, host/node and flavor-related fields should be set on the instance. :param migration: The Migration object for this resize operation. Upon successful completion of this method the migration status should be "finished". :param snapshot_id: ID of the image snapshot created for a non-volume-backed instance, else None. """ LOG.info('Finishing snapshot based resize on destination host %s.', self.host, instance=instance) with self._error_out_instance_on_exception(ctxt, instance): # Note that if anything fails here, the migration-based allocations # created in conductor should be reverted by conductor as well, # see MigrationTask.rollback. self._finish_snapshot_based_resize_at_dest( ctxt, instance, migration, snapshot_id) def _finish_snapshot_based_resize_at_dest( self, ctxt, instance, migration, snapshot_id): """Private variant of finish_snapshot_based_resize_at_dest so the caller can handle reverting resource allocations on failure and perform other generic error handling. """ # Figure out the image metadata to use when spawning the guest. origin_image_ref = instance.image_ref if snapshot_id: instance.image_ref = snapshot_id image_meta = objects.ImageMeta.from_image_ref( ctxt, self.image_api, snapshot_id) else: # Just use what is already on the volume-backed instance. image_meta = instance.image_meta resize = migration.migration_type == 'resize' instance.old_flavor = instance.flavor if resize: flavor = instance.new_flavor # If we are resizing to a new flavor we need to set the # flavor-related fields on the instance. # NOTE(mriedem): This is likely where storing old/new_flavor on # the MigrationContext would make this cleaner. self._set_instance_info(instance, flavor) instance.apply_migration_context() instance.task_state = task_states.RESIZE_FINISH instance.save(expected_task_state=task_states.RESIZE_MIGRATED) # This seems a bit late to be sending the start notification but # it is what traditional resize has always done as well and it does # contain the changes to the instance with the new_flavor and # task_state. bdms = instance.get_bdms() network_info = instance.get_network_info() self._send_finish_resize_notifications( ctxt, instance, bdms, network_info, fields.NotificationPhase.START) # Setup volumes and networking and spawn the guest in the hypervisor. self._finish_snapshot_based_resize_at_dest_spawn( ctxt, instance, migration, image_meta, bdms) # If we spawned from a temporary snapshot image we can delete that now, # similar to how unshelve works. if snapshot_id: instance.image_ref = origin_image_ref compute_utils.delete_image( ctxt, instance, self.image_api, snapshot_id) migration.status = 'finished' migration.save() self._update_instance_after_spawn(instance, vm_state=vm_states.RESIZED) # Setting the host/node values will make the ResourceTracker continue # to track usage for this instance on this host. instance.host = migration.dest_compute # NOTE(danms): This could fail if we somehow ended up on the wrong # host without the desired node. That's a big problem, so let the # ComputeHostNotFound raise from here. 
cn = self.rt.get_node_by_name(migration.dest_node) instance.compute_id = cn.id instance.node = cn.hypervisor_hostname instance.save(expected_task_state=task_states.RESIZE_FINISH) # Broadcast to all schedulers that the instance is on this host. self._update_scheduler_instance_info(ctxt, instance) self._send_finish_resize_notifications( ctxt, instance, bdms, network_info, fields.NotificationPhase.END) def _finish_snapshot_based_resize_at_dest_spawn( self, ctxt, instance, migration, image_meta, bdms): """Sets up volumes and networking and spawns the guest on the dest host If the instance was stopped when the resize was initiated the guest will be created but remain in a shutdown power state. If the spawn fails, port bindings are rolled back to the source host and volume connections are terminated for this dest host. :param ctxt: nova auth request context :param instance: Instance object being migrated :param migration: Migration object for the operation :param image_meta: ImageMeta object used during driver.spawn :param bdms: BlockDeviceMappingList of BDMs for the instance """ # Update the volume attachments using this host's connector. # That will update the BlockDeviceMapping.connection_info which # will be used to connect the volumes on this host during spawn(). block_device_info = self._prep_block_device(ctxt, instance, bdms) allocations = self.reportclient.get_allocations_for_consumer( ctxt, instance.uuid) # We do not call self.network_api.setup_networks_on_host here because # for neutron that sets up the port migration profile which is only # used during live migration with DVR. Yes it is gross knowing what # that method does internally. We could change this when bug 1814837 # is fixed if setup_networks_on_host is made smarter by passing the # migration record and the method checks the migration_type. # Activate the port bindings for this host. # FIXME(mriedem): We're going to have the same issue as bug 1813789 # here because this will update the port bindings and send the # network-vif-plugged event and that means when driver.spawn waits for # it we might have already gotten the event and neutron won't send # another one so we could timeout. # TODO(mriedem): Calculate provider mappings when we support cross-cell # resize/migrate with ports having resource requests. self.network_api.migrate_instance_finish( ctxt, instance, migration, provider_mappings=None) network_info = self.network_api.get_instance_nw_info(ctxt, instance) # If the original vm_state was STOPPED, we do not automatically # power on the instance after it is migrated. power_on = instance.system_metadata['old_vm_state'] == vm_states.ACTIVE try: # NOTE(mriedem): If this instance uses a config drive, it will get # rebuilt here which means any personality files will be lost, # similar to unshelve. If the instance is not using a config drive # and getting metadata from the metadata API service, personality # files would be lost regardless of the move operation. self.driver.spawn( ctxt, instance, image_meta, injected_files=[], admin_password=None, allocations=allocations, network_info=network_info, block_device_info=block_device_info, power_on=power_on) except Exception: with excutils.save_and_reraise_exception(logger=LOG): # Rollback port bindings to the source host. try: # This is gross but migrate_instance_start looks at the # migration.dest_compute to determine where to activate the # port bindings and we want the source compute port # bindings to be re-activated. 
Remember at this point the # instance.host is still pointing at the source compute. # TODO(mriedem): Maybe we should be calling # setup_instance_network_on_host here to deal with pci # devices? with utils.temporary_mutation( migration, dest_compute=migration.source_compute): self.network_api.migrate_instance_start( ctxt, instance, migration) except Exception: LOG.exception( 'Failed to activate port bindings on the source ' 'host: %s', migration.source_compute, instance=instance) # Rollback volume connections on this host. for bdm in bdms: if bdm.is_volume: try: self._remove_volume_connection( ctxt, bdm, instance, delete_attachment=True) except Exception: LOG.exception('Failed to remove volume connection ' 'on this host %s for volume %s.', self.host, bdm.volume_id, instance=instance) @wrap_exception() @wrap_instance_fault def add_fixed_ip_to_instance(self, context, network_id, instance): """Calls network_api to add new fixed_ip to instance then injects the new network info and resets instance networking. """ self._notify_about_instance_usage( context, instance, "create_ip.start") network_info = self.network_api.add_fixed_ip_to_instance(context, instance, network_id) self._inject_network_info(instance, network_info) # NOTE(russellb) We just want to bump updated_at. See bug 1143466. instance.updated_at = timeutils.utcnow() instance.save() self._notify_about_instance_usage( context, instance, "create_ip.end", network_info=network_info) @wrap_exception() @wrap_instance_fault def remove_fixed_ip_from_instance(self, context, address, instance): """Calls network_api to remove existing fixed_ip from instance by injecting the altered network info and resetting instance networking. """ self._notify_about_instance_usage( context, instance, "delete_ip.start") network_info = self.network_api.remove_fixed_ip_from_instance(context, instance, address) self._inject_network_info(instance, network_info) # NOTE(russellb) We just want to bump updated_at. See bug 1143466. 
instance.updated_at = timeutils.utcnow() instance.save() self._notify_about_instance_usage( context, instance, "delete_ip.end", network_info=network_info) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def pause_instance(self, context, instance): """Pause an instance on this host.""" context = context.elevated() LOG.info('Pausing', instance=instance) self._notify_about_instance_usage(context, instance, 'pause.start') compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.PAUSE, phase=fields.NotificationPhase.START) self.driver.pause(instance) instance.power_state = self._get_power_state(instance) instance.vm_state = vm_states.PAUSED instance.task_state = None instance.save(expected_task_state=task_states.PAUSING) self._notify_about_instance_usage(context, instance, 'pause.end') compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.PAUSE, phase=fields.NotificationPhase.END) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def unpause_instance(self, context, instance): """Unpause a paused instance on this host.""" context = context.elevated() LOG.info('Unpausing', instance=instance) self._notify_about_instance_usage(context, instance, 'unpause.start') compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.UNPAUSE, phase=fields.NotificationPhase.START) self.driver.unpause(instance) instance.power_state = self._get_power_state(instance) instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.save(expected_task_state=task_states.UNPAUSING) self._notify_about_instance_usage(context, instance, 'unpause.end') compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.UNPAUSE, phase=fields.NotificationPhase.END) @wrap_exception() def host_power_action(self, context, action): """Reboots, shuts down or powers up the host.""" return self.driver.host_power_action(action) @wrap_exception() def host_maintenance_mode(self, context, host, mode): """Start/Stop host maintenance window. On start, it triggers guest VMs evacuation. """ return self.driver.host_maintenance_mode(host, mode) def _update_compute_provider_status(self, context, enabled): """Adds or removes the COMPUTE_STATUS_DISABLED trait for this host. For each ComputeNode managed by this service, adds or removes the COMPUTE_STATUS_DISABLED traits to/from the associated resource provider in Placement. :param context: nova auth RequestContext :param enabled: True if the node is enabled in which case the trait would be removed, False if the node is disabled in which case the trait would be added. :raises: ComputeHostNotFound if there are no compute nodes found in the ResourceTracker for this service. """ # Get the compute node(s) on this host. Remember that ironic can be # managing more than one compute node. nodes = self.rt.compute_nodes.values() if not nodes: raise exception.ComputeHostNotFound(host=self.host) # For each node, we want to add (or remove) the COMPUTE_STATUS_DISABLED # trait on the related resource provider in placement so the scheduler # (pre-)filters the provider based on its status. 
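# The update below is per node and best effort: a placement error for one
# node is logged and the loop continues, since the trait will be re-synced
# by the update_available_resource periodic task anyway.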
for node in nodes: try: self.virtapi.update_compute_provider_status( context, node.uuid, enabled) except (exception.ResourceProviderTraitRetrievalFailed, exception.ResourceProviderUpdateConflict, exception.ResourceProviderUpdateFailed, exception.TraitRetrievalFailed) as e: # This is best effort so just log a warning and continue. LOG.warning('An error occurred while updating ' 'COMPUTE_STATUS_DISABLED trait on compute node ' 'resource provider %s. The trait will be ' 'synchronized when the update_available_resource ' 'periodic task runs. Error: %s', node.uuid, e.format_message()) except Exception: LOG.exception('An error occurred while updating ' 'COMPUTE_STATUS_DISABLED trait on compute node ' 'resource provider %s. The trait will be ' 'synchronized when the ' 'update_available_resource periodic task runs.', node.uuid) @wrap_exception() def set_host_enabled(self, context, enabled): """Sets the specified host's ability to accept new instances. This method will add or remove the COMPUTE_STATUS_DISABLED trait to/from the associated compute node resource provider(s) for this compute service. """ try: self._update_compute_provider_status(context, enabled) except exception.ComputeHostNotFound: LOG.warning('Unable to add/remove trait COMPUTE_STATUS_DISABLED. ' 'No ComputeNode(s) found for host: %s', self.host) try: return self.driver.set_host_enabled(enabled) except NotImplementedError: # Only the xenapi driver implements set_host_enabled but we don't # want NotImplementedError to get raised back to the API. We still # need to honor the compute RPC API contract and return 'enabled' # or 'disabled' though. return 'enabled' if enabled else 'disabled' @wrap_exception() def get_host_uptime(self, context): """Returns the result of calling "uptime" on the target host.""" return self.driver.get_host_uptime() @wrap_exception() @wrap_instance_fault def get_diagnostics(self, context, instance): """Retrieve diagnostics for an instance on this host.""" current_power_state = self._get_power_state(instance) if current_power_state == power_state.RUNNING: LOG.info("Retrieving diagnostics", instance=instance) return self.driver.get_diagnostics(instance) else: raise exception.InstanceInvalidState( attr='power state', instance_uuid=instance.uuid, state=power_state.STATE_MAP[instance.power_state], method='get_diagnostics') @wrap_exception() @wrap_instance_fault def get_instance_diagnostics(self, context, instance): """Retrieve diagnostics for an instance on this host.""" current_power_state = self._get_power_state(instance) if current_power_state == power_state.RUNNING: LOG.info("Retrieving diagnostics", instance=instance) return self.driver.get_instance_diagnostics(instance) else: raise exception.InstanceInvalidState( attr='power state', instance_uuid=instance.uuid, state=power_state.STATE_MAP[instance.power_state], method='get_diagnostics') @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def suspend_instance(self, context, instance): """Suspend the given instance.""" context = context.elevated() # Store the old state instance.system_metadata['old_vm_state'] = instance.vm_state self._notify_about_instance_usage(context, instance, 'suspend.start') compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.SUSPEND, phase=fields.NotificationPhase.START) with self._error_out_instance_on_exception(context, instance, instance_state=instance.vm_state): self.driver.suspend(context, instance) instance.power_state = 
self._get_power_state(instance) instance.vm_state = vm_states.SUSPENDED instance.task_state = None instance.save(expected_task_state=task_states.SUSPENDING) self._notify_about_instance_usage(context, instance, 'suspend.end') compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.SUSPEND, phase=fields.NotificationPhase.END) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def resume_instance(self, context, instance): """Resume the given suspended instance.""" context = context.elevated() LOG.info('Resuming', instance=instance) self._notify_about_instance_usage(context, instance, 'resume.start') bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) block_device_info = self._get_instance_block_device_info( context, instance, bdms=bdms) # This allows passing share_info to the resume operation for # futur usage. However, this scenario is currently not possible # because suspending an instance with a share is not permitted # by libvirt. As a result, the suspend action involving a share # is blocked by the API. share_info = self._get_share_info(context, instance) compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.RESUME, phase=fields.NotificationPhase.START, bdms=bdms) network_info = self.network_api.get_instance_nw_info(context, instance) with self._error_out_instance_on_exception(context, instance, instance_state=instance.vm_state): self.driver.resume(context, instance, network_info, block_device_info, share_info) instance.power_state = self._get_power_state(instance) # We default to the ACTIVE state for backwards compatibility instance.vm_state = instance.system_metadata.pop('old_vm_state', vm_states.ACTIVE) instance.task_state = None instance.save(expected_task_state=task_states.RESUMING) self._notify_about_instance_usage(context, instance, 'resume.end') compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.RESUME, phase=fields.NotificationPhase.END, bdms=bdms) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def shelve_instance(self, context, instance, image_id, clean_shutdown, accel_uuids): """Shelve an instance. This should be used when you want to take a snapshot of the instance. It also adds system_metadata that can be used by a periodic task to offload the shelved instance after a period of time. :param context: request context :param instance: an Instance object :param image_id: an image id to snapshot to. :param clean_shutdown: give the GuestOS a chance to stop :param accel_uuids: the accelerators uuids for the instance """ @utils.synchronized(instance.uuid) def do_shelve_instance(): self._shelve_instance(context, instance, image_id, clean_shutdown, accel_uuids) do_shelve_instance() def _shelve_instance(self, context, instance, image_id, clean_shutdown, accel_uuids=None): LOG.info('Shelving', instance=instance) offload = CONF.shelved_offload_time == 0 if offload: # Get the BDMs early so we can pass them into versioned # notifications since _shelve_offload_instance needs the # BDMs anyway. 
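# shelved_offload_time == 0 means "offload immediately": the snapshot is
# taken and then _shelve_offload_instance() is called at the end of this
# method, so fetch the BDMs up front for the offload notifications.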
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) else: bdms = None compute_utils.notify_usage_exists(self.notifier, context, instance, self.host, current_period=True) self._notify_about_instance_usage(context, instance, 'shelve.start') compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.SHELVE, phase=fields.NotificationPhase.START, bdms=bdms) def update_task_state(task_state, expected_state=task_states.SHELVING): shelving_state_map = { task_states.IMAGE_PENDING_UPLOAD: task_states.SHELVING_IMAGE_PENDING_UPLOAD, task_states.IMAGE_UPLOADING: task_states.SHELVING_IMAGE_UPLOADING, task_states.SHELVING: task_states.SHELVING} task_state = shelving_state_map[task_state] expected_state = shelving_state_map[expected_state] instance.task_state = task_state instance.save(expected_task_state=expected_state) # Do not attempt a clean shutdown of a paused guest since some # hypervisors will fail the clean shutdown if the guest is not # running. if instance.power_state == power_state.PAUSED: clean_shutdown = False self._power_off_instance(context, instance, clean_shutdown) self.driver.snapshot(context, instance, image_id, update_task_state) instance.system_metadata['shelved_at'] = timeutils.utcnow().isoformat() instance.system_metadata['shelved_image_id'] = image_id instance.system_metadata['shelved_host'] = self.host instance.vm_state = vm_states.SHELVED instance.task_state = None if offload: instance.task_state = task_states.SHELVING_OFFLOADING instance.power_state = self._get_power_state(instance) instance.save(expected_task_state=[ task_states.SHELVING, task_states.SHELVING_IMAGE_UPLOADING]) self._notify_about_instance_usage(context, instance, 'shelve.end') compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.SHELVE, phase=fields.NotificationPhase.END, bdms=bdms) if offload: self._shelve_offload_instance( context, instance, clean_shutdown=False, bdms=bdms, accel_uuids=accel_uuids) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def shelve_offload_instance(self, context, instance, clean_shutdown, accel_uuids): """Remove a shelved instance from the hypervisor. This frees up those resources for use by other instances, but may lead to slower unshelve times for this instance. This method is used by volume backed instances since restoring them doesn't involve the potentially large download of an image. 
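Offloading destroys the guest on the hypervisor, unbinds the instance's
ports, terminates its volume connections and frees its placement
allocations, leaving the instance in the SHELVED_OFFLOADED state with no
host assigned until it is unshelved.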
:param context: request context :param instance: nova.objects.instance.Instance :param clean_shutdown: give the GuestOS a chance to stop :param accel_uuids: the accelerators uuids for the instance """ @utils.synchronized(instance.uuid) def do_shelve_offload_instance(): self._shelve_offload_instance(context, instance, clean_shutdown, accel_uuids=accel_uuids) do_shelve_offload_instance() def _shelve_offload_instance(self, context, instance, clean_shutdown, bdms=None, accel_uuids=None): LOG.info('Shelve offloading', instance=instance) if bdms is None: bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) self._notify_about_instance_usage(context, instance, 'shelve_offload.start') compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.SHELVE_OFFLOAD, phase=fields.NotificationPhase.START, bdms=bdms) self._power_off_instance(context, instance, clean_shutdown) current_power_state = self._get_power_state(instance) network_info = self.network_api.get_instance_nw_info(context, instance) ports_id = [vif['id'] for vif in network_info] self.network_api.unbind_ports(context, ports_id, detach=False) block_device_info = self._get_instance_block_device_info(context, instance, bdms=bdms) self.driver.destroy(context, instance, network_info, block_device_info) # the instance is going to be removed from the host so we want to # terminate all the connections with the volume server and the host self._terminate_volume_connections(context, instance, bdms) # NOTE(brinzhang): Free up the accelerator resource occupied # in the cyborg service. if accel_uuids: cyclient = cyborg.get_client(context) cyclient.delete_arqs_for_instance(instance.uuid) # Free up the resource allocations in the placement service. # This should happen *before* the vm_state is changed to # SHELVED_OFFLOADED in case client-side code is polling the API to # schedule more instances (or unshelve) once this server is offloaded. self.rt.delete_allocation_for_shelve_offloaded_instance(context, instance) instance.power_state = current_power_state # NOTE(mriedem): The vm_state has to be set before updating the # resource tracker, see vm_states.allow_resource_removal(). The # host/node values cannot be nulled out until after updating the # resource tracker though. instance.vm_state = vm_states.SHELVED_OFFLOADED instance.task_state = None instance.save(expected_task_state=[task_states.SHELVING, task_states.SHELVING_OFFLOADING]) # NOTE(ndipanov): Free resources from the resource tracker self._update_resource_tracker(context, instance) # NOTE(sfinucan): RPC calls should no longer be attempted against this # instance, so ensure any calls result in errors self._nil_out_instance_obj_host_and_node(instance) instance.save(expected_task_state=None) # TODO(melwitt): We should clean up instance console tokens here. The # instance has no host at this point and will need to establish a new # console connection in the future after it is unshelved. self._delete_scheduler_instance_info(context, instance.uuid) self._notify_about_instance_usage(context, instance, 'shelve_offload.end') compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.SHELVE_OFFLOAD, phase=fields.NotificationPhase.END, bdms=bdms) @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def unshelve_instance(self, context, instance, image, filter_properties, node, request_spec, accel_uuids): """Unshelve the instance. 
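This claims resources on this host, restores networking and block device
connections, and spawns the guest either from the shelved snapshot image
or, for volume-backed servers, from its existing volumes.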
:param context: request context :param instance: a nova.objects.instance.Instance object :param image: an image to build from. If None we assume a volume backed instance. :param filter_properties: dict containing limits, retry info etc. :param node: target compute node :param request_spec: the RequestSpec object used to schedule the instance :param accel_uuids: the accelerators uuids for the instance """ if filter_properties is None: filter_properties = {} @utils.synchronized(instance.uuid) def do_unshelve_instance(): self._unshelve_instance( context, instance, image, filter_properties, node, request_spec, accel_uuids) do_unshelve_instance() def _unshelve_instance_key_scrub(self, instance): """Remove data from the instance that may cause side effects.""" cleaned_keys = dict( key_data=instance.key_data, auto_disk_config=instance.auto_disk_config) instance.key_data = None instance.auto_disk_config = False return cleaned_keys def _unshelve_instance_key_restore(self, instance, keys): """Restore previously scrubbed keys before saving the instance.""" instance.update(keys) def _unshelve_instance(self, context, instance, image, filter_properties, node, request_spec, accel_uuids): LOG.info('Unshelving', instance=instance) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) self._notify_about_instance_usage(context, instance, 'unshelve.start') compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.UNSHELVE, phase=fields.NotificationPhase.START, bdms=bdms) instance.task_state = task_states.SPAWNING instance.save() block_device_info = self._prep_block_device(context, instance, bdms) scrubbed_keys = self._unshelve_instance_key_scrub(instance) if node is None: node = self._get_nodename(instance) limits = filter_properties.get('limits', {}) allocations = self.reportclient.get_allocations_for_consumer( context, instance.uuid) shelved_image_ref = instance.image_ref if image: instance.image_ref = image['id'] image_meta = objects.ImageMeta.from_dict(image) else: image_meta = objects.ImageMeta.from_dict( utils.get_image_from_system_metadata( instance.system_metadata)) provider_mappings = self._get_request_group_mapping(request_spec) try: if provider_mappings: compute_utils.update_pci_request_with_placement_allocations( context, self.reportclient, instance.pci_requests.requests, provider_mappings, ) accel_info = [] if accel_uuids: try: accel_info = self._get_bound_arq_resources( context, instance, accel_uuids) except (Exception, exception.InstanceEventTimeout) as exc: LOG.exception('Failure getting accelerator requests ' 'with the exception: %s', exc, instance=instance) self._build_resources_cleanup(instance, None) raise with self.rt.instance_claim(context, instance, node, allocations, limits): self.network_api.setup_instance_network_on_host( context, instance, self.host, provider_mappings=provider_mappings) network_info = self.network_api.get_instance_nw_info( context, instance) self.driver.spawn(context, instance, image_meta, injected_files=[], admin_password=None, allocations=allocations, network_info=network_info, block_device_info=block_device_info, accel_info=accel_info) except Exception: with excutils.save_and_reraise_exception(logger=LOG): LOG.exception('Instance failed to spawn', instance=instance) # Set the image_ref back to initial image_ref because instance # object might have been saved with image['id'] # https://bugs.launchpad.net/nova/+bug/1934094 instance.image_ref = shelved_image_ref # Cleanup allocations 
created by the scheduler on this host # since we failed to spawn the instance. We do this both if # the instance claim failed with ComputeResourcesUnavailable # or if we did claim but the spawn failed, because aborting the # instance claim will not remove the allocations. self.reportclient.delete_allocation_for_instance( context, instance.uuid, force=True) # FIXME: Umm, shouldn't we be rolling back port bindings too? self._terminate_volume_connections(context, instance, bdms) # The reverts_task_state decorator on unshelve_instance will # eventually save these updates. self._nil_out_instance_obj_host_and_node(instance) if image: instance.image_ref = shelved_image_ref self._delete_snapshot_of_shelved_instance(context, instance, image['id']) self._unshelve_instance_key_restore(instance, scrubbed_keys) self._update_instance_after_spawn(instance) # Delete system_metadata for a shelved instance compute_utils.remove_shelved_keys_from_system_metadata(instance) instance.save(expected_task_state=task_states.SPAWNING) self._update_scheduler_instance_info(context, instance) self._notify_about_instance_usage(context, instance, 'unshelve.end') compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.UNSHELVE, phase=fields.NotificationPhase.END, bdms=bdms) def _inject_network_info(self, instance, network_info): """Inject network info for the given instance.""" LOG.debug('Inject network info', instance=instance) LOG.debug('network_info to inject: |%s|', network_info, instance=instance) self.driver.inject_network_info(instance, network_info) @wrap_instance_fault def inject_network_info(self, context, instance): """Inject network info, but don't return the info.""" network_info = self.network_api.get_instance_nw_info(context, instance) self._inject_network_info(instance, network_info) @messaging.expected_exceptions(NotImplementedError, exception.ConsoleNotAvailable, exception.InstanceNotFound) @wrap_exception() @wrap_instance_fault def get_console_output(self, context, instance, tail_length): """Send the console output for the given instance.""" context = context.elevated() LOG.info("Get console output", instance=instance) output = self.driver.get_console_output(context, instance) if type(output) is str: output = output.encode("latin-1") if tail_length is not None: output = self._tail_log(output, tail_length) return output.decode('ascii', 'replace') def _tail_log(self, log, length): try: length = int(length) except ValueError: length = 0 if length == 0: return b'' else: return b'\n'.join(log.split(b'\n')[-int(length):]) @messaging.expected_exceptions(exception.ConsoleTypeInvalid, exception.InstanceNotReady, exception.InstanceNotFound, exception.ConsoleTypeUnavailable, NotImplementedError) @wrap_exception() @wrap_instance_fault def get_vnc_console(self, context, console_type, instance): """Return connection information for a vnc console.""" context = context.elevated() LOG.debug("Getting vnc console", instance=instance) if not CONF.vnc.enabled: raise exception.ConsoleTypeUnavailable(console_type=console_type) if console_type == 'novnc': # For essex, novncproxy_base_url must include the full path # including the html file (like http://myhost/vnc_auto.html) access_url_base = CONF.vnc.novncproxy_base_url else: raise exception.ConsoleTypeInvalid(console_type=console_type) try: # Retrieve connect info from driver, and then decorate with our # access info token console = self.driver.get_vnc_console(context, instance) console_auth = objects.ConsoleAuthToken( 
context=context, console_type=console_type, host=console.host, port=console.port, internal_access_path=console.internal_access_path, instance_uuid=instance.uuid, access_url_base=access_url_base, ) console_auth.authorize(CONF.consoleauth.token_ttl) connect_info = console.get_connection_info( console_auth.token, console_auth.access_url) except exception.InstanceNotFound: if instance.vm_state != vm_states.BUILDING: raise raise exception.InstanceNotReady(instance_id=instance.uuid) return connect_info @messaging.expected_exceptions(exception.ConsoleTypeInvalid, exception.InstanceNotReady, exception.InstanceNotFound, exception.ConsoleTypeUnavailable, NotImplementedError) @wrap_exception() @wrap_instance_fault def get_spice_console(self, context, console_type, instance): """Return connection information for a spice console.""" context = context.elevated() LOG.debug("Getting spice console", instance=instance) if not CONF.spice.enabled: raise exception.ConsoleTypeUnavailable(console_type=console_type) if console_type not in ['spice-html5', 'spice-direct']: raise exception.ConsoleTypeInvalid(console_type=console_type) try: # Retrieve connect info from driver, and then decorate with our # access info token console = self.driver.get_spice_console(context, instance) fields = { 'context': context, 'console_type': console_type, 'host': console.host, 'port': console.port, 'tls_port': console.tlsPort, 'instance_uuid': instance.uuid } if console_type == 'spice-html5': fields['internal_access_path'] = console.internal_access_path fields['access_url_base'] = CONF.spice.html5proxy_base_url if console_type == 'spice-direct': fields['internal_access_path'] = None fields['access_url_base'] = \ CONF.spice.spice_direct_proxy_base_url console_auth = objects.ConsoleAuthToken(**fields) console_auth.authorize(CONF.consoleauth.token_ttl) connect_info = console.get_connection_info( console_auth.token, console_auth.access_url) except exception.InstanceNotFound: if instance.vm_state != vm_states.BUILDING: raise raise exception.InstanceNotReady(instance_id=instance.uuid) return connect_info # TODO(gmann): HyperV virt driver has been removed in Nova 29.0.0 # but we need to keep this method to avoid RPC error in case of using # old controller with new compute. 
This can be removed in RPC API 7.0 @messaging.expected_exceptions(exception.ConsoleTypeInvalid, exception.InstanceNotReady, exception.InstanceNotFound, exception.ConsoleTypeUnavailable, NotImplementedError) @wrap_exception() @wrap_instance_fault def get_rdp_console(self, context, console_type, instance): """Return connection information for a RDP console.""" msg = ("RDP console is applicable for HyperV virt driver only which " "has been removed in Nova 29.0.0") raise NotImplementedError(msg) @messaging.expected_exceptions(exception.ConsoleTypeInvalid, exception.InstanceNotReady, exception.InstanceNotFound, exception.ConsoleTypeUnavailable, NotImplementedError) @wrap_exception() @wrap_instance_fault def get_mks_console(self, context, console_type, instance): """Return connection information for a MKS console.""" context = context.elevated() LOG.debug("Getting MKS console", instance=instance) if not CONF.mks.enabled: raise exception.ConsoleTypeUnavailable(console_type=console_type) if console_type != 'webmks': raise exception.ConsoleTypeInvalid(console_type=console_type) try: # Retrieve connect info from driver, and then decorate with our # access info token console = self.driver.get_mks_console(context, instance) console_auth = objects.ConsoleAuthToken( context=context, console_type=console_type, host=console.host, port=console.port, internal_access_path=console.internal_access_path, instance_uuid=instance.uuid, access_url_base=CONF.mks.mksproxy_base_url, ) console_auth.authorize(CONF.consoleauth.token_ttl) connect_info = console.get_connection_info( console_auth.token, console_auth.access_url) except exception.InstanceNotFound: if instance.vm_state != vm_states.BUILDING: raise raise exception.InstanceNotReady(instance_id=instance.uuid) return connect_info @messaging.expected_exceptions( exception.ConsoleTypeInvalid, exception.InstanceNotReady, exception.InstanceNotFound, exception.ConsoleTypeUnavailable, exception.SocketPortRangeExhaustedException, exception.ImageSerialPortNumberInvalid, exception.ImageSerialPortNumberExceedFlavorValue, NotImplementedError) @wrap_exception() @wrap_instance_fault def get_serial_console(self, context, console_type, instance): """Returns connection information for a serial console.""" LOG.debug("Getting serial console", instance=instance) if not CONF.serial_console.enabled: raise exception.ConsoleTypeUnavailable(console_type=console_type) context = context.elevated() try: # Retrieve connect info from driver, and then decorate with our # access info token console = self.driver.get_serial_console(context, instance) console_auth = objects.ConsoleAuthToken( context=context, console_type=console_type, host=console.host, port=console.port, internal_access_path=console.internal_access_path, instance_uuid=instance.uuid, access_url_base=CONF.serial_console.base_url, ) console_auth.authorize(CONF.consoleauth.token_ttl) connect_info = console.get_connection_info( console_auth.token, console_auth.access_url) except exception.InstanceNotFound: if instance.vm_state != vm_states.BUILDING: raise raise exception.InstanceNotReady(instance_id=instance.uuid) return connect_info @messaging.expected_exceptions(exception.ConsoleTypeInvalid, exception.InstanceNotReady, exception.InstanceNotFound) @wrap_exception() @wrap_instance_fault def validate_console_port(self, ctxt, instance, port, console_type): if console_type in ["spice-html5", "spice-direct"]: console_info = self.driver.get_spice_console(ctxt, instance) elif console_type == "serial": console_info = 
self.driver.get_serial_console(ctxt, instance) elif console_type == "webmks": console_info = self.driver.get_mks_console(ctxt, instance) else: console_info = self.driver.get_vnc_console(ctxt, instance) # Some drivers may return an int on console_info.port but the port # variable in this method is a string, so cast to be sure we are # comparing the correct types. return str(console_info.port) == port @wrap_exception() @reverts_task_state @wrap_instance_fault def reserve_block_device_name(self, context, instance, device, volume_id, disk_bus, device_type, tag, multiattach): if (tag and not self.driver.capabilities.get('supports_tagged_attach_volume', False)): raise exception.VolumeTaggedAttachNotSupported() if (multiattach and not self.driver.capabilities.get('supports_multiattach', False)): raise exception.MultiattachNotSupportedByVirtDriver( volume_id=volume_id) @utils.synchronized(instance.uuid) def do_reserve(): bdms = ( objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid)) # Now that we have the lock check that we haven't raced another # request and ensure there is no existing attachment if any(b for b in bdms if b.volume_id == volume_id): msg = _("volume %s already attached") % volume_id raise exception.InvalidVolume(reason=msg) # NOTE(ndipanov): We need to explicitly set all the fields on the # object so that obj_load_attr does not fail new_bdm = objects.BlockDeviceMapping( context=context, source_type='volume', destination_type='volume', instance_uuid=instance.uuid, boot_index=None, volume_id=volume_id, device_name=device, guest_format=None, disk_bus=disk_bus, device_type=device_type, tag=tag) new_bdm.device_name = self._get_device_name_for_instance( instance, bdms, new_bdm) # NOTE(vish): create bdm here to avoid race condition new_bdm.create() return new_bdm return do_reserve() @wrap_exception() @wrap_instance_event(prefix='compute') @wrap_instance_fault def attach_volume(self, context, instance, bdm): """Attach a volume to an instance.""" driver_bdm = driver_block_device.convert_volume(bdm) @utils.synchronized(instance.uuid) def do_attach_volume(context, instance, driver_bdm): try: return self._attach_volume(context, instance, driver_bdm) except Exception: with excutils.save_and_reraise_exception(): bdm.destroy() do_attach_volume(context, instance, driver_bdm) def _attach_volume(self, context, instance, bdm): context = context.elevated() LOG.info('Attaching volume %(volume_id)s to %(mountpoint)s', {'volume_id': bdm.volume_id, 'mountpoint': bdm['mount_device']}, instance=instance) compute_utils.notify_about_volume_attach_detach( context, instance, self.host, action=fields.NotificationAction.VOLUME_ATTACH, phase=fields.NotificationPhase.START, volume_id=bdm.volume_id) try: bdm.attach(context, instance, self.volume_api, self.driver, do_driver_attach=True) except Exception as e: with excutils.save_and_reraise_exception(): LOG.exception("Failed to attach %(volume_id)s " "at %(mountpoint)s", {'volume_id': bdm.volume_id, 'mountpoint': bdm['mount_device']}, instance=instance) if bdm['attachment_id']: # Try to delete the attachment to make the volume # available again. Note that DriverVolumeBlockDevice # may have already deleted the attachment so ignore # VolumeAttachmentNotFound. 
try: self.volume_api.attachment_delete( context, bdm['attachment_id']) except exception.VolumeAttachmentNotFound as exc: LOG.debug('Ignoring VolumeAttachmentNotFound: %s', exc, instance=instance) else: self.volume_api.unreserve_volume(context, bdm.volume_id) compute_utils.notify_about_volume_attach_detach( context, instance, self.host, action=fields.NotificationAction.VOLUME_ATTACH, phase=fields.NotificationPhase.ERROR, exception=e, volume_id=bdm.volume_id) info = {'volume_id': bdm.volume_id} self._notify_about_instance_usage( context, instance, "volume.attach", extra_usage_info=info) compute_utils.notify_about_volume_attach_detach( context, instance, self.host, action=fields.NotificationAction.VOLUME_ATTACH, phase=fields.NotificationPhase.END, volume_id=bdm.volume_id) def _notify_volume_usage_detach(self, context, instance, bdm): if CONF.volume_usage_poll_interval <= 0: return mp = bdm.device_name # Handle bootable volumes which will not contain /dev/ if '/dev/' in mp: mp = mp[5:] try: vol_stats = self.driver.block_stats(instance, mp) if vol_stats is None: return except NotImplementedError: return LOG.debug("Updating volume usage cache with totals", instance=instance) rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats vol_usage = objects.VolumeUsage(context) vol_usage.volume_id = bdm.volume_id vol_usage.instance_uuid = instance.uuid vol_usage.project_id = instance.project_id vol_usage.user_id = instance.user_id vol_usage.availability_zone = instance.availability_zone vol_usage.curr_reads = rd_req vol_usage.curr_read_bytes = rd_bytes vol_usage.curr_writes = wr_req vol_usage.curr_write_bytes = wr_bytes vol_usage.save(update_totals=True) self.notifier.info(context, 'volume.usage', vol_usage.to_dict()) compute_utils.notify_about_volume_usage(context, vol_usage, self.host) def _detach_volume(self, context, bdm, instance, destroy_bdm=True, attachment_id=None): """Detach a volume from an instance. :param context: security context :param bdm: nova.objects.BlockDeviceMapping volume bdm to detach :param instance: the Instance object to detach the volume from :param destroy_bdm: if True, the corresponding BDM entry will be marked as deleted. Disabling this is useful for operations like rebuild, when we don't want to destroy BDM :param attachment_id: The volume attachment_id for the given instance and volume. 
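For volumes attached with the legacy (pre-3.44) Cinder flow this is
typically None, in which case the driver BDM falls back to the old
terminate-connection style cleanup.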
""" volume_id = bdm.volume_id compute_utils.notify_about_volume_attach_detach( context, instance, self.host, action=fields.NotificationAction.VOLUME_DETACH, phase=fields.NotificationPhase.START, volume_id=volume_id) self._notify_volume_usage_detach(context, instance, bdm) LOG.info('Detaching volume %(volume_id)s', {'volume_id': volume_id}, instance=instance) driver_bdm = driver_block_device.convert_volume(bdm) driver_bdm.detach(context, instance, self.volume_api, self.driver, attachment_id=attachment_id, destroy_bdm=destroy_bdm) info = dict(volume_id=volume_id) self._notify_about_instance_usage( context, instance, "volume.detach", extra_usage_info=info) compute_utils.notify_about_volume_attach_detach( context, instance, self.host, action=fields.NotificationAction.VOLUME_DETACH, phase=fields.NotificationPhase.END, volume_id=volume_id) if 'tag' in bdm and bdm.tag: self._delete_disk_metadata(instance, bdm) if destroy_bdm: bdm.destroy() def _delete_disk_metadata(self, instance, bdm): for device in instance.device_metadata.devices: if isinstance(device, objects.DiskMetadata): if 'serial' in device: if device.serial == bdm.volume_id: instance.device_metadata.devices.remove(device) instance.save() break else: # NOTE(artom) We log the entire device object because all # fields are nullable and may not be set LOG.warning('Unable to determine whether to clean up ' 'device metadata for disk %s', device, instance=instance) @wrap_exception() @wrap_instance_event(prefix='compute') @wrap_instance_fault def detach_volume(self, context, volume_id, instance, attachment_id): """Detach a volume from an instance. :param context: security context :param volume_id: the volume id :param instance: the Instance object to detach the volume from :param attachment_id: The volume attachment_id for the given instance and volume. """ @utils.synchronized(instance.uuid) def do_detach_volume(context, volume_id, instance, attachment_id): bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( context, volume_id, instance.uuid) self._detach_volume(context, bdm, instance, attachment_id=attachment_id) do_detach_volume(context, volume_id, instance, attachment_id) def _init_volume_connection(self, context, new_volume, old_volume_id, connector, bdm, new_attachment_id, mountpoint): new_volume_id = new_volume['id'] if new_attachment_id is None: # We're dealing with an old-style attachment so initialize the # connection so we can get the connection_info. new_cinfo = self.volume_api.initialize_connection(context, new_volume_id, connector) else: # Check for multiattach on the new volume and if True, check to # see if the virt driver supports multiattach. # TODO(mriedem): This is copied from DriverVolumeBlockDevice # and should be consolidated into some common code at some point. vol_multiattach = new_volume.get('multiattach', False) virt_multiattach = self.driver.capabilities.get( 'supports_multiattach', False) if vol_multiattach and not virt_multiattach: raise exception.MultiattachNotSupportedByVirtDriver( volume_id=new_volume_id) # This is a new style attachment and the API created the new # volume attachment and passed the id to the compute over RPC. # At this point we need to update the new volume attachment with # the host connector, which will give us back the new attachment # connection_info. new_cinfo = self.volume_api.attachment_update( context, new_attachment_id, connector, mountpoint)['connection_info'] if vol_multiattach: # This will be used by the volume driver to determine the # proper disk configuration. 
new_cinfo['multiattach'] = True old_cinfo = jsonutils.loads(bdm['connection_info']) if old_cinfo and 'serial' not in old_cinfo: old_cinfo['serial'] = old_volume_id # NOTE(lyarwood): serial is not always present in the returned # connection_info so set it if it is missing as we do in # DriverVolumeBlockDevice.attach(). if 'serial' not in new_cinfo: new_cinfo['serial'] = new_volume_id return (old_cinfo, new_cinfo) def _swap_volume(self, context, instance, bdm, connector, old_volume_id, new_volume, resize_to, new_attachment_id, is_cinder_migration): new_volume_id = new_volume['id'] mountpoint = bdm['device_name'] failed = False new_cinfo = None try: old_cinfo, new_cinfo = self._init_volume_connection( context, new_volume, old_volume_id, connector, bdm, new_attachment_id, mountpoint) # NOTE(lyarwood): The Libvirt driver, the only virt driver # currently implementing swap_volume, will modify the contents of # new_cinfo when connect_volume is called. This is then saved to # the BDM in swap_volume for future use outside of this flow. msg = ("swap_volume: Calling driver volume swap with " "connection infos: new: %(new_cinfo)s; " "old: %(old_cinfo)s" % {'new_cinfo': new_cinfo, 'old_cinfo': old_cinfo}) # Both new and old info might contain password LOG.debug(strutils.mask_password(msg), instance=instance) self.driver.swap_volume(context, old_cinfo, new_cinfo, instance, mountpoint, resize_to) if new_attachment_id: self.volume_api.attachment_complete(context, new_attachment_id) msg = ("swap_volume: Driver volume swap returned, new " "connection_info is now : %(new_cinfo)s" % {'new_cinfo': new_cinfo}) LOG.debug(strutils.mask_password(msg)) except Exception as ex: failed = True with excutils.save_and_reraise_exception(): compute_utils.notify_about_volume_swap( context, instance, self.host, fields.NotificationPhase.ERROR, old_volume_id, new_volume_id, ex) if new_cinfo: msg = ("Failed to swap volume %(old_volume_id)s " "for %(new_volume_id)s") LOG.exception(msg, {'old_volume_id': old_volume_id, 'new_volume_id': new_volume_id}, instance=instance) else: msg = ("Failed to connect to volume %(volume_id)s " "with volume at %(mountpoint)s") LOG.exception(msg, {'volume_id': new_volume_id, 'mountpoint': bdm['device_name']}, instance=instance) # The API marked the volume as 'detaching' for the old volume # so we need to roll that back so the volume goes back to # 'in-use' state. self.volume_api.roll_detaching(context, old_volume_id) if new_attachment_id is None: # The API reserved the new volume so it would be in # 'attaching' status, so we need to unreserve it so it # goes back to 'available' status. self.volume_api.unreserve_volume(context, new_volume_id) else: # This is a new style attachment for the new volume, which # was created in the API. We just need to delete it here # to put the new volume back into 'available' status. self.volume_api.attachment_delete( context, new_attachment_id) finally: # TODO(mriedem): This finally block is terribly confusing and is # trying to do too much. We should consider removing the finally # block and move whatever needs to happen on success and failure # into the blocks above for clarity, even if it means a bit of # redundant code. conn_volume = new_volume_id if failed else old_volume_id if new_cinfo: LOG.debug("swap_volume: removing Cinder connection " "for volume %(volume)s", {'volume': conn_volume}, instance=instance) if bdm.attachment_id is None: # This is the pre-3.44 flow for new-style volume # attachments so just terminate the connection. 
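# (In other words bdm.attachment_id is None, so this volume was attached via
# the legacy flow that predates the 3.44 attachment API; there is no
# attachment record to delete, hence terminate_connection is the cleanup.)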
self.volume_api.terminate_connection(context, conn_volume, connector) else: # This is a new style volume attachment. If we failed, then # the new attachment was already deleted above in the # exception block and we have nothing more to do here. If # swap_volume was successful in the driver, then we need to # "detach" the original attachment by deleting it. if not failed: self.volume_api.attachment_delete( context, bdm.attachment_id) # Need to make some decisions based on whether this was # a Cinder initiated migration or not. The callback to # migration completion isn't needed in the case of a # nova initiated simple swap of two volume # "volume-update" call so skip that. The new attachment # scenarios will give us a new attachment record and # that's what we want. if bdm.attachment_id and not is_cinder_migration: # we don't callback to cinder comp_ret = {'save_volume_id': new_volume_id} else: # NOTE(lyarwood): The following call to # os-migrate-volume-completion returns a dict containing # save_volume_id, this volume id has two possible values : # 1. old_volume_id if we are migrating (retyping) volumes # 2. new_volume_id if we are swapping between two existing # volumes # This volume id is later used to update the volume_id and # connection_info['serial'] of the BDM. comp_ret = self.volume_api.migrate_volume_completion( context, old_volume_id, new_volume_id, error=failed) LOG.debug("swap_volume: Cinder migrate_volume_completion " "returned: %(comp_ret)s", {'comp_ret': comp_ret}, instance=instance) return (comp_ret, new_cinfo) @wrap_exception() @wrap_instance_event(prefix='compute') @wrap_instance_fault def swap_volume(self, context, old_volume_id, new_volume_id, instance, new_attachment_id): """Replace the old volume with the new volume within the active server :param context: User request context :param old_volume_id: Original volume id :param new_volume_id: New volume id being swapped to :param instance: Instance with original_volume_id attached :param new_attachment_id: ID of the new attachment for new_volume_id """ @utils.synchronized(instance.uuid) def _do_locked_swap_volume(context, old_volume_id, new_volume_id, instance, new_attachment_id): self._do_swap_volume(context, old_volume_id, new_volume_id, instance, new_attachment_id) _do_locked_swap_volume(context, old_volume_id, new_volume_id, instance, new_attachment_id) def _do_swap_volume(self, context, old_volume_id, new_volume_id, instance, new_attachment_id): """Replace the old volume with the new volume within the active server :param context: User request context :param old_volume_id: Original volume id :param new_volume_id: New volume id being swapped to :param instance: Instance with original_volume_id attached :param new_attachment_id: ID of the new attachment for new_volume_id """ context = context.elevated() compute_utils.notify_about_volume_swap( context, instance, self.host, fields.NotificationPhase.START, old_volume_id, new_volume_id) bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( context, old_volume_id, instance.uuid) connector = self.driver.get_volume_connector(instance) resize_to = 0 old_volume = self.volume_api.get(context, old_volume_id) # Yes this is a tightly-coupled state check of what's going on inside # cinder, but we need this while we still support old (v1/v2) and # new style attachments (v3.44). Once we drop support for old style # attachments we could think about cleaning up the cinder-initiated # swap volume API flows. 
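        # Illustrative example (field names assumed from the cinder API,
        # not an exhaustive schema): during a cinder-initiated migration or
        # retype, the old volume returned by volume_api.get() carries
        # something like {'id': ..., 'size': 1,
        # 'migration_status': 'migrating', ...}, while a plain nova
        # volume-update (swap) request usually has no active
        # migration_status.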
is_cinder_migration = False if 'migration_status' in old_volume: is_cinder_migration = old_volume['migration_status'] == 'migrating' old_vol_size = old_volume['size'] new_volume = self.volume_api.get(context, new_volume_id) new_vol_size = new_volume['size'] if new_vol_size > old_vol_size: resize_to = new_vol_size LOG.info('Swapping volume %(old_volume)s for %(new_volume)s', {'old_volume': old_volume_id, 'new_volume': new_volume_id}, instance=instance) comp_ret, new_cinfo = self._swap_volume(context, instance, bdm, connector, old_volume_id, new_volume, resize_to, new_attachment_id, is_cinder_migration) # NOTE(lyarwood): Update the BDM with the modified new_cinfo and # correct volume_id returned by Cinder. save_volume_id = comp_ret['save_volume_id'] # NOTE(lyarwood): Overwrite the possibly stale serial and volume_id in # the connection_info with the volume_id returned from Cinder. This # could be the case during a volume migration where the new_cinfo here # refers to the temporary volume *before* Cinder renames it to the # original volume UUID at the end of the migration. new_cinfo['serial'] = save_volume_id new_cinfo['volume_id'] = save_volume_id if 'data' in new_cinfo: new_cinfo['data']['volume_id'] = save_volume_id values = { 'connection_info': jsonutils.dumps(new_cinfo), 'source_type': 'volume', 'destination_type': 'volume', 'snapshot_id': None, 'volume_id': save_volume_id, 'no_device': None} if resize_to: values['volume_size'] = resize_to if new_attachment_id is not None: # This was a volume swap for a new-style attachment so we # need to update the BDM attachment_id for the new attachment. values['attachment_id'] = new_attachment_id LOG.debug("swap_volume: Updating volume %(volume_id)s BDM record with " "%(updates)s", {'volume_id': bdm.volume_id, 'updates': values}, instance=instance) bdm.update(values) bdm.save() compute_utils.notify_about_volume_swap( context, instance, self.host, fields.NotificationPhase.END, old_volume_id, new_volume_id) @wrap_exception() def remove_volume_connection( self, context, volume_id, instance, delete_attachment=False): """Remove the volume connection on this host Detach the volume from this instance on this host, and if this is the cinder v2 flow, call cinder to terminate the connection. """ try: # NOTE(mriedem): If the BDM was just passed directly we would not # need to do this DB query, but this is an RPC interface so # changing that requires some care. bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( context, volume_id, instance.uuid) # NOTE(mriedem): Normally we would pass delete_attachment=True to # _remove_volume_connection to delete a v3 style volume attachment, # but this method is RPC called from _rollback_live_migration which # already deletes the attachment, so because of that tight coupling # we cannot simply delete a v3 style attachment here without # needing to do some behavior modification of that # _rollback_live_migration flow which gets messy. self._remove_volume_connection( context, bdm, instance, delete_attachment) except exception.NotFound: pass def _remove_volume_connection(self, context, bdm, instance, delete_attachment=False): """Remove the volume connection on this host Detach the volume from this instance on this host. 
:param context: nova auth request context :param bdm: BlockDeviceMapping object for a volume attached to the instance :param instance: Instance object with a volume attached represented by ``bdm`` :param delete_attachment: If ``bdm.attachment_id`` is not None the attachment was made as a cinder v3 style attachment and if True, then deletes the volume attachment, otherwise just terminates the connection for a cinder legacy style connection. """ driver_bdm = driver_block_device.convert_volume(bdm) driver_bdm.driver_detach(context, instance, self.volume_api, self.driver) if bdm.attachment_id is None: # cinder v2 api flow connector = self.driver.get_volume_connector(instance) self.volume_api.terminate_connection(context, bdm.volume_id, connector) elif delete_attachment: # cinder v3 api flow self.volume_api.attachment_delete(context, bdm.attachment_id) def _deallocate_port_resource_for_instance( self, context: nova.context.RequestContext, instance: 'objects.Instance', port_id: str, port_allocation: ty.Dict[str, ty.Dict[str, ty.Dict[str, int]]], ) -> None: if not port_allocation: return try: client = self.reportclient client.remove_resources_from_instance_allocation( context, instance.uuid, port_allocation) except Exception as ex: # We always raise here as it is not a race condition where # somebody has already deleted the port we want to cleanup. # Here we see that the port exists, the allocation exists, # but we cannot clean it up so we will actually leak # allocations. with excutils.save_and_reraise_exception(): LOG.warning( 'Failed to remove resource allocation of port %(port_id)s ' 'for instance. Error: %(error)s', {'port_id': port_id, 'error': ex}, instance=instance) def _deallocate_port_for_instance( self, context, instance, port_id, raise_on_failure=False, pci_device=None): try: result = self.network_api.deallocate_port_for_instance( context, instance, port_id) __, port_allocation = result except Exception as ex: with excutils.save_and_reraise_exception( reraise=raise_on_failure): LOG.warning('Failed to deallocate port %(port_id)s ' 'for instance. Error: %(error)s', {'port_id': port_id, 'error': ex}, instance=instance) else: if pci_device: self.rt.unclaim_pci_devices(context, pci_device, instance) instance.remove_pci_device_and_request(pci_device) # Deallocate the resources in placement that were used by the # detached port. self._deallocate_port_resource_for_instance( context, instance, port_id, port_allocation) def _claim_pci_device_for_interface_attach( self, context: nova.context.RequestContext, instance: 'objects.Instance', pci_reqs: 'objects.InstancePCIRequests', ) -> ty.Optional['objects.PciDevice']: """Claim PCI devices if there are PCI requests :param context: nova.context.RequestContext :param instance: the objects.Instance to where the interface is being attached :param pci_reqs: A InstancePCIRequests object describing the needed PCI devices :raises InterfaceAttachPciClaimFailed: if the PCI device claim fails :returns: An objects.PciDevice describing the claimed PCI device for the interface or None if no device is requested """ if not pci_reqs.requests: return None try: devices = self.rt.claim_pci_devices( context, pci_reqs, instance.numa_topology) except exception.PciDeviceRequestFailed: LOG.info('Failed to claim PCI devices during interface attach ' 'for PCI request %s', pci_reqs, instance=instance) raise exception.InterfaceAttachPciClaimFailed( instance_uuid=instance.uuid) # NOTE(gibi): We assume that maximum one PCI devices is attached per # interface attach request. 
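        # Put differently (illustrative, not enforced here): a single
        # neutron port maps to at most one InstancePCIRequest with count=1,
        # so rt.claim_pci_devices() is expected to return a single device
        # and taking devices[0] below is sufficient.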
device = devices[0] instance.pci_devices.objects.append(device) return device def _allocate_port_resource_for_instance( self, context: nova.context.RequestContext, instance: 'objects.Instance', pci_reqs: 'objects.InstancePCIRequests', request_groups: ty.List['objects.RequestGroup'], request_level_params: 'objects.RequestLevelParams', ) -> ty.Tuple[ty.Optional[ty.Dict[str, ty.List[str]]], ty.Optional[ty.Dict[str, ty.Dict[str, ty.Dict[str, int]]]]]: """Allocate resources for the request in placement :param context: nova.context.RequestContext :param instance: the objects.Instance to where the interface is being attached :param pci_reqs: A list of InstancePCIRequest objects describing the needed PCI devices :param request_groups: A list of RequestGroup objects describing the resources the port requests from placement :param request_level_params: A RequestLevelParams object describing the non group specific request of the port. :raises InterfaceAttachResourceAllocationFailed: if we failed to allocate resource in placement for the request :returns: A tuple of provider mappings and allocated resources or (None, None) if no resource allocation was needed for the request """ if not request_groups: return None, None # restrict the resource request to the current compute node. The # compute node uuid is the uuid of the root provider of the node in # placement compute_node_uuid = objects.ComputeNode.get_by_nodename( context, instance.node).uuid # we can have multiple request groups, it would be enough to restrict # only one of them to the compute tree but for symmetry we restrict # all of them for request_group in request_groups: request_group.in_tree = compute_node_uuid # NOTE(gibi): group policy is mandatory in a resource request if there # are multiple groups. The policy can only come from the flavor today # and a new flavor is not provided with an interface attach request and # the instance's current flavor might not have a policy. Still we are # attaching a single port where currently the two possible groups # (one for bandwidth and one for packet rate) will always be allocated # from different providers. So both possible policies (none, isolated) # are always fulfilled for this single port. We still has to specify # one so we specify the least restrictive now. rr = scheduler_utils.ResourceRequest.from_request_groups( request_groups, request_level_params, group_policy='none') res = self.reportclient.get_allocation_candidates(context, rr) alloc_reqs, provider_sums, version = res if not alloc_reqs: # no allocation candidates available, we run out of free resources raise exception.InterfaceAttachResourceAllocationFailed( instance_uuid=instance.uuid) # select one of the candidates and update the instance # allocation # TODO(gibi): We could loop over all possible candidates # if the first one selected here does not work due to race or due # to not having free PCI devices. However the latter is only # detected later in the interface attach code path. alloc_req = alloc_reqs[0] resources = alloc_req['allocations'] provider_mappings = alloc_req['mappings'] try: self.reportclient.add_resources_to_instance_allocation( context, instance.uuid, resources) except exception.AllocationUpdateFailed as e: # We lost a race. 
We could retry another candidate raise exception.InterfaceAttachResourceAllocationFailed( instance_uuid=instance.uuid) from e except ( exception.ConsumerAllocationRetrievalFailed, keystone_exception.ClientException, ) as e: # These are non-recoverable errors so we should not retry raise exception.InterfaceAttachResourceAllocationFailed( instance_uuid=instance.uuid) from e try: compute_utils.update_pci_request_with_placement_allocations( context, self.reportclient, pci_reqs.requests, provider_mappings, ) except ( exception.AmbiguousResourceProviderForPCIRequest, exception.UnexpectedResourceProviderNameForPCIRequest ): # These are programming errors. So we clean up an re-raise to let # the request fail with excutils.save_and_reraise_exception(): self.reportclient.remove_resources_from_instance_allocation( context, instance.uuid, resources) return provider_mappings, resources # TODO(mriedem): There are likely race failures which can result in # NotFound and QuotaError exceptions getting traced as well. @messaging.expected_exceptions( # Do not log a traceback for user errors. We use Invalid generically # since this method can raise lots of different exceptions: # AttachInterfaceNotSupported # NetworkInterfaceTaggedAttachNotSupported # NetworkAmbiguous # PortNotUsable # PortInUse # PortNotUsableDNS # AttachSRIOVPortNotSupported # NetworksWithQoSPolicyNotSupported # InterfaceAttachResourceAllocationFailed exception.Invalid) @wrap_exception() @wrap_instance_event(prefix='compute') @wrap_instance_fault def attach_interface(self, context, instance, network_id, port_id, requested_ip, tag): """Use hotplug to add an network adapter to an instance.""" lockname = 'interface-%s-%s' % (instance.uuid, port_id) @utils.synchronized(lockname) def do_attach_interface(context, instance, network_id, port_id, requested_ip, tag): return self._attach_interface(context, instance, network_id, port_id, requested_ip, tag) return do_attach_interface(context, instance, network_id, port_id, requested_ip, tag) def _attach_interface(self, context, instance, network_id, port_id, requested_ip, tag): if not self.driver.capabilities.get('supports_attach_interface', False): raise exception.AttachInterfaceNotSupported( instance_uuid=instance.uuid) if (tag and not self.driver.capabilities.get('supports_tagged_attach_interface', False)): raise exception.NetworkInterfaceTaggedAttachNotSupported() compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.INTERFACE_ATTACH, phase=fields.NotificationPhase.START) bind_host_id = self.driver.network_binding_host_id(context, instance) requested_networks = objects.NetworkRequestList( objects=[ objects.NetworkRequest( network_id=network_id, port_id=port_id, address=requested_ip, tag=tag, ) ] ) if len(requested_networks) != 1: LOG.warning( "Interface attach only supports one interface per attach " "request", instance=instance) raise exception.InterfaceAttachFailed(instance_uuid=instance.uuid) pci_numa_affinity_policy = hardware.get_pci_numa_policy_constraint( instance.flavor, instance.image_meta) pci_reqs = objects.InstancePCIRequests( requests=[], instance_uuid=instance.uuid) _, request_groups, req_lvl_params = ( self.network_api.create_resource_requests( context, requested_networks, pci_reqs, affinity_policy=pci_numa_affinity_policy ) ) result = self._allocate_port_resource_for_instance( context, instance, pci_reqs, request_groups, req_lvl_params) provider_mappings, resources = result try: pci_device = 
self._claim_pci_device_for_interface_attach( context, instance, pci_reqs) except exception.InterfaceAttachPciClaimFailed: with excutils.save_and_reraise_exception(): if resources: # TODO(gibi): Instead of giving up we could try another # allocation candidate from _allocate_resources() if any self._deallocate_port_resource_for_instance( context, instance, port_id, resources) instance.pci_requests.requests.extend(pci_reqs.requests) network_info = self.network_api.allocate_for_instance( context, instance, requested_networks, bind_host_id=bind_host_id, resource_provider_mapping=provider_mappings, ) if len(network_info) != 1: LOG.error('allocate_for_instance returned %(ports)s ' 'ports', {'ports': len(network_info)}) # TODO(elod.illes): an instance.interface_attach.error notification # should be sent here raise exception.InterfaceAttachFailed( instance_uuid=instance.uuid) image_meta = objects.ImageMeta.from_instance(instance) try: self.driver.attach_interface(context, instance, image_meta, network_info[0]) except exception.NovaException as ex: port_id = network_info[0].get('id') LOG.warning("attach interface failed , try to deallocate " "port %(port_id)s, reason: %(msg)s", {'port_id': port_id, 'msg': ex}, instance=instance) self._deallocate_port_for_instance( context, instance, port_id, pci_device=pci_device) compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.INTERFACE_ATTACH, phase=fields.NotificationPhase.ERROR, exception=ex) raise exception.InterfaceAttachFailed( instance_uuid=instance.uuid) if pci_device: # NOTE(gibi): The _claim_pci_device_for_interface_attach() call # found a pci device but it only marked the device as claimed. The # periodic update_available_resource would move the device to # allocated state. But as driver.attach_interface() has been # succeeded we now know that the interface is also allocated # (used by) to the instance. So make sure the pci tracker also # tracks this device as allocated. This way we can avoid a possible # race condition when a detach arrives for a device that is only # in claimed state. self.rt.allocate_pci_devices_for_instance(context, instance) instance.save() compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.INTERFACE_ATTACH, phase=fields.NotificationPhase.END) return network_info[0] @wrap_exception() @wrap_instance_event(prefix='compute') @wrap_instance_fault def detach_interface(self, context, instance, port_id): """Detach a network adapter from an instance.""" lockname = 'interface-%s-%s' % (instance.uuid, port_id) @utils.synchronized(lockname) def do_detach_interface(context, instance, port_id): self._detach_interface(context, instance, port_id) do_detach_interface(context, instance, port_id) def _detach_interface(self, context, instance, port_id): # NOTE(aarents): we need to refresh info cache from DB here, # as previous detach/attach lock holder just updated it. 
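        # Sketch of why the refresh below matters: the info cache is a
        # DB-backed copy of the instance's VIFs, e.g. roughly
        #   [{'id': '<port uuid>', 'address': 'fa:16:3e:..', ...}, ...]
        # and a concurrent attach/detach that just released the lock may
        # have changed it, so the in-memory copy could miss (or still
        # contain) the port being detached.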
compute_utils.refresh_info_cache_for_instance(context, instance) network_info = instance.info_cache.network_info condemned = None for vif in network_info: if vif['id'] == port_id: condemned = vif break if condemned is None: raise exception.PortNotFound(_("Port %s is not " "attached") % port_id) pci_req = pci_req_module.get_instance_pci_request_from_vif( context, instance, condemned) pci_device = None if pci_req: pci_devices = [pci_device for pci_device in instance.pci_devices.objects if pci_device.request_id == pci_req.request_id] if not pci_devices: LOG.warning( "Detach interface failed, port_id=%(port_id)s, " "reason: PCI device not found for PCI request %(pci_req)s", {'port_id': port_id, 'pci_req': pci_req}) raise exception.InterfaceDetachFailed( instance_uuid=instance.uuid) pci_device = pci_devices[0] compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.INTERFACE_DETACH, phase=fields.NotificationPhase.START) try: self.driver.detach_interface(context, instance, condemned) except exception.NovaException as ex: # If the instance was deleted before the interface was detached, # just log it at debug. log_level = (logging.DEBUG if isinstance(ex, exception.InstanceNotFound) else logging.WARNING) LOG.log(log_level, "Detach interface failed, port_id=%(port_id)s, reason: " "%(msg)s", {'port_id': port_id, 'msg': ex}, instance=instance) raise exception.InterfaceDetachFailed(instance_uuid=instance.uuid) else: self._deallocate_port_for_instance( context, instance, port_id, raise_on_failure=True, pci_device=pci_device) instance.save() compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.INTERFACE_DETACH, phase=fields.NotificationPhase.END) def _get_compute_info(self, context, host): return objects.ComputeNode.get_first_node_by_host_for_old_compat( context, host) @wrap_exception() def check_instance_shared_storage(self, ctxt, data): """Check if the instance files are shared :param ctxt: security context :param data: result of driver.check_instance_shared_storage_local Returns True if instance disks located on shared storage and False otherwise. """ return self.driver.check_instance_shared_storage_remote(ctxt, data) def _dest_can_numa_live_migrate(self, dest_check_data, migration): # TODO(artom) If we have a libvirt driver we expect it to set # dst_supports_numa_live_migration, but we have to remove it if we # did not get a migration from the conductor, indicating that it # cannot send RPC 5.3. This check can be removed in RPC 6.0. if ('dst_supports_numa_live_migration' in dest_check_data and dest_check_data.dst_supports_numa_live_migration and not migration): delattr(dest_check_data, 'dst_supports_numa_live_migration') return dest_check_data @wrap_exception() @wrap_instance_event(prefix='compute') @wrap_instance_fault def check_can_live_migrate_destination(self, ctxt, instance, block_migration, disk_over_commit, migration, limits): """Check if it is possible to execute live migration. This runs checks on the destination host, and then calls back to the source host to check the results. :param context: security context :param instance: dict of instance data :param block_migration: if true, prepare for block migration if None, calculate it in driver :param disk_over_commit: if true, allow disk over commit if None, ignore disk usage checking :param migration: objects.Migration object for this live migration. :param limits: objects.SchedulerLimits object for this live migration. 
:returns: a LiveMigrateData object (hypervisor-dependent) """ # Error out if this host cannot accept the new instance due # to anti-affinity. This check at this moment is not very accurate, as # multiple requests may be happening concurrently and miss the lock, # but when it works it provides a better user experience by failing # earlier. Also, it should be safe to explode here, error becomes # NoValidHost and instance status remains ACTIVE. try: self._validate_instance_group_policy(ctxt, instance) except exception.RescheduledException as e: msg = ("Failed to validate instance group policy " "due to: {}".format(e)) raise exception.MigrationPreCheckError(reason=msg) src_compute_info = obj_base.obj_to_primitive( self._get_compute_info(ctxt, instance.host)) dst_compute_info = obj_base.obj_to_primitive( self._get_compute_info(ctxt, self.host)) dest_check_data = self.driver.check_can_live_migrate_destination(ctxt, instance, src_compute_info, dst_compute_info, block_migration, disk_over_commit) dest_check_data = self._dest_can_numa_live_migrate(dest_check_data, migration) LOG.debug('destination check data is %s', dest_check_data) try: allocs = self.reportclient.get_allocations_for_consumer( ctxt, instance.uuid) migrate_data = self.compute_rpcapi.check_can_live_migrate_source( ctxt, instance, dest_check_data) if ('src_supports_numa_live_migration' in migrate_data and migrate_data.src_supports_numa_live_migration): migrate_data = self._live_migration_claim( ctxt, instance, migrate_data, migration, limits, allocs) elif 'dst_supports_numa_live_migration' in dest_check_data: LOG.info('Destination was ready for NUMA live migration, ' 'but source is either too old, or is set to an ' 'older upgrade level.', instance=instance) # At this point, we know that this compute node (destination) # potentially has enough live-migratable PCI devices, based on the # fact that the scheduler selected this host as the destination. # The claim code below is the decisive step to move from a # potentially correct to a known to be correct destination. flavored_pci_reqs = [ pci_req for pci_req in instance.pci_requests.requests if pci_req.source == objects.InstancePCIRequest.FLAVOR_ALIAS ] migrate_data.pci_dev_map_src_dst = self._flavor_based_pci_claim( ctxt, instance, flavored_pci_reqs ) if self.network_api.has_port_binding_extension(ctxt): # Create migrate_data vifs if not provided by driver. if 'vifs' not in migrate_data: migrate_data.vifs = ( migrate_data_obj. VIFMigrateData.create_skeleton_migrate_vifs( instance.get_network_info())) # Claim PCI devices for VIFs on destination (if needed) port_id_to_pci = self._claim_pci_for_instance_vifs( ctxt, instance) # Update migrate VIFs with the newly claimed PCI devices self._update_migrate_vifs_profile_with_pci( migrate_data.vifs, port_id_to_pci) # This should always be the last check as we persist some internal # dictionary value in the libvirt driver. 
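            # Rough order of operations in this method, for orientation
            # (a sketch, not an API contract):
            #   1. destination driver pre-checks
            #   2. RPC back to the source: check_can_live_migrate_source()
            #   3. optional NUMA claim and PCI claims on this host
            #   4. the final driver check below, which may persist data
            #      into migrate_data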
migrate_data = self.driver.check_source_migrate_data_at_dest( ctxt, instance, migrate_data, migration, limits, allocs) finally: self.driver.cleanup_live_migration_destination_check(ctxt, dest_check_data) return migrate_data def _flavor_based_pci_claim(self, ctxt, instance, pci_requests): if not pci_requests: # Return an empty dict as migrate_data.pci_dev_map_src_dst # cannot be None return {} # Create an InstancePCIRequests and claim against PCI resource # tracker for out dest instance pci_requests = objects.InstancePCIRequests( requests=pci_requests, instance_uuid=instance.uuid) src_devs = objects.PciDeviceList.get_by_instance_uuid( ctxt, instance.uuid ) claimed_dst_devs = self._claim_from_pci_reqs( ctxt, instance, pci_requests ) pci_dev_map_src_dst = {} for req_id in ( pci_req.request_id for pci_req in pci_requests.requests ): req_src_addr = [ dev.address for dev in src_devs if dev.request_id == req_id ] req_claimed_dst__addr = [ dev.address for dev in claimed_dst_devs if dev.request_id == req_id ] # This still depends on ordering, but only within the scope # of the same InstancePCIRequest. The only case where multiple # devices per compute node are allocated for a single request # is when req.count > 1. In that case, the allocated devices are # interchangeable since they match the same specification from # the same InstancePCIRequest. # # Therefore, this ordering dependency is acceptable. pci_dev_map_src_dst.update(dict( zip(req_src_addr, req_claimed_dst__addr) )) return pci_dev_map_src_dst def _claim_from_pci_reqs(self, ctxt, instance, pci_requests): # if we are called during the live migration with NUMA topology # support the PCI claim needs to consider the destination NUMA # topology that is then stored in the migration_context dest_topo = None if instance.migration_context: dest_topo = instance.migration_context.new_numa_topology claimed_pci_devices_objs = self.rt.claim_pci_devices( ctxt, pci_requests, dest_topo) for pci_dev in claimed_pci_devices_objs: LOG.debug("PCI device: %s Claimed on destination node", pci_dev.address) return claimed_pci_devices_objs def _live_migration_claim(self, ctxt, instance, migrate_data, migration, limits, allocs): """Runs on the destination and does a resources claim, if necessary. Currently, only NUMA live migrations require it. :param ctxt: Request context :param instance: The Instance being live migrated :param migrate_data: The MigrateData object for this live migration :param migration: The Migration object for this live migration :param limits: The SchedulerLimits object for this live migration :returns: migrate_data with dst_numa_info set if necessary """ try: # NOTE(artom) We might have gotten here from _find_destination() in # the conductor live migrate task. At that point, # migration.dest_node is not set yet (nor should it be, we're still # looking for a destination, after all). Therefore, we cannot use # migration.dest_node here and must use self._get_nodename(). claim = self.rt.live_migration_claim( ctxt, instance, self._get_nodename(instance), migration, limits, allocs) LOG.debug('Created live migration claim.', instance=instance) except exception.ComputeResourcesUnavailable as e: raise exception.MigrationPreCheckError( reason=e.format_message()) return self.driver.post_claim_migrate_data(ctxt, instance, migrate_data, claim) def _source_can_numa_live_migrate(self, ctxt, dest_check_data, source_check_data): # TODO(artom) Our virt driver may have told us that it supports NUMA # live migration. 
However, the following other conditions must be met # for a NUMA live migration to happen: # 1. We got a True dst_supports_numa_live_migration in # dest_check_data, indicating that the dest virt driver supports # NUMA live migration and that the conductor can send RPC 5.3 and # that the destination compute manager can receive it. # 2. Ourselves, the source, can send RPC 5.3. There's no # sentinel/parameter for this, so we just ask our rpcapi directly. # If any of these are not met, we need to remove the # src_supports_numa_live_migration flag from source_check_data to avoid # incorrectly initiating a NUMA live migration. # All of this can be removed in RPC 6.0/objects 2.0. can_numa_live_migrate = ( 'dst_supports_numa_live_migration' in dest_check_data and dest_check_data.dst_supports_numa_live_migration and self.compute_rpcapi.supports_numa_live_migration(ctxt)) if ('src_supports_numa_live_migration' in source_check_data and source_check_data.src_supports_numa_live_migration and not can_numa_live_migrate): delattr(source_check_data, 'src_supports_numa_live_migration') return source_check_data @wrap_exception() @wrap_instance_event(prefix='compute') @wrap_instance_fault def check_can_live_migrate_source(self, ctxt, instance, dest_check_data): """Check if it is possible to execute live migration. This checks if the live migration can succeed, based on the results from check_can_live_migrate_destination. :param ctxt: security context :param instance: dict of instance data :param dest_check_data: result of check_can_live_migrate_destination :returns: a LiveMigrateData object """ bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( ctxt, instance.uuid) is_volume_backed = compute_utils.is_volume_backed_instance( ctxt, instance, bdms) dest_check_data.is_volume_backed = is_volume_backed block_device_info = self._get_instance_block_device_info( ctxt, instance, refresh_conn_info=False, bdms=bdms) result = self.driver.check_can_live_migrate_source(ctxt, instance, dest_check_data, block_device_info) result = self._source_can_numa_live_migrate(ctxt, dest_check_data, result) LOG.debug('source check data is %s', result) return result @wrap_exception() @wrap_instance_event(prefix='compute') @wrap_instance_fault def pre_live_migration(self, context, instance, disk, migrate_data): """Preparations for live migration at dest host. :param context: security context :param instance: dict of instance data :param disk: disk info of instance :param migrate_data: A dict or LiveMigrateData object holding data required for live migration without shared storage. :returns: migrate_data containing additional migration info """ LOG.debug('pre_live_migration data is %s', migrate_data) # Error out if this host cannot accept the new instance due # to anti-affinity. At this point the migration is already in-progress, # so this is the definitive moment to abort due to the policy # violation. Also, it should be safe to explode here. The instance # status remains ACTIVE, migration status failed. 
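        # Concrete example of what can trip the check below (illustrative):
        # the instance is in a server group with an 'anti-affinity' policy
        # and another member of that group already runs on this host; in
        # that case validation raises and the migration is aborted before
        # any guest state or volume attachment is touched here.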
self._validate_instance_group_policy(context, instance) migrate_data.old_vol_attachment_ids = {} bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) network_info = self.network_api.get_instance_nw_info(context, instance) self._notify_about_instance_usage( context, instance, "live_migration.pre.start", network_info=network_info) compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.LIVE_MIGRATION_PRE, phase=fields.NotificationPhase.START, bdms=bdms) connector = None try: for bdm in bdms: if bdm.is_volume and bdm.attachment_id is not None: # This bdm uses the new cinder v3.44 API. # We will create a new attachment for this # volume on this migration destination host. The old # attachment will be deleted on the source host # when the migration succeeds. The old attachment_id # is stored in dict with the key being the bdm.volume_id # so it can be restored on rollback. # # Also note that attachment_update is not needed as we # are providing the connector in the create call. if connector is None: connector = self.driver.get_volume_connector(instance) attach_ref = self.volume_api.attachment_create( context, bdm.volume_id, bdm.instance_uuid, connector=connector, mountpoint=bdm.device_name) # save current attachment so we can detach it on success, # or restore it on a rollback. # NOTE(mdbooth): This data is no longer used by the source # host since change Ibe9215c0. We can't remove it until we # are sure the source host has been upgraded. migrate_data.old_vol_attachment_ids[bdm.volume_id] = \ bdm.attachment_id # update the bdm with the new attachment_id. bdm.attachment_id = attach_ref['id'] bdm.save() # Retrieve connection_info for the destination. Note that it is not # saved back to the database yet. block_device_info = self._get_instance_block_device_info( context, instance, refresh_conn_info=True, bdms=bdms) # The driver pre_live_migration will plug vifs and connect volumes # on the host try: migrate_data = self.driver.pre_live_migration( context, instance, block_device_info, network_info, disk, migrate_data) LOG.debug('driver pre_live_migration data is %s', migrate_data) # driver.pre_live_migration is what plugs vifs on the # destination host so now we can set the wait_for_vif_plugged # flag in the migrate_data object which the source compute will # use to determine if it should wait for a # 'network-vif-plugged' event from neutron before starting the # actual guest transfer in the hypervisor using_multiple_port_bindings = ( 'vifs' in migrate_data and migrate_data.vifs) migrate_data.wait_for_vif_plugged = ( CONF.compute.live_migration_wait_for_vif_plug and using_multiple_port_bindings ) # NOTE(tr3buchet): setup networks on destination host self.network_api.setup_networks_on_host(context, instance, self.host) # NOTE(lyarwood): The above call to driver.pre_live_migration # can result in the virt drivers attempting to stash additional # metadata into the connection_info of the underlying bdm. # Ensure this is saved to the database by calling .save() # against the driver BDMs we passed down via block_device_info. for driver_bdm in block_device_info['block_device_mapping']: driver_bdm.save() except Exception: # NOTE(melwitt): Try to disconnect any volumes which may have # been connected during driver pre_live_migration(). By the # time this error is received by the source host, BDM records # in the database will refer only to the source host. 
Detach # volumes while we still have connection_info about the # destination host. for driver_bdm in block_device_info['block_device_mapping']: driver_bdm.driver_detach( context, instance, self.volume_api, self.driver) # Re-raise to perform any remaining rollback actions. raise except Exception: # If we raise, migrate_data with the updated attachment ids # will not be returned to the source host for rollback. # So we need to rollback new attachments here. with excutils.save_and_reraise_exception(): old_attachments = migrate_data.old_vol_attachment_ids for bdm in bdms: if (bdm.is_volume and bdm.attachment_id is not None and bdm.volume_id in old_attachments): self.volume_api.attachment_delete(context, bdm.attachment_id) bdm.attachment_id = old_attachments[bdm.volume_id] bdm.save() # Volume connections are complete, tell cinder that all the # attachments have completed. for bdm in bdms: if bdm.is_volume and bdm.attachment_id is not None: self.volume_api.attachment_complete(context, bdm.attachment_id) self._notify_about_instance_usage( context, instance, "live_migration.pre.end", network_info=network_info) compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.LIVE_MIGRATION_PRE, phase=fields.NotificationPhase.END, bdms=bdms) LOG.debug('pre_live_migration result data is %s', migrate_data) return migrate_data @staticmethod def _neutron_failed_live_migration_callback(event_name, instance): msg = ('Neutron reported failure during live migration ' 'with %(event)s for instance %(uuid)s') msg_args = {'event': event_name, 'uuid': instance.uuid} if CONF.vif_plugging_is_fatal: raise exception.VirtualInterfacePlugException(msg % msg_args) LOG.error(msg, msg_args) @staticmethod def _get_neutron_events_for_live_migration(instance): # We don't generate events if CONF.vif_plugging_timeout=0 # meaning that the operator disabled using them. if CONF.vif_plugging_timeout: return [('network-vif-plugged', vif['id']) for vif in instance.get_network_info()] else: return [] def _cleanup_pre_live_migration(self, context, dest, instance, migration, migrate_data, source_bdms): """Helper method for when pre_live_migration fails Sets the migration status to "error" and rolls back the live migration setup on the destination host. :param context: The user request context. :type context: nova.context.RequestContext :param dest: The live migration destination hostname. :type dest: str :param instance: The instance being live migrated. :type instance: nova.objects.Instance :param migration: The migration record tracking this live migration. :type migration: nova.objects.Migration :param migrate_data: Data about the live migration, populated from the destination host. :type migrate_data: Subclass of nova.objects.LiveMigrateData :param source_bdms: BDMs prior to modification by the destination compute host. 
Set by _do_live_migration and not part of the callback interface, so this is never None """ self._set_migration_status(migration, 'error') # Make sure we set this for _rollback_live_migration() # so it can find it, as expected if it was called later migrate_data.migration = migration self._rollback_live_migration(context, instance, dest, migrate_data=migrate_data, source_bdms=source_bdms, pre_live_migration=True) def _do_pre_live_migration_from_source(self, context, dest, instance, block_migration, migration, migrate_data, source_bdms): """Prepares for pre-live-migration on the source host and calls dest Will setup a callback networking event handler (if configured) and then call the dest host's pre_live_migration method to prepare the dest host for live migration (plugs vifs, connect volumes, etc). _rollback_live_migration (on the source) will be called if pre_live_migration (on the dest) fails. :param context: nova auth request context for this operation :param dest: name of the destination compute service host :param instance: Instance object being live migrated :param block_migration: If true, prepare for block migration. :param migration: Migration object tracking this operation :param migrate_data: MigrateData object for this operation populated by the destination host compute driver as part of the check_can_live_migrate_destination call. :param source_bdms: BlockDeviceMappingList of BDMs currently attached to the instance from the source host. :returns: MigrateData object which is a modified version of the ``migrate_data`` argument from the compute driver on the dest host during the ``pre_live_migration`` call. :raises: MigrationError if waiting for the network-vif-plugged event timed out and is fatal. """ class _BreakWaitForInstanceEvent(Exception): """Used as a signal to stop waiting for the network-vif-plugged event when we discover that [compute]/live_migration_wait_for_vif_plug is not set on the destination. """ pass events = self._get_neutron_events_for_live_migration(instance) try: if ('block_migration' in migrate_data and migrate_data.block_migration): block_device_info = self._get_instance_block_device_info( context, instance, bdms=source_bdms) disk = self.driver.get_instance_disk_info( instance, block_device_info=block_device_info) else: disk = None deadline = CONF.vif_plugging_timeout error_cb = self._neutron_failed_live_migration_callback # In order to avoid a race with the vif plugging that the virt # driver does on the destination host, we register our events # to wait for before calling pre_live_migration. Then if the # dest host reports back that we shouldn't wait, we can break # out of the context manager using _BreakWaitForInstanceEvent. with self.virtapi.wait_for_instance_event( instance, events, deadline=deadline, error_callback=error_cb): with timeutils.StopWatch() as timer: # TODO(mriedem): The "block_migration" parameter passed # here is not actually used in pre_live_migration but it # is not optional in the RPC interface either. migrate_data = self.compute_rpcapi.pre_live_migration( context, instance, block_migration, disk, dest, migrate_data) LOG.info('Took %0.2f seconds for pre_live_migration on ' 'destination host %s.', timer.elapsed(), dest, instance=instance) wait_for_vif_plugged = ( 'wait_for_vif_plugged' in migrate_data and migrate_data.wait_for_vif_plugged) if events and not wait_for_vif_plugged: raise _BreakWaitForInstanceEvent except _BreakWaitForInstanceEvent: if events: LOG.debug('Not waiting for events after pre_live_migration: ' '%s. 
', events, instance=instance) except exception.VirtualInterfacePlugException: with excutils.save_and_reraise_exception(): LOG.exception('Failed waiting for network virtual interfaces ' 'to be plugged on the destination host %s.', dest, instance=instance) self._cleanup_pre_live_migration( context, dest, instance, migration, migrate_data, source_bdms) except exception.InstanceEventTimeout: # We only get here if wait_for_vif_plugged is True which means # live_migration_wait_for_vif_plug=True on the destination host. msg = ( 'Timed out waiting for events: %(events)s. If these timeouts ' 'are a persistent issue it could mean the networking backend ' 'on host %(dest)s does not support sending these events ' 'unless there are port binding host changes which does not ' 'happen at this point in the live migration process. You may ' 'need to disable the live_migration_wait_for_vif_plug option ' 'on host %(dest)s.') subs = {'events': events, 'dest': dest} LOG.warning(msg, subs, instance=instance) if CONF.vif_plugging_is_fatal: self._cleanup_pre_live_migration( context, dest, instance, migration, migrate_data, source_bdms) raise exception.MigrationError(reason=msg % subs) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Pre live migration failed at %s', dest, instance=instance) self._cleanup_pre_live_migration( context, dest, instance, migration, migrate_data, source_bdms) return migrate_data def _do_live_migration(self, context, dest, instance, block_migration, migration, migrate_data): # NOTE(danms): We should enhance the RT to account for migrations # and use the status field to denote when the accounting has been # done on source/destination. For now, this is just here for status # reporting self._set_migration_status(migration, 'preparing') source_bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) migrate_data = self._do_pre_live_migration_from_source( context, dest, instance, block_migration, migration, migrate_data, source_bdms) # Set migrate_data.migration because that is how _post_live_migration # and _rollback_live_migration get the migration object for cleanup. # Yes this is gross but changing the _post_live_migration and # _rollback_live_migration interfaces would also mean changing how the # virt drivers call them from the driver.live_migration method, i.e. # we would have to pass the migration object through the driver (or # consider using a partial but some do not like that pattern). migrate_data.migration = migration # NOTE(Kevin_Zheng): Pop the migration from the waiting queue # if it exist in the queue, then we are good to moving on, if # not, some other process must have aborted it, then we should # rollback. try: self._waiting_live_migrations.pop(instance.uuid) except KeyError: LOG.debug('Migration %s aborted by another process, rollback.', migration.uuid, instance=instance) self._rollback_live_migration(context, instance, dest, migrate_data, 'cancelled', source_bdms=source_bdms) self._notify_live_migrate_abort_end(context, instance) return self._set_migration_status(migration, 'running') # NOTE(mdbooth): pre_live_migration will update connection_info and # attachment_id on all volume BDMS to reflect the new destination # host attachment. We fetch BDMs before that to retain connection_info # and attachment_id relating to the source host for post migration # cleanup. 
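        # The partials below simply pre-bind the source-host BDM snapshot
        # so the virt driver can invoke the callbacks in its usual
        # (ctxt, instance, dest, ...) style without knowing about
        # source_bdms; a sketch only, see the callback definitions for the
        # exact signatures.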
post_live_migration = functools.partial( self._post_live_migration_update_host, source_bdms=source_bdms ) rollback_live_migration = functools.partial( self._rollback_live_migration, source_bdms=source_bdms) LOG.debug('live_migration data is %s', migrate_data) try: self.driver.live_migration(context, instance, dest, post_live_migration, rollback_live_migration, block_migration, migrate_data) except Exception: LOG.exception('Live migration failed.', instance=instance) with excutils.save_and_reraise_exception(): # Put instance and migration into error state, # as its almost certainly too late to rollback self._set_migration_status(migration, 'error') # first refresh instance as it may have got updated by # post_live_migration_at_destination instance.refresh() self._set_instance_obj_error_state(instance, clean_task_state=True) @wrap_exception() @wrap_instance_event(prefix='compute') @errors_out_migration @wrap_instance_fault def live_migration(self, context, dest, instance, block_migration, migration, migrate_data): """Executing live migration. :param context: security context :param dest: destination host :param instance: a nova.objects.instance.Instance object :param block_migration: if true, prepare for block migration :param migration: an nova.objects.Migration object :param migrate_data: implementation specific params """ self._set_migration_status(migration, 'queued') # NOTE(Kevin_Zheng): Submit the live_migration job to the pool and # put the returned Future object into dict mapped with migration.uuid # in order to be able to track and abort it in the future. self._waiting_live_migrations[instance.uuid] = (None, None) try: future = nova.utils.spawn_on( self._live_migration_executor, self._do_live_migration, context, dest, instance, block_migration, migration, migrate_data) self._waiting_live_migrations[instance.uuid] = (migration, future) except RuntimeError: # GreenThreadPoolExecutor.submit will raise RuntimeError if the # pool is shutdown, which happens in # _cleanup_live_migrations_in_pool. LOG.info('Migration %s failed to submit as the compute service ' 'is shutting down.', migration.uuid, instance=instance) raise exception.LiveMigrationNotSubmitted( migration_uuid=migration.uuid, instance_uuid=instance.uuid) @wrap_exception() @wrap_instance_event(prefix='compute') @wrap_instance_fault def live_migration_force_complete(self, context, instance): """Force live migration to complete. 
:param context: Security context :param instance: The instance that is being migrated """ self._notify_about_instance_usage( context, instance, 'live.migration.force.complete.start') compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.LIVE_MIGRATION_FORCE_COMPLETE, phase=fields.NotificationPhase.START) self.driver.live_migration_force_complete(instance) self._notify_about_instance_usage( context, instance, 'live.migration.force.complete.end') compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.LIVE_MIGRATION_FORCE_COMPLETE, phase=fields.NotificationPhase.END) def _notify_live_migrate_abort_end(self, context, instance): self._notify_about_instance_usage( context, instance, 'live.migration.abort.end') compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.LIVE_MIGRATION_ABORT, phase=fields.NotificationPhase.END) @wrap_exception() @wrap_instance_event(prefix='compute') @wrap_instance_fault def live_migration_abort(self, context, instance, migration_id): """Abort an in-progress live migration. :param context: Security context :param instance: The instance that is being migrated :param migration_id: ID of in-progress live migration """ self._notify_about_instance_usage( context, instance, 'live.migration.abort.start') compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.LIVE_MIGRATION_ABORT, phase=fields.NotificationPhase.START) # NOTE(Kevin_Zheng): Pop the migration out from the queue, this might # lead to 3 scenarios: # 1. The selected migration is still in queue, and the future.cancel() # succeed, then the abort action is succeed, mark the migration # status to 'cancelled'. # 2. The selected migration is still in queue, but the future.cancel() # failed, then the _do_live_migration() has started executing, and # the migration status is 'preparing', then we just pop it from the # queue, and the migration process will handle it later. And the # migration status couldn't be 'running' in this scenario because # if _do_live_migration has started executing and we've already # popped it from the queue and set the migration status to # 'running' at this point, popping it here will raise KeyError at # which point we check if it's running and if so, we abort the old # way. # 3. The selected migration is not in the queue, then the migration # status is 'running', let the driver handle it. try: migration, future = ( self._waiting_live_migrations.pop(instance.uuid)) if future and future.cancel(): # If we got here, we've successfully dropped a queued # migration from the queue, so _do_live_migration won't run # and we only need to revert minor changes introduced by Nova # control plane (port bindings, resource allocations and # instance's PCI devices), restore VM's state, set the # migration's status to cancelled and send the notification. # If Future.cancel() fails, it means _do_live_migration is # running and the migration status is preparing, and # _do_live_migration() itself will attempt to pop the queued # migration, hit a KeyError, and rollback, set the migration # to cancelled and send the live.migration.abort.end # notification. self._revert_allocation(context, instance, migration) try: # This call will delete any inactive destination host # port bindings. 
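                    # Illustrative effect of the call below: with
                    # teardown=True and host=migration.dest_compute only the
                    # inactive bindings that were created for the
                    # destination are removed; the active bindings on the
                    # source host are left alone.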
self.network_api.setup_networks_on_host( context, instance, host=migration.dest_compute, teardown=True) except exception.PortBindingDeletionFailed as e: # Removing the inactive port bindings from the destination # host is not critical so just log an error but don't fail. LOG.error( 'Network cleanup failed for destination host %s ' 'during live migration rollback. You may need to ' 'manually clean up resources in the network service. ' 'Error: %s', migration.dest_compute, str(e)) except Exception: with excutils.save_and_reraise_exception(): LOG.exception( 'An error occurred while cleaning up networking ' 'during live migration rollback.', instance=instance) instance.task_state = None instance.save(expected_task_state=[task_states.MIGRATING]) self._set_migration_status(migration, 'cancelled') except KeyError: migration = objects.Migration.get_by_id(context, migration_id) if migration.status != 'running': raise exception.InvalidMigrationState( migration_id=migration_id, instance_uuid=instance.uuid, state=migration.status, method='abort live migration') self.driver.live_migration_abort(instance) self._notify_live_migrate_abort_end(context, instance) def _live_migration_cleanup_flags(self, migrate_data, migr_ctxt=None): """Determine whether disks, instance path or other resources need to be cleaned up after live migration (at source on success, at destination on rollback) Block migration needs empty image at destination host before migration starts, so if any failure occurs, any empty images has to be deleted. Also Volume backed live migration w/o shared storage needs to delete newly created instance-xxx dir on the destination as a part of its rollback process There may be other resources which need cleanup; currently this is limited to vPMEM and mdev devices with the libvirt driver. :param migrate_data: implementation specific data :param migr_ctxt: specific resources stored in migration_context :returns: (bool, bool) -- do_cleanup, destroy_disks """ # NOTE(pkoniszewski): block migration specific params are set inside # migrate_data objects for drivers that expose block live migration # information (i.e. Libvirt, HyperV). For other drivers cleanup is not # needed. do_cleanup = False destroy_disks = False if isinstance(migrate_data, migrate_data_obj.LibvirtLiveMigrateData): has_vpmem = False if migr_ctxt and migr_ctxt.old_resources: for resource in migr_ctxt.old_resources: if ('metadata' in resource and isinstance(resource.metadata, objects.LibvirtVPMEMDevice)): has_vpmem = True break has_mdevs = 'target_mdevs' in migrate_data power_management_possible = ( 'dst_numa_info' in migrate_data and migrate_data.dst_numa_info is not None) # No instance booting at source host, but instance dir # must be deleted for preparing next block migration # must be deleted for preparing next live migration w/o shared # storage # vpmem must be cleaned do_cleanup = (not migrate_data.is_shared_instance_path or has_vpmem or has_mdevs or power_management_possible) destroy_disks = not ( migrate_data.is_shared_block_storage or migrate_data.is_shared_instance_path) elif isinstance(migrate_data, migrate_data_obj.HyperVLiveMigrateData): # NOTE(claudiub): We need to cleanup any zombie Planned VM. do_cleanup = True destroy_disks = not migrate_data.is_shared_instance_path return (do_cleanup, destroy_disks) def _post_live_migration_remove_source_vol_connections( self, context, instance, source_bdms): """Disconnect volume connections from the source host during _post_live_migration. 
:param context: nova auth RequestContext :param instance: Instance object being live migrated :param source_bdms: BlockDeviceMappingList representing the attached volumes with connection_info set for the source host """ # Detaching volumes. connector = None for bdm in source_bdms: if bdm.is_volume: # Detaching volumes is a call to an external API that can fail. # If it does, we need to handle it gracefully so that the call # to post_live_migration_at_destination - where we set instance # host and task state - still happens. We need to rethink the # current approach of setting instance host and task state # AFTER a whole bunch of things that could fail in unhandled # ways, but that is left as a TODO(artom). try: if bdm.attachment_id is None: # Prior to cinder v3.44: # We don't want to actually mark the volume detached, # or delete the bdm, just remove the connection from # this host. # # remove the volume connection without detaching from # hypervisor because the instance is not running # anymore on the current host if connector is None: connector = self.driver.get_volume_connector( instance) self.volume_api.terminate_connection(context, bdm.volume_id, connector) else: # cinder v3.44 api flow - delete the old attachment # for the source host self.volume_api.attachment_delete(context, bdm.attachment_id) except Exception as e: if bdm.attachment_id is None: LOG.error('Connection for volume %s not terminated on ' 'source host %s during post_live_migration: ' '%s', bdm.volume_id, self.host, str(e), instance=instance) else: LOG.error('Volume attachment %s not deleted on source ' 'host %s during post_live_migration: %s', bdm.attachment_id, self.host, str(e), instance=instance) # TODO(sean-k-mooney): add typing def _post_live_migration_update_host( self, ctxt, instance, dest, block_migration=False, migrate_data=None, source_bdms=None ): try: self._post_live_migration( ctxt, instance, dest, block_migration, migrate_data, source_bdms) except Exception: # Restore the instance object compute_node = None node_name = None try: # get node name of compute, where instance will be # running after migration, that is destination host compute_node = self._get_compute_info(ctxt, dest) node_name = compute_node.hypervisor_hostname except exception.ComputeHostNotFound: LOG.exception('Failed to get compute_info for %s', dest) # we can never rollback from post live migration and we can only # get here if the instance is running on the dest so we ensure # the instance.host is set correctly and reraise the original # exception unmodified. if instance.host != dest: # apply saves the new fields while drop actually removes the # migration context from the instance, so migration persists. instance.apply_migration_context() instance.drop_migration_context() instance.host = dest instance.task_state = None instance.node = node_name instance.compute_id = compute_node and compute_node.id or None instance.progress = 0 instance.save() raise @wrap_exception() @wrap_instance_fault def _post_live_migration(self, ctxt, instance, dest, block_migration=False, migrate_data=None, source_bdms=None): """Post operations for live migration. This method is called from live_migration and mainly updating database record. :param ctxt: security context :param instance: instance object :param dest: destination host :param block_migration: if true, prepare for block migration :param migrate_data: if not None, it is a dict which has data :param source_bdms: BDMs prior to modification by the destination compute host. 
Set by _do_live_migration and not part of the callback interface, so this is never None required for live migration without shared storage """ LOG.info('_post_live_migration() is started..', instance=instance) # NOTE(artom) The ordering and exception handling are important here. # We want to immediately activate the port bindings on the destination # host to minimize network downtime. This happens in # migrate_instance_start(). It's also crucial that we call # _post_live_migration_remove_source_vol_connections() to clean up # source volume connections and prevent potential data leaks. We # therefore activate the port bindings in a try block, and, regardless # of any exceptions during that process, clean up volume connections in # a finally block. try: # NOTE(artom) At this point in time we have not bound the ports to # the destination host yet (this happens in # migrate_instance_start() below). Therefore, the "old" source # network info that's still in the instance info cache is safe to # use here, since it'll be used below during # driver.post_live_migration_at_source() to unplug the VIFs on the # source. network_info = instance.get_network_info() self._notify_about_instance_usage(ctxt, instance, "live_migration._post.start", network_info=network_info, best_effort=True) compute_utils.notify_about_instance_action( ctxt, instance, self.host, action=fields.NotificationAction.LIVE_MIGRATION_POST, phase=fields.NotificationPhase.START, best_effort=True) migration = objects.Migration( source_compute=self.host, dest_compute=dest, ) # For neutron, migrate_instance_start will activate the destination # host port bindings, if there are any created by conductor before # live migration started. self.network_api.migrate_instance_start(ctxt, instance, migration) finally: # Cleanup source host post live-migration block_device_info = self._get_instance_block_device_info( ctxt, instance, bdms=source_bdms) self.driver.post_live_migration(ctxt, instance, block_device_info, migrate_data) # Disconnect volumes from this (the source) host. self._post_live_migration_remove_source_vol_connections( ctxt, instance, source_bdms) destroy_vifs = False try: # It's possible that the vif type changed on the destination # host and is already bound and active, so we need to use the # stashed source vifs in migrate_data.vifs (if present) to unplug # on the source host. 
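# Illustrative aside (editor's sketch, not part of the original module):
# a simplified, standalone version of the "prefer the stashed source VIFs"
# selection performed just below. migrate_vifs and the VIF objects are
# plain stand-ins here; the real code hydrates a NetworkInfo object from
# each migrate_vif.source_vif.
def _sketch_pick_unplug_vifs(cached_vifs, migrate_vifs=None):
    """Return the VIFs to unplug on the source host."""
    if migrate_vifs:
        # The destination may have rebound the ports with a different
        # vif_type, so unplug using the source-side VIFs stashed in
        # migrate_data rather than the current info cache.
        return [vif['source_vif'] for vif in migrate_vifs]
    return cached_vifs

# Example with hypothetical data:
assert _sketch_pick_unplug_vifs(['cached'], [{'source_vif': 'src'}]) == ['src']
assert _sketch_pick_unplug_vifs(['cached']) == ['cached']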
unplug_nw_info = network_info if migrate_data and 'vifs' in migrate_data: nw_info = [] for migrate_vif in migrate_data.vifs: nw_info.append(migrate_vif.source_vif) unplug_nw_info = network_model.NetworkInfo.hydrate(nw_info) LOG.debug('Calling driver.post_live_migration_at_source ' 'with original source VIFs from migrate_data: %s', unplug_nw_info, instance=instance) self.driver.post_live_migration_at_source(ctxt, instance, unplug_nw_info) except NotImplementedError as ex: LOG.debug(ex, instance=instance) # For all hypervisors other than libvirt, there is a possibility # they are unplugging networks from source node in the cleanup # method destroy_vifs = True # Free instance allocations on source before claims are allocated on # destination node self.rt.free_pci_device_allocations_for_instance(ctxt, instance) # NOTE(danms): Save source node before calling post method on # destination, which will update it source_node = instance.node do_cleanup, destroy_disks = self._live_migration_cleanup_flags( migrate_data, migr_ctxt=instance.migration_context) if do_cleanup: LOG.debug('Calling driver.cleanup from _post_live_migration', instance=instance) self.driver.cleanup(ctxt, instance, unplug_nw_info, destroy_disks=destroy_disks, migrate_data=migrate_data, destroy_vifs=destroy_vifs) # Define domain at destination host, without doing it, # pause/suspend/terminate do not work. post_at_dest_success = True try: self.compute_rpcapi.post_live_migration_at_destination(ctxt, instance, block_migration, dest) except Exception as error: post_at_dest_success = False # If post_live_migration_at_destination() fails, we now have the # _post_live_migration_update_host() method that will handle # this case. LOG.exception("Post live migration at destination %s failed", dest, instance=instance, error=error) raise self.instance_events.clear_events_for_instance(instance) # NOTE(timello): make sure we update available resources on source # host even before next periodic task. self.update_available_resource(ctxt) self._update_scheduler_instance_info(ctxt, instance) self._notify_about_instance_usage(ctxt, instance, "live_migration._post.end", network_info=network_info) compute_utils.notify_about_instance_action( ctxt, instance, self.host, action=fields.NotificationAction.LIVE_MIGRATION_POST, phase=fields.NotificationPhase.END) if post_at_dest_success: LOG.info('Migrating instance to %s finished successfully.', dest, instance=instance) self._clean_instance_console_tokens(ctxt, instance) if migrate_data and migrate_data.obj_attr_is_set('migration'): migrate_data.migration.status = 'completed' migrate_data.migration.save() self._delete_allocation_after_move(ctxt, instance, migrate_data.migration) else: # We didn't have data on a migration, which means we can't # look up to see if we had new-style migration-based # allocations. This should really only happen in cases of # a buggy virt driver. Log a warning so we know it happened. LOG.warning('Live migration ended with no migrate_data ' 'record. Unable to clean up migration-based ' 'allocations for node %s which is almost certainly ' 'not an expected situation.', source_node, instance=instance) def _consoles_enabled(self): """Returns whether a console is enable.""" return (CONF.vnc.enabled or CONF.spice.enabled or CONF.serial_console.enabled or CONF.mks.enabled) def _clean_instance_console_tokens(self, ctxt, instance): """Clean console tokens stored for an instance.""" # If the database backend isn't in use, don't bother trying to clean # tokens. 
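# Illustrative aside (editor's sketch, not part of the original module):
# _consoles_enabled() above is simply an OR over the per-protocol
# "enabled" options, and console auth token cleanup is skipped entirely
# when no console proxy is configured. A standalone sketch with a plain
# dict standing in for CONF:
def _sketch_any_console_enabled(conf):
    return any(conf.get(group, False)
               for group in ('vnc', 'spice', 'serial_console', 'mks'))

assert _sketch_any_console_enabled({'spice': True}) is True
assert _sketch_any_console_enabled({}) is False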
if self._consoles_enabled(): objects.ConsoleAuthToken.\ clean_console_auths_for_instance(ctxt, instance.uuid) @wrap_exception() @wrap_instance_event(prefix='compute') @wrap_instance_fault def post_live_migration_at_destination(self, context, instance, block_migration): """Post operations for live migration . :param context: security context :param instance: Instance dict :param block_migration: if true, prepare for block migration """ LOG.info('Post operation of migration started', instance=instance) # NOTE(tr3buchet): setup networks on destination host # this is called a second time because # multi_host does not create the bridge in # plug_vifs # NOTE(mriedem): This is a no-op for neutron. self.network_api.setup_networks_on_host(context, instance, self.host) migration = objects.Migration( source_compute=instance.host, dest_compute=self.host, migration_type=fields.MigrationType.LIVE_MIGRATION) self.network_api.migrate_instance_finish( context, instance, migration, provider_mappings=None) network_info = self.network_api.get_instance_nw_info(context, instance) self._notify_about_instance_usage( context, instance, "live_migration.post.dest.start", network_info=network_info) compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.LIVE_MIGRATION_POST_DEST, phase=fields.NotificationPhase.START) block_device_info = self._get_instance_block_device_info(context, instance) # Allocate the claimed PCI resources at destination. self.rt.allocate_pci_devices_for_instance(context, instance) try: self.driver.post_live_migration_at_destination( context, instance, network_info, block_migration, block_device_info) except Exception: with excutils.save_and_reraise_exception(): instance.vm_state = vm_states.ERROR LOG.error('Unexpected error during post live migration at ' 'destination host.', instance=instance) finally: # Restore instance state and update host current_power_state = self._get_power_state(instance) compute_node = None node_name = None prev_host = instance.host try: compute_node = self._get_compute_info(context, self.host) node_name = compute_node.hypervisor_hostname except exception.ComputeHostNotFound: LOG.exception('Failed to get compute_info for %s', self.host) finally: # NOTE(artom) We need to apply the migration context here # regardless of whether the driver's # post_live_migration_at_destination succeeded or not: the # instance is on the destination, potentially with a new NUMA # topology and resource usage. We need to persist that. # NOTE(artom) Apply followed by drop looks weird, but apply # just saves the new fields while drop actually removes the # migration context from the instance. instance.apply_migration_context() instance.drop_migration_context() instance.host = self.host instance.power_state = current_power_state instance.task_state = None instance.node = node_name instance.compute_id = compute_node and compute_node.id or None instance.progress = 0 instance.save(expected_task_state=task_states.MIGRATING) # NOTE(tr3buchet): tear down networks on source host (nova-net) # NOTE(mriedem): For neutron, this will delete any inactive source # host port bindings. try: self.network_api.setup_networks_on_host(context, instance, prev_host, teardown=True) except exception.PortBindingDeletionFailed as e: # Removing the inactive port bindings from the source host is not # critical so just log an error but don't fail. LOG.error('Network cleanup failed for source host %s during post ' 'live migration. 
You may need to manually clean up ' 'resources in the network service. Error: %s', prev_host, str(e)) # NOTE(vish): this is necessary to update dhcp for nova-network # NOTE(mriedem): This is a no-op for neutron. self.network_api.setup_networks_on_host(context, instance, self.host) self._notify_about_instance_usage( context, instance, "live_migration.post.dest.end", network_info=network_info) compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.LIVE_MIGRATION_POST_DEST, phase=fields.NotificationPhase.END) def _remove_remote_volume_connections(self, context, dest, bdms, instance): """Rollback remote volume connections on the dest""" for bdm in bdms: try: # remove the connection on the destination host # NOTE(lyarwood): This actually calls the cinderv2 # os-terminate_connection API if required. self.compute_rpcapi.remove_volume_connection( context, instance, bdm.volume_id, dest) except Exception: LOG.warning("Ignoring exception while attempting " "to rollback volume connections for " "volume %s on host %s.", bdm.volume_id, dest, instance=instance) def _rollback_volume_bdms(self, context, bdms, original_bdms, instance): """Rollback the connection_info and attachment_id for each bdm""" original_bdms_by_volid = {bdm.volume_id: bdm for bdm in original_bdms if bdm.is_volume} for bdm in bdms: try: original_bdm = original_bdms_by_volid[bdm.volume_id] # NOTE(lyarwood): Only delete the referenced attachment if it # is different to the original in order to avoid accidentally # removing the source host volume attachment after it has # already been rolled back by a failure in pre_live_migration. if (bdm.attachment_id and original_bdm.attachment_id and bdm.attachment_id != original_bdm.attachment_id): # NOTE(lyarwood): 3.44 cinder api flow. Delete the # attachment used by the bdm and reset it to that of # the original bdm. self.volume_api.attachment_delete(context, bdm.attachment_id) bdm.attachment_id = original_bdm.attachment_id # NOTE(lyarwood): Reset the connection_info to the original bdm.connection_info = original_bdm.connection_info bdm.save() except cinder_exception.ClientException: LOG.warning("Ignoring cinderclient exception when " "attempting to delete attachment %s for volume " "%s while rolling back volume bdms.", bdm.attachment_id, bdm.volume_id, instance=instance) except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Exception while attempting to rollback " "BDM for volume %s.", bdm.volume_id, instance=instance) @wrap_exception() @wrap_instance_fault def _rollback_live_migration(self, context, instance, dest, migrate_data=None, migration_status='failed', source_bdms=None, pre_live_migration=False): """Recovers Instance/volume state from migrating -> running. :param context: security context :param instance: nova.objects.instance.Instance object :param dest: This method is called from live migration src host. This param specifies destination host. :param migrate_data: if not none, contains implementation specific data. :param migration_status: Contains the status we want to set for the migration object :param source_bdms: BDMs prior to modification by the destination compute host. Set by _do_live_migration and not part of the callback interface, so this is never None """ # NOTE(gibi): We need to refresh pci_requests of the instance as it # might be changed by the conductor during scheduling based on the # selected destination host. 
If the instance has SRIOV ports with # resource request then the LiveMigrationTask._find_destination call # updated the instance.pci_requests.requests[].spec with the SRIOV PF # device name to be used on the destination host. As the migration is # rolling back to the source host now we don't want to persist the # destination host related changes in the DB. instance.pci_requests = \ objects.InstancePCIRequests.get_by_instance_uuid( context, instance.uuid) if (isinstance(migrate_data, migrate_data_obj.LiveMigrateData) and migrate_data.obj_attr_is_set('migration')): migration = migrate_data.migration else: migration = None if migration: # Remove allocations created in Placement for the dest node. # If migration is None, the virt driver didn't pass it which is # a bug. self._revert_allocation(context, instance, migration) else: LOG.error('Unable to revert allocations during live migration ' 'rollback; compute driver did not provide migrate_data', instance=instance) # NOTE(tr3buchet): setup networks on source host (really it's re-setup # for nova-network) # NOTE(mriedem): This is a no-op for neutron. self.network_api.setup_networks_on_host(context, instance, self.host) # NOTE(erlon): We should make sure that rollback_live_migration_at_src # is not called in the pre_live_migration rollback as that will trigger # the src host to re-attach interfaces which were not detached # previously. if not pre_live_migration: self.driver.rollback_live_migration_at_source(context, instance, migrate_data) # NOTE(lyarwood): Fetch the current list of BDMs, disconnect any # connected volumes from the dest and delete any volume attachments # used by the destination host before rolling back to the original # still valid source host volume attachments. bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) # TODO(lyarwood): Turn the following into a lookup method within # BlockDeviceMappingList. vol_bdms = [bdm for bdm in bdms if bdm.is_volume] if not pre_live_migration: # This will do both a driver detach and a Cinder attachment delete. # If we are in here due to a pre_live_migration failure, BDMs have # already been rolled back to contain info for the source, so don't # try to remove volume connections on the destination. # See ComputeManager.pre_live_migration() for details. self._remove_remote_volume_connections( context, dest, vol_bdms, instance) self._rollback_volume_bdms(context, vol_bdms, source_bdms, instance) self._notify_about_instance_usage(context, instance, "live_migration._rollback.start") compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.LIVE_MIGRATION_ROLLBACK, phase=fields.NotificationPhase.START, bdms=bdms) do_cleanup, destroy_disks = self._live_migration_cleanup_flags( migrate_data, migr_ctxt=instance.migration_context) if do_cleanup: self.compute_rpcapi.rollback_live_migration_at_destination( context, instance, dest, destroy_disks=destroy_disks, migrate_data=migrate_data) else: # The port binding profiles need to be cleaned up. with errors_out_migration_ctxt(migration): try: # This call will delete any inactive destination host # port bindings. self.network_api.setup_networks_on_host( context, instance, host=dest, teardown=True) except exception.PortBindingDeletionFailed as e: # Removing the inactive port bindings from the destination # host is not critical so just log an error but don't fail. LOG.error( 'Network cleanup failed for destination host %s ' 'during live migration rollback. 
You may need to ' 'manually clean up resources in the network service. ' 'Error: %s', dest, str(e)) except Exception: with excutils.save_and_reraise_exception(): LOG.exception( 'An error occurred while cleaning up networking ' 'during live migration rollback.', instance=instance) # NOTE(luyao): We drop move_claim and migration_context after cleanup # is complete, to ensure the specific resources claimed on destination # are released safely. # TODO(artom) drop_move_claim_at_destination() is new in RPC 5.3, only # call it if we performed a NUMA-aware live migration (which implies us # being able to send RPC 5.3). To check this, we can use the # src_supports_numa_live_migration flag, as it will be set if and only # if: # - dst_supports_numa_live_migration made its way to the source # (meaning both dest and source are new and conductor can speak # RPC 5.3) # - src_supports_numa_live_migration was set by the source driver and # passed the send-RPC-5.3 check. # This check can be removed in RPC 6.0. if ('src_supports_numa_live_migration' in migrate_data and migrate_data.src_supports_numa_live_migration): LOG.debug('Calling destination to drop move claim.', instance=instance) self.compute_rpcapi.drop_move_claim_at_destination(context, instance, dest) # NOTE(luyao): We only update instance info after rollback operations # are complete instance.task_state = None instance.progress = 0 instance.drop_migration_context() instance.save(expected_task_state=[task_states.MIGRATING]) self._notify_about_instance_usage(context, instance, "live_migration._rollback.end") compute_utils.notify_about_instance_action(context, instance, self.host, action=fields.NotificationAction.LIVE_MIGRATION_ROLLBACK, phase=fields.NotificationPhase.END, bdms=bdms) # NOTE(luyao): we have cleanup everything and get instance # back to normal status, now set migration status to 'failed' self._set_migration_status(migration, migration_status) @wrap_exception() @wrap_instance_fault def drop_move_claim_at_destination(self, context, instance): """Called by the source of a live migration during rollback to ask the destination to drop the MoveClaim object that was created for the live migration on the destination. """ nodename = self._get_nodename(instance) LOG.debug('Dropping live migration resource claim on destination ' 'node %s', nodename, instance=instance) self.rt.drop_move_claim( context, instance, nodename, flavor=instance.flavor) @wrap_exception() @wrap_instance_event(prefix='compute') @wrap_instance_fault def rollback_live_migration_at_destination(self, context, instance, destroy_disks, migrate_data): """Cleaning up image directory that is created pre_live_migration. :param context: security context :param instance: a nova.objects.instance.Instance object sent over rpc :param destroy_disks: whether to destroy volumes or not :param migrate_data: contains migration info """ network_info = self.network_api.get_instance_nw_info(context, instance) self._notify_about_instance_usage( context, instance, "live_migration.rollback.dest.start", network_info=network_info) compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.LIVE_MIGRATION_ROLLBACK_DEST, phase=fields.NotificationPhase.START) try: # NOTE(tr3buchet): tear down networks on dest host (nova-net) # NOTE(mriedem): For neutron, this call will delete any # destination host port bindings. 
# TODO(mriedem): We should eventually remove this call from # this method (rollback_live_migration_at_destination) since this # method is only called conditionally based on whether or not the # instance is running on shared storage. _rollback_live_migration # already calls this method for neutron if we are running on # shared storage. self.network_api.setup_networks_on_host(context, instance, self.host, teardown=True) except exception.PortBindingDeletionFailed as e: # Removing the inactive port bindings from the destination # host is not critical so just log an error but don't fail. LOG.error( 'Network cleanup failed for destination host %s ' 'during live migration rollback. You may need to ' 'manually clean up resources in the network service. ' 'Error: %s', self.host, str(e)) except Exception: with excutils.save_and_reraise_exception(): # NOTE(tdurakov): even if teardown networks fails driver # should try to rollback live migration on destination. LOG.exception('An error occurred while deallocating network.', instance=instance) finally: # always run this even if setup_networks_on_host fails # NOTE(vish): The mapping is passed in so the driver can disconnect # from remote volumes if necessary block_device_info = self._get_instance_block_device_info(context, instance) # NOTE(melwitt): By the time we get here, the instance BDMs have # already been rolled back to contain info for the source during # _rollback_live_migration(). # The code above predates the addition of rollback of the instance # BDM records to point at the source. It also predates the addition # of a driver detach call to remove_volume_connection(). # Set the list for Cinder volumes to empty to avoid attempting to # disconnect volumes during driver.cleanup() on the destination. bdi_without_volumes = copy.deepcopy(block_device_info) bdi_without_volumes['block_device_mapping'] = [] # free any instance PCI claims done on destination during # check_can_live_migrate_destination() self.rt.free_pci_device_claims_for_instance(context, instance) # NOTE(luyao): Apply migration_context temporarily since it's # on destination host, we rely on instance object to cleanup # specific resources like vpmem with instance.mutated_migration_context(): self.driver.rollback_live_migration_at_destination( context, instance, network_info, bdi_without_volumes, destroy_disks=destroy_disks, migrate_data=migrate_data) self._notify_about_instance_usage( context, instance, "live_migration.rollback.dest.end", network_info=network_info) compute_utils.notify_about_instance_action( context, instance, self.host, action=fields.NotificationAction.LIVE_MIGRATION_ROLLBACK_DEST, phase=fields.NotificationPhase.END) def _require_nw_info_update(self, context, instance): """Detect whether there is a mismatch in binding:host_id, or binding_failed or unbound binding:vif_type for any of the instances ports. """ # Only update port bindings if compute manager does manage port # bindings instead of the compute driver. For example IronicDriver # manages the port binding for baremetal instance ports, hence, # external intervention with the binding is not desired. 
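# Illustrative aside (editor's sketch, not part of the original module):
# a standalone restatement of the port check performed below in
# _require_nw_info_update(). Ports are plain dicts shaped like the Neutron
# list_ports results; the vif_type strings mirror
# network_model.VIF_TYPE_UNBOUND and VIF_TYPE_BINDING_FAILED.
def _sketch_needs_binding_heal(ports, this_host):
    for port in ports:
        if port.get('binding:host_id') != this_host:
            return True
        if port.get('binding:vif_type') in ('unbound', 'binding_failed'):
            return True
    return False

# A port still bound to another host (e.g. after a failed evacuation)
# triggers the heal; a healthy binding does not.
assert _sketch_needs_binding_heal(
    [{'binding:host_id': 'other', 'binding:vif_type': 'ovs'}], 'me')
assert not _sketch_needs_binding_heal(
    [{'binding:host_id': 'me', 'binding:vif_type': 'ovs'}], 'me')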
if self.driver.manages_network_binding_host_id(): return False search_opts = {'device_id': instance.uuid, 'fields': ['binding:host_id', 'binding:vif_type']} ports = self.network_api.list_ports(context, **search_opts) for p in ports['ports']: if p.get('binding:host_id') != self.host: return True vif_type = p.get('binding:vif_type') if (vif_type == network_model.VIF_TYPE_UNBOUND or vif_type == network_model.VIF_TYPE_BINDING_FAILED): return True return False @periodic_task.periodic_task( spacing= CONF.heal_instance_info_cache_interval if CONF.heal_instance_info_cache_interval != 0 else -1) def _heal_instance_info_cache(self, context): """Called periodically. On every call, try to update the info_cache's network information for another instance by calling to the network manager. This is implemented by keeping a cache of uuids of instances that live on this host. On each call, we pop one off of a list, pull the DB record, and try the call to the network API. If anything errors don't fail, as it's possible the instance has been deleted, etc. """ instance_uuids = getattr(self, '_instance_uuids_to_heal', []) instance = None LOG.debug('Starting heal instance info cache') if not instance_uuids: # The list of instances to heal is empty so rebuild it LOG.debug('Rebuilding the list of instances to heal') db_instances = objects.InstanceList.get_by_host( context, self.host, expected_attrs=[], use_slave=True) for inst in db_instances: # We don't want to refresh the cache for instances # which are building or deleting so don't put them # in the list. If they are building they will get # added to the list next time we build it. if (inst.vm_state == vm_states.BUILDING): LOG.debug('Skipping network cache update for instance ' 'because it is Building.', instance=inst) continue if (inst.task_state == task_states.DELETING): LOG.debug('Skipping network cache update for instance ' 'because it is being deleted.', instance=inst) continue if not instance: # Save the first one we find so we don't # have to get it again instance = inst else: instance_uuids.append(inst['uuid']) self._instance_uuids_to_heal = instance_uuids else: # Find the next valid instance on the list while instance_uuids: try: inst = objects.Instance.get_by_uuid( context, instance_uuids.pop(0), expected_attrs=['system_metadata', 'info_cache', 'flavor'], use_slave=True) except exception.InstanceNotFound: # Instance is gone. Try to grab another. continue # Check the instance hasn't been migrated if inst.host != self.host: LOG.debug('Skipping network cache update for instance ' 'because it has been migrated to another ' 'host.', instance=inst) # Check the instance isn't being deleting elif inst.task_state == task_states.DELETING: LOG.debug('Skipping network cache update for instance ' 'because it is being deleted.', instance=inst) else: instance = inst break if instance: # We have an instance now to refresh try: # Fix potential mismatch in port binding if evacuation failed # after reassigning the port binding to the dest host but # before the instance host is changed. # Do this only when instance has no pending task. if instance.task_state is None and \ self._require_nw_info_update(context, instance): LOG.info("Updating ports in neutron", instance=instance) self.network_api.setup_instance_network_on_host( context, instance, self.host) # Call to network API to get instance info.. 
this will # force an update to the instance's info_cache self.network_api.get_instance_nw_info( context, instance, force_refresh=True) LOG.debug('Updated the network info_cache for instance', instance=instance) except exception.InstanceNotFound: # Instance is gone. LOG.debug('Instance no longer exists. Unable to refresh', instance=instance) return except exception.InstanceInfoCacheNotFound: # InstanceInfoCache is gone. LOG.debug('InstanceInfoCache no longer exists. ' 'Unable to refresh', instance=instance) except Exception: LOG.error('An error occurred while refreshing the network ' 'cache.', instance=instance, exc_info=True) else: LOG.debug("Didn't find any instances for network info cache " "update.") @periodic_task.periodic_task def _poll_rebooting_instances(self, context): if CONF.reboot_timeout > 0: filters = {'task_state': [task_states.REBOOTING, task_states.REBOOT_STARTED, task_states.REBOOT_PENDING], 'host': self.host} rebooting = objects.InstanceList.get_by_filters( context, filters, expected_attrs=[], use_slave=True) to_poll = [] for instance in rebooting: if timeutils.is_older_than(instance.updated_at, CONF.reboot_timeout): to_poll.append(instance) self.driver.poll_rebooting_instances(CONF.reboot_timeout, to_poll) @periodic_task.periodic_task def _poll_rescued_instances(self, context): if CONF.rescue_timeout > 0: filters = {'vm_state': vm_states.RESCUED, 'host': self.host} rescued_instances = objects.InstanceList.get_by_filters( context, filters, expected_attrs=["system_metadata"], use_slave=True) to_unrescue = [] for instance in rescued_instances: if timeutils.is_older_than(instance.launched_at, CONF.rescue_timeout): to_unrescue.append(instance) for instance in to_unrescue: self.compute_api.unrescue(context, instance) @periodic_task.periodic_task def _poll_unconfirmed_resizes(self, context): if CONF.resize_confirm_window == 0: return migrations = objects.MigrationList.get_unconfirmed_by_dest_compute( context, CONF.resize_confirm_window, self.host, use_slave=True) migrations_info = dict(migration_count=len(migrations), confirm_window=CONF.resize_confirm_window) if migrations_info["migration_count"] > 0: LOG.info("Found %(migration_count)d unconfirmed migrations " "older than %(confirm_window)d seconds", migrations_info) def _set_migration_to_error(migration, reason, **kwargs): LOG.warning("Setting migration %(migration_id)s to error: " "%(reason)s", {'migration_id': migration.id, 'reason': reason}, **kwargs) migration.status = 'error' migration.save() for migration in migrations: instance_uuid = migration.instance_uuid LOG.info("Automatically confirming migration " "%(migration_id)s for instance %(instance_uuid)s", {'migration_id': migration.id, 'instance_uuid': instance_uuid}) expected_attrs = ['metadata', 'system_metadata'] try: instance = objects.Instance.get_by_uuid(context, instance_uuid, expected_attrs=expected_attrs, use_slave=True) except exception.InstanceNotFound: reason = (_("Instance %s not found") % instance_uuid) _set_migration_to_error(migration, reason) continue if instance.vm_state == vm_states.ERROR: reason = _("In ERROR state") _set_migration_to_error(migration, reason, instance=instance) continue # race condition: The instance in DELETING state should not be # set the migration state to error, otherwise the instance in # to be deleted which is in RESIZED state # will not be able to confirm resize if instance.task_state in [task_states.DELETING, task_states.SOFT_DELETING]: msg = ("Instance being deleted or soft deleted during resize " "confirmation. 
Skipping.") LOG.debug(msg, instance=instance) continue # race condition: This condition is hit when this method is # called between the save of the migration record with a status of # finished and the save of the instance object with a state of # RESIZED. The migration record should not be set to error. if instance.task_state == task_states.RESIZE_FINISH: msg = ("Instance still resizing during resize " "confirmation. Skipping.") LOG.debug(msg, instance=instance) continue vm_state = instance.vm_state task_state = instance.task_state if vm_state != vm_states.RESIZED or task_state is not None: reason = (_("In states %(vm_state)s/%(task_state)s, not " "RESIZED/None") % {'vm_state': vm_state, 'task_state': task_state}) _set_migration_to_error(migration, reason, instance=instance) continue try: self.compute_api.confirm_resize(context, instance, migration=migration) except Exception as e: LOG.info("Error auto-confirming resize: %s. " "Will retry later.", e, instance=instance) @periodic_task.periodic_task(spacing=CONF.shelved_poll_interval) def _poll_shelved_instances(self, context): if CONF.shelved_offload_time <= 0: return filters = {'vm_state': vm_states.SHELVED, 'task_state': None, 'host': self.host} shelved_instances = objects.InstanceList.get_by_filters( context, filters=filters, expected_attrs=['system_metadata'], use_slave=True) to_gc = [] for instance in shelved_instances: sys_meta = instance.system_metadata shelved_at = timeutils.parse_strtime(sys_meta['shelved_at']) if timeutils.is_older_than(shelved_at, CONF.shelved_offload_time): to_gc.append(instance) cyclient = cyborg.get_client(context) for instance in to_gc: try: instance.task_state = task_states.SHELVING_OFFLOADING instance.save(expected_task_state=(None,)) accel_uuids = [] if instance.flavor.extra_specs.get('accel:device_profile'): # TODO(brinzhang): After cyborg support batch query ARQs # for more than one instances, we will improve efficiency # with this implementation. accel_uuids = cyclient.get_arq_uuids_for_instance(instance) self.shelve_offload_instance( context, instance, clean_shutdown=False, accel_uuids=accel_uuids) except Exception: LOG.exception('Periodic task failed to offload instance.', instance=instance) @periodic_task.periodic_task def _instance_usage_audit(self, context): if not CONF.instance_usage_audit: return begin, end = utils.last_completed_audit_period() if objects.TaskLog.get(context, 'instance_usage_audit', begin, end, self.host): return instances = objects.InstanceList.get_active_by_window_joined( context, begin, end, host=self.host, expected_attrs=['system_metadata', 'info_cache', 'metadata', 'flavor'], use_slave=True) num_instances = len(instances) errors = 0 successes = 0 LOG.info("Running instance usage audit for host %(host)s " "from %(begin_time)s to %(end_time)s. " "%(number_instances)s instances.", {'host': self.host, 'begin_time': begin, 'end_time': end, 'number_instances': num_instances}) start_time = time.time() task_log = objects.TaskLog(context) task_log.task_name = 'instance_usage_audit' task_log.period_beginning = begin task_log.period_ending = end task_log.host = self.host task_log.task_items = num_instances task_log.message = 'Instance usage audit started...' 
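# Illustrative aside (editor's sketch, not part of the original module):
# the audit loop that follows only tallies per-instance successes and
# errors and records the totals on the TaskLog. A standalone sketch of
# that bookkeeping, with a hypothetical notify callable standing in for
# compute_utils.notify_usage_exists():
def _sketch_run_audit(instances, notify):
    successes = errors = 0
    for inst in instances:
        try:
            notify(inst)
            successes += 1
        except Exception:
            errors += 1
    return successes, errors

# Example: one of three hypothetical instances fails to emit its usage
# notification and is counted as an error.
def _fail_on_bad(name):
    if name == 'bad':
        raise RuntimeError('notification failed')

assert _sketch_run_audit(['a', 'bad', 'c'], _fail_on_bad) == (2, 1)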
task_log.begin_task() for instance in instances: try: compute_utils.notify_usage_exists( self.notifier, context, instance, self.host, ignore_missing_network_data=False) successes += 1 except Exception: LOG.exception('Failed to generate usage ' 'audit for instance ' 'on host %s', self.host, instance=instance) errors += 1 task_log.errors = errors task_log.message = ( 'Instance usage audit ran for host %s, %s instances in %s seconds.' % (self.host, num_instances, time.time() - start_time)) task_log.end_task() def _get_host_volume_bdms(self, context, use_slave=False): """Return all block device mappings on a compute host.""" compute_host_bdms = [] instances = objects.InstanceList.get_by_host(context, self.host, use_slave=use_slave) for instance in instances: bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid, use_slave=use_slave) instance_bdms = [bdm for bdm in bdms if bdm.is_volume] compute_host_bdms.append(dict(instance=instance, instance_bdms=instance_bdms)) return compute_host_bdms def _update_volume_usage_cache(self, context, vol_usages): """Updates the volume usage cache table with a list of stats.""" for usage in vol_usages: # Allow switching of greenthreads between queries. utils.cooperative_yield() vol_usage = objects.VolumeUsage(context) vol_usage.volume_id = usage['volume'] vol_usage.instance_uuid = usage['instance'].uuid vol_usage.project_id = usage['instance'].project_id vol_usage.user_id = usage['instance'].user_id vol_usage.availability_zone = usage['instance'].availability_zone vol_usage.curr_reads = usage['rd_req'] vol_usage.curr_read_bytes = usage['rd_bytes'] vol_usage.curr_writes = usage['wr_req'] vol_usage.curr_write_bytes = usage['wr_bytes'] vol_usage.save() self.notifier.info(context, 'volume.usage', vol_usage.to_dict()) compute_utils.notify_about_volume_usage(context, vol_usage, self.host) @periodic_task.periodic_task(spacing=CONF.volume_usage_poll_interval) def _poll_volume_usage(self, context): if CONF.volume_usage_poll_interval == 0: return compute_host_bdms = self._get_host_volume_bdms(context, use_slave=True) if not compute_host_bdms: return LOG.debug("Updating volume usage cache") try: vol_usages = self.driver.get_all_volume_usage(context, compute_host_bdms) except NotImplementedError: return self._update_volume_usage_cache(context, vol_usages) @periodic_task.periodic_task(spacing=CONF.sync_power_state_interval, run_immediately=True) def _sync_power_states(self, context): """Align power states between the database and the hypervisor. To sync power state data we make a DB call to get the number of virtual machines known by the hypervisor and if the number matches the number of virtual machines known by the database, we proceed in a lazy loop, one database record at a time, checking if the hypervisor has the same power state as is in the database. """ db_instances = objects.InstanceList.get_by_host(context, self.host, expected_attrs=[], use_slave=True) try: num_vm_instances = self.driver.get_num_instances() except exception.VirtDriverNotReady as e: # If the virt driver is not ready, like ironic-api not being up # yet in the case of ironic, just log it and exit. 
LOG.info('Skipping _sync_power_states periodic task due to: %s', e) return num_db_instances = len(db_instances) if num_vm_instances != num_db_instances: LOG.warning("While synchronizing instance power states, found " "%(num_db_instances)s instances in the database " "and %(num_vm_instances)s instances on the " "hypervisor.", {'num_db_instances': num_db_instances, 'num_vm_instances': num_vm_instances}) def _sync(db_instance): # NOTE(melwitt): This must be synchronized as we query state from # two separate sources, the driver and the database. # They are set (in stop_instance) and read, in sync. @utils.synchronized(db_instance.uuid) def query_driver_power_state_and_sync(): self._query_driver_power_state_and_sync(context, db_instance) try: query_driver_power_state_and_sync() except Exception: LOG.exception("Periodic sync_power_state task had an " "error while processing an instance.", instance=db_instance) self._syncs_in_progress.pop(db_instance.uuid) for db_instance in db_instances: # process syncs asynchronously - don't want instance locking to # block entire periodic task thread uuid = db_instance.uuid if uuid in self._syncs_in_progress: LOG.debug('Sync already in progress for %s', uuid) else: LOG.debug('Triggering sync for uuid %s', uuid) self._syncs_in_progress[uuid] = True nova.utils.spawn_on( self._sync_power_executor, _sync, db_instance) def _query_driver_power_state_and_sync(self, context, db_instance): if db_instance.task_state is not None: LOG.info("During sync_power_state the instance has a " "pending task (%(task)s). Skip.", {'task': db_instance.task_state}, instance=db_instance) return # No pending tasks. Now try to figure out the real vm_power_state. try: vm_instance = self.driver.get_info(db_instance) vm_power_state = vm_instance.state except exception.InstanceNotFound: vm_power_state = power_state.NOSTATE # Note(maoy): the above get_info call might take a long time, # for example, because of a broken libvirt driver. try: self._sync_instance_power_state(context, db_instance, vm_power_state, use_slave=True) except exception.InstanceNotFound: # NOTE(hanlind): If the instance gets deleted during sync, # silently ignore. pass def _stop_unexpected_shutdown_instance(self, context, vm_state, db_instance, orig_db_power_state): # this is an exceptional case; make sure our data is up # to date before slamming through a power off vm_instance = self.driver.get_info(db_instance, use_cache=False) vm_power_state = vm_instance.state # if it still looks off, go ahead and call stop() if vm_power_state in (power_state.SHUTDOWN, power_state.CRASHED): LOG.warning("Instance shutdown by itself. Calling the " "stop API. Current vm_state: %(vm_state)s, " "current task_state: %(task_state)s, " "original DB power_state: %(db_power_state)s, " "current VM power_state: %(vm_power_state)s", {'vm_state': vm_state, 'task_state': db_instance.task_state, 'db_power_state': orig_db_power_state, 'vm_power_state': vm_power_state}, instance=db_instance) try: # Note(maoy): here we call the API instead of # brutally updating the vm_state in the database # to allow all the hooks and checks to be performed. if db_instance.shutdown_terminate: self.compute_api.delete(context, db_instance) else: self.compute_api.stop(context, db_instance) except Exception: # Note(maoy): there is no need to propagate the error # because the same power_state will be retrieved next # time and retried. # For example, there might be another task scheduled. 
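# Illustrative aside (editor's sketch, not part of the original module):
# the per-instance fan-out in _sync_power_states above skips any instance
# whose sync is already in flight. A standalone sketch of that guard, with
# a plain set standing in for self._syncs_in_progress and a hypothetical
# submit callable standing in for the executor:
def _sketch_trigger_syncs(uuids, in_progress, submit):
    started = []
    for uuid in uuids:
        if uuid in in_progress:
            continue  # a sync for this instance is already running
        in_progress.add(uuid)
        submit(uuid)
        started.append(uuid)
    return started

assert _sketch_trigger_syncs(['a', 'b'], {'b'}, lambda u: None) == ['a']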
LOG.exception("error during stop() in sync_power_state.", instance=db_instance) def _sync_instance_power_state(self, context, db_instance, vm_power_state, use_slave=False): """Align instance power state between the database and hypervisor. If the instance is not found on the hypervisor, but is in the database, then a stop() API will be called on the instance. """ # We re-query the DB to get the latest instance info to minimize # (not eliminate) race condition. db_instance.refresh(use_slave=use_slave) db_power_state = db_instance.power_state vm_state = db_instance.vm_state if self.host != db_instance.host: # on the sending end of nova-compute _sync_power_state # may have yielded to the greenthread performing a live # migration; this in turn has changed the resident-host # for the VM; However, the instance is still active, it # is just in the process of migrating to another host. # This implies that the compute source must relinquish # control to the compute destination. LOG.info("During the sync_power process the " "instance has moved from " "host %(src)s to host %(dst)s", {'src': db_instance.host, 'dst': self.host}, instance=db_instance) return elif db_instance.task_state is not None: # on the receiving end of nova-compute, it could happen # that the DB instance already report the new resident # but the actual VM has not showed up on the hypervisor # yet. In this case, let's allow the loop to continue # and run the state sync in a later round LOG.info("During sync_power_state the instance has a " "pending task (%(task)s). Skip.", {'task': db_instance.task_state}, instance=db_instance) return orig_db_power_state = db_power_state if vm_power_state != db_power_state: LOG.info('During _sync_instance_power_state the DB ' 'power_state (%(db_power_state)s) does not match ' 'the vm_power_state from the hypervisor ' '(%(vm_power_state)s). Updating power_state in the ' 'DB to match the hypervisor.', {'db_power_state': db_power_state, 'vm_power_state': vm_power_state}, instance=db_instance) # power_state is always updated from hypervisor to db db_instance.power_state = vm_power_state db_instance.save() db_power_state = vm_power_state # Note(maoy): Now resolve the discrepancy between vm_state and # vm_power_state. We go through all possible vm_states. if vm_state in (vm_states.BUILDING, vm_states.RESCUED, vm_states.RESIZED, vm_states.SUSPENDED, vm_states.ERROR): # TODO(maoy): we ignore these vm_state for now. pass elif vm_state == vm_states.ACTIVE: # The only rational power state should be RUNNING if vm_power_state in (power_state.SHUTDOWN, power_state.CRASHED): self._stop_unexpected_shutdown_instance( context, vm_state, db_instance, orig_db_power_state) elif vm_power_state == power_state.SUSPENDED: LOG.warning("Instance is suspended unexpectedly. Calling " "the stop API.", instance=db_instance) try: self.compute_api.stop(context, db_instance) except Exception: LOG.exception("error during stop() in sync_power_state.", instance=db_instance) elif vm_power_state == power_state.PAUSED: # Note(maoy): a VM may get into the paused state not only # because the user request via API calls, but also # due to (temporary) external instrumentations. # Before the virt layer can reliably report the reason, # we simply ignore the state discrepancy. In many cases, # the VM state will go back to running after the external # instrumentation is done. See bug 1097806 for details. LOG.warning("Instance is paused unexpectedly. 
Ignore.", instance=db_instance) elif vm_power_state == power_state.NOSTATE: # Occasionally, depending on the status of the hypervisor, # which could be restarting for example, an instance may # not be found. Therefore just log the condition. LOG.warning("Instance is unexpectedly not found. Ignore.", instance=db_instance) elif vm_state == vm_states.STOPPED: if vm_power_state not in (power_state.NOSTATE, power_state.SHUTDOWN, power_state.CRASHED): LOG.warning("Instance is not stopped. Calling " "the stop API. Current vm_state: %(vm_state)s," " current task_state: %(task_state)s, " "original DB power_state: %(db_power_state)s, " "current VM power_state: %(vm_power_state)s", {'vm_state': vm_state, 'task_state': db_instance.task_state, 'db_power_state': orig_db_power_state, 'vm_power_state': vm_power_state}, instance=db_instance) try: # NOTE(russellb) Force the stop, because normally the # compute API would not allow an attempt to stop a stopped # instance. self.compute_api.force_stop(context, db_instance) except Exception: LOG.exception("error during stop() in sync_power_state.", instance=db_instance) elif vm_state == vm_states.PAUSED: if vm_power_state in (power_state.SHUTDOWN, power_state.CRASHED): LOG.warning("Paused instance shutdown by itself. Calling " "the stop API.", instance=db_instance) try: self.compute_api.force_stop(context, db_instance) except Exception: LOG.exception("error during stop() in sync_power_state.", instance=db_instance) elif vm_state in (vm_states.SOFT_DELETED, vm_states.DELETED): if vm_power_state not in (power_state.NOSTATE, power_state.SHUTDOWN): # Note(maoy): this should be taken care of periodically in # _cleanup_running_deleted_instances(). LOG.warning("Instance is not (soft-)deleted.", instance=db_instance) @periodic_task.periodic_task def _reclaim_queued_deletes(self, context): """Reclaim instances that are queued for deletion.""" interval = CONF.reclaim_instance_interval if interval <= 0: LOG.debug("CONF.reclaim_instance_interval <= 0, skipping...") return filters = {'vm_state': vm_states.SOFT_DELETED, 'task_state': None, 'host': self.host} instances = objects.InstanceList.get_by_filters( context, filters, expected_attrs=objects.instance.INSTANCE_DEFAULT_FIELDS, use_slave=True) for instance in instances: if self._deleted_old_enough(instance, interval): bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) LOG.info('Reclaiming deleted instance', instance=instance) try: self._delete_instance(context, instance, bdms) except Exception as e: LOG.warning("Periodic reclaim failed to delete " "instance: %s", e, instance=instance) def _get_nodename(self, instance, refresh=False): """Helper method to get the name of the first available node on this host. This method should not be used with any operations on ironic instances since it does not handle multiple nodes. """ node = self.driver.get_available_nodes(refresh=refresh)[0] LOG.debug("No node specified, defaulting to %s", node, instance=instance) return node def _update_available_resource_for_node(self, context, nodename, startup=False): try: self.rt.update_available_resource(context, nodename, startup=startup) except exception.ComputeHostNotFound: LOG.warning("Compute node '%s' not found in " "update_available_resource.", nodename) except exception.ReshapeFailed: # We're only supposed to get here on startup, if a reshape was # needed, was attempted, and failed. We want to kill the service. 
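# Illustrative aside (editor's sketch, not part of the original module):
# a condensed restatement of the vm_state / power_state reconciliation in
# _sync_instance_power_state() above, reduced to the action it chooses.
# The lowercase state strings are stand-ins for the vm_states and
# power_state constants.
def _sketch_power_sync_action(vm_state, power):
    """Return 'stop', 'force_stop' or None (log/ignore only)."""
    if vm_state == 'active' and power in ('shutdown', 'crashed', 'suspended'):
        return 'stop'
    if vm_state == 'stopped' and power not in ('nostate', 'shutdown',
                                               'crashed'):
        return 'force_stop'
    if vm_state == 'paused' and power in ('shutdown', 'crashed'):
        return 'force_stop'
    return None

assert _sketch_power_sync_action('active', 'shutdown') == 'stop'
assert _sketch_power_sync_action('stopped', 'running') == 'force_stop'
assert _sketch_power_sync_action('error', 'running') is None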
with excutils.save_and_reraise_exception(): LOG.critical("Resource provider data migration failed " "fatally during startup for node %s.", nodename) except exception.ReshapeNeeded: # This exception should only find its way here if the virt driver's # update_provider_tree raised it incorrectly: either # a) After the resource tracker already caught it once and # reinvoked update_provider_tree with allocations. At this point # the driver is just supposed to *do* the reshape, so if it raises # ReshapeNeeded, it's a bug, and we want to kill the compute # service. # b) On periodic rather than startup (we only allow reshapes to # happen on startup). In this case we'll just make the logs red and # go again at the next periodic interval, where the same thing may # or may not happen again. Depending on the previous and intended # shape of the providers/inventories, this may not actually cause # any immediately visible symptoms (in terms of scheduling, etc.) # If this becomes a problem, we may wish to make it pop immediately # (e.g. disable the service). with excutils.save_and_reraise_exception(): LOG.exception("ReshapeNeeded exception is unexpected here!") except exception.PlacementPciException: # If we are at startup and the Placement PCI inventory handling # failed then probably there is a configuration error. Propagate # the error up to kill the service. if startup: raise # If we are not at startup then we can assume that the # configuration was correct at startup so the error is probably # transient. Anyhow we cannot kill the service any more so just # log the error and continue. LOG.exception( "Error updating PCI resources for node %(node)s.", {'node': nodename}) except exception.InvalidConfiguration as e: if startup: # If this happens during startup, we need to let it raise to # abort our service startup. raise else: LOG.error("Error updating resources for node %s: %s", nodename, e) except Exception: LOG.exception("Error updating resources for node %(node)s.", {'node': nodename}) @periodic_task.periodic_task(spacing=CONF.update_resources_interval) def update_available_resource(self, context, startup=False): """See driver.get_available_resource() Periodic process that keeps that the compute host's understanding of resource availability and usage in sync with the underlying hypervisor. :param context: security context :param startup: True if this is being called when the nova-compute service is starting, False otherwise. """ try: nodenames = set(self.driver.get_available_nodes()) except exception.VirtDriverNotReady: LOG.warning("Virt driver is not ready.") return compute_nodes_in_db = self._get_compute_nodes_in_db(context, nodenames, use_slave=True, startup=startup) self.rt.clean_compute_node_cache(compute_nodes_in_db) # Delete orphan compute node not reported by driver but still in db for cn in compute_nodes_in_db: if cn.hypervisor_hostname not in nodenames: # if the node could be migrated, we don't delete # the compute node database records if not self.driver.is_node_deleted(cn.hypervisor_hostname): LOG.warning( "Found orphan compute node %(id)s " "hypervisor host is %(hh)s, " "nodes are %(nodes)s. 
" "We are not deleting this as the driver " "says this node has not been deleted.", {'id': cn.id, 'hh': cn.hypervisor_hostname, 'nodes': nodenames}) continue LOG.info("Deleting orphan compute node %(id)s " "hypervisor host is %(hh)s, " "nodes are %(nodes)s", {'id': cn.id, 'hh': cn.hypervisor_hostname, 'nodes': nodenames}) try: cn.destroy() except exception.ObjectActionError: # NOTE(mgoddard): it's possible that another compute # service took ownership of this compute node since we # queried it due to a rebalance, and this will cause the # deletion to fail. Ignore the error in that case. LOG.info("Ignoring failure to delete orphan compute node " "%(id)s on hypervisor host %(hh)s due to " "possible node rebalance", {'id': cn.id, 'hh': cn.hypervisor_hostname}) self.rt.remove_node(cn.hypervisor_hostname) self.reportclient.invalidate_resource_provider(cn.uuid) else: self.rt.remove_node(cn.hypervisor_hostname) # Delete the corresponding resource provider in placement, # along with any associated allocations. try: self.reportclient.delete_resource_provider( context, cn, cascade=True) except keystone_exception.ClientException as e: LOG.error( "Failed to delete compute node resource provider " "for compute node %s: %s", cn.uuid, str(e)) for nodename in nodenames: self._update_available_resource_for_node(context, nodename, startup=startup) def _get_compute_nodes_in_db(self, context, nodenames, use_slave=False, startup=False): try: return objects.ComputeNodeList.get_all_by_host(context, self.host, use_slave=use_slave) except exception.NotFound: # If the driver is not reporting any nodenames we should not # expect there to be compute nodes so we just return in that case. # For example, this could be an ironic compute and it is not # managing any nodes yet. if nodenames: if startup: LOG.warning( "No compute node record found for host %s. If this is " "the first time this service is starting on this " "host, then you can ignore this warning.", self.host) else: LOG.error("No compute node record for host %s", self.host) return [] @periodic_task.periodic_task( spacing=CONF.running_deleted_instance_poll_interval, run_immediately=True) def _cleanup_running_deleted_instances(self, context): """Cleanup any instances which are erroneously still running after having been deleted. Valid actions to take are: 1. noop - do nothing 2. log - log which instances are erroneously running 3. reap - shutdown and cleanup any erroneously running instances 4. shutdown - power off *and disable* any erroneously running instances The use-case for this cleanup task is: for various reasons, it may be possible for the database to show an instance as deleted but for that instance to still be running on a host machine (see bug https://bugs.launchpad.net/nova/+bug/911366). This cleanup task is a cross-hypervisor utility for finding these zombied instances and either logging the discrepancy (likely what you should do in production), or automatically reaping the instances (more appropriate for dev environments). """ action = CONF.running_deleted_instance_action if action == "noop": return # NOTE(sirp): admin contexts don't ordinarily return deleted records with utils.temporary_mutation(context, read_deleted="yes"): try: instances = self._running_deleted_instances(context) except exception.VirtDriverNotReady: # Since this task runs immediately on startup, if the # hypervisor is not yet ready handle it gracefully. 
LOG.debug('Unable to check for running deleted instances ' 'at this time since the hypervisor is not ready.') return for instance in instances: if action == "log": LOG.warning("Detected instance with name label " "'%s' which is marked as " "DELETED but still present on host.", instance.name, instance=instance) elif action == 'shutdown': LOG.info("Powering off instance with name label " "'%s' which is marked as " "DELETED but still present on host.", instance.name, instance=instance) try: self.driver.power_off(context, instance) except Exception: LOG.warning("Failed to power off instance", instance=instance, exc_info=True) elif action == 'reap': LOG.info("Destroying instance with name label " "'%s' which is marked as " "DELETED but still present on host.", instance.name, instance=instance) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid, use_slave=True) self.instance_events.clear_events_for_instance(instance) try: self._shutdown_instance(context, instance, bdms, notify=False) self._cleanup_volumes(context, instance, bdms, detach=False) except Exception as e: LOG.warning("Periodic cleanup failed to delete " "instance: %s", e, instance=instance) else: raise Exception(_("Unrecognized value '%s'" " for CONF.running_deleted_" "instance_action") % action) def _running_deleted_instances(self, context): """Returns a list of instances nova thinks is deleted, but the hypervisor thinks is still running. """ timeout = CONF.running_deleted_instance_timeout filters = {'deleted': True, 'soft_deleted': False} instances = self._get_instances_on_driver(context, filters) return [i for i in instances if self._deleted_old_enough(i, timeout)] def _deleted_old_enough(self, instance, timeout): deleted_at = instance.deleted_at if deleted_at: deleted_at = deleted_at.replace(tzinfo=None) return (not deleted_at or timeutils.is_older_than(deleted_at, timeout)) @contextlib.contextmanager def _error_out_instance_on_exception(self, context, instance, instance_state=vm_states.ACTIVE): """Context manager to set instance.vm_state after some operation raises Used to handle NotImplementedError and InstanceFaultRollback errors and reset the instance vm_state and task_state. The vm_state is set to the $instance_state parameter and task_state is set to None. For all other types of exceptions, the vm_state is set to ERROR and the task_state is left unchanged (although most callers will have the @reverts_task_state decorator which will set the task_state to None). Re-raises the original exception *except* in the case of InstanceFaultRollback in which case the wrapped `inner_exception` is re-raised. :param context: The nova auth request context for the operation. :param instance: The instance to update. The vm_state will be set by this context manager when an exception is raised. :param instance_state: For NotImplementedError and InstanceFaultRollback this is the vm_state to set the instance to when handling one of those types of exceptions. By default the instance will be set to ACTIVE, but the caller should control this in case there have been no changes to the running state of the instance. For example, resizing a stopped server where prep_resize fails early and does not change the power state of the guest should not set the instance status to ACTIVE but remain STOPPED. This parameter is ignored for all other types of exceptions and the instance vm_state is set to ERROR. """ # NOTE(mriedem): Why doesn't this method just save off the # original instance.vm_state here rather than use a parameter? 
Or use # instance_state=None as an override but default to the current # vm_state when rolling back. instance_uuid = instance.uuid try: yield except (NotImplementedError, exception.InstanceFaultRollback) as error: # Use reraise=False to determine if we want to raise the original # exception or something else. with excutils.save_and_reraise_exception(reraise=False) as ctxt: LOG.info("Setting instance back to %(state)s after: %(error)s", {'state': instance_state, 'error': error}, instance_uuid=instance_uuid) self._instance_update(context, instance, vm_state=instance_state, task_state=None) if isinstance(error, exception.InstanceFaultRollback): # Raise the wrapped exception. raise error.inner_exception # Else re-raise the NotImplementedError. ctxt.reraise = True except Exception: LOG.exception('Setting instance vm_state to ERROR', instance_uuid=instance_uuid) with excutils.save_and_reraise_exception(): # NOTE(mriedem): Why don't we pass clean_task_state=True here? self._set_instance_obj_error_state(instance) def _process_instance_event(self, instance, event): _event = self.instance_events.pop_instance_event(instance, event) if _event: LOG.debug('Processing event %(event)s', {'event': event.key}, instance=instance) _event.set(event) else: # If it's a network-vif-unplugged event and the instance is being # deleted or live migrated then we don't need to make this a # warning as it's expected. There are other expected things which # could trigger this event like detaching an interface, but we # don't have a task state for that. # TODO(mriedem): We have other move operations and things like # hard reboot (probably rebuild as well) which trigger this event # but nothing listens for network-vif-unplugged. We should either # handle those other known cases or consider just not logging a # warning if we get this event and the instance is undergoing some # task state transition. if (event.name == 'network-vif-unplugged' and instance.task_state in ( task_states.DELETING, task_states.MIGRATING)): LOG.debug('Received event %s for instance with task_state %s.', event.key, instance.task_state, instance=instance) else: LOG.warning('Received unexpected event %(event)s for ' 'instance with vm_state %(vm_state)s and ' 'task_state %(task_state)s.', {'event': event.key, 'vm_state': instance.vm_state, 'task_state': instance.task_state}, instance=instance) def _process_instance_vif_deleted_event(self, context, instance, deleted_vif_id): # If an attached port is deleted by neutron, it needs to # be detached from the instance. # And info cache needs to be updated. network_info = instance.info_cache.network_info for index, vif in enumerate(network_info): if vif['id'] == deleted_vif_id: LOG.info('Neutron deleted interface %(intf)s; ' 'detaching it from the instance and ' 'deleting it from the info cache', {'intf': vif['id']}, instance=instance) profile = vif.get('profile', {}) or {} # profile can be None rps = profile.get('allocation') if rps: if isinstance(rps, dict): # if extended resource request extension is enabled # then we have a dict of providers, flatten it for the # log. 
rps = ','.join(rps.values()) LOG.error( 'The bound port %(port_id)s is deleted in Neutron but ' 'the resource allocation on the resource providers ' '%(rp_uuid)s are leaked until the server ' '%(server_uuid)s is deleted.', {'port_id': vif['id'], 'rp_uuid': rps, 'server_uuid': instance.uuid}) del network_info[index] neutron.update_instance_cache_with_nw_info( self.network_api, context, instance, nw_info=network_info) try: self.driver.detach_interface(context, instance, vif) except NotImplementedError: # Not all virt drivers support attach/detach of interfaces # yet (like Ironic), so just ignore this. pass except exception.NovaException as ex: # If the instance was deleted before the interface was # detached, just log it at debug. log_level = (logging.DEBUG if isinstance(ex, exception.InstanceNotFound) else logging.WARNING) LOG.log(log_level, "Detach interface failed, " "port_id=%(port_id)s, reason: %(msg)s", {'port_id': deleted_vif_id, 'msg': ex}, instance=instance) break @wrap_instance_event(prefix='compute') @wrap_instance_fault def extend_volume(self, context, instance, extended_volume_id): # If an attached volume is extended by cinder, it needs to # be extended by virt driver so host can detect its new size. # And bdm needs to be updated. LOG.debug('Handling volume-extended event for volume %(vol)s', {'vol': extended_volume_id}, instance=instance) try: bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( context, extended_volume_id, instance.uuid) except exception.NotFound: LOG.warning('Extend volume failed, ' 'volume %(vol)s is not attached to instance.', {'vol': extended_volume_id}, instance=instance) return LOG.info('Cinder extended volume %(vol)s; ' 'extending it to detect new size', {'vol': extended_volume_id}, instance=instance) volume = self.volume_api.get(context, bdm.volume_id) if bdm.connection_info is None: LOG.warning('Extend volume failed, ' 'attached volume %(vol)s has no connection_info', {'vol': extended_volume_id}, instance=instance) return connection_info = jsonutils.loads(bdm.connection_info) bdm.volume_size = volume['size'] bdm.save() if not self.driver.capabilities.get('supports_extend_volume', False): raise exception.ExtendVolumeNotSupported() try: self.driver.extend_volume(context, connection_info, instance, bdm.volume_size * units.Gi) except Exception as ex: LOG.warning('Extend volume failed, ' 'volume_id=%(volume_id)s, reason: %(msg)s', {'volume_id': extended_volume_id, 'msg': ex}, instance=instance) raise @staticmethod def _is_state_valid_for_power_update_event(instance, target_power_state): """Check if the current state of the instance allows it to be a candidate for the power-update event. :param instance: The nova instance object. :param target_power_state: The desired target power state; this should either be "POWER_ON" or "POWER_OFF". :returns Boolean: True if the instance can be subjected to the power-update event. """ if ((target_power_state == external_event_obj.POWER_ON and instance.task_state is None and instance.vm_state == vm_states.STOPPED and instance.power_state == power_state.SHUTDOWN) or (target_power_state == external_event_obj.POWER_OFF and instance.task_state is None and instance.vm_state == vm_states.ACTIVE and instance.power_state == power_state.RUNNING)): return True return False @wrap_exception() @reverts_task_state @wrap_instance_event(prefix='compute') @wrap_instance_fault def power_update(self, context, instance, target_power_state): """Power update of an instance prompted by an external event. 
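        The update is only honoured when the instance is in a consistent
        starting state, as checked by _is_state_valid_for_power_update_event
        above; anything else is logged and treated as a no-op. As a
        standalone sketch of those rules (plain strings stand in for the
        nova vm_states/power_state constants, so this is illustrative, not
        the exact nova code)::

            # target power state -> required (vm_state, power_state);
            # in both cases the task_state must also be None.
            VALID_STARTING_STATES = {
                'POWER_ON': ('stopped', 'shutdown'),
                'POWER_OFF': ('active', 'running'),
            }

            def is_valid(target, vm_state, task_state, power_state):
                expected = VALID_STARTING_STATES.get(target)
                return (expected is not None and task_state is None and
                        (vm_state, power_state) == expected)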
:param context: The API request context. :param instance: The nova instance object. :param target_power_state: The desired target power state; this should either be "POWER_ON" or "POWER_OFF". """ @utils.synchronized(instance.uuid) def do_power_update(): LOG.debug('Handling power-update event with target_power_state %s ' 'for instance', target_power_state, instance=instance) if not self._is_state_valid_for_power_update_event( instance, target_power_state): pow_state = fields.InstancePowerState.from_index( instance.power_state) LOG.info('The power-update %(tag)s event for instance ' '%(uuid)s is a no-op since the instance is in ' 'vm_state %(vm_state)s, task_state ' '%(task_state)s and power_state ' '%(power_state)s.', {'tag': target_power_state, 'uuid': instance.uuid, 'vm_state': instance.vm_state, 'task_state': instance.task_state, 'power_state': pow_state}) return LOG.debug("Trying to %s instance", target_power_state, instance=instance) if target_power_state == external_event_obj.POWER_ON: action = fields.NotificationAction.POWER_ON notification_name = "power_on." instance.task_state = task_states.POWERING_ON else: # It's POWER_OFF action = fields.NotificationAction.POWER_OFF notification_name = "power_off." instance.task_state = task_states.POWERING_OFF instance.progress = 0 try: # Note that the task_state is set here rather than the API # because this is a best effort operation and deferring # updating the task_state until we get to the compute service # avoids error handling in the API and needing to account for # older compute services during rolling upgrades from Stein. # If we lose a race, UnexpectedTaskStateError is handled # below. instance.save(expected_task_state=[None]) self._notify_about_instance_usage(context, instance, notification_name + "start") compute_utils.notify_about_instance_action(context, instance, self.host, action=action, phase=fields.NotificationPhase.START) # UnexpectedTaskStateError raised from the driver will be # handled below and not result in a fault, error notification # or failure of the instance action. Other driver errors like # NotImplementedError will be record a fault, send an error # notification and mark the instance action as failed. self.driver.power_update_event(instance, target_power_state) self._notify_about_instance_usage(context, instance, notification_name + "end") compute_utils.notify_about_instance_action(context, instance, self.host, action=action, phase=fields.NotificationPhase.END) except exception.UnexpectedTaskStateError as e: # Handling the power-update event is best effort and if we lost # a race with some other action happening to the instance we # just log it and return rather than fail the action. LOG.info("The power-update event was possibly preempted: %s ", e.format_message(), instance=instance) return do_power_update() @wrap_exception() def external_instance_event(self, context, instances, events): # NOTE(danms): Some event types are handled by the manager, such # as when we're asked to update the instance's info_cache. If it's # not one of those, look for some thread(s) waiting for the event and # unblock them if so. 
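        # A minimal standalone sketch of the "wake up the waiting thread"
        # rendezvous described in the NOTE above. Illustrative only: nova's
        # real mechanism is the InstanceEvents helper used via
        # self.instance_events (prepare_for_instance_event /
        # pop_instance_event), not this threading-based toy.
        import threading

        _waiters = {}  # (instance_uuid, event_key) -> threading.Event

        def prepare_for_event(instance_uuid, event_key):
            # The side that is about to block registers a waiter first.
            evt = threading.Event()
            _waiters[(instance_uuid, event_key)] = evt
            return evt

        def deliver_event(instance_uuid, event_key):
            # The side receiving the external event pops and unblocks the
            # waiter, if any; otherwise the event is unexpected.
            evt = _waiters.pop((instance_uuid, event_key), None)
            if evt is not None:
                evt.set()
            return evt is not None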
for event in events: instance = [inst for inst in instances if inst.uuid == event.instance_uuid][0] LOG.debug('Received event %(event)s', {'event': event.key}, instance=instance) if event.name == 'network-changed': try: LOG.debug('Refreshing instance network info cache due to ' 'event %s.', event.key, instance=instance) self.network_api.get_instance_nw_info( context, instance, refresh_vif_id=event.tag) except exception.NotFound as e: LOG.info('Failed to process external instance event ' '%(event)s due to: %(error)s', {'event': event.key, 'error': str(e)}, instance=instance) elif event.name == 'network-vif-deleted': try: self._process_instance_vif_deleted_event(context, instance, event.tag) except exception.NotFound as e: LOG.info('Failed to process external instance event ' '%(event)s due to: %(error)s', {'event': event.key, 'error': str(e)}, instance=instance) elif event.name == 'volume-extended': self.extend_volume(context, instance, event.tag) elif event.name == 'power-update': self.power_update(context, instance, event.tag) else: self._process_instance_event(instance, event) @periodic_task.periodic_task(spacing=CONF.image_cache.manager_interval, external_process_ok=True) def _run_image_cache_manager_pass(self, context): """Run a single pass of the image cache manager.""" if not self.driver.capabilities.get("has_imagecache", False): return # Determine what other nodes use this storage storage_users.register_storage_use(CONF.instances_path, CONF.host) nodes = storage_users.get_storage_users(CONF.instances_path) # Filter all_instances to only include those nodes which share this # storage path. # TODO(mikal): this should be further refactored so that the cache # cleanup code doesn't know what those instances are, just a remote # count, and then this logic should be pushed up the stack. filters = {'deleted': False, 'soft_deleted': True, 'host': nodes} filtered_instances = objects.InstanceList.get_by_filters(context, filters, expected_attrs=[], use_slave=True) self.driver.manage_image_cache(context, filtered_instances) def cache_images(self, context, image_ids): """Ask the virt driver to pre-cache a set of base images. :param context: The RequestContext :param image_ids: The image IDs to be cached :return: A dict, keyed by image-id where the values are one of: 'cached' if the image was downloaded, 'existing' if the image was already in the cache, 'unsupported' if the virt driver does not support caching, 'error' if the virt driver raised an exception. """ results = {} LOG.info('Caching %i image(s) by request', len(image_ids)) for image_id in image_ids: try: cached = self.driver.cache_image(context, image_id) if cached: results[image_id] = 'cached' else: results[image_id] = 'existing' except NotImplementedError: LOG.warning('Virt driver does not support image pre-caching;' ' ignoring request') # NOTE(danms): Yes, technically we could short-circuit here to # avoid trying the rest of the images, but it's very cheap to # just keep hitting the NotImplementedError to keep the logic # clean. 
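        # As a quick illustration of the result dict documented above (the
        # image IDs and statuses here are made up, not real data), a caller
        # might get back something like this and tally the outcomes:
        example_results = {
            'image-1': 'cached',       # downloaded by this request
            'image-2': 'existing',     # already present in the cache
            'image-3': 'unsupported',  # driver has no pre-caching support
            'image-4': 'error',        # the driver raised while caching
        }
        summary = {}
        for status in example_results.values():
            summary[status] = summary.get(status, 0) + 1
        # summary == {'cached': 1, 'existing': 1, 'unsupported': 1,
        #             'error': 1}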
results[image_id] = 'unsupported' except Exception as e: results[image_id] = 'error' LOG.error('Failed to cache image %(image_id)s: %(err)s', {'image_id': image_id, 'err': e}) return results @periodic_task.periodic_task(spacing=CONF.instance_delete_interval) def _run_pending_deletes(self, context): """Retry any pending instance file deletes.""" LOG.debug('Cleaning up deleted instances') filters = {'deleted': True, 'soft_deleted': False, 'host': CONF.host, 'cleaned': False} attrs = ['system_metadata'] with utils.temporary_mutation(context, read_deleted='yes'): instances = objects.InstanceList.get_by_filters( context, filters, expected_attrs=attrs, use_slave=True) LOG.debug('There are %d instances to clean', len(instances)) for instance in instances: attempts = int(instance.system_metadata.get('clean_attempts', '0')) LOG.debug('Instance has had %(attempts)s of %(max)s ' 'cleanup attempts', {'attempts': attempts, 'max': CONF.maximum_instance_delete_attempts}, instance=instance) if attempts < CONF.maximum_instance_delete_attempts: success = self.driver.delete_instance_files(instance) instance.system_metadata['clean_attempts'] = str(attempts + 1) if success: instance.cleaned = True with utils.temporary_mutation(context, read_deleted='yes'): instance.save() @periodic_task.periodic_task(spacing=CONF.instance_delete_interval) def _cleanup_incomplete_migrations(self, context): """Cleanup on failed resize/revert-resize operation and failed rollback live migration operation. During resize/revert-resize operation, or after a failed rollback live migration operation, if that instance gets deleted then instance files might remain either on source or destination compute node and other specific resources might not be cleaned up because of the race condition. """ LOG.debug('Cleaning up deleted instances with incomplete migration ') migration_filters = {'host': CONF.host, 'status': 'error'} migrations = objects.MigrationList.get_by_filters(context, migration_filters) if not migrations: return inst_uuid_from_migrations = set([migration.instance_uuid for migration in migrations]) inst_filters = {'deleted': True, 'soft_deleted': False, 'uuid': inst_uuid_from_migrations} attrs = ['info_cache', 'security_groups', 'system_metadata'] with utils.temporary_mutation(context, read_deleted='yes'): instances = objects.InstanceList.get_by_filters( context, inst_filters, expected_attrs=attrs, use_slave=True) for instance in instances: if instance.host == CONF.host: continue for migration in migrations: if instance.uuid != migration.instance_uuid: continue self.driver.delete_instance_files(instance) # we are not sure whether the migration_context is applied # during incompleted migrating, we need to apply/revert # migration_context to get instance object content matching # current host. 
revert = (True if migration.source_compute == CONF.host else False) with instance.mutated_migration_context(revert=revert): self.driver.cleanup_lingering_instance_resources(instance) try: migration.status = 'failed' migration.save() except exception.MigrationNotFound: LOG.warning("Migration %s is not found.", migration.id, instance=instance) break @messaging.expected_exceptions(exception.InstanceQuiesceNotSupported, exception.QemuGuestAgentNotEnabled, exception.NovaException, NotImplementedError) @wrap_exception() def quiesce_instance(self, context, instance): """Quiesce an instance on this host.""" context = context.elevated() image_meta = objects.ImageMeta.from_instance(instance) self.driver.quiesce(context, instance, image_meta) def _wait_for_snapshots_completion(self, context, mapping): for mapping_dict in mapping: if mapping_dict.get('source_type') == 'snapshot': def _wait_snapshot(): snapshot = self.volume_api.get_snapshot( context, mapping_dict['snapshot_id']) if snapshot.get('status') != 'creating': raise loopingcall.LoopingCallDone() timer = loopingcall.FixedIntervalLoopingCall(_wait_snapshot) timer.start(interval=0.5).wait() @messaging.expected_exceptions(exception.InstanceQuiesceNotSupported, exception.QemuGuestAgentNotEnabled, exception.NovaException, NotImplementedError) @wrap_exception() def unquiesce_instance(self, context, instance, mapping=None): """Unquiesce an instance on this host. If snapshots' image mapping is provided, it waits until snapshots are completed before unqueiscing. """ context = context.elevated() if mapping: try: self._wait_for_snapshots_completion(context, mapping) except Exception as error: LOG.exception("Exception while waiting completion of " "volume snapshots: %s", error, instance=instance) image_meta = objects.ImageMeta.from_instance(instance) self.driver.unquiesce(context, instance, image_meta) @periodic_task.periodic_task(spacing=CONF.instance_delete_interval) def _cleanup_expired_console_auth_tokens(self, context): """Remove all expired console auth tokens. Console authorization tokens and their connection data are stored in the database when a user asks for a console connection to an instance. After a time they expire. We periodically remove any expired tokens from the database. """ objects.ConsoleAuthToken.clean_expired_console_auths(context) def _claim_pci_for_instance_vifs(self, ctxt, instance): """Claim PCI devices for the instance's VIFs on the compute node :param ctxt: Context :param instance: Instance object :return: mapping for the VIFs that yielded a PCI claim on the compute node """ pci_req_id_to_port_id = {} pci_reqs = [] port_id_to_pci_dev = {} for vif in instance.get_network_info(): pci_req = pci_req_module.get_instance_pci_request_from_vif( ctxt, instance, vif) if pci_req: pci_req_id_to_port_id[pci_req.request_id] = vif['id'] pci_reqs.append(pci_req) if pci_reqs: # Create PCI requests and claim against PCI resource tracker # NOTE(adrianc): We claim against the same requests as on the # source node. 
vif_pci_requests = objects.InstancePCIRequests( requests=pci_reqs, instance_uuid=instance.uuid) # if we are called during the live migration with NUMA topology # support the PCI claim needs to consider the destination NUMA # topology that is then stored in the migration_context dest_topo = None if instance.migration_context: dest_topo = instance.migration_context.new_numa_topology claimed_pci_devices_objs = self.rt.claim_pci_devices( ctxt, vif_pci_requests, dest_topo) # Update VIFMigrateData profile with the newly claimed PCI # device for pci_dev in claimed_pci_devices_objs: LOG.debug("PCI device: %s Claimed on destination node", pci_dev.address) port_id = pci_req_id_to_port_id[pci_dev.request_id] port_id_to_pci_dev[port_id] = pci_dev return port_id_to_pci_dev def _update_migrate_vifs_profile_with_pci(self, migrate_vifs, port_id_to_pci_dev): """Update migrate vifs profile with the claimed PCI devices :param migrate_vifs: list of VIFMigrateData objects :param port_id_to_pci_dev: a mapping :return: None. """ for mig_vif in migrate_vifs: port_id = mig_vif.port_id if port_id not in port_id_to_pci_dev: continue pci_dev = port_id_to_pci_dev[port_id] profile = copy.deepcopy(mig_vif.source_vif['profile']) profile['pci_slot'] = pci_dev.address profile['pci_vendor_info'] = ':'.join([pci_dev.vendor_id, pci_dev.product_id]) if profile.get('card_serial_number'): # Assume it is there since Nova makes sure that PCI devices # tagged as remote-managed have a serial in PCI VPD. profile['card_serial_number'] = pci_dev.card_serial_number if profile.get('pf_mac_address'): profile['pf_mac_address'] = pci_dev.sriov_cap['pf_mac_address'] if profile.get('vf_num'): profile['vf_num'] = pci_dev.sriov_cap['vf_num'] if pci_dev.mac_address: profile['device_mac_address'] = pci_dev.mac_address mig_vif.profile = profile LOG.debug("Updating migrate VIF profile for port %(port_id)s:" "%(profile)s", {'port_id': port_id, 'profile': profile}) # TODO(sbauza): Remove this proxy class in the X release once we drop the 5.x # support. # NOTE(sbauza): This proxy class will support the existing <=5.13 RPC calls # from any RPC client but will also make sure that the new 6.0 RPC calls will # be supported. class _ComputeV5Proxy(object): target = messaging.Target(version='5.13') def __init__(self, manager): self.manager = manager def __getattr__(self, name): # NOTE(sbauza): Proxying all the other methods but the V5 ones. return getattr(self.manager, name) # 5.0 support for block_migration argument def pre_live_migration(self, context, instance, block_migration, disk, migrate_data): return self.manager.pre_live_migration(context, instance, disk, migrate_data) # 5.1 support for legacy request_spec argument def prep_resize(self, context, image, instance, instance_type, request_spec, filter_properties, node, clean_shutdown, migration, host_list): if not isinstance(request_spec, objects.RequestSpec): # Prior to compute RPC API 5.1 conductor would pass a legacy dict # version of the request spec to compute and since Stein compute # could be sending that back to conductor on reschedule, so if we # got a dict convert it to an object. # TODO(mriedem): We can drop this compat code when we only support # compute RPC API >=6.0. request_spec = objects.RequestSpec.from_primitives( context, request_spec, filter_properties) # We don't have to set the new flavor on the request spec because # if we got here it was due to a reschedule from the compute and # the request spec would already have the new flavor in it from the # else block below. 
self.manager.prep_resize(context, image, instance, instance_type, request_spec, filter_properties, node, clean_shutdown, migration, host_list) # 5.2 support for optional request_spec argument def resize_instance(self, context, instance, image, migration, instance_type, clean_shutdown, request_spec=None): self.manager.resize_instance(context, instance, image, migration, instance_type, clean_shutdown, request_spec) # 5.2 support for optional request_spec argument def finish_resize(self, context, disk_info, image, instance, migration, request_spec=None): self.manager.finish_resize(context, disk_info, image, instance, migration, request_spec) # 5.2 support for optional request_spec argument def revert_resize(self, context, instance, migration, request_spec=None): self.manager.revert_resize(context, instance, migration, request_spec) # 5.2 support for optional request_spec argument def finish_revert_resize( self, context, instance, migration, request_spec=None): self.manager.finish_revert_resize(context, instance, migration, request_spec) # 5.2 support for optional request_spec argument # 5.13 support for optional accel_uuids argument def unshelve_instance(self, context, instance, image, filter_properties, node, request_spec=None, accel_uuids=None): self.manager.unshelve_instance(context, instance, image, filter_properties, node, request_spec, accel_uuids or []) # 5.3 support for optional migration and limits arguments def check_can_live_migrate_destination(self, ctxt, instance, block_migration, disk_over_commit, migration=None, limits=None): return self.manager.check_can_live_migrate_destination( ctxt, instance, block_migration, disk_over_commit, migration, limits) # 5.11 support for optional accel_uuids argument def build_and_run_instance(self, context, instance, image, request_spec, filter_properties, admin_password=None, injected_files=None, requested_networks=None, security_groups=None, block_device_mapping=None, node=None, limits=None, host_list=None, accel_uuids=None): self.manager.build_and_run_instance( context, instance, image, request_spec, filter_properties, accel_uuids, admin_password, injected_files, requested_networks, security_groups, block_device_mapping, node, limits, host_list) # 5.12 support for optional accel_uuids argument def rebuild_instance(self, context, instance, orig_image_ref, image_ref, injected_files, new_pass, orig_sys_metadata, bdms, recreate, on_shared_storage, preserve_ephemeral, migration, scheduled_node, limits, request_spec, accel_uuids=None): self.manager.rebuild_instance( context, instance, orig_image_ref, image_ref, injected_files, new_pass, orig_sys_metadata, bdms, recreate, on_shared_storage, preserve_ephemeral, migration, scheduled_node, limits, request_spec, accel_uuids, False, None) # 5.13 support for optional accel_uuids argument def shelve_instance(self, context, instance, image_id, clean_shutdown, accel_uuids=None): self.manager.shelve_instance(context, instance, image_id, clean_shutdown, accel_uuids) # 5.13 support for optional accel_uuids argument def shelve_offload_instance(self, context, instance, clean_shutdown, accel_uuids=None): self.manager.shelve_offload_instance( context, instance, clean_shutdown, accel_uuids) # 6.0 drop unused request_spec argument def prep_snapshot_based_resize_at_dest( self, ctxt, instance, flavor, nodename, migration, limits, request_spec): return self.manager.prep_snapshot_based_resize_at_dest( ctxt, instance, flavor, nodename, migration, limits) # 6.0 drop unused request_spec argument def 
finish_snapshot_based_resize_at_dest( self, ctxt, instance, migration, snapshot_id, request_spec): self.manager.finish_snapshot_based_resize_at_dest( ctxt, instance, migration, snapshot_id) # 6.0 drop unused instance argument def check_instance_shared_storage(self, ctxt, instance, data): return self.manager.check_instance_shared_storage(ctxt, data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/migration_list.py0000664000175000017500000000667300000000000020613 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.compute import multi_cell_list from nova import context from nova.db.main import api as db from nova import exception from nova import objects from nova.objects import base class MigrationSortContext(multi_cell_list.RecordSortContext): def __init__(self, sort_keys, sort_dirs): if not sort_keys: sort_keys = ['created_at', 'id'] sort_dirs = ['desc', 'desc'] if 'uuid' not in sort_keys: # Add uuid into the list of sort_keys. Since we're striping # across cell databases here, many sort_keys arrangements will # yield nothing unique across all the databases to give us a stable # ordering, which can mess up expected client pagination behavior. # So, throw uuid into the sort_keys at the end if it's not already # there to keep us repeatable. sort_keys = copy.copy(sort_keys) + ['uuid'] sort_dirs = copy.copy(sort_dirs) + ['asc'] super(MigrationSortContext, self).__init__(sort_keys, sort_dirs) class MigrationLister(multi_cell_list.CrossCellLister): def __init__(self, sort_keys, sort_dirs): super(MigrationLister, self).__init__( MigrationSortContext(sort_keys, sort_dirs)) @property def marker_identifier(self): return 'uuid' def get_marker_record(self, ctx, marker): """Get the marker migration from its cell. 
This returns the marker migration from the cell in which it lives """ results = context.scatter_gather_skip_cell0( ctx, db.migration_get_by_uuid, marker) db_migration = None for result_cell_uuid, result in results.items(): if not context.is_cell_failure_sentinel(result): db_migration = result cell_uuid = result_cell_uuid break if not db_migration: raise exception.MarkerNotFound(marker=marker) return cell_uuid, db_migration def get_marker_by_values(self, ctx, values): return db.migration_get_by_sort_filters(ctx, self.sort_ctx.sort_keys, self.sort_ctx.sort_dirs, values) def get_by_filters(self, ctx, filters, limit, marker, **kwargs): return db.migration_get_all_by_filters( ctx, filters, limit=limit, marker=marker, sort_keys=self.sort_ctx.sort_keys, sort_dirs=self.sort_ctx.sort_dirs) def get_migration_objects_sorted(ctx, filters, limit, marker, sort_keys, sort_dirs): mig_generator = MigrationLister(sort_keys, sort_dirs).get_records_sorted( ctx, filters, limit, marker) return base.obj_make_list(ctx, objects.MigrationList(), objects.Migration, mig_generator) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3416083 nova-32.0.0/nova/compute/monitors/0000775000175000017500000000000000000000000017053 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/monitors/__init__.py0000664000175000017500000000766200000000000021177 0ustar00zuulzuul00000000000000# Copyright 2013 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Resource monitor API specification. """ from oslo_log import log as logging from stevedore import enabled import nova.conf CONF = nova.conf.CONF LOG = logging.getLogger(__name__) class MonitorHandler(object): NAMESPACES = [ 'nova.compute.monitors.cpu', ] def __init__(self, resource_tracker): # Dictionary keyed by the monitor type namespace. Value is the # first loaded monitor of that namespace or False. self.type_monitor_loaded = {ns: False for ns in self.NAMESPACES} self.monitors = [] for ns in self.NAMESPACES: plugin_mgr = enabled.EnabledExtensionManager( namespace=ns, invoke_on_load=True, check_func=self.check_enabled_monitor, invoke_args=(resource_tracker,) ) self.monitors += [ext.obj for ext in plugin_mgr] def check_enabled_monitor(self, ext): """Ensures that only one monitor is specified of any type.""" # The extension does not have a namespace attribute, unfortunately, # but we can get the namespace by examining the first part of the # entry_point_target attribute, which looks like this: # 'nova.compute.monitors.cpu.virt_driver:Monitor' ept = ext.entry_point_target ept_parts = ept.split(':') namespace_parts = ept_parts[0].split('.') namespace = '.'.join(namespace_parts[0:-1]) if self.type_monitor_loaded[namespace] is not False: LOG.warning("Excluding %(namespace)s monitor " "%(monitor_name)s. 
Already loaded " "%(loaded_monitor)s.", {'namespace': namespace, 'monitor_name': ext.name, 'loaded_monitor': self.type_monitor_loaded[namespace] }) return False # NOTE(jaypipes): We used to only have CPU monitors, so # CONF.compute_monitors could contain "virt_driver" without any monitor # type namespace. So, to maintain backwards-compatibility with that # older way of specifying monitors, we first loop through any values in # CONF.compute_monitors and put any non-namespace'd values into the # 'cpu' namespace. cfg_monitors = ['cpu.' + cfg if '.' not in cfg else cfg for cfg in CONF.compute_monitors] # NOTE(jaypipes): Append 'nova.compute.monitors.' to any monitor value # that doesn't have it to allow CONF.compute_monitors to use shortened # namespaces (like 'cpu.' instead of 'nova.compute.monitors.cpu.') cfg_monitors = ['nova.compute.monitors.' + cfg if 'nova.compute.monitors.' not in cfg else cfg for cfg in cfg_monitors] if namespace + '.' + ext.name in cfg_monitors: self.type_monitor_loaded[namespace] = ext.name return True # Only log something if compute_monitors is not empty. if CONF.compute_monitors: LOG.warning("Excluding %(namespace)s monitor %(monitor_name)s. " "Not in the list of enabled monitors " "(CONF.compute_monitors).", {'namespace': namespace, 'monitor_name': ext.name}) return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/monitors/base.py0000664000175000017500000000556200000000000020347 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from nova.objects import fields class MonitorBase(metaclass=abc.ABCMeta): """Base class for all resource monitor plugins. A monitor is responsible for adding a set of related metrics to a `nova.objects.MonitorMetricList` object after the monitor has performed some sampling or monitoring action. """ def __init__(self, compute_manager): self.compute_manager = compute_manager self.source = None @abc.abstractmethod def get_metric_names(self): """Get available metric names. Get available metric names, which are represented by a set of keys that can be used to check conflicts and duplications :returns: set containing one or more values from :py:attr: nova.objects.fields.MonitorMetricType.ALL """ raise NotImplementedError('get_metric_names') @abc.abstractmethod def populate_metrics(self, metric_list): """Monitors are responsible for populating this metric_list object with nova.objects.MonitorMetric objects with values collected via the respective compute drivers. Note that if the monitor class is responsible for tracking a *related* set of metrics -- e.g. a set of percentages of CPU time allocated to user, kernel, and idle -- it is the responsibility of the monitor implementation to do a single sampling call to the underlying monitor to ensure that related metric values make logical sense. 
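        As a rough sketch of such an implementation (illustrative only: the
        _get_sample() helper is made up, and real monitors live under
        nova.compute.monitors.cpu and are loaded via stevedore)::

            class FakeCPUMonitor(CPUMonitorBase):

                def populate_metrics(self, metric_list):
                    # A single sampling call so all related values come
                    # from the same snapshot and stay consistent.
                    sample = self._get_sample()
                    now = timeutils.utcnow()
                    for name in self.get_metric_names():
                        metric = objects.MonitorMetric()
                        metric.name = name
                        metric.value = sample[name]
                        metric.timestamp = now
                        metric.source = self.source
                        metric_list.objects.append(metric)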
:param metric_list: A mutable reference of the metric list object """ raise NotImplementedError('populate_metrics') class CPUMonitorBase(MonitorBase): """Base class for all monitors that return CPU-related metrics.""" def get_metric_names(self): return set([ fields.MonitorMetricType.CPU_FREQUENCY, fields.MonitorMetricType.CPU_USER_TIME, fields.MonitorMetricType.CPU_KERNEL_TIME, fields.MonitorMetricType.CPU_IDLE_TIME, fields.MonitorMetricType.CPU_IOWAIT_TIME, fields.MonitorMetricType.CPU_USER_PERCENT, fields.MonitorMetricType.CPU_KERNEL_PERCENT, fields.MonitorMetricType.CPU_IDLE_PERCENT, fields.MonitorMetricType.CPU_IOWAIT_PERCENT, fields.MonitorMetricType.CPU_PERCENT, ]) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3416083 nova-32.0.0/nova/compute/monitors/cpu/0000775000175000017500000000000000000000000017642 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/monitors/cpu/__init__.py0000664000175000017500000000000000000000000021741 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/monitors/cpu/virt_driver.py0000664000175000017500000001005100000000000022550 0ustar00zuulzuul00000000000000# Copyright 2013 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ CPU monitor based on virt driver to retrieve CPU information """ from oslo_log import log as logging from oslo_utils import timeutils from nova.compute.monitors import base import nova.conf from nova import exception from nova import objects CONF = nova.conf.CONF LOG = logging.getLogger(__name__) class Monitor(base.CPUMonitorBase): """CPU monitor that uses the virt driver's get_host_cpu_stats() call.""" def __init__(self, resource_tracker): super(Monitor, self).__init__(resource_tracker) self.source = CONF.compute_driver self.driver = resource_tracker.driver self._data = {} self._cpu_stats = {} def populate_metrics(self, metric_list): self._update_data() for name in self.get_metric_names(): metric_object = objects.MonitorMetric() metric_object.name = name metric_object.value = self._data[name] metric_object.timestamp = self._data["timestamp"] metric_object.source = self.source metric_list.objects.append(metric_object) def _update_data(self): self._data = {} self._data["timestamp"] = timeutils.utcnow() # Extract node's CPU statistics. 
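        # For reference, the driver call in the try block below is expected
        # to return absolute (cumulative) CPU time counters plus the current
        # frequency, shaped roughly like this. The numbers are made up and
        # the units depend on the virt driver; this dict is purely
        # illustrative:
        example_stats = {
            'user': 1000000,
            'kernel': 500000,
            'idle': 8000000,
            'iowait': 20000,
            'frequency': 2400,
        }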
try: stats = self.driver.get_host_cpu_stats() self._data["cpu.user.time"] = stats["user"] self._data["cpu.kernel.time"] = stats["kernel"] self._data["cpu.idle.time"] = stats["idle"] self._data["cpu.iowait.time"] = stats["iowait"] self._data["cpu.frequency"] = stats["frequency"] except (TypeError, KeyError): LOG.exception("Not all properties needed are implemented " "in the compute driver") raise exception.ResourceMonitorError( monitor=self.__class__.__name__) # The compute driver API returns the absolute values for CPU times. # We compute the utilization percentages for each specific CPU time # after calculating the delta between the current reading and the # previous reading. stats["total"] = (stats["user"] + stats["kernel"] + stats["idle"] + stats["iowait"]) cputime = float(stats["total"] - self._cpu_stats.get("total", 0)) # NOTE(jwcroppe): Convert all the `perc` values to their integer forms # since pre-conversion their values are within the range [0, 1] and the # objects.MonitorMetric.value field requires an integer. perc = (stats["user"] - self._cpu_stats.get("user", 0)) / cputime self._data["cpu.user.percent"] = int(perc * 100) perc = (stats["kernel"] - self._cpu_stats.get("kernel", 0)) / cputime self._data["cpu.kernel.percent"] = int(perc * 100) perc = (stats["idle"] - self._cpu_stats.get("idle", 0)) / cputime self._data["cpu.idle.percent"] = int(perc * 100) perc = (stats["iowait"] - self._cpu_stats.get("iowait", 0)) / cputime self._data["cpu.iowait.percent"] = int(perc * 100) # Compute the current system-wide CPU utilization as a percentage. used = stats["user"] + stats["kernel"] + stats["iowait"] prev_used = (self._cpu_stats.get("user", 0) + self._cpu_stats.get("kernel", 0) + self._cpu_stats.get("iowait", 0)) perc = (used - prev_used) / cputime self._data["cpu.percent"] = int(perc * 100) self._cpu_stats = stats.copy() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315633.0 nova-32.0.0/nova/compute/multi_cell_list.py0000664000175000017500000004711100000000000020743 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import copy import heapq import eventlet from oslo_log import log as logging import nova.conf from nova import context from nova import exception from nova.i18n import _ LOG = logging.getLogger(__name__) CONF = nova.conf.CONF class RecordSortContext(object): def __init__(self, sort_keys, sort_dirs): self.sort_keys = sort_keys self.sort_dirs = sort_dirs def compare_records(self, rec1, rec2): """Implements cmp(rec1, rec2) for the first key that is different. Adjusts for the requested sort direction by inverting the result as needed. """ for skey, sdir in zip(self.sort_keys, self.sort_dirs): resultflag = 1 if sdir == 'desc' else -1 if rec1[skey] < rec2[skey]: return resultflag elif rec1[skey] > rec2[skey]: return resultflag * -1 return 0 class RecordWrapper(object): """Wrap a DB object from the database so it is sortable. We use heapq.merge() below to do the merge sort of things from the cell databases. 
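    A standalone sketch of that pattern (plain dicts and a single ascending
    sort key, purely illustrative -- the real wrapper defers to
    RecordSortContext and also special-cases cell failure sentinels)::

        import heapq

        class Wrapper(object):
            def __init__(self, rec):
                self.rec = rec

            def __lt__(self, other):
                # heapq.merge() only needs '<' to interleave the
                # already-sorted per-cell result streams.
                return self.rec['created_at'] < other.rec['created_at']

        cell1 = [Wrapper({'created_at': 1}), Wrapper({'created_at': 4})]
        cell2 = [Wrapper({'created_at': 2}), Wrapper({'created_at': 3})]
        merged = [w.rec['created_at'] for w in heapq.merge(cell1, cell2)]
        # merged == [1, 2, 3, 4]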
That routine assumes it can use regular python operators (> and <) on the contents. Since that won't work with instances from the database (and depends on the sort keys/dirs), we need this wrapper class to provide that. Implementing __lt__ is enough for heapq.merge() to do its work. """ def __init__(self, ctx, sort_ctx, db_record): self.cell_uuid = ctx.cell_uuid self._sort_ctx = sort_ctx self._db_record = db_record def __lt__(self, other): # NOTE(danms): This makes us always sort failure sentinels # higher than actual results. We do this so that they bubble # up in the get_records_sorted() feeder loop ahead of anything # else, and so that the implementation of RecordSortContext # never sees or has to handle the sentinels. If we did not # sort these to the top then we could potentially return # $limit results from good cells before we noticed the failed # cells, and would not properly report them as failed for # fix-up in the higher layers. if context.is_cell_failure_sentinel(self._db_record): return True elif context.is_cell_failure_sentinel(other._db_record): return False r = self._sort_ctx.compare_records(self._db_record, other._db_record) # cmp(x, y) returns -1 if x < y return r == -1 def query_wrapper(ctx, fn, *args, **kwargs): """This is a helper to run a query with predictable fail semantics. This is a generator which will mimic the scatter_gather_cells() behavior by honoring a timeout and catching exceptions, yielding the usual sentinel objects instead of raising. It wraps these in RecordWrapper objects, which will prioritize them to the merge sort, causing them to be handled by the main get_objects_sorted() feeder loop quickly and gracefully. """ with eventlet.timeout.Timeout(context.CELL_TIMEOUT, exception.CellTimeout): try: for record in fn(ctx, *args, **kwargs): yield record except exception.CellTimeout: # Here, we yield a RecordWrapper (no sort_ctx needed since # we won't call into the implementation's comparison routines) # wrapping the sentinel indicating timeout. yield RecordWrapper(ctx, None, context.did_not_respond_sentinel) return except Exception as e: # Here, we yield a RecordWrapper (no sort_ctx needed since # we won't call into the implementation's comparison routines) # wrapping the exception object indicating failure. yield RecordWrapper(ctx, None, e.__class__(e.args)) return class CrossCellLister(metaclass=abc.ABCMeta): """An implementation of a cross-cell efficient lister. This primarily provides a listing implementation for fetching records from across multiple cells, paginated and sorted appropriately. The external interface is the get_records_sorted() method. You should implement this if you need to efficiently list your data type from cell databases. """ def __init__(self, sort_ctx, cells=None, batch_size=None): self.sort_ctx = sort_ctx self.cells = cells self.batch_size = batch_size self._cells_responded = set() self._cells_failed = set() self._cells_timed_out = set() @property def cells_responded(self): """A list of uuids representing those cells that returned a successful result. """ return list(self._cells_responded) @property def cells_failed(self): """A list of uuids representing those cells that failed to return a successful result. """ return list(self._cells_failed) @property def cells_timed_out(self): """A list of uuids representing those cells that timed out while being contacted. """ return list(self._cells_timed_out) @property @abc.abstractmethod def marker_identifier(self): """Return the name of the property used as the marker identifier. 
For instances (and many other types) this is 'uuid', but could also be things like 'id' or anything else used as the marker identifier when fetching a page of results. """ pass @abc.abstractmethod def get_marker_record(self, ctx, marker_id): """Get the cell UUID and instance of the marker record by id. This needs to look up the marker record in whatever cell it is in and return it. It should be populated with values corresponding to what is in self.sort_ctx.sort_keys. :param ctx: A RequestContext :param marker_id: The identifier of the marker to find :returns: A tuple of cell_uuid where the marker was found and an instance of the marker from the database :raises: MarkerNotFound if the marker does not exist """ pass @abc.abstractmethod def get_marker_by_values(self, ctx, values): """Get the identifier of the marker record by value. When we need to paginate across cells, the marker record exists in only one of those cells. The rest of the cells must decide on a record to be their equivalent marker with which to return the next page of results. This must be done by value, based on the values of the sort_keys properties on the actual marker, as if the results were sorted appropriately and the actual marker existed in each cell. :param ctx: A RequestContext :param values: The values of the sort_keys properties of fhe actual marker instance :returns: The identifier of the equivalent marker in the local database """ pass @abc.abstractmethod def get_by_filters(self, ctx, filters, limit, marker, **kwargs): """List records by filters, sorted and paginated. This is the standard filtered/sorted list method for the data type we are trying to list out of the database. Additional kwargs are passed through. :param ctx: A RequestContext :param filters: A dict of column=filter items :param limit: A numeric limit on the number of results, or None :param marker: The marker identifier, or None :returns: A list of records """ pass def get_records_sorted(self, ctx, filters, limit, marker, **kwargs): """Get a cross-cell list of records matching filters. This iterates cells in parallel generating a unified and sorted list of records as efficiently as possible. It takes care to iterate the list as infrequently as possible. We wrap the results in RecordWrapper objects so that they are sortable by heapq.merge(), which requires that the '<' operator just works. Our sorting requirements are encapsulated into the RecordSortContext provided to the constructor for this object. This function is a generator of records from the database like what you would get from instance_get_all_by_filters_sort() in the DB API. NOTE: Since we do these in parallel, a nonzero limit will be passed to each database query, although the limit will be enforced in the output of this function. Meaning, we will still query $limit from each database, but only return $limit total results. :param cell_down_support: True if the API (and caller) support returning a minimal instance construct if the relevant cell is down. If its True, then the value of CONF.api.list_records_by_skipping_down_cells is ignored and if its False, results are either skipped or erred based on the value of CONF.api.list_records_by_skipping_down_cells. """ cell_down_support = kwargs.pop('cell_down_support', False) if marker: # A marker identifier was provided from the API. Call this # the 'global' marker as it determines where we start the # process across all cells. 
Look up the record in # whatever cell it is in and record the values for the # sort keys so we can find the marker instance in each # cell (called the 'local' marker). global_marker_cell, global_marker_record = self.get_marker_record( ctx, marker) global_marker_values = [global_marker_record[key] for key in self.sort_ctx.sort_keys] def do_query(cctx): """Generate RecordWrapper(record) objects from a cell. We do this inside the thread (created by scatter_gather_all_cells()) so that we return wrappers and avoid having to iterate the combined result list in the caller again. This is run against each cell by the scatter_gather routine. """ # The local marker is an identifier of a record in a cell # that is found by the special method # get_marker_by_values(). It should be the next record # in order according to the sort provided, but after the # marker instance which may have been in another cell. local_marker = None # Since the regular DB query routines take a marker and assume that # the marked record was the last entry of the previous page, we # may need to prefix it to our result query if we're not the cell # that had the actual marker record. local_marker_prefix = [] marker_id = self.marker_identifier if marker: if cctx.cell_uuid == global_marker_cell: local_marker = marker else: local_marker = self.get_marker_by_values( cctx, global_marker_values) if local_marker: if local_marker != marker: # We did find a marker in our cell, but it wasn't # the global marker. Thus, we will use it as our # marker in the main query below, but we also need # to prefix that result with this marker instance # since the result below will not return it and it # has not been returned to the user yet. Note that # we do _not_ prefix the marker instance if our # marker was the global one since that has already # been sent to the user. local_marker_filters = copy.copy(filters) if marker_id not in local_marker_filters: # If an $id filter was provided, it will # have included our marker already if this # instance is desired in the output # set. If it wasn't, we specifically query # for it. If the other filters would have # excluded it, then we'll get an empty set # here and not include it in the output as # expected. local_marker_filters[marker_id] = [local_marker] local_marker_prefix = self.get_by_filters( cctx, local_marker_filters, limit=1, marker=None, **kwargs) else: # There was a global marker but everything in our # cell is _before_ that marker, so we return # nothing. If we didn't have this clause, we'd # pass marker=None to the query below and return a # full unpaginated set for our cell. return if local_marker_prefix: # Per above, if we had a matching marker object, that is # the first result we should generate. yield RecordWrapper(cctx, self.sort_ctx, local_marker_prefix[0]) # If a batch size was provided, use that as the limit per # batch. If not, then ask for the entire $limit in a single # batch. batch_size = self.batch_size or limit # Keep track of how many we have returned in all batches return_count = 0 # If limit was unlimited then keep querying batches until # we run out of results. Otherwise, query until the total count # we have returned exceeds the limit. 
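            # A compact standalone sketch of that batching arithmetic
            # (illustrative only; the real loop below also threads the local
            # marker through each batch, supports limit=None and stops early
            # when a cell returns an empty batch):
            def _example_batch_sizes(limit, batch_size):
                # Yield the per-query sizes used until 'limit' rows have
                # been fetched from this cell.
                returned = 0
                while returned < limit:
                    size = min(batch_size, limit - returned)
                    yield size
                    returned += size

            # list(_example_batch_sizes(limit=10, batch_size=4)) == [4, 4, 2]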
while limit is None or return_count < limit: batch_count = 0 # Do not query a full batch if it would cause our total # to exceed the limit if limit: query_size = min(batch_size, limit - return_count) else: query_size = batch_size # Get one batch query_result = self.get_by_filters( cctx, filters, limit=query_size or None, marker=local_marker, **kwargs) # Yield wrapped results from the batch, counting as we go # (to avoid traversing the list to count). Also, update our # local_marker each time so that local_marker is the end of # this batch in order to find the next batch. for item in query_result: local_marker = item[self.marker_identifier] yield RecordWrapper(cctx, self.sort_ctx, item) batch_count += 1 # No results means we are done for this cell if not batch_count: break return_count += batch_count LOG.debug(('Listed batch of %(batch)i results from cell ' 'out of %(limit)s limit. Returned %(total)i ' 'total so far.'), {'batch': batch_count, 'total': return_count, 'limit': limit or 'no'}) # NOTE(danms): The calls to do_query() will return immediately # with a generator. There is no point in us checking the # results for failure or timeout since we have not actually # run any code in do_query() until the first iteration # below. The query_wrapper() utility handles inline # translation of failures and timeouts to sentinels which will # be generated and consumed just like any normal result below. if self.cells: results = context.scatter_gather_cells(ctx, self.cells, context.CELL_TIMEOUT, query_wrapper, do_query) else: results = context.scatter_gather_all_cells(ctx, query_wrapper, do_query) # If a limit was provided, it was passed to the per-cell query # routines. That means we have NUM_CELLS * limit items across # results. So, we need to consume from that limit below and # stop returning results. Call that total_limit since we will # modify it in the loop below, but do_query() above also looks # at the original provided limit. total_limit = limit or 0 # Generate results from heapq so we can return the inner # instance instead of the wrapper. This is basically free # as it works as our caller iterates the results. feeder = heapq.merge(*results.values()) while True: try: item = next(feeder) except StopIteration: return if context.is_cell_failure_sentinel(item._db_record): if (not CONF.api.list_records_by_skipping_down_cells and not cell_down_support): # Value the config # ``CONF.api.list_records_by_skipping_down_cells`` only if # cell_down_support is False and generate the exception # if CONF.api.list_records_by_skipping_down_cells is False. # In all other cases the results from the down cell should # be skipped now to either construct minimal constructs # later if cell_down_support is True or to simply return # the skipped results if cell_down_support is False. raise exception.NovaException( _('Cell %s is not responding but configuration ' 'indicates that we should fail.') % item.cell_uuid) LOG.warning('Cell %s is not responding and hence is ' 'being omitted from the results', item.cell_uuid) if item._db_record == context.did_not_respond_sentinel: self._cells_timed_out.add(item.cell_uuid) elif isinstance(item._db_record, Exception): self._cells_failed.add(item.cell_uuid) # We might have received one batch but timed out or failed # on a later one, so be sure we fix the accounting. 
if item.cell_uuid in self._cells_responded: self._cells_responded.remove(item.cell_uuid) continue yield item._db_record self._cells_responded.add(item.cell_uuid) total_limit -= 1 if total_limit == 0: # We'll only hit this if limit was nonzero and we just # generated our last one return ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/pci_placement_translator.py0000664000175000017500000007720100000000000022636 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import copy import typing as ty import os_resource_classes import os_traits from oslo_log import log as logging from oslo_utils import strutils from oslo_utils import uuidutils from nova.compute import provider_tree import nova.conf from nova import exception from nova.i18n import _ from nova.objects import fields from nova.objects import pci_device from nova.pci import devspec from nova.pci import manager as pci_manager CONF = nova.conf.CONF LOG = logging.getLogger(__name__) # Devs with this type are in one to one mapping with an RP in placement PARENT_TYPES = ( fields.PciDeviceType.STANDARD, fields.PciDeviceType.SRIOV_PF) # Devs with these type need to have a parent and that parent is the one # that mapped to a placement RP CHILD_TYPES = ( fields.PciDeviceType.SRIOV_VF, fields.PciDeviceType.VDPA) def _is_placement_tracking_enabled() -> bool: return CONF.pci.report_in_placement def _normalize_traits(traits: ty.List[str]) -> ty.List[str]: """Make the trait names acceptable for placement. It keeps the already valid standard or custom traits but normalizes trait names that are not already normalized. """ standard_traits, rest = os_traits.check_traits(traits) custom_traits = [] for name in rest: name = name.upper() if os_traits.is_custom(name): custom_traits.append(name) else: custom_traits.append(os_traits.normalize_name(name)) return list(standard_traits) + custom_traits def get_traits(traits_str: str) -> ty.Set[str]: """Return a normalized set of placement standard and custom traits from a string of comma separated trait names. 
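    For example (illustrative; the exact CUSTOM_ prefixing and character
    substitution for non-standard names are delegated to
    os_traits.normalize_name)::

        get_traits("HW_GPU_API_VULKAN,custom_foo,bar-baz")
        # -> {"HW_GPU_API_VULKAN", "CUSTOM_FOO", "CUSTOM_BAR_BAZ"}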
""" # traits is a comma separated list of placement trait names if not traits_str: return set() return set(_normalize_traits(traits_str.split(','))) def _get_traits_for_dev( dev_spec_tags: ty.Dict[str, str], ) -> ty.Set[str]: return get_traits(dev_spec_tags.get("traits", "")) | { os_traits.COMPUTE_MANAGED_PCI_DEVICE } def _normalize_resource_class(rc: str) -> str: rc = rc.upper() if ( rc not in os_resource_classes.STANDARDS and not os_resource_classes.is_custom(rc) ): rc = os_resource_classes.normalize_name(rc) # mypy: normalize_name will return non None for non None input assert rc return rc def get_resource_class( requested_name: ty.Optional[str], vendor_id: str, product_id: str ) -> str: """Return the normalized resource class name based on what is requested or if nothing is requested then generated from the vendor_id and product_id """ if requested_name: rc = _normalize_resource_class(requested_name) else: rc = f"CUSTOM_PCI_{vendor_id}_{product_id}".upper() return rc def _get_rc_for_dev( dev: pci_device.PciDevice, dev_spec_tags: ty.Dict[str, str], ) -> str: """Return the resource class to represent the device. It is either provided by the user in the configuration as the resource_class tag, or we are generating one from vendor_id and product_id. The user specified resource class is normalized if it is not already an acceptable standard or custom resource class. """ rc = dev_spec_tags.get("resource_class") return get_resource_class(rc, dev.vendor_id, dev.product_id) class PciResourceProvider: """A PCI Resource Provider""" def __init__(self, name: str) -> None: self.name = name self.parent_dev = None self.children_devs: ty.List[pci_device.PciDevice] = [] self.resource_class: ty.Optional[str] = None self.traits: ty.Optional[ty.Set[str]] = None self.is_otu = False # This is an adjustment for the total inventory based on normal device # due to possibility of devices held in the tracker even though they # are removed from the configuration due to still having allocations. # This number will be calculated based on the existing allocations # during update_provider_tree call. self.adjustment = 0 @property def devs(self) -> ty.List[pci_device.PciDevice]: return [self.parent_dev] if self.parent_dev else self.children_devs @property def total(self): return len(self.devs) + self.adjustment @property def to_be_deleted(self): return self.total == 0 def add_child(self, dev, dev_spec_tags: ty.Dict[str, str]) -> None: if self.parent_dev: raise exception.PlacementPciDependentDeviceException( parent_dev=dev.address, children_devs=",".join(dev.address for dev in self.devs) ) rc = _get_rc_for_dev(dev, dev_spec_tags) if self.resource_class and rc != self.resource_class: raise exception.PlacementPciMixedResourceClassException( new_rc=rc, new_dev=dev.address, current_rc=self.resource_class, current_devs=",".join( dev.address for dev in self.children_devs) ) traits = _get_traits_for_dev(dev_spec_tags) if self.traits is not None and self.traits != traits: raise exception.PlacementPciMixedTraitsException( new_traits=",".join(sorted(traits)), new_dev=dev.address, current_traits=",".join(sorted(self.traits)), current_devs=",".join( dev.address for dev in self.children_devs), ) if 'one_time_use' in dev_spec_tags: # Child devices cannot be OTU. 
Do not even tolerate setting =false raise exception.PlacementPciException( error=('Only type-PCI and type-PF devices may set ' 'one_time_use and %s does not qualify') % self.name) self.children_devs.append(dev) self.resource_class = rc self.traits = traits def add_parent(self, dev, dev_spec_tags: ty.Dict[str, str]) -> None: if self.parent_dev or self.children_devs: raise exception.PlacementPciDependentDeviceException( parent_dev=dev.address, children_devs=",".join(dev.address for dev in self.devs) ) self.parent_dev = dev self.resource_class = _get_rc_for_dev(dev, dev_spec_tags) self.is_otu = strutils.bool_from_string( dev_spec_tags.get("one_time_use", "false")) traits = _get_traits_for_dev(dev_spec_tags) if self.is_otu: # We always decorate OTU providers with a trait so they can be # easily found traits.add(os_traits.HW_PCI_ONE_TIME_USE) self.traits = traits def remove_child(self, dev: pci_device.PciDevice) -> None: # Nothing to do here. The update_provider_tree will handle the # inventory decrease or the full RP removal pass def remove_parent(self, dev: pci_device.PciDevice) -> None: # Nothing to do here. The update_provider_tree we handle full RP pass def _get_allocations(self) -> ty.Mapping[str, int]: """Return a dict of used resources keyed by consumer UUID. Note that: 1) a single consumer can consume more than one resource from a single RP. I.e. A VM with two VFs from the same parent PF 2) multiple consumers can consume resources from a single RP. I.e. two VMs consuming one VF from the same PF each 3) regardless of how many consumers we have on a single PCI RP, they are always consuming resources from the same resource class as we are not supporting dependent devices modelled by the same RP but different resource classes. """ return collections.Counter( [ dev.instance_uuid for dev in self.devs if "instance_uuid" in dev and dev.instance_uuid ] ) def _get_inventories(self): # NOTE(gibi): The rest of the inventory fields (allocation_ratio, # etc.) are defaulted by placement and the default value makes # sense for PCI devices, i.e. no overallocation and PCI can be # allocated one by one. We may set the reserved value to a nonzero # amount on the provider if the operator requests it via the # one_time_use=true flag, but otherwise the operator controls # reserved and nova will not override that value periodically. inventory = { "total": self.total, "max_unit": self.total, } self._handle_one_time_use(inventory) return {self.resource_class: inventory} def _handle_one_time_use(self, inventory: dict): """Modifies the inventory to reserve the OTU device if allocated""" def is_allocated(dev: pci_device.PciDevice) -> bool: return 'instance_uuid' in dev and dev.instance_uuid if self.parent_dev and self.is_otu and is_allocated(self.parent_dev): # If we are an allocated parent device, and our one-time-use flag # is set, we need to also set our inventory to reserved. # NOTE(danms): VERY IMPORTANT: we never *ever* want to update # reserved to anything other than self.total, and definitely # not if we are not allocated. These devices are intended to go # from unallocated to allocated AND reserved. They may be # unreserved by an external entity, but never nova. 
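# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): how _get_allocations() above
# counts per-consumer usage from the instance_uuid of each tracked device
# (collections is imported at the top of this module). The instance UUIDs
# are hypothetical; two VFs held by the same instance count as 2 on this RP.
_example_device_owners = [
    'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',  # VF 1, instance A
    'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',  # VF 2, also instance A
    'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',  # VF 3, instance B
]
assert collections.Counter(_example_device_owners) == {
    'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa': 2,
    'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb': 1,
}
# ---------------------------------------------------------------------------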
inventory['reserved'] = self.total def _adjust_for_removals_and_held_devices( self, provider_tree: provider_tree.ProviderTree, rp_rc_usage: ty.Dict[str, ty.Dict[str, int]], ) -> None: rp_uuid = provider_tree.data(self.name).uuid rc_usage = rp_rc_usage[rp_uuid] if not self.resource_class: # The resource_class is undefined when there are no normal devices # exists any more on this RP. If no normal devs exists then there # is no device_spec to derive the RC and traits from. But if we # still have allocations in placement against this RP that means # there are devices removed from the configuration but kept in the # tracker as they are still allocated. In this case we # need to recover the resource class and traits from the # existing allocation. if len(rc_usage) == 0: # no usage so nothing to adjust here return else: # The len > 1 case should not happen for PCI RPs as we either # track the parent PF or the child VFs there on the RP but # never both. self.resource_class = list(rc_usage.keys())[0] self.traits = provider_tree.data(rp_uuid).traits # If device being removed but still held due to still having # allocations then we need to adjust the total inventory to never go # below the current usage otherwise Placement will reject the update. usage = rc_usage[self.resource_class] inventory = self.total if usage > inventory: LOG.warning( "Needed to adjust inventories of %s on " "resource provider %s from %d to %d due to existing " "placement allocations. This should only happen while " "VMs using already removed devices.", self.resource_class, self.name, inventory, usage) # This is counted into self.total to adjust the inventory self.adjustment += usage - inventory def update_provider_tree( self, provider_tree: provider_tree.ProviderTree, parent_rp_name: str, rp_rc_usage: ty.Dict[str, ty.Dict[str, int]], ) -> None: if not provider_tree.exists(self.name): # NOTE(gibi): We need to generate UUID for the new provider in Nova # instead of letting Placement assign one. We are potentially # healing a missing RP along with missing allocations on that RP. # The allocation healing happens with POST /reshape, and that API # only takes RP UUIDs. provider_tree.new_child( self.name, parent_rp_name, uuid=uuidutils.generate_uuid(dashed=True) ) self._adjust_for_removals_and_held_devices(provider_tree, rp_rc_usage) # if after the adjustment no inventory left then we need to delete # the RP explicitly if self.total == 0: provider_tree.remove(self.name) return provider_tree.update_inventory( self.name, self._get_inventories(), ) provider_tree.update_traits(self.name, self.traits) # Here we are sure the RP exists in the provider_tree. So, we can # record the RP UUID in each PciDevice this RP represents rp_uuid = provider_tree.data(self.name).uuid for dev in self.devs: dev.extra_info['rp_uuid'] = rp_uuid def update_allocations( self, allocations: dict, provider_tree: provider_tree.ProviderTree, same_host_instances: ty.List[str], ) -> bool: updated = False if self.to_be_deleted: # the RP is going away because either removed from the hypervisor # or the compute's config is changed to ignore the device. return updated # we assume here that if this RP has been created in the current round # of healing then it already has a UUID assigned. rp_uuid = provider_tree.data(self.name).uuid for consumer, amount in self._get_allocations().items(): if consumer not in allocations: # We have PCI device(s) allocated to an instance, but we don't # see any instance allocation in placement. 
This # happens for two reasons: # 1) The instance is being migrated and therefore the # allocation is held by the migration UUID in placement. In # this case the PciDevice is still allocated to the instance # UUID in the nova DB hence our lookup for the instance # allocation here. We can ignore this case as: i) We healed # the PCI allocation for the instance before the migration # was started. ii) Nova simply moves the allocation from the # instance UUID to the migration UUID in placement. So we # assume the migration allocation is correct without # healing. One limitation of this is that if there is in # progress migration when nova is upgraded, then the PCI # allocation of that migration will be missing from # placement on the source host. But it is temporary and the # allocation will be fixed as soon as the migration is # completed or reverted. # 2) We have a bug in the scheduler or placement and the whole # instance allocation is lost. We cannot handle that here. # It is expected to be healed via nova-manage placement # heal_allocation CLI instead. continue if consumer in same_host_instances: # This is a nasty special case. This instance is undergoing # a same host resize. So in Placement the source host # allocation is held by the migration UUID *but* the # PciDevice.instance_uuid is set for the instance UUID both # on the source and on the destination host. As the source and # dest are the same for migration we will see PciDevice # objects assigned to this instance that should not be # allocated to the instance UUID in placement. # As noted above we don't want to take care in progress # migration during healing. So we simply ignore this instance. # If the instance needs healing then it will be healed when # after the migration is confirmed or reverted. 
continue current_allocs = allocations[consumer]['allocations'] current_rp_allocs = current_allocs.get(rp_uuid) if current_rp_allocs: # update an existing allocation if the current one differs current_rc_allocs = current_rp_allocs["resources"].get( self.resource_class, 0) if current_rc_allocs != amount: current_rp_allocs[ "resources"][self.resource_class] = amount updated = True else: # insert a new allocation as it is missing current_allocs[rp_uuid] = { "resources": {self.resource_class: amount} } updated = True return updated def __str__(self) -> str: if not self.to_be_deleted: return ( f"RP({self.name}, {self.resource_class}={self.total}, " f"traits={','.join(sorted(self.traits or set()))})" ) else: return f"RP({self.name}, )" class PlacementView: """The PCI Placement view""" def __init__( self, hypervisor_hostname: str, instances_under_same_host_resize: ty.List[str], ) -> None: self.rps: ty.Dict[str, PciResourceProvider] = {} self.root_rp_name = hypervisor_hostname self.same_host_instances = instances_under_same_host_resize def _get_rp_name_for_address(self, addr: str) -> str: return f"{self.root_rp_name}_{addr.upper()}" def _ensure_rp(self, rp_name: str) -> PciResourceProvider: return self.rps.setdefault(rp_name, PciResourceProvider(rp_name)) def _get_rp_name_for_child(self, dev: pci_device.PciDevice) -> str: if not dev.parent_addr: msg = _( "Missing parent address for PCI device s(dev)% with " "type s(type)s" ) % { "dev": dev.address, "type": dev.dev_type, } raise exception.PlacementPciException(error=msg) return self._get_rp_name_for_address(dev.parent_addr) def _add_dev( self, dev: pci_device.PciDevice, dev_spec_tags: ty.Dict[str, str] ) -> None: if dev_spec_tags.get("physical_network"): # NOTE(gibi): We ignore devices that has physnet configured as # those are there for Neutron based SRIOV and that is out of scope # for now. Later these devices will be tracked as PCI_NETDEV # devices in placement. return rp = self._ensure_rp_for_dev(dev) if dev.dev_type in PARENT_TYPES: rp.add_parent(dev, dev_spec_tags) elif dev.dev_type in CHILD_TYPES: rp.add_child(dev, dev_spec_tags) else: msg = _( "Unhandled PCI device type %(type)s for %(dev)s. Please " "report a bug." ) % { "type": dev.dev_type, "dev": dev.address, } raise exception.PlacementPciException(error=msg) def _remove_dev(self, dev: pci_device.PciDevice) -> None: """Remove PCI devices from Placement that existed before but now deleted from the hypervisor or unlisted from [pci]device_spec """ rp = self._ensure_rp_for_dev(dev) if dev.dev_type in PARENT_TYPES: rp.remove_parent(dev) elif dev.dev_type in CHILD_TYPES: rp.remove_child(dev) def _ensure_rp_for_dev( self, dev: pci_device.PciDevice ) -> PciResourceProvider: """Ensures that the RP exists for the device and returns it but does not do any inventory accounting for the given device on the RP. """ if dev.dev_type in PARENT_TYPES: rp_name = self._get_rp_name_for_address(dev.address) return self._ensure_rp(rp_name) elif dev.dev_type in CHILD_TYPES: rp_name = self._get_rp_name_for_child(dev) return self._ensure_rp(rp_name) else: raise ValueError( f"Unhandled PCI device type {dev.dev_type} " f"for dev {dev.address}.") def process_dev( self, dev: pci_device.PciDevice, dev_spec: ty.Optional[devspec.PciDeviceSpec], ) -> None: # NOTE(gibi): We never observer dev.status DELETED as when that is set # the device is also removed from the PCI tracker. So we can ignore # that state. 
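# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the resource provider naming
# rule used by _get_rp_name_for_address() above. The hostname and PCI address
# are hypothetical examples. A type-PCI device or PF gets its own RP named
# after its address; VFs are counted on the RP of their parent PF.
def _example_pci_rp_name(hypervisor_hostname, pci_address):
    return f"{hypervisor_hostname}_{pci_address.upper()}"

assert _example_pci_rp_name('compute1', '0000:af:00.0') == (
    'compute1_0000:AF:00.0')
# ---------------------------------------------------------------------------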
if dev.status == fields.PciDeviceStatus.REMOVED: # NOTE(gibi): We need to handle the situation when an instance # uses a device where a dev_spec is removed. Here we need to keep # the device in the Placement view similarly how the PCI tracker # does it. # However, we also need to handle the situation when such VM is # being deleted. In that case we are called after the dev is freed # and marked as removed by the tracker so dev.instance_uuid is # None and dev.status is REMOVED. At this point the Placement # allocation for this dev is still not deleted so we still have to # keep the device in our view. The device will be deleted when the # PCI tracker is saved which happens after us. # However, we cannot overly eagerly keep devices here as a # device in REMOVED state might be a device that had no allocation # in Placement so it can be removed already without waiting for # the next periodic update when the device disappears from the # PCI tracker's list. If we are over eagerly keeping such device # when it is not allocated then that will prevent a single step # reconfiguration from whitelisting a VF to whitelisting its # parent PF, because the VF will be kept at restart and conflict # with the PF being added. # We choose to remove these devs so the happy path of removing # not allocated devs is simple. And then we do an extra # step later in update_provider_tree to reconcile Placement # allocations with our view and add back some inventories to handle # removed but allocated devs. self._remove_dev(dev) else: if not dev_spec: if dev.instance_uuid: LOG.warning( "Device spec is not found for device %s in " "[pci]device_spec. The device is allocated by " "%s. We are keeping this device in the Placement " "view. You should not remove an allocated device from " "the configuration. Please restore the configuration. " "If you cannot restore the configuration as the " "device is dead then delete or cold migrate the " "instance and then restart the nova-compute service " "to resolve the inconsistency.", dev.address, dev.instance_uuid ) # We need to keep the RP, but we cannot just use _add_dev # to generate the inventory on the RP as that would require # to know the dev_spec to e.g. have the RC. So we only # ensure that the RP exists, the inventory will be adjusted # based on the existing allocation in a later step. self._ensure_rp_for_dev(dev) else: LOG.warning( "Device spec is not found for device %s in " "[pci]device_spec. Ignoring device in Placement " "resource view. This should not happen. Please file a " "bug.", dev.address ) return self._add_dev(dev, dev_spec.get_tags()) def __str__(self) -> str: return ( f"Placement PCI view on {self.root_rp_name}: " f"{', '.join(str(rp) for rp in self.rps.values())}" ) @staticmethod def get_usage_per_rc_and_rp( allocations ) -> ty.Dict[str, ty.Dict[str, int]]: """Returns a dict keyed by RP uuid and the value is a dict of resource class: usage pairs telling how much total usage the given RP has from the given resource class across all the allocations. """ rp_rc_usage: ty.Dict[str, ty.Dict[str, int]] = ( collections.defaultdict(lambda: collections.defaultdict(int))) for consumer in allocations.values(): for rp_uuid, alloc in consumer["allocations"].items(): for rc, amount in alloc["resources"].items(): rp_rc_usage[rp_uuid][rc] += amount return rp_rc_usage def _remove_managed_rps_from_tree_not_in_view( self, provider_tree: provider_tree.ProviderTree ) -> None: """Removes PCI RPs from the provider_tree that are not present in the current PlacementView. 
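# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the shape of the allocations
# input to get_usage_per_rc_and_rp() above and the usage dict it produces.
# All UUIDs and the resource class name are hypothetical examples.
_example_allocations = {
    'instance-uuid-1': {
        'allocations': {
            'rp-uuid-1': {'resources': {'CUSTOM_PCI_8086_1572': 2}},
        },
    },
    'instance-uuid-2': {
        'allocations': {
            'rp-uuid-1': {'resources': {'CUSTOM_PCI_8086_1572': 1}},
        },
    },
}
# get_usage_per_rc_and_rp(_example_allocations) would be expected to return
# {'rp-uuid-1': {'CUSTOM_PCI_8086_1572': 3}}.
# ---------------------------------------------------------------------------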
""" rp_names_in_view = {rp.name for rp in self.rps.values()} uuids_in_tree = provider_tree.get_provider_uuids_in_tree( self.root_rp_name) for rp_uuid in uuids_in_tree: rp_data = provider_tree.data(rp_uuid) is_pci_rp = provider_tree.has_traits( rp_uuid, [os_traits.COMPUTE_MANAGED_PCI_DEVICE]) if is_pci_rp and rp_data.name not in rp_names_in_view: provider_tree.remove(rp_uuid) def update_provider_tree( self, provider_tree: provider_tree.ProviderTree, allocations: dict, ) -> None: self._remove_managed_rps_from_tree_not_in_view(provider_tree) rp_rc_usage = self.get_usage_per_rc_and_rp(allocations) for rp_name, rp in self.rps.items(): rp.update_provider_tree( provider_tree, self.root_rp_name, rp_rc_usage) def update_allocations( self, allocations: dict, provider_tree: provider_tree.ProviderTree ) -> bool: """Updates the passed in allocations dict inplace with any PCI allocations that is inferred from the PciDevice objects already added to the view. It returns True if the allocations dict has been changed, False otherwise. """ updated = False for rp in self.rps.values(): updated |= rp.update_allocations( allocations, provider_tree, self.same_host_instances, ) return updated def ensure_no_dev_spec_with_devname(dev_specs: ty.List[devspec.PciDeviceSpec]): for dev_spec in dev_specs: if dev_spec.dev_spec_conf.get("devname"): msg = _( "Invalid [pci]device_spec configuration. PCI Placement " "reporting does not support 'devname' based device " "specification but we got %(dev_spec)s. " "Please use PCI address in the configuration instead." ) % {"dev_spec": dev_spec.dev_spec_conf} raise exception.PlacementPciException(error=msg) def ensure_tracking_was_not_enabled_before( provider_tree: provider_tree.ProviderTree ) -> None: # If placement tracking was enabled before then we do not support # disabling it later. To check for that we can look for RPs with # the COMPUTE_MANAGED_PCI_DEVICE trait. If any then we raise to # kill the service for rp_uuid in provider_tree.get_provider_uuids(): if ( os_traits.COMPUTE_MANAGED_PCI_DEVICE in provider_tree.data(rp_uuid).traits ): msg = _( "The [pci]report_in_placement is False but it was enabled " "before on this compute. Nova does not support disabling " "it after it is enabled." ) raise exception.PlacementPciException(error=msg) def update_provider_tree_for_pci( provider_tree: provider_tree.ProviderTree, nodename: str, pci_tracker: pci_manager.PciDevTracker, allocations: dict, instances_under_same_host_resize: ty.List[str], ) -> bool: """Based on the PciDevice objects in the pci_tracker it calculates what inventories and allocations needs to exist in placement and create the missing peaces. It returns True if not just the provider_tree but also allocations needed to be changed. :param allocations: Dict of allocation data of the form: { $CONSUMER_UUID: { # The shape of each "allocations" dict below is identical # to the return from GET /allocations/{consumer_uuid} "allocations": { $RP_UUID: { "generation": $RP_GEN, "resources": { $RESOURCE_CLASS: $AMOUNT, ... }, }, ... }, "project_id": $PROJ_ID, "user_id": $USER_ID, "consumer_generation": $CONSUMER_GEN, }, ... } :param instances_under_same_host_resize: A list of instance UUIDs that are undergoing same host resize on this host. 
""" if not _is_placement_tracking_enabled(): ensure_tracking_was_not_enabled_before(provider_tree) # If tracking is not enabled we just return without touching anything return False ensure_no_dev_spec_with_devname(pci_tracker.dev_filter.specs) LOG.debug( 'Collecting PCI inventories and allocations to track them in Placement' ) pv = PlacementView(nodename, instances_under_same_host_resize) for dev in pci_tracker.pci_devs: # match the PCI device with the [pci]dev_spec config to access # the configuration metadata tags dev_spec = pci_tracker.dev_filter.get_devspec(dev) pv.process_dev(dev, dev_spec) pv.update_provider_tree(provider_tree, allocations) LOG.info("Placement PCI resource view: %s", pv) old_alloc = copy.deepcopy(allocations) # update_provider_tree correlated the PciDevice objects with RPs in # placement and recorded the RP UUID in the PciDevice object. We need to # trigger an update on the device pools in the tracker to get the device # RP UUID mapped to the device pools pci_tracker.stats.populate_pools_metadata_from_assigned_devices() updated = pv.update_allocations(allocations, provider_tree) if updated: LOG.debug( "Placement PCI view needs allocation healing. This should only " "happen if [filter_scheduler]pci_in_placement is still disabled. " "Original allocations: %s New allocations: %s", old_alloc, allocations, ) return updated ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/power_state.py0000664000175000017500000000432600000000000020114 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Power state is the state we get by calling virt driver on a particular domain. The hypervisor is always considered the authority on the status of a particular VM, and the power_state in the DB should be viewed as a snapshot of the VMs's state in the (recent) past. It can be periodically updated, and should also be updated at the end of a task if the task is supposed to affect power_state. """ from nova.objects import fields # NOTE(maoy): These are *not* virDomainState values from libvirt. # The hex value happens to match virDomainState for backward-compatibility # reasons. 
NOSTATE = fields.InstancePowerState.index(fields.InstancePowerState.NOSTATE) RUNNING = fields.InstancePowerState.index(fields.InstancePowerState.RUNNING) PAUSED = fields.InstancePowerState.index(fields.InstancePowerState.PAUSED) # the VM is powered off SHUTDOWN = fields.InstancePowerState.index(fields.InstancePowerState.SHUTDOWN) CRASHED = fields.InstancePowerState.index(fields.InstancePowerState.CRASHED) SUSPENDED = fields.InstancePowerState.index( fields.InstancePowerState.SUSPENDED) # TODO(justinsb): Power state really needs to be a proper class, # so that we're not locked into the libvirt status codes and can put mapping # logic here rather than spread throughout the code STATE_MAP = {fields.InstancePowerState.index(state): state for state in fields.InstancePowerState.ALL if state != fields.InstancePowerState._UNUSED} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/provider_config.py0000664000175000017500000004371300000000000020742 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glob import jsonschema import logging import microversion_parse import os import yaml import os_resource_classes import os_traits from nova import exception as nova_exc from nova.i18n import _ LOG = logging.getLogger(__name__) # A dictionary with keys for all supported major versions with lists of # corresponding minor versions as values. SUPPORTED_SCHEMA_VERSIONS = { 1: {0} } # Supported provider config file schema SCHEMA_V1 = { # This definition uses JSON Schema Draft 7. # https://json-schema.org/draft-07/json-schema-release-notes.html 'type': 'object', 'properties': { # This property is used to track where the provider.yaml file # originated. It is reserved for internal use and should never be # set in a provider.yaml file supplied by an end user. '__source_file': {'not': {}}, 'meta': { 'type': 'object', 'properties': { # Version ($Major, $minor) of the schema must successfully # parse documents conforming to ($Major, 0..N). # Any breaking schema change (e.g. removing fields, adding # new required fields, imposing a stricter pattern on a value, # etc.) must bump $Major. 'schema_version': { 'type': 'string', 'pattern': '^1.([0-9]|[1-9][0-9]+)$' } }, 'required': ['schema_version'], 'additionalProperties': True }, 'providers': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'identification': { '$ref': '#/$defs/providerIdentification' }, 'inventories': { '$ref': '#/$defs/providerInventories' }, 'traits': { '$ref': '#/$defs/providerTraits' } }, 'required': ['identification'], 'additionalProperties': True } } }, 'required': ['meta'], 'additionalProperties': True, '$defs': { 'providerIdentification': { # Identify a single provider to configure. # Exactly one identification method should be used. Currently # `uuid` or `name` are supported, but future versions may # support others. 
The uuid can be set to the sentinel value # `$COMPUTE_NODE` which will cause the consuming compute service to # apply the configuration to all compute node root providers # it manages that are not otherwise specified using a uuid or name. 'type': 'object', 'properties': { 'uuid': { 'oneOf': [ { # TODO(sean-k-mooney): replace this with type uuid # when we can depend on a version of the jsonschema # lib that implements draft 8 or later of the # jsonschema spec. 'type': 'string', 'pattern': '^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-' '[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-' '[0-9A-Fa-f]{12}$' }, { 'type': 'string', 'const': '$COMPUTE_NODE' } ] }, 'name': { 'type': 'string', 'minLength': 1, 'maxLength': 200 } }, # This introduces the possibility of an unsupported key name being # used to get by schema validation, but is necessary to support # forward compatibility with new identification methods. # This should be checked after schema validation. 'minProperties': 1, 'maxProperties': 1, 'additionalProperties': False }, 'providerInventories': { # Allows the admin to specify various adjectives to create and # manage providers' inventories. This list of adjectives can be # extended in the future as the schema evolves to meet new use # cases. As of v1.0, only one adjective, `additional`, is # supported. 'type': 'object', 'properties': { 'additional': { 'type': 'array', 'items': { 'patternProperties': { # Allows any key name matching the resource class # pattern, check to prevent conflicts with virt # driver owned resources classes will be done after # schema validation. '^[A-Z0-9_]{1,255}$': { 'type': 'object', 'properties': { # Any optional properties not populated # will be given a default value by # placement. If overriding a pre-existing # provider values will not be preserved # from the existing inventory. 'total': { 'type': 'integer' }, 'reserved': { 'type': 'integer' }, 'min_unit': { 'type': 'integer' }, 'max_unit': { 'type': 'integer' }, 'step_size': { 'type': 'integer' }, 'allocation_ratio': { 'type': 'number' } }, 'required': ['total'], # The defined properties reflect the current # placement data model. While defining those # in the schema and not allowing additional # properties means we will need to bump the # schema version if they change, that is likely # to be part of a large change that may have # other impacts anyway. The benefit of stricter # validation of property names outweighs the # (small) chance of having to bump the schema # version as described above. 'additionalProperties': False } }, # This ensures only keys matching the pattern # above are allowed. 'additionalProperties': False } } }, 'additionalProperties': True }, 'providerTraits': { # Allows the admin to specify various adjectives to create and # manage providers' traits. This list of adjectives can be extended # in the future as the schema evolves to meet new use cases. # As of v1.0, only one adjective, `additional`, is supported. 'type': 'object', 'properties': { 'additional': { 'type': 'array', 'items': { # Allows any value matching the trait pattern here, # additional validation will be done after schema # validation. 'type': 'string', 'pattern': '^[A-Z0-9_]{1,255}$' } } }, 'additionalProperties': True } } } def _load_yaml_file(path): """Loads and parses a provider.yaml config file into a dict. :param path: Path to the yaml file to load. :return: Dict representing the yaml file requested. :raise: ProviderConfigException if the path provided cannot be read or the file is not valid yaml. 
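# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): a minimal provider config,
# written as the dict yaml.safe_load() would produce for a provider.yaml
# file, validated against SCHEMA_V1 above. The resource class and trait
# names are hypothetical examples; note that both must be CUSTOM_* to pass
# the additional checks in _validate_provider_config() further below.
_EXAMPLE_PROVIDER_CONFIG = {
    'meta': {'schema_version': '1.0'},
    'providers': [
        {
            # $COMPUTE_NODE targets every compute node root provider this
            # service manages that is not identified explicitly elsewhere.
            'identification': {'uuid': '$COMPUTE_NODE'},
            'inventories': {
                'additional': [
                    {'CUSTOM_EXAMPLE_RESOURCE': {'total': 4, 'reserved': 1}},
                ],
            },
            'traits': {
                'additional': ['CUSTOM_EXAMPLE_TRAIT'],
            },
        },
    ],
}
jsonschema.validate(_EXAMPLE_PROVIDER_CONFIG, SCHEMA_V1)
# ---------------------------------------------------------------------------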
""" try: with open(path) as open_file: try: return yaml.safe_load(open_file) except yaml.YAMLError as ex: message = _("Unable to load yaml file: %s ") % ex if hasattr(ex, 'problem_mark'): pos = ex.problem_mark message += _("File: %s ") % open_file.name message += _("Error position: " "({line}:{column})").format( line=pos.line + 1, column=pos.column + 1) raise nova_exc.ProviderConfigException(error=message) except OSError: message = _("Unable to read yaml config file: %s") % path raise nova_exc.ProviderConfigException(error=message) def _validate_provider_config(config, provider_config_path): """Accepts a schema-verified provider config in the form of a dict and performs additional checks for format and required keys. :param config: Dict containing a provider config file :param provider_config_path: Path to the provider config, used for logging :return: List of valid providers :raise nova.exception.ProviderConfigException: If provider id is missing, or a resource class or trait name is invalid. """ def _validate_traits(provider): # Check that traits are custom additional_traits = set(provider.get("traits", {}).get( "additional", [])) trait_conflicts = [trait for trait in additional_traits if not os_traits.is_custom(trait)] if trait_conflicts: # sort for more predictable message for testing message = _( "Invalid traits, only custom traits are allowed: %s" ) % sorted(trait_conflicts) raise nova_exc.ProviderConfigException(error=message) return additional_traits def _validate_rc(provider): # Check that resource classes are custom additional_inventories = provider.get("inventories", {}).get( "additional", []) all_inventory_conflicts = [] for inventory in additional_inventories: inventory_conflicts = [rc for rc in inventory if not os_resource_classes.is_custom(rc)] all_inventory_conflicts += inventory_conflicts if all_inventory_conflicts: # sort for more predictable message for testing message = _( "Invalid resource class, only custom resource classes " "are allowed: %s") % ', '.join(sorted(all_inventory_conflicts)) raise nova_exc.ProviderConfigException(error=message) return additional_inventories # store valid providers valid_providers = [] for provider in config.get("providers", []): # Check that the identification method is known since # the schema only requires that some property be present pid = provider["identification"] provider_id = pid.get("name") or pid.get("uuid") # Not checking the validity of provider_id since # the schema has already ensured that. additional_traits = _validate_traits(provider) additional_inventories = _validate_rc(provider) # filter out no-op providers so they will not be returned if not additional_traits and not additional_inventories: message = ( "Provider %(provider_id)s defined in %(provider_config_path)s " "has no additional inventories or traits and will be ignored." ) % { "provider_id": provider_id, "provider_config_path": provider_config_path } LOG.warning(message) else: valid_providers.append(provider) return valid_providers def _parse_provider_yaml(path): """Loads schema, parses a provider.yaml file and validates the content. :param path: File system path to the file to parse. :return: dict representing the contents of the file. :raise ProviderConfigException: If the specified file does not validate against the schema, the schema version is not supported, or if unable to read configuration or schema files. 
""" yaml_file = _load_yaml_file(path) try: schema_version = microversion_parse.parse_version_string( yaml_file['meta']['schema_version']) except (KeyError, TypeError): message = _("Unable to detect schema version: %s") % yaml_file raise nova_exc.ProviderConfigException(error=message) if schema_version.major not in SUPPORTED_SCHEMA_VERSIONS: message = _( "Unsupported schema major version: %d") % schema_version.major raise nova_exc.ProviderConfigException(error=message) if schema_version.minor not in \ SUPPORTED_SCHEMA_VERSIONS[schema_version.major]: # TODO(sean-k-mooney): We should try to provide a better # message that identifies which fields may be ignored # and the max minor version supported by this version of nova. message = ( "Provider config file [%(path)s] is at schema version " "%(schema_version)s. Nova supports the major version, " "but not the minor. Some fields may be ignored." % {"path": path, "schema_version": schema_version}) LOG.warning(message) try: jsonschema.validate(yaml_file, SCHEMA_V1) except jsonschema.exceptions.ValidationError as e: message = _( "The provider config file %(path)s did not pass validation " "for schema version %(schema_version)s: %(reason)s") % { "path": path, "schema_version": schema_version, "reason": e} raise nova_exc.ProviderConfigException(error=message) return yaml_file def get_provider_configs(provider_config_dir): """Gathers files in the provided path and calls the parser for each file and merges them into a list while checking for a number of possible conflicts. :param provider_config_dir: Path to a directory containing provider config files to be loaded. :raise nova.exception.ProviderConfigException: If unable to read provider config directory or if one of a number of validation checks fail: - Unknown, unsupported, or missing schema major version. - Unknown, unsupported, or missing resource provider identification. - A specific resource provider is identified twice with the same method. If the same provider identified by *different* methods, such conflict will be detected in a later stage. - A resource class or trait name is invalid or not custom. - A general schema validation error occurs (required fields, types, etc). :return: A dict of dicts keyed by uuid_or_name with the parsed and validated contents of all files in the provided dir. Each value in the dict will include the source file name the value of the __source_file key. """ provider_configs = {} provider_config_paths = glob.glob( os.path.join(provider_config_dir, "*.yaml")) provider_config_paths.sort() if not provider_config_paths: message = ( "No provider configs found in %s. If files are present, " "ensure the Nova process has access." ) LOG.info(message, provider_config_dir) # return an empty dict as no provider configs found return provider_configs for provider_config_path in provider_config_paths: provider_config = _parse_provider_yaml(provider_config_path) for provider in _validate_provider_config( provider_config, provider_config_path, ): provider['__source_file'] = os.path.basename(provider_config_path) pid = provider["identification"] uuid_or_name = pid.get("uuid") or pid.get("name") # raise exception if this provider was already processed if uuid_or_name in provider_configs: raise nova_exc.ProviderConfigException( error=_( "Provider %(provider_id)s has multiple definitions " "in source file(s): %(source_files)s." 
) % { "provider_id": uuid_or_name, # sorted set for deduplication and consistent order "source_files": sorted({ provider_configs[uuid_or_name]["__source_file"], provider_config_path }) } ) provider_configs[uuid_or_name] = provider return provider_configs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/provider_tree.py0000664000175000017500000007536100000000000020440 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """An object describing a tree of resource providers and their inventories. This object is not stored in the Nova API or cell databases; rather, this object is constructed and used by the scheduler report client to track state changes for resources on the hypervisor or baremetal node. As such, there are no remoteable methods nor is there any interaction with the nova.db modules. """ import collections import copy import os_traits from oslo_concurrency import lockutils from oslo_log import log as logging from oslo_utils import uuidutils from nova.i18n import _ LOG = logging.getLogger(__name__) _LOCK_NAME = 'provider-tree-lock' # Point-in-time representation of a resource provider in the tree. # Note that, whereas namedtuple enforces read-only-ness of instances as a # whole, nothing prevents modification of the internals of attributes of # complex types (children/inventory/traits/aggregates). However, any such # modifications still have no effect on the ProviderTree the instance came # from. Like, you can Sharpie a moustache on a Polaroid of my face, but that # doesn't make a moustache appear on my actual face. ProviderData = collections.namedtuple( 'ProviderData', ['uuid', 'name', 'generation', 'parent_uuid', 'inventory', 'traits', 'aggregates', 'resources']) class _Provider(object): """Represents a resource provider in the tree. All operations against the tree should be done using the ProviderTree interface, since it controls thread-safety. """ def __init__(self, name, uuid=None, generation=None, parent_uuid=None): if uuid is None: uuid = uuidutils.generate_uuid() self.uuid = uuid self.name = name self.generation = generation self.parent_uuid = parent_uuid # Contains a dict, keyed by uuid of child resource providers having # this provider as a parent self.children = {} # dict of inventory records, keyed by resource class self.inventory = {} # Set of trait names self.traits = set() # Set of aggregate UUIDs self.aggregates = set() # dict of resource records, keyed by resource class # the value is the set of objects.Resource self.resources = {} @classmethod def from_dict(cls, pdict): """Factory method producing a _Provider based on a dict with appropriate keys. :param pdict: Dictionary representing a provider, with keys 'name', 'uuid', 'generation', 'parent_provider_uuid'. Of these, only 'name' is mandatory. 
""" return cls(pdict['name'], uuid=pdict.get('uuid'), generation=pdict.get('generation'), parent_uuid=pdict.get('parent_provider_uuid')) def data(self): inventory = copy.deepcopy(self.inventory) traits = copy.copy(self.traits) aggregates = copy.copy(self.aggregates) resources = copy.deepcopy(self.resources) return ProviderData( self.uuid, self.name, self.generation, self.parent_uuid, inventory, traits, aggregates, resources) def get_provider_uuids(self): """Returns a list, in top-down traversal order, of UUIDs of this provider and all its descendants. """ ret = [self.uuid] for child in self.children.values(): ret.extend(child.get_provider_uuids()) return ret def find(self, search): if self.name == search or self.uuid == search: return self if search in self.children: return self.children[search] if self.children: for child in self.children.values(): # We already searched for the child by UUID above, so here we # just check for a child name match if child.name == search: return child subchild = child.find(search) if subchild: return subchild return None def add_child(self, provider): self.children[provider.uuid] = provider def remove_child(self, provider): if provider.uuid in self.children: del self.children[provider.uuid] def has_inventory(self): """Returns whether the provider has any inventory records at all. """ return self.inventory != {} def has_inventory_changed(self, new): """Returns whether the inventory has changed for the provider.""" cur = self.inventory if set(cur) != set(new): return True for key, cur_rec in cur.items(): new_rec = new[key] # If the new record contains new fields (e.g. we're adding on # `reserved` or `allocation_ratio`) we want to make sure to pick # them up if set(new_rec) - set(cur_rec): return True for rec_key, cur_val in cur_rec.items(): if rec_key not in new_rec: # Deliberately don't want to compare missing keys in the # *new* inventory record. For instance, we will be passing # in fields like allocation_ratio in the current dict but # the resource tracker may only pass in the total field. We # want to return that inventory didn't change when the # total field values are the same even if the # allocation_ratio field is missing from the new record. continue if new_rec[rec_key] != cur_val: return True return False def _update_generation(self, generation, operation): if generation is not None and generation != self.generation: msg_args = { 'rp_uuid': self.uuid, 'old': self.generation, 'new': generation, 'op': operation } LOG.debug("Updating resource provider %(rp_uuid)s generation " "from %(old)s to %(new)s during operation: %(op)s", msg_args) self.generation = generation def update_inventory(self, inventory, generation): """Update the stored inventory for the provider along with a resource provider generation to set the provider to. The method returns whether the inventory has changed. """ self._update_generation(generation, 'update_inventory') if self.has_inventory_changed(inventory): LOG.debug('Updating inventory in ProviderTree for provider %s ' 'with inventory: %s', self.uuid, inventory) self.inventory = copy.deepcopy(inventory) return True LOG.debug('Inventory has not changed in ProviderTree for provider: %s', self.uuid) return False def have_traits_changed(self, new): """Returns whether the provider's traits have changed.""" return set(new) != self.traits def update_traits(self, new, generation=None): """Update the stored traits for the provider along with a resource provider generation to set the provider to. 
The method returns whether the traits have changed. """ self._update_generation(generation, 'update_traits') if self.have_traits_changed(new): self.traits = set(new) # create a copy of the new traits return True return False def has_traits(self, traits): """Query whether the provider has certain traits. :param traits: Iterable of string trait names to look for. :return: True if this provider has *all* of the specified traits; False if any of the specified traits are absent. Returns True if the traits parameter is empty. """ return not bool(set(traits) - self.traits) def have_aggregates_changed(self, new): """Returns whether the provider's aggregates have changed.""" return set(new) != self.aggregates def update_aggregates(self, new, generation=None): """Update the stored aggregates for the provider along with a resource provider generation to set the provider to. The method returns whether the aggregates have changed. """ self._update_generation(generation, 'update_aggregates') if self.have_aggregates_changed(new): self.aggregates = set(new) # create a copy of the new aggregates return True return False def in_aggregates(self, aggregates): """Query whether the provider is a member of certain aggregates. :param aggregates: Iterable of string aggregate UUIDs to look for. :return: True if this provider is a member of *all* of the specified aggregates; False if any of the specified aggregates are absent. Returns True if the aggregates parameter is empty. """ return not bool(set(aggregates) - self.aggregates) def have_resources_changed(self, new): """Returns whether the resources have changed for the provider.""" return self.resources != new def update_resources(self, resources): """Update the stored resources for the provider. The method returns whether the resources have changed. """ if self.have_resources_changed(resources): self.resources = copy.deepcopy(resources) return True return False class ProviderTree(object): def __init__(self): """Create an empty provider tree.""" self.lock = lockutils.internal_lock(_LOCK_NAME) self.roots_by_uuid = {} self.roots_by_name = {} @property def roots(self): return self.roots_by_uuid.values() def get_provider_uuids(self, name_or_uuid=None): """Return a list, in top-down traversable order, of the UUIDs of all providers (in a (sub)tree). :param name_or_uuid: Provider name or UUID representing the root of a (sub)tree for which to return UUIDs. If not specified, the method returns all UUIDs in the ProviderTree. """ if name_or_uuid is not None: with self.lock: return self._find_with_lock(name_or_uuid).get_provider_uuids() # If no name_or_uuid, get UUIDs for all providers recursively. ret = [] with self.lock: for root in self.roots: ret.extend(root.get_provider_uuids()) return ret def get_provider_uuids_in_tree(self, name_or_uuid): """Returns a list, in top-down traversable order, of the UUIDs of all providers in the whole tree of which the provider identified by ``name_or_uuid`` is a member. :param name_or_uuid: Provider name or UUID representing any member of whole tree for which to return UUIDs. """ with self.lock: return self._find_with_lock( name_or_uuid, return_root=True).get_provider_uuids() def populate_from_iterable(self, provider_dicts): """Populates this ProviderTree from an iterable of provider dicts. This method will ADD providers to the tree if provider_dicts contains providers that do not exist in the tree already and will REPLACE providers in the tree if provider_dicts contains providers that are already in the tree. 
This method will NOT remove providers from the tree that are not in provider_dicts. But if a parent provider is in provider_dicts and the descendents are not, this method will remove the descendents from the tree. :param provider_dicts: An iterable of dicts of resource provider information. If a provider is present in provider_dicts, all its descendants must also be present. :raises: ValueError if any provider in provider_dicts has a parent that is not in this ProviderTree or elsewhere in provider_dicts. """ if not provider_dicts: return # Map of provider UUID to provider dict for the providers we're # *adding* via this method. to_add_by_uuid = {pd['uuid']: pd for pd in provider_dicts} with self.lock: # Sanity check for orphans. Every parent UUID must either be None # (the provider is a root), or be in the tree already, or exist as # a key in to_add_by_uuid (we're adding it). all_parents = set([None]) | set(to_add_by_uuid) # NOTE(efried): Can't use get_provider_uuids directly because we're # already under lock. for root in self.roots: all_parents |= set(root.get_provider_uuids()) missing_parents = set() for pd in to_add_by_uuid.values(): parent_uuid = pd.get('parent_provider_uuid') if parent_uuid not in all_parents: missing_parents.add(parent_uuid) if missing_parents: raise ValueError( _("The following parents were not found: %s") % ', '.join(missing_parents)) # Ready to do the work. # Use to_add_by_uuid to keep track of which providers are left to # be added. while to_add_by_uuid: # Find a provider that's suitable to inject. for uuid, pd in to_add_by_uuid.items(): # Roots are always okay to inject (None won't be a key in # to_add_by_uuid). Otherwise, we have to make sure we # already added the parent (and, by recursion, all # ancestors) if present in the input. parent_uuid = pd.get('parent_provider_uuid') if parent_uuid not in to_add_by_uuid: break else: # This should never happen - we already ensured all parents # exist in the tree, which means we can't have any branches # that don't wind up at the root, which means we can't have # cycles. But to quell the paranoia... raise ValueError( _("Unexpectedly failed to find parents already in the " "tree for any of the following: %s") % ','.join(set(to_add_by_uuid))) # Add or replace the provider, either as a root or under its # parent try: self._remove_with_lock(uuid) except ValueError: # Wasn't there in the first place - fine. pass provider = _Provider.from_dict(pd) if parent_uuid is None: self.roots_by_uuid[provider.uuid] = provider self.roots_by_name[provider.name] = provider else: parent = self._find_with_lock(parent_uuid) parent.add_child(provider) # Remove this entry to signify we're done with it. to_add_by_uuid.pop(uuid) def _remove_with_lock(self, name_or_uuid): found = self._find_with_lock(name_or_uuid) if found.parent_uuid: parent = self._find_with_lock(found.parent_uuid) parent.remove_child(found) else: del self.roots_by_uuid[found.uuid] del self.roots_by_name[found.name] def remove(self, name_or_uuid): """Safely removes the provider identified by the supplied name_or_uuid parameter and all of its children from the tree. :raises ValueError if name_or_uuid points to a non-existing provider. :param name_or_uuid: Either name or UUID of the resource provider to remove from the tree. """ with self.lock: self._remove_with_lock(name_or_uuid) def new_root(self, name, uuid, generation=None): """Adds a new root provider to the tree, returning its UUID. 
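# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the provider-dict shape
# consumed by populate_from_iterable() above (the same shape accepted by
# _Provider.from_dict()). UUIDs and names are hypothetical; the second entry
# is a child because its parent_provider_uuid points at the first.
_EXAMPLE_PROVIDER_DICTS = [
    {
        'uuid': '11111111-1111-1111-1111-111111111111',
        'name': 'compute1',
        'generation': 1,
        'parent_provider_uuid': None,
    },
    {
        'uuid': '22222222-2222-2222-2222-222222222222',
        'name': 'compute1_0000:AF:00.0',
        'generation': 1,
        'parent_provider_uuid': '11111111-1111-1111-1111-111111111111',
    },
]
# ---------------------------------------------------------------------------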
:param name: The name of the new root provider :param uuid: The UUID of the new root provider :param generation: Generation to set for the new root provider :returns: the UUID of the new provider :raises: ValueError if a provider with the specified uuid already exists in the tree. """ with self.lock: exists = True try: self._find_with_lock(uuid) except ValueError: exists = False if exists: err = _("Provider %s already exists.") raise ValueError(err % uuid) p = _Provider(name, uuid=uuid, generation=generation) self.roots_by_uuid[uuid] = p self.roots_by_name[name] = p return p.uuid def _find_with_lock(self, name_or_uuid, return_root=False): # Optimization for large number of roots (e.g. ironic): if name_or_uuid # represents a root, this is O(1). found = self.roots_by_uuid.get(name_or_uuid) if found: return found found = self.roots_by_name.get(name_or_uuid) if found: return found # Okay, it's a child; do it the hard way. for root in self.roots: found = root.find(name_or_uuid) if found: return root if return_root else found raise ValueError(_("No such provider %s") % name_or_uuid) def data(self, name_or_uuid): """Return a point-in-time copy of the specified provider's data. :param name_or_uuid: Either name or UUID of the resource provider whose data is to be returned. :return: ProviderData object representing the specified provider. :raises: ValueError if a provider with name_or_uuid was not found in the tree. """ with self.lock: return self._find_with_lock(name_or_uuid).data() def exists(self, name_or_uuid): """Given either a name or a UUID, return True if the tree contains the provider, False otherwise. """ with self.lock: try: self._find_with_lock(name_or_uuid) return True except ValueError: return False def new_child(self, name, parent, uuid=None, generation=None): """Creates a new child provider with the given name and uuid under the given parent. :param name: The name of the new child provider :param parent: Either name or UUID of the parent provider :param uuid: The UUID of the new child provider :param generation: Generation to set for the new child provider :returns: the UUID of the new provider :raises ValueError if a provider with the specified uuid or name already exists; or if parent_uuid points to a nonexistent provider. """ with self.lock: try: self._find_with_lock(uuid or name) except ValueError: pass else: err = _("Provider %s already exists.") raise ValueError(err % (uuid or name)) parent_node = self._find_with_lock(parent) p = _Provider(name, uuid, generation, parent_node.uuid) parent_node.add_child(p) return p.uuid def has_inventory(self, name_or_uuid): """Returns True if the provider identified by name_or_uuid has any inventory records at all. :raises: ValueError if a provider with uuid was not found in the tree. :param name_or_uuid: Either name or UUID of the resource provider """ with self.lock: p = self._find_with_lock(name_or_uuid) return p.has_inventory() def has_inventory_changed(self, name_or_uuid, inventory): """Returns True if the supplied inventory is different for the provider with the supplied name or UUID. :raises: ValueError if a provider with name_or_uuid was not found in the tree. :param name_or_uuid: Either name or UUID of the resource provider to query inventory for. :param inventory: dict, keyed by resource class, of inventory information. 
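# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of this module): building a small tree
# with the API of this class. The names and UUIDs are hypothetical; the
# helper is defined only for illustration and is never called.
def _example_build_tree():
    tree = ProviderTree()
    root_uuid = tree.new_root(
        'compute1', '11111111-1111-1111-1111-111111111111', generation=0)
    child_uuid = tree.new_child(
        'compute1_0000:AF:00.0', root_uuid,
        uuid='22222222-2222-2222-2222-222222222222')
    assert tree.exists('compute1_0000:AF:00.0')
    # data() returns a point-in-time, read-only snapshot of one provider.
    assert tree.data(child_uuid).parent_uuid == root_uuid
    # Top-down traversal order: the root comes before its descendants.
    assert tree.get_provider_uuids() == [root_uuid, child_uuid]
# ---------------------------------------------------------------------------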
""" with self.lock: provider = self._find_with_lock(name_or_uuid) return provider.has_inventory_changed(inventory) def update_inventory(self, name_or_uuid, inventory, generation=None): """Given a name or UUID of a provider and a dict of inventory resource records, update the provider's inventory and set the provider's generation. :returns: True if the inventory has changed. :note: The provider's generation is always set to the supplied generation, even if there were no changes to the inventory. :raises: ValueError if a provider with name_or_uuid was not found in the tree. :param name_or_uuid: Either name or UUID of the resource provider to update inventory for. :param inventory: dict, keyed by resource class, of inventory information. :param generation: The resource provider generation to set. If not specified, the provider's generation is not changed. """ with self.lock: provider = self._find_with_lock(name_or_uuid) return provider.update_inventory(inventory, generation) def has_sharing_provider(self, resource_class): """Returns whether the specified provider_tree contains any sharing providers of inventory of the specified resource_class. """ for rp_uuid in self.get_provider_uuids(): pdata = self.data(rp_uuid) has_rc = resource_class in pdata.inventory is_sharing = os_traits.MISC_SHARES_VIA_AGGREGATE in pdata.traits if has_rc and is_sharing: return True return False def has_traits(self, name_or_uuid, traits): """Given a name or UUID of a provider, query whether that provider has *all* of the specified traits. :raises: ValueError if a provider with name_or_uuid was not found in the tree. :param name_or_uuid: Either name or UUID of the resource provider to query for traits. :param traits: Iterable of string trait names to search for. :return: True if this provider has *all* of the specified traits; False if any of the specified traits are absent. Returns True if the traits parameter is empty, even if the provider has no traits. """ with self.lock: provider = self._find_with_lock(name_or_uuid) return provider.has_traits(traits) def have_traits_changed(self, name_or_uuid, traits): """Returns True if the specified traits list is different for the provider with the specified name or UUID. :raises: ValueError if a provider with name_or_uuid was not found in the tree. :param name_or_uuid: Either name or UUID of the resource provider to query traits for. :param traits: Iterable of string trait names to compare against the provider's traits. """ with self.lock: provider = self._find_with_lock(name_or_uuid) return provider.have_traits_changed(traits) def update_traits(self, name_or_uuid, traits, generation=None): """Given a name or UUID of a provider and an iterable of string trait names, update the provider's traits and set the provider's generation. :returns: True if the traits list has changed. :note: The provider's generation is always set to the supplied generation, even if there were no changes to the traits. :raises: ValueError if a provider with name_or_uuid was not found in the tree. :param name_or_uuid: Either name or UUID of the resource provider to update traits for. :param traits: Iterable of string trait names to set. :param generation: The resource provider generation to set. If None, the provider's generation is not changed. """ with self.lock: provider = self._find_with_lock(name_or_uuid) return provider.update_traits(traits, generation=generation) def add_traits(self, name_or_uuid, *traits): """Set traits on a provider, without affecting existing traits. 
:param name_or_uuid: The name or UUID of the provider whose traits are to be affected. :param traits: String names of traits to be added. """ if not traits: return with self.lock: provider = self._find_with_lock(name_or_uuid) final_traits = provider.traits | set(traits) provider.update_traits(final_traits) def remove_traits(self, name_or_uuid, *traits): """Unset traits on a provider, without affecting other existing traits. :param name_or_uuid: The name or UUID of the provider whose traits are to be affected. :param traits: String names of traits to be removed. """ if not traits: return with self.lock: provider = self._find_with_lock(name_or_uuid) final_traits = provider.traits - set(traits) provider.update_traits(final_traits) def in_aggregates(self, name_or_uuid, aggregates): """Given a name or UUID of a provider, query whether that provider is a member of *all* the specified aggregates. :raises: ValueError if a provider with name_or_uuid was not found in the tree. :param name_or_uuid: Either name or UUID of the resource provider to query for aggregates. :param aggregates: Iterable of string aggregate UUIDs to search for. :return: True if this provider is associated with *all* of the specified aggregates; False if any of the specified aggregates are absent. Returns True if the aggregates parameter is empty, even if the provider has no aggregate associations. """ with self.lock: provider = self._find_with_lock(name_or_uuid) return provider.in_aggregates(aggregates) def have_aggregates_changed(self, name_or_uuid, aggregates): """Returns True if the specified aggregates list is different for the provider with the specified name or UUID. :raises: ValueError if a provider with name_or_uuid was not found in the tree. :param name_or_uuid: Either name or UUID of the resource provider to query aggregates for. :param aggregates: Iterable of string aggregate UUIDs to compare against the provider's aggregates. """ with self.lock: provider = self._find_with_lock(name_or_uuid) return provider.have_aggregates_changed(aggregates) def update_aggregates(self, name_or_uuid, aggregates, generation=None): """Given a name or UUID of a provider and an iterable of string aggregate UUIDs, update the provider's aggregates and set the provider's generation. :returns: True if the aggregates list has changed. :note: The provider's generation is always set to the supplied generation, even if there were no changes to the aggregates. :raises: ValueError if a provider with name_or_uuid was not found in the tree. :param name_or_uuid: Either name or UUID of the resource provider to update aggregates for. :param aggregates: Iterable of string aggregate UUIDs to set. :param generation: The resource provider generation to set. If None, the provider's generation is not changed. """ with self.lock: provider = self._find_with_lock(name_or_uuid) return provider.update_aggregates(aggregates, generation=generation) def add_aggregates(self, name_or_uuid, *aggregates): """Set aggregates on a provider, without affecting existing aggregates. :param name_or_uuid: The name or UUID of the provider whose aggregates are to be affected. :param aggregates: String UUIDs of aggregates to be added. """ with self.lock: provider = self._find_with_lock(name_or_uuid) final_aggs = provider.aggregates | set(aggregates) provider.update_aggregates(final_aggs) def remove_aggregates(self, name_or_uuid, *aggregates): """Unset aggregates on a provider, without affecting other existing aggregates. 
:param name_or_uuid: The name or UUID of the provider whose aggregates are to be affected. :param aggregates: String UUIDs of aggregates to be removed. """ with self.lock: provider = self._find_with_lock(name_or_uuid) final_aggs = provider.aggregates - set(aggregates) provider.update_aggregates(final_aggs) def update_resources(self, name_or_uuid, resources): """Given a name or UUID of a provider and a dict of resources, update the provider's resources. :param name_or_uuid: The name or UUID of the provider whose resources are to be affected. :param resources: A dict keyed by resource class, and the value is a set of objects.Resource instance. :returns: True if the resources are updated else False """ with self.lock: provider = self._find_with_lock(name_or_uuid) return provider.update_resources(resources) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/resource_tracker.py0000664000175000017500000031336400000000000021127 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Track resources like memory and disk for a compute host. Provides the scheduler with useful information about availability through the ComputeNode model. """ import collections import copy from keystoneauth1 import exceptions as ks_exc import os_traits from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils import retrying from nova.compute import claims from nova.compute import monitors from nova.compute import pci_placement_translator from nova.compute import provider_config from nova.compute import stats as compute_stats from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_states import nova.conf from nova import exception from nova.i18n import _ from nova import objects from nova.objects import base as obj_base from nova.objects import fields from nova.objects import migration as migration_obj from nova.pci import manager as pci_manager from nova.pci import request as pci_request from nova import rpc from nova.scheduler.client import report from nova import utils from nova.virt import hardware from nova.virt import node CONF = nova.conf.CONF LOG = logging.getLogger(__name__) COMPUTE_RESOURCE_SEMAPHORE = "compute_resources" def _instance_in_resize_state(instance): """Returns True if the instance is in one of the resizing states. 
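
    An instance is considered to be resizing when its vm_state is RESIZED,
    or when its vm_state is ACTIVE or STOPPED while its task_state is one
    of the resizing or rebuild task states (mirroring the checks below).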
:param instance: `nova.objects.Instance` object """ vm = instance.vm_state task = instance.task_state if vm == vm_states.RESIZED: return True if vm in [vm_states.ACTIVE, vm_states.STOPPED] and task in ( task_states.resizing_states + task_states.rebuild_states): return True return False def _instance_is_live_migrating(instance): vm = instance.vm_state task = instance.task_state if task == task_states.MIGRATING and vm in [vm_states.ACTIVE, vm_states.PAUSED]: return True return False class ResourceTracker(object): """Compute helper class for keeping track of resource usage as instances are built and destroyed. """ def __init__(self, host, driver, reportclient=None): self.service_ref = None self.host = host self.driver = driver self.pci_tracker = None # Dict of objects.ComputeNode objects, keyed by nodename self.compute_nodes = {} # Dict of Stats objects, keyed by nodename self.stats = collections.defaultdict(compute_stats.Stats) # Set of UUIDs of instances tracked on this host. self.tracked_instances = set() self.tracked_migrations = {} self.is_bfv = {} # dict, keyed by instance uuid, to is_bfv boolean monitor_handler = monitors.MonitorHandler(self) self.monitors = monitor_handler.monitors self.old_resources = collections.defaultdict(objects.ComputeNode) self.reportclient = reportclient or report.report_client_singleton() self.ram_allocation_ratio = CONF.ram_allocation_ratio self.cpu_allocation_ratio = CONF.cpu_allocation_ratio self.disk_allocation_ratio = CONF.disk_allocation_ratio self.provider_tree = None # Dict of assigned_resources, keyed by resource provider uuid # the value is a dict again, keyed by resource class # and value of this sub-dict is a set of Resource obj self.assigned_resources = collections.defaultdict( lambda: collections.defaultdict(set)) # Retrieves dict of provider config data. This can fail with # nova.exception.ProviderConfigException if invalid or conflicting # data exists in the provider config files. self.provider_configs = provider_config.get_provider_configs( CONF.compute.provider_config_location) # Set of ids for providers identified in provider config files that # are not found on the provider tree. These are tracked to facilitate # smarter logging. self.absent_providers = set() def set_service_ref(self, service_ref): # NOTE(danms): Neither of these should ever happen, but sanity check # just in case if self.service_ref and self.service_ref.id != service_ref.id: raise exception.ComputeServiceInUse( 'Resource tracker re-initialized with a different service') elif service_ref.host != self.host: raise exception.ServiceNotUnique( 'Resource tracker initialized with service that does ' 'not match host') self.service_ref = service_ref def _invalidate_pci_in_placement_cached_rps(self, allocs): """Invalidate cache for PCI-in-placement providers. This invalidates the local cached copy of any provider for which an allocation of a PCI-in-placement device exists. We do this in case the reserved count has been modified externally to make sure we see it. 
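
        The ``allocs`` argument is expected to be in the usual placement
        allocations format keyed by resource provider UUID; a purely
        illustrative (not literal) example::

            {
                '<rp_uuid>': {'resources': {'<resource_class>': 1}},
            }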
""" if not allocs: return for rp, rp_allocs in allocs.items(): try: p_data = self.provider_tree.data(rp) except ValueError: # Not all allocations for an instance are necessarily against # a provider in our tree continue if os_traits.COMPUTE_MANAGED_PCI_DEVICE in p_data.traits: self.reportclient.invalidate_resource_provider( rp, cacheonly=True) @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def instance_claim(self, context, instance, nodename, allocations, limits=None): """Indicate that some resources are needed for an upcoming compute instance build operation. This should be called before the compute node is about to perform an instance build operation that will consume additional resources. :param context: security context :param instance: instance to reserve resources for. :type instance: nova.objects.instance.Instance object :param nodename: The Ironic nodename selected by the scheduler :param allocations: The placement allocation records for the instance. :param limits: Dict of oversubscription limits for memory, disk, and CPUs. :returns: A Claim ticket representing the reserved resources. It can be used to revert the resource usage if an error occurs during the instance build. """ if self.disabled(nodename): # If we get here, it means we are trying to claim for an instance # that was scheduled to a node that we do not have in our list, # or is in some other way unmanageable by this node. This would # mean that we are unable to account for resources, create # allocations in placement, or do any of the other accounting # necessary for this to work. In the past, this situation was # effectively ignored silently, but in a world where we track # resources with placement and instance assignment to compute nodes # by service, we can no longer be leaky. raise exception.ComputeResourcesUnavailable( ('Attempt to claim resources for instance %(inst)s ' 'on unknown node %(node)s failed') % { 'inst': instance.uuid, 'node': nodename}) # sanity checks: if instance.host: LOG.warning("Host field should not be set on the instance " "until resources have been claimed.", instance=instance) if instance.node: LOG.warning("Node field should not be set on the instance " "until resources have been claimed.", instance=instance) cn = self.compute_nodes[nodename] pci_requests = instance.pci_requests claim = claims.Claim(context, instance, nodename, self, cn, pci_requests, limits=limits) # self._set_instance_host_and_node() will save instance to the DB # so set instance.numa_topology first. We need to make sure # that numa_topology is saved while under COMPUTE_RESOURCE_SEMAPHORE # so that the resource audit knows about any cpus we've pinned. instance_numa_topology = claim.claimed_numa_topology instance.numa_topology = instance_numa_topology self._set_instance_host_and_node(instance, cn) if self.pci_tracker: # NOTE(jaypipes): ComputeNode.pci_device_pools is set below # in _update_usage_from_instance(). self.pci_tracker.claim_instance(context, pci_requests, instance_numa_topology) claimed_resources = self._claim_resources(allocations) instance.resources = claimed_resources # In case we have any allocations for PCI-in-placement devices, be # sure to invalidate our cache of those providers before we run # _update() below (which does the PCI-in-placement sync). 
self._invalidate_pci_in_placement_cached_rps(allocations) # Mark resources in-use and update stats self._update_usage_from_instance(context, instance, nodename) elevated = context.elevated() # persist changes to the compute node: self._update(elevated, cn) return claim @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def rebuild_claim(self, context, instance, nodename, allocations, limits=None, image_meta=None, migration=None): """Create a claim for a rebuild operation.""" return self._move_claim( context, instance, instance.flavor, nodename, migration, allocations, move_type=fields.MigrationType.EVACUATION, image_meta=image_meta, limits=limits) @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def resize_claim( self, context, instance, flavor, nodename, migration, allocations, image_meta=None, limits=None, ): """Create a claim for a resize or cold-migration move. Note that this code assumes ``instance.new_flavor`` is set when resizing with a new flavor. """ return self._move_claim( context, instance, flavor, nodename, migration, allocations, image_meta=image_meta, limits=limits) @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def live_migration_claim( self, context, instance, nodename, migration, limits, allocs, ): """Builds a MoveClaim for a live migration. :param context: The request context. :param instance: The instance being live migrated. :param nodename: The nodename of the destination host. :param migration: The Migration object associated with this live migration. :param limits: A SchedulerLimits object from when the scheduler selected the destination host. :param allocs: The placement allocation records for the instance. :returns: A MoveClaim for this live migration. """ # Flavor and image cannot change during a live migration. flavor = instance.flavor image_meta = instance.image_meta return self._move_claim( context, instance, flavor, nodename, migration, allocs, move_type=fields.MigrationType.LIVE_MIGRATION, image_meta=image_meta, limits=limits, ) def _move_claim( self, context, instance, new_flavor, nodename, migration, allocations, move_type=None, image_meta=None, limits=None, ): """Indicate that resources are needed for a move to this host. Move can be either a migrate/resize, live-migrate or an evacuate/rebuild operation. :param context: security context :param instance: instance object to reserve resources for :param new_flavor: new flavor being resized to :param nodename: The Ironic nodename selected by the scheduler :param migration: A migration object if one was already created elsewhere for this operation (otherwise None) :param allocations: the placement allocation records. :param move_type: move type - can be one of 'migration', 'resize', 'live-migration', 'evacuate' :param image_meta: instance image metadata :param limits: Dict of oversubscription limits for memory, disk, and CPUs :returns: A Claim ticket representing the reserved resources. This should be turned into finalize a resource claim or free resources after the compute operation is finished. """ image_meta = image_meta or {} if self.disabled(nodename): # This means we were asked to accept an incoming migration to a # node that we do not own or track. We really should not get here, # but if we do, we must refuse to continue with the migration # process, since we cannot account for those resources, create # allocations in placement, etc. This has been a silent resource # leak in the past, but it must be a hard failure now. 
raise exception.ComputeResourcesUnavailable( ('Attempt to claim move resources for instance %(inst)s on ' 'unknown node %(node)s failed') % { 'inst': instance.uuid, 'node': 'nodename'}) cn = self.compute_nodes[nodename] if migration: self._claim_existing_migration(migration, cn) else: migration = self._create_migration( context, instance, new_flavor, cn, move_type) # TODO(moshele): we are recreating the pci requests even if # there was no change on resize. This will cause allocating # the old/new pci device in the resize phase. In the future # we would like to optimise this. new_pci_requests = pci_request.get_pci_requests_from_flavor( new_flavor) new_pci_requests.instance_uuid = instance.uuid # On resize merge the SR-IOV ports pci_requests # with the new instance flavor pci_requests. if instance.pci_requests: for request in instance.pci_requests.requests: if request.source == objects.InstancePCIRequest.NEUTRON_PORT: new_pci_requests.requests.append(request) claim = claims.MoveClaim(context, instance, nodename, new_flavor, image_meta, self, cn, new_pci_requests, migration, limits=limits) claimed_pci_devices_objs = [] # TODO(artom) The second part of this condition should not be # necessary, but since SRIOV live migration is currently handled # elsewhere - see for example _claim_pci_for_instance_vifs() in the # compute manager - we don't do any PCI claims if this is a live # migration to avoid stepping on that code's toes. Ideally, # MoveClaim/this method would be used for all live migration resource # claims. if self.pci_tracker and not migration.is_live_migration: # NOTE(jaypipes): ComputeNode.pci_device_pools is set below # in _update_usage_from_instance(). claimed_pci_devices_objs = self.pci_tracker.claim_instance( context, new_pci_requests, claim.claimed_numa_topology) claimed_pci_devices = objects.PciDeviceList( objects=claimed_pci_devices_objs) claimed_resources = self._claim_resources(allocations) old_resources = instance.resources # TODO(jaypipes): Move claimed_numa_topology out of the Claim's # constructor flow so the Claim constructor only tests whether # resources can be claimed, not consume the resources directly. mig_context = objects.MigrationContext( context=context, instance_uuid=instance.uuid, migration_id=migration.id, old_numa_topology=instance.numa_topology, new_numa_topology=claim.claimed_numa_topology, # NOTE(gibi): the _update_usage_from_migration call below appends # the newly claimed pci devices to the instance.pci_devices list # to keep the migration context independent we need to make a copy # that list here. We need a deep copy as we need to duplicate # the instance.pci_devices.objects list old_pci_devices=copy.deepcopy(instance.pci_devices), new_pci_devices=claimed_pci_devices, old_pci_requests=instance.pci_requests, new_pci_requests=new_pci_requests, old_resources=old_resources, new_resources=claimed_resources) instance.migration_context = mig_context instance.save() # In case we have any allocations for PCI-in-placement devices, be # sure to invalidate our cache of those providers before we run # _update() below (which does the PCI-in-placement sync). self._invalidate_pci_in_placement_cached_rps(allocations) # Mark the resources in-use for the resize landing on this # compute host: self._update_usage_from_migration(context, instance, migration, nodename) elevated = context.elevated() self._update(elevated, cn) return claim def _create_migration( self, context, instance, new_flavor, node, move_type=None, ): """Create a migration record for the upcoming resize. 
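
        The record created below captures the source and destination
        compute/node, the old and new flavor ids, and an initial
        'pre-migrating' status.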
This should be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource claim will not be lost if the audit process starts. """ migration = objects.Migration(context=context.elevated()) migration.dest_compute = self.host migration.dest_node = node.hypervisor_hostname migration.dest_compute_id = node.id migration.dest_host = self.driver.get_host_ip_addr() migration.old_instance_type_id = instance.flavor.id migration.new_instance_type_id = new_flavor.id migration.status = 'pre-migrating' migration.instance_uuid = instance.uuid migration.source_compute = instance.host migration.source_node = instance.node if move_type: migration.migration_type = move_type else: migration.migration_type = migration_obj.determine_migration_type( migration) migration.create() return migration def _claim_existing_migration(self, migration, node): """Make an existing migration record count for resource tracking. If a migration record was created already before the request made it to this compute host, only set up the migration so it's included in resource tracking. This should be done while the COMPUTE_RESOURCES_SEMAPHORE is held. """ migration.dest_compute = self.host migration.dest_node = node.hypervisor_hostname migration.dest_compute_id = node.id migration.dest_host = self.driver.get_host_ip_addr() # NOTE(artom) Migration objects for live migrations are created with # status 'accepted' by the conductor in live_migrate_instance() and do # not have a 'pre-migrating' status. if not migration.is_live_migration: migration.status = 'pre-migrating' migration.save() def _claim_resources(self, allocations): """Claim resources according to assigned resources from allocations and available resources in provider tree """ if not allocations: return None claimed_resources = [] for rp_uuid, alloc_dict in allocations.items(): try: provider_data = self.provider_tree.data(rp_uuid) except ValueError: # If an instance is in evacuating, it will hold new and old # allocations, but the provider UUIDs in old allocations won't # exist in the current provider tree, so skip it. LOG.debug("Skip claiming resources of provider %(rp_uuid)s, " "since the provider UUIDs are not in provider tree.", {'rp_uuid': rp_uuid}) continue for rc, amount in alloc_dict['resources'].items(): if rc not in provider_data.resources: # This means we don't use provider_data.resources to # assign this kind of resource class, such as 'VCPU' for # now, otherwise the provider_data.resources will be # populated with this resource class when updating # provider tree. continue assigned = self.assigned_resources[rp_uuid][rc] free = provider_data.resources[rc] - assigned if amount > len(free): reason = (_("Needed %(amount)d units of resource class " "%(rc)s, but %(avail)d are available.") % {'amount': amount, 'rc': rc, 'avail': len(free)}) raise exception.ComputeResourcesUnavailable(reason=reason) for i in range(amount): claimed_resources.append(free.pop()) if claimed_resources: self._add_assigned_resources(claimed_resources) return objects.ResourceList(objects=claimed_resources) def _populate_assigned_resources(self, context, instance_by_uuid): """Populate self.assigned_resources organized by resource class and reource provider uuid, which is as following format: { $RP_UUID: { $RESOURCE_CLASS: [objects.Resource, ...], $RESOURCE_CLASS: [...]}, ...} """ resources = [] # Get resources assigned to migrations for mig in self.tracked_migrations.values(): mig_ctx = mig.instance.migration_context # We might have a migration whose instance hasn't arrived here yet. 
# Ignore it. if not mig_ctx: continue if mig.source_compute == self.host and 'old_resources' in mig_ctx: resources.extend(mig_ctx.old_resources or []) if mig.dest_compute == self.host and 'new_resources' in mig_ctx: resources.extend(mig_ctx.new_resources or []) # Get resources assigned to instances for uuid in self.tracked_instances: resources.extend(instance_by_uuid[uuid].resources or []) self.assigned_resources.clear() self._add_assigned_resources(resources) def _check_resources(self, context): """Check if there are assigned resources not found in provider tree""" notfound = set() for rp_uuid in self.assigned_resources: provider_data = self.provider_tree.data(rp_uuid) for rc, assigned in self.assigned_resources[rp_uuid].items(): notfound |= (assigned - provider_data.resources[rc]) if not notfound: return # This only happens when assigned resources are removed # from the configuration and the compute service is SIGHUP'd # or restarted. resources = [(res.identifier, res.resource_class) for res in notfound] reason = _("The following resources are assigned to instances, " "but were not listed in the configuration: %s " "Please check if this will influence your instances, " "and restore your configuration if necessary") % resources raise exception.AssignedResourceNotFound(reason=reason) def _release_assigned_resources(self, resources): """Remove resources from self.assigned_resources.""" if not resources: return for resource in resources: rp_uuid = resource.provider_uuid rc = resource.resource_class try: self.assigned_resources[rp_uuid][rc].remove(resource) except KeyError: LOG.warning("Release resource %(rc)s: %(id)s of provider " "%(rp_uuid)s, not tracked in " "ResourceTracker.assigned_resources.", {'rc': rc, 'id': resource.identifier, 'rp_uuid': rp_uuid}) def _add_assigned_resources(self, resources): """Add resources to self.assigned_resources""" if not resources: return for resource in resources: rp_uuid = resource.provider_uuid rc = resource.resource_class self.assigned_resources[rp_uuid][rc].add(resource) def _set_instance_host_and_node(self, instance, node): """Tag the instance as belonging to this host. This should be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource claim will not be lost if the audit process starts. """ # NOTE(mriedem): ComputeManager._nil_out_instance_obj_host_and_node is # somewhat tightly coupled to the fields set in this method so if this # method changes that method might need to be updated. instance.host = self.host instance.launched_on = self.host if node is not None: # NOTE(danms): This method can be called when the node is disabled, # which means not in our list. The note in instance_claim() # explains that we should actually never get there (here) and that # if we do, we will likely record the instance membership # incorrectly. As such, if we do not have a compute node to # update the instance, just avoid making a change. instance.node = node.hypervisor_hostname instance.compute_id = node.id instance.save() def _unset_instance_host_and_node(self, instance): """Untag the instance so it no longer belongs to the host. This should be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource claim will not be lost if the audit process starts. 
""" instance.host = None instance.node = None instance.compute_id = None instance.save() @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def abort_instance_claim(self, context, instance, nodename): """Remove usage from the given instance.""" self._update_usage_from_instance(context, instance, nodename, is_removed=True) instance.clear_numa_topology() self._unset_instance_host_and_node(instance) self._update(context.elevated(), self.compute_nodes[nodename]) def _drop_pci_devices(self, instance, nodename, prefix): if self.pci_tracker: # free old/new allocated pci devices pci_devices = self._get_migration_context_resource( 'pci_devices', instance, prefix=prefix) if pci_devices: for pci_device in pci_devices: self.pci_tracker.free_device(pci_device, instance) dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj() self.compute_nodes[nodename].pci_device_pools = dev_pools_obj @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def drop_move_claim_at_source(self, context, instance, migration): """Drop a move claim after confirming a resize or cold migration.""" migration.status = 'confirmed' migration.save() self._drop_move_claim( context, instance, migration.source_node, instance.old_flavor, prefix='old_') # NOTE(stephenfin): Unsetting this is unnecessary for cross-cell # resize, since the source and dest instance objects are different and # the source instance will be deleted soon. It's easier to just do it # though. instance.drop_migration_context() @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def drop_move_claim_at_dest(self, context, instance, migration): """Drop a move claim after reverting a resize or cold migration.""" # NOTE(stephenfin): This runs on the destination, before we return to # the source and resume the instance there. As such, the migration # isn't really reverted yet, but this status is what we use to # indicate that we no longer needs to account for usage on this host migration.status = 'reverted' migration.save() self._drop_move_claim( context, instance, migration.dest_node, instance.new_flavor, prefix='new_') instance.revert_migration_context() instance.save(expected_task_state=[task_states.RESIZE_REVERTING]) @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def drop_move_claim(self, context, instance, nodename, flavor=None, prefix='new_'): self._drop_move_claim( context, instance, nodename, flavor, prefix='new_') def _drop_move_claim( self, context, instance, nodename, flavor=None, prefix='new_', ): """Remove usage for an incoming/outgoing migration. :param context: Security context. :param instance: The instance whose usage is to be removed. :param nodename: Host on which to remove usage. If the migration completed successfully, this is normally the source. If it did not complete successfully (failed or reverted), this is normally the destination. :param flavor: The flavor that determines the usage to remove. If the migration completed successfully, this is the old flavor to be removed from the source. If the migration did not complete successfully, this is the new flavor to be removed from the destination. :param prefix: Prefix to use when accessing migration context attributes. 'old_' or 'new_', with 'new_' being the default. 
""" if instance["uuid"] in self.tracked_migrations: if not flavor: flavor = self._get_flavor( instance, prefix, self.tracked_migrations[instance["uuid"]] ) if flavor is not None: numa_topology = self._get_migration_context_resource( 'numa_topology', instance, prefix=prefix) usage = self._get_usage_dict( flavor, instance, numa_topology=numa_topology) self._drop_pci_devices(instance, nodename, prefix) resources = self._get_migration_context_resource( 'resources', instance, prefix=prefix) self._release_assigned_resources(resources) self._update_usage(usage, nodename, sign=-1) ctxt = context.elevated() self._update(ctxt, self.compute_nodes[nodename]) # Remove usage for an instance that is tracked in migrations, such as # on the dest node during revert resize. self.tracked_migrations.pop(instance['uuid'], None) # Remove usage for an instance that is not tracked in migrations (such # as on the source node after a migration). # NOTE(lbeliveau): On resize on the same node, the instance is # included in both tracked_migrations and tracked_instances. self.tracked_instances.discard(instance['uuid']) @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def update_usage(self, context, instance, nodename): """Update the resource usage and stats after a change in an instance """ if self.disabled(nodename): return uuid = instance['uuid'] # don't update usage for this instance unless it submitted a resource # claim first: if uuid in self.tracked_instances: self._update_usage_from_instance(context, instance, nodename) self._update(context.elevated(), self.compute_nodes[nodename]) def disabled(self, nodename): return (nodename not in self.compute_nodes or not self.driver.node_is_available(nodename)) def _init_compute_node(self, context, resources): """Initialize the compute node if it does not already exist. The resource tracker will be inoperable if compute_node is not defined. The compute_node will remain undefined if we fail to create it or if there is no associated service registered. If this method has to create a compute node it needs initial values - these come from resources. :param context: security context :param resources: initial values :returns: True if a new compute_nodes table record was created, False otherwise """ nodename = resources['hypervisor_hostname'] node_uuid = resources['uuid'] # if there is already a compute node just use resources # to initialize if nodename in self.compute_nodes: cn = self.compute_nodes[nodename] if 'service_id' not in cn or cn.service_id is None: LOG.debug('Setting ComputeNode %s service_id to %i', cn.uuid, self.service_ref.id) cn.service_id = self.service_ref.id elif cn.service_id != self.service_ref.id: LOG.warning('Moving ComputeNode %s from service %i to %i', cn.uuid, cn.service_id, self.service_ref.id) cn.service_id = self.service_ref.id self._copy_resources(cn, resources) self._setup_pci_tracker(context, cn, resources) return False # now try to get the compute node record from the # database. If we get one we use resources to initialize # We use read_deleted=True so that we will find and recover a deleted # node object, if necessary. 
with utils.temporary_mutation(context, read_deleted='yes'): cn = self._get_compute_node(context, node_uuid) if cn and cn.deleted: # Undelete and save this right now so that everything below # can continue without read_deleted=yes LOG.info('Undeleting compute node %s', cn.uuid) cn.deleted = False cn.deleted_at = None cn.save() if cn: if cn.host != self.host: LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s", {"name": nodename, "old": cn.host, "new": self.host}) cn.host = self.host cn.service_id = self.service_ref.id self._update(context, cn) self.compute_nodes[nodename] = cn self._copy_resources(cn, resources) self._setup_pci_tracker(context, cn, resources) return False # there was no local copy and none in the database # so we need to create a new compute node. This needs # to be initialized with resource values. cn = objects.ComputeNode(context) cn.host = self.host cn.service_id = self.service_ref.id self._copy_resources(cn, resources, initial=True) try: cn.create() except exception.DuplicateRecord: raise exception.InvalidConfiguration( 'Duplicate compute node record found for host %s node %s' % ( cn.host, cn.hypervisor_hostname)) # Only map the ComputeNode into compute_nodes if create() was OK # because if create() fails, on the next run through here nodename # would be in compute_nodes and we won't try to create again (because # of the logic above). self.compute_nodes[nodename] = cn LOG.info('Compute node record created for ' '%(host)s:%(node)s with uuid: %(uuid)s', {'host': self.host, 'node': nodename, 'uuid': cn.uuid}) self._setup_pci_tracker(context, cn, resources) return True def _setup_pci_tracker(self, context, compute_node, resources): if not self.pci_tracker: self.pci_tracker = pci_manager.PciDevTracker(context, compute_node) if 'pci_passthrough_devices' in resources: dev_json = resources.pop('pci_passthrough_devices') self.pci_tracker.update_devices_from_hypervisor_resources( dev_json) dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj() compute_node.pci_device_pools = dev_pools_obj def _copy_resources(self, compute_node, resources, initial=False): """Copy resource values to supplied compute_node.""" nodename = resources['hypervisor_hostname'] stats = self.stats[nodename] # purge old stats and init with anything passed in by the driver # NOTE(danms): Preserve 'failed_builds' across the stats clearing, # as that is not part of resources # TODO(danms): Stop doing this when we get a column to store this # directly prev_failed_builds = stats.get('failed_builds', 0) stats.clear() stats['failed_builds'] = prev_failed_builds stats.digest_stats(resources.get('stats')) compute_node.stats = stats # Update the allocation ratios for the related ComputeNode object # but only if the configured values are not the default; the # ComputeNode._from_db_object method takes care of providing default # allocation ratios when the config is left at the default, so # we'll really end up with something like a # ComputeNode.cpu_allocation_ratio of 16.0. We want to avoid # resetting the ComputeNode fields to None because that will make # the _resource_change method think something changed when really it # didn't. # NOTE(yikun): The CONF.initial_(cpu|ram|disk)_allocation_ratio would # be used when we initialize the compute node object, that means the # ComputeNode.(cpu|ram|disk)_allocation_ratio will be set to # CONF.initial_(cpu|ram|disk)_allocation_ratio when initial flag is # True. 
for res in ('cpu', 'disk', 'ram'): attr = '%s_allocation_ratio' % res if initial: conf_alloc_ratio = getattr(CONF, 'initial_%s' % attr) else: conf_alloc_ratio = getattr(self, attr) # NOTE(yikun): In Stein version, we change the default value of # (cpu|ram|disk)_allocation_ratio from 0.0 to None, but we still # should allow 0.0 to keep compatibility, and this 0.0 condition # will be removed in the next version (T version). if conf_alloc_ratio not in (0.0, None): setattr(compute_node, attr, conf_alloc_ratio) # now copy rest to compute_node compute_node.update_from_virt_driver(resources) def remove_node(self, nodename): """Handle node removal/rebalance. Clean up any stored data about a compute node no longer managed by this host. """ self.stats.pop(nodename, None) self.compute_nodes.pop(nodename, None) self.old_resources.pop(nodename, None) def _get_host_metrics(self, context, nodename): """Get the metrics from monitors and notify information to message bus. """ metrics = objects.MonitorMetricList() metrics_info = {} for monitor in self.monitors: try: monitor.populate_metrics(metrics) except NotImplementedError: LOG.debug("The compute driver doesn't support host " "metrics for %(mon)s", {'mon': monitor}) except Exception as exc: LOG.warning("Cannot get the metrics from %(mon)s; " "error: %(exc)s", {'mon': monitor, 'exc': exc}) # TODO(jaypipes): Remove this when compute_node.metrics doesn't need # to be populated as a JSONified string. metric_list = metrics.to_list() if len(metric_list): metrics_info['nodename'] = nodename metrics_info['metrics'] = metric_list metrics_info['host'] = self.host metrics_info['host_ip'] = CONF.my_ip notifier = rpc.get_notifier(service='compute', host=nodename) notifier.info(context, 'compute.metrics.update', metrics_info) compute_utils.notify_about_metrics_update( context, self.host, CONF.my_ip, nodename, metrics) return metric_list def update_available_resource(self, context, nodename, startup=False): """Override in-memory calculations of compute node resource usage based on data audited from the hypervisor layer. Add in resource claims in progress to account for operations that have declared a need for resources, but not necessarily retrieved them from the hypervisor layer yet. :param nodename: Temporary parameter representing the Ironic resource node. This parameter will be removed once Ironic baremetal resource nodes are handled like any other resource in the system. :param startup: Boolean indicating whether we're running this on on startup (True) or periodic (False). """ LOG.debug("Auditing locally available compute resources for " "%(host)s (node: %(node)s)", {'node': nodename, 'host': self.host}) resources = self.driver.get_available_resource(nodename) # NOTE(jaypipes): The resources['hypervisor_hostname'] field now # contains a non-None value, even for non-Ironic nova-compute hosts. It # is this value that will be populated in the compute_nodes table. resources['host_ip'] = CONF.my_ip if 'uuid' not in resources: # NOTE(danms): Any driver that does not provide a uuid per # node gets the locally-persistent compute_id. Only ironic # should be setting the per-node uuid (and returning # multiple nodes in general). If this is the first time we # are creating a compute node on this host, we will # generate and persist this uuid for the future. 
resources['uuid'] = node.get_local_node_uuid() # We want the 'cpu_info' to be None from the POV of the # virt driver, but the DB requires it to be non-null so # just force it to empty string if "cpu_info" not in resources or resources["cpu_info"] is None: resources["cpu_info"] = '' self._verify_resources(resources) self._report_hypervisor_resource_view(resources) self._update_available_resource(context, resources, startup=startup) def _pair_instances_to_migrations(self, migrations, instance_by_uuid): for migration in migrations: try: migration.instance = instance_by_uuid[migration.instance_uuid] except KeyError: # NOTE(danms): If this happens, we don't set it here, and # let the code either fail or lazy-load the instance later # which is what happened before we added this optimization. # NOTE(tdurakov) this situation is possible for resize/cold # migration when migration is finished but haven't yet # confirmed/reverted in that case instance already changed host # to destination and no matching happens LOG.debug('Migration for instance %(uuid)s refers to ' 'another host\'s instance!', {'uuid': migration.instance_uuid}) def _ensure_compute_id_for_instances(self, context, instances, node): """Check the instances on a given node for compute_id linkage""" for instance in instances: changed = False if 'compute_id' not in instance or instance.compute_id is None: LOG.info('Setting Instance.compute_id=%i for %s', node.id, instance.uuid) instance.compute_id = node.id changed = True elif instance.compute_id != node.id: LOG.warning( 'Correcting compute_id=%i from %i for instance %s', node.id, instance.compute_id, instance.uuid) instance.compute_id = node.id changed = True if changed: # NOTE(danms): Only save if we made a change here, even if # the instance had other pending changes instance.save() @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def _update_available_resource(self, context, resources, startup=False): # initialize the compute node object, creating it # if it does not already exist. is_new_compute_node = self._init_compute_node(context, resources) nodename = resources['hypervisor_hostname'] # if we could not init the compute node the tracker will be # disabled and we should quit now if self.disabled(nodename): return # Grab all instances assigned to this node: instances = objects.InstanceList.get_by_host_and_node( context, self.host, nodename, expected_attrs=['system_metadata', 'numa_topology', 'flavor', 'migration_context', 'resources']) # Grab all in-progress migrations and error migrations: migrations = objects.MigrationList.get_in_progress_and_error( context, self.host, nodename) # Check for tracked instances with in-progress, incoming, but not # finished migrations. For those instance the migration context # is not applied yet (it will be during finish_resize when the # migration goes to finished state). We need to manually and # temporary apply the migration context here when the resource usage is # updated. See bug 1953359 for more details. instance_by_uuid = {instance.uuid: instance for instance in instances} for migration in migrations: if ( migration.instance_uuid in instance_by_uuid and migration.dest_compute == self.host and migration.dest_node == nodename ): # we does not check for the 'post-migrating' migration status # as applying the migration context for an instance already # in finished migration status is a no-op anyhow. 
instance = instance_by_uuid[migration.instance_uuid] LOG.debug( 'Applying migration context for instance %s as it has an ' 'incoming, in-progress migration %s. ' 'Migration status is %s', migration.instance_uuid, migration.uuid, migration.status ) # It is OK not to revert the migration context at the end of # the periodic as the instance is not saved during the periodic instance.apply_migration_context() # Now calculate usage based on instance utilization: instance_by_uuid = self._update_usage_from_instances( context, instances, nodename) cn = self.compute_nodes[nodename] # Make sure these instances have a proper compute_id->CN.id link # NOTE(danms): This is for migrating old records, so we can remove # this once we are sure all records have been migrated and we can # rely on this linkage. self._ensure_compute_id_for_instances(context, instances, cn) self._pair_instances_to_migrations(migrations, instance_by_uuid) self._update_usage_from_migrations(context, migrations, nodename) # A new compute node means there won't be a resource provider yet since # that would be created via the _update() call below, and if there is # no resource provider then there are no allocations against it. if not is_new_compute_node: self._remove_deleted_instances_allocations( context, self.compute_nodes[nodename], migrations, instance_by_uuid) # NOTE(yjiang5): Because pci device tracker status is not cleared in # this periodic task, and also because the resource tracker is not # notified when instances are deleted, we need remove all usages # from deleted instances. self.pci_tracker.clean_usage(instances, migrations) self._report_final_resource_view(nodename) metrics = self._get_host_metrics(context, nodename) # TODO(pmurray): metrics should not be a json string in ComputeNode, # but it is. This should be changed in ComputeNode cn.metrics = jsonutils.dumps(metrics) # Update assigned resources to self.assigned_resources self._populate_assigned_resources(context, instance_by_uuid) # update the compute_node self._update(context, cn, startup=startup) LOG.debug('Compute_service record updated for %(host)s:%(node)s', {'host': self.host, 'node': nodename}) # Check if there is any resource assigned but not found # in provider tree if startup: self._check_resources(context) def _get_compute_node(self, context, node_uuid): """Returns compute node for the host and nodename.""" try: return objects.ComputeNode.get_by_uuid(context, node_uuid) except exception.NotFound: LOG.warning("No compute node record for %(host)s:%(node)s", {'host': self.host, 'node': node_uuid}) def _report_hypervisor_resource_view(self, resources): """Log the hypervisor's view of free resources. This is just a snapshot of resource usage recorded by the virt driver. 
The following resources are logged: - free memory - free disk - free CPUs - assignable PCI devices """ nodename = resources['hypervisor_hostname'] free_ram_mb = resources['memory_mb'] - resources['memory_mb_used'] free_disk_gb = resources['local_gb'] - resources['local_gb_used'] vcpus = resources['vcpus'] if vcpus: free_vcpus = vcpus - resources['vcpus_used'] else: free_vcpus = 'unknown' pci_devices = resources.get('pci_passthrough_devices') LOG.debug("Hypervisor/Node resource view: " "name=%(node)s " "free_ram=%(free_ram)sMB " "free_disk=%(free_disk)sGB " "free_vcpus=%(free_vcpus)s " "pci_devices=%(pci_devices)s", {'node': nodename, 'free_ram': free_ram_mb, 'free_disk': free_disk_gb, 'free_vcpus': free_vcpus, 'pci_devices': pci_devices}) def _report_final_resource_view(self, nodename): """Report final calculate of physical memory, used virtual memory, disk, usable vCPUs, used virtual CPUs and PCI devices, including instance calculations and in-progress resource claims. These values will be exposed via the compute node table to the scheduler. """ cn = self.compute_nodes[nodename] vcpus = cn.vcpus if vcpus: tcpu = vcpus ucpu = cn.vcpus_used LOG.debug("Total usable vcpus: %(tcpu)s, " "total allocated vcpus: %(ucpu)s", {'tcpu': vcpus, 'ucpu': ucpu}) else: tcpu = 0 ucpu = 0 pci_stats = (list(cn.pci_device_pools) if cn.pci_device_pools else []) LOG.debug("Final resource view: " "name=%(node)s " "phys_ram=%(phys_ram)sMB " "used_ram=%(used_ram)sMB " "phys_disk=%(phys_disk)sGB " "used_disk=%(used_disk)sGB " "total_vcpus=%(total_vcpus)s " "used_vcpus=%(used_vcpus)s " "pci_stats=%(pci_stats)s " "stats=%(stats)s", {'node': nodename, 'phys_ram': cn.memory_mb, 'used_ram': cn.memory_mb_used, 'phys_disk': cn.local_gb, 'used_disk': cn.local_gb_used, 'total_vcpus': tcpu, 'used_vcpus': ucpu, 'pci_stats': pci_stats, 'stats': cn.stats or {} }) def _resource_change(self, compute_node): """Check to see if any resources have changed.""" nodename = compute_node.hypervisor_hostname old_compute = self.old_resources[nodename] if not obj_base.obj_equal_prims( compute_node, old_compute, ['updated_at']): self.old_resources[nodename] = copy.deepcopy(compute_node) return True return False def _sync_compute_service_disabled_trait(self, context, traits): """Synchronize the COMPUTE_STATUS_DISABLED trait on the node provider. Determines if the COMPUTE_STATUS_DISABLED trait should be added to or removed from the provider's set of traits based on the related nova-compute service disabled status. :param context: RequestContext for cell database access :param traits: set of traits for the compute node resource provider; this is modified by reference """ trait = os_traits.COMPUTE_STATUS_DISABLED try: service = objects.Service.get_by_compute_host(context, self.host) if service.disabled: # The service is disabled so make sure the trait is reported. traits.add(trait) else: # The service is not disabled so do not report the trait. traits.discard(trait) except exception.NotFound: # This should not happen but handle it gracefully. The scheduler # should ignore this node if the compute service record is gone. LOG.error('Unable to find services table record for nova-compute ' 'host %s', self.host) def _should_expose_remote_managed_ports_trait(self, is_supported: bool): """Determine whether COMPUTE_REMOTE_MANAGED_PORTS should be exposed. Determines if the COMPUTE_REMOTE_MANAGED_PORTS trait needs to be exposed based on the respective compute driver capability and the presence of remote managed devices on a given host. 
Whether such devices are present or not depends on the Whitelist configuration (presence of a remote_managed tag association with some PCI devices) and their physical presence (plugged in, enumerated by the OS). The aim of having this check is to optimize host lookup by prefiltering hosts that have compute driver support but no hardware. The check does not consider free device count - just the presence of device pools since device availability may change between a prefilter check and a later check in PciPassthroughFilter. :param bool is_supported: Is the trait supported by the compute driver """ return (is_supported and self.pci_tracker.pci_stats.has_remote_managed_device_pools()) def _get_traits(self, context, nodename, provider_tree): """Synchronizes internal and external traits for the node provider. This works in conjunction with the ComptueDriver.update_provider_tree flow and is used to synchronize traits reported by the compute driver, traits based on information in the ComputeNode record, and traits set externally using the placement REST API. :param context: RequestContext for cell database access :param nodename: ComputeNode.hypervisor_hostname for the compute node resource provider whose traits are being synchronized; the node must be in the ProviderTree. :param provider_tree: ProviderTree being updated """ # Get the traits from the ProviderTree which will be the set # of virt-owned traits plus any externally defined traits set # on the provider that aren't owned by the virt driver. traits = provider_tree.data(nodename).traits # Now get the driver's capabilities and add any supported # traits that are missing, and remove any existing set traits # that are not currently supported. for trait, supported in self.driver.capabilities_as_traits().items(): add_trait = supported if trait == os_traits.COMPUTE_REMOTE_MANAGED_PORTS: add_trait &= self._should_expose_remote_managed_ports_trait( supported) if add_trait: traits.add(trait) elif trait in traits: traits.remove(trait) # Always mark the compute node. This lets other processes (possibly # unrelated to nova or even OpenStack) find and distinguish these # providers easily. traits.add(os_traits.COMPUTE_NODE) self._sync_compute_service_disabled_trait(context, traits) return list(traits) @retrying.retry( stop_max_attempt_number=4, retry_on_exception=lambda e: isinstance( e, ( exception.ResourceProviderUpdateConflict, exception.PlacementReshapeConflict, ), ), ) def _update_to_placement(self, context, compute_node, startup): """Send resource and inventory changes to placement.""" # NOTE(jianghuaw): Some resources(e.g. VGPU) are not saved in the # object of compute_node; instead the inventory data for these # resource is reported by driver's update_provider_tree(). So even if # there is no resource change for compute_node, we need proceed # to get inventory and use report client interfaces to update # inventory to placement. It's report client's responsibility to # ensure the update request to placement only happens when inventory # is changed. nodename = compute_node.hypervisor_hostname # Persist the stats to the Scheduler # Retrieve the provider tree associated with this compute node. If # it doesn't exist yet, this will create it with a (single, root) # provider corresponding to the compute node. 
prov_tree = self.reportclient.get_provider_tree_and_ensure_root( context, compute_node.uuid, name=compute_node.hypervisor_hostname) # Let the virt driver rearrange the provider tree and set/update # the inventory, traits, and aggregates throughout. allocs = self.reportclient.get_allocations_for_provider_tree( context, nodename) driver_reshaped = False try: self.driver.update_provider_tree(prov_tree, nodename) except exception.ReshapeNeeded: if not startup: # This isn't supposed to happen during periodic, so raise # it up; the compute manager will treat it specially. raise LOG.info("Performing resource provider inventory and " "allocation data migration during compute service " "startup or fast-forward upgrade.") self.driver.update_provider_tree( prov_tree, nodename, allocations=allocs) driver_reshaped = True # Inject driver capabilities traits into the provider # tree. We need to determine the traits that the virt # driver owns - so those that come from the tree itself # (via the virt driver) plus the compute capabilities # traits, and then merge those with the traits set # externally that the driver does not own - and remove any # set on the provider externally that the virt owns but # aren't in the current list of supported traits. For # example, let's say we reported multiattach support as a # trait at t1 and then at t2 it's not, so we need to # remove it. But at both t1 and t2 there is a # CUSTOM_VENDOR_TRAIT_X which we can't touch because it # was set externally on the provider. # We also want to sync the COMPUTE_STATUS_DISABLED trait based # on the related nova-compute service's disabled status. traits = self._get_traits( context, nodename, provider_tree=prov_tree) prov_tree.update_traits(nodename, traits) instances_under_same_host_resize = [ migration.instance_uuid for migration in self.tracked_migrations.values() if migration.is_same_host_resize ] # NOTE(gibi): Tracking PCI in placement is different from other # resources. # # While driver.update_provider_tree is used to let the virt driver # create any kind of placement model for a resource the PCI data # modelling is done virt driver independently by the PCI tracker. # So the placement reporting needs to be also done here in the resource # tracker independently of the virt driver. # # Additionally, when PCI tracking in placement was introduced there was # already PCI allocations in nova. So both the PCI inventories and # allocations needs to be healed. Moreover, to support rolling upgrade # the placement prefilter for PCI devices was not turned on by default # at the first release of this feature. Therefore, there could be new # PCI allocation without placement being involved until the prefilter # is enabled. So we need to be ready to heal PCI allocations at # every call not just at startup. pci_reshaped = pci_placement_translator.update_provider_tree_for_pci( prov_tree, nodename, self.pci_tracker, allocs, instances_under_same_host_resize, ) self.provider_tree = prov_tree # This merges in changes from the provider config files loaded in init self._merge_provider_configs(self.provider_configs, prov_tree) try: # Flush any changes. If we either processed ReshapeNeeded above or # update_provider_tree_for_pci did reshape, then we need to pass # allocs to update_from_provider_tree to hit placement's POST # /reshaper route. 
self.reportclient.update_from_provider_tree( context, prov_tree, allocations=allocs if driver_reshaped or pci_reshaped else None ) except exception.InventoryInUse as e: # This means an inventory reconfiguration (e.g.: removing a parent # PF and adding a VF under that parent) was not possible due to # existing allocations. Translate the exception to prevent the # compute service to start raise exception.PlacementPciException(error=str(e)) def _update(self, context, compute_node, startup=False): """Update partial stats locally and populate them to Scheduler.""" self._update_to_placement(context, compute_node, startup) if self.pci_tracker: # sync PCI device pool state stored in the compute node with # the actual state from the PCI tracker as we commit changes in # the DB and in the PCI tracker below dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj() compute_node.pci_device_pools = dev_pools_obj # _resource_change will update self.old_resources if it detects changes # but we want to restore those if compute_node.save() fails. nodename = compute_node.hypervisor_hostname old_compute = self.old_resources[nodename] if self._resource_change(compute_node): # If the compute_node's resource changed, update to DB. Note that # _update_to_placement above does not supersede the need to do this # because there are stats-related fields in the ComputeNode object # which could have changed and still need to be reported to the # scheduler filters/weighers (which could be out of tree as well). try: compute_node.save() except Exception: # Restore the previous state in self.old_resources so that on # the next trip through here _resource_change does not have # stale data to compare. with excutils.save_and_reraise_exception(logger=LOG): self.old_resources[nodename] = old_compute if self.pci_tracker: self.pci_tracker.save(context) def _update_usage(self, usage, nodename, sign=1): # TODO(stephenfin): We don't use the CPU, RAM and disk fields for much # except 'Aggregate(Core|Ram|Disk)Filter', the 'os-hypervisors' API, # and perhaps some out-of-tree filters. 
Once the in-tree stuff is # removed or updated to use information from placement, we can think # about dropping the fields from the 'ComputeNode' object entirely mem_usage = usage['memory_mb'] disk_usage = usage.get('root_gb', 0) vcpus_usage = usage.get('vcpus', 0) cn = self.compute_nodes[nodename] cn.memory_mb_used += sign * mem_usage cn.local_gb_used += sign * disk_usage cn.local_gb_used += sign * usage.get('ephemeral_gb', 0) cn.local_gb_used += sign * usage.get('swap', 0) / 1024 cn.vcpus_used += sign * vcpus_usage # free ram and disk may be negative, depending on policy: cn.free_ram_mb = cn.memory_mb - cn.memory_mb_used cn.free_disk_gb = cn.local_gb - cn.local_gb_used stats = self.stats[nodename] cn.running_vms = stats.num_instances # calculate the NUMA usage, assuming the instance is actually using # NUMA, of course if cn.numa_topology and usage.get('numa_topology'): instance_numa_topology = usage.get('numa_topology') # the ComputeNode.numa_topology field is a StringField, so # deserialize host_numa_topology = objects.NUMATopology.obj_from_db_obj( cn.numa_topology) free = sign == -1 # ...and reserialize once we save it back cn.numa_topology = hardware.numa_usage_from_instance_numa( host_numa_topology, instance_numa_topology, free)._to_json() def _get_migration_context_resource(self, resource, instance, prefix='new_'): migration_context = instance.migration_context resource = prefix + resource if migration_context and resource in migration_context: return getattr(migration_context, resource) return None def _update_usage_from_migration(self, context, instance, migration, nodename): """Update usage for a single migration. The record may represent an incoming or outbound migration. """ uuid = migration.instance_uuid LOG.info("Updating resource usage from migration %s", migration.uuid, instance_uuid=uuid) incoming = (migration.dest_compute == self.host and migration.dest_node == nodename) outbound = (migration.source_compute == self.host and migration.source_node == nodename) same_node = (incoming and outbound) tracked = uuid in self.tracked_instances itype = None numa_topology = None sign = 0 if same_node: # Same node resize. Record usage for the 'new_' resources. This # is executed on resize_claim(). if instance['instance_type_id'] == migration.old_instance_type_id: itype = self._get_flavor(instance, 'new_', migration) numa_topology = self._get_migration_context_resource( 'numa_topology', instance) # Allocate pci device(s) for the instance. sign = 1 else: # The instance is already set to the new flavor (this is done # by the compute manager on finish_resize()), hold space for a # possible revert to the 'old_' resources. # NOTE(lbeliveau): When the periodic audit timer gets # triggered, the compute usage gets reset. The usage for an # instance that is migrated to the new flavor but not yet # confirmed/reverted will first get accounted for by # _update_usage_from_instances(). This method will then be # called, and we need to account for the '_old' resources # (just in case). itype = self._get_flavor(instance, 'old_', migration) numa_topology = self._get_migration_context_resource( 'numa_topology', instance, prefix='old_') elif incoming and not tracked: # instance has not yet migrated here: itype = self._get_flavor(instance, 'new_', migration) numa_topology = self._get_migration_context_resource( 'numa_topology', instance) # Allocate pci device(s) for the instance. 
sign = 1 LOG.debug('Starting to track incoming migration %s with flavor %s', migration.uuid, itype.flavorid, instance=instance) elif outbound and not tracked: # instance migrated, but record usage for a possible revert: itype = self._get_flavor(instance, 'old_', migration) numa_topology = self._get_migration_context_resource( 'numa_topology', instance, prefix='old_') # We could be racing with confirm_resize setting the # instance.old_flavor field to None before the migration status # is "confirmed" so if we did not find the flavor in the outgoing # resized instance we won't track it. if itype: LOG.debug('Starting to track outgoing migration %s with ' 'flavor %s', migration.uuid, itype.flavorid, instance=instance) if itype: cn = self.compute_nodes[nodename] usage = self._get_usage_dict( itype, instance, numa_topology=numa_topology) if self.pci_tracker and sign: self.pci_tracker.update_pci_for_instance( context, instance, sign=sign) self._update_usage(usage, nodename) if self.pci_tracker: obj = self.pci_tracker.stats.to_device_pools_obj() cn.pci_device_pools = obj else: obj = objects.PciDevicePoolList() cn.pci_device_pools = obj self.tracked_migrations[uuid] = migration def _update_usage_from_migrations(self, context, migrations, nodename): filtered = {} instances = {} self.tracked_migrations.clear() # do some defensive filtering against bad migrations records in the # database: for migration in migrations: uuid = migration.instance_uuid try: if uuid not in instances: # Track migrating instance even if it is deleted but still # has database record. This kind of instance might be # deleted during unfinished migrating but exist in the # hypervisor. migration._context = context.elevated(read_deleted='yes') instances[uuid] = migration.instance except exception.InstanceNotFound as e: # migration referencing deleted instance LOG.debug('Migration instance not found: %s', e) continue # Skip migration if instance is neither in a resize state nor is # live-migrating. if (not _instance_in_resize_state(instances[uuid]) and not _instance_is_live_migrating(instances[uuid])): LOG.debug('Skipping migration as instance is neither ' 'resizing nor live-migrating.', instance_uuid=uuid) continue # filter to most recently updated migration for each instance: other_migration = filtered.get(uuid, None) # NOTE(claudiub): In Python 3, you cannot compare NoneTypes. if other_migration: om = other_migration other_time = om.updated_at or om.created_at migration_time = migration.updated_at or migration.created_at if migration_time > other_time: filtered[uuid] = migration else: filtered[uuid] = migration for migration in filtered.values(): instance = instances[migration.instance_uuid] # Skip migration (and mark it as error) if it doesn't match the # instance migration id. # This can happen if we have a stale migration record. # We want to proceed if instance.migration_context is None if (instance.migration_context is not None and instance.migration_context.migration_id != migration.id): LOG.info("Current instance migration %(im)s doesn't match " "migration %(m)s, marking migration as error. 
" "This can occur if a previous migration for this " "instance did not complete.", {'im': instance.migration_context.migration_id, 'm': migration.id}) migration.status = "error" migration.save() continue try: self._update_usage_from_migration(context, instance, migration, nodename) except exception.FlavorNotFound: LOG.warning("Flavor could not be found, skipping migration.", instance_uuid=instance.uuid) continue def _update_usage_from_instance(self, context, instance, nodename, is_removed=False): """Update usage for a single instance.""" uuid = instance['uuid'] is_new_instance = uuid not in self.tracked_instances # NOTE(sfinucan): Both brand new instances as well as instances that # are being unshelved will have is_new_instance == True is_removed_instance = not is_new_instance and (is_removed or vm_states.allow_resource_removal( vm_state=instance['vm_state'], task_state=instance.task_state)) if is_new_instance: self.tracked_instances.add(uuid) sign = 1 if is_removed_instance: self.tracked_instances.remove(uuid) self._release_assigned_resources(instance.resources) sign = -1 cn = self.compute_nodes[nodename] stats = self.stats[nodename] stats.update_stats_for_instance(instance, is_removed_instance) cn.stats = stats # if it's a new or deleted instance: if is_new_instance or is_removed_instance: if self.pci_tracker: self.pci_tracker.update_pci_for_instance(context, instance, sign=sign) # new instance, update compute node resource usage: self._update_usage(self._get_usage_dict(instance, instance), nodename, sign=sign) # Stop tracking removed instances in the is_bfv cache. This needs to # happen *after* calling _get_usage_dict() since that relies on the # is_bfv cache. if is_removed_instance and uuid in self.is_bfv: del self.is_bfv[uuid] cn.current_workload = stats.calculate_workload() if self.pci_tracker: obj = self.pci_tracker.stats.to_device_pools_obj() cn.pci_device_pools = obj else: cn.pci_device_pools = objects.PciDevicePoolList() def _update_usage_from_instances(self, context, instances, nodename): """Calculate resource usage based on instance utilization. This is different than the hypervisor's view as it will account for all instances assigned to the local compute host, even if they are not currently powered on. """ self.tracked_instances.clear() cn = self.compute_nodes[nodename] # set some initial values, reserve room for host/hypervisor: cn.local_gb_used = CONF.reserved_host_disk_mb / 1024 cn.memory_mb_used = CONF.reserved_host_memory_mb cn.vcpus_used = CONF.reserved_host_cpus cn.free_ram_mb = (cn.memory_mb - cn.memory_mb_used) cn.free_disk_gb = (cn.local_gb - cn.local_gb_used) cn.current_workload = 0 cn.running_vms = 0 instance_by_uuid = {} for instance in instances: if not vm_states.allow_resource_removal( vm_state=instance['vm_state'], task_state=instance.task_state): self._update_usage_from_instance(context, instance, nodename) instance_by_uuid[instance.uuid] = instance return instance_by_uuid def _remove_deleted_instances_allocations(self, context, cn, migrations, instance_by_uuid): migration_uuids = [migration.uuid for migration in migrations if 'uuid' in migration] # NOTE(jaypipes): All of this code sucks. 
It's basically dealing with # all the corner cases in move, local delete, unshelve and rebuild # operations for when allocations should be deleted when things didn't # happen according to the normal flow of events where the scheduler # always creates allocations for an instance try: # pai: report.ProviderAllocInfo namedtuple pai = self.reportclient.get_allocations_for_resource_provider( context, cn.uuid) except (exception.ResourceProviderAllocationRetrievalFailed, ks_exc.ClientException) as e: LOG.error("Skipping removal of allocations for deleted instances: " "%s", e) return allocations = pai.allocations if not allocations: # The main loop below would short-circuit anyway, but this saves us # the (potentially expensive) context.elevated construction below. return read_deleted_context = context.elevated(read_deleted='yes') for consumer_uuid, alloc in allocations.items(): if consumer_uuid in self.tracked_instances: LOG.debug("Instance %s actively managed on this compute host " "and has allocations in placement: %s.", consumer_uuid, alloc) continue if consumer_uuid in migration_uuids: LOG.debug("Migration %s is active on this compute host " "and has allocations in placement: %s.", consumer_uuid, alloc) continue # We know these are instances now, so proceed instance_uuid = consumer_uuid instance = instance_by_uuid.get(instance_uuid) if not instance: try: instance = objects.Instance.get_by_uuid( read_deleted_context, consumer_uuid, expected_attrs=[]) except exception.InstanceNotFound: # The instance isn't even in the database. Either the # scheduler _just_ created an allocation for it and we're # racing with the creation in the cell database, or the # instance was deleted and fully archived before we got a # chance to run this. The former is far more likely than # the latter. Avoid deleting allocations for a building # instance here. LOG.info("Instance %(uuid)s has allocations against this " "compute host but is not found in the database.", {'uuid': instance_uuid}, exc_info=False) continue # NOTE(mriedem): A cross-cell migration will work with instance # records across two cells once the migration is confirmed/reverted # one of them will be deleted but the instance still exists in the # other cell. Before the instance is destroyed from the old cell # though it is marked hidden=True so if we find a deleted hidden # instance with allocations against this compute node we just # ignore it since the migration operation will handle cleaning up # those allocations. if instance.deleted and not instance.hidden: # The instance is gone, so we definitely want to remove # allocations associated with it. LOG.debug("Instance %s has been deleted (perhaps locally). " "Deleting allocations that remained for this " "instance against this compute host: %s.", instance_uuid, alloc) # We don't force delete the allocation in this case because if # there is a conflict we'll retry on the next # update_available_resource periodic run. self.reportclient.delete_allocation_for_instance(context, instance_uuid, force=False) continue if not instance.host: # Allocations related to instances being scheduled should not # be deleted if we already wrote the allocation previously. LOG.debug("Instance %s has been scheduled to this compute " "host, the scheduler has made an allocation " "against this compute node but the instance has " "yet to start. 
Skipping heal of allocation: %s.", instance_uuid, alloc) continue if (instance.host == cn.host and instance.node == cn.hypervisor_hostname): # The instance is supposed to be on this compute host but is # not in the list of actively managed instances. This could be # because we are racing with an instance_claim call during # initial build or unshelve where the instance host/node is set # before the instance is added to tracked_instances. If the # task_state is set, then consider things in motion and log at # debug level instead of warning. if instance.task_state: LOG.debug('Instance with task_state "%s" is not being ' 'actively managed by this compute host but has ' 'allocations referencing this compute node ' '(%s): %s. Skipping heal of allocations during ' 'the task state transition.', instance.task_state, cn.uuid, alloc, instance=instance) else: LOG.warning("Instance %s is not being actively managed by " "this compute host but has allocations " "referencing this compute host: %s. Skipping " "heal of allocation because we do not know " "what to do.", instance_uuid, alloc) continue if instance.host != cn.host: # The instance has been moved to another host either via a # migration, evacuation or unshelve in between the time when we # ran InstanceList.get_by_host_and_node(), added those # instances to RT.tracked_instances and the above # Instance.get_by_uuid() call. We SHOULD attempt to remove any # allocations that reference this compute host if the VM is in # a stable terminal state (i.e. it isn't in a state of waiting # for resize to confirm/revert), however if the destination # host is an Ocata compute host, it will delete the allocation # that contains this source compute host information anyway and # recreate an allocation that only refers to itself. So we # don't need to do anything in that case. Just log the # situation here for information but don't attempt to delete or # change the allocation. LOG.warning("Instance %s has been moved to another host " "%s(%s). There are allocations remaining against " "the source host that might need to be removed: " "%s.", instance_uuid, instance.host, instance.node, alloc) def delete_allocation_for_evacuated_instance(self, context, instance, node, node_type='source'): # Clean up the instance allocation from this node in placement cn_uuid = self.compute_nodes[node].uuid if not self.reportclient.remove_provider_tree_from_instance_allocation( context, instance.uuid, cn_uuid): LOG.error("Failed to clean allocation of evacuated " "instance on the %s node %s", node_type, cn_uuid, instance=instance) def delete_allocation_for_shelve_offloaded_instance(self, context, instance): self.reportclient.delete_allocation_for_instance( context, instance.uuid, force=True) def _verify_resources(self, resources): resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info", "vcpus_used", "memory_mb_used", "local_gb_used", "numa_topology"] missing_keys = [k for k in resource_keys if k not in resources] if missing_keys: reason = _("Missing keys: %s") % missing_keys raise exception.InvalidInput(reason=reason) def _get_flavor(self, instance, prefix, migration): """Get the flavor from instance.""" if migration.is_resize: return getattr(instance, '%sflavor' % prefix) # NOTE(ndipanov): Certain migration types (all but resize) # do not change flavors so there is no need to stash # them. In that case - just get the instance flavor. return instance.flavor def _get_usage_dict(self, object_or_dict, instance, **updates): """Make a usage dict _update methods expect. 
Accepts a dict or an Instance or Flavor object, and a set of updates. Converts the object to a dict and applies the updates. :param object_or_dict: instance or flavor as an object or just a dict :param instance: nova.objects.Instance for the related operation; this is needed to determine if the instance is volume-backed :param updates: key-value pairs to update the passed object. Currently only considers 'numa_topology', all other keys are ignored. :returns: a dict with all the information from object_or_dict updated with updates """ def _is_bfv(): # Check to see if we have the is_bfv value cached. if instance.uuid in self.is_bfv: is_bfv = self.is_bfv[instance.uuid] else: is_bfv = compute_utils.is_volume_backed_instance( instance._context, instance) self.is_bfv[instance.uuid] = is_bfv return is_bfv usage = {} if isinstance(object_or_dict, objects.Instance): is_bfv = _is_bfv() usage = {'memory_mb': object_or_dict.flavor.memory_mb, 'swap': object_or_dict.flavor.swap, 'vcpus': object_or_dict.flavor.vcpus, 'root_gb': (0 if is_bfv else object_or_dict.flavor.root_gb), 'ephemeral_gb': object_or_dict.flavor.ephemeral_gb, 'numa_topology': object_or_dict.numa_topology} elif isinstance(object_or_dict, objects.Flavor): usage = obj_base.obj_to_primitive(object_or_dict) if _is_bfv(): usage['root_gb'] = 0 else: usage.update(object_or_dict) for key in ('numa_topology',): if key in updates: usage[key] = updates[key] return usage def _merge_provider_configs(self, provider_configs, provider_tree): """Takes a provider tree and merges any provider configs. Any providers in the update that are not present in the tree are logged and ignored. Providers identified by both $COMPUTE_NODE and explicit UUID/NAME will only be updated with the additional inventories and traits in the explicit provider config entry. :param provider_configs: The provider configs to merge :param provider_tree: The provider tree to be updated in place """ processed_providers = {} provider_custom_traits = {} for uuid_or_name, provider_data in provider_configs.items(): additional_traits = provider_data.get( "traits", {}).get("additional", []) additional_inventories = provider_data.get( "inventories", {}).get("additional", []) # This is just used to make log entries more useful source_file_name = provider_data['__source_file'] # In most cases this will contain a single provider except in # the case of UUID=$COMPUTE_NODE, it may contain multiple. providers_to_update = self._get_providers_to_update( uuid_or_name, provider_tree, source_file_name) for provider in providers_to_update: # $COMPUTE_NODE is used to define a "default" rule to apply # to all your compute nodes, but then override it for # specific ones. # # If this is for UUID=$COMPUTE_NODE, check if provider is also # explicitly identified. If it is, skip updating it with the # $COMPUTE_NODE entry data. if uuid_or_name == "$COMPUTE_NODE": if any(_pid in provider_configs for _pid in [provider.name, provider.uuid]): continue # for each provider specified by name or uuid check that # we have not already processed it to prevent duplicate # declarations of the same provider. current_uuid = provider.uuid if current_uuid in processed_providers: raise ValueError(_( "Provider config '%(source_file_name)s' conflicts " "with provider config '%(processed_providers)s'. 
" "The same provider is specified using both name " "'%(uuid_or_name)s' and uuid '%(current_uuid)s'.") % { 'source_file_name': source_file_name, 'processed_providers': processed_providers[current_uuid], 'uuid_or_name': uuid_or_name, 'current_uuid': current_uuid } ) # NOTE(sean-k-mooney): since each provider should be processed # at most once if a provider has custom traits they were # set either in previous iteration, the virt driver or via the # the placement api. As a result we must ignore them when # checking for duplicate traits so we construct a set of the # existing custom traits. if current_uuid not in provider_custom_traits: provider_custom_traits[current_uuid] = { trait for trait in provider.traits if trait.startswith('CUSTOM') } existing_custom_traits = provider_custom_traits[current_uuid] if additional_traits: intersect = set(provider.traits) & set(additional_traits) intersect -= existing_custom_traits if intersect: invalid = ','.join(intersect) raise ValueError(_( "Provider config '%(source_file_name)s' attempts " "to define a trait that is owned by the " "virt driver or specified via the placement api. " "Invalid traits '%(invalid)s' must be removed " "from '%(source_file_name)s'.") % { 'source_file_name': source_file_name, 'invalid': invalid } ) provider_tree.add_traits(provider.uuid, *additional_traits) if additional_inventories: merged_inventory = provider.inventory intersect = (merged_inventory.keys() & {rc for inv_dict in additional_inventories for rc in inv_dict}) if intersect: raise ValueError(_( "Provider config '%(source_file_name)s' attempts " "to define an inventory that is owned by the " "virt driver. Invalid inventories '%(invalid)s' " "must be removed from '%(source_file_name)s'.") % { 'source_file_name': source_file_name, 'invalid': ','.join(intersect) } ) for inventory in additional_inventories: merged_inventory.update(inventory) provider_tree.update_inventory( provider.uuid, merged_inventory) processed_providers[current_uuid] = source_file_name def _get_providers_to_update(self, uuid_or_name, provider_tree, source_file): """Identifies the providers to be updated. Intended only to be consumed by _merge_provider_configs() :param provider: Provider config data :param provider_tree: Provider tree to get providers from :param source_file: Provider config file containing the inventories :returns: list of ProviderData """ # $COMPUTE_NODE is used to define a "default" rule to apply # to all your compute nodes, but then override it for # specific ones. 
if uuid_or_name == "$COMPUTE_NODE": return [root.data() for root in provider_tree.roots if os_traits.COMPUTE_NODE in root.traits] try: providers_to_update = [provider_tree.data(uuid_or_name)] # Remove the provider from absent provider list if present # so we can re-warn if the provider disappears again later self.absent_providers.discard(uuid_or_name) except ValueError: providers_to_update = [] if uuid_or_name not in self.absent_providers: LOG.warning( "Provider '%(uuid_or_name)s' specified in provider " "config file '%(source_file)s' does not exist in the " "ProviderTree and will be ignored.", {"uuid_or_name": uuid_or_name, "source_file": source_file}) self.absent_providers.add(uuid_or_name) return providers_to_update def build_failed(self, nodename): """Increments the failed_builds stats for the given node.""" self.stats[nodename].build_failed() def build_succeeded(self, nodename): """Resets the failed_builds stats for the given node.""" self.stats[nodename].build_succeeded() @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def claim_pci_devices(self, context, pci_requests, instance_numa_topology): """Claim instance PCI resources :param context: security context :param pci_requests: a list of nova.objects.InstancePCIRequests :param instance_numa_topology: an InstanceNumaTopology object used to ensure PCI devices are aligned with the NUMA topology of the instance :returns: a list of nova.objects.PciDevice objects """ result = self.pci_tracker.claim_instance( context, pci_requests, instance_numa_topology) self.pci_tracker.save(context) return result @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def unclaim_pci_devices(self, context, pci_device, instance): """Deallocate PCI devices :param context: security context :param pci_device: the objects.PciDevice describing the PCI device to be freed :param instance: the objects.Instance the PCI resources are freed from """ self.pci_tracker.free_device(pci_device, instance) self.pci_tracker.save(context) @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def allocate_pci_devices_for_instance(self, context, instance): """Allocate instance claimed PCI resources :param context: security context :param instance: instance object """ self.pci_tracker.allocate_instance(instance) self.pci_tracker.save(context) @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def free_pci_device_allocations_for_instance(self, context, instance): """Free instance allocated PCI resources :param context: security context :param instance: instance object """ self.pci_tracker.free_instance_allocations(context, instance) self.pci_tracker.save(context) @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def free_pci_device_claims_for_instance(self, context, instance): """Free instance claimed PCI resources :param context: security context :param instance: instance object """ self.pci_tracker.free_instance_claims(context, instance) self.pci_tracker.save(context) @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def finish_evacuation(self, instance, node, migration): instance.apply_migration_context() # NOTE (ndipanov): This save will now update the host and node # attributes making sure that next RT pass is consistent since # it will be based on the instance and not the migration DB # entry. 
instance.host = self.host instance.node = node # NOTE(danms): We can be called with node=None, which means compute_id # should also be None instance.compute_id = node and self.compute_nodes[node].id or None instance.save() instance.drop_migration_context() # NOTE (ndipanov): Mark the migration as done only after we # mark the instance as belonging to this host. if migration: migration.status = 'done' migration.save() @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True) def clean_compute_node_cache(self, compute_nodes_in_db): """Clean the compute node cache of any nodes that no longer exist. :param compute_nodes_in_db: list of ComputeNode objects from the DB. """ compute_nodes_in_db_nodenames = {cn.hypervisor_hostname for cn in compute_nodes_in_db} stale_cns = set(self.compute_nodes) - compute_nodes_in_db_nodenames for stale_cn in stale_cns: # NOTE(mgoddard): we have found a node in the cache that has no # compute node in the DB. This could be due to a node rebalance # where another compute service took ownership of the node. Clean # up the cache. self.remove_node(stale_cn) self.reportclient.invalidate_resource_provider(stale_cn) def get_node_by_name(self, nodename): """Get a node from our list by name. :raises: ComputeHostNotFound if missing """ try: return self.compute_nodes[nodename] except KeyError: raise exception.ComputeHostNotFound(host=nodename) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/rpcapi.py0000664000175000017500000022115700000000000017041 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the compute RPC API. """ from oslo_concurrency import lockutils from oslo_log import log as logging import oslo_messaging as messaging from oslo_utils import excutils import nova.conf from nova import context from nova import exception from nova.i18n import _ from nova import objects from nova.objects import base as objects_base from nova.objects import service as service_obj from nova import profiler from nova import rpc CONF = nova.conf.CONF RPC_TOPIC = "compute" LOG = logging.getLogger(__name__) LAST_VERSION = None NO_COMPUTES_WARNING = False # Global for ComputeAPI.router. _ROUTER = None def reset_globals(): global NO_COMPUTES_WARNING global LAST_VERSION global _ROUTER NO_COMPUTES_WARNING = False LAST_VERSION = None _ROUTER = None def _compute_host(host, instance): '''Get the destination host for a message. :param host: explicit host to send the message to. :param instance: If an explicit host was not specified, use instance['host'] :returns: A host ''' if host: return host if not instance: raise exception.NovaException(_('No compute host specified')) if not instance.host: raise exception.NovaException(_('Unable to find host for ' 'Instance %s') % instance.uuid) return instance.host @profiler.trace_cls("rpc") class ComputeAPI(object): '''Client side of the compute rpc API. API version history: * 1.0 - Initial version. 
* 1.1 - Adds get_host_uptime() * 1.2 - Adds check_can_live_migrate_[destination|source] * 1.3 - Adds change_instance_metadata() * 1.4 - Remove instance_uuid, add instance argument to reboot_instance() * 1.5 - Remove instance_uuid, add instance argument to pause_instance(), unpause_instance() * 1.6 - Remove instance_uuid, add instance argument to suspend_instance() * 1.7 - Remove instance_uuid, add instance argument to get_console_output() * 1.8 - Remove instance_uuid, add instance argument to add_fixed_ip_to_instance() * 1.9 - Remove instance_uuid, add instance argument to attach_volume() * 1.10 - Remove instance_id, add instance argument to check_can_live_migrate_destination() * 1.11 - Remove instance_id, add instance argument to check_can_live_migrate_source() * 1.12 - Remove instance_uuid, add instance argument to confirm_resize() * 1.13 - Remove instance_uuid, add instance argument to detach_volume() * 1.14 - Remove instance_uuid, add instance argument to finish_resize() * 1.15 - Remove instance_uuid, add instance argument to finish_revert_resize() * 1.16 - Remove instance_uuid, add instance argument to get_diagnostics() * 1.17 - Remove instance_uuid, add instance argument to get_vnc_console() * 1.18 - Remove instance_uuid, add instance argument to inject_file() * 1.19 - Remove instance_uuid, add instance argument to inject_network_info() * 1.20 - Remove instance_id, add instance argument to post_live_migration_at_destination() * 1.21 - Remove instance_uuid, add instance argument to power_off_instance() and stop_instance() * 1.22 - Remove instance_uuid, add instance argument to power_on_instance() and start_instance() * 1.23 - Remove instance_id, add instance argument to pre_live_migration() * 1.24 - Remove instance_uuid, add instance argument to rebuild_instance() * 1.25 - Remove instance_uuid, add instance argument to remove_fixed_ip_from_instance() * 1.26 - Remove instance_id, add instance argument to remove_volume_connection() * 1.27 - Remove instance_uuid, add instance argument to rescue_instance() * 1.28 - Remove instance_uuid, add instance argument to reset_network() * 1.29 - Remove instance_uuid, add instance argument to resize_instance() * 1.30 - Remove instance_uuid, add instance argument to resume_instance() * 1.31 - Remove instance_uuid, add instance argument to revert_resize() * 1.32 - Remove instance_id, add instance argument to rollback_live_migration_at_destination() * 1.33 - Remove instance_uuid, add instance argument to set_admin_password() * 1.34 - Remove instance_uuid, add instance argument to snapshot_instance() * 1.35 - Remove instance_uuid, add instance argument to unrescue_instance() * 1.36 - Remove instance_uuid, add instance argument to change_instance_metadata() * 1.37 - Remove instance_uuid, add instance argument to terminate_instance() * 1.38 - Changes to prep_resize(): * remove instance_uuid, add instance * remove instance_type_id, add instance_type * remove topic, it was unused * 1.39 - Remove instance_uuid, add instance argument to run_instance() * 1.40 - Remove instance_id, add instance argument to live_migration() * 1.41 - Adds refresh_instance_security_rules() * 1.42 - Add reservations arg to prep_resize(), resize_instance(), finish_resize(), confirm_resize(), revert_resize() and finish_revert_resize() * 1.43 - Add migrate_data to live_migration() * 1.44 - Adds reserve_block_device_name() * 2.0 - Remove 1.x backwards compat * 2.1 - Adds orig_sys_metadata to rebuild_instance() * 2.2 - Adds slave_info parameter to add_aggregate_host() and 
remove_aggregate_host() * 2.3 - Adds volume_id to reserve_block_device_name() * 2.4 - Add bdms to terminate_instance * 2.5 - Add block device and network info to reboot_instance * 2.6 - Remove migration_id, add migration to resize_instance * 2.7 - Remove migration_id, add migration to confirm_resize * 2.8 - Remove migration_id, add migration to finish_resize * 2.9 - Add publish_service_capabilities() * 2.10 - Adds filter_properties and request_spec to prep_resize() * 2.11 - Adds soft_delete_instance() and restore_instance() * 2.12 - Remove migration_id, add migration to revert_resize * 2.13 - Remove migration_id, add migration to finish_revert_resize * 2.14 - Remove aggregate_id, add aggregate to add_aggregate_host * 2.15 - Remove aggregate_id, add aggregate to remove_aggregate_host * 2.16 - Add instance_type to resize_instance * 2.17 - Add get_backdoor_port() * 2.18 - Add bdms to rebuild_instance * 2.19 - Add node to run_instance * 2.20 - Add node to prep_resize * 2.21 - Add migrate_data dict param to pre_live_migration() * 2.22 - Add recreate, on_shared_storage and host arguments to rebuild_instance() * 2.23 - Remove network_info from reboot_instance * 2.24 - Added get_spice_console method * 2.25 - Add attach_interface() and detach_interface() * 2.26 - Add validate_console_port to ensure the service connects to vnc on the correct port * 2.27 - Adds 'reservations' to terminate_instance() and soft_delete_instance() ... Grizzly supports message version 2.27. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 2.27. * 2.28 - Adds check_instance_shared_storage() * 2.29 - Made start_instance() and stop_instance() take new-world instance objects * 2.30 - Adds live_snapshot_instance() * 2.31 - Adds shelve_instance(), shelve_offload_instance, and unshelve_instance() * 2.32 - Make reboot_instance take a new world instance object * 2.33 - Made suspend_instance() and resume_instance() take new-world instance objects * 2.34 - Added swap_volume() * 2.35 - Made terminate_instance() and soft_delete_instance() take new-world instance objects * 2.36 - Made pause_instance() and unpause_instance() take new-world instance objects * 2.37 - Added the legacy_bdm_in_spec parameter to run_instance * 2.38 - Made check_can_live_migrate_[destination|source] take new-world instance objects * 2.39 - Made revert_resize() and confirm_resize() take new-world instance objects * 2.40 - Made reset_network() take new-world instance object * 2.41 - Make inject_network_info take new-world instance object * 2.42 - Splits snapshot_instance() into snapshot_instance() and backup_instance() and makes them take new-world instance objects. * 2.43 - Made prep_resize() take new-world instance object * 2.44 - Add volume_snapshot_create(), volume_snapshot_delete() * 2.45 - Made resize_instance() take new-world objects * 2.46 - Made finish_resize() take new-world objects * 2.47 - Made finish_revert_resize() take new-world objects ... Havana supports message version 2.47. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 2.47. * 2.48 - Make add_aggregate_host() and remove_aggregate_host() take new-world objects * ... 
- Remove live_snapshot() that was never actually used * 3.0 - Remove 2.x compatibility * 3.1 - Update get_spice_console() to take an instance object * 3.2 - Update get_vnc_console() to take an instance object * 3.3 - Update validate_console_port() to take an instance object * 3.4 - Update rebuild_instance() to take an instance object * 3.5 - Pass preserve_ephemeral flag to rebuild_instance() * 3.6 - Make volume_snapshot_{create,delete} use new-world objects * 3.7 - Update change_instance_metadata() to take an instance object * 3.8 - Update set_admin_password() to take an instance object * 3.9 - Update rescue_instance() to take an instance object * 3.10 - Added get_rdp_console method * 3.11 - Update unrescue_instance() to take an object * 3.12 - Update add_fixed_ip_to_instance() to take an object * 3.13 - Update remove_fixed_ip_from_instance() to take an object * 3.14 - Update post_live_migration_at_destination() to take an object * 3.15 - Adds filter_properties and node to unshelve_instance() * 3.16 - Make reserve_block_device_name and attach_volume use new-world objects, and add disk_bus and device_type params to reserve_block_device_name, and bdm param to attach_volume * 3.17 - Update attach_interface and detach_interface to take an object * 3.18 - Update get_diagnostics() to take an instance object * Removed inject_file(), as it was unused. * 3.19 - Update pre_live_migration to take instance object * 3.20 - Make restore_instance take an instance object * 3.21 - Made rebuild take new-world BDM objects * 3.22 - Made terminate_instance take new-world BDM objects * 3.23 - Added external_instance_event() * build_and_run_instance was added in Havana and not used or documented. ... Icehouse supports message version 3.23. So, any changes to existing methods in 3.x after that point should be done such that they can handle the version_cap being set to 3.23. * 3.24 - Update rescue_instance() to take optional rescue_image_ref * 3.25 - Make detach_volume take an object * 3.26 - Make live_migration() and rollback_live_migration_at_destination() take an object * ... Removed run_instance() * 3.27 - Make run_instance() accept a new-world object * 3.28 - Update get_console_output() to accept a new-world object * 3.29 - Make check_instance_shared_storage accept a new-world object * 3.30 - Make remove_volume_connection() accept a new-world object * 3.31 - Add get_instance_diagnostics * 3.32 - Add destroy_disks and migrate_data optional parameters to rollback_live_migration_at_destination() * 3.33 - Make build_and_run_instance() take a NetworkRequestList object * 3.34 - Add get_serial_console method * 3.35 - Make reserve_block_device_name return a BDM object ... Juno supports message version 3.35. So, any changes to existing methods in 3.x after that point should be done such that they can handle the version_cap being set to 3.35. * 3.36 - Make build_and_run_instance() send a Flavor object * 3.37 - Add clean_shutdown to stop, resize, rescue, shelve, and shelve_offload * 3.38 - Add clean_shutdown to prep_resize * 3.39 - Add quiesce_instance and unquiesce_instance methods * 3.40 - Make build_and_run_instance() take a new-world topology limits object ... Kilo supports messaging version 3.40. So, any changes to existing methods in 3.x after that point should be done so that they can handle the version_cap being set to 3.40 ... Version 4.0 is equivalent to 3.40. Kilo sends version 4.0 by default, can accept 3.x calls from Juno nodes, and can be pinned to 3.x for Juno compatibility. 
All new changes should go against 4.x. * 4.0 - Remove 3.x compatibility * 4.1 - Make prep_resize() and resize_instance() send Flavor object * 4.2 - Add migration argument to live_migration() * 4.3 - Added get_mks_console method * 4.4 - Make refresh_instance_security_rules send an instance object * 4.5 - Add migration, scheduler_node and limits arguments to rebuild_instance() ... Liberty supports messaging version 4.5. So, any changes to existing methods in 4.x after that point should be done so that they can handle the version_cap being set to 4.5 * ... - Remove refresh_security_group_members() * ... - Remove refresh_security_group_rules() * 4.6 - Add trigger_crash_dump() * 4.7 - Add attachment_id argument to detach_volume() * 4.8 - Send migrate_data in object format for live_migration, rollback_live_migration_at_destination, and pre_live_migration. * ... - Remove refresh_provider_fw_rules() * 4.9 - Add live_migration_force_complete() * 4.10 - Add live_migration_abort() * 4.11 - Allow block_migration and disk_over_commit be None ... Mitaka supports messaging version 4.11. So, any changes to existing methods in 4.x after that point should be done so that they can handle the version_cap being set to 4.11 * 4.12 - Remove migration_id from live_migration_force_complete * 4.13 - Make get_instance_diagnostics send an instance object ... Newton and Ocata support messaging version 4.13. So, any changes to existing methods in 4.x after that point should be done so that they can handle the version_cap being set to 4.13 * 4.14 - Make get_instance_diagnostics return a diagnostics object instead of dictionary. Strictly speaking we don't need to bump the version because this method was unused before. The version was bumped to signal the availability of the corrected RPC API * 4.15 - Add tag argument to reserve_block_device_name() * 4.16 - Add tag argument to attach_interface() * 4.17 - Add new_attachment_id to swap_volume. ... Pike supports messaging version 4.17. So any changes to existing methods in 4.x after that point should be done so that they can handle the version_cap being set to 4.17. * 4.18 - Add migration to prep_resize() * 4.19 - build_and_run_instance() now gets a 'host_list' parameter representing potential alternate hosts for retries within a cell. * 4.20 - Add multiattach argument to reserve_block_device_name(). * 4.21 - prep_resize() now gets a 'host_list' parameter representing potential alternate hosts for retries within a cell. * 4.22 - Add request_spec to rebuild_instance() ... Version 5.0 is functionally equivalent to 4.22, aside from removing deprecated parameters. Queens sends 5.0 by default, can accept 4.x calls from Pike nodes, and can be pinned to 4.x for Pike compatibility. All new changes should go against 5.x. * 5.0 - Remove 4.x compatibility * 5.1 - Make prep_resize() take a RequestSpec object rather than a legacy dict. 
* 5.2 - Add request_spec parameter for the following: resize_instance, finish_resize, revert_resize, finish_revert_resize, unshelve_instance * 5.3 - Add migration and limits parameters to check_can_live_migrate_destination(), and a new drop_move_claim_at_destination() method * 5.4 - Add cache_images() support * 5.5 - Add prep_snapshot_based_resize_at_dest() * 5.6 - Add prep_snapshot_based_resize_at_source() * 5.7 - Add finish_snapshot_based_resize_at_dest() * 5.8 - Add confirm_snapshot_based_resize_at_source() * 5.9 - Add revert_snapshot_based_resize_at_dest() * 5.10 - Add finish_revert_snapshot_based_resize_at_source() * 5.11 - Add accel_uuids (accelerator requests) parameter to build_and_run_instance() * 5.12 - Add accel_uuids (accelerator requests) parameter to rebuild_instance() * 5.13 - Add accel_uuids (accelerator requests) parameter to shelve_instance(), shelve_offload_instance() and unshelve_instance() ... Version 6.0 is functionally equivalent to 5.13, aside from removing deprecated parameters and methods. Wallaby sends 6.0 by default, can accept 5.x calls from Victoria nodes, and can be pinned to 5.x for Victoria compatibility. All new changes should go against 6.x. * 6.0 - Remove 5.x compatibility * ... - Remove add_aggregate_host() * ... - Remove remove_aggregate_host() * ... - Remove test_get_console_pool_info() * ... - Remove test_get_console_topic() * ... - Remove refresh_instance_security_rules() * ... - Remove request_spec argument from prep_snapshot_based_resize_at_dest() and finish_snapshot_based_resize_at_dest() * ... - Remove instance argument from check_instance_shared_storage() * ... - Rename the instance_type argument of prep_resize() to flavor * ... - Rename the instance_type argument of resize_instance() to flavor * 6.1 - Add reimage_boot_volume parameter to rebuild_instance() * 6.2 - Add target_state parameter to rebuild_instance() * 6.3 - Add delete_attachment parameter to remove_volume_connection * 6.4 - Add allow_share() and deny_share() ''' VERSION_ALIASES = { 'icehouse': '3.23', 'juno': '3.35', 'kilo': '4.0', 'liberty': '4.5', 'mitaka': '4.11', 'newton': '4.13', 'ocata': '4.13', 'pike': '4.17', 'queens': '5.0', 'rocky': '5.0', 'stein': '5.1', 'train': '5.3', 'ussuri': '5.11', 'victoria': '5.12', 'wallaby': '6.0', 'xena': '6.0', 'yoga': '6.0', 'zed': '6.1', 'antelope': '6.2', 'bobcat': '6.2', 'caracal': '6.3', 'dalmatian': '6.3', 'epoxy': '6.4', 'flamingo': '6.4', } @property def router(self): """Provides singleton access to nova.rpc.ClientRouter for this API The ClientRouter is constructed and accessed as a singleton to avoid querying all cells for a minimum nova-compute service version when [upgrade_levels]/compute=auto and we have access to the API DB. """ global _ROUTER if _ROUTER is None: with lockutils.lock('compute-rpcapi-router'): if _ROUTER is None: target = messaging.Target(topic=RPC_TOPIC, version='6.0') upgrade_level = CONF.upgrade_levels.compute if upgrade_level == 'auto': version_cap = self._determine_version_cap(target) else: version_cap = self.VERSION_ALIASES.get(upgrade_level, upgrade_level) serializer = objects_base.NovaObjectSerializer() # NOTE(danms): We need to poke this path to register CONF # options that we use in self.get_client() rpc.get_client(target, version_cap, serializer) default_client = self.get_client(target, version_cap, serializer) _ROUTER = rpc.ClientRouter(default_client) return _ROUTER def _ver(self, ctxt, old): """Determine compatibility version. 
This is to be used when we could send either the current major or a revision of the previous major when they are equivalent. This should only be used by calls that are the exact same in the current and previous major versions. Returns either old, or the current major version. :param old: The version under the previous major version that should be sent if we're pinned to it. """ client = self.router.client(ctxt) if client.can_send_version('6.0'): return '6.0' else: return old @staticmethod def _determine_version_cap(target): global LAST_VERSION global NO_COMPUTES_WARNING if LAST_VERSION: return LAST_VERSION # NOTE(danms): If we have a connection to the api database, # we should iterate all cells. If not, we must only look locally. if CONF.api_database.connection: try: service_version = service_obj.get_minimum_version_all_cells( context.get_admin_context(), ['nova-compute']) except exception.DBNotAllowed: # This most likely means we are in a nova-compute service # configured with [upgrade_levels]/compute=auto and a # connection to the API database. We should not be attempting # to "get out" of our cell to look at the minimum versions of # nova-compute services in other cells, so DBNotAllowed was # raised. Log a user-friendly message and re-raise the error. with excutils.save_and_reraise_exception(): LOG.error('This service is configured for access to the ' 'API database but is not allowed to directly ' 'access the database. You should run this ' 'service without the [api_database]/connection ' 'config option.') else: service_version = objects.Service.get_minimum_version( context.get_admin_context(), 'nova-compute') history = service_obj.SERVICE_VERSION_HISTORY # NOTE(johngarbutt) when there are no nova-compute services running we # get service_version == 0. In that case we do not want to cache # this result, because we will get a better answer next time. # As a sane default, return the current version. if service_version == 0: if not NO_COMPUTES_WARNING: # NOTE(danms): Only show this warning once LOG.debug("Not caching compute RPC version_cap, because min " "service_version is 0. Please ensure a nova-compute " "service has been started. Defaulting to current " "version.") NO_COMPUTES_WARNING = True return history[service_obj.SERVICE_VERSION]['compute_rpc'] try: version_cap = history[service_version]['compute_rpc'] except IndexError: LOG.error('Failed to extract compute RPC version from ' 'service history because I am too ' 'old (minimum version is now %(version)i)', {'version': service_version}) raise exception.ServiceTooOld(thisver=service_obj.SERVICE_VERSION, minver=service_version) except KeyError: LOG.error('Failed to extract compute RPC version from ' 'service history for version %(version)i', {'version': service_version}) return target.version LAST_VERSION = version_cap LOG.info('Automatically selected compute RPC version %(rpc)s ' 'from minimum service version %(service)i', {'rpc': version_cap, 'service': service_version}) return version_cap def get_client(self, target, version_cap, serializer): if CONF.rpc_response_timeout > rpc.HEARTBEAT_THRESHOLD: # NOTE(danms): If the operator has overridden RPC timeout # to be longer than rpc.HEARTBEAT_THRESHOLD then configure # the call monitor timeout to be the threshold to keep the # failure timing characteristics that our code likely # expects (from history) while allowing healthy calls # to run longer. 
cmt = rpc.HEARTBEAT_THRESHOLD else: cmt = None return rpc.get_client(target, version_cap=version_cap, serializer=serializer, call_monitor_timeout=cmt) def add_fixed_ip_to_instance(self, ctxt, instance, network_id): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'add_fixed_ip_to_instance', instance=instance, network_id=network_id) def attach_interface(self, ctxt, instance, network_id, port_id, requested_ip, tag=None): kw = {'instance': instance, 'network_id': network_id, 'port_id': port_id, 'requested_ip': requested_ip, 'tag': tag} version = self._ver(ctxt, '5.0') client = self.router.client(ctxt) cctxt = client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'attach_interface', **kw) def attach_volume(self, ctxt, instance, bdm): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'attach_volume', instance=instance, bdm=bdm) def check_can_live_migrate_destination(self, ctxt, instance, destination, block_migration, disk_over_commit, migration, limits): client = self.router.client(ctxt) version = self._ver(ctxt, '5.3') kwargs = { 'instance': instance, 'block_migration': block_migration, 'disk_over_commit': disk_over_commit, 'migration': migration, 'limits': limits } if not client.can_send_version(version): kwargs.pop('migration') kwargs.pop('limits') version = '5.0' cctxt = client.prepare(server=destination, version=version, call_monitor_timeout=CONF.rpc_response_timeout, timeout=CONF.long_rpc_timeout) return cctxt.call(ctxt, 'check_can_live_migrate_destination', **kwargs) def check_can_live_migrate_source(self, ctxt, instance, dest_check_data): version = self._ver(ctxt, '5.0') client = self.router.client(ctxt) source = _compute_host(None, instance) cctxt = client.prepare(server=source, version=version) return cctxt.call(ctxt, 'check_can_live_migrate_source', instance=instance, dest_check_data=dest_check_data) def check_instance_shared_storage(self, ctxt, data, instance=None, host=None): msg_args = {'data': data} version = self._ver(ctxt, '5.0') client = self.router.client(ctxt) if not client.can_send_version('6.0'): # We always pass the instance until the 5.0 version msg_args['instance'] = instance cctxt = client.prepare( server=_compute_host(host, instance), version=version) return cctxt.call(ctxt, 'check_instance_shared_storage', **msg_args) def confirm_resize(self, ctxt, instance, migration, host, cast=True): client = self.router.client(ctxt) version = self._ver(ctxt, '5.0') cctxt = client.prepare( server=_compute_host(host, instance), version=version) rpc_method = cctxt.cast if cast else cctxt.call return rpc_method(ctxt, 'confirm_resize', instance=instance, migration=migration) def confirm_snapshot_based_resize_at_source( self, ctxt, instance, migration): """Confirms a snapshot-based resize on the source host. Cleans the guest from the source hypervisor including disks and drops the MoveClaim which will free up "old_flavor" usage from the ResourceTracker. Deletes the allocations held by the migration consumer against the source compute node resource provider. This is a synchronous RPC call using the ``long_rpc_timeout`` configuration option. 
:param ctxt: nova auth request context targeted at the source cell :param instance: Instance object being resized which should have the "old_flavor" attribute set :param migration: Migration object for the resize operation :raises: nova.exception.MigrationError if the source compute is too old to perform the operation :raises: oslo_messaging.exceptions.MessagingTimeout if the RPC call times out """ version = self._ver(ctxt, '5.8') client = self.router.client(ctxt) if not client.can_send_version(version): raise exception.MigrationError(reason=_('Compute too old')) cctxt = client.prepare(server=migration.source_compute, version=version, call_monitor_timeout=CONF.rpc_response_timeout, timeout=CONF.long_rpc_timeout) return cctxt.call( ctxt, 'confirm_snapshot_based_resize_at_source', instance=instance, migration=migration) def detach_interface(self, ctxt, instance, port_id): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'detach_interface', instance=instance, port_id=port_id) def detach_volume(self, ctxt, instance, volume_id, attachment_id=None): version = self._ver(ctxt, '5.0') client = self.router.client(ctxt) cctxt = client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'detach_volume', instance=instance, volume_id=volume_id, attachment_id=attachment_id) def finish_resize(self, ctxt, instance, migration, image, disk_info, host, request_spec): msg_args = { 'instance': instance, 'migration': migration, 'image': image, 'disk_info': disk_info, 'request_spec': request_spec, } client = self.router.client(ctxt) version = self._ver(ctxt, '5.2') if not client.can_send_version(version): msg_args.pop('request_spec') version = '5.0' cctxt = client.prepare( server=host, version=version) cctxt.cast(ctxt, 'finish_resize', **msg_args) def finish_revert_resize(self, ctxt, instance, migration, host, request_spec): msg_args = { 'instance': instance, 'migration': migration, 'request_spec': request_spec, } client = self.router.client(ctxt) version = self._ver(ctxt, '5.2') if not client.can_send_version(version): msg_args.pop('request_spec') version = '5.0' cctxt = client.prepare( server=host, version=version) cctxt.cast(ctxt, 'finish_revert_resize', **msg_args) def finish_snapshot_based_resize_at_dest( self, ctxt, instance, migration, snapshot_id, request_spec): """Finishes the snapshot-based resize at the destination compute. Sets up block devices and networking on the destination compute and spawns the guest. This is a synchronous RPC call using the ``long_rpc_timeout`` configuration option. :param ctxt: nova auth request context targeted at the target cell DB :param instance: The Instance object being resized with the ``migration_context`` field set. Upon successful completion of this method the vm_state should be "resized", the task_state should be None, and migration context, host/node and flavor-related fields should be set on the instance. :param migration: The Migration object for this resize operation. Upon successful completion of this method the migration status should be "finished". :param snapshot_id: ID of the image snapshot created for a non-volume-backed instance, else None. 
:param request_spec: nova.objects.RequestSpec object for the operation :raises: nova.exception.MigrationError if the destination compute service is too old for this method :raises: oslo_messaging.exceptions.MessagingTimeout if the pre-check RPC call times out """ msg_args = {'instance': instance, 'migration': migration, 'snapshot_id': snapshot_id} client = self.router.client(ctxt) version = self._ver(ctxt, '5.7') if not client.can_send_version('6.0'): msg_args['request_spec'] = request_spec if not client.can_send_version(version): raise exception.MigrationError(reason=_('Compute too old')) cctxt = client.prepare( server=migration.dest_compute, version=version, call_monitor_timeout=CONF.rpc_response_timeout, timeout=CONF.long_rpc_timeout) return cctxt.call( ctxt, 'finish_snapshot_based_resize_at_dest', **msg_args) def finish_revert_snapshot_based_resize_at_source( self, ctxt, instance, migration): """Reverts a snapshot-based resize at the source host. Spawn the guest and re-connect volumes/VIFs on the source host and revert the instance to use the old_flavor for resource usage reporting. Updates allocations in the placement service to move the source node allocations, held by the migration record, to the instance and drop the allocations held by the instance on the destination node. This is a synchronous RPC call using the ``long_rpc_timeout`` configuration option. :param ctxt: nova auth request context targeted at the source cell :param instance: Instance object whose vm_state is "resized" and task_state is "resize_reverting". :param migration: Migration object whose status is "reverting". :raises: nova.exception.MigrationError if the source compute is too old to perform the operation :raises: oslo_messaging.exceptions.MessagingTimeout if the RPC call times out """ version = self._ver(ctxt, '5.10') client = self.router.client(ctxt) if not client.can_send_version(version): raise exception.MigrationError(reason=_('Compute too old')) cctxt = client.prepare(server=migration.source_compute, version=version, call_monitor_timeout=CONF.rpc_response_timeout, timeout=CONF.long_rpc_timeout) return cctxt.call( ctxt, 'finish_revert_snapshot_based_resize_at_source', instance=instance, migration=migration) def get_console_output(self, ctxt, instance, tail_length): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_console_output', instance=instance, tail_length=tail_length) def get_diagnostics(self, ctxt, instance): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_diagnostics', instance=instance) def get_instance_diagnostics(self, ctxt, instance): version = self._ver(ctxt, '5.0') client = self.router.client(ctxt) cctxt = client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_instance_diagnostics', instance=instance) def get_vnc_console(self, ctxt, instance, console_type): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_vnc_console', instance=instance, console_type=console_type) def get_spice_console(self, ctxt, instance, console_type): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_spice_console', instance=instance, 
console_type=console_type) def get_mks_console(self, ctxt, instance, console_type): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_mks_console', instance=instance, console_type=console_type) def get_serial_console(self, ctxt, instance, console_type): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_serial_console', instance=instance, console_type=console_type) def validate_console_port(self, ctxt, instance, port, console_type): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'validate_console_port', instance=instance, port=port, console_type=console_type) def host_maintenance_mode(self, ctxt, host, host_param, mode): '''Set host maintenance mode :param ctxt: request context :param host_param: This value is placed in the message to be the 'host' parameter for the remote method. :param mode: :param host: This is the host to send the message to. ''' version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=host, version=version) return cctxt.call(ctxt, 'host_maintenance_mode', host=host_param, mode=mode) def host_power_action(self, ctxt, host, action): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=host, version=version) return cctxt.call(ctxt, 'host_power_action', action=action) def inject_network_info(self, ctxt, instance): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'inject_network_info', instance=instance) def live_migration(self, ctxt, instance, dest, block_migration, host, migration, migrate_data=None): version = self._ver(ctxt, '5.0') client = self.router.client(ctxt) cctxt = client.prepare(server=host, version=version) cctxt.cast(ctxt, 'live_migration', instance=instance, dest=dest, block_migration=block_migration, migrate_data=migrate_data, migration=migration) def live_migration_force_complete(self, ctxt, instance, migration): version = self._ver(ctxt, '5.0') client = self.router.client(ctxt) cctxt = client.prepare( server=_compute_host(migration.source_compute, instance), version=version) cctxt.cast(ctxt, 'live_migration_force_complete', instance=instance) def live_migration_abort(self, ctxt, instance, migration_id): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'live_migration_abort', instance=instance, migration_id=migration_id) def pause_instance(self, ctxt, instance): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'pause_instance', instance=instance) def post_live_migration_at_destination(self, ctxt, instance, block_migration, host): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=host, version=version, call_monitor_timeout=CONF.rpc_response_timeout, timeout=CONF.long_rpc_timeout) return cctxt.call(ctxt, 'post_live_migration_at_destination', instance=instance, block_migration=block_migration) # TODO(mriedem): Remove the unused block_migration argument in v6.0 of # the compute RPC API. 
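# (Until that removal, block_migration is only put in the message -- as
# None -- when we have to fall back to the 5.0 signature below; a
# 6.0-capable compute never receives the argument.)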
def pre_live_migration(self, ctxt, instance, block_migration, disk, host, migrate_data): version = '6.0' msg_args = {} client = self.router.client(ctxt) if not client.can_send_version(version): version = '5.0' # We just need to honor the argument in the v5.0 RPC API method msg_args['block_migration'] = None cctxt = client.prepare(server=host, version=version, timeout=CONF.long_rpc_timeout, call_monitor_timeout=CONF.rpc_response_timeout) return cctxt.call(ctxt, 'pre_live_migration', instance=instance, disk=disk, migrate_data=migrate_data, **msg_args) # TODO(mriedem): Drop compat for request_spec being a legacy dict in v6.0. def prep_resize(self, ctxt, instance, image, flavor, host, migration, request_spec, filter_properties, node, clean_shutdown, host_list): version = '6.0' # TODO(mriedem): We should pass the ImageMeta object through to the # compute but that also requires plumbing changes through the resize # flow for other methods like resize_instance and finish_resize. image_p = objects_base.obj_to_primitive(image) msg_args = {'instance': instance, 'flavor': flavor, 'image': image_p, 'request_spec': request_spec, 'filter_properties': filter_properties, 'node': node, 'migration': migration, 'clean_shutdown': clean_shutdown, 'host_list': host_list} client = self.router.client(ctxt) if not client.can_send_version(version): version = '5.1' del msg_args['flavor'] msg_args['instance_type'] = flavor if not client.can_send_version(version): version = '5.0' msg_args['request_spec'] = ( request_spec.to_legacy_request_spec_dict()) cctxt = client.prepare(server=host, version=version) cctxt.cast(ctxt, 'prep_resize', **msg_args) def prep_snapshot_based_resize_at_dest( self, ctxt, instance, flavor, nodename, migration, limits, request_spec, destination): """Performs pre-cross-cell resize resource claim on the dest host. This runs on the destination host in a cross-cell resize operation before the resize is actually started. Performs a resize_claim for resources that are not claimed in placement like PCI devices and NUMA topology. Note that this is different from same-cell prep_resize in that this: * Does not RPC cast to the source compute, that is orchestrated from conductor. * This does not reschedule on failure, conductor handles that since conductor is synchronously RPC calling this method. :param ctxt: user auth request context :param instance: the instance being resized :param flavor: the flavor being resized to (unchanged for cold migrate) :param nodename: Name of the target compute node :param migration: nova.objects.Migration object for the operation :param limits: nova.objects.SchedulerLimits object of resource limits :param request_spec: nova.objects.RequestSpec object for the operation :param destination: possible target host for the cross-cell resize :returns: nova.objects.MigrationContext; the migration context created on the destination host during the resize_claim. 
:raises: nova.exception.MigrationPreCheckError if the pre-check validation fails for the given host selection or the destination compute service is too old for this method :raises: oslo_messaging.exceptions.MessagingTimeout if the pre-check RPC call times out """ msg_args = {'instance': instance, 'flavor': flavor, 'nodename': nodename, 'migration': migration, 'limits': limits} version = self._ver(ctxt, '5.5') client = self.router.client(ctxt) if not client.can_send_version('6.0'): msg_args['request_spec'] = request_spec if not client.can_send_version(version): raise exception.MigrationPreCheckError(reason=_('Compute too old')) cctxt = client.prepare(server=destination, version=version, call_monitor_timeout=CONF.rpc_response_timeout, timeout=CONF.long_rpc_timeout) return cctxt.call(ctxt, 'prep_snapshot_based_resize_at_dest', **msg_args) def prep_snapshot_based_resize_at_source( self, ctxt, instance, migration, snapshot_id=None): """Prepares the instance at the source host for cross-cell resize Performs actions like powering off the guest, upload snapshot data if the instance is not volume-backed, disconnecting volumes, unplugging VIFs and activating the destination host port bindings. :param ctxt: user auth request context targeted at source cell :param instance: nova.objects.Instance; the instance being resized. The expected instance.task_state is "resize_migrating" when calling this method, and the expected task_state upon successful completion is "resize_migrated". :param migration: nova.objects.Migration object for the operation. The expected migration.status is "pre-migrating" when calling this method and the expected status upon successful completion is "post-migrating". :param snapshot_id: ID of the image snapshot to upload if not a volume-backed instance :raises: nova.exception.InstancePowerOffFailure if stopping the instance fails :raises: nova.exception.MigrationError if the source compute is too old to perform the operation :raises: oslo_messaging.exceptions.MessagingTimeout if the RPC call times out """ version = self._ver(ctxt, '5.6') client = self.router.client(ctxt) if not client.can_send_version(version): raise exception.MigrationError(reason=_('Compute too old')) cctxt = client.prepare(server=_compute_host(None, instance), version=version, call_monitor_timeout=CONF.rpc_response_timeout, timeout=CONF.long_rpc_timeout) return cctxt.call( ctxt, 'prep_snapshot_based_resize_at_source', instance=instance, migration=migration, snapshot_id=snapshot_id) def reboot_instance(self, ctxt, instance, block_device_info, reboot_type): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'reboot_instance', instance=instance, block_device_info=block_device_info, reboot_type=reboot_type) def rebuild_instance( self, ctxt, instance, new_pass, injected_files, image_ref, orig_image_ref, orig_sys_metadata, bdms, recreate, on_shared_storage, host, node, preserve_ephemeral, migration, limits, request_spec, accel_uuids, reimage_boot_volume, target_state): # NOTE(edleafe): compute nodes can only use the dict form of limits. 
if isinstance(limits, objects.SchedulerLimits): limits = limits.to_dict() msg_args = { 'preserve_ephemeral': preserve_ephemeral, 'migration': migration, 'scheduled_node': node, 'limits': limits, 'request_spec': request_spec, 'accel_uuids': accel_uuids, 'reimage_boot_volume': reimage_boot_volume, 'target_state': target_state, } version = '6.2' client = self.router.client(ctxt) if not client.can_send_version(version): if msg_args['target_state']: raise exception.UnsupportedRPCVersion( api="rebuild_instance", required="6.2") else: del msg_args['target_state'] version = '6.1' if not client.can_send_version(version): if msg_args['reimage_boot_volume']: raise exception.NovaException( 'Compute RPC version does not support ' 'reimage_boot_volume parameter.') else: del msg_args['reimage_boot_volume'] version = self._ver(ctxt, '5.12') if not client.can_send_version(version): del msg_args['accel_uuids'] version = '5.0' cctxt = client.prepare(server=_compute_host(host, instance), version=version) cctxt.cast(ctxt, 'rebuild_instance', instance=instance, new_pass=new_pass, injected_files=injected_files, image_ref=image_ref, orig_image_ref=orig_image_ref, orig_sys_metadata=orig_sys_metadata, bdms=bdms, recreate=recreate, on_shared_storage=on_shared_storage, **msg_args) def remove_fixed_ip_from_instance(self, ctxt, instance, address): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'remove_fixed_ip_from_instance', instance=instance, address=address) def remove_volume_connection( self, ctxt, instance, volume_id, host, delete_attachment=False ): version = '6.3' client = self.router.client(ctxt) kwargs = { 'instance': instance, 'volume_id': volume_id, 'delete_attachment': delete_attachment } if not client.can_send_version(version): kwargs.pop('delete_attachment') version = self._ver(ctxt, '5.0') cctxt = client.prepare(server=host, version=version) return cctxt.call(ctxt, 'remove_volume_connection', **kwargs) def rescue_instance(self, ctxt, instance, rescue_password, rescue_image_ref=None, clean_shutdown=True): version = self._ver(ctxt, '5.0') msg_args = {'rescue_password': rescue_password, 'clean_shutdown': clean_shutdown, 'rescue_image_ref': rescue_image_ref, 'instance': instance, } cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'rescue_instance', **msg_args) def resize_instance(self, ctxt, instance, migration, image, flavor, request_spec, clean_shutdown=True): version = '6.0' msg_args = {'instance': instance, 'migration': migration, 'image': image, 'flavor': flavor, 'clean_shutdown': clean_shutdown, 'request_spec': request_spec, } client = self.router.client(ctxt) if not client.can_send_version(version): version = self._ver(ctxt, '5.2') del msg_args['flavor'] msg_args['instance_type'] = flavor if not client.can_send_version(version): msg_args.pop('request_spec') version = '5.0' cctxt = client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'resize_instance', **msg_args) def resume_instance(self, ctxt, instance): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'resume_instance', instance=instance) def revert_resize(self, ctxt, instance, migration, host, request_spec): msg_args = { 'instance': instance, 'migration': migration, 'request_spec': request_spec, } client = self.router.client(ctxt) version = self._ver(ctxt, 
'5.2') if not client.can_send_version(version): msg_args.pop('request_spec') version = '5.0' cctxt = client.prepare( server=_compute_host(host, instance), version=version) cctxt.cast(ctxt, 'revert_resize', **msg_args) def revert_snapshot_based_resize_at_dest(self, ctxt, instance, migration): """Reverts a snapshot-based resize at the destination host. Cleans the guest from the destination compute service host hypervisor and related resources (ports, volumes) and frees resource usage from the compute service on that host. This is a synchronous RPC call using the ``long_rpc_timeout`` configuration option. :param ctxt: nova auth request context targeted at the target cell :param instance: Instance object whose vm_state is "resized" and task_state is "resize_reverting". :param migration: Migration object whose status is "reverting". :raises: nova.exception.MigrationError if the destination compute service is too old to perform the operation :raises: oslo_messaging.exceptions.MessagingTimeout if the RPC call times out """ version = self._ver(ctxt, '5.9') client = self.router.client(ctxt) if not client.can_send_version(version): raise exception.MigrationError(reason=_('Compute too old')) cctxt = client.prepare(server=migration.dest_compute, version=version, call_monitor_timeout=CONF.rpc_response_timeout, timeout=CONF.long_rpc_timeout) return cctxt.call( ctxt, 'revert_snapshot_based_resize_at_dest', instance=instance, migration=migration) def rollback_live_migration_at_destination(self, ctxt, instance, host, destroy_disks, migrate_data): version = self._ver(ctxt, '5.0') client = self.router.client(ctxt) cctxt = client.prepare(server=host, version=version) cctxt.cast(ctxt, 'rollback_live_migration_at_destination', instance=instance, destroy_disks=destroy_disks, migrate_data=migrate_data) # TODO(sbauza): Remove this when we bump the compute API to v6.0 def supports_numa_live_migration(self, ctxt): """Returns whether we can send 5.3, needed for NUMA live migration. """ client = self.router.client(ctxt) version = self._ver(ctxt, '5.3') return client.can_send_version(version) def drop_move_claim_at_destination(self, ctxt, instance, host): """Called by the source of a live migration that's being rolled back. This is a call not because we care about the return value, but because dropping the move claim depends on instance.migration_context being set, and we drop the migration context on the source. Thus, to avoid races, we call the destination synchronously to make sure it's done dropping the move claim before we drop the migration context from the instance. 
""" version = self._ver(ctxt, '5.3') client = self.router.client(ctxt) cctxt = client.prepare(server=host, version=version) cctxt.call(ctxt, 'drop_move_claim_at_destination', instance=instance) def set_admin_password(self, ctxt, instance, new_pass): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'set_admin_password', instance=instance, new_pass=new_pass) def set_host_enabled(self, ctxt, host, enabled): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=host, version=version, call_monitor_timeout=CONF.rpc_response_timeout, timeout=CONF.long_rpc_timeout) return cctxt.call(ctxt, 'set_host_enabled', enabled=enabled) def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id, new_attachment_id): version = self._ver(ctxt, '5.0') client = self.router.client(ctxt) kwargs = dict(instance=instance, old_volume_id=old_volume_id, new_volume_id=new_volume_id, new_attachment_id=new_attachment_id) cctxt = client.prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'swap_volume', **kwargs) def get_host_uptime(self, ctxt, host): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=host, version=version) return cctxt.call(ctxt, 'get_host_uptime') def reserve_block_device_name(self, ctxt, instance, device, volume_id, disk_bus, device_type, tag, multiattach): kw = {'instance': instance, 'device': device, 'volume_id': volume_id, 'disk_bus': disk_bus, 'device_type': device_type, 'tag': tag, 'multiattach': multiattach} version = self._ver(ctxt, '5.0') client = self.router.client(ctxt) cctxt = client.prepare(server=_compute_host(None, instance), version=version, call_monitor_timeout=CONF.rpc_response_timeout, timeout=CONF.long_rpc_timeout) return cctxt.call(ctxt, 'reserve_block_device_name', **kw) def backup_instance(self, ctxt, instance, image_id, backup_type, rotation): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'backup_instance', instance=instance, image_id=image_id, backup_type=backup_type, rotation=rotation) def snapshot_instance(self, ctxt, instance, image_id): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'snapshot_instance', instance=instance, image_id=image_id) def start_instance(self, ctxt, instance): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'start_instance', instance=instance) def stop_instance(self, ctxt, instance, do_cast=True, clean_shutdown=True): msg_args = {'instance': instance, 'clean_shutdown': clean_shutdown} version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) rpc_method = cctxt.cast if do_cast else cctxt.call return rpc_method(ctxt, 'stop_instance', **msg_args) def suspend_instance(self, ctxt, instance): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'suspend_instance', instance=instance) def terminate_instance(self, ctxt, instance, bdms): client = self.router.client(ctxt) version = self._ver(ctxt, '5.0') cctxt = client.prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 
'terminate_instance', instance=instance, bdms=bdms) def unpause_instance(self, ctxt, instance): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'unpause_instance', instance=instance) def unrescue_instance(self, ctxt, instance): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'unrescue_instance', instance=instance) def soft_delete_instance(self, ctxt, instance): client = self.router.client(ctxt) version = self._ver(ctxt, '5.0') cctxt = client.prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'soft_delete_instance', instance=instance) def restore_instance(self, ctxt, instance): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'restore_instance', instance=instance) def shelve_instance(self, ctxt, instance, image_id=None, clean_shutdown=True, accel_uuids=None): msg_kwargs = { 'instance': instance, 'image_id': image_id, 'clean_shutdown': clean_shutdown, 'accel_uuids': accel_uuids, } client = self.router.client(ctxt) version = self._ver(ctxt, '5.13') if not client.can_send_version(version): if accel_uuids: LOG.error("Shelve with accelerators is not supported as " "RPC version is too old.") raise exception.ForbiddenWithAccelerators() else: msg_kwargs.pop('accel_uuids') version = '5.0' cctxt = client.prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'shelve_instance', **msg_kwargs) def shelve_offload_instance(self, ctxt, instance, clean_shutdown=True, accel_uuids=None): msg_kwargs = { 'instance': instance, 'clean_shutdown': clean_shutdown, 'accel_uuids': accel_uuids, } client = self.router.client(ctxt) version = self._ver(ctxt, '5.13') if not client.can_send_version(version): msg_kwargs.pop('accel_uuids') version = '5.0' cctxt = client.prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'shelve_offload_instance', **msg_kwargs) def unshelve_instance(self, ctxt, instance, host, request_spec, image=None, filter_properties=None, node=None, accel_uuids=None): version = self._ver(ctxt, '5.13') msg_kwargs = { 'instance': instance, 'image': image, 'filter_properties': filter_properties, 'node': node, 'request_spec': request_spec, 'accel_uuids': accel_uuids, } client = self.router.client(ctxt) if not client.can_send_version(version): msg_kwargs.pop('accel_uuids') version = '5.2' if not client.can_send_version(version): msg_kwargs.pop('request_spec') version = '5.0' cctxt = client.prepare(server=host, version=version) cctxt.cast(ctxt, 'unshelve_instance', **msg_kwargs) def volume_snapshot_create(self, ctxt, instance, volume_id, create_info): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'volume_snapshot_create', instance=instance, volume_id=volume_id, create_info=create_info) def volume_snapshot_delete(self, ctxt, instance, volume_id, snapshot_id, delete_info): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'volume_snapshot_delete', instance=instance, volume_id=volume_id, snapshot_id=snapshot_id, delete_info=delete_info) def allow_share(self, ctxt, instance, share_mapping): version = '6.4' client = self.router.client(ctxt) if not 
client.can_send_version(version): raise exception.UnsupportedRPCVersion( api="allow_share", required=version) cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast( ctxt, "allow_share", instance=instance, share_mapping=share_mapping ) def deny_share(self, ctxt, instance, share_mapping): version = '6.4' client = self.router.client(ctxt) if not client.can_send_version(version): raise exception.UnsupportedRPCVersion( api="deny_share", required=version) cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast( ctxt, "deny_share", instance=instance, share_mapping=share_mapping, ) def external_instance_event(self, ctxt, instances, events, host=None): instance = instances[0] version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(host, instance), version=version) cctxt.cast(ctxt, 'external_instance_event', instances=instances, events=events) def build_and_run_instance(self, ctxt, instance, host, image, request_spec, filter_properties, admin_password=None, injected_files=None, requested_networks=None, security_groups=None, block_device_mapping=None, node=None, limits=None, host_list=None, accel_uuids=None): # NOTE(edleafe): compute nodes can only use the dict form of limits. if isinstance(limits, objects.SchedulerLimits): limits = limits.to_dict() kwargs = {"instance": instance, "image": image, "request_spec": request_spec, "filter_properties": filter_properties, "admin_password": admin_password, "injected_files": injected_files, "requested_networks": requested_networks, "security_groups": security_groups, "block_device_mapping": block_device_mapping, "node": node, "limits": limits, "host_list": host_list, "accel_uuids": accel_uuids, } client = self.router.client(ctxt) version = self._ver(ctxt, '5.11') if not client.can_send_version(version): kwargs.pop('accel_uuids') version = '5.0' cctxt = client.prepare(server=host, version=version) cctxt.cast(ctxt, 'build_and_run_instance', **kwargs) def quiesce_instance(self, ctxt, instance): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'quiesce_instance', instance=instance) def unquiesce_instance(self, ctxt, instance, mapping=None): version = self._ver(ctxt, '5.0') cctxt = self.router.client(ctxt).prepare( server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'unquiesce_instance', instance=instance, mapping=mapping) def trigger_crash_dump(self, ctxt, instance): version = self._ver(ctxt, '5.0') client = self.router.client(ctxt) cctxt = client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, "trigger_crash_dump", instance=instance) def cache_images(self, ctxt, host, image_ids): version = self._ver(ctxt, '5.4') client = self.router.client(ctxt) if not client.can_send_version(version): raise exception.NovaException('Compute RPC version pin does not ' 'allow cache_images() to be called') # This is a potentially very long-running call, so we provide the # two timeout values which enables the call monitor in oslo.messaging # so that this can run for extended periods. 
cctxt = client.prepare(server=host, version=version, call_monitor_timeout=CONF.rpc_response_timeout, timeout=CONF.long_rpc_timeout) return cctxt.call(ctxt, 'cache_images', image_ids=image_ids) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/stats.py0000664000175000017500000001271400000000000016716 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.compute import task_states from nova.compute import vm_states from nova.i18n import _ class Stats(dict): """Handler for updates to compute node workload stats.""" def __init__(self): super(Stats, self).__init__() # Track instance states for compute node workload calculations: self.states = {} def clear(self): super(Stats, self).clear() self.states.clear() def digest_stats(self, stats): """Apply stats provided as a dict or a json encoded string.""" if stats is None: return if isinstance(stats, dict): # use None as a sentinel to the API that # the driver does not support uptime # setdefault will update the dict if and only if # uptime is not set then return the value. # since we dont need it we just discard the result stats.setdefault('uptime', None) self.update(stats) return raise ValueError(_('Unexpected type adding stats')) @property def io_workload(self): """Calculate an I/O based load by counting I/O heavy operations.""" def _get(state, state_type): key = "num_%s_%s" % (state_type, state) return self.get(key, 0) num_builds = _get(vm_states.BUILDING, "vm") num_migrations = _get(task_states.RESIZE_MIGRATING, "task") num_rebuilds = _get(task_states.REBUILDING, "task") num_resizes = _get(task_states.RESIZE_PREP, "task") num_snapshots = _get(task_states.IMAGE_SNAPSHOT, "task") num_backups = _get(task_states.IMAGE_BACKUP, "task") num_rescues = _get(task_states.RESCUING, "task") num_unshelves = _get(task_states.UNSHELVING, "task") return (num_builds + num_rebuilds + num_resizes + num_migrations + num_snapshots + num_backups + num_rescues + num_unshelves) def calculate_workload(self): """Calculate current load of the compute host based on task states. 
""" current_workload = 0 for k in self: if k.startswith("num_task") and not k.endswith("None"): current_workload += self[k] return current_workload @property def num_instances(self): return self.get("num_instances", 0) def num_instances_for_project(self, project_id): key = "num_proj_%s" % project_id return self.get(key, 0) def num_os_type(self, os_type): key = "num_os_type_%s" % os_type return self.get(key, 0) def update_stats_for_instance(self, instance, is_removed=False): """Update stats after an instance is changed.""" uuid = instance['uuid'] # First, remove stats from the previous instance # state: if uuid in self.states: old_state = self.states[uuid] self._decrement("num_vm_%s" % old_state['vm_state']) self._decrement("num_task_%s" % old_state['task_state']) self._decrement("num_os_type_%s" % old_state['os_type']) self._decrement("num_proj_%s" % old_state['project_id']) else: # new instance self._increment("num_instances") # Now update stats from the new instance state: (vm_state, task_state, os_type, project_id) = \ self._extract_state_from_instance(instance) if is_removed or vm_states.allow_resource_removal( vm_state=vm_state, task_state=task_state): self._decrement("num_instances") self.states.pop(uuid) else: self._increment("num_vm_%s" % vm_state) self._increment("num_task_%s" % task_state) self._increment("num_os_type_%s" % os_type) self._increment("num_proj_%s" % project_id) # save updated I/O workload in stats: self["io_workload"] = self.io_workload def _decrement(self, key): x = self.get(key, 0) self[key] = x - 1 def _increment(self, key): x = self.get(key, 0) self[key] = x + 1 def _extract_state_from_instance(self, instance): """Save the useful bits of instance state for tracking purposes.""" uuid = instance['uuid'] vm_state = instance['vm_state'] task_state = instance['task_state'] os_type = instance['os_type'] project_id = instance['project_id'] self.states[uuid] = dict(vm_state=vm_state, task_state=task_state, os_type=os_type, project_id=project_id) return (vm_state, task_state, os_type, project_id) def build_failed(self): self['failed_builds'] = self.get('failed_builds', 0) + 1 def build_succeeded(self): # FIXME(danms): Make this more graceful, either by time-based aging or # a fixed decline upon success self['failed_builds'] = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/task_states.py0000664000175000017500000001165100000000000020104 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Possible task states for instances. Compute instance task states represent what is happening to the instance at the current moment. These tasks can be generic, such as 'spawning', or specific, such as 'block_device_mapping'. These task states allow for a better view into what an instance is doing and should be displayed to users/administrators as necessary. 
""" from nova.objects import fields # possible task states during create() SCHEDULING = fields.InstanceTaskState.SCHEDULING BLOCK_DEVICE_MAPPING = fields.InstanceTaskState.BLOCK_DEVICE_MAPPING NETWORKING = fields.InstanceTaskState.NETWORKING SPAWNING = fields.InstanceTaskState.SPAWNING # possible task states during snapshot() IMAGE_SNAPSHOT = fields.InstanceTaskState.IMAGE_SNAPSHOT IMAGE_SNAPSHOT_PENDING = fields.InstanceTaskState.IMAGE_SNAPSHOT_PENDING IMAGE_PENDING_UPLOAD = fields.InstanceTaskState.IMAGE_PENDING_UPLOAD IMAGE_UPLOADING = fields.InstanceTaskState.IMAGE_UPLOADING # possible task states during backup() IMAGE_BACKUP = fields.InstanceTaskState.IMAGE_BACKUP # possible task states during set_admin_password() UPDATING_PASSWORD = fields.InstanceTaskState.UPDATING_PASSWORD # possible task states during resize() RESIZE_PREP = fields.InstanceTaskState.RESIZE_PREP RESIZE_MIGRATING = fields.InstanceTaskState.RESIZE_MIGRATING RESIZE_MIGRATED = fields.InstanceTaskState.RESIZE_MIGRATED RESIZE_FINISH = fields.InstanceTaskState.RESIZE_FINISH # possible task states during revert_resize() RESIZE_REVERTING = fields.InstanceTaskState.RESIZE_REVERTING # possible task states during confirm_resize() RESIZE_CONFIRMING = fields.InstanceTaskState.RESIZE_CONFIRMING # possible task states during reboot() REBOOTING = fields.InstanceTaskState.REBOOTING REBOOT_PENDING = fields.InstanceTaskState.REBOOT_PENDING REBOOT_STARTED = fields.InstanceTaskState.REBOOT_STARTED REBOOTING_HARD = fields.InstanceTaskState.REBOOTING_HARD REBOOT_PENDING_HARD = fields.InstanceTaskState.REBOOT_PENDING_HARD REBOOT_STARTED_HARD = fields.InstanceTaskState.REBOOT_STARTED_HARD # possible task states during pause() PAUSING = fields.InstanceTaskState.PAUSING # possible task states during unpause() UNPAUSING = fields.InstanceTaskState.UNPAUSING # possible task states during suspend() SUSPENDING = fields.InstanceTaskState.SUSPENDING # possible task states during resume() RESUMING = fields.InstanceTaskState.RESUMING # possible task states during power_off() POWERING_OFF = fields.InstanceTaskState.POWERING_OFF # possible task states during power_on() POWERING_ON = fields.InstanceTaskState.POWERING_ON # possible task states during rescue() RESCUING = fields.InstanceTaskState.RESCUING # possible task states during unrescue() UNRESCUING = fields.InstanceTaskState.UNRESCUING # possible task states during rebuild() REBUILDING = fields.InstanceTaskState.REBUILDING REBUILD_BLOCK_DEVICE_MAPPING = \ fields.InstanceTaskState.REBUILD_BLOCK_DEVICE_MAPPING REBUILD_SPAWNING = fields.InstanceTaskState.REBUILD_SPAWNING # possible task states during live_migrate() MIGRATING = fields.InstanceTaskState.MIGRATING # possible task states during delete() DELETING = fields.InstanceTaskState.DELETING # possible task states during soft_delete() SOFT_DELETING = fields.InstanceTaskState.SOFT_DELETING # possible task states during restore() RESTORING = fields.InstanceTaskState.RESTORING # possible task states during shelve() SHELVING = fields.InstanceTaskState.SHELVING SHELVING_IMAGE_PENDING_UPLOAD = \ fields.InstanceTaskState.SHELVING_IMAGE_PENDING_UPLOAD SHELVING_IMAGE_UPLOADING = fields.InstanceTaskState.SHELVING_IMAGE_UPLOADING # possible task states during shelve_offload() SHELVING_OFFLOADING = fields.InstanceTaskState.SHELVING_OFFLOADING # possible task states during unshelve() UNSHELVING = fields.InstanceTaskState.UNSHELVING ALLOW_REBOOT = [None, REBOOTING, REBOOT_PENDING, REBOOT_STARTED, RESUMING, REBOOTING_HARD, UNPAUSING, PAUSING, SUSPENDING] # These 
states indicate a reboot soft_reboot_states = (REBOOTING, REBOOT_PENDING, REBOOT_STARTED) hard_reboot_states = (REBOOTING_HARD, REBOOT_PENDING_HARD, REBOOT_STARTED_HARD) # These states indicate a resize in progress resizing_states = (RESIZE_PREP, RESIZE_MIGRATING, RESIZE_MIGRATED, RESIZE_FINISH) # These states indicate a rebuild rebuild_states = (REBUILDING, REBUILD_BLOCK_DEVICE_MAPPING, REBUILD_SPAWNING) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/utils.py0000664000175000017500000020117000000000000016714 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Compute-related Utilities and helpers.""" import contextlib import functools import inspect import itertools import math import socket import traceback from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import excutils import psutil from nova.accelerator import cyborg from nova import block_device from nova.compute import power_state from nova.compute import task_states from nova.compute import vm_states import nova.conf from nova import exception from nova import notifications from nova.notifications.objects import aggregate as aggregate_notification from nova.notifications.objects import base as notification_base from nova.notifications.objects import compute_task as task_notification from nova.notifications.objects import exception as notification_exception from nova.notifications.objects import flavor as flavor_notification from nova.notifications.objects import instance as instance_notification from nova.notifications.objects import keypair as keypair_notification from nova.notifications.objects import libvirt as libvirt_notification from nova.notifications.objects import metrics as metrics_notification from nova.notifications.objects import request_spec as reqspec_notification from nova.notifications.objects import scheduler as scheduler_notification from nova.notifications.objects import server_group as sg_notification from nova.notifications.objects import volume as volume_notification from nova import objects from nova.objects import fields from nova import rpc from nova import safe_utils from nova import utils CONF = nova.conf.CONF LOG = log.getLogger(__name__) # These properties are specific to a particular image by design. It # does not make sense for them to be inherited by server snapshots. # This list is distinct from the configuration option of the same # (lowercase) name. NON_INHERITABLE_IMAGE_PROPERTIES = frozenset([ 'cinder_encryption_key_id', 'cinder_encryption_key_deletion_policy', 'img_signature', 'img_signature_hash_method', 'img_signature_key_type', 'img_signature_certificate_uuid']) # Properties starting with these namespaces are reserved for internal # use by other services. It does not make sense (and may cause a request # fail) if we include them in a snapshot. 
NON_INHERITABLE_IMAGE_NAMESPACES = frozenset([ 'os_glance', ]) def exception_to_dict(fault, message=None): """Converts exceptions to a dict for use in notifications. :param fault: Exception that occurred :param message: Optional fault message, otherwise the message is derived from the fault itself. :returns: dict with the following items: - exception: the fault itself - message: one of (in priority order): - the provided message to this method - a formatted NovaException message - the fault class name - code: integer code for the fault (defaults to 500) """ # TODO(johngarbutt) move to nova/exception.py to share with wrap_exception code = 500 if hasattr(fault, "kwargs"): code = fault.kwargs.get('code', 500) # get the message from the exception that was thrown # if that does not exist, use the name of the exception class itself try: if not message: message = fault.format_message() # These exception handlers are broad so we don't fail to log the fault # just because there is an unexpected error retrieving the message except Exception: # In this case either we have a NovaException which failed to format # the message or we have a non-nova exception which could contain # sensitive details. Since we're not sure, be safe and set the message # to the exception class name. Note that we don't guard on # context.is_admin here because the message is always shown in the API, # even to non-admin users (e.g. NoValidHost) but only the traceback # details are shown to users with the admin role. Checking for admin # context here is also not helpful because admins can perform # operations on a tenant user's server (migrations, reboot, etc) and # service startup and periodic tasks could take actions on a server # and those use an admin context. message = fault.__class__.__name__ # NOTE(dripton) The message field in the database is limited to 255 chars. # MySQL silently truncates overly long messages, but PostgreSQL throws an # error if we don't truncate it. u_message = utils.safe_truncate(message, 255) fault_dict = dict(exception=fault) fault_dict["message"] = u_message fault_dict["code"] = code return fault_dict def _get_fault_details(exc_info, error_code): details = '' # TODO(mriedem): Why do we only include the details if the code is 500? # Though for non-nova exceptions the code will probably be 500. if exc_info and error_code == 500: # We get the full exception details including the value since # the fault message may not contain that information for non-nova # exceptions (see exception_to_dict). details = ''.join(traceback.format_exception( exc_info[0], exc_info[1], exc_info[2])) return str(details) def add_instance_fault_from_exc(context, instance, fault, exc_info=None, fault_message=None): """Adds the specified fault to the database.""" fault_obj = objects.InstanceFault(context=context) fault_obj.host = CONF.host fault_obj.instance_uuid = instance.uuid fault_obj.update(exception_to_dict(fault, message=fault_message)) code = fault_obj.code fault_obj.details = _get_fault_details(exc_info, code) fault_obj.create() def get_device_name_for_instance(instance, bdms, device): """Validates (or generates) a device name for instance. This method is a wrapper for get_next_device_name that gets the list of used devices and the root device from a block device mapping. :raises TooManyDiskDevices: if the maximum allowed devices to attach to a single instance is exceeded. 
""" mappings = block_device.instance_block_mapping(instance, bdms) return get_next_device_name(instance, mappings.values(), mappings['root'], device) def default_device_names_for_instance(instance, root_device_name, *block_device_lists): """Generate missing device names for an instance. :raises TooManyDiskDevices: if the maximum allowed devices to attach to a single instance is exceeded. """ dev_list = [bdm.device_name for bdm in itertools.chain(*block_device_lists) if bdm.device_name] if root_device_name not in dev_list: dev_list.append(root_device_name) for bdm in itertools.chain(*block_device_lists): dev = bdm.device_name if not dev: dev = get_next_device_name(instance, dev_list, root_device_name) bdm.device_name = dev bdm.save() dev_list.append(dev) def check_max_disk_devices_to_attach(num_devices): maximum = CONF.compute.max_disk_devices_to_attach if maximum < 0: return if num_devices > maximum: raise exception.TooManyDiskDevices(maximum=maximum) def get_next_device_name(instance, device_name_list, root_device_name=None, device=None): """Validates (or generates) a device name for instance. If device is not set, it will generate a unique device appropriate for the instance. It uses the root_device_name (if provided) and the list of used devices to find valid device names. If the device name is valid but applicable to a different backend (for example /dev/vdc is specified but the backend uses /dev/xvdc), the device name will be converted to the appropriate format. :raises TooManyDiskDevices: if the maximum allowed devices to attach to a single instance is exceeded. """ req_prefix = None req_letter = None if device: try: req_prefix, req_letter = block_device.match_device(device) except (TypeError, AttributeError, ValueError): raise exception.InvalidDevicePath(path=device) if not root_device_name: root_device_name = block_device.DEFAULT_ROOT_DEV_NAME try: prefix = block_device.match_device( block_device.prepend_dev(root_device_name))[0] except (TypeError, AttributeError, ValueError): raise exception.InvalidDevicePath(path=root_device_name) if req_prefix != prefix: LOG.debug("Using %(prefix)s instead of %(req_prefix)s", {'prefix': prefix, 'req_prefix': req_prefix}) used_letters = set() for device_path in device_name_list: letter = block_device.get_device_letter(device_path) used_letters.add(letter) check_max_disk_devices_to_attach(len(used_letters) + 1) if not req_letter: req_letter = _get_unused_letter(used_letters) if req_letter in used_letters: raise exception.DevicePathInUse(path=device) return prefix + req_letter def get_root_bdm(context, instance, bdms=None): if bdms is None: if isinstance(instance, objects.Instance): uuid = instance.uuid else: uuid = instance['uuid'] bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, uuid) return bdms.root_bdm() def is_volume_backed_instance(context, instance, bdms=None): root_bdm = get_root_bdm(context, instance, bdms) if root_bdm is not None: return root_bdm.is_volume # in case we hit a very old instance without root bdm, we _assume_ that # instance is backed by a volume, if and only if image_ref is not set if isinstance(instance, objects.Instance): return not instance.image_ref return not instance['image_ref'] def heal_reqspec_is_bfv(ctxt, request_spec, instance): """Calculates the is_bfv flag for a RequestSpec created before Rocky. 
Starting in Rocky, new instances have their RequestSpec created with the "is_bfv" flag to indicate if they are volume-backed which is used by the scheduler when determining root disk resource allocations. RequestSpecs created before Rocky will not have the is_bfv flag set so we need to calculate it here and update the RequestSpec. :param ctxt: nova.context.RequestContext auth context :param request_spec: nova.objects.RequestSpec used for scheduling :param instance: nova.objects.Instance being scheduled """ if 'is_bfv' in request_spec: return # Determine if this is a volume-backed instance and set the field # in the request spec accordingly. request_spec.is_bfv = is_volume_backed_instance(ctxt, instance) request_spec.save() def convert_mb_to_ceil_gb(mb_value): gb_int = 0 if mb_value: gb_float = mb_value / 1024.0 # ensure we reserve/allocate enough space by rounding up to nearest GB gb_int = int(math.ceil(gb_float)) return gb_int def _get_unused_letter(used_letters): # Return the first unused device letter index = 0 while True: letter = block_device.generate_device_letter(index) if letter not in used_letters: return letter index += 1 def get_value_from_system_metadata(instance, key, type, default): """Get a value of a specified type from image metadata. @param instance: The instance object @param key: The name of the property to get @param type: The python type the value is be returned as @param default: The value to return if key is not set or not the right type """ value = instance.system_metadata.get(key, default) try: return type(value) except ValueError: LOG.warning("Metadata value %(value)s for %(key)s is not of " "type %(type)s. Using default value %(default)s.", {'value': value, 'key': key, 'type': type, 'default': default}, instance=instance) return default def notify_usage_exists(notifier, context, instance_ref, host, current_period=False, ignore_missing_network_data=True, system_metadata=None, extra_usage_info=None): """Generates 'exists' unversioned legacy and transformed notification for an instance for usage auditing purposes. :param notifier: a messaging.Notifier :param context: request context for the current operation :param instance_ref: nova.objects.Instance object from which to report usage :param host: the host emitting the notification :param current_period: if True, this will generate a usage for the current usage period; if False, this will generate a usage for the previous audit period. :param ignore_missing_network_data: if True, log any exceptions generated while getting network info; if False, raise the exception. :param system_metadata: system_metadata override for the instance. If None, the instance_ref.system_metadata will be used. :param extra_usage_info: Dictionary containing extra values to add or override in the notification if not None. 
""" audit_start, audit_end = notifications.audit_period_bounds(current_period) if system_metadata is None: system_metadata = utils.instance_sys_meta(instance_ref) # add image metadata to the notification: image_meta = notifications.image_meta(system_metadata) extra_info = dict(audit_period_beginning=str(audit_start), audit_period_ending=str(audit_end), image_meta=image_meta) if extra_usage_info: extra_info.update(extra_usage_info) notify_about_instance_usage(notifier, context, instance_ref, 'exists', extra_usage_info=extra_info) audit_period = instance_notification.AuditPeriodPayload( audit_period_beginning=audit_start, audit_period_ending=audit_end, ) payload = instance_notification.InstanceExistsPayload( context=context, instance=instance_ref, audit_period=audit_period, ) notification = instance_notification.InstanceExistsNotification( context=context, priority=fields.NotificationPriority.INFO, publisher=notification_base.NotificationPublisher( host=host, source=fields.NotificationSource.COMPUTE, ), event_type=notification_base.EventType( object='instance', action=fields.NotificationAction.EXISTS, ), payload=payload, ) notification.emit(context) def notify_about_instance_usage(notifier, context, instance, event_suffix, network_info=None, extra_usage_info=None, fault=None, best_effort=False): """Send an unversioned legacy notification about an instance. All new notifications should use notify_about_instance_action which sends a versioned notification. :param notifier: a messaging.Notifier :param event_suffix: Event type like "delete.start" or "exists" :param network_info: Networking information, if provided. :param extra_usage_info: Dictionary containing extra values to add or override in the notification. """ if not extra_usage_info: extra_usage_info = {} usage_info = notifications.info_from_instance(context, instance, network_info, populate_image_ref_url=True, **extra_usage_info) if fault: # NOTE(johngarbutt) mirrors the format in wrap_exception fault_payload = exception_to_dict(fault) LOG.debug(fault_payload["message"], instance=instance) usage_info.update(fault_payload) if event_suffix.endswith("error"): method = notifier.error else: method = notifier.info try: method(context, 'compute.instance.%s' % event_suffix, usage_info) except Exception as e: if best_effort: LOG.error('Exception during notification sending: %s. ' 'Attempting to proceed with normal operation.', e) else: raise e def _get_fault_and_priority_from_exception(exception: Exception): fault = None priority = fields.NotificationPriority.INFO if not exception: return fault, priority fault = notification_exception.ExceptionPayload.from_exception(exception) priority = fields.NotificationPriority.ERROR return fault, priority @rpc.if_notifications_enabled def notify_about_instance_action(context, instance, host, action, phase=None, source=fields.NotificationSource.COMPUTE, exception=None, bdms=None, best_effort=False): """Send versioned notification about the action made on the instance :param instance: the instance which the action performed on :param host: the host emitting the notification :param action: the name of the action :param phase: the phase of the action :param source: the source of the notification :param exception: the thrown exception (used in error notifications) :param bdms: BlockDeviceMappingList object for the instance. 
If it is not provided then we will load it from the db if so configured """ fault, priority = _get_fault_and_priority_from_exception(exception) payload = instance_notification.InstanceActionPayload( context=context, instance=instance, fault=fault, bdms=bdms) notification = instance_notification.InstanceActionNotification( context=context, priority=priority, publisher=notification_base.NotificationPublisher( host=host, source=source), event_type=notification_base.EventType( object='instance', action=action, phase=phase), payload=payload) try: notification.emit(context) except Exception as e: if best_effort: LOG.error('Exception during notification sending: %s. ' 'Attempting to proceed with normal operation.', e) else: raise e @rpc.if_notifications_enabled def notify_about_instance_create(context, instance, host, phase=None, exception=None, bdms=None): """Send versioned notification about instance creation :param context: the request context :param instance: the instance being created :param host: the host emitting the notification :param phase: the phase of the creation :param exception: the thrown exception (used in error notifications) :param bdms: BlockDeviceMappingList object for the instance. If it is not provided then we will load it from the db if so configured """ fault, priority = _get_fault_and_priority_from_exception(exception) payload = instance_notification.InstanceCreatePayload( context=context, instance=instance, fault=fault, bdms=bdms) notification = instance_notification.InstanceCreateNotification( context=context, priority=priority, publisher=notification_base.NotificationPublisher( host=host, source=fields.NotificationSource.COMPUTE), event_type=notification_base.EventType( object='instance', action=fields.NotificationAction.CREATE, phase=phase), payload=payload) notification.emit(context) @rpc.if_notifications_enabled def notify_about_scheduler_action(context, request_spec, action, phase=None, source=fields.NotificationSource.SCHEDULER): """Send versioned notification about the action made by the scheduler :param context: the RequestContext object :param request_spec: the RequestSpec object :param action: the name of the action :param phase: the phase of the action :param source: the source of the notification """ payload = reqspec_notification.RequestSpecPayload( request_spec=request_spec) notification = scheduler_notification.SelectDestinationsNotification( context=context, priority=fields.NotificationPriority.INFO, publisher=notification_base.NotificationPublisher( host=CONF.host, source=source), event_type=notification_base.EventType( object='scheduler', action=action, phase=phase), payload=payload) notification.emit(context) @rpc.if_notifications_enabled def notify_about_volume_attach_detach(context, instance, host, action, phase, volume_id=None, exception=None): """Send versioned notification about the action made on the instance :param instance: the instance which the action performed on :param host: the host emitting the notification :param action: the name of the action :param phase: the phase of the action :param volume_id: id of the volume will be attached :param exception: the thrown exception (used in error notifications) """ fault, priority = _get_fault_and_priority_from_exception(exception) payload = instance_notification.InstanceActionVolumePayload( context=context, instance=instance, fault=fault, volume_id=volume_id) notification = instance_notification.InstanceActionVolumeNotification( context=context, priority=priority, 
publisher=notification_base.NotificationPublisher( host=host, source=fields.NotificationSource.COMPUTE), event_type=notification_base.EventType( object='instance', action=action, phase=phase), payload=payload) notification.emit(context) @rpc.if_notifications_enabled def notify_about_share_attach_detach(context, instance, host, action, phase, share_id=None, exception=None): """Send versioned notification about the action made on the instance :param instance: the instance which the action performed on :param host: the host emitting the notification :param action: the name of the action :param phase: the phase of the action :param share_info: share related information :param exception: the thrown exception (used in error notifications) """ fault, priority = _get_fault_and_priority_from_exception(exception) payload = instance_notification.InstanceActionSharePayload( context=context, instance=instance, fault=fault, share_id=share_id ) notification = instance_notification.InstanceActionShareNotification( context=context, priority=priority, publisher=notification_base.NotificationPublisher( host=host, source=fields.NotificationSource.API), event_type=notification_base.EventType( object='instance', action=action, phase=phase), payload=payload) notification.emit(context) @rpc.if_notifications_enabled def notify_about_instance_rescue_action(context, instance, host, rescue_image_ref, phase=None, exception=None): """Send versioned notification about the action made on the instance :param instance: the instance which the action performed on :param host: the host emitting the notification :param rescue_image_ref: the rescue image ref :param phase: the phase of the action :param exception: the thrown exception (used in error notifications) """ fault, priority = _get_fault_and_priority_from_exception(exception) payload = instance_notification.InstanceActionRescuePayload( context=context, instance=instance, fault=fault, rescue_image_ref=rescue_image_ref) notification = instance_notification.InstanceActionRescueNotification( context=context, priority=priority, publisher=notification_base.NotificationPublisher( host=host, source=fields.NotificationSource.COMPUTE), event_type=notification_base.EventType( object='instance', action=fields.NotificationAction.RESCUE, phase=phase), payload=payload) notification.emit(context) @rpc.if_notifications_enabled def notify_about_keypair_action(context, keypair, action, phase): """Send versioned notification about the keypair action on the instance :param context: the request context :param keypair: the keypair which the action performed on :param action: the name of the action :param phase: the phase of the action """ payload = keypair_notification.KeypairPayload(keypair=keypair) notification = keypair_notification.KeypairNotification( priority=fields.NotificationPriority.INFO, publisher=notification_base.NotificationPublisher( host=CONF.host, source=fields.NotificationSource.API), event_type=notification_base.EventType( object='keypair', action=action, phase=phase), payload=payload) notification.emit(context) @rpc.if_notifications_enabled def notify_about_volume_swap(context, instance, host, phase, old_volume_id, new_volume_id, exception=None): """Send versioned notification about the volume swap action on the instance :param context: the request context :param instance: the instance which the action performed on :param host: the host emitting the notification :param phase: the phase of the action :param old_volume_id: the ID of the volume that is copied from and 
detached :param new_volume_id: the ID of the volume that is copied to and attached :param exception: an exception """ fault, priority = _get_fault_and_priority_from_exception(exception) payload = instance_notification.InstanceActionVolumeSwapPayload( context=context, instance=instance, fault=fault, old_volume_id=old_volume_id, new_volume_id=new_volume_id) instance_notification.InstanceActionVolumeSwapNotification( context=context, priority=priority, publisher=notification_base.NotificationPublisher( host=host, source=fields.NotificationSource.COMPUTE), event_type=notification_base.EventType( object='instance', action=fields.NotificationAction.VOLUME_SWAP, phase=phase), payload=payload).emit(context) @rpc.if_notifications_enabled def notify_about_instance_snapshot(context, instance, host, phase, snapshot_image_id): """Send versioned notification about the snapshot action executed on the instance :param context: the request context :param instance: the instance from which a snapshot image is being created :param host: the host emitting the notification :param phase: the phase of the action :param snapshot_image_id: the ID of the snapshot """ payload = instance_notification.InstanceActionSnapshotPayload( context=context, instance=instance, fault=None, snapshot_image_id=snapshot_image_id) instance_notification.InstanceActionSnapshotNotification( context=context, priority=fields.NotificationPriority.INFO, publisher=notification_base.NotificationPublisher( host=host, source=fields.NotificationSource.COMPUTE), event_type=notification_base.EventType( object='instance', action=fields.NotificationAction.SNAPSHOT, phase=phase), payload=payload).emit(context) @rpc.if_notifications_enabled def notify_about_resize_prep_instance(context, instance, host, phase, new_flavor): """Send versioned notification about the instance resize action on the instance :param context: the request context :param instance: the instance which the resize action performed on :param host: the host emitting the notification :param phase: the phase of the action :param new_flavor: new flavor """ payload = instance_notification.InstanceActionResizePrepPayload( context=context, instance=instance, fault=None, new_flavor=flavor_notification.FlavorPayload(flavor=new_flavor)) instance_notification.InstanceActionResizePrepNotification( context=context, priority=fields.NotificationPriority.INFO, publisher=notification_base.NotificationPublisher( host=host, source=fields.NotificationSource.COMPUTE), event_type=notification_base.EventType( object='instance', action=fields.NotificationAction.RESIZE_PREP, phase=phase), payload=payload).emit(context) def notify_about_server_group_update(context, event_suffix, sg_payload): """Send a notification about server group update. :param event_suffix: Event type like "create.start" or "create.end" :param sg_payload: payload for server group update """ notifier = rpc.get_notifier(service='servergroup') notifier.info(context, 'servergroup.%s' % event_suffix, sg_payload) def notify_about_aggregate_update(context, event_suffix, aggregate_payload): """Send a notification about aggregate update. 
:param event_suffix: Event type like "create.start" or "create.end" :param aggregate_payload: payload for aggregate update """ aggregate_identifier = aggregate_payload.get('aggregate_id', None) if not aggregate_identifier: aggregate_identifier = aggregate_payload.get('name', None) if not aggregate_identifier: LOG.debug("No aggregate id or name specified for this " "notification and it will be ignored") return notifier = rpc.get_notifier(service='aggregate', host=aggregate_identifier) notifier.info(context, 'aggregate.%s' % event_suffix, aggregate_payload) @rpc.if_notifications_enabled def notify_about_aggregate_action(context, aggregate, action, phase): payload = aggregate_notification.AggregatePayload(aggregate) notification = aggregate_notification.AggregateNotification( priority=fields.NotificationPriority.INFO, publisher=notification_base.NotificationPublisher( host=CONF.host, source=fields.NotificationSource.API), event_type=notification_base.EventType( object='aggregate', action=action, phase=phase), payload=payload) notification.emit(context) @rpc.if_notifications_enabled def notify_about_aggregate_cache(context, aggregate, host, image_status, index, total): """Send a notification about aggregate cache_images progress. :param context: The RequestContext :param aggregate: The target aggregate :param host: The host within the aggregate for which to report status :param image_status: The result from the compute host, which is a dict of {image_id: status} :param index: An integer indicating progress toward completion, between 1 and $total :param total: The total number of hosts being processed in this operation, to bound $index """ success_statuses = ('cached', 'existing') payload = aggregate_notification.AggregateCachePayload(aggregate, host, index, total) payload.images_cached = [] payload.images_failed = [] for img, status in image_status.items(): if status in success_statuses: payload.images_cached.append(img) else: payload.images_failed.append(img) notification = aggregate_notification.AggregateCacheNotification( priority=fields.NotificationPriority.INFO, publisher=notification_base.NotificationPublisher( host=CONF.host, source=fields.NotificationSource.CONDUCTOR), event_type=notification_base.EventType( object='aggregate', action=fields.NotificationAction.IMAGE_CACHE, phase=fields.NotificationPhase.PROGRESS), payload=payload) notification.emit(context) def notify_about_host_update(context, event_suffix, host_payload): """Send a notification about host update. :param event_suffix: Event type like "create.start" or "create.end" :param host_payload: payload for host update. It is a dict and there should be at least the 'host_name' key in this dict. 
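    A minimal illustrative payload (the keys other than 'host_name' are an
    assumption added here for clarity, not taken from the callers): a host
    enable/disable event might send something like
    {'host_name': 'compute-01', 'enabled': False}. Only 'host_name' is
    required by this helper; it is used as the host of the notifier that
    emits 'HostAPI.<event_suffix>'.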
""" host_identifier = host_payload.get('host_name') if not host_identifier: LOG.warning("No host name specified for the notification of " "HostAPI.%s and it will be ignored", event_suffix) return notifier = rpc.get_notifier(service='api', host=host_identifier) notifier.info(context, 'HostAPI.%s' % event_suffix, host_payload) @rpc.if_notifications_enabled def notify_about_server_group_action(context, group, action): payload = sg_notification.ServerGroupPayload(group) notification = sg_notification.ServerGroupNotification( priority=fields.NotificationPriority.INFO, publisher=notification_base.NotificationPublisher( host=CONF.host, source=fields.NotificationSource.API), event_type=notification_base.EventType( object='server_group', action=action), payload=payload) notification.emit(context) @rpc.if_notifications_enabled def notify_about_server_group_add_member(context, group_id): group = objects.InstanceGroup.get_by_uuid(context, group_id) payload = sg_notification.ServerGroupPayload(group) notification = sg_notification.ServerGroupNotification( priority=fields.NotificationPriority.INFO, publisher=notification_base.NotificationPublisher( host=CONF.host, source=fields.NotificationSource.API), event_type=notification_base.EventType( object='server_group', action=fields.NotificationAction.ADD_MEMBER), payload=payload) notification.emit(context) @rpc.if_notifications_enabled def notify_about_instance_rebuild(context, instance, host, action=fields.NotificationAction.REBUILD, phase=None, source=fields.NotificationSource.COMPUTE, exception=None, bdms=None): """Send versioned notification about instance rebuild :param instance: the instance which the action performed on :param host: the host emitting the notification :param action: the name of the action :param phase: the phase of the action :param source: the source of the notification :param exception: the thrown exception (used in error notifications) :param bdms: BlockDeviceMappingList object for the instance. 
If it is not provided then we will load it from the db if so configured """ fault, priority = _get_fault_and_priority_from_exception(exception) payload = instance_notification.InstanceActionRebuildPayload( context=context, instance=instance, fault=fault, bdms=bdms) notification = instance_notification.InstanceActionRebuildNotification( context=context, priority=priority, publisher=notification_base.NotificationPublisher( host=host, source=source), event_type=notification_base.EventType( object='instance', action=action, phase=phase), payload=payload) notification.emit(context) @rpc.if_notifications_enabled def notify_about_metrics_update(context, host, host_ip, nodename, monitor_metric_list): """Send versioned notification about updating metrics :param context: the request context :param host: the host emitting the notification :param host_ip: the IP address of the host :param nodename: the node name :param monitor_metric_list: the MonitorMetricList object """ payload = metrics_notification.MetricsPayload( host=host, host_ip=host_ip, nodename=nodename, monitor_metric_list=monitor_metric_list) notification = metrics_notification.MetricsNotification( context=context, priority=fields.NotificationPriority.INFO, publisher=notification_base.NotificationPublisher( host=host, source=fields.NotificationSource.COMPUTE), event_type=notification_base.EventType( object='metrics', action=fields.NotificationAction.UPDATE), payload=payload) notification.emit(context) @rpc.if_notifications_enabled def notify_about_libvirt_connect_error(context, ip, exception): """Send a versioned notification about libvirt connect error. :param context: the request context :param ip: the IP address of the host :param exception: the thrown exception """ fault, _ = _get_fault_and_priority_from_exception(exception) payload = libvirt_notification.LibvirtErrorPayload(ip=ip, reason=fault) notification = libvirt_notification.LibvirtErrorNotification( priority=fields.NotificationPriority.ERROR, publisher=notification_base.NotificationPublisher( host=CONF.host, source=fields.NotificationSource.COMPUTE), event_type=notification_base.EventType( object='libvirt', action=fields.NotificationAction.CONNECT, phase=fields.NotificationPhase.ERROR), payload=payload) notification.emit(context) @rpc.if_notifications_enabled def notify_about_volume_usage(context, vol_usage, host): """Send versioned notification about the volume usage :param context: the request context :param vol_usage: the volume usage object :param host: the host emitting the notification """ payload = volume_notification.VolumeUsagePayload( vol_usage=vol_usage) notification = volume_notification.VolumeUsageNotification( context=context, priority=fields.NotificationPriority.INFO, publisher=notification_base.NotificationPublisher( host=host, source=fields.NotificationSource.COMPUTE), event_type=notification_base.EventType( object='volume', action=fields.NotificationAction.USAGE), payload=payload) notification.emit(context) @rpc.if_notifications_enabled def notify_about_compute_task_error(context, action, instance_uuid, request_spec, state, exception): """Send a versioned notification about compute task error. 
:param context: the request context :param action: the name of the action :param instance_uuid: the UUID of the instance :param request_spec: the request spec object or the dict includes request spec information :param state: the vm state of the instance :param exception: the thrown exception :param tb: the traceback """ if (request_spec is not None and not isinstance(request_spec, objects.RequestSpec)): request_spec = objects.RequestSpec.from_primitives( context, request_spec, {}) fault, _ = _get_fault_and_priority_from_exception(exception) payload = task_notification.ComputeTaskPayload( instance_uuid=instance_uuid, request_spec=request_spec, state=state, reason=fault) notification = task_notification.ComputeTaskNotification( priority=fields.NotificationPriority.ERROR, publisher=notification_base.NotificationPublisher( host=CONF.host, source=fields.NotificationSource.CONDUCTOR), event_type=notification_base.EventType( object='compute_task', action=action, phase=fields.NotificationPhase.ERROR), payload=payload) notification.emit(context) def refresh_info_cache_for_instance(context, instance): """Refresh the info cache for an instance. :param instance: The instance object. """ if instance.info_cache is not None and not instance.deleted: # Catch the exception in case the instance got deleted after the check # instance.deleted was executed try: instance.info_cache.refresh() except exception.InstanceInfoCacheNotFound: LOG.debug("Can not refresh info_cache because instance " "was not found", instance=instance) def get_reboot_type(task_state, current_power_state): """Checks if the current instance state requires a HARD reboot.""" if current_power_state != power_state.RUNNING: return 'HARD' if task_state in task_states.soft_reboot_states: return 'SOFT' return 'HARD' def get_machine_ips(): """Get the machine's ip addresses :returns: list of Strings of ip addresses """ addresses = [] for interface, ifaddresses in psutil.net_if_addrs().items(): for ifaddress in ifaddresses: if ifaddress.family not in (socket.AF_INET, socket.AF_INET6): continue addr = ifaddress.address # If we have an ipv6 address remove the # %ether_interface at the end if ifaddress.family == socket.AF_INET6: addr = addr.split('%')[0] addresses.append(addr) return addresses def upsize_quota_delta(new_flavor, old_flavor): """Calculate deltas required to adjust quota for an instance upsize. 
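    Illustrative example (numbers chosen here for clarity, not taken from the
    original docstring): upsizing from a flavor with vcpus=2, memory_mb=2048
    to one with vcpus=4, memory_mb=4096 yields {'cores': 2, 'ram': 2048}.
    Resources that shrink or stay the same are omitted, so a flavor change
    that does not grow vcpus or memory_mb returns an empty dict.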
:param new_flavor: the target instance type :param old_flavor: the original instance type """ def _quota_delta(resource): return (new_flavor[resource] - old_flavor[resource]) deltas = {} if _quota_delta('vcpus') > 0: deltas['cores'] = _quota_delta('vcpus') if _quota_delta('memory_mb') > 0: deltas['ram'] = _quota_delta('memory_mb') return deltas def get_headroom(quotas, usages, deltas): headroom = {res: quotas[res] - usages[res] for res in quotas.keys()} # If quota_cores is unlimited [-1]: # - set cores headroom based on instances headroom: if quotas.get('cores') == -1: if deltas.get('cores'): hc = headroom.get('instances', 1) * deltas['cores'] headroom['cores'] = hc / deltas.get('instances', 1) else: headroom['cores'] = headroom.get('instances', 1) # If quota_ram is unlimited [-1]: # - set ram headroom based on instances headroom: if quotas.get('ram') == -1: if deltas.get('ram'): hr = headroom.get('instances', 1) * deltas['ram'] headroom['ram'] = hr / deltas.get('instances', 1) else: headroom['ram'] = headroom.get('instances', 1) return headroom def check_num_instances_quota( context, flavor, min_count, max_count, project_id=None, user_id=None, orig_num_req=None, ): """Enforce quota limits on number of instances created.""" # project_id is also used for the TooManyInstances error message if project_id is None: project_id = context.project_id if user_id is None: user_id = context.user_id # Check whether we need to count resources per-user and check a per-user # quota limit. If we have no per-user quota limit defined for a # project/user, we can avoid wasteful resource counting. user_quotas = objects.Quotas.get_all_by_project_and_user( context, project_id, user_id) if not any(r in user_quotas for r in ['instances', 'cores', 'ram']): user_id = None # Determine requested cores and ram req_cores = max_count * flavor.vcpus req_ram = max_count * flavor.memory_mb deltas = {'instances': max_count, 'cores': req_cores, 'ram': req_ram} try: # NOTE(johngarbutt) when using unified limits, this is call # is a no op, and as such, this function always returns max_count objects.Quotas.check_deltas(context, deltas, project_id, user_id=user_id, check_project_id=project_id, check_user_id=user_id) except exception.OverQuota as exc: quotas = exc.kwargs['quotas'] overs = exc.kwargs['overs'] usages = exc.kwargs['usages'] # This is for the recheck quota case where we used a delta of zero. if min_count == max_count == 0: # orig_num_req is the original number of instances requested in the # case of a recheck quota, for use in the over quota exception. req_cores = orig_num_req * flavor.vcpus req_ram = orig_num_req * flavor.memory_mb requested = {'instances': orig_num_req, 'cores': req_cores, 'ram': req_ram} (overs, reqs, total_alloweds, useds) = get_over_quota_detail( deltas, overs, quotas, requested) msg = "Cannot run any more instances of this type." params = {'overs': overs, 'pid': project_id, 'msg': msg} LOG.debug("%(overs)s quota exceeded for %(pid)s. %(msg)s", params) raise exception.TooManyInstances(overs=overs, req=reqs, used=useds, allowed=total_alloweds) # OK, we exceeded quota; let's figure out why... 
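        # Worked example (illustrative numbers, not from the original code):
        # with quotas {'instances': 10, 'cores': 20, 'ram': 51200} and usages
        # {'instances': 8, 'cores': 16, 'ram': 40960}, get_headroom() yields
        # {'instances': 2, 'cores': 4, 'ram': 10240}. For a flavor with
        # vcpus=2 and memory_mb=4096, 'allowed' below becomes
        # min(2, 4 // 2, 10240 // 4096) == 2, i.e. at most two more instances
        # of that flavor still fit within quota.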
headroom = get_headroom(quotas, usages, deltas) allowed = headroom.get('instances', 1) # Reduce 'allowed' instances in line with the cores & ram headroom if flavor.vcpus: allowed = min(allowed, headroom['cores'] // flavor.vcpus) if flavor.memory_mb: allowed = min(allowed, headroom['ram'] // flavor.memory_mb) # Convert to the appropriate exception message if allowed <= 0: msg = "Cannot run any more instances of this type." elif min_count <= allowed <= max_count: # We're actually OK, but still need to check against allowed return check_num_instances_quota( context, flavor, min_count, allowed, project_id=project_id, user_id=user_id) else: msg = "Can only run %s more instances of this type." % allowed num_instances = (str(min_count) if min_count == max_count else "%s-%s" % (min_count, max_count)) requested = dict(instances=num_instances, cores=req_cores, ram=req_ram) (overs, reqs, total_alloweds, useds) = get_over_quota_detail( headroom, overs, quotas, requested) params = {'overs': overs, 'pid': project_id, 'min_count': min_count, 'max_count': max_count, 'msg': msg} if min_count == max_count: LOG.debug("%(overs)s quota exceeded for %(pid)s," " tried to run %(min_count)d instances. " "%(msg)s", params) else: LOG.debug("%(overs)s quota exceeded for %(pid)s," " tried to run between %(min_count)d and" " %(max_count)d instances. %(msg)s", params) raise exception.TooManyInstances(overs=overs, req=reqs, used=useds, allowed=total_alloweds) return max_count def get_over_quota_detail(headroom, overs, quotas, requested): reqs = [] useds = [] total_alloweds = [] for resource in overs: reqs.append(str(requested[resource])) useds.append(str(quotas[resource] - headroom[resource])) total_alloweds.append(str(quotas[resource])) (overs, reqs, useds, total_alloweds) = map(', '.join, ( overs, reqs, useds, total_alloweds)) return overs, reqs, total_alloweds, useds def remove_shelved_keys_from_system_metadata(instance): # Delete system_metadata for a shelved instance for key in ['shelved_at', 'shelved_image_id', 'shelved_host']: if key in instance.system_metadata: del (instance.system_metadata[key]) def create_image(context, instance, name, image_type, image_api, extra_properties=None): """Create new image entry in the image service. This new image will be reserved for the compute manager to upload a snapshot or backup. :param context: security context :param instance: nova.objects.instance.Instance object :param name: string for name of the snapshot :param image_type: snapshot | backup :param image_api: instance of nova.image.glance.API :param extra_properties: dict of extra image properties to include """ properties = { 'instance_uuid': instance.uuid, 'user_id': str(context.user_id), 'image_type': image_type, } properties.update(extra_properties or {}) image_meta = initialize_instance_snapshot_metadata( context, instance, name, properties) # if we're making a snapshot, omit the disk and container formats, # since the image may have been converted to another format, and the # original values won't be accurate. The driver will populate these # with the correct values later, on image upload. if image_type == 'snapshot': image_meta.pop('disk_format', None) image_meta.pop('container_format', None) return image_api.create(context, image_meta) def initialize_instance_snapshot_metadata(context, instance, name, extra_properties=None): """Initialize new metadata for a snapshot of the given instance. :param context: authenticated RequestContext; note that this may not be the owner of the instance itself, e.g. 
an admin creates a snapshot image of some user instance :param instance: nova.objects.instance.Instance object :param name: string for name of the snapshot :param extra_properties: dict of extra metadata properties to include :returns: the new instance snapshot metadata """ image_meta = utils.get_image_from_system_metadata( instance.system_metadata) image_meta['name'] = name # If the user creating the snapshot is not in the same project as # the owner of the instance, then the image visibility should be # "shared" so the owner of the instance has access to the image, like # in the case of an admin creating a snapshot of another user's # server, either directly via the createImage API or via shelve. extra_properties = extra_properties or {} if context.project_id != instance.project_id: # The glance API client-side code will use this to add the # instance project as a member of the image for access. image_meta['visibility'] = 'shared' extra_properties['instance_owner'] = instance.project_id # TODO(mriedem): Should owner_project_name and owner_user_name # be removed from image_meta['properties'] here, or added to # [DEFAULT]/non_inheritable_image_properties? It is confusing # otherwise to see the owner project not match those values. else: # The request comes from the owner of the instance so make the # image private. image_meta['visibility'] = 'private' # Delete properties that are non-inheritable properties = image_meta['properties'] keys_to_pop = set(CONF.non_inheritable_image_properties).union( NON_INHERITABLE_IMAGE_PROPERTIES) for ns in NON_INHERITABLE_IMAGE_NAMESPACES: keys_to_pop |= {key for key in properties if key.startswith(ns)} for key in keys_to_pop: properties.pop(key, None) # The properties in extra_properties have precedence properties.update(extra_properties) return image_meta def delete_image(context, instance, image_api, image_id, log_exc_info=False): """Deletes the image if it still exists. Ignores ImageNotFound if the image is already gone. :param context: the nova auth request context where the context.project_id matches the owner of the image :param instance: the instance for which the snapshot image was created :param image_api: the image API used to delete the image :param image_id: the ID of the image to delete :param log_exc_info: True if this is being called from an exception handler block and traceback should be logged at DEBUG level, False otherwise. """ LOG.debug("Cleaning up image %s", image_id, instance=instance, log_exc_info=log_exc_info) try: image_api.delete(context, image_id) except exception.ImageNotFound: # Since we're trying to cleanup an image, we don't care if # if it's already gone. pass except Exception: LOG.exception("Error while trying to clean up image %s", image_id, instance=instance) def may_have_ports_or_volumes(instance): """Checks to see if an instance may have ports or volumes based on vm_state This is primarily only useful when instance.host is None. :param instance: The nova.objects.Instance in question. :returns: True if the instance may have ports of volumes, False otherwise """ # NOTE(melwitt): When an instance build fails in the compute manager, # the instance host and node are set to None and the vm_state is set # to ERROR. In the case, the instance with host = None has actually # been scheduled and may have ports and/or volumes allocated on the # compute node. 
if instance.vm_state in (vm_states.SHELVED_OFFLOADED, vm_states.ERROR): return True return False def get_stashed_volume_connector(bdm, instance): """Lookup a connector dict from the bdm.connection_info if set Gets the stashed connector dict out of the bdm.connection_info if set and the connector host matches the instance host. :param bdm: nova.objects.block_device.BlockDeviceMapping :param instance: nova.objects.instance.Instance :returns: volume connector dict or None """ if 'connection_info' in bdm and bdm.connection_info is not None: # NOTE(mriedem): We didn't start stashing the connector in the # bdm.connection_info until Mitaka so it might not be there on old # attachments. Also, if the volume was attached when the instance # was in shelved_offloaded state and it hasn't been unshelved yet # we don't have the attachment/connection information either. connector = jsonutils.loads(bdm.connection_info).get('connector') if connector: if connector.get('host') == instance.host: return connector LOG.debug('Found stashed volume connector for instance but ' 'connector host %(connector_host)s does not match ' 'the instance host %(instance_host)s.', {'connector_host': connector.get('host'), 'instance_host': instance.host}, instance=instance) if (instance.host is None and may_have_ports_or_volumes(instance)): LOG.debug('Allowing use of stashed volume connector with ' 'instance host None because instance with ' 'vm_state %(vm_state)s has been scheduled in ' 'the past.', {'vm_state': instance.vm_state}, instance=instance) return connector class EventReporter(object): """Context manager to report instance action events. If constructed with ``graceful_exit=True`` the __exit__ function will handle and not re-raise on InstanceActionNotFound. """ def __init__(self, context, event_name, host, *instance_uuids, graceful_exit=False): self.context = context self.event_name = event_name self.instance_uuids = instance_uuids self.host = host self.graceful_exit = graceful_exit def __enter__(self): for uuid in self.instance_uuids: objects.InstanceActionEvent.event_start( self.context, uuid, self.event_name, want_result=False, host=self.host) return self def __exit__(self, exc_type, exc_val, exc_tb): for uuid in self.instance_uuids: try: objects.InstanceActionEvent.event_finish_with_failure( self.context, uuid, self.event_name, exc_val=exc_val, exc_tb=exc_tb, want_result=False) except exception.InstanceActionNotFound: # If the instance action was not found then determine if we # should re-raise based on the graceful_exit attribute. with excutils.save_and_reraise_exception( reraise=not self.graceful_exit): if self.graceful_exit: return True return False def wrap_instance_event(prefix, graceful_exit=False): """Wraps a method to log the event taken on the instance, and result. This decorator wraps a method to log the start and result of an event, as part of an action taken on an instance. :param prefix: prefix for the event name, usually a service binary like "compute" or "conductor" to indicate the origin of the event. :param graceful_exit: True if the decorator should gracefully handle InstanceActionNotFound errors, False otherwise. This should rarely be True. 
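    Usage sketch (this mirrors how the decorator is applied by the conductor
    further below, e.g. on migrate_server):

        @wrap_instance_event(prefix='conductor')
        def migrate_server(self, context, instance, ...):
            ...

    The recorded event is named '<prefix>_<method name>', for example
    'conductor_migrate_server', and the wrapped method must receive the
    instance so its uuid can be extracted for the event.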
""" @utils.expects_func_args('instance') def helper(function): @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): wrapped_func = safe_utils.get_wrapped_function(function) keyed_args = inspect.getcallargs(wrapped_func, self, context, *args, **kwargs) instance_uuid = keyed_args['instance']['uuid'] event_name = '{0}_{1}'.format(prefix, function.__name__) host = self.host if hasattr(self, 'host') else None with EventReporter(context, event_name, host, instance_uuid, graceful_exit=graceful_exit): return function(self, context, *args, **kwargs) return decorated_function return helper class UnlimitedSemaphore(object): def __enter__(self): pass def __exit__(self, exc_type, exc_val, exc_tb): pass @property def balance(self): return 0 # This semaphore is used to enforce a limit on disk-IO-intensive operations # (image downloads, image conversions) at any given time. # It is initialized at ComputeManager.init_host() disk_ops_semaphore = UnlimitedSemaphore() @contextlib.contextmanager def notify_about_instance_delete(notifier, context, instance, delete_type='delete', source=fields.NotificationSource.API): try: notify_about_instance_usage(notifier, context, instance, "%s.start" % delete_type) # Note(gibi): force_delete types will be handled in a # subsequent patch if delete_type in ['delete', 'soft_delete']: notify_about_instance_action( context, instance, host=CONF.host, source=source, action=delete_type, phase=fields.NotificationPhase.START) yield finally: notify_about_instance_usage(notifier, context, instance, "%s.end" % delete_type) if delete_type in ['delete', 'soft_delete']: notify_about_instance_action( context, instance, host=CONF.host, source=source, action=delete_type, phase=fields.NotificationPhase.END) def update_pci_request_with_placement_allocations( context, report_client, pci_requests, provider_mapping): """Update the instance's PCI request based on the request group - resource provider mapping and the device RP name from placement. :param context: the request context :param report_client: a SchedulerReportClient instance :param pci_requests: A list of InstancePCIRequest objects to be updated :param provider_mapping: the request group - resource provider mapping in the form returned by the RequestSpec.get_request_group_mapping() call. :raises AmbigousResourceProviderForPCIRequest: if more than one resource provider provides resource for the given PCI request. :raises UnexpectResourceProviderNameForPCIRequest: if the resource provider, which provides resource for the pci request, does not have a well formatted name so we cannot parse the parent interface name out of it. 
""" if not pci_requests: return def needs_update_due_to_qos(pci_request, mapping): return (pci_request.requester_id and pci_request.requester_id in mapping) def get_group_mapping_for_flavor_based_pci_request(pci_request, mapping): # NOTE(gibi): for flavor based PCI requests nova generates RequestGroup # suffixes from InstancePCIRequests in the form of # {request_id}-{count_index} # NOTE(gibi): a suffixed request group always fulfilled from a single # RP return { group_id: rp_uuids[0] for group_id, rp_uuids in mapping.items() if group_id.startswith(pci_request.request_id) } for pci_request in pci_requests: mapping = get_group_mapping_for_flavor_based_pci_request( pci_request, provider_mapping) if mapping: for spec in pci_request.spec: # FIXME(gibi): this is baaad but spec is a dict of strings so # we need to serialize spec['rp_uuids'] = ','.join(mapping.values()) elif needs_update_due_to_qos(pci_request, provider_mapping): provider_uuids = provider_mapping[pci_request.requester_id] if len(provider_uuids) != 1: raise exception.AmbiguousResourceProviderForPCIRequest( providers=provider_uuids, requester=pci_request.requester_id) dev_rp_name = report_client.get_resource_provider_name( context, provider_uuids[0]) # NOTE(gibi): the device RP name reported by neutron is # structured like :: rp_name_pieces = dev_rp_name.split(':') if len(rp_name_pieces) != 3: ex = exception.UnexpectedResourceProviderNameForPCIRequest raise ex( provider=provider_uuids[0], requester=pci_request.requester_id, provider_name=dev_rp_name) for spec in pci_request.spec: spec['parent_ifname'] = rp_name_pieces[2] def delete_arqs_if_needed(context, instance, arq_uuids=None): """Delete Cyborg ARQs for the instance. :param context :param instance: instance who own the args :param uuids: delete arqs by uuids while did not bind to instance yet. """ cyclient = cyborg.get_client(context) dp_name = instance.flavor.extra_specs.get('accel:device_profile') if dp_name: LOG.debug('Calling Cyborg to delete ARQs for instance %(instance)s', {'instance': instance.uuid}) try: cyclient.delete_arqs_for_instance(instance.uuid) except exception.AcceleratorRequestOpFailed as e: LOG.exception('Failed to delete accelerator requests for ' 'instance %s. Exception: %s', instance.uuid, e) if arq_uuids: LOG.debug('Calling Cyborg to delete ARQs by uuids %(uuid)s for' ' instance %(instance)s', {'instance': instance.uuid, 'uuid': arq_uuids}) cyclient.delete_arqs_by_uuid(arq_uuids) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/compute/vm_states.py0000664000175000017500000000541400000000000017564 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Possible vm states for instances. Compute instance vm states represent the state of an instance as it pertains to a user or administrator. vm_state describes a VM's current stable (not transition) state. 
That is, if there is no ongoing compute API calls (running tasks), vm_state should reflect what the customer expect the VM to be. When combined with task states (task_states.py), a better picture can be formed regarding the instance's health and progress. See http://wiki.openstack.org/VMState """ from nova.compute import task_states from nova.objects import fields # VM is running ACTIVE = fields.InstanceState.ACTIVE # VM only exists in DB BUILDING = fields.InstanceState.BUILDING PAUSED = fields.InstanceState.PAUSED # VM is suspended to disk. SUSPENDED = fields.InstanceState.SUSPENDED # VM is powered off, the disk image is still there. STOPPED = fields.InstanceState.STOPPED # A rescue image is running with the original VM image attached RESCUED = fields.InstanceState.RESCUED # a VM with the new size is active. The user is expected to manually confirm # or revert. RESIZED = fields.InstanceState.RESIZED # VM is marked as deleted but the disk images are still available to restore. SOFT_DELETED = fields.InstanceState.SOFT_DELETED # VM is permanently deleted. DELETED = fields.InstanceState.DELETED ERROR = fields.InstanceState.ERROR # VM is powered off, resources still on hypervisor SHELVED = fields.InstanceState.SHELVED # VM and associated resources are not on hypervisor SHELVED_OFFLOADED = fields.InstanceState.SHELVED_OFFLOADED # states we can soft reboot from ALLOW_SOFT_REBOOT = [ACTIVE] # states we allow hard reboot from ALLOW_HARD_REBOOT = ALLOW_SOFT_REBOOT + [STOPPED, PAUSED, SUSPENDED, ERROR] # states we allow to trigger crash dump ALLOW_TRIGGER_CRASH_DUMP = [ACTIVE, PAUSED, RESCUED, RESIZED, ERROR] # states we allow for evacuate instance ALLOW_TARGET_STATES = [STOPPED] def allow_resource_removal(vm_state, task_state=None): """(vm_state, task_state) combinations we allow resources to be freed in""" return ( vm_state == DELETED or vm_state == SHELVED_OFFLOADED and task_state != task_states.SPAWNING ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3456085 nova-32.0.0/nova/conductor/0000775000175000017500000000000000000000000015525 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conductor/__init__.py0000664000175000017500000000132600000000000017640 0ustar00zuulzuul00000000000000# Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.conductor import api as conductor_api API = conductor_api.API ComputeTaskAPI = conductor_api.ComputeTaskAPI ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conductor/api.py0000664000175000017500000002106700000000000016656 0ustar00zuulzuul00000000000000# Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Handles all requests to the conductor service.""" from oslo_log import log as logging import oslo_messaging as messaging from nova import baserpc from nova.conductor import rpcapi import nova.conf from nova.image import glance CONF = nova.conf.CONF LOG = logging.getLogger(__name__) class API(object): """Conductor API that does updates via RPC to the ConductorManager.""" def __init__(self): self.conductor_rpcapi = rpcapi.ConductorAPI() self.base_rpcapi = baserpc.BaseAPI(topic=rpcapi.RPC_TOPIC) def object_backport_versions(self, context, objinst, object_versions): return self.conductor_rpcapi.object_backport_versions(context, objinst, object_versions) def wait_until_ready(self, context, early_timeout=10, early_attempts=10): '''Wait until a conductor service is up and running. This method calls the remote ping() method on the conductor topic until it gets a response. It starts with a shorter timeout in the loop (early_timeout) up to early_attempts number of tries. It then drops back to the globally configured timeout for rpc calls for each retry. ''' attempt = 0 timeout = early_timeout # if we show the timeout message, make sure we show a similar # message saying that everything is now working to avoid # confusion has_timedout = False while True: # NOTE(danms): Try ten times with a short timeout, and then punt # to the configured RPC timeout after that if attempt == early_attempts: timeout = None attempt += 1 # NOTE(russellb): This is running during service startup. If we # allow an exception to be raised, the service will shut down. # This may fail the first time around if nova-conductor wasn't # running when this service started. try: self.base_rpcapi.ping(context, '1.21 GigaWatts', timeout=timeout) if has_timedout: LOG.info('nova-conductor connection ' 'established successfully') break except messaging.MessagingTimeout: has_timedout = True LOG.warning('Timed out waiting for nova-conductor. ' 'Is it running? Or did this service start ' 'before nova-conductor? 
' 'Reattempting establishment of ' 'nova-conductor connection...') class ComputeTaskAPI(object): """ComputeTask API that queues up compute tasks for nova-conductor.""" def __init__(self): self.conductor_compute_rpcapi = rpcapi.ComputeTaskAPI() self.image_api = glance.API() # TODO(stephenfin): Remove the 'reservations' parameter since we don't use # reservations anymore def resize_instance(self, context, instance, scheduler_hint, flavor, reservations=None, clean_shutdown=True, request_spec=None, host_list=None, do_cast=False): self.conductor_compute_rpcapi.migrate_server( context, instance, scheduler_hint, live=False, rebuild=False, flavor=flavor, block_migration=None, disk_over_commit=None, reservations=reservations, clean_shutdown=clean_shutdown, request_spec=request_spec, host_list=host_list, do_cast=do_cast) def live_migrate_instance(self, context, instance, host_name, block_migration, disk_over_commit, request_spec=None, async_=False): scheduler_hint = {'host': host_name} if async_: self.conductor_compute_rpcapi.live_migrate_instance( context, instance, scheduler_hint, block_migration, disk_over_commit, request_spec) else: self.conductor_compute_rpcapi.migrate_server( context, instance, scheduler_hint, True, False, None, block_migration, disk_over_commit, None, request_spec=request_spec) def build_instances(self, context, instances, image, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping, legacy_bdm=True, request_spec=None, host_lists=None): self.conductor_compute_rpcapi.build_instances(context, instances=instances, image=image, filter_properties=filter_properties, admin_password=admin_password, injected_files=injected_files, requested_networks=requested_networks, security_groups=security_groups, block_device_mapping=block_device_mapping, legacy_bdm=legacy_bdm, request_spec=request_spec, host_lists=host_lists) def schedule_and_build_instances(self, context, build_requests, request_spec, image, admin_password, injected_files, requested_networks, block_device_mapping, tags=None): self.conductor_compute_rpcapi.schedule_and_build_instances( context, build_requests, request_spec, image, admin_password, injected_files, requested_networks, block_device_mapping, tags) def unshelve_instance(self, context, instance, request_spec=None): self.conductor_compute_rpcapi.unshelve_instance(context, instance=instance, request_spec=request_spec) def rebuild_instance(self, context, instance, orig_image_ref, image_ref, injected_files, new_pass, orig_sys_metadata, bdms, recreate=False, on_shared_storage=False, preserve_ephemeral=False, host=None, request_spec=None, reimage_boot_volume=False, target_state=None): self.conductor_compute_rpcapi.rebuild_instance(context, instance=instance, new_pass=new_pass, injected_files=injected_files, image_ref=image_ref, orig_image_ref=orig_image_ref, orig_sys_metadata=orig_sys_metadata, bdms=bdms, recreate=recreate, on_shared_storage=on_shared_storage, preserve_ephemeral=preserve_ephemeral, host=host, request_spec=request_spec, reimage_boot_volume=reimage_boot_volume, target_state=target_state) def cache_images(self, context, aggregate, image_ids): """Request images be pre-cached on hosts within an aggregate. :param context: The RequestContext :param aggregate: The objects.Aggregate representing the hosts to contact :param image_ids: A list of image ID strings to send to the hosts """ for image_id in image_ids: # Validate that we can get the image by id before we go # ask a bunch of hosts to do the same. 
We let this bubble # up to the API, which catches NovaException for the 4xx and # otherwise 500s if this fails in some unexpected way. self.image_api.get(context, image_id) self.conductor_compute_rpcapi.cache_images(context, aggregate, image_ids) def confirm_snapshot_based_resize( self, ctxt, instance, migration, do_cast=True): self.conductor_compute_rpcapi.confirm_snapshot_based_resize( ctxt, instance, migration, do_cast=do_cast) def revert_snapshot_based_resize( self, ctxt, instance, migration): self.conductor_compute_rpcapi.revert_snapshot_based_resize( ctxt, instance, migration) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conductor/manager.py0000664000175000017500000032664200000000000017526 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Handles database requests from other nova services.""" import collections import contextlib import copy import functools import sys import typing as ty import futurist from keystoneauth1 import exceptions as ks_exc from oslo_config import cfg from oslo_db import exception as db_exc from oslo_limit import exception as limit_exceptions from oslo_log import log as logging import oslo_messaging as messaging from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import timeutils from oslo_utils import versionutils from nova.accelerator import cyborg from nova import availability_zones from nova.compute import instance_actions from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute.utils import wrap_instance_event from nova.compute import vm_states from nova.conductor.tasks import cross_cell_migrate from nova.conductor.tasks import live_migrate from nova.conductor.tasks import migrate from nova import context as nova_context from nova import exception from nova.i18n import _ from nova.image import glance from nova.limit import placement as placement_limits from nova.limit import utils as limit_utils from nova import manager from nova.network import neutron from nova import notifications from nova import objects from nova.objects import base as nova_object from nova.objects import fields from nova import profiler from nova import rpc from nova.scheduler.client import query from nova.scheduler.client import report from nova.scheduler import utils as scheduler_utils from nova import servicegroup from nova import utils from nova.volume import cinder LOG = logging.getLogger(__name__) CONF = cfg.CONF def targets_cell(fn): """Wrap a method and automatically target the instance's cell. This decorates a method with signature func(self, context, instance, ...) and automatically targets the context with the instance's cell mapping. It does this by looking up the InstanceMapping. 
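    Minimal usage sketch (matching how it is applied to the compute-task
    methods further below, e.g. live_migrate_instance):

        @targets_cell
        def live_migrate_instance(self, context, instance, ...):
            ...

    The wrapper expects the instance as the first positional argument after
    context or as an 'instance' keyword argument; it looks up the
    InstanceMapping for instance.uuid and targets the context at the mapped
    cell via nova_context.set_target_cell() before calling the method.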
""" @functools.wraps(fn) def wrapper(self, context, *args, **kwargs): instance = kwargs.get('instance') or args[0] try: im = objects.InstanceMapping.get_by_instance_uuid( context, instance.uuid) except exception.InstanceMappingNotFound: LOG.error('InstanceMapping not found, unable to target cell', instance=instance) except db_exc.CantStartEngineError: # Check to see if we can ignore API DB connection failures # because we might already be in the cell conductor. with excutils.save_and_reraise_exception() as err_ctxt: if CONF.api_database.connection is None: err_ctxt.reraise = False else: LOG.debug('Targeting cell %(cell)s for conductor method %(meth)s', {'cell': im.cell_mapping.identity, 'meth': fn.__name__}) # NOTE(danms): Target our context to the cell for the rest of # this request, so that none of the subsequent code needs to # care about it. nova_context.set_target_cell(context, im.cell_mapping) return fn(self, context, *args, **kwargs) return wrapper class ConductorManager(manager.Manager): """Mission: Conduct things. The methods in the base API for nova-conductor are various proxy operations performed on behalf of the nova-compute service running on compute nodes. Compute nodes are not allowed to directly access the database, so this set of methods allows them to get specific work done without locally accessing the database. The nova-conductor service also exposes an API in the 'compute_task' namespace. See the ComputeTaskManager class for details. """ target = messaging.Target(version='3.0') def __init__(self, *args, **kwargs): super(ConductorManager, self).__init__(service_name='conductor', *args, **kwargs) self.compute_task_mgr = ComputeTaskManager() self.additional_endpoints.append(self.compute_task_mgr) # NOTE(hanlind): This can be removed in version 4.0 of the RPC API def provider_fw_rule_get_all(self, context): # NOTE(hanlind): Simulate an empty db result for compat reasons. return [] def _object_dispatch(self, target, method, args, kwargs): """Dispatch a call to an object method. This ensures that object methods get called and any exception that is raised gets wrapped in an ExpectedException for forwarding back to the caller (without spamming the conductor logs). """ try: # NOTE(danms): Keep the getattr inside the try block since # a missing method is really a client problem return getattr(target, method)(*args, **kwargs) except Exception: raise messaging.ExpectedException() def object_class_action_versions(self, context, objname, objmethod, object_versions, args, kwargs): objclass = nova_object.NovaObject.obj_class_from_name( objname, object_versions[objname]) args = tuple([context] + list(args)) result = self._object_dispatch(objclass, objmethod, args, kwargs) # NOTE(danms): The RPC layer will convert to primitives for us, # but in this case, we need to honor the version the client is # asking for, so we do it before returning here. # NOTE(hanlind): Do not convert older than requested objects, # see bug #1596119. 
if isinstance(result, nova_object.NovaObject): target_version = object_versions[objname] requested_version = versionutils.convert_version_to_tuple( target_version) actual_version = versionutils.convert_version_to_tuple( result.VERSION) do_backport = requested_version < actual_version other_major_version = requested_version[0] != actual_version[0] if do_backport or other_major_version: result = result.obj_to_primitive( target_version=target_version, version_manifest=object_versions) return result def object_action(self, context, objinst, objmethod, args, kwargs): """Perform an action on an object.""" oldobj = objinst.obj_clone() result = self._object_dispatch(objinst, objmethod, args, kwargs) updates = dict() # NOTE(danms): Diff the object with the one passed to us and # generate a list of changes to forward back for name, field in objinst.fields.items(): if not objinst.obj_attr_is_set(name): # Avoid demand-loading anything continue if (not oldobj.obj_attr_is_set(name) or getattr(oldobj, name) != getattr(objinst, name)): updates[name] = field.to_primitive(objinst, name, getattr(objinst, name)) # This is safe since a field named this would conflict with the # method anyway updates['obj_what_changed'] = objinst.obj_what_changed() return updates, result def object_backport_versions(self, context, objinst, object_versions): target = object_versions[objinst.obj_name()] LOG.debug('Backporting %(obj)s to %(ver)s with versions %(manifest)s', {'obj': objinst.obj_name(), 'ver': target, 'manifest': ','.join( ['%s=%s' % (name, ver) for name, ver in object_versions.items()])}) return objinst.obj_to_primitive(target_version=target, version_manifest=object_versions) def reset(self): objects.Service.clear_min_version_cache() @contextlib.contextmanager def try_target_cell(context, cell): """If cell is not None call func with context.target_cell. This is a method to help during the transition period. Currently various mappings may not exist if a deployment has not migrated to cellsv2. If there is no mapping call the func as normal, otherwise call it in a target_cell context. """ if cell: with nova_context.target_cell(context, cell) as cell_context: yield cell_context else: yield context @contextlib.contextmanager def obj_target_cell(obj, cell): """Run with object's context set to a specific cell""" with try_target_cell(obj._context, cell) as target: with obj.obj_alternate_context(target): yield target @profiler.trace_cls("rpc") class ComputeTaskManager: """Namespace for compute methods. This class presents an rpc API for nova-conductor under the 'compute_task' namespace. The methods here are compute operations that are invoked by the API service. These methods see the operation to completion, which may involve coordinating activities on multiple compute nodes. 
""" target = messaging.Target(namespace='compute_task', version='1.25') def __init__(self): self.compute_rpcapi = compute_rpcapi.ComputeAPI() self.volume_api = cinder.API() self.image_api = glance.API() self.network_api = neutron.API() self.servicegroup_api = servicegroup.API() self.query_client = query.SchedulerQueryClient() self.notifier = rpc.get_notifier('compute') # Help us to record host in EventReporter self.host = CONF.host try: # Test our placement client during initialization self.report_client except (ks_exc.EndpointNotFound, ks_exc.DiscoveryFailure, ks_exc.RequestTimeout, ks_exc.GatewayTimeout, ks_exc.ConnectFailure) as e: # Non-fatal, likely transient (although not definitely); # continue startup but log the warning so that when things # fail later, it will be clear why we can not do certain # things. LOG.warning('Unable to initialize placement client (%s); ' 'Continuing with startup, but some operations ' 'will not be possible.', e) except (ks_exc.MissingAuthPlugin, ks_exc.Unauthorized) as e: # This is almost definitely fatal mis-configuration. The # Unauthorized error might be transient, but it is # probably reasonable to consider it fatal. LOG.error('Fatal error initializing placement client; ' 'config is incorrect or incomplete: %s', e) raise except Exception as e: # Unknown/unexpected errors here are fatal LOG.error('Fatal error initializing placement client: %s', e) raise @property def report_client(self): return report.report_client_singleton() def reset(self): LOG.info('Reloading compute RPC API') compute_rpcapi.LAST_VERSION = None self.compute_rpcapi = compute_rpcapi.ComputeAPI() # TODO(tdurakov): remove `live` parameter here on compute task api RPC # version bump to 2.x # TODO(danms): remove the `reservations` parameter here on compute task api # RPC version bump to 2.x @messaging.expected_exceptions( exception.NoValidHost, exception.ComputeServiceUnavailable, exception.ComputeHostNotFound, exception.InvalidHypervisorType, exception.InvalidCPUInfo, exception.UnableToMigrateToSelf, exception.DestinationHypervisorTooOld, exception.InvalidLocalStorage, exception.InvalidSharedStorage, exception.HypervisorUnavailable, exception.InstanceInvalidState, exception.MigrationPreCheckError, exception.UnsupportedPolicyException) @targets_cell @wrap_instance_event(prefix='conductor') def migrate_server(self, context, instance, scheduler_hint, live, rebuild, flavor, block_migration, disk_over_commit, reservations=None, clean_shutdown=True, request_spec=None, host_list=None): if instance and not isinstance(instance, nova_object.NovaObject): # NOTE(danms): Until v2 of the RPC API, we need to tolerate # old-world instance objects here attrs = ['metadata', 'system_metadata', 'info_cache', 'security_groups'] instance = objects.Instance._from_db_object( context, objects.Instance(), instance, expected_attrs=attrs) # NOTE: Remove this when we drop support for v1 of the RPC API if flavor and not isinstance(flavor, objects.Flavor): # Code downstream may expect extra_specs to be populated since it # is receiving an object, so lookup the flavor to ensure this. 
flavor = objects.Flavor.get_by_id(context, flavor['id']) if live and not rebuild and not flavor: self._live_migrate(context, instance, scheduler_hint, block_migration, disk_over_commit, request_spec) elif not live and not rebuild and flavor: instance_uuid = instance.uuid with compute_utils.EventReporter(context, 'cold_migrate', self.host, instance_uuid): self._cold_migrate(context, instance, flavor, scheduler_hint['filter_properties'], clean_shutdown, request_spec, host_list) else: raise NotImplementedError() @staticmethod def _get_request_spec_for_cold_migrate(context, instance, flavor, filter_properties, request_spec): # NOTE(sbauza): If a reschedule occurs when prep_resize(), then # it only provides filter_properties legacy dict back to the # conductor with no RequestSpec part of the payload for =6.0. request_spec = objects.RequestSpec.from_primitives( context, request_spec, filter_properties) # We don't have to set the new flavor on the request spec because # if we got here it was due to a reschedule from the compute and # the request spec would already have the new flavor in it from the # else block below. else: # NOTE(sbauza): Resizes means new flavor, so we need to update the # original RequestSpec object for make sure the scheduler verifies # the right one and not the original flavor request_spec.flavor = flavor return request_spec def _cold_migrate(self, context, instance, flavor, filter_properties, clean_shutdown, request_spec, host_list): request_spec = self._get_request_spec_for_cold_migrate( context, instance, flavor, filter_properties, request_spec) task = self._build_cold_migrate_task(context, instance, flavor, request_spec, clean_shutdown, host_list) try: task.execute() except exception.NoValidHost as ex: vm_state = instance.vm_state if not vm_state: vm_state = vm_states.ACTIVE updates = {'vm_state': vm_state, 'task_state': None} self._set_vm_state_and_notify(context, instance.uuid, 'migrate_server', updates, ex, request_spec) # if the flavor IDs match, it's migrate; otherwise resize if flavor.id == instance.instance_type_id: msg = _("No valid host found for cold migrate") else: msg = _("No valid host found for resize") raise exception.NoValidHost(reason=msg) except exception.UnsupportedPolicyException as ex: with excutils.save_and_reraise_exception(): vm_state = instance.vm_state if not vm_state: vm_state = vm_states.ACTIVE updates = {'vm_state': vm_state, 'task_state': None} self._set_vm_state_and_notify(context, instance.uuid, 'migrate_server', updates, ex, request_spec) except Exception as ex: with excutils.save_and_reraise_exception(): # Refresh the instance so we don't overwrite vm_state changes # set after we executed the task. try: instance.refresh() # Passing vm_state is kind of silly but it's expected in # set_vm_state_and_notify. updates = {'vm_state': instance.vm_state, 'task_state': None} self._set_vm_state_and_notify(context, instance.uuid, 'migrate_server', updates, ex, request_spec) except exception.InstanceNotFound: # We can't send the notification because the instance is # gone so just log it. 
LOG.info('During %s the instance was deleted.', 'resize' if instance.instance_type_id != flavor.id else 'cold migrate', instance=instance) # NOTE(sbauza): Make sure we persist the new flavor in case we had # a successful scheduler call if and only if nothing bad happened if request_spec.obj_what_changed(): request_spec.save() def _set_vm_state_and_notify(self, context, instance_uuid, method, updates, ex, request_spec): scheduler_utils.set_vm_state_and_notify( context, instance_uuid, 'compute_task', method, updates, ex, request_spec) def _cleanup_allocated_networks( self, context, instance, requested_networks): try: # If we were told not to allocate networks let's save ourselves # the trouble of calling the network API. if not (requested_networks and requested_networks.no_allocate): self.network_api.deallocate_for_instance( context, instance, requested_networks=requested_networks) except Exception: LOG.exception('Failed to deallocate networks', instance=instance) return instance.system_metadata['network_allocated'] = 'False' try: instance.save() except exception.InstanceNotFound: # NOTE: It's possible that we're cleaning up the networks # because the instance was deleted. If that's the case then this # exception will be raised by instance.save() pass @targets_cell @wrap_instance_event(prefix='conductor') def live_migrate_instance(self, context, instance, scheduler_hint, block_migration, disk_over_commit, request_spec): self._live_migrate(context, instance, scheduler_hint, block_migration, disk_over_commit, request_spec) def _live_migrate(self, context, instance, scheduler_hint, block_migration, disk_over_commit, request_spec): destination = scheduler_hint.get("host") def _set_vm_state(context, instance, ex, vm_state=None, task_state=None): request_spec = {'instance_properties': { 'uuid': instance.uuid, }, } scheduler_utils.set_vm_state_and_notify(context, instance.uuid, 'compute_task', 'migrate_server', dict(vm_state=vm_state, task_state=task_state, expected_task_state=task_states.MIGRATING,), ex, request_spec) migration = objects.Migration(context=context.elevated()) migration.dest_compute = destination migration.status = 'accepted' migration.instance_uuid = instance.uuid migration.source_compute = instance.host migration.migration_type = fields.MigrationType.LIVE_MIGRATION if instance.obj_attr_is_set('flavor'): migration.old_instance_type_id = instance.flavor.id migration.new_instance_type_id = instance.flavor.id else: migration.old_instance_type_id = instance.instance_type_id migration.new_instance_type_id = instance.instance_type_id migration.create() task = self._build_live_migrate_task(context, instance, destination, block_migration, disk_over_commit, migration, request_spec) try: task.execute() except (exception.NoValidHost, exception.ComputeHostNotFound, exception.ComputeServiceUnavailable, exception.InvalidHypervisorType, exception.InvalidCPUInfo, exception.UnableToMigrateToSelf, exception.DestinationHypervisorTooOld, exception.InvalidLocalStorage, exception.InvalidSharedStorage, exception.HypervisorUnavailable, exception.InstanceInvalidState, exception.MigrationPreCheckError, exception.MigrationSchedulerRPCError) as ex: with excutils.save_and_reraise_exception(): _set_vm_state(context, instance, ex, instance.vm_state) migration.status = 'error' migration.save() except Exception as ex: LOG.error('Migration of instance %(instance_id)s to host' ' %(dest)s unexpectedly failed.', {'instance_id': instance.uuid, 'dest': destination}, exc_info=True) # Reset the task state to None to 
indicate completion of # the operation as it is done in case of known exceptions. _set_vm_state(context, instance, ex, vm_states.ERROR, task_state=None) migration.status = 'error' migration.save() raise exception.MigrationError(reason=str(ex)) def _build_live_migrate_task(self, context, instance, destination, block_migration, disk_over_commit, migration, request_spec=None): return live_migrate.LiveMigrationTask(context, instance, destination, block_migration, disk_over_commit, migration, self.compute_rpcapi, self.servicegroup_api, self.query_client, self.report_client, request_spec) def _build_cold_migrate_task(self, context, instance, flavor, request_spec, clean_shutdown, host_list): return migrate.MigrationTask(context, instance, flavor, request_spec, clean_shutdown, self.compute_rpcapi, self.query_client, self.report_client, host_list, self.network_api) def _destroy_build_request(self, context, instance): # The BuildRequest needs to be stored until the instance is mapped to # an instance table. At that point it will never be used again and # should be deleted. build_request = objects.BuildRequest.get_by_instance_uuid( context, instance.uuid) # TODO(alaski): Sync API updates of the build_request to the # instance before it is destroyed. Right now only locked_by can # be updated before this is destroyed. build_request.destroy() def _populate_instance_mapping(self, context, instance, host): try: inst_mapping = objects.InstanceMapping.get_by_instance_uuid( context, instance.uuid) except exception.InstanceMappingNotFound: # NOTE(alaski): If nova-api is up to date this exception should # never be hit. But during an upgrade it's possible that an old # nova-api didn't create an instance_mapping during this boot # request. LOG.debug('Instance was not mapped to a cell, likely due ' 'to an older nova-api service running.', instance=instance) return None else: try: host_mapping = objects.HostMapping.get_by_host(context, host.service_host) except exception.HostMappingNotFound: # NOTE(alaski): For now this exception means that a # deployment has not migrated to cellsv2 and we should # remove the instance_mapping that has been created. # Eventually this will indicate a failure to properly map a # host to a cell and we may want to reschedule. inst_mapping.destroy() return None else: inst_mapping.cell_mapping = host_mapping.cell_mapping inst_mapping.save() return inst_mapping def _validate_existing_attachment_ids(self, context, instance, bdms): """Ensure any attachment ids referenced by the bdms exist. New attachments will only be created if the attachment ids referenced by the bdms no longer exist. This can happen when an instance is rescheduled after a failure to spawn as cleanup code on the previous host will delete attachments before rescheduling. """ for bdm in bdms: if bdm.is_volume and bdm.attachment_id: try: self.volume_api.attachment_get(context, bdm.attachment_id) except exception.VolumeAttachmentNotFound: attachment = self.volume_api.attachment_create( context, bdm.volume_id, instance.uuid) bdm.attachment_id = attachment['id'] bdm.save() def _cleanup_when_reschedule_fails( self, context, instance, exception, legacy_request_spec, requested_networks): """Set the instance state and clean up. 
It is only used in case build_instance fails while rescheduling the instance """ updates = {'vm_state': vm_states.ERROR, 'task_state': None} self._set_vm_state_and_notify( context, instance.uuid, 'build_instances', updates, exception, legacy_request_spec) self._cleanup_allocated_networks( context, instance, requested_networks) arq_uuids = None # arqs have not bound to port/instance yet if requested_networks: arq_uuids = [req.arq_uuid for req in requested_networks if req.arq_uuid] compute_utils.delete_arqs_if_needed(context, instance, arq_uuids) # NOTE(danms): This is never cell-targeted because it is only used for # n-cpu reschedules which go to the cell conductor and thus are always # cell-specific. def build_instances(self, context, instances, image, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping=None, legacy_bdm=True, request_spec=None, host_lists=None): # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version # 2.0 of the RPC API. # TODO(danms): Remove this in version 2.0 of the RPC API if (requested_networks and not isinstance(requested_networks, objects.NetworkRequestList)): requested_networks = objects.NetworkRequestList.from_tuples( requested_networks) # TODO(melwitt): Remove this in version 2.0 of the RPC API flavor = filter_properties.get('instance_type') if flavor and not isinstance(flavor, objects.Flavor): # Code downstream may expect extra_specs to be populated since it # is receiving an object, so lookup the flavor to ensure this. flavor = objects.Flavor.get_by_id(context, flavor['id']) filter_properties = dict(filter_properties, instance_type=flavor) # Older computes will not send a request_spec during reschedules so we # need to check and build our own if one is not provided. if request_spec is None: legacy_request_spec = scheduler_utils.build_request_spec( image, instances) else: # TODO(mriedem): This is annoying but to populate the local # request spec below using the filter_properties, we have to pass # in a primitive version of the request spec. Yes it's inefficient # and we can remove it once the populate_retry and # populate_filter_properties utility methods are converted to # work on a RequestSpec object rather than filter_properties. # NOTE(gibi): we have to keep a reference to the original # RequestSpec object passed to this function as we lose information # during the below legacy conversion legacy_request_spec = request_spec.to_legacy_request_spec_dict() # 'host_lists' will be None during a reschedule from a pre-Queens # compute. In all other cases, it will be a list of lists, though the # lists may be empty if there are no more hosts left in a rescheduling # situation. is_reschedule = host_lists is not None try: # check retry policy. Rather ugly use of instances[0]... # but if we've exceeded max retries... then we really only # have a single instance. # TODO(sbauza): Provide directly the RequestSpec object # when populate_retry() accepts it scheduler_utils.populate_retry( filter_properties, instances[0].uuid) instance_uuids = [instance.uuid for instance in instances] spec_obj = objects.RequestSpec.from_primitives( context, legacy_request_spec, filter_properties) LOG.debug("Rescheduling: %s", is_reschedule) if is_reschedule: # Make sure that we have a host, as we may have exhausted all # our alternates if not host_lists[0]: # We have an empty list of hosts, so this instance has # failed to build. 
msg = ("Exhausted all hosts available for retrying build " "failures for instance %(instance_uuid)s." % {"instance_uuid": instances[0].uuid}) raise exception.MaxRetriesExceeded(reason=msg) else: # This is not a reschedule, so we need to call the scheduler to # get appropriate hosts for the request. # NOTE(gibi): We only call the scheduler if we are rescheduling # from a really old compute. In that case we do not support # externally-defined resource requests, like port QoS. So no # requested_resources are set on the RequestSpec here. host_lists = self._schedule_instances(context, spec_obj, instance_uuids, return_alternates=True) except Exception as exc: # NOTE(mriedem): If we're rescheduling from a failed build on a # compute, "retry" will be set and num_attempts will be >1 because # populate_retry above will increment it. If the server build was # forced onto a host/node or [scheduler]/max_attempts=1, "retry" # won't be in filter_properties and we won't get here because # nova-compute will just abort the build since reschedules are # disabled in those cases. num_attempts = filter_properties.get( 'retry', {}).get('num_attempts', 1) for instance in instances: # If num_attempts > 1, we're in a reschedule and probably # either hit NoValidHost or MaxRetriesExceeded. Either way, # the build request should already be gone and we probably # can't reach the API DB from the cell conductor. if num_attempts <= 1: try: # If the BuildRequest stays around then instance # show/lists will pull from it rather than the errored # instance. self._destroy_build_request(context, instance) except exception.BuildRequestNotFound: pass self._cleanup_when_reschedule_fails( context, instance, exc, legacy_request_spec, requested_networks) return elevated = context.elevated() for (instance, host_list) in zip(instances, host_lists): host = host_list.pop(0) if is_reschedule: # If this runs in the superconductor, the first instance will # already have its resources claimed in placement. If this is a # retry, though, this is running in the cell conductor, and we # need to claim first to ensure that the alternate host still # has its resources available. Note that there are schedulers # that don't support Placement, so must assume that the host is # still available. host_available = False while host and not host_available: if host.allocation_request: alloc_req = jsonutils.loads(host.allocation_request) else: alloc_req = None if alloc_req: try: host_available = scheduler_utils.claim_resources( elevated, self.report_client, spec_obj, instance.uuid, alloc_req, host.allocation_request_version) if request_spec and host_available: # NOTE(gibi): redo the request group - resource # provider mapping as the above claim call # moves the allocation of the instance to # another host scheduler_utils.fill_provider_mapping( request_spec, host) except Exception as exc: self._cleanup_when_reschedule_fails( context, instance, exc, legacy_request_spec, requested_networks) return else: # Some deployments use different schedulers that do not # use Placement, so they will not have an # allocation_request to claim with. For those cases, # there is no concept of claiming, so just assume that # the host is valid. host_available = True if not host_available: # Insufficient resources remain on that host, so # discard it and try the next. host = host_list.pop(0) if host_list else None if not host_available: # No more available hosts for retrying the build. 
msg = ("Exhausted all hosts available for retrying build " "failures for instance %(instance_uuid)s." % {"instance_uuid": instance.uuid}) exc = exception.MaxRetriesExceeded(reason=msg) self._cleanup_when_reschedule_fails( context, instance, exc, legacy_request_spec, requested_networks) return # The availability_zone field was added in v1.1 of the Selection # object so make sure to handle the case where it is missing. if 'availability_zone' in host: instance.availability_zone = host.availability_zone else: try: instance.availability_zone = ( availability_zones.get_host_availability_zone(context, host.service_host)) except Exception as exc: # Put the instance into ERROR state, set task_state to # None, inject a fault, etc. self._cleanup_when_reschedule_fails( context, instance, exc, legacy_request_spec, requested_networks) continue try: # NOTE(danms): This saves the az change above, refreshes our # instance, and tells us if it has been deleted underneath us instance.save() except (exception.InstanceNotFound, exception.InstanceInfoCacheNotFound): LOG.debug('Instance deleted during build', instance=instance) continue local_filter_props = copy.deepcopy(filter_properties) scheduler_utils.populate_filter_properties(local_filter_props, host) # Populate the request_spec with the local_filter_props information # like retries and limits. Note that at this point the request_spec # could have come from a compute via reschedule and it would # already have some things set, like scheduler_hints. local_reqspec = objects.RequestSpec.from_primitives( context, legacy_request_spec, local_filter_props) # NOTE(gibi): at this point the request spec already got converted # to a legacy dict and then back to an object so we lost the non # legacy part of the spec. Re-populate the requested_resources # field based on the original request spec object passed to this # function. if request_spec: local_reqspec.requested_resources = ( request_spec.requested_resources) # The block_device_mapping passed from the api doesn't contain # instance specific information bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) # This is populated in scheduler_utils.populate_retry num_attempts = local_filter_props.get('retry', {}).get('num_attempts', 1) if num_attempts <= 1: # If this is a reschedule the instance is already mapped to # this cell and the BuildRequest is already deleted so ignore # the logic below. inst_mapping = self._populate_instance_mapping(context, instance, host) try: self._destroy_build_request(context, instance) except exception.BuildRequestNotFound: # This indicates an instance delete has been requested in # the API. Stop the build, cleanup the instance_mapping and # potentially the block_device_mappings # TODO(alaski): Handle block_device_mapping cleanup if inst_mapping: inst_mapping.destroy() return else: # NOTE(lyarwood): If this is a reschedule then recreate any # attachments that were previously removed when cleaning up # after failures to spawn etc. self._validate_existing_attachment_ids(context, instance, bdms) alts = [(alt.service_host, alt.nodename) for alt in host_list] LOG.debug("Selected host: %s; Selected node: %s; Alternates: %s", host.service_host, host.nodename, alts, instance=instance) try: accel_uuids = self._create_and_bind_arq_for_instance( context, instance, host.nodename, local_reqspec, requested_networks) except Exception as exc: LOG.exception('Failed to reschedule. 
Reason: %s', exc) self._cleanup_when_reschedule_fails( context, instance, exc, legacy_request_spec, requested_networks) continue self.compute_rpcapi.build_and_run_instance(context, instance=instance, host=host.service_host, image=image, request_spec=local_reqspec, filter_properties=local_filter_props, admin_password=admin_password, injected_files=injected_files, requested_networks=requested_networks, security_groups=security_groups, block_device_mapping=bdms, node=host.nodename, limits=host.limits, host_list=host_list, accel_uuids=accel_uuids) def _create_and_bind_arq_for_instance( self, context, instance, hostname, request_spec, requested_networks=None): try: resource_provider_mapping = ( request_spec.get_request_group_mapping()) # Using nodename instead of hostname. See: # http://lists.openstack.org/pipermail/openstack-discuss/2019-November/011044.html # noqa cyclient = cyborg.get_client(context) bindings = {} port_bindings = {} # Create ARQs comes from extra specs. bindings = self._create_and_bind_arqs( cyclient, instance.uuid, instance.flavor.extra_specs, hostname, resource_provider_mapping) if requested_networks: # Create ARQs comes from port device profile port_bindings = self._create_arqs_for_ports( cyclient, instance.uuid, requested_networks, hostname, resource_provider_mapping) # Initiate Cyborg binding asynchronously bindings.update(port_bindings) if bindings: cyclient.bind_arqs(bindings) return list(bindings.keys()) except exception.AcceleratorRequestBindingFailed as exc: # If anything failed here we need to cleanup and bail out. cyclient = cyborg.get_client(context) cyclient.delete_arqs_by_uuid(exc.arqs) raise def _schedule_instances(self, context, request_spec, instance_uuids=None, return_alternates=False): scheduler_utils.setup_instance_group(context, request_spec) with timeutils.StopWatch() as timer: host_lists = self.query_client.select_destinations( context, request_spec, instance_uuids, return_objects=True, return_alternates=return_alternates) LOG.debug('Took %0.2f seconds to select destinations for %s ' 'instance(s).', timer.elapsed(), len(instance_uuids)) return host_lists @staticmethod def _restrict_request_spec_to_cell(context, instance, request_spec): """Sets RequestSpec.requested_destination.cell for the move operation Move operations, e.g. evacuate and unshelve, must be restricted to the cell in which the instance already exists, so this method is used to target the RequestSpec, which is sent to the scheduler via the _schedule_instances method, to the instance's current cell. :param context: nova auth RequestContext """ instance_mapping = \ objects.InstanceMapping.get_by_instance_uuid( context, instance.uuid) LOG.debug('Requesting cell %(cell)s during scheduling', {'cell': instance_mapping.cell_mapping.identity}, instance=instance) if ('requested_destination' in request_spec and request_spec.requested_destination): request_spec.requested_destination.cell = ( instance_mapping.cell_mapping) else: request_spec.requested_destination = ( objects.Destination( cell=instance_mapping.cell_mapping)) def _recheck_quota( self, context: nova_context.RequestContext, flavor: 'objects.Flavor', request_spec: 'objects.RequestSpec', orig_num_req: int, project_id: ty.Optional[str] = None, user_id: ty.Optional[str] = None ) -> None: # A quota "recheck" is a quota check that is performed *after* quota # limited resources are consumed. 
It is meant to address race # conditions where a request that was not over quota at the beginning # of the request before resources are allocated becomes over quota # after resources (like database rows or placement allocations) are # created. An example of this would be a large number of requests for # the same resource for the same project sent simultaneously. if CONF.quota.recheck_quota: # The orig_num_req is the number of instances requested, which is # the delta that was quota checked before resources were allocated. # This is only used for the exception message is the recheck fails # for lack of enough quota. compute_utils.check_num_instances_quota( context, flavor, 0, 0, project_id=project_id, user_id=user_id, orig_num_req=orig_num_req) placement_limits.enforce_num_instances_and_flavor( context, project_id, flavor, request_spec.is_bfv, 0, 0) # TODO(mriedem): Make request_spec required in ComputeTaskAPI RPC v2.0. @targets_cell def unshelve_instance(self, context, instance, request_spec=None): sys_meta = instance.system_metadata def safe_image_show(ctx, image_id): if image_id: return self.image_api.get(ctx, image_id, show_deleted=False) else: raise exception.ImageNotFound(image_id='') if instance.vm_state == vm_states.SHELVED: instance.task_state = task_states.POWERING_ON instance.save(expected_task_state=task_states.UNSHELVING) self.compute_rpcapi.start_instance(context, instance) elif instance.vm_state == vm_states.SHELVED_OFFLOADED: image = None image_id = sys_meta.get('shelved_image_id') # No need to check for image if image_id is None as # "shelved_image_id" key is not set for volume backed # instance during the shelve process if image_id: with compute_utils.EventReporter( context, 'get_image_info', self.host, instance.uuid): try: image = safe_image_show(context, image_id) except exception.ImageNotFound as error: instance.vm_state = vm_states.ERROR instance.save() reason = _('Unshelve attempted but the image %s ' 'cannot be found.') % image_id LOG.error(reason, instance=instance) compute_utils.add_instance_fault_from_exc( context, instance, error, sys.exc_info(), fault_message=reason) raise exception.UnshelveException( instance_id=instance.uuid, reason=reason) try: with compute_utils.EventReporter(context, 'schedule_instances', self.host, instance.uuid): # NOTE(sbauza): Force_hosts/nodes needs to be reset # if we want to make sure that the next destination # is not forced to be the original host request_spec.reset_forced_destinations() # TODO(sbauza): Provide directly the RequestSpec object # when populate_filter_properties accepts it filter_properties = request_spec.\ to_legacy_filter_properties_dict() res_req, req_lvl_params = ( self.network_api.get_requested_resource_for_instance( context, instance.uuid) ) extra_specs = request_spec.flavor.extra_specs device_profile = extra_specs.get('accel:device_profile') res_req.extend( cyborg.get_device_profile_request_groups( context, device_profile) if device_profile else []) # NOTE(gibi): When other modules want to handle similar # non-nova resources then here we have to collect all # the external resource requests in a single list and # add them to the RequestSpec. request_spec.requested_resources = res_req request_spec.request_level_params = req_lvl_params # NOTE(gibi): as PCI devices is tracked in placement we # need to generate request groups from InstancePCIRequests. 
# This will append new RequestGroup objects to the # request_spec.requested_resources list if needed request_spec.generate_request_groups_from_pci_requests() # NOTE(cfriesen): Ensure that we restrict the scheduler to # the cell specified by the instance mapping. self._restrict_request_spec_to_cell( context, instance, request_spec) request_spec.ensure_project_and_user_id(instance) request_spec.ensure_network_information(instance) compute_utils.heal_reqspec_is_bfv( context, request_spec, instance) host_lists = self._schedule_instances(context, request_spec, [instance.uuid], return_alternates=False) # NOTE(melwitt): We recheck the quota after allocating the # resources in placement, to prevent users from allocating # more resources than their allowed quota in the event of a # race. This is configurable because it can be expensive if # strict quota limits are not required in a deployment. try: # Quota should only be checked for unshelve only if # resources are being counted in placement. Legacy # quotas continue to consume resources while # SHELVED_OFFLOADED and will not allocate any new # resources during unshelve. if (CONF.quota.count_usage_from_placement or limit_utils.use_unified_limits()): self._recheck_quota( context, instance.flavor, request_spec, 0, project_id=instance.project_id, user_id=instance.user_id) except (exception.TooManyInstances, limit_exceptions.ProjectOverLimit): with excutils.save_and_reraise_exception(): self.report_client.delete_allocation_for_instance( context, instance.uuid, force=True) host_list = host_lists[0] selection = host_list[0] scheduler_utils.populate_filter_properties( filter_properties, selection) (host, node) = (selection.service_host, selection.nodename) LOG.debug( "Scheduler selected host: %s, node:%s", host, node, instance=instance ) instance.availability_zone = ( availability_zones.get_host_availability_zone( context, host)) scheduler_utils.fill_provider_mapping( request_spec, selection) # NOTE(brinzhang): For unshelve operation we should # re-create-and-bound the arqs for the instance. accel_uuids = self._create_and_bind_arq_for_instance( context, instance, node, request_spec) self.compute_rpcapi.unshelve_instance( context, instance, host, request_spec, image=image, filter_properties=filter_properties, node=node, accel_uuids=accel_uuids) except (exception.NoValidHost, exception.UnsupportedPolicyException): instance.task_state = None instance.save() LOG.warning("No valid host found for unshelve instance", instance=instance) return except Exception as exc: if isinstance(exc, exception.AcceleratorRequestBindingFailed): cyclient = cyborg.get_client(context) cyclient.delete_arqs_by_uuid(exc.arqs) LOG.exception('Failed to unshelve. Reason: %s', exc) with excutils.save_and_reraise_exception(): instance.task_state = None instance.save() LOG.error("Unshelve attempted but an error " "has occurred", instance=instance) else: LOG.error('Unshelve attempted but vm_state not SHELVED or ' 'SHELVED_OFFLOADED', instance=instance) instance.vm_state = vm_states.ERROR instance.save() return def _allocate_for_evacuate_dest_host(self, context, instance, host, request_spec=None): # The user is forcing the destination host and bypassing the # scheduler. We need to copy the source compute node # allocations in Placement to the destination compute node. # Normally select_destinations() in the scheduler would do this # for us, but when forcing the target host we don't call the # scheduler. source_node = None # This is used for error handling below. 
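        # Descriptive note (summary, not original upstream commentary):
        # roughly speaking, claim_resources_on_destination() below reads the
        # instance's current allocations against the source node's resource
        # provider and writes equivalent allocations against the destination
        # node's provider in Placement, raising NoValidHost if the forced
        # destination cannot satisfy them. This stands in for the claim the
        # scheduler would otherwise make via select_destinations().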
try: source_node = objects.ComputeNode.get_by_host_and_nodename( context, instance.host, instance.node) dest_node = ( objects.ComputeNode.get_first_node_by_host_for_old_compat( context, host, use_slave=True)) except exception.ComputeHostNotFound as ex: with excutils.save_and_reraise_exception(): self._set_vm_state_and_notify( context, instance.uuid, 'rebuild_server', {'vm_state': instance.vm_state, 'task_state': None}, ex, request_spec) if source_node: LOG.warning('Specified host %s for evacuate was not ' 'found.', host, instance=instance) else: LOG.warning('Source host %s and node %s for evacuate was ' 'not found.', instance.host, instance.node, instance=instance) try: scheduler_utils.claim_resources_on_destination( context, self.report_client, instance, source_node, dest_node) except exception.NoValidHost as ex: with excutils.save_and_reraise_exception(): self._set_vm_state_and_notify( context, instance.uuid, 'rebuild_server', {'vm_state': instance.vm_state, 'task_state': None}, ex, request_spec) LOG.warning('Specified host %s for evacuate is ' 'invalid.', host, instance=instance) # TODO(mriedem): Make request_spec required in ComputeTaskAPI RPC v2.0. @targets_cell def rebuild_instance(self, context, instance, orig_image_ref, image_ref, injected_files, new_pass, orig_sys_metadata, bdms, recreate, on_shared_storage, preserve_ephemeral=False, host=None, request_spec=None, reimage_boot_volume=False, target_state=None): # recreate=True means the instance is being evacuated from a failed # host to a new destination host. The 'recreate' variable name is # confusing, so rename it to evacuate here at the top, which is simpler # than renaming a parameter in an RPC versioned method. evacuate = recreate # NOTE(efried): It would be nice if this were two separate events, one # for 'rebuild' and one for 'evacuate', but this is part of the API # now, so it would be nontrivial to change. with compute_utils.EventReporter(context, 'rebuild_server', self.host, instance.uuid): node = limits = None try: migration = objects.Migration.get_by_instance_and_status( context, instance.uuid, 'accepted') except exception.MigrationNotFoundByStatus: LOG.debug("No migration record for the rebuild/evacuate " "request.", instance=instance) migration = None # The host variable is passed in two cases: # 1. rebuild - the instance.host is passed to rebuild on the # same host and bypass the scheduler *unless* a new image # was specified # 2. evacuate with specified host and force=True - the specified # host is passed and is meant to bypass the scheduler. # NOTE(mriedem): This could be a lot more straight-forward if we # had separate methods for rebuild and evacuate... if host: # We only create a new allocation on the specified host if # we're doing an evacuate since that is a move operation. if host != instance.host: # If a destination host is forced for evacuate, create # allocations against it in Placement. try: self._allocate_for_evacuate_dest_host( context, instance, host, request_spec) except exception.AllocationUpdateFailed as ex: with excutils.save_and_reraise_exception(): if migration: migration.status = 'error' migration.save() # NOTE(efried): It would be nice if this were two # separate events, one for 'rebuild' and one for # 'evacuate', but this is part of the API now, so # it would be nontrivial to change. 
self._set_vm_state_and_notify( context, instance.uuid, 'rebuild_server', {'vm_state': vm_states.ERROR, 'task_state': None}, ex, request_spec) LOG.warning('Rebuild failed: %s', str(ex), instance=instance) except exception.NoValidHost: with excutils.save_and_reraise_exception(): if migration: migration.status = 'error' migration.save() else: # At this point, the user is either: # # 1. Doing a rebuild on the same host (not evacuate) and # specified a new image. # 2. Evacuating and specified a host but are not forcing it. # # In either case, the API passes host=None but sets up the # RequestSpec.requested_destination field for the specified # host. if evacuate: # NOTE(sbauza): Augment the RequestSpec object by excluding # the source host for avoiding the scheduler to pick it request_spec.ignore_hosts = [instance.host] # NOTE(sbauza): Force_hosts/nodes needs to be reset # if we want to make sure that the next destination # is not forced to be the original host request_spec.reset_forced_destinations() res_req, req_lvl_params = ( self.network_api.get_requested_resource_for_instance( context, instance.uuid) ) extra_specs = request_spec.flavor.extra_specs device_profile = extra_specs.get('accel:device_profile') res_req.extend( cyborg.get_device_profile_request_groups( context, device_profile) if device_profile else []) # NOTE(gibi): When other modules want to handle similar # non-nova resources then here we have to collect all # the external resource requests in a single list and # add them to the RequestSpec. request_spec.requested_resources = res_req request_spec.request_level_params = req_lvl_params # NOTE(gibi): as PCI devices is tracked in placement we # need to generate request groups from InstancePCIRequests. # This will append new RequestGroup objects to the # request_spec.requested_resources list if needed request_spec.generate_request_groups_from_pci_requests() try: # if this is a rebuild of instance on the same host with # new image. if not evacuate and orig_image_ref != image_ref: self._validate_image_traits_for_rebuild(context, instance, image_ref) self._restrict_request_spec_to_cell( context, instance, request_spec) request_spec.ensure_project_and_user_id(instance) request_spec.ensure_network_information(instance) compute_utils.heal_reqspec_is_bfv( context, request_spec, instance) host_lists = self._schedule_instances(context, request_spec, [instance.uuid], return_alternates=False) host_list = host_lists[0] selection = host_list[0] host, node, limits = (selection.service_host, selection.nodename, selection.limits) if recreate: scheduler_utils.fill_provider_mapping( request_spec, selection) except (exception.NoValidHost, exception.UnsupportedPolicyException, exception.AllocationUpdateFailed, # the next two can come from fill_provider_mapping and # signals a software error. NotImplementedError, ValueError) as ex: if migration: migration.status = 'error' migration.save() # Rollback the image_ref if a new one was provided (this # only happens in the rebuild case, not evacuate). if orig_image_ref and orig_image_ref != image_ref: instance.image_ref = orig_image_ref instance.save() with excutils.save_and_reraise_exception(): # NOTE(efried): It would be nice if this were two # separate events, one for 'rebuild' and one for # 'evacuate', but this is part of the API now, so it # would be nontrivial to change. 
self._set_vm_state_and_notify(context, instance.uuid, 'rebuild_server', {'vm_state': vm_states.ERROR, 'task_state': None}, ex, request_spec) LOG.warning('Rebuild failed: %s', str(ex), instance=instance) compute_utils.notify_about_instance_usage( self.notifier, context, instance, "rebuild.scheduled") compute_utils.notify_about_instance_rebuild( context, instance, host, action=fields.NotificationAction.REBUILD_SCHEDULED, source=fields.NotificationSource.CONDUCTOR) instance.availability_zone = ( availability_zones.get_host_availability_zone( context, host)) accel_uuids = [] try: if instance.flavor.extra_specs.get('accel:device_profile'): cyclient = cyborg.get_client(context) if evacuate: # NOTE(brinzhang): For evacuate operation we should # delete the bound arqs, then re-create-and-bound the # arqs for the instance. cyclient.delete_arqs_for_instance(instance.uuid) accel_uuids = self._create_and_bind_arq_for_instance( context, instance, node, request_spec) else: accel_uuids = cyclient.get_arq_uuids_for_instance( instance) except Exception as exc: if isinstance(exc, exception.AcceleratorRequestBindingFailed): cyclient = cyborg.get_client(context) cyclient.delete_arqs_by_uuid(exc.arqs) LOG.exception('Failed to rebuild. Reason: %s', exc) raise exc self.compute_rpcapi.rebuild_instance( context, instance=instance, new_pass=new_pass, injected_files=injected_files, image_ref=image_ref, orig_image_ref=orig_image_ref, orig_sys_metadata=orig_sys_metadata, bdms=bdms, recreate=evacuate, on_shared_storage=on_shared_storage, preserve_ephemeral=preserve_ephemeral, migration=migration, host=host, node=node, limits=limits, request_spec=request_spec, accel_uuids=accel_uuids, reimage_boot_volume=reimage_boot_volume, target_state=target_state) def _validate_image_traits_for_rebuild(self, context, instance, image_ref): """Validates that the traits specified in the image can be satisfied by the providers of the current allocations for the instance during rebuild of the instance. If the traits cannot be satisfied, fails the action by raising a NoValidHost exception. :raises: NoValidHost exception in case the traits on the providers of the allocated resources for the instance do not match the required traits on the image. """ image_meta = objects.ImageMeta.from_image_ref( context, self.image_api, image_ref) if ('properties' not in image_meta or 'traits_required' not in image_meta.properties or not image_meta.properties.traits_required): return image_traits = set(image_meta.properties.traits_required) # check any of the image traits are forbidden in flavor traits. # if so raise an exception extra_specs = instance.flavor.extra_specs forbidden_flavor_traits = set() for key, val in extra_specs.items(): if key.startswith('trait'): # get the actual key. prefix, parsed_key = key.split(':', 1) if val == 'forbidden': forbidden_flavor_traits.add(parsed_key) forbidden_traits = image_traits & forbidden_flavor_traits if forbidden_traits: raise exception.NoValidHost( reason=_("Image traits are part of forbidden " "traits in flavor associated with the server. " "Either specify a different image during rebuild " "or create a new server with the specified image " "and a compatible flavor.")) # If image traits are present, then validate against allocations. allocations = self.report_client.get_allocations_for_consumer( context, instance.uuid) instance_rp_uuids = list(allocations) # Get provider tree for the instance. We use the uuid of the host # on which the instance is rebuilding to get the provider tree. 
compute_node = objects.ComputeNode.get_by_host_and_nodename( context, instance.host, instance.node) # TODO(karimull): Call with a read-only version, when available. instance_rp_tree = ( self.report_client.get_provider_tree_and_ensure_root( context, compute_node.uuid)) traits_in_instance_rps = set() for rp_uuid in instance_rp_uuids: traits_in_instance_rps.update( instance_rp_tree.data(rp_uuid).traits) missing_traits = image_traits - traits_in_instance_rps if missing_traits: raise exception.NoValidHost( reason=_("Image traits cannot be " "satisfied by the current resource providers. " "Either specify a different image during rebuild " "or create a new server with the specified image.")) # TODO(avolkov): move method to bdm @staticmethod def _volume_size(flavor, bdm): size = bdm.get('volume_size') # NOTE (ndipanov): inherit flavor size only for swap and ephemeral if (size is None and bdm.get('source_type') == 'blank' and bdm.get('destination_type') == 'local'): if bdm.get('guest_format') == 'swap': size = flavor.get('swap', 0) else: size = flavor.get('ephemeral_gb', 0) return size def _create_block_device_mapping(self, cell, flavor, instance_uuid, block_device_mapping): """Create the BlockDeviceMapping objects in the db. This method makes a copy of the list in order to avoid using the same id field in case this is called for multiple instances. """ LOG.debug("block_device_mapping %s", list(block_device_mapping), instance_uuid=instance_uuid) instance_block_device_mapping = copy.deepcopy(block_device_mapping) for bdm in instance_block_device_mapping: bdm.volume_size = self._volume_size(flavor, bdm) bdm.instance_uuid = instance_uuid with obj_target_cell(bdm, cell): bdm.update_or_create() return instance_block_device_mapping def _create_tags(self, context, instance_uuid, tags): """Create the Tags objects in the db.""" if tags: tag_list = [tag.tag for tag in tags] instance_tags = objects.TagList.create( context, instance_uuid, tag_list) return instance_tags else: return tags def _create_instance_action_for_cell0(self, context, instance, exc): """Create a failed "create" instance action for the instance in cell0. :param context: nova auth RequestContext targeted at cell0 :param instance: Instance object being buried in cell0 :param exc: Exception that occurred which resulted in burial """ # First create the action record. objects.InstanceAction.action_start( context, instance.uuid, instance_actions.CREATE, want_result=False) # Now create an event for that action record. event_name = 'conductor_schedule_and_build_instances' objects.InstanceActionEvent.event_start( context, instance.uuid, event_name, want_result=False, host=self.host) # And finish the event with the exception. Note that we expect this # method to be called from _bury_in_cell0 which is called from within # an exception handler so sys.exc_info should return values but if not # it's not the end of the world - this is best effort. objects.InstanceActionEvent.event_finish_with_failure( context, instance.uuid, event_name, exc_val=exc, exc_tb=sys.exc_info()[2], want_result=False) def _bury_in_cell0(self, context, request_spec, exc, build_requests=None, instances=None, block_device_mapping=None, tags=None): """Ensure all provided build_requests and instances end up in cell0. Cell0 is the fake cell we schedule dead instances to when we can't schedule them somewhere real. Requests that don't yet have instances will get a new instance, created in cell0. Instances that have not yet been created will be created in cell0. 
All build requests are destroyed after we're done. Failure to delete a build request will trigger the instance deletion, just like the happy path in schedule_and_build_instances() below. """ try: cell0 = objects.CellMapping.get_by_uuid( context, objects.CellMapping.CELL0_UUID) except exception.CellMappingNotFound: # Not yet setup for cellsv2. Instances will need to be written # to the configured database. This will become a deployment # error in Ocata. LOG.error('No cell mapping found for cell0 while ' 'trying to record scheduling failure. ' 'Setup is incomplete.') return build_requests = build_requests or [] instances = instances or [] instances_by_uuid = {inst.uuid: inst for inst in instances} for build_request in build_requests: if build_request.instance_uuid not in instances_by_uuid: # This is an instance object with no matching db entry. instance = build_request.get_new_instance(context) instances_by_uuid[instance.uuid] = instance updates = {'vm_state': vm_states.ERROR, 'task_state': None} for instance in instances_by_uuid.values(): inst_mapping = None try: # We don't need the cell0-targeted context here because the # instance mapping is in the API DB. inst_mapping = \ objects.InstanceMapping.get_by_instance_uuid( context, instance.uuid) except exception.InstanceMappingNotFound: # The API created the instance mapping record so it should # definitely be here. Log an error but continue to create the # instance in the cell0 database. LOG.error('While burying instance in cell0, no instance ' 'mapping was found.', instance=instance) # Perform a final sanity check that the instance is not mapped # to some other cell already because of maybe some crazy # clustered message queue weirdness. if inst_mapping and inst_mapping.cell_mapping is not None: LOG.error('When attempting to bury instance in cell0, the ' 'instance is already mapped to cell %s. Ignoring ' 'bury in cell0 attempt.', inst_mapping.cell_mapping.identity, instance=instance) continue with obj_target_cell(instance, cell0) as cctxt: instance.create() if inst_mapping: inst_mapping.cell_mapping = cell0 inst_mapping.save() # Record an instance action with a failed event. self._create_instance_action_for_cell0( cctxt, instance, exc) # NOTE(mnaser): In order to properly clean-up volumes after # being buried in cell0, we need to store BDMs. if block_device_mapping: self._create_block_device_mapping( cell0, instance.flavor, instance.uuid, block_device_mapping) self._create_tags(cctxt, instance.uuid, tags) # Use the context targeted to cell0 here since the instance is # now in cell0. 
self._set_vm_state_and_notify( cctxt, instance.uuid, 'build_instances', updates, exc, request_spec) for build_request in build_requests: try: build_request.destroy() except exception.BuildRequestNotFound: # Instance was deleted before we finished scheduling inst = instances_by_uuid[build_request.instance_uuid] with obj_target_cell(inst, cell0): inst.destroy() def schedule_and_build_instances(self, context, build_requests, request_specs, image, admin_password, injected_files, requested_networks, block_device_mapping, tags=None): # Add all the UUIDs for the instances instance_uuids = [spec.instance_uuid for spec in request_specs] try: host_lists = self._schedule_instances(context, request_specs[0], instance_uuids, return_alternates=True) except Exception as exc: LOG.exception('Failed to schedule instances') self._bury_in_cell0(context, request_specs[0], exc, build_requests=build_requests, block_device_mapping=block_device_mapping, tags=tags) return host_mapping_cache = {} cell_mapping_cache = {} instances = [] host_az = {} # host=az cache to optimize multi-create for (build_request, request_spec, host_list) in zip( build_requests, request_specs, host_lists): instance = build_request.get_new_instance(context) # host_list is a list of one or more Selection objects, the first # of which has been selected and its resources claimed. host = host_list[0] # Convert host from the scheduler into a cell record if host.service_host not in host_mapping_cache: try: host_mapping = objects.HostMapping.get_by_host( context, host.service_host) host_mapping_cache[host.service_host] = host_mapping except exception.HostMappingNotFound as exc: LOG.error('No host-to-cell mapping found for selected ' 'host %(host)s. Setup is incomplete.', {'host': host.service_host}) self._bury_in_cell0( context, request_spec, exc, build_requests=[build_request], instances=[instance], block_device_mapping=block_device_mapping, tags=tags) # This is a placeholder in case the quota recheck fails. instances.append(None) continue else: host_mapping = host_mapping_cache[host.service_host] cell = host_mapping.cell_mapping # Before we create the instance, let's make one final check that # the build request is still around and wasn't deleted by the user # already. try: objects.BuildRequest.get_by_instance_uuid( context, instance.uuid) except exception.BuildRequestNotFound: # the build request is gone so we're done for this instance LOG.debug('While scheduling instance, the build request ' 'was already deleted.', instance=instance) # This is a placeholder in case the quota recheck fails. instances.append(None) # If the build request was deleted and the instance is not # going to be created, there is on point in leaving an orphan # instance mapping so delete it. try: im = objects.InstanceMapping.get_by_instance_uuid( context, instance.uuid) im.destroy() except exception.InstanceMappingNotFound: pass self.report_client.delete_allocation_for_instance( context, instance.uuid, force=True) continue else: if host.service_host not in host_az: host_az[host.service_host] = ( availability_zones.get_host_availability_zone( context, host.service_host)) instance.availability_zone = host_az[host.service_host] with obj_target_cell(instance, cell): instance.create() instances.append(instance) cell_mapping_cache[instance.uuid] = cell # NOTE(melwitt): We recheck the quota after allocating the # resources to prevent users from allocating more resources # than their allowed quota in the event of a race. 
This is # configurable because it can be expensive if strict quota # limits are not required in a deployment. try: self._recheck_quota(context, instance.flavor, request_specs[0], len(build_requests), project_id=instance.project_id, user_id=instance.user_id ) except (exception.TooManyInstances, limit_exceptions.ProjectOverLimit) as exc: with excutils.save_and_reraise_exception(): self._cleanup_build_artifacts( context, exc, instances, build_requests, request_specs, block_device_mapping, tags, cell_mapping_cache) zipped = zip(build_requests, request_specs, host_lists, instances) for (build_request, request_spec, host_list, instance) in zipped: if instance is None: # Skip placeholders that were buried in cell0 or had their # build requests deleted by the user before instance create. continue cell = cell_mapping_cache[instance.uuid] # host_list is a list of one or more Selection objects, the first # of which has been selected and its resources claimed. host = host_list.pop(0) alts = [(alt.service_host, alt.nodename) for alt in host_list] LOG.debug("Selected host: %s; Selected node: %s; Alternates: %s", host.service_host, host.nodename, alts, instance=instance) filter_props = request_spec.to_legacy_filter_properties_dict() scheduler_utils.populate_retry(filter_props, instance.uuid) scheduler_utils.populate_filter_properties(filter_props, host) # Now that we have a selected host (which has claimed resource # allocations in the scheduler) for this instance, we may need to # map allocations to resource providers in the request spec. try: scheduler_utils.fill_provider_mapping(request_spec, host) except Exception as exc: # If anything failed here we need to cleanup and bail out. with excutils.save_and_reraise_exception(): self._cleanup_build_artifacts( context, exc, instances, build_requests, request_specs, block_device_mapping, tags, cell_mapping_cache) # TODO(melwitt): Maybe we should set_target_cell on the contexts # once we map to a cell, and remove these separate with statements. with obj_target_cell(instance, cell) as cctxt: # send a state update notification for the initial create to # show it going from non-existent to BUILDING # This can lazy-load attributes on instance. notifications.send_update_with_states(cctxt, instance, None, vm_states.BUILDING, None, None, service="conductor") objects.InstanceAction.action_start( cctxt, instance.uuid, instance_actions.CREATE, want_result=False) instance_bdms = self._create_block_device_mapping( cell, instance.flavor, instance.uuid, block_device_mapping) instance_tags = self._create_tags(cctxt, instance.uuid, tags) # TODO(Kevin Zheng): clean this up once instance.create() handles # tags; we do this so the instance.create notification in # build_and_run_instance in nova-compute doesn't lazy-load tags instance.tags = instance_tags if instance_tags \ else objects.TagList() # Update mapping for instance. self._map_instance_to_cell(context, instance, cell) if not self._delete_build_request( context, build_request, instance, cell, instance_bdms, instance_tags): # The build request was deleted before/during scheduling so # the instance is gone and we don't have anything to build for # this one. 
continue try: accel_uuids = self._create_and_bind_arq_for_instance( context, instance, host.nodename, request_spec, requested_networks) except Exception as exc: with excutils.save_and_reraise_exception(): self._cleanup_build_artifacts( context, exc, instances, build_requests, request_specs, block_device_mapping, tags, cell_mapping_cache) # NOTE(danms): Compute RPC expects security group names or ids # not objects, so convert this to a list of names until we can # pass the objects. legacy_secgroups = [s.identifier for s in request_spec.security_groups] with obj_target_cell(instance, cell) as cctxt: self.compute_rpcapi.build_and_run_instance( cctxt, instance=instance, image=image, request_spec=request_spec, filter_properties=filter_props, admin_password=admin_password, injected_files=injected_files, requested_networks=requested_networks, security_groups=legacy_secgroups, block_device_mapping=instance_bdms, host=host.service_host, node=host.nodename, limits=host.limits, host_list=host_list, accel_uuids=accel_uuids) def _create_and_bind_arqs( self, cyclient, instance_uuid, extra_specs, hostname, resource_provider_mapping): """Create ARQs comes from extra specs, determine their RPs. The binding is asynchronous; Cyborg will notify on completion. The notification will be handled in the compute manager. """ arqs = [] bindings = {} dp_name = extra_specs.get('accel:device_profile') # profiles from request spec: Create ARQ and binding if not dp_name: # empty arq list and binding info return bindings LOG.debug('Calling Cyborg to get ARQs. dp_name=%s instance=%s', dp_name, instance_uuid) arqs = cyclient.create_arqs_and_match_resource_providers( dp_name, resource_provider_mapping) LOG.debug('Got ARQs with resource provider mapping %s', arqs) bindings = { arq['uuid']: {"hostname": hostname, "device_rp_uuid": arq['device_rp_uuid'], "instance_uuid": instance_uuid } for arq in arqs} return bindings def _create_arqs_for_ports(self, cyclient, instance_uuid, requested_networks, hostname, resource_provider_mapping): """Create ARQs for port with backend device profile. The binding is asynchronous; Cyborg will notify on completion. The notification will be handled in the compute manager. """ bindings = {} for request_net in requested_networks: if request_net.port_id and request_net.device_profile: device_profile = request_net.device_profile # the port doesn't support multiple devices arqs = cyclient.create_arqs(device_profile) if len(arqs) > 1: raise exception.AcceleratorRequestOpFailed( op=_('create'), msg='the port does not support multiple devices.') arq = arqs[0] LOG.debug("Create ARQ %s for port %s of instance %s", arq["uuid"], request_net.port_id, instance_uuid) request_net.arq_uuid = arq["uuid"] rp_uuid = cyclient.get_arq_device_rp_uuid( arq, resource_provider_mapping, request_net.port_id) arq_binding = {request_net.arq_uuid: {"hostname": hostname, "device_rp_uuid": rp_uuid, "instance_uuid": instance_uuid} } LOG.debug("ARQ %s binding: %s", request_net.arq_uuid, arq_binding) bindings.update(arq_binding) return bindings @staticmethod def _map_instance_to_cell(context, instance, cell): """Update the instance mapping to point at the given cell. During initial scheduling once a host and cell is selected in which to build the instance this method is used to update the instance mapping to point at that cell. :param context: nova auth RequestContext :param instance: Instance object being built :param cell: CellMapping representing the cell in which the instance was created and is being built. 
:returns: InstanceMapping object that was updated. """ inst_mapping = objects.InstanceMapping.get_by_instance_uuid( context, instance.uuid) # Perform a final sanity check that the instance is not mapped # to some other cell already because of maybe some crazy # clustered message queue weirdness. if inst_mapping.cell_mapping is not None: LOG.error('During scheduling instance is already mapped to ' 'another cell: %s. This should not happen and is an ' 'indication of bigger problems. If you see this you ' 'should report it to the nova team. Overwriting ' 'the mapping to point at cell %s.', inst_mapping.cell_mapping.identity, cell.identity, instance=instance) inst_mapping.cell_mapping = cell inst_mapping.save() return inst_mapping def _cleanup_build_artifacts(self, context, exc, instances, build_requests, request_specs, block_device_mappings, tags, cell_mapping_cache): for (instance, build_request, request_spec) in zip( instances, build_requests, request_specs): # Skip placeholders that were buried in cell0 or had their # build requests deleted by the user before instance create. if instance is None: continue updates = {'vm_state': vm_states.ERROR, 'task_state': None} cell = cell_mapping_cache[instance.uuid] with try_target_cell(context, cell) as cctxt: self._set_vm_state_and_notify(cctxt, instance.uuid, 'build_instances', updates, exc, request_spec) # In order to properly clean-up volumes when deleting a server in # ERROR status with no host, we need to store BDMs in the same # cell. if block_device_mappings: self._create_block_device_mapping( cell, instance.flavor, instance.uuid, block_device_mappings) # Like BDMs, the server tags provided by the user when creating the # server should be persisted in the same cell so they can be shown # from the API. if tags: with nova_context.target_cell(context, cell) as cctxt: self._create_tags(cctxt, instance.uuid, tags) # NOTE(mdbooth): To avoid an incomplete instance record being # returned by the API, the instance mapping must be # created after the instance record is complete in # the cell, and before the build request is # destroyed. # TODO(mnaser): The cell mapping should already be populated by # this point to avoid setting it below here. inst_mapping = objects.InstanceMapping.get_by_instance_uuid( context, instance.uuid) inst_mapping.cell_mapping = cell inst_mapping.save() # Be paranoid about artifacts being deleted underneath us. try: build_request.destroy() except exception.BuildRequestNotFound: pass try: request_spec.destroy() except exception.RequestSpecNotFound: pass def _delete_build_request(self, context, build_request, instance, cell, instance_bdms, instance_tags): """Delete a build request after creating the instance in the cell. This method handles cleaning up the instance in case the build request is already deleted by the time we try to delete it. 
:param context: the context of the request being handled :type context: nova.context.RequestContext :param build_request: the build request to delete :type build_request: nova.objects.BuildRequest :param instance: the instance created from the build_request :type instance: nova.objects.Instance :param cell: the cell in which the instance was created :type cell: nova.objects.CellMapping :param instance_bdms: list of block device mappings for the instance :type instance_bdms: nova.objects.BlockDeviceMappingList :param instance_tags: list of tags for the instance :type instance_tags: nova.objects.TagList :returns: True if the build request was successfully deleted, False if the build request was already deleted and the instance is now gone. """ try: build_request.destroy() except exception.BuildRequestNotFound: # This indicates an instance deletion request has been # processed, and the build should halt here. Clean up the # bdm, tags and instance record. with obj_target_cell(instance, cell) as cctxt: with compute_utils.notify_about_instance_delete( self.notifier, cctxt, instance, source=fields.NotificationSource.CONDUCTOR): try: instance.destroy() except exception.InstanceNotFound: pass except exception.ObjectActionError: # NOTE(melwitt): Instance became scheduled during # the destroy, "host changed". Refresh and re-destroy. try: instance.refresh() instance.destroy() except exception.InstanceNotFound: pass for bdm in instance_bdms: with obj_target_cell(bdm, cell): try: bdm.destroy() except exception.ObjectActionError: pass if instance_tags: with try_target_cell(context, cell) as target_ctxt: try: objects.TagList.destroy(target_ctxt, instance.uuid) except exception.InstanceNotFound: pass return False return True def cache_images(self, context, aggregate, image_ids): """Cache a set of images on the set of hosts in an aggregate. :param context: The RequestContext :param aggregate: The Aggregate object from the request to constrain the host list :param image_id: The IDs of the image to cache """ # TODO(mriedem): Consider including the list of images in the # notification payload. 
compute_utils.notify_about_aggregate_action( context, aggregate, fields.NotificationAction.IMAGE_CACHE, fields.NotificationPhase.START) clock = timeutils.StopWatch() threads = CONF.image_cache.precache_concurrency fetch_executor = futurist.GreenThreadPoolExecutor(max_workers=threads) hosts_by_cell = {} cells_by_uuid = {} # TODO(danms): Make this a much more efficient bulk query for hostname in aggregate.hosts: hmap = objects.HostMapping.get_by_host(context, hostname) cells_by_uuid.setdefault(hmap.cell_mapping.uuid, hmap.cell_mapping) hosts_by_cell.setdefault(hmap.cell_mapping.uuid, []) hosts_by_cell[hmap.cell_mapping.uuid].append(hostname) LOG.info('Preparing to request pre-caching of image(s) %(image_ids)s ' 'on %(hosts)i hosts across %(cells)i cells.', {'image_ids': ','.join(image_ids), 'hosts': len(aggregate.hosts), 'cells': len(hosts_by_cell)}) clock.start() stats = collections.defaultdict(lambda: (0, 0, 0, 0)) failed_images = collections.defaultdict(int) down_hosts = set() host_stats = { 'completed': 0, 'total': len(aggregate.hosts), } def host_completed(context, host, result): for image_id, status in result.items(): cached, existing, error, unsupported = stats[image_id] if status == 'error': failed_images[image_id] += 1 error += 1 elif status == 'cached': cached += 1 elif status == 'existing': existing += 1 elif status == 'unsupported': unsupported += 1 stats[image_id] = (cached, existing, error, unsupported) host_stats['completed'] += 1 compute_utils.notify_about_aggregate_cache(context, aggregate, host, result, host_stats['completed'], host_stats['total']) def wrap_cache_images(ctxt, host, image_ids): result = self.compute_rpcapi.cache_images( ctxt, host=host, image_ids=image_ids) host_completed(context, host, result) def skipped_host(context, host, image_ids): result = {image: 'skipped' for image in image_ids} host_completed(context, host, result) for cell_uuid, hosts in hosts_by_cell.items(): cell = cells_by_uuid[cell_uuid] with nova_context.target_cell(context, cell) as target_ctxt: for host in hosts: service = objects.Service.get_by_compute_host(target_ctxt, host) if not self.servicegroup_api.service_is_up(service): down_hosts.add(host) LOG.info( 'Skipping image pre-cache request to compute ' '%(host)r because it is not up', {'host': host}) skipped_host(target_ctxt, host, image_ids) continue utils.spawn_on(fetch_executor, wrap_cache_images, target_ctxt, host, image_ids) # Wait until all those things finish fetch_executor.shutdown(wait=True) overall_stats = {'cached': 0, 'existing': 0, 'error': 0, 'unsupported': 0} for cached, existing, error, unsupported in stats.values(): overall_stats['cached'] += cached overall_stats['existing'] += existing overall_stats['error'] += error overall_stats['unsupported'] += unsupported clock.stop() LOG.info('Image pre-cache operation for image(s) %(image_ids)s ' 'completed in %(time).2f seconds; ' '%(cached)i cached, %(existing)i existing, %(error)i errors, ' '%(unsupported)i unsupported, %(skipped)i skipped (down) ' 'hosts', {'image_ids': ','.join(image_ids), 'time': clock.elapsed(), 'cached': overall_stats['cached'], 'existing': overall_stats['existing'], 'error': overall_stats['error'], 'unsupported': overall_stats['unsupported'], 'skipped': len(down_hosts), }) # Log error'd images specifically at warning level for image_id, fails in failed_images.items(): LOG.warning('Image pre-cache operation for image %(image)s ' 'failed %(fails)i times', {'image': image_id, 'fails': fails}) compute_utils.notify_about_aggregate_action( context, aggregate, 
fields.NotificationAction.IMAGE_CACHE, fields.NotificationPhase.END) @targets_cell @wrap_instance_event(prefix='conductor') def confirm_snapshot_based_resize(self, context, instance, migration): """Executes the ConfirmResizeTask :param context: nova auth request context targeted at the target cell :param instance: Instance object in "resized" status from the target cell :param migration: Migration object from the target cell for the resize operation expected to have status "confirming" """ task = cross_cell_migrate.ConfirmResizeTask( context, instance, migration, self.notifier, self.compute_rpcapi) task.execute() @targets_cell # NOTE(mriedem): Upon successful completion of RevertResizeTask the # instance is hard-deleted, along with its instance action record(s), from # the target cell database so EventReporter hits InstanceActionNotFound on # __exit__. Pass graceful_exit=True to avoid an ugly traceback. @wrap_instance_event(prefix='conductor', graceful_exit=True) def revert_snapshot_based_resize(self, context, instance, migration): """Executes the RevertResizeTask :param context: nova auth request context targeted at the target cell :param instance: Instance object in "resized" status from the target cell :param migration: Migration object from the target cell for the resize operation expected to have status "reverting" """ task = cross_cell_migrate.RevertResizeTask( context, instance, migration, self.notifier, self.compute_rpcapi) task.execute() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conductor/rpcapi.py0000664000175000017500000005235600000000000017370 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Client side of the conductor RPC API.""" import oslo_messaging as messaging from oslo_serialization import jsonutils from oslo_versionedobjects import base as ovo_base import nova.conf from nova import exception from nova.i18n import _ from nova.objects import base as objects_base from nova import profiler from nova import rpc CONF = nova.conf.CONF RPC_TOPIC = 'conductor' @profiler.trace_cls("rpc") class ConductorAPI(object): """Client side of the conductor RPC API API version history: * 1.0 - Initial version. 
* 1.1 - Added migration_update * 1.2 - Added instance_get_by_uuid and instance_get_all_by_host * 1.3 - Added aggregate_host_add and aggregate_host_delete * 1.4 - Added migration_get * 1.5 - Added bw_usage_update * 1.6 - Added get_backdoor_port() * 1.7 - Added aggregate_get_by_host, aggregate_metadata_add, and aggregate_metadata_delete * 1.8 - Added security_group_get_by_instance and security_group_rule_get_by_security_group * 1.9 - Added provider_fw_rule_get_all * 1.10 - Added agent_build_get_by_triple * 1.11 - Added aggregate_get * 1.12 - Added block_device_mapping_update_or_create * 1.13 - Added block_device_mapping_get_all_by_instance * 1.14 - Added block_device_mapping_destroy * 1.15 - Added instance_get_all_by_filters and instance_get_all_hung_in_rebooting and instance_get_active_by_window Deprecated instance_get_all_by_host * 1.16 - Added instance_destroy * 1.17 - Added instance_info_cache_delete * 1.18 - Added instance_type_get * 1.19 - Added vol_get_usage_by_time and vol_usage_update * 1.20 - Added migration_get_unconfirmed_by_dest_compute * 1.21 - Added service_get_all_by * 1.22 - Added ping * 1.23 - Added instance_get_all Un-Deprecate instance_get_all_by_host * 1.24 - Added instance_get * 1.25 - Added action_event_start and action_event_finish * 1.26 - Added instance_info_cache_update * 1.27 - Added service_create * 1.28 - Added binary arg to service_get_all_by * 1.29 - Added service_destroy * 1.30 - Added migration_create * 1.31 - Added migration_get_in_progress_by_host_and_node * 1.32 - Added optional node to instance_get_all_by_host * 1.33 - Added compute_node_create and compute_node_update * 1.34 - Added service_update * 1.35 - Added instance_get_active_by_window_joined * 1.36 - Added instance_fault_create * 1.37 - Added task_log_get, task_log_begin_task, task_log_end_task * 1.38 - Added service name to instance_update * 1.39 - Added notify_usage_exists * 1.40 - Added security_groups_trigger_handler and security_groups_trigger_members_refresh Remove instance_get_active_by_window * 1.41 - Added fixed_ip_get_by_instance, network_get, instance_floating_address_get_all, quota_commit, quota_rollback * 1.42 - Added get_ec2_ids, aggregate_metadata_get_by_host * 1.43 - Added compute_stop * 1.44 - Added compute_node_delete * 1.45 - Added project_id to quota_commit and quota_rollback * 1.46 - Added compute_confirm_resize * 1.47 - Added columns_to_join to instance_get_all_by_host and instance_get_all_by_filters * 1.48 - Added compute_unrescue ... Grizzly supports message version 1.48. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 1.48. * 1.49 - Added columns_to_join to instance_get_by_uuid * 1.50 - Added object_action() and object_class_action() * 1.51 - Added the 'legacy' argument to block_device_mapping_get_all_by_instance * 1.52 - Pass instance objects for compute_confirm_resize * 1.53 - Added compute_reboot * 1.54 - Added 'update_cells' argument to bw_usage_update * 1.55 - Pass instance objects for compute_stop * 1.56 - Remove compute_confirm_resize and migration_get_unconfirmed_by_dest_compute * 1.57 - Remove migration_create() * 1.58 - Remove migration_get() ... Havana supports message version 1.58. So, any changes to existing methods in 1.x after that point should be done such that they can handle the version_cap being set to 1.58. * 1.59 - Remove instance_info_cache_update() * 1.60 - Remove aggregate_metadata_add() and aggregate_metadata_delete() * ... 
- Remove security_group_get_by_instance() and security_group_rule_get_by_security_group() * 1.61 - Return deleted instance from instance_destroy() * 1.62 - Added object_backport() * 1.63 - Changed the format of values['stats'] from a dict to a JSON string in compute_node_update() * 1.64 - Added use_slave to instance_get_all_filters() - Remove instance_type_get() - Remove aggregate_get() - Remove aggregate_get_by_host() - Remove instance_get() - Remove migration_update() - Remove block_device_mapping_destroy() * 2.0 - Drop backwards compatibility - Remove quota_rollback() and quota_commit() - Remove aggregate_host_add() and aggregate_host_delete() - Remove network_migrate_instance_start() and network_migrate_instance_finish() - Remove vol_get_usage_by_time ... Icehouse supports message version 2.0. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 2.0. * Remove instance_destroy() * Remove compute_unrescue() * Remove instance_get_all_by_filters() * Remove instance_get_active_by_window_joined() * Remove instance_fault_create() * Remove action_event_start() and action_event_finish() * Remove instance_get_by_uuid() * Remove agent_build_get_by_triple() ... Juno supports message version 2.0. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 2.0. * 2.1 - Make notify_usage_exists() take an instance object * Remove bw_usage_update() * Remove notify_usage_exists() ... Kilo supports message version 2.1. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 2.1. * Remove get_ec2_ids() * Remove service_get_all_by() * Remove service_create() * Remove service_destroy() * Remove service_update() * Remove migration_get_in_progress_by_host_and_node() * Remove aggregate_metadata_get_by_host() * Remove block_device_mapping_update_or_create() * Remove block_device_mapping_get_all_by_instance() * Remove instance_get_all_by_host() * Remove compute_node_update() * Remove compute_node_delete() * Remove security_groups_trigger_handler() * Remove task_log_get() * Remove task_log_begin_task() * Remove task_log_end_task() * Remove security_groups_trigger_members_refresh() * Remove vol_usage_update() * Remove instance_update() * 2.2 - Add object_backport_versions() * 2.3 - Add object_class_action_versions() * Remove compute_node_create() * Remove object_backport() * 3.0 - Drop backwards compatibility ... Liberty, Mitaka, Newton, and Ocata support message version 3.0. So, any changes to existing methods in 3.x after that point should be done such that they can handle the version_cap being set to 3.0. * Remove provider_fw_rule_get_all() """ VERSION_ALIASES = { 'grizzly': '1.48', 'havana': '1.58', 'icehouse': '2.0', 'juno': '2.0', 'kilo': '2.1', 'liberty': '3.0', 'mitaka': '3.0', 'newton': '3.0', 'ocata': '3.0', } def __init__(self): super(ConductorAPI, self).__init__() target = messaging.Target(topic=RPC_TOPIC, version='3.0') version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.conductor, CONF.upgrade_levels.conductor) serializer = objects_base.NovaObjectSerializer() self.client = rpc.get_client(target, version_cap=version_cap, serializer=serializer) # TODO(hanlind): This method can be removed once oslo.versionedobjects # has been converted to use version_manifests in remotable_classmethod # operations, which will use the new class action handler. 
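    # -----------------------------------------------------------------------
    # Illustrative sketch (not part of nova): how the version cap above is
    # resolved. ``CONF.upgrade_levels.conductor`` may hold either a release
    # alias (e.g. 'kilo') or an explicit version string (e.g. '3.0'); the
    # ``VERSION_ALIASES.get(x, x)`` idiom maps an alias to its pinned version
    # and passes anything else (including None, meaning "no cap") through
    # unchanged. The dict below simply repeats VERSION_ALIASES so the example
    # is self-contained; the helper is never called by nova.
    def _resolve_version_cap_sketch(configured_level):
        aliases = {'grizzly': '1.48', 'havana': '1.58', 'icehouse': '2.0',
                   'juno': '2.0', 'kilo': '2.1', 'liberty': '3.0',
                   'mitaka': '3.0', 'newton': '3.0', 'ocata': '3.0'}
        # A release name resolves to its pinned version; any other value
        # (explicit version string or None) is passed through unchanged.
        return aliases.get(configured_level, configured_level)
    # _resolve_version_cap_sketch('kilo') -> '2.1'
    # _resolve_version_cap_sketch('3.0')  -> '3.0'
    # _resolve_version_cap_sketch(None)   -> None
    # -----------------------------------------------------------------------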
def object_class_action(self, context, objname, objmethod, objver, args, kwargs): versions = ovo_base.obj_tree_get_versions(objname) return self.object_class_action_versions(context, objname, objmethod, versions, args, kwargs) def object_class_action_versions(self, context, objname, objmethod, object_versions, args, kwargs): cctxt = self.client.prepare() return cctxt.call(context, 'object_class_action_versions', objname=objname, objmethod=objmethod, object_versions=object_versions, args=args, kwargs=kwargs) def object_action(self, context, objinst, objmethod, args, kwargs): cctxt = self.client.prepare() return cctxt.call(context, 'object_action', objinst=objinst, objmethod=objmethod, args=args, kwargs=kwargs) def object_backport_versions(self, context, objinst, object_versions): cctxt = self.client.prepare() return cctxt.call(context, 'object_backport_versions', objinst=objinst, object_versions=object_versions) @profiler.trace_cls("rpc") class ComputeTaskAPI(object): """Client side of the conductor 'compute' namespaced RPC API API version history: 1.0 - Initial version (empty). 1.1 - Added unified migrate_server call. 1.2 - Added build_instances 1.3 - Added unshelve_instance 1.4 - Added reservations to migrate_server. 1.5 - Added the legacy_bdm parameter to build_instances 1.6 - Made migrate_server use instance objects 1.7 - Do not send block_device_mapping and legacy_bdm to build_instances 1.8 - Add rebuild_instance 1.9 - Converted requested_networks to NetworkRequestList object 1.10 - Made migrate_server() and build_instances() send flavor objects 1.11 - Added clean_shutdown to migrate_server() 1.12 - Added request_spec to rebuild_instance() 1.13 - Added request_spec to migrate_server() 1.14 - Added request_spec to unshelve_instance() 1.15 - Added live_migrate_instance 1.16 - Added schedule_and_build_instances 1.17 - Added tags to schedule_and_build_instances() 1.18 - Added request_spec to build_instances(). 1.19 - build_instances() now gets a 'host_lists' parameter that represents potential alternate hosts for retries within a cell for each instance. 1.20 - migrate_server() now gets a 'host_list' parameter that represents potential alternate hosts for retries within a cell. 1.21 - Added cache_images() 1.22 - Added confirm_snapshot_based_resize() 1.23 - Added revert_snapshot_based_resize() 1.24 - Add reimage_boot_volume parameter to rebuild_instance() 1.25 - Add target_state parameter to rebuild_instance() """ def __init__(self): super(ComputeTaskAPI, self).__init__() target = messaging.Target(topic=RPC_TOPIC, namespace='compute_task', version='1.0') serializer = objects_base.NovaObjectSerializer() self.client = rpc.get_client(target, serializer=serializer) def live_migrate_instance(self, context, instance, scheduler_hint, block_migration, disk_over_commit, request_spec): kw = {'instance': instance, 'scheduler_hint': scheduler_hint, 'block_migration': block_migration, 'disk_over_commit': disk_over_commit, 'request_spec': request_spec, } version = '1.15' cctxt = self.client.prepare(version=version) cctxt.cast(context, 'live_migrate_instance', **kw) # TODO(melwitt): Remove the reservations parameter in version 2.0 of the # RPC API. # TODO(mriedem): Make request_spec required *and* a RequestSpec object # rather than a legacy dict in version 2.0 of the RPC API. 
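    # -----------------------------------------------------------------------
    # Illustrative sketch (not part of nova): the ``can_send_version()``
    # downgrade pattern used by migrate_server() and build_instances() below.
    # Starting from the newest version, each parameter that an older peer
    # would not understand is dropped until a version the capped client can
    # send is found. ``client`` is a stand-in for the oslo.messaging client
    # object and ``compat`` lists only a few of migrate_server's real
    # parameters; the helper is never called by nova.
    def _downgrade_kwargs_sketch(client, kwargs):
        compat = [('1.20', 'host_list'),
                  ('1.13', 'request_spec'),
                  ('1.11', 'clean_shutdown')]
        for version, param in compat:
            if client.can_send_version(version):
                return version, kwargs
            # Peer is older than ``version``: strip the parameter it would
            # reject and try the next (older) version in the list.
            kwargs.pop(param, None)
        # Peer predates everything listed above; fall back even further.
        return '1.10', kwargs
    # -----------------------------------------------------------------------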
def migrate_server(self, context, instance, scheduler_hint, live, rebuild, flavor, block_migration, disk_over_commit, reservations=None, clean_shutdown=True, request_spec=None, host_list=None, do_cast=False): kw = {'instance': instance, 'scheduler_hint': scheduler_hint, 'live': live, 'rebuild': rebuild, 'flavor': flavor, 'block_migration': block_migration, 'disk_over_commit': disk_over_commit, 'reservations': reservations, 'clean_shutdown': clean_shutdown, 'request_spec': request_spec, 'host_list': host_list, } version = '1.20' if not self.client.can_send_version(version): del kw['host_list'] version = '1.13' if not self.client.can_send_version(version): del kw['request_spec'] version = '1.11' if not self.client.can_send_version(version): del kw['clean_shutdown'] version = '1.10' if not self.client.can_send_version(version): kw['flavor'] = objects_base.obj_to_primitive(flavor) version = '1.6' if not self.client.can_send_version(version): kw['instance'] = jsonutils.to_primitive( objects_base.obj_to_primitive(instance)) version = '1.4' cctxt = self.client.prepare( version=version, call_monitor_timeout=CONF.rpc_response_timeout, timeout=CONF.long_rpc_timeout) if do_cast: return cctxt.cast(context, 'migrate_server', **kw) return cctxt.call(context, 'migrate_server', **kw) def build_instances(self, context, instances, image, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping, legacy_bdm=True, request_spec=None, host_lists=None): image_p = jsonutils.to_primitive(image) kwargs = {"instances": instances, "image": image_p, "filter_properties": filter_properties, "admin_password": admin_password, "injected_files": injected_files, "requested_networks": requested_networks, "security_groups": security_groups, "request_spec": request_spec, "host_lists": host_lists} version = '1.19' if not self.client.can_send_version(version): version = '1.18' kwargs.pop("host_lists") if not self.client.can_send_version(version): version = '1.10' kwargs.pop("request_spec") if not self.client.can_send_version(version): version = '1.9' if 'instance_type' in filter_properties: flavor = filter_properties['instance_type'] flavor_p = objects_base.obj_to_primitive(flavor) kwargs["filter_properties"] = dict(filter_properties, instance_type=flavor_p) if not self.client.can_send_version(version): version = '1.8' nets = kwargs['requested_networks'].as_tuples() kwargs['requested_networks'] = nets if not self.client.can_send_version('1.7'): version = '1.5' bdm_p = objects_base.obj_to_primitive(block_device_mapping) kwargs.update({'block_device_mapping': bdm_p, 'legacy_bdm': legacy_bdm}) cctxt = self.client.prepare(version=version) cctxt.cast(context, 'build_instances', **kwargs) def schedule_and_build_instances(self, context, build_requests, request_specs, image, admin_password, injected_files, requested_networks, block_device_mapping, tags=None): version = '1.17' kw = {'build_requests': build_requests, 'request_specs': request_specs, 'image': jsonutils.to_primitive(image), 'admin_password': admin_password, 'injected_files': injected_files, 'requested_networks': requested_networks, 'block_device_mapping': block_device_mapping, 'tags': tags} if not self.client.can_send_version(version): version = '1.16' del kw['tags'] cctxt = self.client.prepare(version=version) cctxt.cast(context, 'schedule_and_build_instances', **kw) def unshelve_instance(self, context, instance, request_spec=None): version = '1.14' kw = {'instance': instance, 'request_spec': request_spec } if not 
self.client.can_send_version(version): version = '1.3' del kw['request_spec'] cctxt = self.client.prepare(version=version) cctxt.cast(context, 'unshelve_instance', **kw) def rebuild_instance(self, ctxt, instance, new_pass, injected_files, image_ref, orig_image_ref, orig_sys_metadata, bdms, recreate=False, on_shared_storage=False, host=None, preserve_ephemeral=False, request_spec=None, reimage_boot_volume=False, target_state=None): version = '1.25' kw = {'instance': instance, 'new_pass': new_pass, 'injected_files': injected_files, 'image_ref': image_ref, 'orig_image_ref': orig_image_ref, 'orig_sys_metadata': orig_sys_metadata, 'bdms': bdms, 'recreate': recreate, 'on_shared_storage': on_shared_storage, 'preserve_ephemeral': preserve_ephemeral, 'host': host, 'request_spec': request_spec, 'reimage_boot_volume': reimage_boot_volume, 'target_state': target_state, } if not self.client.can_send_version(version): if kw['target_state']: raise exception.UnsupportedRPCVersion( api="rebuild_instance", required="1.25") else: del kw['target_state'] version = '1.24' if not self.client.can_send_version(version): if kw['reimage_boot_volume']: raise exception.NovaException( 'Conductor RPC version does not support ' 'reimage_boot_volume parameter.') else: del kw['reimage_boot_volume'] version = '1.12' if not self.client.can_send_version(version): version = '1.8' del kw['request_spec'] cctxt = self.client.prepare(version=version) cctxt.cast(ctxt, 'rebuild_instance', **kw) def cache_images(self, ctxt, aggregate, image_ids): version = '1.21' if not self.client.can_send_version(version): raise exception.NovaException('Conductor RPC version pin does not ' 'allow cache_images() to be called') cctxt = self.client.prepare(version=version) cctxt.cast(ctxt, 'cache_images', aggregate=aggregate, image_ids=image_ids) def confirm_snapshot_based_resize( self, ctxt, instance, migration, do_cast=True): version = '1.22' if not self.client.can_send_version(version): raise exception.ServiceTooOld(_('nova-conductor too old')) kw = {'instance': instance, 'migration': migration} cctxt = self.client.prepare(version=version) if do_cast: return cctxt.cast(ctxt, 'confirm_snapshot_based_resize', **kw) return cctxt.call(ctxt, 'confirm_snapshot_based_resize', **kw) def revert_snapshot_based_resize(self, ctxt, instance, migration): version = '1.23' if not self.client.can_send_version(version): raise exception.ServiceTooOld(_('nova-conductor too old')) kw = {'instance': instance, 'migration': migration} cctxt = self.client.prepare(version=version) cctxt.cast(ctxt, 'revert_snapshot_based_resize', **kw) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3456085 nova-32.0.0/nova/conductor/tasks/0000775000175000017500000000000000000000000016652 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conductor/tasks/__init__.py0000664000175000017500000000000000000000000020751 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conductor/tasks/base.py0000664000175000017500000000312700000000000020141 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import functools from oslo_utils import excutils def rollback_wrapper(original): @functools.wraps(original) def wrap(self): try: return original(self) except Exception as ex: with excutils.save_and_reraise_exception(): self.rollback(ex) return wrap class TaskBase(metaclass=abc.ABCMeta): def __init__(self, context, instance): self.context = context self.instance = instance @rollback_wrapper def execute(self): """Run task's logic, written in _execute() method """ return self._execute() @abc.abstractmethod def _execute(self): """Descendants should place task's logic here, while resource initialization should be performed over __init__ """ pass def rollback(self, ex): """Rollback failed task Descendants should implement this method to allow task user to rollback status to state before execute method was call """ pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conductor/tasks/cross_cell_migrate.py0000664000175000017500000021760300000000000023075 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import copy from oslo_log import log as logging import oslo_messaging as messaging from oslo_utils import excutils from nova import availability_zones from nova.compute import instance_actions from nova.compute import power_state from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_states from nova.conductor.tasks import base from nova import conf from nova import context as nova_context from nova import exception from nova.i18n import _ from nova.image import glance from nova.network import constants as neutron_constants from nova.network import neutron from nova import objects from nova.objects import fields from nova.scheduler import utils as scheduler_utils from nova.volume import cinder LOG = logging.getLogger(__name__) CONF = conf.CONF def clone_creatable_object(ctxt, obj, delete_fields=None): """Targets the object at the given context and removes its id attribute Dirties all of the set fields on a new copy of the object. This is necessary before the object is created in a new cell. :param ctxt: cell-targeted nova auth request context to set on the clone :param obj: the object to re-target :param delete_fields: list of fields to delete from the new object; note that the ``id`` field is always deleted :returns: Cloned version of ``obj`` with all set fields marked as "changed" so they will be persisted on a subsequent ``obj.create()`` call. 
""" if delete_fields is None: delete_fields = [] if 'id' not in delete_fields: delete_fields.append('id') new_obj = obj.obj_clone() new_obj._context = ctxt for field in obj.obj_fields: if field in obj: if field in delete_fields: delattr(new_obj, field) else: # Dirty the field since obj_clone does not modify # _changed_fields. setattr(new_obj, field, getattr(obj, field)) return new_obj class TargetDBSetupTask(base.TaskBase): """Sub-task to create the instance data in the target cell DB. This is needed before any work can be done with the instance in the target cell, like validating the selected target compute host. """ def __init__(self, context, instance, source_migration, target_cell_context): """Initialize this task. :param context: source-cell targeted auth RequestContext :param instance: source-cell Instance object :param source_migration: source-cell Migration object for this operation :param target_cell_context: target-cell targeted auth RequestContext """ super(TargetDBSetupTask, self).__init__(context, instance) self.target_ctx = target_cell_context self.source_migration = source_migration self._target_cell_instance = None def _copy_migrations(self, migrations): """Copy migration records from the source cell to the target cell. :param migrations: MigrationList object of source cell DB records. :returns: Migration record in the target cell database that matches the active migration in the source cell. """ target_cell_migration = None for migration in migrations: migration = clone_creatable_object(self.target_ctx, migration) migration.create() if self.source_migration.uuid == migration.uuid: # Save this off so subsequent tasks don't need to look it up. target_cell_migration = migration return target_cell_migration def _execute(self): """Creates the instance and its related records in the target cell Instance.pci_devices are not copied over since those records are tightly coupled to the compute_nodes records and are meant to track inventory and allocations of PCI devices on a specific compute node. The instance.pci_requests are what "move" with the instance to the target cell and will result in new PCIDevice allocations on the target compute node in the target cell during the resize_claim. The instance.services field is not copied over since that represents the nova-compute service mapped to the instance.host, which will not make sense in the target cell. :returns: A two-item tuple of the Instance and Migration object created in the target cell """ LOG.debug( 'Creating (hidden) instance and its related records in the target ' 'cell: %s', self.target_ctx.cell_uuid, instance=self.instance) # We also have to create the BDMs and tags separately, just like in # ComputeTaskManager.schedule_and_build_instances, so get those out # of the source cell DB first before we start creating anything. # NOTE(mriedem): Console auth tokens are not copied over to the target # cell DB since they will be regenerated in the target cell as needed. # Similarly, expired console auth tokens will be automatically cleaned # from the source cell. bdms = self.instance.get_bdms() vifs = objects.VirtualInterfaceList.get_by_instance_uuid( self.context, self.instance.uuid) tags = self.instance.tags # We copy instance actions to preserve the history of the instance # in case the resize is confirmed. 
actions = objects.InstanceActionList.get_by_instance_uuid( self.context, self.instance.uuid) migrations = objects.MigrationList.get_by_filters( self.context, filters={'instance_uuid': self.instance.uuid}) # db.instance_create cannot handle some fields which might be loaded on # the instance object, so we omit those from the cloned object and # explicitly create the ones we care about (like tags) below. Things # like pci_devices and services will not make sense in the target DB # so we omit those as well. # TODO(mriedem): Determine if we care about copying faults over to the # target cell in case people use those for auditing (remember that # faults are only shown in the API for ERROR/DELETED instances and only # the most recent fault is shown). inst = clone_creatable_object( self.target_ctx, self.instance, delete_fields=['fault', 'pci_devices', 'services', 'tags']) # This part is important - we want to create the instance in the target # cell as "hidden" so while we have two copies of the instance in # different cells, listing servers out of the API will filter out the # hidden one. inst.hidden = True inst.create() self._target_cell_instance = inst # keep track of this for rollbacks # TODO(mriedem): Consider doing all of the inserts in a single # transaction context. If any of the following creates fail, the # rollback should perform a cascading hard-delete anyway. # Do the same dance for the other instance-related records. for bdm in bdms: bdm = clone_creatable_object(self.target_ctx, bdm) bdm.create() for vif in vifs: vif = clone_creatable_object(self.target_ctx, vif) vif.create() if tags: primitive_tags = [tag.tag for tag in tags] objects.TagList.create(self.target_ctx, inst.uuid, primitive_tags) for action in actions: new_action = clone_creatable_object(self.target_ctx, action) new_action.create() # For each pre-existing action, we need to also re-create its # events in the target cell. events = objects.InstanceActionEventList.get_by_action( self.context, action.id) for event in events: new_event = clone_creatable_object(self.target_ctx, event) new_event.create(action.instance_uuid, action.request_id) target_cell_migration = self._copy_migrations(migrations) return inst, target_cell_migration def rollback(self, ex): """Deletes the instance data from the target cell in case of failure""" if self._target_cell_instance: # Deleting the instance in the target cell DB should perform a # cascading delete of all related records, e.g. BDMs, VIFs, etc. LOG.debug('Destroying instance from target cell: %s', self.target_ctx.cell_uuid, instance=self._target_cell_instance) # This needs to be a hard delete because if resize fails later for # some reason, we want to be able to retry the resize to this cell # again without hitting a duplicate entry unique constraint error. self._target_cell_instance.destroy(hard_delete=True) class PrepResizeAtDestTask(base.TaskBase): """Task used to verify a given target host in a target cell. Upon successful completion, port bindings and volume attachments should be created for the target host in the target cell and resources should be claimed on the target host for the resize. Also, the instance task_state should be ``resize_prep``. """ def __init__(self, context, instance, flavor, target_migration, request_spec, compute_rpcapi, host_selection, network_api, volume_api): """Construct the PrepResizeAtDestTask instance :param context: The user request auth context. This should be targeted at the target cell. 
:param instance: The instance being migrated (this is the target cell copy of the instance record). :param flavor: The new flavor if performing resize and not just a cold migration :param target_migration: The Migration object from the target cell DB. :param request_spec: nova.objects.RequestSpec object for the operation :param compute_rpcapi: instance of nova.compute.rpcapi.ComputeAPI :param host_selection: nova.objects.Selection which is a possible target host for the cross-cell resize :param network_api: The neutron (client side) networking API. :param volume_api: The cinder (client side) block-storage API. """ super(PrepResizeAtDestTask, self).__init__(context, instance) self.flavor = flavor self.target_migration = target_migration self.request_spec = request_spec self.compute_rpcapi = compute_rpcapi self.host_selection = host_selection self.network_api = network_api self.volume_api = volume_api # Keep track of anything we created so we can rollback. self._bindings_by_port_id = {} self._created_volume_attachment_ids = [] def _create_port_bindings(self): """Creates inactive port bindings against the selected target host for the ports attached to the instance. The ``self._bindings_by_port_id`` variable will be set upon successful completion. :raises: MigrationPreCheckError if port binding failed """ LOG.debug('Creating port bindings for destination host %s', self.host_selection.service_host) try: self._bindings_by_port_id = self.network_api.bind_ports_to_host( self.context, self.instance, self.host_selection.service_host) except exception.PortBindingFailed: raise exception.MigrationPreCheckError(reason=_( 'Failed to create port bindings for host %s') % self.host_selection.service_host) def _create_volume_attachments(self): """Create empty volume attachments for volume BDMs attached to the instance in the target cell. The BlockDeviceMapping.attachment_id field is updated for each volume BDM processed. Remember that these BDM records are from the target cell database so the changes will only go there. :return: BlockDeviceMappingList of volume BDMs with an updated attachment_id field for the newly created empty attachment for that BDM """ LOG.debug('Creating volume attachments for destination host %s', self.host_selection.service_host) volume_bdms = objects.BlockDeviceMappingList(objects=[ bdm for bdm in self.instance.get_bdms() if bdm.is_volume]) for bdm in volume_bdms: # Create the empty (no host connector) attachment. attach_ref = self.volume_api.attachment_create( self.context, bdm.volume_id, bdm.instance_uuid) # Keep track of what we create for rollbacks. self._created_volume_attachment_ids.append(attach_ref['id']) # Update the BDM in the target cell database. bdm.attachment_id = attach_ref['id'] # Note that ultimately the BDMs in the target cell are either # pointing at attachments that we can use, or this sub-task has # failed in which case we will fail the main task and should # rollback and delete the instance and its BDMs in the target cell # database, so that is why we do not track the original attachment # IDs in order to roll them back on the BDM records. bdm.save() return volume_bdms def _execute(self): """Performs pre-cross-cell resize checks/claims on the targeted host This ensures things like networking (ports) will continue to work on the target host in the other cell before we initiate the migration of the server. Resources are also claimed on the target host which in turn creates the MigrationContext for the instance in the target cell database. 
:returns: MigrationContext created in the target cell database during the resize_claim in the destination compute service. :raises: nova.exception.MigrationPreCheckError if the pre-check validation fails for the given host selection; this indicates an alternative host *may* work but this one does not. """ destination = self.host_selection.service_host LOG.debug('Verifying selected host %s for cross-cell resize.', destination, instance=self.instance) # Validate networking by creating port bindings for this host. self._create_port_bindings() # Create new empty volume attachments for the volume BDMs attached # to the instance. Technically this is not host specific and we could # do this outside of the PrepResizeAtDestTask sub-task but volume # attachments are meant to be cheap and plentiful so it is nice to # keep them self-contained within each execution of this task and # rollback anything we created if we fail. self._create_volume_attachments() try: LOG.debug('Calling destination host %s to prepare for cross-cell ' 'resize and claim resources.', destination) return self.compute_rpcapi.prep_snapshot_based_resize_at_dest( self.context, self.instance, self.flavor, self.host_selection.nodename, self.target_migration, self.host_selection.limits, self.request_spec, destination) except messaging.MessagingTimeout: msg = _('RPC timeout while checking if we can cross-cell migrate ' 'to host: %s') % destination raise exception.MigrationPreCheckError(reason=msg) def rollback(self, ex): # Rollback anything we created. host = self.host_selection.service_host # Cleanup any destination host port bindings. LOG.debug('Cleaning up port bindings for destination host %s', host) for port_id in self._bindings_by_port_id: try: self.network_api.delete_port_binding( self.context, port_id, host) except Exception: # Don't raise if we fail to cleanup, just log it. LOG.exception('An error occurred while cleaning up binding ' 'for port %s on host %s.', port_id, host, instance=self.instance) # Cleanup any destination host volume attachments. LOG.debug( 'Cleaning up volume attachments for destination host %s', host) for attachment_id in self._created_volume_attachment_ids: try: self.volume_api.attachment_delete(self.context, attachment_id) except Exception: # Don't raise if we fail to cleanup, just log it. LOG.exception('An error occurred while cleaning up volume ' 'attachment %s.', attachment_id, instance=self.instance) class PrepResizeAtSourceTask(base.TaskBase): """Task to prepare the instance at the source host for the resize. Will power off the instance at the source host, create and upload a snapshot image for a non-volume-backed server, and disconnect volumes and networking from the source host. The vm_state is recorded with the "old_vm_state" key in the instance.system_metadata field prior to powering off the instance so the revert flow can determine if the guest should be running or stopped. Returns the snapshot image ID, if one was created, from the ``execute`` method. Upon successful completion, the instance.task_state will be ``resize_migrated`` and the migration.status will be ``post-migrating``. """ def __init__( self, context, instance, flavor, migration, request_spec, compute_rpcapi, image_api, ): """Initializes this PrepResizeAtSourceTask instance. 
:param context: nova auth context targeted at the source cell :param instance: Instance object from the source cell :param flavor: The new flavor if performing resize and not just a cold migration :param migration: Migration object from the source cell :param request_spec: RequestSpec object for the resize operation :param compute_rpcapi: instance of nova.compute.rpcapi.ComputeAPI :param image_api: instance of nova.image.glance.API """ super(PrepResizeAtSourceTask, self).__init__(context, instance) self.flavor = flavor self.migration = migration self.request_spec = request_spec self.compute_rpcapi = compute_rpcapi self.image_api = image_api self._image_id = None def _execute(self): # Save off the vm_state so we can use that later on the source host # if the resize is reverted - it is used to determine if the reverted # guest should be powered on. self.instance.system_metadata['old_vm_state'] = self.instance.vm_state self.instance.task_state = task_states.RESIZE_MIGRATING self.instance.old_flavor = self.instance.flavor self.instance.new_flavor = self.flavor # If the instance is not volume-backed, create a snapshot of the root # disk. if not self.request_spec.is_bfv: # Create an empty image. name = '%s-resize-temp' % self.instance.display_name image_meta = compute_utils.create_image( self.context, self.instance, name, 'snapshot', self.image_api) self._image_id = image_meta['id'] LOG.debug('Created snapshot image %s for cross-cell resize.', self._image_id, instance=self.instance) self.instance.save(expected_task_state=task_states.RESIZE_PREP) # RPC call the source host to prepare for resize. self.compute_rpcapi.prep_snapshot_based_resize_at_source( self.context, self.instance, self.migration, snapshot_id=self._image_id) return self._image_id def rollback(self, ex): # If we created a snapshot image, attempt to delete it. if self._image_id: compute_utils.delete_image( self.context, self.instance, self.image_api, self._image_id) # If the compute service successfully powered off the guest but failed # to snapshot (or timed out during the snapshot), then the # _sync_power_states periodic task should mark the instance as stopped # and the user can start/reboot it. # If the compute service powered off the instance, snapshot it and # destroyed the guest and then a failure occurred, the instance should # have been set to ERROR status (by the compute service) so the user # has to hard reboot or rebuild it. LOG.error('Preparing for cross-cell resize at the source host %s ' 'failed. The instance may need to be hard rebooted.', self.instance.host, instance=self.instance) class FinishResizeAtDestTask(base.TaskBase): """Task to finish the resize at the destination host. Calls the finish_snapshot_based_resize_at_dest method on the destination compute service which sets up networking and block storage and spawns the guest on the destination host. Upon successful completion of this task, the migration status should be 'finished', the instance task_state should be None and the vm_state should be 'resized'. The instance host/node information should also reflect the destination compute. If the compute call is successful, the task will change the instance mapping to point at the target cell and hide the source cell instance thus making the confirm/revert operations act on the target cell instance. """ def __init__(self, context, instance, migration, source_cell_instance, compute_rpcapi, target_cell_mapping, snapshot_id, request_spec): """Initialize this task. 
:param context: nova auth request context targeted at the target cell :param instance: Instance object in the target cell database :param migration: Migration object in the target cell database :param source_cell_instance: Instance object in the source cell DB :param compute_rpcapi: instance of nova.compute.rpcapi.ComputeAPI :param target_cell_mapping: CellMapping object for the target cell :param snapshot_id: ID of the image snapshot to use for a non-volume-backed instance. :param request_spec: nova.objects.RequestSpec object for the operation """ super(FinishResizeAtDestTask, self).__init__(context, instance) self.migration = migration self.source_cell_instance = source_cell_instance self.compute_rpcapi = compute_rpcapi self.target_cell_mapping = target_cell_mapping self.snapshot_id = snapshot_id self.request_spec = request_spec def _finish_snapshot_based_resize_at_dest(self): """Synchronously RPC calls finish_snapshot_based_resize_at_dest If the finish_snapshot_based_resize_at_dest method fails in the compute service, this method will update the source cell instance data to reflect the error (vm_state='error', copy the fault and instance action events for that compute method). """ LOG.debug('Finishing cross-cell resize at the destination host %s', self.migration.dest_compute, instance=self.instance) # prep_snapshot_based_resize_at_source in the source cell would have # changed the source cell instance.task_state to resize_migrated and # we need to reflect that in the target cell instance before calling # the destination compute. self.instance.task_state = task_states.RESIZE_MIGRATED self.instance.save() event_name = 'compute_finish_snapshot_based_resize_at_dest' source_cell_context = self.source_cell_instance._context try: with compute_utils.EventReporter( source_cell_context, event_name, self.migration.dest_compute, self.instance.uuid): self.compute_rpcapi.finish_snapshot_based_resize_at_dest( self.context, self.instance, self.migration, self.snapshot_id, self.request_spec) # finish_snapshot_based_resize_at_dest updates the target cell # instance so we need to refresh it here to have the latest copy. self.instance.refresh() except Exception: # We need to mimic the error handlers on # finish_snapshot_based_resize_at_dest in the destination compute # service so those changes are reflected in the source cell # instance. with excutils.save_and_reraise_exception(logger=LOG): # reverts_task_state and _error_out_instance_on_exception: self.source_cell_instance.task_state = None self.source_cell_instance.vm_state = vm_states.ERROR self.source_cell_instance.save() # wrap_instance_fault (this is best effort) self._copy_latest_fault(source_cell_context) def _copy_latest_fault(self, source_cell_context): """Copies the latest instance fault from the target cell to the source :param source_cell_context: nova auth request context targeted at the source cell """ try: # Get the latest fault from the target cell database. fault = objects.InstanceFault.get_latest_for_instance( self.context, self.instance.uuid) if fault: fault_clone = clone_creatable_object(source_cell_context, fault) fault_clone.create() except Exception: LOG.exception( 'Failed to copy instance fault from target cell DB', instance=self.instance) def _update_instance_mapping(self): """Swaps the hidden field value on the source and target cell instance and updates the instance mapping to point at the target cell. 
""" LOG.debug('Marking instance in source cell as hidden and updating ' 'instance mapping to point at target cell %s.', self.target_cell_mapping.identity, instance=self.instance) # Get the instance mapping first to make the window of time where both # instances are hidden=False as small as possible. instance_mapping = objects.InstanceMapping.get_by_instance_uuid( self.context, self.instance.uuid) # Mark the target cell instance record as hidden=False so it will show # up when listing servers. Note that because of how the API filters # duplicate instance records, even if the user is listing servers at # this exact moment only one copy of the instance will be returned. self.instance.hidden = False self.instance.save() # Update the instance mapping to point at the target cell. This is so # that the confirm/revert actions will be performed on the resized # instance in the target cell rather than the destroyed guest in the # source cell. Note that we could do this before finishing the resize # on the dest host, but it makes sense to defer this until the # instance is successfully resized in the dest host because if that # fails, we want to be able to rebuild in the source cell to recover # the instance. instance_mapping.cell_mapping = self.target_cell_mapping # If this fails the cascading task failures should delete the instance # in the target cell database so we do not need to hide it again. instance_mapping.save() # Mark the source cell instance record as hidden=True to hide it from # the user when listing servers. self.source_cell_instance.hidden = True self.source_cell_instance.save() def _execute(self): # Finish the resize on the destination host in the target cell. self._finish_snapshot_based_resize_at_dest() # Do the instance.hidden/instance_mapping.cell_mapping swap. self._update_instance_mapping() def rollback(self, ex): # The method executed in this task are self-contained for rollbacks. pass class CrossCellMigrationTask(base.TaskBase): """Orchestrates a cross-cell cold migration (resize).""" def __init__(self, context, instance, flavor, request_spec, source_migration, compute_rpcapi, host_selection, alternate_hosts): """Construct the CrossCellMigrationTask instance :param context: The user request auth context. This should be targeted to the source cell in which the instance is currently running. :param instance: The instance being migrated (from the source cell) :param flavor: The new flavor if performing resize and not just a cold migration :param request_spec: nova.objects.RequestSpec with scheduling details :param source_migration: nova.objects.Migration record for this operation (from the source cell) :param compute_rpcapi: instance of nova.compute.rpcapi.ComputeAPI :param host_selection: nova.objects.Selection of the initial selected target host from the scheduler where the selected host is in another cell which is different from the cell in which the instance is currently running :param alternate_hosts: list of 0 or more nova.objects.Selection objects representing alternate hosts within the same target cell as ``host_selection``. 
""" super(CrossCellMigrationTask, self).__init__(context, instance) self.request_spec = request_spec self.flavor = flavor self.source_migration = source_migration self.compute_rpcapi = compute_rpcapi self.host_selection = host_selection self.alternate_hosts = alternate_hosts self._target_cell_instance = None self._target_cell_context = None self.network_api = neutron.API() self.volume_api = cinder.API() self.image_api = glance.API() # Keep an ordered dict of the sub-tasks completed so we can call their # rollback routines if something fails. self._completed_tasks = collections.OrderedDict() def _get_target_cell_mapping(self): """Get the target host CellMapping for the selected host :returns: nova.objects.CellMapping for the cell of the selected target host :raises: nova.exception.CellMappingNotFound if the cell mapping for the selected target host cannot be found (this should not happen if the scheduler just selected it) """ return objects.CellMapping.get_by_uuid( self.context, self.host_selection.cell_uuid) def _setup_target_cell_db(self): """Creates the instance and its related records in the target cell Upon successful completion the self._target_cell_context and self._target_cell_instance variables are set. :returns: A 2-item tuple of: - The active Migration object from the target cell DB - The CellMapping for the target cell """ LOG.debug('Setting up the target cell database for the instance and ' 'its related records.', instance=self.instance) target_cell_mapping = self._get_target_cell_mapping() # Clone the context targeted at the source cell and then target the # clone at the target cell. self._target_cell_context = copy.copy(self.context) nova_context.set_target_cell( self._target_cell_context, target_cell_mapping) task = TargetDBSetupTask( self.context, self.instance, self.source_migration, self._target_cell_context) self._target_cell_instance, target_cell_migration = task.execute() self._completed_tasks['TargetDBSetupTask'] = task return target_cell_migration, target_cell_mapping def _perform_external_api_checks(self): """Performs checks on external service APIs for support. * Checks that the neutron port binding-extended API is available :raises: MigrationPreCheckError if any checks fail """ LOG.debug('Making sure neutron is new enough for cross-cell resize.') # Check that the port binding-extended API extension is available in # neutron because if it's not we can just fail fast. if not self.network_api.has_port_binding_extension(self.context): raise exception.MigrationPreCheckError( reason=_("Required networking service API extension '%s' " "not found.") % neutron_constants.PORT_BINDING_EXTENDED) def _prep_resize_at_dest(self, target_cell_migration): """Executes PrepResizeAtDestTask and updates the source migration. :param target_cell_migration: Migration record from the target cell DB :returns: Refreshed Migration record from the target cell DB after the resize_claim on the destination host has updated the record. """ # TODO(mriedem): Check alternates if the primary selected host fails; # note that alternates are always in the same cell as the selected host # so if the primary fails pre-checks, the alternates may also fail. We # could reschedule but the scheduler does not yet have an ignore_cells # capability like ignore_hosts. # We set the target cell instance new_flavor attribute now since the # ResourceTracker.resize_claim on the destination host uses it. 
self._target_cell_instance.new_flavor = self.flavor verify_task = PrepResizeAtDestTask( self._target_cell_context, self._target_cell_instance, self.flavor, target_cell_migration, self.request_spec, self.compute_rpcapi, self.host_selection, self.network_api, self.volume_api) target_cell_migration_context = verify_task.execute() self._completed_tasks['PrepResizeAtDestTask'] = verify_task # Stash the old vm_state so we can set the resized/reverted instance # back to the same state later, i.e. if STOPPED do not power on the # guest. self._target_cell_instance.system_metadata['old_vm_state'] = ( self._target_cell_instance.vm_state) # Update the target cell instance availability zone now that we have # prepared the resize on the destination host. We do this in conductor # to avoid the "up-call" from the compute service to the API database. self._target_cell_instance.availability_zone = ( availability_zones.get_host_availability_zone( self.context, self.host_selection.service_host)) self._target_cell_instance.save() # We need to mirror the MigrationContext, created in the target cell # database, into the source cell database. Keep in mind that the # MigrationContext has pci_devices and a migration_id in it which # are specific to the target cell database. The only one we care about # correcting for the source cell database is migration_id since that # is used to route neutron external events to the source and target # hosts. self.instance.migration_context = ( target_cell_migration_context.obj_clone()) self.instance.migration_context.migration_id = self.source_migration.id self.instance.save() return self._update_migration_from_dest_after_claim( target_cell_migration) def _update_migration_from_dest_after_claim(self, target_cell_migration): """Update the source cell migration record with target cell info. The PrepResizeAtDestTask runs a resize_claim on the target compute host service in the target cell which sets fields about the destination in the migration record in the target cell. We need to reflect those changes back into the migration record in the source cell. :param target_cell_migration: Migration record from the target cell DB :returns: Refreshed Migration record from the target cell DB after the resize_claim on the destination host has updated the record. """ # Copy information about the dest compute that was set on the dest # migration record during the resize claim on the dest host. # We have to get a fresh copy of the target cell migration record to # pick up the changes made in the dest compute service. target_cell_migration = objects.Migration.get_by_uuid( self._target_cell_context, target_cell_migration.uuid) self.source_migration.dest_compute = target_cell_migration.dest_compute self.source_migration.dest_node = target_cell_migration.dest_node self.source_migration.dest_host = target_cell_migration.dest_host self.source_migration.save() return target_cell_migration def _prep_resize_at_source(self): """Executes PrepResizeAtSourceTask :return: The image snapshot ID if the instance is not volume-backed, else None. 
""" LOG.debug('Preparing source host %s for cross-cell resize.', self.source_migration.source_compute, instance=self.instance) prep_source_task = PrepResizeAtSourceTask( self.context, self.instance, self.flavor, self.source_migration, self.request_spec, self.compute_rpcapi, self.image_api) snapshot_id = prep_source_task.execute() self._completed_tasks['PrepResizeAtSourceTask'] = prep_source_task return snapshot_id def _finish_resize_at_dest( self, target_cell_migration, target_cell_mapping, snapshot_id): """Executes FinishResizeAtDestTask :param target_cell_migration: Migration object from the target cell DB :param target_cell_mapping: CellMapping object for the target cell :param snapshot_id: ID of the image snapshot to use for a non-volume-backed instance. """ task = FinishResizeAtDestTask( self._target_cell_context, self._target_cell_instance, target_cell_migration, self.instance, self.compute_rpcapi, target_cell_mapping, snapshot_id, self.request_spec) task.execute() self._completed_tasks['FinishResizeAtDestTask'] = task def _execute(self): """Execute high-level orchestration of the cross-cell resize""" # We are committed to a cross-cell move at this point so update the # migration record to reflect that. If we fail after this we are not # going to go back and try to run the MigrationTask to do a same-cell # migration, so we set the cross_cell_move flag early for audit/debug # in case something fails later and the operator wants to know if this # was a cross-cell or same-cell move operation. self.source_migration.cross_cell_move = True self.source_migration.save() # Make sure neutron APIs we need are available. self._perform_external_api_checks() # Before preparing the target host create the instance record data # in the target cell database since we cannot do anything in the # target cell without having an instance record there. Remember that # we lose the cell-targeting on the request context over RPC so we # cannot simply pass the source cell context and instance over RPC # to the target compute host and assume changes get mirrored back to # the source cell database. target_cell_migration, target_cell_mapping = ( self._setup_target_cell_db()) # Claim resources and validate the selected host in the target cell. target_cell_migration = self._prep_resize_at_dest( target_cell_migration) # Prepare the instance at the source host (stop it, optionally snapshot # it, disconnect volumes and VIFs, etc). snapshot_id = self._prep_resize_at_source() # Finish the resize at the destination host, swap the hidden fields # on the instances and update the instance mapping. self._finish_resize_at_dest( target_cell_migration, target_cell_mapping, snapshot_id) def rollback(self, ex): """Rollback based on how sub-tasks completed Sub-tasks should rollback appropriately for whatever they do but here we need to handle cleaning anything up from successful tasks, e.g. if tasks A and B were successful but task C fails, then we might need to cleanup changes from A and B here. """ # Rollback the completed tasks in reverse order. for task_name in reversed(self._completed_tasks): try: self._completed_tasks[task_name].rollback(ex) except Exception: LOG.exception('Rollback for task %s failed.', task_name) def get_inst_and_cell_map_from_source( target_cell_context, source_compute, instance_uuid): """Queries the instance from the source cell database. 
:param target_cell_context: nova auth request context targeted at the target cell database :param source_compute: name of the source compute service host :param instance_uuid: UUID of the instance :returns: 2-item tuple of: - Instance object from the source cell database. - CellMapping object of the source cell mapping """ # We can get the source cell via the host mapping based on the # source_compute in the migration object. source_host_mapping = objects.HostMapping.get_by_host( target_cell_context, source_compute) source_cell_mapping = source_host_mapping.cell_mapping # Clone the context targeted at the target cell and then target the # clone at the source cell. source_cell_context = copy.copy(target_cell_context) nova_context.set_target_cell(source_cell_context, source_cell_mapping) # Now get the instance from the source cell DB using the source # cell context which will make the source cell instance permanently # targeted to the source cell database. instance = objects.Instance.get_by_uuid( source_cell_context, instance_uuid, expected_attrs=['flavor', 'info_cache', 'system_metadata']) return instance, source_cell_mapping class ConfirmResizeTask(base.TaskBase): """Task which orchestrates a cross-cell resize confirm operation When confirming a cross-cell resize, the instance is in both the source and target cell databases and on the source and target compute hosts. The API operation is performed on the target cell instance and it is the job of this task to cleanup the source cell host and database and update the status of the instance in the target cell. This can be called either asynchronously from the API service during a normal confirmResize server action or synchronously when deleting a server in VERIFY_RESIZE status. """ def __init__(self, context, instance, migration, legacy_notifier, compute_rpcapi): """Initialize this ConfirmResizeTask instance :param context: nova auth request context targeted at the target cell :param instance: Instance object in "resized" status from the target cell :param migration: Migration object from the target cell for the resize operation expected to have status "confirming" :param legacy_notifier: LegacyValidatingNotifier for sending legacy unversioned notifications :param compute_rpcapi: instance of nova.compute.rpcapi.ComputeAPI """ super(ConfirmResizeTask, self).__init__(context, instance) self.migration = migration self.legacy_notifier = legacy_notifier self.compute_rpcapi = compute_rpcapi def _send_resize_confirm_notification(self, instance, phase): """Sends an unversioned and versioned resize.confirm.(phase) notification. :param instance: The instance whose resize is being confirmed. :param phase: The phase for the resize.confirm operation (either "start" or "end"). """ ctxt = instance._context # Send the legacy unversioned notification. compute_utils.notify_about_instance_usage( self.legacy_notifier, ctxt, instance, 'resize.confirm.%s' % phase) # Send the versioned notification. compute_utils.notify_about_instance_action( ctxt, instance, CONF.host, action=fields.NotificationAction.RESIZE_CONFIRM, phase=phase) def _cleanup_source_host(self, source_instance): """Cleans up the instance from the source host. Creates a confirmResize instance action in the source cell DB. Destroys the guest from the source hypervisor, cleans up networking and storage and frees up resource usage on the source host. 
:param source_instance: Instance object from the source cell DB """ ctxt = source_instance._context # The confirmResize instance action has to be created in the source # cell database before calling the compute service to properly # track action events. Note that the API created the same action # record but on the target cell instance. objects.InstanceAction.action_start( ctxt, source_instance.uuid, instance_actions.CONFIRM_RESIZE, want_result=False) # Get the Migration record from the source cell database. source_migration = objects.Migration.get_by_uuid( ctxt, self.migration.uuid) LOG.debug('Cleaning up source host %s for cross-cell resize confirm.', source_migration.source_compute, instance=source_instance) # Use the EventReport context manager to create the same event that # the source compute will create but in the target cell DB so we do not # have to explicitly copy it over from source to target DB. event_name = 'compute_confirm_snapshot_based_resize_at_source' with compute_utils.EventReporter( self.context, event_name, source_migration.source_compute, source_instance.uuid): self.compute_rpcapi.confirm_snapshot_based_resize_at_source( ctxt, source_instance, source_migration) def _finish_confirm_in_target_cell(self): """Sets "terminal" states on the migration and instance in target cell. This is similar to how ``confirm_resize`` works in the compute service for same-cell resize. """ LOG.debug('Updating migration and instance status in target cell DB.', instance=self.instance) # Update the target cell migration. self.migration.status = 'confirmed' self.migration.save() # Update the target cell instance. # Delete stashed information for the resize. self.instance.old_flavor = None self.instance.new_flavor = None self.instance.system_metadata.pop('old_vm_state', None) self._set_vm_and_task_state() self.instance.drop_migration_context() # There are multiple possible task_states set on the instance because # if we are called from the confirmResize instance action the # task_state should be None, but if we are called from # _confirm_resize_on_deleting then the instance is being deleted. self.instance.save(expected_task_state=[ None, task_states.DELETING, task_states.SOFT_DELETING]) def _set_vm_and_task_state(self): """Sets the target cell instance vm_state based on the power_state. The task_state is set to None. """ # The old_vm_state could be STOPPED but the user might have manually # powered up the instance to confirm the resize/migrate, so we need to # check the current power state on the instance and set the vm_state # appropriately. We default to ACTIVE because if the power state is # not SHUTDOWN, we assume the _sync_power_states periodic task in the # compute service will clean it up. p_state = self.instance.power_state if p_state == power_state.SHUTDOWN: vm_state = vm_states.STOPPED LOG.debug("Resized/migrated instance is powered off. " "Setting vm_state to '%s'.", vm_state, instance=self.instance) else: vm_state = vm_states.ACTIVE self.instance.vm_state = vm_state self.instance.task_state = None def _execute(self): # First get the instance from the source cell so we can cleanup. source_cell_instance = get_inst_and_cell_map_from_source( self.context, self.migration.source_compute, self.instance.uuid)[0] # Send the resize.confirm.start notification(s) using the source # cell instance since we start there. self._send_resize_confirm_notification( source_cell_instance, fields.NotificationPhase.START) # RPC call the source compute to cleanup. 
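# _cleanup_source_host (defined above) records the confirmResize action in
# the source cell DB and asks the source compute to destroy the guest,
# clean up networking and storage, and free resource usage on the source
# host before we hard-delete the source cell instance record below.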
self._cleanup_source_host(source_cell_instance) # Now we can delete the instance in the source cell database. LOG.info('Deleting instance record from source cell %s', source_cell_instance._context.cell_uuid, instance=source_cell_instance) # This needs to be a hard delete because we want to be able to resize # back to this cell without hitting a duplicate entry unique constraint # error. source_cell_instance.destroy(hard_delete=True) # Update the information in the target cell database. self._finish_confirm_in_target_cell() # Send the resize.confirm.end notification using the target cell # instance since we end there. self._send_resize_confirm_notification( self.instance, fields.NotificationPhase.END) def rollback(self, ex): with excutils.save_and_reraise_exception(): LOG.exception( 'An error occurred while confirming the resize for instance ' 'in target cell %s. Depending on the error, a copy of the ' 'instance may still exist in the source cell database which ' 'contains the source host %s. At this point the instance is ' 'on the target host %s and anything left in the source cell ' 'can be cleaned up.', self.context.cell_uuid, self.migration.source_compute, self.migration.dest_compute, instance=self.instance) # If anything failed set the migration status to 'error'. self.migration.status = 'error' self.migration.save() # Put the instance in the target DB into ERROR status, record # a fault and send an error notification. updates = {'vm_state': vm_states.ERROR, 'task_state': None} request_spec = objects.RequestSpec.get_by_instance_uuid( self.context, self.instance.uuid) scheduler_utils.set_vm_state_and_notify( self.context, self.instance.uuid, 'compute_task', 'migrate_server', updates, ex, request_spec) class RevertResizeTask(base.TaskBase): """Task to orchestrate a cross-cell resize revert operation. This task is responsible for coordinating the cleanup of the resources in the target cell and restoring the server and its related resources (e.g. networking and volumes) in the source cell. Upon successful completion the instance mapping should point back at the source cell, the source cell instance should no longer be hidden and the instance in the target cell should be destroyed. """ def __init__(self, context, instance, migration, legacy_notifier, compute_rpcapi): """Initialize this RevertResizeTask instance :param context: nova auth request context targeted at the target cell :param instance: Instance object in "resized" status from the target cell with task_state "resize_reverting" :param migration: Migration object from the target cell for the resize operation expected to have status "reverting" :param legacy_notifier: LegacyValidatingNotifier for sending legacy unversioned notifications :param compute_rpcapi: instance of nova.compute.rpcapi.ComputeAPI """ super(RevertResizeTask, self).__init__(context, instance) self.migration = migration self.legacy_notifier = legacy_notifier self.compute_rpcapi = compute_rpcapi # These are used for rollback handling. self._source_cell_migration = None self._source_cell_instance = None self.volume_api = cinder.API() def _send_resize_revert_notification(self, instance, phase): """Sends an unversioned and versioned resize.revert.(phase) notification. :param instance: The instance whose resize is being reverted. :param phase: The phase for the resize.revert operation (either "start" or "end"). """ ctxt = instance._context # Send the legacy unversioned notification. 
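# Both notification flavors cover the same phase: the legacy unversioned
# payload is emitted via notify_about_instance_usage just below, and the
# versioned instance action notification via notify_about_instance_action
# right after it.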
compute_utils.notify_about_instance_usage( self.legacy_notifier, ctxt, instance, 'resize.revert.%s' % phase) # Send the versioned notification. compute_utils.notify_about_instance_action( ctxt, instance, CONF.host, action=fields.NotificationAction.RESIZE_REVERT, phase=phase) @staticmethod def _update_source_obj_from_target_cell(source_obj, target_obj): """Updates the object from the source cell using the target cell object WARNING: This method does not support objects with nested objects, i.e. objects that have fields which are other objects. An error will be raised in that case. All fields on the source object are updated from the target object except for the ``id`` and ``created_at`` fields since those value must not change during an update. The ``updated_at`` field is also skipped because saving changes to ``source_obj`` will automatically update the ``updated_at`` field. It is expected that the two objects represent the same thing but from different cell databases, so for example, a uuid field (if one exists) should not change. Note that the changes to ``source_obj`` are not persisted in this method. :param source_obj: Versioned object from the source cell database :param target_obj: Versioned object from the target cell database :raises: ObjectActionError if nested object fields are encountered """ ignore_fields = ['created_at', 'id', 'updated_at'] for field in source_obj.obj_fields: if field in target_obj and field not in ignore_fields: if isinstance(source_obj.fields[field], fields.ObjectField): raise exception.ObjectActionError( action='_update_source_obj_from_target_cell', reason='nested objects are not supported') setattr(source_obj, field, getattr(target_obj, field)) def _update_bdms_in_source_cell(self, source_cell_context): """Update BlockDeviceMappings in the source cell database. It is possible to attach/detach volumes to/from a resized instance, which would create/delete BDM records in the target cell, so we have to recreate newly attached BDMs in the source cell database and delete any old BDMs that were detached while resized in the target cell. :param source_cell_context: nova auth request context targeted at the source cell database """ bdms_from_source_cell = ( objects.BlockDeviceMappingList.get_by_instance_uuid( source_cell_context, self.instance.uuid)) source_cell_bdms_by_uuid = { bdm.uuid: bdm for bdm in bdms_from_source_cell} bdms_from_target_cell = ( objects.BlockDeviceMappingList.get_by_instance_uuid( self.context, self.instance.uuid)) # Copy new/updated BDMs from the target cell DB to the source cell DB. for bdm in bdms_from_target_cell: if bdm.uuid in source_cell_bdms_by_uuid: # Remove this BDM from the list since we want to preserve it # along with its attachment_id. source_cell_bdms_by_uuid.pop(bdm.uuid) else: # Newly attached BDM while in the target cell, so create it # in the source cell. source_bdm = clone_creatable_object(source_cell_context, bdm) # revert_snapshot_based_resize_at_dest is going to delete the # attachment for this BDM so we need to create a new empty # attachment to reserve this volume so that # finish_revert_snapshot_based_resize_at_source can use it. 
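# attachment_create is called without a host connector here, which should
# leave the volume with a "reserved" (empty) attachment in Cinder; the
# source compute later completes it with a real connector when it
# re-attaches the volume during
# finish_revert_snapshot_based_resize_at_source.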
attach_ref = self.volume_api.attachment_create( source_cell_context, bdm.volume_id, self.instance.uuid) source_bdm.attachment_id = attach_ref['id'] LOG.debug('Creating BlockDeviceMapping with volume ID %s ' 'and attachment %s in the source cell database ' 'since the volume was attached while the server was ' 'resized.', bdm.volume_id, attach_ref['id'], instance=self.instance) source_bdm.create() # If there are any source bdms left that were not processed from the # target cell bdms, it means those source bdms were detached while # resized in the target cell, and we need to delete them from the # source cell so they don't re-appear once the revert is complete. self._delete_orphan_source_cell_bdms(source_cell_bdms_by_uuid.values()) def _delete_orphan_source_cell_bdms(self, source_cell_bdms): """Deletes orphaned BDMs and volume attachments from the source cell. If any volumes were detached while the server was resized into the target cell they are destroyed here so they do not show up again once the instance is mapped back to the source cell. :param source_cell_bdms: Iterator of BlockDeviceMapping objects. """ for bdm in source_cell_bdms: LOG.debug('Destroying BlockDeviceMapping with volume ID %s and ' 'attachment ID %s from source cell database during ' 'cross-cell resize revert since the volume was detached ' 'while the server was resized.', bdm.volume_id, bdm.attachment_id, instance=self.instance) # First delete the (empty) attachment, created by # prep_snapshot_based_resize_at_source, so it is not leaked. try: self.volume_api.attachment_delete( bdm._context, bdm.attachment_id) except Exception as e: LOG.error('Failed to delete attachment %s for volume %s. The ' 'attachment may be leaked and needs to be manually ' 'cleaned up. Error: %s', bdm.attachment_id, bdm.volume_id, e, instance=self.instance) bdm.destroy() def _update_instance_actions_in_source_cell(self, source_cell_context): """Update instance action records in the source cell database We need to copy the REVERT_RESIZE instance action and related events from the target cell to the source cell. Otherwise the revert operation in the source compute service will not be able to lookup the correct instance action to track events. :param source_cell_context: nova auth request context targeted at the source cell database """ # FIXME(mriedem): This is a hack to just get revert working on # the source; we need to re-create any actions created in the target # cell DB after the instance was moved while it was in # VERIFY_RESIZE status, like if volumes were attached/detached. # Can we use a changes-since filter for that, i.e. find the last # instance action for the instance in the source cell database and then # get all instance actions from the target cell database that were # created after that time. action = objects.InstanceAction.get_by_request_id( self.context, self.instance.uuid, self.context.request_id) new_action = clone_creatable_object(source_cell_context, action) new_action.create() # Also create the events under this action. events = objects.InstanceActionEventList.get_by_action( self.context, action.id) for event in events: new_event = clone_creatable_object(source_cell_context, event) new_event.create(action.instance_uuid, action.request_id) def _update_migration_in_source_cell(self, source_cell_context): """Update the migration record in the source cell database. Updates the migration record in the source cell database based on the current information about the migration in the target cell database. 
:param source_cell_context: nova auth request context targeted at the source cell database :return: Migration object of the updated source cell database migration record """ source_cell_migration = objects.Migration.get_by_uuid( source_cell_context, self.migration.uuid) # The only change we really expect here is the status changing to # "reverting". self._update_source_obj_from_target_cell( source_cell_migration, self.migration) source_cell_migration.save() return source_cell_migration def _update_instance_in_source_cell(self, instance): """Updates the instance and related records in the source cell DB. Before reverting in the source cell we need to copy the latest state information from the target cell database where the instance lived before the revert. This is because data about the instance could have changed while it was in VERIFY_RESIZE status, like attached volumes. :param instance: Instance object from the source cell database :return: Migration object of the updated source cell database migration record """ LOG.debug('Updating instance-related records in the source cell ' 'database based on target cell database information.', instance=instance) # Copy information from the target cell instance that we need in the # source cell instance for doing the revert on the source compute host. instance.system_metadata['old_vm_state'] = ( self.instance.system_metadata.get('old_vm_state')) instance.task_state = task_states.RESIZE_REVERTING instance.save() source_cell_context = instance._context self._update_bdms_in_source_cell(source_cell_context) self._update_instance_actions_in_source_cell(source_cell_context) source_cell_migration = self._update_migration_in_source_cell( source_cell_context) # NOTE(mriedem): We do not have to worry about ports changing while # resized since the API does not allow attach/detach interface while # resized. Same for tags. return source_cell_migration def _update_instance_mapping( self, source_cell_instance, source_cell_mapping): """Swaps the hidden field value on the source and target cell instance and updates the instance mapping to point at the source cell. :param source_cell_instance: Instance object from the source cell DB :param source_cell_mapping: CellMapping object for the source cell """ LOG.debug('Marking instance in target cell as hidden and updating ' 'instance mapping to point at source cell %s.', source_cell_mapping.identity, instance=source_cell_instance) # Get the instance mapping first to make the window of time where both # instances are hidden=False as small as possible. instance_mapping = objects.InstanceMapping.get_by_instance_uuid( self.context, self.instance.uuid) # Mark the source cell instance record as hidden=False so it will show # up when listing servers. Note that because of how the API filters # duplicate instance records, even if the user is listing servers at # this exact moment only one copy of the instance will be returned. source_cell_instance.hidden = False source_cell_instance.save() # Update the instance mapping to point at the source cell. We do this # before cleaning up the target host/cell because that is really best # effort and if something fails on the target we want the user to # now interact with the instance in the source cell with the original # flavor because they are ultimately trying to revert and get back # there, so if they hard reboot/rebuild after an error (for example) # that should happen in the source cell. 
instance_mapping.cell_mapping = source_cell_mapping instance_mapping.save() # Mark the target cell instance record as hidden=True to hide it from # the user when listing servers. self.instance.hidden = True self.instance.save() def _execute(self): # Send the resize.revert.start notification(s) using the target # cell instance since we start there. self._send_resize_revert_notification( self.instance, fields.NotificationPhase.START) source_cell_instance, source_cell_mapping = ( get_inst_and_cell_map_from_source( self.context, self.migration.source_compute, self.instance.uuid)) self._source_cell_instance = source_cell_instance # Update the source cell database information based on the target cell # database, i.e. the instance/migration/BDMs/action records. Do all of # this before updating the instance mapping in case it fails. source_cell_migration = self._update_instance_in_source_cell( source_cell_instance) # Swap the instance.hidden values and update the instance mapping to # point at the source cell. From here on out the user will see and # operate on the instance in the source cell. self._update_instance_mapping( source_cell_instance, source_cell_mapping) # Save off the source cell migration record for rollbacks. self._source_cell_migration = source_cell_migration # Clean the instance from the target host. LOG.debug('Calling destination host %s to revert cross-cell resize.', self.migration.dest_compute, instance=self.instance) # Use the EventReport context manager to create the same event that # the dest compute will create but in the source cell DB so we do not # have to explicitly copy it over from target to source DB. event_name = 'compute_revert_snapshot_based_resize_at_dest' with compute_utils.EventReporter( source_cell_instance._context, event_name, self.migration.dest_compute, self.instance.uuid): self.compute_rpcapi.revert_snapshot_based_resize_at_dest( self.context, self.instance, self.migration) # NOTE(mriedem): revert_snapshot_based_resize_at_dest updates the # target cell instance so if we need to do something with it here # in the future before destroying it, it should be refreshed. # Destroy the instance and its related records from the target cell DB. LOG.info('Deleting instance record from target cell %s', self.context.cell_uuid, instance=source_cell_instance) # This needs to be a hard delete because if we retry the resize to the # target cell we could hit a duplicate entry unique constraint error. self.instance.destroy(hard_delete=True) # Launch the guest at the source host with the old flavor. LOG.debug('Calling source host %s to finish reverting cross-cell ' 'resize.', self.migration.source_compute, instance=self.instance) self.compute_rpcapi.finish_revert_snapshot_based_resize_at_source( source_cell_instance._context, source_cell_instance, source_cell_migration) # finish_revert_snapshot_based_resize_at_source updates the source cell # instance so refresh it here so we have the latest copy. source_cell_instance.refresh() # Finish the conductor_revert_snapshot_based_resize event in the source # cell DB. ComputeTaskManager.revert_snapshot_based_resize uses the # wrap_instance_event decorator to create this action/event in the # target cell DB but now that the target cell instance is gone the # event needs to show up in the source cell DB. 
objects.InstanceActionEvent.event_finish( source_cell_instance._context, source_cell_instance.uuid, 'conductor_revert_snapshot_based_resize', want_result=False) # Send the resize.revert.end notification using the instance from # the source cell since we end there. self._send_resize_revert_notification( source_cell_instance, fields.NotificationPhase.END) def rollback(self, ex): with excutils.save_and_reraise_exception(): # If we have updated the instance mapping to point at the source # cell we update the records in the source cell, otherwise we # update the records in the target cell. instance_at_source = self._source_cell_migration is not None migration = self._source_cell_migration or self.migration instance = self._source_cell_instance or self.instance # NOTE(mriedem): This exception log is fairly generic. We could # probably make this more targeted based on what we know of the # state of the system if we want to make it more detailed, e.g. # the execute method could "record" checkpoints to be used here # or we could check to see if the instance was deleted from the # target cell by trying to refresh it and handle InstanceNotFound. LOG.exception( 'An error occurred while reverting the resize for instance. ' 'The instance is mapped to the %s cell %s. If the instance ' 'was deleted from the target cell %s then the target host %s ' 'was already cleaned up. If the instance is back in the ' 'source cell then you can try hard-rebooting it to recover.', ('source' if instance_at_source else 'target'), migration._context.cell_uuid, self.context.cell_uuid, migration.dest_compute, instance=instance) # If anything failed set the migration status to 'error'. migration.status = 'error' migration.save() # Put the instance into ERROR status, record a fault and send an # error notification. updates = {'vm_state': vm_states.ERROR, 'task_state': None} request_spec = objects.RequestSpec.get_by_instance_uuid( self.context, instance.uuid) scheduler_utils.set_vm_state_and_notify( instance._context, instance.uuid, 'compute_task', 'migrate_server', updates, ex, request_spec) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conductor/tasks/live_migrate.py0000664000175000017500000007152700000000000021707 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
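# Conductor-side orchestration of live migration. LiveMigrationTask below
# validates the instance and the source host, picks a destination via the
# scheduler (or sanity-checks a forced one), claims resources in placement,
# runs the pre-live-migration checks on the destination and then hands the
# actual migration off to the source compute service over RPC.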
from oslo_log import log as logging import oslo_messaging as messaging from oslo_utils import excutils from nova import availability_zones from nova.compute import power_state from nova.compute import utils as compute_utils from nova.conductor.tasks import base from nova.conductor.tasks import migrate import nova.conf from nova import exception from nova.i18n import _ from nova.network import neutron from nova import objects from nova.objects import fields as obj_fields from nova.scheduler import utils as scheduler_utils LOG = logging.getLogger(__name__) CONF = nova.conf.CONF def supports_vif_related_pci_allocations(context, host): """Checks if the compute host service is new enough to support VIF related PCI allocation during live migration :param context: The user request context. :param host: The nova-compute host to check. :returns: True if the compute host is new enough to support vif related PCI allocations """ svc = objects.Service.get_by_host_and_binary(context, host, 'nova-compute') return svc.version >= 36 def supports_vpmem_live_migration(context): """Checks if the compute host service is new enough to support instance live migration with virtual persistent memory. :param context: The user request context. :returns: True if the compute hosts are new enough to support live migration with vpmem """ return objects.Service.get_minimum_version(context, 'nova-compute') >= 51 class LiveMigrationTask(base.TaskBase): def __init__(self, context, instance, destination, block_migration, disk_over_commit, migration, compute_rpcapi, servicegroup_api, query_client, report_client, request_spec=None): super(LiveMigrationTask, self).__init__(context, instance) self.destination = destination self.block_migration = block_migration self.disk_over_commit = disk_over_commit self.migration = migration self.source = instance.host self.migrate_data = None self.limits = None self.compute_rpcapi = compute_rpcapi self.servicegroup_api = servicegroup_api self.query_client = query_client self.report_client = report_client self.request_spec = request_spec self._source_cn = None self._held_allocations = None self.network_api = neutron.API() def _execute(self): self._check_instance_is_active() self._check_instance_has_no_numa() self._check_host_is_up(self.source) self._source_cn, self._held_allocations = ( # NOTE(danms): This may raise various exceptions, which will # propagate to the API and cause a 500. This is what we # want, as it would indicate internal data structure corruption # (such as missing migrations, compute nodes, etc). migrate.replace_allocation_with_migration(self.context, self.instance, self.migration)) if not self.destination: # Either no host was specified in the API request and the user # wants the scheduler to pick a destination host, or a host was # specified but is not forcing it, so they want the scheduler # filters to run on the specified host, like a scheduler hint. self.destination, dest_node, self.limits = self._find_destination() else: # This is the case that the user specified the 'force' flag when # live migrating with a specific destination host so the scheduler # is bypassed. There are still some minimal checks performed here # though. 
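# The minimal checks when the scheduler is bypassed are: the destination
# differs from the source, its compute service is up, it has enough free
# RAM for the instance, and its hypervisor type/version is compatible
# with the source.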
self._check_destination_is_not_source() self._check_host_is_up(self.destination) self._check_destination_has_enough_memory() source_node, dest_node = ( self._check_compatible_with_source_hypervisor( self.destination)) # TODO(mriedem): Call select_destinations() with a # skip_filters=True flag so the scheduler does the work of claiming # resources on the destination in Placement but still bypass the # scheduler filters, which honors the 'force' flag in the API. # This raises NoValidHost which will be handled in # ComputeTaskManager. # NOTE(gibi): consumer_generation = None as we expect that the # source host allocation is held by the migration therefore the # instance is a new, empty consumer for the dest allocation. If # this assumption fails then placement will return consumer # generation conflict and this call raise a AllocationUpdateFailed # exception. We let that propagate here to abort the migration. # NOTE(luyao): When forcing the target host we don't call the # scheduler, that means we need to get allocations from placement # first, then claim resources in resource tracker on the # destination host based on these allocations. scheduler_utils.claim_resources_on_destination( self.context, self.report_client, self.instance, source_node, dest_node, source_allocations=self._held_allocations, consumer_generation=None) try: self._check_requested_destination() except Exception: with excutils.save_and_reraise_exception(): self._remove_host_allocations(dest_node.uuid) # dest_node is a ComputeNode object, so we need to get the actual # node name off it to set in the Migration object below. dest_node = dest_node.hypervisor_hostname self.instance.availability_zone = ( availability_zones.get_host_availability_zone( self.context, self.destination)) self.migration.source_node = self.instance.node self.migration.dest_node = dest_node self.migration.dest_compute = self.destination self.migration.save() # TODO(johngarbutt) need to move complexity out of compute manager # TODO(johngarbutt) disk_over_commit? return self.compute_rpcapi.live_migration(self.context, host=self.source, instance=self.instance, dest=self.destination, block_migration=self.block_migration, migration=self.migration, migrate_data=self.migrate_data) def rollback(self, ex): # TODO(johngarbutt) need to implement the clean up operation # but this will make sense only once we pull in the compute # calls, since this class currently makes no state changes, # except to call the compute method, that has no matching # rollback call right now. if self._held_allocations: migrate.revert_allocation_for_migration(self.context, self._source_cn, self.instance, self.migration) def _check_instance_is_active(self): if self.instance.power_state not in (power_state.RUNNING, power_state.PAUSED): raise exception.InstanceInvalidState( instance_uuid=self.instance.uuid, attr='power_state', state=power_state.STATE_MAP[self.instance.power_state], method='live migrate') def _check_instance_has_no_numa(self): """Prevent live migrations of instances with NUMA topologies. TODO(artom) Remove this check in compute RPC 6.0. """ if not self.instance.numa_topology: return # Only KVM (libvirt) supports NUMA topologies with CPU pinning; # HyperV's vNUMA feature doesn't allow specific pinning hypervisor_type = objects.ComputeNode.get_by_host_and_nodename( self.context, self.source, self.instance.node).hypervisor_type # KVM is not a hypervisor, so when using a virt_type of "kvm" the # hypervisor_type will still be "QEMU". 
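# Anything other than QEMU/KVM is skipped below: per the note above, only
# the libvirt/KVM driver supports NUMA topologies with CPU pinning, so
# there is nothing to guard against for other hypervisor types.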
if hypervisor_type.lower() != obj_fields.HVType.QEMU: return # We're fully upgraded to a version that supports NUMA live # migration, carry on. if objects.Service.get_minimum_version( self.context, 'nova-compute') >= 40: return if CONF.workarounds.enable_numa_live_migration: LOG.warning( 'Instance has an associated NUMA topology, cell contains ' 'compute nodes older than train, but the ' 'enable_numa_live_migration workaround is enabled. Live ' 'migration will not be NUMA-aware. The instance NUMA ' 'topology, including related attributes such as CPU pinning, ' 'huge page and emulator thread pinning information, will not ' 'be recalculated. See bug #1289064 for more information.', instance=self.instance) else: raise exception.MigrationPreCheckError( reason='Instance has an associated NUMA topology, cell ' 'contains compute nodes older than train, and the ' 'enable_numa_live_migration workaround is disabled. ' 'Refusing to perform the live migration, as the ' 'instance NUMA topology, including related attributes ' 'such as CPU pinning, huge page and emulator thread ' 'pinning information, cannot be recalculated. See ' 'bug #1289064 for more information.') def _check_can_migrate_pci(self, src_host, dest_host): """Checks that an instance can migrate with PCI requests. At the moment support only if: a) Instance contains flavor based PCI requests configured with live_migratable tag specified. b) Instance contains neutron port related PCI request and: - Neutron supports multiple port binding extension. - Src and Dest host support VIF related PCI allocations. """ if self.instance.pci_requests is None or not len( self.instance.pci_requests.requests): return for pci_request in self.instance.pci_requests.requests: if pci_request.source == objects.InstancePCIRequest.FLAVOR_ALIAS: # A pre-Epoxy instance using a device that would # technically be live-migratable will be accepted only if # all InstancePCIRequests have the 'live_migratable' flag # set to "true". if not pci_request.is_live_migratable(): # Ensure the request explicitly requests migratable devices LOG.warning( "Migration pre-check failed: The request does " "not explicitly request live-migratable devices." ) raise exception.MigrationPreCheckError( reason="This request does not explicitly request " "live-migratable devices." ) if not self.instance.pci_requests.neutron_requests(): return # All PCI requests are VIF related, now check neutron, # source and destination compute nodes. if not self.network_api.has_port_binding_extension(self.context): raise exception.MigrationPreCheckError( reason="Cannot live migrate VIF with related PCI, Neutron " "does not support required port binding extension.") if not (supports_vif_related_pci_allocations(self.context, src_host) and supports_vif_related_pci_allocations(self.context, dest_host)): raise exception.MigrationPreCheckError( reason="Cannot live migrate VIF with related PCI, " "source and destination nodes do not support " "the operation.") def _check_can_migrate_specific_resources(self): """Checks that an instance can migrate with specific resources. For virtual persistent memory resource: 1. check if Instance contains vpmem resources 2. 
check if live migration with vpmem is supported """ if not self.instance.resources: return has_vpmem = False for resource in self.instance.resources: if resource.resource_class.startswith("CUSTOM_PMEM_NAMESPACE_"): has_vpmem = True break if has_vpmem and not supports_vpmem_live_migration(self.context): raise exception.MigrationPreCheckError( reason="Cannot live migrate with virtual persistent memory, " "the operation is not supported.") def _check_host_is_up(self, host): service = objects.Service.get_by_compute_host(self.context, host) if not self.servicegroup_api.service_is_up(service): raise exception.ComputeServiceUnavailable(host=host) def _check_requested_destination(self): """Performs basic pre-live migration checks for the forced host.""" # NOTE(gibi): This code path is used when the live migration is forced # to a target host and skipping the scheduler. Such operation is # rejected for servers with nested resource allocations since # I7cbd5d9fb875ebf72995362e0b6693492ce32051. So here we can safely # assume that the provider mapping is empty. self._call_livem_checks_on_host(self.destination, {}) # Make sure the forced destination host is in the same cell that the # instance currently lives in. # NOTE(mriedem): This can go away if/when the forced destination host # case calls select_destinations. source_cell_mapping = self._get_source_cell_mapping() dest_cell_mapping = self._get_destination_cell_mapping() if source_cell_mapping.uuid != dest_cell_mapping.uuid: raise exception.MigrationPreCheckError( reason=(_('Unable to force live migrate instance %s ' 'across cells.') % self.instance.uuid)) def _check_destination_is_not_source(self): if self.destination == self.source: raise exception.UnableToMigrateToSelf( instance_id=self.instance.uuid, host=self.destination) def _check_destination_has_enough_memory(self): compute = self._get_compute_info(self.destination) free_ram_mb = compute.free_ram_mb total_ram_mb = compute.memory_mb mem_inst = self.instance.memory_mb # NOTE(sbauza): Now the ComputeNode object reports an allocation ratio # that can be provided by the compute_node if new or by the controller ram_ratio = compute.ram_allocation_ratio # NOTE(sbauza): Mimic the RAMFilter logic in order to have the same # ram validation avail = total_ram_mb * ram_ratio - (total_ram_mb - free_ram_mb) if not mem_inst or avail <= mem_inst: instance_uuid = self.instance.uuid dest = self.destination reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: " "Lack of memory(host:%(avail)s <= " "instance:%(mem_inst)s)") raise exception.MigrationPreCheckError(reason=reason % dict( instance_uuid=instance_uuid, dest=dest, avail=avail, mem_inst=mem_inst)) def _get_compute_info(self, host): return objects.ComputeNode.get_first_node_by_host_for_old_compat( self.context, host) def _check_compatible_with_source_hypervisor(self, destination): source_info = self._get_compute_info(self.source) destination_info = self._get_compute_info(destination) source_type = source_info.hypervisor_type destination_type = destination_info.hypervisor_type if source_type != destination_type: raise exception.InvalidHypervisorType() source_version = source_info.hypervisor_version destination_version = destination_info.hypervisor_version if not CONF.workarounds.skip_hypervisor_version_check_on_lm: if source_version > destination_version: raise exception.DestinationHypervisorTooOld() return source_info, destination_info def _call_livem_checks_on_host(self, destination, provider_mapping): self._check_can_migrate_specific_resources() 
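# The PCI pre-check needs both ends of the migration: VIF-related PCI
# allocations are only live-migratable if the source and the candidate
# destination compute services both support them (see
# supports_vif_related_pci_allocations above).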
self._check_can_migrate_pci(self.source, destination) try: self.migrate_data = self.compute_rpcapi.\ check_can_live_migrate_destination(self.context, self.instance, destination, self.block_migration, self.disk_over_commit, self.migration, self.limits) except messaging.MessagingTimeout: msg = _("Timeout while checking if we can live migrate to host: " "%s") % destination raise exception.MigrationPreCheckError(msg) # Check to see that neutron supports the binding-extended API. if self.network_api.has_port_binding_extension(self.context): bindings = self._bind_ports_on_destination( destination, provider_mapping) self._update_migrate_vifs_from_bindings(self.migrate_data.vifs, bindings) def _get_port_profile_from_provider_mapping( self, port_id, provider_mappings ): allocation = self.network_api.get_binding_profile_allocation( self.context, port_id, provider_mappings) if allocation: return {'allocation': allocation} else: return {} def _bind_ports_on_destination(self, destination, provider_mappings): LOG.debug('Start binding ports on destination host: %s', destination, instance=self.instance) # Bind ports on the destination host; returns a dict, keyed by # port ID, of a new destination host port binding dict per port # that was bound. This information is then stuffed into the # migrate_data. try: # NOTE(adrianc): migrate_data.vifs was partially filled # by destination compute if compute is new enough. # if that is the case, it may have updated the required port # profile for the destination node (e.g new PCI address if SR-IOV) # perform port binding against the requested profile ports_profile = {} for mig_vif in self.migrate_data.vifs: profile = mig_vif.profile if 'profile_json' in mig_vif else {} # NOTE(gibi): provider_mappings also contribute to the # binding profile of the ports if the port has resource # request. So we need to merge the profile information from # both sources. profile.update( self._get_port_profile_from_provider_mapping( mig_vif.port_id, provider_mappings)) if profile: ports_profile[mig_vif.port_id] = profile bindings = self.network_api.bind_ports_to_host( context=self.context, instance=self.instance, host=destination, vnic_types=None, port_profiles=ports_profile) except exception.PortBindingFailed as e: # Port binding failed for that host, try another one. raise exception.MigrationPreCheckError( reason=e.format_message()) return bindings def _update_migrate_vifs_from_bindings(self, migrate_vifs, bindings): for migrate_vif in migrate_vifs: binding = bindings[migrate_vif.port_id] migrate_vif.profile = binding.get('profile') migrate_vif.vnic_type = binding['vnic_type'] if 'vif_details' in binding: migrate_vif.vif_details = binding['vif_details'] if 'vif_type' in binding: migrate_vif.vif_type = binding['vif_type'] def _get_source_cell_mapping(self): """Returns the CellMapping for the cell in which the instance lives :returns: nova.objects.CellMapping record for the cell where the instance currently lives. :raises: MigrationPreCheckError - in case a mapping is not found """ try: return objects.InstanceMapping.get_by_instance_uuid( self.context, self.instance.uuid).cell_mapping except exception.InstanceMappingNotFound: raise exception.MigrationPreCheckError( reason=(_('Unable to determine in which cell ' 'instance %s lives.') % self.instance.uuid)) def _get_destination_cell_mapping(self): """Returns the CellMapping for the destination host :returns: nova.objects.CellMapping record for the cell where the destination host is mapped. 
:raises: MigrationPreCheckError - in case a mapping is not found """ try: return objects.HostMapping.get_by_host( self.context, self.destination).cell_mapping except exception.HostMappingNotFound: raise exception.MigrationPreCheckError( reason=(_('Unable to determine in which cell ' 'destination host %s lives.') % self.destination)) def _get_request_spec_for_select_destinations(self, attempted_hosts=None): """Builds a RequestSpec that can be passed to select_destinations Used when calling the scheduler to pick a destination host for live migrating the instance. :param attempted_hosts: List of host names to ignore in the scheduler. This is generally at least seeded with the source host. :returns: nova.objects.RequestSpec object """ request_spec = self.request_spec # NOTE(sbauza): Force_hosts/nodes needs to be reset # if we want to make sure that the next destination # is not forced to be the original host request_spec.reset_forced_destinations() port_res_req, req_lvl_params = ( self.network_api.get_requested_resource_for_instance( self.context, self.instance.uuid)) # NOTE(gibi): When cyborg or other module wants to handle # similar non-nova resources then here we have to collect # all the external resource requests in a single list and # add them to the RequestSpec. request_spec.requested_resources = port_res_req request_spec.request_level_params = req_lvl_params # NOTE(gibi): as PCI devices is tracked in placement we # need to generate request groups from InstancePCIRequests. # This will append new RequestGroup objects to the # request_spec.requested_resources list if needed request_spec.generate_request_groups_from_pci_requests() scheduler_utils.setup_instance_group(self.context, request_spec) # We currently only support live migrating to hosts in the same # cell that the instance lives in, so we need to tell the scheduler # to limit the applicable hosts based on cell. cell_mapping = self._get_source_cell_mapping() LOG.debug('Requesting cell %(cell)s while live migrating', {'cell': cell_mapping.identity}, instance=self.instance) if ('requested_destination' in request_spec and request_spec.requested_destination): request_spec.requested_destination.cell = cell_mapping else: request_spec.requested_destination = objects.Destination( cell=cell_mapping) request_spec.ensure_project_and_user_id(self.instance) request_spec.ensure_network_information(self.instance) compute_utils.heal_reqspec_is_bfv( self.context, request_spec, self.instance) return request_spec def _find_destination(self): # TODO(johngarbutt) this retry loop should be shared attempted_hosts = [self.source] request_spec = self._get_request_spec_for_select_destinations( attempted_hosts) host = None while host is None: self._check_not_over_max_retries(attempted_hosts) request_spec.ignore_hosts = attempted_hosts try: selection_lists = self.query_client.select_destinations( self.context, request_spec, [self.instance.uuid], return_objects=True, return_alternates=False) # We only need the first item in the first list, as there is # only one instance, and we don't care about any alternates. selection = selection_lists[0][0] host = selection.service_host except messaging.RemoteError as ex: # TODO(ShaoHe Feng) There maybe multi-scheduler, and the # scheduling algorithm is R-R, we can let other scheduler try. # Note(ShaoHe Feng) There are types of RemoteError, such as # NoSuchMethod, UnsupportedVersion, we can distinguish it by # ex.exc_type. 
raise exception.MigrationSchedulerRPCError(reason=str(ex)) scheduler_utils.fill_provider_mapping(request_spec, selection) provider_mapping = request_spec.get_request_group_mapping() if provider_mapping: # NOTE(gibi): this call might update the pci_requests of the # instance based on the destination host if so then such change # will be persisted when post_live_migration_at_destination # runs. compute_utils.\ update_pci_request_with_placement_allocations( self.context, self.report_client, self.instance.pci_requests.requests, provider_mapping) try: self._check_compatible_with_source_hypervisor(host) self._call_livem_checks_on_host(host, provider_mapping) except (exception.Invalid, exception.MigrationPreCheckError) as e: LOG.debug("Skipping host: %(host)s because: %(e)s", {"host": host, "e": e}) attempted_hosts.append(host) # The scheduler would have created allocations against the # selected destination host in Placement, so we need to remove # those before moving on. self._remove_host_allocations(selection.compute_node_uuid) host = None # TODO(artom) We should probably just return the whole selection object # at this point. return (selection.service_host, selection.nodename, selection.limits) def _remove_host_allocations(self, compute_node_uuid): """Removes instance allocations against the given node from Placement :param compute_node_uuid: UUID of ComputeNode resource provider """ # Now remove the allocations for our instance against that node. # Note that this does not remove allocations against any other node # or shared resource provider, it's just undoing what the scheduler # allocated for the given (destination) node. self.report_client.remove_provider_tree_from_instance_allocation( self.context, self.instance.uuid, compute_node_uuid) def _check_not_over_max_retries(self, attempted_hosts): if CONF.migrate_max_retries == -1: return retries = len(attempted_hosts) - 1 if retries > CONF.migrate_max_retries: if self.migration: self.migration.status = 'failed' self.migration.save() msg = (_('Exceeded max scheduling retries %(max_retries)d for ' 'instance %(instance_uuid)s during live migration') % {'max_retries': retries, 'instance_uuid': self.instance.uuid}) raise exception.MaxRetriesExceeded(reason=msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conductor/tasks/migrate.py0000664000175000017500000004704400000000000020665 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
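# Conductor-side orchestration of cold migration and resize. The helpers
# below move the source-node allocation from the instance consumer to the
# migration record in placement (and back on revert), and MigrationTask
# selects a destination host, handing off to CrossCellMigrationTask when
# the chosen host lives in a different cell and otherwise casting
# prep_resize to the chosen compute host.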
from oslo_log import log as logging from oslo_serialization import jsonutils from nova import availability_zones from nova.compute import utils as compute_utils from nova.conductor.tasks import base from nova.conductor.tasks import cross_cell_migrate from nova import exception from nova.i18n import _ from nova import objects from nova.scheduler.client import report from nova.scheduler import utils as scheduler_utils LOG = logging.getLogger(__name__) def replace_allocation_with_migration(context, instance, migration): """Replace instance's allocation with one for a migration. :raises: keystoneauth1.exceptions.base.ClientException on failure to communicate with the placement API :raises: ConsumerAllocationRetrievalFailed if reading the current allocation from placement fails :raises: ComputeHostNotFound if the host of the instance is not found in the database :raises: AllocationMoveFailed if moving the allocation from the instance.uuid to the migration.uuid fails due to parallel placement operation on the instance consumer :raises: NoValidHost if placement rejects the update for other reasons (e.g. not enough resources) :returns: (source_compute_node, migration_allocation) """ try: source_cn = objects.ComputeNode.get_by_host_and_nodename( context, instance.host, instance.node) except exception.ComputeHostNotFound: LOG.error('Unable to find record for source ' 'node %(node)s on %(host)s', {'host': instance.host, 'node': instance.node}, instance=instance) # A generic error like this will just error out the migration # and do any rollback required raise reportclient = report.report_client_singleton() orig_alloc = reportclient.get_allocs_for_consumer( context, instance.uuid)['allocations'] root_alloc = orig_alloc.get(source_cn.uuid, {}).get('resources', {}) if not root_alloc: # TODO(stephenfin): This was a valid code path when there was support # for multiple schedulers, but it should probably be an error now LOG.debug( 'Unable to find existing allocations for instance on ' 'source compute node: %s', source_cn.uuid, instance=instance) return None, None # FIXME(gibi): This method is flawed in that it does not handle allocations # against sharing providers in any special way. This leads to duplicate # allocations against the sharing provider during migration. success = reportclient.move_allocations(context, instance.uuid, migration.uuid) if not success: LOG.error('Unable to replace resource claim on source ' 'host %(host)s node %(node)s for instance', {'host': instance.host, 'node': instance.node}, instance=instance) # Mimic the "no space" error that could have come from the # scheduler. Once we have an atomic replace operation, this # would be a severe error. raise exception.NoValidHost( reason=_('Unable to replace instance claim on source')) else: LOG.debug('Created allocations for migration %(mig)s on %(rp)s', {'mig': migration.uuid, 'rp': source_cn.uuid}) return source_cn, orig_alloc def revert_allocation_for_migration(context, source_cn, instance, migration): """Revert an allocation made for a migration back to the instance.""" reportclient = report.report_client_singleton() # FIXME(gibi): This method is flawed in that it does not handle allocations # against sharing providers in any special way. This leads to duplicate # allocations against the sharing provider during migration. 
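# move_allocations transfers whatever the migration consumer holds back to
# the instance consumer in a single placement call; on this revert path a
# False return (e.g. a consumer generation conflict) is only logged below
# rather than raised.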
success = reportclient.move_allocations(context, migration.uuid, instance.uuid) if not success: LOG.error('Unable to replace resource claim on source ' 'host %(host)s node %(node)s for instance', {'host': instance.host, 'node': instance.node}, instance=instance) else: LOG.debug('Created allocations for instance %(inst)s on %(rp)s', {'inst': instance.uuid, 'rp': source_cn.uuid}) class MigrationTask(base.TaskBase): def __init__(self, context, instance, flavor, request_spec, clean_shutdown, compute_rpcapi, query_client, report_client, host_list, network_api): super(MigrationTask, self).__init__(context, instance) self.clean_shutdown = clean_shutdown self.request_spec = request_spec self.flavor = flavor self.compute_rpcapi = compute_rpcapi self.query_client = query_client self.reportclient = report_client self.host_list = host_list self.network_api = network_api # Persist things from the happy path so we don't have to look # them up if we need to roll back self._migration = None self._held_allocations = None self._source_cn = None def _preallocate_migration(self): # If this is a rescheduled migration, don't create a new record. migration_type = ("resize" if self.instance.flavor.id != self.flavor.id else "migration") filters = {"instance_uuid": self.instance.uuid, "migration_type": migration_type, "status": "pre-migrating"} migrations = objects.MigrationList.get_by_filters(self.context, filters).objects if migrations: migration = migrations[0] else: migration = objects.Migration(context=self.context.elevated()) migration.old_instance_type_id = self.instance.flavor.id migration.new_instance_type_id = self.flavor.id migration.status = 'pre-migrating' migration.instance_uuid = self.instance.uuid migration.source_compute = self.instance.host migration.source_node = self.instance.node migration.migration_type = migration_type migration.create() self._migration = migration self._source_cn, self._held_allocations = ( replace_allocation_with_migration(self.context, self.instance, self._migration)) return migration def _set_requested_destination_cell(self, legacy_props): instance_mapping = objects.InstanceMapping.get_by_instance_uuid( self.context, self.instance.uuid) if not ('requested_destination' in self.request_spec and self.request_spec.requested_destination): self.request_spec.requested_destination = objects.Destination() targeted = 'host' in self.request_spec.requested_destination # NOTE(mriedem): If the user is allowed to perform a cross-cell resize # then add the current cell to the request spec as "preferred" so the # scheduler will (by default) weigh hosts within the current cell over # hosts in another cell, all other things being equal. If the user is # not allowed to perform cross-cell resize, then we limit the request # spec and tell the scheduler to only look at hosts in the current # cell. cross_cell_allowed = ( self.request_spec.requested_destination.allow_cross_cell_move) if targeted and cross_cell_allowed: # If a target host is specified it might be in another cell so # we cannot restrict the cell in this case. We would not prefer # the source cell in that case either since we know where the # user wants it to go. We just let the scheduler figure it out. self.request_spec.requested_destination.cell = None else: self.request_spec.requested_destination.cell = ( instance_mapping.cell_mapping) # NOTE(takashin): In the case that the target host is specified, # if the migration is failed, it is not necessary to retry # the cold migration to the same host. 
So make sure that # reschedule will not occur. if targeted: legacy_props.pop('retry', None) self.request_spec.retry = None # Log our plan before calling the scheduler. if cross_cell_allowed and targeted: LOG.debug('Not restricting cell for targeted cold migration.', instance=self.instance) elif cross_cell_allowed: LOG.debug('Allowing migration from cell %(cell)s', {'cell': instance_mapping.cell_mapping.identity}, instance=self.instance) else: LOG.debug('Restricting to cell %(cell)s while migrating', {'cell': instance_mapping.cell_mapping.identity}, instance=self.instance) def _is_selected_host_in_source_cell(self, selection): """Checks if the given Selection is in the same cell as the instance :param selection: Selection object returned from the scheduler ``select_destinations`` method. :returns: True if the host Selection is in the same cell as the instance, False otherwise. """ # Note that the context is already targeted to the current cell in # which the instance exists. same_cell = selection.cell_uuid == self.context.cell_uuid if not same_cell: LOG.debug('Selected target host %s is in cell %s and instance is ' 'in cell: %s', selection.service_host, selection.cell_uuid, self.context.cell_uuid, instance=self.instance) return same_cell def _execute(self): # NOTE(sbauza): Force_hosts/nodes needs to be reset if we want to make # sure that the next destination is not forced to be the original host. # This needs to be done before the populate_retry call otherwise # retries will be disabled if the server was created with a forced # host/node. self.request_spec.reset_forced_destinations() # TODO(sbauza): Remove once all the scheduler.utils methods accept a # RequestSpec object in the signature. legacy_props = self.request_spec.to_legacy_filter_properties_dict() scheduler_utils.setup_instance_group(self.context, self.request_spec) # If a target host is set in a requested destination, # 'populate_retry' need not be executed. if not ('requested_destination' in self.request_spec and self.request_spec.requested_destination and 'host' in self.request_spec.requested_destination): scheduler_utils.populate_retry(legacy_props, self.instance.uuid) port_res_req, req_lvl_params = ( self.network_api.get_requested_resource_for_instance( self.context, self.instance.uuid) ) # NOTE(gibi): When cyborg or other module wants to handle similar # non-nova resources then here we have to collect all the external # resource requests in a single list and add them to the RequestSpec. self.request_spec.requested_resources = port_res_req self.request_spec.request_level_params = req_lvl_params # NOTE(gibi): as PCI devices is tracked in placement we need to # generate request groups from InstancePCIRequests. This will append # new RequestGroup objects to the request_spec.requested_resources list # if needed self.request_spec.generate_request_groups_from_pci_requests() self._set_requested_destination_cell(legacy_props) # Once _preallocate_migration() is done, the source node allocation is # moved from the instance consumer to the migration record consumer, # and the instance consumer doesn't have any allocations. If this is # the first time through here (not a reschedule), select_destinations # below will allocate resources on the selected destination node for # the instance consumer. If we're rescheduling, host_list is not None # and we'll call claim_resources for the instance and the selected # alternate. 
If we exhaust our alternates and raise MaxRetriesExceeded, # the rollback() method should revert the allocation swaparoo and move # the source node allocation from the migration record back to the # instance record. migration = self._preallocate_migration() self.request_spec.ensure_project_and_user_id(self.instance) self.request_spec.ensure_network_information(self.instance) compute_utils.heal_reqspec_is_bfv( self.context, self.request_spec, self.instance) # On an initial call to migrate, 'self.host_list' will be None, so we # have to call the scheduler to get a list of acceptable hosts to # migrate to. That list will consist of a selected host, along with # zero or more alternates. On a reschedule, though, the alternates will # be passed to this object and stored in 'self.host_list', so we can # pop the first alternate from the list to use for the destination, and # pass the remaining alternates to the compute. if self.host_list is None: selection = self._schedule() if not self._is_selected_host_in_source_cell(selection): # If the selected host is in another cell, we need to execute # another task to do the cross-cell migration. LOG.info('Executing cross-cell resize task starting with ' 'target host: %s', selection.service_host, instance=self.instance) task = cross_cell_migrate.CrossCellMigrationTask( self.context, self.instance, self.flavor, self.request_spec, self._migration, self.compute_rpcapi, selection, self.host_list) task.execute() return else: # This is a reschedule that will use the supplied alternate hosts # in the host_list as destinations. selection = self._reschedule() scheduler_utils.populate_filter_properties(legacy_props, selection) (host, node) = (selection.service_host, selection.nodename) # The availability_zone field was added in v1.1 of the Selection # object so make sure to handle the case where it is missing. if 'availability_zone' in selection: self.instance.availability_zone = selection.availability_zone else: self.instance.availability_zone = ( availability_zones.get_host_availability_zone( self.context, host)) LOG.debug("Calling prep_resize with selected host: %s; " "Selected node: %s; Alternates: %s", host, node, self.host_list, instance=self.instance) # RPC cast to the destination host to start the migration process. self.compute_rpcapi.prep_resize( # NOTE(mriedem): Using request_spec.image here is potentially # dangerous if it is not kept up to date (i.e. rebuild/unshelve); # seems like the sane thing to do would be to pass the current # instance.image_meta since that is what MoveClaim will use for # any NUMA topology claims on the destination host... self.context, self.instance, self.request_spec.image, self.flavor, host, migration, request_spec=self.request_spec, filter_properties=legacy_props, node=node, clean_shutdown=self.clean_shutdown, host_list=self.host_list) def _schedule(self): selection_lists = self.query_client.select_destinations( self.context, self.request_spec, [self.instance.uuid], return_objects=True, return_alternates=True) # Since there is only ever one instance to migrate per call, we # just need the first returned element. 
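        # With return_objects and return_alternates both set,
        # select_destinations() gives back a list of lists of Selection
        # objects: one inner list per requested instance, ordered with the
        # chosen host first and any alternates after it.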
selection_list = selection_lists[0] # Scheduler allocated resources on the first host so try that first selection, self.host_list = selection_list[0], selection_list[1:] scheduler_utils.fill_provider_mapping(self.request_spec, selection) return selection def _reschedule(self): # Since the resources on these alternates may have been consumed and # might not be able to support the migrated instance, we need to first # claim the resources to verify the host still has sufficient # available resources. elevated = self.context.elevated() host_available = False selection = None while self.host_list and not host_available: selection = self.host_list.pop(0) if selection.allocation_request: alloc_req = jsonutils.loads(selection.allocation_request) else: alloc_req = None if alloc_req: # If this call succeeds, the resources on the destination # host will be claimed by the instance. host_available = scheduler_utils.claim_resources( elevated, self.reportclient, self.request_spec, self.instance.uuid, alloc_req, selection.allocation_request_version) if host_available: scheduler_utils.fill_provider_mapping( self.request_spec, selection) else: # Some deployments use different schedulers that do not # use Placement, so they will not have an # allocation_request to claim with. For those cases, # there is no concept of claiming, so just assume that # the host is valid. host_available = True # There are no more available hosts. Raise a MaxRetriesExceeded # exception in that case. if not host_available: reason = ("Exhausted all hosts available for retrying build " "failures for instance %(instance_uuid)s." % {"instance_uuid": self.instance.uuid}) raise exception.MaxRetriesExceeded(reason=reason) return selection def rollback(self, ex): if self._migration: self._migration.status = 'error' self._migration.save() if not self._held_allocations: return # NOTE(danms): We created new-style migration-based # allocations for the instance, but failed before we kicked # off the migration in the compute. Normally the latter would # do that cleanup but we never got that far, so do it here and # now. revert_allocation_for_migration(self.context, self._source_cn, self.instance, self._migration) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3576086 nova-32.0.0/nova/conf/0000775000175000017500000000000000000000000014452 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/__init__.py0000664000175000017500000000676400000000000016600 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This package got introduced during the Mitaka cycle in 2015 to # have a central place where the config options of Nova can be maintained. 
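# Each module in this package follows essentially the same shape (compare
# e.g. availability_zone.py or base.py below). As a rough, illustrative
# sketch only -- the module and option names here are made up -- a config
# module looks like:
#
#     from oslo_config import cfg
#
#     example_opts = [
#         cfg.StrOpt('example_option', default='value', help='An example.'),
#     ]
#
#     def register_opts(conf):
#         conf.register_opts(example_opts)
#
#     def list_opts():
#         return {'DEFAULT': example_opts}
#
# register_opts() is what this package calls below to populate nova's CONF
# object, while list_opts() is consumed by the option-listing tooling (for
# example when generating the sample configuration file).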
# For more background see the blueprint "centralize-config-options" from oslo_config import cfg from nova.conf import api from nova.conf import availability_zone from nova.conf import base from nova.conf import cache from nova.conf import cinder from nova.conf import compute from nova.conf import conductor from nova.conf import configdrive from nova.conf import console from nova.conf import consoleauth from nova.conf import cyborg from nova.conf import database from nova.conf import devices from nova.conf import ephemeral_storage from nova.conf import glance from nova.conf import guestfs from nova.conf import imagecache from nova.conf import ironic from nova.conf import key_manager from nova.conf import keystone from nova.conf import libvirt from nova.conf import manila from nova.conf import mks from nova.conf import netconf from nova.conf import neutron from nova.conf import notifications from nova.conf import novnc from nova.conf import paths from nova.conf import pci from nova.conf import placement from nova.conf import quota from nova.conf import rpc from nova.conf import scheduler from nova.conf import serial_console from nova.conf import service from nova.conf import service_token from nova.conf import servicegroup from nova.conf import spice from nova.conf import upgrade_levels from nova.conf import vendordata from nova.conf import vmware from nova.conf import vnc from nova.conf import workarounds from nova.conf import wsgi from nova.conf import zvm CONF = cfg.CONF api.register_opts(CONF) availability_zone.register_opts(CONF) base.register_opts(CONF) cache.register_opts(CONF) cinder.register_opts(CONF) compute.register_opts(CONF) conductor.register_opts(CONF) configdrive.register_opts(CONF) console.register_opts(CONF) consoleauth.register_opts(CONF) cyborg.register_opts(CONF) database.register_opts(CONF) devices.register_opts(CONF) ephemeral_storage.register_opts(CONF) glance.register_opts(CONF) guestfs.register_opts(CONF) manila.register_opts(CONF) mks.register_opts(CONF) imagecache.register_opts(CONF) ironic.register_opts(CONF) key_manager.register_opts(CONF) keystone.register_opts(CONF) libvirt.register_opts(CONF) netconf.register_opts(CONF) neutron.register_opts(CONF) notifications.register_opts(CONF) novnc.register_opts(CONF) paths.register_opts(CONF) pci.register_opts(CONF) placement.register_opts(CONF) quota.register_opts(CONF) rpc.register_opts(CONF) scheduler.register_opts(CONF) serial_console.register_opts(CONF) service.register_opts(CONF) service_token.register_opts(CONF) servicegroup.register_opts(CONF) spice.register_opts(CONF) upgrade_levels.register_opts(CONF) vendordata.register_opts(CONF) vmware.register_opts(CONF) vnc.register_opts(CONF) workarounds.register_opts(CONF) wsgi.register_opts(CONF) zvm.register_opts(CONF) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/api.py0000664000175000017500000003352100000000000015601 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg api_group = cfg.OptGroup('api', title='API options', help=""" Options under this group are used to define Nova API. """) metadata_opts = [ cfg.StrOpt("config_drive_skip_versions", default=("1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 " "2007-12-15 2008-02-01 2008-09-01"), help=""" When gathering the existing metadata for a config drive, the EC2-style metadata is returned for all versions that don't appear in this option. As of the Liberty release, the available versions are: * 1.0 * 2007-01-19 * 2007-03-01 * 2007-08-29 * 2007-10-10 * 2007-12-15 * 2008-02-01 * 2008-09-01 * 2009-04-04 The option is in the format of a single string, with each version separated by a space. Possible values: * Any string that represents zero or more versions, separated by spaces. """), cfg.ListOpt('vendordata_providers', item_type=cfg.types.String(choices=[ ('StaticJSON', 'Load a JSON file from the path configured by ' '``vendordata_jsonfile_path`` and use this as the source for ' '``vendor_data.json`` and ``vendor_data2.json``.'), ('DynamicJSON', 'Build a JSON file using values defined in ' '``vendordata_dynamic_targets`` and use this as the source ' 'for ``vendor_data2.json``.'), ]), default=['StaticJSON'], help=""" A list of vendordata providers. vendordata providers are how deployers can provide metadata via configdrive and metadata that is specific to their deployment. For more information on the requirements for implementing a vendordata dynamic endpoint, please see the vendordata.rst file in the nova developer reference. Related options: * ``vendordata_dynamic_targets`` * ``vendordata_dynamic_ssl_certfile`` * ``vendordata_dynamic_connect_timeout`` * ``vendordata_dynamic_read_timeout`` * ``vendordata_dynamic_failure_fatal`` """), cfg.ListOpt('vendordata_dynamic_targets', default=[], help=""" A list of targets for the dynamic vendordata provider. These targets are of the form ``@``. The dynamic vendordata provider collects metadata by contacting external REST services and querying them for information about the instance. This behaviour is documented in the vendordata.rst file in the nova developer reference. """), cfg.StrOpt('vendordata_dynamic_ssl_certfile', default='', help=""" Path to an optional certificate file or CA bundle to verify dynamic vendordata REST services ssl certificates against. Possible values: * An empty string, or a path to a valid certificate file Related options: * vendordata_providers * vendordata_dynamic_targets * vendordata_dynamic_connect_timeout * vendordata_dynamic_read_timeout * vendordata_dynamic_failure_fatal """), cfg.IntOpt('vendordata_dynamic_connect_timeout', default=5, min=3, help=""" Maximum wait time for an external REST service to connect. Possible values: * Any integer with a value greater than three (the TCP packet retransmission timeout). Note that instance start may be blocked during this wait time, so this value should be kept small. Related options: * vendordata_providers * vendordata_dynamic_targets * vendordata_dynamic_ssl_certfile * vendordata_dynamic_read_timeout * vendordata_dynamic_failure_fatal """), cfg.IntOpt('vendordata_dynamic_read_timeout', default=5, min=0, help=""" Maximum wait time for an external REST service to return data once connected. Possible values: * Any integer. Note that instance start is blocked during this wait time, so this value should be kept small. 
Related options: * vendordata_providers * vendordata_dynamic_targets * vendordata_dynamic_ssl_certfile * vendordata_dynamic_connect_timeout * vendordata_dynamic_failure_fatal """), cfg.BoolOpt('vendordata_dynamic_failure_fatal', default=False, help=""" Should failures to fetch dynamic vendordata be fatal to instance boot? Related options: * vendordata_providers * vendordata_dynamic_targets * vendordata_dynamic_ssl_certfile * vendordata_dynamic_connect_timeout * vendordata_dynamic_read_timeout """), cfg.IntOpt("metadata_cache_expiration", default=15, min=0, help=""" This option is the time (in seconds) to cache metadata. When set to 0, metadata caching is disabled entirely; this is generally not recommended for performance reasons. Increasing this setting should improve response times of the metadata API when under heavy load. Higher values may increase memory usage, and result in longer times for host metadata changes to take effect. """), cfg.BoolOpt("local_metadata_per_cell", default=False, help=""" Indicates that the nova-metadata API service has been deployed per-cell, so that we can have better performance and data isolation in a multi-cell deployment. Users should consider the use of this configuration depending on how neutron is setup. If you have networks that span cells, you might need to run nova-metadata API service globally. If your networks are segmented along cell boundaries, then you can run nova-metadata API service per cell. When running nova-metadata API service per cell, you should also configure each Neutron metadata-agent to point to the corresponding nova-metadata API service. """), cfg.StrOpt("dhcp_domain", default="novalocal", help=""" Domain name used to configure FQDN for instances. Configure a fully-qualified domain name for instance hostnames. The value is suffixed to the instance hostname from the database to construct the hostname that appears in the metadata API. To disable this behavior (for example in order to correctly support microversion's 2.94 FQDN hostnames), set this to the empty string. Possible values: * Any string that is a valid domain name. """), ] file_opts = [ cfg.StrOpt("vendordata_jsonfile_path", help=""" Cloud providers may store custom data in vendor data file that will then be available to the instances via the metadata service, and to the rendering of config-drive. The default class for this, JsonFileVendorData, loads this information from a JSON file, whose path is configured by this option. If there is no path set by this option, the class returns an empty dictionary. Note that when using this to provide static vendor data to a configuration drive, the nova-compute service must be configured with this option and the file must be accessible from the nova-compute host. Possible values: * Any string representing the path to the data file, or an empty string (default). """) ] osapi_opts = [ cfg.IntOpt("max_limit", default=1000, min=0, help=""" As a query can potentially return many thousands of items, you can limit the maximum number of items in a single response by setting this option. """), cfg.StrOpt("compute_link_prefix", help=""" This string is prepended to the normal URL that is returned in links to the OpenStack Compute API. If it is empty (the default), the URLs are returned unchanged. Possible values: * Any string, including an empty string (the default). """), cfg.StrOpt("glance_link_prefix", help=""" This string is prepended to the normal URL that is returned in links to Glance resources. 
If it is empty (the default), the URLs are returned unchanged. Possible values: * Any string, including an empty string (the default). """), cfg.BoolOpt("instance_list_per_project_cells", default=False, help=""" When enabled, this will cause the API to only query cell databases in which the tenant has mapped instances. This requires an additional (fast) query in the API database before each list, but also (potentially) limits the number of cell databases that must be queried to provide the result. If you have a small number of cells, or tenants are likely to have instances in all cells, then this should be False. If you have many cells, especially if you confine tenants to a small subset of those cells, this should be True. """), cfg.StrOpt("instance_list_cells_batch_strategy", default="distributed", choices=[ ("distributed", "Divide the " "limit requested by the user by the number of cells in the " "system. This requires counting the cells in the system " "initially, which will not be refreshed until service restart " "or SIGHUP. The actual batch size will be increased by 10% " "over the result of ($limit / $num_cells)."), ("fixed", "Request fixed-size batches from each cell, as defined " "by ``instance_list_cells_batch_fixed_size``. " "If the limit is smaller than the batch size, the limit " "will be used instead. If you do not wish batching to be used " "at all, setting the fixed size equal to the ``max_limit`` " "value will cause only one request per cell database to be " "issued."), ], help=""" This controls the method by which the API queries cell databases in smaller batches during large instance list operations. If batching is performed, a large instance list operation will request some fraction of the overall API limit from each cell database initially, and will re-request that same batch size as records are consumed (returned) from each cell as necessary. Larger batches mean less chattiness between the API and the database, but potentially more wasted effort processing the results from the database which will not be returned to the user. Any strategy will yield a batch size of at least 100 records, to avoid a user causing many tiny database queries in their request. Related options: * instance_list_cells_batch_fixed_size * max_limit """), cfg.IntOpt("instance_list_cells_batch_fixed_size", min=100, default=100, help=""" This controls the batch size of instances requested from each cell database if ``instance_list_cells_batch_strategy``` is set to ``fixed``. This integral value will define the limit issued to each cell every time a batch of instances is requested, regardless of the number of cells in the system or any other factors. Per the general logic called out in the documentation for ``instance_list_cells_batch_strategy``, the minimum value for this is 100 records per batch. Related options: * instance_list_cells_batch_strategy * max_limit """), cfg.BoolOpt("list_records_by_skipping_down_cells", default=True, help=""" When set to False, this will cause the API to return a 500 error if there is an infrastructure failure like non-responsive cells. If you want the API to skip the down cells and return the results from the up cells set this option to True. Note that from API microversion 2.69 there could be transient conditions in the deployment where certain records are not available and the results could be partial for certain requests containing those records. In those cases this option will be ignored. 
See "Handling Down Cells" section of the Compute API guide (https://docs.openstack.org/api-guide/compute/down_cells.html) for more information. """), ] os_network_opts = [ cfg.BoolOpt("use_neutron_default_nets", default=False, help=""" When True, the TenantNetworkController will query the Neutron API to get the default networks to use. Related options: * neutron_default_project_id """), cfg.StrOpt("neutron_default_project_id", deprecated_name="neutron_default_tenant_id", default="default", help=""" Tenant ID for getting the default network from Neutron API (also referred in some places as the 'project ID') to use. Related options: * use_neutron_default_nets """), ] enable_inst_pw_opts = [ cfg.BoolOpt("enable_instance_password", default=True, help=""" Enables returning of the instance password by the relevant server API calls such as create, rebuild, evacuate, or rescue. If the hypervisor does not support password injection, then the password returned will not be correct, so if your hypervisor does not support password injection, set this to False. """) ] validation_opts = [ cfg.StrOpt( "response_validation", choices=( ( "error", "Raise a HTTP 500 (Server Error) for responses that fail " "response body schema validation", ), ( "warn", "Log a warning for responses that fail response body schema " "validation", ), ( "ignore", "Ignore response body schema validation failures", ), ), default="warn", help="""\ Configure validation of API responses. ``warn`` is the current recommendation for production environments. This is expected to change to ``error`` in a future release. If you find it necessary to enable the ``ignore`` option, please report the issues you are seeing to the Nova team so we can improve our schemas. """, ), ] API_OPTS = ( metadata_opts + file_opts + osapi_opts + os_network_opts + enable_inst_pw_opts + validation_opts ) def register_opts(conf): conf.register_group(api_group) conf.register_opts(API_OPTS, group=api_group) def list_opts(): return {api_group: API_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/availability_zone.py0000664000175000017500000000431000000000000020527 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Intel, Inc. # Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg availability_zone_opts = [ cfg.StrOpt('internal_service_availability_zone', default='internal', help=""" Availability zone for internal services. This option determines the availability zone for the various internal nova services, such as 'nova-scheduler', 'nova-conductor', etc. Possible values: * Any string representing an existing availability zone name. """), cfg.StrOpt('default_availability_zone', default='nova', help=""" Default availability zone for compute services. 
This option determines the default availability zone for 'nova-compute' services, which will be used if the service(s) do not belong to aggregates with availability zone metadata. Possible values: * Any string representing an existing availability zone name. """), cfg.StrOpt('default_schedule_zone', help=""" Default availability zone for instances. This option determines the default availability zone for instances, which will be used when a user does not specify one when creating an instance. The instance(s) will be bound to this availability zone for their lifetime. Possible values: * Any string representing an existing availability zone name. * None, which means that the instance can move from one availability zone to another during its lifetime if it is moved from one compute node to another. Related options: * ``[cinder]/cross_az_attach`` """), ] def register_opts(conf): conf.register_opts(availability_zone_opts) def list_opts(): return {'DEFAULT': availability_zone_opts} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/base.py0000664000175000017500000000776100000000000015751 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg base_options = [ cfg.IntOpt( 'password_length', default=12, min=0, help='Length of generated instance admin passwords.'), cfg.StrOpt( 'instance_usage_audit_period', default='month', regex='^(hour|month|day|year)(@([0-9]+))?$', help=''' Time period to generate instance usages for. It is possible to define optional offset to given period by appending @ character followed by a number defining offset. Possible values: * period, example: ``hour``, ``day``, ``month` or ``year`` * period with offset, example: ``month@15`` will result in monthly audits starting on 15th day of month. '''), cfg.BoolOpt( 'use_rootwrap_daemon', default=False, help=''' Start and use a daemon that can run the commands that need to be run with root privileges. This option is usually enabled on nodes that run nova compute processes. '''), cfg.StrOpt( 'rootwrap_config', default="/etc/nova/rootwrap.conf", help=''' Path to the rootwrap configuration file. Goal of the root wrapper is to allow a service-specific unprivileged user to run a number of actions as the root user in the safest manner possible. The configuration file used here must match the one defined in the sudoers entry. '''), cfg.StrOpt( 'tempdir', help='Explicitly specify the temporary working directory.'), cfg.IntOpt( 'default_green_pool_size', deprecated_for_removal=True, deprecated_since='32.0.0', deprecated_reason=""" This option is only used if the service is running in Eventlet mode. When that mode is removed this config option will be removed too. 
""", default=1000, min=100, help=''' The total number of coroutines that can be run via nova's default greenthread pool concurrently, defaults to 1000, min value is 100. It is only used if the service is running in Eventlet mode. '''), cfg.IntOpt( 'default_thread_pool_size', default=10, min=1, help=''' The total number of threads that can be run via nova's default thread pool concurrently. It is only used if the service is running in native threading mode. '''), cfg.IntOpt( 'cell_worker_thread_pool_size', default=5, min=1, help=''' The number of tasks that can run concurrently, one for each cell, for operations requires cross cell data gathering a.k.a scatter-gather, like listing instances across multiple cells. This is only used if the service is running in native thread mode. '''), cfg.IntOpt( 'thread_pool_statistic_period', default=-1, min=-1, help=''' When new work is submitted to any of the thread pools nova logs the statistics of the pool (work executed, threads available, work queued, etc). This parameter defines how frequently such logging happens from a specific pool in seconds. A value of 60 means that statistic will be logged from a pool maximum once every 60 seconds. The value 0 means that logging happens every time work is submitted to the pool. The value -1 means the logging is disabled. '''), ] def register_opts(conf): conf.register_opts(base_options) def list_opts(): return {'DEFAULT': base_options} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/cache.py0000664000175000017500000000170300000000000016070 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_cache import core def register_opts(conf): core.configure(conf) def list_opts(): # The oslo_cache library returns a list of tuples return dict(core._opts.list_opts()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/cinder.py0000664000175000017500000001025000000000000016266 0ustar00zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystoneauth1 import loading as ks_loading from oslo_config import cfg cinder_group = cfg.OptGroup( 'cinder', title='Cinder Options', help="Configuration options for the block storage") cinder_opts = [ cfg.StrOpt('catalog_info', default='volumev3::publicURL', regex=r'^[\w-]+:\w*:.*$', help=""" Info to match when looking for cinder in the service catalog. The ```` is optional and omitted by default since it should not be necessary in most deployments. Possible values: * Format is separated values of the form: :: Note: Nova does not support the Cinder v2 API since the Nova 17.0.0 Queens release. Related options: * endpoint_template - Setting this option will override catalog_info """), cfg.StrOpt('endpoint_template', help=""" If this option is set then it will override service catalog lookup with this template for cinder endpoint Possible values: * URL for cinder endpoint API e.g. http://localhost:8776/v3/%(project_id)s Note: Nova does not support the Cinder v2 API since the Nova 17.0.0 Queens release. Related options: * catalog_info - If endpoint_template is not set, catalog_info will be used. """), cfg.StrOpt('os_region_name', help=""" Region name of this node. This is used when picking the URL in the service catalog. Possible values: * Any string representing region name """), cfg.IntOpt('http_retries', default=3, min=0, help=""" Number of times cinderclient should retry on any failed http call. 0 means connection is attempted only once. Setting it to any positive integer means that on failure connection is retried that many times e.g. setting it to 3 means total attempts to connect will be 4. Possible values: * Any integer value. 0 means connection is attempted only once """), cfg.BoolOpt('cross_az_attach', default=True, help=""" Allow attach between instance and volume in different availability zones. If False, volumes attached to an instance must be in the same availability zone in Cinder as the instance availability zone in Nova. This also means care should be taken when booting an instance from a volume where source is not "volume" because Nova will attempt to create a volume using the same availability zone as what is assigned to the instance. If that AZ is not in Cinder (or ``allow_availability_zone_fallback=False`` in cinder.conf), the volume create request will fail and the instance will fail the build request. By default there is no availability zone restriction on volume attach. Related options: * ``[DEFAULT]/default_schedule_zone`` """), cfg.BoolOpt('debug', default=False, help=""" Enable DEBUG logging with cinderclient and os_brick independently of the rest of Nova. """), ] def register_opts(conf): conf.register_group(cinder_group) conf.register_opts(cinder_opts, group=cinder_group) ks_loading.register_session_conf_options(conf, cinder_group.name) ks_loading.register_auth_conf_options(conf, cinder_group.name) def list_opts(): return { cinder_group.name: ( cinder_opts + ks_loading.get_session_conf_options() + ks_loading.get_auth_common_conf_options() + ks_loading.get_auth_plugin_conf_options('password') + ks_loading.get_auth_plugin_conf_options('v2password') + ks_loading.get_auth_plugin_conf_options('v3password')) } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/compute.py0000664000175000017500000015171400000000000016511 0ustar00zuulzuul00000000000000# needs:check_deprecation_status # Copyright 2015 Huawei Technology corp. # Copyright 2015 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import socket from oslo_config import cfg from oslo_config import types from nova.conf import paths compute_group = cfg.OptGroup( 'compute', title='Compute Manager Options', help=""" A collection of options specific to the nova-compute service. """) compute_opts = [ cfg.StrOpt('compute_driver', help=""" Defines which driver to use for controlling virtualization. Possible values: * ``libvirt.LibvirtDriver`` * ``fake.FakeDriver`` * ``ironic.IronicDriver`` * ``vmwareapi.VMwareVCDriver`` * ``zvm.ZVMDriver`` """), cfg.BoolOpt('allow_resize_to_same_host', default=False, help=""" Allow destination machine to match source for resize. Useful when testing in single-host environments. By default it is not allowed to resize to the same host. Setting this option to true will add the same host to the destination options. Also set to true if you allow the ServerGroupAffinityFilter and need to resize. For changes to this option to take effect, the nova-api service needs to be restarted. """), cfg.ListOpt('non_inheritable_image_properties', default=['cache_in_nova', 'bittorrent'], help=""" Image properties that should not be inherited from the instance when taking a snapshot. This option gives an opportunity to select which image-properties should not be inherited by newly created snapshots. .. note:: The following image properties are *never* inherited regardless of whether they are listed in this configuration option or not: * cinder_encryption_key_id * cinder_encryption_key_deletion_policy * img_signature * img_signature_hash_method * img_signature_key_type * img_signature_certificate_uuid Possible values: * A comma-separated list whose item is an image property. Usually only the image properties that are only needed by base images can be included here, since the snapshots that are created from the base images don't need them. * Default list: cache_in_nova, bittorrent """), cfg.IntOpt('max_local_block_devices', default=3, help=""" Maximum number of devices that will result in a local image being created on the hypervisor node. A negative number means unlimited. Setting ``max_local_block_devices`` to 0 means that any request that attempts to create a local disk will fail. This option is meant to limit the number of local discs (so root local disc that is the result of ``imageRef`` being used when creating a server, and any other ephemeral and swap disks). 0 does not mean that images will be automatically converted to volumes and boot instances from volumes - it just means that all requests that attempt to create a local disk will fail. Possible values: * 0: Creating a local disk is not allowed. * Negative number: Allows unlimited number of local discs. * Positive number: Allows only these many number of local discs. """), cfg.ListOpt('compute_monitors', default=[], help=""" A comma-separated list of monitors that can be used for getting compute metrics. You can use the alias/name from the setuptools entry points for nova.compute.monitors.* namespaces. 
If no namespace is supplied, the "cpu." namespace is assumed for backwards-compatibility. NOTE: Only one monitor per namespace (For example: cpu) can be loaded at a time. Possible values: * An empty list will disable the feature (Default). * An example value that would enable the CPU bandwidth monitor that uses the virt driver variant:: compute_monitors = cpu.virt_driver """), cfg.StrOpt('default_ephemeral_format', help=""" The default format an ephemeral_volume will be formatted with on creation. Possible values: * ``ext2`` * ``ext3`` * ``ext4`` * ``xfs`` * ``ntfs`` (only for Windows guests) """), cfg.BoolOpt('vif_plugging_is_fatal', default=True, help=""" Determine if instance should boot or fail on VIF plugging timeout. Nova sends a port update to Neutron after an instance has been scheduled, providing Neutron with the necessary information to finish setup of the port. Once completed, Neutron notifies Nova that it has finished setting up the port, at which point Nova resumes the boot of the instance since network connectivity is now supposed to be present. A timeout will occur if the reply is not received after a given interval. This option determines what Nova does when the VIF plugging timeout event happens. When enabled, the instance will error out. When disabled, the instance will continue to boot on the assumption that the port is ready. Possible values: * True: Instances should fail after VIF plugging timeout * False: Instances should continue booting after VIF plugging timeout """), cfg.IntOpt('vif_plugging_timeout', default=300, min=0, help=""" Timeout for Neutron VIF plugging event message arrival. Number of seconds to wait for Neutron vif plugging events to arrive before continuing or failing (see 'vif_plugging_is_fatal'). If you are hitting timeout failures at scale, consider running rootwrap in "daemon mode" in the neutron agent via the ``[agent]/root_helper_daemon`` neutron configuration option. Related options: * vif_plugging_is_fatal - If ``vif_plugging_timeout`` is set to zero and ``vif_plugging_is_fatal`` is False, events should not be expected to arrive at all. """), cfg.IntOpt('arq_binding_timeout', default=300, min=1, help=""" Timeout for Accelerator Request (ARQ) bind event message arrival. Number of seconds to wait for ARQ bind resolution event to arrive. The event indicates that every ARQ for an instance has either bound successfully or failed to bind. If it does not arrive, instance bringup is aborted with an exception. """), cfg.StrOpt('injected_network_template', default=paths.basedir_def('nova/virt/interfaces.template'), help="""Path to '/etc/network/interfaces' template. The path to a template file for the '/etc/network/interfaces'-style file, which will be populated by nova and subsequently used by cloudinit. This provides a method to configure network connectivity in environments without a DHCP server. The template will be rendered using Jinja2 template engine, and receive a top-level key called ``interfaces``. This key will contain a list of dictionaries, one for each interface. Refer to the cloudinit documentation for more information: https://cloudinit.readthedocs.io/en/latest/topics/datasources.html Possible values: * A path to a Jinja2-formatted template for a Debian '/etc/network/interfaces' file. This applies even if using a non Debian-derived guest. Related options: * ``flat_inject``: This must be set to ``True`` to ensure nova embeds network configuration information in the metadata provided through the config drive. 
"""), cfg.StrOpt('preallocate_images', default='none', choices=[ ('none', 'No storage provisioning is done up front'), ('space', 'Storage is fully allocated at instance start') ], help=""" The image preallocation mode to use. Image preallocation allows storage for instance images to be allocated up front when the instance is initially provisioned. This ensures immediate feedback is given if enough space isn't available. In addition, it should significantly improve performance on writes to new blocks and may even improve I/O performance to prewritten blocks due to reduced fragmentation. """), cfg.BoolOpt('use_cow_images', default=True, help=""" Enable use of copy-on-write (cow) images. QEMU/KVM allow the use of qcow2 as backing files. By disabling this, backing files will not be used. """), cfg.BoolOpt('force_raw_images', default=True, help=""" Force conversion of backing images to raw format. Possible values: * True: Backing image files will be converted to raw image format * False: Backing image files will not be converted Related options: * ``compute_driver``: Only the libvirt driver uses this option. * ``[libvirt]/images_type``: If images_type is rbd, setting this option to False is not allowed. See the bug https://bugs.launchpad.net/nova/+bug/1816686 for more details. """), # NOTE(yamahata): ListOpt won't work because the command may include a comma. # For example: # # mkfs.ext4 -O dir_index,extent -E stride=8,stripe-width=16 # --label %(fs_label)s %(target)s # # list arguments are comma separated and there is no way to escape such # commas. cfg.MultiStrOpt('virt_mkfs', default=[], help=""" Name of the mkfs commands for ephemeral device. The format is = """), cfg.BoolOpt('resize_fs_using_block_device', default=False, help=""" Enable resizing of filesystems via a block device. If enabled, attempt to resize the filesystem by accessing the image over a block device. This is done by the host and may not be necessary if the image contains a recent version of cloud-init. Possible mechanisms require the nbd driver (for qcow and raw), or loop (for raw). """), cfg.IntOpt('timeout_nbd', default=10, min=0, help='Amount of time, in seconds, to wait for NBD device start up.'), cfg.StrOpt('pointer_model', default='usbtablet', choices=[ ('ps2mouse', 'Uses relative movement. Mouse connected by PS2'), ('usbtablet', 'Uses absolute movement. Tablet connect by USB'), (None, 'Uses default behavior provided by drivers (mouse on PS2 ' 'for libvirt x86)'), ], help=""" Generic property to specify the pointer type. Input devices allow interaction with a graphical framebuffer. For example to provide a graphic tablet for absolute cursor movement. If set, either the ``hw_input_bus`` or ``hw_pointer_model`` image metadata properties will take precedence over this configuration option. Related options: * usbtablet must be configured with VNC enabled or SPICE enabled and SPICE agent disabled. When used with libvirt the instance mode should be configured as HVM. """), cfg.IntOpt('reimage_timeout_per_gb', default=20, min=1, help=""" Timeout for reimaging a volume. Number of seconds to wait for volume-reimaged events to arrive before continuing or failing. This is a per gigabyte time which has a default value of 20 seconds and will be multiplied by the GB size of image. Eg: an image of 6 GB will have a timeout of 20 * 6 = 120 seconds. Try increasing the timeout if the image copy per GB takes more time and you are hitting timeout failures. 
"""), ] resource_tracker_opts = [ cfg.StrOpt('vcpu_pin_set', deprecated_for_removal=True, deprecated_since='20.0.0', deprecated_reason=""" This option has been superseded by the ``[compute] cpu_dedicated_set`` and ``[compute] cpu_shared_set`` options, which allow things like the co-existence of pinned and unpinned instances on the same host (for the libvirt driver). """, help=""" Mask of host CPUs that can be used for ``VCPU`` resources. The behavior of this option depends on the definition of the ``[compute] cpu_dedicated_set`` option and affects the behavior of the ``[compute] cpu_shared_set`` option. * If ``[compute] cpu_dedicated_set`` is defined, defining this option will result in an error. * If ``[compute] cpu_dedicated_set`` is not defined, this option will be used to determine inventory for ``VCPU`` resources and to limit the host CPUs that both pinned and unpinned instances can be scheduled to, overriding the ``[compute] cpu_shared_set`` option. Possible values: * A comma-separated list of physical CPU numbers that virtual CPUs can be allocated from. Each element should be either a single CPU number, a range of CPU numbers, or a caret followed by a CPU number to be excluded from a previous range. For example:: vcpu_pin_set = "4-12,^8,15" Related options: * ``[compute] cpu_dedicated_set`` * ``[compute] cpu_shared_set`` """), cfg.MultiOpt('reserved_huge_pages', item_type=types.Dict(), help=""" Number of huge/large memory pages to reserve per NUMA host cell. Possible values: * A list of valid key=value which reflect NUMA node ID, page size (Default unit is KiB) and number of pages to be reserved. For example:: reserved_huge_pages = node:0,size:2048,count:64 reserved_huge_pages = node:1,size:1GB,count:1 In this example we are reserving on NUMA node 0 64 pages of 2MiB and on NUMA node 1 1 page of 1GiB. """), cfg.IntOpt('reserved_host_disk_mb', min=0, default=0, help=""" Amount of disk resources in MB to make them always available to host. The disk usage gets reported back to the scheduler from nova-compute running on the compute nodes. To prevent the disk resources from being considered as available, this option can be used to reserve disk space for that host. Possible values: * Any positive integer representing amount of disk in MB to reserve for the host. """), cfg.IntOpt('reserved_host_memory_mb', default=512, min=0, help=""" Amount of memory in MB to reserve for the host so that it is always available to host processes. The host resources usage is reported back to the scheduler continuously from nova-compute running on the compute node. To prevent the host memory from being considered as available, this option is used to reserve memory for the host. Possible values: * Any positive integer representing amount of memory in MB to reserve for the host. """), cfg.IntOpt('reserved_host_cpus', default=0, min=0, help=""" Number of host CPUs to reserve for host processes. The host resources usage is reported back to the scheduler continuously from nova-compute running on the compute node. This value is used to determine the ``reserved`` value reported to placement. This option cannot be set if the ``[compute] cpu_shared_set`` or ``[compute] cpu_dedicated_set`` config options have been defined. When these options are defined, any host CPUs not included in these values are considered reserved for the host. Possible values: * Any positive integer representing number of physical CPUs to reserve for the host. 
Related options: * ``[compute] cpu_shared_set`` * ``[compute] cpu_dedicated_set`` """), ] allocation_ratio_opts = [ cfg.FloatOpt('cpu_allocation_ratio', default=None, min=0.0, help=""" Virtual CPU to physical CPU allocation ratio. This option is used to influence the hosts selected by the Placement API by configuring the allocation ratio for ``VCPU`` inventory. .. note:: This option does not affect ``PCPU`` inventory, which cannot be overcommitted. .. note:: If this option is set to something *other than* ``None`` or ``0.0``, the allocation ratio will be overwritten by the value of this option, otherwise, the allocation ratio will not change. Once set to a non-default value, it is not possible to "unset" the config to get back to the default behavior. If you want to reset back to the initial value, explicitly specify it to the value of ``initial_cpu_allocation_ratio``. Possible values: * Any valid positive integer or float value Related options: * ``initial_cpu_allocation_ratio`` """), cfg.FloatOpt('ram_allocation_ratio', default=None, min=0.0, help=""" Virtual RAM to physical RAM allocation ratio. This option is used to influence the hosts selected by the Placement API by configuring the allocation ratio for ``MEMORY_MB`` inventory. .. note:: If this option is set to something *other than* ``None`` or ``0.0``, the allocation ratio will be overwritten by the value of this option, otherwise, the allocation ratio will not change. Once set to a non-default value, it is not possible to "unset" the config to get back to the default behavior. If you want to reset back to the initial value, explicitly specify it to the value of ``initial_ram_allocation_ratio``. Possible values: * Any valid positive integer or float value Related options: * ``initial_ram_allocation_ratio`` """), cfg.FloatOpt('disk_allocation_ratio', default=None, min=0.0, help=""" Virtual disk to physical disk allocation ratio. This option is used to influence the hosts selected by the Placement API by configuring the allocation ratio for ``DISK_GB`` inventory. When configured, a ratio greater than 1.0 will result in over-subscription of the available physical disk, which can be useful for more efficiently packing instances created with images that do not use the entire virtual disk, such as sparse or compressed images. It can be set to a value between 0.0 and 1.0 in order to preserve a percentage of the disk for uses other than instances. .. note:: If the value is set to ``>1``, we recommend keeping track of the free disk space, as the value approaching ``0`` may result in the incorrect functioning of instances using it at the moment. .. note:: If this option is set to something *other than* ``None`` or ``0.0``, the allocation ratio will be overwritten by the value of this option, otherwise, the allocation ratio will not change. Once set to a non-default value, it is not possible to "unset" the config to get back to the default behavior. If you want to reset back to the initial value, explicitly specify it to the value of ``initial_disk_allocation_ratio``. Possible values: * Any valid positive integer or float value Related options: * ``initial_disk_allocation_ratio`` """), cfg.FloatOpt('initial_cpu_allocation_ratio', default=4.0, min=0.0, help=""" Initial virtual CPU to physical CPU allocation ratio. This is only used when initially creating the ``computes_nodes`` table record for a given nova-compute service. See https://docs.openstack.org/nova/latest/admin/configuration/schedulers.html for more details and usage scenarios. 
Related options: * ``cpu_allocation_ratio`` """), cfg.FloatOpt('initial_ram_allocation_ratio', default=1.0, min=0.0, help=""" Initial virtual RAM to physical RAM allocation ratio. This is only used when initially creating the ``computes_nodes`` table record for a given nova-compute service. See https://docs.openstack.org/nova/latest/admin/configuration/schedulers.html for more details and usage scenarios. Related options: * ``ram_allocation_ratio`` """), cfg.FloatOpt('initial_disk_allocation_ratio', default=1.0, min=0.0, help=""" Initial virtual disk to physical disk allocation ratio. This is only used when initially creating the ``computes_nodes`` table record for a given nova-compute service. See https://docs.openstack.org/nova/latest/admin/configuration/schedulers.html for more details and usage scenarios. Related options: * ``disk_allocation_ratio`` """) ] compute_manager_opts = [ cfg.StrOpt('console_host', default=socket.gethostname(), sample_default="", help=""" Console proxy host to be used to connect to instances on this host. It is the publicly visible name for the console host. Possible values: * Current hostname (default) or any string representing hostname. """), cfg.StrOpt('default_access_ip_network_name', help=""" Name of the network to be used to set access IPs for instances. If there are multiple IPs to choose from, an arbitrary one will be chosen. Possible values: * None (default) * Any string representing network name. """), cfg.StrOpt('instances_path', default=paths.state_path_def('instances'), sample_default="$state_path/instances", help=""" Specifies where instances are stored on the hypervisor's disk. It can point to locally attached storage or a directory on NFS. Possible values: * $state_path/instances where state_path is a config option that specifies the top-level directory for maintaining nova's state. (default) or Any string representing directory path. Related options: * ``[workarounds]/ensure_libvirt_rbd_instance_dir_cleanup`` """), cfg.BoolOpt('instance_usage_audit', default=False, help=""" This option enables periodic compute.instance.exists notifications. Each compute node must be configured to generate system usage data. These notifications are consumed by OpenStack Telemetry service. """), cfg.IntOpt('live_migration_retry_count', default=30, min=0, advanced=True, help=""" This option controls the maximum number of attempts to plug VIFs on the destination host. Historically this could fail due to rate limiting in iptables. Operators should not need to modify this value from its default. Possible values: * Any positive integer representing retry count. """), cfg.BoolOpt('resume_guests_state_on_host_boot', default=False, help=""" This option specifies whether to start guests that were running before the host rebooted. It ensures that all of the instances on a Nova compute node resume their state each time the compute node boots or restarts. """), cfg.IntOpt('network_allocate_retries', default=0, min=0, help=""" Number of times to retry network allocation. It is required to attempt network allocation retries if the virtual interface plug fails. Possible values: * Any positive integer representing retry count. """), cfg.IntOpt('max_concurrent_builds', default=10, min=0, help=""" Limits the maximum number of instance builds to run concurrently by nova-compute. Compute service can attempt to build an infinite number of instances, if asked to do so. This limit is enforced to avoid building unlimited instance concurrently on a compute node. 
This value can be set per compute node. Possible Values: * 0 : treated as unlimited. * Any positive integer representing maximum concurrent builds. """), cfg.IntOpt('max_concurrent_snapshots', default=5, min=0, help=""" Maximum number of instance snapshot operations to run concurrently. This limit is enforced to prevent snapshots overwhelming the host/network/storage and causing failure. This value can be set per compute node. Possible Values: * 0 : treated as unlimited. * Any positive integer representing maximum concurrent snapshots. """), cfg.IntOpt('max_concurrent_live_migrations', default=1, min=0, help=""" Maximum number of live migrations to run concurrently. This limit is enforced to avoid outbound live migrations overwhelming the host/network and causing failures. It is not recommended that you change this unless you are very sure that doing so is safe and stable in your environment. Possible values: * 0 : treated as unlimited. * Any positive integer representing maximum number of live migrations to run concurrently. """), cfg.IntOpt('block_device_allocate_retries', default=60, min=0, help=""" The number of times to check for a volume to be "available" before attaching it during server create. When creating a server with block device mappings where ``source_type`` is one of ``blank``, ``image`` or ``snapshot`` and the ``destination_type`` is ``volume``, the ``nova-compute`` service will create a volume and then attach it to the server. Before the volume can be attached, it must be in status "available". This option controls how many times to check for the created volume to be "available" before it is attached. If the operation times out, the volume will be deleted if the block device mapping ``delete_on_termination`` value is True. It is recommended to configure the image cache in the block storage service to speed up this operation. See https://docs.openstack.org/cinder/latest/admin/blockstorage-image-volume-cache.html for details. Possible values: * 60 (default) * If value is 0, then one attempt is made. * For any value > 0, total attempts are (value + 1) Related options: * ``block_device_allocate_retries_interval`` - controls the interval between checks """), cfg.IntOpt('sync_power_state_pool_size', default=1000, help=""" Number of greenthreads available for use to sync power states. This option can be used to reduce the number of concurrent requests made to the hypervisor or system with real instance power states for performance reasons, for example, with Ironic. Possible values: * Any positive integer representing greenthreads count. """) ] compute_group_opts = [ cfg.IntOpt('consecutive_build_service_disable_threshold', default=10, help=""" Enables reporting of build failures to the scheduler. Any nonzero value will enable sending build failure statistics to the scheduler for use by the BuildFailureWeigher. Possible values: * Any positive integer enables reporting build failures. * Zero to disable reporting build failures. Related options: * [filter_scheduler]/build_failure_weight_multiplier """), cfg.IntOpt("shutdown_retry_interval", default=10, min=1, help=""" Time to wait in seconds before resending an ACPI shutdown signal to instances. The overall time to wait is set by ``shutdown_timeout``. Possible values: * Any integer greater than 0 in seconds Related options: * ``shutdown_timeout`` """), cfg.IntOpt('sharing_providers_max_uuids_per_request', default=200, min=1, help=""" Maximum number of aggregate UUIDs per API request. The default is 200. 
In deployments with a large number of aggregates, a 'Request-Too-Long' error may be raised by the web server or load balancer. This value allows setting the batch size to limit the query length. Possible values: * Any positive integer. """), cfg.IntOpt('resource_provider_association_refresh', default=300, min=0, mutable=True, # TODO(efried): Provide more/better explanation of what this option is # all about. Reference bug(s). Unless we're just going to remove it. help=""" Interval for updating nova-compute-side cache of the compute node resource provider's inventories, aggregates, and traits. This option specifies the number of seconds between attempts to update a provider's inventories, aggregates and traits in the local cache of the compute node. A value of zero disables cache refresh completely. The cache can be cleared manually at any time by sending SIGHUP to the compute process, causing it to be repopulated the next time the data is accessed. Possible values: * Any positive integer in seconds, or zero to disable refresh. """), cfg.StrOpt('cpu_shared_set', help=""" Mask of host CPUs that can be used for ``VCPU`` resources and offloaded emulator threads. The behavior of this option depends on the definition of the deprecated ``vcpu_pin_set`` option. * If ``vcpu_pin_set`` is not defined, ``[compute] cpu_shared_set`` will be used to provide ``VCPU`` inventory and to determine the host CPUs that unpinned instances can be scheduled to. It will also be used to determine the host CPUS that instance emulator threads should be offloaded to for instances configured with the ``share`` emulator thread policy (``hw:emulator_threads_policy=share``). * If ``vcpu_pin_set`` is defined, ``[compute] cpu_shared_set`` will only be used to determine the host CPUs that instance emulator threads should be offloaded to for instances configured with the ``share`` emulator thread policy (``hw:emulator_threads_policy=share``). ``vcpu_pin_set`` will be used to provide ``VCPU`` inventory and to determine the host CPUs that both pinned and unpinned instances can be scheduled to. This behavior will be simplified in a future release when ``vcpu_pin_set`` is removed. Possible values: * A comma-separated list of physical CPU numbers that instance VCPUs can be allocated from. Each element should be either a single CPU number, a range of CPU numbers, or a caret followed by a CPU number to be excluded from a previous range. For example:: cpu_shared_set = "4-12,^8,15" Related options: * ``[compute] cpu_dedicated_set``: This is the counterpart option for defining where ``PCPU`` resources should be allocated from. * ``vcpu_pin_set``: A legacy option whose definition may change the behavior of this option. """), cfg.StrOpt('cpu_dedicated_set', help=""" Mask of host CPUs that can be used for ``PCPU`` resources. The behavior of this option affects the behavior of the deprecated ``vcpu_pin_set`` option. * If this option is defined, defining ``vcpu_pin_set`` will result in an error. * If this option is not defined, ``vcpu_pin_set`` will be used to determine inventory for ``VCPU`` resources and to limit the host CPUs that both pinned and unpinned instances can be scheduled to. This behavior will be simplified in a future release when ``vcpu_pin_set`` is removed. Possible values: * A comma-separated list of physical CPU numbers that instance VCPUs can be allocated from. Each element should be either a single CPU number, a range of CPU numbers, or a caret followed by a CPU number to be excluded from a previous range. 
For example:: cpu_dedicated_set = "4-12,^8,15" Related options: * ``[compute] cpu_shared_set``: This is the counterpart option for defining where ``VCPU`` resources should be allocated from. * ``vcpu_pin_set``: A legacy option that this option partially replaces. """), cfg.BoolOpt('live_migration_wait_for_vif_plug', default=True, help=""" Determine if the source compute host should wait for a ``network-vif-plugged`` event from the (neutron) networking service before starting the actual transfer of the guest to the destination compute host. Note that this option is read on the destination host of a live migration. If you set this option the same on all of your compute hosts, which you should do if you use the same networking backend universally, you do not have to worry about this. Before starting the transfer of the guest, some setup occurs on the destination compute host, including plugging virtual interfaces. Depending on the networking backend **on the destination host**, a ``network-vif-plugged`` event may be triggered and then received on the source compute host and the source compute can wait for that event to ensure networking is set up on the destination host before starting the guest transfer in the hypervisor. .. note:: The compute service cannot reliably determine which types of virtual interfaces (``port.binding:vif_type``) will send ``network-vif-plugged`` events without an accompanying port ``binding:host_id`` change. Open vSwitch and linuxbridge should be OK, but OpenDaylight is at least one known backend that will not currently work in this case, see bug https://launchpad.net/bugs/1755890 for more details. Possible values: * True: wait for ``network-vif-plugged`` events before starting guest transfer * False: do not wait for ``network-vif-plugged`` events before starting guest transfer (this is the legacy behavior) Related options: * [DEFAULT]/vif_plugging_is_fatal: if ``live_migration_wait_for_vif_plug`` is True and ``vif_plugging_timeout`` is greater than 0, and a timeout is reached, the live migration process will fail with an error but the guest transfer will not have started to the destination host * [DEFAULT]/vif_plugging_timeout: if ``live_migration_wait_for_vif_plug`` is True, this controls the amount of time to wait before timing out and either failing if ``vif_plugging_is_fatal`` is True, or simply continuing with the live migration """), cfg.IntOpt('max_concurrent_disk_ops', default=0, min=0, help=""" Number of concurrent disk-IO-intensive operations (glance image downloads, image format conversions, etc.) that we will do in parallel. If this is set too high then response time suffers. The default value of 0 means no limit. """), cfg.IntOpt('max_disk_devices_to_attach', default=-1, min=-1, help=""" Maximum number of disk devices allowed to attach to a single server. Note that the number of disks supported by an server depends on the bus used. For example, the ``ide`` disk bus is limited to 4 attached devices. The configured maximum is enforced during server create, rebuild, evacuate, unshelve, live migrate, and attach volume. Usually, disk bus is determined automatically from the device type or disk device, and the virtualization type. However, disk bus can also be specified via a block device mapping or an image property. 
See the ``disk_bus`` field in :doc:`/user/block-device-mapping` for more information about specifying disk bus in a block device mapping, and see https://docs.openstack.org/glance/latest/admin/useful-image-properties.html for more information about the ``hw_disk_bus`` image property. Operators changing the ``[compute]/max_disk_devices_to_attach`` on a compute service that is hosting servers should be aware that it could cause rebuilds to fail, if the maximum is decreased lower than the number of devices already attached to servers. For example, if server A has 26 devices attached and an operators changes ``[compute]/max_disk_devices_to_attach`` to 20, a request to rebuild server A will fail and go into ERROR state because 26 devices are already attached and exceed the new configured maximum of 20. Operators setting ``[compute]/max_disk_devices_to_attach`` should also be aware that during a cold migration, the configured maximum is only enforced in-place and the destination is not checked before the move. This means if an operator has set a maximum of 26 on compute host A and a maximum of 20 on compute host B, a cold migration of a server with 26 attached devices from compute host A to compute host B will succeed. Then, once the server is on compute host B, a subsequent request to rebuild the server will fail and go into ERROR state because 26 devices are already attached and exceed the configured maximum of 20 on compute host B. The configured maximum is not enforced on shelved offloaded servers, as they have no compute host. .. warning:: If this option is set to 0, the ``nova-compute`` service will fail to start, as 0 disk devices is an invalid configuration that would prevent instances from being able to boot. Possible values: * -1 means unlimited * Any integer >= 1 represents the maximum allowed. A value of 0 will cause the ``nova-compute`` service to fail to start, as 0 disk devices is an invalid configuration that would prevent instances from being able to boot. """), cfg.StrOpt('provider_config_location', default='/etc/nova/provider_config/', help=""" Location of YAML files containing resource provider configuration data. These files allow the operator to specify additional custom inventory and traits to assign to one or more resource providers. Additional documentation is available here: https://docs.openstack.org/nova/latest/admin/managing-resource-providers.html """), cfg.ListOpt('image_type_exclude_list', default=[], help=""" A list of image formats that should not be advertised as supported by this compute node. In some situations, it may be desirable to have a compute node refuse to support an expensive or complex image format. This factors into the decisions made by the scheduler about which compute node to select when booted with a given image. Possible values: * Any glance image ``disk_format`` name (i.e. ``raw``, ``qcow2``, etc) Related options: * ``[scheduler]query_placement_for_image_type_support`` - enables filtering computes based on supported image types, which is required to be enabled for this to take effect. """), cfg.ListOpt('vmdk_allowed_types', default=['streamOptimized', 'monolithicSparse'], help=""" A list of strings describing allowed VMDK "create-type" subformats that will be allowed. This is recommended to only include single-file-with-sparse-header variants to avoid potential host file exposure due to processing named extents. If this list is empty, then no form of VMDK image will be allowed. 
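For example, a deployment that only wants to accept the single-file-with-sparse-header variant could restrict the list further; this is an illustrative, stricter-than-default policy::

    [compute]
    vmdk_allowed_types = streamOptimized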
"""), cfg.BoolOpt('packing_host_numa_cells_allocation_strategy', default=False, help=""" This option controls allocation strategy used to choose NUMA cells on host for placing VM's NUMA cells (for VMs with defined numa topology). By default host's NUMA cell with more resources consumed will be chosen last for placing attempt. When the packing_host_numa_cells_allocation_strategy variable is set to ``False``, host's NUMA cell with more resources available will be used. When set to ``True`` cells with some usage will be packed with VM's cell until it will be completely exhausted, before a new free host's cell will be used. Possible values: * ``True``: Packing VM's NUMA cell on most used host NUMA cell. * ``False``: Spreading VM's NUMA cell on host's NUMA cells with more resources available. """), ] interval_opts = [ cfg.IntOpt('sync_power_state_interval', default=600, help=""" Interval to sync power states between the database and the hypervisor. The interval that Nova checks the actual virtual machine power state and the power state that Nova has in its database. If a user powers down their VM, Nova updates the API to report the VM has been powered down. Should something turn on the VM unexpectedly, Nova will turn the VM back off to keep the system in the expected state. Possible values: * 0: Will run at the default periodic interval. * Any value < 0: Disables the option. * Any positive integer in seconds. Related options: * If ``handle_virt_lifecycle_events`` in the ``workarounds`` group is false and this option is negative, then instances that get out of sync between the hypervisor and the Nova database will have to be synchronized manually. """), cfg.IntOpt('heal_instance_info_cache_interval', default=-1, help=""" Interval between instance network information cache updates. Number of seconds after which each compute node runs the task of querying Neutron for all of its instances networking information, then updates the Nova db with that information. Nova will not update it's cache periodically if this option is set to <= 0. Nova will still react to network-changed external events to update its cache. Each in tree neutron backend is sending network-changed external events to update nova's view. So in these deployment scenarios this periodic is safe to be turned off. Out of tree neutron backends might not send this event and if the cache is not up to date, then the metadata service and nova-api endpoints will be proxying incorrect network data about the instance. So for these backends it is not recommended to turn the periodic off. Possible values: * Any positive integer in seconds. * Any value <=0 will disable the sync. """), cfg.IntOpt('reclaim_instance_interval', default=0, help=""" Interval for reclaiming deleted instances. A value greater than 0 will enable SOFT_DELETE of instances. This option decides whether the server to be deleted will be put into the SOFT_DELETED state. If this value is greater than 0, the deleted server will not be deleted immediately, instead it will be put into a queue until it's too old (deleted time greater than the value of reclaim_instance_interval). The server can be recovered from the delete queue by using the restore action. If the deleted server remains longer than the value of reclaim_instance_interval, it will be deleted by a periodic task in the compute service automatically. 
Note that this option is read from both the API and compute nodes, and must be set globally otherwise servers could be put into a soft deleted state in the API and never actually reclaimed (deleted) on the compute node. .. note:: When using this option, you should also configure the ``[cinder]`` auth options, e.g. ``auth_type``, ``auth_url``, ``username``, etc. Since the reclaim happens in a periodic task, there is no user token to cleanup volumes attached to any SOFT_DELETED servers so nova must be configured with administrator role access to cleanup those resources in cinder. Possible values: * Any positive integer(in seconds) greater than 0 will enable this option. * Any value <=0 will disable the option. Related options: * [cinder] auth options for cleaning up volumes attached to servers during the reclaim process """), cfg.IntOpt('volume_usage_poll_interval', default=0, help=""" Interval for gathering volume usages. This option updates the volume usage cache for every volume_usage_poll_interval number of seconds. Possible values: * Any positive integer(in seconds) greater than 0 will enable this option. * Any value <=0 will disable the option. """), cfg.IntOpt('shelved_poll_interval', default=3600, help=""" Interval for polling shelved instances to offload. The periodic task runs for every shelved_poll_interval number of seconds and checks if there are any shelved instances. If it finds a shelved instance, based on the 'shelved_offload_time' config value it offloads the shelved instances. Check 'shelved_offload_time' config option description for details. Possible values: * Any value <= 0: Disables the option. * Any positive integer in seconds. Related options: * ``shelved_offload_time`` """), cfg.IntOpt('shelved_offload_time', default=0, help=""" Time before a shelved instance is eligible for removal from a host. By default this option is set to 0 and the shelved instance will be removed from the hypervisor immediately after shelve operation. Otherwise, the instance will be kept for the value of shelved_offload_time(in seconds) so that during the time period the unshelve action will be faster, then the periodic task will remove the instance from hypervisor after shelved_offload_time passes. Possible values: * 0: Instance will be immediately offloaded after being shelved. * Any value < 0: An instance will never offload. * Any positive integer in seconds: The instance will exist for the specified number of seconds before being offloaded. """), # NOTE(melwitt): We're also using this option as the interval for cleaning # up expired console authorizations from the database. It's related to the # delete_instance_interval in that it's another task for cleaning up # resources related to an instance. cfg.IntOpt('instance_delete_interval', default=300, help=""" Interval for retrying failed instance file deletes. This option depends on 'maximum_instance_delete_attempts'. This option specifies how often to retry deletes whereas 'maximum_instance_delete_attempts' specifies the maximum number of retry attempts that can be made. Possible values: * 0: Will run at the default periodic interval. * Any value < 0: Disables the option. * Any positive integer in seconds. Related options: * ``maximum_instance_delete_attempts`` from instance_cleaning_opts group. """), cfg.IntOpt('block_device_allocate_retries_interval', default=3, min=0, help=""" Interval (in seconds) between block device allocation retries on failures. This option allows the user to specify the time interval between consecutive retries. 
The ``block_device_allocate_retries`` option specifies the maximum number of retries. Possible values: * 0: Disables the option. * Any positive integer in seconds enables the option. Related options: * ``block_device_allocate_retries`` - controls the number of retries """), cfg.IntOpt('scheduler_instance_sync_interval', default=120, help=""" Interval between sending the scheduler a list of current instance UUIDs to verify that its view of instances is in sync with nova. If the CONF option 'scheduler_tracks_instance_changes' is False, the sync calls will not be made. So, changing this option will have no effect. If the out of sync situations are not very common, this interval can be increased to lower the number of RPC messages being sent. Likewise, if sync issues turn out to be a problem, the interval can be lowered to check more frequently. Possible values: * 0: Will run at the default periodic interval. * Any value < 0: Disables the option. * Any positive integer in seconds. Related options: * This option has no impact if ``scheduler_tracks_instance_changes`` is set to False. """), cfg.IntOpt('update_resources_interval', default=0, help=""" Interval for updating compute resources. This option specifies how often the update_available_resource periodic task should run. A number less than 0 means to disable the task completely. Leaving this at the default of 0 will cause this to run at the default periodic interval. Setting it to any positive value will cause it to run at approximately that number of seconds. Possible values: * 0: Will run at the default periodic interval. * Any value < 0: Disables the option. * Any positive integer in seconds. """) ] timeout_opts = [ cfg.IntOpt("reboot_timeout", default=0, min=0, help=""" Time interval after which an instance is hard rebooted automatically. When doing a soft reboot, it is possible that a guest kernel is completely hung in a way that causes the soft reboot task to not ever finish. Setting this option to a time period in seconds will automatically hard reboot an instance if it has been stuck in a rebooting state longer than N seconds. Possible values: * 0: Disables the option (default). * Any positive integer in seconds: Enables the option. """), cfg.IntOpt("instance_build_timeout", default=0, min=0, help=""" Maximum time in seconds that an instance can take to build. If this timer expires, instance status will be changed to ERROR. Enabling this option will make sure an instance will not be stuck in BUILD state for a longer period. Possible values: * 0: Disables the option (default) * Any positive integer in seconds: Enables the option. """), cfg.IntOpt("rescue_timeout", default=0, min=0, help=""" Interval to wait before un-rescuing an instance stuck in RESCUE. Possible values: * 0: Disables the option (default) * Any positive integer in seconds: Enables the option. """), cfg.IntOpt("resize_confirm_window", default=0, min=0, help=""" Automatically confirm resizes after N seconds. Resize functionality will save the existing server before resizing. After the resize completes, user is requested to confirm the resize. The user has the opportunity to either confirm or revert all changes. Confirm resize removes the original server and changes server status from resized to active. Setting this option to a time period (in seconds) will automatically confirm the resize if the server is in resized state longer than that time. Possible values: * 0: Disables the option (default) * Any positive integer in seconds: Enables the option. 
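For example, to automatically confirm resizes that have been left unconfirmed for more than one hour (an illustrative value)::

    [DEFAULT]
    resize_confirm_window = 3600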
"""), cfg.IntOpt("shutdown_timeout", default=60, min=0, help=""" Total time to wait in seconds for an instance to perform a clean shutdown. It determines the overall period (in seconds) a VM is allowed to perform a clean shutdown. While performing stop, rescue and shelve, rebuild operations, configuring this option gives the VM a chance to perform a controlled shutdown before the instance is powered off. The default timeout is 60 seconds. A value of 0 (zero) means the guest will be powered off immediately with no opportunity for guest OS clean-up. The timeout value can be overridden on a per image basis by means of os_shutdown_timeout that is an image metadata setting allowing different types of operating systems to specify how much time they need to shut down cleanly. Possible values: * A positive integer or 0 (default value is 60). """) ] running_deleted_opts = [ cfg.StrOpt("running_deleted_instance_action", default="reap", choices=[ ('reap', 'Powers down the instances and deletes them'), ('log', 'Logs warning message about deletion of the resource'), ('shutdown', 'Powers down instances and marks them as ' 'non-bootable which can be later used for debugging/analysis'), ('noop', 'Takes no action'), ], help=""" The compute service periodically checks for instances that have been deleted in the database but remain running on the compute node. The above option enables action to be taken when such instances are identified. Related options: * ``running_deleted_instance_poll_interval`` * ``running_deleted_instance_timeout`` """), cfg.IntOpt("running_deleted_instance_poll_interval", default=1800, help=""" Time interval in seconds to wait between runs for the clean up action. If set to 0, above check will be disabled. If "running_deleted_instance _action" is set to "log" or "reap", a value greater than 0 must be set. Possible values: * Any positive integer in seconds enables the option. * 0: Disables the option. * 1800: Default value. Related options: * running_deleted_instance_action """), cfg.IntOpt("running_deleted_instance_timeout", default=0, help=""" Time interval in seconds to wait for the instances that have been marked as deleted in database to be eligible for cleanup. Possible values: * Any positive integer in seconds(default is 0). Related options: * "running_deleted_instance_action" """), ] instance_cleaning_opts = [ cfg.IntOpt('maximum_instance_delete_attempts', default=5, min=1, help=""" The number of times to attempt to reap an instance's files. This option specifies the maximum number of retry attempts that can be made. Possible values: * Any positive integer defines how many attempts are made. Related options: * ``[DEFAULT] instance_delete_interval`` can be used to disable this option. """) ] db_opts = [ cfg.StrOpt('osapi_compute_unique_server_name_scope', default='', choices=[ ('', 'An empty value means that no uniqueness check is done and ' 'duplicate names are possible'), ('project', 'The instance name check is done only for instances ' 'within the same project'), ('global', 'The instance name check is done for all instances ' 'regardless of the project'), ], help=""" Sets the scope of the check for unique instance names. The default doesn't check for unique names. If a scope for the name check is set, a launch of a new instance or an update of an existing instance with a duplicate name will result in an ''InstanceExists'' error. The uniqueness is case-insensitive. 
Setting this option can increase the usability for end users as they don't have to distinguish among instances with the same name by their IDs. """), cfg.BoolOpt('enable_new_services', default=True, help=""" Enable new nova-compute services on this host automatically. When a new nova-compute service starts up, it gets registered in the database as an enabled service. Sometimes it can be useful to register new compute services in disabled state and then enabled them at a later point in time. This option only sets this behavior for nova-compute services, it does not auto-disable other services like nova-conductor, nova-scheduler, or nova-osapi_compute. Possible values: * ``True``: Each new compute service is enabled as soon as it registers itself. * ``False``: Compute services must be enabled via an os-services REST API call or with the CLI with ``nova service-enable ``, otherwise they are not ready to use. """), cfg.StrOpt('instance_name_template', default='instance-%08x', help=""" Template string to be used to generate instance names. This template controls the creation of the database name of an instance. This is *not* the display name you enter when creating an instance (via Horizon or CLI). For a new deployment it is advisable to change the default value (which uses the database autoincrement) to another value which makes use of the attributes of an instance, like ``instance-%(uuid)s``. If you already have instances in your deployment when you change this, your deployment will break. Possible values: * A string which either uses the instance database ID (like the default) * A string with a list of named database columns, for example ``%(id)d`` or ``%(uuid)s`` or ``%(hostname)s``. """), ] ALL_OPTS = (compute_opts + resource_tracker_opts + allocation_ratio_opts + compute_manager_opts + interval_opts + timeout_opts + running_deleted_opts + instance_cleaning_opts + db_opts) def register_opts(conf): conf.register_opts(ALL_OPTS) conf.register_group(compute_group) conf.register_opts(compute_group_opts, group=compute_group) def list_opts(): return {'DEFAULT': ALL_OPTS, 'compute': compute_group_opts} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/conductor.py0000664000175000017500000000324600000000000017031 0ustar00zuulzuul00000000000000# Copyright (c) 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg conductor_group = cfg.OptGroup( 'conductor', title='Conductor Options', help=""" Options under this group are used to define Conductor's communication, which manager should be act as a proxy between computes and database, and finally, how many worker processes will be used. """, ) ALL_OPTS = [ cfg.IntOpt( 'workers', help=""" Number of workers for OpenStack Conductor service. The default will be the number of CPUs available. 
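For example, to cap the service at four worker processes instead of one per CPU (the count is illustrative)::

    [conductor]
    workers = 4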
"""), ] migrate_opts = [ cfg.IntOpt( 'migrate_max_retries', default=-1, min=-1, help=""" Number of times to retry live-migration before failing. Possible values: * If == -1, try until out of hosts (default) * If == 0, only try once, no retries * Integer greater than 0 """), ] def register_opts(conf): conf.register_group(conductor_group) conf.register_opts(ALL_OPTS, group=conductor_group) conf.register_opts(migrate_opts) def list_opts(): return {"DEFAULT": migrate_opts, conductor_group: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/configdrive.py0000664000175000017500000000636400000000000017334 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg config_drive_opts = [ cfg.StrOpt('config_drive_format', default='iso9660', deprecated_for_removal=True, deprecated_since='19.0.0', deprecated_reason=""" This option was originally added as a workaround for bug in libvirt, #1246201, that was resolved in libvirt v1.2.17. As a result, this option is no longer necessary or useful. """, choices=[ ('iso9660', 'A file system image standard that is widely ' 'supported across operating systems.'), ('vfat', 'Provided for legacy reasons and to enable live ' 'migration with the libvirt driver and non-shared storage')], help=""" Config drive format. Config drive format that will contain metadata attached to the instance when it boots. Related options: * This option is meaningful when one of the following alternatives occur: 1. ``force_config_drive`` option set to ``true`` 2. the REST API call to create the instance contains an enable flag for config drive option 3. the image used to create the instance requires a config drive, this is defined by ``img_config_drive`` property for that image. """), cfg.BoolOpt('force_config_drive', default=False, help=""" Force injection to take place on a config drive When this option is set to true config drive functionality will be forced enabled by default, otherwise users can still enable config drives via the REST API or image metadata properties. Launched instances are not affected by this option. Possible values: * True: Force to use of config drive regardless the user's input in the REST API call. * False: Do not force use of config drive. Config drives can still be enabled via the REST API or image metadata properties. Related options: * Use the 'mkisofs_cmd' flag to set the path where you install the genisoimage program. If genisoimage is in same path as the nova-compute service, you do not need to set this flag. """), cfg.StrOpt('mkisofs_cmd', default='genisoimage', help=""" Name or path of the tool used for ISO image creation. Use the ``mkisofs_cmd`` flag to set the path where you install the ``genisoimage`` program. If ``genisoimage`` is on the system path, you do not need to change the default value. 
Possible values: * Name of the ISO image creator program, in case it is in the same directory as the nova-compute service * Path to ISO image creator program Related options: * This option is meaningful when config drives are enabled. """), ] def register_opts(conf): conf.register_opts(config_drive_opts) def list_opts(): return {"DEFAULT": config_drive_opts} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/console.py0000664000175000017500000000551400000000000016473 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg console_group = cfg.OptGroup('console', title='Console Options', help=""" Options under this group allow to tune the configuration of the console proxy service. Note: in configuration of every compute is a ``console_host`` option, which allows to select the console proxy service to connect to. """) console_opts = [ cfg.ListOpt('allowed_origins', default=[], deprecated_group='DEFAULT', deprecated_name='console_allowed_origins', help=""" Adds list of allowed origins to the console websocket proxy to allow connections from other origin hostnames. Websocket proxy matches the host header with the origin header to prevent cross-site requests. This list specifies if any there are values other than host are allowed in the origin header. Possible values: * A list where each element is an allowed origin hostnames, else an empty list """), cfg.StrOpt('ssl_ciphers', help=""" OpenSSL cipher preference string that specifies what ciphers to allow for TLS connections from clients. For example:: ssl_ciphers = "kEECDH+aECDSA+AES:kEECDH+AES+aRSA:kEDH+aRSA+AES" See the man page for the OpenSSL `ciphers` command for details of the cipher preference string format and allowed values:: https://docs.openssl.org/master/man1/openssl-ciphers/#cipher-list-format Related options: * [DEFAULT] cert * [DEFAULT] key """), cfg.StrOpt('ssl_minimum_version', default='default', choices=[ # These values must align with SSL_OPTIONS in # websockify/websocketproxy.py ('default', 'Use the underlying system OpenSSL defaults'), ('tlsv1_1', 'Require TLS v1.1 or greater for TLS connections'), ('tlsv1_2', 'Require TLS v1.2 or greater for TLS connections'), ('tlsv1_3', 'Require TLS v1.3 or greater for TLS connections'), ], help=""" Minimum allowed SSL/TLS protocol version. Related options: * [DEFAULT] cert * [DEFAULT] key """), ] def register_opts(conf): conf.register_group(console_group) conf.register_opts(console_opts, group=console_group) def list_opts(): return { console_group: console_opts, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/consoleauth.py0000664000175000017500000000343100000000000017351 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Intel, Inc. # Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg consoleauth_group = cfg.OptGroup( name='consoleauth', title='Console auth options') consoleauth_opts = [ cfg.IntOpt('token_ttl', default=600, min=0, deprecated_name='console_token_ttl', deprecated_group='DEFAULT', help=""" The lifetime of a console auth token (in seconds). A console auth token is used in authorizing console access for a user. Once the auth token time to live count has elapsed, the token is considered expired. Expired tokens are then deleted. """), cfg.BoolOpt( 'enforce_session_timeout', default=False, help=""" Enable or disable enforce session timeout for VM console. This allows operators to enforce a console session timeout. When set to True, Nova will automatically close the console session at the server end once token_ttl expires, providing enhanced control over console session duration. """), ] def register_opts(conf): conf.register_group(consoleauth_group) conf.register_opts(consoleauth_opts, group=consoleauth_group) def list_opts(): return {consoleauth_group: consoleauth_opts} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/cyborg.py0000664000175000017500000000245000000000000016312 0ustar00zuulzuul00000000000000# Copyright 2019 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading as ks_loading from oslo_config import cfg from nova.conf import utils as confutils DEFAULT_SERVICE_TYPE = 'accelerator' CYBORG_GROUP = 'cyborg' cyborg_group = cfg.OptGroup( CYBORG_GROUP, title='Cyborg Options', help=""" Configuration options for Cyborg (accelerator as a service). """) def register_opts(conf): conf.register_group(cyborg_group) confutils.register_ksa_opts(conf, cyborg_group, DEFAULT_SERVICE_TYPE, include_auth=False) def list_opts(): return { cyborg_group: ( ks_loading.get_session_conf_options() + confutils.get_ksa_adapter_opts(DEFAULT_SERVICE_TYPE)) } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/database.py0000664000175000017500000000443700000000000016600 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_config import cfg from oslo_db import options as oslo_db_opts main_db_group = cfg.OptGroup( name='database', title='Main Database Options', help=""" The *Nova Database* is the primary database which is used for information local to a *cell*. This group should **not** be configured for the ``nova-compute`` service. """) api_db_group = cfg.OptGroup( name='api_database', title='API Database Options', help=""" The *Nova API Database* is a separate database which is used for information which is used across *cells*. This database is mandatory since the Mitaka release (13.0.0). This group should **not** be configured for the ``nova-compute`` service. """) # NOTE(stephenfin): We cannot simply use 'oslo_db_options.database_opts' # directly. If we reuse a db config option for two different groups # ("api_database" and "database") and deprecate or rename a config option in # one of these groups, "oslo.config" cannot correctly determine which one to # update. That's why we copy these. main_db_opts = copy.deepcopy(oslo_db_opts.database_opts) api_db_opts = copy.deepcopy(oslo_db_opts.database_opts) # We don't support the experimental use of database reconnect on connection # lost, so remove the config option that would suggest we do main_db_opts = [opt for opt in main_db_opts if opt.name != 'use_db_reconnect'] api_db_opts = [opt for opt in api_db_opts if opt.name != 'use_db_reconnect'] def register_opts(conf): conf.register_opts(main_db_opts, group=main_db_group) conf.register_opts(api_db_opts, group=api_db_group) def list_opts(): return { main_db_group: main_db_opts, api_db_group: api_db_opts, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/devices.py0000664000175000017500000001041000000000000016442 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg devices_group = cfg.OptGroup( name='devices', title='physical or virtual device options') mdev_opts = [ cfg.ListOpt('enabled_mdev_types', default=[], deprecated_name='enabled_vgpu_types', help=""" The mdev types enabled in the compute node. Some hardware (e.g. NVIDIA GRID K1) support different mdev types. User can use this option to specify a list of enabled mdev types that may be assigned to a guest instance. If more than one single mdev type is provided, then for each *mdev type* an additional section, ``[mdev_$(MDEV_TYPE)]``, must be added to the configuration file. 
Each section then can be configured with a single configuration option, ``device_addresses``, which should be a list of PCI addresses corresponding to the physical GPU(s) or mdev-capable hardware to assign to this type. If `device_addresses` is not provided, then the related GPU type will be the default for all the found GPUs that aren't used by other types. If one or more sections are missing (meaning that a specific type is not wanted to use for at least one physical device), then Nova will only use the first type that was provided by ``[devices]/enabled_mdev_types``. If two or more sections are not set with ``device_addresses`` values, then only the first one will be used for defaulting all the non-defined GPUs to use this type. If the same PCI address is provided for two different types, nova-compute will return an InvalidLibvirtMdevConfig exception at restart. As an interim period, old configuration groups named ``[vgpu_$(MDEV_TYPE)]`` will be accepted. A valid configuration could then be:: [devices] enabled_mdev_types = nvidia-35, nvidia-36 [mdev_nvidia-35] device_addresses = 0000:84:00.0,0000:85:00.0 [vgpu_nvidia-36] device_addresses = 0000:86:00.0 Another valid configuration could be:: [devices] enabled_mdev_types = nvidia-35, nvidia-36 [mdev_nvidia-35] [mdev_nvidia-36] device_addresses = 0000:86:00.0 """) ] def register_opts(conf): conf.register_group(devices_group) conf.register_opts(mdev_opts, group=devices_group) def register_dynamic_opts(conf): """Register dynamically-generated options and groups. This must be called by the service that wishes to use the options **after** the initial configuration has been loaded. """ for mdev_type in conf.devices.enabled_mdev_types: # Register the '[mdev_$(MDEV_TYPE)]/device_addresses' opts, implicitly # registering the '[mdev_$(MDEV_TYPE)]' groups in the process opt = cfg.ListOpt('device_addresses', default=[], item_type=cfg.types.String(), deprecated_group='vgpu_%s' % mdev_type) conf.register_opt(opt, group='mdev_%s' % mdev_type) # Register the '[mdev_$(MDEV_TYPE)]/mdev_class' opts class_opt = cfg.StrOpt( 'mdev_class', default='VGPU', regex=r'^(VGPU|CUSTOM_[A-Z0-9_]+)$', max_length=255, help='Class of mediated device to manage used to differentiate ' 'between device types. The name has to be prefixed by ' 'CUSTOM_ if it is not VGPU.') conf.register_opt(class_opt, group='mdev_%s' % mdev_type) # Register the '[mdev_$(MDEV_TYPE)]/max_instances' opts max_inst_opt = cfg.IntOpt( 'max_instances', default=None, min=1, help='Number of mediated devices that type can create. ' 'If not set, it implies that we use the maximum allowed by ' 'the type.') conf.register_opt(max_inst_opt, group='mdev_%s' % mdev_type) def list_opts(): return {devices_group: mdev_opts} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/ephemeral_storage.py0000664000175000017500000000436300000000000020520 0ustar00zuulzuul00000000000000# Copyright 2015 Huawei Technology corp. # Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg ephemeral_storage_encryption_group = cfg.OptGroup( name='ephemeral_storage_encryption', title='Ephemeral storage encryption options') ephemeral_storage_encryption_opts = [ cfg.BoolOpt('enabled', default=False, help=""" Enables/disables LVM ephemeral storage encryption. """), cfg.StrOpt('cipher', default='aes-xts-plain64', help=""" Cipher-mode string to be used. The cipher and mode to be used to encrypt ephemeral storage. The set of cipher-mode combinations available depends on kernel support. According to the dm-crypt documentation, the cipher is expected to be in the format: "--". Possible values: * Any crypto option listed in ``/proc/crypto``. """), cfg.IntOpt('key_size', default=512, min=1, help=""" Encryption key length in bits. The bit length of the encryption key to be used to encrypt ephemeral storage. In XTS mode only half of the bits are used for encryption key. """), cfg.StrOpt( 'default_format', default='luks', choices=('luks',), help=""" Default ephemeral encryption format. Only 'luks' is supported at this time. Note that this does not apply to LVM ephemeral storage encryption. """), ] def register_opts(conf): conf.register_group(ephemeral_storage_encryption_group) conf.register_opts(ephemeral_storage_encryption_opts, group=ephemeral_storage_encryption_group) def list_opts(): return {ephemeral_storage_encryption_group: ephemeral_storage_encryption_opts} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/glance.py0000664000175000017500000001721000000000000016256 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading as ks_loading from oslo_config import cfg from nova.conf import utils as confutils DEFAULT_SERVICE_TYPE = 'image' glance_group = cfg.OptGroup( 'glance', title='Glance Options', help='Configuration options for the Image service') glance_opts = [ # NOTE(sdague/efried): there is intentionally no default here. This # requires configuration if ksa adapter config is not used. cfg.ListOpt('api_servers', deprecated_for_removal=True, deprecated_since='21.0.0', deprecated_reason=""" Support for image service configuration via standard keystoneauth1 Adapter options was added in the 17.0.0 Queens release. The api_servers option was retained temporarily to allow consumers time to cut over to a real load balancing solution. """, help=""" List of glance api servers endpoints available to nova. https is used for ssl-based glance api servers. NOTE: The preferred mechanism for endpoint discovery is via keystoneauth1 loading options. Only use api_servers if you need multiple endpoints and are unable to use a load balancer for some reason. Possible values: * A list of any fully qualified url of the form "scheme://hostname:port[/path]" (i.e. "http://10.0.1.0:9292" or "https://my.glance.server/image"). 
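For example, to spread image requests across two endpoints when a load balancer is not an option (hostnames are illustrative)::

    [glance]
    api_servers = http://glance01.example.org:9292,https://glance02.example.org/image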
"""), cfg.IntOpt('num_retries', default=3, min=0, help=""" Enable glance operation retries. Specifies the number of retries when uploading / downloading an image to / from glance. 0 means no retries. """), cfg.BoolOpt('verify_glance_signatures', default=False, help=""" Enable image signature verification. nova uses the image signature metadata from glance and verifies the signature of a signed image while downloading that image. If the image signature cannot be verified or if the image signature metadata is either incomplete or unavailable, then nova will not boot the image and instead will place the instance into an error state. This provides end users with stronger assurances of the integrity of the image data they are using to create servers. Related options: * The options in the `key_manager` group, as the key_manager is used for the signature validation. * Both enable_certificate_validation and default_trusted_certificate_ids below depend on this option being enabled. """), cfg.BoolOpt('enable_certificate_validation', default=False, deprecated_for_removal=True, deprecated_since='16.0.0', deprecated_reason=""" This option is intended to ease the transition for deployments leveraging image signature verification. The intended state long-term is for signature verification and certificate validation to always happen together. """, help=""" Enable certificate validation for image signature verification. During image signature verification nova will first verify the validity of the image's signing certificate using the set of trusted certificates associated with the instance. If certificate validation fails, signature verification will not be performed and the instance will be placed into an error state. This provides end users with stronger assurances that the image data is unmodified and trustworthy. If left disabled, image signature verification can still occur but the end user will not have any assurance that the signing certificate used to generate the image signature is still trustworthy. Related options: * This option only takes effect if verify_glance_signatures is enabled. * The value of default_trusted_certificate_ids may be used when this option is enabled. """), cfg.ListOpt('default_trusted_certificate_ids', default=[], help=""" List of certificate IDs for certificates that should be trusted. May be used as a default list of trusted certificate IDs for certificate validation. The value of this option will be ignored if the user provides a list of trusted certificate IDs with an instance API request. The value of this option will be persisted with the instance data if signature verification and certificate validation are enabled and if the user did not provide an alternative list. If left empty when certificate validation is enabled the user must provide a list of trusted certificate IDs otherwise certificate validation will fail. Related options: * The value of this option may be used if both verify_glance_signatures and enable_certificate_validation are enabled. """), cfg.BoolOpt('enable_rbd_download', default=False, help=""" Enable Glance image downloads directly via RBD. Allow non-rbd computes using local storage to download and cache images from Ceph via rbd rather than the Glance API via http. .. note:: This option should only be enabled when the compute itself is not also using Ceph as a backing store. For example with the libvirt driver it should only be enabled when :oslo.config:option:`libvirt.images_type` is not set to ``rbd``. 
Related options: * :oslo.config:option:`glance.rbd_user` * :oslo.config:option:`glance.rbd_connect_timeout` * :oslo.config:option:`glance.rbd_pool` * :oslo.config:option:`glance.rbd_ceph_conf` * :oslo.config:option:`libvirt.images_type` """), cfg.StrOpt('rbd_user', default='', help=""" The RADOS client name for accessing Glance images stored as rbd volumes. Related options: * This option is only used if :oslo.config:option:`glance.enable_rbd_download` is set to ``True``. """), cfg.IntOpt('rbd_connect_timeout', default=5, help=""" The RADOS client timeout in seconds when initially connecting to the cluster. Related options: * This option is only used if :oslo.config:option:`glance.enable_rbd_download` is set to ``True``. """), cfg.StrOpt('rbd_pool', default='', help=""" The RADOS pool in which the Glance images are stored as rbd volumes. Related options: * This option is only used if :oslo.config:option:`glance.enable_rbd_download` is set to ``True``. """), cfg.StrOpt('rbd_ceph_conf', default='', help=""" Path to the ceph configuration file to use. Related options: * This option is only used if :oslo.config:option:`glance.enable_rbd_download` is set to ``True``. """), cfg.BoolOpt('debug', default=False, help='Enable or disable debug logging with glanceclient.') ] deprecated_ksa_opts = { 'insecure': [cfg.DeprecatedOpt('api_insecure', group=glance_group.name)], 'cafile': [cfg.DeprecatedOpt('ca_file', group="ssl")], 'certfile': [cfg.DeprecatedOpt('cert_file', group="ssl")], 'keyfile': [cfg.DeprecatedOpt('key_file', group="ssl")], } def register_opts(conf): conf.register_group(glance_group) conf.register_opts(glance_opts, group=glance_group) confutils.register_ksa_opts( conf, glance_group, DEFAULT_SERVICE_TYPE, include_auth=False, deprecated_opts=deprecated_ksa_opts) def list_opts(): return {glance_group: ( glance_opts + ks_loading.get_session_conf_options() + confutils.get_ksa_adapter_opts(DEFAULT_SERVICE_TYPE, deprecated_opts=deprecated_ksa_opts)) } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/guestfs.py0000664000175000017500000000370200000000000016506 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg guestfs_group = cfg.OptGroup('guestfs', title='Guestfs Options', help=""" libguestfs is a set of tools for accessing and modifying virtual machine (VM) disk images. You can use this for viewing and editing files inside guests, scripting changes to VMs, monitoring disk used/free statistics, creating guests, P2V, V2V, performing backups, cloning VMs, building VMs, formatting disks and resizing disks. """) enable_guestfs_debug_opts = [ cfg.BoolOpt('debug', default=False, help=""" Enable/disables guestfs logging. This configures guestfs to debug messages and push them to OpenStack logging system. When set to True, it traces libguestfs API calls and enable verbose debug messages. 
In order to use the above feature, "libguestfs" package must be installed. Related options: Since libguestfs access and modifies VM's managed by libvirt, below options should be set to give access to those VM's. * ``libvirt.inject_key`` * ``libvirt.inject_partition`` * ``libvirt.inject_password`` """) ] def register_opts(conf): conf.register_group(guestfs_group) conf.register_opts(enable_guestfs_debug_opts, group=guestfs_group) def list_opts(): return {guestfs_group: enable_guestfs_debug_opts} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/imagecache.py0000664000175000017500000000731100000000000017074 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg imagecache_group = cfg.OptGroup( 'image_cache', title='Image Cache Options', help=""" A collection of options specific to image caching. """) imagecache_opts = [ cfg.IntOpt('manager_interval', default=2400, min=-1, deprecated_name='image_cache_manager_interval', deprecated_group='DEFAULT', help=""" Number of seconds to wait between runs of the image cache manager. Note that when using shared storage for the ``[DEFAULT]/instances_path`` configuration option across multiple nova-compute services, this periodic could process a large number of instances. Similarly, using a compute driver that manages a cluster (like vmwareapi.VMwareVCDriver) could result in processing a large number of instances. Therefore you may need to adjust the time interval for the anticipated load, or only run on one nova-compute service within a shared storage aggregate. Additional note, every time the image_cache_manager runs the timestamps of images in ``[DEFAULT]/instances_path`` are updated. Possible values: * 0: run at the default interval of 60 seconds (not recommended) * -1: disable * Any other value Related options: * ``[DEFAULT]/compute_driver`` * ``[DEFAULT]/instances_path`` """), cfg.StrOpt('subdirectory_name', default='_base', deprecated_name='image_cache_subdirectory_name', deprecated_group='DEFAULT', help=""" Location of cached images. This is NOT the full path - just a folder name relative to '$instances_path'. For per-compute-host cached images, set to '_base_$my_ip' """), cfg.BoolOpt('remove_unused_base_images', default=True, deprecated_group='DEFAULT', help=""" Should unused base images be removed? When there are no remaining instances on the hypervisor created from this base image or linked to it, the base image is considered unused. """), cfg.IntOpt('remove_unused_original_minimum_age_seconds', default=(24 * 3600), deprecated_group='DEFAULT', help=""" Unused unresized base images younger than this will not be removed. """), cfg.IntOpt('remove_unused_resized_minimum_age_seconds', default=3600, deprecated_group='libvirt', help=""" Unused resized base images younger than this will not be removed. """), cfg.IntOpt('precache_concurrency', default=1, min=1, help=""" Maximum number of compute hosts to trigger image precaching in parallel. 
When an image precache request is made, compute nodes will be contacted to initiate the download. This number constrains the number of those that will happen in parallel. Higher numbers will cause more computes to work in parallel and may result in reduced time to complete the operation, but may also DDoS the image service. Lower numbers will result in more sequential operation, lower image service load, but likely longer runtime to completion. """), ] ALL_OPTS = (imagecache_opts,) def register_opts(conf): conf.register_group(imagecache_group) conf.register_opts(imagecache_opts, group=imagecache_group) def list_opts(): return {imagecache_group: imagecache_opts} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/ironic.py0000664000175000017500000001015300000000000016307 0ustar00zuulzuul00000000000000# Copyright 2015 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading as ks_loading from oslo_config import cfg from nova.conf import utils as confutils DEFAULT_SERVICE_TYPE = 'baremetal' ironic_group = cfg.OptGroup( 'ironic', title='Ironic Options', help=""" Configuration options for Ironic driver (Bare Metal). If using the Ironic driver following options must be set: * auth_type * auth_url * project_name * username * password * project_domain_id or project_domain_name * user_domain_id or user_domain_name """) ironic_options = [ cfg.IntOpt( 'api_max_retries', # TODO(raj_singh): Change this default to some sensible number default=60, min=0, help=""" The number of times to retry when a request conflicts. If set to 0, only try once, no retries. Related options: * api_retry_interval """), cfg.IntOpt( 'api_retry_interval', default=2, min=0, help=""" The number of seconds to wait before retrying the request. Related options: * api_max_retries """), cfg.IntOpt( 'serial_console_state_timeout', default=10, min=0, help='Timeout (seconds) to wait for node serial console state ' 'changed. Set to 0 to disable timeout.'), cfg.StrOpt( 'conductor_group', deprecated_name='partition_key', default=None, mutable=True, max_length=255, regex=r'^[a-zA-Z0-9_.-]*$', help='Case-insensitive key to limit the set of nodes that may be ' 'managed by this service to the set of nodes in Ironic which ' 'have a matching conductor_group property. If unset, all ' 'available nodes will be eligible to be managed by this ' 'service. Note that setting this to the empty string (``""``) ' 'will match the default conductor group, and is different than ' 'leaving the option unset.'), cfg.StrOpt( 'shard', default=None, max_length=255, regex=r'^[a-zA-Z0-9_.-]+$', help='Specify which ironic shard this nova-compute will manage. ' 'This allows you to shard Ironic nodes between compute ' 'services across conductors and conductor groups. ' 'When a shard is set, the peer_list configuration is ignored. 
' 'We require that there is at most one nova-compute service ' 'for each shard.'), cfg.ListOpt( 'peer_list', deprecated_for_removal=True, deprecated_since='28.0.0', deprecated_reason="""\ We do not recommend using nova-compute HA, please use passive failover of a single nova-compute service instead.""", default=[], help='List of hostnames for all nova-compute services (including ' 'this host) with this conductor_group config value. ' 'Nodes matching the conductor_group value will be distributed ' 'between all services specified here. ' 'If conductor_group is unset, this option is ignored.'), ] def register_opts(conf): conf.register_group(ironic_group) conf.register_opts(ironic_options, group=ironic_group) confutils.register_ksa_opts(conf, ironic_group, DEFAULT_SERVICE_TYPE) def list_opts(): return {ironic_group: ( ironic_options + ks_loading.get_session_conf_options() + ks_loading.get_auth_common_conf_options() + ks_loading.get_auth_plugin_conf_options('v3password') + confutils.get_ksa_adapter_opts(DEFAULT_SERVICE_TYPE)) } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/key_manager.py0000664000175000017500000000445100000000000017312 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from castellan import options as castellan_opts from oslo_config import cfg key_manager_group = cfg.OptGroup( 'key_manager', title='Key manager options') key_manager_opts = [ # TODO(raj_singh): Deprecate or move this option to The Castellan library # NOTE(kfarr): The ability to use fixed_key should be deprecated and # removed and Barbican should be tested in the gate instead cfg.StrOpt( 'fixed_key', deprecated_group='keymgr', secret=True, help=""" Fixed key returned by key manager, specified in hex. Possible values: * Empty string or a key in hex value """), ] def register_opts(conf): castellan_opts.set_defaults(conf) conf.register_group(key_manager_group) conf.register_opts(key_manager_opts, group=key_manager_group) def list_opts(): # Castellan library also has a group name key_manager. So if # we append list returned from Castellan to this list, oslo will remove # one group as duplicate and only one group (either from this file or # Castellan library) will show up. So fix is to merge options of same # group name from this file and Castellan library opts = {key_manager_group.name: key_manager_opts} for group, options in castellan_opts.list_opts(): if group not in opts.keys(): opts[group] = options else: opts[group] = opts[group] + options return opts # TODO(raj_singh): When the last option "fixed_key" is removed/moved from # this file, then comment in code below and delete the code block above. 
# Castellan already returned a list which can be returned # directly from list_opts() # return castellan_opts.list_opts() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/keystone.py0000664000175000017500000000275300000000000016674 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading as ks_loading from oslo_config import cfg from nova.conf import utils as confutils DEFAULT_SERVICE_TYPE = 'identity' keystone_group = cfg.OptGroup( 'keystone', title='Keystone Options', help='Configuration options for the identity service') def register_opts(conf): conf.register_group(keystone_group) confutils.register_ksa_opts(conf, keystone_group.name, DEFAULT_SERVICE_TYPE, include_auth=False) def list_opts(): return { keystone_group: ( ks_loading.get_session_conf_options() + ks_loading.get_auth_common_conf_options() + ks_loading.get_auth_plugin_conf_options('password') + ks_loading.get_auth_plugin_conf_options('v2password') + ks_loading.get_auth_plugin_conf_options('v3password') + confutils.get_ksa_adapter_opts(DEFAULT_SERVICE_TYPE) ) } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/libvirt.py0000664000175000017500000016671100000000000016513 0ustar00zuulzuul00000000000000# needs:fix_opt_description # needs:check_deprecation_status # needs:check_opt_group_and_type # needs:fix_opt_description_indentation # needs:fix_opt_registration_consistency # Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from oslo_config import cfg from oslo_config import types from nova.conf import paths libvirt_group = cfg.OptGroup("libvirt", title="Libvirt Options", help=""" Libvirt options allows cloud administrator to configure related libvirt hypervisor driver to be used within an OpenStack deployment. Almost all of the libvirt config options are influence by ``virt_type`` config which describes the virtualization type (or so called domain type) libvirt should use for specific features such as live migration, snapshot. """) libvirt_general_opts = [ cfg.StrOpt('rescue_image_id', help=""" The ID of the image to boot from to rescue data from a corrupted instance. If the rescue REST API operation doesn't provide an ID of an image to use, the image which is referenced by this ID is used. If this option is not set, the image from the instance is used. 
Possible values: * An ID of an image or nothing. If it points to an *Amazon Machine Image* (AMI), consider to set the config options ``rescue_kernel_id`` and ``rescue_ramdisk_id`` too. If nothing is set, the image of the instance is used. Related options: * ``rescue_kernel_id``: If the chosen rescue image allows the separate definition of its kernel disk, the value of this option is used, if specified. This is the case when *Amazon*'s AMI/AKI/ARI image format is used for the rescue image. * ``rescue_ramdisk_id``: If the chosen rescue image allows the separate definition of its RAM disk, the value of this option is used if, specified. This is the case when *Amazon*'s AMI/AKI/ARI image format is used for the rescue image. """), cfg.StrOpt('rescue_kernel_id', help=""" The ID of the kernel (AKI) image to use with the rescue image. If the chosen rescue image allows the separate definition of its kernel disk, the value of this option is used, if specified. This is the case when *Amazon*'s AMI/AKI/ARI image format is used for the rescue image. Possible values: * An ID of an kernel image or nothing. If nothing is specified, the kernel disk from the instance is used if it was launched with one. Related options: * ``rescue_image_id``: If that option points to an image in *Amazon*'s AMI/AKI/ARI image format, it's useful to use ``rescue_kernel_id`` too. """), cfg.StrOpt('rescue_ramdisk_id', help=""" The ID of the RAM disk (ARI) image to use with the rescue image. If the chosen rescue image allows the separate definition of its RAM disk, the value of this option is used, if specified. This is the case when *Amazon*'s AMI/AKI/ARI image format is used for the rescue image. Possible values: * An ID of a RAM disk image or nothing. If nothing is specified, the RAM disk from the instance is used if it was launched with one. Related options: * ``rescue_image_id``: If that option points to an image in *Amazon*'s AMI/AKI/ARI image format, it's useful to use ``rescue_ramdisk_id`` too. """), cfg.StrOpt('virt_type', default='kvm', choices=('kvm', 'lxc', 'qemu', 'parallels'), help=""" Describes the virtualization type (or so called domain type) libvirt should use. The choice of this type must match the underlying virtualization strategy you have chosen for this host. Related options: * ``connection_uri``: depends on this * ``disk_prefix``: depends on this * ``cpu_mode``: depends on this * ``cpu_models``: depends on this * ``tb_cache_size``: depends on this """), cfg.StrOpt('connection_uri', default='', help=""" Overrides the default libvirt URI of the chosen virtualization type. If set, Nova will use this URI to connect to libvirt. Possible values: * An URI like ``qemu:///system``. This is only necessary if the URI differs to the commonly known URIs for the chosen virtualization type. Related options: * ``virt_type``: Influences what is used as default value here. """), cfg.BoolOpt('inject_password', default=False, help=""" Allow the injection of an admin password for instance only at ``create`` and ``rebuild`` process. There is no agent needed within the image to do this. If *libguestfs* is available on the host, it will be used. Otherwise *nbd* is used. The file system of the image will be mounted and the admin password, which is provided in the REST API call will be injected as password for the root user. If no root user is available, the instance won't be launched and an error is thrown. Be aware that the injection is *not* possible when the instance gets launched from a volume. *Linux* distribution guest only. 
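For illustration, to allow admin password injection and let libguestfs locate
the root partition automatically (values shown are examples only)::

    [libvirt]
    inject_password = True
    inject_partition = -1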
Possible values: * True: Allows the injection. * False: Disallows the injection. Any via the REST API provided admin password will be silently ignored. Related options: * ``inject_partition``: That option will decide about the discovery and usage of the file system. It also can disable the injection at all. """), cfg.BoolOpt('inject_key', default=False, help=""" Allow the injection of an SSH key at boot time. There is no agent needed within the image to do this. If *libguestfs* is available on the host, it will be used. Otherwise *nbd* is used. The file system of the image will be mounted and the SSH key, which is provided in the REST API call will be injected as SSH key for the root user and appended to the ``authorized_keys`` of that user. The SELinux context will be set if necessary. Be aware that the injection is *not* possible when the instance gets launched from a volume. This config option will enable directly modifying the instance disk and does not affect what cloud-init may do using data from config_drive option or the metadata service. *Linux* distribution guest only. Related options: * ``inject_partition``: That option will decide about the discovery and usage of the file system. It also can disable the injection at all. """), cfg.IntOpt('inject_partition', default=-2, min=-2, help=""" Determines how the file system is chosen to inject data into it. *libguestfs* is used to inject data. If libguestfs is not able to determine the root partition (because there are more or less than one root partition) or cannot mount the file system it will result in an error and the instance won't boot. Possible values: * -2 => disable the injection of data. * -1 => find the root partition with the file system to mount with libguestfs * 0 => The image is not partitioned * >0 => The number of the partition to use for the injection *Linux* distribution guest only. Related options: * ``inject_key``: If this option allows the injection of a SSH key it depends on value greater or equal to -1 for ``inject_partition``. * ``inject_password``: If this option allows the injection of an admin password it depends on value greater or equal to -1 for ``inject_partition``. * ``[guestfs]/debug`` You can enable the debug log level of libguestfs with this config option. A more verbose output will help in debugging issues. * ``virt_type``: If you use ``lxc`` as virt_type it will be treated as a single partition image """), cfg.StrOpt('live_migration_scheme', help=""" URI scheme for live migration used by the source of live migration traffic. Override the default libvirt live migration scheme (which is dependent on virt_type). If this option is set to None, nova will automatically choose a sensible default based on the hypervisor. It is not recommended that you change this unless you are very sure that hypervisor supports a particular scheme. Related options: * ``virt_type``: This option is meaningful only when ``virt_type`` is set to `kvm` or `qemu`. * ``live_migration_uri``: If ``live_migration_uri`` value is not None, the scheme used for live migration is taken from ``live_migration_uri`` instead. """), cfg.HostDomainOpt('live_migration_inbound_addr', help=""" IP address used as the live migration address for this host. This option indicates the IP address which should be used as the target for live migration traffic when migrating to this hypervisor. This metadata is then used by the source of the live migration traffic to construct a migration URI. 
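As an illustration only, a host with a dedicated live-migration network could
point migration traffic at that network's address::

    [libvirt]
    live_migration_inbound_addr = 192.0.2.10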
If this option is set to None, the hostname of the migration target compute node will be used. This option is useful in environments where the live-migration traffic can impact the network plane significantly. A separate network for live-migration traffic can then use this config option and avoids the impact on the management network. """), cfg.StrOpt('live_migration_uri', deprecated_for_removal=True, deprecated_since="15.0.0", deprecated_reason=""" live_migration_uri is deprecated for removal in favor of two other options that allow to change live migration scheme and target URI: ``live_migration_scheme`` and ``live_migration_inbound_addr`` respectively. """, help=""" Live migration target URI used by the source of live migration traffic. Override the default libvirt live migration target URI (which is dependent on virt_type). Any included "%s" is replaced with the migration target hostname, or `live_migration_inbound_addr` if set. If this option is set to None (which is the default), Nova will automatically generate the `live_migration_uri` value based on only 4 supported `virt_type` in following list: * 'kvm': 'qemu+tcp://%s/system' * 'qemu': 'qemu+tcp://%s/system' * 'parallels': 'parallels+tcp://%s/system' Related options: * ``live_migration_inbound_addr``: If ``live_migration_inbound_addr`` value is not None and ``live_migration_tunnelled`` is False, the ip/hostname address of target compute node is used instead of ``live_migration_uri`` as the uri for live migration. * ``live_migration_scheme``: If ``live_migration_uri`` is not set, the scheme used for live migration is taken from ``live_migration_scheme`` instead. """), cfg.BoolOpt('live_migration_tunnelled', default=False, deprecated_for_removal=True, deprecated_since='23.0.0', deprecated_reason=""" The "tunnelled live migration" has two inherent limitations: it cannot handle live migration of disks in a non-shared storage setup; and it has a huge performance cost. Both these problems are solved by ``live_migration_with_native_tls`` (requires a pre-configured TLS environment), which is the recommended approach for securing all live migration streams.""", help=""" Enable tunnelled migration. This option enables the tunnelled migration feature, where migration data is transported over the libvirtd connection. If enabled, we use the VIR_MIGRATE_TUNNELLED migration flag, avoiding the need to configure the network to allow direct hypervisor to hypervisor communication. If False, use the native transport. If not set, Nova will choose a sensible default based on, for example the availability of native encryption support in the hypervisor. Enabling this option will definitely impact performance massively. Note that this option is NOT compatible with use of block migration. """), cfg.IntOpt('live_migration_bandwidth', default=0, help=""" Maximum bandwidth(in MiB/s) to be used during migration. If set to 0, the hypervisor will choose a suitable default. Some hypervisors do not support this feature and will return an error if bandwidth is not 0. Please refer to the libvirt documentation for further details. """), cfg.IntOpt('live_migration_downtime', default=500, min=100, help=""" Target maximum period of time Nova will try to keep the instance paused during the last part of the memory copy, in *milliseconds*. Minimum downtime is 100ms. You can increase this value if you want to allow live-migrations to complete faster, or avoid live-migration timeout errors by allowing the guest to be paused for longer during the live-migration switch over. 
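For example, to tolerate a longer pause during switch over (value is
illustrative, in milliseconds)::

    [libvirt]
    live_migration_downtime = 1000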
This value may be exceeded if there is any reduction on the transfer rate after the VM is paused. Related options: * live_migration_completion_timeout """), cfg.IntOpt('live_migration_downtime_steps', default=10, min=3, help=""" Number of incremental steps to reach max downtime value. Minimum number of steps is 3. """), cfg.IntOpt('live_migration_downtime_delay', default=75, min=3, help=""" Time to wait, in seconds, between each step increase of the migration downtime. Minimum delay is 3 seconds. Value is per GiB of guest RAM + disk to be transferred, with lower bound of a minimum of 2 GiB per device. """), cfg.IntOpt('live_migration_completion_timeout', default=800, min=0, mutable=True, help=""" Time to wait, in seconds, for migration to successfully complete transferring data before aborting the operation. Value is per GiB of guest RAM + disk to be transferred, with lower bound of a minimum of 2 GiB. Should usually be larger than downtime delay * downtime steps. Set to 0 to disable timeouts. Related options: * live_migration_downtime * live_migration_downtime_steps * live_migration_downtime_delay """), cfg.StrOpt('live_migration_timeout_action', default='abort', choices=('abort', 'force_complete'), mutable=True, help=""" This option will be used to determine what action will be taken against a VM after ``live_migration_completion_timeout`` expires. By default, the live migrate operation will be aborted after completion timeout. If it is set to ``force_complete``, the compute service will either pause the VM or trigger post-copy depending on if post copy is enabled and available (``live_migration_permit_post_copy`` is set to True). Related options: * live_migration_completion_timeout * live_migration_permit_post_copy """), cfg.BoolOpt('live_migration_permit_post_copy', default=False, help=""" This option allows nova to switch an on-going live migration to post-copy mode, i.e., switch the active VM to the one on the destination node before the migration is complete, therefore ensuring an upper bound on the memory that needs to be transferred. Post-copy requires libvirt>=1.3.3 and QEMU>=2.5.0. When permitted, post-copy mode will be automatically activated if we reach the timeout defined by ``live_migration_completion_timeout`` and ``live_migration_timeout_action`` is set to 'force_complete'. Note if you change to no timeout or choose to use 'abort', i.e. ``live_migration_completion_timeout = 0``, then there will be no automatic switch to post-copy. The live-migration force complete API also uses post-copy when permitted. If post-copy mode is not available, force complete falls back to pausing the VM to ensure the live-migration operation will complete. When using post-copy mode, if the source and destination hosts lose network connectivity, the VM being live-migrated will need to be rebooted. For more details, please see the Administration guide. Related options: * live_migration_permit_auto_converge * live_migration_timeout_action """), cfg.BoolOpt('live_migration_permit_auto_converge', default=False, help=""" This option allows nova to start live migration with auto converge on. Auto converge throttles down CPU if a progress of on-going live migration is slow. Auto converge will only be used if this flag is set to True and post copy is not permitted or post copy is unavailable due to the version of libvirt and QEMU in use. 
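For illustration, a deployment that prefers CPU throttling over post-copy
could set::

    [libvirt]
    live_migration_permit_auto_converge = True
    live_migration_permit_post_copy = False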
Related options: * live_migration_permit_post_copy """), cfg.StrOpt('snapshot_image_format', choices=[ ('raw', 'RAW disk format'), ('qcow2', 'KVM default disk format'), ('vmdk', 'VMWare default disk format'), ('vdi', 'VirtualBox default disk format'), ], help=""" Determine the snapshot image format when sending to the image service. If set, this decides what format is used when sending the snapshot to the image service. If not set, defaults to same type as source image. """), cfg.BoolOpt('live_migration_with_native_tls', default=False, help=""" Use QEMU-native TLS encryption when live migrating. This option will allow both migration stream (guest RAM plus device state) *and* disk stream to be transported over native TLS, i.e. TLS support built into QEMU. Prerequisite: TLS environment is configured correctly on all relevant Compute nodes. This means, Certificate Authority (CA), server, client certificates, their corresponding keys, and their file permissions are in place, and are validated. Notes: * To have encryption for migration stream and disk stream (also called: "block migration"), ``live_migration_with_native_tls`` is the preferred config attribute instead of ``live_migration_tunnelled``. * The ``live_migration_tunnelled`` will be deprecated in the long-term for two main reasons: (a) it incurs a huge performance penalty; and (b) it is not compatible with block migration. Therefore, if your compute nodes have at least libvirt 4.4.0 and QEMU 2.11.0, it is strongly recommended to use ``live_migration_with_native_tls``. * The ``live_migration_tunnelled`` and ``live_migration_with_native_tls`` should not be used at the same time. * Unlike ``live_migration_tunnelled``, the ``live_migration_with_native_tls`` *is* compatible with block migration. That is, with this option, NBD stream, over which disks are migrated to a target host, will be encrypted. Related options: ``live_migration_tunnelled``: This transports migration stream (but not disk stream) over libvirtd. """), cfg.StrOpt('disk_prefix', help=""" Override the default disk prefix for the devices attached to an instance. If set, this is used to identify a free disk device name for a bus. Possible values: * Any prefix which will result in a valid disk device name like 'sda' or 'hda' for example. This is only necessary if the device names differ to the commonly known device name prefixes for a virtualization type such as: sd, xvd, uvd, vd. Related options: * ``virt_type``: Influences which device type is used, which determines the default disk prefix. """), cfg.IntOpt('wait_soft_reboot_seconds', default=120, help='Number of seconds to wait for instance to shut down after' ' soft reboot request is made. We fall back to hard reboot' ' if instance does not shutdown within this window.'), cfg.StrOpt('cpu_mode', choices=[ ('host-model', 'Clone the host CPU feature flags'), ('host-passthrough', 'Use the host CPU model exactly'), ('custom', 'Use the CPU model in ``[libvirt]cpu_models``'), ('none', "Don't set a specific CPU model. For instances with " "``[libvirt] virt_type`` as KVM/QEMU, the default CPU model from " "QEMU will be used, which provides a basic set of CPU features " "that are compatible with most hosts"), ], help=""" Is used to set the CPU mode an instance should have. If ``virt_type="kvm|qemu"``, it will default to ``host-model``, otherwise it will default to ``none``. Related options: * ``cpu_models``: This should be set ONLY when ``cpu_mode`` is set to ``custom``. 
Otherwise, it would result in an error and the instance launch will fail. """), cfg.ListOpt('cpu_models', deprecated_name='cpu_model', default=[], help=""" An ordered list of CPU models the host supports. It is expected that the list is ordered so that the more common and less advanced CPU models are listed earlier. Here is an example: ``SandyBridge,IvyBridge,Haswell,Broadwell``, the latter CPU model's features is richer that the previous CPU model. Possible values: * The named CPU models can be found via ``virsh cpu-models ARCH``, where ARCH is your host architecture. Related options: * ``cpu_mode``: This should be set to ``custom`` ONLY when you want to configure (via ``cpu_models``) a specific named CPU model. Otherwise, it would result in an error and the instance launch will fail. * ``virt_type``: Only the virtualization types ``kvm`` and ``qemu`` use this. .. note:: Be careful to only specify models which can be fully supported in hardware. """), cfg.ListOpt( 'cpu_model_extra_flags', item_type=types.String( ignore_case=True, ), default=[], help=""" Enable or disable guest CPU flags. To explicitly enable or disable CPU flags, use the ``+flag`` or ``-flag`` notation -- the ``+`` sign will enable the CPU flag for the guest, while a ``-`` sign will disable it. If neither ``+`` nor ``-`` is specified, the flag will be enabled, which is the default behaviour. For example, if you specify the following (assuming the said CPU model and features are supported by the host hardware and software):: [libvirt] cpu_mode = custom cpu_models = Cascadelake-Server cpu_model_extra_flags = -hle, -rtm, +ssbd, mtrr Nova will disable the ``hle`` and ``rtm`` flags for the guest; and it will enable ``ssbd`` and ``mttr`` (because it was specified with neither ``+`` nor ``-`` prefix). The CPU flags are case-insensitive. In the following example, the ``pdpe1gb`` flag will be disabled for the guest; ``vmx`` and ``pcid`` flags will be enabled:: [libvirt] cpu_mode = custom cpu_models = Haswell-noTSX-IBRS cpu_model_extra_flags = -PDPE1GB, +VMX, pcid Specifying extra CPU flags is valid in combination with all the three possible values of ``cpu_mode`` config attribute: ``custom`` (this also requires an explicit CPU model to be specified via the ``cpu_models`` config attribute), ``host-model``, or ``host-passthrough``. There can be scenarios where you may need to configure extra CPU flags even for ``host-passthrough`` CPU mode, because sometimes QEMU may disable certain CPU features. An example of this is Intel's "invtsc" (Invariable Time Stamp Counter) CPU flag -- if you need to expose this flag to a Nova instance, you need to explicitly enable it. The possible values for ``cpu_model_extra_flags`` depends on the CPU model in use. Refer to ``/usr/share/libvirt/cpu_map/*.xml`` for possible CPU feature flags for a given CPU model. A special note on a particular CPU flag: ``pcid`` (an Intel processor feature that alleviates guest performance degradation as a result of applying the 'Meltdown' CVE fixes). When configuring this flag with the ``custom`` CPU mode, not all CPU models (as defined by QEMU and libvirt) need it: * The only virtual CPU models that include the ``pcid`` capability are Intel "Haswell", "Broadwell", and "Skylake" variants. * The libvirt / QEMU CPU models "Nehalem", "Westmere", "SandyBridge", and "IvyBridge" will _not_ expose the ``pcid`` capability by default, even if the host CPUs by the same name include it. I.e. 'PCID' needs to be explicitly specified when using the said virtual CPU models. 
The libvirt driver's default CPU mode, ``host-model``, will do the right thing with respect to handling 'PCID' CPU flag for the guest -- *assuming* you are running updated processor microcode, host and guest kernel, libvirt, and QEMU. The other mode, ``host-passthrough``, checks if 'PCID' is available in the hardware, and if so directly passes it through to the Nova guests. Thus, in context of 'PCID', with either of these CPU modes (``host-model`` or ``host-passthrough``), there is no need to use the ``cpu_model_extra_flags``. Related options: * cpu_mode * cpu_models """), cfg.StrOpt('snapshots_directory', default='$instances_path/snapshots', help='Location where libvirt driver will store snapshots ' 'before uploading them to image service'), cfg.ListOpt('disk_cachemodes', default=[], help=""" Specific cache modes to use for different disk types. For example: file=directsync,block=none,network=writeback For local or direct-attached storage, it is recommended that you use writethrough (default) mode, as it ensures data integrity and has acceptable I/O performance for applications running in the guest, especially for read operations. However, caching mode none is recommended for remote NFS storage, because direct I/O operations (O_DIRECT) perform better than synchronous I/O operations (with O_SYNC). Caching mode none effectively turns all guest I/O operations into direct I/O operations on the host, which is the NFS client in this environment. Possible cache modes: * default: "It Depends" -- For Nova-managed disks, ``none``, if the host file system is capable of Linux's 'O_DIRECT' semantics; otherwise ``writeback``. For volume drivers, the default is driver-dependent: ``none`` for everything except for SMBFS and Virtuzzo (which use ``writeback``). * none: With caching mode set to none, the host page cache is disabled, but the disk write cache is enabled for the guest. In this mode, the write performance in the guest is optimal because write operations bypass the host page cache and go directly to the disk write cache. If the disk write cache is battery-backed, or if the applications or storage stack in the guest transfer data properly (either through fsync operations or file system barriers), then data integrity can be ensured. However, because the host page cache is disabled, the read performance in the guest would not be as good as in the modes where the host page cache is enabled, such as writethrough mode. Shareable disk devices, like for a multi-attachable block storage volume, will have their cache mode set to 'none' regardless of configuration. * writethrough: With caching set to writethrough mode, the host page cache is enabled, but the disk write cache is disabled for the guest. Consequently, this caching mode ensures data integrity even if the applications and storage stack in the guest do not transfer data to permanent storage properly (either through fsync operations or file system barriers). Because the host page cache is enabled in this mode, the read performance for applications running in the guest is generally better. However, the write performance might be reduced because the disk write cache is disabled. * writeback: With caching set to writeback mode, both the host page cache and the disk write cache are enabled for the guest. Because of this, the I/O performance for applications running in the guest is good, but the data is not protected in a power failure. As a result, this caching mode is recommended only for temporary data where potential data loss is not a concern. 
NOTE: Certain backend disk mechanisms may provide safe writeback cache semantics. Specifically those that bypass the host page cache, such as QEMU's integrated RBD driver. Ceph documentation recommends setting this to writeback for maximum performance while maintaining data safety. * directsync: Like "writethrough", but it bypasses the host page cache. * unsafe: Caching mode of unsafe ignores cache transfer operations completely. As its name implies, this caching mode should be used only for temporary data where data loss is not a concern. This mode can be useful for speeding up guest installations, but you should switch to another caching mode in production environments. """), cfg.StrOpt('rng_dev_path', default='/dev/urandom', help=""" The path to an RNG (Random Number Generator) device that will be used as the source of entropy on the host. Since libvirt 1.3.4, any path (that returns random numbers when read) is accepted. The recommended source of entropy is ``/dev/urandom`` -- it is non-blocking, therefore relatively fast; and avoids the limitations of ``/dev/random``, which is a legacy interface. For more details (and comparison between different RNG sources), refer to the "Usage" section in the Linux kernel API documentation for ``[u]random``: http://man7.org/linux/man-pages/man4/urandom.4.html and http://man7.org/linux/man-pages/man7/random.7.html. """), cfg.ListOpt('hw_machine_type', help='For qemu or KVM guests, set this option to specify ' 'a default machine type per host architecture. ' 'You can find a list of supported machine types ' 'in your environment by checking the output of the ' ':command:`virsh capabilities` command. The format of ' 'the value for this config option is ' '``host-arch=machine-type``. For example: ' '``x86_64=machinetype1,armv7l=machinetype2``.'), cfg.StrOpt('sysinfo_serial', default='unique', choices=( ('none', 'A serial number entry is not added to the guest ' 'domain xml.'), ('os', 'A UUID serial number is generated from the host ' '``/etc/machine-id`` file.'), ('hardware', 'A UUID for the host hardware as reported by ' 'libvirt. This is typically from the host ' 'SMBIOS data, unless it has been overridden ' 'in ``libvirtd.conf``.'), ('auto', 'Uses the "os" source if possible, else ' '"hardware".'), ('unique', 'Uses instance UUID as the serial number.'), ), help=""" The data source used to the populate the host "serial" UUID exposed to guest in the virtual BIOS. All choices except ``unique`` will change the serial when migrating the instance to another host. Changing the choice of this option will also affect existing instances on this host once they are stopped and started again. It is recommended to use the default choice (``unique``) since that will not change when an instance is migrated. However, if you have a need for per-host serials in addition to per-instance serial numbers, then consider restricting flavors via host aggregates. """ ), cfg.IntOpt('mem_stats_period_seconds', default=10, help='A number of seconds to memory usage statistics period. ' 'Zero or negative value mean to disable memory usage ' 'statistics.'), cfg.ListOpt('uid_maps', default=[], help='List of uid targets and ranges.' 'Syntax is guest-uid:host-uid:count. ' 'Maximum of 5 allowed.'), cfg.ListOpt('gid_maps', default=[], help='List of guid targets and ranges.' 'Syntax is guest-gid:host-gid:count. ' 'Maximum of 5 allowed.'), cfg.IntOpt('realtime_scheduler_priority', default=1, help='In a realtime host context vCPUs for guest will run in ' 'that scheduling priority. 
Priority depends on the host ' 'kernel (usually 1-99)'), cfg.ListOpt('enabled_perf_events', default=[], help= """ Performance events to monitor and collect statistics for. This will allow you to specify a list of events to monitor low-level performance of guests, and collect related statistics via the libvirt driver, which in turn uses the Linux kernel's ``perf`` infrastructure. With this config attribute set, Nova will generate libvirt guest XML to monitor the specified events. For example, to monitor the count of CPU cycles (total/elapsed) and the count of cache misses, enable them as follows:: [libvirt] enabled_perf_events = cpu_clock, cache_misses Possible values: A string list. The list of supported events can be found `here`__. Note that Intel CMT events - ``cmt``, ``mbmbt`` and ``mbml`` - are unsupported by recent Linux kernel versions (4.14+) and will be ignored by nova. __ https://libvirt.org/formatdomain.html#elementsPerf. """), cfg.IntOpt('num_pcie_ports', default=0, min=0, max=28, help= """ The number of PCIe ports an instance will get. Libvirt allows a custom number of PCIe ports (pcie-root-port controllers) a target instance will get. Some will be used by default, rest will be available for hotplug use. By default we have just 1-2 free ports which limits hotplug. More info: https://github.com/qemu/qemu/blob/master/docs/pcie.txt Due to QEMU limitations for aarch64/virt maximum value is set to '28'. Default value '0' moves calculating amount of ports to libvirt. """), cfg.IntOpt('file_backed_memory', default=0, min=0, help=""" Available capacity in MiB for file-backed memory. Set to 0 to disable file-backed memory. When enabled, instances will create memory files in the directory specified in ``/etc/libvirt/qemu.conf``'s ``memory_backing_dir`` option. The default location is ``/var/lib/libvirt/qemu/ram``. When enabled, the value defined for this option is reported as the node memory capacity. Compute node system memory will be used as a cache for file-backed memory, via the kernel's pagecache mechanism. .. note:: This feature is not compatible with hugepages. .. note:: This feature is not compatible with memory overcommit. Related options: * ``virt_type`` must be set to ``kvm`` or ``qemu``. * ``ram_allocation_ratio`` must be set to 1.0. """), cfg.IntOpt('num_memory_encrypted_guests', default=None, min=0, deprecated_for_removal=True, deprecated_since='32.0.0', deprecated_reason=""" This option is effective for only SEV and has no effect for SEV-ES. Libvirt is capable to present maximum number of SEV guests and one of SEV-ES guests since 8.0.0 and this option is no longer necessary. """, help=""" Maximum number of guests with encrypted memory which can run concurrently on this compute host. For now this is only relevant for AMD machines which support SEV (Secure Encrypted Virtualization). Such machines have a limited number of slots in their memory controller for storing encryption keys. Each running guest with encrypted memory will consume one of these slots. The option may be reused for other equivalent technologies in the future. If the machine does not support memory encryption, the option will be ignored and inventory will be set to 0. If the machine does support memory encryption and this option is not set, the driver detects maximum number of SEV guests from the libvirt API which is available since v8.0.0. Setting this option overrides the detected limit, unless the given value is not larger than the detected limit. 
On the other hand, if an older version of libvirt is used, ``None`` means an effectively unlimited inventory, i.e. no limit will be imposed by Nova on the number of SEV guests which can be launched, even though the underlying hardware will enforce its own limit. .. note:: It is recommended to read :ref:`the deployment documentation's section on this option ` before deciding whether to configure this setting or leave it at the default. Related options: * :oslo.config:option:`libvirt.virt_type` must be set to ``kvm``. * It's recommended to consider including ``x86_64=q35`` in :oslo.config:option:`libvirt.hw_machine_type`; see :ref:`deploying-sev-capable-infrastructure` for more on this. """), cfg.IntOpt('device_detach_attempts', default=8, min=1, help=""" Maximum number of attempts the driver tries to detach a device in libvirt. Related options: * :oslo.config:option:`libvirt.device_detach_timeout` """), cfg.IntOpt('device_detach_timeout', default=20, min=1, help=""" Maximum number of seconds the driver waits for the success or the failure event from libvirt for a given device detach attempt before it re-trigger the detach. Related options: * :oslo.config:option:`libvirt.device_detach_attempts` """), cfg.IntOpt('tb_cache_size', min=0, help=""" Qemu>=5.0.0 bumped the default tb-cache size to 1GiB(from 32MiB) and this made it difficult to run multiple guest VMs on systems running with lower memory. With Libvirt>=8.0.0 this config option can be used to configure lower tb-cache size. Set it to > 0 to configure tb-cache for guest VMs. Related options: * ``compute_driver`` (libvirt) * ``virt_type`` (qemu) """), cfg.StrOpt('migration_inbound_addr', default='$my_ip', help=""" The address used as the migration address for this host. This option indicates the IP address, hostname, or FQDN which should be used as the target for cold migration, resize, and evacuate traffic when moving to this hypervisor. This metadata is then used by the source of the migration traffic to construct the commands used to copy data (e.g. disk image) to the destination. An included "%s" is replaced with the hostname of the migration target hypervisor. Related options: * ``my_ip`` * ``live_migration_inbound_addr`` """), ] libvirt_imagebackend_opts = [ cfg.StrOpt('images_type', default='default', choices=('raw', 'flat', 'qcow2', 'lvm', 'rbd', 'ploop', 'default'), help=""" VM Images format. If default is specified, then use_cow_images flag is used instead of this one. Related options: * compute.use_cow_images * images_volume_group * [workarounds]/ensure_libvirt_rbd_instance_dir_cleanup * compute.force_raw_images """), cfg.StrOpt('images_volume_group', help=""" LVM Volume Group that is used for VM images, when you specify images_type=lvm Related options: * images_type """), cfg.BoolOpt('sparse_logical_volumes', default=False, deprecated_for_removal=True, deprecated_since='18.0.0', deprecated_reason=""" Sparse logical volumes is a feature that is not tested hence not supported. LVM logical volumes are preallocated by default. If you want thin provisioning, use Cinder thin-provisioned volumes. """, help=""" Create sparse logical volumes (with virtualsize) if this flag is set to True. 
"""), cfg.StrOpt('images_rbd_pool', default='rbd', help='The RADOS pool in which rbd volumes are stored'), cfg.StrOpt('images_rbd_ceph_conf', default='', # default determined by librados help='Path to the ceph configuration file to use'), cfg.StrOpt('images_rbd_glance_store_name', default='', help=""" The name of the Glance store that represents the rbd cluster in use by this node. If set, this will allow Nova to request that Glance copy an image from an existing non-local store into the one named by this option before booting so that proper Copy-on-Write behavior is maintained. Related options: * images_type - must be set to ``rbd`` * images_rbd_glance_copy_poll_interval - controls the status poll frequency * images_rbd_glance_copy_timeout - controls the overall copy timeout """), cfg.IntOpt('images_rbd_glance_copy_poll_interval', default=15, help=""" The interval in seconds with which to poll Glance after asking for it to copy an image to the local rbd store. This affects how often we ask Glance to report on copy completion, and thus should be short enough that we notice quickly, but not too aggressive that we generate undue load on the Glance server. Related options: * images_type - must be set to ``rbd`` * images_rbd_glance_store_name - must be set to a store name """), cfg.IntOpt('images_rbd_glance_copy_timeout', default=600, help=""" The overall maximum time we will wait for Glance to complete an image copy to our local rbd store. This should be long enough to allow large images to be copied over the network link between our local store and the one where images typically reside. The downside of setting this too long is just to catch the case where the image copy is stalled or proceeding too slowly to be useful. Actual errors will be reported by Glance and noticed according to the poll interval. Related options: * images_type - must be set to ``rbd`` * images_rbd_glance_store_name - must be set to a store name * images_rbd_glance_copy_poll_interval - controls the failure time-to-notice """), cfg.StrOpt('hw_disk_discard', choices=('ignore', 'unmap'), help=""" Discard option for nova managed disks. Requires: * Libvirt >= 1.0.6 * Qemu >= 1.5 (raw format) * Qemu >= 1.6 (qcow2 format) """), ] libvirt_lvm_opts = [ cfg.StrOpt('volume_clear', default='zero', choices=[ ('zero', 'Overwrite volumes with zeroes'), ('shred', 'Overwrite volumes repeatedly'), ('none', 'Do not wipe deleted volumes'), ], help=""" Method used to wipe ephemeral disks when they are deleted. Only takes effect if LVM is set as backing storage. Related options: * images_type - must be set to ``lvm`` * volume_clear_size """), cfg.IntOpt('volume_clear_size', default=0, min=0, help=""" Size of area in MiB, counting from the beginning of the allocated volume, that will be cleared using method set in ``volume_clear`` option. Possible values: * 0 - clear whole volume * >0 - clear specified amount of MiB Related options: * images_type - must be set to ``lvm`` * volume_clear - must be set and the value must be different than ``none`` for this option to have any impact """), ] libvirt_utils_opts = [ cfg.BoolOpt('snapshot_compression', default=False, help=""" Enable snapshot compression for ``qcow2`` images. Note: you can set ``snapshot_image_format`` to ``qcow2`` to force all snapshots to be in ``qcow2`` format, independently from their original image type. 
Related options: * snapshot_image_format """), ] libvirt_vif_opts = [ cfg.BoolOpt('use_virtio_for_bridges', default=True, help='Use virtio for bridge interfaces with KVM/QEMU'), ] libvirt_volume_opts = [ cfg.BoolOpt('volume_use_multipath', default=False, deprecated_name='iscsi_use_multipath', help=""" Use multipath connection of the iSCSI or FC volume Volumes can be connected in the LibVirt as multipath devices. This will provide high availability and fault tolerance. """), cfg.BoolOpt('volume_enforce_multipath', default=False, help=""" Require multipathd when attaching a volume to an instance. When enabled, attachment of volumes will be aborted when multipathd is not running. Otherwise, it will fallback to single path without error. When enabled, the libvirt driver checks availability of mulitpathd when it is initialized, and the compute service fails to start if multipathd is not running. Related options: * volume_use_multipath must be True when this is True """), cfg.IntOpt('num_volume_scan_tries', deprecated_name='num_iscsi_scan_tries', default=5, help=""" Number of times to scan given storage protocol to find volume. """), ] libvirt_volume_aoe_opts = [ cfg.IntOpt('num_aoe_discover_tries', default=3, help=""" Number of times to rediscover AoE target to find volume. Nova provides support for block storage attaching to hosts via AOE (ATA over Ethernet). This option allows the user to specify the maximum number of retry attempts that can be made to discover the AoE device. """) ] libvirt_volume_iscsi_opts = [ cfg.StrOpt('iscsi_iface', deprecated_name='iscsi_transport', help=""" The iSCSI transport iface to use to connect to target in case offload support is desired. Default format is of the form ``.``, where ```` is one of (``be2iscsi``, ``bnx2i``, ``cxgb3i``, ``cxgb4i``, ``qla4xxx``, ``ocs``, ``tcp``) and ```` is the MAC address of the interface and can be generated via the ``iscsiadm -m iface`` command. Do not confuse the ``iscsi_iface`` parameter to be provided here with the actual transport name. """) # iser is also supported, but use LibvirtISERVolumeDriver # instead ] libvirt_volume_iser_opts = [ cfg.IntOpt('num_iser_scan_tries', default=5, help=""" Number of times to scan iSER target to find volume. iSER is a server network protocol that extends iSCSI protocol to use Remote Direct Memory Access (RDMA). This option allows the user to specify the maximum number of scan attempts that can be made to find iSER volume. """), cfg.BoolOpt('iser_use_multipath', default=False, help=""" Use multipath connection of the iSER volume. iSER volumes can be connected as multipath devices. This will provide high availability and fault tolerance. """) ] libvirt_volume_net_opts = [ cfg.StrOpt('rbd_user', help=""" The RADOS client name for accessing rbd(RADOS Block Devices) volumes. Libvirt will refer to this user when connecting and authenticating with the Ceph RBD server. """), cfg.StrOpt('rbd_secret_uuid', help=""" The libvirt UUID of the secret for the rbd_user volumes. """), cfg.IntOpt('rbd_connect_timeout', default=5, help=""" The RADOS client timeout in seconds when initially connecting to the cluster. """), cfg.IntOpt('rbd_destroy_volume_retry_interval', default=5, min=0, help=""" Number of seconds to wait between each consecutive retry to destroy a RBD volume. Related options: * [libvirt]/images_type = 'rbd' """), cfg.IntOpt('rbd_destroy_volume_retries', default=12, min=0, help=""" Number of retries to destroy a RBD volume. 
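For illustration, to keep retrying for roughly two minutes at ten-second
intervals (values shown are examples only)::

    [libvirt]
    rbd_destroy_volume_retry_interval = 10
    rbd_destroy_volume_retries = 12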
Related options: * [libvirt]/images_type = 'rbd' """), ] libvirt_volume_nfs_opts = [ cfg.StrOpt('nfs_mount_point_base', default=paths.state_path_def('mnt'), help=""" Directory where the NFS volume is mounted on the compute node. The default is 'mnt' directory of the location where nova's Python module is installed. NFS provides shared storage for the OpenStack Block Storage service. Possible values: * A string representing absolute path of mount point. """), cfg.StrOpt('nfs_mount_options', help=""" Mount options passed to the NFS client. See section of the nfs man page for details. Mount options controls the way the filesystem is mounted and how the NFS client behaves when accessing files on this mount point. Possible values: * Any string representing mount options separated by commas. * Example string: vers=3,lookupcache=pos """), ] libvirt_volume_ceph_opts = [ cfg.StrOpt('ceph_mount_point_base', default=paths.state_path_def('mnt'), help=""" Directory where the ceph volume for each manila share is mounted on the compute node. The default is 'mnt' directory of the location where nova's Python module is installed. Possible values: * A string representing absolute path of mount point. """), cfg.ListOpt('ceph_mount_options', help=""" Mount options passed to the ceph client. See section of the ceph man page for details. Mount options controls the way the filesystem is mounted and how the ceph client behaves when accessing files on this mount point. Possible values: * Any string representing mount options separated by commas. * Example string: vers=3,lookupcache=pos """), ] libvirt_volume_quobyte_opts = [ cfg.StrOpt('quobyte_mount_point_base', default=paths.state_path_def('mnt'), deprecated_for_removal=True, deprecated_since="31.0.0", deprecated_reason=""" Quobyte volume driver in cinder was marked unsupported. Quobyte volume support will be removed from nova when the volume driver is removed from cinder. """, help=""" Directory where the Quobyte volume is mounted on the compute node. Nova supports Quobyte volume driver that enables storing Block Storage service volumes on a Quobyte storage back end. This Option specifies the path of the directory where Quobyte volume is mounted. Possible values: * A string representing absolute path of mount point. """), cfg.StrOpt('quobyte_client_cfg', deprecated_for_removal=True, deprecated_since="31.0.0", deprecated_reason=""" Quobyte volume driver in cinder was marked unsupported. Quobyte volume support will be removed from nova when the volume driver is removed from cinder. """, help='Path to a Quobyte Client configuration file.'), ] libvirt_volume_smbfs_opts = [ cfg.StrOpt('smbfs_mount_point_base', default=paths.state_path_def('mnt'), deprecated_for_removal=True, deprecated_since="31.0.0", deprecated_reason=""" Windows SMB volume driver in cinder was marked unsupported. SMBFS volume support will be removed from nova when the volume driver is removed from cinder. """, help=""" Directory where the SMBFS shares are mounted on the compute node. """), cfg.StrOpt('smbfs_mount_options', default='', deprecated_for_removal=True, deprecated_since="31.0.0", deprecated_reason=""" Windows SMB volume driver in cinder was marked unsupported. SMBFS volume support will be removed from nova when the volume driver is removed from cinder. """, help=""" Mount options passed to the SMBFS client. Provide SMBFS options as a single string containing all parameters. See mount.cifs man page for details. Note that the libvirt-qemu ``uid`` and ``gid`` must be specified. 
"""), ] libvirt_remotefs_opts = [ cfg.StrOpt('remote_filesystem_transport', default='ssh', choices=('ssh', 'rsync'), help=""" libvirt's transport method for remote file operations. Because libvirt cannot use RPC to copy files over network to/from other compute nodes, other method must be used for: * creating directory on remote host * creating file on remote host * removing file from remote host * copying file to remote host """) ] libvirt_volume_vzstorage_opts = [ cfg.StrOpt('vzstorage_mount_point_base', default=paths.state_path_def('mnt'), deprecated_for_removal=True, deprecated_since="31.0.0", deprecated_reason=""" Virtuozzo Storage volume driver in cinder was marked unsupported. Virtuozzo Storage volume support will be removed from nova when the volume driver is removed from cinder. """, help=""" Directory where the Virtuozzo Storage clusters are mounted on the compute node. This option defines non-standard mountpoint for Vzstorage cluster. Related options: * vzstorage_mount_* group of parameters """ ), cfg.StrOpt('vzstorage_mount_user', default='stack', deprecated_for_removal=True, deprecated_since="31.0.0", deprecated_reason=""" Virtuozzo Storage volume driver in cinder was marked unsupported. Virtuozzo Storage volume support will be removed from nova when the volume driver is removed from cinder. """, help=""" Mount owner user name. This option defines the owner user of Vzstorage cluster mountpoint. Related options: * vzstorage_mount_* group of parameters """ ), cfg.StrOpt('vzstorage_mount_group', default='qemu', help=""" Mount owner group name. This option defines the owner group of Vzstorage cluster mountpoint. Related options: * vzstorage_mount_* group of parameters """ ), cfg.StrOpt('vzstorage_mount_perms', default='0770', help=""" Mount access mode. This option defines the access bits of Vzstorage cluster mountpoint, in the format similar to one of chmod(1) utility, like this: 0770. It consists of one to four digits ranging from 0 to 7, with missing lead digits assumed to be 0's. Related options: * vzstorage_mount_* group of parameters """ ), cfg.StrOpt('vzstorage_log_path', default='/var/log/vstorage/%(cluster_name)s/nova.log.gz', help=""" Path to vzstorage client log. This option defines the log of cluster operations, it should include "%(cluster_name)s" template to separate logs from multiple shares. Related options: * vzstorage_mount_opts may include more detailed logging options. """ ), cfg.StrOpt('vzstorage_cache_path', default=None, help=""" Path to the SSD cache file. You can attach an SSD drive to a client and configure the drive to store a local cache of frequently accessed data. By having a local cache on a client's SSD drive, you can increase the overall cluster performance by up to 10 and more times. WARNING! There is a lot of SSD models which are not server grade and may loose arbitrary set of data changes on power loss. Such SSDs should not be used in Vstorage and are dangerous as may lead to data corruptions and inconsistencies. Please consult with the manual on which SSD models are known to be safe or verify it using vstorage-hwflush-check(1) utility. This option defines the path which should include "%(cluster_name)s" template to separate caches from multiple shares. Related options: * vzstorage_mount_opts may include more detailed cache options. 
""" ), cfg.ListOpt('vzstorage_mount_opts', default=[], help=""" Extra mount options for pstorage-mount For full description of them, see https://static.openvz.org/vz-man/man1/pstorage-mount.1.gz.html Format is a python string representation of arguments list, like: "[\'-v\', \'-R\', \'500\']" Shouldn\'t include -c, -l, -C, -u, -g and -m as those have explicit vzstorage_* options. Related options: * All other vzstorage_* options """ ), ] # The queue size requires value to be a power of two from [256, 1024] # range. # https://libvirt.org/formatdomain.html#elementsDriverBackendOptions QueueSizeType = types.Integer(choices=(256, 512, 1024)) libvirt_virtio_queue_sizes = [ cfg.Opt('rx_queue_size', type=QueueSizeType, help=""" Configure virtio rx queue size. This option is only usable for virtio-net device with vhost and vhost-user backend. Available only with QEMU/KVM. Requires libvirt v2.3 QEMU v2.7."""), cfg.Opt('tx_queue_size', type=QueueSizeType, help=""" Configure virtio tx queue size. This option is only usable for virtio-net device with vhost-user backend. Available only with QEMU/KVM. Requires libvirt v3.7 QEMU v2.10."""), cfg.IntOpt('max_queues', default=None, min=1, help=""" The maximum number of virtio queue pairs that can be enabled when creating a multiqueue guest. The number of virtio queues allocated will be the lesser of the CPUs requested by the guest and the max value defined. By default, this value is set to none meaning the legacy limits based on the reported kernel major version will be used. """), ] libvirt_volume_nvmeof_opts = [ cfg.IntOpt('num_nvme_discover_tries', default=5, help=""" Number of times to rediscover NVMe target to find volume Nova provides support for block storage attaching to hosts via NVMe (Non-Volatile Memory Express). This option allows the user to specify the maximum number of retry attempts that can be made to discover the NVMe device. """), ] libvirt_pmem_opts = [ cfg.ListOpt('pmem_namespaces', item_type=cfg.types.String(), default=[], help=""" Configure persistent memory(pmem) namespaces. These namespaces must have been already created on the host. This config option is in the following format:: "$LABEL:$NSNAME[|$NSNAME][,$LABEL:$NSNAME[|$NSNAME]]" * ``$NSNAME`` is the name of the pmem namespace. * ``$LABEL`` represents one resource class, this is used to generate the resource class name as ``CUSTOM_PMEM_NAMESPACE_$LABEL``. For example:: [libvirt] pmem_namespaces=128G:ns0|ns1|ns2|ns3,262144MB:ns4|ns5,MEDIUM:ns6|ns7 """), ] libvirt_vtpm_opts = [ cfg.BoolOpt('swtpm_enabled', default=False, help=""" Enable emulated TPM (Trusted Platform Module) in guests. """), cfg.StrOpt('swtpm_user', default='tss', help=""" User that swtpm binary runs as. When using emulated TPM, the ``swtpm`` binary will run to emulate a TPM device. The user this binary runs as depends on libvirt configuration, with ``tss`` being the default. In order to support cold migration and resize, nova needs to know what user the swtpm binary is running as in order to ensure that files get the proper ownership after being moved between nodes. Related options: * ``swtpm_group`` must also be set. """), cfg.StrOpt('swtpm_group', default='tss', help=""" Group that swtpm binary runs as. When using emulated TPM, the ``swtpm`` binary will run to emulate a TPM device. The user this binary runs as depends on libvirt configuration, with ``tss`` being the default. 
In order to support cold migration and resize, nova needs to know what group the swtpm binary is running as in order to ensure that files get the proper ownership after being moved between nodes. Related options: * ``swtpm_user`` must also be set. """), ] libvirt_cpu_mgmt_opts = [ cfg.BoolOpt('cpu_power_management', default=False, help='Use libvirt to manage CPU cores performance.'), cfg.StrOpt('cpu_power_management_strategy', choices=['cpu_state', 'governor'], default='cpu_state', help='Tuning strategy to reduce CPU power consumption when ' 'unused'), cfg.StrOpt('cpu_power_governor_low', default='powersave', help='Governor to use in order ' 'to reduce CPU power consumption'), cfg.StrOpt('cpu_power_governor_high', default='performance', help='Governor to use in order to have best CPU performance'), ] ALL_OPTS = list(itertools.chain( libvirt_general_opts, libvirt_imagebackend_opts, libvirt_lvm_opts, libvirt_utils_opts, libvirt_vif_opts, libvirt_volume_opts, libvirt_volume_aoe_opts, libvirt_volume_iscsi_opts, libvirt_volume_iser_opts, libvirt_volume_net_opts, libvirt_volume_nfs_opts, libvirt_volume_ceph_opts, libvirt_volume_quobyte_opts, libvirt_volume_smbfs_opts, libvirt_remotefs_opts, libvirt_volume_vzstorage_opts, libvirt_virtio_queue_sizes, libvirt_volume_nvmeof_opts, libvirt_pmem_opts, libvirt_vtpm_opts, libvirt_cpu_mgmt_opts, )) def register_opts(conf): conf.register_group(libvirt_group) conf.register_opts(ALL_OPTS, group=libvirt_group) def list_opts(): return {libvirt_group: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/manila.py0000664000175000017500000000351400000000000016270 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading as ks_loading from oslo_config import cfg from nova.conf import utils as confutils DEFAULT_SERVICE_TYPE = 'shared-file-system' manila_group = cfg.OptGroup( 'manila', title='Manila Options', help="Configuration options for the share-file-system service") manila_opts = [ cfg.IntOpt('share_apply_policy_timeout', default=10, help=""" Timeout period for share policy application. Maximum duration to await a response from the Manila service for the application of a share policy before experiencing a timeout. 0 means do not wait (0s). Possible values: * A positive integer or 0 (default value is 10). 
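For example (illustrative)::

    [manila]
    share_apply_policy_timeout = 30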
"""), ] def register_opts(conf): conf.register_group(manila_group) conf.register_opts(manila_opts, group=manila_group) ks_loading.register_session_conf_options(conf, manila_group.name) ks_loading.register_auth_conf_options(conf, manila_group.name) confutils.register_ksa_opts(conf, manila_group, DEFAULT_SERVICE_TYPE) def list_opts(): return { manila_group.name: ( manila_opts + ks_loading.get_session_conf_options() + ks_loading.get_auth_common_conf_options() + ks_loading.get_auth_plugin_conf_options('v3password')) } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/mks.py0000664000175000017500000000352300000000000015621 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg mks_group = cfg.OptGroup('mks', title='MKS Options', help=""" Nova compute node uses WebMKS, a desktop sharing protocol to provide instance console access to VM's created by VMware hypervisors. Related options: Following options must be set to provide console access. * mksproxy_base_url * enabled """) mks_opts = [ cfg.URIOpt('mksproxy_base_url', schemes=['http', 'https'], default='http://127.0.0.1:6090/', help=""" Location of MKS web console proxy The URL in the response points to a WebMKS proxy which starts proxying between client and corresponding vCenter server where instance runs. In order to use the web based console access, WebMKS proxy should be installed and configured Possible values: * Must be a valid URL of the form:``http://host:port/`` or ``https://host:port/`` """), cfg.BoolOpt('enabled', default=False, help=""" Enables graphical console access for virtual machines. """), ] def register_opts(conf): conf.register_group(mks_group) conf.register_opts(mks_opts, group=mks_group) def list_opts(): return {mks_group: mks_opts} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/netconf.py0000664000175000017500000000640500000000000016465 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import socket from oslo_config import cfg from oslo_utils import netutils netconf_opts = [ cfg.StrOpt("my_ip", default=netutils.get_my_ipv4(), sample_default='', help=""" The IP address which the host is using to connect to the management network. Possible values: * String with valid IP address. Default is IPv4 address of this host. Related options: * my_block_storage_ip * my_shared_fs_storage_ip """), cfg.StrOpt("my_block_storage_ip", default="$my_ip", help=""" The IP address which is used to connect to the block storage network. Possible values: * String with valid IP address. Default is IP address of this host. Related options: * my_ip - if my_block_storage_ip is not set, then my_ip value is used. """), cfg.StrOpt("my_shared_fs_storage_ip", default="$my_ip", help=""" The IP address which is used to connect to the shared_fs storage (manila) network. Possible values: * String with valid IP address. Default is IP address of this host. Related options: * my_ip - if my_shared_fs_storage_ip is not set, then my_ip value is used. """), cfg.HostDomainOpt("host", default=socket.gethostname(), sample_default='', help=""" Hostname, FQDN or IP address of this host. Used as: * the oslo.messaging queue name for nova-compute worker * we use this value for the binding_host sent to neutron. This means if you use a neutron agent, it should have the same value for host. * cinder host attachment information Must be valid within AMQP key. Possible values: * String with hostname, FQDN or IP address. Default is hostname of this host. """), # TODO(sfinucan): This option is tied into the VMWare and Libvirt drivers. # We should remove this dependency by either adding a new opt for each # driver or simply removing the offending code. Until then we cannot # deprecate this option. cfg.BoolOpt("flat_injected", default=False, help=""" This option determines whether the network setup information is injected into the VM before it is booted. While it was originally designed to be used only by nova-network, it is also used by the vmware virt driver to control whether network information is injected into a VM. The libvirt virt driver also uses it when we use config_drive to configure network to control whether network information is injected into a VM. """), ] def register_opts(conf): conf.register_opts(netconf_opts) def list_opts(): return {'DEFAULT': netconf_opts} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/neutron.py0000664000175000017500000001345200000000000016523 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading as ks_loading from oslo_config import cfg from nova.conf import utils as confutils DEFAULT_SERVICE_TYPE = 'network' NEUTRON_GROUP = 'neutron' neutron_group = cfg.OptGroup( NEUTRON_GROUP, title='Neutron Options', help=""" Configuration options for neutron (network connectivity as a service). 
""") neutron_opts = [ cfg.StrOpt('ovs_bridge', default='br-int', help=""" Default name for the Open vSwitch integration bridge. Specifies the name of an integration bridge interface used by OpenvSwitch. This option is only used if Neutron does not specify the OVS bridge name in port binding responses. """), cfg.StrOpt('default_floating_pool', default='nova', help=""" Default name for the floating IP pool. Specifies the name of floating IP pool used for allocating floating IPs. This option is only used if Neutron does not specify the floating IP pool name in port binding responses. """), cfg.IntOpt('extension_sync_interval', default=600, min=0, help=""" Integer value representing the number of seconds to wait before querying Neutron for extensions. After this number of seconds the next time Nova needs to create a resource in Neutron it will requery Neutron for the extensions that it has loaded. Setting value to 0 will refresh the extensions with no wait. """), cfg.ListOpt('physnets', default=[], help=""" List of physnets present on this host. For each *physnet* listed, an additional section, ``[neutron_physnet_$PHYSNET]``, will be added to the configuration file. Each section must be configured with a single configuration option, ``numa_nodes``, which should be a list of node IDs for all NUMA nodes this physnet is associated with. For example:: [neutron] physnets = foo, bar [neutron_physnet_foo] numa_nodes = 0 [neutron_physnet_bar] numa_nodes = 0,1 Any *physnet* that is not listed using this option will be treated as having no particular NUMA node affinity. Tunnelled networks (VXLAN, GRE, ...) cannot be accounted for in this way and are instead configured using the ``[neutron_tunnel]`` group. For example:: [neutron_tunnel] numa_nodes = 1 Related options: * ``[neutron_tunnel] numa_nodes`` can be used to configure NUMA affinity for all tunneled networks * ``[neutron_physnet_$PHYSNET] numa_nodes`` must be configured for each value of ``$PHYSNET`` specified by this option """), cfg.IntOpt('http_retries', default=3, min=0, help=""" Number of times neutronclient should retry on any failed http call. 0 means connection is attempted only once. Setting it to any positive integer means that on failure connection is retried that many times e.g. setting it to 3 means total attempts to connect will be 4. Possible values: * Any integer value. 0 means connection is attempted only once """), ] metadata_proxy_opts = [ cfg.BoolOpt("service_metadata_proxy", default=False, help=""" When set to True, this option indicates that Neutron will be used to proxy metadata requests and resolve instance ids. Otherwise, the instance ID must be passed to the metadata request in the 'X-Instance-ID' header. Related options: * metadata_proxy_shared_secret """), cfg.StrOpt("metadata_proxy_shared_secret", default="", secret=True, help=""" This option holds the shared secret string used to validate proxy requests to Neutron metadata requests. In order to be used, the 'X-Metadata-Provider-Signature' header must be supplied in the request. Related options: * service_metadata_proxy """), ] ALL_OPTS = (neutron_opts + metadata_proxy_opts) def register_opts(conf): conf.register_group(neutron_group) conf.register_opts(ALL_OPTS, group=neutron_group) confutils.register_ksa_opts(conf, neutron_group, DEFAULT_SERVICE_TYPE) def register_dynamic_opts(conf): """Register dynamically-generated options and groups. This must be called by the service that wishes to use the options **after** the initial configuration has been loaded. 
""" opt = cfg.ListOpt('numa_nodes', default=[], item_type=cfg.types.Integer()) # Register the '[neutron_tunnel] numa_nodes' opt, implicitly # registering the '[neutron_tunnel]' group in the process. This could # be done statically but is done to avoid this group appearing in # nova.conf documentation while the other group does not. conf.register_opt(opt, group='neutron_tunnel') # Register the '[neutron_physnet_$PHYSNET] numa_nodes' opts, implicitly # registering the '[neutron_physnet_$PHYSNET]' groups in the process for physnet in conf.neutron.physnets: conf.register_opt(opt, group='neutron_physnet_%s' % physnet) def list_opts(): return { neutron_group: ( ALL_OPTS + ks_loading.get_session_conf_options() + ks_loading.get_auth_common_conf_options() + ks_loading.get_auth_plugin_conf_options('password') + ks_loading.get_auth_plugin_conf_options('v2password') + ks_loading.get_auth_plugin_conf_options('v3password') + confutils.get_ksa_adapter_opts(DEFAULT_SERVICE_TYPE)) } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/notifications.py0000664000175000017500000001140400000000000017675 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Intel, Inc. # Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg notifications_group = cfg.OptGroup( name='notifications', title='Notifications options', help=""" Most of the actions in Nova which manipulate the system state generate notifications which are posted to the messaging component (e.g. RabbitMQ) and can be consumed by any service outside the OpenStack. More technical details at https://docs.openstack.org/nova/latest/reference/notifications.html """) ALL_OPTS = [ cfg.StrOpt( 'notify_on_state_change', choices=[ (None, 'no notifications'), ('vm_state', 'Notifications are sent with VM state transition ' 'information in the ``old_state`` and ``state`` fields. The ' '``old_task_state`` and ``new_task_state`` fields will be set to ' 'the current task_state of the instance'), ('vm_and_task_state', 'Notifications are sent with VM and task ' 'state transition information'), ], deprecated_group='DEFAULT', help=""" If set, send compute.instance.update notifications on instance state changes. Please refer to https://docs.openstack.org/nova/latest/reference/notifications.html for additional information on notifications. 
"""), cfg.StrOpt( 'default_level', default='INFO', choices=('DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'), deprecated_group='DEFAULT', deprecated_name='default_notification_level', help="Default notification level for outgoing notifications."), cfg.StrOpt( 'notification_format', default='unversioned', choices=[ ('both', 'Both the legacy unversioned and the new versioned ' 'notifications are emitted'), ('versioned', 'Only the new versioned notifications are emitted'), ('unversioned', 'Only the legacy unversioned notifications are ' 'emitted'), ], deprecated_group='DEFAULT', help=""" Specifies which notification format shall be emitted by nova. The versioned notification interface are in feature parity with the legacy interface and the versioned interface is actively developed so new consumers should used the versioned interface. However, the legacy interface is heavily used by ceilometer and other mature OpenStack components so it remains the default. Note that notifications can be completely disabled by setting ``driver=noop`` in the ``[oslo_messaging_notifications]`` group. The list of versioned notifications is visible in https://docs.openstack.org/nova/latest/reference/notifications.html """), cfg.ListOpt( 'versioned_notifications_topics', default=['versioned_notifications'], help=""" Specifies the topics for the versioned notifications issued by nova. The default value is fine for most deployments and rarely needs to be changed. However, if you have a third-party service that consumes versioned notifications, it might be worth getting a topic for that service. Nova will send a message containing a versioned notification payload to each topic queue in this list. The list of versioned notifications is visible in https://docs.openstack.org/nova/latest/reference/notifications.html """), cfg.BoolOpt( 'bdms_in_notifications', default=False, help=""" If enabled, include block device information in the versioned notification payload. Sending block device information is disabled by default as providing that information can incur some overhead on the system since the information may need to be loaded from the database. """), cfg.BoolOpt( 'include_share_mapping', default=False, help=""" If enabled, include share mapping information in the versioned notification payload. Sending share mapping information is disabled by default as providing that information can incur some overhead on the system since the information may need to be loaded from the database. """) ] def register_opts(conf): conf.register_group(notifications_group) conf.register_opts(ALL_OPTS, group=notifications_group) def list_opts(): return {notifications_group: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/novnc.py0000664000175000017500000000374500000000000016160 0ustar00zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg novnc_opts = [ cfg.StrOpt('record', help=""" Filename that will be used for storing websocket frames received and sent by a proxy service (like VNC, spice, serial) running on this host. If this is not set, no recording will be done. """), cfg.BoolOpt('daemon', default=False, help="Run as a background process."), cfg.BoolOpt('ssl_only', default=False, help=""" Disallow non-encrypted connections. Related options: * cert * key """), cfg.BoolOpt('source_is_ipv6', default=False, help="Set to True if source host is addressed with IPv6."), cfg.StrOpt('cert', default='self.pem', help=""" Path to SSL certificate file. Related options: * key * ssl_only * [console] ssl_ciphers * [console] ssl_minimum_version """), cfg.StrOpt('key', help=""" SSL key file (if separate from cert). Related options: * cert """), cfg.StrOpt('web', default='/usr/share/spice-html5', help=""" Path to directory with content which will be served by a web server. """), ] def register_opts(conf): conf.register_opts(novnc_opts) def register_cli_opts(conf): conf.register_cli_opts(novnc_opts) def list_opts(): return {'DEFAULT': novnc_opts} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/opts.py0000664000175000017500000000521200000000000016011 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This is the single point of entry to generate the sample configuration file for Nova. It collects all the necessary info from the other modules in this package. It is assumed that: * every other module in this package has a 'list_opts' function which return a dict where * the keys are strings which are the group names * the value of each key is a list of config options for that group * the nova.conf package doesn't have further packages with config options * this module is only used in the context of sample file generation """ import collections import importlib import os import pkgutil LIST_OPTS_FUNC_NAME = "list_opts" def _tupleize(dct): """Take the dict of options and convert to the 2-tuple format.""" return [(key, val) for key, val in dct.items()] def list_opts(): opts = collections.defaultdict(list) module_names = _list_module_names() imported_modules = _import_modules(module_names) _append_config_options(imported_modules, opts) return _tupleize(opts) def _list_module_names(): module_names = [] package_path = os.path.dirname(os.path.abspath(__file__)) for _, modname, ispkg in pkgutil.iter_modules(path=[package_path]): if modname == "opts" or ispkg: continue else: module_names.append(modname) return module_names def _import_modules(module_names): imported_modules = [] for modname in module_names: mod = importlib.import_module("nova.conf." + modname) if not hasattr(mod, LIST_OPTS_FUNC_NAME): msg = "The module 'nova.conf.%s' should have a '%s' "\ "function which returns the config options." 
% \ (modname, LIST_OPTS_FUNC_NAME) raise Exception(msg) else: imported_modules.append(mod) return imported_modules def _append_config_options(imported_modules, config_options): for mod in imported_modules: configs = mod.list_opts() for key, val in configs.items(): config_options[key].extend(val) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/paths.py0000664000175000017500000000460300000000000016146 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_config import cfg ALL_OPTS = [ cfg.StrOpt('pybasedir', default=os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')), sample_default='', help=""" The directory where the Nova python modules are installed. This directory is used to store template files for networking and remote console access. It is also the default path for other config options which need to persist Nova internal data. It is very unlikely that you need to change this option from its default value. Possible values: * The full path to a directory. Related options: * ``state_path`` """), cfg.StrOpt('state_path', default='$pybasedir', help=""" The top-level directory for maintaining Nova's state. This directory is used to store Nova's internal state. It is used by a variety of other config options which derive from this. In some scenarios (for example migrations) it makes sense to use a storage location which is shared between multiple compute hosts (for example via NFS). Unless the option ``instances_path`` gets overwritten, this directory can grow very large. Possible values: * The full path to a directory. Defaults to value provided in ``pybasedir``. """), ] def basedir_def(*args): """Return an uninterpolated path relative to $pybasedir.""" return os.path.join('$pybasedir', *args) def state_path_def(*args): """Return an uninterpolated path relative to $state_path.""" return os.path.join('$state_path', *args) def register_opts(conf): conf.register_opts(ALL_OPTS) def list_opts(): return {"DEFAULT": ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/pci.py0000664000175000017500000003365600000000000015614 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Intel, Inc. # Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg pci_group = cfg.OptGroup( name='pci', title='PCI passthrough options') pci_opts = [ cfg.MultiStrOpt('alias', default=[], deprecated_name='pci_alias', deprecated_group='DEFAULT', help=""" An alias for a PCI passthrough device requirement. This allows users to specify the alias in the extra specs for a flavor, without needing to repeat all the PCI property requirements. This should be configured for the ``nova-api`` service and, assuming you wish to use move operations, for each ``nova-compute`` service. Possible Values: * A JSON dictionary which describe a PCI device. It should take the following format:: alias = { "name": "", ["product_id": ""], ["vendor_id": ""], "device_type": "", ["numa_policy": ""], ["resource_class": ""], ["traits": ""] ["live_migratable": ""], } Note that ``[...]`` indicates optional field. For example:: alias = { "name": "QuickAssist", "product_id": "0443", "vendor_id": "8086", "device_type": "type-PCI", "numa_policy": "required" } This defines an alias for the Intel QuickAssist card. (multi valued). Another example:: alias = { "name": "A16_16A", "device_type": "type-VF", "resource_class": "GPU_VF", "traits": "blue, big" } Valid key values are : ``name`` Name of the PCI alias. ``product_id`` Product ID of the device in hexadecimal. ``vendor_id`` Vendor ID of the device in hexadecimal. ``device_type`` Type of PCI device. Valid values are: ``type-PCI``, ``type-PF`` and ``type-VF``. Note that ``"device_type": "type-PF"`` **must** be specified if you wish to passthrough a device that supports SR-IOV in its entirety. ``numa_policy`` Required NUMA affinity of device. Valid values are: ``legacy``, ``preferred``, ``required``, and ``socket``. ``resource_class`` The optional Placement resource class name that is used to track the requested PCI devices in Placement. It can be a standard resource class from the ``os-resource-classes`` lib. Or it can be an arbitrary string. If it is an non-standard resource class then Nova will normalize it to a proper Placement resource class by making it upper case, replacing any consecutive character outside of ``[A-Z0-9_]`` with a single '_', and prefixing the name with ``CUSTOM_`` if not yet prefixed. The maximum allowed length is 255 character including the prefix. If ``resource_class`` is not provided Nova will generate it from ``vendor_id`` and ``product_id`` values of the alias in the form of ``CUSTOM_PCI_{vendor_id}_{product_id}``. The ``resource_class`` requested in the alias is matched against the ``resource_class`` defined in the ``[pci]device_spec``. This field can only be used only if ``[filter_scheduler]pci_in_placement`` is enabled. Either the product_id and vendor_id or the resource_class field must be provided in each alias. ``traits`` An optional comma separated list of Placement trait names requested to be present on the resource provider that fulfills this alias. Each trait can be a standard trait from ``os-traits`` lib or it can be an arbitrary string. If it is a non-standard trait then Nova will normalize the trait name by making it upper case, replacing any consecutive character outside of ``[A-Z0-9_]`` with a single '_', and prefixing the name with ``CUSTOM_`` if not yet prefixed. The maximum allowed length of a trait name is 255 character including the prefix. 
Every trait in ``traits`` requested in the alias ensured to be in the list of traits provided in the ``traits`` field of the ``[pci]device_spec`` when scheduling the request. This field can only be used only if ``[filter_scheduler]pci_in_placement`` is enabled. ``live_migratable`` Specify if live-migratable devices are desired. May have boolean-like string values case-insensitive values: "yes" or "no". - ``live_migratable='yes'`` means that the user wants a device(s) allowing live migration to a similar device(s) on another host. - ``live_migratable='no'`` This explicitly indicates that the user requires a non-live migratable device, making migration impossible. - If not specified, the default is ``live_migratable=None``, meaning that either a live migratable or non-live migratable device will be picked automatically. However, in such cases, migration will **not** be possible. * Supports multiple aliases by repeating the option (not by specifying a list value):: alias = { "name": "QuickAssist-1", "product_id": "0443", "vendor_id": "8086", "device_type": "type-PCI", "numa_policy": "required" } alias = { "name": "QuickAssist-2", "product_id": "0444", "vendor_id": "8086", "device_type": "type-PCI", "numa_policy": "required", "live_migratable": "yes", } """), cfg.MultiStrOpt('device_spec', default=[], deprecated_opts=[ cfg.DeprecatedOpt('passthrough_whitelist', group='pci'), cfg.DeprecatedOpt('pci_passthrough_whitelist', group='DEFAULT'), ], help=""" Specify the PCI devices available to VMs. Possible values: * A JSON dictionary which describe a PCI device. It should take the following format:: ["vendor_id": "",] ["product_id": "",] ["address": "[[[[]:]]:][][.[]]" | "devname": "",] {"": "",} Where ``[`` indicates zero or one occurrences, ``{`` indicates zero or multiple occurrences, and ``|`` mutually exclusive options. Note that any missing fields are automatically wildcarded. Valid key values are : ``vendor_id`` Vendor ID of the device in hexadecimal. ``product_id`` Product ID of the device in hexadecimal. ``address`` PCI address of the device. Both traditional glob style and regular expression syntax is supported. Please note that the address fields are restricted to the following maximum values: * domain - 0xFFFF * bus - 0xFF * slot - 0x1F * function - 0x7 ``devname`` Device name of the device (for e.g. interface name). Not all PCI devices have a name. ```` Additional ```` and ```` used for specifying PCI devices. Supported ```` values are : - ``physical_network`` - ``trusted`` - ``remote_managed`` - a VF is managed remotely by an off-path networking backend. May have boolean-like string values case-insensitive values: "true" or "false". By default, "false" is assumed for all devices. Using this option requires a networking service backend capable of handling those devices. PCI devices are also required to have a PCI VPD capability with a card serial number (either on a VF itself on its corresponding PF), otherwise they will be ignored and not available for allocation. - ``managed`` - Specify if the PCI device is managed by libvirt. May have boolean-like string values case-insensitive values: "yes" or "no". By default, "yes" is assumed for all devices. - ``managed='yes'`` means that nova will use libvirt to detach the device from the host before attaching it to the guest and re-attach it to the host after the guest is deleted. - ``managed='no'`` means that nova will not request libvirt to detach / attach the device from / to the host. 
In this case nova assumes that the operator configured the host in a way that these VFs are not attached to the host. Warning: Incorrect configuration of this parameter may result in compute node crashes. - ``live_migratable`` - Specify if the PCI device is live_migratable by libvirt. May have boolean-like string values case-insensitive values: "yes" or "no". By default, "no" is assumed for all devices. - ``live_migratable='yes'`` means that the device can be live migrated. Of course, this requires hardware support, as well as proper system and hypervisor configuration. - ``live_migratable='no'`` means that the device cannot be live migrated. - ``resource_class`` - optional Placement resource class name to be used to track the matching PCI devices in Placement when [pci]report_in_placement is True. It can be a standard resource class from the ``os-resource-classes`` lib. Or can be any string. In that case Nova will normalize it to a proper Placement resource class by making it upper case, replacing any consecutive character outside of ``[A-Z0-9_]`` with a single '_', and prefixing the name with ``CUSTOM_`` if not yet prefixed. The maximum allowed length is 255 character including the prefix. If ``resource_class`` is not provided Nova will generate it from the PCI device's ``vendor_id`` and ``product_id`` in the form of ``CUSTOM_PCI_{vendor_id}_{product_id}``. The ``resource_class`` can be requested from a ``[pci]alias`` - ``traits`` - optional comma separated list of Placement trait names to report on the resource provider that will represent the matching PCI device. Each trait can be a standard trait from ``os-traits`` lib or can be any string. If it is not a standard trait then Nova will normalize the trait name by making it upper case, replacing any consecutive character outside of ``[A-Z0-9_]`` with a single '_', and prefixing the name with ``CUSTOM_`` if not yet prefixed. The maximum allowed length of a trait name is 255 character including the prefix. Any trait from ``traits`` can be requested from a ``[pci]alias``. 
Valid examples are:: device_spec = {"devname":"eth0", "physical_network":"physnet"} device_spec = {"address":"*:0a:00.*"} device_spec = {"address":":0a:00.", "physical_network":"physnet1"} device_spec = {"vendor_id":"1137", "product_id":"0071"} device_spec = {"vendor_id":"1137", "product_id":"0071", "address": "0000:0a:00.1", "physical_network":"physnet1"} device_spec = {"address":{"domain": ".*", "bus": "02", "slot": "01", "function": "[2-7]"}, "physical_network":"physnet1"} device_spec = {"address":{"domain": ".*", "bus": "02", "slot": "0[1-2]", "function": ".*"}, "physical_network":"physnet1"} device_spec = {"devname": "eth0", "physical_network":"physnet1", "trusted": "true"} device_spec = {"vendor_id":"a2d6", "product_id":"15b3", "remote_managed": "true"} device_spec = {"vendor_id":"a2d6", "product_id":"15b3", "address": "0000:82:00.0", "physical_network":"physnet1", "remote_managed": "true"} device_spec = {"vendor_id":"1002", "product_id":"6929", "address": "0000:82:00.0", "resource_class": "PGPU", "traits": "HW_GPU_API_VULKAN,my-awesome-gpu"} device_spec = {"vendor_id":"10de", "product_id":"25b6", "address": "0000:25:00.4", "managed": "no"} device_spec = {"vendor_id":"10de", "product_id":"25b6", "address": "0000:25:00.4", "resource_class": "CUSTOM_A16_16A", "managed": "no"} The following are invalid, as they specify mutually exclusive options:: device_spec = {"devname":"eth0", "physical_network":"physnet", "address":"*:0a:00.*"} The following example is invalid because it specifies the ``remote_managed`` tag for a PF - it will result in an error during config validation at the Nova Compute service startup:: device_spec = {"address": "0000:82:00.0", "product_id": "a2d6", "vendor_id": "15b3", "physical_network": null, "remote_managed": "true"} * A JSON list of JSON dictionaries corresponding to the above format. For example:: device_spec = [{"product_id":"0001", "vendor_id":"8086"}, {"product_id":"0002", "vendor_id":"8086"}] """), cfg.BoolOpt('report_in_placement', default=False, help=""" Enable PCI resource inventory reporting to Placement. If it is enabled then the nova-compute service will report PCI resource inventories to Placement according to the [pci]device_spec configuration and the PCI devices reported by the hypervisor. Once it is enabled it cannot be disabled any more. In a future release the default of this config will be change to True. Related options: * [pci]device_spec: to define which PCI devices nova are allowed to track and assign to guests. """), ] def register_opts(conf): conf.register_group(pci_group) conf.register_opts(pci_opts, group=pci_group) def list_opts(): return {pci_group: pci_opts} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/placement.py0000664000175000017500000000271700000000000017003 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystoneauth1 import loading as ks_loading from oslo_config import cfg from nova.conf import utils as confutils DEFAULT_SERVICE_TYPE = 'placement' placement_group = cfg.OptGroup( 'placement', title='Placement Service Options', help="Configuration options for connecting to the placement API service") def register_opts(conf): conf.register_group(placement_group) confutils.register_ksa_opts(conf, placement_group, DEFAULT_SERVICE_TYPE) def list_opts(): return { placement_group.name: ( ks_loading.get_session_conf_options() + ks_loading.get_auth_common_conf_options() + ks_loading.get_auth_plugin_conf_options('password') + ks_loading.get_auth_plugin_conf_options('v2password') + ks_loading.get_auth_plugin_conf_options('v3password') + confutils.get_ksa_adapter_opts(DEFAULT_SERVICE_TYPE)) } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/quota.py0000664000175000017500000003427200000000000016165 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import os_resource_classes as orc from oslo_config import cfg from oslo_config import types as cfg_types class UnifiedLimitsResource(cfg_types.String): # NOTE(melwitt): Attempting to import nova.limit.(local|placement) for # LEGACY_LIMITS resource names results in error: # AttributeError: module 'nova' has no attribute 'conf' resources = { 'server_metadata_items', 'server_injected_files', 'server_injected_file_content_bytes', 'server_injected_file_path_bytes', 'server_key_pairs', 'server_groups', 'server_group_members', 'servers'} def __call__(self, value): super().__call__(value) valid_resources = self.resources valid_resources |= {f'class:{cls}' for cls in orc.STANDARDS} custom_regex = r'^class:CUSTOM_[A-Z0-9_]+$' if value in valid_resources or re.fullmatch(custom_regex, value): return value msg = ( f'Value {value} is not a valid resource class name. Must be ' f'one of: {valid_resources} or a custom resource class name ' f'of the form {custom_regex[1:-1]}') raise ValueError(msg) quota_group = cfg.OptGroup( name='quota', title='Quota Options', help=""" Quota options allow to manage quotas in openstack deployment. """) quota_opts = [ cfg.IntOpt('instances', min=-1, default=10, deprecated_group='DEFAULT', deprecated_name='quota_instances', help=""" The number of instances allowed per project. Possible Values * A positive integer or 0. * -1 to disable the quota. """), cfg.IntOpt('cores', min=-1, default=20, deprecated_group='DEFAULT', deprecated_name='quota_cores', help=""" The number of instance cores or vCPUs allowed per project. Possible values: * A positive integer or 0. * -1 to disable the quota. """), cfg.IntOpt('ram', min=-1, default=50 * 1024, deprecated_group='DEFAULT', deprecated_name='quota_ram', help=""" The number of megabytes of instance RAM allowed per project. 
Possible values: * A positive integer or 0. * -1 to disable the quota. """), cfg.IntOpt('metadata_items', min=-1, default=128, deprecated_group='DEFAULT', deprecated_name='quota_metadata_items', help=""" The number of metadata items allowed per instance. Users can associate metadata with an instance during instance creation. This metadata takes the form of key-value pairs. Possible values: * A positive integer or 0. * -1 to disable the quota. """), cfg.IntOpt('injected_files', min=-1, default=5, deprecated_group='DEFAULT', deprecated_name='quota_injected_files', help=""" The number of injected files allowed. File injection allows users to customize the personality of an instance by injecting data into it upon boot. Only text file injection is permitted: binary or ZIP files are not accepted. During file injection, any existing files that match specified files are renamed to include ``.bak`` extension appended with a timestamp. Possible values: * A positive integer or 0. * -1 to disable the quota. """), cfg.IntOpt('injected_file_content_bytes', min=-1, default=10 * 1024, deprecated_group='DEFAULT', deprecated_name='quota_injected_file_content_bytes', help=""" The number of bytes allowed per injected file. Possible values: * A positive integer or 0. * -1 to disable the quota. """), cfg.IntOpt('injected_file_path_length', min=-1, default=255, deprecated_group='DEFAULT', deprecated_name='quota_injected_file_path_length', help=""" The maximum allowed injected file path length. Possible values: * A positive integer or 0. * -1 to disable the quota. """), cfg.IntOpt('key_pairs', min=-1, default=100, deprecated_group='DEFAULT', deprecated_name='quota_key_pairs', help=""" The maximum number of key pairs allowed per user. Users can create at least one key pair for each project and use the key pair for multiple instances that belong to that project. Possible values: * A positive integer or 0. * -1 to disable the quota. """), cfg.IntOpt('server_groups', min=-1, default=10, deprecated_group='DEFAULT', deprecated_name='quota_server_groups', help=""" The maximum number of server groups per project. Server groups are used to control the affinity and anti-affinity scheduling policy for a group of servers or instances. Reducing the quota will not affect any existing group, but new servers will not be allowed into groups that have become over quota. Possible values: * A positive integer or 0. * -1 to disable the quota. """), cfg.IntOpt('server_group_members', min=-1, default=10, deprecated_group='DEFAULT', deprecated_name='quota_server_group_members', help=""" The maximum number of servers per server group. Possible values: * A positive integer or 0. * -1 to disable the quota. """), cfg.StrOpt('driver', default='nova.quota.DbQuotaDriver', choices=[ ('nova.quota.DbQuotaDriver', '(deprecated) Stores quota limit ' 'information in the database and relies on the ``quota_*`` ' 'configuration options for default quota limit values. Counts ' 'quota usage on-demand.'), ('nova.quota.NoopQuotaDriver', 'Ignores quota and treats all ' 'resources as unlimited.'), ('nova.quota.UnifiedLimitsDriver', 'Uses Keystone unified limits ' 'to store quota limit information and relies on resource ' 'usage counting from Placement. Counts quota usage on-demand. ' 'Resources missing unified limits in Keystone will be treated ' 'as a quota limit of 0, so it is important to ensure all ' 'resources have registered limits in Keystone. 
The ``nova-manage ' 'limits migrate_to_unified_limits`` command can be used to copy ' 'existing quota limits from the Nova database to Keystone ' 'unified limits via the Keystone API. Alternatively, unified ' 'limits can be created manually using the OpenStackClient or ' 'by calling the Keystone API directly.'), ], help=""" Provides abstraction for quota checks. Users can configure a specific driver to use for quota checks. """), cfg.BoolOpt('recheck_quota', default=True, help=""" Recheck quota after resource creation to prevent allowing quota to be exceeded. This defaults to True (recheck quota after resource creation) but can be set to False to avoid additional load if allowing quota to be exceeded because of racing requests is considered acceptable. For example, when set to False, if a user makes highly parallel REST API requests to create servers, it will be possible for them to create more servers than their allowed quota during the race. If their quota is 10 servers, they might be able to create 50 during the burst. After the burst, they will not be able to create any more servers but they will be able to keep their 50 servers until they delete them. The initial quota check is done before resources are created, so if multiple parallel requests arrive at the same time, all could pass the quota check and create resources, potentially exceeding quota. When recheck_quota is True, quota will be checked a second time after resources have been created and if the resource is over quota, it will be deleted and OverQuota will be raised, usually resulting in a 403 response to the REST API user. This makes it impossible for a user to exceed their quota with the caveat that it will, however, be possible for a REST API user to be rejected with a 403 response in the event of a collision close to reaching their quota limit, even if the user has enough quota available when they made the request. """), cfg.BoolOpt( 'count_usage_from_placement', default=False, help=""" Enable the counting of quota usage from the placement service. Starting in Train, it is possible to count quota usage for cores and ram from the placement service and instances from the API database instead of counting from cell databases. This works well if there is only one Nova deployment running per placement deployment. However, if an operator is running more than one Nova deployment sharing a placement deployment, they should not set this option to True because currently the placement service has no way to partition resource providers per Nova deployment. When this option is left as the default or set to False, Nova will use the legacy counting method to count quota usage for instances, cores, and ram from its cell databases. Note that quota usage behavior related to resizes will be affected if this option is set to True. Placement resource allocations are claimed on the destination while holding allocations on the source during a resize, until the resize is confirmed or reverted. During this time, when the server is in VERIFY_RESIZE state, quota usage will reflect resource consumption on both the source and the destination. This can be beneficial as it reserves space for a revert of a downsize, but it also means quota usage will be inflated until a resize is confirmed or reverted. Behavior will also be different for unscheduled servers in ERROR state. A server in ERROR state that has never been scheduled to a compute host will not have placement allocations, so it will not consume quota usage for cores and ram. 
Behavior will be different for servers in SHELVED_OFFLOADED state. A server in SHELVED_OFFLOADED state will not have placement allocations, so it will not consume quota usage for cores and ram. Note that because of this, it will be possible for a request to unshelve a server to be rejected if the user does not have enough quota available to support the cores and ram needed by the server to be unshelved. The ``populate_queued_for_delete`` and ``populate_user_id`` online data migrations must be completed before usage can be counted from placement. Until the data migration is complete, the system will fall back to legacy quota usage counting from cell databases depending on the result of an EXISTS database query during each quota check, if this configuration option is set to True. Operators who want to avoid the performance hit from the EXISTS queries should wait to set this configuration option to True until after they have completed their online data migrations via ``nova-manage db online_data_migrations``. """), cfg.StrOpt( 'unified_limits_resource_strategy', default='require', choices=[ ('require', 'Require the resources in ' '``unified_limits_resource_list`` to have registered limits set ' 'in Keystone'), ('ignore', 'Ignore the resources in ' '``unified_limits_resource_list`` if they do not have registered ' 'limits set in Keystone'), ], help=""" Specify the semantics of the ``unified_limits_resource_list``. When the quota driver is set to the ``UnifiedLimitsDriver``, resources may be specified to ether require registered limits set in Keystone or ignore if they do not have registered limits set. When set to ``require``, if a resource in ``unified_limits_resource_list`` is requested and has no registered limit set, the quota limit for that resource will be considered to be 0 and all requests to allocate that resource will be rejected for being over quota. When set to ``ignore``, if a resource in ``unified_limits_resource_list`` is requested and has no registered limit set, the quota limit for that resource will be considered to be unlimited and all requests to allocate that resource will be accepted. Related options: * ``unified_limits_resource_list``: This must contain either resources for which to require registered limits set or resources to ignore if they do not have registered limits set. It can also be set to an empty list. """), cfg.ListOpt( 'unified_limits_resource_list', item_type=UnifiedLimitsResource(), default=['servers'], help=""" Specify a list of resources to require or ignore registered limits. When the quota driver is set to the ``UnifiedLimitsDriver``, require or ignore resources in this list to have registered limits set in Keystone. When ``unified_limits_resource_strategy`` is ``require``, if a resource in this list is requested and has no registered limit set, the quota limit for that resource will be considered to be 0 and all requests to allocate that resource will be rejected for being over quota. When ``unified_limits_resource_strategy`` is ``ignore``, if a resource in this list is requested and has no registered limit set, the quota limit for that resource will be considered to be unlimited and all requests to allocate that resource will be accepted. The list can also be set to an empty list. 
Valid list item values are: * ``servers`` * ``class:`` * ``server_key_pairs`` * ``server_groups`` * ``server_group_members`` * ``server_metadata_items`` * ``server_injected_files`` * ``server_injected_file_content_bytes`` * ``server_injected_file_path_bytes`` Related options: * ``unified_limits_resource_strategy``: This must be set to ``require`` or ``ignore`` """), ] def register_opts(conf): conf.register_group(quota_group) conf.register_opts(quota_opts, group=quota_group) def list_opts(): return {quota_group: quota_opts} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/rpc.py0000664000175000017500000000257500000000000015621 0ustar00zuulzuul00000000000000# Copyright 2018 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg rpc_opts = [ cfg.IntOpt("long_rpc_timeout", default=1800, help=""" This option allows setting an alternate timeout value for RPC calls that have the potential to take a long time. If set, RPC calls to other services will use this value for the timeout (in seconds) instead of the global rpc_response_timeout value. Operations with RPC calls that utilize this value: * live migration * scheduling * enabling/disabling a compute service * image pre-caching * snapshot-based / cross-cell resize * resize / cold migration * volume attach Related options: * rpc_response_timeout """), ] ALL_OPTS = rpc_opts def register_opts(conf): conf.register_opts(ALL_OPTS) def list_opts(): return {'DEFAULT': ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/scheduler.py0000664000175000017500000010407500000000000017011 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.virt import arch scheduler_group = cfg.OptGroup( name="scheduler", title="Scheduler configuration") scheduler_opts = [ cfg.IntOpt("max_attempts", default=3, min=1, help=""" The maximum number of schedule attempts. This is the maximum number of attempts that will be made for a given instance build/move operation. It limits the number of alternate hosts returned by the scheduler. When that list of hosts is exhausted, a ``MaxRetriesExceeded`` exception is raised and the instance is set to an error state. 
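For example, to allow a couple of extra retries on clouds where builds fail transiently (the value is illustrative):

.. code-block:: ini

    [scheduler]
    max_attempts = 5
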
Possible values: * A positive integer, where the integer corresponds to the max number of attempts that can be made when building or moving an instance. """), cfg.IntOpt("discover_hosts_in_cells_interval", default=-1, min=-1, help=""" Periodic task interval. This value controls how often (in seconds) the scheduler should attempt to discover new hosts that have been added to cells. If negative (the default), no automatic discovery will occur. Deployments where compute nodes come and go frequently may want this enabled, where others may prefer to manually discover hosts when one is added to avoid any overhead from constantly checking. If enabled, every time this runs, we will select any unmapped hosts out of each cell database on every run. Possible values: * An integer, where the integer corresponds to periodic task interval in seconds. 0 uses the default interval (60 seconds). A negative value disables periodic tasks. """), cfg.IntOpt("max_placement_results", default=1000, min=1, help=""" The maximum number of placement results to request. This setting determines the maximum limit on results received from the placement service during a scheduling operation. It effectively limits the number of hosts that may be considered for scheduling requests that match a large number of candidates. A value of 1 (the minimum) will effectively defer scheduling to the placement service strictly on "will it fit" grounds. A higher value will put an upper cap on the number of results the scheduler will consider during the filtering and weighing process. Large deployments may need to set this lower than the total number of hosts available to limit memory consumption, network traffic, etc. of the scheduler. Possible values: * An integer, where the integer corresponds to the number of placement results to return. """), cfg.IntOpt("workers", min=0, help=""" Number of workers for the nova-scheduler service. Defaults to the number of CPUs available. Possible values: * An integer, where the integer corresponds to the number of worker processes. """), cfg.BoolOpt("query_placement_for_routed_network_aggregates", default=False, help=""" Enable the scheduler to filter compute hosts affined to routed network segment aggregates. See https://docs.openstack.org/neutron/latest/admin/config-routed-networks.html for details. """), cfg.BoolOpt("limit_tenants_to_placement_aggregate", default=False, help=""" Restrict tenants to specific placement aggregates. This setting causes the scheduler to look up a host aggregate with the metadata key of ``filter_tenant_id`` set to the project of an incoming request, and request results from placement be limited to that aggregate. Multiple tenants may be added to a single aggregate by appending a serial number to the key, such as ``filter_tenant_id:123``. The matching aggregate UUID must be mirrored in placement for proper operation. If the aggregate does not match one in placement, the result will be the same as not finding any suitable hosts for the request. If no host aggregate with the tenant id is found and ``[scheduler] placement_aggregate_required_for_tenants = False``, the request will not be restricted to only aggregates matching the tenant. If no host aggregate with the tenant id is found and ``[scheduler] placement_aggregate_required_for_tenants = True``, the result will be the same as not finding any suitable hosts for the request. Possible values: - A boolean value. 
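Example (illustrative; the matching host aggregate must also exist in placement and carry the ``filter_tenant_id`` metadata):

.. code-block:: ini

    [scheduler]
    limit_tenants_to_placement_aggregate = True
    placement_aggregate_required_for_tenants = False
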
Related options: - ``[scheduler] placement_aggregate_required_for_tenants`` """), cfg.BoolOpt("placement_aggregate_required_for_tenants", default=False, help=""" Require a placement aggregate association for all tenants. This setting, when limit_tenants_to_placement_aggregate=True, will control whether or not a tenant with no aggregate affinity will be allowed to schedule to any available node. If aggregates are used to limit some tenants but not all, then this should be False. If all tenants should be confined via aggregate, then this should be True to prevent them from receiving unrestricted scheduling to any available node. Possible values: - A boolean value. Related options: - ``[scheduler] placement_aggregate_required_for_tenants`` """), cfg.BoolOpt("query_placement_for_image_type_support", default=False, help=""" Use placement to determine host support for the instance's image type. This setting causes the scheduler to ask placement only for compute hosts that support the ``disk_format`` of the image used in the request. Possible values: - A boolean value. """), cfg.BoolOpt("enable_isolated_aggregate_filtering", default=False, help=""" Restrict use of aggregates to instances with matching metadata. This setting allows the scheduler to restrict hosts in aggregates based on matching required traits in the aggregate metadata and the instance flavor/image. If an aggregate is configured with a property with key ``trait:$TRAIT_NAME`` and value ``required``, the instance flavor extra_specs and/or image metadata must also contain ``trait:$TRAIT_NAME=required`` to be eligible to be scheduled to hosts in that aggregate. More technical details at https://docs.openstack.org/nova/latest/reference/isolate-aggregates.html Possible values: - A boolean value. """), cfg.BoolOpt("image_metadata_prefilter", default=False, help=""" Use placement to filter hosts based on image metadata. This setting causes the scheduler to transform well known image metadata properties into placement required traits to filter host based on image metadata. This feature requires host support and is currently supported by the following compute drivers: - ``libvirt.LibvirtDriver`` (since Ussuri (21.0.0)) Possible values: - A boolean value. Related options: - ``[compute] compute_driver`` """), ] filter_scheduler_group = cfg.OptGroup( name="filter_scheduler", title="Filter scheduler options") filter_scheduler_opts = [ cfg.IntOpt("host_subset_size", default=1, min=1, help=""" Size of subset of best hosts selected by scheduler. New instances will be scheduled on a host chosen randomly from a subset of the N best hosts, where N is the value set by this option. Setting this to a value greater than 1 will reduce the chance that multiple scheduler processes handling similar requests will select the same host, creating a potential race condition. By selecting a host randomly from the N hosts that best fit the request, the chance of a conflict is reduced. However, the higher you set this value, the less optimal the chosen host may be for a given request. Possible values: * An integer, where the integer corresponds to the size of a host subset. """), cfg.IntOpt("max_io_ops_per_host", default=8, min=0, help=""" The number of instances that can be actively performing IO on a host. Instances performing IO includes those in the following states: build, resize, snapshot, migrate, rescue, unshelve. Note that this setting only affects scheduling if the ``IoOpsFilter`` filter is enabled. 
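For example, to permit more concurrent IO-intensive operations per host (the value is illustrative; the ``IoOpsFilter`` must be in ``[filter_scheduler] enabled_filters`` for it to take effect):

.. code-block:: ini

    [filter_scheduler]
    max_io_ops_per_host = 16
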
Possible values: * An integer, where the integer corresponds to the max number of instances that can be actively performing IO on any given host. Related options: - ``[filter_scheduler] enabled_filters`` """), cfg.IntOpt("max_instances_per_host", default=50, min=1, help=""" Maximum number of instances that can exist on a host. If you need to limit the number of instances on any given host, set this option to the maximum number of instances you want to allow. The NumInstancesFilter and AggregateNumInstancesFilter will reject any host that has at least as many instances as this option's value. Note that this setting only affects scheduling if the ``NumInstancesFilter`` or ``AggregateNumInstancesFilter`` filter is enabled. Possible values: * An integer, where the integer corresponds to the max instances that can be scheduled on a host. Related options: - ``[filter_scheduler] enabled_filters`` """), cfg.BoolOpt("track_instance_changes", default=True, help=""" Enable querying of individual hosts for instance information. The scheduler may need information about the instances on a host in order to evaluate its filters and weighers. The most common need for this information is for the (anti-)affinity filters, which need to choose a host based on the instances already running on a host. If the configured filters and weighers do not need this information, disabling this option will improve performance. It may also be disabled when the tracking overhead proves too heavy, although this will cause classes requiring host usage data to query the database on each request instead. .. note:: In a multi-cell (v2) setup where the cell MQ is separated from the top-level, computes cannot directly communicate with the scheduler. Thus, this option cannot be enabled in that scenario. See also the ``[workarounds] disable_group_policy_check_upcall`` option. Related options: - ``[filter_scheduler] enabled_filters`` - ``[workarounds] disable_group_policy_check_upcall`` """), cfg.MultiStrOpt("available_filters", default=["nova.scheduler.filters.all_filters"], help=""" Filters that the scheduler can use. An unordered list of the filter classes the nova scheduler may apply. Only the filters specified in the ``[filter_scheduler] enabled_filters`` option will be used, but any filter appearing in that option must also be included in this list. By default, this is set to all filters that are included with nova. Possible values: * A list of zero or more strings, where each string corresponds to the name of a filter that may be used for selecting a host Related options: * ``[filter_scheduler] enabled_filters`` """), cfg.ListOpt("enabled_filters", # NOTE(artom) If we change the defaults here, we should also update # Tempest's scheduler_enabled_filters to keep the default values in # sync. default=[ "ComputeFilter", "ComputeCapabilitiesFilter", "ImagePropertiesFilter", "ServerGroupAntiAffinityFilter", "ServerGroupAffinityFilter", ], help=""" Filters that the scheduler will use. An ordered list of filter class names that will be used for filtering hosts. These filters will be applied in the order they are listed so place your most restrictive filters first to make the filtering process more efficient. All of the filters in this option *must* be present in the ``[scheduler_filter] available_filter`` option, or a ``SchedulerHostFilterNotFound`` exception will be raised. 
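For example, to extend the default filter list with the ``NumInstancesFilter`` and ``IoOpsFilter`` (the ordering is illustrative; place the most restrictive filters first):

.. code-block:: ini

    [filter_scheduler]
    enabled_filters = ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,NumInstancesFilter,IoOpsFilter
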
Possible values: * A list of zero or more strings, where each string corresponds to the name of a filter to be used for selecting a host Related options: - ``[filter_scheduler] available_filters`` """), cfg.ListOpt("weight_classes", default=["nova.scheduler.weights.all_weighers"], help=""" Weighers that the scheduler will use. Only hosts which pass the filters are weighed. The weight for any host starts at 0, and the weighers order these hosts by adding to or subtracting from the weight assigned by the previous weigher. Weights may become negative. An instance will be scheduled to one of the N most-weighted hosts, where N is ``[filter_scheduler] host_subset_size``. By default, this is set to all weighers that are included with Nova. Possible values: * A list of zero or more strings, where each string corresponds to the name of a weigher that will be used for selecting a host """), cfg.FloatOpt("ram_weight_multiplier", default=1.0, help=""" RAM weight multiplier ratio. This option determines how hosts with more or less available RAM are weighed. A positive value will result in the scheduler preferring hosts with more available RAM, and a negative number will result in the scheduler preferring hosts with less available RAM. Another way to look at it is that positive values for this option will tend to spread instances across many hosts, while negative values will tend to fill up (stack) hosts as much as possible before scheduling to a less-used host. The absolute value, whether positive or negative, controls how strong the RAM weigher is relative to other weighers. Note that this setting only affects scheduling if the ``RAMWeigher`` weigher is enabled. Possible values: * An integer or float value, where the value corresponds to the multiplier ratio for this weigher. Related options: * ``[filter_scheduler] weight_classes`` """), cfg.FloatOpt("cpu_weight_multiplier", default=1.0, help=""" CPU weight multiplier ratio. Multiplier used for weighting free vCPUs. Negative numbers indicate stacking rather than spreading. Note that this setting only affects scheduling if the ``CPUWeigher`` weigher is enabled. Possible values: * An integer or float value, where the value corresponds to the multiplier ratio for this weigher. Related options: * ``[filter_scheduler] weight_classes`` """), cfg.FloatOpt("disk_weight_multiplier", default=1.0, help=""" Disk weight multiplier ratio. Multiplier used for weighing free disk space. Negative numbers mean to stack vs spread. Note that this setting only affects scheduling if the ``DiskWeigher`` weigher is enabled. Possible values: * An integer or float value, where the value corresponds to the multiplier ratio for this weigher. """), cfg.FloatOpt("hypervisor_version_weight_multiplier", default=1.0, help=""" Hypervisor Version weight multiplier ratio. The multiplier is used for weighting hosts based on the reported hypervisor version. Negative numbers indicate preferring older hosts, the default is to prefer newer hosts to aid with upgrades. Possible values: * An integer or float value, where the value corresponds to the multiplier ratio for this weigher. Example: * Strongly prefer older hosts .. code-block:: ini [filter_scheduler] hypervisor_version_weight_multiplier=-1000 * Moderately prefer new hosts .. code-block:: ini [filter_scheduler] hypervisor_version_weight_multiplier=2.5 * Disable weigher influence .. 
code-block:: ini [filter_scheduler] hypervisor_version_weight_multiplier=0 Related options: * ``[filter_scheduler] weight_classes`` """), cfg.FloatOpt("num_instances_weight_multiplier", default=0.0, help=""" Number of instances weight multiplier ratio. The multiplier is used for weighting hosts based on the reported number of instances they have. Negative numbers indicate preferring hosts with fewer instances (i.e. choosing to spread instances), while positive numbers mean preferring hosts with more hosts (ie. choosing to pack). The default is 0.0 which means that you have to choose a strategy if you want to use it. Possible values: * An integer or float value, where the value corresponds to the multiplier ratio for this weigher. Example: * Strongly prefer to pack instances to hosts. .. code-block:: ini [filter_scheduler] num_instances_weight_multiplier=1000 * Softly prefer to spread instances between hosts. .. code-block:: ini [filter_scheduler] num_instances_weight_multiplier=1.0 * Disable weigher influence .. code-block:: ini [filter_scheduler] num_instances_weight_multiplier=0 Related options: * ``[filter_scheduler] weight_classes`` """), cfg.FloatOpt("io_ops_weight_multiplier", default=-1.0, help=""" IO operations weight multiplier ratio. This option determines how hosts with differing workloads are weighed. Negative values, such as the default, will result in the scheduler preferring hosts with lighter workloads whereas positive values will prefer hosts with heavier workloads. Another way to look at it is that positive values for this option will tend to schedule instances onto hosts that are already busy, while negative values will tend to distribute the workload across more hosts. The absolute value, whether positive or negative, controls how strong the io_ops weigher is relative to other weighers. Note that this setting only affects scheduling if the ``IoOpsWeigher`` weigher is enabled. Possible values: * An integer or float value, where the value corresponds to the multiplier ratio for this weigher. Related options: * ``[filter_scheduler] weight_classes`` """), cfg.FloatOpt("image_props_weight_multiplier", default=0.0, help=""" Image Properties weight multiplier ratio. The multiplier is used for weighting hosts based on the reported image properties for the instances they have. A positive value will favor hosts with the same image properties (packing strategy) while a negative value will follow a spread strategy that will favor hosts not already having instances with those image properties. The default value of the multiplier is 0, which disables the weigher. Possible values: * An integer or float value, where the value corresponds to the multiplier ratio for this weigher. Example: * Strongly prefer to pack instances with related image properties. .. code-block:: ini [filter_scheduler] image_props_weight_multiplier=1000 * Softly prefer to spread instances having same properties between hosts .. code-block:: ini [filter_scheduler] image_props_weight_multiplier=-1.0 * Disable weigher influence .. code-block:: ini [filter_scheduler] image_props_weight_multiplier=0 Related options: * ``[filter_scheduler] weight_classes`` """), cfg.ListOpt("image_props_weight_setting", default=[], help=""" Mapping of image properties to weight modifier. This setting specifies the properties to be weighed and the relative ratios for each property. 
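For example (the property names and ratios are illustrative; the format is described below, and the multiplier must be non-zero for the weigher to take effect):

.. code-block:: ini

    [filter_scheduler]
    image_props_weight_setting = os_distro=1,hw_machine_type=-1
    image_props_weight_multiplier = 1.0
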
This should be a list of key/value pairs, consisting of a series of one or more 'name=ratio' pairs, separated by commas, where ``name`` is the name of the property to be weighed, and ``ratio`` is the relative weight for that metric. Note that if the ratio is set to 0, the property value is ignored, and instead the weight will be set to the value of the ``[filter_scheduler] image_props_weight_multiplier`` option. As an example, let's consider the case where this option is set to: ``os_distro=1, hw_machine_type=-1`` If an instance would boot with an image having ``os_distro=windows`` and ``hw_machine_type=q35``, the final host weight will be: ``(nb_inst(``os_distro=windows``) * 1.0) + (nb_inst(``hw_machine_type=q35``) * -1)`` where nb_inst(``prop=value``) would give me the number of instances having an image where ``prop`` is set to ``value`` (eg. the number of instances running with ``os_distro=windows``) Possible values: * A list of zero or more key/value pairs separated by commas, where the key is a string representing the name of a property and the value is a numeric weight for that property. If any value is set to 0, the number of instances match is ignored for that specific property key. If no key/value pairs are provided, then the weigher will compare all the instance's images with the requested image properties, all of them weighed evenly. The overall host weight will be multiplied by the value of the ``[filter_scheduler] image_props_weight_multiplier`` option. Related options: * ``[filter_scheduler] image_props_weight_multiplier`` """), cfg.FloatOpt("pci_weight_multiplier", default=1.0, min=0.0, help=""" PCI device affinity weight multiplier. The PCI device affinity weighter computes a weighting based on the number of PCI devices on the host and the number of PCI devices requested by the instance. Note that this setting only affects scheduling if the ``PCIWeigher`` weigher and ``NUMATopologyFilter`` filter are enabled. Possible values: * A positive integer or float value, where the value corresponds to the multiplier ratio for this weigher. Related options: * ``[filter_scheduler] weight_classes`` """), cfg.FloatOpt("soft_affinity_weight_multiplier", default=1.0, min=0.0, help=""" Multiplier used for weighing hosts for group soft-affinity. Note that this setting only affects scheduling if the ``ServerGroupSoftAffinityWeigher`` weigher is enabled. Possible values: * A non-negative integer or float value, where the value corresponds to weight multiplier for hosts with group soft affinity. Related options: * ``[filter_scheduler] weight_classes`` """), cfg.FloatOpt( "soft_anti_affinity_weight_multiplier", default=1.0, min=0.0, help=""" Multiplier used for weighing hosts for group soft-anti-affinity. Note that this setting only affects scheduling if the ``ServerGroupSoftAntiAffinityWeigher`` weigher is enabled. Possible values: * A non-negative integer or float value, where the value corresponds to weight multiplier for hosts with group soft anti-affinity. Related options: * ``[filter_scheduler] weight_classes`` """), cfg.FloatOpt( "build_failure_weight_multiplier", default=1000000.0, help=""" Multiplier used for weighing hosts that have had recent build failures. This option determines how much weight is placed on a compute node with recent build failures. Build failures may indicate a failing, misconfigured, or otherwise ailing compute node, and avoiding it during scheduling may be beneficial. 
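For example, to opt out of penalizing hosts with recent build failures entirely (illustrative):

.. code-block:: ini

    [filter_scheduler]
    build_failure_weight_multiplier = 0.0
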
The weight is inversely proportional to the number of recent build failures the compute node has experienced. This value should be set to some high value to offset weight given by other enabled weighers due to available resources. To disable weighing compute hosts by the number of recent failures, set this to zero. Note that this setting only affects scheduling if the ``BuildFailureWeigher`` weigher is enabled. Possible values: * An integer or float value, where the value corresponds to the multiplier ratio for this weigher. Related options: * ``[compute] consecutive_build_service_disable_threshold`` - Must be nonzero for a compute to report data considered by this weigher. * ``[filter_scheduler] weight_classes`` """), cfg.FloatOpt( "cross_cell_move_weight_multiplier", default=1000000.0, help=""" Multiplier used for weighing hosts during a cross-cell move. This option determines how much weight is placed on a host which is within the same source cell when moving a server, for example during cross-cell resize. By default, when moving an instance, the scheduler will prefer hosts within the same cell since cross-cell move operations can be slower and riskier due to the complicated nature of cross-cell migrations. Note that this setting only affects scheduling if the ``CrossCellWeigher`` weigher is enabled. If your cloud is not configured to support cross-cell migrations, then this option has no effect. The value of this configuration option can be overridden per host aggregate by setting the aggregate metadata key with the same name (``cross_cell_move_weight_multiplier``). Possible values: * An integer or float value, where the value corresponds to the multiplier ratio for this weigher. Positive values mean the weigher will prefer hosts within the same cell in which the instance is currently running. Negative values mean the weigher will prefer hosts in *other* cells from which the instance is currently running. Related options: * ``[filter_scheduler] weight_classes`` """), cfg.BoolOpt( "shuffle_best_same_weighed_hosts", default=False, help=""" Enable spreading the instances between hosts with the same best weight. Enabling it is beneficial for cases when ``[filter_scheduler] host_subset_size`` is 1 (default), but there is a large number of hosts with same maximal weight. This scenario is common in Ironic deployments where there are typically many baremetal nodes with identical weights returned to the scheduler. In such case enabling this option will reduce contention and chances for rescheduling events. At the same time it will make the instance packing (even in unweighed case) less dense. """), cfg.StrOpt( "image_properties_default_architecture", choices=arch.ALL, help=""" The default architecture to be used when using the image properties filter. When using the ``ImagePropertiesFilter``, it is possible that you want to define a default architecture to make the user experience easier and avoid having something like x86_64 images landing on AARCH64 compute nodes because the user did not specify the ``hw_architecture`` property in Glance. Possible values: * CPU Architectures such as x86_64, aarch64, s390x. """), # TODO(mikal): replace this option with something involving host aggregates cfg.ListOpt("isolated_images", default=[], help=""" List of UUIDs for images that can only be run on certain hosts. If there is a need to restrict some images to only run on certain designated hosts, list those image UUIDs here. 
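For example (the image UUID and host names are purely illustrative):

.. code-block:: ini

    [filter_scheduler]
    isolated_images = 11111111-2222-3333-4444-555555555555
    isolated_hosts = isolated-host-01,isolated-host-02
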
Note that this setting only affects scheduling if the ``IsolatedHostsFilter`` filter is enabled. Possible values: * A list of UUID strings, where each string corresponds to the UUID of an image Related options: * ``[filter_scheduler] isolated_hosts`` * ``[filter_scheduler] restrict_isolated_hosts_to_isolated_images`` """), cfg.ListOpt("isolated_hosts", default=[], help=""" List of hosts that can only run certain images. If there is a need to restrict some images to only run on certain designated hosts, list those host names here. Note that this setting only affects scheduling if the ``IsolatedHostsFilter`` filter is enabled. Possible values: * A list of strings, where each string corresponds to the name of a host Related options: * ``[filter_scheduler] isolated_images`` * ``[filter_scheduler] restrict_isolated_hosts_to_isolated_images`` """), cfg.BoolOpt( "restrict_isolated_hosts_to_isolated_images", default=True, help=""" Prevent non-isolated images from being built on isolated hosts. Note that this setting only affects scheduling if the ``IsolatedHostsFilter`` filter is enabled. Even then, this option doesn't affect the behavior of requests for isolated images, which will *always* be restricted to isolated hosts. Related options: * ``[filter_scheduler] isolated_images`` * ``[filter_scheduler] isolated_hosts`` """), # TODO(stephenfin): Consider deprecating these next two options: they're # effectively useless now that we don't support arbitrary image metadata # properties cfg.StrOpt( "aggregate_image_properties_isolation_namespace", help=""" Image property namespace for use in the host aggregate. Images and hosts can be configured so that certain images can only be scheduled to hosts in a particular aggregate. This is done with metadata values set on the host aggregate that are identified by beginning with the value of this option. If the host is part of an aggregate with such a metadata key, the image in the request spec must have the value of that metadata in its properties in order for the scheduler to consider the host as acceptable. Note that this setting only affects scheduling if the ``AggregateImagePropertiesIsolation`` filter is enabled. Possible values: * A string, where the string corresponds to an image property namespace Related options: * ``[filter_scheduler] aggregate_image_properties_isolation_separator`` """), cfg.StrOpt( "aggregate_image_properties_isolation_separator", default=".", help=""" Separator character(s) for image property namespace and name. When using the aggregate_image_properties_isolation filter, the relevant metadata keys are prefixed with the namespace defined in the aggregate_image_properties_isolation_namespace configuration option plus a separator. This option defines the separator to be used. Note that this setting only affects scheduling if the ``AggregateImagePropertiesIsolation`` filter is enabled. Possible values: * A string, where the string corresponds to an image property namespace separator character Related options: * ``[filter_scheduler] aggregate_image_properties_isolation_namespace`` """), cfg.BoolOpt( "pci_in_placement", default=False, help=""" Enable scheduling and claiming PCI devices in Placement. This can be enabled after ``[pci]report_in_placement`` is enabled on all compute hosts. When enabled the scheduler queries Placement about the PCI device availability to select destination for a server with PCI request. The scheduler also allocates the selected PCI devices in Placement. 
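A minimal sketch of enabling the feature, assuming ``[pci] report_in_placement`` is already enabled on every compute host:

.. code-block:: ini

    [filter_scheduler]
    pci_in_placement = True
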
Note that this logic does not replace the PCIPassthroughFilter but extends it. Note that this config option needs to be set in the configuration of nova-api, nova-scheduler, and all nova-conductor services. * ``[pci] report_in_placement`` * ``[pci] alias`` * ``[pci] device_spec`` """), ] metrics_group = cfg.OptGroup( name="metrics", title="Metrics parameters", help=""" Configuration options for metrics Options under this group allow to adjust how values assigned to metrics are calculated. """) # TODO(stephenfin): This entire feature could probably be removed. It's not # tested and likely doesn't work with most drivers now. metrics_weight_opts = [ cfg.FloatOpt("weight_multiplier", default=1.0, help=""" Multiplier used for weighing hosts based on reported metrics. When using metrics to weight the suitability of a host, you can use this option to change how the calculated weight influences the weight assigned to a host as follows: * ``>1.0``: increases the effect of the metric on overall weight * ``1.0``: no change to the calculated weight * ``>0.0,<1.0``: reduces the effect of the metric on overall weight * ``0.0``: the metric value is ignored, and the value of the ``[metrics] weight_of_unavailable`` option is returned instead * ``>-1.0,<0.0``: the effect is reduced and reversed * ``-1.0``: the effect is reversed * ``<-1.0``: the effect is increased proportionally and reversed Possible values: * An integer or float value, where the value corresponds to the multiplier ratio for this weigher. Related options: * ``[filter_scheduler] weight_classes`` * ``[metrics] weight_of_unavailable`` """), cfg.ListOpt("weight_setting", default=[], help=""" Mapping of metric to weight modifier. This setting specifies the metrics to be weighed and the relative ratios for each metric. This should be a single string value, consisting of a series of one or more 'name=ratio' pairs, separated by commas, where ``name`` is the name of the metric to be weighed, and ``ratio`` is the relative weight for that metric. Note that if the ratio is set to 0, the metric value is ignored, and instead the weight will be set to the value of the ``[metrics] weight_of_unavailable`` option. As an example, let's consider the case where this option is set to: ``name1=1.0, name2=-1.3`` The final weight will be: ``(name1.value * 1.0) + (name2.value * -1.3)`` Possible values: * A list of zero or more key/value pairs separated by commas, where the key is a string representing the name of a metric and the value is a numeric weight for that metric. If any value is set to 0, the value is ignored and the weight will be set to the value of the ``[metrics] weight_of_unavailable`` option. Related options: * ``[metrics] weight_of_unavailable`` """), cfg.BoolOpt("required", default=True, help=""" Whether metrics are required. This setting determines how any unavailable metrics are treated. If this option is set to True, any hosts for which a metric is unavailable will raise an exception, so it is recommended to also use the MetricFilter to filter out those hosts before weighing. Possible values: * A boolean value, where False ensures any metric being unavailable for a host will set the host weight to ``[metrics] weight_of_unavailable``. Related options: * ``[metrics] weight_of_unavailable`` """), cfg.FloatOpt("weight_of_unavailable", default=float(-10000.0), help=""" Default weight for unavailable metrics. 
When any of the following conditions are met, this value will be used in place of any actual metric value: - One of the metrics named in ``[metrics] weight_setting`` is not available for a host, and the value of ``required`` is ``False``. - The ratio specified for a metric in ``[metrics] weight_setting`` is 0. - The ``[metrics] weight_multiplier`` option is set to 0. Possible values: * An integer or float value, where the value corresponds to the multiplier ratio for this weigher. Related options: * ``[metrics] weight_setting`` * ``[metrics] required`` * ``[metrics] weight_multiplier`` """), ] def register_opts(conf): conf.register_group(scheduler_group) conf.register_opts(scheduler_opts, group=scheduler_group) conf.register_group(filter_scheduler_group) conf.register_opts(filter_scheduler_opts, group=filter_scheduler_group) conf.register_group(metrics_group) conf.register_opts(metrics_weight_opts, group=metrics_group) def list_opts(): return { scheduler_group: scheduler_opts, filter_scheduler_group: filter_scheduler_opts, metrics_group: metrics_weight_opts, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/serial_console.py0000664000175000017500000001025700000000000020032 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg DEFAULT_PORT_RANGE = '10000:20000' serial_opt_group = cfg.OptGroup("serial_console", title="The serial console feature", help=""" The serial console feature allows you to connect to a guest in case a graphical console like VNC, RDP or SPICE is not available. This is only currently supported for the libvirt, Ironic and hyper-v drivers.""") ALL_OPTS = [ cfg.BoolOpt('enabled', default=False, help=""" Enable the serial console feature. In order to use this feature, the service ``nova-serialproxy`` needs to run. This service is typically executed on the controller node. """), cfg.StrOpt('port_range', default=DEFAULT_PORT_RANGE, regex=r'^\d+:\d+$', help=r""" A range of TCP ports a guest can use for its backend. Each instance which gets created will use one port out of this range. If the range is not big enough to provide another port for an new instance, this instance won't get launched. Possible values: * Each string which passes the regex ``^\d+:\d+$`` For example ``10000:20000``. Be sure that the first port number is lower than the second port number and that both are in range from 0 to 65535. """), # TODO(macsz) check if WS protocol is still being used cfg.URIOpt('base_url', default='ws://127.0.0.1:6083/', help=""" The URL an end user would use to connect to the ``nova-serialproxy`` service. The ``nova-serialproxy`` service is called with this token enriched URL and establishes the connection to the proper instance. Related options: * The IP address must be identical to the address to which the ``nova-serialproxy`` service is listening (see option ``serialproxy_host`` in this section). 
* The port must be the same as in the option ``serialproxy_port`` of this section. * If you choose to use a secured websocket connection, then start this option with ``wss://`` instead of the unsecured ``ws://``. The options ``cert`` and ``key`` in the ``[DEFAULT]`` section have to be set for that. """), cfg.StrOpt('proxyclient_address', default='127.0.0.1', help=""" The IP address to which proxy clients (like ``nova-serialproxy``) should connect to get the serial console of an instance. This is typically the IP address of the host of a ``nova-compute`` service. """), ] CLI_OPTS = [ cfg.StrOpt('serialproxy_host', default='0.0.0.0', help=""" The IP address which is used by the ``nova-serialproxy`` service to listen for incoming requests. The ``nova-serialproxy`` service listens on this IP address for incoming connection requests to instances which expose serial console. Related options: * Ensure that this is the same IP address which is defined in the option ``base_url`` of this section or use ``0.0.0.0`` to listen on all addresses. """), cfg.PortOpt('serialproxy_port', default=6083, help=""" The port number which is used by the ``nova-serialproxy`` service to listen for incoming requests. The ``nova-serialproxy`` service listens on this port number for incoming connection requests to instances which expose serial console. Related options: * Ensure that this is the same port number which is defined in the option ``base_url`` of this section. """) ] ALL_OPTS.extend(CLI_OPTS) def register_opts(conf): conf.register_group(serial_opt_group) conf.register_opts(ALL_OPTS, group=serial_opt_group) def register_cli_opts(conf): conf.register_group(serial_opt_group) conf.register_cli_opts(CLI_OPTS, serial_opt_group) def list_opts(): return {serial_opt_group: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/service.py0000664000175000017500000000576000000000000016474 0ustar00zuulzuul00000000000000# needs:check_deprecation_status # Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg service_opts = [ # TODO(johngarbutt) we need a better default and minimum, in a backwards # compatible way for report_interval cfg.IntOpt('report_interval', default=10, help=""" Number of seconds indicating how frequently the state of services on a given hypervisor is reported. Nova needs to know this to determine the overall health of the deployment. Related Options: * service_down_time report_interval should be less than service_down_time. If service_down_time is less than report_interval, services will routinely be considered down, because they report in too rarely. 
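For example, the defaults keep the report interval well below the service down time (the values are illustrative):

.. code-block:: ini

    [DEFAULT]
    report_interval = 10
    service_down_time = 60
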
"""), # TODO(johngarbutt) the code enforces the min value here, but we could # do to add some min value here, once we sort out report_interval cfg.IntOpt('service_down_time', default=60, help=""" Maximum time in seconds since last check-in for up service Each compute node periodically updates their database status based on the specified report interval. If the compute node hasn't updated the status for more than service_down_time, then the compute node is considered down. Related Options: * report_interval (service_down_time should not be less than report_interval) """), cfg.BoolOpt('periodic_enable', default=True, help=""" Enable periodic tasks. If set to true, this option allows services to periodically run tasks on the manager. In case of running multiple schedulers or conductors you may want to run periodic tasks on only one host - in this case disable this option for all hosts but one. """), cfg.IntOpt('periodic_fuzzy_delay', default=60, min=0, help=""" Number of seconds to randomly delay when starting the periodic task scheduler to reduce stampeding. When compute workers are restarted in unison across a cluster, they all end up running the periodic tasks at the same time causing problems for the external services. To mitigate this behavior, periodic_fuzzy_delay option allows you to introduce a random initial delay when starting the periodic task scheduler. Possible Values: * Any positive integer (in seconds) * 0 : disable the random delay """), ] def register_opts(conf): conf.register_opts(service_opts) def list_opts(): return {'DEFAULT': service_opts} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/service_token.py0000664000175000017500000000461400000000000017671 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading as ks_loading from oslo_config import cfg SERVICE_USER_GROUP = 'service_user' service_user = cfg.OptGroup( SERVICE_USER_GROUP, title = 'Service token authentication type options', help = """ Configuration options for service to service authentication using a service token. These options allow sending a service token along with the user's token when contacting external REST APIs. """ ) service_user_opts = [ cfg.BoolOpt('send_service_user_token', default=False, help=""" When True, if sending a user token to a REST API, also send a service token. Nova often reuses the user token provided to the nova-api to talk to other REST APIs, such as Cinder, Glance and Neutron. It is possible that while the user token was valid when the request was made to Nova, the token may expire before it reaches the other service. To avoid any failures, and to make it clear it is Nova calling the service on the user's behalf, we include a service token along with the user token. Should the user's token have expired, a valid service token ensures the REST API request will still be accepted by the keystone middleware. 
"""), ] def register_opts(conf): conf.register_group(service_user) conf.register_opts(service_user_opts, group=service_user) ks_loading.register_session_conf_options(conf, SERVICE_USER_GROUP) ks_loading.register_auth_conf_options(conf, SERVICE_USER_GROUP) def list_opts(): return { service_user: ( service_user_opts + ks_loading.get_session_conf_options() + ks_loading.get_auth_common_conf_options() + ks_loading.get_auth_plugin_conf_options('password') + ks_loading.get_auth_plugin_conf_options('v2password') + ks_loading.get_auth_plugin_conf_options('v3password')) } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/servicegroup.py0000664000175000017500000000323300000000000017542 0ustar00zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg SERVICEGROUP_OPTS = [ cfg.StrOpt('servicegroup_driver', default='db', choices=[ ('db', 'Database ServiceGroup driver'), ('mc', 'Memcache ServiceGroup driver'), ], help=""" This option specifies the driver to be used for the servicegroup service. ServiceGroup API in nova enables checking status of a compute node. When a compute worker running the nova-compute daemon starts, it calls the join API to join the compute group. Services like nova scheduler can query the ServiceGroup API to check if a node is alive. Internally, the ServiceGroup client driver automatically updates the compute worker status. There are multiple backend implementations for this service: Database ServiceGroup driver and Memcache ServiceGroup driver. Related Options: * ``service_down_time`` (maximum time since last check-in for up service) """), ] def register_opts(conf): conf.register_opts(SERVICEGROUP_OPTS) def list_opts(): return {'DEFAULT': SERVICEGROUP_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/spice.py0000664000175000017500000002042700000000000016134 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg spice_opt_group = cfg.OptGroup('spice', title="SPICE console features", help=""" SPICE console feature allows you to connect to a guest virtual machine. SPICE is a replacement for fairly limited VNC protocol. 
Following requirements must be met in order to use SPICE: * Virtualization driver must be libvirt * spice.enabled set to True * vnc.enabled set to False * update html5proxy_base_url * update server_proxyclient_address """) CLI_OPTS = [ cfg.HostAddressOpt('html5proxy_host', default='0.0.0.0', help=""" IP address or a hostname on which the ``nova-spicehtml5proxy`` service listens for incoming requests. Related options: * This option depends on the ``html5proxy_base_url`` option. The ``nova-spicehtml5proxy`` service must be listening on a host that is accessible from the HTML5 client. """), cfg.PortOpt('html5proxy_port', default=6082, help=""" Port on which the ``nova-spicehtml5proxy`` service listens for incoming requests. Related options: * This option depends on the ``html5proxy_base_url`` option. The ``nova-spicehtml5proxy`` service must be listening on a port that is accessible from the HTML5 client. """) ] ALL_OPTS = [ cfg.BoolOpt('enabled', default=False, help=""" Enable SPICE related features. Related options: * VNC must be explicitly disabled to get access to the SPICE console. Set the enabled option to False in the [vnc] section to disable the VNC console. """), cfg.BoolOpt('agent_enabled', default=True, help=""" Enable the SPICE guest agent support on the instances. The Spice agent works with the Spice protocol to offer a better guest console experience. However, the Spice console can still be used without the Spice Agent. With the Spice agent installed the following features are enabled: * Copy & Paste of text and images between the guest and client machine * Automatic adjustment of resolution when the client screen changes - e.g. if you make the Spice console full screen the guest resolution will adjust to match it rather than letterboxing. * Better mouse integration - The mouse can be captured and released without needing to click inside the console or press keys to release it. The performance of mouse movement is also improved. """), cfg.StrOpt('image_compression', advanced=True, choices=[ ('auto_glz', 'enable image compression mode to choose between glz ' 'and quic algorithm, based on image properties'), ('auto_lz', 'enable image compression mode to choose between lz ' 'and quic algorithm, based on image properties'), ('quic', 'enable image compression based on the SFALIC algorithm'), ('glz', 'enable image compression using lz with history based ' 'global dictionary'), ('lz', 'enable image compression with the Lempel-Ziv algorithm'), ('off', 'disable image compression') ], help=""" Configure the SPICE image compression (lossless). """), cfg.StrOpt('jpeg_compression', advanced=True, choices=[ ('auto', 'enable JPEG image compression automatically'), ('never', 'disable JPEG image compression'), ('always', 'enable JPEG image compression') ], help=""" Configure the SPICE wan image compression (lossy for slow links). """), cfg.StrOpt('zlib_compression', advanced=True, choices=[ ('auto', 'enable zlib image compression automatically'), ('never', 'disable zlib image compression'), ('always', 'enable zlib image compression') ], help=""" Configure the SPICE wan image compression (lossless for slow links). """), cfg.BoolOpt('playback_compression', advanced=True, help=""" Enable the SPICE audio stream compression (using celt). 
"""), cfg.StrOpt('streaming_mode', advanced=True, choices=[ ('filter', 'SPICE server adds additional filters to decide if ' 'video streaming should be activated'), ('all', 'any fast-refreshing window can be encoded into a video ' 'stream'), ('off', 'no video detection and (lossy) compression is performed') ], help=""" Configure the SPICE video stream detection and (lossy) compression. """), cfg.URIOpt('html5proxy_base_url', default='http://127.0.0.1:6082/spice_auto.html', help=""" Location of the SPICE HTML5 console proxy. End user would use this URL to connect to the `nova-spicehtml5proxy`` service. This service will forward request to the console of an instance. In order to use SPICE console, the service ``nova-spicehtml5proxy`` should be running. This service is typically launched on the controller node. Possible values: * Must be a valid URL of the form: ``http://host:port/spice_auto.html`` where host is the node running ``nova-spicehtml5proxy`` and the port is typically 6082. Consider not using default value as it is not well defined for any real deployment. Related options: * This option depends on ``html5proxy_host`` and ``html5proxy_port`` options. The access URL returned by the compute node must have the host and port where the ``nova-spicehtml5proxy`` service is listening. """), cfg.URIOpt('spice_direct_proxy_base_url', default='http://127.0.0.1:13002/nova', help=""" Location of a SPICE protocol native console proxy. A user can retrieve a virt-viewer style .vv connection configuration file by accessing this URL with the attached token when a console is created. Possible values: * Must be a valid URL of the form: ``http://host:port/nova`` where host is the node running the SPICE protocol native proxy and the port is typically 13002. Note that the port component is optional if you are using the default port for HTTP or HTTPS. Consider not using the default value as it is not well defined for any real deployment. """), cfg.StrOpt('server_listen', default='127.0.0.1', help=""" The address where the SPICE server running on the instances should listen. Typically, the ``nova-spicehtml5proxy`` proxy client runs on the controller node and connects over the private network to this address on the compute node(s). Possible values: * IP address to listen on. """), cfg.StrOpt('server_proxyclient_address', default='127.0.0.1', help=""" The address used by ``nova-spicehtml5proxy`` client to connect to instance console. Typically, the ``nova-spicehtml5proxy`` proxy client runs on the controller node and connects over the private network to this address on the compute node(s). Possible values: * Any valid IP address on the compute node. Related options: * This option depends on the ``server_listen`` option. The proxy client must be able to access the address specified in ``server_listen`` using the value of this option. """), cfg.BoolOpt('require_secure', default=False, help=""" Whether to require secure TLS connections to SPICE consoles. If you're providing direct access to SPICE consoles instead of using the HTML5 proxy, you may wish those connections to be encrypted. If so, set this value to True. Note that use of secure consoles requires that you setup TLS certificates on each hypervisor. Possible values: * False: console traffic is not encrypted. * True: console traffic is required to be protected by TLS. 
"""), ] ALL_OPTS.extend(CLI_OPTS) def register_opts(conf): conf.register_opts(ALL_OPTS, group=spice_opt_group) def register_cli_opts(conf): conf.register_cli_opts(CLI_OPTS, group=spice_opt_group) def list_opts(): return {spice_opt_group: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/upgrade_levels.py0000664000175000017500000000670300000000000020033 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg upgrade_group = cfg.OptGroup('upgrade_levels', title='Upgrade levels Options', help=""" upgrade_levels options are used to set version cap for RPC messages sent between different nova services. By default all services send messages using the latest version they know about. The compute upgrade level is an important part of rolling upgrades where old and new nova-compute services run side by side. The other options can largely be ignored, and are only kept to help with a possible future backport issue. """) # TODO(sneti): Add default=auto for compute upgrade_levels_opts = [ cfg.StrOpt('compute', help=""" Compute RPC API version cap. By default, we always send messages using the most recent version the client knows about. Where you have old and new compute services running, you should set this to the lowest deployed version. This is to guarantee that all services never send messages that one of the compute nodes can't understand. Note that we only support upgrading from release N to release N+1. Set this option to "auto" if you want to let the compute RPC module automatically determine what version to use based on the service versions in the deployment. Possible values: * By default send the latest version the client knows about * 'auto': Automatically determines what version to use based on the service versions in the deployment. * A string representing a version number in the format 'N.N'; for example, possible values might be '1.12' or '2.0'. * An OpenStack release name, in lower case, such as 'mitaka' or 'liberty'. """), cfg.StrOpt("scheduler", help=""" Scheduler RPC API version cap. Possible values: * By default send the latest version the client knows about * A string representing a version number in the format 'N.N'; for example, possible values might be '1.12' or '2.0'. * An OpenStack release name, in lower case, such as 'mitaka' or 'liberty'. """), cfg.StrOpt('conductor', help=""" Conductor RPC API version cap. Possible values: * By default send the latest version the client knows about * A string representing a version number in the format 'N.N'; for example, possible values might be '1.12' or '2.0'. * An OpenStack release name, in lower case, such as 'mitaka' or 'liberty'. """), cfg.StrOpt('baseapi', help=""" Base API RPC API version cap. 
Possible values: * By default send the latest version the client knows about * A string representing a version number in the format 'N.N'; for example, possible values might be '1.12' or '2.0'. * An OpenStack release name, in lower case, such as 'mitaka' or 'liberty'. """) ] def register_opts(conf): conf.register_group(upgrade_group) conf.register_opts(upgrade_levels_opts, group=upgrade_group) def list_opts(): return {upgrade_group: upgrade_levels_opts} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/utils.py0000664000175000017500000000763000000000000016172 0ustar00zuulzuul00000000000000# Copyright 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common utilities for conf providers. This module does not provide any actual conf options. """ from keystoneauth1 import loading as ks_loading from oslo_config import cfg _ADAPTER_VERSION_OPTS = ('version', 'min_version', 'max_version') def get_ksa_adapter_opts(default_service_type, deprecated_opts=None): """Get auth, Session, and Adapter conf options from keystonauth1.loading. :param default_service_type: Default for the service_type conf option on the Adapter. :param deprecated_opts: dict of deprecated opts to register with the ksa Adapter opts. Works the same as the deprecated_opts kwarg to: keystoneauth1.loading.session.Session.register_conf_options :return: List of cfg.Opts. """ opts = ks_loading.get_adapter_conf_options(include_deprecated=False, deprecated_opts=deprecated_opts) for opt in opts[:]: # Remove version-related opts. Required/supported versions are # something the code knows about, not the operator. if opt.dest in _ADAPTER_VERSION_OPTS: opts.remove(opt) # Override defaults that make sense for nova cfg.set_defaults(opts, valid_interfaces=['internal', 'public'], service_type=default_service_type) return opts def _dummy_opt(name): # A config option that can't be set by the user, so it behaves as if it's # ignored; but consuming code may expect it to be present in a conf group. return cfg.Opt(name, type=lambda x: None) def register_ksa_opts(conf, group, default_service_type, include_auth=True, deprecated_opts=None): """Register keystoneauth auth, Session, and Adapter opts. :param conf: oslo_config.cfg.CONF in which to register the options :param group: Conf group, or string name thereof, in which to register the options. :param default_service_type: Default for the service_type conf option on the Adapter. :param include_auth: For service types where Nova is acting on behalf of the user, auth should come from the user context. In those cases, set this arg to False to avoid registering ksa auth options. :param deprecated_opts: dict of deprecated opts to register with the ksa Session or Adapter opts. See docstring for the deprecated_opts param of: keystoneauth1.loading.session.Session.register_conf_options """ # ksa register methods need the group name as a string. oslo doesn't care. 
group = getattr(group, 'name', group) ks_loading.register_session_conf_options( conf, group, deprecated_opts=deprecated_opts) if include_auth: ks_loading.register_auth_conf_options(conf, group) conf.register_opts(get_ksa_adapter_opts( default_service_type, deprecated_opts=deprecated_opts), group=group) # Have to register dummies for the version-related opts we removed for name in _ADAPTER_VERSION_OPTS: conf.register_opt(_dummy_opt(name), group=group) # NOTE(efried): Required for docs build. def list_opts(): return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/vendordata.py0000664000175000017500000000306500000000000017157 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading as ks_loading from oslo_config import cfg vendordata_group = cfg.OptGroup('vendordata_dynamic_auth', title='Vendordata dynamic fetch auth options', help=""" Options within this group control the authentication of the vendordata subsystem of the metadata API server (and config drive) with external systems. """) def register_opts(conf): conf.register_group(vendordata_group) ks_loading.register_session_conf_options(conf, vendordata_group.name) ks_loading.register_auth_conf_options(conf, vendordata_group.name) def list_opts(): return { vendordata_group: ( ks_loading.get_session_conf_options() + ks_loading.get_auth_common_conf_options() + ks_loading.get_auth_plugin_conf_options('password') + ks_loading.get_auth_plugin_conf_options('v2password') + ks_loading.get_auth_plugin_conf_options('v3password') ) } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/vmware.py0000664000175000017500000002201400000000000016324 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg vmware_group = cfg.OptGroup('vmware', title='VMWare Options', help=""" Related options: Following options must be set in order to launch VMware-based virtual machines. * compute_driver: Must use vmwareapi.VMwareVCDriver. * vmware.host_username * vmware.host_password * vmware.cluster_name """) vmwareapi_vif_opts = [ cfg.StrOpt('integration_bridge', help=""" This option should be configured only when using the NSX-MH Neutron plugin. This is the name of the integration bridge on the ESXi server or host. 
This should not be set for any other Neutron plugin. Hence the default value is not set. Possible values: * Any valid string representing the name of the integration bridge """), ] vmware_utils_opts = [ cfg.IntOpt('console_delay_seconds', min=0, help=""" Set this value if affected by an increased network latency causing repeated characters when typing in a remote console. """), # NOTE(takashin): 'serial_port_service_uri' can be non URI format. # See https://opendev.org/x/vmware-vspc/src/branch/master/README.rst cfg.StrOpt('serial_port_service_uri', help=""" Identifies the remote system where the serial port traffic will be sent. This option adds a virtual serial port which sends console output to a configurable service URI. At the service URI address there will be virtual serial port concentrator that will collect console logs. If this is not set, no serial ports will be added to the created VMs. Possible values: * Any valid URI """), cfg.URIOpt('serial_port_proxy_uri', schemes=['telnet', 'telnets'], help=""" Identifies a proxy service that provides network access to the serial_port_service_uri. Possible values: * Any valid URI (The scheme is 'telnet' or 'telnets'.) Related options: This option is ignored if serial_port_service_uri is not specified. * serial_port_service_uri """), cfg.StrOpt('serial_log_dir', default='/opt/vmware/vspc', help=""" Specifies the directory where the Virtual Serial Port Concentrator is storing console log files. It should match the 'serial_log_dir' config value of VSPC. """), ] vmwareapi_opts = [ cfg.HostAddressOpt('host_ip', help=""" Hostname or IP address for connection to VMware vCenter host."""), cfg.PortOpt('host_port', default=443, help="Port for connection to VMware vCenter host."), cfg.StrOpt('host_username', help="Username for connection to VMware vCenter host."), cfg.StrOpt('host_password', secret=True, help="Password for connection to VMware vCenter host."), cfg.StrOpt('ca_file', help=""" Specifies the CA bundle file to be used in verifying the vCenter server certificate. """), cfg.BoolOpt('insecure', default=False, help=""" If true, the vCenter server certificate is not verified. If false, then the default CA truststore is used for verification. Related options: * ca_file: This option is ignored if "ca_file" is set. """), cfg.StrOpt('cluster_name', help="Name of a VMware Cluster ComputeResource."), cfg.StrOpt('datastore_regex', help=""" Regular expression pattern to match the name of datastore. The datastore_regex setting specifies the datastores to use with Compute. For example, datastore_regex="nas.*" selects all the data stores that have a name starting with "nas". NOTE: If no regex is given, it just picks the datastore with the most freespace. Possible values: * Any matching regular expression to a datastore must be given """), cfg.FloatOpt('task_poll_interval', default=0.5, help=""" Time interval in seconds to poll remote tasks invoked on VMware VC server. """), cfg.IntOpt('api_retry_count', min=0, default=10, help=""" Number of times VMware vCenter server API must be retried on connection failures, e.g. socket error, etc. """), cfg.PortOpt('vnc_port', default=5900, help=""" This option specifies VNC starting port. Every VM created by ESX host has an option of enabling VNC client for remote connection. Above option 'vnc_port' helps you to set default starting port for the VNC client. Possible values: * Any valid port number within 5900 -(5900 + vnc_port_total) Related options: Below options should be set to enable VNC client. 
* vnc.enabled = True * vnc_port_total """), cfg.IntOpt('vnc_port_total', min=0, default=10000, help=""" Total number of VNC ports. """), cfg.StrOpt('vnc_keymap', default='en-us', help=""" Keymap for VNC. The keyboard mapping (keymap) determines which keyboard layout a VNC session should use by default. Possible values: * A keyboard layout which is supported by the underlying hypervisor on this node. This is usually an 'IETF language tag' (for example 'en-us'). """), cfg.BoolOpt('use_linked_clone', default=True, help=""" This option enables/disables the use of linked clone. The ESX hypervisor requires a copy of the VMDK file in order to boot up a virtual machine. The compute driver must download the VMDK via HTTP from the OpenStack Image service to a datastore that is visible to the hypervisor and cache it. Subsequent virtual machines that need the VMDK use the cached version and don't have to copy the file again from the OpenStack Image service. If set to false, even with a cached VMDK, there is still a copy operation from the cache location to the hypervisor file directory in the shared datastore. If set to true, the above copy operation is avoided as it creates copy of the virtual machine that shares virtual disks with its parent VM. """), cfg.IntOpt('connection_pool_size', min=10, default=10, help=""" This option sets the http connection pool size The connection pool size is the maximum number of connections from nova to vSphere. It should only be increased if there are warnings indicating that the connection pool is full, otherwise, the default should suffice. """) ] spbm_opts = [ cfg.BoolOpt('pbm_enabled', default=False, help=""" This option enables or disables storage policy based placement of instances. Related options: * pbm_default_policy """), cfg.StrOpt('pbm_wsdl_location', help=""" This option specifies the PBM service WSDL file location URL. Setting this will disable storage policy based placement of instances. Possible values: * Any valid file path e.g file:///opt/SDK/spbm/wsdl/pbmService.wsdl """), cfg.StrOpt('pbm_default_policy', help=""" This option specifies the default policy to be used. If pbm_enabled is set and there is no defined storage policy for the specific request, then this policy will be used. Possible values: * Any valid storage policy such as VSAN default storage policy Related options: * pbm_enabled """), ] vmops_opts = [ cfg.IntOpt('maximum_objects', min=0, default=100, help=""" This option specifies the limit on the maximum number of objects to return in a single result. A positive value will cause the operation to suspend the retrieval when the count of objects reaches the specified limit. The server may still limit the count to something less than the configured value. Any remaining objects may be retrieved with additional requests. """), cfg.StrOpt('cache_prefix', help=""" This option adds a prefix to the folder where cached images are stored This is not the full path - just a folder prefix. This should only be used when a datastore cache is shared between compute nodes. Note: This should only be used when the compute nodes are running on same host or they have a shared file system. 
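The prefix is simply prepended to the name of the shared cache folder,
roughly like this (an illustrative sketch of the composition, not the
driver's exact code)::

    import nova.conf

    CONF = nova.conf.CONF

    def base_cache_folder():
        # e.g. cache_prefix='nova1' plus the default
        # [image_cache]/subdirectory_name='_base' gives a folder named
        # 'nova1_base' on the shared datastore.
        prefix = CONF.vmware.cache_prefix or ''
        return prefix + CONF.image_cache.subdirectory_name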
Possible values: * Any string representing the cache prefix to the folder """) ] ALL_VMWARE_OPTS = (vmwareapi_vif_opts + vmware_utils_opts + vmwareapi_opts + spbm_opts + vmops_opts) def register_opts(conf): conf.register_group(vmware_group) conf.register_opts(ALL_VMWARE_OPTS, group=vmware_group) def list_opts(): return {vmware_group: ALL_VMWARE_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/vnc.py0000664000175000017500000001340400000000000015614 0ustar00zuulzuul00000000000000# Copyright (c) 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_config import types vnc_group = cfg.OptGroup( 'vnc', title='VNC options', help=""" Virtual Network Computer (VNC) can be used to provide remote desktop console access to instances for tenants and/or administrators.""") ALL_OPTS = [ cfg.BoolOpt( 'enabled', default=True, deprecated_group='DEFAULT', deprecated_name='vnc_enabled', help=""" Enable VNC related features. Guests will get created with graphical devices to support this. Clients (for example Horizon) can then establish a VNC connection to the guest. """), cfg.HostAddressOpt( 'server_listen', default='127.0.0.1', help=""" The IP address or hostname on which an instance should listen to for incoming VNC connection requests on this node. """), cfg.HostAddressOpt( 'server_proxyclient_address', default='127.0.0.1', help=""" Private, internal IP address or hostname of VNC console proxy. The VNC proxy is an OpenStack component that enables compute service users to access their instances through VNC clients. This option sets the private address to which proxy clients, such as ``nova-novncproxy``, should connect to. """), cfg.URIOpt( 'novncproxy_base_url', default='http://127.0.0.1:6080/vnc_auto.html', deprecated_group='DEFAULT', help=""" Public address of noVNC VNC console proxy. The VNC proxy is an OpenStack component that enables compute service users to access their instances through VNC clients. noVNC provides VNC support through a websocket-based client. This option sets the public base URL to which client systems will connect. noVNC clients can use this address to connect to the noVNC instance and, by extension, the VNC sessions. If using noVNC >= 1.0.0, you should use ``vnc_lite.html`` instead of ``vnc_auto.html``. You can also supply extra request arguments which will be passed to the backend. This might be useful to move console URL to subpath, for example: ``http://127.0.0.1/novnc/vnc_auto.html?path=novnc`` Related options: * novncproxy_host * novncproxy_port """), ] CLI_OPTS = [ cfg.StrOpt( 'novncproxy_host', default='0.0.0.0', deprecated_group='DEFAULT', help=""" IP address that the noVNC console proxy should bind to. The VNC proxy is an OpenStack component that enables compute service users to access their instances through VNC clients. noVNC provides VNC support through a websocket-based client. 
This option sets the private address to which the noVNC console proxy service should bind to. Related options: * novncproxy_port * novncproxy_base_url """), cfg.PortOpt( 'novncproxy_port', default=6080, deprecated_group='DEFAULT', help=""" Port that the noVNC console proxy should bind to. The VNC proxy is an OpenStack component that enables compute service users to access their instances through VNC clients. noVNC provides VNC support through a websocket-based client. This option sets the private port to which the noVNC console proxy service should bind to. Related options: * novncproxy_host * novncproxy_base_url """), cfg.ListOpt( 'auth_schemes', item_type=types.String(choices=( ('none', 'Allow connection without authentication'), ('vencrypt', 'Use VeNCrypt authentication scheme'), )), default=['none'], help=""" The authentication schemes to use with the compute node. Control what RFB authentication schemes are permitted for connections between the proxy and the compute host. If multiple schemes are enabled, the first matching scheme will be used, thus the strongest schemes should be listed first. Related options: * ``[vnc]vencrypt_client_key``, ``[vnc]vencrypt_client_cert``: must also be set """), cfg.StrOpt( 'vencrypt_client_key', help="""The path to the client certificate PEM file (for x509) The fully qualified path to a PEM file containing the private key which the VNC proxy server presents to the compute node during VNC authentication. Related options: * ``vnc.auth_schemes``: must include ``vencrypt`` * ``vnc.vencrypt_client_cert``: must also be set """), cfg.StrOpt( 'vencrypt_client_cert', help="""The path to the client key file (for x509) The fully qualified path to a PEM file containing the x509 certificate which the VNC proxy server presents to the compute node during VNC authentication. Related options: * ``vnc.auth_schemes``: must include ``vencrypt`` * ``vnc.vencrypt_client_key``: must also be set """), cfg.StrOpt( 'vencrypt_ca_certs', help="""The path to the CA certificate PEM file The fully qualified path to a PEM file containing one or more x509 certificates for the certificate authorities used by the compute node VNC server. Related options: * ``vnc.auth_schemes``: must include ``vencrypt`` """), ] ALL_OPTS.extend(CLI_OPTS) def register_opts(conf): conf.register_group(vnc_group) conf.register_opts(ALL_OPTS, group=vnc_group) def register_cli_opts(conf): conf.register_cli_opts(CLI_OPTS, group=vnc_group) def list_opts(): return {vnc_group: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/workarounds.py0000664000175000017500000004531200000000000017407 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The 'workarounds' group is for very specific reasons. If you're: - Working around an issue in a system tool (e.g. libvirt or qemu) where the fix is in flight/discussed in that community. 
- The tool can be/is fixed in some distributions and rather than patch the code those distributions can trivially set a config option to get the "correct" behavior. Then this is a good place for your workaround. .. warning:: Please use with care! Document the BugID that your workaround is paired with. """ from oslo_config import cfg workarounds_group = cfg.OptGroup( 'workarounds', title='Workaround Options', help=""" A collection of workarounds used to mitigate bugs or issues found in system tools (e.g. Libvirt or QEMU) or Nova itself under certain conditions. These should only be enabled in exceptional circumstances. All options are linked against bug IDs, where more information on the issue can be found. """) ALL_OPTS = [ cfg.BoolOpt( 'disable_rootwrap', default=False, help=""" Use sudo instead of rootwrap. Allow fallback to sudo for performance reasons. For more information, refer to the bug report: https://bugs.launchpad.net/nova/+bug/1415106 Possible values: * True: Use sudo instead of rootwrap * False: Use rootwrap as usual Interdependencies to other options: * Any options that affect 'rootwrap' will be ignored. """), cfg.BoolOpt( 'disable_libvirt_livesnapshot', default=False, deprecated_for_removal=True, deprecated_since='19.0.0', deprecated_reason=""" This option was added to work around issues with libvirt 1.2.2. We no longer support this version of libvirt, which means this workaround is no longer necessary. It will be removed in a future release. """, help=""" Disable live snapshots when using the libvirt driver. Live snapshots allow the snapshot of the disk to happen without an interruption to the guest, using coordination with a guest agent to quiesce the filesystem. When using libvirt 1.2.2 live snapshots fail intermittently under load (likely related to concurrent libvirt/qemu operations). This config option provides a mechanism to disable live snapshot, in favor of cold snapshot, while this is resolved. Cold snapshot causes an instance outage while the guest is going through the snapshotting process. For more information, refer to the bug report: https://bugs.launchpad.net/nova/+bug/1334398 Possible values: * True: Live snapshot is disabled when using libvirt * False: Live snapshots are always used when snapshotting (as long as there is a new enough libvirt and the backend storage supports it) """), cfg.BoolOpt( 'handle_virt_lifecycle_events', default=True, help=""" Enable handling of events emitted from compute drivers. Many compute drivers emit lifecycle events, which are events that occur when, for example, an instance is starting or stopping. If the instance is going through task state changes due to an API operation, like resize, the events are ignored. This is an advanced feature which allows the hypervisor to signal to the compute service that an unexpected state change has occurred in an instance and that the instance can be shutdown automatically. Unfortunately, this can race in some conditions, for example in reboot operations or when the compute service or when host is rebooted (planned or due to an outage). If such races are common, then it is advisable to disable this feature. Care should be taken when this feature is disabled and 'sync_power_state_interval' is set to a negative value. In this case, any instances that get out of sync between the hypervisor and the Nova database will have to be synchronized manually. 
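In effect the option gates whether the compute manager subscribes to driver
events at all, roughly as follows (a simplified, illustrative sketch; the
``compute_driver`` and ``handle_events`` arguments stand in for the real
compute manager attributes)::

    import logging

    import nova.conf

    CONF = nova.conf.CONF
    LOG = logging.getLogger(__name__)

    def init_virt_events(compute_driver, handle_events):
        if CONF.workarounds.handle_virt_lifecycle_events:
            compute_driver.register_event_listener(handle_events)
        elif CONF.sync_power_state_interval < 0:
            LOG.warning('Lifecycle events and periodic power state '
                        'syncing are both disabled; out-of-sync '
                        'instances must be fixed up manually.')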
For more information, refer to the bug report: https://bugs.launchpad.net/bugs/1444630 Interdependencies to other options: * If ``sync_power_state_interval`` is negative and this feature is disabled, then instances that get out of sync between the hypervisor and the Nova database will have to be synchronized manually. """), cfg.BoolOpt( 'disable_group_policy_check_upcall', default=False, help=""" Disable the server group policy check upcall in compute. In order to detect races with server group affinity policy, the compute service attempts to validate that the policy was not violated by the scheduler. It does this by making an upcall to the API database to list the instances in the server group for one that it is booting, which violates our api/cell isolation goals. Eventually this will be solved by proper affinity guarantees in the scheduler and placement service, but until then, this late check is needed to ensure proper affinity policy. Operators that desire api/cell isolation over this check should enable this flag, which will avoid making that upcall from compute. Related options: * [filter_scheduler]/track_instance_changes also relies on upcalls from the compute service to the scheduler service. """), cfg.BoolOpt( 'enable_numa_live_migration', default=False, deprecated_for_removal=True, deprecated_since='20.0.0', deprecated_reason="""This option was added to mitigate known issues when live migrating instances with a NUMA topology with the libvirt driver. Those issues are resolved in Train. Clouds using the libvirt driver and fully upgraded to Train support NUMA-aware live migration. This option will be removed in a future release. """, help=""" Enable live migration of instances with NUMA topologies. Live migration of instances with NUMA topologies when using the libvirt driver is only supported in deployments that have been fully upgraded to Train. In previous versions, or in mixed Stein/Train deployments with a rolling upgrade in progress, live migration of instances with NUMA topologies is disabled by default when using the libvirt driver. This includes live migration of instances with CPU pinning or hugepages. CPU pinning and huge page information for such instances is not currently re-calculated, as noted in `bug #1289064`_. This means that if instances were already present on the destination host, the migrated instance could be placed on the same dedicated cores as these instances or use hugepages allocated for another instance. Alternately, if the host platforms were not homogeneous, the instance could be assigned to non-existent cores or be inadvertently split across host NUMA nodes. Despite these known issues, there may be cases where live migration is necessary. By enabling this option, operators that are aware of the issues and are willing to manually work around them can enable live migration support for these instances. Related options: * ``compute_driver``: Only the libvirt driver is affected. .. _bug #1289064: https://bugs.launchpad.net/nova/+bug/1289064 """), cfg.BoolOpt( 'ensure_libvirt_rbd_instance_dir_cleanup', default=False, help=""" Ensure the instance directory is removed during clean up when using rbd. When enabled this workaround will ensure that the instance directory is always removed during cleanup on hosts using ``[libvirt]/images_type=rbd``. 
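Concretely, the cleanup path then behaves roughly like the following sketch
(illustrative only, not the driver's exact code)::

    import os
    import shutil

    import nova.conf

    CONF = nova.conf.CONF

    def cleanup_instance_dir(instance_uuid):
        # Unconditionally remove the per-instance directory under
        # [DEFAULT]/instances_path, even though the disks live in RBD.
        path = os.path.join(CONF.instances_path, instance_uuid)
        if os.path.exists(path):
            shutil.rmtree(path)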
This avoids the following bugs with evacuation and revert resize clean up that lead to the instance directory remaining on the host: https://bugs.launchpad.net/nova/+bug/1414895 https://bugs.launchpad.net/nova/+bug/1761062 Both of these bugs can then result in ``DestinationDiskExists`` errors being raised if the instances ever attempt to return to the host. .. warning:: Operators will need to ensure that the instance directory itself, specified by ``[DEFAULT]/instances_path``, is not shared between computes before enabling this workaround otherwise the console.log, kernels, ramdisks and any additional files being used by the running instance will be lost. Related options: * ``compute_driver`` (libvirt) * ``[libvirt]/images_type`` (rbd) * ``instances_path`` """), cfg.BoolOpt( 'disable_fallback_pcpu_query', default=False, deprecated_for_removal=True, deprecated_since='20.0.0', help=""" Disable fallback request for VCPU allocations when using pinned instances. Starting in Train, compute nodes using the libvirt virt driver can report ``PCPU`` inventory and will use this for pinned instances. The scheduler will automatically translate requests using the legacy CPU pinning-related flavor extra specs, ``hw:cpu_policy`` and ``hw:cpu_thread_policy``, their image metadata property equivalents, and the emulator threads pinning flavor extra spec, ``hw:emulator_threads_policy``, to new placement requests. However, compute nodes require additional configuration in order to report ``PCPU`` inventory and this configuration may not be present immediately after an upgrade. To ensure pinned instances can be created without this additional configuration, the scheduler will make a second request to placement for old-style ``VCPU``-based allocations and fallback to these allocation candidates if necessary. This has a slight performance impact and is not necessary on new or upgraded deployments where the new configuration has been set on all hosts. By setting this option, the second lookup is disabled and the scheduler will only request ``PCPU``-based allocations. """), cfg.BoolOpt( 'never_download_image_if_on_rbd', default=False, help=""" When booting from an image on a ceph-backed compute node, if the image does not already reside on the ceph cluster (as would be the case if glance is also using the same cluster), nova will download the image from glance and upload it to ceph itself. If using multiple ceph clusters, this may cause nova to unintentionally duplicate the image in a non-COW-able way in the local ceph deployment, wasting space. For more information, refer to the bug report: https://bugs.launchpad.net/nova/+bug/1858877 Enabling this option will cause nova to *refuse* to boot an instance if it would require downloading the image from glance and uploading it to ceph itself. Related options: * ``compute_driver`` (libvirt) * ``[libvirt]/images_type`` (rbd) """), cfg.BoolOpt('reserve_disk_resource_for_image_cache', default=False, help=""" If it is set to True then the libvirt driver will reserve DISK_GB resource for the images stored in the image cache. If the :oslo.config:option:`DEFAULT.instances_path` is on different disk partition than the image cache directory then the driver will not reserve resource for the cache. Such disk reservation is done by a periodic task in the resource tracker that runs every :oslo.config:option:`update_resources_interval` seconds. So the reservation is not updated immediately when an image is cached. 
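The reserved amount is essentially the on-disk size of the image cache
directory rounded up to whole gigabytes, conceptually (an illustrative
sketch, not the resource tracker's exact code)::

    import math
    import os

    def cache_reservation_gb(cache_dir):
        # Sum the sizes of all cached image files and round up to GiB.
        total_bytes = sum(
            entry.stat().st_size
            for entry in os.scandir(cache_dir) if entry.is_file())
        return int(math.ceil(total_bytes / float(1024 ** 3)))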
Related options: * :oslo.config:option:`DEFAULT.instances_path` * :oslo.config:option:`image_cache.subdirectory_name` * :oslo.config:option:`update_resources_interval` """), cfg.BoolOpt('libvirt_disable_apic', default=False, help=""" With some kernels initializing the guest apic can result in a kernel hang that renders the guest unusable. This happens as a result of a kernel bug. In most cases the correct fix it to update the guest image kernel to one that is patched however in some cases this is not possible. This workaround allows the emulation of an apic to be disabled per host however it is not recommended to use outside of a CI or developer cloud. """), cfg.ListOpt('wait_for_vif_plugged_event_during_hard_reboot', item_type=cfg.types.String( choices=[ "normal", "direct", "macvtap", "baremetal", "direct-physical", "virtio-forwarder", "smart-nic", "vdpa", "accelerator-direct", "accelerator-direct-physical", "remote-managed", ]), default=[], help=""" The libvirt virt driver implements power on and hard reboot by tearing down every vif of the instance being rebooted then plug them again. By default nova does not wait for network-vif-plugged event from neutron before it lets the instance run. This can cause the instance to requests the IP via DHCP before the neutron backend has a chance to set up the networking backend after the vif plug. This flag defines which vifs nova expects network-vif-plugged events from during hard reboot. The possible values are neutron port vnic types: * normal * direct * macvtap * baremetal * direct-physical * virtio-forwarder * smart-nic * vdpa * accelerator-direct * accelerator-direct-physical * remote-managed Adding a ``vnic_type`` to this configuration makes Nova wait for a network-vif-plugged event for each of the instance's vifs having the specific ``vnic_type`` before unpausing the instance, similarly to how new instance creation works. Please note that not all neutron networking backends send plug time events, for certain ``vnic_type`` therefore this config is empty by default. The ml2/ovs and the networking-odl backends are known to send plug time events for ports with ``normal`` ``vnic_type`` so it is safe to add ``normal`` to this config if you are using only those backends in the compute host. The neutron in-tree SRIOV backend does not reliably send network-vif-plugged event during plug time for ports with ``direct`` vnic_type and never sends that event for port with ``direct-physical`` vnic_type during plug time. For other ``vnic_type`` and backend pairs, please consult the developers of the backend. Related options: * :oslo.config:option:`DEFAULT.vif_plugging_timeout` """), cfg.BoolOpt('enable_qemu_monitor_announce_self', default=False, help=""" If it is set to True the libvirt driver will try as a best effort to send the announce-self command to the QEMU monitor so that it generates RARP frames to update network switches in the post live migration phase on the destination. Please note that this causes the domain to be considered tainted by libvirt. Related options: * :oslo.config:option:`DEFAULT.compute_driver` (libvirt) """), cfg.IntOpt('qemu_monitor_announce_self_count', default=3, min=1, help=""" The total number of times to send the announce_self command to the QEMU monitor when enable_qemu_monitor_announce_self is enabled. 
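Together with ``qemu_monitor_announce_self_interval`` this amounts to a
simple retry loop, roughly (an illustrative sketch; ``announce_self()``
stands in for the actual QEMU monitor call)::

    import time

    import nova.conf

    CONF = nova.conf.CONF

    def announce_guest(announce_self):
        for _ in range(CONF.workarounds.qemu_monitor_announce_self_count):
            announce_self()  # ask QEMU to emit RARP frames for the guest
            time.sleep(
                CONF.workarounds.qemu_monitor_announce_self_interval)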
Related options: * :oslo.config:option:`WORKAROUNDS.enable_qemu_monitor_announce_self` (libvirt) """), cfg.IntOpt('qemu_monitor_announce_self_interval', default=1, min=1, help=""" The number of seconds to wait before re-sending the announce_self command to the QEMU monitor. Related options: * :oslo.config:option:`WORKAROUNDS.enable_qemu_monitor_announce_self` (libvirt) """), cfg.BoolOpt('disable_compute_service_check_for_ffu', default=False, help=""" If this is set, the normal safety check for old compute services will be treated as a warning instead of an error. This is only to be enabled to facilitate a Fast-Forward upgrade where new control services are being started before compute nodes have been able to update their service record. In an FFU, the service records in the database will be more than one version old until the compute nodes start up, but control services need to be online first. """), cfg.BoolOpt('unified_limits_count_pcpu_as_vcpu', default=False, help=""" When using unified limits, use VCPU + PCPU for VCPU quota usage. If the deployment is configured to use unified limits via ``[quota]driver=nova.quota.UnifiedLimitsDriver``, by default VCPU resources are counted independently from PCPU resources, consistent with how they are represented in the placement service. Legacy quota behavior counts PCPU as VCPU and returns the sum of VCPU + PCPU usage as the usage count for VCPU. Operators relying on the aggregation of VCPU and PCPU resource usage counts should set this option to True. Related options: * :oslo.config:option:`quota.driver` """), cfg.BoolOpt('skip_cpu_compare_on_dest', default=False, help=""" With the libvirt driver, during live migration, skip comparing guest CPU with the destination host. When using QEMU >= 2.9 and libvirt >= 4.4.0, libvirt will do the correct thing with respect to checking CPU compatibility on the destination host during live migration. """), cfg.BoolOpt('skip_cpu_compare_at_startup', default=False, help=""" This will skip the CPU comparison call at the startup of Compute service and lets libvirt handle it. """), cfg.BoolOpt( 'skip_hypervisor_version_check_on_lm', default=False, help=""" When this is enabled, it will skip version-checking of hypervisors during live migration. """), cfg.BoolOpt( 'skip_reserve_in_use_ironic_nodes', default=False, help=""" This may be useful if you use the Ironic driver, but don't have automatic cleaning enabled in Ironic. Nova, by default, will mark Ironic nodes as reserved as soon as they are in use. When you free the Ironic node (by deleting the nova instance) it takes a while for Nova to un-reserve that Ironic node in placement. Usually this is a good idea, because it avoids placement providing an Ironic as a valid candidate when it is still being cleaned. However, if you don't use automatic cleaning, it can cause an extra delay before and Ironic node is available for building a new Nova instance. """), cfg.BoolOpt( 'disable_deep_image_inspection', default=False, help=""" This disables the additional deep image inspection that the compute node does when downloading from glance. This includes backing-file, data-file, and known-features detection *before* passing the image to qemu-img. Generally, this inspection should be enabled for maximum safety, but this workaround option allows disabling it if there is a compatibility concern. 
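When inspection is enabled, the downloaded file is checked against what the
image metadata claims before ``qemu-img`` ever touches it, conceptually (an
illustrative sketch; ``detect_format`` is a stand-in for the real format
inspector)::

    import nova.conf

    CONF = nova.conf.CONF

    def verify_image(path, claimed_format, detect_format):
        if CONF.workarounds.disable_deep_image_inspection:
            return  # legacy behaviour: trust the image metadata
        detected = detect_format(path)  # e.g. 'raw' or 'qcow2'
        if detected != claimed_format:
            raise ValueError('image claims to be %s but looks like %s'
                             % (claimed_format, detected))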
"""), ] def register_opts(conf): conf.register_group(workarounds_group) conf.register_opts(ALL_OPTS, group=workarounds_group) def list_opts(): return {workarounds_group: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/wsgi.py0000664000175000017500000000502400000000000015776 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg wsgi_group = cfg.OptGroup( 'wsgi', title='WSGI Options', help=''' Options under this group are used to configure WSGI (Web Server Gateway Interface). WSGI is used to serve API requests. ''', ) ALL_OPTS = [ cfg.StrOpt( 'api_paste_config', default="api-paste.ini", deprecated_group='DEFAULT', help=""" This option represents a file name for the paste.deploy config for nova-api. Possible values: * A string representing file name for the paste.deploy config. """), cfg.StrOpt( 'secure_proxy_ssl_header', deprecated_group='DEFAULT', deprecated_for_removal=True, deprecated_since='31.0.0', deprecated_reason=""" The functionality of this parameter is duplicate of the http_proxy_to_wsgi middleware of oslo.middleware and will be completely replaced. """, help=""" This option specifies the HTTP header used to determine the protocol scheme for the original request, even if it was removed by a SSL terminating proxy. Possible values: * None (default) - the request scheme is not influenced by any HTTP headers * Valid HTTP header, like ``HTTP_X_FORWARDED_PROTO`` WARNING: Do not set this unless you know what you are doing. Make sure ALL of the following are true before setting this (assuming the values from the example above): * Your API is behind a proxy. * Your proxy strips the X-Forwarded-Proto header from all incoming requests. In other words, if end users include that header in their requests, the proxy will discard it. * Your proxy sets the X-Forwarded-Proto header and sends it to API, but only for requests that originally come in via HTTPS. If any of those are not true, you should keep this setting set to None. """), ] def register_opts(conf): conf.register_group(wsgi_group) conf.register_opts(ALL_OPTS, group=wsgi_group) def list_opts(): return {wsgi_group: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/conf/zvm.py0000664000175000017500000000566700000000000015656 0ustar00zuulzuul00000000000000# Copyright 2017,2018 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.conf import paths zvm_opt_group = cfg.OptGroup('zvm', title='zVM Options', help=""" zvm options allows cloud administrator to configure related z/VM hypervisor driver to be used within an OpenStack deployment. zVM options are used when the compute_driver is set to use zVM (compute_driver=zvm.ZVMDriver) """) zvm_opts = [ cfg.URIOpt('cloud_connector_url', sample_default='http://zvm.example.org:8080/', help=""" URL to be used to communicate with z/VM Cloud Connector. """), cfg.StrOpt('ca_file', default=None, help=""" CA certificate file to be verified in httpd server with TLS enabled A string, it must be a path to a CA bundle to use. """), cfg.StrOpt('image_tmp_path', default=paths.state_path_def('images'), sample_default="$state_path/images", help=""" The path at which images will be stored (snapshot, deploy, etc). Images used for deploy and images captured via snapshot need to be stored on the local disk of the compute host. This configuration identifies the directory location. Possible values: A file system path on the host running the compute service. """), cfg.IntOpt('reachable_timeout', default=300, help=""" Timeout (seconds) to wait for an instance to start. The z/VM driver relies on communication between the instance and cloud connector. After an instance is created, it must have enough time to wait for all the network info to be written into the user directory. The driver will keep rechecking network status to the instance with the timeout value, If setting network failed, it will notify the user that starting the instance failed and put the instance in ERROR state. The underlying z/VM guest will then be deleted. Possible Values: Any positive integer. Recommended to be at least 300 seconds (5 minutes), but it will vary depending on instance and system load. A value of 0 is used for debug. In this case the underlying z/VM guest will not be deleted when the instance is marked in ERROR state. """), ] def register_opts(conf): conf.register_group(zvm_opt_group) conf.register_opts(zvm_opts, group=zvm_opt_group) def list_opts(): return {zvm_opt_group: zvm_opts} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/config.py0000664000175000017500000000667300000000000015360 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
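# An illustrative sketch (not an actual Nova command) of how this module is
# typically consumed: service entry points call parse_args() below before
# doing anything else, for example:
#
#     import sys
#
#     from nova import config
#     from nova import objects
#
#     def main():
#         config.parse_args(sys.argv)
#         objects.register_all()
#         # ... create and start the service here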
import logging from oslo_log import log from oslo_utils import importutils import nova.conf from nova.db.api import api as api_db_api from nova.db.main import api as main_db_api from nova import middleware from nova import rpc from nova import version profiler = importutils.try_import('osprofiler.opts') CONF = nova.conf.CONF def set_lib_defaults(): """Update default value for configuration options from other namespace. Example, oslo lib config options. This is needed for config generator tool to pick these default value changes. https://docs.openstack.org/oslo.config/latest/cli/ generator.html#modifying-defaults-from-other-namespaces """ # Update default value of oslo.middleware cors config option. middleware.set_defaults() # Update default value of RPC transport control_exchange config option. rpc.set_defaults(control_exchange='nova') # Update default value of oslo_log default_log_levels and # logging_context_format_string config option. set_log_defaults() def rabbit_heartbeat_filter(log_record): message = "Unexpected error during heartbeat thread processing" return message not in log_record.msg def set_log_defaults(): # We use the oslo.log default log levels which includes suds=INFO # and add only the extra levels that Nova needs if CONF.glance.debug: extra_default_log_levels = ['glanceclient=DEBUG'] else: extra_default_log_levels = ['glanceclient=WARN'] # Allow cinderclient and os_brick to log at DEBUG without Nova if CONF.cinder.debug: extra_default_log_levels += ['cinderclient=DEBUG', 'os_brick=DEBUG'] # NOTE(danms): DEBUG logging in privsep will result in some large # and potentially sensitive things being logged. extra_default_log_levels.append('oslo.privsep.daemon=INFO') log.set_defaults(default_log_levels=log.get_default_log_levels() + extra_default_log_levels) def parse_args(argv, default_config_files=None, configure_db=True, init_rpc=True): log.register_options(CONF) # NOTE(sean-k-mooney): this filter addresses bug #1825584 # https://bugs.launchpad.net/nova/+bug/1825584 # eventlet monkey-patching breaks AMQP heartbeat on uWSGI rabbit_logger = logging.getLogger('oslo.messaging._drivers.impl_rabbit') rabbit_logger.addFilter(rabbit_heartbeat_filter) set_lib_defaults() if profiler: profiler.set_defaults(CONF) CONF(argv[1:], project='nova', version=version.version_string(), default_config_files=default_config_files) if init_rpc: rpc.init(CONF) if configure_db: main_db_api.configure(CONF) api_db_api.configure(CONF) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3576086 nova-32.0.0/nova/console/0000775000175000017500000000000000000000000015167 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/console/__init__.py0000664000175000017500000000152700000000000017305 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" :mod:`nova.console` -- Wrappers around console proxies ====================================================== .. automodule:: nova.console :platform: Unix :synopsis: Wrapper around console proxies such as noVNC to set up multi-tenant VM console access. """ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3576086 nova-32.0.0/nova/console/rfb/0000775000175000017500000000000000000000000015740 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/console/rfb/__init__.py0000664000175000017500000000000000000000000020037 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/console/rfb/auth.py0000664000175000017500000000340700000000000017257 0ustar00zuulzuul00000000000000# Copyright (c) 2014-2017 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import enum VERSION_LENGTH = 12 SUBTYPE_LENGTH = 4 AUTH_STATUS_FAIL = b"\x00" AUTH_STATUS_PASS = b"\x01" @enum.unique class AuthType(enum.IntEnum): INVALID = 0 NONE = 1 VNC = 2 RA2 = 5 RA2NE = 6 TIGHT = 16 ULTRA = 17 TLS = 18 # Used by VINO VENCRYPT = 19 # Used by VeNCrypt and QEMU SASL = 20 # SASL type used by VINO and QEMU ARD = 30 # Apple remote desktop (screen sharing) MSLOGON = 0xfffffffa # Used by UltraVNC class RFBAuthScheme(metaclass=abc.ABCMeta): @abc.abstractmethod def security_type(self): """Return the security type supported by this scheme Returns the nova.console.rfb.auth.AuthType.XX constant representing the scheme implemented. """ pass @abc.abstractmethod def security_handshake(self, compute_sock): """Perform security-type-specific functionality. This method is expected to return the socket-like object used to communicate with the server securely. Should raise exception.RFBAuthHandshakeFailed if an error occurs :param compute_sock: socket connected to the compute node instance """ pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/console/rfb/authnone.py0000664000175000017500000000145000000000000020133 0ustar00zuulzuul00000000000000# Copyright (c) 2014-2016 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova.console.rfb import auth class RFBAuthSchemeNone(auth.RFBAuthScheme): def security_type(self): return auth.AuthType.NONE def security_handshake(self, compute_sock): return compute_sock ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/console/rfb/auths.py0000664000175000017500000000334000000000000017436 0ustar00zuulzuul00000000000000# Copyright (c) 2014-2017 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.console.rfb import authnone from nova.console.rfb import authvencrypt from nova import exception CONF = cfg.CONF class RFBAuthSchemeList(object): AUTH_SCHEME_MAP = { "none": authnone.RFBAuthSchemeNone, "vencrypt": authvencrypt.RFBAuthSchemeVeNCrypt, } def __init__(self): self.schemes = {} for name in CONF.vnc.auth_schemes: scheme = self.AUTH_SCHEME_MAP[name]() self.schemes[scheme.security_type()] = scheme def find_scheme(self, desired_types): """Find a suitable authentication scheme to use with compute node. Identify which of the ``desired_types`` we can accept. :param desired_types: A list of ints corresponding to the various authentication types supported. """ for security_type in desired_types: if security_type in self.schemes: return self.schemes[security_type] raise exception.RFBAuthNoAvailableScheme( allowed_types=", ".join([str(s) for s in self.schemes.keys()]), desired_types=", ".join([str(s) for s in desired_types])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/console/rfb/authvencrypt.py0000664000175000017500000001276300000000000021057 0ustar00zuulzuul00000000000000# Copyright (c) 2014-2016 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import enum import ssl import struct from oslo_config import cfg from oslo_log import log as logging from nova.console.rfb import auth from nova import exception from nova.i18n import _ LOG = logging.getLogger(__name__) CONF = cfg.CONF class AuthVeNCryptSubtype(enum.IntEnum): """Possible VeNCrypt subtypes. From https://github.com/rfbproto/rfbproto/blob/master/rfbproto.rst """ PLAIN = 256 TLSNONE = 257 TLSVNC = 258 TLSPLAIN = 259 X509NONE = 260 X509VNC = 261 X509PLAIN = 262 X509SASL = 263 TLSSASL = 264 class RFBAuthSchemeVeNCrypt(auth.RFBAuthScheme): """A security proxy helper which uses VeNCrypt. This security proxy helper uses the VeNCrypt security type to achieve SSL/TLS-secured VNC. 
It supports both standard SSL/TLS encryption and SSL/TLS encryption with x509 authentication. Refer to https://www.berrange.com/~dan/vencrypt.txt for a brief overview of the protocol. """ def security_type(self): return auth.AuthType.VENCRYPT def security_handshake(self, compute_sock): def recv(num): b = compute_sock.recv(num) if len(b) != num: reason = _("Short read from compute socket, wanted " "%(wanted)d bytes but got %(got)d") % { 'wanted': num, 'got': len(b)} raise exception.RFBAuthHandshakeFailed(reason=reason) return b # get the VeNCrypt version from the server maj_ver = ord(recv(1)) min_ver = ord(recv(1)) LOG.debug("Server sent VeNCrypt version " "%(maj)s.%(min)s", {'maj': maj_ver, 'min': min_ver}) if maj_ver != 0 or min_ver != 2: reason = _("Only VeNCrypt version 0.2 is supported by this " "proxy, but the server wanted to use version " "%(maj)s.%(min)s") % {'maj': maj_ver, 'min': min_ver} raise exception.RFBAuthHandshakeFailed(reason=reason) # use version 0.2 compute_sock.sendall(b"\x00\x02") can_use_version = ord(recv(1)) if can_use_version > 0: reason = _("Server could not use VeNCrypt version 0.2") raise exception.RFBAuthHandshakeFailed(reason=reason) # get the supported auth subtypes sub_types_cnt = ord(recv(1)) sub_types_raw = recv(sub_types_cnt * auth.SUBTYPE_LENGTH) sub_types = struct.unpack('!' + str(sub_types_cnt) + 'I', sub_types_raw) LOG.debug( "Server supports VeNCrypt subtypes: %s", ', '.join( '%d (%s)' % ( AuthVeNCryptSubtype(t).value, AuthVeNCryptSubtype(t).name, ) for t in sub_types )) # We use X509None as we're only seeking to encrypt the channel (ruling # out PLAIN) and prevent MITM (ruling out TLS*, which uses trivially # MITM'd Anonymous Diffie Hellmann (DH) cyphers) if AuthVeNCryptSubtype.X509NONE not in sub_types: reason = _( "Server does not support the {value} ({name}) " "VeNCrypt auth subtype" ).format(value=AuthVeNCryptSubtype.X509NONE.value, name=AuthVeNCryptSubtype.X509NONE.name) raise exception.RFBAuthHandshakeFailed(reason=reason) LOG.debug( "Attempting to use the %d (%s) VeNCrypt auth subtype", AuthVeNCryptSubtype.X509NONE.value, AuthVeNCryptSubtype.X509NONE.name) compute_sock.sendall(struct.pack( '!I', AuthVeNCryptSubtype.X509NONE)) # NB(sross): the spec is missing a U8 here that's used in # multiple implementations (e.g. QEMU, GTK-VNC). 
1 means # acceptance, 0 means failure (unlike the rest of RFB) auth_accepted = ord(recv(1)) if auth_accepted == 0: reason = _( "Server didn't accept the requested VeNCrypt auth subtype") raise exception.RFBAuthHandshakeFailed(reason=reason) LOG.debug("Server accepted the requested VeNCrypt auth subtype") if CONF.vnc.vencrypt_client_key and CONF.vnc.vencrypt_client_cert: client_key = CONF.vnc.vencrypt_client_key client_cert = CONF.vnc.vencrypt_client_cert else: client_key = None client_cert = None try: wrapped_sock = ssl.wrap_socket( compute_sock, keyfile=client_key, certfile=client_cert, server_side=False, cert_reqs=ssl.CERT_REQUIRED, ca_certs=CONF.vnc.vencrypt_ca_certs) LOG.info("VeNCrypt security handshake accepted") return wrapped_sock except ssl.SSLError as e: reason = _("Error establishing TLS connection to server: %s") raise exception.RFBAuthHandshakeFailed(reason=reason % str(e)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3576086 nova-32.0.0/nova/console/securityproxy/0000775000175000017500000000000000000000000020140 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/console/securityproxy/__init__.py0000664000175000017500000000000000000000000022237 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/console/securityproxy/base.py0000664000175000017500000000271300000000000021427 0ustar00zuulzuul00000000000000# Copyright (c) 2014-2016 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc class SecurityProxy(metaclass=abc.ABCMeta): """A console security Proxy Helper Console security proxy helpers should subclass this class and implement a generic `connect` for the particular protocol being used. Security drivers can then subclass the protocol-specific helper class. """ @abc.abstractmethod def connect(self, tenant_sock, compute_sock): """Initiate the console connection This method performs the protocol specific negotiation, and returns the socket-like object to use to communicate with the server securely. :param tenant_sock: socket connected to the remote tenant user :param compute_sock: socket connected to the compute node instance :returns: a new compute_sock for the instance """ pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/console/securityproxy/rfb.py0000664000175000017500000002046600000000000021273 0ustar00zuulzuul00000000000000# Copyright (c) 2014-2016 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import struct from oslo_log import log as logging from nova.console.rfb import auth from nova.console.rfb import auths from nova.console.securityproxy import base from nova import exception from nova.i18n import _ LOG = logging.getLogger(__name__) class RFBSecurityProxy(base.SecurityProxy): """RFB Security Proxy Negotiation Helper. This class proxies the initial setup of the RFB connection between the client and the server. Then, when the RFB security negotiation step arrives, it intercepts the communication, posing as a server with the "None" authentication type to the client, and acting as a client (via the methods below) to the server. After security negotiation, normal proxying can be used. Note: this code mandates RFB version 3.8, since this is supported by any client and server impl written in the past 10+ years. See the general RFB specification at: https://tools.ietf.org/html/rfc6143 See an updated, maintained RFB specification at: https://github.com/rfbproto/rfbproto/blob/master/rfbproto.rst """ def __init__(self): self.auth_schemes = auths.RFBAuthSchemeList() def _make_var_str(self, message): message_str = str(message) message_bytes = message_str.encode('utf-8') message_len = struct.pack("!I", len(message_bytes)) return message_len + message_bytes def _fail(self, tenant_sock, compute_sock, message): # Tell the client there's been a problem result_code = struct.pack("!I", 1) tenant_sock.sendall(result_code + self._make_var_str(message)) if compute_sock is not None: # Tell the server that there's been a problem # by sending the "Invalid" security type compute_sock.sendall(auth.AUTH_STATUS_FAIL) @staticmethod def _parse_version(version_str): r"""Convert a version string to a float. >>> RFBSecurityProxy._parse_version('RFB 003.008\n') 3.8 """ maj_str = version_str[4:7] min_str = version_str[8:11] return float("%d.%d" % (int(maj_str), int(min_str))) def connect(self, tenant_sock, compute_sock): """Initiate the RFB connection process. This method performs the initial ProtocolVersion and Security messaging, and returns the socket-like object to use to communicate with the server securely. If an error occurs SecurityProxyNegotiationFailed will be raised. """ def recv(sock, num): b = sock.recv(num) if len(b) != num: reason = _("Incorrect read from socket, wanted %(wanted)d " "bytes but got %(got)d. 
Socket returned " "%(result)r") % {'wanted': num, 'got': len(b), 'result': b} raise exception.RFBAuthHandshakeFailed(reason=reason) return b # Negotiate version with compute server compute_version = recv(compute_sock, auth.VERSION_LENGTH) LOG.debug( "Got version string '%s' from compute node", compute_version[:-1].decode('utf-8')) if self._parse_version(compute_version) != 3.8: reason = _( "Security proxying requires RFB protocol version 3.8, " "but server sent %s") raise exception.SecurityProxyNegotiationFailed( reason=reason % compute_version[:-1].decode('utf-8')) compute_sock.sendall(compute_version) # Negotiate version with tenant tenant_sock.sendall(compute_version) tenant_version = recv(tenant_sock, auth.VERSION_LENGTH) LOG.debug( "Got version string '%s' from tenant", tenant_version[:-1].decode('utf-8')) if self._parse_version(tenant_version) != 3.8: reason = _( "Security proxying requires RFB protocol version 3.8, " "but tenant asked for %s") raise exception.SecurityProxyNegotiationFailed( reason=reason % tenant_version[:-1].decode('utf-8')) # Negotiate security with server permitted_auth_types_cnt = recv(compute_sock, 1)[0] if permitted_auth_types_cnt == 0: # Decode the reason why the request failed reason_len_raw = recv(compute_sock, 4) reason_len = struct.unpack('!I', reason_len_raw)[0] reason = recv(compute_sock, reason_len) tenant_sock.sendall(auth.AUTH_STATUS_FAIL + reason_len_raw + reason) raise exception.SecurityProxyNegotiationFailed(reason=reason) f = recv(compute_sock, permitted_auth_types_cnt) permitted_auth_types = [] for auth_type in f: if isinstance(auth_type, str): auth_type = ord(auth_type) permitted_auth_types.append(auth_type) LOG.debug( "Server sent security types: %s", ", ".join( '%d (%s)' % (auth.AuthType(t).value, auth.AuthType(t).name) for t in permitted_auth_types )) # Negotiate security with client before we say "ok" to the server # send 1:[None] tenant_sock.sendall(auth.AUTH_STATUS_PASS + bytes((auth.AuthType.NONE,))) client_auth = recv(tenant_sock, 1)[0] if client_auth != auth.AuthType.NONE: self._fail( tenant_sock, compute_sock, _("Only the security type {value} ({name}) " "is supported").format(value=auth.AuthType.NONE.value, name=auth.AuthType.NONE.name)) reason = _( "Client requested a security type other than " "{value} ({name}): {client_value} ({client_name})" ).format(value=auth.AuthType.NONE.value, name=auth.AuthType.NONE.name, client_value=auth.AuthType(client_auth).value, client_name=auth.AuthType(client_auth).name) raise exception.SecurityProxyNegotiationFailed(reason=reason) try: scheme = self.auth_schemes.find_scheme(permitted_auth_types) except exception.RFBAuthNoAvailableScheme as e: # Intentionally don't tell client what really failed # as that's information leakage self._fail(tenant_sock, compute_sock, _("Unable to negotiate security with server")) raise exception.SecurityProxyNegotiationFailed( reason=_("No compute auth available: %s") % str(e)) compute_sock.sendall(bytes((scheme.security_type(),))) LOG.debug( "Using security type %d (%s) with server, %d (%s) with client", scheme.security_type().value, scheme.security_type().name, auth.AuthType.NONE.value, auth.AuthType.NONE.name) try: compute_sock = scheme.security_handshake(compute_sock) except exception.RFBAuthHandshakeFailed as e: # Intentionally don't tell client what really failed # as that's information leakage self._fail(tenant_sock, None, _("Unable to negotiate security with server")) LOG.debug("Auth failed %s", str(e)) raise exception.SecurityProxyNegotiationFailed( 
reason=_("Auth handshake failed")) LOG.info("Finished security handshake, resuming normal proxy " "mode using secured socket") # we can just proxy the security result -- if the server security # negotiation fails, we want the client to think it has failed return compute_sock ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/console/serial.py0000664000175000017500000000540600000000000017025 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Serial consoles module.""" import socket from oslo_log import log as logging import nova.conf from nova import exception from nova import utils LOG = logging.getLogger(__name__) ALLOCATED_PORTS = set() # in-memory set of already allocated ports SERIAL_LOCK = 'serial-lock' CONF = nova.conf.CONF # TODO(sahid): Add a method to initialize ALLOCATED_PORTS with the # already binded TPC port(s). (cf from danpb: list all running guests and # query the XML in libvirt driver to find out the TCP port(s) it uses). @utils.synchronized(SERIAL_LOCK) def acquire_port(host): """Returns a free TCP port on host. Find and returns a free TCP port on 'host' in the range of 'CONF.serial_console.port_range'. """ start, stop = _get_port_range() for port in range(start, stop): if (host, port) in ALLOCATED_PORTS: continue try: _verify_port(host, port) ALLOCATED_PORTS.add((host, port)) return port except exception.SocketPortInUseException as e: LOG.warning(e.format_message()) raise exception.SocketPortRangeExhaustedException(host=host) @utils.synchronized(SERIAL_LOCK) def release_port(host, port): """Release TCP port to be used next time.""" ALLOCATED_PORTS.discard((host, port)) def _get_port_range(): config_range = CONF.serial_console.port_range start, stop = map(int, config_range.split(':')) if start >= stop: default_port_range = nova.conf.serial_console.DEFAULT_PORT_RANGE LOG.warning("serial_console.port_range should be in the " "format : and start < stop, " "Given value %(port_range)s is invalid. " "Taking the default port range %(default)s.", {'port_range': config_range, 'default': default_port_range}) start, stop = map(int, default_port_range.split(':')) return start, stop def _verify_port(host, port): s = socket.socket() try: s.bind((host, port)) except socket.error as e: raise exception.SocketPortInUseException( host=host, port=port, error=e) finally: s.close() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/console/type.py0000664000175000017500000000253100000000000016523 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class Console(object): def __init__(self, host, port, internal_access_path=None): self.host = host self.port = port self.internal_access_path = internal_access_path def get_connection_info(self, token, access_url): """Returns an unreferenced dict with connection information.""" ret = dict(self.__dict__) ret['token'] = token ret['access_url'] = access_url return ret class ConsoleVNC(Console): pass class ConsoleSpice(Console): def __init__(self, host, port, tlsPort, internal_access_path=None): super(ConsoleSpice, self).__init__(host, port, internal_access_path) self.tlsPort = tlsPort class ConsoleSerial(Console): pass class ConsoleMKS(Console): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/console/websocketproxy.py0000664000175000017500000003136500000000000020641 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ''' Websocket proxy that is compatible with OpenStack Nova. Leverages websockify.py by Joel Martin ''' import copy from http import cookies as Cookie from http import HTTPStatus import os import socket from urllib import parse as urlparse from oslo_log import log as logging from oslo_utils import encodeutils import websockify from websockify import websockifyserver from nova.compute import rpcapi as compute_rpcapi import nova.conf from nova import context from nova import exception from nova.i18n import _ from nova import objects from oslo_utils import timeutils import threading LOG = logging.getLogger(__name__) CONF = nova.conf.CONF class TenantSock(object): """A socket wrapper for communicating with the tenant. This class provides a socket-like interface to the internal websockify send/receive queue for the client connection to the tenant user. It is used with the security proxy classes. 
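    As an illustration only, mirroring how ``new_websocket_client`` below
    wires things together (names abbreviated)::

        tenant_sock = TenantSock(request_handler)
        compute_sock = security_proxy.connect(tenant_sock, compute_sock)
        tenant_sock.finish_up()  # flush any bytes still queued for the client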
""" def __init__(self, reqhandler): self.reqhandler = reqhandler self.queue = [] def recv(self, cnt): # NB(sross): it's ok to block here because we know # exactly the sequence of data arriving while len(self.queue) < cnt: # new_frames looks like ['abc', 'def'] new_frames, closed = self.reqhandler.recv_frames() # flatten frames onto queue for frame in new_frames: self.queue.extend( [bytes(chr(c), 'ascii') for c in frame]) if closed: break popped = self.queue[0:cnt] del self.queue[0:cnt] return b''.join(popped) def sendall(self, data): self.reqhandler.send_frames([encodeutils.safe_encode(data)]) def finish_up(self): self.reqhandler.send_frames([b''.join(self.queue)]) def close(self): self.finish_up() self.reqhandler.send_close() class NovaProxyRequestHandler(websockify.ProxyRequestHandler): def __init__(self, *args, **kwargs): self._compute_rpcapi = None websockify.ProxyRequestHandler.__init__(self, *args, **kwargs) @property def compute_rpcapi(self): # Lazy load the rpcapi/ComputeAPI upon first use for this connection. # This way, if we receive a TCP RST, we will not create a ComputeAPI # object we won't use. if not self._compute_rpcapi: self._compute_rpcapi = compute_rpcapi.ComputeAPI() return self._compute_rpcapi def verify_origin_proto(self, connect_info, origin_proto): if 'access_url_base' not in connect_info: detail = _("No access_url_base in connect_info. " "Cannot validate protocol") raise exception.ValidationError(detail=detail) expected_protos = [ urlparse.urlparse(connect_info.access_url_base).scheme] # NOTE: For serial consoles the expected protocol could be ws or # wss which correspond to http and https respectively in terms of # security. if 'ws' in expected_protos: expected_protos.append('http') if 'wss' in expected_protos: expected_protos.append('https') return origin_proto in expected_protos def _check_console_port(self, ctxt, instance_uuid, port, console_type): try: instance = objects.Instance.get_by_uuid(ctxt, instance_uuid) except exception.InstanceNotFound: return # NOTE(melwitt): The port is expected to be a str for validation. return self.compute_rpcapi.validate_console_port(ctxt, instance, str(port), console_type) def _get_connect_info(self, ctxt, token): """Validate the token and get the connect info.""" # NOTE(PaulMurray) ConsoleAuthToken.validate validates the token. # We call the compute manager directly to check the console port # is correct. connect_info = objects.ConsoleAuthToken.validate(ctxt, token) valid_port = self._check_console_port( ctxt, connect_info.instance_uuid, connect_info.port, connect_info.console_type) if not valid_port: raise exception.InvalidToken(token='***') return connect_info def _close_connection(self, tsock, host, port): """takes target socket and close the connection. 
""" try: tsock.shutdown(socket.SHUT_RDWR) except OSError: pass finally: if tsock.fileno() != -1: tsock.close() self.vmsg(_("%(host)s:%(port)s: " "Websocket client or target closed") % {'host': host, 'port': port}) def new_websocket_client(self): """Called after a new WebSocket connection has been established.""" # Reopen the eventlet hub to make sure we don't share an epoll # fd with parent and/or siblings, which would be bad from eventlet import hubs hubs.use_hub() # The nova expected behavior is to have token # passed to the method GET of the request token = urlparse.parse_qs( urlparse.urlparse(self.path).query ).get('token', ['']).pop() if not token: # NoVNC uses it's own convention that forward token # from the request to a cookie header, we should check # also for this behavior hcookie = self.headers.get('cookie') if hcookie: cookie = Cookie.SimpleCookie() for hcookie_part in hcookie.split(';'): hcookie_part = hcookie_part.lstrip() try: cookie.load(hcookie_part) except Cookie.CookieError: # NOTE(stgleb): Do not print out cookie content # for security reasons. LOG.warning('Found malformed cookie') else: if 'token' in cookie: token = cookie['token'].value ctxt = context.get_admin_context() connect_info = self._get_connect_info(ctxt, token) # Verify Origin expected_origin_hostname = self.headers.get('Host') if ':' in expected_origin_hostname: e = expected_origin_hostname if '[' in e and ']' in e: expected_origin_hostname = e.split(']')[0][1:] else: expected_origin_hostname = e.split(':')[0] expected_origin_hostnames = CONF.console.allowed_origins expected_origin_hostnames.append(expected_origin_hostname) origin_url = self.headers.get('Origin') # missing origin header indicates non-browser client which is OK if origin_url is not None: origin = urlparse.urlparse(origin_url) origin_hostname = origin.hostname origin_scheme = origin.scheme # If the console connection was forwarded by a proxy (example: # haproxy), the original protocol could be contained in the # X-Forwarded-Proto header instead of the Origin header. Prefer the # forwarded protocol if it is present. 
forwarded_proto = self.headers.get('X-Forwarded-Proto') if forwarded_proto is not None: origin_scheme = forwarded_proto if origin_hostname == '' or origin_scheme == '': detail = _("Origin header not valid.") raise exception.ValidationError(detail=detail) if origin_hostname not in expected_origin_hostnames: detail = _("Origin header does not match this host.") raise exception.ValidationError(detail=detail) if not self.verify_origin_proto(connect_info, origin_scheme): detail = _("Origin header protocol does not match this host.") raise exception.ValidationError(detail=detail) sanitized_info = copy.copy(connect_info) sanitized_info.token = '***' self.msg(_('connect info: %s'), sanitized_info) host = connect_info.host port = connect_info.port # Connect to the target self.msg(_("connecting to: %(host)s:%(port)s") % {'host': host, 'port': port}) tsock = self.socket(host, port, connect=True) # Handshake as necessary if 'internal_access_path' in connect_info: path = connect_info.internal_access_path if path: tsock.send(encodeutils.safe_encode( 'CONNECT %s HTTP/1.1\r\n\r\n' % path)) end_token = "\r\n\r\n" while True: data = tsock.recv(4096, socket.MSG_PEEK) token_loc = data.find(end_token) if token_loc != -1: if data.split("\r\n")[0].find("200") == -1: raise exception.InvalidConnectionInfo() # remove the response from recv buffer tsock.recv(token_loc + len(end_token)) break if self.server.security_proxy is not None: tenant_sock = TenantSock(self) try: tsock = self.server.security_proxy.connect(tenant_sock, tsock) except exception.SecurityProxyNegotiationFailed: LOG.exception("Unable to perform security proxying, shutting " "down connection") tenant_sock.close() tsock.shutdown(socket.SHUT_RDWR) tsock.close() raise tenant_sock.finish_up() # Start proxying try: if CONF.consoleauth.enforce_session_timeout: conn_timeout = connect_info.expires - timeutils.utcnow_ts() LOG.info('%s seconds to terminate connection.', conn_timeout) threading.Timer(conn_timeout, self._close_connection, [tsock, host, port]).start() self.do_proxy(tsock) except Exception: self._close_connection(tsock, host, port) raise def socket(self, *args, **kwargs): return websockifyserver.WebSockifyServer.socket(*args, **kwargs) def send_head(self): # This code is copied from this example patch: # https://bugs.python.org/issue32084#msg306545 path = self.translate_path(self.path) if os.path.isdir(path): parts = urlparse.urlsplit(self.path) if not parts.path.endswith('/'): # Browsers interpret "Location: //uri" as an absolute URI # like "http://URI" if self.path.startswith('//'): self.send_error(HTTPStatus.BAD_REQUEST, "URI must not start with //") return None return super(NovaProxyRequestHandler, self).send_head() class NovaWebSocketProxy(websockify.WebSocketProxy): def __init__(self, *args, **kwargs): """:param security_proxy: instance of nova.console.securityproxy.base.SecurityProxy Create a new web socket proxy, optionally using the @security_proxy instance to negotiate security layer with the compute node. """ self.security_proxy = kwargs.pop('security_proxy', None) # If 'default' was specified as the ssl_minimum_version, we leave # ssl_options unset to default to the underlying system defaults. # We do this to avoid using websockify's behaviour for 'default' # in select_ssl_version(), which hardcodes the versions to be # quite relaxed and prevents us from using system crypto policies. 
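        # Illustrative only (the 'tlsv1_3' value is an assumed example, not
        # re-validated here): leaving ssl_minimum_version at 'default' keeps
        # ssl_options unset so the system-wide crypto policy applies, while a
        # stricter value such as 'tlsv1_3' is mapped through websockify's
        # select_ssl_version() into concrete ssl_options, exactly as the code
        # below does.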
ssl_min_version = kwargs.pop('ssl_minimum_version', None) if ssl_min_version and ssl_min_version != 'default': kwargs['ssl_options'] = websockify.websocketproxy. \ select_ssl_version(ssl_min_version) super(NovaWebSocketProxy, self).__init__(*args, **kwargs) @staticmethod def get_logger(): return LOG ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/context.py0000664000175000017500000005314700000000000015575 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """RequestContext: context for requests that persist through all of nova.""" from contextlib import contextmanager import copy import futurist.waiters from keystoneauth1.access import service_catalog as ksa_service_catalog from keystoneauth1 import plugin from oslo_context import context from oslo_db.sqlalchemy import enginefacade from oslo_log import log as logging from oslo_utils import timeutils from nova import exception from nova.i18n import _ from nova import objects from nova import policy from nova import utils LOG = logging.getLogger(__name__) CELL_CACHE = {} # NOTE(melwitt): Used for the scatter-gather utility to indicate we timed out # waiting for a result from a cell. did_not_respond_sentinel = object() # FIXME(danms): Keep a global cache of the cells we find the # first time we look. This needs to be refreshed on a timer or # trigger. CELLS = [] # Timeout value for waiting for cells to respond CELL_TIMEOUT = 60 class _ContextAuthPlugin(plugin.BaseAuthPlugin): """A keystoneauth auth plugin that uses the values from the Context. Ideally we would use the plugin provided by auth_token middleware however this plugin isn't serialized yet so we construct one from the serialized auth data. """ def __init__(self, auth_token, sc): super(_ContextAuthPlugin, self).__init__() self.auth_token = auth_token self.service_catalog = ksa_service_catalog.ServiceCatalogV2(sc) def get_token(self, *args, **kwargs): return self.auth_token def get_endpoint(self, session, service_type=None, interface=None, region_name=None, service_name=None, **kwargs): return self.service_catalog.url_for(service_type=service_type, service_name=service_name, interface=interface, region_name=region_name) @enginefacade.transaction_context_provider class RequestContext(context.RequestContext): """Security context and request information. Represents the user taking a given action within the system. """ def __init__(self, user_id=None, project_id=None, is_admin=None, read_deleted="no", remote_address=None, timestamp=None, quota_class=None, service_catalog=None, user_auth_plugin=None, **kwargs): """:param read_deleted: 'no' indicates deleted records are hidden, 'yes' indicates deleted records are visible, 'only' indicates that *only* deleted records are visible. 
:param overwrite: Set to False to ensure that the greenthread local copy of the index is not overwritten. :param user_auth_plugin: The auth plugin for the current request's authentication data. """ if user_id: kwargs['user_id'] = user_id if project_id: kwargs['project_id'] = project_id super(RequestContext, self).__init__(is_admin=is_admin, **kwargs) self.read_deleted = read_deleted self.remote_address = remote_address if not timestamp: timestamp = timeutils.utcnow() if isinstance(timestamp, str): timestamp = timeutils.parse_strtime(timestamp) self.timestamp = timestamp if service_catalog: # Only include required parts of service_catalog self.service_catalog = [s for s in service_catalog if s.get('type') in ('image', 'block-storage', 'volumev3', 'key-manager', 'placement', 'network', 'accelerator', 'sharev2')] else: # if list is empty or none self.service_catalog = [] # NOTE(markmc): this attribute is currently only used by the # rs_limits turnstile pre-processor. # See https://lists.launchpad.net/openstack/msg12200.html self.quota_class = quota_class # NOTE(dheeraj): The following attributes are used by cellsv2 to store # connection information for connecting to the target cell. # It is only manipulated using the target_cell contextmanager # provided by this module self.db_connection = None self.mq_connection = None self.cell_uuid = None self.user_auth_plugin = user_auth_plugin if self.is_admin is None: self.is_admin = policy.check_is_admin(self) def get_auth_plugin(self): if self.user_auth_plugin: return self.user_auth_plugin else: return _ContextAuthPlugin(self.auth_token, self.service_catalog) def _get_read_deleted(self): return self._read_deleted def _set_read_deleted(self, read_deleted): if read_deleted not in ('no', 'yes', 'only'): raise ValueError(_("read_deleted can only be one of 'no', " "'yes' or 'only', not %r") % read_deleted) self._read_deleted = read_deleted def _del_read_deleted(self): del self._read_deleted read_deleted = property(_get_read_deleted, _set_read_deleted, _del_read_deleted) def to_dict(self): values = super(RequestContext, self).to_dict() # FIXME(dims): defensive hasattr() checks need to be # removed once we figure out why we are seeing stack # traces values.update({ 'user_id': getattr(self, 'user_id', None), 'project_id': getattr(self, 'project_id', None), 'is_admin': getattr(self, 'is_admin', None), 'read_deleted': getattr(self, 'read_deleted', 'no'), 'remote_address': getattr(self, 'remote_address', None), 'timestamp': utils.strtime(self.timestamp) if hasattr( self, 'timestamp') else None, 'request_id': getattr(self, 'request_id', None), 'quota_class': getattr(self, 'quota_class', None), 'user_name': getattr(self, 'user_name', None), 'service_catalog': getattr(self, 'service_catalog', None), 'project_name': getattr(self, 'project_name', None), }) # NOTE(tonyb): This can be removed once we're certain to have a # RequestContext contains 'is_admin_project', We can only get away with # this because we "know" the default value of 'is_admin_project' which # is very fragile. values.update({ 'is_admin_project': getattr(self, 'is_admin_project', True), }) return values @classmethod def from_dict(cls, values): return super(RequestContext, cls).from_dict( values, user_id=values.get('user_id'), project_id=values.get('project_id'), # TODO(sdague): oslo.context has show_deleted, if # possible, we should migrate to that in the future so we # don't need to be different here. 
read_deleted=values.get('read_deleted', 'no'), remote_address=values.get('remote_address'), timestamp=values.get('timestamp'), quota_class=values.get('quota_class'), service_catalog=values.get('service_catalog'), ) def elevated(self, read_deleted=None): """Return a version of this context with admin flag set.""" context = copy.copy(self) # context.roles must be deepcopied to leave original roles # without changes context.roles = copy.deepcopy(self.roles) context.is_admin = True if 'admin' not in context.roles: context.roles.append('admin') if read_deleted is not None: context.read_deleted = read_deleted return context def can(self, action, target=None, fatal=True): """Verifies that the given action is valid on the target in this context. :param action: string representing the action to be checked. :param target: dictionary representing the object of the action for object creation this should be a dictionary representing the location of the object e.g. ``{'project_id': instance.project_id}``. :param fatal: if False, will return False when an exception.Forbidden occurs. :raises nova.exception.Forbidden: if verification fails and fatal is True. :return: returns a non-False value (not necessarily "True") if authorized and False if not authorized and fatal is False. """ try: return policy.authorize(self, action, target) except exception.Forbidden: if fatal: raise return False def to_policy_values(self): policy = super(RequestContext, self).to_policy_values() policy['is_admin'] = self.is_admin return policy def __str__(self): return "" % self.to_dict() def get_context(): """A helper method to get a blank context. Note that overwrite is False here so this context will not update the greenthread-local stored context that is used when logging. """ return RequestContext(user_id=None, project_id=None, is_admin=False, overwrite=False) def get_admin_context(read_deleted="no"): # NOTE(alaski): This method should only be used when an admin context is # necessary for the entirety of the context lifetime. If that's not the # case please use get_context(), or create the RequestContext manually, and # use context.elevated() where necessary. Some periodic tasks may use # get_admin_context so that their database calls are not filtered on # project_id. return RequestContext(user_id=None, project_id=None, is_admin=True, read_deleted=read_deleted, overwrite=False) def is_user_context(context): """Indicates if the request context is a normal user.""" if not context: return False if context.is_admin: return False if not context.user_id or not context.project_id: return False return True def require_context(ctxt): """Raise exception.Forbidden() if context is not a user or an admin context. 
""" if not ctxt.is_admin and not is_user_context(ctxt): raise exception.Forbidden() def authorize_project_context(context, project_id): """Ensures a request has permission to access the given project.""" if is_user_context(context): if not context.project_id: raise exception.Forbidden() elif context.project_id != project_id: raise exception.Forbidden() def authorize_user_context(context, user_id): """Ensures a request has permission to access the given user.""" if is_user_context(context): if not context.user_id: raise exception.Forbidden() elif context.user_id != user_id: raise exception.Forbidden() def authorize_quota_class_context(context, class_name): """Ensures a request has permission to access the given quota class.""" if is_user_context(context): if not context.quota_class: raise exception.Forbidden() elif context.quota_class != class_name: raise exception.Forbidden() def set_target_cell(context, cell_mapping): """Adds database connection information to the context for communicating with the given target_cell. This is used for permanently targeting a cell in a context. Use this when you want all subsequent code to target a cell. Passing None for cell_mapping will untarget the context. :param context: The RequestContext to add connection information :param cell_mapping: An objects.CellMapping object or None """ global CELL_CACHE if cell_mapping is not None: # avoid circular import from nova.db.main import api as db from nova import rpc # Synchronize access to the cache by multiple API workers. @utils.synchronized(cell_mapping.uuid) def get_or_set_cached_cell_and_set_connections(): try: cell_tuple = CELL_CACHE[cell_mapping.uuid] except KeyError: db_connection_string = cell_mapping.database_connection context.db_connection = db.create_context_manager( db_connection_string) if not cell_mapping.transport_url.startswith('none'): context.mq_connection = rpc.create_transport( cell_mapping.transport_url) context.cell_uuid = cell_mapping.uuid CELL_CACHE[cell_mapping.uuid] = (context.db_connection, context.mq_connection) else: context.db_connection = cell_tuple[0] context.mq_connection = cell_tuple[1] context.cell_uuid = cell_mapping.uuid get_or_set_cached_cell_and_set_connections() else: context.db_connection = None context.mq_connection = None context.cell_uuid = None @contextmanager def target_cell(context, cell_mapping): """Yields a new context with connection information for a specific cell. This function yields a copy of the provided context, which is targeted to the referenced cell for MQ and DB connections. Passing None for cell_mapping will yield an untargetd copy of the context. :param context: The RequestContext to add connection information :param cell_mapping: An objects.CellMapping object or None """ # Create a sanitized copy of context by serializing and deserializing it # (like we would do over RPC). This help ensure that we have a clean # copy of the context with all the tracked attributes, but without any # of the hidden/private things we cache on a context. We do this to avoid # unintentional sharing of cached thread-local data across threads. # Specifically, this won't include any oslo_db-set transaction context, or # any existing cell targeting. cctxt = RequestContext.from_dict(context.to_dict()) set_target_cell(cctxt, cell_mapping) yield cctxt def scatter_gather_cells(context, cell_mappings, timeout, fn, *args, **kwargs): """Target cells in parallel and return their results. 
The first parameter in the signature of the function to call for each cell should be of type RequestContext. :param context: The RequestContext for querying cells :param cell_mappings: The CellMappings to target in parallel :param timeout: The total time in seconds to wait for all the results to be gathered :param fn: The function to call for each cell :param args: The args for the function to call for each cell, not including the RequestContext :param kwargs: The kwargs for the function to call for each cell :returns: A dict {cell_uuid: result} containing the joined results. The did_not_respond_sentinel will be returned if a cell did not respond within the timeout. The exception object will be returned if the call to a cell raised an exception. The exception will be logged. """ tasks = {} results = {} def gather_result(cell_uuid, fn, *args, **kwargs): try: result = fn(*args, **kwargs) except Exception as e: # Only log the exception traceback for non-nova exceptions. if not isinstance(e, exception.NovaException): LOG.exception('Error gathering result from cell %s', cell_uuid) result = e return result executor = utils.get_scatter_gather_executor() for cell_mapping in cell_mappings: with target_cell(context, cell_mapping) as cctxt: future = utils.spawn_on( executor, gather_result, cell_mapping.uuid, fn, cctxt, *args, **kwargs) tasks[cell_mapping.uuid] = future futurist.waiters.wait_for_all(tasks.values(), timeout) for cell_uuid, future in tasks.items(): if not future.done(): results[cell_uuid] = did_not_respond_sentinel cancelled = future.cancel() if cancelled: if utils.concurrency_mode_threading(): LOG.warning( 'Timed out waiting for response from cell %s. ' 'The cell worker thread did not start and is now ' 'cancelled. The cell_worker_thread_pool_size is too ' 'small for the load or there are stuck worker threads ' 'filling the pool.', cell_uuid) else: LOG.warning( 'Timed out waiting for response from cell %s.', cell_uuid) else: LOG.warning( 'Timed out waiting for response from cell %s. Left the ' 'cell worker thread to finish in the background.', cell_uuid) else: results[cell_uuid] = future.result() return results def load_cells(): global CELLS if not CELLS: CELLS = objects.CellMappingList.get_all(get_admin_context()) LOG.debug('Found %(count)i cells: %(cells)s', dict(count=len(CELLS), cells=','.join([c.identity for c in CELLS]))) if not CELLS: LOG.error('No cells are configured, unable to continue') def is_cell_failure_sentinel(record): return (record is did_not_respond_sentinel or isinstance(record, Exception)) def scatter_gather_skip_cell0(context, fn, *args, **kwargs): """Target all cells except cell0 in parallel and return their results. The first parameter in the signature of the function to call for each cell should be of type RequestContext. There is a timeout for waiting on all results to be gathered. :param context: The RequestContext for querying cells :param fn: The function to call for each cell :param args: The args for the function to call for each cell, not including the RequestContext :param kwargs: The kwargs for the function to call for each cell :returns: A dict {cell_uuid: result} containing the joined results. The did_not_respond_sentinel will be returned if a cell did not respond within the timeout. The exception object will be returned if the call to a cell raised an exception. The exception will be logged. 
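    Illustrative usage only (``fn``, ``arg`` and ``handle`` are
    placeholders)::

        results = scatter_gather_skip_cell0(ctxt, fn, arg)
        for cell_uuid, result in results.items():
            if is_cell_failure_sentinel(result):
                continue  # cell timed out or raised; skip it
            handle(cell_uuid, result)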
""" load_cells() cell_mappings = [cell for cell in CELLS if not cell.is_cell0()] return scatter_gather_cells(context, cell_mappings, CELL_TIMEOUT, fn, *args, **kwargs) def scatter_gather_single_cell(context, cell_mapping, fn, *args, **kwargs): """Target the provided cell and return its results or sentinels in case of failure. The first parameter in the signature of the function to call for each cell should be of type RequestContext. :param context: The RequestContext for querying cells :param cell_mapping: The CellMapping to target :param fn: The function to call for each cell :param args: The args for the function to call for each cell, not including the RequestContext :param kwargs: The kwargs for the function to call for this cell :returns: A dict {cell_uuid: result} containing the joined results. The did_not_respond_sentinel will be returned if the cell did not respond within the timeout. The exception object will be returned if the call to the cell raised an exception. The exception will be logged. """ return scatter_gather_cells(context, [cell_mapping], CELL_TIMEOUT, fn, *args, **kwargs) def scatter_gather_all_cells(context, fn, *args, **kwargs): """Target all cells in parallel and return their results. The first parameter in the signature of the function to call for each cell should be of type RequestContext. There is a timeout for waiting on all results to be gathered. :param context: The RequestContext for querying cells :param fn: The function to call for each cell :param args: The args for the function to call for each cell, not including the RequestContext :param kwargs: The kwargs for the function to call for each cell :returns: A dict {cell_uuid: result} containing the joined results. The did_not_respond_sentinel will be returned if a cell did not respond within the timeout. The exception object will be returned if the call to a cell raised an exception. The exception will be logged. """ load_cells() return scatter_gather_cells(context, CELLS, CELL_TIMEOUT, fn, *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/crypto.py0000664000175000017500000002562500000000000015431 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Wrappers around standard crypto data elements. Includes root and intermediate CAs, SSH key_pairs and x509 certificates. 
""" import base64 import binascii import hashlib import io import os import typing as ty from castellan.common import exception as castellan_exception from castellan.common.objects import passphrase from castellan import key_manager from cryptography.hazmat import backends from cryptography.hazmat.primitives.asymmetric import padding from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives import serialization from cryptography import x509 from oslo_concurrency import processutils from oslo_log import log as logging from oslo_serialization import base64 as oslo_base64 import paramiko import nova.conf from nova import context as nova_context from nova import exception from nova.i18n import _ from nova import objects from nova import utils from nova.virt import block_device as driver_block_device LOG = logging.getLogger(__name__) CONF = nova.conf.CONF _KEYMGR = None _VTPM_SECRET_BYTE_LENGTH = 384 _EPHEMERAL_ENCRYPTION_SECRET_BYTE_LENGTH = 64 def _get_key_manager(): global _KEYMGR if _KEYMGR is None: _KEYMGR = key_manager.API(configuration=CONF) return _KEYMGR def generate_fingerprint(public_key: str) -> str: try: pub_bytes = public_key.encode('utf-8') # Test that the given public_key string is a proper ssh key. The # returned object is unused since pyca/cryptography does not have a # fingerprint method. serialization.load_ssh_public_key( pub_bytes, backends.default_backend()) pub_data = base64.b64decode(public_key.split(' ')[1]) raw_fp = hashlib.md5(pub_data, usedforsecurity=False).hexdigest() return ':'.join(a + b for a, b in zip(raw_fp[::2], raw_fp[1::2])) except Exception: raise exception.InvalidKeypair( reason=_('failed to generate fingerprint')) def generate_x509_fingerprint(pem_key: ty.Union[bytes, str]) -> str: try: if isinstance(pem_key, str): pem_key = pem_key.encode('utf-8') cert = x509.load_pem_x509_certificate( pem_key, backends.default_backend()) raw_fp = binascii.hexlify( cert.fingerprint(hashes.SHA1()) ).decode('ascii') return ':'.join(a + b for a, b in zip(raw_fp[::2], raw_fp[1::2])) except (ValueError, TypeError, binascii.Error) as ex: raise exception.InvalidKeypair( reason=_('failed to generate X509 fingerprint. ' 'Error message: %s') % ex) def generate_key_pair(bits: int = 2048) -> ty.Tuple[str, str, str]: key = paramiko.RSAKey.generate(bits) keyout = io.StringIO() key.write_private_key(keyout) private_key = keyout.getvalue() public_key = '%s %s Generated-by-Nova' % (key.get_name(), key.get_base64()) fingerprint = generate_fingerprint(public_key) return (private_key, public_key, fingerprint) def ssh_encrypt_text(ssh_public_key: str, text: ty.Union[str, bytes]) -> bytes: """Encrypt text with an ssh public key. If text is a Unicode string, encode it to UTF-8. 
""" if isinstance(text, str): text = text.encode('utf-8') try: pub_bytes = ssh_public_key.encode('utf-8') pub_key = serialization.load_ssh_public_key( pub_bytes, backends.default_backend()) return pub_key.encrypt(text, padding.PKCS1v15()) except Exception as exc: raise exception.EncryptionFailure(reason=str(exc)) def generate_winrm_x509_cert( user_id: str, bits: int = 2048 ) -> ty.Tuple[str, str, str]: """Generate a cert for passwordless auth for user in project.""" subject = '/CN=%s' % user_id upn = '%s@localhost' % user_id with utils.tempdir() as tmpdir: keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key')) conffile = os.path.abspath(os.path.join(tmpdir, 'temp.conf')) _create_x509_openssl_config(conffile, upn) out, _ = processutils.execute( 'openssl', 'req', '-x509', '-nodes', '-days', '3650', '-config', conffile, '-newkey', 'rsa:%s' % bits, '-outform', 'PEM', '-keyout', keyfile, '-subj', subject, '-extensions', 'v3_req_client', binary=True) certificate = out.decode('utf-8') out, _ = processutils.execute( 'openssl', 'pkcs12', '-export', '-inkey', keyfile, '-password', 'pass:', process_input=out, binary=True) private_key = base64.b64encode(out).decode('ascii') fingerprint = generate_x509_fingerprint(certificate) return (private_key, certificate, fingerprint) def _create_x509_openssl_config(conffile: str, upn: str): content = ("distinguished_name = req_distinguished_name\n" "[req_distinguished_name]\n" "[v3_req_client]\n" "extendedKeyUsage = clientAuth\n" "subjectAltName = otherName:""1.3.6.1.4.1.311.20.2.3;UTF8:%s\n") with open(conffile, 'w') as file: file.write(content % upn) def ensure_vtpm_secret( context: nova_context.RequestContext, instance: 'objects.Instance', ) -> ty.Tuple[str, str]: """Communicates with the key manager service to retrieve or create a secret for an instance's emulated TPM. When creating a secret, its UUID is saved to the instance's system_metadata as ``vtpm_secret_uuid``. :param context: Nova auth context. :param instance: Instance object. :return: A tuple comprising (secret_uuid, passphrase). :raise: castellan_exception.ManagedObjectNotFoundError if communication with the key manager API fails, or if a vtpm_secret_uuid was present in the instance's system metadata but could not be found in the key manager service. """ key_mgr = _get_key_manager() secret_uuid = instance.system_metadata.get('vtpm_secret_uuid') if secret_uuid is not None: # Try to retrieve the secret from the key manager try: secret = key_mgr.get(context, secret_uuid) # assert secret_uuid == secret.id ? LOG.debug( "Found existing vTPM secret with UUID %s.", secret_uuid, instance=instance) return secret.id, secret.get_encoded() except castellan_exception.ManagedObjectNotFoundError: LOG.warning( "Despite being set on the instance, failed to find a vTPM " "secret with UUID %s. This should only happen if the secret " "was manually deleted from the key manager service. Your vTPM " "is likely to be unrecoverable.", secret_uuid, instance=instance) raise # If we get here, the instance has no vtpm_secret_uuid. Create a new one # and register it with the key manager. 
secret = base64.b64encode(os.urandom(_VTPM_SECRET_BYTE_LENGTH)) # Castellan ManagedObject cmo = passphrase.Passphrase( secret, name="vTPM secret for instance %s" % instance.uuid) secret_uuid = key_mgr.store(context, cmo) LOG.debug("Created vTPM secret with UUID %s", secret_uuid, instance=instance) instance.system_metadata['vtpm_secret_uuid'] = secret_uuid instance.save() return secret_uuid, secret def delete_vtpm_secret( context: nova_context.RequestContext, instance: 'objects.Instance', ): """Communicates with the key manager service to destroy the secret for an instance's emulated TPM. This operation is idempotent: if the instance never had a vTPM secret, OR if the secret has already been deleted, it is a no-op. The ``vtpm_secret_uuid`` member of the instance's system_metadata is cleared as a side effect of this method. :param context: Nova auth context. :param instance: Instance object. :return: None :raise: castellan_exception.ManagedObjectNotFoundError if communication with the key manager API. """ secret_uuid = instance.system_metadata.get('vtpm_secret_uuid') if not secret_uuid: return key_mgr = _get_key_manager() try: key_mgr.delete(context, secret_uuid) LOG.debug("Deleted vTPM secret with UUID %s", secret_uuid, instance=instance) except castellan_exception.ManagedObjectNotFoundError: LOG.debug("vTPM secret with UUID %s already deleted or never existed.", secret_uuid, instance=instance) del instance.system_metadata['vtpm_secret_uuid'] instance.save() def create_encryption_secret( context: nova_context.RequestContext, instance: 'objects.Instance', driver_bdm: 'driver_block_device.DriverBlockDevice', for_detail: ty.Optional[str] = None, ): # Use oslo.serialization to encode some random data as passphrase secret = oslo_base64.encode_as_text( os.urandom(_EPHEMERAL_ENCRYPTION_SECRET_BYTE_LENGTH)) if for_detail is None: for_detail = f"instance {instance.uuid} BDM {driver_bdm['uuid']}" secret_name = f'Ephemeral encryption secret for {for_detail}' cmo = passphrase.Passphrase(secret, name=secret_name) key_mgr = _get_key_manager() secret_uuid = key_mgr.store(context, cmo) LOG.debug( f'Created "{secret_name}" with UUID {secret_uuid}', instance=instance ) return secret_uuid, secret def get_encryption_secret( context: nova_context.RequestContext, secret_uuid: str, ) -> ty.Optional[str]: key_mgr = _get_key_manager() try: key = key_mgr.get(context, secret_uuid) LOG.debug(f"Retrieved secret with UUID {secret_uuid}") return key.get_encoded() except castellan_exception.ManagedObjectNotFoundError: LOG.debug(f"Encryption secret with UUID {secret_uuid} was not found.") return None def delete_encryption_secret( context: nova_context.RequestContext, instance_uuid: str, secret_uuid: str, ): key_mgr = _get_key_manager() try: key_mgr.delete(context, secret_uuid) LOG.debug(f"Deleted secret with UUID {secret_uuid}", instance_uuid=instance_uuid) except castellan_exception.ManagedObjectNotFoundError: LOG.debug(f"Encryption secret with UUID {secret_uuid} already deleted " "or never existed.", instance_uuid=instance_uuid) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3616085 nova-32.0.0/nova/db/0000775000175000017500000000000000000000000014112 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/__init__.py0000664000175000017500000000000000000000000016211 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 
xustar000000000000000028 mtime=1759315689.3616085 nova-32.0.0/nova/db/api/0000775000175000017500000000000000000000000014663 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/api/__init__.py0000664000175000017500000000000000000000000016762 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/api/alembic.ini0000664000175000017500000000434600000000000016767 0ustar00zuulzuul00000000000000# A generic, single database configuration. [alembic] # path to migration scripts script_location = %(here)s/migrations # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # sys.path path, will be prepended to sys.path if present. # defaults to the current working directory. prepend_sys_path = . # indicates what character is used to split lists of file paths, including # version_locations and prepend_sys_path path_separator = os # timezone to use when rendering the date # within the migration file as well as the filename. # string value is passed to dateutil.tz.gettz() # leave blank for localtime # timezone = # max length of characters to apply to the # "slug" field # truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false # set to 'true' to allow .pyc and .pyo files without # a source .py file to be detected as revisions in the # versions/ directory # sourceless = false # version location specification; this defaults # to migrations/versions. When using multiple version # directories, initial revisions must be specified with --version-path # version_locations = %(here)s/bar %(here)s/bat migrations/versions # the output encoding used when revision files # are written from script.py.mako # output_encoding = utf-8 sqlalchemy.url = sqlite:///nova_api.db [post_write_hooks] # post_write_hooks defines scripts or Python functions that are run # on newly generated revision scripts. See the documentation for further # detail and examples # format using "black" - use the console_scripts runner, against the "black" entrypoint # hooks=black # black.type=console_scripts # black.entrypoint=black # black.options=-l 79 # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/api/api.py0000664000175000017500000000303200000000000016004 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_db.sqlalchemy import enginefacade from oslo_utils import importutils import sqlalchemy as sa import nova.conf profiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy') CONF = nova.conf.CONF context_manager = enginefacade.transaction_context() # NOTE(stephenfin): We don't need equivalents of the 'get_context_manager' or # 'create_context_manager' APIs found in 'nova.db.main.api' since we don't need # to be cell-aware here def _get_db_conf(conf_group, connection=None): kw = dict(conf_group.items()) if connection is not None: kw['connection'] = connection return kw def configure(conf): context_manager.configure(**_get_db_conf(conf.api_database)) if ( profiler_sqlalchemy and CONF.profiler.enabled and CONF.profiler.trace_sqlalchemy ): context_manager.append_on_engine_create( lambda eng: profiler_sqlalchemy.add_tracing(sa, eng, "db")) def get_engine(): return context_manager.writer.get_engine() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3616085 nova-32.0.0/nova/db/api/migrations/0000775000175000017500000000000000000000000017037 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/api/migrations/README.rst0000664000175000017500000000104600000000000020527 0ustar00zuulzuul00000000000000Migrations for the API database =============================== This directory contains migrations for the API database. These are implemented using `alembic`__, a lightweight database migration tool designed for usage with `SQLAlchemy`__. The best place to start understanding Alembic is with its own `tutorial`__. You can also play around with the :command:`alembic` command:: $ alembic --help .. __: https://alembic.sqlalchemy.org/en/latest/ .. __: https://www.sqlalchemy.org/ .. __: https://alembic.sqlalchemy.org/en/latest/tutorial.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/api/migrations/env.py0000664000175000017500000001004600000000000020202 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from logging.config import fileConfig from alembic import context from sqlalchemy import engine_from_config from sqlalchemy import pool from nova.db.api import models # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging unless we're told not to. # This line sets up loggers basically. if config.attributes.get('configure_logger', True): fileConfig(config.config_file_name) # this is the MetaData object for the various models in the API database target_metadata = models.BASE.metadata def include_name(name, type_, parent_names): """Determine which tables or columns to skip. 
This is used when we decide to "delete" a table or column. In this instance, we will remove the SQLAlchemy model or field but leave the underlying database table or column in place for a number of releases after. Once we're sure that there is no code running that contains references to the old models, we can then remove the underlying table. In the interim, we must track the discrepancy between models and actual database data here. """ if type_ == 'table': return name not in models.REMOVED_TABLES if type_ == 'column': return (parent_names['table_name'], name) not in models.REMOVED_COLUMNS return True def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ url = config.get_main_option("sqlalchemy.url") context.configure( url=url, target_metadata=target_metadata, render_as_batch=True, include_name=include_name, literal_binds=True, dialect_opts={"paramstyle": "named"}, ) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. This is modified from the default based on the below, since we want to share an engine when unit testing so in-memory database testing actually works. https://alembic.sqlalchemy.org/en/latest/cookbook.html#connection-sharing """ connectable = config.attributes.get('connection', None) if connectable is None: # only create Engine if we don't have a Connection from the outside connectable = engine_from_config( config.get_section(config.config_ini_section), prefix="sqlalchemy.", poolclass=pool.NullPool, ) with connectable.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata, render_as_batch=True, include_name=include_name, ) with context.begin_transaction(): context.run_migrations() else: context.configure( connection=connectable, target_metadata=target_metadata, render_as_batch=True, include_name=include_name, ) with context.begin_transaction(): context.run_migrations() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/api/migrations/script.py.mako0000664000175000017500000000172000000000000021643 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """${message} Revision ID: ${up_revision} Revises: ${down_revision | comma,n} Create Date: ${create_date} """ from alembic import op import sqlalchemy as sa ${imports if imports else ""} # revision identifiers, used by Alembic. 
revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} branch_labels = ${repr(branch_labels)} depends_on = ${repr(depends_on)} def upgrade(): ${upgrades if upgrades else "pass"} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3616085 nova-32.0.0/nova/db/api/migrations/versions/0000775000175000017500000000000000000000000020707 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/api/migrations/versions/b30f573d3377_remove_unused_build_requests_columns.py0000664000175000017500000000306600000000000032561 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Remove unused build_requests columns Revision ID: b30f573d3377 Revises: d67eeaabee36 Create Date: 2021-09-27 14:46:05.986174 """ from alembic import op # revision identifiers, used by Alembic. revision = 'b30f573d3377' down_revision = 'd67eeaabee36' branch_labels = None depends_on = None def upgrade(): with op.batch_alter_table('build_requests', schema=None) as batch_op: batch_op.drop_column('vm_state') batch_op.drop_column('access_ip_v6') batch_op.drop_column('config_drive') batch_op.drop_column('locked_by') batch_op.drop_column('security_groups') batch_op.drop_column('progress') batch_op.drop_column('info_cache') batch_op.drop_column('display_name') batch_op.drop_column('instance_metadata') batch_op.drop_column('image_ref') batch_op.drop_column('key_name') batch_op.drop_column('user_id') batch_op.drop_column('access_ip_v4') batch_op.drop_column('task_state') batch_op.drop_column('request_spec_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/api/migrations/versions/cdeec0c85668_drop_legacy_migrate_version_table.py0000664000175000017500000000210600000000000032163 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Drop legacy migrate_version table Revision ID: cdeec0c85668 Revises: b30f573d3377 Create Date: 2023-02-01 17:04:52.984703 """ from alembic import op from sqlalchemy.engine import reflection # revision identifiers, used by Alembic. 
revision = 'cdeec0c85668' down_revision = 'b30f573d3377' branch_labels = None depends_on = None def upgrade(): conn = op.get_bind() inspector = reflection.Inspector.from_engine(conn) tables = inspector.get_table_names() if 'migrate_version' in tables: op.drop_table('migrate_version') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/api/migrations/versions/d67eeaabee36_initial_version.py0000664000175000017500000005666400000000000026621 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Initial version Revision ID: d67eeaabee36 Revises: Create Date: 2021-04-13 12:45:35.549607 """ from alembic import op import sqlalchemy as sa from sqlalchemy import dialects from nova.db import types from nova.objects import keypair # revision identifiers, used by Alembic. revision = 'd67eeaabee36' down_revision = None branch_labels = None depends_on = None def InetSmall(): return sa.String(length=39).with_variant( dialects.postgresql.INET(), 'postgresql' ) def upgrade(): bind = op.get_bind() op.create_table( 'cell_mappings', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('uuid', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255)), sa.Column('transport_url', sa.Text()), sa.Column('database_connection', sa.Text()), # NOTE(stephenfin): These were originally added by sqlalchemy-migrate # which did not generate the constraints sa.Column( 'disabled', sa.Boolean(create_constraint=False), default=False), sa.UniqueConstraint('uuid', name='uniq_cell_mappings0uuid'), sa.Index('uuid_idx', 'uuid'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'host_mappings', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('cell_id', sa.Integer, nullable=False), sa.Column('host', sa.String(length=255), nullable=False), sa.UniqueConstraint( 'host', name='uniq_host_mappings0host'), sa.Index('host_idx', 'host'), sa.ForeignKeyConstraint( columns=['cell_id'], refcolumns=['cell_mappings.id']), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'instance_mappings', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('instance_uuid', sa.String(length=36), nullable=False), sa.Column('cell_id', sa.Integer, nullable=True), sa.Column('project_id', sa.String(length=255), nullable=False), # NOTE(stephenfin): These were originally added by sqlalchemy-migrate # which did not generate the constraints sa.Column( 'queued_for_delete', sa.Boolean(create_constraint=False), default=False), sa.Column('user_id', sa.String(length=255), nullable=True), sa.UniqueConstraint( 'instance_uuid', name='uniq_instance_mappings0instance_uuid'), sa.Index('instance_uuid_idx', 'instance_uuid'), sa.Index('project_id_idx', 'project_id'), 
sa.Index( 'instance_mappings_user_id_project_id_idx', 'user_id', 'project_id'), sa.ForeignKeyConstraint( columns=['cell_id'], refcolumns=['cell_mappings.id']), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'flavors', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('memory_mb', sa.Integer, nullable=False), sa.Column('vcpus', sa.Integer, nullable=False), sa.Column('swap', sa.Integer, nullable=False), sa.Column('vcpu_weight', sa.Integer), sa.Column('flavorid', sa.String(length=255), nullable=False), sa.Column('rxtx_factor', sa.Float), sa.Column('root_gb', sa.Integer), sa.Column('ephemeral_gb', sa.Integer), sa.Column('disabled', sa.Boolean), sa.Column('is_public', sa.Boolean), sa.Column('description', sa.Text()), sa.UniqueConstraint('flavorid', name='uniq_flavors0flavorid'), sa.UniqueConstraint('name', name='uniq_flavors0name'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'flavor_extra_specs', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('flavor_id', sa.Integer, nullable=False), sa.Column('key', sa.String(length=255), nullable=False), sa.Column('value', sa.String(length=255)), sa.UniqueConstraint( 'flavor_id', 'key', name='uniq_flavor_extra_specs0flavor_id0key'), sa.Index('flavor_extra_specs_flavor_id_key_idx', 'flavor_id', 'key'), sa.ForeignKeyConstraint( columns=['flavor_id'], refcolumns=['flavors.id']), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'flavor_projects', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('flavor_id', sa.Integer, nullable=False), sa.Column('project_id', sa.String(length=255), nullable=False), sa.UniqueConstraint( 'flavor_id', 'project_id', name='uniq_flavor_projects0flavor_id0project_id'), sa.ForeignKeyConstraint( columns=['flavor_id'], refcolumns=['flavors.id']), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'request_specs', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('instance_uuid', sa.String(36), nullable=False), sa.Column('spec', types.MediumText(), nullable=False), sa.UniqueConstraint( 'instance_uuid', name='uniq_request_specs0instance_uuid'), sa.Index('request_spec_instance_uuid_idx', 'instance_uuid'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'build_requests', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('request_spec_id', sa.Integer, nullable=True), sa.Column('project_id', sa.String(length=255), nullable=False), sa.Column('user_id', sa.String(length=255), nullable=True), sa.Column('display_name', sa.String(length=255)), sa.Column('instance_metadata', sa.Text), sa.Column('progress', sa.Integer), sa.Column('vm_state', sa.String(length=255)), sa.Column('task_state', sa.String(length=255)), sa.Column('image_ref', sa.String(length=255)), sa.Column('access_ip_v4', InetSmall()), sa.Column('access_ip_v6', InetSmall()), sa.Column('info_cache', sa.Text), sa.Column('security_groups', sa.Text, nullable=True), sa.Column('config_drive', sa.Boolean, default=False, nullable=True), sa.Column('key_name', 
sa.String(length=255)), sa.Column( 'locked_by', sa.Enum('owner', 'admin', name='build_requests0locked_by')), sa.Column('instance_uuid', sa.String(length=36)), sa.Column('instance', types.MediumText()), sa.Column('block_device_mappings', types.MediumText()), sa.Column('tags', sa.Text()), sa.UniqueConstraint( 'instance_uuid', name='uniq_build_requests0instance_uuid'), sa.Index('build_requests_project_id_idx', 'project_id'), sa.Index('build_requests_instance_uuid_idx', 'instance_uuid'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'key_pairs', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('name', sa.String(255), nullable=False), sa.Column('user_id', sa.String(255), nullable=False), sa.Column('fingerprint', sa.String(255)), sa.Column('public_key', sa.Text()), sa.Column( 'type', sa.Enum('ssh', 'x509', name='keypair_types'), nullable=False, server_default=keypair.KEYPAIR_TYPE_SSH), sa.UniqueConstraint( 'user_id', 'name', name='uniq_key_pairs0user_id0name'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'projects', sa.Column( 'id', sa.Integer, primary_key=True, nullable=False, autoincrement=True), sa.Column('external_id', sa.String(length=255), nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.UniqueConstraint('external_id', name='uniq_projects0external_id'), mysql_engine='InnoDB', mysql_charset='latin1', ) op.create_table( 'users', sa.Column( 'id', sa.Integer, primary_key=True, nullable=False, autoincrement=True), sa.Column('external_id', sa.String(length=255), nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.UniqueConstraint('external_id', name='uniq_users0external_id'), mysql_engine='InnoDB', mysql_charset='latin1', ) op.create_table( 'resource_classes', sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.UniqueConstraint('name', name='uniq_resource_classes0name'), mysql_engine='InnoDB', mysql_charset='latin1' ) nameargs = {} if bind.engine.name == 'mysql': nameargs['collation'] = 'utf8_bin' op.create_table( 'resource_providers', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('uuid', sa.String(36), nullable=False), sa.Column('name', sa.Unicode(200, **nameargs), nullable=True), sa.Column('generation', sa.Integer, default=0), sa.Column('can_host', sa.Integer, default=0), sa.Column( 'root_provider_id', sa.Integer, sa.ForeignKey('resource_providers.id')), sa.Column( 'parent_provider_id', sa.Integer, sa.ForeignKey('resource_providers.id')), sa.UniqueConstraint('uuid', name='uniq_resource_providers0uuid'), sa.UniqueConstraint('name', name='uniq_resource_providers0name'), sa.Index('resource_providers_name_idx', 'name'), sa.Index('resource_providers_uuid_idx', 'uuid'), sa.Index( 'resource_providers_root_provider_id_idx', 'root_provider_id'), sa.Index( 'resource_providers_parent_provider_id_idx', 'parent_provider_id'), mysql_engine='InnoDB', mysql_charset='latin1' ) op.create_table( 'inventories', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('resource_provider_id', sa.Integer, nullable=False), sa.Column('resource_class_id', 
sa.Integer, nullable=False), sa.Column('total', sa.Integer, nullable=False), sa.Column('reserved', sa.Integer, nullable=False), sa.Column('min_unit', sa.Integer, nullable=False), sa.Column('max_unit', sa.Integer, nullable=False), sa.Column('step_size', sa.Integer, nullable=False), sa.Column('allocation_ratio', sa.Float, nullable=False), sa.Index( 'inventories_resource_provider_id_idx', 'resource_provider_id'), sa.Index( 'inventories_resource_provider_resource_class_idx', 'resource_provider_id', 'resource_class_id'), sa.Index( 'inventories_resource_class_id_idx', 'resource_class_id'), sa.UniqueConstraint( 'resource_provider_id', 'resource_class_id', name='uniq_inventories0resource_provider_resource_class'), mysql_engine='InnoDB', mysql_charset='latin1' ) op.create_table( 'traits', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column( 'id', sa.Integer, primary_key=True, nullable=False, autoincrement=True), sa.Column('name', sa.Unicode(255, **nameargs), nullable=False), sa.UniqueConstraint('name', name='uniq_traits0name'), mysql_engine='InnoDB', mysql_charset='latin1', ) op.create_table( 'allocations', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('resource_provider_id', sa.Integer, nullable=False), sa.Column('consumer_id', sa.String(36), nullable=False), sa.Column('resource_class_id', sa.Integer, nullable=False), sa.Column('used', sa.Integer, nullable=False), sa.Index( 'allocations_resource_provider_class_used_idx', 'resource_provider_id', 'resource_class_id', 'used'), sa.Index( 'allocations_resource_class_id_idx', 'resource_class_id'), sa.Index('allocations_consumer_id_idx', 'consumer_id'), mysql_engine='InnoDB', mysql_charset='latin1' ) op.create_table( 'consumers', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column( 'id', sa.Integer, primary_key=True, nullable=False, autoincrement=True), sa.Column('uuid', sa.String(length=36), nullable=False), sa.Column('project_id', sa.Integer, nullable=False), sa.Column('user_id', sa.Integer, nullable=False), sa.Column( 'generation', sa.Integer, default=0, server_default=sa.text('0'), nullable=False), sa.Index('consumers_project_id_uuid_idx', 'project_id', 'uuid'), sa.Index( 'consumers_project_id_user_id_uuid_idx', 'project_id', 'user_id', 'uuid'), sa.UniqueConstraint('uuid', name='uniq_consumers0uuid'), mysql_engine='InnoDB', mysql_charset='latin1', ) op.create_table( 'resource_provider_aggregates', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column( 'resource_provider_id', sa.Integer, primary_key=True, nullable=False), sa.Column( 'aggregate_id', sa.Integer, primary_key=True, nullable=False), sa.Index( 'resource_provider_aggregates_aggregate_id_idx', 'aggregate_id'), mysql_engine='InnoDB', mysql_charset='latin1' ) op.create_table( 'resource_provider_traits', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column( 'trait_id', sa.Integer, sa.ForeignKey('traits.id'), primary_key=True, nullable=False), sa.Column( 'resource_provider_id', sa.Integer, primary_key=True, nullable=False), sa.Index( 'resource_provider_traits_resource_provider_trait_idx', 'resource_provider_id', 'trait_id'), sa.ForeignKeyConstraint( columns=['resource_provider_id'], refcolumns=['resource_providers.id']), mysql_engine='InnoDB', mysql_charset='latin1', ) op.create_table( 'placement_aggregates', sa.Column('created_at', sa.DateTime), 
sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('uuid', sa.String(length=36), index=True), sa.UniqueConstraint('uuid', name='uniq_placement_aggregates0uuid'), mysql_engine='InnoDB', mysql_charset='latin1' ) op.create_table( 'aggregates', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('uuid', sa.String(length=36)), sa.Column('name', sa.String(length=255)), sa.Index('aggregate_uuid_idx', 'uuid'), sa.UniqueConstraint('name', name='uniq_aggregate0name'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'aggregate_hosts', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('host', sa.String(length=255)), sa.Column( 'aggregate_id', sa.Integer, sa.ForeignKey('aggregates.id'), nullable=False), sa.UniqueConstraint( 'host', 'aggregate_id', name='uniq_aggregate_hosts0host0aggregate_id'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'aggregate_metadata', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column( 'aggregate_id', sa.Integer, sa.ForeignKey('aggregates.id'), nullable=False), sa.Column('key', sa.String(length=255), nullable=False), sa.Column('value', sa.String(length=255), nullable=False), sa.UniqueConstraint( 'aggregate_id', 'key', name='uniq_aggregate_metadata0aggregate_id0key'), sa.Index('aggregate_metadata_key_idx', 'key'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'instance_groups', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('user_id', sa.String(length=255)), sa.Column('project_id', sa.String(length=255)), sa.Column('uuid', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255)), sa.UniqueConstraint( 'uuid', name='uniq_instance_groups0uuid'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'instance_group_policy', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('policy', sa.String(length=255)), sa.Column( 'group_id', sa.Integer, sa.ForeignKey('instance_groups.id'), nullable=False), sa.Column('rules', sa.Text), sa.Index('instance_group_policy_policy_idx', 'policy'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'instance_group_member', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('instance_uuid', sa.String(length=255)), sa.Column( 'group_id', sa.Integer, sa.ForeignKey('instance_groups.id'), nullable=False), sa.Index('instance_group_member_instance_idx', 'instance_uuid'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'quota_classes', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('class_name', sa.String(length=255)), sa.Column('resource', sa.String(length=255)), sa.Column('hard_limit', sa.Integer), sa.Index('quota_classes_class_name_idx', 'class_name'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'quota_usages', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), 
sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('project_id', sa.String(length=255)), sa.Column('resource', sa.String(length=255), nullable=False), sa.Column('in_use', sa.Integer, nullable=False), sa.Column('reserved', sa.Integer, nullable=False), sa.Column('until_refresh', sa.Integer), sa.Column('user_id', sa.String(length=255)), sa.Index('quota_usages_project_id_idx', 'project_id'), sa.Index('quota_usages_user_id_idx', 'user_id'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'quotas', sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('project_id', sa.String(length=255)), sa.Column('resource', sa.String(length=255), nullable=False), sa.Column('hard_limit', sa.Integer), sa.UniqueConstraint( 'project_id', 'resource', name='uniq_quotas0project_id0resource'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'project_user_quotas', sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('user_id', sa.String(length=255), nullable=False), sa.Column('project_id', sa.String(length=255), nullable=False), sa.Column('resource', sa.String(length=255), nullable=False), sa.Column('hard_limit', sa.Integer, nullable=True), sa.UniqueConstraint( 'user_id', 'project_id', 'resource', name='uniq_project_user_quotas0user_id0project_id0resource'), sa.Index( 'project_user_quotas_project_id_idx', 'project_id'), sa.Index( 'project_user_quotas_user_id_idx', 'user_id'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'reservations', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('uuid', sa.String(length=36), nullable=False), sa.Column( 'usage_id', sa.Integer, sa.ForeignKey('quota_usages.id'), nullable=False), sa.Column('project_id', sa.String(length=255)), sa.Column('resource', sa.String(length=255)), sa.Column('delta', sa.Integer, nullable=False), sa.Column('expire', sa.DateTime), sa.Column('user_id', sa.String(length=255)), sa.Index('reservations_project_id_idx', 'project_id'), sa.Index('reservations_uuid_idx', 'uuid'), sa.Index('reservations_expire_idx', 'expire'), sa.Index('reservations_user_id_idx', 'user_id'), mysql_engine='InnoDB', mysql_charset='utf8', ) def downgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/api/models.py0000664000175000017500000004106600000000000016527 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
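# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the upstream file): a minimal,
# self-contained rendition of how the ``include_name`` hook shown earlier in
# nova/db/api/migrations/env.py consumes the REMOVED_TABLES / REMOVED_COLUMNS
# sets defined further down in this module, so that Alembic's autogenerate
# ignores tables and columns whose models were deliberately deleted. The
# sample entries ('some_removed_table' and ('build_requests', 'vm_state'))
# are hypothetical placeholders, not the module's actual contents.


def _sketch_include_name_filtering():
    removed_tables = {'some_removed_table'}
    removed_columns = {('build_requests', 'vm_state')}

    def include_name(name, type_, parent_names):
        # Mirrors env.py: hide removed tables/columns from autogenerate so the
        # temporary model/schema mismatch is not "repaired" by a new revision.
        if type_ == 'table':
            return name not in removed_tables
        if type_ == 'column':
            return (parent_names['table_name'], name) not in removed_columns
        return True

    # Usage: removed names are filtered out, everything else is kept.
    assert include_name('some_removed_table', 'table', {}) is False
    assert include_name('flavors', 'table', {}) is True
    assert include_name(
        'vm_state', 'column', {'table_name': 'build_requests'}) is False
# ---------------------------------------------------------------------------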
from oslo_db.sqlalchemy import models from oslo_log import log as logging import sqlalchemy as sa import sqlalchemy.dialects.mysql from sqlalchemy import orm from sqlalchemy import schema from nova.db import types LOG = logging.getLogger(__name__) # NOTE(stephenfin): This is a list of fields that have been removed from # various SQLAlchemy models but which still exist in the underlying tables. Our # upgrade policy dictates that we remove fields from models at least one cycle # before we remove the column from the underlying table. Not doing so would # prevent us from applying the new database schema before rolling out any of # the new code since the old code could attempt to access data in the removed # columns. Alembic identifies this temporary mismatch between the models and # underlying tables and attempts to resolve it. Tell it instead to ignore these # until we're ready to remove them ourselves. REMOVED_COLUMNS = [] # NOTE(stephenfin): A list of foreign key constraints that were removed when # the column they were covering was removed. REMOVED_FKEYS = [] # NOTE(stephenfin): A list of entire models that have been removed. REMOVED_TABLES = { # Tables that were moved to the placement database in Train. The # models were removed in Y and the tables can be dropped in Z or # later 'allocations', 'consumers', 'inventories', 'placement_aggregates', 'projects', 'resource_classes', 'resource_provider_aggregates', 'resource_provider_traits', 'resource_providers', 'traits', 'users', } class _NovaAPIBase(models.ModelBase, models.TimestampMixin): pass BASE = orm.declarative_base(cls=_NovaAPIBase) class AggregateHost(BASE): """Represents a host that is a member of an aggregate.""" __tablename__ = 'aggregate_hosts' __table_args__ = (schema.UniqueConstraint( "host", "aggregate_id", name="uniq_aggregate_hosts0host0aggregate_id" ), ) id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) host = sa.Column(sa.String(255)) aggregate_id = sa.Column( sa.Integer, sa.ForeignKey('aggregates.id'), nullable=False) class AggregateMetadata(BASE): """Represents a metadata key/value pair for an aggregate.""" __tablename__ = 'aggregate_metadata' __table_args__ = ( schema.UniqueConstraint("aggregate_id", "key", name="uniq_aggregate_metadata0aggregate_id0key" ), sa.Index('aggregate_metadata_key_idx', 'key'), ) id = sa.Column(sa.Integer, primary_key=True) key = sa.Column(sa.String(255), nullable=False) value = sa.Column(sa.String(255), nullable=False) aggregate_id = sa.Column( sa.Integer, sa.ForeignKey('aggregates.id'), nullable=False) class Aggregate(BASE): """Represents a cluster of hosts that exists in this zone.""" __tablename__ = 'aggregates' __table_args__ = ( sa.Index('aggregate_uuid_idx', 'uuid'), schema.UniqueConstraint("name", name="uniq_aggregate0name") ) id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) uuid = sa.Column(sa.String(36)) name = sa.Column(sa.String(255)) _hosts = orm.relationship( AggregateHost, primaryjoin='Aggregate.id == AggregateHost.aggregate_id', cascade='delete') _metadata = orm.relationship( AggregateMetadata, primaryjoin='Aggregate.id == AggregateMetadata.aggregate_id', cascade='delete') @property def _extra_keys(self): return ['hosts', 'metadetails', 'availability_zone'] @property def hosts(self): return [h.host for h in self._hosts] @property def metadetails(self): return {m.key: m.value for m in self._metadata} @property def availability_zone(self): if 'availability_zone' not in self.metadetails: return None return self.metadetails['availability_zone'] class
CellMapping(BASE): """Contains information on communicating with a cell""" __tablename__ = 'cell_mappings' __table_args__ = ( sa.Index('uuid_idx', 'uuid'), schema.UniqueConstraint('uuid', name='uniq_cell_mappings0uuid'), ) id = sa.Column(sa.Integer, primary_key=True) uuid = sa.Column(sa.String(36), nullable=False) name = sa.Column(sa.String(255)) transport_url = sa.Column(sa.Text()) database_connection = sa.Column(sa.Text()) disabled = sa.Column(sa.Boolean, default=False) host_mapping = orm.relationship( 'HostMapping', back_populates='cell_mapping', ) instance_mapping = orm.relationship( 'InstanceMapping', back_populates='cell_mapping', ) class InstanceMapping(BASE): """Contains the mapping of an instance to which cell it is in""" __tablename__ = 'instance_mappings' __table_args__ = ( sa.Index('project_id_idx', 'project_id'), sa.Index('instance_uuid_idx', 'instance_uuid'), schema.UniqueConstraint( 'instance_uuid', name='uniq_instance_mappings0instance_uuid'), sa.Index( 'instance_mappings_user_id_project_id_idx', 'user_id', 'project_id', ), ) id = sa.Column(sa.Integer, primary_key=True) instance_uuid = sa.Column(sa.String(36), nullable=False) cell_id = sa.Column( sa.Integer, sa.ForeignKey('cell_mappings.id'), nullable=True) project_id = sa.Column(sa.String(255), nullable=False) # FIXME(melwitt): This should eventually be non-nullable, but we need a # transition period first. user_id = sa.Column(sa.String(255), nullable=True) queued_for_delete = sa.Column(sa.Boolean) cell_mapping = orm.relationship( 'CellMapping', back_populates='instance_mapping', ) class HostMapping(BASE): """Contains mapping of a compute host to which cell it is in""" __tablename__ = "host_mappings" __table_args__ = ( sa.Index('host_idx', 'host'), schema.UniqueConstraint('host', name='uniq_host_mappings0host'), ) id = sa.Column(sa.Integer, primary_key=True) cell_id = sa.Column( sa.Integer, sa.ForeignKey('cell_mappings.id'), nullable=False) host = sa.Column(sa.String(255), nullable=False) cell_mapping = orm.relationship( 'CellMapping', back_populates='host_mapping', ) class RequestSpec(BASE): """Represents the information passed to the scheduler.""" __tablename__ = 'request_specs' __table_args__ = ( sa.Index('request_spec_instance_uuid_idx', 'instance_uuid'), schema.UniqueConstraint( 'instance_uuid', name='uniq_request_specs0instance_uuid'), ) id = sa.Column(sa.Integer, primary_key=True) instance_uuid = sa.Column(sa.String(36), nullable=False) spec = sa.Column(types.MediumText(), nullable=False) class Flavors(BASE): """Represents possible flavors for instances""" __tablename__ = 'flavors' __table_args__ = ( schema.UniqueConstraint("flavorid", name="uniq_flavors0flavorid"), schema.UniqueConstraint("name", name="uniq_flavors0name")) id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.String(255), nullable=False) memory_mb = sa.Column(sa.Integer, nullable=False) vcpus = sa.Column(sa.Integer, nullable=False) root_gb = sa.Column(sa.Integer) ephemeral_gb = sa.Column(sa.Integer) flavorid = sa.Column(sa.String(255), nullable=False) swap = sa.Column(sa.Integer, nullable=False, default=0) rxtx_factor = sa.Column(sa.Float, default=1) vcpu_weight = sa.Column(sa.Integer) disabled = sa.Column(sa.Boolean, default=False) is_public = sa.Column(sa.Boolean, default=True) description = sa.Column(sa.Text) extra_specs = orm.relationship('FlavorExtraSpecs', back_populates='flavor') projects = orm.relationship('FlavorProjects', back_populates='flavor') class FlavorExtraSpecs(BASE): """Represents additional specs as key/value pairs 
for a flavor""" __tablename__ = 'flavor_extra_specs' __table_args__ = ( sa.Index('flavor_extra_specs_flavor_id_key_idx', 'flavor_id', 'key'), schema.UniqueConstraint('flavor_id', 'key', name='uniq_flavor_extra_specs0flavor_id0key'), {'mysql_collate': 'utf8_bin'}, ) id = sa.Column(sa.Integer, primary_key=True) key = sa.Column(sa.String(255), nullable=False) value = sa.Column(sa.String(255)) flavor_id = sa.Column( sa.Integer, sa.ForeignKey('flavors.id'), nullable=False) flavor = orm.relationship(Flavors, back_populates='extra_specs') class FlavorProjects(BASE): """Represents projects associated with flavors""" __tablename__ = 'flavor_projects' __table_args__ = (schema.UniqueConstraint('flavor_id', 'project_id', name='uniq_flavor_projects0flavor_id0project_id'),) id = sa.Column(sa.Integer, primary_key=True) flavor_id = sa.Column( sa.Integer, sa.ForeignKey('flavors.id'), nullable=False) project_id = sa.Column(sa.String(255), nullable=False) flavor = orm.relationship(Flavors, back_populates='projects') class BuildRequest(BASE): """Represents the information passed to the scheduler.""" __tablename__ = 'build_requests' __table_args__ = ( sa.Index('build_requests_instance_uuid_idx', 'instance_uuid'), sa.Index('build_requests_project_id_idx', 'project_id'), schema.UniqueConstraint( 'instance_uuid', name='uniq_build_requests0instance_uuid'), ) id = sa.Column(sa.Integer, primary_key=True) # TODO(mriedem): instance_uuid should be nullable=False instance_uuid = sa.Column(sa.String(36)) project_id = sa.Column(sa.String(255), nullable=False) instance = sa.Column(types.MediumText()) block_device_mappings = sa.Column(types.MediumText()) tags = sa.Column(sa.Text()) class KeyPair(BASE): """Represents a public key pair for ssh / WinRM.""" __tablename__ = 'key_pairs' __table_args__ = ( schema.UniqueConstraint( "user_id", "name", name="uniq_key_pairs0user_id0name"), ) id = sa.Column(sa.Integer, primary_key=True, nullable=False) name = sa.Column(sa.String(255), nullable=False) user_id = sa.Column(sa.String(255), nullable=False) fingerprint = sa.Column(sa.String(255)) public_key = sa.Column(sa.Text()) type = sa.Column( sa.Enum('ssh', 'x509', name='keypair_types'), nullable=False, server_default='ssh') class InstanceGroupMember(BASE): """Represents the members for an instance group.""" __tablename__ = 'instance_group_member' __table_args__ = ( sa.Index('instance_group_member_instance_idx', 'instance_uuid'), ) id = sa.Column(sa.Integer, primary_key=True, nullable=False) instance_uuid = sa.Column(sa.String(255)) group_id = sa.Column( sa.Integer, sa.ForeignKey('instance_groups.id'), nullable=False) class InstanceGroupPolicy(BASE): """Represents the policy type for an instance group.""" __tablename__ = 'instance_group_policy' __table_args__ = ( sa.Index('instance_group_policy_policy_idx', 'policy'), ) id = sa.Column(sa.Integer, primary_key=True, nullable=False) policy = sa.Column(sa.String(255)) group_id = sa.Column( sa.Integer, sa.ForeignKey('instance_groups.id'), nullable=False) rules = sa.Column(sa.Text) class InstanceGroup(BASE): """Represents an instance group. A group will maintain a collection of instances and the relationship between them. 
""" __tablename__ = 'instance_groups' __table_args__ = ( schema.UniqueConstraint('uuid', name='uniq_instance_groups0uuid'), ) id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) user_id = sa.Column(sa.String(255)) project_id = sa.Column(sa.String(255)) uuid = sa.Column(sa.String(36), nullable=False) name = sa.Column(sa.String(255)) _policies = orm.relationship(InstanceGroupPolicy) _members = orm.relationship(InstanceGroupMember) @property def policy(self): if len(self._policies) > 1: msg = ("More than one policy (%(policies)s) is associated with " "group %(group_name)s, only the first one in the list " "would be returned.") LOG.warning(msg, {"policies": [p.policy for p in self._policies], "group_name": self.name}) return self._policies[0] if self._policies else None @property def members(self): return [m.instance_uuid for m in self._members] class Quota(BASE): """Represents a single quota override for a project. If there is no row for a given project id and resource, then the default for the quota class is used. If there is no row for a given quota class and resource, then the default for the deployment is used. If the row is present but the hard limit is Null, then the resource is unlimited. """ __tablename__ = 'quotas' __table_args__ = ( schema.UniqueConstraint( "project_id", "resource", name="uniq_quotas0project_id0resource" ), ) id = sa.Column(sa.Integer, primary_key=True) project_id = sa.Column(sa.String(255)) resource = sa.Column(sa.String(255), nullable=False) hard_limit = sa.Column(sa.Integer) class ProjectUserQuota(BASE): """Represents a single quota override for a user with in a project.""" __tablename__ = 'project_user_quotas' __table_args__ = ( schema.UniqueConstraint( "user_id", "project_id", "resource", name="uniq_project_user_quotas0user_id0project_id0resource", ), sa.Index( 'project_user_quotas_project_id_idx', 'project_id'), sa.Index( 'project_user_quotas_user_id_idx', 'user_id',) ) id = sa.Column(sa.Integer, primary_key=True, nullable=False) project_id = sa.Column(sa.String(255), nullable=False) user_id = sa.Column(sa.String(255), nullable=False) resource = sa.Column(sa.String(255), nullable=False) hard_limit = sa.Column(sa.Integer) class QuotaClass(BASE): """Represents a single quota override for a quota class. If there is no row for a given quota class and resource, then the default for the deployment is used. If the row is present but the hard limit is Null, then the resource is unlimited. 
""" __tablename__ = 'quota_classes' __table_args__ = ( sa.Index('quota_classes_class_name_idx', 'class_name'), ) id = sa.Column(sa.Integer, primary_key=True) class_name = sa.Column(sa.String(255)) resource = sa.Column(sa.String(255)) hard_limit = sa.Column(sa.Integer) class QuotaUsage(BASE): """Represents the current usage for a given resource.""" __tablename__ = 'quota_usages' __table_args__ = ( sa.Index('quota_usages_project_id_idx', 'project_id'), sa.Index('quota_usages_user_id_idx', 'user_id'), ) id = sa.Column(sa.Integer, primary_key=True) project_id = sa.Column(sa.String(255)) user_id = sa.Column(sa.String(255)) resource = sa.Column(sa.String(255), nullable=False) in_use = sa.Column(sa.Integer, nullable=False) reserved = sa.Column(sa.Integer, nullable=False) @property def total(self): return self.in_use + self.reserved until_refresh = sa.Column(sa.Integer) class Reservation(BASE): """Represents a resource reservation for quotas.""" __tablename__ = 'reservations' __table_args__ = ( sa.Index('reservations_project_id_idx', 'project_id'), sa.Index('reservations_uuid_idx', 'uuid'), sa.Index('reservations_expire_idx', 'expire'), sa.Index('reservations_user_id_idx', 'user_id'), ) id = sa.Column(sa.Integer, primary_key=True, nullable=False) uuid = sa.Column(sa.String(36), nullable=False) usage_id = sa.Column( sa.Integer, sa.ForeignKey('quota_usages.id'), nullable=False) project_id = sa.Column(sa.String(255)) user_id = sa.Column(sa.String(255)) resource = sa.Column(sa.String(255)) delta = sa.Column(sa.Integer, nullable=False) expire = sa.Column(sa.DateTime) usage = orm.relationship('QuotaUsage') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/constants.py0000664000175000017500000000237100000000000016503 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Useful db-related constants. In their own file so they can be imported cleanly.""" # The maximum value a signed INT type may have MAX_INT = 0x7FFFFFFF # NOTE(dosaboy): This is supposed to represent the maximum value that we can # place into a SQL single precision float so that we can check whether values # are oversize. Postgres and MySQL both define this as their max whereas Sqlite # uses dynamic typing so this would not apply. Different dbs react in different # ways to oversize values e.g. postgres will raise an exception while mysql # will round off the value. Nevertheless we may still want to know prior to # insert whether the value is oversize or not. 
SQL_SP_FLOAT_MAX = 3.40282e+38 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3656087 nova-32.0.0/nova/db/main/0000775000175000017500000000000000000000000015036 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/main/__init__.py0000664000175000017500000000000000000000000017135 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/main/alembic.ini0000664000175000017500000000437400000000000017143 0ustar00zuulzuul00000000000000# A generic, single database configuration. [alembic] # path to migration scripts script_location = %(here)s/migrations # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # sys.path path, will be prepended to sys.path if present. # defaults to the current working directory. prepend_sys_path = . # indicates what character is used to split lists of file paths, including # version_locations and prepend_sys_path path_separator = os # timezone to use when rendering the date # within the migration file as well as the filename. # string value is passed to dateutil.tz.gettz() # leave blank for localtime # timezone = # max length of characters to apply to the # "slug" field # truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false # set to 'true' to allow .pyc and .pyo files without # a source .py file to be detected as revisions in the # versions/ directory # sourceless = false # version location specification; this defaults # to nova/db/main/migrations/versions. When using multiple version # directories, initial revisions must be specified with --version-path # version_locations = %(here)s/bar %(here)s/bat nova/db/main/migrations/versions # the output encoding used when revision files # are written from script.py.mako # output_encoding = utf-8 sqlalchemy.url = sqlite:///nova.db [post_write_hooks] # post_write_hooks defines scripts or Python functions that are run # on newly generated revision scripts. See the documentation for further # detail and examples # format using "black" - use the console_scripts runner, against the "black" entrypoint # hooks=black # black.type=console_scripts # black.entrypoint=black # black.options=-l 79 # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/main/api.py0000664000175000017500000054532000000000000016172 0ustar00zuulzuul00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of SQLAlchemy backend.""" import collections import copy import datetime import functools import inspect import traceback from oslo_db import api as oslo_db_api from oslo_db import exception as db_exc from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import update_match from oslo_db.sqlalchemy import utils as sqlalchemyutils from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import timeutils from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy import exc as sqla_exc from sqlalchemy import orm from sqlalchemy import schema from sqlalchemy import sql from sqlalchemy.sql import expression from sqlalchemy.sql import func from nova import block_device from nova.compute import task_states from nova.compute import vm_states import nova.conf import nova.context from nova.db.main import models from nova.db import utils as db_utils from nova.db.utils import require_context from nova import exception from nova.i18n import _ from nova import safe_utils profiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy') CONF = nova.conf.CONF LOG = logging.getLogger(__name__) DISABLE_DB_ACCESS = False context_manager = enginefacade.transaction_context() def _get_db_conf(conf_group, connection=None): kw = dict(conf_group.items()) if connection is not None: kw['connection'] = connection return kw def _context_manager_from_context(context): if context: try: return context.db_connection except AttributeError: pass def _joinedload_all(lead_entity, column): """Do a nested load. For example, resolve the following:: _joinedload_all(models.SecurityGroup, 'instances.info_cache') to: orm.joinedload( models.SecurityGroup.instances ).joinedload( Instance.info_cache ) """ elements = column.split('.') relationship_attr = getattr(lead_entity, elements.pop(0)) joined = orm.joinedload(relationship_attr) for element in elements: relationship_entity = relationship_attr.entity.class_ relationship_attr = getattr(relationship_entity, element) joined = joined.joinedload(relationship_attr) return joined def configure(conf): context_manager.configure(**_get_db_conf(conf.database)) if ( profiler_sqlalchemy and CONF.profiler.enabled and CONF.profiler.trace_sqlalchemy ): context_manager.append_on_engine_create( lambda eng: profiler_sqlalchemy.add_tracing(sa, eng, "db")) def create_context_manager(connection=None): """Create a database context manager object for a cell database connection. :param connection: The database connection string """ ctxt_mgr = enginefacade.transaction_context() ctxt_mgr.configure(**_get_db_conf(CONF.database, connection=connection)) return ctxt_mgr def get_context_manager(context): """Get a database context manager object. :param context: The request context that can contain a context manager """ return _context_manager_from_context(context) or context_manager def get_engine(use_slave=False, context=None): """Get a database engine object. 
:param use_slave: Whether to use the slave connection :param context: The request context that can contain a context manager """ ctxt_mgr = get_context_manager(context) if use_slave: return ctxt_mgr.reader.get_engine() return ctxt_mgr.writer.get_engine() _SHADOW_TABLE_PREFIX = 'shadow_' _DEFAULT_QUOTA_NAME = 'default' PER_PROJECT_QUOTAS = ['fixed_ips', 'floating_ips', 'networks'] def select_db_reader_mode(f): """Decorator to select synchronous or asynchronous reader mode. The kwarg argument 'use_slave' defines reader mode. Asynchronous reader will be used if 'use_slave' is True and synchronous reader otherwise. If 'use_slave' is not specified default value 'False' will be used. Wrapped function must have a context in the arguments. """ @functools.wraps(f) def wrapper(*args, **kwargs): wrapped_func = safe_utils.get_wrapped_function(f) keyed_args = inspect.getcallargs(wrapped_func, *args, **kwargs) context = keyed_args['context'] use_slave = keyed_args.get('use_slave', False) if use_slave: reader_mode = get_context_manager(context).async_ else: reader_mode = get_context_manager(context).reader with reader_mode.using(context): return f(*args, **kwargs) wrapper.__signature__ = inspect.signature(f) return wrapper def _check_db_access(): # disable all database access if required if DISABLE_DB_ACCESS: service_name = 'nova-compute' stacktrace = ''.join(traceback.format_stack()) LOG.error( 'No DB access allowed in %(service_name)s: %(stacktrace)s', {'service_name': service_name, 'stacktrace': stacktrace}) raise exception.DBNotAllowed(binary=service_name) def pick_context_manager_writer(f): """Decorator to use a writer db context manager. The db context manager will be picked from the RequestContext. Wrapped function must have a RequestContext in the arguments. """ @functools.wraps(f) def wrapper(context, *args, **kwargs): _check_db_access() ctxt_mgr = get_context_manager(context) with ctxt_mgr.writer.using(context): return f(context, *args, **kwargs) wrapper.__signature__ = inspect.signature(f) return wrapper def pick_context_manager_reader(f): """Decorator to use a reader db context manager. The db context manager will be picked from the RequestContext. Wrapped function must have a RequestContext in the arguments. """ @functools.wraps(f) def wrapper(context, *args, **kwargs): _check_db_access() ctxt_mgr = get_context_manager(context) with ctxt_mgr.reader.using(context): return f(context, *args, **kwargs) wrapper.__signature__ = inspect.signature(f) return wrapper def pick_context_manager_reader_allow_async(f): """Decorator to use a reader.allow_async db context manager. The db context manager will be picked from the RequestContext. Wrapped function must have a RequestContext in the arguments. """ @functools.wraps(f) def wrapper(context, *args, **kwargs): _check_db_access() ctxt_mgr = get_context_manager(context) with ctxt_mgr.reader.allow_async.using(context): return f(context, *args, **kwargs) wrapper.__signature__ = inspect.signature(f) return wrapper def model_query( context, model, args=None, read_deleted=None, project_only=False, ): """Query helper that accounts for context's `read_deleted` field. :param context: The request context that can contain a context manager :param model: Model to query. Must be a subclass of ModelBase. :param args: Arguments to query. If None - model is used. :param read_deleted: If not None, overrides context's read_deleted field. 
Permitted values are 'no', which does not return deleted values; 'only', which only returns deleted values; and 'yes', which does not filter deleted values. :param project_only: If set and context is user-type, then restrict query to match the context's project_id. If set to 'allow_none', restriction includes project_id = None. """ if read_deleted is None: read_deleted = context.read_deleted query_kwargs = {} if 'no' == read_deleted: query_kwargs['deleted'] = False elif 'only' == read_deleted: query_kwargs['deleted'] = True elif 'yes' == read_deleted: pass else: raise ValueError(_("Unrecognized read_deleted value '%s'") % read_deleted) query = sqlalchemyutils.model_query( model, context.session, args, **query_kwargs) # We can't use oslo.db model_query's project_id here, as it doesn't allow # us to return both our projects and unowned projects. if nova.context.is_user_context(context) and project_only: if project_only == 'allow_none': query = query.filter(sql.or_( model.project_id == context.project_id, model.project_id == sql.null() )) else: query = query.filter_by(project_id=context.project_id) return query def convert_objects_related_datetimes(values, *datetime_keys): if not datetime_keys: datetime_keys = ('created_at', 'deleted_at', 'updated_at') for key in datetime_keys: if key in values and values[key]: if isinstance(values[key], str): try: values[key] = timeutils.parse_strtime(values[key]) except ValueError: # Try alternate parsing since parse_strtime will fail # with say converting '2015-05-28T19:59:38+00:00' values[key] = timeutils.parse_isotime(values[key]) # NOTE(danms): Strip UTC timezones from datetimes, since they're # stored that way in the database values[key] = values[key].replace(tzinfo=None) return values ################### def constraint(**conditions): """Return a constraint object suitable for use with some updates.""" return Constraint(conditions) def equal_any(*values): """Return an equality condition object suitable for use in a constraint. Equal_any conditions require that a model object's attribute equal any one of the given values. """ return EqualityCondition(values) def not_equal(*values): """Return an inequality condition object suitable for use in a constraint. Not_equal conditions require that a model object's attribute differs from all of the given values. 
""" return InequalityCondition(values) class Constraint(object): def __init__(self, conditions): self.conditions = conditions def apply(self, model, query): for key, condition in self.conditions.items(): for clause in condition.clauses(getattr(model, key)): query = query.filter(clause) return query class EqualityCondition(object): def __init__(self, values): self.values = values def clauses(self, field): # method signature requires us to return an iterable even if for OR # operator this will actually be a single clause return [sql.or_(*[field == value for value in self.values])] class InequalityCondition(object): def __init__(self, values): self.values = values def clauses(self, field): return [field != value for value in self.values] ################### @pick_context_manager_writer def service_destroy(context, service_id): """Destroy the service or raise if it does not exist.""" service = service_get(context, service_id) model_query(context, models.Service).\ filter_by(id=service_id).\ soft_delete(synchronize_session=False) if service.binary == 'nova-compute': # TODO(sbauza): Remove the service_id filter in a later release # once we are sure that all compute nodes report the host field model_query(context, models.ComputeNode).\ filter(sql.or_( models.ComputeNode.service_id == service_id, models.ComputeNode.host == service['host'])).\ soft_delete(synchronize_session=False) @pick_context_manager_reader def service_get(context, service_id): """Get a service or raise if it does not exist.""" query = model_query(context, models.Service).filter_by(id=service_id) result = query.first() if not result: raise exception.ServiceNotFound(service_id=service_id) return result @pick_context_manager_reader def service_get_by_uuid(context, service_uuid): """Get a service by it's uuid or raise ServiceNotFound if it does not exist. """ query = model_query(context, models.Service).filter_by(uuid=service_uuid) result = query.first() if not result: raise exception.ServiceNotFound(service_id=service_uuid) return result @pick_context_manager_reader_allow_async def service_get_minimum_version(context, binaries): """Get the minimum service version in the database.""" min_versions = context.session.query( models.Service.binary, func.min(models.Service.version)).\ filter(models.Service.binary.in_(binaries)).\ filter(models.Service.deleted == 0).\ filter(models.Service.forced_down == sql.false()).\ group_by(models.Service.binary) return dict(min_versions) @pick_context_manager_reader def service_get_all(context, disabled=None): """Get all services.""" query = model_query(context, models.Service) if disabled is not None: query = query.filter_by(disabled=disabled) return query.all() @pick_context_manager_reader def service_get_all_by_topic(context, topic): """Get all services for a given topic.""" return model_query(context, models.Service, read_deleted="no").\ filter_by(disabled=False).\ filter_by(topic=topic).\ all() @pick_context_manager_reader def service_get_by_host_and_topic(context, host, topic): """Get a service by hostname and topic it listens to.""" return model_query(context, models.Service, read_deleted="no").\ filter_by(disabled=False).\ filter_by(host=host).\ filter_by(topic=topic).\ first() @pick_context_manager_reader def service_get_all_by_binary(context, binary, include_disabled=False): """Get services for a given binary. 
Includes disabled services if 'include_disabled' parameter is True """ query = model_query(context, models.Service).filter_by(binary=binary) if not include_disabled: query = query.filter_by(disabled=False) return query.all() @pick_context_manager_reader def service_get_all_computes_by_hv_type(context, hv_type, include_disabled=False): """Get all compute services for a given hypervisor type. Includes disabled services if 'include_disabled' parameter is True. """ query = model_query(context, models.Service, read_deleted="no").\ filter_by(binary='nova-compute') if not include_disabled: query = query.filter_by(disabled=False) query = query.join(models.ComputeNode, models.Service.host == models.ComputeNode.host).\ filter(models.ComputeNode.hypervisor_type == hv_type).\ distinct() return query.all() @pick_context_manager_reader def service_get_by_host_and_binary(context, host, binary): """Get a service by hostname and binary.""" result = model_query(context, models.Service, read_deleted="no").\ filter_by(host=host).\ filter_by(binary=binary).\ first() if not result: raise exception.HostBinaryNotFound(host=host, binary=binary) return result @pick_context_manager_reader def service_get_all_by_host(context, host): """Get all services for a given host.""" return model_query(context, models.Service, read_deleted="no").\ filter_by(host=host).\ all() @pick_context_manager_reader_allow_async def service_get_by_compute_host(context, host): """Get the service entry for a given compute host. Returns the service entry joined with the compute_node entry. """ result = model_query(context, models.Service, read_deleted="no").\ filter_by(host=host).\ filter_by(binary='nova-compute').\ first() if not result: raise exception.ComputeHostNotFound(host=host) return result @pick_context_manager_writer def service_create(context, values): """Create a service from the values dictionary.""" service_ref = models.Service() service_ref.update(values) # We only auto-disable nova-compute services since those are the only # ones that can be enabled using the os-services REST API and they are # the only ones where being disabled means anything. It does # not make sense to be able to disable non-compute services like # nova-scheduler or nova-osapi_compute since that does nothing. if not CONF.enable_new_services and values.get('binary') == 'nova-compute': msg = _("New compute service disabled due to config option.") service_ref.disabled = True service_ref.disabled_reason = msg try: service_ref.save(context.session) except db_exc.DBDuplicateEntry as e: if 'binary' in e.columns: raise exception.ServiceBinaryExists(host=values.get('host'), binary=values.get('binary')) raise exception.ServiceTopicExists(host=values.get('host'), topic=values.get('topic')) return service_ref @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @pick_context_manager_writer def service_update(context, service_id, values): """Set the given properties on a service and update it. :raises: NotFound if service does not exist. """ service_ref = service_get(context, service_id) # Only servicegroup.drivers.db.DbDriver._report_state() updates # 'report_count', so if that value changes then store the timestamp # as the last time we got a state report. 
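    # (The resulting last_seen_up timestamp is what the DB servicegroup
    # driver later compares against CONF.service_down_time when deciding
    # whether the service is still up.)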
if 'report_count' in values: if values['report_count'] > service_ref.report_count: service_ref.last_seen_up = timeutils.utcnow() service_ref.update(values) return service_ref ################### def _compute_node_select(context, filters=None, limit=None, marker=None): if filters is None: filters = {} cn_tbl = models.ComputeNode.__table__.alias('cn') select = sa.select(cn_tbl) if context.read_deleted == "no": select = select.where(cn_tbl.c.deleted == 0) if "compute_id" in filters: select = select.where(cn_tbl.c.id == filters["compute_id"]) if "service_id" in filters: select = select.where(cn_tbl.c.service_id == filters["service_id"]) if "host" in filters: select = select.where(cn_tbl.c.host == filters["host"]) if "hypervisor_hostname" in filters: hyp_hostname = filters["hypervisor_hostname"] select = select.where(cn_tbl.c.hypervisor_hostname == hyp_hostname) if "mapped" in filters: select = select.where(cn_tbl.c.mapped < filters['mapped']) if marker is not None: try: compute_node_get(context, marker) except exception.ComputeHostNotFound: raise exception.MarkerNotFound(marker=marker) select = select.where(cn_tbl.c.id > marker) if limit is not None: select = select.limit(limit) # Explicitly order by id, so we're not dependent on the native sort # order of the underlying DB. select = select.order_by(expression.asc("id")) return select def _compute_node_fetchall(context, filters=None, limit=None, marker=None): select = _compute_node_select(context, filters, limit=limit, marker=marker) engine = get_engine(context=context) with engine.connect() as conn, conn.begin(): results = conn.execute(select).fetchall() # Callers expect dict-like objects, not SQLAlchemy RowProxy objects... results = [dict(r._mapping) for r in results] conn.close() return results @pick_context_manager_reader def compute_node_get(context, compute_id): """Get a compute node by its id. :param context: The security context :param compute_id: ID of the compute node :returns: Dictionary-like object containing properties of the compute node :raises: ComputeHostNotFound if compute node with the given ID doesn't exist. """ results = _compute_node_fetchall(context, {"compute_id": compute_id}) if not results: raise exception.ComputeHostNotFound(host=compute_id) return results[0] # TODO(edleafe): remove once the compute node resource provider migration is # complete, and this distinction is no longer necessary. @pick_context_manager_reader def compute_node_get_model(context, compute_id): """Get a compute node sqlalchemy model object by its id. :param context: The security context :param compute_id: ID of the compute node :returns: Sqlalchemy model object containing properties of the compute node :raises: ComputeHostNotFound if compute node with the given ID doesn't exist. """ result = model_query(context, models.ComputeNode).\ filter_by(id=compute_id).\ first() if not result: raise exception.ComputeHostNotFound(host=compute_id) return result @pick_context_manager_reader def compute_nodes_get_by_service_id(context, service_id): """Get a list of compute nodes by their associated service id. :param context: The security context :param service_id: ID of the associated service :returns: List of dictionary-like objects, each containing properties of the compute node, including its corresponding service and statistics :raises: ServiceNotFound if service with the given ID doesn't exist. 
""" results = _compute_node_fetchall(context, {"service_id": service_id}) if not results: raise exception.ServiceNotFound(service_id=service_id) return results @pick_context_manager_reader def compute_node_get_by_host_and_nodename(context, host, nodename): """Get a compute node by its associated host and nodename. :param context: The security context (admin) :param host: Name of the host :param nodename: Name of the node :returns: Dictionary-like object containing properties of the compute node, including its statistics :raises: ComputeHostNotFound if host with the given name doesn't exist. """ results = _compute_node_fetchall(context, {"host": host, "hypervisor_hostname": nodename}) if not results: raise exception.ComputeHostNotFound(host=host) return results[0] @pick_context_manager_reader def compute_node_get_by_nodename(context, hypervisor_hostname): """Get a compute node by hypervisor_hostname. :param context: The security context (admin) :param hypervisor_hostname: Name of the node :returns: Dictionary-like object containing properties of the compute node, including its statistics :raises: ComputeHostNotFound if hypervisor_hostname with the given name doesn't exist. """ results = _compute_node_fetchall(context, {"hypervisor_hostname": hypervisor_hostname}) if not results: raise exception.ComputeHostNotFound(host=hypervisor_hostname) return results[0] @pick_context_manager_reader def compute_node_get_all(context): """Get all compute nodes. :param context: The security context :returns: List of dictionaries each containing compute node properties """ return _compute_node_fetchall(context) @pick_context_manager_reader_allow_async def compute_node_get_all_by_host(context, host): """Get all compute nodes by host name. :param context: The security context (admin) :param host: Name of the host :returns: List of dictionaries each containing compute node properties """ results = _compute_node_fetchall(context, {"host": host}) if not results: raise exception.ComputeHostNotFound(host=host) return results @pick_context_manager_reader def compute_node_get_all_mapped_less_than(context, mapped_less_than): """Get all compute nodes with specific mapped values. :param context: The security context :param mapped_less_than: Get compute nodes with mapped less than this value :returns: List of dictionaries each containing compute node properties """ return _compute_node_fetchall(context, {'mapped': mapped_less_than}) @pick_context_manager_reader def compute_node_get_all_by_pagination(context, limit=None, marker=None): """Get all compute nodes by pagination. :param context: The security context :param limit: Maximum number of items to return :param marker: The last item of the previous page, the next results after this value will be returned :returns: List of dictionaries each containing compute node properties """ return _compute_node_fetchall(context, limit=limit, marker=marker) @pick_context_manager_reader def compute_node_search_by_hypervisor(context, hypervisor_match): """Get all compute nodes by hypervisor hostname. :param context: The security context :param hypervisor_match: The hypervisor hostname :returns: List of dictionary-like objects each containing compute node properties """ field = models.ComputeNode.hypervisor_hostname return model_query(context, models.ComputeNode).\ filter(field.like('%%%s%%' % hypervisor_match)).\ all() @pick_context_manager_writer def _compute_node_create(context, values): """Create a compute node from the values dictionary. 
:param context: The security context :param values: Dictionary containing compute node properties :returns: Dictionary-like object containing the properties of the created node, including its corresponding service and statistics """ convert_objects_related_datetimes(values) compute_node_ref = models.ComputeNode() compute_node_ref.update(values) compute_node_ref.save(context.session) return compute_node_ref # NOTE(mgoddard): We avoid decorating this with @pick_context_manager_writer, # so that we get a separate transaction in the exception handler. This avoids # an error message about inactive DB sessions during a transaction rollback. # See https://bugs.launchpad.net/nova/+bug/1853159. def compute_node_create(context, values): """Creates a new ComputeNode and populates the capacity fields with the most recent data. Will restore a soft deleted compute node if a UUID has been explicitly requested. """ try: compute_node_ref = _compute_node_create(context, values) except db_exc.DBDuplicateEntry: with excutils.save_and_reraise_exception(logger=LOG) as err_ctx: # Check to see if we have a (soft) deleted ComputeNode with the # same UUID and if so just update it and mark as no longer (soft) # deleted. See bug 1839560 for details. if 'uuid' in values: # Get a fresh context for a new DB session and allow it to # get a deleted record. ctxt = nova.context.get_admin_context(read_deleted='yes') compute_node_ref = _compute_node_get_and_update_deleted( ctxt, values) # If we didn't get anything back we failed to find the node # by uuid and update it so re-raise the DBDuplicateEntry. if compute_node_ref: err_ctx.reraise = False return compute_node_ref @pick_context_manager_writer def _compute_node_get_and_update_deleted(context, values): """Find a compute node by uuid, update and un-delete it. This is a special case from the ``compute_node_create`` method which needs to be separate to get a new Session. This method will update the ComputeNode, if found, to have deleted=0 and deleted_at=None values. :param context: request auth context which should be able to read deleted records :param values: values used to update the ComputeNode record - must include uuid :return: updated ComputeNode sqlalchemy model object if successfully found and updated, None otherwise """ cn = model_query( context, models.ComputeNode).filter_by(uuid=values['uuid']).first() if cn: # Update with the provided values but un-soft-delete. update_values = copy.deepcopy(values) update_values['deleted'] = 0 update_values['deleted_at'] = None return compute_node_update(context, cn.id, update_values) @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @pick_context_manager_writer def compute_node_update(context, compute_id, values): """Set the given properties on a compute node and update it. :param context: The security context :param compute_id: ID of the compute node :param values: Dictionary containing compute node properties to be updated :returns: Dictionary-like object containing the properties of the updated compute node, including its corresponding service and statistics :raises: ComputeHostNotFound if compute node with the given ID doesn't exist. """ compute_ref = compute_node_get_model(context, compute_id) # Always update this, even if there's going to be no other # changes in data. This ensures that we invalidate the # scheduler cache of compute node data in case of races. 
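    # Note that this deliberately overwrites any 'updated_at' value the
    # caller may have passed in.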
values['updated_at'] = timeutils.utcnow() convert_objects_related_datetimes(values) compute_ref.update(values) return compute_ref @pick_context_manager_writer def compute_node_delete(context, compute_id, constraint=None): """Delete a compute node from the database. :param context: The security context :param compute_id: ID of the compute node :param constraint: a constraint object :raises: ComputeHostNotFound if compute node with the given ID doesn't exist. :raises: ConstraintNotMet if a constraint was specified and it was not met. """ query = model_query(context, models.ComputeNode).filter_by(id=compute_id) if constraint is not None: query = constraint.apply(models.ComputeNode, query) result = query.soft_delete(synchronize_session=False) if not result: # The soft_delete could fail for one of two reasons: # 1) The compute node no longer exists # 2) The constraint, if specified, was not met # Try to read the compute node and let it raise ComputeHostNotFound if # 1) happened. compute_node_get(context, compute_id) # Else, raise ConstraintNotMet if 2) happened. raise exception.ConstraintNotMet() @pick_context_manager_reader def compute_node_statistics(context): """Get aggregate statistics over all compute nodes. :param context: The security context :returns: Dictionary containing compute node characteristics summed up over all the compute nodes, e.g. 'vcpus', 'free_ram_mb' etc. """ engine = get_engine(context=context) services_tbl = models.Service.__table__ inner_sel = _compute_node_select(context).alias('inner_sel') # TODO(sbauza): Remove the service_id filter in a later release # once we are sure that all compute nodes report the host field j = sa.join( inner_sel, services_tbl, sql.and_( sql.or_( inner_sel.c.host == services_tbl.c.host, inner_sel.c.service_id == services_tbl.c.id ), services_tbl.c.disabled == sql.false(), services_tbl.c.binary == 'nova-compute', services_tbl.c.deleted == 0 ) ) # NOTE(jaypipes): This COALESCE() stuff is temporary while the data # migration to the new resource providers inventories and allocations # tables is completed. 
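    # Sum the capacity and usage columns across every compute node that
    # joined against an enabled, non-deleted nova-compute service; count()
    # reports how many such nodes matched.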
agg_cols = [ func.count().label('count'), sql.func.sum( inner_sel.c.current_workload ).label('current_workload'), sql.func.sum( inner_sel.c.disk_available_least ).label('disk_available_least'), sql.func.sum( inner_sel.c.free_disk_gb ).label('free_disk_gb'), sql.func.sum( inner_sel.c.free_ram_mb ).label('free_ram_mb'), sql.func.sum( inner_sel.c.local_gb ).label('local_gb'), sql.func.sum( inner_sel.c.local_gb_used ).label('local_gb_used'), sql.func.sum( inner_sel.c.memory_mb ).label('memory_mb'), sql.func.sum( inner_sel.c.memory_mb_used ).label('memory_mb_used'), sql.func.sum( inner_sel.c.running_vms ).label('running_vms'), sql.func.sum( inner_sel.c.vcpus ).label('vcpus'), sql.func.sum( inner_sel.c.vcpus_used ).label('vcpus_used'), ] select = sql.select(*agg_cols).select_from(j) with engine.connect() as conn, conn.begin(): results = conn.execute(select).mappings().fetchone() # Build a dict of the info--making no assumptions about result fields = ( 'count', 'current_workload', 'disk_available_least', 'free_disk_gb', 'free_ram_mb', 'local_gb', 'local_gb_used', 'memory_mb', 'memory_mb_used', 'running_vms', 'vcpus', 'vcpus_used') return {field: int(results[field] or 0) for field in fields} ################### @pick_context_manager_writer def certificate_create(context, values): """Create a certificate from the values dictionary.""" certificate_ref = models.Certificate() for (key, value) in values.items(): certificate_ref[key] = value certificate_ref.save(context.session) return certificate_ref @pick_context_manager_reader def certificate_get_all_by_project(context, project_id): """Get all certificates for a project.""" return model_query(context, models.Certificate, read_deleted="no").\ filter_by(project_id=project_id).\ all() @pick_context_manager_reader def certificate_get_all_by_user(context, user_id): """Get all certificates for a user.""" return model_query(context, models.Certificate, read_deleted="no").\ filter_by(user_id=user_id).\ all() @pick_context_manager_reader def certificate_get_all_by_user_and_project(context, user_id, project_id): """Get all certificates for a user and project.""" return model_query(context, models.Certificate, read_deleted="no").\ filter_by(user_id=user_id).\ filter_by(project_id=project_id).\ all() ################### @require_context @pick_context_manager_writer def virtual_interface_create(context, values): """Create a new virtual interface record. :param values: Dict containing column values. """ try: vif_ref = models.VirtualInterface() vif_ref.update(values) vif_ref.save(context.session) except db_exc.DBError: LOG.exception("VIF creation failed with a database error.") raise exception.VirtualInterfaceCreateException() return vif_ref def _virtual_interface_query(context): return model_query(context, models.VirtualInterface, read_deleted="no") @require_context @pick_context_manager_writer def virtual_interface_update(context, vif_uuid, values): """Create a virtual interface record in the database.""" vif_ref = virtual_interface_get_by_uuid(context, vif_uuid) vif_ref.update(values) vif_ref.save(context.session) return vif_ref @require_context @pick_context_manager_reader def virtual_interface_get(context, vif_id): """Get a virtual interface by ID. :param vif_id: ID of the virtual interface. """ vif_ref = _virtual_interface_query(context).\ filter_by(id=vif_id).\ first() return vif_ref @require_context @pick_context_manager_reader def virtual_interface_get_by_address(context, address): """Get a virtual interface by address. 
:param address: The address of the interface you're looking to get. """ try: vif_ref = _virtual_interface_query(context).\ filter_by(address=address).\ first() except db_exc.DBError: msg = _("Invalid virtual interface address %s in request") % address LOG.warning(msg) raise exception.InvalidIpAddressError(msg) return vif_ref @require_context @pick_context_manager_reader def virtual_interface_get_by_uuid(context, vif_uuid): """Get a virtual interface by UUID. :param vif_uuid: The uuid of the interface you're looking to get """ vif_ref = _virtual_interface_query(context).\ filter_by(uuid=vif_uuid).\ first() return vif_ref @require_context @pick_context_manager_reader_allow_async def virtual_interface_get_by_instance(context, instance_uuid): """Gets all virtual interfaces for instance. :param instance_uuid: UUID of the instance to filter on. """ vif_refs = _virtual_interface_query(context).\ filter_by(instance_uuid=instance_uuid).\ order_by(expression.asc("created_at"), expression.asc("id")).\ all() return vif_refs @require_context @pick_context_manager_reader def virtual_interface_get_by_instance_and_network(context, instance_uuid, network_id): """Get all virtual interface for instance that's associated with network. """ vif_ref = _virtual_interface_query(context).\ filter_by(instance_uuid=instance_uuid).\ filter_by(network_id=network_id).\ first() return vif_ref @require_context @pick_context_manager_writer def virtual_interface_delete_by_instance(context, instance_uuid): """Delete virtual interface records associated with instance. :param instance_uuid: UUID of the instance to filter on. """ _virtual_interface_query(context).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() @require_context @pick_context_manager_writer def virtual_interface_delete(context, id): """Delete a virtual interface records. :param id: ID of the interface. """ _virtual_interface_query(context).\ filter_by(id=id).\ soft_delete() @require_context @pick_context_manager_reader def virtual_interface_get_all(context): """Get all virtual interface records.""" vif_refs = _virtual_interface_query(context).all() return vif_refs ################### def _metadata_refs(metadata_dict, meta_class): metadata_refs = [] if metadata_dict: for k, v in metadata_dict.items(): metadata_ref = meta_class() metadata_ref['key'] = k metadata_ref['value'] = v metadata_refs.append(metadata_ref) return metadata_refs def _validate_unique_server_name(context, name): if not CONF.osapi_compute_unique_server_name_scope: return lowername = name.lower() base_query = model_query(context, models.Instance, read_deleted='no').\ filter(func.lower(models.Instance.hostname) == lowername) if CONF.osapi_compute_unique_server_name_scope == 'project': instance_with_same_name = base_query.\ filter_by(project_id=context.project_id).\ count() elif CONF.osapi_compute_unique_server_name_scope == 'global': instance_with_same_name = base_query.count() else: return if instance_with_same_name > 0: raise exception.InstanceExists(name=lowername) def _handle_objects_related_type_conversions(values): """Make sure that certain things in values (which may have come from an objects.instance.Instance object) are in suitable form for the database. 
""" # NOTE(danms): Make sure IP addresses are passed as strings to # the database engine for key in ('access_ip_v4', 'access_ip_v6'): if key in values and values[key] is not None: values[key] = str(values[key]) datetime_keys = ('created_at', 'deleted_at', 'updated_at', 'launched_at', 'terminated_at') convert_objects_related_datetimes(values, *datetime_keys) def _check_instance_exists_in_project(context, instance_uuid): if not model_query(context, models.Instance, read_deleted="no", project_only=True).filter_by( uuid=instance_uuid).first(): raise exception.InstanceNotFound(instance_id=instance_uuid) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @pick_context_manager_writer def instance_create(context, values): """Create an instance from the values dictionary. :param context: Request context object :param values: Dict containing column values. """ default_group = security_group_ensure_default(context) values = values.copy() values['metadata'] = _metadata_refs( values.get('metadata'), models.InstanceMetadata) values['system_metadata'] = _metadata_refs( values.get('system_metadata'), models.InstanceSystemMetadata) _handle_objects_related_type_conversions(values) instance_ref = models.Instance() if not values.get('uuid'): values['uuid'] = uuidutils.generate_uuid() instance_ref['info_cache'] = models.InstanceInfoCache() info_cache = values.pop('info_cache', None) if info_cache is not None: instance_ref['info_cache'].update(info_cache) security_groups = values.pop('security_groups', []) instance_ref['extra'] = models.InstanceExtra() instance_ref['extra'].update( {'numa_topology': None, 'pci_requests': None, 'vcpu_model': None, 'trusted_certs': None, 'resources': None, }) instance_ref['extra'].update(values.pop('extra', {})) instance_ref.update(values) # Gather the security groups for the instance sg_models = [] if 'default' in security_groups: sg_models.append(default_group) # Generate a new list, so we don't modify the original security_groups = [x for x in security_groups if x != 'default'] if security_groups: sg_models.extend(_security_group_get_by_names( context, security_groups)) if 'hostname' in values: _validate_unique_server_name(context, values['hostname']) instance_ref.security_groups = sg_models context.session.add(instance_ref) # create the instance uuid to ec2_id mapping entry for instance ec2_instance_create(context, instance_ref['uuid']) # Parity with the return value of instance_get_all_by_filters_sort() # Obviously a newly-created instance record can't already have a fault # record because of the FK constraint, so this is fine. instance_ref.fault = None return instance_ref @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @pick_context_manager_writer def instance_destroy( context, instance_uuid, constraint=None, hard_delete=False, ): """Destroy the instance or raise if it does not exist. 
:param context: request context object :param instance_uuid: uuid of the instance to delete :param constraint: a constraint object :param hard_delete: when set to True, removes all records related to the instance """ if uuidutils.is_uuid_like(instance_uuid): instance_ref = _instance_get_by_uuid(context, instance_uuid) else: raise exception.InvalidUUID(uuid=instance_uuid) query = model_query(context, models.Instance).\ filter_by(uuid=instance_uuid) if constraint is not None: query = constraint.apply(models.Instance, query) # Either in hard or soft delete, we soft delete the instance first # to make sure that the constraints were met. count = query.soft_delete() if count == 0: # The failure to soft delete could be due to one of two things: # 1) A racing request has deleted the instance out from under us # 2) A constraint was not met # Try to read the instance back once more and let it raise # InstanceNotFound if 1) happened. This will give the caller an error # that more accurately reflects the reason for the failure. _instance_get_by_uuid(context, instance_uuid) # Else, raise ConstraintNotMet if 2) happened. raise exception.ConstraintNotMet() models_to_delete = [ models.SecurityGroupInstanceAssociation, models.InstanceInfoCache, models.InstanceMetadata, models.InstanceFault, models.InstanceExtra, models.InstanceSystemMetadata, models.BlockDeviceMapping, models.Migration, models.VirtualInterface ] # For most referenced models we filter by the instance_uuid column, but for # these models we filter by the uuid column. filtered_by_uuid = [models.InstanceIdMapping] for model in models_to_delete + filtered_by_uuid: key = 'instance_uuid' if model not in filtered_by_uuid else 'uuid' filter_ = {key: instance_uuid} if hard_delete: # We need to read any soft-deleted related records to make sure # and clean those up as well otherwise we can fail with ForeignKey # constraint errors when hard deleting the instance. model_query(context, model, read_deleted='yes').filter_by( **filter_).delete() else: model_query(context, model).filter_by(**filter_).soft_delete() # NOTE(snikitin): We can't use model_query here, because there is no # column 'deleted' in 'tags' or 'console_auth_tokens' tables. context.session.query(models.Tag).filter_by( resource_id=instance_uuid).delete() context.session.query(models.ConsoleAuthToken).filter_by( instance_uuid=instance_uuid).delete() # NOTE(cfriesen): We intentionally do not soft-delete entries in the # instance_actions or instance_actions_events tables because they # can be used by operators to find out what actions were performed on a # deleted instance. Both of these tables are special-cased in # _archive_deleted_rows_for_table(). 
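    # The exception is a hard delete, handled just below, where the action
    # and event rows must be removed so the instance row itself can be
    # deleted without violating the foreign key constraint.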
if hard_delete: # NOTE(ttsiousts): In case of hard delete, we need to remove the # instance actions too since instance_uuid is a foreign key and # for this we need to delete the corresponding InstanceActionEvents actions = context.session.query(models.InstanceAction).filter_by( instance_uuid=instance_uuid).all() for action in actions: context.session.query(models.InstanceActionEvent).filter_by( action_id=action.id).delete() context.session.query(models.InstanceAction).filter_by( instance_uuid=instance_uuid).delete() # NOTE(ttsiouts): The instance is the last thing to be deleted in # order to respect all constraints context.session.query(models.Instance).filter_by( uuid=instance_uuid).delete() return instance_ref @require_context @pick_context_manager_reader_allow_async def instance_get_by_uuid(context, uuid, columns_to_join=None): """Get an instance or raise if it does not exist.""" return _instance_get_by_uuid(context, uuid, columns_to_join=columns_to_join) def _instance_get_by_uuid(context, uuid, columns_to_join=None): result = _build_instance_get( context, columns_to_join=columns_to_join ).filter_by(uuid=uuid).first() if not result: raise exception.InstanceNotFound(instance_id=uuid) return result @require_context @pick_context_manager_reader def instance_get(context, instance_id, columns_to_join=None): """Get an instance or raise if it does not exist.""" try: result = _build_instance_get(context, columns_to_join=columns_to_join ).filter_by(id=instance_id).first() if not result: raise exception.InstanceNotFound(instance_id=instance_id) return result except db_exc.DBError: # NOTE(sdague): catch all in case the db engine chokes on the # id because it's too long of an int to store. LOG.warning("Invalid instance id %s in request", instance_id) raise exception.InvalidID(id=instance_id) def _build_instance_get(context, columns_to_join=None): query = model_query( context, models.Instance, project_only=True, ).options( orm.joinedload( models.Instance.security_groups ).joinedload(models.SecurityGroup.rules) ).options(orm.joinedload(models.Instance.info_cache)) if columns_to_join is None: columns_to_join = ['metadata', 'system_metadata'] for column in columns_to_join: if column in ['info_cache', 'security_groups']: # Already always joined above continue if 'extra.' in column: column_ref = getattr(models.InstanceExtra, column.split('.')[1]) query = query.options( orm.joinedload(models.Instance.extra).undefer(column_ref) ) elif column in ['metadata', 'system_metadata']: # NOTE(melwitt): We use subqueryload() instead of joinedload() for # metadata and system_metadata because of the one-to-many # relationship of the data. Directly joining these columns can # result in a large number of additional rows being queried if an # instance has a large number of (system_)metadata items, resulting # in a large data transfer. Instead, the subqueryload() will # perform additional queries to obtain metadata and system_metadata # for the instance. column_ref = getattr(models.Instance, column) query = query.options(orm.subqueryload(column_ref)) else: column_ref = getattr(models.Instance, column) query = query.options(orm.joinedload(column_ref)) # NOTE(alaski) Stop lazy loading of columns not needed. 
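    # noload() keeps SQLAlchemy from emitting lazy per-instance queries for
    # the metadata collections the caller did not ask to join.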
for column in ['metadata', 'system_metadata']: if column not in columns_to_join: column_ref = getattr(models.Instance, column) query = query.options(orm.noload(column_ref)) # NOTE(melwitt): We need to use order_by() so that the # additional queries emitted by subqueryload() include the same ordering as # used by the parent query. # https://docs.sqlalchemy.org/en/13/orm/loading_relationships.html#the-importance-of-ordering return query.order_by(models.Instance.id) def _instances_fill_metadata(context, instances, manual_joins=None): """Selectively fill instances with manually-joined metadata. Note that instance will be converted to a dict. :param context: security context :param instances: list of instances to fill :param manual_joins: list of tables to manually join (can be any combination of 'metadata' and 'system_metadata' or None to take the default of both) """ uuids = [inst['uuid'] for inst in instances] if manual_joins is None: manual_joins = ['metadata', 'system_metadata'] meta = collections.defaultdict(list) if 'metadata' in manual_joins: for row in _instance_metadata_get_multi(context, uuids): meta[row['instance_uuid']].append(row) sys_meta = collections.defaultdict(list) if 'system_metadata' in manual_joins: for row in _instance_system_metadata_get_multi(context, uuids): sys_meta[row['instance_uuid']].append(row) pcidevs = collections.defaultdict(list) if 'pci_devices' in manual_joins: for row in _instance_pcidevs_get_multi(context, uuids): pcidevs[row['instance_uuid']].append(row) if 'fault' in manual_joins: faults = instance_fault_get_by_instance_uuids(context, uuids, latest=True) else: faults = {} filled_instances = [] for inst in instances: inst = dict(inst) inst['system_metadata'] = sys_meta[inst['uuid']] inst['metadata'] = meta[inst['uuid']] if 'pci_devices' in manual_joins: inst['pci_devices'] = pcidevs[inst['uuid']] inst_faults = faults.get(inst['uuid']) inst['fault'] = inst_faults and inst_faults[0] or None filled_instances.append(inst) return filled_instances @require_context @pick_context_manager_reader def instances_fill_metadata(context, instances, manual_joins=None): """Selectively fill instances with manually-joined metadata. See _instances_fill_metadata(). This is only for use as a standalone operation in its own transaction. """ return _instances_fill_metadata(context, instances, manual_joins=manual_joins) def _manual_join_columns(columns_to_join): """Separate manually joined columns from columns_to_join If columns_to_join contains 'metadata', 'system_metadata', 'fault', or 'pci_devices' those columns are removed from columns_to_join and added to a manual_joins list to be used with the _instances_fill_metadata method. The columns_to_join formal parameter is copied and not modified, the return tuple has the modified columns_to_join list to be used with joinedload in a model query. :param:columns_to_join: List of columns to join in a model query. 
:return: tuple of (manual_joins, columns_to_join) """ manual_joins = [] columns_to_join_new = copy.copy(columns_to_join) for column in ('metadata', 'system_metadata', 'pci_devices', 'fault'): if column in columns_to_join_new: columns_to_join_new.remove(column) manual_joins.append(column) return manual_joins, columns_to_join_new @require_context @pick_context_manager_reader def instance_get_all(context, columns_to_join=None): """Get all instances.""" if columns_to_join is None: columns_to_join_new = ['info_cache', 'security_groups'] manual_joins = ['metadata', 'system_metadata'] else: manual_joins, columns_to_join_new = ( _manual_join_columns(columns_to_join)) query = model_query(context, models.Instance) for column in columns_to_join_new: column_ref = getattr(models.Instance, column) query = query.options(orm.joinedload(column_ref)) if not context.is_admin: # If we're not admin context, add appropriate filter.. if context.project_id: query = query.filter_by(project_id=context.project_id) else: query = query.filter_by(user_id=context.user_id) instances = query.all() return _instances_fill_metadata(context, instances, manual_joins) @require_context @pick_context_manager_reader_allow_async def instance_get_all_by_filters( context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, columns_to_join=None, ): """Get all instances matching all filters sorted by the primary key. See instance_get_all_by_filters_sort for more information. """ # Invoke the API with the multiple sort keys and directions using the # single sort key/direction return instance_get_all_by_filters_sort(context, filters, limit=limit, marker=marker, columns_to_join=columns_to_join, sort_keys=[sort_key], sort_dirs=[sort_dir]) def _get_query_nova_resource_by_changes_time(query, filters, model_object): """Filter resources by changes-since or changes-before. Special keys are used to tweak the query further:: | 'changes-since' - only return resources updated after | 'changes-before' - only return resources updated before Return query results. :param query: query to apply filters to. :param filters: dictionary of filters with regex values. :param model_object: object of the operation target. """ for change_filter in ['changes-since', 'changes-before']: if filters and filters.get(change_filter): changes_filter_time = timeutils.normalize_time( filters.get(change_filter)) updated_at = getattr(model_object, 'updated_at') if change_filter == 'changes-since': query = query.filter(updated_at >= changes_filter_time) else: query = query.filter(updated_at <= changes_filter_time) return query @require_context @pick_context_manager_reader_allow_async def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None, columns_to_join=None, sort_keys=None, sort_dirs=None): """Get all instances that match all filters sorted by the given keys. Deleted instances will be returned by default, unless there's a filter that says otherwise. Depending on the name of a filter, matching for that filter is performed using either exact matching or as regular expression matching. Exact matching is applied for the following filters:: | ['project_id', 'user_id', 'image_ref', | 'vm_state', 'instance_type_id', 'uuid', | 'metadata', 'host', 'system_metadata', 'locked', 'hidden'] Hidden instances will *not* be returned by default, unless there's a filter that says otherwise. 
A third type of filter (also using exact matching), filters based on instance metadata tags when supplied under a special key named 'filter':: | filters = { | 'filter': [ | {'name': 'tag-key', 'value': ''}, | {'name': 'tag-value', 'value': ''}, | {'name': 'tag:', 'value': ''} | ] | } Special keys are used to tweak the query further:: | 'changes-since' - only return instances updated after | 'changes-before' - only return instances updated before | 'deleted' - only return (or exclude) deleted instances | 'soft_deleted' - modify behavior of 'deleted' to either | include or exclude instances whose | vm_state is SOFT_DELETED. A fourth type of filter (also using exact matching), filters based on instance tags (not metadata tags). There are two types of these tags: `tags` -- One or more strings that will be used to filter results in an AND expression: T1 AND T2 `tags-any` -- One or more strings that will be used to filter results in an OR expression: T1 OR T2 `not-tags` -- One or more strings that will be used to filter results in an NOT AND expression: NOT (T1 AND T2) `not-tags-any` -- One or more strings that will be used to filter results in an NOT OR expression: NOT (T1 OR T2) Tags should be represented as list:: | filters = { | 'tags': [some-tag, some-another-tag], | 'tags-any: [some-any-tag, some-another-any-tag], | 'not-tags: [some-not-tag, some-another-not-tag], | 'not-tags-any: [some-not-any-tag, some-another-not-any-tag] | } """ # NOTE(mriedem): If the limit is 0 there is no point in even going # to the database since nothing is going to be returned anyway. if limit == 0: return [] sort_keys, sort_dirs = db_utils.process_sort_params( sort_keys, sort_dirs, default_dir='desc') if columns_to_join is None: columns_to_join_new = ['info_cache', 'security_groups'] manual_joins = ['metadata', 'system_metadata'] else: manual_joins, columns_to_join_new = ( _manual_join_columns(columns_to_join)) query_prefix = context.session.query(models.Instance) for column in columns_to_join_new: if 'extra.' in column: column_ref = getattr(models.InstanceExtra, column.split('.')[1]) query_prefix = query_prefix.options( orm.joinedload(models.Instance.extra).undefer(column_ref) ) else: column_ref = getattr(models.Instance, column) query_prefix = query_prefix.options(orm.joinedload(column_ref)) # Note: order_by is done in the sqlalchemy.utils.py paginate_query(), # no need to do it here as well # Make a copy of the filters dictionary to use going forward, as we'll # be modifying it and we shouldn't affect the caller's use of it. filters = copy.deepcopy(filters) model_object = models.Instance query_prefix = _get_query_nova_resource_by_changes_time( query_prefix, filters, model_object, ) if 'deleted' in filters: # Instances can be soft or hard deleted and the query needs to # include or exclude both deleted = filters.pop('deleted') if deleted: if filters.pop('soft_deleted', True): delete = sql.or_( models.Instance.deleted == models.Instance.id, models.Instance.vm_state == vm_states.SOFT_DELETED ) query_prefix = query_prefix.filter(delete) else: query_prefix = query_prefix.\ filter(models.Instance.deleted == models.Instance.id) else: query_prefix = query_prefix.filter_by(deleted=0) if not filters.pop('soft_deleted', False): # It would be better to have vm_state not be nullable # but until then we test it explicitly as a workaround. 
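                # i.e. a NULL vm_state is treated the same as "not soft
                # deleted".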
not_soft_deleted = sql.or_( models.Instance.vm_state != vm_states.SOFT_DELETED, models.Instance.vm_state == sql.null() ) query_prefix = query_prefix.filter(not_soft_deleted) if 'cleaned' in filters: cleaned = 1 if filters.pop('cleaned') else 0 query_prefix = query_prefix.filter(models.Instance.cleaned == cleaned) if 'tags' in filters: tags = filters.pop('tags') # We build a JOIN ladder expression for each tag, JOIN'ing # the first tag to the instances table, and each subsequent # tag to the last JOIN'd tags table first_tag = tags.pop(0) query_prefix = query_prefix.join(models.Instance.tags) query_prefix = query_prefix.filter(models.Tag.tag == first_tag) for tag in tags: tag_alias = orm.aliased(models.Tag) query_prefix = query_prefix.join(tag_alias, models.Instance.tags) query_prefix = query_prefix.filter(tag_alias.tag == tag) if 'tags-any' in filters: tags = filters.pop('tags-any') tag_alias = orm.aliased(models.Tag) query_prefix = query_prefix.join(tag_alias, models.Instance.tags) query_prefix = query_prefix.filter(tag_alias.tag.in_(tags)) if 'not-tags' in filters: tags = filters.pop('not-tags') first_tag = tags.pop(0) subq = query_prefix.session.query(models.Tag.resource_id) subq = subq.join(models.Instance.tags) subq = subq.filter(models.Tag.tag == first_tag) for tag in tags: tag_alias = orm.aliased(models.Tag) subq = subq.join(tag_alias, models.Instance.tags) subq = subq.filter(tag_alias.tag == tag) query_prefix = query_prefix.filter(~models.Instance.uuid.in_(subq)) if 'not-tags-any' in filters: tags = filters.pop('not-tags-any') query_prefix = query_prefix.filter(~models.Instance.tags.any( models.Tag.tag.in_(tags))) if not context.is_admin: # If we're not admin context, add appropriate filter.. if context.project_id: filters['project_id'] = context.project_id else: filters['user_id'] = context.user_id if filters.pop('hidden', False): query_prefix = query_prefix.filter( models.Instance.hidden == sql.true()) else: # If the query should not include hidden instances, then # filter instances with hidden=False or hidden=NULL because # older records may have no value set. query_prefix = query_prefix.filter(sql.or_( models.Instance.hidden == sql.false(), models.Instance.hidden == sql.null())) # Filters for exact matches that we can do along with the SQL query... # For other filters that don't match this, we will do regexp matching exact_match_filter_names = ['project_id', 'user_id', 'image_ref', 'vm_state', 'instance_type_id', 'uuid', 'metadata', 'host', 'task_state', 'system_metadata', 'locked', 'hidden'] # Filter the query query_prefix = _exact_instance_filter(query_prefix, filters, exact_match_filter_names) if query_prefix is None: return [] query_prefix = _regex_instance_filter(query_prefix, filters) # paginate query if marker is not None: try: marker = _instance_get_by_uuid( context.elevated(read_deleted='yes'), marker, ) except exception.InstanceNotFound: raise exception.MarkerNotFound(marker=marker) try: query_prefix = sqlalchemyutils.paginate_query( query_prefix, models.Instance, limit, sort_keys, marker=marker, sort_dirs=sort_dirs, ) except db_exc.InvalidSortKey: raise exception.InvalidSortKey() instances = query_prefix.all() return _instances_fill_metadata(context, instances, manual_joins) @require_context @pick_context_manager_reader_allow_async def instance_get_by_sort_filters(context, sort_keys, sort_dirs, values): """Get the UUID of the first instance in a sort order. Attempt to get a single instance based on a combination of sort keys, directions and filter values. 
This is used to try to find a marker instance when we don't have a marker uuid. :returns: The UUID of the instance that matched, if any. """ model = models.Instance return _model_get_uuid_by_sort_filters(context, model, sort_keys, sort_dirs, values) def _model_get_uuid_by_sort_filters(context, model, sort_keys, sort_dirs, values): query = context.session.query(model.uuid) # NOTE(danms): Below is a re-implementation of our # oslo_db.sqlalchemy.utils.paginate_query() utility. We can't use that # directly because it does not return the marker and we need it to. # The below is basically the same algorithm, stripped down to just what # we need, and augmented with the filter criteria required for us to # get back the instance that would correspond to our query. # This is our position in sort_keys,sort_dirs,values for the loop below key_index = 0 # We build a list of criteria to apply to the query, which looks # approximately like this (assuming all ascending): # # OR(row.key1 > val1, # AND(row.key1 == val1, row.key2 > val2), # AND(row.key1 == val1, row.key2 == val2, row.key3 >= val3), # ) # # The final key is compared with the "or equal" variant so that # a complete match instance is still returned. criteria = [] for skey, sdir, val in zip(sort_keys, sort_dirs, values): # Apply ordering to our query for the key, direction we're processing if sdir == 'desc': query = query.order_by(expression.desc(getattr(model, skey))) else: query = query.order_by(expression.asc(getattr(model, skey))) # Build a list of equivalence requirements on keys we've already # processed through the loop. In other words, if we're adding # key2 > val2, make sure that key1 == val1 crit_attrs = [] for equal_attr in range(0, key_index): crit_attrs.append( (getattr(model, sort_keys[equal_attr]) == values[equal_attr])) model_attr = getattr(model, skey) if isinstance(model_attr.type, sa.Boolean): model_attr = expression.cast(model_attr, sa.Integer) val = int(val) if skey == sort_keys[-1]: # If we are the last key, then we should use or-equal to # allow a complete match to be returned if sdir == 'asc': crit = (model_attr >= val) else: crit = (model_attr <= val) else: # If we're not the last key, then strict greater or less than # so we order strictly. if sdir == 'asc': crit = (model_attr > val) else: crit = (model_attr < val) # AND together all the above crit_attrs.append(crit) criteria.append(sql.and_(*crit_attrs)) key_index += 1 # OR together all the ANDs query = query.filter(sql.or_(*criteria)) # We can't raise InstanceNotFound because we don't have a uuid to # be looking for, so just return nothing if no match. result = query.limit(1).first() if result: # We're querying for a single column, which means we get back a # tuple of one thing. Strip that out and just return the uuid # for our caller. return result[0] else: return result def _db_connection_type(db_connection): """Returns a lowercase symbol for the db type. This is useful when we need to change what we are doing per DB (like handling regexes). In a CellsV2 world it probably needs to do something better than use the database configuration string. """ db_string = db_connection.split(':')[0].split('+')[0] return db_string.lower() def _safe_regex_mysql(raw_string): """Make regex safe to mysql. Certain items like '|' are interpreted raw by mysql REGEX. If you search for a single | then you trigger an error because it's expecting content on either side. For consistency sake we escape all '|'. 
This does mean we wouldn't support something like foo|bar to match completely different things, however, one can argue putting such complicated regex into name search probably means you are doing this wrong. """ return raw_string.replace('|', '\\|') def _get_regexp_ops(connection): """Return safety filter and db opts for regex.""" regexp_op_map = { 'postgresql': '~', 'mysql': 'REGEXP', 'sqlite': 'REGEXP' } regex_safe_filters = { 'mysql': _safe_regex_mysql } db_type = _db_connection_type(connection) return (regex_safe_filters.get(db_type, lambda x: x), regexp_op_map.get(db_type, 'LIKE')) def _regex_instance_filter(query, filters): """Applies regular expression filtering to an Instance query. Returns the updated query. :param query: query to apply filters to :param filters: dictionary of filters with regex values """ model = models.Instance safe_regex_filter, db_regexp_op = _get_regexp_ops(CONF.database.connection) for filter_name in filters: try: column_attr = getattr(model, filter_name) except AttributeError: continue if 'property' == type(column_attr).__name__: continue filter_val = filters[filter_name] # Sometimes the REGEX filter value is not a string if not isinstance(filter_val, str): filter_val = str(filter_val) if db_regexp_op == 'LIKE': query = query.filter(column_attr.op(db_regexp_op)( u'%' + filter_val + u'%')) else: filter_val = safe_regex_filter(filter_val) query = query.filter(column_attr.op(db_regexp_op)( filter_val)) return query def _exact_instance_filter(query, filters, legal_keys): """Applies exact match filtering to an Instance query. Returns the updated query. Modifies filters argument to remove filters consumed. :param query: query to apply filters to :param filters: dictionary of filters; values that are lists, tuples, sets, or frozensets cause an 'IN' test to be performed, while exact matching ('==' operator) is used for other values :param legal_keys: list of keys to apply exact filtering to """ filter_dict = {} model = models.Instance # Walk through all the keys for key in legal_keys: # Skip ones we're not filtering on if key not in filters: continue # OK, filtering on this key; what value do we search for? value = filters.pop(key) if key in ('metadata', 'system_metadata'): column_attr = getattr(model, key) if isinstance(value, list): for item in value: for k, v in item.items(): query = query.filter(column_attr.any(key=k)) query = query.filter(column_attr.any(value=v)) else: for k, v in value.items(): query = query.filter(column_attr.any(key=k)) query = query.filter(column_attr.any(value=v)) elif isinstance(value, (list, tuple, set, frozenset)): if not value: return None # empty IN-predicate; short circuit # Looking for values in a list; apply to query directly column_attr = getattr(model, key) query = query.filter(column_attr.in_(value)) else: # OK, simple exact match; save for later filter_dict[key] = value # Apply simple exact matches if filter_dict: query = query.filter(*[getattr(models.Instance, k) == v for k, v in filter_dict.items()]) return query @require_context @pick_context_manager_reader_allow_async def instance_get_active_by_window_joined(context, begin, end=None, project_id=None, host=None, columns_to_join=None, limit=None, marker=None): """Get instances and joins active during a certain time window. Specifying a project_id will filter for a certain project. Specifying a host will filter for instances on a given compute host. 
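
    An instance is considered active in the window if it was not terminated
    before ``begin`` (``terminated_at`` is NULL or later than ``begin``)
    and, when ``end`` is given, it was launched before ``end``.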
""" query = context.session.query(models.Instance) if columns_to_join is None: columns_to_join_new = ['info_cache', 'security_groups'] manual_joins = ['metadata', 'system_metadata'] else: manual_joins, columns_to_join_new = ( _manual_join_columns(columns_to_join)) for column in columns_to_join_new: if 'extra.' in column: column_ref = getattr(models.InstanceExtra, column.split('.')[1]) query = query.options( orm.joinedload(models.Instance.extra).undefer(column_ref) ) else: column_ref = getattr(models.Instance, column) query = query.options(orm.joinedload(column_ref)) query = query.filter(sql.or_( models.Instance.terminated_at == sql.null(), models.Instance.terminated_at > begin)) if end: query = query.filter(models.Instance.launched_at < end) if project_id: query = query.filter_by(project_id=project_id) if host: query = query.filter_by(host=host) if marker is not None: try: marker = _instance_get_by_uuid( context.elevated(read_deleted='yes'), marker) except exception.InstanceNotFound: raise exception.MarkerNotFound(marker=marker) query = sqlalchemyutils.paginate_query( query, models.Instance, limit, ['project_id', 'uuid'], marker=marker, ) instances = query.all() return _instances_fill_metadata(context, instances, manual_joins) def _instance_get_all_query(context, project_only=False, joins=None): if joins is None: joins = ['info_cache', 'security_groups'] query = model_query( context, models.Instance, project_only=project_only, ) for column in joins: if 'extra.' in column: column_ref = getattr(models.InstanceExtra, column.split('.')[1]) query = query.options( orm.joinedload(models.Instance.extra).undefer(column_ref) ) else: column_ref = getattr(models.Instance, column) query = query.options(orm.joinedload(column_ref)) return query @pick_context_manager_reader_allow_async def instance_get_all_by_host(context, host, columns_to_join=None): """Get all instances belonging to a host.""" query = _instance_get_all_query(context, joins=columns_to_join) instances = query.filter_by(host=host).all() return _instances_fill_metadata( context, instances, manual_joins=columns_to_join, ) def _instance_get_all_uuids_by_hosts(context, hosts): itbl = models.Instance.__table__ default_deleted_value = itbl.c.deleted.default.arg sel = sql.select(itbl.c.host, itbl.c.uuid) sel = sel.where(sql.and_( itbl.c.deleted == default_deleted_value, itbl.c.host.in_(sa.bindparam('hosts', expanding=True)))) # group the instance UUIDs by hostname res = collections.defaultdict(list) for rec in context.session.execute(sel, {'hosts': hosts}).fetchall(): res[rec[0]].append(rec[1]) return res @pick_context_manager_reader def instance_get_all_uuids_by_hosts(context, hosts): """Get a dict, keyed by hostname, of a list of the instance UUIDs on the host for each supplied hostname, not Instance model objects. The dict is a defaultdict of list, thus inspecting the dict for a host not in the dict will return an empty list not a KeyError. 
""" return _instance_get_all_uuids_by_hosts(context, hosts) @pick_context_manager_reader def instance_get_all_by_host_and_node( context, host, node, columns_to_join=None, ): """Get all instances belonging to a node.""" if columns_to_join is None: manual_joins = [] else: candidates = ['system_metadata', 'metadata'] manual_joins = [x for x in columns_to_join if x in candidates] columns_to_join = list(set(columns_to_join) - set(candidates)) instances = _instance_get_all_query( context, joins=columns_to_join, ).filter_by(host=host).filter_by(node=node).all() return _instances_fill_metadata( context, instances, manual_joins=manual_joins, ) @pick_context_manager_reader def instance_get_all_by_host_and_not_type(context, host, type_id=None): """Get all instances belonging to a host with a different type_id.""" instances = _instance_get_all_query(context).filter_by( host=host, ).filter( models.Instance.instance_type_id != type_id ).all() return _instances_fill_metadata(context, instances) # NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0. @pick_context_manager_reader def instance_get_all_hung_in_rebooting(context, reboot_window): """Get all instances stuck in a rebooting state.""" reboot_window = (timeutils.utcnow() - datetime.timedelta(seconds=reboot_window)) # NOTE(danms): this is only used in the _poll_rebooting_instances() # call in compute/manager, so we can avoid the metadata lookups # explicitly instances = model_query(context, models.Instance).filter( models.Instance.updated_at <= reboot_window ).filter_by(task_state=task_states.REBOOTING).all() return _instances_fill_metadata( context, instances, manual_joins=[], ) def _retry_instance_update(): """Wrap with oslo_db_api.wrap_db_retry, and also retry on UnknownInstanceUpdateConflict. """ exception_checker = \ lambda exc: isinstance(exc, (exception.UnknownInstanceUpdateConflict,)) return oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True, exception_checker=exception_checker) @require_context @_retry_instance_update() @pick_context_manager_writer def instance_update(context, instance_uuid, values, expected=None): """Set the given properties on an instance and update it. :raises: NotFound if instance does not exist. """ return _instance_update(context, instance_uuid, values, expected) @require_context @_retry_instance_update() @pick_context_manager_writer def instance_update_and_get_original(context, instance_uuid, values, columns_to_join=None, expected=None): """Set the given properties on an instance and update it. Return a shallow copy of the original instance reference, as well as the updated one. If "expected_task_state" exists in values, the update can only happen when the task state before update matches expected_task_state. Otherwise a UnexpectedTaskStateError is thrown. :param context: request context object :param instance_uuid: instance uuid :param values: dict containing column values :returns: a tuple of the form (old_instance_ref, new_instance_ref) :raises: NotFound if instance does not exist. """ instance_ref = _instance_get_by_uuid(context, instance_uuid, columns_to_join=columns_to_join) return (copy.copy(instance_ref), _instance_update( context, instance_uuid, values, expected, original=instance_ref)) # NOTE(danms): This updates the instance's metadata list in-place and in # the database to avoid stale data and refresh issues. It assumes the # delete=True behavior of instance_metadata_update(...) 
def _instance_metadata_update_in_place(context, instance, metadata_type, model, metadata): metadata = dict(metadata) to_delete = [] for keyvalue in instance[metadata_type]: key = keyvalue['key'] if key in metadata: keyvalue['value'] = metadata.pop(key) elif key not in metadata: to_delete.append(keyvalue) # NOTE: we have to hard_delete here otherwise we will get more than one # system_metadata record when we read deleted for an instance; # regular metadata doesn't have the same problem because we don't # allow reading deleted regular metadata anywhere. if metadata_type == 'system_metadata': for condemned in to_delete: context.session.delete(condemned) instance[metadata_type].remove(condemned) else: for condemned in to_delete: condemned.soft_delete(context.session) for key, value in metadata.items(): newitem = model() newitem.update({'key': key, 'value': value, 'instance_uuid': instance['uuid']}) context.session.add(newitem) instance[metadata_type].append(newitem) def _instance_update(context, instance_uuid, values, expected, original=None): if not uuidutils.is_uuid_like(instance_uuid): raise exception.InvalidUUID(uuid=instance_uuid) # NOTE(mdbooth): We pop values from this dict below, so we copy it here to # ensure there are no side effects for the caller or if we retry the # function due to a db conflict. updates = copy.copy(values) if expected is None: expected = {} else: # Coerce all single values to singleton lists expected = {k: [None] if v is None else sqlalchemyutils.to_list(v) for (k, v) in expected.items()} # Extract 'expected_' values from values dict, as these aren't actually # updates for field in ('task_state', 'vm_state'): expected_field = 'expected_%s' % field if expected_field in updates: value = updates.pop(expected_field, None) # Coerce all single values to singleton lists if value is None: expected[field] = [None] else: expected[field] = sqlalchemyutils.to_list(value) # Values which need to be updated separately metadata = updates.pop('metadata', None) system_metadata = updates.pop('system_metadata', None) _handle_objects_related_type_conversions(updates) # Hostname is potentially unique, but this is enforced in code rather # than the DB. The query below races, but the number of users of # osapi_compute_unique_server_name_scope is small, and a robust fix # will be complex. This is intentionally left as is for the moment. if 'hostname' in updates: _validate_unique_server_name(context, updates['hostname']) compare = models.Instance(uuid=instance_uuid, **expected) try: instance_ref = model_query(context, models.Instance, project_only=True).\ update_on_match(compare, 'uuid', updates) except update_match.NoRowsMatched: # Update failed. Try to find why and raise a specific error. # We should get here only because our expected values were not current # when update_on_match executed. Having failed, we now have a hint that # the values are out of date and should check them. # This code is made more complex because we are using repeatable reads. # If we have previously read the original instance in the current # transaction, reading it again will return the same data, even though # the above update failed because it has changed: it is not possible to # determine what has changed in this transaction. In this case we raise # UnknownInstanceUpdateConflict, which will cause the operation to be # retried in a new transaction. 
# Because of the above, if we have previously read the instance in the # current transaction it will have been passed as 'original', and there # is no point refreshing it. If we have not previously read the # instance, we can fetch it here and we will get fresh data. if original is None: original = _instance_get_by_uuid(context, instance_uuid) conflicts_expected = {} conflicts_actual = {} for (field, expected_values) in expected.items(): actual = original[field] if actual not in expected_values: conflicts_expected[field] = expected_values conflicts_actual[field] = actual # Exception properties exc_props = { 'instance_uuid': instance_uuid, 'expected': conflicts_expected, 'actual': conflicts_actual } # There was a conflict, but something (probably the MySQL read view, # but possibly an exceptionally unlikely second race) is preventing us # from seeing what it is. When we go round again we'll get a fresh # transaction and a fresh read view. if len(conflicts_actual) == 0: raise exception.UnknownInstanceUpdateConflict(**exc_props) # Task state gets special handling for convenience. We raise the # specific error UnexpectedDeletingTaskStateError or # UnexpectedTaskStateError as appropriate if 'task_state' in conflicts_actual: conflict_task_state = conflicts_actual['task_state'] if conflict_task_state == task_states.DELETING: exc = exception.UnexpectedDeletingTaskStateError else: exc = exception.UnexpectedTaskStateError # Everything else is an InstanceUpdateConflict else: exc = exception.InstanceUpdateConflict raise exc(**exc_props) if metadata is not None: _instance_metadata_update_in_place(context, instance_ref, 'metadata', models.InstanceMetadata, metadata) if system_metadata is not None: _instance_metadata_update_in_place(context, instance_ref, 'system_metadata', models.InstanceSystemMetadata, system_metadata) return instance_ref @pick_context_manager_writer def instance_add_security_group(context, instance_uuid, security_group_id): """Associate the given security group with the given instance.""" sec_group_ref = models.SecurityGroupInstanceAssociation() sec_group_ref.update({'instance_uuid': instance_uuid, 'security_group_id': security_group_id}) sec_group_ref.save(context.session) @require_context @pick_context_manager_writer def instance_remove_security_group(context, instance_uuid, security_group_id): """Disassociate the given security group from the given instance.""" model_query(context, models.SecurityGroupInstanceAssociation).\ filter_by(instance_uuid=instance_uuid).\ filter_by(security_group_id=security_group_id).\ soft_delete() ################### @require_context @pick_context_manager_reader def instance_info_cache_get(context, instance_uuid): """Gets an instance info cache from the table. :param instance_uuid: = uuid of the info cache's instance """ return model_query(context, models.InstanceInfoCache).\ filter_by(instance_uuid=instance_uuid).\ first() @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @pick_context_manager_writer def instance_info_cache_update(context, instance_uuid, values): """Update an instance info cache record in the table. 
:param instance_uuid: = uuid of info cache's instance :param values: = dict containing column values to update """ convert_objects_related_datetimes(values) info_cache = model_query(context, models.InstanceInfoCache).\ filter_by(instance_uuid=instance_uuid).\ first() needs_create = False if info_cache and info_cache['deleted']: raise exception.InstanceInfoCacheNotFound( instance_uuid=instance_uuid) elif not info_cache: # NOTE(tr3buchet): just in case someone blows away an instance's # cache entry, re-create it. values['instance_uuid'] = instance_uuid info_cache = models.InstanceInfoCache(**values) needs_create = True try: with get_context_manager(context).writer.savepoint.using(context): if needs_create: info_cache.save(context.session) else: info_cache.update(values) except db_exc.DBDuplicateEntry: # NOTE(sirp): Possible race if two greenthreads attempt to # recreate the instance cache entry at the same time. First one # wins. pass return info_cache @require_context @pick_context_manager_writer def instance_info_cache_delete(context, instance_uuid): """Deletes an existing instance_info_cache record :param instance_uuid: = uuid of the instance tied to the cache record """ model_query(context, models.InstanceInfoCache).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() ################### def _instance_extra_create(context, values): inst_extra_ref = models.InstanceExtra() inst_extra_ref.update(values) inst_extra_ref.save(context.session) return inst_extra_ref @pick_context_manager_writer def instance_extra_update_by_uuid(context, instance_uuid, updates): """Update the instance extra record by instance uuid :param instance_uuid: UUID of the instance tied to the record :param updates: A dict of updates to apply """ rows_updated = model_query(context, models.InstanceExtra).\ filter_by(instance_uuid=instance_uuid).\ update(updates) if not rows_updated: LOG.debug("Created instance_extra for %s", instance_uuid) create_values = copy.copy(updates) create_values["instance_uuid"] = instance_uuid _instance_extra_create(context, create_values) rows_updated = 1 return rows_updated @pick_context_manager_reader def instance_extra_get_by_instance_uuid( context, instance_uuid, columns=None, ): """Get the instance extra record :param instance_uuid: UUID of the instance tied to the topology record :param columns: A list of the columns to load, or None for 'all of them' """ query = model_query(context, models.InstanceExtra).filter_by( instance_uuid=instance_uuid, ) if columns is None: columns = ['numa_topology', 'pci_requests', 'flavor', 'vcpu_model', 'trusted_certs', 'resources', 'migration_context'] for column in columns: column_ref = getattr(models.InstanceExtra, column) query = query.options(orm.undefer(column_ref)) instance_extra = query.first() return instance_extra ################### @require_context @pick_context_manager_reader def quota_get(context, project_id, resource, user_id=None): """Retrieve a quota or raise if it does not exist.""" model = models.ProjectUserQuota if user_id else models.Quota query = model_query(context, model).\ filter_by(project_id=project_id).\ filter_by(resource=resource) if user_id: query = query.filter_by(user_id=user_id) result = query.first() if not result: if user_id: raise exception.ProjectUserQuotaNotFound(project_id=project_id, user_id=user_id) else: raise exception.ProjectQuotaNotFound(project_id=project_id) return result @require_context @pick_context_manager_reader def quota_get_all_by_project_and_user(context, project_id, user_id): """Retrieve all quotas 
associated with a given project and user.""" user_quotas = model_query(context, models.ProjectUserQuota, (models.ProjectUserQuota.resource, models.ProjectUserQuota.hard_limit)).\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ all() result = {'project_id': project_id, 'user_id': user_id} for user_quota in user_quotas: result[user_quota.resource] = user_quota.hard_limit return result @require_context @pick_context_manager_reader def quota_get_all_by_project(context, project_id): """Retrieve all quotas associated with a given project.""" rows = model_query(context, models.Quota, read_deleted="no").\ filter_by(project_id=project_id).\ all() result = {'project_id': project_id} for row in rows: result[row.resource] = row.hard_limit return result @require_context @pick_context_manager_reader def quota_get_all(context, project_id): """Retrieve all user quotas associated with a given project.""" result = model_query(context, models.ProjectUserQuota).\ filter_by(project_id=project_id).\ all() return result def quota_get_per_project_resources(): """Retrieve the names of resources whose quotas are calculated on a per-project rather than a per-user basis. """ return PER_PROJECT_QUOTAS @pick_context_manager_writer def quota_create(context, project_id, resource, limit, user_id=None): """Create a quota for the given project and resource.""" per_user = user_id and resource not in PER_PROJECT_QUOTAS quota_ref = models.ProjectUserQuota() if per_user else models.Quota() if per_user: quota_ref.user_id = user_id quota_ref.project_id = project_id quota_ref.resource = resource quota_ref.hard_limit = limit try: quota_ref.save(context.session) except db_exc.DBDuplicateEntry: raise exception.QuotaExists(project_id=project_id, resource=resource) return quota_ref @pick_context_manager_writer def quota_update(context, project_id, resource, limit, user_id=None): """Update a quota or raise if it does not exist.""" per_user = user_id and resource not in PER_PROJECT_QUOTAS model = models.ProjectUserQuota if per_user else models.Quota query = model_query(context, model).\ filter_by(project_id=project_id).\ filter_by(resource=resource) if per_user: query = query.filter_by(user_id=user_id) result = query.update({'hard_limit': limit}) if not result: if per_user: raise exception.ProjectUserQuotaNotFound(project_id=project_id, user_id=user_id) else: raise exception.ProjectQuotaNotFound(project_id=project_id) ################### @require_context @pick_context_manager_reader def quota_class_get(context, class_name, resource): """Retrieve a quota class or raise if it does not exist.""" result = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=class_name).\ filter_by(resource=resource).\ first() if not result: raise exception.QuotaClassNotFound(class_name=class_name) return result @pick_context_manager_reader def quota_class_get_default(context): """Retrieve all default quotas.""" rows = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=_DEFAULT_QUOTA_NAME).\ all() result = {'class_name': _DEFAULT_QUOTA_NAME} for row in rows: result[row.resource] = row.hard_limit return result @require_context @pick_context_manager_reader def quota_class_get_all_by_name(context, class_name): """Retrieve all quotas associated with a given quota class.""" rows = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=class_name).\ all() result = {'class_name': class_name} for row in rows: result[row.resource] = row.hard_limit return result 
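# For illustration, the dict-shaped results built by the quota getters above
# look like the following (the resource names and limits are made up):
#
#   quota_get_all_by_project(ctxt, 'p1')
#   # -> {'project_id': 'p1', 'instances': 10, 'cores': 20}
#   quota_class_get_all_by_name(ctxt, 'default')
#   # -> {'class_name': 'default', 'instances': 10, 'ram': 51200}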
@pick_context_manager_writer def quota_class_create(context, class_name, resource, limit): """Create a quota class for the given name and resource.""" quota_class_ref = models.QuotaClass() quota_class_ref.class_name = class_name quota_class_ref.resource = resource quota_class_ref.hard_limit = limit quota_class_ref.save(context.session) return quota_class_ref @pick_context_manager_writer def quota_class_update(context, class_name, resource, limit): """Update a quota class or raise if it does not exist.""" result = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=class_name).\ filter_by(resource=resource).\ update({'hard_limit': limit}) if not result: raise exception.QuotaClassNotFound(class_name=class_name) ################### @pick_context_manager_writer def quota_destroy_all_by_project_and_user(context, project_id, user_id): """Destroy all quotas associated with a given project and user.""" model_query(context, models.ProjectUserQuota, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ soft_delete(synchronize_session=False) @pick_context_manager_writer def quota_destroy_all_by_project(context, project_id): """Destroy all quotas associated with a given project.""" model_query(context, models.Quota, read_deleted="no").\ filter_by(project_id=project_id).\ soft_delete(synchronize_session=False) model_query(context, models.ProjectUserQuota, read_deleted="no").\ filter_by(project_id=project_id).\ soft_delete(synchronize_session=False) ################### def _block_device_mapping_get_query(context, columns_to_join=None): if columns_to_join is None: columns_to_join = [] query = model_query(context, models.BlockDeviceMapping) for column in columns_to_join: column_ref = getattr(models.BlockDeviceMapping, column) query = query.options(orm.joinedload(column_ref)) return query def _scrub_empty_str_values(dct, keys_to_scrub): """Remove any keys found in sequence keys_to_scrub from the dict if they have the value ''. 
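    For illustration (the dict contents are made up):

        bdm = {'volume_size': '', 'volume_id': 'vol-1'}
        _scrub_empty_str_values(bdm, ['volume_size'])
        # bdm is now {'volume_id': 'vol-1'}; only keys whose value is the
        # empty string are removed, other values are left untouched.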
""" for key in keys_to_scrub: if key in dct and dct[key] == '': del dct[key] def _from_legacy_values(values, legacy, allow_updates=False): if legacy: if allow_updates and block_device.is_safe_for_update(values): return values else: return block_device.BlockDeviceDict.from_legacy(values) else: return values def _set_or_validate_uuid(values): uuid = values.get('uuid') # values doesn't contain uuid, or it's blank if not uuid: values['uuid'] = uuidutils.generate_uuid() # values contains a uuid else: if not uuidutils.is_uuid_like(uuid): raise exception.InvalidUUID(uuid=uuid) @require_context @pick_context_manager_writer def block_device_mapping_create(context, values, legacy=True): """Create an entry of block device mapping.""" _scrub_empty_str_values(values, ['volume_size']) values = _from_legacy_values(values, legacy) convert_objects_related_datetimes(values) _set_or_validate_uuid(values) bdm_ref = models.BlockDeviceMapping() bdm_ref.update(values) bdm_ref.save(context.session) return bdm_ref @require_context @pick_context_manager_writer def block_device_mapping_update(context, bdm_id, values, legacy=True): """Update an entry of block device mapping.""" _scrub_empty_str_values(values, ['volume_size']) values = _from_legacy_values(values, legacy, allow_updates=True) convert_objects_related_datetimes(values) query = _block_device_mapping_get_query(context).filter_by(id=bdm_id) query.update(values) return query.first() @pick_context_manager_writer def block_device_mapping_update_or_create(context, values, legacy=True): """Update an entry of block device mapping. If not existed, create a new entry """ # TODO(mdbooth): Remove this method entirely. Callers should know whether # they require update or create, and call the appropriate method. _scrub_empty_str_values(values, ['volume_size']) values = _from_legacy_values(values, legacy, allow_updates=True) convert_objects_related_datetimes(values) result = None # NOTE(xqueralt,danms): Only update a BDM when device_name or # uuid was provided. Prefer the uuid, if available, but fall # back to device_name if no uuid is provided, which can happen # for BDMs created before we had a uuid. We allow empty device # names so they will be set later by the manager. if 'uuid' in values: query = _block_device_mapping_get_query(context) result = query.filter_by(instance_uuid=values['instance_uuid'], uuid=values['uuid']).one_or_none() if not result and values['device_name']: query = _block_device_mapping_get_query(context) result = query.filter_by(instance_uuid=values['instance_uuid'], device_name=values['device_name']).first() if result: result.update(values) else: # Either the device_name or uuid doesn't exist in the database yet, or # neither was provided. Both cases mean creating a new BDM. _set_or_validate_uuid(values) result = models.BlockDeviceMapping(**values) result.save(context.session) # NOTE(xqueralt): Prevent from having multiple swap devices for the # same instance. This will delete all the existing ones. 
if block_device.new_format_is_swap(values): query = _block_device_mapping_get_query(context) query = query.filter_by(instance_uuid=values['instance_uuid'], source_type='blank', guest_format='swap') query = query.filter(models.BlockDeviceMapping.id != result.id) query.soft_delete() return result @require_context @pick_context_manager_reader_allow_async def block_device_mapping_get_all_by_instance_uuids(context, instance_uuids): """Get all block device mapping belonging to a list of instances.""" if not instance_uuids: return [] return _block_device_mapping_get_query(context).filter( models.BlockDeviceMapping.instance_uuid.in_(instance_uuids)).all() @require_context @pick_context_manager_reader_allow_async def block_device_mapping_get_all_by_instance(context, instance_uuid): """Get all block device mapping belonging to an instance.""" return _block_device_mapping_get_query(context).\ filter_by(instance_uuid=instance_uuid).\ all() @require_context @pick_context_manager_reader def block_device_mapping_get_all_by_volume_id( context, volume_id, columns_to_join=None, ): """Get block device mapping for a given volume.""" return _block_device_mapping_get_query(context, columns_to_join=columns_to_join).\ filter_by(volume_id=volume_id).\ all() @require_context @pick_context_manager_reader def block_device_mapping_get_by_instance_and_volume_id( context, volume_id, instance_uuid, columns_to_join=None, ): """Get block device mapping for a given volume ID and instance UUID.""" return _block_device_mapping_get_query(context, columns_to_join=columns_to_join).\ filter_by(volume_id=volume_id).\ filter_by(instance_uuid=instance_uuid).\ first() @require_context @pick_context_manager_writer def block_device_mapping_destroy(context, bdm_id): """Destroy the block device mapping.""" _block_device_mapping_get_query(context).\ filter_by(id=bdm_id).\ soft_delete() @require_context @pick_context_manager_writer def block_device_mapping_destroy_by_instance_and_volume( context, instance_uuid, volume_id, ): """Destroy the block device mapping.""" _block_device_mapping_get_query(context).\ filter_by(instance_uuid=instance_uuid).\ filter_by(volume_id=volume_id).\ soft_delete() @require_context @pick_context_manager_writer def block_device_mapping_destroy_by_instance_and_device( context, instance_uuid, device_name, ): """Destroy the block device mapping.""" _block_device_mapping_get_query(context).\ filter_by(instance_uuid=instance_uuid).\ filter_by(device_name=device_name).\ soft_delete() ################### @require_context @pick_context_manager_writer def security_group_create(context, values): """Create a new security group.""" security_group_ref = models.SecurityGroup() # FIXME(devcamcar): Unless I do this, rules fails with lazy load exception # once save() is called. This will get cleaned up in next orm pass. 
security_group_ref.rules = [] security_group_ref.update(values) try: with get_context_manager(context).writer.savepoint.using(context): security_group_ref.save(context.session) except db_exc.DBDuplicateEntry: raise exception.SecurityGroupExists( project_id=values['project_id'], security_group_name=values['name']) return security_group_ref def _security_group_get_query(context, read_deleted=None, project_only=False, join_rules=True): query = model_query( context, models.SecurityGroup, read_deleted=read_deleted, project_only=project_only, ) if join_rules: query = query.options( orm.joinedload( models.SecurityGroup.rules ).joinedload(models.SecurityGroupIngressRule.grantee_group) ) return query def _security_group_get_by_names(context, group_names): """Get security group models for a project by a list of names. Raise SecurityGroupNotFoundForProject for a name not found. """ query = _security_group_get_query(context, read_deleted="no", join_rules=False).\ filter_by(project_id=context.project_id).\ filter(models.SecurityGroup.name.in_(group_names)) sg_models = query.all() if len(sg_models) == len(group_names): return sg_models # Find the first one missing and raise group_names_from_models = [x.name for x in sg_models] for group_name in group_names: if group_name not in group_names_from_models: raise exception.SecurityGroupNotFoundForProject( project_id=context.project_id, security_group_id=group_name) # Not Reached @require_context @pick_context_manager_reader def security_group_get_all(context): """Get all security groups.""" return _security_group_get_query(context).all() @require_context @pick_context_manager_reader def security_group_get(context, security_group_id, columns_to_join=None): """Get security group by its ID.""" join_rules = columns_to_join and 'rules' in columns_to_join if join_rules: columns_to_join.remove('rules') query = _security_group_get_query(context, project_only=True, join_rules=join_rules).\ filter_by(id=security_group_id) if columns_to_join is None: columns_to_join = [] for column in columns_to_join: query = query.options(_joinedload_all(models.SecurityGroup, column)) result = query.first() if not result: raise exception.SecurityGroupNotFound( security_group_id=security_group_id) return result @require_context @pick_context_manager_reader def security_group_get_by_name(context, project_id, group_name): """Returns a security group with the specified name from a project.""" query = _security_group_get_query( context, read_deleted="no", join_rules=False, ).filter_by( project_id=project_id, ).filter_by( name=group_name, ).options( orm.joinedload(models.SecurityGroup.instances) ).options( orm.joinedload( models.SecurityGroup.rules ).joinedload(models.SecurityGroupIngressRule.grantee_group) ) result = query.first() if not result: raise exception.SecurityGroupNotFoundForProject( project_id=project_id, security_group_id=group_name, ) return result @require_context @pick_context_manager_reader def security_group_get_by_project(context, project_id): """Get all security groups belonging to a project.""" return _security_group_get_query(context, read_deleted="no").\ filter_by(project_id=project_id).\ all() @require_context @pick_context_manager_reader def security_group_get_by_instance(context, instance_uuid): """Get security groups to which the instance is assigned.""" return _security_group_get_query(context, read_deleted="no").\ join(models.SecurityGroup.instances).\ filter_by(uuid=instance_uuid).\ all() @require_context @pick_context_manager_reader def 
security_group_in_use(context, group_id): """Indicates if a security group is currently in use.""" # Are there any instances that haven't been deleted # that include this group? inst_assoc = model_query(context, models.SecurityGroupInstanceAssociation, read_deleted="no").\ filter_by(security_group_id=group_id).\ all() for ia in inst_assoc: num_instances = model_query(context, models.Instance, read_deleted="no").\ filter_by(uuid=ia.instance_uuid).\ count() if num_instances: return True return False @require_context @pick_context_manager_writer def security_group_update(context, security_group_id, values): """Update a security group.""" query = model_query(context, models.SecurityGroup).filter_by( id=security_group_id, ) security_group_ref = query.first() if not security_group_ref: raise exception.SecurityGroupNotFound( security_group_id=security_group_id) security_group_ref.update(values) name = security_group_ref['name'] project_id = security_group_ref['project_id'] try: security_group_ref.save(context.session) except db_exc.DBDuplicateEntry: raise exception.SecurityGroupExists( project_id=project_id, security_group_name=name) return security_group_ref def security_group_ensure_default(context): """Ensure default security group exists for a project_id. Returns a tuple with the first element being a bool indicating if the default security group previously existed. Second element is the dict used to create the default security group. """ try: # NOTE(rpodolyaka): create the default security group, if it doesn't # exist. This must be done in a separate transaction, so that # this one is not aborted in case a concurrent one succeeds first # and the unique constraint for security group names is violated # by a concurrent INSERT with get_context_manager(context).writer.independent.using(context): return _security_group_ensure_default(context) except exception.SecurityGroupExists: # NOTE(rpodolyaka): a concurrent transaction has succeeded first, # suppress the error and proceed return security_group_get_by_name(context, context.project_id, 'default') @pick_context_manager_writer def _security_group_ensure_default(context): try: default_group = _security_group_get_by_names(context, ['default'])[0] except exception.NotFound: values = {'name': 'default', 'description': 'default', 'user_id': context.user_id, 'project_id': context.project_id} default_group = security_group_create(context, values) return default_group @require_context @pick_context_manager_writer def security_group_destroy(context, security_group_id): """Deletes a security group.""" model_query(context, models.SecurityGroup).\ filter_by(id=security_group_id).\ soft_delete() model_query(context, models.SecurityGroupInstanceAssociation).\ filter_by(security_group_id=security_group_id).\ soft_delete() model_query(context, models.SecurityGroupIngressRule).\ filter_by(group_id=security_group_id).\ soft_delete() model_query(context, models.SecurityGroupIngressRule).\ filter_by(parent_group_id=security_group_id).\ soft_delete() ################### @pick_context_manager_writer def migration_create(context, values): """Create a migration record.""" migration = models.Migration() migration.update(values) migration.save(context.session) return migration @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @pick_context_manager_writer def migration_update(context, migration_id, values): """Update a migration instance.""" migration = migration_get(context, migration_id) migration.update(values) return migration 
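# A minimal usage sketch for the two migration writers above (the column
# values are made up; real callers pass whichever Migration columns they
# need):
#
#   mig = migration_create(ctxt, {'instance_uuid': instance.uuid,
#                                 'status': 'queued'})
#   migration_update(ctxt, mig.id, {'status': 'completed'})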
@pick_context_manager_reader def migration_get(context, migration_id): """Finds a migration by the ID.""" result = model_query(context, models.Migration, read_deleted="yes").\ filter_by(id=migration_id).\ first() if not result: raise exception.MigrationNotFound(migration_id=migration_id) return result @pick_context_manager_reader def migration_get_by_uuid(context, migration_uuid): """Finds a migration by the migration UUID.""" result = model_query(context, models.Migration, read_deleted="yes").\ filter_by(uuid=migration_uuid).\ first() if not result: raise exception.MigrationNotFound(migration_id=migration_uuid) return result @pick_context_manager_reader def migration_get_by_id_and_instance(context, migration_id, instance_uuid): """Finds a migration by the migration ID and the instance UUID.""" result = model_query(context, models.Migration).\ filter_by(id=migration_id).\ filter_by(instance_uuid=instance_uuid).\ first() if not result: raise exception.MigrationNotFoundForInstance( migration_id=migration_id, instance_id=instance_uuid) return result @pick_context_manager_reader def migration_get_by_instance_and_status(context, instance_uuid, status): """Finds a migration by the instance UUID it's migrating.""" result = model_query(context, models.Migration, read_deleted="yes").\ filter_by(instance_uuid=instance_uuid).\ filter_by(status=status).\ first() if not result: raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid, status=status) return result @pick_context_manager_reader_allow_async def migration_get_unconfirmed_by_dest_compute( context, confirm_window, dest_compute, ): """Finds all unconfirmed migrations within the confirmation window for a specific destination compute host. """ confirm_window = (timeutils.utcnow() - datetime.timedelta(seconds=confirm_window)) return model_query(context, models.Migration, read_deleted="yes").\ filter(models.Migration.updated_at <= confirm_window).\ filter_by(status="finished").\ filter_by(dest_compute=dest_compute).\ all() @pick_context_manager_reader def migration_get_in_progress_by_host_and_node(context, host, node): """Finds all migrations for the given host + node that are not yet confirmed or reverted. """ # TODO(mriedem): Tracking what various code flows set for # migration status is nutty, since it happens all over the place # and several of the statuses are redundant (done and completed). # We need to define these in an enum somewhere and just update # that one central place that defines what "in progress" means. # NOTE(mriedem): The 'finished' status is not in this list because # 'finished' means a resize is finished on the destination host # and the instance is in VERIFY_RESIZE state, so the end state # for a resize is actually 'confirmed' or 'reverted'. return model_query( context, models.Migration, ).filter( sql.or_( sql.and_( models.Migration.source_compute == host, models.Migration.source_node == node, ), sql.and_( models.Migration.dest_compute == host, models.Migration.dest_node == node, ), ) ).filter( ~models.Migration.status.in_( [ 'confirmed', 'reverted', 'error', 'failed', 'completed', 'cancelled', 'done', ] ) ).options( orm.joinedload( models.Migration.instance ).joinedload(models.Instance.system_metadata) ).all() @pick_context_manager_reader def migration_get_in_progress_by_instance(context, instance_uuid, migration_type=None): """Finds all migrations of an instance in progress.""" # TODO(Shaohe Feng) we should share the in-progress list. 
# TODO(Shaohe Feng) will also summarize all status to a new # MigrationStatus class. query = model_query(context, models.Migration).\ filter_by(instance_uuid=instance_uuid).\ filter(models.Migration.status.in_(['queued', 'preparing', 'running', 'post-migrating'])) if migration_type: query = query.filter(models.Migration.migration_type == migration_type) return query.all() @pick_context_manager_reader def migration_get_all_by_filters(context, filters, sort_keys=None, sort_dirs=None, limit=None, marker=None): """Finds all migrations using the provided filters.""" if limit == 0: return [] query = model_query(context, models.Migration) if "uuid" in filters: # The uuid filter is here for the MigrationLister and multi-cell # paging support in the compute API. uuid = filters["uuid"] uuid = [uuid] if isinstance(uuid, str) else uuid query = query.filter(models.Migration.uuid.in_(uuid)) model_object = models.Migration query = _get_query_nova_resource_by_changes_time(query, filters, model_object) if "status" in filters: status = filters["status"] status = [status] if isinstance(status, str) else status query = query.filter(models.Migration.status.in_(status)) if "host" in filters: host = filters["host"] query = query.filter(sql.or_( models.Migration.source_compute == host, models.Migration.dest_compute == host)) elif "source_compute" in filters: host = filters['source_compute'] query = query.filter(models.Migration.source_compute == host) if "node" in filters: node = filters['node'] query = query.filter(sql.or_( models.Migration.source_node == node, models.Migration.dest_node == node)) if "migration_type" in filters: migtype = filters["migration_type"] query = query.filter(models.Migration.migration_type == migtype) if "hidden" in filters: hidden = filters["hidden"] query = query.filter(models.Migration.hidden == hidden) if "instance_uuid" in filters: instance_uuid = filters["instance_uuid"] query = query.filter(models.Migration.instance_uuid == instance_uuid) if 'user_id' in filters: user_id = filters['user_id'] query = query.filter(models.Migration.user_id == user_id) if 'project_id' in filters: project_id = filters['project_id'] query = query.filter(models.Migration.project_id == project_id) if marker: try: marker = migration_get_by_uuid(context, marker) except exception.MigrationNotFound: raise exception.MarkerNotFound(marker=marker) if limit or marker or sort_keys or sort_dirs: # Default sort by desc(['created_at', 'id']) sort_keys, sort_dirs = db_utils.process_sort_params( sort_keys, sort_dirs, default_dir='desc') return sqlalchemyutils.paginate_query(query, models.Migration, limit=limit, sort_keys=sort_keys, marker=marker, sort_dirs=sort_dirs).all() else: return query.all() @require_context @pick_context_manager_reader_allow_async def migration_get_by_sort_filters(context, sort_keys, sort_dirs, values): """Get the uuid of the first migration in a sort order. Return the first migration (uuid) of the set where each column value is greater than or equal to the matching one in @values, for each key in @sort_keys. This is used to try to find a marker migration when we don't have a marker uuid. :returns: A UUID of the migration that matched. 
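    For illustration, a caller sorting on ['created_at', 'id'] might pass
    (the values below are made up)::

        migration_get_by_sort_filters(ctxt, ['created_at', 'id'],
                                      ['desc', 'desc'],
                                      [last_seen_created_at, last_seen_id])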
""" model = models.Migration return _model_get_uuid_by_sort_filters(context, model, sort_keys, sort_dirs, values) @pick_context_manager_writer def migration_migrate_to_uuid(context, max_count): # Avoid circular import from nova import objects db_migrations = model_query(context, models.Migration).filter_by( uuid=None).limit(max_count).all() done = 0 for db_migration in db_migrations: mig = objects.Migration(context) mig._from_db_object(context, mig, db_migration) done += 1 # We don't have any situation where we can (detectably) not # migrate a thing, so report anything that matched as "completed". return done, done @pick_context_manager_reader def migration_get_in_progress_and_error_by_host_and_node(context, host, node): """Finds all in progress migrations and error migrations for the given host and node. """ return model_query( context, models.Migration, ).filter( sql.or_( sql.and_( models.Migration.source_compute == host, models.Migration.source_node == node), sql.and_( models.Migration.dest_compute == host, models.Migration.dest_node == node, ), ) ).filter( ~models.Migration.status.in_([ 'confirmed', 'reverted', 'failed', 'completed', 'cancelled', 'done', ]) ).options( orm.joinedload( models.Migration.instance ).joinedload(models.Instance.system_metadata) ).all() ######################## # User-provided metadata def _instance_metadata_get_multi(context, instance_uuids): if not instance_uuids: return [] return model_query(context, models.InstanceMetadata).filter( models.InstanceMetadata.instance_uuid.in_(instance_uuids)) def _instance_metadata_get_query(context, instance_uuid): return model_query(context, models.InstanceMetadata, read_deleted="no").\ filter_by(instance_uuid=instance_uuid) @require_context @pick_context_manager_reader def instance_metadata_get(context, instance_uuid): """Get all metadata for an instance.""" rows = _instance_metadata_get_query(context, instance_uuid).all() return {row['key']: row['value'] for row in rows} @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @pick_context_manager_writer def instance_metadata_delete(context, instance_uuid, key): """Delete the given metadata item.""" _instance_metadata_get_query(context, instance_uuid).\ filter_by(key=key).\ soft_delete() @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @pick_context_manager_writer def instance_metadata_update(context, instance_uuid, metadata, delete): """Update metadata if it exists, otherwise create it.""" all_keys = metadata.keys() if delete: _instance_metadata_get_query(context, instance_uuid).\ filter(~models.InstanceMetadata.key.in_(all_keys)).\ soft_delete(synchronize_session=False) already_existing_keys = [] meta_refs = _instance_metadata_get_query(context, instance_uuid).\ filter(models.InstanceMetadata.key.in_(all_keys)).\ all() for meta_ref in meta_refs: already_existing_keys.append(meta_ref.key) meta_ref.update({"value": metadata[meta_ref.key]}) new_keys = set(all_keys) - set(already_existing_keys) for key in new_keys: meta_ref = models.InstanceMetadata() meta_ref.update({"key": key, "value": metadata[key], "instance_uuid": instance_uuid}) context.session.add(meta_ref) return metadata ####################### # System-owned metadata def _instance_system_metadata_get_multi(context, instance_uuids): if not instance_uuids: return [] return model_query(context, models.InstanceSystemMetadata, read_deleted='yes').filter( models.InstanceSystemMetadata.instance_uuid.in_(instance_uuids)) def 
_instance_system_metadata_get_query(context, instance_uuid): return model_query(context, models.InstanceSystemMetadata).\ filter_by(instance_uuid=instance_uuid) @require_context @pick_context_manager_reader def instance_system_metadata_get(context, instance_uuid): """Get all system metadata for an instance.""" rows = _instance_system_metadata_get_query(context, instance_uuid).all() return {row['key']: row['value'] for row in rows} @require_context @pick_context_manager_writer def instance_system_metadata_update(context, instance_uuid, metadata, delete): """Update metadata if it exists, otherwise create it.""" all_keys = metadata.keys() if delete: _instance_system_metadata_get_query(context, instance_uuid).\ filter(~models.InstanceSystemMetadata.key.in_(all_keys)).\ soft_delete(synchronize_session=False) already_existing_keys = [] meta_refs = _instance_system_metadata_get_query(context, instance_uuid).\ filter(models.InstanceSystemMetadata.key.in_(all_keys)).\ all() for meta_ref in meta_refs: already_existing_keys.append(meta_ref.key) meta_ref.update({"value": metadata[meta_ref.key]}) new_keys = set(all_keys) - set(already_existing_keys) for key in new_keys: meta_ref = models.InstanceSystemMetadata() meta_ref.update({"key": key, "value": metadata[key], "instance_uuid": instance_uuid}) context.session.add(meta_ref) return metadata #################### @require_context @pick_context_manager_reader def vol_get_usage_by_time(context, begin): """Return volumes usage that have been updated after a specified time.""" return model_query(context, models.VolumeUsage, read_deleted="yes").\ filter(sql.or_( models.VolumeUsage.tot_last_refreshed == sql.null(), models.VolumeUsage.tot_last_refreshed > begin, models.VolumeUsage.curr_last_refreshed == sql.null(), models.VolumeUsage.curr_last_refreshed > begin, )).all() @require_context @pick_context_manager_writer def vol_usage_update( context, id, rd_req, rd_bytes, wr_req, wr_bytes, instance_id, project_id, user_id, availability_zone, update_totals=False, ): """Update cached volume usage for a volume Creates new record if needed. """ refreshed = timeutils.utcnow() values = {} # NOTE(dricco): We will be mostly updating current usage records vs # updating total or creating records. Optimize accordingly. if not update_totals: values = {'curr_last_refreshed': refreshed, 'curr_reads': rd_req, 'curr_read_bytes': rd_bytes, 'curr_writes': wr_req, 'curr_write_bytes': wr_bytes, 'instance_uuid': instance_id, 'project_id': project_id, 'user_id': user_id, 'availability_zone': availability_zone} else: values = {'tot_last_refreshed': refreshed, 'tot_reads': models.VolumeUsage.tot_reads + rd_req, 'tot_read_bytes': models.VolumeUsage.tot_read_bytes + rd_bytes, 'tot_writes': models.VolumeUsage.tot_writes + wr_req, 'tot_write_bytes': models.VolumeUsage.tot_write_bytes + wr_bytes, 'curr_reads': 0, 'curr_read_bytes': 0, 'curr_writes': 0, 'curr_write_bytes': 0, 'instance_uuid': instance_id, 'project_id': project_id, 'user_id': user_id, 'availability_zone': availability_zone} current_usage = model_query(context, models.VolumeUsage, read_deleted="yes").\ filter_by(volume_id=id).\ first() if current_usage: if (rd_req < current_usage['curr_reads'] or rd_bytes < current_usage['curr_read_bytes'] or wr_req < current_usage['curr_writes'] or wr_bytes < current_usage['curr_write_bytes']): LOG.info("Volume(%s) has lower stats then what is in " "the database. Instance must have been rebooted " "or crashed. 
Updating totals.", id) if not update_totals: values['tot_reads'] = (models.VolumeUsage.tot_reads + current_usage['curr_reads']) values['tot_read_bytes'] = ( models.VolumeUsage.tot_read_bytes + current_usage['curr_read_bytes']) values['tot_writes'] = (models.VolumeUsage.tot_writes + current_usage['curr_writes']) values['tot_write_bytes'] = ( models.VolumeUsage.tot_write_bytes + current_usage['curr_write_bytes']) else: values['tot_reads'] = (models.VolumeUsage.tot_reads + current_usage['curr_reads'] + rd_req) values['tot_read_bytes'] = ( models.VolumeUsage.tot_read_bytes + current_usage['curr_read_bytes'] + rd_bytes) values['tot_writes'] = (models.VolumeUsage.tot_writes + current_usage['curr_writes'] + wr_req) values['tot_write_bytes'] = ( models.VolumeUsage.tot_write_bytes + current_usage['curr_write_bytes'] + wr_bytes) current_usage.update(values) current_usage.save(context.session) context.session.refresh(current_usage) return current_usage vol_usage = models.VolumeUsage() vol_usage.volume_id = id vol_usage.instance_uuid = instance_id vol_usage.project_id = project_id vol_usage.user_id = user_id vol_usage.availability_zone = availability_zone if not update_totals: vol_usage.curr_last_refreshed = refreshed vol_usage.curr_reads = rd_req vol_usage.curr_read_bytes = rd_bytes vol_usage.curr_writes = wr_req vol_usage.curr_write_bytes = wr_bytes else: vol_usage.tot_last_refreshed = refreshed vol_usage.tot_reads = rd_req vol_usage.tot_read_bytes = rd_bytes vol_usage.tot_writes = wr_req vol_usage.tot_write_bytes = wr_bytes vol_usage.save(context.session) return vol_usage #################### @pick_context_manager_reader def s3_image_get(context, image_id): """Find local s3 image represented by the provided id.""" result = model_query(context, models.S3Image, read_deleted="yes").\ filter_by(id=image_id).\ first() if not result: raise exception.ImageNotFound(image_id=image_id) return result @pick_context_manager_reader def s3_image_get_by_uuid(context, image_uuid): """Find local s3 image represented by the provided uuid.""" result = model_query(context, models.S3Image, read_deleted="yes").\ filter_by(uuid=image_uuid).\ first() if not result: raise exception.ImageNotFound(image_id=image_uuid) return result @pick_context_manager_writer def s3_image_create(context, image_uuid): """Create local s3 image represented by provided uuid.""" try: s3_image_ref = models.S3Image() s3_image_ref.update({'uuid': image_uuid}) s3_image_ref.save(context.session) except Exception as e: raise db_exc.DBError(e) return s3_image_ref #################### @pick_context_manager_writer def instance_fault_create(context, values): """Create a new instance fault.""" fault_ref = models.InstanceFault() fault_ref.update(values) fault_ref.save(context.session) return dict(fault_ref) @pick_context_manager_reader def instance_fault_get_by_instance_uuids( context, instance_uuids, latest=False, ): """Get all instance faults for the provided instance_uuids. :param instance_uuids: List of UUIDs of instances to grab faults for :param latest: Optional boolean indicating we should only return the latest fault for the instance """ if not instance_uuids: return {} faults_tbl = models.InstanceFault.__table__ # NOTE(rpodolyaka): filtering by instance_uuids is performed in both # code branches below for the sake of a better query plan. On change, # make sure to update the other one as well. 
query = model_query(context, models.InstanceFault, [faults_tbl], read_deleted='no') if latest: # NOTE(jaypipes): We join instance_faults to a derived table of the # latest faults per instance UUID. The SQL produced below looks like # this: # # SELECT instance_faults.* # FROM instance_faults # JOIN ( # SELECT instance_uuid, MAX(id) AS max_id # FROM instance_faults # WHERE instance_uuid IN ( ... ) # AND deleted = 0 # GROUP BY instance_uuid # ) AS latest_faults # ON instance_faults.id = latest_faults.max_id; latest_faults = model_query( context, models.InstanceFault, [faults_tbl.c.instance_uuid, sql.func.max(faults_tbl.c.id).label('max_id')], read_deleted='no' ).filter( faults_tbl.c.instance_uuid.in_(instance_uuids) ).group_by( faults_tbl.c.instance_uuid ).subquery(name="latest_faults") query = query.join(latest_faults, faults_tbl.c.id == latest_faults.c.max_id) else: query = query.filter( models.InstanceFault.instance_uuid.in_(instance_uuids) ).order_by(expression.desc("id")) output = {} for instance_uuid in instance_uuids: output[instance_uuid] = [] for row in query: output[row.instance_uuid].append(row._asdict()) return output ################## @pick_context_manager_writer def action_start(context, values): """Start an action for an instance.""" convert_objects_related_datetimes(values, 'start_time', 'updated_at') action_ref = models.InstanceAction() action_ref.update(values) action_ref.save(context.session) return action_ref @pick_context_manager_writer def action_finish(context, values): """Finish an action for an instance.""" convert_objects_related_datetimes(values, 'start_time', 'finish_time', 'updated_at') query = model_query(context, models.InstanceAction).\ filter_by(instance_uuid=values['instance_uuid']).\ filter_by(request_id=values['request_id']) if query.update(values) != 1: raise exception.InstanceActionNotFound( request_id=values['request_id'], instance_uuid=values['instance_uuid']) return query.one() @pick_context_manager_reader def actions_get(context, instance_uuid, limit=None, marker=None, filters=None): """Get all instance actions for the provided instance and filters.""" if limit == 0: return [] sort_keys = ['created_at', 'id'] sort_dirs = ['desc', 'desc'] query_prefix = model_query(context, models.InstanceAction).\ filter_by(instance_uuid=instance_uuid) model_object = models.InstanceAction query_prefix = _get_query_nova_resource_by_changes_time(query_prefix, filters, model_object) if marker is not None: marker = action_get_by_request_id(context, instance_uuid, marker) if not marker: raise exception.MarkerNotFound(marker=marker) actions = sqlalchemyutils.paginate_query(query_prefix, models.InstanceAction, limit, sort_keys, marker=marker, sort_dirs=sort_dirs).all() return actions @pick_context_manager_reader def action_get_by_request_id(context, instance_uuid, request_id): """Get the action by request_id and given instance.""" action = _action_get_by_request_id(context, instance_uuid, request_id) return action def _action_get_by_request_id(context, instance_uuid, request_id): result = model_query(context, models.InstanceAction).\ filter_by(instance_uuid=instance_uuid).\ filter_by(request_id=request_id).\ order_by(expression.desc("created_at"), expression.desc("id")).\ first() return result def _action_get_last_created_by_instance_uuid(context, instance_uuid): result = model_query(context, models.InstanceAction).\ filter_by(instance_uuid=instance_uuid).\ order_by(expression.desc("created_at"), expression.desc("id")).\ first() return result 
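# A minimal paging sketch for actions_get() above (the instance UUID and
# page size are made up): fetch a page, then pass the request_id of the last
# action returned as the marker for the next page.
#
#   page = actions_get(ctxt, instance_uuid, limit=50)
#   next_page = actions_get(ctxt, instance_uuid, limit=50,
#                           marker=page[-1].request_id)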
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @pick_context_manager_writer def action_event_start(context, values): """Start an event on an instance action.""" convert_objects_related_datetimes(values, 'start_time') action = _action_get_by_request_id(context, values['instance_uuid'], values['request_id']) # When nova-compute restarts, the context is generated again in # init_host workflow, the request_id was different with the request_id # recorded in InstanceAction, so we can't get the original record # according to request_id. Try to get the last created action so that # init_instance can continue to finish the recovery action, like: # powering_off, unpausing, and so on. update_action = True if not action and not context.project_id: action = _action_get_last_created_by_instance_uuid( context, values['instance_uuid']) # If we couldn't find an action by the request_id, we don't want to # update this action since it likely represents an inactive action. update_action = False if not action: raise exception.InstanceActionNotFound( request_id=values['request_id'], instance_uuid=values['instance_uuid']) values['action_id'] = action['id'] event_ref = models.InstanceActionEvent() event_ref.update(values) context.session.add(event_ref) # Update action updated_at. if update_action: action.update({'updated_at': values['start_time']}) action.save(context.session) return event_ref # NOTE: We need the retry_on_deadlock decorator for cases like resize where # a lot of events are happening at once between multiple hosts trying to # update the same action record in a small time window. @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @pick_context_manager_writer def action_event_finish(context, values): """Finish an event on an instance action.""" convert_objects_related_datetimes(values, 'start_time', 'finish_time') action = _action_get_by_request_id(context, values['instance_uuid'], values['request_id']) # When nova-compute restarts, the context is generated again in # init_host workflow, the request_id was different with the request_id # recorded in InstanceAction, so we can't get the original record # according to request_id. Try to get the last created action so that # init_instance can continue to finish the recovery action, like: # powering_off, unpausing, and so on. update_action = True if not action and not context.project_id: action = _action_get_last_created_by_instance_uuid( context, values['instance_uuid']) # If we couldn't find an action by the request_id, we don't want to # update this action since it likely represents an inactive action. update_action = False if not action: raise exception.InstanceActionNotFound( request_id=values['request_id'], instance_uuid=values['instance_uuid']) event_ref = model_query(context, models.InstanceActionEvent).\ filter_by(action_id=action['id']).\ filter_by(event=values['event']).\ first() if not event_ref: raise exception.InstanceActionEventNotFound(action_id=action['id'], event=values['event']) event_ref.update(values) if values['result'].lower() == 'error': action.update({'message': 'Error'}) # Update action updated_at. 
if update_action: action.update({'updated_at': values['finish_time']}) action.save(context.session) return event_ref @pick_context_manager_reader def action_events_get(context, action_id): """Get the events by action id.""" events = model_query(context, models.InstanceActionEvent).\ filter_by(action_id=action_id).\ order_by(expression.desc("created_at"), expression.desc("id")).\ all() return events @pick_context_manager_reader def action_event_get_by_id(context, action_id, event_id): event = model_query(context, models.InstanceActionEvent).\ filter_by(action_id=action_id).\ filter_by(id=event_id).\ first() return event ################## @require_context @pick_context_manager_writer def ec2_instance_create(context, instance_uuid, id=None): """Create the EC2 ID to instance UUID mapping on demand.""" ec2_instance_ref = models.InstanceIdMapping() ec2_instance_ref.update({'uuid': instance_uuid}) if id is not None: ec2_instance_ref.update({'id': id}) ec2_instance_ref.save(context.session) return ec2_instance_ref @require_context @pick_context_manager_reader def ec2_instance_get_by_uuid(context, instance_uuid): """Get UUID through EC2 ID from instance_id_mappings table.""" result = _ec2_instance_get_query(context).\ filter_by(uuid=instance_uuid).\ first() if not result: raise exception.InstanceNotFound(instance_id=instance_uuid) return result @require_context @pick_context_manager_reader def ec2_instance_get_by_id(context, instance_id): result = _ec2_instance_get_query(context).\ filter_by(id=instance_id).\ first() if not result: raise exception.InstanceNotFound(instance_id=instance_id) return result @require_context @pick_context_manager_reader def get_instance_uuid_by_ec2_id(context, ec2_id): """Get UUID through EC2 ID from instance_id_mappings table.""" result = ec2_instance_get_by_id(context, ec2_id) return result['uuid'] def _ec2_instance_get_query(context): return model_query(context, models.InstanceIdMapping, read_deleted='yes') ################## def _task_log_get_query(context, task_name, period_beginning, period_ending, host=None, state=None): values = {'period_beginning': period_beginning, 'period_ending': period_ending} values = convert_objects_related_datetimes(values, *values.keys()) query = model_query(context, models.TaskLog).\ filter_by(task_name=task_name).\ filter_by(period_beginning=values['period_beginning']).\ filter_by(period_ending=values['period_ending']) if host is not None: query = query.filter_by(host=host) if state is not None: query = query.filter_by(state=state) return query @pick_context_manager_reader def task_log_get(context, task_name, period_beginning, period_ending, host, state=None): return _task_log_get_query(context, task_name, period_beginning, period_ending, host, state).first() @pick_context_manager_reader def task_log_get_all(context, task_name, period_beginning, period_ending, host=None, state=None): return _task_log_get_query(context, task_name, period_beginning, period_ending, host, state).all() @pick_context_manager_writer def task_log_begin_task( context, task_name, period_beginning, period_ending, host, task_items=None, message=None, ): """Mark a task as started for a given host/time period.""" values = {'period_beginning': period_beginning, 'period_ending': period_ending} values = convert_objects_related_datetimes(values, *values.keys()) task = models.TaskLog() task.task_name = task_name task.period_beginning = values['period_beginning'] task.period_ending = values['period_ending'] task.host = host task.state = "RUNNING" if message: task.message 
= message if task_items: task.task_items = task_items try: task.save(context.session) except db_exc.DBDuplicateEntry: raise exception.TaskAlreadyRunning(task_name=task_name, host=host) @pick_context_manager_writer def task_log_end_task( context, task_name, period_beginning, period_ending, host, errors, message=None, ): """Mark a task as complete for a given host/time period.""" values = dict(state="DONE", errors=errors) if message: values["message"] = message rows = _task_log_get_query(context, task_name, period_beginning, period_ending, host).update(values) if rows == 0: # It's not running! raise exception.TaskNotRunning(task_name=task_name, host=host) ################## def _get_tables_with_fk_to_table(table): """Get a list of tables that refer to the given table by foreign key (FK). :param table: Table object (parent) for which to find references by FK :returns: A list of Table objects that refer to the specified table by FK """ tables = [] for t in models.BASE.metadata.tables.values(): for fk in t.foreign_keys: if fk.references(table): tables.append(t) return tables def _get_fk_stmts(metadata, conn, table, column, records): """Find records related to this table by foreign key (FK) and create and return insert/delete statements for them. Logic is: find the tables that reference the table passed to this method and walk the tree of references by FK. As child records are found, prepend them to deques to execute later in a single database transaction (to avoid orphaning related records if any one insert/delete fails or the archive process is otherwise interrupted). :param metadata: Metadata object to use to construct a shadow Table object :param conn: Connection object to use to select records related by FK :param table: Table object (parent) for which to find references by FK :param column: Column object (parent) to use to select records related by FK :param records: A list of records (column values) to use to select records related by FK :returns: tuple of (insert statements, delete statements) for records related by FK to insert into shadow tables and delete from main tables """ inserts = collections.deque() deletes = collections.deque() fk_tables = _get_tables_with_fk_to_table(table) for fk_table in fk_tables: # Create the shadow table for the referencing table. fk_shadow_tablename = _SHADOW_TABLE_PREFIX + fk_table.name try: with conn.begin(): fk_shadow_table = schema.Table( fk_shadow_tablename, metadata, autoload_with=conn, ) except sqla_exc.NoSuchTableError: # No corresponding shadow table; skip it. continue # TODO(stephenfin): Drop this when we drop the table if fk_table.name == "dns_domains": # We have one table (dns_domains) where the key is called # "domain" rather than "id" fk_column = fk_table.c.domain else: fk_column = fk_table.c.id for fk in fk_table.foreign_keys: if table != fk.column.table: # if the foreign key doesn't actually point to the table we're # archiving entries from then it's not relevant; trying to # resolve this would result in a cartesian product continue # We need to find the records in the referring (child) table that # correspond to the records in our (parent) table so we can archive # them. # First, select the column in the parent referenced by the child # table that corresponds to the parent table records that were # passed in. 
# Example: table = 'instances' and fk_table = 'instance_extra' # fk.parent = instance_extra.instance_uuid # fk.column = instances.uuid # SELECT instances.uuid FROM instances, instance_extra # WHERE instance_extra.instance_uuid = instances.uuid # AND instance.id IN () # We need the instance uuids for the in order to # look up the matching instance_extra records. select = sql.select(fk.column).where( sql.and_(fk.parent == fk.column, column.in_(records)) ) with conn.begin(): rows = conn.execute(select).fetchall() p_records = [r[0] for r in rows] # Then, select rows in the child table that correspond to the # parent table records that were passed in. # Example: table = 'instances' and fk_table = 'instance_extra' # fk.parent = instance_extra.instance_uuid # fk.column = instances.uuid # SELECT instance_extra.id FROM instance_extra, instances # WHERE instance_extra.instance_uuid = instances.uuid # AND instances.uuid IN () # We will get the instance_extra ids we need to archive # them. fk_select = sql.select(fk_column).where( sql.and_(fk.parent == fk.column, fk.column.in_(p_records)) ) with conn.begin(): fk_rows = conn.execute(fk_select).fetchall() fk_records = [r[0] for r in fk_rows] if fk_records: # If we found any records in the child table, create shadow # table insert statements for them and prepend them to the # deque. fk_columns = [c.name for c in fk_table.c] fk_insert = fk_shadow_table.insert().from_select( fk_columns, sql.select(fk_table).where(fk_column.in_(fk_records)) ).inline() inserts.appendleft(fk_insert) # Create main table delete statements and prepend them to the # deque. fk_delete = fk_table.delete().where(fk_column.in_(fk_records)) deletes.appendleft(fk_delete) # Repeat for any possible nested child tables. i, d = _get_fk_stmts(metadata, conn, fk_table, fk_column, fk_records) inserts.extendleft(i) deletes.extendleft(d) return inserts, deletes def _archive_deleted_rows_for_table( metadata, engine, tablename, max_rows, before, task_log, ): """Move up to max_rows rows from one tables to the corresponding shadow table. Will also follow FK constraints and archive all referring rows. Example: archiving a record from the 'instances' table will also archive the 'instance_extra' record before archiving the 'instances' record. :returns: 3-item tuple: - number of rows archived - list of UUIDs of instances that were archived - number of extra rows archived (due to FK constraints) dict of {tablename: rows_archived} """ conn = engine.connect() # NOTE(tdurakov): table metadata should be received # from models, not db tables. Default value specified by SoftDeleteMixin # is known only by models, not DB layer. # IMPORTANT: please do not change source of metadata information for table. table = models.BASE.metadata.tables[tablename] shadow_tablename = _SHADOW_TABLE_PREFIX + tablename rows_archived = 0 deleted_instance_uuids = [] try: with conn.begin(): shadow_table = schema.Table( shadow_tablename, metadata, autoload_with=conn, ) except sqla_exc.NoSuchTableError: # No corresponding shadow table; skip it. 
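            # Nothing can be archived for this table without a shadow
            # table, so release the connection and report zero results
            # (rows_archived is still 0 and deleted_instance_uuids is
            # still empty at this point).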
conn.close() return rows_archived, deleted_instance_uuids, {} # TODO(stephenfin): Drop this when we drop the table if tablename == "dns_domains": # We have one table (dns_domains) where the key is called # "domain" rather than "id" column = table.c.domain else: column = table.c.id deleted_column = table.c.deleted columns = [c.name for c in table.c] select = sql.select(column).where( deleted_column != deleted_column.default.arg ) if tablename == "task_log" and task_log: # task_log table records are never deleted by anything, so we won't # base our select statement on the 'deleted' column status. select = sql.select(column) if before: if tablename != "task_log": select = select.where(table.c.deleted_at < before) elif task_log: # task_log table records are never deleted by anything, so we won't # base our select statement on the 'deleted_at' column status. select = select.where(table.c.updated_at < before) select = select.order_by(column).limit(max_rows) with conn.begin(): rows = conn.execute(select).fetchall() # This is a list of IDs of rows that should be archived from this table, # limited to a length of max_rows. records = [r[0] for r in rows] # We will archive deleted rows for this table and also generate insert and # delete statements for extra rows we may archive by following FK # relationships. Because we are iterating over the sorted_tables (list of # Table objects sorted in order of foreign key dependency), new inserts and # deletes ("leaves") will be added to the fronts of the deques created in # _get_fk_stmts. This way, we make sure we delete child table records # before we delete their parent table records. # Keep track of any extra tablenames to number of rows that we archive by # following FK relationships. # # extras = {tablename: number_of_extra_rows_archived} extras = collections.defaultdict(int) if not records: # Nothing to archive, so return. return rows_archived, deleted_instance_uuids, extras # Keep track of how many rows we accumulate for the insert+delete database # transaction and cap it as soon as it is >= max_rows. Because we will # archive all child rows of a parent row along with the parent at the same # time, we end up with extra rows to archive in addition to len(records). num_rows_in_batch = 0 # The sequence of query statements we will execute in a batch. These are # ordered: [child1, child1, parent1, child2, child2, child2, parent2, ...] # Parent + child "trees" are kept together to avoid FK constraint # violations. statements_in_batch = [] # The list of records in the batch. This is used for collecting deleted # instance UUIDs in the case of the 'instances' table. records_in_batch = [] # (melwitt): We will gather rows related by foreign key relationship for # each deleted row, one at a time. We do it this way to keep track of and # limit the total number of rows that will be archived in a single database # transaction. In a large scale database with potentially hundreds of # thousands of deleted rows, if we don't limit the size of the transaction # based on max_rows, we can get into a situation where we get stuck not # able to make much progress. The value of max_rows has to be 1) small # enough to not exceed the database's max packet size limit or timeout with # a deadlock but 2) large enough to make progress in an environment with a # constant high volume of create and delete traffic. 
By archiving each # parent + child rows tree one at a time, we can ensure meaningful progress # can be made while allowing the caller to predictably control the size of # the database transaction with max_rows. for record in records: # Walk FK relationships and add insert/delete statements for rows that # refer to this table via FK constraints. fk_inserts and fk_deletes # will be prepended to by _get_fk_stmts if referring rows are found by # FK constraints. fk_inserts, fk_deletes = _get_fk_stmts( metadata, conn, table, column, [record]) statements_in_batch.extend(fk_inserts + fk_deletes) # statement to add parent row to shadow table insert = shadow_table.insert().from_select( columns, sql.select(table).where(column.in_([record]))).inline() statements_in_batch.append(insert) # statement to remove parent row from main table delete = table.delete().where(column.in_([record])) statements_in_batch.append(delete) records_in_batch.append(record) # Check whether were have a full batch >= max_rows. Rows are counted as # the number of rows that will be moved in the database transaction. # So each insert+delete pair represents one row that will be moved. # 1 parent + its fks num_rows_in_batch += 1 + len(fk_inserts) if max_rows is not None and num_rows_in_batch >= max_rows: break # NOTE(tssurya): In order to facilitate the deletion of records from # instance_mappings, request_specs and instance_group_member tables in the # nova_api DB, the rows of deleted instances from the instances table are # stored prior to their deletion. Basically the uuids of the archived # instances are queried and returned. if tablename == "instances": query_select = sql.select(table.c.uuid).where( table.c.id.in_(records_in_batch)) with conn.begin(): rows = conn.execute(query_select).fetchall() # deleted_instance_uuids = ['uuid1', 'uuid2', ...] deleted_instance_uuids = [r[0] for r in rows] try: # Group the insert and delete in a transaction. with conn.begin(): for statement in statements_in_batch: result = conn.execute(statement) result_tablename = statement.table.name # Add to archived row counts if not a shadow table. if not result_tablename.startswith(_SHADOW_TABLE_PREFIX): if result_tablename == tablename: # Number of tablename (parent) rows archived. rows_archived += result.rowcount else: # Number(s) of child rows archived. extras[result_tablename] += result.rowcount except db_exc.DBReferenceError as ex: # A foreign key constraint keeps us from deleting some of these rows # until we clean up a dependent table. Just skip this table for now; # we'll come back to it later. LOG.warning("IntegrityError detected when archiving table " "%(tablename)s: %(error)s", {'tablename': tablename, 'error': str(ex)}) conn.close() return rows_archived, deleted_instance_uuids, extras def archive_deleted_rows(context=None, max_rows=None, before=None, task_log=False): """Move up to max_rows rows from production tables to the corresponding shadow tables. 
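    A minimal call sketch (illustrative only; the values shown are
    examples, not defaults)::

        rows_by_table, archived_uuids, total = archive_deleted_rows(
            context=ctxt, max_rows=1000, before=None, task_log=False)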
:param context: nova.context.RequestContext for database access :param max_rows: Maximum number of rows to archive (required) :param before: optional datetime which when specified filters the records to only archive those records deleted before the given date :param task_log: Optional for whether to archive task_log table records :returns: 3-item tuple: - dict that maps table name to number of rows archived from that table, for example:: { 'instances': 5, 'block_device_mapping': 5, 'pci_devices': 2, } - list of UUIDs of instances that were archived - total number of rows that were archived """ table_to_rows_archived = collections.defaultdict(int) deleted_instance_uuids = [] total_rows_archived = 0 meta = sa.MetaData() engine = get_engine(use_slave=True, context=context) meta.reflect(bind=engine) # Get the sorted list of tables in order of foreign key dependency. # Process the parent tables and find their dependent records in order to # archive the related records in a single database transactions. The goal # is to avoid a situation where, for example, an 'instances' table record # is missing its corresponding 'instance_extra' record due to running the # archive_deleted_rows command with max_rows. for table in meta.sorted_tables: tablename = table.name rows_archived = 0 # skip the special alembic_version version table and any shadow tables if ( tablename == 'alembic_version' or tablename.startswith(_SHADOW_TABLE_PREFIX) ): continue # skip the tables that we've since removed the models for if tablename in models.REMOVED_TABLES: continue rows_archived, _deleted_instance_uuids, extras = ( _archive_deleted_rows_for_table( meta, engine, tablename, max_rows=max_rows - total_rows_archived, before=before, task_log=task_log)) total_rows_archived += rows_archived if tablename == 'instances': deleted_instance_uuids = _deleted_instance_uuids # Only report results for tables that had updates. if rows_archived: table_to_rows_archived[tablename] = rows_archived for tablename, extra_rows_archived in extras.items(): table_to_rows_archived[tablename] += extra_rows_archived total_rows_archived += extra_rows_archived if total_rows_archived >= max_rows: break return table_to_rows_archived, deleted_instance_uuids, total_rows_archived def _purgeable_tables(metadata): return [ t for t in metadata.sorted_tables if ( t.name.startswith(_SHADOW_TABLE_PREFIX) and not t.name == 'alembic_version' ) ] def purge_shadow_tables(context, before_date, status_fn=None): engine = get_engine(context=context) conn = engine.connect() metadata = sa.MetaData() metadata.reflect(bind=engine) total_deleted = 0 if status_fn is None: status_fn = lambda m: None # Some things never get formally deleted, and thus deleted_at # is never set. So, prefer specific timestamp columns here # for those special cases. 
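    # For example, task_log rows are never soft-deleted, so their
    # deleted_at stays NULL; the overrides below purge shadow_task_log
    # on updated_at instead. Tables without an override fall back to
    # deleted_at, then updated_at, then created_at.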
overrides = { 'shadow_instance_actions': 'created_at', 'shadow_instance_actions_events': 'created_at', 'shadow_task_log': 'updated_at', } for table in _purgeable_tables(metadata): if before_date is None: col = None elif table.name in overrides: col = getattr(table.c, overrides[table.name]) elif hasattr(table.c, 'deleted_at'): col = table.c.deleted_at elif hasattr(table.c, 'updated_at'): col = table.c.updated_at elif hasattr(table.c, 'created_at'): col = table.c.created_at else: status_fn(_('Unable to purge table %(table)s because it ' 'has no timestamp column') % { 'table': table.name}) continue if col is not None: delete = table.delete().where(col < before_date) else: delete = table.delete() with conn.begin(): deleted = conn.execute(delete) if deleted.rowcount > 0: status_fn(_('Deleted %(rows)i rows from %(table)s based on ' 'timestamp column %(col)s') % { 'rows': deleted.rowcount, 'table': table.name, 'col': col is None and '(n/a)' or col.name}) total_deleted += deleted.rowcount conn.close() return total_deleted #################### @pick_context_manager_reader def pci_device_get_by_addr(context, node_id, dev_addr): """Get PCI device by address.""" pci_dev_ref = model_query(context, models.PciDevice).\ filter_by(compute_node_id=node_id).\ filter_by(address=dev_addr).\ first() if not pci_dev_ref: raise exception.PciDeviceNotFound(node_id=node_id, address=dev_addr) return pci_dev_ref @pick_context_manager_reader def pci_device_get_by_id(context, id): """Get PCI device by id.""" pci_dev_ref = model_query(context, models.PciDevice).\ filter_by(id=id).\ first() if not pci_dev_ref: raise exception.PciDeviceNotFoundById(id=id) return pci_dev_ref @pick_context_manager_reader def pci_device_get_all_by_node(context, node_id): """Get all PCI devices for one host.""" return model_query(context, models.PciDevice).\ filter_by(compute_node_id=node_id).\ all() @pick_context_manager_reader def pci_device_get_all_by_parent_addr(context, node_id, parent_addr): """Get all PCI devices by parent address.""" return model_query(context, models.PciDevice).\ filter_by(compute_node_id=node_id).\ filter_by(parent_addr=parent_addr).\ all() @require_context @pick_context_manager_reader def pci_device_get_all_by_instance_uuid(context, instance_uuid): """Get PCI devices allocated to instance.""" return model_query(context, models.PciDevice).\ filter_by(status='allocated').\ filter_by(instance_uuid=instance_uuid).\ all() @pick_context_manager_reader def _instance_pcidevs_get_multi(context, instance_uuids): if not instance_uuids: return [] return model_query(context, models.PciDevice).\ filter_by(status='allocated').\ filter(models.PciDevice.instance_uuid.in_(instance_uuids)) @pick_context_manager_writer def pci_device_destroy(context, node_id, address): """Delete a PCI device record.""" result = model_query(context, models.PciDevice).\ filter_by(compute_node_id=node_id).\ filter_by(address=address).\ soft_delete() if not result: raise exception.PciDeviceNotFound(node_id=node_id, address=address) @pick_context_manager_writer def pci_device_update(context, node_id, address, values): """Update a pci device.""" query = model_query(context, models.PciDevice, read_deleted="no").\ filter_by(compute_node_id=node_id).\ filter_by(address=address) if query.update(values) == 0: device = models.PciDevice() device.update(values) context.session.add(device) return query.one() #################### @pick_context_manager_writer def instance_tag_add(context, instance_uuid, tag): """Add tag to the instance.""" tag_ref = models.Tag() 
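    # Build the new Tag row; if the (instance_uuid, tag) pair already
    # exists, the DBDuplicateEntry raised on flush is swallowed below,
    # so adding the same tag twice is a no-op rather than an error.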
tag_ref.resource_id = instance_uuid tag_ref.tag = tag try: _check_instance_exists_in_project(context, instance_uuid) with get_context_manager(context).writer.savepoint.using(context): context.session.add(tag_ref) except db_exc.DBDuplicateEntry: # NOTE(snikitin): We should ignore tags duplicates pass return tag_ref @pick_context_manager_writer def instance_tag_set(context, instance_uuid, tags): """Replace all of the instance tags with specified list of tags.""" _check_instance_exists_in_project(context, instance_uuid) existing = context.session.query(models.Tag.tag).filter_by( resource_id=instance_uuid).all() existing = set(row.tag for row in existing) tags = set(tags) to_delete = existing - tags to_add = tags - existing if to_delete: context.session.query(models.Tag).filter_by( resource_id=instance_uuid).filter( models.Tag.tag.in_(to_delete)).delete( synchronize_session=False) if to_add: data = [ {'resource_id': instance_uuid, 'tag': tag} for tag in to_add] context.session.execute(models.Tag.__table__.insert(), data) return context.session.query(models.Tag).filter_by( resource_id=instance_uuid).all() @pick_context_manager_reader def instance_tag_get_by_instance_uuid(context, instance_uuid): """Get all tags for a given instance.""" _check_instance_exists_in_project(context, instance_uuid) return context.session.query(models.Tag).filter_by( resource_id=instance_uuid).all() @pick_context_manager_writer def instance_tag_delete(context, instance_uuid, tag): """Delete specified tag from the instance.""" _check_instance_exists_in_project(context, instance_uuid) result = context.session.query(models.Tag).filter_by( resource_id=instance_uuid, tag=tag).delete() if not result: raise exception.InstanceTagNotFound(instance_id=instance_uuid, tag=tag) @pick_context_manager_writer def instance_tag_delete_all(context, instance_uuid): """Delete all tags from the instance.""" _check_instance_exists_in_project(context, instance_uuid) context.session.query(models.Tag).filter_by( resource_id=instance_uuid).delete() @pick_context_manager_reader def instance_tag_exists(context, instance_uuid, tag): """Check if specified tag exist on the instance.""" _check_instance_exists_in_project(context, instance_uuid) q = context.session.query(models.Tag).filter_by( resource_id=instance_uuid, tag=tag) return context.session.query(q.exists()).scalar() #################### @pick_context_manager_writer def console_auth_token_create(context, values): """Create a console authorization.""" instance_uuid = values.get('instance_uuid') _check_instance_exists_in_project(context, instance_uuid) token_ref = models.ConsoleAuthToken() token_ref.update(values) context.session.add(token_ref) return token_ref @pick_context_manager_reader def console_auth_token_get_valid(context, token_hash, instance_uuid=None): """Get a valid console authorization by token_hash and instance_uuid. The console authorizations expire at the time specified by their 'expires' column. An expired console auth token will not be returned to the caller - it is treated as if it does not exist. If instance_uuid is specified, the token is validated against both expiry and instance_uuid. If instance_uuid is not specified, the token is validated against expiry only. 
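    A minimal usage sketch (illustrative only; variable names are
    examples)::

        token = console_auth_token_get_valid(
            ctxt, token_hash, instance_uuid=instance_uuid)
        if token is None:
            # expired or unknown token - do not authorize the console
            ...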
""" if instance_uuid is not None: _check_instance_exists_in_project(context, instance_uuid) query = context.session.query(models.ConsoleAuthToken).\ filter_by(token_hash=token_hash) if instance_uuid is not None: query = query.filter_by(instance_uuid=instance_uuid) return query.filter( models.ConsoleAuthToken.expires > timeutils.utcnow_ts()).first() @pick_context_manager_writer def console_auth_token_destroy_all_by_instance(context, instance_uuid): """Delete all console authorizations belonging to the instance.""" context.session.query(models.ConsoleAuthToken).\ filter_by(instance_uuid=instance_uuid).delete() @pick_context_manager_writer def console_auth_token_destroy_expired(context): """Delete expired console authorizations. The console authorizations expire at the time specified by their 'expires' column. This function is used to garbage collect expired tokens. """ context.session.query(models.ConsoleAuthToken).\ filter(models.ConsoleAuthToken.expires <= timeutils.utcnow_ts()).\ delete() @pick_context_manager_writer def console_auth_token_destroy_expired_by_host(context, host): """Delete expired console authorizations belonging to the host. The console authorizations expire at the time specified by their 'expires' column. This function is used to garbage collect expired tokens associated with the given host. """ context.session.query(models.ConsoleAuthToken).\ filter_by(host=host).\ filter(models.ConsoleAuthToken.expires <= timeutils.utcnow_ts()).\ delete() #################### @require_context @pick_context_manager_reader def share_mapping_get_all(context): """Get all share_mapping.""" return context.session.query(models.ShareMapping).all() @require_context @pick_context_manager_reader def share_mapping_get_by_share_id(context, share_id): """Get share_mapping records for a specific share.""" return context.session.query(models.ShareMapping).\ filter_by(share_id=share_id).all() @require_context @pick_context_manager_reader def share_mapping_get_by_instance_uuid(context, instance_uuid): """Get share_mapping records for a specific instance.""" return context.session.query(models.ShareMapping).\ filter_by(instance_uuid=instance_uuid).all() @require_context @pick_context_manager_reader def share_mapping_get_by_instance_uuid_and_share_id( context, instance_uuid, share_id): """Get share_mapping record for a specific instance and share_id.""" return context.session.query(models.ShareMapping).\ filter_by(instance_uuid=instance_uuid, share_id=share_id).first() @require_context @pick_context_manager_writer def share_mapping_delete_by_instance_uuid_and_share_id( context, instance_uuid, share_id): """Delete share_mapping record for a specific instance and share_id.""" context.session.query(models.ShareMapping).\ filter_by(instance_uuid=instance_uuid, share_id=share_id).delete() @require_context @pick_context_manager_writer def share_mapping_update( context, uuid, instance_uuid, share_id, status, tag, export_location, share_proto ): """Update share_mapping for a share Creates new record if needed. 
""" share_mapping = share_mapping_get_by_instance_uuid_and_share_id( context, instance_uuid, share_id) if share_mapping: share_mapping.status = status share_mapping.tag = tag share_mapping.export_location = export_location share_mapping.share_proto = share_proto share_mapping.save(context.session) context.session.refresh(share_mapping) else: share_mapping = models.ShareMapping() share_mapping.uuid = uuid share_mapping.instance_uuid = instance_uuid share_mapping.share_id = share_id share_mapping.status = status share_mapping.tag = tag share_mapping.export_location = export_location share_mapping.share_proto = share_proto share_mapping.save(context.session) return share_mapping ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3656087 nova-32.0.0/nova/db/main/migrations/0000775000175000017500000000000000000000000017212 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/main/migrations/README.rst0000664000175000017500000000105100000000000020676 0ustar00zuulzuul00000000000000Migrations for the main database ================================ This directory contains migrations for the main database. These are implemented using `alembic`__, a lightweight database migration tool designed for usage with `SQLAlchemy`__. The best place to start understanding Alembic is with its own `tutorial`__. You can also play around with the :command:`alembic` command:: $ alembic --help .. __: https://alembic.sqlalchemy.org/en/latest/ .. __: https://www.sqlalchemy.org/ .. __: https://alembic.sqlalchemy.org/en/latest/tutorial.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/main/migrations/env.py0000664000175000017500000001044700000000000020362 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from logging.config import fileConfig from alembic import context from sqlalchemy import engine_from_config from sqlalchemy import pool from nova.db.main import models # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging unless we're told not to. # This line sets up loggers basically. if config.attributes.get('configure_logger', True): fileConfig(config.config_file_name) # this is the MetaData object for the various models in the main database target_metadata = models.BASE.metadata def include_name(name, type_, parent_names): """Determine which tables or columns to skip. This is used when we decide to "delete" a table or column. In this instance, we will remove the SQLAlchemy model or field but leave the underlying database table or column in place for a number of releases after. Once we're sure that there is no code running that contains references to the old models, we can then remove the underlying table. 
In the interim, we must track the discrepancy between models and actual database data here. """ if type_ == 'table': # NOTE(stephenfin): We don't have models corresponding to the various # shadow tables. Alembic doesn't like this. Tell Alembic to look the # other way. Good Alembic. if name.startswith('shadow_'): return False return name not in models.REMOVED_TABLES if type_ == 'column': return (parent_names['table_name'], name) not in models.REMOVED_COLUMNS return True def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ url = config.get_main_option("sqlalchemy.url") context.configure( url=url, target_metadata=target_metadata, render_as_batch=True, include_name=include_name, literal_binds=True, dialect_opts={"paramstyle": "named"}, ) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. This is modified from the default based on the below, since we want to share an engine when unit testing so in-memory database testing actually works. https://alembic.sqlalchemy.org/en/latest/cookbook.html#connection-sharing """ connectable = config.attributes.get('connection', None) if connectable is None: # only create Engine if we don't have a Connection from the outside connectable = engine_from_config( config.get_section(config.config_ini_section), prefix="sqlalchemy.", poolclass=pool.NullPool, ) with connectable.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata, render_as_batch=True, include_name=include_name, ) with context.begin_transaction(): context.run_migrations() else: context.configure( connection=connectable, target_metadata=target_metadata, render_as_batch=True, include_name=include_name, ) with context.begin_transaction(): context.run_migrations() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/main/migrations/script.py.mako0000664000175000017500000000172000000000000022016 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """${message} Revision ID: ${up_revision} Revises: ${down_revision | comma,n} Create Date: ${create_date} """ from alembic import op import sqlalchemy as sa ${imports if imports else ""} # revision identifiers, used by Alembic. 
revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} branch_labels = ${repr(branch_labels)} depends_on = ${repr(depends_on)} def upgrade(): ${upgrades if upgrades else "pass"} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3656087 nova-32.0.0/nova/db/main/migrations/versions/0000775000175000017500000000000000000000000021062 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/main/migrations/versions/13863f4e1612_create_share_mapping_table.py0000664000175000017500000000362200000000000030423 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """create_share_mapping_table Revision ID: 13863f4e1612 Revises: 960aac0e09ea Create Date: 2022-02-17 18:34:09.050246 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '13863f4e1612' down_revision = '1acf2c98e646' branch_labels = None depends_on = None def upgrade(): op.create_table( 'share_mapping', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column( "id", sa.BigInteger().with_variant(sa.Integer, "sqlite"), primary_key=True, autoincrement=True, nullable=False, ), sa.Column('uuid', sa.String(36)), sa.Column( 'instance_uuid', sa.String(length=36), sa.ForeignKey( 'instances.uuid', name='share_mapping_instance_uuid_fkey')), sa.Column('share_id', sa.String(length=36)), sa.Column('status', sa.String(length=32)), sa.Column('tag', sa.String(48)), sa.Column('export_location', sa.Text), sa.Column('share_proto', sa.String(32)), sa.Index('share_idx', 'share_id'), sa.Index( 'share_mapping_instance_uuid_share_id_idx', 'instance_uuid', 'share_id'), mysql_engine='InnoDB', mysql_charset='utf8' ) def downgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/main/migrations/versions/16f1fbcab42b_resolve_shadow_table_diffs.py0000664000175000017500000000365300000000000031034 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Resolve shadow table diffs Revision ID: 16f1fbcab42b Revises: 8f2f1571d55b Create Date: 2021-08-20 13:26:30.204633 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = '16f1fbcab42b' down_revision = '8f2f1571d55b' branch_labels = None depends_on = None def upgrade(): bind = op.get_bind() # 244_increase_user_id_length_volume_usage_cache; the length in the # corresponding shadow table was not increased with op.batch_alter_table('shadow_volume_usage_cache') as batch_op: batch_op.alter_column( 'user_id', type_=sa.String(64), existing_type=sa.String(36), ) # 252_add_instance_extra_table; we shouldn't have created an index for the # shadow table op.drop_index(index_name='shadow_instance_extra_idx', table_name='shadow_instance_extra') # 373_migration_uuid; we shouldn't have created an index for the shadow # table op.drop_index(index_name='shadow_migrations_uuid', table_name='shadow_migrations') # 298_mysql_extra_specs_binary_collation; we changed the collation on the # main table but not the shadow table if bind.engine.name == 'mysql': op.execute( 'ALTER TABLE shadow_instance_type_extra_specs ' 'CONVERT TO CHARACTER SET utf8 ' 'COLLATE utf8_bin' ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/main/migrations/versions/1acf2c98e646_add_compute_id_to_instance.py0000664000175000017500000000331300000000000030667 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add compute_id to instance Revision ID: 1acf2c98e646 Revises: 960aac0e09ea Create Date: 2023-04-03 07:10:42.410832 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '1acf2c98e646' down_revision = '1b91788ec3a6' branch_labels = None depends_on = None def upgrade(): for prefix in ('', 'shadow_'): table_name = prefix + 'instances' with op.batch_alter_table(table_name, schema=None) as batch_op: batch_op.add_column( sa.Column( 'compute_id', sa.BigInteger(), nullable=True)) if not prefix: batch_op.create_index('instances_compute_id_deleted_idx', ('compute_id', 'deleted')) table_name = prefix + 'migrations' with op.batch_alter_table(table_name, schema=None) as batch_op: batch_op.add_column( sa.Column( 'dest_compute_id', sa.BigInteger(), nullable=True)) if not prefix: batch_op.create_index('migrations_dest_compute_id_deleted_idx', ('dest_compute_id', 'deleted')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/main/migrations/versions/1b91788ec3a6_drop_legacy_migrate_version_table.py0000664000175000017500000000210600000000000032174 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Drop legacy migrate_version table Revision ID: 1b91788ec3a6 Revises: 960aac0e09ea Create Date: 2023-02-01 16:46:24.206580 """ from alembic import op from sqlalchemy.engine import reflection # revision identifiers, used by Alembic. revision = '1b91788ec3a6' down_revision = '960aac0e09ea' branch_labels = None depends_on = None def upgrade(): conn = op.get_bind() inspector = reflection.Inspector.from_engine(conn) tables = inspector.get_table_names() if 'migrate_version' in tables: op.drop_table('migrate_version') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/main/migrations/versions/2903cd72dc14_add_tls_port_to_console_auth_tokens.py0000664000175000017500000000200100000000000032544 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_tls_port_to_console_auth_tokens Revision ID: 2903cd72dc14 Revises: d60bddf7a903 Create Date: 2024-07-18 22:55:25.736157 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '2903cd72dc14' down_revision = 'd60bddf7a903' branch_labels = None depends_on = None def upgrade(): with op.batch_alter_table('console_auth_tokens', schema=None) as batch_op: batch_op.add_column(sa.Column('tls_port', sa.Integer())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/main/migrations/versions/8f2f1571d55b_initial_version.py0000664000175000017500000020521700000000000026464 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Initial version Revision ID: 8f2f1571d55b Revises: Create Date: 2021-04-13 11:59:19.234123 """ from alembic import op import sqlalchemy as sa from sqlalchemy import dialects from sqlalchemy.ext import compiler from sqlalchemy import types as sqla_types from nova.db import types from nova.objects import keypair # revision identifiers, used by Alembic. 
revision = '8f2f1571d55b' down_revision = None branch_labels = None depends_on = None def Inet(): return sa.String(length=43).with_variant( dialects.postgresql.INET(), 'postgresql', ) def InetSmall(): return sa.String(length=39).with_variant( dialects.postgresql.INET(), 'postgresql', ) # We explicitly name many of our foreignkeys for MySQL so they match Havana @compiler.compiles(sa.ForeignKeyConstraint, 'postgresql') def process(element, compiler, **kw): element.name = None return compiler.visit_foreign_key_constraint(element, **kw) def _create_shadow_tables(connection): meta = sa.MetaData() meta.reflect(bind=connection) table_names = list(meta.tables.keys()) for table_name in table_names: # Skip tables that are not soft-deletable if table_name in ( 'tags', 'resource_providers', 'inventories', 'allocations', 'resource_provider_aggregates', 'console_auth_tokens', ): continue table = sa.Table(table_name, meta, autoload_with=connection) columns = [] for column in table.columns: column_copy = None # NOTE(boris-42): BigInteger is not supported by sqlite so # after copy it will have NullType. The other types that are used # in Nova are supported by SQLite if isinstance(column.type, sqla_types.NullType): column_copy = sa.Column( column.name, sa.BigInteger(), default=0, ) if table_name == 'instances' and column.name == 'locked_by': enum = sa.Enum( 'owner', 'admin', name='shadow_instances0locked_by', ) column_copy = sa.Column(column.name, enum) # NOTE(stephenfin): There were some bugs in the squashed # sqlalchemy-migrate migrations which we need to account for here # 244_increase_user_id_length_volume_usage_cache; this # alteration should apply to shadow tables also # (fixed in migration 16f1fbcab42b) if table_name == 'volume_usage_cache' and column.name == 'user_id': # column type should be String(64) column_copy = sa.Column('user_id', sa.String(36)) # 247_nullable_mismatch; these alterations were not applied to # shadow tables # (wontfix since there could be null entries in the database still) if table_name == 'quota_usages' and column.name == 'resources': # nullable should be False column_copy = sa.Column('resource', sa.String(length=255)) if table_name == 'pci_devices': if column.name == 'deleted': # nullable should be True column_copy = sa.Column( 'deleted', sa.Integer, default=0, nullable=False, ) if column.name == 'product_id': # nullable should be False column_copy = sa.Column('product_id', sa.String(4)) if column.name == 'vendor_id': # nullable should be False column_copy = sa.Column('vendor_id', sa.String(4)) if column.name == 'dev_type': # nullable should be False column_copy = sa.Column('dev_type', sa.String(8)) # 280_add_nullable_false_to_keypairs_name; these alterations were # not applied to the shadow tables # (wontfix since there could be null entries in the database still) if table_name == 'key_pairs' and column.name == 'name': # nullable should be False column_copy = sa.Column('name', sa.String(length=255)) # NOTE(stephenfin): By default, 'sqlalchemy.Enum' will issue a # 'CREATE TYPE' command on PostgreSQL, even if the type already # exists. We work around this by using the PostgreSQL-specific # 'sqlalchemy.dialects.postgresql.ENUM' type and setting # 'create_type' to 'False'. See [1] for more information. 
# # [1] https://stackoverflow.com/a/28894354/613428 if connection.engine.name == 'postgresql': if table_name == 'key_pairs' and column.name == 'type': enum = dialects.postgresql.ENUM( 'ssh', 'x509', name='keypair_types', create_type=False) column_copy = sa.Column( column.name, enum, nullable=False, server_default=keypair.KEYPAIR_TYPE_SSH) elif ( table_name == 'migrations' and column.name == 'migration_type' ): enum = dialects.postgresql.ENUM( 'migration', 'resize', 'live-migration', 'evacuation', name='migration_type', create_type=False) column_copy = sa.Column(column.name, enum, nullable=True) if column_copy is None: # NOTE(stephenfin): Yes, this is private. Yes, this is what we # were told to use. Blame zzzeek! column_copy = column._copy() columns.append(column_copy) op.create_table( 'shadow_' + table_name, meta, *columns, mysql_engine='InnoDB', ) # 252_add_instance_extra_table; we don't create indexes for shadow tables # in general and these should be removed # (fixed in migration 16f1fbcab42b) op.create_index( 'shadow_instance_extra_idx', 'shadow_instance_extra', ['instance_uuid']) # 373_migration_uuid; we shouldn't create indexes for shadow tables # (fixed in migration 16f1fbcab42b) op.create_index( 'shadow_migrations_uuid', 'shadow_migrations', ['uuid'], unique=True) def upgrade(): bind = op.get_bind() op.create_table( 'instances', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('internal_id', sa.Integer), sa.Column('user_id', sa.String(length=255)), sa.Column('project_id', sa.String(length=255)), sa.Column('image_ref', sa.String(length=255)), sa.Column('kernel_id', sa.String(length=255)), sa.Column('ramdisk_id', sa.String(length=255)), sa.Column('launch_index', sa.Integer), sa.Column('key_name', sa.String(length=255)), sa.Column('key_data', types.MediumText()), sa.Column('power_state', sa.Integer), sa.Column('vm_state', sa.String(length=255)), sa.Column('memory_mb', sa.Integer), sa.Column('vcpus', sa.Integer), sa.Column('hostname', sa.String(length=255)), sa.Column('host', sa.String(length=255)), sa.Column('user_data', types.MediumText()), sa.Column('reservation_id', sa.String(length=255)), sa.Column('launched_at', sa.DateTime), sa.Column('terminated_at', sa.DateTime), sa.Column('display_name', sa.String(length=255)), sa.Column('display_description', sa.String(length=255)), sa.Column('availability_zone', sa.String(length=255)), sa.Column('locked', sa.Boolean), sa.Column('os_type', sa.String(length=255)), sa.Column('launched_on', types.MediumText()), sa.Column('instance_type_id', sa.Integer), sa.Column('vm_mode', sa.String(length=255)), sa.Column('uuid', sa.String(length=36), nullable=False), sa.Column('architecture', sa.String(length=255)), sa.Column('root_device_name', sa.String(length=255)), sa.Column('access_ip_v4', InetSmall()), sa.Column('access_ip_v6', InetSmall()), sa.Column('config_drive', sa.String(length=255)), sa.Column('task_state', sa.String(length=255)), sa.Column('default_ephemeral_device', sa.String(length=255)), sa.Column('default_swap_device', sa.String(length=255)), sa.Column('progress', sa.Integer), sa.Column('auto_disk_config', sa.Boolean), sa.Column('shutdown_terminate', sa.Boolean), sa.Column('disable_terminate', sa.Boolean), sa.Column('root_gb', sa.Integer), sa.Column('ephemeral_gb', sa.Integer), sa.Column('cell_name', sa.String(length=255)), sa.Column('node', sa.String(length=255)), sa.Column('deleted', sa.Integer), 
sa.Column( 'locked_by', sa.Enum('owner', 'admin', name='instances0locked_by')), sa.Column('cleaned', sa.Integer, default=0), sa.Column('ephemeral_key_uuid', sa.String(36)), # NOTE(danms): This column originally included default=False. We # discovered in bug #1862205 that this will attempt to rewrite # the entire instances table with that value, which can time out # for large data sets (and does not even abort). # NOTE(stephenfin): This was originally added by sqlalchemy-migrate # which did not generate the constraints sa.Column('hidden', sa.Boolean(create_constraint=False)), sa.Index('uuid', 'uuid', unique=True), sa.Index('instances_reservation_id_idx', 'reservation_id'), sa.Index( 'instances_terminated_at_launched_at_idx', 'terminated_at', 'launched_at'), sa.Index( 'instances_task_state_updated_at_idx', 'task_state', 'updated_at'), sa.Index('instances_uuid_deleted_idx', 'uuid', 'deleted'), sa.Index('instances_host_node_deleted_idx', 'host', 'node', 'deleted'), sa.Index( 'instances_host_deleted_cleaned_idx', 'host', 'deleted', 'cleaned'), sa.Index('instances_project_id_deleted_idx', 'project_id', 'deleted'), sa.Index('instances_deleted_created_at_idx', 'deleted', 'created_at'), sa.Index('instances_project_id_idx', 'project_id'), sa.Index( 'instances_updated_at_project_id_idx', 'updated_at', 'project_id'), sa.UniqueConstraint('uuid', name='uniq_instances0uuid'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'agent_builds', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('hypervisor', sa.String(length=255)), sa.Column('os', sa.String(length=255)), sa.Column('architecture', sa.String(length=255)), sa.Column('version', sa.String(length=255)), sa.Column('url', sa.String(length=255)), sa.Column('md5hash', sa.String(length=255)), sa.Column('deleted', sa.Integer), sa.Index( 'agent_builds_hypervisor_os_arch_idx', 'hypervisor', 'os', 'architecture'), sa.UniqueConstraint( 'hypervisor', 'os', 'architecture', 'deleted', name='uniq_agent_builds0hypervisor0os0architecture0deleted'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'aggregates', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('name', sa.String(length=255)), sa.Column('deleted', sa.Integer), sa.Column('uuid', sa.String(36)), sa.Index('aggregate_uuid_idx', 'uuid'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'aggregate_hosts', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('host', sa.String(length=255)), sa.Column( 'aggregate_id', sa.Integer, sa.ForeignKey('aggregates.id'), nullable=False), sa.Column('deleted', sa.Integer), sa.UniqueConstraint( 'host', 'aggregate_id', 'deleted', name='uniq_aggregate_hosts0host0aggregate_id0deleted'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'aggregate_metadata', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column( 'aggregate_id', sa.Integer, sa.ForeignKey('aggregates.id'), nullable=False), sa.Column('key', sa.String(length=255), nullable=False), sa.Column('value', sa.String(length=255), 
nullable=False), sa.Column('deleted', sa.Integer), sa.Index('aggregate_metadata_key_idx', 'key'), sa.Index('aggregate_metadata_value_idx', 'value'), sa.UniqueConstraint( 'aggregate_id', 'key', 'deleted', name='uniq_aggregate_metadata0aggregate_id0key0deleted'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'allocations', sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('resource_provider_id', sa.Integer, nullable=False), sa.Column('consumer_id', sa.String(36), nullable=False), sa.Column('resource_class_id', sa.Integer, nullable=False), sa.Column('used', sa.Integer, nullable=False), sa.Index( 'allocations_resource_provider_class_used_idx', 'resource_provider_id', 'resource_class_id', 'used'), sa.Index('allocations_consumer_id_idx', 'consumer_id'), sa.Index('allocations_resource_class_id_idx', 'resource_class_id'), mysql_engine='InnoDB', mysql_charset='latin1', ) op.create_table( 'block_device_mapping', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('device_name', sa.String(length=255), nullable=True), sa.Column('delete_on_termination', sa.Boolean), sa.Column('snapshot_id', sa.String(length=36), nullable=True), sa.Column('volume_id', sa.String(length=36), nullable=True), sa.Column('volume_size', sa.Integer), sa.Column('no_device', sa.Boolean), sa.Column('connection_info', types.MediumText()), sa.Column( 'instance_uuid', sa.String(length=36), sa.ForeignKey( 'instances.uuid', name='block_device_mapping_instance_uuid_fkey')), sa.Column('deleted', sa.Integer), sa.Column('source_type', sa.String(length=255), nullable=True), sa.Column('destination_type', sa.String(length=255), nullable=True), sa.Column('guest_format', sa.String(length=255), nullable=True), sa.Column('device_type', sa.String(length=255), nullable=True), sa.Column('disk_bus', sa.String(length=255), nullable=True), sa.Column('boot_index', sa.Integer), sa.Column('image_id', sa.String(length=36), nullable=True), sa.Column('tag', sa.String(255)), sa.Column('attachment_id', sa.String(36), nullable=True), sa.Column('uuid', sa.String(36), nullable=True), sa.Column('volume_type', sa.String(255), nullable=True), sa.Index('snapshot_id', 'snapshot_id'), sa.Index('volume_id', 'volume_id'), sa.Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'), sa.Index( 'block_device_mapping_instance_uuid_device_name_idx', 'instance_uuid', 'device_name'), sa.Index( 'block_device_mapping_instance_uuid_volume_id_idx', 'instance_uuid', 'volume_id'), sa.UniqueConstraint('uuid', name='uniq_block_device_mapping0uuid'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'bw_usage_cache', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('start_period', sa.DateTime, nullable=False), sa.Column('last_refreshed', sa.DateTime), sa.Column('bw_in', sa.BigInteger), sa.Column('bw_out', sa.BigInteger), sa.Column('mac', sa.String(length=255)), sa.Column('uuid', sa.String(length=36)), sa.Column('last_ctr_in', sa.BigInteger()), sa.Column('last_ctr_out', sa.BigInteger()), sa.Column('deleted', sa.Integer), sa.Index( 'bw_usage_cache_uuid_start_period_idx', 'uuid', 'start_period'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'cells', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), 
sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('api_url', sa.String(length=255)), sa.Column('weight_offset', sa.Float), sa.Column('weight_scale', sa.Float), sa.Column('name', sa.String(length=255)), sa.Column('is_parent', sa.Boolean), sa.Column('deleted', sa.Integer), sa.Column('transport_url', sa.String(length=255), nullable=False), sa.UniqueConstraint( 'name', 'deleted', name='uniq_cells0name0deleted'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'certificates', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('user_id', sa.String(length=255)), sa.Column('project_id', sa.String(length=255)), sa.Column('file_name', sa.String(length=255)), sa.Column('deleted', sa.Integer), sa.Index( 'certificates_project_id_deleted_idx', 'project_id', 'deleted'), sa.Index('certificates_user_id_deleted_idx', 'user_id', 'deleted'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'compute_nodes', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('service_id', sa.Integer, nullable=True), sa.Column('vcpus', sa.Integer, nullable=False), sa.Column('memory_mb', sa.Integer, nullable=False), sa.Column('local_gb', sa.Integer, nullable=False), sa.Column('vcpus_used', sa.Integer, nullable=False), sa.Column('memory_mb_used', sa.Integer, nullable=False), sa.Column('local_gb_used', sa.Integer, nullable=False), sa.Column('hypervisor_type', types.MediumText(), nullable=False), sa.Column('hypervisor_version', sa.Integer, nullable=False), sa.Column('cpu_info', types.MediumText(), nullable=False), sa.Column('disk_available_least', sa.Integer), sa.Column('free_ram_mb', sa.Integer), sa.Column('free_disk_gb', sa.Integer), sa.Column('current_workload', sa.Integer), sa.Column('running_vms', sa.Integer), sa.Column('hypervisor_hostname', sa.String(length=255)), sa.Column('deleted', sa.Integer), sa.Column('host_ip', InetSmall()), sa.Column('supported_instances', sa.Text), sa.Column('pci_stats', sa.Text, nullable=True), sa.Column('metrics', sa.Text, nullable=True), sa.Column('extra_resources', sa.Text, nullable=True), sa.Column('stats', sa.Text, default='{}'), sa.Column('numa_topology', sa.Text, nullable=True), sa.Column('host', sa.String(255), nullable=True), sa.Column('ram_allocation_ratio', sa.Float, nullable=True), sa.Column('cpu_allocation_ratio', sa.Float, nullable=True), sa.Column('uuid', sa.String(36), nullable=True), sa.Column('disk_allocation_ratio', sa.Float, nullable=True), sa.Column('mapped', sa.Integer, default=0, nullable=True), sa.Index('compute_nodes_uuid_idx', 'uuid', unique=True), sa.UniqueConstraint( 'host', 'hypervisor_hostname', 'deleted', name='uniq_compute_nodes0host0hypervisor_hostname0deleted', ), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'console_auth_tokens', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('token_hash', sa.String(255), nullable=False), sa.Column('console_type', sa.String(255), nullable=False), sa.Column('host', sa.String(255), nullable=False), sa.Column('port', sa.Integer, nullable=False), sa.Column('internal_access_path', sa.String(255)), sa.Column('instance_uuid', sa.String(36), nullable=False), 
sa.Column('expires', sa.Integer, nullable=False), sa.Column('access_url_base', sa.String(255), nullable=True), sa.Index('console_auth_tokens_instance_uuid_idx', 'instance_uuid'), sa.Index('console_auth_tokens_host_expires_idx', 'host', 'expires'), sa.Index('console_auth_tokens_token_hash_idx', 'token_hash'), sa.Index( 'console_auth_tokens_token_hash_instance_uuid_idx', 'token_hash', 'instance_uuid'), sa.UniqueConstraint( 'token_hash', name='uniq_console_auth_tokens0token_hash'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'console_pools', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('address', InetSmall()), sa.Column('username', sa.String(length=255)), sa.Column('password', sa.String(length=255)), sa.Column('console_type', sa.String(length=255)), sa.Column('public_hostname', sa.String(length=255)), sa.Column('host', sa.String(length=255)), sa.Column('compute_host', sa.String(length=255)), sa.Column('deleted', sa.Integer), sa.UniqueConstraint( 'host', 'console_type', 'compute_host', 'deleted', name='uniq_console_pools0host0console_type0compute_host0deleted'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'consoles', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('instance_name', sa.String(length=255)), sa.Column('password', sa.String(length=255)), sa.Column('port', sa.Integer), sa.Column('pool_id', sa.Integer, sa.ForeignKey('console_pools.id')), sa.Column( 'instance_uuid', sa.String(length=36), sa.ForeignKey( 'instances.uuid', name='consoles_instance_uuid_fkey')), sa.Column('deleted', sa.Integer), sa.Index('consoles_instance_uuid_idx', 'instance_uuid'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'dns_domains', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Boolean), sa.Column( 'domain', sa.String(length=255), primary_key=True, nullable=False), sa.Column('scope', sa.String(length=255)), sa.Column('availability_zone', sa.String(length=255)), sa.Column('project_id', sa.String(length=255)), sa.Index('dns_domains_domain_deleted_idx', 'domain', 'deleted'), sa.Index('dns_domains_project_id_idx', 'project_id'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'fixed_ips', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('address', InetSmall()), sa.Column('network_id', sa.Integer), sa.Column('allocated', sa.Boolean), sa.Column('leased', sa.Boolean), sa.Column('reserved', sa.Boolean), sa.Column('virtual_interface_id', sa.Integer), sa.Column('host', sa.String(length=255)), sa.Column( 'instance_uuid', sa.String(length=36), sa.ForeignKey( 'instances.uuid', name='fixed_ips_instance_uuid_fkey'), ), sa.Column('deleted', sa.Integer), sa.Index('network_id', 'network_id'), sa.Index('address', 'address'), sa.Index('fixed_ips_instance_uuid_fkey', 'instance_uuid'), sa.Index( 'fixed_ips_virtual_interface_id_fkey', 'virtual_interface_id'), sa.Index('fixed_ips_host_idx', 'host'), sa.Index( 'fixed_ips_network_id_host_deleted_idx', 'network_id', 'host', 'deleted'), sa.Index( 'fixed_ips_address_reserved_network_id_deleted_idx', 'address', 
'reserved', 'network_id', 'deleted'), sa.Index( 'fixed_ips_deleted_allocated_idx', 'address', 'deleted', 'allocated'), sa.Index( 'fixed_ips_deleted_allocated_updated_at_idx', 'deleted', 'allocated', 'updated_at'), sa.UniqueConstraint( 'address', 'deleted', name='uniq_fixed_ips0address0deleted'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'floating_ips', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('address', InetSmall()), sa.Column('fixed_ip_id', sa.Integer), sa.Column('project_id', sa.String(length=255)), sa.Column('host', sa.String(length=255)), sa.Column('auto_assigned', sa.Boolean), sa.Column('pool', sa.String(length=255)), sa.Column('interface', sa.String(length=255)), sa.Column('deleted', sa.Integer), sa.Index('fixed_ip_id', 'fixed_ip_id'), sa.Index('floating_ips_host_idx', 'host'), sa.Index('floating_ips_project_id_idx', 'project_id'), sa.Index( 'floating_ips_pool_deleted_fixed_ip_id_project_id_idx', 'pool', 'deleted', 'fixed_ip_id', 'project_id'), sa.UniqueConstraint( 'address', 'deleted', name='uniq_floating_ips0address0deleted'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'instance_faults', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column( 'instance_uuid', sa.String(length=36), sa.ForeignKey( 'instances.uuid', name='fk_instance_faults_instance_uuid')), sa.Column('code', sa.Integer, nullable=False), sa.Column('message', sa.String(length=255)), sa.Column('details', types.MediumText()), sa.Column('host', sa.String(length=255)), sa.Column('deleted', sa.Integer), sa.Index('instance_faults_host_idx', 'host'), sa.Index( 'instance_faults_instance_uuid_deleted_created_at_idx', 'instance_uuid', 'deleted', 'created_at'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'instance_id_mappings', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('uuid', sa.String(36), nullable=False), sa.Column('deleted', sa.Integer), sa.Index('ix_instance_id_mappings_uuid', 'uuid'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'instance_info_caches', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('network_info', types.MediumText()), sa.Column( 'instance_uuid', sa.String(length=36), sa.ForeignKey( 'instances.uuid', name='instance_info_caches_instance_uuid_fkey'), nullable=False), sa.Column('deleted', sa.Integer), sa.UniqueConstraint( 'instance_uuid', name='uniq_instance_info_caches0instance_uuid'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'instance_groups', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Integer), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('user_id', sa.String(length=255)), sa.Column('project_id', sa.String(length=255)), sa.Column('uuid', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=255)), sa.UniqueConstraint( 'uuid', 'deleted', name='uniq_instance_groups0uuid0deleted'), mysql_engine='InnoDB', 
mysql_charset='utf8', ) op.create_table( 'instance_group_policy', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Integer), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('policy', sa.String(length=255)), sa.Column( 'group_id', sa.Integer, sa.ForeignKey('instance_groups.id'), nullable=False), sa.Index('instance_group_policy_policy_idx', 'policy'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'instance_group_member', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Integer), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('instance_id', sa.String(length=255)), sa.Column( 'group_id', sa.Integer, sa.ForeignKey('instance_groups.id'), nullable=False), sa.Index( 'instance_group_member_instance_idx', 'instance_id'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'instance_metadata', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('key', sa.String(length=255)), sa.Column('value', sa.String(length=255)), sa.Column( 'instance_uuid', sa.String(length=36), sa.ForeignKey( 'instances.uuid', name='instance_metadata_instance_uuid_fkey'), nullable=True), sa.Column('deleted', sa.Integer), sa.Index('instance_metadata_instance_uuid_idx', 'instance_uuid'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'instance_system_metadata', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column( 'instance_uuid', sa.String(length=36), sa.ForeignKey( 'instances.uuid', name='instance_system_metadata_ibfk_1'), nullable=False), sa.Column('key', sa.String(length=255), nullable=False), sa.Column('value', sa.String(length=255)), sa.Column('deleted', sa.Integer), sa.Index('instance_uuid', 'instance_uuid'), mysql_engine='InnoDB', mysql_charset='utf8' ) # TODO(stephenfin): Remove this table since it has been moved to the API DB op.create_table( 'instance_types', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('name', sa.String(length=255)), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('memory_mb', sa.Integer, nullable=False), sa.Column('vcpus', sa.Integer, nullable=False), sa.Column('swap', sa.Integer, nullable=False), sa.Column('vcpu_weight', sa.Integer), sa.Column('flavorid', sa.String(length=255)), sa.Column('rxtx_factor', sa.Float), sa.Column('root_gb', sa.Integer), sa.Column('ephemeral_gb', sa.Integer), sa.Column('disabled', sa.Boolean), sa.Column('is_public', sa.Boolean), sa.Column('deleted', sa.Integer), sa.UniqueConstraint( 'name', 'deleted', name='uniq_instance_types0name0deleted'), sa.UniqueConstraint( 'flavorid', 'deleted', name='uniq_instance_types0flavorid0deleted'), mysql_engine='InnoDB', mysql_charset='utf8' ) # TODO(stephenfin): Remove this table since it has been moved to the API DB op.create_table( 'instance_type_extra_specs', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column( 'instance_type_id', sa.Integer, 
sa.ForeignKey('instance_types.id'), nullable=False), sa.Column('key', sa.String(length=255)), sa.Column('value', sa.String(length=255)), sa.Column('deleted', sa.Integer), sa.Index( 'instance_type_extra_specs_instance_type_id_key_idx', 'instance_type_id', 'key'), sa.UniqueConstraint( 'instance_type_id', 'key', 'deleted', name='uniq_instance_type_extra_specs0instance_type_id0key0deleted' ), mysql_engine='InnoDB', mysql_charset='utf8' ) # TODO(stephenfin): Remove this table since it has been moved to the API DB op.create_table( 'instance_type_projects', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column( 'instance_type_id', sa.Integer, sa.ForeignKey( 'instance_types.id', name='instance_type_projects_ibfk_1'), nullable=False), sa.Column('project_id', sa.String(length=255)), sa.Column('deleted', sa.Integer), sa.UniqueConstraint( 'instance_type_id', 'project_id', 'deleted', name='uniq_instance_type_projects0instance_type_id0project_id' '0deleted'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'instance_actions', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('action', sa.String(length=255)), sa.Column( 'instance_uuid', sa.String(length=36), sa.ForeignKey( 'instances.uuid', name='fk_instance_actions_instance_uuid')), sa.Column('request_id', sa.String(length=255)), sa.Column('user_id', sa.String(length=255)), sa.Column('project_id', sa.String(length=255)), sa.Column('start_time', sa.DateTime), sa.Column('finish_time', sa.DateTime), sa.Column('message', sa.String(length=255)), sa.Column('deleted', sa.Integer), sa.Index('instance_uuid_idx', 'instance_uuid'), sa.Index('request_id_idx', 'request_id'), sa.Index( 'instance_actions_instance_uuid_updated_at_idx', 'instance_uuid', 'updated_at'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'instance_actions_events', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('event', sa.String(length=255)), sa.Column( 'action_id', sa.Integer, sa.ForeignKey('instance_actions.id')), sa.Column('start_time', sa.DateTime), sa.Column('finish_time', sa.DateTime), sa.Column('result', sa.String(length=255)), sa.Column('traceback', sa.Text), sa.Column('deleted', sa.Integer), sa.Column('host', sa.String(255)), sa.Column('details', sa.Text), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'instance_extra', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Integer), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column( 'instance_uuid', sa.String(length=36), sa.ForeignKey( 'instances.uuid', name='instance_extra_instance_uuid_fkey'), nullable=False), sa.Column('numa_topology', sa.Text, nullable=True), sa.Column('pci_requests', sa.Text, nullable=True), sa.Column('flavor', sa.Text, nullable=True), sa.Column('vcpu_model', sa.Text, nullable=True), sa.Column('migration_context', sa.Text, nullable=True), sa.Column('keypairs', sa.Text, nullable=True), sa.Column('device_metadata', sa.Text, nullable=True), sa.Column('trusted_certs', sa.Text, nullable=True), sa.Column('vpmems', sa.Text, nullable=True), sa.Column('resources', 
sa.Text, nullable=True), sa.Index('instance_extra_idx', 'instance_uuid'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'inventories', sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('resource_provider_id', sa.Integer, nullable=False), sa.Column('resource_class_id', sa.Integer, nullable=False), sa.Column('total', sa.Integer, nullable=False), sa.Column('reserved', sa.Integer, nullable=False), sa.Column('min_unit', sa.Integer, nullable=False), sa.Column('max_unit', sa.Integer, nullable=False), sa.Column('step_size', sa.Integer, nullable=False), sa.Column('allocation_ratio', sa.Float, nullable=False), sa.Index( 'inventories_resource_provider_id_idx', 'resource_provider_id'), sa.Index( 'inventories_resource_class_id_idx', 'resource_class_id'), sa.Index( 'inventories_resource_provider_resource_class_idx', 'resource_provider_id', 'resource_class_id'), sa.UniqueConstraint( 'resource_provider_id', 'resource_class_id', name='uniq_inventories0resource_provider_resource_class'), mysql_engine='InnoDB', mysql_charset='latin1', ) op.create_table( 'key_pairs', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('user_id', sa.String(length=255)), sa.Column('fingerprint', sa.String(length=255)), sa.Column('public_key', types.MediumText()), sa.Column('deleted', sa.Integer), sa.Column( 'type', sa.Enum('ssh', 'x509', name='keypair_types'), nullable=False, server_default=keypair.KEYPAIR_TYPE_SSH), sa.UniqueConstraint( 'user_id', 'name', 'deleted', name='uniq_key_pairs0user_id0name0deleted'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'migrations', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('source_compute', sa.String(length=255)), sa.Column('dest_compute', sa.String(length=255)), sa.Column('dest_host', sa.String(length=255)), sa.Column('status', sa.String(length=255)), sa.Column( 'instance_uuid', sa.String(length=36), sa.ForeignKey( 'instances.uuid', name='fk_migrations_instance_uuid')), sa.Column('old_instance_type_id', sa.Integer), sa.Column('new_instance_type_id', sa.Integer), sa.Column('source_node', sa.String(length=255)), sa.Column('dest_node', sa.String(length=255)), sa.Column('deleted', sa.Integer), sa.Column( 'migration_type', sa.Enum( 'migration', 'resize', 'live-migration', 'evacuation', name='migration_type'), nullable=True), # NOTE(stephenfin): This was originally added by sqlalchemy-migrate # which did not generate the constraints sa.Column( 'hidden', sa.Boolean(create_constraint=False), default=False), sa.Column('memory_total', sa.BigInteger, nullable=True), sa.Column('memory_processed', sa.BigInteger, nullable=True), sa.Column('memory_remaining', sa.BigInteger, nullable=True), sa.Column('disk_total', sa.BigInteger, nullable=True), sa.Column('disk_processed', sa.BigInteger, nullable=True), sa.Column('disk_remaining', sa.BigInteger, nullable=True), sa.Column('uuid', sa.String(36)), # NOTE(stephenfin): This was originally added by sqlalchemy-migrate # which did not generate the constraints sa.Column( 'cross_cell_move', sa.Boolean(create_constraint=False), default=False), sa.Column('user_id', sa.String(255), nullable=True), sa.Column('project_id', sa.String(255), nullable=True), sa.Index('migrations_uuid', 
'uuid', unique=True), sa.Index( 'migrations_instance_uuid_and_status_idx', 'deleted', 'instance_uuid', 'status'), sa.Index('migrations_updated_at_idx', 'updated_at'), # mysql-specific index by leftmost 100 chars. (mysql gets angry if the # index key length is too long.) sa.Index( 'migrations_by_host_nodes_and_status_idx', 'deleted', 'source_compute', 'dest_compute', 'source_node', 'dest_node', 'status', mysql_length={ 'source_compute': 100, 'dest_compute': 100, 'source_node': 100, 'dest_node': 100, }), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'networks', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('injected', sa.Boolean), sa.Column('cidr', Inet()), sa.Column('netmask', InetSmall()), sa.Column('bridge', sa.String(length=255)), sa.Column('gateway', InetSmall()), sa.Column('broadcast', InetSmall()), sa.Column('dns1', InetSmall()), sa.Column('vlan', sa.Integer), sa.Column('vpn_public_address', InetSmall()), sa.Column('vpn_public_port', sa.Integer), sa.Column('vpn_private_address', InetSmall()), sa.Column('dhcp_start', InetSmall()), sa.Column('project_id', sa.String(length=255)), sa.Column('host', sa.String(length=255)), sa.Column('cidr_v6', Inet()), sa.Column('gateway_v6', InetSmall()), sa.Column('label', sa.String(length=255)), sa.Column('netmask_v6', InetSmall()), sa.Column('bridge_interface', sa.String(length=255)), sa.Column('multi_host', sa.Boolean), sa.Column('dns2', InetSmall()), sa.Column('uuid', sa.String(length=36)), sa.Column('priority', sa.Integer), sa.Column('rxtx_base', sa.Integer), sa.Column('deleted', sa.Integer), sa.Column('mtu', sa.Integer), sa.Column('dhcp_server', types.IPAddress), # NOTE(stephenfin): These were originally added by sqlalchemy-migrate # which did not generate the constraints sa.Column( 'enable_dhcp', sa.Boolean(create_constraint=False), default=True), sa.Column( 'share_address', sa.Boolean(create_constraint=False), default=False), sa.Index('networks_host_idx', 'host'), sa.Index('networks_cidr_v6_idx', 'cidr_v6'), sa.Index('networks_bridge_deleted_idx', 'bridge', 'deleted'), sa.Index('networks_project_id_deleted_idx', 'project_id', 'deleted'), sa.Index( 'networks_uuid_project_id_deleted_idx', 'uuid', 'project_id', 'deleted'), sa.Index('networks_vlan_deleted_idx', 'vlan', 'deleted'), sa.UniqueConstraint( 'vlan', 'deleted', name='uniq_networks0vlan0deleted'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'pci_devices', sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Integer, default=0, nullable=True), sa.Column('id', sa.Integer, primary_key=True), sa.Column( 'compute_node_id', sa.Integer, sa.ForeignKey( 'compute_nodes.id', name='pci_devices_compute_node_id_fkey'), nullable=False), sa.Column('address', sa.String(12), nullable=False), sa.Column('product_id', sa.String(4), nullable=False), sa.Column('vendor_id', sa.String(4), nullable=False), sa.Column('dev_type', sa.String(8), nullable=False), sa.Column('dev_id', sa.String(255)), sa.Column('label', sa.String(255), nullable=False), sa.Column('status', sa.String(36), nullable=False), sa.Column('extra_info', sa.Text, nullable=True), sa.Column('instance_uuid', sa.String(36), nullable=True), sa.Column('request_id', sa.String(36), nullable=True), sa.Column('numa_node', sa.Integer, default=None), 
sa.Column('parent_addr', sa.String(12), nullable=True), sa.Column('uuid', sa.String(36)), sa.Index( 'ix_pci_devices_instance_uuid_deleted', 'instance_uuid', 'deleted'), sa.Index( 'ix_pci_devices_compute_node_id_deleted', 'compute_node_id', 'deleted'), sa.Index( 'ix_pci_devices_compute_node_id_parent_addr_deleted', 'compute_node_id', 'parent_addr', 'deleted'), sa.UniqueConstraint( 'compute_node_id', 'address', 'deleted', name='uniq_pci_devices0compute_node_id0address0deleted'), mysql_engine='InnoDB', mysql_charset='utf8') op.create_table( 'provider_fw_rules', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('protocol', sa.String(length=5)), sa.Column('from_port', sa.Integer), sa.Column('to_port', sa.Integer), sa.Column('cidr', Inet()), sa.Column('deleted', sa.Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'quota_classes', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('class_name', sa.String(length=255)), sa.Column('resource', sa.String(length=255)), sa.Column('hard_limit', sa.Integer), sa.Column('deleted', sa.Integer), sa.Index('ix_quota_classes_class_name', 'class_name'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'quota_usages', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('project_id', sa.String(length=255)), sa.Column('resource', sa.String(length=255), nullable=False), sa.Column('in_use', sa.Integer, nullable=False), sa.Column('reserved', sa.Integer, nullable=False), sa.Column('until_refresh', sa.Integer), sa.Column('deleted', sa.Integer), sa.Column('user_id', sa.String(length=255)), sa.Index('ix_quota_usages_project_id', 'project_id'), sa.Index('ix_quota_usages_user_id_deleted', 'user_id', 'deleted'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'quotas', sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('project_id', sa.String(length=255)), sa.Column('resource', sa.String(length=255), nullable=False), sa.Column('hard_limit', sa.Integer), sa.Column('deleted', sa.Integer), sa.UniqueConstraint( 'project_id', 'resource', 'deleted', name='uniq_quotas0project_id0resource0deleted'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'project_user_quotas', sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Integer), sa.Column('user_id', sa.String(length=255), nullable=False), sa.Column('project_id', sa.String(length=255), nullable=False), sa.Column('resource', sa.String(length=255), nullable=False), sa.Column('hard_limit', sa.Integer, nullable=True), sa.Index( 'project_user_quotas_project_id_deleted_idx', 'project_id', 'deleted'), sa.Index( 'project_user_quotas_user_id_deleted_idx', 'user_id', 'deleted'), sa.UniqueConstraint( 'user_id', 'project_id', 'resource', 'deleted', name='uniq_project_user_quotas0user_id0project_id0resource0' 'deleted'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'reservations', 
sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('uuid', sa.String(length=36), nullable=False), sa.Column( 'usage_id', sa.Integer, sa.ForeignKey('quota_usages.id', name='reservations_ibfk_1'), nullable=False), sa.Column('project_id', sa.String(length=255)), sa.Column('resource', sa.String(length=255)), sa.Column('delta', sa.Integer, nullable=False), sa.Column('expire', sa.DateTime), sa.Column('deleted', sa.Integer), sa.Column('user_id', sa.String(length=255)), sa.Index('ix_reservations_project_id', 'project_id'), sa.Index('ix_reservations_user_id_deleted', 'user_id', 'deleted'), sa.Index('reservations_uuid_idx', 'uuid'), sa.Index('reservations_deleted_expire_idx', 'deleted', 'expire'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'resource_providers', sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('uuid', sa.String(36), nullable=False), sa.Column('name', sa.Unicode(200), nullable=True), sa.Column('generation', sa.Integer, default=0), sa.Column('can_host', sa.Integer, default=0), sa.UniqueConstraint('uuid', name='uniq_resource_providers0uuid'), sa.UniqueConstraint('name', name='uniq_resource_providers0name'), sa.Index('resource_providers_name_idx', 'name'), sa.Index('resource_providers_uuid_idx', 'uuid'), mysql_engine='InnoDB', mysql_charset='latin1', ) op.create_table( 'resource_provider_aggregates', sa.Column( 'resource_provider_id', sa.Integer, primary_key=True, nullable=False), sa.Column( 'aggregate_id', sa.Integer, primary_key=True, nullable=False), sa.Index( 'resource_provider_aggregates_aggregate_id_idx', 'aggregate_id'), mysql_engine='InnoDB', mysql_charset='latin1', ) op.create_table( 's3_images', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('uuid', sa.String(length=36), nullable=False), sa.Column('deleted', sa.Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'security_groups', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('name', sa.String(length=255)), sa.Column('description', sa.String(length=255)), sa.Column('user_id', sa.String(length=255)), sa.Column('project_id', sa.String(length=255)), sa.Column('deleted', sa.Integer), sa.UniqueConstraint( 'project_id', 'name', 'deleted', name='uniq_security_groups0project_id0name0deleted'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'security_group_instance_association', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column( 'security_group_id', sa.Integer, sa.ForeignKey( 'security_groups.id', name='security_group_instance_association_ibfk_1'), ), sa.Column( 'instance_uuid', sa.String(length=36), sa.ForeignKey( 'instances.uuid', name='security_group_instance_association_instance_uuid_fkey'), ), sa.Column('deleted', sa.Integer), sa.Index( 'security_group_instance_association_instance_uuid_idx', 'instance_uuid'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'security_group_rules', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), 
sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column( 'parent_group_id', sa.Integer, sa.ForeignKey('security_groups.id')), sa.Column('protocol', sa.String(length=255)), sa.Column('from_port', sa.Integer), sa.Column('to_port', sa.Integer), sa.Column('cidr', Inet()), sa.Column('group_id', sa.Integer, sa.ForeignKey('security_groups.id')), sa.Column('deleted', sa.Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'security_group_default_rules', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Integer, default=0), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('protocol', sa.String(length=5)), sa.Column('from_port', sa.Integer), sa.Column('to_port', sa.Integer), sa.Column('cidr', Inet()), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'services', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('host', sa.String(length=255)), sa.Column('binary', sa.String(length=255)), sa.Column('topic', sa.String(length=255)), sa.Column('report_count', sa.Integer, nullable=False), sa.Column('disabled', sa.Boolean), sa.Column('deleted', sa.Integer), sa.Column('disabled_reason', sa.String(length=255)), sa.Column('last_seen_up', sa.DateTime, nullable=True), # NOTE(stephenfin): This was originally added by sqlalchemy-migrate # which did not generate the constraints sa.Column( 'forced_down', sa.Boolean(create_constraint=False), default=False), sa.Column('version', sa.Integer, default=0), sa.Column('uuid', sa.String(36), nullable=True), sa.Index('services_uuid_idx', 'uuid', unique=True), sa.UniqueConstraint( 'host', 'topic', 'deleted', name='uniq_services0host0topic0deleted'), sa.UniqueConstraint( 'host', 'binary', 'deleted', name='uniq_services0host0binary0deleted'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'snapshot_id_mappings', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('uuid', sa.String(length=36), nullable=False), sa.Column('deleted', sa.Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'snapshots', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column( 'id', sa.String(length=36), primary_key=True, nullable=False), sa.Column('volume_id', sa.String(length=36), nullable=False), sa.Column('user_id', sa.String(length=255)), sa.Column('project_id', sa.String(length=255)), sa.Column('status', sa.String(length=255)), sa.Column('progress', sa.String(length=255)), sa.Column('volume_size', sa.Integer), sa.Column('scheduled_at', sa.DateTime), sa.Column('display_name', sa.String(length=255)), sa.Column('display_description', sa.String(length=255)), sa.Column('deleted', sa.String(length=36)), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'tags', sa.Column( 'resource_id', sa.String(36), primary_key=True, nullable=False), sa.Column('tag', sa.Unicode(80), primary_key=True, nullable=False), sa.Index('tags_tag_idx', 'tag'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'task_log', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, 
primary_key=True, nullable=False), sa.Column('task_name', sa.String(length=255), nullable=False), sa.Column('state', sa.String(length=255), nullable=False), sa.Column('host', sa.String(length=255), nullable=False), sa.Column('period_beginning', sa.DateTime, nullable=False), sa.Column('period_ending', sa.DateTime, nullable=False), sa.Column('message', sa.String(length=255), nullable=False), sa.Column('task_items', sa.Integer), sa.Column('errors', sa.Integer), sa.Column('deleted', sa.Integer), sa.Index('ix_task_log_period_beginning', 'period_beginning'), sa.Index('ix_task_log_host', 'host'), sa.Index('ix_task_log_period_ending', 'period_ending'), sa.UniqueConstraint( 'task_name', 'host', 'period_beginning', 'period_ending', name='uniq_task_log0task_name0host0period_beginning0period_ending', ), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'virtual_interfaces', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('address', sa.String(length=255)), sa.Column('network_id', sa.Integer), sa.Column('uuid', sa.String(length=36)), sa.Column( 'instance_uuid', sa.String(length=36), sa.ForeignKey( 'instances.uuid', name='virtual_interfaces_instance_uuid_fkey'), nullable=True), sa.Column('deleted', sa.Integer), sa.Column('tag', sa.String(255)), sa.Index('virtual_interfaces_instance_uuid_fkey', 'instance_uuid'), sa.Index('virtual_interfaces_network_id_idx', 'network_id'), sa.Index('virtual_interfaces_uuid_idx', 'uuid'), sa.UniqueConstraint( 'address', 'deleted', name='uniq_virtual_interfaces0address0deleted'), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'volume_id_mappings', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('uuid', sa.String(length=36), nullable=False), sa.Column('deleted', sa.Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) op.create_table( 'volume_usage_cache', sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('id', sa.Integer(), primary_key=True, nullable=False), sa.Column('volume_id', sa.String(36), nullable=False), sa.Column('tot_last_refreshed', sa.DateTime(timezone=False)), sa.Column('tot_reads', sa.BigInteger(), default=0), sa.Column('tot_read_bytes', sa.BigInteger(), default=0), sa.Column('tot_writes', sa.BigInteger(), default=0), sa.Column('tot_write_bytes', sa.BigInteger(), default=0), sa.Column('curr_last_refreshed', sa.DateTime(timezone=False)), sa.Column('curr_reads', sa.BigInteger(), default=0), sa.Column('curr_read_bytes', sa.BigInteger(), default=0), sa.Column('curr_writes', sa.BigInteger(), default=0), sa.Column('curr_write_bytes', sa.BigInteger(), default=0), sa.Column('deleted', sa.Integer), sa.Column('instance_uuid', sa.String(length=36)), sa.Column('project_id', sa.String(length=36)), sa.Column('user_id', sa.String(length=64)), sa.Column('availability_zone', sa.String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) # MySQL specific indexes if bind.engine.name == 'mysql': # NOTE(stephenfin): For some reason, we have to put this within the if # statement to avoid it being evaluated for the sqlite case. Even # though we don't call create except in the MySQL case... 
Failure to do # this will result in the following ugly error message: # # sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) no such # index: instance_type_id # # Yeah, I don't get it either... op.create_index( 'instance_type_id', 'instance_type_projects', ['instance_type_id'], ), op.create_index('usage_id', 'reservations', ['usage_id']), op.create_index( 'security_group_id', 'security_group_instance_association', ['security_group_id'], ), if bind.engine.name == 'mysql': # Set default DB charset to UTF8. op.execute( 'ALTER DATABASE `%s` DEFAULT CHARACTER SET utf8' % bind.engine.url.database) # NOTE(cdent): The resource_providers table is defined as latin1 to be # more efficient. Now we need the name column to be UTF8. We modify it # here otherwise the declarative handling in sqlalchemy gets confused. op.execute( 'ALTER TABLE resource_providers MODIFY name ' 'VARCHAR(200) CHARACTER SET utf8') _create_shadow_tables(bind) # NOTE(stephenfin): There were some bugs in the squashed sqlalchemy-migrate # migrations which we need to account for here # 298_mysql_extra_specs_binary_collation; we should update the shadow table # also # (fixed in migration 16f1fbcab42b) if bind.engine.name == 'mysql': # Use binary collation for extra specs table op.execute( 'ALTER TABLE instance_type_extra_specs ' 'CONVERT TO CHARACTER SET utf8 ' 'COLLATE utf8_bin' ) def downgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/main/migrations/versions/960aac0e09ea_de_duplicate_indexes_in_instances__.py0000664000175000017500000000214700000000000032603 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """de-duplicate_indexes_in_instances__console_auth_tokens Revision ID: 960aac0e09ea Revises: ccb0fa1a2252 Create Date: 2022-09-15 17:00:23.175991 """ from alembic import op # revision identifiers, used by Alembic. revision = '960aac0e09ea' down_revision = 'ccb0fa1a2252' branch_labels = None depends_on = None def upgrade(): with op.batch_alter_table('console_auth_tokens', schema=None) as batch_op: batch_op.drop_index('console_auth_tokens_token_hash_idx') with op.batch_alter_table('instances', schema=None) as batch_op: batch_op.drop_index('uuid') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/main/migrations/versions/ccb0fa1a2252_add_encryption_fields_to_.py0000664000175000017500000000345700000000000030571 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Add encryption fields to BlockDeviceMapping Revision ID: ccb0fa1a2252 Revises: 16f1fbcab42b Create Date: 2022-01-12 15:22:47.524285 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'ccb0fa1a2252' down_revision = '16f1fbcab42b' branch_labels = None depends_on = None def upgrade(): for prefix in ('', 'shadow_'): table_name = prefix + 'block_device_mapping' with op.batch_alter_table(table_name, schema=None) as batch_op: batch_op.add_column( sa.Column( 'encrypted', sa.Boolean(), nullable=True, ) ) batch_op.add_column( sa.Column( 'encryption_secret_uuid', sa.String(length=36), nullable=True, ) ) batch_op.add_column( sa.Column('encryption_format', sa.String(length=128), nullable=True, ) ) batch_op.add_column( sa.Column('encryption_options', sa.String(length=4096), nullable=True, ) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/main/migrations/versions/d60bddf7a903_add_constraint_instance_share_avoid_.py0000664000175000017500000000437300000000000033003 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_constraint_instance_share_avoid_duplicates Revision ID: d60bddf7a903 Revises: 13863f4e1612 Create Date: 2024-03-06 17:05:29.361678 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'd60bddf7a903' down_revision = '13863f4e1612' branch_labels = None depends_on = None def upgrade(): op.drop_table("share_mapping") op.create_table( "share_mapping", sa.Column("created_at", sa.DateTime), sa.Column("updated_at", sa.DateTime), sa.Column( "id", sa.BigInteger().with_variant(sa.Integer, "sqlite"), primary_key=True, autoincrement=True, nullable=False, ), sa.Column("uuid", sa.String(36)), sa.Column( "instance_uuid", sa.String(length=36), sa.ForeignKey( "instances.uuid", name="share_mapping_instance_uuid_fkey" ), ), sa.Column("share_id", sa.String(length=36)), sa.Column("status", sa.String(length=32)), sa.Column("tag", sa.String(48)), sa.Column("export_location", sa.Text), sa.Column("share_proto", sa.String(32)), sa.UniqueConstraint( "instance_uuid", "share_id", name="uniq_key_pairs0instance_uuid0share_id", ), sa.UniqueConstraint( "instance_uuid", "tag", name="uniq_key_pairs0instance_uuid0tag", ), sa.Index("share_idx", "share_id"), sa.Index( "share_mapping_instance_uuid_share_id_idx", "instance_uuid", "share_id", ), mysql_engine="InnoDB", mysql_charset="utf8", ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/main/models.py0000664000175000017500000013617500000000000016710 0ustar00zuulzuul00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. 
# Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SQLAlchemy models for nova data. """ from oslo_config import cfg from oslo_db.sqlalchemy import models from oslo_utils import timeutils import sqlalchemy as sa import sqlalchemy.dialects.mysql from sqlalchemy import orm from sqlalchemy import schema from nova.db import types CONF = cfg.CONF # NOTE(stephenfin): This is a list of fields that have been removed from # various SQLAlchemy models but which still exist in the underlying tables. Our # upgrade policy dictates that we remove fields from models at least one cycle # before we remove the column from the underlying table. Not doing so would # prevent us from applying the new database schema before rolling out any of # the new code since the old code could attempt to access data in the removed # columns. Alembic identifies this temporary mismatch between the models and # underlying tables and attempts to resolve it. Tell it instead to ignore these # until we're ready to remove them ourselves. REMOVED_COLUMNS = { ('instances', 'internal_id'), ('instance_extra', 'vpmems'), } # NOTE(stephenfin): A list of foreign key constraints that were removed when # the column they were covering was removed. REMOVED_FKEYS = [] # NOTE(stephenfin): A list of entire models that have been removed. REMOVED_TABLES = { # Tables that were moved to the API database in Newton. The models # were removed in Y and the tables can be dropped in Z or later 'aggregate_hosts', 'aggregate_metadata', 'aggregates', 'allocations', 'instance_group_member', 'instance_group_policy', 'instance_groups', 'instance_type_extra_specs', 'instance_type_projects', 'instance_types', 'inventories', 'key_pairs', 'resource_provider_aggregates', 'resource_providers', # Tables for the removed XenAPI virt driver. The models were # removed in Y and the tables can be dropped in Z or later 'agent_builds', 'bw_usage_cache', 'console_pools', 'consoles', # Tables for the removed cells v1 feature. The model was removed in # Y and the table can be dropped in Z or later 'cells', # Tables for the removed volume snapshot feature. The model was # removed in Y and the table can be dropped in Z or later 'snapshots', # Tables for the removed in-tree EC2 API. The models were removed # in Y and the table can be dropped in Z or later 'snapshot_id_mappings', 'volume_id_mappings', # Tables for the removed nova-network feature. The models were # removed in Y and the tables can be dropped in Z or later 'dns_domains', 'fixed_ips', 'floating_ips', 'networks', 'provider_fw_rules', 'security_group_default_rules', } # we don't configure 'cls' since we have models that don't use the # TimestampMixin BASE = orm.declarative_base() class NovaBase(models.TimestampMixin, models.ModelBase): def __copy__(self): """Implement a safe copy.copy(). SQLAlchemy-mapped objects travel with an object called an InstanceState, which is pegged to that object specifically and tracks everything about that object. 
It's critical within all attribute operations, including gets and deferred loading. This object definitely cannot be shared among two instances, and must be handled. The copy routine here makes use of session.merge() which already essentially implements a "copy" style of operation, which produces a new instance with a new InstanceState and copies all the data along mapped attributes without using any SQL. The mode we are using here has the caveat that the given object must be "clean", e.g. that it has no database-loaded state that has been updated and not flushed. This is a good thing, as creating a copy of an object including non-flushed, pending database state is probably not a good idea; neither represents what the actual row looks like, and only one should be flushed. """ session = orm.Session() copy = session.merge(self, load=False) session.expunge(copy) return copy class Service(BASE, NovaBase, models.SoftDeleteMixin): """Represents a running service on a host.""" __tablename__ = 'services' __table_args__ = ( schema.UniqueConstraint("host", "topic", "deleted", name="uniq_services0host0topic0deleted"), schema.UniqueConstraint("host", "binary", "deleted", name="uniq_services0host0binary0deleted"), sa.Index('services_uuid_idx', 'uuid', unique=True), ) id = sa.Column(sa.Integer, primary_key=True) uuid = sa.Column(sa.String(36), nullable=True) host = sa.Column(sa.String(255)) binary = sa.Column(sa.String(255)) topic = sa.Column(sa.String(255)) report_count = sa.Column(sa.Integer, nullable=False, default=0) disabled = sa.Column(sa.Boolean, default=False) disabled_reason = sa.Column(sa.String(255)) last_seen_up = sa.Column(sa.DateTime, nullable=True) forced_down = sa.Column(sa.Boolean, default=False) version = sa.Column(sa.Integer, default=0) instance = orm.relationship( 'Instance', back_populates='services', primaryjoin='and_(Service.host == Instance.host,' 'Service.binary == "nova-compute",' 'Instance.deleted == 0)', foreign_keys=host, ) class ComputeNode(BASE, NovaBase, models.SoftDeleteMixin): """Represents a running compute service on a host.""" __tablename__ = 'compute_nodes' __table_args__ = ( sa.Index('compute_nodes_uuid_idx', 'uuid', unique=True), schema.UniqueConstraint( 'host', 'hypervisor_hostname', 'deleted', name="uniq_compute_nodes0host0hypervisor_hostname0deleted"), ) id = sa.Column(sa.Integer, primary_key=True) service_id = sa.Column(sa.Integer, nullable=True) # FIXME(sbauza: Host field is nullable because some old Juno compute nodes # can still report stats from an old ResourceTracker without setting this # field. # This field has to be set non-nullable in a later cycle (probably Lxxx) # once we are sure that all compute nodes in production report it. host = sa.Column(sa.String(255), nullable=True) uuid = sa.Column(sa.String(36), nullable=True) vcpus = sa.Column(sa.Integer, nullable=False) memory_mb = sa.Column(sa.Integer, nullable=False) local_gb = sa.Column(sa.Integer, nullable=False) vcpus_used = sa.Column(sa.Integer, nullable=False) memory_mb_used = sa.Column(sa.Integer, nullable=False) local_gb_used = sa.Column(sa.Integer, nullable=False) hypervisor_type = sa.Column(types.MediumText(), nullable=False) hypervisor_version = sa.Column(sa.Integer, nullable=False) hypervisor_hostname = sa.Column(sa.String(255)) # Free Ram, amount of activity (resize, migration, boot, etc) and # the number of running VM's are a good starting point for what's # important when making scheduling decisions. 
free_ram_mb = sa.Column(sa.Integer) free_disk_gb = sa.Column(sa.Integer) current_workload = sa.Column(sa.Integer) running_vms = sa.Column(sa.Integer) # Note(masumotok): Expected Strings example: # # '{"arch":"x86_64", # "model":"Nehalem", # "topology":{"sockets":1, "threads":2, "cores":3}, # "features":["tdtscp", "xtpr"]}' # # Points are "json translatable" and it must have all dictionary keys # above, since it is copied from tag of getCapabilities() # (See libvirt.virtConnection). cpu_info = sa.Column(types.MediumText(), nullable=False) disk_available_least = sa.Column(sa.Integer) host_ip = sa.Column(types.IPAddress()) supported_instances = sa.Column(sa.Text) metrics = sa.Column(sa.Text) # Note(yongli): json string PCI Stats # '[{"vendor_id":"8086", "product_id":"1234", "count":3 }, ...]' pci_stats = sa.Column(sa.Text) # extra_resources is a json string containing arbitrary # data about additional resources. extra_resources = sa.Column(sa.Text) # json-encode string containing compute node statistics stats = sa.Column(sa.Text, default='{}') # json-encoded dict that contains NUMA topology as generated by # objects.NUMATopology._to_json() numa_topology = sa.Column(sa.Text) # allocation ratios provided by the RT ram_allocation_ratio = sa.Column(sa.Float, nullable=True) cpu_allocation_ratio = sa.Column(sa.Float, nullable=True) disk_allocation_ratio = sa.Column(sa.Float, nullable=True) mapped = sa.Column(sa.Integer, nullable=True, default=0) class Certificate(BASE, NovaBase, models.SoftDeleteMixin): """Represents a x509 certificate.""" __tablename__ = 'certificates' __table_args__ = ( sa.Index( 'certificates_project_id_deleted_idx', 'project_id', 'deleted', ), sa.Index('certificates_user_id_deleted_idx', 'user_id', 'deleted') ) id = sa.Column(sa.Integer, primary_key=True) user_id = sa.Column(sa.String(255)) project_id = sa.Column(sa.String(255)) file_name = sa.Column(sa.String(255)) class Instance(BASE, NovaBase, models.SoftDeleteMixin): """Represents a guest VM.""" __tablename__ = 'instances' __table_args__ = ( sa.Index('instances_project_id_idx', 'project_id'), sa.Index('instances_project_id_deleted_idx', 'project_id', 'deleted'), sa.Index('instances_reservation_id_idx', 'reservation_id'), sa.Index('instances_terminated_at_launched_at_idx', 'terminated_at', 'launched_at'), sa.Index('instances_uuid_deleted_idx', 'uuid', 'deleted'), sa.Index('instances_task_state_updated_at_idx', 'task_state', 'updated_at'), sa.Index('instances_host_node_deleted_idx', 'host', 'node', 'deleted'), sa.Index('instances_host_deleted_cleaned_idx', 'host', 'deleted', 'cleaned'), sa.Index('instances_deleted_created_at_idx', 'deleted', 'created_at'), sa.Index('instances_updated_at_project_id_idx', 'updated_at', 'project_id'), sa.Index('instances_compute_id_deleted_idx', 'compute_id', 'deleted'), schema.UniqueConstraint('uuid', name='uniq_instances0uuid'), ) injected_files = [] id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) @property def name(self): try: base_name = CONF.instance_name_template % self.id except TypeError: # Support templates like "uuid-%(uuid)s", etc. info = {} # NOTE(russellb): Don't use self.iteritems() here, as it will # result in infinite recursion on the name property. for column in iter(orm.object_mapper(self).columns): key = column.name # prevent recursion if someone specifies %(name)s # %(name)s will not be valid. 
if key == 'name': continue info[key] = self[key] try: base_name = CONF.instance_name_template % info except KeyError: base_name = self.uuid return base_name @property def _extra_keys(self): return ['name'] user_id = sa.Column(sa.String(255)) project_id = sa.Column(sa.String(255)) image_ref = sa.Column(sa.String(255)) kernel_id = sa.Column(sa.String(255)) ramdisk_id = sa.Column(sa.String(255)) hostname = sa.Column(sa.String(255)) launch_index = sa.Column(sa.Integer) key_name = sa.Column(sa.String(255)) key_data = sa.Column(types.MediumText()) power_state = sa.Column(sa.Integer) vm_state = sa.Column(sa.String(255)) task_state = sa.Column(sa.String(255)) memory_mb = sa.Column(sa.Integer) vcpus = sa.Column(sa.Integer) root_gb = sa.Column(sa.Integer) ephemeral_gb = sa.Column(sa.Integer) ephemeral_key_uuid = sa.Column(sa.String(36)) # This is not related to hostname, above. It refers # to the nova node. host = sa.Column(sa.String(255)) # To identify the "ComputeNode" which the instance resides in. # This equals to ComputeNode.hypervisor_hostname. node = sa.Column(sa.String(255)) # This identifies the ComputeNode object that this instance resides # on and should be equivalent to the one referenced by the 'node' # field above. compute_id = sa.Column(sa.BigInteger()) # *not* flavorid, this is the internal primary_key instance_type_id = sa.Column(sa.Integer) user_data = sa.Column(types.MediumText()) reservation_id = sa.Column(sa.String(255)) launched_at = sa.Column(sa.DateTime) terminated_at = sa.Column(sa.DateTime) # This always refers to the availability_zone kwarg passed in /servers and # provided as an API option, not at all related to the host AZ the instance # belongs to. availability_zone = sa.Column(sa.String(255)) # User editable field for display in user-facing UIs display_name = sa.Column(sa.String(255)) display_description = sa.Column(sa.String(255)) # To remember on which host an instance booted. # An instance may have moved to another host by live migration. launched_on = sa.Column(types.MediumText()) # locked is superseded by locked_by and locked is not really # necessary but still used in API code so it remains. locked = sa.Column(sa.Boolean) locked_by = sa.Column( sa.Enum('owner', 'admin', name='instances0locked_by')) os_type = sa.Column(sa.String(255)) architecture = sa.Column(sa.String(255)) vm_mode = sa.Column(sa.String(255)) uuid = sa.Column(sa.String(36), nullable=False) root_device_name = sa.Column(sa.String(255)) default_ephemeral_device = sa.Column(sa.String(255)) default_swap_device = sa.Column(sa.String(255)) config_drive = sa.Column(sa.String(255)) # User editable field meant to represent what ip should be used # to connect to the instance access_ip_v4 = sa.Column(types.IPAddress()) access_ip_v6 = sa.Column(types.IPAddress()) auto_disk_config = sa.Column(sa.Boolean()) progress = sa.Column(sa.Integer) # EC2 instance_initiated_shutdown_terminate # True: -> 'terminate' # False: -> 'stop' # Note(maoy): currently Nova will always stop instead of terminate # no matter what the flag says. So we set the default to False. shutdown_terminate = sa.Column(sa.Boolean(), default=False) # EC2 disable_api_termination disable_terminate = sa.Column(sa.Boolean(), default=False) # OpenStack compute cell name. 
This will only be set at the top of # the cells tree and it'll be a full cell name such as 'api!hop1!hop2' # TODO(stephenfin): Remove this cell_name = sa.Column(sa.String(255)) # NOTE(pumaranikar): internal_id attribute is no longer used (bug 1441242) # Hence, removing from object layer in current release (Ocata) and will # treated as deprecated. The column can be removed from schema with # a migration at the start of next release. # internal_id = sa.Column(sa.Integer) # Records whether an instance has been deleted from disk cleaned = sa.Column(sa.Integer, default=0) hidden = sa.Column(sa.Boolean, default=False) block_device_mapping = orm.relationship( 'BlockDeviceMapping', back_populates='instance', primaryjoin=( 'and_(BlockDeviceMapping.instance_uuid == Instance.uuid, ' 'BlockDeviceMapping.deleted == 0)' ), ) console_auth_tokens = orm.relationship( 'ConsoleAuthToken', back_populates='instance', foreign_keys='ConsoleAuthToken.instance_uuid', primaryjoin=( 'and_(Instance.uuid == ConsoleAuthToken.instance_uuid,' 'Instance.deleted == 0)' ), ) extra = orm.relationship( 'InstanceExtra', back_populates='instance', uselist=False, ) info_cache = orm.relationship( 'InstanceInfoCache', back_populates='instance', uselist=False, ) pci_devices = orm.relationship( 'PciDevice', back_populates='instance', foreign_keys='PciDevice.instance_uuid', primaryjoin=( 'and_(Instance.uuid == PciDevice.instance_uuid,' 'PciDevice.deleted == 0)' ), ) services = orm.relationship( 'Service', back_populates='instance', primaryjoin=( 'and_(Instance.host == Service.host,' 'Service.binary == "nova-compute",' 'Instance.deleted == 0)' ), foreign_keys='Service.host', ) security_groups = orm.relationship( 'SecurityGroup', secondary='security_group_instance_association', back_populates='instances', primaryjoin=( 'and_(' 'SecurityGroupInstanceAssociation.instance_uuid == Instance.uuid,' # (anthony) the condition below shouldn't be necessary now that the # association is being marked as deleted. 
However, removing this # may cause existing deployments to choke, so I'm leaving it 'Instance.deleted == 0)' ), secondaryjoin=( 'and_(' 'SecurityGroup.id == SecurityGroupInstanceAssociation.security_group_id,' # noqa: E501 'SecurityGroupInstanceAssociation.deleted == 0,' 'SecurityGroup.deleted == 0)' ), ) system_metadata = orm.relationship( 'InstanceSystemMetadata', back_populates='instance', ) tags = orm.relationship( 'Tag', back_populates='instance', primaryjoin=( 'and_(Instance.uuid == Tag.resource_id,Instance.deleted == 0)' ), foreign_keys='Tag.resource_id', ) # NOTE(stephenfin): https://github.com/sqlalchemy/sqlalchemy/discussions/8619 Instance.metadata = orm.relationship( 'InstanceMetadata', back_populates='instance', foreign_keys='InstanceMetadata.instance_uuid', primaryjoin=( 'and_(Instance.uuid == InstanceMetadata.instance_uuid,' 'InstanceMetadata.deleted == 0)' ), ) class InstanceInfoCache(BASE, NovaBase, models.SoftDeleteMixin): """Represents a cache of information about an instance """ __tablename__ = 'instance_info_caches' __table_args__ = ( schema.UniqueConstraint( "instance_uuid", name="uniq_instance_info_caches0instance_uuid"),) id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) # text column used for storing a json object of network data for api network_info = sa.Column(types.MediumText()) instance_uuid = sa.Column(sa.String(36), sa.ForeignKey('instances.uuid'), nullable=False) instance = orm.relationship(Instance, back_populates='info_cache', foreign_keys=instance_uuid, primaryjoin=instance_uuid == Instance.uuid) class InstanceExtra(BASE, NovaBase, models.SoftDeleteMixin): __tablename__ = 'instance_extra' __table_args__ = ( sa.Index('instance_extra_idx', 'instance_uuid'),) id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) instance_uuid = sa.Column(sa.String(36), sa.ForeignKey('instances.uuid'), nullable=False) device_metadata = orm.deferred(sa.Column(sa.Text)) numa_topology = orm.deferred(sa.Column(sa.Text)) pci_requests = orm.deferred(sa.Column(sa.Text)) flavor = orm.deferred(sa.Column(sa.Text)) vcpu_model = orm.deferred(sa.Column(sa.Text)) migration_context = orm.deferred(sa.Column(sa.Text)) keypairs = orm.deferred(sa.Column(sa.Text)) trusted_certs = orm.deferred(sa.Column(sa.Text)) # NOTE(Luyao): 'vpmems' is still in the database # and can be removed in the future release. resources = orm.deferred(sa.Column(sa.Text)) instance = orm.relationship(Instance, back_populates='extra', foreign_keys=instance_uuid, primaryjoin=instance_uuid == Instance.uuid) class Quota(BASE, NovaBase, models.SoftDeleteMixin): """Represents a single quota override for a project. If there is no row for a given project id and resource, then the default for the quota class is used. If there is no row for a given quota class and resource, then the default for the deployment is used. If the row is present but the hard limit is Null, then the resource is unlimited. 
""" __tablename__ = 'quotas' __table_args__ = ( schema.UniqueConstraint("project_id", "resource", "deleted", name="uniq_quotas0project_id0resource0deleted" ), ) id = sa.Column(sa.Integer, primary_key=True) project_id = sa.Column(sa.String(255)) resource = sa.Column(sa.String(255), nullable=False) hard_limit = sa.Column(sa.Integer) class ProjectUserQuota(BASE, NovaBase, models.SoftDeleteMixin): """Represents a single quota override for a user with in a project.""" __tablename__ = 'project_user_quotas' uniq_name = "uniq_project_user_quotas0user_id0project_id0resource0deleted" __table_args__ = ( schema.UniqueConstraint("user_id", "project_id", "resource", "deleted", name=uniq_name), sa.Index('project_user_quotas_project_id_deleted_idx', 'project_id', 'deleted'), sa.Index('project_user_quotas_user_id_deleted_idx', 'user_id', 'deleted') ) id = sa.Column(sa.Integer, primary_key=True, nullable=False) project_id = sa.Column(sa.String(255), nullable=False) user_id = sa.Column(sa.String(255), nullable=False) resource = sa.Column(sa.String(255), nullable=False) hard_limit = sa.Column(sa.Integer) class QuotaClass(BASE, NovaBase, models.SoftDeleteMixin): """Represents a single quota override for a quota class. If there is no row for a given quota class and resource, then the default for the deployment is used. If the row is present but the hard limit is Null, then the resource is unlimited. """ __tablename__ = 'quota_classes' __table_args__ = ( sa.Index('ix_quota_classes_class_name', 'class_name'), ) id = sa.Column(sa.Integer, primary_key=True) class_name = sa.Column(sa.String(255)) resource = sa.Column(sa.String(255)) hard_limit = sa.Column(sa.Integer) class QuotaUsage(BASE, NovaBase, models.SoftDeleteMixin): """Represents the current usage for a given resource.""" __tablename__ = 'quota_usages' __table_args__ = ( sa.Index('ix_quota_usages_project_id', 'project_id'), sa.Index('ix_quota_usages_user_id_deleted', 'user_id', 'deleted'), ) id = sa.Column(sa.Integer, primary_key=True) project_id = sa.Column(sa.String(255)) user_id = sa.Column(sa.String(255)) resource = sa.Column(sa.String(255), nullable=False) in_use = sa.Column(sa.Integer, nullable=False) reserved = sa.Column(sa.Integer, nullable=False) @property def total(self): return self.in_use + self.reserved until_refresh = sa.Column(sa.Integer) class Reservation(BASE, NovaBase, models.SoftDeleteMixin): """Represents a resource reservation for quotas.""" __tablename__ = 'reservations' __table_args__ = ( sa.Index('ix_reservations_project_id', 'project_id'), sa.Index('reservations_uuid_idx', 'uuid'), sa.Index('reservations_deleted_expire_idx', 'deleted', 'expire'), sa.Index('ix_reservations_user_id_deleted', 'user_id', 'deleted'), ) id = sa.Column(sa.Integer, primary_key=True, nullable=False) uuid = sa.Column(sa.String(36), nullable=False) usage_id = sa.Column( sa.Integer, sa.ForeignKey('quota_usages.id'), nullable=False) project_id = sa.Column(sa.String(255)) user_id = sa.Column(sa.String(255)) resource = sa.Column(sa.String(255)) delta = sa.Column(sa.Integer, nullable=False) expire = sa.Column(sa.DateTime) usage = orm.relationship( "QuotaUsage", foreign_keys=usage_id, primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,' 'QuotaUsage.deleted == 0)') class BlockDeviceMapping(BASE, NovaBase, models.SoftDeleteMixin): """Represents block device mapping that is defined by EC2.""" __tablename__ = "block_device_mapping" __table_args__ = ( sa.Index('snapshot_id', 'snapshot_id'), sa.Index('volume_id', 'volume_id'), 
sa.Index('block_device_mapping_instance_uuid_device_name_idx', 'instance_uuid', 'device_name'), sa.Index('block_device_mapping_instance_uuid_volume_id_idx', 'instance_uuid', 'volume_id'), sa.Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'), schema.UniqueConstraint('uuid', name='uniq_block_device_mapping0uuid'), ) id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) instance_uuid = sa.Column(sa.String(36), sa.ForeignKey('instances.uuid')) # NOTE(mdbooth): The REST API for BDMs includes a UUID field. That uuid # refers to an image, volume, or snapshot which will be used in the # initialisation of the BDM. It is only relevant during the API call, and # is not persisted directly. This is the UUID of the BDM itself. # FIXME(danms): This should eventually be non-nullable, but we need a # transition period first. uuid = sa.Column(sa.String(36)) instance = orm.relationship(Instance, back_populates='block_device_mapping', foreign_keys=instance_uuid, primaryjoin='and_(BlockDeviceMapping.' 'instance_uuid==' 'Instance.uuid,' 'BlockDeviceMapping.deleted==' '0)') source_type = sa.Column(sa.String(255)) destination_type = sa.Column(sa.String(255)) guest_format = sa.Column(sa.String(255)) device_type = sa.Column(sa.String(255)) disk_bus = sa.Column(sa.String(255)) boot_index = sa.Column(sa.Integer) device_name = sa.Column(sa.String(255)) # default=False for compatibility of the existing code. # With EC2 API, # default True for ami specified device. # default False for created with other timing. # TODO(sshturm) add default in db delete_on_termination = sa.Column(sa.Boolean, default=False) snapshot_id = sa.Column(sa.String(36)) volume_id = sa.Column(sa.String(36)) volume_size = sa.Column(sa.Integer) volume_type = sa.Column(sa.String(255)) image_id = sa.Column(sa.String(36)) # for no device to suppress devices. no_device = sa.Column(sa.Boolean) connection_info = sa.Column(types.MediumText()) tag = sa.Column(sa.String(255)) attachment_id = sa.Column(sa.String(36)) encrypted = sa.Column(sa.Boolean, default=False) encryption_secret_uuid = sa.Column(sa.String(36)) encryption_format = sa.Column(sa.String(128)) encryption_options = sa.Column(sa.String(4096)) class ShareMapping(BASE, NovaBase): """Represents share / instance mapping.""" __tablename__ = "share_mapping" __table_args__ = ( sa.Index('share_idx', 'share_id'), sa.Index('share_mapping_instance_uuid_share_id_idx', 'instance_uuid', 'share_id'), sa.UniqueConstraint( "instance_uuid", "share_id", name="uniq_key_pairs0instance_uuid0share_id", ), sa.UniqueConstraint( "instance_uuid", "tag", name="uniq_key_pairs0instance_uuid0tag", ), ) # sqlite> create table my_table(id bigint primary key AUTOINCREMENT, # name text); # Parse error: AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY # Use BigInteger variant for sqlite to allow unit tests. Other database # should support BigInteger and autoincrement. 
id = sa.Column( sa.BigInteger().with_variant(sa.Integer, "sqlite"), primary_key=True, autoincrement=True, nullable=False, ) uuid = sa.Column(sa.String(36)) instance_uuid = sa.Column(sa.String(36), sa.ForeignKey('instances.uuid')) instance = orm.relationship( "Instance", foreign_keys=instance_uuid, primaryjoin='and_(ShareMapping.instance_uuid == Instance.uuid,' 'Instance.deleted == 0)' ) share_id = sa.Column(sa.String(36)) status = sa.Column(sa.String(32)) tag = sa.Column(sa.String(48)) export_location = sa.Column(sa.Text) share_proto = sa.Column(sa.String(32)) # TODO(stephenfin): Remove once we drop the security_groups field from the # Instance table. Until then, this is tied to the SecurityGroup table class SecurityGroupInstanceAssociation(BASE, NovaBase, models.SoftDeleteMixin): __tablename__ = 'security_group_instance_association' __table_args__ = ( sa.Index('security_group_instance_association_instance_uuid_idx', 'instance_uuid'), ) id = sa.Column(sa.Integer, primary_key=True, nullable=False) security_group_id = sa.Column( sa.Integer, sa.ForeignKey('security_groups.id')) instance_uuid = sa.Column(sa.String(36), sa.ForeignKey('instances.uuid')) # TODO(stephenfin): Remove once we drop the security_groups field from the # Instance table class SecurityGroup(BASE, NovaBase, models.SoftDeleteMixin): """Represents a security group.""" __tablename__ = 'security_groups' __table_args__ = ( schema.UniqueConstraint('project_id', 'name', 'deleted', name='uniq_security_groups0project_id0' 'name0deleted'), ) id = sa.Column(sa.Integer, primary_key = True) name = sa.Column(sa.String(255)) description = sa.Column(sa.String(255)) user_id = sa.Column(sa.String(255)) project_id = sa.Column(sa.String(255)) instances = orm.relationship(Instance, back_populates='security_groups', secondary = "security_group_instance_association", primaryjoin = 'and_(' 'SecurityGroup.id == ' 'SecurityGroupInstanceAssociation.security_group_id,' 'SecurityGroupInstanceAssociation.deleted == 0,' 'SecurityGroup.deleted == 0)', secondaryjoin='and_(' 'SecurityGroupInstanceAssociation.instance_uuid == Instance.uuid,' # (anthony) the condition below shouldn't be necessary now that the # association is being marked as deleted. However, removing this # may cause existing deployments to choke, so I'm leaving it 'Instance.deleted == 0)') rules = orm.relationship( 'SecurityGroupIngressRule', primaryjoin=( 'and_(' 'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,' 'SecurityGroupIngressRule.deleted == 0)' ), back_populates='parent_group', ) # TODO(stephenfin): Remove once we drop the security_groups field from the # Instance table. Until then, this is tied to the SecurityGroup table class SecurityGroupIngressRule(BASE, NovaBase, models.SoftDeleteMixin): """Represents a rule in a security group.""" __tablename__ = 'security_group_rules' __table_args__ = () id = sa.Column(sa.Integer, primary_key=True) parent_group_id = sa.Column( sa.Integer, sa.ForeignKey('security_groups.id')) parent_group = orm.relationship("SecurityGroup", back_populates="rules", foreign_keys=[parent_group_id], primaryjoin='and_(' 'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,' 'SecurityGroupIngressRule.deleted == 0)') protocol = sa.Column(sa.String(255)) from_port = sa.Column(sa.Integer) to_port = sa.Column(sa.Integer) cidr = sa.Column(types.CIDR()) # Note: This is not the parent SecurityGroup. It's SecurityGroup we're # granting access for. 
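# In other words: for a rule meaning "allow ingress from group X",
# parent_group above is the group the rule belongs to, while
# grantee_group below is X, the group whose members are being granted
# access.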
group_id = sa.Column(sa.Integer, sa.ForeignKey('security_groups.id')) grantee_group = orm.relationship("SecurityGroup", foreign_keys=[group_id], primaryjoin='and_(' 'SecurityGroupIngressRule.group_id == SecurityGroup.id,' 'SecurityGroupIngressRule.deleted == 0)') class Migration(BASE, NovaBase, models.SoftDeleteMixin): """Represents a running host-to-host migration.""" __tablename__ = 'migrations' __table_args__ = ( sa.Index('migrations_instance_uuid_and_status_idx', 'deleted', 'instance_uuid', 'status'), sa.Index('migrations_by_host_nodes_and_status_idx', 'deleted', 'source_compute', 'dest_compute', 'source_node', 'dest_node', 'status'), sa.Index('migrations_uuid', 'uuid', unique=True), sa.Index('migrations_updated_at_idx', 'updated_at'), sa.Index('migrations_dest_compute_id_deleted_idx', 'dest_compute_id', 'deleted'), ) id = sa.Column(sa.Integer, primary_key=True, nullable=False) # NOTE(tr3buchet): the ____compute variables are instance['host'] source_compute = sa.Column(sa.String(255)) dest_compute = sa.Column(sa.String(255)) # nodes are equivalent to a compute node's 'hypervisor_hostname' source_node = sa.Column(sa.String(255)) dest_node = sa.Column(sa.String(255)) # The ID of the ComputeNode that matches dest_node dest_compute_id = sa.Column(sa.BigInteger()) # NOTE(tr3buchet): dest_host, btw, is an ip address dest_host = sa.Column(sa.String(255)) old_instance_type_id = sa.Column(sa.Integer()) new_instance_type_id = sa.Column(sa.Integer()) instance_uuid = sa.Column(sa.String(36), sa.ForeignKey('instances.uuid')) uuid = sa.Column(sa.String(36), nullable=True) # TODO(_cerberus_): enum status = sa.Column(sa.String(255)) migration_type = sa.Column(sa.Enum('migration', 'resize', 'live-migration', 'evacuation', name='migration_type'), nullable=True) hidden = sa.Column(sa.Boolean, default=False) memory_total = sa.Column(sa.BigInteger, nullable=True) memory_processed = sa.Column(sa.BigInteger, nullable=True) memory_remaining = sa.Column(sa.BigInteger, nullable=True) disk_total = sa.Column(sa.BigInteger, nullable=True) disk_processed = sa.Column(sa.BigInteger, nullable=True) disk_remaining = sa.Column(sa.BigInteger, nullable=True) cross_cell_move = sa.Column(sa.Boolean, default=False) user_id = sa.Column(sa.String(255), nullable=True) project_id = sa.Column(sa.String(255), nullable=True) instance = orm.relationship("Instance", foreign_keys=instance_uuid, primaryjoin='and_(Migration.instance_uuid == ' 'Instance.uuid, Instance.deleted == ' '0)') class VirtualInterface(BASE, NovaBase, models.SoftDeleteMixin): """Represents a virtual interface on an instance.""" __tablename__ = 'virtual_interfaces' __table_args__ = ( schema.UniqueConstraint("address", "deleted", name="uniq_virtual_interfaces0address0deleted"), sa.Index('virtual_interfaces_network_id_idx', 'network_id'), sa.Index('virtual_interfaces_instance_uuid_fkey', 'instance_uuid'), sa.Index('virtual_interfaces_uuid_idx', 'uuid'), ) id = sa.Column(sa.Integer, primary_key=True, nullable=False) address = sa.Column(sa.String(255)) network_id = sa.Column(sa.Integer) instance_uuid = sa.Column(sa.String(36), sa.ForeignKey('instances.uuid')) uuid = sa.Column(sa.String(36)) tag = sa.Column(sa.String(255)) class InstanceMetadata(BASE, NovaBase, models.SoftDeleteMixin): """Represents a user-provided metadata key/value pair for an instance.""" __tablename__ = 'instance_metadata' __table_args__ = ( sa.Index('instance_metadata_instance_uuid_idx', 'instance_uuid'), ) id = sa.Column(sa.Integer, primary_key=True) key = sa.Column(sa.String(255)) value = 
sa.Column(sa.String(255)) instance_uuid = sa.Column(sa.String(36), sa.ForeignKey('instances.uuid')) instance = orm.relationship( Instance, back_populates="metadata", foreign_keys=instance_uuid, primaryjoin=( 'and_(InstanceMetadata.instance_uuid == Instance.uuid,' 'InstanceMetadata.deleted == 0)' ), ) class InstanceSystemMetadata(BASE, NovaBase, models.SoftDeleteMixin): """Represents a system-owned metadata key/value pair for an instance.""" __tablename__ = 'instance_system_metadata' __table_args__ = ( sa.Index('instance_uuid', 'instance_uuid'), ) id = sa.Column(sa.Integer, primary_key=True) key = sa.Column(sa.String(255), nullable=False) value = sa.Column(sa.String(255)) instance_uuid = sa.Column(sa.String(36), sa.ForeignKey('instances.uuid'), nullable=False) instance = orm.relationship( Instance, back_populates='system_metadata', foreign_keys=instance_uuid, ) class VolumeUsage(BASE, NovaBase, models.SoftDeleteMixin): """Cache for volume usage data pulled from the hypervisor.""" __tablename__ = 'volume_usage_cache' __table_args__ = () id = sa.Column(sa.Integer, primary_key=True, nullable=False) volume_id = sa.Column(sa.String(36), nullable=False) instance_uuid = sa.Column(sa.String(36)) project_id = sa.Column(sa.String(36)) user_id = sa.Column(sa.String(64)) availability_zone = sa.Column(sa.String(255)) tot_last_refreshed = sa.Column(sa.DateTime) tot_reads = sa.Column(sa.BigInteger, default=0) tot_read_bytes = sa.Column(sa.BigInteger, default=0) tot_writes = sa.Column(sa.BigInteger, default=0) tot_write_bytes = sa.Column(sa.BigInteger, default=0) curr_last_refreshed = sa.Column(sa.DateTime) curr_reads = sa.Column(sa.BigInteger, default=0) curr_read_bytes = sa.Column(sa.BigInteger, default=0) curr_writes = sa.Column(sa.BigInteger, default=0) curr_write_bytes = sa.Column(sa.BigInteger, default=0) class S3Image(BASE, NovaBase, models.SoftDeleteMixin): """Compatibility layer for the S3 image service talking to Glance.""" __tablename__ = 's3_images' __table_args__ = () id = sa.Column( sa.Integer, primary_key=True, nullable=False, autoincrement=True) uuid = sa.Column(sa.String(36), nullable=False) class InstanceFault(BASE, NovaBase, models.SoftDeleteMixin): __tablename__ = 'instance_faults' __table_args__ = ( sa.Index('instance_faults_host_idx', 'host'), sa.Index('instance_faults_instance_uuid_deleted_created_at_idx', 'instance_uuid', 'deleted', 'created_at') ) id = sa.Column(sa.Integer, primary_key=True, nullable=False) instance_uuid = sa.Column(sa.String(36), sa.ForeignKey('instances.uuid')) code = sa.Column(sa.Integer(), nullable=False) message = sa.Column(sa.String(255)) details = sa.Column(types.MediumText()) host = sa.Column(sa.String(255)) class InstanceAction(BASE, NovaBase, models.SoftDeleteMixin): """Track client actions on an instance. The intention is that there will only be one of these per user request. A lookup by (instance_uuid, request_id) should always return a single result. 
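For example, a single reboot request maps to one row here, while the individual steps taken while handling it are recorded as InstanceActionEvent rows that reference this action.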
""" __tablename__ = 'instance_actions' __table_args__ = ( sa.Index('instance_uuid_idx', 'instance_uuid'), sa.Index('request_id_idx', 'request_id'), sa.Index('instance_actions_instance_uuid_updated_at_idx', 'instance_uuid', 'updated_at') ) id = sa.Column( sa.Integer, primary_key=True, nullable=False, autoincrement=True) action = sa.Column(sa.String(255)) instance_uuid = sa.Column(sa.String(36), sa.ForeignKey('instances.uuid')) request_id = sa.Column(sa.String(255)) user_id = sa.Column(sa.String(255)) project_id = sa.Column(sa.String(255)) start_time = sa.Column(sa.DateTime, default=timeutils.utcnow) finish_time = sa.Column(sa.DateTime) message = sa.Column(sa.String(255)) class InstanceActionEvent(BASE, NovaBase, models.SoftDeleteMixin): """Track events that occur during an InstanceAction.""" __tablename__ = 'instance_actions_events' __table_args__ = () id = sa.Column( sa.Integer, primary_key=True, nullable=False, autoincrement=True) event = sa.Column(sa.String(255)) action_id = sa.Column(sa.Integer, sa.ForeignKey('instance_actions.id')) start_time = sa.Column(sa.DateTime, default=timeutils.utcnow) finish_time = sa.Column(sa.DateTime) result = sa.Column(sa.String(255)) traceback = sa.Column(sa.Text) host = sa.Column(sa.String(255)) details = sa.Column(sa.Text) class InstanceIdMapping(BASE, NovaBase, models.SoftDeleteMixin): """Compatibility layer for the EC2 instance service.""" __tablename__ = 'instance_id_mappings' __table_args__ = ( sa.Index('ix_instance_id_mappings_uuid', 'uuid'), ) id = sa.Column( sa.Integer, primary_key=True, nullable=False, autoincrement=True) uuid = sa.Column(sa.String(36), nullable=False) class TaskLog(BASE, NovaBase, models.SoftDeleteMixin): """Audit log for background periodic tasks.""" __tablename__ = 'task_log' __table_args__ = ( schema.UniqueConstraint( 'task_name', 'host', 'period_beginning', 'period_ending', name="uniq_task_log0task_name0host0period_beginning0period_ending" ), sa.Index('ix_task_log_period_beginning', 'period_beginning'), sa.Index('ix_task_log_host', 'host'), sa.Index('ix_task_log_period_ending', 'period_ending'), ) id = sa.Column( sa.Integer, primary_key=True, nullable=False, autoincrement=True) task_name = sa.Column(sa.String(255), nullable=False) state = sa.Column(sa.String(255), nullable=False) host = sa.Column(sa.String(255), nullable=False) period_beginning = sa.Column(sa.DateTime, default=timeutils.utcnow, nullable=False) period_ending = sa.Column(sa.DateTime, default=timeutils.utcnow, nullable=False) message = sa.Column(sa.String(255), nullable=False) task_items = sa.Column(sa.Integer(), default=0) errors = sa.Column(sa.Integer(), default=0) class PciDevice(BASE, NovaBase, models.SoftDeleteMixin): """Represents a PCI host device that can be passed through to instances. 
""" __tablename__ = 'pci_devices' __table_args__ = ( sa.Index('ix_pci_devices_compute_node_id_deleted', 'compute_node_id', 'deleted'), sa.Index('ix_pci_devices_instance_uuid_deleted', 'instance_uuid', 'deleted'), sa.Index('ix_pci_devices_compute_node_id_parent_addr_deleted', 'compute_node_id', 'parent_addr', 'deleted'), schema.UniqueConstraint( "compute_node_id", "address", "deleted", name="uniq_pci_devices0compute_node_id0address0deleted") ) id = sa.Column(sa.Integer, primary_key=True) uuid = sa.Column(sa.String(36)) compute_node_id = sa.Column(sa.Integer, sa.ForeignKey('compute_nodes.id'), nullable=False) # physical address of device domain:bus:slot.func (0000:09:01.1) address = sa.Column(sa.String(12), nullable=False) vendor_id = sa.Column(sa.String(4), nullable=False) product_id = sa.Column(sa.String(4), nullable=False) dev_type = sa.Column(sa.String(8), nullable=False) dev_id = sa.Column(sa.String(255)) # label is abstract device name, that is used to unify devices with the # same functionality with different addresses or host. label = sa.Column(sa.String(255), nullable=False) status = sa.Column(sa.String(36), nullable=False) # the request_id is used to identify a device that is allocated for a # particular request request_id = sa.Column(sa.String(36), nullable=True) extra_info = sa.Column(sa.Text) instance_uuid = sa.Column(sa.String(36)) numa_node = sa.Column(sa.Integer, nullable=True) parent_addr = sa.Column(sa.String(12), nullable=True) instance = orm.relationship(Instance, back_populates="pci_devices", foreign_keys=instance_uuid, primaryjoin='and_(' 'PciDevice.instance_uuid == Instance.uuid,' 'PciDevice.deleted == 0)') class Tag(BASE, models.ModelBase): """Represents the tag for a resource.""" __tablename__ = "tags" __table_args__ = ( sa.Index('tags_tag_idx', 'tag'), ) resource_id = sa.Column(sa.String(36), primary_key=True, nullable=False) tag = sa.Column(sa.Unicode(80), primary_key=True, nullable=False) instance = orm.relationship( 'Instance', back_populates='tags', foreign_keys=resource_id, primaryjoin='and_(Tag.resource_id == Instance.uuid,' 'Instance.deleted == 0)', ) class ConsoleAuthToken(BASE, NovaBase): """Represents a console auth token""" __tablename__ = 'console_auth_tokens' __table_args__ = ( sa.Index('console_auth_tokens_instance_uuid_idx', 'instance_uuid'), sa.Index('console_auth_tokens_host_expires_idx', 'host', 'expires'), sa.Index( 'console_auth_tokens_token_hash_instance_uuid_idx', 'token_hash', 'instance_uuid', ), schema.UniqueConstraint("token_hash", name="uniq_console_auth_tokens0token_hash") ) id = sa.Column(sa.Integer, primary_key=True, nullable=False) token_hash = sa.Column(sa.String(255), nullable=False) console_type = sa.Column(sa.String(255), nullable=False) host = sa.Column(sa.String(255), nullable=False) port = sa.Column(sa.Integer, nullable=False) tls_port = sa.Column(sa.Integer, nullable=True) internal_access_path = sa.Column(sa.String(255)) instance_uuid = sa.Column(sa.String(36), nullable=False) expires = sa.Column(sa.Integer, nullable=False) access_url_base = sa.Column(sa.String(255)) instance = orm.relationship( "Instance", back_populates='console_auth_tokens', primaryjoin='and_(ConsoleAuthToken.instance_uuid == Instance.uuid,' 'Instance.deleted == 0)', foreign_keys=instance_uuid ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/migration.py0000664000175000017500000000751100000000000016461 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as 
represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from alembic import command as alembic_api from alembic import config as alembic_config from alembic.runtime import migration as alembic_migration from oslo_log import log as logging from nova.db.api import api as api_db_api from nova.db.main import api as main_db_api from nova import exception LOG = logging.getLogger(__name__) def _get_engine(database='main', context=None): if database == 'main': return main_db_api.get_engine(context=context) if database == 'api': return api_db_api.get_engine() def _find_alembic_conf(database='main'): """Get the path for the alembic repository.""" path = os.path.join( os.path.abspath(os.path.dirname(__file__)), database, 'alembic.ini') config = alembic_config.Config(path) # we don't want to use the logger configuration from the file, which is # only really intended for the CLI # https://stackoverflow.com/a/42691781/613428 config.attributes['configure_logger'] = False return config def _upgrade_alembic(engine, config, version): # reuse the connection rather than creating a new one with engine.begin() as connection: config.attributes['connection'] = connection alembic_api.upgrade(config, version or 'head') def db_sync(version=None, database='main', context=None): """Migrate the database to `version` or the most recent version.""" if database not in ('main', 'api'): raise exception.Invalid('%s is not a valid database' % database) # if the user requested a specific version, check if it's an integer: # if so, we're almost certainly in sqlalchemy-migrate land and won't # support that if version is not None and version.isdigit(): raise exception.Invalid( 'You requested an sqlalchemy-migrate database version; this is ' 'no longer supported' ) engine = _get_engine(database, context=context) config = _find_alembic_conf(database) # discard the URL stored in alembic.ini in favour of the URL configured # for the engine, casting from 'sqlalchemy.engine.url.URL' to str in the # process # NOTE(sean-k-mooney): the engine has already url encoded the connection # string using a mix of url encode styles for different parts of the url. # since we are updating the alembic config parser instance we need to # escape '%' to '%%' to account for ConfigParser's string interpolation. 
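# Illustrative example (values made up): a URL-encoded password such as
# "p%40ss" must be written to the config as "p%%40ss"; a bare '%' would
# otherwise be treated as a ConfigParser interpolation marker when
# alembic reads 'sqlalchemy.url' back out of the config.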
url = engine.url.render_as_string(hide_password=False).replace('%', '%%') config.set_main_option('sqlalchemy.url', url) # apply anything later LOG.info('Applying migration(s)') _upgrade_alembic(engine, config, version) LOG.info('Migration(s) applied') def db_version(database='main', context=None): """Display the current database version.""" if database not in ('main', 'api'): raise exception.Invalid('%s is not a valid database' % database) engine = _get_engine(database, context=context) with engine.connect() as conn: m_context = alembic_migration.MigrationContext.configure(conn) version = m_context.get_current_revision() return version ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/types.py0000664000175000017500000000567000000000000015640 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Custom SQLAlchemy types.""" import netaddr from oslo_utils import netutils import sqlalchemy as sa import sqlalchemy.dialects.mysql import sqlalchemy.dialects.postgresql from sqlalchemy import types from nova import utils # NOTE(dprince): This wrapper allows us to easily match the Folsom MySQL # Schema. In Folsom we created tables as latin1 and converted them to utf8 # later. This conversion causes some of the Text columns on MySQL to get # created as mediumtext instead of just text. def MediumText(): return sa.Text().with_variant( sqlalchemy.dialects.mysql.MEDIUMTEXT(), 'mysql') class IPAddress(types.TypeDecorator): """An SQLAlchemy type representing an IP-address.""" impl = types.String cache_ok = True def load_dialect_impl(self, dialect): if dialect.name == 'postgresql': return dialect.type_descriptor( sqlalchemy.dialects.postgresql.INET()) return dialect.type_descriptor(types.String(39)) def process_bind_param(self, value, dialect): """Process/Formats the value before insert it into the db.""" if dialect.name == 'postgresql': return value # NOTE(maurosr): The purpose here is to convert ipv6 to the shortened # form, not validate it. 
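# e.g. (illustrative) '2001:0db8:0000:0000:0000:0000:0000:0001' is
# stored in its shortened form '2001:db8::1'.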
if netutils.is_valid_ipv6(value): return utils.get_shortened_ipv6(value) return value class CIDR(types.TypeDecorator): """An SQLAlchemy type representing a CIDR definition.""" impl = types.String cache_ok = True def load_dialect_impl(self, dialect): if dialect.name == 'postgresql': return dialect.type_descriptor( sqlalchemy.dialects.postgresql.INET()) return dialect.type_descriptor(types.String(43)) def process_bind_param(self, value, dialect): """Process/Formats the value before insert it into the db.""" # NOTE(sdague): normalize all the inserts if netutils.is_valid_ipv6_cidr(value): return utils.get_shortened_ipv6_cidr(value) return value def process_result_value(self, value, dialect): try: return str(netaddr.IPNetwork(value, version=4).cidr) except netaddr.AddrFormatError: return str(netaddr.IPNetwork(value, version=6).cidr) except TypeError: return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/db/utils.py0000664000175000017500000000760100000000000015630 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import inspect import nova.context from nova import exception from nova.i18n import _ def require_context(f): """Decorator to require *any* user or admin context. This does no authorization for user or project access matching, see :py:func:`nova.context.authorize_project_context` and :py:func:`nova.context.authorize_user_context`. The first argument to the wrapped function must be the context. """ @functools.wraps(f) def wrapper(*args, **kwargs): nova.context.require_context(args[0]) return f(*args, **kwargs) wrapper.__signature__ = inspect.signature(f) return wrapper def process_sort_params( sort_keys, sort_dirs, default_keys=['created_at', 'id'], default_dir='asc', ): """Process the sort parameters to include default keys. Creates a list of sort keys and a list of sort directions. Adds the default keys to the end of the list if they are not already included. When adding the default keys to the sort keys list, the associated direction is: 1. The first element in the 'sort_dirs' list (if specified), else 2. 'default_dir' value (Note that 'asc' is the default value since this is the default in sqlalchemy.utils.paginate_query) :param sort_keys: List of sort keys to include in the processed list :param sort_dirs: List of sort directions to include in the processed list :param default_keys: List of sort keys that need to be included in the processed list, they are added at the end of the list if not already specified. 
:param default_dir: Sort direction associated with each of the default keys that are not supplied, used when they are added to the processed list :returns: list of sort keys, list of sort directions :raise exception.InvalidInput: If more sort directions than sort keys are specified or if an invalid sort direction is specified """ # Determine direction to use for when adding default keys if sort_dirs and len(sort_dirs) != 0: default_dir_value = sort_dirs[0] else: default_dir_value = default_dir # Create list of keys (do not modify the input list) if sort_keys: result_keys = list(sort_keys) else: result_keys = [] # If a list of directions is not provided, use the default sort direction # for all provided keys if sort_dirs: result_dirs = [] # Verify sort direction for sort_dir in sort_dirs: if sort_dir not in ('asc', 'desc'): msg = _("Unknown sort direction, must be 'desc' or 'asc'") raise exception.InvalidInput(reason=msg) result_dirs.append(sort_dir) else: result_dirs = [default_dir_value for _sort_key in result_keys] # Ensure that the key and direction length match while len(result_dirs) < len(result_keys): result_dirs.append(default_dir_value) # Unless more direction are specified, which is an error if len(result_dirs) > len(result_keys): msg = _("Sort direction size exceeds sort key size") raise exception.InvalidInput(reason=msg) # Ensure defaults are included for key in default_keys: if key not in result_keys: result_keys.append(key) result_dirs.append(default_dir_value) return result_keys, result_dirs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/exception.py0000664000175000017500000024040000000000000016075 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Nova base exception handling. Includes decorator for re-raising Nova-type exceptions. SHOULD include dedicated exception logging. 
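Exceptions defined here follow a common pattern: each subclass defines a printf-style msg_fmt string and is raised with matching keyword arguments, for example VolumeNotFound(volume_id=...).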
""" from oslo_log import log as logging import webob.exc from webob import util as woutil from nova.i18n import _ LOG = logging.getLogger(__name__) class ConvertedException(webob.exc.WSGIHTTPException): def __init__(self, code, title="", explanation=""): self.code = code # There is a strict rule about constructing status line for HTTP: # '...Status-Line, consisting of the protocol version followed by a # numeric status code and its associated textual phrase, with each # element separated by SP characters' # (http://www.faqs.org/rfcs/rfc2616.html) # 'code' and 'title' can not be empty because they correspond # to numeric status code and its associated text if title: self.title = title else: try: self.title = woutil.status_reasons[self.code] except KeyError: msg = "Improper or unknown HTTP status code used: %d" LOG.error(msg, code) self.title = woutil.status_generic_reasons[self.code // 100] self.explanation = explanation super(ConvertedException, self).__init__() class NovaException(Exception): """Base Nova Exception To correctly use this class, inherit from it and define a 'msg_fmt' property. That msg_fmt will get printf'd with the keyword arguments provided to the constructor. """ msg_fmt = _("An unknown exception occurred.") code = 500 headers = {} safe = False def __init__(self, message=None, **kwargs): self.kwargs = kwargs if 'code' not in self.kwargs: try: self.kwargs['code'] = self.code except AttributeError: pass try: if not message: message = self.msg_fmt % kwargs else: message = str(message) except Exception: # NOTE(melwitt): This is done in a separate method so it can be # monkey-patched during testing to make it a hard failure. self._log_exception() message = self.msg_fmt self.message = message super(NovaException, self).__init__(message) def _log_exception(self): # kwargs doesn't match a variable in the message # log the issue and the kwargs LOG.exception('Exception in string format operation') for name, value in self.kwargs.items(): LOG.error("%s: %s" % (name, value)) # noqa def format_message(self): # NOTE(mrodden): use the first argument to the python Exception object # which should be our full NovaException message, (see __init__) return self.args[0] def __repr__(self): dict_repr = self.__dict__ dict_repr['class'] = self.__class__.__name__ return str(dict_repr) class EncryptionFailure(NovaException): msg_fmt = _("Failed to encrypt text: %(reason)s") class VirtualInterfaceCreateException(NovaException): msg_fmt = _("Virtual Interface creation failed") class VirtualInterfaceMacAddressException(NovaException): msg_fmt = _("Creation of virtual interface with " "unique mac address failed") class VirtualInterfacePlugException(NovaException): msg_fmt = _("Virtual interface plugin failed") class VirtualInterfaceUnplugException(NovaException): msg_fmt = _("Failed to unplug virtual interface: %(reason)s") class GlanceConnectionFailed(NovaException): msg_fmt = _("Connection to glance host %(server)s failed: " "%(reason)s") class KeystoneConnectionFailed(NovaException): msg_fmt = _("Connection to keystone host failed: %(reason)s") class CinderConnectionFailed(NovaException): msg_fmt = _("Connection to cinder host failed: %(reason)s") class ManilaConnectionFailed(NovaException): msg_fmt = _("Connection to manila service failed: %(reason)s") class UnsupportedCinderAPIVersion(NovaException): msg_fmt = _('Nova does not support Cinder API version %(version)s') class CinderAPIVersionNotAvailable(NovaException): """Used to indicate that a requested Cinder API version, generally a 
microversion, is not available. """ msg_fmt = _('Cinder API version %(version)s is not available.') class Forbidden(NovaException): msg_fmt = _("Forbidden") code = 403 class NotSupported(NovaException): # This exception use return code as 400 and can be used # directly or as base exception for operations which are not # supported in Nova. Any feature that is not yet implemented # but plan to implement in future (example: Cyborg # integration operations), should use this exception as base # and override the msg_fmt with feature details. # Example: MultiattachNotSupportedByVirtDriver exception. msg_fmt = _("Bad Request - Feature is not supported in Nova") code = 400 class ForbiddenWithAccelerators(NotSupported): msg_fmt = _("Feature not supported with instances that have accelerators.") class ForbiddenSharesNotSupported(NotSupported): msg_fmt = _("Feature not supported until computes have not been updated.") class ForbiddenSharesNotConfiguredCorrectly(NotSupported): msg_fmt = _("Feature not supported because either compute or instance " "are not configured correctly.") class ForbiddenWithShare(NotSupported): msg_fmt = _("Feature not supported with instances that have shares.") class ForbiddenPortsWithAccelerator(NotSupported): msg_fmt = _("Feature not supported with Ports that have accelerators.") class ForbiddenWithRemoteManagedPorts(NotSupported): msg_fmt = _("This feature is not supported when remote-managed ports" " are in use.") class AdminRequired(Forbidden): msg_fmt = _("User does not have admin privileges") class PolicyNotAuthorized(Forbidden): msg_fmt = _("Policy doesn't allow %(action)s to be performed.") class ImageNotActive(NovaException): # NOTE(jruzicka): IncorrectState is used for volumes only in EC2, # but it still seems like the most appropriate option. msg_fmt = _("Image %(image_id)s is not active.") class ImageNotAuthorized(NovaException): msg_fmt = _("Not authorized for image %(image_id)s.") class Invalid(NovaException): msg_fmt = _("Bad Request - Invalid Parameters") code = 400 class InvalidVIOMMUMachineType(Invalid): msg_fmt = _("vIOMMU is not supported by Current machine type %(mtype)s " "(Architecture: %(arch)s).") class InvalidVIOMMUArchitecture(Invalid): msg_fmt = _("vIOMMU required either x86 or AArch64 architecture, " "but given architecture %(arch)s.") class InstanceQuiesceFailed(Invalid): msg_fmt = _("Failed to quiesce instance: %(reason)s") code = 409 class InvalidConfiguration(Invalid): msg_fmt = _("Configuration is Invalid.") class InvalidBDM(Invalid): msg_fmt = _("Block Device Mapping is Invalid.") class InvalidBDMSnapshot(InvalidBDM): msg_fmt = _("Block Device Mapping is Invalid: " "failed to get snapshot %(id)s.") class InvalidBDMVolume(InvalidBDM): msg_fmt = _("Block Device Mapping is Invalid: " "failed to get volume %(id)s.") class InvalidBDMImage(InvalidBDM): msg_fmt = _("Block Device Mapping is Invalid: " "failed to get image %(id)s.") class InvalidBDMBootSequence(InvalidBDM): msg_fmt = _("Block Device Mapping is Invalid: " "Boot sequence for the instance " "and image/block device mapping " "combination is not valid.") class InvalidBDMLocalsLimit(InvalidBDM): msg_fmt = _("Block Device Mapping is Invalid: " "You specified more local devices than the " "limit allows") class InvalidBDMEphemeralSize(InvalidBDM): msg_fmt = _("Ephemeral disks requested are larger than " "the instance type allows. 
If no size is given " "in one block device mapping, flavor ephemeral " "size will be used.") class InvalidBDMSwapSize(InvalidBDM): msg_fmt = _("Swap drive requested is larger than instance type allows.") class InvalidBDMFormat(InvalidBDM): msg_fmt = _("Block Device Mapping is Invalid: " "%(details)s") class InvalidBDMForLegacy(InvalidBDM): msg_fmt = _("Block Device Mapping cannot " "be converted to legacy format. ") class InvalidBDMVolumeNotBootable(InvalidBDM): msg_fmt = _("Block Device %(id)s is not bootable.") class TooManyDiskDevices(InvalidBDM): msg_fmt = _('The maximum allowed number of disk devices (%(maximum)d) to ' 'attach to a single instance has been exceeded.') code = 403 class InvalidBDMDiskBus(InvalidBDM): msg_fmr = _("Block Device Mapping is invalid: The provided disk bus " "%(disk_bus)s is not valid.") class InvalidAttribute(Invalid): msg_fmt = _("Attribute not supported: %(attr)s") class ValidationError(Invalid): msg_fmt = "%(detail)s" class VolumeAttachFailed(Invalid): msg_fmt = _("Volume %(volume_id)s could not be attached. " "Reason: %(reason)s") class VolumeDetachFailed(Invalid): msg_fmt = _("Volume %(volume_id)s could not be detached. " "Reason: %(reason)s") class VolumeExtendFailed(Invalid): msg_fmt = _("Volume %(volume_id)s could not be extended. " "Reason: %(reason)s") class MultiattachNotSupportedByVirtDriver(NotSupported): # This exception indicates the compute hosting the instance does not # support multiattach volumes. This should generally be considered a # 400 HTTPBadRequest error in the API since we expect all virt drivers to # eventually support multiattach volumes. msg_fmt = _("Volume %(volume_id)s has 'multiattach' set, " "which is not supported for this instance.") class MultiattachNotSupportedOldMicroversion(Invalid): msg_fmt = _('Multiattach volumes are only supported starting with ' 'compute API version 2.60.') class MultiattachToShelvedNotSupported(Invalid): msg_fmt = _("Attaching multiattach volumes is not supported for " "shelved-offloaded instances.") class MultiattachSwapVolumeNotSupported(Invalid): msg_fmt = _('Swapping multi-attach volumes with more than one read/write ' 'attachment is not supported.') class VolumeNotCreated(NovaException): msg_fmt = _("Volume %(volume_id)s did not finish being created" " even after we waited %(seconds)s seconds or %(attempts)s" " attempts. 
And its status is %(volume_status)s.") class ExtendVolumeNotSupported(Invalid): msg_fmt = _("Volume size extension is not supported by the hypervisor.") class VolumeEncryptionNotSupported(Invalid): msg_fmt = _("Volume encryption is not supported for %(volume_type)s " "volume %(volume_id)s") class VolumeTaggedAttachNotSupported(Invalid): msg_fmt = _("Tagged volume attachment is not supported for this server " "instance.") class VolumeTaggedAttachToShelvedNotSupported(VolumeTaggedAttachNotSupported): msg_fmt = _("Tagged volume attachment is not supported for " "shelved-offloaded instances.") class NetworkInterfaceTaggedAttachNotSupported(Invalid): msg_fmt = _("Tagged network interface attachment is not supported for " "this server instance.") class InvalidKeypair(Invalid): msg_fmt = _("Keypair data is invalid: %(reason)s") class InvalidRequest(Invalid): msg_fmt = _("The request is invalid.") class InvalidInput(Invalid): msg_fmt = _("Invalid input received: %(reason)s") class InvalidVolume(Invalid): msg_fmt = _("Invalid volume: %(reason)s") class InvalidVolumeAccessMode(Invalid): msg_fmt = _("Invalid volume access mode: %(access_mode)s") class StaleVolumeMount(InvalidVolume): msg_fmt = _("The volume mount at %(mount_path)s is unusable.") class InvalidMetadata(Invalid): msg_fmt = _("Invalid metadata: %(reason)s") class InvalidMetadataSize(Invalid): msg_fmt = _("Invalid metadata size: %(reason)s") class InvalidPortRange(Invalid): msg_fmt = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s") class InvalidIpProtocol(Invalid): msg_fmt = _("Invalid IP protocol %(protocol)s.") class InvalidContentType(Invalid): msg_fmt = _("Invalid content type %(content_type)s.") class InvalidAPIVersionString(Invalid): msg_fmt = _("API Version String %(version)s is of invalid format. Must " "be of format MajorNum.MinorNum.") class VersionNotFoundForAPIMethod(Invalid): msg_fmt = _("API version %(version)s is not supported on this method.") class InvalidGlobalAPIVersion(Invalid): msg_fmt = _("Version %(req_ver)s is not supported by the API. Minimum " "is %(min_ver)s and maximum is %(max_ver)s.") class ApiVersionsIntersect(Invalid): msg_fmt = _("Version of %(name)s %(min_ver)s %(max_ver)s intersects " "with another versions.") # Cannot be templated as the error syntax varies. # msg needs to be constructed when raised. class InvalidParameterValue(Invalid): msg_fmt = "%(err)s" class InvalidAggregateAction(Invalid): msg_fmt = _("Unacceptable parameters.") code = 400 class InvalidAggregateActionAdd(InvalidAggregateAction): msg_fmt = _("Cannot add host to aggregate " "%(aggregate_id)s. Reason: %(reason)s.") class InvalidAggregateActionDelete(InvalidAggregateAction): msg_fmt = _("Cannot remove host from aggregate " "%(aggregate_id)s. Reason: %(reason)s.") class InvalidAggregateActionUpdate(InvalidAggregateAction): msg_fmt = _("Cannot update aggregate " "%(aggregate_id)s. Reason: %(reason)s.") class InvalidAggregateActionUpdateMeta(InvalidAggregateAction): msg_fmt = _("Cannot update metadata of aggregate " "%(aggregate_id)s. Reason: %(reason)s.") class AggregateMetadataKeyExists(NovaException): msg_fmt = _("Aggregate %(aggregate_id)s already contain metadata " "key %(key)s.") code = 400 class InvalidSortKey(Invalid): msg_fmt = _("Sort key supplied was not valid.") class InvalidStrTime(Invalid): msg_fmt = _("Invalid datetime string: %(reason)s") class InvalidNUMANodesNumber(Invalid): msg_fmt = _("The property 'numa_nodes' cannot be '%(nodes)s'. 
" "It must be a number greater than 0") class InvalidName(Invalid): msg_fmt = _("An invalid 'name' value was provided. " "The name must be: %(reason)s") class InstanceInvalidState(Invalid): msg_fmt = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot " "%(method)s while the instance is in this state.") class InstanceNotRunning(Invalid): msg_fmt = _("Instance %(instance_id)s is not running.") class InstanceNotInRescueMode(Invalid): msg_fmt = _("Instance %(instance_id)s is not in rescue mode") class InstanceNotRescuable(Invalid): msg_fmt = _("Instance %(instance_id)s cannot be rescued: %(reason)s") class InstanceNotReady(Invalid): msg_fmt = _("Instance %(instance_id)s is not ready") class InstanceSuspendFailure(Invalid): msg_fmt = _("Failed to suspend instance: %(reason)s") class InstanceResumeFailure(Invalid): msg_fmt = _("Failed to resume instance: %(reason)s") class InstancePowerOnFailure(Invalid): msg_fmt = _("Failed to power on instance: %(reason)s") class InstancePowerOffFailure(Invalid): msg_fmt = _("Failed to power off instance: %(reason)s") class InstanceRebootFailure(Invalid): msg_fmt = _("Failed to reboot instance: %(reason)s") class InstanceTerminationFailure(Invalid): msg_fmt = _("Failed to terminate instance: %(reason)s") class InstanceDeployFailure(Invalid): msg_fmt = _("Failed to deploy instance: %(reason)s") class MultiplePortsNotApplicable(Invalid): msg_fmt = _("Failed to launch instances: %(reason)s") class AmbiguousHostnameForMultipleInstances(Invalid): msg_fmt = _("Unable to allocate a single hostname to multiple instances") class InvalidFixedIpAndMaxCountRequest(Invalid): msg_fmt = _("Failed to launch instances: %(reason)s") class ServiceUnavailable(Invalid): msg_fmt = _("Service is unavailable at this time.") class ServiceNotUnique(Invalid): msg_fmt = _("More than one possible service found.") class ComputeResourcesUnavailable(ServiceUnavailable): msg_fmt = _("Insufficient compute resources: %(reason)s.") class HypervisorUnavailable(NovaException): msg_fmt = _("Connection to the hypervisor is broken on host") class ComputeServiceUnavailable(ServiceUnavailable): msg_fmt = _("Compute service of %(host)s is unavailable at this time.") class ComputeServiceInUse(NovaException): msg_fmt = _("Compute service of %(host)s is still in use.") class UnableToMigrateToSelf(Invalid): msg_fmt = _("Unable to migrate instance (%(instance_id)s) " "to current host (%(host)s).") class OperationNotSupportedForSEV(NotSupported): msg_fmt = _("Operation '%(operation)s' not supported for SEV-enabled " "instance (%(instance_uuid)s).") class OperationNotSupportedForVTPM(NotSupported): msg_fmt = _("Operation '%(operation)s' not supported for vTPM-enabled " "instance (%(instance_uuid)s).") class OperationNotSupportedForVDPAInterface(NotSupported): msg_fmt = _( "Operation '%(operation)s' not supported for instance with " "vDPA ports ((instance_uuid)s)." ) class InvalidHypervisorType(Invalid): msg_fmt = _("The supplied hypervisor type of is invalid.") class HypervisorTooOld(Invalid): msg_fmt = _("This compute node's hypervisor is older than the minimum " "supported version: %(version)s.") class DestinationHypervisorTooOld(Invalid): msg_fmt = _("The instance requires a newer hypervisor version than " "has been provided.") class ServiceTooOld(Invalid): msg_fmt = _("This service is older (v%(thisver)i) than the minimum " "(v%(minver)i) version of the rest of the deployment. 
" "Unable to continue.") class TooOldComputeService(Invalid): msg_fmt = _("Current Nova version does not support computes older than " "%(oldest_supported_version)s but the minimum compute service " "level in your %(scope)s is %(min_service_level)d and the " "oldest supported service level is " "%(oldest_supported_service)d.") class DestinationDiskExists(Invalid): msg_fmt = _("The supplied disk path (%(path)s) already exists, " "it is expected not to exist.") class InvalidDevicePath(Invalid): msg_fmt = _("The supplied device path (%(path)s) is invalid.") class DevicePathInUse(Invalid): msg_fmt = _("The supplied device path (%(path)s) is in use.") code = 409 class InvalidCPUInfo(Invalid): msg_fmt = _("Unacceptable CPU info: %(reason)s") class InvalidIpAddressError(Invalid): msg_fmt = _("%(address)s is not a valid IP v4/6 address.") class InvalidDiskFormat(Invalid): msg_fmt = _("Disk format %(disk_format)s is not acceptable") class InvalidDiskInfo(Invalid): msg_fmt = _("Disk info file is invalid: %(reason)s") class DiskInfoReadWriteFail(Invalid): msg_fmt = _("Failed to read or write disk info file: %(reason)s") class ImageUnacceptable(Invalid): msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s") class ImageBadRequest(Invalid): msg_fmt = _("Request of image %(image_id)s got BadRequest response: " "%(response)s") class ImageImportImpossible(Invalid): msg_fmt = _("Import of image %(image_id)s refused: %(reason)s") class ImageQuotaExceeded(NovaException): msg_fmt = _("Quota exceeded or out of space for image %(image_id)s " "in the image service.") class InstanceUnacceptable(Invalid): msg_fmt = _("Instance %(instance_id)s is unacceptable: %(reason)s") class InvalidUUID(Invalid): msg_fmt = _("Expected a uuid but received %(uuid)s.") class InvalidID(Invalid): msg_fmt = _("Invalid ID received %(id)s.") class ConstraintNotMet(NovaException): msg_fmt = _("Constraint not met.") code = 412 class NotFound(NovaException): msg_fmt = _("Resource could not be found.") code = 404 class VolumeAttachmentNotFound(NotFound): msg_fmt = _("Volume attachment %(attachment_id)s could not be found.") class VolumeNotFound(NotFound): msg_fmt = _("Volume %(volume_id)s could not be found.") class ShareNotFound(NotFound): msg_fmt = _("Share %(share_id)s could not be found.") class ShareStatusIncorect(NotFound): msg_fmt = _("Share %(share_id)s is in '%(status)s' instead of " "'available' status.") class ShareMappingAlreadyExists(NotFound): msg_fmt = _("Share '%(share_id)s' or tag '%(tag)s' already associated " "to this server.") class ShareProtocolNotSupported(NotFound): msg_fmt = _("Share protocol %(share_proto)s is not supported.") class ShareMissingExportLocation(NotFound): msg_fmt = _("Share %(share_id)s export location is missing.") class ShareError(NovaException): msg_fmt = _("Share %(share_id)s used by instance %(instance_uuid)s " "is in error state.") class ShareErrorUnexpectedStatus(NovaException): msg_fmt = _("Share %(share_id)s used by instance %(instance_uuid)s " "is in an unexpected state.") class ShareUmountError(NovaException): msg_fmt = _("Share id %(share_id)s umount error " "from server %(server_id)s.\n" "Reason: %(reason)s.") class ShareMountError(NovaException): msg_fmt = _("Share id %(share_id)s mount error " "from server %(server_id)s.\n" "Reason: %(reason)s.") class ShareAccessNotFound(NotFound): msg_fmt = _("Share access from Manila could not be found for " "share id %(share_id)s.") class ShareAccessGrantError(NovaException): msg_fmt = _("Share access could not be granted to " "share id 
%(share_id)s.\n" "Reason: %(reason)s.") class ShareAccessRemovalError(NovaException): msg_fmt = _("Share access could not be removed from " "share id %(share_id)s.\n" "Reason: %(reason)s.") class ShareLockNotFound(NovaException): msg_fmt = _("Share lock can not be found for " "share id %(share_id)s.\n") class ShareLockError(NovaException): msg_fmt = _("Share lock can not be acquired from " "share id %(share_id)s.\n" "Reason: %(reason)s.") class ShareLockAlreadyExists(NovaException): msg_fmt = _("Share lock can not be acquired from " "share id %(share_id)s.\n" "Reason: lock already exists.") class ShareUnlockError(NovaException): msg_fmt = _("Share can not be unlocked from " "share id %(share_id)s.\n" "Reason: %(reason)s.") class VolumeTypeNotFound(NotFound): msg_fmt = _("Volume type %(id_or_name)s could not be found.") class UndefinedRootBDM(NovaException): msg_fmt = _("Undefined Block Device Mapping root: BlockDeviceMappingList " "contains Block Device Mappings from multiple instances.") class BDMNotFound(NotFound): msg_fmt = _("No Block Device Mapping with id %(id)s.") class VolumeBDMNotFound(NotFound): msg_fmt = _("No volume Block Device Mapping with id %(volume_id)s.") class VolumeBDMIsMultiAttach(Invalid): msg_fmt = _("Block Device Mapping %(volume_id)s is a multi-attach volume" " and is not valid for this operation.") class VolumeBDMPathNotFound(VolumeBDMNotFound): msg_fmt = _("No volume Block Device Mapping at path: %(path)s") class DeviceDetachFailed(NovaException): msg_fmt = _("Device detach failed for %(device)s: %(reason)s") class DeviceNotFound(NotFound): msg_fmt = _("Device '%(device)s' not found.") class SnapshotNotFound(NotFound): msg_fmt = _("Snapshot %(snapshot_id)s could not be found.") class DiskNotFound(NotFound): msg_fmt = _("No disk at %(location)s") class VolumeDriverNotFound(NotFound): msg_fmt = _("Could not find a handler for %(driver_type)s volume.") class VolumeDriverNotSupported(VolumeDriverNotFound): msg_fmt = _("The %(volume_driver)s volume driver is not supported on this " "platform.") class InvalidImageRef(Invalid): msg_fmt = _("Invalid image href %(image_href)s.") class InvalidImagePropertyName(Invalid): msg_fmt = _("Invalid image property name %(image_property_name)s.") class AutoDiskConfigDisabledByImage(Invalid): msg_fmt = _("Requested image %(image)s " "has automatic disk resize disabled.") class ImageNotFound(NotFound): msg_fmt = _("Image %(image_id)s could not be found.") class ImageDeleteConflict(NovaException): msg_fmt = _("Conflict deleting image. Reason: %(reason)s.") class PreserveEphemeralNotSupported(Invalid): msg_fmt = _("The current driver does not support " "preserving ephemeral partitions.") class InstanceMappingNotFound(NotFound): msg_fmt = _("Instance %(uuid)s has no mapping to a cell.") class InvalidCidr(Invalid): msg_fmt = _("%(cidr)s is not a valid IP network.") class NetworkNotFound(NotFound): msg_fmt = _("Network %(network_id)s could not be found.") class PortNotFound(NotFound): msg_fmt = _("Port id %(port_id)s could not be found.") class NetworkNotFoundForBridge(NetworkNotFound): msg_fmt = _("Network could not be found for bridge %(bridge)s") class NetworkNotFoundForInstance(NetworkNotFound): msg_fmt = _("Network could not be found for instance %(instance_id)s.") class NetworkAmbiguous(Invalid): msg_fmt = _("More than one possible network found. 
Specify " "network ID(s) to select which one(s) to connect to.") class UnableToAutoAllocateNetwork(Invalid): msg_fmt = _('Unable to automatically allocate a network for project ' '%(project_id)s') class NetworkRequiresSubnet(Invalid): msg_fmt = _("Network %(network_uuid)s requires a subnet in order to boot" " instances on.") class ExternalNetworkAttachForbidden(Forbidden): msg_fmt = _("It is not allowed to create an interface on " "external network %(network_uuid)s") class NetworkMissingPhysicalNetwork(NovaException): msg_fmt = _("Physical network is missing for network %(network_uuid)s") class VifDetailsMissingVhostuserSockPath(Invalid): msg_fmt = _("vhostuser_sock_path not present in vif_details" " for vif %(vif_id)s") class VifDetailsMissingMacvtapParameters(Invalid): msg_fmt = _("Parameters %(missing_params)s not present in" " vif_details for vif %(vif_id)s. Check your Neutron" " configuration to validate that the macvtap parameters are" " correct.") class DatastoreNotFound(NotFound): msg_fmt = _("Could not find the datastore reference(s) which the VM uses.") class PortInUse(Invalid): msg_fmt = _("Port %(port_id)s is still in use.") class PortRequiresFixedIP(Invalid): msg_fmt = _("Port %(port_id)s requires a FixedIP in order to be used.") class PortNotUsable(Invalid): msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s.") class PortNotUsableDNS(Invalid): msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s. " "Value %(value)s assigned to dns_name attribute does not " "match instance's hostname %(hostname)s") class PortBindingFailed(Invalid): msg_fmt = _("Binding failed for port %(port_id)s, please check neutron " "logs for more information.") class PortBindingDeletionFailed(NovaException): msg_fmt = _("Failed to delete binding for port(s) %(port_id)s on host " "%(host)s; please check neutron logs for more information") class PortBindingActivationFailed(NovaException): msg_fmt = _("Failed to activate binding for port %(port_id)s on host " "%(host)s; please check neutron logs for more information") class PortUpdateFailed(Invalid): msg_fmt = _("Port update failed for port %(port_id)s: %(reason)s") class AttachSRIOVPortNotSupported(Invalid): msg_fmt = _('Attaching SR-IOV port %(port_id)s to server ' '%(instance_uuid)s is not supported. 
SR-IOV ports must be ' 'specified during server creation.') class FixedIpNotFoundForAddress(NotFound): msg_fmt = _("Fixed IP not found for address %(address)s.") class FixedIpNotFoundForInstance(NotFound): msg_fmt = _("Instance %(instance_uuid)s does not have fixed IP '%(ip)s'.") class FixedIpAlreadyInUse(NovaException): msg_fmt = _("Fixed IP address %(address)s is already in use on instance " "%(instance_uuid)s.") class FixedIpAssociatedWithMultipleInstances(NovaException): msg_fmt = _("More than one instance is associated with fixed IP address " "'%(address)s'.") class FixedIpInvalidOnHost(Invalid): msg_fmt = _("The fixed IP associated with port %(port_id)s is not " "compatible with the host.") class NoMoreFixedIps(NovaException): msg_fmt = _("No fixed IP addresses available for network: %(net)s") class FloatingIpNotFound(NotFound): msg_fmt = _("Floating IP not found for ID %(id)s.") class FloatingIpNotFoundForAddress(FloatingIpNotFound): msg_fmt = _("Floating IP not found for address %(address)s.") class FloatingIpMultipleFoundForAddress(NovaException): msg_fmt = _("Multiple floating IPs are found for address %(address)s.") class FloatingIpPoolNotFound(NotFound): msg_fmt = _("Floating IP pool not found.") safe = True class NoMoreFloatingIps(FloatingIpNotFound): msg_fmt = _("Zero floating IPs available.") safe = True class FloatingIpAssociated(NovaException): msg_fmt = _("Floating IP %(address)s is associated.") class NoFloatingIpInterface(NotFound): msg_fmt = _("Interface %(interface)s not found.") class FloatingIpAssociateFailed(NovaException): msg_fmt = _("Floating IP %(address)s association has failed.") class FloatingIpBadRequest(Invalid): msg_fmt = _("The floating IP request failed with a BadRequest") class KeypairNotFound(NotFound): msg_fmt = _("Keypair %(name)s not found for user %(user_id)s") class ServiceNotFound(NotFound): msg_fmt = _("Service %(service_id)s could not be found.") class ConfGroupForServiceTypeNotFound(ServiceNotFound): msg_fmt = _("No conf group name could be found for service type " "%(stype)s.") class ServiceBinaryExists(NovaException): msg_fmt = _("Service with host %(host)s binary %(binary)s exists.") class ServiceTopicExists(NovaException): msg_fmt = _("Service with host %(host)s topic %(topic)s exists.") class HostNotFound(NotFound): msg_fmt = _("Host %(host)s could not be found.") class ComputeHostNotFound(HostNotFound): msg_fmt = _("Compute host %(host)s could not be found.") class HostBinaryNotFound(NotFound): msg_fmt = _("Could not find binary %(binary)s on host %(host)s.") class InvalidQuotaValue(Invalid): msg_fmt = _("Change would make usage less than 0 for the following " "resources: %(unders)s") class InvalidQuotaMethodUsage(Invalid): msg_fmt = _("Wrong quota method %(method)s used on resource %(res)s") class QuotaNotFound(NotFound): msg_fmt = _("Quota could not be found") class QuotaExists(NovaException): msg_fmt = _("Quota exists for project %(project_id)s, " "resource %(resource)s") class QuotaResourceUnknown(QuotaNotFound): msg_fmt = _("Unknown quota resources %(unknown)s.") class ProjectUserQuotaNotFound(QuotaNotFound): msg_fmt = _("Quota for user %(user_id)s in project %(project_id)s " "could not be found.") class ProjectQuotaNotFound(QuotaNotFound): msg_fmt = _("Quota for project %(project_id)s could not be found.") class QuotaClassNotFound(QuotaNotFound): msg_fmt = _("Quota class %(class_name)s could not be found.") class QuotaClassExists(NovaException): msg_fmt = _("Quota class %(class_name)s exists for resource %(resource)s") class 
SecurityGroupNotFound(NotFound): msg_fmt = _("Security group %(security_group_id)s not found.") class SecurityGroupNotFoundForProject(SecurityGroupNotFound): msg_fmt = _("Security group %(security_group_id)s not found " "for project %(project_id)s.") class SecurityGroupExists(Invalid): msg_fmt = _("Security group %(security_group_name)s already exists " "for project %(project_id)s.") class SecurityGroupCannotBeApplied(Invalid): msg_fmt = _("Network requires port_security_enabled and subnet associated" " in order to apply security groups.") class SecurityGroupConnectionStateConflict(Invalid): code = 409 msg_fmt = _("Cannot apply both stateful and stateless security groups on " "the same port.") class NoUniqueMatch(NovaException): msg_fmt = _("No Unique Match Found.") code = 409 class NoActiveMigrationForInstance(NotFound): msg_fmt = _("Active live migration for instance %(instance_id)s not found") class MigrationNotFound(NotFound): msg_fmt = _("Migration %(migration_id)s could not be found.") class MigrationNotFoundByStatus(MigrationNotFound): msg_fmt = _("Migration not found for instance %(instance_id)s " "with status %(status)s.") class MigrationNotFoundForInstance(MigrationNotFound): msg_fmt = _("Migration %(migration_id)s not found for instance " "%(instance_id)s") class InvalidMigrationState(Invalid): msg_fmt = _("Migration %(migration_id)s state of instance " "%(instance_uuid)s is %(state)s. Cannot %(method)s while the " "migration is in this state.") class ConsoleLogOutputException(NovaException): msg_fmt = _("Console log output could not be retrieved for instance " "%(instance_id)s. Reason: %(reason)s") class ConsoleNotAvailable(NotFound): msg_fmt = _("Guest does not have a console available.") class ConsoleTypeInvalid(Invalid): msg_fmt = _("Invalid console type %(console_type)s") class ConsoleTypeUnavailable(Invalid): msg_fmt = _("Unavailable console type %(console_type)s.") class ConsolePortRangeExhausted(NovaException): msg_fmt = _("The console port range %(min_port)d-%(max_port)d is " "exhausted.") class FlavorNotFound(NotFound): msg_fmt = _("Flavor %(flavor_id)s could not be found.") class FlavorNotFoundByName(FlavorNotFound): msg_fmt = _("Flavor with name %(flavor_name)s could not be found.") class FlavorAccessNotFound(NotFound): msg_fmt = _("Flavor access not found for %(flavor_id)s / " "%(project_id)s combination.") class FlavorExtraSpecUpdateCreateFailed(NovaException): msg_fmt = _("Flavor %(id)s extra spec cannot be updated or created " "after %(retries)d retries.") class CellTimeout(NotFound): msg_fmt = _("Timeout waiting for response from cell") class SchedulerHostFilterNotFound(NotFound): msg_fmt = _("Scheduler Host Filter %(filter_name)s could not be found.") class FlavorExtraSpecsNotFound(NotFound): msg_fmt = _("Flavor %(flavor_id)s has no extra specs with " "key %(extra_specs_key)s.") class ComputeHostMetricNotFound(NotFound): msg_fmt = _("Metric %(name)s could not be found on the compute " "host node %(host)s.%(node)s.") class FileNotFound(NotFound): msg_fmt = _("File %(file_path)s could not be found.") class DeviceBusy(NovaException): msg_fmt = _("device %(file_path)s is busy.") class ClassNotFound(NotFound): msg_fmt = _("Class %(class_name)s could not be found: %(exception)s") class InstanceTagNotFound(NotFound): msg_fmt = _("Instance %(instance_id)s has no tag '%(tag)s'") class KeyPairExists(NovaException): msg_fmt = _("Key pair '%(key_name)s' already exists.") class InstanceExists(NovaException): msg_fmt = _("Instance %(name)s already exists.") class 
FlavorExists(NovaException): msg_fmt = _("Flavor with name %(name)s already exists.") class FlavorIdExists(NovaException): msg_fmt = _("Flavor with ID %(flavor_id)s already exists.") class FlavorAccessExists(NovaException): msg_fmt = _("Flavor access already exists for flavor %(flavor_id)s " "and project %(project_id)s combination.") class InvalidSharedStorage(NovaException): msg_fmt = _("%(path)s is not on shared storage: %(reason)s") class InvalidLocalStorage(NovaException): msg_fmt = _("%(path)s is not on local storage: %(reason)s") class StorageError(NovaException): msg_fmt = _("Storage error: %(reason)s") class MigrationError(NovaException): msg_fmt = _("Migration error: %(reason)s") class MigrationPreCheckError(MigrationError): msg_fmt = _("Migration pre-check error: %(reason)s") class MigrationSchedulerRPCError(MigrationError): msg_fmt = _("Migration select destinations error: %(reason)s") class MalformedRequestBody(NovaException): msg_fmt = _("Malformed message body: %(reason)s") # NOTE(johannes): NotFound should only be used when a 404 error is # appropriate to be returned class ConfigNotFound(NovaException): msg_fmt = _("Could not find config at %(path)s") class PasteAppNotFound(NovaException): msg_fmt = _("Could not load paste app '%(name)s' from %(path)s") class CannotResizeToSameFlavor(NovaException): msg_fmt = _("When resizing, instances must change flavor!") class ResizeError(NovaException): msg_fmt = _("Resize error: %(reason)s") class CannotResizeDisk(NovaException): msg_fmt = _("Server disk was unable to be resized because: %(reason)s") class FlavorMemoryTooSmall(NovaException): msg_fmt = _("Flavor's memory is too small for requested image.") class FlavorDiskTooSmall(NovaException): msg_fmt = _("The created instance's disk would be too small.") class FlavorDiskSmallerThanImage(FlavorDiskTooSmall): msg_fmt = _("Flavor's disk is too small for requested image. Flavor disk " "is %(flavor_size)i bytes, image is %(image_size)i bytes.") class FlavorDiskSmallerThanMinDisk(FlavorDiskTooSmall): msg_fmt = _("Flavor's disk is smaller than the minimum size specified in " "image metadata. Flavor disk is %(flavor_size)i bytes, " "minimum size is %(image_min_disk)i bytes.") class VolumeSmallerThanMinDisk(FlavorDiskTooSmall): msg_fmt = _("Volume is smaller than the minimum size specified in image " "metadata. Volume size is %(volume_size)i bytes, minimum " "size is %(image_min_disk)i bytes.") class BootFromVolumeRequiredForZeroDiskFlavor(Forbidden): msg_fmt = _("Only volume-backed servers are allowed for flavors with " "zero disk.") class NoValidHost(NovaException): msg_fmt = _("No valid host was found. %(reason)s") class RequestFilterFailed(NovaException): msg_fmt = _("Scheduling failed: %(reason)s") class InvalidRoutedNetworkConfiguration(NovaException): msg_fmt = _("Neutron routed networks configuration is invalid: " "%(reason)s.") class MaxRetriesExceeded(NoValidHost): msg_fmt = _("Exceeded maximum number of retries. 
%(reason)s") class OverQuota(NovaException): msg_fmt = _("Quota exceeded for resources: %(overs)s") code = 413 safe = True class TooManyInstances(OverQuota): msg_fmt = _("Quota exceeded for %(overs)s: Requested %(req)s," " but already used %(used)s of %(allowed)s %(overs)s") class FloatingIpLimitExceeded(OverQuota): msg_fmt = _("Maximum number of floating IPs exceeded") class MetadataLimitExceeded(OverQuota): msg_fmt = _("Maximum number of metadata items exceeds %(allowed)d") class OnsetFileLimitExceeded(OverQuota): msg_fmt = _("Personality file limit exceeded") class OnsetFilePathLimitExceeded(OnsetFileLimitExceeded): msg_fmt = _("Personality file path exceeds maximum %(allowed)s") class OnsetFileContentLimitExceeded(OnsetFileLimitExceeded): msg_fmt = _("Personality file content exceeds maximum %(allowed)s") class KeypairLimitExceeded(OverQuota): msg_fmt = _("Quota exceeded, too many key pairs.") class SecurityGroupLimitExceeded(OverQuota): msg_fmt = _("Maximum number of security groups or rules exceeded") class PortLimitExceeded(OverQuota): msg_fmt = _("Maximum number of ports exceeded") class ServerGroupLimitExceeded(OverQuota): msg_fmt = _("Quota exceeded, too many server groups.") class GroupMemberLimitExceeded(OverQuota): msg_fmt = _("Quota exceeded, too many servers in group") class AggregateNotFound(NotFound): msg_fmt = _("Aggregate %(aggregate_id)s could not be found.") class AggregateNameExists(NovaException): msg_fmt = _("Aggregate %(aggregate_name)s already exists.") class AggregateHostNotFound(NotFound): msg_fmt = _("Aggregate %(aggregate_id)s has no host %(host)s.") class AggregateMetadataNotFound(NotFound): msg_fmt = _("Aggregate %(aggregate_id)s has no metadata with " "key %(metadata_key)s.") class AggregateHostExists(NovaException): msg_fmt = _("Aggregate %(aggregate_id)s already has host %(host)s.") class InstancePasswordSetFailed(NovaException): msg_fmt = _("Failed to set admin password on %(instance)s " "because %(reason)s") safe = True class InstanceNotFound(NotFound): msg_fmt = _("Instance %(instance_id)s could not be found.") class InstanceInfoCacheNotFound(NotFound): msg_fmt = _("Info cache for instance %(instance_uuid)s could not be " "found.") class MarkerNotFound(NotFound): msg_fmt = _("Marker %(marker)s could not be found.") class CouldNotFetchImage(NovaException): msg_fmt = _("Could not fetch image %(image_id)s") class CouldNotUploadImage(NovaException): msg_fmt = _("Could not upload image %(image_id)s") class TaskAlreadyRunning(NovaException): msg_fmt = _("Task %(task_name)s is already running on host %(host)s") class TaskNotRunning(NovaException): msg_fmt = _("Task %(task_name)s is not running on host %(host)s") class InstanceIsLocked(InstanceInvalidState): msg_fmt = _("Instance %(instance_uuid)s is locked") class ConfigDriveInvalidValue(Invalid): msg_fmt = _("Invalid value for Config Drive option: %(option)s") class ConfigDriveUnsupportedFormat(Invalid): msg_fmt = _("Config drive format '%(format)s' is not supported.") class ConfigDriveMountFailed(NovaException): msg_fmt = _("Could not mount vfat config drive. %(operation)s failed. " "Error: %(error)s") class ConfigDriveUnknownFormat(NovaException): msg_fmt = _("Unknown config drive format %(format)s. 
Select one of " "iso9660 or vfat.") class ConfigDriveNotFound(NotFound): msg_fmt = _("Instance %(instance_uuid)s requires config drive, but it " "does not exist.") class InterfaceAttachFailed(NovaException): msg_fmt = _("Failed to attach network adapter device to " "%(instance_uuid)s") class InterfaceAttachFailedNoNetwork(Invalid): msg_fmt = _("No specific network was requested and none are available " "for project '%(project_id)s'.") class InterfaceAttachPciClaimFailed(Invalid): msg_fmt = _("Failed to claim PCI device for %(instance_uuid)s during " "interface attach") class InterfaceAttachResourceAllocationFailed(Invalid): msg_fmt = _("Failed to allocate additional resources to %(instance_uuid)s " "during interface attach") class InterfaceDetachFailed(Invalid): msg_fmt = _("Failed to detach network adapter device from " "%(instance_uuid)s") class InstanceUserDataMalformed(NovaException): msg_fmt = _("User data needs to be valid base 64.") class InstanceUpdateConflict(NovaException): msg_fmt = _("Conflict updating instance %(instance_uuid)s. " "Expected: %(expected)s. Actual: %(actual)s") class UnknownInstanceUpdateConflict(InstanceUpdateConflict): msg_fmt = _("Conflict updating instance %(instance_uuid)s, but we were " "unable to determine the cause") class UnexpectedTaskStateError(InstanceUpdateConflict): pass class UnexpectedDeletingTaskStateError(UnexpectedTaskStateError): pass class InstanceActionNotFound(NovaException): msg_fmt = _("Action for request_id %(request_id)s on instance" " %(instance_uuid)s not found") class InstanceActionEventNotFound(NovaException): msg_fmt = _("Event %(event)s not found for action id %(action_id)s") class InstanceEvacuateNotSupported(Invalid): msg_fmt = _('Instance evacuate is not supported.') class InstanceEvacuateNotSupportedTargetState(Invalid): msg_fmt = _("Target state '%(target_state)s' for instance evacuate " "is not supported.") class DBNotAllowed(NovaException): msg_fmt = _('%(binary)s attempted direct database access which is ' 'not allowed by policy') class UnsupportedVirtType(Invalid): msg_fmt = _("Virtualization type '%(virt)s' is not supported by " "this compute driver") class UnsupportedHardware(Invalid): msg_fmt = _("Requested hardware '%(model)s' is not supported by " "the '%(virt)s' virt driver") class UnsupportedRescueBus(Invalid): msg_fmt = _("Requested rescue bus '%(bus)s' is not supported by " "the '%(virt)s' virt driver") class UnsupportedRescueDevice(Invalid): msg_fmt = _("Requested rescue device '%(device)s' is not supported") class UnsupportedRescueImage(Invalid): msg_fmt = _("Requested rescue image '%(image)s' is not supported") class UnsupportedRPCVersion(Invalid): msg_fmt = _("Unsupported RPC version for %(api)s. 
" "Required >= %(required)s") class Base64Exception(NovaException): msg_fmt = _("Invalid Base 64 data for file %(path)s") class BuildAbortException(NovaException): msg_fmt = _("Build of instance %(instance_uuid)s aborted: %(reason)s") class RescheduledException(NovaException): msg_fmt = _("Build of instance %(instance_uuid)s was re-scheduled: " "%(reason)s") class RescheduledByPolicyException(RescheduledException): msg_fmt = _("Build of instance %(instance_uuid)s was re-scheduled: " "%(reason)s") class GroupAffinityViolation(NovaException): msg_fmt = _("%(policy)s instance group policy was violated") class InstanceFaultRollback(NovaException): def __init__(self, inner_exception=None): message = _("Instance rollback performed due to: %s") self.inner_exception = inner_exception super(InstanceFaultRollback, self).__init__(message % inner_exception) class OrphanedObjectError(NovaException): msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object') class ObjectActionError(NovaException): msg_fmt = _('Object action %(action)s failed because: %(reason)s') class InstanceGroupNotFound(NotFound): msg_fmt = _("Instance group %(group_uuid)s could not be found.") class InstanceGroupIdExists(NovaException): msg_fmt = _("Instance group %(group_uuid)s already exists.") class InstanceGroupSaveException(NovaException): msg_fmt = _("%(field)s should not be part of the updates.") class ResourceMonitorError(NovaException): msg_fmt = _("Error when creating resource monitor: %(monitor)s") class PciDeviceWrongAddressFormat(NovaException): msg_fmt = _("The PCI address %(address)s has an incorrect format.") class PciDeviceInvalidDeviceName(NovaException): msg_fmt = _("Invalid PCI Whitelist: " "The PCI whitelist can specify devname or address," " but not both") class PciDeviceNotFoundById(NotFound): msg_fmt = _("PCI device %(id)s not found") class PciDeviceNotFound(NotFound): msg_fmt = _("PCI Device %(node_id)s:%(address)s not found.") class PciDeviceInvalidStatus(Invalid): msg_fmt = _( "PCI device %(compute_node_id)s:%(address)s is %(status)s " "instead of %(hopestatus)s") class PciDeviceVFInvalidStatus(Invalid): msg_fmt = _( "Not all Virtual Functions of PF %(compute_node_id)s:%(address)s " "are free.") class PciDevicePFInvalidStatus(Invalid): msg_fmt = _( "Physical Function %(compute_node_id)s:%(address)s, related to VF" " %(compute_node_id)s:%(vf_address)s is %(status)s " "instead of %(hopestatus)s") class PciDeviceInvalidOwner(Invalid): msg_fmt = _( "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s " "instead of %(hopeowner)s") class PciDeviceRequestFailed(NovaException): msg_fmt = _( "PCI device request %(requests)s failed") class PciDevicePoolEmpty(NovaException): msg_fmt = _( "Attempt to consume PCI device %(compute_node_id)s:%(address)s " "from empty pool") class PciInvalidAlias(Invalid): msg_fmt = _("Invalid PCI alias definition: %(reason)s") class PciRequestAliasNotDefined(NovaException): msg_fmt = _("PCI alias %(alias)s is not defined") class PciConfigInvalidSpec(Invalid): msg_fmt = _("Invalid [pci]device_spec config: %(reason)s") class PciRequestFromVIFNotFound(NotFound): msg_fmt = _("Failed to locate PCI request associated with the given VIF " "PCI address: %(pci_slot)s on compute node: %(node_id)s") class PciDeviceRemoteManagedNotPresent(NovaException): msg_fmt = _('Invalid PCI Whitelist: A device specified as "remote_managed"' ' is not actually present on the host') class PciDeviceInvalidPFRemoteManaged(NovaException): msg_fmt = _('Invalid PCI Whitelist: PFs must not have 
the "remote_managed"' 'tag, device address: %(address)s') # Cannot be templated, msg needs to be constructed when raised. class InternalError(NovaException): """Generic hypervisor errors. Consider subclassing this to provide more specific exceptions. """ msg_fmt = "%(err)s" class PciDeviceDetachFailed(NovaException): msg_fmt = _("Failed to detach PCI device %(dev)s: %(reason)s") class PciDeviceUnsupportedHypervisor(NovaException): msg_fmt = _("%(type)s hypervisor does not support PCI devices") class KeyManagerError(NovaException): msg_fmt = _("Key manager error: %(reason)s") class VolumesNotRemoved(Invalid): msg_fmt = _("Failed to remove volume(s): (%(reason)s)") class VolumeRebaseFailed(NovaException): msg_fmt = _("Volume rebase failed: %(reason)s") class InvalidVideoMode(Invalid): msg_fmt = _("Provided video model (%(model)s) is not supported.") class RngDeviceNotExist(Invalid): msg_fmt = _("The provided RNG device path: (%(path)s) is not " "present on the host.") class RequestedVRamTooHigh(NovaException): msg_fmt = _("The requested amount of video memory %(req_vram)d is higher " "than the maximum allowed by flavor %(max_vram)d.") class SecurityProxyNegotiationFailed(NovaException): msg_fmt = _("Failed to negotiate security type with server: %(reason)s") class RFBAuthHandshakeFailed(NovaException): msg_fmt = _("Failed to complete auth handshake: %(reason)s") class RFBAuthNoAvailableScheme(NovaException): msg_fmt = _("No matching auth scheme: allowed types: '%(allowed_types)s', " "desired types: '%(desired_types)s'") class InvalidWatchdogAction(Invalid): msg_fmt = _("Provided watchdog action (%(action)s) is not supported.") class LiveMigrationNotSubmitted(NovaException): msg_fmt = _("Failed to submit live migration %(migration_uuid)s for " "instance %(instance_uuid)s for processing.") class SelectionObjectsWithOldRPCVersionNotSupported(NovaException): msg_fmt = _("Requests for Selection objects with alternates are not " "supported in select_destinations() before RPC version 4.5; " "version %(version)s requested.") class LiveMigrationURINotAvailable(NovaException): msg_fmt = _('No live migration URI configured and no default available ' 'for "%(virt_type)s" hypervisor virtualization type.') class UnshelveException(NovaException): msg_fmt = _("Error during unshelve instance %(instance_id)s: %(reason)s") class MismatchVolumeAZException(Invalid): msg_fmt = _("The availability zone between the server and its attached " "volumes do not match: %(reason)s.") code = 409 class UnshelveInstanceInvalidState(InstanceInvalidState): msg_fmt = _('Specifying an availability zone or a host when unshelving ' 'server "%(instance_uuid)s" with status "%(state)s" is not ' 'supported. 
The server status must be SHELVED_OFFLOADED.') code = 409 class UnshelveHostNotInAZ(Invalid): msg_fmt = _('Host "%(host)s" is not in the availability zone ' '"%(availability_zone)s".') code = 409 class ImageVCPULimitsRangeExceeded(Invalid): msg_fmt = _('Image vCPU topology limits (sockets=%(image_sockets)d, ' 'cores=%(image_cores)d, threads=%(image_threads)d) exceeds ' 'the limits of the flavor (sockets=%(flavor_sockets)d, ' 'cores=%(flavor_cores)d, threads=%(flavor_threads)d)') class ImageVCPUTopologyRangeExceeded(Invalid): msg_fmt = _('Image vCPU topology (sockets=%(image_sockets)d, ' 'cores=%(image_cores)d, threads=%(image_threads)d) exceeds ' 'the limits of the flavor or image (sockets=%(max_sockets)d, ' 'cores=%(max_cores)d, threads=%(max_threads)d)') class ImageVCPULimitsRangeImpossible(Invalid): msg_fmt = _("Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d " "are impossible to satisfy for vcpus count %(vcpus)d") class InvalidArchitectureName(Invalid): msg_fmt = _("Architecture name '%(arch)s' is not recognised") class ImageNUMATopologyIncomplete(Invalid): msg_fmt = _("CPU and memory allocation must be provided for all " "NUMA nodes") class ImageNUMATopologyForbidden(Forbidden): msg_fmt = _("Image property '%(name)s' is not permitted to override " "NUMA configuration set against the flavor") class ImageNUMATopologyRebuildConflict(Invalid): msg_fmt = _( "An instance's NUMA topology cannot be changed as part of a rebuild. " "The image provided is invalid for this instance.") class ImagePCINUMAPolicyForbidden(Forbidden): msg_fmt = _("Image property 'hw_pci_numa_affinity_policy' is not " "permitted to override the 'hw:pci_numa_affinity_policy' " "flavor extra spec.") class ImageNUMATopologyAsymmetric(Invalid): msg_fmt = _("Instance CPUs and/or memory cannot be evenly distributed " "across instance NUMA nodes. 
Explicit assignment of CPUs " "and memory to nodes is required") class ImageNUMATopologyCPUOutOfRange(Invalid): msg_fmt = _("CPU number %(cpunum)d is larger than max %(cpumax)d") class ImageNUMATopologyCPUDuplicates(Invalid): msg_fmt = _("CPU number %(cpunum)d is assigned to two nodes") class ImageNUMATopologyCPUsUnassigned(Invalid): msg_fmt = _("CPU number %(cpuset)s is not assigned to any node") class ImageNUMATopologyMemoryOutOfRange(Invalid): msg_fmt = _("%(memsize)d MB of memory assigned, but expected " "%(memtotal)d MB") class InvalidHostname(Invalid): msg_fmt = _("Invalid characters in hostname '%(hostname)s'") class NumaTopologyNotFound(NotFound): msg_fmt = _("Instance %(instance_uuid)s does not specify a NUMA topology") class MigrationContextNotFound(NotFound): msg_fmt = _("Instance %(instance_uuid)s does not specify a migration " "context.") class SocketPortRangeExhaustedException(NovaException): msg_fmt = _("Not able to acquire a free port for %(host)s") class SocketPortInUseException(NovaException): msg_fmt = _("Not able to bind %(host)s:%(port)d, %(error)s") class ImageSerialPortNumberInvalid(Invalid): msg_fmt = _("Number of serial ports specified in flavor is invalid: " "expected an integer, got '%(num_ports)s'") class ImageSerialPortNumberExceedFlavorValue(Invalid): msg_fmt = _("Forbidden to exceed flavor value of number of serial " "ports passed in image meta.") class SerialPortNumberLimitExceeded(Invalid): msg_fmt = _("Maximum number of serial port exceeds %(allowed)d " "for %(virt_type)s") class InvalidImageConfigDrive(Invalid): msg_fmt = _("Image's config drive option '%(config_drive)s' is invalid") class InvalidHypervisorVirtType(Invalid): msg_fmt = _("Hypervisor virtualization type '%(hv_type)s' is not " "recognised") class InvalidMachineType(Invalid): msg_fmt = _("Machine type '%(mtype)s' is not compatible with image " "%(image_name)s (%(image_id)s): %(reason)s") class InvalidMachineTypeUpdate(Invalid): msg_fmt = _("Cannot update machine type %(existing_machine_type)s to " "%(machine_type)s.") class UnsupportedMachineType(Invalid): msg_fmt = _("Machine type %(machine_type)s is not supported.") class InvalidVirtualMachineMode(Invalid): msg_fmt = _("Virtual machine mode '%(vmmode)s' is not recognised") class InvalidToken(Invalid): msg_fmt = _("The token '%(token)s' is invalid or has expired") class TokenInUse(Invalid): msg_fmt = _("The generated token is invalid") class InvalidConnectionInfo(Invalid): msg_fmt = _("Invalid Connection Info") class InstanceQuiesceNotSupported(Invalid): msg_fmt = _('Quiescing is not supported in instance %(instance_id)s') class InstanceAgentNotEnabled(Invalid): msg_fmt = _('Guest agent is not enabled for the instance') safe = True class QemuGuestAgentNotEnabled(InstanceAgentNotEnabled): msg_fmt = _('QEMU guest agent is not enabled') class SetAdminPasswdNotSupported(Invalid): msg_fmt = _('Set admin password is not supported') safe = True class MemoryPageSizeInvalid(Invalid): msg_fmt = _("Invalid memory page size '%(pagesize)s'") class MemoryPageSizeForbidden(Invalid): msg_fmt = _("Page size %(pagesize)s forbidden against '%(against)s'") class MemoryPageSizeNotSupported(Invalid): msg_fmt = _("Page size %(pagesize)s is not supported by the host.") class LockMemoryForbidden(Forbidden): msg_fmt = _("locked_memory value in image or flavor is forbidden when " "mem_page_size is not set.") class FlavorImageLockedMemoryConflict(NovaException): msg_fmt = _("locked_memory value in image (%(image)s) and flavor " "(%(flavor)s) conflict. 
A consistent value is expected if " "both specified.") class CPUPinningInvalid(Invalid): msg_fmt = _("CPU set to pin %(requested)s must be a subset of " "free CPU set %(available)s") class CPUUnpinningInvalid(Invalid): msg_fmt = _("CPU set to unpin %(requested)s must be a subset of " "pinned CPU set %(available)s") class CPUPinningUnknown(Invalid): msg_fmt = _("CPU set to pin %(requested)s must be a subset of " "known CPU set %(available)s") class CPUUnpinningUnknown(Invalid): msg_fmt = _("CPU set to unpin %(requested)s must be a subset of " "known CPU set %(available)s") class ImageCPUPinningForbidden(Forbidden): msg_fmt = _("Image property 'hw_cpu_policy' is not permitted to override " "CPU pinning policy set against the flavor") class ImageCPUThreadPolicyForbidden(Forbidden): msg_fmt = _("Image property 'hw_cpu_thread_policy' is not permitted to " "override CPU thread pinning policy set against the flavor") class UnsupportedPolicyException(Invalid): msg_fmt = _("ServerGroup policy is not supported: %(reason)s") class CellMappingNotFound(NotFound): msg_fmt = _("Cell %(uuid)s has no mapping.") class NUMATopologyUnsupported(Invalid): msg_fmt = _("Host does not support guests with NUMA topology set") class MemoryPagesUnsupported(Invalid): msg_fmt = _("Host does not support guests with custom memory page sizes") class InvalidImageFormat(Invalid): msg_fmt = _("Invalid image format '%(format)s'") class UnsupportedImageModel(Invalid): msg_fmt = _("Image model '%(image)s' is not supported") class HostMappingNotFound(Invalid): msg_fmt = _("Host '%(name)s' is not mapped to any cell") class HostMappingExists(Invalid): msg_fmt = _("Host '%(name)s' mapping already exists") class RealtimeConfigurationInvalid(Invalid): msg_fmt = _("Cannot set realtime policy in a non dedicated " "cpu pinning policy") class CPUThreadPolicyConfigurationInvalid(Invalid): msg_fmt = _("Cannot set cpu thread pinning policy in a non dedicated " "cpu pinning policy") class RequestSpecNotFound(NotFound): msg_fmt = _("RequestSpec not found for instance %(instance_uuid)s") class UEFINotSupported(Invalid): msg_fmt = _("UEFI is not supported") class SecureBootNotSupported(Invalid): msg_fmt = _("Secure Boot is not supported by host") class FirmwareSMMNotSupported(Invalid): msg_fmt = _("This firmware doesn't require (support) SMM") class TriggerCrashDumpNotSupported(Invalid): msg_fmt = _("Triggering crash dump is not supported") class UnsupportedHostCPUControlPolicy(Invalid): msg_fmt = _("Requested CPU control policy not supported by host") class LibguestfsCannotReadKernel(Invalid): msg_fmt = _("Libguestfs does not have permission to read host kernel.") class RealtimeMaskNotFoundOrInvalid(Invalid): msg_fmt = _("Use of realtime CPUs requires either one or more " "non-realtime CPU(s) or offloaded emulator threads.") class OsInfoNotFound(NotFound): msg_fmt = _("No configuration information found for operating system " "%(os_name)s") class BuildRequestNotFound(NotFound): msg_fmt = _("BuildRequest not found for instance %(uuid)s") class AttachInterfaceNotSupported(Invalid): msg_fmt = _("Attaching interfaces is not supported for " "instance %(instance_uuid)s.") class AttachInterfaceWithQoSPolicyNotSupported(AttachInterfaceNotSupported): msg_fmt = _("Attaching interfaces with QoS policy is not supported for " "instance %(instance_uuid)s.") class AttachWithExtendedQoSPolicyNotSupported(AttachInterfaceNotSupported): msg_fmt = _( "The interface attach server operation with port having extended " "resource request, like a port with both QoS 
minimum bandwidth and " "packet rate policies, is not yet supported.") class NetworksWithQoSPolicyNotSupported(Invalid): msg_fmt = _("Using networks with QoS policy is not supported for " "instance %(instance_uuid)s. (Network ID is %(network_id)s)") class CreateWithPortResourceRequestOldVersion(Invalid): msg_fmt = _("Creating servers with ports having resource requests, like a " "port with a QoS minimum bandwidth policy, is not supported " "until microversion 2.72.") class ExtendedResourceRequestOldCompute(Invalid): msg_fmt = _("The port-resource-request-groups neutron API extension is " "not supported by old nova compute service. Upgrade your " "compute services to Xena (24.0.0) or later.") class InvalidReservedMemoryPagesOption(Invalid): msg_fmt = _("The format of the option 'reserved_huge_pages' is invalid. " "(found '%(conf)s') Please refer to the nova " "config-reference.") # An exception with this name is used on both sides of the placement/ # nova interaction. class ResourceProviderInUse(NovaException): msg_fmt = _("Resource provider has allocations.") class ResourceProviderRetrievalFailed(NovaException): msg_fmt = _("Failed to get resource provider with UUID %(uuid)s") class ResourceProviderAggregateRetrievalFailed(NovaException): msg_fmt = _("Failed to get aggregates for resource provider with UUID" " %(uuid)s") class ResourceProviderTraitRetrievalFailed(NovaException): msg_fmt = _("Failed to get traits for resource provider with UUID" " %(uuid)s") class ResourceProviderCreationFailed(NovaException): msg_fmt = _("Failed to create resource provider %(name)s") class ResourceProviderDeletionFailed(NovaException): msg_fmt = _("Failed to delete resource provider %(uuid)s") class ResourceProviderUpdateFailed(NovaException): msg_fmt = _("Failed to update resource provider via URL %(url)s: " "%(error)s") class ResourceProviderNotFound(NotFound): msg_fmt = _("No such resource provider %(name_or_uuid)s.") class ResourceProviderSyncFailed(NovaException): msg_fmt = _("Failed to synchronize the placement service with resource " "provider information supplied by the compute host.") class PlacementAPIConnectFailure(NovaException): msg_fmt = _("Unable to communicate with the Placement API.") class PlacementAPIConflict(NovaException): """Any 409 error from placement APIs should use (a subclass of) this exception. """ msg_fmt = _("A conflict was encountered attempting to invoke the " "placement API at URL %(url)s: %(error)s") class ResourceProviderUpdateConflict(PlacementAPIConflict): """A 409 caused by generation mismatch from attempting to update an existing provider record or its associated data (aggregates, traits, etc.). """ msg_fmt = _("A conflict was encountered attempting to update resource " "provider %(uuid)s (generation %(generation)d): %(error)s") class PlacementReshapeConflict(PlacementAPIConflict): """A 409 caused by generation mismatch from attempting to reshape a provider tree. """ msg_fmt = _( "A conflict was encountered attempting to reshape a provider tree: " "%(error)s" ) class InvalidResourceClass(Invalid): msg_fmt = _("Resource class '%(resource_class)s' invalid.") class InvalidInventory(Invalid): msg_fmt = _("Inventory for '%(resource_class)s' on " "resource provider '%(resource_provider)s' invalid.") # An exception with this name is used on both sides of the placement/ # nova interaction. 
class InventoryInUse(InvalidInventory): pass class UsagesRetrievalFailed(NovaException): msg_fmt = _("Failed to retrieve usages for project '%(project_id)s' and " "user '%(user_id)s'.") class NotSupportedWithOption(Invalid): msg_fmt = _("%(operation)s is not supported in conjunction with the " "current %(option)s setting. Please refer to the nova " "config-reference.") class Unauthorized(NovaException): msg_fmt = _("Not authorized.") code = 401 class NeutronAdminCredentialConfigurationInvalid(Invalid): msg_fmt = _("Networking client is experiencing an unauthorized exception.") class InvalidEmulatorThreadsPolicy(Invalid): msg_fmt = _("CPU emulator threads option requested is invalid, " "given: '%(requested)s', available: '%(available)s'.") class InvalidCPUAllocationPolicy(Invalid): msg_fmt = _("CPU policy requested from '%(source)s' is invalid, " "given: '%(requested)s', available: '%(available)s'.") class InvalidCPUThreadAllocationPolicy(Invalid): msg_fmt = _("CPU thread policy requested from '%(source)s' is invalid, " "given: '%(requested)s', available: '%(available)s'.") class BadRequirementEmulatorThreadsPolicy(Invalid): msg_fmt = _("An isolated CPU emulator threads option requires a dedicated " "CPU policy option.") class InvalidNetworkNUMAAffinity(Invalid): msg_fmt = _("Invalid NUMA network affinity configured: %(reason)s") class InvalidPCINUMAAffinity(Invalid): msg_fmt = _("Invalid PCI NUMA affinity configured: %(policy)s") class TraitRetrievalFailed(NovaException): msg_fmt = _("Failed to retrieve traits from the placement API: %(error)s") class TraitCreationFailed(NovaException): msg_fmt = _("Failed to create trait %(name)s: %(error)s") class CannotMigrateToSameHost(NovaException): msg_fmt = _("Cannot migrate to the host where the server exists.") class VirtDriverNotReady(NovaException): msg_fmt = _("Virt driver is not ready.") class InvalidPeerList(NovaException): msg_fmt = _("Configured nova-compute peer list for the ironic virt " "driver is invalid on host %(host)s") class InstanceDiskMappingFailed(NovaException): msg_fmt = _("Failed to map boot disk of instance %(instance_name)s to " "the management partition from any Virtual I/O Server.") class NewMgmtMappingNotFoundException(NovaException): msg_fmt = _("Failed to find newly-created mapping of storage element " "%(stg_name)s from Virtual I/O Server %(vios_name)s to the " "management partition.") class NoDiskDiscoveryException(NovaException): msg_fmt = _("Having scanned SCSI bus %(bus)x on the management partition, " "disk with UDID %(udid)s failed to appear after %(polls)d " "polls over %(timeout)d seconds.") class UniqueDiskDiscoveryException(NovaException): msg_fmt = _("Expected to find exactly one disk on the management " "partition at %(path_pattern)s; found %(count)d.") class DeviceDeletionException(NovaException): msg_fmt = _("Device %(devpath)s is still present on the management " "partition after attempting to delete it. Polled %(polls)d " "times over %(timeout)d seconds.") class OptRequiredIfOtherOptValue(NovaException): msg_fmt = _("The %(then_opt)s option is required if %(if_opt)s is " "specified as '%(if_value)s'.") class AllocationCreateFailed(NovaException): msg_fmt = _('Failed to create allocations for instance %(instance)s ' 'against resource provider %(provider)s.') class AllocationUpdateFailed(NovaException): msg_fmt = _('Failed to update allocations for consumer %(consumer_uuid)s. 
' 'Error: %(error)s') class AllocationMoveFailed(NovaException): msg_fmt = _('Failed to move allocations from consumer %(source_consumer)s ' 'to consumer %(target_consumer)s. ' 'Error: %(error)s') class AllocationDeleteFailed(NovaException): msg_fmt = _('Failed to delete allocations for consumer %(consumer_uuid)s. ' 'Error: %(error)s') class TooManyComputesForHost(NovaException): msg_fmt = _('Unexpected number of compute node records ' '(%(num_computes)d) found for host %(host)s. There should ' 'only be a one-to-one mapping.') class CertificateValidationFailed(NovaException): msg_fmt = _("Image signature certificate validation failed for " "certificate: %(cert_uuid)s. %(reason)s") class InstanceRescueFailure(NovaException): msg_fmt = _("Failed to move instance to rescue mode: %(reason)s") class InstanceUnRescueFailure(NovaException): msg_fmt = _("Failed to unrescue instance: %(reason)s") class IronicAPIVersionNotAvailable(NovaException): msg_fmt = _('Ironic API version %(version)s is not available.') class ZVMDriverException(NovaException): msg_fmt = _("ZVM Driver has error: %(error)s") class ZVMConnectorError(ZVMDriverException): msg_fmt = _("zVM Cloud Connector request failed: %(results)s") def __init__(self, message=None, **kwargs): """Exception for zVM ConnectorClient calls. :param results: The object returned from ZVMConnector.send_request. """ super(ZVMConnectorError, self).__init__(message=message, **kwargs) results = kwargs.get('results', {}) self.overallRC = results.get('overallRC') self.rc = results.get('rc') self.rs = results.get('rs') self.errmsg = results.get('errmsg') class NoResourceClass(NovaException): msg_fmt = _("Resource class not found for Ironic node %(node)s.") class ResourceProviderAllocationRetrievalFailed(NovaException): msg_fmt = _("Failed to retrieve allocations for resource provider " "%(rp_uuid)s: %(error)s") class ConsumerAllocationRetrievalFailed(NovaException): msg_fmt = _("Failed to retrieve allocations for consumer " "%(consumer_uuid)s: %(error)s") class ReshapeFailed(NovaException): msg_fmt = _("Resource provider inventory and allocation data migration " "failed: %(error)s") class ReshapeNeeded(NovaException): msg_fmt = _("Virt driver indicates that provider inventories need to be " "moved.") class FlavorImageConflict(NovaException): msg_fmt = _("Conflicting values for %(setting)s found in the flavor " "(%(flavor_val)s) and the image (%(image_val)s).") class MissingDomainCapabilityFeatureException(NovaException): msg_fmt = _("Guest config could not be built without domain capabilities " "including <%(feature)s> feature.") class HealAllocationException(NovaException): msg_fmt = _("Healing instance allocation failed.") class HealvGPUAllocationNotSupported(HealAllocationException): msg_fmt = _( "Healing allocation for instance %(instance_uuid)s with vGPU resource " "request is not supported." ) class HealDeviceProfileAllocationNotSupported(HealAllocationException): msg_fmt = _( "Healing allocation for instance %(instance_uuid)s with Cyborg device " "profile request is not supported." ) class HealPortAllocationException(NovaException): msg_fmt = _("Healing port allocation failed.") class UnableToQueryPorts(HealPortAllocationException): msg_fmt = _("Unable to query ports for instance %(instance_uuid)s: " "%(error)s") class UnableToUpdatePorts(HealPortAllocationException): msg_fmt = _("Unable to update ports with allocations that are about to be " "created in placement: %(error)s. The healing of the " "instance is aborted. 
It is safe to try to heal the instance " "again.") class UnableToRollbackPortUpdates(HealPortAllocationException): msg_fmt = _("Failed to update neutron ports with allocation keys and the " "automatic rollback of the previously successful port updates " "also failed: %(error)s. Make sure that the " "binding:profile.allocation key of the affected ports " "%(port_uuids)s are manually cleaned in neutron according to " "document https://docs.openstack.org/nova/latest/cli/" "nova-manage.html#placement. If you re-run the script without " "the manual fix then the missing allocation for these ports " "will not be healed in placement.") class AssignedResourceNotFound(NovaException): msg_fmt = _("Assigned resources not found: %(reason)s") class PMEMNamespaceConfigInvalid(NovaException): msg_fmt = _("The pmem_namespaces configuration is invalid: %(reason)s, " "please check your conf file. ") class GetPMEMNamespacesFailed(NovaException): msg_fmt = _("Get PMEM namespaces on host failed: %(reason)s.") class VPMEMCleanupFailed(NovaException): msg_fmt = _("Failed to clean up the vpmem backend device %(dev)s: " "%(error)s") class RequestGroupSuffixConflict(NovaException): msg_fmt = _("Duplicate request group suffix %(suffix)s.") class AmbiguousResourceProviderForPCIRequest(NovaException): msg_fmt = _("Allocating resources from multiple resource providers " "%(providers)s for a single pci request %(requester)s is not " "supported.") class UnexpectedResourceProviderNameForPCIRequest(NovaException): msg_fmt = _("Resource provider %(provider)s used to allocate resources " "for the pci request %(requester)s does not have a properly " "formatted name. Expected name format is " "::, but got " "%(provider_name)s") class DeviceProfileError(NovaException): msg_fmt = _("Device profile name %(name)s: %(msg)s") class AcceleratorRequestOpFailed(NovaException): msg_fmt = _("Failed to %(op)s accelerator requests: %(msg)s") class AcceleratorRequestBindingFailed(NovaException): msg_fmt = _("Failed to bind accelerator requests: %(msg)s") def __init__(self, message=None, arqs=None, **kwargs): super(AcceleratorRequestBindingFailed, self).__init__( message=message, **kwargs) self.arqs = arqs or [] class InvalidLibvirtMdevConfig(NovaException): msg_fmt = _('Invalid configuration for mdev-capable devices: %(reason)s') class RequiredMixedInstancePolicy(Invalid): msg_fmt = _("Cannot specify 'hw:cpu_dedicated_mask' without the " "'mixed' policy.") class RequiredMixedOrRealtimeCPUMask(Invalid): msg_fmt = _("Dedicated CPU set can be specified from either " "'hw:cpu_dedicated_mask' or 'hw:cpu_realtime_mask' when " "using 'mixed' CPU policy. 'hw:cpu_dedicated_mask' and " "'hw:cpu_realtime_mask' can not be specified at the same " "time, or be specified with none of them.") class MixedInstanceNotSupportByComputeService(NovaException): msg_fmt = _("To support 'mixed' policy instance 'nova-compute' service " "must be upgraded to 'Victoria' or later.") class InvalidMixedInstanceDedicatedMask(Invalid): msg_fmt = _("Mixed instance must have at least 1 pinned vCPU and 1 " "unpinned vCPU. See 'hw:cpu_dedicated_mask'.") class ProviderConfigException(NovaException): """Exception indicating an error occurred processing provider config files. This class is used to avoid a raised exception inadvertently being caught and mishandled by the resource tracker. 
""" msg_fmt = _("An error occurred while processing " "a provider config file: %(error)s") class PlacementPciException(NovaException): msg_fmt = _( "Failed to gather or report PCI resources to Placement: %(error)s") class PlacementPciDependentDeviceException(PlacementPciException): msg_fmt = _( "Configuring both %(parent_dev)s and %(children_devs)s in " "[pci]device_spec is not supported. Either the parent PF or its " "children VFs can be configured." ) class PlacementPciMixedResourceClassException(PlacementPciException): msg_fmt = _( "VFs from the same PF cannot be configured with different " "'resource_class' values in [pci]device_spec. We got %(new_rc)s " "for %(new_dev)s and %(current_rc)s for %(current_devs)s." ) class PlacementPciMixedTraitsException(PlacementPciException): msg_fmt = _( "VFs from the same PF cannot be configured with different set " "of 'traits' in [pci]device_spec. We got %(new_traits)s for " "%(new_dev)s and %(current_traits)s for %(current_devs)s." ) class ReimageException(NovaException): msg_fmt = _("Reimaging volume failed.") class InvalidNodeConfiguration(NovaException): msg_fmt = _('Invalid node identity configuration: %(reason)s') class DuplicateRecord(NovaException): msg_fmt = _('Unable to create duplicate record for %(target)s') class NotSupportedComputeForEvacuateV295(NotSupported): msg_fmt = _("Starting with microversion 2.95, evacuate API will stop " "instance on destination. To evacuate before upgrades are " "complete please use an older microversion. Required version " "for compute %(expected), current version %(currently)s") class EphemeralEncryptionSecretNotFound(Invalid): msg_fmt = _( 'Encryption secret %(secret_uuid)s was not found in the key manager') class EphemeralEncryptionCleanupFailed(NovaException): msg_fmt = _("Failed to clean up ephemeral encryption secrets: " "%(error)s") class HostConflict(Exception): pass class InstanceEventTimeout(Exception): """A custom timeout exception to replace eventlet.timeout.Timeout.""" pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/exception_wrapper.py0000664000175000017500000000651300000000000017642 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools import inspect from oslo_utils import excutils import nova.conf from nova.notifications.objects import base from nova.notifications.objects import exception as exception_obj from nova.objects import fields from nova import rpc from nova import safe_utils CONF = nova.conf.CONF @rpc.if_notifications_enabled def _emit_versioned_exception_notification(context, exception, source): payload = exception_obj.ExceptionPayload.from_exception(exception) publisher = base.NotificationPublisher(host=CONF.host, source=source) event_type = base.EventType( object='compute', action=fields.NotificationAction.EXCEPTION, ) notification = exception_obj.ExceptionNotification( publisher=publisher, event_type=event_type, priority=fields.NotificationPriority.ERROR, payload=payload, ) notification.emit(context) def _emit_legacy_exception_notification( context, exception, service, function_name, args, ): notifier = rpc.get_notifier(service) payload = {'exception': exception, 'args': args} notifier.error(context, function_name, payload) def wrap_exception(service, binary): """This decorator wraps a method to catch any exceptions that may get thrown. It also optionally sends the exception to the notification system. """ def inner(f): def wrapped(self, context, *args, **kw): # Don't store self or context in the payload, it now seems to # contain confidential information. try: return f(self, context, *args, **kw) except Exception as exc: with excutils.save_and_reraise_exception(): call_dict = _get_call_dict(f, self, context, *args, **kw) function_name = f.__name__ _emit_legacy_exception_notification( context, exc, service, function_name, call_dict) _emit_versioned_exception_notification( context, exc, binary) return functools.wraps(f)(wrapped) return inner def _get_call_dict(function, self, context, *args, **kw): wrapped_func = safe_utils.get_wrapped_function(function) call_dict = inspect.getcallargs(wrapped_func, self, context, *args, **kw) # self can't be serialized and shouldn't be in the # payload call_dict.pop('self', None) # NOTE(gibi) remove context as well as it contains sensitive information # and it can also contain circular references call_dict.pop('context', None) return _cleanse_dict(call_dict) def _cleanse_dict(original): """Strip all admin_password, new_pass, rescue_pass keys from a dict.""" return {k: v for k, v in original.items() if "_pass" not in k} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/filesystem.py0000664000175000017500000001006000000000000016260 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Functions to address filesystem calls, particularly sysfs.""" import functools import os import time from oslo_log import log as logging from nova import exception LOG = logging.getLogger(__name__) SYS = '/sys' RETRY_LIMIT = 5 # a retry decorator to handle EBUSY def retry_if_busy(func): """Decorator to retry a function if it raises DeviceBusy. 
This decorator will retry the function RETRY_LIMIT=5 times if it raises DeviceBusy. It will sleep for 1 second on the first retry, 2 seconds on the second retry, and so on, up to RETRY_LIMIT seconds. If the function still raises DeviceBusy after RETRY_LIMIT retries, the exception will be raised. """ @functools.wraps(func) def wrapper(*args, **kwargs): for i in range(RETRY_LIMIT): try: return func(*args, **kwargs) except exception.DeviceBusy as e: # if we have retried RETRY_LIMIT times, raise the exception # otherwise, sleep and retry, i is 0-based so we need # to add 1 to it count = i + 1 if count < RETRY_LIMIT: LOG.debug( f"File {e.kwargs['file_path']} is busy, " f"sleeping {count} seconds before retrying") time.sleep(count) continue raise return wrapper # NOTE(bauzas): this method is deliberately not wrapped in a privsep entrypoint @retry_if_busy def read_sys(path: str) -> str: """Reads the content of a file in the sys filesystem. :param path: relative or absolute. If relative, will be prefixed by /sys. :returns: contents of that file. :raises: nova.exception.FileNotFound if we can't read that file. :raises: nova.exception.DeviceBusy if the file is busy. """ try: # The path can be absolute with a /sys prefix but that's fine. with open(os.path.join(SYS, path), mode='r') as data: return data.read() except OSError as exc: # errno 16 is EBUSY, which means the file is busy. if exc.errno == 16: raise exception.DeviceBusy(file_path=path) from exc # convert permission denied to file not found raise exception.FileNotFound(file_path=path) from exc except ValueError as exc: raise exception.FileNotFound(file_path=path) from exc # NOTE(bauzas): this method is deliberately not wrapped in a privsep entrypoint # In order to correctly use it, you need to decorate the caller with a specific # privsep entrypoint. @retry_if_busy def write_sys(path: str, data: str) -> None: """Writes the content of a file in the sys filesystem with data. :param path: relative or absolute. If relative, will be prefixed by /sys. :param data: the data to write. :returns: contents of that file. :raises: nova.exception.FileNotFound if we can't write that file. :raises: nova.exception.DeviceBusy if the file is busy. """ try: # The path can be absolute with a /sys prefix but that's fine. with open(os.path.join(SYS, path), mode='w') as fd: fd.write(data) except OSError as exc: # errno 16 is EBUSY, which means the file is busy. if exc.errno == 16: raise exception.DeviceBusy(file_path=path) from exc # convert permission denied to file not found raise exception.FileNotFound(file_path=path) from exc except ValueError as exc: raise exception.FileNotFound(file_path=path) from exc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/filters.py0000664000175000017500000001162600000000000015555 0ustar00zuulzuul00000000000000# Copyright (c) 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
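# NOTE(editor): illustrative sketch only, not part of the upstream module.
# It shows the minimal shape of a concrete filter built on the BaseFilter
# class defined below: override _filter_one() to accept or reject a single
# object and let BaseFilter.filter_all() handle the iteration. The class and
# attribute names used here are hypothetical.
#
#     class ExampleEnabledFilter(BaseFilter):
#         """Keep only objects whose 'enabled' attribute is truthy."""
#
#         def _filter_one(self, obj, spec_obj):
#             return bool(getattr(obj, 'enabled', False))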
""" Filter support """ from oslo_log import log as logging from nova import loadables LOG = logging.getLogger(__name__) class BaseFilter(object): """Base class for all filter classes.""" def _filter_one(self, obj, spec_obj): """Return True if it passes the filter, False otherwise. Override this in a subclass. """ return True def filter_all(self, filter_obj_list, spec_obj): """Yield objects that pass the filter. Can be overridden in a subclass, if you need to base filtering decisions on all objects. Otherwise, one can just override _filter_one() to filter a single object. """ for obj in filter_obj_list: if self._filter_one(obj, spec_obj): yield obj # Set to true in a subclass if a filter only needs to be run once # for each request rather than for each instance run_filter_once_per_request = False def run_filter_for_index(self, index): """Return True if the filter needs to be run for the "index-th" instance in a request. Only need to override this if a filter needs anything other than "first only" or "all" behaviour. """ if self.run_filter_once_per_request and index > 0: return False else: return True class BaseFilterHandler(loadables.BaseLoader): """Base class to handle loading filter classes. This class should be subclassed where one needs to use filters. """ def get_filtered_objects(self, filters, objs, spec_obj, index=0): list_objs = list(objs) LOG.debug("Starting with %d host(s)", len(list_objs)) # Track the hosts as they are removed. The 'full_filter_results' list # contains the host/nodename info for every host that passes each # filter, while the 'part_filter_results' list just tracks the number # removed by each filter, unless the filter returns zero hosts, in # which case it records the host/nodename for the last batch that was # removed. Since the full_filter_results can be very large, it is only # recorded if the LOG level is set to debug. part_filter_results = [] full_filter_results = [] log_msg = "%(cls_name)s: (start: %(start)s, end: %(end)s)" for filter_ in filters: if filter_.run_filter_for_index(index): cls_name = filter_.__class__.__name__ start_count = len(list_objs) objs = filter_.filter_all(list_objs, spec_obj) if objs is None: LOG.debug("Filter %s says to stop filtering", cls_name) return list_objs = list(objs) end_count = len(list_objs) part_filter_results.append(log_msg % {"cls_name": cls_name, "start": start_count, "end": end_count}) if list_objs: remaining = [(getattr(obj, "host", obj), getattr(obj, "nodename", "")) for obj in list_objs] full_filter_results.append((cls_name, remaining)) else: LOG.info("Filter %s returned 0 hosts", cls_name) full_filter_results.append((cls_name, None)) break LOG.debug("Filter %(cls_name)s returned %(obj_len)d host(s)", {'cls_name': cls_name, 'obj_len': len(list_objs)}) if not list_objs: # Log the filtration history msg_dict = { "inst_uuid": spec_obj.instance_uuid, "str_results": str(full_filter_results), } full_msg = ("Filtering removed all hosts for the request with " "instance ID " "'%(inst_uuid)s'. Filter results: %(str_results)s" ) % msg_dict LOG.debug(full_msg) msg_dict["str_results"] = str(part_filter_results) part_msg = ("Filtering removed all hosts for the request with " "instance ID " "'%(inst_uuid)s'. 
Filter results: %(str_results)s" ) % msg_dict LOG.info(part_msg) return list_objs ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3656087 nova-32.0.0/nova/hacking/0000775000175000017500000000000000000000000015131 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/hacking/__init__.py0000664000175000017500000000000000000000000017230 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/hacking/checks.py0000664000175000017500000011326400000000000016752 0ustar00zuulzuul00000000000000# Copyright (c) 2012, Cloudscaling # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Guidelines for writing new hacking checks - Use only for Nova specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range N3xx. Find the current test with the highest allocated number and then pick the next value. - Keep the test method code in the source file ordered based on the N3xx value. - List the new rule in the top level HACKING.rst file - Add test cases for each new rule to nova/tests/unit/test_hacking.py """ import ast import os import re from hacking import core UNDERSCORE_IMPORT_FILES = [] session_check = re.compile(r"\w*def [a-zA-Z0-9].*[(].*session.*[)]") cfg_re = re.compile(r".*\scfg\.") # Excludes oslo.config OptGroup objects cfg_opt_re = re.compile(r".*[\s\[]cfg\.[a-zA-Z]*Opt\(") rule_default_re = re.compile(r".*RuleDefault\(") policy_enforce_re = re.compile(r".*_ENFORCER\.enforce\(") virt_file_re = re.compile(r"\./nova/(?:tests/)?virt/(\w+)/") virt_import_re = re.compile( r"^\s*(?:import|from) nova\.(?:tests\.)?virt\.(\w+)") virt_config_re = re.compile( r"CONF\.import_opt\('.*?', 'nova\.virt\.(\w+)('|.)") asse_trueinst_re = re.compile( r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, " r"(\w|\.|\'|\"|\[|\])+\)\)") asse_equal_type_re = re.compile( r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), " r"(\w|\.|\'|\"|\[|\])+\)") asse_equal_in_end_with_true_or_false_re = re.compile(r"assertEqual\(" r"(\w|[][.'\"])+ in (\w|[][.'\", ])+, (True|False)\)") asse_equal_in_start_with_true_or_false_re = re.compile(r"assertEqual\(" r"(True|False), (\w|[][.'\"])+ in (\w|[][.'\", ])+\)") # NOTE(snikitin): Next two regexes weren't united to one for more readability. # asse_true_false_with_in_or_not_in regex checks # assertTrue/False(A in B) cases where B argument has no spaces # asse_true_false_with_in_or_not_in_spaces regex checks cases # where B argument has spaces and starts/ends with [, ', ". # For example: [1, 2, 3], "some string", 'another string'. # We have to separate these regexes to escape a false positives # results. B argument should have spaces only if it starts # with [, ", '. Otherwise checking of string # "assertFalse(A in B and C in D)" will be false positives. 
# In this case B argument is "B and C in D". asse_true_false_with_in_or_not_in = re.compile(r"assert(True|False)\(" r"(\w|[][.'\"])+( not)? in (\w|[][.'\",])+(, .*)?\)") asse_true_false_with_in_or_not_in_spaces = re.compile(r"assert(True|False)" r"\((\w|[][.'\"])+( not)? in [\[|'|\"](\w|[][.'\", ])+" r"[\[|'|\"](, .*)?\)") asse_raises_regexp = re.compile(r"assertRaisesRegexp\(") conf_attribute_set_re = re.compile(r"CONF\.[a-z0-9_.]+\s*=\s*\w") translated_log = re.compile(r"(.)*LOG\.\w+\(\s*_\(\s*('|\")") mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])") string_translation = re.compile(r"[^_]*_\(\s*('|\")") underscore_import_check = re.compile(r"(.)*import _(.)*") import_translation_for_log_or_exception = re.compile( r"(.)*(from\snova.i18n\simport)\s_") # We need this for cases where they have created their own _ function. custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*") api_version_re = re.compile(r"@.*\bapi_version\b") dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)") decorator_re = re.compile(r"@.*") http_not_implemented_re = re.compile(r"raise .*HTTPNotImplemented\(") spawn_re = re.compile( r".*(eventlet|greenthread)\.(?Pspawn(_n)?)\(.*\)") contextlib_nested = re.compile(r"^with (contextlib\.)?nested\(") doubled_words_re = re.compile( r"\b(then?|[iao]n|i[fst]|but|f?or|at|and|[dt]o)\s+\1\b") log_remove_context = re.compile( r"(.)*LOG\.(.*)\(.*(context=[_a-zA-Z0-9].*)+.*\)") return_not_followed_by_space = re.compile(r"^\s*return(?:\(|{|\"|'|#).*$") uuid4_re = re.compile(r"uuid4\(\)($|[^\.]|\.hex)") redundant_import_alias_re = re.compile(r"import (?:.*\.)?(.+) as \1$") yield_not_followed_by_space = re.compile(r"^\s*yield(?:\(|{|\[|\"|').*$") asse_regexpmatches = re.compile( r"(assertRegexpMatches|assertNotRegexpMatches)\(") privsep_file_re = re.compile('^nova/privsep[./]') privsep_import_re = re.compile( r"^(?:import|from).*\bprivsep\b") # Redundant parenthetical masquerading as a tuple, used with ``in``: # Space, "in", space, open paren # Optional single or double quote (so we match strings or symbols) # A sequence of the characters that can make up a symbol. (This is weak: a # string can contain other characters; and a numeric symbol can start with a # minus, and a method call has a param list, and... Not sure this gets better # without a lexer.) # The same closing quote # Close paren disguised_as_tuple_re = re.compile(r''' in \((['"]?)[a-zA-Z0-9_.]+\1\)''') # NOTE(takashin): The patterns of nox-existent mock assertion methods and # attributes do not cover all cases. If you find a new pattern, # add the pattern in the following regex patterns. mock_assert_method_re = re.compile( r"\.((called_once(_with)*|has_calls)|" r"mock_assert_(called(_(once|with|once_with))?" r"|any_call|has_calls|not_called)|" r"(asser|asset|asssert|assset)_(called(_(once|with|once_with))?" 
r"|any_call|has_calls|not_called))\(") mock_attribute_re = re.compile(r"[\.\(](retrun_value)[,=\s]") # Regex for useless assertions useless_assertion_re = re.compile( r"\.((assertIsNone)\(None|(assertTrue)\((True|\d+|'.+'|\".+\")),") # Regex for misuse of assert_has_calls mock_assert_has_calls_re = re.compile(r"\.assert_has_calls\s?=") # Regex for catching aliasing mock.Mock class in test mock_class_aliasing_re = re.compile( r"^[A-Za-z0-9_.]+\s*=\s*mock\.(Magic|NonCallable)?Mock$") # Regex for catching aliasing mock.Mock class in test mock_class_as_new_value_in_patching_re = re.compile( r"mock\.patch(\.object)?.* new=mock\.(Magic|NonCallable)?Mock[^(]") # Regex for direct use of oslo.concurrency lockutils.ReaderWriterLock rwlock_re = re.compile( r"(?P(oslo_concurrency\.)?(lockutils|fasteners))" r"\.ReaderWriterLock\(.*\)") six_re = re.compile(r"^(import six(\..*)?|from six(\..*)? import .*)$") # Regex for catching the setDaemon method set_daemon_re = re.compile(r"\.setDaemon\(") eventlet_stdlib_primitives_re = re.compile( r".*(eventlet|greenthread)\.sleep\(.*") class BaseASTChecker(ast.NodeVisitor): """Provides a simple framework for writing AST-based checks. Subclasses should implement visit_* methods like any other AST visitor implementation. When they detect an error for a particular node the method should call ``self.add_error(offending_node)``. Details about where in the code the error occurred will be pulled from the node object. Subclasses should also provide a class variable named CHECK_DESC to be used for the human readable error message. """ def __init__(self, tree, filename): """This object is created automatically by pycodestyle. :param tree: an AST tree :param filename: name of the file being analyzed (ignored by our checks) """ self._tree = tree self._errors = [] def run(self): """Called automatically by pycodestyle.""" self.visit(self._tree) return self._errors def add_error(self, node, message=None): """Add an error caused by a node to the list of errors.""" message = message or self.CHECK_DESC error = (node.lineno, node.col_offset, message, self.__class__) self._errors.append(error) def _check_call_names(self, call_node, names): if isinstance(call_node, ast.Call): if isinstance(call_node.func, ast.Name): if call_node.func.id in names: return True return False @core.flake8ext def import_no_db_in_virt(logical_line, filename): """Check for db calls from nova/virt As of grizzly-2 all the database calls have been removed from nova/virt, and we want to keep it that way. 
N307 """ if "nova/virt" in filename and not filename.endswith("fake.py"): if logical_line.startswith("from nova.db.main import api"): yield (0, "N307: nova.db.* import not allowed in nova/virt/*") @core.flake8ext def no_db_session_in_public_api(logical_line, filename): if "db/api.py" in filename: if session_check.match(logical_line): yield (0, "N309: public db api methods may not accept session") @core.flake8ext def use_timeutils_utcnow(logical_line, filename): # tools are OK to use the standard datetime module if "/tools/" in filename: return msg = "N310: timeutils.utcnow() must be used instead of datetime.%s()" datetime_funcs = ['now', 'utcnow'] for f in datetime_funcs: pos = logical_line.find('datetime.%s' % f) if pos != -1: yield (pos, msg % f) def _get_virt_name(regex, data): m = regex.match(data) if m is None: return None driver = m.group(1) # Ignore things we mis-detect as virt drivers in the regex if driver in ["test_virt_drivers", "driver", "disk", "api", "imagecache", "cpu", "hardware", "image"]: return None return driver @core.flake8ext def import_no_virt_driver_import_deps(physical_line, filename): """Check virt drivers' modules aren't imported by other drivers Modules under each virt driver's directory are considered private to that virt driver. Other drivers in Nova must not access those drivers. Any code that is to be shared should be refactored into a common module N311 """ thisdriver = _get_virt_name(virt_file_re, filename) thatdriver = _get_virt_name(virt_import_re, physical_line) if (thatdriver is not None and thisdriver is not None and thisdriver != thatdriver): return (0, "N311: importing code from other virt drivers forbidden") @core.flake8ext def import_no_virt_driver_config_deps(physical_line, filename): """Check virt drivers' config vars aren't used by other drivers Modules under each virt driver's directory are considered private to that virt driver. Other drivers in Nova must not use their config vars. Any config vars that are to be shared should be moved into a common module N312 """ thisdriver = _get_virt_name(virt_file_re, filename) thatdriver = _get_virt_name(virt_config_re, physical_line) if (thatdriver is not None and thisdriver is not None and thisdriver != thatdriver): return (0, "N312: using config vars from other virt drivers forbidden") @core.flake8ext def capital_cfg_help(logical_line, tokens): msg = "N313: capitalize help string" if cfg_re.match(logical_line): for t in range(len(tokens)): if tokens[t][1] == "help": txt = tokens[t + 2][1] if len(txt) > 1 and txt[1].islower(): yield (0, msg) @core.flake8ext def assert_true_instance(logical_line): """Check for assertTrue(isinstance(a, b)) sentences N316 """ if asse_trueinst_re.match(logical_line): yield (0, "N316: assertTrue(isinstance(a, b)) sentences not allowed") @core.flake8ext def assert_equal_type(logical_line): """Check for assertEqual(type(A), B) sentences N317 """ if asse_equal_type_re.match(logical_line): yield (0, "N317: assertEqual(type(A), B) sentences not allowed") @core.flake8ext def no_translate_logs(logical_line, filename): """Check for 'LOG.foo(_(' As per our translation policy, we shouldn't translate logs. This check assumes that 'LOG' is a logger. 
N319 """ if translated_log.match(logical_line): yield (0, "N319 Don't translate logs") @core.flake8ext def no_import_translation_in_tests(logical_line, filename): """Check for 'from nova.i18n import _' N337 """ if 'nova/tests/' in filename: res = import_translation_for_log_or_exception.match(logical_line) if res: yield (0, "N337 Don't import translation in tests") @core.flake8ext def no_setting_conf_directly_in_tests(logical_line, filename): """Check for setting CONF.* attributes directly in tests The value can leak out of tests affecting how subsequent tests run. Using self.flags(option=value) is the preferred method to temporarily set config options in tests. N320 """ if 'nova/tests/' in filename: res = conf_attribute_set_re.match(logical_line) if res: yield (0, "N320: Setting CONF.* attributes directly in tests is " "forbidden. Use self.flags(option=value) instead") @core.flake8ext def no_mutable_default_args(logical_line): msg = "N322: Method's default argument shouldn't be mutable!" if mutable_default_args.match(logical_line): yield (0, msg) @core.flake8ext def check_explicit_underscore_import(logical_line, filename): """Check for explicit import of the _ function We need to ensure that any files that are using the _() function to translate logs are explicitly importing the _ function. We can't trust unit test to catch whether the import has been added so we need to check for it here. """ # Build a list of the files that have _ imported. No further # checking needed once it is found. if filename in UNDERSCORE_IMPORT_FILES: pass elif (underscore_import_check.match(logical_line) or custom_underscore_check.match(logical_line)): UNDERSCORE_IMPORT_FILES.append(filename) elif string_translation.match(logical_line): yield (0, "N323: Found use of _() without explicit import of _ !") @core.flake8ext def use_jsonutils(logical_line, filename): # tools are OK to use the standard json module if "/tools/" in filename: return msg = "N324: jsonutils.%(fun)s must be used instead of json.%(fun)s" if "json." in logical_line: json_funcs = ['dumps(', 'dump(', 'loads(', 'load('] for f in json_funcs: pos = logical_line.find('json.%s' % f) if pos != -1: yield (pos, msg % {'fun': f[:-1]}) @core.flake8ext def check_api_version_decorator(logical_line, previous_logical, blank_before, filename): msg = ("N332: the api_version decorator must be the first decorator" " on a method.") if blank_before == 0 and re.match(api_version_re, logical_line) \ and re.match(decorator_re, previous_logical): yield (0, msg) class CheckForTransAdd(BaseASTChecker): """Checks for the use of concatenation on a translated string. Translations should not be concatenated with other strings, but should instead include the string being added to the translated string to give the translators the most information. """ name = 'check_for_trans_add' version = '0.1' CHECK_DESC = ('N326 Translated messages cannot be concatenated. ' 'String should be included in translated message.') TRANS_FUNC = ['_'] def visit_BinOp(self, node): if isinstance(node.op, ast.Add): for node_x in (node.left, node.right): if isinstance(node_x, ast.Call): if isinstance(node_x.func, ast.Name): if node_x.func.id == '_': self.add_error(node_x) super(CheckForTransAdd, self).generic_visit(node) class _FindVariableReferences(ast.NodeVisitor): def __init__(self): super(_FindVariableReferences, self).__init__() self._references = [] def visit_Name(self, node): if isinstance(node.ctx, ast.Load): # This means the value of a variable was loaded. 
For example a # variable 'foo' was used like: # mocked_thing.bar = foo # foo() # self.assertRaises(exception, foo) self._references.append(node.id) super(_FindVariableReferences, self).generic_visit(node) class CheckForUncalledTestClosure(BaseASTChecker): """Look for closures that are never called in tests. A recurring pattern when using multiple mocks is to create a closure decorated with mocks like: def test_thing(self): @mock.patch.object(self.compute, 'foo') @mock.patch.object(self.compute, 'bar') def _do_test(mock_bar, mock_foo): # Test things _do_test() However it is easy to leave off the _do_test() and have the test pass because nothing runs. This check looks for methods defined within a test method and ensures that there is a reference to them. Only methods defined one level deep are checked. Something like: def test_thing(self): class FakeThing: def foo(self): would not ensure that foo is referenced. N349 """ name = 'check_for_uncalled_test_closure' version = '0.1' def __init__(self, tree, filename): super(CheckForUncalledTestClosure, self).__init__(tree, filename) self._filename = filename def visit_FunctionDef(self, node): # self._filename is 'stdin' in the unit test for this check. if (not os.path.basename(self._filename).startswith('test_') and os.path.basename(self._filename) != 'stdin'): return closures = [] references = [] # Walk just the direct nodes of the test method for child_node in ast.iter_child_nodes(node): if isinstance(child_node, ast.FunctionDef): closures.append(child_node.name) # Walk all nodes to find references find_references = _FindVariableReferences() find_references.generic_visit(node) references = find_references._references missed = set(closures) - set(references) if missed: self.add_error(node, 'N349: Test closures not called: %s' % ','.join(missed)) @core.flake8ext def assert_true_or_false_with_in(logical_line): """Check for assertTrue/False(A in B), assertTrue/False(A not in B), assertTrue/False(A in B, message) or assertTrue/False(A not in B, message) sentences. N334 """ res = (asse_true_false_with_in_or_not_in.search(logical_line) or asse_true_false_with_in_or_not_in_spaces.search(logical_line)) if res: yield (0, "N334: Use assertIn/NotIn(A, B) rather than " "assertTrue/False(A in/not in B) when checking collection " "contents.") @core.flake8ext def assert_raises_regexp(logical_line): """Check for usage of deprecated assertRaisesRegexp N335 """ res = asse_raises_regexp.search(logical_line) if res: yield (0, "N335: assertRaisesRegex must be used instead " "of assertRaisesRegexp") @core.flake8ext def dict_constructor_with_list_copy(logical_line): msg = ("N336: Must use a dict comprehension instead of a dict constructor" " with a sequence of key-value pairs." 
) if dict_constructor_with_list_copy_re.match(logical_line): yield (0, msg) @core.flake8ext def assert_equal_in(logical_line): """Check for assertEqual(A in B, True), assertEqual(True, A in B), assertEqual(A in B, False) or assertEqual(False, A in B) sentences N338 """ res = (asse_equal_in_start_with_true_or_false_re.search(logical_line) or asse_equal_in_end_with_true_or_false_re.search(logical_line)) if res: yield (0, "N338: Use assertIn/NotIn(A, B) rather than " "assertEqual(A in B, True/False) when checking collection " "contents.") @core.flake8ext def check_http_not_implemented(logical_line, filename, noqa): msg = ("N339: HTTPNotImplemented response must be implemented with" " common raise_feature_not_supported().") if noqa: return if ("nova/api/openstack/compute" not in filename): return if re.match(http_not_implemented_re, logical_line): yield (0, msg) @core.flake8ext def check_greenthread_spawns(logical_line, filename): """Check for use of greenthread.spawn(), greenthread.spawn_n(), eventlet.spawn(), and eventlet.spawn_n() N340 """ msg = ("N340: Use nova.utils.spawn() rather than " "greenthread.%(spawn)s() and eventlet.%(spawn)s()") if "nova/utils.py" in filename or "nova/tests/" in filename: return match = re.match(spawn_re, logical_line) if match: yield (0, msg % {'spawn': match.group('spawn_part')}) @core.flake8ext def check_no_contextlib_nested(logical_line, filename): msg = ("N341: contextlib.nested is deprecated. With Python 2.7 and later " "the with-statement supports multiple nested objects. See https://" "docs.python.org/2/library/contextlib.html#contextlib.nested for " "more information. nova.test.nested() is an alternative as well.") if contextlib_nested.match(logical_line): yield (0, msg) @core.flake8ext def check_config_option_in_central_place(logical_line, filename): msg = ("N342: Config options should be in the central location " "'/nova/conf/*'. Do not declare new config options outside " "of that folder.") # That's the correct location if "nova/conf/" in filename: return # (macsz) All config options (with exceptions that are clarified # in the list below) were moved to the central place. List below is for # all options that were impossible to move without doing a major impact # on code. Add full path to a module or folder. conf_exceptions = [ # CLI opts are allowed to be outside of nova/conf directory 'nova/cmd/manage.py', 'nova/cmd/policy.py', 'nova/cmd/status.py', # config options should not be declared in tests, but there is # another checker for it (N320) 'nova/tests', ] if any(f in filename for f in conf_exceptions): return if cfg_opt_re.match(logical_line): yield (0, msg) @core.flake8ext def check_policy_registration_in_central_place(logical_line, filename): msg = ('N350: Policy registration should be in the central location(s) ' '"/nova/policies/*"') # This is where registration should happen if "nova/policies/" in filename: return # A couple of policy tests register rules if "nova/tests/unit/test_policy.py" in filename: return if rule_default_re.match(logical_line): yield (0, msg) @core.flake8ext def check_policy_enforce(logical_line, filename): """Look for uses of nova.policy._ENFORCER.enforce() Now that policy defaults are registered in code the _ENFORCER.authorize method should be used. That ensures that only registered policies are used. Uses of _ENFORCER.enforce could allow unregistered policies to be used, so this check looks for uses of that method. N351 """ msg = ('N351: nova.policy._ENFORCER.enforce() should not be used. 
' 'Use the authorize() method instead.') if policy_enforce_re.match(logical_line): yield (0, msg) @core.flake8ext def check_doubled_words(physical_line, filename): """Check for the common doubled-word typos N343 """ msg = ("N343: Doubled word '%(word)s' typo found") match = re.search(doubled_words_re, physical_line) if match: return (0, msg % {'word': match.group(1)}) @core.flake8ext def no_os_popen(logical_line): """Disallow 'os.popen(' Deprecated library function os.popen() Replace it using subprocess https://bugs.launchpad.net/tempest/+bug/1529836 N348 """ if 'os.popen(' in logical_line: yield (0, 'N348 Deprecated library function os.popen(). ' 'Replace it using subprocess module. ') @core.flake8ext def no_log_warn(logical_line): """Disallow 'LOG.warn(' Deprecated LOG.warn(), instead use LOG.warning https://bugs.launchpad.net/senlin/+bug/1508442 N352 """ msg = ("N352: LOG.warn is deprecated, please use LOG.warning!") if "LOG.warn(" in logical_line: yield (0, msg) @core.flake8ext def check_context_log(logical_line, filename, noqa): """check whether context is being passed to the logs Not correct: LOG.info("Rebooting instance", context=context) Correct: LOG.info("Rebooting instance") https://bugs.launchpad.net/nova/+bug/1500896 N353 """ if noqa: return if "nova/tests" in filename: return if log_remove_context.match(logical_line): yield (0, "N353: Nova is using oslo.context's RequestContext " "which means the context object is in scope when " "doing logging using oslo.log, so no need to pass it as " "kwarg.") @core.flake8ext def no_assert_equal_true_false(logical_line): """Enforce use of assertTrue/assertFalse. Prevent use of assertEqual(A, True|False), assertEqual(True|False, A), assertNotEqual(A, True|False), and assertNotEqual(True|False, A). N355 """ _start_re = re.compile(r'assert(Not)?Equal\((True|False),') _end_re = re.compile(r'assert(Not)?Equal\(.*,\s+(True|False)\)$') if _start_re.search(logical_line) or _end_re.search(logical_line): yield (0, "N355: assertEqual(A, True|False), " "assertEqual(True|False, A), assertNotEqual(A, True|False), " "or assertEqual(True|False, A) sentences must not be used. " "Use assertTrue(A) or assertFalse(A) instead") @core.flake8ext def no_assert_true_false_is_not(logical_line): """Enforce use of assertIs/assertIsNot. Prevent use of assertTrue(A is|is not B) and assertFalse(A is|is not B). N356 """ _re = re.compile(r'assert(True|False)\(.+\s+is\s+(not\s+)?.+\)$') if _re.search(logical_line): yield (0, "N356: assertTrue(A is|is not B) or " "assertFalse(A is|is not B) sentences must not be used. " "Use assertIs(A, B) or assertIsNot(A, B) instead") @core.flake8ext def check_uuid4(logical_line): """Generating UUID Use oslo_utils.uuidutils or uuidsentinel(in case of test cases) to generate UUID instead of uuid4(). N357 """ msg = ("N357: Use oslo_utils.uuidutils or uuidsentinel(in case of test " "cases) to generate UUID instead of uuid4().") if uuid4_re.search(logical_line): yield (0, msg) @core.flake8ext def return_followed_by_space(logical_line): """Return should be followed by a space. Return should be followed by a space to clarify that return is not a function. Adding a space may force the developer to rethink if there are unnecessary parentheses in the written code. 
Not correct: return(42), return(a, b) Correct: return, return 42, return (a, b), return a, b N358 """ if return_not_followed_by_space.match(logical_line): yield (0, "N358: Return keyword should be followed by a space.") @core.flake8ext def no_redundant_import_alias(logical_line): """Check for redundant import aliases. Imports should not be in the forms below. from x import y as y import x as x import x.y as y N359 """ if re.search(redundant_import_alias_re, logical_line): yield (0, "N359: Import alias should not be redundant.") @core.flake8ext def yield_followed_by_space(logical_line): """Yield should be followed by a space. Yield should be followed by a space to clarify that yield is not a function. Adding a space may force the developer to rethink if there are unnecessary parentheses in the written code. Not correct: yield(x), yield(a, b) Correct: yield x, yield (a, b), yield a, b N360 """ if yield_not_followed_by_space.match(logical_line): yield (0, "N360: Yield keyword should be followed by a space.") @core.flake8ext def assert_regexpmatches(logical_line): """Check for usage of deprecated assertRegexpMatches/assertNotRegexpMatches N361 """ res = asse_regexpmatches.search(logical_line) if res: yield (0, "N361: assertRegex/assertNotRegex must be used instead " "of assertRegexpMatches/assertNotRegexpMatches.") @core.flake8ext def privsep_imports_not_aliased(logical_line, filename): """Do not abbreviate or alias privsep module imports. When accessing symbols under nova.privsep in code or tests, the full module path (e.g. nova.privsep.path.readfile(...)) should be used explicitly rather than importing and using an alias/abbreviation such as: from nova.privsep import path ... path.readfile(...) See Ief177dbcb018da6fbad13bb0ff153fc47292d5b9. N362 """ if ( # Give modules under nova.privsep a pass not privsep_file_re.match(filename) and # Any style of import of privsep... privsep_import_re.match(logical_line) and # ...that isn't 'import nova.privsep[.foo...]' logical_line.count(' ') > 1): yield (0, "N362: always import privsep modules so that the use of " "escalated permissions is obvious to callers. For example, " "use 'import nova.privsep.path' instead of " "'from nova.privsep import path'.") @core.flake8ext def did_you_mean_tuple(logical_line): """Disallow ``(not_a_tuple)`` because you meant ``(a_tuple_of_one,)``. N363 """ if disguised_as_tuple_re.search(logical_line): yield (0, "N363: You said ``in (not_a_tuple)`` when you almost " "certainly meant ``in (a_tuple_of_one,)``.") @core.flake8ext def nonexistent_assertion_methods_and_attributes(logical_line, filename): """Check non-existent mock assertion methods and attributes. The following assertion methods do not exist. - called_once() - called_once_with() - has_calls() - mock_assert_*() The following typos were found in the past cases. - asser_* - asset_* - assset_* - asssert_* - retrun_value N364 """ msg = ("N364: Non existent mock assertion method or attribute (%s) is " "used. Check a typo or whether the assertion method should begin " "with 'assert_'.") if 'nova/tests/' in filename: match = mock_assert_method_re.search(logical_line) if match: yield (0, msg % match.group(1)) match = mock_attribute_re.search(logical_line) if match: yield (0, msg % match.group(1)) @core.flake8ext def useless_assertion(logical_line, filename): """Check useless assertions in tests. The following assertions are useless. - assertIsNone(None, ...) - assertTrue(True, ...) - assertTrue(2, ...) # Constant number - assertTrue('Constant string', ...) 
- assertTrue("Constant string", ...) They are usually misuses of assertIsNone or assertTrue. N365 """ msg = "N365: Misuse of %s." if 'nova/tests/' in filename: match = useless_assertion_re.search(logical_line) if match: yield (0, msg % (match.group(2) or match.group(3))) @core.flake8ext def check_assert_has_calls(logical_line, filename): """Check misuse of assert_has_calls. Not correct: mock_method.assert_has_calls = [mock.call(0)] Correct: mock_method.assert_has_calls([mock.call(0)]) N366 """ msg = "N366: The assert_has_calls is a method rather than a variable." if ('nova/tests/' in filename and mock_assert_has_calls_re.search(logical_line)): yield (0, msg) @core.flake8ext def do_not_alias_mock_class(logical_line, filename): """Check for aliasing Mock class Aliasing Mock class almost always a bad idea. Consider the test code trying to catch the instantiation of the Rados class but instead introducing a global change on the Mock object: https://github.com/openstack/nova/blob/10b1dc84f47a71061340f8e0ae0fe32dca44061a/nova/tests/unit/storage/test_rbd.py#L122-L125 After this code every test that assumes that mock.Mock().shutdown is a new auto-generated mock.Mock() object will fail a shutdown is now defined in the Mock class level and therefore surviving between test cases. N367 """ if 'nova/tests/' in filename: res = mock_class_aliasing_re.match(logical_line) if res: yield ( 0, "N367: Aliasing mock.Mock class is dangerous as it easy to " "introduce class level changes in Mock that survives " "between test cases. If you want to mock object creation " "then mock the class under test with a mock _instance_ and " "set the return_value of the mock to return mock instances. " "See for example: " "https://review.opendev.org/c/openstack/nova/+/805657" ) @core.flake8ext def do_not_use_mock_class_as_new_mock_value(logical_line, filename): """Check if mock.Mock class is used during set up of a patcher as new kwargs. The mock.patch and mock.patch.object takes a `new` kwargs and use that value as the replacement during the patching. Using new=mock.Mock (instead of new=mock.Mock() or new_callable=mock.Mock) results in code under test pointing to the Mock class. This is almost always a wrong thing as any changes on that class will leak between test cases uncontrollably. N368 """ if 'nova/tests/' in filename: res = mock_class_as_new_value_in_patching_re.search(logical_line) if res: yield ( 0, "N368: Using mock.patch(..., new=mock.Mock) causes that the " "patching will introduce the Mock class as replacement value " "instead of a mock object. Any change on the Mock calls will " "leak out from the test and can cause interference. " "Use new=mock.Mock() or new_callable=mock.Mock instead." ) @core.flake8ext def check_lockutils_rwlocks(logical_line): """Check for direct use of oslo.concurrency lockutils.ReaderWriterLock() oslo.concurrency lockutils uses fasteners.ReaderWriterLock to provide read/write locks and fasteners calls threading.current_thread() to track and identify lock holders and waiters. The eventlet implementation of current_thread() only supports greenlets of type GreenThread, else it falls back on the native threading.current_thread() method. See https://github.com/eventlet/eventlet/issues/731 for details. N369 """ msg = ("N369: %(module)s.ReaderWriterLock() does not " "function correctly with eventlet patched code. 
" "Use nova.utils.ReaderWriterLock() instead.") match = re.match(rwlock_re, logical_line) if match: yield ( 0, msg % {'module': match.group('module_part')} ) @core.flake8ext def check_six(logical_line): """Check for use of six nova is now Python 3-only so we don't want six. However, people might use it out of habit and it will likely work since six is a transitive dependency. N370 """ match = re.match(six_re, logical_line) if match: yield (0, "N370: Don't use or import six") @core.flake8ext def import_stock_mock(logical_line): """Use python's mock, not the mock library. Since we `dropped support for python 2`__, we no longer need to use the mock library, which existed to backport py3 functionality into py2. Change Ib44b5bff657c8e76c4f701e14d51a4efda3f6d32 cut over to importing the stock mock, which must be done by saying:: from unittest import mock ...because if you say:: import mock ...you may be getting the stock mock; or, due to transitive dependencies in the environment, the library mock. This check can be removed in the future (and we can start saying ``import mock`` again) if we manage to purge these transitive dependencies. .. __: https://review.opendev.org/#/c/687954/ N371 """ if logical_line == 'import mock' or logical_line.startswith('from mock'): yield ( 0, "N371: You must explicitly import python's mock: " "``from unittest import mock``" ) @core.flake8ext def check_set_daemon(logical_line): """Check for use of the setDaemon method of the threading.Thread class The setDaemon method of the threading.Thread class has been deprecated since Python 3.10. Use the daemon attribute instead. See https://docs.python.org/3.10/library/threading.html#threading.Thread.setDaemon for details. N372 """ res = set_daemon_re.search(logical_line) if res: yield (0, "N372: Don't use the setDaemon method. " "Use the daemon attribute instead.") @core.flake8ext def check_eventlet_primitives(logical_line, filename): """Check for use of any eventlet primitives where the stdlib equivalent should be used N373 """ msg = ( "N373: Use the stdlib concurrency primitive instead of the Eventelt " "specific one") match = re.match(eventlet_stdlib_primitives_re, logical_line) if match: yield (0, msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/i18n.py0000664000175000017500000000201700000000000014656 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. See https://docs.openstack.org/oslo.i18n/latest/user/index.html . 
""" import oslo_i18n DOMAIN = 'nova' _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary def translate(value, user_locale): return oslo_i18n.translate(value, user_locale) def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3656087 nova-32.0.0/nova/image/0000775000175000017500000000000000000000000014607 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/image/__init__.py0000664000175000017500000000000000000000000016706 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/image/glance.py0000664000175000017500000015665500000000000016434 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of an image service that uses Glance as the backend.""" import copy import inspect import itertools import os import random import re import stat import sys import time import urllib.parse as urlparse import cryptography from cursive import certificate_utils from cursive import exception as cursive_exception from cursive import signature_utils import glanceclient from glanceclient.common import utils as glance_utils import glanceclient.exc from glanceclient.v2 import schemas from keystoneauth1 import loading as ks_loading from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import timeutils import nova.conf from nova import exception from nova import objects from nova.objects import fields from nova import profiler from nova import service_auth from nova import utils LOG = logging.getLogger(__name__) CONF = nova.conf.CONF _SESSION = None def _session_and_auth(context): # Session is cached, but auth needs to be pulled from context each time. global _SESSION if not _SESSION: _SESSION = ks_loading.load_session_from_conf_options( CONF, nova.conf.glance.glance_group.name) auth = service_auth.get_auth_plugin(context) return _SESSION, auth def _glanceclient_from_endpoint(context, endpoint, version): sess, auth = _session_and_auth(context) return glanceclient.Client(version, session=sess, auth=auth, endpoint_override=endpoint, global_request_id=context.global_id) def generate_glance_url(context): """Return a random glance url from the api servers we know about.""" return next(get_api_servers(context)) def _endpoint_from_image_ref(image_href): """Return the image_ref and guessed endpoint from an image url. 
:param image_href: href of an image :returns: a tuple of the form (image_id, endpoint_url) """ parts = image_href.split('/') image_id = parts[-1] # the endpoint is everything in the url except the last 3 bits # which are version, 'images', and image_id endpoint = '/'.join(parts[:-3]) return (image_id, endpoint) def get_api_servers(context): """Shuffle a list of service endpoints and return an iterator that will cycle through the list, looping around to the beginning if necessary. """ # NOTE(efried): utils.get_ksa_adapter().get_endpoint() is the preferred # mechanism for endpoint discovery. Only use `api_servers` if you really # need to shuffle multiple endpoints. if CONF.glance.api_servers: api_servers = CONF.glance.api_servers random.shuffle(api_servers) else: sess, auth = _session_and_auth(context) ksa_adap = utils.get_ksa_adapter( nova.conf.glance.DEFAULT_SERVICE_TYPE, ksa_auth=auth, ksa_session=sess, min_version='2.0', max_version='2.latest') endpoint = utils.get_endpoint(ksa_adap) if endpoint: # NOTE(mriedem): Due to python-glanceclient bug 1707995 we have # to massage the endpoint URL otherwise it won't work properly. # We can't use glanceclient.common.utils.strip_version because # of bug 1748009. endpoint = re.sub(r'/v\d+(\.\d+)?/?$', '/', endpoint) api_servers = [endpoint] return itertools.cycle(api_servers) class GlanceClientWrapper(object): """Glance client wrapper class that implements retries.""" def __init__(self, context=None, endpoint=None): version = 2 if endpoint is not None: self.client = self._create_static_client(context, endpoint, version) else: self.client = None self.api_servers = None def _create_static_client(self, context, endpoint, version): """Create a client that we'll use for every call.""" self.api_server = str(endpoint) return _glanceclient_from_endpoint(context, endpoint, version) def _create_onetime_client(self, context, version): """Create a client that will be used for one call.""" if self.api_servers is None: self.api_servers = get_api_servers(context) self.api_server = next(self.api_servers) return _glanceclient_from_endpoint(context, self.api_server, version) def call(self, context, version, method, controller=None, args=None, kwargs=None): """Call a glance client method. If we get a connection error, retry the request according to CONF.glance.num_retries. :param context: RequestContext to use :param version: Numeric version of the *Glance API* to use :param method: string method name to execute on the glanceclient :param controller: optional string name of the client controller to use. Default (None) is to use the 'images' controller :param args: optional iterable of arguments to pass to the glanceclient method, splatted as positional args :param kwargs: optional dict of arguments to pass to the glanceclient, splatted into named arguments """ args = args or [] kwargs = kwargs or {} retry_excs = (glanceclient.exc.ServiceUnavailable, glanceclient.exc.InvalidEndpoint, glanceclient.exc.CommunicationError, IOError) num_attempts = 1 + CONF.glance.num_retries controller_name = controller or 'images' for attempt in range(1, num_attempts + 1): client = self.client or self._create_onetime_client(context, version) try: controller = getattr(client, controller_name) result = getattr(controller, method)(*args, **kwargs) if inspect.isgenerator(result): # Convert generator results to a list, so that we can # catch any potential exceptions now and retry the call. 
return list(result) return result except retry_excs as e: if attempt < num_attempts: extra = "retrying" else: extra = 'done trying' LOG.exception("Error contacting glance server " "'%(server)s' for '%(method)s', " "%(extra)s.", {'server': self.api_server, 'method': method, 'extra': extra}) if attempt == num_attempts: raise exception.GlanceConnectionFailed( server=str(self.api_server), reason=str(e)) time.sleep(1) class GlanceImageServiceV2(object): """Provides storage and retrieval of disk image objects within Glance.""" def __init__(self, client=None): self._client = client or GlanceClientWrapper() # NOTE(danms): This used to be built from a list of external modules # that were loaded at runtime. Preserve this list for implementations # to be added here. self._download_handlers = {} if CONF.glance.enable_rbd_download: self._download_handlers['rbd'] = self.rbd_download def rbd_download(self, context, url_parts, dst_path, metadata=None): """Use an explicit rbd call to download an image. :param context: The `nova.context.RequestContext` object for the request :param url_parts: Parts of URL pointing to the image location :param dst_path: Filepath to transfer the image file to. :param metadata: Image location metadata (currently unused) """ # avoid circular import from nova.storage import rbd_utils try: # Parse the RBD URL from url_parts, it should consist of 4 # sections and be in the format of: # /// url_path = str(urlparse.unquote(url_parts.path)) cluster_uuid, pool_name, image_uuid, snapshot_name = ( url_path.split('/')) except ValueError as e: msg = f"Invalid RBD URL format: {e}" LOG.error(msg) raise nova.exception.InvalidParameterValue(msg) rbd_driver = rbd_utils.RBDDriver( user=CONF.glance.rbd_user, pool=CONF.glance.rbd_pool, ceph_conf=CONF.glance.rbd_ceph_conf, connect_timeout=CONF.glance.rbd_connect_timeout) try: LOG.debug("Attempting to export RBD image: " "[pool_name: %s] [image_uuid: %s] " "[snapshot_name: %s] [dst_path: %s]", pool_name, image_uuid, snapshot_name, dst_path) rbd_driver.export_image(dst_path, image_uuid, snapshot_name, pool_name) except Exception as e: LOG.error("Error during RBD image export: %s", e) raise nova.exception.CouldNotFetchImage(image_id=image_uuid) def show(self, context, image_id, include_locations=False, show_deleted=True): """Returns a dict with image data for the given opaque image id. :param context: The context object to pass to image client :param image_id: The UUID of the image :param include_locations: (Optional) include locations in the returned dict of information if the image service API supports it. If the image service API does not support the locations attribute, it will still be included in the returned dict, as an empty list. :param show_deleted: (Optional) show the image even the status of image is deleted. 
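:raises: nova.exception.ImageNotFound if the image does not exist, is not available to the requesting project, or is deleted while show_deleted is False.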
""" try: image = self._client.call(context, 2, 'get', args=(image_id,)) except Exception: _reraise_translated_image_exception(image_id) if not show_deleted and getattr(image, 'deleted', False): raise exception.ImageNotFound(image_id=image_id) if not _is_image_available(context, image): raise exception.ImageNotFound(image_id=image_id) image = _translate_from_glance(image, include_locations=include_locations) if include_locations: locations = image.get('locations', None) or [] du = image.get('direct_url', None) if du: locations.append({'url': du, 'metadata': {}}) image['locations'] = locations return image def _get_transfer_method(self, scheme): """Returns a transfer method for scheme, or None.""" try: return self._download_handlers[scheme] except KeyError: return None def detail(self, context, **kwargs): """Calls out to Glance for a list of detailed image information.""" params = _extract_query_params_v2(kwargs) try: images = self._client.call(context, 2, 'list', kwargs=params) except Exception: _reraise_translated_exception() _images = [] for image in images: if _is_image_available(context, image): _images.append(_translate_from_glance(image)) return _images @staticmethod def _safe_fsync(fh): """Performs os.fsync on a filehandle only if it is supported. fsync on a pipe, FIFO, or socket raises OSError with EINVAL. This method discovers whether the target filehandle is one of these types and only performs fsync if it isn't. :param fh: Open filehandle (not a path or fileno) to maybe fsync. """ fileno = fh.fileno() mode = os.fstat(fileno).st_mode # A pipe answers True to S_ISFIFO if not any(check(mode) for check in (stat.S_ISFIFO, stat.S_ISSOCK)): os.fsync(fileno) def _try_special_handlers(self, context, image_id, dst_path, verifier): image = self.show(context, image_id, include_locations=True) for entry in image.get('locations', []): loc_url = entry['url'] loc_meta = entry['metadata'] o = urlparse.urlparse(loc_url) xfer_method = self._get_transfer_method(o.scheme) if not xfer_method: continue try: xfer_method(context, o, dst_path, loc_meta) LOG.info("Successfully transferred using %s", o.scheme) if not verifier: return True # Load chunks from the downloaded image file # for verification with open(dst_path, 'rb') as fh: downloaded_length = os.path.getsize(dst_path) image_chunks = glance_utils.IterableWithLength(fh, downloaded_length) self._verify_and_write(context, image_id, verifier, image_chunks, None, None) return True except Exception: LOG.exception("Download image error") return False def download(self, context, image_id, data=None, dst_path=None, trusted_certs=None): """Calls out to Glance for data and writes data.""" # First, try to get the verifier, so we do not even start to download # the image and then fail on the metadata verifier = self._get_verifier(context, image_id, trusted_certs) # Second, try to delegate image download to a special handler if (self._download_handlers and dst_path is not None): if self._try_special_handlers(context, image_id, dst_path, verifier): return # By default (or if direct download has failed), use glance client call # to fetch the image and fill image_chunks try: image_chunks = self._client.call( context, 2, 'data', args=(image_id,)) except Exception: _reraise_translated_image_exception(image_id) if image_chunks.wrapped is None: # None is a valid return value, but there's nothing we can do with # a image with no associated data raise exception.ImageUnacceptable(image_id=image_id, reason='Image has no associated data') return 
self._verify_and_write(context, image_id, verifier, image_chunks, data, dst_path) def _verify_and_write(self, context, image_id, verifier, image_chunks, data, dst_path): """Perform image signature verification and save the image file if needed. This function writes the content of the image_chunks iterator either to a file object provided by the data parameter or to a filepath provided by dst_path parameter. If none of them are provided then no data will be written out but instead image_chunks iterator is returned. :param image_id: The UUID of the image :param verifier: An instance of a 'cursive.verifier' :param image_chunks An iterator pointing to the image data :param data: File object to use when writing the image. If passed as None and dst_path is provided, new file is opened. :param dst_path: Filepath to transfer the image file to. :returns an iterable with image data, or nothing. Iterable is returned only when data param is None and dst_path is not provided (assuming the caller wants to process the data by itself). """ close_file = False if data is None and dst_path: data = open(dst_path, 'wb') close_file = True write_image = True if data is None: write_image = False try: # Exit early if we do not need write nor verify if verifier is None and not write_image: return image_chunks for chunk in image_chunks: if verifier: verifier.update(chunk) if write_image: data.write(chunk) if verifier: verifier.verify() LOG.info('Image signature verification succeeded ' 'for image %s', image_id) except cryptography.exceptions.InvalidSignature: if write_image: data.truncate(0) with excutils.save_and_reraise_exception(): LOG.error('Image signature verification failed ' 'for image %s', image_id) except Exception as ex: if write_image: with excutils.save_and_reraise_exception(): LOG.error("Error writing to %(path)s: %(exception)s", {'path': dst_path, 'exception': ex}) else: with excutils.save_and_reraise_exception(): LOG.error("Error during image verification: %s", ex) finally: if close_file: # Ensure that the data is pushed all the way down to # persistent storage. This ensures that in the event of a # subsequent host crash we don't have running instances # using a corrupt backing file. data.flush() self._safe_fsync(data) data.close() if data is None: return image_chunks def _get_verifier(self, context, image_id, trusted_certs): verifier = None # Use the default certs if the user didn't provide any (and there are # default certs configured). 
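# For reference, an illustrative [glance] configuration that enables this
# default-certificate path (the certificate ID is a placeholder assumption,
# not a real default):
#
#     [glance]
#     verify_glance_signatures = True
#     enable_certificate_validation = True
#     default_trusted_certificate_ids = <trusted-cert-uuid>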
if (not trusted_certs and CONF.glance.enable_certificate_validation and CONF.glance.default_trusted_certificate_ids): trusted_certs = objects.TrustedCerts( ids=CONF.glance.default_trusted_certificate_ids) # Verify image signature if feature is enabled or trusted # certificates were provided if trusted_certs or CONF.glance.verify_glance_signatures: image_meta_dict = self.show(context, image_id, include_locations=False) image_meta = objects.ImageMeta.from_dict(image_meta_dict) img_signature = image_meta.properties.get('img_signature') img_sig_hash_method = image_meta.properties.get( 'img_signature_hash_method' ) img_sig_cert_uuid = image_meta.properties.get( 'img_signature_certificate_uuid' ) img_sig_key_type = image_meta.properties.get( 'img_signature_key_type' ) try: verifier = signature_utils.get_verifier( context=context, img_signature_certificate_uuid=img_sig_cert_uuid, img_signature_hash_method=img_sig_hash_method, img_signature=img_signature, img_signature_key_type=img_sig_key_type, ) except cursive_exception.SignatureVerificationError: with excutils.save_and_reraise_exception(): LOG.error('Image signature verification failed ' 'for image: %s', image_id) # Validate image signature certificate if trusted certificates # were provided # NOTE(jackie-truong): Certificate validation will occur if # trusted_certs are provided, even if the certificate validation # feature is disabled. This is to provide safety for the user. # We may want to consider making this a "soft" check in the future. if trusted_certs: _verify_certs(context, img_sig_cert_uuid, trusted_certs) elif CONF.glance.enable_certificate_validation: msg = ('Image signature certificate validation enabled, ' 'but no trusted certificate IDs were provided. ' 'Unable to validate the certificate used to ' 'verify the image signature.') LOG.warning(msg) raise exception.CertificateValidationFailed(msg) else: LOG.debug('Certificate validation was not performed. A list ' 'of trusted image certificate IDs must be provided ' 'in order to validate an image certificate.') return verifier def create(self, context, image_meta, data=None): """Store the image data and return the new image object.""" # Here we workaround the situation when user wants to activate an # empty image right after the creation. In Glance v1 api (and # therefore in Nova) it is enough to set 'size = 0'. v2 api # doesn't allow this hack - we have to send an upload request with # empty data. force_activate = data is None and image_meta.get('size') == 0 # The "instance_owner" property is set in the API if a user, who is # not the owner of an instance, is creating the image, e.g. admin # snapshots or shelves another user's instance. This is used to add # member access to the image for the instance owner. sharing_member_id = image_meta.get('properties', {}).pop( 'instance_owner', None) sent_service_image_meta = _translate_to_glance(image_meta) try: image = self._create_v2(context, sent_service_image_meta, data, force_activate, sharing_member_id=sharing_member_id) except glanceclient.exc.HTTPException: _reraise_translated_exception() return _translate_from_glance(image) def _add_location(self, context, image_id, location): # 'show_multiple_locations' must be enabled in glance api conf file. 
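# Try the newer 'add_image_location' client method first and fall back to
# the legacy 'add_location' call; if neither is implemented by the server,
# the HTTPNotImplemented error from the last attempt is re-raised below.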
try_methods = ('add_image_location', 'add_location') exc = None for method in try_methods: try: return self._client.call( context, 2, method, args=(image_id, location, {})) except glanceclient.exc.HTTPNotImplemented as e: exc = e LOG.debug('Glance method %s not available', method) except glanceclient.exc.HTTPBadRequest as e: exc = e _reraise_translated_exception() raise exc def _add_image_member(self, context, image_id, member_id): """Grant access to another project that does not own the image :param context: nova auth RequestContext where context.project_id is the owner of the image :param image_id: ID of the image on which to grant access :param member_id: ID of the member project to grant access to the image; this should not be the owner of the image :returns: A Member schema object of the created image member """ try: return self._client.call( context, 2, 'create', controller='image_members', args=(image_id, member_id)) except glanceclient.exc.HTTPBadRequest: _reraise_translated_exception() def _upload_data(self, context, image_id, data): # NOTE(aarents) offload upload in a native thread as it can block # coroutine in busy environment. utils.tpool_execute(self._client.call, context, 2, 'upload', args=(image_id, data)) return self._client.call(context, 2, 'get', args=(image_id,)) def _get_image_create_disk_format_default(self, context): """Gets an acceptable default image disk_format based on the schema. """ # These preferred disk formats are in order: # 1. we want qcow2 if possible (at least for backward compat) # 2. vhd for hyperv # 3. vmdk for vmware # 4. raw should be universally accepted preferred_disk_formats = ( fields.DiskFormat.QCOW2, fields.DiskFormat.VHD, fields.DiskFormat.VMDK, fields.DiskFormat.RAW, ) # Get the image schema - note we don't cache this value since it could # change under us. This looks a bit funky, but what it's basically # doing is calling glanceclient.v2.Client.schemas.get('image'). image_schema = self._client.call( context, 2, 'get', args=('image',), controller='schemas') # get the disk_format schema property from the raw schema disk_format_schema = ( image_schema.raw()['properties'].get('disk_format') if image_schema else {} ) if disk_format_schema and 'enum' in disk_format_schema: supported_disk_formats = disk_format_schema['enum'] # try a priority ordered list for preferred_format in preferred_disk_formats: if preferred_format in supported_disk_formats: return preferred_format # alright, let's just return whatever is available LOG.debug('Unable to find a preferred disk_format for image ' 'creation with the Image Service v2 API. Using: %s', supported_disk_formats[0]) return supported_disk_formats[0] LOG.warning('Unable to determine disk_format schema from the ' 'Image Service v2 API. Defaulting to ' '%(preferred_disk_format)s.', {'preferred_disk_format': preferred_disk_formats[0]}) return preferred_disk_formats[0] def _create_v2(self, context, sent_service_image_meta, data=None, force_activate=False, sharing_member_id=None): # Glance v1 allows image activation without setting disk and # container formats, v2 doesn't. It leads to the dirtiest workaround # where we have to hardcode this parameters. if force_activate: data = '' # NOTE(danms): If we are using this terrible hack to upload # zero-length data to activate the image, we cannot claim it # is some format other than 'raw'. If the caller asked for # something specific, that's a bug. Otherwise, we must force # disk_format=raw. 
if 'disk_format' not in sent_service_image_meta: sent_service_image_meta['disk_format'] = 'raw' elif sent_service_image_meta['disk_format'] != 'raw': raise exception.ImageBadRequest( 'Unable to force activate with disk_format=%s' % ( sent_service_image_meta['disk_format'])) if 'container_format' not in sent_service_image_meta: sent_service_image_meta['container_format'] = 'bare' location = sent_service_image_meta.pop('location', None) image = self._client.call( context, 2, 'create', kwargs=sent_service_image_meta) image_id = image['id'] # Sending image location in a separate request. if location: self._add_location(context, image_id, location) # Add image membership in a separate request. if sharing_member_id: LOG.debug('Adding access for member %s to image %s', sharing_member_id, image_id) self._add_image_member(context, image_id, sharing_member_id) # If we have some data we have to send it in separate request and # update the image then. if data is not None: image = self._upload_data(context, image_id, data) return image def update(self, context, image_id, image_meta, data=None, purge_props=True): """Modify the given image with the new data.""" sent_service_image_meta = _translate_to_glance(image_meta) # NOTE(bcwaldon): id is not an editable field, but it is likely to be # passed in by calling code. Let's be nice and ignore it. sent_service_image_meta.pop('id', None) sent_service_image_meta['image_id'] = image_id try: if purge_props: # In Glance v2 we have to explicitly set prop names # we want to remove. all_props = set(self.show( context, image_id)['properties'].keys()) props_to_update = set( image_meta.get('properties', {}).keys()) remove_props = list(all_props - props_to_update) sent_service_image_meta['remove_props'] = remove_props image = self._update_v2(context, sent_service_image_meta, data) except Exception: _reraise_translated_image_exception(image_id) return _translate_from_glance(image) def _update_v2(self, context, sent_service_image_meta, data=None): location = sent_service_image_meta.pop('location', None) image_id = sent_service_image_meta['image_id'] image = self._client.call( context, 2, 'update', kwargs=sent_service_image_meta) # Sending image location in a separate request. if location: self._add_location(context, image_id, location) # If we have some data we have to send it in separate request and # update the image then. if data is not None: image = self._upload_data(context, image_id, data) return image def delete(self, context, image_id): """Delete the given image. :raises: ImageNotFound if the image does not exist. :raises: NotAuthorized if the user is not an owner. :raises: ImageNotAuthorized if the user is not authorized. :raises: ImageDeleteConflict if the image is conflicted to delete. """ try: self._client.call(context, 2, 'delete', args=(image_id,)) except glanceclient.exc.NotFound: raise exception.ImageNotFound(image_id=image_id) except glanceclient.exc.HTTPForbidden: raise exception.ImageNotAuthorized(image_id=image_id) except glanceclient.exc.HTTPConflict as exc: raise exception.ImageDeleteConflict(reason=str(exc)) return True def image_import_copy(self, context, image_id, stores): """Copy an image to another store using image_import. This triggers the Glance image_import API with an opinionated method of 'copy-image' to a list of stores. This will initiate a copy of the image from one of the existing stores to the stores provided. 
:param context: The RequestContext :param image_id: The image to copy :param stores: A list of stores to copy the image to :raises: ImageNotFound if the image does not exist. :raises: ImageNotAuthorized if the user is not permitted to import/copy this image :raises: ImageImportImpossible if the image cannot be imported for workflow reasons (not active, etc) :raises: ImageBadRequest if the image is already in the requested store (which may be a race) """ try: self._client.call(context, 2, 'image_import', args=(image_id,), kwargs={'method': 'copy-image', 'stores': stores}) except glanceclient.exc.NotFound: raise exception.ImageNotFound(image_id=image_id) except glanceclient.exc.HTTPForbidden: raise exception.ImageNotAuthorized(image_id=image_id) except glanceclient.exc.HTTPConflict as exc: raise exception.ImageImportImpossible(image_id=image_id, reason=str(exc)) except glanceclient.exc.HTTPBadRequest as exc: raise exception.ImageBadRequest(image_id=image_id, response=str(exc)) def _extract_query_params_v2(params): _params = {} accepted_params = ('filters', 'marker', 'limit', 'page_size', 'sort_key', 'sort_dir') for param in accepted_params: if params.get(param): _params[param] = params.get(param) # ensure filters is a dict _params.setdefault('filters', {}) # NOTE(vish): don't filter out private images _params['filters'].setdefault('is_public', 'none') # adopt filters to be accepted by glance v2 api filters = _params['filters'] new_filters = {} for filter_ in filters: # remove 'property-' prefix from filters by custom properties if filter_.startswith('property-'): new_filters[filter_.lstrip('property-')] = filters[filter_] elif filter_ == 'changes-since': # convert old 'changes-since' into new 'updated_at' filter updated_at = 'gte:' + filters['changes-since'] new_filters['updated_at'] = updated_at elif filter_ == 'is_public': # convert old 'is_public' flag into 'visibility' filter # omit the filter if is_public is None is_public = filters['is_public'] if is_public.lower() in ('true', '1'): new_filters['visibility'] = 'public' elif is_public.lower() in ('false', '0'): new_filters['visibility'] = 'private' else: new_filters[filter_] = filters[filter_] _params['filters'] = new_filters return _params def _is_image_available(context, image): """Check image availability. This check is needed in case Nova and Glance are deployed without authentication turned on. """ # The presence of an auth token implies this is an authenticated # request and we need not handle the noauth use-case. if hasattr(context, 'auth_token') and context.auth_token: return True def _is_image_public(image): # NOTE(jaypipes) V2 Glance API replaced the is_public attribute # with a visibility attribute. We do this here to prevent the # glanceclient for a V2 image model from throwing an # exception from warlock when trying to access an is_public # attribute. 
if hasattr(image, 'visibility'): return str(image.visibility).lower() == 'public' else: return image.is_public if context.is_admin or _is_image_public(image): return True properties = image.properties if context.project_id and ('owner_id' in properties): return str(properties['owner_id']) == str(context.project_id) if context.project_id and ('project_id' in properties): return str(properties['project_id']) == str(context.project_id) try: user_id = properties['user_id'] except KeyError: return False return str(user_id) == str(context.user_id) def _translate_to_glance(image_meta): image_meta = _convert_to_string(image_meta) image_meta = _remove_read_only(image_meta) image_meta = _convert_to_v2(image_meta) return image_meta def _convert_to_v2(image_meta): output = {} for name, value in image_meta.items(): if name == 'properties': for prop_name, prop_value in value.items(): # if allow_additional_image_properties is disabled we can't # define kernel_id and ramdisk_id as None, so we have to omit # these properties if they are not set. if prop_name in ('kernel_id', 'ramdisk_id') and \ prop_value is not None and \ prop_value.strip().lower() in ('none', ''): continue # in glance only string and None property values are allowed, # v1 client accepts any values and converts them to string, # v2 doesn't - so we have to take care of it. elif prop_value is None or isinstance(prop_value, str): output[prop_name] = prop_value else: output[prop_name] = str(prop_value) elif name in ('min_ram', 'min_disk'): output[name] = int(value) elif name == 'is_public': output['visibility'] = 'public' if value else 'private' elif name in ('size', 'deleted'): continue else: output[name] = value return output def _translate_from_glance(image, include_locations=False): image_meta = _extract_attributes_v2( image, include_locations=include_locations) image_meta = _convert_timestamps_to_datetimes(image_meta) image_meta = _convert_from_string(image_meta) return image_meta def _convert_timestamps_to_datetimes(image_meta): """Returns image with timestamp fields converted to datetime objects.""" for attr in ['created_at', 'updated_at', 'deleted_at']: if image_meta.get(attr): image_meta[attr] = timeutils.parse_isotime(image_meta[attr]) return image_meta # NOTE(bcwaldon): used to store non-string data in glance metadata def _json_loads(properties, attr): prop = properties[attr] if isinstance(prop, str): properties[attr] = jsonutils.loads(prop) def _json_dumps(properties, attr): prop = properties[attr] if not isinstance(prop, str): properties[attr] = jsonutils.dumps(prop) _CONVERT_PROPS = ('block_device_mapping', 'mappings') def _convert(method, metadata): metadata = copy.deepcopy(metadata) properties = metadata.get('properties') if properties: for attr in _CONVERT_PROPS: if attr in properties: method(properties, attr) return metadata def _convert_from_string(metadata): return _convert(_json_loads, metadata) def _convert_to_string(metadata): return _convert(_json_dumps, metadata) def _extract_attributes(image, include_locations=False): # TODO(mfedosin): Remove this function once we move to glance V2 # completely. # NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform # a get(), resulting in a useless request back to glance. 
This list is # therefore sorted, with dependent attributes as the end # 'deleted_at' depends on 'deleted' # 'checksum' depends on 'status' == 'active' IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner', 'container_format', 'status', 'id', 'name', 'created_at', 'updated_at', 'deleted', 'deleted_at', 'checksum', 'min_disk', 'min_ram', 'is_public', 'direct_url', 'locations'] queued = getattr(image, 'status') == 'queued' queued_exclude_attrs = ['disk_format', 'container_format'] include_locations_attrs = ['direct_url', 'locations'] output = {} for attr in IMAGE_ATTRIBUTES: if attr == 'deleted_at' and not output['deleted']: output[attr] = None elif attr == 'checksum' and output['status'] != 'active': output[attr] = None # image may not have 'name' attr elif attr == 'name': output[attr] = getattr(image, attr, None) # NOTE(liusheng): queued image may not have these attributes and 'name' elif queued and attr in queued_exclude_attrs: output[attr] = getattr(image, attr, None) # NOTE(mriedem): Only get location attrs if including locations. elif attr in include_locations_attrs: if include_locations: output[attr] = getattr(image, attr, None) # NOTE(mdorman): 'size' attribute must not be 'None', so use 0 instead elif attr == 'size': # NOTE(mriedem): A snapshot image may not have the size attribute # set so default to 0. output[attr] = getattr(image, attr, 0) or 0 else: # NOTE(xarses): Anything that is caught with the default value # will result in an additional lookup to glance for said attr. # Notable attributes that could have this issue: # disk_format, container_format, name, deleted, checksum output[attr] = getattr(image, attr, None) output['properties'] = getattr(image, 'properties', {}) return output def _extract_attributes_v2(image, include_locations=False): include_locations_attrs = ['direct_url', 'locations'] omit_attrs = ['self', 'schema', 'protected', 'virtual_size', 'file', 'tags'] raw_schema = image.schema schema = schemas.Schema(raw_schema) output = {'properties': {}, 'deleted': False, 'deleted_at': None, 'disk_format': None, 'container_format': None, 'name': None, 'checksum': None} for name, value in image.items(): if (name in omit_attrs or name in include_locations_attrs and not include_locations): continue elif name == 'visibility': output['is_public'] = value == 'public' elif name == 'size' and value is None: output['size'] = 0 elif schema.is_base_property(name): output[name] = value else: output['properties'][name] = value return output def _remove_read_only(image_meta): IMAGE_ATTRIBUTES = ['status', 'updated_at', 'created_at', 'deleted_at'] output = copy.deepcopy(image_meta) for attr in IMAGE_ATTRIBUTES: if attr in output: del output[attr] return output def _reraise_translated_image_exception(image_id): """Transform the exception for the image but keep its traceback intact.""" exc_type, exc_value, exc_trace = sys.exc_info() new_exc = _translate_image_exception(image_id, exc_value) raise new_exc.with_traceback(exc_trace) def _reraise_translated_exception(): """Transform the exception but keep its traceback intact.""" exc_type, exc_value, exc_trace = sys.exc_info() new_exc = _translate_plain_exception(exc_value) raise new_exc.with_traceback(exc_trace) def _translate_image_exception(image_id, exc_value): if isinstance(exc_value, (glanceclient.exc.Forbidden, glanceclient.exc.Unauthorized)): return exception.ImageNotAuthorized(image_id=image_id) if isinstance(exc_value, glanceclient.exc.NotFound): return exception.ImageNotFound(image_id=image_id) if isinstance(exc_value, 
glanceclient.exc.BadRequest): return exception.ImageBadRequest(image_id=image_id, response=str(exc_value)) if isinstance(exc_value, glanceclient.exc.HTTPOverLimit): return exception.ImageQuotaExceeded(image_id=image_id) return exc_value def _translate_plain_exception(exc_value): if isinstance(exc_value, (glanceclient.exc.Forbidden, glanceclient.exc.Unauthorized)): return exception.Forbidden(str(exc_value)) if isinstance(exc_value, glanceclient.exc.NotFound): return exception.NotFound(str(exc_value)) if isinstance(exc_value, glanceclient.exc.BadRequest): return exception.Invalid(str(exc_value)) return exc_value def _verify_certs(context, img_sig_cert_uuid, trusted_certs): try: certificate_utils.verify_certificate( context=context, certificate_uuid=img_sig_cert_uuid, trusted_certificate_uuids=trusted_certs.ids) LOG.debug('Image signature certificate validation ' 'succeeded for certificate: %s', img_sig_cert_uuid) except cursive_exception.SignatureVerificationError as e: LOG.warning('Image signature certificate validation ' 'failed for certificate: %s', img_sig_cert_uuid) raise exception.CertificateValidationFailed( cert_uuid=img_sig_cert_uuid, reason=str(e)) def get_remote_image_service(context, image_href): """Create an image_service and parse the id from the given image_href. The image_href param can be an href of the form 'http://example.com:9292/v1/images/b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3', or just an id such as 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'. If the image_href is a standalone id, then the default image service is returned. :param image_href: href that describes the location of an image :returns: a tuple of the form (image_service, image_id) """ # NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a # standalone image ID if '/' not in str(image_href): image_service = get_default_image_service() return image_service, image_href try: (image_id, endpoint) = _endpoint_from_image_ref(image_href) glance_client = GlanceClientWrapper(context=context, endpoint=endpoint) except ValueError: raise exception.InvalidImageRef(image_href=image_href) image_service = GlanceImageServiceV2(client=glance_client) return image_service, image_id def get_default_image_service(): return GlanceImageServiceV2() class UpdateGlanceImage(object): def __init__(self, context, image_id, metadata, stream): self.context = context self.image_id = image_id self.metadata = metadata self.image_stream = stream def start(self): image_service, image_id = get_remote_image_service( self.context, self.image_id) image_service.update(self.context, image_id, self.metadata, self.image_stream, purge_props=False) @profiler.trace_cls("nova_image") class API(object): """API for interacting with the image service.""" def _get_session_and_image_id(self, context, id_or_uri): """Returns a tuple of (session, image_id). If the supplied `id_or_uri` is an image ID, then the default client session will be returned for the context's user, along with the image ID. If the supplied `id_or_uri` parameter is a URI, then a client session connecting to the URI's image service endpoint will be returned along with a parsed image ID from that URI. :param context: The `nova.context.Context` object for the request :param id_or_uri: A UUID identifier or an image URI to look up image information for. """ return get_remote_image_service(context, id_or_uri) def _get_session(self, _context): """Returns a client session that can be used to query for image information. 
:param _context: The `nova.context.Context` object for the request """ # TODO(jaypipes): Refactor get_remote_image_service and # get_default_image_service into a single # method that takes a context and actually respects # it, returning a real session object that keeps # the context alive... return get_default_image_service() @staticmethod def generate_image_url(image_ref, context): """Generate an image URL from an image_ref. :param image_ref: The image ref to generate URL :param context: The `nova.context.Context` object for the request """ return "%s/images/%s" % (next(get_api_servers(context)), image_ref) def get_all(self, context, **kwargs): """Retrieves all information records about all disk images available to show to the requesting user. If the requesting user is an admin, all images in an ACTIVE status are returned. If the requesting user is not an admin, the all public images and all private images that are owned by the requesting user in the ACTIVE status are returned. :param context: The `nova.context.Context` object for the request :param kwargs: A dictionary of filter and pagination values that may be passed to the underlying image info driver. """ session = self._get_session(context) return session.detail(context, **kwargs) def get(self, context, id_or_uri, include_locations=False, show_deleted=True): """Retrieves the information record for a single disk image. If the supplied identifier parameter is a UUID, the default driver will be used to return information about the image. If the supplied identifier is a URI, then the driver that matches that URI endpoint will be used to query for image information. :param context: The `nova.context.Context` object for the request :param id_or_uri: A UUID identifier or an image URI to look up image information for. :param include_locations: (Optional) include locations in the returned dict of information if the image service API supports it. If the image service API does not support the locations attribute, it will still be included in the returned dict, as an empty list. :param show_deleted: (Optional) show the image even the status of image is deleted. """ session, image_id = self._get_session_and_image_id(context, id_or_uri) return session.show(context, image_id, include_locations=include_locations, show_deleted=show_deleted) def create(self, context, image_info, data=None): """Creates a new image record, optionally passing the image bits to backend storage. :param context: The `nova.context.Context` object for the request :param image_info: A dict of information about the image that is passed to the image registry. :param data: Optional file handle or bytestream iterator that is passed to backend storage. """ session = self._get_session(context) return session.create(context, image_info, data=data) def update(self, context, id_or_uri, image_info, data=None, purge_props=False): """Update the information about an image, optionally along with a file handle or bytestream iterator for image bits. If the optional file handle for updated image bits is supplied, the image may not have already uploaded bits for the image. :param context: The `nova.context.Context` object for the request :param id_or_uri: A UUID identifier or an image URI to look up image information for. :param image_info: A dict of information about the image that is passed to the image registry. :param data: Optional file handle or bytestream iterator that is passed to backend storage. :param purge_props: Optional, defaults to False. 
If set, the backend image registry will clear all image properties and replace them the image properties supplied in the image_info dictionary's 'properties' collection. """ session, image_id = self._get_session_and_image_id(context, id_or_uri) return session.update(context, image_id, image_info, data=data, purge_props=purge_props) def delete(self, context, id_or_uri): """Delete the information about an image and mark the image bits for deletion. :param context: The `nova.context.Context` object for the request :param id_or_uri: A UUID identifier or an image URI to look up image information for. """ session, image_id = self._get_session_and_image_id(context, id_or_uri) return session.delete(context, image_id) def download(self, context, id_or_uri, data=None, dest_path=None, trusted_certs=None): """Transfer image bits from Glance or a known source location to the supplied destination filepath. :param context: The `nova.context.RequestContext` object for the request :param id_or_uri: A UUID identifier or an image URI to look up image information for. :param data: A file object to use in downloading image data. :param dest_path: Filepath to transfer image bits to. :param trusted_certs: A 'nova.objects.trusted_certs.TrustedCerts' object with a list of trusted image certificate IDs. Note that because of the poor design of the `glance.ImageService.download` method, the function returns different things depending on what arguments are passed to it. If a data argument is supplied but no dest_path is specified (not currently done by any caller) then None is returned from the method. If the data argument is not specified but a destination path *is* specified, then a writeable file handle to the destination path is constructed in the method and the image bits written to that file, and again, None is returned from the method. If no data argument is supplied and no dest_path argument is supplied (VMWare virt driver), then the method returns an iterator to the image bits that the caller uses to write to wherever location it wants. Finally, if the allow_direct_url_schemes CONF option is set to something, then the nova.image.download modules are used to attempt to do an SCP copy of the image bits from a file location to the dest_path and None is returned after retrying one or more download locations (libvirt and Hyper-V virt drivers through nova.virt.images.fetch). I think the above points to just how hacky/wacky all of this code is, and the reason it needs to be cleaned up and standardized across the virt driver callers. """ # TODO(jaypipes): Deprecate and remove this method entirely when we # move to a system that simply returns a file handle # to a bytestream iterator and allows the caller to # handle streaming/copying/zero-copy as they see fit. session, image_id = self._get_session_and_image_id(context, id_or_uri) return session.download(context, image_id, data=data, dst_path=dest_path, trusted_certs=trusted_certs) def copy_image_to_store(self, context, image_id, store): """Initiate a store-to-store copy in glance. :param context: The RequestContext. :param image_id: The image to copy. :param store: The glance store to target the copy. 
""" session, image_id = self._get_session_and_image_id(context, image_id) return session.image_import_copy(context, image_id, [store]) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3696086 nova-32.0.0/nova/keymgr/0000775000175000017500000000000000000000000015023 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/keymgr/__init__.py0000664000175000017500000000000000000000000017122 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/keymgr/conf_key_mgr.py0000664000175000017500000001235300000000000020043 0ustar00zuulzuul00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ An implementation of a key manager that reads its key from the project's configuration options. This key manager implementation provides limited security, assuming that the key remains secret. Using the volume encryption feature as an example, encryption provides protection against a lost or stolen disk, assuming that the configuration file that contains the key is not stored on the disk. Encryption also protects the confidentiality of data as it is transmitted via iSCSI from the compute host to the storage host (again assuming that an attacker who intercepts the data does not know the secret key). Because this implementation uses a single, fixed key, it proffers no protection once that key is compromised. In particular, different volumes encrypted with a key provided by this key manager actually share the same encryption key so *any* volume can be decrypted once the fixed key is known. """ import binascii from castellan.common.objects import symmetric_key as key from castellan.key_manager import key_manager from oslo_log import log as logging import nova.conf from nova import exception from nova.i18n import _ CONF = nova.conf.CONF LOG = logging.getLogger(__name__) class ConfKeyManager(key_manager.KeyManager): """This key manager implementation supports all the methods specified by the key manager interface. This implementation creates a single key in response to all invocations of create_key. Side effects (e.g., raising exceptions) for each method are handled as specified by the key manager interface. 
""" def __init__(self, configuration): LOG.warning('This key manager is insecure and is not recommended ' 'for production deployments') super(ConfKeyManager, self).__init__(configuration) self.key_id = '00000000-0000-0000-0000-000000000000' self.conf = CONF if configuration is None else configuration if CONF.key_manager.fixed_key is None: raise ValueError(_('keymgr.fixed_key not defined')) self._hex_key = CONF.key_manager.fixed_key super(ConfKeyManager, self).__init__(configuration) def _get_key(self): key_bytes = bytes(binascii.unhexlify(self._hex_key)) return key.SymmetricKey('AES', len(key_bytes) * 8, key_bytes) def create_key(self, context, algorithm, length, **kwargs): """Creates a symmetric key. This implementation returns a UUID for the key read from the configuration file. A Forbidden exception is raised if the specified context is None. """ if context is None: raise exception.Forbidden() return self.key_id def create_key_pair(self, context, **kwargs): raise NotImplementedError( "ConfKeyManager does not support asymmetric keys") def store(self, context, managed_object, **kwargs): """Stores (i.e., registers) a key with the key manager.""" if context is None: raise exception.Forbidden() if managed_object != self._get_key(): raise exception.KeyManagerError( reason="cannot store arbitrary keys") return self.key_id def get(self, context, managed_object_id): """Retrieves the key identified by the specified id. This implementation returns the key that is associated with the specified UUID. A Forbidden exception is raised if the specified context is None; a KeyError is raised if the UUID is invalid. """ if context is None: raise exception.Forbidden() if managed_object_id != self.key_id: raise KeyError(str(managed_object_id) + " != " + str(self.key_id)) return self._get_key() def delete(self, context, managed_object_id): """Represents deleting the key. Because the ConfKeyManager has only one key, which is read from the configuration file, the key is not actually deleted when this is called. """ if context is None: raise exception.Forbidden() if managed_object_id != self.key_id: raise exception.KeyManagerError( reason="cannot delete non-existent key") LOG.warning("Not deleting key %s", managed_object_id) def add_consumer(self, context, managed_object_id, consumer_data): raise NotImplementedError( 'ConfKeyManager does not implement adding consumers' ) def remove_consumer(self, context, managed_object_id, consumer_data): raise NotImplementedError( 'ConfKeyManager does not implement removing consumers' ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3696086 nova-32.0.0/nova/limit/0000775000175000017500000000000000000000000014643 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/limit/__init__.py0000664000175000017500000000000000000000000016742 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/limit/local.py0000664000175000017500000002040300000000000016306 0ustar00zuulzuul00000000000000# Copyright 2022 StackHPC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import typing as ty from oslo_limit import exception as limit_exceptions from oslo_limit import limit from oslo_log import log as logging import nova.conf from nova import exception from nova.limit import utils as nova_limit_utils from nova import objects LOG = logging.getLogger(__name__) CONF = nova.conf.CONF # Entity types for API Limits, same as names of config options prefixed with # "server_" to disambiguate them in keystone SERVER_METADATA_ITEMS = "server_metadata_items" INJECTED_FILES = "server_injected_files" INJECTED_FILES_CONTENT = "server_injected_file_content_bytes" INJECTED_FILES_PATH = "server_injected_file_path_bytes" API_LIMITS = set([ SERVER_METADATA_ITEMS, INJECTED_FILES, INJECTED_FILES_CONTENT, INJECTED_FILES_PATH, ]) # Entity types for all DB limits, same as names of config options prefixed with # "server_" to disambiguate them in keystone KEY_PAIRS = "server_key_pairs" SERVER_GROUPS = "server_groups" SERVER_GROUP_MEMBERS = "server_group_members" DB_LIMITS = set([ KEY_PAIRS, SERVER_GROUPS, SERVER_GROUP_MEMBERS, ]) # Checks only happen when we are using the unified limits driver UNIFIED_LIMITS_DRIVER = "nova.quota.UnifiedLimitsDriver" # Map entity types to the exception we raise in the case that the resource is # over the allowed limit. Each of these should be a subclass of # exception.OverQuota. EXCEPTIONS = { KEY_PAIRS: exception.KeypairLimitExceeded, INJECTED_FILES_CONTENT: exception.OnsetFileContentLimitExceeded, INJECTED_FILES_PATH: exception.OnsetFilePathLimitExceeded, INJECTED_FILES: exception.OnsetFileLimitExceeded, SERVER_METADATA_ITEMS: exception.MetadataLimitExceeded, SERVER_GROUPS: exception.ServerGroupLimitExceeded, SERVER_GROUP_MEMBERS: exception.GroupMemberLimitExceeded, } # Map new limit-based quota names to the legacy ones. LEGACY_LIMITS = { SERVER_METADATA_ITEMS: "metadata_items", INJECTED_FILES: "injected_files", INJECTED_FILES_CONTENT: "injected_file_content_bytes", INJECTED_FILES_PATH: "injected_file_path_bytes", KEY_PAIRS: "key_pairs", SERVER_GROUPS: SERVER_GROUPS, SERVER_GROUP_MEMBERS: SERVER_GROUP_MEMBERS, } def get_in_use( context: 'nova.context.RequestContext', project_id: str ) -> ty.Dict[str, int]: """Returns in use counts for each resource, for given project. This sounds simple but many resources can't be counted per project, so the only sensible value is 0. For example, key pairs are counted per user, and server group members are counted per server group, and metadata items are counted per server. This behaviour is consistent with what is returned today by the DB based quota driver. """ count = _server_group_count(context, project_id)['server_groups'] usages = { # DB limits SERVER_GROUPS: count, SERVER_GROUP_MEMBERS: 0, KEY_PAIRS: 0, # API limits SERVER_METADATA_ITEMS: 0, INJECTED_FILES: 0, INJECTED_FILES_CONTENT: 0, INJECTED_FILES_PATH: 0, } return _convert_keys_to_legacy_name(usages) def always_zero_usage( project_id: str, resource_names: ty.List[str] ) -> ty.Dict[str, int]: """Called by oslo_limit's enforcer""" # Return usage of 0 for API limits. Values in API requests will be used as # the deltas. 
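    # Hedged sketch of how this callback is used by enforce_api_limit() below
    # (the registered limit value is invented for the example). oslo.limit
    # adds the delta from the API request to the usage reported here, so with
    # a zero-usage callback only the size of the request itself is checked:
    #
    #   enforcer = limit.Enforcer(always_zero_usage)
    #   # registered limit for server_metadata_items in Keystone: 128
    #   enforcer.enforce(None, {SERVER_METADATA_ITEMS: 200})
    #   # -> 0 + 200 > 128, raises limit_exceptions.ProjectOverLimit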
return {resource_name: 0 for resource_name in resource_names} def enforce_api_limit(entity_type: str, count: int) -> None: """Check if the values given are over the limit for that key. This is generally used for limiting the size of certain API requests that eventually get stored in the database. """ if not nova_limit_utils.use_unified_limits(): return if entity_type not in API_LIMITS: fmt = "%s is not a valid API limit: %s" raise ValueError(fmt % (entity_type, API_LIMITS)) try: enforcer = limit.Enforcer(always_zero_usage) except limit_exceptions.SessionInitError as e: msg = ("Failed to connect to keystone while enforcing %s quota limit." % entity_type) LOG.error(msg + " Error: " + str(e)) raise exception.KeystoneConnectionFailed(msg) try: enforcer.enforce(None, {entity_type: count}) except limit_exceptions.ProjectOverLimit as e: if nova_limit_utils.should_enforce(e): # Copy the exception message to a OverQuota to propagate to the # API layer. raise EXCEPTIONS.get(entity_type, exception.OverQuota)(str(e)) def enforce_db_limit( context: 'nova.context.RequestContext', entity_type: str, entity_scope: ty.Any, delta: int ) -> None: """Check provided delta does not put resource over limit. Firstly we count the current usage given the specified scope. We then add that count to the specified delta to see if we are over the limit for that kind of entity. Note previously we used to recheck these limits. However these are really soft DDoS protections, not hard resource limits, so we don't do the recheck for these. The scope is specific to the limit type: * key_pairs scope is context.user_id * server_groups scope is context.project_id * server_group_members scope is server_group_uuid """ if not nova_limit_utils.use_unified_limits(): return if entity_type not in DB_COUNT_FUNCTION.keys(): fmt = "%s does not have a DB count function defined: %s" raise ValueError(fmt % (entity_type, DB_COUNT_FUNCTION.keys())) if delta < 0: raise ValueError("delta must be a positive integer") count_function = DB_COUNT_FUNCTION[entity_type] try: enforcer = limit.Enforcer( functools.partial(count_function, context, entity_scope)) except limit_exceptions.SessionInitError as e: msg = ("Failed to connect to keystone while enforcing %s quota limit." % entity_type) LOG.error(msg + " Error: " + str(e)) raise exception.KeystoneConnectionFailed(msg) try: enforcer.enforce(None, {entity_type: delta}) except limit_exceptions.ProjectOverLimit as e: if nova_limit_utils.should_enforce(e): # Copy the exception message to a OverQuota to propagate to the # API layer. 
raise EXCEPTIONS.get(entity_type, exception.OverQuota)(str(e)) def _convert_keys_to_legacy_name( new_dict: ty.Dict[str, int] ) -> ty.Dict[str, int]: legacy = {} for new_name, old_name in LEGACY_LIMITS.items(): # defensive in case oslo or keystone doesn't give us an answer legacy[old_name] = new_dict.get(new_name) or 0 return legacy def get_legacy_default_limits() -> ty.Dict[str, int]: # TODO(johngarbutt): need oslo.limit API for this, it should do caching enforcer = limit.Enforcer(lambda: None) new_limits = enforcer.get_registered_limits(LEGACY_LIMITS.keys()) return _convert_keys_to_legacy_name(dict(new_limits)) def _keypair_count(context, user_id, *args): count = objects.KeyPairList.get_count_by_user(context, user_id) return {'server_key_pairs': count} def _server_group_count(context, project_id, *args): raw_counts = objects.InstanceGroupList.get_counts(context, project_id) return {'server_groups': raw_counts['project']['server_groups']} def _server_group_members_count(context, server_group_uuid, *args): # NOTE(johngarbutt) we used to count members added per user server_group = objects.InstanceGroup.get_by_uuid(context, server_group_uuid) return {'server_group_members': len(server_group.members)} DB_COUNT_FUNCTION = { KEY_PAIRS: _keypair_count, SERVER_GROUPS: _server_group_count, SERVER_GROUP_MEMBERS: _server_group_members_count } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/limit/placement.py0000664000175000017500000001773700000000000017204 0ustar00zuulzuul00000000000000# Copyright 2022 StackHPC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import typing as ty import os_resource_classes as orc from oslo_limit import exception as limit_exceptions from oslo_limit import limit from oslo_log import log as logging import nova.conf from nova import exception from nova.limit import utils as limit_utils from nova import objects from nova import quota from nova.scheduler.client import report from nova.scheduler import utils LOG = logging.getLogger(__name__) CONF = nova.conf.CONF # Cache to avoid repopulating ksa state PLACEMENT_CLIENT = None LEGACY_LIMITS = { "servers": "instances", "class:VCPU": "cores", "class:MEMORY_MB": "ram", } def _get_placement_usages( context: 'nova.context.RequestContext', project_id: str ) -> ty.Dict[str, int]: return report.report_client_singleton().get_usages_counts_for_limits( context, project_id) def _get_usage( context: 'nova.context.RequestContext', project_id: str, resource_names: ty.List[str], ) -> ty.Dict[str, int]: """Called by oslo_limit's enforcer""" if not limit_utils.use_unified_limits(): raise NotImplementedError("Unified limits support is disabled") count_servers = False resource_classes = [] for resource in resource_names: if resource == "servers": count_servers = True continue if not resource.startswith("class:"): raise ValueError("Unknown resource type: %s" % resource) # Temporarily strip resource class prefix as placement does not use it. 
# Example: limit resource 'class:VCPU' will be returned as 'VCPU' from # placement. r_class = resource.lstrip("class:") if r_class in orc.STANDARDS or orc.is_custom(r_class): resource_classes.append(r_class) else: raise ValueError("Unknown resource class: %s" % r_class) if not count_servers and len(resource_classes) == 0: raise ValueError("no resources to check") resource_counts = {} if count_servers: # TODO(melwitt): Change this to count servers from placement once nova # is using placement consumer types and is able to differentiate # between "instance" allocations vs "migration" allocations. if not quota.is_qfd_populated(context): LOG.error('Must migrate all instance mappings before using ' 'unified limits') raise ValueError("must first migrate instance mappings") mappings = objects.InstanceMappingList.get_counts(context, project_id) resource_counts['servers'] = mappings['project']['instances'] try: usages = _get_placement_usages(context, project_id) except exception.UsagesRetrievalFailed as e: msg = ("Failed to retrieve usages from placement while enforcing " "%s quota limits." % ", ".join(resource_names)) LOG.error(msg + " Error: " + str(e)) raise exception.UsagesRetrievalFailed(msg) # Use legacy behavior VCPU = VCPU + PCPU if configured. if CONF.workarounds.unified_limits_count_pcpu_as_vcpu: # If PCPU is in resource_classes, that means it was specified in the # flavor explicitly. In that case, we expect it to have its own limit # registered and we should not fold it into VCPU. if orc.PCPU in usages and orc.PCPU not in resource_classes: usages[orc.VCPU] = (usages.get(orc.VCPU, 0) + usages.get(orc.PCPU, 0)) for resource_class in resource_classes: # Need to add back resource class prefix that was stripped earlier resource_name = 'class:' + resource_class # Placement doesn't know about classes with zero usage # so default to zero to tell oslo.limit usage is zero resource_counts[resource_name] = usages.get(resource_class, 0) return resource_counts def _get_deltas_by_flavor( flavor: 'objects.Flavor', is_bfv: bool, count: int ) -> ty.Dict[str, int]: if flavor is None: raise ValueError("flavor") if count < 0: raise ValueError("count") # NOTE(johngarbutt): this skips bfv, port, and cyborg resources # but it still gives us better checks than before unified limits # We need an instance in the DB to use the current is_bfv logic # which doesn't work well for instances that don't yet have a uuid deltas_from_flavor = utils.resources_for_limits(flavor, is_bfv) deltas = {"servers": count} for resource, amount in deltas_from_flavor.items(): if amount != 0: deltas["class:%s" % resource] = amount * count return deltas def _get_enforcer( context: 'nova.context.RequestContext', project_id: str ) -> limit.Enforcer: # NOTE(johngarbutt) should we move context arg into oslo.limit? 
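    # Hedged illustration of what the callback below returns (the numbers are
    # invented): oslo.limit hands it the resource names being checked and
    # expects the current usage for each, e.g.
    #
    #   callback('my-project-id', ['servers', 'class:VCPU', 'class:MEMORY_MB'])
    #   -> {'servers': 4, 'class:VCPU': 8, 'class:MEMORY_MB': 16384}
    #
    # The enforcer then adds the deltas built by _get_deltas_by_flavor() to
    # these usages before comparing against the registered limits in Keystone.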
def callback(project_id, resource_names): return _get_usage(context, project_id, resource_names) return limit.Enforcer(callback) def enforce_num_instances_and_flavor( context: 'nova.context.RequestContext', project_id: str, flavor: 'objects.Flavor', is_bfvm: bool, min_count: int, max_count: int, enforcer: ty.Optional[limit.Enforcer] = None, delta_updates: ty.Optional[ty.Dict[str, int]] = None, ) -> int: """Return max instances possible, else raise TooManyInstances exception.""" if not limit_utils.use_unified_limits(): return max_count # Ensure the recursion will always complete if min_count < 0 or min_count > max_count: raise ValueError("invalid min_count") if max_count < 0: raise ValueError("invalid max_count") deltas = _get_deltas_by_flavor(flavor, is_bfvm, max_count) if delta_updates: deltas.update(delta_updates) enforcer = enforcer or _get_enforcer(context, project_id) try: enforcer.enforce(project_id, deltas) except limit_exceptions.ProjectOverLimit as e: if limit_utils.should_enforce(e): # NOTE(johngarbutt) we can do better, but this is very simple LOG.debug( "Limit check failed with count %s retrying with count %s", max_count, max_count - 1) try: return enforce_num_instances_and_flavor( context, project_id, flavor, is_bfvm, min_count, max_count - 1, enforcer=enforcer) except ValueError: # Copy the *original* exception message to a OverQuota to # propagate to the API layer raise exception.TooManyInstances(str(e)) # no problems with max_count, so we return max count return max_count def _convert_keys_to_legacy_name(new_dict): legacy = {} for new_name, old_name in LEGACY_LIMITS.items(): # defensive in case oslo or keystone doesn't give us an answer legacy[old_name] = new_dict.get(new_name) or 0 return legacy def get_legacy_default_limits(): enforcer = limit.Enforcer(lambda: None) new_limits = enforcer.get_registered_limits(LEGACY_LIMITS.keys()) return _convert_keys_to_legacy_name(dict(new_limits)) def get_legacy_project_limits(project_id): enforcer = limit.Enforcer(lambda: None) new_limits = enforcer.get_project_limits(project_id, LEGACY_LIMITS.keys()) return _convert_keys_to_legacy_name(dict(new_limits)) def get_legacy_counts(context, project_id): resource_names = list(LEGACY_LIMITS.keys()) resource_names.sort() new_usage = _get_usage(context, project_id, resource_names) return _convert_keys_to_legacy_name(new_usage) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/limit/utils.py0000664000175000017500000001671100000000000016363 0ustar00zuulzuul00000000000000# Copyright 2022 StackHPC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import typing as ty if ty.TYPE_CHECKING: from openstack import proxy from oslo_limit import exception as limit_exceptions from oslo_log import log as logging import nova.conf from nova import utils as nova_utils LOG = logging.getLogger(__name__) CONF = nova.conf.CONF UNIFIED_LIMITS_DRIVER = "nova.quota.UnifiedLimitsDriver" IDENTITY_CLIENT = None def use_unified_limits(): return CONF.quota.driver == UNIFIED_LIMITS_DRIVER class IdentityClient: connection: 'proxy.Proxy' service_id: str region_id: str def __init__(self, connection, service_id, region_id): self.connection = connection self.service_id = service_id self.region_id = region_id def registered_limits(self): return list(self.connection.registered_limits( service_id=self.service_id, region_id=self.region_id)) def _identity_client(): global IDENTITY_CLIENT if not IDENTITY_CLIENT: connection = nova_utils.get_sdk_adapter( 'identity', True, conf_group='oslo_limit') service_id = None region_id = None # Prefer the endpoint_id if present, same as oslo.limit. if CONF.oslo_limit.endpoint_id is not None: endpoint = connection.get_endpoint(CONF.oslo_limit.endpoint_id) service_id = endpoint.service_id region_id = endpoint.region_id elif 'endpoint_service_type' in CONF.oslo_limit: # This must be oslo.limit >= 2.6.0 and this block is more or less # copied from there. if (not CONF.oslo_limit.endpoint_service_type and not CONF.oslo_limit.endpoint_service_name): raise ValueError( 'Either endpoint_service_type or endpoint_service_name ' 'must be set') # Get the service_id for registered limits calls. services = connection.services( type=CONF.oslo_limit.endpoint_service_type, name=CONF.oslo_limit.endpoint_service_name) if len(services) > 1: raise ValueError('Multiple services found') service_id = services[0].id # Get the region_id if region name is configured. # endpoint_region_name was added in oslo.limit 2.6.0. if CONF.oslo_limit.endpoint_region_name: regions = connection.regions( name=CONF.oslo_limit.endpoint_region_name) if len(regions) > 1: raise ValueError('Multiple regions found') region_id = regions[0].id IDENTITY_CLIENT = IdentityClient(connection, service_id, region_id) return IDENTITY_CLIENT def should_enforce(exc: limit_exceptions.ProjectOverLimit) -> bool: """Whether the exceeded resource limit should be enforced. Given a ProjectOverLimit exception from oslo.limit, check whether the involved limit(s) should be enforced. This is needed if we need more logic than is available by default in oslo.limit. :param exc: An oslo.limit ProjectOverLimit exception instance, which contains a list of OverLimitInfo. Each OverLimitInfo includes a resource_name, limit, current_usage, and delta. """ # If any exceeded limit is greater than zero, it means an explicitly set # limit has been enforced. And if any explicitly set limit has gone over # quota, the enforcement should be upheld and there is no need to consider # the potential for unset limits. if any(info.limit > 0 for info in exc.over_limit_info_list): return True # Next, if all of the exceeded limits are -1, we don't need to enforce and # we can avoid calling Keystone for the list of registered limits. # # A value of -1 is documented in Keystone as meaning unlimited: # # "Note # The default limit of registered limit and the resource limit of project # limit now are limited from -1 to 2147483647 (integer). -1 means no limit # and 2147483647 is the max value for user to define limits." 
# # https://docs.openstack.org/keystone/latest/admin/unified-limits.html#what-is-a-limit # # but oslo.limit enforce does not treat -1 as unlimited at this time and # instead uses its literal integer value. We will consider any negative # limit value as unlimited. if all(info.limit < 0 for info in exc.over_limit_info_list): return False # Only resources with exceeded limits of "0" are candidates for # enforcement. # # A limit of "0" in the over_limit_info_list means that oslo.limit is # telling us the limit is 0. But oslo.limit returns 0 for two cases: # a) it found a limit of 0 in Keystone or b) it did not find a limit in # Keystone at all. # # We will need to query the list of registered limits from Keystone in # order to determine whether each "0" limit is case a) or case b). enforce_candidates = { info.resource_name for info in exc.over_limit_info_list if info.limit == 0} # Get a list of all the registered limits. There is not a way to filter by # resource names however this will do one API call whereas the alternative # is calling GET /registered_limits/{registered_limit_id} for each resource # name. registered_limits = _identity_client().registered_limits() # Make a set of resource names of the registered limits. have_limits_set = {limit.resource_name for limit in registered_limits} # If any candidates have limits set, enforce. It means at least one limit # has been explicitly set to 0. if enforce_candidates & have_limits_set: return True # The resource list will be either a require list or an ignore list. require_or_ignore = CONF.quota.unified_limits_resource_list strategy = CONF.quota.unified_limits_resource_strategy enforced = enforce_candidates if strategy == 'require': # Resources that are in both the candidate list and in the require list # should be enforced. enforced = enforce_candidates & set(require_or_ignore) elif strategy == 'ignore': # Resources that are in the candidate list but are not in the ignore # list should be enforced. enforced = enforce_candidates - set(require_or_ignore) else: LOG.error( f'Invalid strategy value: {strategy} is specified in the ' '[quota]unified_limits_resource_strategy config option, so ' f'enforcing for resources {enforced}') # Log in case we need to debug unexpected enforcement or non-enforcement. msg = ( f'enforcing for resources {enforced}' if enforced else 'not enforcing') LOG.debug( f'Resources {enforce_candidates} have no registered limits set in ' f'Keystone. [quota]unified_limits_resource_strategy is {strategy} and ' f'[quota]unified_limits_resource_list is {require_or_ignore}, ' f'so {msg}') return bool(enforced) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/loadables.py0000664000175000017500000001036400000000000016031 0ustar00zuulzuul00000000000000# Copyright (c) 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Generic Loadable class support. 
Meant to be used by such things as scheduler filters and weights where we want to load modules from certain directories and find certain types of classes within those modules. Note that this is quite different than generic plugins and the pluginmanager code that exists elsewhere. Usage: Create a directory with an __init__.py with code such as: class SomeLoadableClass(object): pass class MyLoader(nova.loadables.BaseLoader) def __init__(self): super(MyLoader, self).__init__(SomeLoadableClass) If you create modules in the same directory and subclass SomeLoadableClass within them, MyLoader().get_all_classes() will return a list of such classes. """ import inspect import os import sys from oslo_utils import importutils from nova import exception class BaseLoader(object): def __init__(self, loadable_cls_type): mod = sys.modules[self.__class__.__module__] self.path = os.path.abspath(mod.__path__[0]) self.package = mod.__package__ self.loadable_cls_type = loadable_cls_type def _is_correct_class(self, obj): """Return whether an object is a class of the correct type and is not prefixed with an underscore. """ return (inspect.isclass(obj) and (not obj.__name__.startswith('_')) and issubclass(obj, self.loadable_cls_type)) def _get_classes_from_module(self, module_name): """Get the classes from a module that match the type we want.""" classes = [] module = importutils.import_module(module_name) for obj_name in dir(module): # Skip objects that are meant to be private. if obj_name.startswith('_'): continue itm = getattr(module, obj_name) if self._is_correct_class(itm): classes.append(itm) return classes def get_all_classes(self): """Get the classes of the type we want from all modules found in the directory that defines this class. """ classes = [] for dirpath, _, filenames in os.walk(self.path): relpath = os.path.relpath(dirpath, self.path) if relpath == '.': relpkg = '' else: relpkg = '.%s' % '.'.join(relpath.split(os.sep)) for fname in filenames: root, ext = os.path.splitext(fname) if ext != '.py' or root == '__init__': continue module_name = "%s%s.%s" % (self.package, relpkg, root) mod_classes = self._get_classes_from_module(module_name) classes.extend(mod_classes) return classes def get_matching_classes(self, loadable_class_names): """Get loadable classes from a list of names. Each name can be a full module path or the full path to a method that returns classes to use. The latter behavior is useful to specify a method that returns a list of classes to use in a default case. 
""" classes = [] for cls_name in loadable_class_names: obj = importutils.import_class(cls_name) if self._is_correct_class(obj): classes.append(obj) elif inspect.isfunction(obj): # Get list of classes from a function for cls in obj(): classes.append(cls) else: error_str = 'Not a class of the correct type' raise exception.ClassNotFound(class_name=cls_name, exception=error_str) return classes ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315688.893605 nova-32.0.0/nova/locale/0000775000175000017500000000000000000000000014764 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315688.889605 nova-32.0.0/nova/locale/cs/0000775000175000017500000000000000000000000015371 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3696086 nova-32.0.0/nova/locale/cs/LC_MESSAGES/0000775000175000017500000000000000000000000017156 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/locale/cs/LC_MESSAGES/nova.po0000664000175000017500000024522100000000000020467 0ustar00zuulzuul00000000000000# Translations template for nova. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the nova project. # # Translators: # David Soukup , 2013 # FIRST AUTHOR , 2011 # Jaroslav Lichtblau , 2014 # Zbyněk Schwarz , 2013,2015 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-07-04 18:13+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:05+0000\n" "Last-Translator: Copied by Zanata \n" "Language: cs\n" "Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Czech\n" #, python-format msgid "%(address)s is not a valid IP v4/6 address." msgstr "%(address)s není platná IP adresa v4/6." #, python-format msgid "" "%(binary)s attempted direct database access which is not allowed by policy" msgstr "" "%(binary)s se pokusil o přímý přístup k databázi, což není povoleno zásadou" #, python-format msgid "%(cidr)s is not a valid IP network." msgstr "%(cidr)s není platná IP adresa sítě." #, python-format msgid "%(field)s should not be part of the updates." msgstr "%(field)s by nemělo být součástí aktualizací." #, python-format msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB" msgstr "%(memsize)d MB paměti přiděleno, ale očekáváno %(memtotal)d MB" #, python-format msgid "%(path)s is not on local storage: %(reason)s" msgstr "%(path)s není v místním úložišti: %(reason)s" #, python-format msgid "%(path)s is not on shared storage: %(reason)s" msgstr "%(path)s není ve sdíleném úložišti: %(reason)s" #, python-format msgid "%(type)s hypervisor does not support PCI devices" msgstr "Hypervizor %(type)s nepodporuje zařízení PCI" #, python-format msgid "%s does not support disk hotplug." msgstr "%s nepodporuje zapojování disku za běhu." #, python-format msgid "%s format is not supported" msgstr "formát %s není podporován" #, python-format msgid "%s is not supported." msgstr "%s není podporováno." #, python-format msgid "%s must be either 'MANUAL' or 'AUTO'." 
msgstr "%s musí být buď 'MANUAL' nebo 'AUTO'." #, python-format msgid "'%(other)s' should be an instance of '%(cls)s'" msgstr "'%(other)s' by měla být instancí '%(cls)s'" msgid "'qemu-img info' parsing failed." msgstr "zpracování 'qemu-img info' selhalo." #, python-format msgid "'rxtx_factor' argument must be a float between 0 and %g" msgstr "Argument 'rxtx_factor' musí být desetinné číslo mezi 0 a %g" #, python-format msgid "A NetworkModel is required in field %s" msgstr "NetworkModel je v poli %s vyžadováno" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "Řetězec verze API %(version)s je v neplatném formátu- Musí být ve formátu " "Hlavní číslo verze a Vedlejší číslo verze" #, python-format msgid "API version %(version)s is not supported on this method." msgstr "API s verzí %(version)s není v této metodě podporován." msgid "Access list not available for public flavors." msgstr "Seznam přístupu není dostupný pro veřejné konfigurace." #, python-format msgid "Action %s not found" msgstr "Činnost %s nenalezena" #, python-format msgid "" "Action for request_id %(request_id)s on instance %(instance_uuid)s not found" msgstr "" "Činnost pro id žádosti %(request_id)s v instanci %(instance_uuid)s nenalezena" #, python-format msgid "Action: '%(action)s', calling method: %(meth)s, body: %(body)s" msgstr "Činnost: '%(action)s', volání metody: %(meth)s, tělo: %(body)s" #, python-format msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries" msgstr "" "Přidání popisných dat selhalo pro agregát %(id)s po %(retries)s pokusech" #, python-format msgid "Aggregate %(aggregate_id)s already has host %(host)s." msgstr "Agregát %(aggregate_id)s již má hostitele %(host)s." #, python-format msgid "Aggregate %(aggregate_id)s could not be found." msgstr "Agregát %(aggregate_id)s nemohl být nalezen." #, python-format msgid "Aggregate %(aggregate_id)s has no host %(host)s." msgstr "Agregát %(aggregate_id)s nemá hostitele %(host)s." #, python-format msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." msgstr "" "Agregát %(aggregate_id)s nemá žádná metadata s klíčem %(metadata_key)s." #, python-format msgid "Aggregate %(aggregate_name)s already exists." msgstr "Agregát %(aggregate_name)s již existuje." msgid "An unknown error has occurred. Please try your request again." msgstr "Vyskytla se neznámá chyba. Prosím zopakujte Váš požadavek." msgid "An unknown exception occurred." msgstr "Vyskytla se neočekávaná výjimka." #, python-format msgid "Architecture name '%(arch)s' is not recognised" msgstr "Název architektury '%(arch)s' nebyl rozpoznán" #, python-format msgid "Architecture name '%s' is not valid" msgstr "Název architektury '%s' není platný" #, python-format msgid "" "Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty pool" msgstr "" "Pokus o spotřebu zařízení PCI %(compute_node_id)s:%(address)s z prázdné " "zásoby" msgid "Attempted overwrite of an existing value." msgstr "Pokus o přepsání existující hodnoty." 
#, python-format msgid "Attribute not supported: %(attr)s" msgstr "Vlastnost není podporována: %(attr)s" #, python-format msgid "Bad network format: missing %s" msgstr "Špatný formát sítě: chybí %s" msgid "Bad networks format" msgstr "Špatný formát sítě" #, python-format msgid "Bad networks format: network uuid is not in proper format (%s)" msgstr "Špatný formát sítí: uuid sítě není ve správném formátu (%s)" #, python-format msgid "Bad prefix for network in cidr %s" msgstr "Špatná předpona pro síť v cidr %s" #, python-format msgid "" "Binding failed for port %(port_id)s, please check neutron logs for more " "information." msgstr "" "Svázání portu %(port_id)s selhalo, pro další informace zkontrolujte prosím " "záznamy neutron." msgid "Blank components" msgstr "Prázdné součásti" msgid "" "Blank volumes (source: 'blank', dest: 'volume') need to have non-zero size" msgstr "" "Prázdné svazky (zdroj: 'blank', cíl: 'volume') potřebují mít nenulovou " "velikost" #, python-format msgid "Block Device %(id)s is not bootable." msgstr "Blokové zařízení %(id)s nelze zavést." msgid "Block Device Mapping cannot be converted to legacy format. " msgstr "Mapování blokového zařízení nemůže být převedeny na zastaralý formát." msgid "Block Device Mapping is Invalid." msgstr "Mapování blokového zařízení je neplatné." #, python-format msgid "Block Device Mapping is Invalid: %(details)s" msgstr "Mapování blokového zařízení je neplatné: %(details)s" msgid "" "Block Device Mapping is Invalid: Boot sequence for the instance and image/" "block device mapping combination is not valid." msgstr "" "Mapování blokového zařízení je neplatné: Zaváděcí sekvence pro kombinaci " "instance a mapování obrazu/blokového zařízení je neplatná." msgid "" "Block Device Mapping is Invalid: You specified more local devices than the " "limit allows" msgstr "" "Mapování blokového zařízení je neplatné: Počet vámi zadaných místních " "zařízení přesahuje limit" #, python-format msgid "Block Device Mapping is Invalid: failed to get image %(id)s." msgstr "Mapování blokového zařízení je neplatné: nelze získat obraz %(id)s." #, python-format msgid "Block Device Mapping is Invalid: failed to get snapshot %(id)s." msgstr "Mapování blokového zařízení je neplatné: nelze získat snímek %(id)s." #, python-format msgid "Block Device Mapping is Invalid: failed to get volume %(id)s." msgstr "Mapování blokového zařízení je neplatné: nelze získat svazek %(id)s." msgid "Block migration can not be used with shared storage." msgstr "Přesunutí bloku nemůže být použito ve sdíleném úložišti." msgid "Boot index is invalid." msgstr "Index zavedení je neplatný." 
#, python-format msgid "Build of instance %(instance_uuid)s aborted: %(reason)s" msgstr "Sestavení instance %(instance_uuid)s ukončeno: %(reason)s" #, python-format msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s" msgstr "" "Sestavení instance %(instance_uuid)s bylo znovu naplánováno: %(reason)s" msgid "CPU and memory allocation must be provided for all NUMA nodes" msgstr "Přidělení CPU a paměti musí být provedeno u všech uzlů NUMA" #, python-format msgid "" "CPU doesn't have compatibility.\n" "\n" "%(ret)s\n" "\n" "Refer to %(u)s" msgstr "" "CPU nemá kompatibilitu.\n" "\n" "%(ret)s\n" "\n" "Prohlédněte si %(u)s" #, python-format msgid "CPU number %(cpunum)d is assigned to two nodes" msgstr "Počet CPU %(cpunum)d je přidělen ke dvěma uzlům" #, python-format msgid "CPU number %(cpunum)d is larger than max %(cpumax)d" msgstr "Počet CPU %(cpunum)d je větší než maximum %(cpumax)d" #, python-format msgid "CPU number %(cpuset)s is not assigned to any node" msgstr "Počet CPU %(cpuset)s není přidělen k žádným uzlům" msgid "Can not add access to a public flavor." msgstr "Nelze zpřístupnit veřejnou konfiguraci" msgid "Can not find requested image" msgstr "Nelze najít požadovaný obraz" #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "Nelze zpracovat žádost o ověření přihlašovacích údajů %d" msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "Nelze získat cestu kořenového zařízení z nastavení libvirt instance" #, python-format msgid "" "Cannot '%(action)s' instance %(server_id)s while it is in %(attr)s %(state)s" msgstr "" "Nelze '%(action)s' v instanci %(server_id)s zatímco je v %(attr)s %(state)s" #, python-format msgid "Cannot add host to aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Nelze přidat hostitele do agregátu %(aggregate_id)s. Důvod: %(reason)s." msgid "Cannot attach one or more volumes to multiple instances" msgstr "Nelze připojit jeden nebo více svazků k mnoha instancím" #, python-format msgid "Cannot call %(method)s on orphaned %(objtype)s object" msgstr "Nelze volat %(method)s na osiřelém objektu %(objtype)s" msgid "Cannot find image for rebuild" msgstr "Nelze najít obraz ke znovu sestavení" #, python-format msgid "Cannot remove host %(host)s in aggregate %(id)s" msgstr "Nelze odstranit hostitele %(host)s z agregátu %(id)s" #, python-format msgid "Cannot remove host from aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Nelze odstranit hostitele z agregátu %(aggregate_id)s. Důvod: %(reason)s." msgid "Cannot rescue a volume-backed instance" msgstr "Nelze zachránit instanci zálohovanou na svazku" msgid "Cannot set realtime policy in a non dedicated cpu pinning policy" msgstr "" "Nelze nastavit zásadu v reálném čase v nevyhrazené zásadě pro připnutí " "procesoru" #, python-format msgid "Cannot update aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "Nelze aktualizovat agregát %(aggregate_id)s. Důvod: %(reason)s." #, python-format msgid "" "Cannot update metadata of aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Nelze aktualizovat popisná data agregátu %(aggregate_id)s. Důvod " "%(reason)s.: " #, python-format msgid "Cell %(uuid)s has no mapping." msgstr "Buňka %(uuid)s nemá žádné mapování." 
#, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "" "Změna by využití změnila na méně než 0 pro následující zdroje: %(unders)s" #, python-format msgid "Class %(class_name)s could not be found: %(exception)s" msgstr "Třída %(class_name)s nemohla být nalezena: %(exception)s" #, python-format msgid "Compute host %(host)s could not be found." msgstr "Výpočetní hostitel %(host)s nemohl být nalezen." #, python-format msgid "Compute host %s not found." msgstr "Výpočetní hostitel %s nenalezen." #, python-format msgid "Compute service of %(host)s is still in use." msgstr "Výpočetní služba na %(host)s se stále používá." #, python-format msgid "Compute service of %(host)s is unavailable at this time." msgstr "Výpočetní služba na %(host)s je v současnosti nedostupná." #, python-format msgid "Config drive format '%(format)s' is not supported." msgstr "Jednotka s nastavením ve formátu '%(format)s' není podporována." #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt hypervisor " "'%s' does not support selecting CPU models" msgstr "" "Konfigurace vyžaduje konkrétní model procesu, ale současný hypervizor " "libvirt '%s' nepodporuje výběr modelů procesoru" #, python-format msgid "" "Conflict updating instance %(instance_uuid)s, but we were unable to " "determine the cause" msgstr "" "Konflikt při aktualizaci instance %(instance_uuid)s, ale příčina nemohla být " "zjištěna" #, python-format msgid "" "Conflict updating instance %(instance_uuid)s. Expected: %(expected)s. " "Actual: %(actual)s" msgstr "" "Konflikt při aktualizaci instance %(instance_uuid)s. Očekáváno %(expected)s. " "Skutečnost: %(actual)s" #, python-format msgid "Connection to cinder host failed: %(reason)s" msgstr "Připojení k hostiteli cinder selhalo: %(reason)s" #, python-format msgid "Connection to glance host %(server)s failed: %(reason)s" msgstr "Připojení k hostiteli glance %(server)s selhalo: %(reason)s" #, python-format msgid "Connection to libvirt lost: %s" msgstr "Připojování k libvirt ztraceno: %s" #, python-format msgid "" "Console log output could not be retrieved for instance %(instance_id)s. " "Reason: %(reason)s" msgstr "" "Nelze získat výstup záznamu konzole instance %(instance_id)s. Důvod: " "%(reason)s" msgid "Constraint not met." msgstr "Omezení nesplněna." #, python-format msgid "Converted to raw, but format is now %s" msgstr "Převedeno na prosté, ale formát je nyní %s" #, python-format msgid "Could not attach image to loopback: %s" msgstr "Nelze připojit obraz do zpětné smyčky: %s" #, python-format msgid "Could not fetch image %(image_id)s" msgstr "Nelze získat obraz %(image_id)s" #, python-format msgid "Could not find a handler for %(driver_type)s volume." msgstr "Nelze najít obslužnou rutinu pro svazek %(driver_type)s." #, python-format msgid "Could not find binary %(binary)s on host %(host)s." msgstr "Nelze najít binární soubor %(binary)s v hostiteli %(host)s." #, python-format msgid "Could not find config at %(path)s" msgstr "Nelze najít nastavení v %(path)s" msgid "Could not find the datastore reference(s) which the VM uses." msgstr "Nelze najít odkazy datového úložiště, který VM používá." 
#, python-format msgid "Could not load line %(line)s, got error %(error)s" msgstr "Nelze načíst řádek %(line)s, obdržena chyba %(error)s" #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "Nelze načíst aplikaci vložení '%(name)s' z %(path)s" #, python-format msgid "" "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s" msgstr "" "Nelze připojit konfigurační jednotky vfat. %(operation)s selhala. Chyba: " "%(error)s" #, python-format msgid "Could not upload image %(image_id)s" msgstr "Nelze nahrát obraz %(image_id)s" msgid "Creation of virtual interface with unique mac address failed" msgstr "Vytváření virtuálního rozhraní s jedinečnou mac adresou selhalo" #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "" "Regulární výraz datového úložiště %s neodpovídá žádným datovým úložištím" msgid "Datetime is in invalid format" msgstr "Datum a čas jsou v neplatném formátu" msgid "Default PBM policy is required if PBM is enabled." msgstr "Výchozí zásada PVM je pro povolení PBM vyžadována." #, python-format msgid "Device '%(device)s' not found." msgstr "Zařízení '%(device)s' nenalezeno." msgid "Device name contains spaces." msgstr "Název zařízení obsahuje mezery." msgid "Device name empty or too long." msgstr "Název zařízení je prázdný nebo příliš dlouhý." #, python-format msgid "Disk format %(disk_format)s is not acceptable" msgstr "Formát disku %(disk_format)s není přijatelný" #, python-format msgid "Disk info file is invalid: %(reason)s" msgstr "Soubor informací o disku je neplatný: %(reason)s" #, python-format msgid "Driver Error: %s" msgstr "Chyba ovladače: %s" #, python-format msgid "" "Error destroying the instance on node %(node)s. Provision state still " "'%(state)s'." msgstr "" "Chyba při ničení instance na uzlu %(node)s. Stav poskytování bude " "'%(state)s'." 
#, python-format msgid "Error during unshelve instance %(instance_id)s: %(reason)s" msgstr "Chyba během vyskladňování instance %(instance_id)s: %(reason)s" #, python-format msgid "" "Error from libvirt while getting domain info for %(instance_name)s: [Error " "Code %(error_code)s] %(ex)s" msgstr "" "Chyba od libvirt při získávání informací o doméně pro %(instance_name)s: " "[Kód chyby %(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Chyba od libvirt při hledání %(instance_name)s: [Kód chyby %(error_code)s] " "%(ex)s" #, python-format msgid "" "Error from libvirt while quiescing %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Chyba od libvirt při ztišování %(instance_name)s: [Kód chyby %(error_code)s] " "%(ex)s" #, python-format msgid "" "Error from libvirt while set password for username \"%(user)s\": [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Chyba od libvirt při nastavování hesla pro uživatele \"%(user)s\": [Kód " "chyby %(error_code)s] %(ex)s" #, python-format msgid "" "Error mounting %(device)s to %(dir)s in image %(image)s with libguestfs " "(%(e)s)" msgstr "" "Chyba při připojování %(device)s do %(dir)s v obrazu %(image)s používající " "libguestfs (%(e)s)" #, python-format msgid "Error mounting %(image)s with libguestfs (%(e)s)" msgstr "Chyba při připojování %(image)s pomocí libguestfs (%(e)s)" #, python-format msgid "Error when creating resource monitor: %(monitor)s" msgstr "Chyba při vytváření monitoru zdroje: %(monitor)s" #, python-format msgid "Event %(event)s not found for action id %(action_id)s" msgstr "Událost %(event)s nenalezena pro id žádosti %(action_id)s" msgid "Event must be an instance of nova.virt.event.Event" msgstr "Událost musí být instancí nova.virt.event.Event" #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for instance " "%(instance_uuid)s. Last exception: %(exc_reason)s" msgstr "" "Překročen maximální počet pokusů o znovu naplánování %(max_attempts)d pro " "instanci %(instance_uuid)s. Poslední výjimka: %(exc_reason)s" #, python-format msgid "" "Exceeded max scheduling retries %(max_retries)d for instance " "%(instance_uuid)s during live migration" msgstr "" "Překročen maximální počet pokusů o znovu naplánování %(max_retries)d pro " "instanci %(instance_uuid)s během přesunu za běhu" #, python-format msgid "Exceeded maximum number of retries. %(reason)s" msgstr "Překročen maximální počet pokusů. %(reason)s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "Očekáváno uuid ale obdrženo %(uuid)s." msgid "Extracting vmdk from OVA failed." msgstr "Extrahování vmdk z OVA selhalo." #, python-format msgid "Failed to access port %(port_id)s: %(reason)s" msgstr "K portu %(port_id)s není přístup: %(reason)s" #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "Nelze přidělit síť(ě) s chybou %s, nebudou znovu naplánovány" msgid "Failed to allocate the network(s), not rescheduling." msgstr "Nelze přidělit síť(ě), nebudou znovu naplánovány." 
#, python-format msgid "Failed to attach network adapter device to %(instance_uuid)s" msgstr "Nelze připojit zařízení síťového adaptéru k %(instance_uuid)s" #, python-format msgid "Failed to deploy instance: %(reason)s" msgstr "Nelze zavést instanci: %(reason)s" #, python-format msgid "Failed to detach PCI device %(dev)s: %(reason)s" msgstr "Nelze odpojit zařízení PCI %(dev)s: %(reason)s" #, python-format msgid "Failed to detach network adapter device from %(instance_uuid)s" msgstr "Nelze odpojit zařízení síťového adaptéru od %(instance_uuid)s" #, python-format msgid "Failed to encrypt text: %(reason)s" msgstr "Nelze zašifrovat text: %(reason)s" #, python-format msgid "Failed to launch instances: %(reason)s" msgstr "Nelze spustit instance: %(reason)s" #, python-format msgid "Failed to map partitions: %s" msgstr "Nelze mapovat oddíly: %s" #, python-format msgid "Failed to mount filesystem: %s" msgstr "Nelze připojit souborový systém: %s" #, python-format msgid "Failed to power off instance: %(reason)s" msgstr "Nelze vypnout instanci: %(reason)s" #, python-format msgid "Failed to power on instance: %(reason)s" msgstr "Nelze zapnout instanci: %(reason)s" #, python-format msgid "Failed to provision instance %(inst)s: %(reason)s" msgstr "Poskytnutí instance %(inst)s selhalo: %(reason)s" #, python-format msgid "Failed to read or write disk info file: %(reason)s" msgstr "Nelze přečíst nebo zapsat soubor informací o disku: %(reason)s" #, python-format msgid "Failed to reboot instance: %(reason)s" msgstr "Nelze restartovat instanci: %(reason)s" #, python-format msgid "Failed to remove volume(s): (%(reason)s)" msgstr "Nelze odpojit svazky: (%(reason)s)" #, python-format msgid "Failed to request Ironic to rebuild instance %(inst)s: %(reason)s" msgstr "Nelze zažádat Ironic o znovu sestavení instance %(inst)s: %(reason)s" #, python-format msgid "Failed to resume instance: %(reason)s" msgstr "Nelze obnovit instanci: %(reason)s" #, python-format msgid "Failed to run qemu-img info on %(path)s : %(error)s" msgstr "Nelze spustit qemu-img info na %(path)s : %(error)s" #, python-format msgid "Failed to set admin password on %(instance)s because %(reason)s" msgstr "Nelze nastavit heslo správce v %(instance)s z důvodu %(reason)s" #, python-format msgid "Failed to suspend instance: %(reason)s" msgstr "Nelze pozastavit instanci: %(reason)s" #, python-format msgid "Failed to terminate instance: %(reason)s" msgstr "Nelze ukončit instanci: %(reason)s" msgid "Failure prepping block device." msgstr "Chyba při přípravě blokového zařízení." #, python-format msgid "File %(file_path)s could not be found." msgstr "Soubor %(file_path)s nemohl být nalezen." #, python-format msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s." msgstr "Pevná IP %(ip)s není platnou ip adresou pro síť %(network_id)s." #, python-format msgid "Fixed IP %s is already in use." msgstr "Pevná IP adresa %s se již používá." #, python-format msgid "" "Fixed IP address %(address)s is already in use on instance %(instance_uuid)s." msgstr "" "Pevná IP adresa (%(address)s) se již používá v instanci %(instance_uuid)s." #, python-format msgid "Fixed IP not found for address %(address)s." msgstr "Pevná IP není pro adresu %(address)s nalezena." #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "Konfigurace %(flavor_id)s nemohla být nalezena." #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s." 
msgstr "" "Konfigurace %(flavor_id)s nemá žádnou dodatečnou specifikaci s klíčem " "%(extra_specs_key)s." #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(key)s." msgstr "" "Konfigurace %(flavor_id)s nemá žádnou dodatečnou specifikaci s klíčem " "%(key)s." #, python-format msgid "" "Flavor access already exists for flavor %(flavor_id)s and project " "%(project_id)s combination." msgstr "" "Přístup ke konfiguraci již existuje u kombinace konfigurace %(flavor_id)s a " "projektu %(project_id)s." #, python-format msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination." msgstr "" "Přístup ke konfiguraci nelze nalézt pro kombinaci %(flavor_id)s / " "%(project_id)s." msgid "Flavor used by the instance could not be found." msgstr "Konfigurace použitá instancí nemohla být nalezena." #, python-format msgid "Flavor with ID %(flavor_id)s already exists." msgstr "Konfigurace s ID %(flavor_id)s již existuje." #, python-format msgid "Flavor with name %(flavor_name)s could not be found." msgstr "Konfigurace s názvem %(flavor_name)s nemohla být nalezena." #, python-format msgid "Flavor with name %(name)s already exists." msgstr "Konfigurace s názvem %(name)s již existuje." #, python-format msgid "" "Flavor's disk is smaller than the minimum size specified in image metadata. " "Flavor disk is %(flavor_size)i bytes, minimum size is %(image_min_disk)i " "bytes." msgstr "" "Disk konfigurace je menší, než minimální zadaná velikost zadaná v popisných " "datech obrazu. Disk konfigurace má %(flavor_size)i bajtů, minimální velikost " "je %(image_min_disk)i bajtů." #, python-format msgid "" "Flavor's disk is too small for requested image. Flavor disk is " "%(flavor_size)i bytes, image is %(image_size)i bytes." msgstr "" "Disk konfigurace je pro požadovaný obraz příliš malý. Disk konfigurace má " "%(flavor_size)i bajtů, obraz má %(image_size)i bajtů." msgid "Flavor's memory is too small for requested image." msgstr "Paměť konfigurace je na požadovaný obraz příliš malá." #, python-format msgid "Floating IP %(address)s association has failed." msgstr "Přidružení plovoucí IP adresy %(address)s selhalo." #, python-format msgid "Floating IP %(address)s is associated." msgstr "Plovoucí IP %(address)s je přidružena." #, python-format msgid "Floating IP %(address)s is not associated with instance %(id)s." msgstr "Plovoucí IP adresa %(address)s není přidružena k instanci %(id)s." #, python-format msgid "Floating IP not found for ID %(id)s." msgstr "Plovoucí IP není nalezena pro ID %(id)s." #, python-format msgid "Floating IP not found for ID %s" msgstr "Plovoucí IP adresa nenalezena pro ID %s" #, python-format msgid "Floating IP not found for address %(address)s." msgstr "Plovoucí IP adresa nenalezena pro adresu %(address)s." msgid "Floating IP pool not found." msgstr "Zásoba plovoucích IP adres nenalezena." msgid "" "Forbidden to exceed flavor value of number of serial ports passed in image " "meta." msgstr "" "Je zakázáno překročit hodnotu z konfigurace ohledně počtu sériových portů " "předaných v popisných datech obrazu." msgid "Found no disk to snapshot." msgstr "Nenalezen žádný disk k pořízení snímku." #, python-format msgid "Host %(host)s could not be found." msgstr "Hostitel %(host)s nemohl být nalezen." 
#, python-format msgid "Host '%(name)s' is not mapped to any cell" msgstr "Hostitel '%(name)s' není namapován k žádné buňce" msgid "Host aggregate is not empty" msgstr "Agregát hostitele není prázdný" msgid "Host does not support guests with NUMA topology set" msgstr "Hostitel nepodporuje hosty s nastavenou topologií NUMA" msgid "Host does not support guests with custom memory page sizes" msgstr "Hostitel nepodporuje hosty s vlastní velikostí stránek paměti" msgid "Hypervisor driver does not support post_live_migration_at_source method" msgstr "" "Ovladač hypervizoru nepodporuje metodu po přesunutí za provozu ve zdroji" #, python-format msgid "Hypervisor virt type '%s' is not valid" msgstr "Typ virtualizace hypervizoru '%s' není platný" #, python-format msgid "Hypervisor virtualization type '%(hv_type)s' is not recognised" msgstr "Typ virtualizace hypervizoru '%(hv_type)s' nebyl rozpoznán" #, python-format msgid "Hypervisor with ID '%s' could not be found." msgstr "Hypervizor s ID !%s! nemohl být nalezen." #, python-format msgid "IP allocation over quota in pool %s." msgstr "Přidělení IP adres přesahující kvótu v zásobě %s." msgid "IP allocation over quota." msgstr "Přidělení IP přesahuje kvótu." #, python-format msgid "Image %(image_id)s could not be found." msgstr "Obraz %(image_id)s nemohl být nalezen." #, python-format msgid "Image %(image_id)s is not active." msgstr "Obraz %(image_id)s není aktivní." #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "Obraz %(image_id)s je nepřijatelný: %(reason)s" msgid "Image is not raw format" msgstr "Obraz není v prostém formátu" msgid "Image metadata limit exceeded" msgstr "Popisná data obrazu překračují limit" #, python-format msgid "Image model '%(image)s' is not supported" msgstr "Model obrazu '%(image)s' není podporován" msgid "Image not found." msgstr "Obraz nenalezen" #, python-format msgid "" "Image property '%(name)s' is not permitted to override NUMA configuration " "set against the flavor" msgstr "" "Vlastnost obrazu '%(name)s' nemá povoleno potlačit nastavení NUMA dané z " "nastavení konfigurace" msgid "" "Image property 'hw_cpu_policy' is not permitted to override CPU pinning " "policy set against the flavor" msgstr "" "Vlastnost obrazu 'hw_cpu_policy' nemá oprávnění potlačit zásadu připnutí CPU " "danou z konfigurace" msgid "Image that the instance was started with could not be found." msgstr "Obraz, z kterého byla instance spuštěna, nemohl být nalezen." #, python-format msgid "Image's config drive option '%(config_drive)s' is invalid" msgstr "Volba konfigurační jednotky obrazu '%(config_drive)s' není platná" msgid "" "Images with destination_type 'volume' need to have a non-zero size specified" msgstr "" "Obrazy, které mají zadány 'volume' jako typ cíle, potřebují mít zadánu " "nenulovou velikost" msgid "In ERROR state" msgstr "ve stavu CHYBA" #, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "Ve stavech %(vm_state)s/%(task_state)s, není RESIZED/None" msgid "" "Incompatible settings: ephemeral storage encryption is supported only for " "LVM images." msgstr "" "Nekompatibilní nastavení: šifrování efemerního úložiště je podporováno pouze " "pro obrazy LVM." #, python-format msgid "Info cache for instance %(instance_uuid)s could not be found." msgstr "Mezipaměť informaci instance %(instance_uuid)s nemohla být nalezena." #, python-format msgid "" "Instance %(instance)s and volume %(vol)s are not in the same " "availability_zone. Instance is in %(ins_zone)s. 
Volume is in %(vol_zone)s" msgstr "" "Instance %(instance)s a svazek %(vol)s nejsou ve stejné zóně dostupnosti. " "Instance je v %(ins_zone)s. Svazek je v %(vol_zone)s" #, python-format msgid "Instance %(instance)s does not have a port with id %(port)s" msgstr "Instance %(instance)s nemá port s id %(port)s" #, python-format msgid "Instance %(instance_id)s cannot be rescued: %(reason)s" msgstr "Instanci %(instance_id)s nelze zachránit: %(reason)s" #, python-format msgid "Instance %(instance_id)s could not be found." msgstr "Instance %(instance_id)s nemohla být nalezena." #, python-format msgid "Instance %(instance_id)s has no tag '%(tag)s'" msgstr "Instance %(instance_id)s nemá žádnou značku '%(tag)s'" #, python-format msgid "Instance %(instance_id)s is not in rescue mode" msgstr "Instance %(instance_id)s není v nouzovém režimu." #, python-format msgid "Instance %(instance_id)s is not ready" msgstr "Instance %(instance_id)s není připravena" #, python-format msgid "Instance %(instance_id)s is not running." msgstr "Instance %(instance_id)s není spuštěna." #, python-format msgid "Instance %(instance_id)s is unacceptable: %(reason)s" msgstr "Instance %(instance_id)s je nepřijatelná: %(reason)s" #, python-format msgid "Instance %(instance_uuid)s does not specify a NUMA topology" msgstr "Instance %(instance_uuid)s neurčuje topologii NUMA" #, python-format msgid "Instance %(instance_uuid)s does not specify a migration context." msgstr "Instance %(instance_uuid)s neurčuje kontext přesunu." #, python-format msgid "" "Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while " "the instance is in this state." msgstr "" "Instance %(instance_uuid)s v %(attr)s %(state)s. Nelze %(method)s zatímco " "je instance v tomto stavu." #, python-format msgid "Instance %(instance_uuid)s is locked" msgstr "Instance %(instance_uuid)s je uzamčena." #, python-format msgid "" "Instance %(instance_uuid)s requires config drive, but it does not exist." msgstr "" "Instance %(instance_uuid)s vyžaduje jednotku s nastavením, ale ta neexistuje." #, python-format msgid "Instance %(name)s already exists." msgstr "Instance %(name)s již existuje." #, python-format msgid "Instance %(server_id)s is in an invalid state for '%(action)s'" msgstr "Instance %(server_id)s je v neplatném stavu pro '%(action)s'" #, python-format msgid "Instance %(uuid)s has no mapping to a cell." msgstr "Instance %(uuid)s nemá žádné mapování na buňku." #, python-format msgid "Instance %s not found" msgstr "Instance %s není uložena" #, python-format msgid "Instance %s provisioning was aborted" msgstr "Poskytování instance %s bylo ukončeno" msgid "Instance could not be found" msgstr "Instance nemohla být nalezena" msgid "Instance disk to be encrypted but no context provided" msgstr "Disk instance má být šifrován, ale není zadán kontext" msgid "Instance event failed" msgstr "Událost instance selhala" #, python-format msgid "Instance group %(group_uuid)s already exists." msgstr "Skupina instance %(group_uuid)s již existuje." #, python-format msgid "Instance group %(group_uuid)s could not be found." msgstr "Skupina instance %(group_uuid)s nemohla být nalezena." msgid "Instance has no source host" msgstr "Instance nemá žádného zdrojového hostitele" msgid "Instance has not been resized." msgstr "Instanci nebyla změněna velikost." 
msgid "Instance is not a member of specified network" msgstr "Instance není členem zadané sítě" #, python-format msgid "Instance rollback performed due to: %s" msgstr "Provedeno zpětné vrácení instance kvůli: %s" #, python-format msgid "" "Insufficient Space on Volume Group %(vg)s. Only %(free_space)db available, " "but %(size)d bytes required by volume %(lv)s." msgstr "" "Nedostatečné místo ve skupině svazku %(vg)s. Dostupné pouze %(free_space)db, " "ale svazkem %(lv)s je vyžadováno %(size)db bajtů." #, python-format msgid "Insufficient compute resources: %(reason)s." msgstr "Nedostatečné výpočetní zdroje: %(reason)s." #, python-format msgid "Interface %(interface)s not found." msgstr "Rozhraní %(interface)s nenalezeno." #, python-format msgid "Invalid Base 64 data for file %(path)s" msgstr "Neplatná data Base 64 pro soubor %(path)s" msgid "Invalid Connection Info" msgstr "Neplatné informace o připojení" #, python-format msgid "Invalid ID received %(id)s." msgstr "Obdrženo neplatné ID %(id)s." #, python-format msgid "Invalid IP format %s" msgstr "Neplatný formát IP adresy %s" #, python-format msgid "Invalid IP protocol %(protocol)s." msgstr "Neplatný protokol IP %(protocol)s." msgid "" "Invalid PCI Whitelist: The PCI whitelist can specify devname or address, but " "not both" msgstr "" "Neplatný seznam povolených PCI: Seznam povolených PCI může zadat název " "zařízení nebo adresu, ale ne oboje" #, python-format msgid "Invalid PCI alias definition: %(reason)s" msgstr "Neplatné určení přezdívky PCI: %(reason)s" #, python-format msgid "Invalid Regular Expression %s" msgstr "Neplatný regulární výraz %s" #, python-format msgid "Invalid characters in hostname '%(hostname)s'" msgstr "Neplatné znaky v názvu hostitele '%(hostname)s'" msgid "Invalid config_drive provided." msgstr "Zadána neplatná konfigurační jednotka." #, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "Neplatný formát konfigurační jednotky \"%s\"" #, python-format msgid "Invalid console type %(console_type)s" msgstr "Neplatný typ konzole %(console_type)s " #, python-format msgid "Invalid content type %(content_type)s." msgstr "Neplatný typ obsahu %(content_type)s." #, python-format msgid "Invalid datetime string: %(reason)s" msgstr "Neplatný řetězec data a času: %(reason)s" msgid "Invalid device UUID." msgstr "Neplatné UUID zařízení." #, python-format msgid "Invalid entry: '%s'" msgstr "Neplatná položka: '%s'" #, python-format msgid "Invalid entry: '%s'; Expecting dict" msgstr "Neplatná položka: '%s'; Očekáváno dict" #, python-format msgid "Invalid entry: '%s'; Expecting list or dict" msgstr "Neplatná položka: '%s'; Očekáváno list nebo dict" #, python-format msgid "Invalid exclusion expression %r" msgstr "Neplatný výraz vyloučení %r" #, python-format msgid "Invalid image format '%(format)s'" msgstr "Neplatný formát obrazu '%(format)s'" #, python-format msgid "Invalid image href %(image_href)s." msgstr "Neplatný href %(image_href)s obrazu." #, python-format msgid "Invalid inclusion expression %r" msgstr "Neplatný výraz začlenění %r" #, python-format msgid "" "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s" msgstr "" "Neplatný vstup pro pole/vlastnost %(path)s. Hodnota: %(value)s. %(message)s" #, python-format msgid "Invalid input received: %(reason)s" msgstr "Obdržen neplatný vstup: %(reason)s" msgid "Invalid instance image." msgstr "Neplatný obraz instance." #, python-format msgid "Invalid is_public filter [%s]" msgstr "Neplatný filtr is_public [%s]" msgid "Invalid key_name provided." 
msgstr "Zadán neplatný název klíče." #, python-format msgid "Invalid memory page size '%(pagesize)s'" msgstr "Neplatná velikost stránky paměti '%(pagesize)s'" msgid "Invalid metadata key" msgstr "Neplatný klíč popisných dat" #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "Neplatná velikost popisných dat: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "Neplatná popisná data: %(reason)s" #, python-format msgid "Invalid minDisk filter [%s]" msgstr "Neplatný filtr minDisk [%s]" #, python-format msgid "Invalid minRam filter [%s]" msgstr "Neplatný filtr minRam [%s]" #, python-format msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" msgstr "Neplatný rozsah portů %(from_port)s:%(to_port)s. %(msg)s" msgid "Invalid proxy request signature." msgstr "Neplatný podpis žádosti od prostředníka." #, python-format msgid "Invalid range expression %r" msgstr "Neplatný výraz rozsahu %r" msgid "Invalid service catalog json." msgstr "Neplatný json katalog služeb" msgid "Invalid start time. The start time cannot occur after the end time." msgstr "Neplatný čas spuštění. Čas spuštění nemůže nastat po čase ukončení." msgid "Invalid state of instance files on shared storage" msgstr "Neplatný stav souborů instance na sdíleném úložišti" #, python-format msgid "Invalid timestamp for date %s" msgstr "Neplatné časové razítko pro datum %s" #, python-format msgid "Invalid usage_type: %s" msgstr "Neplatný typ použití: %s" #, python-format msgid "Invalid value for Config Drive option: %(option)s" msgstr "Neplatná hodnota pro volbu konfigurační jednotky: %(option)s" #, python-format msgid "Invalid virtual interface address %s in request" msgstr "Neplatná adresa virtuálního rozhraní %s v požadavku." #, python-format msgid "Invalid volume access mode: %(access_mode)s" msgstr "Neplatný režim přístupu svazku: %(access_mode)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "Neplatný svazek: %(reason)s" msgid "Invalid volume_size." msgstr "Neplatná velikost svazku" #, python-format msgid "Ironic node uuid not supplied to driver for instance %s." msgstr "UUID uzlu Ironic nebylo předáno ovladači instance %s." #, python-format msgid "" "It is not allowed to create an interface on external network %(network_uuid)s" msgstr "Na vnější síti %(network_uuid)s není povoleno vytvářet rozhraní" msgid "" "Key Names can only contain alphanumeric characters, periods, dashes, " "underscores, colons and spaces." msgstr "" "Názvy klíče mohou obsahovat pouze alfanumerické znaky, tečky, pomlčky, " "podtržítka, dvojtečky a mezery." #, python-format msgid "Key manager error: %(reason)s" msgstr "Chyba správce klíčů: %(reason)s" #, python-format msgid "Key pair '%(key_name)s' already exists." msgstr "Pár klíčů '%(key_name)s' již existuje." 
#, python-format msgid "Keypair %(name)s not found for user %(user_id)s" msgstr "Pár klíčů %(name)s nenalezena pro uživatele %(user_id)s" #, python-format msgid "Keypair data is invalid: %(reason)s" msgstr "Data páru klíčů jsou neplatná: %(reason)s" msgid "Limits only supported from vCenter 6.0 and above" msgstr "Limity jsou podporovány pouze ve vCenter verze 6.0 a vyšší" #, python-format msgid "Malformed message body: %(reason)s" msgstr "Poškozené tělo zprávy: %(reason)s" #, python-format msgid "" "Malformed request URL: URL's project_id '%(project_id)s' doesn't match " "Context's project_id '%(context_project_id)s'" msgstr "" "Poškozená URL požadavku: ID projektu URL '%(project_id)s' neodpovídá ID " "projektu obsahu '%(context_project_id)s'" msgid "Malformed request body" msgstr "Poškozené tělo požadavku" msgid "Mapping image to local is not supported." msgstr "Mapování obrazu na místní není podporováno" #, python-format msgid "Marker %(marker)s could not be found." msgstr "Indikátor %(marker)s nemohl být nalezen." msgid "Maximum number of floating IPs exceeded" msgstr "Překročen maximálních počet plovoucích IP adres" #, python-format msgid "Maximum number of metadata items exceeds %(allowed)d" msgstr "Maximální počet popisných položek překračuje %(allowed)d" msgid "Maximum number of ports exceeded" msgstr "Překročen maximálních počet portů" msgid "Maximum number of security groups or rules exceeded" msgstr "Překročen maximálních počet bezpečnostních skupin nebo pravidel" msgid "Metadata item was not found" msgstr "Položka popisných dat nenalezena" msgid "Metadata property key greater than 255 characters" msgstr "Klíč vlastnosti popisných dat je větší než 255 znaků" msgid "Metadata property value greater than 255 characters" msgstr "Hodnota vlastnosti popisných dat je vetší než 255 znaků" msgid "Metadata type should be dict." msgstr "Typ popisných dat by měl být dict" #, python-format msgid "" "Metric %(name)s could not be found on the compute host node %(host)s." "%(node)s." msgstr "" "Metrika %(name)s nemohla být nalezena v uzlu výpočetního hostitele %(host)s." "%(node)s." #, python-format msgid "Migration %(migration_id)s could not be found." msgstr "Přesun %(migration_id)s nemohl být nalezen." #, python-format msgid "Migration error: %(reason)s" msgstr "Chyba přesunu: %(reason)s" msgid "Migration is not supported for LVM backed instances" msgstr "Přesun není podporován u instancí zálohovaných na LVM" #, python-format msgid "" "Migration not found for instance %(instance_id)s with status %(status)s." msgstr "Přesun nenalezen v instanci %(instance_id)s se stavem %(status)s." #, python-format msgid "Migration pre-check error: %(reason)s" msgstr "Chyba kontroly před přesunem: %(reason)s" #, python-format msgid "Missing arguments: %s" msgstr "Chybí argumenty: %s" msgid "Missing device UUID." msgstr "UUID zařízení chybí." msgid "Missing disabled reason field" msgstr "Chybí pole důvodu zakázání" msgid "Missing forced_down field" msgstr "Chybí pole " msgid "Missing imageRef attribute" msgstr "Chybí vlastnost imageRef" #, python-format msgid "Missing keys: %s" msgstr "Chybí klíče: %s" msgid "Missing parameter dict" msgstr "Chybí parametr dict" #, python-format msgid "" "More than one instance is associated with fixed IP address '%(address)s'." msgstr "" "K pevné IP adrese '%(address)s'. je přidružena více než jedna instance." msgid "" "More than one possible network found. Specify network ID(s) to select which " "one(s) to connect to." msgstr "" "Nalezena více než jedna možná síť. 
Zadejte ID sítě pro výběr té, ke které se " "chcete připojit." msgid "More than one swap drive requested." msgstr "Je požadován více než jeden odkládací disk." #, python-format msgid "Multi-boot operating system found in %s" msgstr "V %s nalezen operační systém s více zavaděči" msgid "Multiple X-Instance-ID headers found within request." msgstr "V žádosti nalezeno více hlaviček X-Instance-ID." msgid "Multiple X-Tenant-ID headers found within request." msgstr "V žádosti nalezeno více hlaviček X-Tenant-ID." #, python-format msgid "Multiple floating IP pools matches found for name '%s'" msgstr "Při hledání názvu '%s' nalezeno mnoho zásob plovoucích ip adres" #, python-format msgid "Multiple floating IPs are found for address %(address)s." msgstr "Nalezeno mnoho plovoucích IP pro adresu %(address)s." msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we do " "not return uptime for just one host." msgstr "" "Ovladač VMware vCenter může spravovat mnoho hostitelů; proto se čas provozu " "pouze pro jednoho hostitele neměří." msgid "Multiple possible networks found, use a Network ID to be more specific." msgstr "" "Nalezeno mnoho možných sítí, použijte ID sítě, nebo buďte konkrétnější." #, python-format msgid "" "Multiple security groups found matching '%s'. Use an ID to be more specific." msgstr "" "Nalezeno mnoho bezpečnostních skupin odpovídající '%s'. Použijte ID nebo " "buďte konkrétnější." msgid "Must input network_id when request IP address" msgstr "Při žádání o IP adresu musíte zadat id sítě" msgid "Must not input both network_id and port_id" msgstr "Id sítě a id portu nesmí být zadány najednou" msgid "" "Must specify host_ip, host_username and host_password to use vmwareapi." "VMwareVCDriver" msgstr "" "Pro použití vmwareapi.VMwareVCDriver musíte zadat ip hostitele, jeho " "uživatelské jméno a heslo" msgid "Must supply a positive value for max_rows" msgstr "Musíte zadat kladnou hodnotu pro maximum řádků" #, python-format msgid "Network %(network_id)s could not be found." msgstr "Síť %(network_id)s nemohla být nalezena." #, python-format msgid "" "Network %(network_uuid)s requires a subnet in order to boot instances on." msgstr "Síť %(network_uuid)s vyžaduje podsíť, na které může zavádět instance." #, python-format msgid "Network could not be found for bridge %(bridge)s" msgstr "Síť nemohla být pro most %(bridge)s nalezena." #, python-format msgid "Network could not be found for instance %(instance_id)s." msgstr "Síť nemohla být pro instanci %(instance_id)s nalezena." msgid "Network not found" msgstr "Síť nenalezena" msgid "" "Network requires port_security_enabled and subnet associated in order to " "apply security groups." msgstr "" "Síť vyžaduje povolení bezpečnostního portu a přidruženou podsíť, aby mohla " "používat bezpečnostní skupiny." msgid "New volume must be detached in order to swap." msgstr "Pro výměnu musí být nový svazek odpojen." msgid "New volume must be the same size or larger." msgstr "Nový svazek musí mít stejnou nebo větší velikost." #, python-format msgid "No Block Device Mapping with id %(id)s." msgstr "Žádné mapování blokového zařízení s id %(id)s." msgid "No Unique Match Found." msgstr "Nenalezena žádná jedinečná shoda." 
msgid "No compute host specified" msgstr "Nezadán žádný výpočetní hostitel" #, python-format msgid "No device with MAC address %s exists on the VM" msgstr "Na VM neexistuje žádné zařízení s MAC adresou %s" #, python-format msgid "No device with interface-id %s exists on VM" msgstr "Na VM neexistuje žádné zařízení mající id rozhraní %s" #, python-format msgid "No disk at %(location)s" msgstr "Źádný disk ve %(location)s" #, python-format msgid "No fixed IP addresses available for network: %(net)s" msgstr "Nejsou dostupné žádné pevné IP adresy pro síť: %(net)s" msgid "No fixed IPs associated to instance" msgstr "K instanci nejsou přidruženy žádné pevné IP adresy" msgid "No free nbd devices" msgstr "Žádná volná zařízení nbd" msgid "No host available on cluster" msgstr "V clusteru není dostupný žádný hostitel" #, python-format msgid "No hypervisor matching '%s' could be found." msgstr "Nemohl být nalezen žádný hypervizor shodující se s '%s'." msgid "No image locations are accessible" msgstr "Nejsou přístupné žádná umístění obrazu" msgid "No more floating IPs available." msgstr "Žádné další plovoucí IP adresy nejsou dostupné." #, python-format msgid "No more floating IPs in pool %s." msgstr "Žádné další plovoucí IP adresa v zásobě %s." #, python-format msgid "No mount points found in %(root)s of %(image)s" msgstr "V %(root)s z %(image)s nenalezeny žádné body připojení" #, python-format msgid "No operating system found in %s" msgstr "V %s nenalezen žádný operační systém" msgid "No root disk defined." msgstr "Nezadán žádný kořenový disk." msgid "No valid host found for cold migrate" msgstr "Nebyl nalezen žádný platný hostitel pro přesun při nepoužívání" msgid "No valid host found for resize" msgstr "Nebyl nalezen žádný platný hostitel pro změnu velikosti" #, python-format msgid "No valid host was found. %(reason)s" msgstr "Nebyl nalezen žádný platný hostitel. %(reason)s" #, python-format msgid "No volume Block Device Mapping at path: %(path)s" msgstr "Žádné mapování blokového zařízení svazku v cestě: %(path)s" #, python-format msgid "No volume Block Device Mapping with id %(volume_id)s." msgstr "Žádné mapování blokového zařízení svazku s id %(volume_id)s." #, python-format msgid "Node %s could not be found." msgstr "Uzel %s nemohl být nalezen." #, python-format msgid "Not able to acquire a free port for %(host)s" msgstr "Nelze získat volný port pro %(host)s" #, python-format msgid "Not able to bind %(host)s:%(port)d, %(error)s" msgstr "Nelze svázat %(host)s:%(port)d, %(error)s" msgid "Not an rbd snapshot" msgstr "Není snímkem rbd" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "Nemáte oprávnění k použití obrazu %(image_id)s." msgid "Not authorized." msgstr "Neschváleno." msgid "Not enough parameters to build a valid rule." msgstr "Není dostatek parametrů k sestavení platného pravidla." msgid "Not stored in rbd" msgstr "Neuloženo v rbd" msgid "Nothing was archived." msgstr "Nic nebylo archivováno." #, python-format msgid "Nova requires libvirt version %s or greater." msgstr "Nova vyžaduje verzi libvirt %s nebo novější" msgid "Number of Rows Archived" msgstr "Počet archivovaných řádků" #, python-format msgid "Object action %(action)s failed because: %(reason)s" msgstr "Činnost objektu %(action)s selhala protože: %(reason)s" msgid "Old volume is attached to a different instance." msgstr "Starý svazek je připojen k jiné instanci." 
#, python-format msgid "One or more hosts already in availability zone(s) %s" msgstr "Jeden nebo více hostitelů již jsou v zónách dostupnosti %s" msgid "Only administrators may list deleted instances" msgstr "Pouze správci mohou vypsat smazané instance" msgid "Origin header does not match this host." msgstr "Hlavička původu neodpovídá tomuto hostiteli." msgid "Origin header not valid." msgstr "Hlavička původu není platná." msgid "Origin header protocol does not match this host." msgstr "Protokol původu hlavičky neodpovídá tomuto hostiteli." #, python-format msgid "PCI Device %(node_id)s:%(address)s not found." msgstr "PCI zařízení %(node_id)s:%(address)s nenalezeno." #, python-format msgid "PCI alias %(alias)s is not defined" msgstr "Přezdívka PCI %(alias)s není určena" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is %(status)s instead of " "%(hopestatus)s" msgstr "" "PCI zařízení %(compute_node_id)s:%(address)s je %(status)s místo " "%(hopestatus)s" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead of " "%(hopeowner)s" msgstr "" "PCI zařízení %(compute_node_id)s:%(address)s vlastní %(owner)s místo " "%(hopeowner)s" #, python-format msgid "PCI device %(id)s not found" msgstr "PCI zařízení %(id)s nenalezeno" #, python-format msgid "PCI device request %(requests)s failed" msgstr "Žádost zařízení PCI %(requests)s selhala" #, python-format msgid "Page size %(pagesize)s forbidden against '%(against)s'" msgstr "Velikost stránky %(pagesize)s je zakázána v '%(against)s'" #, python-format msgid "Page size %(pagesize)s is not supported by the host." msgstr "Velikost stránky %(pagesize)s není hostitelem podporována" #, python-format msgid "" "Parameters %(missing_params)s not present in vif_details for vif %(vif_id)s. " "Check your Neutron configuration to validate that the macvtap parameters are " "correct." msgstr "" "Parametry %(missing_params)s nejsou uvedeny v podrobnostech vif %(vif_id)s. " "Zkontrolujte nastavení Neutron a ověřte, že parametry mavctap jsou správné." #, python-format msgid "Path %s must be LVM logical volume" msgstr "Cesta %s musí být logickým svazkem LVM" msgid "Paused" msgstr "Pozastaveno" msgid "Personality file limit exceeded" msgstr "Překročen limit osobnostního souboru" #, python-format msgid "Physical network is missing for network %(network_uuid)s" msgstr "Fyzická síť chybí u sítě %(network_uuid)s" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "Zásady nedovolují, aby bylo %(action)s provedeno." #, python-format msgid "Port %(port_id)s is still in use." msgstr "Port %(port_id)s se stále používá." #, python-format msgid "Port %(port_id)s not usable for instance %(instance)s." msgstr "Port %(port_id)s není použitelný pro instanci %(instance)s." #, python-format msgid "Port %(port_id)s requires a FixedIP in order to be used." msgstr "Port %(port_id)s pro použití vyžaduje pevnou IP adresu." #, python-format msgid "Port %s is not attached" msgstr "Port %s není připojen" #, python-format msgid "Port id %(port_id)s could not be found." msgstr "Port id %(port_id)s nemohlo být nalezeno." #, python-format msgid "Provided video model (%(model)s) is not supported." msgstr "Zadaný model videa (%(model)s) není podporován." #, python-format msgid "Provided watchdog action (%(action)s) is not supported." 
msgstr "Zadaná činnost sledovače (%(action)s) není podporována" msgid "QEMU guest agent is not enabled" msgstr "Agent hosta QEMU není povolen" #, python-format msgid "Quiescing is not supported in instance %(instance_id)s" msgstr "Ztišení není podporováno v instanci %(instance_id)s" #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "Třída kvóty %(class_name)s nemohla být nalezena." msgid "Quota could not be found" msgstr "Kvóta nemohla být nalezena." #, python-format msgid "" "Quota exceeded for %(overs)s: Requested %(req)s, but already used %(used)s " "of %(allowed)s %(overs)s" msgstr "" "Kvóta překročena pro %(overs)s: Požadováno %(req)s, ale již je použito " "%(used)s z %(allowed)s %(overs)s" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Kvóta překročena pro zdroje: %(overs)s" msgid "Quota exceeded, too many key pairs." msgstr "Kvóta překročena, příliš mnoho párů klíčů." msgid "Quota exceeded, too many server groups." msgstr "Kvóta překročena, příliš mnoho skupin serveru." msgid "Quota exceeded, too many servers in group" msgstr "Kvóta překročena, příliš mnoho serverů ve skupině" #, python-format msgid "Quota exists for project %(project_id)s, resource %(resource)s" msgstr "Kvóta existuje pro projekt %(project_id)s, zdroj %(resource)s" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "Kvóta pro projekt %(project_id)s nemohla být nalezena." #, python-format msgid "" "Quota for user %(user_id)s in project %(project_id)s could not be found." msgstr "" "Kvóta pro uživatele %(user_id)s v projektu %(project_id)s nemohla být " "nalezena." #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be greater than or equal to " "already used and reserved %(minimum)s." msgstr "" "Limit kvóty %(limit)s pro %(resource)s musí být větší nebo rovno již použité " "a vyhrazené %(minimum)s." #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be less than or equal to " "%(maximum)s." msgstr "" "Limit kvóty %(limit)s pro %(resource)s musí být menší nebo rovno %(maximum)s." msgid "Request body and URI mismatch" msgstr "Neshoda s tělem požadavku a URI" msgid "Request is too large." msgstr "Požadavek je příliš velký." #, python-format msgid "Request of image %(image_id)s got BadRequest response: %(response)s" msgstr "" "Žádost o obraz %(image_id)s obdržel odpověď o špatné žádosti: %(response)s" #, python-format msgid "RequestSpec not found for instance %(instance_uuid)s" msgstr "U instance %(instance_uuid)s nebyla nalezena žádost o specifikaci" msgid "Requested CPU control policy not supported by host" msgstr "" "Požadovaná zásada kontroly procesoru není podporována na tomto hostiteli" #, python-format msgid "" "Requested hardware '%(model)s' is not supported by the '%(virt)s' virt driver" msgstr "" "Požadovaný hardware '%(model)s' není podporován ovladačem virtualizace " "'%(virt)s'" #, python-format msgid "Requested image %(image)s has automatic disk resize disabled." msgstr "" "Požadovaný obraz %(image)s má zakázanou automatickou změnu své velikosti." 
msgid "" "Requested instance NUMA topology cannot fit the given host NUMA topology" msgstr "" "Požadovaná instance topologie NUMA se nemůže vejít do zadané topologie NUMA " "hostitele" msgid "" "Requested instance NUMA topology together with requested PCI devices cannot " "fit the given host NUMA topology" msgstr "" "Požadovaná instance topologie NUMA spolu s požadovanými zařízeními PCI se " "nemohou vejít do zadané topologie NUMA hostitele" #, python-format msgid "" "Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to " "satisfy for vcpus count %(vcpus)d" msgstr "" "Požadované limity vCPU %(sockets)d:%(cores)d:%(threads)d je nemožné pslnit " "pro daný počet vCPU %(vcpus)d" #, python-format msgid "Rescue device does not exist for instance %s" msgstr "Záchranné zařízení neexistuje v instanci %s" #, python-format msgid "Resize error: %(reason)s" msgstr "Chyba změny velikosti: %(reason)s" msgid "Resize to zero disk flavor is not allowed." msgstr "Změna na konfiguraci s nulovým diskem není povoleno." msgid "Resource could not be found." msgstr "Zdroj nemohl být nalezen." msgid "Resumed" msgstr "Obnoveno" #, python-format msgid "Root element name should be '%(name)s' not '%(tag)s'" msgstr "Název kořenového prvku by měl být '%(name)s' ne '%(tag)s'" #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "Filtr hostitelů plánovače %(filter_name)s nemohl být nalezen." #, python-format msgid "Security group %(name)s is not found for project %(project)s" msgstr "Bezpečnostní skupina %(name)s nenalezena v projektu %(project)s" #, python-format msgid "" "Security group %(security_group_id)s not found for project %(project_id)s." msgstr "" "Bezpečnostní skupina %(security_group_id)s není nalezena v projektu " "%(project_id)s." #, python-format msgid "Security group %(security_group_id)s not found." msgstr "Bezpečnostní skupina %(security_group_id)s není nalezena." #, python-format msgid "" "Security group %(security_group_name)s already exists for project " "%(project_id)s." msgstr "" "Bezpečnostní skupina %(security_group_name)s již existuje v projektu " "%(project_id)s." #, python-format msgid "" "Security group %(security_group_name)s not associated with the instance " "%(instance)s" msgstr "" "Bezpečnostní skupina %(security_group_name)s není přidružena k instanci " "%(instance)s" msgid "Security group id should be uuid" msgstr "IP bezpečnostní skupiny by mělo být uuid" msgid "Security group name cannot be empty" msgstr "Název bezpečnostní skupiny nemůže být prázdné" msgid "Security group not specified" msgstr "Není zadána bezpečnostní skupina" #, python-format msgid "Server disk was unable to be resized because: %(reason)s" msgstr "" "Změna velikosti disku serveru nemohla být provedena z důvodu: %(reason)s" msgid "Server does not exist" msgstr "Server neexistuje" #, python-format msgid "ServerGroup policy is not supported: %(reason)s" msgstr "Zásada ServerGroup není podporována: %(reason)s" msgid "ServerGroupAffinityFilter not configured" msgstr "Filtr slučivosti skupiny serveru není nastaven" msgid "ServerGroupAntiAffinityFilter not configured" msgstr "Filtr proti slučivosti skupiny serveru není nastaven" #, python-format msgid "Service %(service_id)s could not be found." msgstr "Služba %(service_id)s nemohla být nalezena." #, python-format msgid "Service %s not found." msgstr "Služba %s nenalezena." msgid "Service is unavailable at this time." msgstr "Služba je v tuto chvíli nedostupná." 
#, python-format msgid "Service with host %(host)s binary %(binary)s exists." msgstr "Služba na hostiteli %(host)s, binární soubor %(binary)s existuje." #, python-format msgid "Service with host %(host)s topic %(topic)s exists." msgstr "Služba na hostiteli %(host)s, téma %(topic)s existuje." msgid "Set admin password is not supported" msgstr "Nastavení hesla správce není podporováno" #, python-format msgid "Share '%s' is not supported" msgstr "Sdílení '%s' není podporováno" #, python-format msgid "Share level '%s' cannot have share configured" msgstr "Úroveň sdílení '%s' nemůže mít sdílení nastavena" #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "Snímek %(snapshot_id)s nemohl být nalezen." msgid "Some required fields are missing" msgstr "Některá povinná pole chybí" #, python-format msgid "" "Something went wrong when deleting a volume snapshot: rebasing a " "%(protocol)s network disk using qemu-img has not been fully tested" msgstr "" "Při mazání snímku svazku se něco zvrtlo: Přeskládání síťového disku " "%(protocol)s pomocí qemu-img nebylo řádně ozkoušeno." msgid "Sort direction size exceeds sort key size" msgstr "Velikost směru řazení převyšuje velikost klíče řazení" msgid "Sort key supplied was not valid." msgstr "Zadaný klíč řazení byl neplatný." msgid "Specified fixed address not assigned to instance" msgstr "Zadaná pevná adresa není k instanci přidělena" msgid "Started" msgstr "Spuštěno" msgid "Stopped" msgstr "Zastaveno" #, python-format msgid "Storage error: %(reason)s" msgstr "Chyba úložiště: %(reason)s" #, python-format msgid "Storage policy %s did not match any datastores" msgstr "Zásada uložení %s neodpovídá žádným datovým úložištím" msgid "Success" msgstr "Úspěch" msgid "Suspended" msgstr "Uspáno" msgid "Swap drive requested is larger than instance type allows." msgstr "Požadovaný odkládací disk je větší než typ instance umožňuje." msgid "Table" msgstr "Tabulka" #, python-format msgid "Task %(task_name)s is already running on host %(host)s" msgstr "Úkol %(task_name)s již je spuštěn na hostiteli %(host)s" #, python-format msgid "Task %(task_name)s is not running on host %(host)s" msgstr "Úkol %(task_name)s není spuštěn na hostiteli %(host)s" #, python-format msgid "The PCI address %(address)s has an incorrect format." msgstr "PCI adresa %(address)s je ve špatném formátu." #, python-format msgid "The console port range %(min_port)d-%(max_port)d is exhausted." msgstr "Rozsah portů konzole %(min_port)d-%(max_port)d je vyčerpán." msgid "The created instance's disk would be too small." msgstr "Vytvořený disk instance by byl příliš malý." msgid "The current driver does not support preserving ephemeral partitions." msgstr "Současný ovladač nepodporuje zachování efemerních oddílů." msgid "The default PBM policy doesn't exist on the backend." msgstr "Výchozí zásada PBM neexistuje na této podpůrné vrstvě. " msgid "The floating IP request failed with a BadRequest" msgstr "Žádost o plovoucí IP selhala s chybou Špatný požadavek." msgid "" "The instance requires a newer hypervisor version than has been provided." msgstr "Instance vyžaduje novější verzi hypervizoru, než byla poskytnuta." #, python-format msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d" msgstr "Počet zadaných portů: %(ports)d přesahuje limit: %(quota)d" #, python-format msgid "The provided RNG device path: (%(path)s) is not present on the host." msgstr "Zadaná cesta zařízení RNG: (%(path)s) se nevyskytuje na hostiteli." msgid "The request is invalid." 
msgstr "Požadavek je neplatný." #, python-format msgid "" "The requested amount of video memory %(req_vram)d is higher than the maximum " "allowed by flavor %(max_vram)d." msgstr "" "Požadované množství video paměti %(req_vram)d je vyšší než maximální " "množství povolené konfigurací %(max_vram)d." msgid "The requested availability zone is not available" msgstr "Požadovaná zóna dostupnosti není dostupná" msgid "The requested functionality is not supported." msgstr "Požadovaná funkce není podporována." #, python-format msgid "The specified cluster '%s' was not found in vCenter" msgstr "Zadaný cluster '%s' nebyl nalezen ve vCenter" #, python-format msgid "The supplied device path (%(path)s) is in use." msgstr "Zadaná cesta zařízení (%(path)s) se již používá." #, python-format msgid "The supplied device path (%(path)s) is invalid." msgstr "Zadaná cesta zařízení (%(path)s) je neplatná." #, python-format msgid "" "The supplied disk path (%(path)s) already exists, it is expected not to " "exist." msgstr "Zadaná cesta disku (%(path)s) již existuje, očekává se, že nebude." msgid "The supplied hypervisor type of is invalid." msgstr "Zadaný typ hypervizoru je neplatný." msgid "The target host can't be the same one." msgstr "Cílový hostitel nemůže být ten stejný." #, python-format msgid "The token '%(token)s' is invalid or has expired" msgstr "Známka '%(token)s' je neplatná nebo vypršela" #, python-format msgid "" "The volume cannot be assigned the same device name as the root device %s" msgstr "" "Svazek nemůže být přidělen ke stejnému názvu zařízení jako kořenové zařízení " "%s" msgid "There are not enough hosts available." msgstr "Není dostatek dostupných hostitelů." #, python-format msgid "There is no such action: %s" msgstr "Žádná taková činnost: %s" #, python-format msgid "" "This compute node's hypervisor is older than the minimum supported version: " "%(version)s." msgstr "" "Hypervizor tohoto uzlu výpočtu je starší než minimální podporovaná verze: " "%(version)s." msgid "" "This method needs to be called with either networks=None and port_ids=None " "or port_ids and networks as not none." msgstr "" "Tuto metodu je třeba zavolat buď s parametry networks=None a port_ids=None " "nebo id portů a sítě nezadány jako none." #, python-format msgid "This rule already exists in group %s" msgstr "Toto pravidlo již existuje ve skupině %s" #, python-format msgid "" "This service is older (v%(thisver)i) than the minimum (v%(minver)i) version " "of the rest of the deployment. Unable to continue." msgstr "" "Tato služba je starší (v%(thisver)i) než minimální verze (v%(minver)i) ve " "zbytku nasazení. Nelze pokračovat." msgid "Timeout waiting for response from cell" msgstr "Při čekání na odpověď od buňky vypršel čas" #, python-format msgid "Timeout while checking if we can live migrate to host: %s" msgstr "" "Při kontrole možnosti přesunu za provozu na hostitele %s vypršel časový " "limit." msgid "To and From ports must be integers" msgstr "Porty Do a Od musí být celá čísla" msgid "Token not found" msgstr "Známka nenalezena" msgid "Type and Code must be integers for ICMP protocol type" msgstr "Typ a kód musí být v protokolu ICMP celá čísla" #, python-format msgid "" "Unable to associate floating IP %(address)s to any fixed IPs for instance " "%(id)s. Instance has no fixed IPv4 addresses to associate." msgstr "" "Nelze přidružit plovoucí IP adresu %(address)s k žádné z pevných IP adres " "instance %(id)s. Instance nemá žádné pevné adresy IPv4 k přidružení." 
#, python-format msgid "" "Unable to associate floating IP %(address)s to fixed IP %(fixed_address)s " "for instance %(id)s. Error: %(error)s" msgstr "" "Nelze přidružit plovoucí IP adresu %(address)s k pevné IP adrese " "%(fixed_address)s instance %(id)s. Chyba: %(error)s" #, python-format msgid "Unable to determine disk bus for '%s'" msgstr "Nelze zjistit řadič disku pro '%s'" #, python-format msgid "Unable to determine disk prefix for %s" msgstr "Nelze zjistit předponu disku pro %s" #, python-format msgid "Unable to find host for Instance %s" msgstr "Nelze najít hostitele pro instanci %s" msgid "Unable to find iSCSI Target" msgstr "Nelze najít cíl ISCSI" msgid "Unable to find volume" msgstr "Nelze najít svazek" msgid "Unable to get host UUID: /etc/machine-id does not exist" msgstr "Nelze získat UUID hostitele: /etc/machine-id neexistuje" msgid "Unable to get host UUID: /etc/machine-id is empty" msgstr "Nelze získat UUID hostitele: /etc/machine-id je prázdné" msgid "" "Unable to launch multiple instances with a single configured port ID. Please " "launch your instance one by one with different ports." msgstr "" "Nelze spustit více instancí s jedním nastaveným ID portu. Prosím spusťte " "vaše instance jednotlivě s odlišnými porty." #, python-format msgid "" "Unable to migrate %(instance_uuid)s to %(dest)s: Lack of memory(host:" "%(avail)s <= instance:%(mem_inst)s)" msgstr "" "Nelze přesunout %(instance_uuid)s do %(dest)s: Nedostatek paměti (hostitel:" "%(avail)s <= instance:%(mem_inst)s)" #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too large(available " "on destination host:%(available)s < need:%(necessary)s)" msgstr "" "Nelze přesunout %(instance_uuid)s: Disk instance je příliš velký (dostupná " "kapacita na cílovém hostiteli:%(available)s < nutná kapacita:%(necessary)s)" #, python-format msgid "" "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." msgstr "" "Nelze přesunout instanci (%(instance_id)s) na současného hostitele " "(%(host)s)." msgid "Unable to resize disk down." msgstr "Nelze zmenšit velikost disku." msgid "Unable to set password on instance" msgstr "Nelze nastavit heslo instance" msgid "Unable to shrink disk." msgstr "Nelze zmenšit disk." #, python-format msgid "Unacceptable CPU info: %(reason)s" msgstr "Nepřijatelné informace o CPU: %(reason)s" msgid "Unacceptable parameters." msgstr "Nepřijatelné parametry." #, python-format msgid "Unavailable console type %(console_type)s." msgstr "Nedostupný typ konzole %(console_type)s." msgid "" "Undefined Block Device Mapping root: BlockDeviceMappingList contains Block " "Device Mappings from multiple instances." msgstr "" "Neurčený kořen mapování blokového zařízení: BlockDeviceMappingList obsahuje " "mapování blokového zařízení z více instancí." #, python-format msgid "Unexpected aggregate action %s" msgstr "Neočekávaná činnost agregátu %s" msgid "Unexpected type adding stats" msgstr "Neočekávaný typ při přidávání statistik" #, python-format msgid "Unexpected vif_type=%s" msgstr "Neočekávaný typ vif=%s" msgid "Unknown" msgstr "Neznámé" msgid "Unknown action" msgstr "Neznámá činnost" #, python-format msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat." msgstr "" "Neznámý formát konfigurační jednotky %(format)s. Vyberte jedno z iso9660 " "nebo vfat." 
#, python-format msgid "Unknown delete_info type %s" msgstr "Neznámý typ mazání informací %s" #, python-format msgid "Unknown image_type=%s" msgstr "Neznámý typ obrazu=%s" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Neznámý zdroj kvóty %(unknown)s." msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Neznámý směr řazení, musí být 'desc' nebo 'asc'" #, python-format msgid "Unknown type: %s" msgstr "Neznámý typ: %s" msgid "Unrecognized legacy format." msgstr "Nerozpoznaný zastaralý formát." #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "Nerozpoznaná hodnota čtení smazaných '%s'" #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "Nerozpoznaná hodnota '%s' pro CONF.running_deleted_instance_action" #, python-format msgid "Unshelve attempted but the image %s cannot be found." msgstr "Pokus o vyskladnění, ale obraz %s nemůže být nalezen." msgid "Unsupported Content-Type" msgstr "Nepodporovaný Content-Type" #, python-format msgid "User %(username)s not found in password file." msgstr "Uživatel %(username)s nenalezen v souboru hesel." #, python-format msgid "User %(username)s not found in shadow file." msgstr "Uživatel %(username)s nenalezen ve stínovém souboru." msgid "User data needs to be valid base 64." msgstr "Uživatelská data potřebují být v platném formátu base 64." msgid "User does not have admin privileges" msgstr "Uživatel nemá správcovská oprávnění" msgid "" "Using different block_device_mapping syntaxes is not allowed in the same " "request." msgstr "" "Použití různých syntaxí mapování blokového zařízení není povoleno ve stejné " "žádosti." #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "Verze %(req_ver)s není API podporována. Minimální verze je %(min_ver)s a " "maximální %(max_ver)s." msgid "Virtual Interface creation failed" msgstr "Vytvoření virtuálního rozhraní selhalo" msgid "Virtual interface plugin failed" msgstr "Zásuvný modul virtuálního rozhraní selhalo" #, python-format msgid "Virtual machine mode '%(vmmode)s' is not recognised" msgstr "Uzel virtuálního stroje '%(vmmode)s' nebyl rozpoznán" #, python-format msgid "Virtual machine mode '%s' is not valid" msgstr "Režim virtuálního stroje '%s' není platný" #, python-format msgid "Virtualization type '%(virt)s' is not supported by this compute driver" msgstr "Typ virtualizace '%(virt)s' není podporováno tímto ovladačem výpočtu" #, python-format msgid "Volume %(volume_id)s could not be attached. Reason: %(reason)s" msgstr "Svazek %(volume_id)s nelze připojit. Důvod: %(reason)s" #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "Svazek %(volume_id)s nemohl být nalezen." #, python-format msgid "" "Volume %(volume_id)s did not finish being created even after we waited " "%(seconds)s seconds or %(attempts)s attempts. And its status is " "%(volume_status)s." msgstr "" "Svazek %(volume_id)s nedokončil proces vytváření i po vyčkávání po dobu " "%(seconds)s vteřin nebo %(attempts)s pokusech. A jeho stav je " "%(volume_status)s." msgid "Volume does not belong to the requested instance." msgstr "Svazek nepatří do požadované instance." #, python-format msgid "" "Volume encryption is not supported for %(volume_type)s volume %(volume_id)s" msgstr "" "Šifrování svazku není podporováno v %(volume_type)s svazku %(volume_id)s" #, python-format msgid "" "Volume is smaller than the minimum size specified in image metadata. 
Volume " "size is %(volume_size)i bytes, minimum size is %(image_min_disk)i bytes." msgstr "" "Svazek je menší, než minimální zadaná velikost zadaná v popisných datech " "obrazu. Svazek má %(volume_size)i bajtů, minimální velikost je " "%(image_min_disk)i bajtů." #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " "support custom block size" msgstr "" "Svazek nastavuje velikost bloku, ale současný hypervizor libvirt '%s' " "nepodporuje vlastní velikost bloku" msgid "When resizing, instances must change flavor!" msgstr "Při změně velikosti musí instance změnit konfiguraci!" #, python-format msgid "Wrong quota method %(method)s used on resource %(res)s" msgstr "Špatná metoda kvóty %(method)s použita na zdroj %(res)s" msgid "X-Forwarded-For is missing from request." msgstr "X-Forwarded-For v žádosti chybí" msgid "X-Instance-ID header is missing from request." msgstr "Hlavička X-Instance-ID v žádosti chybí." msgid "X-Instance-ID-Signature header is missing from request." msgstr "Hlavička X-Instance-ID-Signature v žádosti chybí." msgid "X-Metadata-Provider is missing from request." msgstr "X-Metadata-Provider v žádosti chybí." msgid "X-Tenant-ID header is missing from request." msgstr "Hlavička X-Tenant-ID v žádosti chybí." msgid "You are not allowed to delete the image." msgstr "Nemáte oprávnění smazat tento obraz." msgid "" "You are not authorized to access the image the instance was started with." msgstr "" "Nemáte oprávnění pro přístup k obrazu, z kterého byla instance spuštěna." msgid "You must implement __call__" msgstr "Musíte zavést __call__" msgid "You should specify images_rbd_pool flag to use rbd images." msgstr "Pro použití obrazů rbd byste měli zadat příznak images_rbd_pool." msgid "You should specify images_volume_group flag to use LVM images." msgstr "Pro použití obrazů LVM byste měli zadat příznak images_volume_group." msgid "Zero floating IPs available." msgstr "Je dostupných nula plovoucích IP adres." msgid "admin password can't be changed on existing disk" msgstr "heslo správce nelze měnit na existujícím disku" msgid "cannot understand JSON" msgstr "JSON nelze porozumět" msgid "clone() is not implemented" msgstr "clone() není zavedeno." #, python-format msgid "connect info: %s" msgstr "informace o připojení: %s" #, python-format msgid "connecting to: %(host)s:%(port)s" msgstr "připojování k: %(host)s:%(port)s" #, python-format msgid "disk type '%s' not supported" msgstr "typ disku '%s' není podporován" #, python-format msgid "empty project id for instance %s" msgstr "prázdné id projektu pro instanci %s" msgid "error setting admin password" msgstr "chyba při nastavování hesla správce" #, python-format msgid "error: %s" msgstr "chyba: %s" #, python-format msgid "failed to generate X509 fingerprint. Error message: %s" msgstr "nelze vytvořit otisk X509. Chybová zpráva: %s" msgid "failed to generate fingerprint" msgstr "nelze vytvořit otisk" msgid "filename cannot be None" msgstr "název souboru nemůže být None" msgid "floating IP is already associated" msgstr "Plovoucí IP adresa již přidružena." 
msgid "floating IP not found" msgstr "Plovoucí IP adresa nenalezena" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s zálohováno: %(backing_file)s" #, python-format msgid "href %s does not contain version" msgstr "href %s neobsahuje verzi" msgid "image already mounted" msgstr "obraz již je připojen" #, python-format msgid "instance %s is not running" msgstr "Instance %s není spuštěna" msgid "instance is a required argument to use @refresh_cache" msgstr "instance je povinný argument pro použití @refresh_cache" msgid "instance is not in a suspended state" msgstr "instance není v pozastaveném stavu" msgid "instance is not powered on" msgstr "Instance není zapnuta" msgid "instance is powered off and cannot be suspended." msgstr "instance je vypnutá a nemůže být pozastavena" #, python-format msgid "instance_id %s could not be found as device id on any ports" msgstr "" "ID instance %s nemohlo být nalezeno na žádném z portů zařízení zadaného " "pomocí id" msgid "is_public must be a boolean" msgstr "is_public musí být boolean" msgid "keymgr.fixed_key not defined" msgstr "keymgr.fixed_key není zadáno" msgid "l3driver call to add floating IP failed" msgstr "Volání ovladače l3 pro přidání plovoucí IP adresy selhalo" #, python-format msgid "libguestfs installed but not usable (%s)" msgstr "libguestfs je nainstalováno ale nelze použít (%s)" #, python-format msgid "libguestfs is not installed (%s)" msgstr "libguestfs není nainstalováno (%s)" #, python-format msgid "marker [%s] not found" msgstr "značka [%s] nenalezena" msgid "max_count cannot be greater than 1 if an fixed_ip is specified." msgstr "Pokud je zadána pevná IP, maximální počet nemůže být větší než 1." msgid "min_count must be <= max_count" msgstr "min_count musí být <= max_count" #, python-format msgid "nbd device %s did not show up" msgstr "zařízení nbd %s se nezobrazilo" msgid "nbd unavailable: module not loaded" msgstr "nbd nedostupné: modul nenačten" #, python-format msgid "no match found for %s" msgstr "nebyla nalezena shoda pro %s" #, python-format msgid "not able to execute ssh command: %s" msgstr "nelze spustit příkaz ssh: %s" msgid "operation time out" msgstr "operace vypršela" #, python-format msgid "partition %s not found" msgstr "Oddíl %s nenalezen" #, python-format msgid "partition search unsupported with %s" msgstr "Hledání oddílu není podporováno v %s" msgid "pause not supported for vmwareapi" msgstr "pozastavení není v vmwareapi podporováno" #, python-format msgid "qemu-nbd error: %s" msgstr "chyba qemu-nbd: %s" msgid "rbd python libraries not found" msgstr "python knihovny rbd nenalezeny" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "čtení smazaného může být buď 'no', 'yes' nebo 'only', ne %r" msgid "serve() can only be called once" msgstr "serve() mlže být voláno pouze jednou" msgid "service is a mandatory argument for DB based ServiceGroup driver" msgstr "" "service je povinný argument pro ovladače Skupiny serveru založené na databázi" msgid "service is a mandatory argument for Memcached based ServiceGroup driver" msgstr "" "service je povinný argument pro ovladače Skupiny serveru založené na " "Memcached" msgid "set_admin_password is not implemented by this driver or guest instance." msgstr "set_admin_password není tímto ovladačem nebo hostem instance zavedeno." 
#, python-format msgid "snapshot for %s" msgstr "snímek pro %s" msgid "snapshot_id required in create_info" msgstr "Id snímku vyžadováno při vytváření informací" msgid "token not provided" msgstr "příznak není zadán" msgid "too many body keys" msgstr "příliš mnoho klíčů těla" msgid "unpause not supported for vmwareapi" msgstr "zrušení pozastavení není v vmwareapi podporováno" #, python-format msgid "vg %s must be LVM volume group" msgstr "vg %s musí být skupina svazku LVM" #, python-format msgid "vhostuser_sock_path not present in vif_details for vif %(vif_id)s" msgstr "" "cesta soketu uživatele vhostitele není přítomna v podrobnostech vif " "%(vif_id)s" #, python-format msgid "vif type %s not supported" msgstr "vif typu %s není podporován" msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "parametr vif_type musí být přítomen pro toto zavedení ovladače vif." #, python-format msgid "volume %s already attached" msgstr "svazek %s již je připojen" #, python-format msgid "" "volume '%(vol)s' status must be 'in-use'. Currently in '%(status)s' status" msgstr "" "stav svazku '%(vol)s' musí být 'in-use'. Nyní je ve stavu '%(status)s'." ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315688.889605 nova-32.0.0/nova/locale/de/0000775000175000017500000000000000000000000015354 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3696086 nova-32.0.0/nova/locale/de/LC_MESSAGES/0000775000175000017500000000000000000000000017141 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/locale/de/LC_MESSAGES/nova.po0000664000175000017500000027724300000000000020463 0ustar00zuulzuul00000000000000# Translations template for nova. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the nova project. # # Translators: # Alec Hans , 2013 # Ettore Atalan , 2014 # FIRST AUTHOR , 2011 # iLennart21 , 2013 # Laera Loris , 2013 # matthew wagoner , 2012 # English translations for nova. # Andreas Jaeger , 2016. #zanata # Andreas Jaeger , 2019. #zanata # Andreas Jaeger , 2020. #zanata msgid "" msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-07-04 18:13+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2020-04-25 10:30+0000\n" "Last-Translator: Andreas Jaeger \n" "Language: de\n" "Language-Team: German\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" "Generated-By: Babel 2.2.0\n" "X-Generator: Zanata 4.3.3\n" #, python-format msgid "%(address)s is not a valid IP v4/6 address." msgstr "%(address)s ist keine gültige IP v4/6-Adresse." #, python-format msgid "" "%(binary)s attempted direct database access which is not allowed by policy" msgstr "" "%(binary)s hat versucht, direkt auf die Datenbank zuzugreifen, dies ist laut " "Richtlinie nicht zulässig" #, python-format msgid "%(cidr)s is not a valid IP network." msgstr "%(cidr)s ist kein gültiges IP-Netzwerk." #, python-format msgid "%(field)s should not be part of the updates." msgstr "%(field)s sollte nicht Teil der Aktualisierung sein." 
#, python-format msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB" msgstr "" "%(memsize)d MB Speicher zugewiesen, erwartet werden jedoch %(memtotal)d MB" #, python-format msgid "%(path)s is not on local storage: %(reason)s" msgstr "%(path)s ist nicht auf dem lokalen Speicher: %(reason)s" #, python-format msgid "%(path)s is not on shared storage: %(reason)s" msgstr "%(path)s ist nicht auf dem gemeinsamen Speicher: %(reason)s" #, python-format msgid "%(total)i rows matched query %(meth)s, %(done)i migrated" msgstr "" "%(total)i Zeilen stimmten mit der Abfrage %(meth)s überein. %(done)i wurden " "migriert." #, python-format msgid "%(type)s hypervisor does not support PCI devices" msgstr "%(type)s Hypervisor unterstützt PCI Gerät nicht" #, python-format msgid "%s does not support disk hotplug." msgstr "%s unterstützt kein Anschließen von Platten im laufenden Betrieb." #, python-format msgid "%s format is not supported" msgstr "%s-Format wird nicht unterstützt" #, python-format msgid "%s is not supported." msgstr "%s wird nicht unterstützt." #, python-format msgid "%s must be either 'MANUAL' or 'AUTO'." msgstr "%s muss entweder 'MANUAL' oder 'AUTO' sein." #, python-format msgid "'%(other)s' should be an instance of '%(cls)s'" msgstr "'%(other)s' sollte keine Instanz sein von '%(cls)s'" msgid "'qemu-img info' parsing failed." msgstr "Auswertung von 'qemu-img info' fehlgeschlagen." #, python-format msgid "'rxtx_factor' argument must be a float between 0 and %g" msgstr "Argument 'rxtx_factor' muss eine Gleitkommazahl zwischen 0 und %g sein" #, python-format msgid "A NetworkModel is required in field %s" msgstr "Ein NetworkModel ist erforderlich im Feld %s" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "API Versionszeichenkette %(version)s ist im falschen Format. Muss im Format " "sein MajorNum.MinorNum." #, python-format msgid "API version %(version)s is not supported on this method." msgstr "API Version %(version)s ist nicht unterstützt für diese Methode." msgid "Access list not available for public flavors." msgstr "Zugriffsliste ist für öffentliche Versionen nicht verfügbar. " #, python-format msgid "Action %s not found" msgstr "Aktion %s nicht gefunden" #, python-format msgid "" "Action for request_id %(request_id)s on instance %(instance_uuid)s not found" msgstr "" "Aktion für request_id '%(request_id)s' für Instanz '%(instance_uuid)s' nicht " "gefunden" #, python-format msgid "Action: '%(action)s', calling method: %(meth)s, body: %(body)s" msgstr "Aktion '%(action)s', Aufrufmethode %(meth)s, Hauptteil %(body)s" #, python-format msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries" msgstr "" "Fehler beim Hinzufügen von Metadaten für Aggregat %(id)s nach %(retries)s " "Wiederholungen" #, python-format msgid "Aggregate %(aggregate_id)s already has host %(host)s." msgstr "Aggregate %(aggregate_id)s hat bereits einen Host %(host)s." #, python-format msgid "Aggregate %(aggregate_id)s could not be found." msgstr "Aggregat %(aggregate_id)s konnte nicht gefunden werden." #, python-format msgid "Aggregate %(aggregate_id)s has no host %(host)s." msgstr "Aggregat %(aggregate_id)s hat keinen Host %(host)s." #, python-format msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." msgstr "" "Aggregat %(aggregate_id)s enthält keine Metadaten mit Schlüssel " "%(metadata_key)s." #, python-format msgid "Aggregate %(aggregate_name)s already exists." 
msgstr "Aggregat %(aggregate_name)s ist bereits vorhanden." #, python-format msgid "Aggregate %s does not support empty named availability zone" msgstr "Aggregat %s unterstützt keine leeren, bezeichneten Verfügbarkeitszonen" #, python-format msgid "An invalid 'name' value was provided. The name must be: %(reason)s" msgstr "" "Es wurde ein ungültiger 'name'-Wert angegeben. Der Name muss lauten: " "%(reason)s" msgid "An unknown error has occurred. Please try your request again." msgstr "" "Ein unbekannter Fehler ist aufgetreten. Stellen Sie Ihre Anforderung erneut." msgid "An unknown exception occurred." msgstr "Eine unbekannte Ausnahme ist aufgetreten." #, python-format msgid "Architecture name '%(arch)s' is not recognised" msgstr "Architekturname '%(arch)s' wird nicht erkannt" #, python-format msgid "Architecture name '%s' is not valid" msgstr "Architekturname '%s' ist nicht gültig" #, python-format msgid "" "Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty pool" msgstr "" "Versuche PCI Gerät %(compute_node_id)s:%(address)s vom leeren Pool zu " "beziehen" msgid "Attempted overwrite of an existing value." msgstr "Versuchte Überschreibung eines vorhandenen Wertes." #, python-format msgid "Attribute not supported: %(attr)s" msgstr "Attribut nicht unterstützt: %(attr)s" msgid "Bad Request - Invalid Parameters" msgstr "Fehlerhafte Anfrage - Ungültige Parameter" #, python-format msgid "Bad network format: missing %s" msgstr "Falsches Netzwerkformat: fehlendes %s" msgid "Bad networks format" msgstr "Falsches Netzwerkformat" #, python-format msgid "Bad networks format: network uuid is not in proper format (%s)" msgstr "Ungültiges Netzformat: Netz-UUID ist nicht im richtigen Format (%s)" #, python-format msgid "Bad prefix for network in cidr %s" msgstr "Falsches Präfix für Netz in CIDR %s" #, python-format msgid "" "Binding failed for port %(port_id)s, please check neutron logs for more " "information." msgstr "" "Binden fehlgeschlagen für Port %(port_id)s, bitte überprüfen Sie die Neutron " "Logs für mehr Informationen." msgid "Blank components" msgstr "Leere Komponenten" msgid "" "Blank volumes (source: 'blank', dest: 'volume') need to have non-zero size" msgstr "" "Leerer Datenträger (source: 'blank', dest: 'volume') muss eine Nicht-Null-" "Größe haben" #, python-format msgid "Block Device %(id)s is not bootable." msgstr "Blockgerät %(id)s ist nicht bootfähig." #, python-format msgid "" "Block Device Mapping %(volume_id)s is a multi-attach volume and is not valid " "for this operation." msgstr "" "Die Blockgerätezuordnung %(volume_id)s ist ein Datenträger mit mehreren " "Zuordnungen und für diese Operation nicht zulässig." msgid "Block Device Mapping cannot be converted to legacy format. " msgstr "" "Block-Geräte-Zuordnung kann nicht zu einem legalen Format konvertiert werden." msgid "Block Device Mapping is Invalid." msgstr "Block-Geräte-Zuordnung ist ungültig." #, python-format msgid "Block Device Mapping is Invalid: %(details)s" msgstr "Block-Geräte-Zuordnung ist ungültig; %(details)s" msgid "" "Block Device Mapping is Invalid: Boot sequence for the instance and image/" "block device mapping combination is not valid." msgstr "" "Blockgerätezuordnung ist ungültig; Bootsequenz der Instanz und Abbild/Block-" "Geräte-Kombination ist ungültig." msgid "" "Block Device Mapping is Invalid: You specified more local devices than the " "limit allows" msgstr "" "Blockgerätezuordnung ist ungültig; Sie haben mehr lokale Geräte " "spezifiziert, als das Limit erlaubt." 
#, python-format msgid "Block Device Mapping is Invalid: failed to get image %(id)s." msgstr "" "Blockgerätezuordnung ist ungültig; auslesen des Abbild %(id)s fehlgeschlagen." #, python-format msgid "Block Device Mapping is Invalid: failed to get snapshot %(id)s." msgstr "" "Block-Geräte-Zuordnung ist ungültig; auslesen der Schattenkopie %(id)s " "fehlgeschlagen." #, python-format msgid "Block Device Mapping is Invalid: failed to get volume %(id)s." msgstr "" "Blockgerätezuordnung ist ungültig; auslesen der Datenträger %(id)s " "fehlgeschlagen." msgid "Block migration can not be used with shared storage." msgstr "" "Blockmigration kann nicht mit gemeinsam genutztem Speicher verwendet werden." msgid "Boot index is invalid." msgstr "Bootindex ist ungültig." #, python-format msgid "Build of instance %(instance_uuid)s aborted: %(reason)s" msgstr "Build der Instanz %(instance_uuid)s abgebrochen: %(reason)s" #, python-format msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s" msgstr "Build der Instanz %(instance_uuid)s neu geplant: %(reason)s" #, python-format msgid "BuildRequest not found for instance %(uuid)s" msgstr "BuildRequest für Instanz %(uuid)s nicht gefunden." msgid "CPU and memory allocation must be provided for all NUMA nodes" msgstr "" "CPU- und Speicherzuordnung müssen für alle NUMA-Knoten angegeben werden" #, python-format msgid "" "CPU doesn't have compatibility.\n" "\n" "%(ret)s\n" "\n" "Refer to %(u)s" msgstr "" "CPU ist nicht kompatibel.\n" "\n" "%(ret)s\n" "\n" "Siehe %(u)s" #, python-format msgid "CPU number %(cpunum)d is assigned to two nodes" msgstr "CPU-Nummer %(cpunum)d ist zwei Knoten zugeordnet" #, python-format msgid "CPU number %(cpunum)d is larger than max %(cpumax)d" msgstr "CPU-Nummer %(cpunum)d ist größer als das Maximum %(cpumax)d" #, python-format msgid "CPU number %(cpuset)s is not assigned to any node" msgstr "CPU-Nummer %(cpuset)s ist keinem Knoten zugeordnet" msgid "Can not add access to a public flavor." msgstr "Kann keinen Zugriff auf eine öffentliche Version herstellen. " msgid "Can not find requested image" msgstr "Angefordertes Image kann nicht gefunden werden" #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "" "Authentifizierungsanforderung für %d-Berechtigungsnachweis kann nicht " "verarbeitet werden" msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "" "Stammdatenträgerpfad kann nicht aus libvirt-Konfiguration der Instanz " "abgerufen werden" #, python-format msgid "" "Cannot '%(action)s' instance %(server_id)s while it is in %(attr)s %(state)s" msgstr "" "'%(action)s' für Instanz %(server_id)s nicht möglich, während sie sich in " "%(attr)s %(state)s befindet" #, python-format msgid "Cannot add host to aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Host kann nicht zu Aggregat %(aggregate_id)s hinzugefügt werden. Ursache: " "%(reason)s." msgid "Cannot attach one or more volumes to multiple instances" msgstr "" "Ein oder mehrere Datenträger können nicht an mehrere Instanzen angehängt " "werden" #, python-format msgid "Cannot call %(method)s on orphaned %(objtype)s object" msgstr "" "%(method)s kann nicht für ein verwaistes %(objtype)s-Objekt aufgerufen werden" #, python-format msgid "" "Cannot determine the parent storage pool for %s; cannot determine where to " "store images" msgstr "" "Übergeordneter Speicherpool für %s wurde nicht erkannt. Der Speicherort für " "Abbilder kann nicht ermittelt werden." 
msgid "Cannot find image for rebuild" msgstr "Image für Wiederherstellung kann nicht gefunden werden" #, python-format msgid "Cannot remove host %(host)s in aggregate %(id)s" msgstr "Host %(host)s in Aggregat %(id)s kann nicht entfernt werden" #, python-format msgid "Cannot remove host from aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Host kann nicht von Aggregat %(aggregate_id)s entfernt werden. Ursache: " "%(reason)s." msgid "Cannot rescue a volume-backed instance" msgstr "Eine Instanz vom Typ 'volume-backed' kann nicht gesichert werden" msgid "" "Cannot set cpu thread pinning policy in a non dedicated cpu pinning policy" msgstr "" "In einer nicht dedizierten CPU-Pinning-Richtlinie kann keine CPU-Thread-" "Pinning-Richtlinie definiert werden." msgid "Cannot set realtime policy in a non dedicated cpu pinning policy" msgstr "" "In einer nicht dedizierten CPU-Pinning-Richtlinie kann keine " "Echtzeitrichtlinie definiert werden." #, python-format msgid "Cannot update aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Aggregat %(aggregate_id)s kann nicht aktualisiert werden. Ursache: " "%(reason)s." #, python-format msgid "" "Cannot update metadata of aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Metadaten von Aggregat %(aggregate_id)s können nicht aktualisiert werden. " "Ursache: %(reason)s." #, python-format msgid "Cell %(uuid)s has no mapping." msgstr "Zelle %(uuid)s hat keine Zuordnung." #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "" "Durch die Änderung wäre die Nutzung kleiner als 0 für die folgenden " "Ressourcen: %(unders)s" #, python-format msgid "Cinder API version %(version)s is not available." msgstr "Die Cinder API-Version %(version)s ist nicht verfügbar." #, python-format msgid "Class %(class_name)s could not be found: %(exception)s" msgstr "Klasse %(class_name)s konnte nicht gefunden werden: %(exception)s" #, python-format msgid "Compute host %(host)s could not be found." msgstr "Rechenhost %(host)s konnte nicht gefunden werden." #, python-format msgid "Compute host %s not found." msgstr "Rechenhost %s nicht gefunden." #, python-format msgid "Compute service of %(host)s is still in use." msgstr "Compute Dienst von %(host)s wird noch verwendet." #, python-format msgid "Compute service of %(host)s is unavailable at this time." msgstr "Rechenservice von %(host)s ist derzeit nicht verfügbar." #, python-format msgid "Config drive format '%(format)s' is not supported." msgstr "" "Das Konfigurationslaufwerksformat '%(format)s' wird nicht unterstützt. " #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt hypervisor " "'%s' does not support selecting CPU models" msgstr "" "Config hat ein explizites CPU-Modell angefordert, aber der aktuelle libvirt-" "Hypervisor '%s' unterstützt nicht die Auswahl von CPU-Modellen" msgid "Configuration is Invalid." msgstr "Konfiguration ist ungültig." #, python-format msgid "" "Conflict updating instance %(instance_uuid)s, but we were unable to " "determine the cause" msgstr "" "Konflikt beim Aktualisieren der Instanz %(instance_uuid)s, aber wir waren " "nicht in der Lage den Grund herauszufinden" #, python-format msgid "" "Conflict updating instance %(instance_uuid)s. Expected: %(expected)s. " "Actual: %(actual)s" msgstr "" "Konflikt beim Aktualisieren der Instanz %(instance_uuid)s. Erwartet: " "%(expected)s. 
Aktuell: %(actual)s" #, python-format msgid "Connection to cinder host failed: %(reason)s" msgstr "Verbindung zu Cinder Host fehlgeschlagen: %(reason)s" #, python-format msgid "Connection to glance host %(server)s failed: %(reason)s" msgstr "Verbindung zu Glance-Host %(server)s fehlgeschlagen: %(reason)s" #, python-format msgid "Connection to libvirt lost: %s" msgstr "Verbindung zu libvirt verloren: %s" #, python-format msgid "" "Console log output could not be retrieved for instance %(instance_id)s. " "Reason: %(reason)s" msgstr "" "Die Ausgabe des Konsolenprotokolls konnte für die Instanz %(instance_id)s " "nicht abgerufen werden. Ursache: %(reason)s" msgid "Constraint not met." msgstr "Bedingung nicht erfüllt." #, python-format msgid "Converted to raw, but format is now %s" msgstr "In unformatierten Zustand konvertiert, Format ist nun jedoch %s" #, python-format msgid "Could not attach image to loopback: %s" msgstr "Image konnte nicht an Loopback angehängt werden: %s" #, python-format msgid "Could not fetch image %(image_id)s" msgstr "Abbild %(image_id)s konnte nicht abgerufen werden" #, python-format msgid "Could not find a handler for %(driver_type)s volume." msgstr "Konnte keinen Handler finden für %(driver_type)s Datenträger." #, python-format msgid "Could not find binary %(binary)s on host %(host)s." msgstr "" "Binärprogramm %(binary)s auf Host %(host)s konnte nicht gefunden werden." #, python-format msgid "Could not find config at %(path)s" msgstr "Konfiguration konnte unter %(path)s nicht gefunden werden" msgid "Could not find the datastore reference(s) which the VM uses." msgstr "" "Die von der VM verwendeten Datenspeicherverweise konnten nicht gefunden " "werden." #, python-format msgid "Could not load line %(line)s, got error %(error)s" msgstr "" "Zeile %(line)s konnte nicht geladen werden, Fehler %(error)s ist aufgetreten" #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "paste-App '%(name)s' konnte von %(path)s nicht geladen werden" #, python-format msgid "" "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s" msgstr "" "VFAT-Konfigurationslaufwerk konnte nicht angehängt werden. %(operation)s " "fehlgeschlagen. Fehler: %(error)s" #, python-format msgid "Could not upload image %(image_id)s" msgstr "Abbild %(image_id)s konnte nicht hochgeladen werden" msgid "Creation of virtual interface with unique mac address failed" msgstr "" "Erstellung der virtuellen Schnittstelle mit eindeutiger MAC-Adresse " "fehlgeschlagen." #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "Datenspeicher-regex %s stimmt mit keinem Datenspeicher überein" msgid "Datetime is in invalid format" msgstr "Datum/Uhrzeit hat ein ungültiges Format" msgid "Default PBM policy is required if PBM is enabled." msgstr "Standard-PBM-Richtlinie ist erforderlich, wenn PBM aktiviert ist." #, python-format msgid "Device '%(device)s' not found." msgstr "Das Gerät '%(device)s' wurde nicht gefunden." #, python-format msgid "Device detach failed for %(device)s: %(reason)s" msgstr "Abhängen des Geräts fehlgeschlagen für %(device)s: %(reason)s" msgid "Device name contains spaces." msgstr "Gerätename enthält Leerzeichen." msgid "Device name empty or too long." msgstr "Gerätename leer oder zu lang." 
#, python-format msgid "Device type mismatch for alias '%s'" msgstr "Gerätetypabweichung für Alias '%s'" #, python-format msgid "Disk format %(disk_format)s is not acceptable" msgstr "Datenträgerformat %(disk_format)s ist nicht zulässig" #, python-format msgid "Disk info file is invalid: %(reason)s" msgstr "Datei mit Datenträgerinformationen ist ungültig: %(reason)s" #, python-format msgid "Driver Error: %s" msgstr "Treiberfehler: %s" #, python-format msgid "Error attempting to run %(method)s" msgstr "Fehler bei dem Versuch, %(method)s auszuführen." #, python-format msgid "" "Error destroying the instance on node %(node)s. Provision state still " "'%(state)s'." msgstr "" "Fehler beim Löschen der Instanz auf Knoten %(node)s. Bereitstellungsstatus " "ist noch '%(state)s'." #, python-format msgid "Error during unshelve instance %(instance_id)s: %(reason)s" msgstr "Fehler beim Aufnehmen von Instanz %(instance_id)s: %(reason)s" #, python-format msgid "" "Error from libvirt while getting domain info for %(instance_name)s: [Error " "Code %(error_code)s] %(ex)s" msgstr "" "Fehler von libvirt beim Abrufen der Domäneninformationen für " "%(instance_name)s: [Fehlercode %(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Fehler von libvirt während Suche nach %(instance_name)s: [Fehlercode " "%(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while quiescing %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Fehler von libvirt beim Versetzen in den Quiescemodus %(instance_name)s: " "[Fehlercode %(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while set password for username \"%(user)s\": [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Fehler von libvirt beim Setzen des Passworts für Benutzername \"%(user)s\": " "[Fehlercode %(error_code)s] %(ex)s" #, python-format msgid "" "Error mounting %(device)s to %(dir)s in image %(image)s with libguestfs " "(%(e)s)" msgstr "" "Fehler beim Einhängen %(device)s zu %(dir)s in Abbild %(image)s mit " "libguestfs (%(e)s)" #, python-format msgid "Error mounting %(image)s with libguestfs (%(e)s)" msgstr "Fehler beim Einhängen %(image)s mit libguestfs (%(e)s)" #, python-format msgid "Error when creating resource monitor: %(monitor)s" msgstr "Fehler beim Erstellen von Ressourcenüberwachung: %(monitor)s" #, python-format msgid "Event %(event)s not found for action id %(action_id)s" msgstr "Ereignis %(event)s nicht gefunden für Aktions-ID '%(action_id)s'" msgid "Event must be an instance of nova.virt.event.Event" msgstr "Ereignis muss eine Instanz von 'nova.virt.event.Event' sein" #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for instance " "%(instance_uuid)s. Last exception: %(exc_reason)s" msgstr "" "Maximale Anzahl %(max_attempts)d an Planungsversuchen für die Instanz " "%(instance_uuid)s überschritten. Letzte Ausnahme: %(exc_reason)s" #, python-format msgid "" "Exceeded max scheduling retries %(max_retries)d for instance " "%(instance_uuid)s during live migration" msgstr "" "Maximale Anzahl %(max_retries)d Planungswiederholungen überschritten für " "Instanz %(instance_uuid)s während Livemigration" #, python-format msgid "Exceeded maximum number of retries. %(reason)s" msgstr "Maximale Anzahl der WIederholungen überschritten. %(reason)s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "UUID erwartet, aber %(uuid)s erhalten." 
msgid "Extracting vmdk from OVA failed." msgstr "Extraktion von vmdk aus OVA fehlgeschlagen." #, python-format msgid "Failed to access port %(port_id)s: %(reason)s" msgstr "Zugriff auf Port %(port_id)s fehlgeschlagen: %(reason)s" #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "" "Zuordnung von Netz(en) fehlgeschlagen mit Fehler %s; keine Neuterminierung." msgid "Failed to allocate the network(s), not rescheduling." msgstr "Netz(e) konnte(n) nicht zugeordnet werden; keine Neuterminierung." #, python-format msgid "Failed to attach network adapter device to %(instance_uuid)s" msgstr "" "Anhängen des Netzwerkgeräteadapters an %(instance_uuid)s fehlgeschlagen" #, python-format msgid "Failed to deploy instance: %(reason)s" msgstr "Instanz konnte nicht implementiert werden: %(reason)s" #, python-format msgid "Failed to detach PCI device %(dev)s: %(reason)s" msgstr "Abhängen des PCI Gerätes %(dev)s fehlgeschlagen: %(reason)s" #, python-format msgid "Failed to detach network adapter device from %(instance_uuid)s" msgstr "" "Abhängen des Netzwerkgeräteadapters von %(instance_uuid)s fehlgeschlagen" #, python-format msgid "Failed to encrypt text: %(reason)s" msgstr "Fehler beim Verschlüsseln des Textes: %(reason)s" #, python-format msgid "Failed to launch instances: %(reason)s" msgstr "Fehler beim Starten der Instanz: %(reason)s" #, python-format msgid "Failed to map partitions: %s" msgstr "Partitionen konnten nicht zugeordnet werden: %s" #, python-format msgid "Failed to mount filesystem: %s" msgstr "Fehler beim Anhängen von Dateisystem: %s" #, python-format msgid "Failed to power off instance: %(reason)s" msgstr "Instanz konnte nicht ausgeschaltet werden: %(reason)s" #, python-format msgid "Failed to power on instance: %(reason)s" msgstr "Instanz konnte nicht eingeschaltet werden: %(reason)s" #, python-format msgid "Failed to provision instance %(inst)s: %(reason)s" msgstr "Fehler beim Bereitstellen der Instanz %(inst)s: %(reason)s" #, python-format msgid "Failed to read or write disk info file: %(reason)s" msgstr "" "Datei mit Datenträgerinformationen konnte nicht gelesen oder geschrieben " "werden: %(reason)s" #, python-format msgid "Failed to reboot instance: %(reason)s" msgstr "Instanz konnte nicht neu gestartet werden: %(reason)s" #, python-format msgid "Failed to remove volume(s): (%(reason)s)" msgstr "Löschen Datenträger fehlgeschlagen: (%(reason)s)" #, python-format msgid "Failed to request Ironic to rebuild instance %(inst)s: %(reason)s" msgstr "" "Neuerstellung von Instanz %(inst)s konnte nicht bei Ironic angefordert " "werden: %(reason)s" #, python-format msgid "Failed to resume instance: %(reason)s" msgstr "Instanz konnte nicht wiederaufgenommen werden: %(reason)s" #, python-format msgid "Failed to run qemu-img info on %(path)s : %(error)s" msgstr "qemu-img info konnte nicht ausgeführt werden für %(path)s : %(error)s" #, python-format msgid "Failed to set admin password on %(instance)s because %(reason)s" msgstr "" "Administratorkennwort für %(instance)s konnte nicht festgelegt werden " "aufgrund von %(reason)s" #, python-format msgid "Failed to suspend instance: %(reason)s" msgstr "Instanz konnte nicht ausgesetzt werden: %(reason)s" #, python-format msgid "Failed to terminate instance: %(reason)s" msgstr "Instanz konnte nicht beendet werden: %(reason)s" #, python-format msgid "Failed to unplug virtual interface: %(reason)s" msgstr "Virtuelle Schnittstelle konnte nicht entfernt werden: %(reason)s" msgid "Failure prepping block device." 
msgstr "Fehler beim Vorbereiten des Block-Gerätes." #, python-format msgid "File %(file_path)s could not be found." msgstr "Datei %(file_path)s konnte nicht gefunden werden." #, python-format msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s." msgstr "" "Feste IP %(ip)s ist keine gültige IP Adresse für Netzwerk %(network_id)s." #, python-format msgid "Fixed IP %s is already in use." msgstr "Feste IP %s wird bereits verwendet." #, python-format msgid "" "Fixed IP address %(address)s is already in use on instance %(instance_uuid)s." msgstr "" "Statische IP-Adresse %(address)s wird bereits verwendet in Instanz " "%(instance_uuid)s." #, python-format msgid "Fixed IP not found for address %(address)s." msgstr "Keine feste IP für Adresse %(address)s gefunden." #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "Version %(flavor_id)s konnte nicht gefunden werden." #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s." msgstr "" "Version %(flavor_id)s hat keine Sonderspezifikationen mit Schlüssel " "%(extra_specs_key)s." #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(key)s." msgstr "" "Version %(flavor_id)s hat keine Sonderspezifikationen mit Schlüssel %(key)s." #, python-format msgid "" "Flavor %(id)s extra spec cannot be updated or created after %(retries)d " "retries." msgstr "" "Zusätzliche Spezifikation für die Variante %(id)s kann nach %(retries)d " "Neuversuchen nicht aktualisiert oder erstellt werden." #, python-format msgid "" "Flavor access already exists for flavor %(flavor_id)s and project " "%(project_id)s combination." msgstr "" "Versionszugriff bereits vorhanden für Kombination aus Version %(flavor_id)s " "und Projekt %(project_id)s." #, python-format msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination." msgstr "" "Versionszugriff nicht gefunden für Kombination aus %(flavor_id)s und " "%(project_id)s." msgid "Flavor used by the instance could not be found." msgstr "Die von der Instanz verwendete Version konnte nicht gefunden werden. " #, python-format msgid "Flavor with ID %(flavor_id)s already exists." msgstr "Version mit ID '%(flavor_id)s' ist bereits vorhanden." #, python-format msgid "Flavor with name %(flavor_name)s could not be found." msgstr "Version mit dem Namen %(flavor_name)s konnte nicht gefunden werden." #, python-format msgid "Flavor with name %(name)s already exists." msgstr "Version mit dem Namen '%(name)s' ist bereits vorhanden." #, python-format msgid "" "Flavor's disk is smaller than the minimum size specified in image metadata. " "Flavor disk is %(flavor_size)i bytes, minimum size is %(image_min_disk)i " "bytes." msgstr "" "Variante des Datenträgers ist kleiner als die kleinste Größe der " "spezifizierten Abbild-Metadaten. Variante des Datenträgers ist " "%(flavor_size)i Bytes, kleinste Größe ist %(image_min_disk)i Bytes." #, python-format msgid "" "Flavor's disk is too small for requested image. Flavor disk is " "%(flavor_size)i bytes, image is %(image_size)i bytes." msgstr "" "Variante des Datenträgers ist zu klein für das angefragte Abbild. Variante " "des Datenträgers ist %(flavor_size)i Bytes, Abbild ist %(image_size)i Bytes." msgid "Flavor's memory is too small for requested image." msgstr "Speicher der Version ist zu klein für angefordertes Image." #, python-format msgid "Floating IP %(address)s association has failed." msgstr "Verknüpfung der Floating IP %(address)s ist fehlgeschlagen." 
#, python-format msgid "Floating IP %(address)s is associated." msgstr "Floating IP %(address)s ist zugeordnet." #, python-format msgid "Floating IP %(address)s is not associated with instance %(id)s." msgstr "Floating IP %(address)s ist Instanz %(id)s nicht zugeordnet." #, python-format msgid "Floating IP not found for ID %(id)s." msgstr "Floating IP für ID %(id)s nicht gefunden." #, python-format msgid "Floating IP not found for ID %s" msgstr "Floating IP für ID %s nicht gefunden." #, python-format msgid "Floating IP not found for address %(address)s." msgstr "Floating IP für Adresse %(address)s nicht gefunden." msgid "Floating IP pool not found." msgstr "Pool mit Floating IPs nicht gefunden." msgid "Forbidden" msgstr "Verboten" msgid "" "Forbidden to exceed flavor value of number of serial ports passed in image " "meta." msgstr "" "Es nicht zulässig, den Versionswert der Anzahl der seriellen Ports, die in " "den Imagemetadaten übergeben werden, zu überschreiten. " msgid "Found no disk to snapshot." msgstr "Es wurde keine Platte für eine Momentaufnahme gefunden." msgid "Guest does not have a console available." msgstr "Für Gast ist keine Konsole verfügbar." #, python-format msgid "Host %(host)s could not be found." msgstr "Host %(host)s konnte nicht gefunden werden." #, python-format msgid "Host %(host)s is already mapped to cell %(uuid)s" msgstr "Der Host %(host)s ist bereits der Zelle %(uuid)s zugeorndet." #, python-format msgid "Host '%(name)s' is not mapped to any cell" msgstr "Host '%(name)s' wird nicht abgebildet in irgendeiner Zelle" msgid "Host aggregate is not empty" msgstr "Hostaggregat ist nicht leer" msgid "Host does not support guests with NUMA topology set" msgstr "Host unterstützt keine Gäste mit NUMA-Topologiegruppe" msgid "Host does not support guests with custom memory page sizes" msgstr "" "Host unterstützt keine Gäste mit benutzerdefinierter Speicherseitengröße" msgid "Hypervisor driver does not support post_live_migration_at_source method" msgstr "" "Hypervisortreiber unterstützt die Methode post_live_migration_at_source nicht" #, python-format msgid "Hypervisor virt type '%s' is not valid" msgstr "Der Hypervisor-Virtualisierungstype '%s' ist nicht gültig" #, python-format msgid "Hypervisor virtualization type '%(hv_type)s' is not recognised" msgstr "Der Hypervisor-Virtualisierungstyp '%(hv_type)s' wird nicht erkannt" #, python-format msgid "Hypervisor with ID '%s' could not be found." msgstr "Hypervisor mit ID '%s' konnte nicht gefunden werden. " #, python-format msgid "IP allocation over quota in pool %s." msgstr "IP-Zuordnung über Quote in Pool %s." msgid "IP allocation over quota." msgstr "IP-Zuordnung über Quote." #, python-format msgid "Image %(image_id)s could not be found." msgstr "Abbild %(image_id)s konnte nicht gefunden werden." #, python-format msgid "Image %(image_id)s is not active." msgstr "Abbild %(image_id)s ist nicht aktiv." #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "Image %(image_id)s ist nicht zulässig: %(reason)s" msgid "Image disk size greater than requested disk size" msgstr "" "Abbildfestplattengröße ist größer als die angeforderte Festplattengröße" msgid "Image is not raw format" msgstr "Abbild ist kein Rohformat" msgid "Image metadata limit exceeded" msgstr "Grenzwert für Imagemetadaten überschritten" #, python-format msgid "Image model '%(image)s' is not supported" msgstr "Abbild-Modell '%(image)s' wird nicht unterstützt" msgid "Image not found." msgstr "Abbild nicht gefunden." 
#, python-format msgid "" "Image property '%(name)s' is not permitted to override NUMA configuration " "set against the flavor" msgstr "" "Imageeigenschaft '%(name)s' darf die NUMA-Konfiguration für die Version " "nicht überschreiben" msgid "" "Image property 'hw_cpu_policy' is not permitted to override CPU pinning " "policy set against the flavor" msgstr "" "Image-Eigenschaft 'hw_cpu_policy' darf die CPU-Pinn-Richtlinie, die für die " "Version festgelegt wurde, nicht überschreiben" msgid "" "Image property 'hw_cpu_thread_policy' is not permitted to override CPU " "thread pinning policy set against the flavor" msgstr "" "Die Abbildeigenschaft 'hw_cpu_thread_policy' darf die CPU-Thread-Pinning-" "Richtlinie, die für die Variante festgelegt wurde, nicht überschreiben." msgid "Image that the instance was started with could not be found." msgstr "" "Image, mit dem die Instanz gestartet wurde, konnte nicht gefunden werden." #, python-format msgid "Image's config drive option '%(config_drive)s' is invalid" msgstr "" "Die Konfigurationslaufwerkoption '%(config_drive)s' des Image ist ungültig" msgid "" "Images with destination_type 'volume' need to have a non-zero size specified" msgstr "" "Für Images mit destination_type 'volume' muss eine Größe ungleich null " "angegeben werden" msgid "In ERROR state" msgstr "Im FEHLER-Zustand" #, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "In den Status %(vm_state)s/%(task_state)s, nicht RESIZED/None" #, python-format msgid "In-progress live migration %(id)s is not found for server %(uuid)s." msgstr "" "Die in Bearbeitung befindliche Livemigration %(id)s wurde für den Server " "%(uuid)s nicht gefunden." msgid "" "Incompatible settings: ephemeral storage encryption is supported only for " "LVM images." msgstr "" "Inkompatible Einstellungen: Verschlüsselung für ephemeren Speicher wird nur " "unterstützt für LVM-Images. " #, python-format msgid "Info cache for instance %(instance_uuid)s could not be found." msgstr "Infocache für Instanz %(instance_uuid)s konnte nicht gefunden werden." #, python-format msgid "" "Instance %(instance)s and volume %(vol)s are not in the same " "availability_zone. Instance is in %(ins_zone)s. Volume is in %(vol_zone)s" msgstr "" "Instanz %(instance)s und Datenträger %(vol)s befinden sich nicht in " "derselben availability_zone. Die Instanz befindet sich in %(ins_zone)s. Der " "Datenträger befindet sich in %(vol_zone)s" #, python-format msgid "Instance %(instance)s does not have a port with id %(port)s" msgstr "Instanz %(instance)s weist keinen Port mit ID %(port)s auf" #, python-format msgid "Instance %(instance_id)s cannot be rescued: %(reason)s" msgstr "Instanz %(instance_id)s kann nicht gerettet werden: %(reason)s" #, python-format msgid "Instance %(instance_id)s could not be found." msgstr "Instanz %(instance_id)s konnte nicht gefunden werden." #, python-format msgid "Instance %(instance_id)s has no tag '%(tag)s'" msgstr "Instanz %(instance_id)s weist kein Tag '%(tag)s' auf" #, python-format msgid "Instance %(instance_id)s is not in rescue mode" msgstr "Instanz %(instance_id)s ist nicht im Rettungsmodus" #, python-format msgid "Instance %(instance_id)s is not ready" msgstr "Instanz %(instance_id)s ist nicht bereit" #, python-format msgid "Instance %(instance_id)s is not running." msgstr "Instanz %(instance_id)s läuft nicht." 
#, python-format msgid "Instance %(instance_id)s is unacceptable: %(reason)s" msgstr "Instanz %(instance_id)s ist nicht zulässig: %(reason)s" #, python-format msgid "Instance %(instance_uuid)s does not specify a NUMA topology" msgstr "Instanz %(instance_uuid)s gibt keine NUMA-Topologie an" #, python-format msgid "Instance %(instance_uuid)s does not specify a migration context." msgstr "Instanz %(instance_uuid)s hat keinen spezifischen Migrationskontext." #, python-format msgid "" "Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while " "the instance is in this state." msgstr "" "Instanz %(instance_uuid)s in %(attr)s %(state)s. %(method)s nicht möglich, " "während sich die Instanz in diesem Zustand befindet." #, python-format msgid "Instance %(instance_uuid)s is locked" msgstr "Instanz %(instance_uuid)s ist gesperrt" #, python-format msgid "" "Instance %(instance_uuid)s requires config drive, but it does not exist." msgstr "" "Die Instanz %(instance_uuid)s erfordert ein Konfigurationslaufwerk. Es ist " "jedoch nicht vorhanden." #, python-format msgid "Instance %(name)s already exists." msgstr "Instanz %(name)s bereits vorhanden." #, python-format msgid "Instance %(server_id)s is in an invalid state for '%(action)s'" msgstr "" "Instanz %(server_id)s befindet sich in einem ungültigen Status für " "'%(action)s'" #, python-format msgid "Instance %(uuid)s has no mapping to a cell." msgstr "Instanz %(uuid)s hat keine Zuordung zu einer Zelle." #, python-format msgid "Instance %s not found" msgstr "Instanz %s nicht gefunden" #, python-format msgid "Instance %s provisioning was aborted" msgstr "Instanz %s Provisionierung wurde abgebrochen" msgid "Instance could not be found" msgstr "Instanz konnte nicht gefunden werden" msgid "Instance disk to be encrypted but no context provided" msgstr "" "Verschlüsselung der Platte der Instanz steht an, aber es ist kein Kontext " "angegeben" msgid "Instance event failed" msgstr "Instanzereignis fehlgeschlagen" #, python-format msgid "Instance group %(group_uuid)s already exists." msgstr "Instanzgruppe %(group_uuid)s bereits vorhanden." #, python-format msgid "Instance group %(group_uuid)s could not be found." msgstr "Instanzgruppe %(group_uuid)s konnte nicht gefunden werden." msgid "Instance has no source host" msgstr "Instanz weist keinen Quellenhost auf" msgid "Instance has not been resized." msgstr "Instanzgröße wurde nicht angepasst." #, python-format msgid "Instance hostname %(hostname)s is not a valid DNS name" msgstr "Instanzhostname %(hostname)s ist kein gültiger DNS-Name." msgid "Instance is not a member of specified network" msgstr "Instanz ist nicht Mitglied des angegebenen Netzes" #, python-format msgid "Instance rollback performed due to: %s" msgstr "Instanz-Rollback ausgeführt. Ursache: %s" #, python-format msgid "" "Insufficient Space on Volume Group %(vg)s. Only %(free_space)db available, " "but %(size)d bytes required by volume %(lv)s." msgstr "" "Nicht ausreichend Speicherplatz in Datenträgergruppe %(vg)s. Nur " "%(free_space)db verfügbar, aber %(size)d Bytes für Datenträger %(lv)s " "erforderlich." #, python-format msgid "Insufficient compute resources: %(reason)s." msgstr "Nicht genug Compute Ressourcen: %(reason)s" #, python-format msgid "Interface %(interface)s not found." msgstr "Schnittstelle %(interface)s nicht gefunden." 
#, python-format msgid "Invalid Base 64 data for file %(path)s" msgstr "Ungültige Basis-64-Daten für Datei %(path)s" msgid "Invalid Connection Info" msgstr "Ungültige Verbindungsinformation" #, python-format msgid "Invalid ID received %(id)s." msgstr "Ungültige Kennung erhalten %(id)s." #, python-format msgid "Invalid IP format %s" msgstr "Ungültiges IP-Format %s" #, python-format msgid "Invalid IP protocol %(protocol)s." msgstr "Ungültiges IP Protokoll %(protocol)s." msgid "" "Invalid PCI Whitelist: The PCI whitelist can specify devname or address, but " "not both" msgstr "" "Ungültige PCI-Whitelist: Die PCI-Whitelist kann den Einheitennamen oder die " "Adresse angeben, aber nicht beides" #, python-format msgid "Invalid PCI alias definition: %(reason)s" msgstr "Ungültige PCI-Aliasdefinition: %(reason)s" #, python-format msgid "Invalid Regular Expression %s" msgstr "Ungültiger Regulärer Ausdruck %s" #, python-format msgid "Invalid characters in hostname '%(hostname)s'" msgstr "Ungültige Zeichen im Hostnamen '%(hostname)s'" msgid "Invalid config_drive provided." msgstr "Ungültige Angabe für 'config_drive'." #, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "Ungültiges config_drive_format \"%s\"" #, python-format msgid "Invalid console type %(console_type)s" msgstr "Ungültiger Konsolentyp %(console_type)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Ungültiger Inhaltstyp %(content_type)s." #, python-format msgid "Invalid datetime string: %(reason)s" msgstr "Falsche Datumszeit-Zeichenkette: %(reason)s" msgid "Invalid device UUID." msgstr "Ungültige Gerät-UUID." #, python-format msgid "Invalid entry: '%s'" msgstr "Ungültiger Eintrag: '%s'" #, python-format msgid "Invalid entry: '%s'; Expecting dict" msgstr "Ungültiger Eintrag: '%s'; dict erwartet" #, python-format msgid "Invalid entry: '%s'; Expecting list or dict" msgstr "Ungültiger Eintrag: '%s'; list oder dict erwartet" #, python-format msgid "Invalid exclusion expression %r" msgstr "Ungültiger Ausschlussausdruck: %r" #, python-format msgid "Invalid image format '%(format)s'" msgstr "Ungültiges Abbild-Format '%(format)s'" #, python-format msgid "Invalid image href %(image_href)s." msgstr "Ungültiger Abbild-Hyperlink %(image_href)s." #, python-format msgid "Invalid image metadata. Error: %s" msgstr "Ungültige Abbildmetadaten. Fehler: %s" #, python-format msgid "Invalid inclusion expression %r" msgstr "Ungültiger Einschlussausdruck: %r" #, python-format msgid "" "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s" msgstr "" "Ungültige Eingabe für Feld/Attribut %(path)s. Wert: %(value)s. %(message)s" #, python-format msgid "Invalid input received: %(reason)s" msgstr "Ungültige Eingabe erhalten: %(reason)s" msgid "Invalid instance image." msgstr "Ungültiges Instanzabbild." #, python-format msgid "Invalid is_public filter [%s]" msgstr "'is_public-Filter' [%s] ungültig" msgid "Invalid key_name provided." msgstr "Ungültige Angabe für 'key_name'." 
#, python-format msgid "Invalid memory page size '%(pagesize)s'" msgstr "Ungültige Speicherseitengröße '%(pagesize)s'" msgid "Invalid metadata key" msgstr "Ungültiger Metadatenschlüssel" #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "Ungültige Metadatengröße: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "Ungültige Metadaten: %(reason)s" #, python-format msgid "Invalid minDisk filter [%s]" msgstr "'minDisk-Filter' [%s] ungültig" #, python-format msgid "Invalid minRam filter [%s]" msgstr "Ungültiger minRam-Filter [%s]" #, python-format msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" msgstr "Ungültiger Portbereich %(from_port)s:%(to_port)s. %(msg)s" msgid "Invalid proxy request signature." msgstr "Ungültige Proxy-Anforderungssignatur." #, python-format msgid "Invalid range expression %r" msgstr "Ungültiger Bereichsausdruck %r" msgid "Invalid service catalog json." msgstr "Ungültige Servicekatalog-JSON." msgid "Invalid start time. The start time cannot occur after the end time." msgstr "Ungültige Startzeit. Die Startzeit darf nicht nach der Endzeit liegen." msgid "Invalid state of instance files on shared storage" msgstr "Ungültiger Status der Instanzdateien im gemeinsam genutzten Speicher" msgid "Invalid status value" msgstr "Ungültiger Status Wert" #, python-format msgid "Invalid timestamp for date %s" msgstr "Ungültiger Zeitstempel für Datum %s" #, python-format msgid "Invalid usage_type: %s" msgstr "Ungültiger usage_type: %s" #, python-format msgid "Invalid value for Config Drive option: %(option)s" msgstr "Ungültiger Wert für Konfigurationslaufwerkoption: %(option)s" #, python-format msgid "Invalid virtual interface address %s in request" msgstr "Ungültige virtuelle Schnittstellenadresse %s in der Anforderung" #, python-format msgid "Invalid volume access mode: %(access_mode)s" msgstr "Ungültiger Datenträgerzugriffsmodus: %(access_mode)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "Ungültiger Datenträger: %(reason)s" msgid "Invalid volume_size." msgstr "Ungültige volume_size." #, python-format msgid "Ironic node uuid not supplied to driver for instance %s." msgstr "UUID des Ironic-Knotens nicht angegeben für Treiber für Instanz %s." #, python-format msgid "" "It is not allowed to create an interface on external network %(network_uuid)s" msgstr "" "Das Erstellen einer Schnittstelle auf externem Netz %(network_uuid)s ist " "nicht zulässig" msgid "" "Key Names can only contain alphanumeric characters, periods, dashes, " "underscores, colons and spaces." msgstr "" "Schlüsselnamen dürfen nur alphanumerische Zeichen, Punkte, Gedankenstriche, " "Unterstriche, Doppelpunkte und Leerzeichen enthalten." #, python-format msgid "Key manager error: %(reason)s" msgstr "Schlüsselmanagerfehler: %(reason)s" #, python-format msgid "Key pair '%(key_name)s' already exists." msgstr "Schlüsselpaar '%(key_name)s' bereits vorhanden." #, python-format msgid "Keypair %(name)s not found for user %(user_id)s" msgstr "Schlüsselpaar %(name)s für Benutzer %(user_id)s nicht gefunden" #, python-format msgid "Keypair data is invalid: %(reason)s" msgstr "Schlüsselpaardaten ungültig: %(reason)s" msgid "Limits only supported from vCenter 6.0 and above" msgstr "Grenzwerte werden nur von vCenter ab Version 6.0 unterstützt." #, python-format msgid "Live migration %(id)s for server %(uuid)s is not in progress." msgstr "" "Die Livemigration %(id)s für den Server %(uuid)s ist nicht in Bearbeitung." 
#, python-format msgid "Malformed message body: %(reason)s" msgstr "Fehlerhafter Nachrichtentext: %(reason)s" #, python-format msgid "" "Malformed request URL: URL's project_id '%(project_id)s' doesn't match " "Context's project_id '%(context_project_id)s'" msgstr "" "Fehlerhafte Anforderungs-URL: project_id '%(project_id)s' der URL stimmt " "nicht mit der project_id '%(context_project_id)s' des Kontextes überein" msgid "Malformed request body" msgstr "Fehlerhafter Anforderungshauptteil" msgid "Mapping image to local is not supported." msgstr "Zuordnung von Abbild zu Lokal wird nicht unterstützt." #, python-format msgid "Marker %(marker)s could not be found." msgstr "Marker %(marker)s konnte nicht gefunden werden. " msgid "Maximum number of floating IPs exceeded" msgstr "Maximale Anzahl an Floating IPs überschritten" #, python-format msgid "Maximum number of metadata items exceeds %(allowed)d" msgstr "Maximale Anzahl an Metadatenelementen überschreitet %(allowed)d" msgid "Maximum number of ports exceeded" msgstr "Maximale Anzahl an Ports überschritten" msgid "Maximum number of security groups or rules exceeded" msgstr "Maximale Anzahl an Sicherheitsgruppen oder -regeln überschritten" msgid "Metadata item was not found" msgstr "Metadatenelement wurde nicht gefunden" msgid "Metadata property key greater than 255 characters" msgstr "Metadateneigenschaftenschlüssel größer als 255 Zeichen" msgid "Metadata property value greater than 255 characters" msgstr "Metadateneigenschaftenwert größer als 255 Zeichen" msgid "Metadata type should be dict." msgstr "Metadatentyp sollte 'dict' sein." #, python-format msgid "" "Metric %(name)s could not be found on the compute host node %(host)s." "%(node)s." msgstr "" "Messwert %(name)s konnte auf dem Rechenhostknoten %(host)s.%(node)s nicht " "gefunden werden." #, python-format msgid "Migration %(id)s for server %(uuid)s is not live-migration." msgstr "Die Migration %(id)s für den Server %(uuid)s ist keine Livemigration." #, python-format msgid "Migration %(migration_id)s could not be found." msgstr "Migration %(migration_id)s konnte nicht gefunden werden." #, python-format msgid "Migration %(migration_id)s not found for instance %(instance_id)s" msgstr "Migration %(migration_id)s für Instanz %(instance_id)s nicht gefunden." #, python-format msgid "" "Migration %(migration_id)s state of instance %(instance_uuid)s is %(state)s. " "Cannot %(method)s while the migration is in this state." msgstr "" "Status der Migration %(migration_id)s von Instanz %(instance_uuid)s ist " "%(state)s. %(method)s nicht möglich, während sich die Migration in diesem " "Status befindet. " #, python-format msgid "Migration error: %(reason)s" msgstr "Migrationsfehler: %(reason)s" msgid "Migration is not supported for LVM backed instances" msgstr "Migration wird für LVM-gesicherte Instanzen nicht unterstützt" #, python-format msgid "" "Migration not found for instance %(instance_id)s with status %(status)s." msgstr "" "Migration für Instanz %(instance_id)s mit Status %(status)s nicht gefunden." #, python-format msgid "Migration pre-check error: %(reason)s" msgstr "Fehler bei der Migrationsvorabprüfung: %(reason)s" #, python-format msgid "Migration select destinations error: %(reason)s" msgstr "Fehler für ausgewählte Migrationsziele: %(reason)s" #, python-format msgid "Missing arguments: %s" msgstr "Fehlende Argumente: %s" msgid "Missing device UUID." msgstr "Fehlende Gerät-UUID." 
msgid "Missing disabled reason field" msgstr "Feld für Inaktivierungsgrund fehlt" msgid "Missing forced_down field" msgstr "Feld forced_down wird vermisst" msgid "Missing imageRef attribute" msgstr "Attribut 'imageRef' fehlt" #, python-format msgid "Missing keys: %s" msgstr "Fehlende Schlüssel: %s" msgid "Missing parameter dict" msgstr "Parameter 'dict' fehlt" #, python-format msgid "" "More than one instance is associated with fixed IP address '%(address)s'." msgstr "Der festen IP-Adresse '%(address)s' sind mehrere Instanzen zugeordnet." msgid "" "More than one possible network found. Specify network ID(s) to select which " "one(s) to connect to." msgstr "" "Es wurde mehr als ein mögliches Netz gefunden. Geben Sie die Netz-ID(s) an, " "um auszuwählen, zu welchem Netz die Verbindung hergestellt werden soll." msgid "More than one swap drive requested." msgstr "Mehr als ein Swap-Laufwerk erforderlich." #, python-format msgid "Multi-boot operating system found in %s" msgstr "MultiBoot-Betriebssystem gefunden in %s" msgid "Multiple X-Instance-ID headers found within request." msgstr "Mehrere X-Instance-ID-Header in Anforderung gefunden. " msgid "Multiple X-Tenant-ID headers found within request." msgstr "Mehrere X-Tenant-ID-Header in Anforderung gefunden." #, python-format msgid "Multiple floating IP pools matches found for name '%s'" msgstr "" "Mehrere Übereinstimmungen in Pools dynamischer IP-Adressen gefunden für Name " "'%s'" #, python-format msgid "Multiple floating IPs are found for address %(address)s." msgstr "Mehrere Floating IPs für Adresse %(address)s gefunden." msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we do " "not return uptime for just one host." msgstr "" "Mehrere Hosts können vom VMWare-vCenter-Treiber verwaltet werden; daher " "wurde nicht die Betriebszeit für nur einen Host zurückgegeben." msgid "Multiple possible networks found, use a Network ID to be more specific." msgstr "" "Mehrere mögliche Netze gefunden; verwenden Sie eine Netz-ID, die genauer ist." #, python-format msgid "" "Multiple security groups found matching '%s'. Use an ID to be more specific." msgstr "" "Mehrere mit '%s' übereinstimmende Sicherheitsgruppen gefunden. Verwenden Sie " "zur genaueren Bestimmung eine ID." msgid "Must input network_id when request IP address" msgstr "network_id muss beim Anfordern einer IP-Adresse eingegeben werden" msgid "Must not input both network_id and port_id" msgstr "Es dürfen nicht sowohl network_id als auch port_id eingegeben werden" msgid "" "Must specify host_ip, host_username and host_password to use vmwareapi." "VMwareVCDriver" msgstr "" "Angabe von host_ip, host_username und host_password erforderlich für die " "Verwendung von vmwareapi.VMwareVCDriver" msgid "Must supply a positive value for max-count" msgstr "Für max-count muss ein positiver Wert angegeben werden" msgid "Must supply a positive value for max_number" msgstr "Für max_number muss ein positiver Wert angegeben werden" msgid "Must supply a positive value for max_rows" msgstr "Für max_rows muss ein positiver Wert angegeben werden" #, python-format msgid "Network %(network_id)s could not be found." msgstr "Netzwerk %(network_id)s konnte nicht gefunden werden." #, python-format msgid "" "Network %(network_uuid)s requires a subnet in order to boot instances on." msgstr "" "Netz %(network_uuid)s benötigt ein Teilnetz, damit Instanzen darauf gebootet " "werden können." 
#, python-format msgid "Network could not be found for bridge %(bridge)s" msgstr "Netz konnte für Brücke %(bridge)s nicht gefunden werden" #, python-format msgid "Network could not be found for instance %(instance_id)s." msgstr "Netzwerk konnte für Instanz %(instance_id)s nicht gefunden werden." msgid "Network not found" msgstr "Netzwerk nicht gefunden" msgid "" "Network requires port_security_enabled and subnet associated in order to " "apply security groups." msgstr "" "Netz erfordert 'port_security_enabled' und ein zugeordnetes Teilnetz, damit " "Sicherheitsgruppen zugeordnet werden können." msgid "New volume must be detached in order to swap." msgstr "Neuer Datenträger muss für den Austausch abgehängt werden." msgid "New volume must be the same size or larger." msgstr "Neuer Datenträger muss dieselbe Größe aufweisen oder größer sein." #, python-format msgid "No Block Device Mapping with id %(id)s." msgstr "Keine Block-Geräte-Zuordnung mit ID %(id)s." msgid "No Unique Match Found." msgstr "Keine eindeutige Übereinstimmung gefunden." msgid "No compute host specified" msgstr "Kein Rechenhost angegeben " #, python-format msgid "No configuration information found for operating system %(os_name)s" msgstr "" "Es wurden keine Konfigurationsinformationen für das Betriebssystem " "%(os_name)s gefunden." #, python-format msgid "No device with MAC address %s exists on the VM" msgstr "Kein Gerät mit der MAC-Adresse %s ist auf der VM vorhanden" #, python-format msgid "No device with interface-id %s exists on VM" msgstr "Es ist keine Einheit mit interface-id %s auf VM vorhanden" #, python-format msgid "No disk at %(location)s" msgstr "Kein Datenträger unter %(location)s" #, python-format msgid "No fixed IP addresses available for network: %(net)s" msgstr "Keine statischen IP-Adressen verfügbar für Netz: %(net)s" msgid "No fixed IPs associated to instance" msgstr "Der Instanz sind keine festen IP-Adressen zugeordnet." msgid "No free nbd devices" msgstr "Keine freien nbd-Geräte" msgid "No host available on cluster" msgstr "Kein Host verfügbar auf Cluster" msgid "No hosts found to map to cell, exiting." msgstr "Keine Hosts für die Zuordnung zur Zelle gefunden. Wird beendet." #, python-format msgid "No hypervisor matching '%s' could be found." msgstr "Kein mit '%s' übereinstimmender Hypervisor konnte gefunden werden. " msgid "No image locations are accessible" msgstr "Es sind keine Imagepositionen verfügbar." #, python-format msgid "" "No live migration URI configured and no default available for " "\"%(virt_type)s\" hypervisor virtualization type." msgstr "" "Keine URI für Livemigration konfiguriert und kein Standardwert für den " "Hypervisorvirtualisierungstyp \"%(virt_type)s\" verfügbar." msgid "No more floating IPs available." msgstr "Keine Floating IPs mehr verfügbar." #, python-format msgid "No more floating IPs in pool %s." msgstr "Keine Floating IPs mehr in Pool %s vorhanden." #, python-format msgid "No mount points found in %(root)s of %(image)s" msgstr "Kein Einhängepunkt gefunden in %(root)s von %(image)s" #, python-format msgid "No operating system found in %s" msgstr "Kein Betriebssystem gefunden in %s" msgid "No root disk defined." msgstr "Keine Root-Festplatte bestimmt." #, python-format msgid "" "No specific network was requested and none are available for project " "'%(project_id)s'." msgstr "" "Es wurde kein bestimmtes Netzwerk angefordert und für das Projekt " "'%(project_id)s' ist kein Netzwerk verfügbar." 
msgid "No valid host found for cold migrate" msgstr "Keinen gültigen Host gefunden für Migration ohne Daten" msgid "No valid host found for resize" msgstr "Kein gültiger Host für die Größenänderung gefunden" #, python-format msgid "No valid host was found. %(reason)s" msgstr "Es wurde kein gültiger Host gefunden. %(reason)s" #, python-format msgid "No volume Block Device Mapping at path: %(path)s" msgstr "Keine Datenträger Block-Geräte-Zuordnung im Pfad: %(path)s" #, python-format msgid "No volume Block Device Mapping with id %(volume_id)s." msgstr "Keine Datenträger Block-Geräte-Zuordnung mit ID %(volume_id)s." #, python-format msgid "Node %s could not be found." msgstr "Knoten %s konnte nicht gefunden werden." #, python-format msgid "Not able to acquire a free port for %(host)s" msgstr "Es kann kein freier Port für %(host)s angefordert werden" #, python-format msgid "Not able to bind %(host)s:%(port)d, %(error)s" msgstr "%(host)s:%(port)d kann nicht gebunden werden, %(error)s" #, python-format msgid "" "Not all Virtual Functions of PF %(compute_node_id)s:%(address)s are free." msgstr "" "Nicht alle virtuellen Funktionen von PF %(compute_node_id)s:%(address)s sind " "frei." msgid "Not an rbd snapshot" msgstr "Keine RBD-Momentaufnahme" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "Für Abbild %(image_id)s nicht autorisiert." msgid "Not authorized." msgstr "Nicht berechtigt." msgid "Not enough parameters to build a valid rule." msgstr "Nicht genügend Parameter zum Erstellen einer gültigen Regel." msgid "Not stored in rbd" msgstr "Nicht in RBD gespeichert" msgid "Nothing was archived." msgstr "Es wurde nichts archiviert." #, python-format msgid "Nova requires QEMU version %s or greater." msgstr "Nova erfordert QEMU ab Version %s." #, python-format msgid "Nova requires libvirt version %s or greater." msgstr "Nova erfordert libvirt ab Version %s." msgid "Number of Rows Archived" msgstr "Anzahl der archivierten Zeilen" #, python-format msgid "Object action %(action)s failed because: %(reason)s" msgstr "Objektaktion %(action)s fehlgeschlagen, weil: %(reason)s" msgid "Old volume is attached to a different instance." msgstr "Alter Datenträger ist an eine andere Instanz angehängt." #, python-format msgid "One or more hosts already in availability zone(s) %s" msgstr "Eine oder mehrere Hosts sind schon in Verfügbarkeitszone(n) %s" #, python-format msgid "Only %d SCSI controllers are allowed to be created on this instance." msgstr "Nur %d SCSI-Controller dürfen in dieser Instanz erstellt werden." msgid "Only administrators may list deleted instances" msgstr "Nur Administratoren können gelöschte Instanzen auflisten" msgid "Origin header does not match this host." msgstr "Ursprungsheader stimmt nicht mit diesem Host überein." msgid "Origin header not valid." msgstr "Ursprungsheader nicht gültig." msgid "Origin header protocol does not match this host." msgstr "Ursprungsheaderprotokoll stimmt nicht mit diesem Host überein." #, python-format msgid "PCI Device %(node_id)s:%(address)s not found." msgstr "PCI-Gerät %(node_id)s:%(address)s nicht gefunden." 
#, python-format msgid "PCI alias %(alias)s is not defined" msgstr "PCI-Alias %(alias)s ist nicht definiert" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is %(status)s instead of " "%(hopestatus)s" msgstr "" "PCI-Gerät %(compute_node_id)s:%(address)s ist %(status)s anstatt " "%(hopestatus)s" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead of " "%(hopeowner)s" msgstr "" "PCI Gerät %(compute_node_id)s:%(address)s gehört zu %(owner)s anstatt " "%(hopeowner)s" #, python-format msgid "PCI device %(id)s not found" msgstr "PCI-Gerät %(id)s nicht gefunden" #, python-format msgid "PCI device request %(requests)s failed" msgstr "PCI-Geräteanforderung %(requests)s fehlgeschlagen" #, python-format msgid "Page size %(pagesize)s forbidden against '%(against)s'" msgstr "Seitengröße %(pagesize)s nicht zulässig für '%(against)s'" #, python-format msgid "Page size %(pagesize)s is not supported by the host." msgstr "Seitengröße %(pagesize)s wird vom Host nicht unterstützt." #, python-format msgid "" "Parameters %(missing_params)s not present in vif_details for vif %(vif_id)s. " "Check your Neutron configuration to validate that the macvtap parameters are " "correct." msgstr "" "Parameter %(missing_params)s ist in vif_details für vif %(vif_id)s nicht " "vorhanden. Überprüfen Sie Ihre Neutron Konfiguration, ob der macvtap " "Parameter richtig gesetzt ist." #, python-format msgid "Path %s must be LVM logical volume" msgstr "Pfad '%s' muss sich auf dem logischen LVM-Datenträger befinden" msgid "Paused" msgstr "Pausiert" msgid "Personality file limit exceeded" msgstr "Grenzwert von Persönlichkeitsdatei überschritten" #, python-format msgid "" "Physical Function %(compute_node_id)s:%(address)s, related to VF " "%(compute_node_id)s:%(vf_address)s is %(status)s instead of %(hopestatus)s" msgstr "" "Physische Funktion %(compute_node_id)s:%(address)s in Relation zur VF " "%(compute_node_id)s:%(vf_address)s ist %(status)s anstatt %(hopestatus)s" #, python-format msgid "Physical network is missing for network %(network_uuid)s" msgstr "Physisches Netz für Netz %(network_uuid)s fehlt" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "Richtlinie lässt Ausführung von %(action)s nicht zu." #, python-format msgid "Port %(port_id)s is still in use." msgstr "Port %(port_id)s ist noch im Gebrauch. " #, python-format msgid "Port %(port_id)s not usable for instance %(instance)s." msgstr "Port %(port_id)s ist für Instanz %(instance)s nicht verwendbar." #, python-format msgid "" "Port %(port_id)s not usable for instance %(instance)s. Value %(value)s " "assigned to dns_name attribute does not match instance's hostname " "%(hostname)s" msgstr "" "Der Port %(port_id)s kann für die Instanz %(instance)s nicht verwendet " "werden. Der Wert %(value)s, der dem dns_name-Attribut zugeordnet ist, stimmt " "nicht mit dem Instanzhostnamen %(hostname)s überein." #, python-format msgid "Port %(port_id)s requires a FixedIP in order to be used." msgstr "" "Damit Port %(port_id)s verwendet werden kann, ist eine statische IP-Adresse " "erforderlich." #, python-format msgid "Port %s is not attached" msgstr "Port %s ist nicht angehängt" #, python-format msgid "Port id %(port_id)s could not be found." msgstr "Portkennung %(port_id)s konnte nicht gefunden werden." #, python-format msgid "Provided video model (%(model)s) is not supported." msgstr "Angegebenes Videomodell (%(model)s) wird nicht unterstützt." 
#, python-format msgid "Provided watchdog action (%(action)s) is not supported." msgstr "Angegebene Watchdogaktion (%(action)s) wird nicht unterstützt." msgid "QEMU guest agent is not enabled" msgstr "QEMU-Gastagent nicht aktiviert" #, python-format msgid "Quiescing is not supported in instance %(instance_id)s" msgstr "Stillegen der Instanz %(instance_id)s wird nicht unterstützt" #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "Quotenklasse %(class_name)s konnte nicht gefunden werden." msgid "Quota could not be found" msgstr "Quote konnte nicht gefunden werden" #, python-format msgid "" "Quota exceeded for %(overs)s: Requested %(req)s, but already used %(used)s " "of %(allowed)s %(overs)s" msgstr "" "Kontingent für %(overs)s: Requested %(req)s überschritten, schon %(used)s " "von %(allowed)s %(overs)s benutzt" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Quote für Ressourcen überschritten: %(overs)s" msgid "Quota exceeded, too many key pairs." msgstr "Quote überschritten, zu viele Schlüsselpaare" msgid "Quota exceeded, too many server groups." msgstr "Quote überschritten, zu viele Servergruppen. " msgid "Quota exceeded, too many servers in group" msgstr "Quote überschritten, zu viele Server in Gruppe" #, python-format msgid "Quota exists for project %(project_id)s, resource %(resource)s" msgstr "" "Für Projekt %(project_id)s, Ressource %(resource)s ist eine Quote vorhanden" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "Quote für Projekt %(project_id)s konnte nicht gefunden werden." #, python-format msgid "" "Quota for user %(user_id)s in project %(project_id)s could not be found." msgstr "" "Quote für Benutzer %(user_id)s im Projekt %(project_id)s konnte nicht " "gefunden werden." #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be greater than or equal to " "already used and reserved %(minimum)s." msgstr "" "Quotengrenzwert %(limit)s für %(resource)s muss größer-gleich dem bereits " "verwendeten und reservierten Wert %(minimum)s sein." #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be less than or equal to " "%(maximum)s." msgstr "" "Quotengrenzwert %(limit)s für %(resource)s muss kleiner-gleich %(maximum)s " "sein." msgid "Request body and URI mismatch" msgstr "Abweichung zwischen Anforderungshauptteil und URI" msgid "Request is too large." msgstr "Anforderung ist zu groß." #, python-format msgid "Request of image %(image_id)s got BadRequest response: %(response)s" msgstr "" "Bei der Anforderung des Image %(image_id)s wurde eine BadRequest-Antwort " "empfangen: %(response)s" #, python-format msgid "RequestSpec not found for instance %(instance_uuid)s" msgstr "" "Die Anforderungsspezifikation für die Instanz %(instance_uuid)s wurde nicht " "gefunden." msgid "Requested CPU control policy not supported by host" msgstr "" "Die angeforderte CPU-Steuerungsrichtlinie wird vom Host nicht unterstützt." #, python-format msgid "" "Requested hardware '%(model)s' is not supported by the '%(virt)s' virt driver" msgstr "" "Angeforderte Hardware '%(model)s' wird vom virtuellen Treiber '%(virt)s' " "nicht unterstützt" #, python-format msgid "Requested image %(image)s has automatic disk resize disabled." msgstr "" "Für das angeforderte Image %(image)s wurde die automatische " "Plattengrößenänderung inaktiviert." 
msgid "" "Requested instance NUMA topology cannot fit the given host NUMA topology" msgstr "" "Angeforderte Instanz-NUMA-Topologie passt nicht zur angegebenen Host-NUMA-" "Topologie" msgid "" "Requested instance NUMA topology together with requested PCI devices cannot " "fit the given host NUMA topology" msgstr "" "Angefragte Instanz NUMA Topologie zusammen mit dem angefragten PCI Gerät " "stimmt nicht überein mit dem Host NUMA Topologie" #, python-format msgid "" "Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to " "satisfy for vcpus count %(vcpus)d" msgstr "" "Angeforderte vCPU-Grenzwerte %(sockets)d:%(cores)d:%(threads)d sind für die " "VCPU-Anzahl %(vcpus)d nicht ausreichend" #, python-format msgid "Rescue device does not exist for instance %s" msgstr "Rescue-Einheit ist für Instanz %s nicht vorhanden" #, python-format msgid "Resize error: %(reason)s" msgstr "Größenanpassungsfehler: %(reason)s" msgid "Resize to zero disk flavor is not allowed." msgstr "Größenänderung in Plattengröße null ist nicht zulässig." msgid "Resource could not be found." msgstr "Ressource konnte nicht gefunden werden." msgid "Resumed" msgstr "Fortgesetzt" #, python-format msgid "Root element name should be '%(name)s' not '%(tag)s'" msgstr "Stammelementname muss '%(name)s' lauten, nicht '%(tag)s'" #, python-format msgid "Running batches of %i until complete" msgstr "%i-Batches werden ausgeführt, bis Ausführung abgeschlossen ist." #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "Scheduler-Hostfilter %(filter_name)s konnte nicht gefunden werden." #, python-format msgid "Security group %(name)s is not found for project %(project)s" msgstr "Sicherheitsgruppe %(name)s nicht gefunden für Projekt %(project)s" #, python-format msgid "" "Security group %(security_group_id)s not found for project %(project_id)s." msgstr "" "Sicherheitsgruppe %(security_group_id)s für Projekt %(project_id)s nicht " "gefunden." #, python-format msgid "Security group %(security_group_id)s not found." msgstr "Sicherheitsgruppe %(security_group_id)s nicht gefunden." #, python-format msgid "" "Security group %(security_group_name)s already exists for project " "%(project_id)s." msgstr "" "Sicherheitsgruppe %(security_group_name)s für Projekt %(project_id)s ist " "bereits vorhanden." #, python-format msgid "" "Security group %(security_group_name)s not associated with the instance " "%(instance)s" msgstr "" "Sicherheitsgruppe %(security_group_name)s ist der Instanz %(instance)s nicht " "zugeordnet" msgid "Security group id should be uuid" msgstr "ID von Sicherheitsgruppe sollte eine UUID sein" msgid "Security group name cannot be empty" msgstr "Sicherheitsgruppenname darf nicht leer sein" msgid "Security group not specified" msgstr "Sicherheitsgruppe nicht angegeben" #, python-format msgid "Server %(server_id)s has no tag '%(tag)s'" msgstr "Server %(server_id)s hat kein Tag '%(tag)s'" #, python-format msgid "Server disk was unable to be resized because: %(reason)s" msgstr "" "Größe der Serverplatte konnte nicht geändert werden. 
Ursache: %(reason)s" msgid "Server does not exist" msgstr "Server ist nicht vorhanden" #, python-format msgid "ServerGroup policy is not supported: %(reason)s" msgstr "ServerGroup-Richtlinie wird nicht unterstützt: %(reason)s" msgid "ServerGroupAffinityFilter not configured" msgstr "ServerGroupAffinityFilter nicht konfiguriert" msgid "ServerGroupAntiAffinityFilter not configured" msgstr "ServerGroupAntiAffinityFilter nicht konfiguriert" msgid "ServerGroupSoftAffinityWeigher not configured" msgstr "ServerGroupSoftAffinityWeigher nicht konfiguriert" msgid "ServerGroupSoftAntiAffinityWeigher not configured" msgstr "ServerGroupSoftAntiAffinityWeigher nicht konfiguriert" #, python-format msgid "Service %(service_id)s could not be found." msgstr "Service %(service_id)s konnte nicht gefunden werden." #, python-format msgid "Service %s not found." msgstr "Dienst %s nicht gefunden." msgid "Service is unavailable at this time." msgstr "Service ist derzeit nicht verfügbar." #, python-format msgid "Service with host %(host)s binary %(binary)s exists." msgstr "Service mit Host %(host)s, Binärcode %(binary)s ist bereits vorhanden." #, python-format msgid "Service with host %(host)s topic %(topic)s exists." msgstr "Service mit Host %(host)s, Topic %(topic)s ist bereits vorhanden." msgid "Set admin password is not supported" msgstr "Das Setzen des Admin-Passwortes wird nicht unterstützt" #, python-format msgid "Share '%s' is not supported" msgstr "Das freigegebene Verzeichnis '%s' wird nicht unterstützt" #, python-format msgid "Share level '%s' cannot have share configured" msgstr "Geteilte Ebene '%s' kann keine geteilte Konfiguration haben" #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "Momentaufnahme %(snapshot_id)s konnte nicht gefunden werden." msgid "Some required fields are missing" msgstr "Einige benötigte Felder fehlen" #, python-format msgid "" "Something went wrong when deleting a volume snapshot: rebasing a " "%(protocol)s network disk using qemu-img has not been fully tested" msgstr "" "Beim Löschen einer Datenträgerschattenkopie ist ein Fehler aufgetreten: " "Zurücksetzen einer %(protocol)s-Netzplatte mit 'qemu-img' wurde nicht " "vollständig getestet." msgid "Sort direction size exceeds sort key size" msgstr "Größe der Sortierrichtung überschreitet Sortierschlüsselgröße" msgid "Sort key supplied was not valid." msgstr "Der angegebene Sortierschlüssel war nicht gültig. " msgid "Specified fixed address not assigned to instance" msgstr "Angegebene statische Adresse ist nicht der Instanz zugeordnet" msgid "Started" msgstr "Gestartet" msgid "Stopped" msgstr "Gestoppt" #, python-format msgid "Storage error: %(reason)s" msgstr "Speicherfehler: %(reason)s" #, python-format msgid "Storage policy %s did not match any datastores" msgstr "Speicherrichtlinie %s stimmt mit keinem Datenspeicher überein" msgid "Success" msgstr "Erfolg" msgid "Suspended" msgstr "Ausgesetzt" msgid "Swap drive requested is larger than instance type allows." msgstr "" "Angeforderte Auslagerungsplatte ist größer als der Instanz-Typ erlaubt." msgid "Table" msgstr "Tabelle" #, python-format msgid "Task %(task_name)s is already running on host %(host)s" msgstr "Task %(task_name)s wird bereits auf Host %(host)s ausgeführt" #, python-format msgid "Task %(task_name)s is not running on host %(host)s" msgstr "Task %(task_name)s wird nicht auf Host %(host)s ausgeführt" #, python-format msgid "The PCI address %(address)s has an incorrect format." msgstr "Die PCI-Adresse %(address)s hat ein falsches Format." 
#, python-format msgid "The console port range %(min_port)d-%(max_port)d is exhausted." msgstr "Der Konsolenportbereich %(min_port)d-%(max_port)d ist ausgeschöpft." msgid "The created instance's disk would be too small." msgstr "Der erstellte Datenträger für die Instanz würde zu klein sein." msgid "The current driver does not support preserving ephemeral partitions." msgstr "" "Das Beibehalten ephemerer Partitionen wird vom aktuellen Treiber nicht " "unterstützt." msgid "The default PBM policy doesn't exist on the backend." msgstr "Die Standard-PBM-Richtlinie ist auf dem Back-End nicht vorhanden." msgid "The floating IP request failed with a BadRequest" msgstr "" "Die Anfrage für dynamische IP-Adresse ist fehlgeschlagen mit BadRequest" #, python-format msgid "" "The format of the option 'reserved_huge_pages' is invalid. (found " "'%(conf)s') Please refer to the nova config-reference." msgstr "" "Das Format der Option 'reserved_huge_pages' ist ungültig ('%(conf)s' " "gefunden). Ziehen Sie die Nova-Konfigurationsreferenz zu Rate." msgid "" "The instance requires a newer hypervisor version than has been provided." msgstr "" "Die Instanz erfordert eine neuere als die bereitgestellte Hypervisorversion." #, python-format msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d" msgstr "" "Die Anzahl der definierten Ports (%(ports)d) ist über dem Limit: %(quota)d" #, python-format msgid "The provided RNG device path: (%(path)s) is not present on the host." msgstr "" "Der angegebene RNG Gerätepfad: (%(path)s) existiert nicht auf dem Host." msgid "The request is invalid." msgstr "Die Anfrage ist ungültig." #, python-format msgid "" "The requested amount of video memory %(req_vram)d is higher than the maximum " "allowed by flavor %(max_vram)d." msgstr "" "Die angeforderte Menge an Bildspeicher %(req_vram)d ist größer als der für " "Version %(max_vram)d zulässige Höchstwert." msgid "The requested availability zone is not available" msgstr "Die angeforderte Verfügbarkeitszone ist nicht verfügbar" msgid "The requested functionality is not supported." msgstr "Die angeforderte Funktionalität wird nicht unterstützt." #, python-format msgid "The specified cluster '%s' was not found in vCenter" msgstr "Der angegebene Cluster '%s' wurde im vCenter nicht gefunden" #, python-format msgid "The supplied device path (%(path)s) is in use." msgstr "Der gelieferte Gerätepfad (%(path)s) ist in Benutzung." #, python-format msgid "The supplied device path (%(path)s) is invalid." msgstr "Der gelieferte Gerätepfad (%(path)s) ist ungültig." #, python-format msgid "" "The supplied disk path (%(path)s) already exists, it is expected not to " "exist." msgstr "" "Der angegebene Plattenpfad (%(path)s) ist bereits vorhanden. Er sollte nicht " "vorhanden sein." msgid "The supplied hypervisor type of is invalid." msgstr "Der gelieferte Hypervisor-Typ ist ungültig." msgid "The target host can't be the same one." msgstr "Der Zielhost kann nicht der gleiche sein." #, python-format msgid "The token '%(token)s' is invalid or has expired" msgstr "Das Token '%(token)s' ist ungültig oder abgelaufen" #, python-format msgid "" "The volume cannot be assigned the same device name as the root device %s" msgstr "" "Dem Datenträger kann nicht derselbe Gerätename wie dem Root-Gerät %s " "zugewiesen werden" msgid "There are not enough hosts available." msgstr "Es sind nicht genügend Hosts verfügbar." 
#, python-format msgid "There is no such action: %s" msgstr "Aktion existiert nicht: %s" #, python-format msgid "" "This compute node's hypervisor is older than the minimum supported version: " "%(version)s." msgstr "" "Die Hypervisorversion des Compute-Knotens ist älter als die Version, die für " "die Mindestunterstützung erforderlich ist: %(version)s." msgid "" "This method needs to be called with either networks=None and port_ids=None " "or port_ids and networks as not none." msgstr "" "Diese Methode muss entweder mit Angabe von 'networks=None' und " "'port_ids=None' oder 'port_ids' und 'networks' mit einem Wert ungleich " "'None' aufgerufen werden. " #, python-format msgid "This rule already exists in group %s" msgstr "Diese Regel ist in Gruppe %s bereits vorhanden" #, python-format msgid "" "This service is older (v%(thisver)i) than the minimum (v%(minver)i) version " "of the rest of the deployment. Unable to continue." msgstr "" "Dieser Dienst ist älter (v%(thisver)i) als die Mindestversion (v%(minver)i) " "der übrigen Implementierung. Fortfahren nicht möglich. " msgid "Timeout waiting for response from cell" msgstr "Zeitüberschreitung beim Warten auf Antwort von der Zelle" #, python-format msgid "Timeout while checking if we can live migrate to host: %s" msgstr "Zeitüberschreitung bei der Überprüfung der Live-Migration zu Host: %s" msgid "To and From ports must be integers" msgstr "Eingangs- und Ausgangsports müssen Ganzzahlen sein" msgid "Token not found" msgstr "Token nicht gefunden" msgid "Triggering crash dump is not supported" msgstr "Auslösen von Absturzabbildern wird nicht unterstützt. " msgid "Type and Code must be integers for ICMP protocol type" msgstr "Typ und Code müssen für den ICMP-Protokolltyp Ganzzahlen sein" msgid "UEFI is not supported" msgstr "UEFI wird nicht unterstützt." #, python-format msgid "" "Unable to associate floating IP %(address)s to any fixed IPs for instance " "%(id)s. Instance has no fixed IPv4 addresses to associate." msgstr "" "Verknüpfung der Floating IP %(address)s zu irgendeiner festen IP-Adresse für " "Instanz %(id)s fehlgeschlagen. Instanz hat keine feste IPv4-Adresse." #, python-format msgid "" "Unable to associate floating IP %(address)s to fixed IP %(fixed_address)s " "for instance %(id)s. Error: %(error)s" msgstr "" "Floating IP %(address)s kann nicht der festen IP-Adresse %(fixed_address)s " "für Instanz %(id)s zugeordnet werden. 
Fehler: %(error)s" #, python-format msgid "Unable to convert image to %(format)s: %(exp)s" msgstr "Abbild kann nicht konvertiert werden in %(format)s: %(exp)s" #, python-format msgid "Unable to convert image to raw: %(exp)s" msgstr "Abbild kann nicht in ein Rohformat konvertiert werden: %(exp)s" #, python-format msgid "Unable to determine disk bus for '%s'" msgstr "Plattenbus für '%s' kann nicht bestimmt werden" #, python-format msgid "Unable to determine disk prefix for %s" msgstr "Plattenpräfix für %s kann nicht bestimmt werden" #, python-format msgid "Unable to find host for Instance %s" msgstr "Host für Instanz %s kann nicht gefunden werden" msgid "Unable to find iSCSI Target" msgstr "iSCSI-Ziel konnte nicht gefunden werden" msgid "Unable to find volume" msgstr "Datenträger kann nicht gefunden werden" msgid "Unable to get host UUID: /etc/machine-id does not exist" msgstr "Host UUID kann nicht abgerufen werden: /etc/machine-id existiert nicht" msgid "Unable to get host UUID: /etc/machine-id is empty" msgstr "Host UUID kann nicht abgerufen werden: /etc/machine-id ist leer" msgid "" "Unable to launch multiple instances with a single configured port ID. Please " "launch your instance one by one with different ports." msgstr "" "Es können nicht mehrere Instanzen mit einer einzelnen konfigurierten Port-ID " "gestartet werden. Starten Sie Ihre Instanzen nacheinander mit " "unterschiedlichen Ports." #, python-format msgid "" "Unable to migrate %(instance_uuid)s to %(dest)s: Lack of memory(host:" "%(avail)s <= instance:%(mem_inst)s)" msgstr "" "%(instance_uuid)s kann nicht nach %(dest)s migriert werden: Mangel an " "Speicher (Host:%(avail)s <= Instanz:%(mem_inst)s)" #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too large(available " "on destination host:%(available)s < need:%(necessary)s)" msgstr "" "%(instance_uuid)s kann nicht migriert werden: Platte der Instanz ist zu groß " "(verfügbar auf Zielhost: %(available)s < Bedarf:%(necessary)s)" #, python-format msgid "" "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." msgstr "" "Instanz (%(instance_id)s) kann nicht auf aktuellen Host (%(host)s) migriert " "werden." msgid "Unable to resize disk down." msgstr "Plattengröße kann nicht verkleinert werden." msgid "Unable to set password on instance" msgstr "Es kann kein Kennwort für die Instanz festgelegt werden" msgid "Unable to shrink disk." msgstr "Platte kann nicht verkleinert werden." #, python-format msgid "Unacceptable CPU info: %(reason)s" msgstr "Unzulässige CPU Information: %(reason)s" msgid "Unacceptable parameters." msgstr "Inakzeptable Parameter." #, python-format msgid "Unavailable console type %(console_type)s." msgstr "Nicht verfügbarer Konsolentyp %(console_type)s." msgid "" "Undefined Block Device Mapping root: BlockDeviceMappingList contains Block " "Device Mappings from multiple instances." msgstr "" "Nicht definiertes Stammverzeichnis für Blockgerätezuordnung: " "BlockDeviceMappingList enthält Blockgerätezuordnungen aus mehreren Instanzen." 
#, python-format msgid "Unexpected aggregate action %s" msgstr "Unerwartete Aggregataktion %s" msgid "Unexpected type adding stats" msgstr "Unerwarteter Typ beim Hinzufügen von Statistiken" #, python-format msgid "Unexpected vif_type=%s" msgstr "Unerwarteter vif_type=%s" msgid "Unknown" msgstr "Unbekannt" msgid "Unknown action" msgstr "Unbekannte Aktion" #, python-format msgid "Unknown auth type: %s" msgstr "Unbekannter Authentifizierungstyp: %s" #, python-format msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat." msgstr "" "Unbekanntes Format %(format)s des Konfigurationslaufwerks. Wählen Sie " "entweder 'iso9660' oder 'vfat' aus." #, python-format msgid "Unknown delete_info type %s" msgstr "Unbekannter delete_info-Typ %s" #, python-format msgid "Unknown image_type=%s" msgstr "Unbekannter image_type=%s" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Unbekannte Quotenressourcen %(unknown)s." msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "" "Unbekannte Sortierrichtung; muss 'desc' (absteigend) oder " "'asc' (aufsteigend) sein" #, python-format msgid "Unknown type: %s" msgstr "Unbekannter Typ: %s" msgid "Unrecognized legacy format." msgstr "Nicht erkanntes Altformat." #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "Nicht erkannter read_deleted-Wert '%s'" #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "Nicht erkannter Wert '%s' für 'CONF.running_deleted_instance_action'" #, python-format msgid "Unshelve attempted but the image %s cannot be found." msgstr "Aufnahme wurde versucht, aber das Image %s kann nicht gefunden werden." msgid "Unsupported Content-Type" msgstr "Nicht unterstützter Inhaltstyp" #, python-format msgid "User %(username)s not found in password file." msgstr "Benutzer %(username)s in Kennwortdatei nicht gefunden." #, python-format msgid "User %(username)s not found in shadow file." msgstr "Benutzer %(username)s in Spiegeldatei nicht gefunden." msgid "User data needs to be valid base 64." msgstr "Benutzerdaten müssen gültige Base64-Daten sein. " msgid "User does not have admin privileges" msgstr "Benutzer hat keine Admin Rechte." msgid "" "Using different block_device_mapping syntaxes is not allowed in the same " "request." msgstr "" "Benutzung unterschiedlicher Block_Geräte_Zuordnung-Schreibweisen ist nicht " "erlaubt in der selben Anfrage." #, python-format msgid "Value must match %s" msgstr "Wert muss %s entsprechen" #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "Version %(req_ver)s wird von der API nicht unterstützt. Minimum ist " "%(min_ver)s und Maximum ist %(max_ver)s." msgid "Virtual Interface creation failed" msgstr "Virtuelle Schnittstellenerstellung fehlgeschlagen" msgid "Virtual interface plugin failed" msgstr "Virtuelles Schnittstellen-Plugin fehlgeschlagen" #, python-format msgid "Virtual machine mode '%(vmmode)s' is not recognised" msgstr "Der Modus '%(vmmode)s' der virtuellen Maschine wird nicht erkannt" #, python-format msgid "Virtual machine mode '%s' is not valid" msgstr "Der Modus '%s' der virtuellen Machine ist nicht gültig" #, python-format msgid "Virtualization type '%(virt)s' is not supported by this compute driver" msgstr "" "Virtualisierungstyp '%(virt)s' wird von diesem Rechentreiber nicht " "unterstützt" #, python-format msgid "Volume %(volume_id)s could not be attached. 
Reason: %(reason)s" msgstr "" "Datenträger %(volume_id)s konnte nicht angehängt werden. Ursache: %(reason)s" #, python-format msgid "Volume %(volume_id)s could not be detached. Reason: %(reason)s" msgstr "" "Datenträger %(volume_id)s konnte nicht abgehängt werden. Ursache: %(reason)s" #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "Datenträger %(volume_id)s konnte nicht gefunden werden." #, python-format msgid "" "Volume %(volume_id)s did not finish being created even after we waited " "%(seconds)s seconds or %(attempts)s attempts. And its status is " "%(volume_status)s." msgstr "" "Datenträger %(volume_id)s wurde nicht erstellt nach einer Wartezeit von " "%(seconds)s Sekunden oder %(attempts)s Versuchen. Und der Status ist " "%(volume_status)s." msgid "Volume does not belong to the requested instance." msgstr "Datenträger gehört nicht zur angeforderten Instanz." #, python-format msgid "" "Volume encryption is not supported for %(volume_type)s volume %(volume_id)s" msgstr "" "Datenträgerverschlüsselung wird nicht unterstützt für %(volume_type)s " "Datenträger %(volume_id)s" #, python-format msgid "" "Volume is smaller than the minimum size specified in image metadata. Volume " "size is %(volume_size)i bytes, minimum size is %(image_min_disk)i bytes." msgstr "" "Datenträger ist kleiner als die minimale Größe spezifiziert in den Abbild-" "Metadaten. Datenträgergröße ist %(volume_size)i Bytes, minimale Größe ist " "%(image_min_disk)i Bytes." #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " "support custom block size" msgstr "" "Datenträger legt Blockgröße fest, aber der aktuelle libvirt-Hypervisor %s " "unterstützt keine angepassten Blockgrößen" #, python-format msgid "Volume type %(id_or_name)s could not be found." msgstr "Datenträgertyp %(id_or_name)s wurde nicht gefunden." msgid "When resizing, instances must change flavor!" msgstr "Beim Ändern der Größe muss die Version der Instanzen geändert werden!" #, python-format msgid "Wrong quota method %(method)s used on resource %(res)s" msgstr "Falsche Quotenmethode %(method)s für Ressource %(res)s verwendet" msgid "X-Forwarded-For is missing from request." msgstr "X-Forwarded-For wird in der Anfrage vermisst." msgid "X-Instance-ID header is missing from request." msgstr "X-Instance-ID-Header fehlt in Anforderung. " msgid "X-Instance-ID-Signature header is missing from request." msgstr "X-Instance-ID-Signature-Header fehlt in Anforderung." msgid "X-Metadata-Provider is missing from request." msgstr "X-Metadata-Provider wird in der Anfrage vermisst." msgid "X-Tenant-ID header is missing from request." msgstr "X-Tenant-ID-Header fehlt in Anforderung." msgid "You are not allowed to delete the image." msgstr "Sie sind nicht berechtigt, dieses Image zu löschen." msgid "" "You are not authorized to access the image the instance was started with." msgstr "" "Sie sind nicht berechtigt, auf das Image zuzugreifen, mit dem die Instanz " "gestartet wurde." msgid "You must implement __call__" msgstr "Sie müssen '__call__' implementieren" msgid "You should specify images_rbd_pool flag to use rbd images." msgstr "" "Sie sollten das Flag 'images_rbd_pool' angeben, um RBD-Images zu verwenden" msgid "You should specify images_volume_group flag to use LVM images." msgstr "" "Sie sollten das Flag 'images_volume_group' angeben, um LVM-Images zu " "verwenden" msgid "Zero floating IPs available." msgstr "Keine Floating IPs verfügbar." 
msgid "admin password can't be changed on existing disk" msgstr "" "Das Administrator Passwort kann nicht auf der bestehenden Festplatte " "geändert werden" msgid "cannot understand JSON" msgstr "kann JSON nicht verstehen" msgid "clone() is not implemented" msgstr "clone() ist nicht implementiert" #, python-format msgid "connect info: %s" msgstr "Verbindungsinfo: %s" #, python-format msgid "connecting to: %(host)s:%(port)s" msgstr "verbinden mit: %(host)s:%(port)s" msgid "direct_snapshot() is not implemented" msgstr "direct_snapshot() ist nicht implementiert" #, python-format msgid "disk type '%s' not supported" msgstr "Festplattentyp '%s' nicht unterstützt" #, python-format msgid "empty project id for instance %s" msgstr "leere Projektkennung für Instanz %s" msgid "error setting admin password" msgstr "Fehler beim Festlegen des Administratorkennworts" #, python-format msgid "error: %s" msgstr "Fehler: %s" #, python-format msgid "failed to generate X509 fingerprint. Error message: %s" msgstr "Generierung des X509 Fingerabdrucks fehlgeschlagen. Fehlermeldung: %s" msgid "failed to generate fingerprint" msgstr "Erzeugen des Fingerabdrucks ist fehlgeschlagen" msgid "filename cannot be None" msgstr "Dateiname darf nicht 'None' sein" msgid "floating IP is already associated" msgstr "Die Floating IP ist bereits zugeordnet." msgid "floating IP not found" msgstr "Floating IP nicht gefunden" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s gesichert durch: %(backing_file)s" #, python-format msgid "href %s does not contain version" msgstr "Hyperlink %s enthält Version nicht" msgid "image already mounted" msgstr "Abbild bereits eingehängt" #, python-format msgid "instance %s is not running" msgstr "Instanz %s wird nicht ausgeführt" msgid "instance is a required argument to use @refresh_cache" msgstr "" "Instanz ist ein erforderliches Argument für die Verwendung von " "'@refresh_cache'" msgid "instance is not in a suspended state" msgstr "Instanz ist nicht im Aussetzstatus" msgid "instance is not powered on" msgstr "Instanz ist nicht eingeschaltet" msgid "instance is powered off and cannot be suspended." msgstr "Instanz ist ausgeschaltet und kann nicht ausgesetzt werden. " #, python-format msgid "instance_id %s could not be found as device id on any ports" msgstr "instance_id %s konnte auf keinem Port als Einheiten-ID gefunden werden" msgid "is_public must be a boolean" msgstr "'is_public' muss ein boolescher Wert sein" msgid "keymgr.fixed_key not defined" msgstr "keymgr.fixed_key nicht definiert" msgid "l3driver call to add floating IP failed" msgstr "'l3driver'-Aufruf zum Hinzufügen einer Floating IP ist fehlgeschlagen." #, python-format msgid "libguestfs installed but not usable (%s)" msgstr "libguestfs installiert, aber nicht benutzbar (%s)" #, python-format msgid "libguestfs is not installed (%s)" msgstr "libguestfs ist nicht installiert (%s)" #, python-format msgid "marker [%s] not found" msgstr "Marker [%s] nicht gefunden" #, python-format msgid "max rows must be <= %(max_value)d" msgstr "max. Zeilen müssen <= %(max_value)d sein" msgid "max_count cannot be greater than 1 if an fixed_ip is specified." msgstr "" "max_count darf nicht größer als 1 sein, wenn eine fixed_ip angegeben ist." 
msgid "min_count must be <= max_count" msgstr "'min_count' muss <= 'max_count' sein" #, python-format msgid "nbd device %s did not show up" msgstr "NBD-Einheit %s wurde nicht angezeigt" msgid "nbd unavailable: module not loaded" msgstr "nbd nicht verfügbar: Modul nicht geladen" #, python-format msgid "no match found for %s" msgstr "keine Übereinstimmung gefunden für %s" #, python-format msgid "no usable parent snapshot for volume %s" msgstr "Kein verwendbares übergeordnetes Abbild für Datenträger %s" #, python-format msgid "no write permission on storage pool %s" msgstr "Keine Schreibberechtigung für Speicherpool %s" #, python-format msgid "not able to execute ssh command: %s" msgstr "SSH-Befehl kann nicht ausgeführt werden: %s" msgid "old style configuration can use only dictionary or memcached backends" msgstr "" "Alter Konfigurationsstil kann nur Verzeichnis- oder memcached-Backends " "verwenden." msgid "operation time out" msgstr "Vorgangszeitüberschreitung" #, python-format msgid "partition %s not found" msgstr "Partition %s nicht gefunden" #, python-format msgid "partition search unsupported with %s" msgstr "Partitionssuche nicht unterstützt mit %s" msgid "pause not supported for vmwareapi" msgstr "'pause' nicht unterstützt für 'vmwareapi'" msgid "printable characters with at least one non space character" msgstr "" "Druckbare Zeichen mit mindestens einem Zeichen, das kein Leerzeichen ist." msgid "printable characters. Can not start or end with whitespace." msgstr "Druckbare Zeichen. Keine Leerzeichen davor oder danach zulässig." #, python-format msgid "qemu-img failed to execute on %(path)s : %(exp)s" msgstr "qemu-img konnte nicht ausgeführt werden für %(path)s : %(exp)s" #, python-format msgid "qemu-nbd error: %s" msgstr "qemu-nbd-Fehler: %s" msgid "rbd python libraries not found" msgstr "rbd Python-Bibliotheken nicht gefunden" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "'read_deleted' kann nur 'no', 'yes' oder 'only' sein, nicht '%r'" msgid "serve() can only be called once" msgstr "serve() kann nur einmal aufgerufen werden." msgid "service is a mandatory argument for DB based ServiceGroup driver" msgstr "" "Service ist ein obligatorisches Argument für den datenbankbasierten " "ServiceGroup-Treiber" msgid "service is a mandatory argument for Memcached based ServiceGroup driver" msgstr "" "Service ist ein obligatorisches Argument für den Memcached-basierten " "ServiceGroup-Treiber" msgid "set_admin_password is not implemented by this driver or guest instance." 
msgstr "" "'set_admin_password' wird von diesem Treiber oder dieser Gastinstanz nicht " "implementiert" #, python-format msgid "snapshot for %s" msgstr "Momentaufnahme für %s" msgid "snapshot_id required in create_info" msgstr "snapshot_id in create_info erforderlich" msgid "token not provided" msgstr "Token nicht angegeben" msgid "too many body keys" msgstr "zu viele Textschlüssel" msgid "unpause not supported for vmwareapi" msgstr "'unpause' nicht unterstützt für 'vmwareapi'" #, python-format msgid "vg %s must be LVM volume group" msgstr "Datenträgergruppe '%s' muss sich in LVM-Datenträgergruppe befinden" #, python-format msgid "vhostuser_sock_path not present in vif_details for vif %(vif_id)s" msgstr "vhostuser_sock_path nicht vorhanden in vif_details für vif %(vif_id)s" #, python-format msgid "vif type %s not supported" msgstr "Vif-Typ %s nicht unterstütztnot supported" msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "" "Parameter 'vif_type' muss für diese vif_driver-Implementierung vorhanden sein" #, python-format msgid "volume %s already attached" msgstr "Datenträger %s ist bereits angehängt" #, python-format msgid "" "volume '%(vol)s' status must be 'in-use'. Currently in '%(status)s' status" msgstr "" "Status von Datenträger '%(vol)s' muss 'in-use' lauten. Weist derzeit den " "Status '%(status)s' auf" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315688.893605 nova-32.0.0/nova/locale/es/0000775000175000017500000000000000000000000015373 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3696086 nova-32.0.0/nova/locale/es/LC_MESSAGES/0000775000175000017500000000000000000000000017160 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/locale/es/LC_MESSAGES/nova.po0000664000175000017500000027410400000000000020473 0ustar00zuulzuul00000000000000# Translations template for nova. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the nova project. # # Translators: # Adriana Chisco Landazábal , 2015 # Alberto Molina Coballes , 2012-2014 # Ying Chun Guo , 2013 # David Martinez Morata, 2014 # FIRST AUTHOR , 2011 # Jose Ramirez Garcia , 2014 # Edgar Carballo , 2013 # Pablo Sanchez , 2015 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-07-04 18:13+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:05+0000\n" "Last-Translator: Copied by Zanata \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Spanish\n" #, python-format msgid "%(address)s is not a valid IP v4/6 address." msgstr "%(address)s no es una direccion IP v4/6 valida" #, python-format msgid "" "%(binary)s attempted direct database access which is not allowed by policy" msgstr "" "%(binary)s ha intentado un acceso de bases de datos directo que no está " "permitido por la política." #, python-format msgid "%(cidr)s is not a valid IP network." msgstr "%(cidr)s no es una red de IP válida." #, python-format msgid "%(field)s should not be part of the updates." 
msgstr "%(field)s on debería formar parte de las actualizaciones." #, python-format msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB" msgstr "" "Se han asignado %(memsize)d MB de memoria, pero se esperaban %(memtotal)d MB" #, python-format msgid "%(path)s is not on local storage: %(reason)s" msgstr "%(path)s no está en un almacenamiento local: %(reason)s" #, python-format msgid "%(path)s is not on shared storage: %(reason)s" msgstr "%(path)s no está en un almacenamiento compartido: %(reason)s" #, python-format msgid "%(total)i rows matched query %(meth)s, %(done)i migrated" msgstr "" "%(total)i filas han coincidido con la consulta %(meth)s, %(done)i migradas" #, python-format msgid "%(type)s hypervisor does not support PCI devices" msgstr "El hipervisor %(type)s no soporta dispositivos PCI" #, python-format msgid "%s does not support disk hotplug." msgstr "%s no soporta hotplug de disco." #, python-format msgid "%s format is not supported" msgstr "No se soporta formato %s" #, python-format msgid "%s is not supported." msgstr "%s no está soportada." #, python-format msgid "%s must be either 'MANUAL' or 'AUTO'." msgstr "%s debe ser 'MANUAL' o 'AUTO'." #, python-format msgid "'%(other)s' should be an instance of '%(cls)s'" msgstr "'%(other)s' debería ser una instancia de '%(cls)s'." msgid "'qemu-img info' parsing failed." msgstr "Se ha encontrado un error en el análisis de 'qemu-img info'." #, python-format msgid "'rxtx_factor' argument must be a float between 0 and %g" msgstr "El argumento 'rxtx_factor' debe ser un flotante entre 0 y %g" #, python-format msgid "A NetworkModel is required in field %s" msgstr "Se requiere un NetworkModel en campo %s" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "Secuencia API Versión %(version)s tiene un formato no válido. Debe ser un " "formato MajorNum.MinorNum." #, python-format msgid "API version %(version)s is not supported on this method." msgstr "Versión API %(version)s, no soportada en este método." msgid "Access list not available for public flavors." msgstr "La lista de acceso no está disponible para tipos públicos. " #, python-format msgid "Action %s not found" msgstr "Acción %s no encontrada" #, python-format msgid "" "Action for request_id %(request_id)s on instance %(instance_uuid)s not found" msgstr "" "La acción para request_id %(request_id)s en la instancia %(instance_uuid)s " "no se ha encontrado." #, python-format msgid "Action: '%(action)s', calling method: %(meth)s, body: %(body)s" msgstr "Acción: '%(action)s', método de llamada: %(meth)s, cuerpo: %(body)s" #, python-format msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries" msgstr "" "Fallo en adición de metadata para el agregado %(id)s después de %(retries)s " "intentos" #, python-format msgid "Aggregate %(aggregate_id)s already has host %(host)s." msgstr "El agregado %(aggregate_id)s ya tiene el host %(host)s." #, python-format msgid "Aggregate %(aggregate_id)s could not be found." msgstr "No se ha podido encontrar el agregado %(aggregate_id)s." #, python-format msgid "Aggregate %(aggregate_id)s has no host %(host)s." msgstr "El agregado %(aggregate_id)s no tiene ningún host %(host)s." #, python-format msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." msgstr "" "El agregado %(aggregate_id)s no tiene metadatos con la clave " "%(metadata_key)s." #, python-format msgid "Aggregate %(aggregate_name)s already exists." 
msgstr "El agregado %(aggregate_name)s ya existe." #, python-format msgid "Aggregate %s does not support empty named availability zone" msgstr "" "El agregado %s no admite una zona de disponibilidad con el nombre vacío" #, python-format msgid "An invalid 'name' value was provided. The name must be: %(reason)s" msgstr "" "Se ha proporcionado un valor no válido en el campo 'name'. El nombre debe " "ser: %(reason)s" msgid "An unknown error has occurred. Please try your request again." msgstr "" "Ha sucedido un error desconocido. Por favor repite el intento de nuevo." msgid "An unknown exception occurred." msgstr "Una excepción desconocida ha ocurrido" #, python-format msgid "Architecture name '%(arch)s' is not recognised" msgstr "No se reconoce el nombre de la Arquitectura '%(arch)s'" #, python-format msgid "Architecture name '%s' is not valid" msgstr "El nombre de la Arquitectura '%s' no es válido" #, python-format msgid "" "Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty pool" msgstr "" "Intento de consumir dispositivo PCI %(compute_node_id)s:%(address)s de pool " "vacío" msgid "Attempted overwrite of an existing value." msgstr "Se ha intentado sobreescribir un valor ya existente." #, python-format msgid "Attribute not supported: %(attr)s" msgstr "Atributo no soportado: %(attr)s" #, python-format msgid "Bad network format: missing %s" msgstr "Formato de red erróneo: falta %s" msgid "Bad networks format" msgstr "Formato de redes erróneo" #, python-format msgid "Bad networks format: network uuid is not in proper format (%s)" msgstr "" "Formato incorrecto de redes: el uuid de red no está en el formato correcto " "(%s) " #, python-format msgid "Bad prefix for network in cidr %s" msgstr "Prefijo erróneo para red en cidr %s" #, python-format msgid "" "Binding failed for port %(port_id)s, please check neutron logs for more " "information." msgstr "" "El enlace ha fallado para el puerto %(port_id)s, compruebe los registros de " "neutron para más información." msgid "Blank components" msgstr "Componentes en blanco" msgid "" "Blank volumes (source: 'blank', dest: 'volume') need to have non-zero size" msgstr "" "Volumenes vacios (origen:'vacio',dest:'volume') necesitan tener un tamaño no " "nulo." #, python-format msgid "Block Device %(id)s is not bootable." msgstr "El dispositivo de bloque %(id)s no puede arrancar." #, python-format msgid "" "Block Device Mapping %(volume_id)s is a multi-attach volume and is not valid " "for this operation." msgstr "" "El mapeo de dispositivo de bloques %(volume_id)s es un volumen con diversos " "adjuntos y no es válido para esta operación." msgid "Block Device Mapping cannot be converted to legacy format. " msgstr "" "La correlación de dispositivo de bloque no puede ser convertida a formato " "heredado." msgid "Block Device Mapping is Invalid." msgstr "La correlación de dispositivo de bloque no es válida." #, python-format msgid "Block Device Mapping is Invalid: %(details)s" msgstr "La correlación de dispositivo de bloque es inválida: %(details)s" msgid "" "Block Device Mapping is Invalid: Boot sequence for the instance and image/" "block device mapping combination is not valid." msgstr "" "La correlación de dispositivo de bloque es inválida: La secuencia de " "arranque para la instancia y la combinación de la imagen/correlación de " "dispositivo de bloque no es válida." 
msgid "" "Block Device Mapping is Invalid: You specified more local devices than the " "limit allows" msgstr "" "La correlación de dispositivo de bloque es inválida: Ha especificado más " "dispositivos locales que el límite permitido" #, python-format msgid "Block Device Mapping is Invalid: failed to get image %(id)s." msgstr "" "La correlación de dispositivo de bloque es inválida: no ha sido posible " "obtener la imagen %(id)s." #, python-format msgid "Block Device Mapping is Invalid: failed to get snapshot %(id)s." msgstr "" "La correlación de dispositivo de bloque no es válida: no se ha podido " "obtener la instantánea %(id)s." #, python-format msgid "Block Device Mapping is Invalid: failed to get volume %(id)s." msgstr "" "La correlación de dispositivo de bloque no es válida: no se ha podido " "obtener el volumen %(id)s." msgid "Block migration can not be used with shared storage." msgstr "" "No se puede utilizar la migración de bloque con almacenamiento compartido. " msgid "Boot index is invalid." msgstr "El índice de arranque es válido." #, python-format msgid "Build of instance %(instance_uuid)s aborted: %(reason)s" msgstr "Construcción de instancia %(instance_uuid)s abortada: %(reason)s" #, python-format msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s" msgstr "Construcción de instancia %(instance_uuid)s reprogramada: %(reason)s" #, python-format msgid "BuildRequest not found for instance %(uuid)s" msgstr "" "No se ha encontrado la solicitud de compilación (BuildRequest) para la " "instancia %(uuid)s" msgid "CPU and memory allocation must be provided for all NUMA nodes" msgstr "" "La asignación de CPU y memoria debe proporcionarse para todos los nodos NUMA" #, python-format msgid "" "CPU doesn't have compatibility.\n" "\n" "%(ret)s\n" "\n" "Refer to %(u)s" msgstr "" "CPU no tiene compatibilidad.\n" " \n" "%(ret)s\n" "\n" "Consulte %(u)s" #, python-format msgid "CPU number %(cpunum)d is assigned to two nodes" msgstr "El numero de CPU %(cpunum)d esta asignado a dos nodos" #, python-format msgid "CPU number %(cpunum)d is larger than max %(cpumax)d" msgstr "El numero de CPU %(cpunum)d es mas largo que le máximo %(cpumax)d" #, python-format msgid "CPU number %(cpuset)s is not assigned to any node" msgstr "El numero de CPU %(cpuset)s no esta asignado a ningún nodo" msgid "Can not add access to a public flavor." msgstr "No se puede añadir acceso al sabor público." msgid "Can not find requested image" msgstr "No se puede encontrar la imagen solicitada " #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "" "No se puede manejar la solicitud de autenticación para las credenciales %d" msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "" "No se puede recuperar la vía de acceso ed dispositivo raíz de la " "configuración de libvirt de instancia" #, python-format msgid "" "Cannot '%(action)s' instance %(server_id)s while it is in %(attr)s %(state)s" msgstr "" "No se puede '%(action)s' instancia %(server_id)s mientras está en %(attr)s " "%(state)s" #, python-format msgid "Cannot add host to aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "No se puede añadir host al agregado %(aggregate_id)s. Razón: %(reason)s." 
msgid "Cannot attach one or more volumes to multiple instances" msgstr "No se pueden conectar uno o más volúmenes a varias instancias" #, python-format msgid "Cannot call %(method)s on orphaned %(objtype)s object" msgstr "No se puede ejecutar %(method)s en un objecto huérfano %(objtype)s" #, python-format msgid "" "Cannot determine the parent storage pool for %s; cannot determine where to " "store images" msgstr "" "No se puede determinar la agrupación de almacenamiento padre para %s; no se " "puede determinar dónde se deben almacenar las imágenes" msgid "Cannot find image for rebuild" msgstr "No se puede encontrar la imagen para reconstrucción " #, python-format msgid "Cannot remove host %(host)s in aggregate %(id)s" msgstr "No se puede eliminar el host %(host)s en el agregado %(id)s" #, python-format msgid "Cannot remove host from aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "No se puede remover el host del agregado %(aggregate_id)s. Razón: %(reason)s." msgid "Cannot rescue a volume-backed instance" msgstr "No se puede rescatar una instancia de volume-backed" msgid "" "Cannot set cpu thread pinning policy in a non dedicated cpu pinning policy" msgstr "" "No se puede definir una política de anclaje de hebras de CPU en una política " "de anclaje de CPU no dedicada" msgid "Cannot set realtime policy in a non dedicated cpu pinning policy" msgstr "" "No se puede definir una política en tiempo real en una política de anclaje " "de CPU no dedicada." #, python-format msgid "Cannot update aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "No se puede actualizar agregado %(aggregate_id)s. Razón: %(reason)s." #, python-format msgid "" "Cannot update metadata of aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "No se puede actualizar metadatos de agregado %(aggregate_id)s. Razón: " "%(reason)s." #, python-format msgid "Cell %(uuid)s has no mapping." msgstr "La celda %(uuid)s no posee mapeo alguno" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "" "El cambio produciría un uso inferior a 0 para los recursos siguientes: " "%(unders)s." #, python-format msgid "Class %(class_name)s could not be found: %(exception)s" msgstr "No se ha podido encontrar la clase %(class_name)s: %(exception)s" #, python-format msgid "Compute host %(host)s could not be found." msgstr "No se ha podido encontrar el host de Compute %(host)s." #, python-format msgid "Compute host %s not found." msgstr "No se ha encontrado Compute host %s." #, python-format msgid "Compute service of %(host)s is still in use." msgstr "El servicio Compute de %(host)s todavía se encuentra en uso." #, python-format msgid "Compute service of %(host)s is unavailable at this time." msgstr "El servicio Compute de %(host)s no está disponible en este momento." #, python-format msgid "Config drive format '%(format)s' is not supported." msgstr "El formato de unidad de configuración '%(format)s' no está soportado." #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt hypervisor " "'%s' does not support selecting CPU models" msgstr "" "La configuración ha solicitado un modelo CPU explícito, pero el hipervisor " "libvirt actual '%s' no soporta la selección de modelos de CPU" #, python-format msgid "" "Conflict updating instance %(instance_uuid)s, but we were unable to " "determine the cause" msgstr "" "Ha ocurrido un conflicto al actualizar la instancia %(instance_uuid)s pero " "no hemos podido establecer la causa." 
#, python-format msgid "" "Conflict updating instance %(instance_uuid)s. Expected: %(expected)s. " "Actual: %(actual)s" msgstr "" "Ha ocurrido un conflicto al actualizar la instancia %(instance_uuid)s. " "Esperado: %(expected)s. Actualmente: %(actual)s." #, python-format msgid "Connection to cinder host failed: %(reason)s" msgstr "Fallo en la conexión al alojamiento cinder: %(reason)s" #, python-format msgid "Connection to glance host %(server)s failed: %(reason)s" msgstr "La conexión con el host glance %(server)s ha fallado: %(reason)s" #, python-format msgid "Connection to libvirt lost: %s" msgstr "Conexión hacia libvirt perdida: %s" #, python-format msgid "" "Console log output could not be retrieved for instance %(instance_id)s. " "Reason: %(reason)s" msgstr "" "No se ha podido recuperar la salida del registro de la consola para la " "instancia %(instance_id)s. Motivo: %(reason)s" msgid "Constraint not met." msgstr "Restricción no cumplida." #, python-format msgid "Converted to raw, but format is now %s" msgstr "Convertido a sin formato, pero el formato es ahora %s" #, python-format msgid "Could not attach image to loopback: %s" msgstr "No se puede unir la imagen con el loopback: %s" #, python-format msgid "Could not fetch image %(image_id)s" msgstr "No se ha podido captar la imagen %(image_id)s" #, python-format msgid "Could not find a handler for %(driver_type)s volume." msgstr "" "No se ha podido encontrar un manejador para el volumen %(driver_type)s." #, python-format msgid "Could not find binary %(binary)s on host %(host)s." msgstr "No se ha podido encontrar el binario %(binary)s en el host %(host)s." #, python-format msgid "Could not find config at %(path)s" msgstr "No se ha podido encontrar configuración en %(path)s" msgid "Could not find the datastore reference(s) which the VM uses." msgstr "" "No se ha podido encontrar la(s) referencia(s) de almacén de datos que la MV " "utiliza." #, python-format msgid "Could not load line %(line)s, got error %(error)s" msgstr "" "No se puede cargar la linea %(line)s, se ha obtenido el error %(error)s" #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "No se ha podido cargar aplicación de pegar '%(name)s' desde %(path)s " #, python-format msgid "" "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s" msgstr "" "No se ha podido montar la unidad de configuración vfat. %(operation)s ha " "fallado. Error: %(error)s" #, python-format msgid "Could not upload image %(image_id)s" msgstr "No se ha podido cargar la imagen %(image_id)s" msgid "Creation of virtual interface with unique mac address failed" msgstr "La creación de la interfaz virtual con dirección MAC única ha fallado" #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "" "El valor regex %s del almacén de datos no concuerda con algún almacén de " "datos." msgid "Datetime is in invalid format" msgstr "El formato de fecha no es válido" msgid "Default PBM policy is required if PBM is enabled." msgstr "Se requiere una política PBM por defecto si se habilita PBM." #, python-format msgid "Device '%(device)s' not found." msgstr "No se ha encontrado el disposisitvo'%(device)s'." msgid "Device name contains spaces." msgstr "El nombre del dispositivo contiene espacios." msgid "Device name empty or too long." msgstr "El nombre del dispositivo está vacío o es demasiado largo." 
#, python-format msgid "Device type mismatch for alias '%s'" msgstr "Discrepancia de tipo de dispositivo para el alias '%s'" #, python-format msgid "Disk format %(disk_format)s is not acceptable" msgstr "Formato de disco %(disk_format)s no es aceptable" #, python-format msgid "Disk info file is invalid: %(reason)s" msgstr "El archivo de información de disco es inválido: %(reason)s" #, python-format msgid "Driver Error: %s" msgstr "Error de dispositivo: %s" #, python-format msgid "Error attempting to run %(method)s" msgstr "Error al intentar ejecutar %(method)s" #, python-format msgid "" "Error destroying the instance on node %(node)s. Provision state still " "'%(state)s'." msgstr "" "Error al destruir la instancia en nodo %(node)s. El estado de provisión aún " "es '%(state)s'." #, python-format msgid "Error during unshelve instance %(instance_id)s: %(reason)s" msgstr "" "Error durante la extracción de la instancia %(instance_id)s: %(reason)s" #, python-format msgid "" "Error from libvirt while getting domain info for %(instance_name)s: [Error " "Code %(error_code)s] %(ex)s" msgstr "" "Error de libvirt al obtener la información de dominio para " "%(instance_name)s: [Código de error %(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Error de libvirt al buscar %(instance_name)s: [Código de error " "%(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while quiescing %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Error de libvirt durante el modo inactivo %(instance_name)s: [Código de " "Error%(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while set password for username \"%(user)s\": [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Error de libvert al establecer la contraseña para el usuario \"%(user)s\": " "[Código de error %(error_code)s] %(ex)s" #, python-format msgid "" "Error mounting %(device)s to %(dir)s in image %(image)s with libguestfs " "(%(e)s)" msgstr "" "Error al montar %(device)s a %(dir)s en imagen %(image)s con libguestfs " "(%(e)s)" #, python-format msgid "Error mounting %(image)s with libguestfs (%(e)s)" msgstr "Error al montar %(image)s con libguestfs (%(e)s)" #, python-format msgid "Error when creating resource monitor: %(monitor)s" msgstr "Error al crear monitor de recursos: %(monitor)s" #, python-format msgid "Event %(event)s not found for action id %(action_id)s" msgstr "" "No se ha encontrado el suceso %(event)s para el id de acción %(action_id)s" msgid "Event must be an instance of nova.virt.event.Event" msgstr "El suceso debe ser una instancia de un nova.virt.event.Event" #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for instance " "%(instance_uuid)s. Last exception: %(exc_reason)s" msgstr "" "Se ha superado el número máximo de intentos de planificación " "%(max_attempts)d para la instancia %(instance_uuid)s. Última excepción: " "%(exc_reason)s" #, python-format msgid "" "Exceeded max scheduling retries %(max_retries)d for instance " "%(instance_uuid)s during live migration" msgstr "" "Se han excedido los intentos máximos de programación %(max_retries)d para la " "instancia %(instance_uuid)s durante la migración en vivo" #, python-format msgid "Exceeded maximum number of retries. %(reason)s" msgstr "Se ha excedido el número máximo de intentos. %(reason)s" #, python-format msgid "Expected a uuid but received %(uuid)s." 
msgstr "Se esperaba un uuid pero se ha recibido %(uuid)s." msgid "Extracting vmdk from OVA failed." msgstr "Error al extraer vmdk de OVA." #, python-format msgid "Failed to access port %(port_id)s: %(reason)s" msgstr "Error al acceder a puerto %(port_id)s: %(reason)s" #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "No fue posible asignar red(es) con error %s, no se reprogramará." msgid "Failed to allocate the network(s), not rescheduling." msgstr "Fallo al asociar la(s) red(es), no se reprogramará." #, python-format msgid "Failed to attach network adapter device to %(instance_uuid)s" msgstr "Error al conectar el dispositivo adaptador de red a %(instance_uuid)s" #, python-format msgid "Failed to deploy instance: %(reason)s" msgstr "Fallo al desplegar instancia: %(reason)s" #, python-format msgid "Failed to detach PCI device %(dev)s: %(reason)s" msgstr "Fallo al desasociar el dispositivo PCI %(dev)s: %(reason)s" #, python-format msgid "Failed to detach network adapter device from %(instance_uuid)s" msgstr "" "Error al desconectar el dispositivo adaptador de red desde %(instance_uuid)s" #, python-format msgid "Failed to encrypt text: %(reason)s" msgstr "No se ha podido cifrar el texto: %(reason)s" #, python-format msgid "Failed to launch instances: %(reason)s" msgstr "Fallo al ejecutar instancias: %(reason)s" #, python-format msgid "Failed to map partitions: %s" msgstr "No se han podido correlacionar particiones: %s" #, python-format msgid "Failed to mount filesystem: %s" msgstr "Fallo al montar el sistema de ficheros: %s" #, python-format msgid "Failed to power off instance: %(reason)s" msgstr "Fallo al apagar la instancia: %(reason)s" #, python-format msgid "Failed to power on instance: %(reason)s" msgstr "Fallo al arrancar la instancia: %(reason)s" #, python-format msgid "Failed to provision instance %(inst)s: %(reason)s" msgstr "Fallo al proporcionar instancia la instancia %(inst)s: %(reason)s" #, python-format msgid "Failed to read or write disk info file: %(reason)s" msgstr "" "Fallo al leer o escribir el archivo de información de disco: %(reason)s" #, python-format msgid "Failed to reboot instance: %(reason)s" msgstr "Fallo al reiniciar la instancia: %(reason)s" #, python-format msgid "Failed to remove volume(s): (%(reason)s)" msgstr "Fallo al remover el(los) volumen(es): (%(reason)s)" #, python-format msgid "Failed to request Ironic to rebuild instance %(inst)s: %(reason)s" msgstr "" "Error al solicitar Ironic para reconstruir instancia %(inst)s: %(reason)s" #, python-format msgid "Failed to resume instance: %(reason)s" msgstr "Fallo al resumir la instancia: %(reason)s" #, python-format msgid "Failed to run qemu-img info on %(path)s : %(error)s" msgstr "Error al ejecutar run qemu-img. Información en %(path)s : %(error)s" #, python-format msgid "Failed to set admin password on %(instance)s because %(reason)s" msgstr "" "No se ha podido establecer la contraseña de administrador en %(instance)s " "debido a %(reason)s" #, python-format msgid "Failed to suspend instance: %(reason)s" msgstr "Fallo al suspender instancia: %(reason)s" #, python-format msgid "Failed to terminate instance: %(reason)s" msgstr "Fallo al terminar la instancia: %(reason)s" msgid "Failure prepping block device." msgstr "Fallo al preparar el dispositivo de bloque." #, python-format msgid "File %(file_path)s could not be found." msgstr "No se ha podido encontrar el archivo %(file_path)s." 
#, python-format msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s." msgstr "" "La IP fija %(ip)s no es una direccion IP valida para la red %(network_id)s." #, python-format msgid "Fixed IP %s is already in use." msgstr "IP fija %s ya está en uso." #, python-format msgid "" "Fixed IP address %(address)s is already in use on instance %(instance_uuid)s." msgstr "" "La dirección IP fija %(address)s ya se está utilizando en la instancia " "%(instance_uuid)s." #, python-format msgid "Fixed IP not found for address %(address)s." msgstr "No se ha encontrado la IP fija de la dirección %(address)s." #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "No se ha podido encontrar el tipo %(flavor_id)s." #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s." msgstr "" "El tipo %(flavor_id)s no tiene especificaciones adicionales con clave " "%(extra_specs_key)s" #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(key)s." msgstr "" "Tipo %(flavor_id)s no tiene especificaciones adicionales con clave %(key)s." #, python-format msgid "" "Flavor %(id)s extra spec cannot be updated or created after %(retries)d " "retries." msgstr "" "No se puede crear o actualizar el tipo %(id)s de especificaciones " "adicionales después de %(retries)d intentos." #, python-format msgid "" "Flavor access already exists for flavor %(flavor_id)s and project " "%(project_id)s combination." msgstr "" "Acceso al tipo ya existe para la combinación del tipo %(flavor_id)s y el " "proyecto %(project_id)s." #, python-format msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination." msgstr "" "No se ha encontrado el acceso al tipo para la combinación %(flavor_id)s / " "%(project_id)s." msgid "Flavor used by the instance could not be found." msgstr "No se ha podido encontrar el tipo utilizado por la instancia." #, python-format msgid "Flavor with ID %(flavor_id)s already exists." msgstr "El tipo identificado como %(flavor_id)s ya existe." #, python-format msgid "Flavor with name %(flavor_name)s could not be found." msgstr "No se puede encontrar el tipo con nombre %(flavor_name)s." #, python-format msgid "Flavor with name %(name)s already exists." msgstr "El tipo de nombre %(name)s ya existe." #, python-format msgid "" "Flavor's disk is smaller than the minimum size specified in image metadata. " "Flavor disk is %(flavor_size)i bytes, minimum size is %(image_min_disk)i " "bytes." msgstr "" "El disco del sabor es más pequeño que el tamaño mínimo especificado en los " "metadatos del imagen. El disco del sabor es %(flavor_size)i bytes, tamaño " "mínimo es %(image_min_disk)i bytes." #, python-format msgid "" "Flavor's disk is too small for requested image. Flavor disk is " "%(flavor_size)i bytes, image is %(image_size)i bytes." msgstr "" "El disco Flavor es demasiado pequeño para la imagen solicitada. El disco " "Flavor tiene %(flavor_size)i bytes, la imagen tiene %(image_size)i bytes." msgid "Flavor's memory is too small for requested image." msgstr "La memoria del tipo es demasiado pequeña para la imagen solicitada." #, python-format msgid "Floating IP %(address)s association has failed." msgstr "Ha fallado la asociación de IP flotante %(address)s." #, python-format msgid "Floating IP %(address)s is associated." msgstr "La dirección IP flotante %(address)s está asociada." #, python-format msgid "Floating IP %(address)s is not associated with instance %(id)s." 
msgstr "La IP flotante %(address)s no está asociada a la instancia %(id)s." #, python-format msgid "Floating IP not found for ID %(id)s." msgstr "No se ha encontrado ninguna dirección IP flotante para el ID %(id)s." #, python-format msgid "Floating IP not found for ID %s" msgstr "No se ha encontrado la IP flotante para el IP %s." #, python-format msgid "Floating IP not found for address %(address)s." msgstr "" "No se ha encontrado ninguna dirección IP flotante para la dirección " "%(address)s." msgid "Floating IP pool not found." msgstr "No se ha encontrado el pool de IP flotantes." msgid "" "Forbidden to exceed flavor value of number of serial ports passed in image " "meta." msgstr "" "Se prohíbe exceder el tipo de serie del número de puertos seriales presentes " "en meta imagen." msgid "Found no disk to snapshot." msgstr "No se ha encontrado disco relacionado a instantánea." msgid "Guest does not have a console available." msgstr "El invitado no tiene una consola disponible." #, python-format msgid "Host %(host)s could not be found." msgstr "No se ha podido encontrar el host %(host)s." #, python-format msgid "Host %(host)s is already mapped to cell %(uuid)s" msgstr "El host %(host)s ya está correlacionado con la celda %(uuid)s" #, python-format msgid "Host '%(name)s' is not mapped to any cell" msgstr "Host '%(name)s' no esta mapeado a ninguna celda" msgid "Host aggregate is not empty" msgstr "El agregado de anfitrión no está vacío" msgid "Host does not support guests with NUMA topology set" msgstr "Host no soporta invitados con conjunto de topología NUMA" msgid "Host does not support guests with custom memory page sizes" msgstr "" "Host no soporta invitados con tamaños de página de memoria perzonalizados" msgid "Hypervisor driver does not support post_live_migration_at_source method" msgstr "" "El controlador del hipervisor no soporta método post_live_migration_at_source" #, python-format msgid "Hypervisor virt type '%s' is not valid" msgstr "El tipo virtual de hipervisor '%s' no es válido" #, python-format msgid "Hypervisor virtualization type '%(hv_type)s' is not recognised" msgstr "No se reconoce el tipo de virtualización de hipervisor '%(hv_type)s'" #, python-format msgid "Hypervisor with ID '%s' could not be found." msgstr "El hipervisor con el ID '%s' no se ha podido encontrar. " #, python-format msgid "IP allocation over quota in pool %s." msgstr "La asignación IP excede la capacidad en pool %s." msgid "IP allocation over quota." msgstr "La asignación IP excede la capacidad." #, python-format msgid "Image %(image_id)s could not be found." msgstr "No se ha podido encontrar la imagen %(image_id)s. " #, python-format msgid "Image %(image_id)s is not active." msgstr "La imagen %(image_id)s no está activa." #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "La imagen %(image_id)s es inaceptable: %(reason)s" msgid "Image disk size greater than requested disk size" msgstr "La imagen de disco es más grande que el tamaño del disco solicitado" msgid "Image is not raw format" msgstr "La imagen no tiene formato original" msgid "Image metadata limit exceeded" msgstr "Se ha superado el límite de metadatos de imágenes" #, python-format msgid "Image model '%(image)s' is not supported" msgstr "No se soporta modelo de imagen '%(image)s'" msgid "Image not found." msgstr "Imagen no encontrada." 
#, python-format msgid "" "Image property '%(name)s' is not permitted to override NUMA configuration " "set against the flavor" msgstr "" "No se permite que la propiedad de imagen '%(name)s' elimine conjunto de " "configuración NUMA relativo al tipo" msgid "" "Image property 'hw_cpu_policy' is not permitted to override CPU pinning " "policy set against the flavor" msgstr "" "No se permite que la propiedad de imagen 'hw_cpu_policy' elimine conjunto de " "política de anclaje de CPU relativo al tipo" msgid "" "Image property 'hw_cpu_thread_policy' is not permitted to override CPU " "thread pinning policy set against the flavor" msgstr "" "No se permite que la propiedad de imagen 'hw_cpu_thread_policy' sustituya la " "política de anclaje de hebras de CPU definida para este tipo" msgid "Image that the instance was started with could not be found." msgstr "No se ha podido encontrar la imagen con la que se lanzó la instancia." #, python-format msgid "Image's config drive option '%(config_drive)s' is invalid" msgstr "" "Opción del controlador para la configuración de imagen '%(config_drive)s' " "no es válida." msgid "" "Images with destination_type 'volume' need to have a non-zero size specified" msgstr "" "Las imágenes con destination_type 'colume? necesitan tener un tamaño " "especificado diferente a cero" msgid "In ERROR state" msgstr "En estado de ERROR " #, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "En los estados %(vm_state)s/%(task_state)s, no REDIMENSIONADO/Ninguno" #, python-format msgid "In-progress live migration %(id)s is not found for server %(uuid)s." msgstr "" "La migración en vivo en curso %(id)s no se encuentra para el servidor " "%(uuid)s." msgid "" "Incompatible settings: ephemeral storage encryption is supported only for " "LVM images." msgstr "" "Configuraciones incompatibles: cifrado de almacenamiento efímero solo es " "soportado por imágenes LVM." #, python-format msgid "Info cache for instance %(instance_uuid)s could not be found." msgstr "" "No se ha podido encontrar la memoria caché de información para la instancia " "%(instance_uuid)s." #, python-format msgid "" "Instance %(instance)s and volume %(vol)s are not in the same " "availability_zone. Instance is in %(ins_zone)s. Volume is in %(vol_zone)s" msgstr "" "Instancia %(instance)s y volumen %(vol)s no están en la misma " "availability_zone. Instancia está en %(ins_zone)s. Volumen está en " "%(vol_zone)s" #, python-format msgid "Instance %(instance)s does not have a port with id %(port)s" msgstr "Instancia %(instance)s no tiene un puerto identificado como %(port)s" #, python-format msgid "Instance %(instance_id)s cannot be rescued: %(reason)s" msgstr "La instancia %(instance_id)s no se puede rescatar: %(reason)s." #, python-format msgid "Instance %(instance_id)s could not be found." msgstr "No se ha podido encontrar la instancia %(instance_id)s." #, python-format msgid "Instance %(instance_id)s has no tag '%(tag)s'" msgstr "Instancia %(instance_id)s no tiene etiqueta '%(tag)s'" #, python-format msgid "Instance %(instance_id)s is not in rescue mode" msgstr "La instancia %(instance_id)s no esta en modo de rescate" #, python-format msgid "Instance %(instance_id)s is not ready" msgstr "La instancia %(instance_id)s no está preparada" #, python-format msgid "Instance %(instance_id)s is not running." 
msgstr "La instacia %(instance_id)s no se esta ejecutando" #, python-format msgid "Instance %(instance_id)s is unacceptable: %(reason)s" msgstr "La instancia %(instance_id)s no es aceptable: %(reason)s" #, python-format msgid "Instance %(instance_uuid)s does not specify a NUMA topology" msgstr "La instancia %(instance_uuid)s no especifica una topología NUMA" #, python-format msgid "Instance %(instance_uuid)s does not specify a migration context." msgstr "La instancia %(instance_uuid)s no especifica un contexto de migración." #, python-format msgid "" "Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while " "the instance is in this state." msgstr "" "La instancia %(instance_uuid)s está en %(attr)s %(state)s. No se puede " "%(method)s mientras la instancia está en este estado." #, python-format msgid "Instance %(instance_uuid)s is locked" msgstr "La instancia %(instance_uuid)s está bloqueada" #, python-format msgid "" "Instance %(instance_uuid)s requires config drive, but it does not exist." msgstr "" "La instancia %(instance_uuid)s requiere una unidad de configuración, pero no " "existe." #, python-format msgid "Instance %(name)s already exists." msgstr "La instancia %(name)s ya existe." #, python-format msgid "Instance %(server_id)s is in an invalid state for '%(action)s'" msgstr "" "La instancia %(server_id)s se encuentra en un estado no válido para " "'%(action)s'" #, python-format msgid "Instance %(uuid)s has no mapping to a cell." msgstr "Instancia %(uuid)s no tiene mapeo para una celda." #, python-format msgid "Instance %s not found" msgstr "No se ha encontrado la instancia %s" #, python-format msgid "Instance %s provisioning was aborted" msgstr "Se ha abortado la provisión de instancia %s" msgid "Instance could not be found" msgstr "No se ha podido encontrar la instancia" msgid "Instance disk to be encrypted but no context provided" msgstr "Se encriptará disco de instancia ero no se ha proporcionado contexto" msgid "Instance event failed" msgstr "El evento de instancia ha fallado" #, python-format msgid "Instance group %(group_uuid)s already exists." msgstr "El grupo de instancias %(group_uuid)s ya existe." #, python-format msgid "Instance group %(group_uuid)s could not be found." msgstr "No se ha podido encontrar el grupo de instancias %(group_uuid)s." msgid "Instance has no source host" msgstr "La instancia no tiene ningún host de origen" msgid "Instance has not been resized." msgstr "La instancia no se ha redimensionado." #, python-format msgid "Instance hostname %(hostname)s is not a valid DNS name" msgstr "El nombre de host de instancia %(hostname)s no es un nombre DNS válido" msgid "Instance is not a member of specified network" msgstr "La instancia no es miembro de la red especificada" #, python-format msgid "Instance rollback performed due to: %s" msgstr "Reversión de instancia ejecutada debido a: %s" #, python-format msgid "" "Insufficient Space on Volume Group %(vg)s. Only %(free_space)db available, " "but %(size)d bytes required by volume %(lv)s." msgstr "" "Espacio insuficiente en grupo de volumen %(vg)s. Sólo hay %(free_space)db " "disponibles, pero se necesitan %(size)d bytes para el volumen %(lv)s." #, python-format msgid "Insufficient compute resources: %(reason)s." msgstr "Recursos de cómputo insuficientes: %(reason)s." #, python-format msgid "Interface %(interface)s not found." msgstr "No se ha encontrado la interfaz %(interface)s." 
#, python-format msgid "Invalid Base 64 data for file %(path)s" msgstr "Datos Base-64 inválidos para el archivo %(path)s" msgid "Invalid Connection Info" msgstr "Información de conexión no válida" #, python-format msgid "Invalid ID received %(id)s." msgstr "Se ha recibido el ID %(id)s no válido." #, python-format msgid "Invalid IP format %s" msgstr "Formato IP inválido %s" #, python-format msgid "Invalid IP protocol %(protocol)s." msgstr "Protocolo IP invalido %(protocol)s" msgid "" "Invalid PCI Whitelist: The PCI whitelist can specify devname or address, but " "not both" msgstr "" "Lista blanca de PCI no válida: La lista blanca de PCI puede especificar un " "devname o una dirección, pero no ambas" #, python-format msgid "Invalid PCI alias definition: %(reason)s" msgstr "Definición de alias PCI inválido: %(reason)s" #, python-format msgid "Invalid Regular Expression %s" msgstr "La expresión regular %s es inválida" #, python-format msgid "Invalid characters in hostname '%(hostname)s'" msgstr "Caracteres invalidos en el nombre del host '%(hostname)s'" msgid "Invalid config_drive provided." msgstr "La config_drive proporcionada es inválida." #, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "config_drive_format \"%s\" no válido" #, python-format msgid "Invalid console type %(console_type)s" msgstr "Tipo de consola %(console_type)s no válido " #, python-format msgid "Invalid content type %(content_type)s." msgstr "Tipo de contenido invalido %(content_type)s." #, python-format msgid "Invalid datetime string: %(reason)s" msgstr "Cadena date time invalida: %(reason)s" msgid "Invalid device UUID." msgstr "Dispositivo UUID invalido." #, python-format msgid "Invalid entry: '%s'" msgstr "Entrada no válida: '%s' " #, python-format msgid "Invalid entry: '%s'; Expecting dict" msgstr "Entrada no válida: '%s'; Esperando dict" #, python-format msgid "Invalid entry: '%s'; Expecting list or dict" msgstr "Entrada no válida: '%s'; esperando lista o dict" #, python-format msgid "Invalid exclusion expression %r" msgstr "Expresión de exclusión inválida %r" #, python-format msgid "Invalid image format '%(format)s'" msgstr "formato de imagen no válido '%(format)s' " #, python-format msgid "Invalid image href %(image_href)s." msgstr "href de imagen %(image_href)s no válida." #, python-format msgid "Invalid inclusion expression %r" msgstr "Expresión de inclusión inválida %r" #, python-format msgid "" "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s" msgstr "" "Conenido inválido para el campo/atributo %(path)s. Valor: %(value)s. " "%(message)s" #, python-format msgid "Invalid input received: %(reason)s" msgstr "Entrada inválida recibida: %(reason)s" msgid "Invalid instance image." msgstr "Imagen de instancia no válida." #, python-format msgid "Invalid is_public filter [%s]" msgstr "Filtro is_public no válido [%s]" msgid "Invalid key_name provided." msgstr "Se ha proporcionado un nombre de clave no válido." 
#, python-format msgid "Invalid memory page size '%(pagesize)s'" msgstr "Tamaño de página de memoria no válido '%(pagesize)s'" msgid "Invalid metadata key" msgstr "Clave de metadatos no válida" #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "Tamaño de metadatos inválido: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "Metadatos inválidos: %(reason)s" #, python-format msgid "Invalid minDisk filter [%s]" msgstr "Filtro minDisk no válido [%s]" #, python-format msgid "Invalid minRam filter [%s]" msgstr "Filtro minRam no válido [%s]" #, python-format msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" msgstr "Rango de puertos invalido %(from_port)s:%(to_port)s. %(msg)s" msgid "Invalid proxy request signature." msgstr "Firma de solicitud de proxy no válida." #, python-format msgid "Invalid range expression %r" msgstr "Expresión de intérvalo inválida %r" msgid "Invalid service catalog json." msgstr "JSON de catálogo de servicios no válido." msgid "Invalid start time. The start time cannot occur after the end time." msgstr "" "Hora de inicio no válida. La hora de inicio no pude tener lugar después de " "la hora de finalización." msgid "Invalid state of instance files on shared storage" msgstr "Estado no válido de archivos de instancia en almacenamiento compartido" #, python-format msgid "Invalid timestamp for date %s" msgstr "Indicación de fecha y hora no válida para la fecha %s" #, python-format msgid "Invalid usage_type: %s" msgstr "usage_type: %s no válido" #, python-format msgid "Invalid value for Config Drive option: %(option)s" msgstr "" "Valor inválido para la opción de configuración de controlador: %(option)s" #, python-format msgid "Invalid virtual interface address %s in request" msgstr "Dirección de interfaz virtual inválida %s en la solicitud" #, python-format msgid "Invalid volume access mode: %(access_mode)s" msgstr "Modo de acceso al volumen invalido: %(access_mode)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "Volumen inválido: %(reason)s" msgid "Invalid volume_size." msgstr "volume_size invalido." #, python-format msgid "Ironic node uuid not supplied to driver for instance %s." msgstr "" "No se ha proporcionado nodo uuid Ironic para controlador de instancia %s." #, python-format msgid "" "It is not allowed to create an interface on external network %(network_uuid)s" msgstr "" "No está permitido crear una interfaz en una red externa %(network_uuid)s" msgid "" "Key Names can only contain alphanumeric characters, periods, dashes, " "underscores, colons and spaces." msgstr "" "Los nombres de las claves solo pueden contener caracteres alfanuméricos, " "punto, guión, guión bajo, dos puntos y espacios." #, python-format msgid "Key manager error: %(reason)s" msgstr "error de administrador de claves: %(reason)s" #, python-format msgid "Key pair '%(key_name)s' already exists." msgstr "El par de claves '%(key_name)s' ya existe." #, python-format msgid "Keypair %(name)s not found for user %(user_id)s" msgstr "" "No se ha encontrado el par de claves %(name)s para el usuario %(user_id)s" #, python-format msgid "Keypair data is invalid: %(reason)s" msgstr "El conjunto de claves son inválidos: %(reason)s" msgid "Limits only supported from vCenter 6.0 and above" msgstr "Sólo se admiten límites a partir de vCenter 6.0 " #, python-format msgid "Live migration %(id)s for server %(uuid)s is not in progress." msgstr "" "La migración en vivo %(id)s para el servidor %(uuid)s no está en curso." 
#, python-format msgid "Malformed message body: %(reason)s" msgstr "Cuerpo de mensaje con formato incorrecto: %(reason)s" #, python-format msgid "" "Malformed request URL: URL's project_id '%(project_id)s' doesn't match " "Context's project_id '%(context_project_id)s'" msgstr "" "Solicitud URL incorrecta: el project_id de la URL '%(project_id)s' no " "corresponde con el project_id del Contexto '%(context_project_id)s'" msgid "Malformed request body" msgstr "Cuerpo de solicitud incorrecto" msgid "Mapping image to local is not supported." msgstr "No se soporta el mapeo de imagen a local." #, python-format msgid "Marker %(marker)s could not be found." msgstr "No se ha podido encontrar el marcador %(marker)s." msgid "Maximum number of floating IPs exceeded" msgstr "Se ha superado el número máximo de IP flotantes" #, python-format msgid "Maximum number of metadata items exceeds %(allowed)d" msgstr "El número máximo de elementos de metadatos supera %(allowed)d" msgid "Maximum number of ports exceeded" msgstr "El número máximo de puertos ha sido excedido." msgid "Maximum number of security groups or rules exceeded" msgstr "Se ha superado el número máximo de grupos o reglas de seguridad" msgid "Metadata item was not found" msgstr "No se ha encontrado el elemento metadatos" msgid "Metadata property key greater than 255 characters" msgstr "Clave de propiedad metadatos de más de 255 caracteres " msgid "Metadata property value greater than 255 characters" msgstr "Valor de propiedad de metadatos de más de 255 caracteres " msgid "Metadata type should be dict." msgstr "El tipo de metadato debería ser dict." #, python-format msgid "" "Metric %(name)s could not be found on the compute host node %(host)s." "%(node)s." msgstr "" "La métrica %(name)s no se puede encontrar en el nodo de cómputo anfitrión " "%(host)s:%(node)s." #, python-format msgid "Migration %(id)s for server %(uuid)s is not live-migration." msgstr "" "La migración %(id)s para el servidor %(uuid)s no es una migración en vivo." #, python-format msgid "Migration %(migration_id)s could not be found." msgstr "No se ha podido encontrar la migración %(migration_id)s." #, python-format msgid "Migration %(migration_id)s not found for instance %(instance_id)s" msgstr "" "No se ha encontrado la migración %(migration_id)s para la instancia " "%(instance_id)s" #, python-format msgid "" "Migration %(migration_id)s state of instance %(instance_uuid)s is %(state)s. " "Cannot %(method)s while the migration is in this state." msgstr "" "El estado de la migración %(migration_id)s de la instancia " "%(instance_uuid)s es %(state)s. No se puede %(method)s mientras la instancia " "está en este estado." #, python-format msgid "Migration error: %(reason)s" msgstr "Error en migración: %(reason)s" msgid "Migration is not supported for LVM backed instances" msgstr "No se soporta la migración de instancias LVM respaldadas" #, python-format msgid "" "Migration not found for instance %(instance_id)s with status %(status)s." msgstr "" "No se ha encontrado la migración para la instancia %(instance_id)s con el " "estado %(status)s." #, python-format msgid "Migration pre-check error: %(reason)s" msgstr "Error de pre-verificación de migraión: %(reason)s" #, python-format msgid "Migration select destinations error: %(reason)s" msgstr "Error de selección de destinos de migración: %(reason)s" #, python-format msgid "Missing arguments: %s" msgstr "Faltan argumentos: %s" msgid "Missing device UUID." msgstr "Dispositivo UUID perdido." 
msgid "Missing disabled reason field" msgstr "Campo disabled reason omitido." msgid "Missing forced_down field" msgstr "Campo forced_down no presente." msgid "Missing imageRef attribute" msgstr "Falta el atributo imageRef" #, python-format msgid "Missing keys: %s" msgstr "Faltan claves: %s" msgid "Missing parameter dict" msgstr "Falta el parámetro dict " #, python-format msgid "" "More than one instance is associated with fixed IP address '%(address)s'." msgstr "" "Hay más de una instancia asociada con la dirección IP fija '%(address)s'." msgid "" "More than one possible network found. Specify network ID(s) to select which " "one(s) to connect to." msgstr "" "Se ha encontrado más de una red posible. Especifique ID(s) de la red para " "seleccionar a cuál(es) conectarse." msgid "More than one swap drive requested." msgstr "Más de un controlador de intercambio ha sido solicitado." #, python-format msgid "Multi-boot operating system found in %s" msgstr "Se ha encontrado sistema operativo multiarranque en %s" msgid "Multiple X-Instance-ID headers found within request." msgstr "" "Se han encontrado varias cabeceas de ID de instancia X en la solicitud." msgid "Multiple X-Tenant-ID headers found within request." msgstr "Se han encontrado múltiples cabeceras X-Tenant-ID en la solicitud." #, python-format msgid "Multiple floating IP pools matches found for name '%s'" msgstr "" "Se han encontrado varias coincidencias de agrupaciones de IP flotante para " "el nombre '%s' " #, python-format msgid "Multiple floating IPs are found for address %(address)s." msgstr "Se han encontrado varias IP flotantes para la dirección %(address)s." msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we do " "not return uptime for just one host." msgstr "" "Múltiples anfitrionespueden ser administrados por el controlador de vCenter " "de VMware; por lo tanto no se puede regresar tiempo de ejecución solamente " "para un huésped." msgid "Multiple possible networks found, use a Network ID to be more specific." msgstr "" "Se han encontrado múltiples redes posibles, usa un identificador de red para " "ser más específico." #, python-format msgid "" "Multiple security groups found matching '%s'. Use an ID to be more specific." msgstr "" "Se han encontrado varios grupos de seguridad que coinciden con '%s'. Utilice " "un ID para ser más específico." msgid "Must input network_id when request IP address" msgstr "Se debe ingresar a network_id cuando se solicite dirección IP" msgid "Must not input both network_id and port_id" msgstr "No se debe ingresar ni a network_id ni a port_id" msgid "" "Must specify host_ip, host_username and host_password to use vmwareapi." "VMwareVCDriver" msgstr "" "Debe especificar host_ip, host_username y host_password para usar vmwareapi." "VMwareVCDriver" msgid "Must supply a positive value for max_number" msgstr "Debe indicar un valor positivo para max_number" msgid "Must supply a positive value for max_rows" msgstr "Se debe proporcionar un valor positivo para max_rows" #, python-format msgid "Network %(network_id)s could not be found." msgstr "No se ha podido encontrar la red %(network_id)s." #, python-format msgid "" "Network %(network_uuid)s requires a subnet in order to boot instances on." msgstr "" "La red %(network_uuid)s requiere una subred para poder arrancar instancias." 
#, python-format msgid "Network could not be found for bridge %(bridge)s" msgstr "No se ha podido encontrar la red para el puente %(bridge)s" #, python-format msgid "Network could not be found for instance %(instance_id)s." msgstr "No se ha podido encontrar la red para la instancia %(instance_id)s." msgid "Network not found" msgstr "No se ha encontrado la red" msgid "" "Network requires port_security_enabled and subnet associated in order to " "apply security groups." msgstr "" "La red requiere port_security_enabled y una subred asociada para aplicar " "grupos de seguridad." msgid "New volume must be detached in order to swap." msgstr "" "El nuevo volumen debe ser desasociado para poder activar la memoria de " "intercambio." msgid "New volume must be the same size or larger." msgstr "El nuevo volumen debe ser del mismo o de mayor tamaño." #, python-format msgid "No Block Device Mapping with id %(id)s." msgstr "No hay mapeo de dispositivo de bloque identificado como %(id)s." msgid "No Unique Match Found." msgstr "No se ha encontrado una sola coincidencia." msgid "No compute host specified" msgstr "No se ha especificado ningún host de cálculo" #, python-format msgid "No configuration information found for operating system %(os_name)s" msgstr "" "No se ha encontrado ninguna información de configuración para el sistema " "operativo %(os_name)s" #, python-format msgid "No device with MAC address %s exists on the VM" msgstr "No existe dispositivo con dirección MAC %s en la VM" #, python-format msgid "No device with interface-id %s exists on VM" msgstr "No existe dispositivo con identificación de interfaz %s en VM" #, python-format msgid "No disk at %(location)s" msgstr "No hay ningún disco en %(location)s" #, python-format msgid "No fixed IP addresses available for network: %(net)s" msgstr "No hay dirección IP fija disponibles para red: %(net)s. " msgid "No fixed IPs associated to instance" msgstr "No hay IP fijas asociadas a la instancia " msgid "No free nbd devices" msgstr "No hay dispositivos nbd libres" msgid "No host available on cluster" msgstr "No hay anfitrión disponible en cluster." msgid "No hosts found to map to cell, exiting." msgstr "" "No se ha encontrado ningún host para correlacionar con la celda, saliendo." #, python-format msgid "No hypervisor matching '%s' could be found." msgstr "No es ha podido encontrar ningún hipervisor que coincida con '%s'. " msgid "No image locations are accessible" msgstr "No hay ubicaciones de imagen accesibles" #, python-format msgid "" "No live migration URI configured and no default available for " "\"%(virt_type)s\" hypervisor virtualization type." msgstr "" "No se ha configurado ninguna URI de migración en vivo ni hay ninguna " "predeterminada disponible para el tipo de virtualización de hipervisor " "\"%(virt_type)s\"." msgid "No more floating IPs available." msgstr "No hay más IP flotantes disponibles." #, python-format msgid "No more floating IPs in pool %s." msgstr "No hay más IP flotantes en la agrupación %s." #, python-format msgid "No mount points found in %(root)s of %(image)s" msgstr "No se han encontrado puntos de montaje en %(root)s de %(image)s" #, python-format msgid "No operating system found in %s" msgstr "No se ha encontrado ningún sistema operativo en %s" msgid "No root disk defined." msgstr "No se ha definido un disco raíz." #, python-format msgid "" "No specific network was requested and none are available for project " "'%(project_id)s'." 
msgstr "" "No se ha solicitado ninguna red específica y no hay ninguna disponible para " "el proyecto '%(project_id)s'." msgid "No valid host found for cold migrate" msgstr "No se ha encontrado anfitrión para migración en frío" msgid "No valid host found for resize" msgstr "No se ha encontrado un host válido para redimensionamiento" #, python-format msgid "No valid host was found. %(reason)s" msgstr "No se ha encontrado ningún host válido. %(reason)s" #, python-format msgid "No volume Block Device Mapping at path: %(path)s" msgstr "No hay mapeo de volumen de dispositivo de bloque en ruta: %(path)s" #, python-format msgid "No volume Block Device Mapping with id %(volume_id)s." msgstr "" "No hay volumen de Block Device Mapping con identificador %(volume_id)s." #, python-format msgid "Node %s could not be found." msgstr "No se puede encontrar nodo %s." #, python-format msgid "Not able to acquire a free port for %(host)s" msgstr "No se puede obtener un puerto libre para %(host)s" #, python-format msgid "Not able to bind %(host)s:%(port)d, %(error)s" msgstr "No se puede enlazar %(host)s:%(port)d, %(error)s" #, python-format msgid "" "Not all Virtual Functions of PF %(compute_node_id)s:%(address)s are free." msgstr "" "No todas las Funciones Virtuales de la PF %(compute_node_id)s:%(address)s " "son gratuitas." msgid "Not an rbd snapshot" msgstr "No es una instantánea rbd" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "No está autorizado para la imagen %(image_id)s." msgid "Not authorized." msgstr "No Autorizado" msgid "Not enough parameters to build a valid rule." msgstr "No hay suficientes parámetros para crear una regla válida." msgid "Not stored in rbd" msgstr "No está almacenado en rbd" msgid "Nothing was archived." msgstr "No se ha archivado nada." #, python-format msgid "Nova requires libvirt version %s or greater." msgstr "Nova requiere versión libvirt %s o mayor." msgid "Number of Rows Archived" msgstr "Número de filas archivado" #, python-format msgid "Object action %(action)s failed because: %(reason)s" msgstr "La acción objeto %(action)s falló debido a: %(reason)s" msgid "Old volume is attached to a different instance." msgstr "Volumen antigüo está ligado a una instancia diferente." #, python-format msgid "One or more hosts already in availability zone(s) %s" msgstr "Uno o más hosts ya se encuentran en zona(s) de disponibilidad %s" msgid "Only administrators may list deleted instances" msgstr "Sólo los administradores pueden listar instancias suprimidas " msgid "Origin header does not match this host." msgstr "Cabecera de origen no coincide con este host." msgid "Origin header not valid." msgstr "Cabecera de origen no válida" msgid "Origin header protocol does not match this host." msgstr "Protocolo de cabecera de origen no coincide con este host." #, python-format msgid "PCI Device %(node_id)s:%(address)s not found." msgstr "Dispositivo PCI %(node_id)s:%(address)s no encontrado." 
#, python-format msgid "PCI alias %(alias)s is not defined" msgstr "Alias PCI %(alias)s no definido" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is %(status)s instead of " "%(hopestatus)s" msgstr "" "el dispositivo PCI %(compute_node_id)s:%(address)s está %(status)s en lugar " "de %(hopestatus)s" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead of " "%(hopeowner)s" msgstr "" "El dueño del dispositivo PCI %(compute_node_id)s:%(address)s es %(owner)s en " "lugar de %(hopeowner)s" #, python-format msgid "PCI device %(id)s not found" msgstr "Dispositivo PCI %(id)s no encontrado" #, python-format msgid "PCI device request %(requests)s failed" msgstr "La solicitud de dispositivo PCI %(requests)s ha fallado" #, python-format msgid "Page size %(pagesize)s forbidden against '%(against)s'" msgstr "El tamaño de página %(pagesize)s no es permitido por '%(against)s'" #, python-format msgid "Page size %(pagesize)s is not supported by the host." msgstr "El host no soporta el tamaño de página %(pagesize)s." #, python-format msgid "" "Parameters %(missing_params)s not present in vif_details for vif %(vif_id)s. " "Check your Neutron configuration to validate that the macvtap parameters are " "correct." msgstr "" "Los parámetros %(missing_params)s no están disponibles en vif_details para " "vif %(vif_id)s. Refiérese a la configuration de Neutron para averificar que " "los parámetros de macvtap son correctos." #, python-format msgid "Path %s must be LVM logical volume" msgstr "La vía de acceso %s debe ser el volumen lógico LVM" msgid "Paused" msgstr "Pausada" msgid "Personality file limit exceeded" msgstr "Se ha superado el límite de archivo de personalidad" #, python-format msgid "" "Physical Function %(compute_node_id)s:%(address)s, related to VF " "%(compute_node_id)s:%(vf_address)s is %(status)s instead of %(hopestatus)s" msgstr "" "La Función Física %(compute_node_id)s:%(address)s, relacionada con la VF " "%(compute_node_id)s:%(vf_address)s tiene el estado %(status)s en lugar de " "%(hopestatus)s" #, python-format msgid "Physical network is missing for network %(network_uuid)s" msgstr "La red física no esta disponible para la red %(network_uuid)s" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "La política no permite que la %(action)s se realice" #, python-format msgid "Port %(port_id)s is still in use." msgstr "El puerto %(port_id)s todavía se está utilizando." #, python-format msgid "Port %(port_id)s not usable for instance %(instance)s." msgstr "El puerto %(port_id)s no es utilizable para la instancia %(instance)s." #, python-format msgid "" "Port %(port_id)s not usable for instance %(instance)s. Value %(value)s " "assigned to dns_name attribute does not match instance's hostname " "%(hostname)s" msgstr "" "La instancia %(instance)s no puede utilizar el puerto %(port_id)s. El valor " "%(value)s asignado al atributo dns_name no coincide con el nombre de host de " "la instancia %(hostname)s" #, python-format msgid "Port %(port_id)s requires a FixedIP in order to be used." msgstr "El puerto %(port_id)s requiere una FixedIP para poder ser utilizado." #, python-format msgid "Port %s is not attached" msgstr "El puerto %s no se encuentra asignado" #, python-format msgid "Port id %(port_id)s could not be found." msgstr "No se ha podido encontrar el ID de puerto %(port_id)s." #, python-format msgid "Provided video model (%(model)s) is not supported." 
msgstr "Modelo de vídeo proporcionado (%(model)s) no está sopotado." #, python-format msgid "Provided watchdog action (%(action)s) is not supported." msgstr "La acción watchdog proporcionada (%(action)s) no está soportada." msgid "QEMU guest agent is not enabled" msgstr "Agente invitado QEMU no está habilitado" #, python-format msgid "Quiescing is not supported in instance %(instance_id)s" msgstr "No hay soporte para la desactivación en la instancia %(instance_id)s" #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "No se ha encontrado la clase de cuota %(class_name)s." msgid "Quota could not be found" msgstr "No se ha podido encontrar la cuota" #, python-format msgid "" "Quota exceeded for %(overs)s: Requested %(req)s, but already used %(used)s " "of %(allowed)s %(overs)s" msgstr "" "Se ha superado la cuota para %(overs)s: Solicitado %(req)s, pero ya se " "utiliza %(used)s de %(allowed)s %(overs)s" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Cuota superada para recursos: %(overs)s" msgid "Quota exceeded, too many key pairs." msgstr "Cuota superada, demasiados pares de claves." msgid "Quota exceeded, too many server groups." msgstr "Capacidad superada, demasiados grupos servidores. " msgid "Quota exceeded, too many servers in group" msgstr "Capacidad excedida, demasiados servidores en grupo" #, python-format msgid "Quota exists for project %(project_id)s, resource %(resource)s" msgstr "Cuota existente para el proyecto %(project_id)s, recurso %(resource)s" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "No se ha encontrado la cuota para el proyecto %(project_id)s." #, python-format msgid "" "Quota for user %(user_id)s in project %(project_id)s could not be found." msgstr "" "No se ha encontrado la cuota para el usuario %(user_id)s en el proyecto " "%(project_id)s." #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be greater than or equal to " "already used and reserved %(minimum)s." msgstr "" "Capacidad límite %(limit)s para %(resource)s debe ser mayor o igual que la " "utilizada o en reserva %(minimum)s." #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be less than or equal to " "%(maximum)s." msgstr "" "Capacidad límite %(limit)s para %(resource)s debe ser menor o igual que " "%(maximum)s." msgid "Request body and URI mismatch" msgstr "Discrepancia de URI y cuerpo de solicitud" msgid "Request is too large." msgstr "La solicitud es demasiado larga." #, python-format msgid "Request of image %(image_id)s got BadRequest response: %(response)s" msgstr "" "La solicitud de la imagen %(image_id)s ha obtenido una respuesta de " "solicitud incorrecta (BadRequest): %(response)s" #, python-format msgid "RequestSpec not found for instance %(instance_uuid)s" msgstr "" "No se ha encontrado la especificación de solicitud (RequestSpec) para la " "instancia %(instance_uuid)s" msgid "Requested CPU control policy not supported by host" msgstr "El host no da soporte a la política de control de CPU solicitada" #, python-format msgid "" "Requested hardware '%(model)s' is not supported by the '%(virt)s' virt driver" msgstr "" "El hardware solicitado '%(model)s' no está soportado por el controlador de " "virtualización '%(virt)s'" #, python-format msgid "Requested image %(image)s has automatic disk resize disabled." msgstr "" "La imagen solicitada %(image)s tiene desactivada la modificación automática " "de tamaño de disco." 
msgid "" "Requested instance NUMA topology cannot fit the given host NUMA topology" msgstr "" "La topología de instancia NUMA no es compatible con la topología de host " "NUMA proporcionada" msgid "" "Requested instance NUMA topology together with requested PCI devices cannot " "fit the given host NUMA topology" msgstr "" "La topología de instancia NUMA solicitada junto con los dispositivos PCI " "solicitados no son compatibles con la topología de host NUMA proporcionada" #, python-format msgid "" "Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to " "satisfy for vcpus count %(vcpus)d" msgstr "" "Los límites VCPU solicitados %(sockets)d:%(cores)d:%(threads)d son " "imposibles de cumplir para el número de vcpus %(vcpus)d" #, python-format msgid "Rescue device does not exist for instance %s" msgstr "No existe dispositivo de rescate para instancia %s" #, python-format msgid "Resize error: %(reason)s" msgstr "Error de redimensionamiento: %(reason)s" msgid "Resize to zero disk flavor is not allowed." msgstr "No se permite redimensionamiento a tipo cero del disco." msgid "Resource could not be found." msgstr "No se ha podido encontrar el recurso." msgid "Resumed" msgstr "Reanudada" #, python-format msgid "Root element name should be '%(name)s' not '%(tag)s'" msgstr "El nombre de elemento raíz debe ser '%(name)s' no '%(tag)s'" #, python-format msgid "Running batches of %i until complete" msgstr "Ejecutando lotes de %i hasta finalizar" #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" "No se ha podido encontrar el filtro de host de planificador %(filter_name)s." #, python-format msgid "Security group %(name)s is not found for project %(project)s" msgstr "" "El grupo de seguridad %(name)s no ha sido encontrado para el proyecto " "%(project)s" #, python-format msgid "" "Security group %(security_group_id)s not found for project %(project_id)s." msgstr "" "No se ha encontrado el grupo de seguridad %(security_group_id)s para el " "proyecto %(project_id)s." #, python-format msgid "Security group %(security_group_id)s not found." msgstr "No se ha encontrado el grupo de seguridad %(security_group_id)s." #, python-format msgid "" "Security group %(security_group_name)s already exists for project " "%(project_id)s." 
msgstr "" "El grupo de seguridad %(security_group_name)s ya existe para el proyecto " "%(project_id)s" #, python-format msgid "" "Security group %(security_group_name)s not associated with the instance " "%(instance)s" msgstr "" "El grupo de seguridad %(security_group_name)s no está asociado a la " "instancia %(instance)s" msgid "Security group id should be uuid" msgstr "El id de grupo de seguridad debe ser uuid" msgid "Security group name cannot be empty" msgstr "El nombre de grupo de seguridad no puede estar vacío" msgid "Security group not specified" msgstr "Grupo de seguridad no especificado" #, python-format msgid "Server disk was unable to be resized because: %(reason)s" msgstr "El disco del servidor fue incapaz de re-escalarse debido a: %(reason)s" msgid "Server does not exist" msgstr "El servidor no existe" #, python-format msgid "ServerGroup policy is not supported: %(reason)s" msgstr "No se soporta la política ServerGroup: %(reason)s" msgid "ServerGroupAffinityFilter not configured" msgstr "ServerGroupAffinityFilter no configurado" msgid "ServerGroupAntiAffinityFilter not configured" msgstr "ServerGroupAntiAffinityFilter no configurado" msgid "ServerGroupSoftAffinityWeigher not configured" msgstr "No se ha configurado ServerGroupSoftAffinityWeigher" msgid "ServerGroupSoftAntiAffinityWeigher not configured" msgstr "No se ha configurado ServerGroupSoftAntiAffinityWeigher" #, python-format msgid "Service %(service_id)s could not be found." msgstr "No se ha podido encontrar el servicio %(service_id)s." #, python-format msgid "Service %s not found." msgstr "Servicio %s no encontrado." msgid "Service is unavailable at this time." msgstr "El servicio no esta disponible en este momento" #, python-format msgid "Service with host %(host)s binary %(binary)s exists." msgstr "Servicio con host %(host)s binario %(binary)s existe." #, python-format msgid "Service with host %(host)s topic %(topic)s exists." msgstr "Servicio con host %(host)s asunto %(topic)s existe." msgid "Set admin password is not supported" msgstr "No se soporta el establecer de la constraseña del admin" #, python-format msgid "Share '%s' is not supported" msgstr "Compartido %s no está soportado." #, python-format msgid "Share level '%s' cannot have share configured" msgstr "Nivel compartido '%s' no puede tener configurado compartido" #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "No se ha podido encontrar la instantánea %(snapshot_id)s." msgid "Some required fields are missing" msgstr "Algunos campos obligatorios no están rellenos." #, python-format msgid "" "Something went wrong when deleting a volume snapshot: rebasing a " "%(protocol)s network disk using qemu-img has not been fully tested" msgstr "" "Ha habido algún problema al suprimir una instantánea de volumen: no se ha " "probado completamente el proceso de reorganizar un disco de red de " "%(protocol)s utilizando qemu-img" msgid "Sort direction size exceeds sort key size" msgstr "" "El tamaño de dirección de ordenación excede el tamaño de clave de ordenación" msgid "Sort key supplied was not valid." msgstr "La clave de clasificación proporcionada no es válida. 
" msgid "Specified fixed address not assigned to instance" msgstr "Dirección fija especificada no asignada a la instancia" msgid "Started" msgstr "Arrancado" msgid "Stopped" msgstr "Se ha detenido" #, python-format msgid "Storage error: %(reason)s" msgstr "Error de almacenamiento: %(reason)s" #, python-format msgid "Storage policy %s did not match any datastores" msgstr "" "La política de almacenamiento %s no coincide con ningún almacén de datos" msgid "Success" msgstr "Éxito" msgid "Suspended" msgstr "Suspendida" msgid "Swap drive requested is larger than instance type allows." msgstr "" "El controlador de intercambio solicitado es más grande que lo que permite la " "instancia." msgid "Table" msgstr "Tabla" #, python-format msgid "Task %(task_name)s is already running on host %(host)s" msgstr "La tarea %(task_name)s ya se está ejecutando en el host %(host)s" #, python-format msgid "Task %(task_name)s is not running on host %(host)s" msgstr "La tarea %(task_name)s no se está ejecutando en el host %(host)s" #, python-format msgid "The PCI address %(address)s has an incorrect format." msgstr "La dirección PCI %(address)s tiene un formato incorrecto." #, python-format msgid "The console port range %(min_port)d-%(max_port)d is exhausted." msgstr "" "El puerto de rangos de consola %(min_port)d-%(max_port)d se ha agotado." msgid "The created instance's disk would be too small." msgstr "La capacidad del disco de la instancia creada sería demasiado pequeña." msgid "The current driver does not support preserving ephemeral partitions." msgstr "" "El dispositivo actual no soporta la preservación de particiones efímeras." msgid "The default PBM policy doesn't exist on the backend." msgstr "La política PBM por defecto no existe en el backend." msgid "The floating IP request failed with a BadRequest" msgstr "" "La solicitud de la IP flotante ha fallado con BadRequest (Solicitud " "incorrecta)" msgid "" "The instance requires a newer hypervisor version than has been provided." msgstr "" "La instancia necesita una versión de hipervisor más reciente que la " "proporcionada." #, python-format msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d" msgstr "El número de puertos definidos: %(ports)d es más del límite: %(quota)d" #, python-format msgid "The provided RNG device path: (%(path)s) is not present on the host." msgstr "" "La ruta del dispositivo RNG proporcionada: (%(path)s) no está presente en el " "anfitrión." msgid "The request is invalid." msgstr "La petición es inválida." #, python-format msgid "" "The requested amount of video memory %(req_vram)d is higher than the maximum " "allowed by flavor %(max_vram)d." msgstr "" "La cantidad solicitada de memoria de vídeo %(req_vram)d es mayor que la " "máxima permitida por el tipo %(max_vram)d." msgid "The requested availability zone is not available" msgstr "La zona de disponibilidad solicitada no está disponible" msgid "The requested functionality is not supported." msgstr "No se soporta la funcionalidad solicitada." #, python-format msgid "The specified cluster '%s' was not found in vCenter" msgstr "El clúster especificado '%s' no se ha encontrado en vCenter" #, python-format msgid "The supplied device path (%(path)s) is in use." msgstr "La ruta proporcionada al dispositivo (%(path)s) está en uso." #, python-format msgid "The supplied device path (%(path)s) is invalid." msgstr "La ruta proporcionada al dispositivo (%(path)s) no es válida." 
#, python-format msgid "" "The supplied disk path (%(path)s) already exists, it is expected not to " "exist." msgstr "" "La ruta de disco proporcionada (%(path)s) ya existe, se espera una que no " "exista." msgid "The supplied hypervisor type of is invalid." msgstr "El tipo de hipervisor proporcionado no es válido. " msgid "The target host can't be the same one." msgstr "El host de destino no puede ser el mismo." #, python-format msgid "The token '%(token)s' is invalid or has expired" msgstr "El token '%(token)s' no es válido o ha expirado" #, python-format msgid "" "The volume cannot be assigned the same device name as the root device %s" msgstr "" "No se puede asignar al volumen el mismo nombre de dispositivo del " "dispositivo principal %s" msgid "There are not enough hosts available." msgstr "No hay suficientes hosts disponibles." #, python-format msgid "There is no such action: %s" msgstr "No existe esta acción: %s" #, python-format msgid "" "This compute node's hypervisor is older than the minimum supported version: " "%(version)s." msgstr "" "El hipervisor de este nodo de cálculo es anterior a la versión mínima " "soportada: %(version)s." msgid "" "This method needs to be called with either networks=None and port_ids=None " "or port_ids and networks as not none." msgstr "" "Este método se tiene que llamar con networks=None y port_ids=None o bien con " "port_ids y networks con un valor distinto de None." #, python-format msgid "This rule already exists in group %s" msgstr "Esta regla ya existe en el grupo %s" #, python-format msgid "" "This service is older (v%(thisver)i) than the minimum (v%(minver)i) version " "of the rest of the deployment. Unable to continue." msgstr "" "Este servicio es anterior (v%(thisver)i) a la versión mímima soportada " "(v%(minver)i) del resto del despliegue. No se puede continuar." msgid "Timeout waiting for response from cell" msgstr "Se ha excedido el tiempo de espera de respuesta de la célula" #, python-format msgid "Timeout while checking if we can live migrate to host: %s" msgstr "" "Se ha agotado el tiempo de espera mientras se comprobaba si se puede migrar " "en directo al host: %s" msgid "To and From ports must be integers" msgstr "Puertos De y Hacia deben ser enteros" msgid "Token not found" msgstr "Token no encontrado" msgid "Triggering crash dump is not supported" msgstr "No se da soporte a desecadenar un volcado de memoria" msgid "Type and Code must be integers for ICMP protocol type" msgstr "Tipo y Código deben ser enteros del tipo de protocolo ICMP" msgid "UEFI is not supported" msgstr "UEFI no está soportado" #, python-format msgid "" "Unable to associate floating IP %(address)s to any fixed IPs for instance " "%(id)s. Instance has no fixed IPv4 addresses to associate." msgstr "" "No es posible asociar una IP flotante %(address)s a ninguna de las IPs fijas " "para instancia %(id)s. La instancia no tiene direcciones IPv4 fijas para " "asociar." #, python-format msgid "" "Unable to associate floating IP %(address)s to fixed IP %(fixed_address)s " "for instance %(id)s. Error: %(error)s" msgstr "" "No es posible asociar la IP flotante %(address)s a la IP fija " "%(fixed_address)s para instancia %(id)s. 
Error: %(error)s" #, python-format msgid "Unable to convert image to %(format)s: %(exp)s" msgstr "No se puede convertir la imagen a %(format)s: %(exp)s" #, python-format msgid "Unable to convert image to raw: %(exp)s" msgstr "No se puede convertir la imagen a sin formato: %(exp)s" #, python-format msgid "Unable to determine disk bus for '%s'" msgstr "No se puede determinar el bus de disco para '%s'" #, python-format msgid "Unable to determine disk prefix for %s" msgstr "No se puede determinar el prefijo de disco para %s " #, python-format msgid "Unable to find host for Instance %s" msgstr "No se puede encontrar el host para la instancia %s " msgid "Unable to find iSCSI Target" msgstr "No se puede encontrar el destino iSCSI " msgid "Unable to find volume" msgstr "No se puede encontrar volumen" msgid "Unable to get host UUID: /etc/machine-id does not exist" msgstr "No se puede obtener el UUID de host: /etc/machine-id no existe" msgid "Unable to get host UUID: /etc/machine-id is empty" msgstr "No se puede obtener el UUID de host: /etc/machine-id está vacío" msgid "" "Unable to launch multiple instances with a single configured port ID. Please " "launch your instance one by one with different ports." msgstr "" "Incapaz de lanzar múltiples instancias con un solo identificador de puerto " "configurado. Por favor lanza tu instancia una por una con puertos diferentes." #, python-format msgid "" "Unable to migrate %(instance_uuid)s to %(dest)s: Lack of memory(host:" "%(avail)s <= instance:%(mem_inst)s)" msgstr "" "No se ha podido migrar %(instance_uuid)s a %(dest)s: falta de memoria (host:" "%(avail)s <= instancia:%(mem_inst)s)" #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too large(available " "on destination host:%(available)s < need:%(necessary)s)" msgstr "" "No se ha podido migrar %(instance_uuid)s: el disco de la instancia es " "demasiado grande (disponible en host de destino: %(available)s < necesario: " "%(necessary)s)" #, python-format msgid "" "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." msgstr "" "Incapaz de emigrar la instancia %(instance_id)s al actual anfitrion " "(%(host)s)" msgid "Unable to resize disk down." msgstr "Incapaz de reducir el tamaño del disco." msgid "Unable to set password on instance" msgstr "No se puede establecer contraseña en la instancia" msgid "Unable to shrink disk." msgstr "No se puede empaquetar disco." #, python-format msgid "Unacceptable CPU info: %(reason)s" msgstr "Información de CPU inválida: %(reason)s" msgid "Unacceptable parameters." msgstr "Parametros inaceptables" #, python-format msgid "Unavailable console type %(console_type)s." msgstr "El tipo de consola %(console_type)s no está disponible." msgid "" "Undefined Block Device Mapping root: BlockDeviceMappingList contains Block " "Device Mappings from multiple instances." msgstr "" "Raíz de mapeo de dispositivo de bloques no definida: la lista de mapeos de " "dispositivos de bloques (BlockDeviceMappingList ) contiene mapeos de " "dispositivos de bloques de diversas instancias." #, python-format msgid "Unexpected aggregate action %s" msgstr "Acción de agregado inesperada %s" msgid "Unexpected type adding stats" msgstr "Estado de adición de tipo inesperada" #, python-format msgid "Unexpected vif_type=%s" msgstr "vif_type=%s inesperado" msgid "Unknown" msgstr "Desconocido" msgid "Unknown action" msgstr "Acción desconocida" #, python-format msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat." 
msgstr "" "Formato de unidad de configuración desconocido %(format)s. Seleccione uno de " "iso9660 o vfat." #, python-format msgid "Unknown delete_info type %s" msgstr "Tipo delete_info %s desconocido" #, python-format msgid "Unknown image_type=%s" msgstr "image_type=%s desconocido " #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Recursos de cuota desconocidos %(unknown)s." msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Dirección de clasificación desconocida, debe ser 'desc' o ' asc'" #, python-format msgid "Unknown type: %s" msgstr "Tipo desconocido: %s" msgid "Unrecognized legacy format." msgstr "Formato heredado no reconocido." #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "Valor de read_deleted no reconocido '%s'" #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "Valor '%s' no reconocido para CONF.running_deleted_instance_action" #, python-format msgid "Unshelve attempted but the image %s cannot be found." msgstr "Se ha intentado la extracción pero la imagen %s no ha sido encontrada." msgid "Unsupported Content-Type" msgstr "Tipo de contenido no soportado" #, python-format msgid "User %(username)s not found in password file." msgstr "" "El usuario %(username)s no se ha encontrado en el archivo de contraseña." #, python-format msgid "User %(username)s not found in shadow file." msgstr "" "El usuario %(username)s no se ha encontrado en el archivo de duplicación. " msgid "User data needs to be valid base 64." msgstr "Los datos de usuario deben ser de base 64 válidos." msgid "User does not have admin privileges" msgstr "El usuario no tiene privilegios de administrador" msgid "" "Using different block_device_mapping syntaxes is not allowed in the same " "request." msgstr "" "El uso de sintáxis diferentes de block_device_mapping en la misma petición " "no está permitido." #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "Versión %(req_ver)s no soportada por el API. Mínimo es %(min_ver)s y máximo " "es %(max_ver)s." msgid "Virtual Interface creation failed" msgstr "Creacion de interfaz virtual fallida" msgid "Virtual interface plugin failed" msgstr "Plugin de interfaz virtual fallido" #, python-format msgid "Virtual machine mode '%(vmmode)s' is not recognised" msgstr "No se reconoce el modo de máquina virtual '%(vmmode)s' " #, python-format msgid "Virtual machine mode '%s' is not valid" msgstr "El modo de máquina virtual '%s' no es válido" #, python-format msgid "Virtualization type '%(virt)s' is not supported by this compute driver" msgstr "" "El tipo de virtualización '%(virt)s' no está soportado por este controlador " "de cálculo" #, python-format msgid "Volume %(volume_id)s could not be attached. Reason: %(reason)s" msgstr "No se ha podido adjuntar el volumen %(volume_id)s. Motivo: %(reason)s" #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "No se ha podido encontrar el volumen %(volume_id)s." #, python-format msgid "" "Volume %(volume_id)s did not finish being created even after we waited " "%(seconds)s seconds or %(attempts)s attempts. And its status is " "%(volume_status)s." msgstr "" "La creación del volumen %(volume_id)s no se ha completado incluso después de " "esperar %(seconds)s segundos o %(attempts)s intentos. El estado es " "%(volume_status)s." msgid "Volume does not belong to the requested instance." 
msgstr "El volumen no pertenece a la instancia solicitada." #, python-format msgid "" "Volume encryption is not supported for %(volume_type)s volume %(volume_id)s" msgstr "" "La encriptación del volumen no es soportada por %(volume_type)s volume " "%(volume_id)s" #, python-format msgid "" "Volume is smaller than the minimum size specified in image metadata. Volume " "size is %(volume_size)i bytes, minimum size is %(image_min_disk)i bytes." msgstr "" "El volumen es menor del tamaño mínimo especificado en los metatarso de la " "imagen. El tamaño del volumen es %(volume_size)i bytes, el tamaño mínimo es " "%(image_min_disk)i bytes." #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " "support custom block size" msgstr "" "El volúmen establece el tamaño de bloque, pero el hipervisor libvirt actual " "'%s' no soporta tamaño de bloque personalizado." msgid "When resizing, instances must change flavor!" msgstr "Al redimensionarse, las instancias deben cambiar de tipo." #, python-format msgid "Wrong quota method %(method)s used on resource %(res)s" msgstr "Método de contingencia %(method)s usado en recurso %(res)s es erróneo" msgid "X-Forwarded-For is missing from request." msgstr "X-Forwarded-For no está presente en la petición." msgid "X-Instance-ID header is missing from request." msgstr "Falta la cabecera de ID de instancia X en la solicitud." msgid "X-Instance-ID-Signature header is missing from request." msgstr "Cabecera X-Instance-ID-Signature hace falta en la solicitud." msgid "X-Metadata-Provider is missing from request." msgstr "X-Metadata-Provider no está presente en la petición." msgid "X-Tenant-ID header is missing from request." msgstr "Falta cabecera X-Tenant-ID en la solicitud." msgid "You are not allowed to delete the image." msgstr "No le está permitido suprimir la imagen." msgid "" "You are not authorized to access the image the instance was started with." msgstr "" "No está autorizado a acceder a la imagen con la que se ha lanzado la " "instancia." msgid "You must implement __call__" msgstr "Debe implementar __call__" msgid "You should specify images_rbd_pool flag to use rbd images." msgstr "" "Debes especificar la bandera images_rbd_pool para utilizar imagenes rbd." msgid "You should specify images_volume_group flag to use LVM images." msgstr "" "Debes especificar la bandera images_volue_group para utilizar imagenes LVM." msgid "Zero floating IPs available." msgstr "No hay ninguna dirección IP flotante disponible." msgid "admin password can't be changed on existing disk" msgstr "" "No se puede cambiar la contraseña de administrador en el disco existente" msgid "cannot understand JSON" msgstr "no se puede entender JSON" msgid "clone() is not implemented" msgstr "no se ha implementado clone()" #, python-format msgid "connect info: %s" msgstr "información de conexión: %s" #, python-format msgid "connecting to: %(host)s:%(port)s" msgstr "conectando a: %(host)s:%(port)s" msgid "direct_snapshot() is not implemented" msgstr "direct_snapshot() no está implementado" #, python-format msgid "disk type '%s' not supported" msgstr "No se soporta tipo de disco '%s' " #, python-format msgid "empty project id for instance %s" msgstr "ID de proyecto vacío para la instancia %s" msgid "error setting admin password" msgstr "error al establecer contraseña de administrador" #, python-format msgid "error: %s" msgstr "error: %s" #, python-format msgid "failed to generate X509 fingerprint. Error message: %s" msgstr "falló al generar huella digital X509. 
Mensaje de error: %s" msgid "failed to generate fingerprint" msgstr "falló al generar huella digital" msgid "filename cannot be None" msgstr "nombre del fichero no puede ser Ninguno" msgid "floating IP is already associated" msgstr "Esta IP flotante ya está asociada" msgid "floating IP not found" msgstr "No se ha encontrado la IP flotante" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s respaldado por: %(backing_file)s" #, python-format msgid "href %s does not contain version" msgstr "href %s no contiene la versión" msgid "image already mounted" msgstr "imagen ya montada" #, python-format msgid "instance %s is not running" msgstr "No se está ejecutando instancia %s" msgid "instance is a required argument to use @refresh_cache" msgstr "la instancia es un argumento necesario para utilizar @refresh_cache " msgid "instance is not in a suspended state" msgstr "la instancia no está en un estado suspendido" msgid "instance is not powered on" msgstr "instancia no activada" msgid "instance is powered off and cannot be suspended." msgstr "instancia está desactivada y no se puede suspender. " #, python-format msgid "instance_id %s could not be found as device id on any ports" msgstr "instance_id %s no ha sido encontrada como dispositivo en ningún puerto" msgid "is_public must be a boolean" msgstr "is_public debe ser un booleano" msgid "keymgr.fixed_key not defined" msgstr "keymgr:fixed_key no está definido" msgid "l3driver call to add floating IP failed" msgstr "La llamada l3driver para añadir IP flotante ha fallado" #, python-format msgid "libguestfs installed but not usable (%s)" msgstr "libguestfs está instalado pero no puede ser usado (%s)" #, python-format msgid "libguestfs is not installed (%s)" msgstr "libguestfs no está nstalado (%s)" #, python-format msgid "marker [%s] not found" msgstr "no se ha encontrado el marcador [%s]" #, python-format msgid "max rows must be <= %(max_value)d" msgstr "el número máximo de filas debe ser <= %(max_value)d" msgid "max_count cannot be greater than 1 if an fixed_ip is specified." msgstr "max_count no puede ser mayor a 1 si se especifica una fixed_ip." msgid "min_count must be <= max_count" msgstr "min_count debe ser <= max_count " #, python-format msgid "nbd device %s did not show up" msgstr "el dispositivo nbd %s no se ha mostrado" msgid "nbd unavailable: module not loaded" msgstr "nbd no disponible: módulo no cargado" #, python-format msgid "no match found for %s" msgstr "No se ha encontrado coincidencia para %s" #, python-format msgid "no usable parent snapshot for volume %s" msgstr "no hay ninguna instantánea padre para el volumen %s" #, python-format msgid "no write permission on storage pool %s" msgstr "" "no dispone de permiso de escritura en la agrupación de almacenamiento%s." 
#, python-format msgid "not able to execute ssh command: %s" msgstr "No es posible ejecutar comando ssh: %s" msgid "old style configuration can use only dictionary or memcached backends" msgstr "" "La configuración antigua solo puede utilizar programas de fondo de ipo " "diccionario o guardados en la memoria caché" msgid "operation time out" msgstr "Tiempo de espera agotado para la operación" #, python-format msgid "partition %s not found" msgstr "No se ha encontrado la partición %s " #, python-format msgid "partition search unsupported with %s" msgstr "búsqueda de partición no soportada con %s " msgid "pause not supported for vmwareapi" msgstr "pausa no soportada para vmwareapi" msgid "printable characters with at least one non space character" msgstr "caracteres imprimibles con al menos un carácter que no sea un espacio." msgid "printable characters. Can not start or end with whitespace." msgstr "" "caracteres imprimibles. No pueden comenzar ni terminar con un espacio en " "blanco." #, python-format msgid "qemu-img failed to execute on %(path)s : %(exp)s" msgstr "No se ha podido ejecutar qemu-img en %(path)s : %(exp)s" #, python-format msgid "qemu-nbd error: %s" msgstr "error de qemu-nbd: %s" msgid "rbd python libraries not found" msgstr "Las librerías rbd python no han sido encontradas" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "read_deleted solo puede ser 'no', 'yes' o 'only', no %r" msgid "serve() can only be called once" msgstr "serve() sólo se puede llamar una vez " msgid "service is a mandatory argument for DB based ServiceGroup driver" msgstr "" "el servicio es un argumento obligatorio para el controlador ServiceGroup " "basado en base de datos" msgid "service is a mandatory argument for Memcached based ServiceGroup driver" msgstr "" "el servicio es un argumento obligatorio para el controlador de ServiceGroup " "basado en Memcached" msgid "set_admin_password is not implemented by this driver or guest instance." msgstr "" "esta instancia de invitado o controlador no implementa set_admin_password ." #, python-format msgid "snapshot for %s" msgstr "instantánea para %s " msgid "snapshot_id required in create_info" msgstr "snapshot_id es requerido en create_info" msgid "token not provided" msgstr "token no proporcionado" msgid "too many body keys" msgstr "demasiadas claves de cuerpo" msgid "unpause not supported for vmwareapi" msgstr "cancelación de pausa no soportada para vmwareapi" #, python-format msgid "vg %s must be LVM volume group" msgstr "El grupo de volúmenes %s debe ser el grupo de volúmenes LVM" #, python-format msgid "vhostuser_sock_path not present in vif_details for vif %(vif_id)s" msgstr "" "vhostuser_sock_path no está presente en vif_details para vif %(vif_id)s" #, python-format msgid "vif type %s not supported" msgstr "El tipo VIF %s no está soportado" msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "" "El parámetro vif_type debe estar presente para esta implementación de " "vif_driver" #, python-format msgid "volume %s already attached" msgstr "volumen %s ya ha sido adjuntado" #, python-format msgid "" "volume '%(vol)s' status must be 'in-use'. Currently in '%(status)s' status" msgstr "" "estado de volumen '%(vol)s' debe ser 'en-uso'. 
Actualmente en '%(status)s' " "estado" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315688.893605 nova-32.0.0/nova/locale/fr/0000775000175000017500000000000000000000000015373 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3696086 nova-32.0.0/nova/locale/fr/LC_MESSAGES/0000775000175000017500000000000000000000000017160 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/locale/fr/LC_MESSAGES/nova.po0000664000175000017500000027461200000000000020477 0ustar00zuulzuul00000000000000# Translations template for nova. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the nova project. # # Translators: # ariivarua , 2013 # Fabien B. , 2013 # Corina Roe , 2014 # CryLegend , 2013 # EVEILLARD , 2013 # FIRST AUTHOR , 2011 # Frédéric , 2014 # GuiTsi , 2013 # Jonathan Dupart , 2014 # Kodoku , 2013 # Lucas Mascaro , 2015 # Eric Marques , 2013 # Maxime COQUEREL , 2014-2015 # Andrew Melim , 2014 # Olivier Buisson , 2012 # Patrice LACHANCE , 2013 # EVEILLARD , 2013 # Vincent JOBARD , 2013 # Benjamin Godard , 2013 # Andreas Jaeger , 2016. #zanata # Thomas Morin , 2017. #zanata msgid "" msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-07-04 18:13+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2017-12-01 11:31+0000\n" "Last-Translator: Thomas Morin \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: French\n" #, python-format msgid "%(address)s is not a valid IP v4/6 address." msgstr "%(address)s n'est pas une adresse IP v4/6 valide" #, python-format msgid "" "%(binary)s attempted direct database access which is not allowed by policy" msgstr "" "%(binary)s a tenté un accès direct à la base de données qui n'est pas " "autorisé par la stratégie" #, python-format msgid "%(cidr)s is not a valid IP network." msgstr "%(cidr)s n'est pas une adresse IP réseau valide." #, python-format msgid "%(field)s should not be part of the updates." msgstr "%(field)s ne devrait pas faire partie des mises à jour." #, python-format msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB" msgstr "%(memsize)d MB de mémoire assignée, mais %(memtotal)d MB attendus" #, python-format msgid "%(path)s is not on local storage: %(reason)s" msgstr "%(path)s n'est pas sur un stockage local : %(reason)s" #, python-format msgid "%(path)s is not on shared storage: %(reason)s" msgstr "%(path)s n'est pas sur un stockage partagé : %(reason)s" #, python-format msgid "%(total)i rows matched query %(meth)s, %(done)i migrated" msgstr "%(total)i lignes correspondent à la requête %(meth)s, %(done)i migrées" #, python-format msgid "%(type)s hypervisor does not support PCI devices" msgstr "L'hyperviseur %(type)s ne supporte pas les périphériques PCI" #, python-format msgid "%s does not support disk hotplug." msgstr "%s ne prend pas en charge le branchement à chaud de disque." #, python-format msgid "%s format is not supported" msgstr "Le format %s n'est pas supporté" #, python-format msgid "%s is not supported." msgstr "%s n'est pas supporté." #, python-format msgid "%s must be either 'MANUAL' or 'AUTO'." 
msgstr "%s doit être 'MANUAL' ou 'AUTO'." #, python-format msgid "'%(other)s' should be an instance of '%(cls)s'" msgstr "'%(other)s' devrait être une instance de '%(cls)s'" msgid "'qemu-img info' parsing failed." msgstr "Echec de l'analyse syntaxique de 'qemu-img info'." #, python-format msgid "'rxtx_factor' argument must be a float between 0 and %g" msgstr "" "L'argument 'rxtx_factor' doit être une variable flottante entre 0 et %g." #, python-format msgid "A NetworkModel is required in field %s" msgstr "Un modèle de réseau est requis dans la zone %s" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "Le format de l'écriture de la version %(version)s de l'API est invalide. Il " "doit être de forme: NumMajeur.NumMineur ." #, python-format msgid "API version %(version)s is not supported on this method." msgstr "La version %(version)s de l'API n'est pas supporté par cette méthode " msgid "Access list not available for public flavors." msgstr "Liste d'accès non disponible pour les versions publiques." #, python-format msgid "Action %s not found" msgstr "Action %s non trouvé" #, python-format msgid "" "Action for request_id %(request_id)s on instance %(instance_uuid)s not found" msgstr "" "Action de request_id %(request_id)s sur l'instance %(instance_uuid)s non " "trouvée" #, python-format msgid "Action: '%(action)s', calling method: %(meth)s, body: %(body)s" msgstr "Action: '%(action)s', appellant la méthode: %(meth)s, corps: %(body)s" #, python-format msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries" msgstr "" "Echec de l'ajout de métadonnées pour l'agrégat %(id)s après %(retries)s " "tentatives" #, python-format msgid "Aggregate %(aggregate_id)s already has host %(host)s." msgstr "L'agrégat %(aggregate_id)s a déjà l'hôte %(host)s." #, python-format msgid "Aggregate %(aggregate_id)s could not be found." msgstr "Agrégat %(aggregate_id)s introuvable." #, python-format msgid "Aggregate %(aggregate_id)s has no host %(host)s." msgstr "L'agrégat %(aggregate_id)s n'a pas d'hôte %(host)s." #, python-format msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." msgstr "" "L'agrégat %(aggregate_id)s n'a pas de métadonnées avec la clé " "%(metadata_key)s." #, python-format msgid "Aggregate %(aggregate_name)s already exists." msgstr "L'agrégat %(aggregate_name)s existe déjà." #, python-format msgid "Aggregate %s does not support empty named availability zone" msgstr "" "L'agrégat de %s ne prend pas en charge la zone de disponibilité nommée vide" #, python-format msgid "An invalid 'name' value was provided. The name must be: %(reason)s" msgstr "" "Une valeur 'name' non valide a été fournie. Le nom doit être : %(reason)s" msgid "An unknown error has occurred. Please try your request again." msgstr "" "Une erreur inopinée à eu lieu. Merci d'essayer votre requête à nouveau." msgid "An unknown exception occurred." msgstr "Une exception inconnue s'est produite." #, python-format msgid "Architecture name '%(arch)s' is not recognised" msgstr "Nom d'architecture '%(arch)s' n'est pas reconnu" #, python-format msgid "Architecture name '%s' is not valid" msgstr "Le nom d'architecture '%s' n'est pas valide" #, python-format msgid "" "Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty pool" msgstr "" "Tentative d'utilisation du périphérique PCI %(compute_node_id)s:%(address)s " "à partir d'un pool vide" msgid "Attempted overwrite of an existing value." 
msgstr "Tentative d'écriture d'une valeur existante." #, python-format msgid "Attribute not supported: %(attr)s" msgstr "Attribut %(attr)s non supporté " #, python-format msgid "Bad network format: missing %s" msgstr "Format de réseau incorrect : %s manquant" msgid "Bad networks format" msgstr "Format de réseaux incorrect" #, python-format msgid "Bad networks format: network uuid is not in proper format (%s)" msgstr "" "Format de réseaux incorrect : l'UUID du réseau n'est pas au format approprié " "(%s)" #, python-format msgid "Bad prefix for network in cidr %s" msgstr "Préfixe incorrecte pour le réseau dans cidr %s" #, python-format msgid "" "Binding failed for port %(port_id)s, please check neutron logs for more " "information." msgstr "" "Échec de l'attachement du port %(port_id)s, voir les logs neutron pour plus " "d'information." msgid "Blank components" msgstr "Composants vides" msgid "" "Blank volumes (source: 'blank', dest: 'volume') need to have non-zero size" msgstr "" "Les volumes vides (source : 'vide', dest : 'volume') doivent avoir une " "taille non zéro" #, python-format msgid "Block Device %(id)s is not bootable." msgstr "L'unité par bloc %(id)s n'est pas amorçable." #, python-format msgid "" "Block Device Mapping %(volume_id)s is a multi-attach volume and is not valid " "for this operation." msgstr "" "Le volume de mappage d'unité par bloc %(volume_id)s est un volume multi-" "connexion et n'est pas valide pour cette opération." msgid "Block Device Mapping cannot be converted to legacy format. " msgstr "Le mappage d'unité par bloc ne peut être converti à l'ancien format." msgid "Block Device Mapping is Invalid." msgstr "Le mappage d'unité par bloc n'est pas valide." #, python-format msgid "Block Device Mapping is Invalid: %(details)s" msgstr "Le mappage d'unité par bloc n'est pas valide : %(details)s" msgid "" "Block Device Mapping is Invalid: Boot sequence for the instance and image/" "block device mapping combination is not valid." msgstr "" "Le mappage des unités par bloc est invalide : La séquence de démarrage pour " "l'instance et la combinaison de mappage du périphérique image/bloc n'est pas " "valide." msgid "" "Block Device Mapping is Invalid: You specified more local devices than the " "limit allows" msgstr "" "Le mappage d'unité par bloc est invalide : Vous avez spécifié plus de " "périphériques locaux que la limite autorise" #, python-format msgid "Block Device Mapping is Invalid: failed to get image %(id)s." msgstr "" "Le mappage d'unité par bloc n'est pas valide : impossible d'obtenir l'image " "%(id)s." #, python-format msgid "Block Device Mapping is Invalid: failed to get snapshot %(id)s." msgstr "" "Le mappage d'unité par bloc n'est pas valide : échec d'obtention de " "l'instantané %(id)s." #, python-format msgid "Block Device Mapping is Invalid: failed to get volume %(id)s." msgstr "" "Le mappage d'unité par bloc n'est pas valide : échec d'obtention du volume " "%(id)s." msgid "Block migration can not be used with shared storage." msgstr "" "La migration par bloc ne peut pas être utilisée avec le stockage partagé." msgid "Boot index is invalid." msgstr "L'index de démarrage est invalide." 
#, python-format msgid "Build of instance %(instance_uuid)s aborted: %(reason)s" msgstr "Construction de l'instance %(instance_uuid)s interrompue : %(reason)s" #, python-format msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s" msgstr "" "La construction de l'instance %(instance_uuid)s a été reprogrammée : " "%(reason)s" #, python-format msgid "BuildRequest not found for instance %(uuid)s" msgstr "BuildRequest introuvable pour l'instance %(uuid)s." msgid "CPU and memory allocation must be provided for all NUMA nodes" msgstr "" "CPU et allocation mémoire doivent être fournis pour tous les nœuds NUMA" #, python-format msgid "" "CPU doesn't have compatibility.\n" "\n" "%(ret)s\n" "\n" "Refer to %(u)s" msgstr "" "L'UC n'a pas de compatibilité.\n" "\n" "%(ret)s\n" "\n" "Voir %(u)s" #, python-format msgid "CPU number %(cpunum)d is assigned to two nodes" msgstr "Le CPU numéro %(cpunum)d est assigné à deux nodes" #, python-format msgid "CPU number %(cpunum)d is larger than max %(cpumax)d" msgstr "Le nombre de CPU %(cpunum)d est plus grand que le maximum %(cpumax)d" #, python-format msgid "CPU number %(cpuset)s is not assigned to any node" msgstr "Le numéro de CPU %(cpuset)s n'est assigné à aucun node" msgid "Can not add access to a public flavor." msgstr "Impossible d'ajouter l'accès à un gabarit public." msgid "Can not find requested image" msgstr "Impossible de trouver l'image demandée" #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "" "Impossible de traiter la demande d'authentification pour les données " "d'identification %d" msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "" "Impossible d'extraire le chemin d'unité racine de la configuration libvirt " "de l'instance" #, python-format msgid "" "Cannot '%(action)s' instance %(server_id)s while it is in %(attr)s %(state)s" msgstr "" "Impossible de '%(action)s' l'instance %(server_id)s lorsque elle a l'état " "%(attr)s %(state)s" #, python-format msgid "Cannot add host to aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Impossible d'ajouter l’hôte sur l'ensemble %(aggregate_id)s. Raison: " "%(reason)s" msgid "Cannot attach one or more volumes to multiple instances" msgstr "Impossible de connecter un ou plusieurs volumes à plusieurs instances" #, python-format msgid "Cannot call %(method)s on orphaned %(objtype)s object" msgstr "Pas d'appel de %(method)s sur un objet %(objtype)s orphelin" #, python-format msgid "" "Cannot determine the parent storage pool for %s; cannot determine where to " "store images" msgstr "" "Impossible de déterminer le pool de stockage parent pour %s; impossible de " "déterminer où stocker les images" msgid "Cannot find image for rebuild" msgstr "Image introuvable pour la régénération" #, python-format msgid "Cannot remove host %(host)s in aggregate %(id)s" msgstr "Impossible de supprimer l'hôte %(host)s dans l'agrégat %(id)s" #, python-format msgid "Cannot remove host from aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Impossible de supprimer l’hôte de l'ensemble %(aggregate_id)s. 
Raison: " "%(reason)s" msgid "Cannot rescue a volume-backed instance" msgstr "Impossible de sauver une instance volume" msgid "" "Cannot set cpu thread pinning policy in a non dedicated cpu pinning policy" msgstr "" "Impossible de définir une stratégie de réservation de thread de CPU dans une " "stratégie de réservation de CPU non dédiée" msgid "Cannot set realtime policy in a non dedicated cpu pinning policy" msgstr "" "Impossible de définir une stratégie en temps réel dans une stratégie de " "réservation de CPU non dédiée" #, python-format msgid "Cannot update aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Impossible de mettre a jour l'ensemble %(aggregate_id)s. Raison: %(reason)s" #, python-format msgid "" "Cannot update metadata of aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Impossible de mettre a jour les métadonnées de l'ensemble %(aggregate_id)s. " "Raison: %(reason)s." #, python-format msgid "Cell %(uuid)s has no mapping." msgstr "La cellule %(uuid)s ne possède aucun mappage." #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "" "La modification définira une utilisation inférieure à 0 pour les ressources " "suivantes : %(unders)s" #, python-format msgid "Class %(class_name)s could not be found: %(exception)s" msgstr "Classe %(class_name)s introuvable : %(exception)s" #, python-format msgid "Compute host %(host)s could not be found." msgstr "L'hôte de calcul %(host)s ne peut pas être trouvé." #, python-format msgid "Compute host %s not found." msgstr "Host Compute %s non trouvé." #, python-format msgid "Compute service of %(host)s is still in use." msgstr "Le service de calcul de %(host)s est encore en cours d'utilisation." #, python-format msgid "Compute service of %(host)s is unavailable at this time." msgstr "Le service de calcul de %(host)s est indisponible pour l'instant." #, python-format msgid "Config drive format '%(format)s' is not supported." msgstr "" "Le format de l'unité de configuration '%(format)s' n'est pas pris en charge." #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt hypervisor " "'%s' does not support selecting CPU models" msgstr "" "La config a demandé un modèle d'UC explicite, mais l'hyperviseur libvirt " "'%s' actuel ne prend pas en charge la sélection des modèles d'UC" #, python-format msgid "" "Conflict updating instance %(instance_uuid)s, but we were unable to " "determine the cause" msgstr "" "Conflit lors de la mise à jour de l'instance %(instance_uuid)s, cause " "inconnue" #, python-format msgid "" "Conflict updating instance %(instance_uuid)s. Expected: %(expected)s. " "Actual: %(actual)s" msgstr "" "Conflit lors de la mise à jour de l'instance %(instance_uuid)s. Attendu: " "%(expected)s. Actuel: %(actual)s" #, python-format msgid "Connection to cinder host failed: %(reason)s" msgstr "La connexion à l'hôte cinder a échoué : %(reason)s" #, python-format msgid "Connection to glance host %(server)s failed: %(reason)s" msgstr "La connexion à l'hôte Glance %(server)s a échoué : %(reason)s" #, python-format msgid "Connection to libvirt lost: %s" msgstr "Perte de la connexion à libvirt : %s" #, python-format msgid "" "Console log output could not be retrieved for instance %(instance_id)s. " "Reason: %(reason)s" msgstr "" "Impossible d'extraire la sortie du journal de la console pour l'instance " "%(instance_id)s. Raison : %(reason)s" msgid "Constraint not met." msgstr "Contrainte non vérifiée." 
#, python-format msgid "Converted to raw, but format is now %s" msgstr "Converti au format brut, mais le format est maintenant %s" #, python-format msgid "Could not attach image to loopback: %s" msgstr "Impossible de lier l'image au loopback : %s" #, python-format msgid "Could not fetch image %(image_id)s" msgstr "Impossible d'extraire l'image %(image_id)s" #, python-format msgid "Could not find a handler for %(driver_type)s volume." msgstr "" "Impossible de trouver un gestionnaire pour le %(driver_type)s de volume." #, python-format msgid "Could not find binary %(binary)s on host %(host)s." msgstr "Impossible de trouver le binaire %(binary)s sur l'hôte %(host)s." #, python-format msgid "Could not find config at %(path)s" msgstr "Configuration introuvable dans %(path)s" msgid "Could not find the datastore reference(s) which the VM uses." msgstr "" "Impossible de trouver la ou les références de magasin de données utilisé par " "la machine virtuelle." #, python-format msgid "Could not load line %(line)s, got error %(error)s" msgstr "Impossible de charger la ligne %(line)s, erreur %(error)s obtenue" #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "Echec du chargement de l'app de collage '%(name)s' depuis %(path)s" #, python-format msgid "" "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s" msgstr "" "Impossible d'installer l'unité de configuration vfat. %(operation)s a " "échoué. Erreur : %(error)s" #, python-format msgid "Could not upload image %(image_id)s" msgstr "Impossible de télécharger l'image %(image_id)s" msgid "Creation of virtual interface with unique mac address failed" msgstr "" "La création d'une interface virtuelle avec une adresse mac unique a échoué" #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "" "L'expression régulière %s du magasin de données ne correspond à aucun " "magasin de données" msgid "Datetime is in invalid format" msgstr "Datetime est dans un format non valide" msgid "Default PBM policy is required if PBM is enabled." msgstr "La règle PBM par défaut est nécessaire si PBM est activé." #, python-format msgid "Device '%(device)s' not found." msgstr "Device '%(device)s' introuvable." msgid "Device name contains spaces." msgstr "Le nom du périphérique contient des espaces." msgid "Device name empty or too long." msgstr "Nom du périphérique vide ou trop long." #, python-format msgid "Device type mismatch for alias '%s'" msgstr "Type de périphérique non concordant pour l'alias '%s'" #, python-format msgid "Disk format %(disk_format)s is not acceptable" msgstr "Le format de disque %(disk_format)s n'est pas acceptable" #, python-format msgid "Disk info file is invalid: %(reason)s" msgstr "Le ficher d'information du disque est invalide : %(reason)s" #, python-format msgid "Driver Error: %s" msgstr "Erreur du pilote: %s" #, python-format msgid "Error attempting to run %(method)s" msgstr "Erreur lors de la tentative d'exécution de %(method)s" #, python-format msgid "" "Error destroying the instance on node %(node)s. Provision state still " "'%(state)s'." msgstr "" "Erreur lors de la destruction de l'instance sur le noeud %(node)s. L'état de " "la mise à disposition est encore '%(state)s'." 
#, python-format msgid "Error during unshelve instance %(instance_id)s: %(reason)s" msgstr "" "Erreur durant la dé-réservation de l'instance %(instance_id)s: %(reason)s" #, python-format msgid "" "Error from libvirt while getting domain info for %(instance_name)s: [Error " "Code %(error_code)s] %(ex)s" msgstr "" "Erreur de libvirt lors de l'obtention des informations de domaine pour " "%(instance_name)s : [Code d'erreur %(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Erreur de libvirt lors de la recherche de %(instance_name)s : [Code d'erreur " "%(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while quiescing %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Erreur de libvirt lors de la mise au repos de %(instance_name)s : [Code " "d'erreur %(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while set password for username \"%(user)s\": [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Erreur libvirt lors de la définition du mot de passe pour le nom " "d'utilisateur \"%(user)s\". [Code d'erreur : %(error_code)s] %(ex)s" #, python-format msgid "" "Error mounting %(device)s to %(dir)s in image %(image)s with libguestfs " "(%(e)s)" msgstr "" "Erreur de montage de %(device)s pour %(dir)s dans l'image %(image)s avec " "libguestfs (%(e)s)" #, python-format msgid "Error mounting %(image)s with libguestfs (%(e)s)" msgstr "Erreur lors du montage de %(image)s avec libguestfs (%(e)s)" #, python-format msgid "Error when creating resource monitor: %(monitor)s" msgstr "Erreur lors de la création du moniteur de ressource : %(monitor)s" #, python-format msgid "Event %(event)s not found for action id %(action_id)s" msgstr "Evénement %(event)s non trouvé pour l'ID action %(action_id)s" msgid "Event must be an instance of nova.virt.event.Event" msgstr "L'événement doit être une instance de nova.virt.event.Event" #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for instance " "%(instance_uuid)s. Last exception: %(exc_reason)s" msgstr "" "Nombre de tentatives de planification max %(max_attempts)d pour l'instance " "%(instance_uuid)s dépassé. Dernière exception : %(exc_reason)s" #, python-format msgid "" "Exceeded max scheduling retries %(max_retries)d for instance " "%(instance_uuid)s during live migration" msgstr "" "Le nombre maximal de nouvelles tentatives de planification (%(max_retries)d) " "a été dépassé pour l'instance %(instance_uuid)s pendant la migration à chaud" #, python-format msgid "Exceeded maximum number of retries. %(reason)s" msgstr "Nombre maximum d'essai dépassé. %(reason)s." #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "UUID attendu mais %(uuid)s reçu." msgid "Extracting vmdk from OVA failed." msgstr "Echec de l'extraction de vmdk à partir d'OVA." #, python-format msgid "Failed to access port %(port_id)s: %(reason)s" msgstr "Impossible d'accéder au port %(port_id)s : %(reason)s" #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "" "Echec de l'allocation de réseau(x) avec l'erreur %s, ne pas replanifier." msgid "Failed to allocate the network(s), not rescheduling." msgstr "Echec de l'allocation de réseau(x), ne pas replanifier." 
#, python-format msgid "Failed to attach network adapter device to %(instance_uuid)s" msgstr "Impossible de connecter la carte réseau avec %(instance_uuid)s" #, python-format msgid "Failed to deploy instance: %(reason)s" msgstr "Echec de déploiement de l'instance: %(reason)s" #, python-format msgid "Failed to detach PCI device %(dev)s: %(reason)s" msgstr "Échec du détachement du périphérique PCI %(dev)s: %(reason)s" #, python-format msgid "Failed to detach network adapter device from %(instance_uuid)s" msgstr "Impossible de déconnecter la carte réseau de %(instance_uuid)s" #, python-format msgid "Failed to encrypt text: %(reason)s" msgstr "Echec du chiffrement du texte : %(reason)s" #, python-format msgid "Failed to launch instances: %(reason)s" msgstr "Échec à lancer les instances : %(reason)s" #, python-format msgid "Failed to map partitions: %s" msgstr "Echec de mappage des partitions : %s" #, python-format msgid "Failed to mount filesystem: %s" msgstr "Impossible de monter le système de fichier : %s" #, python-format msgid "Failed to power off instance: %(reason)s" msgstr "Échec à éteindre l'instance : %(reason)s" #, python-format msgid "Failed to power on instance: %(reason)s" msgstr "Echec à faire fonctionner l'instance : %(reason)s" #, python-format msgid "Failed to provision instance %(inst)s: %(reason)s" msgstr "Echec de la mise à disposition de l'instance %(inst)s : %(reason)s" #, python-format msgid "Failed to read or write disk info file: %(reason)s" msgstr "Echec à lire ou à écrire le fichier d'information disque : %(reason)s" #, python-format msgid "Failed to reboot instance: %(reason)s" msgstr "Echec à redémarrer l'instance : %(reason)s" #, python-format msgid "Failed to remove volume(s): (%(reason)s)" msgstr "Echec de suppresion du volume(s): (%(reason)s)" #, python-format msgid "Failed to request Ironic to rebuild instance %(inst)s: %(reason)s" msgstr "" "Echec de la demande de régénération de l'instance %(inst)s à Ironic : " "%(reason)s" #, python-format msgid "Failed to resume instance: %(reason)s" msgstr "Echec à résumé l'instance : %(reason)s" #, python-format msgid "Failed to run qemu-img info on %(path)s : %(error)s" msgstr "Échec à lancer qemu-img info sur %(path)s : %(error)s" #, python-format msgid "Failed to set admin password on %(instance)s because %(reason)s" msgstr "" "Echec de définition du mot de passe d'admin sur %(instance)s car %(reason)s" #, python-format msgid "Failed to suspend instance: %(reason)s" msgstr "Échec à suspendre l'instance : %(reason)s" #, python-format msgid "Failed to terminate instance: %(reason)s" msgstr "Échec à terminer l'instance : %(reason)s" msgid "Failure prepping block device." msgstr "Echec de préparation de l'unité par bloc." #, python-format msgid "File %(file_path)s could not be found." msgstr "Fichier %(file_path)s introuvable." #, python-format msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s." msgstr "" "L'IP fixe %(ip)s n'est pas une adresse IP valide pour le réseau " "%(network_id)s." #, python-format msgid "Fixed IP %s is already in use." msgstr "L'adresse IP fixe %s est déjà utilisée." #, python-format msgid "" "Fixed IP address %(address)s is already in use on instance %(instance_uuid)s." msgstr "" "L'adresse IP fixe %(address)s est déjà utilisée sur l'instance " "%(instance_uuid)s." #, python-format msgid "Fixed IP not found for address %(address)s." msgstr "Pas d'IP fixe trouvée pour l'adresse %(address)s." #, python-format msgid "Flavor %(flavor_id)s could not be found." 
msgstr "Le Flavor %(flavor_id)s ne peut être trouvé." #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s." msgstr "" "Le type d'instance %(flavor_id)s n'a pas de spécifications supplémentaires " "avec la clé %(extra_specs_key)s." #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(key)s." msgstr "" "Le type d'instance %(flavor_id)s n'a pas de spécifications supplémentaires " "avec la clé %(key)s" #, python-format msgid "" "Flavor %(id)s extra spec cannot be updated or created after %(retries)d " "retries." msgstr "" "Impossible de mettre à jour ou de créer la spécification supplémentaire " "%(id)s du gabarit après %(retries)d tentatives." #, python-format msgid "" "Flavor access already exists for flavor %(flavor_id)s and project " "%(project_id)s combination." msgstr "" "L'accès de version existe déjà pour la combinaison version %(flavor_id)s et " "projet %(project_id)s." #, python-format msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination." msgstr "" "Version inaccessible pour la combinaison %(flavor_id)s / %(project_id)s." msgid "Flavor used by the instance could not be found." msgstr "La version utilisée par l'instance est introuvable." #, python-format msgid "Flavor with ID %(flavor_id)s already exists." msgstr "Le type d'instance avec l'ID %(flavor_id)s existe déjà." #, python-format msgid "Flavor with name %(flavor_name)s could not be found." msgstr "Le type d'instance nommé %(flavor_name)s ne peut être trouvé." #, python-format msgid "Flavor with name %(name)s already exists." msgstr "Le type d'instance avec le nom %(name)s existe déjà." #, python-format msgid "" "Flavor's disk is smaller than the minimum size specified in image metadata. " "Flavor disk is %(flavor_size)i bytes, minimum size is %(image_min_disk)i " "bytes." msgstr "" "Le disque du gabarit est plus petit que la taille minimum spécifiée dans les " "métadonnées de l'image. La taille du disque est %(flavor_size)i octets, la " "taille minimum est %(image_min_disk)i octets." #, python-format msgid "" "Flavor's disk is too small for requested image. Flavor disk is " "%(flavor_size)i bytes, image is %(image_size)i bytes." msgstr "" "Le disque du gabaris est trop petit pour l'image demandée. Le disque du " "gabarit fait %(flavor_size)i bytes, l'image fait %(image_size)i bytes." msgid "Flavor's memory is too small for requested image." msgstr "La mémoire du type d'instance est trop petite pour l'image demandée." #, python-format msgid "Floating IP %(address)s association has failed." msgstr "L'association de l'IP floattante %(address)s a échoué." #, python-format msgid "Floating IP %(address)s is associated." msgstr "L'adresse IP flottante %(address)s est associée." #, python-format msgid "Floating IP %(address)s is not associated with instance %(id)s." msgstr "" "L'adresse IP flottante %(address)s n'est pas associée à l'instance %(id)s." #, python-format msgid "Floating IP not found for ID %(id)s." msgstr "Aucune IP dynamique trouvée pour l'id %(id)s." #, python-format msgid "Floating IP not found for ID %s" msgstr "Adresse IP flottante non trouvée pour l'ID %s" #, python-format msgid "Floating IP not found for address %(address)s." msgstr "Aucune IP dynamique trouvée pour l'adresse %(address)s." msgid "Floating IP pool not found." msgstr "Pool d'IP flottantes non trouvé." msgid "" "Forbidden to exceed flavor value of number of serial ports passed in image " "meta." 
msgstr "" "Le dépassement du nombre de port série du type d'instance passé dans la meta " "image est interdit" msgid "Found no disk to snapshot." msgstr "Aucun disque trouvé pour l'instantané." msgid "Guest does not have a console available." msgstr "Aucune console n'est disponible pour l'invité." #, python-format msgid "Host %(host)s could not be found." msgstr "L'hôte %(host)s ne peut pas être trouvé." #, python-format msgid "Host %(host)s is already mapped to cell %(uuid)s" msgstr "L'hôte %(host)s est déjà mappé à la cellule %(uuid)s" #, python-format msgid "Host '%(name)s' is not mapped to any cell" msgstr "L'hôte '%(name)s' n'est mappé à aucune cellule" msgid "Host aggregate is not empty" msgstr "L'agrégat d'hôte n'est pas vide" msgid "Host does not support guests with NUMA topology set" msgstr "L'hote ne supporte pas les invités avec le groupe topologique NUMA" msgid "Host does not support guests with custom memory page sizes" msgstr "" "L'hôte ne prend pas en charge les invités avec des tailles de pages de " "mémoire personnalisées" msgid "Hypervisor driver does not support post_live_migration_at_source method" msgstr "" "Le pilote de l'hyperviseur ne prend pas en charge la méthode " "post_live_migration_at_source" #, python-format msgid "Hypervisor virt type '%s' is not valid" msgstr "Le type virtuel d'hyperviseur '%s' n'est pas valide" #, python-format msgid "Hypervisor virtualization type '%(hv_type)s' is not recognised" msgstr "" "Le type de virtualisation de l'hyperviseur '%(hv_type)s' n'est pas reconnu." #, python-format msgid "Hypervisor with ID '%s' could not be found." msgstr "L'hyperviseur avec l'ID '%s' est introuvable." #, python-format msgid "IP allocation over quota in pool %s." msgstr "L'allocation IP dépasse le quota dans le pool %s." msgid "IP allocation over quota." msgstr "L'allocation IP dépasse le quota." #, python-format msgid "Image %(image_id)s could not be found." msgstr "L'image %(image_id)s n'a pas été trouvée." #, python-format msgid "Image %(image_id)s is not active." msgstr "L'image %(image_id)s n'est pas active." #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "L'image %(image_id)s est inacceptable: %(reason)s" msgid "Image disk size greater than requested disk size" msgstr "" "La taille du disque d'image est supérieure à la taille de disque demandée" msgid "Image is not raw format" msgstr "L'image n'est pas au format raw" msgid "Image metadata limit exceeded" msgstr "Limite de métadonnées d'image dépassée" #, python-format msgid "Image model '%(image)s' is not supported" msgstr "Le modèle d'image '%(image)s' n'est pas supporté" msgid "Image not found." msgstr "Image introuvable." #, python-format msgid "" "Image property '%(name)s' is not permitted to override NUMA configuration " "set against the flavor" msgstr "" "La propriété '%(name)s' de l'image n'est pas autorisée à réécrire la " "configuration NUMA réglé par rapport au type d'instance." msgid "" "Image property 'hw_cpu_policy' is not permitted to override CPU pinning " "policy set against the flavor" msgstr "" "La propriété d'image 'hw_cpu_policy' ne peut pas remplacer la règle de " "réservation d'unité centrale définie sur la version" msgid "" "Image property 'hw_cpu_thread_policy' is not permitted to override CPU " "thread pinning policy set against the flavor" msgstr "" "La propriété d'image 'hw_cpu_thread_policy' ne peut pas remplacer la règle " "de réservation de CPU définie sur la version" msgid "Image that the instance was started with could not be found." 
msgstr "L'image par laquelle l'instance a été démarrée est introuvable." #, python-format msgid "Image's config drive option '%(config_drive)s' is invalid" msgstr "" "L'option du lecteur de configuration de l'image '%(config_drive)s' est " "invalide" msgid "" "Images with destination_type 'volume' need to have a non-zero size specified" msgstr "" "Les images avec destination_type 'volume' doit avoir une taille différente " "de zéro ." msgid "In ERROR state" msgstr "A l'état ERREUR" #, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "Dans les états %(vm_state)s/%(task_state)s, non RESIZED/None" #, python-format msgid "In-progress live migration %(id)s is not found for server %(uuid)s." msgstr "" "La migration à chaud en cours %(id)s est introuvable pour le serveur " "%(uuid)s." msgid "" "Incompatible settings: ephemeral storage encryption is supported only for " "LVM images." msgstr "" "Paramètres incompatibles : le chiffrement éphémère du stockage est pris en " "charge uniquement pour les images LVM." #, python-format msgid "Info cache for instance %(instance_uuid)s could not be found." msgstr "Le cache d'infos pour l'instance %(instance_uuid)s est introuvable." #, python-format msgid "" "Instance %(instance)s and volume %(vol)s are not in the same " "availability_zone. Instance is in %(ins_zone)s. Volume is in %(vol_zone)s" msgstr "" "L'instance %(instance)s et le volume %(vol)s ne sont pas dans la même zone " "de disponibilité. L'instance est dans %(ins_zone)s. Le volume est dans " "%(vol_zone)s" #, python-format msgid "Instance %(instance)s does not have a port with id %(port)s" msgstr "L'instance %(instance)s n'a pas de port avec id %(port)s" #, python-format msgid "Instance %(instance_id)s cannot be rescued: %(reason)s" msgstr "Impossible de sauver l'instance %(instance_id)s : %(reason)s" #, python-format msgid "Instance %(instance_id)s could not be found." msgstr "L'instance %(instance_id)s n'a pas pu être trouvée." #, python-format msgid "Instance %(instance_id)s has no tag '%(tag)s'" msgstr "L'instance %(instance_id)s n'a pas d'étiquette '%(tag)s'." #, python-format msgid "Instance %(instance_id)s is not in rescue mode" msgstr "L'instance %(instance_id)s n'est pas en mode secours" #, python-format msgid "Instance %(instance_id)s is not ready" msgstr "L'instance %(instance_id)s n'est pas prête" #, python-format msgid "Instance %(instance_id)s is not running." msgstr "L'instance %(instance_id)s ne fonctionne pas." #, python-format msgid "Instance %(instance_id)s is unacceptable: %(reason)s" msgstr "L'instance %(instance_id)s est inacceptable: %(reason)s" #, python-format msgid "Instance %(instance_uuid)s does not specify a NUMA topology" msgstr "L'instance %(instance_uuid)s ne spécifie pas une topologie NUMA" #, python-format msgid "Instance %(instance_uuid)s does not specify a migration context." msgstr "L'instance %(instance_uuid)s ne spécifie pas de contexte de migration." #, python-format msgid "" "Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while " "the instance is in this state." msgstr "" "L'instance %(instance_uuid)s dans %(attr)s %(state)s. Impossible de " "%(method)s pendant que l'instance est dans cet état." #, python-format msgid "Instance %(instance_uuid)s is locked" msgstr "L'instance %(instance_uuid)s est verrouillée" #, python-format msgid "" "Instance %(instance_uuid)s requires config drive, but it does not exist." 
msgstr "" "L'instance %(instance_uuid)s nécessite une unité de configuration, mais " "cette unité n'existe pas." #, python-format msgid "Instance %(name)s already exists." msgstr "L'instance %(name)s existe déjà." #, python-format msgid "Instance %(server_id)s is in an invalid state for '%(action)s'" msgstr "L'instance %(server_id)s est dans un état invalide pour '%(action)s'" #, python-format msgid "Instance %(uuid)s has no mapping to a cell." msgstr "L'instance %(uuid)s ne comporte aucun mappage vers une cellule." #, python-format msgid "Instance %s not found" msgstr "Instance %s non trouvé" #, python-format msgid "Instance %s provisioning was aborted" msgstr "Le provisionnement de l'instance %s a été interrompu" msgid "Instance could not be found" msgstr "Instance introuvable." msgid "Instance disk to be encrypted but no context provided" msgstr "Disque d'instance à chiffrer, mais aucun contexte fourni" msgid "Instance event failed" msgstr "Echec de l'événement d'instance" #, python-format msgid "Instance group %(group_uuid)s already exists." msgstr "Groupe d'instance %(group_uuid)s existe déjà." #, python-format msgid "Instance group %(group_uuid)s could not be found." msgstr "Groupe d'instance %(group_uuid)s ne peut pas etre trouvé." msgid "Instance has no source host" msgstr "L'instance n'a aucun hôte source" msgid "Instance has not been resized." msgstr "L'instance n'a pas été redimensionnée." #, python-format msgid "Instance hostname %(hostname)s is not a valid DNS name" msgstr "Le nom d'hôte de l'instance %(hostname)s n'est pas un nom DNS valide" msgid "Instance is not a member of specified network" msgstr "L'instance n'est pas un membre du réseau spécifié" #, python-format msgid "Instance rollback performed due to: %s" msgstr "Retour-arrière de l'instance réalisé du à: %s" #, python-format msgid "" "Insufficient Space on Volume Group %(vg)s. Only %(free_space)db available, " "but %(size)d bytes required by volume %(lv)s." msgstr "" "Espace insuffisant sur le groupe de volumes %(vg)s. Seulement " "%(free_space)db disponibles, mais %(size)db requis par volume %(lv)s." #, python-format msgid "Insufficient compute resources: %(reason)s." msgstr "Ressources de calcul insuffisante : %(reason)s." #, python-format msgid "Interface %(interface)s not found." msgstr "L'interface %(interface)s non trouvée." #, python-format msgid "Invalid Base 64 data for file %(path)s" msgstr "Contenu BAse 64 invalide pour le fichier %(path)s" msgid "Invalid Connection Info" msgstr "Informations de connexion non valides" #, python-format msgid "Invalid ID received %(id)s." msgstr "ID non valide %(id)s reçu." #, python-format msgid "Invalid IP format %s" msgstr "Format adresse IP non valide %s" #, python-format msgid "Invalid IP protocol %(protocol)s." msgstr "Le protocole IP %(protocol)s est invalide" msgid "" "Invalid PCI Whitelist: The PCI whitelist can specify devname or address, but " "not both" msgstr "" "Whitelist PCI invalide: la whitelist PCI peut spécifier le devname ou " "l'adresse, mais pas les deux." #, python-format msgid "Invalid PCI alias definition: %(reason)s" msgstr "Définition d'un alias PCI invalide : %(reason)s" #, python-format msgid "Invalid Regular Expression %s" msgstr "Expression régulière non valide %s" #, python-format msgid "Invalid characters in hostname '%(hostname)s'" msgstr "Caractères invalides dans le hostname '%(hostname)s'" msgid "Invalid config_drive provided." msgstr "Le config_drive fourni est invalide." 
#, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "config_drive_format non valide \"%s\"" #, python-format msgid "Invalid console type %(console_type)s" msgstr "Type de console non valide %(console_type)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Le type de contenu %(content_type)s est invalide" #, python-format msgid "Invalid datetime string: %(reason)s" msgstr "Chaîne de datetime invalide : %(reason)s" msgid "Invalid device UUID." msgstr "Périphérique UUID invalide." #, python-format msgid "Invalid entry: '%s'" msgstr "Entrée non valide : '%s'" #, python-format msgid "Invalid entry: '%s'; Expecting dict" msgstr "Entrée non valide : '%s' ; dictionnaire attendu" #, python-format msgid "Invalid entry: '%s'; Expecting list or dict" msgstr "Entrée non valide : '%s'; liste ou dictionnaire attendu" #, python-format msgid "Invalid exclusion expression %r" msgstr "Expression d'exclusion invalide %r" #, python-format msgid "Invalid image format '%(format)s'" msgstr "Format d'image invalide '%(format)s'" #, python-format msgid "Invalid image href %(image_href)s." msgstr "L'image href %(image_href)s est invalide." #, python-format msgid "Invalid inclusion expression %r" msgstr "Expression d'inclusion invalide %r" #, python-format msgid "" "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s" msgstr "" "Entrée invalide pour champ/attribut %(path)s. Valeur : %(value)s. %(message)s" #, python-format msgid "Invalid input received: %(reason)s" msgstr "Entrée invalide reçue : %(reason)s" msgid "Invalid instance image." msgstr "Instance image non valide." #, python-format msgid "Invalid is_public filter [%s]" msgstr "Filtre is_public non valide [%s]" msgid "Invalid key_name provided." msgstr "key_name fourni non valide." #, python-format msgid "Invalid memory page size '%(pagesize)s'" msgstr "Taille de page de mémoire non valide '%(pagesize)s'" msgid "Invalid metadata key" msgstr "Clé de métadonnées non valide" #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "Taille de métadonnée invalide : %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "Métadonnée invalide : %(reason)s" #, python-format msgid "Invalid minDisk filter [%s]" msgstr "Filtre minDisk non valide [%s]" #, python-format msgid "Invalid minRam filter [%s]" msgstr "Filtre minRam non valide [%s]" #, python-format msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" msgstr "La plage de port %(from_port)s:%(to_port)s. %(msg)s est invalide" msgid "Invalid proxy request signature." msgstr "Signature de demande de proxy non valide." #, python-format msgid "Invalid range expression %r" msgstr "Valeur de %r invalide" msgid "Invalid service catalog json." msgstr "json de catalogue de service non valide." msgid "Invalid start time. The start time cannot occur after the end time." msgstr "" "Heure de début non valide. L'heure de début ne peut pas être définie après " "l'heure de fin." 
msgid "Invalid state of instance files on shared storage" msgstr "Etat non valide des fichiers d'instance sur la mémoire partagée" #, python-format msgid "Invalid timestamp for date %s" msgstr "Horodatage non valide pour la date %s" #, python-format msgid "Invalid usage_type: %s" msgstr "Type d'utilisation (usage_type) non valide : %s" #, python-format msgid "Invalid value for Config Drive option: %(option)s" msgstr "Valeur invalide pour l'option du lecteur de configuration : %(option)s" #, python-format msgid "Invalid virtual interface address %s in request" msgstr "Adresse d'interface virtuelle %s non valide dans la demande" #, python-format msgid "Invalid volume access mode: %(access_mode)s" msgstr "Mode d'accès au volume invalide : %(access_mode)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "Volume invalide : %(reason)s" msgid "Invalid volume_size." msgstr "volume_size invalide." #, python-format msgid "Ironic node uuid not supplied to driver for instance %s." msgstr "L' uuid du noeud Ironic non fourni au pilote pour instance %s." #, python-format msgid "" "It is not allowed to create an interface on external network %(network_uuid)s" msgstr "" "Il est interdit de créer une interface sur le réseau externe %(network_uuid)s" msgid "" "Key Names can only contain alphanumeric characters, periods, dashes, " "underscores, colons and spaces." msgstr "" "Les noms de clé peuvent seulement contenir des caractères alphanumériques, " "des points, des tirets, des traits de soulignement, des deux-points et des " "espaces." #, python-format msgid "Key manager error: %(reason)s" msgstr "Erreur du gestionaire de clé: %(reason)s" #, python-format msgid "Key pair '%(key_name)s' already exists." msgstr "La paire de clés %(key_name)s' existe déjà." #, python-format msgid "Keypair %(name)s not found for user %(user_id)s" msgstr "" "La paire de clés %(name)s est introuvable pour l'utilisateur %(user_id)s" #, python-format msgid "Keypair data is invalid: %(reason)s" msgstr "La donnée de paire de clés est invalide : %(reason)s" msgid "Limits only supported from vCenter 6.0 and above" msgstr "Limites seulement supportées sur vCenter 6.0 et supérieur" #, python-format msgid "Live migration %(id)s for server %(uuid)s is not in progress." msgstr "" "La migration à chaud %(id)s pour le serveur %(uuid)s n'est pas en cours." #, python-format msgid "Malformed message body: %(reason)s" msgstr "Format de corps de message non valide : %(reason)s" #, python-format msgid "" "Malformed request URL: URL's project_id '%(project_id)s' doesn't match " "Context's project_id '%(context_project_id)s'" msgstr "" "URL de demande incorrectement formée : l'ID projet '%(project_id)s' de l'URL " "ne correspond pas à l'ID projet '%(context_project_id)s' du contexte" msgid "Malformed request body" msgstr "Format de corps de demande incorrect" msgid "Mapping image to local is not supported." msgstr "Le mappage de l'image sur local n'est pas pris en charge." #, python-format msgid "Marker %(marker)s could not be found." msgstr "Le marqueur %(marker)s est introuvable." 
msgid "Maximum number of floating IPs exceeded" msgstr "Nombre maximal d'adresses IP flottantes dépassé" #, python-format msgid "Maximum number of metadata items exceeds %(allowed)d" msgstr "Le nombre maximal d'éléments de métadonnées dépasse %(allowed)d" msgid "Maximum number of ports exceeded" msgstr "Nombre maximum de ports dépassé" msgid "Maximum number of security groups or rules exceeded" msgstr "Nombre maximal de groupes de sécurité ou de règles dépassé" msgid "Metadata item was not found" msgstr "Elément de métadonnées introuvable" msgid "Metadata property key greater than 255 characters" msgstr "" "Taille de la clé de propriété de métadonnées supérieure à 255 caractères" msgid "Metadata property value greater than 255 characters" msgstr "" "Taille de la valeur de propriété de métadonnées supérieure à 255 caractères" msgid "Metadata type should be dict." msgstr "Le type de métadonnée doit être un dictionnaire." #, python-format msgid "" "Metric %(name)s could not be found on the compute host node %(host)s." "%(node)s." msgstr "" "La métrique %(name)s ne peut être trouvé sur le noeud de calcul de l'hôte " "%(host)s.%(node)s." #, python-format msgid "Migration %(id)s for server %(uuid)s is not live-migration." msgstr "" "La migration %(id)s pour le serveur %(uuid)s n'est pas une migration à chaud." #, python-format msgid "Migration %(migration_id)s could not be found." msgstr "La migration %(migration_id)s ne peut être trouvée." #, python-format msgid "Migration %(migration_id)s not found for instance %(instance_id)s" msgstr "Migration %(migration_id)s introuvable pour l'instance %(instance_id)s" #, python-format msgid "" "Migration %(migration_id)s state of instance %(instance_uuid)s is %(state)s. " "Cannot %(method)s while the migration is in this state." msgstr "" "L'état de la migration %(migration_id)s de l'instance %(instance_uuid)s est " "%(state)s. Impossible de %(method)s tant que la migration est dans cet état." #, python-format msgid "Migration error: %(reason)s" msgstr "Erreur de migration: %(reason)s" msgid "Migration is not supported for LVM backed instances" msgstr "" "La migration n'est pas prise en charge pour des instances sauvegardées par " "LVM" #, python-format msgid "" "Migration not found for instance %(instance_id)s with status %(status)s." msgstr "" "Migration non trouvée pour l'instance %(instance_id)s avec le statut " "%(status)s." #, python-format msgid "Migration pre-check error: %(reason)s" msgstr "Erreur lors de la vérification de la migration: %(reason)s" #, python-format msgid "Migration select destinations error: %(reason)s" msgstr "Erreur de sélection de destinations de migration : %(reason)s" #, python-format msgid "Missing arguments: %s" msgstr "Arguments manquants : %s" msgid "Missing device UUID." msgstr "Périphérique UUID manquant." msgid "Missing disabled reason field" msgstr "Le champ de la raison de désactivation est manquant" msgid "Missing forced_down field" msgstr "Champ forced_down manquant" msgid "Missing imageRef attribute" msgstr "Attribut imageRef manquant" #, python-format msgid "Missing keys: %s" msgstr "Clés manquantes : %s" msgid "Missing parameter dict" msgstr "Paramètre dict manquant" #, python-format msgid "" "More than one instance is associated with fixed IP address '%(address)s'." msgstr "Plusieurs instances sont associées à l'adresse IP fixe '%(address)s'." msgid "" "More than one possible network found. Specify network ID(s) to select which " "one(s) to connect to." msgstr "" "Il y a plusieurs réseaux possibles. 
Veuillez indiquer les ID(s) de réseau " "pour que l'on sache auquel(s) se connecter." msgid "More than one swap drive requested." msgstr "Plusieurs unités de swap demandées." #, python-format msgid "Multi-boot operating system found in %s" msgstr "Système d'exploitation à plusieurs démarrages trouvé dans %s" msgid "Multiple X-Instance-ID headers found within request." msgstr "Plusieurs en-têtes X-Instance-ID trouvés dans la demande." msgid "Multiple X-Tenant-ID headers found within request." msgstr "Plusieurs en-têtes X-Tenant-ID trouvés à l'intérieur de la requête" #, python-format msgid "Multiple floating IP pools matches found for name '%s'" msgstr "" "Plusieurs correspondances de pools d'adresses IP flottantes trouvées pour le " "nom '%s'" #, python-format msgid "Multiple floating IPs are found for address %(address)s." msgstr "Plusieurs adresses IP flottantes trouvées pour l'adresse %(address)s." msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we do " "not return uptime for just one host." msgstr "" "Plusieurs hôtes peuvent être gérés par le pilote VMWare vCenter ; par " "conséquent, nous ne retournons pas la disponibilité pour un seul hôte." msgid "Multiple possible networks found, use a Network ID to be more specific." msgstr "" "Plusieurs réseaux possibles trouvés. Utilisez un ID réseau pour préciser " "votre demande." #, python-format msgid "" "Multiple security groups found matching '%s'. Use an ID to be more specific." msgstr "" "Plusieurs groupes de sécurité ont été trouvés correspondant à '%s'. Utilisez " "un ID pour préciser votre demande." msgid "Must input network_id when request IP address" msgstr "network_id doit être entré lors de la demande d'adresse IP" msgid "Must not input both network_id and port_id" msgstr "Vous ne devez pas entrer à la fois network_id et port_id" msgid "" "Must specify host_ip, host_username and host_password to use vmwareapi." "VMwareVCDriver" msgstr "" "Il faut indiquer host_ip, host_username et host_password pour utiliser " "vmwareapi.VMwareVCDriver" msgid "Must supply a positive value for max_number" msgstr "Veuillez fournir une valeur positive pour max_number" msgid "Must supply a positive value for max_rows" msgstr "Veuillez fournir une valeur positive pour max_rows" #, python-format msgid "Network %(network_id)s could not be found." msgstr "Le réseau %(network_id)s n'a pas été trouvé." #, python-format msgid "" "Network %(network_uuid)s requires a subnet in order to boot instances on." msgstr "" "Réseau %(network_uuid)s demande un sous réseau pour pouvoir démarrer des " "instances dessus." #, python-format msgid "Network could not be found for bridge %(bridge)s" msgstr "Aucun réseau trouvé pour le pont %(bridge)s" #, python-format msgid "Network could not be found for instance %(instance_id)s." msgstr "Aucun réseau trouvé pour l'instance %(instance_id)s." msgid "Network not found" msgstr "Réseau non trouvé" msgid "" "Network requires port_security_enabled and subnet associated in order to " "apply security groups." msgstr "" "Le réseau nécessite port_security_enabled et le sous-réseau associé afin " "d'appliquer les groupes de sécurité." msgid "New volume must be detached in order to swap." msgstr "Le nouveau volume doit être détaché afin de permuter." msgid "New volume must be the same size or larger." msgstr "Le nouveau volume doit être de la même taille ou plus grand." #, python-format msgid "No Block Device Mapping with id %(id)s." msgstr "Pas de mappage d'unité par bloc avec l'id %(id)s." 
msgid "No Unique Match Found." msgstr "Correspondance unique non trouvée." msgid "No compute host specified" msgstr "Aucun hôte de calcul spécifié" #, python-format msgid "No configuration information found for operating system %(os_name)s" msgstr "" "Aucune information de configuration n'a été trouvée pour le système " "d'exploitation %(os_name)s" #, python-format msgid "No device with MAC address %s exists on the VM" msgstr "" "Aucun périphérique ayant pour adresse MAC %s n'existe sur la machine " "virtuelle" #, python-format msgid "No device with interface-id %s exists on VM" msgstr "" "Aucun périphérique ayant pour interface-id %s n'existe sur la machine " "virtuelle" #, python-format msgid "No disk at %(location)s" msgstr "Aucun disque sur %(location)s" #, python-format msgid "No fixed IP addresses available for network: %(net)s" msgstr "Pas d'adresses IP fixes disponibles pour le réseau : %(net)s" msgid "No fixed IPs associated to instance" msgstr "Aucune adresse IP fixe associée à l'instance" msgid "No free nbd devices" msgstr "Pas de device nbd libre" msgid "No host available on cluster" msgstr "Aucun hôte disponible sur le cluster" msgid "No hosts found to map to cell, exiting." msgstr "Aucun hôte à mapper à la cellule n'a été trouvé. Sortie..." #, python-format msgid "No hypervisor matching '%s' could be found." msgstr "Aucun hyperviseur correspondant à '%s' n'a été trouvé." msgid "No image locations are accessible" msgstr "Aucun emplacement d'image n'est accessible" #, python-format msgid "" "No live migration URI configured and no default available for " "\"%(virt_type)s\" hypervisor virtualization type." msgstr "" "Aucun URI de migration à chaud n'est configuré et aucune valeur par défaut " "n'est disponible pour le type de virtualisation d'hyperviseur " "\"%(virt_type)s\"." msgid "No more floating IPs available." msgstr "Plus d'adresses IP flottantes disponibles." #, python-format msgid "No more floating IPs in pool %s." msgstr "Plus d'adresses IP flottantes disponibles dans le pool %s." #, python-format msgid "No mount points found in %(root)s of %(image)s" msgstr "Aucun point de montage trouvé dans %(root)s de l'image %(image)s" #, python-format msgid "No operating system found in %s" msgstr "Aucun système d'exploitation trouvé dans %s" msgid "No root disk defined." msgstr "Aucun disque racine défini." #, python-format msgid "" "No specific network was requested and none are available for project " "'%(project_id)s'." msgstr "" "Aucun réseau spécifique n'a été demandé et il n'existe aucun réseau " "disponible pour le projet '%(project_id)s'." msgid "No valid host found for cold migrate" msgstr "Aucun hôte valide n'a été trouvé pour la migration à froid" msgid "No valid host found for resize" msgstr "Aucun hôte valide n'a été trouvé pour le redimensionnement" #, python-format msgid "No valid host was found. %(reason)s" msgstr "Hôte non valide trouvé. %(reason)s" #, python-format msgid "No volume Block Device Mapping at path: %(path)s" msgstr "Pas de volume de mappage d'unité de bloc en: %(path)s." #, python-format msgid "No volume Block Device Mapping with id %(volume_id)s." msgstr "Pas de volume de mappage d'unité de bloc avec l'id: %(volume_id)s." #, python-format msgid "Node %s could not be found." msgstr "Noeud %s introuvable." 
#, python-format msgid "Not able to acquire a free port for %(host)s" msgstr "Pas capable d'acquérir un port libre pour %(host)s" #, python-format msgid "Not able to bind %(host)s:%(port)d, %(error)s" msgstr "Pas capable de lier %(host)s : %(port)d, %(error)s " #, python-format msgid "" "Not all Virtual Functions of PF %(compute_node_id)s:%(address)s are free." msgstr "" "Les fonctions virtuelles de PF %(compute_node_id)s:%(address)s ne sont pas " "toutes libres." msgid "Not an rbd snapshot" msgstr "N'est pas un instantané rbd" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "Non autorisé pour l'image %(image_id)s." msgid "Not authorized." msgstr "Non autorisé." msgid "Not enough parameters to build a valid rule." msgstr "Pas assez de parametres pour contruire un règle valide." msgid "Not stored in rbd" msgstr "Non stocké dans rbd" msgid "Nothing was archived." msgstr "Aucun élément archivé." #, python-format msgid "Nova requires libvirt version %s or greater." msgstr "Nova nécessite libvirt %s ou une version ultérieure." msgid "Number of Rows Archived" msgstr "Nombre de lignes archivées" #, python-format msgid "Object action %(action)s failed because: %(reason)s" msgstr "L'action de l'objet %(action)s a échoué car : %(reason)s" msgid "Old volume is attached to a different instance." msgstr "L'ancien volume est attaché à une instance différente." #, python-format msgid "One or more hosts already in availability zone(s) %s" msgstr "" "Un ou plusieurs hôte(s) sont déjà présents dans la(les) zone(s) de " "disponibilité(s) %s" msgid "Only administrators may list deleted instances" msgstr "Seul l'administrateur peut afficher la liste des instances supprimées" msgid "Origin header does not match this host." msgstr "L'en-tête d'origine ne correspond pas à cet hôte." msgid "Origin header not valid." msgstr "En-tête d'origine non valide." msgid "Origin header protocol does not match this host." msgstr "Le protocole de l'en-tête d'origine ne correspond pas à cet hôte." #, python-format msgid "PCI Device %(node_id)s:%(address)s not found." msgstr "Périphérique PCI %(node_id)s:%(address)s introuvable." #, python-format msgid "PCI alias %(alias)s is not defined" msgstr "L'alias PCI %(alias)s n'est pas défini" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is %(status)s instead of " "%(hopestatus)s" msgstr "" "Le pérphérique PCI %(compute_node_id)s:%(address)s est %(status)s au lieu de " "%(hopestatus)s" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead of " "%(hopeowner)s" msgstr "" "Le périphérique PCI %(compute_node_id)s:%(address)s appartient à %(owner)s " "au lieu de %(hopeowner)s" #, python-format msgid "PCI device %(id)s not found" msgstr "Périphérique PCI %(id)s introuvable" #, python-format msgid "PCI device request %(requests)s failed" msgstr "La requête %(requests)s au périphérique PCI a échoué." #, python-format msgid "Page size %(pagesize)s forbidden against '%(against)s'" msgstr "Taille de page %(pagesize)s interdite sur '%(against)s'" #, python-format msgid "Page size %(pagesize)s is not supported by the host." msgstr "Taille de page %(pagesize)s non prise en charge par l'hôte." #, python-format msgid "" "Parameters %(missing_params)s not present in vif_details for vif %(vif_id)s. " "Check your Neutron configuration to validate that the macvtap parameters are " "correct." msgstr "" "Le paramètre %(missing_params)s est absent de vif_details pour le vif " "%(vif_id)s. 
Vérifiez votre configuration de Neutron pour valider que les " "paramètres macvtap sont corrects." #, python-format msgid "Path %s must be LVM logical volume" msgstr "Le chemin %s doit être un volume logique LVM." msgid "Paused" msgstr "En pause" msgid "Personality file limit exceeded" msgstr "Limite de fichier de personnalité dépassée" #, python-format msgid "" "Physical Function %(compute_node_id)s:%(address)s, related to VF " "%(compute_node_id)s:%(vf_address)s is %(status)s instead of %(hopestatus)s" msgstr "" "La fonction physique %(compute_node_id)s:%(address)s, associée à VF " "%(compute_node_id)s:%(vf_address)s est %(status)s au lieu de %(hopestatus)s" #, python-format msgid "Physical network is missing for network %(network_uuid)s" msgstr "Réseau physique manquant pour le réseau %(network_uuid)s" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "Le réglage des droits n'autorise pas %(action)s à être effectué(e)(s)" #, python-format msgid "Port %(port_id)s is still in use." msgstr "Le port %(port_id)s est encore en cours d'utilisation." #, python-format msgid "Port %(port_id)s not usable for instance %(instance)s." msgstr "Port %(port_id)s inutilisable pour l'instance %(instance)s." #, python-format msgid "" "Port %(port_id)s not usable for instance %(instance)s. Value %(value)s " "assigned to dns_name attribute does not match instance's hostname " "%(hostname)s" msgstr "" "Le port %(port_id)s n'est pas utilisable pour l'instance %(instance)s. La " "valeur %(value)s affectée à l'attribut dns_name ne correspond pas au nom " "d'hôte de l'instance %(hostname)s" #, python-format msgid "Port %(port_id)s requires a FixedIP in order to be used." msgstr "Port %(port_id)s demande une IP fixe pour être utilisé." #, python-format msgid "Port %s is not attached" msgstr "Le port %s n'est pas connecté" #, python-format msgid "Port id %(port_id)s could not be found." msgstr "ID port %(port_id)s introuvable." #, python-format msgid "Provided video model (%(model)s) is not supported." msgstr "Le modèle de vidéo fourni (%(model)s) n'est pas supporté." #, python-format msgid "Provided watchdog action (%(action)s) is not supported." msgstr "L'action de garde fournie (%(action)s) n'est pas supportée" msgid "QEMU guest agent is not enabled" msgstr "L'agent invité QEMU n'est pas activé" #, python-format msgid "Quiescing is not supported in instance %(instance_id)s" msgstr "" "La mise au repos n'est pas prise en charge dans l'instance %(instance_id)s" #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "Classe de quota %(class_name)s introuvable." msgid "Quota could not be found" msgstr "Le quota ne peut pas être trouvé" #, python-format msgid "" "Quota exceeded for %(overs)s: Requested %(req)s, but already used %(used)s " "of %(allowed)s %(overs)s" msgstr "" "Quota dépassé pour %(overs)s : demandé %(req)s, mais %(used)s déjà " "utilisés sur %(allowed)s %(overs)s" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Quota dépassé pour les ressources : %(overs)s" msgid "Quota exceeded, too many key pairs." msgstr "Quota dépassé, trop de paires de clés." msgid "Quota exceeded, too many server groups." msgstr "Quota dépassé, trop de groupes de serveur." 
msgid "Quota exceeded, too many servers in group" msgstr "Quota dépassé, trop de serveurs dans le groupe" #, python-format msgid "Quota exists for project %(project_id)s, resource %(resource)s" msgstr "Le quota existe pour le projet %(project_id)s, ressource %(resource)s" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "Le quota pour le projet %(project_id)s ne peut pas être trouvé." #, python-format msgid "" "Quota for user %(user_id)s in project %(project_id)s could not be found." msgstr "" "Le quota de l'utilisateur %(user_id)s dans le projet %(project_id)s ne peut " "être trouvé." #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be greater than or equal to " "already used and reserved %(minimum)s." msgstr "" "Le quota limite %(limit)s pour %(resource)s doit être supérieur ou égal a " "celle déjà utilisé et réservé: %(minimum)s." #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be less than or equal to " "%(maximum)s." msgstr "" "Le quota limite %(limit)s pour %(resource)s doit être inferieur ou égal à " "%(maximum)s." msgid "Request body and URI mismatch" msgstr "Corps et URI de demande discordants" msgid "Request is too large." msgstr "La demande est trop grande." #, python-format msgid "Request of image %(image_id)s got BadRequest response: %(response)s" msgstr "" "La demande d'image %(image_id)s a reçu une réponse de demande incorrecte : " "%(response)s" #, python-format msgid "RequestSpec not found for instance %(instance_uuid)s" msgstr "RequestSpec non trouvé pour l'instance %(instance_uuid)s" msgid "Requested CPU control policy not supported by host" msgstr "" "La stratégie de contrôle d'UC demandée n'est pas prise en charge par l'hôte" #, python-format msgid "" "Requested hardware '%(model)s' is not supported by the '%(virt)s' virt driver" msgstr "" "Le matériel demandé '%(model)s' n'est pas pris en charge par le pilote " "virtuel '%(virt)s'" #, python-format msgid "Requested image %(image)s has automatic disk resize disabled." msgstr "" "Le redimensionnement automatique du disque est désactivé pour l'image " "requise : %(image)s." msgid "" "Requested instance NUMA topology cannot fit the given host NUMA topology" msgstr "" "La topologie NUMA de l'instance demandée ne tient pas dans la topologie NUMA " "de l'hôte donné" msgid "" "Requested instance NUMA topology together with requested PCI devices cannot " "fit the given host NUMA topology" msgstr "" "La topologie de l'instance NUMA demandée avec les périphériques PCI requis " "ne tient pas dans la topologie NUMA de l'hôte donné" #, python-format msgid "" "Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to " "satisfy for vcpus count %(vcpus)d" msgstr "" "Les limites vCPU demandées %(sockets)d:%(cores)d:%(threads)d ne peuvent être " "satisfaite pour le nombre de vcpus %(vcpus)d" #, python-format msgid "Rescue device does not exist for instance %s" msgstr "L'unité Rescue n'existe pas pour l'instance %s" #, python-format msgid "Resize error: %(reason)s" msgstr "Erreur de redimensionnement : %(reason)s" msgid "Resize to zero disk flavor is not allowed." msgstr "Le redimensionnement sur une version de disque nulle est interdit." msgid "Resource could not be found." msgstr "La ressource n'a pas pu être trouvée." 
msgid "Resumed" msgstr "Repris" #, python-format msgid "Root element name should be '%(name)s' not '%(tag)s'" msgstr "Le nom de l'élément racine doit être '%(name)s', pas '%(tag)s'" #, python-format msgid "Running batches of %i until complete" msgstr "Exécution des lots de %i jusqu'à la fin" #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "La plannification de filtre hôte %(filter_name)s ne peut être trouvée." #, python-format msgid "Security group %(name)s is not found for project %(project)s" msgstr "Groupe de sécurité %(name)s introuvable pour le projet %(project)s" #, python-format msgid "" "Security group %(security_group_id)s not found for project %(project_id)s." msgstr "" "Groupe de sécurité %(security_group_id)s non trouvé pour le projet " "%(project_id)s." #, python-format msgid "Security group %(security_group_id)s not found." msgstr "Groupe de sécurité %(security_group_id)s non trouvé." #, python-format msgid "" "Security group %(security_group_name)s already exists for project " "%(project_id)s." msgstr "" "Groupe de sécurité %(security_group_name)s existe déjà pour le projet " "%(project_id)s." #, python-format msgid "" "Security group %(security_group_name)s not associated with the instance " "%(instance)s" msgstr "" "Le groupe de sécurité %(security_group_name)s n'est pas associé à l'instance " "%(instance)s" msgid "Security group id should be uuid" msgstr "L'ID groupe de sécurité doit être un UUID" msgid "Security group name cannot be empty" msgstr "Le nom du groupe de sécurité ne peut pas être vide" msgid "Security group not specified" msgstr "Groupe de sécurité non spécifié" #, python-format msgid "Server disk was unable to be resized because: %(reason)s" msgstr "" "La taille du disque du serveur n'a pas pu être modifiée car : %(reason)s" msgid "Server does not exist" msgstr "Le serveur n'existe pas" #, python-format msgid "ServerGroup policy is not supported: %(reason)s" msgstr "La stratégie ServerGroup n'est pas supporté: %(reason)s" msgid "ServerGroupAffinityFilter not configured" msgstr "ServerGroupAffinityFilter non configuré" msgid "ServerGroupAntiAffinityFilter not configured" msgstr "ServerGroupAntiAffinityFilter non configuré" msgid "ServerGroupSoftAffinityWeigher not configured" msgstr "ServerGroupSoftAffinityWeigher non configuré" msgid "ServerGroupSoftAntiAffinityWeigher not configured" msgstr "ServerGroupSoftAntiAffinityWeigher non configuré" #, python-format msgid "Service %(service_id)s could not be found." msgstr "Le service %(service_id)s ne peut pas être trouvé." #, python-format msgid "Service %s not found." msgstr "Service %s non trouvé." msgid "Service is unavailable at this time." msgstr "Le service est indisponible actuellement." #, python-format msgid "Service with host %(host)s binary %(binary)s exists." msgstr "Le service avec l'hôte %(host)s et le binaire %(binary)s existe." #, python-format msgid "Service with host %(host)s topic %(topic)s exists." msgstr "Service avec l'hôte %(host)s et le topic %(topic)s existe." msgid "Set admin password is not supported" msgstr "La définition du mot de passe admin n'est pas supportée" #, python-format msgid "Share '%s' is not supported" msgstr "Le partage '%s' n'est pas pris en charge" #, python-format msgid "Share level '%s' cannot have share configured" msgstr "Le niveau de partage '%s' n'a pas de partage configuré" #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "Le snapshot %(snapshot_id)s n'a pas été trouvé." 
msgid "Some required fields are missing" msgstr "Des champs requis sont manquants" #, python-format msgid "" "Something went wrong when deleting a volume snapshot: rebasing a " "%(protocol)s network disk using qemu-img has not been fully tested" msgstr "" "Une erreur s'est produite lors de la suppression d'un instantané de volume : " "relocaliser un disque réseau %(protocol)s avec qemu-img n'a pas été " "entièrement testé" msgid "Sort direction size exceeds sort key size" msgstr "La taille du sens de tri dépasse la taille de la clé de tri" msgid "Sort key supplied was not valid." msgstr "La clé de tri fournie n'était pas valide." msgid "Specified fixed address not assigned to instance" msgstr "L'adresse fixe spécifiée n'est pas assignée à une instance" msgid "Started" msgstr "Démarré" msgid "Stopped" msgstr "Stoppé" #, python-format msgid "Storage error: %(reason)s" msgstr "Erreur de stockage: %(reason)s" #, python-format msgid "Storage policy %s did not match any datastores" msgstr "Les règles de stockage %s ne correspondent à aucun magasin de données" msgid "Success" msgstr "Succès" msgid "Suspended" msgstr "Suspendue" msgid "Swap drive requested is larger than instance type allows." msgstr "" "Le lecteur de swap demandé est plus grand que ce que le type d'instance " "autorise." msgid "Table" msgstr "Tableau" #, python-format msgid "Task %(task_name)s is already running on host %(host)s" msgstr "" "La tâche %(task_name)s est déjà en cours d'exécution sur l'hôte %(host)s" #, python-format msgid "Task %(task_name)s is not running on host %(host)s" msgstr "" "La tâche %(task_name)s n'est pas en cours d'exécution sur l'hôte %(host)s" #, python-format msgid "The PCI address %(address)s has an incorrect format." msgstr "L'adresse PCI %(address)s a un format incorrect." #, python-format msgid "The console port range %(min_port)d-%(max_port)d is exhausted." msgstr "L'intervalle de ports console %(min_port)d-%(max_port)d est épuisé." msgid "The created instance's disk would be too small." msgstr "Le disque de l'instance créée serait trop petit." msgid "The current driver does not support preserving ephemeral partitions." msgstr "Le pilote actuel ne permet pas de préserver les partitions éphémères." msgid "The default PBM policy doesn't exist on the backend." msgstr "La règle PBM par défaut n'existe pas sur le back-end." msgid "The floating IP request failed with a BadRequest" msgstr "La demande d'IP flottante a échouée avec l'erreur Mauvaise Requête" msgid "" "The instance requires a newer hypervisor version than has been provided." msgstr "" "L'instance nécessite une version plus récente de l'hyperviseur que celle " "fournie." #, python-format msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d" msgstr "Le nombre de ports définis (%(ports)d) dépasse la limite (%(quota)d)" #, python-format msgid "The provided RNG device path: (%(path)s) is not present on the host." msgstr "" "Le chemin du périphérique RNG donné: (%(path)s) n'est pas présent sur l’hôte." msgid "The request is invalid." msgstr "La requête est invalide." #, python-format msgid "" "The requested amount of video memory %(req_vram)d is higher than the maximum " "allowed by flavor %(max_vram)d." msgstr "" "La quantité de mémoire vidéo demandée %(req_vram)d est plus élevée que le " "maximum autorisé par la version: %(max_vram)d." msgid "The requested availability zone is not available" msgstr "La zone de disponibilité demandée n'est pas disponible" msgid "The requested functionality is not supported." 
msgstr "La fonctionnalité demandée n'est pas suportée" #, python-format msgid "The specified cluster '%s' was not found in vCenter" msgstr "Le cluster spécifié, '%s', est introuvable dans vCenter" #, python-format msgid "The supplied device path (%(path)s) is in use." msgstr "" "le chemin d'accès d'unité fourni (%(path)s) est en cours d'utilisation." #, python-format msgid "The supplied device path (%(path)s) is invalid." msgstr "Le chemin de périphérique (%(path)s) est invalide." #, python-format msgid "" "The supplied disk path (%(path)s) already exists, it is expected not to " "exist." msgstr "" "Le chemin d'accès du disque (%(path)s) existe déjà, il n'était pas prévu " "d'exister." msgid "The supplied hypervisor type of is invalid." msgstr "Le type de l'hyperviseur fourni n'est pas valide." msgid "The target host can't be the same one." msgstr "L'hôte de la cible ne peut pas être le même." #, python-format msgid "The token '%(token)s' is invalid or has expired" msgstr "Le jeton '%(token)s' est invalide ou a expiré" #, python-format msgid "" "The volume cannot be assigned the same device name as the root device %s" msgstr "" "Le volume ne peut pas recevoir le même nom d'unité que l'unité racine %s" msgid "There are not enough hosts available." msgstr "Le nombre d'hôtes disponibles est insuffisant" #, python-format msgid "There is no such action: %s" msgstr "Aucune action de ce type : %s" #, python-format msgid "" "This compute node's hypervisor is older than the minimum supported version: " "%(version)s." msgstr "" "L'hyperviseur de ce noeud de calcul est plus ancien que la version minimale " "prise en charge : %(version)s." msgid "" "This method needs to be called with either networks=None and port_ids=None " "or port_ids and networks as not none." msgstr "" "Cette méthode doit être appelée avec networks=None et port_ids=None ou avec " "des valeurs de port_ids et networks autres que None." #, python-format msgid "This rule already exists in group %s" msgstr "Cette règle existe déjà dans le groupe %s" #, python-format msgid "" "This service is older (v%(thisver)i) than the minimum (v%(minver)i) version " "of the rest of the deployment. Unable to continue." msgstr "" "Ce service est plus ancien (v%(thisver)i) que la version minimale " "(v%(minver)i) du reste du déploiement. Impossible de continuer." msgid "Timeout waiting for response from cell" msgstr "Dépassement du délai d'attente pour la réponse de la cellule" #, python-format msgid "Timeout while checking if we can live migrate to host: %s" msgstr "" "Timeout lors de la vérification de la possibilité de migrer à chaud vers " "l'hôte: %s" msgid "To and From ports must be integers" msgstr "Les ports de destination et d'origine doivent être des entiers" msgid "Token not found" msgstr "Token non trouvé" msgid "Triggering crash dump is not supported" msgstr "Déclenchement de vidage sur incident non pris en charge" msgid "Type and Code must be integers for ICMP protocol type" msgstr "" "Le type et le code doivent être des entiers pour le type de protocole ICMP" msgid "UEFI is not supported" msgstr "UEFI n'est pas supporté" #, python-format msgid "" "Unable to associate floating IP %(address)s to any fixed IPs for instance " "%(id)s. Instance has no fixed IPv4 addresses to associate." msgstr "" "Impossible d'assigner l'IP flottante %(address)s à une IP fixe de l'instance " "%(id)s. L'instance n'a pas d'IPv4 fixe." #, python-format msgid "" "Unable to associate floating IP %(address)s to fixed IP %(fixed_address)s " "for instance %(id)s. 
Error: %(error)s" msgstr "" "Incapable d'associer une IP flottante %(address)s à une IP fixe " "%(fixed_address)s pour l'instance %(id)s. Erreur : %(error)s" #, python-format msgid "Unable to convert image to %(format)s: %(exp)s" msgstr "Impossible de convertir l'image en %(format)s : %(exp)s" #, python-format msgid "Unable to convert image to raw: %(exp)s" msgstr "Impossible de convertir l'image en raw : %(exp)s" #, python-format msgid "Unable to determine disk bus for '%s'" msgstr "Impossible de déterminer le bus de disque pour '%s'" #, python-format msgid "Unable to determine disk prefix for %s" msgstr "Impossible de déterminer le préfixe du disque pour %s" #, python-format msgid "Unable to find host for Instance %s" msgstr "Impossible de trouver l'hôte pour l'instance %s" msgid "Unable to find iSCSI Target" msgstr "Cible iSCSI introuvable" msgid "Unable to find volume" msgstr "Volume introuvable" msgid "Unable to get host UUID: /etc/machine-id does not exist" msgstr "Impossible d'obtenir l'UUID de l'hôte : /etc/machine-id n'existe pas" msgid "Unable to get host UUID: /etc/machine-id is empty" msgstr "Impossible d'obtenir l'UUID de l'hôte : /etc/machine-id est vide" msgid "" "Unable to launch multiple instances with a single configured port ID. Please " "launch your instance one by one with different ports." msgstr "" "Impossible de lancer plusieurs instances avec un seul ID de port configuré. " "Veuillez lancer vos instances une à une avec des ports différents." #, python-format msgid "" "Unable to migrate %(instance_uuid)s to %(dest)s: Lack of memory(host:" "%(avail)s <= instance:%(mem_inst)s)" msgstr "" "Impossible de migrer %(instance_uuid)s vers %(dest)s : manque de " "mémoire(hôte : %(avail)s <= instance : %(mem_inst)s)" #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too large(available " "on destination host:%(available)s < need:%(necessary)s)" msgstr "" "Impossible de migrer %(instance_uuid)s : Le disque de l'instance est trop " "grand (disponible sur l'hôte de destination :%(available)s < requis :" "%(necessary)s)" #, python-format msgid "" "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." msgstr "" "Impossible de migrer l'instance (%(instance_id)s) vers l'hôte actuel " "(%(host)s)." msgid "Unable to resize disk down." msgstr "Impossible de redimensionner le disque à la baisse." msgid "Unable to set password on instance" msgstr "Impossible de définir le mot de passe sur l'instance" msgid "Unable to shrink disk." msgstr "Impossible de redimensionner le disque." #, python-format msgid "Unacceptable CPU info: %(reason)s" msgstr "Information CPU inacceptable : %(reason)s" msgid "Unacceptable parameters." msgstr "Paramètres inacceptables." #, python-format msgid "Unavailable console type %(console_type)s." msgstr "Type de console indisponible %(console_type)s." msgid "" "Undefined Block Device Mapping root: BlockDeviceMappingList contains Block " "Device Mappings from multiple instances." msgstr "" "Racine du mappage d'unité par bloc non définie : BlockDeviceMappingList " "contient des mappages d'unité par bloc provenant de plusieurs instances." 
#, python-format msgid "Unexpected aggregate action %s" msgstr "Action d'agrégat inattendue : %s" msgid "Unexpected type adding stats" msgstr "Type inattendu d'ajout des statistiques" #, python-format msgid "Unexpected vif_type=%s" msgstr "vif_type = %s inattendu" msgid "Unknown" msgstr "Inconnu" msgid "Unknown action" msgstr "Action inconnu" #, python-format msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat." msgstr "" "Format d'unité de config inconnu %(format)s. Sélectionnez iso9660 ou vfat." #, python-format msgid "Unknown delete_info type %s" msgstr "Type inconnu delete_info %s" #, python-format msgid "Unknown image_type=%s" msgstr "image_type=%s inconnu" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Ressources de quota inconnues %(unknown)s." msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Direction d'ordonnancement inconnue, choisir 'desc' ou 'asc'" #, python-format msgid "Unknown type: %s" msgstr "Type inconnu: %s" msgid "Unrecognized legacy format." msgstr "Ancien format non reconnu." #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "Valeur read_deleted non reconnue '%s'" #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "Valeur non reconnue '%s' pour CONF.running_deleted_instance_action" #, python-format msgid "Unshelve attempted but the image %s cannot be found." msgstr "Extraction tentée mais l'image %s est introuvable." msgid "Unsupported Content-Type" msgstr "Type de contenu non pris en charge" #, python-format msgid "User %(username)s not found in password file." msgstr "Utilisateur %(username)s non trouvé dans le fichier de mot de passe." #, python-format msgid "User %(username)s not found in shadow file." msgstr "Utilisateur %(username)s non trouvé dans le fichier fantôme." msgid "User data needs to be valid base 64." msgstr "Les données utilisateur doivent être des données base 64 valides." msgid "User does not have admin privileges" msgstr "L’utilisateur n'a pas les privilèges administrateur" msgid "" "Using different block_device_mapping syntaxes is not allowed in the same " "request." msgstr "" "Utiliser différentes syntaxes de block_device_mapping n'est pas autorisé " "dans la même requête." #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "La version %(req_ver)s n'est pas supporté par l'API. Minimum requis: " "%(min_ver)s et le maximum: %(max_ver)s" msgid "Virtual Interface creation failed" msgstr "La création de l'Interface Virtuelle a échoué" msgid "Virtual interface plugin failed" msgstr "Echec du plugin d'interface virtuelle" #, python-format msgid "Virtual machine mode '%(vmmode)s' is not recognised" msgstr "Le mode de la machine virtuelle '%(vmmode)s' n'est pas reconnu" #, python-format msgid "Virtual machine mode '%s' is not valid" msgstr "Le mode de machine virtuelle '%s' n'est pas valide" #, python-format msgid "Virtualization type '%(virt)s' is not supported by this compute driver" msgstr "" "Le type de virtualisation '%(virt)s' n'est pas pris en charge par ce pilote " "de calcul" #, python-format msgid "Volume %(volume_id)s could not be attached. Reason: %(reason)s" msgstr "Impossible de connecter le volume %(volume_id)s. Raison : %(reason)s" #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "Le volume %(volume_id)s n'a pas pu être trouvé." 
#, python-format msgid "" "Volume %(volume_id)s did not finish being created even after we waited " "%(seconds)s seconds or %(attempts)s attempts. And its status is " "%(volume_status)s." msgstr "" "Création du %(volume_id)s non terminée après attente de %(seconds)s secondes " "ou %(attempts)s tentatives. Son statut est %(volume_status)s." msgid "Volume does not belong to the requested instance." msgstr "Le volume n'appartient pas à l'instance demandée." #, python-format msgid "" "Volume encryption is not supported for %(volume_type)s volume %(volume_id)s" msgstr "" "L'encryptage de volume n'est pas supporté pour le volume %(volume_id)s de " "type %(volume_type)s" #, python-format msgid "" "Volume is smaller than the minimum size specified in image metadata. Volume " "size is %(volume_size)i bytes, minimum size is %(image_min_disk)i bytes." msgstr "" "Le volume est plus petit que la taille minimum spécifiée dans les " "métadonnées de l'image. La taille du volume est %(volume_size)i octets, la " "taille minimum est %(image_min_disk)i octets." #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " "support custom block size" msgstr "" "Le volume définit la taille de bloc, mais l'hyperviseur libvirt en cours " "'%s' ne prend pas en charge la taille de bloc personnalisée" msgid "When resizing, instances must change flavor!" msgstr "Lors du redimensionnement, les instances doivent changer la version !" #, python-format msgid "Wrong quota method %(method)s used on resource %(res)s" msgstr "Mauvaise méthode de quota %(method)s utilisée sur la ressource %(res)s" msgid "X-Forwarded-For is missing from request." msgstr "X-Forwarded-For est manquant dans la requête" msgid "X-Instance-ID header is missing from request." msgstr "L'en-tête X-Instance-ID est manquant dans la demande." msgid "X-Instance-ID-Signature header is missing from request." msgstr "L'en-tête X-Instance-ID-Signature est absent de la demande." msgid "X-Metadata-Provider is missing from request." msgstr "X-Metadata-Provider est manquant dans la requête" msgid "X-Tenant-ID header is missing from request." msgstr "L'entête X-Tenant-ID est manquante dans la requête." msgid "You are not allowed to delete the image." msgstr "Vous n'êtes pas autorisé à supprimer l'image." msgid "" "You are not authorized to access the image the instance was started with." msgstr "" "Vous n'êtes pas autorisé à accéder à l'image par laquelle l'instance a été " "démarrée." msgid "You must implement __call__" msgstr "Vous devez implémenter __call__" msgid "You should specify images_rbd_pool flag to use rbd images." msgstr "" "Vous devez indiquer l'indicateur images_rbd_pool pour utiliser les images " "rbd." msgid "You should specify images_volume_group flag to use LVM images." msgstr "" "Vous devez spécifier l'indicateur images_volume_group pour utiliser les " "images LVM." msgid "Zero floating IPs available." msgstr "Aucune adresse IP flottante n'est disponible." 
msgid "admin password can't be changed on existing disk" msgstr "Impossible de modifier le mot de passe admin sur le disque existant" msgid "cannot understand JSON" msgstr "impossible de comprendre JSON" msgid "clone() is not implemented" msgstr "clone() n'est pas implémenté" #, python-format msgid "connect info: %s" msgstr "Information de connexion: %s" #, python-format msgid "connecting to: %(host)s:%(port)s" msgstr "connexion à : %(host)s:%(port)s" msgid "direct_snapshot() is not implemented" msgstr "direct_snapshot() n'est pas implémenté" #, python-format msgid "disk type '%s' not supported" msgstr "type disque '%s' non supporté" #, python-format msgid "empty project id for instance %s" msgstr "ID projet vide pour l'instance %s" msgid "error setting admin password" msgstr "erreur lors de la définition du mot de passe admin" #, python-format msgid "error: %s" msgstr "erreur: %s" #, python-format msgid "failed to generate X509 fingerprint. Error message: %s" msgstr "Échec lors de la génération de l'emprunte X509. Message d'erreur: %s " msgid "failed to generate fingerprint" msgstr "Échec dans la génération de l'empreinte" msgid "filename cannot be None" msgstr "Nom de fichier ne peut pas etre \"vide\"" msgid "floating IP is already associated" msgstr "L'adresse IP flottante est déjà associée" msgid "floating IP not found" msgstr "Adresse IP flottante non trouvée" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s sauvegardé par : %(backing_file)s" #, python-format msgid "href %s does not contain version" msgstr "href %s ne contient pas de version" msgid "image already mounted" msgstr "image déjà montée" #, python-format msgid "instance %s is not running" msgstr "instance %s n'est pas en cours d'exécution" msgid "instance is a required argument to use @refresh_cache" msgstr "" "l'instance est un argument obligatoire pour l'utilisation de @refresh_cache" msgid "instance is not in a suspended state" msgstr "l'instance n'est pas à l'état suspendu" msgid "instance is not powered on" msgstr "l'instance n'est pas mise sous tension" msgid "instance is powered off and cannot be suspended." msgstr "L'instance est hors tension et ne peut pas être interrompue." #, python-format msgid "instance_id %s could not be found as device id on any ports" msgstr "" "l'instance_id %s est introuvable comme identificateur d'unité sur aucun port" msgid "is_public must be a boolean" msgstr "is_public doit être booléen." msgid "keymgr.fixed_key not defined" msgstr "keymgr.fixed_key n'est pas défini" msgid "l3driver call to add floating IP failed" msgstr "Échec de l'ajout d'une adresse IP flottant par l'appel l3driver" #, python-format msgid "libguestfs installed but not usable (%s)" msgstr "libguestfs est installé mais n'est pas utilisable (%s)" #, python-format msgid "libguestfs is not installed (%s)" msgstr "libguestfs n'est pas installé (%s)" #, python-format msgid "marker [%s] not found" msgstr "le marqueur [%s] est introuvable" #, python-format msgid "max rows must be <= %(max_value)d" msgstr "Le nombre maximum doit être <= %(max_value)d" msgid "max_count cannot be greater than 1 if an fixed_ip is specified." msgstr "max_count ne peut être supérieur à 1 si un fixed_ip est spécifié." 
msgid "min_count must be <= max_count" msgstr "min_count doit être <= max_count" #, python-format msgid "nbd device %s did not show up" msgstr "Device nbd %s n'est pas apparu" msgid "nbd unavailable: module not loaded" msgstr "nbd non disponible : module non chargé" #, python-format msgid "no match found for %s" msgstr "aucune occurrence trouvée pour %s" #, python-format msgid "no usable parent snapshot for volume %s" msgstr "aucun instantané parent utilisable pour le volume %s" #, python-format msgid "no write permission on storage pool %s" msgstr "aucun droit en écriture sur le pool de stockage %s" #, python-format msgid "not able to execute ssh command: %s" msgstr "impossible d'exécuter la commande ssh : %s" msgid "old style configuration can use only dictionary or memcached backends" msgstr "" "une ancienne configuration ne peut utiliser que des back-ends de type " "dictionary ou memcached" msgid "operation time out" msgstr "l'opération a dépassé le délai d'attente" #, python-format msgid "partition %s not found" msgstr "partition %s non trouvée" #, python-format msgid "partition search unsupported with %s" msgstr "recherche de partition non pris en charge avec %s" msgid "pause not supported for vmwareapi" msgstr "mise en pause non prise en charge pour vmwareapi" msgid "printable characters with at least one non space character" msgstr "" "caractères imprimables avec au moins un caractère différent d'un espace" msgid "printable characters. Can not start or end with whitespace." msgstr "" "caractères imprimables. Ne peut pas commencer ou se terminer par un espace." #, python-format msgid "qemu-img failed to execute on %(path)s : %(exp)s" msgstr "Echec d'exécution de qemu-img sur %(path)s : %(exp)s" #, python-format msgid "qemu-nbd error: %s" msgstr "erreur qemu-nbd : %s" msgid "rbd python libraries not found" msgstr "Librairies python rbd non trouvé" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" "read_deleted peut uniquement correspondre à 'no', 'yes' ou 'only', et non %r" msgid "serve() can only be called once" msgstr "serve() peut uniquement être appelé une fois" msgid "service is a mandatory argument for DB based ServiceGroup driver" msgstr "" "Le service est un argument obligatoire pour le pilote ServiceGroup utilisant " "la base de données" msgid "service is a mandatory argument for Memcached based ServiceGroup driver" msgstr "" "Le service est un argument obligatoire pour le pilote ServiceGroup utilisant " "Memcached" msgid "set_admin_password is not implemented by this driver or guest instance." msgstr "" "set_admin_password n'est pas implémenté par ce pilote ou par cette instance " "invitée." 
#, python-format msgid "snapshot for %s" msgstr "instantané pour %s" msgid "snapshot_id required in create_info" msgstr "snapshot_id requis dans create_info" msgid "token not provided" msgstr "Jeton non fourni" msgid "too many body keys" msgstr "trop de clés de corps" msgid "unpause not supported for vmwareapi" msgstr "annulation de la mise en pause non prise en charge pour vmwareapi" #, python-format msgid "vg %s must be LVM volume group" msgstr "vg %s doit être un groupe de volumes LVM" #, python-format msgid "vhostuser_sock_path not present in vif_details for vif %(vif_id)s" msgstr "vhostuser_sock_path absent de vif_détails pour le vif %(vif_id)s" #, python-format msgid "vif type %s not supported" msgstr "Type vif %s non pris en charge" msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "" "Le paramètre vif_type doit être présent pour cette implémentation de " "vif_driver." #, python-format msgid "volume %s already attached" msgstr "Le volume %s est déjà connecté " #, python-format msgid "" "volume '%(vol)s' status must be 'in-use'. Currently in '%(status)s' status" msgstr "" "Le statut du volume '%(vol)s' doit être 'in-use'. Statut actuel : " "'%(status)s'" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315688.893605 nova-32.0.0/nova/locale/it/0000775000175000017500000000000000000000000015400 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3696086 nova-32.0.0/nova/locale/it/LC_MESSAGES/0000775000175000017500000000000000000000000017165 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/locale/it/LC_MESSAGES/nova.po0000664000175000017500000027063100000000000020501 0ustar00zuulzuul00000000000000# Translations template for nova. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the nova project. # # Translators: # Ying Chun Guo , 2013 # FIRST AUTHOR , 2011 # Loris Strozzini, 2012 # ls, 2012 # Mariano Iumiento , 2013 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-07-04 18:13+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:04+0000\n" "Last-Translator: Copied by Zanata \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Italian\n" #, python-format msgid "%(address)s is not a valid IP v4/6 address." msgstr "%(address)s non è un indirizzo v4/6 IP valido." #, python-format msgid "" "%(binary)s attempted direct database access which is not allowed by policy" msgstr "" "%(binary)s ha tentato l'accesso diretto al database che non è consentito " "dalla politica" #, python-format msgid "%(cidr)s is not a valid IP network." msgstr "%(cidr)s non è una rete IP valida." #, python-format msgid "%(field)s should not be part of the updates." msgstr "%(field)s non deve fare parte degli aggiornamenti." 
#, python-format msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB" msgstr "%(memsize)d MB di memoria assegnata, ma previsti MB di %(memtotal)d" #, python-format msgid "%(path)s is not on local storage: %(reason)s" msgstr "%(path)s non si trova nella memoria locale: %(reason)s" #, python-format msgid "%(path)s is not on shared storage: %(reason)s" msgstr "%(path)s non si trova nella memoria condivisa: %(reason)s" #, python-format msgid "%(total)i rows matched query %(meth)s, %(done)i migrated" msgstr "" "%(total)i righe corrispondenti alla query %(meth)s, %(done)i sono state " "migrate" #, python-format msgid "%(type)s hypervisor does not support PCI devices" msgstr "l'hypervisor %(type)s non supporta i dispositivi PCI" #, python-format msgid "%s does not support disk hotplug." msgstr "%s non supporta il collegamento a caldo del disco." #, python-format msgid "%s format is not supported" msgstr "Il formato %s non è supportato" #, python-format msgid "%s is not supported." msgstr "%s non è supportato." #, python-format msgid "%s must be either 'MANUAL' or 'AUTO'." msgstr "%s deve essere 'MANUAL' o 'AUTO'." #, python-format msgid "'%(other)s' should be an instance of '%(cls)s'" msgstr "'%(other)s' deve essere un'istanza di '%(cls)s'" msgid "'qemu-img info' parsing failed." msgstr "analisi di 'qemu-img info' non riuscita." #, python-format msgid "'rxtx_factor' argument must be a float between 0 and %g" msgstr "" "L'argomento 'rxtx_factor' deve essere un valore a virgola mobile compreso " "tra 0 e %g" #, python-format msgid "A NetworkModel is required in field %s" msgstr "Un modello di rete è richiesto nel campo %s" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "Stringa della versione API %(version)s in formato non valido. Deve essere in " "formato MajorNum.MinorNum." #, python-format msgid "API version %(version)s is not supported on this method." msgstr "Versione API %(version)s non supportata in questo metodo." msgid "Access list not available for public flavors." msgstr "Elenco accessi non disponibile per i flavor pubblici." #, python-format msgid "Action %s not found" msgstr "Azione %s non trovata" #, python-format msgid "" "Action for request_id %(request_id)s on instance %(instance_uuid)s not found" msgstr "" "L'azione per request_id %(request_id)s nell'istanza %(instance_uuid)s non è " "stata trovata" #, python-format msgid "Action: '%(action)s', calling method: %(meth)s, body: %(body)s" msgstr "Azione: '%(action)s', metodo chiamata: %(meth)s, corpo: %(body)s" #, python-format msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries" msgstr "" "L'aggiunta dei metadati non è riuscita per l'aggregato %(id)s dopo " "%(retries)s tentativi" #, python-format msgid "Aggregate %(aggregate_id)s already has host %(host)s." msgstr "L'aggregato %(aggregate_id)s dispone già dell'host %(host)s." #, python-format msgid "Aggregate %(aggregate_id)s could not be found." msgstr "Impossibile trovare l'aggregato %(aggregate_id)s." #, python-format msgid "Aggregate %(aggregate_id)s has no host %(host)s." msgstr "L'aggregato %(aggregate_id)s non contiene alcun host %(host)s." #, python-format msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." msgstr "" "L'aggregato %(aggregate_id)s non contiene metadati con la chiave " "%(metadata_key)s." #, python-format msgid "Aggregate %(aggregate_name)s already exists." msgstr "L'aggregato %(aggregate_name)s esiste già." 
#, python-format msgid "Aggregate %s does not support empty named availability zone" msgstr "" "L'aggregazione %s non supporta la zona di disponibilità denominata vuota" #, python-format msgid "An invalid 'name' value was provided. The name must be: %(reason)s" msgstr "" "È stato fornito un valore 'name' non valido. Il nome deve essere: %(reason)s" msgid "An unknown error has occurred. Please try your request again." msgstr "Si è verificato un errore sconosciuto. Ritentare la richiesta." msgid "An unknown exception occurred." msgstr "E' stato riscontrato un errore sconosciuto" #, python-format msgid "Architecture name '%(arch)s' is not recognised" msgstr "Il nome architettura '%(arch)s' non è riconosciuto" #, python-format msgid "Architecture name '%s' is not valid" msgstr "Il nome architettura '%s' non è valido" #, python-format msgid "" "Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty pool" msgstr "" "Tentativo di utilizzare il dispositivo PCI %(compute_node_id)s:%(address)s " "dal pool al di fuori del pool" msgid "Attempted overwrite of an existing value." msgstr "Si è tentato di sovrascrivere un valore esistente." #, python-format msgid "Attribute not supported: %(attr)s" msgstr "Attributo non supportato: %(attr)s" #, python-format msgid "Bad network format: missing %s" msgstr "Formato rete non corretto: manca %s" msgid "Bad networks format" msgstr "Formato reti non corretto" #, python-format msgid "Bad networks format: network uuid is not in proper format (%s)" msgstr "" "Il formato delle reti non è corretto: il formato (%s) uuid della rete non è " "corretto" #, python-format msgid "Bad prefix for network in cidr %s" msgstr "Prefisso errato per la rete in cidr %s" #, python-format msgid "" "Binding failed for port %(port_id)s, please check neutron logs for more " "information." msgstr "" "Bind non riuscito per la porta %(port_id)s, controllare i log neutron per " "ulteriori informazioni." msgid "Blank components" msgstr "Componenti vuoti" msgid "" "Blank volumes (source: 'blank', dest: 'volume') need to have non-zero size" msgstr "" "I volumi vuoti (origine: 'blank', dest: 'volume') devono avere una " "dimensione diversa da zero" #, python-format msgid "Block Device %(id)s is not bootable." msgstr "Il dispositivo di blocco %(id)s non è riavviabile." #, python-format msgid "" "Block Device Mapping %(volume_id)s is a multi-attach volume and is not valid " "for this operation." msgstr "" "L'associazione del dispositivo di blocco %(volume_id)s è un volume multi-" "attach e non è valida per questa operazione." msgid "Block Device Mapping cannot be converted to legacy format. " msgstr "" "L'associazione del dispositivo di blocco non può essere convertita in " "formato legacy. " msgid "Block Device Mapping is Invalid." msgstr "La mappatura unità di blocco non è valida." #, python-format msgid "Block Device Mapping is Invalid: %(details)s" msgstr "L'associazione del dispositivo di blocco non è valida: %(details)s" msgid "" "Block Device Mapping is Invalid: Boot sequence for the instance and image/" "block device mapping combination is not valid." msgstr "" "L'associazione del dispositivo di blocco non è valida: la sequenza di avvio " "per l'istanza e la combinazione dell'associazione del dispositivo immagine/" "blocco non è valida." 
msgid "" "Block Device Mapping is Invalid: You specified more local devices than the " "limit allows" msgstr "" "L'associazione del dispositivo di blocco non è valida: sono stati " "specificati più dispositivi locali del limite consentito" #, python-format msgid "Block Device Mapping is Invalid: failed to get image %(id)s." msgstr "" "L'associazione del dispositivo di blocco non è valida: impossibile ottenere " "l'immagine %(id)s." #, python-format msgid "Block Device Mapping is Invalid: failed to get snapshot %(id)s." msgstr "" "La mappatura unità di blocco non è valida: impossibile ottenere " "un'istantanea %(id)s." #, python-format msgid "Block Device Mapping is Invalid: failed to get volume %(id)s." msgstr "" "La mappatura unità di blocco non è valida: impossibile ottenere il volume " "%(id)s." msgid "Block migration can not be used with shared storage." msgstr "" "La migrazione blocchi non può essere utilizzata con l'archivio condiviso." msgid "Boot index is invalid." msgstr "L'indice boot non è valido." #, python-format msgid "Build of instance %(instance_uuid)s aborted: %(reason)s" msgstr "La build dell'istanza %(instance_uuid)s è stata interrotta: %(reason)s" #, python-format msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s" msgstr "" "La build dell'istanza %(instance_uuid)s è stata ripianificata: %(reason)s" #, python-format msgid "BuildRequest not found for instance %(uuid)s" msgstr "BuildRequest non trovata per l'istanza %(uuid)s" msgid "CPU and memory allocation must be provided for all NUMA nodes" msgstr "" "La CPU e l'allocazione di memoria devono essere forniti per tutti i nodi NUMA" #, python-format msgid "" "CPU doesn't have compatibility.\n" "\n" "%(ret)s\n" "\n" "Refer to %(u)s" msgstr "" "CPU non ha compatibilità.\n" "\n" "%(ret)s\n" "\n" "Fare riferimento a %(u)s" #, python-format msgid "CPU number %(cpunum)d is assigned to two nodes" msgstr "Il numero CPU %(cpunum)d è assegnato a due nodi" #, python-format msgid "CPU number %(cpunum)d is larger than max %(cpumax)d" msgstr "Il numero CPU %(cpunum)d è superiore a quello massimo %(cpumax)d" #, python-format msgid "CPU number %(cpuset)s is not assigned to any node" msgstr "Il numero CPU %(cpuset)s non è assegnato a nessun nodo" msgid "Can not add access to a public flavor." msgstr "Impossibile aggiungere l'accesso a una versione pubblica." msgid "Can not find requested image" msgstr "Impossibile trovare l'immagine richiesta" #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "" "Impossibile gestire la richiesta di autenticazione per le credenziali %d" msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "" "Impossibile recuperare il percorso root dell'unità dalla configurazione " "libvirt dell'istanza" #, python-format msgid "" "Cannot '%(action)s' instance %(server_id)s while it is in %(attr)s %(state)s" msgstr "" "Impossibile '%(action)s' l'istanza %(server_id)s mentre si trova in %(attr)s " "%(state)s" #, python-format msgid "Cannot add host to aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Impossibile aggiungere l'host all'aggregato %(aggregate_id)s. Motivo: " "%(reason)s." 
msgid "Cannot attach one or more volumes to multiple instances" msgstr "Impossibile collegare uno o più volume a più istanze" #, python-format msgid "Cannot call %(method)s on orphaned %(objtype)s object" msgstr "Impossibile chiamare %(method)s su oggetto orfano %(objtype)s" #, python-format msgid "" "Cannot determine the parent storage pool for %s; cannot determine where to " "store images" msgstr "" "Impossibile determinare il pool di archiviazione parent per %s; impossibile " "determinare dove archiviare le immagini" msgid "Cannot find image for rebuild" msgstr "Impossibile trovare l'immagine per la nuova build" #, python-format msgid "Cannot remove host %(host)s in aggregate %(id)s" msgstr "Impossibile rimuovere l'host %(host)s nell'aggregato %(id)s" #, python-format msgid "Cannot remove host from aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Impossibile rimuovere l'host dall'aggregato %(aggregate_id)s. Motivo: " "%(reason)s." msgid "Cannot rescue a volume-backed instance" msgstr "Impossibile ripristinare un'istanza volume-backed" msgid "" "Cannot set cpu thread pinning policy in a non dedicated cpu pinning policy" msgstr "" "Impossibile impostare la politica di blocco del thread della CPU in una " "politica di blocco della CPU non dedicata" msgid "Cannot set realtime policy in a non dedicated cpu pinning policy" msgstr "" "Impossibile impostare la politica in tempo reale in una politica di blocco " "della CPU non dedicata " #, python-format msgid "Cannot update aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Impossibile aggiornare l'aggregato %(aggregate_id)s. Motivo: %(reason)s." #, python-format msgid "" "Cannot update metadata of aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Impossibile aggiornare i metadati dell'aggregato %(aggregate_id)s. Motivo: " "%(reason)s." #, python-format msgid "Cell %(uuid)s has no mapping." msgstr "La cella %(uuid)s non dispone di alcuna associazione." #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "" "La modifica renderebbe l'utilizzo inferiore a 0 per le seguenti risorse: " "%(unders)s" #, python-format msgid "Class %(class_name)s could not be found: %(exception)s" msgstr "Impossibile trovare la classe %(class_name)s: %(exception)s" #, python-format msgid "Compute host %(host)s could not be found." msgstr "Impossibile trovare l'host compute %(host)s." #, python-format msgid "Compute host %s not found." msgstr "Impossibile trovare l'host compute %s." #, python-format msgid "Compute service of %(host)s is still in use." msgstr "Il servizio compute di %(host)s è ancora in uso." #, python-format msgid "Compute service of %(host)s is unavailable at this time." msgstr "Il servizio compute di %(host)s non è disponibile in questo momento." #, python-format msgid "Config drive format '%(format)s' is not supported." msgstr "Il formato dell'unità di configurazione '%(format)s' non è supportato." #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt hypervisor " "'%s' does not support selecting CPU models" msgstr "" "Config ha richiesto un modello di CPU esplicito, ma l'hypervisor libvirt " "'%s' non supporta la selezione dei modelli di CPU" #, python-format msgid "" "Conflict updating instance %(instance_uuid)s, but we were unable to " "determine the cause" msgstr "" "Conflitto durante l'aggiornamento dell'istanza %(instance_uuid)s, ma non è " "stato possibile determinare la causa." 
#, python-format msgid "" "Conflict updating instance %(instance_uuid)s. Expected: %(expected)s. " "Actual: %(actual)s" msgstr "" "Conflitto durante l'aggiornamento dell'istanza %(instance_uuid)s. Previsto: " "%(expected)s. Reale: %(actual)s" #, python-format msgid "Connection to cinder host failed: %(reason)s" msgstr "Connessione a cinder host non riuscita: %(reason)s" #, python-format msgid "Connection to glance host %(server)s failed: %(reason)s" msgstr "Connessione all'host glance %(server)s non riuscita: %(reason)s" #, python-format msgid "Connection to libvirt lost: %s" msgstr "Connessione a libvirt non riuscita: %s" #, python-format msgid "" "Console log output could not be retrieved for instance %(instance_id)s. " "Reason: %(reason)s" msgstr "" "Impossibile richiamare l'output del log della console per l'istanza " "%(instance_id)s. Motivo: %(reason)s" msgid "Constraint not met." msgstr "Vincolo non soddisfatto." #, python-format msgid "Converted to raw, but format is now %s" msgstr "Convertito in non elaborato, ma il formato ora è %s" #, python-format msgid "Could not attach image to loopback: %s" msgstr "Impossibile collegare l'immagine al loopback: %s" #, python-format msgid "Could not fetch image %(image_id)s" msgstr "Impossibile recuperare l'immagine %(image_id)s" #, python-format msgid "Could not find a handler for %(driver_type)s volume." msgstr "Impossibile trovare un gestore per il volume %(driver_type)s." #, python-format msgid "Could not find binary %(binary)s on host %(host)s." msgstr "Impossibile trovare il binario %(binary)s nell'host %(host)s." #, python-format msgid "Could not find config at %(path)s" msgstr "Impossibile trovare la configurazione in %(path)s" msgid "Could not find the datastore reference(s) which the VM uses." msgstr "" "Impossibile trovare il riferimento(i) archivio dati utilizzato dalla VM." #, python-format msgid "Could not load line %(line)s, got error %(error)s" msgstr "Impossibile caricare la linea %(line)s, ricevuto l'errore %(error)s" #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "Impossibile caricare l'app paste '%(name)s' in %(path)s" #, python-format msgid "" "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s" msgstr "" "Impossibile montare l'unità vfat config. %(operation)s non riuscito. Errore: " "%(error)s" #, python-format msgid "Could not upload image %(image_id)s" msgstr "Impossibile caricare l'immagine %(image_id)s" msgid "Creation of virtual interface with unique mac address failed" msgstr "" "Creazione dell'interfaccia virtuale con indirizzo mac univoco non riuscita" #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "L'archivio dati regex %s non corrispondeva ad alcun archivio dati" msgid "Datetime is in invalid format" msgstr "La data/ora è in un formato non valido" msgid "Default PBM policy is required if PBM is enabled." msgstr "La politica PBM predefinita è richiesta se PBM è abilitato." #, python-format msgid "Device '%(device)s' not found." msgstr "Dispositivo '%(device)s' non trovato." msgid "Device name contains spaces." msgstr "Il nome dispositivo contiene degli spazi." msgid "Device name empty or too long." msgstr "Nome dispositivo vuoto o troppo lungo." 
#, python-format msgid "Device type mismatch for alias '%s'" msgstr "Mancata corrispondenza del tipo di dispositivo per l'alias '%s'" #, python-format msgid "Disk format %(disk_format)s is not acceptable" msgstr "Il formato disco %(disk_format)s non è accettabile" #, python-format msgid "Disk info file is invalid: %(reason)s" msgstr "Il file di informazioni sul disco non è valido: %(reason)s" #, python-format msgid "Driver Error: %s" msgstr "Errore del driver: %s" #, python-format msgid "Error attempting to run %(method)s" msgstr "Errore nel tentativo di eseguire %(method)s" #, python-format msgid "" "Error destroying the instance on node %(node)s. Provision state still " "'%(state)s'." msgstr "" "Errore durante la distruzione dell'istanza sul nodo %(node)s. Stato fornito " "ancora '%(state)s'." #, python-format msgid "Error during unshelve instance %(instance_id)s: %(reason)s" msgstr "Errore durante l'istanza non rinviata %(instance_id)s: %(reason)s" #, python-format msgid "" "Error from libvirt while getting domain info for %(instance_name)s: [Error " "Code %(error_code)s] %(ex)s" msgstr "" "Errore da libvirt durante l'acquisizione delle informazioni sul dominio per " "%(instance_name)s: [Codice di errore %(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Errore da libvirt durante la ricerca di %(instance_name)s: [Codice di errore " "%(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while quiescing %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Errore da libvirt durante la disattivazione di %(instance_name)s: [Codice di " "errore %(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while set password for username \"%(user)s\": [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Errore da libvirt durante l'impostazione della password per il nome utente " "\"%(user)s\": [Codice di errore %(error_code)s] %(ex)s" #, python-format msgid "" "Error mounting %(device)s to %(dir)s in image %(image)s with libguestfs " "(%(e)s)" msgstr "" "Errore di montaggio %(device)s in %(dir)s nell'immagine %(image)s con " "libguestfs (%(e)s)" #, python-format msgid "Error mounting %(image)s with libguestfs (%(e)s)" msgstr "Errore di montaggio %(image)s con libguestfs (%(e)s)" #, python-format msgid "Error when creating resource monitor: %(monitor)s" msgstr "Errore durante la creazione del monitor di risorse: %(monitor)s" #, python-format msgid "Event %(event)s not found for action id %(action_id)s" msgstr "L'evento %(event)s per l'id azione %(action_id)s non è stato trovato" msgid "Event must be an instance of nova.virt.event.Event" msgstr "L'evento deve essere un'istanza di nova.virt.event.Event" #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for instance " "%(instance_uuid)s. Last exception: %(exc_reason)s" msgstr "" "Superamento numero max tentativi di pianificazione %(max_attempts)d per " "l'istanza %(instance_uuid)s. Ultima eccezione: %(exc_reason)s" #, python-format msgid "" "Exceeded max scheduling retries %(max_retries)d for instance " "%(instance_uuid)s during live migration" msgstr "" "Superamento numero max tentativi di pianificazione %(max_retries)d per " "l'istanza %(instance_uuid)s durante la migrazione attiva" #, python-format msgid "Exceeded maximum number of retries. %(reason)s" msgstr "Superato numero massimo di tentativi. %(reason)s" #, python-format msgid "Expected a uuid but received %(uuid)s." 
msgstr "Era previsto un uuid ma è stato ricevuto %(uuid)s." msgid "Extracting vmdk from OVA failed." msgstr "Estrazione di vmdk da OVA non riuscita." #, python-format msgid "Failed to access port %(port_id)s: %(reason)s" msgstr "Impossibile accedere alla porta %(port_id)s: %(reason)s" #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "" "Impossibile allocare la rete con errore %s, nuova pianificazione non " "prevista." msgid "Failed to allocate the network(s), not rescheduling." msgstr "" "Impossibile allocare una o più reti, nuova pianificazione non prevista." #, python-format msgid "Failed to attach network adapter device to %(instance_uuid)s" msgstr "" "Impossibile collegare il dispositivo dell'adattatore di rete a " "%(instance_uuid)s" #, python-format msgid "Failed to deploy instance: %(reason)s" msgstr "Impossibile distribuire l'istanza: %(reason)s" #, python-format msgid "Failed to detach PCI device %(dev)s: %(reason)s" msgstr "Impossibile scollegare il dispositivo PCI %(dev)s: %(reason)s" #, python-format msgid "Failed to detach network adapter device from %(instance_uuid)s" msgstr "" "Impossibile scollegare il dispositivo dell'adattatore di rete da " "%(instance_uuid)s" #, python-format msgid "Failed to encrypt text: %(reason)s" msgstr "Impossibile decodificare testo: %(reason)s" #, python-format msgid "Failed to launch instances: %(reason)s" msgstr "Impossibile avviare le istanze: %(reason)s" #, python-format msgid "Failed to map partitions: %s" msgstr "Impossibile associare le partizioni: %s" #, python-format msgid "Failed to mount filesystem: %s" msgstr "Impossibile montare il file system: %s" #, python-format msgid "Failed to power off instance: %(reason)s" msgstr "Impossibile disattivare l'istanza: %(reason)s" #, python-format msgid "Failed to power on instance: %(reason)s" msgstr "Impossibile alimentare l'istanza: %(reason)s" #, python-format msgid "Failed to provision instance %(inst)s: %(reason)s" msgstr "Impossibile fornire l'istanza %(inst)s: %(reason)s" #, python-format msgid "Failed to read or write disk info file: %(reason)s" msgstr "" "Impossibile leggere o scrivere nel file di informazioni sul disco: %(reason)s" #, python-format msgid "Failed to reboot instance: %(reason)s" msgstr "Impossibile riavviare l'istanza: %(reason)s" #, python-format msgid "Failed to remove volume(s): (%(reason)s)" msgstr "Impossibile rimuovere il volume: (%(reason)s)" #, python-format msgid "Failed to request Ironic to rebuild instance %(inst)s: %(reason)s" msgstr "" "Impossibile richiedere ad Ironic di ricreare l'istanza %(inst)s: %(reason)s" #, python-format msgid "Failed to resume instance: %(reason)s" msgstr "Impossibile ripristinare l'istanza: %(reason)s" #, python-format msgid "Failed to run qemu-img info on %(path)s : %(error)s" msgstr "Impossibile eseguire qemu-img info in %(path)s : %(error)s" #, python-format msgid "Failed to set admin password on %(instance)s because %(reason)s" msgstr "" "Impossibile impostare la password admin in %(instance)s perché %(reason)s" #, python-format msgid "Failed to suspend instance: %(reason)s" msgstr "Impossibile sospendere l'istanza: %(reason)s" #, python-format msgid "Failed to terminate instance: %(reason)s" msgstr "Impossibile terminare l'istanza: %(reason)s" msgid "Failure prepping block device." msgstr "Errore durante l'esecuzione preparatoria del dispositivo di blocco." #, python-format msgid "File %(file_path)s could not be found." msgstr "Impossibile trovare il file %(file_path)s." 
#, python-format msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s." msgstr "" "L'IP fisso %(ip)s non è un indirizzo IP valido per la rete %(network_id)s." #, python-format msgid "Fixed IP %s is already in use." msgstr "IP fisso %s già in uso." #, python-format msgid "" "Fixed IP address %(address)s is already in use on instance %(instance_uuid)s." msgstr "" "L'indirizzo IP fisso %(address)s è già in uso nell'istanza %(instance_uuid)s." #, python-format msgid "Fixed IP not found for address %(address)s." msgstr "Impossibile trovare un IP fisso per l'indirizzo %(address)s." #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "Impossibile trovare la tipologia %(flavor_id)s." #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s." msgstr "" "Il flavor %(flavor_id)s non ha ulteriori specifiche con chiave " "%(extra_specs_key)s." #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(key)s." msgstr "" "Il flavor %(flavor_id)s non ha ulteriori specifiche con chiave %(key)s." #, python-format msgid "" "Flavor %(id)s extra spec cannot be updated or created after %(retries)d " "retries." msgstr "" "Le specifiche supplementari del flavor %(id)s non possono essere aggiornate " "o create dopo %(retries)d tentativi." #, python-format msgid "" "Flavor access already exists for flavor %(flavor_id)s and project " "%(project_id)s combination." msgstr "" "L'accesso a flavor esiste già per flavor %(flavor_id)s e progetto " "%(project_id)s." #, python-format msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination." msgstr "" "Impossibile trovare l'accesso a flavor per la combinazione %(flavor_id)s / " "%(project_id)s." msgid "Flavor used by the instance could not be found." msgstr "Impossibile trovare flavor utilizzato dall'istanza." #, python-format msgid "Flavor with ID %(flavor_id)s already exists." msgstr "Il flavor con ID %(flavor_id)s esiste già." #, python-format msgid "Flavor with name %(flavor_name)s could not be found." msgstr "Impossibile trovare il flavor con nome %(flavor_name)s." #, python-format msgid "Flavor with name %(name)s already exists." msgstr "Il flavor con nome %(name)s esiste già." #, python-format msgid "" "Flavor's disk is smaller than the minimum size specified in image metadata. " "Flavor disk is %(flavor_size)i bytes, minimum size is %(image_min_disk)i " "bytes." msgstr "" "Il disco della versione è più piccolo della dimensione minima specificata " "nei metadati dell'immagine. Il disco versione è %(flavor_size)i byte, la " "dimensione minima è %(image_min_disk)i byte." #, python-format msgid "" "Flavor's disk is too small for requested image. Flavor disk is " "%(flavor_size)i bytes, image is %(image_size)i bytes." msgstr "" "Il disco della versione è troppo piccolo per l'immagine richiesta. Il disco " "versione è %(flavor_size)i byte, l'immagine è %(image_size)i byte." msgid "Flavor's memory is too small for requested image." msgstr "La memoria flavor è troppo piccola per l'immagine richiesta." #, python-format msgid "Floating IP %(address)s association has failed." msgstr "Associazione IP %(address)s mobile non riuscita." #, python-format msgid "Floating IP %(address)s is associated." msgstr "L'IP mobile %(address)s è associato." #, python-format msgid "Floating IP %(address)s is not associated with instance %(id)s." msgstr "L'IP mobile %(address)s non è associato a un'istanza %(id)s." #, python-format msgid "Floating IP not found for ID %(id)s." 
msgstr "Impossibile trovare l'IP mobile per l'ID %(id)s." #, python-format msgid "Floating IP not found for ID %s" msgstr "Impossibile trovare l'IP mobile per l'ID %s." #, python-format msgid "Floating IP not found for address %(address)s." msgstr "Impossibile trovare l'IP mobile per l'indirizzo %(address)s." msgid "Floating IP pool not found." msgstr "Impossibile trovare pool di IP mobili." msgid "" "Forbidden to exceed flavor value of number of serial ports passed in image " "meta." msgstr "" "È vietato superare il valore flavor di numero di porte seriali trasferito ai " "metadati immagine." msgid "Found no disk to snapshot." msgstr "Non è stato trovato nessun disco per l'istantanea." msgid "Guest does not have a console available." msgstr "Guest non dispone di una console disponibile." #, python-format msgid "Host %(host)s could not be found." msgstr "Impossibile trovare l'host %(host)s." #, python-format msgid "Host %(host)s is already mapped to cell %(uuid)s" msgstr "L'host %(host)s è già associato alla cella %(uuid)s" #, python-format msgid "Host '%(name)s' is not mapped to any cell" msgstr "L'host '%(name)s' non è associato a una cella" msgid "Host aggregate is not empty" msgstr "L'aggregato host non è vuoto" msgid "Host does not support guests with NUMA topology set" msgstr "L'host non supporta guest con la topologia NUMA impostata" msgid "Host does not support guests with custom memory page sizes" msgstr "" "L'host non supporta guest con dimensioni pagina di memoria personalizzate" msgid "Hypervisor driver does not support post_live_migration_at_source method" msgstr "" "Il driver hypervisor non supporta il metodo post_live_migration_at_source" #, python-format msgid "Hypervisor virt type '%s' is not valid" msgstr "Tipo virt hypervisor '%s' non valido" #, python-format msgid "Hypervisor virtualization type '%(hv_type)s' is not recognised" msgstr "" "Il tipo di virtualizzazione di hypervisor '%(hv_type)s' non è riconosciuto" #, python-format msgid "Hypervisor with ID '%s' could not be found." msgstr "Impossibile trovare hypervisor con ID '%s'." #, python-format msgid "IP allocation over quota in pool %s." msgstr "L'allocazione IP supera la quota nel pool %s." msgid "IP allocation over quota." msgstr "L'allocazione IP supera la quota." #, python-format msgid "Image %(image_id)s could not be found." msgstr "Impossibile trovare l'immagine %(image_id)s." #, python-format msgid "Image %(image_id)s is not active." msgstr "L'immagine %(image_id)s non è attiva." #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "L'immagine %(image_id)s non è accettabile: %(reason)s" msgid "Image disk size greater than requested disk size" msgstr "Dimensione disco immagine maggiore della dimensione disco richiesta" msgid "Image is not raw format" msgstr "L'immagine non è nel formato non elaborato" msgid "Image metadata limit exceeded" msgstr "Superato il limite dei metadati dell'immagine" #, python-format msgid "Image model '%(image)s' is not supported" msgstr "Modello immagine '%(image)s' non supportato" msgid "Image not found." msgstr "Immagine non trovata." 
#, python-format msgid "" "Image property '%(name)s' is not permitted to override NUMA configuration " "set against the flavor" msgstr "" "La proprietà immagine '%(name)s' non è consentita per sovrascrivere " "l'impostazione di configurazione NUMA rispetto al flavor" msgid "" "Image property 'hw_cpu_policy' is not permitted to override CPU pinning " "policy set against the flavor" msgstr "" "La proprietà immagine 'hw_cpu_policy' non può sostituire la politica di " "blocco della CPU impostata sul flavor" msgid "" "Image property 'hw_cpu_thread_policy' is not permitted to override CPU " "thread pinning policy set against the flavor" msgstr "" "La proprietà immagine 'hw_cpu_thread_policy' non può sostituire la politica " "di blocco del thread della CPU impostata sul flavor" msgid "Image that the instance was started with could not be found." msgstr "Impossibile trovare l'immagine con cui è stata avviata l'istanza." #, python-format msgid "Image's config drive option '%(config_drive)s' is invalid" msgstr "" "L'opzione dell'unità di configurazione dell'immagine '%(config_drive)s' non " "è valida" msgid "" "Images with destination_type 'volume' need to have a non-zero size specified" msgstr "" "Le immagini con destination-type 'volume' devono avere specificata una " "dimensione diversa da zero" msgid "In ERROR state" msgstr "In stato di ERRORE" #, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "Negli stati %(vm_state)s/%(task_state)s, non RESIZED/None" #, python-format msgid "In-progress live migration %(id)s is not found for server %(uuid)s." msgstr "" "La migrazione attiva in corso %(id)s non è stata trovata per il server " "%(uuid)s." msgid "" "Incompatible settings: ephemeral storage encryption is supported only for " "LVM images." msgstr "" "Impostazioni incompatibili: la codifica della memoria temporanea è " "supportata solo per le immagini LVM." #, python-format msgid "Info cache for instance %(instance_uuid)s could not be found." msgstr "" "Impossibile trovare le informazioni della cache per l'istanza " "%(instance_uuid)s." #, python-format msgid "" "Instance %(instance)s and volume %(vol)s are not in the same " "availability_zone. Instance is in %(ins_zone)s. Volume is in %(vol_zone)s" msgstr "" "L'istanza %(instance)s ed il volume %(vol)s non si trovano nella stessa " "availability_zone. L'istanza si trova in %(ins_zone)s. Il volume si trova in " "%(vol_zone)s" #, python-format msgid "Instance %(instance)s does not have a port with id %(port)s" msgstr "L'istanza %(instance)s non dispone di una porta con id%(port)s" #, python-format msgid "Instance %(instance_id)s cannot be rescued: %(reason)s" msgstr "L'istanza %(instance_id)s non può essere ripristinata: %(reason)s" #, python-format msgid "Instance %(instance_id)s could not be found." msgstr "Impossibile trovare l'istanza %(instance_id)s." #, python-format msgid "Instance %(instance_id)s has no tag '%(tag)s'" msgstr "L'istanza %(instance_id)s non dispone di tag '%(tag)s'" #, python-format msgid "Instance %(instance_id)s is not in rescue mode" msgstr "L'istanza %(instance_id)s non è in modalità di ripristino" #, python-format msgid "Instance %(instance_id)s is not ready" msgstr "L'istanza %(instance_id)s non è pronta" #, python-format msgid "Instance %(instance_id)s is not running." msgstr "L'istanza %(instance_id)s non è in esecuzione." 
#, python-format msgid "Instance %(instance_id)s is unacceptable: %(reason)s" msgstr "L'istanza %(instance_id)s non è accettabile: %(reason)s" #, python-format msgid "Instance %(instance_uuid)s does not specify a NUMA topology" msgstr "L'istanza %(instance_uuid)s non specifica una topologia NUMA" #, python-format msgid "Instance %(instance_uuid)s does not specify a migration context." msgstr "L'istanza %(instance_uuid)s non specifica un contesto di migrazione." #, python-format msgid "" "Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while " "the instance is in this state." msgstr "" "Istanza %(instance_uuid)s in %(attr)s %(state)s. Impossibile %(method)s " "quando l'istanza è in questo stato." #, python-format msgid "Instance %(instance_uuid)s is locked" msgstr "L'istanza %(instance_uuid)s è bloccata" #, python-format msgid "" "Instance %(instance_uuid)s requires config drive, but it does not exist." msgstr "" "L'istanza %(instance_uuid)s richiede l'unità di configurazione ma non esiste." #, python-format msgid "Instance %(name)s already exists." msgstr "L'istanza %(name)s esiste già." #, python-format msgid "Instance %(server_id)s is in an invalid state for '%(action)s'" msgstr "" "L'istanza %(server_id)s si trova in uno stato non valido per '%(action)s'" #, python-format msgid "Instance %(uuid)s has no mapping to a cell." msgstr "L'istanza %(uuid)s non dispone di alcuna associazione a una cella." #, python-format msgid "Instance %s not found" msgstr "Istanza %s non trovata" #, python-format msgid "Instance %s provisioning was aborted" msgstr "Il provisioning dell'istanza %s è stato interrotto" msgid "Instance could not be found" msgstr "Impossibile trovare l'istanza" msgid "Instance disk to be encrypted but no context provided" msgstr "" "Disco dell'istanza da codificare, ma non è stato fornito alcun contesto" msgid "Instance event failed" msgstr "Evento istanza non riuscito" #, python-format msgid "Instance group %(group_uuid)s already exists." msgstr "Il gruppo di istanze %(group_uuid)s esiste già." #, python-format msgid "Instance group %(group_uuid)s could not be found." msgstr "Impossibile trovare il gruppo di istanze %(group_uuid)s." msgid "Instance has no source host" msgstr "L'istanza non dispone alcun host di origine" msgid "Instance has not been resized." msgstr "L'istanza non è stata ridmensionata." #, python-format msgid "Instance hostname %(hostname)s is not a valid DNS name" msgstr "Il nome host %(hostname)s dell'istanza non è un nome DNS valido" msgid "Instance is not a member of specified network" msgstr "L'istanza non è un membro della rete specificata" #, python-format msgid "Instance rollback performed due to: %s" msgstr "Ripristino dell'istanza eseguito a causa di: %s" #, python-format msgid "" "Insufficient Space on Volume Group %(vg)s. Only %(free_space)db available, " "but %(size)d bytes required by volume %(lv)s." msgstr "" "Spazio insufficiente nel gruppo volume %(vg)s. Solo %(free_space)db " "disponibile, ma %(size)d byte richiesti dal volume %(lv)s." #, python-format msgid "Insufficient compute resources: %(reason)s." msgstr "Risorse di elaborazione insufficienti: %(reason)s." #, python-format msgid "Interface %(interface)s not found." msgstr "Impossibile trovare l'interfaccia %(interface)s." 
#, python-format msgid "Invalid Base 64 data for file %(path)s" msgstr "I dati della base 64 non sono validi per il file %(path)s" msgid "Invalid Connection Info" msgstr "Informazioni sulla connessione non valide" #, python-format msgid "Invalid ID received %(id)s." msgstr "Ricevuto ID non valido %(id)s." #, python-format msgid "Invalid IP format %s" msgstr "Formato IP non valido %s" #, python-format msgid "Invalid IP protocol %(protocol)s." msgstr "Protocollo IP non valido %(protocol)s." msgid "" "Invalid PCI Whitelist: The PCI whitelist can specify devname or address, but " "not both" msgstr "" "La whitelist PCI non è valida: la whitelist PCI può specificare il devname o " "l'indirizzo, ma non entrambi" #, python-format msgid "Invalid PCI alias definition: %(reason)s" msgstr "Definizione alias PCI non valida: %(reason)s" #, python-format msgid "Invalid Regular Expression %s" msgstr "Espressione regolare non valida %s" #, python-format msgid "Invalid characters in hostname '%(hostname)s'" msgstr "Caratteri non validi nel nome host '%(hostname)s'" msgid "Invalid config_drive provided." msgstr "config_drive specificato non è valido." #, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "config_drive_format \"%s\" non valido" #, python-format msgid "Invalid console type %(console_type)s" msgstr "Tipo di console non valido %(console_type)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Tipo di contenuto non valido%(content_type)s." #, python-format msgid "Invalid datetime string: %(reason)s" msgstr "Stringa data/ora non valida: %(reason)s" msgid "Invalid device UUID." msgstr "UUID del dispositivo non valido." #, python-format msgid "Invalid entry: '%s'" msgstr "Voce non valida: '%s'" #, python-format msgid "Invalid entry: '%s'; Expecting dict" msgstr "Voce non valida: '%s'; è previsto dict" #, python-format msgid "Invalid entry: '%s'; Expecting list or dict" msgstr "Voce non valida: '%s'; è previsto list o dict" #, python-format msgid "Invalid exclusion expression %r" msgstr "Espressione di esclusione %r non valida" #, python-format msgid "Invalid image format '%(format)s'" msgstr "Formato immagine non valido '%(format)s'" #, python-format msgid "Invalid image href %(image_href)s." msgstr "href immagine %(image_href)s non valido." #, python-format msgid "Invalid inclusion expression %r" msgstr "Espressione di inclusione %r non valida" #, python-format msgid "" "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s" msgstr "" "Input non valido per campo/attributo %(path)s. Valore: %(value)s. %(message)s" #, python-format msgid "Invalid input received: %(reason)s" msgstr "Input ricevuto non valido: %(reason)s" msgid "Invalid instance image." msgstr "Immagine istanza non valida." #, python-format msgid "Invalid is_public filter [%s]" msgstr "Filtro is_public non valido [%s]" msgid "Invalid key_name provided." msgstr "Il nome_chiave specificato non è valido." 
#, python-format msgid "Invalid memory page size '%(pagesize)s'" msgstr "Dimensione pagina di memoria non valida '%(pagesize)s'" msgid "Invalid metadata key" msgstr "La chiave di metadati non è valida" #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "Dimensione metadati non valida: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "Metadati non validi: %(reason)s" #, python-format msgid "Invalid minDisk filter [%s]" msgstr "Filtro minDisk non valido [%s]" #, python-format msgid "Invalid minRam filter [%s]" msgstr "Filtro minRam non valido [%s]" #, python-format msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" msgstr "Intervallo di porta non valido %(from_port)s:%(to_port)s. %(msg)s" msgid "Invalid proxy request signature." msgstr "Firma della richiesta del proxy non valida." #, python-format msgid "Invalid range expression %r" msgstr "Espressione di intervallo %r non valida" msgid "Invalid service catalog json." msgstr "json del catalogo del servizio non è valido." msgid "Invalid start time. The start time cannot occur after the end time." msgstr "" "Ora di inizio non valida. L'ora di inizio non può essere successiva all'ora " "di fine." msgid "Invalid state of instance files on shared storage" msgstr "Stato non valido dei file dell'istanza nella memoria condivisa" #, python-format msgid "Invalid timestamp for date %s" msgstr "Data/ora non valida per la data %s" #, python-format msgid "Invalid usage_type: %s" msgstr "usage_type non valido: %s" #, python-format msgid "Invalid value for Config Drive option: %(option)s" msgstr "Valore non valido per l'opzione unità di config: %(option)s" #, python-format msgid "Invalid virtual interface address %s in request" msgstr "Indirizzo interfaccia virtuale non valido %s nella richiesta" #, python-format msgid "Invalid volume access mode: %(access_mode)s" msgstr "Modalità di accesso al volume non valida: %(access_mode)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "Volume non valido: %(reason)s" msgid "Invalid volume_size." msgstr "Volume_size non valido." #, python-format msgid "Ironic node uuid not supplied to driver for instance %s." msgstr "L'UUID del nodo Ironic non è stato fornito al driver per l'istanza %s." #, python-format msgid "" "It is not allowed to create an interface on external network %(network_uuid)s" msgstr "" "Non è consentito creare un'interfaccia sulla rete esterna %(network_uuid)s" msgid "" "Key Names can only contain alphanumeric characters, periods, dashes, " "underscores, colons and spaces." msgstr "" "I nomi chiave possono solo contenere caratteri alfanumerici, punti, " "trattini, caratteri di sottolineatura, due punti e spazi." #, python-format msgid "Key manager error: %(reason)s" msgstr "Errore gestore chiavi: %(reason)s" #, python-format msgid "Key pair '%(key_name)s' already exists." msgstr "La coppia di chiavi '%(key_name)s' esiste già." #, python-format msgid "Keypair %(name)s not found for user %(user_id)s" msgstr "" "Impossibile trovare la coppia di chiavi %(name)s per l'utente %(user_id)s" #, python-format msgid "Keypair data is invalid: %(reason)s" msgstr "I dati della coppia di chiavi non sono validi: %(reason)s" msgid "Limits only supported from vCenter 6.0 and above" msgstr "Limiti supportati solo da vCenter 6.0 e successivi" #, python-format msgid "Live migration %(id)s for server %(uuid)s is not in progress." msgstr "La migrazione %(id)s per il server %(uuid)s non è in corso." 
#, python-format msgid "Malformed message body: %(reason)s" msgstr "Corpo del messaggio non valido: %(reason)s" #, python-format msgid "" "Malformed request URL: URL's project_id '%(project_id)s' doesn't match " "Context's project_id '%(context_project_id)s'" msgstr "" "URL richiesto non valido: il project_id del progetto '%(project_id)s' non " "corrisponde al project_id del progetto '%(context_project_id)s'" msgid "Malformed request body" msgstr "Corpo richiesta non corretto" msgid "Mapping image to local is not supported." msgstr "Associazione dell'immagine all'elemento locale non supportata." #, python-format msgid "Marker %(marker)s could not be found." msgstr "Impossibile trovare l'indicatore %(marker)s." msgid "Maximum number of floating IPs exceeded" msgstr "Il numero massimo di IP mobili è stato superato" #, python-format msgid "Maximum number of metadata items exceeds %(allowed)d" msgstr "Il numero massimo di elementi metadati è stato superato %(allowed)d" msgid "Maximum number of ports exceeded" msgstr "Numero massimo di porte superato" msgid "Maximum number of security groups or rules exceeded" msgstr "Il numero massimo di gruppi di sicurezza o di regole è stato superato" msgid "Metadata item was not found" msgstr "L'elemento metadati non è stato trovato" msgid "Metadata property key greater than 255 characters" msgstr "La chiave della proprietà dei metadati supera 255 caratteri" msgid "Metadata property value greater than 255 characters" msgstr "Il valore della proprietà dei metadati supera 255 caratteri" msgid "Metadata type should be dict." msgstr "Tipo di metadati deve essere dict." #, python-format msgid "" "Metric %(name)s could not be found on the compute host node %(host)s." "%(node)s." msgstr "" "Impossibile trovare la metrica %(name)s sul nodo host compute %(host)s." "%(node)s." #, python-format msgid "Migration %(id)s for server %(uuid)s is not live-migration." msgstr "" "La migrazione %(id)s per il server %(uuid)s non è una migrazione attiva." #, python-format msgid "Migration %(migration_id)s could not be found." msgstr "Impossibile trovare la migrazione %(migration_id)s." #, python-format msgid "Migration %(migration_id)s not found for instance %(instance_id)s" msgstr "Migrazione %(migration_id)s non trovata per l'istanza %(instance_id)s " #, python-format msgid "" "Migration %(migration_id)s state of instance %(instance_uuid)s is %(state)s. " "Cannot %(method)s while the migration is in this state." msgstr "" "Lo stato dell'istanza %(instance_uuid)s della migrazione %(migration_id)s è " "%(state)s. Impossibile %(method)s mentre la migrazione è in questo stato." #, python-format msgid "Migration error: %(reason)s" msgstr "Errore di migrazione: %(reason)s" msgid "Migration is not supported for LVM backed instances" msgstr "" "La migrazione non è supportata per le istanze sottoposte a backup da LVM" #, python-format msgid "" "Migration not found for instance %(instance_id)s with status %(status)s." msgstr "" "La migrazione per l'istanza %(instance_id)s non è stata trovata con lo stato " "%(status)s." #, python-format msgid "Migration pre-check error: %(reason)s" msgstr "Errore di verifica preliminare della migrazione: %(reason)s" #, python-format msgid "Migration select destinations error: %(reason)s" msgstr "Errore delle destinazioni di selezione della migrazione: %(reason)s" #, python-format msgid "Missing arguments: %s" msgstr "Argomenti mancanti: %s" msgid "Missing device UUID." msgstr "Manca l'UUID del dispositivo." 
msgid "Missing disabled reason field" msgstr "Manca il campo causa disabilitata" msgid "Missing forced_down field" msgstr "Campo forced_down mancante" msgid "Missing imageRef attribute" msgstr "Manca l'attributo imageRef" #, python-format msgid "Missing keys: %s" msgstr "Mancano le chiavi: %s" msgid "Missing parameter dict" msgstr "Manca il parametro dict" #, python-format msgid "" "More than one instance is associated with fixed IP address '%(address)s'." msgstr "Più di un'istanza è associata all'indirizzo IP fisso '%(address)s'." msgid "" "More than one possible network found. Specify network ID(s) to select which " "one(s) to connect to." msgstr "" "Trovate più reti possibili. Specificare l'ID rete(i) per selezionare " "quella(e) a cui effettuare la connessione." msgid "More than one swap drive requested." msgstr "È richiesta più di un'unità di scambio." #, python-format msgid "Multi-boot operating system found in %s" msgstr "Rilevato avvio multiplo del sistema operativo in %s" msgid "Multiple X-Instance-ID headers found within request." msgstr "" "Sono state trovate più intestazioni X-Instance-ID all'interno della " "richiesta." msgid "Multiple X-Tenant-ID headers found within request." msgstr "" "Sono state trovate più intestazioni X-Tenant-ID all'interno della richiesta." #, python-format msgid "Multiple floating IP pools matches found for name '%s'" msgstr "" "Sono state trovate più corrispondenze di pool di IP mobili per il nome '%s'" #, python-format msgid "Multiple floating IPs are found for address %(address)s." msgstr "Più IP mobili sono stati trovati per l'indirizzo %(address)s." msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we do " "not return uptime for just one host." msgstr "" "Più host possono essere gestiti dal driver VMWare vCenter; pertanto non " "restituiamo l'ora di aggiornamento solo per un host." msgid "Multiple possible networks found, use a Network ID to be more specific." msgstr "Trovate più reti possibili, utilizzare un ID rete più specifico." #, python-format msgid "" "Multiple security groups found matching '%s'. Use an ID to be more specific." msgstr "" "Trovati più gruppi sicurezza che corrispondono a '%s'. Utilizzare un ID per " "essere più precisi." msgid "Must input network_id when request IP address" msgstr "È necessario immettere network_id quando è richiesto l'indirizzo IP" msgid "Must not input both network_id and port_id" msgstr "Non si deve immettere entrambi network_id e port_id" msgid "" "Must specify host_ip, host_username and host_password to use vmwareapi." "VMwareVCDriver" msgstr "" "È necessario specificare host_ip, host_username e host_password per " "l'utilizzo di vmwareapi.VMwareVCDriver" msgid "Must supply a positive value for max_number" msgstr "È necessario fornire un valore positivo per max_number" msgid "Must supply a positive value for max_rows" msgstr "È necessario fornire un valore positivo per max_rows" #, python-format msgid "Network %(network_id)s could not be found." msgstr "Impossibile trovare la rete %(network_id)s." #, python-format msgid "" "Network %(network_uuid)s requires a subnet in order to boot instances on." msgstr "" "La rete %(network_uuid)s richiede una sottorete per avviare le istanze." #, python-format msgid "Network could not be found for bridge %(bridge)s" msgstr "Impossibile trovare la rete per il bridge %(bridge)s" #, python-format msgid "Network could not be found for instance %(instance_id)s." msgstr "Impossibile trovare la rete per l'istanza %(instance_id)s." 
msgid "Network not found" msgstr "Rete non trovata" msgid "" "Network requires port_security_enabled and subnet associated in order to " "apply security groups." msgstr "" "La rete richiede port_security_enabled e la sottorete associata al fine di " "applicare i gruppi sicurezza." msgid "New volume must be detached in order to swap." msgstr "Il nuovo volume deve essere scollegato per lo scambio." msgid "New volume must be the same size or larger." msgstr "Il nuovo volume deve avere la stessa dimensione o superiore." #, python-format msgid "No Block Device Mapping with id %(id)s." msgstr "Nessuna associazione dispositivo di blocco con id %(id)s." msgid "No Unique Match Found." msgstr "Non è stata trovata nessuna corrispondenza univoca." msgid "No compute host specified" msgstr "Nessun host di calcolo specificato" #, python-format msgid "No configuration information found for operating system %(os_name)s" msgstr "" "Nessuna informazione di configurazione trovata per il sistema operativo " "%(os_name)s" #, python-format msgid "No device with MAC address %s exists on the VM" msgstr "Nessun dispositivo con l'indirizzo MAC %s esiste sulla VM" #, python-format msgid "No device with interface-id %s exists on VM" msgstr "Nessun dispositivo con interface-id %s esiste sulla VM" #, python-format msgid "No disk at %(location)s" msgstr "Nessun disco in %(location)s" #, python-format msgid "No fixed IP addresses available for network: %(net)s" msgstr "Nessun indirizzo IP fisso disponibile per la rete: %(net)s" msgid "No fixed IPs associated to instance" msgstr "Nessun IP fisso associato all'istanza" msgid "No free nbd devices" msgstr "Nessuna unità nbd disponibile" msgid "No host available on cluster" msgstr "Nessun host disponibile sul cluster" msgid "No hosts found to map to cell, exiting." msgstr "Nessun host trovato da associare alla cella, uscita." #, python-format msgid "No hypervisor matching '%s' could be found." msgstr "Non è stata trovata alcuna corrispondenza hypervisor '%s'." msgid "No image locations are accessible" msgstr "Nessuna ubicazione immagine è accessibile" #, python-format msgid "" "No live migration URI configured and no default available for " "\"%(virt_type)s\" hypervisor virtualization type." msgstr "" "Nessun URI di migrazione attiva configurato e nessun valore predefinito " "disponibile per il tipo di virtualizzazione dell'hypervisor " "\"%(virt_type)s\"." msgid "No more floating IPs available." msgstr "IP mobili non più disponibili." #, python-format msgid "No more floating IPs in pool %s." msgstr "Non ci sono più IP mobili nel pool %s." #, python-format msgid "No mount points found in %(root)s of %(image)s" msgstr "Nessun punto di montaggio trovato in %(root)s di %(image)s" #, python-format msgid "No operating system found in %s" msgstr "Nessun sistema operativo rilevato in %s" msgid "No root disk defined." msgstr "Nessun disco root definito" #, python-format msgid "" "No specific network was requested and none are available for project " "'%(project_id)s'." msgstr "" "Nessuna rete specifica era richiesta e nessuna è disponibile per il progetto " "'%(project_id)s'." msgid "No valid host found for cold migrate" msgstr "Nessun host valido trovato per la migrazione a freddo" msgid "No valid host found for resize" msgstr "Nessun host valido trovato per il ridimensionamento" #, python-format msgid "No valid host was found. %(reason)s" msgstr "Non è stato trovato alcun host valido. 
%(reason)s" #, python-format msgid "No volume Block Device Mapping at path: %(path)s" msgstr "" "Nessuna associazione dell'unità di blocco del volume nel percorso: %(path)s" #, python-format msgid "No volume Block Device Mapping with id %(volume_id)s." msgstr "Nessun volume di associazione unità di blocco con id %(volume_id)s." #, python-format msgid "Node %s could not be found." msgstr "Impossibile trovare il nodo %s." #, python-format msgid "Not able to acquire a free port for %(host)s" msgstr "Impossibile acquisire una porta libera per %(host)s" #, python-format msgid "Not able to bind %(host)s:%(port)d, %(error)s" msgstr "Impossibile collegare %(host)s:%(port)d, %(error)s" #, python-format msgid "" "Not all Virtual Functions of PF %(compute_node_id)s:%(address)s are free." msgstr "" "Non tutte le funzioni virtuali di PF %(compute_node_id)s:%(address)s sono " "disponibili." msgid "Not an rbd snapshot" msgstr "Non è un'istantanea rbd" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "Non autorizzato per l'immagine %(image_id)s." msgid "Not authorized." msgstr "Non autorizzato." msgid "Not enough parameters to build a valid rule." msgstr "Parametri non sufficienti per creare una regola valida" msgid "Not stored in rbd" msgstr "Non memorizzato in rbd" msgid "Nothing was archived." msgstr "Non è stato archiviato alcun elemento." #, python-format msgid "Nova requires libvirt version %s or greater." msgstr "Nova richiede libvirt versione %s o successiva." msgid "Number of Rows Archived" msgstr "Numero di righe archiviate" #, python-format msgid "Object action %(action)s failed because: %(reason)s" msgstr "Azione dell'oggetto %(action)s non riuscita perché: %(reason)s" msgid "Old volume is attached to a different instance." msgstr "Il volume precedente è collegato ad un'istanza diversa." #, python-format msgid "One or more hosts already in availability zone(s) %s" msgstr "Uno o più host sono già nelle zone di disponibilità %s" msgid "Only administrators may list deleted instances" msgstr "Solo gli amministratori possono elencare le istanze eliminate" msgid "Origin header does not match this host." msgstr "L'intestazione origine non corrisponde a questo host." msgid "Origin header not valid." msgstr "Intestazione origine non valida." msgid "Origin header protocol does not match this host." msgstr "Il protocollo dell'intestazione origine non corrisponde a questo host." #, python-format msgid "PCI Device %(node_id)s:%(address)s not found." msgstr "Dispositivo PCI %(node_id)s:%(address)s non trovato." 
#, python-format msgid "PCI alias %(alias)s is not defined" msgstr "L'alias PCI %(alias)s non è definito" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is %(status)s instead of " "%(hopestatus)s" msgstr "" "Il dispositivo PCI %(compute_node_id)s:%(address)s è %(status)s anziché di " "%(hopestatus)s" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead of " "%(hopeowner)s" msgstr "" "Il dispositivo PCI %(compute_node_id)s:%(address)s è di proprietà di " "%(owner)s anziché di %(hopeowner)s" #, python-format msgid "PCI device %(id)s not found" msgstr "Dispositivo PCI %(id)s non trovato" #, python-format msgid "PCI device request %(requests)s failed" msgstr "La richiesta del dispositivo PCI %(requests)s non è riuscita" #, python-format msgid "Page size %(pagesize)s forbidden against '%(against)s'" msgstr "Dimensione pagina %(pagesize)s non consentita su '%(against)s'" #, python-format msgid "Page size %(pagesize)s is not supported by the host." msgstr "Dimensione pagina %(pagesize)s non supportata dall'host." #, python-format msgid "" "Parameters %(missing_params)s not present in vif_details for vif %(vif_id)s. " "Check your Neutron configuration to validate that the macvtap parameters are " "correct." msgstr "" "Parametri %(missing_params)s non presenti in vif_details per vif %(vif_id)s. " "Controllare la configurazione neutron per confermare che i parametri macvtap " "siano corretti." #, python-format msgid "Path %s must be LVM logical volume" msgstr "Il percorso %s deve essere un volume logico LVM" msgid "Paused" msgstr "In pausa" msgid "Personality file limit exceeded" msgstr "Il limite del file di personalità è stato superato" #, python-format msgid "" "Physical Function %(compute_node_id)s:%(address)s, related to VF " "%(compute_node_id)s:%(vf_address)s is %(status)s instead of %(hopestatus)s" msgstr "" "La funzione fisica %(compute_node_id)s:%(address)s, correlata a VF " "%(compute_node_id)s:%(vf_address)s è %(status)s anziché %(hopestatus)s" #, python-format msgid "Physical network is missing for network %(network_uuid)s" msgstr "Manca la rete fisica per la rete %(network_uuid)s" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "La politica non consente di eseguire l'azione %(action)s ." #, python-format msgid "Port %(port_id)s is still in use." msgstr "La porta %(port_id)s è ancora in uso." #, python-format msgid "Port %(port_id)s not usable for instance %(instance)s." msgstr "La porta %(port_id)s non è utilizzabile per l'istanza %(instance)s." #, python-format msgid "" "Port %(port_id)s not usable for instance %(instance)s. Value %(value)s " "assigned to dns_name attribute does not match instance's hostname " "%(hostname)s" msgstr "" "La porta %(port_id)s non è utilizzabile per l'istanza %(instance)s. Il " "valore %(value)s assegnato all'attributo dns_name non corrisponde al nome " "host dell'istanza %(hostname)s" #, python-format msgid "Port %(port_id)s requires a FixedIP in order to be used." msgstr "La porta %(port_id)s richiede un FixedIP per essere utilizzata." #, python-format msgid "Port %s is not attached" msgstr "La porta %s non è collegata" #, python-format msgid "Port id %(port_id)s could not be found." msgstr "Impossibile trovare l'd porta %(port_id)s." #, python-format msgid "Provided video model (%(model)s) is not supported." msgstr "Il modello video fornito (%(model)s) non è supportato." #, python-format msgid "Provided watchdog action (%(action)s) is not supported." 
msgstr "L'azione watchdog (%(action)s) non è supportata." msgid "QEMU guest agent is not enabled" msgstr "Agent guest QEMU non abilitato" #, python-format msgid "Quiescing is not supported in instance %(instance_id)s" msgstr "Sospensione non supportata per l'istanza %(instance_id)s" #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "Impossibile trovare la classe di quota %(class_name)s." msgid "Quota could not be found" msgstr "Impossibile trovare la quota" #, python-format msgid "" "Quota exceeded for %(overs)s: Requested %(req)s, but already used %(used)s " "of %(allowed)s %(overs)s" msgstr "" "Quota superata per %(overs)s: Richiesto %(req)s, ma già utilizzato %(used)s " "%(allowed)s %(overs)s" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Quota superata per le risorse: %(overs)s" msgid "Quota exceeded, too many key pairs." msgstr "Quota superata, troppe coppie di chiavi." msgid "Quota exceeded, too many server groups." msgstr "Quota superata, troppi gruppi di server." msgid "Quota exceeded, too many servers in group" msgstr "Quota superata, troppi server nel gruppo" #, python-format msgid "Quota exists for project %(project_id)s, resource %(resource)s" msgstr "la quota per il progetto %(project_id)s esiste, risorsa %(resource)s" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "Impossibile trovare la quota per il progetto %(project_id)s." #, python-format msgid "" "Quota for user %(user_id)s in project %(project_id)s could not be found." msgstr "" "Impossibile trovare la quota per l'utente %(user_id)s nel progetto " "%(project_id)s." #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be greater than or equal to " "already used and reserved %(minimum)s." msgstr "" "Il limite della quota %(limit)s per %(resource)s deve essere maggiore o " "uguale a quello già utilizzato e prenotato %(minimum)s." #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be less than or equal to " "%(maximum)s." msgstr "" "Il limite della quota %(limit)s per %(resource)s deve essere inferiore o " "uguale a %(maximum)s." msgid "Request body and URI mismatch" msgstr "Il corpo della richiesta e l'URI non corrispondono" msgid "Request is too large." msgstr "La richiesta è troppo grande." #, python-format msgid "Request of image %(image_id)s got BadRequest response: %(response)s" msgstr "" "La richiesta dell'immagine %(image_id)s ha ricevuto una risposta BadRequest: " "%(response)s" #, python-format msgid "RequestSpec not found for instance %(instance_uuid)s" msgstr "RequestSpec non trovata per l'istanza %(instance_uuid)s" msgid "Requested CPU control policy not supported by host" msgstr "" "La politica di controllo della CPU richiesta non è supportata dall'host" #, python-format msgid "" "Requested hardware '%(model)s' is not supported by the '%(virt)s' virt driver" msgstr "" "L'hardware richiesto '%(model)s' non è supportato dal driver virtuale " "'%(virt)s'" #, python-format msgid "Requested image %(image)s has automatic disk resize disabled." msgstr "" "Il ridimensionamento automatico del disco per l'immagine richiesta %(image)s " "è disabilitato." 
msgid "" "Requested instance NUMA topology cannot fit the given host NUMA topology" msgstr "" "La topologia NUMA dell'istanza richiesta non si adatta alla topologia NUMA " "dell'host fornito" msgid "" "Requested instance NUMA topology together with requested PCI devices cannot " "fit the given host NUMA topology" msgstr "" "La topologia NUMA dell'istanza richiesta insieme ai dispositivi PCI " "richiesti non si adatta alla topologia NUMA dell'host fornito" #, python-format msgid "" "Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to " "satisfy for vcpus count %(vcpus)d" msgstr "" "I limiti vCPU richiesti %(sockets)d:%(cores)d:%(threads)d non sono possibili " "per soddisfare il conteggio vcpu %(vcpus)d" #, python-format msgid "Rescue device does not exist for instance %s" msgstr "Il dispositivo di ripristino non esiste per l'istanza %s" #, python-format msgid "Resize error: %(reason)s" msgstr "Errore di ridimensionamento: %(reason)s" msgid "Resize to zero disk flavor is not allowed." msgstr "Il ridimensionamento del flavor disco su zero non è consentito." msgid "Resource could not be found." msgstr "Impossibile trovare la risorsa." msgid "Resumed" msgstr "Ripristinato" #, python-format msgid "Root element name should be '%(name)s' not '%(tag)s'" msgstr "Il nome dell'elemento root deve essere '%(name)s', non '%(tag)s'" #, python-format msgid "Running batches of %i until complete" msgstr "Esecuzione di batch di %i fino al completamento" #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "Impossibile trovare il filtro Scheduler Host %(filter_name)s." #, python-format msgid "Security group %(name)s is not found for project %(project)s" msgstr "" "Il gruppo di sicurezza %(name)s non è stato trovato per il progetto " "%(project)s" #, python-format msgid "" "Security group %(security_group_id)s not found for project %(project_id)s." msgstr "" "Impossibile trovare il gruppo di sicurezza %(security_group_id)s per il " "progetto %(project_id)s." #, python-format msgid "Security group %(security_group_id)s not found." msgstr "Impossibile trovare il gruppo di sicurezza %(security_group_id)s." #, python-format msgid "" "Security group %(security_group_name)s already exists for project " "%(project_id)s." msgstr "" "Il gruppo di sicurezza %(security_group_name)s esiste già per il progetto " "%(project_id)s." 
#, python-format msgid "" "Security group %(security_group_name)s not associated with the instance " "%(instance)s" msgstr "" "Il gruppo di sicurezza %(security_group_name)s non è associato all'istanza " "%(instance)s" msgid "Security group id should be uuid" msgstr "L'id gruppo sicurezza deve essere uuid" msgid "Security group name cannot be empty" msgstr "Il nome gruppo di sicurezza non può essere vuoto" msgid "Security group not specified" msgstr "Gruppo di sicurezza non specificato" #, python-format msgid "Server disk was unable to be resized because: %(reason)s" msgstr "" "Non è stato possibile ridimensionare il disco del server perché: %(reason)s" msgid "Server does not exist" msgstr "Il server non esiste" #, python-format msgid "ServerGroup policy is not supported: %(reason)s" msgstr "Politica ServerGroup non supportata: %(reason)s" msgid "ServerGroupAffinityFilter not configured" msgstr "ServerGroupAffinityFilter non configurato" msgid "ServerGroupAntiAffinityFilter not configured" msgstr "ServerGroupAntiAffinityFilter non configurato" msgid "ServerGroupSoftAffinityWeigher not configured" msgstr "ServerGroupSoftAffinityWeigher non configurato" msgid "ServerGroupSoftAntiAffinityWeigher not configured" msgstr "ServerGroupSoftAntiAffinityWeigher non configurato" #, python-format msgid "Service %(service_id)s could not be found." msgstr "Impossibile trovare il servizio %(service_id)s." #, python-format msgid "Service %s not found." msgstr "Il servizio %s non è stato trovato." msgid "Service is unavailable at this time." msgstr "Il servizio non è disponibile in questo momento." #, python-format msgid "Service with host %(host)s binary %(binary)s exists." msgstr "Il servizio con valore host %(host)s binario %(binary)s esiste già." #, python-format msgid "Service with host %(host)s topic %(topic)s exists." msgstr "Il servizio con host %(host)s topic %(topic)s esiste." msgid "Set admin password is not supported" msgstr "L'impostazione della password admin non è supportata" #, python-format msgid "Share '%s' is not supported" msgstr "La condivisione '%s' non è supportata" #, python-format msgid "Share level '%s' cannot have share configured" msgstr "" "Il livello di condivisione '%s' non può avere la condivisione configurata" #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "Impossibile trovare l'istantanea %(snapshot_id)s." msgid "Some required fields are missing" msgstr "Mancano alcuni campi obbligatori" #, python-format msgid "" "Something went wrong when deleting a volume snapshot: rebasing a " "%(protocol)s network disk using qemu-img has not been fully tested" msgstr "" "Si è verificato un errore durante l'eliminazione di un'istantanea del " "volume: la creazione di una nuova base per un disco di rete %(protocol)s " "tramite qemu-img non è stata testata completamente" msgid "Sort direction size exceeds sort key size" msgstr "" "La dimensione del criterio di ordinamento supera la dimensione della chiave " "di ordinamento" msgid "Sort key supplied was not valid." msgstr "La chiave di ordinamento fornita non è valida." 
msgid "Specified fixed address not assigned to instance" msgstr "L'indirizzo fisso specificato non è stato assegnato all'istanza" msgid "Started" msgstr "Avviato" msgid "Stopped" msgstr "Di arresto" #, python-format msgid "Storage error: %(reason)s" msgstr "Errore di memoria: %(reason)s" #, python-format msgid "Storage policy %s did not match any datastores" msgstr "La politica di archiviazione %s non corrisponde ad alcun archivio dati" msgid "Success" msgstr "Riuscito" msgid "Suspended" msgstr "Sospeso" msgid "Swap drive requested is larger than instance type allows." msgstr "" "L'unità di scambio richiesta è più grande di quanto consentito dal tipo di " "istanza." msgid "Table" msgstr "Tabella" #, python-format msgid "Task %(task_name)s is already running on host %(host)s" msgstr "L'attività %(task_name)s è già in esecuzione nell'host %(host)s" #, python-format msgid "Task %(task_name)s is not running on host %(host)s" msgstr "L'attività %(task_name)s non è in esecuzione nell'host %(host)s" #, python-format msgid "The PCI address %(address)s has an incorrect format." msgstr "Il formato dell'indirizzo PCI %(address)s non è corretto." #, python-format msgid "The console port range %(min_port)d-%(max_port)d is exhausted." msgstr "La serie di porte di console %(min_port)d-%(max_port)d è esaurita." msgid "The created instance's disk would be too small." msgstr "Il disco dell'istanza creata potrebbe essere troppo piccolo." msgid "The current driver does not support preserving ephemeral partitions." msgstr "" "Il driver corrente non supporta la conservazione di partizioni effimere." msgid "The default PBM policy doesn't exist on the backend." msgstr "La politica PBM predefinita non esiste sul backend." msgid "The floating IP request failed with a BadRequest" msgstr "Richiesta IP mobile non riuscita con errore Richiesta non corretta" msgid "" "The instance requires a newer hypervisor version than has been provided." msgstr "" "L'istanza richiede una versione di hypervisor più recente di quella fornita." #, python-format msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d" msgstr "" "Il numero di porte definite: %(ports)d è superiore al limite: %(quota)d" #, python-format msgid "The provided RNG device path: (%(path)s) is not present on the host." msgstr "" "Il percorso del dispositivo RNG fornito (%(path)s) non è presente sull'host." msgid "The request is invalid." msgstr "La richiesta non è valida." #, python-format msgid "" "The requested amount of video memory %(req_vram)d is higher than the maximum " "allowed by flavor %(max_vram)d." msgstr "" "La quantità di memoria video richiesta %(req_vram)d è superiore a quella " "massima generalmente consentita %(max_vram)d." msgid "The requested availability zone is not available" msgstr "L'area di disponibilità richiesta non è disponibile" msgid "The requested functionality is not supported." msgstr "La funzionalità richiesta non è supportata." #, python-format msgid "The specified cluster '%s' was not found in vCenter" msgstr "Il cluster specificato '%s' non è stato trovato in vCenter" #, python-format msgid "The supplied device path (%(path)s) is in use." msgstr "Il percorso unità specificato (%(path)s) è in uso." #, python-format msgid "The supplied device path (%(path)s) is invalid." msgstr "Il percorso unità specificato (%(path)s) non è valido." #, python-format msgid "" "The supplied disk path (%(path)s) already exists, it is expected not to " "exist." 
msgstr "" "Il percorso disco (%(path)s) specificato esiste già, è previsto che non " "esista." msgid "The supplied hypervisor type of is invalid." msgstr "Il tipo di hypervisor fornito non è valido." msgid "The target host can't be the same one." msgstr "L'host di destinazione non può essere lo stesso." #, python-format msgid "The token '%(token)s' is invalid or has expired" msgstr "Il token '%(token)s' non è valido oppure è scaduto" #, python-format msgid "" "The volume cannot be assigned the same device name as the root device %s" msgstr "" "Non è possibile assegnare al volume lo stesso nome dispositivo assegnato al " "dispositivo root %s" msgid "There are not enough hosts available." msgstr "Numero di host disponibili non sufficiente." #, python-format msgid "There is no such action: %s" msgstr "Non esiste alcuna azione simile: %s" #, python-format msgid "" "This compute node's hypervisor is older than the minimum supported version: " "%(version)s." msgstr "" "L' hypervisor del nodo di calcolo è più vecchio della versione minima " "supportata: %(version)s." msgid "" "This method needs to be called with either networks=None and port_ids=None " "or port_ids and networks as not none." msgstr "" "Questo metodo deve essere richiamato con netoworks=None e port_ids=None o " "port_ids e networks non none." #, python-format msgid "This rule already exists in group %s" msgstr "Questa regola già esiste nel gruppo %s" #, python-format msgid "" "This service is older (v%(thisver)i) than the minimum (v%(minver)i) version " "of the rest of the deployment. Unable to continue." msgstr "" "Questo servizio è più vecchio (v%(thisver)i) della versione minima " "(v%(minver)i) del resto della distribuzione. Impossibile continuare." msgid "Timeout waiting for response from cell" msgstr "Timeout in attesa di risposta dalla cella" #, python-format msgid "Timeout while checking if we can live migrate to host: %s" msgstr "" "Timeout durante il controllo della possibilità di eseguire la migrazione " "live all'host: %s" msgid "To and From ports must be integers" msgstr "Le porte 'Da' e 'A' devono essere numeri interi" msgid "Token not found" msgstr "Token non trovato" msgid "Triggering crash dump is not supported" msgstr "L'attivazione del dump di crash non è supportata" msgid "Type and Code must be integers for ICMP protocol type" msgstr "Tipo e codice devono essere numeri interi per il tipo protocollo ICMP" msgid "UEFI is not supported" msgstr "UEFI non è supportato" #, python-format msgid "" "Unable to associate floating IP %(address)s to any fixed IPs for instance " "%(id)s. Instance has no fixed IPv4 addresses to associate." msgstr "" "Impossibile associare l'IP mobile %(address)s all'IP fisso per l'istanza " "%(id)s. L'istanza non presenta alcun indirizzo IPv4 fisso da associare." #, python-format msgid "" "Unable to associate floating IP %(address)s to fixed IP %(fixed_address)s " "for instance %(id)s. Error: %(error)s" msgstr "" "Impossibile associare l'IP mobile %(address)s all'IP fisso %(fixed_address)s " "per l'istanza %(id)s. 
Errore: %(error)s" #, python-format msgid "Unable to convert image to %(format)s: %(exp)s" msgstr "Impossibile convertire l'immagine in %(format)s: %(exp)s" #, python-format msgid "Unable to convert image to raw: %(exp)s" msgstr "Impossibile convertire l'immagine in immagine non elaborata: %(exp)s" #, python-format msgid "Unable to determine disk bus for '%s'" msgstr "Impossibile determinare il bus del disco per '%s'" #, python-format msgid "Unable to determine disk prefix for %s" msgstr "Impossibile determinare il prefisso del disco per %s" #, python-format msgid "Unable to find host for Instance %s" msgstr "Impossibile trovare l'host per l'istanza %s" msgid "Unable to find iSCSI Target" msgstr "Impossibile trovare la destinazione iSCSI" msgid "Unable to find volume" msgstr "Impossibile trovare il volume" msgid "Unable to get host UUID: /etc/machine-id does not exist" msgstr "Impossibile richiamare l'UUID host: /etc/machine-id non esiste" msgid "Unable to get host UUID: /etc/machine-id is empty" msgstr "Impossibile richiamare l'UUID host: /etc/machine-id è vuoto" msgid "" "Unable to launch multiple instances with a single configured port ID. Please " "launch your instance one by one with different ports." msgstr "" "Impossibile avviare più istanze con un singolo ID porta configurato. Avviare " "le proprie istanze una per volta con porte differenti." #, python-format msgid "" "Unable to migrate %(instance_uuid)s to %(dest)s: Lack of memory(host:" "%(avail)s <= instance:%(mem_inst)s)" msgstr "" "Impossibile migrare %(instance_uuid)s to %(dest)s: mancanza di memoria (host:" "%(avail)s <= istanza:%(mem_inst)s)" #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too large(available " "on destination host:%(available)s < need:%(necessary)s)" msgstr "" "Impossibile migrare %(instance_uuid)s: il disco dell'istanza è troppo grande " "(disponibile nell'host di destinazione: %(available)s < necessario: " "%(necessary)s)" #, python-format msgid "" "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." msgstr "" "Impossibile migrare l'istanza (%(instance_id)s) nell'host corrente " "(%(host)s)." msgid "Unable to resize disk down." msgstr "Impossibile ridurre il disco a dimensioni inferiori." msgid "Unable to set password on instance" msgstr "Impossibile impostare la password sull'istanza" msgid "Unable to shrink disk." msgstr "Impossibile ridurre il disco." #, python-format msgid "Unacceptable CPU info: %(reason)s" msgstr "Informazioni CPU non accettabili: %(reason)s" msgid "Unacceptable parameters." msgstr "Parametri inaccettabili." #, python-format msgid "Unavailable console type %(console_type)s." msgstr "Tipo di console non disponibile %(console_type)s." msgid "" "Undefined Block Device Mapping root: BlockDeviceMappingList contains Block " "Device Mappings from multiple instances." msgstr "" "Root di associazione dispositivo di blocco non definita: " "BlockDeviceMappingList contiene le associazioni del dispositivo di blocco di " "più istanze." #, python-format msgid "Unexpected aggregate action %s" msgstr "Azione aggregato non prevista %s" msgid "Unexpected type adding stats" msgstr "Tipo non previsto durante l'aggiunta di statistiche" #, python-format msgid "Unexpected vif_type=%s" msgstr "vif_type=%s imprevisto" msgid "Unknown" msgstr "Sconosciuto" msgid "Unknown action" msgstr "Azione sconosciuta" #, python-format msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat." 
msgstr "" "Formato unità di configurazione sconosciuto %(format)s. Selezionare una di " "iso9660 o vfat." #, python-format msgid "Unknown delete_info type %s" msgstr "Tipo di delete_info %s sconosciuto" #, python-format msgid "Unknown image_type=%s" msgstr "image_type=%s sconosciuto" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Risorse quota sconosciute %(unknown)s." msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Direzione ordinamento sconosciuta, deve essere 'desc' o 'asc'" #, python-format msgid "Unknown type: %s" msgstr "Tipo sconosciuto: %s" msgid "Unrecognized legacy format." msgstr "Formato legacy non riconosciuto." #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "Valore read_deleted non riconosciuto '%s'" #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "Valore non riconosciuto '%s' per CONF.running_deleted_instance_action" #, python-format msgid "Unshelve attempted but the image %s cannot be found." msgstr "Accantonamento tentato, ma l'immagine %s non è stata trovata" msgid "Unsupported Content-Type" msgstr "Tipo-contenuto non supportato" #, python-format msgid "User %(username)s not found in password file." msgstr "Utente %(username)s non trovato nel file di password." #, python-format msgid "User %(username)s not found in shadow file." msgstr "Utente %(username)s non trovato nel file shadow." msgid "User data needs to be valid base 64." msgstr "I dati utente devono avere una valida base 64." msgid "User does not have admin privileges" msgstr "L'utente non ha i privilegi dell'amministratore" msgid "" "Using different block_device_mapping syntaxes is not allowed in the same " "request." msgstr "" "L'utilizzo di sintassi block_device_mapping differenti non è consentito " "nella stessa CSR1vk." #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "La versione %(req_ver)s non è supportata dall'API. Il valore minimo è " "%(min_ver)s ed il massimo è %(max_ver)s." msgid "Virtual Interface creation failed" msgstr "Creazione interfaccia virtuale non riuscita" msgid "Virtual interface plugin failed" msgstr "Plugin dell'interfaccia virtuale non riuscito" #, python-format msgid "Virtual machine mode '%(vmmode)s' is not recognised" msgstr "La modalità della macchina virtuale '%(vmmode)s' non è riconosciuta" #, python-format msgid "Virtual machine mode '%s' is not valid" msgstr "La modalità della macchina virtuale '%s' non è valida" #, python-format msgid "Virtualization type '%(virt)s' is not supported by this compute driver" msgstr "" "Il tipo di virtualizzazione '%(virt)s' non è supportato dal questo driver " "compute" #, python-format msgid "Volume %(volume_id)s could not be attached. Reason: %(reason)s" msgstr "Impossibile collegare il volume %(volume_id)s. Motivo: %(reason)s" #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "Impossibile trovare il volume %(volume_id)s." #, python-format msgid "" "Volume %(volume_id)s did not finish being created even after we waited " "%(seconds)s seconds or %(attempts)s attempts. And its status is " "%(volume_status)s." msgstr "" "La creazione del volume %(volume_id)s non è stata completata anche dopo " "un'attesa di %(seconds)s secondi o %(attempts)s tentativi e lo stato è " "%(volume_status)s." msgid "Volume does not belong to the requested instance." msgstr "Il volume non appartiene all'istanza richiesta." 
#, python-format msgid "" "Volume encryption is not supported for %(volume_type)s volume %(volume_id)s" msgstr "" "Codifica volume non supportata per volume %(volume_type)s %(volume_id)s" #, python-format msgid "" "Volume is smaller than the minimum size specified in image metadata. Volume " "size is %(volume_size)i bytes, minimum size is %(image_min_disk)i bytes." msgstr "" "Il volume è più piccolo della dimensione minima specificata nei metadati " "dell'immagine. Dimensione volume %(volume_size)i byte, dimensione minima " "%(image_min_disk)i byte." #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " "support custom block size" msgstr "" "Il volume imposta la dimensione del blocco ma l'hypervisor libvirt corrente " "'%s' non supporta la dimensione del blocco personalizzata" msgid "When resizing, instances must change flavor!" msgstr "Durante il ridimensionamento, le istanze devono cambiare tipologia!" #, python-format msgid "Wrong quota method %(method)s used on resource %(res)s" msgstr "Metodo quota errato %(method)s utilizzato per la risorsa %(res)s" msgid "X-Forwarded-For is missing from request." msgstr "X-Forwarded-For manca dalla richiesta." msgid "X-Instance-ID header is missing from request." msgstr "L'intestazione X-Instance-ID manca nella richiesta." msgid "X-Instance-ID-Signature header is missing from request." msgstr "Intestazione X-Instance-ID-Signature non presente nella richiesta." msgid "X-Metadata-Provider is missing from request." msgstr "X-Metadata-Provider manca dalla richiesta." msgid "X-Tenant-ID header is missing from request." msgstr "L'intestazione X-Tenant-ID non è presente nella richiesta." msgid "You are not allowed to delete the image." msgstr "Non è consentito eliminare l'immagine." msgid "" "You are not authorized to access the image the instance was started with." msgstr "" "Non si è autorizzati ad accedere all'immagine con cui è stata avviata " "l'istanza." msgid "You must implement __call__" msgstr "È necessario implementare __call__" msgid "You should specify images_rbd_pool flag to use rbd images." msgstr "" "È necessario specificare l'indicatore images_rbd_pool per utilizzare le " "immagini rbd." msgid "You should specify images_volume_group flag to use LVM images." msgstr "" "È necessario specificare l'indicatore images_volume_group per utilizzare le " "immagini LVM." msgid "Zero floating IPs available." msgstr "Nessun IP mobile disponibile." msgid "admin password can't be changed on existing disk" msgstr "La password admin non può essere modificata sul disco esistente" msgid "cannot understand JSON" msgstr "impossibile riconoscere JSON" msgid "clone() is not implemented" msgstr "Il clone () non è implementato" #, python-format msgid "connect info: %s" msgstr "informazioni di connessione: %s" #, python-format msgid "connecting to: %(host)s:%(port)s" msgstr "connessione a: %(host)s:%(port)s" msgid "direct_snapshot() is not implemented" msgstr "direct_snapshot() non implementato" #, python-format msgid "disk type '%s' not supported" msgstr "tipo di disco '%s' non supportato" #, python-format msgid "empty project id for instance %s" msgstr "id progetto vuoto per l'istanza %s" msgid "error setting admin password" msgstr "errore di impostazione della password admin" #, python-format msgid "error: %s" msgstr "errore: %s" #, python-format msgid "failed to generate X509 fingerprint. Error message: %s" msgstr "impossibile generare l'impronta digitale X509. 
Messaggio di errore: %s" msgid "failed to generate fingerprint" msgstr "impossibile generare l'impronta digitale" msgid "filename cannot be None" msgstr "il nome file non può essere None" msgid "floating IP is already associated" msgstr "l'IP mobile è già associato" msgid "floating IP not found" msgstr "IP mobile non trovato" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s sottoposto a backup da: %(backing_file)s" #, python-format msgid "href %s does not contain version" msgstr "href %s non contiene la versione" msgid "image already mounted" msgstr "immagine già montata" #, python-format msgid "instance %s is not running" msgstr "l'istanza %s non è in esecuzione" msgid "instance is a required argument to use @refresh_cache" msgstr "istanza è un argomento obbligatorio per utilizzare @refresh_cache" msgid "instance is not in a suspended state" msgstr "Lo stato dell'istanza non è suspended" msgid "instance is not powered on" msgstr "l'istanza non è accesa" msgid "instance is powered off and cannot be suspended." msgstr "l'istanza è spenta e non può essere sospesa." #, python-format msgid "instance_id %s could not be found as device id on any ports" msgstr "" "Non è stato possibile trovare l'instance_id %s come ID dispositivo su " "qualsiasi porta" msgid "is_public must be a boolean" msgstr "is_public deve essere un booleano" msgid "keymgr.fixed_key not defined" msgstr "keymgr.fixed_key non definito" msgid "l3driver call to add floating IP failed" msgstr "chiamata di l3driver per aggiungere IP mobile non riuscita" #, python-format msgid "libguestfs installed but not usable (%s)" msgstr "libguestfs installato, ma non utilizzabile (%s)" #, python-format msgid "libguestfs is not installed (%s)" msgstr "libguestfs non è installato (%s)" #, python-format msgid "marker [%s] not found" msgstr "indicatore [%s] non trovato" #, python-format msgid "max rows must be <= %(max_value)d" msgstr "il numero massimo di righe deve essere <= %(max_value)d" msgid "max_count cannot be greater than 1 if an fixed_ip is specified." msgstr "max_count non può essere maggiore di 1 se è specificato un fixed_ip." 
msgid "min_count must be <= max_count" msgstr "min_count deve essere <= max_count" #, python-format msgid "nbd device %s did not show up" msgstr "unità nbd %s non visualizzata" msgid "nbd unavailable: module not loaded" msgstr "nbd non disponibile: modulo non caricato" #, python-format msgid "no match found for %s" msgstr "nessuna corrispondenza trovata per %s" #, python-format msgid "no usable parent snapshot for volume %s" msgstr "nessuna istantanea parent utilizzabile per il volume %s" #, python-format msgid "no write permission on storage pool %s" msgstr "nessuna autorizzazione di scrittura nel pool di archiviazione %s" #, python-format msgid "not able to execute ssh command: %s" msgstr "Impossibile eseguire il comando ssh: %s" msgid "old style configuration can use only dictionary or memcached backends" msgstr "" "La configurazione old style può utilizzare solo backend di dizionario o " "memorizzati nella cache" msgid "operation time out" msgstr "timeout operazione" #, python-format msgid "partition %s not found" msgstr "partizione %s non trovata" #, python-format msgid "partition search unsupported with %s" msgstr "ricerca partizione non supportata con %s" msgid "pause not supported for vmwareapi" msgstr "sospensione non supportata per vmwareapi" msgid "printable characters with at least one non space character" msgstr "caratteri stampabili con almeno un carattere diverso dallo spazio " msgid "printable characters. Can not start or end with whitespace." msgstr "caratteri stampabili. Non possono iniziare o terminare con uno spazio." #, python-format msgid "qemu-img failed to execute on %(path)s : %(exp)s" msgstr "Impossibile eseguire qemu-img su %(path)s : %(exp)s" #, python-format msgid "qemu-nbd error: %s" msgstr "errore qemu-nbd: %s" msgid "rbd python libraries not found" msgstr "Impossibile trovare le librerie rbd python" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "read_deleted può essere solo 'no', 'yes' o 'only', non %r" msgid "serve() can only be called once" msgstr "il servizio() può essere chiamato solo una volta" msgid "service is a mandatory argument for DB based ServiceGroup driver" msgstr "" "il servizio è un argomento obbligatorio per il driver ServiceGroup basato su " "DB" msgid "service is a mandatory argument for Memcached based ServiceGroup driver" msgstr "il servizio è un argomento obbligatorio per il driver basato Memcached" msgid "set_admin_password is not implemented by this driver or guest instance." msgstr "" "set_admin_password non è implementato da questo driver o istanza guest." 
#, python-format msgid "snapshot for %s" msgstr "istantanea per %s" msgid "snapshot_id required in create_info" msgstr "snapshot_id obbligatorio in create_info" msgid "token not provided" msgstr "token non fornito" msgid "too many body keys" msgstr "troppe chiavi del corpo" msgid "unpause not supported for vmwareapi" msgstr "annullamento sospensione non supportato per vmwareapi" #, python-format msgid "vg %s must be LVM volume group" msgstr "vg %s deve essere il gruppo di volumi LVM" #, python-format msgid "vhostuser_sock_path not present in vif_details for vif %(vif_id)s" msgstr "vhostuser_sock_path non presente in vif_details per vif %(vif_id)s" #, python-format msgid "vif type %s not supported" msgstr "Tipo vif %s non supportato" msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "" "il parametro vif_type deve essere presente per questa implementazione di " "vif_driver" #, python-format msgid "volume %s already attached" msgstr "volume %s già collegato" #, python-format msgid "" "volume '%(vol)s' status must be 'in-use'. Currently in '%(status)s' status" msgstr "" "Lo stato del volume '%(vol)s' deve essere 'in-use'. Attualmente lo stato è " "'%(status)s'" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315688.893605 nova-32.0.0/nova/locale/ja/0000775000175000017500000000000000000000000015356 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3696086 nova-32.0.0/nova/locale/ja/LC_MESSAGES/0000775000175000017500000000000000000000000017143 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/locale/ja/LC_MESSAGES/nova.po0000664000175000017500000032422700000000000020460 0ustar00zuulzuul00000000000000# Translations template for nova. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the nova project. # # Translators: # FIRST AUTHOR , 2011 # Sasuke(Kyohei MORIYAMA) <>, 2015 # *pokotan-in-the-sky* <>, 2012 # Tom Fifield , 2013 # Tomoyuki KATO , 2013 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-07-04 18:13+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:06+0000\n" "Last-Translator: Copied by Zanata \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Japanese\n" #, python-format msgid "%(address)s is not a valid IP v4/6 address." msgstr "%(address)s が有効な IP v4/6 アドレスではありません。" #, python-format msgid "" "%(binary)s attempted direct database access which is not allowed by policy" msgstr "" "%(binary)s が直接データベースへアクセスしようとしましたが、これはポリシーで許" "可されていません" #, python-format msgid "%(cidr)s is not a valid IP network." msgstr "%(cidr)s や有効な IP ネットワークではありません。" #, python-format msgid "%(field)s should not be part of the updates." 
msgstr "%(field)s を更新に含めることはできません。" #, python-format msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB" msgstr "" "%(memsize)d MB のメモリーが割り当てられていますが、%(memtotal)d MB が期待され" "ていました" #, python-format msgid "%(path)s is not on local storage: %(reason)s" msgstr "%(path)s はローカルストレージ上にありません: %(reason)s" #, python-format msgid "%(path)s is not on shared storage: %(reason)s" msgstr "%(path)s は共有ストレージ上にありません: %(reason)s" #, python-format msgid "%(total)i rows matched query %(meth)s, %(done)i migrated" msgstr "%(total)i 行がクエリー %(meth)s に合致し、%(done)i が移行しました" #, python-format msgid "%(type)s hypervisor does not support PCI devices" msgstr "%(type)s ハイパーバイザーは PCI デバイスをサポートしていません" #, python-format msgid "%s does not support disk hotplug." msgstr "%s ではディスクのホットプラグはサポートされていません。" #, python-format msgid "%s format is not supported" msgstr "%s 形式はサポートされていません" #, python-format msgid "%s is not supported." msgstr "%s はサポートされていません。" #, python-format msgid "%s must be either 'MANUAL' or 'AUTO'." msgstr "%s は 'MANUAL' または 'AUTO' のいずれかでなければいけません。" #, python-format msgid "'%(other)s' should be an instance of '%(cls)s'" msgstr "'%(other)s' は'%(cls)s' のインスタンスである必要があります" msgid "'qemu-img info' parsing failed." msgstr "'qemu-img info' の解析に失敗しました。" #, python-format msgid "'rxtx_factor' argument must be a float between 0 and %g" msgstr "" "'rxtx_factor' 引数は 0 から %g の範囲内の浮動小数点数でなければなりません" #, python-format msgid "A NetworkModel is required in field %s" msgstr "フィールド %s に NetworkModel が必要です" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "API バージョン文字列 %(version)s の形式が無効です。形式は MajorNum.MinorNum " "でなければなりません。" #, python-format msgid "API version %(version)s is not supported on this method." msgstr "API バージョン %(version)s はこのメソッドではサポートされていません。" msgid "Access list not available for public flavors." msgstr "パブリックフレーバーではアクセスリストを使用できません。" #, python-format msgid "Action %s not found" msgstr "アクション %s が見つかりません" #, python-format msgid "" "Action for request_id %(request_id)s on instance %(instance_uuid)s not found" msgstr "" "インスタンス %(instance_uuid)s に対する request_id %(request_id)s のアクショ" "ンが見つかりません" #, python-format msgid "Action: '%(action)s', calling method: %(meth)s, body: %(body)s" msgstr "アクション: '%(action)s'、呼び出しメソッド: %(meth)s、本文: %(body)s" #, python-format msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries" msgstr "" "アグリゲート %(id)s にメタデータを追加しようと %(retries)s 回再試行しました" "が、追加できませんでした" #, python-format msgid "Aggregate %(aggregate_id)s already has host %(host)s." msgstr "アグリゲート %(aggregate_id)s には既にホスト %(host)s があります。" #, python-format msgid "Aggregate %(aggregate_id)s could not be found." msgstr "アグリゲート %(aggregate_id)s が見つかりませんでした。" #, python-format msgid "Aggregate %(aggregate_id)s has no host %(host)s." msgstr "アグリゲート %(aggregate_id)s にはホスト %(host)s がありません。" #, python-format msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." msgstr "" "アグリゲート %(aggregate_id)s にはキー %(metadata_key)s を持つメタデータはあ" "りません。" #, python-format msgid "Aggregate %(aggregate_name)s already exists." msgstr "アグリゲート %(aggregate_name)s は既に存在します。" #, python-format msgid "Aggregate %s does not support empty named availability zone" msgstr "アグリゲート %s は空の名前のアベイラビリティーゾーンをサポートしません" #, python-format msgid "An invalid 'name' value was provided. The name must be: %(reason)s" msgstr "" "無効な 'name' の値が提供されました。名前は以下である必要があります: " "%(reason)s" msgid "An unknown error has occurred. Please try your request again." 
msgstr "未知のエラーが発生しました。再度リクエストを実行してください。" msgid "An unknown exception occurred." msgstr "不明な例外が発生しました。" #, python-format msgid "Architecture name '%(arch)s' is not recognised" msgstr "アーキテクチャー名 '%(arch)s' は認識できません" #, python-format msgid "Architecture name '%s' is not valid" msgstr "アーキテクチャー名 '%s' が有効ではありません" #, python-format msgid "" "Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty pool" msgstr "" "空のプールから PCI デバイス %(compute_node_id)s:%(address)s を取り込んでみて" "ください" msgid "Attempted overwrite of an existing value." msgstr "既存の値を上書きしようとしました。" #, python-format msgid "Attribute not supported: %(attr)s" msgstr "この属性はサポートされていません: %(attr)s" #, python-format msgid "Bad network format: missing %s" msgstr "ネットワークの形式が正しくありません。%s がありません" msgid "Bad networks format" msgstr "ネットワークの形式が正しくありません" #, python-format msgid "Bad networks format: network uuid is not in proper format (%s)" msgstr "" "ネットワークの形式が正しくありません。ネットワーク UUID が適切な形式になって" "いません (%s)" #, python-format msgid "Bad prefix for network in cidr %s" msgstr "CIDR %s 内のネットワークでは無効なプレフィックス" #, python-format msgid "" "Binding failed for port %(port_id)s, please check neutron logs for more " "information." msgstr "" "ポート %(port_id)s でバインドが失敗しました。詳細情報については neutron のロ" "グを確認してください。" msgid "Blank components" msgstr "空白コンポーネント" msgid "" "Blank volumes (source: 'blank', dest: 'volume') need to have non-zero size" msgstr "" "ブランクのボリューム (ソース: 'blank'、宛先: 'volume') にはゼロでないサイズを" "設定する必要があります" #, python-format msgid "Block Device %(id)s is not bootable." msgstr "ブロックデバイス %(id)s がブート可能ではありません。" #, python-format msgid "" "Block Device Mapping %(volume_id)s is a multi-attach volume and is not valid " "for this operation." msgstr "" "ブロックデバイスマッピング %(volume_id)s は複数の接続を持つボリュームであり、" "この処理には有効ではありません。" msgid "Block Device Mapping cannot be converted to legacy format. " msgstr "ブロックデバイスマッピングを以前の形式に変換することはできません。" msgid "Block Device Mapping is Invalid." msgstr "ブロックデバイスマッピングが無効です。" #, python-format msgid "Block Device Mapping is Invalid: %(details)s" msgstr "ブロックデバイスマッピングが無効です: %(details)s" msgid "" "Block Device Mapping is Invalid: Boot sequence for the instance and image/" "block device mapping combination is not valid." msgstr "" "ブロックデバイスマッピングが無効です。指定されたインスタンスとイメージ/ブロッ" "クデバイスマッピングの組み合わせでのブートシーケンスは無効です。" msgid "" "Block Device Mapping is Invalid: You specified more local devices than the " "limit allows" msgstr "" "ブロックデバイスマッピングが無効です。制限で許可されているよりも多くのローカ" "ルデバイスが指定されました。" #, python-format msgid "Block Device Mapping is Invalid: failed to get image %(id)s." msgstr "" "ブロックデバイスマッピングが無効です。イメージ %(id)s の取得に失敗しました。" #, python-format msgid "Block Device Mapping is Invalid: failed to get snapshot %(id)s." msgstr "" "ブロックデバイスマッピングが無効です。スナップショット %(id)s の取得に失敗し" "ました。" #, python-format msgid "Block Device Mapping is Invalid: failed to get volume %(id)s." msgstr "" "ブロックデバイスマッピングが無効です。ボリューム %(id)s の取得に失敗しまし" "た。" msgid "Block migration can not be used with shared storage." msgstr "" "ブロックマイグレーションを使用するときに、共有ストレージを使用することはでき" "ません。" msgid "Boot index is invalid." 
msgstr "boot インデックスが無効です。" #, python-format msgid "Build of instance %(instance_uuid)s aborted: %(reason)s" msgstr "インスタンス %(instance_uuid)s の作成は打ち切られました: %(reason)s" #, python-format msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s" msgstr "" "インスタンス %(instance_uuid)s の作成は再スケジュールされました: %(reason)s" #, python-format msgid "BuildRequest not found for instance %(uuid)s" msgstr "インスタンス %(uuid)s に関する BuildRequest が見つかりません" msgid "CPU and memory allocation must be provided for all NUMA nodes" msgstr "" "CPU とメモリーの割り当ては、すべての NUMA ノードに指定しなければなりません" #, python-format msgid "" "CPU doesn't have compatibility.\n" "\n" "%(ret)s\n" "\n" "Refer to %(u)s" msgstr "" "CPU に互換性がありません。\n" "\n" "%(ret)s\n" "\n" "%(u)s を参照" #, python-format msgid "CPU number %(cpunum)d is assigned to two nodes" msgstr "CPU 番号 %(cpunum)d は 2 つのノードに割り当てられています" #, python-format msgid "CPU number %(cpunum)d is larger than max %(cpumax)d" msgstr "CPU 番号 %(cpunum)d は最大値 %(cpumax)d を超えています" #, python-format msgid "CPU number %(cpuset)s is not assigned to any node" msgstr "CPU 番号 %(cpuset)s はノードに割り当てられていません" msgid "Can not add access to a public flavor." msgstr "パブリックフレーバーにアクセスを追加できません" msgid "Can not find requested image" msgstr "要求されたイメージが見つかりません" #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "%d 認証情報に関する認証要求を処理できません" msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "インスタンスの libvirt 設定からルートデバイスのパスを取得できません" #, python-format msgid "" "Cannot '%(action)s' instance %(server_id)s while it is in %(attr)s %(state)s" msgstr "" "インスタンス %(server_id)s が %(attr)s %(state)s にある間は '%(action)s' を行" "うことはできません" #, python-format msgid "Cannot add host to aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "ホストをアグリゲート %(aggregate_id)s に追加できません。理由: %(reason)s。" msgid "Cannot attach one or more volumes to multiple instances" msgstr "1 つのボリュームを複数のインスタンスに接続できません" #, python-format msgid "Cannot call %(method)s on orphaned %(objtype)s object" msgstr "" "親のない %(objtype)s オブジェクトで %(method)s を呼び出すことはできません" #, python-format msgid "" "Cannot determine the parent storage pool for %s; cannot determine where to " "store images" msgstr "" "%s の親のストレージプールを検出できません。イメージを保存する場所を決定できま" "せん。" msgid "Cannot find image for rebuild" msgstr "再作成用のイメージが見つかりません" #, python-format msgid "Cannot remove host %(host)s in aggregate %(id)s" msgstr "ホスト %(host)s をアグリゲート %(id)s から削除できません" #, python-format msgid "Cannot remove host from aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "ホストをアグリゲート %(aggregate_id)s から削除できません。理由: %(reason)s。" msgid "Cannot rescue a volume-backed instance" msgstr "ボリュームを使ったインスタンスはレスキューできません" msgid "" "Cannot set cpu thread pinning policy in a non dedicated cpu pinning policy" msgstr "" "専用でない CPU の固定ポリシーではCPU スレッドの固定ポリシーを設定できません" msgid "Cannot set realtime policy in a non dedicated cpu pinning policy" msgstr "" "専用でない CPU の固定ポリシーではリアルタイムのポリシーを設定できません" #, python-format msgid "Cannot update aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "アグリゲート %(aggregate_id)s を更新できません。理由: %(reason)s。" #, python-format msgid "" "Cannot update metadata of aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "アグリゲート %(aggregate_id)s のメタデータを更新できません。理由: " "%(reason)s。" #, python-format msgid "Cell %(uuid)s has no mapping." 
msgstr "セル %(uuid)s にはマッピングがありません。" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "変更によって、リソース %(unders)s の使用量が 0 未満になります" #, python-format msgid "Class %(class_name)s could not be found: %(exception)s" msgstr "クラス %(class_name)s が見つかりませんでした: %(exception)s" #, python-format msgid "Compute host %(host)s could not be found." msgstr "コンピュートホスト %(host)s が見つかりませんでした。" #, python-format msgid "Compute host %s not found." msgstr "コンピュートホスト %s が見つかりません。" #, python-format msgid "Compute service of %(host)s is still in use." msgstr "%(host)s のコンピュートサービスが依然として使用されています。" #, python-format msgid "Compute service of %(host)s is unavailable at this time." msgstr "この時点では %(host)s のコンピュートサービスを使用できません。" #, python-format msgid "Config drive format '%(format)s' is not supported." msgstr "コンフィグドライブ形式 %(format)s がサポートされません。" #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt hypervisor " "'%s' does not support selecting CPU models" msgstr "" "設定で、明示的な CPU モデルが要求されましたが、現在の libvirt ハイパーバイ" "ザー '%s' は CPU モデルの選択をサポートしていません" #, python-format msgid "" "Conflict updating instance %(instance_uuid)s, but we were unable to " "determine the cause" msgstr "" "インスタンス %(instance_uuid)s の更新で競合が発生したものの、原因を特定できま" "せんでした。" #, python-format msgid "" "Conflict updating instance %(instance_uuid)s. Expected: %(expected)s. " "Actual: %(actual)s" msgstr "" "インスタンス %(instance_uuid)s の更新で競合が発生しました。%(expected)s を期" "待したものの、実際には %(actual)s が得られました" #, python-format msgid "Connection to cinder host failed: %(reason)s" msgstr "cinder ホストへの接続に失敗しました: %(reason)s" #, python-format msgid "Connection to glance host %(server)s failed: %(reason)s" msgstr "glance ホスト %(server)s への接続に失敗しました: %(reason)s" #, python-format msgid "Connection to libvirt lost: %s" msgstr "libvirt との接続が失われました: %s" #, python-format msgid "" "Console log output could not be retrieved for instance %(instance_id)s. " "Reason: %(reason)s" msgstr "" "インスタンス %(instance_id)s についてコンソールログの出力を取得できませんでし" "た。理由: %(reason)s" msgid "Constraint not met." msgstr "制約が満たされていません。" #, python-format msgid "Converted to raw, but format is now %s" msgstr "raw 形式に変換されましたが、現在の形式は %s です" #, fuzzy, python-format msgid "Could not attach image to loopback: %s" msgstr "イメージをループバック %s にアタッチできません。" #, python-format msgid "Could not fetch image %(image_id)s" msgstr "イメージ %(image_id)s を取り出すことができませんでした" #, python-format msgid "Could not find a handler for %(driver_type)s volume." msgstr "%(driver_type)s ボリュームのハンドラーが見つかりませんでした。" #, python-format msgid "Could not find binary %(binary)s on host %(host)s." msgstr "ホスト %(host)s 上でバイナリー %(binary)s が見つかりませんでした。" #, python-format msgid "Could not find config at %(path)s" msgstr "%(path)s に config が見つかりませんでした" msgid "Could not find the datastore reference(s) which the VM uses." msgstr "VM が使用するデータストア参照が見つかりませんでした。" #, python-format msgid "Could not load line %(line)s, got error %(error)s" msgstr "" "行 %(line)s をロードできませんでした。エラー %(error)s を受け取りました" #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" "paste アプリケーション '%(name)s' を %(path)s からロードできませんでした" #, python-format msgid "" "Could not mount vfat config drive. %(operation)s failed. 
Error: %(error)s" msgstr "" "vfat コンフィグドライブをマウントできません。%(operation)s が失敗しました。エ" "ラー: %(error)s" #, python-format msgid "Could not upload image %(image_id)s" msgstr "イメージ %(image_id)s をアップロードできませんでした" msgid "Creation of virtual interface with unique mac address failed" msgstr "一意な MAC アドレスを持つ仮想インターフェースを作成できませんでした" #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "データストア regex %s がどのデータストアとも一致しませんでした" msgid "Datetime is in invalid format" msgstr "日時が無効な形式です" msgid "Default PBM policy is required if PBM is enabled." msgstr "PBM が有効になっている場合、デフォルト PBM ポリシーは必須です。" #, python-format msgid "Device '%(device)s' not found." msgstr "デバイス '%(device)s' が見つかりません。" msgid "Device name contains spaces." msgstr "デバイス名に空白が含まれています。" msgid "Device name empty or too long." msgstr "デバイス名が空か、長すぎます。" #, python-format msgid "Device type mismatch for alias '%s'" msgstr "別名 '%s' のデバイスタイプが一致しません" #, python-format msgid "Disk format %(disk_format)s is not acceptable" msgstr "ディスク形式 %(disk_format)s は受け付けられません" #, python-format msgid "Disk info file is invalid: %(reason)s" msgstr "ディスク情報ファイルが無効です: %(reason)s" #, python-format msgid "Driver Error: %s" msgstr "ドライバーエラー: %s" #, python-format msgid "Error attempting to run %(method)s" msgstr "%(method)s を実行しようとしてエラーが発生しました" #, python-format msgid "" "Error destroying the instance on node %(node)s. Provision state still " "'%(state)s'." msgstr "" "インスタンスをノード %(node)s で破棄しているときにエラーが発生しました。プロ" "ビジョニング状態は '%(state)s' です。" #, python-format msgid "Error during unshelve instance %(instance_id)s: %(reason)s" msgstr "インスタンス %(instance_id)s の復元中のエラー: %(reason)s" #, python-format msgid "" "Error from libvirt while getting domain info for %(instance_name)s: [Error " "Code %(error_code)s] %(ex)s" msgstr "" "%(instance_name)s のドメイン情報を取得している際に、libvirt でエラーが発生し" "ました: [エラーコード %(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "%(instance_name)s の検索中に libvirt でエラーが発生しました: [エラーコード " "%(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while quiescing %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "%(instance_name)s の正常終了中に libvirt でエラーが発生しました: [エラーコー" "ド %(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while set password for username \"%(user)s\": [Error Code " "%(error_code)s] %(ex)s" msgstr "" "ユーザー名 \"%(user)s\" のパスワードの設定中に libvirt でエラーが発生しまし" "た: [エラーコード %(error_code)s] %(ex)s" #, python-format msgid "" "Error mounting %(device)s to %(dir)s in image %(image)s with libguestfs " "(%(e)s)" msgstr "" "libguestfs (%(e)s) を使用してイメージ %(image)s の %(dir)s に %(device)s をマ" "ウントする際にエラーが発生しました" #, python-format msgid "Error mounting %(image)s with libguestfs (%(e)s)" msgstr "" "libguestfs (%(e)s) を使用してイメージ %(image)s をマウントする際にエラーが発" "生しました" #, python-format msgid "Error when creating resource monitor: %(monitor)s" msgstr "リソースモニター %(monitor)s を作成するときにエラーが発生しました" #, python-format msgid "Event %(event)s not found for action id %(action_id)s" msgstr "" "アクション ID %(action_id)s に対応するイベント %(event)s が見つかりません" msgid "Event must be an instance of nova.virt.event.Event" msgstr "イベントは nova.virt.event.Event のインスタンスでなければなりません" #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for instance " "%(instance_uuid)s. 
Last exception: %(exc_reason)s" msgstr "" "インスタンス %(instance_uuid)s に関してスケジューリング可能な最大試行回数 " "%(max_attempts)d を超えました。直近の例外: %(exc_reason)s" #, python-format msgid "" "Exceeded max scheduling retries %(max_retries)d for instance " "%(instance_uuid)s during live migration" msgstr "" "ライブマイグレーション時にインスタンス %(instance_uuid)s の最大スケジューリン" "グ再試行回数 %(max_retries)d を超えました" #, python-format msgid "Exceeded maximum number of retries. %(reason)s" msgstr "再試行の最大回数を超えました。%(reason)s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "UUID が必要ですが、%(uuid)s を受け取りました。" msgid "Extracting vmdk from OVA failed." msgstr "OVA からの vmdk の取得に失敗しました。" #, python-format msgid "Failed to access port %(port_id)s: %(reason)s" msgstr "ポート %(port_id)s へのアクセスに失敗しました: %(reason)s" #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "" "エラー %s が発生したため、ネットワークを割り当てることができませんでした。再" "スケジュールは行われません。" msgid "Failed to allocate the network(s), not rescheduling." msgstr "" "ネットワークを割り当てることができませんでした。再スケジュールは行われませ" "ん。" #, python-format msgid "Failed to attach network adapter device to %(instance_uuid)s" msgstr "" "ネットワークアダプターデバイスを %(instance_uuid)s に接続できませんでした" #, python-format msgid "Failed to deploy instance: %(reason)s" msgstr "インスタンスをデプロイできませんでした: %(reason)s" #, python-format msgid "Failed to detach PCI device %(dev)s: %(reason)s" msgstr "PCI デバイス %(dev)s を切り離すことができませんでした: %(reason)s" #, python-format msgid "Failed to detach network adapter device from %(instance_uuid)s" msgstr "" "ネットワークアダプターデバイスを %(instance_uuid)s から切り離すことができませ" "んでした" #, python-format msgid "Failed to encrypt text: %(reason)s" msgstr "テキストの暗号化に失敗しました: %(reason)s" #, python-format msgid "Failed to launch instances: %(reason)s" msgstr "インスタンスを起動できませんでした: %(reason)s" #, python-format msgid "Failed to map partitions: %s" msgstr "パーティションのマッピングに失敗しました: %s" #, python-format msgid "Failed to mount filesystem: %s" msgstr "ファイルシステム %s のマウントに失敗しました。" #, python-format msgid "Failed to power off instance: %(reason)s" msgstr "インスタンスの電源オフに失敗しました: %(reason)s" #, python-format msgid "Failed to power on instance: %(reason)s" msgstr "インスタンスの電源オンに失敗しました: %(reason)s" #, python-format msgid "Failed to provision instance %(inst)s: %(reason)s" msgstr "インスタンス %(inst)s をプロビジョニングできませんでした: %(reason)s" #, python-format msgid "Failed to read or write disk info file: %(reason)s" msgstr "ディスク情報ファイルの読み取りまたは書き込みに失敗しました: %(reason)s" #, python-format msgid "Failed to reboot instance: %(reason)s" msgstr "インスタンスをリブートできませんでした: %(reason)s" #, python-format msgid "Failed to remove volume(s): (%(reason)s)" msgstr "ボリュームの削除に失敗しました: (%(reason)s)" #, python-format msgid "Failed to request Ironic to rebuild instance %(inst)s: %(reason)s" msgstr "" "インスタンス %(inst)s の再構築を Ironic に要求できませんでした: %(reason)s" #, python-format msgid "Failed to resume instance: %(reason)s" msgstr "インスタンスの再開に失敗しました: %(reason)s" #, python-format msgid "Failed to run qemu-img info on %(path)s : %(error)s" msgstr "qemu-img info を %(path)s に対して実行できませんでした: %(error)s" #, python-format msgid "Failed to set admin password on %(instance)s because %(reason)s" msgstr "%(instance)s で管理者パスワードの設定に失敗しました。理由: %(reason)s" #, python-format msgid "Failed to suspend instance: %(reason)s" msgstr "インスタンスを休止できませんでした: %(reason)s" #, python-format msgid "Failed to terminate instance: %(reason)s" msgstr "インスタンスを削除できませんでした: %(reason)s" msgid "Failure prepping block device." msgstr "ブロックデバイスを準備できませんでした" #, python-format msgid "File %(file_path)s could not be found." 
msgstr "ファイル %(file_path)s が見つかりませんでした。" #, python-format msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s." msgstr "" "Fixed IP %(ip)s はネットワーク %(network_id)s の有効な IP アドレスではありま" "せん。" #, python-format msgid "Fixed IP %s is already in use." msgstr "Fixed IP %s は既に使用中です。" #, python-format msgid "" "Fixed IP address %(address)s is already in use on instance %(instance_uuid)s." msgstr "" "Fixed IP アドレス %(address)s はインスタンス %(instance_uuid)s で既に使用され" "ています。" #, python-format msgid "Fixed IP not found for address %(address)s." msgstr " アドレス %(address)s に対応する Fixed IP が見つかりません。" #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "フレーバー %(flavor_id)s が見つかりませんでした。" #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s." msgstr "" "フレーバー %(flavor_id)s にはキー %(extra_specs_key)s を持つ追加スペックはあ" "りません。" #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(key)s." msgstr "" "フレーバー %(flavor_id)s にはキー %(key)s を持つ追加スペックはありません。" #, python-format msgid "" "Flavor %(id)s extra spec cannot be updated or created after %(retries)d " "retries." msgstr "" "%(retries)d 回の再試行の後では、フレーバー %(id)s の追加仕様の更新と作成を行" "うことはできません。" #, python-format msgid "" "Flavor access already exists for flavor %(flavor_id)s and project " "%(project_id)s combination." msgstr "" "フレーバー %(flavor_id)s とプロジェクト %(project_id)s の組み合わせに対応する" "フレーバーアクセスは既に存在します。" #, python-format msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination." msgstr "" "%(flavor_id)s / %(project_id)s の組み合わせに対応するフレーバーアクセスが見つ" "かりません。" msgid "Flavor used by the instance could not be found." msgstr "インスタンスで使用されたフレーバーが見つかりませんでした" #, python-format msgid "Flavor with ID %(flavor_id)s already exists." msgstr "ID %(flavor_id)s を持つフレーバーは既に存在します。" #, python-format msgid "Flavor with name %(flavor_name)s could not be found." msgstr "名前が %(flavor_name)s のフレーバーが見つかりませんでした。" #, python-format msgid "Flavor with name %(name)s already exists." msgstr "名前が %(name)s のフレーバーは既に存在します。" #, python-format msgid "" "Flavor's disk is smaller than the minimum size specified in image metadata. " "Flavor disk is %(flavor_size)i bytes, minimum size is %(image_min_disk)i " "bytes." msgstr "" "フレーバーのディスクサイズがイメージのメタデータで指定された最小サイズよりも" "小さくなっています。フレーバーディスクは %(flavor_size)i バイト、最小サイズ" "は %(image_min_disk)i バイトです。" #, python-format msgid "" "Flavor's disk is too small for requested image. Flavor disk is " "%(flavor_size)i bytes, image is %(image_size)i bytes." msgstr "" "フレーバーのディスクサイズがリクエストされたイメージに対し小さすぎます。フ" "レーバーディスクは %(flavor_size)i バイト、イメージは %(image_size)i バイトで" "す。" msgid "Flavor's memory is too small for requested image." msgstr "フレーバーのメモリーは要求されたイメージに対して小さすぎます。" #, python-format msgid "Floating IP %(address)s association has failed." msgstr "Floating IP %(address)s の割り当てに失敗しました。" #, python-format msgid "Floating IP %(address)s is associated." msgstr "Floating IP %(address)s が割り当てられています。" #, python-format msgid "Floating IP %(address)s is not associated with instance %(id)s." msgstr "" "Floating IP %(address)s はインスタンス %(id)s に割り当てられていません。" #, python-format msgid "Floating IP not found for ID %(id)s." msgstr "ID %(id)s の Floating IP が見つかりません。" #, python-format msgid "Floating IP not found for ID %s" msgstr "ID %s の Floating IP が見つかりません" #, python-format msgid "Floating IP not found for address %(address)s." msgstr "アドレス %(address)s に対応する Floating IP が見つかりません。" msgid "Floating IP pool not found." 
msgstr "Floating IP プールが見つかりません。" msgid "" "Forbidden to exceed flavor value of number of serial ports passed in image " "meta." msgstr "" "イメージメタデータで渡されるシリアルポート数のフレーバー値を超えないようにし" "てください。" msgid "Found no disk to snapshot." msgstr "スナップショットの作成対象のディスクが見つかりません" msgid "Guest does not have a console available." msgstr "ゲストはコンソールを使用することはできません。" #, python-format msgid "Host %(host)s could not be found." msgstr "ホスト %(host)s が見つかりませんでした。" #, python-format msgid "Host %(host)s is already mapped to cell %(uuid)s" msgstr "ホスト %(host)s が既にセル %(uuid)s にマッピングされています" #, python-format msgid "Host '%(name)s' is not mapped to any cell" msgstr "ホスト '%(name)s' がどのセルにもマッピングされません" msgid "Host aggregate is not empty" msgstr "ホストアグリゲートが空ではありません" msgid "Host does not support guests with NUMA topology set" msgstr "ホストが NUMA トポロジーが設定されたゲストをサポートしていません" msgid "Host does not support guests with custom memory page sizes" msgstr "" "ホストがカスタムのメモリーページサイズが指定されたゲストをサポートしていませ" "ん" msgid "Hypervisor driver does not support post_live_migration_at_source method" msgstr "" "ハイパーバイザードライバーが post_live_migration_at_source メソッドをサポート" "していません" #, python-format msgid "Hypervisor virt type '%s' is not valid" msgstr "ハイバーバイザーの仮想化タイプ '%s' が有効ではありません" #, python-format msgid "Hypervisor virtualization type '%(hv_type)s' is not recognised" msgstr "ハイパーバイザー仮想化タイプ '%(hv_type)s' は認識されていません" #, python-format msgid "Hypervisor with ID '%s' could not be found." msgstr "ID '%s' のハイパーバイザーが見つかりませんでした。" #, python-format msgid "IP allocation over quota in pool %s." msgstr "IP の割り当て量がプール %s 内のクォータを超えています。" msgid "IP allocation over quota." msgstr "IP の割り当て量がクォータを超えています。" #, python-format msgid "Image %(image_id)s could not be found." msgstr "イメージ %(image_id)s が見つかりませんでした。" #, python-format msgid "Image %(image_id)s is not active." msgstr "イメージ %(image_id)s はアクティブではありません。" #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "イメージ %(image_id)s は受け付けられません: %(reason)s" msgid "Image disk size greater than requested disk size" msgstr "イメージディスクが、要求されたディスクサイズよりも大きなサイズです" msgid "Image is not raw format" msgstr "イメージは raw 形式ではありません" msgid "Image metadata limit exceeded" msgstr "イメージメタデータ数の上限を超えました" #, python-format msgid "Image model '%(image)s' is not supported" msgstr "イメージモデル '%(image)s' はサポートされません" msgid "Image not found." msgstr "イメージが見つかりません。" #, python-format msgid "" "Image property '%(name)s' is not permitted to override NUMA configuration " "set against the flavor" msgstr "" "イメージプロパティー '%(name)s' で、フレーバーに対して設定された NUMA 構成を" "オーバーライドすることは許可されません" msgid "" "Image property 'hw_cpu_policy' is not permitted to override CPU pinning " "policy set against the flavor" msgstr "" "イメージプロパティー 'hw_cpu_policy' はこのフレーバーでは設定されたCPU コア固" "定ポリシーのオーバーライドを許可されていません" msgid "" "Image property 'hw_cpu_thread_policy' is not permitted to override CPU " "thread pinning policy set against the flavor" msgstr "" "イメージプロパティー 'hw_cpu_thread_policy' は、フレーバーに設定された CPU ス" "レッドの固定ポリシーを上書きすることはできません。" msgid "Image that the instance was started with could not be found." 
msgstr "インスタンスの起動時に使用されたイメージが見つかりませんでした。" #, python-format msgid "Image's config drive option '%(config_drive)s' is invalid" msgstr "イメージのコンフィグドライブのオプション '%(config_drive)s' は無効です" msgid "" "Images with destination_type 'volume' need to have a non-zero size specified" msgstr "" "destination_type 'volume' を含むイメージには、ゼロ以外のサイズが指定されてい" "る必要があります" msgid "In ERROR state" msgstr "エラー状態です" #, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "状態は %(vm_state)s/%(task_state)s です。RESIZED/None ではありません" #, python-format msgid "In-progress live migration %(id)s is not found for server %(uuid)s." msgstr "" "サーバー %(uuid)s で進行中のライブマイグレーション %(id)s が見つかりません。" msgid "" "Incompatible settings: ephemeral storage encryption is supported only for " "LVM images." msgstr "" "設定に互換性がありません: 一時ストレージ暗号化は LVM イメージでのみサポート" "されています。" #, python-format msgid "Info cache for instance %(instance_uuid)s could not be found." msgstr "" "インスタンス %(instance_uuid)s 用の情報キャッシュが見つかりませんでした。" #, python-format msgid "" "Instance %(instance)s and volume %(vol)s are not in the same " "availability_zone. Instance is in %(ins_zone)s. Volume is in %(vol_zone)s" msgstr "" "インスタンス %(instance)s とボリューム %(vol)s は同じアベイラビリティーゾーン" "にありません。インスタンスは %(ins_zone)s に、ボリュームは %(vol_zone)s にあ" "ります" #, python-format msgid "Instance %(instance)s does not have a port with id %(port)s" msgstr "インスタンス %(instance)s に ID %(port)s のポートがありません" #, python-format msgid "Instance %(instance_id)s cannot be rescued: %(reason)s" msgstr "インスタンス %(instance_id)s をレスキューできません: %(reason)s" #, python-format msgid "Instance %(instance_id)s could not be found." msgstr "インスタンス %(instance_id)s が見つかりませんでした。" #, python-format msgid "Instance %(instance_id)s has no tag '%(tag)s'" msgstr "インスタンス %(instance_id)s にタグ \"%(tag)s\" がありません" #, python-format msgid "Instance %(instance_id)s is not in rescue mode" msgstr "インスタンス %(instance_id)s はレスキューモードではありません。" #, python-format msgid "Instance %(instance_id)s is not ready" msgstr "インスタンス %(instance_id)s の準備ができていません" #, python-format msgid "Instance %(instance_id)s is not running." msgstr "インスタンス %(instance_id)s は実行されていません。" #, python-format msgid "Instance %(instance_id)s is unacceptable: %(reason)s" msgstr "インスタンス %(instance_id)s は受け付けられません: %(reason)s" #, python-format msgid "Instance %(instance_uuid)s does not specify a NUMA topology" msgstr "インスタンス %(instance_uuid)s で NUMA トポロジーが指定されていません" #, python-format msgid "Instance %(instance_uuid)s does not specify a migration context." msgstr "" "インスタンス %(instance_uuid)s がマイグレーションのコンテキストを設定していま" "せん。" #, python-format msgid "" "Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while " "the instance is in this state." msgstr "" "インスタンス %(instance_uuid)s は %(attr)s %(state)s 状態です。インスタンスが" "この状態にある間は %(method)s を行えません。" #, python-format msgid "Instance %(instance_uuid)s is locked" msgstr "インスタンス %(instance_uuid)s はロックされています" #, python-format msgid "" "Instance %(instance_uuid)s requires config drive, but it does not exist." msgstr "" "インスタンス %(instance_uuid)s にはコンフィグドライブが必要ですが、存在しませ" "ん。" #, python-format msgid "Instance %(name)s already exists." msgstr "インスタンス %(name)s は既に存在します。" #, python-format msgid "Instance %(server_id)s is in an invalid state for '%(action)s'" msgstr "" "インスタンス %(server_id)s は '%(action)s' が実行できない状態にあります" #, python-format msgid "Instance %(uuid)s has no mapping to a cell." 
msgstr "インスタンス %(uuid)s にはセルに対するマッピングがありません。" #, python-format msgid "Instance %s not found" msgstr "インスタンス %s が見つかりません" #, python-format msgid "Instance %s provisioning was aborted" msgstr "インスタンス %s のプロビジョニングが中止しました。" msgid "Instance could not be found" msgstr "インスタンスが見つかりませんでした" msgid "Instance disk to be encrypted but no context provided" msgstr "" "インスタンスディスクの暗号化が必要ですが、コンテキストが指定されていません。" msgid "Instance event failed" msgstr "インスタンスイベントが失敗しました" #, python-format msgid "Instance group %(group_uuid)s already exists." msgstr "インスタンスグループ %(group_uuid)s は既に存在します。" #, python-format msgid "Instance group %(group_uuid)s could not be found." msgstr "インスタンスグループ %(group_uuid)s が見つかりませんでした。" #, fuzzy msgid "Instance has no source host" msgstr "インスタンスにソースホストがありません" msgid "Instance has not been resized." msgstr "インスタンスのサイズ変更が行われていません" #, python-format msgid "Instance hostname %(hostname)s is not a valid DNS name" msgstr "インスタンスのホスト名 %(hostname)s は有効な DNS 名ではありません。" msgid "Instance is not a member of specified network" msgstr "インスタンスは指定されたネットワークのメンバーではありません" #, python-format msgid "Instance rollback performed due to: %s" msgstr "インスタンスのロールバックが実行されました。原因: %s" #, python-format msgid "" "Insufficient Space on Volume Group %(vg)s. Only %(free_space)db available, " "but %(size)d bytes required by volume %(lv)s." msgstr "" "ボリュームグループ %(vg)s に十分なスペースがありません。使用可能なのは " "%(free_space)db のみですが、ボリューム %(lv)s には %(size)d バイト必要です。" #, python-format msgid "Insufficient compute resources: %(reason)s." msgstr "コンピュートリソースが不十分です: %(reason)s。" #, python-format msgid "Interface %(interface)s not found." msgstr "インターフェース %(interface)s が見つかりません。" #, python-format msgid "Invalid Base 64 data for file %(path)s" msgstr "ファイル %(path)s の Base64 データが無効です" msgid "Invalid Connection Info" msgstr "無効な接続情報" #, python-format msgid "Invalid ID received %(id)s." msgstr "無効な ID %(id)s を受信しました。" #, python-format msgid "Invalid IP format %s" msgstr "%s は無効な IP 形式です" #, python-format msgid "Invalid IP protocol %(protocol)s." msgstr "無効な IP プロトコル %(protocol)s。" msgid "" "Invalid PCI Whitelist: The PCI whitelist can specify devname or address, but " "not both" msgstr "" "無効な PCI ホワイトリスト: PCI ホワイトリストでは devname またはアドレスを指" "定できますが、両方を指定することはできません" #, python-format msgid "Invalid PCI alias definition: %(reason)s" msgstr "PCI エイリアス定義が無効です: %(reason)s" #, python-format msgid "Invalid Regular Expression %s" msgstr "正規表現 %s は無効です" #, python-format msgid "Invalid characters in hostname '%(hostname)s'" msgstr "ホスト名 '%(hostname)s' に無効な文字があります" msgid "Invalid config_drive provided." msgstr "無効な config_drive が指定されました。" #, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "無効な config_drive_format \"%s\"" #, python-format msgid "Invalid console type %(console_type)s" msgstr "無効なコンソールタイプ %(console_type)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "無効なコンテンツ形式 %(content_type)s。" #, python-format msgid "Invalid datetime string: %(reason)s" msgstr "日時の文字列が無効です: %(reason)s" msgid "Invalid device UUID." 
msgstr "デバイス UUID が無効です。" #, python-format msgid "Invalid entry: '%s'" msgstr "項目 '%s' は無効です" #, python-format msgid "Invalid entry: '%s'; Expecting dict" msgstr "項目 '%s' は無効です。辞書型が期待されています" #, python-format msgid "Invalid entry: '%s'; Expecting list or dict" msgstr "項目 '%s' は無効です。リストまたは辞書型が期待されています" #, python-format msgid "Invalid exclusion expression %r" msgstr "排他式 %r は無効です" #, python-format msgid "Invalid image format '%(format)s'" msgstr "イメージ形式 '%(format)s' は無効です" #, python-format msgid "Invalid image href %(image_href)s." msgstr "無効なイメージ href %(image_href)s。" #, python-format msgid "Invalid inclusion expression %r" msgstr "包含式 %r は無効です" #, python-format msgid "" "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s" msgstr "フィールド/属性 %(path)s の入力が無効です。値: %(value)s。%(message)s" #, python-format msgid "Invalid input received: %(reason)s" msgstr "無効な入力を受信しました: %(reason)s" msgid "Invalid instance image." msgstr "インスタンスイメージが無効です。" #, python-format msgid "Invalid is_public filter [%s]" msgstr "無効な is_public フィルター [%s]" msgid "Invalid key_name provided." msgstr "無効な key_name が指定されました。" #, python-format msgid "Invalid memory page size '%(pagesize)s'" msgstr "メモリーページサイズ \"%(pagesize)s\" が無効です" msgid "Invalid metadata key" msgstr "無効なメタデータキーです" #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "無効なメタデータサイズ: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "メタデータが無効です: %(reason)s" #, python-format msgid "Invalid minDisk filter [%s]" msgstr "無効な minDisk フィルター [%s]" #, python-format msgid "Invalid minRam filter [%s]" msgstr "無効な minRam フィルター [%s]" #, python-format msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" msgstr "無効なポート範囲 %(from_port)s:%(to_port)s。 %(msg)s" msgid "Invalid proxy request signature." msgstr "無効なプロキシー要求シグニチャー" #, python-format msgid "Invalid range expression %r" msgstr "範囲式 %r は無効です" msgid "Invalid service catalog json." msgstr "無効なサービスカタログ JSON。" msgid "Invalid start time. The start time cannot occur after the end time." msgstr "無効な開始時刻。開始時刻を終了時刻より後にすることはできません。" msgid "Invalid state of instance files on shared storage" msgstr "共有ストレージ上のインスタンスファイルの無効な状態" #, python-format msgid "Invalid timestamp for date %s" msgstr "日付 %s のタイムスタンプが無効です" #, python-format msgid "Invalid usage_type: %s" msgstr "usage_type %s は無効です" #, python-format msgid "Invalid value for Config Drive option: %(option)s" msgstr "コンフィグドライブのオプション %(option)s の値が無効です" #, python-format msgid "Invalid virtual interface address %s in request" msgstr "リクエストに無効な仮想インターフェースアドレス %s があります" #, python-format msgid "Invalid volume access mode: %(access_mode)s" msgstr "ボリュームアクセスモードが無効です: %(access_mode)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "無効なボリューム: %(reason)s" msgid "Invalid volume_size." msgstr "volume_size が無効です。" #, python-format msgid "Ironic node uuid not supplied to driver for instance %s." msgstr "" "Ironic ノード uuid が、インスタンス %s のドライバーに提供されていません。" #, python-format msgid "" "It is not allowed to create an interface on external network %(network_uuid)s" msgstr "" "外部ネットワーク %(network_uuid)s でインターフェースを作成することは許可され" "ていません" msgid "" "Key Names can only contain alphanumeric characters, periods, dashes, " "underscores, colons and spaces." msgstr "" "キー名に使用できるのは、英数字、ピリオド、ダッシュ、アンダースコアー、コロ" "ン、および空白のみです。" #, python-format msgid "Key manager error: %(reason)s" msgstr "鍵マネージャーエラー: %(reason)s" #, python-format msgid "Key pair '%(key_name)s' already exists." 
msgstr "キーペア '%(key_name)s' は既に存在します。" #, python-format msgid "Keypair %(name)s not found for user %(user_id)s" msgstr "ユーザー %(user_id)s のキーペア %(name)s が見つかりません" #, python-format msgid "Keypair data is invalid: %(reason)s" msgstr "キーペアデータが無効です: %(reason)s" msgid "Limits only supported from vCenter 6.0 and above" msgstr "上限が適用されるのは、vCenter 6.0 以降の場合のみです。" #, python-format msgid "Live migration %(id)s for server %(uuid)s is not in progress." msgstr "サーバー %(uuid)s のマイグレーション %(id)s は進行中ではありません。" #, python-format msgid "Malformed message body: %(reason)s" msgstr "メッセージ本文の形式に誤りがあります: %(reason)s" #, python-format msgid "" "Malformed request URL: URL's project_id '%(project_id)s' doesn't match " "Context's project_id '%(context_project_id)s'" msgstr "" "誤った形式のリクエスト URL です。URL の project_id '%(project_id)s' がコンテ" "キストの project_id '%(context_project_id)s' と一致しません" msgid "Malformed request body" msgstr "誤った形式のリクエスト本文" msgid "Mapping image to local is not supported." msgstr "ローカルへのイメージマッピングはサポートしていません。" #, python-format msgid "Marker %(marker)s could not be found." msgstr "マーカー %(marker)s が見つかりませんでした。" msgid "Maximum number of floating IPs exceeded" msgstr "Floating IP の最大数を超えました" #, python-format msgid "Maximum number of metadata items exceeds %(allowed)d" msgstr "メタデータ項目の最大数が %(allowed)d を超えています" msgid "Maximum number of ports exceeded" msgstr "最大ポート数を超えました" msgid "Maximum number of security groups or rules exceeded" msgstr "セキュリティーグループまたはルールの最大数を超えました" msgid "Metadata item was not found" msgstr "メタデータ項目が見つかりませんでした" msgid "Metadata property key greater than 255 characters" msgstr "メタデータプロパティーのキーが 255 文字を超えています" msgid "Metadata property value greater than 255 characters" msgstr "メタデータプロパティーの値が 255 文字を超えています" msgid "Metadata type should be dict." msgstr "メタデータタイプは dict でなければなりません。" #, python-format msgid "" "Metric %(name)s could not be found on the compute host node %(host)s." "%(node)s." msgstr "" "コンピュートホストノード %(host)s.%(node)s では、メトリック %(name)s は見つか" "りませんでした。" #, python-format msgid "Migration %(id)s for server %(uuid)s is not live-migration." msgstr "" "サーバー %(uuid)s のマイグレーション %(id)s はライブマイグレーションではあり" "ません。" #, python-format msgid "Migration %(migration_id)s could not be found." msgstr "マイグレーション %(migration_id)s が見つかりませんでした。" #, python-format msgid "Migration %(migration_id)s not found for instance %(instance_id)s" msgstr "" "インスタンス %(instance_id)s でマイグレーション %(migration_id)s が見つかりま" "せん" #, python-format msgid "" "Migration %(migration_id)s state of instance %(instance_uuid)s is %(state)s. " "Cannot %(method)s while the migration is in this state." msgstr "" "インスタンス %(instance_uuid)s のマイグレーション %(migration_id)s の状態が " "%(state)s です。マイグレーションがこの状態にある場合、%(method)s を実行できま" "せん。" #, python-format msgid "Migration error: %(reason)s" msgstr "マイグレーションエラー: %(reason)s" msgid "Migration is not supported for LVM backed instances" msgstr "" "マイグレーションは LVM 形式のイメージを使用するインスタンスではサポートされて" "いません" #, python-format msgid "" "Migration not found for instance %(instance_id)s with status %(status)s." msgstr "" "状態が %(status)s のインスタンス %(instance_id)s のマイグレーションが見つかり" "ません。" #, python-format msgid "Migration pre-check error: %(reason)s" msgstr "マイグレーション事前検査エラー: %(reason)s" #, python-format msgid "Migration select destinations error: %(reason)s" msgstr "マイグレーション先の選択エラー: %(reason)s" #, python-format msgid "Missing arguments: %s" msgstr "引数 %s がありません" msgid "Missing device UUID." 
msgstr "デバイス UUID がありません。" msgid "Missing disabled reason field" msgstr "「無効化の理由」フィールドがありません" msgid "Missing forced_down field" msgstr "forced_down フィールドがありません" msgid "Missing imageRef attribute" msgstr "imageRef 属性が指定されていません" #, python-format msgid "Missing keys: %s" msgstr "キーがありません: %s" msgid "Missing parameter dict" msgstr "パラメーター dict が指定されていません" #, python-format msgid "" "More than one instance is associated with fixed IP address '%(address)s'." msgstr "" "複数のインスタンスが Fixed IP アドレス '%(address)s' に割り当てられています。" msgid "" "More than one possible network found. Specify network ID(s) to select which " "one(s) to connect to." msgstr "" "複数の使用可能なネットワークが見つかりました。接続先のネットワークを選択する" "には、ネットワーク ID を指定してください。" msgid "More than one swap drive requested." msgstr "複数のスワップドライブが要求されました。" #, python-format msgid "Multi-boot operating system found in %s" msgstr "%s 内にブート可能なオペレーティングシステムが複数見つかりました" msgid "Multiple X-Instance-ID headers found within request." msgstr "リクエストに複数の X-Instance-ID ヘッダーが検出されました。" msgid "Multiple X-Tenant-ID headers found within request." msgstr "リクエストに複数の X-Tenant-ID ヘッダーが検出されました。" #, python-format msgid "Multiple floating IP pools matches found for name '%s'" msgstr "名前が '%s' の Floating IP プールが複数見つかりました" #, python-format msgid "Multiple floating IPs are found for address %(address)s." msgstr "アドレス %(address)s に対して複数の Floating IP が見つかりました。" msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we do " "not return uptime for just one host." msgstr "" "複数のホストが VMWare vCenter ドライバーによって管理されている可能性がありま" "す。このため ホスト単位の稼働時間は返しません。" msgid "Multiple possible networks found, use a Network ID to be more specific." msgstr "" "使用可能なネットワークが複数見つかりました。ネットワーク ID を具体的に指定し" "てください。" #, python-format msgid "" "Multiple security groups found matching '%s'. Use an ID to be more specific." msgstr "" "'%s' に一致するセキュリティーグループが複数見つかりました。より具体的な ID を" "使用してください。" msgid "Must input network_id when request IP address" msgstr "IP アドレスを要求するときは、network_id を入力する必要があります" msgid "Must not input both network_id and port_id" msgstr "network_id と port_id の両方を入力しないでください" msgid "" "Must specify host_ip, host_username and host_password to use vmwareapi." "VMwareVCDriver" msgstr "" "vmwareapi.VMwareVCDriver を使用するためのホスト IP、ユーザー名、ホストパス" "ワードを指定する必要があります" msgid "Must supply a positive value for max_number" msgstr "max_number には正の値を指定する必要があります" msgid "Must supply a positive value for max_rows" msgstr "max_rows には正の値を指定する必要があります" #, python-format msgid "Network %(network_id)s could not be found." msgstr "ネットワーク %(network_id)s が見つかりませんでした。" #, python-format msgid "" "Network %(network_uuid)s requires a subnet in order to boot instances on." msgstr "" "ネットワーク %(network_uuid)s でインスタンスをブートするには、サブネットが必" "要です。" #, python-format msgid "Network could not be found for bridge %(bridge)s" msgstr "ブリッジ %(bridge)s のネットワークが見つかりませんでした" #, python-format msgid "Network could not be found for instance %(instance_id)s." msgstr "インスタンス %(instance_id)s のネットワークが見つかりませんでした。" msgid "Network not found" msgstr "ネットワークが見つかりません" msgid "" "Network requires port_security_enabled and subnet associated in order to " "apply security groups." msgstr "" "セキュリティーグループを適用するには、ネットワークが port_security_enabled に" "なっていて、サブネットが関連付けられている必要があります。" msgid "New volume must be detached in order to swap." msgstr "スワップを行うには、新規ボリュームを切断する必要があります。" msgid "New volume must be the same size or larger." msgstr "新規ボリュームは同じサイズか、それ以上でなければなりません。" #, python-format msgid "No Block Device Mapping with id %(id)s." 
msgstr "ID %(id)s を持つブロックデバイスマッピングがありません。" msgid "No Unique Match Found." msgstr "1 つだけ一致するデータが見つかりません。" msgid "No compute host specified" msgstr "コンピュートホストが指定されていません" #, python-format msgid "No configuration information found for operating system %(os_name)s" msgstr "オペレーティングシステム %(os_name)s に関する設定情報が見つかりません" #, python-format msgid "No device with MAC address %s exists on the VM" msgstr "MAC アドレス %s を持つデバイスが VM にありません" #, python-format msgid "No device with interface-id %s exists on VM" msgstr "interface-id %s を持つデバイスが VM にありません" #, python-format msgid "No disk at %(location)s" msgstr "%(location)s にディスクがありません" #, python-format msgid "No fixed IP addresses available for network: %(net)s" msgstr "ネットワーク %(net)s で使用可能な Fixed IP アドレスがありません。" msgid "No fixed IPs associated to instance" msgstr "Fixed IP がインスタンスに割り当てられていません" msgid "No free nbd devices" msgstr "空きの nbd デバイスがありません" msgid "No host available on cluster" msgstr "クラスター上に使用可能なホストがありません" msgid "No hosts found to map to cell, exiting." msgstr "セルにマッピングするホストが見つかりません。処理を終了します。" #, python-format msgid "No hypervisor matching '%s' could be found." msgstr "'%s' と合致するハイパーバイザーが見つかりませんでした。" msgid "No image locations are accessible" msgstr "イメージの場所にアクセスできません" #, python-format msgid "" "No live migration URI configured and no default available for " "\"%(virt_type)s\" hypervisor virtualization type." msgstr "" "ライブマイグレーションの URI が設定されておらず、ハイパーバイザーの仮想化タイ" "プの \"%(virt_type)s\" で使用可能なデフォルトが存在しません。" msgid "No more floating IPs available." msgstr "使用可能な Floating IP はこれ以上ありません。" #, python-format msgid "No more floating IPs in pool %s." msgstr "プール %s 内に Floating IP はこれ以上ありません。" #, python-format msgid "No mount points found in %(root)s of %(image)s" msgstr "%(image)s の %(root)s にマウントポイントが見つかりません" #, python-format msgid "No operating system found in %s" msgstr "%s 内にオペレーティングシステムが見つかりません" msgid "No root disk defined." msgstr "ルートディスクが定義されていません。" #, python-format msgid "" "No specific network was requested and none are available for project " "'%(project_id)s'." msgstr "" "特定のネットワークが要求されず、プロジェクト '%(project_id)s' で利用可能な" "ネットワークがありません。" msgid "No valid host found for cold migrate" msgstr "コールドマイグレーションに有効なホストが見つかりません" msgid "No valid host found for resize" msgstr "サイズ変更の対象として有効なホストが見つかりません" #, python-format msgid "No valid host was found. %(reason)s" msgstr "有効なホストが見つかりませんでした。%(reason)s" #, python-format msgid "No volume Block Device Mapping at path: %(path)s" msgstr "ボリュームのブロックデバイスマッピングがパス %(path)s にありません" #, python-format msgid "No volume Block Device Mapping with id %(volume_id)s." msgstr "" "ID %(volume_id)s を持つボリュームのブロックデバイスマッピングがありません。" #, python-format msgid "Node %s could not be found." msgstr "ノード %s が見つかりませんでした。" #, python-format msgid "Not able to acquire a free port for %(host)s" msgstr "%(host)s 用の未使用ポートを取得できません" #, python-format msgid "Not able to bind %(host)s:%(port)d, %(error)s" msgstr "%(host)s:%(port)d をバインドできません。%(error)s" #, python-format msgid "" "Not all Virtual Functions of PF %(compute_node_id)s:%(address)s are free." msgstr "" "PF %(compute_node_id)s の %(address)s のすべての Virtual Function に空きがあ" "るとは限りません。" msgid "Not an rbd snapshot" msgstr "rbd スナップショットではありません" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "イメージ %(image_id)s への権限がありません。" msgid "Not authorized." msgstr "権限がありません。" msgid "Not enough parameters to build a valid rule." msgstr "有効なルールを作成するだけの十分なパラメータがありません" msgid "Not stored in rbd" msgstr "rbd 内に保管されていません" msgid "Nothing was archived." 
msgstr "アーカイブは行われませんでした" #, python-format msgid "Nova requires libvirt version %s or greater." msgstr "Nova には libvirt バージョン %s 以降が必要です。" msgid "Number of Rows Archived" msgstr "アーカイブ済みの行数" #, python-format msgid "Object action %(action)s failed because: %(reason)s" msgstr "オブジェクトのアクション %(action)s が失敗しました。原因: %(reason)s" msgid "Old volume is attached to a different instance." msgstr "旧ボリュームは別のインスタンスに接続されています。" #, python-format msgid "One or more hosts already in availability zone(s) %s" msgstr "アベイラビリティーゾーン %s に 1 つ以上のホストが既にあります" msgid "Only administrators may list deleted instances" msgstr "削除済みインスタンスの一覧を取得できるのは管理者のみです。" msgid "Origin header does not match this host." msgstr "オリジンヘッダーがこのホストに一致しません。" msgid "Origin header not valid." msgstr "オリジンヘッダーが無効です。" msgid "Origin header protocol does not match this host." msgstr "オリジンヘッダープロトコルがこのホストに一致しません。" #, python-format msgid "PCI Device %(node_id)s:%(address)s not found." msgstr "PCI デバイス %(node_id)s:%(address)s が見つかりません。" #, python-format msgid "PCI alias %(alias)s is not defined" msgstr "PCI エイリアス %(alias)s が定義されていません" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is %(status)s instead of " "%(hopestatus)s" msgstr "" "PCI デバイス %(compute_node_id)s:%(address)s は %(hopestatus)s ではなく " "%(status)s です" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead of " "%(hopeowner)s" msgstr "" "PCI デバイス %(compute_node_id)s:%(address)s の所有者は、%(hopeowner)s ではな" "く %(owner)s です" #, python-format msgid "PCI device %(id)s not found" msgstr "PCI デバイス %(id)s が見つかりません" #, python-format msgid "PCI device request %(requests)s failed" msgstr "PCI デバイス要求 %(requests)s が失敗しました" #, python-format msgid "Page size %(pagesize)s forbidden against '%(against)s'" msgstr "ページサイズ %(pagesize)s は \"%(against)s\" に対して禁止されています" #, python-format msgid "Page size %(pagesize)s is not supported by the host." msgstr "ページサイズ %(pagesize)s はこのホストではサポートされていません。" #, python-format msgid "" "Parameters %(missing_params)s not present in vif_details for vif %(vif_id)s. " "Check your Neutron configuration to validate that the macvtap parameters are " "correct." msgstr "" "vif %(vif_id)s について、パラメーター %(missing_params)s が vif_details 内に" "存在しません。macvtapパラーメーターが正確に設定されているか Neutron の設定を" "確認してください。" #, python-format msgid "Path %s must be LVM logical volume" msgstr "パス %s は LVM 論理ボリュームでなければなりません" msgid "Paused" msgstr "一時停止済み" msgid "Personality file limit exceeded" msgstr "パーソナリティーファイル数の上限を超えました" #, python-format msgid "" "Physical Function %(compute_node_id)s:%(address)s, related to VF " "%(compute_node_id)s:%(vf_address)s is %(status)s instead of %(hopestatus)s" msgstr "" "VF %(compute_node_id)s に関連する %(address)s のPhysical Function " "%(compute_node_id)s。%(vf_address)s は %(status)s ではなく %(hopestatus)sです" #, python-format msgid "Physical network is missing for network %(network_uuid)s" msgstr "ネットワーク %(network_uuid)s に対応する物理ネットワークがありません" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "ポリシーにより %(action)s の実行が許可されていません" #, python-format msgid "Port %(port_id)s is still in use." msgstr "ポート %(port_id)s はまだ使用中です。" #, python-format msgid "Port %(port_id)s not usable for instance %(instance)s." msgstr "ポート %(port_id)s はインスタンス %(instance)s では使用できません。" #, python-format msgid "" "Port %(port_id)s not usable for instance %(instance)s. 
Value %(value)s " "assigned to dns_name attribute does not match instance's hostname " "%(hostname)s" msgstr "" "ポート %(port_id)s はインスタンス %(instance)s では使用できません。dns_name " "属性に割り当てられた値 %(value)s がインスタンスのホスト名 %(hostname)s と合致" "しません" #, python-format msgid "Port %(port_id)s requires a FixedIP in order to be used." msgstr "ポート %(port_id)s を使用するには、Fixed IP が必要です。" #, python-format msgid "Port %s is not attached" msgstr "ポート %s は接続されていません" #, python-format msgid "Port id %(port_id)s could not be found." msgstr "ポート ID %(port_id)s が見つかりませんでした。" #, python-format msgid "Provided video model (%(model)s) is not supported." msgstr "指定されたビデオモデル (%(model)s) はサポートされていません。" #, python-format msgid "Provided watchdog action (%(action)s) is not supported." msgstr "" "指定されたウォッチドッグアクション (%(action)s) はサポートされていません。" msgid "QEMU guest agent is not enabled" msgstr "QEMU ゲストエージェントが有効になっていません" #, python-format msgid "Quiescing is not supported in instance %(instance_id)s" msgstr "インスタンス %(instance_id)s を正常に終了することができません。" #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "クォータクラス %(class_name)s が見つかりませんでした。" msgid "Quota could not be found" msgstr "クォータが見つかりませんでした" #, python-format msgid "" "Quota exceeded for %(overs)s: Requested %(req)s, but already used %(used)s " "of %(allowed)s %(overs)s" msgstr "" "%(overs)s のクオータを超えました: %(req)s がリクエストされましたが、既に " "%(allowed)s のうち %(used)s を使用しています (%(overs)s)" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "リソース %(overs)s がクォータを超過しました" msgid "Quota exceeded, too many key pairs." msgstr "クォータを超過しました。キーペアが多すぎます。" msgid "Quota exceeded, too many server groups." msgstr "クォータを超過しました。サーバーグループが多すぎます。" msgid "Quota exceeded, too many servers in group" msgstr "クォータを超過しました。グループ内のサーバーが多すぎます" #, python-format msgid "Quota exists for project %(project_id)s, resource %(resource)s" msgstr "" "プロジェクト %(project_id)s、リソース %(resource)s のクォータが存在します" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "プロジェクト %(project_id)s のクォータが見つかりませんでした。" #, python-format msgid "" "Quota for user %(user_id)s in project %(project_id)s could not be found." msgstr "" "プロジェクト %(project_id)s のユーザー %(user_id)s のクォータが見つかりません" "でした。" #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be greater than or equal to " "already used and reserved %(minimum)s." msgstr "" "%(resource)s のクォータ上限 %(limit)s は、既に使用もしくは予約されている数で" "ある %(minimum)s 以上でなければなりません。" #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be less than or equal to " "%(maximum)s." msgstr "" "%(resource)s のクォータ上限 %(limit)s は、%(maximum)s 以下でなければなりませ" "ん。" msgid "Request body and URI mismatch" msgstr "リクエスト本文と URI の不一致" msgid "Request is too large." msgstr "リクエストが大きすぎます。" #, python-format msgid "Request of image %(image_id)s got BadRequest response: %(response)s" msgstr "" "イメージ %(image_id)s のリクエストに対して BadRequest のレスポンスが返されま" "した: %(response)s" #, python-format msgid "RequestSpec not found for instance %(instance_uuid)s" msgstr "インスタンス %(instance_uuid)s に対する RequestSpec が見つかりません" msgid "Requested CPU control policy not supported by host" msgstr "ホストは要求された CPU の制御ポリシーをサポートしません" #, python-format msgid "" "Requested hardware '%(model)s' is not supported by the '%(virt)s' virt driver" msgstr "" "要求されたハードウェア '%(model)s' は '%(virt)s' virt ドライバーではサポート" "されません" #, python-format msgid "Requested image %(image)s has automatic disk resize disabled." 
msgstr "" "要求されたイメージ %(image)s ではディスクサイズの自動変更が無効になっていま" "す。" msgid "" "Requested instance NUMA topology cannot fit the given host NUMA topology" msgstr "" "要求されたインスタンス NUMA トポロジーは、指定されたホスト NUMA トポロジーに" "適合しません" msgid "" "Requested instance NUMA topology together with requested PCI devices cannot " "fit the given host NUMA topology" msgstr "" "PCI デバイスとともに要求されたインスタンスの NUMA トポロジーは、指定されたホ" "ストの NUMA トポロジーに適合できません" #, python-format msgid "" "Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to " "satisfy for vcpus count %(vcpus)d" msgstr "" "要求された vCPU 制限 %(sockets)d:%(cores)d:%(threads)d は、vCPU カウント " "%(vcpus)d を満たすことができません" #, python-format msgid "Rescue device does not exist for instance %s" msgstr "インスタンス %s 用のレスキューデバイスが存在しません" #, python-format msgid "Resize error: %(reason)s" msgstr "サイズ変更エラー: %(reason)s" msgid "Resize to zero disk flavor is not allowed." msgstr "ディスクが 0 のフレーバーにサイズ変更することはできません。" msgid "Resource could not be found." msgstr "リソースを見つけられませんでした。" msgid "Resumed" msgstr "再開済み" #, python-format msgid "Root element name should be '%(name)s' not '%(tag)s'" msgstr "" "ルートエレメント名は '%(tag)s' ではなく '%(name)s' でなければなりません" #, python-format msgid "Running batches of %i until complete" msgstr "完了するまで %i のバッチを実行します" #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" "スケジューラーホストフィルター %(filter_name)s が見つかりませんでした。" #, python-format msgid "Security group %(name)s is not found for project %(project)s" msgstr "" "プロジェクト %(project)s のセキュリティーグループ %(name)s が見つかりません" #, python-format msgid "" "Security group %(security_group_id)s not found for project %(project_id)s." msgstr "" "プロジェクト %(project_id)s のセキュリティーグループ %(security_group_id)s が" "見つかりません。" #, python-format msgid "Security group %(security_group_id)s not found." msgstr "セキュリティーグループ %(security_group_id)s が見つかりません。" #, python-format msgid "" "Security group %(security_group_name)s already exists for project " "%(project_id)s." msgstr "" "プロジェクト %(project_id)s にはセキュリティーグループ " "%(security_group_name)s がすでに存在します。" #, python-format msgid "" "Security group %(security_group_name)s not associated with the instance " "%(instance)s" msgstr "" "セキュリティーグループ %(security_group_name)s がインスタンス %(instance)s に" "関連付けられていません" msgid "Security group id should be uuid" msgstr "セキュリティーグループ ID は UUID でなければなりません" msgid "Security group name cannot be empty" msgstr "セキュリティーグループ名を空にすることはできません" msgid "Security group not specified" msgstr "セキュリティーグループが指定されていません" #, python-format msgid "Server disk was unable to be resized because: %(reason)s" msgstr "サーバーディスクのサイズを変更できませんでした。理由: %(reason)s" msgid "Server does not exist" msgstr "サーバーが存在しません。" #, python-format msgid "ServerGroup policy is not supported: %(reason)s" msgstr "サーバーグループポリシーはサポートされていません: %(reason)s" msgid "ServerGroupAffinityFilter not configured" msgstr "ServerGroupAffinityFilter が設定されていません" msgid "ServerGroupAntiAffinityFilter not configured" msgstr "ServerGroupAntiAffinityFilter が設定されていません" msgid "ServerGroupSoftAffinityWeigher not configured" msgstr "ServerGroupSoftAffinityWeigher が設定されていません" msgid "ServerGroupSoftAntiAffinityWeigher not configured" msgstr "ServerGroupSoftAntiAffinityWeigher が設定されていません" #, python-format msgid "Service %(service_id)s could not be found." msgstr "サービス %(service_id)s が見つかりませんでした。" #, python-format msgid "Service %s not found." msgstr "サービス %s が見つかりません。" msgid "Service is unavailable at this time." msgstr "サービスが現在利用できません。" #, python-format msgid "Service with host %(host)s binary %(binary)s exists." 
msgstr "" "ホスト %(host)s のバイナリー %(binary)s を使用するサービスが存在します。" #, python-format msgid "Service with host %(host)s topic %(topic)s exists." msgstr "ホスト %(host)s のトピック %(topic)s を使用するサービスが存在します。" msgid "Set admin password is not supported" msgstr "設定された管理者パスワードがサポートされません" #, python-format msgid "Share '%s' is not supported" msgstr "シェア '%s' はサポートされません" #, python-format msgid "Share level '%s' cannot have share configured" msgstr "シェアレベル '%s' に設定されたシェアがありません" #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "スナップショット %(snapshot_id)s が見つかりませんでした。" msgid "Some required fields are missing" msgstr "いくつかの必須フィールドがありません。" #, python-format msgid "" "Something went wrong when deleting a volume snapshot: rebasing a " "%(protocol)s network disk using qemu-img has not been fully tested" msgstr "" "ボリュームのスナップショットの削除中に問題が発生しました: qemu-img を使用し" "て %(protocol)s のネットワークディスクを再設定することは十分に検証されていま" "せん。" msgid "Sort direction size exceeds sort key size" msgstr "ソート方向の数がソートキーの数より多いです" msgid "Sort key supplied was not valid." msgstr "指定されたソートキーが無効でした。" msgid "Specified fixed address not assigned to instance" msgstr "指定された固定アドレスはインスタンスに割り当てられていません" msgid "Started" msgstr "開始済み" msgid "Stopped" msgstr "停止済み" #, python-format msgid "Storage error: %(reason)s" msgstr "ストレージエラー: %(reason)s" #, python-format msgid "Storage policy %s did not match any datastores" msgstr "ストレージポリシー %s がどのデータストアにも一致しませんでした" msgid "Success" msgstr "成功" msgid "Suspended" msgstr "休止済み" msgid "Swap drive requested is larger than instance type allows." msgstr "" "要求されたスワップドライブのサイズが、インスタンスタイプで許可されているサイ" "ズを超えています。" msgid "Table" msgstr "テーブル" #, python-format msgid "Task %(task_name)s is already running on host %(host)s" msgstr "タスク %(task_name)s はホスト %(host)s 上で既に実行中です" #, python-format msgid "Task %(task_name)s is not running on host %(host)s" msgstr "タスク %(task_name)s はホスト %(host)s 上で実行されていません" #, python-format msgid "The PCI address %(address)s has an incorrect format." msgstr "PCI アドレス %(address)s の形式が正しくありません。" #, python-format msgid "The console port range %(min_port)d-%(max_port)d is exhausted." msgstr "" "コンソール用のポート範囲 %(min_port)d-%(max_port)d を使い切られています。" msgid "The created instance's disk would be too small." msgstr "作成されたインスタンスのディスクが小さすぎます。" msgid "The current driver does not support preserving ephemeral partitions." msgstr "現行ドライバーは、一時パーティションの保持をサポートしていません。" msgid "The default PBM policy doesn't exist on the backend." msgstr "デフォルト PBM ポリシーがバックエンドに存在しません。" msgid "The floating IP request failed with a BadRequest" msgstr "Floating IP のリクエストが BadRequest により失敗しました。" msgid "" "The instance requires a newer hypervisor version than has been provided." msgstr "" "このインスタンスは使用されているものよりも新しいバージョンのハイパーバイザー" "を必要とします。" #, python-format msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d" msgstr "定義したポート %(ports)d の数が上限 %(quota)d を超えています" #, python-format msgid "The provided RNG device path: (%(path)s) is not present on the host." msgstr "指定された RNG デバイスパス (%(path)s) がホスト上にありません。" msgid "The request is invalid." msgstr "リクエスト内容が無効です。" #, python-format msgid "" "The requested amount of video memory %(req_vram)d is higher than the maximum " "allowed by flavor %(max_vram)d." msgstr "" "要求されたビデオメモリー容量 %(req_vram)d がフレーバー %(max_vram)d で許可さ" "れている最大値を上回っています。" msgid "The requested availability zone is not available" msgstr "要求されたアベイラビリティーゾーンは使用不可です" msgid "The requested functionality is not supported." 
msgstr "要求された機能はサポートされていません。" #, python-format msgid "The specified cluster '%s' was not found in vCenter" msgstr "指定されたクラスター '%s' が vCenter で見つかりませんでした" #, python-format msgid "The supplied device path (%(path)s) is in use." msgstr "指定されたデバイスパス (%(path)s) は使用中です。" #, python-format msgid "The supplied device path (%(path)s) is invalid." msgstr "指定されたデバイスパス (%(path)s) が無効です。" #, python-format msgid "" "The supplied disk path (%(path)s) already exists, it is expected not to " "exist." msgstr "" "指定されたディスクパス (%(path)s) は既にに存在します。これは存在しているべき" "ではありません。" msgid "The supplied hypervisor type of is invalid." msgstr "指定されたハイパーバイザーは無効です。" msgid "The target host can't be the same one." msgstr "宛先ホストを同じホストにすることはできません。" #, python-format msgid "The token '%(token)s' is invalid or has expired" msgstr "トークン \"%(token)s\" が無効か、有効期限切れです" #, python-format msgid "" "The volume cannot be assigned the same device name as the root device %s" msgstr "" "ボリュームにルートデバイス %s と同じデバイス名を割り当てることはできません" msgid "There are not enough hosts available." msgstr "使用可能なホストが不足しています。" #, python-format msgid "There is no such action: %s" msgstr "このようなアクションはありません: %s" #, python-format msgid "" "This compute node's hypervisor is older than the minimum supported version: " "%(version)s." msgstr "" "このコンピュートノードのハイパーバイザーがサポートされる最小バージョンよりも" "古くなっています: %(version)s。" msgid "" "This method needs to be called with either networks=None and port_ids=None " "or port_ids and networks as not none." msgstr "" "このメソッドを呼び出すには、networks と port_ids に None を設定するか、 " "port_ids と networks に None 以外の値を設定する必要があります。" #, python-format msgid "This rule already exists in group %s" msgstr "指定されたルールは既にグループ %s に存在しています。" #, python-format msgid "" "This service is older (v%(thisver)i) than the minimum (v%(minver)i) version " "of the rest of the deployment. Unable to continue." msgstr "" "このサービスが実装環境の残りの部分の最小 (v%(minver)i) バージョンよりも古く " "(v%(thisver)i) なっています。処理を継続できません。" msgid "Timeout waiting for response from cell" msgstr "セルからの応答を待機中にタイムアウトになりました" #, python-format msgid "Timeout while checking if we can live migrate to host: %s" msgstr "" "ホスト %s にライブマイグレーションできるか確認中にタイムアウトが発生しました" msgid "To and From ports must be integers" msgstr "開始ポートと終了ポートは整数でなければなりません" msgid "Token not found" msgstr "トークンが見つかりません" msgid "Triggering crash dump is not supported" msgstr "クラッシュダンプのトリガーはサポートされません" msgid "Type and Code must be integers for ICMP protocol type" msgstr "ICMP プロトコルのタイプおよびコードは整数でなければなりません" msgid "UEFI is not supported" msgstr "UEFI はサポートされません" #, python-format msgid "" "Unable to associate floating IP %(address)s to any fixed IPs for instance " "%(id)s. Instance has no fixed IPv4 addresses to associate." msgstr "" "インスタンス %(id)s において Floating IP %(address)s を Fixed IP に割り当てる" "ことができません。インスタンスに割り当てを行うべき Fixed IPv4 がありません。" #, python-format msgid "" "Unable to associate floating IP %(address)s to fixed IP %(fixed_address)s " "for instance %(id)s. 
Error: %(error)s" msgstr "" "インスタンス %(id)s において Floating IP %(address)s を Fixed IP " "%(fixed_address)s に割り当てることができません。エラー: %(error)s" #, python-format msgid "Unable to convert image to %(format)s: %(exp)s" msgstr "イメージを %(format)s に変換できません: %(exp)s" #, python-format msgid "Unable to convert image to raw: %(exp)s" msgstr "イメージを raw 形式に変換できません: %(exp)s" #, python-format msgid "Unable to determine disk bus for '%s'" msgstr "ディスク '%s' のバスを判別できません" #, python-format msgid "Unable to determine disk prefix for %s" msgstr "%s のディスクプレフィックスを判別できません" #, python-format msgid "Unable to find host for Instance %s" msgstr "インスタンス %s のホストが見つかりません" msgid "Unable to find iSCSI Target" msgstr "iSCSI ターゲットが見つかりません" msgid "Unable to find volume" msgstr "ボリュームが見つかりません" msgid "Unable to get host UUID: /etc/machine-id does not exist" msgstr " ホストの UUID を取得できません: /etc/machine-id が存在しません" msgid "Unable to get host UUID: /etc/machine-id is empty" msgstr "ホストの UUID が取得できません: /etc/machine-id が空です" msgid "" "Unable to launch multiple instances with a single configured port ID. Please " "launch your instance one by one with different ports." msgstr "" "1個の作成済みのポート ID で複数のインスタンスの起動はできません。1 つ 1 つの" "インスタンスを別々のポートで起動してください。" #, python-format msgid "" "Unable to migrate %(instance_uuid)s to %(dest)s: Lack of memory(host:" "%(avail)s <= instance:%(mem_inst)s)" msgstr "" "%(instance_uuid)s を %(dest)s にマイグレーションできません: メモリー不足です " "(host:%(avail)s <= instance:%(mem_inst)s)" #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too large(available " "on destination host:%(available)s < need:%(necessary)s)" msgstr "" "%(instance_uuid)s をマイグレーションできません: インスタンスのディスクが大き" "すぎます (宛先ホスト上の使用可能量: %(available)s < 必要量: %(necessary)s)" #, python-format msgid "" "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." msgstr "" "インスタンス (%(instance_id)s) を現在と同じホスト (%(host)s) にマイグレーショ" "ンすることはできません。" msgid "Unable to resize disk down." msgstr "ディスクのサイズを縮小することができません。" msgid "Unable to set password on instance" msgstr "インスタンスにパスワードを設定できません" msgid "Unable to shrink disk." msgstr "ディスクを縮小できません。" #, python-format msgid "Unacceptable CPU info: %(reason)s" msgstr "指定できない CPU 情報: %(reason)s" msgid "Unacceptable parameters." msgstr "指定できないパラメーターです。" #, python-format msgid "Unavailable console type %(console_type)s." msgstr "コンソールタイプ %(console_type)s は使用できません。" msgid "" "Undefined Block Device Mapping root: BlockDeviceMappingList contains Block " "Device Mappings from multiple instances." msgstr "" "ブロックデバイスマッピングのルートが定義されていません: " "BlockDeviceMappingList に複数のインスタンスのブロックデバイスのマッピングが含" "まれています。" #, python-format msgid "Unexpected aggregate action %s" msgstr "想定しないアグリゲートのアクション %s" msgid "Unexpected type adding stats" msgstr "統計の追加中に想定しないタイプが見つかりました" #, python-format msgid "Unexpected vif_type=%s" msgstr "想定しない vif_type=%s" msgid "Unknown" msgstr "不明" msgid "Unknown action" msgstr "不明なアクション" #, python-format msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat." msgstr "" "不明なコンフィグドライブ形式 %(format)s です。「iso9660」または「vfat」のいず" "れかを選択してください。" #, python-format msgid "Unknown delete_info type %s" msgstr "不明な delete_info タイプ %s" #, python-format msgid "Unknown image_type=%s" msgstr "不明な image_type=%s" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "不明なクォータリソース %(unknown)s。" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "ソート方向が不明です。'desc' または 'asc' でなければなりません" #, python-format msgid "Unknown type: %s" msgstr "不明なタイプ: %s" msgid "Unrecognized legacy format." 
msgstr "認識できない以前のフォーマットです。" #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "認識されない read_deleted 値 '%s'" #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "CONF.running_deleted_instance_action で認識されない値 '%s'" #, python-format msgid "Unshelve attempted but the image %s cannot be found." msgstr "復元が試行されましたが、イメージ %s が見つかりません。" msgid "Unsupported Content-Type" msgstr "サポートされない Content-Type" #, python-format msgid "User %(username)s not found in password file." msgstr "パスワードファイルにユーザー %(username)s が見つかりません。" #, python-format msgid "User %(username)s not found in shadow file." msgstr "shadow ファイルにユーザー %(username)s が見つかりません。" msgid "User data needs to be valid base 64." msgstr "ユーザーデータは有効な Base64 でなければなりません。" msgid "User does not have admin privileges" msgstr "ユーザーに管理者権限がありません" msgid "" "Using different block_device_mapping syntaxes is not allowed in the same " "request." msgstr "同じリクエスト内で異なる block_device_mapping 指定は使用できません。" #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "バージョン %(req_ver)s はこの API ではサポートされていません。最小" "は%(min_ver)s、最大は %(max_ver)s です。" msgid "Virtual Interface creation failed" msgstr "仮想インターフェースの作成に失敗しました" msgid "Virtual interface plugin failed" msgstr "仮想インターフェースの接続に失敗しました" #, python-format msgid "Virtual machine mode '%(vmmode)s' is not recognised" msgstr "仮想マシンモード '%(vmmode)s' は認識できません" #, python-format msgid "Virtual machine mode '%s' is not valid" msgstr "仮想マシンモード '%s' が有効ではありません" #, python-format msgid "Virtualization type '%(virt)s' is not supported by this compute driver" msgstr "" "このコンピュートドライバーでは仮想化タイプ '%(virt)s' はサポートされません" #, python-format msgid "Volume %(volume_id)s could not be attached. Reason: %(reason)s" msgstr "ボリューム %(volume_id)s を接続できません。理由: %(reason)s" #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "ボリューム %(volume_id)s が見つかりませんでした。" #, python-format msgid "" "Volume %(volume_id)s did not finish being created even after we waited " "%(seconds)s seconds or %(attempts)s attempts. And its status is " "%(volume_status)s." msgstr "" "%(seconds)s 秒待機し、%(attempts)s 回試行したものの、ボリューム " "%(volume_id)s を作成することができませんでした。状況は %(volume_status)s で" "す。" msgid "Volume does not belong to the requested instance." msgstr "ボリュームが要求されたインスタンスに属していません。" #, python-format msgid "" "Volume encryption is not supported for %(volume_type)s volume %(volume_id)s" msgstr "" "%(volume_type)s のボリューム %(volume_id)s に関してボリュームの暗号化はサポー" "トされません。" #, python-format msgid "" "Volume is smaller than the minimum size specified in image metadata. Volume " "size is %(volume_size)i bytes, minimum size is %(image_min_disk)i bytes." msgstr "" "ボリュームがイメージのメタデータで指定された最小サイズよりも小さくなっていま" "す。ボリュームサイズは %(volume_size)i バイト、最小サイズは " "%(image_min_disk)i バイトです。" #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " "support custom block size" msgstr "" "ボリュームによってブロックサイズが設定されますが、現在の libvirt ハイパーバイ" "ザー '%s' はカスタムブロックサイズをサポートしていません" msgid "When resizing, instances must change flavor!" msgstr "サイズ変更の際は、インスタンスのフレーバーを変更する必要があります。" #, python-format msgid "Wrong quota method %(method)s used on resource %(res)s" msgstr "" "リソース %(res)s で使用されるクォータメソッド %(method)s が正しくありません" msgid "X-Forwarded-For is missing from request." msgstr "リクエストに X-Forwarded-For がありません。" msgid "X-Instance-ID header is missing from request." 
msgstr "リクエストに X-Instance-ID ヘッダーがありません。" msgid "X-Instance-ID-Signature header is missing from request." msgstr "X-Instance-ID-Signature ヘッダーがリクエストにありません。" msgid "X-Metadata-Provider is missing from request." msgstr "リクエストに X-Metadata-Provider がありません。" msgid "X-Tenant-ID header is missing from request." msgstr "リクエストに X-Tenant-ID ヘッダーがありません。" msgid "You are not allowed to delete the image." msgstr "このイメージの削除は許可されていません。" msgid "" "You are not authorized to access the image the instance was started with." msgstr "" "インスタンスの起動時に使用されたイメージへのアクセスが許可されていません。" msgid "You must implement __call__" msgstr "__call__ を実装しなければなりません" msgid "You should specify images_rbd_pool flag to use rbd images." msgstr "" "rbd イメージを使用するには images_rbd_pool フラグを指定する必要があります。" msgid "You should specify images_volume_group flag to use LVM images." msgstr "" "LVM イメージを使用するには images_volume_group フラグを指定する必要がありま" "す。" msgid "Zero floating IPs available." msgstr "使用可能な Floating IP はありません。" msgid "admin password can't be changed on existing disk" msgstr "既存のディスク上で管理者パスワードを変更することはできません" msgid "cannot understand JSON" msgstr "JSON を解釈できません" msgid "clone() is not implemented" msgstr "clone() は実装されていません" #, python-format msgid "connect info: %s" msgstr "接続情報: %s" #, python-format msgid "connecting to: %(host)s:%(port)s" msgstr "%(host)s:%(port)s に接続中です" msgid "direct_snapshot() is not implemented" msgstr "direct_snapshot() が実装されていません" #, python-format msgid "disk type '%s' not supported" msgstr "ディスクタイプ '%s' はサポートされていません" #, python-format msgid "empty project id for instance %s" msgstr "インスタンス %s のプロジェクト ID が空です" msgid "error setting admin password" msgstr "管理者パスワードの設定中にエラーが発生しました" #, python-format msgid "error: %s" msgstr "エラー: %s" #, python-format msgid "failed to generate X509 fingerprint. Error message: %s" msgstr "X.509 フィンガープリントの生成に失敗しました。エラーメッセージ: %s" msgid "failed to generate fingerprint" msgstr "フィンガープリントの生成に失敗しました" msgid "filename cannot be None" msgstr "ファイル名を None にすることはできません" msgid "floating IP is already associated" msgstr "Floating IP は既に割り当てられています" msgid "floating IP not found" msgstr "Floating IP が見つかりません" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s は %(backing_file)s でサポートされています" #, python-format msgid "href %s does not contain version" msgstr "href %s にバージョンが含まれていません" msgid "image already mounted" msgstr "イメージは既にマウントされています" #, python-format msgid "instance %s is not running" msgstr "インスタンス %s は実行されていません" msgid "instance is a required argument to use @refresh_cache" msgstr "@refresh_cache を使用する場合、インスタンスは必須の引数です" msgid "instance is not in a suspended state" msgstr "インスタンスは休止状態ではありません" msgid "instance is not powered on" msgstr "インスタンスの電源がオンになっていません" msgid "instance is powered off and cannot be suspended." 
msgstr "インスタンスは電源オフになっています。休止できません。" #, python-format msgid "instance_id %s could not be found as device id on any ports" msgstr "instance_id %s がデバイス ID に設定されたポートが見つかりませんでした" msgid "is_public must be a boolean" msgstr "is_public はブール値でなければなりません" msgid "keymgr.fixed_key not defined" msgstr "keymgr.fixed_key が定義されていません" msgid "l3driver call to add floating IP failed" msgstr "Floating IP を追加するための l3driver の呼び出しが失敗しました" #, python-format msgid "libguestfs installed but not usable (%s)" msgstr "libguestfs はインストールされていますが、使用できません (%s)" #, python-format msgid "libguestfs is not installed (%s)" msgstr "libguestfs がインストールされていません (%s)" #, python-format msgid "marker [%s] not found" msgstr "マーカー [%s] が見つかりません" #, python-format msgid "max rows must be <= %(max_value)d" msgstr "最大行数は %(max_value)d 以上である必要があります" msgid "max_count cannot be greater than 1 if an fixed_ip is specified." msgstr "" "fixed_ip が指定されている場合、max_count を 1 より大きくすることはできませ" "ん。" msgid "min_count must be <= max_count" msgstr "min_count は max_count 以下でなければなりません" #, python-format msgid "nbd device %s did not show up" msgstr "nbd デバイス %s が出現しません" msgid "nbd unavailable: module not loaded" msgstr "nbd が使用不可です: モジュールがロードされていません" #, python-format msgid "no match found for %s" msgstr "%s に合致するものが見つかりません" #, python-format msgid "no usable parent snapshot for volume %s" msgstr "ボリューム %s に関して使用可能な親スナップショットがありません" #, python-format msgid "no write permission on storage pool %s" msgstr "ストレージプール %s に書き込み権限がありません" #, python-format msgid "not able to execute ssh command: %s" msgstr "ssh コマンドを実行できません: %s" msgid "old style configuration can use only dictionary or memcached backends" msgstr "" "古い形式の設定では、ディクショナリーと memcached のバックエンドのみを使用でき" "ます" msgid "operation time out" msgstr "操作がタイムアウトしました" #, python-format msgid "partition %s not found" msgstr "パーティション %s が見つかりません" #, python-format msgid "partition search unsupported with %s" msgstr "パーティションの検索は %s ではサポートされていません" msgid "pause not supported for vmwareapi" msgstr "vmwareapi では一時停止はサポートされていません" msgid "printable characters with at least one non space character" msgstr "1 つ以上のスペースではない文字を含む印刷可能な文字。" msgid "printable characters. Can not start or end with whitespace." msgstr "印刷可能な文字。空白で開始または終了することはできません。" #, python-format msgid "qemu-img failed to execute on %(path)s : %(exp)s" msgstr "%(path)s で qemu-img を実行できませんでした: %(exp)s" #, python-format msgid "qemu-nbd error: %s" msgstr "qemu-nbd エラー: %s" msgid "rbd python libraries not found" msgstr "rbd python ライブラリーが見つかりません" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" "read_deleted に指定できるのは 'no', 'yes', 'only' のいずれかです。%r は指定で" "きません。" msgid "serve() can only be called once" msgstr "serve() は一度しか呼び出せません" msgid "service is a mandatory argument for DB based ServiceGroup driver" msgstr "サービスは DB ベースの ServiceGroup ドライバーの必須の引数です" msgid "service is a mandatory argument for Memcached based ServiceGroup driver" msgstr "service は Memcached ベースの ServiceGroup ドライバーの必須の引数です" msgid "set_admin_password is not implemented by this driver or guest instance." 
msgstr "" "set_admin_password は、このドライバーまたはゲストインスタンスでは実装されてい" "ません。" #, python-format msgid "snapshot for %s" msgstr "%s のスナップショット" msgid "snapshot_id required in create_info" msgstr "create_info には snapshot_id が必要です" msgid "token not provided" msgstr "トークンが指定されていません" msgid "too many body keys" msgstr "本文にキーが多すぎます" msgid "unpause not supported for vmwareapi" msgstr "vmwareapi では一時停止解除はサポートされていません" #, python-format msgid "vg %s must be LVM volume group" msgstr "vg %s は LVM ボリュームグループでなければなりません" #, python-format msgid "vhostuser_sock_path not present in vif_details for vif %(vif_id)s" msgstr "vhostuser_sock_path が vif %(vif_id)s の vif_details にありません" #, python-format msgid "vif type %s not supported" msgstr "vif タイプ %s はサポートされません" msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "この vif_driver の実装では vif_type パラメーターが必要です" #, python-format msgid "volume %s already attached" msgstr "ボリューム %s は既に接続されています" #, python-format msgid "" "volume '%(vol)s' status must be 'in-use'. Currently in '%(status)s' status" msgstr "" "ボリューム '%(vol)s' の状況は「使用中」でなければなりませんが、現在の状況は " "'%(status)s' です" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315688.893605 nova-32.0.0/nova/locale/ko_KR/0000775000175000017500000000000000000000000015771 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3696086 nova-32.0.0/nova/locale/ko_KR/LC_MESSAGES/0000775000175000017500000000000000000000000017556 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/locale/ko_KR/LC_MESSAGES/nova.po0000664000175000017500000032050700000000000021070 0ustar00zuulzuul00000000000000# Translations template for nova. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the nova project. # # Translators: # Seunghyo Chun , 2013 # Seunghyo Chun , 2013 # Sungjin Kang , 2013 # Sungjin Kang , 2013 # Andreas Jaeger , 2016. #zanata # Lee Jongwon , 2016. #zanata # Ian Y. Choi , 2017. #zanata # Lee Dogeon , 2018. #zanata # Kuemjong Jeong , 2023. #zanata # Sion Shin , 2023. #zanata msgid "" msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-07-04 18:13+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2023-08-26 06:20+0000\n" "Last-Translator: Sion Shin \n" "Language: ko_KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "%(address)s is not a valid IP v4/6 address." msgstr "%(address)s는 v4/6주소에 맞지 않은 IP입니다." #, python-format msgid "" "%(binary)s attempted direct database access which is not allowed by policy" msgstr "" "%(binary)s에서 정책적으로 허용되지 않는 직접 데이터베이스 액세스를 시도함" #, python-format msgid "%(cidr)s is not a valid IP network." msgstr "%(cidr)s은(는) 올바른 IP 네트워크가 아닙니다." #, python-format msgid "%(field)s should not be part of the updates." msgstr "%(field)s은(는) 업데이트의 일부여서는 안 됩니다. 
" #, python-format msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB" msgstr "%(memtotal)dMB를 예상했지만 %(memsize)dMB의 메모리가 지정됨" #, python-format msgid "%(path)s is not on local storage: %(reason)s" msgstr "%(path)s이(가) 로컬 스토리지에 없음: %(reason)s" #, python-format msgid "%(path)s is not on shared storage: %(reason)s" msgstr "%(path)s이(가) 공유 스토리지에 없음: %(reason)s" #, python-format msgid "%(total)i rows matched query %(meth)s, %(done)i migrated" msgstr "%(total)i 행이 %(meth)s 조회와 일치함, %(done)i이(가) 마이그레이션됨" #, python-format msgid "%(type)s hypervisor does not support PCI devices" msgstr "%(type)s 하이퍼바이저가 PCI 디바이스를 지원하지 않음" #, python-format msgid "%s does not support disk hotplug." msgstr "%s에서 디스크 hotplug를 지원하지 않습니다." #, python-format msgid "%s format is not supported" msgstr "%s 형식이 지원되지 않음" #, python-format msgid "%s is not supported." msgstr "%s이(가) 지원되지 않습니다." #, python-format msgid "%s must be either 'MANUAL' or 'AUTO'." msgstr "%s은(는) 'MANUAL' 또는 'AUTO'여야 합니다. " #, python-format msgid "'%(other)s' should be an instance of '%(cls)s'" msgstr "'%(other)s'은(는) '%(cls)s'의 인스턴스여야 함" msgid "'qemu-img info' parsing failed." msgstr "'qemu-img info' 구문 분석에 실패했습니다. " #, python-format msgid "'rxtx_factor' argument must be a float between 0 and %g" msgstr "'rxtx_factor' 인수는 0에서 %g 까지의 부동수여야 함 " #, python-format msgid "A NetworkModel is required in field %s" msgstr "NetworkModel이 필드 %s에 필요함" #, python-format msgid "" "A conflict was encountered attempting to invoke the placement API at URL " "%(url)s: %(error)s" msgstr "" "%(url)s 에서 Placement API 호출하는 동안 충돌이 발생했습니다. : %(error)s" msgid "" "A conflict was encountered attempting to reshape a provider tree: $(error)s" msgstr "공급자 트리를 다시 형성하는 동안 충돌이 발생했습니다. :$(error)s" #, python-format msgid "" "A conflict was encountered attempting to update resource provider %(uuid)s " "(generation %(generation)d): %(error)s" msgstr "" "리소스 공급자 %(uuid)s (generation %(generation)d) 업데이트를 시도하는 동안 " "충돌이 발생했습니다. : %(error)s" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "API 버전 문자열 %(version)s 형식이 올바르지 않습니다. 형식은 MajorNum." "MinorNum 이어야 합니다." #, python-format msgid "API version %(version)s is not supported on this method." msgstr "API 버전 %(version)s에서는 이 메소드를 지원하지 않습니다.." msgid "Access list not available for public flavors." msgstr "액세스 목록이 공용 플레이버에 사용할 수 없습니다. " #, python-format msgid "Action %s not found" msgstr "조치 %s을(를) 찾을 수 없음" #, python-format msgid "" "Action for request_id %(request_id)s on instance %(instance_uuid)s not found" msgstr "" "%(instance_uuid)s 인스턴스에서 request_id %(request_id)s에 대한 조치를 찾을 " "수 없음" #, python-format msgid "Action: '%(action)s', calling method: %(meth)s, body: %(body)s" msgstr "조치: '%(action)s', 호출 메소드: %(meth)s, 본문: %(body)s" #, python-format msgid "Active live migration for instance %(instance_id)s not found" msgstr "" "인스턴스 %(instance_id)s에 대한 활성화 상태의 마이그레이션을 찾을 수 없음" #, python-format msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries" msgstr "" "%(retries)s 재시도 후 %(id)s 집합에 대한 메타데이터 추가를 실패했습니다" #, python-format msgid "Aggregate %(aggregate_id)s already has host %(host)s." msgstr "%(aggregate_id)s 집합에 이미 %(host)s 호스트가 있습니다. " #, python-format msgid "Aggregate %(aggregate_id)s could not be found." msgstr "%(aggregate_id)s 집합을 찾을 수 없습니다. " #, python-format msgid "Aggregate %(aggregate_id)s has no host %(host)s." msgstr "%(aggregate_id)s 집합에 %(host)s 호스트가 없습니다. 
" #, python-format msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." msgstr "" "%(aggregate_id)s 집합에 %(metadata_key)s 키를 갖는 메타데이터가 없습니다. " #, python-format msgid "Aggregate %(aggregate_name)s already exists." msgstr "%(aggregate_name)s 집합이 이미 존재합니다. " #, python-format msgid "Aggregate %s does not support empty named availability zone" msgstr "%s 집합에서 이름 지정된 비어 있는 가용 구역을 지원하지 않음" #, python-format msgid "An invalid 'name' value was provided. The name must be: %(reason)s" msgstr "" "올바르지 않은 'name' 값이 제공되었습니다. 이름은 %(reason)s이어야 합니다." msgid "An unknown error has occurred. Please try your request again." msgstr "알 수 없는 오류가 발생했습니다. 요청을 다시 시도하십시오. " msgid "An unknown exception occurred." msgstr "알 수 없는 예외가 발생했습니다. " #, python-format msgid "Architecture name '%(arch)s' is not recognised" msgstr "아키텍처 이름 '%(arch)s'이(가) 인식되지 않음" #, python-format msgid "Architecture name '%s' is not valid" msgstr "아키텍처 이름 '%s'이(가) 올바르지 않음" #, python-format msgid "" "Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty pool" msgstr "" "비어 있는 풀에서 PCI 디바이스 %(compute_node_id)s:%(address)s을(를) 이용하려" "고시도함" msgid "Attempted overwrite of an existing value." msgstr "기존 값을 겹쳐쓰려 했습니다." #, python-format msgid "Attribute not supported: %(attr)s" msgstr "지원하지 않는 속성입니다: %(attr)s" msgid "Bad Request - Feature is not supported in Nova" msgstr "Bad Request - Nova에서 지원되지 않는 기능입니다." msgid "Bad Request - Invalid Parameters" msgstr "잘못 된 요청 - 유효하지 않은 매개변수" #, python-format msgid "Bad network format: missing %s" msgstr "잘못된 네트워크 형식: %s 누락" msgid "Bad networks format" msgstr "잘못된 네트워크 형식" #, python-format msgid "Bad networks format: network uuid is not in proper format (%s)" msgstr "잘못된 네트워크 형식: 네트워크 uuid의 적절한 형식(%s)이 아님" #, python-format msgid "Bad prefix for network in cidr %s" msgstr "cidr %s의 네트워크에 대한 접두부가 올바르지 않음" #, python-format msgid "" "Binding failed for port %(port_id)s, please check neutron logs for more " "information." msgstr "" "포트 %(port_id)s에 대해 바인딩에 실패했습니다. 자세한 정보는 neutron 로그를 " "확인하십시오. " msgid "Blank components" msgstr "비어 있는 구성요소" msgid "" "Blank volumes (source: 'blank', dest: 'volume') need to have non-zero size" msgstr "공백 볼륨(소스: 'blank', 대상: 'volume')은 크기가 0(영)이 아니어야 함" #, python-format msgid "Block Device %(id)s is not bootable." msgstr "%(id)s 블록 디바이스로 부팅할 수 없습니다." #, python-format msgid "" "Block Device Mapping %(volume_id)s is a multi-attach volume and is not valid " "for this operation." msgstr "" "블록 디바이스 맵핑 %(volume_id)s은(는) 다중 연결 볼륨이므로 이 작업에는 올바" "르지 않습니다." msgid "Block Device Mapping cannot be converted to legacy format. " msgstr "블록 디바이스 맵핑을 레거시 형식으로 전환할 수 없습니다. " msgid "Block Device Mapping is Invalid." msgstr "블록 디바이스 맵핑이 올바르지 않습니다. " #, python-format msgid "Block Device Mapping is Invalid: %(details)s" msgstr "블록 디바이스 맵핑이 올바르지 않습니다: %(details)s" msgid "" "Block Device Mapping is Invalid: Boot sequence for the instance and image/" "block device mapping combination is not valid." msgstr "" "블록 디바이스 맵핑이 올바르지 않습니다: 인스턴스와 이미지/블록 디바이스 맵핑 " "조합에 대한 부트 시퀀스가 올바르지 않습니다." msgid "" "Block Device Mapping is Invalid: You specified more local devices than the " "limit allows" msgstr "" "블록 디바이스 맵핑이 올바르지 않습니다: 허용 한도보다 많은 로컬 디바이스를 지" "정했습니다." #, python-format msgid "Block Device Mapping is Invalid: failed to get image %(id)s." msgstr "" "블록 디바이스 맵핑이 올바르지 않습니다: %(id)s 이미지를 가져오지 못했습니다. " #, python-format msgid "Block Device Mapping is Invalid: failed to get snapshot %(id)s." msgstr "" "블록 디바이스 맵핑이 올바르지 않습니다: %(id)s 스냅샷을 가져오지 못했습니다. 
" #, python-format msgid "Block Device Mapping is Invalid: failed to get volume %(id)s." msgstr "" "블록 디바이스 맵핑이 올바르지 않습니다: %(id)s 볼륨을 가져오지 못했습니다. " msgid "Block migration can not be used with shared storage." msgstr "블록 마이그레이션은 공유 스토리지에서 사용할 수 없습니다. " msgid "Boot index is invalid." msgstr "부트 인덱스가 올바르지 않습니다." #, python-format msgid "Build of instance %(instance_uuid)s aborted: %(reason)s" msgstr "인스턴스 %(instance_uuid)s의 빌드가 중단됨: %(reason)s" #, python-format msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s" msgstr "인스턴스 %(instance_uuid)s의 빌드가 다시 예정됨: %(reason)s" #, python-format msgid "BuildRequest not found for instance %(uuid)s" msgstr "%(uuid)s 인스턴스의 BuildRequest를 찾을 수 없음" msgid "CPU and memory allocation must be provided for all NUMA nodes" msgstr "모든 NUMA 노드에 CPU 및 메모리 할당을 제공해야 함" #, python-format msgid "" "CPU doesn't have compatibility.\n" "\n" "%(ret)s\n" "\n" "Refer to %(u)s" msgstr "" "CPU가 호환성을 갖지 않습니다.\n" "\n" "%(ret)s\n" "\n" "%(u)s을(를) 참조하십시오. " #, python-format msgid "CPU number %(cpunum)d is assigned to two nodes" msgstr "CPU 번호 %(cpunum)d이(가) 두 개의 노드에 지정됨" #, python-format msgid "CPU number %(cpunum)d is larger than max %(cpumax)d" msgstr "CPU 번호 %(cpunum)d은(는) 최대값 %(cpumax)d 보다 큼" #, python-format msgid "CPU number %(cpuset)s is not assigned to any node" msgstr "CPU 번호 %(cpuset)s이(가) 어느 노드에도 지정되지 않았음" msgid "Can not add access to a public flavor." msgstr "공용 플레이버에 대한 액세스를 추가할 수 없습니다. " msgid "Can not find requested image" msgstr "요청된 이미지를 찾을 수 없음" #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "%d 신임 정보에 대한 인증 정보를 처리할 수 없음" msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "인스턴스 libvirt 구성에서 루트 디바이스 경로를 검색할 수 없음" #, python-format msgid "" "Cannot '%(action)s' instance %(server_id)s while it is in %(attr)s %(state)s" msgstr "" "다음 상태에서는 인스턴스 %(server_id)s에 '%(action)s' 조치를 수행할 수 없음: " "%(attr)s %(state)s" #, python-format msgid "Cannot add host to aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "호스트를 집합 %(aggregate_id)s에 추가할 수 없습니다. 이유: %(reason)s." msgid "Cannot attach one or more volumes to multiple instances" msgstr "복수 인스턴스에 하나 이상의 볼륨을 첨부할 수 없음" #, python-format msgid "Cannot call %(method)s on orphaned %(objtype)s object" msgstr "고아 %(objtype)s 오브젝트에서 %(method)s 메소드를 호출할 수 없음" #, python-format msgid "" "Cannot determine the parent storage pool for %s; cannot determine where to " "store images" msgstr "" "%s;의 상위 스토리지 풀을 판별할 수 없습니다. 이미지 저장 위치를 판별할 수 없" "습니다." msgid "Cannot find image for rebuild" msgstr "다시 빌드할 이미지를 찾을 수 없음" #, python-format msgid "Cannot remove host %(host)s in aggregate %(id)s" msgstr "%(id)s 집합에서 %(host)s 호스트를 제거할 수 없습니다" #, python-format msgid "Cannot remove host from aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "%(aggregate_id)s 집합에서 호스트를 제거할 수 없습니다. 이유: %(reason)s." msgid "Cannot rescue a volume-backed instance" msgstr "volume-backed 인스턴스를 구조할 수 없음" msgid "" "Cannot set cpu thread pinning policy in a non dedicated cpu pinning policy" msgstr "전용이 아닌 CPU 고정 정책에 CPU 스레드 고정 정책을 설정할 수 없음" msgid "Cannot set realtime policy in a non dedicated cpu pinning policy" msgstr "전용이 아닌 CPU 고정 정책에서 실시간 정책을 설정할 수 없음" #, python-format msgid "Cannot update aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "%(aggregate_id)s 집합을 업데이트할 수 없습니다. 이유: %(reason)s." #, python-format msgid "" "Cannot update metadata of aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "%(aggregate_id)s 집합 메타데이터를 업데이트할 수 없습니다. 이유: %(reason)s." 
#, python-format msgid "Cell %(uuid)s has no mapping." msgstr "셀 %(uuid)s에 맵핑이 없습니다." msgid "Cell0 cannot be disabled." msgstr "셀0를 비활성화 할 수 없습니다." #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "변경하면 다음 자원에 대한 사용량이 0보다 작아집니다: %(unders)s" #, python-format msgid "Cinder API version %(version)s is not available." msgstr "Cinder API 버전 %(version)s 을 찾을 수 없습니다." #, python-format msgid "Class %(class_name)s could not be found: %(exception)s" msgstr "%(class_name)s 클래스를 찾을 수 없음: %(exception)s" #, python-format msgid "Compute host %(host)s could not be found." msgstr "%(host)s 계산 호스트를 찾을 수 없습니다. " #, python-format msgid "Compute host %s not found." msgstr "계산 호스트 %s을(를) 찾을 수 없음." #, python-format msgid "Compute service of %(host)s is still in use." msgstr "%(host)s Compute 서비스를 사용하고 있습니다." #, python-format msgid "Compute service of %(host)s is unavailable at this time." msgstr "%(host)s Compute 서비스를 지금 사용할 수 없습니다." #, python-format msgid "Config drive format '%(format)s' is not supported." msgstr "구성 드라이브 형식 '%(format)s'은(는) 지원되지 않습니다." #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt hypervisor " "'%s' does not support selecting CPU models" msgstr "" "구성이 명시적 CPU 모델을 요청했지만 현재 libvirt 하이퍼바이저 '%s'이(가) CPU " "모델 선택을 지원하지 않음" msgid "Configuration is Invalid." msgstr "잘못된 구성입니다." #, python-format msgid "Conflict deleting image. Reason: %(reason)s." msgstr "이미지 삭제에 충돌이 발생했습니다. 이유: %(reason)s" #, python-format msgid "" "Conflict updating instance %(instance_uuid)s, but we were unable to " "determine the cause" msgstr "" "인스턴스 %(instance_uuid)s 업데이트 중에 충돌이 발생했지만 원인을 판별할 수 " "없음" #, python-format msgid "" "Conflict updating instance %(instance_uuid)s. Expected: %(expected)s. " "Actual: %(actual)s" msgstr "" "인스턴스 %(instance_uuid)s 업데이트 중에 충돌이 발생했습니다. 예상: " "%(expected)s. 실제: %(actual)s" #, python-format msgid "Connection to cinder host failed: %(reason)s" msgstr "Cinder 호스트 연결하지 못했습니다: %(reason)s" #, python-format msgid "Connection to glance host %(server)s failed: %(reason)s" msgstr "Glance 호스트 %(server)s에 연결하는 데 실패: %(reason)s" #, python-format msgid "Connection to keystone host failed: %(reason)s" msgstr "Keystone 호스트 연결이 실패했습니다: %(reason)s" #, python-format msgid "Connection to libvirt lost: %s" msgstr "libvirt 연결 유실: %s" #, python-format msgid "" "Console log output could not be retrieved for instance %(instance_id)s. " "Reason: %(reason)s" msgstr "" "%(instance_id)s 인스턴스의 콘솔 로그 출력을 검색할 수 없습니다. 이유: " "%(reason)s" msgid "Constraint not met." msgstr "제한조건이 만족되지 않았습니다. " #, python-format msgid "Converted to raw, but format is now %s" msgstr "원시로 변환되었지만 형식은 지금 %s임" #, python-format msgid "Could not attach image to loopback: %s" msgstr "루프백에 이미지를 첨부할 수 없음: %s" #, python-format msgid "Could not fetch image %(image_id)s" msgstr "%(image_id)s 이미지를 페치할 수 없음" #, python-format msgid "Could not find a handler for %(driver_type)s volume." msgstr "%(driver_type)s 볼륨에 대한 핸들러를 찾을 수 없습니다. " #, python-format msgid "Could not find binary %(binary)s on host %(host)s." msgstr "%(host)s 호스트에서 2진 %(binary)s을(를) 찾을 수 없습니다. " #, python-format msgid "Could not find config at %(path)s" msgstr "%(path)s에서 구성을 찾을 수 없음" msgid "Could not find the datastore reference(s) which the VM uses." msgstr "VM이 사용하는 데이터 저장소 참조를 찾을 수 없습니다. " #, python-format msgid "Could not load line %(line)s, got error %(error)s" msgstr "%(line)s 행을 로드할 수 없음. 
%(error)s 오류가 발생했음" #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "%(path)s에서 페이스트 앱 '%(name)s'을(를) 로드할 수 없음" #, python-format msgid "" "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s" msgstr "" "vfat 구성 드라이브를 마운트할 수 없습니다. %(operation)s에 실패했습니다. 오" "류: %(error)s" #, python-format msgid "Could not upload image %(image_id)s" msgstr "%(image_id)s 이미지를 업로드할 수 없음" msgid "Creation of virtual interface with unique mac address failed" msgstr "고유 MAC 주소가 있는 가상 인터페이 생성에 실패했습니다" #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "데이터 저장소 regex %s이(가) 데이터 저장소와 일치하지 않음" msgid "Datetime is in invalid format" msgstr "Datetime이 올바르지 않은 형식임" msgid "Default PBM policy is required if PBM is enabled." msgstr "PBM을 사용하는 경우 기본 PBM 정책이 필요합니다." #, python-format msgid "Device '%(device)s' not found." msgstr "'%(device)s' 디바이스를 찾을 수 없습니다." #, python-format msgid "Device detach failed for %(device)s: %(reason)s" msgstr "장치 해제가 %(device)s: %(reason)s 때문에 실패했습니다." msgid "Device name contains spaces." msgstr "장치 이름에 공백이 있습니다." msgid "Device name empty or too long." msgstr "장치 이름이 비어있거나 너무 깁니다." #, python-format msgid "Device type mismatch for alias '%s'" msgstr "'%s' 별명의 디바이스 유형이 일치하지 않음" #, python-format msgid "Disk format %(disk_format)s is not acceptable" msgstr "Disk format %(disk_format)s를 알 수 없습니다." #, python-format msgid "Disk info file is invalid: %(reason)s" msgstr "디스크 정보 파일이 올바르지 않음: %(reason)s" #, python-format msgid "Driver Error: %s" msgstr "드라이버 오류: %s" msgid "" "Ephemeral disks requested are larger than the instance type allows. If no " "size is given in one block device mapping, flavor ephemeral size will be " "used." msgstr "" "임시 디스크는 인스턴스 유형이 허용하는 것 이상으로 요청 될 수 있습니다. 하나" "의 블록 장치 매핑에서 크기가 주어지지 않는다면, 선호하는 임시 크기로 사용 됩" "니다." #, python-format msgid "Error attempting to run %(method)s" msgstr "%(method)s을(를) 실행하는 중에 오류 발생" #, python-format msgid "" "Error destroying the instance on node %(node)s. Provision state still " "'%(state)s'." msgstr "" "%(node)s 노드에서 인스턴스를 영구 삭제하는 중 오류가 발생했습니다. 프로비저" "닝 상태는 아직 '%(state)s'입니다." 
#, python-format msgid "Error during unshelve instance %(instance_id)s: %(reason)s" msgstr "%(instance_id)s 인스턴스 언쉘브 중 오류 발생: %(reason)s" #, python-format msgid "" "Error from libvirt while getting domain info for %(instance_name)s: [Error " "Code %(error_code)s] %(ex)s" msgstr "" "%(instance_name)s의 도메인 정보를 가져오는 중 libvirt에서 오류 발생: [오류 코" "드 %(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "%(instance_name)s 검색 중 libvirt에서 오류 발생: [오류 코드 %(error_code)s] " "%(ex)s" #, python-format msgid "" "Error from libvirt while quiescing %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "%(instance_name)s을(를) Quiesce하는 중 libvirt에서 오류 발생: [오류 코드 " "%(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while set password for username \"%(user)s\": [Error Code " "%(error_code)s] %(ex)s" msgstr "" "사용자 이름 \"%(user)s\"에 대한 비밀번호 설정 중 libvirt에서 오류 발생: [오" "류 코드 %(error_code)s] %(ex)s" #, python-format msgid "" "Error mounting %(device)s to %(dir)s in image %(image)s with libguestfs " "(%(e)s)" msgstr "" "%(image)s 이미지에서 %(device)s을(를) %(dir)s에 마운트하는 중 오류 발생" "(libguestfs(%(e)s)) " #, python-format msgid "Error mounting %(image)s with libguestfs (%(e)s)" msgstr "libguestfs(%(e)s)를 갖는 %(image)s 마운트 오류" #, python-format msgid "Error when creating resource monitor: %(monitor)s" msgstr "자원 모니터 작성 중에 오류 발생: %(monitor)s" #, python-format msgid "Event %(event)s not found for action id %(action_id)s" msgstr "조치 ID %(action_id)s에 대한 %(event)s 이벤트를 찾을 수 없음" msgid "Event must be an instance of nova.virt.event.Event" msgstr "이벤트는 nova.virt.event.Event의 인스턴스여야 함" #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for instance " "%(instance_uuid)s. Last exception: %(exc_reason)s" msgstr "" "%(instance_uuid)s 인스턴스에 대한 최대 스케줄링 시도 %(max_attempts)d을(를) " "초과했습니다. 마지막 예외: %(exc_reason)s" #, python-format msgid "" "Exceeded max scheduling retries %(max_retries)d for instance " "%(instance_uuid)s during live migration" msgstr "" "인스턴스에 대한 최대 스케줄링 재시도 %(max_retries)d을(를) 초과" "함%(instance_uuid)s" #, python-format msgid "Exceeded maximum number of retries. %(reason)s" msgstr "최대 재시도 횟수를 초과했습니다. %(reason)s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "uuid를 예상했지만 %(uuid)s을(를) 수신했습니다. " msgid "Extracting vmdk from OVA failed." msgstr "OVA에서 vmdk의 압축을 풀지 못했습니다." #, python-format msgid "Failed to access port %(port_id)s: %(reason)s" msgstr "포트 %(port_id)s에 액세스 실패: %(reason)s" #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "%s 오류로 인해 네트워크 할당 실패. 다시 스케줄하지 않음" msgid "Failed to allocate the network(s), not rescheduling." msgstr "네트워크 할당 실패. 
다시 스케줄하지 않음" #, python-format msgid "Failed to attach network adapter device to %(instance_uuid)s" msgstr "네트워크 어댑터 디바이스를 %(instance_uuid)s에 접속하는 데 실패함" #, python-format msgid "Failed to deploy instance: %(reason)s" msgstr "인스턴스 배치 실패: %(reason)s" #, python-format msgid "Failed to detach PCI device %(dev)s: %(reason)s" msgstr "PCI 디바이스 %(dev)s을(를) 분리하지 못함: %(reason)s" #, python-format msgid "Failed to detach network adapter device from %(instance_uuid)s" msgstr "네트워크 어댑터 디바이스를 %(instance_uuid)s에서 분리하는 데 실패함" #, python-format msgid "Failed to encrypt text: %(reason)s" msgstr "텍스트를 암호화하지 못했습니다: %(reason)s" #, python-format msgid "Failed to get resource provider with UUID %(uuid)s" msgstr "UUID로 리소스 공급자 가져오기 실패: %(uuid)s" #, python-format msgid "Failed to launch instances: %(reason)s" msgstr "인스턴스 실행 실패: %(reason)s" #, python-format msgid "Failed to map partitions: %s" msgstr "파티션을 맵핑하지 못했음: %s" #, python-format msgid "Failed to mount filesystem: %s" msgstr "파일 시스템 마운트 실패: %s" #, python-format msgid "Failed to power off instance: %(reason)s" msgstr "인스턴스 전원 끔 실패: %(reason)s" #, python-format msgid "Failed to power on instance: %(reason)s" msgstr "인스턴스 전원 꼄 실패: %(reason)s" #, python-format msgid "Failed to provision instance %(inst)s: %(reason)s" msgstr "인스턴스 %(inst)s 프로비저닝 실패: %(reason)s" #, python-format msgid "Failed to quiesce instance: %(reason)s" msgstr "인스턴스를 종료하지 못했습니다: %(reason)s" #, python-format msgid "Failed to read or write disk info file: %(reason)s" msgstr "디스크 정보 파일을 읽거나 쓰지 못함: %(reason)s" #, python-format msgid "Failed to reboot instance: %(reason)s" msgstr "인스턴스 재부팅 실패: %(reason)s" #, python-format msgid "Failed to remove volume(s): (%(reason)s)" msgstr "볼륨을 제거하지 못함: (%(reason)s)" #, python-format msgid "Failed to request Ironic to rebuild instance %(inst)s: %(reason)s" msgstr "%(inst)s 인스턴스를 다시 빌드하기 위한 아이로닉 요청 실패: %(reason)s" #, python-format msgid "Failed to resume instance: %(reason)s" msgstr "인스턴스 재개 실패: %(reason)s" #, python-format msgid "Failed to run qemu-img info on %(path)s : %(error)s" msgstr "%(path)s에서 qemu-img 정보 실행 실패: %(error)s" #, python-format msgid "Failed to set admin password on %(instance)s because %(reason)s" msgstr "%(reason)s 때문에 %(instance)s에 관리 비밀번호를 설정하지 못했음" #, python-format msgid "Failed to suspend instance: %(reason)s" msgstr "인스턴스 일시중단 실패: %(reason)s" msgid "" "Failed to synchronize the placement service with resource provider " "information supplied by the compute host." msgstr "" "컴퓨트 호스트의 리소스 공급자 정보와 Placement 서비스를 동기화하지 못했습니" "다. " #, python-format msgid "Failed to terminate instance: %(reason)s" msgstr "인스턴스 종료 실패: %(reason)s" #, python-format msgid "Failed to unplug virtual interface: %(reason)s" msgstr "가상 인터페이스 해제 실패: %(reason)s" msgid "Failure prepping block device." msgstr "블록 디바이스 준비 실패" msgid "Feature not supported with instances that have accelerators." msgstr "가속화된 인스턴스에서 지원되지 않는 기능입니다." #, python-format msgid "File %(file_path)s could not be found." msgstr "%(file_path)s 파일을 찾을 수 없습니다. " #, python-format msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s." msgstr "" "고정 IP %(ip)s이(가) 네트워크 %(network_id)s에 대해 올바른 IP 주소가 아닙니" "다. " #, python-format msgid "Fixed IP %s is already in use." msgstr "고정 IP %s을(를) 이미 사용하고 있습니다." #, python-format msgid "" "Fixed IP address %(address)s is already in use on instance %(instance_uuid)s." msgstr "" "고정 IP 주소 %(address)s이(가) 이미 %(instance_uuid)s 인스턴스에서 사용되고 " "있습니다." #, python-format msgid "Fixed IP not found for address %(address)s." 
msgstr "%(address)s 주소의 Fixed IP를 찾을 수 없습니다." #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "%(flavor_id)s 플레이버를 찾을 수 없습니다. " #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s." msgstr "" "플레이버 %(flavor_id)s에 %(extra_specs_key)s 키가 있는 추가 스펙이 없습니다." #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(key)s." msgstr "플레이버 %(flavor_id)s에 %(key)s 키가 있는 추가 스펙이 없습니다." #, python-format msgid "" "Flavor %(id)s extra spec cannot be updated or created after %(retries)d " "retries." msgstr "" "%(retries)d번 재시도 후에 Flavor %(id)s 추가 스펙을 업데이트하거나 작성할 수 " "없습니다." #, python-format msgid "" "Flavor access already exists for flavor %(flavor_id)s and project " "%(project_id)s combination." msgstr "" "플레이버 %(flavor_id)s 및 %(project_id)s 프로젝트 조합에 대한 플레이버 액세스" "가 이미 존재합니다. " #, python-format msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination." msgstr "" "%(flavor_id)s / %(project_id)s 조합에 대한 플레이버 액세스를 찾을 수 없습니" "다. " msgid "Flavor used by the instance could not be found." msgstr "인스턴스가 사용한 플레이버를 찾을 수 없습니다. " #, python-format msgid "Flavor with ID %(flavor_id)s already exists." msgstr "ID가 %(flavor_id)s인 플레이버가 이미 있습니다." #, python-format msgid "Flavor with name %(flavor_name)s could not be found." msgstr "이름이 %(flavor_name)s인 플레이버를 찾을 수 없습니다." #, python-format msgid "Flavor with name %(name)s already exists." msgstr "이름이 %(name)s인 플레이버가 이미 있습니다." #, python-format msgid "" "Flavor's disk is smaller than the minimum size specified in image metadata. " "Flavor disk is %(flavor_size)i bytes, minimum size is %(image_min_disk)i " "bytes." msgstr "" "플레이버의 디스크가 이미지 메타데이터에서 지정된 최소 크기보다 작습니다. 플레" "이버 디스크는 %(flavor_size)i바이트이고 최소 크기는 %(image_min_disk)i바이트" "입니다. " #, python-format msgid "" "Flavor's disk is too small for requested image. Flavor disk is " "%(flavor_size)i bytes, image is %(image_size)i bytes." msgstr "" "플레이버의 디스크가 요청된 이미지에 비해 너무 작습니다. 플레이버 디스크는 " "%(flavor_size)i바이트이고 이미지는 %(image_size)i바이트입니다. " msgid "Flavor's memory is too small for requested image." msgstr "플레이버의 메모리가 요청된 이미지에 대해 너무 작습니다." #, python-format msgid "Floating IP %(address)s association has failed." msgstr "부동 IP %(address)s 연관에 실패했습니다. " #, python-format msgid "Floating IP %(address)s is associated." msgstr "Floating IP %(address)s이(가) 연관되어 있습니다." #, python-format msgid "Floating IP %(address)s is not associated with instance %(id)s." msgstr "Floating IP %(address)s이(가) %(id)s 인스턴스와 연관되지 않았습니다." #, python-format msgid "Floating IP not found for ID %(id)s." msgstr "ID %(id)s의 Floating IP를 찾을 수 없습니다. " #, python-format msgid "Floating IP not found for ID %s" msgstr "ID %s의 Floating IP를 찾을 수 없음" #, python-format msgid "Floating IP not found for address %(address)s." msgstr "%(address)s 주소의 Floating IP를 찾을 수 없습니다." msgid "Floating IP pool not found." msgstr "Floating IP 풀을 찾을 수 없습니다." msgid "Forbidden" msgstr "허용 되지 않은" msgid "" "Forbidden to exceed flavor value of number of serial ports passed in image " "meta." msgstr "" "이미지 메타에 패스된 직렬 포트 수의 플레이버 값 초과가 금지되어 있습니다." msgid "Found no disk to snapshot." msgstr "스냅샷할 디스크를 찾지 못함." msgid "Guest agent is not enabled for the instance" msgstr "게스트 에이전트는 해당 인스터스에 활성화 되지 않습니다." msgid "Guest does not have a console available." msgstr "게스트에 사용할 수 있는 콘솔이 없습니다." #, python-format msgid "Host %(host)s could not be found." msgstr "%(host)s 호스트를 찾을 수 없습니다. 
" #, python-format msgid "Host %(host)s is already mapped to cell %(uuid)s" msgstr "%(host)s 호스트가 이미 %(uuid)s 셀에 맵핑됨" #, python-format msgid "Host '%(name)s' is not mapped to any cell" msgstr "'%(name)s' 호스트가 셀에 맵핑되지 않음" msgid "Host aggregate is not empty" msgstr "호스트 집합이 비어 있지 않습니다" msgid "Host does not support guests with NUMA topology set" msgstr "호스트에서 NUMA 토폴로지 세트가 있는 게스트를 지원하지 않음" msgid "Host does not support guests with custom memory page sizes" msgstr "" "호스트에서 사용자 정의 메모리 페이지 크기를 사용하는 게스트를 지원하지 않음" msgid "Hypervisor driver does not support post_live_migration_at_source method" msgstr "" "하이퍼바이저 드라이버가 post_live_migration_at_source 메소드를 지원하지 않음" #, python-format msgid "Hypervisor virt type '%s' is not valid" msgstr "하이퍼바이저 가상화 유형 '%s'이(가) 올바르지 않음" #, python-format msgid "Hypervisor virtualization type '%(hv_type)s' is not recognised" msgstr "하이퍼바이저 가상화 유형 '%(hv_type)s'이(가) 인식되지 않음" #, python-format msgid "Hypervisor with ID '%s' could not be found." msgstr "ID가 '%s'인 하이퍼바이저를 찾을 수 없습니다. " #, python-format msgid "IP allocation over quota in pool %s." msgstr "%s 풀에서 IP 할당이 할당량을 초과했습니다." msgid "IP allocation over quota." msgstr "IP 할당이 할당량을 초과했습니다." #, python-format msgid "Image %(image_id)s could not be found." msgstr "%(image_id)s 이미지를 찾을 수 없습니다. " #, python-format msgid "Image %(image_id)s is not active." msgstr "%(image_id)s 이미지가 active 상태가 아닙니다. " #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "%(image_id)s 이미지는 허용할 수 없음: %(reason)s" msgid "Image disk size greater than requested disk size" msgstr "이미지 디스크 크기가 요청된 디스크 크기보다 큼" msgid "Image is not raw format" msgstr "이미지가 원시 형식이 아님" msgid "Image metadata limit exceeded" msgstr "이미지 메타데이터 한계 초과" #, python-format msgid "Image model '%(image)s' is not supported" msgstr "이미지 모델 '%(image)s'은(는) 지원되지 않음" msgid "Image not found." msgstr "이미지를 찾을 수 없습니다. " #, python-format msgid "" "Image property '%(name)s' is not permitted to override NUMA configuration " "set against the flavor" msgstr "" "이미지 특성 '%(name)s'은(는) 플레이버에 대해 설정된 NUMA 구성을 대체할 수 없" "음" msgid "" "Image property 'hw_cpu_policy' is not permitted to override CPU pinning " "policy set against the flavor" msgstr "" "이미지 특성 'hw_cpu_policy'는 플레이버에 맞지 않는 CPU 고정 정책 세트를 대체" "할 수 없음" msgid "" "Image property 'hw_cpu_thread_policy' is not permitted to override CPU " "thread pinning policy set against the flavor" msgstr "" "'hw_cpu_thread_policy' 이미지 특성은 Flavor에 대한 CPU 스레드 고정 정책 세트" "를 대체할 수 없음" msgid "Image that the instance was started with could not be found." msgstr "인스턴스가 시작되었던 해당 이미지를 찾을 수 없음. " #, python-format msgid "Image's config drive option '%(config_drive)s' is invalid" msgstr "" "이미지의 구성 드라이브 옵션 '%(config_drive)s'이(가) 올바르지 않습니다. " msgid "" "Images with destination_type 'volume' need to have a non-zero size specified" msgstr "destination_type이 '볼륨'인 이미지는 0이 아닌 크기를 지정해야 함" #, python-format msgid "Import of image %(image_id)s refused: %(reason)s" msgstr "%(image_id)s 이미지의 임포트가 거절되었습니다: %(reason)s" msgid "In ERROR state" msgstr "ERROR 상태에 있음" #, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "%(vm_state)s/%(task_state)s 상태에 있음, RESIZED/None이 아님" #, python-format msgid "In-progress live migration %(id)s is not found for server %(uuid)s." msgstr "" "%(uuid)s 서버의 진행 중인 라이브 마이그레이션 %(id)s을(를) 찾을 수 없습니다." msgid "" "Incompatible settings: ephemeral storage encryption is supported only for " "LVM images." msgstr "" "호환되지 않는 설정: ephemeral 스토리지 암호화가 LVM 이미지에만 지원됩니다." 
#, python-format msgid "Info cache for instance %(instance_uuid)s could not be found." msgstr "%(instance_uuid)s 인스턴스에 대한 정보 캐시를 찾을 수 없습니다." #, python-format msgid "" "Instance %(instance)s and volume %(vol)s are not in the same " "availability_zone. Instance is in %(ins_zone)s. Volume is in %(vol_zone)s" msgstr "" "인스턴스 %(instance)s 및 볼륨 %(vol)s이(가) 같은 availability_zone에 있지 않" "습니다. 인스턴스는 %(ins_zone)s에 볼륨은 %(vol_zone)s에 있음" #, python-format msgid "Instance %(instance)s does not have a port with id %(port)s" msgstr "%(instance)s 인스턴스에 ID가 %(port)s인 포트가 없음" #, python-format msgid "Instance %(instance_id)s cannot be rescued: %(reason)s" msgstr "%(instance_id)s 인스턴스를 구조할 수 없습니다: %(reason)s" #, python-format msgid "Instance %(instance_id)s could not be found." msgstr "%(instance_id)s 인스턴스를 찾을 수 없습니다. " #, python-format msgid "Instance %(instance_id)s has no tag '%(tag)s'" msgstr "인스턴스 %(instance_id)s에 '%(tag)s' 태그가 없음" #, python-format msgid "Instance %(instance_id)s is not in rescue mode" msgstr "%(instance_id)s 인스턴스가 구조 모드에 있지 않습니다" #, python-format msgid "Instance %(instance_id)s is not ready" msgstr "%(instance_id)s 인스턴스가 준비 상태가 아닙니다" #, python-format msgid "Instance %(instance_id)s is not running." msgstr "%(instance_id)s 인스턴스가 실행 중이 아닙니다. " #, python-format msgid "Instance %(instance_id)s is unacceptable: %(reason)s" msgstr "%(instance_id)s 인스턴스는 허용할 수 없음: %(reason)s" #, python-format msgid "Instance %(instance_uuid)s does not have fixed IP '%(ip)s'." msgstr "인스턴스 %(instance_uuid)s 가 고정 IP '%(ip)s'를 가지고 있지 않습니다." #, python-format msgid "Instance %(instance_uuid)s does not specify a NUMA topology" msgstr "인스턴스 %(instance_uuid)s이(가) NUMA 토폴로지를 지정하지 않음" #, python-format msgid "Instance %(instance_uuid)s does not specify a migration context." msgstr "" "인스턴스 %(instance_uuid)s이(가) 마이그레이션 컨텍스트를 지정하지 않습니다. " #, python-format msgid "" "Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while " "the instance is in this state." msgstr "" "%(instance_uuid)s 인스턴스가 %(attr)s %(state)s에 있습니다. 인스턴스가 이 상" "태에 있는 중에는 %(method)s할 수 없습니다." #, python-format msgid "Instance %(instance_uuid)s is locked" msgstr "%(instance_uuid)s 인스턴스가 잠겼음" #, python-format msgid "" "Instance %(instance_uuid)s requires config drive, but it does not exist." msgstr "%(instance_uuid)s 인스턴스에 구성 드라이브가 필요하지만 없습니다." #, python-format msgid "Instance %(name)s already exists." msgstr "%(name)s 인스턴스가 이미 존재합니다. " #, python-format msgid "Instance %(server_id)s is in an invalid state for '%(action)s'" msgstr "인스턴스 %(server_id)s의 상태가 '%(action)s'에 대해 올바르지 않음" #, python-format msgid "Instance %(uuid)s has no mapping to a cell." msgstr "%(uuid)s 인스턴스에 셀에 대한 맵핑이 없습니다." #, python-format msgid "Instance %s not found" msgstr "%s 인스턴스를 찾을 수 없음" #, python-format msgid "Instance %s provisioning was aborted" msgstr "인스턴스 %s 프로비저닝이 중단됨" msgid "Instance could not be found" msgstr "인스턴스를 찾을 수 없음" msgid "Instance disk to be encrypted but no context provided" msgstr "인스턴스 디스크를 암호화히자만 제공된 텍스트가 없음" msgid "Instance event failed" msgstr "인스턴스 이벤트에 실패" #, python-format msgid "Instance group %(group_uuid)s already exists." msgstr "%(group_uuid)s 인스턴스 그룹이 이미 존재합니다. " #, python-format msgid "Instance group %(group_uuid)s could not be found." msgstr "인스턴스 그룹 %(group_uuid)s을(를) 찾을 수 없습니다. " msgid "Instance has no source host" msgstr "인스턴스에 소스 호스트가 없음" msgid "Instance has not been resized." msgstr "인스턴스 크기가 조정되지 않았습니다. 
" #, python-format msgid "Instance hostname %(hostname)s is not a valid DNS name" msgstr "인스턴스 호스트 이름 %(hostname)s이(가) 올바른 DNS 이름이 아님" msgid "Instance is not a member of specified network" msgstr "인스턴스가 지정된 네트워크의 멤버가 아님" msgid "Instance network is not ready yet" msgstr "인스턴스 네트워크가 아직 준비 되지 않았습니다." #, python-format msgid "Instance rollback performed due to: %s" msgstr "인스턴스 롤백이 수행됨. 원인: %s" #, python-format msgid "" "Insufficient Space on Volume Group %(vg)s. Only %(free_space)db available, " "but %(size)d bytes required by volume %(lv)s." msgstr "" "%(vg)s 볼륨 그룹의 공간이 충분하지 않습니다. %(free_space)db만 사용할 수 있지" "만, %(lv)s 볼륨에 %(size)d바이트가 필요합니다." #, python-format msgid "Insufficient compute resources: %(reason)s." msgstr "Compute 리소스가 충분하지 않습니다: %(reason)s." #, python-format msgid "Interface %(interface)s not found." msgstr "%(interface)s 인터페이스를 찾을 수 없습니다. " #, python-format msgid "Invalid Base 64 data for file %(path)s" msgstr "파일 %(path)s에 대해 올바르지 않은 Base 64 데이터" msgid "Invalid Connection Info" msgstr "올바르지 않은 연결 정보" #, python-format msgid "Invalid ID received %(id)s." msgstr "올바르지 않은 ID가 %(id)s을(를) 수신했습니다." #, python-format msgid "Invalid IP format %s" msgstr "올바르지 않은 IP 형식 %s" #, python-format msgid "Invalid IP protocol %(protocol)s." msgstr "올바르지 않은 IP 프로토콜 %(protocol)s." msgid "" "Invalid PCI Whitelist: The PCI whitelist can specify devname or address, but " "not both" msgstr "" "올바르지 않은 PCI 화이트리스트: PCI 화이트리스트는 디바이스 이름 또는 주소를 " "지정할 수 있지만 둘 다 지정할 수는 없음" #, python-format msgid "Invalid PCI alias definition: %(reason)s" msgstr "올바르지 않은 PCI 별명 정의: %(reason)s" #, python-format msgid "Invalid Regular Expression %s" msgstr "올바르지 않은 정규식 %s" #, python-format msgid "Invalid characters in hostname '%(hostname)s'" msgstr "호스트 이름 '%(hostname)s'의 올바르지 않은 문자" msgid "Invalid config_drive provided." msgstr "올바르지 않은 config_drive가 제공되었습니다. " #, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "올바르지 않은 config_drive_format \"%s\"" #, python-format msgid "Invalid console type %(console_type)s" msgstr "올바르지 않은 콘솔 유형 %(console_type)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "올바르지 않은 컨텐츠 유형 %(content_type)s." #, python-format msgid "Invalid datetime string: %(reason)s" msgstr "올바르지 않은 Datetime 문자열: %(reason)s" msgid "Invalid device UUID." msgstr "장치 UUID가 올바르지 않습니다." #, python-format msgid "Invalid entry: '%s'" msgstr "올바르지 않은 항목: '%s'" #, python-format msgid "Invalid entry: '%s'; Expecting dict" msgstr "올바르지 않은 항목: '%s', 사전 예상" #, python-format msgid "Invalid entry: '%s'; Expecting list or dict" msgstr "올바르지 않은 항목: '%s', 목록 또는 사전 예상" #, python-format msgid "Invalid exclusion expression %r" msgstr "올바르지 않은 제외 표현식 %r" #, python-format msgid "Invalid image format '%(format)s'" msgstr "올바르지 않은 이미지 형식 '%(format)s'" #, python-format msgid "Invalid image href %(image_href)s." msgstr "올바르지 않은 이미지 href %(image_href)s." #, python-format msgid "Invalid image property name %(image_property_name)s." msgstr "유효하지 않은 이미지 속성 이름 %(image_property_name)s 입니다." #, python-format msgid "Invalid inclusion expression %r" msgstr "올바르지 않은 포함 표현식 %r" #, python-format msgid "" "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s" msgstr "" "필드/속성 %(path)s에 대한 올바르지 않은 입력. 값: %(value)s. %(message)s" #, python-format msgid "Invalid input received: %(reason)s" msgstr "올바르지 않은 입력을 받았습니다: %(reason)s" msgid "Invalid instance image." msgstr "올바르지 않은 인스턴스 이미지" #, python-format msgid "Invalid is_public filter [%s]" msgstr "올바르지 않은 is_public 필터 [%s]" msgid "Invalid key_name provided." 
msgstr "올바르지 않은 key_name이 제공되었습니다. " #, python-format msgid "Invalid memory page size '%(pagesize)s'" msgstr "올바르지 않은 메모리 페이지 크기 '%(pagesize)s'" msgid "Invalid metadata key" msgstr "올바르지 않은 메타데이터 키" #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "올바르지 않은 메타데이터 크기: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "올바르지 않은 메타데이터: %(reason)s" #, python-format msgid "Invalid minDisk filter [%s]" msgstr "올바르지 않은 minDisk 필터 [%s]" #, python-format msgid "Invalid minRam filter [%s]" msgstr "올바르지 않은 minRam 필터 [%s]" #, python-format msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" msgstr "올바르지 않은 포트 범위 %(from_port)s:%(to_port)s. %(msg)s" msgid "Invalid proxy request signature." msgstr "올바르지 않은 프록시 요청 서명입니다. " #, python-format msgid "Invalid range expression %r" msgstr "올바르지 않은 범위 표현식 %r" msgid "Invalid service catalog json." msgstr "올바르지 않은 서비스 카탈로그 json입니다. " msgid "Invalid start time. The start time cannot occur after the end time." msgstr "" "시작 시간이 올바르지 않습니다. 종료 시간 후에는 시작 시간이 올 수 없습니다." msgid "Invalid state of instance files on shared storage" msgstr "공유 스토리지에서 인스턴스 파일의 올바르지 않은 상태" #, python-format msgid "Invalid timestamp for date %s" msgstr "날짜 %s에 대한 올바르지 않은 시간소인" #, python-format msgid "Invalid usage_type: %s" msgstr "올바르지 않은 usage_type: %s" #, python-format msgid "Invalid value for Config Drive option: %(option)s" msgstr "구성 드라이브 옵션에 대해 올바르지 않은 값: %(option)s" #, python-format msgid "Invalid virtual interface address %s in request" msgstr "요청에서 올바르지 않은 가상 인터페이스 주소 %s" #, python-format msgid "Invalid volume access mode: %(access_mode)s" msgstr "올바르지 않은 볼륨 접근 모드: %(access_mode)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "올바르지 않은 볼륨: %(reason)s" msgid "Invalid volume_size." msgstr "volume_size가 올바르지 않습니다." #, python-format msgid "Ironic node uuid not supplied to driver for instance %s." msgstr "아이로닉 노드 uuid가 인스턴스 %s의 드라이버에 제공되지 않습니다." #, python-format msgid "" "It is not allowed to create an interface on external network %(network_uuid)s" msgstr "외부 네트워크 %(network_uuid)s에 인터페이스를 작성할 수 없음" msgid "" "Key Names can only contain alphanumeric characters, periods, dashes, " "underscores, colons and spaces." msgstr "" "키 이름은 영숫자 문자, 마침표, 대시, 밑줄, 콜론, 공백만 포함할 수 있습니다." #, python-format msgid "Key manager error: %(reason)s" msgstr "키 관리자 오류: %(reason)s" #, python-format msgid "Key pair '%(key_name)s' already exists." msgstr "'%(key_name)s' 키 쌍이 이미 존재합니다. " #, python-format msgid "Keypair %(name)s not found for user %(user_id)s" msgstr "%(user_id)s 사용자에 대한 키 쌍 %(name)s을(를) 찾을 수 없음" #, python-format msgid "Keypair data is invalid: %(reason)s" msgstr "키 쌍 데이터가 올바르지 않습니다: %(reason)s" msgid "Libguestfs does not have permission to read host kernel." msgstr "Libguestfs에게는 커널 호스트를 읽어올 수 있는 권한이 없습니다" msgid "Limits only supported from vCenter 6.0 and above" msgstr "vCenter 6.0 이상에서만 지원되는 한계" #, python-format msgid "Live migration %(id)s for server %(uuid)s is not in progress." msgstr "%(uuid)s 서버의 라이브 마이그레이션 %(id)s이(가) 진행 중이 아닙니다." #, python-format msgid "Malformed message body: %(reason)s" msgstr "잘못된 메시지 본문: %(reason)s" #, python-format msgid "" "Malformed request URL: URL's project_id '%(project_id)s' doesn't match " "Context's project_id '%(context_project_id)s'" msgstr "" "잘못 구성된 요청 URL: URL의 프로젝트 ID '%(project_id)s'이(가)컨텍스트의 프로" "젝트 ID '%(context_project_id)s'과(와) 일치하지 않습니다. " msgid "Malformed request body" msgstr "형식이 틀린 요청 본문" msgid "Mapping image to local is not supported." msgstr "로컬에 매핑된 이미지는 지원하지 않습니다." 
#, python-format msgid "Marker %(marker)s could not be found." msgstr "%(marker)s 마커를 찾을 수 없습니다. " msgid "Maximum number of floating IPs exceeded" msgstr "Floating IP의 최대수 초과" #, python-format msgid "Maximum number of metadata items exceeds %(allowed)d" msgstr "메타데이터의 최대 수가 %(allowed)d을(를) 초과함" msgid "Maximum number of ports exceeded" msgstr "최대 포트 수를 초과함" msgid "Maximum number of security groups or rules exceeded" msgstr "보안 그룹 또는 규칙의 최대 수 초과" #, python-format msgid "Maximum number of serial port exceeds %(allowed)d for %(virt_type)s" msgstr "" " 시리얼 포트에 대한 최대 허용 개수가 %(allowed)d for %(virt_type)s 를 초과했" "습니다." msgid "Metadata item was not found" msgstr "메타데이터 항목이 없음" msgid "Metadata property key greater than 255 characters" msgstr "메타데이터 특성 키가 255자보다 큼" msgid "Metadata property value greater than 255 characters" msgstr "메타데이터 특성 값이 255자보다 큼" msgid "Metadata type should be dict." msgstr "메타데이터 유형은 dict여야 합니다." #, python-format msgid "" "Metric %(name)s could not be found on the compute host node %(host)s." "%(node)s." msgstr "" "메트릭 %(name)s을(를) 계산 호스트 노드 %(host)s.%(node)s.에서 찾을 수 없습니" "다." #, python-format msgid "Migration %(id)s for server %(uuid)s is not live-migration." msgstr "%(uuid)s 서버의 %(id)s 마이그레이션이 라이브 마이그레이션이 아닙니다." #, python-format msgid "Migration %(migration_id)s could not be found." msgstr "%(migration_id)s 마이그레이션을 찾을 수 없습니다. " #, python-format msgid "Migration %(migration_id)s not found for instance %(instance_id)s" msgstr "" "%(instance_id)s 인스턴스의 %(migration_id)s 마이그레이션을 찾을 수 없음" #, python-format msgid "" "Migration %(migration_id)s state of instance %(instance_uuid)s is %(state)s. " "Cannot %(method)s while the migration is in this state." msgstr "" "%(instance_uuid)s 인스턴스의 %(migration_id)s 마이그레이션 상태는 %(state)s입" "니다. 마이그레이션이 이 상태인 경우 %(method)s을(를) 수행할 수 없습니다." #, python-format msgid "Migration error: %(reason)s" msgstr "마이그레이션 오류: %(reason)s" msgid "Migration is not supported for LVM backed instances" msgstr "LVM 지원 인스턴스에 마이그레이션이 지원되지 않음" #, python-format msgid "" "Migration not found for instance %(instance_id)s with status %(status)s." msgstr "" "%(status)s 상태를 갖는 %(instance_id)s 인스턴스에 대한 마이그레이션을 찾을 " "수 없습니다. " #, python-format msgid "Migration pre-check error: %(reason)s" msgstr "마이그레이션 사전 확인 오류: %(reason)s" #, python-format msgid "Migration select destinations error: %(reason)s" msgstr "마이그레이션 선택 대상 오류: %(reason)s" #, python-format msgid "Missing arguments: %s" msgstr "누락된 인수: %s" msgid "Missing device UUID." msgstr "장치 UUID가 비어 있습니다." msgid "Missing disabled reason field" msgstr "사용 안함 이유 필드가 누락됨" msgid "Missing forced_down field" msgstr "forced_down 필드 누락" msgid "Missing imageRef attribute" msgstr "imageRef 속성 누락" #, python-format msgid "Missing keys: %s" msgstr "누락 키: %s" msgid "Missing parameter dict" msgstr "매개변수 사전 누락" #, python-format msgid "" "More than one instance is associated with fixed IP address '%(address)s'." msgstr "" "둘 이상의 인스턴스가 Fixed IP 주소 '%(address)s'과(와) 연관되어 있습니다. " msgid "" "More than one possible network found. Specify network ID(s) to select which " "one(s) to connect to." msgstr "" "사용 가능한 네트워크를 두 개 이상 발견했습니다. 네트워크 ID를 지정하여 연결" "할 항목을 선택하십시오." msgid "More than one swap drive requested." msgstr "둘 이상의 스왑 드라이브를 요청함 " #, python-format msgid "Multi-boot operating system found in %s" msgstr "%s에 다중 부트 운영 체제가 있음" msgid "Multiple X-Instance-ID headers found within request." msgstr "요청에 다중 X-Instance-ID 헤더가 있습니다. " msgid "Multiple X-Tenant-ID headers found within request." msgstr "요청 내에 다중 ltiple X-Tenant-ID 헤더가 있습니다." 
#, python-format msgid "Multiple floating IP pools matches found for name '%s'" msgstr "'%s' 이름에 대해 다중 부동 IP 풀 일치가 발견됨" #, python-format msgid "Multiple floating IPs are found for address %(address)s." msgstr "%(address)s 주소의 Floating IP가 여러 개 있습니다." msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we do " "not return uptime for just one host." msgstr "" "VMWare vCenter 드라이버에서 다중 호스트를 관리할 수도 있습니다. 따라서 단지 " "하나의 호스트에 대해서만 가동 시간을 리턴하지는 않습니다." msgid "Multiple possible networks found, use a Network ID to be more specific." msgstr "" "가능한 여러 개의 네트워크가 발견됨. 좀 더 구체적인 네트워크 ID를 사용하십시" "오. " #, python-format msgid "" "Multiple security groups found matching '%s'. Use an ID to be more specific." msgstr "" "여러 개의 보안 그룹에서 일치하는 '%s'을(를) 찾았습니다. 좀 더 구체적인 ID를 " "사용하십시오." msgid "Must input network_id when request IP address" msgstr "IP 주소 요청 시 network_id를 입력해야 함" msgid "Must not input both network_id and port_id" msgstr "network_id 및 port_id 둘 다 입력하지 않아야 함" msgid "" "Must specify host_ip, host_username and host_password to use vmwareapi." "VMwareVCDriver" msgstr "" "host_ip, host_username 및 host_password를 지정해야 vmwareapi.VMwareVCDriver" "를 사용할 수 있음" msgid "Must supply a positive value for max_number" msgstr "max_number에 양의 값을 제공해야 함" msgid "Must supply a positive value for max_rows" msgstr "최대 행 값으로 양수를 제공해야 합니다. " #, python-format msgid "Network %(network_id)s could not be found." msgstr "%(network_id)s 네트워크를 찾을 수 없습니다. " #, python-format msgid "" "Network %(network_uuid)s requires a subnet in order to boot instances on." msgstr "인스턴스를 부팅하려면 %(network_uuid)s 네트워크에 서브넷이 필요합니다." #, python-format msgid "Network could not be found for bridge %(bridge)s" msgstr "%(bridge)s 브릿지에 대한 네트워크를 찾을 수 없음" #, python-format msgid "Network could not be found for instance %(instance_id)s." msgstr "%(instance_id)s 인스턴스에 대한 네트워크를 찾을 수 없습니다. " msgid "Network not found" msgstr "네트워크를 찾을 수 없음" msgid "" "Network requires port_security_enabled and subnet associated in order to " "apply security groups." msgstr "" "보안 그룹을 적용하기 위해서는 네트워크에 port_security_enabled 및 서브넷이 연" "관되어 있어야 합니다." msgid "New volume must be detached in order to swap." msgstr "스왑하려면 새 볼륨을 분리해야 합니다." msgid "New volume must be the same size or larger." msgstr "새 볼륨은 동일한 크기이거나 이상이어야 합니다." #, python-format msgid "No Block Device Mapping with id %(id)s." msgstr "ID가 %(id)s인 블록 디바이스 맵핑이 없습니다. " msgid "No Unique Match Found." msgstr "고유한 일치점을 찾지 못했습니다." msgid "No compute host specified" msgstr "지정된 계산 호스트가 없음" #, python-format msgid "No conf group name could be found for service type %(stype)s." msgstr "서비스 타입 '%(stype)s'에 일치하는 구성 그룹을 찾을 수 없습니다." #, python-format msgid "No configuration information found for operating system %(os_name)s" msgstr "%(os_name)s 운영 체제의 구성 정보를 찾을 수 없음" #, python-format msgid "No device with MAC address %s exists on the VM" msgstr "VM에 MAC 주소가 %s인 디바이스가 없음" #, python-format msgid "No device with interface-id %s exists on VM" msgstr "VM에 인터페이스 ID가 %s인 디바이스가 없음" #, python-format msgid "No disk at %(location)s" msgstr "%(location)s에 디스크가 없음" #, python-format msgid "No fixed IP addresses available for network: %(net)s" msgstr "네트워크에 사용 가능한 고정 IP 주소가 없음: %(net)s" msgid "No fixed IPs associated to instance" msgstr "인스턴스와 연관된 Fixed IP가 없음" msgid "No free nbd devices" msgstr "여유 nbd 디바이스 없음" msgid "No host available on cluster" msgstr "클러스터에서 사용 가능한 호스트가 없음" msgid "No hosts found to map to cell, exiting." msgstr "셀에 맵핑할 호스트를 찾지 못하여 종료합니다." #, python-format msgid "No hypervisor matching '%s' could be found." 
msgstr "'%s'과(와) 일치하는 하이퍼바이저를 찾을 수 없습니다. " msgid "No image locations are accessible" msgstr "액세스 가능한 이미지 위치가 없음" #, python-format msgid "" "No live migration URI configured and no default available for " "\"%(virt_type)s\" hypervisor virtualization type." msgstr "" "라이브 마이그레이션 URI가 구성되지 않았으며 \"%(virt_type)s\" 하이퍼바이저 가" "상화 유형에 사용 가능한 기본값이 없습니다." msgid "No more floating IPs available." msgstr "더 이상 사용할 수 있는 Floating IP가 없습니다." #, python-format msgid "No more floating IPs in pool %s." msgstr "%s 풀에 추가 Floating IP가 없습니다." #, python-format msgid "No mount points found in %(root)s of %(image)s" msgstr "%(image)s의 %(root)s에 마운트 지점이 없음" #, python-format msgid "No operating system found in %s" msgstr "%s에 운영 체제가 없음" msgid "No root disk defined." msgstr "루트 디스크가 정의되지 않았습니다." #, python-format msgid "" "No specific network was requested and none are available for project " "'%(project_id)s'." msgstr "" "'%(project_id)s' 프로젝트에 특정 네트워크가 요청되었지만, 사용할 수 있는 네트" "워크가 없습니다." #, python-format msgid "No such resource provider %(name_or_uuid)s." msgstr "" "다음의 이름 또는 UUID를 가지는 리소스 공급자가 존재하지 않습니다. : " "%(name_or_uuid)s" msgid "No valid host found for cold migrate" msgstr "콜드 마이그레이션에 대한 유효한 호스트를 찾을 수 없음" msgid "No valid host found for resize" msgstr "크기 조정할 올바른 호스트를 찾을 수 없음" #, python-format msgid "No valid host was found. %(reason)s" msgstr "유효한 호스트가 없습니다. %(reason)s" #, python-format msgid "No volume Block Device Mapping at path: %(path)s" msgstr "경로 %(path)s에 볼륨 블록 디바이스 맵핑이 없음" #, python-format msgid "No volume Block Device Mapping with id %(volume_id)s." msgstr "ID가 %(volume_id)s인 볼륨 블록 디바이스 맵핑이 없습니다." #, python-format msgid "Node %s could not be found." msgstr "%s 노드를 찾을 수 없습니다. " #, python-format msgid "Not able to acquire a free port for %(host)s" msgstr "%(host)s에 사용 가능한 포트를 획득할 수 없음" #, python-format msgid "Not able to bind %(host)s:%(port)d, %(error)s" msgstr "%(host)s:%(port)d, %(error)s을(를) 바인드할 수 없음" #, python-format msgid "" "Not all Virtual Functions of PF %(compute_node_id)s:%(address)s are free." msgstr "" "PF %(compute_node_id)s:%(address)s의 일부 가상 기능은 사용할 수 없습니다." msgid "Not an rbd snapshot" msgstr "rbd 스냅샷이 아님" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "%(image_id)s 이미지에 대한 권한이 없습니다. " msgid "Not authorized." msgstr "권한이 없습니다. " msgid "Not enough parameters to build a valid rule." msgstr "유효한 규칙을 빌드하기엔 매개변수가 부족합니다. " msgid "Not stored in rbd" msgstr "rbd에 저장되지 않음" msgid "Nothing was archived." msgstr "보관된 사항이 없습니다." #, python-format msgid "Nova does not support Cinder API version %(version)s" msgstr "Nova는 Cinder API 버전 %(version)s 를 지원하지 않습니다" #, python-format msgid "Nova requires libvirt version %s or greater." msgstr "Nova에 libvirt 버전 %s 이상이 필요합니다." msgid "Number of Rows Archived" msgstr "보관된 행 수" #, python-format msgid "Object action %(action)s failed because: %(reason)s" msgstr "%(action)s 오브젝트 조치가 실패함. 이유: %(reason)s" msgid "Old volume is attached to a different instance." msgstr "이전 볼륨이 다른 인스턴스에 접속되어 있습니다." #, python-format msgid "One or more hosts already in availability zone(s) %s" msgstr "하나 이상의 호스트가 이미 가용성 구역 %s에 있음" msgid "Only administrators may list deleted instances" msgstr "관리자만 삭제된 인스턴스를 나열할 수 있음" msgid "Origin header does not match this host." msgstr "원본 헤더가 이 호스트와 일치하지 않습니다." msgid "Origin header not valid." msgstr "원본 헤더가 올바르지 않습니다." msgid "Origin header protocol does not match this host." msgstr "원본 헤더 프로토콜이 이 호스트와 일치하지 않습니다." #, python-format msgid "PCI Device %(node_id)s:%(address)s not found." 
msgstr "PCI 디바이스 %(node_id)s:%(address)s을(를) 찾을 수 없음" #, python-format msgid "PCI alias %(alias)s is not defined" msgstr "PCI 별명 %(alias)s이(가) 정의되지 않음 " #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is %(status)s instead of " "%(hopestatus)s" msgstr "" "PCI 디바이스 %(compute_node_id)s:%(address)s이(가) %(status)s 상태임" "(%(hopestatus)s 대신)" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead of " "%(hopeowner)s" msgstr "" "PCI 디바이스 %(compute_node_id)s:%(address)s을(를) %(owner)s이(가) 소유함" "(%(hopeowner)s 대신)" #, python-format msgid "PCI device %(id)s not found" msgstr "PCI 디바이스 %(id)s을(를) 찾을 수 없음" #, python-format msgid "PCI device request %(requests)s failed" msgstr "PCI 디바이스 요청 %(requests)s에 실패함" #, python-format msgid "Page size %(pagesize)s forbidden against '%(against)s'" msgstr "페이지 크기 %(pagesize)s이(가) '%(against)s'에 대해 금지됨" #, python-format msgid "Page size %(pagesize)s is not supported by the host." msgstr "호스트에서 페이지 크기 %(pagesize)s을(를) 지원하지 않습니다." #, python-format msgid "" "Parameters %(missing_params)s not present in vif_details for vif %(vif_id)s. " "Check your Neutron configuration to validate that the macvtap parameters are " "correct." msgstr "" "매개변수 %(missing_params)s이(가) vif %(vif_id)s에 대한 vif_details에 없습니" "다. Neutron 구성을 확인하여 macvtap 매개변수가 올바른지 유효성 검증하십시오. " #, python-format msgid "Path %s must be LVM logical volume" msgstr "경로 %s은(는) LVM 논리적 볼륨이어야 함" msgid "Paused" msgstr "정지함" msgid "Personality file limit exceeded" msgstr "특성 파일 한계 초과" #, python-format msgid "" "Physical Function %(compute_node_id)s:%(address)s, related to VF " "%(compute_node_id)s:%(vf_address)s is %(status)s instead of %(hopestatus)s" msgstr "" "VF %(compute_node_id)s:%(vf_address)s과(와) 관련된 실제 기능 " "%(compute_node_id)s:%(address)s은(는) %(hopestatus)s이(가) 아니라 %(status)s" "입니다." #, python-format msgid "Physical network is missing for network %(network_uuid)s" msgstr "%(network_uuid)s 네트워크의 실제 네트워크가 누락됨" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "%(action)s 정책이 수행되도록 허용되지 않았습니다." #, python-format msgid "Port %(port_id)s is still in use." msgstr "%(port_id)s 포트가 아직 사용 중입니다. " #, python-format msgid "Port %(port_id)s not usable for instance %(instance)s." msgstr "%(port_id)s 포트를 %(instance)s 인스턴스에 사용할 수 없습니다. " #, python-format msgid "" "Port %(port_id)s not usable for instance %(instance)s. Value %(value)s " "assigned to dns_name attribute does not match instance's hostname " "%(hostname)s" msgstr "" "%(instance)s에 %(port_id)s 포트를 사용할 수 없습니다. dns_name 속성에 할당된 " "%(value)s 값은 인스턴스의 호스트 이름 %(hostname)s과(와) 일치하지 않습니다." #, python-format msgid "Port %(port_id)s requires a FixedIP in order to be used." msgstr "%(port_id)s 포트를 사용하려면 FixedIP가 필요합니다." #, python-format msgid "Port %s is not attached" msgstr "%s 포트가 접속되지 않음" #, python-format msgid "Port id %(port_id)s could not be found." msgstr "포트 ID %(port_id)s을(를) 찾을 수 없습니다." #, python-format msgid "Port update failed for port %(port_id)s: %(reason)s" msgstr "포트 %(port_id)s: %(reason)s 때문에 포트 업데이트가 실패했습니다." #, python-format msgid "Provided video model (%(model)s) is not supported." msgstr "제공된 비디오 모델(%(model)s)이 지원되지 않습니다." #, python-format msgid "Provided watchdog action (%(action)s) is not supported." msgstr "제공된 watchdog 조치(%(action)s)가 지원되지 않습니다." 
msgid "QEMU guest agent is not enabled" msgstr "QEMU 게스트 에이전트가 사용되지 않음" #, python-format msgid "Quiescing is not supported in instance %(instance_id)s" msgstr "인스턴스 %(instance_id)s에서 Quiesce가 지원되지 않음" #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "%(class_name)s 할당량 클래스를 찾을 수 없습니다. " msgid "Quota could not be found" msgstr "할당량을 찾을 수 없음" #, python-format msgid "" "Quota exceeded for %(overs)s: Requested %(req)s, but already used %(used)s " "of %(allowed)s %(overs)s" msgstr "" "%(overs)s에 대한 할당량 초과: %(req)s을(를) 요청했지만 이미 %(allowed)s " "%(overs)s 중 %(used)s을(를) 사용했습니다. " #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "자원에 대한 할당량 초과: %(overs)s" #, python-format msgid "" "Quota exceeded or out of space for image %(image_id)s in the image service." msgstr "" "이미지 서비스의 %(image_id)s 이미지가 할당량을 초과 했거나 공간이 부족합니다." msgid "Quota exceeded, too many key pairs." msgstr "할당량 초과. 키 쌍이 너무 많습니다. " msgid "Quota exceeded, too many server groups." msgstr "할당량 초과. 서버 그룹이 너무 많습니다. " msgid "Quota exceeded, too many servers in group" msgstr "할당량 초과. 그룹에 서버가 너무 많습니다. " #, python-format msgid "Quota exists for project %(project_id)s, resource %(resource)s" msgstr "" "프로젝트 %(project_id)s, 자원 %(resource)s에 대한 할당량이 존재합니다. " #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "%(project_id)s 프로젝트에 대한 할당량을 찾을 수 없습니다. " #, python-format msgid "" "Quota for user %(user_id)s in project %(project_id)s could not be found." msgstr "" "%(project_id)s 프로젝트의 %(user_id)s 사용자에 대한 할당량을 찾을 수 없습니" "다." #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be greater than or equal to " "already used and reserved %(minimum)s." msgstr "" "%(resource)s의 할당량 한계 %(limit)s은(는) 이미 사용되고 예약된 %(minimum)s " "이상이어야 합니다." #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be less than or equal to " "%(maximum)s." msgstr "" "%(resource)s의 할당량 한계 %(limit)s은(는) %(maximum)s 이하여야 합니다." msgid "Request body and URI mismatch" msgstr "요청 본문 및 URI 불일치" msgid "Request is too large." msgstr "요청이 너무 큽니다. " #, python-format msgid "Request of image %(image_id)s got BadRequest response: %(response)s" msgstr "%(image_id)s 이미지 요청 결과 BadRequest 응답 발생: %(response)s" #, python-format msgid "RequestSpec not found for instance %(instance_uuid)s" msgstr "%(instance_uuid)s 인스턴스의 RequestSpec을 찾을 수 없음" msgid "Requested CPU control policy not supported by host" msgstr "요청된 CPU 제어 정책은 호스트에서 지원되지 않음" #, python-format msgid "" "Requested hardware '%(model)s' is not supported by the '%(virt)s' virt driver" msgstr "" "'%(virt)s' virt 드라이버가 요청된 하드웨어 '%(model)s'을(를) 지원하지 않음" #, python-format msgid "Requested image %(image)s has automatic disk resize disabled." msgstr "" "요청한 이미지 %(image)s에서 자동 디스크 크기 조정을 사용할 수 없습니다." 
msgid "" "Requested instance NUMA topology cannot fit the given host NUMA topology" msgstr "" "요청된 인스턴스 NUMA 토폴로지를 제공된 호스트 NUMA 토폴로지에 맞출 수 없음" msgid "" "Requested instance NUMA topology together with requested PCI devices cannot " "fit the given host NUMA topology" msgstr "" "요청된 인스턴스 NUMA 토폴로지와 요청된 PCI 디바이스를 함께 제공된 호스트 " "NUMA 토폴로지에 맞출 수 없음" #, python-format msgid "" "Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to " "satisfy for vcpus count %(vcpus)d" msgstr "" "요청된 vCPU 한계 %(sockets)d:%(cores)d:%(threads)d은(는) vcpus 개수 %(vcpus)d" "을(를) 충족시킬 수 없음" #, python-format msgid "Rescue device does not exist for instance %s" msgstr "%s 인스턴스에 대한 복구 디바이스가 없음" #, python-format msgid "Resize error: %(reason)s" msgstr "크기 조정 오류: %(reason)s" msgid "Resize to zero disk flavor is not allowed." msgstr "0 디스크 플레이버로의 크기 조정은 허용되지 않습니다." #, python-format msgid "Resource class '%(resource_class)s' invalid." msgstr "리소스 클래스 '%(resource_class)s'가 올바르지 않습니다." msgid "Resource could not be found." msgstr "자원을 찾을 수 없습니다. " msgid "Resource provider has allocations." msgstr "자원 제공자는 할당량을 갖고 있습니다." msgid "Resumed" msgstr "재시작함" #, python-format msgid "Root element name should be '%(name)s' not '%(tag)s'" msgstr "루트 요소 이름은 '%(tag)s'이(가) 아닌 '%(name)s'이어야 함" #, python-format msgid "Running batches of %i until complete" msgstr "완료될 때까지 %i 일괄처리 실행" #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "스케줄러 호스트 필터 %(filter_name)s을(를) 찾을 수 없습니다. " #, python-format msgid "Scheduling failed: %(reason)s" msgstr "스케줄링 실패: %(reason)s" #, python-format msgid "Security group %(name)s is not found for project %(project)s" msgstr "프로젝트 %(project)s에 대해 보안 그룹 %(name)s을(를) 찾을 수 없음" #, python-format msgid "" "Security group %(security_group_id)s not found for project %(project_id)s." msgstr "" "%(project_id)s 프로젝트에 대한 %(security_group_id)s 보안 그룹을 찾을 수 없습" "니다. " #, python-format msgid "Security group %(security_group_id)s not found." msgstr "%(security_group_id)s 보안 그룹을 찾을 수 없습니다. " #, python-format msgid "" "Security group %(security_group_name)s already exists for project " "%(project_id)s." msgstr "보안 그룹 %(security_group_name)s이(가) 프로젝트%(project_id)s." #, python-format msgid "" "Security group %(security_group_name)s not associated with the instance " "%(instance)s" msgstr "" "보안 그룹 %(security_group_name)s이(가) 인스턴스와 연관되지 않음%(instance)s " "인스턴스와 연관되지 않음" msgid "Security group id should be uuid" msgstr "보안 그룹 ID는 uuid여야 함" msgid "Security group name cannot be empty" msgstr "보안 그룹 이름은 공백일 수 없음" msgid "Security group not specified" msgstr "보안 그룹이 지정되지 않음" #, python-format msgid "Server %(server_id)s has no tag '%(tag)s'" msgstr "서버 %(server_id)s는 '%(tag)s' 태그가 없습니다." #, python-format msgid "Server disk was unable to be resized because: %(reason)s" msgstr "서버 디스크의 크기를 재조정할 수 없음. 이유: %(reason)s" msgid "Server does not exist" msgstr "서버가 없음" #, python-format msgid "ServerGroup policy is not supported: %(reason)s" msgstr "ServerGroup 정책이 지원되지 않음: %(reason)s" msgid "ServerGroupAffinityFilter not configured" msgstr "ServerGroupAffinityFilter가 구성되지 않았음" msgid "ServerGroupAntiAffinityFilter not configured" msgstr "ServerGroupAntiAffinityFilter가 구성되지 않았음" msgid "ServerGroupSoftAffinityWeigher not configured" msgstr "ServerGroupSoftAffinityWeigher이 구성되지 않음" msgid "ServerGroupSoftAntiAffinityWeigher not configured" msgstr "ServerGroupSoftAntiAffinityWeigher이 구성되지 않음" #, python-format msgid "Service %(service_id)s could not be found." msgstr "%(service_id)s 서비스를 찾을 수 없습니다. 
" #, python-format msgid "Service %s not found." msgstr "%s 서비스를 찾을 수 없음" msgid "Service is unavailable at this time." msgstr "지금 서비스를 사용할 수 없습니다." #, python-format msgid "Service with host %(host)s binary %(binary)s exists." msgstr "호스트 %(host)s 바이너리 %(binary)s인 서비스가 존재합니다. " #, python-format msgid "Service with host %(host)s topic %(topic)s exists." msgstr "호스트 %(host)s 주제 %(topic)s인 서비스가 존재합니다. " msgid "Set admin password is not supported" msgstr "설정된 관리 비밀번호가 지원되지 않음" #, python-format msgid "Share '%s' is not supported" msgstr "공유 '%s'은(는) 지원되지 않음" #, python-format msgid "Share level '%s' cannot have share configured" msgstr "공유 레벨 '%s'에는 공유를 구성할 수 없음" #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "%(snapshot_id)s 스냅샷을 찾을 수 없습니다. " msgid "Some required fields are missing" msgstr "일부 필수 필드가 비어있습니다." #, python-format msgid "" "Something went wrong when deleting a volume snapshot: rebasing a " "%(protocol)s network disk using qemu-img has not been fully tested" msgstr "" "볼륨 스냅샷을 삭제할 때 문제 발생: qemu-img를 사용한 %(protocol)s 네트워크 디" "스크의 리베이스가 완전히 테스트되지 않음" msgid "Sort direction size exceeds sort key size" msgstr "정렬 방향 크기가 정렬 키 크기를 초과함" msgid "Sort key supplied was not valid." msgstr "제공되는 정렬 키가 올바르지 않습니다. " msgid "Specified fixed address not assigned to instance" msgstr "지정된 고정 주소가 인스턴스에 연관되지 않음" msgid "Specifying a volume_type with destination_type=local is not supported." msgstr "volume_type 구성을 destination_type=local과 함께 설정할 수 없습니다." msgid "Specifying volume type to existing volume is not supported." msgstr "볼륨 유형을 존재하는 볼륨으로 설정할 수 없습니다." msgid "Started" msgstr "작동함" msgid "Stopped" msgstr "중지됨" #, python-format msgid "Storage error: %(reason)s" msgstr "스토리지 오류: %(reason)s" #, python-format msgid "Storage policy %s did not match any datastores" msgstr "스토리지 정책 %s이(가) 데이터 저장소와 일치하지 않음" msgid "Success" msgstr "완료" msgid "Suspended" msgstr "Suspended" msgid "Swap drive requested is larger than instance type allows." msgstr "요청한 스왑 드라이브가 허용되는 인스턴스 유형보다 큽니다." msgid "Table" msgstr "테이블" #, python-format msgid "Task %(task_name)s is already running on host %(host)s" msgstr "%(task_name)s 태스크가 이미 %(host)s 호스트에서 실행 중임" #, python-format msgid "Task %(task_name)s is not running on host %(host)s" msgstr "%(task_name)s 태스크가 %(host)s 호스트에서 실행 중이 아님" #, python-format msgid "The PCI address %(address)s has an incorrect format." msgstr "PCI 주소 %(address)s에 올바르지 않은 형식이 있습니다." msgid "The `os_distro` image metadata property is required" msgstr "다음의 이미지 메타데이터 속성이 필요합니다. : `os_distro`" #, python-format msgid "The console port range %(min_port)d-%(max_port)d is exhausted." msgstr "콘솔 포트 범위 %(min_port)d-%(max_port)d이(가) 소진되었습니다." msgid "The created instance's disk would be too small." msgstr "작성된 인스턴스의 디스크가 너무 작습니다. " msgid "The current driver does not support preserving ephemeral partitions." msgstr "현재 드라이버는 임시 파티션 유지를 지원하지 않습니다." msgid "The default PBM policy doesn't exist on the backend." msgstr "백엔드에 기본 PBM 정책이 없습니다." #, python-format msgid "" "The fixed IP associated with port %(port_id)s is not compatible with the " "host." msgstr "" " %(port_id)s와 연관되어 있는 고정 IP주소는 해당 호스트와 호환되지 않습니다." msgid "The floating IP request failed with a BadRequest" msgstr "부동 IP 요청이 실패하여 BadRequest가 생성됨" msgid "" "The instance requires a newer hypervisor version than has been provided." msgstr "인스턴스는 제공된 것보다 최신 하이퍼바이저 버전이 필요합니다. 
" #, python-format msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d" msgstr "정의된 포트 수 %(ports)d이(가) 한계를 초과함: %(quota)d" #, python-format msgid "" "The property 'numa_nodes' cannot be '%(nodes)s'. It must be a number greater " "than 0" msgstr "" "속성 'numa_nodes'가 '%(nodes)s'이 될 순 없습니다. 반드시 숫자 0보다 커야 합" "니다. " #, python-format msgid "The provided RNG device path: (%(path)s) is not present on the host." msgstr "제공된 RNG 디바이스 경로: (%(path)s)이(가) 호스트에 없습니다." msgid "The request is invalid." msgstr "요청이 올바르지 않습니다." #, python-format msgid "" "The requested amount of video memory %(req_vram)d is higher than the maximum " "allowed by flavor %(max_vram)d." msgstr "" "요청된 양의 비디오 메모리 %(req_vram)d이(가) %(max_vram)d 플레이버에 의해 허" "용된 최대값보다 높습니다." msgid "The requested availability zone is not available" msgstr "요청한 가용성 구역을 사용할 수 없음" msgid "The requested functionality is not supported." msgstr "요청된 기능이 지원되지 않습니다." #, python-format msgid "The specified cluster '%s' was not found in vCenter" msgstr "지정된 클러스터 '%s'을(를) vCenter에서 찾을 수 없음" #, python-format msgid "The supplied device path (%(path)s) is in use." msgstr "제공된 디바이스 경로(%(path)s)가 사용 중입니다. " #, python-format msgid "The supplied device path (%(path)s) is invalid." msgstr "제공된 디바이스 경로(%(path)s)가 올바르지 않습니다. " #, python-format msgid "" "The supplied disk path (%(path)s) already exists, it is expected not to " "exist." msgstr "제공된 디스크 경로(%(path)s)가 이미 존재합니다. 없어야합니다." msgid "The supplied hypervisor type of is invalid." msgstr "제공된 하이퍼바이저 유형이 올바르지 않습니다. " msgid "The target host can't be the same one." msgstr "대상 호스트가 동일한 것이어서는 안됩니다." #, python-format msgid "The token '%(token)s' is invalid or has expired" msgstr "토큰 '%(token)s'이(가) 올바르지 않거나 만료됨" #, python-format msgid "" "The volume cannot be assigned the same device name as the root device %s" msgstr "볼륨에 루트 디바이스 %s과(와) 같은 디바이스 이름을 지정할 수 없음" #, python-format msgid "The volume mount at %(mount_path)s is unusable." msgstr "%(mount_path)s에 마운트 된 볼륨을 사용할 수 없습니다." msgid "There are not enough hosts available." msgstr "사용 가능한 호스트가 부족합니다." #, python-format msgid "There is no such action: %s" msgstr "해당 조치가 없음: %s" #, python-format msgid "" "This compute node's hypervisor is older than the minimum supported version: " "%(version)s." msgstr "" "이 컴퓨트 노드의 하이퍼바이저가 지원되는 최소 버전 %(version)s보다 이전입니" "다." msgid "" "This method needs to be called with either networks=None and port_ids=None " "or port_ids and networks as not none." msgstr "" "이 메소드는 networks=None 및 port_ids=None 또는 port_ids와 networks를 none으" "로 지정하지 않은 상태로 호출해야 합니다." #, python-format msgid "This rule already exists in group %s" msgstr "이 규칙이 이미 %s 그룹에 존재함" #, python-format msgid "" "This service is older (v%(thisver)i) than the minimum (v%(minver)i) version " "of the rest of the deployment. Unable to continue." msgstr "" "이 서비스는 나머지 배치의 최소 (v%(minver)i) 버전보다 이전(v%(thisver)i)입니" "다." 
msgid "Timeout waiting for response from cell" msgstr "셀의 응답을 대시하는 중에 제한시간 초과" #, python-format msgid "Timeout while checking if we can live migrate to host: %s" msgstr "" "호스트로 라이브 마이그레이션할 수 있는지 확인하는 중에 제한시간 초과 발생: %s" msgid "To and From ports must be integers" msgstr "발신 및 수신 포트는 정수여야 함" msgid "Token not found" msgstr "토큰을 찾을 수 없음" msgid "Triggering crash dump is not supported" msgstr "충돌 덤프 트리거가 지원되지 않음" msgid "Type and Code must be integers for ICMP protocol type" msgstr "ICMP 프로토콜 유형의 경우 유형 및 코드는 정수여야 함" msgid "UEFI is not supported" msgstr "UEFI가 지원되지 않음" #, python-format msgid "" "Unable to associate floating IP %(address)s to any fixed IPs for instance " "%(id)s. Instance has no fixed IPv4 addresses to associate." msgstr "" "Floating IP %(address)s을(를) %(id)s 인스턴스의 Fixed IP와 연관시킬 수 없습니" "다. 인스턴스에 연관시킬 Fixed IPv4 주소가 없습니다." #, python-format msgid "" "Unable to associate floating IP %(address)s to fixed IP %(fixed_address)s " "for instance %(id)s. Error: %(error)s" msgstr "" "Floating IP %(address)s을(를) 인스턴스 %(id)s의 Fixed IP %(fixed_address)s에 " "연관시킬 수 없습니다. 오류: %(error)s" #, python-format msgid "Unable to automatically allocate a network for project %(project_id)s" msgstr "%(project_id)s 때문에 자동으로 네트워크를 할당할 수 없습니다." msgid "Unable to communicate with the Placement API." msgstr "Placement API와 통신할 수 없습니다." #, python-format msgid "Unable to convert image to %(format)s: %(exp)s" msgstr "이미지를 %(format)s(으)로 변환할 수 없음: %(exp)s" #, python-format msgid "Unable to convert image to raw: %(exp)s" msgstr "이미지를 원시로 변환할 수 없음: %(exp)s" #, python-format msgid "Unable to determine disk bus for '%s'" msgstr "'%s'의 디스크 버스를 판별할 수 없음" #, python-format msgid "Unable to determine disk prefix for %s" msgstr "%s의 디스크 접두부를 판별할 수 없음" #, python-format msgid "Unable to find host for Instance %s" msgstr "%s 인스턴스에 대한 호스트를 찾을 수 없음" msgid "Unable to find iSCSI Target" msgstr "iSCSI 대상을 찾을 수 없음" msgid "Unable to find volume" msgstr "볼륨을 찾을 수 없음" msgid "Unable to get host UUID: /etc/machine-id does not exist" msgstr "호스트 UUID를 가져올 수 없음: /etc/machine-id가 존재하지 않음" msgid "Unable to get host UUID: /etc/machine-id is empty" msgstr "호스트 UUID를 가져올 수 없음: /etc/machine-id가 비어 있음" msgid "" "Unable to launch multiple instances with a single configured port ID. Please " "launch your instance one by one with different ports." msgstr "" "단일 포트 ID가 구성된 다중 인스턴스를 실행할 수 없습니다.인스턴스를 다른 포트" "와 함께 하나씩 실행하십시오." #, python-format msgid "" "Unable to migrate %(instance_uuid)s to %(dest)s: Lack of memory(host:" "%(avail)s <= instance:%(mem_inst)s)" msgstr "" "%(instance_uuid)s을(를) %(dest)s(으)로 마이그레이션할 수 없음. 메모리 부족(호" "스트:%(avail)s <= 인스턴스:%(mem_inst)s)" #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too large(available " "on destination host:%(available)s < need:%(necessary)s)" msgstr "" "%(instance_uuid)s을(를) 마이그레이션할 수 없음: 인스턴스의 디스크가 너무 큼" "(대상 호스트의 사용 가능량:%(available)s < 필요량:%(necessary)s)" #, python-format msgid "" "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." msgstr "" "인스턴스(%(instance_id)s)를 현재 호스트(%(host)s)로 마이그레이션할 수 없습니" "다. " msgid "Unable to resize disk down." msgstr "디스크 크기를 줄일 수 없습니다." msgid "Unable to set password on instance" msgstr "인스턴스에 대한 비밀번호를 설정할 수 없음" msgid "Unable to shrink disk." msgstr "디스크를 줄일 수 없습니다." #, python-format msgid "Unacceptable CPU info: %(reason)s" msgstr "CPU 정보를 확인할 수 없습니다: %(reason)s" msgid "Unacceptable parameters." msgstr "사용할 수 없는 매개변수입니다. " #, python-format msgid "Unavailable console type %(console_type)s." 
msgstr "사용 불가능한 콘솔 유형 %(console_type)s입니다." msgid "" "Undefined Block Device Mapping root: BlockDeviceMappingList contains Block " "Device Mappings from multiple instances." msgstr "" "정의되지 않은 블록 디바이스 맵핑 루트: BlockDeviceMappingList에는 여러 인스턴" "스의 블록 디바이스 맵핑이 포함되어 있습니다." #, python-format msgid "Unexpected aggregate action %s" msgstr "예상치 않은 집합 %s 작업" msgid "Unexpected type adding stats" msgstr "통계를 추가하는 예기치 않은 유형이 있음" #, python-format msgid "Unexpected vif_type=%s" msgstr "예기치 않은 vif_type=%s" msgid "Unknown" msgstr "알 수 없음" msgid "Unknown action" msgstr "알 수 없는 조치" #, python-format msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat." msgstr "" "알 수 없는 구성 드라이브 형식 %(format)s입니다. iso9660 또는 vfat 중 하나를 " "선택하십시오. " #, python-format msgid "Unknown delete_info type %s" msgstr "알 수 없는 delete_info 유형: %s" #, python-format msgid "Unknown image_type=%s" msgstr "알 수 없는 image_type=%s" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "알 수 없는 할당량 자원 %(unknown)s." msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "알 수 없는 정렬 방향입니다. 'desc' 또는 'asc'여야 함" #, python-format msgid "Unknown type: %s" msgstr "알 수 없는 유형: %s" msgid "Unrecognized legacy format." msgstr "인식할 수 없는 레거시 포맷입니다." #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "인식되지 않는 read_deleted 값 '%s'" #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "CONF.running_deleted_instance_action에 대한 인식되지 않는 값 '%s'" #, python-format msgid "Unshelve attempted but the image %s cannot be found." msgstr "언쉘브를 시도했으나 %s 이미지를 찾을 수 없습니다." msgid "Unsupported Content-Type" msgstr "지원되지 않는 Content-Type" #, python-format msgid "User %(username)s not found in password file." msgstr "%(username)s 사용자가 비밀번호 파일에 없습니다. " #, python-format msgid "User %(username)s not found in shadow file." msgstr "%(username)s 사용자가 새도우 파일에 없습니다. " msgid "User data needs to be valid base 64." msgstr "사용자 데이터는 유효한 base64여야 합니다. " msgid "User does not have admin privileges" msgstr "사용자에게 관리 권한이 없습니다" msgid "" "Using different block_device_mapping syntaxes is not allowed in the same " "request." msgstr "" "동일한 요청에서 다른 block_device_mapping 구문 사용은 요청을 건너뛰는 중입니" "다." #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "API에서 %(req_ver)s 버전을 지원하지 않습니다. 최소 %(min_ver)s 이상, 최대 " "%(max_ver)s 이하여야 합니다." #, python-format msgid "" "Version of %(name)s %(min_ver)s %(max_ver)s intersects with another versions." msgstr "%(name)s %(min_ver)s %(max_ver)s의 버전이 다른 버전과 겹칩니다." msgid "Virtual Interface creation failed" msgstr "가상 인터페이스 생성 실패하였습니다" msgid "Virtual interface plugin failed" msgstr "가상 인터페이스 연결 실패했습니다" #, python-format msgid "Virtual machine mode '%(vmmode)s' is not recognised" msgstr "가상 머신 모드 '%(vmmode)s'이(가) 인식되지 않음" #, python-format msgid "Virtual machine mode '%s' is not valid" msgstr "가상 머신 모드 '%s'이(가) 올바르지 않음" #, python-format msgid "Virtualization type '%(virt)s' is not supported by this compute driver" msgstr "이 컴퓨터 드라이버가 가상화 유형 '%(virt)s'을(를) 지원하지 않음" #, python-format msgid "Volume %(volume_id)s could not be attached. Reason: %(reason)s" msgstr "%(volume_id)s 볼륨을 연결할 수 없습니다. 이유: %(reason)s" #, python-format msgid "Volume %(volume_id)s could not be detached. Reason: %(reason)s" msgstr "%(volume_id)s 볼륨을 분리할 수 없습니다. 이유: %(reason)s" #, python-format msgid "Volume %(volume_id)s could not be extended. Reason: %(reason)s" msgstr "%(volume_id)s 볼륨을 확장할 수 없습니다. 
이유: %(reason)s" #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "%(volume_id)s 볼륨을 찾을 수 없습니다. " #, python-format msgid "" "Volume %(volume_id)s did not finish being created even after we waited " "%(seconds)s seconds or %(attempts)s attempts. And its status is " "%(volume_status)s." msgstr "" "%(seconds)s 초 동안 기다리거나 %(attempts)s 시도 하였으나 %(volume_id)s 볼륨" "이 생성되지 않았습니다. 지금 상태는 %(volume_status)s 입니다." msgid "Volume does not belong to the requested instance." msgstr "볼륨이 요청된 인스턴스에 속해 있지 않습니다." #, python-format msgid "" "Volume encryption is not supported for %(volume_type)s volume %(volume_id)s" msgstr "볼륨 암호화는 %(volume_type)s 볼륨 %(volume_id)s 를 지원하지 않습니다" #, python-format msgid "" "Volume is smaller than the minimum size specified in image metadata. Volume " "size is %(volume_size)i bytes, minimum size is %(image_min_disk)i bytes." msgstr "" "볼륨이 이미지 메타데이터에서 지정된 최소 크기보다 작습니다. 볼륨크기는 " "%(volume_size)i바이트이고 최소 크기는 %(image_min_disk)i바이트입니다. " #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " "support custom block size" msgstr "" "볼륨에서 블록 크기를 설정하지만 현재 libvirt 하이퍼바이저 '%s'이(가) 사용자 " "정의 블록 크기를 지원하지 않음" msgid "Volume size extension is not supported by the hypervisor." msgstr "하이퍼바이저에서 볼륨크기 확장을 지원하지 않습니다" msgid "When resizing, instances must change flavor!" msgstr "크기를 조정할 때 인스턴스는 플레이버를 변경해야 합니다!" #, python-format msgid "Wrong quota method %(method)s used on resource %(res)s" msgstr "%(res)s 자원에서 올바르지 않은 할당량 메소드 %(method)s이(가) 사용됨" msgid "X-Forwarded-For is missing from request." msgstr "X-Forwarded-For가 요청에서 누락되었습니다. " msgid "X-Instance-ID header is missing from request." msgstr "X-Instance-ID 헤더가 요청에서 누락되었습니다. " msgid "X-Instance-ID-Signature header is missing from request." msgstr "X-Instance-ID-Signature 헤더가 요청에서 누락되었습니다." msgid "X-Metadata-Provider is missing from request." msgstr "X-Metadata-Provider가 요청에서 누락되었습니다. " msgid "X-Tenant-ID header is missing from request." msgstr "X-Tenant-ID 헤더가 요청에서 누락되었습니다." msgid "You are not allowed to delete the image." msgstr "이미지를 삭제할 수 없습니다." msgid "" "You are not authorized to access the image the instance was started with." msgstr "인스턴스가 시작되는 해당 이미지에 액세스할 권한이 없습니다. " #, python-format msgid "" "You can't use %s options in vzstorage_mount_opts configuration parameter." msgstr "vzstorage_mount_opts 구성 매개변수에서 %s 옵션을 사용할 수 없습니다." msgid "You must implement __call__" msgstr "__call__을 구현해야 합니다. " msgid "You should specify images_rbd_pool flag to use rbd images." msgstr "rbd 이미지를 사용하려면 images_rbd_pool 플래그를 지정해야 합니다." msgid "You should specify images_volume_group flag to use LVM images." msgstr "LVM 이미지를 사용하려면 images_volume_group 플래그를 지정해야 합니다." msgid "Zero floating IPs available." msgstr "사용할 수 있는 Floating IP가 0개입니다." 
msgid "admin password can't be changed on existing disk" msgstr "관리 비밀번호는 기존 디스크에서 변경될 수 없음" msgid "cannot understand JSON" msgstr "JSON을 이해할 수 없음" msgid "clone() is not implemented" msgstr "clone()이 구현되지 않음" #, python-format msgid "connect info: %s" msgstr "연결 정보: %s" #, python-format msgid "connecting to: %(host)s:%(port)s" msgstr "%(host)s:%(port)s에 연결 중" msgid "direct_snapshot() is not implemented" msgstr "direct_snapshot()이 구현되지 않음" #, python-format msgid "disk type '%s' not supported" msgstr "디스크 유형 '%s'이(가) 지원되지 않음" #, python-format msgid "empty project id for instance %s" msgstr "%s 인스턴스에 대한 비어 있는 프로젝트 ID" msgid "error setting admin password" msgstr "관리 비밀번호 설정 오류" #, python-format msgid "error: %s" msgstr "오류: %s" #, python-format msgid "failed to generate X509 fingerprint. Error message: %s" msgstr "X509 fingerprint를 생성하지 못했습니다. 에러 메시지: %s" msgid "failed to generate fingerprint" msgstr "Fingerprint를 생성하지 못했습니다" msgid "filename cannot be None" msgstr "파일 이름은 None일 수 없음" msgid "floating IP is already associated" msgstr "Floating IP가 이미 연관되어 있음" msgid "floating IP not found" msgstr "Floating IP를 찾을 수 없음" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s 백업: %(backing_file)s" #, python-format msgid "href %s does not contain version" msgstr "href %s에 버전이 없음" msgid "image already mounted" msgstr "이미지가 이미 마운트되었음" #, python-format msgid "instance %s is not running" msgstr "인스턴스 %s이(가) 실행 중이 아님" msgid "instance is a required argument to use @refresh_cache" msgstr "인스턴스는 @refresh_cache를 사용하기 위한 필수 인수임" msgid "instance is not in a suspended state" msgstr "인스턴스가 일시중단 상태에 있지 않음" msgid "instance is not powered on" msgstr "인스턴스가 전원 공급되지 않음" msgid "instance is powered off and cannot be suspended." msgstr "인스턴스가 전원 차단되었고 일시중단될 수 없습니다. " #, python-format msgid "instance_id %s could not be found as device id on any ports" msgstr "포트의 디바이스 ID로 instance_id %s을(를) 찾을 수 없음" msgid "is_public must be a boolean" msgstr "is_public은 부울이어야 함" msgid "keymgr.fixed_key not defined" msgstr "keymgr.fixed_key가 정의되지 않음" msgid "l3driver call to add floating IP failed" msgstr "Floating IP를 추가하기 위한 l3driver 호출에 실패" #, python-format msgid "libguestfs installed but not usable (%s)" msgstr "libguestfs가 설치되었지만 사용할 수 없음(%s)" #, python-format msgid "libguestfs is not installed (%s)" msgstr "libguestfs가 설치되지 않음(%s)" #, python-format msgid "marker [%s] not found" msgstr "마커 [%s]을(를) 찾을 수 없음" #, python-format msgid "max rows must be <= %(max_value)d" msgstr "최대 행은 <= %(max_value)d이어야 함" msgid "max_count cannot be greater than 1 if an fixed_ip is specified." msgstr "fixed_ip가 지정된 경우 max_count는 1 이하여야 합니다." 
msgid "min_count must be <= max_count" msgstr "min_count는 max_count 이하여야 함" #, python-format msgid "nbd device %s did not show up" msgstr "nbd 디바이스 %s이(가) 표시되지 않음" msgid "nbd unavailable: module not loaded" msgstr "nbd 사용 불가능: 모듈이 로드되지 않았음" #, python-format msgid "no match found for %s" msgstr "%s에 대한 일치 항목을 찾을 수 없음" #, python-format msgid "no usable parent snapshot for volume %s" msgstr "%s 볼륨에 사용 가능한 상위 스냅샷이 없음" #, python-format msgid "no write permission on storage pool %s" msgstr "스토리지 풀 %s에 쓰기 권한이 없음" #, python-format msgid "not able to execute ssh command: %s" msgstr "ssh 명령을 실행할 수 없음: %s" msgid "old style configuration can use only dictionary or memcached backends" msgstr "이전 스타일 구성에서는 사전 또는 memcached 백엔드만 사용할 수 있음" msgid "operation time out" msgstr "조작 제한시간이 초과됨" #, python-format msgid "partition %s not found" msgstr "%s 파티션을 찾을 수 없음" #, python-format msgid "partition search unsupported with %s" msgstr "파티션 검색이 %s에서 지원되지 않음" msgid "pause not supported for vmwareapi" msgstr "vmwareapi에 대한 일시정지는 지원되지 않음" msgid "printable characters with at least one non space character" msgstr "공백이 아닌 문자가 하나 이상 포함된 인쇄 가능한 문자" msgid "printable characters. Can not start or end with whitespace." msgstr "인쇄 가능한 문자입니다. 공백으로 시작하거나 종료할 수 없습니다." #, python-format msgid "qemu-img failed to execute on %(path)s : %(exp)s" msgstr "qemu-img가 %(path)s에서 실행하는 데 실패 : %(exp)s" #, python-format msgid "qemu-nbd error: %s" msgstr "qemu-nbd 오류: %s" msgid "rbd python libraries not found" msgstr "rbd python 라이브러리를 찾을 수 없음" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" "read_deleted는 'no', 'yes', 'only' 중 하나만 가능하며, %r은(는) 사용하지 못 " "합니다." msgid "serve() can only be called once" msgstr "serve()는 한 번만 호출할 수 있음" msgid "service is a mandatory argument for DB based ServiceGroup driver" msgstr "서비스는 DB 기반 ServiceGroup 드라이버의 필수 인수임" msgid "service is a mandatory argument for Memcached based ServiceGroup driver" msgstr "서비스는 Memcached 기반 ServiceGroup 드라이버의 필수 인수임" msgid "set_admin_password is not implemented by this driver or guest instance." msgstr "" "set_admin_password가 이 드라이버 또는 게스트 인스턴스에 의해 구현되지 않습니" "다. " #, python-format msgid "snapshot for %s" msgstr "%s 스냅샷" msgid "snapshot_id required in create_info" msgstr "create_info에 snapshot_id가 필요함" msgid "token not provided" msgstr "토큰이 제공되지 않음" msgid "too many body keys" msgstr "본문 키가 너무 많음" msgid "unpause not supported for vmwareapi" msgstr "vmwareapi에 대한 일시정지 해제는 지원되지 않음" #, python-format msgid "vg %s must be LVM volume group" msgstr "vg %s은(는) LVM 볼륨 그룹이어야 함" #, python-format msgid "vhostuser_sock_path not present in vif_details for vif %(vif_id)s" msgstr "vif %(vif_id)s의 vif_details에 vhostuser_sock_path가 표시되지 않음" #, python-format msgid "vif type %s not supported" msgstr "vif 유형 %s이(가) 지원되지 않음" msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "이 vif_driver 구현을 위해 vif_type 매개변수가 존재해야 함" #, python-format msgid "volume %s already attached" msgstr "볼륨 %s이(가) 이미 접속됨" #, python-format msgid "" "volume '%(vol)s' status must be 'in-use'. Currently in '%(status)s' status" msgstr "볼륨 '%(vol)s' 상태는 '사용 중'이어야 합니다. 
현재 상태 '%(status)s'" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315688.893605 nova-32.0.0/nova/locale/pt_BR/0000775000175000017500000000000000000000000015772 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3736086 nova-32.0.0/nova/locale/pt_BR/LC_MESSAGES/0000775000175000017500000000000000000000000017557 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/locale/pt_BR/LC_MESSAGES/nova.po0000664000175000017500000027154200000000000021075 0ustar00zuulzuul00000000000000# Translations template for nova. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the nova project. # # Translators: # Francisco Demontiê dos Santos Junior , 2013 # FIRST AUTHOR , 2011 # Francisco Demontiê dos Santos Junior , 2013 # Gabriel Wainer, 2013 # Josemar Muller Lohn , 2013 # Leonardo Rodrigues de Mello <>, 2012 # Marcelo Dieder , 2013 # MichaelBr , 2013 # Volmar Oliveira Junior , 2013 # Welkson Renny de Medeiros , 2012 # Wiliam Souza , 2013 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-07-04 18:13+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:08+0000\n" "Last-Translator: Copied by Zanata \n" "Language: pt_BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Portuguese (Brazil)\n" #, python-format msgid "%(address)s is not a valid IP v4/6 address." msgstr "%(address)s não é um endereço IPv4/6 válido." #, python-format msgid "" "%(binary)s attempted direct database access which is not allowed by policy" msgstr "" "%(binary)s tentou o acesso direto ao banco de dados, que não é permitido " "pela política" #, python-format msgid "%(cidr)s is not a valid IP network." msgstr "%(cidr)s não é uma rede de IP válida." #, python-format msgid "%(field)s should not be part of the updates." msgstr "%(field)s não deve fazer parte das atualizações." #, python-format msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB" msgstr "%(memsize)d MB de memória designada, mas esperada %(memtotal)d MB" #, python-format msgid "%(path)s is not on local storage: %(reason)s" msgstr "%(path)s não está no armazenamento local: %(reason)s" #, python-format msgid "%(path)s is not on shared storage: %(reason)s" msgstr "%(path)s não está no armazenamento compartilhado: %(reason)s" #, python-format msgid "%(total)i rows matched query %(meth)s, %(done)i migrated" msgstr "%(total)i linhas corresponderal à consulta %(meth)s, %(done)i migradas" #, python-format msgid "%(type)s hypervisor does not support PCI devices" msgstr "O hypervisor %(type)s não suporta dispositivos PCI" #, python-format msgid "%s does not support disk hotplug." msgstr "%s não suporta o hotplug do disco." #, python-format msgid "%s format is not supported" msgstr "O formato %s não é suportado" #, python-format msgid "%s is not supported." msgstr "%s não é suportado." #, python-format msgid "%s must be either 'MANUAL' or 'AUTO'." msgstr "%s deve ser 'MANUAL' ou 'AUTO'." 
#, python-format msgid "'%(other)s' should be an instance of '%(cls)s'" msgstr "'%(other)s' deve ser uma instância de '%(cls)s'" msgid "'qemu-img info' parsing failed." msgstr "Falha na análise de 'qemu-img info'." #, python-format msgid "'rxtx_factor' argument must be a float between 0 and %g" msgstr "Argumento 'rxtx_factor' deve ser um valor flutuante entre 0 e %g" #, python-format msgid "A NetworkModel is required in field %s" msgstr "Um NetworkModel é requerido no campo %s" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "String de Versão de API %(version)s é de formato inválido. Deve estar no " "formato MajorNum.MinorNum." #, python-format msgid "API version %(version)s is not supported on this method." msgstr "Versão de API %(version)s não é suportada nesse método." msgid "Access list not available for public flavors." msgstr "Lista de acesso não disponível para métodos públicos." #, python-format msgid "Action %s not found" msgstr "Ação %s não localizada" #, python-format msgid "" "Action for request_id %(request_id)s on instance %(instance_uuid)s not found" msgstr "" "Ação para request_id %(request_id)s na instância %(instance_uuid)s não " "localizada" #, python-format msgid "Action: '%(action)s', calling method: %(meth)s, body: %(body)s" msgstr "Ação: '%(action)s', método de chamada: %(meth)s, corpo: %(body)s" #, python-format msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries" msgstr "" "A inclusão de metadados falhou para o agregado %(id)s após %(retries)s novas " "tentativas" #, python-format msgid "Aggregate %(aggregate_id)s already has host %(host)s." msgstr "O agregado %(aggregate_id)s já possui o host %(host)s." #, python-format msgid "Aggregate %(aggregate_id)s could not be found." msgstr "O agregado %(aggregate_id)s não pôde ser localizado." #, python-format msgid "Aggregate %(aggregate_id)s has no host %(host)s." msgstr "O agregado %(aggregate_id)s não possui host %(host)s." #, python-format msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." msgstr "" "O agregado %(aggregate_id)s não possui metadados com a chave " "%(metadata_key)s." #, python-format msgid "Aggregate %(aggregate_name)s already exists." msgstr "O agregado %(aggregate_name)s já existe." #, python-format msgid "Aggregate %s does not support empty named availability zone" msgstr "O agregado %s não suporta zona de disponibilidade nomeada vazia" #, python-format msgid "An invalid 'name' value was provided. The name must be: %(reason)s" msgstr "Um valor 'name' inválido foi fornecido. O nome deve ser: %(reason)s" msgid "An unknown error has occurred. Please try your request again." msgstr "" "Ocorreu um erro desconhecido. Por favor tente sua requisição novamente." msgid "An unknown exception occurred." msgstr "Ocorreu uma exceção desconhecida." #, python-format msgid "Architecture name '%(arch)s' is not recognised" msgstr "Nome de arquitetura '%(arch)s' não é reconhecido" #, python-format msgid "Architecture name '%s' is not valid" msgstr "O nome de arquitetura '%s' não é válido" #, python-format msgid "" "Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty pool" msgstr "" "Tentativa de consumir dispositivo PCI %(compute_node_id)s:%(address)s do " "conjunto vazio" msgid "Attempted overwrite of an existing value." msgstr "Tentativa de sobrescrever um valor existente." 
#, python-format msgid "Attribute not supported: %(attr)s" msgstr "Atributo não suportado: %(attr)s" #, python-format msgid "Bad network format: missing %s" msgstr "Formato de rede inválido: %s ausente" msgid "Bad networks format" msgstr "Formato de redes inválido" #, python-format msgid "Bad networks format: network uuid is not in proper format (%s)" msgstr "" "Formato de redes inválido: o uuid da rede não está em formato adequado (%s)" #, python-format msgid "Bad prefix for network in cidr %s" msgstr "Prefixo inválido para rede em cidr %s" #, python-format msgid "" "Binding failed for port %(port_id)s, please check neutron logs for more " "information." msgstr "" "A ligação falhou para a porta %(port_id)s, verifique os logs do neutron para " "obter informações adicionais." msgid "Blank components" msgstr "Componentes em branco" msgid "" "Blank volumes (source: 'blank', dest: 'volume') need to have non-zero size" msgstr "" "Volumes em branco (origem: 'blank', dest: 'volume') precisam ter tamanho " "diferente de zero" #, python-format msgid "Block Device %(id)s is not bootable." msgstr "%(id)s do Dispositivo de Bloco não é inicializável." #, python-format msgid "" "Block Device Mapping %(volume_id)s is a multi-attach volume and is not valid " "for this operation." msgstr "" "O Mapeamento de Dispositivo de Bloco %(volume_id)s é um volume de diversas " "conexões e não é válido para essa operação." msgid "Block Device Mapping cannot be converted to legacy format. " msgstr "" "O Mapeamento do Dispositivo de Bloco não pode ser convertido para um formato " "legado." msgid "Block Device Mapping is Invalid." msgstr "O Mapeamento de Dispositivo de Bloco é Inválido." #, python-format msgid "Block Device Mapping is Invalid: %(details)s" msgstr "O Mapeamento do Dispositivo de Bloco é Inválido: %(details)s" msgid "" "Block Device Mapping is Invalid: Boot sequence for the instance and image/" "block device mapping combination is not valid." msgstr "" "Mapeamento de Dispositivo de Bloco inválido: A sequência de boot para a " "instância e a combinação de mapeamento de dispositivo de imagem/bloco é " "inválida." msgid "" "Block Device Mapping is Invalid: You specified more local devices than the " "limit allows" msgstr "" "Mapeamento de Dispositivo de Bloco inválido: Você especificou mais " "dispositivos locais que o limite permitido" #, python-format msgid "Block Device Mapping is Invalid: failed to get image %(id)s." msgstr "" "Mapeamento de Dispositivo de Bloco inválido: falha ao obter imagem %(id)s." #, python-format msgid "Block Device Mapping is Invalid: failed to get snapshot %(id)s." msgstr "" "O Mapeamento de Dispositivo de Bloco é Inválido: falha ao obter captura " "instantânea %(id)s." #, python-format msgid "Block Device Mapping is Invalid: failed to get volume %(id)s." msgstr "" "O Mapeamento de Dispositivo de Bloco é Inválido: falha ao obter volume " "%(id)s." msgid "Block migration can not be used with shared storage." msgstr "Migração de bloco não pode ser usada com armazenamento compartilhado." msgid "Boot index is invalid." msgstr "Índice de inicialização inválido." 
#, python-format msgid "Build of instance %(instance_uuid)s aborted: %(reason)s" msgstr "Construção da instância %(instance_uuid)s interrompida: %(reason)s" #, python-format msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s" msgstr "" "A construção da instância %(instance_uuid)s foi replanejada: %(reason)s" #, python-format msgid "BuildRequest not found for instance %(uuid)s" msgstr "BuildRequest não localizado para a instância %(uuid)s" msgid "CPU and memory allocation must be provided for all NUMA nodes" msgstr "CPU e alocação de memória devem ser fornecidos para todos os nós NUMA" #, python-format msgid "" "CPU doesn't have compatibility.\n" "\n" "%(ret)s\n" "\n" "Refer to %(u)s" msgstr "" "A CPU não possui compatibilidade.\n" "\n" "%(ret)s\n" "\n" "Consulte %(u)s" #, python-format msgid "CPU number %(cpunum)d is assigned to two nodes" msgstr "Número de CPU %(cpunum)d é designado a dois nós" #, python-format msgid "CPU number %(cpunum)d is larger than max %(cpumax)d" msgstr "Número de CPU %(cpunum)d é maior que o máximo %(cpumax)d" #, python-format msgid "CPU number %(cpuset)s is not assigned to any node" msgstr "Número de CPU %(cpuset)s não é designado a nenhum nó" msgid "Can not add access to a public flavor." msgstr "Não é possível incluir acesso em um tipo público." msgid "Can not find requested image" msgstr "Não é possível localizar a imagem solicitada" #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "" "Não é possível manipular solicitação de autenticação para %d credenciais" msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "" "Não é possível recuperar o caminho de dispositivo raiz da configuração de " "libvirt da instância" #, python-format msgid "" "Cannot '%(action)s' instance %(server_id)s while it is in %(attr)s %(state)s" msgstr "" "Não é possível '%(action)s' da instância %(server_id)s enquanto ele está em " "%(attr)s %(state)s" #, python-format msgid "Cannot add host to aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Não é possível adicionar o host para o agregado %(aggregate_id)s. Motivo: " "%(reason)s." msgid "Cannot attach one or more volumes to multiple instances" msgstr "Não é possível anexar um ou mais volumes a várias instâncias" #, python-format msgid "Cannot call %(method)s on orphaned %(objtype)s object" msgstr "Não é possível chamar %(method)s no objeto órfão %(objtype)s" #, python-format msgid "" "Cannot determine the parent storage pool for %s; cannot determine where to " "store images" msgstr "" "Não é possível determinar o conjunto de armazenamentos pai para %s; não é " "possível determinar onde armazenar as imagens" msgid "Cannot find image for rebuild" msgstr "Não foi possível localizar a imagem para reconstrução" #, python-format msgid "Cannot remove host %(host)s in aggregate %(id)s" msgstr "Não é possível remover o host %(host)s do agregado %(id)s" #, python-format msgid "Cannot remove host from aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Não é possível remover o host do agregado %(aggregate_id)s. Motivo: " "%(reason)s." 
msgid "Cannot rescue a volume-backed instance" msgstr "Não é possível resgatar uma instância suportada por volume" msgid "" "Cannot set cpu thread pinning policy in a non dedicated cpu pinning policy" msgstr "" "Não é possível configurar a política de pinning de encadeamento de CPU em " "uma política de pinning de CPU dedicada" msgid "Cannot set realtime policy in a non dedicated cpu pinning policy" msgstr "" "Não é possível configurar a política em tempo real em uma política de " "pinning de CPU dedicada" #, python-format msgid "Cannot update aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Não é possível atualizar o agregado %(aggregate_id)s. Motivo: %(reason)s." #, python-format msgid "" "Cannot update metadata of aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Não é possível atualizar o metadado do agregado %(aggregate_id)s. Motivo: " "%(reason)s." #, python-format msgid "Cell %(uuid)s has no mapping." msgstr "Célula %(uuid)s não possui mapeamento." #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "A mudança faria uso de menos de 0 dos recursos a seguir: %(unders)s" #, python-format msgid "Class %(class_name)s could not be found: %(exception)s" msgstr "A classe %(class_name)s não pôde ser localizada: %(exception)s" #, python-format msgid "Compute host %(host)s could not be found." msgstr "O host de cálculo %(host)s não pôde ser localizado." #, python-format msgid "Compute host %s not found." msgstr "Compute host %s não pode ser encontrado." #, python-format msgid "Compute service of %(host)s is still in use." msgstr "Serviço de cálculo de %(host)s ainda está em uso." #, python-format msgid "Compute service of %(host)s is unavailable at this time." msgstr "O serviço de cálculo de %(host)s está indisponível no momento." #, python-format msgid "Config drive format '%(format)s' is not supported." msgstr "O formato da unidade de configuração %(format)s não é suportado." #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt hypervisor " "'%s' does not support selecting CPU models" msgstr "" "Configuração solicitou um modelo de CPU explícito, mas o hypervisor libvirt " "atual '%s' não suporta a seleção de modelos de CPU" #, python-format msgid "" "Conflict updating instance %(instance_uuid)s, but we were unable to " "determine the cause" msgstr "" "Conflito ao atualizar a instância %(instance_uuid)s, mas não foi possível " "determinar a causa" #, python-format msgid "" "Conflict updating instance %(instance_uuid)s. Expected: %(expected)s. " "Actual: %(actual)s" msgstr "" "Conflito ao atualizar a instância %(instance_uuid)s. Esperado: %(expected)s. " "Real: %(actual)s" #, python-format msgid "Connection to cinder host failed: %(reason)s" msgstr "A conexão com o host Cinder falhou: %(reason)s" #, python-format msgid "Connection to glance host %(server)s failed: %(reason)s" msgstr "A conexão com o host Glance %(server)s falhou: %(reason)s" #, python-format msgid "Connection to libvirt lost: %s" msgstr "Conexão com libvirt perdida: %s" #, python-format msgid "" "Console log output could not be retrieved for instance %(instance_id)s. " "Reason: %(reason)s" msgstr "" "A saída de log do console não pôde ser recuperada para a instância " "%(instance_id)s. Motivo: %(reason)s" msgid "Constraint not met." msgstr "Restrição não atendida." 
#, python-format msgid "Converted to raw, but format is now %s" msgstr "Convertido em bruto, mas o formato é agora %s" #, python-format msgid "Could not attach image to loopback: %s" msgstr "Não foi possível anexar imagem ao loopback: %s" #, python-format msgid "Could not fetch image %(image_id)s" msgstr "Não foi possível buscar a imagem %(image_id)s" #, python-format msgid "Could not find a handler for %(driver_type)s volume." msgstr "" "Não foi possível localizar um manipulador para o volume %(driver_type)s." #, python-format msgid "Could not find binary %(binary)s on host %(host)s." msgstr "Não foi possível localizar o binário %(binary)s no host %(host)s." #, python-format msgid "Could not find config at %(path)s" msgstr "Não foi possível localizar a configuração em %(path)s" msgid "Could not find the datastore reference(s) which the VM uses." msgstr "" "Não foi possível localizar a(s) referência(s) do armazenamento de dados que " "a VM " "usa." #, python-format msgid "Could not load line %(line)s, got error %(error)s" msgstr "Não foi possível carregar a linha %(line)s, obteve erro %(error)s" #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "" "Não foi possível carregar o aplicativo paste app '%(name)s' a partir do " "%(path)s" #, python-format msgid "" "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s" msgstr "" "Não foi possível montar a unidade de configuração vfat. Falha de " "%(operation)s. Erro: %(error)s" #, python-format msgid "Could not upload image %(image_id)s" msgstr "Não foi possível fazer upload da imagem %(image_id)s" msgid "Creation of virtual interface with unique mac address failed" msgstr "Criação da interface virtual com endereço mac único falhou" #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "" "O regex do armazenamento de dados %s não correspondeu a nenhum armazenamento " "de dados" msgid "Datetime is in invalid format" msgstr "Data/hora estão em formato inválido" msgid "Default PBM policy is required if PBM is enabled." msgstr "Política de PBM padrão será necessária se PBM for ativado." #, python-format msgid "Device '%(device)s' not found." msgstr "Dispositivo '%(device)s' não localizado." msgid "Device name contains spaces." msgstr "Nome do dispositivo contém espaços." msgid "Device name empty or too long." msgstr "Nome de dispositivo vazio ou muito longo." #, python-format msgid "Device type mismatch for alias '%s'" msgstr "Tipo de dispositivo incompatível para o alias '%s'" #, python-format msgid "Disk format %(disk_format)s is not acceptable" msgstr "Formato do disco %(disk_format)s não é aceito" #, python-format msgid "Disk info file is invalid: %(reason)s" msgstr "Arquivo de informações de disco é inválido: %(reason)s" #, python-format msgid "Driver Error: %s" msgstr "Erro de driver: %s" #, python-format msgid "Error attempting to run %(method)s" msgstr "Erro ao tentar executar %(method)s" #, python-format msgid "" "Error destroying the instance on node %(node)s. Provision state still " "'%(state)s'." msgstr "" "Erro ao destruir a instância no nó %(node)s. O estado da provisão ainda é " "'%(state)s'." 
#, python-format msgid "Error during unshelve instance %(instance_id)s: %(reason)s" msgstr "Erro durante a instância unshelve %(instance_id)s: %(reason)s" #, python-format msgid "" "Error from libvirt while getting domain info for %(instance_name)s: [Error " "Code %(error_code)s] %(ex)s" msgstr "" "erro de libvirt ao obter informações do domínio para %(instance_name)s: " "[Código de Erro %(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Erro de libvirt ao consultar %(instance_name)s: [Código de Erro " "%(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while quiescing %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Erro de libvirt ao efetuar quiesce %(instance_name)s: [Código de Erro " "%(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while set password for username \"%(user)s\": [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Erro de libvirt ao configurar senha para o nome do usuário \"%(user)s\": " "[Código de erro %(error_code)s] %(ex)s" #, python-format msgid "" "Error mounting %(device)s to %(dir)s in image %(image)s with libguestfs " "(%(e)s)" msgstr "" "Erro ao montar %(device)s para %(dir)s na imagem %(image)s com libguestfs " "(%(e)s)" #, python-format msgid "Error mounting %(image)s with libguestfs (%(e)s)" msgstr "Erro ao montar %(image)s com libguestfs (%(e)s)" #, python-format msgid "Error when creating resource monitor: %(monitor)s" msgstr "Erro ao criar monitor de recurso: %(monitor)s" #, python-format msgid "Event %(event)s not found for action id %(action_id)s" msgstr "Evento %(event)s não localizado para o ID da ação %(action_id)s" msgid "Event must be an instance of nova.virt.event.Event" msgstr "O evento deve ser uma instância de nova.virt.event.Event" #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for instance " "%(instance_uuid)s. Last exception: %(exc_reason)s" msgstr "" "Máximo excedido de tentativas de planejamento %(max_attempts)d para a " "instância %(instance_uuid)s. Última exceção:%(exc_reason)s" #, python-format msgid "" "Exceeded max scheduling retries %(max_retries)d for instance " "%(instance_uuid)s during live migration" msgstr "" "Excedido o máximo de novas tentativas de planejamento %(max_retries)d para a " "instância %(instance_uuid)s durante migração em tempo real" #, python-format msgid "Exceeded maximum number of retries. %(reason)s" msgstr "Foi excedido o número máximo de tentativas. %(reason)s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "Esperado um uuid, mas recebido %(uuid)s." msgid "Extracting vmdk from OVA failed." msgstr "A extração de vmdk de OVA falhou." #, python-format msgid "Failed to access port %(port_id)s: %(reason)s" msgstr "Falha ao acessar a porta %(port_id)s: %(reason)s" #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "Falha ao alocar a(s) rede(s) com erro %s, não reagendando." msgid "Failed to allocate the network(s), not rescheduling." msgstr "Falha ao alocar a rede(s), não replanejando." 
#, python-format msgid "Failed to attach network adapter device to %(instance_uuid)s" msgstr "" "Falha ao anexar o dispositivo de adaptador de rede para %(instance_uuid)s" #, python-format msgid "Failed to deploy instance: %(reason)s" msgstr "Falha ao provisionar a instância: %(reason)s" #, python-format msgid "Failed to detach PCI device %(dev)s: %(reason)s" msgstr "Falha ao remover o dispositivo PCI %(dev)s: %(reason)s" #, python-format msgid "Failed to detach network adapter device from %(instance_uuid)s" msgstr "" "Falha ao remover o dispositivo de adaptador de rede de %(instance_uuid)s" #, python-format msgid "Failed to encrypt text: %(reason)s" msgstr "Falha ao criptografar texto: %(reason)s" #, python-format msgid "Failed to launch instances: %(reason)s" msgstr "Falha ao ativar instâncias: %(reason)s" #, python-format msgid "Failed to map partitions: %s" msgstr "Falha ao mapear partições: %s" #, python-format msgid "Failed to mount filesystem: %s" msgstr "Falhou em montar sistema de arquivo: %s" #, python-format msgid "Failed to power off instance: %(reason)s" msgstr "Falha ao desativar a instância: %(reason)s" #, python-format msgid "Failed to power on instance: %(reason)s" msgstr "Falha ao ativar a instância: %(reason)s" #, python-format msgid "Failed to provision instance %(inst)s: %(reason)s" msgstr "Falha ao fornecer instância %(inst)s: %(reason)s" #, python-format msgid "Failed to read or write disk info file: %(reason)s" msgstr "Falha ao ler ou gravar o arquivo de informações do disco: %(reason)s" #, python-format msgid "Failed to reboot instance: %(reason)s" msgstr "Falha ao reinicializar a instância: %(reason)s" #, python-format msgid "Failed to remove volume(s): (%(reason)s)" msgstr "Falha ao remover o(s) volume(s): (%(reason)s)" #, python-format msgid "Failed to request Ironic to rebuild instance %(inst)s: %(reason)s" msgstr "" "Falha ao solicitar reconstrução da instância pelo Ironic %(inst)s: %(reason)s" #, python-format msgid "Failed to resume instance: %(reason)s" msgstr "Falha ao continuar a instância: %(reason)s" #, python-format msgid "Failed to run qemu-img info on %(path)s : %(error)s" msgstr "Falha ao executar as informações de qemu-img em %(path)s : %(error)s" #, python-format msgid "Failed to set admin password on %(instance)s because %(reason)s" msgstr "" "Falha ao configurar a senha de administrador em %(instance)s porque " "%(reason)s" #, python-format msgid "Failed to suspend instance: %(reason)s" msgstr "Falha ao suspender a instância: %(reason)s" #, python-format msgid "Failed to terminate instance: %(reason)s" msgstr "Falha ao finalizar instância: %(reason)s" msgid "Failure prepping block device." msgstr "Falha na preparação do dispositivo de bloco." #, python-format msgid "File %(file_path)s could not be found." msgstr "O arquivo %(file_path)s não pôde ser localizado." #, python-format msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s." msgstr "" "O IP fixo %(ip)s não é um endereço IP válido para a rede %(network_id)s." #, python-format msgid "Fixed IP %s is already in use." msgstr "IP fixo %s já está em uso." #, python-format msgid "" "Fixed IP address %(address)s is already in use on instance %(instance_uuid)s." msgstr "" "O endereço IP fixo %(address)s já está em uso na instância %(instance_uuid)s." #, python-format msgid "Fixed IP not found for address %(address)s." msgstr "IP fixo não localizado para o endereço %(address)s." #, python-format msgid "Flavor %(flavor_id)s could not be found." 
msgstr "O método %(flavor_id)s não pôde ser localizado." #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s." msgstr "" "Tipo %(flavor_id)s não possui especificações extras com a chave " "%(extra_specs_key)s." #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(key)s." msgstr "" "Tipo %(flavor_id)s não possui especificações extras com a chave %(key)s." #, python-format msgid "" "Flavor %(id)s extra spec cannot be updated or created after %(retries)d " "retries." msgstr "" "A especificação extra de tipo %(id)s não pode ser atualizada ou criada após " "%(retries)d novas tentativas." #, python-format msgid "" "Flavor access already exists for flavor %(flavor_id)s and project " "%(project_id)s combination." msgstr "" "Acesso flavor já existe para o flavor %(flavor_id)s e o projeto " "%(project_id)s." #, python-format msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination." msgstr "" "Acesso ao método não localizado para a combinação %(flavor_id)s / " "%(project_id)s." msgid "Flavor used by the instance could not be found." msgstr "O método usado pela instância não pôde ser localizado." #, python-format msgid "Flavor with ID %(flavor_id)s already exists." msgstr "Tipo com ID %(flavor_id)s já existe." #, python-format msgid "Flavor with name %(flavor_name)s could not be found." msgstr "Tipo com nome %(flavor_name)s não pôde ser localizado." #, python-format msgid "Flavor with name %(name)s already exists." msgstr "Tipo com nome %(name)s já existe." #, python-format msgid "" "Flavor's disk is smaller than the minimum size specified in image metadata. " "Flavor disk is %(flavor_size)i bytes, minimum size is %(image_min_disk)i " "bytes." msgstr "" "O disco do tipo é menor que o tamanho mínimo especificado nos metadados de " "imagem. O disco do tipo tem %(flavor_size)i bytes; o tamanho mínimo é " "%(image_min_disk)i bytes." #, python-format msgid "" "Flavor's disk is too small for requested image. Flavor disk is " "%(flavor_size)i bytes, image is %(image_size)i bytes." msgstr "" "O disco do tipo é muito pequeno para a imagem solicitada. O disco do tipo " "tem %(flavor_size)i bytes; a imagem tem %(image_size)i bytes." msgid "Flavor's memory is too small for requested image." msgstr "Memória do tipo é muito pequena para a imagem solicitada." #, python-format msgid "Floating IP %(address)s association has failed." msgstr "A associação de IP flutuante %(address)s falhou." #, python-format msgid "Floating IP %(address)s is associated." msgstr "O IP flutuante %(address)s está associado." #, python-format msgid "Floating IP %(address)s is not associated with instance %(id)s." msgstr "O IP flutuante %(address)s não está associado à instância %(id)s." #, python-format msgid "Floating IP not found for ID %(id)s." msgstr "IP flutuante não localizado para o ID %(id)s." #, python-format msgid "Floating IP not found for ID %s" msgstr "IP flutuante não localizado para ID %s" #, python-format msgid "Floating IP not found for address %(address)s." msgstr "IP flutuante não localizado para o endereço %(address)s." msgid "Floating IP pool not found." msgstr "Conjunto de IPs flutuantes não localizado." msgid "" "Forbidden to exceed flavor value of number of serial ports passed in image " "meta." msgstr "" "Proibido exceder valor do tipo do número de portas seriais passadas nos " "metadados de imagem." msgid "Found no disk to snapshot." msgstr "Não foi localizado nenhum disco para captura instantânea." 
msgid "Guest does not have a console available." msgstr "O convidado não possui um console disponível" #, python-format msgid "Host %(host)s could not be found." msgstr "Host %(host)s não encontrado." #, python-format msgid "Host %(host)s is already mapped to cell %(uuid)s" msgstr "O host %(host)s já está mapeado para a célula %(uuid)s" #, python-format msgid "Host '%(name)s' is not mapped to any cell" msgstr "Host '%(name)s' não mapeado para qualquer célula" msgid "Host aggregate is not empty" msgstr "Agregado do host não está vazio" msgid "Host does not support guests with NUMA topology set" msgstr "O host não suporta convidados com a topologia NUMA configurada" msgid "Host does not support guests with custom memory page sizes" msgstr "" "O host não suporta convidados com tamanhos de página de memória customizados" msgid "Hypervisor driver does not support post_live_migration_at_source method" msgstr "" "Driver do hypervisor não suporta o método post_live_migration_at_source" #, python-format msgid "Hypervisor virt type '%s' is not valid" msgstr "O tipo hypervisor virt '%s' não é válido" #, python-format msgid "Hypervisor virtualization type '%(hv_type)s' is not recognised" msgstr "Tipo de virtualização do hypervisor '%(hv_type)s' não é reconhecido" #, python-format msgid "Hypervisor with ID '%s' could not be found." msgstr "O hypervisor com o ID '%s' não pôde ser localizado." #, python-format msgid "IP allocation over quota in pool %s." msgstr "Alocação de IP de cota no conjunto %s." msgid "IP allocation over quota." msgstr "Alocação de IP acima da cota." #, python-format msgid "Image %(image_id)s could not be found." msgstr "Imagem %(image_id)s não foi encontrada." #, python-format msgid "Image %(image_id)s is not active." msgstr "A imagem %(image_id)s não está ativa." #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "A imagem %(image_id)s é inaceitável: %(reason)s" msgid "Image disk size greater than requested disk size" msgstr "Tamanho do disco de imagem maior que o tamanho do disco solicitado" msgid "Image is not raw format" msgstr "A imagem não é um formato bruto" msgid "Image metadata limit exceeded" msgstr "Limite excedido de metadados da imagem" #, python-format msgid "Image model '%(image)s' is not supported" msgstr "O modelo de imagem '%(image)s' não é suportado" msgid "Image not found." msgstr "Imagem não encontrada." #, python-format msgid "" "Image property '%(name)s' is not permitted to override NUMA configuration " "set against the flavor" msgstr "" "Propriedade de imagem '%(name)s' não é permitida para substituir a " "configuração NUMA definida com relação ao tipo" msgid "" "Image property 'hw_cpu_policy' is not permitted to override CPU pinning " "policy set against the flavor" msgstr "" "Propriedade de imagem 'hw_cpu_policy' não é permitida para substituir o " "pinning da CPU conjunto de política em relação ao tipo" msgid "" "Image property 'hw_cpu_thread_policy' is not permitted to override CPU " "thread pinning policy set against the flavor" msgstr "" "A propriedade de imagem 'hw_cpu_thread_policy' não é permitida para " "substituir o conjunto de política de pinning de encadeamento de CPU em " "relação ao tipo" msgid "Image that the instance was started with could not be found." msgstr "A imagem que foi iniciada pela instância não pode ser localizada." 
#, python-format msgid "Image's config drive option '%(config_drive)s' is invalid" msgstr "Opção de unidade de configuração da imagem '%(config_drive)s' inválida" msgid "" "Images with destination_type 'volume' need to have a non-zero size specified" msgstr "" "As imagens com destination_type 'volume' precisam ter um tamanho diferente " "de zero especificado" msgid "In ERROR state" msgstr "No estado ERROR" #, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "Nos estados %(vm_state)s/%(task_state)s, não RESIZED/Nenhum" #, python-format msgid "In-progress live migration %(id)s is not found for server %(uuid)s." msgstr "" "A migração em tempo real em andamento %(id)s não está localizada para o " "servidor %(uuid)s." msgid "" "Incompatible settings: ephemeral storage encryption is supported only for " "LVM images." msgstr "" "Configurações incompatíveis: criptografia de armazenamento efêmera suportada " "somente para imagens LVM." #, python-format msgid "Info cache for instance %(instance_uuid)s could not be found." msgstr "" "O cache de informações para a instância %(instance_uuid)s não pôde ser " "localizado." #, python-format msgid "" "Instance %(instance)s and volume %(vol)s are not in the same " "availability_zone. Instance is in %(ins_zone)s. Volume is in %(vol_zone)s" msgstr "" "Instância %(instance)s e volume %(vol)s não estão na mesma " "availability_zone. A instância está em %(ins_zone)s. O volume está em " "%(vol_zone)s" #, python-format msgid "Instance %(instance)s does not have a port with id %(port)s" msgstr "A instância %(instance)s não possui uma porta com o ID%(port)s" #, python-format msgid "Instance %(instance_id)s cannot be rescued: %(reason)s" msgstr "A instância %(instance_id)s não pode ser resgatada: %(reason)s" #, python-format msgid "Instance %(instance_id)s could not be found." msgstr "A instância %(instance_id)s não pôde ser localizada." #, python-format msgid "Instance %(instance_id)s has no tag '%(tag)s'" msgstr "Instância %(instance_id)s não possui identificação ‘%(tag)s‘" #, python-format msgid "Instance %(instance_id)s is not in rescue mode" msgstr "A instância %(instance_id)s não está no modo de resgate" #, python-format msgid "Instance %(instance_id)s is not ready" msgstr "A instância %(instance_id)s não está pronta" #, python-format msgid "Instance %(instance_id)s is not running." msgstr "A instância %(instance_id)s não está executando." #, python-format msgid "Instance %(instance_id)s is unacceptable: %(reason)s" msgstr "A instância %(instance_id)s é inaceitável: %(reason)s" #, python-format msgid "Instance %(instance_uuid)s does not specify a NUMA topology" msgstr "A instância %(instance_uuid)s não especifica uma topologia NUMA" #, python-format msgid "Instance %(instance_uuid)s does not specify a migration context." msgstr "A instância %(instance_uuid)s não especifica um contexto de migração." #, python-format msgid "" "Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while " "the instance is in this state." msgstr "" "Instância %(instance_uuid)s em %(attr)s %(state)s. Não é possível %(method)s " "enquanto a instância está nesse estado." #, python-format msgid "Instance %(instance_uuid)s is locked" msgstr "A instância %(instance_uuid)s está bloqueada" #, python-format msgid "" "Instance %(instance_uuid)s requires config drive, but it does not exist." msgstr "" "A instância %(instance_uuid)s requer a unidade de configuração, mas ela não " "existe." 
#, python-format msgid "Instance %(name)s already exists." msgstr "A instância %(name)s já existe." #, python-format msgid "Instance %(server_id)s is in an invalid state for '%(action)s'" msgstr "A instância %(server_id)s está em um estado inválido para '%(action)s'" #, python-format msgid "Instance %(uuid)s has no mapping to a cell." msgstr "A instância %(uuid)s não possui nenhum mapeamento para uma célula." #, python-format msgid "Instance %s not found" msgstr "Instância %s não encontrada" #, python-format msgid "Instance %s provisioning was aborted" msgstr "A instância %s que está sendo provisionada foi interrompida" msgid "Instance could not be found" msgstr "A instância não pôde ser localizada" msgid "Instance disk to be encrypted but no context provided" msgstr "Disco da instância a ser criptografado, porém, sem contexto fornecido" msgid "Instance event failed" msgstr "Evento de instância com falha" #, python-format msgid "Instance group %(group_uuid)s already exists." msgstr "O grupo de instância %(group_uuid)s já existe." #, python-format msgid "Instance group %(group_uuid)s could not be found." msgstr "O grupo de instância %(group_uuid)s não pôde ser localizado." msgid "Instance has no source host" msgstr "A instância não possui host de origem" msgid "Instance has not been resized." msgstr "A instância não foi redimensionada." #, python-format msgid "Instance hostname %(hostname)s is not a valid DNS name" msgstr "O nome do host da instância %(hostname)s não é um nome DNS válido" msgid "Instance is not a member of specified network" msgstr "A instância não é um membro de rede especificado" #, python-format msgid "Instance rollback performed due to: %s" msgstr "Retrocesso de instância executado devido a: %s" #, python-format msgid "" "Insufficient Space on Volume Group %(vg)s. Only %(free_space)db available, " "but %(size)d bytes required by volume %(lv)s." msgstr "" "Espaço Insuficiente no Grupo de Volumes %(vg)s. Apenas %(free_space)db " "disponíveis, mas %(size)d bytes requeridos pelo volume %(lv)s." #, python-format msgid "Insufficient compute resources: %(reason)s." msgstr "Recursos de cálculo insuficientes: %(reason)s." #, python-format msgid "Interface %(interface)s not found." msgstr "Interface %(interface)s não encontrada." #, python-format msgid "Invalid Base 64 data for file %(path)s" msgstr "Dados Base 64 inválidos para o arquivo %(path)s" msgid "Invalid Connection Info" msgstr "Informações de conexão inválidas" #, python-format msgid "Invalid ID received %(id)s." msgstr "ID inválido recebido %(id)s." #, python-format msgid "Invalid IP format %s" msgstr "Formato de IP inválido %s" #, python-format msgid "Invalid IP protocol %(protocol)s." msgstr "Protocolo IP %(protocol)s é inválido." msgid "" "Invalid PCI Whitelist: The PCI whitelist can specify devname or address, but " "not both" msgstr "" "Lista de desbloqueio de PCI inválida: A lista de desbloqueio de PCI pode " "especificar o devname ou endereço , mas não ambos" #, python-format msgid "Invalid PCI alias definition: %(reason)s" msgstr "Definição de alias de PCI inválida: %(reason)s" #, python-format msgid "Invalid Regular Expression %s" msgstr "Expressão Regular inválida %s" #, python-format msgid "Invalid characters in hostname '%(hostname)s'" msgstr "Caracteres inválidos no nome do host '%(hostname)s'" msgid "Invalid config_drive provided." msgstr "config_drive inválida fornecida." 
#, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "config_drive_format inválido \"%s\"" #, python-format msgid "Invalid console type %(console_type)s" msgstr "Tipo de console inválido %(console_type)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Tipo de conteúdo %(content_type)s é inválido." #, python-format msgid "Invalid datetime string: %(reason)s" msgstr "String datetime inválida: %(reason)s" msgid "Invalid device UUID." msgstr "UUID de dispositivo inválido." #, python-format msgid "Invalid entry: '%s'" msgstr "Entrada inválida: '%s'" #, python-format msgid "Invalid entry: '%s'; Expecting dict" msgstr "Entrada inválida: '%s'; Esperando dicionário" #, python-format msgid "Invalid entry: '%s'; Expecting list or dict" msgstr "Entrada inválida: '%s'; Esperando dicionário ou lista" #, python-format msgid "Invalid exclusion expression %r" msgstr "Expressão de exclusão inválida %r" #, python-format msgid "Invalid image format '%(format)s'" msgstr "Formato de imagem inválido '%(format)s'" #, python-format msgid "Invalid image href %(image_href)s." msgstr "Imagem inválida href %(image_href)s." #, python-format msgid "Invalid inclusion expression %r" msgstr "Expressão de inclusão inválida %r" #, python-format msgid "" "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s" msgstr "" "Entrada inválida para campo/atributo %(path)s. Valor: %(value)s. %(message)s" #, python-format msgid "Invalid input received: %(reason)s" msgstr "Entrada inválida recebida: %(reason)s" msgid "Invalid instance image." msgstr "Imagem de instância inválida." #, python-format msgid "Invalid is_public filter [%s]" msgstr "Filtro is_public inválido [%s]" msgid "Invalid key_name provided." msgstr "key_name inválido fornecido." #, python-format msgid "Invalid memory page size '%(pagesize)s'" msgstr "Tamanho de página de memória inválido ‘%(pagesize)s‘" msgid "Invalid metadata key" msgstr "Chave de metadados inválida" #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "Tamanho de metadados inválido: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "Metadados inválidos: %(reason)s" #, python-format msgid "Invalid minDisk filter [%s]" msgstr "Filtro minDisk inválido [%s]" #, python-format msgid "Invalid minRam filter [%s]" msgstr "Filtro minRam inválido [%s]" #, python-format msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" msgstr "Sequencia de porta %(from_port)s:%(to_port)s é inválida. %(msg)s" msgid "Invalid proxy request signature." msgstr "Assinatura da solicitação de proxy inválida." #, python-format msgid "Invalid range expression %r" msgstr "Expressão de intervalo inválido %r" msgid "Invalid service catalog json." msgstr "Catálogo de serviço json inválido." msgid "Invalid start time. The start time cannot occur after the end time." msgstr "" "Horário de início inválido. O horário de início não pode ocorrer após o " "horário de encerramento." 
msgid "Invalid state of instance files on shared storage" msgstr "" "Estado inválido de arquivos de instância em armazenamento compartilhado" #, python-format msgid "Invalid timestamp for date %s" msgstr "Registro de data e hora inválido para a data %s" #, python-format msgid "Invalid usage_type: %s" msgstr "Usage_type inválido: %s" #, python-format msgid "Invalid value for Config Drive option: %(option)s" msgstr "Valor inválido para a opção Configuração de Unidade: %(option)s" #, python-format msgid "Invalid virtual interface address %s in request" msgstr "Endereço de interface virtual inválido %s na solicitação" #, python-format msgid "Invalid volume access mode: %(access_mode)s" msgstr "Modo de acesso a volume inválido: %(access_mode)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "Volume inválido: %(reason)s" msgid "Invalid volume_size." msgstr "volume_size inválido." #, python-format msgid "Ironic node uuid not supplied to driver for instance %s." msgstr "UUID do nó do Ironic não fornecido ao driver para a instância %s." #, python-format msgid "" "It is not allowed to create an interface on external network %(network_uuid)s" msgstr "Não é permitido criar uma interface na rede externa %(network_uuid)s" msgid "" "Key Names can only contain alphanumeric characters, periods, dashes, " "underscores, colons and spaces." msgstr "" "Nomes de Chave podem conter apenas caracteres alfanuméricos, pontos, hifens, " "sublinhados, dois-pontos e espaços." #, python-format msgid "Key manager error: %(reason)s" msgstr "Erro do gerenciador de chaves: %(reason)s" #, python-format msgid "Key pair '%(key_name)s' already exists." msgstr "O par de chaves '%(key_name)s' já existe." #, python-format msgid "Keypair %(name)s not found for user %(user_id)s" msgstr "Par de chaves %(name)s não localizado para o usuário %(user_id)s" #, python-format msgid "Keypair data is invalid: %(reason)s" msgstr "Dados do par de chaves é inválido: %(reason)s" msgid "Limits only supported from vCenter 6.0 and above" msgstr "Limites suportados somente a partir do vCenter 6.0 e acima" #, python-format msgid "Live migration %(id)s for server %(uuid)s is not in progress." msgstr "" "A migração em tempo real %(id)s para o servidor %(uuid)s não está em " "andamento. " #, python-format msgid "Malformed message body: %(reason)s" msgstr "Corpo da mensagem malformado: %(reason)s" #, python-format msgid "" "Malformed request URL: URL's project_id '%(project_id)s' doesn't match " "Context's project_id '%(context_project_id)s'" msgstr "" "URL de solicitação Malformada: project_id '%(project_id)s' da URL não " "corresponde ao project_id '%(context_project_id)s' do contexto" msgid "Malformed request body" msgstr "Corpo do pedido está mal formado" msgid "Mapping image to local is not supported." msgstr "Mapeamento de imagem para local não é suportado." #, python-format msgid "Marker %(marker)s could not be found." msgstr "O marcador %(marker)s não pôde ser localizado." 
msgid "Maximum number of floating IPs exceeded" msgstr "Número máximo de IPs flutuantes excedido" #, python-format msgid "Maximum number of metadata items exceeds %(allowed)d" msgstr "O número máximo de itens de metadados excede %(allowed)d" msgid "Maximum number of ports exceeded" msgstr "Número máximo de portas excedido" msgid "Maximum number of security groups or rules exceeded" msgstr "Número máximo de grupos de segurança ou regras excedido" msgid "Metadata item was not found" msgstr "O item de metadados não foi localizado" msgid "Metadata property key greater than 255 characters" msgstr "Chave da propriedade de metadados com mais de 255 caracteres" msgid "Metadata property value greater than 255 characters" msgstr "Valor da propriedade de metadados com mais de 255 caracteres" msgid "Metadata type should be dict." msgstr "Tipo de metadados deve ser dic." #, python-format msgid "" "Metric %(name)s could not be found on the compute host node %(host)s." "%(node)s." msgstr "" "Métrica %(name)s não pôde ser localizada no nó de host de cálculo %(host)s." "%(node)s." #, python-format msgid "Migration %(id)s for server %(uuid)s is not live-migration." msgstr "" "A migração %(id)s para o servidor %(uuid)s não é uma migração em tempo " "real. " #, python-format msgid "Migration %(migration_id)s could not be found." msgstr "A migração %(migration_id)s não pôde ser localizada." #, python-format msgid "Migration %(migration_id)s not found for instance %(instance_id)s" msgstr "" "Migração %(migration_id)s não localizada para a instância %(instance_id)s" #, python-format msgid "" "Migration %(migration_id)s state of instance %(instance_uuid)s is %(state)s. " "Cannot %(method)s while the migration is in this state." msgstr "" "O estado da migração %(migration_id)s da instância %(instance_uuid)s é " "%(state)s. O método %(method)s não é possível enquanto a migração estiver " "nesse estado." #, python-format msgid "Migration error: %(reason)s" msgstr "Erro de migração: %(reason)s" msgid "Migration is not supported for LVM backed instances" msgstr "A migração não é suportada para instâncias do LVM de backup" #, python-format msgid "" "Migration not found for instance %(instance_id)s with status %(status)s." msgstr "" "Migração não localizada para a instância %(instance_id)s com o status " "%(status)s." #, python-format msgid "Migration pre-check error: %(reason)s" msgstr "Erro de pré-verificação de migração: %(reason)s" #, python-format msgid "Migration select destinations error: %(reason)s" msgstr "Erro de destinos de seleção de migração: %(reason)s" #, python-format msgid "Missing arguments: %s" msgstr "Argumentos ausentes: %s" msgid "Missing device UUID." msgstr "UUID de dispositivo faltando." msgid "Missing disabled reason field" msgstr "Está faltando o campo do motivo da desativação" msgid "Missing forced_down field" msgstr "Faltando campo forced_down" msgid "Missing imageRef attribute" msgstr "Atributo imageRef ausente" #, python-format msgid "Missing keys: %s" msgstr "Chaves ausentes: %s" msgid "Missing parameter dict" msgstr "Dicionário de parâmetros ausente" #, python-format msgid "" "More than one instance is associated with fixed IP address '%(address)s'." msgstr "" "Mais de uma instância está associada ao endereço IP fixo '%(address)s'." msgid "" "More than one possible network found. Specify network ID(s) to select which " "one(s) to connect to." msgstr "" "Mais de uma rede possível localizada. Especifique ID(s) de rede para " "selecionar qual(is) a se conectar." 
msgid "More than one swap drive requested." msgstr "Mais de uma unidade de troca solicitada." #, python-format msgid "Multi-boot operating system found in %s" msgstr "Sistema operacional de multi-inicialização localizado em %s" msgid "Multiple X-Instance-ID headers found within request." msgstr "Vários cabeçalhos X-Instance-ID localizados dentro da solicitação." msgid "Multiple X-Tenant-ID headers found within request." msgstr "Vários cabeçalhos X-Tenant-ID localizados dentro da solicitação." #, python-format msgid "Multiple floating IP pools matches found for name '%s'" msgstr "" "Vários correspondências de conjuntos de IP flutuantes localizadas para o " "nome '%s'" #, python-format msgid "Multiple floating IPs are found for address %(address)s." msgstr "Vários IPs flutuantes foram localizados para o endereço %(address)s." msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we do " "not return uptime for just one host." msgstr "" "Vários hosts podem ser gerenciados pelo driver vCenter VMWare; portanto, não " "retorne o tempo de atividade apenas para um host." msgid "Multiple possible networks found, use a Network ID to be more specific." msgstr "" "Várias redes possíveis localizados, use um ID de Rede para ser mais " "específico." #, python-format msgid "" "Multiple security groups found matching '%s'. Use an ID to be more specific." msgstr "" "Foram localizados vários grupos de segurança que correspondem a '%s'. Use um " "ID para ser mais específico." msgid "Must input network_id when request IP address" msgstr "network_id deve ser inserido quando o endereço IP for solicitado" msgid "Must not input both network_id and port_id" msgstr "Ambos network_id e port_id não devem ser inseridos" msgid "" "Must specify host_ip, host_username and host_password to use vmwareapi." "VMwareVCDriver" msgstr "" "Deve especificar host_ip, host_username e host_password para utilizar " "vmwareapi.VMwareVCDriver" msgid "Must supply a positive value for max_number" msgstr "Deve-se fornecer um valor positivo para o max_number" msgid "Must supply a positive value for max_rows" msgstr "Deve ser fornecido um valor positivo para max_rows" #, python-format msgid "Network %(network_id)s could not be found." msgstr "Rede %(network_id)s não foi encontrada." #, python-format msgid "" "Network %(network_uuid)s requires a subnet in order to boot instances on." msgstr "" "A rede %(network_uuid)s requer uma sub-rede para inicializar instâncias." #, python-format msgid "Network could not be found for bridge %(bridge)s" msgstr "A rede não pôde ser localizada para a ponte %(bridge)s" #, python-format msgid "Network could not be found for instance %(instance_id)s." msgstr "A rede não pôde ser localizada para a instância %(instance_id)s." msgid "Network not found" msgstr "Rede não localizada" msgid "" "Network requires port_security_enabled and subnet associated in order to " "apply security groups." msgstr "" "A rede requer port_security_enabled e sub-rede associados para aplicar " "grupos de segurança." msgid "New volume must be detached in order to swap." msgstr "O novo volume deve ser removido para a troca." msgid "New volume must be the same size or larger." msgstr "O novo volume deve ser do mesmo tamanho ou maior." #, python-format msgid "No Block Device Mapping with id %(id)s." msgstr "Nenhum Mapeamento de Dispositivo de Bloco com id %(id)s." msgid "No Unique Match Found." msgstr "Nenhuma Correspondência Exclusiva Localizada." 
msgid "No compute host specified" msgstr "Nenhum host de cálculo especificado" #, python-format msgid "No configuration information found for operating system %(os_name)s" msgstr "" "Nenhuma informação de configuração localizada para o sistema operacional " "%(os_name)s" #, python-format msgid "No device with MAC address %s exists on the VM" msgstr "Nenhum dispositivo com endereço MAC %s existe na VM" #, python-format msgid "No device with interface-id %s exists on VM" msgstr "Nenhum dispositivo com interface-id %s existe na VM" #, python-format msgid "No disk at %(location)s" msgstr "Nenhum disco em %(location)s" #, python-format msgid "No fixed IP addresses available for network: %(net)s" msgstr "Nenhum endereço IP fixo disponível para rede: %(net)s" msgid "No fixed IPs associated to instance" msgstr "Nenhum IP fixo associado à instância" msgid "No free nbd devices" msgstr "Nenhum dispositivo nbd livre" msgid "No host available on cluster" msgstr "Nenhum host disponível no cluster" msgid "No hosts found to map to cell, exiting." msgstr "Nenhum host localizado para mapear para a célula, encerrando." #, python-format msgid "No hypervisor matching '%s' could be found." msgstr "Nenhum hypervisor corresponde a '%s' pôde ser localizado." msgid "No image locations are accessible" msgstr "Nenhum local da imagem é acessível" #, python-format msgid "" "No live migration URI configured and no default available for " "\"%(virt_type)s\" hypervisor virtualization type." msgstr "" "Nenhum URI de migração em tempo real configurado e nenhum padrão disponível " "para o tipo de virtualização do hypervisor \"%(virt_type)s\"." msgid "No more floating IPs available." msgstr "Nenhum IP flutuante disponível." #, python-format msgid "No more floating IPs in pool %s." msgstr "Sem IPs flutuantes no conjunto %s." #, python-format msgid "No mount points found in %(root)s of %(image)s" msgstr "Nenhum ponto de montagem localizado em %(root)s de %(image)s" #, python-format msgid "No operating system found in %s" msgstr "Nenhum sistema operacional localizado em %s" msgid "No root disk defined." msgstr "Nenhum disco raiz definido." #, python-format msgid "" "No specific network was requested and none are available for project " "'%(project_id)s'." msgstr "" "Nenhuma rede específica foi solicitada e nenhuma está disponível para o " "projeto '%(project_id)s'." msgid "No valid host found for cold migrate" msgstr "Nenhum host válido localizado para a migração a frio" msgid "No valid host found for resize" msgstr "Nenhum host válido localizado para redimensionamento" #, python-format msgid "No valid host was found. %(reason)s" msgstr "Nenhum host válido localizado. %(reason)s" #, python-format msgid "No volume Block Device Mapping at path: %(path)s" msgstr "" "Nenhum Mapeamento do Dispositivo de Bloco do volume no caminho: %(path)s" #, python-format msgid "No volume Block Device Mapping with id %(volume_id)s." msgstr "" "Nenhum Mapeamento de Dispositivo de Bloco do volume com o ID %(volume_id)s." #, python-format msgid "Node %s could not be found." msgstr "O nó %s não pôde ser localizado." #, python-format msgid "Not able to acquire a free port for %(host)s" msgstr "Não é possível adquirir uma porta livre para %(host)s" #, python-format msgid "Not able to bind %(host)s:%(port)d, %(error)s" msgstr "Não é possível ligar %(host)s:%(port)d, %(error)s" #, python-format msgid "" "Not all Virtual Functions of PF %(compute_node_id)s:%(address)s are free." 
msgstr "" "Nem todas as Funções Virtuais do %(compute_node_id)s:%(address)s estão " "livres." msgid "Not an rbd snapshot" msgstr "Não uma captura instantânea de rbd" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "Não autorizado para a imagem %(image_id)s." msgid "Not authorized." msgstr "Não autorizado." msgid "Not enough parameters to build a valid rule." msgstr "Não há parâmetros suficientes para construir uma regra válida." msgid "Not stored in rbd" msgstr "Não armazenado em rbd" msgid "Nothing was archived." msgstr "Nada foi arquivado" #, python-format msgid "Nova requires libvirt version %s or greater." msgstr "Nova requer a versão libvirt %s ou superior." msgid "Number of Rows Archived" msgstr "Número de Linhas Arquivadas" #, python-format msgid "Object action %(action)s failed because: %(reason)s" msgstr "A ação do objeto %(action)s falhou porque: %(reason)s" msgid "Old volume is attached to a different instance." msgstr "Um volume antigo está anexado a uma instância diferente." #, python-format msgid "One or more hosts already in availability zone(s) %s" msgstr "Um ou mais hosts já na(s) zona(s) de disponibilidade %s" msgid "Only administrators may list deleted instances" msgstr "Apenas administradores podem listar instância excluídas" msgid "Origin header does not match this host." msgstr "Cabeçalho de origem não corresponde a esse host." msgid "Origin header not valid." msgstr "Cabeçalho de origem não é válido." msgid "Origin header protocol does not match this host." msgstr "Protocolo do cabeçalho de origem não corresponde a esse host." #, python-format msgid "PCI Device %(node_id)s:%(address)s not found." msgstr "Dispositivo PCI %(node_id)s:%(address)s não localizado." #, python-format msgid "PCI alias %(alias)s is not defined" msgstr "O alias de PCI %(alias)s não está definido" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is %(status)s instead of " "%(hopestatus)s" msgstr "" "Dispositivo PCI %(compute_node_id)s:%(address)s é %(status)s ao invés de " "%(hopestatus)s" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead of " "%(hopeowner)s" msgstr "" "Dispositivo PCI %(compute_node_id)s:%(address)s pertence a %(owner)s ao " "invés de %(hopeowner)s" #, python-format msgid "PCI device %(id)s not found" msgstr "Dispositivo PCI %(id)s não localizado" #, python-format msgid "PCI device request %(requests)s failed" msgstr "A solicitação de dispositivo PCI %(requests)s falhou" #, python-format msgid "Page size %(pagesize)s forbidden against '%(against)s'" msgstr "Tamanho da página %(pagesize)s proibido contra '%(against)s'" #, python-format msgid "Page size %(pagesize)s is not supported by the host." msgstr "Tamanho da página %(pagesize)s não é suportado pelo host." #, python-format msgid "" "Parameters %(missing_params)s not present in vif_details for vif %(vif_id)s. " "Check your Neutron configuration to validate that the macvtap parameters are " "correct." msgstr "" "Parâmetros %(missing_params)s não presentes em vif_details para vif " "%(vif_id)s. Verifique a configuração do Neutron para validar se os " "parâmetros macvtap estão corretos." 
#, python-format msgid "Path %s must be LVM logical volume" msgstr "O caminho %s deve ser um volume lógico LVM" msgid "Paused" msgstr "Interrompido" msgid "Personality file limit exceeded" msgstr "Limite excedido do arquivo de personalidade" #, python-format msgid "" "Physical Function %(compute_node_id)s:%(address)s, related to VF " "%(compute_node_id)s:%(vf_address)s is %(status)s instead of %(hopestatus)s" msgstr "" "A Função Física %(compute_node_id)s:%(address)s relacionada ao VF " "%(compute_node_id)s:%(vf_address)s é %(status)s em vez de %(hopestatus)s" #, python-format msgid "Physical network is missing for network %(network_uuid)s" msgstr "A rede física está ausente para a rede %(network_uuid)s" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "A política não permite que %(action)s sejam executadas." #, python-format msgid "Port %(port_id)s is still in use." msgstr "A porta %(port_id)s ainda está em uso." #, python-format msgid "Port %(port_id)s not usable for instance %(instance)s." msgstr "Porta %(port_id)s não utilizável para a instância %(instance)s." #, python-format msgid "" "Port %(port_id)s not usable for instance %(instance)s. Value %(value)s " "assigned to dns_name attribute does not match instance's hostname " "%(hostname)s" msgstr "" "Porta %(port_id)s não utilizável para a instância %(instance)s. O valor " "%(value)s designado para o atributo dns_name não corresponde ao nome do host " "da instância %(hostname)s" #, python-format msgid "Port %(port_id)s requires a FixedIP in order to be used." msgstr "A porta %(port_id)s requer um FixedIP para ser usado." #, python-format msgid "Port %s is not attached" msgstr "Porta %s não está conectada" #, python-format msgid "Port id %(port_id)s could not be found." msgstr "O ID da porta %(port_id)s não pôde ser localizado." #, python-format msgid "Provided video model (%(model)s) is not supported." msgstr "Modelo de vídeo fornecido (%(model)s) não é suportado." #, python-format msgid "Provided watchdog action (%(action)s) is not supported." msgstr "Ação de watchdog fornecida (%(action)s) não é suportada." msgid "QEMU guest agent is not enabled" msgstr "O agente convidado QEMU não está ativado" #, python-format msgid "Quiescing is not supported in instance %(instance_id)s" msgstr "Quiesce não é suportado na instância %(instance_id)s" #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "A classe da cota %(class_name)s não pôde ser localizada." msgid "Quota could not be found" msgstr "A cota não pôde ser localizada" #, python-format msgid "" "Quota exceeded for %(overs)s: Requested %(req)s, but already used %(used)s " "of %(allowed)s %(overs)s" msgstr "" "Cota excedida para %(overs)s: Solicitados %(req)s, mas já usados %(used)s " "%(allowed)s %(overs)s" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Cota excedida para os recursos: %(overs)s" msgid "Quota exceeded, too many key pairs." msgstr "Cota excedida; excesso de pares de chaves." msgid "Quota exceeded, too many server groups." msgstr "Cota excedida, grupos de servidor em excesso." msgid "Quota exceeded, too many servers in group" msgstr "Cota excedida, servidores em excesso no grupo" #, python-format msgid "Quota exists for project %(project_id)s, resource %(resource)s" msgstr "Existe cota para o projeto %(project_id)s, recurso %(resource)s" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "A cota para o projeto %(project_id)s não pôde ser localizada." 
#, python-format msgid "" "Quota for user %(user_id)s in project %(project_id)s could not be found." msgstr "" "Cota para o usuário %(user_id)s no projeto %(project_id)s não pôde ser " "encontrada." #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be greater than or equal to " "already used and reserved %(minimum)s." msgstr "" "O limite de cota %(limit)s para %(resource)s deve ser maior ou igual ao " "%(minimum)s já utilizado e reservado." #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be less than or equal to " "%(maximum)s." msgstr "" "O limite de cota %(limit)s para %(resource)s deve ser menor ou igual a " "%(maximum)s." msgid "Request body and URI mismatch" msgstr "Corpo do pedido e incompatibilidade URI" msgid "Request is too large." msgstr "A solicitação é muito grande." #, python-format msgid "Request of image %(image_id)s got BadRequest response: %(response)s" msgstr "" "A solicitação da imagem %(image_id)s obteve a resposta BadRequest: " "%(response)s" #, python-format msgid "RequestSpec not found for instance %(instance_uuid)s" msgstr "RequestSpec não localizado para a instância %(instance_uuid)s" msgid "Requested CPU control policy not supported by host" msgstr "Política de controle de CPU solicitada não suportada pelo host. " #, python-format msgid "" "Requested hardware '%(model)s' is not supported by the '%(virt)s' virt driver" msgstr "" "O hardware solicitado '%(model)s' não é suportado pelo driver virt '%(virt)s'" #, python-format msgid "Requested image %(image)s has automatic disk resize disabled." msgstr "" "A imagem solicitada %(image)s possui o redimensionamento automático de disco " "desativado." msgid "" "Requested instance NUMA topology cannot fit the given host NUMA topology" msgstr "" "A topologia NUMA da instância solicitada não pode ser ajustada na topologia " "NUMA do host determinado" msgid "" "Requested instance NUMA topology together with requested PCI devices cannot " "fit the given host NUMA topology" msgstr "" "A topologia NUMA da instância solicitada juntamente com os dispositivos PCI " "solicitados não cabe na topologia NUMA do host determinado" #, python-format msgid "" "Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to " "satisfy for vcpus count %(vcpus)d" msgstr "" "Os limites de vCPU solicitados %(sockets)d:%(cores)d:%(threads)d são " "impossíveis de satisfazer para contagens de vcpus %(vcpus)d" #, python-format msgid "Rescue device does not exist for instance %s" msgstr "Dispositivo de resgate não existe para a instância %s" #, python-format msgid "Resize error: %(reason)s" msgstr "Erro de redimensionamento: %(reason)s" msgid "Resize to zero disk flavor is not allowed." msgstr "Redimensionar para tipo de disco zero não é permitido." msgid "Resource could not be found." msgstr "O recurso não pôde ser localizado." msgid "Resumed" msgstr "Retomado" #, python-format msgid "Root element name should be '%(name)s' not '%(tag)s'" msgstr "Nome do elemento raiz deve ser '%(name)s‘, não '%(tag)s'" #, python-format msgid "Running batches of %i until complete" msgstr "Executando lotes de %i até a conclusão" #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" "O Filtro do Host do Planejador %(filter_name)s não pôde ser localizado." 
#, python-format msgid "Security group %(name)s is not found for project %(project)s" msgstr "Grupo de segurança %(name)s não localizado para o projeto %(project)s" #, python-format msgid "" "Security group %(security_group_id)s not found for project %(project_id)s." msgstr "" "Grupo de segurança %(security_group_id)s não localizado para o projeto " "%(project_id)s." #, python-format msgid "Security group %(security_group_id)s not found." msgstr "Grupo de segurança %(security_group_id)s não localizado." #, python-format msgid "" "Security group %(security_group_name)s already exists for project " "%(project_id)s." msgstr "" "O grupo de segurança %(security_group_name)s já existe para o projeto " "%(project_id)s." #, python-format msgid "" "Security group %(security_group_name)s not associated with the instance " "%(instance)s" msgstr "" "Grupo de segurança %(security_group_name)s não associado à instância " "%(instance)s" msgid "Security group id should be uuid" msgstr "O ID do grupo de segurança deve ser uuid" msgid "Security group name cannot be empty" msgstr "O nome do grupo de segurança não pode estar vazio" msgid "Security group not specified" msgstr "Grupo de segurança não especificado" #, python-format msgid "Server disk was unable to be resized because: %(reason)s" msgstr "O disco do servidor não pôde ser redimensionado porque: %(reason)s" msgid "Server does not exist" msgstr "O servidor não existe" #, python-format msgid "ServerGroup policy is not supported: %(reason)s" msgstr "Política do ServerGroup não é suportada: %(reason)s" msgid "ServerGroupAffinityFilter not configured" msgstr "ServerGroupAffinityFilter não configurado" msgid "ServerGroupAntiAffinityFilter not configured" msgstr "ServerGroupAntiAffinityFilter não configurado" msgid "ServerGroupSoftAffinityWeigher not configured" msgstr "ServerGroupSoftAffinityWeigher não configurado" msgid "ServerGroupSoftAntiAffinityWeigher not configured" msgstr "ServerGroupSoftAntiAffinityWeigher não configurado" #, python-format msgid "Service %(service_id)s could not be found." msgstr "Serviço %(service_id)s não encontrado." #, python-format msgid "Service %s not found." msgstr "Serviço %s não localizado." msgid "Service is unavailable at this time." msgstr "Serviço está indisponível neste momento" #, python-format msgid "Service with host %(host)s binary %(binary)s exists." msgstr "O serviço com host %(host)s binário %(binary)s existe." #, python-format msgid "Service with host %(host)s topic %(topic)s exists." msgstr "O serviço com host %(host)s tópico %(topic)s existe." msgid "Set admin password is not supported" msgstr "Definir senha admin não é suportado" #, python-format msgid "Share '%s' is not supported" msgstr "O compartilhamento '%s' não é suportado" #, python-format msgid "Share level '%s' cannot have share configured" msgstr "" "O nível de compartilhamento '%s' não pode ter compartilhamento configurado" #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "A captura instantânea %(snapshot_id)s não pôde ser localizada." msgid "Some required fields are missing" msgstr "Alguns campos requeridos estão faltando." 
#, python-format msgid "" "Something went wrong when deleting a volume snapshot: rebasing a " "%(protocol)s network disk using qemu-img has not been fully tested" msgstr "" "|Algo de errado ocorreu ao excluir uma captura instantânea de um volume: O " "rebaseamento de um disco de rede %(protocol)s usando qemu-img não foi " "testado totalmente" msgid "Sort direction size exceeds sort key size" msgstr "" "O tamanho de direção de classificação excede o tamanho da chave de " "classificação" msgid "Sort key supplied was not valid." msgstr "A chave de classificação fornecida não era válida." msgid "Specified fixed address not assigned to instance" msgstr "Endereço fixo especificado não designado à instância" msgid "Started" msgstr "Iniciado" msgid "Stopped" msgstr "Interrompido" #, python-format msgid "Storage error: %(reason)s" msgstr "Erro de armazenamento: %(reason)s" #, python-format msgid "Storage policy %s did not match any datastores" msgstr "" "Política de armazenamento %s não corresponde a nenhum armazenamento de dados" msgid "Success" msgstr "Sucesso" msgid "Suspended" msgstr "Suspenso" msgid "Swap drive requested is larger than instance type allows." msgstr "Drive de swap é maior do que o tipo de instância permite." msgid "Table" msgstr "Tabela" #, python-format msgid "Task %(task_name)s is already running on host %(host)s" msgstr "A tarefa %(task_name)s já está em execução no host %(host)s" #, python-format msgid "Task %(task_name)s is not running on host %(host)s" msgstr "A tarefa %(task_name)s não está em execução no host %(host)s" #, python-format msgid "The PCI address %(address)s has an incorrect format." msgstr "O endereço PCI %(address)s possui um formato incorreto." #, python-format msgid "The console port range %(min_port)d-%(max_port)d is exhausted." msgstr "" "O intervalo de portas do console %(min_port)d-%(max_port)d está esgotado." msgid "The created instance's disk would be too small." msgstr "O disco da instância criada seria muito pequeno." msgid "The current driver does not support preserving ephemeral partitions." msgstr "O driver atual não suporta a preservação partições temporárias." msgid "The default PBM policy doesn't exist on the backend." msgstr "A política de PBM padrão não existe no backend." msgid "The floating IP request failed with a BadRequest" msgstr "A solicitação de IP flutuante falhou com um BadRequest" msgid "" "The instance requires a newer hypervisor version than has been provided." msgstr "" "A instância requer uma versão de hypervisor mais recente do que a fornecida." #, python-format msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d" msgstr "" "O número de portas definidas: %(ports)d está acima do limite: %(quota)d" #, python-format msgid "The provided RNG device path: (%(path)s) is not present on the host." msgstr "" "O caminho de dispositivo RNG fornecido: (%(path)s) não está presente no host." msgid "The request is invalid." msgstr "A requisição é inválida." #, python-format msgid "" "The requested amount of video memory %(req_vram)d is higher than the maximum " "allowed by flavor %(max_vram)d." msgstr "" "A quantidade solicitada de memória de vídeo %(req_vram)d é maior que o " "máximo permitido pelo tipo %(max_vram)d." msgid "The requested availability zone is not available" msgstr "A zona de disponibilidade solicitada não está disponível" msgid "The requested functionality is not supported." msgstr "A funcionalidade solicitada não é suportada." 
#, python-format msgid "The specified cluster '%s' was not found in vCenter" msgstr "O cluster especificado '%s' não foi localizado no vCenter" #, python-format msgid "The supplied device path (%(path)s) is in use." msgstr "O caminho de dispositivo fornecido (%(path)s) está em uso." #, python-format msgid "The supplied device path (%(path)s) is invalid." msgstr "O caminho do dispositivo fornecido (%(path)s) é inválido." #, python-format msgid "" "The supplied disk path (%(path)s) already exists, it is expected not to " "exist." msgstr "" "O caminho de disco fornecido (%(path)s) já existe, era esperado não existir." msgid "The supplied hypervisor type of is invalid." msgstr "O tipo de hypervisor fornecido é inválido." msgid "The target host can't be the same one." msgstr "O host de destino não pode ser o mesmo." #, python-format msgid "The token '%(token)s' is invalid or has expired" msgstr "O token ‘%(token)s‘ é inválido ou expirou" #, python-format msgid "" "The volume cannot be assigned the same device name as the root device %s" msgstr "" "O volume não pode ser atribuído ao mesmo nome de dispositivo que o " "dispositivo raiz %s" msgid "There are not enough hosts available." msgstr "Não há hosts suficientes disponíveis." #, python-format msgid "There is no such action: %s" msgstr "Essa ação não existe: %s" #, python-format msgid "" "This compute node's hypervisor is older than the minimum supported version: " "%(version)s." msgstr "" "Esse hypervisor de nó de cálculo é mais antigo que a versão mínima " "suportada: %(version)s." msgid "" "This method needs to be called with either networks=None and port_ids=None " "or port_ids and networks as not none." msgstr "" "Esse método precisa ser chamado com networks=None e port_ids=None ou com " "port_ids e rede como não none." #, python-format msgid "This rule already exists in group %s" msgstr "Esta regra já existe no grupo %s" #, python-format msgid "" "This service is older (v%(thisver)i) than the minimum (v%(minver)i) version " "of the rest of the deployment. Unable to continue." msgstr "" "Esse serviço é mais antigo (v%(thisver)i) que a versão mínima (v%(minver)i) " "do resto da implementação. Não é possível continuar." msgid "Timeout waiting for response from cell" msgstr "Aguardando tempo limite para a resposta da célula" #, python-format msgid "Timeout while checking if we can live migrate to host: %s" msgstr "" "Tempo limite atingido ao verificar se é possível migrar em tempo real para o " "host: %s" msgid "To and From ports must be integers" msgstr "Portas Para e De devem ser números inteiros" msgid "Token not found" msgstr "Token não localizado" msgid "Triggering crash dump is not supported" msgstr "O acionamento de dump de travamento não é suportado." msgid "Type and Code must be integers for ICMP protocol type" msgstr "Tipo e Código devem ser números inteiros para o tipo de protocolo ICMP" msgid "UEFI is not supported" msgstr "UEFI não é suportado" #, python-format msgid "" "Unable to associate floating IP %(address)s to any fixed IPs for instance " "%(id)s. Instance has no fixed IPv4 addresses to associate." msgstr "" "Não é possível associar o IP flutuante %(address)s a nenhum IP fixo para a " "instância %(id)s. A instância não possui nenhum endereço IPv4 fixo para " "associar." #, python-format msgid "" "Unable to associate floating IP %(address)s to fixed IP %(fixed_address)s " "for instance %(id)s. 
Error: %(error)s" msgstr "" "Não é possível associar o IP flutuante %(address)s ao IP fixo " "%(fixed_address)s para a instância %(id)s. Erro: %(error)s" #, python-format msgid "Unable to convert image to %(format)s: %(exp)s" msgstr "Não é possível converter a imagem em %(format)s: %(exp)s" #, python-format msgid "Unable to convert image to raw: %(exp)s" msgstr "Não é possível converter a imagem para bruto: %(exp)s" #, python-format msgid "Unable to determine disk bus for '%s'" msgstr "Não é possível determinar o barramento de disco para '%s'" #, python-format msgid "Unable to determine disk prefix for %s" msgstr "Não é possível determinar o prefixo do disco para %s" #, python-format msgid "Unable to find host for Instance %s" msgstr "Não é possível localizar o host para a Instância %s" msgid "Unable to find iSCSI Target" msgstr "Não é possível localizar o Destino iSCSI" msgid "Unable to find volume" msgstr "Não é possível localizar o volume" msgid "Unable to get host UUID: /etc/machine-id does not exist" msgstr "Não é possível obter UUID do host: /etc/machine-id não existe" msgid "Unable to get host UUID: /etc/machine-id is empty" msgstr "Não é possível obter UUID do host: /etc/machine-id está vazio" msgid "" "Unable to launch multiple instances with a single configured port ID. Please " "launch your instance one by one with different ports." msgstr "" "Não é possível ativar várias instâncias com um único ID de porta " "configurada. Inicie sua instância uma por uma com portas diferentes." #, python-format msgid "" "Unable to migrate %(instance_uuid)s to %(dest)s: Lack of memory(host:" "%(avail)s <= instance:%(mem_inst)s)" msgstr "" "Não é possível migrar %(instance_uuid)s para %(dest)s: Falta de memória " "(host:%(avail)s <= instância:%(mem_inst)s)" #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too large(available " "on destination host:%(available)s < need:%(necessary)s)" msgstr "" "Não é possível migrar %(instance_uuid)s: O disco da instância é muito grande " "(disponível no host de destino: %(available)s < necessário: %(necessary)s)" #, python-format msgid "" "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." msgstr "" "Não é possível migrar a instância (%(instance_id)s) para o host atual " "(%(host)s)." msgid "Unable to resize disk down." msgstr "Não é possível redimensionar o disco para um tamanho menor." msgid "Unable to set password on instance" msgstr "Não é possível configurar senha na instância" msgid "Unable to shrink disk." msgstr "Não é possível reduzir disco." #, python-format msgid "Unacceptable CPU info: %(reason)s" msgstr "Informações de CPU inaceitáveis: %(reason)s" msgid "Unacceptable parameters." msgstr "Parâmetros inaceitáveis." #, python-format msgid "Unavailable console type %(console_type)s." msgstr "Tipo de console indisponível %(console_type)s." msgid "" "Undefined Block Device Mapping root: BlockDeviceMappingList contains Block " "Device Mappings from multiple instances." msgstr "" "Raiz do Mapeamento de Dispositivo de Bloco indefinido: " "BlockDeviceMappingList contém os Mapeamentos de Dispositivo de Bloco a " "partir de diversas instâncias." 
#, python-format msgid "Unexpected aggregate action %s" msgstr "Ação inesperada %s agregada" msgid "Unexpected type adding stats" msgstr "Estatísticas de inclusão de tipo inesperado" #, python-format msgid "Unexpected vif_type=%s" msgstr "vif_type inesperado=%s" msgid "Unknown" msgstr "Desconhecido" msgid "Unknown action" msgstr "Ação desconhecida" #, python-format msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat." msgstr "" "Formato da unidade de configuração %(format)s desconhecido. Selecione um de " "iso9660 ou vfat." #, python-format msgid "Unknown delete_info type %s" msgstr "Tipo de delete_info desconhecido %s" #, python-format msgid "Unknown image_type=%s" msgstr "image_type desconhecido=%s" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Recursos da cota desconhecidos %(unknown)s." msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Direção de classificação desconhecida; deve ser 'desc' ou 'asc'" #, python-format msgid "Unknown type: %s" msgstr "Tipo desconhecido: %s" msgid "Unrecognized legacy format." msgstr "Formato legado não reconhecido." #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "Valor read_deleted não reconhecido '%s'" #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "Valor não reconhecido '%s' para CONF.running_deleted_instance_action" #, python-format msgid "Unshelve attempted but the image %s cannot be found." msgstr "" "Tentativa de remover adiamento, mas a imagem %s não pode ser localizada." msgid "Unsupported Content-Type" msgstr "Tipo de Conteúdo Não Suportado" #, python-format msgid "User %(username)s not found in password file." msgstr "Usuário %(username)s não localizado no arquivo de senha." #, python-format msgid "User %(username)s not found in shadow file." msgstr "Usuário %(username)s não localizado no arquivo de sombra." msgid "User data needs to be valid base 64." msgstr "Os dados do usuário devem ser base 64 válidos." msgid "User does not have admin privileges" msgstr "Usuário não tem privilégios de administrador" msgid "" "Using different block_device_mapping syntaxes is not allowed in the same " "request." msgstr "" "O uso de sintaxes block_device_mapping diferentes não é permitido na mesma " "solicitação." #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "Versão %(req_ver)s não é suportada pela API. Mínimo é %(min_ver)s e máximo " "é %(max_ver)s." msgid "Virtual Interface creation failed" msgstr "Falha na criação da Interface Virtual" msgid "Virtual interface plugin failed" msgstr "Plugin da interface virtual falhou." #, python-format msgid "Virtual machine mode '%(vmmode)s' is not recognised" msgstr "Modo da máquina virtual '%(vmmode)s' não reconhecido" #, python-format msgid "Virtual machine mode '%s' is not valid" msgstr "O modo de máquina virtual '%s' não é válido" #, python-format msgid "Virtualization type '%(virt)s' is not supported by this compute driver" msgstr "" "O tipo de virtualização '%(virt)s' não é suportado por esse driver de cálculo" #, python-format msgid "Volume %(volume_id)s could not be attached. Reason: %(reason)s" msgstr "O volume %(volume_id)s não pôde ser anexado. Motivo: %(reason)s" #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "Volume %(volume_id)s não pode ser encontrado." 
#, python-format msgid "" "Volume %(volume_id)s did not finish being created even after we waited " "%(seconds)s seconds or %(attempts)s attempts. And its status is " "%(volume_status)s." msgstr "" "Volume %(volume_id)s acabou não sendo criado mesmo depois de esperarmos " "%(seconds)s segundos ou %(attempts)s tentativas. E seu estado é " "%(volume_status)s." msgid "Volume does not belong to the requested instance." msgstr "Volume não pertence à instância solicitada." #, python-format msgid "" "Volume encryption is not supported for %(volume_type)s volume %(volume_id)s" msgstr "" "Criptografia de volume não é suportada para %(volume_type)s volume " "%(volume_id)s" #, python-format msgid "" "Volume is smaller than the minimum size specified in image metadata. Volume " "size is %(volume_size)i bytes, minimum size is %(image_min_disk)i bytes." msgstr "" "O volume é menor que o tamanho mínimo especificado nos metadados de imagem. " "O tamanho do volume é %(volume_size)i bytes; o tamanho mínimo é " "%(image_min_disk)i bytes." #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " "support custom block size" msgstr "" "O volume configura o tamanho de bloco, mas o hypervisor libvirt atual '%s' " "não suporta o tamanho de bloco customizado" msgid "When resizing, instances must change flavor!" msgstr "Ao redimensionar, as instâncias devem alterar o método!" #, python-format msgid "Wrong quota method %(method)s used on resource %(res)s" msgstr "Método de cota errado %(method)s usado no recurso %(res)s" msgid "X-Forwarded-For is missing from request." msgstr "X-Forwarded-For está ausente da solicitação." msgid "X-Instance-ID header is missing from request." msgstr "O cabeçalho X-Instance-ID está ausente da solicitação." msgid "X-Instance-ID-Signature header is missing from request." msgstr "Cabeçalho X-Instance-ID-Signature está ausente da solicitação." msgid "X-Metadata-Provider is missing from request." msgstr "X-Metadata-Provider está ausente da solicitação." msgid "X-Tenant-ID header is missing from request." msgstr "Cabeçalho X-Tenant-ID está ausente da solicitação." msgid "You are not allowed to delete the image." msgstr "Você não tem permissão para excluir a imagem." msgid "" "You are not authorized to access the image the instance was started with." msgstr "" "Você não está autorizado a acessar a imagem com a qual a instância foi " "iniciada." msgid "You must implement __call__" msgstr "Você deve implementar __call__" msgid "You should specify images_rbd_pool flag to use rbd images." msgstr "" "Você deve especificar o sinalizador images_rbd_pool para usar imagens rbd." msgid "You should specify images_volume_group flag to use LVM images." msgstr "" "Você deve especificar o sinalizador images_volume_group para usar imagens " "LVM." msgid "Zero floating IPs available." msgstr "Nenhum IPs flutuantes disponíveis." 
msgid "admin password can't be changed on existing disk" msgstr "senha do administrador não pode ser alterada no disco existente" msgid "cannot understand JSON" msgstr "não é possível entender JSON" msgid "clone() is not implemented" msgstr "clone() não está implementado" #, python-format msgid "connect info: %s" msgstr "informações de conexão: %s" #, python-format msgid "connecting to: %(host)s:%(port)s" msgstr "conectando a:%(host)s:%(port)s" msgid "direct_snapshot() is not implemented" msgstr "direct_snapshot() não está implementada" #, python-format msgid "disk type '%s' not supported" msgstr "tipo de disco '%s' não suportado" #, python-format msgid "empty project id for instance %s" msgstr "ID do projeto vazio para a instância %s" msgid "error setting admin password" msgstr "erro ao configurar senha de administrador" #, python-format msgid "error: %s" msgstr "erro: %s" #, python-format msgid "failed to generate X509 fingerprint. Error message: %s" msgstr "falha ao gerar a impressão digital X509. Mensagem de erro:%s" msgid "failed to generate fingerprint" msgstr "falha ao gerar a impressão digital" msgid "filename cannot be None" msgstr "nome de arquivo não pode ser Nenhum" msgid "floating IP is already associated" msgstr "O IP flutuante já está associado" msgid "floating IP not found" msgstr "IP flutuante não localizado" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s retornado por: %(backing_file)s" #, python-format msgid "href %s does not contain version" msgstr "href %s não contém versão" msgid "image already mounted" msgstr "imagem já montada" #, python-format msgid "instance %s is not running" msgstr "instância %s não está em execução" msgid "instance is a required argument to use @refresh_cache" msgstr "a instância é um argumento necessário para usar @refresh_cache" msgid "instance is not in a suspended state" msgstr "a instância não está em um estado suspenso" msgid "instance is not powered on" msgstr "a instância não está ativada" msgid "instance is powered off and cannot be suspended." msgstr "a instância está desligada e não pode ser suspensa." #, python-format msgid "instance_id %s could not be found as device id on any ports" msgstr "" "instance_id %s não pôde ser localizado como id do dispositivo em nenhuma " "porta" msgid "is_public must be a boolean" msgstr "is_public deve ser um booleano" msgid "keymgr.fixed_key not defined" msgstr "keymgr.fixed_key não definido" msgid "l3driver call to add floating IP failed" msgstr "Falha na chamada l3driver para incluir IP flutuante" #, python-format msgid "libguestfs installed but not usable (%s)" msgstr "libguestfs instalado, mas não utilizável (%s)" #, python-format msgid "libguestfs is not installed (%s)" msgstr "libguestfs não está instalado (%s)" #, python-format msgid "marker [%s] not found" msgstr "marcador [%s] não localizado" #, python-format msgid "max rows must be <= %(max_value)d" msgstr "O máx. de linhas deve ser <= %(max_value)d" msgid "max_count cannot be greater than 1 if an fixed_ip is specified." msgstr "max_count não pode ser maior que 1 se um fixed_ip for especificado." 
msgid "min_count must be <= max_count" msgstr "min_count deve ser <= max_count" #, python-format msgid "nbd device %s did not show up" msgstr "dispositivo nbd %s não mostrado" msgid "nbd unavailable: module not loaded" msgstr "nbd indisponível: módulo não carregado" #, python-format msgid "no match found for %s" msgstr "nenhuma correspondência localizada para %s" #, python-format msgid "no usable parent snapshot for volume %s" msgstr "Nenhuma captura instantânea pai utilizável para o volume %s." #, python-format msgid "no write permission on storage pool %s" msgstr "Nenhuma permissão de gravação para o conjunto de armazenamentos %s" #, python-format msgid "not able to execute ssh command: %s" msgstr "não foi possível executar o comando ssh: %s" msgid "old style configuration can use only dictionary or memcached backends" msgstr "" "A configuração de estilo antigo pode usar somente backends de dicionário ou " "memcached" msgid "operation time out" msgstr "tempo limite da operação" #, python-format msgid "partition %s not found" msgstr "partição %s não localizada" #, python-format msgid "partition search unsupported with %s" msgstr "procura de partição não suportada com %s" msgid "pause not supported for vmwareapi" msgstr "pausa não suportada para vmwareapi" msgid "printable characters with at least one non space character" msgstr "" "Caracteres imprimíveis com pelo menos um caractere diferente de espaço." msgid "printable characters. Can not start or end with whitespace." msgstr "" "Caracteres imprimíveis não podem iniciar ou terminar com espaço em branco." #, python-format msgid "qemu-img failed to execute on %(path)s : %(exp)s" msgstr "qemu-img falhou ao executar no %(path)s : %(exp)s" #, python-format msgid "qemu-nbd error: %s" msgstr "erro qemu-nbd: %s" msgid "rbd python libraries not found" msgstr "Bibliotecas rbd python não localizadas" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "read_deleted pode ser apenas um de 'no', 'yes' ou 'only', não %r" msgid "serve() can only be called once" msgstr "serve() pode ser chamado apenas uma vez" msgid "service is a mandatory argument for DB based ServiceGroup driver" msgstr "" "o serviço é um argumento obrigatório para driver do ServiceGroup baseado em " "BD" msgid "service is a mandatory argument for Memcached based ServiceGroup driver" msgstr "" "o serviço é um argumento obrigatório para o driver do ServiceGroup com base " "em Memcached" msgid "set_admin_password is not implemented by this driver or guest instance." msgstr "" "set_admin_password não está implementado por este driver ou esta instância " "convidada." 
#, python-format msgid "snapshot for %s" msgstr "captura instantânea para %s" msgid "snapshot_id required in create_info" msgstr "snapshot_id necessário em create_info" msgid "token not provided" msgstr "token não fornecido" msgid "too many body keys" msgstr "excesso de chaves de corpo" msgid "unpause not supported for vmwareapi" msgstr "cancelamento de pausa não suportado para vmwareapi" #, python-format msgid "vg %s must be LVM volume group" msgstr "vg %s deve estar no grupo de volumes LVM" #, python-format msgid "vhostuser_sock_path not present in vif_details for vif %(vif_id)s" msgstr "vhostuser_sock_path ausente no vif_details para vif %(vif_id)s" #, python-format msgid "vif type %s not supported" msgstr "Tipo de vif %s não suportado" msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "" "o parâmetro vif_type deve estar presente para esta implementação de " "vif_driver" #, python-format msgid "volume %s already attached" msgstr "volume %s já conectado" #, python-format msgid "" "volume '%(vol)s' status must be 'in-use'. Currently in '%(status)s' status" msgstr "" "o volume '%(vol)s' de status deve estar 'em uso'. Atualmente em '%(status)s' " "de status" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315688.893605 nova-32.0.0/nova/locale/ru/0000775000175000017500000000000000000000000015412 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3736086 nova-32.0.0/nova/locale/ru/LC_MESSAGES/0000775000175000017500000000000000000000000017177 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/locale/ru/LC_MESSAGES/nova.po0000664000175000017500000034356500000000000020522 0ustar00zuulzuul00000000000000# Translations template for nova. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the nova project. # # Translators: # Ilya Alekseyev , 2013 # Aleksandr Brezhnev , 2013 # Alexei Rudenko , 2013 # FIRST AUTHOR , 2011 # lykoz , 2012 # Alexei Rudenko , 2013 # Stanislav Hanzhin , 2013 # dvy , 2014 # Andreas Jaeger , 2016. #zanata # Dmitriy Rabotyagov , 2024. #zanata msgid "" msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-07-04 18:13+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2024-09-17 06:43+0000\n" "Last-Translator: Dmitriy Rabotyagov \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " "n%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || " "(n%100>=11 && n%100<=14)? 2 : 3);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Russian\n" #, python-format msgid "%(address)s is not a valid IP v4/6 address." msgstr "%(address)s не является допустимым IP-адресом в4/6." #, python-format msgid "" "%(binary)s attempted direct database access which is not allowed by policy" msgstr "" "%(binary)s произвел попытку прямого доступа к базе данных, что не разрешено " "стратегией" #, python-format msgid "%(cidr)s is not a valid IP network." msgstr "%(cidr)s не является допустимой IP-сетью." #, python-format msgid "%(field)s should not be part of the updates." msgstr "%(field)s не должно входить в состав обновлений." 
#, python-format msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB" msgstr "Назначено %(memsize)d МБ, но ожидалось %(memtotal)d МБ" #, python-format msgid "%(path)s is not on local storage: %(reason)s" msgstr "%(path)s не находится в локальном хранилище: %(reason)s" #, python-format msgid "%(path)s is not on shared storage: %(reason)s" msgstr "%(path)s не находится в общем хранилище: %(reason)s" #, python-format msgid "%(total)i rows matched query %(meth)s, %(done)i migrated" msgstr "%(total)i строк, совпадающих с запросом %(meth)s, %(done)i перенесено" #, python-format msgid "%(type)s hypervisor does not support PCI devices" msgstr "Гипервизор %(type)s не поддерживает устройства PCI" #, python-format msgid "%s does not support disk hotplug." msgstr "%s не поддерживает горячее подключение дисков." #, python-format msgid "%s format is not supported" msgstr "Формат %s не поддерживается" #, python-format msgid "%s is not supported." msgstr "%s не поддерживается." #, python-format msgid "%s must be either 'MANUAL' or 'AUTO'." msgstr "%s должен быть 'MANUAL' или 'AUTO'." #, python-format msgid "'%(other)s' should be an instance of '%(cls)s'" msgstr "'%(other)s' должен быть экземпляром '%(cls)s'" msgid "'qemu-img info' parsing failed." msgstr "Ошибка анализа 'qemu-img info'." #, python-format msgid "'rxtx_factor' argument must be a float between 0 and %g" msgstr "" "Аргумент 'rxtx_factor' должен быть числом с плавающей точкой в диапазоне от " "0 до %g" #, python-format msgid "A NetworkModel is required in field %s" msgstr "В поле %s требуется NetworkModel " #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "Недопустимый формат строки версии API %(version)s. Требуется формат: " "MajorNum.MinorNum." #, python-format msgid "API version %(version)s is not supported on this method." msgstr "Версия API %(version)s не поддерживается этим методом." msgid "Access list not available for public flavors." msgstr "Список прав доступа не доступен для общих разновидностей." #, python-format msgid "Action %s not found" msgstr "Действие %s не найдено" #, python-format msgid "" "Action for request_id %(request_id)s on instance %(instance_uuid)s not found" msgstr "" "Действие для request_id %(request_id)s в экземпляре %(instance_uuid)s не " "найдено" #, python-format msgid "Action: '%(action)s', calling method: %(meth)s, body: %(body)s" msgstr "Действие: %(action)s, вызывающий метод: %(meth)s, тело: %(body)s" #, python-format msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries" msgstr "" "Добавление метаданных не выполнено для множества %(id)s после %(retries)s " "попыток" #, python-format msgid "Aggregate %(aggregate_id)s already has host %(host)s." msgstr "Множество %(aggregate_id)s уже имеет хост %(host)s." #, python-format msgid "Aggregate %(aggregate_id)s could not be found." msgstr "Множество %(aggregate_id)s не найдено." #, python-format msgid "Aggregate %(aggregate_id)s has no host %(host)s." msgstr "Множество %(aggregate_id)s не имеет хоста %(host)s." #, python-format msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." msgstr "" "Множество %(aggregate_id)s не имеет метаданных с ключом %(metadata_key)s." #, python-format msgid "Aggregate %(aggregate_name)s already exists." msgstr "Множество %(aggregate_name)s уже существует." 
#, python-format msgid "Aggregate %s does not support empty named availability zone" msgstr "Совокупный ресурс %s не поддерживает зону доступности с пустым именем" #, python-format msgid "An invalid 'name' value was provided. The name must be: %(reason)s" msgstr "Недопустимое значение 'name'. Должно быть указано: %(reason)s" msgid "An unknown error has occurred. Please try your request again." msgstr "" "Произошла неизвестная ошибка. Пожалуйста, попытайтесь повторить ваш запрос." msgid "An unknown exception occurred." msgstr "Обнаружено неизвестное исключение." #, python-format msgid "Architecture name '%(arch)s' is not recognised" msgstr "Имя архитектуры %(arch)s не распознано" #, python-format msgid "Architecture name '%s' is not valid" msgstr "Недопустимое имя архитектуры: '%s'" #, python-format msgid "" "Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty pool" msgstr "" "Попытка приема устройства PCI %(compute_node_id)s:%(address)s из пустого из " "пула" msgid "Attempted overwrite of an existing value." msgstr "Попытка заменить существующее значение." #, python-format msgid "Attribute not supported: %(attr)s" msgstr "Атрибут не поддерживается: %(attr)s" msgid "Bad Request - Feature is not supported in Nova" msgstr "Неверный запрос - Данный функционал не поддерживается Nova" msgid "Bad Request - Invalid Parameters" msgstr "Неверный запрос - Недопустимые параметры" #, python-format msgid "Bad network format: missing %s" msgstr "Недопустимый сетевой формат: отсутствует %s" msgid "Bad networks format" msgstr "Недопустимый сетевой формат" #, python-format msgid "Bad networks format: network uuid is not in proper format (%s)" msgstr "" "Недопустимый сетевой формат: сетевой uuid имеет неправильный формат (%s)" #, python-format msgid "Bad prefix for network in cidr %s" msgstr "Неверный префикс для сети в cidr %s" #, python-format msgid "" "Binding failed for port %(port_id)s, please check neutron logs for more " "information." msgstr "" "Ошибка создания привязки для порта %(port_id)s. Дополнительные сведения " "можно найти в протоколах neutron." msgid "Blank components" msgstr "Пустые компоненты" msgid "" "Blank volumes (source: 'blank', dest: 'volume') need to have non-zero size" msgstr "" "Пустые тома (source: 'blank', dest: 'volume') должны иметь ненулевой размер" #, python-format msgid "Block Device %(id)s is not bootable." msgstr "Блочное устройство не загрузочное %(id)s." #, python-format msgid "" "Block Device Mapping %(volume_id)s is a multi-attach volume and is not valid " "for this operation." msgstr "" "Связывание блочного устройства %(volume_id)s - это том с множественным " "подключением, что недопустимо для этой операции." msgid "Block Device Mapping cannot be converted to legacy format. " msgstr "" "Связывание блочного устройства не может быть преобразовано в устаревший " "формат. " msgid "Block Device Mapping is Invalid." msgstr "Недопустимое связывание блочного устройства." #, python-format msgid "Block Device Mapping is Invalid: %(details)s" msgstr "Недопустимое связывание блочного устройства: %(details)s" msgid "" "Block Device Mapping is Invalid: Boot sequence for the instance and image/" "block device mapping combination is not valid." msgstr "" "Недопустимое связывание блочного устройства: Последовательность загрузки для " "данного экземпляра и сочетание связывания образа/блочного устройства " "является недопустимым." 
msgid "" "Block Device Mapping is Invalid: You specified more local devices than the " "limit allows" msgstr "" "Недопустимое связывание блочного устройства: Вы указали больше локальных " "устройств, чем допускает ограничение" #, python-format msgid "Block Device Mapping is Invalid: failed to get image %(id)s." msgstr "" "Недопустимое связывание блочного устройства: не удалось получить образ " "%(id)s." #, python-format msgid "Block Device Mapping is Invalid: failed to get snapshot %(id)s." msgstr "" "Недопустимое связывание блочного устройства: не удалось получить " "моментальную копию %(id)s." #, python-format msgid "Block Device Mapping is Invalid: failed to get volume %(id)s." msgstr "" "Недопустимое связывание блочного устройства: не удалось получить том %(id)s." msgid "Block migration can not be used with shared storage." msgstr "Блочный перенос не может выполняться с общим хранилищем." msgid "Boot index is invalid." msgstr "Недопустимый индекс загрузки." #, python-format msgid "Build of instance %(instance_uuid)s aborted: %(reason)s" msgstr "Компоновка экземпляра %(instance_uuid)s прервана: %(reason)s" #, python-format msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s" msgstr "" "Компоновка экземпляра %(instance_uuid)s повторно запланирована: %(reason)s" #, python-format msgid "BuildRequest not found for instance %(uuid)s" msgstr "BuildRequest не найден для экземпляра %(uuid)s" msgid "CPU and memory allocation must be provided for all NUMA nodes" msgstr "Выделение CPU и памяти должно обеспечиваться для всех узлов NUMA" #, python-format msgid "" "CPU doesn't have compatibility.\n" "\n" "%(ret)s\n" "\n" "Refer to %(u)s" msgstr "" "CPU не совместим.\n" "\n" "%(ret)s\n" "\n" "Ссылка на %(u)s" #, python-format msgid "CPU number %(cpunum)d is assigned to two nodes" msgstr "Число CPU %(cpunum)d назначено двум узлам" #, python-format msgid "CPU number %(cpunum)d is larger than max %(cpumax)d" msgstr "Число CPU %(cpunum)d превышает максимальное (%(cpumax)d)" #, python-format msgid "CPU number %(cpuset)s is not assigned to any node" msgstr "Число CPU %(cpuset)s не назначено ни одному узлу" msgid "Can not add access to a public flavor." msgstr "Невозможно добавить права доступа к общедоступной разновидности." msgid "Can not find requested image" msgstr "Невозможно найти запрошенный образ" #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "" "Невозможно обработать запрос идентификации для идентификационных данных %d" msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "" "Невозможно извлечь корневой путь к устройству из конфигурации libvirt " "экземпляра" #, python-format msgid "" "Cannot '%(action)s' instance %(server_id)s while it is in %(attr)s %(state)s" msgstr "" "Невозможно выполнить действие '%(action)s' для экземпляра %(server_id)s в " "состоянии %(attr)s %(state)s" #, python-format msgid "Cannot add host to aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Не удается добавить хост в составной объект %(aggregate_id)s. Причина: " "%(reason)s." 
msgid "Cannot attach one or more volumes to multiple instances" msgstr "Невозможно подключить один или несколько томов нескольким экземплярам" #, python-format msgid "Cannot call %(method)s on orphaned %(objtype)s object" msgstr "Невозможно вызвать %(method)s в неприсвоенном объекте %(objtype)s" #, python-format msgid "" "Cannot determine the parent storage pool for %s; cannot determine where to " "store images" msgstr "" "Неизвестен родительский пул памяти для %s. Не удается определить " "расположение для сохранения образов" msgid "Cannot find image for rebuild" msgstr "Невозможно найти образ для перекомпоновки" #, python-format msgid "Cannot remove host %(host)s in aggregate %(id)s" msgstr "Не удается убрать %(host)s из агрегата %(id)s" #, python-format msgid "Cannot remove host from aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Не удается удалить хост из составного объекта %(aggregate_id)s. Причина: " "%(reason)s." msgid "Cannot rescue a volume-backed instance" msgstr "Невозможно аварийно восстановить сохраненный в томе экземпляр" msgid "" "Cannot set cpu thread pinning policy in a non dedicated cpu pinning policy" msgstr "" "Невозможно задать стратегию прикрепления CPU к нити в стратегии прикрепления " "невыделенного CPU" msgid "Cannot set realtime policy in a non dedicated cpu pinning policy" msgstr "" "Невозможно задать стратегию реального времени в стратегии прикрепления " "невыделенного CPU" #, python-format msgid "Cannot update aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Не удается обновить составной объект %(aggregate_id)s. Причина: %(reason)s." #, python-format msgid "" "Cannot update metadata of aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "Не удается обновить метаданные в составном объекте %(aggregate_id)s. " "Причина: %(reason)s." #, python-format msgid "Cell %(uuid)s has no mapping." msgstr "Ячейка %(uuid)s не имеет связей." #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "" "Изменение будет использовать менее 0 для следующих ресурсов: %(unders)s" #, python-format msgid "Cinder API version %(version)s is not available." msgstr "Версия API Cinder %(version)s недоступна" #, python-format msgid "Class %(class_name)s could not be found: %(exception)s" msgstr "Класс %(class_name)s не найден: %(exception)s" #, python-format msgid "Compute host %(host)s could not be found." msgstr "Узел сompute %(host)s не найден." #, python-format msgid "Compute host %s not found." msgstr "Не найден хост вычисления %s." #, python-format msgid "Compute service of %(host)s is still in use." msgstr "Служба вычисления %(host)s по-прежнему занята." #, python-format msgid "Compute service of %(host)s is unavailable at this time." msgstr "Служба вычисления %(host)s недоступна в данный момент." #, python-format msgid "Config drive format '%(format)s' is not supported." msgstr "Формат диска конфигурации %(format)s не поддерживается." #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt hypervisor " "'%s' does not support selecting CPU models" msgstr "" "Конфигурация запросила явную модель CPU, но гипервизор текущей libvirt '%s' " "не поддерживает выбор моделей CPU" #, python-format msgid "" "Conflict updating instance %(instance_uuid)s, but we were unable to " "determine the cause" msgstr "" "Конфликт при обновлении экземпляра %(instance_uuid)s, но не удалось " "определить причину." #, python-format msgid "" "Conflict updating instance %(instance_uuid)s. 
Expected: %(expected)s. " "Actual: %(actual)s" msgstr "" "Конфликт при обновлении экземпляра %(instance_uuid)s. Ожидалось: " "%(expected)s. Фактическое значение: %(actual)s" #, python-format msgid "Connection to cinder host failed: %(reason)s" msgstr "Не удалось подключиться к хосту cinder: %(reason)s" #, python-format msgid "Connection to glance host %(server)s failed: %(reason)s" msgstr "" "Не удалось установить соединение с хостом glance %(server)s: %(reason)s" #, python-format msgid "Connection to keystone host failed: %(reason)s" msgstr "Не удалось подключиться к хосту Keystone: %(reason)s" #, python-format msgid "Connection to libvirt lost: %s" msgstr "Соединение с libvirt потеряно: %s" #, python-format msgid "" "Console log output could not be retrieved for instance %(instance_id)s. " "Reason: %(reason)s" msgstr "" "Не удается получить вывод протокола консоли %(instance_id)s. Причина: " "%(reason)s" msgid "Constraint not met." msgstr "Ограничение не выполнено." #, python-format msgid "Converted to raw, but format is now %s" msgstr "Преобразование в необработанный, но текущий формат %s" #, python-format msgid "Could not attach image to loopback: %s" msgstr "Невозможно прикрепить образ для замыкания: %s" #, python-format msgid "Could not fetch image %(image_id)s" msgstr "Невозможно извлечь образ %(image_id)s" #, python-format msgid "Could not find a handler for %(driver_type)s volume." msgstr "Невозможно найти обработчик для тома %(driver_type)s." #, python-format msgid "Could not find binary %(binary)s on host %(host)s." msgstr "Не удалось найти двоичный файл %(binary)s на хосте %(host)s." #, python-format msgid "Could not find config at %(path)s" msgstr "Невозможно найти конфигурацию по адресу %(path)s" msgid "Could not find the datastore reference(s) which the VM uses." msgstr "Не удалось найти ссылки на хранилища данных, используемых VM." #, python-format msgid "Could not load line %(line)s, got error %(error)s" msgstr "Не удалось загрузить строку %(line)s. Ошибка: %(error)s" #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "Невозможно загрузить приложение '%(name)s' из %(path)s" #, python-format msgid "" "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s" msgstr "" "Невозможно смонтировать диск конфигурации vfat. %(operation)s не выполнено. " "Ошибка: %(error)s" #, python-format msgid "Could not upload image %(image_id)s" msgstr "Невозможно передать образ %(image_id)s" msgid "Creation of virtual interface with unique mac address failed" msgstr "" "Не удалось создать виртуальный интерфейс с помощью уникального MAC-адреса" #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "" "Регулярное выражение %s хранилища данных не соответствует ни одному " "хранилищу данных" msgid "Datetime is in invalid format" msgstr "Недопустимый формат даты/времени" msgid "Default PBM policy is required if PBM is enabled." msgstr "Стратегия PBM по умолчанию является обязательной, если включен PBM." #, python-format msgid "Device '%(device)s' not found." msgstr "Устройство '%(device)s' не найдено." msgid "Device name contains spaces." msgstr "Имя устройства содержит пробелы." msgid "Device name empty or too long." msgstr "Имя устройства пустое или слишком длинное." 
#, python-format msgid "Device type mismatch for alias '%s'" msgstr "Несоответствие типа устройства для псевдонима '%s'" #, python-format msgid "Disk format %(disk_format)s is not acceptable" msgstr "Форматирование диска %(disk_format)s недопустимо" #, python-format msgid "Disk info file is invalid: %(reason)s" msgstr "Недопустимый файл информации о диске: %(reason)s" #, python-format msgid "Driver Error: %s" msgstr "Ошибка драйвера: %s" #, python-format msgid "Error attempting to run %(method)s" msgstr "Ошибка при попытке выполнения %(method)s" #, python-format msgid "" "Error destroying the instance on node %(node)s. Provision state still " "'%(state)s'." msgstr "" "Ошибка уничтожения экземпляра на узле %(node)s. Состояние выделения ресурсов " "все еще '%(state)s'." #, python-format msgid "Error during unshelve instance %(instance_id)s: %(reason)s" msgstr "" "Ошибка возврата из отложенного состояния экземпляра %(instance_id)s: " "%(reason)s" #, python-format msgid "" "Error from libvirt while getting domain info for %(instance_name)s: [Error " "Code %(error_code)s] %(ex)s" msgstr "" "Ошибка в libvirt при получении информации о домене для %(instance_name)s: " "[Код ошибки: %(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Ошибка libvirt во время поиска %(instance_name)s: [Код ошибки " "%(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while quiescing %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Ошибка в libvirt во время приостановки %(instance_name)s: [Код ошибки: " "%(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while set password for username \"%(user)s\": [Error Code " "%(error_code)s] %(ex)s" msgstr "" "Ошибка в libvirt при установке пароля для имени пользователя \"%(user)s\": " "[Код ошибки %(error_code)s] %(ex)s" #, python-format msgid "" "Error mounting %(device)s to %(dir)s in image %(image)s with libguestfs " "(%(e)s)" msgstr "" "Ошибка при монтировании %(device)s на %(dir)s в образе %(image)s с " "libguestfs (%(e)s)" #, python-format msgid "Error mounting %(image)s with libguestfs (%(e)s)" msgstr "Ошибка при монтировании %(image)s с помощью libguestfs (%(e)s)" #, python-format msgid "Error when creating resource monitor: %(monitor)s" msgstr "Ошибка при создании монитора ресурсов: %(monitor)s" #, python-format msgid "Event %(event)s not found for action id %(action_id)s" msgstr "Событие %(event)s не найдено для ИД действия %(action_id)s" msgid "Event must be an instance of nova.virt.event.Event" msgstr "Событие должно быть экземпляром nova.virt.event.Event" #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for instance " "%(instance_uuid)s. Last exception: %(exc_reason)s" msgstr "" "Выполнено максимальное запланированное число попыток %(max_attempts)d для " "экземпляра %(instance_uuid)s. Последняя исключительная ситуация: " "%(exc_reason)s" #, python-format msgid "" "Exceeded max scheduling retries %(max_retries)d for instance " "%(instance_uuid)s during live migration" msgstr "" "Выполнено максимальное запланированное число повторных попыток " "%(max_retries)d для экземпляра %(instance_uuid)s в процессе оперативного " "переноса" #, python-format msgid "Exceeded maximum number of retries. %(reason)s" msgstr "Превышено максимальное количество попыток. %(reason)s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "Ожидался uuid, а получен %(uuid)s." 
msgid "Extracting vmdk from OVA failed." msgstr "Извлечение vmdk из OVA не выполнено." #, python-format msgid "Failed to access port %(port_id)s: %(reason)s" msgstr "Не удалось обратиться к порту %(port_id)s: %(reason)s" #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "" "Не удалось выделить сеть (сети). Ошибка: %s. Перепланировка не выполняется." msgid "Failed to allocate the network(s), not rescheduling." msgstr "Не удалось выделить сеть(сети), перепланировка не выполняется." #, python-format msgid "Failed to attach network adapter device to %(instance_uuid)s" msgstr "Не удалось подключить устройство сетевого адаптера к %(instance_uuid)s" #, python-format msgid "Failed to deploy instance: %(reason)s" msgstr "Не удалось развернуть экземпляр: %(reason)s" #, python-format msgid "Failed to detach PCI device %(dev)s: %(reason)s" msgstr "Не удалось отключить устройство PCI %(dev)s: %(reason)s" #, python-format msgid "Failed to detach network adapter device from %(instance_uuid)s" msgstr "Не удалось отключить устройство сетевого адаптера от %(instance_uuid)s" #, python-format msgid "Failed to encrypt text: %(reason)s" msgstr "Не удалось зашифровать текст: %(reason)s" #, python-format msgid "Failed to launch instances: %(reason)s" msgstr "Не удалось запустить экземпляры: %(reason)s" #, python-format msgid "Failed to map partitions: %s" msgstr "Не удалось отобразить разделы: %s" #, python-format msgid "Failed to mount filesystem: %s" msgstr "Ошибка монтирования файловой системы: %s" #, python-format msgid "Failed to power off instance: %(reason)s" msgstr "Не удалось выключить экземпляр: %(reason)s" #, python-format msgid "Failed to power on instance: %(reason)s" msgstr "Не удалось включить экземпляр: %(reason)s" #, python-format msgid "Failed to provision instance %(inst)s: %(reason)s" msgstr "Не удалось выделить ресурсы экземпляру %(inst)s: %(reason)s" #, python-format msgid "Failed to read or write disk info file: %(reason)s" msgstr "Не удалось прочитать или записать файл информации о диске: %(reason)s" #, python-format msgid "Failed to reboot instance: %(reason)s" msgstr "Не удалось перезагрузить экземпляр: %(reason)s" #, python-format msgid "Failed to remove volume(s): (%(reason)s)" msgstr "Не удалось удалить тома: (%(reason)s)" #, python-format msgid "Failed to request Ironic to rebuild instance %(inst)s: %(reason)s" msgstr "" "Не удалось запросить Ironic для перекомпоновки экземпляра %(inst)s: " "%(reason)s" #, python-format msgid "Failed to resume instance: %(reason)s" msgstr "Не удалось возобновить экземпляр: %(reason)s" #, python-format msgid "Failed to run qemu-img info on %(path)s : %(error)s" msgstr "Не удалось выполнить команду qemu-img info в %(path)s : %(error)s" #, python-format msgid "Failed to set admin password on %(instance)s because %(reason)s" msgstr "" "Не удалось установить пароль администратора в %(instance)s по причине: " "%(reason)s" #, python-format msgid "Failed to suspend instance: %(reason)s" msgstr "Не удалось приостановить экземпляр: %(reason)s" #, python-format msgid "Failed to terminate instance: %(reason)s" msgstr "Не удалось завершить экземпляр: %(reason)s" msgid "Failure prepping block device." msgstr "Сбой при подготовке блочного устройства." #, python-format msgid "File %(file_path)s could not be found." msgstr "Файл %(file_path)s не может быть найден." #, python-format msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s." 
msgstr "" "Фиксированный IP %(ip)s не является допустимым IP-адресом для сети " "%(network_id)s." #, python-format msgid "Fixed IP %s is already in use." msgstr "Фиксированный IP-адрес %s уже используется." #, python-format msgid "" "Fixed IP address %(address)s is already in use on instance %(instance_uuid)s." msgstr "" "Фиксированный IP-адрес %(address)s уже используется в экземпляре " "%(instance_uuid)s." #, python-format msgid "Fixed IP not found for address %(address)s." msgstr "Для адреса %(address)s не найдено фиксированных IP." #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "Разновидность %(flavor_id)s не найдена." #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s." msgstr "" "Разновидность %(flavor_id)s не содержит дополнительных спецификаций с ключом " "%(extra_specs_key)s." #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(key)s." msgstr "" "Разновидность %(flavor_id)s не содержит дополнительных спецификаций с ключом " "%(key)s." #, python-format msgid "" "Flavor %(id)s extra spec cannot be updated or created after %(retries)d " "retries." msgstr "" "Дополнительную спецификацию %(id)s разновидности не удалось создать или " "изменить за %(retries)d попыток." #, python-format msgid "" "Flavor access already exists for flavor %(flavor_id)s and project " "%(project_id)s combination." msgstr "" "Права доступа к разновидности уже существуют для комбинации разновидности " "%(flavor_id)s и проекта %(project_id)s." #, python-format msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination." msgstr "" "Права доступа к разновидности не найдены для комбинации %(flavor_id)s / " "%(project_id)s." msgid "Flavor used by the instance could not be found." msgstr "Используемая экземпляром разновидность не найдена." #, python-format msgid "Flavor with ID %(flavor_id)s already exists." msgstr "Разновидность с ИД %(flavor_id)s уже существует." #, python-format msgid "Flavor with name %(flavor_name)s could not be found." msgstr "Не удалось найти разновидность с именем %(flavor_name)s." #, python-format msgid "Flavor with name %(name)s already exists." msgstr "Разновидность с именем %(name)s уже существует." #, python-format msgid "" "Flavor's disk is smaller than the minimum size specified in image metadata. " "Flavor disk is %(flavor_size)i bytes, minimum size is %(image_min_disk)i " "bytes." msgstr "" "Объем диска разновидности меньше минимального размера, указанного в " "метаданных образа. Объем диска разновидности: %(flavor_size)i байт, " "минимальный объем - %(image_min_disk)i байт." #, python-format msgid "" "Flavor's disk is too small for requested image. Flavor disk is " "%(flavor_size)i bytes, image is %(image_size)i bytes." msgstr "" "Диск разновидности слишком мал для запрошенного образа. Диск разновидности " "составляет %(flavor_size)i байт, размер образа - %(image_size)i байт." msgid "Flavor's memory is too small for requested image." msgstr "Память разновидности слишком мала для запрошенного образа." #, python-format msgid "Floating IP %(address)s association has failed." msgstr "Сбой связи нефиксированного IP-адреса %(address)s." #, python-format msgid "Floating IP %(address)s is associated." msgstr "Нефиксированный IP %(address)s связан." #, python-format msgid "Floating IP %(address)s is not associated with instance %(id)s." msgstr "Нефиксированный IP %(address)s не связан с экземпляром %(id)s." #, python-format msgid "Floating IP not found for ID %(id)s." 
msgstr "Для ИД %(id)s не найден нефиксированный IP." #, python-format msgid "Floating IP not found for ID %s" msgstr "Нефиксированный IP не найден для ИД %s" #, python-format msgid "Floating IP not found for address %(address)s." msgstr "Нефиксированный IP не найден для адреса %(address)s." msgid "Floating IP pool not found." msgstr "Пул нефиксированных IP не найден." msgid "Forbidden" msgstr "Запрещено" msgid "" "Forbidden to exceed flavor value of number of serial ports passed in image " "meta." msgstr "" "Запрещено превышать значение разновидности для числа последовательных " "портов, передаваемого в метаданные образа." msgid "Found no disk to snapshot." msgstr "Не найден диск для создания моментальной копии." msgid "Guest does not have a console available." msgstr "Гость не имеет доступной консоли." #, python-format msgid "Host %(host)s could not be found." msgstr "Узел %(host)s не найден." #, python-format msgid "Host %(host)s is already mapped to cell %(uuid)s" msgstr "Хост %(host)s уже связан с ячейкой %(uuid)s" #, python-format msgid "Host '%(name)s' is not mapped to any cell" msgstr "Хост '%(name)s' не привязан ни к одной ячейке" msgid "Host aggregate is not empty" msgstr "Множество хостов не пустое" msgid "Host does not support guests with NUMA topology set" msgstr "Хост не поддерживает гостей с топологией NUMA" msgid "Host does not support guests with custom memory page sizes" msgstr "" "Хост не поддерживает гостей с пользовательскими размерами страниц памяти" msgid "Hypervisor driver does not support post_live_migration_at_source method" msgstr "" "Драйвер гипервизора не поддерживает метод post_live_migration_at_source" #, python-format msgid "Hypervisor virt type '%s' is not valid" msgstr "Недопустимый тип виртуализации гипервизора '%s'" #, python-format msgid "Hypervisor virtualization type '%(hv_type)s' is not recognised" msgstr "Тип виртуализации гипервизора %(hv_type)s не распознан" #, python-format msgid "Hypervisor with ID '%s' could not be found." msgstr "Гипервизор с ИД '%s' не найден." #, python-format msgid "IP allocation over quota in pool %s." msgstr "Превышение квоты выделения IP-адресов в пуле %s." msgid "IP allocation over quota." msgstr "Превышение квоты выделения IP-адресов." #, python-format msgid "Image %(image_id)s could not be found." msgstr "Образ %(image_id)s не найден." #, python-format msgid "Image %(image_id)s is not active." msgstr "Образ %(image_id)s не активен." #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "Образ %(image_id)s недопустим: %(reason)s" msgid "Image disk size greater than requested disk size" msgstr "Размер диска образа превышает запрашиваемый размер диска" msgid "Image is not raw format" msgstr "Образ не в формате raw" msgid "Image metadata limit exceeded" msgstr "Ограничение метаданных образа превышено" #, python-format msgid "Image model '%(image)s' is not supported" msgstr "Модель образа '%(image)s' не поддерживается" msgid "Image not found." msgstr "образ не найден." 
#, python-format msgid "" "Image property '%(name)s' is not permitted to override NUMA configuration " "set against the flavor" msgstr "" "Свойству %(name)s образа не разрешено переопределять конфигурацию NUMA, " "заданную для разновидности" msgid "" "Image property 'hw_cpu_policy' is not permitted to override CPU pinning " "policy set against the flavor" msgstr "" "Свойству образа 'hw_cpu_policy' не разрешено переопределять стратегию " "прикрепления CPU для данной разновидности" msgid "" "Image property 'hw_cpu_thread_policy' is not permitted to override CPU " "thread pinning policy set against the flavor" msgstr "" "Свойству образа 'hw_cpu_thread_policy' не разрешено переопределять стратегию " "прикрепления CPU к нити для данной разновидности" msgid "Image that the instance was started with could not be found." msgstr "Образ, с помощью которого запущен экземпляр, не найден." #, python-format msgid "Image's config drive option '%(config_drive)s' is invalid" msgstr "" "В образе указано недопустимое значение параметра %(config_drive)s диска " "конфигурации" msgid "" "Images with destination_type 'volume' need to have a non-zero size specified" msgstr "" "У образов с destination_type 'volume' должен быть указан ненулевой размер " msgid "In ERROR state" msgstr "В состоянии ОШИБКА" #, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "В состояниях %(vm_state)s/%(task_state)s, не RESIZED/None" #, python-format msgid "In-progress live migration %(id)s is not found for server %(uuid)s." msgstr "Оперативный перенос %(id)s не найден для сервера %(uuid)s." msgid "" "Incompatible settings: ephemeral storage encryption is supported only for " "LVM images." msgstr "" "Несовместимые параметры: шифрование временной памяти поддерживается только " "для образов LVM." #, python-format msgid "Info cache for instance %(instance_uuid)s could not be found." msgstr "Кэш информации для экземпляра %(instance_uuid)s не найден." #, python-format msgid "" "Instance %(instance)s and volume %(vol)s are not in the same " "availability_zone. Instance is in %(ins_zone)s. Volume is in %(vol_zone)s" msgstr "" "Экземпляр %(instance)s и том %(vol)s не находятся в одной зоне доступности " "availability_zone. Экземпляр находится в %(ins_zone)s. Том находится в " "%(vol_zone)s" #, python-format msgid "Instance %(instance)s does not have a port with id %(port)s" msgstr "В экземпляре %(instance)s нет порта с ИД %(port)s" #, python-format msgid "Instance %(instance_id)s cannot be rescued: %(reason)s" msgstr "" "Экземпляр %(instance_id)s не может быть аварийно восстановлен: %(reason)s" #, python-format msgid "Instance %(instance_id)s could not be found." msgstr "Копия %(instance_id)s не найдена." #, python-format msgid "Instance %(instance_id)s has no tag '%(tag)s'" msgstr "Экземпляр %(instance_id)s не имеет тега '%(tag)s'" #, python-format msgid "Instance %(instance_id)s is not in rescue mode" msgstr "Копия %(instance_id)s не переведена в режим восстановления" #, python-format msgid "Instance %(instance_id)s is not ready" msgstr "Экземпляр %(instance_id)s не готов" #, python-format msgid "Instance %(instance_id)s is not running." msgstr "Копия %(instance_id)s не выполняется." 
#, python-format msgid "Instance %(instance_id)s is unacceptable: %(reason)s" msgstr "Копия %(instance_id)s недопустима: %(reason)s" #, python-format msgid "Instance %(instance_uuid)s does not specify a NUMA topology" msgstr "В экземпляре %(instance_uuid)s не указана топология NUMA" #, python-format msgid "Instance %(instance_uuid)s does not specify a migration context." msgstr "Экземпляр %(instance_uuid)s не задает контекст миграции." #, python-format msgid "" "Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while " "the instance is in this state." msgstr "" "Копия %(instance_uuid)s в %(attr)s %(state)s. Невозможно %(method)s во время " "нахождения копии в этом состоянии." #, python-format msgid "Instance %(instance_uuid)s is locked" msgstr "Экземпляр %(instance_uuid)s заблокирован" #, python-format msgid "" "Instance %(instance_uuid)s requires config drive, but it does not exist." msgstr "" "Для экземпляра %(instance_uuid)s необходим диск конфигурации, но оне не " "существует." #, python-format msgid "Instance %(name)s already exists." msgstr "Копия %(name)s уже существует." #, python-format msgid "Instance %(server_id)s is in an invalid state for '%(action)s'" msgstr "" "Экземпляр %(server_id)s находится в недопустимом состоянии для действия " "'%(action)s'" #, python-format msgid "Instance %(uuid)s has no mapping to a cell." msgstr "Экземпляр %(uuid)s не имеет связей с ячейкой." #, python-format msgid "Instance %s not found" msgstr "Экземпляр %s не найден" #, python-format msgid "Instance %s provisioning was aborted" msgstr "Предоставление ресурсов для экземпляра %s прервано." msgid "Instance could not be found" msgstr "Копия не найдена" msgid "Instance disk to be encrypted but no context provided" msgstr "Диск экземпляра должен шифроваться, но не передан контекст" msgid "Instance event failed" msgstr "Сбой события экземпляра" #, python-format msgid "Instance group %(group_uuid)s already exists." msgstr "Группа экземпляров %(group_uuid)s уже существует." #, python-format msgid "Instance group %(group_uuid)s could not be found." msgstr "Группа экземпляров %(group_uuid)s не найдена." msgid "Instance has no source host" msgstr "В экземпляре отсутствует исходный хост" msgid "Instance has not been resized." msgstr "С копией не производилось изменение размера." #, python-format msgid "Instance hostname %(hostname)s is not a valid DNS name" msgstr "Недопустимое имя DNS экземпляра %(hostname)s" msgid "Instance is not a member of specified network" msgstr "Копия не является участником заданной сети" #, python-format msgid "Instance rollback performed due to: %s" msgstr "Выполнен откат экземпляра вследствие: %s" #, python-format msgid "" "Insufficient Space on Volume Group %(vg)s. Only %(free_space)db available, " "but %(size)d bytes required by volume %(lv)s." msgstr "" "Недостаточное пространство в группе томов %(vg)s. Доступно только " "%(free_space)db, но %(size)d байт запрошено томом %(lv)s." #, python-format msgid "Insufficient compute resources: %(reason)s." msgstr "Недостаточно вычислительных ресурсов: %(reason)s." #, python-format msgid "Interface %(interface)s not found." msgstr "Интерфейс %(interface)s не найден." #, python-format msgid "Invalid Base 64 data for file %(path)s" msgstr "Недопустимые данные Base 64 для файла %(path)s" msgid "Invalid Connection Info" msgstr "Неверная информация о соединении" #, python-format msgid "Invalid ID received %(id)s." msgstr "Получен неверный ИД %(id)s." 
#, python-format msgid "Invalid IP format %s" msgstr "Недопустимый формат IP %s" #, python-format msgid "Invalid IP protocol %(protocol)s." msgstr "Недопустимый протокол IP %(protocol)s." msgid "" "Invalid PCI Whitelist: The PCI whitelist can specify devname or address, but " "not both" msgstr "" "Недопустимый белый список PCI: в белом списке PCI может быть задано либо имя " "устройства, либо адрес, но не то и другое одновременно" #, python-format msgid "Invalid PCI alias definition: %(reason)s" msgstr "Недопустимое определение псевдонима PCI: %(reason)s" #, python-format msgid "Invalid Regular Expression %s" msgstr "Недопустимое регулярное выражение %s" #, python-format msgid "Invalid characters in hostname '%(hostname)s'" msgstr "Недопустимые символы в имени хоста %(hostname)s" msgid "Invalid config_drive provided." msgstr "Указан неверный config_drive." #, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "Неверный config_drive_format \"%s\"" #, python-format msgid "Invalid console type %(console_type)s" msgstr "Недопустимый тип консоли %(console_type)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Недопустимый тип содержимого %(content_type)s." #, python-format msgid "Invalid datetime string: %(reason)s" msgstr "Недопустимая строка даты/времени: %(reason)s" msgid "Invalid device UUID." msgstr "Недопустимый UUID устройства." #, python-format msgid "Invalid entry: '%s'" msgstr "Недопустимая запись: '%s'" #, python-format msgid "Invalid entry: '%s'; Expecting dict" msgstr "Недопустимая запись: '%s'; ожидался dict" #, python-format msgid "Invalid entry: '%s'; Expecting list or dict" msgstr "Недопустимая запись: '%s'; ожидается список или dict" #, python-format msgid "Invalid exclusion expression %r" msgstr "Недопустимое выражение исключения %r" #, python-format msgid "Invalid image format '%(format)s'" msgstr "Недопустимый формат образа '%(format)s'" #, python-format msgid "Invalid image href %(image_href)s." msgstr "Недопустимый образ href %(image_href)s." #, python-format msgid "Invalid inclusion expression %r" msgstr "Недопустимое выражение включения %r" #, python-format msgid "" "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s" msgstr "" "Недопустимые входные данные для поля/атрибута %(path)s. Значение: %(value)s. " "%(message)s" #, python-format msgid "Invalid input received: %(reason)s" msgstr "Получены недопустимые входные данные: %(reason)s" msgid "Invalid instance image." msgstr "Неверный образ экземпляра." #, python-format msgid "Invalid is_public filter [%s]" msgstr "Неверный фильтр is_public [%s]" msgid "Invalid key_name provided." msgstr "Предоставлен недопустимый key_name." #, python-format msgid "Invalid memory page size '%(pagesize)s'" msgstr "Недопустимый размер страницы памяти '%(pagesize)s'" msgid "Invalid metadata key" msgstr "Неправильный ключ метаданных" #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "Недопустимый размер метаданных: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "Недопустимые метаданные: %(reason)s" #, python-format msgid "Invalid minDisk filter [%s]" msgstr "Неверный фильтр minDisk_public [%s]" #, python-format msgid "Invalid minRam filter [%s]" msgstr "Неверный фильтр minRam_public [%s]" #, python-format msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" msgstr "Недопустимый диапазон портов %(from_port)s:%(to_port)s. %(msg)s" msgid "Invalid proxy request signature." msgstr "Неверная подпись запроса прокси." 
#, python-format msgid "Invalid range expression %r" msgstr "Недопустимое выражение диапазона %r" msgid "Invalid service catalog json." msgstr "Недопустимый json каталога службы." msgid "Invalid start time. The start time cannot occur after the end time." msgstr "" "Неверное время запуска. Время запуска не может быть больше конечного времени." msgid "Invalid state of instance files on shared storage" msgstr "Недопустимое состояние файлов экземпляров в общем хранилище" #, python-format msgid "Invalid timestamp for date %s" msgstr "Неверное системное время для даты %s" #, python-format msgid "Invalid usage_type: %s" msgstr "Недопустимый usage_type: %s" #, python-format msgid "Invalid value for Config Drive option: %(option)s" msgstr "Недопустимое значение для опции Диск конфигурации: %(option)s" #, python-format msgid "Invalid virtual interface address %s in request" msgstr "Неверный адрес виртуального интерфейса %s в запросе" #, python-format msgid "Invalid volume access mode: %(access_mode)s" msgstr "Недопустимый режим доступа к тому: %(access_mode)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "Недопустимый том: %(reason)s" msgid "Invalid volume_size." msgstr "Недопустимое значение volume_size." #, python-format msgid "Ironic node uuid not supplied to driver for instance %s." msgstr "Драйверу не передан UUID узла Ironic для экземпляра %s." #, python-format msgid "" "It is not allowed to create an interface on external network %(network_uuid)s" msgstr "Не разрешено создавать интерфейсы во внешней сети %(network_uuid)s" msgid "" "Key Names can only contain alphanumeric characters, periods, dashes, " "underscores, colons and spaces." msgstr "" "Имена ключей могут содержать в себе только алфавитно-цифровые символы, " "точки, дефисы, подчеркивания, двоеточия и пробелы." #, python-format msgid "Key manager error: %(reason)s" msgstr "Ошибка администратора ключей: %(reason)s" #, python-format msgid "Key pair '%(key_name)s' already exists." msgstr "Пара ключа '%(key_name)s' уже существует." #, python-format msgid "Keypair %(name)s not found for user %(user_id)s" msgstr "" "Криптографическая пара %(name)s не найдена для пользователя %(user_id)s" #, python-format msgid "Keypair data is invalid: %(reason)s" msgstr "Недопустимая пара ключей: %(reason)s" msgid "Limits only supported from vCenter 6.0 and above" msgstr "Ограничения поддерживаются только в vCenter 6.0 и выше" #, python-format msgid "Live migration %(id)s for server %(uuid)s is not in progress." msgstr "Оперативный перенос %(id)s для сервера %(uuid)s не выполняется." #, python-format msgid "Malformed message body: %(reason)s" msgstr "Неправильное тело сообщения: %(reason)s" #, python-format msgid "" "Malformed request URL: URL's project_id '%(project_id)s' doesn't match " "Context's project_id '%(context_project_id)s'" msgstr "" "Неверный формат URL запроса: project_id '%(project_id)s' для URL не " "соответствует project_id '%(context_project_id)s' для контекста" msgid "Malformed request body" msgstr "Неправильное тело запроса" msgid "Mapping image to local is not supported." msgstr "Преобразование образа в локальный не поддерживается." #, python-format msgid "Marker %(marker)s could not be found." msgstr "Маркер %(marker)s не найден." 
msgid "Maximum number of floating IPs exceeded" msgstr "Превышено максимальное число нефиксированных IP" #, python-format msgid "Maximum number of metadata items exceeds %(allowed)d" msgstr "Максимальное число элементов метаданных превышает %(allowed)d" msgid "Maximum number of ports exceeded" msgstr "Превышено максимальное число портов" msgid "Maximum number of security groups or rules exceeded" msgstr "Максимальное число групп защиты или правил превышено" msgid "Metadata item was not found" msgstr "Элемент метаданных не найден" msgid "Metadata property key greater than 255 characters" msgstr "Ключ свойства метаданных превышает 256 символов" msgid "Metadata property value greater than 255 characters" msgstr "Значение свойства метаданных превышает 256 символов" msgid "Metadata type should be dict." msgstr "Тип метаданных должен быть задан как dict." #, python-format msgid "" "Metric %(name)s could not be found on the compute host node %(host)s." "%(node)s." msgstr "" "Не удалось найти показатель %(name)s на вычислительном узле хоста %(host)s." "%(node)s." #, python-format msgid "Migration %(id)s for server %(uuid)s is not live-migration." msgstr "" "Перенос %(id)s для сервера %(uuid)s не выполняется в оперативном режиме." #, python-format msgid "Migration %(migration_id)s could not be found." msgstr "Перемещение %(migration_id)s не найдено." #, python-format msgid "Migration %(migration_id)s not found for instance %(instance_id)s" msgstr "Перенос %(migration_id)s не найден для экземпляра %(instance_id)s" #, python-format msgid "" "Migration %(migration_id)s state of instance %(instance_uuid)s is %(state)s. " "Cannot %(method)s while the migration is in this state." msgstr "" "Состояние переноса %(migration_id)s для экземпляра %(instance_uuid)s - " "%(state)s. В этом состоянии переноса выполнить %(method)s невозможно." #, python-format msgid "Migration error: %(reason)s" msgstr "Ошибка переноса: %(reason)s" msgid "Migration is not supported for LVM backed instances" msgstr "Перенос не поддерживается для зарезервированных экземпляров LVM" #, python-format msgid "" "Migration not found for instance %(instance_id)s with status %(status)s." msgstr "" "Перемещение не найдено для копии %(instance_id)s в состоянии %(status)s." #, python-format msgid "Migration pre-check error: %(reason)s" msgstr "Ошибка предварительной проверки переноса: %(reason)s" #, python-format msgid "Migration select destinations error: %(reason)s" msgstr "Ошибка выбора целевых объектов переноса: %(reason)s" #, python-format msgid "Missing arguments: %s" msgstr "Отсутствуют аргументы: %s" msgid "Missing device UUID." msgstr "Не указан UUID устройства." msgid "Missing disabled reason field" msgstr "Отсутствует поле причины выключения" msgid "Missing forced_down field" msgstr "Отсутствует поле forced_down " msgid "Missing imageRef attribute" msgstr "Отсутствует атрибут imageRef" #, python-format msgid "Missing keys: %s" msgstr "Отсутствуют ключи: %s" msgid "Missing parameter dict" msgstr "Отсутствует параметр dict" #, python-format msgid "" "More than one instance is associated with fixed IP address '%(address)s'." msgstr "" "Более одного экземпляра связано с фиксированным IP-адресом '%(address)s'." msgid "" "More than one possible network found. Specify network ID(s) to select which " "one(s) to connect to." msgstr "" "Найдено более одной возможной сети. Укажите ИД сетей, к которой требуется " "выполнить подключение." msgid "More than one swap drive requested." msgstr "Запрошено несколько временных дисков." 
#, python-format msgid "Multi-boot operating system found in %s" msgstr "Операционная система альтернативной загрузки найдена в %s" msgid "Multiple X-Instance-ID headers found within request." msgstr "Несколько заголовков X-Instance-ID находится в запросе." msgid "Multiple X-Tenant-ID headers found within request." msgstr "В запросе обнаружено несколько заголовков X-Tenant-ID." #, python-format msgid "Multiple floating IP pools matches found for name '%s'" msgstr "Найдено несколько соответствий пулов нефиксированных IP для имени '%s'" #, python-format msgid "Multiple floating IPs are found for address %(address)s." msgstr "Несколько нефиксированных IP найдено для адреса %(address)s." msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we do " "not return uptime for just one host." msgstr "" "Драйвером VMWare vCenter может управляться несколько хостов, поэтому время " "работы для отдельного хоста не возвращается." msgid "Multiple possible networks found, use a Network ID to be more specific." msgstr "" "Найдено несколько возможных сетей, используйте более определенный ИД сети." #, python-format msgid "" "Multiple security groups found matching '%s'. Use an ID to be more specific." msgstr "" "Найдено несколько групп защиты, соответствующих '%s'. Используйте ИД, " "который будет более определенным." msgid "Must input network_id when request IP address" msgstr "Необходимо указывать network_id при запросе IP-адреса" msgid "Must not input both network_id and port_id" msgstr "Нельзя вводить и network_id, и port_id" msgid "" "Must specify host_ip, host_username and host_password to use vmwareapi." "VMwareVCDriver" msgstr "" "Необходимо указать host_ip, host_username и host_password для использования " "vmwareapi.VMwareVCDriver" msgid "Must supply a positive value for max_number" msgstr "max_number должно быть положительным числом" msgid "Must supply a positive value for max_rows" msgstr "Необходимо предоставить положительное значение для max_rows" #, python-format msgid "Network %(network_id)s could not be found." msgstr "Сеть %(network_id)s не найдена." #, python-format msgid "" "Network %(network_uuid)s requires a subnet in order to boot instances on." msgstr "Сети %(network_uuid)s требуется подсеть для загрузки экземпляров." #, python-format msgid "Network could not be found for bridge %(bridge)s" msgstr "Сеть не может быть найдена для моста %(bridge)s" #, python-format msgid "Network could not be found for instance %(instance_id)s." msgstr "Сеть не найдена для копии %(instance_id)s." msgid "Network not found" msgstr "Сеть не найдена" msgid "" "Network requires port_security_enabled and subnet associated in order to " "apply security groups." msgstr "" "Сеть требует связи port_security_enabled и subnet, для того чтобы применить " "группы защиты." msgid "New volume must be detached in order to swap." msgstr "Для подкачки новый том необходимо отключить." msgid "New volume must be the same size or larger." msgstr "Размер нового тома должен быть тем же или большим." #, python-format msgid "No Block Device Mapping with id %(id)s." msgstr "Отсутствует связь блочного устройства с ИД %(id)s." msgid "No Unique Match Found." msgstr "Уникальное соответствие не найдено." 
msgid "No compute host specified" msgstr "Хост вычислений не указан" #, python-format msgid "No configuration information found for operating system %(os_name)s" msgstr "" "Не найдена информация о конфигурации для операционной системы %(os_name)s" #, python-format msgid "No device with MAC address %s exists on the VM" msgstr "В виртуальной машине нет устройства с MAC-адресом %s" #, python-format msgid "No device with interface-id %s exists on VM" msgstr "В виртуальной машине нет устройства с interface-id %s" #, python-format msgid "No disk at %(location)s" msgstr "Отсутствует диск в %(location)s" #, python-format msgid "No fixed IP addresses available for network: %(net)s" msgstr "Нет доступных фиксированных IP-адресов для сети %(net)s" msgid "No fixed IPs associated to instance" msgstr "Нет фиксированных IP, связанных с экземпляром" msgid "No free nbd devices" msgstr "Нет свободных устройств nbd" msgid "No host available on cluster" msgstr "Отсутствует хост в кластере" msgid "No hosts found to map to cell, exiting." msgstr "Нет хостов для связывания с ячейкой, выход." #, python-format msgid "No hypervisor matching '%s' could be found." msgstr "Гипервизор, соответствующий '%s', не найден." msgid "No image locations are accessible" msgstr "Нет доступных расположений образов" #, python-format msgid "" "No live migration URI configured and no default available for " "\"%(virt_type)s\" hypervisor virtualization type." msgstr "" "Не настроен URI оперативной миграции, и не задано значение по умолчанию для " "типа виртуализации гипервизора \"%(virt_type)s\"." msgid "No more floating IPs available." msgstr "Нет доступных нефиксированных IP." #, python-format msgid "No more floating IPs in pool %s." msgstr "Нет доступных нефиксированных IP в пуле %s." #, python-format msgid "No mount points found in %(root)s of %(image)s" msgstr "Точки монтирования не найдены в %(root)s из %(image)s" #, python-format msgid "No operating system found in %s" msgstr "Операционная система не найдена в %s" msgid "No root disk defined." msgstr "Не определен корневой диск." #, python-format msgid "" "No specific network was requested and none are available for project " "'%(project_id)s'." msgstr "" "Требуемая сеть не задана, и нет доступных сетей для проекта '%(project_id)s'." msgid "No valid host found for cold migrate" msgstr "Не найден допустимый хост для холодного переноса" msgid "No valid host found for resize" msgstr "Не найдены допустимые хосты для изменения размера" #, python-format msgid "No valid host was found. %(reason)s" msgstr "Допустимый узел не найден. %(reason)s" #, python-format msgid "No volume Block Device Mapping at path: %(path)s" msgstr "Нет отображения блочных устройств тома в пути: %(path)s" #, python-format msgid "No volume Block Device Mapping with id %(volume_id)s." msgstr "Отсутствует связывание блочного устройства тома с ИД %(volume_id)s." #, python-format msgid "Node %s could not be found." msgstr "Узел %s не найден." #, python-format msgid "Not able to acquire a free port for %(host)s" msgstr "Не удалось получить свободный порт для %(host)s" #, python-format msgid "Not able to bind %(host)s:%(port)d, %(error)s" msgstr "Не удалось связать %(host)s:%(port)d, %(error)s" #, python-format msgid "" "Not all Virtual Functions of PF %(compute_node_id)s:%(address)s are free." msgstr "" "Не все виртуальные функции для PF %(compute_node_id)s:%(address)s свободны." msgid "Not an rbd snapshot" msgstr "Не является моментальной копией rbd" #, python-format msgid "Not authorized for image %(image_id)s." 
msgstr "Нет доступа к образу %(image_id)s." msgid "Not authorized." msgstr "Не авторизировано." msgid "Not enough parameters to build a valid rule." msgstr "Недостаточно параметров для сбора правильного правила." msgid "Not stored in rbd" msgstr "Не сохранено в rbd" msgid "Nothing was archived." msgstr "Ничего не архивировано." #, python-format msgid "Nova does not support Cinder API version %(version)s" msgstr "Nova не поддерживает версию API Cinder %(version)s" #, python-format msgid "Nova requires libvirt version %s or greater." msgstr "Для Nova требуется версия libvirt %s или выше." msgid "Number of Rows Archived" msgstr "Число архивированных строк" #, python-format msgid "Object action %(action)s failed because: %(reason)s" msgstr "Действие объекта %(action)s не выполнено, причина: %(reason)s" msgid "Old volume is attached to a different instance." msgstr "Старый том подключен к другому экземпляру." #, python-format msgid "One or more hosts already in availability zone(s) %s" msgstr "Один или несколько хостов уже находятся в зоне готовности %s" msgid "Only administrators may list deleted instances" msgstr "Только администраторы могут выводить список удаленных экземпляров" msgid "Origin header does not match this host." msgstr "Заголовок Origin не соответствует данному хосту." msgid "Origin header not valid." msgstr "Недопустимый заголовок Origin." msgid "Origin header protocol does not match this host." msgstr "Протокол заголовка Origin не соответствует данному хосту." #, python-format msgid "PCI Device %(node_id)s:%(address)s not found." msgstr "Устройство PCI %(node_id)s:%(address)s не найдено." #, python-format msgid "PCI alias %(alias)s is not defined" msgstr "Псевдоним PCI %(alias)s не определен" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is %(status)s instead of " "%(hopestatus)s" msgstr "" "Устройство PCI %(compute_node_id)s:%(address)s является %(status)s, а не " "%(hopestatus)s" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead of " "%(hopeowner)s" msgstr "" "Устройство PCI %(compute_node_id)s:%(address)s принадлежит %(owner)s, а не " "%(hopeowner)s" #, python-format msgid "PCI device %(id)s not found" msgstr "Устройство PCI %(id)s не найдено" #, python-format msgid "PCI device request %(requests)s failed" msgstr "Запрос устройства PCI %(requests)s не выполнен" #, python-format msgid "Page size %(pagesize)s forbidden against '%(against)s'" msgstr "Размер страницы %(pagesize)s запрещен для '%(against)s'" #, python-format msgid "Page size %(pagesize)s is not supported by the host." msgstr "Размер страницы %(pagesize)s не поддерживается хостом." #, python-format msgid "" "Parameters %(missing_params)s not present in vif_details for vif %(vif_id)s. " "Check your Neutron configuration to validate that the macvtap parameters are " "correct." msgstr "" "Параметры %(missing_params)s отсутствуют в vif_details для vif %(vif_id)s. " "Проверьте правильность параметров macvtap в конфигурации Neutron. 
" #, python-format msgid "Path %s must be LVM logical volume" msgstr "Путь %s должен быть логическим томом LVM" msgid "Paused" msgstr "На паузе" msgid "Personality file limit exceeded" msgstr "Превышено ограничение файла личных параметров" #, python-format msgid "" "Physical Function %(compute_node_id)s:%(address)s, related to VF " "%(compute_node_id)s:%(vf_address)s is %(status)s instead of %(hopestatus)s" msgstr "" "Состояние физической функции %(compute_node_id)s:%(address)s, связанной с VF " "%(compute_node_id)s:%(vf_address)s - это %(status)s вместо %(hopestatus)s" #, python-format msgid "Physical network is missing for network %(network_uuid)s" msgstr "Отсутствует физическая сеть для сети %(network_uuid)s" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "Политика не допускает выполнения %(action)s." #, python-format msgid "Port %(port_id)s is still in use." msgstr "Порт %(port_id)s по-прежнему занят." #, python-format msgid "Port %(port_id)s not usable for instance %(instance)s." msgstr "Порт %(port_id)s не применим для экземпляра %(instance)s." #, python-format msgid "" "Port %(port_id)s not usable for instance %(instance)s. Value %(value)s " "assigned to dns_name attribute does not match instance's hostname " "%(hostname)s" msgstr "" "Порт %(port_id)s не применим для экземпляра %(instance)s. Значение " "%(value)s, присвоенное атрибуту dns_name attribute, не совпадает с именем " "хоста %(hostname)s экземпляра" #, python-format msgid "Port %(port_id)s requires a FixedIP in order to be used." msgstr "Для использования порта %(port_id)s требуется FixedIP." #, python-format msgid "Port %s is not attached" msgstr "Порт %s не подключен" #, python-format msgid "Port id %(port_id)s could not be found." msgstr "Не удалось найти ИД порта %(port_id)s." #, python-format msgid "Provided video model (%(model)s) is not supported." msgstr "Переданная модель видео (%(model)s) не поддерживается." #, python-format msgid "Provided watchdog action (%(action)s) is not supported." msgstr "" "Указанное действие сторожевого устройства %(action)s) не поддерживается." msgid "QEMU guest agent is not enabled" msgstr "Гостевой агент QEMU не включен" #, python-format msgid "Quiescing is not supported in instance %(instance_id)s" msgstr "В экземпляре %(instance_id)s приостановка не поддерживается " #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "Класс квоты %(class_name)s не найден." msgid "Quota could not be found" msgstr "Квота не найдена" #, python-format msgid "" "Quota exceeded for %(overs)s: Requested %(req)s, but already used %(used)s " "of %(allowed)s %(overs)s" msgstr "" "Превышена квота для %(overs)s: Запрошено %(req)s, но уже используется " "%(used)s %(allowed)s %(overs)s" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Квота превышена для ресурсов: %(overs)s" msgid "Quota exceeded, too many key pairs." msgstr "Квота превышена, слишком много пар ключей." msgid "Quota exceeded, too many server groups." msgstr "Превышена квота, слишком много групп серверов." msgid "Quota exceeded, too many servers in group" msgstr "Превышена квота, слишком много серверов в группе." #, python-format msgid "Quota exists for project %(project_id)s, resource %(resource)s" msgstr "Квота существует для проекта %(project_id)s, ресурс %(resource)s" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "Квота проекта %(project_id)s не найдена." 
#, python-format msgid "" "Quota for user %(user_id)s in project %(project_id)s could not be found." msgstr "" "Не удалось найти квоту для пользователя %(user_id)s в проекте %(project_id)s." #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be greater than or equal to " "already used and reserved %(minimum)s." msgstr "" "Ограничение квоты %(limit)s для %(resource)s должно быть не меньше уже " "занятого и зарезервированного объема %(minimum)s." #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be less than or equal to " "%(maximum)s." msgstr "" "Ограничение квоты %(limit)s для %(resource)s должно быть не больше " "%(maximum)s." msgid "Request body and URI mismatch" msgstr "Тело запроса и URI не совпадают" msgid "Request is too large." msgstr "Запрос слишком велик." #, python-format msgid "Request of image %(image_id)s got BadRequest response: %(response)s" msgstr "На запрос образа %(image_id)s получен ответ BadRequest: %(response)s" #, python-format msgid "RequestSpec not found for instance %(instance_uuid)s" msgstr "RequestSpec не найден для %(instance_uuid)s" msgid "Requested CPU control policy not supported by host" msgstr "Запрошенная стратегия управления CPU не поддерживается хостом" #, python-format msgid "" "Requested hardware '%(model)s' is not supported by the '%(virt)s' virt driver" msgstr "" "Запрошенное аппаратное обеспечение '%(model)s' не поддерживается виртуальным " "драйвером '%(virt)s' " #, python-format msgid "Requested image %(image)s has automatic disk resize disabled." msgstr "" "Для запрашиваемого образа %(image)s отключена функция автоматического " "изменения размера диска." msgid "" "Requested instance NUMA topology cannot fit the given host NUMA topology" msgstr "" "Запрошенная топология NUMA экземпляра не подходит для данной топологии NUMA " "хоста" msgid "" "Requested instance NUMA topology together with requested PCI devices cannot " "fit the given host NUMA topology" msgstr "" "Запрошенная топология NUMA экземпляра и запрошенные устройства PCI не " "помещаются в заданной топологии NUMA хоста" #, python-format msgid "" "Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to " "satisfy for vcpus count %(vcpus)d" msgstr "" "Запрошенные ограничения VCPU %(sockets)d:%(cores)d:%(threads)d невозможно " "удовлетворить для %(vcpus)d VCPU" #, python-format msgid "Rescue device does not exist for instance %s" msgstr "Устройство аварийного восстановления не существует для экземпляра %s" #, python-format msgid "Resize error: %(reason)s" msgstr "Ошибка при изменении размера: %(reason)s" msgid "Resize to zero disk flavor is not allowed." msgstr "Разновидность диска нельзя сделать нулевого размера." msgid "Resource could not be found." msgstr "Ресурс не может быть найден." msgid "Resumed" msgstr "Продолжено" #, python-format msgid "Root element name should be '%(name)s' not '%(tag)s'" msgstr "Имя корневого элемента должно быть %(name)s, а не %(tag)s" #, python-format msgid "Running batches of %i until complete" msgstr "Пакеты %i будут выполняться до полного завершения" #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "Фильтр узлов диспетчера %(filter_name)s не найден" #, python-format msgid "Security group %(name)s is not found for project %(project)s" msgstr "Группа защиты %(name)s не найдена для проекта %(project)s" #, python-format msgid "" "Security group %(security_group_id)s not found for project %(project_id)s." 
msgstr "" "Группа безопасности %(security_group_id)s не найдена для проекта " "%(project_id)s." #, python-format msgid "Security group %(security_group_id)s not found." msgstr "Группа безопасности %(security_group_id)s не найдена." #, python-format msgid "" "Security group %(security_group_name)s already exists for project " "%(project_id)s." msgstr "" "Группа защиты %(security_group_name)s уже существует для проекта. " "%(project_id)s." #, python-format msgid "" "Security group %(security_group_name)s not associated with the instance " "%(instance)s" msgstr "" "Группа защиты %(security_group_name)s не связана с экземпляром %(instance)s" msgid "Security group id should be uuid" msgstr "ИД группы защиты должен быть uuid" msgid "Security group name cannot be empty" msgstr "Наименование группы безопасности не может отсутствовать" msgid "Security group not specified" msgstr "Группа безопасности не задана" #, python-format msgid "Server disk was unable to be resized because: %(reason)s" msgstr "Размер диска сервера не может изменен, причина: %(reason)s" msgid "Server does not exist" msgstr "Сервер не существует" #, python-format msgid "ServerGroup policy is not supported: %(reason)s" msgstr "Стратегия ServerGroup не поддерживается: %(reason)s" msgid "ServerGroupAffinityFilter not configured" msgstr "ServerGroupAffinityFilter не настроен" msgid "ServerGroupAntiAffinityFilter not configured" msgstr "ServerGroupAntiAffinityFilter не настроен" msgid "ServerGroupSoftAffinityWeigher not configured" msgstr "ServerGroupSoftAffinityWeigher не настроен" msgid "ServerGroupSoftAntiAffinityWeigher not configured" msgstr "ServerGroupSoftAntiAffinityWeigher не настроен" #, python-format msgid "Service %(service_id)s could not be found." msgstr "Служба %(service_id)s не найдена." #, python-format msgid "Service %s not found." msgstr "Служба %s не найдена." msgid "Service is unavailable at this time." msgstr "В данный момент служба недоступна." #, python-format msgid "Service with host %(host)s binary %(binary)s exists." msgstr "Служба с хостом %(host)s для двоичного файла %(binary)s существует." #, python-format msgid "Service with host %(host)s topic %(topic)s exists." msgstr "Служба с хостом %(host)s для раздела %(topic)s существует." msgid "Set admin password is not supported" msgstr "Указание пароля администратора не поддерживается." #, python-format msgid "Share '%s' is not supported" msgstr "Общий ресурс '%s' не поддерживается" #, python-format msgid "Share level '%s' cannot have share configured" msgstr "Для уровня '%s' общего ресурса нельзя настраивать общий ресурс. " #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "Снимок %(snapshot_id)s не может быть найден." msgid "Some required fields are missing" msgstr "Не указаны некоторые обязательные поля" #, python-format msgid "" "Something went wrong when deleting a volume snapshot: rebasing a " "%(protocol)s network disk using qemu-img has not been fully tested" msgstr "" "Непредвиденная ошибка при удалении моментальной копии тома: изменение базы " "сетевого диска %(protocol)s с помощью qemu-img не полностью отлажено" msgid "Sort direction size exceeds sort key size" msgstr "Размер направления сортировки превышает размер ключа сортировки" msgid "Sort key supplied was not valid." msgstr "Указанный ключ сортировки неверен." 
msgid "Specified fixed address not assigned to instance" msgstr "Указанный фиксированный адрес не назначен экземпляру" msgid "Started" msgstr "Начато" msgid "Stopped" msgstr "Остановлен" #, python-format msgid "Storage error: %(reason)s" msgstr "Ошибка хранилища: %(reason)s" #, python-format msgid "Storage policy %s did not match any datastores" msgstr "Стратегия хранения %s не соответствует ни одному хранилищу данных" msgid "Success" msgstr "Успешно" msgid "Suspended" msgstr "Приостановлено" msgid "Swap drive requested is larger than instance type allows." msgstr "" "Размер запрашиваемого съемного диска превышает допустимый для данного типа " "экземпляра." msgid "Table" msgstr "Таблица" #, python-format msgid "Task %(task_name)s is already running on host %(host)s" msgstr "Задача %(task_name)s уже выполняется на хосте %(host)s" #, python-format msgid "Task %(task_name)s is not running on host %(host)s" msgstr "Задача %(task_name)s не выполняется на хосте %(host)s" #, python-format msgid "The PCI address %(address)s has an incorrect format." msgstr "Некорректный формат адреса PCI %(address)s." #, python-format msgid "The console port range %(min_port)d-%(max_port)d is exhausted." msgstr "Диапазон портов консоли %(min_port)d-%(max_port)d исчерпан." msgid "The created instance's disk would be too small." msgstr "Созданный диск экземпляра будет недостаточным." msgid "The current driver does not support preserving ephemeral partitions." msgstr "Текущий драйвер не поддерживает сохранение временных разделов." msgid "The default PBM policy doesn't exist on the backend." msgstr "Стратегия PBM по умолчанию не существует на базовом сервере." msgid "The floating IP request failed with a BadRequest" msgstr "Сбой нефиксированного IP-адреса с BadRequest" msgid "" "The instance requires a newer hypervisor version than has been provided." msgstr "Копии необходима новая версия гипервизора, вместо предоставленной." #, python-format msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d" msgstr "" "Число определенных портов %(ports)dis превышает максимально разрешенное: " "%(quota)d" #, python-format msgid "The provided RNG device path: (%(path)s) is not present on the host." msgstr "Указанный путь к устройству RNG: (%(path)s) не существует на хосте." msgid "The request is invalid." msgstr "Недопустимый запрос." #, python-format msgid "" "The requested amount of video memory %(req_vram)d is higher than the maximum " "allowed by flavor %(max_vram)d." msgstr "" "Запрошенный объем видео-памяти %(req_vram)d превышает максимально допустимый " "для разновидности %(max_vram)d." msgid "The requested availability zone is not available" msgstr "Запрашиваемая зона готовности недоступна" msgid "The requested functionality is not supported." msgstr "Запрошенная функциональность не поддерживается." #, python-format msgid "The specified cluster '%s' was not found in vCenter" msgstr "Указанный кластер '%s' не найден в vCenter" #, python-format msgid "The supplied device path (%(path)s) is in use." msgstr "Указанный путь к устройству (%(path)s) занят." #, python-format msgid "The supplied device path (%(path)s) is invalid." msgstr "Недопустимое размещение предоставленного устройства (%(path)s)." #, python-format msgid "" "The supplied disk path (%(path)s) already exists, it is expected not to " "exist." msgstr "" "Предоставленный адрес диска (%(path)s) уже существует, но ожидалось, что " "отсутствует." msgid "The supplied hypervisor type of is invalid." msgstr "Представленный тип гипервизора неверен. 
" msgid "The target host can't be the same one." msgstr "Целевой хост не может быть тем же самым." #, python-format msgid "The token '%(token)s' is invalid or has expired" msgstr "Маркер '%(token)s' недопустим или устарел" #, python-format msgid "" "The volume cannot be assigned the same device name as the root device %s" msgstr "" "Том нельзя назначить имени устройства, совпадающему с корневым устройством %s" msgid "There are not enough hosts available." msgstr "Нет достаточного числа доступных хостов." #, python-format msgid "There is no such action: %s" msgstr "Не существует такого действия: %s" #, python-format msgid "" "This compute node's hypervisor is older than the minimum supported version: " "%(version)s." msgstr "Версия гипервизора этого узла ниже минимально допустимой: %(version)s." msgid "" "This method needs to be called with either networks=None and port_ids=None " "or port_ids and networks as not none." msgstr "" "Метод должны быть вызван или с networks=None и port_ids=None, или с port_ids " "и networks отличными от none." #, python-format msgid "This rule already exists in group %s" msgstr "Это правило уже существует в группе %s" #, python-format msgid "" "This service is older (v%(thisver)i) than the minimum (v%(minver)i) version " "of the rest of the deployment. Unable to continue." msgstr "" "Версия этой службы (v%(thisver)i) меньше минимальной версии (v%(minver)i) " "остальных компонентов развертывания. Продолжение работы невозможно." msgid "Timeout waiting for response from cell" msgstr "Тайм-аут ожидания ответа от ячейки" #, python-format msgid "Timeout while checking if we can live migrate to host: %s" msgstr "" "Произошел тайм-аут при проверке возможности оперативной миграции на хост: %s" msgid "To and From ports must be integers" msgstr "Порты От и К должны быть целыми числами" msgid "Token not found" msgstr "Маркер не найден" msgid "Triggering crash dump is not supported" msgstr "Активация дампа сбоя не поддерживается" msgid "Type and Code must be integers for ICMP protocol type" msgstr "Значения Тип и Код должны быть целыми числами для типа протокола ICMP" msgid "UEFI is not supported" msgstr "UEFI не поддерживается" #, python-format msgid "" "Unable to associate floating IP %(address)s to any fixed IPs for instance " "%(id)s. Instance has no fixed IPv4 addresses to associate." msgstr "" "Не удается связать нефиксированный IP %(address)s с каким-либо из " "фиксированных IP для экземпляра %(id)s. У экземпляра нет фиксированных " "адресов IPv4 для связывания." #, python-format msgid "" "Unable to associate floating IP %(address)s to fixed IP %(fixed_address)s " "for instance %(id)s. Error: %(error)s" msgstr "" "Не удалось связать нефиксированный IP-адрес %(address)s с фиксированным IP-" "адресом %(fixed_address)s для экземпляра %(id)s. 
Ошибка: %(error)s" #, python-format msgid "Unable to convert image to %(format)s: %(exp)s" msgstr "Не удается преобразовать образ в %(format)s: %(exp)s" #, python-format msgid "Unable to convert image to raw: %(exp)s" msgstr "Не удается преобразовать образ в формат raw: %(exp)s" #, python-format msgid "Unable to determine disk bus for '%s'" msgstr "Невозможно определить шину диска для '%s'" #, python-format msgid "Unable to determine disk prefix for %s" msgstr "Невозможно определить префикс диска для %s" #, python-format msgid "Unable to find host for Instance %s" msgstr "Невозможно найти узел для копии %s" msgid "Unable to find iSCSI Target" msgstr "Невозможно найти назначение iSCSI" msgid "Unable to find volume" msgstr "Невозможно найти том" msgid "Unable to get host UUID: /etc/machine-id does not exist" msgstr "Не удалось получить UUID хоста: /etc/machine-id не существует" msgid "Unable to get host UUID: /etc/machine-id is empty" msgstr "Не удалось получить UUID хоста: /etc/machine-id пуст" msgid "" "Unable to launch multiple instances with a single configured port ID. Please " "launch your instance one by one with different ports." msgstr "" "Невозможно запустить несколько экземпляров с одним заданным ИД порта. " "Запустите свой экземпляр последовательно на разных портах." #, python-format msgid "" "Unable to migrate %(instance_uuid)s to %(dest)s: Lack of memory(host:" "%(avail)s <= instance:%(mem_inst)s)" msgstr "" "Невозможно перенести %(instance_uuid)s в %(dest)s: Недостаточно памяти(хост:" "%(avail)s <= экземпляр:%(mem_inst)s)" #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too large(available " "on destination host:%(available)s < need:%(necessary)s)" msgstr "" "Невозможно перенести %(instance_uuid)s: Диск экземпляра слишком велик " "(доступно на целевом хосте:%(available)s < необходимо:%(necessary)s)" #, python-format msgid "" "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." msgstr "" "Невозможно переместить копию (%(instance_id)s) на текущий узел (%(host)s)." msgid "Unable to resize disk down." msgstr "Нельзя уменьшить размер диска." msgid "Unable to set password on instance" msgstr "Невозможно установить пароль для экземпляра" msgid "Unable to shrink disk." msgstr "Не удалось уменьшить размер диска." #, python-format msgid "Unacceptable CPU info: %(reason)s" msgstr "Неприменимая информация о CPU: %(reason)s" msgid "Unacceptable parameters." msgstr "Недопустимые параметры." #, python-format msgid "Unavailable console type %(console_type)s." msgstr "Недопустимый тип консоли %(console_type)s." msgid "" "Undefined Block Device Mapping root: BlockDeviceMappingList contains Block " "Device Mappings from multiple instances." msgstr "" "Не определен корневой элемент связывания блочного устройства: " "BlockDeviceMappingList содержит связывания блочных устройств из нескольких " "экземпляров." #, python-format msgid "Unexpected aggregate action %s" msgstr "Непредвиденное составное действие %s" msgid "Unexpected type adding stats" msgstr "Непредвиденный тип добавления статистики" #, python-format msgid "Unexpected vif_type=%s" msgstr "Неожиданный vif_type=%s" msgid "Unknown" msgstr "Неизвестно" msgid "Unknown action" msgstr "Неизвестное действие" #, python-format msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat." msgstr "" "Неизвестный формат диска конфигурации %(format)s. Выберите iso9660 или vfat." 
#, python-format msgid "Unknown delete_info type %s" msgstr "Неизвестный тип delete_info %s" #, python-format msgid "Unknown image_type=%s" msgstr "Неизвестный image_type=%s" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Неизвестные ресурсы квоты: %(unknown)s." msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Неизвестное направление сортировки, должно быть 'desc' или 'asc'" #, python-format msgid "Unknown type: %s" msgstr "Неизвестный тип: %s" msgid "Unrecognized legacy format." msgstr "Нераспознанный устаревший формат." #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "Нераспознанное значение read_deleted '%s'" #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "Нераспознанное значение '%s' для CONF.running_deleted_instance_action" #, python-format msgid "Unshelve attempted but the image %s cannot be found." msgstr "" "Предпринята попытка возврата из отложенного состояния, но образ %s не найден." msgid "Unsupported Content-Type" msgstr "Не поддерживаемый тип содержимого" #, python-format msgid "User %(username)s not found in password file." msgstr "Пользователь %(username)s не найден в файле паролей." #, python-format msgid "User %(username)s not found in shadow file." msgstr "Пользователь %(username)s не найден в теневом файле." msgid "User data needs to be valid base 64." msgstr "Пользовательские данные должны иметь верный формат base64." msgid "User does not have admin privileges" msgstr "Пользователь не имеет административных привилегий" msgid "" "Using different block_device_mapping syntaxes is not allowed in the same " "request." msgstr "" "Использование разных форматов block_device_mapping в одном запросе не " "разрешено." #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "Версия %(req_ver)s не поддерживается в API. Минимальная требуемая версия: " "%(min_ver)s, максимальная: %(max_ver)s." msgid "Virtual Interface creation failed" msgstr "Ошибка создания виртуального интерфейса" msgid "Virtual interface plugin failed" msgstr "Ошибка в модуле виртуального интерфейса" #, python-format msgid "Virtual machine mode '%(vmmode)s' is not recognised" msgstr "Режим виртуальной машины %(vmmode)s не распознан" #, python-format msgid "Virtual machine mode '%s' is not valid" msgstr "Недопустимый режим виртуальной машины '%s'" #, python-format msgid "Virtualization type '%(virt)s' is not supported by this compute driver" msgstr "" "Тип виртуализации '%(virt)s' не поддерживается этим драйвером вычисления" #, python-format msgid "Volume %(volume_id)s could not be attached. Reason: %(reason)s" msgstr "Не удалось подключить том %(volume_id)s. Причина: %(reason)s" #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "Том %(volume_id)s не найден." #, python-format msgid "" "Volume %(volume_id)s did not finish being created even after we waited " "%(seconds)s seconds or %(attempts)s attempts. And its status is " "%(volume_status)s." msgstr "" "Создание тома %(volume_id)s не завершается долгое время %(seconds)s секунд " "или %(attempts)s попыток. Состояние тома: %(volume_status)s." msgid "Volume does not belong to the requested instance." msgstr "Том не относится к запрашиваемому экземпляру." 
#, python-format msgid "" "Volume encryption is not supported for %(volume_type)s volume %(volume_id)s" msgstr "Для тома %(volume_type)s %(volume_id)s не поддерживается шифрование " #, python-format msgid "" "Volume is smaller than the minimum size specified in image metadata. Volume " "size is %(volume_size)i bytes, minimum size is %(image_min_disk)i bytes." msgstr "" "Размер тома меньше, чем минимальный размер, указанный в метаданных образа. " "Размер тома составляет %(volume_size)i байт, минимальный размер - " "%(image_min_disk)i байт." #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " "support custom block size" msgstr "" "Том указывает размер блока, но текущий гипервизор libvirt '%s' не " "поддерживает нестандартный размер блока" msgid "When resizing, instances must change flavor!" msgstr "При изменении размера экземпляры должны изменить разновидность!" #, python-format msgid "Wrong quota method %(method)s used on resource %(res)s" msgstr "" "Используется неверный метод контроля квоты %(method)s для ресурса %(res)s" msgid "X-Forwarded-For is missing from request." msgstr "В запросе отсутствует X-Forwarded-For." msgid "X-Instance-ID header is missing from request." msgstr "Заголовок X-Instance-ID отсутствует в запросе." msgid "X-Instance-ID-Signature header is missing from request." msgstr "В запросе отсутствует заголовок X-Instance-ID-Signature." msgid "X-Metadata-Provider is missing from request." msgstr "В запросе отсутствует X-Metadata-Provider." msgid "X-Tenant-ID header is missing from request." msgstr "Заголовок X-Tenant-ID отсутствует в запросе." msgid "You are not allowed to delete the image." msgstr "Вам не разрешено удалять образ." msgid "" "You are not authorized to access the image the instance was started with." msgstr "У вас нет прав доступа к образу, с помощью которого запущен экземпляр." msgid "You must implement __call__" msgstr "Отсутствует реализация __call__" msgid "You should specify images_rbd_pool flag to use rbd images." msgstr "Необходимо указать флаг images_rbd_pool для использования образов rbd." msgid "You should specify images_volume_group flag to use LVM images." msgstr "" "Необходимо указать флаг images_volume_group для использования образов LVM." msgid "Zero floating IPs available." msgstr "Нет доступных нефиксированных IP." msgid "admin password can't be changed on existing disk" msgstr "пароль администратора не может быть изменен на существующем диске" msgid "cannot understand JSON" msgstr "невозможно понять JSON" msgid "clone() is not implemented" msgstr "Функция clone() не реализована" #, python-format msgid "connect info: %s" msgstr "информация о соединении: %s" #, python-format msgid "connecting to: %(host)s:%(port)s" msgstr "подключение к: %(host)s:%(port)s" msgid "direct_snapshot() is not implemented" msgstr "Функция direct_snapshot() не реализована" #, python-format msgid "disk type '%s' not supported" msgstr "тип диска %s не поддерживается" #, python-format msgid "empty project id for instance %s" msgstr "пустой ИД проекта для экземпляра %s" msgid "error setting admin password" msgstr "ошибка при установке пароля администратора" #, python-format msgid "error: %s" msgstr "Ошибка: %s" #, python-format msgid "failed to generate X509 fingerprint. Error message: %s" msgstr "не удалось создать отпечаток X509. 
Сообщение об ошибке: %s" msgid "failed to generate fingerprint" msgstr "не удалось создать отпечаток" msgid "filename cannot be None" msgstr "имя файла не может быть None" msgid "floating IP is already associated" msgstr "нефиксированный IP уже связан" msgid "floating IP not found" msgstr "нефиксированный IP не найден" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s backed by: %(backing_file)s" #, python-format msgid "href %s does not contain version" msgstr "href %s не содержит версию" msgid "image already mounted" msgstr "образ уже присоединён" #, python-format msgid "instance %s is not running" msgstr "Экземпляр %s не запущен" msgid "instance is a required argument to use @refresh_cache" msgstr "" "экземпляр является требуемым аргументом для использования @refresh_cache" msgid "instance is not in a suspended state" msgstr "копия не в приостановленном состоянии" msgid "instance is not powered on" msgstr "копия не включена" msgid "instance is powered off and cannot be suspended." msgstr "экземпляр выключен и не может быть приостановлен." #, python-format msgid "instance_id %s could not be found as device id on any ports" msgstr "Не удалось найти instance_id %s как ИД устройства ни на одном порту" msgid "is_public must be a boolean" msgstr "is_public должен быть boolean" msgid "keymgr.fixed_key not defined" msgstr "keymgr.fixed_key не определен" msgid "l3driver call to add floating IP failed" msgstr "Не удалось выполнить вызов l3driver для добавления нефиксированного IP" #, python-format msgid "libguestfs installed but not usable (%s)" msgstr "libguestfs установлена, но ее невозможно использовать (%s)" #, python-format msgid "libguestfs is not installed (%s)" msgstr "Не установлена libguestfs (%s)" #, python-format msgid "marker [%s] not found" msgstr "маркер [%s] не найден" #, python-format msgid "max rows must be <= %(max_value)d" msgstr "максимальное число строк должно быть <= %(max_value)d" msgid "max_count cannot be greater than 1 if an fixed_ip is specified." msgstr "" "Значение max_count не может быть больше 1, если указано значение fixed_ip." msgid "min_count must be <= max_count" msgstr "min_count должен быть <= max_count" #, python-format msgid "nbd device %s did not show up" msgstr "Устройство nbd %s не показан" msgid "nbd unavailable: module not loaded" msgstr "nbd недоступен: модуль не загружен" #, python-format msgid "no match found for %s" msgstr "не найдено соответствие для %s" #, python-format msgid "no usable parent snapshot for volume %s" msgstr "не найдена пригодная родительская моментальная копия для тома %s" #, python-format msgid "no write permission on storage pool %s" msgstr "нет прав записи в пул памяти %s" #, python-format msgid "not able to execute ssh command: %s" msgstr "не может выполнить команду ssh: %s" msgid "old style configuration can use only dictionary or memcached backends" msgstr "" "Конфигурация в старом стиле может основываться только на словаре или " "memcached" msgid "operation time out" msgstr "тайм-аут операции" #, python-format msgid "partition %s not found" msgstr "раздел %s не найден" #, python-format msgid "partition search unsupported with %s" msgstr "поиск раздела не поддерживается %s" msgid "pause not supported for vmwareapi" msgstr "остановка не поддерживается для vmwareapi" msgid "printable characters with at least one non space character" msgstr "печатаемые символы хотя бы один символ, не являющийся пробелом." msgid "printable characters. Can not start or end with whitespace." 
msgstr "печатаемые символы. Не может начинаться или заканчиваться пробелом." #, python-format msgid "qemu-img failed to execute on %(path)s : %(exp)s" msgstr "qemu-img не выполнен в %(path)s : %(exp)s" #, python-format msgid "qemu-nbd error: %s" msgstr "ошибка qemu-nbd: %s" msgid "rbd python libraries not found" msgstr "Не найдены библиотеки rbd и python" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" "read_deleted может принимать значения 'no', 'yes' или 'only', значение %r " "недопустимо" msgid "serve() can only be called once" msgstr "serve() может быть вызван только один раз" msgid "service is a mandatory argument for DB based ServiceGroup driver" msgstr "" "служба является обязательным аргументом для базы данных на основе драйвера " "ServiceGroup" msgid "service is a mandatory argument for Memcached based ServiceGroup driver" msgstr "" "служба является обязательным аргументом для Memcached на основе драйвера " "ServiceGroup" msgid "set_admin_password is not implemented by this driver or guest instance." msgstr "" "set_admin_password не реализован этим драйвером или гостевым экземпляром." #, python-format msgid "snapshot for %s" msgstr "моментальная копия для %s" msgid "snapshot_id required in create_info" msgstr "snapshot_id обязателен в create_info" msgid "token not provided" msgstr "маркер не указан" msgid "too many body keys" msgstr "слишком много ключей тела" msgid "unpause not supported for vmwareapi" msgstr "отмена остановки не поддерживается для vmwareapi" #, python-format msgid "vg %s must be LVM volume group" msgstr "vg %s должен быть группой томов LVM" #, python-format msgid "vhostuser_sock_path not present in vif_details for vif %(vif_id)s" msgstr "vhostuser_sock_path отсутствует в vif_details для vif %(vif_id)s" #, python-format msgid "vif type %s not supported" msgstr "Тип vif %s не поддерживается" msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "Параметр vif_type должен присутствовать для этой реализации vif_driver" #, python-format msgid "volume %s already attached" msgstr "Том %s уже присоединен" #, python-format msgid "" "volume '%(vol)s' status must be 'in-use'. Currently in '%(status)s' status" msgstr "" "Требуемое состояние '%(vol)s' тома: 'in-use'. Текущее состояние: '%(status)s'" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315688.893605 nova-32.0.0/nova/locale/tr_TR/0000775000175000017500000000000000000000000016016 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3736086 nova-32.0.0/nova/locale/tr_TR/LC_MESSAGES/0000775000175000017500000000000000000000000017603 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/locale/tr_TR/LC_MESSAGES/nova.po0000664000175000017500000021370000000000000021111 0ustar00zuulzuul00000000000000# Translations template for nova. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the nova project. # # Translators: # Özcan Zafer AYAN , 2013 # Özcan Zafer AYAN , 2013 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-07-04 18:13+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:09+0000\n" "Last-Translator: Copied by Zanata \n" "Language: tr_TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Turkish (Turkey)\n" #, python-format msgid "%(address)s is not a valid IP v4/6 address." msgstr "%(address)s geçerli bir IP v4/6 adresi değildir." #, python-format msgid "" "%(binary)s attempted direct database access which is not allowed by policy" msgstr "" "%(binary)s ilkesel olarak izin verilmeyen şekilde doğrudan veri tabanı " "erişimine kalkıştı" #, python-format msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB" msgstr "%(memsize)d MB hafıza atanmış, %(memtotal)d MB bekleniyordu" #, python-format msgid "%(path)s is not on local storage: %(reason)s" msgstr "%(path)s yerel depoda değil: %(reason)s" #, python-format msgid "%(path)s is not on shared storage: %(reason)s" msgstr "%(path)s paylaşımlı depoda değil: %(reason)s" #, python-format msgid "%(type)s hypervisor does not support PCI devices" msgstr "%(type)s hipervizörü PCI aygıtlarını desteklemiyor" #, python-format msgid "%s does not support disk hotplug." msgstr "%s disk canlı takmayı desteklemiyor." #, python-format msgid "%s format is not supported" msgstr "%s biçimi desteklenmiyor" #, python-format msgid "%s is not supported." msgstr "%s desteklenmiyor." #, python-format msgid "%s must be either 'MANUAL' or 'AUTO'." msgstr "%s 'MANUAL' veya 'AUTO' olmak zorunda" msgid "'qemu-img info' parsing failed." msgstr "'qemu-img info' ayrıştırma başarısız." #, python-format msgid "'rxtx_factor' argument must be a float between 0 and %g" msgstr "" "'rxtx_factor' bağımsız değişkeni 0 ve %g arasında kesirli bir sayı olmalı " #, python-format msgid "A NetworkModel is required in field %s" msgstr "%s alanında bir AğModeli gerekli" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "API Sürüm Karakter Dizisi %(version)s geçersiz bir biçimde. MajorNum." "MinorNum biçiminde olmalı." #, python-format msgid "API version %(version)s is not supported on this method." msgstr "API sürümü %(version)s bu metodda desteklenmiyor." msgid "Access list not available for public flavors." msgstr "Erişim listesi açık nitelikler için kullanılamaz." #, python-format msgid "Action %s not found" msgstr "Eylem %s bulunamadı" #, python-format msgid "" "Action for request_id %(request_id)s on instance %(instance_uuid)s not found" msgstr "" "%(instance_uuid)s sunucusu üzerinde request_id %(request_id)s için eylem " "bulunamadı" #, python-format msgid "Action: '%(action)s', calling method: %(meth)s, body: %(body)s" msgstr "Eylem: '%(action)s', çağıran metod: %(meth)s, gövde: %(body)s" #, python-format msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries" msgstr "" "Metadata ekleme %(id)s takımı için %(retries)s denemeden sonra başarısız oldu" #, python-format msgid "Aggregate %(aggregate_id)s already has host %(host)s." msgstr "%(aggregate_id)s kümesi zaten%(host)s sunucusuna sahip." #, python-format msgid "Aggregate %(aggregate_id)s could not be found." msgstr "%(aggregate_id)s kümesi bulunamadı." 
#, python-format msgid "Aggregate %(aggregate_id)s has no host %(host)s." msgstr "%(aggregate_id)s kümesi %(host)s sunucusuna sahip değil." #, python-format msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." msgstr "" "%(aggregate_id)s kümesi %(metadata_key)s. anahtarı ile hiç metadata'sı yok." #, python-format msgid "Aggregate %(aggregate_name)s already exists." msgstr "%(aggregate_name)s kümesi zaten var." msgid "An unknown error has occurred. Please try your request again." msgstr "Bilinmeyen bir hata oluştu. Lütfen tekrar deneyin." msgid "An unknown exception occurred." msgstr "Bilinmeyen bir istisna oluştu." #, python-format msgid "Architecture name '%(arch)s' is not recognised" msgstr "Mimari ismi '%(arch)s' tanınmıyor" #, python-format msgid "Architecture name '%s' is not valid" msgstr "Mimari adı '%s' geçerli değil" #, python-format msgid "" "Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty pool" msgstr "" "Boş havuzdan PCI aygıtı %(compute_node_id)s:%(address)s tüketme çalışması" msgid "Attempted overwrite of an existing value." msgstr "Mevcut değerin üzerine yazılması girişimi." #, python-format msgid "Attribute not supported: %(attr)s" msgstr "Öznitelik desteklenmiyor: %(attr)s" #, python-format msgid "Bad network format: missing %s" msgstr "Yanlış ağ biçimi: %s bulunamadı" msgid "Bad networks format" msgstr "Hatalı ağ biçimi" #, python-format msgid "Bad networks format: network uuid is not in proper format (%s)" msgstr "Yanlış ağ biçimi: ağ UUID'si uygun formatta değil(%s)" #, python-format msgid "Bad prefix for network in cidr %s" msgstr "%s cidr'indeki ağ için kötü ön ek" msgid "Blank components" msgstr "Boş bileşenler" msgid "" "Blank volumes (source: 'blank', dest: 'volume') need to have non-zero size" msgstr "" "Boş mantıksal sürücülerin (kaynak: 'boş', hedef:'mantıksal sürücü') sıfırdan " "farklı boyutu olmalı" #, python-format msgid "Block Device %(id)s is not bootable." msgstr "Blok Aygıtı %(id)s ön yüklenebilir değil." msgid "Block Device Mapping cannot be converted to legacy format. " msgstr "Blok Aygıt Eşleştirmesi eski biçime dönüştürülemiyor. " msgid "Block Device Mapping is Invalid." msgstr "Blok Aygıt Eşleştirmesi Geçersiz." #, python-format msgid "Block Device Mapping is Invalid: %(details)s" msgstr "Blok Aygıt Eşleştirmesi Geçersiz: %(details)s" msgid "" "Block Device Mapping is Invalid: Boot sequence for the instance and image/" "block device mapping combination is not valid." msgstr "" "Blok Aygıt Eşleştirmesi Geçersiz: Sunucu için ön yükleme sırası ve imaj/blok " "aygıt haritası bileşimi geçersiz." msgid "" "Block Device Mapping is Invalid: You specified more local devices than the " "limit allows" msgstr "" "Blok Aygıt Eşleştirmesi Geçersiz: Sınırın izin verdiğinden çok yerel aygıt " "tanımladınız" #, python-format msgid "Block Device Mapping is Invalid: failed to get image %(id)s." msgstr "Blok Aygıt Eşleştirmesi Geçersiz: imaj %(id)s alınamadı." #, python-format msgid "Block Device Mapping is Invalid: failed to get snapshot %(id)s." msgstr "Blok Aygıt Eşleştirmesi Geçersiz: Anlık görüntü %(id)s alınamadı." #, python-format msgid "Block Device Mapping is Invalid: failed to get volume %(id)s." msgstr "Blok Aygıt Eşleştirmesi Geçersiz: mantıksal sürücü %(id)s alınamadı." msgid "Block migration can not be used with shared storage." msgstr "Blok göçü paylaşılan hafıza ile kullanılamaz." msgid "Boot index is invalid." msgstr "Yükleme indeksi geçersiz." 
#, python-format msgid "Build of instance %(instance_uuid)s aborted: %(reason)s" msgstr "%(instance_uuid)s sunucusunun inşası iptal edildi: %(reason)s" #, python-format msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s" msgstr "%(instance_uuid)s sunucusunun inşası yeniden zamanlandı: %(reason)s" msgid "CPU and memory allocation must be provided for all NUMA nodes" msgstr "Tüm NUMA düğümleri için ayrılan CPU ve hafıza sağlanmalıdır" #, python-format msgid "" "CPU doesn't have compatibility.\n" "\n" "%(ret)s\n" "\n" "Refer to %(u)s" msgstr "" "CPU uyumluluğu yok. \n" " \n" " %(ret)s \n" " \n" " Bkz: %(u)s" #, python-format msgid "CPU number %(cpunum)d is assigned to two nodes" msgstr "%(cpunum)d CPU sayısı iki düğüme atanmış" #, python-format msgid "CPU number %(cpunum)d is larger than max %(cpumax)d" msgstr "CPU sayısı %(cpunum)d azami sayı %(cpumax)d den fazla" #, python-format msgid "CPU number %(cpuset)s is not assigned to any node" msgstr "CPU sayısı %(cpuset)s herhangi bir düğüme atanmamış" msgid "Can not find requested image" msgstr "İstenilen imaj dosyası bulunamadı" #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "%d kimlik bilgileri için kimlik doğrulama isteği ele alınamadı" msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "Sunucu libvirt yapılandırmasından kök aygıt yolu alınamadı" #, python-format msgid "" "Cannot '%(action)s' instance %(server_id)s while it is in %(attr)s %(state)s" msgstr "" "%(attr)s %(state)s halindeyken sunucu %(server_id)s '%(action)s' yapılamaz" #, python-format msgid "Cannot add host to aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "İstemci %(aggregate_id)s takımına eklenemiyor. Sebep: %(reason)s." msgid "Cannot attach one or more volumes to multiple instances" msgstr "" "Bir ya da daha fazla mantıksal sürücü birden fazla sunucuya eklenemiyor" #, python-format msgid "Cannot call %(method)s on orphaned %(objtype)s object" msgstr "Artık %(objtype)s objesi üzerinde %(method)s çağrılamaz" msgid "Cannot find image for rebuild" msgstr "Yeniden kurulum için imaj dosyası bulunamadı." #, python-format msgid "Cannot remove host %(host)s in aggregate %(id)s" msgstr "%(id)s takımındaki %(host)s istemcisi çıkarılamıyor" #, python-format msgid "Cannot remove host from aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "İstemci %(aggregate_id)s takımından çıkarılamıyor. Sebep: %(reason)s." msgid "Cannot rescue a volume-backed instance" msgstr "Mantıksal sürücü destekli sunucu kurtarılamıyor" #, python-format msgid "Cannot update aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "%(aggregate_id)s takımı güncellenemiyor. Sebep: %(reason)s." #, python-format msgid "" "Cannot update metadata of aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "" "%(aggregate_id)s takımının metadata'sı güncellenemiyor. Sebep: %(reason)s." #, python-format msgid "Cell %(uuid)s has no mapping." msgstr "%(uuid)s hücresinin eşleştirmesi yok." #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "Değişiklik şu kaynaklar için kullanımı 0 altında düşürür: %(unders)s" #, python-format msgid "Class %(class_name)s could not be found: %(exception)s" msgstr "%(class_name)s sınıfı bulunamadı: %(exception)s" #, python-format msgid "Compute host %(host)s could not be found." msgstr "%(host)s hesaplama sunucusu bulunamadı." #, python-format msgid "Compute host %s not found." msgstr "Hesap istemcisi %s bulunamadı." 
#, python-format msgid "Compute service of %(host)s is still in use." msgstr "%(host)s hesaplama servisi hala kullanımda." #, python-format msgid "Compute service of %(host)s is unavailable at this time." msgstr "%(host)s hesaplama servisi şu an kullanılabilir değil." #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt hypervisor " "'%s' does not support selecting CPU models" msgstr "" "Yapılandırma belirli bir CPU modeli istedi, ama mevcut libvirt hipervizörü " "'%s' CPU modeli seçme desteklemiyor" #, python-format msgid "Connection to cinder host failed: %(reason)s" msgstr "Cinder istemcisine bağlantı başarısız: %(reason)s" #, python-format msgid "Connection to libvirt lost: %s" msgstr "libvirt bağlantısı kayboldu: %s" msgid "Constraint not met." msgstr "Kısıtlama karşılanmadı." #, python-format msgid "Converted to raw, but format is now %s" msgstr "Ham şekle dönüştürüldü, ama biçim artık %s" #, python-format msgid "Could not attach image to loopback: %s" msgstr "İmaj geri dönüşe eklenemiyor: %s" #, python-format msgid "Could not fetch image %(image_id)s" msgstr "%(image_id)s imajı getirilemedi" #, python-format msgid "Could not find a handler for %(driver_type)s volume." msgstr "%(driver_type)s bölümü için bir işleyici bulunamadı." #, python-format msgid "Could not find binary %(binary)s on host %(host)s." msgstr "%(host)s sunucusunda %(binary)s ikilisi bulunamadı." #, python-format msgid "Could not find config at %(path)s" msgstr "%(path)s'deki yapılandırma bulunamadı" msgid "Could not find the datastore reference(s) which the VM uses." msgstr "VM'nin kullandığı veri deposu referansı(ları) bulunamadı." #, python-format msgid "Could not load line %(line)s, got error %(error)s" msgstr "%(line)s satırı yüklenemedi, %(error)s hatası alındı" #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "Yapıştırma uygulaması '%(name)s' %(path)s yolundan yüklenemedi" #, python-format msgid "" "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s" msgstr "" "vfat yapılandırma sürücüsü bağlanamadı. %(operation)s başarısız. Hata: " "%(error)s" #, python-format msgid "Could not upload image %(image_id)s" msgstr "İmaj %(image_id)s yüklenemedi" msgid "Creation of virtual interface with unique mac address failed" msgstr "Benzersiz mac adresine sahip sanal arayüzün oluşturulması başarısız" #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "Veridepolama düzenli ifadesi %s herhangi bir verideposuyla eşleşmedi" msgid "Datetime is in invalid format" msgstr "Datetime geçersiz biçimde" msgid "Default PBM policy is required if PBM is enabled." msgstr "PBM etkin ise varsayılan PBM ilkesi gerekir." msgid "Device name contains spaces." msgstr "Aygıt adı boşluk içeriyor." msgid "Device name empty or too long." msgstr "Aygıt adı boş veya çok uzun." #, python-format msgid "Disk format %(disk_format)s is not acceptable" msgstr "%(disk_format)s disk formatı kabul edilemez." #, python-format msgid "Disk info file is invalid: %(reason)s" msgstr "Disk bilgi dosyası geçersiz: %(reason)s" #, python-format msgid "Driver Error: %s" msgstr "Sürücü Hatası: %s" #, python-format msgid "" "Error destroying the instance on node %(node)s. Provision state still " "'%(state)s'." msgstr "" "%(node)s düğümündeki sunucu silinirken hata. Hazırlık durumu hala " "'%(state)s'." 
#, python-format msgid "Error during unshelve instance %(instance_id)s: %(reason)s" msgstr "Askıdan almadan hata sunucu %(instance_id)s: %(reason)s" #, python-format msgid "" "Error from libvirt while getting domain info for %(instance_name)s: [Error " "Code %(error_code)s] %(ex)s" msgstr "" "%(instance_name)s için alan bilgisi alınırken libvirt hatası: [Hata Kodu " "%(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "%(instance_name)s aranırken libvirt hatası: [Hata Kodu %(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while quiescing %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "%(instance_name)s susturulurken libvirt hatası: [Hata Kodu %(error_code)s] " "%(ex)s" #, python-format msgid "" "Error mounting %(device)s to %(dir)s in image %(image)s with libguestfs " "(%(e)s)" msgstr "" "%(device)s %(dir)s e %(image)s imajında libguestfs (%(e)s) ile bağlanamıyor" #, python-format msgid "Error mounting %(image)s with libguestfs (%(e)s)" msgstr "%(image)s'in libguestfs (%(e)s) ile bağlanmasında hata" #, python-format msgid "Error when creating resource monitor: %(monitor)s" msgstr "Kaynak izleme oluşturulurken hata: %(monitor)s" #, python-format msgid "Event %(event)s not found for action id %(action_id)s" msgstr "Olay %(event)s eylem kimliği %(action_id)s için bulunamadı" msgid "Event must be an instance of nova.virt.event.Event" msgstr "Olay bir nova.virt.event.Event örneği olmalı" #, python-format msgid "" "Exceeded max scheduling retries %(max_retries)d for instance " "%(instance_uuid)s during live migration" msgstr "" "Canlı göç sırasında %(instance_uuid)s sunucusu için azami zamanlama yeniden " "deneme sayısı %(max_retries)d aşıldı" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "Bir uuid bekleniyordu ama %(uuid)s alındı." msgid "Extracting vmdk from OVA failed." msgstr "OVA'dan vmdk çıkarma başarısız." #, python-format msgid "Failed to access port %(port_id)s: %(reason)s" msgstr "%(port_id)s bağlantı noktasına erişim başarısız: %(reason)s" #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "Ağ(lar) ayırma %s hatasıyla başarısız, yeniden zamanlanmıyor." msgid "Failed to allocate the network(s), not rescheduling." msgstr "Ağ(lar) ayrılamadı, yeniden zamanlanmıyor." 
#, python-format msgid "Failed to attach network adapter device to %(instance_uuid)s" msgstr "Ağ bağdaştırıcısı aygıtı %(instance_uuid)s e eklenemedi" #, python-format msgid "Failed to deploy instance: %(reason)s" msgstr "Sunucu açılamadı: %(reason)s" #, python-format msgid "Failed to detach PCI device %(dev)s: %(reason)s" msgstr "PCI aygıtı %(dev)s ayrılamadı: %(reason)s" #, python-format msgid "Failed to detach network adapter device from %(instance_uuid)s" msgstr "Ağ bağdaştırıcısı aygıtı %(instance_uuid)s den ayrılamadı" #, python-format msgid "Failed to encrypt text: %(reason)s" msgstr "Metin şifrelenemedi: %(reason)s" #, python-format msgid "Failed to launch instances: %(reason)s" msgstr "Sunucular açılamadı: %(reason)s" #, python-format msgid "Failed to map partitions: %s" msgstr "Bölümler eşlenemiyor: %s" #, python-format msgid "Failed to mount filesystem: %s" msgstr "Dosya sistemi bağlanamadı: %s" #, python-format msgid "Failed to power off instance: %(reason)s" msgstr "Sunucu kapatılamadı: %(reason)s" #, python-format msgid "Failed to power on instance: %(reason)s" msgstr "Sunucu açılamadı: %(reason)s" #, python-format msgid "Failed to provision instance %(inst)s: %(reason)s" msgstr "%(inst)s sunucusu hazırlanamadı: %(reason)s" #, python-format msgid "Failed to read or write disk info file: %(reason)s" msgstr "Disk bilgi dosyası okuma ya da yazması başarısız: %(reason)s" #, python-format msgid "Failed to reboot instance: %(reason)s" msgstr "Sunucu yeniden başlatılamadı: %(reason)s" #, python-format msgid "Failed to remove volume(s): (%(reason)s)" msgstr "Mantıksal sürücü(ler) kaldırılamadı: (%(reason)s)" #, python-format msgid "Failed to request Ironic to rebuild instance %(inst)s: %(reason)s" msgstr "" "Ironic'e %(inst)s sunucusunu yeniden inşa etmesi isteği başarısız: %(reason)s" #, python-format msgid "Failed to resume instance: %(reason)s" msgstr "Sunucu sürdürülemedi: %(reason)s" #, python-format msgid "Failed to run qemu-img info on %(path)s : %(error)s" msgstr "%(path)s üzerinde qemu-img info çalıştırılamadı: %(error)s" #, python-format msgid "Failed to set admin password on %(instance)s because %(reason)s" msgstr "%(reason)s yüzünden %(instance)s üzerinde parola ayarlanamadı" #, python-format msgid "Failed to suspend instance: %(reason)s" msgstr "Sunucu askıya alınamadı: %(reason)s" #, python-format msgid "Failed to terminate instance: %(reason)s" msgstr "Sunucu sonlandırılamadı: %(reason)s" msgid "Failure prepping block device." msgstr "Blok aygıt hazırlama başarısız." #, python-format msgid "File %(file_path)s could not be found." msgstr "%(file_path)s dosyası bulunamadı." #, python-format msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s." msgstr "Sabit IP %(ip)s %(network_id)s için gereçli bir ip adresi değil." #, python-format msgid "Fixed IP %s is already in use." msgstr "Sabit IP %s zaten kullanılıyor." #, python-format msgid "" "Fixed IP address %(address)s is already in use on instance %(instance_uuid)s." msgstr "" "Sabit IP adresi %(address)s %(instance_uuid)s sunucusu üzerinde zaten " "kullanımda." #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "%(flavor_id)s örnek türü bulunamadı." #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s." msgstr "" "%(flavor_id)s niteliğinin %(extra_specs_key)s anahtarına sahip ek özelliği " "yok." #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(key)s." 
msgstr "%(flavor_id)s niteliğinin %(key)s anahtarına sahip ek özelliği yok." #, python-format msgid "" "Flavor access already exists for flavor %(flavor_id)s and project " "%(project_id)s combination." msgstr "" "Nitelik erişimi %(flavor_id)s nitelik ve %(project_id)s proje katışımı için " "zaten mevcut." #, python-format msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination." msgstr "" "Nitelik erişimi %(flavor_id)s / %(project_id)s katışımı için bulunamadı." msgid "Flavor used by the instance could not be found." msgstr "Sunucu tarafından kullanılan nitelik bulunamadı." #, python-format msgid "Flavor with ID %(flavor_id)s already exists." msgstr "%(flavor_id)s kimliğine sahip nitelik zaten mevcut." #, python-format msgid "Flavor with name %(flavor_name)s could not be found." msgstr "%(flavor_name)s ismine sahip nitelik bulunamadı." #, python-format msgid "Flavor with name %(name)s already exists." msgstr "%(name)s ismine sahip nitelik zaten mevcut." msgid "Flavor's memory is too small for requested image." msgstr "Niteliğin hafızası istenen imaj için çok küçük." #, python-format msgid "Floating IP %(address)s association has failed." msgstr "Değişken IP %(address)s ilişkilendirmesi başarısız." msgid "" "Forbidden to exceed flavor value of number of serial ports passed in image " "meta." msgstr "" "İmaj metasına geçirilen nitelik seri bağlantı noktası sayısı değerini " "geçmeye izin verilmez." msgid "Found no disk to snapshot." msgstr "Anlık görüntüsü alınacak disk bulunamadı." #, python-format msgid "Host %(host)s could not be found." msgstr "%(host)s sunucusu bulunamadı." msgid "Host aggregate is not empty" msgstr "İstemci takımı boş değil" msgid "Host does not support guests with NUMA topology set" msgstr "İstemci NUMA toploji kümesine sahip konukları desteklemiyor" msgid "Host does not support guests with custom memory page sizes" msgstr "İstemci özel hafıza sayfa boyutlarına sahip konukları desteklemiyor" msgid "Hypervisor driver does not support post_live_migration_at_source method" msgstr "" "Hipervizör sürücüsü post_live_migration_at_source yöntemini desteklemiyor" #, python-format msgid "Hypervisor virt type '%s' is not valid" msgstr "Hipervizör sanallaştırma türü '%s' geçerli değil" #, python-format msgid "Hypervisor virtualization type '%(hv_type)s' is not recognised" msgstr "Hipervizör sanallaştırma türü '%(hv_type)s' tanınmıyor" #, python-format msgid "Hypervisor with ID '%s' could not be found." msgstr "'%s' kimlikli hipervizör bulunamadı." #, python-format msgid "IP allocation over quota in pool %s." msgstr "%s havuzundaki IP ayırma kota üzerinde." msgid "IP allocation over quota." msgstr "IP ayırma kota üzerinde." #, python-format msgid "Image %(image_id)s could not be found." msgstr "%(image_id)s imaj kaynak dosyası bulunamadı." #, python-format msgid "Image %(image_id)s is not active." msgstr "İmaj %(image_id)s etkin değil." #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "%(image_id)s imajı kabul edilemez: %(reason)s" msgid "Image disk size greater than requested disk size" msgstr "İmaj disk boyutu istenen disk boyutundan büyük" msgid "Image is not raw format" msgstr "İmaj ham biçim değil" msgid "Image metadata limit exceeded" msgstr "İmaj üstveri sınırı aşıldı" #, python-format msgid "Image model '%(image)s' is not supported" msgstr "İmaj modeli '%(image)s' desteklenmiyor" msgid "Image not found." 
msgstr "İmaj bulunamadı" #, python-format msgid "" "Image property '%(name)s' is not permitted to override NUMA configuration " "set against the flavor" msgstr "" "İmaj özelliği '%(name)s' NUMA yapılandırma kümesini nitelikte belirtilene " "karşı yazma hakkına sahip değil" msgid "" "Image property 'hw_cpu_policy' is not permitted to override CPU pinning " "policy set against the flavor" msgstr "" "İmaj özelliği 'hw_cpu_policy' niteliğe karşı ayarlanmış CPU iğneleme " "ilkesini ezme iznine sahip değil" msgid "Image that the instance was started with could not be found." msgstr "Sunucunun başlatıldığı imaj bulunamadı." #, python-format msgid "Image's config drive option '%(config_drive)s' is invalid" msgstr "İmajın yapılandırma sürücüsü seçeneği '%(config_drive)s' geçersiz" msgid "" "Images with destination_type 'volume' need to have a non-zero size specified" msgstr "" "Hedef türü 'mantıksal sürücü' olan imajların boyutu sıfırdan farklı " "belirtilmelidir" msgid "In ERROR state" msgstr "HATA durumunda" #, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "%(vm_state)s/%(task_state)s durumu içinde, RESIZED/None değil" msgid "" "Incompatible settings: ephemeral storage encryption is supported only for " "LVM images." msgstr "" "Uyumsuz ayarlar: geçici depolama şifreleme yalnızca LVM imajlarında " "desteklenir." #, python-format msgid "Info cache for instance %(instance_uuid)s could not be found." msgstr "%(instance_uuid)s sunucusu için bilgi zulası bulunamadı." #, python-format msgid "" "Instance %(instance)s and volume %(vol)s are not in the same " "availability_zone. Instance is in %(ins_zone)s. Volume is in %(vol_zone)s" msgstr "" "Sunucu %(instance)s ve mantıksal sürücü %(vol)s aynı kullanılabilir bölgede " "değil. Sunucu %(ins_zone)s içinde. Mantıksal sürücüler %(vol_zone)s" #, python-format msgid "Instance %(instance)s does not have a port with id %(port)s" msgstr "" "%(instance)s sunucusu %(port)s kimliğine sahip bir bağlantı noktasına sahip " "değil" #, python-format msgid "Instance %(instance_id)s cannot be rescued: %(reason)s" msgstr "Sunucu %(instance_id)s kurtarılamıyor: %(reason)s" #, python-format msgid "Instance %(instance_id)s could not be found." msgstr "%(instance_id)s örneği bulunamadı." #, python-format msgid "Instance %(instance_id)s has no tag '%(tag)s'" msgstr "%(instance_id)s sunucusunun '%(tag)s' etiketi yok" #, python-format msgid "Instance %(instance_id)s is not in rescue mode" msgstr "%(instance_id)s örneği kurtarma modunda değil" #, python-format msgid "Instance %(instance_id)s is not ready" msgstr "Sunucu %(instance_id)s hazır değil" #, python-format msgid "Instance %(instance_id)s is not running." msgstr "%(instance_id)s örneği çalışmıyor." #, python-format msgid "Instance %(instance_id)s is unacceptable: %(reason)s" msgstr "%(instance_id)s örneği kabul edilemez: %(reason)s" #, python-format msgid "Instance %(instance_uuid)s does not specify a NUMA topology" msgstr "%(instance_uuid)s sunucusu bir NUMA topolojisi belirtmiyor" #, python-format msgid "" "Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while " "the instance is in this state." msgstr "" "%(attr)s %(state)s 'deki %(instance_uuid)s örneği. Örnek bu durumda iken " "%(method)s yapılamaz." #, python-format msgid "Instance %(instance_uuid)s is locked" msgstr "Sunucu %(instance_uuid)s kilitli" #, python-format msgid "Instance %(name)s already exists." msgstr "%(name)s örneği zaten var." 
#, python-format msgid "Instance %(server_id)s is in an invalid state for '%(action)s'" msgstr "%(server_id)s sunucusu '%(action)s' eylemi için geçersiz bir durumda" #, python-format msgid "Instance %(uuid)s has no mapping to a cell." msgstr "%(uuid)s sunucusunun bir hücreye eşlemi yok." #, python-format msgid "Instance %s not found" msgstr "Sunucu %s bulunamadı" #, python-format msgid "Instance %s provisioning was aborted" msgstr "Sunucu %s hazırlığı iptal edildi" msgid "Instance could not be found" msgstr "Örnek bulunamadı." msgid "Instance disk to be encrypted but no context provided" msgstr "Sunucu diski şifrelenecek ama içerik sağlanmamış" msgid "Instance event failed" msgstr "Sunucu olayı başarısız" #, python-format msgid "Instance group %(group_uuid)s already exists." msgstr "Sunucu grubu %(group_uuid)s zaten mevcut." #, python-format msgid "Instance group %(group_uuid)s could not be found." msgstr "Sunucu grubu %(group_uuid)s bulunamadı." msgid "Instance has no source host" msgstr "Sunucunun kaynak istemcisi yok" msgid "Instance has not been resized." msgstr "Örnek tekrar boyutlandırılacak şekilde ayarlanmadı." msgid "Instance is not a member of specified network" msgstr "Örnek belirlenmiş ağın bir üyesi değil" #, python-format msgid "Instance rollback performed due to: %s" msgstr "Sunucu geri döndürme şu sebepten yapıldı: %s" #, python-format msgid "Insufficient compute resources: %(reason)s." msgstr "Yetersiz hesaplama kaynağı: %(reason)s." #, python-format msgid "Interface %(interface)s not found." msgstr "%(interface)s arayüzü bulunamadı." #, python-format msgid "Invalid Base 64 data for file %(path)s" msgstr "%(path)s dosyası için geçersiz base 64 verisi" msgid "Invalid Connection Info" msgstr "Geçersiz Bağlantı Bilgisi" #, python-format msgid "Invalid ID received %(id)s." msgstr "Geçersiz ID alındı %(id)s." #, python-format msgid "Invalid IP format %s" msgstr "Geçersiz IP biçimi %s" #, python-format msgid "Invalid IP protocol %(protocol)s." msgstr "Geçersiz IP %(protocol)s." msgid "" "Invalid PCI Whitelist: The PCI whitelist can specify devname or address, but " "not both" msgstr "" "Geçersiz PCI Beyaz listesi: PCI beeyaz listesi aygıt ismi ya da adresi " "belirtebilir, ama ikisini birden belirtemez" #, python-format msgid "Invalid PCI alias definition: %(reason)s" msgstr "Geçersiz PCI rumuzu tanımı: %(reason)s" #, python-format msgid "Invalid Regular Expression %s" msgstr "Geçersiz Düzenli İfade %s" #, python-format msgid "Invalid characters in hostname '%(hostname)s'" msgstr "Ana makine adında geçersiz karakter '%(hostname)s'" msgid "Invalid config_drive provided." msgstr "Sağlanan yapılandırma_sürücüsü geçersiz." #, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "Geçersiz config_drive_format \"%s\"" #, python-format msgid "Invalid console type %(console_type)s" msgstr "Geçersiz konsol türü %(console_type)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "Geçersiz içerik türü %(content_type)s." #, python-format msgid "Invalid datetime string: %(reason)s" msgstr "Geçersiz datetime karakter dizisi: %(reason)s" msgid "Invalid device UUID." msgstr "Geçersiz aygıt UUID." #, python-format msgid "Invalid entry: '%s'" msgstr "Geçersiz girdi: '%s'" #, python-format msgid "Invalid entry: '%s'; Expecting dict" msgstr "Geçersiz girdi: '%s'; Sözlük bekleniyordu" #, python-format msgid "Invalid entry: '%s'; Expecting list or dict" msgstr "Geçersiz girdi: '%s'. 
liste veya sözlük bekleniyor" #, python-format msgid "Invalid exclusion expression %r" msgstr "Geçersiz dışlama ifadesi %r" #, python-format msgid "Invalid image format '%(format)s'" msgstr "Geçersiz imaj biçimi '%(format)s'" #, python-format msgid "Invalid image href %(image_href)s." msgstr "Geçersiz %(image_href)s imaj kaynak dosyası." #, python-format msgid "Invalid inclusion expression %r" msgstr "Geçersiz dahil etme ifadesi %r" #, python-format msgid "" "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s" msgstr "" "Alan/öznitelik %(path)s için geçersiz girdi. Değer: %(value)s. %(message)s" #, python-format msgid "Invalid input received: %(reason)s" msgstr "Geçersiz girdi alındı: %(reason)s" msgid "Invalid instance image." msgstr "Geçersiz sunucu imajı." #, python-format msgid "Invalid is_public filter [%s]" msgstr "Geçersiz is_public filtresi [%s]" msgid "Invalid key_name provided." msgstr "Geçersiz anahtar adı verildi." #, python-format msgid "Invalid memory page size '%(pagesize)s'" msgstr "Geçersiz hafıza sayfa boyutu '%(pagesize)s'" msgid "Invalid metadata key" msgstr "Geçersiz özellik anahtarı" #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "Geçersiz metadata boyutu: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "Geçersiz metadata: %(reason)s" #, python-format msgid "Invalid minDisk filter [%s]" msgstr "Geçersiz minDisk filtresi [%s]" #, python-format msgid "Invalid minRam filter [%s]" msgstr "geçersiz minRam filtresi [%s]" #, python-format msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" msgstr "Geçersiz port aralığı %(from_port)s:%(to_port)s. %(msg)s" msgid "Invalid proxy request signature." msgstr "Geçersiz vekil istek imzası." #, python-format msgid "Invalid range expression %r" msgstr "Geçersiz aralık ifadesi %r" msgid "Invalid service catalog json." msgstr "Geçersiz servis kataloğu json'u." msgid "Invalid start time. The start time cannot occur after the end time." msgstr "" "Geçersiz başlangıç zamanı. Başlangıç zamanı bitiş zamanından sonra olamaz." msgid "Invalid state of instance files on shared storage" msgstr "Paylaşımlı depolamada geçersiz sunucu durumu dosyaları" #, python-format msgid "Invalid timestamp for date %s" msgstr "%s tarihi için geçersiz zaman damgası" #, python-format msgid "Invalid usage_type: %s" msgstr "Geçersiz usage_type: %s" #, python-format msgid "Invalid value for Config Drive option: %(option)s" msgstr "Yapılandırma Sürücüsü seçeneği için geçersiz değer: %(option)s" #, python-format msgid "Invalid virtual interface address %s in request" msgstr "İstekte geçersiz sanal arayüz adresi %s" #, python-format msgid "Invalid volume access mode: %(access_mode)s" msgstr "Geçersiz mantıksal sürücü erişim kipi: %(access_mode)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "Geçersiz mantıksal sürücü: %(reason)s" msgid "Invalid volume_size." msgstr "Geçersiz volume_size." #, python-format msgid "Ironic node uuid not supplied to driver for instance %s." msgstr "Ironic düğümü uuid'si %s sunucusu için sürücüye sağlanmamış." #, python-format msgid "" "It is not allowed to create an interface on external network %(network_uuid)s" msgstr "Harici ağ %(network_uuid)s üzerinde arayüz oluşturmaya izin verilmiyor" msgid "" "Key Names can only contain alphanumeric characters, periods, dashes, " "underscores, colons and spaces." msgstr "" "Anahtar İsimleri yalnızca alfanümerik karakterler, nokta, tire, alt çizgi, " "sütun ve boşluk içerebilir." 
#, python-format msgid "Key manager error: %(reason)s" msgstr "Anahtar yöneticisi hatası: %(reason)s" #, python-format msgid "Key pair '%(key_name)s' already exists." msgstr "Anahtar çifti '%(key_name)s' zaten mevcut." #, python-format msgid "Keypair %(name)s not found for user %(user_id)s" msgstr "%(user_id)s kullanıcısı için %(name)s anahtar çifti bulunamadı" #, python-format msgid "Keypair data is invalid: %(reason)s" msgstr "Anahtar çifti verisi geçersiz: %(reason)s" #, python-format msgid "Malformed message body: %(reason)s" msgstr "Hatalı biçimlendirilmiş mesaj gövdesi: %(reason)s" #, python-format msgid "" "Malformed request URL: URL's project_id '%(project_id)s' doesn't match " "Context's project_id '%(context_project_id)s'" msgstr "" "Bozuk istek URL'si: URL'nin proje_id'si '%(project_id)s' İçeriğin " "proje_id'si '%(context_project_id)s' ile eşleşmiyor" msgid "Malformed request body" msgstr "Kusurlu istek gövdesi" msgid "Mapping image to local is not supported." msgstr "İmajın yerele eşleştirilmesi desteklenmiyor." #, python-format msgid "Marker %(marker)s could not be found." msgstr "İşaretçi %(marker)s bulunamadı." #, python-format msgid "Maximum number of metadata items exceeds %(allowed)d" msgstr "Azami metadata öğesi sayısı %(allowed)d sayısını aşıyor" msgid "Maximum number of ports exceeded" msgstr "Azami bağlantı noktası sayısı aşıldı" msgid "Maximum number of security groups or rules exceeded" msgstr "Azami güvenlik grubu veya kural sayısı aşıldı" msgid "Metadata item was not found" msgstr "İçerik özelliği bilgisi bulunamadı" msgid "Metadata property key greater than 255 characters" msgstr "Metadata özellik anahtarı 255 karakterden büyük" msgid "Metadata property value greater than 255 characters" msgstr "Metadata özellik değeri 255 karakterden büyük" msgid "Metadata type should be dict." msgstr "Metadata türü sözlük olmalı." #, python-format msgid "" "Metric %(name)s could not be found on the compute host node %(host)s." "%(node)s." msgstr "" "%(name)s ölçüsü %(host)s.%(node)s hesaplama istemci düğümünde bulunamadı." #, python-format msgid "Migration %(migration_id)s could not be found." msgstr "%(migration_id)s göçü bulunamadı." #, python-format msgid "Migration error: %(reason)s" msgstr "Göç hatası: %(reason)s" msgid "Migration is not supported for LVM backed instances" msgstr "LVM destekli sunucularda göç desteklenmiyor" #, python-format msgid "" "Migration not found for instance %(instance_id)s with status %(status)s." msgstr "%(status)s durumuyla %(instance_id)s örneği için göç bulunamadı." #, python-format msgid "Migration pre-check error: %(reason)s" msgstr "Göç ön-kontrol hatası: %(reason)s" #, python-format msgid "Missing arguments: %s" msgstr "Eksik bağımsız değişken: %s" msgid "Missing device UUID." msgstr "Eksik aygıt UUID." msgid "Missing disabled reason field" msgstr "Kapatılma sebebi alanı eksik" msgid "Missing imageRef attribute" msgstr "İmaj referans özelliği eksik" #, python-format msgid "Missing keys: %s" msgstr "Eksik anahtarlar: %s" msgid "Missing parameter dict" msgstr "Parametre dizini eksik" msgid "" "More than one possible network found. Specify network ID(s) to select which " "one(s) to connect to." msgstr "" "Birden fazla olası ağ bulundu. Hangilerine bağlanılacağını seçmek için ağ " "ID(leri) belirtin." msgid "More than one swap drive requested." msgstr "Birden fazla swap sürücü istendi." 
#, python-format msgid "Multi-boot operating system found in %s" msgstr "%s içinde birden fazla ön yüklemeli işletim sistemi bulundu" msgid "Multiple X-Instance-ID headers found within request." msgstr "İstekte birden fazla X-Instance-ID başlığı bulundu." msgid "Multiple X-Tenant-ID headers found within request." msgstr "İstekte birden fazla X-Tenant-ID başlığı bulundu." #, python-format msgid "Multiple floating IP pools matches found for name '%s'" msgstr "'%s' ismi için birden fazla değişken IP havuzu eşleşmesi bulundu" msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we do " "not return uptime for just one host." msgstr "" "Birden fazla istemci VMWare vCenter sürücüsü tarafından yönetiliyor " "olabilir; bu yüzden tek bir istemci için açık kalma süresi döndürmüyoruz." msgid "Multiple possible networks found, use a Network ID to be more specific." msgstr "" "Birden fazla olası ağ bulundu, daha belirli olmak için Ağ Kimliği kullanın." #, python-format msgid "" "Multiple security groups found matching '%s'. Use an ID to be more specific." msgstr "" "'%s' ile eşleşen birden fazla güvenlik grubu bulundu. Daha özgü olmak için " "bir ID kullanın." msgid "Must input network_id when request IP address" msgstr "IP adresi istendiğinde network_id girdisi verilmelidir" msgid "Must not input both network_id and port_id" msgstr "Hem network_id hem port_id verilmemelidir" msgid "" "Must specify host_ip, host_username and host_password to use vmwareapi." "VMwareVCDriver" msgstr "" "vmwareapi.VMwareVCDriver kullanmak için host_ip, host_username ve " "host_password belirtilmeli" msgid "Must supply a positive value for max_rows" msgstr "max_rows için pozitif bir değer verilmeli" #, python-format msgid "Network %(network_id)s could not be found." msgstr "%(network_id)s ağı bulunamadı." #, python-format msgid "" "Network %(network_uuid)s requires a subnet in order to boot instances on." msgstr "" "Ağ %(network_uuid)s sunucuları başlatmak için alt ağlara ihtiyaç duyar." #, python-format msgid "Network could not be found for bridge %(bridge)s" msgstr " %(bridge)s köprüsü için ağ bulunamadı." #, python-format msgid "Network could not be found for instance %(instance_id)s." msgstr "%(instance_id)s örneği için ağ bulunamadı." msgid "Network not found" msgstr "Ağ bulunamadı" msgid "" "Network requires port_security_enabled and subnet associated in order to " "apply security groups." msgstr "" "Güvenlik gruplarını uygulamak için ağın port_security_enabled olmasına ve " "alt ağın ilişkilendirilmesine ihtiyaç var." msgid "New volume must be detached in order to swap." msgstr "" "Yeni mantıksal sürücünün değiştirilebilmesi için ayrılmış olması gerekir." msgid "New volume must be the same size or larger." msgstr "Yeni mantıksal sürücü aynı boyutta ya da daha büyük olmalı." #, python-format msgid "No Block Device Mapping with id %(id)s." msgstr "%(id)s kimliğine sahip bir Blok Aygıt Eşleştirmesi yok." msgid "No Unique Match Found." msgstr "Benzersiz Eşleşme Bulunamadı." msgid "No compute host specified" msgstr "Hesap istemcisi belirtilmedi" #, python-format msgid "No device with MAC address %s exists on the VM" msgstr "VM'de %s MAC adresine sahip bir aygıt yok" #, python-format msgid "No device with interface-id %s exists on VM" msgstr "VM'de interface-id %s'a ait bir aygıt yok" #, python-format msgid "No disk at %(location)s" msgstr "%(location)s'da disk yok." 
#, python-format msgid "No fixed IP addresses available for network: %(net)s" msgstr "Şu ağ için kullanılabilir sabit UP adresleri yok: %(net)s" msgid "No free nbd devices" msgstr "Boş nbd aygıtı yok" msgid "No host available on cluster" msgstr "Kümede kullanılabilir istemci yok" #, python-format msgid "No hypervisor matching '%s' could be found." msgstr "'%s' ile eşleşen bir hipervizör bulunamadı." msgid "No image locations are accessible" msgstr "İmaj konumları erişilebilir değil" #, python-format msgid "No mount points found in %(root)s of %(image)s" msgstr "%(image)s %(root)s içinde bağlantı noktası bulunmadı" #, python-format msgid "No operating system found in %s" msgstr "%s içinde işletim sistemi bulunamadı" msgid "No root disk defined." msgstr "Kök disk tanımlanmamış." msgid "No valid host found for cold migrate" msgstr "Soğuk göç için geçerli bir istemci bulunamadı" msgid "No valid host found for resize" msgstr "Yeniden boyutlama için geçerli bir istemci bulunamadı" #, python-format msgid "No valid host was found. %(reason)s" msgstr "Geçerli bir sunucu bulunamadı: %(reason)s" #, python-format msgid "No volume Block Device Mapping at path: %(path)s" msgstr "Şu yolda mantıksal sürücü Blok Aygıt eşleştirmesi yok: %(path)s" #, python-format msgid "No volume Block Device Mapping with id %(volume_id)s." msgstr "" "%(volume_id)s kimliğine sahip mantıksal sürücü Blok Aygıt Eşleştirmesi yok." #, python-format msgid "Node %s could not be found." msgstr "Düğüm %s bulunamadı." #, python-format msgid "Not able to acquire a free port for %(host)s" msgstr "%(host)s için boş bir bağlantı noktası edinilemedi" #, python-format msgid "Not able to bind %(host)s:%(port)d, %(error)s" msgstr "%(host)s:%(port)d bağlanamadı, %(error)s" msgid "Not an rbd snapshot" msgstr "Rbd anlık görüntüsü değil" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "İmaj %(image_id)s için yetkilendirilmemiş." msgid "Not authorized." msgstr "Yetkiniz yok." msgid "Not enough parameters to build a valid rule." msgstr "Geçerli bir kuralı oluşturmak için yeterli parametre yok." msgid "Not stored in rbd" msgstr "Rbd'de depolanmadı" #, python-format msgid "Nova requires libvirt version %s or greater." msgstr "Nova libvirt sürüm %s ya da daha yenisine ihtiyaç duyar." #, python-format msgid "Object action %(action)s failed because: %(reason)s" msgstr "Nesne eylemi %(action)s başarısız çünkü: %(reason)s" msgid "Old volume is attached to a different instance." msgstr "Eski mantıksal sürücü başka bir sunucuya eklenmiş." #, python-format msgid "One or more hosts already in availability zone(s) %s" msgstr "Bir ya da daha fazla istemci zaten kullanılabilir bölge(ler)de %s" msgid "Only administrators may list deleted instances" msgstr "Yalnızca yöneticiler silinen sunucuları listeleyebilir" msgid "Origin header does not match this host." msgstr "Kaynak başlık bu istemciyle eşleşmiyor." msgid "Origin header not valid." msgstr "Kaynak başlık geçerli değil." msgid "Origin header protocol does not match this host." msgstr "Kaynak başlık iletişim kuralı bu istemciyle eşleşmiyor." #, python-format msgid "PCI Device %(node_id)s:%(address)s not found." msgstr "PCI Aygıtı %(node_id)s:%(address)s bulunamadı." 
#, python-format msgid "PCI alias %(alias)s is not defined" msgstr "PCI rumuzu %(alias)s tanımlanmamış" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is %(status)s instead of " "%(hopestatus)s" msgstr "" "PCI aygıtı %(compute_node_id)s:%(address)s %(hopestatus)s olacağına " "%(status)s" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead of " "%(hopeowner)s" msgstr "" "PCI aygıtı %(compute_node_id)s:%(address)s %(hopeowner)s yerine %(owner)s " "tarafından sahiplenilmiş" #, python-format msgid "PCI device %(id)s not found" msgstr "PCI aygıtı %(id)s bulunamadı" #, python-format msgid "Page size %(pagesize)s forbidden against '%(against)s'" msgstr "Sayfa boyutu %(pagesize)s '%(against)s' e karşı yasaklı" #, python-format msgid "Path %s must be LVM logical volume" msgstr "Yol %s LVM mantıksal sürücüsü olmalı" msgid "Paused" msgstr "Durduruldu" msgid "Personality file limit exceeded" msgstr "Kişisel dosya limiti aşıldı" #, python-format msgid "Physical network is missing for network %(network_uuid)s" msgstr "%(network_uuid)s ağı için fiziksel ağ eksik" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "%(action)s uygulanmasına izin verilmiyor." #, python-format msgid "Port %(port_id)s is still in use." msgstr "Bağlantı noktası %(port_id)s hala kullanımda." #, python-format msgid "Port %(port_id)s not usable for instance %(instance)s." msgstr "" "Bağlantı noktası %(port_id)s %(instance)s sunucusu için kullanılabilir değil." #, python-format msgid "Port %(port_id)s requires a FixedIP in order to be used." msgstr "" "Bağlantı noktası %(port_id)s kullanılabilmek için bir SabitIP'ye ihtiyaç " "duyuyor." #, python-format msgid "Port %s is not attached" msgstr "Bağlantı noktası %s eklenmemiş" #, python-format msgid "Port id %(port_id)s could not be found." msgstr "Bağlantı noktası kimliği %(port_id)s bulunamadı." #, python-format msgid "Provided video model (%(model)s) is not supported." msgstr "Sağlanan video modeli (%(model)s) desteklenmiyor." #, python-format msgid "Provided watchdog action (%(action)s) is not supported." msgstr "Sağlanan watchdog eylemi (%(action)s) desteklenmiyor." msgid "QEMU guest agent is not enabled" msgstr "QEMU konuk ajan etkin değil" #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "Kota sınıfı %(class_name)s bulunamadı." msgid "Quota could not be found" msgstr "Kota bulunamadı." #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "Kaynaklar için kota aşıldı: %(overs)s" msgid "Quota exceeded, too many key pairs." msgstr "Kota aşıldı, çok fazla anahtar çifti." msgid "Quota exceeded, too many server groups." msgstr "Kota aşıldı, çok fazla sunucu grubu." msgid "Quota exceeded, too many servers in group" msgstr "Kota aşıldı, grupta çok fazla sunucu var" #, python-format msgid "Quota exists for project %(project_id)s, resource %(resource)s" msgstr "Kota %(project_id)s projesi, %(resource)s kaynağı için mevcut" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "%(project_id)s projesi için bir kota bulunamadı." #, python-format msgid "" "Quota for user %(user_id)s in project %(project_id)s could not be found." msgstr "" "%(project_id)s projesindeki %(user_id)s kullanıcısı için kota bulunamadı." #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be greater than or equal to " "already used and reserved %(minimum)s." 
msgstr "" "%(resource)s için %(limit)s kota sınırı zaten kullanılan ve ayrılmış olan " "%(minimum)s den büyük ya da eşit olmalı." #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be less than or equal to " "%(maximum)s." msgstr "" "%(resource)s için %(limit)s kota sınırı %(maximum)s den küçük ya da eşit " "olmalı." msgid "Request body and URI mismatch" msgstr "URI ve gövde isteği uyumsuz" msgid "Request is too large." msgstr "İstek çok geniş" #, python-format msgid "" "Requested hardware '%(model)s' is not supported by the '%(virt)s' virt driver" msgstr "" "İstenen donanım '%(model)s' '%(virt)s' sanallaştırma sürücüsü tarafından " "desteklenmiyor" #, python-format msgid "Requested image %(image)s has automatic disk resize disabled." msgstr "İstenen imajda %(image)s otomatik disk yeniden boyutlandırma kapalı." msgid "" "Requested instance NUMA topology cannot fit the given host NUMA topology" msgstr "" "İstenen sunucu NUMA topolojisi verilen istemci NUMA topolojisine uymuyor" msgid "" "Requested instance NUMA topology together with requested PCI devices cannot " "fit the given host NUMA topology" msgstr "" "İstenen NUMA topolojisi istenen PCI aygıtlarıyla birlikte istemci NUMA " "topolojisine uymuyor" #, python-format msgid "" "Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to " "satisfy for vcpus count %(vcpus)d" msgstr "" "İzin verilen vCPU sınırları %(sockets)d:%(cores)d:%(threads)d %(vcpus)d vcpu " "sayısını tatmin etmek için yetersiz" #, python-format msgid "Rescue device does not exist for instance %s" msgstr "%s sunucusu için kurtarma aygıtı mevcut değil" #, python-format msgid "Resize error: %(reason)s" msgstr "Yeniden boyutlama hatası: %(reason)s" msgid "Resize to zero disk flavor is not allowed." msgstr "Sıfıra yeniden boyutlandırma disk niteliğine izin verilmiyor." msgid "Resource could not be found." msgstr "Kaynak bulunamadı." msgid "Resumed" msgstr "Devam Edildi" #, python-format msgid "Root element name should be '%(name)s' not '%(tag)s'" msgstr "Kök öğe ismi '%(name)s' olmalı '%(tag)s' değil" #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "%(filter_name)s zamanlayıcı sunucu filtresi bulunamadı." #, python-format msgid "Security group %(name)s is not found for project %(project)s" msgstr "Güvenlik grubu %(name)s %(project)s projesi için bulunamadı" #, python-format msgid "" "Security group %(security_group_id)s not found for project %(project_id)s." msgstr "" "%(project_id)s projesi için %(security_group_id)s güvenlik grubu bulunamadı." #, python-format msgid "Security group %(security_group_id)s not found." msgstr "%(security_group_id)s güvenlik grubu bulunamadı." #, python-format msgid "" "Security group %(security_group_name)s already exists for project " "%(project_id)s." msgstr "" "%(security_group_name)s güvenlik grubu %(project_id)s projesi için zaten " "mevcut." 
#, python-format msgid "" "Security group %(security_group_name)s not associated with the instance " "%(instance)s" msgstr "" "%(security_group_name)s güvenlik grubu %(instance)s sunucusu ile " "ilişkilendirilmemiş" msgid "Security group id should be uuid" msgstr "Güvenlik grubu kimliği uuid olmalı" msgid "Security group name cannot be empty" msgstr "Güvenlik grup adı boş bırakılamaz" msgid "Security group not specified" msgstr "Güvenlik grubu belirlenmedi" #, python-format msgid "Server disk was unable to be resized because: %(reason)s" msgstr "Sunucu diski yeniden boyutlandırılamadı çünkü: %(reason)s" msgid "Server does not exist" msgstr "Sunucu mevcut değil" #, python-format msgid "ServerGroup policy is not supported: %(reason)s" msgstr "SunucuGrubu ilkesi desteklenmiyor: %(reason)s" msgid "ServerGroupAffinityFilter not configured" msgstr "ServerGroupAffinityFilter yapılandırılmamış" msgid "ServerGroupAntiAffinityFilter not configured" msgstr "ServerGroupAntiAffinityFilter yapılandırılmamış" #, python-format msgid "Service %(service_id)s could not be found." msgstr "%(service_id)s servisi bulunamadı." #, python-format msgid "Service %s not found." msgstr "Servis %s bulunamadı." msgid "Service is unavailable at this time." msgstr "Şu anda servis kullanılamıyor." #, python-format msgid "Service with host %(host)s binary %(binary)s exists." msgstr "İstemci %(host)s ikiliğine %(binary)s sahip servis mevcut." #, python-format msgid "Service with host %(host)s topic %(topic)s exists." msgstr "İstemci %(host)s başlığına %(topic)s sahip servis mevcut." #, python-format msgid "Share '%s' is not supported" msgstr "Paylaşım '%s' desteklenmiyor" #, python-format msgid "Share level '%s' cannot have share configured" msgstr "Paylaşım seviyesi '%s' paylaşım yapılandırmasına sahip olamaz" #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "%(snapshot_id)s sistem anlık görüntüsü bulunamadı." msgid "Some required fields are missing" msgstr "Bazı gerekli alanlar eksik" msgid "Sort direction size exceeds sort key size" msgstr "Sıralama yönü boyutu sııralama anahtarı boyutunu geçiyor" msgid "Sort key supplied was not valid." msgstr "Verilen sıralama anahtarı geçerli değil." msgid "Specified fixed address not assigned to instance" msgstr "Belirtilen sabit adres sunucuya atanmamış" msgid "Started" msgstr "Başlatıldı" msgid "Stopped" msgstr "Durduruldu" #, python-format msgid "Storage error: %(reason)s" msgstr "Depolama hatası: %(reason)s" #, python-format msgid "Storage policy %s did not match any datastores" msgstr "Depolama ilkesi %s hiçbir verideposuyla eşleşmedi" msgid "Success" msgstr "Başarılı" msgid "Suspended" msgstr "Askıda" msgid "Swap drive requested is larger than instance type allows." msgstr "İstenen swap sürücüsü sunucu türünün izin verdiğinden daha büyük." #, python-format msgid "Task %(task_name)s is already running on host %(host)s" msgstr "Görev %(task_name)s zaten %(host)s istemcisi üstünde çalışıyor" #, python-format msgid "Task %(task_name)s is not running on host %(host)s" msgstr "Görev %(task_name)s %(host)s istemcisi üzerinde çalışmıyor" #, python-format msgid "The PCI address %(address)s has an incorrect format." msgstr "PCI adresi %(address)s yanlış biçimde." #, python-format msgid "The console port range %(min_port)d-%(max_port)d is exhausted." msgstr "Konsol bağlantı noktası aralığı %(min_port)d-%(max_port)d tükenmiş." msgid "The current driver does not support preserving ephemeral partitions." 
msgstr "Mevcut sürücü geçici bölümleri korumayı desteklemiyor." msgid "The default PBM policy doesn't exist on the backend." msgstr "Varsayılan PBM ilkesi arka uçta mevcut değil." msgid "" "The instance requires a newer hypervisor version than has been provided." msgstr "Örnek şu ankinden daha yeni hypervisor versiyonu gerektirir." #, python-format msgid "The provided RNG device path: (%(path)s) is not present on the host." msgstr "Sağlanan RNG aygıt yolu: (%(path)s) istemci üzerinde mevcut değil." msgid "The request is invalid." msgstr "İstek geçersiz" #, python-format msgid "" "The requested amount of video memory %(req_vram)d is higher than the maximum " "allowed by flavor %(max_vram)d." msgstr "" "İstenen miktarda video hafızası %(req_vram)d nitelik tarafından izin veirlen " "%(max_vram)d den yüksek." msgid "The requested availability zone is not available" msgstr "İstenen kullanılabilirlik bölgesi uygun değil" msgid "The requested functionality is not supported." msgstr "İstenen işlevsellik desteklenmiyor" #, python-format msgid "The supplied device path (%(path)s) is in use." msgstr "Verilen aygıt yolu (%(path)s) kullanımda." #, python-format msgid "The supplied device path (%(path)s) is invalid." msgstr "Desteklenen cihaz yolu (%(path)s) geçersiz." #, python-format msgid "" "The supplied disk path (%(path)s) already exists, it is expected not to " "exist." msgstr "Desteklenen disk yolu (%(path)s) halen var,fakat var olmaması gerekir." msgid "The supplied hypervisor type of is invalid." msgstr "Desteklenen hypervisor türü geçersiz." msgid "The target host can't be the same one." msgstr "Hedef istemci aynısı olamaz." #, python-format msgid "The token '%(token)s' is invalid or has expired" msgstr "Jeton '%(token)s' geçersiz ya da süresi dolmuş" #, python-format msgid "" "The volume cannot be assigned the same device name as the root device %s" msgstr "Mantıksal sürücü kök aygıt %s ile aynı aygıt ismiyle atanamaz" msgid "There are not enough hosts available." msgstr "Yeterince kullanılabilir istemci yok." #, python-format msgid "There is no such action: %s" msgstr "Böyle bir işlem yok: %s" #, python-format msgid "This rule already exists in group %s" msgstr "Bu kural zaten grupta var %s" msgid "Timeout waiting for response from cell" msgstr "Hücreden cevap beklerken zaman aşımı" msgid "To and From ports must be integers" msgstr "Hedef ve Kaynak bağlantı noktaları tam sayı olmalı" msgid "Token not found" msgstr "Jeton bulunamadı" msgid "Type and Code must be integers for ICMP protocol type" msgstr "ICMP iletişim kuralı türü için Tür ve Kod tam sayı olmalı" #, python-format msgid "Unable to determine disk bus for '%s'" msgstr "'%s' için disk veri yolu belirlenemiyor" #, python-format msgid "Unable to determine disk prefix for %s" msgstr "%s için disk ön eki belirlenemiyor" #, python-format msgid "Unable to find host for Instance %s" msgstr "%s örneği için sunucu bulma başarısız" msgid "Unable to find iSCSI Target" msgstr "iSCSI Hedefi bulunamadı" msgid "Unable to find volume" msgstr "Mantıksal sürücü bulunamadı" msgid "" "Unable to launch multiple instances with a single configured port ID. Please " "launch your instance one by one with different ports." msgstr "" "Birden fazla sunucu tekil yapılandırılan bağlantı noktası ID'si ile " "başlatılamadı. Lütfen sunucunuzu tek tek değişik bağlantı noktalarıyla " "başlatın." 
#, python-format msgid "" "Unable to migrate %(instance_uuid)s to %(dest)s: Lack of memory(host:" "%(avail)s <= instance:%(mem_inst)s)" msgstr "" "%(instance_uuid)s %(dest)s e göç ettirilemiyor: Hafıza yetersiz(istemci:" "%(avail)s <= sunucu:%(mem_inst)s)" #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too large(available " "on destination host:%(available)s < need:%(necessary)s)" msgstr "" "%(instance_uuid)s göç ettirilemiyor: Sunucunun diski çok büyük(hedef " "istemcide kullanılabilir olan:%(available)s < ihtiyaç duyulan:%(necessary)s)" #, python-format msgid "" "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." msgstr "Mevcut (%(host)s) sunucusundan (%(instance_id)s) örneği geçirilemez." msgid "Unable to resize disk down." msgstr "Disk boyutu küçültülemedi." msgid "Unable to set password on instance" msgstr "Sunucuya parola ayarlanamadı" msgid "Unable to shrink disk." msgstr "Disk küçültülemiyor." #, python-format msgid "Unacceptable CPU info: %(reason)s" msgstr "Kabul edilemez CPU bilgisi: %(reason)s" msgid "Unacceptable parameters." msgstr "Kabul edilemez parametreler var." #, python-format msgid "Unavailable console type %(console_type)s." msgstr "Uygun olmayan konsol türü %(console_type)s." #, python-format msgid "Unexpected aggregate action %s" msgstr "Beklenmedik takım eylemi %s" msgid "Unexpected type adding stats" msgstr "Durum eklemede beklenmedik tür" #, python-format msgid "Unexpected vif_type=%s" msgstr "Beklenmeyen vif_type=%s" msgid "Unknown" msgstr "Bilinmeyen" msgid "Unknown action" msgstr "Bilinmeyen eylem" #, python-format msgid "Unknown delete_info type %s" msgstr "Bilinmeyen delete_info türü %s" #, python-format msgid "Unknown image_type=%s" msgstr "Bilinmeyen image_type=%s" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "Bilinmeyen kota kaynakları %(unknown)s." msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Bilinmeyen sıralama yönü, 'desc' veya 'asc' olmalı" #, python-format msgid "Unknown type: %s" msgstr "Bilinmeyen tür: %s" msgid "Unrecognized legacy format." msgstr "Tanınmayan geçmişe ait biçim." #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "Tanınmayan silinmiş okuma değeri '%s'" #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "CONF.running_deleted_instance_action için tanınmayan değer '%s'" #, python-format msgid "Unshelve attempted but the image %s cannot be found." msgstr "Askıdan almaya çalışıldı ama %s imajı bulunamadı." msgid "Unsupported Content-Type" msgstr "Desteklenmeyen içerik türü" #, python-format msgid "User %(username)s not found in password file." msgstr "Kullanıcı %(username)s parola dosyasında bulunamadı." #, python-format msgid "User %(username)s not found in shadow file." msgstr "Kullanıcı %(username)s gölge dosyasında bulunamadı." msgid "User data needs to be valid base 64." msgstr "Kullanıcı verisi geçerli base 64 olmalıdır." msgid "User does not have admin privileges" msgstr "Kullanıcı yönetici ayrıcalıklarına sahip değil" msgid "" "Using different block_device_mapping syntaxes is not allowed in the same " "request." msgstr "" "Aynı istekte değişik blok_aygıt_eşleştirmesi söz dizimi kullanımına izin " "verilmiyor." #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "Sürüm %(req_ver)s API tarafından desteklenmiyor. Asgari %(min_ver)s ve azami " "%(max_ver)s." 
msgid "Virtual Interface creation failed" msgstr "Sanal arayüz oluşturma hatası" msgid "Virtual interface plugin failed" msgstr "Sanal arayüz eklentisi başarısız" #, python-format msgid "Virtual machine mode '%(vmmode)s' is not recognised" msgstr "Sanal makine kipi '%(vmmode)s' tanınmıyor" #, python-format msgid "Virtual machine mode '%s' is not valid" msgstr "Sanal makine kipi '%s' geçerli değil" #, python-format msgid "Virtualization type '%(virt)s' is not supported by this compute driver" msgstr "" "Sanallaştırma türü '%(virt)s' bu hesaplama sürücüsü tarafından desteklenmiyor" #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "%(volume_id)s bölümü bulunamadı." #, python-format msgid "" "Volume %(volume_id)s did not finish being created even after we waited " "%(seconds)s seconds or %(attempts)s attempts. And its status is " "%(volume_status)s." msgstr "" "Mantıksal sürücü %(volume_id)s oluşturulması %(seconds)s saniye beklememize " "ya da %(attempts)s kere denemeye rağmen bitmedi. Ve durumu %(volume_status)s." msgid "Volume does not belong to the requested instance." msgstr "Mantıksal sürücü istenen sunucuya ait değil." #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " "support custom block size" msgstr "" "Mantıksal sürücü blok boyutu ayarlıyor, ama mevcut libvirt hipervizörü '%s' " "özel blok boyutunu desteklemiyor" msgid "When resizing, instances must change flavor!" msgstr "Yeniden boyutlandırırken, sunucular nitelik değiştirmelidir!" #, python-format msgid "Wrong quota method %(method)s used on resource %(res)s" msgstr "Kaynak %(res)s üstünde yanlış kota metodu %(method)s kullanıldı" msgid "X-Instance-ID header is missing from request." msgstr "İstekte X-Instance-ID başlığı eksik." msgid "X-Instance-ID-Signature header is missing from request." msgstr "İstekte X-Instance-ID-Signature başlığı eksik." msgid "X-Tenant-ID header is missing from request." msgstr "İstekte X-Tenant-ID başlığı eksik." msgid "You are not allowed to delete the image." msgstr "İmajı silmeye yetkili değilsiniz." msgid "" "You are not authorized to access the image the instance was started with." msgstr "Sunucunun başlatıldığı imaja erişme yetkiniz yok." msgid "You must implement __call__" msgstr "__call__ fonksiyonunu uygulamalısınız." msgid "You should specify images_rbd_pool flag to use rbd images." msgstr "" "rbd imajlarını kullanmak için images_rbd_pool bayrağını belirtmelisiniz." msgid "You should specify images_volume_group flag to use LVM images." msgstr "LVM imajları kullanmak için images_volume_group belirtmelisiniz." msgid "admin password can't be changed on existing disk" msgstr "yönetici parolası mevcut diskte değiştirilemez" msgid "cannot understand JSON" msgstr "JSON dosyası anlaşılamadı" msgid "clone() is not implemented" msgstr "clone() uygulanmamış" #, python-format msgid "connect info: %s" msgstr "bağlantı bilgisi: %s" #, python-format msgid "connecting to: %(host)s:%(port)s" msgstr "bağlanılıyor: %(host)s:%(port)s" #, python-format msgid "disk type '%s' not supported" msgstr "disk türü '%s' desteklenmiyor" #, python-format msgid "empty project id for instance %s" msgstr "%s sunucusu için boş proje kimliği" msgid "error setting admin password" msgstr "yönetici parolası ayarlarken hata" #, python-format msgid "error: %s" msgstr "hata: %s" #, python-format msgid "failed to generate X509 fingerprint. Error message: %s" msgstr "X509 parmak izi oluşturulamadı. 
Hata iletisi: %s" msgid "failed to generate fingerprint" msgstr "parmak izi oluşturulamadı" msgid "filename cannot be None" msgstr "dosya ismi None olamaz" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "%(backing_file)s tarafından desteklenen fmt=%(fmt)s" #, python-format msgid "href %s does not contain version" msgstr "%s referansı versiyon içermiyor" msgid "image already mounted" msgstr "imaj zaten bağlanmış" #, python-format msgid "instance %s is not running" msgstr "sunucu %s çalışmıyor" msgid "instance is a required argument to use @refresh_cache" msgstr "sunucu @refresh_cache kullanmak için gerekli bir bağımsız değişken" msgid "instance is not in a suspended state" msgstr "sunucu bekletilme durumunda değil" msgid "instance is not powered on" msgstr "sunucunun gücü açılmamış" msgid "instance is powered off and cannot be suspended." msgstr "sunucunun gücü kapalı ve bekletilemez." #, python-format msgid "instance_id %s could not be found as device id on any ports" msgstr "" "instance_id %s herhangi bir bağlantı noktasında aygıt kimliği olarak " "bulunamadı" msgid "is_public must be a boolean" msgstr "is_public bool olmalı" msgid "keymgr.fixed_key not defined" msgstr "keymgr.fixed_key tanımlanmamış" #, python-format msgid "libguestfs installed but not usable (%s)" msgstr "libguestfs kurulu ama kullanılabilir değil (%s)" #, python-format msgid "libguestfs is not installed (%s)" msgstr "libguestfs kurulu değil (%s)" #, python-format msgid "marker [%s] not found" msgstr " [%s] göstergesi bulunamadı" msgid "max_count cannot be greater than 1 if an fixed_ip is specified." msgstr "sabit ip belirtildiyse max_count 1'den büyük olamaz." msgid "min_count must be <= max_count" msgstr "min_count max_count'dan küçük olmalı" #, python-format msgid "nbd device %s did not show up" msgstr "nbd aygıtı %s ortaya çıkmadı" msgid "nbd unavailable: module not loaded" msgstr "nbd kullanılabilir değil: modül yüklenmemiş" #, python-format msgid "no match found for %s" msgstr "%s için eşleşme bulunamadı" #, python-format msgid "not able to execute ssh command: %s" msgstr "ssh komutu çalıştırılamadı: %s" msgid "operation time out" msgstr "işlem zaman aşımına uğradı" #, python-format msgid "partition %s not found" msgstr "bölüm %s bulunamadı" #, python-format msgid "partition search unsupported with %s" msgstr "bölüm arama %s ile desteklenmiyor" msgid "pause not supported for vmwareapi" msgstr "vmwareapi için duraklatma desteklenmiyor" #, python-format msgid "qemu-nbd error: %s" msgstr "qemu-nbd hatası: %s" msgid "rbd python libraries not found" msgstr "rbd python kitaplıkları bulunamadı" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" "read_deleted değişkeni 'no', 'yes' veya 'only' değerlerini alabilir, %r " "olamaz" msgid "serve() can only be called once" msgstr "serve() yalnızca bir kere çağrılabilir" msgid "service is a mandatory argument for DB based ServiceGroup driver" msgstr "servis, DB tabanlı Servis Grubu sürücüsü için gerekli bir değişkendir" msgid "service is a mandatory argument for Memcached based ServiceGroup driver" msgstr "" "servis, Memcached tabanlı Servis Grubu sürücüsü için gerekli bir bağımsız " "değişkendir" msgid "set_admin_password is not implemented by this driver or guest instance." msgstr "" "set_admin_password bu sürücü ya da konuk sunucu tarafından uygulanmıyor." 
#, python-format msgid "snapshot for %s" msgstr "%s için anlık görüntü" msgid "snapshot_id required in create_info" msgstr "create_info'da snapshot_id gerekiyor" msgid "token not provided" msgstr "jeton sağlanmamış" msgid "too many body keys" msgstr "Çok sayıda gövde anahtarları" msgid "unpause not supported for vmwareapi" msgstr "vmwareapi için sürdürme desteklenmiyor" #, python-format msgid "vg %s must be LVM volume group" msgstr "vg %s LVM mantıksal sürücü grubu olmalı" #, python-format msgid "vhostuser_sock_path not present in vif_details for vif %(vif_id)s" msgstr "" "vif_details kısmında %(vif_id)s vif'i için vhostuser_sock_path mevcut değil" msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "Bu vif_driver uygulaması için vif_type parametresi mevcut olmalı" #, python-format msgid "volume %s already attached" msgstr "mantıksal sürücü %s zaten ekli" #, python-format msgid "" "volume '%(vol)s' status must be 'in-use'. Currently in '%(status)s' status" msgstr "" "mantıksal sürücü '%(vol)s' durumu 'kullanımda' olmalı. Şu an '%(status)s' " "durumunda" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315688.893605 nova-32.0.0/nova/locale/zh_CN/0000775000175000017500000000000000000000000015765 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3736086 nova-32.0.0/nova/locale/zh_CN/LC_MESSAGES/0000775000175000017500000000000000000000000017552 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/locale/zh_CN/LC_MESSAGES/nova.po0000664000175000017500000025541700000000000021073 0ustar00zuulzuul00000000000000# Translations template for nova. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the nova project. # # Translators: # Amos Huang , 2013 # XiaoYong Yuan , 2013 # Ying Chun Guo , 2013 # donghua , 2013 # LIU Yulong , 2013 # LIU Yulong , 2013 # FIRST AUTHOR , 2011 # hamo , 2012 # hanxue , 2012 # honglei, 2015 # Jack River , 2013 # kwang1971 , 2014 # kwang1971 , 2014 # Lee Anthony , 2013 # Jack River , 2013 # Shuwen SUN , 2014 # Tom Fifield , 2013-2014 # Xiao Xi LIU , 2014 # XiaoYong Yuan , 2013 # 颜海峰 , 2014 # Yu Zhang, 2013 # Yu Zhang, 2013 # 汪军 , 2015 # 颜海峰 , 2014 # English translations for nova. # Andreas Jaeger , 2016. #zanata # zzxwill , 2016. #zanata # blkart , 2017. #zanata # Yikun Jiang , 2018. #zanata # Research and Development Center UnitedStack , 2022. #zanata msgid "" msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-07-04 18:13+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2022-07-26 02:32+0000\n" "Last-Translator: Research and Development Center UnitedStack " "\n" "Language: zh_CN\n" "Language-Team: Chinese (China)\n" "Plural-Forms: nplurals=1; plural=0\n" "Generated-By: Babel 2.2.0\n" "X-Generator: Zanata 4.3.3\n" #, python-format msgid "%(address)s is not a valid IP v4/6 address." msgstr "%(address)s 不是有效的IP v4/6地址。" #, python-format msgid "" "%(binary)s attempted direct database access which is not allowed by policy" msgstr "%(binary)s 尝试了直接访问数据库,策略不允许进行此访问" #, python-format msgid "%(cidr)s is not a valid IP network." 
msgstr "%(cidr)s 是无效 IP 网络。" #, python-format msgid "%(field)s should not be part of the updates." msgstr "%(field)s应该是更新的部分。" #, python-format msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB" msgstr "" "已分配 %(memsize)d MB 内存,但本次请求申请了 %(memtotal)d MB 内存,已超出总配" "额限制" #, python-format msgid "%(path)s is not on local storage: %(reason)s" msgstr "%(path)s 没有在本地存储器上:%(reason)s" #, python-format msgid "%(path)s is not on shared storage: %(reason)s" msgstr "%(path)s 没有在共享存储器上:%(reason)s" #, python-format msgid "%(total)i rows matched query %(meth)s, %(done)i migrated" msgstr "%(total)i 行与查询 %(meth)s 匹配,%(done)i 已迁移" #, python-format msgid "%(type)s hypervisor does not support PCI devices" msgstr "%(type)s监测器不支持PCI设备" #, python-format msgid "%s does not support disk hotplug." msgstr "%s 不支持磁盘热插。" #, python-format msgid "%s format is not supported" msgstr "不支持格式%s" #, python-format msgid "%s is not supported." msgstr "不支持%s" #, python-format msgid "%s must be either 'MANUAL' or 'AUTO'." msgstr "%s 必须是'MANUAL' 或者 'AUTO'。" #, python-format msgid "'%(other)s' should be an instance of '%(cls)s'" msgstr "'%(other)s'应该是'%(cls)s'的实例" msgid "'qemu-img info' parsing failed." msgstr "'qemu-img info'解析失败" #, python-format msgid "'rxtx_factor' argument must be a float between 0 and %g" msgstr "'rxtx_factor'参数必须是0和%g之间的浮点数" #, python-format msgid "A NetworkModel is required in field %s" msgstr "在字段%s中网络模型是必须的" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "API 版本字符%(version)s格式无效。必须是大版本号.小版本号。" #, python-format msgid "API version %(version)s is not supported on this method." msgstr "这个方法不支持%(version)s版本的API。" msgid "Access list not available for public flavors." msgstr "未提供公用云主机类型的访问列表。" #, python-format msgid "Action %s not found" msgstr "行为 %s 未定义" #, python-format msgid "" "Action for request_id %(request_id)s on instance %(instance_uuid)s not found" msgstr "找不到实例 %(instance_uuid)s 上针对 request_id %(request_id)s 的操作" #, python-format msgid "Action: '%(action)s', calling method: %(meth)s, body: %(body)s" msgstr "操作:“%(action)s”,调用方法:%(meth)s,主体:%(body)s" #, python-format msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries" msgstr "在%(retries)s尝试后,为聚合%(id)s 添加元数据" #, python-format msgid "Aggregate %(aggregate_id)s already has host %(host)s." msgstr "聚合 %(aggregate_id)s已经有主机 %(host)s。" #, python-format msgid "Aggregate %(aggregate_id)s could not be found." msgstr "找不到聚合 %(aggregate_id)s。" #, python-format msgid "Aggregate %(aggregate_id)s has no host %(host)s." msgstr "聚合 %(aggregate_id)s没有主机 %(host)s。" #, python-format msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." msgstr "聚合 %(aggregate_id)s 没有键为 %(metadata_key)s 的元数据。" #, python-format msgid "Aggregate %(aggregate_name)s already exists." msgstr "聚合 %(aggregate_name)s 已经存在。" #, python-format msgid "Aggregate %s does not support empty named availability zone" msgstr "聚合 %s 不支持名称为空的可用区域" #, python-format msgid "An invalid 'name' value was provided. The name must be: %(reason)s" msgstr "提供了无效“name”值。name 必须为:%(reason)s" msgid "An unknown error has occurred. Please try your request again." msgstr "发生了一个未知的错误. 请重试你的请求." msgid "An unknown exception occurred." 
msgstr "发生未知异常。" #, python-format msgid "Architecture name '%(arch)s' is not recognised" msgstr "体系结构名称“%(arch)s”不可识别" #, python-format msgid "Architecture name '%s' is not valid" msgstr "体系结构名称 '%s' 无效" msgid "Archiving" msgstr "归档" #, python-format msgid "" "Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty pool" msgstr "尝试从空池子中消费PCI设备%(compute_node_id)s:%(address)s" msgid "Attempted overwrite of an existing value." msgstr "尝试覆盖一个已存在的值。" #, python-format msgid "Attribute not supported: %(attr)s" msgstr "属性不受支持: %(attr)s" msgid "Bad Request - Invalid Parameters" msgstr "错误请求——参数无效" #, python-format msgid "Bad network format: missing %s" msgstr "错误的网络格式:丢失%s" msgid "Bad networks format" msgstr "错误的网络格式" #, python-format msgid "Bad networks format: network uuid is not in proper format (%s)" msgstr "损坏的网络格式:网络 uuid 格式不正确 (%s)" #, python-format msgid "Bad prefix for network in cidr %s" msgstr "cidr %s 中网络前缀不正确" #, python-format msgid "" "Binding failed for port %(port_id)s, please check neutron logs for more " "information." msgstr "绑定端口%(port_id)s失败,更多细节请查看日志。" msgid "Blank components" msgstr "空组件" msgid "" "Blank volumes (source: 'blank', dest: 'volume') need to have non-zero size" msgstr "空白卷(source: 'blank', dest: 'volume')需要未非0的大小。" #, python-format msgid "Block Device %(id)s is not bootable." msgstr "块设备 %(id)s 不能被引导。" #, python-format msgid "" "Block Device Mapping %(volume_id)s is a multi-attach volume and is not valid " "for this operation." msgstr "块设备映射 %(volume_id)s 是多重附加卷,对此操作无效。" msgid "Block Device Mapping cannot be converted to legacy format. " msgstr "块设备映射不能被转换为旧格式。" msgid "Block Device Mapping is Invalid." msgstr "块设备映射无效。" #, python-format msgid "Block Device Mapping is Invalid: %(details)s" msgstr "块设备映射无效: %(details)s" msgid "" "Block Device Mapping is Invalid: Boot sequence for the instance and image/" "block device mapping combination is not valid." msgstr "块设备映射无效: 当前云主机和镜像/块设备映射组合的启动序列无效。" msgid "" "Block Device Mapping is Invalid: You specified more local devices than the " "limit allows" msgstr "块设备映射无效: 你指定了多于限定允许的本地设备。" #, python-format msgid "Block Device Mapping is Invalid: failed to get image %(id)s." msgstr "块设备映射无效: 无法获取镜像 %(id)s." #, python-format msgid "Block Device Mapping is Invalid: failed to get snapshot %(id)s." msgstr "块设备映射无效:未能获取快照 %(id)s。" #, python-format msgid "Block Device Mapping is Invalid: failed to get volume %(id)s." msgstr "块设备映射无效:未能获取卷 %(id)s。" msgid "Block migration can not be used with shared storage." msgstr "块存储迁移无法在共享存储使用" msgid "Boot index is invalid." 
msgstr "启动索引编号无效。" #, python-format msgid "Build of instance %(instance_uuid)s aborted: %(reason)s" msgstr "实例%(instance_uuid)s的构建已中止:%(reason)s" #, python-format msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s" msgstr "实例%(instance_uuid)s的构建已被重新安排:%(reason)s" #, python-format msgid "BuildRequest not found for instance %(uuid)s" msgstr "找不到对应实例 %(uuid)s 的无效请求" msgid "CPU and memory allocation must be provided for all NUMA nodes" msgstr "必须为所有 NUMA 节点提供 CPU 和内存分配" #, python-format msgid "" "CPU doesn't have compatibility.\n" "\n" "%(ret)s\n" "\n" "Refer to %(u)s" msgstr "" "CPU 不兼容.\n" "\n" "%(ret)s\n" "\n" "参考 %(u)s" #, python-format msgid "CPU number %(cpunum)d is assigned to two nodes" msgstr "CPU 编号 %(cpunum)d 分配给两个节点" #, python-format msgid "CPU number %(cpunum)d is larger than max %(cpumax)d" msgstr "CPU 数 %(cpunum)d 大于最大值 %(cpumax)d" #, python-format msgid "CPU number %(cpuset)s is not assigned to any node" msgstr "CPU 编号 %(cpuset)s 未分配为任何节点" msgid "Can not add access to a public flavor." msgstr "不能添加访问到公共云主机类型。" msgid "Can not find requested image" msgstr "无法找到请求的镜像" #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "无法为 %d 凭证处理认证请求" msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "无法从实例 libvirt 配置中检索到根设备路径" #, python-format msgid "" "Cannot '%(action)s' instance %(server_id)s while it is in %(attr)s %(state)s" msgstr "" "在实例 %(server_id)s 处于 %(attr)s %(state)s 状态时,无法对该实例执" "行“%(action)s”" #, python-format msgid "Cannot add host to aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "不能添加主机到聚合%(aggregate_id)s。原因是:%(reason)s。" msgid "Cannot attach one or more volumes to multiple instances" msgstr "无法将一个或多个卷连接至多个实例" #, python-format msgid "Cannot call %(method)s on orphaned %(objtype)s object" msgstr "对于孤立对象 %(objtype)s 无法调用 %(method)s" #, python-format msgid "" "Cannot determine the parent storage pool for %s; cannot determine where to " "store images" msgstr "无法确定 %s 的父存储池;无法确定镜像的存储位置" msgid "Cannot find image for rebuild" msgstr "无法找到用来重新创建的镜像" #, python-format msgid "Cannot remove host %(host)s in aggregate %(id)s" msgstr "无法在聚集 %(id)s 中除去主机 %(host)s" #, python-format msgid "Cannot remove host from aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "不能从聚合%(aggregate_id)s移除主机。原因是:%(reason)s。" msgid "Cannot rescue a volume-backed instance" msgstr "无法急救卷支持的实例" msgid "" "Cannot set cpu thread pinning policy in a non dedicated cpu pinning policy" msgstr "无法在非专用 CPU 锁定策略中设置 CPU 线程锁定策略。" msgid "Cannot set realtime policy in a non dedicated cpu pinning policy" msgstr "无法在非专用 CPU 锁定策略中设置实时策略。" #, python-format msgid "Cannot update aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "不能更新聚合%(aggregate_id)s。原因是:%(reason)s。" #, python-format msgid "" "Cannot update metadata of aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "不能更新聚合%(aggregate_id)s的元素数据。原因是:%(reason)s。" #, python-format msgid "Cell %(uuid)s has no mapping." msgstr "单元%(uuid)s没有映射" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "对于下列资源,更改将导致使用量小于 0:%(unders)s" #, python-format msgid "Cinder API version %(version)s is not available." msgstr "Cinder API 版本 %(version)s 不可用." #, python-format msgid "Class %(class_name)s could not be found: %(exception)s" msgstr "找不到类 %(class_name)s :异常 %(exception)s" msgid "Completed" msgstr "完成" #, python-format msgid "Compute host %(host)s could not be found." 
msgstr "计算主机 %(host)s 没有找到。" #, python-format msgid "Compute host %s not found." msgstr "计算主机 %s 未找到。" #, python-format msgid "Compute service of %(host)s is still in use." msgstr "%(host)s 的计算服务仍然在使用。" #, python-format msgid "Compute service of %(host)s is unavailable at this time." msgstr "%(host)s 的计算服务此时不可用。" #, python-format msgid "Config drive format '%(format)s' is not supported." msgstr "配置驱动器格式“%(format)s”不受支持。" #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt hypervisor " "'%s' does not support selecting CPU models" msgstr "" "配置已请求显式 CPU 模型,但是当前 libvirt 管理程序“%s”不支持选择 CPU 模型" #, python-format msgid "" "Conflict updating instance %(instance_uuid)s, but we were unable to " "determine the cause" msgstr "更新实例%(instance_uuid)s冲突,但是我们不能判断原因" #, python-format msgid "" "Conflict updating instance %(instance_uuid)s. Expected: %(expected)s. " "Actual: %(actual)s" msgstr "更新实例%(instance_uuid)s冲突。期望:%(expected)s。实际:%(actual)s" #, python-format msgid "Connection to cinder host failed: %(reason)s" msgstr "连接cinder主机失败: %(reason)s" #, python-format msgid "Connection to glance host %(server)s failed: %(reason)s" msgstr "连接 Glance 主机 %(server)s 失败:%(reason)s" msgid "Connection to libvirt lost" msgstr "libvirt 连接丢失" #, python-format msgid "Connection to libvirt lost: %s" msgstr "到libvirt的连接丢失:%s" #, python-format msgid "" "Console log output could not be retrieved for instance %(instance_id)s. " "Reason: %(reason)s" msgstr "无法检索实例 %(instance_id)s 控制台日志输出。原因:%(reason)s" msgid "Constraint not met." msgstr "未符合约束。" #, python-format msgid "Converted to raw, but format is now %s" msgstr "转化为裸格式,但目前格式是 %s" #, python-format msgid "Could not attach image to loopback: %s" msgstr "无法给loopback附加镜像:%s" #, python-format msgid "Could not fetch image %(image_id)s" msgstr "未能访存映像 %(image_id)s" #, python-format msgid "Could not find a handler for %(driver_type)s volume." msgstr "无法为 %(driver_type)s 卷找到句柄。" #, python-format msgid "Could not find binary %(binary)s on host %(host)s." msgstr "没有找到二进制 %(binary)s 在主机 %(host)s 上。" #, python-format msgid "Could not find config at %(path)s" msgstr "在 %(path)s 找不到配置文件。" msgid "Could not find the datastore reference(s) which the VM uses." msgstr "无法找到虚拟机使用的数据存储引用。" #, python-format msgid "Could not load line %(line)s, got error %(error)s" msgstr "不能加载行%(line)s,得到的错误是%(error)s" #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "无法从路径 %(path)s 中加载应用 '%(name)s'" #, python-format msgid "" "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s" msgstr "未能安装 vfat 配置驱动器。%(operation)s 失败。错误:%(error)s" #, python-format msgid "Could not upload image %(image_id)s" msgstr "未能上载映像 %(image_id)s" msgid "Creation of virtual interface with unique mac address failed" msgstr "为使用特殊MAC地址的vm的创建失败" msgid "Database Connection" msgstr "数据库连接" #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "存储正则表达式%s没有匹配存储" msgid "Datetime is in invalid format" msgstr "时间格式无效" msgid "Default PBM policy is required if PBM is enabled." msgstr "如果PBM启用,缺省的PBM策略是必须的。" #, python-format msgid "Device '%(device)s' not found." msgstr "找不到设备“%(device)s”。" #, python-format msgid "Device detach failed for %(device)s: %(reason)s" msgstr "对 %(device)s 执行设备拆离失败:%(reason)s)" msgid "Device name contains spaces." msgstr "Device名称中包含了空格" msgid "Device name empty or too long." 
msgstr "设备名称为空或者太长" #, python-format msgid "Device type mismatch for alias '%s'" msgstr "别名“%s”的设备类型不匹配" #, python-format msgid "Disk format %(disk_format)s is not acceptable" msgstr "磁盘格式 %(disk_format)s 不能接受" #, python-format msgid "Disk info file is invalid: %(reason)s" msgstr "磁盘信息文件无效:%(reason)s" #, python-format msgid "Driver Error: %s" msgstr "驱动错误:%s" #, python-format msgid "Error attempting to run %(method)s" msgstr "尝试运行 %(method)s 时出错" #, python-format msgid "" "Error destroying the instance on node %(node)s. Provision state still " "'%(state)s'." msgstr "在节点%(node)s销毁实例出错。准备状态仍然是'%(state)s'。" #, python-format msgid "Error during unshelve instance %(instance_id)s: %(reason)s" msgstr "取消搁置实例 %(instance_id)s 期间出错:%(reason)s" #, python-format msgid "" "Error from libvirt while getting domain info for %(instance_name)s: [Error " "Code %(error_code)s] %(ex)s" msgstr "" "获取%(instance_name)s域信息时libvirt报错:[错误号 %(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "查找 %(instance_name)s时libvirt出错:[错误代码 %(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while quiescing %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "当停顿%(instance_name)s时,libvirt报错: [错误代号 %(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while set password for username \"%(user)s\": [Error Code " "%(error_code)s] %(ex)s" msgstr "" "当为用户\"%(user)s\"设置密码时,libvirt报错:[错误号 %(error_code)s] %(ex)s" #, python-format msgid "" "Error mounting %(device)s to %(dir)s in image %(image)s with libguestfs " "(%(e)s)" msgstr "使用libguestfs在镜像%(image)s中挂载%(device)s 到 %(dir)s出错 (%(e)s)" #, python-format msgid "Error mounting %(image)s with libguestfs (%(e)s)" msgstr "使用 libguestfs挂载%(image)s出错 (%(e)s)" #, python-format msgid "Error when creating resource monitor: %(monitor)s" msgstr "创建资源监控时出错:%(monitor)s" #, python-format msgid "" "Error:\n" "%s" msgstr "" "错误:\n" "%s" #, python-format msgid "Event %(event)s not found for action id %(action_id)s" msgstr "对于操作标识 %(action_id)s,找不到事件 %(event)s" msgid "Event must be an instance of nova.virt.event.Event" msgstr "事件必须是 nova.virt.event.Event 的实例" #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for instance " "%(instance_uuid)s. Last exception: %(exc_reason)s" msgstr "" "已超过实例 %(instance_uuid)s 的最大调度尝试次数 %(max_attempts)d。最近一次异" "常:%(exc_reason)s" #, python-format msgid "" "Exceeded max scheduling retries %(max_retries)d for instance " "%(instance_uuid)s during live migration" msgstr "在热迁移期间,对于实例%(instance_uuid)s超过最大调度次数%(max_retries)d" #, python-format msgid "Exceeded maximum number of retries. %(reason)s" msgstr "超过最大尝试次数。%(reason)s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "期望 uuid,但是接收到 %(uuid)s。" msgid "Extracting vmdk from OVA failed." msgstr "从OVA提前vmdk失败。" #, python-format msgid "Failed to access port %(port_id)s: %(reason)s" msgstr "访问端口%(port_id)s失败:%(reason)s" #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "分配网络失败,错误是%s,不重新调度。" msgid "Failed to allocate the network(s), not rescheduling." 
msgstr "分配网络失败,不重新调度。" #, python-format msgid "Failed to attach network adapter device to %(instance_uuid)s" msgstr "连接网络适配器设备到%(instance_uuid)s失败" #, python-format msgid "Failed to connect to libvirt: %(msg)s" msgstr "连接到 libvirt 失败: %(msg)s" #, python-format msgid "Failed to deploy instance: %(reason)s" msgstr "未能部署实例: %(reason)s" #, python-format msgid "Failed to detach PCI device %(dev)s: %(reason)s" msgstr "断开PCI设备%(dev)s失败:%(reason)s" #, python-format msgid "Failed to detach network adapter device from %(instance_uuid)s" msgstr "从实例%(instance_uuid)s 断开网络适配器设备失败" #, python-format msgid "Failed to encrypt text: %(reason)s" msgstr "未能对文本进行加密:%(reason)s" #, python-format msgid "Failed to launch instances: %(reason)s" msgstr "无法启动云主机:%(reason)s" #, python-format msgid "Failed to map partitions: %s" msgstr "映射分区失败:%s" #, python-format msgid "Failed to mount filesystem: %s" msgstr "挂载文件系统失败:%s" #, python-format msgid "Failed to power off instance: %(reason)s" msgstr "云主机无法关机:%(reason)s" #, python-format msgid "Failed to power on instance: %(reason)s" msgstr "云主机无法开机:%(reason)s" #, python-format msgid "Failed to provision instance %(inst)s: %(reason)s" msgstr "准备实例%(inst)s失败:%(reason)s" #, python-format msgid "Failed to read or write disk info file: %(reason)s" msgstr "读写磁盘信息文件错误:%(reason)s" #, python-format msgid "Failed to reboot instance: %(reason)s" msgstr "云主机无法重启:%(reason)s" #, python-format msgid "Failed to remove volume(s): (%(reason)s)" msgstr "移除卷失败:(%(reason)s)" #, python-format msgid "Failed to request Ironic to rebuild instance %(inst)s: %(reason)s" msgstr "请求Ironic 重新构建实例%(inst)s失败:%(reason)s" #, python-format msgid "Failed to resume instance: %(reason)s" msgstr "无法恢复云主机:%(reason)s" #, python-format msgid "Failed to run qemu-img info on %(path)s : %(error)s" msgstr "在%(path)s运行 qemu-img info 失败:%(error)s" #, python-format msgid "Failed to set admin password on %(instance)s because %(reason)s" msgstr "未能对 %(instance)s 设置管理员密码,原因如下:%(reason)s" #, python-format msgid "Failed to suspend instance: %(reason)s" msgstr "无法挂起云主机:%(reason)s" #, python-format msgid "Failed to terminate instance: %(reason)s" msgstr "无法终止云主机:%(reason)s" msgid "Failure prepping block device." msgstr "准备块设备失败。" #, python-format msgid "File %(file_path)s could not be found." msgstr "找不到文件 %(file_path)s。" #, python-format msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s." msgstr "对于网络%(network_id)s,固定IP %(ip)s 不是一个有效的ip地址。" #, python-format msgid "Fixed IP %s is already in use." msgstr "固定IP%s已在使用。" #, python-format msgid "" "Fixed IP address %(address)s is already in use on instance %(instance_uuid)s." msgstr "在实例 %(instance_uuid)s 上,固定 IP 地址 %(address)s 已在使用中。" #, python-format msgid "Fixed IP not found for address %(address)s." msgstr "找不到对应地址 %(address)s 的固定 IP。" #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "云主机类型 %(flavor_id)s 没有找到。" #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s." msgstr "云主机类型 %(flavor_id)s 中没有名为 %(extra_specs_key)s 的附加规格。" #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(key)s." msgstr "云主机类型%(flavor_id)s中没有名为%(key)s的附加规格。" #, python-format msgid "" "Flavor %(id)s extra spec cannot be updated or created after %(retries)d " "retries." msgstr " %(retries)d 次重试后,无法更新或创建类型 %(id)s 的特别设定。" #, python-format msgid "" "Flavor access already exists for flavor %(flavor_id)s and project " "%(project_id)s combination." 
msgstr "项目 %(project_id)s已经拥有对于云主机类型 %(flavor_id)s的访问权限。" #, python-format msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination." msgstr "项目%(project_id)s中未发现云主机类型%(flavor_id)s。" msgid "Flavor used by the instance could not be found." msgstr "找不到实例使用的云主机类型。" #, python-format msgid "Flavor with ID %(flavor_id)s already exists." msgstr "ID为%(flavor_id)s的云主机类型已经存在。" #, python-format msgid "Flavor with name %(flavor_name)s could not be found." msgstr "没有找到名为%(flavor_name)s的云主机类型。" #, python-format msgid "Flavor with name %(name)s already exists." msgstr "名为%(name)s的云主机类型已经存在。" #, python-format msgid "" "Flavor's disk is smaller than the minimum size specified in image metadata. " "Flavor disk is %(flavor_size)i bytes, minimum size is %(image_min_disk)i " "bytes." msgstr "" "云主机类型的磁盘比在镜像元数据中指定的最小值还小。云主机类型磁盘大小" "是%(flavor_size)i 字节,最小值大小是%(image_min_disk)i字节。" #, python-format msgid "" "Flavor's disk is too small for requested image. Flavor disk is " "%(flavor_size)i bytes, image is %(image_size)i bytes." msgstr "" "对于请求的镜像,云主机类型的磁盘太小。云主机类型磁盘大小是%(flavor_size)i字" "节,镜像大小是%(image_size)i字节。" msgid "Flavor's memory is too small for requested image." msgstr "相对于申请的镜像,该云主机类型的内存太小。" #, python-format msgid "Floating IP %(address)s association has failed." msgstr "浮动IP %(address)s绑定失败。" #, python-format msgid "Floating IP %(address)s is associated." msgstr "浮动 IP %(address)s 已关联。" #, python-format msgid "Floating IP %(address)s is not associated with instance %(id)s." msgstr "浮动 IP %(address)s 未与实例 %(id)s 关联。" #, python-format msgid "Floating IP not found for ID %(id)s." msgstr "找不到对应标识 %(id)s 的浮动 IP。" #, python-format msgid "Floating IP not found for ID %s" msgstr "找不到对应标识 %s 的浮动 IP" #, python-format msgid "Floating IP not found for address %(address)s." msgstr "找不到对应地址 %(address)s 的浮动 IP。" msgid "Floating IP pool not found." msgstr "找不到浮动 IP 池。" msgid "Forbidden" msgstr "禁止" msgid "" "Forbidden to exceed flavor value of number of serial ports passed in image " "meta." msgstr "传入镜像元数据的串口数不能超过云主机类型中的设定。" msgid "Found no disk to snapshot." msgstr "发现没有盘来做快照。" msgid "Guest agent is not enabled for the instance" msgstr "该云主机未启用 Guest agent" msgid "Guest does not have a console available." msgstr "访客没有可用控制台。" #, python-format msgid "Host %(host)s could not be found." msgstr "主机 %(host)s 没有找到。" #, python-format msgid "Host %(host)s is already mapped to cell %(uuid)s" msgstr "主机 %(host)s 已映射至单元 %(uuid)s" #, python-format msgid "Host '%(name)s' is not mapped to any cell" msgstr "主机 '%(name)s'没有映射到任何单元" msgid "Host aggregate is not empty" msgstr "主机聚合不为空" msgid "Host does not support guests with NUMA topology set" msgstr "主机不支持具有 NUMA 拓扑集的客户机" msgid "Host does not support guests with custom memory page sizes" msgstr "主机不支持定制内存页大小的客户机" msgid "Hypervisor driver does not support post_live_migration_at_source method" msgstr "监测器驱动不支持post_live_migration_at_source方法" #, python-format msgid "Hypervisor virt type '%s' is not valid" msgstr "监测器虚拟化类型'%s'无效" #, python-format msgid "Hypervisor virtualization type '%(hv_type)s' is not recognised" msgstr "监测器虚拟化类型“%(hv_type)s”无法识别" #, python-format msgid "Hypervisor with ID '%s' could not be found." msgstr "找不到具有标识“%s”的管理程序。" #, python-format msgid "IP allocation over quota in pool %s." msgstr "IP分配操作池%s的配额。" msgid "IP allocation over quota." msgstr "IP分配超过配额。" #, python-format msgid "Image %(image_id)s could not be found." msgstr "镜像 %(image_id)s 没有找到。" #, python-format msgid "Image %(image_id)s is not active." 
msgstr "映像 %(image_id)s 处于不活动状态。" #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "镜像 %(image_id)s 无法接受,原因是: %(reason)s" msgid "Image disk size greater than requested disk size" msgstr "镜像磁盘大小大于请求磁盘大小" msgid "Image is not raw format" msgstr "镜像不是裸格式镜像" msgid "Image metadata limit exceeded" msgstr "超过镜像元数据限制" #, python-format msgid "Image model '%(image)s' is not supported" msgstr "镜像模式 '%(image)s' 不支持" msgid "Image not found." msgstr "镜像没有找到。" #, python-format msgid "" "Image property '%(name)s' is not permitted to override NUMA configuration " "set against the flavor" msgstr "" "镜像属性'%(name)s'无法覆盖云主机类型设定的NUMA(非一致性内存访问)配置。" msgid "" "Image property 'hw_cpu_policy' is not permitted to override CPU pinning " "policy set against the flavor" msgstr "对照实例类型,不允许镜像属性“hw_cpu_policy”覆盖 CPU 锁定策略集" msgid "" "Image property 'hw_cpu_thread_policy' is not permitted to override CPU " "thread pinning policy set against the flavor" msgstr "" "不允许使用镜像属性“hw_cpu_thread_policy”覆盖针对该类型设置的 CPU 线程锁定策略" msgid "Image that the instance was started with could not be found." msgstr "实例启动的镜像没有找到。" #, python-format msgid "Image's config drive option '%(config_drive)s' is invalid" msgstr "镜像的配置驱动器选项“%(config_drive)s”无效" msgid "" "Images with destination_type 'volume' need to have a non-zero size specified" msgstr "带目的类型'volume'的镜像需要指定非0的大小。" msgid "In ERROR state" msgstr "处于“错误”状态" #, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "处于状态 %(vm_state)s/%(task_state)s,而不是处于状态“已调整大小”/“无”" #, python-format msgid "In-progress live migration %(id)s is not found for server %(uuid)s." msgstr "找不到对应服务器 %(uuid)s 的进行中的实时迁移 %(id)s。" msgid "" "Incompatible settings: ephemeral storage encryption is supported only for " "LVM images." msgstr "不兼容的设置:只有LVM镜像支持瞬时存储加密。" #, python-format msgid "Info cache for instance %(instance_uuid)s could not be found." msgstr "找不到用于实例 %(instance_uuid)s 的信息高速缓存。" #, python-format msgid "" "Instance %(instance)s and volume %(vol)s are not in the same " "availability_zone. Instance is in %(ins_zone)s. Volume is in %(vol_zone)s" msgstr "" "实例%(instance)s和卷%(vol)s没有在同一个可用域availability_zone。实例" "在%(ins_zone)s。卷在%(vol_zone)s" #, python-format msgid "Instance %(instance)s does not have a port with id %(port)s" msgstr "实例%(instance)s没有唯一标识为 %(port)s的端口" #, python-format msgid "Instance %(instance_id)s cannot be rescued: %(reason)s" msgstr "无法急救实例 %(instance_id)s:%(reason)s" #, python-format msgid "Instance %(instance_id)s could not be found." msgstr "实例 %(instance_id)s 没有找到。" #, python-format msgid "Instance %(instance_id)s has no tag '%(tag)s'" msgstr "实例%(instance_id)s没有标签'%(tag)s'" #, python-format msgid "Instance %(instance_id)s is not in rescue mode" msgstr "实例 %(instance_id)s 不在救援模式。" #, python-format msgid "Instance %(instance_id)s is not ready" msgstr "实例 %(instance_id)s 未就绪" #, python-format msgid "Instance %(instance_id)s is not running." msgstr "实例 %(instance_id)s 没有运行。" #, python-format msgid "Instance %(instance_id)s is unacceptable: %(reason)s" msgstr "实例 %(instance_id)s 无法接受,原因是: %(reason)s" #, python-format msgid "Instance %(instance_uuid)s does not specify a NUMA topology" msgstr "实例 %(instance_uuid)s 未指定 NUMA 拓扑" #, python-format msgid "Instance %(instance_uuid)s does not specify a migration context." msgstr "实例 %(instance_uuid)s未指定迁移上下文" #, python-format msgid "" "Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while " "the instance is in this state." 
msgstr "" "实例 %(instance_uuid)s 处于%(attr)s %(state)s 中。该实例在这种状态下不能执行 " "%(method)s。" #, python-format msgid "Instance %(instance_uuid)s is locked" msgstr "实例 %(instance_uuid)s 已锁定" #, python-format msgid "" "Instance %(instance_uuid)s requires config drive, but it does not exist." msgstr "实例 %(instance_uuid)s 需要配置驱动器,但配置驱动器不存在。" #, python-format msgid "Instance %(name)s already exists." msgstr "实例 %(name)s 已经存在。" #, python-format msgid "Instance %(server_id)s is in an invalid state for '%(action)s'" msgstr "对于“%(action)s”,实例 %(server_id)s 处于无效状态" #, python-format msgid "Instance %(uuid)s has no mapping to a cell." msgstr "实例 %(uuid)s 没有映射到一个单元。" #, python-format msgid "Instance %s not found" msgstr "找不到实例 %s" #, python-format msgid "Instance %s provisioning was aborted" msgstr "废弃准备实例%s" msgid "Instance could not be found" msgstr "无法找到实例" msgid "Instance disk to be encrypted but no context provided" msgstr "实例磁盘将被加密,但是没有提供上下文" msgid "Instance event failed" msgstr "实例事件无效" #, python-format msgid "Instance group %(group_uuid)s already exists." msgstr "实例组 %(group_uuid)s 已存在。" #, python-format msgid "Instance group %(group_uuid)s could not be found." msgstr "找不到实例组 %(group_uuid)s" msgid "Instance has no source host" msgstr "实例没有任何源主机" msgid "Instance has not been resized." msgstr "实例还没有调整大小。" #, python-format msgid "Instance hostname %(hostname)s is not a valid DNS name" msgstr "实例主机名 %(hostname)s 是无效 DNS 名称" msgid "Instance is not a member of specified network" msgstr "实例并不是指定网络的成员" msgid "Instance network is not ready yet" msgstr "云主机网络尚未就绪" #, python-format msgid "Instance rollback performed due to: %s" msgstr "由于%s,实例回滚已被执行" #, python-format msgid "" "Insufficient Space on Volume Group %(vg)s. Only %(free_space)db available, " "but %(size)d bytes required by volume %(lv)s." msgstr "" "卷组 %(vg)s 上的空间不足。只有 %(free_space)db 可用,但卷 %(lv)s 需要 " "%(size)d 字节。" #, python-format msgid "Insufficient compute resources: %(reason)s." msgstr "计算资源不足:%(reason)s。" #, python-format msgid "Interface %(interface)s not found." msgstr "接口 %(interface)s没有找到。" #, python-format msgid "Invalid Base 64 data for file %(path)s" msgstr "文件%(path)s的Base 64数据非法" msgid "Invalid Connection Info" msgstr "连接信息无效" #, python-format msgid "Invalid ID received %(id)s." msgstr "接收到的标识 %(id)s 无效。" #, python-format msgid "Invalid IP format %s" msgstr "无效IP格式%s" #, python-format msgid "Invalid IP protocol %(protocol)s." msgstr "无效的IP协议 %(protocol)s。" msgid "" "Invalid PCI Whitelist: The PCI whitelist can specify devname or address, but " "not both" msgstr "无效PCI白名单:PCI白名单可以指定设备名或地址,而不是两个同时指定" #, python-format msgid "Invalid PCI alias definition: %(reason)s" msgstr "无效PCI别名定义:%(reason)s" #, python-format msgid "Invalid Regular Expression %s" msgstr "无效的正则表达式%s" #, python-format msgid "Invalid characters in hostname '%(hostname)s'" msgstr "主机名“%(hostname)s”中的有无效字符" msgid "Invalid config_drive provided." msgstr "提供了无效的config_drive" #, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "config_drive_format“%s”无效" #, python-format msgid "Invalid console type %(console_type)s" msgstr "控制台类型 %(console_type)s 无效" #, python-format msgid "Invalid content type %(content_type)s." msgstr "无效的内容类型 %(content_type)s。" #, python-format msgid "Invalid datetime string: %(reason)s" msgstr "日期字符串无效: %(reason)s" msgid "Invalid device UUID." 
msgstr "无效的设备UUID" #, python-format msgid "Invalid entry: '%s'" msgstr "无效输入:'%s'" #, python-format msgid "Invalid entry: '%s'; Expecting dict" msgstr "无效输入: '%s';期望词典" #, python-format msgid "Invalid entry: '%s'; Expecting list or dict" msgstr "无效输入:'%s',期望列表或词典" #, python-format msgid "Invalid exclusion expression %r" msgstr "无效排它表达式%r" #, python-format msgid "Invalid image format '%(format)s'" msgstr "无效镜像格式'%(format)s'" #, python-format msgid "Invalid image href %(image_href)s." msgstr "无效的镜像href %(image_href)s。" #, python-format msgid "Invalid image metadata. Error: %s" msgstr "无效镜像元数据。错误:%s" #, python-format msgid "Invalid inclusion expression %r" msgstr "无效包含表达式%r" #, python-format msgid "" "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s" msgstr "对于字段/属性 %(path)s,输入无效。值:%(value)s。%(message)s" #, python-format msgid "Invalid input received: %(reason)s" msgstr "输入无效: %(reason)s" msgid "Invalid instance image." msgstr "无效实例镜像。" #, python-format msgid "Invalid is_public filter [%s]" msgstr "is_public 过滤器 [%s] 无效" msgid "Invalid key_name provided." msgstr "提供了无效的key_name。" #, python-format msgid "Invalid memory page size '%(pagesize)s'" msgstr "内存页大小“%(pagesize)s”无效" msgid "Invalid metadata key" msgstr "无效的元数据键" #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "元数据大小无效: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "元数据无效: %(reason)s" #, python-format msgid "Invalid minDisk filter [%s]" msgstr "minDisk 过滤器 [%s] 无效" #, python-format msgid "Invalid minRam filter [%s]" msgstr "minRam 过滤器 [%s] 无效" #, python-format msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" msgstr "无效的端口范围 %(from_port)s:%(to_port)s. %(msg)s" msgid "Invalid proxy request signature." msgstr "代理请求签名无效。" #, python-format msgid "Invalid range expression %r" msgstr "无效范围表达式%r" msgid "Invalid service catalog json." msgstr "服务目录 json 无效。" msgid "Invalid start time. The start time cannot occur after the end time." msgstr "开始时间无效。开始时间不能出现在结束时间之后。" msgid "Invalid state of instance files on shared storage" msgstr "共享存储器上实例文件的状态无效" msgid "Invalid status value" msgstr "状态值无效" #, python-format msgid "Invalid timestamp for date %s" msgstr "对于日期 %s,时间戳记无效" #, python-format msgid "Invalid usage_type: %s" msgstr "usage_type: %s无效" #, python-format msgid "Invalid value for Config Drive option: %(option)s" msgstr "非法的Config Drive值: %(option)s" #, python-format msgid "Invalid virtual interface address %s in request" msgstr "请求中无效虚拟接口地址%s" #, python-format msgid "Invalid volume access mode: %(access_mode)s" msgstr "无效的卷访问模式: %(access_mode)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "卷无效: %(reason)s" msgid "Invalid volume_size." msgstr "无效volume_size." #, python-format msgid "Ironic node uuid not supplied to driver for instance %s." msgstr "Ironic节点uuid不提供实例 %s的驱动。" #, python-format msgid "" "It is not allowed to create an interface on external network %(network_uuid)s" msgstr "在外部网络%(network_uuid)s创建一个接口是不允许的" msgid "" "Key Names can only contain alphanumeric characters, periods, dashes, " "underscores, colons and spaces." msgstr "关键字名字只能包含数字字母,句号,破折号,下划线,冒号和空格。" #, python-format msgid "Key manager error: %(reason)s" msgstr "关键管理者错误:%(reason)s" #, python-format msgid "Key pair '%(key_name)s' already exists." 
msgstr "密钥对 %(key_name)s 已经存在" #, python-format msgid "Keypair %(name)s not found for user %(user_id)s" msgstr "密钥对 %(name)s 没有为用户 %(user_id)s 找到。" #, python-format msgid "Keypair data is invalid: %(reason)s" msgstr "密钥对数据不合法: %(reason)s" msgid "Limits only supported from vCenter 6.0 and above" msgstr "仅 vCenter 6.0 及以上版本支持限制" #, python-format msgid "Live migration %(id)s for server %(uuid)s is not in progress." msgstr "对应服务器 %(uuid)s 的实时迁移 %(id)s 未在进行。" #, python-format msgid "Malformed message body: %(reason)s" msgstr "错误格式的消息体: %(reason)s" #, python-format msgid "" "Malformed request URL: URL's project_id '%(project_id)s' doesn't match " "Context's project_id '%(context_project_id)s'" msgstr "" "请求 URL 的格式不正确:URL 的 project_id“%(project_id)s”与上下文的 " "project_id“%(context_project_id)s”不匹配" msgid "Malformed request body" msgstr "错误格式的请求主体" msgid "Mapping image to local is not supported." msgstr "不支持映射镜像到本地。" #, python-format msgid "Marker %(marker)s could not be found." msgstr "找不到标记符 %(marker)s。" msgid "Maximum number of floating IPs exceeded" msgstr "已超过最大浮动 IP 数" #, python-format msgid "Maximum number of metadata items exceeds %(allowed)d" msgstr "最大元数据项数超过 %(allowed)d" msgid "Maximum number of ports exceeded" msgstr "超过端口的最大数" msgid "Maximum number of security groups or rules exceeded" msgstr "已超过最大安全组数或最大规则数" msgid "Metadata item was not found" msgstr "元数据项目未找到" msgid "Metadata property key greater than 255 characters" msgstr "元数据属性关键字超过 255 个字符" msgid "Metadata property value greater than 255 characters" msgstr "元数据属性值超过 255 个字符" msgid "Metadata type should be dict." msgstr "元数据类型必须为词典。" #, python-format msgid "" "Metric %(name)s could not be found on the compute host node %(host)s." "%(node)s." msgstr "在计算主机节点上%(host)s.%(node)s,测量%(name)s没有找到。" msgid "Migration" msgstr "迁移" #, python-format msgid "Migration %(id)s for server %(uuid)s is not live-migration." msgstr "对应服务器 %(uuid)s 的迁移 %(id)s 并非实时迁移。" #, python-format msgid "Migration %(migration_id)s could not be found." msgstr "迁移 %(migration_id)s 没有找到。" #, python-format msgid "Migration %(migration_id)s not found for instance %(instance_id)s" msgstr "找不到对应实例 %(instance_id)s 的迁移 %(migration_id)s" #, python-format msgid "" "Migration %(migration_id)s state of instance %(instance_uuid)s is %(state)s. " "Cannot %(method)s while the migration is in this state." msgstr "" "实例 %(instance_uuid)s 的迁移 %(migration_id)s 状态为 %(state)s。迁移处于此状" "态时,无法执行 %(method)s。" #, python-format msgid "Migration error: %(reason)s" msgstr "迁移错误:%(reason)s" msgid "Migration is not supported for LVM backed instances" msgstr "对于LVM作为后台的实例,不支持迁移" #, python-format msgid "" "Migration not found for instance %(instance_id)s with status %(status)s." msgstr "没有为实例 %(instance_id)s 找到迁移其状态为 %(status)s 。" #, python-format msgid "Migration pre-check error: %(reason)s" msgstr "迁移预检查错误:%(reason)s" #, python-format msgid "Migration select destinations error: %(reason)s" msgstr "迁移选择目标错误:%(reason)s" #, python-format msgid "Missing arguments: %s" msgstr "缺少参数: %s" msgid "Missing device UUID." msgstr "缺少设备UUID" msgid "Missing disabled reason field" msgstr "缺少禁用的原因字段" msgid "Missing forced_down field" msgstr "缺少forced_down字段" msgid "Missing imageRef attribute" msgstr "缺少属性imageRef" #, python-format msgid "Missing keys: %s" msgstr "缺少键:%s" msgid "Missing parameter dict" msgstr "缺少参数 dict" #, python-format msgid "Missing vlan number in %s" msgstr " %s 中缺少 vlan id" #, python-format msgid "" "More than one instance is associated with fixed IP address '%(address)s'." 
msgstr "多个关联实例与固定 IP 地址“%(address)s”相关联。" msgid "" "More than one possible network found. Specify network ID(s) to select which " "one(s) to connect to." msgstr "发现不止一个可能的网络。指定网络ID来选择连接到那个网络。" msgid "More than one swap drive requested." msgstr "请求了多于一个交换驱动" #, python-format msgid "Multi-boot operating system found in %s" msgstr "在 %s 中找到多重引导操作系统" msgid "Multiple X-Instance-ID headers found within request." msgstr "在请求内找到多个 X-Instance-ID 头。" msgid "Multiple X-Tenant-ID headers found within request." msgstr "在请求内找到多个 X-Tenant-ID 。" #, python-format msgid "Multiple floating IP pools matches found for name '%s'" msgstr "对于名称“%s”,找到多个浮动 IP 池匹配项" #, python-format msgid "Multiple floating IPs are found for address %(address)s." msgstr "发现对应地址 %(address)s 的多个浮动 IP。" msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we do " "not return uptime for just one host." msgstr "" " VMWare vCenter驱动可能管理者多个主机;然而,我们因为只有一台主机,没有及时反" "馈。" msgid "Multiple possible networks found, use a Network ID to be more specific." msgstr "发现多个可能的网络,用Network ID会更加明确。" #, python-format msgid "" "Multiple security groups found matching '%s'. Use an ID to be more specific." msgstr "找到多个与“%s”匹配的安全组。请使用标识以更具体地进行查找。" msgid "Must input network_id when request IP address" msgstr "请求IP地址,必须输入network_id" msgid "Must not input both network_id and port_id" msgstr "network_id和port_id必须同时输入" msgid "" "Must specify host_ip, host_username and host_password to use vmwareapi." "VMwareVCDriver" msgstr "" "使用vmwareapi.VMwareVCDriver必须指定host_ip, host_username 和 host_password" msgid "Must supply a positive value for max_number" msgstr "必须对 max_number 提供正值" msgid "Must supply a positive value for max_rows" msgstr "必须为最大行max_rows提供一个正数" msgid "Name" msgstr "名称" #, python-format msgid "Network %(network_id)s could not be found." msgstr "网络 %(network_id)s 没有找到。" #, python-format msgid "" "Network %(network_uuid)s requires a subnet in order to boot instances on." msgstr "网络%(network_uuid)s需要一个子网,以便启动实例。" #, python-format msgid "Network could not be found for bridge %(bridge)s" msgstr "无法为桥 %(bridge)s 找到网络" #, python-format msgid "Network could not be found for instance %(instance_id)s." msgstr "无法为实例 %(instance_id)s 找到网络。" msgid "Network not found" msgstr "没有找到网络" msgid "" "Network requires port_security_enabled and subnet associated in order to " "apply security groups." msgstr "网络必须启用端口安全特性,并且需要关联子网,以便应用安全组。" msgid "New volume must be detached in order to swap." msgstr "为了进行交换,新卷必须断开。" msgid "New volume must be the same size or larger." msgstr "新卷必须大小相同或者更大。" #, python-format msgid "No Block Device Mapping with id %(id)s." msgstr "没有块设备与ID %(id)s进行映射。" msgid "No Unique Match Found." msgstr "找不到任何唯一匹配项。" msgid "No compute host specified" msgstr "未指定计算宿主机" #, python-format msgid "No configuration information found for operating system %(os_name)s" msgstr "找不到对应操作系统 %(os_name)s 的配置信息" #, python-format msgid "No device with MAC address %s exists on the VM" msgstr "在VM上面没有MAC 地址 %s 的设备" #, python-format msgid "No device with interface-id %s exists on VM" msgstr "在VM上面没有 interface-id %s 的设备" #, python-format msgid "No disk at %(location)s" msgstr "在 %(location)s 没有磁盘" #, python-format msgid "No fixed IP addresses available for network: %(net)s" msgstr "没有可用的固定IP给网络:%(net)s" msgid "No fixed IPs associated to instance" msgstr "没有固定 IP 与实例相关联" msgid "No free nbd devices" msgstr "没有空闲NBD设备" msgid "No host available on cluster" msgstr "没有可用的主机在集群中" msgid "No hosts found to map to cell, exiting." 
msgstr "找不到要映射至单元的主机,正在退出。" #, python-format msgid "No hypervisor matching '%s' could be found." msgstr "找不到任何与“%s”匹配的管理程序。" msgid "No image locations are accessible" msgstr "没有镜像位置可以访问" #, python-format msgid "" "No live migration URI configured and no default available for " "\"%(virt_type)s\" hypervisor virtualization type." msgstr "" "未配置任何实时迁移 URI,并且未提供对应 “%(virt_type)s” hypervisor 虚拟化类型" "的缺省值。" msgid "No more floating IPs available." msgstr "没有其他可用浮动 IP。" #, python-format msgid "No more floating IPs in pool %s." msgstr "池 %s 中没有其他浮动 IP。" #, python-format msgid "No mount points found in %(root)s of %(image)s" msgstr "在%(image)s的%(root)s没有找到挂载点" #, python-format msgid "No operating system found in %s" msgstr "在 %s 中找不到任何操作系统" msgid "No root disk defined." msgstr "没有定义根磁盘。" #, python-format msgid "" "No specific network was requested and none are available for project " "'%(project_id)s'." msgstr "未请求特定网络,项目“%(project_id)s”没有可用网络。" msgid "No valid host found for cold migrate" msgstr "冷迁移过程中发现无效主机" msgid "No valid host found for resize" msgstr "重新配置过程中没有发现有效主机" #, python-format msgid "No valid host was found. %(reason)s" msgstr "找不到有效主机,原因是 %(reason)s。" #, python-format msgid "No volume Block Device Mapping at path: %(path)s" msgstr "没有卷块设备映射到以下路径:%(path)s" #, python-format msgid "No volume Block Device Mapping with id %(volume_id)s." msgstr "没有卷块设备与ID %(volume_id)s进行映射。" #, python-format msgid "Node %s could not be found." msgstr "找不到节点%s。" #, python-format msgid "Not able to acquire a free port for %(host)s" msgstr "无法为 %(host)s 获取可用端口" #, python-format msgid "Not able to bind %(host)s:%(port)d, %(error)s" msgstr "无法绑定 %(host)s:%(port)d,%(error)s" #, python-format msgid "" "Not all Virtual Functions of PF %(compute_node_id)s:%(address)s are free." msgstr "并非 PF %(compute_node_id)s:%(address)s 的所有虚拟功能都可用。" msgid "Not an rbd snapshot" msgstr "不是 rbd 快照" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "未针对映像 %(image_id)s 授权。" msgid "Not authorized." msgstr "未授权。" msgid "Not enough parameters to build a valid rule." msgstr "参数不够创建有效规则。" msgid "Not stored in rbd" msgstr "未存储在 rbd 中" msgid "Nothing was archived." msgstr "未归档任何内容。" #, python-format msgid "Nova does not support Cinder API version %(version)s" msgstr "Nova 不支持的 Cinder API 版本 %(version)s" #, python-format msgid "Nova requires QEMU version %s or greater." msgstr "Nova 需要 %s 或更高版本的 QEMU." #, python-format msgid "Nova requires Virtuozzo version %s or greater." msgstr "Nova 需要 %s 或更高版本的 Virtuozzo." #, python-format msgid "Nova requires libvirt version %s or greater." msgstr "Nova请求的 libvirt 版本%s 或更高。" msgid "Number of Rows Archived" msgstr "已归档行数" #, python-format msgid "Object action %(action)s failed because: %(reason)s" msgstr "由于 %(reason)s,对象操作 %(action)s 失败" msgid "Old volume is attached to a different instance." msgstr "旧卷绑定到一个不同的实例。" #, python-format msgid "One or more hosts already in availability zone(s) %s" msgstr "在可用区域%s中,已经有一个或多个主机" #, python-format msgid "Only administrators can sort servers by %s" msgstr "只有管理员可以基于 %s 对服务器进行排序" msgid "Only administrators may list deleted instances" msgstr "仅管理员可列示已删除的实例" msgid "Origin header does not match this host." msgstr "源头与这台主机不匹配。" msgid "Origin header not valid." msgstr "源头无效。" msgid "Origin header protocol does not match this host." msgstr "源头协议与这台不匹配主机。" #, python-format msgid "PCI Device %(node_id)s:%(address)s not found." 
msgstr "未找到PCI设备%(node_id)s:%(address)s。" #, python-format msgid "PCI alias %(alias)s is not defined" msgstr "PCI别名%(alias)s没有定义" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is %(status)s instead of " "%(hopestatus)s" msgstr "" "PCI设备%(compute_node_id)s:%(address)s是%(status)s,而不是%(hopestatus)s" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead of " "%(hopeowner)s" msgstr "" "PCI设备%(compute_node_id)s:%(address)s归%(owner)s所有,而不是%(hopeowner)s" #, python-format msgid "PCI device %(id)s not found" msgstr "无法找到PCI设备 %(id)s" #, python-format msgid "PCI device request %(requests)s failed" msgstr "PCI 设备请求 %(requests)s 失败" #, python-format msgid "Page size %(pagesize)s forbidden against '%(against)s'" msgstr "已对照“%(against)s”禁用页大小 %(pagesize)s" #, python-format msgid "Page size %(pagesize)s is not supported by the host." msgstr "主机不支持页大小 %(pagesize)s。" #, python-format msgid "" "Parameters %(missing_params)s not present in vif_details for vif %(vif_id)s. " "Check your Neutron configuration to validate that the macvtap parameters are " "correct." msgstr "" "在vif %(vif_id)s的vif_details详情中不存在参数%(missing_params)s。检查您的网络" "配置来验证macvtap参数是正确的。" #, python-format msgid "Path %s must be LVM logical volume" msgstr "路径 %s 必须为 LVM 逻辑卷" msgid "Paused" msgstr "已暂停" msgid "Personality file limit exceeded" msgstr "超过个性化文件限制" #, python-format msgid "" "Physical Function %(compute_node_id)s:%(address)s, related to VF " "%(compute_node_id)s:%(vf_address)s is %(status)s instead of %(hopestatus)s" msgstr "" "与 VF %(compute_node_id)s:%(vf_address)s 相关的物理功能 %(compute_node_id)s:" "%(address)s 为 %(status)s 而不是 %(hopestatus)s" #, python-format msgid "Physical network is missing for network %(network_uuid)s" msgstr "网络%(network_uuid)s的物理网络丢失" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "政策不允许 %(action)s 被执行。" #, python-format msgid "Port %(port_id)s is still in use." msgstr "端口 %(port_id)s 仍在使用中。" #, python-format msgid "Port %(port_id)s not usable for instance %(instance)s." msgstr "对于实例 %(instance)s,端口 %(port_id)s 不可用。" #, python-format msgid "" "Port %(port_id)s not usable for instance %(instance)s. Value %(value)s " "assigned to dns_name attribute does not match instance's hostname " "%(hostname)s" msgstr "" "端口 %(port_id)s 对实例 %(instance)s 不可用。分配给 dns_name 属性的值 " "%(value)s 与实例的主机名 %(hostname)s 不匹配" #, python-format msgid "Port %(port_id)s requires a FixedIP in order to be used." msgstr "端口%(port_id)s需要有一个固定IP才可以使用。" #, python-format msgid "Port %s is not attached" msgstr "端口%s没有连接" #, python-format msgid "Port id %(port_id)s could not be found." msgstr "找不到端口标识 %(port_id)s。" #, python-format msgid "Port update failed for port %(port_id)s: %(reason)s" msgstr "端口 %(port_id)s 更新失败: %(reason)s" #, python-format msgid "Provided video model (%(model)s) is not supported." msgstr "不支持提供视频模型(%(model)s)。" #, python-format msgid "Provided watchdog action (%(action)s) is not supported." msgstr "不支持提供的看守程序操作(%(action)s)。" msgid "QEMU guest agent is not enabled" msgstr "QEMU客户端代理未启动" #, python-format msgid "Quiescing is not supported in instance %(instance_id)s" msgstr "实例%(instance_id)s不支持停顿" #, python-format msgid "Quota class %(class_name)s could not be found." 
msgstr "找不到配额类 %(class_name)s。" msgid "Quota could not be found" msgstr "配额没有找到。" #, python-format msgid "" "Quota exceeded for %(overs)s: Requested %(req)s, but already used %(used)s " "of %(allowed)s %(overs)s" msgstr "" "对于%(overs)s配额已超过:请求%(req)s,但是已经使用 %(allowed)s %(overs)s的 " "%(used)s" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "对于资源,已超过配额:%(overs)s" msgid "Quota exceeded, too many key pairs." msgstr "已超过配额,密钥对太多。" msgid "Quota exceeded, too many server groups." msgstr "已超过配额,服务器组太多。" msgid "Quota exceeded, too many servers in group" msgstr "已超过配额,组中太多的服务器" #, python-format msgid "Quota exists for project %(project_id)s, resource %(resource)s" msgstr "项目 %(project_id)s 资源 %(resource)s 的配额存在。" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "没有为项目 %(project_id)s 找到配额。" #, python-format msgid "" "Quota for user %(user_id)s in project %(project_id)s could not be found." msgstr "无法在项目 %(project_id)s 中为用户 %(user_id)s 找到配额。" #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be greater than or equal to " "already used and reserved %(minimum)s." msgstr "" "资源%(resource)s的配额限制%(limit)s 必须大于或等于已用和保留值%(minimum)s。" #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be less than or equal to " "%(maximum)s." msgstr "资源%(resource)s的配额限制%(limit)s 必须少于或等于%(maximum)s。" msgid "Request body and URI mismatch" msgstr "请求主体和URI不匹配" msgid "Request is too large." msgstr "请求太大。" #, python-format msgid "Request of image %(image_id)s got BadRequest response: %(response)s" msgstr "镜像 %(image_id)s 的请求收到无效请求响应:%(response)s" #, python-format msgid "RequestSpec not found for instance %(instance_uuid)s" msgstr "找不到对应实例 %(instance_uuid)s 的 RequestSpec" msgid "Requested CPU control policy not supported by host" msgstr "主机不支持所请求 CPU 控制策略" #, python-format msgid "" "Requested hardware '%(model)s' is not supported by the '%(virt)s' virt driver" msgstr "“%(virt)s”虚拟驱动程序不支持所请求硬件“%(model)s”" #, python-format msgid "Requested image %(image)s has automatic disk resize disabled." msgstr "请求的镜像%(image)s禁用磁盘自动调整大小功能。" msgid "" "Requested instance NUMA topology cannot fit the given host NUMA topology" msgstr "请求实例的NUMA拓扑不能满足给定主机的NUMA拓扑" msgid "" "Requested instance NUMA topology together with requested PCI devices cannot " "fit the given host NUMA topology" msgstr "与在PCI设备请求一起的NUMA拓扑请求不能满足给定主机的NUMA拓扑" #, python-format msgid "" "Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to " "satisfy for vcpus count %(vcpus)d" msgstr "" "请求的 vCPU 限制 %(sockets)d:%(cores)d:%(threads)d 无法满足vcpus 计数 " "%(vcpus)d" #, python-format msgid "Rescue device does not exist for instance %s" msgstr "对于实例%s,营救设备不存在" #, python-format msgid "Resize error: %(reason)s" msgstr "发生调整大小错误:%(reason)s" msgid "Resize to zero disk flavor is not allowed." msgstr "不允许将云主机类型的磁盘大小缩减为零。" msgid "Resource could not be found." msgstr "资源没有找到。" msgid "Resumed" msgstr "已恢复" #, python-format msgid "Root element name should be '%(name)s' not '%(tag)s'" msgstr "根元素名应该是 '%(name)s' 而不是'%(tag)s'" #, python-format msgid "Running batches of %i until complete" msgstr "运行 %i 的批处理直到完成" #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "调度器主机过滤器 %(filter_name)s 没有找到。" #, python-format msgid "Security group %(name)s is not found for project %(project)s" msgstr "在项目%(project)s没有找到安全组%(name)s" #, python-format msgid "" "Security group %(security_group_id)s not found for project %(project_id)s." 
msgstr "没有找到安全组 %(security_group_id)s 针对项目 %(project_id)s 。" #, python-format msgid "Security group %(security_group_id)s not found." msgstr "安全组 %(security_group_id)s 没有找到。" #, python-format msgid "" "Security group %(security_group_name)s already exists for project " "%(project_id)s." msgstr "对于项目 %(project_id)s,安全组 %(security_group_name)s 已经存在。" #, python-format msgid "" "Security group %(security_group_name)s not associated with the instance " "%(instance)s" msgstr "安全组%(security_group_name)s 没有与实例%(instance)s 绑定" msgid "Security group id should be uuid" msgstr "安全组标识应该为 uuid" msgid "Security group name cannot be empty" msgstr "安全组名称不能是空" msgid "Security group not specified" msgstr "没有指定安全组" #, python-format msgid "Server %(server_id)s has no tag '%(tag)s'" msgstr "服务器%(server_id)s上未发现标签'%(tag)s'" #, python-format msgid "Server disk was unable to be resized because: %(reason)s" msgstr "由于: %(reason)s,实例磁盘空间不能修改" msgid "Server does not exist" msgstr "服务器不存在" #, python-format msgid "ServerGroup policy is not supported: %(reason)s" msgstr "服务器组策略不支持: %(reason)s" msgid "ServerGroupAffinityFilter not configured" msgstr "没有配置ServerGroupAffinityFilter" msgid "ServerGroupAntiAffinityFilter not configured" msgstr "没有配置ServerGroupAntiAffinityFilter" msgid "ServerGroupSoftAffinityWeigher not configured" msgstr "未配置 ServerGroupSoftAffinityWeigher" msgid "ServerGroupSoftAntiAffinityWeigher not configured" msgstr "未配置 ServerGroupSoftAntiAffinityWeigher" #, python-format msgid "Service %(service_id)s could not be found." msgstr "服务 %(service_id)s 没有找到。" #, python-format msgid "Service %s not found." msgstr "服务%s没有找到。" msgid "Service is unavailable at this time." msgstr "此时的付不可用。" #, python-format msgid "Service with host %(host)s binary %(binary)s exists." msgstr "主机 %(host)s 二进制 %(binary)s 的服务已存在。" #, python-format msgid "Service with host %(host)s topic %(topic)s exists." msgstr "主机 %(host)s 主题 %(topic)s 的服务已存在。" msgid "Set admin password is not supported" msgstr "设置管理员密码不支持" msgid "Set the cell name." msgstr "设置 cell 名称" #, python-format msgid "Share '%s' is not supported" msgstr "不支持共享 '%s' " #, python-format msgid "Share level '%s' cannot have share configured" msgstr "共享级别 '%s' 不用共享配置" #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "快照 %(snapshot_id)s 没有找到。" msgid "Some required fields are missing" msgstr "有些必填字段没有填写" #, python-format msgid "" "Something went wrong when deleting a volume snapshot: rebasing a " "%(protocol)s network disk using qemu-img has not been fully tested" msgstr "" "删除卷快照时出错:使用 qemu-img 对 %(protocol)s 网络磁盘重建基础的操作未经全" "面测试" msgid "Sort direction size exceeds sort key size" msgstr "分类大小超过分类关键值大小" msgid "Sort key supplied was not valid." msgstr "提供的排序键无效。" msgid "Specified fixed address not assigned to instance" msgstr "指定的固定IP地址没有分配给实例" msgid "Started" msgstr "已开始" msgid "Stopped" msgstr "已停止" #, python-format msgid "Storage error: %(reason)s" msgstr "存储错误:%(reason)s" #, python-format msgid "Storage policy %s did not match any datastores" msgstr "存储策略 %s 不匹配任何存储" msgid "Success" msgstr "成功" msgid "Suspended" msgstr "已挂起" msgid "Swap drive requested is larger than instance type allows." 
msgstr "交换驱动器的请求大于实例类型允许。" msgid "Table" msgstr "表" #, python-format msgid "Task %(task_name)s is already running on host %(host)s" msgstr "任务 %(task_name)s 已在主机 %(host)s 上运行" #, python-format msgid "Task %(task_name)s is not running on host %(host)s" msgstr "任务 %(task_name)s 未在主机 %(host)s 上运行" #, python-format msgid "The PCI address %(address)s has an incorrect format." msgstr "PCI地址%(address)s格式不正确。" #, python-format msgid "The console port range %(min_port)d-%(max_port)d is exhausted." msgstr "控制台端口范围%(min_port)d-%(max_port)d已经耗尽。" msgid "The created instance's disk would be too small." msgstr "将创建的实例磁盘太小。" msgid "The current driver does not support preserving ephemeral partitions." msgstr "当前驱动不支持保存瞬时分区。" msgid "The default PBM policy doesn't exist on the backend." msgstr "在后台,缺省的PBM策略不存在。" msgid "The floating IP request failed with a BadRequest" msgstr "由于坏的请求,浮动IP请求失败" msgid "" "The instance requires a newer hypervisor version than has been provided." msgstr "该实例需要比当前版本更新的虚拟机管理程序。" #, python-format msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d" msgstr "定义的端口数量:%(ports)d 超过限制:%(quota)d" #, python-format msgid "The number of tags exceeded the per-server limit %d" msgstr "标签数量超过了保护限制,上限数量为%d" #, python-format msgid "The provided RNG device path: (%(path)s) is not present on the host." msgstr "主机上不存在提供的RNG设备路径:(%(path)s)。" msgid "The request is invalid." msgstr "请求无效。" #, python-format msgid "" "The requested amount of video memory %(req_vram)d is higher than the maximum " "allowed by flavor %(max_vram)d." msgstr "申请的显存数 %(req_vram)d 超过了云主机类型允许的最大值 %(max_vram)d 。" msgid "The requested availability zone is not available" msgstr "请求的可用域是不可用的!" msgid "The requested functionality is not supported." msgstr "不支持请求的功能" #, python-format msgid "The specified cluster '%s' was not found in vCenter" msgstr "在vCenter中没有找到指定的集群'%s'" #, python-format msgid "The supplied device path (%(path)s) is in use." msgstr "提供的设备路径 (%(path)s) 正在使用中。" #, python-format msgid "The supplied device path (%(path)s) is invalid." msgstr "提供的设备路径 (%(path)s) 是无效的。" #, python-format msgid "" "The supplied disk path (%(path)s) already exists, it is expected not to " "exist." msgstr "提供的磁盘路径 (%(path)s) 已经存在,预计是不存在的。" msgid "The supplied hypervisor type of is invalid." msgstr "提供的虚拟机管理程序类型无效。" msgid "The target host can't be the same one." msgstr "目标主机不能是当前主机。" #, python-format msgid "The token '%(token)s' is invalid or has expired" msgstr "令牌“%(token)s”无效或已到期" msgid "The uuid of the cell to delete." msgstr "要删除的 cell 的 uuid" #, python-format msgid "" "The volume cannot be assigned the same device name as the root device %s" msgstr "卷不能分配不能用与root设备%s一样的设备名称" msgid "There are not enough hosts available." msgstr "没有足够的主机可用。" #, python-format msgid "There is no such action: %s" msgstr "没有该动作:%s" #, python-format msgid "" "This compute node's hypervisor is older than the minimum supported version: " "%(version)s." msgstr "此计算节点的 hypervisor 的版本低于最低受支持版本:%(version)s。" msgid "" "This method needs to be called with either networks=None and port_ids=None " "or port_ids and networks as not none." msgstr "" "调用此方法时,需要以下设置:将 networks 和 port_ids 指定为 None,或者将 " "networks 和 port_ids 指定为非 None。" #, python-format msgid "This rule already exists in group %s" msgstr "这条规则已经存在于组%s 中" #, python-format msgid "" "This service is older (v%(thisver)i) than the minimum (v%(minver)i) version " "of the rest of the deployment. Unable to continue." 
msgstr "" "此服务的版本 (v%(thisver)i) 低于部署的余下部分的最低版本 (v%(minver)i)。无法" "继续。" msgid "Timeout waiting for response from cell" msgstr "等待来自单元的响应时发生超时" #, python-format msgid "Timeout while checking if we can live migrate to host: %s" msgstr "当检查我们是否可以在线迁移到主机%s时,超时。" msgid "To and From ports must be integers" msgstr "目的和源端口必须是整数" msgid "Token not found" msgstr "找不到令牌" msgid "Triggering crash dump is not supported" msgstr "触发崩溃转储不受支持" msgid "Type and Code must be integers for ICMP protocol type" msgstr "类型和编码必须是ICMP协议类型" msgid "UEFI is not supported" msgstr "UEFI 不受支持" msgid "UUID" msgstr "UUID" #, python-format msgid "" "Unable to associate floating IP %(address)s to any fixed IPs for instance " "%(id)s. Instance has no fixed IPv4 addresses to associate." msgstr "" "无法将浮动 IP %(address)s 关联至实例 %(id)s 的任何固定 IP。实例没有要关联的固" "定 IPv4 地址。" #, python-format msgid "" "Unable to associate floating IP %(address)s to fixed IP %(fixed_address)s " "for instance %(id)s. Error: %(error)s" msgstr "" "无法将浮动 IP %(address)s 关联至实例 %(id)s 的任何固定 IP %(fixed_address)s。" "错误:%(error)s" #, python-format msgid "Unable to automatically allocate a network for project %(project_id)s" msgstr "无法为项目 %(project_id)s 自动分配网络" #, python-format msgid "Unable to convert image to %(format)s: %(exp)s" msgstr "无法将镜像转换为 %(format)s:%(exp)s" #, python-format msgid "Unable to convert image to raw: %(exp)s" msgstr "无法将镜像转换为原始格式:%(exp)s" #, python-format msgid "Unable to determine disk bus for '%s'" msgstr "无法为“%s”确定磁盘总线" #, python-format msgid "Unable to determine disk prefix for %s" msgstr "无法为 %s 确定磁盘前缀" #, python-format msgid "Unable to find host for Instance %s" msgstr "无法找到实例 %s 的宿主机" msgid "Unable to find iSCSI Target" msgstr "找不到 iSCSI 目标" msgid "Unable to find volume" msgstr "找不到卷" msgid "Unable to get host UUID: /etc/machine-id does not exist" msgstr "不能获取主机UUID:/etc/machine-id不存在" msgid "Unable to get host UUID: /etc/machine-id is empty" msgstr "不能获取主机UUID:/etc/machine-id 为空" msgid "" "Unable to launch multiple instances with a single configured port ID. Please " "launch your instance one by one with different ports." msgstr "" "无法启动多个实例,这些实例配置使用同一个端口ID。请一个一个启动您的实例,并且" "实例间使用不同的端口。" #, python-format msgid "" "Unable to migrate %(instance_uuid)s to %(dest)s: Lack of memory(host:" "%(avail)s <= instance:%(mem_inst)s)" msgstr "" "无法将 %(instance_uuid)s 迁移至 %(dest)s:内存不足(主机:%(avail)s <= 实例:" "%(mem_inst)s)" #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too large(available " "on destination host:%(available)s < need:%(necessary)s)" msgstr "" "无法迁移 %(instance_uuid)s:实例的磁盘太大(在目标主机上可用的:" "%(available)s < 需要:%(necessary)s)" #, python-format msgid "" "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." msgstr "无法把实例 (%(instance_id)s) 迁移到当前主机 (%(host)s)。" msgid "Unable to resize disk down." msgstr "不能向下调整磁盘。" msgid "Unable to set password on instance" msgstr "无法对实例设置密码" msgid "Unable to shrink disk." msgstr "不能压缩磁盘。" #, python-format msgid "Unacceptable CPU info: %(reason)s" msgstr "CPU信息不能被接受:%(reason)s。" msgid "Unacceptable parameters." msgstr "无法接受的参数。" #, python-format msgid "Unavailable console type %(console_type)s." msgstr "不可用的控制台类型 %(console_type)s." msgid "" "Undefined Block Device Mapping root: BlockDeviceMappingList contains Block " "Device Mappings from multiple instances." 
msgstr "" "未定义的块设备映射根:BlockDeviceMappingList 包含来自多个实例的块设备映射。" #, python-format msgid "Unexpected aggregate action %s" msgstr "未预期的聚合动作%s" msgid "Unexpected type adding stats" msgstr "类型添加统计发生意外" #, python-format msgid "Unexpected vif_type=%s" msgstr "存在意外 vif_type=%s" msgid "Unknown" msgstr "未知" msgid "Unknown action" msgstr "未知操作" #, python-format msgid "Unknown auth type: %s" msgstr "未知认证类型:%s" #, python-format msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat." msgstr "配置驱动器格式 %(format)s 未知。请选择下列其中一项:iso9660 或 vfat。" #, python-format msgid "Unknown delete_info type %s" msgstr "未知delete_info类型%s" #, python-format msgid "Unknown image_type=%s" msgstr "image_type=%s 未知" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "配额资源 %(unknown)s 未知。" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "排序方向未知,必须为“降序”或“升序”" #, python-format msgid "Unknown type: %s" msgstr "未知类型:%s" msgid "Unrecognized legacy format." msgstr "不能识别的格式。" #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "无法识别的 read_deleted 取值”%s“" #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "对于CONF.running_deleted_instance_action,无法识别的值 '%s'" #, python-format msgid "Unshelve attempted but the image %s cannot be found." msgstr "试图取消废弃但是镜像%s没有找到。" msgid "Unsupported Content-Type" msgstr "不支持的Content-Type" #, python-format msgid "User %(username)s not found in password file." msgstr "在密码文件中找不到用户 %(username)s。" #, python-format msgid "User %(username)s not found in shadow file." msgstr "在影子文件中找不到用户 %(username)s。" msgid "User data needs to be valid base 64." msgstr "用户数据需要是有效的基本 64 位。" msgid "User does not have admin privileges" msgstr "用户没有管理员权限" msgid "" "Using different block_device_mapping syntaxes is not allowed in the same " "request." msgstr "在同一个请求中,不允许使用不同的 block_device_mapping语法。" #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "API不支持版本%(req_ver)s。小版本号是%(min_ver)s,大版本号是%(max_ver)s。" msgid "Virtual Interface creation failed" msgstr "虚拟接口创建失败" msgid "Virtual interface plugin failed" msgstr "虚拟接口插件失效" #, python-format msgid "Virtual machine mode '%(vmmode)s' is not recognised" msgstr "虚拟机模式“%(vmmode)s”无法识别" #, python-format msgid "Virtual machine mode '%s' is not valid" msgstr "虚拟机模式 '%s'无效" #, python-format msgid "Virtualization type '%(virt)s' is not supported by this compute driver" msgstr "此计算驱动程序不支持虚拟化类型“%(virt)s”" #, python-format msgid "Volume %(volume_id)s could not be attached. Reason: %(reason)s" msgstr "无法挂载卷 %(volume_id)s。原因:%(reason)s" #, python-format msgid "Volume %(volume_id)s could not be detached. Reason: %(reason)s" msgstr "无法卸载卷 %(volume_id)s 。原因:%(reason)s" #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "卷 %(volume_id)s 没有找到。" #, python-format msgid "" "Volume %(volume_id)s did not finish being created even after we waited " "%(seconds)s seconds or %(attempts)s attempts. And its status is " "%(volume_status)s." msgstr "" "在等待%(seconds)s 秒或%(attempts)s尝试后,卷%(volume_id)s仍然没有创建成功。它" "的状态是%(volume_status)s。" msgid "Volume does not belong to the requested instance." msgstr "卷不属于请求的实例。" #, python-format msgid "" "Volume encryption is not supported for %(volume_type)s volume %(volume_id)s" msgstr "%(volume_type)s 类型的卷 %(volume_id)s不支持卷加密" #, python-format msgid "" "Volume is smaller than the minimum size specified in image metadata. 
Volume " "size is %(volume_size)i bytes, minimum size is %(image_min_disk)i bytes." msgstr "" "卷比镜像元数据中指定的最小值还小。卷大小是%(volume_size)i字节,最小值" "是%(image_min_disk)i字节。" #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " "support custom block size" msgstr "卷设置块大小,但是当前libvirt监测器 '%s'不支持定制化块大小" msgid "When resizing, instances must change flavor!" msgstr "调整大小时,实例必须更换云主机类型!" #, python-format msgid "Wrong quota method %(method)s used on resource %(res)s" msgstr "错误的配额方法%(method)s用在资源%(res)s" msgid "X-Forwarded-For is missing from request." msgstr "请求中缺少 X-Forwarded-For 。" msgid "X-Instance-ID header is missing from request." msgstr "请求中缺少 X-Instance-ID 头。" msgid "X-Instance-ID-Signature header is missing from request." msgstr "请求中缺少 X-Instance-ID-Signature 。" msgid "X-Metadata-Provider is missing from request." msgstr "请求中缺少X-Metadata-Provider。" msgid "X-Tenant-ID header is missing from request." msgstr "请求中缺少 X-Tenant-ID 。" msgid "You are not allowed to delete the image." msgstr "不允许删除该映像。" msgid "" "You are not authorized to access the image the instance was started with." msgstr "您无权访问实例启动的镜像。" msgid "You must implement __call__" msgstr "你必须执行 __call__" msgid "You should specify images_rbd_pool flag to use rbd images." msgstr "为了使用rbd镜像,您应该指定 images_rbd_pool标记。" msgid "You should specify images_volume_group flag to use LVM images." msgstr "为了使用LVM镜像,您应该指定 images_volume_group标记" msgid "Zero floating IPs available." msgstr "没有可用浮动 IP。" msgid "admin password can't be changed on existing disk" msgstr "无法在现有磁盘上更改管理员密码" msgid "cannot understand JSON" msgstr "无法理解JSON" msgid "clone() is not implemented" msgstr "clone()没有实现" msgid "complete" msgstr "完成" #, python-format msgid "connect info: %s" msgstr "连接信息:%s" #, python-format msgid "connecting to: %(host)s:%(port)s" msgstr "连接到:%(host)s:%(port)s" msgid "direct_snapshot() is not implemented" msgstr "未实现 direct_snapshot()" #, python-format msgid "disk type '%s' not supported" msgstr "不支持磁盘类型'%s' " #, python-format msgid "empty project id for instance %s" msgstr "用于实例 %s 的项目标识为空" msgid "error setting admin password" msgstr "设置管理员密码时出错" #, python-format msgid "error: %s" msgstr "错误:%s" #, python-format msgid "failed to generate X509 fingerprint. Error message: %s" msgstr "生成X509指纹失败。错误消息:%s" msgid "failed to generate fingerprint" msgstr "生成fingerprint失败" msgid "filename cannot be None" msgstr "文件名不能为None" msgid "floating IP is already associated" msgstr "已关联浮动 IP" msgid "floating IP not found" msgstr "找不到浮动 IP" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s 由 %(backing_file)s 支持" #, python-format msgid "href %s does not contain version" msgstr "href %s 不包含版本" msgid "image already mounted" msgstr "镜像已经挂载" #, python-format msgid "instance %s is not running" msgstr "实例%s没有运行" msgid "instance is a required argument to use @refresh_cache" msgstr "使用 @refresh_cache 时,instance 是必需的自变量" msgid "instance is not in a suspended state" msgstr "实例不在挂起状态" msgid "instance is not powered on" msgstr "实例未启动" msgid "instance is powered off and cannot be suspended." 
msgstr "已对实例关闭电源,无法暂挂该实例。" #, python-format msgid "instance_id %s could not be found as device id on any ports" msgstr "当设备在任意端口上时,无法找到实例id %s" msgid "is_public must be a boolean" msgstr "is_public 必须为布尔值" msgid "keymgr.fixed_key not defined" msgstr "keymgr.fixed_key 没有定义" msgid "l3driver call to add floating IP failed" msgstr "用于添加浮动 IP 的 l3driver 调用失败" #, python-format msgid "libguestfs installed but not usable (%s)" msgstr "libguestfs安装了,但是不可用(%s)" #, python-format msgid "libguestfs is not installed (%s)" msgstr "libguestfs没有安装 (%s)" #, python-format msgid "marker [%s] not found" msgstr "没有找到标记 [%s]" #, python-format msgid "max rows must be <= %(max_value)d" msgstr "最大行数必须小于或等于 %(max_value)d" msgid "max_count cannot be greater than 1 if an fixed_ip is specified." msgstr "如果指定了一个固定IP,最大数max_count不能大于1。" msgid "min_count must be <= max_count" msgstr "min_count 必须小于或等于 max_count" #, python-format msgid "nbd device %s did not show up" msgstr "nbd 设备 %s 没有出现" msgid "nbd unavailable: module not loaded" msgstr "NBD不可用:模块没有加载" #, python-format msgid "no match found for %s" msgstr "对于%s没有找到匹配的" #, python-format msgid "no usable parent snapshot for volume %s" msgstr "卷 %s 没有可用父快照" #, python-format msgid "no write permission on storage pool %s" msgstr "没有对存储池 %s 的写许可权" #, python-format msgid "not able to execute ssh command: %s" msgstr "不能执行ssh命令:%s" msgid "old style configuration can use only dictionary or memcached backends" msgstr "旧样式配置只能使用字典或 memcached 后端" msgid "operation time out" msgstr "操作超时" #, python-format msgid "partition %s not found" msgstr "找不到分区 %s" #, python-format msgid "partition search unsupported with %s" msgstr "在具有 %s 的情况下,不支持搜索分区" msgid "pause not supported for vmwareapi" msgstr "vmwareapi 不支持暂停" msgid "printable characters with at least one non space character" msgstr "可显示字符,带有至少一个非空格字符" msgid "printable characters. Can not start or end with whitespace." msgstr "可显示字符。不能以空格开头或结尾。" #, python-format msgid "qemu-img failed to execute on %(path)s : %(exp)s" msgstr "对 %(path)s 执行 qemu-img 失败:%(exp)s" #, python-format msgid "qemu-nbd error: %s" msgstr "qemu-nbd 错误:%s" msgid "rbd python libraries not found" msgstr "没有找到rbd pyhon库" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "read_deleted 只能是“no”、“yes”或“only”其中一项,而不能是 %r" msgid "serve() can only be called once" msgstr "serve() 只能调用一次" msgid "service is a mandatory argument for DB based ServiceGroup driver" msgstr "service 对于基于数据库的 ServiceGroup 驱动程序是必需的自变量" msgid "service is a mandatory argument for Memcached based ServiceGroup driver" msgstr "service 对于基于 Memcached 的 ServiceGroup 驱动程序是必需的自变量" msgid "set_admin_password is not implemented by this driver or guest instance." 
msgstr "此驱动程序或 guest 实例未实现 set_admin_password。" #, python-format msgid "snapshot for %s" msgstr "%s 的快照" msgid "snapshot_id required in create_info" msgstr "在create_info中必须有snapshot_id" msgid "stopped" msgstr "已停止" msgid "token not provided" msgstr "令牌没有提供" msgid "too many body keys" msgstr "过多主体密钥" msgid "unpause not supported for vmwareapi" msgstr "vmwareapi 不支持取消暂停" #, python-format msgid "vg %s must be LVM volume group" msgstr "vg %s 必须为 LVM 卷组" #, python-format msgid "vhostuser_sock_path not present in vif_details for vif %(vif_id)s" msgstr "在vif %(vif_id)s 的vif_details详情中不存在vhostuser_sock_path" #, python-format msgid "vif type %s not supported" msgstr "vif 类型 %s 不受支持" msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "对于此 vif_driver 实现,必须存在 vif_type 参数" #, python-format msgid "volume %s already attached" msgstr "卷%s已经绑定" #, python-format msgid "" "volume '%(vol)s' status must be 'in-use'. Currently in '%(status)s' status" msgstr "卷 '%(vol)s' 状态必须是‘使用中‘。当前处于 '%(status)s' 状态" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315688.893605 nova-32.0.0/nova/locale/zh_TW/0000775000175000017500000000000000000000000016017 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3736086 nova-32.0.0/nova/locale/zh_TW/LC_MESSAGES/0000775000175000017500000000000000000000000017604 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/locale/zh_TW/LC_MESSAGES/nova.po0000664000175000017500000024724500000000000021125 0ustar00zuulzuul00000000000000# Translations template for nova. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the nova project. # # Translators: # Chao-Hsiung Liao , 2012 # FIRST AUTHOR , 2011 # Pellaeon Lin , 2013 # Pellaeon Lin , 2013 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: nova VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-07-04 18:13+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:10+0000\n" "Last-Translator: Copied by Zanata \n" "Language: zh_TW\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Chinese (Taiwan)\n" #, python-format msgid "%(address)s is not a valid IP v4/6 address." msgstr "%(address)s 不是有效的 IPv4/IPv6 位址。" #, python-format msgid "" "%(binary)s attempted direct database access which is not allowed by policy" msgstr "%(binary)s 嘗試直接存取資料庫,但原則不容許這樣做" #, python-format msgid "%(cidr)s is not a valid IP network." msgstr "%(cidr)s 不是有效的 IP 網路。" #, python-format msgid "%(field)s should not be part of the updates." 
msgstr "%(field)s 不應是更新的一部分。" #, python-format msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB" msgstr "已指派 %(memsize)d MB 記憶體,但預期為 %(memtotal)d MB" #, python-format msgid "%(path)s is not on local storage: %(reason)s" msgstr "%(path)s 不在本端儲存體上:%(reason)s" #, python-format msgid "%(path)s is not on shared storage: %(reason)s" msgstr "%(path)s 不在共用儲存體上:%(reason)s" #, python-format msgid "%(total)i rows matched query %(meth)s, %(done)i migrated" msgstr "%(total)i 列與查詢 %(meth)s 相符,已移轉 %(done)i" #, python-format msgid "%(type)s hypervisor does not support PCI devices" msgstr "%(type)s Hypervisor 不支援 PCI 裝置" #, python-format msgid "%s does not support disk hotplug." msgstr "%s 不支援磁碟熱插拔。" #, python-format msgid "%s format is not supported" msgstr "不支援 %s 格式" #, python-format msgid "%s is not supported." msgstr "不支援 %s。" #, python-format msgid "%s must be either 'MANUAL' or 'AUTO'." msgstr "%s 必須是 'MANUAL' 或 'AUTO'。" #, python-format msgid "'%(other)s' should be an instance of '%(cls)s'" msgstr "'%(other)s' 應該是 '%(cls)s' 的實例" msgid "'qemu-img info' parsing failed." msgstr "'qemu-img info' 剖析失敗。" #, python-format msgid "'rxtx_factor' argument must be a float between 0 and %g" msgstr "'rxtx_factor' 引數必須是介於 0 和 %g 之間的浮點數" #, python-format msgid "A NetworkModel is required in field %s" msgstr "欄位 %s 需要 NetworkModel" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "API 版本字串 %(version)s 格式無效。格式必須為 MajorNum.MinorNum。" #, python-format msgid "API version %(version)s is not supported on this method." msgstr "此方法不支援 API %(version)s 版。" msgid "Access list not available for public flavors." msgstr "存取清單不適用於公用特性。" #, python-format msgid "Action %s not found" msgstr "找不到動作 %s" #, python-format msgid "" "Action for request_id %(request_id)s on instance %(instance_uuid)s not found" msgstr "" "在實例 %(instance_uuid)s 上找不到對 request_id %(request_id)s 執行的動作" #, python-format msgid "Action: '%(action)s', calling method: %(meth)s, body: %(body)s" msgstr "動作:'%(action)s',呼叫方法:%(meth)s,主體:%(body)s" #, python-format msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries" msgstr "重試 %(retries)s 次之後,對聚集 %(id)s 新增 meta 資料仍失敗" #, python-format msgid "Aggregate %(aggregate_id)s already has host %(host)s." msgstr "聚集 %(aggregate_id)s 已有主機 %(host)s。" #, python-format msgid "Aggregate %(aggregate_id)s could not be found." msgstr "找不到聚集 %(aggregate_id)s。" #, python-format msgid "Aggregate %(aggregate_id)s has no host %(host)s." msgstr "聚集 %(aggregate_id)s 沒有主機 %(host)s。" #, python-format msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." msgstr "聚集 %(aggregate_id)s 沒有索引鍵為 %(metadata_key)s 的 meta 資料。" #, python-format msgid "Aggregate %(aggregate_name)s already exists." msgstr "聚集 %(aggregate_name)s 已存在。" #, python-format msgid "Aggregate %s does not support empty named availability zone" msgstr "聚集 %s 不支援空白命名的可用區域" #, python-format msgid "An invalid 'name' value was provided. The name must be: %(reason)s" msgstr "所提供的「名稱」值無效。名稱必須是:%(reason)s" msgid "An unknown error has occurred. Please try your request again." msgstr "發生不明錯誤。請重試要求。" msgid "An unknown exception occurred." 
msgstr "發生一個未知例外" #, python-format msgid "Architecture name '%(arch)s' is not recognised" msgstr "未辨識架構名稱 '%(arch)s'" #, python-format msgid "Architecture name '%s' is not valid" msgstr "架構名稱 '%s' 無效" #, python-format msgid "" "Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty pool" msgstr "嘗試從空儲存區耗用 PCI 裝置 %(compute_node_id)s:%(address)s" msgid "Attempted overwrite of an existing value." msgstr "試圖改寫現有值。" #, python-format msgid "Attribute not supported: %(attr)s" msgstr "不支援屬性:%(attr)s" #, python-format msgid "Bad network format: missing %s" msgstr "錯誤的網路格式:遺漏了 %s" msgid "Bad networks format" msgstr "錯誤的網路格式" #, python-format msgid "Bad networks format: network uuid is not in proper format (%s)" msgstr "錯誤的網路格式:網路 UUID 不是適當的格式 (%s)" #, python-format msgid "Bad prefix for network in cidr %s" msgstr "CIDR %s 中網路的字首錯誤" #, python-format msgid "" "Binding failed for port %(port_id)s, please check neutron logs for more " "information." msgstr "針對埠 %(port_id)s 的連結失敗,請檢查 Neutron 日誌,以取得相關資訊。" msgid "Blank components" msgstr "空白元件" msgid "" "Blank volumes (source: 'blank', dest: 'volume') need to have non-zero size" msgstr "空白磁區(來源:'blank',目的地:'volume')需要具有非零大小" #, python-format msgid "Block Device %(id)s is not bootable." msgstr "區塊裝置 %(id)s 不可啟動。" #, python-format msgid "" "Block Device Mapping %(volume_id)s is a multi-attach volume and is not valid " "for this operation." msgstr "「區塊裝置對映」%(volume_id)s 是一個多重連接磁區,且不適用於此作業。" msgid "Block Device Mapping cannot be converted to legacy format. " msgstr "無法將「區塊裝置對映」轉換為舊式格式。" msgid "Block Device Mapping is Invalid." msgstr "「區塊裝置對映」無效。" #, python-format msgid "Block Device Mapping is Invalid: %(details)s" msgstr "「區塊裝置對映」無效:%(details)s" msgid "" "Block Device Mapping is Invalid: Boot sequence for the instance and image/" "block device mapping combination is not valid." msgstr "「區塊裝置對映」無效:實例的開機順序和映像檔或區塊裝置對應的組合無效。" msgid "" "Block Device Mapping is Invalid: You specified more local devices than the " "limit allows" msgstr "「區塊裝置對映」無效:您指定了超越限制數量的本地裝置" #, python-format msgid "Block Device Mapping is Invalid: failed to get image %(id)s." msgstr "「區塊裝置對映」無效:無法取得映像檔 %(id)s。" #, python-format msgid "Block Device Mapping is Invalid: failed to get snapshot %(id)s." msgstr "「區塊裝置對映」無效:無法取得 Snapshot %(id)s。" #, python-format msgid "Block Device Mapping is Invalid: failed to get volume %(id)s." msgstr "「區塊裝置對映」無效:無法取得磁區 %(id)s。" msgid "Block migration can not be used with shared storage." msgstr "區塊移轉不能與共用儲存體配合使用。" msgid "Boot index is invalid." 
msgstr "啟動索引無效。" #, python-format msgid "Build of instance %(instance_uuid)s aborted: %(reason)s" msgstr "建置實例 %(instance_uuid)s 已中止:%(reason)s" #, python-format msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s" msgstr "建置實例 %(instance_uuid)s 已重新排定:%(reason)s" #, python-format msgid "BuildRequest not found for instance %(uuid)s" msgstr "找不到實例 %(uuid)s 的 BuildRequest" msgid "CPU and memory allocation must be provided for all NUMA nodes" msgstr "必須為所有 NUMA 節點都提供 CPU 和記憶體配置" #, python-format msgid "" "CPU doesn't have compatibility.\n" "\n" "%(ret)s\n" "\n" "Refer to %(u)s" msgstr "" "CPU 不相容。\n" "\n" "%(ret)s\n" "\n" "請參閱 %(u)s" #, python-format msgid "CPU number %(cpunum)d is assigned to two nodes" msgstr "CPU 數目 %(cpunum)d 已指派給兩個節點" #, python-format msgid "CPU number %(cpunum)d is larger than max %(cpumax)d" msgstr "CPU 數目 %(cpunum)d 大於上限 %(cpumax)d" #, python-format msgid "CPU number %(cpuset)s is not assigned to any node" msgstr "CPU 數目 %(cpuset)s 未指派給任何節點" msgid "Can not add access to a public flavor." msgstr "無法新增對公用特性的存取權。" msgid "Can not find requested image" msgstr "找不到所要求的映像檔" #, python-format msgid "Can not handle authentication request for %d credentials" msgstr "無法處理對 %d 認證的鑑別要求" msgid "Can't retrieve root device path from instance libvirt configuration" msgstr "無法從實例 libVirt 配置擷取根裝置路徑" #, python-format msgid "" "Cannot '%(action)s' instance %(server_id)s while it is in %(attr)s %(state)s" msgstr "無法%(action)s實例 %(server_id)s,因為該實例處於 %(attr)s %(state)s" #, python-format msgid "Cannot add host to aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "無法將主機新增至聚集 %(aggregate_id)s。原因:%(reason)s。" msgid "Cannot attach one or more volumes to multiple instances" msgstr "無法將一個以上的磁區連接至多個實例" #, python-format msgid "Cannot call %(method)s on orphaned %(objtype)s object" msgstr "無法對孤立的 %(objtype)s 物件呼叫 %(method)s" #, python-format msgid "" "Cannot determine the parent storage pool for %s; cannot determine where to " "store images" msgstr "無法判定 %s 的母項儲存區;無法判定用來儲存映像檔的位置" msgid "Cannot find image for rebuild" msgstr "找不到要重建的映像檔" #, python-format msgid "Cannot remove host %(host)s in aggregate %(id)s" msgstr "無法移除聚集 %(id)s 中的主機 %(host)s" #, python-format msgid "Cannot remove host from aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "無法從聚集 %(aggregate_id)s 移除主機。原因:%(reason)s。" msgid "Cannot rescue a volume-backed instance" msgstr "無法救援以磁區為基礎的實例" msgid "" "Cannot set cpu thread pinning policy in a non dedicated cpu pinning policy" msgstr "無法在非專用 CPU 固定原則中設定 CPU 執行緒固定原則" msgid "Cannot set realtime policy in a non dedicated cpu pinning policy" msgstr "無法在非專用 CPU 固定原則中設定即時原則" #, python-format msgid "Cannot update aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "無法更新聚集 %(aggregate_id)s。原因:%(reason)s。" #, python-format msgid "" "Cannot update metadata of aggregate %(aggregate_id)s. Reason: %(reason)s." msgstr "無法更新聚集 %(aggregate_id)s 的 meta 資料。原因:%(reason)s。" #, python-format msgid "Cell %(uuid)s has no mapping." msgstr "Cell %(uuid)s 沒有對映。" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "變更會使下列資源的用量小於 0:%(unders)s" #, python-format msgid "Class %(class_name)s could not be found: %(exception)s" msgstr "找不到類別 %(class_name)s:%(exception)s" #, python-format msgid "Compute host %(host)s could not be found." msgstr "找不到計算主機 %(host)s。" #, python-format msgid "Compute host %s not found." msgstr "找不到計算主機 %s。" #, python-format msgid "Compute service of %(host)s is still in use." 
msgstr "%(host)s 的計算服務仍在使用中。" #, python-format msgid "Compute service of %(host)s is unavailable at this time." msgstr "此時無法使用 %(host)s 的計算服務。" #, python-format msgid "Config drive format '%(format)s' is not supported." msgstr "不支援配置磁碟機格式 '%(format)s'。" #, python-format msgid "" "Config requested an explicit CPU model, but the current libvirt hypervisor " "'%s' does not support selecting CPU models" msgstr "" "配置已要求明確的 CPU 型號,但現行 libVirt Hypervisor '%s' 不支援選取 CPU 型號" #, python-format msgid "" "Conflict updating instance %(instance_uuid)s, but we were unable to " "determine the cause" msgstr "更新實例 %(instance_uuid)s 時發生衝突,但是無法判定原因" #, python-format msgid "" "Conflict updating instance %(instance_uuid)s. Expected: %(expected)s. " "Actual: %(actual)s" msgstr "" "更新實例 %(instance_uuid)s 時發生衝突。預期:%(expected)s。實際:%(actual)s" #, python-format msgid "Connection to cinder host failed: %(reason)s" msgstr "連線 Cinder 主機失敗:%(reason)s" #, python-format msgid "Connection to glance host %(server)s failed: %(reason)s" msgstr "與 Glance 主機 %(server)s 的連線失敗:%(reason)s" #, python-format msgid "Connection to libvirt lost: %s" msgstr "libVirt 連線已中斷:%s" #, python-format msgid "" "Console log output could not be retrieved for instance %(instance_id)s. " "Reason: %(reason)s" msgstr "無法擷取實例 %(instance_id)s 的主控台日誌輸出。原因:%(reason)s" msgid "Constraint not met." msgstr "不符合限制。" #, python-format msgid "Converted to raw, but format is now %s" msgstr "已轉換為原始,但格式現在為 %s" #, python-format msgid "Could not attach image to loopback: %s" msgstr "無法將映像檔連接至迴圈:%s" #, python-format msgid "Could not fetch image %(image_id)s" msgstr "無法提取映像檔 %(image_id)s" #, python-format msgid "Could not find a handler for %(driver_type)s volume." msgstr "找不到 %(driver_type)s 磁區的處理程式。" #, python-format msgid "Could not find binary %(binary)s on host %(host)s." msgstr "在主機 %(host)s 上找不到二進位檔 %(binary)s。" #, python-format msgid "Could not find config at %(path)s" msgstr "在 %(path)s 處找不到配置" msgid "Could not find the datastore reference(s) which the VM uses." msgstr "找不到 VM 所使用的資料儲存庫參照。" #, python-format msgid "Could not load line %(line)s, got error %(error)s" msgstr "無法載入行 %(line)s,取得錯誤 %(error)s" #, python-format msgid "Could not load paste app '%(name)s' from %(path)s" msgstr "無法從 %(path)s 載入 paste 應用程式 '%(name)s'" #, python-format msgid "" "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s" msgstr "無法裝載 vfat 配置磁碟機。%(operation)s 失敗。錯誤:%(error)s" #, python-format msgid "Could not upload image %(image_id)s" msgstr "無法上傳映像檔 %(image_id)s" msgid "Creation of virtual interface with unique mac address failed" msgstr "使用唯一 MAC 位址來建立虛擬介面失敗" #, python-format msgid "Datastore regex %s did not match any datastores" msgstr "資料儲存庫正規表示式 %s 不符合任何資料儲存庫" msgid "Datetime is in invalid format" msgstr "日期時間的格式無效" msgid "Default PBM policy is required if PBM is enabled." msgstr "如果已啟用 PBM,則需要預設 PBM 原則。" #, python-format msgid "Device '%(device)s' not found." msgstr "找不到裝置 '%(device)s'。" msgid "Device name contains spaces." msgstr "裝置名稱包含空格。" msgid "Device name empty or too long." 
msgstr "裝置名稱為空,或者太長。" #, python-format msgid "Device type mismatch for alias '%s'" msgstr "別名 '%s' 的裝置類型不符" #, python-format msgid "Disk format %(disk_format)s is not acceptable" msgstr "無法接受磁碟格式 %(disk_format)s" #, python-format msgid "Disk info file is invalid: %(reason)s" msgstr "磁碟資訊檔無效:%(reason)s" #, python-format msgid "Driver Error: %s" msgstr "驅動程式錯誤:%s" #, python-format msgid "Error attempting to run %(method)s" msgstr "嘗試執行 %(method)s 時發生錯誤" #, python-format msgid "" "Error destroying the instance on node %(node)s. Provision state still " "'%(state)s'." msgstr "毀損節點 %(node)s 上的實例時發生錯誤。供應狀態仍為'%(state)s'。" #, python-format msgid "Error during unshelve instance %(instance_id)s: %(reason)s" msgstr "解除擱置實例 %(instance_id)s 期間發生錯誤:%(reason)s" #, python-format msgid "" "Error from libvirt while getting domain info for %(instance_name)s: [Error " "Code %(error_code)s] %(ex)s" msgstr "" "獲取 %(instance_name)s 的網域資訊時 libVirt 傳回錯誤:[錯誤碼 " "%(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while looking up %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "查閱 %(instance_name)s 時 libVirt 傳回錯誤:[錯誤碼 %(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while quiescing %(instance_name)s: [Error Code " "%(error_code)s] %(ex)s" msgstr "" "對 %(instance_name)s 執行靜止動作時,libVirt 中發生錯誤:[錯誤碼 " "%(error_code)s] %(ex)s" #, python-format msgid "" "Error from libvirt while set password for username \"%(user)s\": [Error Code " "%(error_code)s] %(ex)s" msgstr "" "設定使用者名稱 \"%(user)s\" 的密碼時,libvirt 傳回了錯誤:[錯誤" "碼%(error_code)s] %(ex)s" #, python-format msgid "" "Error mounting %(device)s to %(dir)s in image %(image)s with libguestfs " "(%(e)s)" msgstr "" "使用 libguestfs 將 %(device)s 裝載到映像檔 %(image)s 中的 %(dir)s 時發生錯誤" "(%(e)s)" #, python-format msgid "Error mounting %(image)s with libguestfs (%(e)s)" msgstr "裝載具有 libguestfs (%(e)s) 的 %(image)s 時發生錯誤" #, python-format msgid "Error when creating resource monitor: %(monitor)s" msgstr "建立資源監視器 %(monitor)s 時發生錯誤" #, python-format msgid "Event %(event)s not found for action id %(action_id)s" msgstr "找不到動作識別碼 %(action_id)s 的事件 %(event)s" msgid "Event must be an instance of nova.virt.event.Event" msgstr "事件必須是 nova.virt.event.Event 的實例" #, python-format msgid "" "Exceeded max scheduling attempts %(max_attempts)d for instance " "%(instance_uuid)s. Last exception: %(exc_reason)s" msgstr "" "已超出實例 %(instance_uuid)s 的排程嘗試次數上限 %(max_attempts)d。前次異常狀" "況:%(exc_reason)s" #, python-format msgid "" "Exceeded max scheduling retries %(max_retries)d for instance " "%(instance_uuid)s during live migration" msgstr "" "在即時移轉期間,超出了實例 %(instance_uuid)s 的排程重試次數上限 " "%(max_retries)d" #, python-format msgid "Exceeded maximum number of retries. %(reason)s" msgstr "已超出重試次數上限。%(reason)s" #, python-format msgid "Expected a uuid but received %(uuid)s." msgstr "需要 UUID,但收到 %(uuid)s。" msgid "Extracting vmdk from OVA failed." msgstr "從 OVA 擷取 VMDK 失敗。" #, python-format msgid "Failed to access port %(port_id)s: %(reason)s" msgstr "無法存取埠 %(port_id)s:%(reason)s" #, python-format msgid "Failed to allocate the network(s) with error %s, not rescheduling." msgstr "無法配置網路,發生錯誤 %s,將不重新排定。" msgid "Failed to allocate the network(s), not rescheduling." 
msgstr "無法配置網路,將不會重新排定。" #, python-format msgid "Failed to attach network adapter device to %(instance_uuid)s" msgstr "無法將網路配接卡裝置連接至 %(instance_uuid)s" #, python-format msgid "Failed to deploy instance: %(reason)s" msgstr "無法部署實例:%(reason)s" #, python-format msgid "Failed to detach PCI device %(dev)s: %(reason)s" msgstr "無法分離 PCI 裝置 %(dev)s:%(reason)s" #, python-format msgid "Failed to detach network adapter device from %(instance_uuid)s" msgstr "無法將網路配接卡裝置從 %(instance_uuid)s 分離" #, python-format msgid "Failed to encrypt text: %(reason)s" msgstr "無法加密文字:%(reason)s" #, python-format msgid "Failed to launch instances: %(reason)s" msgstr "無法啟動實例:%(reason)s" #, python-format msgid "Failed to map partitions: %s" msgstr "無法對映分割區:%s" #, python-format msgid "Failed to mount filesystem: %s" msgstr "無法裝載檔案系統:%s" #, python-format msgid "Failed to power off instance: %(reason)s" msgstr "無法關閉實例的電源:%(reason)s" #, python-format msgid "Failed to power on instance: %(reason)s" msgstr "無法開啟實例的電源:%(reason)s" #, python-format msgid "Failed to provision instance %(inst)s: %(reason)s" msgstr "無法供應實例 %(inst)s:%(reason)s" #, python-format msgid "Failed to read or write disk info file: %(reason)s" msgstr "無法讀取或寫入磁碟資訊檔:%(reason)s" #, python-format msgid "Failed to reboot instance: %(reason)s" msgstr "無法重新啟動實例:%(reason)s" #, python-format msgid "Failed to remove volume(s): (%(reason)s)" msgstr "無法移除磁區:(%(reason)s)" #, python-format msgid "Failed to request Ironic to rebuild instance %(inst)s: %(reason)s" msgstr "無法要求 Ironic 來重建實例 %(inst)s:%(reason)s" #, python-format msgid "Failed to resume instance: %(reason)s" msgstr "無法回復實例:%(reason)s" #, python-format msgid "Failed to run qemu-img info on %(path)s : %(error)s" msgstr "無法在 %(path)s 上執行 qemu-img 資訊:%(error)s" #, python-format msgid "Failed to set admin password on %(instance)s because %(reason)s" msgstr "無法在 %(instance)s 上設定管理者密碼,因為 %(reason)s" #, python-format msgid "Failed to suspend instance: %(reason)s" msgstr "無法懸置實例:%(reason)s" #, python-format msgid "Failed to terminate instance: %(reason)s" msgstr "無法終止實例:%(reason)s" msgid "Failure prepping block device." msgstr "準備區塊裝置時失敗。" #, python-format msgid "File %(file_path)s could not be found." msgstr "找不到檔案 %(file_path)s。" #, python-format msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s." msgstr "固定 IP %(ip)s 不是網路 %(network_id)s 的有效 IP 位址。" #, python-format msgid "Fixed IP %s is already in use." msgstr "固定 IP %s 已在使用中。" #, python-format msgid "" "Fixed IP address %(address)s is already in use on instance %(instance_uuid)s." msgstr "實例 %(instance_uuid)s 上已使用固定 IP 位址 %(address)s。" #, python-format msgid "Fixed IP not found for address %(address)s." msgstr "找不到位址 %(address)s 的固定 IP。" #, python-format msgid "Flavor %(flavor_id)s could not be found." msgstr "找不到特性 %(flavor_id)s。" #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s." msgstr "特性 %(flavor_id)s 沒有索引鍵為 %(extra_specs_key)s 的額外規格。" #, python-format msgid "Flavor %(flavor_id)s has no extra specs with key %(key)s." msgstr "特性 %(flavor_id)s 沒有索引鍵為 %(key)s 的額外規格。" #, python-format msgid "" "Flavor %(id)s extra spec cannot be updated or created after %(retries)d " "retries." msgstr "在嘗試 %(retries)d 次之後,無法更新或建立特性 %(id)s 額外規格。" #, python-format msgid "" "Flavor access already exists for flavor %(flavor_id)s and project " "%(project_id)s combination." 
msgstr "特性 %(flavor_id)s 及專案%(project_id)s 組合已存在特性存取。" #, python-format msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination." msgstr "找不到 %(flavor_id)s / %(project_id)s 組合的特性存取。" msgid "Flavor used by the instance could not be found." msgstr "找不到實例所使用的特性。" #, python-format msgid "Flavor with ID %(flavor_id)s already exists." msgstr "ID 為 %(flavor_id)s 的特性已存在。" #, python-format msgid "Flavor with name %(flavor_name)s could not be found." msgstr "找不到名稱為 %(flavor_name)s 的特性。" #, python-format msgid "Flavor with name %(name)s already exists." msgstr "名稱為 %(name)s 的特性已存在。" #, python-format msgid "" "Flavor's disk is smaller than the minimum size specified in image metadata. " "Flavor disk is %(flavor_size)i bytes, minimum size is %(image_min_disk)i " "bytes." msgstr "" "特性磁碟小於映像檔 meta 資料中指定的大小下限。特性磁碟為 %(flavor_size)i 位元" "組,大小下限為 %(image_min_disk)i 位元組。" #, python-format msgid "" "Flavor's disk is too small for requested image. Flavor disk is " "%(flavor_size)i bytes, image is %(image_size)i bytes." msgstr "" "針對所要求的映像檔而言,特性磁碟太小。特性磁碟為%(flavor_size)i 位元組,映像" "檔為 %(image_size)i 位元組。" msgid "Flavor's memory is too small for requested image." msgstr "特性的記憶體太小,裝不下所要求的映像檔。" #, python-format msgid "Floating IP %(address)s association has failed." msgstr "浮動 IP %(address)s 關聯失敗。" #, python-format msgid "Floating IP %(address)s is associated." msgstr "已與浮動 IP %(address)s 產生關聯。" #, python-format msgid "Floating IP %(address)s is not associated with instance %(id)s." msgstr "浮動 IP %(address)s 未與實例 %(id)s 產生關聯。" #, python-format msgid "Floating IP not found for ID %(id)s." msgstr "找不到 ID %(id)s 的浮動 IP。" #, python-format msgid "Floating IP not found for ID %s" msgstr "找不到 ID %s 的浮動 IP" #, python-format msgid "Floating IP not found for address %(address)s." msgstr "找不到位址 %(address)s 的浮動 IP。" msgid "Floating IP pool not found." msgstr "找不到浮動 IP 儲存區。" msgid "" "Forbidden to exceed flavor value of number of serial ports passed in image " "meta." msgstr "已禁止超出映像檔 meta 中傳遞之序列埠數目的特性值。" msgid "Found no disk to snapshot." msgstr "找不到磁碟來取得 Snapshot。" msgid "Guest does not have a console available." msgstr "訪客沒有主控台可用。" #, python-format msgid "Host %(host)s could not be found." msgstr "找不到主機 %(host)s。" #, python-format msgid "Host %(host)s is already mapped to cell %(uuid)s" msgstr "主機 %(host)s 已經對映至 Cell %(uuid)s" #, python-format msgid "Host '%(name)s' is not mapped to any cell" msgstr "主機 '%(name)s' 未對映至任何 Cell" msgid "Host aggregate is not empty" msgstr "主機聚集不為空" msgid "Host does not support guests with NUMA topology set" msgstr "主機不支援具有 NUMA 拓蹼集的訪客" msgid "Host does not support guests with custom memory page sizes" msgstr "主機不支援具有自訂記憶體頁面大小的訪客" msgid "Hypervisor driver does not support post_live_migration_at_source method" msgstr "Hypervisor 驅動程式不支援 post_live_migration_at_source 方法" #, python-format msgid "Hypervisor virt type '%s' is not valid" msgstr "Hypervisor virt 類型 '%s' 無效" #, python-format msgid "Hypervisor virtualization type '%(hv_type)s' is not recognised" msgstr "未辨識 Hypervisor 虛擬化類型 '%(hv_type)s'" #, python-format msgid "Hypervisor with ID '%s' could not be found." msgstr "找不到 ID 為 '%s' 的 Hypervisor。" #, python-format msgid "IP allocation over quota in pool %s." msgstr "IP 配置超過儲存區 %s 中的配額。" msgid "IP allocation over quota." msgstr "IP 配置超過配額。" #, python-format msgid "Image %(image_id)s could not be found." msgstr "找不到映像檔 %(image_id)s。" #, python-format msgid "Image %(image_id)s is not active." 
msgstr "映像檔 %(image_id)s 不在作用中。" #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "無法接受映像檔 %(image_id)s:%(reason)s" msgid "Image disk size greater than requested disk size" msgstr "映像檔磁碟大小大於所要求的磁碟大小" msgid "Image is not raw format" msgstr "映像檔不是原始格式" msgid "Image metadata limit exceeded" msgstr "已超出映像檔 meta 資料限制" #, python-format msgid "Image model '%(image)s' is not supported" msgstr "不支援映像檔模型 '%(image)s'" msgid "Image not found." msgstr "找不到映像檔。" #, python-format msgid "" "Image property '%(name)s' is not permitted to override NUMA configuration " "set against the flavor" msgstr "不允許映像檔內容 '%(name)s' 針對特性置換 NUMA 配置集" msgid "" "Image property 'hw_cpu_policy' is not permitted to override CPU pinning " "policy set against the flavor" msgstr "" "不允許使用映像檔內容 'hw_cpu_policy' 來置換針對特性所設定的 CPU 固定原則" msgid "" "Image property 'hw_cpu_thread_policy' is not permitted to override CPU " "thread pinning policy set against the flavor" msgstr "" "針對該特性,不允許映像檔內容 'hw_cpu_thread_policy' 置換 CPU 執行緒固定原則集" msgid "Image that the instance was started with could not be found." msgstr "找不到已用來啟動實例的映像檔。" #, python-format msgid "Image's config drive option '%(config_drive)s' is invalid" msgstr "映像檔的配置驅動選項 '%(config_drive)s' 無效" msgid "" "Images with destination_type 'volume' need to have a non-zero size specified" msgstr "destination_type 為 'volume' 的映像檔需要指定非零大小" msgid "In ERROR state" msgstr "處於 ERROR 狀態" #, python-format msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None" msgstr "處於狀態 %(vm_state)s/%(task_state)s,而不是 RESIZED/None" #, python-format msgid "In-progress live migration %(id)s is not found for server %(uuid)s." msgstr "找不到伺服器 %(uuid)s 的進行中即時移轉 %(id)s。" msgid "" "Incompatible settings: ephemeral storage encryption is supported only for " "LVM images." msgstr "不相容的設定:只有 LVM 映像檔才支援暫時儲存體加密。" #, python-format msgid "Info cache for instance %(instance_uuid)s could not be found." msgstr "找不到實例 %(instance_uuid)s 的資訊快取。" #, python-format msgid "" "Instance %(instance)s and volume %(vol)s are not in the same " "availability_zone. Instance is in %(ins_zone)s. Volume is in %(vol_zone)s" msgstr "" "實例 %(instance)s 與磁區 %(vol)s 不在同一可用性區域中。實例在 %(ins_zone)s " "中,而磁區在 %(vol_zone)s 中" #, python-format msgid "Instance %(instance)s does not have a port with id %(port)s" msgstr "實例 %(instance)s 沒有 ID 為 %(port)s 的埠" #, python-format msgid "Instance %(instance_id)s cannot be rescued: %(reason)s" msgstr "無法救援實例 %(instance_id)s:%(reason)s" #, python-format msgid "Instance %(instance_id)s could not be found." msgstr "找不到實例 %(instance_id)s。" #, python-format msgid "Instance %(instance_id)s has no tag '%(tag)s'" msgstr "實例 %(instance_id)s 沒有標籤 '%(tag)s'" #, python-format msgid "Instance %(instance_id)s is not in rescue mode" msgstr "實例 %(instance_id)s 不處於救援模式" #, python-format msgid "Instance %(instance_id)s is not ready" msgstr "實例 %(instance_id)s 未備妥" #, python-format msgid "Instance %(instance_id)s is not running." msgstr "實例 %(instance_id)s 不在執行中。" #, python-format msgid "Instance %(instance_id)s is unacceptable: %(reason)s" msgstr "無法接受實例 %(instance_id)s:%(reason)s" #, python-format msgid "Instance %(instance_uuid)s does not specify a NUMA topology" msgstr "實例 %(instance_uuid)s 未指定 NUMA 拓蹼" #, python-format msgid "Instance %(instance_uuid)s does not specify a migration context." msgstr "實例 %(instance_uuid)s 未指定移轉環境定義。" #, python-format msgid "" "Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while " "the instance is in this state." 
msgstr "" "實例 %(instance_uuid)s 處於 %(attr)s %(state)s。實例處於此狀態時,無法 " "%(method)s。" #, python-format msgid "Instance %(instance_uuid)s is locked" msgstr "已鎖定實例 %(instance_uuid)s" #, python-format msgid "" "Instance %(instance_uuid)s requires config drive, but it does not exist." msgstr "實例 %(instance_uuid)s 需要配置磁碟機,但該磁碟機不存在。" #, python-format msgid "Instance %(name)s already exists." msgstr "實例 %(name)s 已存在。" #, python-format msgid "Instance %(server_id)s is in an invalid state for '%(action)s'" msgstr "實例 %(server_id)s 處於無效的狀態,無法%(action)s" #, python-format msgid "Instance %(uuid)s has no mapping to a cell." msgstr "實例 %(uuid)s 沒有與 Cell 的對映。" #, python-format msgid "Instance %s not found" msgstr "找不到實例 %s" #, python-format msgid "Instance %s provisioning was aborted" msgstr "已中斷實例 %s 供應" msgid "Instance could not be found" msgstr "找不到實例" msgid "Instance disk to be encrypted but no context provided" msgstr "即將加密實例磁碟,但卻未提供環境定義" msgid "Instance event failed" msgstr "實例事件失敗" #, python-format msgid "Instance group %(group_uuid)s already exists." msgstr "實例群組 %(group_uuid)s 已存在。" #, python-format msgid "Instance group %(group_uuid)s could not be found." msgstr "找不到實例群組 %(group_uuid)s。" msgid "Instance has no source host" msgstr "實例沒有來源主機" msgid "Instance has not been resized." msgstr "尚未調整實例大小。" #, python-format msgid "Instance hostname %(hostname)s is not a valid DNS name" msgstr "實例主機名 %(hostname)s 不是有效的 DNS 名稱" msgid "Instance is not a member of specified network" msgstr "實例不是所指定網路的成員" #, python-format msgid "Instance rollback performed due to: %s" msgstr "已執行實例回復作業,原因:%s" #, python-format msgid "" "Insufficient Space on Volume Group %(vg)s. Only %(free_space)db available, " "but %(size)d bytes required by volume %(lv)s." msgstr "" "磁區群組 %(vg)s 上的空間不足。僅 %(free_space)db 可用,但磁區 %(lv)s 需要 " "%(size)d 位元組。" #, python-format msgid "Insufficient compute resources: %(reason)s." msgstr "計算資源不足:%(reason)s。" #, python-format msgid "Interface %(interface)s not found." msgstr "找不到介面 %(interface)s。" #, python-format msgid "Invalid Base 64 data for file %(path)s" msgstr "檔案 %(path)s 的 Base 64 資料無效" msgid "Invalid Connection Info" msgstr "無效的連線資訊" #, python-format msgid "Invalid ID received %(id)s." msgstr "收到無效的 ID %(id)s。" #, python-format msgid "Invalid IP format %s" msgstr "無效的 IP 格式 %s" #, python-format msgid "Invalid IP protocol %(protocol)s." msgstr "無效的 IP 通訊協定 %(protocol)s。" msgid "" "Invalid PCI Whitelist: The PCI whitelist can specify devname or address, but " "not both" msgstr "無效的 PCI 白名單:PCI 白名單可以指定裝置名稱或位址,但不能指定這兩者" #, python-format msgid "Invalid PCI alias definition: %(reason)s" msgstr "無效的 PCI 別名定義:%(reason)s" #, python-format msgid "Invalid Regular Expression %s" msgstr "無效的正規表示式 %s" #, python-format msgid "Invalid characters in hostname '%(hostname)s'" msgstr "主機名稱 '%(hostname)s' 中有無效字元" msgid "Invalid config_drive provided." msgstr "提供的 config_drive 無效。" #, python-format msgid "Invalid config_drive_format \"%s\"" msgstr "config_drive_format \"%s\" 無效" #, python-format msgid "Invalid console type %(console_type)s" msgstr "無效的主控台類型 %(console_type)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "無效的內容類型 %(content_type)s。" #, python-format msgid "Invalid datetime string: %(reason)s" msgstr "無效的日期時間字串:%(reason)s" msgid "Invalid device UUID." 
msgstr "無效的裝置 UUID。" #, python-format msgid "Invalid entry: '%s'" msgstr "項目 '%s' 無效" #, python-format msgid "Invalid entry: '%s'; Expecting dict" msgstr "項目 '%s' 無效;預期字典" #, python-format msgid "Invalid entry: '%s'; Expecting list or dict" msgstr "項目 '%s' 無效;預期清單或字典" #, python-format msgid "Invalid exclusion expression %r" msgstr "無效的排除表示式 %r" #, python-format msgid "Invalid image format '%(format)s'" msgstr "映像檔格式 '%(format)s' 無效" #, python-format msgid "Invalid image href %(image_href)s." msgstr "無效的映像檔 href %(image_href)s。" #, python-format msgid "Invalid inclusion expression %r" msgstr "無效的併入表示式 %r" #, python-format msgid "" "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s" msgstr "欄位/屬性 %(path)s 的輸入無效。值:%(value)s。%(message)s" #, python-format msgid "Invalid input received: %(reason)s" msgstr "收到的輸入無效:%(reason)s" msgid "Invalid instance image." msgstr "無效的實例映像檔。" #, python-format msgid "Invalid is_public filter [%s]" msgstr "無效的 is_public 過濾器 [%s]" msgid "Invalid key_name provided." msgstr "提供的 key_name 無效。" #, python-format msgid "Invalid memory page size '%(pagesize)s'" msgstr "記憶體頁面大小 '%(pagesize)s' 無效" msgid "Invalid metadata key" msgstr "無效的 meta 資料索引鍵" #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "無效的 meta 資料大小:%(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "無效的 meta 資料:%(reason)s" #, python-format msgid "Invalid minDisk filter [%s]" msgstr "無效的 minDisk 過濾器 [%s]" #, python-format msgid "Invalid minRam filter [%s]" msgstr "無效的 minRam 過濾器 [%s]" #, python-format msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" msgstr "無效的埠範圍 %(from_port)s:%(to_port)s。%(msg)s" msgid "Invalid proxy request signature." msgstr "無效的 Proxy 要求簽章。" #, python-format msgid "Invalid range expression %r" msgstr "無效的範圍表示式 %r" msgid "Invalid service catalog json." msgstr "無效的服務型錄 JSON。" msgid "Invalid start time. The start time cannot occur after the end time." msgstr "無效的開始時間。開始時間不能在結束時間之後。" msgid "Invalid state of instance files on shared storage" msgstr "共用儲存體上實例檔案的狀態無效" #, python-format msgid "Invalid timestamp for date %s" msgstr "日期 %s 的時間戳記無效" #, python-format msgid "Invalid usage_type: %s" msgstr "usage_type %s 無效" #, python-format msgid "Invalid value for Config Drive option: %(option)s" msgstr "「配置驅動」選項 %(option)s 的值無效" #, python-format msgid "Invalid virtual interface address %s in request" msgstr "要求中的虛擬介面位址 %s 無效" #, python-format msgid "Invalid volume access mode: %(access_mode)s" msgstr "無效的磁區存取模式:%(access_mode)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "無效的磁區:%(reason)s" msgid "Invalid volume_size." msgstr "無效的 volume_size。" #, python-format msgid "Ironic node uuid not supplied to driver for instance %s." msgstr "未將 Ironic 節點 UUID 提供給實例 %s 的驅動程式。" #, python-format msgid "" "It is not allowed to create an interface on external network %(network_uuid)s" msgstr "不容許在下列外部網路上建立介面:%(network_uuid)s" msgid "" "Key Names can only contain alphanumeric characters, periods, dashes, " "underscores, colons and spaces." msgstr "索引鍵名稱只能包含英數字元、句點、橫線、底線、冒號及空格。" #, python-format msgid "Key manager error: %(reason)s" msgstr "金鑰管理程式錯誤:%(reason)s" #, python-format msgid "Key pair '%(key_name)s' already exists." 
msgstr "金鑰組 '%(key_name)s' 已存在。" #, python-format msgid "Keypair %(name)s not found for user %(user_id)s" msgstr "找不到使用者 %(user_id)s 的金鑰組 %(name)s" #, python-format msgid "Keypair data is invalid: %(reason)s" msgstr "金鑰組資料無效:%(reason)s" msgid "Limits only supported from vCenter 6.0 and above" msgstr "只有 vCenter 6.0 及更高版本中的限制才受支援" #, python-format msgid "Live migration %(id)s for server %(uuid)s is not in progress." msgstr "伺服器 %(uuid)s 的即時移轉 %(id)s 不在進行中。" #, python-format msgid "Malformed message body: %(reason)s" msgstr "訊息內文的格式不正確:%(reason)s" #, python-format msgid "" "Malformed request URL: URL's project_id '%(project_id)s' doesn't match " "Context's project_id '%(context_project_id)s'" msgstr "" "要求 URL 的格式不正確:URL 的 project_id '%(project_id)s',與環境定義的 " "project_id '%(context_project_id)s' 不符" msgid "Malformed request body" msgstr "要求內文的格式不正確" msgid "Mapping image to local is not supported." msgstr "不支援將映像檔對映至本端。" #, python-format msgid "Marker %(marker)s could not be found." msgstr "找不到標記 %(marker)s。" msgid "Maximum number of floating IPs exceeded" msgstr "超過了浮動 IP 數目上限" #, python-format msgid "Maximum number of metadata items exceeds %(allowed)d" msgstr "meta 資料項目數目上限已超出所允許的 %(allowed)d" msgid "Maximum number of ports exceeded" msgstr "已超出埠數目上限" msgid "Maximum number of security groups or rules exceeded" msgstr "已超出安全群組或規則數目上限" msgid "Metadata item was not found" msgstr "找不到 meta 資料項目" msgid "Metadata property key greater than 255 characters" msgstr "meta 資料內容索引鍵超過 255 個字元" msgid "Metadata property value greater than 255 characters" msgstr "meta 資料內容值超過 255 個字元" msgid "Metadata type should be dict." msgstr "meta 資料類型應該是字典。" #, python-format msgid "" "Metric %(name)s could not be found on the compute host node %(host)s." "%(node)s." msgstr "在計算主機節點 %(host)s.%(node)s 上找不到度量 %(name)s。" #, python-format msgid "Migration %(id)s for server %(uuid)s is not live-migration." msgstr "伺服器 %(uuid)s 的移轉 %(id)s 不是即時移轉。" #, python-format msgid "Migration %(migration_id)s could not be found." msgstr "找不到移轉 %(migration_id)s。" #, python-format msgid "Migration %(migration_id)s not found for instance %(instance_id)s" msgstr "找不到實例 %(instance_id)s 的移轉 %(migration_id)s" #, python-format msgid "" "Migration %(migration_id)s state of instance %(instance_uuid)s is %(state)s. " "Cannot %(method)s while the migration is in this state." msgstr "" "實例 %(instance_uuid)s 的移轉 %(migration_id)s 狀態為 %(state)s。當移轉處於這" "種狀態時,無法執行 %(method)s。" #, python-format msgid "Migration error: %(reason)s" msgstr "移轉錯誤:%(reason)s" msgid "Migration is not supported for LVM backed instances" msgstr "不支援移轉以 LVM 為基礎的實例" #, python-format msgid "" "Migration not found for instance %(instance_id)s with status %(status)s." msgstr "找不到實例 %(instance_id)s(狀態為 %(status)s)的移轉。" #, python-format msgid "Migration pre-check error: %(reason)s" msgstr "移轉事先檢查發生錯誤:%(reason)s" #, python-format msgid "Migration select destinations error: %(reason)s" msgstr "移轉選取目的地錯誤:%(reason)s" #, python-format msgid "Missing arguments: %s" msgstr "遺漏引數:%s" msgid "Missing device UUID." msgstr "遺漏裝置 UUID。" msgid "Missing disabled reason field" msgstr "遺漏了停用原因欄位" msgid "Missing forced_down field" msgstr "遺漏 forced_down 欄位" msgid "Missing imageRef attribute" msgstr "遺漏了 imageRef 屬性" #, python-format msgid "Missing keys: %s" msgstr "遺漏了索引鍵:%s" msgid "Missing parameter dict" msgstr "遺漏了參數字典" #, python-format msgid "" "More than one instance is associated with fixed IP address '%(address)s'." 
msgstr "有多個實例與固定 IP 位址 '%(address)s' 相關聯。" msgid "" "More than one possible network found. Specify network ID(s) to select which " "one(s) to connect to." msgstr "找到多個可能的網路。請指定網路 ID 以選取要連接的網路。" msgid "More than one swap drive requested." msgstr "已要求多個交換磁碟機。" #, python-format msgid "Multi-boot operating system found in %s" msgstr "在 %s 中找到了多重啟動作業系統" msgid "Multiple X-Instance-ID headers found within request." msgstr "在要求中發現多個 X-Instance-ID 標頭。" msgid "Multiple X-Tenant-ID headers found within request." msgstr "在要求中發現多個 X-Tenant-ID 標頭。" #, python-format msgid "Multiple floating IP pools matches found for name '%s'" msgstr "找到名稱 '%s' 的多個浮動 IP 儲存區相符項" #, python-format msgid "Multiple floating IPs are found for address %(address)s." msgstr "找到位址 %(address)s 的多個浮動 IP。" msgid "" "Multiple hosts may be managed by the VMWare vCenter driver; therefore we do " "not return uptime for just one host." msgstr "" "多個主機可能由 VMWare vCenter 驅動程式進行管理;因此,將不會儘傳回一個主機執" "行時間。" msgid "Multiple possible networks found, use a Network ID to be more specific." msgstr "找到多個可能的網路,請使用更明確的網路 ID。" #, python-format msgid "" "Multiple security groups found matching '%s'. Use an ID to be more specific." msgstr "找到多個與 '%s' 相符的安全群組。請使用更明確的 ID。" msgid "Must input network_id when request IP address" msgstr "要求 IP 位址時,必須輸入 network_id" msgid "Must not input both network_id and port_id" msgstr "不得同時輸入 network_id 和 port_id" msgid "" "Must specify host_ip, host_username and host_password to use vmwareapi." "VMwareVCDriver" msgstr "" "必須指定 host_ip、host_username 及 host_password,才能使用vmwareapi." "VMwareVCDriver" msgid "Must supply a positive value for max_number" msgstr "必須為 max_number 提供一個正值" msgid "Must supply a positive value for max_rows" msgstr "必須為 max_rows 提供正值" #, python-format msgid "Network %(network_id)s could not be found." msgstr "找不到網路 %(network_id)s。" #, python-format msgid "" "Network %(network_uuid)s requires a subnet in order to boot instances on." msgstr "網路 %(network_uuid)s 需要子網路才能啟動實例。" #, python-format msgid "Network could not be found for bridge %(bridge)s" msgstr "找不到橋接器 %(bridge)s 的網路" #, python-format msgid "Network could not be found for instance %(instance_id)s." msgstr "找不到實例 %(instance_id)s 的網路。" msgid "Network not found" msgstr "找不到網路" msgid "" "Network requires port_security_enabled and subnet associated in order to " "apply security groups." msgstr "網路需要 port_security_enabled 及相關聯的子網路,才能套用安全群組。" msgid "New volume must be detached in order to swap." msgstr "新磁區必須分離才能交換。" msgid "New volume must be the same size or larger." msgstr "新磁區必須具有相同大小或者更大。" #, python-format msgid "No Block Device Mapping with id %(id)s." msgstr "沒有 ID 為 %(id)s 的區塊裝置對映。" msgid "No Unique Match Found." msgstr "找不到唯一相符項。" msgid "No compute host specified" msgstr "未指定計算主機" #, python-format msgid "No configuration information found for operating system %(os_name)s" msgstr "找不到作業系統 %(os_name)s 的配置資訊" #, python-format msgid "No device with MAC address %s exists on the VM" msgstr "VM 上不存在 MAC 位址為 %s 的裝置" #, python-format msgid "No device with interface-id %s exists on VM" msgstr "VM 上不存在 interface-id 為 %s 的裝置" #, python-format msgid "No disk at %(location)s" msgstr "%(location)s 處沒有磁碟" #, python-format msgid "No fixed IP addresses available for network: %(net)s" msgstr "網路 %(net)s 沒有可用的固定 IP 位址" msgid "No fixed IPs associated to instance" msgstr "沒有固定 IP 與實例相關聯" msgid "No free nbd devices" msgstr "沒有可用的 NBD 裝置" msgid "No host available on cluster" msgstr "叢集上沒有可用的主機" msgid "No hosts found to map to cell, exiting." 
msgstr "找不到要對映至 Cell 的主機,正在結束。" #, python-format msgid "No hypervisor matching '%s' could be found." msgstr "找不到與 '%s' 相符的 Hypervisor。" msgid "No image locations are accessible" msgstr "任何映像檔位置均不可存取" #, python-format msgid "" "No live migration URI configured and no default available for " "\"%(virt_type)s\" hypervisor virtualization type." msgstr "" "未配置任何即時移轉 URI,且 \"%(virt_type)s\" Hypervisor 虛擬化類型無法使用任" "何預設值。" msgid "No more floating IPs available." msgstr "沒有更多的浮動 IP 可供使用。" #, python-format msgid "No more floating IPs in pool %s." msgstr "儲存區 %s 中沒有更多的浮動 IP。" #, python-format msgid "No mount points found in %(root)s of %(image)s" msgstr "在 %(image)s 的 %(root)s 中找不到裝載點" #, python-format msgid "No operating system found in %s" msgstr "在 %s 中找不到作業系統" msgid "No root disk defined." msgstr "未定義根磁碟。" #, python-format msgid "" "No specific network was requested and none are available for project " "'%(project_id)s'." msgstr "未要求任何特定網路,且專案 '%(project_id)s' 無法使用任何網路。" msgid "No valid host found for cold migrate" msgstr "找不到有效的主機進行冷移轉" msgid "No valid host found for resize" msgstr "找不到要調整其大小的有效主機" #, python-format msgid "No valid host was found. %(reason)s" msgstr "找不到有效的主機。%(reason)s" #, python-format msgid "No volume Block Device Mapping at path: %(path)s" msgstr "路徑 %(path)s 處不存在磁區區塊裝置對映" #, python-format msgid "No volume Block Device Mapping with id %(volume_id)s." msgstr "不存在 ID 為 %(volume_id)s 的磁區區塊裝置對映。" #, python-format msgid "Node %s could not be found." msgstr "找不到節點 %s。" #, python-format msgid "Not able to acquire a free port for %(host)s" msgstr "無法獲得 %(host)s 的可用埠" #, python-format msgid "Not able to bind %(host)s:%(port)d, %(error)s" msgstr "無法連結 %(host)s:%(port)d,%(error)s" #, python-format msgid "" "Not all Virtual Functions of PF %(compute_node_id)s:%(address)s are free." msgstr "並非 PF %(compute_node_id)s:%(address)s 的所有虛擬函數都可用。" msgid "Not an rbd snapshot" msgstr "不是 rbd Snapshot" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "未獲映像檔 %(image_id)s 的授權。" msgid "Not authorized." msgstr "未被授權" msgid "Not enough parameters to build a valid rule." msgstr "參數數目不足以建置有效的規則。" msgid "Not stored in rbd" msgstr "未儲存在 rbd 中" msgid "Nothing was archived." msgstr "未保存任何內容。" #, python-format msgid "Nova requires libvirt version %s or greater." msgstr "Nova 需要 libVirt %s 版或更高版本。" msgid "Number of Rows Archived" msgstr "已保存的列數" #, python-format msgid "Object action %(action)s failed because: %(reason)s" msgstr "物件動作 %(action)s 失敗,原因:%(reason)s" msgid "Old volume is attached to a different instance." msgstr "已將舊磁區連接至其他實例。" #, python-format msgid "One or more hosts already in availability zone(s) %s" msgstr "一個以上的主機已經位於可用性區域 %s 中" msgid "Only administrators may list deleted instances" msgstr "只有管理者才能列出已刪除的實例" msgid "Origin header does not match this host." msgstr "原始標頭與此主機不符。" msgid "Origin header not valid." msgstr "原始標頭無效。" msgid "Origin header protocol does not match this host." msgstr "原始標頭通訊協定與此主機不符。" #, python-format msgid "PCI Device %(node_id)s:%(address)s not found." 
msgstr "找不到 PCI 裝置 %(node_id)s:%(address)s。" #, python-format msgid "PCI alias %(alias)s is not defined" msgstr "未定義 PCI 別名 %(alias)s" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is %(status)s instead of " "%(hopestatus)s" msgstr "" "PCI 裝置 %(compute_node_id)s:%(address)s 的狀態是 %(status)s,而不" "是%(hopestatus)s" #, python-format msgid "" "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead of " "%(hopeowner)s" msgstr "" "PCI 裝置 %(compute_node_id)s:%(address)s 的擁有者是 %(owner)s,而不" "是%(hopeowner)s" #, python-format msgid "PCI device %(id)s not found" msgstr "找不到 PCI 裝置 %(id)s" #, python-format msgid "PCI device request %(requests)s failed" msgstr "PCI 裝置要求 %(requests)s 失敗" #, python-format msgid "Page size %(pagesize)s forbidden against '%(against)s'" msgstr "針對 '%(against)s',已禁止頁面大小 %(pagesize)s" #, python-format msgid "Page size %(pagesize)s is not supported by the host." msgstr "主機不支援頁面大小 %(pagesize)s。" #, python-format msgid "" "Parameters %(missing_params)s not present in vif_details for vif %(vif_id)s. " "Check your Neutron configuration to validate that the macvtap parameters are " "correct." msgstr "" "參數 %(missing_params)s 未呈現在 vif %(vif_id)s 的 vif_details 中。 請檢查 " "Neutron 配置,以確認 macvtap 參數是正確的。" #, python-format msgid "Path %s must be LVM logical volume" msgstr "路徑 %s 必須是 LVM 邏輯磁區" msgid "Paused" msgstr "已暫停" msgid "Personality file limit exceeded" msgstr "已超出特質檔案限制" #, python-format msgid "" "Physical Function %(compute_node_id)s:%(address)s, related to VF " "%(compute_node_id)s:%(vf_address)s is %(status)s instead of %(hopestatus)s" msgstr "" "與 VF %(compute_node_id)s:%(vf_address)s 相關的實體函數 %(compute_node_id)s:" "%(address)s 處於 %(status)s 狀態,而不是 %(hopestatus)s 狀態" #, python-format msgid "Physical network is missing for network %(network_uuid)s" msgstr "遺漏了用於網路 %(network_uuid)s 的實體網路" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "原則不容許執行 %(action)s。" #, python-format msgid "Port %(port_id)s is still in use." msgstr "埠 %(port_id)s 仍在使用中。" #, python-format msgid "Port %(port_id)s not usable for instance %(instance)s." msgstr "埠 %(port_id)s 不適用於實例 %(instance)s。" #, python-format msgid "" "Port %(port_id)s not usable for instance %(instance)s. Value %(value)s " "assigned to dns_name attribute does not match instance's hostname " "%(hostname)s" msgstr "" "實例 %(instance)s 無法使用埠 %(port_id)s。指派給 dns_name 屬性的值 %(value)s " "與實例的主機名 %(hostname)s 不符" #, python-format msgid "Port %(port_id)s requires a FixedIP in order to be used." msgstr "埠 %(port_id)s 需要固定 IP 才能使用。" #, python-format msgid "Port %s is not attached" msgstr "未連接埠 %s" #, python-format msgid "Port id %(port_id)s could not be found." msgstr "找不到埠 ID %(port_id)s。" #, python-format msgid "Provided video model (%(model)s) is not supported." msgstr "不支援提供的視訊模型 (%(model)s)。" #, python-format msgid "Provided watchdog action (%(action)s) is not supported." msgstr "不支援所提供的監視器動作 (%(action)s)。" msgid "QEMU guest agent is not enabled" msgstr "未啟用 QEMU 訪客代理程式" #, python-format msgid "Quiescing is not supported in instance %(instance_id)s" msgstr "實例 %(instance_id)s 不支援靜止" #, python-format msgid "Quota class %(class_name)s could not be found." 
msgstr "找不到配額類別 %(class_name)s。" msgid "Quota could not be found" msgstr "找不到配額" #, python-format msgid "" "Quota exceeded for %(overs)s: Requested %(req)s, but already used %(used)s " "of %(allowed)s %(overs)s" msgstr "" "%(overs)s 已超出配額:要求 %(req)s,但已經使用了 %(used)s %(allowed)s " "%(overs)s" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "資源已超出配額:%(overs)s" msgid "Quota exceeded, too many key pairs." msgstr "已超出配額,金鑰組太多。" msgid "Quota exceeded, too many server groups." msgstr "已超出配額,伺服器群組太多。" msgid "Quota exceeded, too many servers in group" msgstr "已超出配額,群組中的伺服器太多" #, python-format msgid "Quota exists for project %(project_id)s, resource %(resource)s" msgstr "專案 %(project_id)s 資源 %(resource)s 已存在配額" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "找不到專案 %(project_id)s 的配額。" #, python-format msgid "" "Quota for user %(user_id)s in project %(project_id)s could not be found." msgstr "找不到專案 %(project_id)s 中使用者 %(user_id)s 的配額。" #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be greater than or equal to " "already used and reserved %(minimum)s." msgstr "" "%(resource)s 的配額限制 %(limit)s 必須大於或等於已經使用並保留的 " "%(minimum)s。" #, python-format msgid "" "Quota limit %(limit)s for %(resource)s must be less than or equal to " "%(maximum)s." msgstr "%(resource)s 的配額限制 %(limit)s 必須小於或等於%(maximum)s。" msgid "Request body and URI mismatch" msgstr "要求內文與 URI 不符" msgid "Request is too large." msgstr "要求太大。" #, python-format msgid "Request of image %(image_id)s got BadRequest response: %(response)s" msgstr "映像檔 %(image_id)s 的要求取得 BadRequest 回應:%(response)s" #, python-format msgid "RequestSpec not found for instance %(instance_uuid)s" msgstr "找不到實例 %(instance_uuid)s 的 RequestSpec" msgid "Requested CPU control policy not supported by host" msgstr "主機不支援所要求的 CPU 控制原則" #, python-format msgid "" "Requested hardware '%(model)s' is not supported by the '%(virt)s' virt driver" msgstr "'%(virt)s' 虛擬化驅動程式不支援所要求的硬體 '%(model)s'" #, python-format msgid "Requested image %(image)s has automatic disk resize disabled." msgstr "所要求的映像檔 %(image)s 已禁止自動調整磁碟大小。" msgid "" "Requested instance NUMA topology cannot fit the given host NUMA topology" msgstr "所要求的實例 NUMA 拓蹼無法適合給定的主機 NUMA 拓蹼" msgid "" "Requested instance NUMA topology together with requested PCI devices cannot " "fit the given host NUMA topology" msgstr "所要求的實例 NUMA 拓蹼及所要求的 PCI 裝置無法適合給定的主機 NUMA 拓蹼" #, python-format msgid "" "Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to " "satisfy for vcpus count %(vcpus)d" msgstr "" "所要求的 vCPU 限制 %(sockets)d:%(cores)d:%(threads)d 無法滿足 vCPU 計數 " "%(vcpus)d" #, python-format msgid "Rescue device does not exist for instance %s" msgstr "實例 %s 的救援裝置不存在" #, python-format msgid "Resize error: %(reason)s" msgstr "調整大小錯誤:%(reason)s" msgid "Resize to zero disk flavor is not allowed." msgstr "不容許調整大小至 0 磁碟特性。" msgid "Resource could not be found." msgstr "找不到資源。" msgid "Resumed" msgstr "已恢復" #, python-format msgid "Root element name should be '%(name)s' not '%(tag)s'" msgstr "根元素名稱應該為 '%(name)s' 而不是 '%(tag)s'" #, python-format msgid "Running batches of %i until complete" msgstr "正在執行 %i 的各個批次,直到完成為止" #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." 
msgstr "找不到「排程器主機過濾器」%(filter_name)s。" #, python-format msgid "Security group %(name)s is not found for project %(project)s" msgstr "找不到專案 %(project)s 的安全群組 %(name)s" #, python-format msgid "" "Security group %(security_group_id)s not found for project %(project_id)s." msgstr "找不到專案 %(project_id)s 的安全群組 %(security_group_id)s。" #, python-format msgid "Security group %(security_group_id)s not found." msgstr "找不到安全群組 %(security_group_id)s。" #, python-format msgid "" "Security group %(security_group_name)s already exists for project " "%(project_id)s." msgstr "專案 %(project_id)s 已存在安全群組%(security_group_name)s。" #, python-format msgid "" "Security group %(security_group_name)s not associated with the instance " "%(instance)s" msgstr "安全群組 %(security_group_name)s 未與實例 %(instance)s 產生關聯" msgid "Security group id should be uuid" msgstr "安全群組 ID 應該是 UUID" msgid "Security group name cannot be empty" msgstr "安全群組名稱不能是空的" msgid "Security group not specified" msgstr "未指定安全群組" #, python-format msgid "Server disk was unable to be resized because: %(reason)s" msgstr "無法調整伺服器磁碟的大小,原因:%(reason)s" msgid "Server does not exist" msgstr "伺服器不存在" #, python-format msgid "ServerGroup policy is not supported: %(reason)s" msgstr "不支援 ServerGroup 原則:%(reason)s" msgid "ServerGroupAffinityFilter not configured" msgstr "未配置 ServerGroupAffinityFilter" msgid "ServerGroupAntiAffinityFilter not configured" msgstr "未配置 ServerGroupAntiAffinityFilter" msgid "ServerGroupSoftAffinityWeigher not configured" msgstr "未配置 ServerGroupSoftAffinityWeigher" msgid "ServerGroupSoftAntiAffinityWeigher not configured" msgstr "未配置 ServerGroupSoftAntiAffinityWeigher" #, python-format msgid "Service %(service_id)s could not be found." msgstr "找不到服務 %(service_id)s。" #, python-format msgid "Service %s not found." msgstr "找不到服務 %s。" msgid "Service is unavailable at this time." msgstr "此時無法使用服務。" #, python-format msgid "Service with host %(host)s binary %(binary)s exists." msgstr "主機為 %(host)s 且二進位檔為 %(binary)s 的服務已存在。" #, python-format msgid "Service with host %(host)s topic %(topic)s exists." msgstr "主機為 %(host)s 且主題為 %(topic)s 的服務已存在。" msgid "Set admin password is not supported" msgstr "不支援設定管理密碼" #, python-format msgid "Share '%s' is not supported" msgstr "不支援共用 '%s'" #, python-format msgid "Share level '%s' cannot have share configured" msgstr "共用層次 '%s' 不能配置共用" #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "找不到 Snapshot %(snapshot_id)s。" msgid "Some required fields are missing" msgstr "遺漏了部分必要欄位" #, python-format msgid "" "Something went wrong when deleting a volume snapshot: rebasing a " "%(protocol)s network disk using qemu-img has not been fully tested" msgstr "" "刪除磁區 Snapshot 時發生問題:尚未完全測試使用 qemu-img 來重設 %(protocol)s " "網路磁碟的基線" msgid "Sort direction size exceeds sort key size" msgstr "排序方向大小超過排序鍵大小" msgid "Sort key supplied was not valid." msgstr "提供的排序鍵無效。" msgid "Specified fixed address not assigned to instance" msgstr "沒有將所指定的固定位址指派給實例" msgid "Started" msgstr "已開始" msgid "Stopped" msgstr "已停止" #, python-format msgid "Storage error: %(reason)s" msgstr "儲存體錯誤:%(reason)s" #, python-format msgid "Storage policy %s did not match any datastores" msgstr "儲存體原則 %s 不符合任何資料儲存庫" msgid "Success" msgstr "成功" msgid "Suspended" msgstr "已停止" msgid "Swap drive requested is larger than instance type allows." 
msgstr "所要求的交換磁碟機,大於實例類型所容許的容量。" msgid "Table" msgstr "表格" #, python-format msgid "Task %(task_name)s is already running on host %(host)s" msgstr "作業 %(task_name)s 已經在主機 %(host)s 上執行" #, python-format msgid "Task %(task_name)s is not running on host %(host)s" msgstr "作業 %(task_name)s 未在主機 %(host)s 上執行" #, python-format msgid "The PCI address %(address)s has an incorrect format." msgstr "PCI 位址 %(address)s 的格式不正確。" #, python-format msgid "The console port range %(min_port)d-%(max_port)d is exhausted." msgstr "主控台埠範圍 %(min_port)d-%(max_port)d 已耗盡。" msgid "The created instance's disk would be too small." msgstr "已建立實例的磁碟將太小。" msgid "The current driver does not support preserving ephemeral partitions." msgstr "現行驅動程式不支援保留暫時分割區。" msgid "The default PBM policy doesn't exist on the backend." msgstr "預設 PBM 原則不存在於後端上。" msgid "The floating IP request failed with a BadRequest" msgstr "浮動 IP 要求失敗,發生 BadRequest" msgid "" "The instance requires a newer hypervisor version than has been provided." msgstr "實例需要比所提供版本還新的 Hypervisor 版本。" #, python-format msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d" msgstr "所定義的埠數目 %(ports)d 超出限制:%(quota)d" #, python-format msgid "The provided RNG device path: (%(path)s) is not present on the host." msgstr "主機上不存在所提供的 RNG 裝置路徑:(%(path)s)。" msgid "The request is invalid." msgstr "要求無效。" #, python-format msgid "" "The requested amount of video memory %(req_vram)d is higher than the maximum " "allowed by flavor %(max_vram)d." msgstr "" "所要求的視訊記憶體數量 %(req_vram)d 大於特性所容許的上限 %(max_vram)d。" msgid "The requested availability zone is not available" msgstr "所要求的可用性區域無法使用" msgid "The requested functionality is not supported." msgstr "所要求的功能不受支援。" #, python-format msgid "The specified cluster '%s' was not found in vCenter" msgstr "在 vCenter 中找不到指定的叢集 '%s'" #, python-format msgid "The supplied device path (%(path)s) is in use." msgstr "提供的裝置路徑 (%(path)s) 已在使用中。" #, python-format msgid "The supplied device path (%(path)s) is invalid." msgstr "提供的裝置路徑 (%(path)s) 無效。" #, python-format msgid "" "The supplied disk path (%(path)s) already exists, it is expected not to " "exist." msgstr "提供的磁碟路徑 (%(path)s) 已存在,但它不應該存在。" msgid "The supplied hypervisor type of is invalid." msgstr "提供的 Hypervisor 類型無效。" msgid "The target host can't be the same one." msgstr "目標主機不能是相同主機。" #, python-format msgid "The token '%(token)s' is invalid or has expired" msgstr "記號 '%(token)s' 無效或已過期" #, python-format msgid "" "The volume cannot be assigned the same device name as the root device %s" msgstr "無法對磁區指派與根裝置 %s 相同的裝置名稱" msgid "There are not enough hosts available." msgstr "沒有足夠的可用主機。" #, python-format msgid "There is no such action: %s" msgstr "沒有這樣的動作:%s" #, python-format msgid "" "This compute node's hypervisor is older than the minimum supported version: " "%(version)s." msgstr "這部電腦節點的 Hypervisor 版本低於所支援的版本下限:%(version)s。" msgid "" "This method needs to be called with either networks=None and port_ids=None " "or port_ids and networks as not none." msgstr "" "需要在 networks=None 且 port_ids=None 或者 port_ids 及 networks 都不為 None " "時,呼叫此方法。" #, python-format msgid "This rule already exists in group %s" msgstr "此規則已存在於群組 %s 中" #, python-format msgid "" "This service is older (v%(thisver)i) than the minimum (v%(minver)i) version " "of the rest of the deployment. Unable to continue." 
msgstr "" "此服務的版本 (v%(thisver)i) 低於其餘部署的版本下限 (v%(minver)i)。無法繼續。" msgid "Timeout waiting for response from cell" msgstr "等候 Cell 回應時發生逾時" #, python-format msgid "Timeout while checking if we can live migrate to host: %s" msgstr "在檢查是否可以即時移轉至主機時逾時:%s" msgid "To and From ports must be integers" msgstr "目標埠和來源埠必須是整數" msgid "Token not found" msgstr "找不到記號" msgid "Triggering crash dump is not supported" msgstr "不支援觸發損毀傾出" msgid "Type and Code must be integers for ICMP protocol type" msgstr "ICMP 通訊協定類型的類型及代碼必須是整數" msgid "UEFI is not supported" msgstr "不支援 UEFI" #, python-format msgid "" "Unable to associate floating IP %(address)s to any fixed IPs for instance " "%(id)s. Instance has no fixed IPv4 addresses to associate." msgstr "" "無法將浮動 IP %(address)s 關聯至實例 %(id)s 的任何固定 IP。該實例沒有固定 " "IPv4 位址與其相關聯。" #, python-format msgid "" "Unable to associate floating IP %(address)s to fixed IP %(fixed_address)s " "for instance %(id)s. Error: %(error)s" msgstr "" "無法將浮動 IP %(address)s 關聯至實例 %(id)s 的固定 IP %(fixed_address)s。錯" "誤:%(error)s" #, python-format msgid "Unable to convert image to %(format)s: %(exp)s" msgstr "無法將映像檔轉換為 %(format)s:%(exp)s" #, python-format msgid "Unable to convert image to raw: %(exp)s" msgstr "無法將映像檔轉換為原始格式:%(exp)s" #, python-format msgid "Unable to determine disk bus for '%s'" msgstr "無法判定 '%s' 的磁碟匯流排" #, python-format msgid "Unable to determine disk prefix for %s" msgstr "無法判定 %s 的磁碟字首" #, python-format msgid "Unable to find host for Instance %s" msgstr "找不到實例 %s 的主機" msgid "Unable to find iSCSI Target" msgstr "找不到 iSCSI 目標" msgid "Unable to find volume" msgstr "找不到磁區" msgid "Unable to get host UUID: /etc/machine-id does not exist" msgstr "無法取得主機 UUID:/etc/machine-id 不存在" msgid "Unable to get host UUID: /etc/machine-id is empty" msgstr "無法取得主機 UUID:/etc/machine-id 是空的" msgid "" "Unable to launch multiple instances with a single configured port ID. Please " "launch your instance one by one with different ports." msgstr "無法以單一配置埠 ID 來啟動多個實例。請使用不同的埠,逐一啟動實例。" #, python-format msgid "" "Unable to migrate %(instance_uuid)s to %(dest)s: Lack of memory(host:" "%(avail)s <= instance:%(mem_inst)s)" msgstr "" "無法將 %(instance_uuid)s 移轉至 %(dest)s:記憶體不足(主機:%(avail)s <= 實" "例:%(mem_inst)s)" #, python-format msgid "" "Unable to migrate %(instance_uuid)s: Disk of instance is too large(available " "on destination host:%(available)s < need:%(necessary)s)" msgstr "" "無法移轉 %(instance_uuid)s:實例的磁碟太大(目的地主機上的可用空間:" "%(available)s < 需要的空間:%(necessary)s)" #, python-format msgid "" "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." msgstr "無法將實例 (%(instance_id)s) 移轉至現行主機 (%(host)s)。" msgid "Unable to resize disk down." msgstr "無法將磁碟大小調小。" msgid "Unable to set password on instance" msgstr "無法在實例上設定密碼" msgid "Unable to shrink disk." msgstr "無法收縮磁碟。" #, python-format msgid "Unacceptable CPU info: %(reason)s" msgstr "無法接受的 CPU 資訊:%(reason)s" msgid "Unacceptable parameters." msgstr "不可接受的參數值" #, python-format msgid "Unavailable console type %(console_type)s." msgstr "無法使用的主控台類型 %(console_type)s。" msgid "" "Undefined Block Device Mapping root: BlockDeviceMappingList contains Block " "Device Mappings from multiple instances." 
msgstr "" "未定義的「區塊裝置對映」根:BlockDeviceMappingList 包含來自多個實例的「區塊裝" "置對映」。" #, python-format msgid "Unexpected aggregate action %s" msgstr "非預期的聚集動作 %s" msgid "Unexpected type adding stats" msgstr "新增統計資料時遇到非預期的類型" #, python-format msgid "Unexpected vif_type=%s" msgstr "非預期的 vif_type = %s" msgid "Unknown" msgstr "未知" msgid "Unknown action" msgstr "不明動作" #, python-format msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat." msgstr "不明的配置磁碟機格式 %(format)s。請選取 iso9660 或 vfat 的其中之一。" #, python-format msgid "Unknown delete_info type %s" msgstr "不明的 delete_info 類型 %s" #, python-format msgid "Unknown image_type=%s" msgstr "不明的 image_type = %s" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "不明的配額資源 %(unknown)s。" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "不明的排序方向,必須為 'desc' 或 'asc'" #, python-format msgid "Unknown type: %s" msgstr "不明的類型:%s" msgid "Unrecognized legacy format." msgstr "無法辨識的舊格式。" #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "無法辨識 read_deleted 值 '%s'" #, python-format msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action" msgstr "無法辨識 CONF.running_deleted_instance_action 的值 '%s'" #, python-format msgid "Unshelve attempted but the image %s cannot be found." msgstr "已嘗試解除擱置,但卻找不到映像檔 %s。" msgid "Unsupported Content-Type" msgstr "不支援的內容類型" #, python-format msgid "User %(username)s not found in password file." msgstr "在密碼檔中找不到使用者 %(username)s。" #, python-format msgid "User %(username)s not found in shadow file." msgstr "在備份副本檔中找不到使用者 %(username)s。" msgid "User data needs to be valid base 64." msgstr "使用者資料必須是有效的 base64。" msgid "User does not have admin privileges" msgstr "使用者並沒有管理者權力" msgid "" "Using different block_device_mapping syntaxes is not allowed in the same " "request." msgstr "同一個要求中不容許使用其他 block_device_mapping語法。" #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "API 不支援 %(req_ver)s 版。最低為 %(min_ver)s,最高為 %(max_ver)s。" msgid "Virtual Interface creation failed" msgstr "建立虛擬介面失敗" msgid "Virtual interface plugin failed" msgstr "虛擬介面外掛程式失敗" #, python-format msgid "Virtual machine mode '%(vmmode)s' is not recognised" msgstr "未辨識虛擬機器模式 '%(vmmode)s'" #, python-format msgid "Virtual machine mode '%s' is not valid" msgstr "虛擬機器模式 '%s' 無效" #, python-format msgid "Virtualization type '%(virt)s' is not supported by this compute driver" msgstr "此計算驅動程式不支援虛擬化類型 '%(virt)s'" #, python-format msgid "Volume %(volume_id)s could not be attached. Reason: %(reason)s" msgstr "無法連接磁區 %(volume_id)s。原因:%(reason)s" #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "找不到磁區 %(volume_id)s。" #, python-format msgid "" "Volume %(volume_id)s did not finish being created even after we waited " "%(seconds)s seconds or %(attempts)s attempts. And its status is " "%(volume_status)s." msgstr "" "即使在等待 %(seconds)s 秒或者嘗試 %(attempts)s 次之後,也未完成建立磁區 " "%(volume_id)s。並且它的狀態是%(volume_status)s。" msgid "Volume does not belong to the requested instance." msgstr "磁區不屬於所要求的實例。" #, python-format msgid "" "Volume encryption is not supported for %(volume_type)s volume %(volume_id)s" msgstr "%(volume_type)s 的磁區 %(volume_id)s 不支援磁區加密" #, python-format msgid "" "Volume is smaller than the minimum size specified in image metadata. Volume " "size is %(volume_size)i bytes, minimum size is %(image_min_disk)i bytes." 
msgstr "" "磁區小於映像檔 meta 資料中指定的大小下限。磁區大小為 %(volume_size)i 位元組," "大小下限為 %(image_min_disk)i 位元組。" #, python-format msgid "" "Volume sets block size, but the current libvirt hypervisor '%s' does not " "support custom block size" msgstr "由磁區設定區塊大小,但現行 libVirt Hypervisor '%s' 不支援自訂區塊大小" msgid "When resizing, instances must change flavor!" msgstr "重新調整大小時,實例必須變更特性!" #, python-format msgid "Wrong quota method %(method)s used on resource %(res)s" msgstr "在資源 %(res)s 上使用了錯誤的配額方法 %(method)s" msgid "X-Forwarded-For is missing from request." msgstr "要求遺漏了 X-Forwarded-For。" msgid "X-Instance-ID header is missing from request." msgstr "要求遺漏了 X-Instance-ID 標頭。" msgid "X-Instance-ID-Signature header is missing from request." msgstr "要求中遺漏 X-Instance-ID-Signature 標頭。" msgid "X-Metadata-Provider is missing from request." msgstr "要求遺漏了 X-Metadata-Provider。" msgid "X-Tenant-ID header is missing from request." msgstr "要求遺漏了 X-Tenant-ID 標頭。" msgid "You are not allowed to delete the image." msgstr "不容許您刪除該映像檔。" msgid "" "You are not authorized to access the image the instance was started with." msgstr "您未獲授權來存取已用來啟動實例的映像檔。" msgid "You must implement __call__" msgstr "必須實作 __call__" msgid "You should specify images_rbd_pool flag to use rbd images." msgstr "應該指定 images_rbd_pool 旗標以使用 rbd 映像檔。" msgid "You should specify images_volume_group flag to use LVM images." msgstr "應該指定 images_volume_group 旗標以使用 LVM 映像檔。" msgid "Zero floating IPs available." msgstr "有 0 個浮動 IP 可供使用。" msgid "admin password can't be changed on existing disk" msgstr "無法在現有磁碟上變更管理者密碼" msgid "cannot understand JSON" msgstr "無法理解 JSON" msgid "clone() is not implemented" msgstr "未實作 clone()" #, python-format msgid "connect info: %s" msgstr "連接資訊:%s" #, python-format msgid "connecting to: %(host)s:%(port)s" msgstr "正在連接至:%(host)s:%(port)s" msgid "direct_snapshot() is not implemented" msgstr "未實作 direct_snapshot()" #, python-format msgid "disk type '%s' not supported" msgstr "磁碟類型 '%s' 不受支援" #, python-format msgid "empty project id for instance %s" msgstr "實例 %s 的專案 ID 是空的" msgid "error setting admin password" msgstr "設定管理者密碼時發生錯誤" #, python-format msgid "error: %s" msgstr "錯誤:%s" #, python-format msgid "failed to generate X509 fingerprint. Error message: %s" msgstr "無法產生 X509 指紋。錯誤訊息:%s" msgid "failed to generate fingerprint" msgstr "無法產生指紋" msgid "filename cannot be None" msgstr "檔名不能為 None" msgid "floating IP is already associated" msgstr "已與浮動 IP 產生關聯" msgid "floating IP not found" msgstr "找不到浮動 IP" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt = %(fmt)s 受 %(backing_file)s 支援" #, python-format msgid "href %s does not contain version" msgstr "href %s 不包含版本" msgid "image already mounted" msgstr "已裝載映像檔" #, python-format msgid "instance %s is not running" msgstr "實例 %s 未在執行中" msgid "instance is a required argument to use @refresh_cache" msgstr "實例是使用 @refresh_cache 的必要引數" msgid "instance is not in a suspended state" msgstr "實例不處於暫停狀態" msgid "instance is not powered on" msgstr "未開啟實例的電源" msgid "instance is powered off and cannot be suspended." 
msgstr "實例已關閉電源,無法暫停。" #, python-format msgid "instance_id %s could not be found as device id on any ports" msgstr "找不到 instance_id %s 來作為任何埠上的裝置 ID" msgid "is_public must be a boolean" msgstr "is_public 必須是布林值" msgid "keymgr.fixed_key not defined" msgstr "未定義 keymgr.fixed_key" msgid "l3driver call to add floating IP failed" msgstr "l3driver 呼叫以新增浮動 IP 失敗" #, python-format msgid "libguestfs installed but not usable (%s)" msgstr "libguestfs 已安裝,但卻無法使用 (%s)" #, python-format msgid "libguestfs is not installed (%s)" msgstr "未安裝 libguestfs (%s)" #, python-format msgid "marker [%s] not found" msgstr "找不到標記 [%s]" #, python-format msgid "max rows must be <= %(max_value)d" msgstr "列數上限必須小於或等於 %(max_value)d" msgid "max_count cannot be greater than 1 if an fixed_ip is specified." msgstr "如果指定了 fixed_ip,則 max_count 不得大於 1。" msgid "min_count must be <= max_count" msgstr "min_count 必須 <= max_count" #, python-format msgid "nbd device %s did not show up" msgstr "NBD 裝置 %s 未顯示" msgid "nbd unavailable: module not loaded" msgstr "NBD 無法使用:未載入模組" #, python-format msgid "no match found for %s" msgstr "找不到 %s 的相符項" #, python-format msgid "no usable parent snapshot for volume %s" msgstr "磁區 %s 沒有可使用的母項 Snapshot" #, python-format msgid "no write permission on storage pool %s" msgstr "對儲存區 %s 沒有寫入權" #, python-format msgid "not able to execute ssh command: %s" msgstr "無法執行 SSH 指令:%s" msgid "old style configuration can use only dictionary or memcached backends" msgstr "舊樣式配置只能使用字典或 Memcached 後端" msgid "operation time out" msgstr "作業逾時" #, python-format msgid "partition %s not found" msgstr "找不到分割區 %s" #, python-format msgid "partition search unsupported with %s" msgstr "%s 不支援進行分割區搜尋" msgid "pause not supported for vmwareapi" msgstr "vmwareapi 不支援暫停" msgid "printable characters with at least one non space character" msgstr "含有至少一個非空格字元的可列印字元" msgid "printable characters. Can not start or end with whitespace." msgstr "可列印字元。不能以空格開頭或結尾。" #, python-format msgid "qemu-img failed to execute on %(path)s : %(exp)s" msgstr "qemu-img 無法在 %(path)s 上執行:%(exp)s" #, python-format msgid "qemu-nbd error: %s" msgstr "qemu-nbd 錯誤:%s" msgid "rbd python libraries not found" msgstr "找不到 rbd Python 程式庫" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "read_deleted 只能是 'no'、'yes' 或 'only' 其中之一,不能是 %r" msgid "serve() can only be called once" msgstr "只能呼叫 serve() 一次" msgid "service is a mandatory argument for DB based ServiceGroup driver" msgstr "服務是 DB 型 ServiceGroup 驅動程式的必要引數" msgid "service is a mandatory argument for Memcached based ServiceGroup driver" msgstr "服務是 Memcached 型 ServiceGroup 驅動程式的必要引數" msgid "set_admin_password is not implemented by this driver or guest instance." 
msgstr "set_admin_password 不是由此驅動程式或來賓實例實作。" #, python-format msgid "snapshot for %s" msgstr "%s 的 Snapshot" msgid "snapshot_id required in create_info" msgstr "create_info 中需要 snapshot_id" msgid "token not provided" msgstr "未提供記號" msgid "too many body keys" msgstr "主體金鑰太多" msgid "unpause not supported for vmwareapi" msgstr "vmwareapi 不支援取消暫停" #, python-format msgid "vg %s must be LVM volume group" msgstr "磁區群組 %s 必須是 LVM 磁區群組" #, python-format msgid "vhostuser_sock_path not present in vif_details for vif %(vif_id)s" msgstr "vhostuser_sock_path 未出現在 VIF %(vif_id)s 的 vif_details 中" #, python-format msgid "vif type %s not supported" msgstr "VIF 類型 %s 不受支援" msgid "vif_type parameter must be present for this vif_driver implementation" msgstr "此 vif_driver 實作的 vif_type 參數必須存在" #, python-format msgid "volume %s already attached" msgstr "已連接磁區 %s" #, python-format msgid "" "volume '%(vol)s' status must be 'in-use'. Currently in '%(status)s' status" msgstr "磁區 '%(vol)s' 狀態必須為「使用中」。目前處於「%(status)s」狀態" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/manager.py0000664000175000017500000001275600000000000015524 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base Manager class. Managers are responsible for a certain aspect of the system. It is a logical grouping of code relating to a portion of the system. In general other components should be using the manager to make changes to the components that it is responsible for. For example, other components that need to deal with volumes in some way, should do so by calling methods on the VolumeManager instead of directly changing fields in the database. This allows us to keep all of the code relating to volumes in the same place. We have adopted a basic strategy of Smart managers and dumb data, which means rather than attaching methods to data objects, components should call manager methods that act on the data. Methods on managers that can be executed locally should be called directly. If a particular method must execute on a remote host, this should be done via rpc to the service that wraps the manager Managers should be responsible for most of the db access, and non-implementation specific data. Anything implementation specific that can't be generalized should be done by the Driver. In general, we prefer to have one manager with multiple drivers for different implementations, but sometimes it makes sense to have multiple managers. You can think of it this way: Abstract different overall strategies at the manager level(FlatNetwork vs VlanNetwork), and different implementations at the driver level(LinuxNetDriver vs CiscoNetDriver). Managers will often provide methods for initial setup of a host or periodic tasks to a wrapping service. This module provides Manager, a base class for managers. 
""" from oslo_service import periodic_task import nova.conf import nova.db.main.api from nova import profiler from nova import rpc CONF = nova.conf.CONF class PeriodicTasks(periodic_task.PeriodicTasks): def __init__(self): super(PeriodicTasks, self).__init__(CONF) class ManagerMeta(profiler.get_traced_meta(), type(PeriodicTasks)): """Metaclass to trace all children of a specific class. This metaclass wraps every public method (not starting with _ or __) of the class using it. All children classes of the class using ManagerMeta will be profiled as well. Adding this metaclass requires that the __trace_args__ attribute be added to the class we want to modify. That attribute is a dictionary with one mandatory key: "name". "name" defines the name of the action to be traced (for example, wsgi, rpc, db). The OSprofiler-based tracing, although, will only happen if profiler instance was initiated somewhere before in the thread, that can only happen if profiling is enabled in nova.conf and the API call to Nova API contained specific headers. """ class Manager(PeriodicTasks, metaclass=ManagerMeta): __trace_args__ = {"name": "rpc"} def __init__(self, host=None, service_name='undefined'): if not host: host = CONF.host self.host = host self.backdoor_port = None self.service_name = service_name self.notifier = rpc.get_notifier(self.service_name, self.host) self.additional_endpoints = [] super(Manager, self).__init__() def periodic_tasks(self, context, raise_on_error=False): """Tasks to be run at a periodic interval.""" return self.run_periodic_tasks(context, raise_on_error=raise_on_error) def init_host(self, service_ref): """Hook to do additional manager initialization when one requests the service be started. This is called before any service record is created, but if one already exists for this service, it is provided. Child classes should override this method. :param service_ref: An objects.Service if one exists, else None. """ pass def cleanup_host(self): """Hook to do cleanup work when the service shuts down. Child classes should override this method. """ pass def pre_start_hook(self, service_ref): """Hook to provide the manager the ability to do additional start-up work before any RPC queues/consumers are created. This is called after other initialization has succeeded and a service record is created. Child classes should override this method. :param service_ref: The nova.objects.Service for this """ pass def post_start_hook(self): """Hook to provide the manager the ability to do additional start-up work immediately after a service creates RPC consumers and starts 'running'. Child classes should override this method. """ pass def reset(self): """Hook called on SIGHUP to signal the manager to re-read any dynamic configuration or do any reconfiguration tasks. """ pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/middleware.py0000664000175000017500000000307400000000000016220 0ustar00zuulzuul00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_middleware import cors def set_defaults(): """Update default configuration options for oslo.middleware.""" cors.set_defaults( allow_headers=['X-Auth-Token', 'X-Openstack-Request-Id', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id', 'X-OpenStack-Nova-API-Version', 'OpenStack-API-Version'], expose_headers=['X-Auth-Token', 'X-Openstack-Request-Id', 'X-Subject-Token', 'X-Service-Token', 'X-OpenStack-Nova-API-Version', 'OpenStack-API-Version'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/monkey_patch.py0000664000175000017500000001206200000000000016561 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # Copyright 2019 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Enable eventlet monkey patching.""" import os MONKEY_PATCHED = False def is_patched(): return MONKEY_PATCHED def _monkey_patch(): if is_patched(): return False # NOTE(mdbooth): Anything imported here will not be monkey patched. It is # important to take care not to import anything here which requires monkey # patching. # NOTE(artom) eventlet processes environment variables at import-time. # as such any eventlet configuration should happen here if needed. import eventlet import sys # Note any modules with known monkey-patching issues which have been # imported before monkey patching. # urllib3: https://bugs.launchpad.net/nova/+bug/1808951 # oslo_context.context: https://bugs.launchpad.net/nova/+bug/1773102 problems = (set(['urllib3', 'oslo_context.context']) & set(sys.modules.keys())) eventlet.monkey_patch() # NOTE(mdbooth): Log here instead of earlier to avoid loading oslo logging # before monkey patching. # NOTE(mdbooth): Ideally we would raise an exception here, as this is # likely to cause problems when executing nova code. However, some non-nova # tools load nova only to extract metadata and do not execute it. Two # examples are oslopolicy-policy-generator and sphinx, both of which can # fail if we assert here. It is not ideal that these utilities are monkey # patching at all, but we should not break them. # TODO(mdbooth): If there is any way to reliably determine if we are being # loaded in that kind of context without breaking existing callers, we # should do it and bypass monkey patching here entirely. if problems: from oslo_log import log as logging LOG = logging.getLogger(__name__) LOG.warning("Modules with known eventlet monkey patching issues were " "imported prior to eventlet monkey patching: %s. 
This " "warning can usually be ignored if the caller is only " "importing and not executing nova code.", ', '.join(problems)) return True def patch(): if (os.environ.get('OS_NOVA_DISABLE_EVENTLET_PATCHING', '').lower() not in ('1', 'true', 'yes')): if _monkey_patch(): global MONKEY_PATCHED MONKEY_PATCHED = True import oslo_service.backend as service service.init_backend(service.BackendType.EVENTLET) from oslo_log import log as logging LOG = logging.getLogger(__name__) LOG.info("Service is starting with Eventlet based service backend") else: # We asked not to monkey patch so we will run in native threading mode import oslo_service.backend as service # NOTE(gibi): This will raise if the backend is already initialized # with Eventlet service.init_backend(service.BackendType.THREADING) # NOTE(gibi): We were asked not to monkey patch. Let's enforce it by # removing the possibility to monkey_patch accidentally poison_eventlet() from oslo_log import log as logging LOG = logging.getLogger(__name__) LOG.warning( "Service is starting with native threading. This is currently " "experimental. Do not use it in production without first " "testing it in pre-production.") def _poison(*args, **kwargs): raise RuntimeError( "The service is started with native threading via " "OS_NOVA_DISABLE_EVENTLET_PATCHING set to '%s', but then the " "service tried to call eventlet.monkey_patch(). This is a bug." % os.environ.get('OS_NOVA_DISABLE_EVENTLET_PATCHING', '')) def poison_eventlet(): import eventlet eventlet.monkey_patch = _poison eventlet.patcher.monkey_patch = _poison # We want to have this but cannot have this yet as we still have common # code that imports eventlet like nova.utils.tpool # # class PoisonEventletImport: # def find_spec(self, fullname, path, target=None): # if fullname.startswith('eventlet'): # raise ImportError( # "The service started in threading mode so it should " # "not import eventlet") # import sys # sys.meta_path.insert(0, PoisonEventletImport()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3736086 nova-32.0.0/nova/network/0000775000175000017500000000000000000000000015216 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/network/__init__.py0000664000175000017500000000000000000000000017315 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/network/constants.py0000664000175000017500000000276300000000000017614 0ustar00zuulzuul00000000000000# Copyright 2013 UnitedStack Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Port fields BINDING_PROFILE = 'binding:profile' BINDING_HOST_ID = 'binding:host_id' RESOURCE_REQUEST = 'resource_request' REQUEST_GROUPS = 'request_groups' NUMA_POLICY = 'numa_affinity_policy' # Binding profile fields MIGRATING_ATTR = 'migrating_to' ALLOCATION = 'allocation' # Core extensions DNS_INTEGRATION = 'dns-integration' MULTI_PROVIDER = 'multi-provider' FIP_PORT_DETAILS = 'fip-port-details' PORT_BINDING = 'binding' PORT_BINDING_EXTENDED = 'binding-extended' SUBSTR_PORT_FILTERING = 'ip-substring-filtering' SEGMENT = 'segment' RESOURCE_REQUEST_GROUPS = 'port-resource-request-groups' SG_SHARED_FILTER = "security-groups-shared-filtering" # Third-party extensions VNIC_INDEX = 'vnic-index' # this is provided by the vmware_nsx project # Search fields NET_EXTERNAL = 'router:external' # Misc DEFAULT_SECGROUP = 'default' L3_NETWORK_TYPES = ['vxlan', 'gre', 'geneve'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/network/model.py0000664000175000017500000005064500000000000016702 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import netaddr from oslo_serialization import jsonutils from nova import exception from nova.i18n import _ from nova import utils # Constants for the 'vif_type' field in VIF class VIF_TYPE_OVS = 'ovs' VIF_TYPE_IVS = 'ivs' VIF_TYPE_DVS = 'dvs' VIF_TYPE_IOVISOR = 'iovisor' VIF_TYPE_BRIDGE = 'bridge' VIF_TYPE_802_QBG = '802.1qbg' VIF_TYPE_802_QBH = '802.1qbh' VIF_TYPE_HW_VEB = 'hw_veb' VIF_TYPE_HYPERV = 'hyperv' VIF_TYPE_HOSTDEV = 'hostdev_physical' VIF_TYPE_IB_HOSTDEV = 'ib_hostdev' VIF_TYPE_MIDONET = 'midonet' VIF_TYPE_VHOSTUSER = 'vhostuser' VIF_TYPE_VROUTER = 'vrouter' VIF_TYPE_OTHER = 'other' VIF_TYPE_TAP = 'tap' VIF_TYPE_MACVTAP = 'macvtap' VIF_TYPE_AGILIO_OVS = 'agilio_ovs' VIF_TYPE_BINDING_FAILED = 'binding_failed' VIF_TYPE_VIF = 'vif' VIF_TYPE_UNBOUND = 'unbound' # Constants for dictionary keys in the 'vif_details' field in the VIF # class VIF_DETAILS_PORT_FILTER = 'port_filter' VIF_DETAILS_OVS_HYBRID_PLUG = 'ovs_hybrid_plug' VIF_DETAILS_PHYSICAL_NETWORK = 'physical_network' VIF_DETAILS_BRIDGE_NAME = 'bridge_name' VIF_DETAILS_OVS_DATAPATH_TYPE = 'datapath_type' # The following constant defines an SR-IOV related parameter in the # 'vif_details'. 'profileid' should be used for VIF_TYPE_802_QBH VIF_DETAILS_PROFILEID = 'profileid' # The following constant defines an SR-IOV and macvtap related parameter in # the 'vif_details'. 'vlan' should be used for VIF_TYPE_HW_VEB or # VIF_TYPE_MACVTAP VIF_DETAILS_VLAN = 'vlan' # The following three constants define the macvtap related fields in # the 'vif_details'. VIF_DETAILS_MACVTAP_SOURCE = 'macvtap_source' VIF_DETAILS_MACVTAP_MODE = 'macvtap_mode' VIF_DETAILS_PHYS_INTERFACE = 'physical_interface' # Constants for vhost-user related fields in 'vif_details'. 
# Sets mode on vhost-user socket, valid values are 'client' # and 'server' VIF_DETAILS_VHOSTUSER_MODE = 'vhostuser_mode' # vhost-user socket path VIF_DETAILS_VHOSTUSER_SOCKET = 'vhostuser_socket' # Specifies whether vhost-user socket should be plugged # into ovs bridge. Valid values are True and False VIF_DETAILS_VHOSTUSER_OVS_PLUG = 'vhostuser_ovs_plug' # Specifies whether vhost-user socket should be used to # create a fp netdevice interface. VIF_DETAILS_VHOSTUSER_FP_PLUG = 'vhostuser_fp_plug' # Specifies whether vhost-user socket should be used to # create a vrouter netdevice interface # TODO(mhenkel): Consider renaming this to be contrail-specific. VIF_DETAILS_VHOSTUSER_VROUTER_PLUG = 'vhostuser_vrouter_plug' # Constants for dictionary keys in the 'vif_details' field that are # valid for VIF_TYPE_TAP. VIF_DETAILS_TAP_MAC_ADDRESS = 'mac_address' # Open vSwitch datapath types. VIF_DETAILS_OVS_DATAPATH_SYSTEM = 'system' VIF_DETAILS_OVS_DATAPATH_NETDEV = 'netdev' # Define supported virtual NIC types. VNIC_TYPE_DIRECT and VNIC_TYPE_MACVTAP # are used for SR-IOV ports VNIC_TYPE_NORMAL = 'normal' VNIC_TYPE_DIRECT = 'direct' VNIC_TYPE_MACVTAP = 'macvtap' VNIC_TYPE_DIRECT_PHYSICAL = 'direct-physical' VNIC_TYPE_BAREMETAL = 'baremetal' VNIC_TYPE_VIRTIO_FORWARDER = 'virtio-forwarder' VNIC_TYPE_VDPA = 'vdpa' VNIC_TYPE_ACCELERATOR_DIRECT = 'accelerator-direct' VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL = 'accelerator-direct-physical' VNIC_TYPE_REMOTE_MANAGED = "remote-managed" # Define list of ports which needs pci request. # Note: The macvtap port needs a PCI request as it is a tap interface # with VF as the lower physical interface. # Note: Currently, VNIC_TYPE_VIRTIO_FORWARDER assumes a 1:1 # relationship with a VF. This is expected to change in the future. # Note: # VNIC_TYPE_ACCELERATOR_DIRECT and VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL # does not need a PCI request, these devices are not tracked by the pci # tracker in nova but tracked by cyborg. The scheduling will use the # cyborg provided resource request to find a compute with such devices, # and the device claiming will be done via binding the cyborg arqs to the # selected compute node. VNIC_TYPES_SRIOV = ( VNIC_TYPE_DIRECT, VNIC_TYPE_MACVTAP, VNIC_TYPE_DIRECT_PHYSICAL, VNIC_TYPE_VIRTIO_FORWARDER, VNIC_TYPE_VDPA, VNIC_TYPE_REMOTE_MANAGED ) # Define list of ports which are passthrough to the guest # and need a special treatment on snapshot and suspend/resume VNIC_TYPES_DIRECT_PASSTHROUGH = ( VNIC_TYPE_DIRECT, VNIC_TYPE_DIRECT_PHYSICAL, VNIC_TYPE_ACCELERATOR_DIRECT, VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL, VNIC_TYPE_REMOTE_MANAGED, VNIC_TYPE_VDPA ) # Define list of ports which contains devices managed by cyborg. 
VNIC_TYPES_ACCELERATOR = ( VNIC_TYPE_ACCELERATOR_DIRECT, VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL ) # Constants for the 'vif_model' values VIF_MODEL_VIRTIO = 'virtio' VIF_MODEL_NE2K_PCI = 'ne2k_pci' VIF_MODEL_PCNET = 'pcnet' VIF_MODEL_RTL8139 = 'rtl8139' VIF_MODEL_E1000 = 'e1000' VIF_MODEL_E1000E = 'e1000e' VIF_MODEL_NETFRONT = 'netfront' VIF_MODEL_SPAPR_VLAN = 'spapr-vlan' VIF_MODEL_LAN9118 = 'lan9118' VIF_MODEL_IGB = 'igb' VIF_MODEL_SRIOV = 'sriov' VIF_MODEL_VMXNET = 'vmxnet' VIF_MODEL_VMXNET3 = 'vmxnet3' VIF_MODEL_ALL = ( VIF_MODEL_VIRTIO, VIF_MODEL_NE2K_PCI, VIF_MODEL_PCNET, VIF_MODEL_RTL8139, VIF_MODEL_E1000, VIF_MODEL_E1000E, VIF_MODEL_NETFRONT, VIF_MODEL_SPAPR_VLAN, VIF_MODEL_LAN9118, VIF_MODEL_SRIOV, VIF_MODEL_VMXNET, VIF_MODEL_VMXNET3, VIF_MODEL_IGB, ) # these types have been leaked to guests in network_data.json LEGACY_EXPOSED_VIF_TYPES = ( VIF_TYPE_BRIDGE, VIF_TYPE_DVS, VIF_TYPE_HW_VEB, VIF_TYPE_HYPERV, VIF_TYPE_OVS, VIF_TYPE_TAP, VIF_TYPE_VHOSTUSER, VIF_TYPE_VIF, ) # Constant for max length of network interface names # eg 'bridge' in the Network class or 'devname' in # the VIF class NIC_NAME_LEN = 14 class Model(dict): """Defines some necessary structures for most of the network models.""" def __repr__(self): return jsonutils.dumps(self) def _set_meta(self, kwargs): # pull meta out of kwargs if it's there self['meta'] = kwargs.pop('meta', {}) # update meta with any additional kwargs that may exist self['meta'].update(kwargs) def get_meta(self, key, default=None): """calls get(key, default) on self['meta'].""" return self['meta'].get(key, default) class IP(Model): """Represents an IP address in Nova.""" def __init__(self, address=None, type=None, **kwargs): super(IP, self).__init__() self['address'] = address self['type'] = type self['version'] = kwargs.pop('version', None) self._set_meta(kwargs) # determine version from address if not passed in if self['address'] and not self['version']: try: self['version'] = netaddr.IPAddress(self['address']).version except netaddr.AddrFormatError: msg = _("Invalid IP format %s") % self['address'] raise exception.InvalidIpAddressError(msg) def __eq__(self, other): keys = ['address', 'type', 'version'] return all(self[k] == other[k] for k in keys) def __ne__(self, other): return not self.__eq__(other) def is_in_subnet(self, subnet): if self['address'] and subnet['cidr']: return (netaddr.IPAddress(self['address']) in netaddr.IPNetwork(subnet['cidr'])) else: return False @classmethod def hydrate(cls, ip): if ip: return cls(**ip) return None class FixedIP(IP): """Represents a Fixed IP address in Nova.""" def __init__(self, floating_ips=None, **kwargs): super(FixedIP, self).__init__(**kwargs) self['floating_ips'] = floating_ips or [] if not self['type']: self['type'] = 'fixed' def add_floating_ip(self, floating_ip): if floating_ip not in self['floating_ips']: self['floating_ips'].append(floating_ip) def floating_ip_addresses(self): return [ip['address'] for ip in self['floating_ips']] @staticmethod def hydrate(fixed_ip): fixed_ip = FixedIP(**fixed_ip) fixed_ip['floating_ips'] = [IP.hydrate(floating_ip) for floating_ip in fixed_ip['floating_ips']] return fixed_ip def __eq__(self, other): keys = ['address', 'type', 'version', 'floating_ips'] return all(self[k] == other[k] for k in keys) def __ne__(self, other): return not self.__eq__(other) class Route(Model): """Represents an IP Route in Nova.""" def __init__(self, cidr=None, gateway=None, interface=None, **kwargs): super(Route, self).__init__() self['cidr'] = cidr self['gateway'] = gateway # 
FIXME(mriedem): Is this actually used? It's never set. self['interface'] = interface self._set_meta(kwargs) @classmethod def hydrate(cls, route): route = cls(**route) route['gateway'] = IP.hydrate(route['gateway']) return route class Subnet(Model): """Represents a Subnet in Nova.""" def __init__(self, cidr=None, dns=None, gateway=None, ips=None, routes=None, **kwargs): super(Subnet, self).__init__() self['cidr'] = cidr self['dns'] = dns or [] self['gateway'] = gateway self['ips'] = ips or [] self['routes'] = routes or [] self['version'] = kwargs.pop('version', None) self._set_meta(kwargs) if self['cidr'] and not self['version']: self['version'] = netaddr.IPNetwork(self['cidr']).version def __eq__(self, other): keys = ['cidr', 'dns', 'gateway', 'ips', 'routes', 'version'] return all(self[k] == other[k] for k in keys) def __ne__(self, other): return not self.__eq__(other) def add_route(self, new_route): if new_route not in self['routes']: self['routes'].append(new_route) def add_dns(self, dns): if dns not in self['dns']: self['dns'].append(dns) def add_ip(self, ip): if ip not in self['ips']: self['ips'].append(ip) def as_netaddr(self): """Convenient function to get cidr as a netaddr object.""" return netaddr.IPNetwork(self['cidr']) @classmethod def hydrate(cls, subnet): subnet = cls(**subnet) subnet['dns'] = [IP.hydrate(dns) for dns in subnet['dns']] subnet['ips'] = [FixedIP.hydrate(ip) for ip in subnet['ips']] subnet['routes'] = [Route.hydrate(route) for route in subnet['routes']] subnet['gateway'] = IP.hydrate(subnet['gateway']) return subnet class Network(Model): """Represents a Network in Nova.""" def __init__(self, id=None, bridge=None, label=None, subnets=None, **kwargs): super(Network, self).__init__() self['id'] = id self['bridge'] = bridge self['label'] = label self['subnets'] = subnets or [] self._set_meta(kwargs) def add_subnet(self, subnet): if subnet not in self['subnets']: self['subnets'].append(subnet) @classmethod def hydrate(cls, network): if network: network = cls(**network) network['subnets'] = [Subnet.hydrate(subnet) for subnet in network['subnets']] return network def __eq__(self, other): keys = ['id', 'bridge', 'label', 'subnets'] return all(self[k] == other[k] for k in keys) def __ne__(self, other): return not self.__eq__(other) class VIF8021QbgParams(Model): """Represents the parameters for a 802.1qbg VIF.""" def __init__(self, managerid, typeid, typeidversion, instanceid): super(VIF8021QbgParams, self).__init__() self['managerid'] = managerid self['typeid'] = typeid self['typeidversion'] = typeidversion self['instanceid'] = instanceid class VIF8021QbhParams(Model): """Represents the parameters for a 802.1qbh VIF.""" def __init__(self, profileid): super(VIF8021QbhParams, self).__init__() self['profileid'] = profileid class VIF(Model): """Represents a Virtual Interface in Nova.""" def __init__(self, id=None, address=None, network=None, type=None, details=None, devname=None, ovs_interfaceid=None, qbh_params=None, qbg_params=None, active=False, vnic_type=VNIC_TYPE_NORMAL, profile=None, preserve_on_delete=False, delegate_create=False, **kwargs): super(VIF, self).__init__() self['id'] = id self['address'] = address self['network'] = network or None self['type'] = type self['details'] = details or {} self['devname'] = devname self['ovs_interfaceid'] = ovs_interfaceid self['qbh_params'] = qbh_params self['qbg_params'] = qbg_params self['active'] = active self['vnic_type'] = vnic_type self['profile'] = profile self['preserve_on_delete'] = preserve_on_delete 
self['delegate_create'] = delegate_create self._set_meta(kwargs) def __eq__(self, other): keys = ['id', 'address', 'network', 'vnic_type', 'type', 'profile', 'details', 'devname', 'ovs_interfaceid', 'qbh_params', 'qbg_params', 'active', 'preserve_on_delete', 'delegate_create'] return all(self[k] == other[k] for k in keys) def __ne__(self, other): return not self.__eq__(other) def fixed_ips(self): if self['network']: return [fixed_ip for subnet in self['network']['subnets'] for fixed_ip in subnet['ips']] else: return [] def floating_ips(self): return [floating_ip for fixed_ip in self.fixed_ips() for floating_ip in fixed_ip['floating_ips']] def labeled_ips(self): """Returns the list of all IPs The return value looks like this flat structure:: {'network_label': 'my_network', 'network_id': 'n8v29837fn234782f08fjxk3ofhb84', 'ips': [{'address': '123.123.123.123', 'version': 4, 'type: 'fixed', 'meta': {...}}, {'address': '124.124.124.124', 'version': 4, 'type': 'floating', 'meta': {...}}, {'address': 'fe80::4', 'version': 6, 'type': 'fixed', 'meta': {...}}] """ if self['network']: # remove unnecessary fields on fixed_ips ips = [IP(**ip) for ip in self.fixed_ips()] for ip in ips: # remove floating ips from IP, since this is a flat structure # of all IPs del ip['meta']['floating_ips'] # add floating ips to list (if any) ips.extend(self.floating_ips()) return {'network_label': self['network']['label'], 'network_id': self['network']['id'], 'ips': ips} return [] def is_hybrid_plug_enabled(self): return self['details'].get(VIF_DETAILS_OVS_HYBRID_PLUG, False) def is_neutron_filtering_enabled(self): return self['details'].get(VIF_DETAILS_PORT_FILTER, False) def get_physical_network(self): phy_network = self['network']['meta'].get('physical_network') if not phy_network: phy_network = self['details'].get(VIF_DETAILS_PHYSICAL_NETWORK) return phy_network @classmethod def hydrate(cls, vif): vif = cls(**vif) vif['network'] = Network.hydrate(vif['network']) return vif def has_allocation(self): return self['profile'] and bool(self['profile'].get('allocation')) def get_netmask(ip, subnet): """Returns the netmask appropriate for injection into a guest.""" if ip['version'] == 4: return str(subnet.as_netaddr().netmask) return subnet.as_netaddr()._prefixlen class NetworkInfo(list): """Stores and manipulates network information for a Nova instance.""" # NetworkInfo is a list of VIFs def fixed_ips(self): """Returns all fixed_ips without floating_ips attached.""" return [ip for vif in self for ip in vif.fixed_ips()] def floating_ips(self): """Returns all floating_ips.""" return [ip for vif in self for ip in vif.floating_ips()] @classmethod def hydrate(cls, network_info): if isinstance(network_info, str): network_info = jsonutils.loads(network_info) return cls([VIF.hydrate(vif) for vif in network_info]) def wait(self, do_raise=True): """Wait for asynchronous call to finish.""" # There is no asynchronous call for this class, so this is a no-op # here, but subclasses may override to provide asynchronous # capabilities. Must be defined here in the parent class so that code # which works with both parent and subclass types can reference this # method. pass def json(self): return jsonutils.dumps(self) def has_port_with_allocation(self): return any(vif.has_allocation() for vif in self) class NetworkInfoAsyncWrapper(NetworkInfo): """Wrapper around NetworkInfo that allows retrieving NetworkInfo in an async manner. This allows one to start querying for network information before you know you will need it. 
If you have a long-running operation, this allows the network model retrieval to occur in the background. When you need the data, it will ensure the async operation has completed. As an example: def allocate_net_info(arg1, arg2) return call_neutron_to_allocate(arg1, arg2) network_info = NetworkInfoAsyncWrapper(allocate_net_info, arg1, arg2) [do a long running operation -- real network_info will be retrieved in the background] [do something with network_info] """ def __init__(self, async_method, *args, **kwargs): super(NetworkInfoAsyncWrapper, self).__init__() self._future = utils.spawn(async_method, *args, **kwargs) methods = ['json', 'fixed_ips', 'floating_ips'] for method in methods: fn = getattr(self, method) wrapper = functools.partial(self._sync_wrapper, fn) functools.update_wrapper(wrapper, fn) setattr(self, method, wrapper) def _sync_wrapper(self, wrapped, *args, **kwargs): """Synchronize the model before running a method.""" self.wait() return wrapped(*args, **kwargs) def __getitem__(self, *args, **kwargs): fn = super(NetworkInfoAsyncWrapper, self).__getitem__ return self._sync_wrapper(fn, *args, **kwargs) def __iter__(self, *args, **kwargs): fn = super(NetworkInfoAsyncWrapper, self).__iter__ return self._sync_wrapper(fn, *args, **kwargs) def __len__(self, *args, **kwargs): fn = super(NetworkInfoAsyncWrapper, self).__len__ return self._sync_wrapper(fn, *args, **kwargs) def __str__(self, *args, **kwargs): fn = super(NetworkInfoAsyncWrapper, self).__str__ return self._sync_wrapper(fn, *args, **kwargs) def __repr__(self, *args, **kwargs): fn = super(NetworkInfoAsyncWrapper, self).__repr__ return self._sync_wrapper(fn, *args, **kwargs) def wait(self, do_raise=True): """Wait for asynchronous call to finish.""" if self._future is not None: try: # NOTE(comstud): This looks funky, but this object is # subclassed from list. In other words, 'self' is really # just a list with a bunch of extra methods. So this # line just replaces the current list (which should be # empty) with the result. self[:] = self._future.result() except Exception: if do_raise: raise finally: self._future = None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/network/neutron.py0000664000175000017500000056332700000000000017302 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved # Copyright (c) 2012 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ API and utilities for nova-network interactions. 
""" import copy import functools import inspect import time import typing as ty from keystoneauth1 import loading as ks_loading from neutronclient.common import exceptions as neutron_client_exc from neutronclient.v2_0 import client as clientv20 from oslo_concurrency import lockutils from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import uuidutils from nova.accelerator import cyborg from nova.compute import utils as compute_utils import nova.conf from nova import context as nova_context from nova import exception from nova.i18n import _ from nova.network import constants from nova.network import model as network_model from nova import objects from nova.objects import fields as obj_fields from nova.pci import request as pci_request from nova.pci import utils as pci_utils from nova.pci import whitelist as pci_whitelist from nova.policies import servers as servers_policies from nova import profiler from nova import service_auth from nova import utils CONF = nova.conf.CONF LOG = logging.getLogger(__name__) _SESSION = None _ADMIN_AUTH = None def reset_state(): global _ADMIN_AUTH global _SESSION _ADMIN_AUTH = None _SESSION = None def _load_auth_plugin(conf): auth_plugin = ks_loading.load_auth_from_conf_options(conf, nova.conf.neutron.NEUTRON_GROUP) if auth_plugin: return auth_plugin if conf.neutron.auth_type is None: # If we're coming in through a REST API call for something like # creating a server, the end user is going to get a 500 response # which is accurate since the system is mis-configured, but we should # leave a breadcrumb for the operator that is checking the logs. LOG.error('The [neutron] section of your nova configuration file ' 'must be configured for authentication with the networking ' 'service endpoint. See the networking service install guide ' 'for details: ' 'https://docs.openstack.org/neutron/latest/install/') err_msg = _('Unknown auth type: %s') % conf.neutron.auth_type raise neutron_client_exc.Unauthorized(message=err_msg) def get_binding_profile(port): """Convenience method to get the binding:profile from the port The binding:profile in the port is undefined in the networking service API and is dependent on backend configuration. This means it could be an empty dict, None, or have some values. :param port: dict port response body from the networking service API :returns: The port binding:profile dict; empty if not set on the port """ return port.get(constants.BINDING_PROFILE, {}) or {} def update_instance_cache_with_nw_info(impl, context, instance, nw_info=None): if instance.deleted: LOG.debug('Instance is deleted, no further info cache update', instance=instance) return try: if not isinstance(nw_info, network_model.NetworkInfo): nw_info = None if nw_info is None: nw_info = impl._get_instance_nw_info(context, instance) LOG.debug('Updating instance_info_cache with network_info: %s', nw_info, instance=instance) # NOTE(comstud): The save() method actually handles updating or # creating the instance. We don't need to retrieve the object # from the DB first. ic = objects.InstanceInfoCache.new(context, instance.uuid) ic.network_info = nw_info ic.save() instance.info_cache = ic except exception.InstanceNotFound as e: # The instance could have moved during a cross-cell migration when we # receive an external event from neutron. Avoid logging a traceback # when it happens. 
msg = str(e) if e.__class__.__name__.endswith('_Remote'): # If this exception was raised remotely over RPC, the traceback(s) # will be appended to the message. Truncate it in that case. msg = utils.safe_truncate(msg.split('\n', 1)[0], 255) LOG.info('Failed storing info cache due to: %s. ' 'The instance may have moved to another cell during a ' 'cross-cell migration', msg, instance=instance) raise exception.InstanceNotFound(message=msg) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed storing info cache', instance=instance) def refresh_cache(f): """Decorator to update the instance_info_cache Requires context and instance as function args """ argspec = inspect.getfullargspec(f) @functools.wraps(f) def wrapper(self, context, *args, **kwargs): try: # get the instance from arguments (or raise ValueError) instance = kwargs.get('instance') if not instance: instance = args[argspec.args.index('instance') - 2] except ValueError: msg = _('instance is a required argument to use @refresh_cache') raise Exception(msg) with lockutils.lock('refresh_cache-%s' % instance.uuid): # We need to call the wrapped function with the lock held to ensure # that it can call _get_instance_nw_info safely. res = f(self, context, *args, **kwargs) update_instance_cache_with_nw_info(self, context, instance, nw_info=res) # return the original function's return value return res return wrapper @profiler.trace_cls("neutron_api") class ClientWrapper: """A Neutron client wrapper class. Wraps the callable methods, catches Unauthorized,Forbidden from Neutron and convert it to a 401,403 for Nova clients. """ def __init__(self, base_client, admin): self.base_client = base_client self.admin = admin def __getattr__(self, name): base_attr = getattr(self.base_client, name) # Each callable base client attr is wrapped so that we can translate # the Unauthorized exception based on if we have an admin client or # not. if callable(base_attr): return self.proxy(base_attr) return base_attr def proxy(self, obj): def wrapper(*args, **kwargs): try: ret = obj(*args, **kwargs) except neutron_client_exc.Unauthorized: if not self.admin: # Token is expired so Neutron is raising a # unauthorized exception, we should convert it to # raise a 401 to make client to handle a retry by # regenerating a valid token and trying a new # attempt. raise exception.Unauthorized() # In admin context if token is invalid Neutron client # should be able to regenerate a valid by using the # Neutron admin credential configuration located in # nova.conf. LOG.error("Neutron client was not able to generate a " "valid admin token, please verify Neutron " "admin credential located in nova.conf") raise exception.NeutronAdminCredentialConfigurationInvalid() except neutron_client_exc.Forbidden as e: raise exception.Forbidden(str(e)) return ret return wrapper def _get_auth_plugin(context, admin=False): # NOTE(dprince): In the case where no auth_token is present we allow use of # neutron admin tenant credentials if it is an admin context. This is to # support some services (metadata API) where an admin context is used # without an auth token. global _ADMIN_AUTH user_auth = None if admin or (context.is_admin and not context.auth_token): if not _ADMIN_AUTH: _ADMIN_AUTH = _load_auth_plugin(CONF) user_auth = _ADMIN_AUTH if context.auth_token or user_auth: # When user_auth = None, user_auth will be extracted from the context. 
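# Editor's note: a minimal standalone sketch (not upstream Nova code) of the
# wrapping pattern ClientWrapper uses above: __getattr__ intercepts attribute
# access and returns a proxy that translates client-specific errors into the
# caller's own exception types. All names below are hypothetical.

class _UpstreamError(Exception):
    pass


class _TranslatedError(Exception):
    pass


class _FakeClient:
    def boom(self):
        raise _UpstreamError()


class _Wrapper:
    def __init__(self, base):
        self._base = base

    def __getattr__(self, name):
        attr = getattr(self._base, name)
        if not callable(attr):
            return attr

        def proxy(*args, **kwargs):
            try:
                return attr(*args, **kwargs)
            except _UpstreamError:
                # surface the client error as our own exception type
                raise _TranslatedError()

        return proxy


try:
    _Wrapper(_FakeClient()).boom()
except _TranslatedError:
    pass  # the upstream error arrived as the translated type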
return service_auth.get_auth_plugin(context, user_auth=user_auth) # We did not get a user token and we should not be using # an admin token so log an error raise exception.Unauthorized() def _get_session(): global _SESSION if not _SESSION: _SESSION = ks_loading.load_session_from_conf_options( CONF, nova.conf.neutron.NEUTRON_GROUP) return _SESSION def get_client(context, admin=False): auth_plugin = _get_auth_plugin(context, admin=admin) session = _get_session() client_args = dict(session=session, auth=auth_plugin, global_request_id=context.global_id, connect_retries=CONF.neutron.http_retries) # NOTE(efried): We build an adapter # to pull conf options # to pass to neutronclient # which uses them to build an Adapter. # This should be unwound at some point. adap = utils.get_ksa_adapter( 'network', ksa_auth=auth_plugin, ksa_session=session) client_args = dict(client_args, service_type=adap.service_type, service_name=adap.service_name, interface=adap.interface, region_name=adap.region_name, endpoint_override=adap.endpoint_override) return ClientWrapper(clientv20.Client(**client_args), admin=admin or context.is_admin) def _is_not_duplicate(item, items, items_list_name, instance): present = item in items # The expectation from this function's perspective is that the # item is not part of the items list so if it is part of it # we should at least log it as a warning if present: LOG.warning("%(item)s already exists in list: %(list_name)s " "containing: %(items)s. ignoring it", {'item': item, 'list_name': items_list_name, 'items': items}, instance=instance) return not present def _ensure_no_port_binding_failure(port): binding_vif_type = port.get('binding:vif_type') if binding_vif_type == network_model.VIF_TYPE_BINDING_FAILED: raise exception.PortBindingFailed(port_id=port['id']) class API: """API for interacting with the neutron 2.x API.""" def __init__(self): self.last_neutron_extension_sync = None self.extensions = {} self.pci_whitelist = pci_whitelist.Whitelist( CONF.pci.device_spec) def _update_port_with_migration_profile( self, instance, port_id, port_profile, admin_client): try: updated_port = admin_client.update_port( port_id, {'port': {constants.BINDING_PROFILE: port_profile}}) return updated_port except Exception as ex: with excutils.save_and_reraise_exception(): LOG.error("Unable to update binding profile " "for port: %(port)s due to failure: %(error)s", {'port': port_id, 'error': ex}, instance=instance) def _clear_migration_port_profile( self, context, instance, admin_client, ports): for p in ports: # If the port already has a migration profile and if # it is to be torn down, then we need to clean up # the migration profile. port_profile = get_binding_profile(p) if not port_profile: continue if constants.MIGRATING_ATTR in port_profile: del port_profile[constants.MIGRATING_ATTR] LOG.debug("Removing port %s migration profile", p['id'], instance=instance) self._update_port_with_migration_profile( instance, p['id'], port_profile, admin_client) def _setup_migration_port_profile( self, context, instance, host, admin_client, ports): # Migrating to a new host for p in ports: # If the host hasn't changed, there is nothing to do. # But if the destination host is different than the # current one, please update the port_profile with # the 'migrating_to'(constants.MIGRATING_ATTR) key pointing to # the given 'host'. 
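# Editor's note: illustrative sketch, not upstream code. It shows the shape of
# the update_port() request body built by the two helpers above when a port is
# marked as migrating; the destination host name is hypothetical.

from nova.network import constants

port_profile = {constants.MIGRATING_ATTR: 'dest-compute-1'}
update_body = {'port': {constants.BINDING_PROFILE: port_profile}}
# admin_client.update_port(port_id, update_body) would persist the
# "migrating_to" marker; _clear_migration_port_profile() later removes it.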
host_id = p.get(constants.BINDING_HOST_ID) if host_id != host: port_profile = get_binding_profile(p) # If the "migrating_to" attribute already points at the given # host, then skip the port update call since we're not changing # anything. if host != port_profile.get(constants.MIGRATING_ATTR): port_profile[constants.MIGRATING_ATTR] = host self._update_port_with_migration_profile( instance, p['id'], port_profile, admin_client) LOG.debug("Port %(port_id)s updated with migration " "profile %(profile_data)s successfully", {'port_id': p['id'], 'profile_data': port_profile}, instance=instance) def setup_networks_on_host(self, context, instance, host=None, teardown=False): """Setup or teardown the network structures. :param context: The user request context. :param instance: The instance with attached ports. :param host: Optional host used to control the setup. If provided and is not the same as the current instance.host, this method assumes the instance is being migrated and sets the "migrating_to" attribute in the binding profile for the attached ports. :param teardown: Whether or not network information for the ports should be cleaned up. If True, at a minimum the "migrating_to" attribute is cleared in the binding profile for the ports. If a host is also provided, then port bindings for that host are deleted when teardown is True as long as the host does not match the current instance.host. :raises: nova.exception.PortBindingDeletionFailed if host is not None, teardown is True, and port binding deletion fails. """ # Check if the instance is migrating to a new host. port_migrating = host and (instance.host != host) # If the port is migrating to a new host or if it is a # teardown on the original host, then proceed. if port_migrating or teardown: search_opts = {'device_id': instance.uuid, 'tenant_id': instance.project_id, constants.BINDING_HOST_ID: instance.host} # Now get the port details to process the ports # binding profile info. data = self.list_ports(context, **search_opts) ports = data['ports'] admin_client = get_client(context, admin=True) if teardown: # Reset the port profile self._clear_migration_port_profile( context, instance, admin_client, ports) # If a host was provided, delete any bindings between that # host and the ports as long as the host isn't the same as # the current instance.host. has_binding_ext = self.has_port_binding_extension( client=admin_client) if port_migrating and has_binding_ext: self._delete_port_bindings(context, ports, host) elif port_migrating: # Setup the port profile self._setup_migration_port_profile( context, instance, host, admin_client, ports) def _delete_port_bindings(self, context, ports, host): """Attempt to delete all port bindings on the host. :param context: The user request context. :param ports: list of port dicts to cleanup; the 'id' field is required per port dict in the list :param host: host from which to delete port bindings :raises: PortBindingDeletionFailed if port binding deletion fails. """ client = get_client(context, admin=True) failed_port_ids = [] for port in ports: # This call is safe in that 404s for non-existing # bindings are ignored. 
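# Editor's note: hypothetical usage sketch, not upstream code. A migration
# workflow would typically call the method above twice: once with the
# destination host to stamp the "migrating_to" profile attribute, and once
# with teardown=True to clean up (and, when a host is passed, to drop that
# host's port bindings, e.g. on rollback). Names such as ctxt and
# dest-compute-1 are assumptions.
#
#     network_api = API()
#     network_api.setup_networks_on_host(ctxt, instance, host='dest-compute-1')
#     ...
#     network_api.setup_networks_on_host(
#         ctxt, instance, host='dest-compute-1', teardown=True)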
try: client.delete_port_binding(port['id'], host) except neutron_client_exc.NeutronClientException as exc: # We can safely ignore 404s since we're trying to delete # the thing that wasn't found anyway, but for everything else # we should log an error if exc.status_code == 404: continue failed_port_ids.append(port['id']) LOG.exception( "Failed to delete binding for port %(port_id)s on host " "%(host)s", {'port_id': port['id'], 'host': host}) if failed_port_ids: raise exception.PortBindingDeletionFailed( port_id=','.join(failed_port_ids), host=host) def _get_available_networks(self, context, project_id, net_ids=None, neutron=None, auto_allocate=False): """Return a network list available for the tenant. The list contains networks owned by the tenant and public networks. If net_ids specified, it searches networks with requested IDs only. """ if not neutron: neutron = get_client(context) if net_ids: # If user has specified to attach instance only to specific # networks then only add these to **search_opts. This search will # also include 'shared' networks. search_opts = {'id': net_ids} nets = neutron.list_networks(**search_opts).get('networks', []) else: # (1) Retrieve non-public network list owned by the tenant. search_opts = {'tenant_id': project_id, 'shared': False} if auto_allocate: # The auto-allocated-topology extension may create complex # network topologies and it does so in a non-transactional # fashion. Therefore API users may be exposed to resources that # are transient or partially built. A client should use # resources that are meant to be ready and this can be done by # checking their admin_state_up flag. search_opts['admin_state_up'] = True nets = neutron.list_networks(**search_opts).get('networks', []) # (2) Retrieve public network list. search_opts = {'shared': True} nets += neutron.list_networks(**search_opts).get('networks', []) _ensure_requested_network_ordering( lambda x: x['id'], nets, net_ids) return nets def _cleanup_created_port(self, port_client, port_id, instance): try: port_client.delete_port(port_id) except neutron_client_exc.NeutronClientException: LOG.exception( 'Failed to delete port %(port_id)s while cleaning up after an ' 'error.', {'port_id': port_id}, instance=instance) def _create_port_minimal(self, context, port_client, instance, network_id, fixed_ip=None, security_group_ids=None): """Attempts to create a port for the instance on the given network. :param context: The request context. :param port_client: The client to use to create the port. :param instance: Create the port for the given instance. :param network_id: Create the port on the given network. :param fixed_ip: Optional fixed IP to use from the given network. :param security_group_ids: Optional list of security group IDs to apply to the port. :returns: The created port. :raises PortLimitExceeded: If neutron fails with an OverQuota error. :raises NoMoreFixedIps: If neutron fails with IpAddressGenerationFailure error. :raises: PortBindingFailed: If port binding failed. :raises NetworksWithQoSPolicyNotSupported: if the created port has resource request. 
""" # Set the device_id so it's clear who this port was created for, # and to stop other instances trying to use it port_req_body = {'port': {'device_id': instance.uuid}} try: if fixed_ip: port_req_body['port']['fixed_ips'] = [ {'ip_address': str(fixed_ip)}] port_req_body['port']['network_id'] = network_id port_req_body['port']['admin_state_up'] = True port_req_body['port']['tenant_id'] = instance.project_id if security_group_ids: port_req_body['port']['security_groups'] = security_group_ids port_response = port_client.create_port(port_req_body) port = port_response['port'] port_id = port['id'] # NOTE(gibi): Checking if the created port has resource request as # such ports are currently not supported as they would at least # need resource allocation manipulation in placement but might also # need a new scheduling if resource on this host is not available. if self._has_resource_request(context, port, port_client): msg = ( "The auto-created port %(port_id)s is being deleted due " "to its network having QoS policy.") LOG.info(msg, {'port_id': port_id}) self._cleanup_created_port(port_client, port_id, instance) # NOTE(gibi): This limitation regarding server create can be # removed when the port creation is moved to the conductor. But # this code also limits attaching a network that has QoS # minimum bandwidth rule. raise exception.NetworksWithQoSPolicyNotSupported( instance_uuid=instance.uuid, network_id=network_id) try: _ensure_no_port_binding_failure(port) except exception.PortBindingFailed: with excutils.save_and_reraise_exception(): port_client.delete_port(port_id) LOG.debug('Successfully created port: %s', port_id, instance=instance) return port except neutron_client_exc.InvalidIpForNetworkClient: LOG.warning('Neutron error: %(ip)s is not a valid IP address ' 'for network %(network_id)s.', {'ip': fixed_ip, 'network_id': network_id}, instance=instance) msg = (_('Fixed IP %(ip)s is not a valid ip address for ' 'network %(network_id)s.') % {'ip': fixed_ip, 'network_id': network_id}) raise exception.InvalidInput(reason=msg) except (neutron_client_exc.IpAddressInUseClient, neutron_client_exc.IpAddressAlreadyAllocatedClient): LOG.warning('Neutron error: Fixed IP %s is ' 'already in use.', fixed_ip, instance=instance) msg = _("Fixed IP %s is already in use.") % fixed_ip raise exception.FixedIpAlreadyInUse(message=msg) except neutron_client_exc.OverQuotaClient: LOG.warning( 'Neutron error: Port quota exceeded in tenant: %s', port_req_body['port']['tenant_id'], instance=instance) raise exception.PortLimitExceeded() except neutron_client_exc.IpAddressGenerationFailureClient: LOG.warning('Neutron error: No more fixed IPs in network: %s', network_id, instance=instance) raise exception.NoMoreFixedIps(net=network_id) except neutron_client_exc.NeutronClientException: with excutils.save_and_reraise_exception(): LOG.exception('Neutron error creating port on network %s', network_id, instance=instance) def _update_port(self, port_client, instance, port_id, port_req_body): try: port_response = port_client.update_port(port_id, port_req_body) port = port_response['port'] _ensure_no_port_binding_failure(port) LOG.debug('Successfully updated port: %s', port_id, instance=instance) return port except neutron_client_exc.MacAddressInUseClient: mac_address = port_req_body['port'].get('mac_address') network_id = port_req_body['port'].get('network_id') LOG.warning('Neutron error: MAC address %(mac)s is already ' 'in use on network %(network)s.', {'mac': mac_address, 'network': network_id}, instance=instance) raise 
exception.PortInUse(port_id=mac_address) except neutron_client_exc.HostNotCompatibleWithFixedIpsClient: network_id = port_req_body['port'].get('network_id') LOG.warning('Neutron error: Tried to bind a port with ' 'fixed_ips to a host in the wrong segment on ' 'network %(network)s.', {'network': network_id}, instance=instance) raise exception.FixedIpInvalidOnHost(port_id=port_id) def _check_external_network_attach(self, context, nets): """Check if attaching to external network is permitted.""" if not context.can(servers_policies.NETWORK_ATTACH_EXTERNAL, fatal=False): for net in nets: # Perform this check here rather than in validate_networks to # ensure the check is performed every time # allocate_for_instance is invoked if net.get('router:external') and not net.get('shared'): raise exception.ExternalNetworkAttachForbidden( network_uuid=net['id']) def unbind_ports(self, context, ports, detach=True): """Unbind and detach the given ports by clearing their device_owner and dns_name. The device_id will also be cleaned if detach=True. :param context: The request context. :param ports: list of port IDs. """ neutron = get_client(context) self._unbind_ports(context, ports, neutron, detach=detach) def _unbind_ports(self, context, ports, neutron, port_client=None, detach=True): """Unbind and detach the given ports by clearing their device_owner and dns_name. The device_id will also be cleaned if detach=True. :param context: The request context. :param ports: list of port IDs. :param neutron: neutron client for the current context. :param port_client: The client with appropriate karma for updating the ports. """ if port_client is None: # Requires admin creds to set port bindings port_client = get_client(context, admin=True) # it is a dict of network dicts as returned by the neutron client keyed # by network UUID networks: ty.Dict[str, ty.Dict] = {} for port_id in ports: # A port_id is optional in the NetworkRequest object so check here # in case the caller forgot to filter the list. if port_id is None: continue port_req_body: ty.Dict[str, ty.Any] = { 'port': { constants.BINDING_HOST_ID: None, } } if detach: port_req_body['port']['device_id'] = '' port_req_body['port']['device_owner'] = '' try: port = self._show_port( context, port_id, neutron_client=neutron, fields=[constants.BINDING_PROFILE, 'network_id']) except exception.PortNotFound: LOG.debug('Unable to show port %s as it no longer ' 'exists.', port_id) continue except Exception: # NOTE: In case we can't retrieve the binding:profile or # network info assume that they are empty LOG.exception("Unable to get binding:profile for port '%s'", port_id) port_profile = {} network: dict = {} else: port_profile = get_binding_profile(port) net_id = port.get('network_id') if net_id in networks: network = networks[net_id] else: network = neutron.show_network(net_id, fields=['dns_domain'] ).get('network') networks[net_id] = network # Unbind Port device if port_profile.get('arq_uuid'): """Delete device profile by arq uuid.""" cyclient = cyborg.get_client(context) cyclient.delete_arqs_by_uuid([port_profile['arq_uuid']]) LOG.debug('Delete ARQs %s for port %s', port_profile['arq_uuid'], port_id) # NOTE: We're doing this to remove the binding information # for the physical device but don't want to overwrite the other # information in the binding profile. 
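# Editor's note: a simplified standalone restatement (not upstream code) of
# the clean-up performed just below: only the device-specific keys are
# dropped from binding:profile, anything else stored there survives the
# unbind. The sample values are hypothetical.

profile = {
    'pci_vendor_info': '15b3:101e',
    'pci_slot': '0000:81:00.2',
    'physical_network': 'physnet1',
    'capabilities': ['switchdev'],   # unrelated key, left untouched
}
for key in ('pci_vendor_info', 'pci_slot', 'physical_network'):
    profile.pop(key, None)
assert profile == {'capabilities': ['switchdev']}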
for profile_key in ('pci_vendor_info', 'pci_slot', constants.ALLOCATION, 'arq_uuid', 'physical_network', 'card_serial_number', 'vf_num', 'pf_mac_address', 'device_mac_address'): if profile_key in port_profile: del port_profile[profile_key] port_req_body['port'][constants.BINDING_PROFILE] = port_profile # NOTE: For internal DNS integration (network does not have a # dns_domain), or if we cannot retrieve network info, we use the # admin client to reset dns_name. if ( detach and self.has_dns_extension(client=port_client) and not network.get('dns_domain') ): port_req_body['port']['dns_name'] = '' try: port_client.update_port(port_id, port_req_body) except neutron_client_exc.PortNotFoundClient: LOG.debug('Unable to unbind port %s as it no longer ' 'exists.', port_id) except Exception: LOG.exception("Unable to clear device ID for port '%s'", port_id) # NOTE: For external DNS integration, we use the neutron client # with user's context to reset the dns_name since the recordset is # under user's zone. if detach: self._reset_port_dns_name(network, port_id, neutron) def _validate_requested_port_ids(self, context, instance, neutron, requested_networks): """Processes and validates requested networks for allocation. Iterates over the list of NetworkRequest objects, validating the request and building sets of ports and networks to use for allocating ports for the instance. :param context: The user request context. :type context: nova.context.RequestContext :param instance: allocate networks on this instance :type instance: nova.objects.Instance :param neutron: neutron client session :type neutron: neutronclient.v2_0.client.Client :param requested_networks: List of user-requested networks and/or ports :type requested_networks: nova.objects.NetworkRequestList :returns: tuple of: - ports: dict mapping of port id to port dict - ordered_networks: list of nova.objects.NetworkRequest objects for requested networks (either via explicit network request or the network for an explicit port request) :raises nova.exception.PortNotFound: If a requested port is not found in Neutron. :raises nova.exception.PortNotUsable: If a requested port is not owned by the same tenant that the instance is created under. :raises nova.exception.PortInUse: If a requested port is already attached to another instance. :raises nova.exception.PortNotUsableDNS: If a requested port has a value assigned to its dns_name attribute. """ ports = {} ordered_networks = [] # If we're asked to auto-allocate the network then there won't be any # ports or real neutron networks to lookup, so just return empty # results. if requested_networks and not requested_networks.auto_allocate: for request in requested_networks: # Process a request to use a pre-existing neutron port. if request.port_id: # Make sure the port exists. port = self._show_port(context, request.port_id, neutron_client=neutron) # Make sure the instance has access to the port. if port['tenant_id'] != instance.project_id: raise exception.PortNotUsable(port_id=request.port_id, instance=instance.uuid) # Make sure the port isn't already attached to another # instance. 
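# Editor's note: illustrative sketch, not upstream code. The requests being
# validated here are objects.NetworkRequest entries: a request either names a
# pre-existing Neutron port (port_id) or asks Nova to create a port on a
# network (network_id, optionally with a fixed address). The UUIDs and the
# address below are hypothetical.

from nova.objects import network_request

requested = network_request.NetworkRequestList(objects=[
    # attach an existing Neutron port as-is
    network_request.NetworkRequest(
        port_id='b54f4b56-1d33-4e3e-a8b1-7f2f4a0c8a11'),
    # have Nova create a port on this network with a specific fixed IP
    network_request.NetworkRequest(
        network_id='3f6a0f9e-51a4-4d41-9b2f-2a1c5d0e7b22',
        address='192.0.2.50'),
])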
if port.get('device_id'): raise exception.PortInUse(port_id=request.port_id) # Make sure that if the user assigned a value to the port's # dns_name attribute, it is equal to the instance's # hostname if port.get('dns_name'): if port['dns_name'] != instance.hostname: raise exception.PortNotUsableDNS( port_id=request.port_id, instance=instance.uuid, value=port['dns_name'], hostname=instance.hostname) # Make sure the port is usable _ensure_no_port_binding_failure(port) # If requesting a specific port, automatically process # the network for that port as if it were explicitly # requested. request.network_id = port['network_id'] ports[request.port_id] = port # Process a request to use a specific neutron network. if request.network_id: ordered_networks.append(request) return ports, ordered_networks def _clean_security_groups(self, security_groups): """Cleans security groups requested from Nova API Neutron already passes a 'default' security group when creating ports so it's not necessary to specify it to the request. """ if not security_groups: security_groups = [] elif security_groups == [constants.DEFAULT_SECGROUP]: security_groups = [] return security_groups def _get_security_group_ids(self, security_groups, user_security_groups): """Processes requested security groups based on existing user groups :param security_groups: list of security group names or IDs :param user_security_groups: list of Neutron security groups found :return: list of security group IDs :raises nova.exception.NoUniqueMatch: If multiple security groups are requested with the same name. :raises nova.exception.SecurityGroupNotFound: If a given security group is not found. """ # Initialize two dictionaries to map security group names and IDs to # their corresponding IDs name_to_id = {} # NOTE(sean-k-mooney): using a dict here instead of a set is faster # probably due to l1 code cache misses due to the introduction # of set lookup in addition to dict lookups making the branch # prediction for the second for loop less reliable. id_to_id = {} # Populate the dictionaries with user security groups for user_security_group in user_security_groups: name = user_security_group['name'] sg_id = user_security_group['id'] # Check for duplicate names and raise an exception if found if name in name_to_id: raise exception.NoUniqueMatch( _("Multiple security groups found matching" " '%s'. Use an ID to be more specific.") % name) # Map the name to its corresponding ID name_to_id[name] = sg_id # Map the ID to itself for easy lookup id_to_id[sg_id] = sg_id # Initialize an empty list to store the resulting security group IDs security_group_ids = [] # Iterate over the requested security groups for security_group in security_groups: # Check if the security group is in the name-to-ID dictionary # as if a user names the security group the same as # another's security groups uuid, the name takes priority. if security_group in name_to_id: security_group_ids.append(name_to_id[security_group]) # Check if the security group is in the ID-to-ID dictionary elif security_group in id_to_id: security_group_ids.append(id_to_id[security_group]) # Raise an exception if the security group is not found in # either dictionary else: raise exception.SecurityGroupNotFound( security_group_id=security_group) # Return the list of security group IDs return security_group_ids def _process_security_groups(self, instance, neutron, security_groups): """Processes and validates requested security groups for allocation. 
Iterates over the list of requested security groups, validating the request and filtering out the list of security group IDs to use for port allocation. :param instance: allocate networks on this instance :type instance: nova.objects.Instance :param neutron: neutron client session :type neutron: neutronclient.v2_0.client.Client :param security_groups: list of requested security group name or IDs to use when allocating new ports for the instance :return: list of security group IDs to use when allocating new ports :raises nova.exception.NoUniqueMatch: If multiple security groups are requested with the same name. :raises nova.exception.SecurityGroupNotFound: If a requested security group is not in the tenant-filtered list of available security groups in Neutron. """ security_group_ids = [] # TODO(arosen) Should optimize more to do direct query for security # group if len(security_groups) == 1 if len(security_groups): # NOTE(slaweq): fields other than name and id aren't really needed # so asking only about those fields will allow Neutron to not # prepare list of rules for each found security group. That may # speed processing of this request a lot in case when tenant has # got many security groups sg_fields = ['id', 'name'] search_opts = {'tenant_id': instance.project_id} sg_filter_ext = self.has_sg_shared_filter_extension(client=neutron) user_security_groups = neutron.list_security_groups( fields=sg_fields, **search_opts).get('security_groups') try: security_group_ids = self._get_security_group_ids( security_groups, user_security_groups) except exception.SecurityGroupNotFound: # Trigger a raise if the shared filter extension is not loaded, # else we will trigger on the second call below when we pass # any shared security groups. if not sg_filter_ext: raise # NOTE(hangyang): Make another request to get the RBAC shared # SGs accessible to the tenant search_opts = {'shared': True} user_security_groups += neutron.list_security_groups( fields=sg_fields, **search_opts).get('security_groups') security_group_ids = self._get_security_group_ids( security_groups, user_security_groups) return security_group_ids def _validate_requested_network_ids(self, context, instance, neutron, requested_networks, ordered_networks): """Check requested networks using the Neutron API. Check the user has access to the network they requested, and that it is a suitable network to connect to. This includes getting the network details for any ports that have been passed in, because the request will have been updated with the network_id in _validate_requested_port_ids. If the user has not requested any ports or any networks, we get back a full list of networks the user has access to, and if there is only one network, we update ordered_networks so we will connect the instance to that network. :param context: The request context. :param instance: nova.objects.instance.Instance object. :param neutron: neutron client :param requested_networks: nova.objects.NetworkRequestList, list of user-requested networks and/or ports; may be empty :param ordered_networks: output from _validate_requested_port_ids that will be used to create and update ports :returns: dict, keyed by network ID, of networks to use :raises InterfaceAttachFailedNoNetwork: If no specific networks were requested and none are available. :raises NetworkAmbiguous: If no specific networks were requested but more than one is available. 
:raises ExternalNetworkAttachForbidden: If the policy rules forbid the request context from using an external non-shared network but one was requested (or available). """ # Get networks from Neutron # If net_ids is empty, this actually returns all available nets auto_allocate = requested_networks and requested_networks.auto_allocate net_ids = [request.network_id for request in ordered_networks] nets = self._get_available_networks(context, instance.project_id, net_ids, neutron=neutron, auto_allocate=auto_allocate) if not nets: if requested_networks: # There are no networks available for the project to use and # none specifically requested, so check to see if we're asked # to auto-allocate the network. if auto_allocate: # During validate_networks we checked to see if # auto-allocation is available so we don't need to do that # again here. nets = [self._auto_allocate_network(instance, neutron)] else: # NOTE(chaochin): If user specifies a network id and the # network can not be found, raise NetworkNotFound error. for request in requested_networks: if not request.port_id and request.network_id: raise exception.NetworkNotFound( network_id=request.network_id) else: # no requested nets and user has no available nets return {} # if this function is directly called without a requested_network param if (not requested_networks or requested_networks.is_single_unspecified or requested_networks.auto_allocate): # If no networks were requested and none are available, consider # it a bad request. if not nets: raise exception.InterfaceAttachFailedNoNetwork( project_id=instance.project_id) # bug/1267723 - if no network is requested and more # than one is available then raise NetworkAmbiguous Exception if len(nets) > 1: msg = _("Multiple possible networks found, use a Network " "ID to be more specific.") raise exception.NetworkAmbiguous(msg) ordered_networks.append( objects.NetworkRequest(network_id=nets[0]['id'])) # NOTE(melwitt): check external net attach permission after the # check for ambiguity, there could be another # available net which is permitted bug/1364344 self._check_external_network_attach(context, nets) return {net['id']: net for net in nets} def _create_ports_for_instance(self, context, instance, ordered_networks, nets, neutron, security_group_ids): """Create port for network_requests that don't have a port_id :param context: The request context. :param instance: nova.objects.instance.Instance object. :param ordered_networks: objects.NetworkRequestList in requested order :param nets: a dict of network_id to networks returned from neutron :param neutron: neutronclient built from users request context :param security_group_ids: a list of security group IDs to be applied to any ports created :returns a list of pairs (NetworkRequest, created_port_uuid); note that created_port_uuid will be None for the pair where a pre-existing port was part of the user request """ created_port_ids = [] requests_and_created_ports = [] for request in ordered_networks: network = nets.get(request.network_id) # if network_id did not pass validate_networks() and not available # here then skip it safely not continuing with a None Network if not network: continue try: port_security_enabled = network.get( 'port_security_enabled', True) if port_security_enabled: if not network.get('subnets'): # Neutron can't apply security groups to a port # for a network without L3 assignments. 
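# Editor's note: a condensed standalone restatement (not upstream code) of the
# checks applied in this loop; the helper name is hypothetical and it returns
# True when no SecurityGroupCannotBeApplied error would be raised.

def _port_security_check_passes(network, requested_sg_ids):
    """Mirror of the port-security checks performed below."""
    if network.get('port_security_enabled', True):
        # Neutron needs L3 (subnets) on the network to enforce the rules,
        # including the default security group it adds on its own.
        return bool(network.get('subnets'))
    # port security disabled: only fine when no groups were requested
    return not requested_sg_ids

assert _port_security_check_passes(
    {'port_security_enabled': True, 'subnets': ['subnet-1']}, ['sg-1'])
assert not _port_security_check_passes(
    {'port_security_enabled': True, 'subnets': []}, [])
assert not _port_security_check_passes(
    {'port_security_enabled': False}, ['sg-1'])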
LOG.debug('Network with port security enabled does ' 'not have subnets so security groups ' 'cannot be applied: %s', network, instance=instance) raise exception.SecurityGroupCannotBeApplied() else: if security_group_ids: # We don't want to apply security groups on port # for a network defined with # 'port_security_enabled=False'. LOG.debug('Network has port security disabled so ' 'security groups cannot be applied: %s', network, instance=instance) raise exception.SecurityGroupCannotBeApplied() created_port_id = None if not request.port_id: # create minimal port, if port not already created by user created_port = self._create_port_minimal( context, neutron, instance, request.network_id, request.address, security_group_ids) created_port_id = created_port['id'] created_port_ids.append(created_port_id) requests_and_created_ports.append(( request, created_port_id)) except Exception: with excutils.save_and_reraise_exception(): if created_port_ids: self._delete_ports( neutron, instance, created_port_ids) return requests_and_created_ports def _has_resource_request(self, context, port, neutron): resource_request = port.get(constants.RESOURCE_REQUEST) or {} if self.has_extended_resource_request_extension(context, neutron): return bool(resource_request.get(constants.REQUEST_GROUPS, [])) else: return bool(resource_request) def instance_has_extended_resource_request(self, instance_uuid): # NOTE(gibi): We need to use an admin context to query neutron ports as # neutron does not fill the resource_request field in the port response # if we query with a non admin context. admin_context = nova_context.get_admin_context() if not self.has_extended_resource_request_extension(admin_context): # Short circuit if the extended resource request API extension is # not available return False # So neutron supports the extended resource request but does the # instance has a port with such request search_opts = {'device_id': instance_uuid, 'fields': [constants.RESOURCE_REQUEST]} ports = self.list_ports( admin_context, **search_opts).get('ports', []) for port in ports: resource_request = port.get(constants.RESOURCE_REQUEST) or {} if resource_request.get(constants.REQUEST_GROUPS, []): return True return False def get_binding_profile_allocation( self, context: nova_context.RequestContext, port_id: str, resource_provider_mapping: ty.Dict[str, ty.List[str]], ) -> ty.Union[None, str, ty.Dict[str, str]]: """Calculate the value of the allocation key of the binding:profile based on the allocated resources. :param context: the request context :param port_id: the uuid of the neutron port :param resource_provider_mapping: the mapping returned by the placement defining which request group get allocated from which resource providers :returns: None if the port has no resource request. Returns a single RP UUID if the port has a legacy resource request. Returns a dict of request group id: resource provider UUID mapping if the port has an extended resource request. 
""" # We need to use an admin client as the port.resource_request is admin # only neutron_admin = get_client(context, admin=True) neutron = get_client(context) port = self._show_port(context, port_id, neutron_client=neutron_admin) if self._has_resource_request(context, port, neutron): return self._get_binding_profile_allocation( context, port, neutron, resource_provider_mapping) else: return None def _get_binding_profile_allocation( self, context, port, neutron, resource_provider_mapping ): # TODO(gibi): remove this condition and the else branch once Nova does # not need to support old Neutron sending the legacy resource request # extension if self.has_extended_resource_request_extension( context, neutron ): # The extended resource request format also means that a # port has more than a one request groups request_groups = port.get( constants.RESOURCE_REQUEST, {}).get( constants.REQUEST_GROUPS, []) # Each request group id from the port needs to be mapped to # a single provider id from the provider mappings. Each # group from the port is mapped to a numbered request group # in placement so we can assume that they are mapped to # a single provider and therefore the provider mapping list # has a single provider id. allocation = { group['id']: resource_provider_mapping[group['id']][0] for group in request_groups } else: # This is the legacy resource request format where a port # is mapped to a single request group # NOTE(gibi): In the resource provider mapping there can be # more than one RP fulfilling a request group. But resource # requests of a Neutron port is always mapped to a # numbered request group that is always fulfilled by one # resource provider. So we only pass that single RP UUID # here. allocation = resource_provider_mapping[ port['id']][0] return allocation def allocate_for_instance(self, context, instance, requested_networks, security_groups=None, bind_host_id=None, resource_provider_mapping=None, network_arqs=None): """Allocate network resources for the instance. :param context: The request context. :param instance: nova.objects.instance.Instance object. :param requested_networks: objects.NetworkRequestList object. :param security_groups: None or security groups to allocate for instance. :param bind_host_id: the host ID to attach to the ports being created. :param resource_provider_mapping: a dict keyed by ids of the entities (for example Neutron port) requesting resources for this instance mapped to a list of resource provider UUIDs that are fulfilling such a resource request. :param network_arqs: dict keyed by arq uuid, of ARQs allocated to ports. :returns: network info as from get_instance_nw_info() """ LOG.debug('allocate_for_instance()', instance=instance) if not instance.project_id: msg = _('empty project id for instance %s') raise exception.InvalidInput( reason=msg % instance.uuid) # We do not want to create a new neutron session for each call neutron = get_client(context) # We always need admin_client to build nw_info, # we sometimes need it when updating ports admin_client = get_client(context, admin=True) # # Validate ports and networks with neutron. The requested_ports_dict # variable is a dict, keyed by port ID, of ports that were on the user # request and may be empty. The ordered_networks variable is a list of # NetworkRequest objects for any networks or ports specifically # requested by the user, which again may be empty. 
# # NOTE(gibi): we use the admin_client here to ensure that the returned # ports has the resource_request attribute filled as later we use this # information to decide when to add allocation key to the port binding. # See bug 1849657. requested_ports_dict, ordered_networks = ( self._validate_requested_port_ids( context, instance, admin_client, requested_networks)) nets = self._validate_requested_network_ids( context, instance, neutron, requested_networks, ordered_networks) if not nets: LOG.debug("No network configured", instance=instance) return network_model.NetworkInfo([]) # Validate requested security groups security_groups = self._clean_security_groups(security_groups) security_group_ids = self._process_security_groups( instance, neutron, security_groups) # Tell Neutron which resource provider fulfills the ports' resource # request. # We only consider pre-created ports here as ports created # below based on requested networks are not scheduled to have their # resource request fulfilled. for port in requested_ports_dict.values(): # only communicate the allocations if the port has resource # requests if self._has_resource_request(context, port, neutron): profile = get_binding_profile(port) profile[constants.ALLOCATION] = ( self._get_binding_profile_allocation( context, port, neutron, resource_provider_mapping)) port[constants.BINDING_PROFILE] = profile # Create ports from the list of ordered_networks. The returned # requests_and_created_ports variable is a list of 2-item tuples of # the form (NetworkRequest, created_port_id). Note that a tuple pair # will have None for the created_port_id if the NetworkRequest already # contains a port_id, meaning the user requested a specific # pre-existing port so one wasn't created here. The ports will be # updated later in _update_ports_for_instance to be bound to the # instance and compute host. requests_and_created_ports = self._create_ports_for_instance( context, instance, ordered_networks, nets, neutron, security_group_ids) # # Update existing and newly created ports # ordered_nets, ordered_port_ids, preexisting_port_ids, \ created_port_ids = self._update_ports_for_instance( context, instance, neutron, admin_client, requests_and_created_ports, nets, bind_host_id, requested_ports_dict, network_arqs) # # Perform a full update of the network_info_cache, # including re-fetching lots of the required data from neutron # nw_info = self.get_instance_nw_info( context, instance, networks=ordered_nets, port_ids=ordered_port_ids, admin_client=admin_client, preexisting_port_ids=preexisting_port_ids) # Only return info about ports we processed in this run, which might # have been pre-existing neutron ports or ones that nova created. In # the initial allocation case (server create), this will be everything # we processed, and in later runs will only be what was processed that # time. For example, if the instance was created with port A and # then port B was attached in this call, only port B would be returned. # Thus, this filtering only affects the attach case. return network_model.NetworkInfo([vif for vif in nw_info if vif['id'] in created_port_ids + preexisting_port_ids]) def _update_ports_for_instance(self, context, instance, neutron, admin_client, requests_and_created_ports, nets, bind_host_id, requested_ports_dict, network_arqs): """Update ports from network_requests. 
Updates the pre-existing ports and the ones created in ``_create_ports_for_instance`` with ``device_id``, ``device_owner``, optionally ``mac_address`` and, depending on the loaded extensions, ``rxtx_factor``, ``binding:host_id``, ``dns_name``. :param context: The request context. :param instance: nova.objects.instance.Instance object. :param neutron: client using user context :param admin_client: client using admin context :param requests_and_created_ports: [(NetworkRequest, created_port_id)]; Note that created_port_id will be None for any user-requested pre-existing port. :param nets: a dict of network_id to networks returned from neutron :param bind_host_id: a string for port['binding:host_id'] :param requested_ports_dict: dict, keyed by port ID, of ports requested by the user :param network_arqs: dict keyed by arq uuid, of ARQs allocated to ports. :returns: tuple with the following:: * list of network dicts in their requested order * list of port IDs in their requested order - note that does not mean the port was requested by the user, it could be a port created on a network requested by the user * list of pre-existing port IDs requested by the user * list of created port IDs """ # We currently require admin creds to set port bindings. port_client = admin_client preexisting_port_ids = [] created_port_ids = [] ports_in_requested_order = [] nets_in_requested_order = [] created_vifs = [] # this list is for cleanups if we fail for request, created_port_id in requests_and_created_ports: vifobj = objects.VirtualInterface(context) vifobj.instance_uuid = instance.uuid vifobj.tag = request.tag if 'tag' in request else None network = nets.get(request.network_id) # if network_id did not pass validate_networks() and not available # here then skip it safely not continuing with a None Network if not network: continue nets_in_requested_order.append(network) zone = 'compute:%s' % instance.availability_zone port_req_body = {'port': {'device_id': instance.uuid, 'device_owner': zone}} if (requested_ports_dict and request.port_id in requested_ports_dict and get_binding_profile(requested_ports_dict[request.port_id])): port_req_body['port'][constants.BINDING_PROFILE] = \ get_binding_profile(requested_ports_dict[request.port_id]) try: port_arq = None if network_arqs: port_arq = network_arqs.get(request.arq_uuid, None) self._populate_neutron_extension_values( context, instance, request.pci_request_id, port_req_body, network=network, neutron=neutron, bind_host_id=bind_host_id, port_arq=port_arq) # NOTE(gibi): Remove this once we are sure that the fix for # bug 1942329 is always present in the deployed neutron. The # _populate_neutron_extension_values() call above already # populated this MAC to the binding profile instead. self._populate_pci_mac_address(instance, request.pci_request_id, port_req_body) if created_port_id: port_id = created_port_id created_port_ids.append(port_id) else: port_id = request.port_id ports_in_requested_order.append(port_id) # After port is created, update other bits updated_port = self._update_port( port_client, instance, port_id, port_req_body) # NOTE(danms): The virtual_interfaces table enforces global # uniqueness on MAC addresses, which clearly does not match # with neutron's view of the world. Since address is a 255-char # string we can namespace it with our port id. Using '/' should # be safely excluded from MAC address notations as well as # UUIDs. 
We can stop doing this now that we've removed # nova-network, but we need to leave the read translation in # for longer than that of course. vifobj.address = '%s/%s' % (updated_port['mac_address'], updated_port['id']) vifobj.uuid = port_id vifobj.create() created_vifs.append(vifobj) if not created_port_id: # only add if update worked and port create not called preexisting_port_ids.append(port_id) self._update_port_dns_name(context, instance, network, ports_in_requested_order[-1], neutron) except Exception: with excutils.save_and_reraise_exception(): self._unbind_ports(context, preexisting_port_ids, neutron, port_client) self._delete_ports(neutron, instance, created_port_ids) for vif in created_vifs: vif.destroy() return (nets_in_requested_order, ports_in_requested_order, preexisting_port_ids, created_port_ids) def _refresh_neutron_extensions_cache(self, client): """Refresh the neutron extensions cache when necessary.""" if (not self.last_neutron_extension_sync or ((time.time() - self.last_neutron_extension_sync) >= CONF.neutron.extension_sync_interval)): extensions_list = client.list_extensions()['extensions'] self.last_neutron_extension_sync = time.time() self.extensions.clear() self.extensions = {ext['alias']: ext for ext in extensions_list} def _has_extension(self, extension, context=None, client=None): """Check if the provided neutron extension is enabled. :param extension: The alias of the extension to check :param client: keystoneauth1.adapter.Adapter :param context: nova.context.RequestContext :returns: True if the neutron extension is available, else False """ if client is None: client = get_client(context) self._refresh_neutron_extensions_cache(client) return extension in self.extensions def has_multi_provider_extension(self, context=None, client=None): """Check if the 'multi-provider' extension is enabled. This extension allows administrative users to define multiple physical bindings for a logical network. """ return self._has_extension(constants.MULTI_PROVIDER, context, client) def has_dns_extension(self, context=None, client=None): """Check if the 'dns-integration' extension is enabled. This extension adds the 'dns_name' and 'dns_assignment' attributes to port resources. """ return self._has_extension(constants.DNS_INTEGRATION, context, client) def has_sg_shared_filter_extension(self, context=None, client=None): """Check if the 'security-groups-shared-filtering' extension is enabled. This extension adds a 'shared' filter to security group APIs. """ return self._has_extension(constants.SG_SHARED_FILTER, context, client) # TODO(gibi): Remove all branches where this is False after Neutron made # the this extension mandatory. In Xena this extension will be optional to # support the scenario where Neutron upgraded first. So Neutron can mark # this mandatory earliest in Yoga. def has_extended_resource_request_extension( self, context=None, client=None, ): return self._has_extension( constants.RESOURCE_REQUEST_GROUPS, context, client, ) def has_vnic_index_extension(self, context=None, client=None): """Check if the 'vnic-index' extension is enabled. This extension is provided by the VMWare NSX neutron plugin. """ return self._has_extension(constants.VNIC_INDEX, context, client) def has_fip_port_details_extension(self, context=None, client=None): """Check if the 'fip-port-details' extension is enabled. This extension adds the 'port_details' attribute to floating IPs. 
""" return self._has_extension(constants.FIP_PORT_DETAILS, context, client) def has_substr_port_filtering_extension(self, context=None, client=None): """Check if the 'ip-substring-filtering' extension is enabled. This extension adds support for filtering ports by using part of an IP address. """ return self._has_extension( constants.SUBSTR_PORT_FILTERING, context, client ) def has_segment_extension(self, context=None, client=None): """Check if the neutron 'segment' extension is enabled. This extension exposes information about L2 segments of a network. """ return self._has_extension( constants.SEGMENT, context, client, ) def has_port_binding_extension(self, context=None, client=None): """Check if the neutron 'binding-extended' extension is enabled. This extensions exposes port bindings of a virtual port to external application. This extension allows nova to bind a port to multiple hosts at the same time, like during live migration. """ return self._has_extension( constants.PORT_BINDING_EXTENDED, context, client ) def bind_ports_to_host(self, context, instance, host, vnic_types=None, port_profiles=None): """Attempts to bind the ports from the instance on the given host If the ports are already actively bound to another host, like the source host during live migration, then the new port bindings will be inactive, assuming $host is the destination host for the live migration. In the event of an error, any ports which were successfully bound to the host should have those host bindings removed from the ports. This method should not be used if "has_port_binding_extension" returns False. :param context: the user request context :type context: nova.context.RequestContext :param instance: the instance with a set of ports :type instance: nova.objects.Instance :param host: the host on which to bind the ports which are attached to the instance :type host: str :param vnic_types: optional dict for the host port binding :type vnic_types: dict of : :param port_profiles: optional dict per port ID for the host port binding profile. note that the port binding profile is mutable via the networking "Port Binding" API so callers that pass in a profile should ensure they have the latest version from neutron with their changes merged, which can be determined using the "revision_number" attribute of the port. :type port_profiles: dict of : :raises: PortBindingFailed if any of the ports failed to be bound to the destination host :returns: dict, keyed by port ID, of a new host port binding dict per port that was bound """ # Get the current ports off the instance. This assumes the cache is # current. network_info = instance.get_network_info() if not network_info: # The instance doesn't have any ports so there is nothing to do. 
LOG.debug('Instance does not have any ports.', instance=instance) return {} client = get_client(context, admin=True) bindings_by_port_id: ty.Dict[str, ty.Any] = {} for vif in network_info: # Now bind each port to the destination host and keep track of each # port that is bound to the resulting binding so we can rollback in # the event of a failure, or return the results if everything is OK port_id = vif['id'] binding = dict(host=host) if vnic_types is None or port_id not in vnic_types: binding['vnic_type'] = vif['vnic_type'] else: binding['vnic_type'] = vnic_types[port_id] if port_profiles is None or port_id not in port_profiles: binding['profile'] = vif['profile'] else: binding['profile'] = port_profiles[port_id] data = {'binding': binding} try: binding = client.create_port_binding(port_id, data)['binding'] except neutron_client_exc.NeutronClientException: # Something failed, so log the error and rollback any # successful bindings. LOG.error('Binding failed for port %s and host %s.', port_id, host, instance=instance, exc_info=True) for rollback_port_id in bindings_by_port_id: try: client.delete_port_binding(rollback_port_id, host) except neutron_client_exc.NeutronClientException as exc: if exc.status_code != 404: LOG.warning('Failed to remove binding for port %s ' 'on host %s.', rollback_port_id, host, instance=instance) raise exception.PortBindingFailed(port_id=port_id) bindings_by_port_id[port_id] = binding return bindings_by_port_id def delete_port_binding(self, context, port_id, host): """Delete the port binding for the given port ID and host This method should not be used if "has_port_binding_extension" returns False. :param context: The request context for the operation. :param port_id: The ID of the port with a binding to the host. :param host: The host from which port bindings should be deleted. :raises: nova.exception.PortBindingDeletionFailed if a non-404 error response is received from neutron. """ client = get_client(context, admin=True) try: client.delete_port_binding(port_id, host) except neutron_client_exc.NeutronClientException as exc: # We can safely ignore 404s since we're trying to delete # the thing that wasn't found anyway. if exc.status_code != 404: LOG.error( 'Unexpected error trying to delete binding for port %s ' 'and host %s.', port_id, host, exc_info=True) raise exception.PortBindingDeletionFailed( port_id=port_id, host=host) def _get_vf_pci_device_profile(self, pci_dev): """Get VF-specific fields to add to the PCI device profile. This data can be useful, e.g. for off-path networking backends that need to do the necessary plumbing in order to set a VF up for packet forwarding. """ vf_profile: ty.Dict[str, ty.Union[str, int]] = {} pf_mac = pci_dev.sriov_cap.get('pf_mac_address') vf_num = pci_dev.sriov_cap.get('vf_num') card_serial_number = pci_dev.card_serial_number if card_serial_number is not None: vf_profile['card_serial_number'] = card_serial_number if pf_mac is not None: vf_profile['pf_mac_address'] = pf_mac if vf_num is not None: vf_profile['vf_num'] = vf_num # Update port binding capabilities using PCI device's network # capabilities if they exist. 
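        # Illustrative (hypothetical values) profile assembled by this
        # helper for a VF with all optional data present:
        #   {'pf_mac_address': '52:54:00:1e:59:c6',
        #    'vf_num': 1,
        #    'card_serial_number': 'MT2113X00000',
        #    'capabilities': ['switchdev']}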
pci_net_caps = pci_dev.network_caps if pci_net_caps: vf_profile.update({'capabilities': pci_net_caps}) return vf_profile def _get_pci_device_profile(self, pci_dev): dev_spec = self.pci_whitelist.get_devspec(pci_dev) if dev_spec: dev_profile = { 'pci_vendor_info': "%s:%s" % (pci_dev.vendor_id, pci_dev.product_id), 'pci_slot': pci_dev.address, 'physical_network': dev_spec.get_tags().get( 'physical_network' ), } if pci_dev.dev_type == obj_fields.PciDeviceType.SRIOV_VF: dev_profile.update( self._get_vf_pci_device_profile(pci_dev)) if pci_dev.dev_type == obj_fields.PciDeviceType.SRIOV_PF: # In general the MAC address information flows from the neutron # port to the device in the backend. Except for direct-physical # ports. In that case the MAC address flows from the physical # device, the PF, to the neutron port. So when such a port is # being bound to a host the port's MAC address needs to be # updated. Nova needs to put the new MAC into the binding # profile. if pci_dev.mac_address: dev_profile['device_mac_address'] = pci_dev.mac_address return dev_profile raise exception.PciDeviceNotFound(node_id=pci_dev.compute_node_id, address=pci_dev.address) def _populate_neutron_binding_profile(self, instance, pci_request_id, port_req_body, port_arq): """Populate neutron binding:profile. Populate it with SR-IOV related information :raises PciDeviceNotFound: If a claimed PCI device for the given pci_request_id cannot be found on the instance. """ if pci_request_id: pci_devices = instance.get_pci_devices(request_id=pci_request_id) if not pci_devices: # The pci_request_id likely won't mean much except for tracing # through the logs since it is generated per request. LOG.error('Unable to find PCI device using PCI request ID in ' 'list of claimed instance PCI devices: %s. Is the ' '[pci]device_spec configuration correct?', # Convert to a primitive list to stringify it. list(instance.pci_devices), instance=instance) raise exception.PciDeviceNotFound( _('PCI device not found for request ID %s.') % pci_request_id) pci_dev = pci_devices.pop() profile = copy.deepcopy(get_binding_profile(port_req_body['port'])) profile.update(self._get_pci_device_profile(pci_dev)) port_req_body['port'][constants.BINDING_PROFILE] = profile if port_arq: # PCI SRIOV device according port ARQ profile = copy.deepcopy(get_binding_profile(port_req_body['port'])) profile.update(cyborg.get_arq_pci_device_profile(port_arq)) port_req_body['port'][constants.BINDING_PROFILE] = profile @staticmethod def _populate_pci_mac_address(instance, pci_request_id, port_req_body): """Add the updated MAC address value to the update_port request body. Currently this is done only for PF passthrough. """ if pci_request_id is not None: pci_devs = instance.get_pci_devices(request_id=pci_request_id) if len(pci_devs) != 1: # NOTE(ndipanov): We shouldn't ever get here since # InstancePCIRequest instances built from network requests # only ever index a single device, which needs to be # successfully claimed for this to be called as part of # allocate_networks method LOG.error("PCI request %s does not have a " "unique device associated with it. 
Unable to " "determine MAC address", pci_request_id, instance=instance) return pci_dev = pci_devs[0] if pci_dev.dev_type == obj_fields.PciDeviceType.SRIOV_PF: try: mac = pci_utils.get_mac_by_pci_address(pci_dev.address) except exception.PciDeviceNotFoundById as e: LOG.error( "Could not determine MAC address for %(addr)s, " "error: %(e)s", {"addr": pci_dev.address, "e": e}, instance=instance) else: port_req_body['port']['mac_address'] = mac def _populate_neutron_extension_values(self, context, instance, pci_request_id, port_req_body, network=None, neutron=None, bind_host_id=None, port_arq=None): """Populate neutron extension values for the instance. If the extensions loaded contain QOS_QUEUE then pass the rxtx_factor. """ if neutron is None: neutron = get_client(context) port_req_body['port'][constants.BINDING_HOST_ID] = bind_host_id self._populate_neutron_binding_profile(instance, pci_request_id, port_req_body, port_arq) if self.has_dns_extension(client=neutron): # If the DNS integration extension is enabled in Neutron, most # ports will get their dns_name attribute set in the port create or # update requests in allocate_for_instance. So we just add the # dns_name attribute to the payload of those requests. The # exception is when the port binding extension is enabled in # Neutron and the port is on a network that has a non-blank # dns_domain attribute. This case requires to be processed by # method _update_port_dns_name if (not network.get('dns_domain')): port_req_body['port']['dns_name'] = instance.hostname def _update_port_dns_name(self, context, instance, network, port_id, neutron): """Update an instance port dns_name attribute with instance.hostname. The dns_name attribute of a port on a network with a non-blank dns_domain attribute will be sent to the external DNS service (Designate) if DNS integration is enabled in Neutron. This requires the assignment of the dns_name to the port to be done with a Neutron client using the user's context. allocate_for_instance uses a port with admin context if the port binding extensions is enabled in Neutron. In this case, we assign in this method the dns_name attribute to the port with an additional update request. Only a very small fraction of ports will require this additional update request. """ if self.has_dns_extension(client=neutron) and network.get( 'dns_domain'): try: port_req_body = {'port': {'dns_name': instance.hostname}} neutron.update_port(port_id, port_req_body) except neutron_client_exc.BadRequest: LOG.warning('Neutron error: Instance hostname ' '%(hostname)s is not a valid DNS name', {'hostname': instance.hostname}, instance=instance) msg = (_('Instance hostname %(hostname)s is not a valid DNS ' 'name') % {'hostname': instance.hostname}) raise exception.InvalidInput(reason=msg) def _reset_port_dns_name(self, network, port_id, client): """Reset an instance port dns_name attribute to empty when using external DNS service. _unbind_ports uses a client with admin context to reset the dns_name if the DNS extension is enabled and network does not have dns_domain set. When external DNS service is enabled, we use this method to make the request with a Neutron client using user's context, so that the DNS record can be found under user's zone and domain. 
""" if self.has_dns_extension(client=client) and network.get( 'dns_domain'): try: port_req_body = {'port': {'dns_name': ''}} client.update_port(port_id, port_req_body) except neutron_client_exc.NeutronClientException: LOG.exception("Failed to reset dns_name for port %s", port_id) def _delete_ports(self, neutron, instance, ports, raise_if_fail=False): exceptions = [] for port in ports: try: neutron.delete_port(port) except neutron_client_exc.NeutronClientException as e: if e.status_code == 404: LOG.warning("Port %s does not exist", port, instance=instance) else: exceptions.append(e) LOG.warning("Failed to delete port %s for instance.", port, instance=instance, exc_info=True) if len(exceptions) > 0 and raise_if_fail: raise exceptions[0] def deallocate_for_instance(self, context, instance, **kwargs): """Deallocate all network resources related to the instance.""" LOG.debug('deallocate_for_instance()', instance=instance) search_opts = {'device_id': instance.uuid} neutron = get_client(context) data = neutron.list_ports(**search_opts) ports = {port['id'] for port in data.get('ports', [])} requested_networks = kwargs.get('requested_networks') or [] # NOTE(danms): Temporary and transitional if isinstance(requested_networks, objects.NetworkRequestList): requested_networks = requested_networks.as_tuples() ports_to_skip = set([port_id for nets, fips, port_id, pci_request_id, arq_uuid, device_profile in requested_networks]) # NOTE(boden): requested_networks only passed in when deallocating # from a failed build / spawn call. Therefore we need to include # preexisting ports when deallocating from a standard delete op # in which case requested_networks is not provided. ports_to_skip |= set(self._get_preexisting_port_ids(instance)) ports = set(ports) - ports_to_skip # Reset device_id and device_owner for the ports that are skipped self._unbind_ports(context, ports_to_skip, neutron) # Delete the rest of the ports self._delete_ports(neutron, instance, ports, raise_if_fail=True) # deallocate vifs (mac addresses) objects.VirtualInterface.delete_by_instance_uuid( context, instance.uuid) # NOTE(arosen): This clears out the network_cache only if the instance # hasn't already been deleted. This is needed when an instance fails to # launch and is rescheduled onto another compute node. If the instance # has already been deleted this call does nothing. update_instance_cache_with_nw_info(self, context, instance, network_model.NetworkInfo([])) def deallocate_port_for_instance(self, context, instance, port_id): """Remove a specified port from the instance. :param context: the request context :param instance: the instance object the port is detached from :param port_id: the UUID of the port being detached :return: A NetworkInfo, port_allocation tuple where the port_allocation is a dict which contains the resource allocation of the port per resource provider uuid. E.g.: { rp_uuid: { "resources": { "NET_BW_EGR_KILOBIT_PER_SEC": 10000, "NET_BW_IGR_KILOBIT_PER_SEC": 20000, } } } Note that right now this dict only contains a single key as a neutron port only allocates from a single resource provider. 
""" # We need to use an admin client as the port.resource_request is admin # only neutron_admin = get_client(context, admin=True) neutron = get_client(context) port_allocation: ty.Dict = {} try: # NOTE(gibi): we need to read the port resource information from # neutron here as we might delete the port below port = neutron_admin.show_port(port_id)['port'] except exception.PortNotFound: LOG.debug('Unable to determine port %s resource allocation ' 'information as the port no longer exists.', port_id) port = None preexisting_ports = self._get_preexisting_port_ids(instance) if port_id in preexisting_ports: self._unbind_ports(context, [port_id], neutron) else: self._delete_ports(neutron, instance, [port_id], raise_if_fail=True) # Delete the VirtualInterface for the given port_id. vif = objects.VirtualInterface.get_by_uuid(context, port_id) if vif: self._delete_nic_metadata(instance, vif) vif.destroy() else: LOG.debug('VirtualInterface not found for port: %s', port_id, instance=instance) if port: # if there is resource associated to this port then that needs to # be deallocated so lets return info about such allocation resource_request = port.get(constants.RESOURCE_REQUEST) or {} profile = get_binding_profile(port) if self.has_extended_resource_request_extension(context, neutron): # new format groups = resource_request.get(constants.REQUEST_GROUPS) if groups: allocated_rps = profile.get(constants.ALLOCATION) for group in groups: allocated_rp = allocated_rps[group['id']] port_allocation[allocated_rp] = { "resources": group.get("resources", {}) } else: # legacy format allocated_rp = profile.get(constants.ALLOCATION) if resource_request and allocated_rp: port_allocation = { allocated_rp: { "resources": resource_request.get("resources", {}) } } else: # Check the info_cache. If the port is still in the info_cache and # in that cache there is allocation in the profile then we suspect # that the port is disappeared without deallocating the resources. for vif in instance.get_network_info(): if vif['id'] == port_id: profile = vif.get('profile') or {} rp_uuid = profile.get(constants.ALLOCATION) if rp_uuid: LOG.warning( 'Port %s disappeared during deallocate but it had ' 'resource allocation on resource provider %s. ' 'Resource allocation for this port may be ' 'leaked.', port_id, rp_uuid, instance=instance) break return self.get_instance_nw_info(context, instance), port_allocation def _delete_nic_metadata(self, instance, vif): if not instance.device_metadata: # nothing to delete return for device in instance.device_metadata.devices: if (isinstance(device, objects.NetworkInterfaceMetadata) and device.mac == vif.address): instance.device_metadata.devices.remove(device) instance.save() break def list_ports(self, context, **search_opts): """List ports for the client based on search options.""" return get_client(context).list_ports(**search_opts) def show_port(self, context, port_id): """Return the port for the client given the port id. :param context: Request context. :param port_id: The id of port to be queried. :returns: A dict containing port data keyed by 'port', e.g. :: {'port': {'port_id': 'abcd', 'fixed_ip_address': '1.2.3.4'}} """ return dict(port=self._show_port(context, port_id)) def _show_port(self, context, port_id, neutron_client=None, fields=None): """Return the port for the client given the port id. :param context: Request context. :param port_id: The id of port to be queried. :param neutron_client: A neutron client. :param fields: The condition fields to query port data. 
:returns: A dict of port data. e.g. {'port_id': 'abcd', 'fixed_ip_address': '1.2.3.4'} """ if not neutron_client: neutron_client = get_client(context) try: if fields: result = neutron_client.show_port(port_id, fields=fields) else: result = neutron_client.show_port(port_id) return result.get('port') except neutron_client_exc.PortNotFoundClient: raise exception.PortNotFound(port_id=port_id) except neutron_client_exc.Unauthorized: raise exception.Forbidden() except neutron_client_exc.NeutronClientException as exc: msg = (_("Failed to access port %(port_id)s: %(reason)s") % {'port_id': port_id, 'reason': exc}) raise exception.NovaException(message=msg) def get_instance_nw_info(self, context, instance, **kwargs): """Returns all network info related to an instance.""" with lockutils.lock('refresh_cache-%s' % instance.uuid): result = self._get_instance_nw_info(context, instance, **kwargs) update_instance_cache_with_nw_info(self, context, instance, nw_info=result) return result def _get_instance_nw_info(self, context, instance, networks=None, port_ids=None, admin_client=None, preexisting_port_ids=None, refresh_vif_id=None, force_refresh=False, **kwargs): # NOTE(danms): This is an inner method intended to be called # by other code that updates instance nwinfo. It *must* be # called with the refresh_cache-%(instance_uuid) lock held! if force_refresh: LOG.debug('Forcefully refreshing network info cache for instance', instance=instance) elif refresh_vif_id: LOG.debug('Refreshing network info cache for port %s', refresh_vif_id, instance=instance) else: LOG.debug('Building network info cache for instance', instance=instance) # Ensure that we have an up to date copy of the instance info cache. # Otherwise multiple requests could collide and cause cache # corruption. compute_utils.refresh_info_cache_for_instance(context, instance) nw_info = self._build_network_info_model(context, instance, networks, port_ids, admin_client, preexisting_port_ids, refresh_vif_id, force_refresh=force_refresh) return network_model.NetworkInfo.hydrate(nw_info) def _gather_port_ids_and_networks(self, context, instance, networks=None, port_ids=None, neutron=None): """Return an instance's complete list of port_ids and networks. The results are based on the instance info_cache in the nova db, not the instance's current list of ports in neutron. """ if ((networks is None and port_ids is not None) or (port_ids is None and networks is not None)): message = _("This method needs to be called with either " "networks=None and port_ids=None or port_ids and " "networks as not none.") raise exception.NovaException(message=message) ifaces = instance.get_network_info() # This code path is only done when refreshing the network_cache if port_ids is None: port_ids = [iface['id'] for iface in ifaces] net_ids = [iface['network']['id'] for iface in ifaces] if networks is None: networks = self._get_available_networks(context, instance.project_id, net_ids, neutron) # an interface was added/removed from instance. else: # Prepare the network ids list for validation purposes networks_ids = [network['id'] for network in networks] # Validate that interface networks doesn't exist in networks. # Though this issue can and should be solved in methods # that prepare the networks list, this method should have this # ignore-duplicate-networks/port-ids mechanism to reduce the # probability of failing to boot the VM. 
networks = networks + [ {'id': iface['network']['id'], 'name': iface['network']['label'], 'tenant_id': iface['network']['meta']['tenant_id']} for iface in ifaces if _is_not_duplicate(iface['network']['id'], networks_ids, "networks", instance)] # Include existing interfaces so they are not removed from the db. # Validate that the interface id is not in the port_ids port_ids = [iface['id'] for iface in ifaces if _is_not_duplicate(iface['id'], port_ids, "port_ids", instance)] + port_ids return networks, port_ids @refresh_cache def add_fixed_ip_to_instance(self, context, instance, network_id): """Add a fixed IP to the instance from specified network.""" neutron = get_client(context) search_opts = {'network_id': network_id} data = neutron.list_subnets(**search_opts) ipam_subnets = data.get('subnets', []) if not ipam_subnets: raise exception.NetworkNotFoundForInstance( instance_id=instance.uuid) zone = 'compute:%s' % instance.availability_zone search_opts = {'device_id': instance.uuid, 'device_owner': zone, 'network_id': network_id} data = neutron.list_ports(**search_opts) ports = data['ports'] for p in ports: for subnet in ipam_subnets: fixed_ips = p['fixed_ips'] fixed_ips.append({'subnet_id': subnet['id']}) port_req_body = {'port': {'fixed_ips': fixed_ips}} try: neutron.update_port(p['id'], port_req_body) return self._get_instance_nw_info(context, instance) except Exception as ex: msg = ("Unable to update port %(portid)s on subnet " "%(subnet_id)s with failure: %(exception)s") LOG.debug(msg, {'portid': p['id'], 'subnet_id': subnet['id'], 'exception': ex}, instance=instance) raise exception.NetworkNotFoundForInstance( instance_id=instance.uuid) @refresh_cache def remove_fixed_ip_from_instance(self, context, instance, address): """Remove a fixed IP from the instance.""" neutron = get_client(context) zone = 'compute:%s' % instance.availability_zone search_opts = {'device_id': instance.uuid, 'device_owner': zone, 'fixed_ips': 'ip_address=%s' % address} data = neutron.list_ports(**search_opts) ports = data['ports'] for p in ports: fixed_ips = p['fixed_ips'] new_fixed_ips = [] for fixed_ip in fixed_ips: if fixed_ip['ip_address'] != address: new_fixed_ips.append(fixed_ip) port_req_body = {'port': {'fixed_ips': new_fixed_ips}} try: neutron.update_port(p['id'], port_req_body) except Exception as ex: msg = ("Unable to update port %(portid)s with" " failure: %(exception)s") LOG.debug(msg, {'portid': p['id'], 'exception': ex}, instance=instance) return self._get_instance_nw_info(context, instance) raise exception.FixedIpNotFoundForInstance( instance_uuid=instance.uuid, ip=address) def _get_physnet_tunneled_info(self, context, neutron, net_id): """Retrieve detailed network info. :param context: The request context. :param neutron: The neutron client object. :param net_id: The ID of the network to retrieve information for. :return: A tuple containing the physnet name, if defined, and the tunneled status of the network. If the network uses multiple segments, the first segment that defines a physnet value will be used for the physnet name. """ if self.has_multi_provider_extension(client=neutron): network = neutron.show_network(net_id, fields='segments').get('network') segments = network.get('segments', {}) for net in segments: # NOTE(vladikr): In general, "multi-segments" network is a # combination of L2 segments. The current implementation # contains a vxlan and vlan(s) segments, where only a vlan # network will have a physical_network specified, but may # change in the future. 
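                # Illustrative (hypothetical) 'segments' payload for such
                # a network, as returned by show_network:
                #   [{'provider:network_type': 'vxlan',
                #     'provider:physical_network': None,
                #     'provider:segmentation_id': 1001},
                #    {'provider:network_type': 'vlan',
                #     'provider:physical_network': 'physnet1',
                #     'provider:segmentation_id': 100}]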
The purpose of this method # is to find a first segment that provides a physical network. # TODO(vladikr): Additional work will be required to handle the # case of multiple vlan segments associated with different # physical networks. physnet_name = net.get('provider:physical_network') if physnet_name: return physnet_name, False # Raising here as at least one segment should # have a physical network provided. if segments: msg = (_("None of the segments of network %s provides a " "physical_network") % net_id) raise exception.NovaException(message=msg) net = neutron.show_network( net_id, fields=['provider:physical_network', 'provider:network_type']).get('network') return (net.get('provider:physical_network'), net.get('provider:network_type') in constants.L3_NETWORK_TYPES) @staticmethod def _get_trusted_mode_from_port(port): """Returns whether trusted mode is requested If port binding does not provide any information about trusted status this function is returning None """ value = get_binding_profile(port).get('trusted') if value is not None: # This allows the user to specify things like '1' and 'yes' in # the port binding profile and we can handle it as a boolean. return strutils.bool_from_string(value) @staticmethod def _is_remote_managed(vnic_type): """Determine if the port is remote_managed or not by VNIC type. :param str vnic_type: The VNIC type to assess. :return: A boolean indicator whether the NIC is remote managed or not. :rtype: bool """ return vnic_type == network_model.VNIC_TYPE_REMOTE_MANAGED def is_remote_managed_port(self, context, port_id): """Determine if a port has a REMOTE_MANAGED VNIC type. :param context: The request context :param port_id: The id of the Neutron port """ port = self.show_port(context, port_id)['port'] return self._is_remote_managed( port.get('binding:vnic_type', network_model.VNIC_TYPE_NORMAL) ) # NOTE(sean-k-mooney): we might want to have this return a # nova.network.model.VIF object instead in the future. def _get_port_vnic_info(self, context, neutron, port_id): """Retrieve port vNIC info :param context: The request context :param neutron: The Neutron client :param port_id: The id of port to be queried :return: A tuple of vNIC type, trusted status, network ID, resource request of the port if any and port numa affinity policy, and device_profile. Trusted status only affects SR-IOV ports and will always be None for other port types. If no port numa policy is requested by a port, None will be returned. """ fields = ['binding:vnic_type', constants.BINDING_PROFILE, 'network_id', constants.RESOURCE_REQUEST, constants.NUMA_POLICY, 'device_profile'] port = self._show_port( context, port_id, neutron_client=neutron, fields=fields) network_id = port.get('network_id') trusted = None vnic_type = port.get('binding:vnic_type', network_model.VNIC_TYPE_NORMAL) if vnic_type in network_model.VNIC_TYPES_SRIOV: trusted = self._get_trusted_mode_from_port(port) # NOTE(gibi): Get the port resource_request which may or may not be # set depending on neutron configuration, e.g. if QoS rules are # applied to the port/network and the port-resource-request API # extension is enabled. 
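        # Illustrative (hypothetical) legacy-format resource_request for
        # a port with minimum-bandwidth QoS; the trait names shown are
        # examples only:
        #   {'resources': {'NET_BW_EGR_KILOBIT_PER_SEC': 10000,
        #                  'NET_BW_IGR_KILOBIT_PER_SEC': 20000},
        #    'required': ['CUSTOM_PHYSNET_PHYSNET0',
        #                 'CUSTOM_VNIC_TYPE_NORMAL']}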
resource_request = port.get(constants.RESOURCE_REQUEST, None) numa_policy = port.get(constants.NUMA_POLICY, None) device_profile = port.get("device_profile", None) return (vnic_type, trusted, network_id, resource_request, numa_policy, device_profile) def support_create_with_resource_request(self, context): """Returns false if neutron is configured with extended resource request which is not currently supported. This function is only here temporarily to help mocking this check in the functional test environment. """ return not (self.has_extended_resource_request_extension(context)) def create_resource_requests( self, context, requested_networks, pci_requests=None, affinity_policy=None): """Retrieve all information for the networks passed at the time of creating the server. :param context: The request context. :param requested_networks: The networks requested for the server. :type requested_networks: nova.objects.NetworkRequestList :param pci_requests: The list of PCI requests to which additional PCI requests created here will be added. :type pci_requests: nova.objects.InstancePCIRequests :param affinity_policy: requested pci numa affinity policy :type affinity_policy: nova.objects.fields.PCINUMAAffinityPolicy :returns: A three tuple with an instance of ``objects.NetworkMetadata`` for use by the scheduler or None, a list of RequestGroup objects representing the resource needs of each requested port and a RequestLevelParam object that contains global scheduling instructions not specific to any of the RequestGroups """ if not requested_networks or requested_networks.no_allocate: return None, [], None physnets = set() tunneled = False neutron = get_client(context, admin=True) has_extended_resource_request_extension = ( self.has_extended_resource_request_extension(context, neutron)) resource_requests = [] request_level_params = objects.RequestLevelParams() for request_net in requested_networks: physnet = None trusted = None tunneled_ = False vnic_type = network_model.VNIC_TYPE_NORMAL pci_request_id = None requester_id = None port_numa_policy = None if request_net.port_id: # InstancePCIRequest.requester_id is semantically linked # to a port with a resource_request. requester_id = request_net.port_id (vnic_type, trusted, network_id, resource_request, port_numa_policy, device_profile) = self._get_port_vnic_info( context, neutron, request_net.port_id) physnet, tunneled_ = self._get_physnet_tunneled_info( context, neutron, network_id) if vnic_type in network_model.VNIC_TYPES_ACCELERATOR: # get request groups from cyborg profile if not device_profile: err = ('No device profile for port %s.' % (request_net.port_id)) raise exception.DeviceProfileError( name=device_profile, msg=err) cyclient = cyborg.get_client(context) dp_groups = cyclient.get_device_profile_groups( device_profile) dev_num = cyborg.get_device_amount_of_dp_groups(dp_groups) if dev_num > 1: err_msg = 'request multiple devices for single port.' 
raise exception.DeviceProfileError(name=device_profile, msg=err_msg) dp_request_groups = (cyclient.get_device_request_groups( dp_groups, owner=request_net.port_id)) LOG.debug("device_profile request group(ARQ): %s", dp_request_groups) # keep device_profile to avoid get vnic info again request_net.device_profile = device_profile resource_requests.extend(dp_request_groups) if resource_request: if has_extended_resource_request_extension: # need to handle the new resource request format # NOTE(gibi): explicitly orphan the RequestGroup by # setting context=None as we never intended to save it # to the DB. resource_requests.extend( objects.RequestGroup.from_extended_port_request( context=None, port_resource_request=resource_request)) request_level_params.extend_with( objects.RequestLevelParams.from_port_request( port_resource_request=resource_request)) else: # keep supporting the old format of the # resource_request # NOTE(gibi): explicitly orphan the RequestGroup by # setting context=None as we never intended to save it # to the DB. resource_requests.append( objects.RequestGroup.from_port_request( context=None, port_uuid=request_net.port_id, port_resource_request=resource_request)) elif request_net.network_id and not request_net.auto_allocate: network_id = request_net.network_id physnet, tunneled_ = self._get_physnet_tunneled_info( context, neutron, network_id) # All tunneled traffic must use the same logical NIC so we just # need to know if there is one or more tunneled networks present. tunneled = tunneled or tunneled_ # ...conversely, there can be multiple physnets, which will # generally be mapped to different NICs, and some requested # networks may use the same physnet. As a result, we need to know # the *set* of physnets from every network requested if physnet: physnets.add(physnet) if vnic_type in network_model.VNIC_TYPES_SRIOV: # TODO(moshele): To differentiate between the SR-IOV legacy # and SR-IOV ovs hardware offload we will leverage the nic # feature based scheduling in nova. This mean we will need # libvirt to expose the nic feature. At the moment # there is a limitation that deployers cannot use both # SR-IOV modes (legacy and ovs) in the same deployment. spec = { pci_request.PCI_NET_TAG: physnet, # Convert the value to string since tags are compared as # string values case-insensitively. pci_request.PCI_REMOTE_MANAGED_TAG: str(self._is_remote_managed(vnic_type)), } dev_type = pci_request.DEVICE_TYPE_FOR_VNIC_TYPE.get(vnic_type) if dev_type: spec[pci_request.PCI_DEVICE_TYPE_TAG] = dev_type if trusted is not None: # We specifically have requested device on a pool # with a tag trusted set to true or false. We # convert the value to string since tags are # compared in that way. spec[pci_request.PCI_TRUSTED_TAG] = str(trusted) request = objects.InstancePCIRequest( count=1, spec=[spec], request_id=uuidutils.generate_uuid(), requester_id=requester_id) # NOTE(sean-k-mooney): port NUMA policies take precedence # over image and flavor policies. 
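                # For example (illustrative): a port requesting a
                # 'required' NUMA affinity policy overrides a flavor or
                # image 'preferred' policy, because the port value, when
                # set, short-circuits the 'or' below.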
numa_policy = port_numa_policy or affinity_policy if numa_policy: request.numa_policy = numa_policy pci_requests.requests.append(request) pci_request_id = request.request_id # Add pci_request_id into the requested network request_net.pci_request_id = pci_request_id return ( objects.NetworkMetadata(physnets=physnets, tunneled=tunneled), resource_requests, request_level_params ) def _can_auto_allocate_network(self, context, neutron): """Helper method to determine if we can auto-allocate networks :param context: nova request context :param neutron: neutron client :returns: True if it's possible to auto-allocate networks, False otherwise. """ # run the dry-run validation, which will raise a 409 if not ready try: neutron.validate_auto_allocated_topology_requirements( context.project_id) LOG.debug('Network auto-allocation is available for project ' '%s', context.project_id) return True except neutron_client_exc.Conflict as ex: LOG.debug('Unable to auto-allocate networks. %s', str(ex)) return False def _auto_allocate_network(self, instance, neutron): """Automatically allocates a network for the given project. :param instance: create the network for the project that owns this instance :param neutron: neutron client :returns: Details of the network that was created. :raises: nova.exception.UnableToAutoAllocateNetwork :raises: nova.exception.NetworkNotFound """ project_id = instance.project_id LOG.debug('Automatically allocating a network for project %s.', project_id, instance=instance) try: topology = neutron.get_auto_allocated_topology( project_id)['auto_allocated_topology'] except neutron_client_exc.Conflict: raise exception.UnableToAutoAllocateNetwork(project_id=project_id) try: network = neutron.show_network(topology['id'])['network'] except neutron_client_exc.NetworkNotFoundClient: # This shouldn't happen since we just created the network, but # handle it anyway. LOG.error('Automatically allocated network %(network_id)s ' 'was not found.', {'network_id': topology['id']}, instance=instance) raise exception.UnableToAutoAllocateNetwork(project_id=project_id) LOG.debug('Automatically allocated network: %s', network, instance=instance) return network def _ports_needed_per_instance(self, context, neutron, requested_networks): # TODO(danms): Remove me when all callers pass an object if requested_networks and isinstance(requested_networks[0], tuple): requested_networks = objects.NetworkRequestList.from_tuples( requested_networks) ports_needed_per_instance = 0 if (requested_networks is None or len(requested_networks) == 0 or requested_networks.auto_allocate): nets = self._get_available_networks(context, context.project_id, neutron=neutron) if len(nets) > 1: # Attaching to more than one network by default doesn't # make sense, as the order will be arbitrary and the guest OS # won't know which to configure msg = _("Multiple possible networks found, use a Network " "ID to be more specific.") raise exception.NetworkAmbiguous(msg) if not nets and ( requested_networks and requested_networks.auto_allocate): # If there are no networks available to this project and we # were asked to auto-allocate a network, check to see that we # can do that first. 
LOG.debug('No networks are available for project %s; checking ' 'to see if we can automatically allocate a network.', context.project_id) if not self._can_auto_allocate_network(context, neutron): raise exception.UnableToAutoAllocateNetwork( project_id=context.project_id) ports_needed_per_instance = 1 else: net_ids_requested = [] for request in requested_networks: if request.port_id: port = self._show_port(context, request.port_id, neutron_client=neutron) if port.get('device_id'): raise exception.PortInUse(port_id=request.port_id) deferred_ip = port.get('ip_allocation') == 'deferred' ipless_port = port.get('ip_allocation') == 'none' # NOTE(carl_baldwin) A deferred IP port doesn't have an # address here. If it fails to get one later when nova # updates it with host info, Neutron will error which # raises an exception. # NOTE(sbauza): We don't need to validate the # 'connectivity' attribute of the port's # 'binding:vif_details' to ensure it's 'l2', as Neutron # already verifies it. if ( not (deferred_ip or ipless_port) and not port.get('fixed_ips') ): raise exception.PortRequiresFixedIP( port_id=request.port_id) request.network_id = port['network_id'] else: ports_needed_per_instance += 1 net_ids_requested.append(request.network_id) # NOTE(jecarey) There is currently a race condition. # That is, if you have more than one request for a specific # fixed IP at the same time then only one will be allocated # the ip. The fixed IP will be allocated to only one of the # instances that will run. The second instance will fail on # spawn. That instance will go into error state. # TODO(jecarey) Need to address this race condition once we # have the ability to update mac addresses in Neutron. if request.address: # TODO(jecarey) Need to look at consolidating list_port # calls once able to OR filters. search_opts = {'network_id': request.network_id, 'fixed_ips': 'ip_address=%s' % ( request.address), 'fields': 'device_id'} existing_ports = neutron.list_ports( **search_opts)['ports'] if existing_ports: i_uuid = existing_ports[0]['device_id'] raise exception.FixedIpAlreadyInUse( address=request.address, instance_uuid=i_uuid) # Now check to see if all requested networks exist if net_ids_requested: nets = self._get_available_networks( context, context.project_id, net_ids_requested, neutron=neutron) for net in nets: if not net.get('subnets'): raise exception.NetworkRequiresSubnet( network_uuid=net['id']) if len(nets) != len(net_ids_requested): requested_netid_set = set(net_ids_requested) returned_netid_set = set([net['id'] for net in nets]) lostid_set = requested_netid_set - returned_netid_set if lostid_set: id_str = '' for _id in lostid_set: id_str = id_str and id_str + ', ' + _id or _id raise exception.NetworkNotFound(network_id=id_str) return ports_needed_per_instance def get_requested_resource_for_instance( self, context: nova_context.RequestContext, instance_uuid: str ) -> ty.Tuple[ ty.List['objects.RequestGroup'], 'objects.RequestLevelParams']: """Collect resource requests from the ports associated to the instance :param context: nova request context :param instance_uuid: The UUID of the instance :return: A two tuple with a list of RequestGroup objects and a RequestLevelParams object. """ # NOTE(gibi): We need to use an admin client as otherwise a non admin # initiated resize causes that neutron does not fill the # resource_request field of the port and this will lead to resource # allocation issues. 
See bug 1849695 neutron = get_client(context, admin=True) # get the ports associated to this instance data = neutron.list_ports( device_id=instance_uuid, fields=['id', constants.RESOURCE_REQUEST]) resource_requests = [] request_level_params = objects.RequestLevelParams() extended_rr = self.has_extended_resource_request_extension( context, neutron) for port in data.get('ports', []): resource_request = port.get(constants.RESOURCE_REQUEST) if extended_rr and resource_request: resource_requests.extend( objects.RequestGroup.from_extended_port_request( context=None, port_resource_request=port['resource_request'])) request_level_params.extend_with( objects.RequestLevelParams.from_port_request( port_resource_request=resource_request)) else: # keep supporting the old format of the resource_request if resource_request: # NOTE(gibi): explicitly orphan the RequestGroup by setting # context=None as we never intended to save it to the DB. resource_requests.append( objects.RequestGroup.from_port_request( context=None, port_uuid=port['id'], port_resource_request=port['resource_request'])) return resource_requests, request_level_params def validate_networks(self, context, requested_networks, num_instances): """Validate that the tenant can use the requested networks. Return the number of instances than can be successfully allocated with the requested network configuration. """ LOG.debug('validate_networks() for %s', requested_networks) neutron = get_client(context) ports_needed_per_instance = self._ports_needed_per_instance( context, neutron, requested_networks) # Note(PhilD): Ideally Nova would create all required ports as part of # network validation, but port creation requires some details # from the hypervisor. So we just check the quota and return # how many of the requested number of instances can be created if ports_needed_per_instance: quotas = neutron.show_quota(context.project_id)['quota'] if quotas.get('port', -1) == -1: # Unlimited Port Quota return num_instances # We only need the port count so only ask for ids back. params = dict(tenant_id=context.project_id, fields=['id']) ports = neutron.list_ports(**params)['ports'] free_ports = quotas.get('port') - len(ports) if free_ports < 0: msg = (_("The number of defined ports: %(ports)d " "is over the limit: %(quota)d") % {'ports': len(ports), 'quota': quotas.get('port')}) raise exception.PortLimitExceeded(msg) ports_needed = ports_needed_per_instance * num_instances if free_ports >= ports_needed: return num_instances else: return free_ports // ports_needed_per_instance return num_instances def _get_instance_uuids_by_ip(self, context, address): """Retrieve instance uuids associated with the given IP address. :returns: A list of dicts containing the uuids keyed by 'instance_uuid' e.g. [{'instance_uuid': uuid}, ...] 
""" search_opts = {"fixed_ips": 'ip_address=%s' % address} data = get_client(context).list_ports(**search_opts) ports = data.get('ports', []) return [{'instance_uuid': port['device_id']} for port in ports if port['device_id']] def _get_port_id_by_fixed_address(self, client, instance, address): """Return port_id from a fixed address.""" zone = 'compute:%s' % instance.availability_zone search_opts = {'device_id': instance.uuid, 'device_owner': zone} data = client.list_ports(**search_opts) ports = data['ports'] port_id = None for p in ports: for ip in p['fixed_ips']: if ip['ip_address'] == address: port_id = p['id'] break if not port_id: raise exception.FixedIpNotFoundForAddress(address=address) return port_id @refresh_cache def associate_floating_ip(self, context, instance, floating_address, fixed_address, affect_auto_assigned=False): """Associate a floating IP with a fixed IP.""" # Note(amotoki): 'affect_auto_assigned' is not respected # since it is not used anywhere in nova code and I could # find why this parameter exists. client = get_client(context) port_id = self._get_port_id_by_fixed_address(client, instance, fixed_address) fip = self._get_floating_ip_by_address(client, floating_address) param = {'port_id': port_id, 'fixed_ip_address': fixed_address} try: client.update_floatingip(fip['id'], {'floatingip': param}) except neutron_client_exc.Conflict as e: raise exception.FloatingIpAssociateFailed(str(e)) # If the floating IP was associated with another server, try to refresh # the cache for that instance to avoid a window of time where multiple # servers in the API say they are using the same floating IP. if fip['port_id']: # Trap and log any errors from # _update_inst_info_cache_for_disassociated_fip but not let them # raise back up to the caller since this refresh is best effort. try: self._update_inst_info_cache_for_disassociated_fip( context, instance, client, fip) except Exception as e: LOG.warning('An error occurred while trying to refresh the ' 'network info cache for an instance associated ' 'with port %s. Error: %s', fip['port_id'], e) def _update_inst_info_cache_for_disassociated_fip(self, context, instance, client, fip): """Update the network info cache when a floating IP is re-assigned. :param context: nova auth RequestContext :param instance: The instance to which the floating IP is now assigned :param client: ClientWrapper instance for using the Neutron API :param fip: dict for the floating IP that was re-assigned where the the ``port_id`` value represents the port that was associated with another server. """ port = self._show_port(context, fip['port_id'], neutron_client=client) orig_instance_uuid = port['device_id'] msg_dict = dict(address=fip['floating_ip_address'], instance_id=orig_instance_uuid) LOG.info('re-assign floating IP %(address)s from ' 'instance %(instance_id)s', msg_dict, instance=instance) orig_instance = self._get_instance_by_uuid_using_api_db( context, orig_instance_uuid) if orig_instance: # purge cached nw info for the original instance; pass the # context from the instance in case we found it in another cell update_instance_cache_with_nw_info( self, orig_instance._context, orig_instance) else: # Leave a breadcrumb about not being able to refresh the # the cache for the original instance. LOG.info('Unable to refresh the network info cache for ' 'instance %s after disassociating floating IP %s. 
' 'If the instance still exists, its info cache may ' 'be healed automatically.', orig_instance_uuid, fip['id']) @staticmethod def _get_instance_by_uuid_using_api_db(context, instance_uuid): """Look up the instance by UUID This method is meant to be used sparingly since it tries to find the instance by UUID in the cell-targeted context. If the instance is not found, this method will try to determine if it's not found because it is deleted or if it is just in another cell. Therefore it assumes to have access to the API database and should only be called from methods that are used in the control plane services. :param context: cell-targeted nova auth RequestContext :param instance_uuid: UUID of the instance to find :returns: Instance object if the instance was found, else None. """ try: return objects.Instance.get_by_uuid(context, instance_uuid) except exception.InstanceNotFound: # The instance could be deleted or it could be in another cell. # To determine if its in another cell, check the instance # mapping in the API DB. try: inst_map = objects.InstanceMapping.get_by_instance_uuid( context, instance_uuid) except exception.InstanceMappingNotFound: # The instance is gone so just return. return # We have the instance mapping, look up the instance in the # cell the instance is in. with nova_context.target_cell( context, inst_map.cell_mapping) as cctxt: try: return objects.Instance.get_by_uuid(cctxt, instance_uuid) except exception.InstanceNotFound: # Alright it's really gone. return def get_all(self, context): """Get all networks for client.""" client = get_client(context) return client.list_networks().get('networks') def get(self, context, network_uuid): """Get specific network for client.""" client = get_client(context) try: return client.show_network(network_uuid).get('network') or {} except neutron_client_exc.NetworkNotFoundClient: raise exception.NetworkNotFound(network_id=network_uuid) def get_fixed_ip_by_address(self, context, address): """Return instance uuids given an address.""" uuid_maps = self._get_instance_uuids_by_ip(context, address) if len(uuid_maps) == 1: return uuid_maps[0] elif not uuid_maps: raise exception.FixedIpNotFoundForAddress(address=address) else: raise exception.FixedIpAssociatedWithMultipleInstances( address=address) def get_floating_ip(self, context, id): """Return floating IP object given the floating IP id.""" client = get_client(context) try: fip = client.show_floatingip(id)['floatingip'] except neutron_client_exc.NeutronClientException as e: if e.status_code == 404: raise exception.FloatingIpNotFound(id=id) with excutils.save_and_reraise_exception(): LOG.exception('Unable to access floating IP %s', id) # retrieve and cache the network details now since many callers need # the network name which isn't present in the response from neutron network_uuid = fip['floating_network_id'] try: fip['network_details'] = client.show_network( network_uuid)['network'] except neutron_client_exc.NetworkNotFoundClient: raise exception.NetworkNotFound(network_id=network_uuid) # ...and retrieve the port details for the same reason, but only if # they're not already there because the fip-port-details extension is # present if not self.has_fip_port_details_extension(client=client): port_id = fip['port_id'] try: fip['port_details'] = client.show_port( port_id)['port'] except neutron_client_exc.PortNotFoundClient: # it's possible to create floating IPs without a port fip['port_details'] = None return fip def get_floating_ip_by_address(self, context, address): """Return a floating IP 
given an address.""" client = get_client(context) fip = self._get_floating_ip_by_address(client, address) # retrieve and cache the network details now since many callers need # the network name which isn't present in the response from neutron network_uuid = fip['floating_network_id'] try: fip['network_details'] = client.show_network( network_uuid)['network'] except neutron_client_exc.NetworkNotFoundClient: raise exception.NetworkNotFound(network_id=network_uuid) # ...and retrieve the port details for the same reason, but only if # they're not already there because the fip-port-details extension is # present if not self.has_fip_port_details_extension(client=client): port_id = fip['port_id'] try: fip['port_details'] = client.show_port( port_id)['port'] except neutron_client_exc.PortNotFoundClient: # it's possible to create floating IPs without a port fip['port_details'] = None return fip def get_floating_ip_pools(self, context): """Return floating IP pools a.k.a. external networks.""" client = get_client(context) data = client.list_networks(**{constants.NET_EXTERNAL: True}) return data['networks'] def get_floating_ips_by_project(self, context): client = get_client(context) project_id = context.project_id fips = self._safe_get_floating_ips(client, tenant_id=project_id) if not fips: return fips # retrieve and cache the network details now since many callers need # the network name which isn't present in the response from neutron networks = {net['id']: net for net in self._get_available_networks( context, project_id, [fip['floating_network_id'] for fip in fips], client)} for fip in fips: network_uuid = fip['floating_network_id'] if network_uuid not in networks: raise exception.NetworkNotFound(network_id=network_uuid) fip['network_details'] = networks[network_uuid] # ...and retrieve the port details for the same reason, but only if # they're not already there because the fip-port-details extension is # present if not self.has_fip_port_details_extension(client=client): ports = {port['id']: port for port in client.list_ports( **{'tenant_id': project_id})['ports']} for fip in fips: port_id = fip['port_id'] if port_id in ports: fip['port_details'] = ports[port_id] else: # it's possible to create floating IPs without a port fip['port_details'] = None return fips def get_instance_id_by_floating_address(self, context, address): """Return the instance id a floating IP's fixed IP is allocated to.""" client = get_client(context) fip = self._get_floating_ip_by_address(client, address) if not fip['port_id']: return None try: port = self._show_port(context, fip['port_id'], neutron_client=client) except exception.PortNotFound: # NOTE: Here is a potential race condition between _show_port() and # _get_floating_ip_by_address(). fip['port_id'] shows a port which # is the server instance's. At _get_floating_ip_by_address(), # Neutron returns the list which includes the instance. Just after # that, the deletion of the instance happens and Neutron returns # 404 on _show_port(). 
LOG.debug('The port(%s) is not found', fip['port_id']) return None return port['device_id'] def get_vifs_by_instance(self, context, instance): return objects.VirtualInterfaceList.get_by_instance_uuid(context, instance.uuid) def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id): search_opts = {constants.NET_EXTERNAL: True, 'fields': 'id'} if uuidutils.is_uuid_like(name_or_id): search_opts.update({'id': name_or_id}) else: search_opts.update({'name': name_or_id}) data = client.list_networks(**search_opts) nets = data['networks'] if len(nets) == 1: return nets[0]['id'] elif len(nets) == 0: raise exception.FloatingIpPoolNotFound() else: msg = (_("Multiple floating IP pools matches found for name '%s'") % name_or_id) raise exception.NovaException(message=msg) def allocate_floating_ip(self, context, pool=None): """Add a floating IP to a project from a pool.""" client = get_client(context) pool = pool or CONF.neutron.default_floating_pool pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool) param = {'floatingip': {'floating_network_id': pool_id}} try: fip = client.create_floatingip(param) except (neutron_client_exc.IpAddressGenerationFailureClient, neutron_client_exc.ExternalIpAddressExhaustedClient) as e: raise exception.NoMoreFloatingIps(str(e)) except neutron_client_exc.OverQuotaClient as e: raise exception.FloatingIpLimitExceeded(str(e)) except neutron_client_exc.BadRequest as e: raise exception.FloatingIpBadRequest(str(e)) return fip['floatingip']['floating_ip_address'] def _safe_get_floating_ips(self, client, **kwargs): """Get floating IP gracefully handling 404 from Neutron.""" try: return client.list_floatingips(**kwargs)['floatingips'] # If a neutron plugin does not implement the L3 API a 404 from # list_floatingips will be raised. except neutron_client_exc.NotFound: return [] except neutron_client_exc.NeutronClientException as e: # bug/1513879 neutron client is currently using # NeutronClientException when there is no L3 API if e.status_code == 404: return [] with excutils.save_and_reraise_exception(): LOG.exception('Unable to access floating IP for %s', ', '.join(['%s %s' % (k, v) for k, v in kwargs.items()])) def _get_floating_ip_by_address(self, client, address): """Get floating IP from floating IP address.""" if not address: raise exception.FloatingIpNotFoundForAddress(address=address) fips = self._safe_get_floating_ips(client, floating_ip_address=address) if len(fips) == 0: raise exception.FloatingIpNotFoundForAddress(address=address) elif len(fips) > 1: raise exception.FloatingIpMultipleFoundForAddress(address=address) return fips[0] def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port): """Get floating IPs from fixed IP and port.""" return self._safe_get_floating_ips(client, fixed_ip_address=fixed_ip, port_id=port) def release_floating_ip(self, context, address, affect_auto_assigned=False): """Remove a floating IP with the given address from a project.""" # Note(amotoki): We cannot handle a case where multiple pools # have overlapping IP address range. In this case we cannot use # 'address' as a unique key. # This is a limitation of the current nova. # Note(amotoki): 'affect_auto_assigned' is not respected # since it is not used anywhere in nova code and I could # find why this parameter exists. self._release_floating_ip(context, address) def disassociate_and_release_floating_ip(self, context, instance, floating_ip): """Removes (deallocates) and deletes the floating IP. 
This api call was added to allow this to be done in one operation if using neutron. """ @refresh_cache def _release_floating_ip_and_refresh_cache(self, context, instance, floating_ip): self._release_floating_ip( context, floating_ip['floating_ip_address'], raise_if_associated=False) if instance: _release_floating_ip_and_refresh_cache(self, context, instance, floating_ip) else: self._release_floating_ip( context, floating_ip['floating_ip_address'], raise_if_associated=False) def _release_floating_ip(self, context, address, raise_if_associated=True): client = get_client(context) fip = self._get_floating_ip_by_address(client, address) if raise_if_associated and fip['port_id']: raise exception.FloatingIpAssociated(address=address) try: client.delete_floatingip(fip['id']) except neutron_client_exc.NotFound: raise exception.FloatingIpNotFoundForAddress( address=address ) @refresh_cache def disassociate_floating_ip(self, context, instance, address, affect_auto_assigned=False): """Disassociate a floating IP from the instance.""" # Note(amotoki): 'affect_auto_assigned' is not respected # since it is not used anywhere in nova code and I could # find why this parameter exists. client = get_client(context) fip = self._get_floating_ip_by_address(client, address) client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}}) def migrate_instance_start(self, context, instance, migration): """Start to migrate the network of an instance. If the instance has port bindings on the destination compute host, they are activated in this method which will atomically change the source compute host port binding to inactive and also change the port "binding:host_id" attribute to the destination host. If there are no binding resources for the attached ports on the given destination host, this method is a no-op. :param context: The user request context. :param instance: The instance being migrated. :param migration: dict with required keys:: "source_compute": The name of the source compute host. "dest_compute": The name of the destination compute host. :raises: nova.exception.PortBindingActivationFailed if any port binding activation fails """ if not self.has_port_binding_extension(context): # If neutron isn't new enough yet for the port "binding-extended" # API extension, we just no-op. The port binding host will be # be updated in migrate_instance_finish, which is functionally OK, # it's just not optimal. LOG.debug('Neutron is not new enough to perform early destination ' 'host port binding activation. Port bindings will be ' 'updated later.', instance=instance) return client = get_client(context, admin=True) dest_host = migration.dest_compute for vif in instance.get_network_info(): # Not all compute migration flows use the port binding-extended # API yet, so first check to see if there is a binding for the # port and destination host. try: binding = client.show_port_binding( vif['id'], dest_host )['binding'] except neutron_client_exc.NeutronClientException as exc: if exc.status_code != 404: # We don't raise an exception here because we assume that # port bindings will be updated correctly when # migrate_instance_finish runs LOG.error( 'Unexpected error trying to get binding info ' 'for port %s and destination host %s.', vif['id'], dest_host, exc_info=True) continue # ...but if there is no port binding record for the destination # host, we can safely assume none of the ports attached to the # instance are using the binding-extended API in this flow and # exit early. 
return if binding['status'] == 'ACTIVE': # We might be racing with another thread that's handling # post-migrate operations and already activated the port # binding for the destination host. LOG.debug( 'Port %s binding to destination host %s is already ACTIVE', vif['id'], dest_host, instance=instance) continue try: # This is a bit weird in that we don't PUT and update the # status to ACTIVE, it's more like a POST action method in the # compute API. client.activate_port_binding(vif['id'], dest_host) LOG.debug( 'Activated binding for port %s and host %s', vif['id'], dest_host) except neutron_client_exc.NeutronClientException as exc: # A 409 means the port binding is already active, which # shouldn't happen if the caller is doing things in the correct # order. if exc.status_code == 409: LOG.warning( 'Binding for port %s and host %s is already active', vif['id'], dest_host, exc_info=True) continue # Log the details, raise an exception. LOG.error( 'Unexpected error trying to activate binding ' 'for port %s and host %s.', vif['id'], dest_host, exc_info=True) raise exception.PortBindingActivationFailed( port_id=vif['id'], host=dest_host) # TODO(mriedem): Do we need to call # _clear_migration_port_profile? migrate_instance_finish # would normally take care of clearing the "migrating_to" # attribute on each port when updating the port's # binding:host_id to point to the destination host. def migrate_instance_finish( self, context, instance, migration, provider_mappings): """Finish migrating the network of an instance. :param context: nova auth request context :param instance: Instance object being migrated :param migration: Migration object for the operation; used to determine the phase of the migration which dictates what to do with claimed PCI devices for SR-IOV ports :param provider_mappings: a dict of list of resource provider uuids keyed by port uuid """ self._update_port_binding_for_instance( context, instance, migration.dest_compute, migration=migration, provider_mappings=provider_mappings) def _nw_info_get_ips(self, client, port): network_IPs = [] for fixed_ip in port['fixed_ips']: fixed = network_model.FixedIP(address=fixed_ip['ip_address']) floats = self._get_floating_ips_by_fixed_and_port( client, fixed_ip['ip_address'], port['id']) for ip in floats: fip = network_model.IP(address=ip['floating_ip_address'], type='floating') fixed.add_floating_ip(fip) network_IPs.append(fixed) return network_IPs def _nw_info_get_subnets(self, context, port, network_IPs, client=None): subnets = self._get_subnets_from_port(context, port, client) for subnet in subnets: subnet['ips'] = [fixed_ip for fixed_ip in network_IPs if fixed_ip.is_in_subnet(subnet)] return subnets def _nw_info_build_network(self, context, port, networks, subnets): # TODO(stephenfin): Pass in an existing admin client if available. neutron = get_client(context, admin=True) network_name = None network_mtu = None for net in networks: if port['network_id'] == net['id']: network_name = net['name'] tenant_id = net['tenant_id'] network_mtu = net.get('mtu') break else: tenant_id = port['tenant_id'] LOG.warning("Network %(id)s not matched with the tenants " "network! 
The ports tenant %(tenant_id)s will be " "used.", {'id': port['network_id'], 'tenant_id': tenant_id}) bridge = None ovs_interfaceid = None # Network model metadata should_create_bridge = None vif_type = port.get('binding:vif_type') port_details = port.get('binding:vif_details', {}) if vif_type in [network_model.VIF_TYPE_OVS, network_model.VIF_TYPE_AGILIO_OVS]: bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME, CONF.neutron.ovs_bridge) ovs_interfaceid = port['id'] elif vif_type == network_model.VIF_TYPE_BRIDGE: bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME, "brq" + port['network_id']) should_create_bridge = True elif vif_type == network_model.VIF_TYPE_DVS: # The name of the DVS port group will contain the neutron # network id bridge = port['network_id'] elif (vif_type == network_model.VIF_TYPE_VHOSTUSER and port_details.get(network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG, False)): bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME, CONF.neutron.ovs_bridge) ovs_interfaceid = port['id'] elif (vif_type == network_model.VIF_TYPE_VHOSTUSER and port_details.get(network_model.VIF_DETAILS_VHOSTUSER_FP_PLUG, False)): bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME, "brq" + port['network_id']) # Prune the bridge name if necessary. For the DVS this is not done # as the bridge is a '-'. if bridge is not None and vif_type != network_model.VIF_TYPE_DVS: bridge = bridge[:network_model.NIC_NAME_LEN] physnet, tunneled = self._get_physnet_tunneled_info( context, neutron, port['network_id']) network = network_model.Network( id=port['network_id'], bridge=bridge, injected=CONF.flat_injected, label=network_name, tenant_id=tenant_id, mtu=network_mtu, physical_network=physnet, tunneled=tunneled ) network['subnets'] = subnets if should_create_bridge is not None: network['should_create_bridge'] = should_create_bridge return network, ovs_interfaceid def _get_preexisting_port_ids(self, instance): """Retrieve the preexisting ports associated with the given instance. These ports were not created by nova and hence should not be deallocated upon instance deletion. """ net_info = instance.get_network_info() if not net_info: LOG.debug('Instance cache missing network info.', instance=instance) return [vif['id'] for vif in net_info if vif.get('preserve_on_delete')] def _build_vif_model(self, context, client, current_neutron_port, networks, preexisting_port_ids): """Builds a ``nova.network.model.VIF`` object based on the parameters and current state of the port in Neutron. :param context: Request context. :param client: Neutron client. :param current_neutron_port: The current state of a Neutron port from which to build the VIF object model. :param networks: List of dicts which represent Neutron networks associated with the ports currently attached to a given server instance. :param preexisting_port_ids: List of IDs of ports attached to a given server instance which Nova did not create and therefore should not delete when the port is detached from the server. :return: nova.network.model.VIF object which represents a port in the instance network info cache. 
""" vif_active = False if (current_neutron_port['admin_state_up'] is False or current_neutron_port['status'] == 'ACTIVE'): vif_active = True network_IPs = self._nw_info_get_ips(client, current_neutron_port) subnets = self._nw_info_get_subnets(context, current_neutron_port, network_IPs, client) devname = "tap" + current_neutron_port['id'] devname = devname[:network_model.NIC_NAME_LEN] network, ovs_interfaceid = ( self._nw_info_build_network(context, current_neutron_port, networks, subnets)) preserve_on_delete = (current_neutron_port['id'] in preexisting_port_ids) return network_model.VIF( id=current_neutron_port['id'], address=current_neutron_port['mac_address'], network=network, vnic_type=current_neutron_port.get('binding:vnic_type', network_model.VNIC_TYPE_NORMAL), type=current_neutron_port.get('binding:vif_type'), profile=get_binding_profile(current_neutron_port), details=current_neutron_port.get('binding:vif_details'), ovs_interfaceid=ovs_interfaceid, devname=devname, active=vif_active, preserve_on_delete=preserve_on_delete, delegate_create=True, ) def _log_error_if_vnic_type_changed( self, port_id, old_vnic_type, new_vnic_type, instance ): if old_vnic_type and old_vnic_type != new_vnic_type: LOG.error( 'The vnic_type of the bound port %s has ' 'been changed in neutron from "%s" to ' '"%s". Changing vnic_type of a bound port ' 'is not supported by Nova. To avoid ' 'breaking the connectivity of the instance ' 'please change the port vnic_type back to ' '"%s".', port_id, old_vnic_type, new_vnic_type, old_vnic_type, instance=instance ) def _build_network_info_model(self, context, instance, networks=None, port_ids=None, admin_client=None, preexisting_port_ids=None, refresh_vif_id=None, force_refresh=False): """Return list of ordered VIFs attached to instance. :param context: Request context. :param instance: Instance we are returning network info for. :param networks: List of networks being attached to an instance. If value is None this value will be populated from the existing cached value. :param port_ids: List of port_ids that are being attached to an instance in order of attachment. If value is None this value will be populated from the existing cached value. :param admin_client: A neutron client for the admin context. :param preexisting_port_ids: List of port_ids that nova didn't allocate and there shouldn't be deleted when an instance is de-allocated. Supplied list will be added to the cached list of preexisting port IDs for this instance. :param refresh_vif_id: Optional port ID to refresh within the existing cache rather than the entire cache. This can be triggered via a "network-changed" server external event from Neutron. :param force_refresh: If ``networks`` and ``port_ids`` are both None, by default the instance.info_cache will be used to populate the network info. Pass ``True`` to force collection of ports and networks from neutron directly. """ search_opts = {'tenant_id': instance.project_id, 'device_id': instance.uuid, } if admin_client is None: client = get_client(context, admin=True) else: client = admin_client data = client.list_ports(**search_opts) current_neutron_ports = data.get('ports', []) if preexisting_port_ids is None: preexisting_port_ids = [] preexisting_port_ids = set( preexisting_port_ids + self._get_preexisting_port_ids(instance)) current_neutron_port_map = {} for current_neutron_port in current_neutron_ports: current_neutron_port_map[current_neutron_port['id']] = ( current_neutron_port) # Figure out what kind of operation we're processing. 
If we're given # a single port to refresh then we try to optimize and update just the # information for that VIF in the existing cache rather than try to # rebuild the entire thing. if refresh_vif_id is not None: # TODO(mriedem): Consider pulling this out into it's own method. nw_info = instance.get_network_info() if nw_info: current_neutron_port = current_neutron_port_map.get( refresh_vif_id) if current_neutron_port: # Get the network for the port. networks = self._get_available_networks( context, instance.project_id, [current_neutron_port['network_id']], client) # Build the VIF model given the latest port information. refreshed_vif = self._build_vif_model( context, client, current_neutron_port, networks, preexisting_port_ids) for index, vif in enumerate(nw_info): if vif['id'] == refresh_vif_id: self._log_error_if_vnic_type_changed( vif['id'], vif['vnic_type'], refreshed_vif['vnic_type'], instance, ) # Update the existing entry. nw_info[index] = refreshed_vif LOG.debug('Updated VIF entry in instance network ' 'info cache for port %s.', refresh_vif_id, instance=instance) break else: # If it wasn't in the existing cache, add it. nw_info.append(refreshed_vif) LOG.debug('Added VIF to instance network info cache ' 'for port %s.', refresh_vif_id, instance=instance) else: # This port is no longer associated with the instance, so # simply remove it from the nw_info cache. for index, vif in enumerate(nw_info): if vif['id'] == refresh_vif_id: LOG.info('Port %s from network info_cache is no ' 'longer associated with instance in ' 'Neutron. Removing from network ' 'info_cache.', refresh_vif_id, instance=instance) del nw_info[index] break return nw_info # else there is no existing cache and we need to build it # Determine if we're doing a full refresh (_heal_instance_info_cache) # or if we are refreshing because we have attached/detached a port. # TODO(mriedem); we should leverage refresh_vif_id in the latter case # since we are unnecessarily rebuilding the entire cache for one port nw_info_refresh = networks is None and port_ids is None if nw_info_refresh and force_refresh: # Use the current set of ports from neutron rather than the cache. port_ids = self._get_ordered_port_list(context, instance, current_neutron_ports) net_ids = [ current_neutron_port_map.get(port_id, {}).get('network_id') for port_id in port_ids] # This is copied from _gather_port_ids_and_networks. networks = self._get_available_networks( context, instance.project_id, net_ids, client) else: # We are refreshing the full cache using the existing cache rather # than what is currently in neutron. networks, port_ids = self._gather_port_ids_and_networks( context, instance, networks, port_ids, client) old_nw_info = instance.get_network_info() nw_info = network_model.NetworkInfo() for port_id in port_ids: current_neutron_port = current_neutron_port_map.get(port_id) if current_neutron_port: vif = self._build_vif_model( context, client, current_neutron_port, networks, preexisting_port_ids) for old_vif in old_nw_info: if old_vif['id'] == port_id: self._log_error_if_vnic_type_changed( port_id, old_vif['vnic_type'], vif['vnic_type'], instance, ) nw_info.append(vif) elif nw_info_refresh: LOG.info('Port %s from network info_cache is no ' 'longer associated with instance in Neutron. 
' 'Removing from network info_cache.', port_id, instance=instance) return nw_info def _get_ordered_port_list(self, context, instance, current_neutron_ports): """Returns ordered port list using nova virtual_interface data.""" # a dict, keyed by port UUID, of the port's "index" # so that we can order the returned port UUIDs by the # original insertion order followed by any newly-attached # ports port_uuid_to_index_map = {} port_order_list = [] ports_without_order = [] # Get set of ports from nova vifs vifs = self.get_vifs_by_instance(context, instance) for port in current_neutron_ports: # NOTE(mjozefcz): For each port check if we have its index from # nova virtual_interfaces objects. If not - it seems # to be a new port - add it at the end of list. # Find port index if it was attached before. for vif in vifs: if vif.uuid == port['id']: port_uuid_to_index_map[port['id']] = vif.id break if port['id'] not in port_uuid_to_index_map: # Assume that it's new port and add it to the end of port list. ports_without_order.append(port['id']) # Lets sort created port order_list by given index. port_order_list = sorted(port_uuid_to_index_map, key=lambda k: port_uuid_to_index_map[k]) # Add ports without order to the end of list port_order_list.extend(ports_without_order) return port_order_list def _get_subnets_from_port(self, context, port, client=None): """Return the subnets for a given port.""" fixed_ips = port['fixed_ips'] # No fixed_ips for the port means there is no subnet associated # with the network the port is created on. # Since list_subnets(id=[]) returns all subnets visible for the # current tenant, returned subnets may contain subnets which is not # related to the port. To avoid this, the method returns here. if not fixed_ips: return [] if not client: client = get_client(context) search_opts = {'id': list(set(ip['subnet_id'] for ip in fixed_ips))} data = client.list_subnets(**search_opts) ipam_subnets = data.get('subnets', []) subnets = [] for subnet in ipam_subnets: subnet_dict = {'cidr': subnet['cidr'], 'gateway': network_model.IP( address=subnet['gateway_ip'], type='gateway'), 'enable_dhcp': False, } if subnet.get('ipv6_address_mode'): subnet_dict['ipv6_address_mode'] = subnet['ipv6_address_mode'] # attempt to populate DHCP server field dhcp_search_opts = { 'network_id': subnet['network_id'], 'device_owner': 'network:dhcp'} data = client.list_ports(**dhcp_search_opts) dhcp_ports = data.get('ports', []) for p in dhcp_ports: for ip_pair in p['fixed_ips']: if ip_pair['subnet_id'] == subnet['id']: subnet_dict['dhcp_server'] = ip_pair['ip_address'] break # NOTE(stblatzheim): If enable_dhcp is set on subnet, but subnet # has ovn native dhcp and no dhcp-agents. Network owner will be # network:distributed # Just rely on enable_dhcp flag given by neutron # Fix for https://bugs.launchpad.net/nova/+bug/2055245 if subnet.get('enable_dhcp'): subnet_dict['enable_dhcp'] = True subnet_object = network_model.Subnet(**subnet_dict) for dns in subnet.get('dns_nameservers', []): subnet_object.add_dns( network_model.IP(address=dns, type='dns')) for route in subnet.get('host_routes', []): subnet_object.add_route( network_model.Route(cidr=route['destination'], gateway=network_model.IP( address=route['nexthop'], type='gateway'))) subnets.append(subnet_object) return subnets def setup_instance_network_on_host( self, context, instance, host, migration=None, provider_mappings=None): """Setup network for specified instance on host. :param context: The request context. 
:param instance: nova.objects.instance.Instance object. :param host: The host which network should be setup for instance. :param migration: The migration object if the instance is being tracked with a migration. :param provider_mappings: a dict of lists of resource provider uuids keyed by port uuid """ self._update_port_binding_for_instance( context, instance, host, migration, provider_mappings) def cleanup_instance_network_on_host(self, context, instance, host): """Cleanup network for specified instance on host. Port bindings for the given host are deleted. The ports associated with the instance via the port device_id field are left intact. :param context: The user request context. :param instance: Instance object with the associated ports :param host: host from which to delete port bindings :raises: PortBindingDeletionFailed if port binding deletion fails. """ # First check to see if the port binding extension is supported. client = get_client(context) if not self.has_port_binding_extension(client=client): LOG.info("Neutron extension '%s' is not supported; not cleaning " "up port bindings for host %s.", constants.PORT_BINDING_EXTENDED, host, instance=instance) return # Now get the ports associated with the instance. We go directly to # neutron rather than rely on the info cache just like # setup_networks_on_host. search_opts = {'device_id': instance.uuid, 'tenant_id': instance.project_id, 'fields': ['id']} # we only need the port id data = self.list_ports(context, **search_opts) self._delete_port_bindings(context, data['ports'], host) def _get_pci_mapping_for_migration(self, instance, migration): if not instance.migration_context: return {} # In case of revert, swap old and new devices to # update the ports back to the original devices. revert = migration and migration.status == 'reverted' return instance.migration_context.get_pci_mapping_for_migration(revert) def _get_port_pci_dev(self, instance, port): """Find the PCI device corresponding to the port. Assumes the port is an SRIOV one. :param instance: The instance to which the port is attached. :param port: The Neutron port, as obtained from the Neutron API JSON form. :return: The PciDevice object, or None if unable to find. """ # Find the port's PCIRequest, or return None for r in instance.pci_requests.requests: if r.requester_id == port['id']: request = r break else: LOG.debug('No PCI request found for port %s', port['id'], instance=instance) return None # Find the request's device, or return None for d in instance.pci_devices: if d.request_id == request.request_id: device = d break else: LOG.debug('No PCI device found for request %s', request.request_id, instance=instance) return None return device def _update_port_binding_for_instance( self, context, instance, host, migration=None, provider_mappings=None): neutron = get_client(context, admin=True) search_opts = {'device_id': instance.uuid, 'tenant_id': instance.project_id} data = neutron.list_ports(**search_opts) port_updates = [] ports = data['ports'] FAILED_VIF_TYPES = (network_model.VIF_TYPE_UNBOUND, network_model.VIF_TYPE_BINDING_FAILED) for p in ports: updates = {} binding_profile = get_binding_profile(p) # We need to update the port binding if the host has changed or if # the binding is clearly wrong due to previous lost messages. 
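# Illustrative sketch (not part of this module): the rebinding test applied
# to each port below. The literal strings mirror
# network_model.VIF_TYPE_UNBOUND / VIF_TYPE_BINDING_FAILED and Neutron's
# 'binding:host_id' attribute; the port dict and host names are example data.
def _example_needs_rebinding(port, host):
    failed_vif_types = ('unbound', 'binding_failed')
    return (port.get('binding:host_id') != host or
            port.get('binding:vif_type') in failed_vif_types)

# e.g. a port still bound to the source host after a cold migration:
# _example_needs_rebinding({'binding:host_id': 'compute-1',
#                           'binding:vif_type': 'ovs'}, 'compute-2') -> True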
vif_type = p.get('binding:vif_type') if (p.get(constants.BINDING_HOST_ID) != host or vif_type in FAILED_VIF_TYPES): updates[constants.BINDING_HOST_ID] = host # If the host changed, the AZ could have also changed so we # need to update the device_owner. updates['device_owner'] = ( 'compute:%s' % instance.availability_zone) # NOTE: Before updating the port binding make sure we # remove the pre-migration status from the binding profile if binding_profile.get(constants.MIGRATING_ATTR): del binding_profile[constants.MIGRATING_ATTR] updates[constants.BINDING_PROFILE] = binding_profile # Update port with newly allocated PCI devices. Even if the # resize is happening on the same host, a new PCI device can be # allocated. Note that this only needs to happen if a migration # is in progress such as in a resize / migrate. It is possible # that this function is called without a migration object, such # as in an unshelve operation. vnic_type = p.get('binding:vnic_type') if vnic_type in network_model.VNIC_TYPES_SRIOV: # NOTE(artom) For migrations, update the binding profile from # the migration object... if migration is not None: # NOTE(artom) ... except for live migrations, because the # conductor has already done that when calling # bind_ports_to_host(). if not migration.is_live_migration: pci_mapping = self._get_pci_mapping_for_migration( instance, migration) pci_slot = binding_profile.get('pci_slot') new_dev = pci_mapping.get(pci_slot) if new_dev: binding_profile.update( self._get_pci_device_profile(new_dev)) updates[ constants.BINDING_PROFILE] = binding_profile else: raise exception.PortUpdateFailed(port_id=p['id'], reason=_("Unable to correlate PCI slot %s") % pci_slot) # NOTE(artom) If migration is None, this is an unshelve, and we # need to figure out the pci related binding information from # the InstancePCIRequest and PciDevice objects. else: pci_dev = self._get_port_pci_dev(instance, p) if pci_dev: binding_profile.update( self._get_pci_device_profile(pci_dev)) updates[constants.BINDING_PROFILE] = binding_profile # NOTE(gibi): during live migration the conductor already sets the # allocation key in the port binding. However during resize, cold # migrate, evacuate and unshelve we have to set the binding here. # Also note that during unshelve no migration object is created. if self._has_resource_request(context, p, neutron) and ( migration is None or not migration.is_live_migration ): if not provider_mappings: # TODO(gibi): Remove this check when compute RPC API is # bumped to 6.0 # NOTE(gibi): This should not happen as the API level # minimum compute service version check ensures that the # compute services already send the RequestSpec during # the move operations between the source and the # destination and the dest compute calculates the # mapping based on that. LOG.warning( "Provider mappings are not available to the compute " "service but are required for ports with a resource " "request. 
If compute RPC API versions are pinned for " "a rolling upgrade, you will need to retry this " "operation once the RPC version is unpinned and the " "nova-compute services are all upgraded.", instance=instance) raise exception.PortUpdateFailed( port_id=p['id'], reason=_( "Provider mappings are not available to the " "compute service but are required for ports with " "a resource request.")) binding_profile[constants.ALLOCATION] = ( self._get_binding_profile_allocation( context, p, neutron, provider_mappings)) updates[constants.BINDING_PROFILE] = binding_profile port_updates.append((p['id'], updates)) # Avoid rolling back updates if we catch an error above. # TODO(lbeliveau): Batch up the port updates in one neutron call. for port_id, updates in port_updates: if updates: LOG.info("Updating port %(port)s with " "attributes %(attributes)s", {"port": port_id, "attributes": updates}, instance=instance) try: neutron.update_port(port_id, {'port': updates}) except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Unable to update binding details " "for port %s", port_id, instance=instance) def update_instance_vnic_index(self, context, instance, vif, index): """Update instance vnic index. When the 'VNIC index' extension is supported this method will update the vnic index of the instance on the port. An instance may have more than one vnic. :param context: The request context. :param instance: nova.objects.instance.Instance object. :param vif: The VIF in question. :param index: The index on the instance for the VIF. """ neutron = get_client(context) if self.has_vnic_index_extension(client=neutron): port_req_body = {'port': {'vnic_index': index}} try: neutron.update_port(vif['id'], port_req_body) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Unable to update instance VNIC index ' 'for port %s.', vif['id'], instance=instance) def get_segment_ids_for_network( self, context: nova.context.RequestContext, network_id: str, ) -> ty.List[str]: """Query the segmentation ids for the given network. :param context: The request context. :param network_id: The UUID of the network to be queried. :returns: The list of segment UUIDs of the network or an empty list if either Segment extension isn't enabled in Neutron or if the network isn't configured for routing. """ client = get_client(context, admin=True) if not self.has_segment_extension(client=client): return [] try: # NOTE(sbauza): We can't use list_segments() directly because the # API is borked and returns both segments but also segmentation IDs # of a provider network if any. subnets = client.list_subnets(network_id=network_id, fields='segment_id')['subnets'] except neutron_client_exc.NeutronClientException as e: raise exception.InvalidRoutedNetworkConfiguration( 'Failed to get segment IDs for network %s' % network_id) from e # The segment field of an unconfigured subnet could be None return [subnet['segment_id'] for subnet in subnets if subnet.get('segment_id') is not None] def get_segment_id_for_subnet( self, context: nova.context.RequestContext, subnet_id: str, ) -> ty.Optional[str]: """Query the segmentation id for the given subnet. :param context: The request context. :param subnet_id: The UUID of the subnet to be queried. 
:returns: The segment UUID of the subnet or None if either Segment extension isn't enabled in Neutron or the provided subnet doesn't have segments (if the related network isn't configured for routing) """ client = get_client(context, admin=True) if not self.has_segment_extension(client=client): return None try: subnet = client.show_subnet(subnet_id)['subnet'] except neutron_client_exc.NeutronClientException as e: raise exception.InvalidRoutedNetworkConfiguration( 'Subnet %s not found' % subnet_id) from e return subnet.get('segment_id') def _ensure_requested_network_ordering(accessor, unordered, preferred): """Sort a list with respect to the preferred network ordering.""" if preferred: unordered.sort(key=lambda i: preferred.index(accessor(i))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/network/os_vif_util.py0000664000175000017500000004452000000000000020117 0ustar00zuulzuul00000000000000# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ''' This module contains code for converting from the original nova.network.model data structure, to the new os-vif based versioned object model os_vif.objects.* ''' from os_vif import objects from oslo_config import cfg from oslo_log import log as logging from nova import exception from nova.i18n import _ from nova.network import model LOG = logging.getLogger(__name__) CONF = cfg.CONF LEGACY_VIFS = { model.VIF_TYPE_DVS, model.VIF_TYPE_IOVISOR, model.VIF_TYPE_802_QBG, model.VIF_TYPE_802_QBH, model.VIF_TYPE_HW_VEB, model.VIF_TYPE_HOSTDEV, model.VIF_TYPE_IB_HOSTDEV, model.VIF_TYPE_MIDONET, model.VIF_TYPE_TAP, model.VIF_TYPE_MACVTAP } def _get_vif_name(vif): """Get a VIF device name :param vif: the nova.network.model.VIF instance Get a string suitable for use as a host OS network device name :returns: a device name """ if vif.get('devname', None) is not None: return vif['devname'] return ('nic' + vif['id'])[:model.NIC_NAME_LEN] def _get_hybrid_bridge_name(vif): """Get a bridge device name :param vif: the nova.network.model.VIF instance Get a string suitable for use as a host OS bridge device name :returns: a bridge name """ return ('qbr' + vif['id'])[:model.NIC_NAME_LEN] def _set_vhostuser_settings(vif, obj): """Set vhostuser socket mode and path :param vif: the nova.network.model.VIF instance :param obj: a os_vif.objects.vif.VIFVHostUser instance :raises: exception.VifDetailsMissingVhostuserSockPath """ obj.mode = vif['details'].get( model.VIF_DETAILS_VHOSTUSER_MODE, 'server') path = vif['details'].get( model.VIF_DETAILS_VHOSTUSER_SOCKET, None) if path: obj.path = path else: raise exception.VifDetailsMissingVhostuserSockPath( vif_id=vif['id']) def nova_to_osvif_instance(instance): """Convert a Nova instance object to an os-vif instance object :param vif: a nova.objects.Instance instance :returns: a os_vif.objects.instance_info.InstanceInfo """ info = objects.instance_info.InstanceInfo( uuid=instance.uuid, name=instance.name) if (instance.obj_attr_is_set("project_id") 
and instance.project_id is not None): info.project_id = instance.project_id return info def _nova_to_osvif_ip(ip): """Convert Nova IP object into os_vif object :param route: nova.network.model.IP instance :returns: os_vif.objects.fixed_ip.FixedIP instance """ floating_ips = [fip['address'] for fip in ip.get('floating_ips', [])] return objects.fixed_ip.FixedIP( address=ip['address'], floating_ips=floating_ips) def _nova_to_osvif_ips(ips): """Convert Nova IP list into os_vif object :param routes: list of nova.network.model.IP instances :returns: os_vif.objects.fixed_ip.FixedIPList instance """ return objects.fixed_ip.FixedIPList( objects=[_nova_to_osvif_ip(ip) for ip in ips]) def _nova_to_osvif_route(route): """Convert Nova route object into os_vif object :param route: nova.network.model.Route instance :returns: os_vif.objects.route.Route instance """ obj = objects.route.Route( cidr=route['cidr']) if route['interface'] is not None: obj.interface = route['interface'] if (route['gateway'] is not None and route['gateway']['address'] is not None): obj.gateway = route['gateway']['address'] return obj def _nova_to_osvif_routes(routes): """Convert Nova route list into os_vif object :param routes: list of nova.network.model.Route instances :returns: os_vif.objects.route.RouteList instance """ return objects.route.RouteList( objects=[_nova_to_osvif_route(route) for route in routes]) def _nova_to_osvif_subnet(subnet): """Convert Nova subnet object into os_vif object :param subnet: nova.network.model.Subnet instance :returns: os_vif.objects.subnet.Subnet instance """ dnsaddrs = [ip['address'] for ip in subnet['dns']] obj = objects.subnet.Subnet( dns=dnsaddrs, ips=_nova_to_osvif_ips(subnet['ips']), routes=_nova_to_osvif_routes(subnet['routes'])) if subnet['cidr'] is not None: obj.cidr = subnet['cidr'] if (subnet['gateway'] is not None and subnet['gateway']['address'] is not None): obj.gateway = subnet['gateway']['address'] return obj def _nova_to_osvif_subnets(subnets): """Convert Nova subnet list into os_vif object :param subnets: list of nova.network.model.Subnet instances :returns: os_vif.objects.subnet.SubnetList instance """ return objects.subnet.SubnetList( objects=[_nova_to_osvif_subnet(subnet) for subnet in subnets]) def _nova_to_osvif_network(network): """Convert Nova network object into os_vif object :param network: nova.network.model.Network instance :returns: os_vif.objects.network.Network instance """ netobj = objects.network.Network( id=network['id'], bridge_interface=network.get_meta("bridge_interface"), subnets=_nova_to_osvif_subnets(network['subnets'])) if network["bridge"] is not None: netobj.bridge = network['bridge'] if network['label'] is not None: netobj.label = network['label'] if network.get_meta("mtu") is not None: netobj.mtu = network.get_meta("mtu") if network.get_meta("multi_host") is not None: netobj.multi_host = network.get_meta("multi_host") if network.get_meta("should_create_bridge") is not None: netobj.should_provide_bridge = network.get_meta("should_create_bridge") if network.get_meta("should_create_vlan") is not None: netobj.should_provide_vlan = network.get_meta("should_create_vlan") if network.get_meta("vlan") is None: raise exception.NovaException(_("Missing vlan number in %s") % network) netobj.vlan = network.get_meta("vlan") return netobj def _get_vif_instance(vif, cls, plugin, **kwargs): """Instantiate an os-vif VIF instance :param vif: the nova.network.model.VIF instance :param cls: class for a os_vif.objects.vif.VIFBase subclass :returns: a 
os_vif.objects.vif.VIFBase instance """ return cls( id=vif['id'], address=vif['address'], network=_nova_to_osvif_network(vif['network']), has_traffic_filtering=vif.is_neutron_filtering_enabled(), preserve_on_delete=vif['preserve_on_delete'], active=vif['active'], plugin=plugin, **kwargs) def _set_representor_datapath_offload_settings(vif, obj): """Populate the representor datapath offload metadata in the port profile. This function should only be called if the VIF's ``vnic_type`` is in the VNIC_TYPES_SRIOV list, and the ``port_profile`` field of ``obj`` has been populated. :param vif: the nova.network.model.VIF instance :param obj: an os_vif.objects.vif.VIFBase instance """ datapath_offload = objects.vif.DatapathOffloadRepresentor( representor_name=_get_vif_name(vif), representor_address=vif["profile"]["pci_slot"]) obj.port_profile.datapath_offload = datapath_offload def _get_vnic_direct_vif_instance(vif, port_profile, plugin, set_bridge=True): """Instantiate an os-vif VIF instance for ``vnic_type`` = VNIC_TYPE_DIRECT :param vif: the nova.network.model.VIF instance :param port_profile: an os_vif.objects.vif.VIFPortProfileBase instance :param plugin: the os-vif plugin name :param set_bridge: if True, populate obj.network.bridge :returns: an os_vif.objects.vif.VIFHostDevice instance """ obj = _get_vif_instance( vif, objects.vif.VIFHostDevice, port_profile=port_profile, plugin=plugin, dev_address=vif["profile"]["pci_slot"], dev_type=objects.fields.VIFHostDeviceDevType.ETHERNET ) if set_bridge and vif["network"]["bridge"] is not None: obj.network.bridge = vif["network"]["bridge"] return obj def _get_ovs_representor_port_profile(vif): """Instantiate an os-vif port_profile object. :param vif: the nova.network.model.VIF instance :returns: an os_vif.objects.vif.VIFPortProfileOVSRepresentor instance """ # TODO(jangutter): in accordance with the generic-os-vif-offloads spec, # the datapath offload info is duplicated in both interfaces for Stein. # The port profile should be transitioned to VIFPortProfileOpenVSwitch # during Train. return objects.vif.VIFPortProfileOVSRepresentor( interface_id=vif.get('ovs_interfaceid') or vif['id'], representor_name=_get_vif_name(vif), representor_address=vif["profile"]['pci_slot']) # VIF_TYPE_BRIDGE = 'bridge' def _nova_to_osvif_vif_bridge(vif): obj = _get_vif_instance( vif, objects.vif.VIFBridge, plugin="linux_bridge", vif_name=_get_vif_name(vif)) if vif["network"]["bridge"] is not None: obj.bridge_name = vif["network"]["bridge"] return obj # VIF_TYPE_OVS = 'ovs' def _nova_to_osvif_vif_ovs(vif): vif_name = _get_vif_name(vif) vnic_type = vif.get('vnic_type', model.VNIC_TYPE_NORMAL) profile = objects.vif.VIFPortProfileOpenVSwitch( interface_id=vif.get('ovs_interfaceid') or vif['id'], datapath_type=vif['details'].get( model.VIF_DETAILS_OVS_DATAPATH_TYPE)) if vnic_type in (model.VNIC_TYPE_DIRECT, model.VNIC_TYPE_VDPA): obj = _get_vnic_direct_vif_instance( vif, port_profile=_get_ovs_representor_port_profile(vif), plugin="ovs") _set_representor_datapath_offload_settings(vif, obj) elif vnic_type == model.VNIC_TYPE_REMOTE_MANAGED: # A networking backend is responsible for setting up a # representor in this case so the driver is noop. 
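# Illustrative sketch (not part of this module): the vnic_type dispatch that
# the OVS conversion around this point implements, summarised as a helper.
# The vnic_type strings mirror the network_model.VNIC_TYPE_* constants; the
# return values name os_vif.objects.vif classes and are not code used here.
def _example_ovs_vif_class(vnic_type, hybrid_plug=False):
    if vnic_type in ('direct', 'vdpa'):
        return 'VIFHostDevice (plugin "ovs", OVS representor profile)'
    if vnic_type == 'remote-managed':
        return 'VIFHostDevice (plugin "noop")'
    if hybrid_plug:
        return 'VIFBridge (plugin "ovs", qbr hybrid bridge)'
    return 'VIFOpenVSwitch (plugin "ovs")'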
obj = _get_vif_instance( vif, objects.vif.VIFHostDevice, plugin="noop", vif_name=vif_name, dev_address=vif["profile"]["pci_slot"], dev_type=objects.fields.VIFHostDeviceDevType.ETHERNET) elif vif.is_hybrid_plug_enabled(): obj = _get_vif_instance( vif, objects.vif.VIFBridge, port_profile=profile, plugin="ovs", vif_name=vif_name, bridge_name=_get_hybrid_bridge_name(vif)) else: profile.create_port = vif.get('delegate_create', False) obj = _get_vif_instance( vif, objects.vif.VIFOpenVSwitch, port_profile=profile, plugin="ovs", vif_name=vif_name) if vif["network"]["bridge"] is not None: obj.bridge_name = vif["network"]["bridge"] return obj # VIF_TYPE_AGILIO_OVS = 'agilio_ovs' def _nova_to_osvif_vif_agilio_ovs(vif): vnic_type = vif.get('vnic_type', model.VNIC_TYPE_NORMAL) if vnic_type == model.VNIC_TYPE_DIRECT: obj = _get_vnic_direct_vif_instance( vif, plugin="agilio_ovs", port_profile=_get_ovs_representor_port_profile(vif)) _set_representor_datapath_offload_settings(vif, obj) elif vnic_type == model.VNIC_TYPE_VIRTIO_FORWARDER: obj = _get_vif_instance( vif, objects.vif.VIFVHostUser, port_profile=_get_ovs_representor_port_profile(vif), plugin="agilio_ovs", vif_name=_get_vif_name(vif)) _set_representor_datapath_offload_settings(vif, obj) _set_vhostuser_settings(vif, obj) if vif["network"]["bridge"] is not None: obj.network.bridge = vif["network"]["bridge"] else: LOG.debug("agilio_ovs falling through to ovs %s", vif) obj = _nova_to_osvif_vif_ovs(vif) return obj # VIF_TYPE_VHOST_USER = 'vhostuser' def _nova_to_osvif_vif_vhostuser(vif): if vif['details'].get(model.VIF_DETAILS_VHOSTUSER_FP_PLUG, False): if vif['details'].get(model.VIF_DETAILS_VHOSTUSER_OVS_PLUG, False): profile = objects.vif.VIFPortProfileFPOpenVSwitch( interface_id=vif.get('ovs_interfaceid') or vif['id'], datapath_type=vif['details'].get( model.VIF_DETAILS_OVS_DATAPATH_TYPE)) if vif.is_hybrid_plug_enabled(): profile.bridge_name = _get_hybrid_bridge_name(vif) profile.hybrid_plug = True else: profile.hybrid_plug = False if vif["network"]["bridge"] is not None: profile.bridge_name = vif["network"]["bridge"] else: profile = objects.vif.VIFPortProfileFPBridge() if vif["network"]["bridge"] is not None: profile.bridge_name = vif["network"]["bridge"] obj = _get_vif_instance(vif, objects.vif.VIFVHostUser, plugin="vhostuser_fp", vif_name=_get_vif_name(vif), port_profile=profile) _set_vhostuser_settings(vif, obj) return obj elif vif['details'].get(model.VIF_DETAILS_VHOSTUSER_OVS_PLUG, False): profile = objects.vif.VIFPortProfileOpenVSwitch( interface_id=vif.get('ovs_interfaceid') or vif['id'], datapath_type=vif['details'].get( model.VIF_DETAILS_OVS_DATAPATH_TYPE)) vif_name = ('vhu' + vif['id'])[:model.NIC_NAME_LEN] obj = _get_vif_instance(vif, objects.vif.VIFVHostUser, port_profile=profile, plugin="ovs", vif_name=vif_name) if vif["network"]["bridge"] is not None: obj.bridge_name = vif["network"]["bridge"] _set_vhostuser_settings(vif, obj) return obj elif vif['details'].get(model.VIF_DETAILS_VHOSTUSER_VROUTER_PLUG, False): obj = _get_vif_instance(vif, objects.vif.VIFVHostUser, plugin="contrail_vrouter", vif_name=_get_vif_name(vif)) _set_vhostuser_settings(vif, obj) return obj else: obj = _get_vif_instance(vif, objects.vif.VIFVHostUser, plugin="noop", vif_name=_get_vif_name(vif)) _set_vhostuser_settings(vif, obj) return obj # VIF_TYPE_IVS = 'ivs' def _nova_to_osvif_vif_ivs(vif): if vif.is_hybrid_plug_enabled(): obj = _get_vif_instance( vif, objects.vif.VIFBridge, plugin="ivs", vif_name=_get_vif_name(vif), bridge_name=_get_hybrid_bridge_name(vif)) 
else: obj = _get_vif_instance( vif, objects.vif.VIFGeneric, plugin="ivs", vif_name=_get_vif_name(vif)) return obj # VIF_TYPE_VROUTER = 'vrouter' def _nova_to_osvif_vif_vrouter(vif): vif_name = _get_vif_name(vif) vnic_type = vif.get('vnic_type', model.VNIC_TYPE_NORMAL) if vnic_type == model.VNIC_TYPE_NORMAL: obj = _get_vif_instance( vif, objects.vif.VIFGeneric, plugin="vrouter", vif_name=vif_name) elif vnic_type == model.VNIC_TYPE_DIRECT: obj = _get_vnic_direct_vif_instance( vif, port_profile=objects.vif.VIFPortProfileBase(), plugin="vrouter", set_bridge=False) _set_representor_datapath_offload_settings(vif, obj) elif vnic_type == model.VNIC_TYPE_VIRTIO_FORWARDER: obj = _get_vif_instance( vif, objects.vif.VIFVHostUser, port_profile=objects.vif.VIFPortProfileBase(), plugin="vrouter", vif_name=vif_name) _set_representor_datapath_offload_settings(vif, obj) _set_vhostuser_settings(vif, obj) else: raise NotImplementedError() return obj def nova_to_osvif_vif(vif): """Convert a Nova VIF model to an os-vif object :param vif: a nova.network.model.VIF instance Attempt to convert a nova VIF instance into an os-vif VIF object, pointing to a suitable plugin. This will return None if there is no os-vif plugin available yet. :returns: a os_vif.objects.vif.VIFBase subclass, or None if not supported with os-vif yet """ LOG.debug("Converting VIF %s", vif) vif_type = vif['type'] if vif_type in LEGACY_VIFS: # We want to explicitly fall back to the legacy path for these VIF # types LOG.debug('No conversion for VIF type %s yet', vif_type) return None if vif_type in {model.VIF_TYPE_BINDING_FAILED, model.VIF_TYPE_UNBOUND}: # These aren't real VIF types. VIF_TYPE_BINDING_FAILED indicates port # binding to a host failed and we are trying to plug the VIFs again, # which will fail because we do not know the actual real VIF type, like # VIF_TYPE_OVS, VIF_TYPE_BRIDGE, etc. VIF_TYPE_UNBOUND, by comparison, # is the default VIF type of a driver when it is not bound to any host, # i.e. we have not set the host ID in the binding driver. This should # also only happen in error cases. # TODO(stephenfin): We probably want a more meaningful log here LOG.debug('No conversion for VIF type %s yet', vif_type) return None if vif_type == model.VIF_TYPE_OVS: vif_obj = _nova_to_osvif_vif_ovs(vif) elif vif_type == model.VIF_TYPE_IVS: vif_obj = _nova_to_osvif_vif_ivs(vif) elif vif_type == model.VIF_TYPE_BRIDGE: vif_obj = _nova_to_osvif_vif_bridge(vif) elif vif_type == model.VIF_TYPE_AGILIO_OVS: vif_obj = _nova_to_osvif_vif_agilio_ovs(vif) elif vif_type == model.VIF_TYPE_VHOSTUSER: vif_obj = _nova_to_osvif_vif_vhostuser(vif) elif vif_type == model.VIF_TYPE_VROUTER: vif_obj = _nova_to_osvif_vif_vrouter(vif) else: raise exception.NovaException('Unsupported VIF type %s' % vif_type) LOG.debug('Converted object %s', vif_obj) return vif_obj ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/network/security_group_api.py0000664000175000017500000006527500000000000021523 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Piston Cloud Computing, Inc. # Copyright 2012 Red Hat, Inc. # Copyright 2013 Nicira, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import urllib import netaddr from neutronclient.common import exceptions as n_exc from neutronclient.neutron import v2_0 as neutronv20 from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from oslo_utils import netutils from oslo_utils import uuidutils from webob import exc from nova import context as nova_context from nova import exception from nova.i18n import _ from nova.network import neutron as neutronapi from nova.objects import security_group as security_group_obj LOG = logging.getLogger(__name__) # NOTE: Neutron client has a max URL length of 8192, so we have # to limit the number of IDs we include in any single search. Really # doesn't seem to be any point in making this a config value. MAX_SEARCH_IDS = 150 def validate_id(id): if not uuidutils.is_uuid_like(id): msg = _("Security group id should be uuid") raise exception.Invalid(msg) return id def validate_name( context: nova_context.RequestContext, name: str): """Validate a security group name and return the corresponding UUID. :param context: The nova request context. :param name: The name of the security group. :raises NoUniqueMatch: If there is no unique match for the provided name. :raises SecurityGroupNotFound: If there's no match for the provided name. :raises NeutronClientException: For all other exceptions. """ neutron = neutronapi.get_client(context) try: return neutronv20.find_resourceid_by_name_or_id( neutron, 'security_group', name, context.project_id) except n_exc.NeutronClientNoUniqueMatch as e: raise exception.NoUniqueMatch(str(e)) except n_exc.NeutronClientException as e: if e.status_code == 404: LOG.debug('Neutron security group %s not found', name) raise exception.SecurityGroupNotFound(str(e)) else: LOG.error('Neutron Error: %s', e) raise e def parse_cidr(cidr): if not cidr: return '0.0.0.0/0' try: cidr = encodeutils.safe_decode(urllib.parse.unquote(cidr)) except Exception: raise exception.InvalidCidr(cidr=cidr) if not netutils.is_valid_cidr(cidr): raise exception.InvalidCidr(cidr=cidr) return cidr def new_group_ingress_rule(grantee_group_id, protocol, from_port, to_port): return _new_ingress_rule( protocol, from_port, to_port, group_id=grantee_group_id) def new_cidr_ingress_rule(grantee_cidr, protocol, from_port, to_port): return _new_ingress_rule( protocol, from_port, to_port, cidr=grantee_cidr) def _new_ingress_rule(ip_protocol, from_port, to_port, group_id=None, cidr=None): values = {} if group_id: values['group_id'] = group_id # Open everything if an explicit port range or type/code are not # specified, but only if a source group was specified. 
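# Illustrative sketch (not part of this module): the defaults the branches
# below apply. 'SG-UUID' and the CIDR are example values only.
def _example_default_port_ranges():
    # Source-group rules without explicit ports are opened wide: TCP/UDP
    # get 1-65535 and ICMP gets -1/-1 ("any type/code").
    wide_open = new_group_ingress_rule('SG-UUID', 'tcp', None, None)
    assert wide_open['from_port'] == 1 and wide_open['to_port'] == 65535
    # CIDR-based rules must carry an explicit protocol and port range;
    # without them the helper returns None (the final 'if cidr' branch).
    assert new_cidr_ingress_rule('10.0.0.0/24', 'tcp', None, None) is None
    return wide_open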
ip_proto_upper = ip_protocol.upper() if ip_protocol else '' if (ip_proto_upper == 'ICMP' and from_port is None and to_port is None): from_port = -1 to_port = -1 elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None and to_port is None): from_port = 1 to_port = 65535 elif cidr: values['cidr'] = cidr if ip_protocol and from_port is not None and to_port is not None: ip_protocol = str(ip_protocol) try: # Verify integer conversions from_port = int(from_port) to_port = int(to_port) except ValueError: if ip_protocol.upper() == 'ICMP': raise exception.InvalidInput(reason=_("Type and" " Code must be integers for ICMP protocol type")) else: raise exception.InvalidInput(reason=_("To and From ports " "must be integers")) if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: raise exception.InvalidIpProtocol(protocol=ip_protocol) # Verify that from_port must always be less than # or equal to to_port if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port > to_port)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Former value cannot" " be greater than the later") # Verify valid TCP, UDP port ranges if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port < 1 or to_port > 65535)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Valid %s ports should" " be between 1-65535" % ip_protocol.upper()) # Verify ICMP type and code if (ip_protocol.upper() == "ICMP" and (from_port < -1 or from_port > 255 or to_port < -1 or to_port > 255)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="For ICMP, the" " type:code must be valid") values['protocol'] = ip_protocol values['from_port'] = from_port values['to_port'] = to_port else: # If cidr based filtering, protocol and ports are mandatory if cidr: return None return values def create_security_group_rule(context, security_group, new_rule): if _rule_exists(security_group, new_rule): msg = (_('This rule already exists in group %s') % new_rule['parent_group_id']) raise exception.Invalid(msg) return add_rules(context, new_rule['parent_group_id'], security_group['name'], [new_rule])[0] def _rule_exists(security_group, new_rule): """Indicates whether the specified rule is already defined in the given security group. """ for rule in security_group['rules']: keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol') for key in keys: if rule.get(key) != new_rule.get(key): break else: return rule.get('id') or True return False def populate_security_groups(security_groups): """Build and return a SecurityGroupList. 
:param security_groups: list of requested security group names or uuids :type security_groups: list :returns: nova.objects.security_group.SecurityGroupList """ if not security_groups: # Make sure it's an empty SecurityGroupList and not None return security_group_obj.SecurityGroupList() return security_group_obj.make_secgroup_list(security_groups) def create_security_group(context, name, description): neutron = neutronapi.get_client(context) body = _make_neutron_security_group_dict(name, description) try: security_group = neutron.create_security_group( body).get('security_group') except n_exc.BadRequest as e: raise exception.Invalid(str(e)) except n_exc.NeutronClientException as e: LOG.exception("Neutron Error creating security group %s", name) if e.status_code == 401: # TODO(arosen) Cannot raise generic response from neutron here # as this error code could be related to bad input or over # quota raise exc.HTTPBadRequest() elif e.status_code == 409: raise exception.SecurityGroupLimitExceeded(str(e)) raise e return _convert_to_nova_security_group_format(security_group) def update_security_group(context, security_group, name, description): neutron = neutronapi.get_client(context) body = _make_neutron_security_group_dict(name, description) try: security_group = neutron.update_security_group( security_group['id'], body).get('security_group') except n_exc.NeutronClientException as e: LOG.exception("Neutron Error updating security group %s", name) if e.status_code == 401: # TODO(arosen) Cannot raise generic response from neutron here # as this error code could be related to bad input or over # quota raise exc.HTTPBadRequest() raise e return _convert_to_nova_security_group_format(security_group) def _convert_to_nova_security_group_format(security_group): nova_group = {} nova_group['id'] = security_group['id'] nova_group['description'] = security_group['description'] nova_group['name'] = security_group['name'] nova_group['project_id'] = security_group['tenant_id'] nova_group['rules'] = [] for rule in security_group.get('security_group_rules', []): if rule['direction'] == 'ingress': nova_group['rules'].append( _convert_to_nova_security_group_rule_format(rule)) return nova_group def _convert_to_nova_security_group_rule_format(rule): nova_rule = {} nova_rule['id'] = rule['id'] nova_rule['parent_group_id'] = rule['security_group_id'] nova_rule['protocol'] = rule['protocol'] if (nova_rule['protocol'] and rule.get('port_range_min') is None and rule.get('port_range_max') is None): if rule['protocol'].upper() in ['TCP', 'UDP']: nova_rule['from_port'] = 1 nova_rule['to_port'] = 65535 else: nova_rule['from_port'] = -1 nova_rule['to_port'] = -1 else: nova_rule['from_port'] = rule.get('port_range_min') nova_rule['to_port'] = rule.get('port_range_max') nova_rule['group_id'] = rule['remote_group_id'] nova_rule['cidr'] = parse_cidr(rule.get('remote_ip_prefix')) return nova_rule def get(context, id): neutron = neutronapi.get_client(context) try: group = neutron.show_security_group(id).get('security_group') return _convert_to_nova_security_group_format(group) except n_exc.NeutronClientException as e: if e.status_code == 404: LOG.debug('Neutron security group %s not found', id) raise exception.SecurityGroupNotFound(str(e)) else: LOG.error("Neutron Error: %s", e) raise e def list(context, project, search_opts=None): """Returns list of security group rules owned by tenant.""" neutron = neutronapi.get_client(context) params = {} search_opts = search_opts if search_opts else {} # NOTE(jeffrey4l): list all the security 
groups when following # conditions are met # * names and ids don't exist. # * it is admin context and all_tenants exist in search_opts. # * project is not specified. list_all_tenants = (context.is_admin and 'all_tenants' in search_opts) # NOTE(jeffrey4l): neutron doesn't have `all-tenants` concept. # All the security group will be returned if the project/tenant # id is not passed. if not list_all_tenants: params['tenant_id'] = project try: security_groups = neutron.list_security_groups(**params).get( 'security_groups') except n_exc.NeutronClientException: with excutils.save_and_reraise_exception(): LOG.exception("Neutron Error getting security groups") converted_rules = [] for security_group in security_groups: converted_rules.append( _convert_to_nova_security_group_format(security_group)) return converted_rules def destroy(context, security_group): """This function deletes a security group.""" neutron = neutronapi.get_client(context) try: neutron.delete_security_group(security_group['id']) except n_exc.NeutronClientException as e: if e.status_code == 404: raise exception.SecurityGroupNotFound(str(e)) elif e.status_code == 409: raise exception.Invalid(str(e)) else: LOG.error("Neutron Error: %s", e) raise e def add_rules(context, id, name, vals): """Add security group rule(s) to security group. Note: the Nova security group API doesn't support adding multiple security group rules at once but the EC2 one does. Therefore, this function is written to support both. Multiple rules are installed to a security group in neutron using bulk support. """ neutron = neutronapi.get_client(context) body = _make_neutron_security_group_rules_list(vals) try: rules = neutron.create_security_group_rule( body).get('security_group_rules') except n_exc.NeutronClientException as e: if e.status_code == 404: LOG.exception("Neutron Error getting security group %s", name) raise exception.SecurityGroupNotFound(str(e)) elif e.status_code == 409: LOG.exception("Neutron Error adding rules to security " "group %s", name) raise exception.SecurityGroupLimitExceeded(str(e)) elif e.status_code == 400: LOG.exception("Neutron Error: %s", e) raise exception.Invalid(str(e)) else: raise e converted_rules = [] for rule in rules: converted_rules.append( _convert_to_nova_security_group_rule_format(rule)) return converted_rules def _make_neutron_security_group_dict(name, description): return {'security_group': {'name': name, 'description': description}} def _make_neutron_security_group_rules_list(rules): new_rules = [] for rule in rules: new_rule = {} # nova only supports ingress rules so all rules are ingress. new_rule['direction'] = "ingress" new_rule['protocol'] = rule.get('protocol') # FIXME(arosen) Nova does not expose ethertype on security group # rules. Therefore, in the case of self referential rules we # should probably assume they want to allow both IPv4 and IPv6. # Unfortunately, this would require adding two rules in neutron. # The reason we do not do this is because when the user using the # nova api wants to remove the rule we'd have to have some way to # know that we should delete both of these rules in neutron. # For now, self referential rules only support IPv4. 
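# Illustrative sketch (not part of this module): what the translation below
# produces for a simple nova-style ingress rule (example values only).
#
# input:  {'protocol': 'tcp', 'from_port': 22, 'to_port': 22,
#          'cidr': '192.0.2.0/24', 'parent_group_id': 'SG-UUID'}
# output: {'direction': 'ingress', 'ethertype': 'IPv4', 'protocol': 'tcp',
#          'port_range_min': 22, 'port_range_max': 22,
#          'remote_ip_prefix': '192.0.2.0/24',
#          'security_group_id': 'SG-UUID', 'remote_group_id': None}
#
# The whole list is then wrapped as {'security_group_rules': [...]} for
# Neutron's bulk rule-create call.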
if not rule.get('cidr'): new_rule['ethertype'] = 'IPv4' else: version = netaddr.IPNetwork(rule.get('cidr')).version new_rule['ethertype'] = 'IPv6' if version == 6 else 'IPv4' new_rule['remote_ip_prefix'] = rule.get('cidr') new_rule['security_group_id'] = rule.get('parent_group_id') new_rule['remote_group_id'] = rule.get('group_id') if 'from_port' in rule and rule['from_port'] != -1: new_rule['port_range_min'] = rule['from_port'] if 'to_port' in rule and rule['to_port'] != -1: new_rule['port_range_max'] = rule['to_port'] new_rules.append(new_rule) return {'security_group_rules': new_rules} def remove_rules(context, security_group, rule_ids): neutron = neutronapi.get_client(context) rule_ids = set(rule_ids) try: # The ec2 api allows one to delete multiple security group rules # at once. Since there is no bulk delete for neutron the best # thing we can do is delete the rules one by one and hope this # works.... :/ for rule_id in range(0, len(rule_ids)): neutron.delete_security_group_rule(rule_ids.pop()) except n_exc.NeutronClientException: with excutils.save_and_reraise_exception(): LOG.exception("Neutron Error unable to delete %s", rule_ids) def get_rule(context, id): neutron = neutronapi.get_client(context) try: rule = neutron.show_security_group_rule( id).get('security_group_rule') except n_exc.NeutronClientException as e: if e.status_code == 404: LOG.debug("Neutron security group rule %s not found", id) raise exception.SecurityGroupNotFound(str(e)) else: LOG.error("Neutron Error: %s", e) raise e return _convert_to_nova_security_group_rule_format(rule) def _get_ports_from_server_list(servers, neutron): """Returns a list of ports used by the servers.""" def _chunk_by_ids(servers, limit): ids = [] for server in servers: ids.append(server['id']) if len(ids) >= limit: yield ids ids = [] if ids: yield ids # Note: Have to split the query up as the search criteria # form part of the URL, which has a fixed max size ports = [] for ids in _chunk_by_ids(servers, MAX_SEARCH_IDS): search_opts = {'device_id': ids} try: ports.extend(neutron.list_ports(**search_opts).get('ports')) except n_exc.PortNotFoundClient: # There could be a race between deleting an instance and # retrieving its port groups from Neutron. In this case # PortNotFoundClient is raised and it can be safely ignored LOG.debug("Port not found for device with id %s", ids) return ports def _get_secgroups_from_port_list(ports, neutron, fields=None): """Returns a dict of security groups keyed by their ids.""" def _chunk_by_ids(sg_ids, limit): sg_id_list = [] for sg_id in sg_ids: sg_id_list.append(sg_id) if len(sg_id_list) >= limit: yield sg_id_list sg_id_list = [] if sg_id_list: yield sg_id_list # Find the set of unique SecGroup IDs to search for sg_ids = set() for port in ports: sg_ids.update(port.get('security_groups', [])) # Note: Have to split the query up as the search criteria # form part of the URL, which has a fixed max size security_groups = {} for sg_id_list in _chunk_by_ids(sg_ids, MAX_SEARCH_IDS): sg_search_opts = {'id': sg_id_list} if fields: sg_search_opts['fields'] = fields search_results = neutron.list_security_groups(**sg_search_opts) for sg in search_results.get('security_groups'): security_groups[sg['id']] = sg return security_groups def get_instances_security_groups_bindings(context, servers, detailed=False): """Returns a dict(instance_id, [security_groups]) to allow obtaining all of the instances and their security groups in one shot. If detailed is False only the security group name is returned. 
""" neutron = neutronapi.get_client(context) ports = _get_ports_from_server_list(servers, neutron) # If detailed is True, we want all fields from the security groups # including the potentially slow-to-join security_group_rules field. # But if detailed is False, only get the id and name fields since # that's all we'll use below. fields = None if detailed else ['id', 'name'] security_groups = _get_secgroups_from_port_list( ports, neutron, fields=fields) instances_security_group_bindings = {} for port in ports: for port_sg_id in port.get('security_groups', []): # Note: have to check we found port_sg as its possible # the port has an SG that this user doesn't have access to port_sg = security_groups.get(port_sg_id) if port_sg: if detailed: sg_entry = _convert_to_nova_security_group_format( port_sg) instances_security_group_bindings.setdefault( port['device_id'], []).append(sg_entry) else: # name is optional in neutron so if not specified # return id name = port_sg.get('name') if not name: name = port_sg.get('id') sg_entry = {'name': name} instances_security_group_bindings.setdefault( port['device_id'], []).append(sg_entry) return instances_security_group_bindings def get_instance_security_groups(context, instance, detailed=False): """Returns the security groups that are associated with an instance. If detailed is True then it also returns the full details of the security groups associated with an instance, otherwise just the security group name. """ servers = [{'id': instance.uuid}] sg_bindings = get_instances_security_groups_bindings( context, servers, detailed) return sg_bindings.get(instance.uuid, []) def _has_security_group_requirements(port): port_security_enabled = port.get('port_security_enabled', True) has_ip = port.get('fixed_ips') deferred_ip = port.get('ip_allocation') == 'deferred' if has_ip or deferred_ip: return port_security_enabled return False def add_to_instance(context, instance, security_group_name): """Add security group to the instance.""" neutron = neutronapi.get_client(context) try: security_group_id = neutronv20.find_resourceid_by_name_or_id( neutron, 'security_group', security_group_name, context.project_id) except n_exc.NeutronClientNoUniqueMatch as e: raise exception.NoUniqueMatch(str(e)) except n_exc.NeutronClientException as e: if e.status_code == 404: msg = (_("Security group %(name)s is not found for " "project %(project)s") % {'name': security_group_name, 'project': context.project_id}) raise exception.SecurityGroupNotFound(msg) else: raise e params = {'device_id': instance.uuid} try: ports = neutron.list_ports(**params).get('ports') except n_exc.NeutronClientException: with excutils.save_and_reraise_exception(): LOG.exception("Neutron Error:") if not ports: msg = (_("instance_id %s could not be found as device id on" " any ports") % instance.uuid) raise exception.SecurityGroupNotFound(msg) for port in ports: if not _has_security_group_requirements(port): LOG.warning("Cannot add security group %(name)s to " "%(instance)s since the port %(port_id)s " "does not meet security requirements", {'name': security_group_name, 'instance': instance.uuid, 'port_id': port['id']}) raise exception.SecurityGroupCannotBeApplied() if 'security_groups' not in port: port['security_groups'] = [] port['security_groups'].append(security_group_id) updated_port = {'security_groups': port['security_groups']} try: LOG.info("Adding security group %(security_group_id)s to " "port %(port_id)s", {'security_group_id': security_group_id, 'port_id': port['id']}) neutron.update_port(port['id'], 
{'port': updated_port}) except n_exc.NeutronClientException as e: if e.status_code == 400: raise exception.SecurityGroupCannotBeApplied(str(e)) elif e.status_code == 409: raise exception.SecurityGroupConnectionStateConflict(str(e)) else: raise e except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Neutron Error:") def remove_from_instance(context, instance, security_group_name): """Remove the security group associated with the instance.""" neutron = neutronapi.get_client(context) try: security_group_id = neutronv20.find_resourceid_by_name_or_id( neutron, 'security_group', security_group_name, context.project_id) except n_exc.NeutronClientNoUniqueMatch as e: raise exception.NoUniqueMatch(e) except n_exc.NeutronClientException as e: if e.status_code == 404: msg = (_("Security group %(name)s is not found for " "project %(project)s") % {'name': security_group_name, 'project': context.project_id}) raise exception.SecurityGroupNotFound(msg) else: raise e params = {'device_id': instance.uuid} try: ports = neutron.list_ports(**params).get('ports') except n_exc.NeutronClientException: with excutils.save_and_reraise_exception(): LOG.exception("Neutron Error:") if not ports: msg = (_("instance_id %s could not be found as device id on" " any ports") % instance.uuid) raise exception.SecurityGroupNotFound(msg) found_security_group = False for port in ports: try: port.get('security_groups', []).remove(security_group_id) except ValueError: # When removing a security group from an instance the security # group should be on both ports since it was added this way if # done through the nova api. In case it is not a 404 is only # raised if the security group is not found on any of the # ports on the instance. continue updated_port = {'security_groups': port['security_groups']} try: LOG.info("Removing security group %(security_group_id)s from " "port %(port_id)s", {'security_group_id': security_group_id, 'port_id': port['id']}) neutron.update_port(port['id'], {'port': updated_port}) found_security_group = True except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Neutron Error:") if not found_security_group: msg = (_("Security group %(security_group_name)s not associated " "with the instance %(instance)s") % {'security_group_name': security_group_name, 'instance': instance.uuid}) raise exception.SecurityGroupNotFound(msg) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3736086 nova-32.0.0/nova/notifications/0000775000175000017500000000000000000000000016376 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/notifications/__init__.py0000664000175000017500000000227600000000000020516 0ustar00zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
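# ---------------------------------------------------------------------------
# Editor's sketch (illustrative only, not part of the nova source tree): the
# security group module above attaches and detaches groups by rewriting the
# ``security_groups`` list on each of the instance's ports and pushing it back
# with ``neutron.update_port()``. The helper name below is hypothetical; it
# assumes ``neutron`` is a client already obtained from the network API and
# ``port`` is a port dict as returned by ``neutron.list_ports()``.
def example_toggle_port_security_group(neutron, port, security_group_id,
                                       add=True):
    """Add or remove one security group id on a single neutron port."""
    groups = list(port.get('security_groups') or [])
    if add and security_group_id not in groups:
        groups.append(security_group_id)
    elif not add and security_group_id in groups:
        groups.remove(security_group_id)
    # Same request body shape as add_to_instance()/remove_from_instance().
    neutron.update_port(port['id'], {'port': {'security_groups': groups}})
# ---------------------------------------------------------------------------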
# Note(gibi): Importing publicly called functions so the caller code does not # need to be changed after we moved these function inside the package # Todo(gibi): remove these imports after legacy notifications using these are # transformed to versioned notifications from nova.notifications.base import audit_period_bounds # noqa from nova.notifications.base import image_meta # noqa from nova.notifications.base import info_from_instance # noqa from nova.notifications.base import send_update # noqa from nova.notifications.base import send_update_with_states # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/notifications/base.py0000664000175000017500000003536100000000000017672 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Functionality related to notifications common to multiple layers of the system. """ import datetime from keystoneauth1 import exceptions as ks_exc from oslo_log import log from oslo_utils import excutils from oslo_utils import timeutils import nova.conf import nova.context from nova import exception from nova.image import glance from nova.notifications.objects import base as notification_base from nova.notifications.objects import instance as instance_notification from nova.objects import fields from nova import rpc from nova import utils LOG = log.getLogger(__name__) CONF = nova.conf.CONF def send_update(context, old_instance, new_instance, service="compute", host=None): """Send compute.instance.update notification to report any changes occurred in that instance """ if not CONF.notifications.notify_on_state_change: # skip all this if updates are disabled return update_with_state_change = False old_vm_state = old_instance["vm_state"] new_vm_state = new_instance["vm_state"] old_task_state = old_instance["task_state"] new_task_state = new_instance["task_state"] # we should check if we need to send a state change or a regular # notification if old_vm_state != new_vm_state: # yes, the vm state is changing: update_with_state_change = True elif (CONF.notifications.notify_on_state_change == "vm_and_task_state" and old_task_state != new_task_state): # yes, the task state is changing: update_with_state_change = True if update_with_state_change: # send a notification with state changes # value of verify_states need not be True as the check for states is # already done here send_update_with_states(context, new_instance, old_vm_state, new_vm_state, old_task_state, new_task_state, service, host) else: try: old_display_name = None if new_instance["display_name"] != old_instance["display_name"]: old_display_name = old_instance["display_name"] send_instance_update_notification(context, new_instance, service=service, host=host, old_display_name=old_display_name) except exception.InstanceNotFound: LOG.debug('Failed to send instance update notification. 
The ' 'instance could not be found and was most likely ' 'deleted.', instance=new_instance) except Exception: LOG.exception("Failed to send state update notification", instance=new_instance) def send_update_with_states(context, instance, old_vm_state, new_vm_state, old_task_state, new_task_state, service="compute", host=None, verify_states=False): """Send compute.instance.update notification to report changes if there are any, in the instance """ if not CONF.notifications.notify_on_state_change: # skip all this if updates are disabled return fire_update = True # send update notification by default if verify_states: # check whether we need to send notification related to state changes fire_update = False # do not send notification if the conditions for vm and(or) task state # are not satisfied if old_vm_state != new_vm_state: # yes, the vm state is changing: fire_update = True elif (CONF.notifications.notify_on_state_change == "vm_and_task_state" and old_task_state != new_task_state): # yes, the task state is changing: fire_update = True if fire_update: # send either a state change or a regular notification try: send_instance_update_notification(context, instance, old_vm_state=old_vm_state, old_task_state=old_task_state, new_vm_state=new_vm_state, new_task_state=new_task_state, service=service, host=host) except exception.InstanceNotFound: LOG.debug('Failed to send instance update notification. The ' 'instance could not be found and was most likely ' 'deleted.', instance=instance) except Exception: LOG.exception("Failed to send state update notification", instance=instance) def _compute_states_payload(instance, old_vm_state=None, old_task_state=None, new_vm_state=None, new_task_state=None): # If the states were not specified we assume the current instance # states are the correct information. This is important to do for # both old and new states because otherwise we create some really # confusing notifications like: # # None(None) => Building(none) # # When we really were just continuing to build if new_vm_state is None: new_vm_state = instance["vm_state"] if new_task_state is None: new_task_state = instance["task_state"] if old_vm_state is None: old_vm_state = instance["vm_state"] if old_task_state is None: old_task_state = instance["task_state"] states_payload = { "old_state": old_vm_state, "state": new_vm_state, "old_task_state": old_task_state, "new_task_state": new_task_state, } return states_payload def send_instance_update_notification(context, instance, old_vm_state=None, old_task_state=None, new_vm_state=None, new_task_state=None, service="compute", host=None, old_display_name=None): """Send 'compute.instance.update' notification to inform observers about instance state changes. """ # NOTE(gibi): The image_ref_url is only used in unversioned notifications. # Calling the generate_image_url() could be costly as it calls # the Keystone API. So only do the call if the actual value will be # used. 
populate_image_ref_url = (CONF.notifications.notification_format in ('both', 'unversioned')) payload = info_from_instance(context, instance, None, populate_image_ref_url=populate_image_ref_url) # determine how we'll report states payload.update( _compute_states_payload( instance, old_vm_state, old_task_state, new_vm_state, new_task_state)) # add audit fields: (audit_start, audit_end) = audit_period_bounds(current_period=True) payload["audit_period_beginning"] = null_safe_isotime(audit_start) payload["audit_period_ending"] = null_safe_isotime(audit_end) # add old display name if it is changed if old_display_name: payload["old_display_name"] = old_display_name rpc.get_notifier(service, host).info(context, 'compute.instance.update', payload) _send_versioned_instance_update(context, instance, payload, host, service) @rpc.if_notifications_enabled def _send_versioned_instance_update(context, instance, payload, host, service): def _map_legacy_service_to_source(legacy_service): if not legacy_service.startswith('nova-'): return 'nova-' + service else: return service state_update = instance_notification.InstanceStateUpdatePayload( old_state=payload.get('old_state'), state=payload.get('state'), old_task_state=payload.get('old_task_state'), new_task_state=payload.get('new_task_state')) audit_period = instance_notification.AuditPeriodPayload( audit_period_beginning=payload.get('audit_period_beginning'), audit_period_ending=payload.get('audit_period_ending')) versioned_payload = instance_notification.InstanceUpdatePayload( context=context, instance=instance, state_update=state_update, audit_period=audit_period, old_display_name=payload.get('old_display_name')) notification = instance_notification.InstanceUpdateNotification( priority=fields.NotificationPriority.INFO, event_type=notification_base.EventType( object='instance', action=fields.NotificationAction.UPDATE), publisher=notification_base.NotificationPublisher( host=host or CONF.host, source=_map_legacy_service_to_source(service)), payload=versioned_payload) notification.emit(context) def audit_period_bounds(current_period=False): """Get the start and end of the relevant audit usage period :param current_period: if True, this will generate a usage for the current usage period; if False, this will generate a usage for the previous audit period. """ begin, end = utils.last_completed_audit_period() if current_period: audit_start = end audit_end = timeutils.utcnow() else: audit_start = begin audit_end = end return (audit_start, audit_end) def image_meta(system_metadata): """Format image metadata for use in notifications from the instance system metadata. """ image_meta = {} for md_key, md_value in system_metadata.items(): if md_key.startswith('image_'): image_meta[md_key[6:]] = md_value return image_meta def null_safe_str(s): return str(s) if s else '' def null_safe_isotime(s): if isinstance(s, datetime.datetime): return utils.strtime(s) else: return str(s) if s else '' def info_from_instance(context, instance, network_info, populate_image_ref_url=False, **kw): """Get detailed instance information for an instance which is common to all notifications. :param:instance: nova.objects.Instance :param:network_info: network_info provided if not None :param:populate_image_ref_url: If True then the full URL of the image of the instance is generated and returned. This, depending on the configuration, might mean a call to Keystone. If false, None value is returned in the dict at the image_ref_url key. 
""" image_ref_url = None if populate_image_ref_url: try: # NOTE(mriedem): We can eventually drop this when we no longer # support legacy notifications since versioned notifications don't # use this. image_ref_url = glance.API().generate_image_url( instance.image_ref, context) except ks_exc.EndpointNotFound: # We might be running from a periodic task with no auth token and # CONF.glance.api_servers isn't set, so we can't get the image API # endpoint URL from the service catalog, therefore just use the # image id for the URL (yes it's a lie, but it's best effort at # this point). with excutils.save_and_reraise_exception() as exc_ctx: if context.auth_token is None: image_ref_url = instance.image_ref exc_ctx.reraise = False flavor = instance.get_flavor() flavor_name = flavor.get('name', '') instance_flavorid = flavor.get('flavorid', '') instance_info = dict( # Owner properties tenant_id=instance.project_id, user_id=instance.user_id, # Identity properties instance_id=instance.uuid, display_name=instance.display_name, reservation_id=instance.reservation_id, hostname=instance.hostname, # Type properties instance_type=flavor_name, instance_type_id=instance.instance_type_id, instance_flavor_id=instance_flavorid, architecture=instance.architecture, # Capacity properties memory_mb=instance.flavor.memory_mb, disk_gb=instance.flavor.root_gb + instance.flavor.ephemeral_gb, vcpus=instance.flavor.vcpus, # Note(dhellmann): This makes the disk_gb value redundant, but # we are keeping it for backwards-compatibility with existing # users of notifications. root_gb=instance.flavor.root_gb, ephemeral_gb=instance.flavor.ephemeral_gb, # Location properties host=instance.host, node=instance.node, availability_zone=instance.availability_zone, cell_name=null_safe_str(instance.cell_name), # Date properties created_at=str(instance.created_at), # Terminated and Deleted are slightly different (although being # terminated and not deleted is a transient state), so include # both and let the recipient decide which they want to use. terminated_at=null_safe_isotime(instance.get('terminated_at', None)), deleted_at=null_safe_isotime(instance.get('deleted_at', None)), launched_at=null_safe_isotime(instance.get('launched_at', None)), # Image properties image_ref_url=image_ref_url, os_type=instance.os_type, kernel_id=instance.kernel_id, ramdisk_id=instance.ramdisk_id, # Status properties state=instance.vm_state, state_description=null_safe_str(instance.task_state), # NOTE(gibi): It might seems wrong to default the progress to an empty # string but this is how legacy work and this code only used by the # legacy notification so try to keep the compatibility here but also # keep it contained. 
progress=int(instance.progress) if instance.progress else '', # accessIPs access_ip_v4=instance.access_ip_v4, access_ip_v6=instance.access_ip_v6, ) if network_info is not None: fixed_ips = [] for vif in network_info: for ip in vif.fixed_ips(): ip["label"] = vif["network"]["label"] ip["vif_mac"] = vif["address"] fixed_ips.append(ip) instance_info['fixed_ips'] = fixed_ips # add image metadata image_meta_props = image_meta(instance.system_metadata) instance_info["image_meta"] = image_meta_props # add instance metadata instance_info['metadata'] = instance.metadata instance_info.update(kw) return instance_info ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3776088 nova-32.0.0/nova/notifications/objects/0000775000175000017500000000000000000000000020027 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/notifications/objects/__init__.py0000664000175000017500000000000000000000000022126 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/notifications/objects/aggregate.py0000664000175000017500000001013200000000000022324 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
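# Editor's illustration -- not part of the original nova source. The legacy
# notification helpers defined in nova/notifications/base.py above can be
# exercised directly; for example image_meta() keeps only the system-metadata
# keys prefixed with 'image_' and strips that prefix. The helper name below
# (_example_image_meta_usage) is hypothetical.
def _example_image_meta_usage():
    # Re-exported for legacy callers via nova/notifications/__init__.py.
    from nova.notifications import image_meta

    system_metadata = {
        'image_min_ram': '0',
        'image_os_type': 'linux',
        'instance_type_name': 'm1.small',  # dropped: no 'image_' prefix
    }
    # Expected result: {'min_ram': '0', 'os_type': 'linux'}
    return image_meta(system_metadata)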
from nova.notifications.objects import base from nova.objects import base as nova_base from nova.objects import fields @nova_base.NovaObjectRegistry.register_notification class AggregatePayload(base.NotificationPayloadBase): SCHEMA = { 'id': ('aggregate', 'id'), 'uuid': ('aggregate', 'uuid'), 'name': ('aggregate', 'name'), 'hosts': ('aggregate', 'hosts'), 'metadata': ('aggregate', 'metadata'), } # Version 1.0: Initial version # 1.1: Making the id field nullable VERSION = '1.1' fields = { # NOTE(gibi): id is nullable as aggregate.create.start is sent before # the id is generated by the db 'id': fields.IntegerField(nullable=True), 'uuid': fields.UUIDField(nullable=False), 'name': fields.StringField(), 'hosts': fields.ListOfStringsField(nullable=True), 'metadata': fields.DictOfStringsField(nullable=True), } def __init__(self, aggregate): super(AggregatePayload, self).__init__() self.populate_schema(aggregate=aggregate) @base.notification_sample('aggregate-create-start.json') @base.notification_sample('aggregate-create-end.json') @base.notification_sample('aggregate-delete-start.json') @base.notification_sample('aggregate-delete-end.json') @base.notification_sample('aggregate-add_host-start.json') @base.notification_sample('aggregate-add_host-end.json') @base.notification_sample('aggregate-remove_host-start.json') @base.notification_sample('aggregate-remove_host-end.json') @base.notification_sample('aggregate-update_metadata-start.json') @base.notification_sample('aggregate-update_metadata-end.json') @base.notification_sample('aggregate-update_prop-start.json') @base.notification_sample('aggregate-update_prop-end.json') @base.notification_sample('aggregate-cache_images-start.json') @base.notification_sample('aggregate-cache_images-end.json') @nova_base.NovaObjectRegistry.register_notification class AggregateNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('AggregatePayload') } @nova_base.NovaObjectRegistry.register_notification class AggregateCachePayload(base.NotificationPayloadBase): SCHEMA = { 'id': ('aggregate', 'id'), 'uuid': ('aggregate', 'uuid'), 'name': ('aggregate', 'name'), } # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.IntegerField(), 'uuid': fields.UUIDField(), 'name': fields.StringField(), # The host that we just worked 'host': fields.StringField(), # The images that were downloaded or are already there 'images_cached': fields.ListOfStringsField(), # The images that are unable to be cached for some reason 'images_failed': fields.ListOfStringsField(), # The N/M progress information for this operation 'index': fields.IntegerField(), 'total': fields.IntegerField(), } def __init__(self, aggregate, host, index, total): super(AggregateCachePayload, self).__init__() self.populate_schema(aggregate=aggregate) self.host = host self.index = index self.total = total @base.notification_sample('aggregate-cache_images-progress.json') @nova_base.NovaObjectRegistry.register_notification class AggregateCacheNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('AggregateCachePayload'), } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/notifications/objects/base.py0000664000175000017500000002503100000000000021314 0ustar00zuulzuul00000000000000# All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import excutils from oslo_versionedobjects import exception as ovo_exception from nova import exception from nova.objects import base from nova.objects import fields from nova import rpc LOG = logging.getLogger(__name__) @base.NovaObjectRegistry.register_if(False) class NotificationObject(base.NovaObject): """Base class for every notification related versioned object.""" # Version 1.0: Initial version VERSION = '1.0' def __init__(self, **kwargs): super(NotificationObject, self).__init__(**kwargs) # The notification objects are created on the fly when nova emits the # notification. This causes that every object shows every field as # changed. We don't want to send this meaningless information so we # reset the object after creation. self.obj_reset_changes(recursive=False) @base.NovaObjectRegistry.register_notification class EventType(NotificationObject): # Version 1.0: Initial version # Version 1.1: New valid actions values are added to the # NotificationActionField enum # Version 1.2: DELETE value is added to the NotificationActionField enum # Version 1.3: Set of new values are added to NotificationActionField enum # Version 1.4: Another set of new values are added to # NotificationActionField enum # Version 1.5: Aggregate related values have been added to # NotificationActionField enum # Version 1.6: ADD_FIX_IP replaced with INTERFACE_ATTACH in # NotificationActionField enum # Version 1.7: REMOVE_FIXED_IP replaced with INTERFACE_DETACH in # NotificationActionField enum # Version 1.8: IMPORT value is added to NotificationActionField enum # Version 1.9: ADD_MEMBER value is added to NotificationActionField enum # Version 1.10: UPDATE_METADATA value is added to the # NotificationActionField enum # Version 1.11: LOCK is added to NotificationActionField enum # Version 1.12: UNLOCK is added to NotificationActionField enum # Version 1.13: REBUILD_SCHEDULED value is added to the # NotificationActionField enum # Version 1.14: UPDATE_PROP value is added to the NotificationActionField # enum # Version 1.15: LIVE_MIGRATION_FORCE_COMPLETE is added to the # NotificationActionField enum # Version 1.16: CONNECT is added to NotificationActionField enum # Version 1.17: USAGE is added to NotificationActionField enum # Version 1.18: ComputeTask related values have been added to # NotificationActionField enum # Version 1.19: SELECT_DESTINATIONS is added to the NotificationActionField # enum # Version 1.20: IMAGE_CACHE is added to the NotificationActionField enum # Version 1.21: PROGRESS added to NotificationPhase enum # Version 1.22: SHARE_ATTACH SHARE_DETACH are added to the # NotificationActionField enum VERSION = '1.22' fields = { 'object': fields.StringField(nullable=False), 'action': fields.NotificationActionField(nullable=False), 'phase': fields.NotificationPhaseField(nullable=True), } def __init__(self, object, action, phase=None): super(EventType, self).__init__() self.object = object self.action = action self.phase = 
phase def to_notification_event_type_field(self): """Serialize the object to the wire format.""" s = '%s.%s' % (self.object, self.action) if self.phase: s += '.%s' % self.phase return s @base.NovaObjectRegistry.register_if(False) class NotificationPayloadBase(NotificationObject): """Base class for the payload of versioned notifications.""" # SCHEMA defines how to populate the payload fields. It is a dictionary # where every key value pair has the following format: # : (, # ) # The is the name where the data will be stored in the # payload object, this field has to be defined as a field of the payload. # The shall refer to name of the parameter passed as # kwarg to the payload's populate_schema() call and this object will be # used as the source of the data. The shall be # a valid field of the passed argument. # The SCHEMA needs to be applied with the populate_schema() call before the # notification can be emitted. # The value of the payload. field will be set by the # . field. The # will not be part of the payload object internal or # external representation. # Payload fields that are not set by the SCHEMA can be filled in the same # way as in any versioned object. SCHEMA = {} # Version 1.0: Initial version VERSION = '1.0' def __init__(self): super(NotificationPayloadBase, self).__init__() self.populated = not self.SCHEMA @rpc.if_notifications_enabled def populate_schema(self, set_none=True, **kwargs): """Populate the object based on the SCHEMA and the source objects :param kwargs: A dict contains the source object at the key defined in the SCHEMA """ for key, (obj, field) in self.SCHEMA.items(): source = kwargs[obj] # trigger lazy-load if possible try: setattr(self, key, getattr(source, field)) # ObjectActionError - not lazy loadable field # NotImplementedError - obj_load_attr() is not even defined # OrphanedObjectError - lazy loadable field but context is None except (exception.ObjectActionError, NotImplementedError, exception.OrphanedObjectError, ovo_exception.OrphanedObjectError): if set_none: # If it is unset or non lazy loadable in the source object # then we cannot do anything else but try to default it # in the payload object we are generating here. # NOTE(gibi): This will fail if the payload field is not # nullable, but that means that either the source object # is not properly initialized or the payload field needs # to be defined as nullable setattr(self, key, None) except Exception: with excutils.save_and_reraise_exception(): LOG.error('Failed trying to populate attribute "%s" ' 'using field: %s', key, field) self.populated = True # the schema population will create changed fields but we don't need # this information in the notification self.obj_reset_changes(recursive=True) @base.NovaObjectRegistry.register_notification class NotificationPublisher(NotificationObject): # Version 1.0: Initial version # 2.0: The binary field has been renamed to source # 2.1: The type of the source field changed from string to enum. 
# This only needs a minor bump as the enum uses the possible # values of the previous string field # 2.2: New enum for source fields added VERSION = '2.2' # TODO(stephenfin): Remove 'nova-cells' from 'NotificationSourceField' enum # when bumping this object to version 3.0 fields = { 'host': fields.StringField(nullable=False), 'source': fields.NotificationSourceField(nullable=False), } def __init__(self, host, source): super(NotificationPublisher, self).__init__() self.host = host self.source = source @classmethod def from_service_obj(cls, service): source = fields.NotificationSource.get_source_by_binary(service.binary) return cls(host=service.host, source=source) @base.NovaObjectRegistry.register_if(False) class NotificationBase(NotificationObject): """Base class for versioned notifications. Every subclass shall define a 'payload' field. """ # Version 1.0: Initial version VERSION = '1.0' fields = { 'priority': fields.NotificationPriorityField(), 'event_type': fields.ObjectField('EventType'), 'publisher': fields.ObjectField('NotificationPublisher'), } def _emit(self, context, event_type, publisher_id, payload): notifier = rpc.get_versioned_notifier(publisher_id) notify = getattr(notifier, self.priority) notify(context, event_type=event_type, payload=payload) @rpc.if_notifications_enabled def emit(self, context): """Send the notification.""" assert self.payload.populated # Note(gibi): notification payload will be a newly populated object # therefore every field of it will look changed so this does not carry # any extra information so we drop this from the payload. self.payload.obj_reset_changes(recursive=True) self._emit(context, event_type= self.event_type.to_notification_event_type_field(), publisher_id='%s:%s' % (self.publisher.source, self.publisher.host), payload=self.payload.obj_to_primitive()) def notification_sample(sample): """Class decorator to attach the notification sample information to the notification object for documentation generation purposes. :param sample: the path of the sample json file relative to the doc/notification_samples/ directory in the nova repository root. """ def wrap(cls): if not getattr(cls, 'samples', None): cls.samples = [sample] else: cls.samples.append(sample) return cls return wrap ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/notifications/objects/compute_task.py0000664000175000017500000000421000000000000023074 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.notifications.objects import base from nova.notifications.objects import request_spec as reqspec_payload from nova.objects import base as nova_base from nova.objects import fields @nova_base.NovaObjectRegistry.register_notification class ComputeTaskPayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'instance_uuid': fields.UUIDField(), # There are some cases that request_spec is None. # e.g. 
Old instances can still have no RequestSpec object # attached to them. 'request_spec': fields.ObjectField('RequestSpecPayload', nullable=True), 'state': fields.InstanceStateField(nullable=True), 'reason': fields.ObjectField('ExceptionPayload') } def __init__(self, instance_uuid, request_spec, state, reason): super(ComputeTaskPayload, self).__init__() self.instance_uuid = instance_uuid self.request_spec = reqspec_payload.RequestSpecPayload( request_spec) if request_spec is not None else None self.state = state self.reason = reason @base.notification_sample('compute_task-build_instances-error.json') @base.notification_sample('compute_task-migrate_server-error.json') @base.notification_sample('compute_task-rebuild_server-error.json') @nova_base.NovaObjectRegistry.register_notification class ComputeTaskNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('ComputeTaskPayload') } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/notifications/objects/exception.py0000664000175000017500000000641700000000000022407 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect import traceback as tb from nova.notifications.objects import base from nova.objects import base as nova_base from nova.objects import fields @nova_base.NovaObjectRegistry.register_notification class ExceptionPayload(base.NotificationPayloadBase): # Version 1.0: Initial version # Version 1.1: Add traceback field to ExceptionPayload VERSION = '1.1' fields = { 'module_name': fields.StringField(), 'function_name': fields.StringField(), 'exception': fields.StringField(), 'exception_message': fields.StringField(), 'traceback': fields.StringField() } def __init__(self, module_name, function_name, exception, exception_message, traceback): super(ExceptionPayload, self).__init__() self.module_name = module_name self.function_name = function_name self.exception = exception self.exception_message = exception_message self.traceback = traceback @classmethod def from_exception(cls, fault: Exception): traceback = fault.__traceback__ # NOTE(stephenfin): inspect.trace() will only return something if we're # inside the scope of an exception handler. If we are not, we fallback # to extracting information from the traceback. This is lossy, since # the stack stops at the exception handler, not the exception raise. # Check the inspect docs for more information. 
# # https://docs.python.org/3/library/inspect.html#types-and-members trace = inspect.trace() if trace: module = inspect.getmodule(trace[-1][0]) function_name = trace[-1][3] else: module = inspect.getmodule(traceback) function_name = traceback.tb_frame.f_code.co_name module_name = module.__name__ if module else 'unknown' # TODO(gibi): apply strutils.mask_password on exception_message and # consider emitting the exception_message only if the safe flag is # true in the exception like in the REST API return cls( function_name=function_name, module_name=module_name, exception=fault.__class__.__name__, exception_message=str(fault), # NOTE(stephenfin): the first argument to format_exception is # ignored since Python 3.5 traceback=','.join(tb.format_exception(None, fault, traceback)), ) @base.notification_sample('compute-exception.json') @nova_base.NovaObjectRegistry.register_notification class ExceptionNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('ExceptionPayload') } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/notifications/objects/flavor.py0000664000175000017500000000677700000000000021713 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.notifications.objects import base from nova.objects import base as nova_base from nova.objects import fields @base.notification_sample('flavor-create.json') @base.notification_sample('flavor-update.json') @base.notification_sample('flavor-delete.json') @nova_base.NovaObjectRegistry.register_notification class FlavorNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('FlavorPayload') } @nova_base.NovaObjectRegistry.register_notification class FlavorPayload(base.NotificationPayloadBase): # Version 1.0: Initial version # Version 1.1: Add other fields for Flavor # Version 1.2: Add extra_specs and projects fields # Version 1.3: Make projects and extra_specs field nullable as they are # not always available when a notification is emitted. # Version 1.4: Added description field. VERSION = '1.4' # NOTE: if we'd want to rename some fields(memory_mb->ram, root_gb->disk, # ephemeral_gb: ephemeral), bumping to payload version 2.0 will be needed. 
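    # Editor's note (illustrative, not part of the original file): each SCHEMA
    # entry below follows the NotificationPayloadBase convention documented
    # earlier, i.e. 'payload_field': ('kwarg_name', 'source_field'). For
    # example, the entry 'memory_mb': ('flavor', 'memory_mb') means that
    # populate_schema(flavor=some_flavor) copies some_flavor.memory_mb into
    # self.memory_mb on this payload ('some_flavor' is a placeholder name).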
SCHEMA = { 'flavorid': ('flavor', 'flavorid'), 'memory_mb': ('flavor', 'memory_mb'), 'vcpus': ('flavor', 'vcpus'), 'root_gb': ('flavor', 'root_gb'), 'ephemeral_gb': ('flavor', 'ephemeral_gb'), 'name': ('flavor', 'name'), 'swap': ('flavor', 'swap'), 'rxtx_factor': ('flavor', 'rxtx_factor'), 'vcpu_weight': ('flavor', 'vcpu_weight'), 'disabled': ('flavor', 'disabled'), 'is_public': ('flavor', 'is_public'), 'extra_specs': ('flavor', 'extra_specs'), 'projects': ('flavor', 'projects'), 'description': ('flavor', 'description') } fields = { 'flavorid': fields.StringField(nullable=True), 'memory_mb': fields.IntegerField(nullable=True), 'vcpus': fields.IntegerField(nullable=True), 'root_gb': fields.IntegerField(nullable=True), 'ephemeral_gb': fields.IntegerField(nullable=True), 'name': fields.StringField(), 'swap': fields.IntegerField(), 'rxtx_factor': fields.FloatField(nullable=True), 'vcpu_weight': fields.IntegerField(nullable=True), 'disabled': fields.BooleanField(), 'is_public': fields.BooleanField(), 'extra_specs': fields.DictOfStringsField(nullable=True), 'projects': fields.ListOfStringsField(nullable=True), 'description': fields.StringField(nullable=True) } def __init__(self, flavor): super(FlavorPayload, self).__init__() if 'projects' not in flavor: # NOTE(danms): If projects is not loaded in the flavor, # don't attempt to load it. If we're in a child cell then # we can't load the real flavor, and if we're a flavor on # an instance then we don't want to anyway. flavor = flavor.obj_clone() flavor._context = None self.populate_schema(flavor=flavor) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/notifications/objects/image.py0000664000175000017500000001531400000000000021467 0ustar00zuulzuul00000000000000# Copyright 2018 NTT Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.notifications.objects import base from nova.objects import base as nova_base from nova.objects import fields from nova.objects import image_meta @nova_base.NovaObjectRegistry.register_notification class ImageMetaPayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' SCHEMA = { 'id': ('image_meta', 'id'), 'name': ('image_meta', 'name'), 'status': ('image_meta', 'status'), 'visibility': ('image_meta', 'visibility'), 'protected': ('image_meta', 'protected'), 'checksum': ('image_meta', 'checksum'), 'owner': ('image_meta', 'owner'), 'size': ('image_meta', 'size'), 'virtual_size': ('image_meta', 'virtual_size'), 'container_format': ('image_meta', 'container_format'), 'disk_format': ('image_meta', 'disk_format'), 'created_at': ('image_meta', 'created_at'), 'updated_at': ('image_meta', 'updated_at'), 'tags': ('image_meta', 'tags'), 'direct_url': ('image_meta', 'direct_url'), 'min_ram': ('image_meta', 'min_ram'), 'min_disk': ('image_meta', 'min_disk') } # NOTE(takashin): The reason that each field is nullable is as follows. # # a. It is defined as "The value might be null (JSON null data type)." 
# in the "Show image" API (GET /v2/images/{image_id}) # in the glance API v2 Reference. # (https://docs.openstack.org/api-ref/image/v2/index.html) # # * checksum # * container_format # * disk_format # * min_disk # * min_ram # * name # * owner # * size # * updated_at # * virtual_size # # b. It is optional in the response from glance. # * direct_url # # a. It is defined as nullable in the ImageMeta object. # * created_at # # c. It cannot be got in the boot from volume case. # See VIM_IMAGE_ATTRIBUTES in nova/block_device.py. # # * id (not 'image_id') # * visibility # * protected # * status # * tags fields = { 'id': fields.UUIDField(nullable=True), 'name': fields.StringField(nullable=True), 'status': fields.StringField(nullable=True), 'visibility': fields.StringField(nullable=True), 'protected': fields.FlexibleBooleanField(nullable=True), 'checksum': fields.StringField(nullable=True), 'owner': fields.StringField(nullable=True), 'size': fields.IntegerField(nullable=True), 'virtual_size': fields.IntegerField(nullable=True), 'container_format': fields.StringField(nullable=True), 'disk_format': fields.StringField(nullable=True), 'created_at': fields.DateTimeField(nullable=True), 'updated_at': fields.DateTimeField(nullable=True), 'tags': fields.ListOfStringsField(nullable=True), 'direct_url': fields.StringField(nullable=True), 'min_ram': fields.IntegerField(nullable=True), 'min_disk': fields.IntegerField(nullable=True), 'properties': fields.ObjectField('ImageMetaPropsPayload') } def __init__(self, image_meta): super(ImageMetaPayload, self).__init__() self.properties = ImageMetaPropsPayload( image_meta_props=image_meta.properties) self.populate_schema(image_meta=image_meta) @nova_base.NovaObjectRegistry.register_notification class ImageMetaPropsPayload(base.NotificationPayloadBase): """Built dynamically from ImageMetaProps. This has the following implications: * When you make a versioned update to ImageMetaProps, you must *also* bump the version of this object, even though you didn't make any explicit changes here. There's an object hash test that should catch this for you. * As currently written, this relies on all of the fields of ImageMetaProps being initialized with no arguments. If you add one with arguments (e.g. ``nullable=True`` or with a ``default``), something needs to change here. """ # Version 1.0: Initial version # Version 1.1: Added 'gop', 'virtio' and 'none' to hw_video_model field # Version 1.2: Added hw_pci_numa_affinity_policy field # Version 1.3: Added hw_mem_encryption, hw_pmu and hw_time_hpet fields # Version 1.4: Added 'mixed' to hw_cpu_policy field # Version 1.5: Added 'hw_tpm_model' and 'hw_tpm_version' fields # Version 1.6: Added 'socket' to hw_pci_numa_affinity_policy # Version 1.7: Added 'hw_input_bus' field # Version 1.8: Added 'bochs' as an option to 'hw_video_model' # Version 1.9: Added 'hw_emulation_architecture' field # Version 1.10: Added 'hw_ephemeral_encryption' and # 'hw_ephemeral_encryption_format' fields # Version 1.11: Added 'hw_locked_memory' field # Version 1.12: Added 'hw_viommu_model' field # Version 1.13: Added 'hw_virtio_packed_ring' field # Version 1.14: Added 'hw_firmware_stateless' field # Version 1.15: Added igb value to 'hw_vif_model' enum # Version 1.16: Added 'hw_sound_model' field # Version 1.17: Added 'hw_usb_model' and 'hw_redirected_usb_ports' fields # Version 1.18: Added 'hw_mem_encryption_model' field VERSION = '1.18' # NOTE(efried): This logic currently relies on all of the fields of # ImageMetaProps being initialized with no arguments. 
See the docstring. # NOTE(efried): It's possible this could just be: # fields = image_meta.ImageMetaProps.fields # But it is not clear that OVO can tolerate the same *instance* of a type # class being used in more than one place. fields = { k: v.__class__() for k, v in image_meta.ImageMetaProps.fields.items() if k not in ('hw_ephemeral_encryption_secret_uuid',)} SCHEMA = { k: ('image_meta_props', k) for k in fields} def __init__(self, image_meta_props): super(ImageMetaPropsPayload, self).__init__() # NOTE(takashin): If fields are not set in the ImageMetaProps object, # it will not set the fields in the ImageMetaPropsPayload # in order to avoid too many fields whose values are None. self.populate_schema(set_none=False, image_meta_props=image_meta_props) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/notifications/objects/instance.py0000664000175000017500000010031200000000000022202 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import nova.conf from nova.notifications.objects import base from nova.notifications.objects import flavor as flavor_payload from nova.notifications.objects import keypair as keypair_payload from nova.objects import base as nova_base from nova.objects import fields CONF = nova.conf.CONF @nova_base.NovaObjectRegistry.register_notification class InstancePayload(base.NotificationPayloadBase): SCHEMA = { 'uuid': ('instance', 'uuid'), 'user_id': ('instance', 'user_id'), 'tenant_id': ('instance', 'project_id'), 'reservation_id': ('instance', 'reservation_id'), 'display_name': ('instance', 'display_name'), 'display_description': ('instance', 'display_description'), 'host_name': ('instance', 'hostname'), 'host': ('instance', 'host'), 'node': ('instance', 'node'), 'os_type': ('instance', 'os_type'), 'architecture': ('instance', 'architecture'), 'availability_zone': ('instance', 'availability_zone'), 'image_uuid': ('instance', 'image_ref'), 'key_name': ('instance', 'key_name'), 'kernel_id': ('instance', 'kernel_id'), 'ramdisk_id': ('instance', 'ramdisk_id'), 'created_at': ('instance', 'created_at'), 'launched_at': ('instance', 'launched_at'), 'terminated_at': ('instance', 'terminated_at'), 'deleted_at': ('instance', 'deleted_at'), 'updated_at': ('instance', 'updated_at'), 'state': ('instance', 'vm_state'), 'power_state': ('instance', 'power_state'), 'task_state': ('instance', 'task_state'), 'progress': ('instance', 'progress'), 'metadata': ('instance', 'metadata'), 'locked': ('instance', 'locked'), 'auto_disk_config': ('instance', 'auto_disk_config') } # Version 1.0: Initial version # Version 1.1: add locked and display_description field # Version 1.2: Add auto_disk_config field # Version 1.3: Add key_name field # Version 1.4: Add BDM related data # Version 1.5: Add updated_at field # Version 1.6: Add request_id field # Version 1.7: Added action_initiator_user and action_initiator_project to # InstancePayload # Version 1.8: Added locked_reason field # Version 1.9: Add shares related data 
VERSION = '1.9' fields = { 'uuid': fields.UUIDField(), 'user_id': fields.StringField(nullable=True), 'tenant_id': fields.StringField(nullable=True), 'reservation_id': fields.StringField(nullable=True), 'display_name': fields.StringField(nullable=True), 'display_description': fields.StringField(nullable=True), 'host_name': fields.StringField(nullable=True), 'host': fields.StringField(nullable=True), 'node': fields.StringField(nullable=True), 'os_type': fields.StringField(nullable=True), 'architecture': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'flavor': fields.ObjectField('FlavorPayload'), 'image_uuid': fields.StringField(nullable=True), 'key_name': fields.StringField(nullable=True), 'kernel_id': fields.StringField(nullable=True), 'ramdisk_id': fields.StringField(nullable=True), 'created_at': fields.DateTimeField(nullable=True), 'launched_at': fields.DateTimeField(nullable=True), 'terminated_at': fields.DateTimeField(nullable=True), 'deleted_at': fields.DateTimeField(nullable=True), 'updated_at': fields.DateTimeField(nullable=True), 'state': fields.InstanceStateField(nullable=True), 'power_state': fields.InstancePowerStateField(nullable=True), 'task_state': fields.InstanceTaskStateField(nullable=True), 'progress': fields.IntegerField(nullable=True), 'ip_addresses': fields.ListOfObjectsField('IpPayload'), 'block_devices': fields.ListOfObjectsField('BlockDevicePayload', nullable=True), 'metadata': fields.DictOfStringsField(), 'locked': fields.BooleanField(), 'auto_disk_config': fields.DiskConfigField(), 'request_id': fields.StringField(nullable=True), 'action_initiator_user': fields.StringField(nullable=True), 'action_initiator_project': fields.StringField(nullable=True), 'locked_reason': fields.StringField(nullable=True), 'shares': fields.ListOfObjectsField('SharePayload', nullable=True), } def __init__(self, context, instance, bdms=None): super(InstancePayload, self).__init__() network_info = instance.get_network_info() self.ip_addresses = IpPayload.from_network_info(network_info) self.flavor = flavor_payload.FlavorPayload(flavor=instance.flavor) if bdms is not None: self.block_devices = BlockDevicePayload.from_bdms(bdms) else: self.block_devices = BlockDevicePayload.from_instance(instance) self.shares = SharePayload.from_instance(instance) # NOTE(Kevin_Zheng): Don't include request_id for periodic tasks, # RequestContext for periodic tasks does not include project_id # and user_id. Consider modify this once periodic tasks got a # consistent request_id. 
self.request_id = context.request_id if (context.project_id and context.user_id) else None self.action_initiator_user = context.user_id self.action_initiator_project = context.project_id self.locked_reason = instance.system_metadata.get("locked_reason") self.populate_schema(instance=instance) @nova_base.NovaObjectRegistry.register_notification class InstanceActionPayload(InstancePayload): # No SCHEMA as all the additional fields are calculated # Version 1.1: locked and display_description added to InstancePayload # Version 1.2: Added auto_disk_config field to InstancePayload # Version 1.3: Added key_name field to InstancePayload # Version 1.4: Add BDM related data # Version 1.5: Added updated_at field to InstancePayload # Version 1.6: Added request_id field to InstancePayload # Version 1.7: Added action_initiator_user and action_initiator_project to # InstancePayload # Version 1.8: Added locked_reason field to InstancePayload # Version 1.9: Add shares related data VERSION = '1.9' fields = { 'fault': fields.ObjectField('ExceptionPayload', nullable=True), 'request_id': fields.StringField(nullable=True), } def __init__(self, context, instance, fault, bdms=None): super(InstanceActionPayload, self).__init__(context=context, instance=instance, bdms=bdms) self.fault = fault @nova_base.NovaObjectRegistry.register_notification class InstanceActionVolumePayload(InstanceActionPayload): # Version 1.0: Initial version # Version 1.1: Added key_name field to InstancePayload # Version 1.2: Add BDM related data # Version 1.3: Added updated_at field to InstancePayload # Version 1.4: Added request_id field to InstancePayload # Version 1.5: Added action_initiator_user and action_initiator_project to # InstancePayload # Version 1.6: Added locked_reason field to InstancePayload # Version 1.7: Add shares to InstancePayload VERSION = '1.7' fields = { 'volume_id': fields.UUIDField() } def __init__(self, context, instance, fault, volume_id): super(InstanceActionVolumePayload, self).__init__( context=context, instance=instance, fault=fault) self.volume_id = volume_id @nova_base.NovaObjectRegistry.register_notification class InstanceActionSharePayload(InstanceActionPayload): # Version 1.0: Initial version VERSION = '1.0' fields = { 'share_id': fields.UUIDField(), } def __init__(self, context, instance, fault, share_id): super(InstanceActionSharePayload, self).__init__( context=context, instance=instance, fault=fault) self.share_id = share_id @nova_base.NovaObjectRegistry.register_notification class InstanceActionVolumeSwapPayload(InstanceActionPayload): # No SCHEMA as all the additional fields are calculated # Version 1.1: locked and display_description added to InstancePayload # Version 1.2: Added auto_disk_config field to InstancePayload # Version 1.3: Added key_name field to InstancePayload # Version 1.4: Add BDM related data # Version 1.5: Added updated_at field to InstancePayload # Version 1.6: Added request_id field to InstancePayload # Version 1.7: Added action_initiator_user and action_initiator_project to # InstancePayload # Version 1.8: Added locked_reason field to InstancePayload # Version 1.9: Add shares to InstancePayload VERSION = '1.9' fields = { 'old_volume_id': fields.UUIDField(), 'new_volume_id': fields.UUIDField(), } def __init__(self, context, instance, fault, old_volume_id, new_volume_id): super(InstanceActionVolumeSwapPayload, self).__init__( context=context, instance=instance, fault=fault) self.old_volume_id = old_volume_id self.new_volume_id = new_volume_id 
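# NOTE: Illustrative sketch only (an assumption, not a statement about how the
# compute manager is wired): a volume swap payload is built from the instance
# and the two volume ids, then wrapped by InstanceActionVolumeSwapNotification
# defined further below, e.g.:
#
#   payload = InstanceActionVolumeSwapPayload(
#       context=ctxt, instance=instance, fault=None,
#       old_volume_id=old_volume_id, new_volume_id=new_volume_id)
#
# where ``ctxt``, ``instance``, ``old_volume_id`` and ``new_volume_id`` are
# hypothetical local names supplied by the caller.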
@nova_base.NovaObjectRegistry.register_notification class InstanceCreatePayload(InstanceActionPayload): # No SCHEMA as all the additional fields are calculated # Version 1.2: Initial version. It starts at 1.2 to match with the version # of the InstanceActionPayload at the time when this specific # payload is created as a child of it so that the # instance.create notification using this new payload does not # have decreasing version. # 1.3: Add keypairs field # 1.4: Add key_name field to InstancePayload # 1.5: Add BDM related data to InstancePayload # 1.6: Add tags field to InstanceCreatePayload # 1.7: Added updated_at field to InstancePayload # 1.8: Added request_id field to InstancePayload # 1.9: Add trusted_image_certificates field to # InstanceCreatePayload # 1.10: Added action_initiator_user and action_initiator_project to # InstancePayload # 1.11: Added instance_name to InstanceCreatePayload # Version 1.12: Added locked_reason field to InstancePayload # Version 1.13: Add shares to InstancePayload VERSION = '1.13' fields = { 'keypairs': fields.ListOfObjectsField('KeypairPayload'), 'tags': fields.ListOfStringsField(), 'trusted_image_certificates': fields.ListOfStringsField( nullable=True), 'instance_name': fields.StringField(nullable=True), } def __init__(self, context, instance, fault, bdms): super(InstanceCreatePayload, self).__init__( context=context, instance=instance, fault=fault, bdms=bdms) self.keypairs = [keypair_payload.KeypairPayload(keypair=keypair) for keypair in instance.keypairs] self.tags = [instance_tag.tag for instance_tag in instance.tags] self.trusted_image_certificates = None if instance.trusted_certs: self.trusted_image_certificates = instance.trusted_certs.ids self.instance_name = instance.name @nova_base.NovaObjectRegistry.register_notification class InstanceActionResizePrepPayload(InstanceActionPayload): # No SCHEMA as all the additional fields are calculated # Version 1.0: Initial version # Version 1.1: Added request_id field to InstancePayload # Version 1.2: Added action_initiator_user and action_initiator_project to # InstancePayload # Version 1.3: Added locked_reason field to InstancePayload # Version 1.4: Add shares to InstancePayload VERSION = '1.4' fields = { 'new_flavor': fields.ObjectField('FlavorPayload', nullable=True) } def __init__(self, context, instance, fault, new_flavor): super(InstanceActionResizePrepPayload, self).__init__( context=context, instance=instance, fault=fault) self.new_flavor = new_flavor @nova_base.NovaObjectRegistry.register_notification class InstanceUpdatePayload(InstancePayload): # Version 1.0: Initial version # Version 1.1: locked and display_description added to InstancePayload # Version 1.2: Added tags field # Version 1.3: Added auto_disk_config field to InstancePayload # Version 1.4: Added key_name field to InstancePayload # Version 1.5: Add BDM related data # Version 1.6: Added updated_at field to InstancePayload # Version 1.7: Added request_id field to InstancePayload # Version 1.8: Added action_initiator_user and action_initiator_project to # InstancePayload # Version 1.9: Added locked_reason field to InstancePayload # Version 2.0: Remove bandwidth field # Version 2.1: Add shares to InstancePayload VERSION = '2.1' fields = { 'state_update': fields.ObjectField('InstanceStateUpdatePayload'), 'audit_period': fields.ObjectField('AuditPeriodPayload'), 'old_display_name': fields.StringField(nullable=True), 'tags': fields.ListOfStringsField(), } def __init__( self, context, instance, state_update, audit_period, 
old_display_name, ): super().__init__(context=context, instance=instance) self.state_update = state_update self.audit_period = audit_period self.old_display_name = old_display_name self.tags = [instance_tag.tag for instance_tag in instance.tags.objects] @nova_base.NovaObjectRegistry.register_notification class InstanceActionRescuePayload(InstanceActionPayload): # Version 1.0: Initial version # Version 1.1: Added request_id field to InstancePayload # Version 1.2: Added action_initiator_user and action_initiator_project to # InstancePayload # Version 1.3: Added locked_reason field to InstancePayload # Version 1.4: Add shares to InstancePayload VERSION = '1.4' fields = { 'rescue_image_ref': fields.UUIDField(nullable=True) } def __init__(self, context, instance, fault, rescue_image_ref): super(InstanceActionRescuePayload, self).__init__( context=context, instance=instance, fault=fault) self.rescue_image_ref = rescue_image_ref @nova_base.NovaObjectRegistry.register_notification class InstanceActionRebuildPayload(InstanceActionPayload): # No SCHEMA as all the additional fields are calculated # Version 1.7: Initial version. It starts at 1.7 to equal one more than # the version of the InstanceActionPayload at the time # when this specific payload is created so that the # instance.rebuild.* notifications using this new payload # signal the change of nova_object.name. # Version 1.8: Added action_initiator_user and action_initiator_project to # InstancePayload # Version 1.9: Added locked_reason field to InstancePayload # Version 1.10: Add shares to InstancePayload VERSION = '1.10' fields = { 'trusted_image_certificates': fields.ListOfStringsField( nullable=True) } def __init__(self, context, instance, fault, bdms=None): super(InstanceActionRebuildPayload, self).__init__( context=context, instance=instance, fault=fault, bdms=bdms) self.trusted_image_certificates = None if instance.trusted_certs: self.trusted_image_certificates = instance.trusted_certs.ids @nova_base.NovaObjectRegistry.register_notification class IpPayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'label': fields.StringField(), 'mac': fields.MACAddressField(), 'meta': fields.DictOfStringsField(), 'port_uuid': fields.UUIDField(nullable=True), 'version': fields.IntegerField(), 'address': fields.IPV4AndV6AddressField(), 'device_name': fields.StringField(nullable=True) } def __init__(self, label, mac, meta, port_uuid, version, address, device_name): super(IpPayload, self).__init__() self.label = label self.mac = mac self.meta = meta self.port_uuid = port_uuid self.version = version self.address = address self.device_name = device_name @classmethod def from_network_info(cls, network_info): """Returns a list of IpPayload object based on the passed network_info. 
""" ips = [] if network_info is not None: for vif in network_info: for ip in vif.fixed_ips(): ips.append(cls( label=vif["network"]["label"], mac=vif["address"], meta=vif["meta"], port_uuid=vif["id"], version=ip["version"], address=ip["address"], device_name=vif["devname"])) return ips @nova_base.NovaObjectRegistry.register_notification class AuditPeriodPayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'audit_period_beginning': fields.DateTimeField(), 'audit_period_ending': fields.DateTimeField(), } def __init__(self, audit_period_beginning, audit_period_ending): super(AuditPeriodPayload, self).__init__() self.audit_period_beginning = audit_period_beginning self.audit_period_ending = audit_period_ending @nova_base.NovaObjectRegistry.register_notification class BlockDevicePayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' SCHEMA = { 'device_name': ('bdm', 'device_name'), 'boot_index': ('bdm', 'boot_index'), 'delete_on_termination': ('bdm', 'delete_on_termination'), 'volume_id': ('bdm', 'volume_id'), 'tag': ('bdm', 'tag') } fields = { 'device_name': fields.StringField(nullable=True), 'boot_index': fields.IntegerField(nullable=True), 'delete_on_termination': fields.BooleanField(default=False), 'volume_id': fields.UUIDField(), 'tag': fields.StringField(nullable=True) } def __init__(self, bdm): super(BlockDevicePayload, self).__init__() self.populate_schema(bdm=bdm) @classmethod def from_instance(cls, instance): """Returns a list of BlockDevicePayload objects based on the passed bdms. """ if not CONF.notifications.bdms_in_notifications: return None instance_bdms = instance.get_bdms() if instance_bdms is not None: return cls.from_bdms(instance_bdms) else: return [] @classmethod def from_bdms(cls, bdms): """Returns a list of BlockDevicePayload objects based on the passed BlockDeviceMappingList. """ payloads = [] for bdm in bdms: if bdm.volume_id is not None: payloads.append(cls(bdm)) return payloads @nova_base.NovaObjectRegistry.register_notification class SharePayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' SCHEMA = { 'share_mapping_uuid': ('share', 'uuid'), 'share_id': ('share', 'share_id'), 'status': ('share', 'status'), 'tag': ('share', 'tag'), # Do not include 'export_location' as it could contains sensitive data # 'export_location': ('share', 'export_location') } fields = { 'share_mapping_uuid': fields.UUIDField(), 'share_id': fields.UUIDField(), 'status': fields.StringField(nullable=False), 'tag': fields.StringField(nullable=False), # 'export_location': fields.StringField(nullable=False), } def __init__(self, share): super(SharePayload, self).__init__() self.populate_schema(share=share) @classmethod def from_instance(cls, instance): """Returns a list of SharePayload objects based on the passed shares. """ if not CONF.notifications.include_share_mapping: return None instance_shares = instance.get_shares() return [cls(share) for share in instance_shares] @classmethod def from_shares(cls, shares): """Returns a list of SharePayload objects based on the passed ShareMappingList. 
""" return [cls(share) for share in shares] @nova_base.NovaObjectRegistry.register_notification class InstanceStateUpdatePayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'old_state': fields.StringField(nullable=True), 'state': fields.StringField(nullable=True), 'old_task_state': fields.StringField(nullable=True), 'new_task_state': fields.StringField(nullable=True), } def __init__(self, old_state, state, old_task_state, new_task_state): super(InstanceStateUpdatePayload, self).__init__() self.old_state = old_state self.state = state self.old_task_state = old_task_state self.new_task_state = new_task_state @base.notification_sample('instance-delete-start.json') @base.notification_sample('instance-delete-end.json') @base.notification_sample('instance-pause-start.json') @base.notification_sample('instance-pause-end.json') @base.notification_sample('instance-unpause-start.json') @base.notification_sample('instance-unpause-end.json') @base.notification_sample('instance-resize-start.json') @base.notification_sample('instance-resize-end.json') @base.notification_sample('instance-resize-error.json') @base.notification_sample('instance-suspend-start.json') @base.notification_sample('instance-suspend-end.json') @base.notification_sample('instance-power_on-start.json') @base.notification_sample('instance-power_on_share-start.json') @base.notification_sample('instance-power_on-end.json') @base.notification_sample('instance-power_on_share-end.json') @base.notification_sample('instance-power_off-start.json') @base.notification_sample('instance-power_off-end.json') @base.notification_sample('instance-reboot-start.json') @base.notification_sample('instance-reboot-end.json') @base.notification_sample('instance-reboot-error.json') @base.notification_sample('instance-shutdown-start.json') @base.notification_sample('instance-shutdown-end.json') @base.notification_sample('instance-interface_attach-start.json') @base.notification_sample('instance-interface_attach-end.json') @base.notification_sample('instance-interface_attach-error.json') @base.notification_sample('instance-shelve-start.json') @base.notification_sample('instance-shelve-end.json') @base.notification_sample('instance-resume-start.json') @base.notification_sample('instance-resume-end.json') @base.notification_sample('instance-restore-start.json') @base.notification_sample('instance-restore-end.json') @base.notification_sample('instance-evacuate.json') @base.notification_sample('instance-resize_finish-start.json') @base.notification_sample('instance-resize_finish-end.json') @base.notification_sample('instance-live_migration_pre-start.json') @base.notification_sample('instance-live_migration_pre-end.json') @base.notification_sample('instance-live_migration_abort-start.json') @base.notification_sample('instance-live_migration_abort-end.json') @base.notification_sample('instance-live_migration_post-start.json') @base.notification_sample('instance-live_migration_post-end.json') @base.notification_sample('instance-live_migration_post_dest-start.json') @base.notification_sample('instance-live_migration_post_dest-end.json') @base.notification_sample('instance-live_migration_rollback-start.json') @base.notification_sample('instance-live_migration_rollback-end.json') @base.notification_sample('instance-live_migration_rollback_dest-start.json') @base.notification_sample('instance-live_migration_rollback_dest-end.json') @base.notification_sample('instance-interface_detach-start.json') 
@base.notification_sample('instance-interface_detach-end.json') @base.notification_sample('instance-resize_confirm-start.json') @base.notification_sample('instance-resize_confirm-end.json') @base.notification_sample('instance-resize_revert-start.json') @base.notification_sample('instance-resize_revert-end.json') @base.notification_sample('instance-live_migration_force_complete-start.json') @base.notification_sample('instance-live_migration_force_complete-end.json') @base.notification_sample('instance-shelve_offload-start.json') @base.notification_sample('instance-shelve_offload-end.json') @base.notification_sample('instance-soft_delete-start.json') @base.notification_sample('instance-soft_delete-end.json') @base.notification_sample('instance-trigger_crash_dump-start.json') @base.notification_sample('instance-trigger_crash_dump-end.json') @base.notification_sample('instance-unrescue-start.json') @base.notification_sample('instance-unrescue-end.json') @base.notification_sample('instance-unshelve-start.json') @base.notification_sample('instance-unshelve-end.json') @base.notification_sample('instance-lock.json') @base.notification_sample('instance-unlock.json') @nova_base.NovaObjectRegistry.register_notification class InstanceActionNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('InstanceActionPayload') } @base.notification_sample('instance-update.json') @nova_base.NovaObjectRegistry.register_notification class InstanceUpdateNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('InstanceUpdatePayload') } @base.notification_sample('instance-volume_swap-start.json') @base.notification_sample('instance-volume_swap-end.json') @base.notification_sample('instance-volume_swap-error.json') @nova_base.NovaObjectRegistry.register_notification class InstanceActionVolumeSwapNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('InstanceActionVolumeSwapPayload') } @base.notification_sample('instance-volume_attach-start.json') @base.notification_sample('instance-volume_attach-end.json') @base.notification_sample('instance-volume_attach-error.json') @base.notification_sample('instance-volume_detach-start.json') @base.notification_sample('instance-volume_detach-end.json') @nova_base.NovaObjectRegistry.register_notification class InstanceActionVolumeNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('InstanceActionVolumePayload') } @base.notification_sample('instance-share_attach-start.json') @base.notification_sample('instance-share_attach-error.json') @base.notification_sample('instance-share_attach-end.json') @base.notification_sample('instance-share_detach-start.json') @base.notification_sample('instance-share_detach-error.json') @base.notification_sample('instance-share_detach-end.json') @nova_base.NovaObjectRegistry.register_notification class InstanceActionShareNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('InstanceActionSharePayload') } @base.notification_sample('instance-create-start.json') @base.notification_sample('instance-create-end.json') @base.notification_sample('instance-create-error.json') @nova_base.NovaObjectRegistry.register_notification class InstanceCreateNotification(base.NotificationBase): # Version 1.0: Initial version 
VERSION = '1.0' fields = { 'payload': fields.ObjectField('InstanceCreatePayload') } @base.notification_sample('instance-resize_prep-start.json') @base.notification_sample('instance-resize_prep-end.json') @nova_base.NovaObjectRegistry.register_notification class InstanceActionResizePrepNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('InstanceActionResizePrepPayload') } @base.notification_sample('instance-snapshot-start.json') @base.notification_sample('instance-snapshot-end.json') @nova_base.NovaObjectRegistry.register_notification class InstanceActionSnapshotNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('InstanceActionSnapshotPayload') } @base.notification_sample('instance-rescue-start.json') @base.notification_sample('instance-rescue-end.json') @nova_base.NovaObjectRegistry.register_notification class InstanceActionRescueNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('InstanceActionRescuePayload') } @base.notification_sample('instance-rebuild_scheduled.json') @base.notification_sample('instance-rebuild-start.json') @base.notification_sample('instance-rebuild-end.json') @base.notification_sample('instance-rebuild-error.json') @nova_base.NovaObjectRegistry.register_notification class InstanceActionRebuildNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('InstanceActionRebuildPayload') } @nova_base.NovaObjectRegistry.register_notification class InstanceActionSnapshotPayload(InstanceActionPayload): # Version 1.6: Initial version. It starts at version 1.6 as # instance.snapshot.start and .end notifications are switched # from using InstanceActionPayload 1.5 to this new payload and # also it added a new field so we wanted to keep the version # number increasing to signal the change. 
# Version 1.7: Added request_id field to InstancePayload # Version 1.8: Added action_initiator_user and action_initiator_project to # InstancePayload # Version 1.9: Added locked_reason field to InstancePayload # Version 1.10: Add shares to InstancePayload VERSION = '1.10' fields = { 'snapshot_image_id': fields.UUIDField(), } def __init__(self, context, instance, fault, snapshot_image_id): super(InstanceActionSnapshotPayload, self).__init__( context=context, instance=instance, fault=fault) self.snapshot_image_id = snapshot_image_id @nova_base.NovaObjectRegistry.register_notification class InstanceExistsPayload(InstancePayload): # Version 1.0: Initial version # Version 1.1: Added action_initiator_user and action_initiator_project to # InstancePayload # Version 1.2: Added locked_reason field to InstancePayload # Version 2.0: Remove bandwidth field # Version 2.1: Add shares to InstancePayload VERSION = '2.1' fields = { 'audit_period': fields.ObjectField('AuditPeriodPayload'), } def __init__(self, context, instance, audit_period): super().__init__(context=context, instance=instance) self.audit_period = audit_period @base.notification_sample('instance-exists.json') @nova_base.NovaObjectRegistry.register_notification class InstanceExistsNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('InstanceExistsPayload') } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/notifications/objects/keypair.py0000664000175000017500000000405200000000000022046 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
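# NOTE: Illustrative sketch only (an assumption, not a statement about the
# callers in this tree): KeypairPayload below copies its fields straight from
# a keypair object via populate_schema() and is wrapped by
# KeypairNotification, e.g.:
#
#   payload = KeypairPayload(keypair=keypair)
#
# where ``keypair`` is a nova.objects.KeyPair instance provided by the caller.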
from nova.notifications.objects import base from nova.objects import base as nova_base from nova.objects import fields @nova_base.NovaObjectRegistry.register_notification class KeypairPayload(base.NotificationPayloadBase): SCHEMA = { 'user_id': ('keypair', 'user_id'), 'name': ('keypair', 'name'), 'public_key': ('keypair', 'public_key'), 'fingerprint': ('keypair', 'fingerprint'), 'type': ('keypair', 'type') } # Version 1.0: Initial version VERSION = '1.0' fields = { 'user_id': fields.StringField(nullable=True), 'name': fields.StringField(nullable=False), 'fingerprint': fields.StringField(nullable=True), 'public_key': fields.StringField(nullable=True), 'type': fields.StringField(nullable=False), } def __init__(self, keypair, **kwargs): super(KeypairPayload, self).__init__(**kwargs) self.populate_schema(keypair=keypair) @base.notification_sample('keypair-create-start.json') @base.notification_sample('keypair-create-end.json') @base.notification_sample('keypair-delete-start.json') @base.notification_sample('keypair-delete-end.json') @base.notification_sample('keypair-import-start.json') @base.notification_sample('keypair-import-end.json') @nova_base.NovaObjectRegistry.register_notification class KeypairNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('KeypairPayload') } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/notifications/objects/libvirt.py0000664000175000017500000000260600000000000022060 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.notifications.objects import base from nova.objects import base as nova_base from nova.objects import fields @nova_base.NovaObjectRegistry.register_notification class LibvirtErrorPayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'ip': fields.StringField(), 'reason': fields.ObjectField('ExceptionPayload'), } def __init__(self, ip, reason): super(LibvirtErrorPayload, self).__init__() self.ip = ip self.reason = reason @base.notification_sample('libvirt-connect-error.json') @nova_base.NovaObjectRegistry.register_notification class LibvirtErrorNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('LibvirtErrorPayload') } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/notifications/objects/metrics.py0000664000175000017500000000551600000000000022056 0ustar00zuulzuul00000000000000# Copyright 2018 NTT Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.notifications.objects import base from nova.objects import base as nova_base from nova.objects import fields @base.notification_sample('metrics-update.json') @nova_base.NovaObjectRegistry.register_notification class MetricsNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('MetricsPayload') } @nova_base.NovaObjectRegistry.register_notification class MetricPayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' SCHEMA = { 'name': ('monitor_metric', 'name'), 'value': ('monitor_metric', 'value'), 'numa_membw_values': ('monitor_metric', 'numa_membw_values'), 'timestamp': ('monitor_metric', 'timestamp'), 'source': ('monitor_metric', 'source'), } fields = { 'name': fields.MonitorMetricTypeField(), 'value': fields.IntegerField(), 'numa_membw_values': fields.DictOfIntegersField(nullable=True), 'timestamp': fields.DateTimeField(), 'source': fields.StringField(), } def __init__(self, monitor_metric): super(MetricPayload, self).__init__() self.populate_schema(monitor_metric=monitor_metric) @classmethod def from_monitor_metric_list_obj(cls, monitor_metric_list): """Returns a list of MetricPayload objects based on the passed MonitorMetricList object. """ payloads = [] for monitor_metric in monitor_metric_list: payloads.append(cls(monitor_metric)) return payloads @nova_base.NovaObjectRegistry.register_notification class MetricsPayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'host': fields.StringField(), 'host_ip': fields.StringField(), 'nodename': fields.StringField(), 'metrics': fields.ListOfObjectsField('MetricPayload'), } def __init__(self, host, host_ip, nodename, monitor_metric_list): super(MetricsPayload, self).__init__() self.host = host self.host_ip = host_ip self.nodename = nodename self.metrics = MetricPayload.from_monitor_metric_list_obj( monitor_metric_list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/notifications/objects/request_spec.py0000664000175000017500000003251000000000000023104 0ustar00zuulzuul00000000000000# Copyright 2018 NTT Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
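# NOTE: Illustrative sketch only (an assumption): RequestSpecPayload below is
# the payload carried by the scheduler's select_destinations notifications
# (see SelectDestinationsNotification in scheduler.py) and is built directly
# from a RequestSpec object, e.g.:
#
#   payload = RequestSpecPayload(request_spec)
#
# where ``request_spec`` is a nova.objects.RequestSpec supplied by the caller.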
from nova.notifications.objects import base from nova.notifications.objects import flavor as flavor_payload from nova.notifications.objects import image as image_payload from nova.notifications.objects import server_group as server_group_payload from nova.objects import base as nova_base from nova.objects import fields @nova_base.NovaObjectRegistry.register_notification class RequestSpecPayload(base.NotificationPayloadBase): # Version 1.0: Initial version # Version 1.1: Add force_hosts, force_nodes, ignore_hosts, image_meta, # instance_group, requested_destination, retry, # scheduler_hints and security_groups fields VERSION = '1.1' SCHEMA = { 'ignore_hosts': ('request_spec', 'ignore_hosts'), 'instance_uuid': ('request_spec', 'instance_uuid'), 'project_id': ('request_spec', 'project_id'), 'user_id': ('request_spec', 'user_id'), 'availability_zone': ('request_spec', 'availability_zone'), 'num_instances': ('request_spec', 'num_instances'), 'scheduler_hints': ('request_spec', 'scheduler_hints'), } fields = { 'instance_uuid': fields.UUIDField(), 'project_id': fields.StringField(nullable=True), 'user_id': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'flavor': fields.ObjectField('FlavorPayload', nullable=True), 'force_hosts': fields.StringField(nullable=True), 'force_nodes': fields.StringField(nullable=True), 'ignore_hosts': fields.ListOfStringsField(nullable=True), 'image_meta': fields.ObjectField('ImageMetaPayload', nullable=True), 'instance_group': fields.ObjectField('ServerGroupPayload', nullable=True), 'image': fields.ObjectField('ImageMetaPayload', nullable=True), 'numa_topology': fields.ObjectField('InstanceNUMATopologyPayload', nullable=True), 'pci_requests': fields.ObjectField('InstancePCIRequestsPayload', nullable=True), 'num_instances': fields.IntegerField(default=1), 'requested_destination': fields.ObjectField('DestinationPayload', nullable=True), 'retry': fields.ObjectField('SchedulerRetriesPayload', nullable=True), 'scheduler_hints': fields.DictOfListOfStringsField(nullable=True), 'security_groups': fields.ListOfStringsField(), } def __init__(self, request_spec): super(RequestSpecPayload, self).__init__() self.flavor = flavor_payload.FlavorPayload( request_spec.flavor) if request_spec.obj_attr_is_set( 'flavor') else None self.image = image_payload.ImageMetaPayload( request_spec.image) if request_spec.image else None if request_spec.numa_topology is not None: if not request_spec.numa_topology.obj_attr_is_set('instance_uuid'): request_spec.numa_topology.instance_uuid = ( request_spec.instance_uuid) self.numa_topology = InstanceNUMATopologyPayload( request_spec.numa_topology) else: self.numa_topology = None if request_spec.pci_requests is not None: if not request_spec.pci_requests.obj_attr_is_set('instance_uuid'): request_spec.pci_requests.instance_uuid = ( request_spec.instance_uuid) self.pci_requests = InstancePCIRequestsPayload( request_spec.pci_requests) else: self.pci_requests = None if 'requested_destination' in request_spec \ and request_spec.requested_destination: self.requested_destination = DestinationPayload( destination=request_spec.requested_destination) else: self.requested_destination = None if 'retry' in request_spec and request_spec.retry: self.retry = SchedulerRetriesPayload( retry=request_spec.retry) else: self.retry = None self.security_groups = [ sec_group.identifier for sec_group in request_spec.security_groups] if 'instance_group' in request_spec and request_spec.instance_group: self.instance_group = 
server_group_payload.ServerGroupPayload( group=request_spec.instance_group) else: self.instance_group = None if 'force_hosts' in request_spec and request_spec.force_hosts: self.force_hosts = request_spec.force_hosts[0] else: self.force_hosts = None if 'force_nodes' in request_spec and request_spec.force_nodes: self.force_nodes = request_spec.force_nodes[0] else: self.force_nodes = None self.populate_schema(request_spec=request_spec) @nova_base.NovaObjectRegistry.register_notification class InstanceNUMATopologyPayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' SCHEMA = { 'instance_uuid': ('numa_topology', 'instance_uuid'), 'emulator_threads_policy': ('numa_topology', 'emulator_threads_policy') } fields = { 'instance_uuid': fields.UUIDField(), 'cells': fields.ListOfObjectsField('InstanceNUMACellPayload'), 'emulator_threads_policy': fields.CPUEmulatorThreadsPolicyField( nullable=True) } def __init__(self, numa_topology): super(InstanceNUMATopologyPayload, self).__init__() self.cells = InstanceNUMACellPayload.from_numa_cell_list_obj( numa_topology.cells) self.populate_schema(numa_topology=numa_topology) @nova_base.NovaObjectRegistry.register_notification class InstanceNUMACellPayload(base.NotificationPayloadBase): # Version 1.0: Initial version # Version 1.1: Added pcpuset field # Version 1.2: Added 'mixed' to cpu_policy field VERSION = '1.2' SCHEMA = { 'id': ('numa_cell', 'id'), 'cpuset': ('numa_cell', 'cpuset'), 'pcpuset': ('numa_cell', 'pcpuset'), 'cpuset_reserved': ('numa_cell', 'cpuset_reserved'), 'memory': ('numa_cell', 'memory'), 'pagesize': ('numa_cell', 'pagesize'), 'cpu_pinning_raw': ('numa_cell', 'cpu_pinning_raw'), 'cpu_policy': ('numa_cell', 'cpu_policy'), 'cpu_thread_policy': ('numa_cell', 'cpu_thread_policy'), } fields = { 'id': fields.IntegerField(), 'cpuset': fields.SetOfIntegersField(), 'pcpuset': fields.SetOfIntegersField(), 'cpuset_reserved': fields.SetOfIntegersField(nullable=True), 'memory': fields.IntegerField(), 'pagesize': fields.IntegerField(nullable=True), 'cpu_topology': fields.ObjectField('VirtCPUTopologyPayload', nullable=True), 'cpu_pinning_raw': fields.DictOfIntegersField(nullable=True), 'cpu_policy': fields.CPUAllocationPolicyField(nullable=True), 'cpu_thread_policy': fields.CPUThreadAllocationPolicyField( nullable=True), } def __init__(self, numa_cell): super(InstanceNUMACellPayload, self).__init__() if (numa_cell.obj_attr_is_set('cpu_topology') and numa_cell.cpu_topology is not None): self.cpu_topology = VirtCPUTopologyPayload(numa_cell.cpu_topology) else: self.cpu_topology = None self.populate_schema(numa_cell=numa_cell) @classmethod def from_numa_cell_list_obj(cls, numa_cell_list): """Returns a list of InstanceNUMACellPayload objects based on the passed list of InstanceNUMACell objects. 
""" payloads = [] for numa_cell in numa_cell_list: payloads.append(cls(numa_cell)) return payloads @nova_base.NovaObjectRegistry.register_notification class VirtCPUTopologyPayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' SCHEMA = { 'sockets': ('virt_cpu_topology', 'sockets'), 'cores': ('virt_cpu_topology', 'cores'), 'threads': ('virt_cpu_topology', 'threads'), } fields = { 'sockets': fields.IntegerField(nullable=True, default=1), 'cores': fields.IntegerField(nullable=True, default=1), 'threads': fields.IntegerField(nullable=True, default=1), } def __init__(self, virt_cpu_topology): super(VirtCPUTopologyPayload, self).__init__() self.populate_schema(virt_cpu_topology=virt_cpu_topology) @nova_base.NovaObjectRegistry.register_notification class InstancePCIRequestsPayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' SCHEMA = { 'instance_uuid': ('pci_requests', 'instance_uuid') } fields = { 'instance_uuid': fields.UUIDField(), 'requests': fields.ListOfObjectsField('InstancePCIRequestPayload') } def __init__(self, pci_requests): super(InstancePCIRequestsPayload, self).__init__() self.requests = InstancePCIRequestPayload.from_pci_request_list_obj( pci_requests.requests) self.populate_schema(pci_requests=pci_requests) @nova_base.NovaObjectRegistry.register_notification class InstancePCIRequestPayload(base.NotificationPayloadBase): # Version 1.0: Initial version # Version 1.1: Added 'socket' to numa_policy field VERSION = '1.1' SCHEMA = { 'count': ('pci_request', 'count'), 'spec': ('pci_request', 'spec'), 'alias_name': ('pci_request', 'alias_name'), 'request_id': ('pci_request', 'request_id'), 'numa_policy': ('pci_request', 'numa_policy') } fields = { 'count': fields.IntegerField(), 'spec': fields.ListOfDictOfNullableStringsField(), 'alias_name': fields.StringField(nullable=True), 'request_id': fields.UUIDField(nullable=True), 'numa_policy': fields.PCINUMAAffinityPolicyField(nullable=True) } def __init__(self, pci_request): super(InstancePCIRequestPayload, self).__init__() self.populate_schema(pci_request=pci_request) @classmethod def from_pci_request_list_obj(cls, pci_request_list): """Returns a list of InstancePCIRequestPayload objects based on the passed list of InstancePCIRequest objects. 
""" payloads = [] for pci_request in pci_request_list: payloads.append(cls(pci_request)) return payloads @nova_base.NovaObjectRegistry.register_notification class DestinationPayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' SCHEMA = { 'aggregates': ('destination', 'aggregates'), } fields = { 'host': fields.StringField(), 'node': fields.StringField(nullable=True), 'cell': fields.ObjectField('CellMappingPayload', nullable=True), 'aggregates': fields.ListOfStringsField(nullable=True, default=None), } def __init__(self, destination): super(DestinationPayload, self).__init__() if (destination.obj_attr_is_set('host') and destination.host is not None): self.host = destination.host if (destination.obj_attr_is_set('node') and destination.node is not None): self.node = destination.node if (destination.obj_attr_is_set('cell') and destination.cell is not None): self.cell = CellMappingPayload(destination.cell) self.populate_schema(destination=destination) @nova_base.NovaObjectRegistry.register_notification class SchedulerRetriesPayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' SCHEMA = { 'num_attempts': ('retry', 'num_attempts'), } fields = { 'num_attempts': fields.IntegerField(), 'hosts': fields.ListOfStringsField(), } def __init__(self, retry): super(SchedulerRetriesPayload, self).__init__() self.hosts = [] for compute_node in retry.hosts: self.hosts.append(compute_node.hypervisor_hostname) self.populate_schema(retry=retry) @nova_base.NovaObjectRegistry.register_notification class CellMappingPayload(base.NotificationPayloadBase): # Version 1.0: Initial version # Version 2.0: Remove transport_url and database_connection fields. VERSION = '2.0' SCHEMA = { 'uuid': ('cell', 'uuid'), 'name': ('cell', 'name'), 'disabled': ('cell', 'disabled'), } fields = { 'uuid': fields.UUIDField(), 'name': fields.StringField(nullable=True), 'disabled': fields.BooleanField(default=False), } def __init__(self, cell): super(CellMappingPayload, self).__init__() self.populate_schema(cell=cell) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/notifications/objects/scheduler.py0000664000175000017500000000207100000000000022357 0ustar00zuulzuul00000000000000# Copyright 2017 Ericsson # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from nova.notifications.objects import base from nova.objects import base as nova_base from nova.objects import fields @base.notification_sample('scheduler-select_destinations-start.json') @base.notification_sample('scheduler-select_destinations-end.json') @nova_base.NovaObjectRegistry.register_notification class SelectDestinationsNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('RequestSpecPayload') } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/notifications/objects/server_group.py0000664000175000017500000000511500000000000023125 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.notifications.objects import base from nova.objects import base as nova_base from nova.objects import fields @nova_base.NovaObjectRegistry.register_notification class ServerGroupPayload(base.NotificationPayloadBase): SCHEMA = { 'uuid': ('group', 'uuid'), 'name': ('group', 'name'), 'user_id': ('group', 'user_id'), 'project_id': ('group', 'project_id'), 'policies': ('group', 'policies'), 'members': ('group', 'members'), 'hosts': ('group', 'hosts'), 'policy': ('group', 'policy'), 'rules': ('group', 'rules'), } # Version 1.0: Initial version # Version 1.1: Deprecate policies, add policy and add rules VERSION = '1.1' fields = { 'uuid': fields.UUIDField(), 'name': fields.StringField(nullable=True), 'user_id': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), # NOTE(yikun): policies is deprecated and should # be removed on the next major version bump 'policies': fields.ListOfStringsField(nullable=True), 'members': fields.ListOfStringsField(nullable=True), 'hosts': fields.ListOfStringsField(nullable=True), 'policy': fields.StringField(nullable=True), 'rules': fields.DictOfStringsField(), } def __init__(self, group): super(ServerGroupPayload, self).__init__() # Note: The group is orphaned here to avoid triggering lazy-loading of # the group.hosts field. cgroup = copy.deepcopy(group) cgroup._context = None self.populate_schema(group=cgroup) @base.notification_sample('server_group-add_member.json') @base.notification_sample('server_group-create.json') @base.notification_sample('server_group-delete.json') @nova_base.NovaObjectRegistry.register_notification class ServerGroupNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('ServerGroupPayload') } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/notifications/objects/service.py0000664000175000017500000000506200000000000022044 0ustar00zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.notifications.objects import base from nova.objects import base as nova_base from nova.objects import fields @base.notification_sample('service-create.json') @base.notification_sample('service-update.json') @base.notification_sample('service-delete.json') @nova_base.NovaObjectRegistry.register_notification class ServiceStatusNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('ServiceStatusPayload') } @nova_base.NovaObjectRegistry.register_notification class ServiceStatusPayload(base.NotificationPayloadBase): SCHEMA = { 'host': ('service', 'host'), 'binary': ('service', 'binary'), 'topic': ('service', 'topic'), 'report_count': ('service', 'report_count'), 'disabled': ('service', 'disabled'), 'disabled_reason': ('service', 'disabled_reason'), 'availability_zone': ('service', 'availability_zone'), 'last_seen_up': ('service', 'last_seen_up'), 'forced_down': ('service', 'forced_down'), 'version': ('service', 'version'), 'uuid': ('service', 'uuid') } # Version 1.0: Initial version # Version 1.1: Added uuid field. VERSION = '1.1' fields = { 'host': fields.StringField(nullable=True), 'binary': fields.StringField(nullable=True), 'topic': fields.StringField(nullable=True), 'report_count': fields.IntegerField(), 'disabled': fields.BooleanField(), 'disabled_reason': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'last_seen_up': fields.DateTimeField(nullable=True), 'forced_down': fields.BooleanField(), 'version': fields.IntegerField(), 'uuid': fields.UUIDField() } def __init__(self, service): super(ServiceStatusPayload, self).__init__() self.populate_schema(service=service) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/notifications/objects/volume.py0000664000175000017500000000452500000000000021716 0ustar00zuulzuul00000000000000# Copyright 2018 NTT Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova.notifications.objects import base from nova.objects import base as nova_base from nova.objects import fields @base.notification_sample('volume-usage.json') @nova_base.NovaObjectRegistry.register_notification class VolumeUsageNotification(base.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('VolumeUsagePayload') } @nova_base.NovaObjectRegistry.register_notification class VolumeUsagePayload(base.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' SCHEMA = { 'volume_id': ('vol_usage', 'volume_id'), 'project_id': ('vol_usage', 'project_id'), 'user_id': ('vol_usage', 'user_id'), 'availability_zone': ('vol_usage', 'availability_zone'), 'instance_uuid': ('vol_usage', 'instance_uuid'), 'last_refreshed': ('vol_usage', 'last_refreshed'), 'reads': ('vol_usage', 'reads'), 'read_bytes': ('vol_usage', 'read_bytes'), 'writes': ('vol_usage', 'writes'), 'write_bytes': ('vol_usage', 'write_bytes') } fields = { 'volume_id': fields.UUIDField(), 'project_id': fields.StringField(nullable=True), 'user_id': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'instance_uuid': fields.UUIDField(nullable=True), 'last_refreshed': fields.DateTimeField(nullable=True), 'reads': fields.IntegerField(), 'read_bytes': fields.IntegerField(), 'writes': fields.IntegerField(), 'write_bytes': fields.IntegerField() } def __init__(self, vol_usage): super(VolumeUsagePayload, self).__init__() self.populate_schema(vol_usage=vol_usage) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3896089 nova-32.0.0/nova/objects/0000775000175000017500000000000000000000000015156 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/__init__.py0000664000175000017500000000620000000000000017265 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(comstud): You may scratch your head as you see code that imports # this module and then accesses attributes for objects such as Instance, # etc, yet you do not see these attributes in here. Never fear, there is # a little bit of magic. When objects are registered, an attribute is set # on this module automatically, pointing to the newest/latest version of # the object. def register_all(): # NOTE(danms): You must make sure your object gets imported in this # function in order for it to be registered by services that may # need to receive it via RPC. 
__import__('nova.objects.aggregate') __import__('nova.objects.block_device') __import__('nova.objects.build_request') __import__('nova.objects.cell_mapping') __import__('nova.objects.compute_node') __import__('nova.objects.diagnostics') __import__('nova.objects.console_auth_token') __import__('nova.objects.ec2') __import__('nova.objects.external_event') __import__('nova.objects.flavor') __import__('nova.objects.host_mapping') __import__('nova.objects.hv_spec') __import__('nova.objects.image_meta') __import__('nova.objects.instance') __import__('nova.objects.instance_action') __import__('nova.objects.instance_fault') __import__('nova.objects.instance_group') __import__('nova.objects.instance_info_cache') __import__('nova.objects.instance_mapping') __import__('nova.objects.instance_numa') __import__('nova.objects.instance_pci_requests') __import__('nova.objects.keypair') __import__('nova.objects.migrate_data') __import__('nova.objects.virt_device_metadata') __import__('nova.objects.migration') __import__('nova.objects.migration_context') __import__('nova.objects.monitor_metric') __import__('nova.objects.network_metadata') __import__('nova.objects.network_request') __import__('nova.objects.numa') __import__('nova.objects.pci_device') __import__('nova.objects.pci_device_pool') __import__('nova.objects.request_spec') __import__('nova.objects.tag') __import__('nova.objects.quotas') __import__('nova.objects.resource') __import__('nova.objects.security_group') __import__('nova.objects.selection') __import__('nova.objects.service') __import__('nova.objects.task_log') __import__('nova.objects.trusted_certs') __import__('nova.objects.vcpu_model') __import__('nova.objects.virt_cpu_topology') __import__('nova.objects.virtual_interface') __import__('nova.objects.volume_usage') __import__('nova.objects.share_mapping') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/aggregate.py0000664000175000017500000005436700000000000017475 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
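# NOTE: Illustrative sketch only (an assumption): the Aggregate object defined
# below follows the usual versioned-object pattern of setting fields and then
# persisting through the remotable create()/save()/update_metadata() methods,
# e.g.:
#
#   agg = Aggregate(context=ctxt, name='rack-1')
#   agg.create()
#   agg.update_metadata({'availability_zone': 'az1'})
#
# where ``ctxt`` is a RequestContext supplied by the caller.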
from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils from sqlalchemy import orm from nova.compute import utils as compute_utils from nova.db.api import api as api_db_api from nova.db.api import models as api_models from nova import exception from nova.i18n import _ from nova import objects from nova.objects import base from nova.objects import fields LOG = logging.getLogger(__name__) DEPRECATED_FIELDS = ['deleted', 'deleted_at'] @api_db_api.context_manager.reader def _aggregate_get_from_db(context, aggregate_id): query = context.session.query(api_models.Aggregate).\ options(orm.joinedload(api_models.Aggregate._hosts)).\ options(orm.joinedload(api_models.Aggregate._metadata)) query = query.filter(api_models.Aggregate.id == aggregate_id) aggregate = query.first() if not aggregate: raise exception.AggregateNotFound(aggregate_id=aggregate_id) return aggregate @api_db_api.context_manager.reader def _aggregate_get_from_db_by_uuid(context, aggregate_uuid): query = context.session.query(api_models.Aggregate).\ options(orm.joinedload(api_models.Aggregate._hosts)).\ options(orm.joinedload(api_models.Aggregate._metadata)) query = query.filter(api_models.Aggregate.uuid == aggregate_uuid) aggregate = query.first() if not aggregate: raise exception.AggregateNotFound(aggregate_id=aggregate_uuid) return aggregate def _host_add_to_db(context, aggregate_id, host): try: with api_db_api.context_manager.writer.using(context): # Check to see if the aggregate exists _aggregate_get_from_db(context, aggregate_id) host_ref = api_models.AggregateHost() host_ref.update({"host": host, "aggregate_id": aggregate_id}) host_ref.save(context.session) return host_ref except db_exc.DBDuplicateEntry: raise exception.AggregateHostExists(host=host, aggregate_id=aggregate_id) def _host_delete_from_db(context, aggregate_id, host): count = 0 with api_db_api.context_manager.writer.using(context): # Check to see if the aggregate exists _aggregate_get_from_db(context, aggregate_id) query = context.session.query(api_models.AggregateHost) query = query.filter(api_models.AggregateHost.aggregate_id == aggregate_id) count = query.filter_by(host=host).delete() if count == 0: raise exception.AggregateHostNotFound(aggregate_id=aggregate_id, host=host) def _metadata_add_to_db(context, aggregate_id, metadata, max_retries=10, set_delete=False): all_keys = metadata.keys() for attempt in range(max_retries): try: with api_db_api.context_manager.writer.using(context): query = context.session.query(api_models.AggregateMetadata).\ filter_by(aggregate_id=aggregate_id) if set_delete: query.filter(~api_models.AggregateMetadata.key. in_(all_keys)).\ delete(synchronize_session=False) already_existing_keys = set() if all_keys: query = query.filter( api_models.AggregateMetadata.key.in_(all_keys)) for meta_ref in query.all(): key = meta_ref.key try: meta_ref.update({"value": metadata[key]}) already_existing_keys.add(key) except KeyError: # NOTE(ratailor): When user tries updating # metadata using case-sensitive key, we get # KeyError. 
raise exception.AggregateMetadataKeyExists( aggregate_id=aggregate_id, key=key) new_entries = [] for key, value in metadata.items(): if key in already_existing_keys: continue new_entries.append({"key": key, "value": value, "aggregate_id": aggregate_id}) if new_entries: context.session.execute( api_models.AggregateMetadata.__table__.insert(), new_entries) return metadata except db_exc.DBDuplicateEntry: # a concurrent transaction has been committed, # try again unless this was the last attempt with excutils.save_and_reraise_exception() as ctxt: if attempt < max_retries - 1: ctxt.reraise = False else: msg = _("Add metadata failed for aggregate %(id)s " "after %(retries)s retries") % \ {"id": aggregate_id, "retries": max_retries} LOG.warning(msg) @api_db_api.context_manager.writer def _metadata_delete_from_db(context, aggregate_id, key): # Check to see if the aggregate exists _aggregate_get_from_db(context, aggregate_id) query = context.session.query(api_models.AggregateMetadata) query = query.filter(api_models.AggregateMetadata.aggregate_id == aggregate_id) count = query.filter_by(key=key).delete() if count == 0: raise exception.AggregateMetadataNotFound( aggregate_id=aggregate_id, metadata_key=key) @api_db_api.context_manager.writer def _aggregate_create_in_db(context, values, metadata=None): query = context.session.query(api_models.Aggregate) query = query.filter(api_models.Aggregate.name == values['name']) aggregate = query.first() if not aggregate: aggregate = api_models.Aggregate() aggregate.update(values) aggregate.save(context.session) # We don't want these to be lazy loaded later. We know there is # nothing here since we just created this aggregate. aggregate._hosts = [] aggregate._metadata = [] else: raise exception.AggregateNameExists(aggregate_name=values['name']) if metadata: _metadata_add_to_db(context, aggregate.id, metadata) context.session.expire(aggregate, ['_metadata']) aggregate._metadata return aggregate @api_db_api.context_manager.writer def _aggregate_delete_from_db(context, aggregate_id): # Delete Metadata first context.session.query(api_models.AggregateMetadata).\ filter_by(aggregate_id=aggregate_id).\ delete() count = context.session.query(api_models.Aggregate).\ filter(api_models.Aggregate.id == aggregate_id).\ delete() if count == 0: raise exception.AggregateNotFound(aggregate_id=aggregate_id) @api_db_api.context_manager.writer def _aggregate_update_to_db(context, aggregate_id, values): aggregate = _aggregate_get_from_db(context, aggregate_id) set_delete = True if "availability_zone" in values: az = values.pop('availability_zone') if 'metadata' not in values: values['metadata'] = {'availability_zone': az} set_delete = False else: values['metadata']['availability_zone'] = az metadata = values.get('metadata') if metadata is not None: _metadata_add_to_db(context, aggregate_id, values.pop('metadata'), set_delete=set_delete) aggregate.update(values) try: aggregate.save(context.session) except db_exc.DBDuplicateEntry: if 'name' in values: raise exception.AggregateNameExists( aggregate_name=values['name']) else: raise return _aggregate_get_from_db(context, aggregate_id) @base.NovaObjectRegistry.register class Aggregate(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version # Version 1.1: String attributes updated to support unicode # Version 1.2: Added uuid field # Version 1.3: Added get_by_uuid method VERSION = '1.3' fields = { 'id': fields.IntegerField(), 'uuid': fields.UUIDField(nullable=False), 'name': fields.StringField(), 'hosts': 
fields.ListOfStringsField(nullable=True), 'metadata': fields.DictOfStringsField(nullable=True), } obj_extra_fields = ['availability_zone'] @staticmethod def _from_db_object(context, aggregate, db_aggregate): for key in aggregate.fields: if key == 'metadata': db_key = 'metadetails' elif key in DEPRECATED_FIELDS and key not in db_aggregate: continue else: db_key = key setattr(aggregate, key, db_aggregate[db_key]) # NOTE: This can be removed when we bump Aggregate to v2.0 aggregate.deleted_at = None aggregate.deleted = False aggregate._context = context aggregate.obj_reset_changes() return aggregate def _assert_no_hosts(self, action): if 'hosts' in self.obj_what_changed(): raise exception.ObjectActionError( action=action, reason='hosts updated inline') @base.remotable_classmethod def get_by_id(cls, context, aggregate_id): db_aggregate = _aggregate_get_from_db(context, aggregate_id) return cls._from_db_object(context, cls(), db_aggregate) @base.remotable_classmethod def get_by_uuid(cls, context, aggregate_uuid): db_aggregate = _aggregate_get_from_db_by_uuid(context, aggregate_uuid) return cls._from_db_object(context, cls(), db_aggregate) @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') self._assert_no_hosts('create') updates = self.obj_get_changes() payload = dict(updates) if 'metadata' in updates: # NOTE(danms): For some reason the notification format is weird payload['meta_data'] = payload.pop('metadata') if 'uuid' not in updates: updates['uuid'] = uuidutils.generate_uuid() self.uuid = updates['uuid'] LOG.debug('Generated uuid %(uuid)s for aggregate', dict(uuid=updates['uuid'])) compute_utils.notify_about_aggregate_update(self._context, "create.start", payload) compute_utils.notify_about_aggregate_action( context=self._context, aggregate=self, action=fields.NotificationAction.CREATE, phase=fields.NotificationPhase.START) metadata = updates.pop('metadata', None) db_aggregate = _aggregate_create_in_db(self._context, updates, metadata=metadata) self._from_db_object(self._context, self, db_aggregate) payload['aggregate_id'] = self.id compute_utils.notify_about_aggregate_update(self._context, "create.end", payload) compute_utils.notify_about_aggregate_action( context=self._context, aggregate=self, action=fields.NotificationAction.CREATE, phase=fields.NotificationPhase.END) @base.remotable def save(self): self._assert_no_hosts('save') updates = self.obj_get_changes() payload = {'aggregate_id': self.id} if 'metadata' in updates: payload['meta_data'] = updates['metadata'] compute_utils.notify_about_aggregate_update(self._context, "updateprop.start", payload) compute_utils.notify_about_aggregate_action( context=self._context, aggregate=self, action=fields.NotificationAction.UPDATE_PROP, phase=fields.NotificationPhase.START) updates.pop('id', None) db_aggregate = _aggregate_update_to_db(self._context, self.id, updates) compute_utils.notify_about_aggregate_update(self._context, "updateprop.end", payload) compute_utils.notify_about_aggregate_action( context=self._context, aggregate=self, action=fields.NotificationAction.UPDATE_PROP, phase=fields.NotificationPhase.END) self._from_db_object(self._context, self, db_aggregate) @base.remotable def update_metadata(self, updates): payload = {'aggregate_id': self.id, 'meta_data': updates} compute_utils.notify_about_aggregate_update(self._context, "updatemetadata.start", payload) compute_utils.notify_about_aggregate_action( context=self._context, aggregate=self, 
action=fields.NotificationAction.UPDATE_METADATA, phase=fields.NotificationPhase.START) to_add = {} for key, value in updates.items(): if value is None: try: _metadata_delete_from_db(self._context, self.id, key) except exception.AggregateMetadataNotFound: pass try: self.metadata.pop(key) except KeyError: pass else: to_add[key] = value self.metadata[key] = value _metadata_add_to_db(self._context, self.id, to_add) compute_utils.notify_about_aggregate_update(self._context, "updatemetadata.end", payload) compute_utils.notify_about_aggregate_action( context=self._context, aggregate=self, action=fields.NotificationAction.UPDATE_METADATA, phase=fields.NotificationPhase.END) self.obj_reset_changes(fields=['metadata']) @base.remotable def destroy(self): _aggregate_delete_from_db(self._context, self.id) @base.remotable def add_host(self, host): _host_add_to_db(self._context, self.id, host) if self.hosts is None: self.hosts = [] self.hosts.append(host) self.obj_reset_changes(fields=['hosts']) @base.remotable def delete_host(self, host): _host_delete_from_db(self._context, self.id, host) self.hosts.remove(host) self.obj_reset_changes(fields=['hosts']) @property def availability_zone(self): return self.metadata.get('availability_zone', None) @api_db_api.context_manager.reader def _get_all_from_db(context): query = context.session.query(api_models.Aggregate).\ options(orm.joinedload(api_models.Aggregate._hosts)).\ options(orm.joinedload(api_models.Aggregate._metadata)) return query.all() @api_db_api.context_manager.reader def _get_by_host_from_db(context, host, key=None): query = context.session.query(api_models.Aggregate).\ options(orm.joinedload(api_models.Aggregate._hosts)).\ options(orm.joinedload(api_models.Aggregate._metadata)) query = query.join(api_models.Aggregate._hosts) query = query.filter(api_models.AggregateHost.host == host) if key: query = query.join(api_models.Aggregate._metadata).filter( api_models.AggregateMetadata.key == key) return query.all() @api_db_api.context_manager.reader def _get_by_metadata_from_db(context, key=None, value=None): assert key is not None or value is not None query = context.session.query(api_models.Aggregate) query = query.join(api_models.Aggregate._metadata) if key is not None: query = query.filter(api_models.AggregateMetadata.key == key) if value is not None: query = query.filter(api_models.AggregateMetadata.value == value) query = query.options( orm.contains_eager(api_models.Aggregate._metadata) ) query = query.options(orm.joinedload(api_models.Aggregate._hosts)) return query.all() @api_db_api.context_manager.reader def _get_non_matching_by_metadata_keys_from_db(context, ignored_keys, key_prefix, value): """Filter aggregates based on non matching metadata. Find aggregates with at least one ${key_prefix}*[=${value}] metadata where the metadata key are not in the ignored_keys list. :return: Aggregates with any metadata entry: - whose key starts with `key_prefix`; and - whose value is `value` and - whose key is *not* in the `ignored_keys` list. 
""" if not key_prefix: raise ValueError(_('key_prefix mandatory field.')) query = context.session.query(api_models.Aggregate) query = query.join(api_models.Aggregate._metadata) query = query.filter(api_models.AggregateMetadata.value == value) query = query.filter(api_models.AggregateMetadata.key.like( key_prefix + '%')) if len(ignored_keys) > 0: query = query.filter( ~api_models.AggregateMetadata.key.in_(ignored_keys) ) query = query.options( orm.contains_eager(api_models.Aggregate._metadata) ) query = query.options(orm.joinedload(api_models.Aggregate._hosts)) return query.all() @base.NovaObjectRegistry.register class AggregateList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Version 1.1: Added key argument to get_by_host() # Aggregate <= version 1.1 # Version 1.2: Added get_by_metadata_key # Version 1.3: Added get_by_metadata VERSION = '1.3' fields = { 'objects': fields.ListOfObjectsField('Aggregate'), } @classmethod def _filter_db_aggregates(cls, db_aggregates, hosts): if not isinstance(hosts, set): hosts = set(hosts) filtered_aggregates = [] for db_aggregate in db_aggregates: for host in db_aggregate['hosts']: if host in hosts: filtered_aggregates.append(db_aggregate) break return filtered_aggregates @base.remotable_classmethod def get_all(cls, context): db_aggregates = _get_all_from_db(context) return base.obj_make_list(context, cls(context), objects.Aggregate, db_aggregates) @base.remotable_classmethod def get_by_host(cls, context, host, key=None): db_aggregates = _get_by_host_from_db(context, host, key=key) return base.obj_make_list(context, cls(context), objects.Aggregate, db_aggregates) @base.remotable_classmethod def get_by_metadata_key(cls, context, key, hosts=None): db_aggregates = _get_by_metadata_from_db(context, key=key) if hosts is not None: db_aggregates = cls._filter_db_aggregates(db_aggregates, hosts) return base.obj_make_list(context, cls(context), objects.Aggregate, db_aggregates) @base.remotable_classmethod def get_by_metadata(cls, context, key=None, value=None): """Return aggregates with a metadata key set to value. This returns a list of all aggregates that have a metadata key set to some value. If key is specified, then only values for that key will qualify. """ db_aggregates = _get_by_metadata_from_db(context, key=key, value=value) return base.obj_make_list(context, cls(context), objects.Aggregate, db_aggregates) @classmethod def get_non_matching_by_metadata_keys(cls, context, ignored_keys, key_prefix, value): """Return aggregates that are not matching with metadata. For example, we have aggregates with metadata as below: 'agg1' with trait:HW_CPU_X86_MMX="required" 'agg2' with trait:HW_CPU_X86_SGX="required" 'agg3' with trait:HW_CPU_X86_MMX="required" 'agg3' with trait:HW_CPU_X86_SGX="required" Assume below request: aggregate_obj.AggregateList.get_non_matching_by_metadata_keys( self.context, ['trait:HW_CPU_X86_MMX'], 'trait:', value='required') It will return 'agg2' and 'agg3' as aggregates that are not matching with metadata. :param context: The security context :param ignored_keys: List of keys to match with the aggregate metadata keys that starts with key_prefix. :param key_prefix: Only compares metadata keys that starts with the key_prefix :param value: Value of metadata :returns: List of aggregates that doesn't match metadata keys that starts with key_prefix with the supplied keys. 
""" db_aggregates = _get_non_matching_by_metadata_keys_from_db( context, ignored_keys, key_prefix, value) return base.obj_make_list(context, objects.AggregateList(context), objects.Aggregate, db_aggregates) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/base.py0000664000175000017500000004105400000000000016446 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Nova common internal object model""" import contextlib import datetime import functools import traceback import netaddr from oslo_log import log as logging import oslo_messaging as messaging from oslo_utils import versionutils from oslo_versionedobjects import base as ovoo_base from oslo_versionedobjects import exception as ovoo_exc from nova import exception from nova import objects from nova.objects import fields as obj_fields from nova import utils LOG = logging.getLogger(__name__) def all_things_equal(obj_a, obj_b): if obj_b is None: return False for name in obj_a.fields: set_a = name in obj_a set_b = name in obj_b if set_a != set_b: return False elif not set_a: continue if getattr(obj_a, name) != getattr(obj_b, name): return False return True def get_attrname(name): """Return the mangled name of the attribute's underlying storage.""" # FIXME(danms): This is just until we use o.vo's class properties # and object base. return '_obj_' + name def raise_on_too_new_values(version, primitive, field, new_values): value = primitive.get(field, None) if value in new_values: raise exception.ObjectActionError( action='obj_make_compatible', reason='%s=%s not supported in version %s' % (field, value, version)) class NovaObjectRegistry(ovoo_base.VersionedObjectRegistry): notification_classes = [] def registration_hook(self, cls, index): # NOTE(danms): This is called when an object is registered, # and is responsible for maintaining nova.objects.$OBJECT # as the highest-versioned implementation of a given object. version = versionutils.convert_version_to_tuple(cls.VERSION) if not hasattr(objects, cls.obj_name()): setattr(objects, cls.obj_name(), cls) else: cur_version = versionutils.convert_version_to_tuple( getattr(objects, cls.obj_name()).VERSION) if version >= cur_version: setattr(objects, cls.obj_name(), cls) @classmethod def register_notification(cls, notification_cls): """Register a class as notification. Use only to register concrete notification or payload classes, do not register base classes intended for inheritance only. """ cls.register_if(False)(notification_cls) cls.notification_classes.append(notification_cls) return notification_cls @classmethod def register_notification_objects(cls): """Register previously decorated notification as normal ovos. This is not intended for production use but only for testing and document generation purposes. 
""" for notification_cls in cls.notification_classes: cls.register(notification_cls) remotable_classmethod = ovoo_base.remotable_classmethod remotable = ovoo_base.remotable obj_make_list = ovoo_base.obj_make_list NovaObjectDictCompat = ovoo_base.VersionedObjectDictCompat NovaTimestampObject = ovoo_base.TimestampedObject def object_id(obj): """Try to get a stable identifier for an object""" if 'uuid' in obj: ident = obj.uuid elif 'id' in obj: ident = obj.id else: ident = 'anonymous' return '%s<%s>' % (obj.obj_name(), ident) def lazy_load_counter(fn): """Increment lazy-load counter and warn if over threshold""" @functools.wraps(fn) def wrapper(self, attrname): try: return fn(self, attrname) finally: if self._lazy_loads is None: self._lazy_loads = [] self._lazy_loads.append(attrname) if len(self._lazy_loads) > 1: LOG.debug('Object %s lazy-loaded attributes: %s', object_id(self), ','.join(self._lazy_loads)) return wrapper class NovaObject(ovoo_base.VersionedObject): """Base class and object factory. This forms the base of all objects that can be remoted or instantiated via RPC. Simply defining a class that inherits from this base class will make it remotely instantiatable. Objects should implement the necessary "get" classmethod routines as well as "save" object methods as appropriate. """ OBJ_SERIAL_NAMESPACE = 'nova_object' OBJ_PROJECT_NAMESPACE = 'nova' # Keep a running tally of how many times we've lazy-loaded on this object # so we can warn if it happens too often. This is not serialized or part # of the object that goes over the wire, so it is limited to a single # service, which is fine for what we need. _lazy_loads = None # NOTE(ndipanov): This is nova-specific @staticmethod def should_migrate_data(): """A check that can be used to inhibit online migration behavior This is usually used to check if all services that will be accessing the db directly are ready for the new format. """ raise NotImplementedError() # NOTE(danms): This is nova-specific @contextlib.contextmanager def obj_alternate_context(self, context): original_context = self._context self._context = context try: yield finally: self._context = original_context class NovaPersistentObject(object): """Mixin class for Persistent objects. This adds the fields that we use in common for most persistent objects. """ fields = { 'created_at': obj_fields.DateTimeField(nullable=True), 'updated_at': obj_fields.DateTimeField(nullable=True), 'deleted_at': obj_fields.DateTimeField(nullable=True), 'deleted': obj_fields.BooleanField(default=False), } # NOTE(danms): This is copied from oslo.versionedobjects ahead of # a release. Do not use it directly or modify it. # TODO(danms): Remove this when we can get it from oslo.versionedobjects class EphemeralObject(object): """Mix-in to provide more recognizable field defaulting. If an object should have all fields with a default= set to those values during instantiation, inherit from this class. The base VersionedObject class is designed in such a way that all fields are optional, which makes sense when representing a remote database row where not all columns are transported across RPC and not all columns should be set during an update operation. This is why fields with default= are not set implicitly during object instantiation, to avoid clobbering existing fields in the database. However, objects based on VersionedObject are also used to represent all-or-nothing blobs stored in the database, or even used purely in RPC to represent things that are not ever stored in the database. 
Thus, this mix-in is provided for these latter object use cases where the desired behavior is to always have default= fields be set at __init__ time. """ def __init__(self, *args, **kwargs): super(EphemeralObject, self).__init__(*args, **kwargs) # Not specifying any fields causes all defaulted fields to be set self.obj_set_defaults() class NovaEphemeralObject(EphemeralObject, NovaObject): """Base class for objects that are not row-column in the DB. Objects that are used purely over RPC (i.e. not persisted) or are written to the database in blob form or otherwise do not represent rows directly as fields should inherit from this object. The principal difference is that fields with a default value will be set at __init__ time instead of requiring manual intervention. """ pass class ObjectListBase(ovoo_base.ObjectListBase): # NOTE(danms): These are for transition to using the oslo # base object and can be removed when we move to it. @classmethod def _obj_primitive_key(cls, field): return 'nova_object.%s' % field @classmethod def _obj_primitive_field(cls, primitive, field, default=obj_fields.UnspecifiedDefault): key = cls._obj_primitive_key(field) if default == obj_fields.UnspecifiedDefault: return primitive[key] else: return primitive.get(key, default) class NovaObjectSerializer(messaging.NoOpSerializer): """A NovaObject-aware Serializer. This implements the Oslo Serializer interface and provides the ability to serialize and deserialize NovaObject entities. Any service that needs to accept or return NovaObjects as arguments or result values should pass this to its RPCClient and RPCServer objects. """ @property def conductor(self): if not hasattr(self, '_conductor'): from nova import conductor self._conductor = conductor.API() return self._conductor def _process_object(self, context, objprim): try: objinst = NovaObject.obj_from_primitive(objprim, context=context) except ovoo_exc.IncompatibleObjectVersion: objver = objprim['nova_object.version'] if objver.count('.') == 2: # NOTE(danms): For our purposes, the .z part of the version # should be safe to accept without requiring a backport objprim['nova_object.version'] = \ '.'.join(objver.split('.')[:2]) return self._process_object(context, objprim) objname = objprim['nova_object.name'] version_manifest = ovoo_base.obj_tree_get_versions(objname) if objname in version_manifest: objinst = self.conductor.object_backport_versions( context, objprim, version_manifest) else: raise return objinst def _process_iterable(self, context, action_fn, values): """Process an iterable, taking an action on each value. :param:context: Request context :param:action_fn: Action to take on each item in values :param:values: Iterable container of things to take action on :returns: A new container of the same type (except set) with items from values having had action applied. """ iterable = values.__class__ if issubclass(iterable, dict): return iterable(**{k: action_fn(context, v) for k, v in values.items()}) else: # NOTE(danms, gibi) A set can't have an unhashable value inside, # such as a dict. Convert the set to list, which is fine, since we # can't send them over RPC anyway. We convert it to list as this # way there will be no semantic change between the fake rpc driver # used in functional test and a normal rpc driver. 
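            # Illustrative sketch (not upstream code): serializing a set of
            # objects therefore yields a list of primitives, e.g.
            #     serializer.serialize_entity(ctxt, {obj_a, obj_b})
            # returns [obj_a_primitive, obj_b_primitive], and the same list
            # shape is produced again by deserialize_entity on the far side
            # of the RPC call.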
if iterable == set: iterable = list return iterable([action_fn(context, value) for value in values]) def serialize_entity(self, context, entity): if isinstance(entity, (tuple, list, set, dict)): entity = self._process_iterable(context, self.serialize_entity, entity) elif (hasattr(entity, 'obj_to_primitive') and callable(entity.obj_to_primitive)): entity = entity.obj_to_primitive() return entity def deserialize_entity(self, context, entity): if isinstance(entity, dict) and 'nova_object.name' in entity: entity = self._process_object(context, entity) elif isinstance(entity, (tuple, list, set, dict)): entity = self._process_iterable(context, self.deserialize_entity, entity) return entity def obj_to_primitive(obj): """Recursively turn an object into a python primitive. A NovaObject becomes a dict, and anything that implements ObjectListBase becomes a list. """ if isinstance(obj, ObjectListBase): return [obj_to_primitive(x) for x in obj] elif isinstance(obj, NovaObject): result = {} for key in obj.obj_fields: if obj.obj_attr_is_set(key) or key in obj.obj_extra_fields: result[key] = obj_to_primitive(getattr(obj, key)) return result elif isinstance(obj, netaddr.IPAddress): return str(obj) elif isinstance(obj, netaddr.IPNetwork): return str(obj) else: return obj def obj_make_dict_of_lists(context, list_cls, obj_list, item_key): """Construct a dictionary of object lists, keyed by item_key. :param:context: Request context :param:list_cls: The ObjectListBase class :param:obj_list: The list of objects to place in the dictionary :param:item_key: The object attribute name to use as a dictionary key """ obj_lists = {} for obj in obj_list: key = getattr(obj, item_key) if key not in obj_lists: obj_lists[key] = list_cls() obj_lists[key].objects = [] obj_lists[key].objects.append(obj) for key in obj_lists: obj_lists[key]._context = context obj_lists[key].obj_reset_changes() return obj_lists def serialize_args(fn): """Decorator that will do the arguments serialization before remoting.""" def wrapper(obj, *args, **kwargs): args = [utils.strtime(arg) if isinstance(arg, datetime.datetime) else arg for arg in args] for k, v in kwargs.items(): if k == 'exc_val' and v: try: # NOTE(danms): When we run this for a remotable method, # we need to attempt to format_message() the exception to # get the sanitized message, and if it's not a # NovaException, fall back to just the exception class # name. However, a remotable will end up calling this again # on the other side of the RPC call, so we must not try # to do that again, otherwise we will always end up with # just str. So, only do that if exc_val is an Exception # class. kwargs[k] = (v.format_message() if isinstance(v, Exception) else v) except Exception: kwargs[k] = v.__class__.__name__ elif k == 'exc_tb' and v and not isinstance(v, str): kwargs[k] = ''.join(traceback.format_tb(v)) elif isinstance(v, datetime.datetime): kwargs[k] = utils.strtime(v) if hasattr(fn, '__call__'): return fn(obj, *args, **kwargs) # NOTE(danms): We wrap a descriptor, so use that protocol return fn.__get__(None, obj)(*args, **kwargs) # NOTE(danms): Make this discoverable wrapper.remotable = getattr(fn, 'remotable', False) wrapper.original_fn = fn return (functools.wraps(fn)(wrapper) if hasattr(fn, '__call__') else classmethod(wrapper)) def obj_equal_prims(obj_1, obj_2, ignore=None): """Compare two primitives for equivalence ignoring some keys. This operation tests the primitives of two objects for equivalence. 
Object primitives may contain a list identifying fields that have been changed - this is ignored in the comparison. The ignore parameter lists any other keys to be ignored. :param:obj1: The first object in the comparison :param:obj2: The second object in the comparison :param:ignore: A list of fields to ignore :returns: True if the primitives are equal ignoring changes and specified fields, otherwise False. """ def _strip(prim, keys): if isinstance(prim, dict): for k in keys: prim.pop(k, None) for v in prim.values(): _strip(v, keys) if isinstance(prim, list): for v in prim: _strip(v, keys) return prim if ignore is not None: keys = ['nova_object.changes'] + ignore else: keys = ['nova_object.changes'] prim_1 = _strip(obj_1.obj_to_primitive(), keys) prim_2 = _strip(obj_2.obj_to_primitive(), keys) return prim_1 == prim_2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/block_device.py0000664000175000017500000004655500000000000020160 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import api as oslo_db_api from oslo_db.sqlalchemy import update_match from oslo_log import log as logging from oslo_utils import uuidutils from oslo_utils import versionutils from nova import block_device from nova.db.main import api as db from nova.db.main import models as db_models from nova import exception from nova.i18n import _ from nova import objects from nova.objects import base from nova.objects import fields LOG = logging.getLogger(__name__) _BLOCK_DEVICE_OPTIONAL_JOINED_FIELD = ['instance'] BLOCK_DEVICE_OPTIONAL_ATTRS = _BLOCK_DEVICE_OPTIONAL_JOINED_FIELD def _expected_cols(expected_attrs): return [attr for attr in expected_attrs if attr in _BLOCK_DEVICE_OPTIONAL_JOINED_FIELD] # TODO(berrange): Remove NovaObjectDictCompat @base.NovaObjectRegistry.register class BlockDeviceMapping(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Add instance_uuid to get_by_volume_id method # Version 1.2: Instance version 1.14 # Version 1.3: Instance version 1.15 # Version 1.4: Instance version 1.16 # Version 1.5: Instance version 1.17 # Version 1.6: Instance version 1.18 # Version 1.7: Add update_or_create method # Version 1.8: Instance version 1.19 # Version 1.9: Instance version 1.20 # Version 1.10: Changed source_type field to BlockDeviceSourceTypeField. # Version 1.11: Changed destination_type field to # BlockDeviceDestinationTypeField. # Version 1.12: Changed device_type field to BlockDeviceTypeField. 
# Version 1.13: Instance version 1.21 # Version 1.14: Instance version 1.22 # Version 1.15: Instance version 1.23 # Version 1.16: Deprecate get_by_volume_id(), add # get_by_volume() and get_by_volume_and_instance() # Version 1.17: Added tag field # Version 1.18: Added attachment_id # Version 1.19: Added uuid # Version 1.20: Added volume_type # Version 1.21: Added encrypted, encryption_secret_uuid, encryption_format # and encryption_options VERSION = '1.21' fields = { 'id': fields.IntegerField(), 'uuid': fields.UUIDField(), 'instance_uuid': fields.UUIDField(), 'instance': fields.ObjectField('Instance', nullable=True), 'source_type': fields.BlockDeviceSourceTypeField(nullable=True), 'destination_type': fields.BlockDeviceDestinationTypeField( nullable=True), 'guest_format': fields.StringField(nullable=True), 'device_type': fields.BlockDeviceTypeField(nullable=True), 'disk_bus': fields.StringField(nullable=True), 'boot_index': fields.IntegerField(nullable=True), 'device_name': fields.StringField(nullable=True), 'delete_on_termination': fields.BooleanField(default=False), 'snapshot_id': fields.StringField(nullable=True), 'volume_id': fields.StringField(nullable=True), 'volume_size': fields.IntegerField(nullable=True), 'image_id': fields.StringField(nullable=True), 'no_device': fields.BooleanField(default=False), 'connection_info': fields.SensitiveStringField(nullable=True), 'tag': fields.StringField(nullable=True), 'attachment_id': fields.UUIDField(nullable=True), # volume_type field can be a volume type name or ID(UUID). 'volume_type': fields.StringField(nullable=True), 'encrypted': fields.BooleanField(default=False), 'encryption_secret_uuid': fields.UUIDField(nullable=True), 'encryption_format': fields.BlockDeviceEncryptionFormatTypeField( nullable=True), 'encryption_options': fields.StringField(nullable=True), } def obj_make_compatible(self, primitive, target_version): target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 21): primitive.pop('encrypted', None) primitive.pop('encryption_secret_uuid', None) primitive.pop('encryption_format', None) primitive.pop('encryption_options', None) if target_version < (1, 20) and 'volume_type' in primitive: del primitive['volume_type'] if target_version < (1, 19) and 'uuid' in primitive: del primitive['uuid'] if target_version < (1, 18) and 'attachment_id' in primitive: del primitive['attachment_id'] if target_version < (1, 17) and 'tag' in primitive: del primitive['tag'] @classmethod def populate_uuids(cls, context, max_count): @db.pick_context_manager_reader def get_bdms_no_uuid(context): return context.session.query(db_models.BlockDeviceMapping).\ filter_by(uuid=None).limit(max_count).all() db_bdms = get_bdms_no_uuid(context) done = 0 for db_bdm in db_bdms: cls._create_uuid(context, db_bdm['id']) done += 1 return done, done @staticmethod @oslo_db_api.wrap_db_retry(max_retries=1, retry_on_deadlock=True) def _create_uuid(context, bdm_id): # NOTE(mdbooth): This method is only required until uuid is made # non-nullable in a future release. # NOTE(mdbooth): We wrap this method in a retry loop because it can # fail (safely) on multi-master galera if concurrent updates happen on # different masters. It will never fail on single-master. We can only # ever need one retry. uuid = uuidutils.generate_uuid() values = {'uuid': uuid} compare = db_models.BlockDeviceMapping(id=bdm_id, uuid=None) # NOTE(mdbooth): We explicitly use an independent transaction context # here so as not to fail if: # 1. We retry. # 2. 
We're in a read transaction.This is an edge case of what's # normally a read operation. Forcing everything (transitively) # which reads a BDM to be in a write transaction for a narrow # temporary edge case is undesirable. tctxt = db.get_context_manager(context).writer.independent with tctxt.using(context): query = context.session.query(db_models.BlockDeviceMapping).\ filter_by(id=bdm_id) try: query.update_on_match(compare, 'id', values) except update_match.NoRowsMatched: # We can only get here if we raced, and another writer already # gave this bdm a uuid result = query.one() uuid = result['uuid'] assert uuid is not None return uuid @classmethod def _from_db_object(cls, context, block_device_obj, db_block_device, expected_attrs=None): if expected_attrs is None: expected_attrs = [] for key in block_device_obj.fields: if key in BLOCK_DEVICE_OPTIONAL_ATTRS: continue if key == 'uuid' and not db_block_device.get(key): # NOTE(danms): While the records could be nullable, # generate a UUID on read since the object requires it bdm_id = db_block_device['id'] db_block_device[key] = cls._create_uuid(context, bdm_id) block_device_obj[key] = db_block_device[key] if 'instance' in expected_attrs: my_inst = objects.Instance(context) my_inst._from_db_object(context, my_inst, db_block_device['instance']) block_device_obj.instance = my_inst block_device_obj._context = context block_device_obj.obj_reset_changes() return block_device_obj def _create(self, context, update_or_create=False): """Create the block device record in the database. In case the id field is set on the object, and if the instance is set raise an ObjectActionError. Resets all the changes on the object. Returns None :param context: security context used for database calls :param update_or_create: consider existing block devices for the instance based on the device name and swap, and only update the ones that match. Normally only used when creating the instance for the first time. 
""" if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') updates = self.obj_get_changes() if 'instance' in updates: raise exception.ObjectActionError(action='create', reason='instance assigned') if update_or_create: db_bdm = db.block_device_mapping_update_or_create( context, updates, legacy=False) else: db_bdm = db.block_device_mapping_create( context, updates, legacy=False) self._from_db_object(context, self, db_bdm) @base.remotable def create(self): self._create(self._context) @base.remotable def update_or_create(self): self._create(self._context, update_or_create=True) @base.remotable def destroy(self): if not self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='destroy', reason='already destroyed') db.block_device_mapping_destroy(self._context, self.id) delattr(self, base.get_attrname('id')) @base.remotable def save(self): updates = self.obj_get_changes() if 'instance' in updates: raise exception.ObjectActionError(action='save', reason='instance changed') updates.pop('id', None) updated = db.block_device_mapping_update(self._context, self.id, updates, legacy=False) if not updated: raise exception.BDMNotFound(id=self.id) self._from_db_object(self._context, self, updated) # NOTE(danms): This method is deprecated and will be removed in # v2.0 of the object @base.remotable_classmethod def get_by_volume_id(cls, context, volume_id, instance_uuid=None, expected_attrs=None): if expected_attrs is None: expected_attrs = [] db_bdms = db.block_device_mapping_get_all_by_volume_id( context, volume_id, _expected_cols(expected_attrs)) if not db_bdms: raise exception.VolumeBDMNotFound(volume_id=volume_id) if len(db_bdms) > 1: LOG.warning('Legacy get_by_volume_id() call found multiple ' 'BDMs for volume %(volume)s', {'volume': volume_id}) db_bdm = db_bdms[0] # NOTE (ndipanov): Move this to the db layer into a # get_by_instance_and_volume_id method if instance_uuid and instance_uuid != db_bdm['instance_uuid']: raise exception.InvalidVolume( reason=_("Volume does not belong to the " "requested instance.")) return cls._from_db_object(context, cls(), db_bdm, expected_attrs=expected_attrs) @base.remotable_classmethod def get_by_volume_and_instance(cls, context, volume_id, instance_uuid, expected_attrs=None): if expected_attrs is None: expected_attrs = [] db_bdm = db.block_device_mapping_get_by_instance_and_volume_id( context, volume_id, instance_uuid, _expected_cols(expected_attrs)) if not db_bdm: raise exception.VolumeBDMNotFound(volume_id=volume_id) return cls._from_db_object(context, cls(), db_bdm, expected_attrs=expected_attrs) @base.remotable_classmethod def get_by_volume(cls, context, volume_id, expected_attrs=None): if expected_attrs is None: expected_attrs = [] db_bdms = db.block_device_mapping_get_all_by_volume_id( context, volume_id, _expected_cols(expected_attrs)) if not db_bdms: raise exception.VolumeBDMNotFound(volume_id=volume_id) if len(db_bdms) > 1: raise exception.VolumeBDMIsMultiAttach(volume_id=volume_id) return cls._from_db_object(context, cls(), db_bdms[0], expected_attrs=expected_attrs) @property def is_root(self): return self.boot_index == 0 @property def is_volume(self): return (self.destination_type == fields.BlockDeviceDestinationType.VOLUME) @property def is_image(self): return self.source_type == fields.BlockDeviceSourceType.IMAGE @property def is_local(self): return (self.destination_type == fields.BlockDeviceDestinationType.LOCAL) def get_image_mapping(self): return 
block_device.BlockDeviceDict(self).get_image_mapping() @base.lazy_load_counter def obj_load_attr(self, attrname): if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) if attrname == 'encrypted': # We attempt to load this if we're creating a BDM object during an # attach volume request, for example. Use the default in that case. self.obj_set_defaults(attrname) elif attrname not in BLOCK_DEVICE_OPTIONAL_ATTRS: raise exception.ObjectActionError( action='obj_load_attr', reason='attribute %s not lazy-loadable' % attrname) else: LOG.debug( "Lazy-loading '%(attr)s' on %(name)s using uuid %(uuid)s", { 'attr': attrname, 'name': self.obj_name(), 'uuid': self.instance_uuid, } ) self.instance = objects.Instance.get_by_uuid(self._context, self.instance_uuid) self.obj_reset_changes(fields=['instance']) @base.NovaObjectRegistry.register class BlockDeviceMappingList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Version 1.1: BlockDeviceMapping <= version 1.1 # Version 1.2: Added use_slave to get_by_instance_uuid # Version 1.3: BlockDeviceMapping <= version 1.2 # Version 1.4: BlockDeviceMapping <= version 1.3 # Version 1.5: BlockDeviceMapping <= version 1.4 # Version 1.6: BlockDeviceMapping <= version 1.5 # Version 1.7: BlockDeviceMapping <= version 1.6 # Version 1.8: BlockDeviceMapping <= version 1.7 # Version 1.9: BlockDeviceMapping <= version 1.8 # Version 1.10: BlockDeviceMapping <= version 1.9 # Version 1.11: BlockDeviceMapping <= version 1.10 # Version 1.12: BlockDeviceMapping <= version 1.11 # Version 1.13: BlockDeviceMapping <= version 1.12 # Version 1.14: BlockDeviceMapping <= version 1.13 # Version 1.15: BlockDeviceMapping <= version 1.14 # Version 1.16: BlockDeviceMapping <= version 1.15 # Version 1.17: Add get_by_instance_uuids() # Version 1.18: Add get_by_volume() VERSION = '1.18' fields = { 'objects': fields.ListOfObjectsField('BlockDeviceMapping'), } @property def instance_uuids(self): return set( bdm.instance_uuid for bdm in self if bdm.obj_attr_is_set('instance_uuid') ) @classmethod def bdms_by_instance_uuid(cls, context, instance_uuids): bdms = cls.get_by_instance_uuids(context, instance_uuids) return base.obj_make_dict_of_lists( context, cls, bdms, 'instance_uuid') @staticmethod @db.select_db_reader_mode def _db_block_device_mapping_get_all_by_instance_uuids( context, instance_uuids, use_slave=False): return db.block_device_mapping_get_all_by_instance_uuids( context, instance_uuids) @base.remotable_classmethod def get_by_instance_uuids(cls, context, instance_uuids, use_slave=False): db_bdms = cls._db_block_device_mapping_get_all_by_instance_uuids( context, instance_uuids, use_slave=use_slave) return base.obj_make_list( context, cls(), objects.BlockDeviceMapping, db_bdms or []) @staticmethod @db.select_db_reader_mode def _db_block_device_mapping_get_all_by_instance( context, instance_uuid, use_slave=False): return db.block_device_mapping_get_all_by_instance( context, instance_uuid) @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False): db_bdms = cls._db_block_device_mapping_get_all_by_instance( context, instance_uuid, use_slave=use_slave) return base.obj_make_list( context, cls(), objects.BlockDeviceMapping, db_bdms or []) @staticmethod @db.select_db_reader_mode def _db_block_device_mapping_get_all_by_volume( context, volume_id, use_slave=False): return db.block_device_mapping_get_all_by_volume_id( context, volume_id) @base.remotable_classmethod def 
get_by_volume(cls, context, volume_id, use_slave=False): db_bdms = cls._db_block_device_mapping_get_all_by_volume( context, volume_id, use_slave=use_slave) if not db_bdms: raise exception.VolumeBDMNotFound(volume_id=volume_id) return base.obj_make_list( context, cls(), objects.BlockDeviceMapping, db_bdms) def root_bdm(self): """It only makes sense to call this method when the BlockDeviceMappingList contains BlockDeviceMappings from exactly one instance rather than BlockDeviceMappings from multiple instances. For example, you should not call this method from a BlockDeviceMappingList created by get_by_instance_uuids(), but you may call this method from a BlockDeviceMappingList created by get_by_instance_uuid(). """ if len(self.instance_uuids) > 1: raise exception.UndefinedRootBDM() try: return next(bdm_obj for bdm_obj in self if bdm_obj.is_root) except StopIteration: return def block_device_make_list(context, db_list, **extra_args): return base.obj_make_list(context, objects.BlockDeviceMappingList(context), objects.BlockDeviceMapping, db_list, **extra_args) def block_device_make_list_from_dicts(context, bdm_dicts_list): bdm_objects = [objects.BlockDeviceMapping(context=context, **bdm) for bdm in bdm_dicts_list] return BlockDeviceMappingList(objects=bdm_objects) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/build_request.py0000664000175000017500000004725300000000000020412 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import re from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import versionutils from oslo_versionedobjects import exception as ovoo_exc from nova.db.api import api as api_db_api from nova.db.api import models as api_models from nova.db import utils as db_utils from nova import exception from nova import objects from nova.objects import base from nova.objects import fields LOG = logging.getLogger(__name__) @base.NovaObjectRegistry.register class BuildRequest(base.NovaObject): # Version 1.0: Initial version # Version 1.1: Added block_device_mappings # Version 1.2: Added save() method # Version 1.3: Added tags VERSION = '1.3' fields = { 'id': fields.IntegerField(), 'instance_uuid': fields.UUIDField(), 'project_id': fields.StringField(), 'instance': fields.ObjectField('Instance'), 'block_device_mappings': fields.ObjectField('BlockDeviceMappingList'), # NOTE(alaski): Normally these would come from the NovaPersistentObject # mixin but they're being set explicitly because we only need # created_at/updated_at. There is no soft delete for this object. 
'created_at': fields.DateTimeField(nullable=True), 'updated_at': fields.DateTimeField(nullable=True), 'tags': fields.ObjectField('TagList'), } def obj_make_compatible(self, primitive, target_version): super(BuildRequest, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 1) and 'block_device_mappings' in primitive: del primitive['block_device_mappings'] elif target_version < (1, 3) and 'tags' in primitive: del primitive['tags'] def _load_instance(self, db_instance): # NOTE(alaski): Be very careful with instance loading because it # changes more than most objects. try: self.instance = objects.Instance.obj_from_primitive( jsonutils.loads(db_instance)) except TypeError: LOG.debug('Failed to load instance from BuildRequest with uuid ' '%s because it is None', self.instance_uuid) raise exception.BuildRequestNotFound(uuid=self.instance_uuid) except ovoo_exc.IncompatibleObjectVersion: # This should only happen if proper service upgrade strategies are # not followed. Log the exception and raise BuildRequestNotFound. # If the instance can't be loaded this object is useless and may # as well not exist. LOG.debug('Could not deserialize instance store in BuildRequest ' 'with uuid %(instance_uuid)s. Found version %(version)s ' 'which is not supported here.', dict(instance_uuid=self.instance_uuid, version=jsonutils.loads( db_instance)["nova_object.version"])) LOG.exception('Could not deserialize instance in BuildRequest') raise exception.BuildRequestNotFound(uuid=self.instance_uuid) # NOTE(sbauza): The instance primitive should already have the deleted # field being set, so when hydrating it back here, we should get the # right value but in case we don't have it, let's suppose that the # instance is not deleted, which is the default value for that field. # NOTE(mriedem): Same for the "hidden" field. self.instance.obj_set_defaults('deleted', 'hidden') # NOTE(alaski): Set some fields on instance that are needed by the api, # not lazy-loadable, and don't change. self.instance.disable_terminate = False self.instance.terminated_at = None self.instance.host = None self.instance.node = None self.instance.compute_id = None self.instance.launched_at = None self.instance.launched_on = None self.instance.cell_name = None # The fields above are not set until the instance is in a cell at # which point this BuildRequest will be gone. locked_by could # potentially be set by an update so it should not be overwritten. if not self.instance.obj_attr_is_set('locked_by'): self.instance.locked_by = None # created_at/updated_at are not on the serialized instance because it # was never persisted. self.instance.created_at = self.created_at self.instance.updated_at = self.updated_at self.instance.tags = self.tags def _load_block_device_mappings(self, db_bdms): # 'db_bdms' is a serialized BlockDeviceMappingList object. If it's None # we're in a mixed version nova-api scenario and can't retrieve the # actual list. Set it to an empty list here which will cause a # temporary API inconsistency that will be resolved as soon as the # instance is scheduled and on a compute. 
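        # Hypothetical illustration (not part of the upstream module): an
        # older nova-api node that does not serialize block device mappings
        # into the build_requests row leaves this column NULL; a newer API
        # node reading that row then sees db_bdms as None and temporarily
        # reports an empty BDM list until the instance lands in a cell.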
if db_bdms is None: LOG.debug('Failed to load block_device_mappings from BuildRequest ' 'for instance %s because it is None', self.instance_uuid) self.block_device_mappings = objects.BlockDeviceMappingList() return self.block_device_mappings = ( objects.BlockDeviceMappingList.obj_from_primitive( jsonutils.loads(db_bdms))) def _load_tags(self, db_tags): # 'db_tags' is a serialized TagList object. If it's None # we're in a mixed version nova-api scenario and can't retrieve the # actual list. Set it to an empty list here which will cause a # temporary API inconsistency that will be resolved as soon as the # instance is scheduled and on a compute. if db_tags is None: LOG.debug('Failed to load tags from BuildRequest ' 'for instance %s because it is None', self.instance_uuid) self.tags = objects.TagList() return self.tags = ( objects.TagList.obj_from_primitive( jsonutils.loads(db_tags))) @staticmethod def _from_db_object(context, req, db_req): # Set this up front so that it can be pulled for error messages or # logging at any point. req.instance_uuid = db_req['instance_uuid'] for key in req.fields: if key == 'instance': continue elif isinstance(req.fields[key], fields.ObjectField): try: getattr(req, '_load_%s' % key)(db_req[key]) except AttributeError: LOG.exception('No load handler for %s', key) else: setattr(req, key, db_req[key]) # Load instance last because other fields on req may be referenced req._load_instance(db_req['instance']) req.obj_reset_changes(recursive=True) req._context = context return req @staticmethod @api_db_api.context_manager.reader def _get_by_instance_uuid_from_db(context, instance_uuid): db_req = context.session.query(api_models.BuildRequest).filter_by( instance_uuid=instance_uuid).first() if not db_req: raise exception.BuildRequestNotFound(uuid=instance_uuid) return db_req @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_req = cls._get_by_instance_uuid_from_db(context, instance_uuid) return cls._from_db_object(context, cls(), db_req) @staticmethod @api_db_api.context_manager.writer def _create_in_db(context, updates): db_req = api_models.BuildRequest() db_req.update(updates) db_req.save(context.session) return db_req def _get_update_primitives(self): updates = self.obj_get_changes() for key, value in updates.items(): if isinstance(self.fields[key], fields.ObjectField): updates[key] = jsonutils.dumps(value.obj_to_primitive()) return updates @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') if not self.obj_attr_is_set('instance_uuid'): # We can't guarantee this is not null in the db so check here raise exception.ObjectActionError(action='create', reason='instance_uuid must be set') updates = self._get_update_primitives() db_req = self._create_in_db(self._context, updates) self._from_db_object(self._context, self, db_req) @staticmethod @api_db_api.context_manager.writer def _destroy_in_db(context, instance_uuid): result = context.session.query(api_models.BuildRequest).filter_by( instance_uuid=instance_uuid).delete() if not result: raise exception.BuildRequestNotFound(uuid=instance_uuid) @base.remotable def destroy(self): self._destroy_in_db(self._context, self.instance_uuid) @api_db_api.context_manager.writer def _save_in_db(self, context, req_id, updates): db_req = context.session.query( api_models.BuildRequest).filter_by(id=req_id).first() if not db_req: raise exception.BuildRequestNotFound(uuid=self.instance_uuid) db_req.update(updates) 
context.session.add(db_req) return db_req @base.remotable def save(self): updates = self._get_update_primitives() db_req = self._save_in_db(self._context, self.id, updates) self._from_db_object(self._context, self, db_req) def get_new_instance(self, context): # NOTE(danms): This is a hack to make sure that the returned # instance has all dirty fields. There are probably better # ways to do this, but they kinda involve o.vo internals # so this is okay for the moment. instance = objects.Instance(context) for field in self.instance.obj_fields: # NOTE(danms): Don't copy the defaulted tags field # as instance.create() won't handle it properly. # TODO(zhengzhenyu): Handle this when the API supports creating # servers with tags. if field == 'tags': continue if self.instance.obj_attr_is_set(field): setattr(instance, field, getattr(self.instance, field)) return instance @base.NovaObjectRegistry.register class BuildRequestList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('BuildRequest'), } @staticmethod @api_db_api.context_manager.reader def _get_all_from_db(context): query = context.session.query(api_models.BuildRequest) if not context.is_admin: query = query.filter_by(project_id=context.project_id) db_reqs = query.all() return db_reqs @base.remotable_classmethod def get_all(cls, context): db_build_reqs = cls._get_all_from_db(context) return base.obj_make_list(context, cls(context), objects.BuildRequest, db_build_reqs) @staticmethod def _pass_exact_filters(instance, filters): for filter_key, filter_val in filters.items(): if filter_key in ('metadata', 'system_metadata'): if isinstance(filter_val, list): for item in filter_val: for k, v in item.items(): if (k not in instance.metadata or v != instance.metadata[k]): return False else: for k, v in filter_val.items(): if (k not in instance.metadata or v != instance.metadata[k]): return False elif filter_key in ( 'tags', 'tags-any', 'not-tags', 'not-tags-any'): # Get the list of simple string tags first. tags = ([tag.tag for tag in instance.tags] if instance.tags else []) if filter_key == 'tags': for item in filter_val: if item not in tags: return False elif filter_key == 'tags-any': found = [] for item in filter_val: if item in tags: found.append(item) if not found: return False elif filter_key == 'not-tags': found = [] for item in filter_val: if item in tags: found.append(item) if len(found) == len(filter_val): return False elif filter_key == 'not-tags-any': for item in filter_val: if item in tags: return False elif isinstance(filter_val, (list, tuple, set, frozenset)): if not filter_val: # Special value to indicate that nothing will match. return None if instance.get(filter_key, None) not in filter_val: return False else: if instance.get(filter_key, None) != filter_val: return False return True @staticmethod def _pass_regex_filters(instance, filters): for filter_name, filter_val in filters.items(): try: instance_attr = getattr(instance, filter_name) except AttributeError: continue # Sometimes the REGEX filter value is not a string if not isinstance(filter_val, str): filter_val = str(filter_val) filter_re = re.compile(filter_val) if instance_attr and not filter_re.search(str(instance_attr)): return False return True @staticmethod def _sort_build_requests(build_req_list, sort_keys, sort_dirs): # build_req_list is a [] of build_reqs sort_keys.reverse() sort_dirs.reverse() def sort_attr(sort_key, build_req): if sort_key == 'id': # 'id' is not set on the instance yet. 
Use the BuildRequest # 'id' instead. return build_req.id return getattr(build_req.instance, sort_key) for sort_key, sort_dir in zip(sort_keys, sort_dirs): reverse = False if sort_dir.lower().startswith('asc') else True build_req_list.sort(key=functools.partial(sort_attr, sort_key), reverse=reverse) return build_req_list @base.remotable_classmethod def get_by_filters(cls, context, filters, limit=None, marker=None, sort_keys=None, sort_dirs=None): # Short-circuit on anything that will not yield results. # 'deleted' records can not be returned from here since build_requests # are not soft deleted. # 'cleaned' records won't exist as they would need to be deleted. if (limit == 0 or filters.get('deleted', False) or filters.get('cleaned', False)): # If we have a marker honor the MarkerNotFound semantics. if marker: raise exception.MarkerNotFound(marker=marker) return cls(context, objects=[]) # Because the build_requests table stores an instance as a serialized # versioned object it is not feasible to do the filtering and sorting # in the database. Just get all potentially relevant records and # process them here. It should be noted that build requests are short # lived so there should not be a lot of results to deal with. build_requests = cls.get_all(context) # Fortunately some filters do not apply here. # 'changes-since' works off of the updated_at field which has not yet # been set at the point in the boot process where build_request still # exists. So it can be ignored. # 'deleted' and 'cleaned' are handled above. sort_keys, sort_dirs = db_utils.process_sort_params( sort_keys, sort_dirs, default_dir='desc') # For other filters that don't match this, we will do regexp matching # Taken from db/sqlalchemy/api.py exact_match_filter_names = ['project_id', 'user_id', 'image_ref', 'vm_state', 'instance_type_id', 'uuid', 'metadata', 'host', 'task_state', 'system_metadata', 'tags', 'tags-any', 'not-tags', 'not-tags-any'] exact_filters = {} regex_filters = {} for key, value in filters.items(): if key in exact_match_filter_names: exact_filters[key] = value else: regex_filters[key] = value # As much as possible this copies the logic from db/sqlalchemy/api.py # instance_get_all_by_filters_sort. The main difference is that method # builds a sql query and this filters in python. filtered_build_reqs = [] for build_req in build_requests: instance = build_req.instance filter_result = cls._pass_exact_filters(instance, exact_filters) if filter_result is None: # The filter condition is such that nothing will match. # Bail early. return cls(context, objects=[]) if filter_result is False: continue if not cls._pass_regex_filters(instance, regex_filters): continue filtered_build_reqs.append(build_req) if (((len(filtered_build_reqs) < 2) or (not sort_keys)) and not marker): # No need to sort return cls(context, objects=filtered_build_reqs) sorted_build_reqs = cls._sort_build_requests(filtered_build_reqs, sort_keys, sort_dirs) marker_index = 0 if marker: for i, build_req in enumerate(sorted_build_reqs): if build_req.instance.uuid == marker: # The marker is the last seen item in the last page, so # we increment the index to the next item immediately # after the marker so the marker is not returned. 
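                    # Worked example with hypothetical values: given four
                    # sorted requests [a, b, c, d], marker=b.uuid and
                    # limit=2, the marker is found at i=1, marker_index
                    # becomes 2, limit_index becomes 4, and the returned
                    # page is [c, d].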
marker_index = i + 1 break else: raise exception.MarkerNotFound(marker=marker) len_build_reqs = len(sorted_build_reqs) limit_index = len_build_reqs if limit: limit_index = marker_index + limit if limit_index > len_build_reqs: limit_index = len_build_reqs return cls(context, objects=sorted_build_reqs[marker_index:limit_index]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/cell_mapping.py0000664000175000017500000002404200000000000020164 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from urllib import parse as urlparse from oslo_log import log as logging from oslo_utils import versionutils from sqlalchemy import sql from sqlalchemy.sql import expression import nova.conf from nova.db.api import api as api_db_api from nova.db.api import models as api_db_models from nova import exception from nova.objects import base from nova.objects import fields CONF = nova.conf.CONF LOG = logging.getLogger(__name__) def _parse_netloc(netloc): """Parse a user:pass@host:port and return a dict suitable for formatting a cell mapping template. """ these = { 'username': None, 'password': None, 'hostname': None, 'port': None, } if '@' in netloc: userpass, hostport = netloc.split('@', 1) else: hostport = netloc userpass = '' if hostport.startswith('['): host_end = hostport.find(']') if host_end < 0: raise ValueError('Invalid IPv6 URL') these['hostname'] = hostport[1:host_end] these['port'] = hostport[host_end + 1:] elif ':' in hostport: these['hostname'], these['port'] = hostport.split(':', 1) else: these['hostname'] = hostport if ':' in userpass: these['username'], these['password'] = userpass.split(':', 1) else: these['username'] = userpass return these @base.NovaObjectRegistry.register class CellMapping(base.NovaTimestampObject, base.NovaObject): # Version 1.0: Initial version # Version 1.1: Added disabled field VERSION = '1.1' CELL0_UUID = '00000000-0000-0000-0000-000000000000' fields = { 'id': fields.IntegerField(read_only=True), 'uuid': fields.UUIDField(), 'name': fields.StringField(nullable=True), 'transport_url': fields.StringField(), 'database_connection': fields.StringField(), 'disabled': fields.BooleanField(default=False), } def obj_make_compatible(self, primitive, target_version): super(CellMapping, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 1): if 'disabled' in primitive: del primitive['disabled'] @property def identity(self): if 'name' in self and self.name: return '%s(%s)' % (self.uuid, self.name) else: return self.uuid @staticmethod def _format_url(url, default): default_url = urlparse.urlparse(default) subs = { 'username': default_url.username, 'password': default_url.password, 'hostname': default_url.hostname, 'port': default_url.port, 'scheme': default_url.scheme, 'query': default_url.query, 'fragment': default_url.fragment, 'path': default_url.path.lstrip('/'), } # NOTE(danms): oslo.messaging 
has an extended format for the URL # which we need to support: # scheme://user:pass@host:port[,user1:pass@host1:port, ...]/path # Encode these values, if they exist, as indexed keys like # username1, password1, hostname1, port1. if ',' in default_url.netloc: netlocs = default_url.netloc.split(',') index = 0 for netloc in netlocs: index += 1 these = _parse_netloc(netloc) for key in these: subs['%s%i' % (key, index)] = these[key] return url.format(**subs) @staticmethod def format_db_url(url): if CONF.database.connection is None: if '{' in url: LOG.error('Cell mapping database_connection is a template, ' 'but [database]/connection is not set') return url try: return CellMapping._format_url(url, CONF.database.connection) except Exception: LOG.exception('Failed to parse [database]/connection to ' 'format cell mapping') return url @staticmethod def format_mq_url(url): if CONF.transport_url is None: if '{' in url: LOG.error('Cell mapping transport_url is a template, but ' '[DEFAULT]/transport_url is not set') return url try: return CellMapping._format_url(url, CONF.transport_url) except Exception: LOG.exception('Failed to parse [DEFAULT]/transport_url to ' 'format cell mapping') return url @staticmethod def _from_db_object(context, cell_mapping, db_cell_mapping): for key in cell_mapping.fields: val = db_cell_mapping[key] if key == 'database_connection': val = cell_mapping.format_db_url(val) elif key == 'transport_url': val = cell_mapping.format_mq_url(val) setattr(cell_mapping, key, val) cell_mapping.obj_reset_changes() cell_mapping._context = context return cell_mapping @staticmethod @api_db_api.context_manager.reader def _get_by_uuid_from_db(context, uuid): db_mapping = context.session\ .query(api_db_models.CellMapping).filter_by(uuid=uuid).first() if not db_mapping: raise exception.CellMappingNotFound(uuid=uuid) return db_mapping @base.remotable_classmethod def get_by_uuid(cls, context, uuid): db_mapping = cls._get_by_uuid_from_db(context, uuid) return cls._from_db_object(context, cls(), db_mapping) @staticmethod @api_db_api.context_manager.writer def _create_in_db(context, updates): db_mapping = api_db_models.CellMapping() db_mapping.update(updates) db_mapping.save(context.session) return db_mapping @base.remotable def create(self): db_mapping = self._create_in_db(self._context, self.obj_get_changes()) self._from_db_object(self._context, self, db_mapping) @staticmethod @api_db_api.context_manager.writer def _save_in_db(context, uuid, updates): db_mapping = context.session.query( api_db_models.CellMapping).filter_by(uuid=uuid).first() if not db_mapping: raise exception.CellMappingNotFound(uuid=uuid) db_mapping.update(updates) context.session.add(db_mapping) return db_mapping @base.remotable def save(self): changes = self.obj_get_changes() db_mapping = self._save_in_db(self._context, self.uuid, changes) self._from_db_object(self._context, self, db_mapping) self.obj_reset_changes() @staticmethod @api_db_api.context_manager.writer def _destroy_in_db(context, uuid): result = context.session.query(api_db_models.CellMapping).filter_by( uuid=uuid).delete() if not result: raise exception.CellMappingNotFound(uuid=uuid) @base.remotable def destroy(self): self._destroy_in_db(self._context, self.uuid) def is_cell0(self): return self.obj_attr_is_set('uuid') and self.uuid == self.CELL0_UUID @base.NovaObjectRegistry.register class CellMappingList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Version 1.1: Add get_by_disabled() VERSION = '1.1' fields = { 'objects': 
fields.ListOfObjectsField('CellMapping'), } @staticmethod @api_db_api.context_manager.reader def _get_all_from_db(context): return context.session.query(api_db_models.CellMapping).order_by( expression.asc(api_db_models.CellMapping.id)).all() @base.remotable_classmethod def get_all(cls, context): db_mappings = cls._get_all_from_db(context) return base.obj_make_list(context, cls(), CellMapping, db_mappings) @staticmethod @api_db_api.context_manager.reader def _get_by_disabled_from_db(context, disabled): if disabled: return context.session.query(api_db_models.CellMapping)\ .filter_by(disabled=sql.true())\ .order_by(expression.asc(api_db_models.CellMapping.id)).all() else: return context.session.query(api_db_models.CellMapping)\ .filter_by(disabled=sql.false())\ .order_by(expression.asc(api_db_models.CellMapping.id)).all() @base.remotable_classmethod def get_by_disabled(cls, context, disabled): db_mappings = cls._get_by_disabled_from_db(context, disabled) return base.obj_make_list(context, cls(), CellMapping, db_mappings) @staticmethod @api_db_api.context_manager.reader def _get_by_project_id_from_db(context, project_id): # SELECT DISTINCT cell_id FROM instance_mappings \ # WHERE project_id = $project_id; cell_ids = context.session.query( api_db_models.InstanceMapping.cell_id ).filter_by( project_id=project_id ).distinct() # SELECT cell_mappings WHERE cell_id IN ($cell_ids); return context.session.query( api_db_models.CellMapping).filter( api_db_models.CellMapping.id.in_(cell_ids) ).all() @classmethod def get_by_project_id(cls, context, project_id): """Return a list of CellMapping objects which correspond to cells in which project_id has InstanceMappings. """ db_mappings = cls._get_by_project_id_from_db(context, project_id) return base.obj_make_list(context, cls(), CellMapping, db_mappings) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/compute_node.py0000664000175000017500000006041500000000000020217 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
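
# Illustrative sketch (not Nova's implementation): the CellMapping
# _format_url()/format_db_url() methods above fill placeholders such as
# {username}, {hostname} or {port} in a cell's database_connection template
# from the operator-configured default URL. The helper name and example
# values below are hypothetical; only the substitution idea is taken from
# the code above.
def _demo_format_cell_url(template, default_url):
    from urllib import parse as urlparse

    parsed = urlparse.urlparse(default_url)
    subs = {
        'username': parsed.username,
        'password': parsed.password,
        'hostname': parsed.hostname,
        'port': parsed.port,
        'scheme': parsed.scheme,
        'query': parsed.query,
        'fragment': parsed.fragment,
        'path': parsed.path.lstrip('/'),
    }
    return template.format(**subs)

# Example (hypothetical values):
#   _demo_format_cell_url(
#       'mysql+pymysql://{username}:{password}@{hostname}/nova_cell1',
#       'mysql+pymysql://nova:secret@db.example.org/nova_api')
#   -> 'mysql+pymysql://nova:secret@db.example.org/nova_cell1'
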
from oslo_db import exception as db_exc from oslo_serialization import jsonutils from oslo_utils import uuidutils from oslo_utils import versionutils import sqlalchemy as sa from sqlalchemy import sql import nova.conf from nova.db.main import api as db from nova.db.main import models from nova import exception from nova import objects from nova.objects import base from nova.objects import fields from nova.objects import pci_device_pool CONF = nova.conf.CONF @base.NovaObjectRegistry.register class ComputeNode(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version # Version 1.1: Added get_by_service_id() # Version 1.2: String attributes updated to support unicode # Version 1.3: Added stats field # Version 1.4: Added host ip field # Version 1.5: Added numa_topology field # Version 1.6: Added supported_hv_specs # Version 1.7: Added host field # Version 1.8: Added get_by_host_and_nodename() # Version 1.9: Added pci_device_pools # Version 1.10: Added get_first_node_by_host_for_old_compat() # Version 1.11: PciDevicePoolList version 1.1 # Version 1.12: HVSpec version 1.1 # Version 1.13: Changed service_id field to be nullable # Version 1.14: Added cpu_allocation_ratio and ram_allocation_ratio # Version 1.15: Added uuid # Version 1.16: Added disk_allocation_ratio # Version 1.17: Added mapped # Version 1.18: Added get_by_uuid(). # Version 1.19: Added get_by_nodename(). VERSION = '1.19' fields = { 'id': fields.IntegerField(read_only=True), 'uuid': fields.UUIDField(read_only=True), 'service_id': fields.IntegerField(nullable=True), 'host': fields.StringField(nullable=True), 'vcpus': fields.IntegerField(), 'memory_mb': fields.IntegerField(), 'local_gb': fields.IntegerField(), 'vcpus_used': fields.IntegerField(), 'memory_mb_used': fields.IntegerField(), 'local_gb_used': fields.IntegerField(), 'hypervisor_type': fields.StringField(), 'hypervisor_version': fields.IntegerField(), 'hypervisor_hostname': fields.StringField(nullable=True), 'free_ram_mb': fields.IntegerField(nullable=True), 'free_disk_gb': fields.IntegerField(nullable=True), 'current_workload': fields.IntegerField(nullable=True), 'running_vms': fields.IntegerField(nullable=True), # TODO(melwitt): cpu_info is non-nullable in the schema but we must # wait until version 2.0 of ComputeNode to change it to non-nullable 'cpu_info': fields.StringField(nullable=True), 'disk_available_least': fields.IntegerField(nullable=True), 'metrics': fields.StringField(nullable=True), 'stats': fields.DictOfNullableStringsField(nullable=True), 'host_ip': fields.IPAddressField(nullable=True), # TODO(rlrossit): because of history, numa_topology is held here as a # StringField, not a NUMATopology object. 
In version 2 of ComputeNode # this will be converted over to a fields.ObjectField('NUMATopology') 'numa_topology': fields.StringField(nullable=True), # NOTE(pmurray): the supported_hv_specs field maps to the # supported_instances field in the database 'supported_hv_specs': fields.ListOfObjectsField('HVSpec'), # NOTE(pmurray): the pci_device_pools field maps to the # pci_stats field in the database 'pci_device_pools': fields.ObjectField('PciDevicePoolList', nullable=True), 'cpu_allocation_ratio': fields.FloatField(), 'ram_allocation_ratio': fields.FloatField(), 'disk_allocation_ratio': fields.FloatField(), 'mapped': fields.IntegerField(), } def obj_make_compatible(self, primitive, target_version): super(ComputeNode, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 17): if 'mapped' in primitive: del primitive['mapped'] if target_version < (1, 16): if 'disk_allocation_ratio' in primitive: del primitive['disk_allocation_ratio'] if target_version < (1, 15): if 'uuid' in primitive: del primitive['uuid'] if target_version < (1, 14): if 'ram_allocation_ratio' in primitive: del primitive['ram_allocation_ratio'] if 'cpu_allocation_ratio' in primitive: del primitive['cpu_allocation_ratio'] if target_version < (1, 13) and primitive.get('service_id') is None: # service_id is non-nullable in versions before 1.13 try: service = objects.Service.get_by_compute_host( self._context, primitive['host']) primitive['service_id'] = service.id except (exception.ComputeHostNotFound, KeyError): # NOTE(hanlind): In case anything goes wrong like service not # found or host not being set, catch and set a fake value just # to allow for older versions that demand a value to work. # Setting to -1 will, if value is later used result in a # ServiceNotFound, so should be safe. 
primitive['service_id'] = -1 if target_version < (1, 9) and 'pci_device_pools' in primitive: del primitive['pci_device_pools'] if target_version < (1, 7) and 'host' in primitive: del primitive['host'] if target_version < (1, 6) and 'supported_hv_specs' in primitive: del primitive['supported_hv_specs'] if target_version < (1, 5) and 'numa_topology' in primitive: del primitive['numa_topology'] if target_version < (1, 4) and 'host_ip' in primitive: del primitive['host_ip'] if target_version < (1, 3) and 'stats' in primitive: # pre 1.3 version does not have a stats field del primitive['stats'] @staticmethod def _host_from_db_object(compute, db_compute): if (('host' not in db_compute or db_compute['host'] is None) and 'service_id' in db_compute and db_compute['service_id'] is not None): # FIXME(sbauza) : Unconverted compute record, provide compatibility # This has to stay until we can be sure that any/all compute nodes # in the database have been converted to use the host field # Service field of ComputeNode could be deprecated in a next patch, # so let's use directly the Service object try: service = objects.Service.get_by_id( compute._context, db_compute['service_id']) except exception.ServiceNotFound: compute.host = None return try: compute.host = service.host except (AttributeError, exception.OrphanedObjectError): # Host can be nullable in Service compute.host = None elif 'host' in db_compute and db_compute['host'] is not None: # New-style DB having host as a field compute.host = db_compute['host'] else: # We assume it should not happen but in case, let's set it to None compute.host = None @staticmethod def _from_db_object(context, compute, db_compute): special_cases = set([ 'stats', 'supported_hv_specs', 'host', 'pci_device_pools', ]) fields = set(compute.fields) - special_cases online_updates = {} for key in fields: value = db_compute[key] # NOTE(sbauza): Since all compute nodes don't possibly run the # latest RT code updating allocation ratios, we need to provide # a backwards compatible way of hydrating them. # As we want to care about our operators and since we don't want to # ask them to change their configuration files before upgrading, we # prefer to hardcode the default values for the ratios here until # the next release (Newton) where the opt default values will be # restored for both cpu (16.0), ram (1.5) and disk (1.0) # allocation ratios. # TODO(yikun): Remove this online migration code when all ratio # values are NOT 0.0 or NULL ratio_keys = ['cpu_allocation_ratio', 'ram_allocation_ratio', 'disk_allocation_ratio'] if key in ratio_keys and value in (None, 0.0): # ResourceTracker is not updating the value (old node) # or the compute node is updated but the default value has # not been changed r = getattr(CONF, key) # NOTE(yikun): If the allocation ratio record is not set, the # allocation ratio will be changed to the # CONF.x_allocation_ratio value if x_allocation_ratio is # set, and fallback to use the CONF.initial_x_allocation_ratio # otherwise. 
init_x_ratio = getattr(CONF, 'initial_%s' % key) value = r if r else init_x_ratio online_updates[key] = value elif key == 'numa_topology' and value and ( 'nova_object.name' not in value): # TODO(stephenfin): Remove this online migration in X or later, # once this has bedded in value = objects.NUMATopology.from_legacy_object(value) online_updates[key] = value elif key == 'mapped': value = 0 if value is None else value setattr(compute, key, value) if online_updates: db.compute_node_update(context, compute.id, online_updates) stats = db_compute['stats'] if stats: compute.stats = jsonutils.loads(stats) sup_insts = db_compute.get('supported_instances') if sup_insts: hv_specs = jsonutils.loads(sup_insts) hv_specs = [objects.HVSpec.from_list(hv_spec) for hv_spec in hv_specs] compute.supported_hv_specs = hv_specs pci_stats = db_compute.get('pci_stats') if pci_stats is not None: pci_stats = pci_device_pool.from_pci_stats(pci_stats) compute.pci_device_pools = pci_stats compute._context = context # Make sure that we correctly set the host field depending on either # host column is present in the table or not compute._host_from_db_object(compute, db_compute) compute.obj_reset_changes() return compute @base.remotable_classmethod def get_by_id(cls, context, compute_id): db_compute = db.compute_node_get(context, compute_id) return cls._from_db_object(context, cls(), db_compute) @base.remotable_classmethod def get_by_uuid(cls, context, compute_uuid): nodes = ComputeNodeList.get_all_by_uuids(context, [compute_uuid]) # We have a unique index on the uuid column so we can get back 0 or 1. if not nodes: raise exception.ComputeHostNotFound(host=compute_uuid) return nodes[0] # NOTE(hanlind): This is deprecated and should be removed on the next # major version bump @base.remotable_classmethod def get_by_service_id(cls, context, service_id): db_computes = db.compute_nodes_get_by_service_id(context, service_id) # NOTE(sbauza): Old version was returning an item, we need to keep this # behaviour for backwards compatibility db_compute = db_computes[0] return cls._from_db_object(context, cls(), db_compute) @base.remotable_classmethod def get_by_host_and_nodename(cls, context, host, nodename): db_compute = db.compute_node_get_by_host_and_nodename( context, host, nodename) return cls._from_db_object(context, cls(), db_compute) @base.remotable_classmethod def get_by_nodename(cls, context, hypervisor_hostname): '''Get by node name (i.e. hypervisor hostname). Raises ComputeHostNotFound if hypervisor_hostname with the given name doesn't exist. ''' db_compute = db.compute_node_get_by_nodename( context, hypervisor_hostname) return cls._from_db_object(context, cls(), db_compute) # TODO(pkholkin): Remove this method in the next major version bump @base.remotable_classmethod def get_first_node_by_host_for_old_compat(cls, context, host, use_slave=False): computes = ComputeNodeList.get_all_by_host(context, host, use_slave) # FIXME(sbauza): Ironic deployments can return multiple # nodes per host, we should return all the nodes and modify the callers # instead. # Arbitrarily returning the first node. 
return computes[0] @staticmethod def _convert_stats_to_db_format(updates): stats = updates.pop('stats', None) if stats is not None: updates['stats'] = jsonutils.dumps(stats) @staticmethod def _convert_host_ip_to_db_format(updates): host_ip = updates.pop('host_ip', None) if host_ip: updates['host_ip'] = str(host_ip) @staticmethod def _convert_supported_instances_to_db_format(updates): hv_specs = updates.pop('supported_hv_specs', None) if hv_specs is not None: hv_specs = [hv_spec.to_list() for hv_spec in hv_specs] updates['supported_instances'] = jsonutils.dumps(hv_specs) @staticmethod def _convert_pci_stats_to_db_format(updates): if 'pci_device_pools' in updates: pools = updates.pop('pci_device_pools') if pools is not None: pools = jsonutils.dumps(pools.obj_to_primitive()) updates['pci_stats'] = pools @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') updates = self.obj_get_changes() if 'uuid' not in updates: updates['uuid'] = uuidutils.generate_uuid() self.uuid = updates['uuid'] self._convert_stats_to_db_format(updates) self._convert_host_ip_to_db_format(updates) self._convert_supported_instances_to_db_format(updates) self._convert_pci_stats_to_db_format(updates) try: db_compute = db.compute_node_create(self._context, updates) except db_exc.DBDuplicateEntry: target = 'compute node %s:%s' % (updates['hypervisor_hostname'], updates['uuid']) raise exception.DuplicateRecord(target=target) self._from_db_object(self._context, self, db_compute) @base.remotable def save(self, prune_stats=False): # NOTE(belliott) ignore prune_stats param, no longer relevant updates = self.obj_get_changes() updates.pop('id', None) self._convert_stats_to_db_format(updates) self._convert_host_ip_to_db_format(updates) self._convert_supported_instances_to_db_format(updates) self._convert_pci_stats_to_db_format(updates) db_compute = db.compute_node_update(self._context, self.id, updates) self._from_db_object(self._context, self, db_compute) @base.remotable def destroy(self): if self.obj_attr_is_set('host') and self.host: # NOTE(melwitt): If our host is set, avoid a race between # nova-computes during ironic driver node rebalances which can # change node ownership. constraint = db.constraint(host=db.equal_any(self.host)) else: constraint = None try: db.compute_node_delete( self._context, self.id, constraint=constraint) except exception.ConstraintNotMet: raise exception.ObjectActionError(action='destroy', reason='host changed') def update_from_virt_driver(self, resources): # NOTE(pmurray): the virt driver provides a dict of values that # can be copied into the compute node. The names and representation # do not exactly match. # TODO(pmurray): the resources dict should be formalized. keys = ["vcpus", "memory_mb", "local_gb", "cpu_info", "vcpus_used", "memory_mb_used", "local_gb_used", "numa_topology", "hypervisor_type", "hypervisor_version", "hypervisor_hostname", "disk_available_least", "host_ip", "uuid"] for key in keys: if key in resources: # The uuid field is read-only so it should only be set when # creating the compute node record for the first time. Ignore # it otherwise. if (key == 'uuid' and 'uuid' in self and resources[key] != self.uuid): raise exception.InvalidNodeConfiguration( reason='Attempt to overwrite node %s with %s!' 
% ( self.uuid, resources[key])) setattr(self, key, resources[key]) # supported_instances has a different name in compute_node if 'supported_instances' in resources: si = resources['supported_instances'] self.supported_hv_specs = [objects.HVSpec.from_list(s) for s in si] @base.NovaObjectRegistry.register class ComputeNodeList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # ComputeNode <= version 1.2 # Version 1.1 ComputeNode version 1.3 # Version 1.2 Add get_by_service() # Version 1.3 ComputeNode version 1.4 # Version 1.4 ComputeNode version 1.5 # Version 1.5 Add use_slave to get_by_service # Version 1.6 ComputeNode version 1.6 # Version 1.7 ComputeNode version 1.7 # Version 1.8 ComputeNode version 1.8 + add get_all_by_host() # Version 1.9 ComputeNode version 1.9 # Version 1.10 ComputeNode version 1.10 # Version 1.11 ComputeNode version 1.11 # Version 1.12 ComputeNode version 1.12 # Version 1.13 ComputeNode version 1.13 # Version 1.14 ComputeNode version 1.14 # Version 1.15 Added get_by_pagination() # Version 1.16: Added get_all_by_uuids() # Version 1.17: Added get_all_by_not_mapped() VERSION = '1.17' fields = { 'objects': fields.ListOfObjectsField('ComputeNode'), } @base.remotable_classmethod def get_all(cls, context): db_computes = db.compute_node_get_all(context) return base.obj_make_list(context, cls(context), objects.ComputeNode, db_computes) @base.remotable_classmethod def get_all_by_not_mapped(cls, context, mapped_less_than): """Return ComputeNode records that are not mapped at a certain level""" db_computes = db.compute_node_get_all_mapped_less_than( context, mapped_less_than) return base.obj_make_list(context, cls(context), objects.ComputeNode, db_computes) @base.remotable_classmethod def get_by_pagination(cls, context, limit=None, marker=None): db_computes = db.compute_node_get_all_by_pagination( context, limit=limit, marker=marker) return base.obj_make_list(context, cls(context), objects.ComputeNode, db_computes) @base.remotable_classmethod def get_by_hypervisor(cls, context, hypervisor_match): db_computes = db.compute_node_search_by_hypervisor(context, hypervisor_match) return base.obj_make_list(context, cls(context), objects.ComputeNode, db_computes) # NOTE(hanlind): This is deprecated and should be removed on the next # major version bump @base.remotable_classmethod def _get_by_service(cls, context, service_id, use_slave=False): try: db_computes = db.compute_nodes_get_by_service_id( context, service_id) except exception.ServiceNotFound: # NOTE(sbauza): Previous behaviour was returning an empty list # if the service was created with no computes, we need to keep it. 
db_computes = [] return base.obj_make_list(context, cls(context), objects.ComputeNode, db_computes) @staticmethod @db.select_db_reader_mode def _db_compute_node_get_all_by_host(context, host, use_slave=False): return db.compute_node_get_all_by_host(context, host) @base.remotable_classmethod def get_all_by_host(cls, context, host, use_slave=False): db_computes = cls._db_compute_node_get_all_by_host(context, host, use_slave=use_slave) return base.obj_make_list(context, cls(context), objects.ComputeNode, db_computes) @staticmethod @db.select_db_reader_mode def _db_compute_node_get_all_by_uuids(context, compute_uuids): db_computes = db.model_query(context, models.ComputeNode).filter( models.ComputeNode.uuid.in_(compute_uuids)).all() return db_computes @base.remotable_classmethod def get_all_by_uuids(cls, context, compute_uuids): db_computes = cls._db_compute_node_get_all_by_uuids(context, compute_uuids) return base.obj_make_list(context, cls(context), objects.ComputeNode, db_computes) @staticmethod @db.select_db_reader_mode def _db_compute_node_get_by_hv_type(context, hv_type): db_computes = context.session.query(models.ComputeNode).filter( models.ComputeNode.hypervisor_type == hv_type).all() return db_computes @classmethod def get_by_hypervisor_type(cls, context, hv_type): db_computes = cls._db_compute_node_get_by_hv_type(context, hv_type) return base.obj_make_list(context, cls(context), objects.ComputeNode, db_computes) def _get_node_empty_ratio(context, max_count): """Query the DB for non-deleted compute_nodes with 0.0/None alloc ratios Results are limited by ``max_count``. """ return context.session.query(models.ComputeNode).filter(sa.or_( models.ComputeNode.ram_allocation_ratio == '0.0', models.ComputeNode.cpu_allocation_ratio == '0.0', models.ComputeNode.disk_allocation_ratio == '0.0', models.ComputeNode.ram_allocation_ratio == sql.null(), models.ComputeNode.cpu_allocation_ratio == sql.null(), models.ComputeNode.disk_allocation_ratio == sql.null() )).filter(models.ComputeNode.deleted == 0).limit(max_count).all() @db.pick_context_manager_writer def migrate_empty_ratio(context, max_count): cns = _get_node_empty_ratio(context, max_count) # NOTE(yikun): If it's an existing record with 0.0 or None values, # we need to migrate this record using 'xxx_allocation_ratio' config # if it's set, and fallback to use the 'initial_xxx_allocation_ratio' # otherwise. for cn in cns: for t in ['cpu', 'disk', 'ram']: current_ratio = getattr(cn, '%s_allocation_ratio' % t) if current_ratio in (0.0, None): r = getattr(CONF, "%s_allocation_ratio" % t) init_x_ratio = getattr(CONF, "initial_%s_allocation_ratio" % t) conf_alloc_ratio = r if r else init_x_ratio setattr(cn, '%s_allocation_ratio' % t, conf_alloc_ratio) context.session.add(cn) found = done = len(cns) return found, done ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/console_auth_token.py0000664000175000017500000002072600000000000021422 0ustar00zuulzuul00000000000000# Copyright 2015 Intel Corp # Copyright 2016 Hewlett Packard Enterprise Development Company LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from urllib import parse as urlparse from oslo_db.exception import DBDuplicateEntry from oslo_log import log as logging from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import uuidutils from oslo_utils import versionutils from nova.db.main import api as db from nova import exception from nova.i18n import _ from nova.objects import base from nova.objects import fields from nova import utils LOG = logging.getLogger(__name__) @base.NovaObjectRegistry.register class ConsoleAuthToken(base.NovaTimestampObject, base.NovaObject): # Version 1.0: Initial version # Version 1.1: Add clean_expired_console_auths method. # The clean_expired_console_auths_for_host method # was deprecated. # Version 1.2: Add expires field. # This is to see token expire time. # Version 1.3: Added tls_port field. VERSION = '1.3' fields = { 'id': fields.IntegerField(), 'console_type': fields.StringField(nullable=False), 'host': fields.StringField(nullable=False), 'port': fields.IntegerField(nullable=False), 'tls_port': fields.IntegerField(nullable=True), 'internal_access_path': fields.StringField(nullable=True), 'instance_uuid': fields.UUIDField(nullable=False), 'access_url_base': fields.StringField(nullable=True), # NOTE(PaulMurray): The unhashed token field is not stored in the # database. A hash of the token is stored instead and is not a # field on the object. 'token': fields.StringField(nullable=False), 'expires': fields.IntegerField(nullable=False), } @property def access_url(self): """The access url with token parameter. :returns: the access url with credential parameters access_url_base is the base url used to access a console. Adding the unhashed token as a parameter in a query string makes it specific to this authorization. """ if self.obj_attr_is_set('id'): if self.console_type == 'novnc': # NOTE(melwitt): As of noVNC v1.1.0, we must use the 'path' # query parameter to pass the auth token within, as the # top-level 'token' query parameter was removed. The 'path' # parameter is supported in older noVNC versions, so it is # backward compatible. parsed_base_url = urlparse.urlparse(self.access_url_base) qparams = urlparse.parse_qs(parsed_base_url.query) qpath = '%s?token=%s' % (qparams.get('path', [''])[0], self.token) qparams.update({'path': qpath}) return parsed_base_url._replace( query=urlparse.urlencode(qparams, doseq=True)).geturl() else: return '%s?token=%s' % (self.access_url_base, self.token) def obj_make_compatible(self, primitive, target_version): super().obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 3) and 'tls_port' in primitive: primitive.pop('tls_port', None) if target_version < (1, 2) and 'expires' in primitive: primitive.pop('expires', None) @staticmethod def _from_db_object(context, obj, db_obj): # NOTE(PaulMurray): token is not stored in the database but # this function assumes it is in db_obj. The unhashed token # field is populated in the authorize method after the token # authorization is created in the database. 
for field in obj.fields: setattr(obj, field, db_obj.get(field)) obj._context = context obj.obj_reset_changes() return obj @base.remotable def authorize(self, ttl): """Authorise the console token and store in the database. :param ttl: time to live in seconds :returns: an authorized token The expires value is set for ttl seconds in the future and the token hash is stored in the database. This function can only succeed if the token is unique and the object has not already been stored. """ if self.obj_attr_is_set('id'): raise exception.ObjectActionError( action='authorize', reason=_('must be a new object to authorize')) token = uuidutils.generate_uuid() token_hash = utils.get_sha256_str(token) expires = timeutils.utcnow_ts() + ttl updates = self.obj_get_changes() # NOTE(melwitt): token could be in the updates if authorize() has been # called twice on the same object. 'token' is not a database column and # should not be included in the call to create the database record. if 'token' in updates: del updates['token'] updates['token_hash'] = token_hash updates['expires'] = expires try: db_obj = db.console_auth_token_create(self._context, updates) db_obj['token'] = token self._from_db_object(self._context, self, db_obj) except DBDuplicateEntry: # NOTE(PaulMurray) we are generating the token above so this # should almost never happen - but technically its possible raise exception.TokenInUse() LOG.debug("Authorized token with expiry %(expires)s for console " "connection %(console)s", {'expires': expires, 'console': strutils.mask_password(self)}) return token @base.remotable_classmethod def validate(cls, context, token): """Validate the token. :param context: the context :param token: the token for the authorization :returns: The ConsoleAuthToken object if valid The token is valid if the token is in the database and the expires time has not passed. """ token_hash = utils.get_sha256_str(token) db_obj = db.console_auth_token_get_valid(context, token_hash) if db_obj is not None: db_obj['token'] = token obj = cls._from_db_object(context, cls(), db_obj) LOG.debug("Validated token - console connection is " "%(console)s", {'console': strutils.mask_password(obj)}) return obj else: LOG.debug("Token validation failed") raise exception.InvalidToken(token='***') @base.remotable_classmethod def clean_console_auths_for_instance(cls, context, instance_uuid): """Remove all console authorizations for the instance. :param context: the context :param instance_uuid: the instance to be cleaned All authorizations related to the specified instance will be removed from the database. """ db.console_auth_token_destroy_all_by_instance(context, instance_uuid) @base.remotable_classmethod def clean_expired_console_auths(cls, context): """Remove all expired console authorizations. :param context: the context All expired authorizations will be removed. Tokens that have not expired will remain. """ db.console_auth_token_destroy_expired(context) # TODO(takashin): This method was deprecated and will be removed # in a next major version bump. @base.remotable_classmethod def clean_expired_console_auths_for_host(cls, context, host): """Remove all expired console authorizations for the host. :param context: the context :param host: the host name All expired authorizations related to the specified host will be removed. Tokens that have not expired will remain. 
""" db.console_auth_token_destroy_expired_by_host(context, host) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/diagnostics.py0000664000175000017500000001513000000000000020037 0ustar00zuulzuul00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.objects import base from nova.objects import fields @base.NovaObjectRegistry.register class CpuDiagnostics(base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.IntegerField(nullable=True), 'time': fields.IntegerField(nullable=True), 'utilisation': fields.IntegerField(nullable=True), } @base.NovaObjectRegistry.register class NicDiagnostics(base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'mac_address': fields.MACAddressField(nullable=True), 'rx_octets': fields.IntegerField(nullable=True), 'rx_errors': fields.IntegerField(nullable=True), 'rx_drop': fields.IntegerField(nullable=True), 'rx_packets': fields.IntegerField(nullable=True), 'rx_rate': fields.IntegerField(nullable=True), 'tx_octets': fields.IntegerField(nullable=True), 'tx_errors': fields.IntegerField(nullable=True), 'tx_drop': fields.IntegerField(nullable=True), 'tx_packets': fields.IntegerField(nullable=True), 'tx_rate': fields.IntegerField(nullable=True) } @base.NovaObjectRegistry.register class DiskDiagnostics(base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'read_bytes': fields.IntegerField(nullable=True), 'read_requests': fields.IntegerField(nullable=True), 'write_bytes': fields.IntegerField(nullable=True), 'write_requests': fields.IntegerField(nullable=True), 'errors_count': fields.IntegerField(nullable=True) } @base.NovaObjectRegistry.register class MemoryDiagnostics(base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'maximum': fields.IntegerField(nullable=True), 'used': fields.IntegerField(nullable=True) } @base.NovaObjectRegistry.register class Diagnostics(base.NovaEphemeralObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'state': fields.InstancePowerStateField(), 'driver': fields.HypervisorDriverField(), 'hypervisor': fields.StringField(nullable=True), 'hypervisor_os': fields.StringField(nullable=True), 'uptime': fields.IntegerField(nullable=True), 'config_drive': fields.BooleanField(), 'memory_details': fields.ObjectField('MemoryDiagnostics', default=MemoryDiagnostics()), 'cpu_details': fields.ListOfObjectsField('CpuDiagnostics', default=[]), 'nic_details': fields.ListOfObjectsField('NicDiagnostics', default=[]), 'disk_details': fields.ListOfObjectsField('DiskDiagnostics', default=[]), 'num_cpus': fields.IntegerField(), 'num_nics': fields.IntegerField(), 'num_disks': fields.IntegerField() } def __init__(self, *args, **kwargs): super(Diagnostics, self).__init__(*args, **kwargs) self.num_cpus = len(self.cpu_details) self.num_nics = len(self.nic_details) self.num_disks = len(self.disk_details) def add_cpu(self, 
id=None, time=None, utilisation=None): """Add a new CpuDiagnostics object :param id: The virtual cpu number (Integer) :param time: CPU Time in nano seconds (Integer) :param utilisation: CPU utilisation in percentages (Integer) """ self.num_cpus += 1 self.cpu_details.append( CpuDiagnostics(id=id, time=time, utilisation=utilisation)) def add_nic(self, mac_address=None, rx_octets=None, rx_errors=None, rx_drop=None, rx_packets=None, rx_rate=None, tx_octets=None, tx_errors=None, tx_drop=None, tx_packets=None, tx_rate=None): """Add a new NicDiagnostics object :param mac_address: Mac address of the interface (String) :param rx_octets: Received octets (Integer) :param rx_errors: Received errors (Integer) :param rx_drop: Received packets dropped (Integer) :param rx_packets: Received packets (Integer) :param rx_rate: Receive rate (Integer) :param tx_octets: Transmitted Octets (Integer) :param tx_errors: Transmit errors (Integer) :param tx_drop: Transmit dropped packets (Integer) :param tx_packets: Transmit packets (Integer) :param tx_rate: Transmit rate (Integer) """ self.num_nics += 1 self.nic_details.append(NicDiagnostics(mac_address=mac_address, rx_octets=rx_octets, rx_errors=rx_errors, rx_drop=rx_drop, rx_packets=rx_packets, rx_rate=rx_rate, tx_octets=tx_octets, tx_errors=tx_errors, tx_drop=tx_drop, tx_packets=tx_packets, tx_rate=tx_rate)) def add_disk(self, read_bytes=None, read_requests=None, write_bytes=None, write_requests=None, errors_count=None): """Create a new DiskDiagnostics object :param read_bytes: Disk reads in bytes(Integer) :param read_requests: Read requests (Integer) :param write_bytes: Disk writes in bytes (Integer) :param write_requests: Write requests (Integer) :param errors_count: Disk errors (Integer) """ self.num_disks += 1 self.disk_details.append(DiskDiagnostics(read_bytes=read_bytes, read_requests=read_requests, write_bytes=write_bytes, write_requests=write_requests, errors_count=errors_count)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/ec2.py0000664000175000017500000001611100000000000016201 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
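
# Illustrative standalone sketch of the token handling documented in
# ConsoleAuthToken above (console_auth_token.py): only a SHA-256 hash of a
# freshly generated token is kept, together with an expiry timestamp;
# validation re-hashes the presented token; and for noVNC the unhashed token
# is folded into the 'path' query parameter of the access URL rather than a
# top-level 'token' parameter. Names below are hypothetical and the in-memory
# dict stands in for the database record; this is not Nova's implementation.
def _demo_console_token_flow(access_url_base, ttl=600):
    import hashlib
    import time
    import uuid
    from urllib import parse as urlparse

    token = str(uuid.uuid4())
    record = {
        'token_hash': hashlib.sha256(token.encode('utf-8')).hexdigest(),
        'expires': int(time.time()) + ttl,
    }

    def validate(presented):
        # Re-hash the presented token and check it has not expired.
        presented_hash = hashlib.sha256(
            presented.encode('utf-8')).hexdigest()
        return (presented_hash == record['token_hash'] and
                int(time.time()) < record['expires'])

    # Build a noVNC-style access URL: the token travels inside the 'path'
    # query parameter, e.g. .../vnc_auto.html?path=%3Ftoken%3D<token>
    parsed = urlparse.urlparse(access_url_base)
    qparams = urlparse.parse_qs(parsed.query)
    qpath = '%s?token=%s' % (qparams.get('path', [''])[0], token)
    qparams.update({'path': qpath})
    access_url = parsed._replace(
        query=urlparse.urlencode(qparams, doseq=True)).geturl()

    return token, validate, access_url

# Example (hypothetical URL):
#   token, validate, url = _demo_console_token_flow(
#       'http://proxy.example.org/vnc_auto.html')
#   validate(token) -> True
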
import functools from oslo_utils import uuidutils from nova import cache_utils from nova.db.main import api as db from nova import exception from nova.objects import base from nova.objects import fields # NOTE(vish): cache mapping for one week _CACHE_TIME = 7 * 24 * 60 * 60 _CACHE = None def memoize(func): @functools.wraps(func) def memoizer(context, reqid): global _CACHE if not _CACHE: _CACHE = cache_utils.get_client(expiration_time=_CACHE_TIME) key = "%s:%s" % (func.__name__, reqid) key = str(key) value = _CACHE.get(key) if value is None: value = func(context, reqid) _CACHE.set(key, value) return value return memoizer def id_to_ec2_id(instance_id, template='i-%08x'): """Convert an instance ID (int) to an ec2 ID (i-[base 16 number]).""" return template % int(instance_id) def id_to_ec2_inst_id(context, instance_id): """Get or create an ec2 instance ID (i-[base 16 number]) from uuid.""" if instance_id is None: return None elif uuidutils.is_uuid_like(instance_id): int_id = get_int_id_from_instance_uuid(context, instance_id) return id_to_ec2_id(int_id) else: return id_to_ec2_id(instance_id) @memoize def get_int_id_from_instance_uuid(context, instance_uuid): if instance_uuid is None: return try: imap = EC2InstanceMapping.get_by_uuid(context, instance_uuid) return imap.id except exception.NotFound: imap = EC2InstanceMapping(context) imap.uuid = instance_uuid imap.create() return imap.id def glance_id_to_ec2_id(context, glance_id, image_type='ami'): image_id = glance_id_to_id(context, glance_id) if image_id is None: return template = image_type + '-%08x' return id_to_ec2_id(image_id, template=template) @memoize def glance_id_to_id(context, glance_id): """Convert a glance id to an internal (db) id.""" if not glance_id: return try: return S3ImageMapping.get_by_uuid(context, glance_id).id except exception.NotFound: s3imap = S3ImageMapping(context, uuid=glance_id) s3imap.create() return s3imap.id def glance_type_to_ec2_type(image_type): """Converts to a three letter image type. 
aki, kernel => aki ari, ramdisk => ari anything else => ami """ if image_type == 'kernel': return 'aki' if image_type == 'ramdisk': return 'ari' if image_type not in ['aki', 'ari']: return 'ami' return image_type @base.NovaObjectRegistry.register class EC2InstanceMapping(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.IntegerField(), 'uuid': fields.UUIDField(), } @staticmethod def _from_db_object(context, imap, db_imap): for field in imap.fields: setattr(imap, field, db_imap[field]) imap._context = context imap.obj_reset_changes() return imap @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') db_imap = db.ec2_instance_create(self._context, self.uuid) self._from_db_object(self._context, self, db_imap) @base.remotable_classmethod def get_by_uuid(cls, context, instance_uuid): db_imap = db.ec2_instance_get_by_uuid(context, instance_uuid) if db_imap: return cls._from_db_object(context, cls(), db_imap) @base.remotable_classmethod def get_by_id(cls, context, ec2_id): db_imap = db.ec2_instance_get_by_id(context, ec2_id) if db_imap: return cls._from_db_object(context, cls(), db_imap) @base.NovaObjectRegistry.register class S3ImageMapping(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.IntegerField(read_only=True), 'uuid': fields.UUIDField(), } @staticmethod def _from_db_object(context, s3imap, db_s3imap): for field in s3imap.fields: setattr(s3imap, field, db_s3imap[field]) s3imap._context = context s3imap.obj_reset_changes() return s3imap @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') db_s3imap = db.s3_image_create(self._context, self.uuid) self._from_db_object(self._context, self, db_s3imap) @base.remotable_classmethod def get_by_uuid(cls, context, s3_image_uuid): db_s3imap = db.s3_image_get_by_uuid(context, s3_image_uuid) if db_s3imap: return cls._from_db_object(context, cls(context), db_s3imap) @base.remotable_classmethod def get_by_id(cls, context, s3_id): db_s3imap = db.s3_image_get(context, s3_id) if db_s3imap: return cls._from_db_object(context, cls(context), db_s3imap) @base.NovaObjectRegistry.register class EC2Ids(base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'instance_id': fields.StringField(read_only=True), 'ami_id': fields.StringField(nullable=True, read_only=True), 'kernel_id': fields.StringField(nullable=True, read_only=True), 'ramdisk_id': fields.StringField(nullable=True, read_only=True), } @staticmethod def _from_dict(ec2ids, dict_ec2ids): for field in ec2ids.fields: setattr(ec2ids, field, dict_ec2ids[field]) return ec2ids @staticmethod def _get_ec2_ids(context, instance): ec2_ids = {} ec2_ids['instance_id'] = id_to_ec2_inst_id(context, instance.uuid) ec2_ids['ami_id'] = glance_id_to_ec2_id(context, instance.image_ref) for image_type in ['kernel', 'ramdisk']: image_id = getattr(instance, '%s_id' % image_type) ec2_id = None if image_id is not None: ec2_image_type = glance_type_to_ec2_type(image_type) ec2_id = glance_id_to_ec2_id(context, image_id, ec2_image_type) ec2_ids['%s_id' % image_type] = ec2_id return ec2_ids @base.remotable_classmethod def get_by_instance(cls, context, instance): ec2_ids = cls._get_ec2_ids(context, instance) return cls._from_dict(cls(context), ec2_ids) 
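
# Illustrative sketch of the id formatting documented above: an internal
# integer id becomes an EC2-style id such as 'i-0000002a' (base 16, zero
# padded), and image ids get an 'ami-'/'aki-'/'ari-' prefix depending on the
# image type. The helper below is a hypothetical, simplified stand-in for
# id_to_ec2_id()/glance_type_to_ec2_type(), not the functions themselves.
def _demo_ec2_id_round_trip(internal_id, image_type=None):
    prefix = 'i'
    if image_type is not None:
        prefix = {'kernel': 'aki', 'ramdisk': 'ari'}.get(image_type, 'ami')
    ec2_id = '%s-%08x' % (prefix, int(internal_id))
    # The numeric part converts back with base 16 parsing.
    parsed_back = int(ec2_id.split('-', 1)[1], 16)
    assert parsed_back == int(internal_id)
    return ec2_id

# Examples:
#   _demo_ec2_id_round_trip(42) -> 'i-0000002a'
#   _demo_ec2_id_round_trip(42, image_type='kernel') -> 'aki-0000002a'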
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/external_event.py0000664000175000017500000000462500000000000020562 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.objects import base as obj_base from nova.objects import fields EVENT_NAMES = [ # Network has changed for this instance, rebuild info_cache 'network-changed', # VIF plugging notifications, tag is port_id 'network-vif-plugged', 'network-vif-unplugged', 'network-vif-deleted', # Volume was extended for this instance, tag is volume_id 'volume-extended', # Power state has changed for this instance 'power-update', # Accelerator Request got bound, tag is ARQ uuid. # Sent when an ARQ for an instance has been bound or failed to bind. 'accelerator-request-bound', # re-image operation has completed from cinder side 'volume-reimaged', ] EVENT_STATUSES = ['failed', 'completed', 'in-progress'] # Possible tag values for the power-update event. POWER_ON = 'POWER_ON' POWER_OFF = 'POWER_OFF' @obj_base.NovaObjectRegistry.register class InstanceExternalEvent(obj_base.NovaObject): # Version 1.0: Initial version # Supports network-changed and vif-plugged # Version 1.1: adds network-vif-deleted event # Version 1.2: adds volume-extended event # Version 1.3: adds power-update event # Version 1.4: adds accelerator-request-bound event # Version 1.5: adds volume-reimaged event VERSION = '1.5' fields = { 'instance_uuid': fields.UUIDField(), 'name': fields.EnumField(valid_values=EVENT_NAMES), 'status': fields.EnumField(valid_values=EVENT_STATUSES), 'tag': fields.StringField(nullable=True), 'data': fields.DictOfStringsField(), } @staticmethod def make_key(name, tag=None): if tag is not None: return '%s-%s' % (name, tag) else: return name @property def key(self): return self.make_key(self.name, self.tag) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/fields.py0000664000175000017500000011636000000000000017005 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
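
# Illustrative sketch of the event key convention used by
# InstanceExternalEvent above (external_event.py): events are keyed by name,
# or by '<name>-<tag>' when a tag (for example a port or volume id) is
# present. The helper name and the example tag below are hypothetical.
def _demo_event_key(name, tag=None):
    return '%s-%s' % (name, tag) if tag is not None else name

# Examples:
#   _demo_event_key('network-changed') -> 'network-changed'
#   _demo_event_key('network-vif-plugged', '<port_id>')
#       -> 'network-vif-plugged-<port_id>'
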
import os import re from cursive import signature_utils from oslo_serialization import jsonutils from oslo_versionedobjects import fields from nova import exception from nova.i18n import _ from nova.network import model as network_model from nova.virt import arch # Import field errors from oslo.versionedobjects KeyTypeError = fields.KeyTypeError ElementTypeError = fields.ElementTypeError # Import fields from oslo.versionedobjects BooleanField = fields.BooleanField UnspecifiedDefault = fields.UnspecifiedDefault IntegerField = fields.IntegerField NonNegativeIntegerField = fields.NonNegativeIntegerField UUIDField = fields.UUIDField FloatField = fields.FloatField NonNegativeFloatField = fields.NonNegativeFloatField StringField = fields.StringField SensitiveStringField = fields.SensitiveStringField EnumField = fields.EnumField DateTimeField = fields.DateTimeField DictOfStringsField = fields.DictOfStringsField DictOfNullableStringsField = fields.DictOfNullableStringsField DictOfIntegersField = fields.DictOfIntegersField ListOfStringsField = fields.ListOfStringsField ListOfUUIDField = fields.ListOfUUIDField SetOfIntegersField = fields.SetOfIntegersField ListOfSetsOfIntegersField = fields.ListOfSetsOfIntegersField ListOfDictOfNullableStringsField = fields.ListOfDictOfNullableStringsField DictProxyField = fields.DictProxyField ObjectField = fields.ObjectField ListOfObjectsField = fields.ListOfObjectsField VersionPredicateField = fields.VersionPredicateField FlexibleBooleanField = fields.FlexibleBooleanField DictOfListOfStringsField = fields.DictOfListOfStringsField IPAddressField = fields.IPAddressField IPV4AddressField = fields.IPV4AddressField IPV6AddressField = fields.IPV6AddressField IPV4AndV6AddressField = fields.IPV4AndV6AddressField IPNetworkField = fields.IPNetworkField IPV4NetworkField = fields.IPV4NetworkField IPV6NetworkField = fields.IPV6NetworkField AutoTypedField = fields.AutoTypedField BaseEnumField = fields.BaseEnumField MACAddressField = fields.MACAddressField ListOfIntegersField = fields.ListOfIntegersField PCIAddressField = fields.PCIAddressField # NOTE(danms): These are things we need to import for some of our # own implementations below, our tests, or other transitional # bits of code. These should be removable after we finish our # conversion. So do not use these nova fields directly in any new code; # instead, use the oslo.versionedobjects fields. Enum = fields.Enum Field = fields.Field FieldType = fields.FieldType Set = fields.Set Dict = fields.Dict List = fields.List Object = fields.Object IPAddress = fields.IPAddress IPV4Address = fields.IPV4Address IPV6Address = fields.IPV6Address IPNetwork = fields.IPNetwork IPV4Network = fields.IPV4Network IPV6Network = fields.IPV6Network class ResourceClass(fields.StringPattern): PATTERN = r"^[A-Z0-9_]+$" _REGEX = re.compile(PATTERN) @staticmethod def coerce(obj, attr, value): if isinstance(value, str): uppered = value.upper() if ResourceClass._REGEX.match(uppered): return uppered raise ValueError(_("Malformed Resource Class %s") % value) class ResourceClassField(AutoTypedField): AUTO_TYPE = ResourceClass() class SetOfStringsField(AutoTypedField): AUTO_TYPE = Set(fields.String()) class BaseNovaEnum(Enum): def __init__(self, **kwargs): super(BaseNovaEnum, self).__init__(valid_values=self.__class__.ALL) class Architecture(BaseNovaEnum): """Represents CPU architectures. Provides the standard names for all known processor architectures. 
Many have multiple variants to deal with big-endian vs little-endian modes, as well as 32 vs 64 bit word sizes. These names are chosen to be identical to the architecture names expected by libvirt, so if ever adding new ones then ensure it matches libvirt's expectation. """ ALPHA = arch.ALPHA ARMV6 = arch.ARMV6 ARMV7 = arch.ARMV7 ARMV7B = arch.ARMV7B AARCH64 = arch.AARCH64 CRIS = arch.CRIS I686 = arch.I686 IA64 = arch.IA64 LM32 = arch.LM32 M68K = arch.M68K MICROBLAZE = arch.MICROBLAZE MICROBLAZEEL = arch.MICROBLAZEEL MIPS = arch.MIPS MIPSEL = arch.MIPSEL MIPS64 = arch.MIPS64 MIPS64EL = arch.MIPS64EL OPENRISC = arch.OPENRISC PARISC = arch.PARISC PARISC64 = arch.PARISC64 PPC = arch.PPC PPCLE = arch.PPCLE PPC64 = arch.PPC64 PPC64LE = arch.PPC64LE PPCEMB = arch.PPCEMB S390 = arch.S390 S390X = arch.S390X SH4 = arch.SH4 SH4EB = arch.SH4EB SPARC = arch.SPARC SPARC64 = arch.SPARC64 UNICORE32 = arch.UNICORE32 X86_64 = arch.X86_64 XTENSA = arch.XTENSA XTENSAEB = arch.XTENSAEB ALL = arch.ALL @classmethod def from_host(cls): """Get the architecture of the host OS :returns: the canonicalized host architecture """ return cls.canonicalize(os.uname().machine) @classmethod def is_valid(cls, name): """Check if a string is a valid architecture :param name: architecture name to validate :returns: True if @name is valid """ return name in cls.ALL @classmethod def canonicalize(cls, name): """Canonicalize the architecture name :param name: architecture name to canonicalize :returns: a canonical architecture name """ if name is None: return None newname = name.lower() if newname in ("i386", "i486", "i586"): newname = cls.I686 # Xen mistake from Icehouse or earlier if newname in ("x86_32", "x86_32p"): newname = cls.I686 if newname == "amd64": newname = cls.X86_64 if not cls.is_valid(newname): raise exception.InvalidArchitectureName(arch=name) return newname def coerce(self, obj, attr, value): try: value = self.canonicalize(value) except exception.InvalidArchitectureName: msg = _("Architecture name '%s' is not valid") % value raise ValueError(msg) return super(Architecture, self).coerce(obj, attr, value) class BlockDeviceDestinationType(BaseNovaEnum): """Represents possible destination_type values for a BlockDeviceMapping.""" LOCAL = 'local' VOLUME = 'volume' ALL = (LOCAL, VOLUME) class BlockDeviceSourceType(BaseNovaEnum): """Represents the possible source_type values for a BlockDeviceMapping.""" BLANK = 'blank' IMAGE = 'image' SNAPSHOT = 'snapshot' VOLUME = 'volume' ALL = (BLANK, IMAGE, SNAPSHOT, VOLUME) class BlockDeviceType(BaseNovaEnum): """Represents possible device_type values for a BlockDeviceMapping.""" CDROM = 'cdrom' DISK = 'disk' FLOPPY = 'floppy' FS = 'fs' LUN = 'lun' ALL = (CDROM, DISK, FLOPPY, FS, LUN) class BlockDeviceEncryptionFormatType(BaseNovaEnum): PLAIN = 'plain' LUKS = 'luks' LUKSv2 = 'luksv2' ALL = (PLAIN, LUKS, LUKSv2) class ConfigDrivePolicy(BaseNovaEnum): OPTIONAL = "optional" MANDATORY = "mandatory" ALL = (OPTIONAL, MANDATORY) class CPUAllocationPolicy(BaseNovaEnum): DEDICATED = "dedicated" SHARED = "shared" MIXED = "mixed" ALL = (DEDICATED, SHARED, MIXED) class CPUThreadAllocationPolicy(BaseNovaEnum): # prefer (default): The host may or may not have hyperthreads. This # retains the legacy behavior, whereby siblings are preferred when # available. This is the default if no policy is specified. PREFER = "prefer" # isolate: The host may or many not have hyperthreads. 
If hyperthreads are # present, each vCPU will be placed on a different core and no vCPUs from # other guests will be able to be placed on the same core, i.e. one # thread sibling is guaranteed to always be unused. If hyperthreads are # not present, each vCPU will still be placed on a different core and # there are no thread siblings to be concerned with. ISOLATE = "isolate" # require: The host must have hyperthreads. Each vCPU will be allocated on # thread siblings. REQUIRE = "require" ALL = (PREFER, ISOLATE, REQUIRE) class CPUEmulatorThreadsPolicy(BaseNovaEnum): # share (default): Emulator threads float across the pCPUs # associated to the guest. SHARE = "share" # isolate: Emulator threads are isolated on a single pCPU. ISOLATE = "isolate" ALL = (SHARE, ISOLATE) class CPUMode(BaseNovaEnum): CUSTOM = 'custom' HOST_MODEL = 'host-model' HOST_PASSTHROUGH = 'host-passthrough' ALL = (CUSTOM, HOST_MODEL, HOST_PASSTHROUGH) class CPUMatch(BaseNovaEnum): MINIMUM = 'minimum' EXACT = 'exact' STRICT = 'strict' ALL = (MINIMUM, EXACT, STRICT) class CPUFeaturePolicy(BaseNovaEnum): FORCE = 'force' REQUIRE = 'require' OPTIONAL = 'optional' DISABLE = 'disable' FORBID = 'forbid' ALL = (FORCE, REQUIRE, OPTIONAL, DISABLE, FORBID) class DiskBus(BaseNovaEnum): # NOTE(aspiers): If you change this, don't forget to update the # docs and metadata for hw_*_bus in glance. # NOTE(lyarwood): Also update the possible values in the api-ref for the # block_device_mapping_v2.disk_bus parameter. FDC = "fdc" IDE = "ide" SATA = "sata" SCSI = "scsi" USB = "usb" VIRTIO = "virtio" XEN = "xen" LXC = "lxc" UML = "uml" ALL = (FDC, IDE, SATA, SCSI, USB, VIRTIO, XEN, LXC, UML) class DiskConfig(BaseNovaEnum): MANUAL = "MANUAL" AUTO = "AUTO" ALL = (MANUAL, AUTO) def coerce(self, obj, attr, value): enum_value = DiskConfig.AUTO if value else DiskConfig.MANUAL return super(DiskConfig, self).coerce(obj, attr, enum_value) class FirmwareType(BaseNovaEnum): UEFI = "uefi" BIOS = "bios" ALL = (UEFI, BIOS) class HVType(BaseNovaEnum): """Represents virtualization types. Provide the standard names for all known guest virtualization types. This is not to be confused with the Nova hypervisor driver types, since one driver may support multiple virtualization types and one virtualization type may be supported by multiple drivers. 
""" BAREMETAL = 'baremetal' BHYVE = 'bhyve' DOCKER = 'docker' FAKE = 'fake' HYPERV = 'hyperv' IRONIC = 'ironic' KQEMU = 'kqemu' KVM = 'kvm' LXC = 'lxc' LXD = 'lxd' OPENVZ = 'openvz' PARALLELS = 'parallels' VIRTUOZZO = 'vz' PHYP = 'phyp' QEMU = 'qemu' TEST = 'test' UML = 'uml' VBOX = 'vbox' VMWARE = 'vmware' XEN = 'xen' ZVM = 'zvm' PRSM = 'prsm' ALL = (BAREMETAL, BHYVE, DOCKER, FAKE, HYPERV, IRONIC, KQEMU, KVM, LXC, LXD, OPENVZ, PARALLELS, PHYP, QEMU, TEST, UML, VBOX, VIRTUOZZO, VMWARE, XEN, ZVM, PRSM) def coerce(self, obj, attr, value): try: value = self.canonicalize(value) except exception.InvalidHypervisorVirtType: msg = _("Hypervisor virt type '%s' is not valid") % value raise ValueError(msg) return super(HVType, self).coerce(obj, attr, value) @classmethod def is_valid(cls, name): """Check if a string is a valid hypervisor type :param name: hypervisor type name to validate :returns: True if @name is valid """ return name in cls.ALL @classmethod def canonicalize(cls, name): """Canonicalize the hypervisor type name :param name: hypervisor type name to canonicalize :returns: a canonical hypervisor type name """ if name is None: return None newname = name.lower() if newname == 'xapi': newname = cls.XEN if not cls.is_valid(newname): raise exception.InvalidHypervisorVirtType(hv_type=name) return newname class ImageSignatureHashType(BaseNovaEnum): # Represents the possible hash methods used for image signing ALL = tuple(sorted(signature_utils.HASH_METHODS.keys())) class ImageSignatureKeyType(BaseNovaEnum): # Represents the possible keypair types used for image signing ALL = ( 'DSA', 'ECC_SECP384R1', 'ECC_SECP521R1', 'ECC_SECT409K1', 'ECC_SECT409R1', 'ECC_SECT571K1', 'ECC_SECT571R1', 'RSA-PSS' ) class InputBus(BaseNovaEnum): USB = 'usb' VIRTIO = 'virtio' ALL = (USB, VIRTIO) class MigrationType(BaseNovaEnum): MIGRATION = 'migration' # cold migration RESIZE = 'resize' LIVE_MIGRATION = 'live-migration' EVACUATION = 'evacuation' ALL = (MIGRATION, RESIZE, LIVE_MIGRATION, EVACUATION) class OSType(BaseNovaEnum): LINUX = "linux" WINDOWS = "windows" ALL = (LINUX, WINDOWS) def coerce(self, obj, attr, value): # Some code/docs use upper case or initial caps # so canonicalize to all lower case value = value.lower() return super(OSType, self).coerce(obj, attr, value) class RNGModel(BaseNovaEnum): # NOTE(kchamart): Along with "virtio", we may need to extend this (if a # good reason shows up) to allow two more values for VirtIO # transitional and non-transitional devices (available since libvirt # 5.2.0): # # - virtio-transitional # - virtio-nontransitional # # This allows one to choose whether you want to have compatibility # with older guest operating systems. The value you select will in # turn decide the kind of PCI topology the guest will get. 
# # Details: # https://libvirt.org/formatdomain.html#elementsVirtioTransitional VIRTIO = "virtio" ALL = (VIRTIO,) class ShareMappingStatus(BaseNovaEnum): """Represents the possible status of a share mapping""" ATTACHING = "attaching" DETACHING = "detaching" ACTIVE = "active" INACTIVE = "inactive" ERROR = "error" ALL = (ATTACHING, DETACHING, ACTIVE, INACTIVE, ERROR) class ShareMappingProto(BaseNovaEnum): """Represents the possible protocol used by a share mapping""" NFS = "NFS" CEPHFS = "CEPHFS" ALL = (NFS, CEPHFS) class TPMModel(BaseNovaEnum): TIS = "tpm-tis" CRB = "tpm-crb" ALL = (TIS, CRB) class TPMVersion(BaseNovaEnum): v1_2 = "1.2" v2_0 = "2.0" ALL = (v1_2, v2_0) class MemEncryptionModel(BaseNovaEnum): AMD_SEV = "amd-sev" AMD_SEV_ES = "amd-sev-es" ALL = (AMD_SEV, AMD_SEV_ES) class MaxPhyAddrMode(BaseNovaEnum): PASSTHROUGH = "passthrough" EMULATE = "emulate" ALL = (PASSTHROUGH, EMULATE) class SCSIModel(BaseNovaEnum): BUSLOGIC = "buslogic" IBMVSCSI = "ibmvscsi" LSILOGIC = "lsilogic" LSISAS1068 = "lsisas1068" LSISAS1078 = "lsisas1078" VIRTIO_SCSI = "virtio-scsi" VMPVSCSI = "vmpvscsi" ALL = (BUSLOGIC, IBMVSCSI, LSILOGIC, LSISAS1068, LSISAS1078, VIRTIO_SCSI, VMPVSCSI) def coerce(self, obj, attr, value): # Some compat for strings we'd see in the legacy # vmware_adaptertype image property value = value.lower() if value == "lsilogicsas": value = SCSIModel.LSISAS1068 elif value == "paravirtual": value = SCSIModel.VMPVSCSI return super(SCSIModel, self).coerce(obj, attr, value) class SecureBoot(BaseNovaEnum): REQUIRED = "required" DISABLED = "disabled" OPTIONAL = "optional" ALL = (REQUIRED, DISABLED, OPTIONAL) class VideoModel(BaseNovaEnum): CIRRUS = "cirrus" QXL = "qxl" VGA = "vga" VMVGA = "vmvga" XEN = "xen" VIRTIO = 'virtio' GOP = 'gop' NONE = 'none' BOCHS = 'bochs' ALL = (CIRRUS, QXL, VGA, VMVGA, XEN, VIRTIO, GOP, NONE, BOCHS) class VIFModel(BaseNovaEnum): LEGACY_VALUES = {"virtuale1000": network_model.VIF_MODEL_E1000, "virtuale1000e": network_model.VIF_MODEL_E1000E, "virtualpcnet32": network_model.VIF_MODEL_PCNET, "virtualsriovethernetcard": network_model.VIF_MODEL_SRIOV, "virtualvmxnet": network_model.VIF_MODEL_VMXNET, "virtualvmxnet3": network_model.VIF_MODEL_VMXNET3, } ALL = network_model.VIF_MODEL_ALL def coerce(self, obj, attr, value): # Some compat for strings we'd see in the legacy # hw_vif_model image property value = value.lower() value = VIFModel.LEGACY_VALUES.get(value, value) return super(VIFModel, self).coerce(obj, attr, value) class VIOMMUModel(BaseNovaEnum): INTEL = 'intel' SMMUV3 = 'smmuv3' VIRTIO = 'virtio' AUTO = 'auto' ALL = (INTEL, SMMUV3, VIRTIO, AUTO) class VMMode(BaseNovaEnum): """Represents possible vm modes for instances. Compute instance VM modes represent the host/guest ABI used for the virtual machine or container. Individual hypervisors may support multiple different vm modes per host. Available VM modes for a hypervisor driver may also vary according to the architecture it is running on. 
""" HVM = 'hvm' # Native ABI (aka fully virtualized) XEN = 'xen' # Xen 3.0 paravirtualized UML = 'uml' # User Mode Linux paravirtualized EXE = 'exe' # Executables in containers ALL = (HVM, XEN, UML, EXE) def coerce(self, obj, attr, value): try: value = self.canonicalize(value) except exception.InvalidVirtualMachineMode: msg = _("Virtual machine mode '%s' is not valid") % value raise ValueError(msg) return super(VMMode, self).coerce(obj, attr, value) @classmethod def get_from_instance(cls, instance): """Get the vm mode for an instance :param instance: instance object to query :returns: canonicalized vm mode for the instance """ mode = instance.vm_mode return cls.canonicalize(mode) @classmethod def is_valid(cls, name): """Check if a string is a valid vm mode :param name: vm mode name to validate :returns: True if @name is valid """ return name in cls.ALL @classmethod def canonicalize(cls, mode): """Canonicalize the vm mode :param name: vm mode name to canonicalize :returns: a canonical vm mode name """ if mode is None: return None mode = mode.lower() # For compatibility with pre-Folsom deployments if mode == 'pv': mode = cls.XEN if mode == 'hv': mode = cls.HVM if mode == 'baremetal': mode = cls.HVM if not cls.is_valid(mode): raise exception.InvalidVirtualMachineMode(vmmode=mode) return mode class WatchdogAction(BaseNovaEnum): NONE = "none" PAUSE = "pause" POWEROFF = "poweroff" RESET = "reset" DISABLED = "disabled" ALL = (NONE, PAUSE, POWEROFF, RESET, DISABLED) class MonitorMetricType(BaseNovaEnum): CPU_FREQUENCY = "cpu.frequency" CPU_USER_TIME = "cpu.user.time" CPU_KERNEL_TIME = "cpu.kernel.time" CPU_IDLE_TIME = "cpu.idle.time" CPU_IOWAIT_TIME = "cpu.iowait.time" CPU_USER_PERCENT = "cpu.user.percent" CPU_KERNEL_PERCENT = "cpu.kernel.percent" CPU_IDLE_PERCENT = "cpu.idle.percent" CPU_IOWAIT_PERCENT = "cpu.iowait.percent" CPU_PERCENT = "cpu.percent" NUMA_MEM_BW_MAX = "numa.membw.max" NUMA_MEM_BW_CURRENT = "numa.membw.current" ALL = ( CPU_FREQUENCY, CPU_USER_TIME, CPU_KERNEL_TIME, CPU_IDLE_TIME, CPU_IOWAIT_TIME, CPU_USER_PERCENT, CPU_KERNEL_PERCENT, CPU_IDLE_PERCENT, CPU_IOWAIT_PERCENT, CPU_PERCENT, NUMA_MEM_BW_MAX, NUMA_MEM_BW_CURRENT, ) class HostStatus(BaseNovaEnum): UP = "UP" # The nova-compute is up. DOWN = "DOWN" # The nova-compute is forced_down. MAINTENANCE = "MAINTENANCE" # The nova-compute is disabled. UNKNOWN = "UNKNOWN" # The nova-compute has not reported. NONE = "" # No host or nova-compute. ALL = (UP, DOWN, MAINTENANCE, UNKNOWN, NONE) class PciDeviceStatus(BaseNovaEnum): AVAILABLE = "available" CLAIMED = "claimed" ALLOCATED = "allocated" REMOVED = "removed" # The device has been hot-removed and not yet deleted DELETED = "deleted" # The device is marked not available/deleted. UNCLAIMABLE = "unclaimable" UNAVAILABLE = "unavailable" ALL = (AVAILABLE, CLAIMED, ALLOCATED, REMOVED, DELETED, UNAVAILABLE, UNCLAIMABLE) class PciDeviceType(BaseNovaEnum): # NOTE(jaypipes): It's silly that the word "type-" is in these constants, # but alas, these were the original constant strings used... 
STANDARD = "type-PCI" SRIOV_PF = "type-PF" SRIOV_VF = "type-VF" # NOTE(sean-k-mooney): The DB field is Column(String(8), nullable=False) # type-vdpa is 9 long...and as Jay notes above the prefix is silly so # for the new vdpa value we drop the prefix to avoid a DB migration VDPA = "vdpa" ALL = (STANDARD, SRIOV_PF, SRIOV_VF, VDPA) class PCINUMAAffinityPolicy(BaseNovaEnum): REQUIRED = "required" LEGACY = "legacy" PREFERRED = "preferred" SOCKET = "socket" ALL = (REQUIRED, LEGACY, PREFERRED, SOCKET) class DiskFormat(BaseNovaEnum): RBD = "rbd" LVM = "lvm" QCOW2 = "qcow2" RAW = "raw" PLOOP = "ploop" VHD = "vhd" VMDK = "vmdk" VDI = "vdi" ISO = "iso" ALL = (RBD, LVM, QCOW2, RAW, PLOOP, VHD, VMDK, VDI, ISO) # TODO(stephenfin): Remove the xenapi and hyperv value when we bump the # 'Diagnostics' object (the only user of this enum) to 2.0 class HypervisorDriver(BaseNovaEnum): LIBVIRT = "libvirt" XENAPI = "xenapi" VMWAREAPI = "vmwareapi" IRONIC = "ironic" HYPERV = "hyperv" ALL = (LIBVIRT, XENAPI, VMWAREAPI, IRONIC, HYPERV) class PointerModelType(BaseNovaEnum): USBTABLET = "usbtablet" ALL = (USBTABLET,) class SoundModelType(BaseNovaEnum): SB16 = 'sb16' ES1370 = 'es1370' PCSPK = 'pcspk' AC97 = 'ac97' ICH6 = 'ich6' ICH9 = 'ich9' USB = 'usb' VIRTIO = 'virtio' ALL = (SB16, ES1370, PCSPK, AC97, ICH6, ICH9, USB, VIRTIO,) class USBControllerModelType(BaseNovaEnum): # NOTE(mikal): qemu itself (and therefore libvirt) supports a lot more # USB controllers than this, but they're all very old and don't support # USB3. So while we could turn them on, it would be like adding in IDE # disk bus emulation -- the performance isn't great and its been a very # long time since an operating system was released which genuinely couldn't # use something better. NONE = 'none' NEC_XHCI = 'nec-xhci' QEMU_XHCI = 'qemu-xhci' ALL = (NONE, NEC_XHCI, QEMU_XHCI) class NotificationPriority(BaseNovaEnum): AUDIT = 'audit' CRITICAL = 'critical' DEBUG = 'debug' INFO = 'info' ERROR = 'error' SAMPLE = 'sample' WARN = 'warn' ALL = (AUDIT, CRITICAL, DEBUG, INFO, ERROR, SAMPLE, WARN) class NotificationPhase(BaseNovaEnum): START = 'start' END = 'end' ERROR = 'error' PROGRESS = 'progress' ALL = (START, END, ERROR, PROGRESS) class NotificationSource(BaseNovaEnum): """Represents possible nova binary service names in notification envelope. The publisher_id field of the nova notifications consists of the name of the host and the name of the service binary that emits the notification. The below values are the ones that is used in every notification. Please note that on the REST API the nova-api service binary is called nova-osapi_compute. This is not reflected here as notifications always used the name nova-api instead. """ COMPUTE = 'nova-compute' API = 'nova-api' CONDUCTOR = 'nova-conductor' SCHEDULER = 'nova-scheduler' # TODO(stephenfin): Remove 'NETWORK' when 'NotificationPublisher' is # updated to version 3.0 NETWORK = 'nova-network' # TODO(stephenfin): Remove 'CONSOLEAUTH' when 'NotificationPublisher' is # updated to version 3.0 CONSOLEAUTH = 'nova-consoleauth' # TODO(stephenfin): Remove when 'NotificationPublisher' object version is # bumped to 3.0 CELLS = 'nova-cells' # TODO(stephenfin): Remove when 'NotificationPublisher' object version is # bumped to 3.0 CONSOLE = 'nova-console' METADATA = 'nova-metadata' ALL = (API, COMPUTE, CONDUCTOR, SCHEDULER, NETWORK, CONSOLEAUTH, CELLS, CONSOLE, METADATA) @staticmethod def get_source_by_binary(binary): # nova-osapi_compute binary name needs to be translated to nova-api # notification source enum value. 
return "nova-api" if binary == "nova-osapi_compute" else binary class NotificationAction(BaseNovaEnum): UPDATE = 'update' EXCEPTION = 'exception' DELETE = 'delete' PAUSE = 'pause' UNPAUSE = 'unpause' RESIZE = 'resize' VOLUME_SWAP = 'volume_swap' SUSPEND = 'suspend' POWER_ON = 'power_on' POWER_OFF = 'power_off' REBOOT = 'reboot' SHUTDOWN = 'shutdown' SNAPSHOT = 'snapshot' INTERFACE_ATTACH = 'interface_attach' SHELVE = 'shelve' RESUME = 'resume' RESTORE = 'restore' EXISTS = 'exists' RESCUE = 'rescue' VOLUME_ATTACH = 'volume_attach' VOLUME_DETACH = 'volume_detach' SHARE_ATTACH = 'share_attach' SHARE_DETACH = 'share_detach' CREATE = 'create' IMPORT = 'import' EVACUATE = 'evacuate' RESIZE_FINISH = 'resize_finish' LIVE_MIGRATION_ABORT = 'live_migration_abort' LIVE_MIGRATION_POST_DEST = 'live_migration_post_dest' LIVE_MIGRATION_POST = 'live_migration_post' LIVE_MIGRATION_PRE = 'live_migration_pre' LIVE_MIGRATION_ROLLBACK_DEST = 'live_migration_rollback_dest' LIVE_MIGRATION_ROLLBACK = 'live_migration_rollback' LIVE_MIGRATION_FORCE_COMPLETE = 'live_migration_force_complete' REBUILD = 'rebuild' REBUILD_SCHEDULED = 'rebuild_scheduled' INTERFACE_DETACH = 'interface_detach' RESIZE_CONFIRM = 'resize_confirm' RESIZE_PREP = 'resize_prep' RESIZE_REVERT = 'resize_revert' SELECT_DESTINATIONS = 'select_destinations' SHELVE_OFFLOAD = 'shelve_offload' SOFT_DELETE = 'soft_delete' TRIGGER_CRASH_DUMP = 'trigger_crash_dump' UNRESCUE = 'unrescue' UNSHELVE = 'unshelve' ADD_HOST = 'add_host' REMOVE_HOST = 'remove_host' ADD_MEMBER = 'add_member' UPDATE_METADATA = 'update_metadata' LOCK = 'lock' UNLOCK = 'unlock' UPDATE_PROP = 'update_prop' CONNECT = 'connect' USAGE = 'usage' BUILD_INSTANCES = 'build_instances' MIGRATE_SERVER = 'migrate_server' REBUILD_SERVER = 'rebuild_server' IMAGE_CACHE = 'cache_images' ALL = (UPDATE, EXCEPTION, DELETE, PAUSE, UNPAUSE, RESIZE, VOLUME_SWAP, SUSPEND, POWER_ON, REBOOT, SHUTDOWN, SNAPSHOT, INTERFACE_ATTACH, POWER_OFF, SHELVE, RESUME, RESTORE, EXISTS, RESCUE, VOLUME_ATTACH, VOLUME_DETACH, SHARE_ATTACH, SHARE_DETACH, CREATE, IMPORT, EVACUATE, RESIZE_FINISH, LIVE_MIGRATION_ABORT, LIVE_MIGRATION_POST_DEST, LIVE_MIGRATION_POST, LIVE_MIGRATION_PRE, LIVE_MIGRATION_ROLLBACK, LIVE_MIGRATION_ROLLBACK_DEST, REBUILD, INTERFACE_DETACH, RESIZE_CONFIRM, RESIZE_PREP, RESIZE_REVERT, SHELVE_OFFLOAD, SOFT_DELETE, TRIGGER_CRASH_DUMP, UNRESCUE, UNSHELVE, ADD_HOST, REMOVE_HOST, ADD_MEMBER, UPDATE_METADATA, LOCK, UNLOCK, REBUILD_SCHEDULED, UPDATE_PROP, LIVE_MIGRATION_FORCE_COMPLETE, CONNECT, USAGE, BUILD_INSTANCES, MIGRATE_SERVER, REBUILD_SERVER, SELECT_DESTINATIONS, IMAGE_CACHE) # TODO(rlrossit): These should be changed over to be a StateMachine enum from # oslo.versionedobjects using the valid state transitions described in # nova.compute.vm_states class InstanceState(BaseNovaEnum): ACTIVE = 'active' BUILDING = 'building' PAUSED = 'paused' SUSPENDED = 'suspended' STOPPED = 'stopped' RESCUED = 'rescued' RESIZED = 'resized' SOFT_DELETED = 'soft-delete' DELETED = 'deleted' ERROR = 'error' SHELVED = 'shelved' SHELVED_OFFLOADED = 'shelved_offloaded' ALL = (ACTIVE, BUILDING, PAUSED, SUSPENDED, STOPPED, RESCUED, RESIZED, SOFT_DELETED, DELETED, ERROR, SHELVED, SHELVED_OFFLOADED) # TODO(rlrossit): These should be changed over to be a StateMachine enum from # oslo.versionedobjects using the valid state transitions described in # nova.compute.task_states class InstanceTaskState(BaseNovaEnum): SCHEDULING = 'scheduling' BLOCK_DEVICE_MAPPING = 'block_device_mapping' NETWORKING = 'networking' SPAWNING = 'spawning' 
IMAGE_SNAPSHOT = 'image_snapshot' IMAGE_SNAPSHOT_PENDING = 'image_snapshot_pending' IMAGE_PENDING_UPLOAD = 'image_pending_upload' IMAGE_UPLOADING = 'image_uploading' IMAGE_BACKUP = 'image_backup' UPDATING_PASSWORD = 'updating_password' RESIZE_PREP = 'resize_prep' RESIZE_MIGRATING = 'resize_migrating' RESIZE_MIGRATED = 'resize_migrated' RESIZE_FINISH = 'resize_finish' RESIZE_REVERTING = 'resize_reverting' RESIZE_CONFIRMING = 'resize_confirming' REBOOTING = 'rebooting' REBOOT_PENDING = 'reboot_pending' REBOOT_STARTED = 'reboot_started' REBOOTING_HARD = 'rebooting_hard' REBOOT_PENDING_HARD = 'reboot_pending_hard' REBOOT_STARTED_HARD = 'reboot_started_hard' PAUSING = 'pausing' UNPAUSING = 'unpausing' SUSPENDING = 'suspending' RESUMING = 'resuming' POWERING_OFF = 'powering-off' POWERING_ON = 'powering-on' RESCUING = 'rescuing' UNRESCUING = 'unrescuing' REBUILDING = 'rebuilding' REBUILD_BLOCK_DEVICE_MAPPING = "rebuild_block_device_mapping" REBUILD_SPAWNING = 'rebuild_spawning' MIGRATING = "migrating" DELETING = 'deleting' SOFT_DELETING = 'soft-deleting' RESTORING = 'restoring' SHELVING = 'shelving' SHELVING_IMAGE_PENDING_UPLOAD = 'shelving_image_pending_upload' SHELVING_IMAGE_UPLOADING = 'shelving_image_uploading' SHELVING_OFFLOADING = 'shelving_offloading' UNSHELVING = 'unshelving' ALL = (SCHEDULING, BLOCK_DEVICE_MAPPING, NETWORKING, SPAWNING, IMAGE_SNAPSHOT, IMAGE_SNAPSHOT_PENDING, IMAGE_PENDING_UPLOAD, IMAGE_UPLOADING, IMAGE_BACKUP, UPDATING_PASSWORD, RESIZE_PREP, RESIZE_MIGRATING, RESIZE_MIGRATED, RESIZE_FINISH, RESIZE_REVERTING, RESIZE_CONFIRMING, REBOOTING, REBOOT_PENDING, REBOOT_STARTED, REBOOTING_HARD, REBOOT_PENDING_HARD, REBOOT_STARTED_HARD, PAUSING, UNPAUSING, SUSPENDING, RESUMING, POWERING_OFF, POWERING_ON, RESCUING, UNRESCUING, REBUILDING, REBUILD_BLOCK_DEVICE_MAPPING, REBUILD_SPAWNING, MIGRATING, DELETING, SOFT_DELETING, RESTORING, SHELVING, SHELVING_IMAGE_PENDING_UPLOAD, SHELVING_IMAGE_UPLOADING, SHELVING_OFFLOADING, UNSHELVING) class InstancePowerState(Enum): _UNUSED = '_unused' NOSTATE = 'pending' RUNNING = 'running' PAUSED = 'paused' SHUTDOWN = 'shutdown' CRASHED = 'crashed' SUSPENDED = 'suspended' # The order is important here. If you make changes, only *append* # values to the end of the list. ALL = ( NOSTATE, RUNNING, _UNUSED, PAUSED, SHUTDOWN, _UNUSED, CRASHED, SUSPENDED, ) def __init__(self): super(InstancePowerState, self).__init__( valid_values=InstancePowerState.ALL) def coerce(self, obj, attr, value): try: value = int(value) value = self.from_index(value) except (ValueError, KeyError): pass return super(InstancePowerState, self).coerce(obj, attr, value) @classmethod def index(cls, value): """Return an index into the Enum given a value.""" return cls.ALL.index(value) @classmethod def from_index(cls, index): """Return the Enum value at a given index.""" return cls.ALL[index] class NetworkModel(FieldType): @staticmethod def coerce(obj, attr, value): if isinstance(value, network_model.NetworkInfo): return value elif isinstance(value, str): # Hmm, do we need this? 
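# NOTE(editor): illustrative aside, not part of the original module. Because
# InstancePowerState.ALL is ordered to line up with the integer power-state
# codes (which is why values may only ever be appended), the index helpers in
# that class round-trip between the two representations, for example:
#
#     InstancePowerState.from_index(1)                        # -> 'running'
#     InstancePowerState.index(InstancePowerState.SUSPENDED)  # -> 7
#
# coerce() accepts either form; an integer is first mapped through from_index().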
return network_model.NetworkInfo.hydrate(value) else: raise ValueError(_('A NetworkModel is required in field %s') % attr) @staticmethod def to_primitive(obj, attr, value): return value.json() @staticmethod def from_primitive(obj, attr, value): return network_model.NetworkInfo.hydrate(value) def stringify(self, value): return 'NetworkModel(%s)' % ( ','.join([str(vif['id']) for vif in value])) def get_schema(self): return {'type': ['string']} class NetworkVIFModel(FieldType): """Represents a nova.network.model.VIF object, which is a dict of stuff.""" @staticmethod def coerce(obj, attr, value): if isinstance(value, network_model.VIF): return value elif isinstance(value, str): return NetworkVIFModel.from_primitive(obj, attr, value) else: raise ValueError(_('A nova.network.model.VIF object is required ' 'in field %s') % attr) @staticmethod def to_primitive(obj, attr, value): return jsonutils.dumps(value) @staticmethod def from_primitive(obj, attr, value): return network_model.VIF.hydrate(jsonutils.loads(value)) def get_schema(self): return {'type': ['string']} class AddressBase(FieldType): @staticmethod def coerce(obj, attr, value): if re.match(obj.PATTERN, str(value)): return str(value) else: raise ValueError(_('Value must match %s') % obj.PATTERN) def get_schema(self): return {'type': ['string'], 'pattern': self.PATTERN} class USBAddress(AddressBase): PATTERN = '[a-f0-9]+:[a-f0-9]+' @staticmethod def coerce(obj, attr, value): return AddressBase.coerce(USBAddress, attr, value) class SCSIAddress(AddressBase): PATTERN = '[a-f0-9]+:[a-f0-9]+:[a-f0-9]+:[a-f0-9]+' @staticmethod def coerce(obj, attr, value): return AddressBase.coerce(SCSIAddress, attr, value) class IDEAddress(AddressBase): PATTERN = '[0-1]:[0-1]' @staticmethod def coerce(obj, attr, value): return AddressBase.coerce(IDEAddress, attr, value) class XenAddress(AddressBase): PATTERN = '(00[0-9]{2}00)|[1-9][0-9]+' @staticmethod def coerce(obj, attr, value): return AddressBase.coerce(XenAddress, attr, value) class USBAddressField(AutoTypedField): AUTO_TYPE = USBAddress() class SCSIAddressField(AutoTypedField): AUTO_TYPE = SCSIAddress() class IDEAddressField(AutoTypedField): AUTO_TYPE = IDEAddress() class XenAddressField(AutoTypedField): AUTO_TYPE = XenAddress() class ArchitectureField(BaseEnumField): AUTO_TYPE = Architecture() class BlockDeviceDestinationTypeField(BaseEnumField): AUTO_TYPE = BlockDeviceDestinationType() class BlockDeviceSourceTypeField(BaseEnumField): AUTO_TYPE = BlockDeviceSourceType() class BlockDeviceTypeField(BaseEnumField): AUTO_TYPE = BlockDeviceType() class BlockDeviceEncryptionFormatTypeField(BaseEnumField): AUTO_TYPE = BlockDeviceEncryptionFormatType() class ConfigDrivePolicyField(BaseEnumField): AUTO_TYPE = ConfigDrivePolicy() class CPUAllocationPolicyField(BaseEnumField): AUTO_TYPE = CPUAllocationPolicy() class CPUThreadAllocationPolicyField(BaseEnumField): AUTO_TYPE = CPUThreadAllocationPolicy() class CPUEmulatorThreadsPolicyField(BaseEnumField): AUTO_TYPE = CPUEmulatorThreadsPolicy() class CPUModeField(BaseEnumField): AUTO_TYPE = CPUMode() class CPUMatchField(BaseEnumField): AUTO_TYPE = CPUMatch() class CPUFeaturePolicyField(BaseEnumField): AUTO_TYPE = CPUFeaturePolicy() class DiskBusField(BaseEnumField): AUTO_TYPE = DiskBus() class DiskConfigField(BaseEnumField): AUTO_TYPE = DiskConfig() class FirmwareTypeField(BaseEnumField): AUTO_TYPE = FirmwareType() class HVTypeField(BaseEnumField): AUTO_TYPE = HVType() class ImageSignatureHashTypeField(BaseEnumField): AUTO_TYPE = ImageSignatureHashType() class 
ImageSignatureKeyTypeField(BaseEnumField): AUTO_TYPE = ImageSignatureKeyType() class InputBusField(BaseEnumField): AUTO_TYPE = InputBus() class MaxPhysAddrModeField(BaseEnumField): AUTO_TYPE = MaxPhyAddrMode() class MigrationTypeField(BaseEnumField): AUTO_TYPE = MigrationType() class OSTypeField(BaseEnumField): AUTO_TYPE = OSType() class RNGModelField(BaseEnumField): AUTO_TYPE = RNGModel() class ShareMappingStatusField(BaseEnumField): AUTO_TYPE = ShareMappingStatus() class ShareMappingProtoField(BaseEnumField): AUTO_TYPE = ShareMappingProto() class TPMModelField(BaseEnumField): AUTO_TYPE = TPMModel() class TPMVersionField(BaseEnumField): AUTO_TYPE = TPMVersion() class MemEncryptionModelField(BaseEnumField): AUTO_TYPE = MemEncryptionModel() class SCSIModelField(BaseEnumField): AUTO_TYPE = SCSIModel() class SecureBootField(BaseEnumField): AUTO_TYPE = SecureBoot() class VideoModelField(BaseEnumField): AUTO_TYPE = VideoModel() class VIFModelField(BaseEnumField): AUTO_TYPE = VIFModel() class VIOMMUModelField(BaseEnumField): AUTO_TYPE = VIOMMUModel() class VMModeField(BaseEnumField): AUTO_TYPE = VMMode() class WatchdogActionField(BaseEnumField): AUTO_TYPE = WatchdogAction() class MonitorMetricTypeField(BaseEnumField): AUTO_TYPE = MonitorMetricType() class PciDeviceStatusField(BaseEnumField): AUTO_TYPE = PciDeviceStatus() class PciDeviceTypeField(BaseEnumField): AUTO_TYPE = PciDeviceType() class PCINUMAAffinityPolicyField(BaseEnumField): AUTO_TYPE = PCINUMAAffinityPolicy() class DiskFormatField(BaseEnumField): AUTO_TYPE = DiskFormat() class HypervisorDriverField(BaseEnumField): AUTO_TYPE = HypervisorDriver() class PointerModelField(BaseEnumField): AUTO_TYPE = PointerModelType() class SoundModelField(BaseEnumField): AUTO_TYPE = SoundModelType() class USBControllerModelField(BaseEnumField): AUTO_TYPE = USBControllerModelType() class NotificationPriorityField(BaseEnumField): AUTO_TYPE = NotificationPriority() class NotificationPhaseField(BaseEnumField): AUTO_TYPE = NotificationPhase() class NotificationActionField(BaseEnumField): AUTO_TYPE = NotificationAction() class NotificationSourceField(BaseEnumField): AUTO_TYPE = NotificationSource() class InstanceStateField(BaseEnumField): AUTO_TYPE = InstanceState() class InstanceTaskStateField(BaseEnumField): AUTO_TYPE = InstanceTaskState() class InstancePowerStateField(BaseEnumField): AUTO_TYPE = InstancePowerState() class NetworkModelField(AutoTypedField): AUTO_TYPE = NetworkModel() class NetworkVIFModelField(AutoTypedField): AUTO_TYPE = NetworkVIFModel() class ListOfListsOfStringsField(AutoTypedField): AUTO_TYPE = List(List(fields.String())) class DictOfSetOfIntegersField(AutoTypedField): AUTO_TYPE = Dict(Set(fields.Integer())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/flavor.py0000664000175000017500000006337700000000000017041 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
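# NOTE(editor): illustrative aside, not part of the original tree. The typed
# field classes defined in nova/objects/fields.py above are consumed by listing
# them in a NovaObject's ``fields`` dict, as the Flavor object below does. A
# minimal, hypothetical object would look like:
#
#     from nova.objects import base
#     from nova.objects import fields
#
#     @base.NovaObjectRegistry.register
#     class ExampleGuestInfo(base.NovaObject):
#         VERSION = '1.0'
#         fields = {
#             'arch': fields.ArchitectureField(),
#             'hv_type': fields.HVTypeField(),
#             'vm_mode': fields.VMModeField(),
#         }
#
#     info = ExampleGuestInfo(arch='amd64')  # coerced to 'x86_64' on assignment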
from oslo_db import exception as db_exc from oslo_db.sqlalchemy import utils as sqlalchemyutils from oslo_utils import versionutils import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy import sql from sqlalchemy.sql import expression import nova.conf from nova.db.api import api as api_db_api from nova.db.api import models as api_models from nova.db import utils as db_utils from nova import exception from nova.notifications.objects import base as notification from nova.notifications.objects import flavor as flavor_notification from nova import objects from nova.objects import base from nova.objects import fields OPTIONAL_FIELDS = ['extra_specs', 'projects'] # Remove these fields in version 2.0 of the object. DEPRECATED_FIELDS = ['deleted', 'deleted_at'] # Non-joined fields which can be updated. MUTABLE_FIELDS = set(['description']) CONF = nova.conf.CONF def _dict_with_extra_specs(flavor_model): extra_specs = {x['key']: x['value'] for x in flavor_model['extra_specs']} return dict(flavor_model, extra_specs=extra_specs) # NOTE(danms): There are some issues with the oslo_db context manager # decorators with static methods. We pull these out for now and can # move them back into the actual staticmethods on the object when those # issues are resolved. @api_db_api.context_manager.reader def _get_projects_from_db(context, flavorid): db_flavor = context.session.query(api_models.Flavors).filter_by( flavorid=flavorid ).options( orm.joinedload(api_models.Flavors.projects) ).first() if not db_flavor: raise exception.FlavorNotFound(flavor_id=flavorid) return [x['project_id'] for x in db_flavor['projects']] @api_db_api.context_manager.writer def _flavor_add_project(context, flavor_id, project_id): project = api_models.FlavorProjects() project.update({'flavor_id': flavor_id, 'project_id': project_id}) try: project.save(context.session) except db_exc.DBDuplicateEntry: raise exception.FlavorAccessExists(flavor_id=flavor_id, project_id=project_id) @api_db_api.context_manager.writer def _flavor_del_project(context, flavor_id, project_id): result = context.session.query(api_models.FlavorProjects).\ filter_by(project_id=project_id).\ filter_by(flavor_id=flavor_id).\ delete() if result == 0: raise exception.FlavorAccessNotFound(flavor_id=flavor_id, project_id=project_id) @api_db_api.context_manager.writer def _flavor_extra_specs_add(context, flavor_id, specs, max_retries=10): writer = api_db_api.context_manager.writer for attempt in range(max_retries): try: spec_refs = context.session.query( api_models.FlavorExtraSpecs).\ filter_by(flavor_id=flavor_id).\ filter(api_models.FlavorExtraSpecs.key.in_( specs.keys())).\ all() existing_keys = set() for spec_ref in spec_refs: key = spec_ref["key"] existing_keys.add(key) with writer.savepoint.using(context): spec_ref.update({"value": specs[key]}) for key, value in specs.items(): if key in existing_keys: continue spec_ref = api_models.FlavorExtraSpecs() with writer.savepoint.using(context): spec_ref.update({"key": key, "value": value, "flavor_id": flavor_id}) context.session.add(spec_ref) return specs except db_exc.DBDuplicateEntry: # a concurrent transaction has been committed, # try again unless this was the last attempt if attempt == max_retries - 1: raise exception.FlavorExtraSpecUpdateCreateFailed( id=flavor_id, retries=max_retries) @api_db_api.context_manager.writer def _flavor_extra_specs_del(context, flavor_id, key): result = context.session.query(api_models.FlavorExtraSpecs).\ filter_by(flavor_id=flavor_id).\ filter_by(key=key).\ delete() if result 
== 0: raise exception.FlavorExtraSpecsNotFound( extra_specs_key=key, flavor_id=flavor_id) @api_db_api.context_manager.writer def _flavor_create(context, values): specs = values.get('extra_specs') db_specs = [] if specs: for k, v in specs.items(): db_spec = api_models.FlavorExtraSpecs() db_spec['key'] = k db_spec['value'] = v db_specs.append(db_spec) projects = values.get('projects') db_projects = [] if projects: for project in set(projects): db_project = api_models.FlavorProjects() db_project['project_id'] = project db_projects.append(db_project) values['extra_specs'] = db_specs values['projects'] = db_projects db_flavor = api_models.Flavors() db_flavor.update(values) try: db_flavor.save(context.session) except db_exc.DBDuplicateEntry as e: if 'flavorid' in e.columns: raise exception.FlavorIdExists(flavor_id=values['flavorid']) raise exception.FlavorExists(name=values['name']) except Exception as e: raise db_exc.DBError(e) return _dict_with_extra_specs(db_flavor) @api_db_api.context_manager.writer def _flavor_destroy(context, flavor_id=None, flavorid=None): query = context.session.query(api_models.Flavors) if flavor_id is not None: query = query.filter(api_models.Flavors.id == flavor_id) else: query = query.filter(api_models.Flavors.flavorid == flavorid) result = query.first() if not result: raise exception.FlavorNotFound(flavor_id=(flavor_id or flavorid)) context.session.query(api_models.FlavorProjects).\ filter_by(flavor_id=result.id).delete() context.session.query(api_models.FlavorExtraSpecs).\ filter_by(flavor_id=result.id).delete() context.session.delete(result) return result # TODO(berrange): Remove NovaObjectDictCompat # TODO(mriedem): Remove NovaPersistentObject in version 2.0 @base.NovaObjectRegistry.register class Flavor(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Added save_projects(), save_extra_specs(), removed # remotable from save() # Version 1.2: Added description field. Note: this field should not be # persisted with the embedded instance.flavor. 
VERSION = '1.2' fields = { 'id': fields.IntegerField(), 'name': fields.StringField(nullable=True), 'memory_mb': fields.IntegerField(), 'vcpus': fields.IntegerField(), 'root_gb': fields.IntegerField(), 'ephemeral_gb': fields.IntegerField(), 'flavorid': fields.StringField(), 'swap': fields.IntegerField(), 'rxtx_factor': fields.FloatField(nullable=True, default=1.0), 'vcpu_weight': fields.IntegerField(nullable=True), 'disabled': fields.BooleanField(), 'is_public': fields.BooleanField(), 'extra_specs': fields.DictOfStringsField(), 'projects': fields.ListOfStringsField(), 'description': fields.StringField(nullable=True) } def __init__(self, *args, **kwargs): super(Flavor, self).__init__(*args, **kwargs) self._orig_extra_specs = {} self._orig_projects = [] def obj_make_compatible(self, primitive, target_version): super(Flavor, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 2) and 'description' in primitive: del primitive['description'] @staticmethod def _from_db_object(context, flavor, db_flavor, expected_attrs=None): if expected_attrs is None: expected_attrs = [] flavor._context = context for name, field in flavor.fields.items(): if name in OPTIONAL_FIELDS: continue if name in DEPRECATED_FIELDS and name not in db_flavor: continue value = db_flavor[name] if isinstance(field, fields.IntegerField): value = value if value is not None else 0 flavor[name] = value # NOTE(danms): This is to support processing the API flavor # model, which does not have these deprecated fields. When we # remove compatibility with the old InstanceType model, we can # remove this as well. if any(f not in db_flavor for f in DEPRECATED_FIELDS): flavor.deleted_at = None flavor.deleted = False if 'extra_specs' in expected_attrs: flavor.extra_specs = db_flavor['extra_specs'] if 'projects' in expected_attrs: if 'projects' in db_flavor: flavor['projects'] = [x['project_id'] for x in db_flavor['projects']] else: flavor._load_projects() flavor.obj_reset_changes() return flavor @staticmethod def _flavor_get_query_from_db(context): # We don't use a database context decorator on this method because this # method is not executing a query, it's only building one. 
query = context.session.query(api_models.Flavors).options( orm.joinedload(api_models.Flavors.extra_specs) ) if not context.is_admin: the_filter = [api_models.Flavors.is_public == sql.true()] the_filter.extend([ api_models.Flavors.projects.any(project_id=context.project_id) ]) query = query.filter(sa.or_(*the_filter)) return query @staticmethod @db_utils.require_context @api_db_api.context_manager.reader def _flavor_get_from_db(context, id): """Returns a dict describing specific flavor.""" result = Flavor._flavor_get_query_from_db(context).\ filter_by(id=id).\ first() if not result: raise exception.FlavorNotFound(flavor_id=id) return _dict_with_extra_specs(result) @staticmethod @db_utils.require_context @api_db_api.context_manager.reader def _flavor_get_by_name_from_db(context, name): """Returns a dict describing specific flavor.""" result = Flavor._flavor_get_query_from_db(context).\ filter_by(name=name).\ first() if not result: raise exception.FlavorNotFoundByName(flavor_name=name) return _dict_with_extra_specs(result) @staticmethod @db_utils.require_context @api_db_api.context_manager.reader def _flavor_get_by_flavor_id_from_db(context, flavor_id): """Returns a dict describing specific flavor_id.""" result = Flavor._flavor_get_query_from_db(context).\ filter_by(flavorid=flavor_id).\ order_by(expression.asc(api_models.Flavors.id)).\ first() if not result: raise exception.FlavorNotFound(flavor_id=flavor_id) return _dict_with_extra_specs(result) @staticmethod def _get_projects_from_db(context, flavorid): return _get_projects_from_db(context, flavorid) @base.remotable def _load_projects(self): self.projects = self._get_projects_from_db(self._context, self.flavorid) self.obj_reset_changes(['projects']) @base.lazy_load_counter def obj_load_attr(self, attrname): # NOTE(danms): Only projects could be lazy-loaded right now if attrname != 'projects': raise exception.ObjectActionError( action='obj_load_attr', reason='unable to load %s' % attrname) self._load_projects() def obj_reset_changes(self, fields=None, recursive=False): super(Flavor, self).obj_reset_changes(fields=fields, recursive=recursive) if fields is None or 'extra_specs' in fields: self._orig_extra_specs = (dict(self.extra_specs) if self.obj_attr_is_set('extra_specs') else {}) if fields is None or 'projects' in fields: self._orig_projects = (list(self.projects) if self.obj_attr_is_set('projects') else []) def obj_what_changed(self): changes = super(Flavor, self).obj_what_changed() if ('extra_specs' in self and self.extra_specs != self._orig_extra_specs): changes.add('extra_specs') if 'projects' in self and self.projects != self._orig_projects: changes.add('projects') return changes @classmethod def _obj_from_primitive(cls, context, objver, primitive): self = super(Flavor, cls)._obj_from_primitive(context, objver, primitive) changes = self.obj_what_changed() if 'extra_specs' not in changes: # This call left extra_specs "clean" so update our tracker self._orig_extra_specs = (dict(self.extra_specs) if self.obj_attr_is_set('extra_specs') else {}) if 'projects' not in changes: # This call left projects "clean" so update our tracker self._orig_projects = (list(self.projects) if self.obj_attr_is_set('projects') else []) return self @base.remotable_classmethod def get_by_id(cls, context, id): db_flavor = cls._flavor_get_from_db(context, id) return cls._from_db_object(context, cls(context), db_flavor, expected_attrs=['extra_specs']) @base.remotable_classmethod def get_by_name(cls, context, name): db_flavor = 
cls._flavor_get_by_name_from_db(context, name) return cls._from_db_object(context, cls(context), db_flavor, expected_attrs=['extra_specs']) @base.remotable_classmethod def get_by_flavor_id(cls, context, flavor_id, read_deleted=None): db_flavor = cls._flavor_get_by_flavor_id_from_db(context, flavor_id) return cls._from_db_object(context, cls(context), db_flavor, expected_attrs=['extra_specs']) @staticmethod def _flavor_add_project(context, flavor_id, project_id): return _flavor_add_project(context, flavor_id, project_id) @staticmethod def _flavor_del_project(context, flavor_id, project_id): return _flavor_del_project(context, flavor_id, project_id) def _add_access(self, project_id): self._flavor_add_project(self._context, self.id, project_id) @base.remotable def add_access(self, project_id): if 'projects' in self.obj_what_changed(): raise exception.ObjectActionError(action='add_access', reason='projects modified') self._add_access(project_id) self._load_projects() self._send_notification(fields.NotificationAction.UPDATE) def _remove_access(self, project_id): self._flavor_del_project(self._context, self.id, project_id) @base.remotable def remove_access(self, project_id): if 'projects' in self.obj_what_changed(): raise exception.ObjectActionError(action='remove_access', reason='projects modified') self._remove_access(project_id) self._load_projects() self._send_notification(fields.NotificationAction.UPDATE) @staticmethod def _flavor_create(context, updates): return _flavor_create(context, updates) @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') updates = self.obj_get_changes() expected_attrs = [] for attr in OPTIONAL_FIELDS: if attr in updates: expected_attrs.append(attr) db_flavor = self._flavor_create(self._context, updates) self._from_db_object(self._context, self, db_flavor, expected_attrs=expected_attrs) self._send_notification(fields.NotificationAction.CREATE) @base.remotable def save_projects(self, to_add=None, to_delete=None): """Add or delete projects. :param:to_add: A list of projects to add :param:to_delete: A list of projects to remove """ to_add = to_add if to_add is not None else [] to_delete = to_delete if to_delete is not None else [] for project_id in to_add: self._add_access(project_id) for project_id in to_delete: self._remove_access(project_id) self.obj_reset_changes(['projects']) @staticmethod def _flavor_extra_specs_add(context, flavor_id, specs, max_retries=10): return _flavor_extra_specs_add(context, flavor_id, specs, max_retries) @staticmethod def _flavor_extra_specs_del(context, flavor_id, key): return _flavor_extra_specs_del(context, flavor_id, key) @base.remotable def save_extra_specs(self, to_add=None, to_delete=None): """Add or delete extra_specs. :param:to_add: A dict of new keys to add/update :param:to_delete: A list of keys to remove """ to_add = to_add if to_add is not None else {} to_delete = to_delete if to_delete is not None else [] if to_add: self._flavor_extra_specs_add(self._context, self.id, to_add) for key in to_delete: self._flavor_extra_specs_del(self._context, self.id, key) self.obj_reset_changes(['extra_specs']) # NOTE(mriedem): This method is not remotable since we only expect the API # to be able to make updates to a flavor. 
@api_db_api.context_manager.writer def _save(self, context, values): db_flavor = context.session.query(api_models.Flavors).\ filter_by(id=self.id).first() if not db_flavor: raise exception.FlavorNotFound(flavor_id=self.id) db_flavor.update(values) db_flavor.save(context.session) # Refresh ourselves from the DB object so we get the new updated_at. self._from_db_object(context, self, db_flavor) self.obj_reset_changes() def save(self): updates = self.obj_get_changes() projects = updates.pop('projects', None) extra_specs = updates.pop('extra_specs', None) if updates: # Only allowed to update from the whitelist of mutable fields. if set(updates.keys()) - MUTABLE_FIELDS: raise exception.ObjectActionError( action='save', reason='read-only fields were changed') self._save(self._context, updates) if extra_specs is not None: deleted_keys = (set(self._orig_extra_specs.keys()) - set(extra_specs.keys())) added_keys = self.extra_specs else: added_keys = deleted_keys = None if projects is not None: deleted_projects = set(self._orig_projects) - set(projects) added_projects = set(projects) - set(self._orig_projects) else: added_projects = deleted_projects = None # NOTE(danms): The first remotable method we call will reset # our of the original values for projects and extra_specs. Thus, # we collect the added/deleted lists for both above and /then/ # call these methods to update them. if added_keys or deleted_keys: self.save_extra_specs(self.extra_specs, deleted_keys) if added_projects or deleted_projects: self.save_projects(added_projects, deleted_projects) if (added_keys or deleted_keys or added_projects or deleted_projects or updates): self._send_notification(fields.NotificationAction.UPDATE) @staticmethod def _flavor_destroy(context, flavor_id=None, flavorid=None): return _flavor_destroy(context, flavor_id=flavor_id, flavorid=flavorid) @base.remotable def destroy(self): # NOTE(danms): Historically the only way to delete a flavor # is via name, which is not very precise. We need to be able to # support the light construction of a flavor object and subsequent # delete request with only our name filled out. However, if we have # our id property, we should instead delete with that since it's # far more specific. if 'id' in self: db_flavor = self._flavor_destroy(self._context, flavor_id=self.id) else: db_flavor = self._flavor_destroy(self._context, flavorid=self.flavorid) self._from_db_object(self._context, self, db_flavor) self._send_notification(fields.NotificationAction.DELETE) def _send_notification(self, action): # NOTE(danms): Instead of making the below notification # lazy-load projects (which is a problem for instance-bound # flavors and compute-cell operations), just load them here. if 'projects' not in self: # If the flavor is deleted we can't lazy-load projects. # FlavorPayload will orphan the flavor which will make the # NotificationPayloadBase set projects=None in the notification # payload. 
if action != fields.NotificationAction.DELETE: self._load_projects() notification_type = flavor_notification.FlavorNotification payload_type = flavor_notification.FlavorPayload payload = payload_type(self) notification_type( publisher=notification.NotificationPublisher( host=CONF.host, source=fields.NotificationSource.API), event_type=notification.EventType(object="flavor", action=action), priority=fields.NotificationPriority.INFO, payload=payload).emit(self._context) @api_db_api.context_manager.reader def _flavor_get_all_from_db(context, inactive, filters, sort_key, sort_dir, limit, marker): """Returns all flavors. """ filters = filters or {} query = Flavor._flavor_get_query_from_db(context) if 'min_memory_mb' in filters: query = query.filter( api_models.Flavors.memory_mb >= filters['min_memory_mb']) if 'min_root_gb' in filters: query = query.filter( api_models.Flavors.root_gb >= filters['min_root_gb']) if 'disabled' in filters: query = query.filter( api_models.Flavors.disabled == filters['disabled']) if 'is_public' in filters and filters['is_public'] is not None: the_filter = [api_models.Flavors.is_public == filters['is_public']] if filters['is_public'] and context.project_id is not None: the_filter.extend([api_models.Flavors.projects.any( project_id=context.project_id)]) if len(the_filter) > 1: query = query.filter(sa.or_(*the_filter)) else: query = query.filter(the_filter[0]) marker_row = None if marker is not None: marker_row = Flavor._flavor_get_query_from_db(context).\ filter_by(flavorid=marker).\ first() if not marker_row: raise exception.MarkerNotFound(marker=marker) query = sqlalchemyutils.paginate_query(query, api_models.Flavors, limit, [sort_key, 'id'], marker=marker_row, sort_dir=sort_dir) return [_dict_with_extra_specs(i) for i in query.all()] @base.NovaObjectRegistry.register class FlavorList(base.ObjectListBase, base.NovaObject): VERSION = '1.1' fields = { 'objects': fields.ListOfObjectsField('Flavor'), } @base.remotable_classmethod def get_all(cls, context, inactive=False, filters=None, sort_key='flavorid', sort_dir='asc', limit=None, marker=None): api_db_flavors = _flavor_get_all_from_db(context, inactive=inactive, filters=filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker) return base.obj_make_list(context, cls(context), objects.Flavor, api_db_flavors, expected_attrs=['extra_specs']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/host_mapping.py0000664000175000017500000002475300000000000020233 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
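# NOTE(editor): illustrative aside, not part of the original module. A typical
# consumer drives the Flavor object above roughly as follows (the flavor values
# here are made up):
#
#     flavor = objects.Flavor(context, name='example.small', flavorid='es1',
#                             memory_mb=512, vcpus=1, root_gb=10,
#                             ephemeral_gb=0, swap=0, disabled=False,
#                             is_public=False)
#     flavor.create()                                   # emits a CREATE notification
#     flavor.save_extra_specs(to_add={'hw:cpu_policy': 'dedicated'})
#     flavor.add_access('some-project-id')              # grants a project access
#     flavor.description = 'example flavor'
#     flavor.save()                                     # only MUTABLE_FIELDS may change
#
# extra_specs and projects changes go through their dedicated save methods;
# Flavor.save() rejects any other modified read-only field.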
from oslo_db import exception as db_exc from sqlalchemy import orm from nova import context from nova.db.api import api as api_db_api from nova.db.api import models as api_models from nova import exception from nova.i18n import _ from nova.objects import base from nova.objects import cell_mapping from nova.objects import fields def _cell_id_in_updates(updates): cell_mapping_obj = updates.pop("cell_mapping", None) if cell_mapping_obj: updates["cell_id"] = cell_mapping_obj.id def _apply_updates(context, db_mapping, updates): db_mapping.update(updates) db_mapping.save(context.session) # NOTE: This is done because a later access will trigger a lazy load # outside of the db session so it will fail. We don't lazy load # cell_mapping on the object later because we never need a HostMapping # without the CellMapping. db_mapping.cell_mapping return db_mapping @base.NovaObjectRegistry.register class HostMapping(base.NovaTimestampObject, base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.IntegerField(read_only=True), 'host': fields.StringField(), 'cell_mapping': fields.ObjectField('CellMapping'), } def _get_cell_mapping(self): with api_db_api.context_manager.reader.using(self._context) as session: cell_map = (session.query(api_models.CellMapping) .join(api_models.HostMapping) .filter(api_models.HostMapping.host == self.host) .first()) if cell_map is not None: return cell_mapping.CellMapping._from_db_object( self._context, cell_mapping.CellMapping(), cell_map) def _load_cell_mapping(self): self.cell_mapping = self._get_cell_mapping() @base.lazy_load_counter def obj_load_attr(self, attrname): if attrname == 'cell_mapping': self._load_cell_mapping() @staticmethod def _from_db_object(context, host_mapping, db_host_mapping): for key in host_mapping.fields: db_value = db_host_mapping.get(key) if key == "cell_mapping": # NOTE(dheeraj): If cell_mapping is stashed in db object # we load it here. 
Otherwise, lazy loading will happen # when .cell_mapping is accessed later if not db_value: continue db_value = cell_mapping.CellMapping._from_db_object( host_mapping._context, cell_mapping.CellMapping(), db_value) setattr(host_mapping, key, db_value) host_mapping.obj_reset_changes() host_mapping._context = context return host_mapping @staticmethod @api_db_api.context_manager.reader def _get_by_host_from_db(context, host): db_mapping = context.session.query( api_models.HostMapping ).options( orm.joinedload(api_models.HostMapping.cell_mapping) ).filter( api_models.HostMapping.host == host ).first() if not db_mapping: raise exception.HostMappingNotFound(name=host) return db_mapping @base.remotable_classmethod def get_by_host(cls, context, host): db_mapping = cls._get_by_host_from_db(context, host) return cls._from_db_object(context, cls(), db_mapping) @staticmethod @api_db_api.context_manager.writer def _create_in_db(context, updates): db_mapping = api_models.HostMapping() return _apply_updates(context, db_mapping, updates) @base.remotable def create(self): changes = self.obj_get_changes() # cell_mapping must be mapped to cell_id for create _cell_id_in_updates(changes) db_mapping = self._create_in_db(self._context, changes) self._from_db_object(self._context, self, db_mapping) @staticmethod @api_db_api.context_manager.writer def _save_in_db(context, obj, updates): db_mapping = context.session.query(api_models.HostMapping).filter_by( id=obj.id).first() if not db_mapping: raise exception.HostMappingNotFound(name=obj.host) return _apply_updates(context, db_mapping, updates) @base.remotable def save(self): changes = self.obj_get_changes() # cell_mapping must be mapped to cell_id for updates _cell_id_in_updates(changes) db_mapping = self._save_in_db(self._context, self, changes) self._from_db_object(self._context, self, db_mapping) self.obj_reset_changes() @staticmethod @api_db_api.context_manager.writer def _destroy_in_db(context, host): result = context.session.query(api_models.HostMapping).filter_by( host=host).delete() if not result: raise exception.HostMappingNotFound(name=host) @base.remotable def destroy(self): self._destroy_in_db(self._context, self.host) @base.NovaObjectRegistry.register class HostMappingList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Version 1.1: Add get_all method VERSION = '1.1' fields = { 'objects': fields.ListOfObjectsField('HostMapping'), } @staticmethod @api_db_api.context_manager.reader def _get_from_db(context, cell_id=None): query = context.session.query(api_models.HostMapping).options( orm.joinedload(api_models.HostMapping.cell_mapping) ) if cell_id: query = query.filter(api_models.HostMapping.cell_id == cell_id) return query.all() @base.remotable_classmethod def get_by_cell_id(cls, context, cell_id): db_mappings = cls._get_from_db(context, cell_id) return base.obj_make_list(context, cls(), HostMapping, db_mappings) @base.remotable_classmethod def get_all(cls, context): db_mappings = cls._get_from_db(context) return base.obj_make_list(context, cls(), HostMapping, db_mappings) def _create_host_mapping(host_mapping): try: host_mapping.create() except db_exc.DBDuplicateEntry: raise exception.HostMappingExists(name=host_mapping.host) def _check_and_create_node_host_mappings(ctxt, cm, compute_nodes, status_fn): host_mappings = [] for compute in compute_nodes: status_fn(_("Checking host mapping for compute host " "'%(host)s': %(uuid)s") % {'host': compute.host, 'uuid': compute.uuid}) try: HostMapping.get_by_host(ctxt, compute.host) 
except exception.HostMappingNotFound: status_fn(_("Creating host mapping for compute host " "'%(host)s': %(uuid)s") % {'host': compute.host, 'uuid': compute.uuid}) host_mapping = HostMapping( ctxt, host=compute.host, cell_mapping=cm) _create_host_mapping(host_mapping) host_mappings.append(host_mapping) compute.mapped = 1 compute.save() return host_mappings def _check_and_create_service_host_mappings(ctxt, cm, services, status_fn): host_mappings = [] for service in services: try: HostMapping.get_by_host(ctxt, service.host) except exception.HostMappingNotFound: status_fn(_('Creating host mapping for service %(srv)s') % {'srv': service.host}) host_mapping = HostMapping( ctxt, host=service.host, cell_mapping=cm) _create_host_mapping(host_mapping) host_mappings.append(host_mapping) return host_mappings def _check_and_create_host_mappings(ctxt, cm, status_fn, by_service): from nova import objects if by_service: services = objects.ServiceList.get_by_binary( ctxt, 'nova-compute', include_disabled=True) added_hm = _check_and_create_service_host_mappings(ctxt, cm, services, status_fn) else: compute_nodes = objects.ComputeNodeList.get_all_by_not_mapped( ctxt, 1) added_hm = _check_and_create_node_host_mappings(ctxt, cm, compute_nodes, status_fn) return added_hm def discover_hosts(ctxt, cell_uuid=None, status_fn=None, by_service=False): # TODO(alaski): If this is not run on a host configured to use the API # database most of the lookups below will fail and may not provide a # great error message. Add a check which will raise a useful error # message about running this from an API host. from nova import objects if not status_fn: status_fn = lambda x: None if cell_uuid: cell_mappings = [objects.CellMapping.get_by_uuid(ctxt, cell_uuid)] else: cell_mappings = objects.CellMappingList.get_all(ctxt) status_fn(_('Found %s cell mappings.') % len(cell_mappings)) host_mappings = [] for cm in cell_mappings: if cm.is_cell0(): status_fn(_('Skipping cell0 since it does not contain hosts.')) continue if 'name' in cm and cm.name: status_fn(_("Getting computes from cell '%(name)s': " "%(uuid)s") % {'name': cm.name, 'uuid': cm.uuid}) else: status_fn(_("Getting computes from cell: %(uuid)s") % {'uuid': cm.uuid}) with context.target_cell(ctxt, cm) as cctxt: added_hm = _check_and_create_host_mappings(cctxt, cm, status_fn, by_service) status_fn(_('Found %(num)s unmapped computes in cell: %(uuid)s') % {'num': len(added_hm), 'uuid': cm.uuid}) host_mappings.extend(added_hm) return host_mappings ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/hv_spec.py0000664000175000017500000000352600000000000017165 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
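# NOTE(editor): illustrative aside, not part of the original module. The
# discover_hosts() helper above is typically invoked with an admin context and
# an optional status callback used for progress output, for example:
#
#     from nova import context as nova_context
#     from nova.objects import host_mapping
#
#     ctxt = nova_context.get_admin_context()
#     added = host_mapping.discover_hosts(ctxt, status_fn=print)
#     # 'added' is the list of HostMapping objects created for previously
#     # unmapped compute hosts across all non-cell0 cells.
#
# Passing by_service=True creates mappings from nova-compute service records
# instead of compute node records, which is useful when a single service
# manages many compute nodes.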
from oslo_utils import versionutils from nova.objects import base from nova.objects import fields @base.NovaObjectRegistry.register class HVSpec(base.NovaObject): # Version 1.0: Initial version # Version 1.1: Added 'vz' hypervisor # Version 1.2: Added 'lxd' hypervisor VERSION = '1.2' fields = { 'arch': fields.ArchitectureField(), 'hv_type': fields.HVTypeField(), 'vm_mode': fields.VMModeField(), } # NOTE(pmurray): for backward compatibility, the supported instance # data is stored in the database as a list. @classmethod def from_list(cls, data): return cls(arch=data[0], hv_type=data[1], vm_mode=data[2]) def to_list(self): return [self.arch, self.hv_type, self.vm_mode] def obj_make_compatible(self, primitive, target_version): super(HVSpec, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if (target_version < (1, 1) and 'hv_type' in primitive and fields.HVType.VIRTUOZZO == primitive['hv_type']): primitive['hv_type'] = fields.HVType.PARALLELS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/image_meta.py0000664000175000017500000010052200000000000017620 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_utils import versionutils from nova.network import model as network_model from nova import objects from nova.objects import base from nova.objects import fields from nova import utils from nova.virt import hardware NULLABLE_STRING_FIELDS = ['name', 'checksum', 'owner', 'container_format', 'disk_format'] NULLABLE_INTEGER_FIELDS = ['size', 'virtual_size'] @base.NovaObjectRegistry.register class ImageMeta(base.NovaObject): # Version 1.0: Initial version # Version 1.1: updated ImageMetaProps # Version 1.2: ImageMetaProps version 1.2 # Version 1.3: ImageMetaProps version 1.3 # Version 1.4: ImageMetaProps version 1.4 # Version 1.5: ImageMetaProps version 1.5 # Version 1.6: ImageMetaProps version 1.6 # Version 1.7: ImageMetaProps version 1.7 # Version 1.8: ImageMetaProps version 1.8 VERSION = '1.8' # These are driven by what the image client API returns # to Nova from Glance. This is defined in the glance # code glance/api/v2/images.py get_base_properties() # method. A few things are currently left out: # self, file, schema - Nova does not appear to ever use # these field; locations - modelling the arbitrary # data in the 'metadata' subfield is non-trivial as # there's no clear spec. 
# # TODO(ft): In version 2.0, these fields should be nullable: # name, checksum, owner, size, virtual_size, container_format, disk_format # fields = { 'id': fields.UUIDField(), 'name': fields.StringField(), 'status': fields.StringField(), 'visibility': fields.StringField(), 'protected': fields.FlexibleBooleanField(), 'checksum': fields.StringField(), 'owner': fields.StringField(), 'size': fields.IntegerField(), 'virtual_size': fields.IntegerField(), 'container_format': fields.StringField(), 'disk_format': fields.StringField(), 'created_at': fields.DateTimeField(nullable=True), 'updated_at': fields.DateTimeField(nullable=True), 'tags': fields.ListOfStringsField(), 'direct_url': fields.StringField(), 'min_ram': fields.IntegerField(), 'min_disk': fields.IntegerField(), 'properties': fields.ObjectField('ImageMetaProps'), } @classmethod def from_dict(cls, image_meta): """Create instance from image metadata dict :param image_meta: image metadata dictionary Creates a new object instance, initializing from the properties associated with the image metadata instance :returns: an ImageMeta instance """ if image_meta is None: image_meta = {} # We must turn 'properties' key dict into an object # so copy image_meta to avoid changing original image_meta = copy.deepcopy(image_meta) image_meta["properties"] = \ objects.ImageMetaProps.from_dict( image_meta.get("properties", {})) # Some fields are nullable in Glance DB schema, but was not marked that # in ImageMeta initially by mistake. To keep compatibility with compute # nodes which are run with previous versions these fields are still # not nullable in ImageMeta, but the code below converts None to # appropriate empty values. for fld in NULLABLE_STRING_FIELDS: if fld in image_meta and image_meta[fld] is None: image_meta[fld] = '' for fld in NULLABLE_INTEGER_FIELDS: if fld in image_meta and image_meta[fld] is None: image_meta[fld] = 0 return cls(**image_meta) @classmethod def from_instance(cls, instance): """Create instance from instance system metadata :param instance: Instance object Creates a new object instance, initializing from the system metadata "image_*" properties associated with instance :returns: an ImageMeta instance """ sysmeta = utils.instance_sys_meta(instance) image_meta = utils.get_image_from_system_metadata(sysmeta) # NOTE(lyarwood): Provide the id of the image in image_meta if it # wasn't persisted in the system_metadata of the instance previously. # This is only provided to allow users of image_meta to avoid the need # to pass around references to instance.image_ref alongside image_meta. 
if image_meta.get('id') is None and instance.image_ref: image_meta['id'] = instance.image_ref return cls.from_dict(image_meta) @classmethod def from_image_ref(cls, context, image_api, image_ref): """Create instance from glance image :param context: the request context :param image_api: the glance client API :param image_ref: the glance image identifier Creates a new object instance, initializing from the properties associated with a glance image :returns: an ImageMeta instance """ image_meta = image_api.get(context, image_ref) image = cls.from_dict(image_meta) setattr(image, "id", image_ref) return image @base.NovaObjectRegistry.register class ImageMetaProps(base.NovaObject): # Version 1.0: Initial version # Version 1.1: added os_require_quiesce field # Version 1.2: added img_hv_type and img_hv_requested_version fields # Version 1.3: HVSpec version 1.1 # Version 1.4: added hw_vif_multiqueue_enabled field # Version 1.5: added os_admin_user field # Version 1.6: Added 'lxc' and 'uml' enum types to DiskBusField # Version 1.7: added img_config_drive field # Version 1.8: Added 'lxd' to hypervisor types # Version 1.9: added hw_cpu_thread_policy field # Version 1.10: added hw_cpu_realtime_mask field # Version 1.11: Added hw_firmware_type field # Version 1.12: Added properties for image signature verification # Version 1.13: added os_secure_boot field # Version 1.14: Added 'hw_pointer_model' field # Version 1.15: Added hw_rescue_bus and hw_rescue_device. # Version 1.16: WatchdogActionField supports 'disabled' enum. # Version 1.17: Add lan9118 as valid nic for hw_vif_model property for qemu # Version 1.18: Pull signature properties from cursive library # Version 1.19: Added 'img_hide_hypervisor_id' type field # Version 1.20: Added 'traits_required' list field # Version 1.21: Added 'hw_time_hpet' field # Version 1.22: Added 'gop', 'virtio' and 'none' to hw_video_model field # Version 1.23: Added 'hw_pmu' field # Version 1.24: Added 'hw_mem_encryption' field # Version 1.25: Added 'hw_pci_numa_affinity_policy' field # Version 1.26: Added 'mixed' to 'hw_cpu_policy' field # Version 1.27: Added 'hw_tpm_model' and 'hw_tpm_version' fields # Version 1.28: Added 'socket' to 'hw_pci_numa_affinity_policy' # Version 1.29: Added 'hw_input_bus' field # Version 1.30: Added 'bochs' as an option to 'hw_video_model' # Version 1.31: Added 'hw_emulation_architecture' field # Version 1.32: Added 'hw_ephemeral_encryption' and # 'hw_ephemeral_encryption_format' fields # Version 1.33: Added 'hw_locked_memory' field # Version 1.34: Added 'hw_viommu_model' field # Version 1.35: Added 'hw_virtio_packed_ring' field # Version 1.36: Added 'hw_maxphysaddr_mode' and # 'hw_maxphysaddr_bits' field # Version 1.37: Added 'hw_ephemeral_encryption_secret_uuid' field # Version 1.38: Added 'hw_firmware_stateless' field # Version 1.39: Added igb value to 'hw_vif_model' enum # Version 1.40: Added 'hw_sound_model' field # Version 1.41: Added 'hw_usb_model' and 'hw_redirected_usb_ports' fields # Version 1.42: Added 'hw_mem_encryption_model' field # NOTE(efried): When bumping this version, the version of # ImageMetaPropsPayload must also be bumped. See its docstring for details. 
VERSION = '1.42' def obj_make_compatible(self, primitive, target_version): # noqa: C901 super(ImageMetaProps, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 42): primitive.pop('hw_mem_encryption_model', None) if target_version < (1, 41): primitive.pop('hw_usb_model', None) primitive.pop('hw_redirected_usb_ports', None) if target_version < (1, 40): primitive.pop('hw_sound_model', None) if target_version < (1, 39): base.raise_on_too_new_values( target_version, primitive, 'hw_vif_model', (network_model.VIF_MODEL_IGB,)) if target_version < (1, 38): primitive.pop('hw_firmware_stateless', None) if target_version < (1, 37): primitive.pop('hw_ephemeral_encryption_secret_uuid', None) if target_version < (1, 36): primitive.pop('hw_maxphysaddr_mode', None) primitive.pop('hw_maxphysaddr_bits', None) if target_version < (1, 35): primitive.pop('hw_virtio_packed_ring', None) if target_version < (1, 34): primitive.pop('hw_viommu_model', None) if target_version < (1, 33): primitive.pop('hw_locked_memory', None) if target_version < (1, 32): primitive.pop('hw_ephemeral_encryption', None) primitive.pop('hw_ephemeral_encryption_format', None) if target_version < (1, 31): primitive.pop('hw_emulation_architecture', None) if target_version < (1, 30): base.raise_on_too_new_values( target_version, primitive, 'hw_video_model', (fields.VideoModel.BOCHS,)) if target_version < (1, 29): primitive.pop('hw_input_bus', None) if target_version < (1, 28): base.raise_on_too_new_values( target_version, primitive, 'hw_pci_numa_affinity_policy', (fields.PCINUMAAffinityPolicy.SOCKET,)) if target_version < (1, 27): primitive.pop('hw_tpm_model', None) primitive.pop('hw_tpm_version', None) if target_version < (1, 26): base.raise_on_too_new_values( target_version, primitive, 'hw_cpu_policy', (fields.CPUAllocationPolicy.MIXED,)) if target_version < (1, 25): primitive.pop('hw_pci_numa_affinity_policy', None) if target_version < (1, 24): primitive.pop('hw_mem_encryption', None) if target_version < (1, 23): primitive.pop('hw_pmu', None) # NOTE(sean-k-mooney): unlike other nova object we version this object # when composed object are updated. if target_version < (1, 22): base.raise_on_too_new_values( target_version, primitive, 'hw_video_model', (fields.VideoModel.GOP, fields.VideoModel.VIRTIO, fields.VideoModel.NONE)) if target_version < (1, 21): primitive.pop('hw_time_hpet', None) if target_version < (1, 20): primitive.pop('traits_required', None) if target_version < (1, 19): primitive.pop('img_hide_hypervisor_id', None) if target_version < (1, 16) and 'hw_watchdog_action' in primitive: # Check to see if hw_watchdog_action was set to 'disabled' and if # so, remove it since not specifying it is the same behavior. 
if primitive['hw_watchdog_action'] == \ fields.WatchdogAction.DISABLED: primitive.pop('hw_watchdog_action') if target_version < (1, 15): primitive.pop('hw_rescue_bus', None) primitive.pop('hw_rescue_device', None) if target_version < (1, 14): primitive.pop('hw_pointer_model', None) if target_version < (1, 13): primitive.pop('os_secure_boot', None) if target_version < (1, 11): primitive.pop('hw_firmware_type', None) if target_version < (1, 10): primitive.pop('hw_cpu_realtime_mask', None) if target_version < (1, 9): primitive.pop('hw_cpu_thread_policy', None) if target_version < (1, 7): primitive.pop('img_config_drive', None) if target_version < (1, 5): primitive.pop('os_admin_user', None) if target_version < (1, 4): primitive.pop('hw_vif_multiqueue_enabled', None) if target_version < (1, 2): primitive.pop('img_hv_type', None) primitive.pop('img_hv_requested_version', None) if target_version < (1, 1): primitive.pop('os_require_quiesce', None) if target_version < (1, 6): base.raise_on_too_new_values( target_version, primitive, 'hw_disk_bus', (fields.DiskBus.LXC, fields.DiskBus.UML)) # Maximum number of NUMA nodes permitted for the guest topology NUMA_NODES_MAX = 128 # 'hw_' - settings affecting the guest virtual machine hardware # 'img_' - settings affecting the use of images by the compute node # 'os_' - settings affecting the guest operating system setup # 'traits_required' - The required traits associated with the image fields = { # name of guest hardware architecture eg i686, x86_64, ppc64 'hw_architecture': fields.ArchitectureField(), # hw_architecture field is leveraged for checks against physical nodes # name of desired emulation architecture eg i686, x86_64, ppc64 'hw_emulation_architecture': fields.ArchitectureField(), # used to decide to expand root disk partition and fs to full size of # root disk 'hw_auto_disk_config': fields.StringField(), # whether to display BIOS boot device menu 'hw_boot_menu': fields.FlexibleBooleanField(), # name of the CDROM bus to use eg virtio, scsi, ide 'hw_cdrom_bus': fields.DiskBusField(), # preferred number of CPU cores per socket 'hw_cpu_cores': fields.IntegerField(), # preferred number of CPU sockets 'hw_cpu_sockets': fields.IntegerField(), # maximum number of CPU cores per socket 'hw_cpu_max_cores': fields.IntegerField(), # maximum number of CPU sockets 'hw_cpu_max_sockets': fields.IntegerField(), # maximum number of CPU threads per core 'hw_cpu_max_threads': fields.IntegerField(), # CPU allocation policy 'hw_cpu_policy': fields.CPUAllocationPolicyField(), # CPU thread allocation policy 'hw_cpu_thread_policy': fields.CPUThreadAllocationPolicyField(), # CPU mask indicates which vCPUs will have realtime enable, # example ^0-1 means that all vCPUs except 0 and 1 will have a # realtime policy. 
'hw_cpu_realtime_mask': fields.StringField(), # preferred number of CPU threads per core 'hw_cpu_threads': fields.IntegerField(), # guest ABI version for guest xentools either 1 or 2 (or 3 - depends on # Citrix PV tools version installed in image) 'hw_device_id': fields.IntegerField(), # name of the hard disk bus to use eg virtio, scsi, ide 'hw_disk_bus': fields.DiskBusField(), # allocation mode eg 'preallocated' 'hw_disk_type': fields.StringField(), # name of the floppy disk bus to use eg fd, scsi, ide 'hw_floppy_bus': fields.DiskBusField(), # This indicates the guest needs UEFI firmware 'hw_firmware_type': fields.FirmwareTypeField(), # This indicates the guest needs stateless firmware 'hw_firmware_stateless': fields.FlexibleBooleanField(), # name of the input bus type to use, e.g. usb, virtio 'hw_input_bus': fields.InputBusField(), # boolean - used to trigger code to inject networking when booting a CD # image with a network boot image 'hw_ipxe_boot': fields.FlexibleBooleanField(), # string - make sure ``locked`` element is present in the # ``memoryBacking``. 'hw_locked_memory': fields.FlexibleBooleanField(), # There are sooooooooooo many possible machine types in # QEMU - several new ones with each new release - that it # is not practical to enumerate them all. So we use a free # form string 'hw_machine_type': fields.StringField(), # boolean indicating that the guest needs to be booted with # encrypted memory 'hw_mem_encryption': fields.FlexibleBooleanField(), # string = used to determine the CPU feature for guest memory # encryption 'hw_mem_encryption_model': fields.MemEncryptionModelField(), # One of the magic strings 'small', 'any', 'large' # or an explicit page size in KB (eg 4, 2048, ...) 'hw_mem_page_size': fields.StringField(), # Number of guest NUMA nodes 'hw_numa_nodes': fields.IntegerField(), # Each list entry corresponds to a guest NUMA node and the # set members indicate CPUs for that node 'hw_numa_cpus': fields.ListOfSetsOfIntegersField(), # Each list entry corresponds to a guest NUMA node and the # list value indicates the memory size of that node. 'hw_numa_mem': fields.ListOfIntegersField(), # Enum field to specify pci device NUMA affinity. 'hw_pci_numa_affinity_policy': fields.PCINUMAAffinityPolicyField(), # Generic property to specify the pointer model type. 'hw_pointer_model': fields.PointerModelField(), # boolean 'true' or 'false' to enable virtual performance # monitoring unit (vPMU). 'hw_pmu': fields.FlexibleBooleanField(), # boolean 'yes' or 'no' to enable QEMU guest agent 'hw_qemu_guest_agent': fields.FlexibleBooleanField(), # name of the rescue bus to use with the associated rescue device. 'hw_rescue_bus': fields.DiskBusField(), # name of rescue device to use. 'hw_rescue_device': fields.BlockDeviceTypeField(), # name of the RNG device type eg virtio # NOTE(kchamart): Although this is currently not used anymore, # we should not remove / deprecate it yet, as we are likely to # extend this field to allow two more values to support "VirtIO # transitional/non-transitional devices" (refer to the note in # RNGModel() class in nova/objects/fields.py), and thus expose # to the user again. 
'hw_rng_model': fields.RNGModelField(), # boolean 'true' or 'false' to enable HPET 'hw_time_hpet': fields.FlexibleBooleanField(), # number of serial ports to create 'hw_serial_port_count': fields.IntegerField(), # name of the SCSI bus controller eg 'virtio-scsi', 'lsilogic', etc 'hw_scsi_model': fields.SCSIModelField(), # name of the video adapter model to use, eg cirrus, vga, xen, qxl 'hw_video_model': fields.VideoModelField(), # MB of video RAM to provide eg 64 'hw_video_ram': fields.IntegerField(), # name of a NIC device model eg virtio, e1000, rtl8139 'hw_vif_model': fields.VIFModelField(), # name of IOMMU device model eg virtio, intel, smmuv3, or auto 'hw_viommu_model': fields.VIOMMUModelField(), # "xen" vs "hvm" 'hw_vm_mode': fields.VMModeField(), # action to take when watchdog device fires eg reset, poweroff, pause, # none 'hw_watchdog_action': fields.WatchdogActionField(), # boolean - If true, this will enable the virtio-multiqueue feature 'hw_vif_multiqueue_enabled': fields.FlexibleBooleanField(), # name of emulated TPM model to use. 'hw_tpm_model': fields.TPMModelField(), # version of emulated TPM to use. 'hw_tpm_version': fields.TPMVersionField(), # boolean - if true will enable ephemeral encryption for instance 'hw_ephemeral_encryption': fields.FlexibleBooleanField(), # encryption format to be used when ephemeral encryption is enabled 'hw_ephemeral_encryption_format': fields.BlockDeviceEncryptionFormatTypeField(), # encryption secret uuid string for passphrase in the image 'hw_ephemeral_encryption_secret_uuid': fields.UUIDField(), # boolean - If true, this will enable the virtio packed ring feature 'hw_virtio_packed_ring': fields.FlexibleBooleanField(), # Control mode for the physical memory address bit of Libvirt guests. 'hw_maxphysaddr_mode': fields.MaxPhysAddrModeField(), # Control bits for the physical memory address bit of Libvirt guests. 'hw_maxphysaddr_bits': fields.IntegerField(), # Name of sound device model to use. 'hw_sound_model': fields.SoundModelField(), # Name of the USB Controller model to use. 'hw_usb_model': fields.USBControllerModelField(), # Number of USB redirection ports to add to the guest. 'hw_redirected_usb_ports': fields.IntegerField(), # if true download using bittorrent 'img_bittorrent': fields.FlexibleBooleanField(), # Which data format the 'img_block_device_mapping' field is # using to represent the block device mapping 'img_bdm_v2': fields.FlexibleBooleanField(), # Block device mapping - the may can be in one or two completely # different formats. The 'img_bdm_v2' field determines whether # it is in legacy format, or the new current format. Ideally # we would have a formal data type for this field instead of a # dict, but with 2 different formats to represent this is hard. # See nova/block_device.py from_legacy_mapping() for the complex # conversion code. So for now leave it as a dict and continue # to use existing code that is able to convert dict into the # desired internal BDM formats 'img_block_device_mapping': fields.ListOfDictOfNullableStringsField(), # boolean - if True, and image cache set to "some" decides if image # should be cached on host when server is booted on that host 'img_cache_in_nova': fields.FlexibleBooleanField(), # Compression level for images. (1-9) 'img_compression_level': fields.IntegerField(), # hypervisor supported version, eg. 
'>=2.6' 'img_hv_requested_version': fields.VersionPredicateField(), # type of the hypervisor, eg kvm, ironic, xen 'img_hv_type': fields.HVTypeField(), # Whether the image needs/expected config drive 'img_config_drive': fields.ConfigDrivePolicyField(), # boolean flag to set space-saving or performance behavior on the # Datastore 'img_linked_clone': fields.FlexibleBooleanField(), # Image mappings - related to Block device mapping data - mapping # of virtual image names to device names. This could be represented # as a formal data type, but is left as dict for same reason as # img_block_device_mapping field. It would arguably make sense for # the two to be combined into a single field and data type in the # future. 'img_mappings': fields.ListOfDictOfNullableStringsField(), # image project id (set on upload) 'img_owner_id': fields.StringField(), # root device name, used in snapshotting eg /dev/ 'img_root_device_name': fields.StringField(), # boolean - if false don't talk to nova agent 'img_use_agent': fields.FlexibleBooleanField(), # integer value 1 'img_version': fields.IntegerField(), # base64 of encoding of image signature 'img_signature': fields.StringField(), # string indicating hash method used to compute image signature 'img_signature_hash_method': fields.ImageSignatureHashTypeField(), # string indicating Castellan uuid of certificate # used to compute the image's signature 'img_signature_certificate_uuid': fields.UUIDField(), # string indicating type of key used to compute image signature 'img_signature_key_type': fields.ImageSignatureKeyTypeField(), # boolean - hide hypervisor signature on instance 'img_hide_hypervisor_id': fields.FlexibleBooleanField(), # string of username with admin privileges 'os_admin_user': fields.StringField(), # string of boot time command line arguments for the guest kernel 'os_command_line': fields.StringField(), # the name of the specific guest operating system distro. This # is not done as an Enum since the list of operating systems is # growing incredibly fast, and valid values can be arbitrarily # user defined. Nova has no real need for strict validation so # leave it freeform 'os_distro': fields.StringField(), # boolean - if true, then guest must support disk quiesce # or snapshot operation will be denied 'os_require_quiesce': fields.FlexibleBooleanField(), # Secure Boot feature will be enabled by setting the "os_secure_boot" # image property to "required". Other options can be: "disabled" or # "optional". # "os:secure_boot" flavor extra spec value overrides the image property # value. 'os_secure_boot': fields.SecureBootField(), # boolean - if using agent don't inject files, assume someone else is # doing that (cloud-init) 'os_skip_agent_inject_files_at_boot': fields.FlexibleBooleanField(), # boolean - if using agent don't try inject ssh key, assume someone # else is doing that (cloud-init) 'os_skip_agent_inject_ssh': fields.FlexibleBooleanField(), # The guest operating system family such as 'linux', 'windows' - this # is a fairly generic type. For a detailed type consider os_distro # instead 'os_type': fields.OSTypeField(), # The required traits associated with the image. 
Traits are expected to # be defined as starting with `trait:` like below: # trait:HW_CPU_X86_AVX2=required # for trait in image_meta.traits_required: # will yield trait strings such as 'HW_CPU_X86_AVX2' 'traits_required': fields.ListOfStringsField(), } # The keys are the legacy property names and # the values are the current preferred names _legacy_property_map = { 'architecture': 'hw_architecture', 'owner_id': 'img_owner_id', 'vmware_disktype': 'hw_disk_type', 'vmware_image_version': 'img_version', 'vmware_ostype': 'os_distro', 'auto_disk_config': 'hw_auto_disk_config', 'ipxe_boot': 'hw_ipxe_boot', 'xenapi_device_id': 'hw_device_id', 'xenapi_image_compression_level': 'img_compression_level', 'vmware_linked_clone': 'img_linked_clone', 'xenapi_use_agent': 'img_use_agent', 'xenapi_skip_agent_inject_ssh': 'os_skip_agent_inject_ssh', 'xenapi_skip_agent_inject_files_at_boot': 'os_skip_agent_inject_files_at_boot', 'cache_in_nova': 'img_cache_in_nova', 'vm_mode': 'hw_vm_mode', 'bittorrent': 'img_bittorrent', 'mappings': 'img_mappings', 'block_device_mapping': 'img_block_device_mapping', 'bdm_v2': 'img_bdm_v2', 'root_device_name': 'img_root_device_name', 'hypervisor_version_requires': 'img_hv_requested_version', 'hypervisor_type': 'img_hv_type', } # TODO(berrange): Need to run this from a data migration # at some point so we can eventually kill off the compat def _set_attr_from_legacy_names(self, image_props): for legacy_key in self._legacy_property_map: new_key = self._legacy_property_map[legacy_key] if legacy_key not in image_props: continue setattr(self, new_key, image_props[legacy_key]) vmware_adaptertype = image_props.get("vmware_adaptertype") if vmware_adaptertype == "ide": setattr(self, "hw_disk_bus", "ide") elif vmware_adaptertype: setattr(self, "hw_disk_bus", "scsi") setattr(self, "hw_scsi_model", vmware_adaptertype) def _set_numa_mem(self, image_props): hw_numa_mem = [] hw_numa_mem_set = False for cellid in range(ImageMetaProps.NUMA_NODES_MAX): memprop = "hw_numa_mem.%d" % cellid if memprop not in image_props: break hw_numa_mem.append(int(image_props[memprop])) hw_numa_mem_set = True del image_props[memprop] if hw_numa_mem_set: self.hw_numa_mem = hw_numa_mem def _set_numa_cpus(self, image_props): hw_numa_cpus = [] hw_numa_cpus_set = False for cellid in range(ImageMetaProps.NUMA_NODES_MAX): cpuprop = "hw_numa_cpus.%d" % cellid if cpuprop not in image_props: break hw_numa_cpus.append( hardware.parse_cpu_spec(image_props[cpuprop])) hw_numa_cpus_set = True del image_props[cpuprop] if hw_numa_cpus_set: self.hw_numa_cpus = hw_numa_cpus def _set_attr_from_current_names(self, image_props): for key in self.fields: # The two NUMA fields need special handling to # un-stringify them correctly if key == "hw_numa_mem": self._set_numa_mem(image_props) elif key == "hw_numa_cpus": self._set_numa_cpus(image_props) else: # traits_required will be populated by # _set_attr_from_trait_names if key not in image_props or key == "traits_required": continue setattr(self, key, image_props[key]) def _set_attr_from_trait_names(self, image_props): for trait in [str(k[6:]) for k, v in image_props.items() if str(k).startswith("trait:") and str(v) == 'required']: if 'traits_required' not in self: self.traits_required = [] self.traits_required.append(trait) @classmethod def from_dict(cls, image_props): """Create instance from image properties dict :param image_props: dictionary of image metadata properties Creates a new object instance, initializing from a dictionary of image metadata properties :returns: an 
ImageMetaProps instance """ obj = cls() # We look to see if the dict has entries for any # of the legacy property names first. Then we use # the current property names. That way if both the # current and legacy names are set, the value # associated with the current name takes priority obj._set_attr_from_legacy_names(image_props) obj._set_attr_from_current_names(image_props) obj._set_attr_from_trait_names(image_props) return obj def to_dict(self): """Returns a dictionary of image properties that are set.""" return base.obj_to_primitive(self) def get(self, name, defvalue=None): """Get the value of an attribute :param name: the attribute to request :param defvalue: the default value if not set This returns the value of an attribute if it is currently set, otherwise it will return None. This differs from accessing props.attrname, because that will raise an exception if the attribute has no value set. So instead of if image_meta.properties.obj_attr_is_set("some_attr"): val = image_meta.properties.some_attr else val = None Callers can rely on unconditional access val = image_meta.properties.get("some_attr") :returns: the attribute value or None """ if not self.obj_attr_is_set(name): return defvalue return getattr(self, name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/instance.py0000664000175000017500000022112100000000000017333 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import typing as ty from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import timeutils from oslo_utils import versionutils import sqlalchemy as sa from sqlalchemy import sql from sqlalchemy.sql import func from nova import availability_zones as avail_zone from nova.compute import task_states from nova.compute import vm_states from nova import context as nova_context from nova.db.main import api as db from nova.db.main import models from nova import exception from nova.i18n import _ from nova.network import model as network_model from nova import notifications from nova import objects from nova.objects import base from nova.objects import fields from nova import utils CONF = cfg.CONF LOG = logging.getLogger(__name__) # List of fields that can be joined in DB layer. 
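# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Nova itself): how the ImageMetaProps
# loading helpers above behave for a glance-style properties dict that mixes
# a legacy key ('architecture'), current keys, numbered NUMA keys and a
# 'trait:' key. The concrete property values are invented for the example.
def _example_image_meta_props_from_dict():
    from nova.objects.image_meta import ImageMetaProps

    props = ImageMetaProps.from_dict({
        'architecture': 'x86_64',        # legacy name -> hw_architecture
        'hw_disk_bus': 'virtio',
        'hw_numa_nodes': 2,
        'hw_numa_mem.0': '1024',         # numbered keys are un-stringified
        'hw_numa_mem.1': '1024',         # into the hw_numa_mem list
        'trait:HW_CPU_X86_AVX2': 'required',
    })
    assert props.hw_architecture == 'x86_64'
    assert list(props.hw_numa_mem) == [1024, 1024]
    assert 'HW_CPU_X86_AVX2' in props.traits_required
    # get() returns the default instead of raising for attributes never set.
    assert props.get('hw_cpu_policy') is None
    return props
# ---------------------------------------------------------------------------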
_INSTANCE_OPTIONAL_JOINED_FIELDS = ['metadata', 'system_metadata', 'info_cache', 'security_groups', 'pci_devices', 'tags', 'services', 'fault'] # These are fields that are optional but don't translate to db columns _INSTANCE_OPTIONAL_NON_COLUMN_FIELDS = ['flavor', 'old_flavor', 'new_flavor', 'ec2_ids'] # These are fields that are optional and in instance_extra _INSTANCE_EXTRA_FIELDS = ['numa_topology', 'pci_requests', 'flavor', 'vcpu_model', 'migration_context', 'keypairs', 'device_metadata', 'trusted_certs', 'resources'] # These are fields that applied/drooped by migration_context _MIGRATION_CONTEXT_ATTRS = ['numa_topology', 'pci_requests', 'pci_devices', 'resources'] # These are fields that can be specified as expected_attrs INSTANCE_OPTIONAL_ATTRS = (_INSTANCE_OPTIONAL_JOINED_FIELDS + _INSTANCE_OPTIONAL_NON_COLUMN_FIELDS + _INSTANCE_EXTRA_FIELDS) # These are fields that most query calls load by default INSTANCE_DEFAULT_FIELDS = ['metadata', 'system_metadata', 'info_cache', 'security_groups'] # Maximum count of tags to one instance MAX_TAG_COUNT = 50 def _expected_cols(expected_attrs): """Return expected_attrs that are columns needing joining. NB: This function may modify expected_attrs if one requested attribute requires another. """ if not expected_attrs: return expected_attrs simple_cols = [attr for attr in expected_attrs if attr in _INSTANCE_OPTIONAL_JOINED_FIELDS] complex_cols = ['extra.%s' % field for field in _INSTANCE_EXTRA_FIELDS if field in expected_attrs] if complex_cols: simple_cols.append('extra') simple_cols = [x for x in simple_cols if x not in _INSTANCE_EXTRA_FIELDS] expected_cols = simple_cols + complex_cols # NOTE(pumaranikar): expected_cols list can contain duplicates since # caller appends column attributes to expected_attr without checking if # it is already present in the list or not. Hence, we remove duplicates # here, if any. The resultant list is sorted based on list index to # maintain the insertion order. 
return sorted(list(set(expected_cols)), key=expected_cols.index) _NO_DATA_SENTINEL = object() # TODO(berrange): Remove NovaObjectDictCompat @base.NovaObjectRegistry.register class Instance(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 2.0: Initial version # Version 2.1: Added services # Version 2.2: Added keypairs # Version 2.3: Added device_metadata # Version 2.4: Added trusted_certs # Version 2.5: Added hard_delete kwarg in destroy # Version 2.6: Added hidden # Version 2.7: Added resources # Version 2.8: Added compute_id VERSION = '2.8' fields = { 'id': fields.IntegerField(), 'user_id': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), 'image_ref': fields.StringField(nullable=True), 'kernel_id': fields.StringField(nullable=True), 'ramdisk_id': fields.StringField(nullable=True), 'hostname': fields.StringField(nullable=True), 'launch_index': fields.IntegerField(nullable=True), 'key_name': fields.StringField(nullable=True), 'key_data': fields.StringField(nullable=True), 'power_state': fields.IntegerField(nullable=True), 'vm_state': fields.StringField(nullable=True), 'task_state': fields.StringField(nullable=True), 'services': fields.ObjectField('ServiceList'), 'memory_mb': fields.IntegerField(nullable=True), 'vcpus': fields.IntegerField(nullable=True), 'root_gb': fields.IntegerField(nullable=True), 'ephemeral_gb': fields.IntegerField(nullable=True), 'ephemeral_key_uuid': fields.UUIDField(nullable=True), 'host': fields.StringField(nullable=True), 'node': fields.StringField(nullable=True), 'compute_id': fields.IntegerField(nullable=True), # TODO(stephenfin): Remove this in version 3.0 of the object as it has # been replaced by 'flavor' 'instance_type_id': fields.IntegerField(nullable=True), 'user_data': fields.StringField(nullable=True), 'reservation_id': fields.StringField(nullable=True), 'launched_at': fields.DateTimeField(nullable=True), 'terminated_at': fields.DateTimeField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'display_name': fields.StringField(nullable=True), 'display_description': fields.StringField(nullable=True), 'launched_on': fields.StringField(nullable=True), 'locked': fields.BooleanField(default=False), 'locked_by': fields.StringField(nullable=True), 'os_type': fields.StringField(nullable=True), 'architecture': fields.StringField(nullable=True), 'vm_mode': fields.StringField(nullable=True), 'uuid': fields.UUIDField(), 'root_device_name': fields.StringField(nullable=True), 'default_ephemeral_device': fields.StringField(nullable=True), 'default_swap_device': fields.StringField(nullable=True), 'config_drive': fields.StringField(nullable=True), 'access_ip_v4': fields.IPV4AddressField(nullable=True), 'access_ip_v6': fields.IPV6AddressField(nullable=True), 'auto_disk_config': fields.BooleanField(default=False), 'progress': fields.IntegerField(nullable=True), 'shutdown_terminate': fields.BooleanField(default=False), 'disable_terminate': fields.BooleanField(default=False), # TODO(stephenfin): Remove this in version 3.0 of the object as it's # related to cells v1 'cell_name': fields.StringField(nullable=True), 'metadata': fields.DictOfStringsField(), 'system_metadata': fields.DictOfNullableStringsField(), 'info_cache': fields.ObjectField('InstanceInfoCache', nullable=True), # TODO(stephenfin): Remove this in version 3.0 of the object as it's # related to nova-network 'security_groups': fields.ObjectField('SecurityGroupList'), 'fault': fields.ObjectField('InstanceFault', nullable=True), 
'cleaned': fields.BooleanField(default=False), 'pci_devices': fields.ObjectField('PciDeviceList', nullable=True), 'numa_topology': fields.ObjectField('InstanceNUMATopology', nullable=True), 'pci_requests': fields.ObjectField('InstancePCIRequests', nullable=True), 'device_metadata': fields.ObjectField('InstanceDeviceMetadata', nullable=True), 'tags': fields.ObjectField('TagList'), 'flavor': fields.ObjectField('Flavor'), 'old_flavor': fields.ObjectField('Flavor', nullable=True), 'new_flavor': fields.ObjectField('Flavor', nullable=True), 'vcpu_model': fields.ObjectField('VirtCPUModel', nullable=True), 'ec2_ids': fields.ObjectField('EC2Ids'), 'migration_context': fields.ObjectField('MigrationContext', nullable=True), 'keypairs': fields.ObjectField('KeyPairList'), 'trusted_certs': fields.ObjectField('TrustedCerts', nullable=True), 'hidden': fields.BooleanField(default=False), 'resources': fields.ObjectField('ResourceList', nullable=True), } obj_extra_fields = ['name'] def obj_make_compatible(self, primitive, target_version): super(Instance, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (2, 8) and 'compute_id' in primitive: del primitive['compute_id'] if target_version < (2, 7) and 'resources' in primitive: del primitive['resources'] if target_version < (2, 6) and 'hidden' in primitive: del primitive['hidden'] if target_version < (2, 4) and 'trusted_certs' in primitive: del primitive['trusted_certs'] if target_version < (2, 3) and 'device_metadata' in primitive: del primitive['device_metadata'] if target_version < (2, 2) and 'keypairs' in primitive: del primitive['keypairs'] if target_version < (2, 1) and 'services' in primitive: del primitive['services'] def __init__(self, *args, **kwargs): super(Instance, self).__init__(*args, **kwargs) self._reset_metadata_tracking() @property def image_meta(self): return objects.ImageMeta.from_instance(self) def _reset_metadata_tracking(self, fields=None): if fields is None or 'system_metadata' in fields: self._orig_system_metadata = (dict(self.system_metadata) if 'system_metadata' in self else {}) if fields is None or 'metadata' in fields: self._orig_metadata = (dict(self.metadata) if 'metadata' in self else {}) def obj_clone(self): """Create a copy of this instance object.""" nobj = super(Instance, self).obj_clone() # Since the base object only does a deep copy of the defined fields, # need to make sure to also copy the additional tracking metadata # attributes so they don't show as changed and cause the metadata # to always be updated even when stale information. 
if hasattr(self, '_orig_metadata'): nobj._orig_metadata = dict(self._orig_metadata) if hasattr(self, '_orig_system_metadata'): nobj._orig_system_metadata = dict(self._orig_system_metadata) return nobj def obj_reset_changes(self, fields=None, recursive=False): super(Instance, self).obj_reset_changes(fields, recursive=recursive) self._reset_metadata_tracking(fields=fields) def obj_what_changed(self): changes = super(Instance, self).obj_what_changed() if 'metadata' in self and self.metadata != self._orig_metadata: changes.add('metadata') if 'system_metadata' in self and (self.system_metadata != self._orig_system_metadata): changes.add('system_metadata') return changes @classmethod def _obj_from_primitive(cls, context, objver, primitive): self = super(Instance, cls)._obj_from_primitive(context, objver, primitive) self._reset_metadata_tracking() return self @property def name(self): try: base_name = CONF.instance_name_template % self.id except TypeError: # Support templates like "uuid-%(uuid)s", etc. info = {} # NOTE(russellb): Don't use self.iteritems() here, as it will # result in infinite recursion on the name property. for key in self.fields: if key == 'name': # NOTE(danms): prevent recursion continue elif not self.obj_attr_is_set(key): # NOTE(danms): Don't trigger lazy-loads continue info[key] = self[key] try: base_name = CONF.instance_name_template % info except KeyError: base_name = self.uuid except (exception.ObjectActionError, exception.OrphanedObjectError): # This indicates self.id was not set and/or could not be # lazy loaded. What this means is the instance has not # been persisted to a db yet, which should indicate it has # not been scheduled yet. In this situation it will have a # blank name. if (self.vm_state == vm_states.BUILDING and self.task_state == task_states.SCHEDULING): base_name = '' else: # If the vm/task states don't indicate that it's being booted # then we have a bug here. Log an error and attempt to return # the uuid which is what an error above would return. LOG.error('Could not lazy-load instance.id while ' 'attempting to generate the instance name.') base_name = self.uuid return base_name def _flavor_from_db(self, db_flavor): """Load instance flavor information from instance_extra.""" # Before we stored flavors in instance_extra, certain fields, defined # in nova.compute.flavors.system_metadata_flavor_props, were stored # in the instance.system_metadata for the embedded instance.flavor. # The "disabled" and "is_public" fields weren't one of those keys, # however, so really old instances that had their embedded flavor # converted to the serialized instance_extra form won't have the # disabled attribute set and we need to default those here so callers # don't explode trying to load instance.flavor.disabled. 
def _default_flavor_values(flavor): if 'disabled' not in flavor: flavor.disabled = False if 'is_public' not in flavor: flavor.is_public = True flavor_info = jsonutils.loads(db_flavor) self.flavor = objects.Flavor.obj_from_primitive(flavor_info['cur']) _default_flavor_values(self.flavor) if flavor_info['old']: self.old_flavor = objects.Flavor.obj_from_primitive( flavor_info['old']) _default_flavor_values(self.old_flavor) else: self.old_flavor = None if flavor_info['new']: self.new_flavor = objects.Flavor.obj_from_primitive( flavor_info['new']) _default_flavor_values(self.new_flavor) else: self.new_flavor = None self.obj_reset_changes(['flavor', 'old_flavor', 'new_flavor']) @staticmethod def _from_db_object(context, instance, db_inst, expected_attrs=None): """Method to help with migration to objects. Converts a database entity to a formal object. """ instance._context = context if expected_attrs is None: expected_attrs = [] # Most of the field names match right now, so be quick for field in instance.fields: if field in INSTANCE_OPTIONAL_ATTRS: continue elif field == 'deleted': instance.deleted = db_inst['deleted'] == db_inst['id'] elif field == 'cleaned': instance.cleaned = db_inst['cleaned'] == 1 else: instance[field] = db_inst[field] if 'metadata' in expected_attrs: instance['metadata'] = utils.instance_meta(db_inst) if 'system_metadata' in expected_attrs: instance['system_metadata'] = utils.instance_sys_meta(db_inst) if 'fault' in expected_attrs: instance['fault'] = ( objects.InstanceFault.get_latest_for_instance( context, instance.uuid)) if 'ec2_ids' in expected_attrs: instance._load_ec2_ids() if 'info_cache' in expected_attrs: if db_inst.get('info_cache') is None: instance.info_cache = None elif not instance.obj_attr_is_set('info_cache'): # TODO(danms): If this ever happens on a backlevel instance # passed to us by a backlevel service, things will break instance.info_cache = objects.InstanceInfoCache(context) if instance.info_cache is not None: instance.info_cache._from_db_object(context, instance.info_cache, db_inst['info_cache']) # TODO(danms): If we are updating these on a backlevel instance, # we'll end up sending back new versions of these objects (see # above note for new info_caches if 'pci_devices' in expected_attrs: pci_devices = base.obj_make_list( context, objects.PciDeviceList(context), objects.PciDevice, db_inst['pci_devices']) instance['pci_devices'] = pci_devices # TODO(stephenfin): Remove this as it's related to nova-network if 'security_groups' in expected_attrs: sec_groups = base.obj_make_list( context, objects.SecurityGroupList(context), objects.SecurityGroup, []) instance['security_groups'] = sec_groups if 'tags' in expected_attrs: tags = base.obj_make_list( context, objects.TagList(context), objects.Tag, db_inst['tags']) instance['tags'] = tags if 'services' in expected_attrs: services = base.obj_make_list( context, objects.ServiceList(context), objects.Service, db_inst['services']) instance['services'] = services instance._extra_attributes_from_db_object(instance, db_inst, expected_attrs) instance.obj_reset_changes() return instance @staticmethod def _extra_attributes_from_db_object(instance, db_inst, expected_attrs=None): """Method to help with migration of extra attributes to objects. 
""" if expected_attrs is None: expected_attrs = [] # NOTE(danms): We can be called with a dict instead of a # SQLAlchemy object, so we have to be careful here if hasattr(db_inst, '__dict__'): have_extra = 'extra' in db_inst.__dict__ and db_inst['extra'] else: have_extra = 'extra' in db_inst and db_inst['extra'] if 'numa_topology' in expected_attrs: if have_extra: instance._load_numa_topology( db_inst['extra'].get('numa_topology')) else: instance.numa_topology = None if 'pci_requests' in expected_attrs: if have_extra: instance._load_pci_requests( db_inst['extra'].get('pci_requests')) else: instance.pci_requests = None if 'device_metadata' in expected_attrs: if have_extra: instance._load_device_metadata( db_inst['extra'].get('device_metadata')) else: instance.device_metadata = None if 'vcpu_model' in expected_attrs: if have_extra: instance._load_vcpu_model( db_inst['extra'].get('vcpu_model')) else: instance.vcpu_model = None if 'migration_context' in expected_attrs: if have_extra: instance._load_migration_context( db_inst['extra'].get('migration_context')) else: instance.migration_context = None if 'keypairs' in expected_attrs: if have_extra: instance._load_keypairs(db_inst['extra'].get('keypairs')) if 'trusted_certs' in expected_attrs: if have_extra: instance._load_trusted_certs( db_inst['extra'].get('trusted_certs')) else: instance.trusted_certs = None if 'resources' in expected_attrs: if have_extra: instance._load_resources( db_inst['extra'].get('resources')) else: instance.resources = None if any([x in expected_attrs for x in ('flavor', 'old_flavor', 'new_flavor')]): if have_extra and db_inst['extra'].get('flavor'): instance._flavor_from_db(db_inst['extra']['flavor']) @staticmethod @db.select_db_reader_mode def _db_instance_get_by_uuid(context, uuid, columns_to_join, use_slave=False): return db.instance_get_by_uuid(context, uuid, columns_to_join=columns_to_join) @base.remotable_classmethod def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False): if expected_attrs is None: expected_attrs = ['info_cache'] columns_to_join = _expected_cols(expected_attrs) db_inst = cls._db_instance_get_by_uuid(context, uuid, columns_to_join, use_slave=use_slave) return cls._from_db_object(context, cls(), db_inst, expected_attrs) @base.remotable_classmethod def get_by_id(cls, context, inst_id, expected_attrs=None): if expected_attrs is None: expected_attrs = ['info_cache'] columns_to_join = _expected_cols(expected_attrs) db_inst = db.instance_get(context, inst_id, columns_to_join=columns_to_join) return cls._from_db_object(context, cls(), db_inst, expected_attrs) @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') if self.obj_attr_is_set('deleted') and self.deleted: raise exception.ObjectActionError(action='create', reason='already deleted') updates = self.obj_get_changes() version = versionutils.convert_version_to_tuple(self.VERSION) if 'node' in updates and 'compute_id' not in updates: # NOTE(danms): This is not really the best idea, as we should try # not to have different behavior based on the version of the # object. However, this exception helps us find cases in testing # where these may not be updated together. We can remove this # later. 
if version >= (2, 8): raise exception.ObjectActionError( ('Instance is being created with node (%r) ' 'but not compute_id') % updates['node']) else: LOG.warning('Instance is being created with node %r but ' 'no compute_id', updates['node']) # NOTE(danms): We know because of the check above that deleted # is either unset or false. Since we need to avoid passing False # down to the DB layer (which uses an integer), we can always # default it to zero here. updates['deleted'] = 0 expected_attrs = [attr for attr in INSTANCE_DEFAULT_FIELDS if attr in updates] if 'info_cache' in updates: updates['info_cache'] = { 'network_info': updates['info_cache'].network_info.json() } updates['extra'] = {} numa_topology = updates.pop('numa_topology', None) expected_attrs.append('numa_topology') if numa_topology: updates['extra']['numa_topology'] = numa_topology._to_json() else: updates['extra']['numa_topology'] = None pci_requests = updates.pop('pci_requests', None) expected_attrs.append('pci_requests') if pci_requests: updates['extra']['pci_requests'] = ( pci_requests.to_json()) else: updates['extra']['pci_requests'] = None updates['extra']['pci_devices'] = None device_metadata = updates.pop('device_metadata', None) expected_attrs.append('device_metadata') if device_metadata: updates['extra']['device_metadata'] = ( device_metadata._to_json()) else: updates['extra']['device_metadata'] = None flavor = updates.pop('flavor', None) if flavor: expected_attrs.append('flavor') old = ((self.obj_attr_is_set('old_flavor') and self.old_flavor) and self.old_flavor.obj_to_primitive() or None) new = ((self.obj_attr_is_set('new_flavor') and self.new_flavor) and self.new_flavor.obj_to_primitive() or None) flavor_info = { 'cur': self.flavor.obj_to_primitive(), 'old': old, 'new': new, } self._nullify_flavor_description(flavor_info) updates['extra']['flavor'] = jsonutils.dumps(flavor_info) keypairs = updates.pop('keypairs', None) if keypairs is not None: expected_attrs.append('keypairs') updates['extra']['keypairs'] = jsonutils.dumps( keypairs.obj_to_primitive()) vcpu_model = updates.pop('vcpu_model', None) expected_attrs.append('vcpu_model') if vcpu_model: updates['extra']['vcpu_model'] = ( jsonutils.dumps(vcpu_model.obj_to_primitive())) else: updates['extra']['vcpu_model'] = None trusted_certs = updates.pop('trusted_certs', None) expected_attrs.append('trusted_certs') if trusted_certs: updates['extra']['trusted_certs'] = jsonutils.dumps( trusted_certs.obj_to_primitive()) else: updates['extra']['trusted_certs'] = None resources = updates.pop('resources', None) expected_attrs.append('resources') if resources: updates['extra']['resources'] = jsonutils.dumps( resources.obj_to_primitive()) else: updates['extra']['resources'] = None # Initially all instances have no migration context, so avoid us # trying to lazy-load it to check. updates['extra']['migration_context'] = None db_inst = db.instance_create(self._context, updates) self._from_db_object(self._context, self, db_inst, expected_attrs) if ('pci_devices' in updates['extra'] and updates['extra']['pci_devices'] is None): self.pci_devices = None self.migration_context = None # NOTE(danms): The EC2 ids are created on their first load. In order # to avoid them being missing and having to be loaded later, we # load them once here on create now that the instance record is # created. 
self._load_ec2_ids() self.obj_reset_changes(['ec2_ids', 'pci_devices', 'migration_context']) @base.remotable def destroy(self, hard_delete=False): if not self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='destroy', reason='already destroyed') if not self.obj_attr_is_set('uuid'): raise exception.ObjectActionError(action='destroy', reason='no uuid') if not self.obj_attr_is_set('host') or not self.host: # NOTE(danms): If our host is not set, avoid a race constraint = db.constraint(host=db.equal_any(None)) else: constraint = None try: db_inst = db.instance_destroy(self._context, self.uuid, constraint=constraint, hard_delete=hard_delete) self._from_db_object(self._context, self, db_inst) except exception.ConstraintNotMet: raise exception.ObjectActionError(action='destroy', reason='host changed') delattr(self, base.get_attrname('id')) def _save_info_cache(self, context): if self.info_cache: with self.info_cache.obj_alternate_context(context): self.info_cache.save() # TODO(stephenfin): Remove this as it's related to nova-network def _save_security_groups(self, context): # NOTE(stephenfin): We no longer bother saving these since they # shouldn't be created in the first place pass def _save_fault(self, context): # NOTE(danms): I don't think we need to worry about this, do we? pass def _save_pci_requests(self, context): # TODO(danms): Unfortunately, extra.pci_requests is not a serialized # PciRequests object (!), so we have to handle it specially here. # That should definitely be fixed! self._extra_values_to_save['pci_requests'] = ( self.pci_requests.to_json()) def _save_pci_devices(self, context): # NOTE(yjiang5): All devices held by PCI tracker, only PCI tracker # permitted to update the DB. all change to devices from here will # be dropped. pass def _save_tags(self, context): # NOTE(gibi): tags are not saved through the instance pass def _save_services(self, context): # NOTE(mriedem): services are not saved through the instance pass @staticmethod def _nullify_flavor_description(flavor_info): """Helper method to nullify descriptions from a set of primitive flavors. Note that we don't remove the flavor description since that would make the versioned notification FlavorPayload have to handle the field not being set on the embedded instance.flavor. :param dict: dict of primitive flavor objects where the values are the flavors which get persisted in the instance_extra.flavor table. """ for flavor in flavor_info.values(): if flavor and 'description' in flavor['nova_object.data']: flavor['nova_object.data']['description'] = None def _save_flavor(self, context): if not any([x in self.obj_what_changed() for x in ('flavor', 'old_flavor', 'new_flavor')]): return flavor_info = { 'cur': self.flavor.obj_to_primitive(), 'old': (self.old_flavor and self.old_flavor.obj_to_primitive() or None), 'new': (self.new_flavor and self.new_flavor.obj_to_primitive() or None), } self._nullify_flavor_description(flavor_info) self._extra_values_to_save['flavor'] = jsonutils.dumps(flavor_info) self.obj_reset_changes(['flavor', 'old_flavor', 'new_flavor']) def _save_old_flavor(self, context): if 'old_flavor' in self.obj_what_changed(): self._save_flavor(context) def _save_new_flavor(self, context): if 'new_flavor' in self.obj_what_changed(): self._save_flavor(context) def _save_ec2_ids(self, context): # NOTE(hanlind): Read-only so no need to save this. 
pass def _save_keypairs(self, context): if 'keypairs' in self.obj_what_changed(): self._save_extra_generic('keypairs') self.obj_reset_changes(['keypairs'], recursive=True) def _save_extra_generic(self, field): if field in self.obj_what_changed(): obj = getattr(self, field) value = None if obj is not None: value = jsonutils.dumps(obj.obj_to_primitive()) self._extra_values_to_save[field] = value # TODO(stephenfin): Remove the 'admin_state_reset' field in version 3.0 of # the object @base.remotable def save(self, expected_vm_state=None, expected_task_state=None, admin_state_reset=False): """Save updates to this instance Column-wise updates will be made based on the result of self.obj_what_changed(). If expected_task_state is provided, it will be checked against the in-database copy of the instance before updates are made. :param expected_vm_state: Optional tuple of valid vm states for the instance to be in :param expected_task_state: Optional tuple of valid task states for the instance to be in :param admin_state_reset: True if admin API is forcing setting of task_state/vm_state """ context = self._context self._extra_values_to_save = {} updates = {} changes = self.obj_what_changed() version = versionutils.convert_version_to_tuple(self.VERSION) if 'node' in changes and 'compute_id' not in changes: # NOTE(danms): This is not really the best idea, as we should try # not to have different behavior based on the version of the # object. However, this exception helps us find cases in testing # where these may not be updated together. We can remove this # later. if version >= (2, 8): raise exception.ObjectActionError( ('Instance.node is being updated (%r) ' 'but compute_id is not') % self.node) else: LOG.warning('Instance %s node is being updated to %r but ' 'compute_id is not', self.uuid, self.node) for field in self.fields: # NOTE(danms): For object fields, we construct and call a # helper method like self._save_$attrname() if (self.obj_attr_is_set(field) and isinstance(self.fields[field], fields.ObjectField)): try: getattr(self, '_save_%s' % field)(context) except AttributeError: if field in _INSTANCE_EXTRA_FIELDS: self._save_extra_generic(field) continue LOG.exception('No save handler for %s', field, instance=self) except db_exc.DBReferenceError as exp: if exp.key != 'instance_uuid': raise # NOTE(melwitt): This will happen if we instance.save() # before an instance.create() and FK constraint fails. # In practice, this occurs in cells during a delete of # an unscheduled instance. Otherwise, it could happen # as a result of bug. raise exception.InstanceNotFound(instance_id=self.uuid) elif field in changes: updates[field] = self[field] if self._extra_values_to_save: db.instance_extra_update_by_uuid(context, self.uuid, self._extra_values_to_save) if not updates: return # Cleaned needs to be turned back into an int here if 'cleaned' in updates: if updates['cleaned']: updates['cleaned'] = 1 else: updates['cleaned'] = 0 if expected_task_state is not None: updates['expected_task_state'] = expected_task_state if expected_vm_state is not None: updates['expected_vm_state'] = expected_vm_state expected_attrs = [attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS if self.obj_attr_is_set(attr)] if 'pci_devices' in expected_attrs: # NOTE(danms): We don't refresh pci_devices on save right now expected_attrs.remove('pci_devices') # NOTE(alaski): We need to pull system_metadata for the # notification.send_update() below. If we don't there's a KeyError # when it tries to extract the flavor. 
if 'system_metadata' not in expected_attrs: expected_attrs.append('system_metadata') old_ref, inst_ref = db.instance_update_and_get_original( context, self.uuid, updates, columns_to_join=_expected_cols(expected_attrs)) self._from_db_object(context, self, inst_ref, expected_attrs=expected_attrs) # NOTE(danms): We have to be super careful here not to trigger # any lazy-loads that will unmigrate or unbackport something. So, # make a copy of the instance for notifications first. new_ref = self.obj_clone() notifications.send_update(context, old_ref, new_ref) self.obj_reset_changes() @base.remotable def refresh(self, use_slave=False): extra = [field for field in INSTANCE_OPTIONAL_ATTRS if self.obj_attr_is_set(field)] current = self.__class__.get_by_uuid(self._context, uuid=self.uuid, expected_attrs=extra, use_slave=use_slave) # NOTE(danms): We orphan the instance copy so we do not unexpectedly # trigger a lazy-load (which would mean we failed to calculate the # expected_attrs properly) current._context = None for field in self.fields: if field not in self: continue if field not in current: # If the field isn't in current we should not # touch it, triggering a likely-recursive lazy load. # Log it so we can see it happening though, as it # probably isn't expected in most cases. LOG.debug('Field %s is set but not in refreshed ' 'instance, skipping', field) continue if field == 'info_cache': self.info_cache.refresh() elif self[field] != current[field]: self[field] = current[field] self.obj_reset_changes() def _load_generic(self, attrname): instance = self.__class__.get_by_uuid(self._context, uuid=self.uuid, expected_attrs=[attrname]) if attrname not in instance: # NOTE(danms): Never allow us to recursively-load raise exception.ObjectActionError( action='obj_load_attr', reason=_('loading %s requires recursion') % attrname) # NOTE(danms): load anything we don't already have from the # instance we got from the database to make the most of the # performance hit. 
for field in self.fields: if field in instance and field not in self: setattr(self, field, getattr(instance, field)) def _load_fault(self): self.fault = objects.InstanceFault.get_latest_for_instance( self._context, self.uuid) def _load_numa_topology(self, db_topology=_NO_DATA_SENTINEL): if db_topology is None: self.numa_topology = None elif db_topology is not _NO_DATA_SENTINEL: self.numa_topology = objects.InstanceNUMATopology.obj_from_db_obj( self._context, self.uuid, db_topology) else: try: self.numa_topology = \ objects.InstanceNUMATopology.get_by_instance_uuid( self._context, self.uuid) except exception.NumaTopologyNotFound: self.numa_topology = None def _load_pci_requests(self, db_requests=_NO_DATA_SENTINEL): if db_requests is not _NO_DATA_SENTINEL: self.pci_requests = objects.InstancePCIRequests.obj_from_db( self._context, self.uuid, db_requests) else: self.pci_requests = \ objects.InstancePCIRequests.get_by_instance_uuid( self._context, self.uuid) def _load_device_metadata(self, db_dev_meta=_NO_DATA_SENTINEL): if db_dev_meta is None: self.device_metadata = None elif db_dev_meta is not _NO_DATA_SENTINEL: self.device_metadata = \ objects.InstanceDeviceMetadata.obj_from_db( self._context, db_dev_meta) else: self.device_metadata = \ objects.InstanceDeviceMetadata.get_by_instance_uuid( self._context, self.uuid) def _load_flavor(self): instance = self.__class__.get_by_uuid( self._context, uuid=self.uuid, expected_attrs=['flavor']) # NOTE(danms): Orphan the instance to make sure we don't lazy-load # anything below instance._context = None self.flavor = instance.flavor self.old_flavor = instance.old_flavor self.new_flavor = instance.new_flavor def _load_vcpu_model(self, db_vcpu_model=_NO_DATA_SENTINEL): if db_vcpu_model is None: self.vcpu_model = None elif db_vcpu_model is _NO_DATA_SENTINEL: self.vcpu_model = objects.VirtCPUModel.get_by_instance_uuid( self._context, self.uuid) else: db_vcpu_model = jsonutils.loads(db_vcpu_model) self.vcpu_model = objects.VirtCPUModel.obj_from_primitive( db_vcpu_model) def _load_ec2_ids(self): self.ec2_ids = objects.EC2Ids.get_by_instance(self._context, self) def _load_pci_devices(self): self.pci_devices = objects.PciDeviceList.get_by_instance_uuid( self._context, self.uuid) def _load_migration_context(self, db_context=_NO_DATA_SENTINEL): if db_context is _NO_DATA_SENTINEL: try: self.migration_context = ( objects.MigrationContext.get_by_instance_uuid( self._context, self.uuid)) except exception.MigrationContextNotFound: self.migration_context = None elif db_context is None: self.migration_context = None else: self.migration_context = objects.MigrationContext.obj_from_db_obj( db_context) def _load_keypairs(self, db_keypairs=_NO_DATA_SENTINEL): if db_keypairs is _NO_DATA_SENTINEL: inst = objects.Instance.get_by_uuid(self._context, self.uuid, expected_attrs=['keypairs']) if 'keypairs' in inst: self.keypairs = inst.keypairs self.keypairs.obj_reset_changes(recursive=True) self.obj_reset_changes(['keypairs']) else: self.keypairs = objects.KeyPairList(objects=[]) # NOTE(danms): We leave the keypairs attribute dirty in hopes # someone else will save it for us elif db_keypairs: self.keypairs = objects.KeyPairList.obj_from_primitive( jsonutils.loads(db_keypairs)) self.obj_reset_changes(['keypairs']) def _load_tags(self): self.tags = objects.TagList.get_by_resource_id( self._context, self.uuid) def _load_trusted_certs(self, db_trusted_certs=_NO_DATA_SENTINEL): if db_trusted_certs is None: self.trusted_certs = None elif db_trusted_certs is _NO_DATA_SENTINEL: 
self.trusted_certs = objects.TrustedCerts.get_by_instance_uuid( self._context, self.uuid) else: self.trusted_certs = objects.TrustedCerts.obj_from_primitive( jsonutils.loads(db_trusted_certs)) def _load_resources(self, db_resources=_NO_DATA_SENTINEL): if db_resources is None: self.resources = None elif db_resources is _NO_DATA_SENTINEL: self.resources = objects.ResourceList.get_by_instance_uuid( self._context, self.uuid) else: self.resources = objects.ResourceList.obj_from_primitive( jsonutils.loads(db_resources)) def apply_migration_context(self): if self.migration_context: self._set_migration_context_to_instance(prefix='new_') else: LOG.debug("Trying to apply a migration context that does not " "seem to be set for this instance", instance=self) def revert_migration_context(self): if self.migration_context: self._set_migration_context_to_instance(prefix='old_') else: LOG.debug("Trying to revert a migration context that does not " "seem to be set for this instance", instance=self) def _set_migration_context_to_instance(self, prefix): for inst_attr_name in _MIGRATION_CONTEXT_ATTRS: setattr(self, inst_attr_name, None) attr_name = prefix + inst_attr_name if attr_name in self.migration_context: attr_value = getattr( self.migration_context, attr_name) setattr(self, inst_attr_name, attr_value) @contextlib.contextmanager def mutated_migration_context(self, revert=False): """Context manager to temporarily apply/revert the migration context. Calling .save() from within the context manager means that the mutated context will be saved which can cause incorrect resource tracking, and should be avoided. """ # First check to see if we even have a migration context set and if not # we can exit early without lazy-loading other attributes. if 'migration_context' in self and self.migration_context is None: yield return current_values = {} for attr_name in _MIGRATION_CONTEXT_ATTRS: current_values[attr_name] = getattr(self, attr_name) if revert: self.revert_migration_context() else: self.apply_migration_context() try: yield finally: for attr_name in _MIGRATION_CONTEXT_ATTRS: setattr(self, attr_name, current_values[attr_name]) @base.remotable def drop_migration_context(self): if self.migration_context: db.instance_extra_update_by_uuid(self._context, self.uuid, {'migration_context': None}) self.migration_context = None def clear_numa_topology(self): numa_topology = self.numa_topology if numa_topology is not None: self.numa_topology = numa_topology.clear_host_pinning() @base.lazy_load_counter def obj_load_attr(self, attrname): # NOTE(danms): We can't lazy-load anything without a context and a uuid if not self._context: if 'uuid' in self: LOG.debug( "Lazy-load of '%s' attempted by orphaned instance", attrname, instance=self ) raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) if 'uuid' not in self: raise exception.ObjectActionError( action='obj_load_attr', reason=_('attribute %s not lazy-loadable') % attrname) LOG.debug("Lazy-loading '%(attr)s' on %(name)s uuid %(uuid)s", {'attr': attrname, 'name': self.obj_name(), 'uuid': self.uuid, }) with utils.temporary_mutation(self._context, read_deleted='yes'): self._obj_load_attr(attrname) def _obj_load_attr(self, attrname): """Internal method for loading attributes from instances. NOTE: Do not use this directly. This method contains the implementation of lazy-loading attributes from Instance object, minus some massaging of the context and error-checking. 
This should always be called with the object-local context set for reading deleted instances and with uuid set. All of the code below depends on those two things. Thus, this should only be called from obj_load_attr() itself. :param attrname: The name of the attribute to be loaded """ # NOTE(danms): We handle some fields differently here so that we # can be more efficient if attrname == 'fault': self._load_fault() elif attrname == 'numa_topology': self._load_numa_topology() elif attrname == 'device_metadata': self._load_device_metadata() elif attrname == 'pci_requests': self._load_pci_requests() elif attrname == 'vcpu_model': self._load_vcpu_model() elif attrname == 'ec2_ids': self._load_ec2_ids() elif attrname == 'migration_context': self._load_migration_context() elif attrname == 'keypairs': # NOTE(danms): Let keypairs control its own destiny for # resetting changes. return self._load_keypairs() elif attrname == 'trusted_certs': return self._load_trusted_certs() elif attrname == 'resources': return self._load_resources() elif attrname == 'security_groups': self.security_groups = objects.SecurityGroupList() elif attrname == 'pci_devices': self._load_pci_devices() elif 'flavor' in attrname: self._load_flavor() elif attrname == 'services' and self.deleted: # NOTE(mriedem): The join in the data model for instances.services # filters on instances.deleted == 0, so if the instance is deleted # don't attempt to even load services since we'll fail. self.services = objects.ServiceList(self._context) elif attrname == 'tags': if self.deleted: # NOTE(mriedem): Same story as services, the DB API query # in instance_tag_get_by_instance_uuid will fail if the # instance has been deleted so just return an empty tag list. self.tags = objects.TagList(self._context) else: self._load_tags() elif attrname in self.fields and attrname != 'id': # NOTE(danms): We've never let 'id' be lazy-loaded, and use its # absence as a sentinel that it hasn't been created in the database # yet, so refuse to do so here. self._load_generic(attrname) else: # NOTE(danms): This is historically what we did for # something not in a field that was force-loaded. So, just # do this for consistency. raise exception.ObjectActionError( action='obj_load_attr', reason=_('attribute %s not lazy-loadable') % attrname) self.obj_reset_changes([attrname]) def get_flavor(self, namespace=None): prefix = ('%s_' % namespace) if namespace is not None else '' attr = '%sflavor' % prefix try: return getattr(self, attr) except exception.FlavorNotFound: # NOTE(danms): This only happens in the case where we don't # have flavor information in instance_extra, and doing # this triggers a lookup based on our instance_type_id for # (very) legacy instances. That legacy code expects a None here, # so emulate it for this helper, even though the actual attribute # is not nullable. return None @base.remotable def delete_metadata_key(self, key): """Optimized metadata delete method. This provides a more efficient way to delete a single metadata key, instead of just calling instance.save(). This should be called with the key still present in self.metadata, which it will update after completion. 
""" db.instance_metadata_delete(self._context, self.uuid, key) md_was_changed = 'metadata' in self.obj_what_changed() del self.metadata[key] self._orig_metadata.pop(key, None) notifications.send_update(self._context, self, self) if not md_was_changed: self.obj_reset_changes(['metadata']) def get_network_info(self): if self.info_cache is None: return network_model.NetworkInfo.hydrate([]) return self.info_cache.network_info def get_bdms(self): return objects.BlockDeviceMappingList.get_by_instance_uuid( self._context, self.uuid) def get_shares(self): return objects.ShareMappingList.get_by_instance_uuid( self._context, self.uuid) def remove_pci_device_and_request(self, pci_device): """Remove the PciDevice and the related InstancePciRequest""" if pci_device in self.pci_devices.objects: self.pci_devices.objects.remove(pci_device) self.pci_requests.requests = [ pci_req for pci_req in self.pci_requests.requests if pci_req.request_id != pci_device.request_id] def get_pci_devices( self, source: ty.Optional[int] = None, request_id: ty.Optional[str] = None, ) -> ty.List["objects.PciDevice"]: """Return the PCI devices allocated to the instance :param source: Filter by source. It can be InstancePCIRequest.FLAVOR_ALIAS or InstancePCIRequest.NEUTRON_PORT or None. None means returns devices from both type of requests. :param request_id: Filter by PciDevice.request_id. None means do not filter by request_id. :return: a list of matching PciDevice objects """ if not self.pci_devices: # return early to avoid an extra lazy load on self.pci_requests # if there are no devices allocated to be filtered return [] devs = self.pci_devices.objects if request_id is not None: devs = [dev for dev in devs if dev.request_id == request_id] if source is not None: # NOTE(gibi): this happens to work for the old requests when the # request has request_id None and therefore the device allocated # due to that request has request_id None too, so they will be # mapped via the None key. req_id_to_req = { req.request_id: req for req in self.pci_requests.requests } devs = [ dev for dev in devs if (req_id_to_req[dev.request_id].source == source) ] return devs def _make_instance_list(context, inst_list, db_inst_list, expected_attrs): get_fault = expected_attrs and 'fault' in expected_attrs inst_faults = {} if get_fault: # Build an instance_uuid:latest-fault mapping expected_attrs.remove('fault') instance_uuids = [inst['uuid'] for inst in db_inst_list] faults = objects.InstanceFaultList.get_by_instance_uuids( context, instance_uuids) for fault in faults: if fault.instance_uuid not in inst_faults: inst_faults[fault.instance_uuid] = fault inst_cls = objects.Instance inst_list.objects = [] for db_inst in db_inst_list: inst_obj = inst_cls._from_db_object( context, inst_cls(context), db_inst, expected_attrs=expected_attrs) if get_fault: inst_obj.fault = inst_faults.get(inst_obj.uuid, None) inst_list.objects.append(inst_obj) inst_list.obj_reset_changes() return inst_list @db.pick_context_manager_writer def populate_missing_availability_zones(context, max_count): # instances without host have no reasonable AZ to set instances = (context.session.query(models.Instance). filter(models.Instance.host.is_not(None)). 
filter_by(availability_zone=None).limit(max_count).all()) count_all = len(instances) count_hit = 0 for instance in instances: az = avail_zone.get_instance_availability_zone(context, instance) instance.availability_zone = az instance.save(context.session) count_hit += 1 return count_all, count_hit @db.pick_context_manager_writer def populate_instance_compute_id(context, max_count): instances = (context.session.query(models.Instance). filter(models.Instance.compute_id.is_(None)). limit(max_count).all()) count_all = count_hit = 0 rd_context = nova_context.get_admin_context(read_deleted='yes') for instance in instances: count_all += 1 try: node = objects.ComputeNode.get_by_host_and_nodename(rd_context, instance.host, instance.node) except exception.ComputeHostNotFound: LOG.error('Unable to migrate instance because host %s with ' 'node %s not found', instance.host, instance.node, instance=instance) continue instance.compute_id = node.id instance.save(context.session) count_hit += 1 return count_all, count_hit @base.NovaObjectRegistry.register class InstanceList(base.ObjectListBase, base.NovaObject): # Version 2.0: Initial Version # Version 2.1: Add get_uuids_by_host() # Version 2.2: Pagination for get_active_by_window_joined() # Version 2.3: Add get_count_by_vm_state() # Version 2.4: Add get_counts() # Version 2.5: Add get_uuids_by_host_and_node() # Version 2.6: Add get_uuids_by_hosts() VERSION = '2.6' fields = { 'objects': fields.ListOfObjectsField('Instance'), } @classmethod @db.select_db_reader_mode def _get_by_filters_impl(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, expected_attrs=None, use_slave=False, sort_keys=None, sort_dirs=None): if sort_keys or sort_dirs: db_inst_list = db.instance_get_all_by_filters_sort( context, filters, limit=limit, marker=marker, columns_to_join=_expected_cols(expected_attrs), sort_keys=sort_keys, sort_dirs=sort_dirs) else: db_inst_list = db.instance_get_all_by_filters( context, filters, sort_key, sort_dir, limit=limit, marker=marker, columns_to_join=_expected_cols(expected_attrs)) return db_inst_list @base.remotable_classmethod def get_by_filters(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, marker=None, expected_attrs=None, use_slave=False, sort_keys=None, sort_dirs=None): db_inst_list = cls._get_by_filters_impl( context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, expected_attrs=expected_attrs, use_slave=use_slave, sort_keys=sort_keys, sort_dirs=sort_dirs) # NOTE(melwitt): _make_instance_list could result in joined objects' # (from expected_attrs) _from_db_object methods being called during # Instance._from_db_object, each of which might choose to perform # database writes. So, we call this outside of _get_by_filters_impl to # avoid being nested inside a 'reader' database transaction context. 
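# For reference, a typical call into this classmethod looks like this
# (filters and attrs are illustrative):
#
#     insts = objects.InstanceList.get_by_filters(
#         ctxt, {'host': 'compute-1', 'deleted': False},
#         expected_attrs=['flavor', 'info_cache'], limit=50)
#
# Only the columns named in expected_attrs are joined eagerly; anything
# else is lazy-loaded later, per instance, via obj_load_attr().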
return _make_instance_list(context, cls(), db_inst_list, expected_attrs) @staticmethod @db.select_db_reader_mode def _db_instance_get_all_by_host(context, host, columns_to_join, use_slave=False): return db.instance_get_all_by_host(context, host, columns_to_join=columns_to_join) @base.remotable_classmethod def get_by_host(cls, context, host, expected_attrs=None, use_slave=False): db_inst_list = cls._db_instance_get_all_by_host( context, host, columns_to_join=_expected_cols(expected_attrs), use_slave=use_slave) return _make_instance_list(context, cls(), db_inst_list, expected_attrs) @base.remotable_classmethod def get_by_host_and_node(cls, context, host, node, expected_attrs=None): db_inst_list = db.instance_get_all_by_host_and_node( context, host, node, columns_to_join=_expected_cols(expected_attrs)) return _make_instance_list(context, cls(), db_inst_list, expected_attrs) @staticmethod @db.pick_context_manager_reader def _get_uuids_by_host_and_node(context, host, node): return context.session.query( models.Instance.uuid).filter_by( host=host).filter_by(node=node).filter_by(deleted=0).all() @base.remotable_classmethod def get_uuids_by_host_and_node(cls, context, host, node): """Return non-deleted instance UUIDs for the given host and node. :param context: nova auth request context :param host: Filter instances on this host. :param node: Filter instances on this node. :returns: list of non-deleted instance UUIDs on the given host and node """ return cls._get_uuids_by_host_and_node(context, host, node) @base.remotable_classmethod def get_by_host_and_not_type(cls, context, host, type_id=None, expected_attrs=None): db_inst_list = db.instance_get_all_by_host_and_not_type( context, host, type_id=type_id) return _make_instance_list(context, cls(), db_inst_list, expected_attrs) @base.remotable_classmethod def get_all(cls, context, expected_attrs=None): """Returns all instances on all nodes.""" db_instances = db.instance_get_all( context, columns_to_join=_expected_cols(expected_attrs)) return _make_instance_list(context, cls(), db_instances, expected_attrs) @base.remotable_classmethod def get_hung_in_rebooting(cls, context, reboot_window, expected_attrs=None): db_inst_list = db.instance_get_all_hung_in_rebooting(context, reboot_window) return _make_instance_list(context, cls(), db_inst_list, expected_attrs) @staticmethod @db.select_db_reader_mode def _db_instance_get_active_by_window_joined( context, begin, end, project_id, host, columns_to_join, use_slave=False, limit=None, marker=None): return db.instance_get_active_by_window_joined( context, begin, end, project_id, host, columns_to_join=columns_to_join, limit=limit, marker=marker) @base.remotable_classmethod def _get_active_by_window_joined(cls, context, begin, end=None, project_id=None, host=None, expected_attrs=None, use_slave=False, limit=None, marker=None): # NOTE(mriedem): We need to convert the begin/end timestamp strings # to timezone-aware datetime objects for the DB API call. 
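# The round-trip across the RPC boundary is (values illustrative):
#     get_active_by_window_joined():  begin = utils.isotime(begin)
#     here:                           begin = timeutils.parse_isotime(begin)
# parse_isotime() hands back a timezone-aware datetime, which is what the
# DB API expects for the window comparison.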
begin = timeutils.parse_isotime(begin) end = timeutils.parse_isotime(end) if end else None db_inst_list = cls._db_instance_get_active_by_window_joined( context, begin, end, project_id, host, columns_to_join=_expected_cols(expected_attrs), use_slave=use_slave, limit=limit, marker=marker) return _make_instance_list(context, cls(), db_inst_list, expected_attrs) @classmethod def get_active_by_window_joined(cls, context, begin, end=None, project_id=None, host=None, expected_attrs=None, use_slave=False, limit=None, marker=None): """Get instances and joins active during a certain time window. :param:context: nova request context :param:begin: datetime for the start of the time window :param:end: datetime for the end of the time window :param:project_id: used to filter instances by project :param:host: used to filter instances on a given compute host :param:expected_attrs: list of related fields that can be joined in the database layer when querying for instances :param use_slave if True, ship this query off to a DB slave :param limit: maximum number of instances to return per page :param marker: last instance uuid from the previous page :returns: InstanceList """ # NOTE(mriedem): We have to convert the datetime objects to string # primitives for the remote call. begin = utils.isotime(begin) end = utils.isotime(end) if end else None return cls._get_active_by_window_joined(context, begin, end, project_id, host, expected_attrs, use_slave=use_slave, limit=limit, marker=marker) # TODO(stephenfin): Remove this as it's related to nova-network @base.remotable_classmethod def get_by_security_group_id(cls, context, security_group_id): raise NotImplementedError() # TODO(stephenfin): Remove this as it's related to nova-network @classmethod def get_by_security_group(cls, context, security_group): raise NotImplementedError() # TODO(stephenfin): Remove this as it's related to nova-network @base.remotable_classmethod def get_by_grantee_security_group_ids(cls, context, security_group_ids): raise NotImplementedError() def fill_faults(self): """Batch query the database for our instances' faults. :returns: A list of instance uuids for which faults were found. """ uuids = [inst.uuid for inst in self] faults = objects.InstanceFaultList.get_latest_by_instance_uuids( self._context, uuids) faults_by_uuid = {} for fault in faults: faults_by_uuid[fault.instance_uuid] = fault for instance in self: if instance.uuid in faults_by_uuid: instance.fault = faults_by_uuid[instance.uuid] else: # NOTE(danms): Otherwise the caller will cause a lazy-load # when checking it, and we know there are none instance.fault = None instance.obj_reset_changes(['fault']) return faults_by_uuid.keys() def fill_metadata(self): # NOTE(danms): This only fills system_metadata currently, but could # be extended to support user metadata if needed in the future. # Make a uuid-indexed dict of non-object instance dicts that the DB # layer can use. They need only contain the uuid of the instances # we are looking up. Any of them that already have system_metadata # need not be included. 
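# Shapes of the intermediate structures built below (uuids illustrative):
#
#     db_inst_shells = {'1111-...': {'uuid': '1111-...'}, ...}
#     updates  = [{'uuid': '1111-...', 'system_metadata': {...}}, ...]
#     updated  = {'1111-...': {'uuid': '1111-...',
#                              'system_metadata': {...}}, ...}
#
# i.e. only instances still missing system_metadata are sent to the DB
# layer, and the results are re-keyed by uuid so they can be patched back
# onto the matching objects.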
db_inst_shells = {inst.uuid: {'uuid': inst.uuid} for inst in self if 'system_metadata' not in inst} if db_inst_shells: updates = db.instances_fill_metadata( self._context, db_inst_shells.values(), manual_joins=['system_metadata']) updated = {i['uuid']: i for i in updates} for inst in [i for i in self if i.uuid in updated]: # Patch up our instances with system_metadata from the fill # operation inst.system_metadata = utils.instance_sys_meta(updated) @base.remotable_classmethod def get_uuids_by_host(cls, context, host): return db.instance_get_all_uuids_by_hosts(context, [host])[host] @base.remotable_classmethod def get_uuids_by_hosts(cls, context, hosts): """Returns a dict, keyed by hypervisor hostname, of a list of instance UUIDs associated with that compute node. """ return db.instance_get_all_uuids_by_hosts(context, hosts) @staticmethod @db.pick_context_manager_reader def _get_count_by_vm_state_in_db(context, project_id, user_id, vm_state): return context.session.query(models.Instance.id).\ filter_by(deleted=0).\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ filter_by(vm_state=vm_state).\ count() @base.remotable_classmethod def get_count_by_vm_state(cls, context, project_id, user_id, vm_state): return cls._get_count_by_vm_state_in_db(context, project_id, user_id, vm_state) @staticmethod @db.pick_context_manager_reader def _get_counts_in_db(context, project_id, user_id=None): # NOTE(melwitt): Copied from nova/db/main/api.py: # It would be better to have vm_state not be nullable # but until then we test it explicitly as a workaround. not_soft_deleted = sa.or_( models.Instance.vm_state != vm_states.SOFT_DELETED, models.Instance.vm_state == sql.null() ) project_query = context.session.query( func.count(models.Instance.id), func.sum(models.Instance.vcpus), func.sum(models.Instance.memory_mb)).\ filter_by(deleted=0).\ filter(not_soft_deleted).\ filter_by(project_id=project_id) # NOTE(mriedem): Filter out hidden instances since there should be a # non-hidden version of the instance in another cell database and the # API will only show one of them, so we don't count the hidden copy. project_query = project_query.filter( sa.or_( models.Instance.hidden == sql.false(), models.Instance.hidden == sql.null(), )) project_result = project_query.first() fields = ('instances', 'cores', 'ram') project_counts = {field: int(project_result[idx] or 0) for idx, field in enumerate(fields)} counts = {'project': project_counts} if user_id: user_result = project_query.filter_by(user_id=user_id).first() user_counts = {field: int(user_result[idx] or 0) for idx, field in enumerate(fields)} counts['user'] = user_counts return counts @base.remotable_classmethod def get_counts(cls, context, project_id, user_id=None): """Get the counts of Instance objects in the database. :param context: The request context for database access :param project_id: The project_id to count across :param user_id: The user_id to count across :returns: A dict containing the project-scoped counts and user-scoped counts if user_id is specified. 
For example: {'project': {'instances': <count across project>, 'cores': <count across project>, 'ram': <count across project>}, 'user': {'instances': <count across user>, 'cores': <count across user>, 'ram': <count across user>}} """ return cls._get_counts_in_db(context, project_id, user_id=user_id) @staticmethod @db.pick_context_manager_reader def _get_count_by_hosts(context, hosts): return context.session.query(models.Instance).\ filter_by(deleted=0).\ filter(models.Instance.host.in_(hosts)).count() @classmethod def get_count_by_hosts(cls, context, hosts): return cls._get_count_by_hosts(context, hosts) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/instance_action.py0000664000175000017500000002770400000000000020703 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import timeutils from oslo_utils import versionutils from nova.compute import utils as compute_utils from nova.db.main import api as db from nova import exception from nova import objects from nova.objects import base from nova.objects import fields # TODO(berrange): Remove NovaObjectDictCompat @base.NovaObjectRegistry.register class InstanceAction(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: String attributes updated to support unicode # Version 1.2: Add create() method.
VERSION = '1.2' fields = { 'id': fields.IntegerField(), 'action': fields.StringField(nullable=True), 'instance_uuid': fields.UUIDField(nullable=True), 'request_id': fields.StringField(nullable=True), 'user_id': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), 'start_time': fields.DateTimeField(nullable=True), 'finish_time': fields.DateTimeField(nullable=True), 'message': fields.StringField(nullable=True), } @staticmethod def _from_db_object(context, action, db_action): for field in action.fields: action[field] = db_action[field] action._context = context action.obj_reset_changes() return action @staticmethod def pack_action_start(context, instance_uuid, action_name): values = {'request_id': context.request_id, 'instance_uuid': instance_uuid, 'user_id': context.user_id, 'project_id': context.project_id, 'action': action_name, 'start_time': context.timestamp, 'updated_at': context.timestamp} return values @staticmethod def pack_action_finish(context, instance_uuid): utcnow = timeutils.utcnow() values = {'request_id': context.request_id, 'instance_uuid': instance_uuid, 'finish_time': utcnow, 'updated_at': utcnow} return values @base.remotable_classmethod def get_by_request_id(cls, context, instance_uuid, request_id): db_action = db.action_get_by_request_id(context, instance_uuid, request_id) if db_action: return cls._from_db_object(context, cls(), db_action) @base.remotable_classmethod def action_start(cls, context, instance_uuid, action_name, want_result=True): values = cls.pack_action_start(context, instance_uuid, action_name) db_action = db.action_start(context, values) if want_result: return cls._from_db_object(context, cls(), db_action) @base.remotable_classmethod def action_finish(cls, context, instance_uuid, want_result=True): values = cls.pack_action_finish(context, instance_uuid) db_action = db.action_finish(context, values) if want_result: return cls._from_db_object(context, cls(), db_action) @base.remotable def finish(self): values = self.pack_action_finish(self._context, self.instance_uuid) db_action = db.action_finish(self._context, values) self._from_db_object(self._context, self, db_action) # NOTE(mriedem): In most cases, the action_start() method should be used # to create new InstanceAction records. This method should only be used # in specific exceptional cases like when cloning actions from one cell # database to another. 
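# That usual flow looks roughly like this (action name illustrative):
#
#     action = objects.InstanceAction.action_start(
#         ctxt, instance.uuid, instance_actions.REBOOT)
#     ... do the work, recording InstanceActionEvent rows along the way ...
#     objects.InstanceAction.action_finish(ctxt, instance.uuid)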
@base.remotable def create(self): if 'id' in self: raise exception.ObjectActionError(action='create', reason='already created') updates = self.obj_get_changes() db_action = db.action_start(self._context, updates) self._from_db_object(self._context, self, db_action) @base.NovaObjectRegistry.register class InstanceActionList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Version 1.1: get_by_instance_uuid added pagination and filters support VERSION = '1.1' fields = { 'objects': fields.ListOfObjectsField('InstanceAction'), } @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid, limit=None, marker=None, filters=None): db_actions = db.actions_get( context, instance_uuid, limit, marker, filters) return base.obj_make_list(context, cls(), InstanceAction, db_actions) # TODO(berrange): Remove NovaObjectDictCompat @base.NovaObjectRegistry.register class InstanceActionEvent(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: event_finish_with_failure decorated with serialize_args # Version 1.2: Add 'host' field # Version 1.3: Add create() method. # Version 1.4: Added 'details' field. VERSION = '1.4' fields = { 'id': fields.IntegerField(), 'event': fields.StringField(nullable=True), 'action_id': fields.IntegerField(nullable=True), 'start_time': fields.DateTimeField(nullable=True), 'finish_time': fields.DateTimeField(nullable=True), 'result': fields.StringField(nullable=True), 'traceback': fields.StringField(nullable=True), 'host': fields.StringField(nullable=True), 'details': fields.StringField(nullable=True) } def obj_make_compatible(self, primitive, target_version): target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 4) and 'details' in primitive: del primitive['details'] if target_version < (1, 2) and 'host' in primitive: del primitive['host'] @staticmethod def _from_db_object(context, event, db_event): for field in event.fields: event[field] = db_event[field] event._context = context event.obj_reset_changes() return event @staticmethod def pack_action_event_start(context, instance_uuid, event_name, host=None): values = {'event': event_name, 'instance_uuid': instance_uuid, 'request_id': context.request_id, 'start_time': timeutils.utcnow(), 'host': host} return values @staticmethod def pack_action_event_finish(context, instance_uuid, event_name, exc_val=None, exc_tb=None): values = {'event': event_name, 'instance_uuid': instance_uuid, 'request_id': context.request_id, 'finish_time': timeutils.utcnow()} if exc_tb is None: values['result'] = 'Success' else: values['result'] = 'Error' # Store the details using the same logic as storing an instance # fault message. if exc_val: # If we got a string for exc_val it's probably because of # the serialize_args decorator on event_finish_with_failure # so pass that as the message to exception_to_dict otherwise # the details will just the exception class name since it # cannot format the message as a NovaException. 
message = exc_val if isinstance(exc_val, str) else None values['details'] = compute_utils.exception_to_dict( exc_val, message=message)['message'] values['traceback'] = exc_tb return values @base.remotable_classmethod def get_by_id(cls, context, action_id, event_id): db_event = db.action_event_get_by_id(context, action_id, event_id) return cls._from_db_object(context, cls(), db_event) @base.remotable_classmethod def event_start(cls, context, instance_uuid, event_name, want_result=True, host=None): values = cls.pack_action_event_start(context, instance_uuid, event_name, host=host) db_event = db.action_event_start(context, values) if want_result: return cls._from_db_object(context, cls(), db_event) @base.serialize_args @base.remotable_classmethod def event_finish_with_failure(cls, context, instance_uuid, event_name, exc_val=None, exc_tb=None, want_result=None): values = cls.pack_action_event_finish(context, instance_uuid, event_name, exc_val=exc_val, exc_tb=exc_tb) db_event = db.action_event_finish(context, values) if want_result: return cls._from_db_object(context, cls(), db_event) @base.remotable_classmethod def event_finish(cls, context, instance_uuid, event_name, want_result=True): return cls.event_finish_with_failure(context, instance_uuid, event_name, exc_val=None, exc_tb=None, want_result=want_result) @base.remotable def finish_with_failure(self, exc_val, exc_tb): values = self.pack_action_event_finish(self._context, self.instance_uuid, self.event, exc_val=exc_val, exc_tb=exc_tb) db_event = db.action_event_finish(self._context, values) self._from_db_object(self._context, self, db_event) @base.remotable def finish(self): self.finish_with_failure(self._context, exc_val=None, exc_tb=None) # NOTE(mriedem): In most cases, the event_start() method should be used # to create new InstanceActionEvent records. This method should only be # used in specific exceptional cases like when cloning events from one cell # database to another. @base.remotable def create(self, instance_uuid, request_id): if 'id' in self: raise exception.ObjectActionError(action='create', reason='already created') updates = self.obj_get_changes() # The instance_uuid and request_id uniquely identify the "parent" # InstanceAction for this event and are used in action_event_start(). # TODO(mriedem): This could be optimized if we just didn't use # db.action_event_start and inserted the record ourselves and passed # in the action_id. updates['instance_uuid'] = instance_uuid updates['request_id'] = request_id db_event = db.action_event_start(self._context, updates) self._from_db_object(self._context, self, db_event) @base.NovaObjectRegistry.register class InstanceActionEventList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Version 1.1: InstanceActionEvent <= 1.1 VERSION = '1.1' fields = { 'objects': fields.ListOfObjectsField('InstanceActionEvent'), } @base.remotable_classmethod def get_by_action(cls, context, action_id): db_events = db.action_events_get(context, action_id) return base.obj_make_list(context, cls(context), objects.InstanceActionEvent, db_events) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/instance_fault.py0000664000175000017500000001010200000000000020521 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from oslo_log import log as logging from nova.db.main import api as db from nova import exception from nova import objects from nova.objects import base from nova.objects import fields LOG = logging.getLogger(__name__) # TODO(berrange): Remove NovaObjectDictCompat @base.NovaObjectRegistry.register class InstanceFault(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: String attributes updated to support unicode # Version 1.2: Added create() VERSION = '1.2' fields = { 'id': fields.IntegerField(), 'instance_uuid': fields.UUIDField(), 'code': fields.IntegerField(), 'message': fields.StringField(nullable=True), 'details': fields.StringField(nullable=True), 'host': fields.StringField(nullable=True), } @staticmethod def _from_db_object(context, fault, db_fault): # NOTE(danms): These are identical right now for key in fault.fields: fault[key] = db_fault[key] fault._context = context fault.obj_reset_changes() return fault @base.remotable_classmethod def get_latest_for_instance(cls, context, instance_uuid): db_faults = db.instance_fault_get_by_instance_uuids(context, [instance_uuid]) if instance_uuid in db_faults and db_faults[instance_uuid]: return cls._from_db_object(context, cls(), db_faults[instance_uuid][0]) @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') values = { 'instance_uuid': self.instance_uuid, 'code': self.code, 'message': self.message, 'details': self.details, 'host': self.host, } db_fault = db.instance_fault_create(self._context, values) self._from_db_object(self._context, self, db_fault) self.obj_reset_changes() @base.NovaObjectRegistry.register class InstanceFaultList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # InstanceFault <= version 1.1 # Version 1.1: InstanceFault version 1.2 # Version 1.2: Added get_latest_by_instance_uuids() method VERSION = '1.2' fields = { 'objects': fields.ListOfObjectsField('InstanceFault'), } @base.remotable_classmethod def get_latest_by_instance_uuids(cls, context, instance_uuids): db_faultdict = db.instance_fault_get_by_instance_uuids(context, instance_uuids, latest=True) db_faultlist = itertools.chain(*db_faultdict.values()) return base.obj_make_list(context, cls(context), objects.InstanceFault, db_faultlist) @base.remotable_classmethod def get_by_instance_uuids(cls, context, instance_uuids): db_faultdict = db.instance_fault_get_by_instance_uuids(context, instance_uuids) db_faultlist = itertools.chain(*db_faultdict.values()) return base.obj_make_list(context, cls(context), objects.InstanceFault, db_faultlist) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/instance_group.py0000664000175000017500000005676200000000000020570 0ustar00zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import uuidutils from oslo_utils import versionutils from sqlalchemy import orm from nova.compute import utils as compute_utils from nova.db.api import api as api_db_api from nova.db.api import models as api_models from nova import exception from nova import objects from nova.objects import base from nova.objects import fields LAZY_LOAD_FIELDS = ['hosts'] LOG = logging.getLogger(__name__) def _instance_group_get_query(context, id_field=None, id=None): query = context.session.query(api_models.InstanceGroup).\ options(orm.joinedload(api_models.InstanceGroup._policies)).\ options(orm.joinedload(api_models.InstanceGroup._members)) if not context.is_admin: query = query.filter_by(project_id=context.project_id) if id and id_field: query = query.filter(id_field == id) return query def _instance_group_model_get_query(context, model_class, group_id): return context.session.query(model_class).filter_by(group_id=group_id) def _instance_group_model_add(context, model_class, items, item_models, field, group_id, append_to_models=None): models = [] already_existing = set() for db_item in item_models: already_existing.add(getattr(db_item, field)) models.append(db_item) for item in items: if item in already_existing: continue model = model_class() values = {'group_id': group_id} values[field] = item model.update(values) context.session.add(model) if append_to_models: append_to_models.append(model) models.append(model) return models def _instance_group_members_add(context, group, members): query = _instance_group_model_get_query(context, api_models.InstanceGroupMember, group.id) query = query.filter( api_models.InstanceGroupMember.instance_uuid.in_(set(members))) return _instance_group_model_add(context, api_models.InstanceGroupMember, members, query.all(), 'instance_uuid', group.id, append_to_models=group._members) def _instance_group_members_add_by_uuid(context, group_uuid, members): # NOTE(melwitt): The condition on the join limits the number of members # returned to only those we wish to check as already existing. group = context.session.query(api_models.InstanceGroup).outerjoin( api_models.InstanceGroupMember, api_models.InstanceGroupMember.instance_uuid.in_(set(members)) ).filter( api_models.InstanceGroup.uuid == group_uuid ).options(orm.contains_eager(api_models.InstanceGroup._members)).first() if not group: raise exception.InstanceGroupNotFound(group_uuid=group_uuid) return _instance_group_model_add( context, api_models.InstanceGroupMember, members, group._members, 'instance_uuid', group.id, ) # TODO(berrange): Remove NovaObjectDictCompat # TODO(mriedem): Replace NovaPersistentObject with TimestampedObject in v2.0. 
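# Sketch of how policy and rules fit together on this object (values are
# illustrative): a group created as "anti-affinity, at most 2 servers per
# host" ends up as:
#
#     group.policy  == 'anti-affinity'
#     group._rules  == {'max_server_per_host': '2'}   # stored as strings
#     group.rules   == {'max_server_per_host': 2}     # typed via property
#
# The deprecated group.policies list mirrors [group.policy] for older
# consumers.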
@base.NovaObjectRegistry.register class InstanceGroup(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: String attributes updated to support unicode # Version 1.2: Use list/dict helpers for policies, metadetails, members # Version 1.3: Make uuid a non-None real string # Version 1.4: Add add_members() # Version 1.5: Add get_hosts() # Version 1.6: Add get_by_name() # Version 1.7: Deprecate metadetails # Version 1.8: Add count_members_by_user() # Version 1.9: Add get_by_instance_uuid() # Version 1.10: Add hosts field # Version 1.11: Add policy and deprecate policies, add _rules VERSION = '1.11' fields = { 'id': fields.IntegerField(), 'user_id': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), 'uuid': fields.UUIDField(), 'name': fields.StringField(nullable=True), 'policies': fields.ListOfStringsField(nullable=True, read_only=True), 'members': fields.ListOfStringsField(nullable=True), 'hosts': fields.ListOfStringsField(nullable=True), 'policy': fields.StringField(nullable=True), # NOTE(danms): Use rules not _rules for general access '_rules': fields.DictOfStringsField(), } def __init__(self, *args, **kwargs): if 'rules' in kwargs: kwargs['_rules'] = kwargs.pop('rules') super(InstanceGroup, self).__init__(*args, **kwargs) @property def rules(self): if '_rules' not in self: return {} # NOTE(danms): Coerce our rules into a typed dict for convenience rules = {} if 'max_server_per_host' in self._rules: rules['max_server_per_host'] = \ int(self._rules['max_server_per_host']) return rules def obj_make_compatible(self, primitive, target_version): target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 11): # NOTE(yikun): Before 1.11, we had a policies property which is # the list of policy name, even though it was a list, there was # ever only one entry in the list. policy = primitive.pop('policy', None) if policy: primitive['policies'] = [policy] else: primitive['policies'] = [] primitive.pop('rules', None) if target_version < (1, 7): # NOTE(danms): Before 1.7, we had an always-empty # metadetails property primitive['metadetails'] = {} @staticmethod def _from_db_object(context, instance_group, db_inst): """Method to help with migration to objects. Converts a database entity to a formal object. """ # Most of the field names match right now, so be quick for field in instance_group.fields: if field in LAZY_LOAD_FIELDS: continue # This is needed to handle db models from both the api # database and the main database. In the migration to # the api database, we have removed soft-delete, so # the object fields for delete must be filled in with # default values for db models from the api database. # TODO(mriedem): Remove this when NovaPersistentObject is removed. ignore = {'deleted': False, 'deleted_at': None} if '_rules' == field: db_policy = db_inst['policy'] instance_group._rules = ( jsonutils.loads(db_policy['rules']) if db_policy and db_policy['rules'] else {}) elif field in ignore and not hasattr(db_inst, field): instance_group[field] = ignore[field] elif 'policies' == field: continue # NOTE(yikun): The obj.policies is deprecated and marked as # read_only in version 1.11, and there is no "policies" property # in InstanceGroup model anymore, so we just skip to set # "policies" and then load the "policies" when "policy" is set. 
elif 'policy' == field: db_policy = db_inst['policy'] if db_policy: instance_group.policy = db_policy['policy'] instance_group.policies = [instance_group.policy] else: instance_group.policy = None instance_group.policies = [] else: instance_group[field] = db_inst[field] instance_group._context = context instance_group.obj_reset_changes() return instance_group @staticmethod @api_db_api.context_manager.reader def _get_from_db_by_uuid(context, uuid): grp = _instance_group_get_query(context, id_field=api_models.InstanceGroup.uuid, id=uuid).first() if not grp: raise exception.InstanceGroupNotFound(group_uuid=uuid) return grp @staticmethod @api_db_api.context_manager.reader def _get_from_db_by_id(context, id): grp = _instance_group_get_query(context, id_field=api_models.InstanceGroup.id, id=id).first() if not grp: raise exception.InstanceGroupNotFound(group_uuid=id) return grp @staticmethod @api_db_api.context_manager.reader def _get_from_db_by_name(context, name): grp = _instance_group_get_query(context).filter_by(name=name).first() if not grp: raise exception.InstanceGroupNotFound(group_uuid=name) return grp @staticmethod @api_db_api.context_manager.reader def _get_from_db_by_instance(context, instance_uuid): grp_member = context.session.query(api_models.InstanceGroupMember).\ filter_by(instance_uuid=instance_uuid).first() if not grp_member: raise exception.InstanceGroupNotFound(group_uuid='') grp = InstanceGroup._get_from_db_by_id(context, grp_member.group_id) return grp @staticmethod @api_db_api.context_manager.writer def _save_in_db(context, group_uuid, values): grp = InstanceGroup._get_from_db_by_uuid(context, group_uuid) values_copy = copy.copy(values) members = values_copy.pop('members', None) grp.update(values_copy) if members is not None: _instance_group_members_add(context, grp, members) return grp @staticmethod @api_db_api.context_manager.writer def _create_in_db(context, values, policies=None, members=None, policy=None, rules=None): try: group = api_models.InstanceGroup() group.update(values) group.save(context.session) except db_exc.DBDuplicateEntry: raise exception.InstanceGroupIdExists(group_uuid=values['uuid']) if policies: db_policy = api_models.InstanceGroupPolicy( group_id=group['id'], policy=policies[0], rules=None) group._policies = [db_policy] group.rules = None elif policy: db_rules = jsonutils.dumps(rules or {}) db_policy = api_models.InstanceGroupPolicy( group_id=group['id'], policy=policy, rules=db_rules) group._policies = [db_policy] else: group._policies = [] if group._policies: group.save(context.session) if members: group._members = _instance_group_members_add(context, group, members) else: group._members = [] return group @staticmethod @api_db_api.context_manager.writer def _destroy_in_db(context, group_uuid): qry = _instance_group_get_query(context, id_field=api_models.InstanceGroup.uuid, id=group_uuid) if qry.count() == 0: raise exception.InstanceGroupNotFound(group_uuid=group_uuid) # Delete policies and members group_id = qry.first().id instance_models = [api_models.InstanceGroupPolicy, api_models.InstanceGroupMember] for model in instance_models: context.session.query(model).filter_by(group_id=group_id).delete() qry.delete() @staticmethod @api_db_api.context_manager.writer def _add_members_in_db(context, group_uuid, members): return _instance_group_members_add_by_uuid(context, group_uuid, members) @staticmethod @api_db_api.context_manager.writer def _remove_members_in_db(context, group_id, instance_uuids): # There is no public method provided for removing 
members because the # user-facing API doesn't allow removal of instance group members. We # need to be able to remove members to address quota races. context.session.query(api_models.InstanceGroupMember).\ filter_by(group_id=group_id).\ filter(api_models.InstanceGroupMember.instance_uuid. in_(set(instance_uuids))).\ delete(synchronize_session=False) @staticmethod @api_db_api.context_manager.writer def _destroy_members_bulk_in_db(context, instance_uuids): return context.session.query(api_models.InstanceGroupMember).filter( api_models.InstanceGroupMember.instance_uuid.in_(instance_uuids)).\ delete(synchronize_session=False) @classmethod def destroy_members_bulk(cls, context, instance_uuids): return cls._destroy_members_bulk_in_db(context, instance_uuids) @base.lazy_load_counter def obj_load_attr(self, attrname): # NOTE(sbauza): Only hosts could be lazy-loaded right now if attrname != 'hosts': raise exception.ObjectActionError( action='obj_load_attr', reason='unable to load %s' % attrname) LOG.debug("Lazy-loading '%(attr)s' on %(name)s uuid %(uuid)s", {'attr': attrname, 'name': self.obj_name(), 'uuid': self.uuid, }) self.hosts = self.get_hosts() self.obj_reset_changes(['hosts']) @base.remotable_classmethod def get_by_uuid(cls, context, uuid): db_group = cls._get_from_db_by_uuid(context, uuid) return cls._from_db_object(context, cls(), db_group) @base.remotable_classmethod def get_by_name(cls, context, name): db_group = cls._get_from_db_by_name(context, name) return cls._from_db_object(context, cls(), db_group) @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_group = cls._get_from_db_by_instance(context, instance_uuid) return cls._from_db_object(context, cls(), db_group) @classmethod def get_by_hint(cls, context, hint): if uuidutils.is_uuid_like(hint): return cls.get_by_uuid(context, hint) else: return cls.get_by_name(context, hint) @base.remotable def save(self): """Save updates to this instance group.""" updates = self.obj_get_changes() # NOTE(sbauza): We do NOT save the set of compute nodes that an # instance group is connected to in this method. Instance groups are # implicitly connected to compute nodes when the # InstanceGroup.add_members() method is called, which adds the mapping # table entries. # So, since the only way to have hosts in the updates is to set that # field explicitly, we prefer to raise an Exception so the developer # knows he has to call obj_reset_changes(['hosts']) right after setting # the field. if 'hosts' in updates: raise exception.InstanceGroupSaveException(field='hosts') # NOTE(yikun): You have to provide exactly one policy on group create, # and also there are no group update APIs, so we do NOT support # policies update. 
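# Create-time is therefore the only place policy/rules can be set
# (values illustrative):
#
#     group = objects.InstanceGroup(
#         ctxt, name='web', project_id=ctxt.project_id,
#         user_id=ctxt.user_id, policy='anti-affinity',
#         rules={'max_server_per_host': '2'}, members=[])
#     group.create()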
if 'policies' in updates: raise exception.InstanceGroupSaveException(field='policies') if not updates: return payload = dict(updates) payload['server_group_id'] = self.uuid db_group = self._save_in_db(self._context, self.uuid, updates) self._from_db_object(self._context, self, db_group) compute_utils.notify_about_server_group_update(self._context, "update", payload) @base.remotable def refresh(self): """Refreshes the instance group.""" current = self.__class__.get_by_uuid(self._context, self.uuid) for field in self.fields: if self.obj_attr_is_set(field) and self[field] != current[field]: self[field] = current[field] self.obj_reset_changes() @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') updates = self.obj_get_changes() payload = dict(updates) updates.pop('id', None) policies = updates.pop('policies', None) policy = updates.pop('policy', None) rules = updates.pop('_rules', None) members = updates.pop('members', None) if 'uuid' not in updates: self.uuid = uuidutils.generate_uuid() updates['uuid'] = self.uuid db_group = self._create_in_db(self._context, updates, policies=policies, members=members, policy=policy, rules=rules) self._from_db_object(self._context, self, db_group) payload['server_group_id'] = self.uuid compute_utils.notify_about_server_group_update(self._context, "create", payload) compute_utils.notify_about_server_group_action( context=self._context, group=self, action=fields.NotificationAction.CREATE) @base.remotable def destroy(self): payload = {'server_group_id': self.uuid} self._destroy_in_db(self._context, self.uuid) self.obj_reset_changes() compute_utils.notify_about_server_group_update(self._context, "delete", payload) compute_utils.notify_about_server_group_action( context=self._context, group=self, action=fields.NotificationAction.DELETE) @base.remotable_classmethod def add_members(cls, context, group_uuid, instance_uuids): payload = {'server_group_id': group_uuid, 'instance_uuids': instance_uuids} members = cls._add_members_in_db(context, group_uuid, instance_uuids) members = [member['instance_uuid'] for member in members] compute_utils.notify_about_server_group_update(context, "addmember", payload) compute_utils.notify_about_server_group_add_member(context, group_uuid) return list(members) @base.remotable def get_hosts(self, exclude=None): """Get a list of hosts for non-deleted instances in the group This method allows you to get a list of the hosts where instances in this group are currently running. There's also an option to exclude certain instance UUIDs from this calculation. """ filter_uuids = self.members if exclude: filter_uuids = set(filter_uuids) - set(exclude) filters = {'uuid': filter_uuids, 'deleted': False} # Pass expected_attrs=[] to avoid unnecessary joins. # TODO(mriedem): This is pretty inefficient since all we care about # are the hosts. 
We could optimize this with a single-purpose SQL query # like: # SELECT host FROM instances WHERE deleted=0 AND host IS NOT NULL # AND uuid IN ($filter_uuids) GROUP BY host; instances = objects.InstanceList.get_by_filters(self._context, filters=filters, expected_attrs=[]) return list(set([instance.host for instance in instances if instance.host])) @base.remotable def count_members_by_user(self, user_id): """Count the number of instances in a group belonging to a user.""" filter_uuids = self.members filters = {'uuid': filter_uuids, 'user_id': user_id, 'deleted': False} instances = objects.InstanceList.get_by_filters(self._context, filters=filters) return len(instances) @base.NovaObjectRegistry.register class InstanceGroupList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # InstanceGroup <= version 1.3 # Version 1.1: InstanceGroup <= version 1.4 # Version 1.2: InstanceGroup <= version 1.5 # Version 1.3: InstanceGroup <= version 1.6 # Version 1.4: InstanceGroup <= version 1.7 # Version 1.5: InstanceGroup <= version 1.8 # Version 1.6: InstanceGroup <= version 1.9 # Version 1.7: InstanceGroup <= version 1.10 # Version 1.8: Added get_counts() for quotas VERSION = '1.8' fields = { 'objects': fields.ListOfObjectsField('InstanceGroup'), } @staticmethod @api_db_api.context_manager.reader def _get_from_db(context, project_id=None): query = _instance_group_get_query(context) if project_id is not None: query = query.filter_by(project_id=project_id) return query.all() @staticmethod @api_db_api.context_manager.reader def _get_counts_from_db(context, project_id, user_id=None): query = context.session.query(api_models.InstanceGroup.id).\ filter_by(project_id=project_id) counts = {} counts['project'] = {'server_groups': query.count()} if user_id: query = query.filter_by(user_id=user_id) counts['user'] = {'server_groups': query.count()} return counts @base.remotable_classmethod def get_by_project_id(cls, context, project_id): api_db_groups = cls._get_from_db(context, project_id=project_id) return base.obj_make_list(context, cls(context), objects.InstanceGroup, api_db_groups) @base.remotable_classmethod def get_all(cls, context): api_db_groups = cls._get_from_db(context) return base.obj_make_list(context, cls(context), objects.InstanceGroup, api_db_groups) @base.remotable_classmethod def get_counts(cls, context, project_id, user_id=None): """Get the counts of InstanceGroup objects in the database. :param context: The request context for database access :param project_id: The project_id to count across :param user_id: The user_id to count across :returns: A dict containing the project-scoped counts and user-scoped counts if user_id is specified. For example: {'project': {'server_groups': }, 'user': {'server_groups': }} """ return cls._get_counts_from_db(context, project_id, user_id=user_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/instance_info_cache.py0000664000175000017500000001070200000000000021472 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import exception as db_exc from oslo_log import log as logging from nova.db.main import api as db from nova import exception from nova.objects import base from nova.objects import fields LOG = logging.getLogger(__name__) @base.NovaObjectRegistry.register class InstanceInfoCache(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version # Version 1.1: Converted network_info to store the model. # Version 1.2: Added new() and update_cells kwarg to save(). # Version 1.3: Added delete() # Version 1.4: String attributes updated to support unicode # Version 1.5: Actually set the deleted, created_at, updated_at, and # deleted_at attributes VERSION = '1.5' fields = { 'instance_uuid': fields.UUIDField(), 'network_info': fields.NetworkModelField(nullable=True), } @staticmethod def _from_db_object(context, info_cache, db_obj): for field in info_cache.fields: setattr(info_cache, field, db_obj[field]) info_cache.obj_reset_changes() info_cache._context = context return info_cache @classmethod def new(cls, context, instance_uuid): """Create an InfoCache object that can be used to create the DB entry for the first time. When save()ing this object, the info_cache_update() DB call will properly handle creating it if it doesn't exist already. """ info_cache = cls() info_cache.instance_uuid = instance_uuid info_cache.network_info = None info_cache._context = context # Leave the fields dirty return info_cache @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_obj = db.instance_info_cache_get(context, instance_uuid) if not db_obj: raise exception.InstanceInfoCacheNotFound( instance_uuid=instance_uuid) return cls._from_db_object(context, cls(context), db_obj) # TODO(stephenfin): Remove 'update_cells' in version 2.0 @base.remotable def save(self, update_cells=True): if 'network_info' in self.obj_what_changed(): nw_info_json = self.fields['network_info'].to_primitive( self, 'network_info', self.network_info) inst_uuid = self.instance_uuid try: rv = db.instance_info_cache_update( self._context, inst_uuid, {'network_info': nw_info_json}) except db_exc.DBReferenceError as exp: if exp.key != 'instance_uuid': raise # NOTE(melwitt): It is possible for us to fail here with a # foreign key constraint violation on instance_uuid when we # attempt to save the instance network info cache after # receiving a network-changed external event from neutron # during a cross-cell migration. This means the instance record # is not found in this cell database and we can raise # InstanceNotFound to signal that in a way that callers know # how to handle. 
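                # A minimal caller-side sketch (illustrative only; an
                # ``instance`` object with its info_cache loaded is assumed):
                #
                #   try:
                #       instance.info_cache.save()
                #   except exception.InstanceNotFound:
                #       # Deleted, or homed in another cell during a
                #       # cross-cell migration; drop the stale update.
                #       pass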
raise exception.InstanceNotFound(instance_id=inst_uuid) self._from_db_object(self._context, self, rv) self.obj_reset_changes() @base.remotable def delete(self): db.instance_info_cache_delete(self._context, self.instance_uuid) @base.remotable def refresh(self): current = self.__class__.get_by_instance_uuid(self._context, self.instance_uuid) current._context = None for field in self.fields: if (self.obj_attr_is_set(field) and getattr(self, field) != getattr(current, field)): setattr(self, field, getattr(current, field)) self.obj_reset_changes() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/instance_mapping.py0000664000175000017500000004773200000000000021064 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from oslo_log import log as logging from oslo_utils import versionutils from sqlalchemy import orm from sqlalchemy.orm import exc as orm_exc from sqlalchemy import sql from sqlalchemy.sql import func from nova import context as nova_context from nova.db.api import api as api_db_api from nova.db.api import models as api_models from nova import exception from nova.i18n import _ from nova import objects from nova.objects import base from nova.objects import cell_mapping from nova.objects import fields from nova.objects import virtual_interface LOG = logging.getLogger(__name__) @base.NovaObjectRegistry.register class InstanceMapping(base.NovaTimestampObject, base.NovaObject): # Version 1.0: Initial version # Version 1.1: Add queued_for_delete # Version 1.2: Add user_id VERSION = '1.2' fields = { 'id': fields.IntegerField(read_only=True), 'instance_uuid': fields.UUIDField(), 'cell_mapping': fields.ObjectField('CellMapping', nullable=True), 'project_id': fields.StringField(), 'user_id': fields.StringField(), 'queued_for_delete': fields.BooleanField(default=False), } def obj_make_compatible(self, primitive, target_version): super(InstanceMapping, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 2) and 'user_id' in primitive: del primitive['user_id'] if target_version < (1, 1): if 'queued_for_delete' in primitive: del primitive['queued_for_delete'] @base.lazy_load_counter def obj_load_attr(self, attrname): if attrname == 'user_id': LOG.error('The unset user_id attribute of an unmigrated instance ' 'mapping should not be accessed.') raise exception.ObjectActionError( action='obj_load_attr', reason=_('attribute user_id is not lazy-loadable')) super(InstanceMapping, self).obj_load_attr(attrname) def _update_with_cell_id(self, updates): cell_mapping_obj = updates.pop("cell_mapping", None) if cell_mapping_obj: updates["cell_id"] = cell_mapping_obj.id return updates @staticmethod def _from_db_object(context, instance_mapping, db_instance_mapping): for key in instance_mapping.fields: db_value = db_instance_mapping.get(key) if key == 'cell_mapping': # cell_mapping can be None indicating that 
the instance has # not been scheduled yet. if db_value: db_value = cell_mapping.CellMapping._from_db_object( context, cell_mapping.CellMapping(), db_value) if key == 'user_id' and db_value is None: # NOTE(melwitt): If user_id is NULL, we can't set the field # because it's non-nullable. We don't plan for any code to read # the user_id field at this time, so skip setting it. continue setattr(instance_mapping, key, db_value) instance_mapping.obj_reset_changes() instance_mapping._context = context return instance_mapping @staticmethod @api_db_api.context_manager.reader def _get_by_instance_uuid_from_db(context, instance_uuid): db_mapping = context.session.query(api_models.InstanceMapping)\ .options(orm.joinedload(api_models.InstanceMapping.cell_mapping))\ .filter(api_models.InstanceMapping.instance_uuid == instance_uuid)\ .first() if not db_mapping: raise exception.InstanceMappingNotFound(uuid=instance_uuid) return db_mapping @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_mapping = cls._get_by_instance_uuid_from_db(context, instance_uuid) return cls._from_db_object(context, cls(), db_mapping) @staticmethod @api_db_api.context_manager.writer def _create_in_db(context, updates): db_mapping = api_models.InstanceMapping() db_mapping.update(updates) db_mapping.save(context.session) # NOTE: This is done because a later access will trigger a lazy load # outside of the db session so it will fail. We don't lazy load # cell_mapping on the object later because we never need an # InstanceMapping without the CellMapping. db_mapping.cell_mapping return db_mapping @base.remotable def create(self): changes = self.obj_get_changes() changes = self._update_with_cell_id(changes) if 'queued_for_delete' not in changes: # NOTE(danms): If we are creating a mapping, it should be # not queued_for_delete (unless we are being asked to # create one in deleted state for some reason). changes['queued_for_delete'] = False db_mapping = self._create_in_db(self._context, changes) self._from_db_object(self._context, self, db_mapping) @staticmethod @api_db_api.context_manager.writer def _save_in_db(context, instance_uuid, updates): db_mapping = context.session.query( api_models.InstanceMapping).filter_by( instance_uuid=instance_uuid).first() if not db_mapping: raise exception.InstanceMappingNotFound(uuid=instance_uuid) db_mapping.update(updates) # NOTE: This is done because a later access will trigger a lazy load # outside of the db session so it will fail. We don't lazy load # cell_mapping on the object later because we never need an # InstanceMapping without the CellMapping. db_mapping.cell_mapping context.session.add(db_mapping) return db_mapping @base.remotable def save(self): changes = self.obj_get_changes() changes = self._update_with_cell_id(changes) try: db_mapping = self._save_in_db(self._context, self.instance_uuid, changes) except orm_exc.StaleDataError: # NOTE(melwitt): If the instance mapping has been deleted out from # under us by conductor (delete requested while booting), we will # encounter a StaleDataError after we retrieved the row and try to # update it after it's been deleted. We can treat this like an # instance mapping not found and allow the caller to handle it. 
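            # A minimal caller-side sketch (illustrative only; ``ctxt``,
            # ``uuid`` and ``cell`` are assumed from the calling code):
            #
            #   try:
            #       im = objects.InstanceMapping.get_by_instance_uuid(
            #           ctxt, uuid)
            #       im.cell_mapping = cell
            #       im.save()
            #   except exception.InstanceMappingNotFound:
            #       pass  # mapping removed concurrently; nothing to update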
raise exception.InstanceMappingNotFound(uuid=self.instance_uuid) self._from_db_object(self._context, self, db_mapping) self.obj_reset_changes() @staticmethod @api_db_api.context_manager.writer def _destroy_in_db(context, instance_uuid): result = context.session.query(api_models.InstanceMapping).filter_by( instance_uuid=instance_uuid).delete() if not result: raise exception.InstanceMappingNotFound(uuid=instance_uuid) @base.remotable def destroy(self): self._destroy_in_db(self._context, self.instance_uuid) @api_db_api.context_manager.writer def populate_queued_for_delete(context, max_count): cells = objects.CellMappingList.get_all(context) processed = 0 for cell in cells: ims = ( # Get a direct list of instance mappings for this cell which # have not yet received a defined value decision for # queued_for_delete context.session.query(api_models.InstanceMapping) .filter( api_models.InstanceMapping.queued_for_delete == None) # noqa .filter(api_models.InstanceMapping.cell_id == cell.id) .limit(max_count).all()) ims_by_inst = {im.instance_uuid: im for im in ims} if not ims_by_inst: # If there is nothing from this cell to migrate, move on. continue with nova_context.target_cell(context, cell) as cctxt: filters = {'uuid': list(ims_by_inst.keys()), 'deleted': True, 'soft_deleted': True} instances = objects.InstanceList.get_by_filters( cctxt, filters, expected_attrs=[]) # Walk through every deleted instance that has a mapping needing # to be updated and update it for instance in instances: im = ims_by_inst.pop(instance.uuid) im.queued_for_delete = True context.session.add(im) processed += 1 # Any instances we did not just hit must be not-deleted, so # update the remaining mappings for non_deleted_im in ims_by_inst.values(): non_deleted_im.queued_for_delete = False context.session.add(non_deleted_im) processed += 1 max_count -= len(ims) if max_count <= 0: break return processed, processed @api_db_api.context_manager.writer def populate_user_id(context, max_count): cells = objects.CellMappingList.get_all(context) cms_by_id = {cell.id: cell for cell in cells} done = 0 unmigratable_ims = False ims = ( # Get a list of instance mappings which do not have user_id populated. # We need to include records with queued_for_delete=True because they # include SOFT_DELETED instances, which could be restored at any time # in the future. If we don't migrate SOFT_DELETED instances now, we # wouldn't be able to retire this migration code later. Also filter # out the marker instance created by the virtual interface migration. context.session.query(api_models.InstanceMapping) .filter_by(user_id=None) .filter(api_models.InstanceMapping.project_id != virtual_interface.FAKE_UUID) .limit(max_count).all()) found = len(ims) ims_by_inst_uuid = {} inst_uuids_by_cell_id = collections.defaultdict(set) for im in ims: ims_by_inst_uuid[im.instance_uuid] = im inst_uuids_by_cell_id[im.cell_id].add(im.instance_uuid) for cell_id, inst_uuids in inst_uuids_by_cell_id.items(): # We cannot migrate instance mappings that don't have a cell yet. if cell_id is None: unmigratable_ims = True continue with nova_context.target_cell(context, cms_by_id[cell_id]) as cctxt: # We need to migrate SOFT_DELETED instances because they could be # restored at any time in the future, preventing us from being able # to remove any other interim online data migration code we have, # if we don't migrate them here. # NOTE: it's not possible to query only for SOFT_DELETED instances. # We must query for both deleted and SOFT_DELETED instances. 
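        # A minimal driver-loop sketch (illustrative only; ``ctxt`` is
        # assumed to be an admin RequestContext): populate_queued_for_delete()
        # and populate_user_id() are designed to be run in bounded batches
        # until they report that no more rows were found, e.g.:
        #
        #   found, done = populate_user_id(ctxt, max_count=50)
        #   while found:
        #       found, done = populate_user_id(ctxt, max_count=50)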
filters = {'uuid': inst_uuids} try: instances = objects.InstanceList.get_by_filters( cctxt, filters, expected_attrs=[]) except Exception as exp: LOG.warning('Encountered exception: "%s" while querying ' 'instances from cell: %s. Continuing to the next ' 'cell.', str(exp), cms_by_id[cell_id].identity) continue # Walk through every instance that has a mapping needing to be updated # and update it. for instance in instances: im = ims_by_inst_uuid.pop(instance.uuid) im.user_id = instance.user_id context.session.add(im) done += 1 if ims_by_inst_uuid: unmigratable_ims = True if done >= max_count: break if unmigratable_ims: LOG.warning('Some instance mappings were not migratable. This may ' 'be transient due to in-flight instance builds, or could ' 'be due to stale data that will be cleaned up after ' 'running "nova-manage db archive_deleted_rows --purge".') return found, done @base.NovaObjectRegistry.register class InstanceMappingList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Version 1.1: Added get_by_cell_id method. # Version 1.2: Added get_by_instance_uuids method # Version 1.3: Added get_counts() VERSION = '1.3' fields = { 'objects': fields.ListOfObjectsField('InstanceMapping'), } @staticmethod @api_db_api.context_manager.reader def _get_by_project_id_from_db(context, project_id): return context.session.query(api_models.InstanceMapping)\ .options(orm.joinedload(api_models.InstanceMapping.cell_mapping))\ .filter(api_models.InstanceMapping.project_id == project_id).all() @base.remotable_classmethod def get_by_project_id(cls, context, project_id): db_mappings = cls._get_by_project_id_from_db(context, project_id) return base.obj_make_list(context, cls(), objects.InstanceMapping, db_mappings) @staticmethod @api_db_api.context_manager.reader def _get_by_cell_id_from_db(context, cell_id): return context.session.query(api_models.InstanceMapping)\ .options(orm.joinedload(api_models.InstanceMapping.cell_mapping))\ .filter(api_models.InstanceMapping.cell_id == cell_id).all() @base.remotable_classmethod def get_by_cell_id(cls, context, cell_id): db_mappings = cls._get_by_cell_id_from_db(context, cell_id) return base.obj_make_list(context, cls(), objects.InstanceMapping, db_mappings) @staticmethod @api_db_api.context_manager.reader def _get_by_instance_uuids_from_db(context, uuids): return context.session.query(api_models.InstanceMapping)\ .options(orm.joinedload(api_models.InstanceMapping.cell_mapping))\ .filter(api_models.InstanceMapping.instance_uuid.in_(uuids))\ .all() @base.remotable_classmethod def get_by_instance_uuids(cls, context, uuids): db_mappings = cls._get_by_instance_uuids_from_db(context, uuids) return base.obj_make_list(context, cls(), objects.InstanceMapping, db_mappings) @staticmethod @api_db_api.context_manager.writer def _destroy_bulk_in_db(context, instance_uuids): return context.session.query(api_models.InstanceMapping).filter( api_models.InstanceMapping.instance_uuid.in_(instance_uuids)).\ delete(synchronize_session=False) @classmethod def destroy_bulk(cls, context, instance_uuids): return cls._destroy_bulk_in_db(context, instance_uuids) @staticmethod @api_db_api.context_manager.reader def _get_not_deleted_by_cell_and_project_from_db(context, cell_uuid, project_id, limit): query = context.session.query(api_models.InstanceMapping) if project_id is not None: # Note that the project_id can be None in case # instances are being listed for the all-tenants case. 
query = query.filter_by(project_id=project_id) # Both the values NULL (for cases when the online data migration for # queued_for_delete was not run) and False (cases when the online # data migration for queued_for_delete was run) are assumed to mean # that the instance is not queued for deletion. query = query.filter( sql.or_( api_models.InstanceMapping.queued_for_delete == sql.false(), api_models.InstanceMapping.queued_for_delete.is_(None) ) ).join( api_models.InstanceMapping.cell_mapping ).options( orm.joinedload(api_models.InstanceMapping.cell_mapping) ).filter(api_models.CellMapping.uuid == cell_uuid) if limit is not None: query = query.limit(limit) return query.all() @classmethod def get_not_deleted_by_cell_and_project(cls, context, cell_uuid, project_id, limit=None): """Return a limit restricted list of InstanceMapping objects which are mapped to the specified cell_uuid, belong to the specified project_id and are not queued for deletion (note that unlike the other InstanceMappingList query methods which return all mappings irrespective of whether they are queued for deletion this method explicitly queries only for those mappings that are *not* queued for deletion as is evident from the naming of the method). """ db_mappings = cls._get_not_deleted_by_cell_and_project_from_db( context, cell_uuid, project_id, limit) return base.obj_make_list(context, cls(), objects.InstanceMapping, db_mappings) @staticmethod @api_db_api.context_manager.reader def _get_counts_in_db(context, project_id, user_id=None): project_query = context.session.query( func.count(api_models.InstanceMapping.id)).\ filter_by(queued_for_delete=False).\ filter_by(project_id=project_id) project_result = project_query.scalar() counts = {'project': {'instances': project_result}} if user_id: user_result = project_query.filter_by(user_id=user_id).scalar() counts['user'] = {'instances': user_result} return counts @base.remotable_classmethod def get_counts(cls, context, project_id, user_id=None): """Get the counts of InstanceMapping objects in the database. The count is used to represent the count of instances for the purpose of counting quota usage. Instances that are queued_for_deleted=True are not included in the count (deleted and SOFT_DELETED instances). Instances that are queued_for_deleted=None are not included in the count because we are not certain about whether or not they are deleted. :param context: The request context for database access :param project_id: The project_id to count across :param user_id: The user_id to count across :returns: A dict containing the project-scoped counts and user-scoped counts if user_id is specified. For example: {'project': {'instances': }, 'user': {'instances': }} """ return cls._get_counts_in_db(context, project_id, user_id=user_id) @staticmethod @api_db_api.context_manager.reader def _get_count_by_uuids_and_user_in_db(context, uuids, user_id): query = (context.session.query( func.count(api_models.InstanceMapping.id)) .filter(api_models.InstanceMapping.instance_uuid.in_(uuids)) .filter_by(queued_for_delete=False) .filter_by(user_id=user_id)) return query.scalar() @classmethod def get_count_by_uuids_and_user(cls, context, uuids, user_id): """Get the count of InstanceMapping objects by UUIDs and user_id. The count is used to represent the count of server group members belonging to a particular user, for the purpose of counting quota usage. Instances that are queued_for_deleted=True are not included in the count (deleted and SOFT_DELETED instances). 
:param uuids: List of instance UUIDs on which to filter :param user_id: The user_id on which to filter :returns: An integer for the count """ return cls._get_count_by_uuids_and_user_in_db(context, uuids, user_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/instance_numa.py0000664000175000017500000003767500000000000020366 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import versionutils from nova.db.main import api as db from nova import exception from nova.objects import base from nova.objects import fields as obj_fields from nova.virt import hardware LOG = logging.getLogger(__name__) # TODO(berrange): Remove NovaObjectDictCompat @base.NovaObjectRegistry.register class InstanceNUMACell(base.NovaEphemeralObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Add pagesize field # Version 1.2: Add cpu_pinning_raw and topology fields # Version 1.3: Add cpu_policy and cpu_thread_policy fields # Version 1.4: Add cpuset_reserved field # Version 1.5: Add pcpuset field # Version 1.6: Add 'mixed' to cpu_policy field VERSION = '1.6' def obj_make_compatible(self, primitive, target_version): super(InstanceNUMACell, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) # An instance with a 'mixed' CPU policy cannot be made backward # compatible. if target_version < (1, 6): base.raise_on_too_new_values( target_version, primitive, 'cpu_policy', (obj_fields.CPUAllocationPolicy.MIXED,)) # NOTE(huaqiang): Since version 1.5, 'cpuset' is modified to track the # unpinned CPUs only, with pinned CPUs tracked via 'pcpuset' instead. # For backward compatibility, move the 'dedicated' instance CPU list # from 'pcpuset' to 'cpuset'. if target_version < (1, 5): if (primitive['cpu_policy'] == obj_fields.CPUAllocationPolicy.DEDICATED): primitive['cpuset'] = primitive['pcpuset'] primitive.pop('pcpuset', None) LOG.warning( f'Downgrading InstanceNUMACell to version {target_version} ' f'may cause the loss of pinned CPUs if mixing different ' f'versions of nova on different hosts.
This should not ' f'happen on any supported version after Victoria.') if target_version < (1, 4): primitive.pop('cpuset_reserved', None) if target_version < (1, 3): primitive.pop('cpu_policy', None) primitive.pop('cpu_thread_policy', None) fields = { 'id': obj_fields.IntegerField(), 'cpuset': obj_fields.SetOfIntegersField(), 'pcpuset': obj_fields.SetOfIntegersField(), # These physical CPUs are reserved for use by the hypervisor 'cpuset_reserved': obj_fields.SetOfIntegersField(nullable=True, default=None), 'memory': obj_fields.IntegerField(), 'pagesize': obj_fields.IntegerField(nullable=True, default=None), # TODO(sean-k-mooney): This is no longer used and should be # removed in v2 'cpu_topology': obj_fields.ObjectField('VirtCPUTopology', nullable=True), 'cpu_pinning_raw': obj_fields.DictOfIntegersField(nullable=True, default=None), 'cpu_policy': obj_fields.CPUAllocationPolicyField(nullable=True, default=None), 'cpu_thread_policy': obj_fields.CPUThreadAllocationPolicyField( nullable=True, default=None), } cpu_pinning = obj_fields.DictProxyField('cpu_pinning_raw') def __len__(self): return len(self.total_cpus) @property def total_cpus(self): return self.cpuset | self.pcpuset @property def siblings(self): cpu_list = sorted(list(self.total_cpus)) threads = 0 if ('cpu_topology' in self) and self.cpu_topology: threads = self.cpu_topology.threads if threads == 1: threads = 0 return list(map(set, zip(*[iter(cpu_list)] * threads))) def pin(self, vcpu, pcpu): if vcpu not in self.pcpuset: return pinning_dict = self.cpu_pinning or {} pinning_dict[vcpu] = pcpu self.cpu_pinning = pinning_dict def pin_vcpus(self, *cpu_pairs): for vcpu, pcpu in cpu_pairs: self.pin(vcpu, pcpu) def clear_host_pinning(self): """Clear any data related to how this cell is pinned to the host. Needed for aborting claims as we do not want to keep stale data around. """ self.id = -1 self.cpu_pinning = {} return self # TODO(berrange): Remove NovaObjectDictCompat @base.NovaObjectRegistry.register class InstanceNUMATopology(base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Takes into account pagesize # Version 1.2: InstanceNUMACell 1.2 # Version 1.3: Add emulator threads policy VERSION = '1.3' def obj_make_compatible(self, primitive, target_version): super(InstanceNUMATopology, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 3): primitive.pop('emulator_threads_policy', None) fields = { # NOTE(danms): The 'id' field is no longer used and should be # removed in the future when convenient 'id': obj_fields.IntegerField(), 'instance_uuid': obj_fields.UUIDField(), 'cells': obj_fields.ListOfObjectsField('InstanceNUMACell'), 'emulator_threads_policy': ( obj_fields.CPUEmulatorThreadsPolicyField(nullable=True)), } @classmethod def obj_from_db_obj(cls, context, instance_uuid, db_obj): primitive = jsonutils.loads(db_obj) if 'nova_object.name' in primitive: obj = cls.obj_from_primitive(primitive) updated = cls._migrate_legacy_dedicated_instance_cpuset(obj) if updated: cls._save_migrated_cpuset_to_instance_extra( context, obj, instance_uuid) else: obj = cls._migrate_legacy_object(context, instance_uuid, primitive) return obj # TODO(huaqiang): Remove after Wallaby once we are sure these objects have # been loaded at least once. 
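    # A minimal construction sketch for the cell object handled below
    # (illustrative only; the CPU numbers are made up): since cell version
    # 1.5 the pinned guest CPUs live in 'pcpuset' and any floating ones in
    # 'cpuset'.
    #
    #   cell = objects.InstanceNUMACell(
    #       id=0, cpuset=set(), pcpuset={0, 1}, memory=1024,
    #       cpu_policy=obj_fields.CPUAllocationPolicy.DEDICATED)
    #   cell.pin_vcpus((0, 4), (1, 5))  # guest vCPU -> host pCPU
    #   # cell.cpu_pinning == {0: 4, 1: 5}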
@classmethod def _migrate_legacy_dedicated_instance_cpuset(cls, obj): # NOTE(huaqiang): We may meet some topology object with the old version # 'InstanceNUMACell' cells, in that case, the 'dedicated' CPU is kept # in 'InstanceNUMACell.cpuset' field, but it should be kept in # 'InstanceNUMACell.pcpuset' field since Victoria. Making an upgrade # here but letting the caller persist the result if needed as we # don't know which table the InstanceNUMACell is coming from. It can # come from instance_extra or request_spec too. update_db = False for cell in obj.cells: version = versionutils.convert_version_to_tuple(cell.VERSION) if version < (1, 4): LOG.warning( "InstanceNUMACell %s with version %s for instance %s has " "too old version in the DB, don't know how to update, " "ignoring.", cell, cell.VERSION, obj.instance_uuid) continue if (version >= (1, 5) and cell.cpu_policy == obj_fields.CPUAllocationPolicy.DEDICATED and (cell.cpuset or not cell.pcpuset) ): LOG.warning( "InstanceNUMACell %s with version %s is inconsistent as " "the version is 1.5 or greater, cpu_policy is dedicated, " "but cpuset is not empty or pcpuset is empty.", cell, cell.VERSION) continue # NOTE(gibi): The data migration between 1.4. and 1.5 populates the # pcpuset field that is new in version 1.5. However below we update # the object version to 1.6 directly. This is intentional. The # version 1.6 introduced a new possible value 'mixed' for the # cpu_policy field. As that is a forward compatible change we don't # have a specific data migration for it. But we also don't have an # automated way to update old object versions from 1.5 to 1.6. So # we do it here just to avoid inconsistency between data and # version in the DB. if version < (1, 6): if cell.cpu_policy == obj_fields.CPUAllocationPolicy.DEDICATED: if "pcpuset" not in cell or not cell.pcpuset: # this cell was never migrated to 1.6, migrate it. cell.pcpuset = cell.cpuset cell.cpuset = set() cell.VERSION = '1.6' update_db = True else: # This data was already migrated to 1.6 format but the # version string wasn't updated to 1.6. This happened # before the fix # https://bugs.launchpad.net/nova/+bug/2097360 # Only update the version string. cell.VERSION = '1.6' update_db = True elif cell.cpu_policy in ( None, obj_fields.CPUAllocationPolicy.SHARED): # no data migration needed just add the new field and # stamp the new version in the DB cell.pcpuset = set() cell.VERSION = '1.6' update_db = True else: # obj_fields.CPUAllocationPolicy.MIXED # This means the cell data already got updated to the 1.6 # content as MIXED only supported with 1.6 but the version # was not updated to 1.6. # We should not do the data migration as that would trample # the pcpuset field. Just stamp the 1.6 version in the DB # and hope for the best. LOG.warning( "InstanceNUMACell %s with version %s for instance %s " "has older than 1.6 version in the DB but using the " "1.6 feature CPUAllocationPolicy.MIXED. So nova " "assumes that the data is in 1.6 format and only the " "version string is old. Correcting the version string " "in the DB.", cell, cell.VERSION, obj.instance_uuid) cell.VERSION = '1.6' update_db = True # When the next ovo version 1.7 is added it needs to be handed # here to do any migration if needed and to ensure the version in # the DB is stamped to 1.7 return update_db # TODO(huaqiang): Remove after Yoga once we are sure these objects have # been loaded at least once. 
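    # A minimal before/after sketch of the migration above (illustrative
    # only; values are made up): a 'dedicated' cell recorded by an old
    # compute as
    #
    #   cpuset={0, 1}, pcpuset unset, VERSION '1.4'
    #
    # leaves _migrate_legacy_dedicated_instance_cpuset() as
    #
    #   cpuset=set(), pcpuset={0, 1}, VERSION '1.6'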
@classmethod def _save_migrated_cpuset_to_instance_extra( cls, context, obj, instance_uuid ): db_obj = jsonutils.dumps(obj.obj_to_primitive()) values = { 'numa_topology': db_obj, } db.instance_extra_update_by_uuid( context, instance_uuid, values) # TODO(stephenfin): Remove in X or later, once this has bedded in @classmethod def _migrate_legacy_object(cls, context, instance_uuid, primitive): """Convert a pre-Liberty object to a real o.vo. Handle an unversioned object created prior to Liberty, by transforming to a versioned object and saving back the serialized version of this. :param context: RequestContext :param instance_uuid: The UUID of the instance this topology is associated with. :param primitive: A serialized representation of the legacy object. :returns: A serialized representation of the updated object. """ obj = cls( instance_uuid=instance_uuid, cells=[ InstanceNUMACell( id=cell.get('id'), cpuset=hardware.parse_cpu_spec(cell.get('cpus', '')), pcpuset=set(), memory=cell.get('mem', {}).get('total', 0), pagesize=cell.get('pagesize'), ) for cell in primitive.get('cells', []) ], ) db_obj = jsonutils.dumps(obj.obj_to_primitive()) values = { 'numa_topology': db_obj, } db.instance_extra_update_by_uuid(context, instance_uuid, values) return obj # TODO(ndipanov) Remove this method on the major version bump to 2.0 @base.remotable def create(self): values = {'numa_topology': self._to_json()} db.instance_extra_update_by_uuid(self._context, self.instance_uuid, values) self.obj_reset_changes() @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_extra = db.instance_extra_get_by_instance_uuid( context, instance_uuid, columns=['numa_topology']) if not db_extra: raise exception.NumaTopologyNotFound(instance_uuid=instance_uuid) if db_extra['numa_topology'] is None: return None return cls.obj_from_db_obj( context, instance_uuid, db_extra['numa_topology']) def _to_json(self): return jsonutils.dumps(self.obj_to_primitive()) def __len__(self): """Defined so that boolean testing works the same as for lists.""" return len(self.cells) # TODO(stephenfin): We should add a real 'cpu_policy' field on this object # and deprecate the one found on the cell @property def cpu_policy(self): cpu_policy = set(cell.cpu_policy for cell in self.cells) if len(cpu_policy) > 1: # NOTE(stephenfin): This should never happen in real life; it's to # prevent programmer error. raise exception.InternalError( 'Instance NUMA cells must have the same CPU policy.' ) return cpu_policy.pop() @property def cpu_pinning(self): """Return a set of all host CPUs this NUMATopology is pinned to.""" return set(itertools.chain.from_iterable([ cell.cpu_pinning.values() for cell in self.cells if cell.cpu_pinning])) @property def cpuset_reserved(self): return set(itertools.chain.from_iterable([ cell.cpuset_reserved for cell in self.cells if cell.cpuset_reserved])) def clear_host_pinning(self): """Clear any data related to how instance is pinned to the host. Needed for aborting claims as we do not want to keep stale data around. 
""" for cell in self.cells: cell.clear_host_pinning() return self @property def emulator_threads_isolated(self): """Determines whether emulator threads should be isolated""" return (self.obj_attr_is_set('emulator_threads_policy') and (self.emulator_threads_policy == obj_fields.CPUEmulatorThreadsPolicy.ISOLATE)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/instance_pci_requests.py0000664000175000017500000001521500000000000022126 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from oslo_utils import versionutils from nova.db.main import api as db from nova.objects import base from nova.objects import fields @base.NovaObjectRegistry.register class InstancePCIRequest(base.NovaObject): # Version 1.0: Initial version # Version 1.1: Added request_id field # Version 1.2: Added numa_policy field # Version 1.3: Added requester_id field # Version 1.4: Added 'socket' to numa_policy field VERSION = '1.4' # Possible sources for a PCI request: # FLAVOR_ALIAS : Request originated from a flavor alias. # NEUTRON_PORT : Request originated from a neutron port. FLAVOR_ALIAS = 0 NEUTRON_PORT = 1 fields = { 'count': fields.IntegerField(), 'spec': fields.ListOfDictOfNullableStringsField(), 'alias_name': fields.StringField(nullable=True), # Note(moshele): is_new is deprecated and should be removed # on major version bump 'is_new': fields.BooleanField(default=False), 'request_id': fields.UUIDField(nullable=True), 'requester_id': fields.StringField(nullable=True), 'numa_policy': fields.PCINUMAAffinityPolicyField(nullable=True), } @property def source(self): # PCI requests originate from two sources: instance flavor alias and # neutron SR-IOV ports. # SR-IOV ports pci_request don't have an alias_name. 
return (InstancePCIRequest.NEUTRON_PORT if self.alias_name is None else InstancePCIRequest.FLAVOR_ALIAS) def obj_load_attr(self, attr): setattr(self, attr, None) def obj_make_compatible(self, primitive, target_version): super(InstancePCIRequest, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 3) and 'requester_id' in primitive: del primitive['requester_id'] if target_version < (1, 2) and 'numa_policy' in primitive: del primitive['numa_policy'] if target_version < (1, 1) and 'request_id' in primitive: del primitive['request_id'] def is_live_migratable(self): return ( "spec" in self and self.spec is not None and all( spec.get("live_migratable") == "true" for spec in self.spec ) ) @base.NovaObjectRegistry.register class InstancePCIRequests(base.NovaObject): # Version 1.0: Initial version # Version 1.1: InstancePCIRequest 1.1 VERSION = '1.1' fields = { 'instance_uuid': fields.UUIDField(), 'requests': fields.ListOfObjectsField('InstancePCIRequest'), } @classmethod def obj_from_db(cls, context, instance_uuid, db_requests): self = cls(context=context, requests=[], instance_uuid=instance_uuid) if db_requests is not None: requests = jsonutils.loads(db_requests) else: requests = [] for request in requests: # Note(moshele): is_new is deprecated and therefore we load it # with default value of False request_obj = InstancePCIRequest( count=request['count'], spec=request['spec'], alias_name=request['alias_name'], is_new=False, numa_policy=request.get('numa_policy', fields.PCINUMAAffinityPolicy.LEGACY), request_id=request['request_id'], requester_id=request.get('requester_id')) request_obj.obj_reset_changes() self.requests.append(request_obj) self.obj_reset_changes() return self @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_pci_requests = db.instance_extra_get_by_instance_uuid( context, instance_uuid, columns=['pci_requests']) if db_pci_requests is not None: db_pci_requests = db_pci_requests['pci_requests'] return cls.obj_from_db(context, instance_uuid, db_pci_requests) @staticmethod def _load_legacy_requests(sysmeta_value, is_new=False): if sysmeta_value is None: return [] requests = [] db_requests = jsonutils.loads(sysmeta_value) for db_request in db_requests: request = InstancePCIRequest( count=db_request['count'], spec=db_request['spec'], alias_name=db_request['alias_name'], is_new=is_new) request.obj_reset_changes() requests.append(request) return requests @classmethod def get_by_instance(cls, context, instance): # NOTE (baoli): not all callers are passing instance as object yet. 
# Therefore, use the dict syntax in this routine if 'pci_requests' in instance['system_metadata']: # NOTE(danms): This instance hasn't been converted to use # instance_extra yet, so extract the data from sysmeta sysmeta = instance['system_metadata'] _requests = ( cls._load_legacy_requests(sysmeta['pci_requests']) + cls._load_legacy_requests(sysmeta.get('new_pci_requests'), is_new=True)) requests = cls(instance_uuid=instance['uuid'], requests=_requests) requests.obj_reset_changes() return requests else: return cls.get_by_instance_uuid(context, instance['uuid']) def to_json(self): blob = [{'count': x.count, 'spec': x.spec, 'alias_name': x.alias_name, 'is_new': x.is_new, 'numa_policy': x.numa_policy, 'request_id': x.request_id, 'requester_id': x.requester_id} for x in self.requests] return jsonutils.dumps(blob) def neutron_requests(self): return all( [ req for req in self.requests if req.source == InstancePCIRequest.NEUTRON_PORT ] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/keypair.py0000664000175000017500000001555100000000000017203 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import exception as db_exc from oslo_db.sqlalchemy import utils as sqlalchemyutils from oslo_log import log as logging from oslo_utils import versionutils from nova.db.api import api as api_db_api from nova.db.api import models as api_models from nova import exception from nova import objects from nova.objects import base from nova.objects import fields KEYPAIR_TYPE_SSH = 'ssh' KEYPAIR_TYPE_X509 = 'x509' LOG = logging.getLogger(__name__) @api_db_api.context_manager.reader def _get_from_db(context, user_id, name=None, limit=None, marker=None): query = context.session.query(api_models.KeyPair).\ filter(api_models.KeyPair.user_id == user_id) if name is not None: db_keypair = query.filter(api_models.KeyPair.name == name).\ first() if not db_keypair: raise exception.KeypairNotFound(user_id=user_id, name=name) return db_keypair marker_row = None if marker is not None: marker_row = context.session.query(api_models.KeyPair).\ filter(api_models.KeyPair.name == marker).\ filter(api_models.KeyPair.user_id == user_id).first() if not marker_row: raise exception.MarkerNotFound(marker=marker) query = sqlalchemyutils.paginate_query( query, api_models.KeyPair, limit, ['name'], marker=marker_row) return query.all() @api_db_api.context_manager.reader def _get_count_from_db(context, user_id): return context.session.query(api_models.KeyPair).\ filter(api_models.KeyPair.user_id == user_id).\ count() @api_db_api.context_manager.writer def _create_in_db(context, values): kp = api_models.KeyPair() kp.update(values) try: kp.save(context.session) except db_exc.DBDuplicateEntry: raise exception.KeyPairExists(key_name=values['name']) return kp @api_db_api.context_manager.writer def _destroy_in_db(context, user_id, name): result = context.session.query(api_models.KeyPair).\ filter_by(user_id=user_id).\ 
filter_by(name=name).\ delete() if not result: raise exception.KeypairNotFound(user_id=user_id, name=name) # TODO(berrange): Remove NovaObjectDictCompat @base.NovaObjectRegistry.register class KeyPair(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: String attributes updated to support unicode # Version 1.2: Added keypair type # Version 1.3: Name field is non-null # Version 1.4: Add localonly flag to get_by_name() VERSION = '1.4' fields = { 'id': fields.IntegerField(), 'name': fields.StringField(nullable=False), 'user_id': fields.StringField(nullable=True), 'fingerprint': fields.StringField(nullable=True), 'public_key': fields.StringField(nullable=True), 'type': fields.StringField(nullable=False), } def obj_make_compatible(self, primitive, target_version): super(KeyPair, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 2) and 'type' in primitive: del primitive['type'] @staticmethod def _from_db_object(context, keypair, db_keypair): ignore = {'deleted': False, 'deleted_at': None} for key in keypair.fields: if key in ignore and not hasattr(db_keypair, key): keypair[key] = ignore[key] else: keypair[key] = db_keypair[key] keypair._context = context keypair.obj_reset_changes() return keypair @staticmethod def _get_from_db(context, user_id, name): return _get_from_db(context, user_id, name=name) @staticmethod def _destroy_in_db(context, user_id, name): return _destroy_in_db(context, user_id, name) @staticmethod def _create_in_db(context, values): return _create_in_db(context, values) # TODO(stephenfin): Remove the 'localonly' parameter in v2.0 @base.remotable_classmethod def get_by_name(cls, context, user_id, name, localonly=False): if localonly: # There is no longer a "local" (main) table for keypairs, so this # will always return nothing now raise exception.KeypairNotFound(user_id=user_id, name=name) db_keypair = cls._get_from_db(context, user_id, name) return cls._from_db_object(context, cls(), db_keypair) @base.remotable_classmethod def destroy_by_name(cls, context, user_id, name): cls._destroy_in_db(context, user_id, name) @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError( action='create', reason='already created', ) self._create() def _create(self): updates = self.obj_get_changes() db_keypair = self._create_in_db(self._context, updates) self._from_db_object(self._context, self, db_keypair) @base.remotable def destroy(self): self._destroy_in_db(self._context, self.user_id, self.name) @base.NovaObjectRegistry.register class KeyPairList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # KeyPair <= version 1.1 # Version 1.1: KeyPair <= version 1.2 # Version 1.2: KeyPair <= version 1.3 # Version 1.3: Add new parameters 'limit' and 'marker' to get_by_user() VERSION = '1.3' fields = { 'objects': fields.ListOfObjectsField('KeyPair'), } @staticmethod def _get_from_db(context, user_id, limit, marker): return _get_from_db(context, user_id, limit=limit, marker=marker) @staticmethod def _get_count_from_db(context, user_id): return _get_count_from_db(context, user_id) @base.remotable_classmethod def get_by_user(cls, context, user_id, limit=None, marker=None): api_db_keypairs = cls._get_from_db( context, user_id, limit=limit, marker=marker) return base.obj_make_list( context, cls(context), objects.KeyPair, api_db_keypairs, ) @base.remotable_classmethod def 
get_count_by_user(cls, context, user_id): return cls._get_count_from_db(context, user_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/migrate_data.py0000664000175000017500000004133700000000000020161 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import versionutils from nova import exception from nova.objects import base as obj_base from nova.objects import fields LOG = log.getLogger(__name__) OS_VIF_DELEGATION = 'os_vif_delegation' @obj_base.NovaObjectRegistry.register class VIFMigrateData(obj_base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' # The majority of the fields here represent a port binding on the # **destination** host during a live migration. The vif_type, among # other fields, could be different from the existing binding on the # source host, which is represented by the "source_vif" field. fields = { 'port_id': fields.StringField(), 'vnic_type': fields.StringField(), # TODO(sean-k-mooney): make enum? 'vif_type': fields.StringField(), # vif_details is a dict whose contents are dependent on the vif_type # and can be any number of types for the values, so we just store it # as a serialized dict 'vif_details_json': fields.StringField(), # profile is in the same random dict of terrible boat as vif_details # so it's stored as a serialized json string 'profile_json': fields.StringField(), 'host': fields.StringField(), # The source_vif attribute is a copy of the VIF network model # representation of the port on the source host which can be used # for filling in blanks about the VIF (port) when building a # configuration reference for the destination host. # NOTE(mriedem): This might not be sufficient based on how the # destination host is configured for all vif types. 
See the note in # the libvirt driver here: https://review.opendev.org/#/c/551370/ # 29/nova/virt/libvirt/driver.py@7036 'source_vif': fields.NetworkVIFModelField(), } @property def vif_details(self): if 'vif_details_json' not in self: return {} return jsonutils.loads(self.vif_details_json) @vif_details.setter def vif_details(self, vif_details_dict): self.vif_details_json = jsonutils.dumps(vif_details_dict) @property def profile(self): if 'profile_json' not in self: return {} return jsonutils.loads(self.profile_json) @profile.setter def profile(self, profile_dict): self.profile_json = jsonutils.dumps(profile_dict) @property def supports_os_vif_delegation(self): return self.profile.get(OS_VIF_DELEGATION, False) # TODO(stephenfin): add a proper delegation field instead of storing this # info in the profile catch-all blob @supports_os_vif_delegation.setter def supports_os_vif_delegation(self, supported): # we can't simply set the attribute using dict notation since the # getter returns a copy of the data, not the data itself self.profile = dict( self.profile or {}, **{OS_VIF_DELEGATION: supported}) def get_dest_vif(self): """Get a destination VIF representation of this object. This method takes the source_vif and updates it to include the destination host port binding information using the other fields on this object. :return: nova.network.model.VIF object """ if 'source_vif' not in self: raise exception.ObjectActionError( action='get_dest_vif', reason='source_vif is not set') vif = copy.deepcopy(self.source_vif) vif['type'] = self.vif_type vif['vnic_type'] = self.vnic_type vif['profile'] = self.profile vif['details'] = self.vif_details vif['delegate_create'] = self.supports_os_vif_delegation return vif @classmethod def create_skeleton_migrate_vifs(cls, vifs): """Create migrate vifs for live migration. :param vifs: a list of VIFs. :return: list of VIFMigrateData object corresponding to the provided VIFs. """ vif_mig_data = [] for vif in vifs: mig_vif = cls(port_id=vif['id'], source_vif=vif) vif_mig_data.append(mig_vif) return vif_mig_data @obj_base.NovaObjectRegistry.register class LibvirtLiveMigrateNUMAInfo(obj_base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { # NOTE(artom) We need a 1:many cardinality here, so DictOfIntegers with # its 1:1 cardinality cannot work here. cpu_pins can have a single # guest CPU pinned to multiple host CPUs. 'cpu_pins': fields.DictOfSetOfIntegersField(), # NOTE(artom) Currently we never pin a guest cell to more than a single # host cell, so cell_pins could be a DictOfIntegers, but # DictOfSetOfIntegers is more future-proof. 'cell_pins': fields.DictOfSetOfIntegersField(), 'emulator_pins': fields.SetOfIntegersField(), 'sched_vcpus': fields.SetOfIntegersField(), 'sched_priority': fields.IntegerField(), } @obj_base.NovaObjectRegistry.register_if(False) class LiveMigrateData(obj_base.NovaObject): # Version 1.0: Initial version # Version 1.1: Added old_vol_attachment_ids field. # Version 1.2: Added wait_for_vif_plugged # Version 1.3: Added vifs field. # Version 1.4: Added pci_dev_map_src_dst field. VERSION = '1.4' fields = { 'is_volume_backed': fields.BooleanField(), 'migration': fields.ObjectField('Migration'), # old_vol_attachment_ids is a dict used to store the old attachment_ids # for each volume so they can be restored on a migration rollback. The # key is the volume_id, and the value is the attachment_id. # TODO(mdbooth): This field was made redundant by change Ibe9215c0. We # should eventually remove it. 
'old_vol_attachment_ids': fields.DictOfStringsField(), # wait_for_vif_plugged is set in pre_live_migration on the destination # compute host based on the [compute]/live_migration_wait_for_vif_plug # config option value; a default value is not set here since the # default for the config option may change in the future 'wait_for_vif_plugged': fields.BooleanField(), 'vifs': fields.ListOfObjectsField('VIFMigrateData'), 'pci_dev_map_src_dst': fields.DictOfStringsField(), } @obj_base.NovaObjectRegistry.register class LibvirtLiveMigrateBDMInfo(obj_base.NovaObject): # VERSION 1.0 : Initial version # VERSION 1.1 : Added encryption_secret_uuid for tracking volume secret # uuid created on dest during migration with encrypted vols. VERSION = '1.1' fields = { # FIXME(danms): some of these can be enums? 'serial': fields.StringField(), 'bus': fields.StringField(), 'dev': fields.StringField(), 'type': fields.StringField(), 'format': fields.StringField(nullable=True), 'boot_index': fields.IntegerField(nullable=True), 'connection_info_json': fields.StringField(), 'encryption_secret_uuid': fields.UUIDField(nullable=True), } def obj_make_compatible(self, primitive, target_version): super(LibvirtLiveMigrateBDMInfo, self).obj_make_compatible( primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 1) and 'encryption_secret_uuid' in primitive: del primitive['encryption_secret_uuid'] # NOTE(danms): We don't have a connection_info object right # now, and instead mostly store/pass it as JSON that we're # careful with. When we get a connection_info object in the # future, we should use it here, so make this easy to convert # for later. @property def connection_info(self): return jsonutils.loads(self.connection_info_json) @connection_info.setter def connection_info(self, info): self.connection_info_json = jsonutils.dumps(info) def as_disk_info(self): info_dict = { 'dev': self.dev, 'bus': self.bus, 'type': self.type, } if self.obj_attr_is_set('format') and self.format: info_dict['format'] = self.format if self.obj_attr_is_set('boot_index') and self.boot_index is not None: info_dict['boot_index'] = str(self.boot_index) return info_dict @obj_base.NovaObjectRegistry.register class LibvirtLiveMigrateData(LiveMigrateData): # Version 1.0: Initial version # Version 1.1: Added target_connect_addr # Version 1.2: Added 'serial_listen_ports' to allow live migration with # serial console. # Version 1.3: Added 'supported_perf_events' # Version 1.4: Added old_vol_attachment_ids # Version 1.5: Added src_supports_native_luks # Version 1.6: Added wait_for_vif_plugged # Version 1.7: Added dst_wants_file_backed_memory # Version 1.8: Added file_backed_memory_discard # Version 1.9: Inherited vifs from LiveMigrateData # Version 1.10: Added dst_numa_info, src_supports_numa_live_migration, and # dst_supports_numa_live_migration fields # Version 1.11: Added dst_supports_mdev_live_migration, # source_mdev_types and target_mdevs fields # Version 1.12: Added dst_cpu_shared_set_info # Version 1.13: Inherited pci_dev_map_src_dst from LiveMigrateData VERSION = '1.13' fields = { 'filename': fields.StringField(), # FIXME: image_type should be enum? 
'image_type': fields.StringField(), 'block_migration': fields.BooleanField(), 'disk_over_commit': fields.BooleanField(), 'disk_available_mb': fields.IntegerField(nullable=True), 'is_shared_instance_path': fields.BooleanField(), 'is_shared_block_storage': fields.BooleanField(), 'instance_relative_path': fields.StringField(), 'graphics_listen_addr_vnc': fields.IPAddressField(nullable=True), 'graphics_listen_addr_spice': fields.IPAddressField(nullable=True), 'serial_listen_addr': fields.StringField(nullable=True), 'serial_listen_ports': fields.ListOfIntegersField(), 'bdms': fields.ListOfObjectsField('LibvirtLiveMigrateBDMInfo'), 'target_connect_addr': fields.StringField(nullable=True), 'supported_perf_events': fields.ListOfStringsField(), # TODO(lyarwood): No longer used, drop in version 2.0 'src_supports_native_luks': fields.BooleanField(), 'dst_wants_file_backed_memory': fields.BooleanField(), # TODO(lyarwood): No longer used, drop in version 2.0 'file_backed_memory_discard': fields.BooleanField(), # TODO(artom) (src|dst)_supports_numa_live_migration are only used as # flags to indicate that the compute host is new enough to perform a # NUMA-aware live migration. Remove in version 2.0. 'src_supports_numa_live_migration': fields.BooleanField(), 'dst_supports_numa_live_migration': fields.BooleanField(), 'dst_numa_info': fields.ObjectField('LibvirtLiveMigrateNUMAInfo'), # TODO(sbauza) dst_supports_mdev_live_migration is only used as # flag to indicate that the compute host is new enough to perform a # mediated-device-aware live migration. Remove in version 2.0. 'dst_supports_mdev_live_migration': fields.BooleanField(), # key is mdev UUID and value is its type. 'source_mdev_types': fields.DictOfStringsField(), # key is source mdev UUID and value is the destination mdev UUID. 
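        # Shape example (placeholder values only):
        #   {'<source mdev uuid>': '<destination mdev uuid>'}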
'target_mdevs': fields.DictOfStringsField(), 'dst_cpu_shared_set_info': fields.SetOfIntegersField(), } def obj_make_compatible(self, primitive, target_version): super(LibvirtLiveMigrateData, self).obj_make_compatible( primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if (target_version < (1, 13)): primitive.pop('pci_dev_map_src_dst', None) if (target_version < (1, 12)): primitive.pop('dst_cpu_shared_set_info', None) if target_version < (1, 11): primitive.pop('target_mdevs', None) primitive.pop('source_mdev_types', None) primitive.pop('dst_supports_mdev_live_migration', None) if (target_version < (1, 10) and 'src_supports_numa_live_migration' in primitive): del primitive['src_supports_numa_live_migration'] if (target_version < (1, 10) and 'dst_supports_numa_live_migration' in primitive): del primitive['dst_supports_numa_live_migration'] if target_version < (1, 10) and 'dst_numa_info' in primitive: del primitive['dst_numa_info'] if target_version < (1, 9) and 'vifs' in primitive: del primitive['vifs'] if target_version < (1, 8): if 'file_backed_memory_discard' in primitive: del primitive['file_backed_memory_discard'] if target_version < (1, 7): if 'dst_wants_file_backed_memory' in primitive: del primitive['dst_wants_file_backed_memory'] if target_version < (1, 6) and 'wait_for_vif_plugged' in primitive: del primitive['wait_for_vif_plugged'] if target_version < (1, 5): if 'src_supports_native_luks' in primitive: del primitive['src_supports_native_luks'] if target_version < (1, 4): if 'old_vol_attachment_ids' in primitive: del primitive['old_vol_attachment_ids'] if target_version < (1, 3): if 'supported_perf_events' in primitive: del primitive['supported_perf_events'] if target_version < (1, 2): if 'serial_listen_ports' in primitive: del primitive['serial_listen_ports'] if target_version < (1, 1) and 'target_connect_addr' in primitive: del primitive['target_connect_addr'] def is_on_shared_storage(self): return self.is_shared_block_storage or self.is_shared_instance_path # TODO(gmann): HyperV virt driver has been removed in Nova 29.0.0 (OpenStack # 2024.1) release but we kept this object for a couple of cycle. This can be # removed too in Nova 31.0.0 (OpenStack 2025.1) or later. 
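
# NOTE: The helper below is an illustrative sketch added for documentation
# purposes only; it is not called anywhere in Nova. It shows how the version
# pinning implemented in obj_make_compatible() above drops newer fields when
# the object is serialized for an older peer. The field values are invented.
def _example_backlevel_libvirt_live_migrate_data():
    data = LibvirtLiveMigrateData(
        filename='tmpdir123',
        image_type='qcow2',
        block_migration=False,
        dst_cpu_shared_set_info={0, 1},
    )
    # Serializing at version 1.11 invokes obj_make_compatible(), which pops
    # 'dst_cpu_shared_set_info' (added in 1.12); 'pci_dev_map_src_dst'
    # (added in 1.13) would be popped too if it were set.
    primitive = data.obj_to_primitive(target_version='1.11')
    assert 'dst_cpu_shared_set_info' not in primitive['nova_object.data']
    return primitive
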
@obj_base.NovaObjectRegistry.register class HyperVLiveMigrateData(LiveMigrateData): # Version 1.0: Initial version # Version 1.1: Added is_shared_instance_path # Version 1.2: Added old_vol_attachment_ids # Version 1.3: Added wait_for_vif_plugged # Version 1.4: Inherited vifs from LiveMigrateData # Version 1.5: Inherited pci_dev_map_src_dst from LiveMigrateData VERSION = '1.5' fields = {'is_shared_instance_path': fields.BooleanField()} def obj_make_compatible(self, primitive, target_version): super(HyperVLiveMigrateData, self).obj_make_compatible( primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if (target_version < (1, 5)): primitive.pop('pci_dev_map_src_dst', None) if target_version < (1, 4) and 'vifs' in primitive: del primitive['vifs'] if target_version < (1, 3) and 'wait_for_vif_plugged' in primitive: del primitive['wait_for_vif_plugged'] if target_version < (1, 2): if 'old_vol_attachment_ids' in primitive: del primitive['old_vol_attachment_ids'] if target_version < (1, 1): if 'is_shared_instance_path' in primitive: del primitive['is_shared_instance_path'] @obj_base.NovaObjectRegistry.register class VMwareLiveMigrateData(LiveMigrateData): # Version 1.0: Initial version # Version 1.1: Inherited pci_dev_map_src_dst from LiveMigrateData VERSION = '1.1' fields = { 'cluster_name': fields.StringField(nullable=False), 'datastore_regex': fields.StringField(nullable=False), } def obj_make_compatible(self, primitive, target_version): super(VMwareLiveMigrateData, self).obj_make_compatible( primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if (target_version < (1, 1)): primitive.pop('pci_dev_map_src_dst', None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/migration.py0000664000175000017500000003323000000000000017522 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import uuidutils from oslo_utils import versionutils from nova.db.main import api as db from nova import exception from nova.i18n import _ from nova import objects from nova.objects import base from nova.objects import fields LOG = logging.getLogger(__name__) def determine_migration_type(migration): if isinstance(migration, dict): old_instance_type_id = migration['old_instance_type_id'] new_instance_type_id = migration['new_instance_type_id'] else: old_instance_type_id = migration.old_instance_type_id new_instance_type_id = migration.new_instance_type_id if old_instance_type_id != new_instance_type_id: return 'resize' return 'migration' @base.NovaObjectRegistry.register class Migration(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version # Version 1.1: String attributes updated to support unicode # Version 1.2: Added migration_type and hidden # Version 1.3: Added get_by_id_and_instance() # Version 1.4: Added migration progress detail # Version 1.5: Added uuid # Version 1.6: Added cross_cell_move and get_by_uuid(). # Version 1.7: Added user_id and project_id # Version 1.8: Added dest_compute_id VERSION = '1.8' fields = { 'id': fields.IntegerField(), 'uuid': fields.UUIDField(), 'source_compute': fields.StringField(nullable=True), # source hostname 'dest_compute': fields.StringField(nullable=True), # dest hostname 'source_node': fields.StringField(nullable=True), # source nodename 'dest_node': fields.StringField(nullable=True), # dest nodename # ID of ComputeNode matching dest_node 'dest_compute_id': fields.IntegerField(nullable=True), 'dest_host': fields.StringField(nullable=True), # dest host IP # TODO(stephenfin): Rename these to old_flavor_id, new_flavor_id in # v2.0 'old_instance_type_id': fields.IntegerField(nullable=True), 'new_instance_type_id': fields.IntegerField(nullable=True), 'instance_uuid': fields.StringField(nullable=True), 'status': fields.StringField(nullable=True), 'migration_type': fields.MigrationTypeField(nullable=False), 'hidden': fields.BooleanField(nullable=False, default=False), 'memory_total': fields.IntegerField(nullable=True), 'memory_processed': fields.IntegerField(nullable=True), 'memory_remaining': fields.IntegerField(nullable=True), 'disk_total': fields.IntegerField(nullable=True), 'disk_processed': fields.IntegerField(nullable=True), 'disk_remaining': fields.IntegerField(nullable=True), 'cross_cell_move': fields.BooleanField(default=False), # request context user id 'user_id': fields.StringField(nullable=True), # request context project id 'project_id': fields.StringField(nullable=True), } @staticmethod def _from_db_object(context, migration, db_migration): for key in migration.fields: value = db_migration[key] if key == 'migration_type' and value is None: value = determine_migration_type(db_migration) elif key == 'uuid' and value is None: continue setattr(migration, key, value) migration._context = context migration.obj_reset_changes() migration._ensure_uuid() return migration def obj_make_compatible(self, primitive, target_version): super(Migration, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 2): if 'migration_type' in primitive: del primitive['migration_type'] del primitive['hidden'] if target_version < (1, 4): if 'memory_total' in primitive: del primitive['memory_total'] del primitive['memory_processed'] del primitive['memory_remaining'] 
del primitive['disk_total'] del primitive['disk_processed'] del primitive['disk_remaining'] if target_version < (1, 5): if 'uuid' in primitive: del primitive['uuid'] if target_version < (1, 6) and 'cross_cell_move' in primitive: del primitive['cross_cell_move'] if target_version < (1, 7): if 'user_id' in primitive: del primitive['user_id'] if 'project_id' in primitive: del primitive['project_id'] if target_version < (1, 8): primitive.pop('dest_compute_id', None) @base.lazy_load_counter def obj_load_attr(self, attrname): if attrname == 'migration_type': # NOTE(danms): The only reason we'd need to load this is if # some older node sent us one. So, guess the type. self.migration_type = determine_migration_type(self) elif attrname in ['hidden', 'cross_cell_move']: self.obj_set_defaults(attrname) else: super(Migration, self).obj_load_attr(attrname) def _ensure_uuid(self): if 'uuid' in self: return self.uuid = uuidutils.generate_uuid() try: self.save() except db_exc.DBDuplicateEntry: # NOTE(danms) We raced to generate a uuid for this, # so fetch the winner and use that uuid fresh = self.__class__.get_by_id(self.context, self.id) self.uuid = fresh.uuid @base.remotable_classmethod def get_by_uuid(cls, context, migration_uuid): db_migration = db.migration_get_by_uuid(context, migration_uuid) return cls._from_db_object(context, cls(), db_migration) @base.remotable_classmethod def get_by_id(cls, context, migration_id): db_migration = db.migration_get(context, migration_id) return cls._from_db_object(context, cls(), db_migration) @base.remotable_classmethod def get_by_id_and_instance(cls, context, migration_id, instance_uuid): db_migration = db.migration_get_by_id_and_instance( context, migration_id, instance_uuid) return cls._from_db_object(context, cls(), db_migration) @base.remotable_classmethod def get_by_instance_and_status(cls, context, instance_uuid, status): db_migration = db.migration_get_by_instance_and_status( context, instance_uuid, status) return cls._from_db_object(context, cls(), db_migration) @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') if 'uuid' not in self: self.uuid = uuidutils.generate_uuid() # Record who is initiating the migration which is # not necessarily the owner of the instance. if 'user_id' not in self: self.user_id = self._context.user_id if 'project_id' not in self: self.project_id = self._context.project_id updates = self.obj_get_changes() if 'migration_type' not in updates: raise exception.ObjectActionError( action="create", reason=_("cannot create a Migration object without a " "migration_type set")) version = versionutils.convert_version_to_tuple(self.VERSION) if 'dest_node' in updates and 'dest_compute_id' not in updates: # NOTE(danms): This is not really the best idea, as we should try # not to have different behavior based on the version of the # object. However, this exception helps us find cases in testing # where these may not be updated together. We can remove this # later. 
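            # In short (descriptive only): at the current object version the
            # create fails fast when dest_node is set without
            # dest_compute_id, while older-versioned objects merely log the
            # warning below.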
if version >= (1, 8): raise exception.ObjectActionError( action='create', reason=_('cannot create a Migration object with a ' 'dest_node but no dest_compute_id')) else: LOG.warning('Migration is being created for %s but no ' 'compute_id is set', self.dest_node) db_migration = db.migration_create(self._context, updates) self._from_db_object(self._context, self, db_migration) @base.remotable def save(self): updates = self.obj_get_changes() updates.pop('id', None) db_migration = db.migration_update(self._context, self.id, updates) self._from_db_object(self._context, self, db_migration) self.obj_reset_changes() @property def instance(self): if not hasattr(self, '_cached_instance'): self._cached_instance = objects.Instance.get_by_uuid( self._context, self.instance_uuid, expected_attrs=['migration_context', 'flavor']) return self._cached_instance @instance.setter def instance(self, instance): self._cached_instance = instance @property def is_live_migration(self): return self.migration_type == fields.MigrationType.LIVE_MIGRATION @property def is_resize(self): return self.migration_type == fields.MigrationType.RESIZE @property def is_same_host_resize(self): return self.is_resize and self.source_node == self.dest_node def get_dest_compute_id(self): """Try to determine the ComputeNode id this migration targets. This should be just the dest_compute_id field, but for migrations created by older compute nodes, we may not have that set. If not, look up the compute the old way for compatibility. :raises:ComputeHostNotFound if the destination compute is missing """ if 'dest_compute_id' not in self: self.dest_compute_id = ( objects.ComputeNode.get_by_host_and_nodename( self._context, self.dest_compute, self.dest_node).id) return self.dest_compute_id @base.NovaObjectRegistry.register class MigrationList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Migration <= 1.1 # Version 1.1: Added use_slave to get_unconfirmed_by_dest_compute # Version 1.2: Migration version 1.2 # Version 1.3: Added a new function to get in progress migrations # for an instance. # Version 1.4: Added sort_keys, sort_dirs, limit, marker kwargs to # get_by_filters for migrations pagination support. # Version 1.5: Added a new function to get in progress migrations # and error migrations for a given host + node. 
VERSION = '1.5' fields = { 'objects': fields.ListOfObjectsField('Migration'), } @staticmethod @db.select_db_reader_mode def _db_migration_get_unconfirmed_by_dest_compute( context, confirm_window, dest_compute, use_slave=False): return db.migration_get_unconfirmed_by_dest_compute( context, confirm_window, dest_compute) @base.remotable_classmethod def get_unconfirmed_by_dest_compute(cls, context, confirm_window, dest_compute, use_slave=False): db_migrations = cls._db_migration_get_unconfirmed_by_dest_compute( context, confirm_window, dest_compute, use_slave=use_slave) return base.obj_make_list(context, cls(context), objects.Migration, db_migrations) @base.remotable_classmethod def get_in_progress_by_host_and_node(cls, context, host, node): db_migrations = db.migration_get_in_progress_by_host_and_node( context, host, node) return base.obj_make_list(context, cls(context), objects.Migration, db_migrations) @base.remotable_classmethod def get_by_filters(cls, context, filters, sort_keys=None, sort_dirs=None, limit=None, marker=None): db_migrations = db.migration_get_all_by_filters( context, filters, sort_keys=sort_keys, sort_dirs=sort_dirs, limit=limit, marker=marker) return base.obj_make_list(context, cls(context), objects.Migration, db_migrations) @base.remotable_classmethod def get_in_progress_by_instance(cls, context, instance_uuid, migration_type=None): db_migrations = db.migration_get_in_progress_by_instance( context, instance_uuid, migration_type) return base.obj_make_list(context, cls(context), objects.Migration, db_migrations) @base.remotable_classmethod def get_in_progress_and_error(cls, context, host, node): db_migrations = \ db.migration_get_in_progress_and_error_by_host_and_node( context, host, node) return base.obj_make_list(context, cls(context), objects.Migration, db_migrations) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/migration_context.py0000664000175000017500000001343200000000000021270 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import versionutils from nova.db.main import api as db from nova import exception from nova import objects from nova.objects import base from nova.objects import fields LOG = logging.getLogger(__name__) @base.NovaObjectRegistry.register class MigrationContext(base.NovaPersistentObject, base.NovaObject): """Data representing additional resources related to a migration. Some resources cannot be calculated from knowing the flavor alone for the purpose of resources tracking, but need to be persisted at the time the claim was made, for subsequent resource tracking runs to be consistent. MigrationContext objects are created when the claim is done and are there to facilitate resource tracking and final provisioning of the instance on the destination host. 
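
    The context is stored as a JSON blob in the instance_extra record of the
    instance being migrated; obj_from_db_obj() and get_by_instance_uuid()
    below re-hydrate it from that blob.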
""" # Version 1.0: Initial version # Version 1.1: Add old/new pci_devices and pci_requests # Version 1.2: Add old/new resources VERSION = '1.2' fields = { 'instance_uuid': fields.UUIDField(), 'migration_id': fields.IntegerField(), 'new_numa_topology': fields.ObjectField('InstanceNUMATopology', nullable=True), 'old_numa_topology': fields.ObjectField('InstanceNUMATopology', nullable=True), 'new_pci_devices': fields.ObjectField('PciDeviceList', nullable=True), 'old_pci_devices': fields.ObjectField('PciDeviceList', nullable=True), 'new_pci_requests': fields.ObjectField('InstancePCIRequests', nullable=True), 'old_pci_requests': fields.ObjectField('InstancePCIRequests', nullable=True), 'new_resources': fields.ObjectField('ResourceList', nullable=True), 'old_resources': fields.ObjectField('ResourceList', nullable=True), } @classmethod def obj_make_compatible(cls, primitive, target_version): target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 2): primitive.pop('old_resources', None) primitive.pop('new_resources', None) if target_version < (1, 1): primitive.pop('old_pci_devices', None) primitive.pop('new_pci_devices', None) primitive.pop('old_pci_requests', None) primitive.pop('new_pci_requests', None) @classmethod def obj_from_db_obj(cls, db_obj): primitive = jsonutils.loads(db_obj) return cls.obj_from_primitive(primitive) @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_extra = db.instance_extra_get_by_instance_uuid( context, instance_uuid, columns=['migration_context']) if not db_extra: raise exception.MigrationContextNotFound( instance_uuid=instance_uuid) if db_extra['migration_context'] is None: return None return cls.obj_from_db_obj(db_extra['migration_context']) def get_pci_mapping_for_migration(self, revert): """Get the mapping between the old PCI devices and the new PCI devices that have been allocated during this migration. The correlation is based on PCI request ID which is unique per PCI devices for SR-IOV ports. :param revert: If True, return a reverse mapping i.e mapping between new PCI devices and old PCI devices. :returns: dictionary of PCI mapping. if revert==False: {'': } if revert==True: {'': } """ step = -1 if revert else 1 current_pci_devs, updated_pci_devs = (self.old_pci_devices, self.new_pci_devices)[::step] if current_pci_devs and updated_pci_devs: LOG.debug("Determining PCI devices mapping using migration " "context: current_pci_devs: %(cur)s, " "updated_pci_devs: %(upd)s", {'cur': [dev for dev in current_pci_devs], 'upd': [dev for dev in updated_pci_devs]}) return {curr_dev.address: upd_dev for curr_dev in current_pci_devs for upd_dev in updated_pci_devs if curr_dev.request_id == upd_dev.request_id} return {} def is_cross_cell_move(self): """Helper to determine if this is a context for a cross-cell move. Based on the ``migration_id`` in this context, gets the Migration object and returns its ``cross_cell_move`` value. :return: True if this is a cross cell move migration, False otherwise. """ migration = objects.Migration.get_by_id( self._context, self.migration_id) return migration.cross_cell_move ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/monitor_metric.py0000664000175000017500000001116400000000000020565 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from oslo_utils import versionutils from nova.objects import base from nova.objects import fields from nova import utils # NOTE(jwcroppe): Used to determine which fields whose value we need to adjust # (read: divide by 100.0) before sending information to the RPC notifier since # these values were expected to be within the range [0, 1]. FIELDS_REQUIRING_CONVERSION = [fields.MonitorMetricType.CPU_USER_PERCENT, fields.MonitorMetricType.CPU_KERNEL_PERCENT, fields.MonitorMetricType.CPU_IDLE_PERCENT, fields.MonitorMetricType.CPU_IOWAIT_PERCENT, fields.MonitorMetricType.CPU_PERCENT] @base.NovaObjectRegistry.register class MonitorMetric(base.NovaObject): # Version 1.0: Initial version # Version 1.1: Added NUMA support VERSION = '1.1' fields = { 'name': fields.MonitorMetricTypeField(nullable=False), 'value': fields.IntegerField(nullable=False), 'numa_membw_values': fields.DictOfIntegersField(nullable=True), 'timestamp': fields.DateTimeField(nullable=False), # This will be the stevedore extension full class name # for the plugin from which the metric originates. 'source': fields.StringField(nullable=False), } def obj_make_compatible(self, primitive, target_version): super(MonitorMetric, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 1) and 'numa_membw_values' in primitive: del primitive['numa_membw_values'] # NOTE(jaypipes): This method exists to convert the object to the # format expected by the RPC notifier for metrics events. def to_dict(self): dict_to_return = { 'name': self.name, # NOTE(jaypipes): This is what jsonutils.dumps() does to # datetime.datetime objects, which is what timestamp is in # this object as well as the original simple dict metrics 'timestamp': utils.strtime(self.timestamp), 'source': self.source, } if self.obj_attr_is_set('value'): if self.name in FIELDS_REQUIRING_CONVERSION: dict_to_return['value'] = self.value / 100.0 else: dict_to_return['value'] = self.value elif self.obj_attr_is_set('numa_membw_values'): dict_to_return['numa_membw_values'] = self.numa_membw_values return dict_to_return @base.NovaObjectRegistry.register class MonitorMetricList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Version 1.1: MonitorMetric version 1.1 VERSION = '1.1' fields = { 'objects': fields.ListOfObjectsField('MonitorMetric'), } @classmethod def from_json(cls, metrics): """Converts a legacy json object into a list of MonitorMetric objs and finally returns of MonitorMetricList :param metrics: a string of json serialized objects :returns: a MonitorMetricList Object. """ metrics = jsonutils.loads(metrics) if metrics else [] # NOTE(suro-patz): While instantiating the MonitorMetric() from # JSON-ified string, we need to re-convert the # normalized metrics to avoid truncation to 0 by # typecasting into an integer. 
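        # For example (illustrative numbers only): a serialized cpu.percent
        # value of 0.99 is stored as the integer 99 on the object, mirroring
        # the division by 100.0 performed in MonitorMetric.to_dict().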
metric_list = [] for metric in metrics: if ('value' in metric and metric['name'] in FIELDS_REQUIRING_CONVERSION): metric['value'] = metric['value'] * 100 metric_list.append(MonitorMetric(**metric)) return MonitorMetricList(objects=metric_list) # NOTE(jaypipes): This method exists to convert the object to the # format expected by the RPC notifier for metrics events. def to_list(self): return [m.to_dict() for m in self.objects] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/network_metadata.py0000664000175000017500000000414700000000000021067 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.objects import base from nova.objects import fields @base.NovaObjectRegistry.register class NetworkMetadata(base.NovaObject): """Hold aggregate metadata for a collection of networks. This object holds aggregate information for a collection of neutron networks. There are two types of network collections we care about and use this for: the collection of networks configured or requested for a guest and the collection of networks available to a host. We want this information to allow us to map a given neutron network to the logical NICs it does or will use (or, rather, to identify the NUMA affinity of those NICs and therefore the networks). Given that there are potentially tens of thousands of neutron networks accessible from a given host and tens or hundreds of networks configured for an instance, we need a way to group networks by some common attribute that would identify the logical NIC it would use. For L2 networks, this is the physnet attribute (e.g. ``provider:physical_network=provider1``), which is an arbitrary string used to distinguish between multiple physical (in the sense of physical wiring) networks. For L3 (tunneled) networks, this is merely the fact that they are L3 networks (e.g. ``provider:network_type=vxlan``) because, in neutron, *all* L3 networks must use the same logical NIC. """ # Version 1.0: Initial version VERSION = '1.0' fields = { 'physnets': fields.SetOfStringsField(), 'tunneled': fields.BooleanField(), } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/network_request.py0000664000175000017500000001054500000000000020776 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_utils import versionutils from nova.objects import base as obj_base from nova.objects import fields # These are special case enums for the auto-allocate scenario. 'none' means # do not allocate a network on server create. 'auto' means auto-allocate a # network (if possible) if none are already available to the project. Other # values for network_id can be a specific network id, or None, where None # is the case before auto-allocation was supported in the compute API. NETWORK_ID_NONE = 'none' NETWORK_ID_AUTO = 'auto' @obj_base.NovaObjectRegistry.register class NetworkRequest(obj_base.NovaObject): # Version 1.0: Initial version # Version 1.1: Added pci_request_id # Version 1.2: Added tag field # Version 1.3: Added arq_uuid and device_profile VERSION = '1.3' fields = { 'network_id': fields.StringField(nullable=True), 'address': fields.IPAddressField(nullable=True), 'port_id': fields.UUIDField(nullable=True), 'pci_request_id': fields.UUIDField(nullable=True), 'tag': fields.StringField(nullable=True), # arq_uuid save cyborg managed port device, pass # arq info from conductor to compute 'arq_uuid': fields.UUIDField(nullable=True), # transfer port's device_profile info from api to conductor 'device_profile': fields.StringField(nullable=True) } def obj_make_compatible(self, primitive, target_version): target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 2) and 'tag' in primitive: del primitive['tag'] if target_version < (1, 3) and 'arq_uuid' in primitive: del primitive['arq_uuid'] if target_version < (1, 3) and 'device_profile' in primitive: del primitive['device_profile'] def obj_load_attr(self, attr): setattr(self, attr, None) def to_tuple(self): address = str(self.address) if self.address is not None else None return (self.network_id, address, self.port_id, self.pci_request_id, self.arq_uuid, self.device_profile) @classmethod def from_tuple(cls, net_tuple): (network_id, address, port_id, pci_request_id, arq_uuid, device_profile) = net_tuple return cls(network_id=network_id, address=address, port_id=port_id, pci_request_id=pci_request_id, arq_uuid=arq_uuid, device_profile=device_profile) @property def auto_allocate(self): return self.network_id == NETWORK_ID_AUTO @property def no_allocate(self): return self.network_id == NETWORK_ID_NONE @obj_base.NovaObjectRegistry.register class NetworkRequestList(obj_base.ObjectListBase, obj_base.NovaObject): fields = { 'objects': fields.ListOfObjectsField('NetworkRequest'), } VERSION = '1.1' def as_tuples(self): return [x.to_tuple() for x in self.objects] @classmethod def from_tuples(cls, net_tuples): """Convenience method for converting a list of network request tuples into a NetworkRequestList object. :param net_tuples: list of network request tuples :returns: NetworkRequestList object """ requested_networks = cls(objects=[NetworkRequest.from_tuple(t) for t in net_tuples]) return requested_networks @property def is_single_unspecified(self): return ((len(self.objects) == 1) and (self.objects[0].to_tuple() == NetworkRequest().to_tuple())) @property def auto_allocate(self): return len(self.objects) == 1 and self.objects[0].auto_allocate @property def no_allocate(self): return len(self.objects) == 1 and self.objects[0].no_allocate ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/numa.py0000664000175000017500000002472600000000000016503 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from oslo_utils import versionutils from nova import exception from nova.objects import base from nova.objects import fields as obj_fields from nova.virt import hardware @base.NovaObjectRegistry.register class NUMACell(base.NovaObject): # Version 1.0: Initial version # Version 1.1: Added pinned_cpus and siblings fields # Version 1.2: Added mempages field # Version 1.3: Add network_metadata field # Version 1.4: Add pcpuset # Version 1.5: Add socket VERSION = '1.5' fields = { 'id': obj_fields.IntegerField(read_only=True), 'cpuset': obj_fields.SetOfIntegersField(), 'pcpuset': obj_fields.SetOfIntegersField(), 'memory': obj_fields.IntegerField(), 'cpu_usage': obj_fields.IntegerField(default=0), 'memory_usage': obj_fields.IntegerField(default=0), 'pinned_cpus': obj_fields.SetOfIntegersField(), 'siblings': obj_fields.ListOfSetsOfIntegersField(), 'mempages': obj_fields.ListOfObjectsField('NUMAPagesTopology'), 'network_metadata': obj_fields.ObjectField('NetworkMetadata'), 'socket': obj_fields.IntegerField(nullable=True), } def obj_make_compatible(self, primitive, target_version): super(NUMACell, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 5): primitive.pop('socket', None) if target_version < (1, 4): primitive.pop('pcpuset', None) if target_version < (1, 3): primitive.pop('network_metadata', None) def __eq__(self, other): return base.all_things_equal(self, other) def __ne__(self, other): return not (self == other) @property def free_pcpus(self): """Return available dedicated CPUs.""" return self.pcpuset - self.pinned_cpus or set() @property def free_siblings(self): """Return available dedicated CPUs in their sibling set form.""" return [sibling_set & self.free_pcpus for sibling_set in self.siblings] @property def avail_pcpus(self): """Return number of available dedicated CPUs.""" return len(self.free_pcpus) @property def avail_memory(self): return self.memory - self.memory_usage @property def has_threads(self): """Check if SMT threads, a.k.a. HyperThreads, are present.""" return any(len(sibling_set) > 1 for sibling_set in self.siblings) def pin_cpus(self, cpus): if cpus - self.pcpuset: raise exception.CPUPinningUnknown(requested=list(cpus), available=list(self.pcpuset)) if self.pinned_cpus & cpus: available = list(self.pcpuset - self.pinned_cpus) raise exception.CPUPinningInvalid(requested=list(cpus), available=available) self.pinned_cpus |= cpus def unpin_cpus(self, cpus): if cpus - self.pcpuset: raise exception.CPUUnpinningUnknown(requested=list(cpus), available=list(self.pcpuset)) if (self.pinned_cpus & cpus) != cpus: raise exception.CPUUnpinningInvalid(requested=list(cpus), available=list( self.pinned_cpus)) self.pinned_cpus -= cpus def pin_cpus_with_siblings(self, cpus): """Pin (consume) both thread siblings if one of them is requested to be pinned. 
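
        For example (hypothetical topology): with siblings [{0, 4}, {1, 5}],
        a request to pin {0} consumes the whole sibling set {0, 4}.
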
:param cpus: set of CPUs to pin """ pin_siblings = set() for sib in self.siblings: if cpus & sib: # NOTE(artom) If the intersection between cpus and sib is not # empty - IOW, the CPU we want to pin has sibligns - pin the # sibling as well. This is because we normally got here because # the `isolate` CPU thread policy is set, so we don't want to # place guest CPUs on host thread siblings. pin_siblings.update(sib) self.pin_cpus(pin_siblings) def unpin_cpus_with_siblings(self, cpus): """Unpin (free up) both thread siblings if one of them is requested to be freed. :param cpus: set of CPUs to unpin. """ pin_siblings = set() for sib in self.siblings: if cpus & sib: # NOTE(artom) This is the inverse operation of # pin_cpus_with_siblings() - see the NOTE there. If the CPU # we're unpinning has siblings, unpin the sibling as well. pin_siblings.update(sib) self.unpin_cpus(pin_siblings) def can_fit_pagesize(self, pagesize, memory, use_free=True): """Returns whether memory can fit into a given pagesize. :param pagesize: a page size in KibB :param memory: a memory size asked to fit in KiB :param use_free: if true, assess based on free memory rather than total memory. This means overcommit is not allowed, which should be the case for hugepages since these are memlocked by the kernel and can't be swapped out. :returns: whether memory can fit in hugepages :raises: MemoryPageSizeNotSupported if page size not supported """ for pages in self.mempages: avail_kb = pages.free_kb if use_free else pages.total_kb if pages.size_kb == pagesize: return memory <= avail_kb and (memory % pages.size_kb) == 0 raise exception.MemoryPageSizeNotSupported(pagesize=pagesize) @base.NovaObjectRegistry.register class NUMAPagesTopology(base.NovaObject): # Version 1.0: Initial version # Version 1.1: Adds reserved field VERSION = '1.1' fields = { 'size_kb': obj_fields.IntegerField(), 'total': obj_fields.IntegerField(), 'used': obj_fields.IntegerField(default=0), 'reserved': obj_fields.IntegerField(default=0), } def obj_make_compatible(self, primitive, target_version): super(NUMAPagesTopology, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 1): primitive.pop('reserved', None) def __eq__(self, other): return base.all_things_equal(self, other) def __ne__(self, other): return not (self == other) @property def free(self): """Returns the number of avail pages.""" if not self.obj_attr_is_set('reserved'): # In case where an old compute node is sharing resource to # an updated node we must ensure that this property is defined. self.reserved = 0 return self.total - self.used - self.reserved @property def free_kb(self): """Returns the avail memory size in KiB.""" return self.free * self.size_kb @property def total_kb(self): """Returns the total memory size in KiB.""" return self.total * self.size_kb @base.NovaObjectRegistry.register class NUMATopology(base.NovaObject): # Version 1.0: Initial version # Version 1.1: Update NUMACell to 1.1 # Version 1.2: Update NUMACell to 1.2 VERSION = '1.2' fields = { 'cells': obj_fields.ListOfObjectsField('NUMACell'), } def __eq__(self, other): return base.all_things_equal(self, other) def __ne__(self, other): return not (self == other) @property def has_threads(self): """Check if any cell use SMT threads (a.k.a. 
Hyperthreads)""" return any(cell.has_threads for cell in self.cells) def _to_json(self): return jsonutils.dumps(self.obj_to_primitive()) @classmethod def obj_from_db_obj(cls, db_obj): """Convert serialized representation to object. Deserialize instances of this object that have been stored as JSON blobs in the database. """ return cls.obj_from_primitive(jsonutils.loads(db_obj)) @classmethod def from_legacy_object(cls, primitive: str): """Convert a pre-Liberty object to a (serialized) real o.vo. :param primitive: A serialized representation of the legacy object. :returns: A serialized representation of the updated object. """ topology = cls( cells=[ NUMACell( id=cell.get('id'), cpuset=hardware.parse_cpu_spec(cell.get('cpus', '')), cpu_usage=cell.get('cpu_usage', 0), memory=cell.get('mem', {}).get('total', 0), memory_usage=cell.get('mem', {}).get('used', 0), mempages=[], pinned_cpus=set(), siblings=[], ) for cell in jsonutils.loads(primitive).get('cells', []) ], ) return topology._to_json() def __len__(self): """Defined so that boolean testing works the same as for lists.""" return len(self.cells) @base.NovaObjectRegistry.register class NUMATopologyLimits(base.NovaObject): # Version 1.0: Initial version # Version 1.1: Add network_metadata field VERSION = '1.1' fields = { 'cpu_allocation_ratio': obj_fields.FloatField(), 'ram_allocation_ratio': obj_fields.FloatField(), 'network_metadata': obj_fields.ObjectField('NetworkMetadata'), } def obj_make_compatible(self, primitive, target_version): super(NUMATopologyLimits, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 1): primitive.pop('network_metadata', None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/pci_device.py0000664000175000017500000006667000000000000017641 0ustar00zuulzuul00000000000000# Copyright 2013 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from oslo_db import api as oslo_db_api from oslo_db.sqlalchemy import update_match from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import uuidutils from oslo_utils import versionutils from nova.db.main import api as db from nova.db.main import models as db_models from nova import exception from nova import objects from nova.objects import base from nova.objects import fields LOG = logging.getLogger(__name__) def compare_pci_device_attributes(obj_a, obj_b): if not isinstance(obj_b, PciDevice): return False pci_ignore_fields = base.NovaPersistentObject.fields.keys() for name in obj_a.obj_fields: if name in pci_ignore_fields: continue is_set_a = obj_a.obj_attr_is_set(name) is_set_b = obj_b.obj_attr_is_set(name) if is_set_a != is_set_b: return False if is_set_a: if getattr(obj_a, name) != getattr(obj_b, name): return False return True @base.NovaObjectRegistry.register class PciDevice(base.NovaPersistentObject, base.NovaObject): """Object to represent a PCI device on a compute node. PCI devices are managed by the compute resource tracker, which discovers the devices from the hardware platform, claims, allocates and frees devices for instances. The PCI device information is permanently maintained in a database. This makes it convenient to get PCI device information, like physical function for a VF device, adjacent switch IP address for a NIC, hypervisor identification for a PCI device, etc. It also provides a convenient way to check device allocation information for administrator purposes. A device can be in available/claimed/allocated/deleted/removed state. A device is available when it is discovered.. A device is claimed prior to being allocated to an instance. Normally the transition from claimed to allocated is quick. However, during a resize operation the transition can take longer, because devices are claimed in prep_resize and allocated in finish_resize. A device becomes removed when hot removed from a node (i.e. not found in the next auto-discover) but not yet synced with the DB. A removed device should not be allocated to any instance, and once deleted from the DB, the device object is changed to deleted state and no longer synced with the DB. Filed notes:: | 'dev_id': | Hypervisor's identification for the device, the string format | is hypervisor specific | 'extra_info': | Device-specific properties like PF address, switch ip address etc. 
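
    As an illustration only (placeholder values), a tracked VF might carry
    address '0000:04:10.1', dev_type 'type-VF', vendor_id '8086',
    product_id '1520' and parent_addr '0000:04:00.1'.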
""" # Version 1.0: Initial version # Version 1.1: String attributes updated to support unicode # Version 1.2: added request_id field # Version 1.3: Added field to represent PCI device NUMA node # Version 1.4: Added parent_addr field # Version 1.5: Added 2 new device statuses: UNCLAIMABLE and UNAVAILABLE # Version 1.6: Added uuid field # Version 1.7: Added 'vdpa' to 'dev_type' field VERSION = '1.7' fields = { 'id': fields.IntegerField(), 'uuid': fields.UUIDField(), # Note(yjiang5): the compute_node_id may be None because the pci # device objects are created before the compute node is created in DB 'compute_node_id': fields.IntegerField(nullable=True), 'address': fields.StringField(), 'vendor_id': fields.StringField(), 'product_id': fields.StringField(), 'dev_type': fields.PciDeviceTypeField(), 'status': fields.PciDeviceStatusField(), 'dev_id': fields.StringField(nullable=True), 'label': fields.StringField(nullable=True), 'instance_uuid': fields.StringField(nullable=True), 'request_id': fields.StringField(nullable=True), 'extra_info': fields.DictOfStringsField(default={}), 'numa_node': fields.IntegerField(nullable=True), 'parent_addr': fields.StringField(nullable=True), } def obj_make_compatible(self, primitive, target_version): target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 2) and 'request_id' in primitive: del primitive['request_id'] if target_version < (1, 4) and 'parent_addr' in primitive: if primitive['parent_addr'] is not None: extra_info = primitive.get('extra_info', {}) extra_info['phys_function'] = primitive['parent_addr'] del primitive['parent_addr'] if target_version < (1, 5) and 'parent_addr' in primitive: added_statuses = (fields.PciDeviceStatus.UNCLAIMABLE, fields.PciDeviceStatus.UNAVAILABLE) base.raise_on_too_new_values( target_version, primitive, 'status', added_statuses) if target_version < (1, 6) and 'uuid' in primitive: del primitive['uuid'] if target_version < (1, 7) and 'dev_type' in primitive: base.raise_on_too_new_values( target_version, primitive, 'dev_type', (fields.PciDeviceType.VDPA,)) def __repr__(self): return ( f'PciDevice(address={self.address}, ' f'compute_node_id={self.compute_node_id})' ) def update_device(self, dev_dict): """Sync the content from device dictionary to device object. The resource tracker updates the available devices periodically. To avoid meaningless syncs with the database, we update the device object only if a value changed. """ # Note(yjiang5): status/instance_uuid should only be updated by # functions like claim/allocate etc. The id is allocated by # database. The extra_info is created by the object. 
no_changes = ('status', 'instance_uuid', 'id', 'extra_info') for key in no_changes: dev_dict.pop(key, None) # NOTE(ndipanov): This needs to be set as it's accessed when matching dev_dict.setdefault('parent_addr') for k, v in dev_dict.items(): if k in self.fields.keys(): setattr(self, k, v) else: # NOTE(yjiang5): extra_info.update does not update # obj_what_changed, set it explicitly # NOTE(ralonsoh): list of parameters currently added to # "extra_info" dict: # - "capabilities": dict of (strings/list of strings) # - "parent_ifname": the netdev name of the parent (PF) # device of a VF # - "mac_address": the MAC address of the PF # - "managed": "true"/"false" if the device is managed by # hypervisor # - "live_migratable": true/false if the device can be live # migratable extra_info = self.extra_info data = v if isinstance(v, str) else jsonutils.dumps(v) extra_info.update({k: data}) self.extra_info = extra_info # Remove the tag keys if they were set previously. # As in the above case, we must explicitly assign to self.extra_info # so that obj_what_changed detects the modification and triggers # a save later. tags_to_clean = ["managed", "live_migratable"] for tag in tags_to_clean: if tag not in dev_dict and tag in self.extra_info: extra_info = self.extra_info del extra_info[tag] self.extra_info = extra_info def __init__(self, *args, **kwargs): super(PciDevice, self).__init__(*args, **kwargs) # NOTE(ndipanov): These are required to build an in-memory device tree # but don't need to be proper fields (and can't easily be as they would # hold circular references) self.parent_device = None self.child_devices = [] @base.lazy_load_counter def obj_load_attr(self, attr): if attr in ['extra_info']: # NOTE(danms): extra_info used to be defaulted during init, # so make sure any bare instantiations of this object can # rely on the expectation that referencing that field will # not fail. self.obj_set_defaults(attr) else: super(PciDevice, self).obj_load_attr(attr) def __eq__(self, other): return compare_pci_device_attributes(self, other) def __ne__(self, other): return not (self == other) @classmethod def populate_dev_uuids(cls, context, max_count): @db.pick_context_manager_reader def get_devs_no_uuid(context): return context.session.query(db_models.PciDevice).\ filter_by(uuid=None).limit(max_count).all() db_devs = get_devs_no_uuid(context) done = 0 for db_dev in db_devs: cls._create_uuid(context, db_dev['id']) done += 1 return done, done @classmethod def _from_db_object(cls, context, pci_device, db_dev): for key in pci_device.fields: if key == 'uuid' and db_dev['uuid'] is None: # NOTE(danms): While the records could be nullable, # generate a UUID on read since the object requires it dev_id = db_dev['id'] db_dev[key] = cls._create_uuid(context, dev_id) if key == 'extra_info': extra_info = db_dev.get('extra_info') pci_device.extra_info = jsonutils.loads(extra_info) continue setattr(pci_device, key, db_dev[key]) pci_device._context = context pci_device.obj_reset_changes() return pci_device @staticmethod @oslo_db_api.wrap_db_retry(max_retries=1, retry_on_deadlock=True) def _create_uuid(context, dev_id): # NOTE(mdbooth): This method is only required until uuid is made # non-nullable in a future release. # NOTE(mdbooth): We wrap this method in a retry loop because it can # fail (safely) on multi-master galera if concurrent updates happen on # different masters. It will never fail on single-master. We can only # ever need one retry. 
uuid = uuidutils.generate_uuid() values = {'uuid': uuid} compare = db_models.PciDevice(id=dev_id, uuid=None) # NOTE(mdbooth): We explicitly use an independent transaction context # here so as not to fail if: # 1. We retry. # 2. We're in a read transaction. This is an edge case of what's # normally a read operation. Forcing everything (transitively) which # reads a PCI device to be in a write transaction for a narrow # temporary edge case is undesirable. tctxt = db.get_context_manager(context).writer.independent with tctxt.using(context): query = context.session.query(db_models.PciDevice).\ filter_by(id=dev_id) try: query.update_on_match(compare, 'id', values) except update_match.NoRowsMatched: # We can only get here if we raced, and another writer already # gave this PCI device a UUID result = query.one() uuid = result['uuid'] return uuid @base.remotable_classmethod def get_by_dev_addr(cls, context, compute_node_id, dev_addr): db_dev = db.pci_device_get_by_addr( context, compute_node_id, dev_addr) return cls._from_db_object(context, cls(), db_dev) @base.remotable_classmethod def get_by_dev_id(cls, context, id): db_dev = db.pci_device_get_by_id(context, id) return cls._from_db_object(context, cls(), db_dev) @classmethod def create(cls, context, dev_dict): """Create a PCI device based on hypervisor information. As the device object is just created and is not synced with db yet thus we should not reset changes here for fields from dict. """ pci_device = cls() # NOTE(danms): extra_info used to always be defaulted during init, # so make sure we replicate that behavior outside of init here # for compatibility reasons. pci_device.obj_set_defaults('extra_info') pci_device.update_device(dev_dict) pci_device.status = fields.PciDeviceStatus.AVAILABLE pci_device.uuid = uuidutils.generate_uuid() pci_device._context = context return pci_device @base.remotable def save(self): if self.status == fields.PciDeviceStatus.REMOVED: self.status = fields.PciDeviceStatus.DELETED db.pci_device_destroy(self._context, self.compute_node_id, self.address) elif self.status != fields.PciDeviceStatus.DELETED: # TODO(jaypipes): Remove in 2.0 version of object. This does an # inline migration to populate the uuid field. A similar migration # is done in the _from_db_object() method to migrate objects as # they are read from the DB. if 'uuid' not in self: self.uuid = uuidutils.generate_uuid() updates = self.obj_get_changes() if 'extra_info' in updates: updates['extra_info'] = jsonutils.dumps(updates['extra_info']) if updates: db_pci = db.pci_device_update(self._context, self.compute_node_id, self.address, updates) self._from_db_object(self._context, self, db_pci) @staticmethod def _bulk_update_status(dev_list, status): for dev in dev_list: dev.status = status def claim(self, instance_uuid): if self.status != fields.PciDeviceStatus.AVAILABLE: raise exception.PciDeviceInvalidStatus( compute_node_id=self.compute_node_id, address=self.address, status=self.status, hopestatus=[fields.PciDeviceStatus.AVAILABLE]) if self.dev_type == fields.PciDeviceType.SRIOV_PF: # Update PF status to CLAIMED if all of it dependants are free # and set their status to UNCLAIMABLE vfs_list = self.child_devices non_free_dependants = [ vf for vf in vfs_list if not vf.is_available()] if non_free_dependants: # NOTE(gibi): There should not be any dependent devices that # are UNCLAIMABLE or UNAVAILABLE as the parent is AVAILABLE, # but we got reports in bug 1969496 that this inconsistency # can happen. 
So check if the only non-free devices are in # state UNCLAIMABLE or UNAVAILABLE then we log a warning but # allow to claim the parent. actual_statuses = { child.status for child in non_free_dependants} allowed_non_free_statues = { fields.PciDeviceStatus.UNCLAIMABLE, fields.PciDeviceStatus.UNAVAILABLE, } if actual_statuses - allowed_non_free_statues == set(): LOG.warning( "Some child device of parent %s is in an inconsistent " "state. If you can reproduce this warning then please " "report a bug at " "https://bugs.launchpad.net/nova/+filebug with " "reproduction steps. Inconsistent children with " "state: %s", self.address, ",".join( "%s - %s" % (child.address, child.status) for child in non_free_dependants ), ) else: raise exception.PciDeviceVFInvalidStatus( compute_node_id=self.compute_node_id, address=self.address) self._bulk_update_status(vfs_list, fields.PciDeviceStatus.UNCLAIMABLE) elif self.dev_type in ( fields.PciDeviceType.SRIOV_VF, fields.PciDeviceType.VDPA ): # Update VF status to CLAIMED if it's parent has not been # previously allocated or claimed # When claiming/allocating a VF, it's parent PF becomes # unclaimable/unavailable. Therefore, it is expected to find the # parent PF in an unclaimable/unavailable state for any following # claims to a sibling VF parent_ok_statuses = (fields.PciDeviceStatus.AVAILABLE, fields.PciDeviceStatus.UNCLAIMABLE, fields.PciDeviceStatus.UNAVAILABLE) parent = self.parent_device if parent: if parent.status not in parent_ok_statuses: raise exception.PciDevicePFInvalidStatus( compute_node_id=self.compute_node_id, address=self.parent_addr, status=self.status, vf_address=self.address, hopestatus=parent_ok_statuses) # Set PF status if parent.status == fields.PciDeviceStatus.AVAILABLE: parent.status = fields.PciDeviceStatus.UNCLAIMABLE else: LOG.debug('Physical function addr: %(pf_addr)s parent of ' 'VF addr: %(vf_addr)s was not found', {'pf_addr': self.parent_addr, 'vf_addr': self.address}) self.status = fields.PciDeviceStatus.CLAIMED self.instance_uuid = instance_uuid def allocate(self, instance): ok_statuses = (fields.PciDeviceStatus.AVAILABLE, fields.PciDeviceStatus.CLAIMED) parent_ok_statuses = (fields.PciDeviceStatus.AVAILABLE, fields.PciDeviceStatus.UNCLAIMABLE, fields.PciDeviceStatus.UNAVAILABLE) dependants_ok_statuses = (fields.PciDeviceStatus.AVAILABLE, fields.PciDeviceStatus.UNCLAIMABLE) if self.status not in ok_statuses: raise exception.PciDeviceInvalidStatus( compute_node_id=self.compute_node_id, address=self.address, status=self.status, hopestatus=ok_statuses) if (self.status == fields.PciDeviceStatus.CLAIMED and self.instance_uuid != instance['uuid']): raise exception.PciDeviceInvalidOwner( compute_node_id=self.compute_node_id, address=self.address, owner=self.instance_uuid, hopeowner=instance['uuid']) if self.dev_type == fields.PciDeviceType.SRIOV_PF: vfs_list = self.child_devices if not all([vf.status in dependants_ok_statuses for vf in vfs_list]): raise exception.PciDeviceVFInvalidStatus( compute_node_id=self.compute_node_id, address=self.address) self._bulk_update_status(vfs_list, fields.PciDeviceStatus.UNAVAILABLE) elif self.dev_type in ( fields.PciDeviceType.SRIOV_VF, fields.PciDeviceType.VDPA ): parent = self.parent_device if parent: if parent.status not in parent_ok_statuses: raise exception.PciDevicePFInvalidStatus( compute_node_id=self.compute_node_id, address=self.parent_addr, status=self.status, vf_address=self.address, hopestatus=parent_ok_statuses) # Set PF status parent.status = fields.PciDeviceStatus.UNAVAILABLE else: 
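                # The parent PF can legitimately be unknown to the tracker
                # (for example when only the VFs are whitelisted in the PCI
                # device_spec), so this is logged rather than treated as an
                # error.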
LOG.debug('Physical function addr: %(pf_addr)s parent of ' 'VF addr: %(vf_addr)s was not found', {'pf_addr': self.parent_addr, 'vf_addr': self.address}) self.status = fields.PciDeviceStatus.ALLOCATED self.instance_uuid = instance['uuid'] # Notes(yjiang5): remove this check when instance object for # compute manager is finished if isinstance(instance, dict): if 'pci_devices' not in instance: instance['pci_devices'] = [] instance['pci_devices'].append(copy.copy(self)) else: instance.pci_devices.objects.append(copy.copy(self)) def remove(self): # We allow removal of a device is if it is unused. It can be unused # either by being in available state or being in a state that shows # that the parent or child device blocks the consumption of this device expected_states = [ fields.PciDeviceStatus.AVAILABLE, fields.PciDeviceStatus.UNAVAILABLE, fields.PciDeviceStatus.UNCLAIMABLE, ] if self.status not in expected_states: raise exception.PciDeviceInvalidStatus( compute_node_id=self.compute_node_id, address=self.address, status=self.status, hopestatus=expected_states) # Just to be on the safe side, do not allow removal of device that has # an owner even if the state of the device suggests that it is not # owned. if 'instance_uuid' in self and self.instance_uuid is not None: raise exception.PciDeviceInvalidOwner( compute_node_id=self.compute_node_id, address=self.address, owner=self.instance_uuid, hopeowner=None, ) self.status = fields.PciDeviceStatus.REMOVED self.instance_uuid = None self.request_id = None def free(self, instance=None): ok_statuses = (fields.PciDeviceStatus.ALLOCATED, fields.PciDeviceStatus.CLAIMED) free_devs = [] if self.status not in ok_statuses: raise exception.PciDeviceInvalidStatus( compute_node_id=self.compute_node_id, address=self.address, status=self.status, hopestatus=ok_statuses) if instance and self.instance_uuid != instance['uuid']: raise exception.PciDeviceInvalidOwner( compute_node_id=self.compute_node_id, address=self.address, owner=self.instance_uuid, hopeowner=instance['uuid']) if self.dev_type == fields.PciDeviceType.SRIOV_PF: # Set all PF dependants status to AVAILABLE vfs_list = self.child_devices self._bulk_update_status(vfs_list, fields.PciDeviceStatus.AVAILABLE) free_devs.extend(vfs_list) if self.dev_type in ( fields.PciDeviceType.SRIOV_VF, fields.PciDeviceType.VDPA ): # Set PF status to AVAILABLE if all of it's VFs are free parent = self.parent_device if not parent: LOG.debug('Physical function addr: %(pf_addr)s parent of ' 'VF addr: %(vf_addr)s was not found', {'pf_addr': self.parent_addr, 'vf_addr': self.address}) else: vfs_list = parent.child_devices if all([vf.is_available() for vf in vfs_list if vf.id != self.id]): parent.status = fields.PciDeviceStatus.AVAILABLE free_devs.append(parent) old_status = self.status self.status = fields.PciDeviceStatus.AVAILABLE free_devs.append(self) self.instance_uuid = None self.request_id = None if old_status == fields.PciDeviceStatus.ALLOCATED and instance: # Notes(yjiang5): remove this check when instance object for # compute manager is finished existed = next((dev for dev in instance['pci_devices'] if dev.id == self.id)) if isinstance(instance, dict): instance['pci_devices'].remove(existed) else: instance.pci_devices.objects.remove(existed) return free_devs def is_available(self): return self.status == fields.PciDeviceStatus.AVAILABLE @property def card_serial_number(self): caps_json = self.extra_info.get('capabilities', "{}") caps = jsonutils.loads(caps_json) return caps.get('vpd', {}).get('card_serial_number') @property 
def sriov_cap(self): caps_json = self.extra_info.get('capabilities', '{}') caps = jsonutils.loads(caps_json) return caps.get('sriov', {}) @property def mac_address(self): """The MAC address of the PF physical device or None if the device is not a PF or if the MAC is not available. """ return self.extra_info.get('mac_address') @property def network_caps(self): """PCI device network capabilities or empty list if not available""" caps_json = self.extra_info.get('capabilities', '{}') caps = jsonutils.loads(caps_json) return caps.get('network', []) @base.NovaObjectRegistry.register class PciDeviceList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # PciDevice <= 1.1 # Version 1.1: PciDevice 1.2 # Version 1.2: PciDevice 1.3 # Version 1.3: Adds get_by_parent_address VERSION = '1.3' fields = { 'objects': fields.ListOfObjectsField('PciDevice'), } def __init__(self, *args, **kwargs): super(PciDeviceList, self).__init__(*args, **kwargs) if 'objects' not in kwargs: self.objects = [] self.obj_reset_changes() @base.remotable_classmethod def get_by_compute_node(cls, context, node_id): db_dev_list = db.pci_device_get_all_by_node(context, node_id) return base.obj_make_list(context, cls(context), objects.PciDevice, db_dev_list) @base.remotable_classmethod def get_by_instance_uuid(cls, context, uuid): db_dev_list = db.pci_device_get_all_by_instance_uuid(context, uuid) return base.obj_make_list(context, cls(context), objects.PciDevice, db_dev_list) @base.remotable_classmethod def get_by_parent_address(cls, context, node_id, parent_addr): db_dev_list = db.pci_device_get_all_by_parent_addr(context, node_id, parent_addr) return base.obj_make_list(context, cls(context), objects.PciDevice, db_dev_list) def __repr__(self): return f"PciDeviceList(objects={[repr(obj) for obj in self.objects]})" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/pci_device_pool.py0000664000175000017500000000731700000000000020663 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_serialization import jsonutils from oslo_utils import versionutils from nova import objects from nova.objects import base from nova.objects import fields @base.NovaObjectRegistry.register class PciDevicePool(base.NovaObject): # Version 1.0: Initial version # Version 1.1: Added numa_node field VERSION = '1.1' fields = { 'product_id': fields.StringField(), 'vendor_id': fields.StringField(), 'numa_node': fields.IntegerField(nullable=True), 'tags': fields.DictOfNullableStringsField(), 'count': fields.IntegerField(), } def obj_make_compatible(self, primitive, target_version): target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 1) and 'numa_node' in primitive: del primitive['numa_node'] # NOTE(pmurray): before this object existed the pci device pool data was # stored as a dict. 
For backward compatibility we need to be able to read # it in from a dict @classmethod def from_dict(cls, value): pool_dict = copy.copy(value) pool = cls() pool.vendor_id = pool_dict.pop("vendor_id") pool.product_id = pool_dict.pop("product_id") pool.numa_node = pool_dict.pop("numa_node", None) pool.count = pool_dict.pop("count") pool.tags = pool_dict return pool # NOTE(sbauza): Before using objects, pci stats was a list of # dictionaries not having tags. For compatibility with other modules, let's # create a reversible method def to_dict(self): pci_pool = base.obj_to_primitive(self) tags = pci_pool.pop('tags', {}) for k, v in tags.items(): pci_pool[k] = v return pci_pool @base.NovaObjectRegistry.register class PciDevicePoolList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # PciDevicePool <= 1.0 # Version 1.1: PciDevicePool version 1.1 VERSION = '1.1' fields = { 'objects': fields.ListOfObjectsField('PciDevicePool'), } def from_pci_stats(pci_stats): """Create and return a PciDevicePoolList from the data stored in the db, which can be either the serialized object, or, prior to the creation of the device pool objects, a simple dict or a list of such dicts. """ pools = [] if isinstance(pci_stats, str): try: pci_stats = jsonutils.loads(pci_stats) except (ValueError, TypeError): pci_stats = None if pci_stats: # Check for object-ness, or old-style storage format. if 'nova_object.namespace' in pci_stats: return objects.PciDevicePoolList.obj_from_primitive(pci_stats) else: # This can be either a dict or a list of dicts if isinstance(pci_stats, list): pools = [objects.PciDevicePool.from_dict(stat) for stat in pci_stats] else: pools = [objects.PciDevicePool.from_dict(pci_stats)] return objects.PciDevicePoolList(objects=pools) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/quotas.py0000664000175000017500000006367000000000000017060 0ustar00zuulzuul00000000000000# Copyright 2013 Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from oslo_db import exception as db_exc from nova.db.api import api as api_db_api from nova.db.api import models as api_models from nova.db.main import api as main_db_api from nova.db.main import models as main_models from nova.db import utils as db_utils from nova import exception from nova.objects import base from nova.objects import fields from nova import quota def ids_from_instance(context, instance): if (context.is_admin and context.project_id != instance['project_id']): project_id = instance['project_id'] else: project_id = context.project_id if context.user_id != instance['user_id']: user_id = instance['user_id'] else: user_id = context.user_id return project_id, user_id # TODO(lyj): This method needs to be cleaned up once the # ids_from_instance helper method is renamed or some common # method is added for objects.quotas. 
def ids_from_security_group(context, security_group): return ids_from_instance(context, security_group) # TODO(PhilD): This method needs to be cleaned up once the # ids_from_instance helper method is renamed or some common # method is added for objects.quotas. def ids_from_server_group(context, server_group): return ids_from_instance(context, server_group) @base.NovaObjectRegistry.register class Quotas(base.NovaObject): # Version 1.0: initial version # Version 1.1: Added create_limit() and update_limit() # Version 1.2: Added limit_check() and count() # Version 1.3: Added check_deltas(), limit_check_project_and_user(), # and count_as_dict() VERSION = '1.3' fields = { # TODO(melwitt): Remove this field in version 2.0 of the object. 'reservations': fields.ListOfStringsField(nullable=True, default=[]), 'project_id': fields.StringField(nullable=True, default=None), 'user_id': fields.StringField(nullable=True, default=None), } def obj_load_attr(self, attr): self.obj_set_defaults(attr) # NOTE(danms): This is strange because resetting these would cause # them not to be saved to the database. I would imagine this is # from overzealous defaulting and that all three fields ultimately # get set all the time. However, quotas are weird, so replicate the # longstanding behavior of setting defaults and clearing their # dirty bit. self.obj_reset_changes(fields=[attr]) @staticmethod @api_db_api.context_manager.reader def _get_from_db(context, project_id, resource, user_id=None): model = api_models.ProjectUserQuota if user_id else api_models.Quota query = context.session.query(model).\ filter_by(project_id=project_id).\ filter_by(resource=resource) if user_id: query = query.filter_by(user_id=user_id) result = query.first() if not result: if user_id: raise exception.ProjectUserQuotaNotFound(project_id=project_id, user_id=user_id) else: raise exception.ProjectQuotaNotFound(project_id=project_id) return result @staticmethod @api_db_api.context_manager.reader def _get_all_from_db(context, project_id): return context.session.query(api_models.ProjectUserQuota).\ filter_by(project_id=project_id).\ all() @staticmethod @api_db_api.context_manager.reader def _get_all_from_db_by_project(context, project_id): # by_project refers to the returned dict that has a 'project_id' key rows = context.session.query(api_models.Quota).\ filter_by(project_id=project_id).\ all() result = {'project_id': project_id} for row in rows: result[row.resource] = row.hard_limit return result @staticmethod @api_db_api.context_manager.reader def _get_all_from_db_by_project_and_user(context, project_id, user_id): # by_project_and_user refers to the returned dict that has # 'project_id' and 'user_id' keys columns = (api_models.ProjectUserQuota.resource, api_models.ProjectUserQuota.hard_limit) user_quotas = context.session.query(*columns).\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ all() result = {'project_id': project_id, 'user_id': user_id} for user_quota in user_quotas: result[user_quota.resource] = user_quota.hard_limit return result @staticmethod @api_db_api.context_manager.writer def _destroy_all_in_db_by_project(context, project_id): per_project = context.session.query(api_models.Quota).\ filter_by(project_id=project_id).\ delete(synchronize_session=False) per_user = context.session.query(api_models.ProjectUserQuota).\ filter_by(project_id=project_id).\ delete(synchronize_session=False) if not per_project and not per_user: raise exception.ProjectQuotaNotFound(project_id=project_id) @staticmethod 
@api_db_api.context_manager.writer def _destroy_all_in_db_by_project_and_user(context, project_id, user_id): result = context.session.query(api_models.ProjectUserQuota).\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ delete(synchronize_session=False) if not result: raise exception.ProjectUserQuotaNotFound(project_id=project_id, user_id=user_id) @staticmethod @api_db_api.context_manager.reader def _get_class_from_db(context, class_name, resource): result = context.session.query(api_models.QuotaClass).\ filter_by(class_name=class_name).\ filter_by(resource=resource).\ first() if not result: raise exception.QuotaClassNotFound(class_name=class_name) return result @staticmethod @api_db_api.context_manager.reader def _get_all_class_from_db_by_name(context, class_name): # by_name refers to the returned dict that has a 'class_name' key rows = context.session.query(api_models.QuotaClass).\ filter_by(class_name=class_name).\ all() result = {'class_name': class_name} for row in rows: result[row.resource] = row.hard_limit return result @staticmethod @api_db_api.context_manager.writer def _create_limit_in_db(context, project_id, resource, limit, user_id=None): # TODO(melwitt): We won't have per project resources after nova-network # is removed. # TODO(stephenfin): We need to do something here now...but what? per_user = ( user_id and resource not in main_db_api.quota_get_per_project_resources() ) quota_ref = (api_models.ProjectUserQuota() if per_user else api_models.Quota()) if per_user: quota_ref.user_id = user_id quota_ref.project_id = project_id quota_ref.resource = resource quota_ref.hard_limit = limit try: quota_ref.save(context.session) except db_exc.DBDuplicateEntry: raise exception.QuotaExists(project_id=project_id, resource=resource) return quota_ref @staticmethod @api_db_api.context_manager.writer def _update_limit_in_db(context, project_id, resource, limit, user_id=None): # TODO(melwitt): We won't have per project resources after nova-network # is removed. # TODO(stephenfin): We need to do something here now...but what? per_user = ( user_id and resource not in main_db_api.quota_get_per_project_resources() ) model = api_models.ProjectUserQuota if per_user else api_models.Quota query = context.session.query(model).\ filter_by(project_id=project_id).\ filter_by(resource=resource) if per_user: query = query.filter_by(user_id=user_id) result = query.update({'hard_limit': limit}) if not result: if per_user: raise exception.ProjectUserQuotaNotFound(project_id=project_id, user_id=user_id) else: raise exception.ProjectQuotaNotFound(project_id=project_id) @staticmethod @api_db_api.context_manager.writer def _create_class_in_db(context, class_name, resource, limit): # NOTE(melwitt): There's no unique constraint on the QuotaClass model, # so check for duplicate manually. 
try: Quotas._get_class_from_db(context, class_name, resource) except exception.QuotaClassNotFound: pass else: raise exception.QuotaClassExists(class_name=class_name, resource=resource) quota_class_ref = api_models.QuotaClass() quota_class_ref.class_name = class_name quota_class_ref.resource = resource quota_class_ref.hard_limit = limit quota_class_ref.save(context.session) return quota_class_ref @staticmethod @api_db_api.context_manager.writer def _update_class_in_db(context, class_name, resource, limit): result = context.session.query(api_models.QuotaClass).\ filter_by(class_name=class_name).\ filter_by(resource=resource).\ update({'hard_limit': limit}) if not result: raise exception.QuotaClassNotFound(class_name=class_name) # TODO(melwitt): Remove this method in version 2.0 of the object. @base.remotable def reserve(self, expire=None, project_id=None, user_id=None, **deltas): # Honor the expected attributes even though we're not reserving # anything anymore. This will protect against things exploding if # someone has an Ocata compute host running by accident, for example. self.reservations = None self.project_id = project_id self.user_id = user_id self.obj_reset_changes() # TODO(melwitt): Remove this method in version 2.0 of the object. @base.remotable def commit(self): pass # TODO(melwitt): Remove this method in version 2.0 of the object. @base.remotable def rollback(self): pass @base.remotable_classmethod def limit_check(cls, context, project_id=None, user_id=None, **values): """Check quota limits.""" return quota.QUOTAS.limit_check( context, project_id=project_id, user_id=user_id, **values) @base.remotable_classmethod def limit_check_project_and_user(cls, context, project_values=None, user_values=None, project_id=None, user_id=None): """Check values against quota limits.""" return quota.QUOTAS.limit_check_project_and_user(context, project_values=project_values, user_values=user_values, project_id=project_id, user_id=user_id) # NOTE(melwitt): This can be removed once no old code can call count(). @base.remotable_classmethod def count(cls, context, resource, *args, **kwargs): """Count a resource.""" count = quota.QUOTAS.count_as_dict(context, resource, *args, **kwargs) key = 'user' if 'user' in count else 'project' return count[key][resource] @base.remotable_classmethod def count_as_dict(cls, context, resource, *args, **kwargs): """Count a resource and return a dict.""" return quota.QUOTAS.count_as_dict( context, resource, *args, **kwargs) @base.remotable_classmethod def check_deltas(cls, context, deltas, *count_args, **count_kwargs): """Check usage delta against quota limits. This does a Quotas.count_as_dict() followed by a Quotas.limit_check_project_and_user() using the provided deltas. 
:param context: The request context, for access checks :param deltas: A dict of {resource_name: delta, ...} to check against the quota limits :param count_args: Optional positional arguments to pass to count_as_dict() :param count_kwargs: Optional keyword arguments to pass to count_as_dict() :param check_project_id: Optional project_id for scoping the limit check to a different project than in the context :param check_user_id: Optional user_id for scoping the limit check to a different user than in the context :raises: exception.OverQuota if the limit check exceeds the quota limits """ # We can't do f(*args, kw=None, **kwargs) in python 2.x check_project_id = count_kwargs.pop('check_project_id', None) check_user_id = count_kwargs.pop('check_user_id', None) check_kwargs = collections.defaultdict(dict) for resource in deltas: # If we already counted a resource in a batch count, avoid # unnecessary re-counting and avoid creating empty dicts in # the defaultdict. if (resource in check_kwargs.get('project_values', {}) or resource in check_kwargs.get('user_values', {})): continue count = cls.count_as_dict(context, resource, *count_args, **count_kwargs) for res in count.get('project', {}): if res in deltas: total = count['project'][res] + deltas[res] check_kwargs['project_values'][res] = total for res in count.get('user', {}): if res in deltas: total = count['user'][res] + deltas[res] check_kwargs['user_values'][res] = total if check_project_id is not None: check_kwargs['project_id'] = check_project_id if check_user_id is not None: check_kwargs['user_id'] = check_user_id try: cls.limit_check_project_and_user(context, **check_kwargs) except exception.OverQuota as exc: # Report usage in the exception when going over quota key = 'user' if 'user' in count else 'project' exc.kwargs['usages'] = count[key] raise exc @base.remotable_classmethod def create_limit(cls, context, project_id, resource, limit, user_id=None): try: main_db_api.quota_get( context, project_id, resource, user_id=user_id) except exception.QuotaNotFound: cls._create_limit_in_db(context, project_id, resource, limit, user_id=user_id) else: raise exception.QuotaExists(project_id=project_id, resource=resource) @base.remotable_classmethod def update_limit(cls, context, project_id, resource, limit, user_id=None): try: cls._update_limit_in_db(context, project_id, resource, limit, user_id=user_id) except exception.QuotaNotFound: main_db_api.quota_update(context, project_id, resource, limit, user_id=user_id) @classmethod def create_class(cls, context, class_name, resource, limit): try: main_db_api.quota_class_get(context, class_name, resource) except exception.QuotaClassNotFound: cls._create_class_in_db(context, class_name, resource, limit) else: raise exception.QuotaClassExists(class_name=class_name, resource=resource) @classmethod def update_class(cls, context, class_name, resource, limit): try: cls._update_class_in_db(context, class_name, resource, limit) except exception.QuotaClassNotFound: main_db_api.quota_class_update( context, class_name, resource, limit) # NOTE(melwitt): The following methods are not remotable and return # dict-like database model objects. We are using classmethods to provide # a common interface for accessing the api/main databases. 
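    # An illustrative sketch of how these accessors are meant to be used
    # (the resource name 'instances' below is only an example value):
    #
    #   limit = Quotas.get(ctxt, project_id, 'instances')
    #   all_limits = Quotas.get_all_by_project(ctxt, project_id)
    #
    # Each helper consults the API database first and then falls back to,
    # or merges in, records still stored in the main database.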
@classmethod def get(cls, context, project_id, resource, user_id=None): try: quota = cls._get_from_db(context, project_id, resource, user_id=user_id) except exception.QuotaNotFound: quota = main_db_api.quota_get(context, project_id, resource, user_id=user_id) return quota @classmethod def get_all(cls, context, project_id): api_db_quotas = cls._get_all_from_db(context, project_id) main_db_quotas = main_db_api.quota_get_all(context, project_id) return api_db_quotas + main_db_quotas @classmethod def get_all_by_project(cls, context, project_id): api_db_quotas_dict = cls._get_all_from_db_by_project(context, project_id) main_db_quotas_dict = main_db_api.quota_get_all_by_project( context, project_id) for k, v in api_db_quotas_dict.items(): main_db_quotas_dict[k] = v return main_db_quotas_dict @classmethod def get_all_by_project_and_user(cls, context, project_id, user_id): api_db_quotas_dict = cls._get_all_from_db_by_project_and_user( context, project_id, user_id) main_db_quotas_dict = main_db_api.quota_get_all_by_project_and_user( context, project_id, user_id) for k, v in api_db_quotas_dict.items(): main_db_quotas_dict[k] = v return main_db_quotas_dict @classmethod def destroy_all_by_project(cls, context, project_id): try: cls._destroy_all_in_db_by_project(context, project_id) except exception.ProjectQuotaNotFound: main_db_api.quota_destroy_all_by_project(context, project_id) @classmethod def destroy_all_by_project_and_user(cls, context, project_id, user_id): try: cls._destroy_all_in_db_by_project_and_user(context, project_id, user_id) except exception.ProjectUserQuotaNotFound: main_db_api.quota_destroy_all_by_project_and_user( context, project_id, user_id) @classmethod def get_class(cls, context, class_name, resource): try: qclass = cls._get_class_from_db(context, class_name, resource) except exception.QuotaClassNotFound: qclass = main_db_api.quota_class_get(context, class_name, resource) return qclass @classmethod def get_default_class(cls, context): try: qclass = cls._get_all_class_from_db_by_name( context, main_db_api._DEFAULT_QUOTA_NAME) except exception.QuotaClassNotFound: qclass = main_db_api.quota_class_get_default(context) return qclass @classmethod def get_all_class_by_name(cls, context, class_name): api_db_quotas_dict = cls._get_all_class_from_db_by_name(context, class_name) main_db_quotas_dict = main_db_api.quota_class_get_all_by_name(context, class_name) for k, v in api_db_quotas_dict.items(): main_db_quotas_dict[k] = v return main_db_quotas_dict @base.NovaObjectRegistry.register class QuotasNoOp(Quotas): # TODO(melwitt): Remove this method in version 2.0 of the object. def reserve(context, expire=None, project_id=None, user_id=None, **deltas): pass # TODO(melwitt): Remove this method in version 2.0 of the object. def commit(self, context=None): pass # TODO(melwitt): Remove this method in version 2.0 of the object. 
def rollback(self, context=None): pass def check_deltas(cls, context, deltas, *count_args, **count_kwargs): pass @db_utils.require_context @main_db_api.pick_context_manager_reader def _get_main_per_project_limits(context, limit): return context.session.query(main_models.Quota).\ filter_by(deleted=0).\ limit(limit).\ all() @db_utils.require_context @main_db_api.pick_context_manager_reader def _get_main_per_user_limits(context, limit): return context.session.query(main_models.ProjectUserQuota).\ filter_by(deleted=0).\ limit(limit).\ all() @db_utils.require_context @main_db_api.pick_context_manager_writer def _destroy_main_per_project_limits(context, project_id, resource): context.session.query(main_models.Quota).\ filter_by(deleted=0).\ filter_by(project_id=project_id).\ filter_by(resource=resource).\ soft_delete(synchronize_session=False) @db_utils.require_context @main_db_api.pick_context_manager_writer def _destroy_main_per_user_limits(context, project_id, resource, user_id): context.session.query(main_models.ProjectUserQuota).\ filter_by(deleted=0).\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ filter_by(resource=resource).\ soft_delete(synchronize_session=False) @api_db_api.context_manager.writer def _create_limits_in_api_db(context, db_limits, per_user=False): for db_limit in db_limits: user_id = db_limit.user_id if per_user else None Quotas._create_limit_in_db(context, db_limit.project_id, db_limit.resource, db_limit.hard_limit, user_id=user_id) def migrate_quota_limits_to_api_db(context, max_count): # Migrate per project limits main_per_project_limits = _get_main_per_project_limits(context, max_count) done = 0 try: # Create all the limits in a single transaction. _create_limits_in_api_db(context, main_per_project_limits) except exception.QuotaExists: # NOTE(melwitt): This can happen if the migration is interrupted after # limits were created in the api db but before they were deleted from # the main db, and the migration is re-run. pass # Delete the limits separately. for db_limit in main_per_project_limits: _destroy_main_per_project_limits(context, db_limit.project_id, db_limit.resource) done += 1 if done == max_count: return len(main_per_project_limits), done # Migrate per user limits max_count -= done main_per_user_limits = _get_main_per_user_limits(context, max_count) try: # Create all the limits in a single transaction. _create_limits_in_api_db(context, main_per_user_limits, per_user=True) except exception.QuotaExists: # NOTE(melwitt): This can happen if the migration is interrupted after # limits were created in the api db but before they were deleted from # the main db, and the migration is re-run. pass # Delete the limits separately. 
for db_limit in main_per_user_limits: _destroy_main_per_user_limits(context, db_limit.project_id, db_limit.resource, db_limit.user_id) done += 1 return len(main_per_project_limits) + len(main_per_user_limits), done @db_utils.require_context @main_db_api.pick_context_manager_reader def _get_main_quota_classes(context, limit): return context.session.query(main_models.QuotaClass).\ filter_by(deleted=0).\ limit(limit).\ all() @main_db_api.pick_context_manager_writer def _destroy_main_quota_classes(context, db_classes): for db_class in db_classes: context.session.query(main_models.QuotaClass).\ filter_by(deleted=0).\ filter_by(id=db_class.id).\ soft_delete(synchronize_session=False) @api_db_api.context_manager.writer def _create_classes_in_api_db(context, db_classes): for db_class in db_classes: Quotas._create_class_in_db(context, db_class.class_name, db_class.resource, db_class.hard_limit) def migrate_quota_classes_to_api_db(context, max_count): main_quota_classes = _get_main_quota_classes(context, max_count) done = 0 try: # Create all the classes in a single transaction. _create_classes_in_api_db(context, main_quota_classes) except exception.QuotaClassExists: # NOTE(melwitt): This can happen if the migration is interrupted after # classes were created in the api db but before they were deleted from # the main db, and the migration is re-run. pass # Delete the classes in a single transaction. _destroy_main_quota_classes(context, main_quota_classes) found = done = len(main_quota_classes) return found, done ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/request_spec.py0000664000175000017500000021343700000000000020244 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import itertools import typing as ty import os_resource_classes as orc from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import versionutils from nova.compute import pci_placement_translator import nova.conf from nova.db.api import api as api_db_api from nova.db.api import models as api_models from nova import exception from nova import objects from nova.objects import base from nova.objects import fields from nova.objects import instance as obj_instance CONF = nova.conf.CONF LOG = logging.getLogger(__name__) REQUEST_SPEC_OPTIONAL_ATTRS = ['requested_destination', 'security_groups', 'network_metadata', 'requested_resources', 'request_level_params', 'requested_networks'] @base.NovaObjectRegistry.register class RequestSpec(base.NovaObject): # Version 1.0: Initial version # Version 1.1: ImageMeta version 1.6 # Version 1.2: SchedulerRetries version 1.1 # Version 1.3: InstanceGroup version 1.10 # Version 1.4: ImageMeta version 1.7 # Version 1.5: Added get_by_instance_uuid(), create(), save() # Version 1.6: Added requested_destination # Version 1.7: Added destroy() # Version 1.8: Added security_groups # Version 1.9: Added user_id # Version 1.10: Added network_metadata # Version 1.11: Added is_bfv # Version 1.12: Added requested_resources # Version 1.13: Added request_level_params # Version 1.14: Added requested_networks # Version 1.15: Added get_by_instance_uuids() VERSION = '1.15' fields = { 'id': fields.IntegerField(), 'image': fields.ObjectField('ImageMeta', nullable=True), 'numa_topology': fields.ObjectField('InstanceNUMATopology', nullable=True), 'pci_requests': fields.ObjectField('InstancePCIRequests', nullable=True), # TODO(mriedem): The project_id shouldn't be nullable since the # scheduler relies on it being set. 'project_id': fields.StringField(nullable=True), 'user_id': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'flavor': fields.ObjectField('Flavor', nullable=False), 'num_instances': fields.IntegerField(default=1), # NOTE(alex_xu): This field won't be persisted. 'ignore_hosts': fields.ListOfStringsField(nullable=True), # NOTE(mriedem): In reality, you can only ever have one # host in the force_hosts list. The fact this is a list # is a mistake perpetuated over time. 'force_hosts': fields.ListOfStringsField(nullable=True), # NOTE(mriedem): In reality, you can only ever have one # node in the force_nodes list. The fact this is a list # is a mistake perpetuated over time. 'force_nodes': fields.ListOfStringsField(nullable=True), # NOTE(alex_xu): This field won't be persisted. 'requested_destination': fields.ObjectField('Destination', nullable=True, default=None), # NOTE(alex_xu): This field won't be persisted. 'retry': fields.ObjectField('SchedulerRetries', nullable=True), 'limits': fields.ObjectField('SchedulerLimits', nullable=True), 'instance_group': fields.ObjectField('InstanceGroup', nullable=True), # NOTE(sbauza): Since hints are depending on running filters, we prefer # to leave the API correctly validating the hints per the filters and # just provide to the RequestSpec object a free-form dictionary 'scheduler_hints': fields.DictOfListOfStringsField(nullable=True), 'instance_uuid': fields.UUIDField(), # TODO(stephenfin): Remove this as it's related to nova-network 'security_groups': fields.ObjectField('SecurityGroupList'), # NOTE(alex_xu): This field won't be persisted. 
'network_metadata': fields.ObjectField('NetworkMetadata'), 'is_bfv': fields.BooleanField(), # NOTE(gibi): Eventually we want to store every resource request as # RequestGroup objects here. However currently the flavor based # resources like vcpu, ram, disk, and flavor.extra_spec based resources # are not handled this way. See the Todo in from_components() where # requested_resources are set. # NOTE(alex_xu): This field won't be persisted. 'requested_resources': fields.ListOfObjectsField('RequestGroup', nullable=True, default=None), # NOTE(efried): This field won't be persisted. 'request_level_params': fields.ObjectField('RequestLevelParams'), # NOTE(sbauza); This field won't be persisted. For move operations, we # reevaluate it using the network-related instance info_cache. 'requested_networks': fields.ObjectField('NetworkRequestList') } def obj_make_compatible(self, primitive, target_version): super(RequestSpec, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 14) and 'requested_networks' in primitive: del primitive['requested_networks'] if target_version < (1, 13) and 'request_level_params' in primitive: del primitive['request_level_params'] if target_version < (1, 12): if 'requested_resources' in primitive: del primitive['requested_resources'] if target_version < (1, 11) and 'is_bfv' in primitive: del primitive['is_bfv'] if target_version < (1, 10): if 'network_metadata' in primitive: del primitive['network_metadata'] if target_version < (1, 9): if 'user_id' in primitive: del primitive['user_id'] if target_version < (1, 8): if 'security_groups' in primitive: del primitive['security_groups'] if target_version < (1, 6): if 'requested_destination' in primitive: del primitive['requested_destination'] @base.lazy_load_counter def obj_load_attr(self, attrname): if attrname not in REQUEST_SPEC_OPTIONAL_ATTRS: raise exception.ObjectActionError( action='obj_load_attr', reason='attribute %s not lazy-loadable' % attrname) if attrname == 'security_groups': self.security_groups = objects.SecurityGroupList(objects=[]) return if attrname == 'network_metadata': self.network_metadata = objects.NetworkMetadata( physnets=set(), tunneled=False) return if attrname == 'request_level_params': self.request_level_params = RequestLevelParams() return if attrname == 'requested_networks': self.requested_networks = objects.NetworkRequestList(objects=[]) return # NOTE(sbauza): In case the primitive was not providing that field # because of a previous RequestSpec version, we want to default # that field in order to have the same behaviour. self.obj_set_defaults(attrname) @property def vcpus(self): return self.flavor.vcpus @property def memory_mb(self): return self.flavor.memory_mb @property def root_gb(self): return self.flavor.root_gb @property def ephemeral_gb(self): return self.flavor.ephemeral_gb @property def swap(self): return self.flavor.swap @property def root_required(self): # self.request_level_params and .root_required lazy-default via their # respective obj_load_attr methods. return self.request_level_params.root_required @property def root_forbidden(self): # self.request_level_params and .root_forbidden lazy-default via their # respective obj_load_attr methods. return self.request_level_params.root_forbidden @property def same_subtree(self): # self.request_level_params and .same_subtree lazy-default via their # respective obj_load_attr methods. 
return self.request_level_params.same_subtree def _image_meta_from_image(self, image): if isinstance(image, objects.ImageMeta): self.image = image elif isinstance(image, dict): # NOTE(sbauza): Until Nova is fully providing an ImageMeta object # for getting properties, we still need to hydrate it here # TODO(sbauza): To be removed once all RequestSpec hydrations are # done on the conductor side and if the image is an ImageMeta self.image = objects.ImageMeta.from_dict(image) else: self.image = None def _from_instance(self, instance): if isinstance(instance, obj_instance.Instance): # NOTE(sbauza): Instance should normally be a NovaObject... getter = getattr elif isinstance(instance, dict): # NOTE(sbauza): ... but there are some cases where request_spec # has an instance key as a dictionary, just because # select_destinations() is getting a request_spec dict made by # sched_utils.build_request_spec() # TODO(sbauza): To be removed once all RequestSpec hydrations are # done on the conductor side getter = lambda x, y: x.get(y) else: # If the instance is None, there is no reason to set the fields return instance_fields = ['numa_topology', 'pci_requests', 'uuid', 'project_id', 'user_id', 'availability_zone'] for field in instance_fields: if field == 'uuid': setattr(self, 'instance_uuid', getter(instance, field)) elif field == 'pci_requests': self._from_instance_pci_requests(getter(instance, field)) elif field == 'numa_topology': self._from_instance_numa_topology(getter(instance, field)) else: setattr(self, field, getter(instance, field)) def _from_instance_pci_requests(self, pci_requests): if isinstance(pci_requests, dict): self.pci_requests = objects.InstancePCIRequests.obj_from_primitive( pci_requests, ) else: self.pci_requests = pci_requests def _from_instance_numa_topology(self, numa_topology): if isinstance(numa_topology, str): numa_topology = objects.InstanceNUMATopology.obj_from_primitive( jsonutils.loads(numa_topology)) self.numa_topology = numa_topology def _from_flavor(self, flavor): if isinstance(flavor, objects.Flavor): self.flavor = flavor elif isinstance(flavor, dict): # NOTE(sbauza): Again, request_spec is primitived by # sched_utils.build_request_spec() and passed to # select_destinations() like this # TODO(sbauza): To be removed once all RequestSpec hydrations are # done on the conductor side self.flavor = objects.Flavor(**flavor) def _from_retry(self, retry_dict): self.retry = (SchedulerRetries.from_dict(self._context, retry_dict) if retry_dict else None) def _populate_group_info(self, filter_properties): if filter_properties.get('instance_group'): # New-style group information as a NovaObject, we can directly set # the field self.instance_group = filter_properties.get('instance_group') elif filter_properties.get('group_updated') is True: # Old-style group information having ugly dict keys containing sets # NOTE(sbauza): Can be dropped once select_destinations is removed policies = list(filter_properties.get('group_policies')) hosts = list(filter_properties.get('group_hosts')) members = list(filter_properties.get('group_members')) self.instance_group = objects.InstanceGroup(policy=policies[0], hosts=hosts, members=members) # InstanceGroup.uuid is not nullable so only set it if we got it. 
group_uuid = filter_properties.get('group_uuid') if group_uuid: self.instance_group.uuid = group_uuid # hosts has to be not part of the updates for saving the object self.instance_group.obj_reset_changes(['hosts']) else: # Set the value anyway to avoid any call to obj_attr_is_set for it self.instance_group = None def _from_limits(self, limits): if isinstance(limits, dict): self.limits = SchedulerLimits.from_dict(limits) else: # Already a SchedulerLimits object. self.limits = limits def _from_hints(self, hints_dict): if hints_dict is None: self.scheduler_hints = None return self.scheduler_hints = { hint: value if isinstance(value, list) else [value] for hint, value in hints_dict.items()} @classmethod def from_primitives(cls, context, request_spec, filter_properties): """Returns a new RequestSpec object by hydrating it from legacy dicts. Deprecated. A RequestSpec object is created early in the boot process using the from_components method. That object will either be passed to places that require it, or it can be looked up with get_by_instance_uuid. This method can be removed when there are no longer any callers. Because the method is not remotable it is not tied to object versioning. That helper is not intended to leave the legacy dicts kept in the nova codebase, but is rather just for giving a temporary solution for populating the Spec object until we get rid of scheduler_utils' build_request_spec() and the filter_properties hydratation in the conductor. :param context: a context object :param request_spec: An old-style request_spec dictionary :param filter_properties: An old-style filter_properties dictionary """ num_instances = request_spec.get('num_instances', 1) spec = cls(context, num_instances=num_instances) # Hydrate from request_spec first image = request_spec.get('image') spec._image_meta_from_image(image) instance = request_spec.get('instance_properties') spec._from_instance(instance) flavor = request_spec.get('instance_type') spec._from_flavor(flavor) # Hydrate now from filter_properties spec.ignore_hosts = filter_properties.get('ignore_hosts') spec.force_hosts = filter_properties.get('force_hosts') spec.force_nodes = filter_properties.get('force_nodes') retry = filter_properties.get('retry', {}) spec._from_retry(retry) limits = filter_properties.get('limits', {}) spec._from_limits(limits) spec._populate_group_info(filter_properties) scheduler_hints = filter_properties.get('scheduler_hints', {}) spec._from_hints(scheduler_hints) spec.requested_destination = filter_properties.get( 'requested_destination') # NOTE(sbauza): Default the other fields that are not part of the # original contract spec.obj_set_defaults() return spec def get_scheduler_hint(self, hint_name, default=None): """Convenient helper for accessing a particular scheduler hint since it is hydrated by putting a single item into a list. In order to reduce the complexity, that helper returns a string if the requested hint is a list of only one value, and if not, returns the value directly (ie. the list). If the hint is not existing (or scheduler_hints is None), then it returns the default value. 
:param hint_name: name of the hint :param default: the default value if the hint is not there """ if (not self.obj_attr_is_set('scheduler_hints') or self.scheduler_hints is None): return default hint_val = self.scheduler_hints.get(hint_name, default) return (hint_val[0] if isinstance(hint_val, list) and len(hint_val) == 1 else hint_val) def _to_legacy_image(self): return base.obj_to_primitive(self.image) if ( self.obj_attr_is_set('image') and self.image) else {} def _to_legacy_instance(self): # NOTE(sbauza): Since the RequestSpec only persists a few Instance # fields, we can only return a dict. instance = {} instance_fields = ['numa_topology', 'pci_requests', 'project_id', 'user_id', 'availability_zone', 'instance_uuid'] for field in instance_fields: if not self.obj_attr_is_set(field): continue if field == 'instance_uuid': instance['uuid'] = getattr(self, field) else: instance[field] = getattr(self, field) flavor_fields = ['root_gb', 'ephemeral_gb', 'memory_mb', 'vcpus'] if not self.obj_attr_is_set('flavor'): return instance for field in flavor_fields: instance[field] = getattr(self.flavor, field) return instance def _to_legacy_group_info(self): # NOTE(sbauza): Since this is only needed until the AffinityFilters are # modified by using directly the RequestSpec object, we need to keep # the existing dictionary as a primitive. return {'group_updated': True, 'group_hosts': set(self.instance_group.hosts), 'group_policies': set([self.instance_group.policy]), 'group_members': set(self.instance_group.members), 'group_uuid': self.instance_group.uuid} def to_legacy_request_spec_dict(self): """Returns a legacy request_spec dict from the RequestSpec object. Since we need to manage backwards compatibility and rolling upgrades within our RPC API, we need to accept to provide an helper for primitiving the right RequestSpec object into a legacy dict until we drop support for old Scheduler RPC API versions. If you don't understand why this method is needed, please don't use it. """ req_spec = {} if not self.obj_attr_is_set('num_instances'): req_spec['num_instances'] = self.fields['num_instances'].default else: req_spec['num_instances'] = self.num_instances req_spec['image'] = self._to_legacy_image() req_spec['instance_properties'] = self._to_legacy_instance() if self.obj_attr_is_set('flavor'): req_spec['instance_type'] = self.flavor else: req_spec['instance_type'] = {} return req_spec def to_legacy_filter_properties_dict(self): """Returns a legacy filter_properties dict from the RequestSpec object. Since we need to manage backwards compatibility and rolling upgrades within our RPC API, we need to accept to provide an helper for primitiving the right RequestSpec object into a legacy dict until we drop support for old Scheduler RPC API versions. If you don't understand why this method is needed, please don't use it. 
""" filt_props = {} if self.obj_attr_is_set('ignore_hosts') and self.ignore_hosts: filt_props['ignore_hosts'] = self.ignore_hosts if self.obj_attr_is_set('force_hosts') and self.force_hosts: filt_props['force_hosts'] = self.force_hosts if self.obj_attr_is_set('force_nodes') and self.force_nodes: filt_props['force_nodes'] = self.force_nodes if self.obj_attr_is_set('retry') and self.retry: filt_props['retry'] = self.retry.to_dict() if self.obj_attr_is_set('limits') and self.limits: filt_props['limits'] = self.limits.to_dict() if self.obj_attr_is_set('instance_group') and self.instance_group: filt_props.update(self._to_legacy_group_info()) if self.obj_attr_is_set('scheduler_hints') and self.scheduler_hints: # NOTE(sbauza): We need to backport all the hints correctly since # we had to hydrate the field by putting a single item into a list. filt_props['scheduler_hints'] = {hint: self.get_scheduler_hint( hint) for hint in self.scheduler_hints} if self.obj_attr_is_set('requested_destination' ) and self.requested_destination: filt_props['requested_destination'] = self.requested_destination return filt_props @staticmethod def _rc_from_request(spec: ty.Dict[str, ty.Any]) -> str: return pci_placement_translator.get_resource_class( spec.get("resource_class"), spec.get("vendor_id"), spec.get("product_id"), ) @staticmethod def _traits_from_request(spec: ty.Dict[str, ty.Any]) -> ty.Set[str]: return pci_placement_translator.get_traits(spec.get("traits", "")) def generate_request_groups_from_pci_requests(self): if not CONF.filter_scheduler.pci_in_placement: return False for pci_request in self.pci_requests.requests: if pci_request.source == objects.InstancePCIRequest.NEUTRON_PORT: # TODO(gibi): Handle neutron based PCI requests here in a later # cycle. continue if len(pci_request.spec) != 1: # We are instantiating InstancePCIRequest objects with spec in # two cases: # 1) when a neutron port is translated to InstancePCIRequest # object in # nova.network.neutron.API.create_resource_requests # 2) when the pci_passthrough:alias flavor extra_spec is # translated to InstancePCIRequest objects in # nova.pci.request._get_alias_from_config which enforces the # json schema defined in nova.pci.request. # # In both cases only a single dict is added to the spec list. # If we ever want to add support for multiple specs per request # then we have to solve the issue that each spec can request a # different resource class from placement. The only place in # nova that currently handles multiple specs per request is # nova.pci.utils.pci_device_prop_match() and it considers them # as alternatives. So specs with different resource classes # would mean alternative resource_class requests. This cannot # be expressed today in the allocation_candidate query towards # placement. raise ValueError( "PCI tracking in placement does not support multiple " "specs per PCI request" ) spec = pci_request.spec[0] # The goal is to translate InstancePCIRequest to RequestGroup. Each # InstancePCIRequest can be fulfilled from the whole RP tree. And # a flavor based InstancePCIRequest might request more than one # device (if count > 1) and those devices still need to be placed # independently to RPs. So we could have two options to translate # an InstancePCIRequest object to RequestGroup objects: # 1) put the all the requested resources from every # InstancePCIRequest to the unsuffixed RequestGroup. # 2) generate a separate RequestGroup for each individual device # request # # While #1) feels simpler it has a big downside. 
The unsuffixed # group will have a bulk request group resource provider mapping # returned from placement. So there would be no easy way to later # untangle which InstancePCIRequest is fulfilled by which RP, and # therefore which PCI device should be used to allocate a specific # device on the hypervisor during the PCI claim. Note that there # could be multiple PF RPs providing the same type of resources but # still we need to make sure that if a resource is allocated in # placement from a specific RP (representing a physical device) # then the PCI claim should consume resources from the same # physical device. # # So we need at least a separate RequestGroup per # InstancePCIRequest. However, for a InstancePCIRequest(count=2) # that would mean a RequestGroup(RC:2) which would mean both # resource should come from the same RP in placement. This is # impossible for PF or PCI type requests and over restrictive for # VF type requests. Therefore we need to generate one RequestGroup # per requested device. So for InstancePCIRequest(count=2) we need # to generate two separate RequestGroup(RC:1) objects. # NOTE(gibi): If we have count=2 requests then the multiple # RequestGroup split below only works if group_policy is set to # none as group_policy=isolate would prevent allocating two VFs # from the same PF. Fortunately # nova.scheduler.utils.resources_from_request_spec() already # defaults group_policy to none if it is not specified in the # flavor and there are multiple RequestGroups in the RequestSpec. for i in range(pci_request.count): rg = objects.RequestGroup( use_same_provider=True, # we need to generate a unique ID for each group, so we use # a counter requester_id=f"{pci_request.request_id}-{i}", # as we split count >= 2 requests to independent groups # each group will have a resource request of one resources={ self._rc_from_request(spec): 1 }, required_traits=self._traits_from_request(spec), # TODO(gibi): later we can add support for complex trait # queries here including forbidden_traits. ) self.requested_resources.append(rg) @classmethod def from_components( cls, context, instance_uuid, image, flavor, numa_topology, pci_requests, filter_properties, instance_group, availability_zone, security_groups=None, project_id=None, user_id=None, port_resource_requests=None, request_level_params=None ): """Returns a new RequestSpec object hydrated by various components. This helper is useful in creating the RequestSpec from the various objects that are assembled early in the boot process. This method creates a complete RequestSpec object with all properties set or intentionally left blank. :param context: a context object :param instance_uuid: the uuid of the instance to schedule :param image: a dict of properties for an image or volume :param flavor: a flavor NovaObject :param numa_topology: InstanceNUMATopology or None :param pci_requests: InstancePCIRequests :param filter_properties: a dict of properties for scheduling :param instance_group: None or an instance group NovaObject :param availability_zone: an availability_zone string :param security_groups: A SecurityGroupList object. If None, don't set security_groups on the resulting object. :param project_id: The project_id for the requestspec (should match the instance project_id). :param user_id: The user_id for the requestspec (should match the instance user_id). 
:param port_resource_requests: a list of RequestGroup objects representing the resource needs of the neutron ports :param request_level_params: a RequestLevelParams object """ spec_obj = cls(context) spec_obj.num_instances = 1 spec_obj.instance_uuid = instance_uuid spec_obj.instance_group = instance_group if spec_obj.instance_group is None and filter_properties: spec_obj._populate_group_info(filter_properties) spec_obj.project_id = project_id or context.project_id spec_obj.user_id = user_id or context.user_id spec_obj._image_meta_from_image(image) spec_obj._from_flavor(flavor) spec_obj._from_instance_pci_requests(pci_requests) spec_obj._from_instance_numa_topology(numa_topology) spec_obj.ignore_hosts = filter_properties.get('ignore_hosts') spec_obj.force_hosts = filter_properties.get('force_hosts') spec_obj.force_nodes = filter_properties.get('force_nodes') spec_obj._from_retry(filter_properties.get('retry', {})) spec_obj._from_limits(filter_properties.get('limits', {})) spec_obj._from_hints(filter_properties.get('scheduler_hints', {})) spec_obj.availability_zone = availability_zone if security_groups is not None: spec_obj.security_groups = security_groups spec_obj.requested_destination = filter_properties.get( 'requested_destination') # TODO(gibi): do the creation of the unnumbered group and any # numbered group from the flavor by moving the logic from # nova.scheduler.utils.resources_from_request_spec() here. See also # the comment in the definition of requested_resources field. spec_obj.requested_resources = [] if port_resource_requests: spec_obj.requested_resources.extend(port_resource_requests) spec_obj.generate_request_groups_from_pci_requests() # NOTE(gibi): later the scheduler adds more request level params but # never overrides existing ones so we can initialize them here. if request_level_params is None: request_level_params = objects.RequestLevelParams() spec_obj.request_level_params = request_level_params # NOTE(sbauza): Default the other fields that are not part of the # original contract spec_obj.obj_set_defaults() return spec_obj def ensure_project_and_user_id(self, instance): if 'project_id' not in self or self.project_id is None: self.project_id = instance.project_id if 'user_id' not in self or self.user_id is None: self.user_id = instance.user_id def ensure_network_information(self, instance): if not (instance.info_cache and instance.info_cache.network_info): # NOTE(sbauza): On create, the network_info field is null but we # directly set the RequestSpec nested network_requests field, so we # are fine returning here. return physnets = set([]) tunneled = True network_requests = [] # physical_network and tunneled might not be in the cache for old # instances that haven't had their info_cache healed yet for vif in instance.info_cache.network_info: physnet = vif.get('network', {}).get('meta', {}).get( 'physical_network', None) if physnet: physnets.add(physnet) tunneled |= vif.get('network', {}).get('meta', {}).get( 'tunneled', False) # We also want to recreate the original NetworkRequests # TODO(sbauza): We miss tag and pci_request_id information that is # not stored in the VIF model to fully provide all fields # FIXME(sbauza): We can't also guess whether the user provided us # a specific IP address to use for create, and which one. 
nr_args = { 'network_id': vif['network']['id'], 'port_id': vif['id'], } network_request = objects.NetworkRequest(**nr_args) network_requests.append(network_request) self.network_metadata = objects.NetworkMetadata( physnets=physnets, tunneled=tunneled) self.requested_networks = objects.NetworkRequestList( objects=network_requests) @staticmethod def _from_db_object(context, spec, db_spec): spec_obj = spec.obj_from_primitive(jsonutils.loads(db_spec['spec'])) data_migrated = False for key in spec.fields: # Load these from the db model not the serialized object within, # though they should match. if key in ['id', 'instance_uuid']: setattr(spec, key, db_spec[key]) elif key in ('requested_destination', 'requested_resources', 'network_metadata', 'request_level_params', 'requested_networks'): # Do not override what we already have in the object as this # field is not persisted. If save() is called after # one of these fields is populated, it will reset the field to # None and we'll lose what is set (but not persisted) on the # object. continue elif key in ('retry', 'ignore_hosts'): # NOTE(takashin): Do not override the 'retry' or 'ignore_hosts' # fields which are not persisted. They are not lazy-loadable # fields. If they are not set, set None. if not spec.obj_attr_is_set(key): setattr(spec, key, None) elif key == "numa_topology": if key in spec_obj: spec.numa_topology = spec_obj.numa_topology if spec.numa_topology: data_migrated = objects.InstanceNUMATopology.\ _migrate_legacy_dedicated_instance_cpuset( spec.numa_topology) elif key in spec_obj: setattr(spec, key, getattr(spec_obj, key)) spec._context = context if 'instance_group' in spec and spec.instance_group: # NOTE(mriedem): We could have a half-baked instance group with no # uuid if some legacy translation was performed on this spec in the # past. In that case, try to workaround the issue by getting the # group uuid from the scheduler hint. if 'uuid' not in spec.instance_group: spec.instance_group.uuid = spec.get_scheduler_hint('group') # NOTE(danms): We don't store the full instance group in # the reqspec since it would be stale almost immediately. # Instead, load it by uuid here so it's up-to-date. 
try: spec.instance_group = objects.InstanceGroup.get_by_uuid( context, spec.instance_group.uuid) except exception.InstanceGroupNotFound: # NOTE(danms): Instance group may have been deleted spec.instance_group = None spec.scheduler_hints.pop('group', None) if data_migrated: spec.save() spec.obj_reset_changes() return spec @staticmethod @api_db_api.context_manager.reader def _get_by_instance_uuid_from_db(context, instance_uuid): db_spec = context.session.query(api_models.RequestSpec).filter_by( instance_uuid=instance_uuid).first() if not db_spec: raise exception.RequestSpecNotFound( instance_uuid=instance_uuid) return db_spec @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_spec = cls._get_by_instance_uuid_from_db(context, instance_uuid) return cls._from_db_object(context, cls(), db_spec) @staticmethod @api_db_api.context_manager.reader def _get_by_instance_uuids_from_db(context, instance_uuids): db_specs = context.session.query(api_models.RequestSpec).filter( api_models.RequestSpec.instance_uuid.in_(instance_uuids)).all() return db_specs @base.remotable_classmethod def get_by_instance_uuids(cls, context, instance_uuids): req_specs = [] if not instance_uuids: return req_specs db_specs = cls._get_by_instance_uuids_from_db(context, instance_uuids) for db_spec in db_specs: req_specs.append(cls._from_db_object(context, cls(), db_spec)) return req_specs @staticmethod @api_db_api.context_manager.writer def _create_in_db(context, updates): db_spec = api_models.RequestSpec() db_spec.update(updates) db_spec.save(context.session) return db_spec def _get_update_primitives(self): """Serialize object to match the db model. We store copies of embedded objects rather than references to these objects because we want a snapshot of the request at this point. If the references changed or were deleted we would not be able to reschedule this instance under the same conditions as it was originally scheduled with. """ updates = self.obj_get_changes() db_updates = None # NOTE(alaski): The db schema is the full serialized object in a # 'spec' column. If anything has changed we rewrite the full thing. 
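        # Illustrative example of the result (hypothetical values): when
        # changes exist, the db_updates computed below looks like
        #   {'spec': '{"nova_object.name": "RequestSpec", ...}',
        #    'instance_uuid': 'a3f1c0e6-...'}
        # i.e. the whole object is re-serialized into the 'spec' column and
        # only instance_uuid is kept as a separate column.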
if updates: # NOTE(danms): Don't persist the could-be-large and could-be-stale # properties of InstanceGroup spec = self.obj_clone() if 'instance_group' in spec and spec.instance_group: spec.instance_group.members = None spec.instance_group.hosts = None # NOTE(mriedem): Don't persist these since they are per-request for excluded in ('retry', 'requested_destination', 'requested_resources', 'ignore_hosts'): if excluded in spec and getattr(spec, excluded): setattr(spec, excluded, None) # NOTE(stephenfin): Don't persist network metadata since we have # no need for it after scheduling if 'network_metadata' in spec and spec.network_metadata: del spec.network_metadata # NOTE(sbauza): Don't persist requested_networks since we have # no need for it after scheduling if 'requested_networks' in spec and spec.requested_networks: del spec.requested_networks # NOTE(gibi): Don't persist requested_networks since we have # no need for it after scheduling if 'request_level_params' in spec and spec.request_level_params: del spec.request_level_params db_updates = {'spec': jsonutils.dumps(spec.obj_to_primitive())} if 'instance_uuid' in updates: db_updates['instance_uuid'] = updates['instance_uuid'] return db_updates @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') updates = self._get_update_primitives() if not updates: raise exception.ObjectActionError(action='create', reason='no fields are set') db_spec = self._create_in_db(self._context, updates) self._from_db_object(self._context, self, db_spec) @staticmethod @api_db_api.context_manager.writer def _save_in_db(context, instance_uuid, updates): # FIXME(sbauza): Provide a classmethod when oslo.db bug #1520195 is # fixed and released db_spec = RequestSpec._get_by_instance_uuid_from_db(context, instance_uuid) db_spec.update(updates) db_spec.save(context.session) return db_spec @base.remotable def save(self): updates = self._get_update_primitives() if updates: db_spec = self._save_in_db(self._context, self.instance_uuid, updates) self._from_db_object(self._context, self, db_spec) self.obj_reset_changes() @staticmethod @api_db_api.context_manager.writer def _destroy_in_db(context, instance_uuid): result = context.session.query(api_models.RequestSpec).filter_by( instance_uuid=instance_uuid).delete() if not result: raise exception.RequestSpecNotFound(instance_uuid=instance_uuid) @base.remotable def destroy(self): self._destroy_in_db(self._context, self.instance_uuid) @staticmethod @api_db_api.context_manager.writer def _destroy_bulk_in_db(context, instance_uuids): return context.session.query(api_models.RequestSpec).filter( api_models.RequestSpec.instance_uuid.in_(instance_uuids)).\ delete(synchronize_session=False) @classmethod def destroy_bulk(cls, context, instance_uuids): return cls._destroy_bulk_in_db(context, instance_uuids) def reset_forced_destinations(self): """Clears the forced destination fields from the RequestSpec object. This method is for making sure we don't ask the scheduler to give us again the same destination(s) without persisting the modifications. """ self.force_hosts = None self.force_nodes = None # NOTE(sbauza): Make sure we don't persist this, we need to keep the # original request for the forced hosts self.obj_reset_changes(['force_hosts', 'force_nodes']) @property def maps_requested_resources(self): """Returns True if this RequestSpec needs to map requested_resources to resource providers, False otherwise. 
""" return 'requested_resources' in self and self.requested_resources def _is_valid_group_rp_mapping( self, group_rp_mapping, placement_allocations, provider_traits): """Decides if the mapping is valid from resources and traits perspective. :param group_rp_mapping: A list of RequestGroup - RP UUID two tuples representing a mapping between request groups in this RequestSpec and RPs from the allocation. It contains every RequestGroup in this RequestSpec but the mapping might not be valid from resources and traits perspective. :param placement_allocations: The overall allocation made by the scheduler for this RequestSpec :param provider_traits: A dict keyed by resource provider uuids containing the list of traits the given RP has. This dict contains info only about RPs appearing in the placement_allocations param. :return: True if each group's resource and trait request can be fulfilled from the RP it is mapped to. False otherwise. """ # Check that traits are matching for each group - rp pair in # this mapping for group, rp_uuid in group_rp_mapping: if not group.required_traits.issubset(provider_traits[rp_uuid]): return False # TODO(gibi): add support for groups with forbidden_traits and # aggregates # Check that each group can consume the requested resources from the rp # that it is mapped to in the current mapping. Consume each group's # request from the allocation, if anything drops below zero, then this # is not a solution rcs = set() allocs = copy.deepcopy(placement_allocations) for group, rp_uuid in group_rp_mapping: rp_allocs = allocs[rp_uuid]['resources'] for rc, amount in group.resources.items(): rcs.add(rc) if rc in rp_allocs: rp_allocs[rc] -= amount if rp_allocs[rc] < 0: return False else: return False # Check that all the allocations are consumed from the resource # classes that appear in the request groups. It should never happen # that we have a match but also have some leftover if placement returns # valid allocation candidates. Except if the leftover in the allocation # are due to the RC requested in the unnumbered group. for rp_uuid in allocs: rp_allocs = allocs[rp_uuid]['resources'] for rc, amount in group.resources.items(): if rc in rcs and rc in rp_allocs: if rp_allocs[rc] != 0: LOG.debug( 'Found valid group - RP mapping %s but there are ' 'allocations leftover in %s from resource class ' '%s', group_rp_mapping, allocs, rc) return False # If both the traits and the allocations are OK then mapping is valid return True def map_requested_resources_to_providers( self, placement_allocations, provider_traits): """Fill the provider_uuids field in each RequestGroup objects in the requested_resources field. The mapping is generated based on the overall allocation made for this RequestSpec, the request in each RequestGroup, and the traits of the RPs in the allocation. Limitations: * only groups with use_same_provider = True is mapped, the un-numbered group are not supported. * mapping is generated only based on the resource request and the required traits, aggregate membership and forbidden traits are not supported. * requesting the same resource class in numbered and un-numbered group is not supported We can live with these limitations today as Neutron does not use forbidden traits and aggregates in the request and each Neutron port is mapped to a numbered group and the resources class used by neutron ports are never requested through the flavor extra_spec. 
This is a workaround as placement does not return which RP fulfills which granular request group in the allocation candidate request. There is a spec proposing a solution in placement: https://review.opendev.org/#/c/597601/ :param placement_allocations: The overall allocation made by the scheduler for this RequestSpec :param provider_traits: A dict keyed by resource provider uuids containing the list of traits the given RP has. This dict contains info only about RPs appearing in the placement_allocations param. """ if not self.maps_requested_resources: # Nothing to do, so let's return early return for group in self.requested_resources: # See the limitations in the func doc above if (not group.use_same_provider or group.aggregates or group.forbidden_traits): raise NotImplementedError() # Iterate through every possible group - RP mappings and try to find a # valid one. If there are more than one possible solution then it is # enough to find one as these solutions are interchangeable from # backend (e.g. Neutron) perspective. LOG.debug('Trying to find a valid group - RP mapping for groups %s to ' 'allocations %s with traits %s', self.requested_resources, placement_allocations, provider_traits) # This generator first creates permutations with repetition of the RPs # with length of the number of groups we have. So if there is # 2 RPs (rp1, rp2) and # 3 groups (g1, g2, g3). # Then the itertools.product(('rp1', 'rp2'), repeat=3)) will be: # (rp1, rp1, rp1) # (rp1, rp1, rp2) # (rp1, rp2, rp1) # ... # (rp2, rp2, rp2) # Then we zip each of this permutations to our group list resulting in # a list of list of group - rp pairs: # [[('g1', 'rp1'), ('g2', 'rp1'), ('g3', 'rp1')], # [('g1', 'rp1'), ('g2', 'rp1'), ('g3', 'rp2')], # [('g1', 'rp1'), ('g2', 'rp2'), ('g3', 'rp1')], # ... # [('g1', 'rp2'), ('g2', 'rp2'), ('g3', 'rp2')]] # NOTE(gibi): the list() around the zip() below is needed as the # algorithm looks into the mapping more than once and zip returns an # iterator in py3.x. Still we need to generate a mapping once hence the # generator expression. every_possible_mapping = (list(zip(self.requested_resources, rps)) for rps in itertools.product( placement_allocations.keys(), repeat=len(self.requested_resources))) for mapping in every_possible_mapping: if self._is_valid_group_rp_mapping( mapping, placement_allocations, provider_traits): for group, rp in mapping: # NOTE(gibi): un-numbered group might be mapped to more # than one RP but we do not support that yet here. group.provider_uuids = [rp] LOG.debug('Found valid group - RP mapping %s', mapping) return # if we reached this point then none of the possible mappings was # valid. This should never happen as Placement returns allocation # candidates based on the overall resource request of the server # including the request of the groups. raise ValueError('No valid group - RP mapping is found for ' 'groups %s, allocation %s and provider traits %s' % (self.requested_resources, placement_allocations, provider_traits)) def get_request_group_mapping(self): """Return request group resource - provider mapping. This is currently used for Neutron ports that have resource request due to the port having QoS minimum bandwidth policy rule attached. :returns: A dict keyed by RequestGroup requester_id, currently Neutron port_id, to a list of resource provider UUIDs which provide resource for that RequestGroup. 
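            For example (hypothetical IDs):
                {'port-uuid-1': ['rp-uuid-a'],
                 'port-uuid-2': ['rp-uuid-b']}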
""" if ('requested_resources' in self and self.requested_resources is not None): return { group.requester_id: group.provider_uuids for group in self.requested_resources } @base.NovaObjectRegistry.register class Destination(base.NovaObject): # Version 1.0: Initial version # Version 1.1: Add cell field # Version 1.2: Add aggregates field # Version 1.3: Add allow_cross_cell_move field. # Version 1.4: Add forbidden_aggregates field VERSION = '1.4' fields = { 'host': fields.StringField(), # NOTE(sbauza): Given we want to split the host/node relationship later # and also remove the possibility to have multiple nodes per service, # let's provide a possible nullable node here. 'node': fields.StringField(nullable=True), 'cell': fields.ObjectField('CellMapping', nullable=True), # NOTE(dansmith): These are required aggregates (or sets) and # are passed to placement. See require_aggregates() below. 'aggregates': fields.ListOfStringsField(nullable=True, default=None), # NOTE(mriedem): allow_cross_cell_move defaults to False so that the # scheduler by default selects hosts from the cell specified in the # cell field. 'allow_cross_cell_move': fields.BooleanField(default=False), # NOTE(vrushali): These are forbidden aggregates passed to placement as # query params to the allocation candidates API. Nova uses this field # to implement the isolate_aggregates request filter. 'forbidden_aggregates': fields.SetOfStringsField(nullable=True, default=None), } def obj_make_compatible(self, primitive, target_version): super(Destination, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 4): if 'forbidden_aggregates' in primitive: del primitive['forbidden_aggregates'] if target_version < (1, 3) and 'allow_cross_cell_move' in primitive: del primitive['allow_cross_cell_move'] if target_version < (1, 2): if 'aggregates' in primitive: del primitive['aggregates'] if target_version < (1, 1): if 'cell' in primitive: del primitive['cell'] def obj_load_attr(self, attrname): self.obj_set_defaults(attrname) def require_aggregates(self, aggregates): """Add a set of aggregates to the list of required aggregates. This will take a list of aggregates, which are to be logically OR'd together and add them to the list of required aggregates that will be used to query placement. Aggregate sets provided in sequential calls to this method will be AND'd together. For example, the following set of calls: dest.require_aggregates(['foo', 'bar']) dest.require_aggregates(['baz']) will generate the following logical query to placement: "Candidates should be in 'foo' OR 'bar', but definitely in 'baz'" :param aggregates: A list of aggregates, at least one of which must contain the destination host. """ if self.aggregates is None: self.aggregates = [] self.aggregates.append(','.join(aggregates)) def append_forbidden_aggregates(self, forbidden_aggregates): """Add a set of aggregates to the forbidden aggregates. This will take a set of forbidden aggregates that should be ignored by the placement service. :param forbidden_aggregates: A set of aggregates which should be ignored by the placement service. 
""" if self.forbidden_aggregates is None: self.forbidden_aggregates = set([]) self.forbidden_aggregates |= forbidden_aggregates @base.NovaObjectRegistry.register class SchedulerRetries(base.NovaObject): # Version 1.0: Initial version # Version 1.1: ComputeNodeList version 1.14 VERSION = '1.1' fields = { 'num_attempts': fields.IntegerField(), # NOTE(sbauza): Even if we are only using host/node strings, we need to # know which compute nodes were tried 'hosts': fields.ObjectField('ComputeNodeList'), } @classmethod def from_dict(cls, context, retry_dict): # NOTE(sbauza): We are not persisting the user context since it's only # needed for hydrating the Retry object retry_obj = cls() if not ('num_attempts' and 'hosts') in retry_dict: # NOTE(sbauza): We prefer to return an empty object if the # primitive is not good enough return retry_obj retry_obj.num_attempts = retry_dict.get('num_attempts') # NOTE(sbauza): each retry_dict['hosts'] item is a list of [host, node] computes = [objects.ComputeNode(context=context, host=host, hypervisor_hostname=node) for host, node in retry_dict.get('hosts')] retry_obj.hosts = objects.ComputeNodeList(objects=computes) return retry_obj def to_dict(self): legacy_hosts = [[cn.host, cn.hypervisor_hostname] for cn in self.hosts] return {'num_attempts': self.num_attempts, 'hosts': legacy_hosts} @base.NovaObjectRegistry.register class SchedulerLimits(base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'numa_topology': fields.ObjectField('NUMATopologyLimits', nullable=True, default=None), 'vcpu': fields.IntegerField(nullable=True, default=None), 'disk_gb': fields.IntegerField(nullable=True, default=None), 'memory_mb': fields.IntegerField(nullable=True, default=None), } @classmethod def from_dict(cls, limits_dict): limits = cls(**limits_dict) # NOTE(sbauza): Since the limits can be set for each field or not, we # prefer to have the fields nullable, but default the value to None. # Here we accept that the object is always generated from a primitive # hence the use of obj_set_defaults exceptionally. limits.obj_set_defaults() return limits def to_dict(self): limits = {} for field in self.fields: if getattr(self, field) is not None: limits[field] = getattr(self, field) return limits @base.NovaObjectRegistry.register class RequestGroup(base.NovaEphemeralObject): """Versioned object based on the unversioned nova.api.openstack.placement.lib.RequestGroup object. """ # Version 1.0: Initial version # Version 1.1: add requester_id and provider_uuids fields # Version 1.2: add in_tree field # Version 1.3: Add forbidden_aggregates field VERSION = '1.3' fields = { 'use_same_provider': fields.BooleanField(default=True), 'resources': fields.DictOfIntegersField(default={}), 'required_traits': fields.SetOfStringsField(default=set()), 'forbidden_traits': fields.SetOfStringsField(default=set()), # The aggregates field has a form of # [[aggregate_UUID1], # [aggregate_UUID2, aggregate_UUID3]] # meaning that the request should be fulfilled from an RP that is a # member of the aggregate aggregate_UUID1 and member of the aggregate # aggregate_UUID2 or aggregate_UUID3 . 'aggregates': fields.ListOfListsOfStringsField(default=[]), # The forbidden_aggregates field has a form of # set(['aggregate_UUID1', 'aggregate_UUID12', 'aggregate_UUID3']) # meaning that the request should not be fulfilled from an RP # belonging to any of the aggregates in forbidden_aggregates field. 'forbidden_aggregates': fields.SetOfStringsField(default=set()), # The entity the request is coming from (e.g. 
the Neutron port uuid) # which may not always be a UUID. 'requester_id': fields.StringField(nullable=True, default=None), # The resource provider UUIDs that together fulfill the request # NOTE(gibi): this can be more than one if this is the unnumbered # request group (i.e. use_same_provider=False) 'provider_uuids': fields.ListOfUUIDField(default=[]), 'in_tree': fields.UUIDField(nullable=True, default=None), } @classmethod def from_port_request(cls, context, port_uuid, port_resource_request): """Init the group from the resource request of a neutron port :param context: the request context :param port_uuid: the port requesting the resources :param port_resource_request: the resource_request attribute of the neutron port For example: port_resource_request = { "resources": { "NET_BW_IGR_KILOBIT_PER_SEC": 1000, "NET_BW_EGR_KILOBIT_PER_SEC": 1000}, "required": ["CUSTOM_PHYSNET_2", "CUSTOM_VNIC_TYPE_NORMAL"] } """ # NOTE(gibi): Assumptions: # * a port requests resource from a single provider. # * a port only specifies resources and required traits # NOTE(gibi): Placement rejects allocation candidates where a request # group has traits but no resources specified. This is why resources # are handled as mandatory below but not traits. obj = cls(context=context, use_same_provider=True, resources=port_resource_request['resources'], required_traits=set(port_resource_request.get( 'required', [])), requester_id=port_uuid) obj.obj_set_defaults() return obj @classmethod def from_extended_port_request(cls, context, port_resource_request): """Create the group objects from the resource request of a neutron port :param context: the request context :param port_resource_request: the resource_request attribute of the neutron port For example: port_resource_request = { "request_groups": [ { "id": "required": [CUSTOM_PHYSNET_2, CUSTOM_VNIC_TYPE_NORMAL], "resources":{ NET_PACKET_RATE_KILOPACKET_PER_SEC: 1000 } }, { "id": "required": [CUSTOM_PHYSNET_2, CUSTOM_VNIC_TYPE_NORMAL], "resources": { "NET_BW_IGR_KILOBIT_PER_SEC": 1000, "NET_BW_EGR_KILOBIT_PER_SEC": 1000, }, } ] } """ group_objs = [] for group in port_resource_request.get("request_groups", []): # NOTE(gibi): Placement rejects allocation candidates where a # request group has traits but no resources specified. This is why # resources are handled as mandatory below but not traits. obj = cls( context=context, use_same_provider=True, resources=group['resources'], required_traits=set(group.get('required', [])), requester_id=group['id']) obj.obj_set_defaults() group_objs.append(obj) return group_objs def obj_make_compatible(self, primitive, target_version): super(RequestGroup, self).obj_make_compatible( primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 3): if 'forbidden_aggregates' in primitive: del primitive['forbidden_aggregates'] if target_version < (1, 2): if 'in_tree' in primitive: del primitive['in_tree'] if target_version < (1, 1): if 'requester_id' in primitive: del primitive['requester_id'] if 'provider_uuids' in primitive: del primitive['provider_uuids'] def add_resource(self, rclass, amount): # Validate the class. if not (rclass.startswith(orc.CUSTOM_NAMESPACE) or rclass in orc.STANDARDS): LOG.warning( "Received an invalid ResourceClass '%(key)s' in extra_specs.", {"key": rclass}) return # val represents the amount. Convert to int, or warn and skip. 
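        # Illustrative calls (hypothetical resource classes and amounts):
        #   add_resource('VGPU', '1')          -> resources['VGPU'] = 1
        #   add_resource('CUSTOM_MAGIC', '2')  -> resources['CUSTOM_MAGIC'] = 2
        #   add_resource('VGPU', '-1')         -> warning logged, entry skipped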
try: amount = int(amount) if amount < 0: raise ValueError() except ValueError: LOG.warning( "Resource amounts must be nonnegative integers. Received '%s'", amount) return self.resources[rclass] = amount def add_trait(self, trait_name, trait_type): # Currently the only valid values for a trait entry are 'required' # and 'forbidden' trait_vals = ('required', 'forbidden') if trait_type == 'required': self.required_traits.add(trait_name) elif trait_type == 'forbidden': self.forbidden_traits.add(trait_name) else: LOG.warning( "Only (%(tvals)s) traits are supported. Received '%(val)s'.", {"tvals": ', '.join(trait_vals), "val": trait_type}) def is_empty(self): return not any(( self.resources, self.required_traits, self.forbidden_traits, self.aggregates, self.forbidden_aggregates)) def strip_zeros(self): """Remove any resources whose amount is zero.""" for rclass in list(self.resources): if self.resources[rclass] == 0: self.resources.pop(rclass) def to_queryparams(self): """Convert the RequestGroup to placement allocation candidates query parameters. """ # NOTE(efried): The sorting herein is not necessary for the API; it is # to make testing easier and logging/debugging predictable. res = self.resources required_traits = self.required_traits forbidden_traits = self.forbidden_traits aggregates = self.aggregates in_tree = self.in_tree forbidden_aggregates = self.forbidden_aggregates suffix = self.requester_id or '' resource_query = ",".join( sorted("%s:%s" % (rc, amount) for (rc, amount) in res.items())) qs_params = [('resources%s' % suffix, resource_query)] # Assemble required and forbidden traits, allowing for either/both # to be empty. required_val = ','.join( sorted(required_traits) + ['!%s' % ft for ft in sorted(forbidden_traits)]) if required_val: qs_params.append(('required%s' % suffix, required_val)) if aggregates: aggs = [] # member_of$S is a list of lists. We need a tuple of # ('member_of$S', 'in:uuid,uuid,...') for each inner list. for agglist in aggregates: aggs.append(('member_of%s' % suffix, 'in:' + ','.join(sorted(agglist)))) qs_params.extend(sorted(aggs)) if in_tree: qs_params.append(('in_tree%s' % suffix, in_tree)) if forbidden_aggregates: # member_of$S is a list of aggregate uuids. We need a # tuple of ('member_of$S, '!in:uuid,uuid,...'). forbidden_aggs = '!in:' + ','.join( sorted(forbidden_aggregates)) qs_params.append(('member_of%s' % suffix, forbidden_aggs)) return qs_params @base.NovaObjectRegistry.register class RequestLevelParams(base.NovaObject): """Options destined for the "top level" of the placement allocation candidates query (parallel to, but not including, the list of RequestGroup). """ # Version 1.0: Initial version # Version 1.1: Add same_subtree field VERSION = '1.1' fields = { # Traits required on the root provider 'root_required': fields.SetOfStringsField(default=set()), # Traits forbidden on the root provider 'root_forbidden': fields.SetOfStringsField(default=set()), # Lists of request group suffixes that needs to be allocated from the # same provider subtree 'same_subtree': fields.ListOfListsOfStringsField(default=list()), # NOTE(efried): group_policy would be appropriate to include here, once # we have a use case for it. 
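        # Illustrative example (hypothetical suffixes): a value such as
        #   same_subtree = [['port1-vf', 'port1-bw']]
        # means the request groups suffixed 'port1-vf' and 'port1-bw' must
        # be satisfied by providers within the same subtree in placement.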
} def obj_load_attr(self, attrname): self.obj_set_defaults(attrname) def obj_make_compatible(self, primitive, target_version): super().obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 1): if 'same_subtree' in primitive: del primitive['same_subtree'] @classmethod def from_port_request(cls, port_resource_request): """Extracts request level global parameters from the resource_request of a neutron port. Neutron only uses same_subtree at the moment. """ same_subtree = port_resource_request.get('same_subtree') if same_subtree: # NOTE(gibi): A single port only has a single list of groups # requesting same subtree, but the RequestLevelParams maintains a # list of such subtree requests return cls(same_subtree=[same_subtree]) return cls() def extend_with(self, other_req_lvl_params): """Extends the existing object with parameter values from another RequestLevelParams object. That new set of requirements are connected with the existing ones with a logical AND operation. """ self.root_required = self.root_required.union( other_req_lvl_params.root_required) self.root_forbidden = self.root_forbidden.union( other_req_lvl_params.root_forbidden) self.same_subtree.extend(other_req_lvl_params.same_subtree) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/resource.py0000664000175000017500000000711600000000000017364 0ustar00zuulzuul00000000000000# Copyright 2019 Intel Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from nova.db.main import api as db from nova.objects import base from nova.objects import fields @base.NovaObjectRegistry.register class ResourceMetadata(base.NovaObject): # Version 1.0: Initial version VERSION = "1.0" # This is parent object of specific resources. # And it's used to be a object field of Resource, # that is to say Resource.metadata. 
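    # For example, LibvirtVPMEMDevice further down in this module subclasses
    # ResourceMetadata to carry the backend vpmem namespace details (label,
    # name, size, devpath, align) for a Resource whose resource class is
    # CUSTOM_PMEM_NAMESPACE_$LABEL.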
def __eq__(self, other): return base.all_things_equal(self, other) def __ne__(self, other): return not (self == other) @base.NovaObjectRegistry.register class Resource(base.NovaObject): # Version 1.0: Initial version VERSION = "1.0" fields = { # UUID of resource provider 'provider_uuid': fields.UUIDField(), # resource class of the Resource 'resource_class': fields.ResourceClassField(), # identifier is used to identify resource, it is up to virt drivers # for mdev, it will be a UUID, for vpmem, it's backend namespace name 'identifier': fields.StringField(), # metadata is used to contain virt driver specific resource info 'metadata': fields.ObjectField('ResourceMetadata', subclasses=True), } def __eq__(self, other): return base.all_things_equal(self, other) def __ne__(self, other): return not (self == other) def __hash__(self): metadata = self.metadata if 'metadata' in self else None return hash((self.provider_uuid, self.resource_class, self.identifier, metadata)) @base.NovaObjectRegistry.register class ResourceList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version VERSION = "1.0" fields = { 'objects': fields.ListOfObjectsField('Resource'), } @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_extra = db.instance_extra_get_by_instance_uuid( context, instance_uuid, columns=['resources']) if not db_extra or db_extra['resources'] is None: return None primitive = jsonutils.loads(db_extra['resources']) resources = cls.obj_from_primitive(primitive) return resources @base.NovaObjectRegistry.register class LibvirtVPMEMDevice(ResourceMetadata): # Version 1.0: Initial version VERSION = "1.0" fields = { # This is configured in file, used to generate resource class name # CUSTOM_PMEM_NAMESPACE_$LABEL 'label': fields.StringField(), # Backend pmem namespace's name 'name': fields.StringField(), # Backend pmem namespace's size 'size': fields.IntegerField(), # Backend device path 'devpath': fields.StringField(), # Backend pmem namespace's alignment 'align': fields.IntegerField(), } def __hash__(self): # Be sure all fields are set before using hash method return hash((self.label, self.name, self.size, self.devpath, self.align)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/security_group.py0000664000175000017500000001570300000000000020621 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# TODO(stephenfin): This is all nova-network related and can be deleted as soon # as we remove the 'security_group' field from the 'Instance' object from oslo_utils import uuidutils from oslo_utils import versionutils from nova.db.main import api as db from nova.db.main import models from nova import objects from nova.objects import base from nova.objects import fields @base.NovaObjectRegistry.register class SecurityGroup(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version # Version 1.1: String attributes updated to support unicode # Version 1.2: Added uuid field for Neutron security groups. VERSION = '1.2' fields = { 'id': fields.IntegerField(), 'name': fields.StringField(), 'description': fields.StringField(), 'user_id': fields.StringField(), 'project_id': fields.StringField(), # The uuid field is only used for Neutron security groups and is not # persisted to the Nova database. 'uuid': fields.UUIDField() } def obj_make_compatible(self, primitive, target_version): target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 2) and 'uuid' in primitive: del primitive['uuid'] @staticmethod def _from_db_object(context, secgroup, db_secgroup): for field in secgroup.fields: if field != 'uuid': setattr(secgroup, field, db_secgroup[field]) secgroup._context = context secgroup.obj_reset_changes() return secgroup @base.remotable_classmethod def get(cls, context, secgroup_id): db_secgroup = db.security_group_get(context, secgroup_id) return cls._from_db_object(context, cls(), db_secgroup) @base.remotable_classmethod def get_by_name(cls, context, project_id, group_name): db_secgroup = db.security_group_get_by_name(context, project_id, group_name) return cls._from_db_object(context, cls(), db_secgroup) @base.remotable def in_use(self): return db.security_group_in_use(self._context, self.id) @base.remotable def save(self): updates = self.obj_get_changes() # We don't store uuid in the Nova database so remove it if someone # mistakenly tried to save a neutron security group object. We only # need the uuid in the object for obj_to_primitive() calls where this # object is serialized and stored in the RequestSpec object. 
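        # For example, a neutron group built by make_secgroup_list() below
        # only has 'uuid' set; after the pop() there are no updates left, so
        # save() is effectively a no-op for such objects.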
updates.pop('uuid', None) if updates: db_secgroup = db.security_group_update(self._context, self.id, updates) self._from_db_object(self._context, self, db_secgroup) self.obj_reset_changes() @base.remotable def refresh(self): self._from_db_object(self._context, self, db.security_group_get(self._context, self.id)) @property def identifier(self): return self.uuid if 'uuid' in self else self.name @base.NovaObjectRegistry.register class SecurityGroupList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # SecurityGroup <= version 1.1 # Version 1.1: Added get_counts() for quotas VERSION = '1.1' fields = { 'objects': fields.ListOfObjectsField('SecurityGroup'), } def __init__(self, *args, **kwargs): super(SecurityGroupList, self).__init__(*args, **kwargs) self.objects = [] self.obj_reset_changes() @staticmethod @db.pick_context_manager_reader def _get_counts_from_db(context, project_id, user_id=None): query = context.session.query(models.SecurityGroup.id).\ filter_by(deleted=0).\ filter_by(project_id=project_id) counts = {} counts['project'] = {'security_groups': query.count()} if user_id: query = query.filter_by(user_id=user_id) counts['user'] = {'security_groups': query.count()} return counts @base.remotable_classmethod def get_all(cls, context): groups = db.security_group_get_all(context) return base.obj_make_list(context, cls(context), objects.SecurityGroup, groups) @base.remotable_classmethod def get_by_project(cls, context, project_id): groups = db.security_group_get_by_project(context, project_id) return base.obj_make_list(context, cls(context), objects.SecurityGroup, groups) @base.remotable_classmethod def get_by_instance(cls, context, instance): groups = db.security_group_get_by_instance(context, instance.uuid) return base.obj_make_list(context, cls(context), objects.SecurityGroup, groups) @base.remotable_classmethod def get_counts(cls, context, project_id, user_id=None): """Get the counts of SecurityGroup objects in the database. :param context: The request context for database access :param project_id: The project_id to count across :param user_id: The user_id to count across :returns: A dict containing the project-scoped counts and user-scoped counts if user_id is specified. For example: {'project': {'security_groups': }, 'user': {'security_groups': }} """ return cls._get_counts_from_db(context, project_id, user_id=user_id) def make_secgroup_list(security_groups): """A helper to make security group objects from a list of names or uuids. Note that this does not make them save-able or have the rest of the attributes they would normally have, but provides a quick way to fill, for example, an instance object during create. """ secgroups = objects.SecurityGroupList() secgroups.objects = [] for sg in security_groups: secgroup = objects.SecurityGroup() if uuidutils.is_uuid_like(sg): # This is a neutron security group uuid so store in the uuid field. secgroup.uuid = sg else: # This is neutron's special 'default' security group secgroup.name = sg secgroups.objects.append(secgroup) return secgroups ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/selection.py0000664000175000017500000001162500000000000017522 0ustar00zuulzuul00000000000000# Copyright (c) 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from oslo_utils import versionutils from oslo_versionedobjects import base as ovo_base from oslo_versionedobjects import fields from nova import conf from nova import objects from nova.objects import base from nova.scheduler.filters import utils as filter_utils CONF = conf.CONF @base.NovaObjectRegistry.register class Selection(base.NovaObject, ovo_base.ComparableVersionedObject): """Represents a destination that has been selected by the Scheduler. Note that these objects are not persisted to the database. """ # Version 1.0: Initial version # Version 1.1: Added availability_zone field. VERSION = "1.1" fields = { "compute_node_uuid": fields.UUIDField(), "service_host": fields.StringField(), "nodename": fields.StringField(), "cell_uuid": fields.UUIDField(), "limits": fields.ObjectField("SchedulerLimits", nullable=True), # An allocation_request is a non-trivial dict, and so it will be stored # as an encoded string. "allocation_request": fields.StringField(nullable=True), "allocation_request_version": fields.StringField(nullable=True), # The availability_zone represents the AZ the service_host is in at # the time of scheduling. This is nullable for two reasons: # 1. The Instance.availability_zone field is nullable - though that's # not a great reason, the bigger reason is: # 2. The host may not be in an AZ, and CONF.default_availability_zone # is a StrOpt which technically could be set to None, so we have to # account for it being a None value (rather than just not setting # the field). 'availability_zone': fields.StringField(nullable=True), } def obj_make_compatible(self, primitive, target_version): super(Selection, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 1): primitive.pop('availability_zone', None) @classmethod def from_host_state(cls, host_state, allocation_request=None, allocation_request_version=None): """A convenience method for converting a HostState, an allocation_request, and an allocation_request_version into a Selection object. Note that allocation_request and allocation_request_version must be passed separately, as they are not part of the HostState. """ allocation_request_json = jsonutils.dumps(allocation_request) limits = objects.SchedulerLimits.from_dict(host_state.limits) metadata = filter_utils.aggregate_metadata_get_by_host( host_state, key='availability_zone') availability_zone = metadata.get('availability_zone') if availability_zone: # aggregate_metadata_get_by_host returns a set for the value but # a host can only be in one AZ. 
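            # Illustrative value: metadata looks like
            #   {'availability_zone': {'az1'}}
            # so the single element is extracted below as the string 'az1'.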
availability_zone = list(availability_zone)[0] else: availability_zone = CONF.default_availability_zone return cls(compute_node_uuid=host_state.uuid, service_host=host_state.host, nodename=host_state.nodename, cell_uuid=host_state.cell_uuid, limits=limits, allocation_request=allocation_request_json, allocation_request_version=allocation_request_version, availability_zone=availability_zone) def to_dict(self): if self.limits is not None: limits = self.limits.to_dict() else: limits = {} # The NUMATopologyFilter can set 'numa_topology' in the limits dict to # a NUMATopologyLimits object which we need to convert to a primitive # before this hits jsonutils.to_primitive(). We only check for that # known case specifically as we don't care about handling out of tree # filters or drivers injecting non-serializable things in the limits # dict. numa_limit = limits.get("numa_topology") if numa_limit is not None: limits['numa_topology'] = numa_limit.obj_to_primitive() return { 'host': self.service_host, 'nodename': self.nodename, 'limits': limits, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/service.py0000664000175000017500000007734000000000000017203 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import uuidutils from oslo_utils import versionutils from nova import availability_zones from nova import context as nova_context from nova.db.main import api as db from nova import exception from nova.notifications.objects import base as notification from nova.notifications.objects import service as service_notification from nova import objects from nova.objects import base from nova.objects import fields LOG = logging.getLogger(__name__) # NOTE(danms): This is the global service version counter SERVICE_VERSION = 70 # NOTE(danms): This is our SERVICE_VERSION history. The idea is that any # time we bump the version, we will put an entry here to record the change, # along with any pertinent data. For things that we can programmatically # detect that need a bump, we put something in _collect_things() below to # assemble a dict of things we can check. For example, we pretty much always # want to consider the compute RPC API version a thing that requires a service # bump so that we can drive version pins from it. We could include other # service RPC versions at some point, minimum object versions, etc. # # The TestServiceVersion test will fail if the calculated set of # things differs from the value in the last item of the list below, # indicating that a version bump is needed. # # Also note that there are other reasons we may want to bump this, # which will not be caught by the test. An example of this would be # triggering (or disabling) an online data migration once all services # in the cluster are at the same level. 
# # If a version bump is required for something mechanical, just document # that generic thing here (like compute RPC version bumps). No need to # replicate the details from compute/rpcapi.py here. However, for more # complex service interactions, extra detail should be provided SERVICE_VERSION_HISTORY = ( # Version 0: Pre-history {'compute_rpc': '4.0'}, # Version 1: Introduction of SERVICE_VERSION {'compute_rpc': '4.4'}, # Version 2: Compute RPC version 4.5 {'compute_rpc': '4.5'}, # Version 3: Compute RPC version 4.6 {'compute_rpc': '4.6'}, # Version 4: Add PciDevice.parent_addr (data migration needed) {'compute_rpc': '4.6'}, # Version 5: Compute RPC version 4.7 {'compute_rpc': '4.7'}, # Version 6: Compute RPC version 4.8 {'compute_rpc': '4.8'}, # Version 7: Compute RPC version 4.9 {'compute_rpc': '4.9'}, # Version 8: Compute RPC version 4.10 {'compute_rpc': '4.10'}, # Version 9: Compute RPC version 4.11 {'compute_rpc': '4.11'}, # Version 10: Compute node conversion to Inventories {'compute_rpc': '4.11'}, # Version 11: Compute RPC version 4.12 {'compute_rpc': '4.12'}, # Version 12: The network APIs and compute manager support a NetworkRequest # object where the network_id value is 'auto' or 'none'. BuildRequest # objects are populated by nova-api during instance boot. {'compute_rpc': '4.12'}, # Version 13: Compute RPC version 4.13 {'compute_rpc': '4.13'}, # Version 14: The compute manager supports setting device tags. {'compute_rpc': '4.13'}, # Version 15: Indicate that nova-conductor will stop a boot if BuildRequest # is deleted before RPC to nova-compute. {'compute_rpc': '4.13'}, # Version 16: Indicate that nova-compute will refuse to start if it doesn't # have a placement section configured. {'compute_rpc': '4.13'}, # Version 17: Add 'reserve_volume' to the boot from volume flow and # remove 'check_attach'. The service version bump is needed to fall back to # the old check in the API as the old computes fail if the volume is moved # to 'attaching' state by reserve. {'compute_rpc': '4.13'}, # Version 18: Compute RPC version 4.14 {'compute_rpc': '4.14'}, # Version 19: Compute RPC version 4.15 {'compute_rpc': '4.15'}, # Version 20: Compute RPC version 4.16 {'compute_rpc': '4.16'}, # Version 21: Compute RPC version 4.17 {'compute_rpc': '4.17'}, # Version 22: A marker for the behaviour change of auto-healing code on the # compute host regarding allocations against an instance {'compute_rpc': '4.17'}, # Version 23: Compute hosts allow pre-creation of the migration object # for cold migration. {'compute_rpc': '4.18'}, # Version 24: Add support for Cinder v3 attach/detach API. {'compute_rpc': '4.18'}, # Version 25: Compute hosts allow migration-based allocations # for live migration. {'compute_rpc': '4.18'}, # Version 26: Adds a 'host_list' parameter to build_and_run_instance() {'compute_rpc': '4.19'}, # Version 27: Compute RPC version 4.20; adds multiattach argument to # reserve_block_device_name(). {'compute_rpc': '4.20'}, # Version 28: Adds a 'host_list' parameter to prep_resize() {'compute_rpc': '4.21'}, # Version 29: Compute RPC version 4.22 {'compute_rpc': '4.22'}, # Version 30: Compute RPC version 5.0 {'compute_rpc': '5.0'}, # Version 31: The compute manager checks if 'trusted_certs' are supported {'compute_rpc': '5.0'}, # Version 32: Add 'file_backed_memory' support. 
The service version bump is # needed to allow the destination of a live migration to reject the # migration if 'file_backed_memory' is enabled and the source does not # support 'file_backed_memory' {'compute_rpc': '5.0'}, # Version 33: Add support for check on the server group with # 'max_server_per_host' rules {'compute_rpc': '5.0'}, # Version 34: Adds support to abort queued/preparing live migrations. {'compute_rpc': '5.0'}, # Version 35: Indicates that nova-compute supports live migration with # ports bound early on the destination host using VIFMigrateData. {'compute_rpc': '5.0'}, # Version 36: Indicates that nova-compute supports specifying volume # type when booting a volume-backed server. {'compute_rpc': '5.0'}, # Version 37: prep_resize takes a RequestSpec object {'compute_rpc': '5.1'}, # Version 38: set_host_enabled reflects COMPUTE_STATUS_DISABLED trait {'compute_rpc': '5.1'}, # Version 39: resize_instance, finish_resize, revert_resize, # finish_revert_resize, unshelve_instance takes a RequestSpec object {'compute_rpc': '5.2'}, # Version 40: Add migration and limits parameters to # check_can_live_migrate_destination(), new # drop_move_claim_at_destination() method, and numa_live_migration # parameter to check_can_live_migrate_source() {'compute_rpc': '5.3'}, # Version 41: Add cache_images() to compute rpcapi (version 5.4) {'compute_rpc': '5.4'}, # Version 42: Compute RPC version 5.5; +prep_snapshot_based_resize_at_dest {'compute_rpc': '5.5'}, # Version 43: Compute RPC version 5.6: prep_snapshot_based_resize_at_source {'compute_rpc': '5.6'}, # Version 44: Compute RPC version 5.7: finish_snapshot_based_resize_at_dest {'compute_rpc': '5.7'}, # Version 45: Compute RPC v5.8: confirm_snapshot_based_resize_at_source {'compute_rpc': '5.8'}, # Version 46: Compute RPC v5.9: revert_snapshot_based_resize_at_dest {'compute_rpc': '5.9'}, # Version 47: Compute RPC v5.10: # finish_revert_snapshot_based_resize_at_source {'compute_rpc': '5.10'}, # Version 48: Drivers report COMPUTE_SAME_HOST_COLD_MIGRATE trait. {'compute_rpc': '5.10'}, # Version 49: Compute now support server move operations with qos ports {'compute_rpc': '5.10'}, # Version 50: Compute RPC v5.11: # Add accel_uuids (accelerator requests) param to build_and_run_instance {'compute_rpc': '5.11'}, # Version 51: Add support for live migration with vpmem {'compute_rpc': '5.11'}, # Version 52: Add support for the 'mixed' CPU allocation policy {'compute_rpc': '5.11'}, # Version 53: Compute RPC v5.12: # Add accel_uuids (accelerator requests) param to rebuild_instance {'compute_rpc': '5.12'}, # Version 54: Compute RPC v5.13: # Add accel_uuids (accelerator requests) param to shelve_instance and # shelve_offload_instance and unshelve_instance {'compute_rpc': '5.13'}, # Version 55: Compute RPC v5.13: # Add support for qos interface attach {'compute_rpc': '5.13'}, # Version 56: Compute RPC v6.0: {'compute_rpc': '6.0'}, # Version 57: Compute RPC v6.0: # Add support for vnic 'accelerator-direct'. 
{'compute_rpc': '6.0'}, # Version 58: Compute RPC v6.0: # Add support for booting with neutron extended resource request {'compute_rpc': '6.0'}, # Version 59: Compute RPC v6.0: # Add support for server move operations with neutron extended resource # request {'compute_rpc': '6.0'}, # Version 60: Compute RPC v6.0: # Add support for interface attach operation with neutron extended resource # request {'compute_rpc': '6.0'}, # Version 61: Compute RPC v6.0: # Add support for remotely-managed ports (vnic-type 'remote-managed') {'compute_rpc': '6.0'}, # Version 62: Compute RPC v6.0: # Add support for VDPA port attach/detach {'compute_rpc': '6.0'}, # Version 63: Compute RPC v6.0: # Add support for VDPA hotplug live migration and suspend/resume {'compute_rpc': '6.0'}, # Version 64: Compute RPC v6.1: # Add reimage_boot_volume parameter to rebuild_instance() {'compute_rpc': '6.1'}, # Version 65: Compute RPC v6.1: # Added stable local node identity {'compute_rpc': '6.1'}, # Version 66: Compute RPC v6.2: # Add target_state parameter to rebuild_instance() {'compute_rpc': '6.2'}, # Version 67: Compute RPC v6.3: # Add delete_attachment parameter to remove_volume_connection() {'compute_rpc': '6.3'}, # Version 68: Compute RPC v6.4: # Add support for shares {'compute_rpc': '6.4'}, # Version 69: Compute RPC v6.4: # Compute manager supports sound model traits {'compute_rpc': '6.4'}, # Version 70: Compute RPC v6.4: # Compute manager supports USB controller model traits {'compute_rpc': '6.4'}, ) # This is the version after which we can rely on having a persistent # local node identity for single-node systems. NODE_IDENTITY_VERSION = 65 # This is used to raise an error at service startup if older than supported # computes are detected. # NOTE(sbauza) : Please modify it this way : # * At the beginning of a non-SLURP release (eg. 2023.2 Bobcat) (or just after # the previous SLURP release RC1, like 2023.1 Antelope), please bump # OLDEST_SUPPORTED_SERVICE_VERSION to the previous SLURP release (in that # example, Antelope) # * At the beginning of a SLURP release (eg. 2024.1 C) (or just after the # previous non-SLURP release RC1, like 2023.2 Bobcat), please keep the # OLDEST_SUPPORTED_SERVICE_VERSION value using the previous SLURP release # (in that example, Antelope) # * At the end of any release (SLURP or non-SLURP), please modify # SERVICE_VERSION_ALIASES to add a key/value with key being the release name # and value be the latest service version that the release supports (for # example, before Bobcat RC1, please add 'Bobcat': XX where X is the latest # servion version that was added) # NOTE(sbauza): The check for the minimum supported compute service version at # service startup can be skipped by setting the # [workarounds]/[workarounds]/disable_compute_service_check_for_ffu # configuration option to True, like grenade-skip-level jobs do. 
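# For example, with the values below (Epoxy aliased to service version 68),
# compute services reporting a version older than 68 will fail the minimum
# version check at startup unless the workaround option mentioned above is
# enabled.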
OLDEST_SUPPORTED_SERVICE_VERSION = 'Epoxy' SERVICE_VERSION_ALIASES = { 'Victoria': 52, 'Wallaby': 54, 'Xena': 57, 'Yoga': 61, 'Zed': 64, 'Antelope': 66, 'Bobcat': 66, 'Caracal': 66, 'Dalmatian': 67, 'Epoxy': 68, 'Flamingo': 70, } # TODO(berrange): Remove NovaObjectDictCompat @base.NovaObjectRegistry.register class Service(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Added compute_node nested object # Version 1.2: String attributes updated to support unicode # Version 1.3: ComputeNode version 1.5 # Version 1.4: Added use_slave to get_by_compute_host # Version 1.5: ComputeNode version 1.6 # Version 1.6: ComputeNode version 1.7 # Version 1.7: ComputeNode version 1.8 # Version 1.8: ComputeNode version 1.9 # Version 1.9: ComputeNode version 1.10 # Version 1.10: Changes behaviour of loading compute_node # Version 1.11: Added get_by_host_and_binary # Version 1.12: ComputeNode version 1.11 # Version 1.13: Added last_seen_up # Version 1.14: Added forced_down # Version 1.15: ComputeNode version 1.12 # Version 1.16: Added version # Version 1.17: ComputeNode version 1.13 # Version 1.18: ComputeNode version 1.14 # Version 1.19: Added get_minimum_version() # Version 1.20: Added get_minimum_version_multi() # Version 1.21: Added uuid # Version 1.22: Added get_by_uuid() VERSION = '1.22' fields = { 'id': fields.IntegerField(read_only=True), 'uuid': fields.UUIDField(), 'host': fields.StringField(nullable=True), 'binary': fields.StringField(nullable=True), 'topic': fields.StringField(nullable=True), 'report_count': fields.IntegerField(), 'disabled': fields.BooleanField(), 'disabled_reason': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'compute_node': fields.ObjectField('ComputeNode'), 'last_seen_up': fields.DateTimeField(nullable=True), 'forced_down': fields.BooleanField(), 'version': fields.IntegerField(), } _MIN_VERSION_CACHE = {} _SERVICE_VERSION_CACHING = False def __init__(self, *args, **kwargs): # NOTE(danms): We're going against the rules here and overriding # init. The reason is that we want to *ensure* that we're always # setting the current service version on our objects, overriding # whatever else might be set in the database, or otherwise (which # is the normal reason not to override init). # # We also need to do this here so that it's set on the client side # all the time, such that create() and save() operations will # include the current service version. 
if 'version' in kwargs: raise exception.ObjectActionError( action='init', reason='Version field is immutable') super(Service, self).__init__(*args, **kwargs) self.version = SERVICE_VERSION def obj_make_compatible_from_manifest(self, primitive, target_version, version_manifest): super(Service, self).obj_make_compatible_from_manifest( primitive, target_version, version_manifest) _target_version = versionutils.convert_version_to_tuple(target_version) if _target_version < (1, 21) and 'uuid' in primitive: del primitive['uuid'] if _target_version < (1, 16) and 'version' in primitive: del primitive['version'] if _target_version < (1, 14) and 'forced_down' in primitive: del primitive['forced_down'] if _target_version < (1, 13) and 'last_seen_up' in primitive: del primitive['last_seen_up'] if _target_version < (1, 10): # service.compute_node was not lazy-loaded, we need to provide it # when called self._do_compute_node(self._context, primitive, version_manifest) def _do_compute_node(self, context, primitive, version_manifest): try: target_version = version_manifest['ComputeNode'] # NOTE(sbauza): Ironic deployments can have multiple # nodes for the same service, but for keeping same behaviour, # returning only the first elem of the list compute = objects.ComputeNodeList.get_all_by_host( context, primitive['host'])[0] except Exception: return primitive['compute_node'] = compute.obj_to_primitive( target_version=target_version, version_manifest=version_manifest) @staticmethod def _from_db_object(context, service, db_service): allow_missing = ('availability_zone',) for key in service.fields: if key in allow_missing and key not in db_service: continue if key == 'compute_node': # NOTE(sbauza); We want to only lazy-load compute_node continue elif key == 'version': # NOTE(danms): Special handling of the version field, since # it is read_only and set in our init. setattr(service, base.get_attrname(key), db_service[key]) elif key == 'uuid' and not db_service.get(key): # Leave uuid off the object if undefined in the database # so that it will be generated below. 
continue else: service[key] = db_service[key] service._context = context service.obj_reset_changes() return service @base.lazy_load_counter def obj_load_attr(self, attrname): if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) LOG.debug("Lazy-loading '%(attr)s' on %(name)s id %(id)s", {'attr': attrname, 'name': self.obj_name(), 'id': self.id, }) if attrname != 'compute_node': raise exception.ObjectActionError( action='obj_load_attr', reason='attribute %s not lazy-loadable' % attrname) if self.binary == 'nova-compute': # Only n-cpu services have attached compute_node(s) compute_nodes = objects.ComputeNodeList.get_all_by_host( self._context, self.host) else: # NOTE(sbauza); Previous behaviour was raising a ServiceNotFound, # we keep it for backwards compatibility raise exception.ServiceNotFound(service_id=self.id) # NOTE(sbauza): Ironic deployments can have multiple nodes # for the same service, but for keeping same behaviour, returning only # the first elem of the list self.compute_node = compute_nodes[0] @base.remotable_classmethod def get_by_id(cls, context, service_id): db_service = db.service_get(context, service_id) return cls._from_db_object(context, cls(), db_service) @base.remotable_classmethod def get_by_uuid(cls, context, service_uuid): db_service = db.service_get_by_uuid(context, service_uuid) return cls._from_db_object(context, cls(), db_service) @base.remotable_classmethod def get_by_host_and_topic(cls, context, host, topic): db_service = db.service_get_by_host_and_topic(context, host, topic) return cls._from_db_object(context, cls(), db_service) @base.remotable_classmethod def get_by_host_and_binary(cls, context, host, binary): try: db_service = db.service_get_by_host_and_binary(context, host, binary) except exception.HostBinaryNotFound: return return cls._from_db_object(context, cls(), db_service) @staticmethod @db.select_db_reader_mode def _db_service_get_by_compute_host(context, host, use_slave=False): return db.service_get_by_compute_host(context, host) @base.remotable_classmethod def get_by_compute_host(cls, context, host, use_slave=False): db_service = cls._db_service_get_by_compute_host(context, host, use_slave=use_slave) return cls._from_db_object(context, cls(), db_service) # NOTE(ndipanov): This is deprecated and should be removed on the next # major version bump @base.remotable_classmethod def get_by_args(cls, context, host, binary): db_service = db.service_get_by_host_and_binary(context, host, binary) return cls._from_db_object(context, cls(), db_service) def _check_minimum_version(self): """Enforce that we are not older than the minimum version. This is a loose check to avoid creating or updating our service record if we would do so with a version that is older than the current minimum of all services. This could happen if we were started with older code by accident, either due to a rollback or an old and un-updated node suddenly coming back onto the network. There is technically a race here between the check and the update, but since the minimum version should always roll forward and never backwards, we don't need to worry about doing it atomically. Further, the consequence for getting this wrong is minor, in that we'll just fail to send messages that other services understand. 
""" if not self.obj_attr_is_set('version'): return if not self.obj_attr_is_set('binary'): return minver = self.get_minimum_version(self._context, self.binary) if minver > self.version: raise exception.ServiceTooOld(thisver=self.version, minver=minver) @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') self._check_minimum_version() updates = self.obj_get_changes() if 'uuid' not in updates: updates['uuid'] = uuidutils.generate_uuid() self.uuid = updates['uuid'] db_service = db.service_create(self._context, updates) self._from_db_object(self._context, self, db_service) self._send_notification(fields.NotificationAction.CREATE) @base.remotable def save(self): updates = self.obj_get_changes() updates.pop('id', None) self._check_minimum_version() db_service = db.service_update(self._context, self.id, updates) self._from_db_object(self._context, self, db_service) self._send_status_update_notification(updates) def _send_status_update_notification(self, updates): # Note(gibi): We do not trigger notification on version as that field # is always dirty, which would cause that nova sends notification on # every other field change. See the comment in save() too. if set(updates.keys()).intersection( {'disabled', 'disabled_reason', 'forced_down'}): self._send_notification(fields.NotificationAction.UPDATE) def _send_notification(self, action): payload = service_notification.ServiceStatusPayload(self) service_notification.ServiceStatusNotification( publisher=notification.NotificationPublisher.from_service_obj( self), event_type=notification.EventType( object='service', action=action), priority=fields.NotificationPriority.INFO, payload=payload).emit(self._context) @base.remotable def destroy(self): db.service_destroy(self._context, self.id) self._send_notification(fields.NotificationAction.DELETE) @classmethod def enable_min_version_cache(cls): cls.clear_min_version_cache() cls._SERVICE_VERSION_CACHING = True @classmethod def clear_min_version_cache(cls): cls._MIN_VERSION_CACHE = {} @staticmethod @db.select_db_reader_mode def _db_service_get_minimum_version(context, binaries, use_slave=False): return db.service_get_minimum_version(context, binaries) @base.remotable_classmethod def get_minimum_version_multi(cls, context, binaries, use_slave=False): if not all(binary.startswith('nova-') for binary in binaries): LOG.warning('get_minimum_version called with likely-incorrect ' 'binaries `%s\'', ','.join(binaries)) raise exception.ObjectActionError(action='get_minimum_version', reason='Invalid binary prefix') if (not cls._SERVICE_VERSION_CACHING or any(binary not in cls._MIN_VERSION_CACHE for binary in binaries)): min_versions = cls._db_service_get_minimum_version( context, binaries, use_slave=use_slave) if min_versions: min_versions = {binary: version or 0 for binary, version in min_versions.items()} cls._MIN_VERSION_CACHE.update(min_versions) else: min_versions = {binary: cls._MIN_VERSION_CACHE[binary] for binary in binaries} if min_versions: version = min(min_versions.values()) else: version = 0 # NOTE(danms): Since our return value is not controlled by object # schema, be explicit here. version = int(version) return version @base.remotable_classmethod def get_minimum_version(cls, context, binary, use_slave=False): return cls.get_minimum_version_multi(context, [binary], use_slave=use_slave) def get_minimum_version_all_cells(context, binaries, require_all=False): """Get the minimum service version, checking all cells. 
This attempts to calculate the minimum service version for a set of binaries across all the cells in the system. If require_all is False, then any cells that fail to report a version will be ignored (assuming they won't be candidates for scheduling and thus excluding them from the minimum version calculation is reasonable). If require_all is True, then a failing cell will cause this to raise exception.CellTimeout, as would be appropriate for gating some data migration until everything is new enough. Note that services that do not report a positive version are excluded from this, as it crosses all cells which will naturally not have all services. """ if not all(binary.startswith('nova-') for binary in binaries): LOG.warning('get_minimum_version_all_cells called with ' 'likely-incorrect binaries `%s\'', ','.join(binaries)) raise exception.ObjectActionError( action='get_minimum_version_all_cells', reason='Invalid binary prefix') # NOTE(danms): Instead of using Service.get_minimum_version_multi(), we # replicate the call directly to the underlying DB method here because # we want to defeat the caching and we need to filter non-present # services differently from the single-cell method. results = nova_context.scatter_gather_all_cells( context, Service._db_service_get_minimum_version, binaries) min_version = None for cell_uuid, result in results.items(): if result is nova_context.did_not_respond_sentinel: LOG.warning('Cell %s did not respond when getting minimum ' 'service version', cell_uuid) if require_all: raise exception.CellTimeout() elif isinstance(result, Exception): LOG.warning('Failed to get minimum service version for cell %s', cell_uuid) if require_all: # NOTE(danms): Okay, this isn't necessarily a timeout, but # it's functionally the same from the caller's perspective # and we logged the fact that it was actually a failure # for the forensic investigator during the scatter/gather # routine. raise exception.CellTimeout() else: # NOTE(danms): Don't consider a zero or None result as the minimum # since we're crossing cells and will likely not have all the # services being probed. relevant_versions = [version for version in result.values() if version] if relevant_versions: min_version_cell = min(relevant_versions) min_version = (min(min_version, min_version_cell) if min_version else min_version_cell) # NOTE(danms): If we got no matches at all (such as at first startup) # then report that as zero to be consistent with the other such # methods. 
return min_version or 0 @base.NovaObjectRegistry.register class ServiceList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Service <= version 1.2 # Version 1.1 Service version 1.3 # Version 1.2: Service version 1.4 # Version 1.3: Service version 1.5 # Version 1.4: Service version 1.6 # Version 1.5: Service version 1.7 # Version 1.6: Service version 1.8 # Version 1.7: Service version 1.9 # Version 1.8: Service version 1.10 # Version 1.9: Added get_by_binary() and Service version 1.11 # Version 1.10: Service version 1.12 # Version 1.11: Service version 1.13 # Version 1.12: Service version 1.14 # Version 1.13: Service version 1.15 # Version 1.14: Service version 1.16 # Version 1.15: Service version 1.17 # Version 1.16: Service version 1.18 # Version 1.17: Service version 1.19 # Version 1.18: Added include_disabled parameter to get_by_binary() # Version 1.19: Added get_all_computes_by_hv_type() VERSION = '1.19' fields = { 'objects': fields.ListOfObjectsField('Service'), } @base.remotable_classmethod def get_by_topic(cls, context, topic): db_services = db.service_get_all_by_topic(context, topic) return base.obj_make_list(context, cls(context), objects.Service, db_services) # NOTE(paul-carlton2): In v2.0 of the object the include_disabled flag # will be removed so both enabled and disabled hosts are returned @base.remotable_classmethod def get_by_binary(cls, context, binary, include_disabled=False): db_services = db.service_get_all_by_binary( context, binary, include_disabled=include_disabled) return base.obj_make_list(context, cls(context), objects.Service, db_services) @base.remotable_classmethod def get_by_host(cls, context, host): db_services = db.service_get_all_by_host(context, host) return base.obj_make_list(context, cls(context), objects.Service, db_services) @base.remotable_classmethod def get_all(cls, context, disabled=None, set_zones=False): db_services = db.service_get_all(context, disabled=disabled) if set_zones: db_services = availability_zones.set_availability_zones( context, db_services) return base.obj_make_list(context, cls(context), objects.Service, db_services) @base.remotable_classmethod def get_all_computes_by_hv_type(cls, context, hv_type): db_services = db.service_get_all_computes_by_hv_type( context, hv_type, include_disabled=False) return base.obj_make_list(context, cls(context), objects.Service, db_services) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/share_mapping.py0000664000175000017500000001640300000000000020351 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
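# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Nova): how the startup gating described
# above can be expressed with SERVICE_VERSION_ALIASES and the minimum version
# gathered from the database. The names and the reduced alias table below are
# hypothetical stand-ins for the real module-level data defined earlier; the
# reported_minimum argument plays the role of the value returned by
# get_minimum_version_all_cells(), where 0 means "nothing reported yet".
# ---------------------------------------------------------------------------

SKETCH_ALIASES = {'Epoxy': 68, 'Flamingo': 70}   # subset of SERVICE_VERSION_ALIASES
SKETCH_OLDEST_SUPPORTED = 'Epoxy'                # mirrors OLDEST_SUPPORTED_SERVICE_VERSION


def sketch_check_old_computes(reported_minimum: int) -> None:
    """Raise if any reporting compute is older than the supported floor."""
    floor = SKETCH_ALIASES[SKETCH_OLDEST_SUPPORTED]
    if reported_minimum and reported_minimum < floor:
        raise RuntimeError(
            'computes are too old: minimum reported service version %d is '
            'below the supported floor %d (%s)'
            % (reported_minimum, floor, SKETCH_OLDEST_SUPPORTED))

# Usage: sketch_check_old_computes(70) and sketch_check_old_computes(0) pass,
# while sketch_check_old_computes(66) raises because 66 is older than Epoxy.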
import logging from oslo_utils import versionutils import nova.conf from nova.db.main import api as db from nova import exception from nova.objects import base from nova.objects import fields from nova.share import manila as manila_api CONF = nova.conf.CONF LOG = logging.getLogger(__name__) EPHEMERAL_FIELDS = [ "access_type", "access_to", "access_key", ] @base.NovaObjectRegistry.register class ShareMapping(base.NovaTimestampObject, base.NovaObject): # Version 1.0: Initial version. # Version 1.1: Add "attaching" and "detaching" to possible values # of status field. # Version 1.2: Add ephemeral fields 'access_type', 'access_to', # 'access_key' to manage CephFS protocol and access. VERSION = '1.2' fields = { 'id': fields.IntegerField(read_only=True), 'uuid': fields.UUIDField(nullable=False), 'instance_uuid': fields.UUIDField(nullable=False), 'share_id': fields.UUIDField(nullable=False), 'status': fields.ShareMappingStatusField(), 'tag': fields.StringField(nullable=False), 'export_location': fields.StringField(nullable=False), 'share_proto': fields.ShareMappingProtoField(), # Next fields are ephemeral 'access_type': fields.StringField(nullable=True), 'access_to': fields.StringField(nullable=True), 'access_key': fields.StringField(nullable=True), } def obj_make_compatible(self, primitive, target_version): super(ShareMapping, self).obj_make_compatible( primitive, target_version ) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 1): status = primitive.get('status') if status in ( fields.ShareMappingStatus.ATTACHING, fields.ShareMappingStatus.DETACHING, ): raise exception.ObjectActionError( action='obj_make_compatible', reason='status=%s not supported in version %s' % ( status, target_version)) @staticmethod def _from_db_object(context, share_mapping, db_share_mapping): for field in share_mapping.fields: if field not in EPHEMERAL_FIELDS: setattr(share_mapping, field, db_share_mapping[field]) share_mapping._context = context share_mapping.obj_reset_changes() return share_mapping @base.remotable def save(self): db_share_mapping = db.share_mapping_update( self._context, self.uuid, self.instance_uuid, self.share_id, self.status, self.tag, self.export_location, self.share_proto) self._from_db_object(self._context, self, db_share_mapping) def create(self): LOG.info( "Attaching share '%s' to instance '%s'.", self.share_id, self.instance_uuid) self.save() @base.remotable def delete(self): LOG.info( "Detaching share '%s' from instance '%s'.", self.share_id, self.instance_uuid, ) db.share_mapping_delete_by_instance_uuid_and_share_id( self._context, self.instance_uuid, self.share_id ) def activate(self): LOG.info( "Share '%s' about to be activated on instance '%s'.", self.share_id, self.instance_uuid) self.status = fields.ShareMappingStatus.ACTIVE self.save() def deactivate(self): LOG.info( "Share '%s' about to be deactivated on instance '%s'.", self.share_id, self.instance_uuid, ) self.status = fields.ShareMappingStatus.INACTIVE self.save() @base.remotable_classmethod def get_by_instance_uuid_and_share_id( cls, context, instance_uuid, share_id): """This query returns only one element as a share can be associated only once with an instance. Note: the REST API prevents the user from creating duplicate share mappings by raising an exception.ShareMappingAlreadyExists. 
""" share_mapping = ShareMapping(context) db_share_mapping = db.share_mapping_get_by_instance_uuid_and_share_id( context, instance_uuid, share_id) if not db_share_mapping: raise exception.ShareNotFound(share_id=share_id) return ShareMapping._from_db_object( context, share_mapping, db_share_mapping) def get_share_host_provider(self): if not self.export_location: return None if self.share_proto == 'NFS': rhost, _ = self.export_location.strip().split(':') else: raise NotImplementedError() return rhost def enhance_with_ceph_credentials(self, context): # Enhance the share_mapping object by adding Ceph # credential information access = manila_api.API().get_access( context, self.share_id, self.access_type, self.access_to ) self.access_key = access.access_key def set_access_according_to_protocol(self): if self.share_proto == fields.ShareMappingProto.NFS: self.access_type = 'ip' self.access_to = CONF.my_shared_fs_storage_ip elif self.share_proto == fields.ShareMappingProto.CEPHFS: self.access_type = 'cephx' self.access_to = 'nova' else: raise exception.ShareProtocolNotSupported( share_proto=self.share_proto ) @base.NovaObjectRegistry.register class ShareMappingList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('ShareMapping'), } @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_share_mappings = db.share_mapping_get_by_instance_uuid( context, instance_uuid) return base.obj_make_list( context, cls(context), ShareMapping, db_share_mappings) @base.remotable_classmethod def get_by_share_id(cls, context, share_id): db_share_mappings = db.share_mapping_get_by_share_id( context, share_id) return base.obj_make_list( context, cls(context), ShareMapping, db_share_mappings) def activate_all(self): for share in self: share.activate() def deactivate_all(self): for share in self: share.deactivate() def contains_error(self): return any( [ share_mapping.status == fields.ShareMappingStatus.ERROR for share_mapping in self ] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/tag.py0000664000175000017500000000474100000000000016311 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova.db.main import api as db from nova import objects from nova.objects import base from nova.objects import fields MAX_TAG_LENGTH = 60 @base.NovaObjectRegistry.register class Tag(base.NovaObject): # Version 1.0: Initial version # Version 1.1: Added method exists() VERSION = '1.1' fields = { 'resource_id': fields.StringField(), 'tag': fields.StringField(), } @staticmethod def _from_db_object(context, tag, db_tag): for key in tag.fields: setattr(tag, key, db_tag[key]) tag.obj_reset_changes() tag._context = context return tag @base.remotable def create(self): db_tag = db.instance_tag_add(self._context, self.resource_id, self.tag) self._from_db_object(self._context, self, db_tag) @base.remotable_classmethod def destroy(cls, context, resource_id, name): db.instance_tag_delete(context, resource_id, name) @base.remotable_classmethod def exists(cls, context, resource_id, name): return db.instance_tag_exists(context, resource_id, name) @base.NovaObjectRegistry.register class TagList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Version 1.1: Tag <= version 1.1 VERSION = '1.1' fields = { 'objects': fields.ListOfObjectsField('Tag'), } @base.remotable_classmethod def get_by_resource_id(cls, context, resource_id): db_tags = db.instance_tag_get_by_instance_uuid(context, resource_id) return base.obj_make_list(context, cls(), objects.Tag, db_tags) @base.remotable_classmethod def create(cls, context, resource_id, tags): db_tags = db.instance_tag_set(context, resource_id, tags) return base.obj_make_list(context, cls(), objects.Tag, db_tags) @base.remotable_classmethod def destroy(cls, context, resource_id): db.instance_tag_delete_all(context, resource_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/task_log.py0000664000175000017500000000604100000000000017334 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
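# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Nova): the _from_db_object() pattern used
# by Tag above (and by most objects in this package) -- every declared field
# is copied from the database row onto the object, then change tracking is
# cleared so a freshly loaded object is not considered dirty. SketchObject is
# a hypothetical, self-contained stand-in for a NovaObject.
# ---------------------------------------------------------------------------

class SketchObject:
    fields = ('resource_id', 'tag')

    def __init__(self):
        self._changed = set()

    def __setattr__(self, name, value):
        super().__setattr__(name, value)
        if name in self.fields:
            self._changed.add(name)

    def obj_reset_changes(self):
        self._changed.clear()

    @classmethod
    def from_db_row(cls, db_row: dict):
        obj = cls()
        for key in cls.fields:
            setattr(obj, key, db_row[key])   # copy every declared field
        obj.obj_reset_changes()              # a loaded object starts clean
        return obj

# Usage: SketchObject.from_db_row({'resource_id': 'uuid-1', 'tag': 'web'})
# returns an object whose change-tracking set is empty.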
from nova.db.main import api as db from nova.objects import base from nova.objects import fields @base.NovaObjectRegistry.register class TaskLog(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.IntegerField(read_only=True), 'task_name': fields.StringField(), 'state': fields.StringField(read_only=True), 'host': fields.StringField(), 'period_beginning': fields.DateTimeField(), 'period_ending': fields.DateTimeField(), 'message': fields.StringField(), 'task_items': fields.IntegerField(), 'errors': fields.IntegerField(), } @staticmethod def _from_db_object(context, task_log, db_task_log): for field in task_log.fields: setattr(task_log, field, db_task_log[field]) task_log._context = context task_log.obj_reset_changes() return task_log @base.serialize_args @base.remotable_classmethod def get(cls, context, task_name, period_beginning, period_ending, host, state=None): db_task_log = db.task_log_get(context, task_name, period_beginning, period_ending, host, state=state) if db_task_log: return cls._from_db_object(context, cls(context), db_task_log) @base.remotable def begin_task(self): db.task_log_begin_task( self._context, self.task_name, self.period_beginning, self.period_ending, self.host, task_items=self.task_items, message=self.message) @base.remotable def end_task(self): db.task_log_end_task( self._context, self.task_name, self.period_beginning, self.period_ending, self.host, errors=self.errors, message=self.message) @base.NovaObjectRegistry.register class TaskLogList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('TaskLog'), } @base.serialize_args @base.remotable_classmethod def get_all(cls, context, task_name, period_beginning, period_ending, host=None, state=None): db_task_logs = db.task_log_get_all(context, task_name, period_beginning, period_ending, host=host, state=state) return base.obj_make_list(context, cls(context), TaskLog, db_task_logs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/trusted_certs.py0000664000175000017500000000244300000000000020425 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
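# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Nova): the begin_task()/end_task() usage
# pattern that TaskLog above supports -- mark the start of an audit period,
# process the items, then record how many errors occurred. The plain dict
# "log row" and the process_item callable are hypothetical stand-ins for the
# database-backed object and the real periodic task body.
# ---------------------------------------------------------------------------

import datetime


def sketch_run_audited_task(task_name, items, process_item):
    """Process ``items`` while keeping TaskLog-style bookkeeping."""
    log_row = {                       # roughly the fields TaskLog persists
        'task_name': task_name,
        'state': 'RUNNING',
        'task_items': len(items),
        'errors': 0,
        'message': 'started',
        'period_beginning': datetime.datetime.now(datetime.timezone.utc),
    }
    for item in items:
        try:
            process_item(item)
        except Exception:
            log_row['errors'] += 1    # end_task() records the error count
    log_row['state'] = 'DONE'
    log_row['message'] = 'finished'
    log_row['period_ending'] = datetime.datetime.now(datetime.timezone.utc)
    return log_row

# Usage: sketch_run_audited_task('instance_usage_audit', ['a', 'b'], print)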
from oslo_serialization import jsonutils from nova.db.main import api as db from nova.objects import base from nova.objects import fields @base.NovaObjectRegistry.register class TrustedCerts(base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'ids': fields.ListOfStringsField(nullable=False), } @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_extra = db.instance_extra_get_by_instance_uuid( context, instance_uuid, columns=['trusted_certs']) if not db_extra or not db_extra['trusted_certs']: return None return cls.obj_from_primitive( jsonutils.loads(db_extra['trusted_certs'])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/vcpu_model.py0000664000175000017500000000442200000000000017667 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from nova.db.main import api as db from nova.objects import base from nova.objects import fields @base.NovaObjectRegistry.register class VirtCPUModel(base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'arch': fields.ArchitectureField(nullable=True), 'vendor': fields.StringField(nullable=True), 'topology': fields.ObjectField('VirtCPUTopology', nullable=True), 'features': fields.ListOfObjectsField("VirtCPUFeature", default=[]), 'mode': fields.CPUModeField(nullable=True), 'model': fields.StringField(nullable=True), 'match': fields.CPUMatchField(nullable=True), } def obj_load_attr(self, attrname): setattr(self, attrname, None) def to_json(self): return jsonutils.dumps(self.obj_to_primitive()) @classmethod def from_json(cls, jsonstr): return cls.obj_from_primitive(jsonutils.loads(jsonstr)) @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_extra = db.instance_extra_get_by_instance_uuid( context, instance_uuid, columns=['vcpu_model']) if not db_extra or not db_extra['vcpu_model']: return None return cls.obj_from_primitive(jsonutils.loads(db_extra['vcpu_model'])) @base.NovaObjectRegistry.register class VirtCPUFeature(base.NovaObject): VERSION = '1.0' fields = { 'policy': fields.CPUFeaturePolicyField(nullable=True), 'name': fields.StringField(nullable=False), } def obj_load_attr(self, attrname): setattr(self, attrname, None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/virt_cpu_topology.py0000664000175000017500000000266400000000000021327 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from nova.objects import base from nova.objects import fields @base.NovaObjectRegistry.register class VirtCPUTopology(base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'sockets': fields.IntegerField(nullable=True, default=1), 'cores': fields.IntegerField(nullable=True, default=1), 'threads': fields.IntegerField(nullable=True, default=1), } # NOTE(jaypipes): for backward compatibility, the virt CPU topology # data is stored in the database as a nested dict. @classmethod def from_dict(cls, data): return cls(sockets=data.get('sockets'), cores=data.get('cores'), threads=data.get('threads')) def to_dict(self): return { 'sockets': self.sockets, 'cores': self.cores, 'threads': self.threads } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/virt_device_metadata.py0000664000175000017500000000767700000000000021714 0ustar00zuulzuul00000000000000# Copyright (C) 2016, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from oslo_utils import versionutils from nova.db.main import api as db from nova.objects import base from nova.objects import fields @base.NovaObjectRegistry.register class DeviceBus(base.NovaObject): VERSION = '1.0' @base.NovaObjectRegistry.register class PCIDeviceBus(DeviceBus): VERSION = '1.0' fields = { 'address': fields.PCIAddressField(), } @base.NovaObjectRegistry.register class USBDeviceBus(DeviceBus): VERSION = '1.0' fields = { 'address': fields.USBAddressField(), } @base.NovaObjectRegistry.register class SCSIDeviceBus(DeviceBus): VERSION = '1.0' fields = { 'address': fields.SCSIAddressField(), } @base.NovaObjectRegistry.register class IDEDeviceBus(DeviceBus): VERSION = '1.0' fields = { 'address': fields.IDEAddressField(), } @base.NovaObjectRegistry.register class XenDeviceBus(DeviceBus): VERSION = '1.0' fields = { 'address': fields.XenAddressField(), } @base.NovaObjectRegistry.register class DeviceMetadata(base.NovaObject): VERSION = '1.0' fields = { 'bus': fields.ObjectField("DeviceBus", subclasses=True), 'tags': fields.ListOfStringsField(), } @base.NovaObjectRegistry.register class NetworkInterfaceMetadata(DeviceMetadata): # Version 1.0: Initial version # Version 1.1: Add vlans field # Version 1.2: Add vf_trusted field VERSION = '1.2' fields = { 'mac': fields.MACAddressField(), 'vlan': fields.IntegerField(), 'vf_trusted': fields.BooleanField(default=False), } def obj_make_compatible(self, primitive, target_version): target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 1) and 'vlan' in primitive: del primitive['vlan'] if target_version < (1, 2) and 'vf_trusted' in primitive: del primitive['vf_trusted'] @base.NovaObjectRegistry.register class DiskMetadata(DeviceMetadata): VERSION = '1.0' fields = { 'serial': fields.StringField(nullable=True), 'path': fields.StringField(nullable=True), } 
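# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Nova): the obj_make_compatible() downgrade
# pattern used by NetworkInterfaceMetadata above -- when serializing for an
# older consumer, fields introduced after the target version are removed from
# the primitive dict. The helper name and example values are hypothetical;
# the version literals mirror the 1.1/1.2 history documented in the class.
# ---------------------------------------------------------------------------

def sketch_downgrade_nic_metadata(primitive: dict, target_version: tuple) -> dict:
    """Return a copy of ``primitive`` valid for ``target_version``."""
    out = dict(primitive)
    if target_version < (1, 1):
        out.pop('vlan', None)        # 'vlan' only exists from version 1.1
    if target_version < (1, 2):
        out.pop('vf_trusted', None)  # 'vf_trusted' only exists from version 1.2
    return out

# Usage:
#   sketch_downgrade_nic_metadata(
#       {'mac': 'fa:16:3e:00:00:01', 'vlan': 100, 'vf_trusted': False}, (1, 0))
#   -> {'mac': 'fa:16:3e:00:00:01'}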
@base.NovaObjectRegistry.register class ShareMetadata(DeviceMetadata): VERSION = '1.0' fields = { 'share_id': fields.StringField(nullable=True), 'tag': fields.StringField(nullable=True), } @base.NovaObjectRegistry.register class InstanceDeviceMetadata(base.NovaObject): VERSION = '1.0' fields = { 'devices': fields.ListOfObjectsField('DeviceMetadata', subclasses=True), } @classmethod def obj_from_db(cls, context, db_dev_meta): primitive = jsonutils.loads(db_dev_meta) device_metadata = cls.obj_from_primitive(primitive) return device_metadata @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_extra = db.instance_extra_get_by_instance_uuid( context, instance_uuid, columns=['device_metadata']) if not db_extra or db_extra['device_metadata'] is None: return None primitive = jsonutils.loads(db_extra['device_metadata']) device_metadata = cls.obj_from_primitive(primitive) return device_metadata def _to_json(self): return jsonutils.dumps(self.obj_to_primitive()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/virtual_interface.py0000664000175000017500000003300500000000000021237 0ustar00zuulzuul00000000000000# Copyright (C) 2014, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import versionutils from nova import context as nova_context from nova.db.api import api as api_db_api from nova.db.main import api as main_db_api from nova.db.main import models as main_db_models from nova import exception from nova import objects from nova.objects import base from nova.objects import fields LOG = logging.getLogger(__name__) VIF_OPTIONAL_FIELDS = ['network_id'] FAKE_UUID = '00000000-0000-0000-0000-000000000000' @base.NovaObjectRegistry.register class VirtualInterface(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version # Version 1.1: Add tag field # Version 1.2: Adding a save method # Version 1.3: Added destroy() method VERSION = '1.3' fields = { 'id': fields.IntegerField(), # This is a MAC address. 'address': fields.StringField(nullable=True), 'network_id': fields.IntegerField(), 'instance_uuid': fields.UUIDField(), 'uuid': fields.UUIDField(), 'tag': fields.StringField(nullable=True), } def obj_make_compatible(self, primitive, target_version): target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 1) and 'tag' in primitive: del primitive['tag'] @staticmethod def _from_db_object(context, vif, db_vif): for field in vif.fields: if not db_vif[field] and field in VIF_OPTIONAL_FIELDS: continue else: setattr(vif, field, db_vif[field]) # NOTE(danms): The neutronv2 module namespaces mac addresses # with port id to avoid uniqueness constraints currently on # our table. Strip that out here so nobody else needs to care. 
if 'address' in vif and '/' in vif.address: vif.address, _ = vif.address.split('/', 1) vif._context = context vif.obj_reset_changes() return vif @base.remotable_classmethod def get_by_id(cls, context, vif_id): db_vif = main_db_api.virtual_interface_get(context, vif_id) if db_vif: return cls._from_db_object(context, cls(), db_vif) @base.remotable_classmethod def get_by_uuid(cls, context, vif_uuid): db_vif = main_db_api.virtual_interface_get_by_uuid(context, vif_uuid) if db_vif: return cls._from_db_object(context, cls(), db_vif) @base.remotable_classmethod def get_by_address(cls, context, address): db_vif = main_db_api.virtual_interface_get_by_address(context, address) if db_vif: return cls._from_db_object(context, cls(), db_vif) @base.remotable_classmethod def get_by_instance_and_network(cls, context, instance_uuid, network_id): db_vif = main_db_api.virtual_interface_get_by_instance_and_network( context, instance_uuid, network_id) if db_vif: return cls._from_db_object(context, cls(), db_vif) @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') updates = self.obj_get_changes() db_vif = main_db_api.virtual_interface_create(self._context, updates) self._from_db_object(self._context, self, db_vif) @base.remotable def save(self): updates = self.obj_get_changes() if 'address' in updates: raise exception.ObjectActionError(action='save', reason='address is not mutable') db_vif = main_db_api.virtual_interface_update( self._context, self.uuid, updates) return self._from_db_object(self._context, self, db_vif) @base.remotable_classmethod def delete_by_instance_uuid(cls, context, instance_uuid): main_db_api.virtual_interface_delete_by_instance( context, instance_uuid) @base.remotable def destroy(self): main_db_api.virtual_interface_delete(self._context, self.id) @base.NovaObjectRegistry.register class VirtualInterfaceList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('VirtualInterface'), } @base.remotable_classmethod def get_all(cls, context): db_vifs = main_db_api.virtual_interface_get_all(context) return base.obj_make_list(context, cls(context), objects.VirtualInterface, db_vifs) @staticmethod @main_db_api.select_db_reader_mode def _db_virtual_interface_get_by_instance(context, instance_uuid, use_slave=False): return main_db_api.virtual_interface_get_by_instance( context, instance_uuid) @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False): db_vifs = cls._db_virtual_interface_get_by_instance( context, instance_uuid, use_slave=use_slave) return base.obj_make_list(context, cls(context), objects.VirtualInterface, db_vifs) @api_db_api.context_manager.writer def fill_virtual_interface_list(context, max_count): """This fills missing VirtualInterface Objects in Nova DB""" count_hit = 0 count_all = 0 def _regenerate_vif_list_base_on_cache(context, instance, old_vif_list, nw_info): # Set old VirtualInterfaces as deleted. for vif in old_vif_list: vif.destroy() # Generate list based on current cache: for vif in nw_info: vif_obj = objects.VirtualInterface(context) vif_obj.uuid = vif['id'] vif_obj.address = "%s/%s" % (vif['address'], vif['id']) vif_obj.instance_uuid = instance['uuid'] # Find tag from previous VirtualInterface object if exist. 
old_vif = [x for x in old_vif_list if x.uuid == vif['id']] vif_obj.tag = old_vif[0].tag if len(old_vif) > 0 else None vif_obj.create() cells = objects.CellMappingList.get_all(context) for cell in cells: if count_all == max_count: # We reached the limit of checked instances per # this function run. # Stop, do not go to other cell. break with nova_context.target_cell(context, cell) as cctxt: marker = _get_marker_for_migrate_instances(cctxt) filters = {'deleted': False} # Adjust the limit of migrated instances. # If user wants to process a total of 100 instances # and we did a 75 in cell1, then we only need to # verify 25 more in cell2, no more. adjusted_limit = max_count - count_all instances = objects.InstanceList.get_by_filters( cctxt, filters=filters, sort_key='created_at', sort_dir='asc', marker=marker, limit=adjusted_limit) for instance in instances: # We don't want to fill vif for FAKE instance. if instance.uuid == FAKE_UUID: continue try: info_cache = objects.InstanceInfoCache.\ get_by_instance_uuid(cctxt, instance.get('uuid')) if not info_cache.network_info: LOG.info('InstanceInfoCache object has not set ' 'NetworkInfo field. ' 'Skipping build of VirtualInterfaceList.') continue except exception.InstanceInfoCacheNotFound: LOG.info('Instance has no InstanceInfoCache object. ' 'Skipping build of VirtualInterfaceList for it.') continue # It by design filters out deleted vifs. vif_list = VirtualInterfaceList.\ get_by_instance_uuid(cctxt, instance.get('uuid')) nw_info = info_cache.network_info # This should be list with proper order of vifs, # but we're not sure about that. cached_vif_ids = [vif['id'] for vif in nw_info] # This is ordered list of vifs taken from db. db_vif_ids = [vif.uuid for vif in vif_list] count_all += 1 if cached_vif_ids == db_vif_ids: # The list of vifs and its order in cache and in # virtual_interfaces is the same. So we could end here. continue elif len(db_vif_ids) < len(cached_vif_ids): # Seems to be an instance from release older than # Newton and we don't have full VirtualInterfaceList for # it. Rewrite whole VirtualInterfaceList using interface # order from InstanceInfoCache. count_hit += 1 LOG.info('Got an instance %s with less VIFs defined in DB ' 'than in cache. Could be Pre-Newton instance. ' 'Building new VirtualInterfaceList for it.', instance.uuid) _regenerate_vif_list_base_on_cache(cctxt, instance, vif_list, nw_info) elif len(db_vif_ids) > len(cached_vif_ids): # Seems vif list is inconsistent with cache. # it could be a broken cache or interface # during attach. Do nothing. LOG.info('Got an unexpected number of VIF records in the ' 'database compared to what was stored in the ' 'instance_info_caches table for instance %s. ' 'Perhaps it is an instance during interface ' 'attach. Do nothing.', instance.uuid) continue else: # The order is different between lists. # We need a source of truth, so rebuild order # from cache. count_hit += 1 LOG.info('Got an instance %s with different order of ' 'VIFs between DB and cache. ' 'We need a source of truth, so rebuild order ' 'from cache.', instance.uuid) _regenerate_vif_list_base_on_cache(cctxt, instance, vif_list, nw_info) # Set marker to point last checked instance. if instances: marker = instances[-1].uuid _set_or_delete_marker_for_migrate_instances(cctxt, marker) return count_all, count_hit # NOTE(mjozefcz): This is similar to marker mechanism made for # RequestSpecs object creation. # Since we have a lot of instances to be check this # will add a FAKE row that points to last instance # we checked. 
# Please notice that because of virtual_interfaces_instance_uuid_fkey # we need to have FAKE_UUID instance object, even deleted one. @main_db_api.pick_context_manager_writer def _set_or_delete_marker_for_migrate_instances(context, marker=None): context.session.query(main_db_models.VirtualInterface).filter_by( instance_uuid=FAKE_UUID).delete() # Create FAKE_UUID instance objects, only for marker, if doesn't exist. # It is needed due constraint: virtual_interfaces_instance_uuid_fkey instance = context.session.query(main_db_models.Instance).filter_by( uuid=FAKE_UUID).first() if not instance: instance = objects.Instance(context) instance.uuid = FAKE_UUID instance.project_id = FAKE_UUID instance.user_id = FAKE_UUID instance.create() # That's fake instance, lets destroy it. # We need only its row to solve constraint issue. instance.destroy() if marker is not None: # ... but there can be a new marker to set db_mapping = objects.VirtualInterface(context) db_mapping.instance_uuid = FAKE_UUID db_mapping.uuid = FAKE_UUID db_mapping.tag = marker db_mapping.address = 'ff:ff:ff:ff:ff:ff/%s' % FAKE_UUID db_mapping.create() @main_db_api.pick_context_manager_reader def _get_marker_for_migrate_instances(context): vif = (context.session.query(main_db_models.VirtualInterface).filter_by( instance_uuid=FAKE_UUID)).first() marker = vif['tag'] if vif else None return marker ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/objects/volume_usage.py0000664000175000017500000000742400000000000020232 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
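# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Nova): the marker/limit batching pattern
# used by fill_virtual_interface_list() above. Work is processed in chunks of
# at most max_count records per call, and the last processed key is saved as
# a marker so the next call resumes where the previous one stopped. The
# in-memory marker_store dict is a hypothetical stand-in for the FAKE_UUID
# marker row kept in the database.
# ---------------------------------------------------------------------------

def sketch_process_in_batches(records, max_count, marker_store):
    """Process up to ``max_count`` records after the saved marker."""
    marker = marker_store.get('marker')
    start = records.index(marker) + 1 if marker in records else 0
    batch = records[start:start + max_count]
    for record in batch:
        pass                                  # real code migrates the record here
    if batch:
        marker_store['marker'] = batch[-1]    # resume point for the next call
    return len(batch)

# Usage: calling this repeatedly with max_count=2 over ['a', 'b', 'c'] and a
# shared marker_store processes ['a', 'b'] first and ['c'] on the next call.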
from nova.db.main import api as db from nova.objects import base from nova.objects import fields @base.NovaObjectRegistry.register class VolumeUsage(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.IntegerField(read_only=True), 'volume_id': fields.UUIDField(), 'instance_uuid': fields.UUIDField(nullable=True), 'project_id': fields.StringField(nullable=True), 'user_id': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'tot_last_refreshed': fields.DateTimeField(nullable=True, read_only=True), 'tot_reads': fields.IntegerField(read_only=True), 'tot_read_bytes': fields.IntegerField(read_only=True), 'tot_writes': fields.IntegerField(read_only=True), 'tot_write_bytes': fields.IntegerField(read_only=True), 'curr_last_refreshed': fields.DateTimeField(nullable=True, read_only=True), 'curr_reads': fields.IntegerField(), 'curr_read_bytes': fields.IntegerField(), 'curr_writes': fields.IntegerField(), 'curr_write_bytes': fields.IntegerField() } @property def last_refreshed(self): if self.tot_last_refreshed and self.curr_last_refreshed: return max(self.tot_last_refreshed, self.curr_last_refreshed) elif self.tot_last_refreshed: return self.tot_last_refreshed else: # curr_last_refreshed must be set return self.curr_last_refreshed @property def reads(self): return self.tot_reads + self.curr_reads @property def read_bytes(self): return self.tot_read_bytes + self.curr_read_bytes @property def writes(self): return self.tot_writes + self.curr_writes @property def write_bytes(self): return self.tot_write_bytes + self.curr_write_bytes @staticmethod def _from_db_object(context, vol_usage, db_vol_usage): for field in vol_usage.fields: setattr(vol_usage, field, db_vol_usage[field]) vol_usage._context = context vol_usage.obj_reset_changes() return vol_usage @base.remotable def save(self, update_totals=False): db_vol_usage = db.vol_usage_update( self._context, self.volume_id, self.curr_reads, self.curr_read_bytes, self.curr_writes, self.curr_write_bytes, self.instance_uuid, self.project_id, self.user_id, self.availability_zone, update_totals=update_totals) self._from_db_object(self._context, self, db_vol_usage) def to_dict(self): return { 'volume_id': self.volume_id, 'tenant_id': self.project_id, 'user_id': self.user_id, 'availability_zone': self.availability_zone, 'instance_id': self.instance_uuid, 'last_refreshed': str( self.last_refreshed) if self.last_refreshed else '', 'reads': self.reads, 'read_bytes': self.read_bytes, 'writes': self.writes, 'write_bytes': self.write_bytes } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.3936088 nova-32.0.0/nova/pci/0000775000175000017500000000000000000000000014300 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/pci/__init__.py0000664000175000017500000000000000000000000016377 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/pci/devspec.py0000664000175000017500000004302100000000000016303 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import copy import re import string import typing as ty import nova.conf from nova import exception from nova.i18n import _ from nova import objects from nova.pci.request import PCI_REMOTE_MANAGED_TAG from nova.pci import utils from oslo_log import log as logging from oslo_utils import strutils MAX_VENDOR_ID = 0xFFFF MAX_PRODUCT_ID = 0xFFFF MAX_FUNC = 0x7 MAX_DOMAIN = 0xFFFF MAX_BUS = 0xFF MAX_SLOT = 0x1F ANY = '*' REGEX_ANY = '.*' LOG = logging.getLogger(__name__) CONF = nova.conf.CONF PCISpecAddressType = ty.Union[ty.Dict[str, str], str] class PciAddressSpec(metaclass=abc.ABCMeta): """Abstract class for all PCI address spec styles This class checks the address fields of the pci.device_spec """ def __init__(self, pci_addr: str) -> None: self.domain = '' self.bus = '' self.slot = '' self.func = '' @abc.abstractmethod def match(self, pci_addr): pass def is_single_address(self) -> bool: return all([ all(c in string.hexdigits for c in self.domain), all(c in string.hexdigits for c in self.bus), all(c in string.hexdigits for c in self.slot), all(c in string.hexdigits for c in self.func)]) def _set_pci_dev_info( self, prop: str, maxval: int, hex_value: str ) -> None: a = getattr(self, prop) if a == ANY: return try: v = int(a, 16) except ValueError: raise exception.PciConfigInvalidSpec( reason=_("property %(property)s ('%(attr)s') does not parse " "as a hex number.") % {'property': prop, 'attr': a}) if v > maxval: raise exception.PciConfigInvalidSpec( reason=_("property %(property)s (%(attr)s) is greater than " "the maximum allowable value (%(max)X).") % {'property': prop, 'attr': a, 'max': maxval}) setattr(self, prop, hex_value % v) class PhysicalPciAddress(PciAddressSpec): """Manages the address fields for a fully-qualified PCI address. This function class will validate the address fields for a single PCI device. """ def __init__(self, pci_addr: PCISpecAddressType) -> None: try: # TODO(stephenfin): Is this ever actually a string? if isinstance(pci_addr, dict): self.domain = pci_addr['domain'] self.bus = pci_addr['bus'] self.slot = pci_addr['slot'] self.func = pci_addr['function'] else: self.domain, self.bus, self.slot, self.func = ( utils.get_pci_address_fields(pci_addr)) self._set_pci_dev_info('func', MAX_FUNC, '%1x') self._set_pci_dev_info('domain', MAX_DOMAIN, '%04x') self._set_pci_dev_info('bus', MAX_BUS, '%02x') self._set_pci_dev_info('slot', MAX_SLOT, '%02x') except (KeyError, ValueError): raise exception.PciDeviceWrongAddressFormat(address=pci_addr) def match(self, phys_pci_addr: PciAddressSpec) -> bool: conditions = [ self.domain == phys_pci_addr.domain, self.bus == phys_pci_addr.bus, self.slot == phys_pci_addr.slot, self.func == phys_pci_addr.func, ] return all(conditions) def __str__(self): return f'{self.domain}:{self.bus}:{self.slot}.{self.func}' class PciAddressGlobSpec(PciAddressSpec): """Manages the address fields with glob style. This function class will validate the address fields with glob style, check for wildcards, and insert wildcards where the field is left blank. 
""" def __init__(self, pci_addr: str) -> None: self.domain = ANY self.bus = ANY self.slot = ANY self.func = ANY dbs, sep, func = pci_addr.partition('.') if func: self.func = func.strip() self._set_pci_dev_info('func', MAX_FUNC, '%01x') if dbs: dbs_fields = dbs.split(':') if len(dbs_fields) > 3: raise exception.PciDeviceWrongAddressFormat(address=pci_addr) # If we got a partial address like ":00.", we need to turn this # into a domain of ANY, a bus of ANY, and a slot of 00. This code # allows the address bus and/or domain to be left off dbs_all = [ANY] * (3 - len(dbs_fields)) dbs_all.extend(dbs_fields) dbs_checked = [s.strip() or ANY for s in dbs_all] self.domain, self.bus, self.slot = dbs_checked self._set_pci_dev_info('domain', MAX_DOMAIN, '%04x') self._set_pci_dev_info('bus', MAX_BUS, '%02x') self._set_pci_dev_info('slot', MAX_SLOT, '%02x') def match(self, phys_pci_addr: PciAddressSpec) -> bool: conditions = [ self.domain in (ANY, phys_pci_addr.domain), self.bus in (ANY, phys_pci_addr.bus), self.slot in (ANY, phys_pci_addr.slot), self.func in (ANY, phys_pci_addr.func) ] return all(conditions) class PciAddressRegexSpec(PciAddressSpec): """Manages the address fields with regex style. This function class will validate the address fields with regex style. The validation includes check for all PCI address attributes and validate their regex. """ def __init__(self, pci_addr: dict) -> None: try: self.domain = pci_addr.get('domain', REGEX_ANY) self.bus = pci_addr.get('bus', REGEX_ANY) self.slot = pci_addr.get('slot', REGEX_ANY) self.func = pci_addr.get('function', REGEX_ANY) self.domain_regex = re.compile(self.domain) self.bus_regex = re.compile(self.bus) self.slot_regex = re.compile(self.slot) self.func_regex = re.compile(self.func) except re.error: raise exception.PciDeviceWrongAddressFormat(address=pci_addr) def match(self, phys_pci_addr: PciAddressSpec) -> bool: conditions = [ bool(self.domain_regex.match(phys_pci_addr.domain)), bool(self.bus_regex.match(phys_pci_addr.bus)), bool(self.slot_regex.match(phys_pci_addr.slot)), bool(self.func_regex.match(phys_pci_addr.func)) ] return all(conditions) class WhitelistPciAddress(object): """Manages the address fields of the whitelist. This class checks the address fields of the pci.device_spec configuration option, validating the address fields. 
Example configs: | [pci] | device_spec = {"address":"*:0a:00.*", | "physical_network":"physnet1"} | device_spec = {"address": {"domain": ".*", "bus": "02", "slot": "01", "function": "[0-2]"}, "physical_network":"net1"} | device_spec = {"vendor_id":"1137","product_id":"0071"} """ def __init__( self, pci_addr: PCISpecAddressType, is_physical_function: bool ) -> None: self.is_physical_function = is_physical_function self._init_address_fields(pci_addr) def _check_physical_function(self) -> None: if self.pci_address_spec.is_single_address(): self.is_physical_function = ( utils.is_physical_function( self.pci_address_spec.domain, self.pci_address_spec.bus, self.pci_address_spec.slot, self.pci_address_spec.func)) def _init_address_fields(self, pci_addr: PCISpecAddressType) -> None: self.pci_address_spec: PciAddressSpec if not self.is_physical_function: if isinstance(pci_addr, str): self.pci_address_spec = PciAddressGlobSpec(pci_addr) elif isinstance(pci_addr, dict): self.pci_address_spec = PciAddressRegexSpec(pci_addr) else: raise exception.PciDeviceWrongAddressFormat(address=pci_addr) self._check_physical_function() else: self.pci_address_spec = PhysicalPciAddress(pci_addr) def match(self, pci_addr: str, pci_phys_addr: ty.Optional[str]) -> bool: """Match a device to this PciAddress. Assume this is called with a ``pci_addr`` and ``pci_phys_addr`` reported by libvirt. No attempt is made to verify if ``pci_addr`` is a VF of ``pci_phys_addr``. :param pci_addr: PCI address of the device to match. :param pci_phys_addr: PCI address of the parent of the device to match (or None if the device is not a VF). """ # Try to match on the parent PCI address if the PciDeviceSpec is a # PF (sriov is available) and the device to match is a VF. This # makes it possible to specify the PCI address of a PF in the # pci.device_spec to match any of its VFs' PCI addresses. if self.is_physical_function and pci_phys_addr: pci_phys_addr_obj = PhysicalPciAddress(pci_phys_addr) if self.pci_address_spec.match(pci_phys_addr_obj): return True # Try to match on the device PCI address only. pci_addr_obj = PhysicalPciAddress(pci_addr) return self.pci_address_spec.match(pci_addr_obj) class PciDeviceSpec(PciAddressSpec): def __init__(self, dev_spec: ty.Dict[str, str]) -> None: # stored for better error reporting self.dev_spec_conf = copy.deepcopy(dev_spec) # the non tag fields (i.e. address, devname) will be removed by # _init_dev_details self.tags = dev_spec self._init_dev_details() def _address_obj(self) -> ty.Optional[WhitelistPciAddress]: address_obj = None if self.dev_name: address_str, pf = utils.get_function_by_ifname(self.dev_name) if not address_str: return None # Note(moshele): In this case we always passing a string # of the PF pci address address_obj = WhitelistPciAddress(address_str, pf) else: # use self.address address_obj = self.address return address_obj def _init_dev_details(self) -> None: self.vendor_id = self.tags.pop("vendor_id", ANY) self.product_id = self.tags.pop("product_id", ANY) self.dev_name = self.tags.pop("devname", None) self.address: ty.Optional[WhitelistPciAddress] = None # Note(moshele): The address attribute can be a string or a dict. # For glob syntax or specific pci it is a string and for regex syntax # it is a dict. The WhitelistPciAddress class handles both types. 
address = self.tags.pop("address", None) self.vendor_id = self.vendor_id.strip() self._set_pci_dev_info('vendor_id', MAX_VENDOR_ID, '%04x') self._set_pci_dev_info('product_id', MAX_PRODUCT_ID, '%04x') if address and self.dev_name: raise exception.PciDeviceInvalidDeviceName() if not self.dev_name: self.address = WhitelistPciAddress(address or '*:*:*.*', False) # PFs with remote_managed tags are explicitly not supported. If they # are tagged as such by mistake in the whitelist Nova will # raise an exception. The reason for excluding PFs is the lack of a way # for an instance to access the control plane at the remote side (e.g. # on a DPU) for managing the PF representor corresponding to the PF. address_obj = self._address_obj() self._remote_managed = strutils.bool_from_string( self.tags.get(PCI_REMOTE_MANAGED_TAG)) self._normalize_device_spec_tag("managed") self._normalize_device_spec_tag("live_migratable") self._normalize_device_spec_tag("one_time_use") if self.tags.get('one_time_use') == 'true': # Validate that one_time_use=true is not set on devices where we # cannot support proper reservation protection. if not CONF.pci.report_in_placement: raise exception.PciConfigInvalidSpec( reason=_('one_time_use=true requires ' 'pci.report_in_placement to be enabled')) if self._remote_managed: if address_obj is None: # Note that this will happen if a netdev was specified in the # whitelist but it is not actually present on a system - in # this case Nova is not able to look up an address by # a netdev name. raise exception.PciDeviceRemoteManagedNotPresent() elif address_obj.is_physical_function: pf_addr = str(address_obj.pci_address_spec) vf_product_id = utils.get_vf_product_id_by_pf_addr(pf_addr) # VF vendor IDs have to match the corresponding PF vendor IDs # per the SR-IOV spec so we use it for matching here. pf_vendor_id, pf_product_id = utils.get_pci_ids_by_pci_addr( pf_addr) # Check the actual vendor ID and VF product ID of an assumed # VF (based on the actual PF). The VF product ID must match # the actual one if this is a VF device spec. if (self.product_id == vf_product_id and self.vendor_id in (pf_vendor_id, ANY)): pass elif (self.product_id in (pf_product_id, ANY) and self.vendor_id in (pf_vendor_id, ANY)): raise exception.PciDeviceInvalidPFRemoteManaged( address_obj.pci_address_spec) else: # The specified product and vendor IDs of what is supposed # to be a VF corresponding to the PF PCI address do not # match the actual ones for this PF. This means that the # whitelist is invalid. raise exception.PciConfigInvalidSpec( reason=_('the specified VF vendor ID %(vendor_id)s and' ' product ID %(product_id)s do not match the' ' expected VF IDs based on the corresponding' ' PF identified by PCI address %(pf_addr)s') % {'vendor_id': self.vendor_id, 'product_id': self.product_id, 'pf_addr': pf_addr}) def _ensure_remote_managed_dev_vpd_serial( self, dev_dict: ty.Dict[str, ty.Any]) -> bool: """Ensure the presence of a serial number field in PCI VPD. A card serial number extracted from PCI VPD is required to allow a networking backend to identify which remote host needs to program a given device. So if a device is tagged as remote_managed, it must have the card serial number or be filtered out. """ if not self._remote_managed: return True card_sn = dev_dict.get('capabilities', {}).get( 'vpd', {}).get('card_serial_number') # None or empty card_serial_number should be filtered out. 
That would # mean either no serial number in the VPD (if present at all) or SN is # an empty string which is not useful for device identification. return bool(card_sn) def match(self, dev_dict: ty.Dict[str, ty.Any]) -> bool: address_obj: ty.Optional[WhitelistPciAddress] = self._address_obj() if not address_obj: return False return all([ self.vendor_id in (ANY, dev_dict['vendor_id']), self.product_id in (ANY, dev_dict['product_id']), address_obj.match(dev_dict['address'], dev_dict.get('parent_addr')), self._ensure_remote_managed_dev_vpd_serial(dev_dict), ]) def match_pci_obj(self, pci_obj: 'objects.PciDevice') -> bool: dev_dict = { 'vendor_id': pci_obj.vendor_id, 'product_id': pci_obj.product_id, 'address': pci_obj.address, 'parent_addr': pci_obj.parent_addr, 'capabilities': { 'vpd': {'card_serial_number': pci_obj.card_serial_number}} } return self.match(dev_dict) def get_tags(self) -> ty.Dict[str, str]: return self.tags def _normalize_device_spec_tag(self, tag): if self.tags.get(tag, None) is not None: try: self.tags[tag] = ( "true" if strutils.bool_from_string( self.tags.get(tag), strict=True) else "false") except ValueError as e: raise exception.PciConfigInvalidSpec( reason=f"Cannot parse tag '{tag}': " + str(e) ) def enhanced_pci_device_with_spec_tags(self, dev: ty.Dict[str, ty.Any]): spec_tags = ["managed", "live_migratable"] for tag in spec_tags: tag_value = self.tags.get(tag) if tag_value is not None: dev.update({tag: tag_value}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/pci/manager.py0000664000175000017500000005331100000000000016267 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Intel, Inc. # Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import copy import typing as ty from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from nova import context as ctx from nova import exception from nova import objects from nova.objects import fields from nova.pci import stats from nova.pci import whitelist CONF = cfg.CONF LOG = logging.getLogger(__name__) MappingType = ty.Dict[str, ty.List['objects.PciDevice']] PCIInvType = ty.DefaultDict[str, ty.List['objects.PciDevice']] class PciDevTracker(object): """Manage pci devices in a compute node. This class fetches pci passthrough information from hypervisor and tracks the usage of these devices. It's called by compute node resource tracker to allocate and free devices to/from instances, and to update the available pci passthrough device information from the hypervisor periodically. The `pci_devs` attribute of this class is the in-memory "master copy" of all devices on each compute host, and all data changes that happen when claiming/allocating/freeing devices HAVE TO be made against instances contained in `pci_devs` list, because they are periodically flushed to the DB when the save() method is called. 
It is unsafe to fetch PciDevice objects elsewhere in the code for update purposes as those changes will end up being overwritten when the `pci_devs` are saved. """ def __init__( self, context: ctx.RequestContext, compute_node: 'objects.ComputeNode', ): """Create a pci device tracker. :param context: The request context. :param compute_node: The object.ComputeNode whose PCI devices we're tracking. """ self.stale: ty.Dict[str, objects.PciDevice] = {} self.to_be_removed_when_freed: ty.Dict[str, objects.PciDevice] = {} self.node_id: str = compute_node.id self.dev_filter = whitelist.Whitelist(CONF.pci.device_spec) numa_topology = compute_node.numa_topology if numa_topology: # For legacy reasons, the NUMATopology is stored as a JSON blob. # Deserialize it into a real object. numa_topology = objects.NUMATopology.obj_from_db_obj(numa_topology) self.stats = stats.PciDeviceStats( numa_topology, dev_filter=self.dev_filter) self._context = context self.pci_devs = objects.PciDeviceList.get_by_compute_node( context, self.node_id) self._build_device_tree(self.pci_devs) self._initial_instance_usage() def _initial_instance_usage(self) -> None: self.allocations: PCIInvType = collections.defaultdict(list) self.claims: PCIInvType = collections.defaultdict(list) for dev in self.pci_devs: uuid = dev.instance_uuid if dev.status == fields.PciDeviceStatus.CLAIMED: self.claims[uuid].append(dev) elif dev.status == fields.PciDeviceStatus.ALLOCATED: self.allocations[uuid].append(dev) elif dev.status == fields.PciDeviceStatus.AVAILABLE: self.stats.add_device(dev) def save(self, context: ctx.RequestContext) -> None: devs = copy.copy(self.pci_devs.objects) for dev in devs: if dev.obj_what_changed(): with dev.obj_alternate_context(context): dev.save() if dev.status == fields.PciDeviceStatus.DELETED: self.pci_devs.objects.remove(dev) @property def pci_stats(self) -> stats.PciDeviceStats: return self.stats def update_devices_from_hypervisor_resources( self, devices_json: str, ) -> None: """Sync the pci device tracker with hypervisor information. To support pci device hot plug, we sync with the hypervisor periodically, fetching all devices information from hypervisor, update the tracker and sync the DB information. Devices should not be hot-plugged when assigned to a guest, but possibly the hypervisor has no such guarantee. The best we can do is to give a warning if a device is changed or removed while assigned. :param devices_json: The JSON-ified string of device information that is returned from the virt driver's get_available_resource() call in the pci_passthrough_devices key. """ devices = [] for dev in jsonutils.loads(devices_json): try: pci_dev_spec = self.dev_filter.device_assignable(dev) if pci_dev_spec is not None: # Since some configuration parameters cannot be # discovered by the driver, we need to add them from # the device specification provided by the operator. pci_dev_spec.enhanced_pci_device_with_spec_tags(dev) devices.append(dev) except exception.PciConfigInvalidSpec as e: # The raised exception is misleading as the problem is not with # the whitelist config but with the host PCI device reported by # libvirt. The code that matches the host PCI device to the # whitelist spec reuses the WhitelistPciAddress object to parse # the host PCI device address. That parsing can fail if the # PCI address has a 32 bit domain. But this should not prevent # processing the rest of the devices. So we simply skip this # device and continue. 
# Please note that this except block does not ignore the # invalid whitelist configuration. The whitelist config has # already been parsed or rejected in case it was invalid. At # this point the self.dev_filter represents the parsed and # validated whitelist config. LOG.debug( 'Skipping PCI device %s reported by the hypervisor: %s', {k: v for k, v in dev.items() if k in ['address', 'parent_addr']}, # NOTE(gibi): this is ugly but the device_assignable() call # uses the PhysicalPciAddress class to parse the PCI # addresses and that class reuses the code from # PciAddressSpec that was originally designed to parse # whitelist spec. Hence the raised exception talks about # whitelist config. This is misleading as in our case the # PCI address that we failed to parse came from the # hypervisor. # TODO(gibi): refactor the false abstraction to make the # code reuse clean from the false assumption that we only # parse whitelist config with # devspec.PciAddressSpec._set_pci_dev_info() str(e).replace( 'Invalid [pci]device_spec config:', 'The')) self._set_hvdevs(devices) @staticmethod def _build_device_tree(all_devs: ty.List['objects.PciDevice']) -> None: """Build a tree of devices that represents parent-child relationships. We need to have the relationships set up so that we can easily make all the necessary changes to parent/child devices without having to figure it out at each call site. This method just adds references to relevant instances already found in `pci_devs` to `child_devices` and `parent_device` fields of each one. Currently relationships are considered for SR-IOV PFs/VFs only. """ # Ensures that devices are ordered in ASC so VFs will come # after their PFs. all_devs.sort(key=lambda x: x.address) parents = {} for dev in all_devs: if dev.status in (fields.PciDeviceStatus.REMOVED, fields.PciDeviceStatus.DELETED): # NOTE(ndipanov): Removed devs are pruned from # self.pci_devs on save() so we need to make sure we # are not looking at removed ones as we may build up # the tree sooner than they are pruned. continue if dev.dev_type == fields.PciDeviceType.SRIOV_PF: dev.child_devices = [] parents[dev.address] = dev elif dev.dev_type in ( fields.PciDeviceType.SRIOV_VF, fields.PciDeviceType.VDPA ): dev.parent_device = parents.get(dev.parent_addr) if dev.parent_device: parents[dev.parent_addr].child_devices.append(dev) def _set_hvdevs(self, devices: ty.List[ty.Dict[str, ty.Any]]) -> None: exist_addrs = set([dev.address for dev in self.pci_devs]) new_addrs = set([dev['address'] for dev in devices]) for existed in self.pci_devs: if existed.address in exist_addrs - new_addrs: # Remove previously tracked PCI devices that are either # no longer reported by the hypervisor or have been removed # from the pci whitelist. try: existed.remove() except ( exception.PciDeviceInvalidStatus, exception.PciDeviceInvalidOwner, ) as e: LOG.warning("Unable to remove device with status " "'%(status)s' and ownership %(instance_uuid)s " "because of %(pci_exception)s. " "Check your [pci]device_spec " "configuration to make sure this allocated " "device is whitelisted. 
If you have removed " "the device from the whitelist intentionally " "or the device is no longer available on the " "host you will need to delete the server or " "migrate it to another host to silence this " "warning.", {'status': existed.status, 'instance_uuid': existed.instance_uuid, 'pci_exception': e.format_message()}) # NOTE(sean-k-mooney): the device may not be tracked for # two reasons: first the device could have been removed # from the host or second the whitelist could have been # updated. While force removing may seam reasonable, if # the device is allocated to a vm, force removing the # device entry from the resource tracker can prevent the vm # from rebooting. If the PCI device was removed due to an # update to the PCI whitelist which was later reverted, # removing the entry from the database and adding it back # later may lead to the scheduler incorrectly selecting # this host and the ResourceTracker assigning the PCI # device to a second vm. To prevent this bug we skip # deleting the device from the db in this iteration and # will try again on the next sync. # NOTE(gibi): We keep a list of these devices in memory # so that when the VM using the device is deleted then # the tracker can not just free the device but also # mark them for removal. This will prevent a bug where # such a freed device is re-allocated before removed. self.to_be_removed_when_freed[existed.address] = existed continue else: # Note(yjiang5): no need to update stats if an assigned # device is hot removed. # NOTE(gibi): only remove the device from the pools if it # is not already removed if existed in self.stats.get_free_devs(): self.stats.remove_device(existed) else: # Update tracked devices. new_value: ty.Dict[str, ty.Any] new_value = next((dev for dev in devices if dev['address'] == existed.address)) new_value['compute_node_id'] = self.node_id if existed.status in (fields.PciDeviceStatus.CLAIMED, fields.PciDeviceStatus.ALLOCATED): # Pci properties may change while assigned because of # hotplug or config changes. Although normally this should # not happen. # As the devices have been assigned to an instance, # we defer the change till the instance is destroyed. # We will not sync the new properties with database # before that. # TODO(yjiang5): Not sure if this is a right policy, but # at least it avoids some confusion and, if needed, # we can add more action like killing the instance # by force in future. self.stale[new_value['address']] = new_value else: existed.update_device(new_value) self.stats.update_device(existed) # Track newly discovered devices. 
for dev in [dev for dev in devices if dev['address'] in new_addrs - exist_addrs]: dev['compute_node_id'] = self.node_id dev_obj = objects.PciDevice.create(self._context, dev) self.pci_devs.objects.append(dev_obj) self.stats.add_device(dev_obj) self._build_device_tree(self.pci_devs) def _claim_instance( self, context: ctx.RequestContext, pci_requests: 'objects.InstancePCIRequests', instance_numa_topology: 'objects.InstanceNUMATopology', ) -> ty.List['objects.PciDevice']: instance_cells = None if instance_numa_topology: instance_cells = instance_numa_topology.cells devs = self.stats.consume_requests(pci_requests.requests, instance_cells) if not devs: return [] instance_uuid = pci_requests.instance_uuid for dev in devs: dev.claim(instance_uuid) if instance_numa_topology and any( dev.numa_node is None for dev in devs): LOG.warning("Assigning a pci device without numa affinity to " "instance %(instance)s which has numa topology", {'instance': instance_uuid}) return devs def claim_instance( self, context: ctx.RequestContext, pci_requests: 'objects.InstancePCIRequests', instance_numa_topology: 'objects.InstanceNUMATopology', ) -> ty.List['objects.PciDevice']: devs = [] if self.pci_devs and pci_requests.requests: instance_uuid = pci_requests.instance_uuid devs = self._claim_instance(context, pci_requests, instance_numa_topology) if devs: self.claims[instance_uuid] = devs return devs def _allocate_instance( self, instance: 'objects.Instance', devs: ty.List['objects.PciDevice'], ) -> None: for dev in devs: dev.allocate(instance) def allocate_instance(self, instance: 'objects.Instance') -> None: devs = self.claims.pop(instance['uuid'], []) self._allocate_instance(instance, devs) if devs: self.allocations[instance['uuid']] += devs def free_device( self, dev: 'objects.PciDevice', instance: 'objects.Instance' ) -> None: """Free device from pci resource tracker :param dev: cloned pci device object that needs to be free :param instance: the instance that this pci device is allocated to """ for pci_dev in self.pci_devs: # Find the matching pci device in the pci resource tracker. # Once found, free it. if dev.id == pci_dev.id and dev.instance_uuid == instance['uuid']: self._remove_device_from_pci_mapping( instance['uuid'], pci_dev, self.allocations) self._remove_device_from_pci_mapping( instance['uuid'], pci_dev, self.claims) self._free_device(pci_dev) break def _remove_device_from_pci_mapping( self, instance_uuid: str, pci_device: 'objects.PciDevice', pci_mapping: MappingType, ) -> None: """Remove a PCI device from allocations or claims. If there are no more PCI devices, pop the uuid. """ pci_devices = pci_mapping.get(instance_uuid, []) if pci_device in pci_devices: pci_devices.remove(pci_device) if len(pci_devices) == 0: pci_mapping.pop(instance_uuid, None) def _free_device( self, dev: 'objects.PciDevice', instance: 'objects.Instance' = None, ) -> None: freed_devs = dev.free(instance) stale = self.stale.pop(dev.address, None) if stale: dev.update_device(stale) to_be_removed = self.to_be_removed_when_freed.pop(dev.address, None) if to_be_removed: dev.remove() if dev in self.stats.get_free_devs(): self.stats.remove_device(dev) else: for dev in freed_devs: self.stats.add_device(dev) def free_instance_allocations( self, context: ctx.RequestContext, instance: 'objects.Instance', ) -> None: """Free devices that are in ALLOCATED state for instance. 
:param context: user request context :param instance: instance object """ if not self.allocations.pop(instance['uuid'], None): return for dev in self.pci_devs: if (dev.status == fields.PciDeviceStatus.ALLOCATED and dev.instance_uuid == instance['uuid']): self._free_device(dev, instance) def free_instance_claims( self, context: ctx.RequestContext, instance: 'objects.Instance', ) -> None: """Free devices that are in CLAIMED state for instance. :param context: user request context (nova.context.RequestContext) :param instance: instance object """ if not self.claims.pop(instance['uuid'], None): return for dev in self.pci_devs: if (dev.status == fields.PciDeviceStatus.CLAIMED and dev.instance_uuid == instance['uuid']): self._free_device(dev, instance) def free_instance( self, context: ctx.RequestContext, instance: 'objects.Instance', ) -> None: """Free devices that are in CLAIMED or ALLOCATED state for instance. :param context: user request context (nova.context.RequestContext) :param instance: instance object """ # Note(yjiang5): When an instance is resized, the devices in the # destination node are claimed to the instance in prep_resize stage. # However, the instance contains only allocated devices # information, not the claimed one. So we can't use # instance['pci_devices'] to check the devices to be freed. self.free_instance_allocations(context, instance) self.free_instance_claims(context, instance) def update_pci_for_instance( self, context: ctx.RequestContext, instance: 'objects.Instance', sign: int, ) -> None: """Update PCI usage information if devices are de/allocated.""" if not self.pci_devs: return if sign == -1: self.free_instance(context, instance) if sign == 1: self.allocate_instance(instance) def clean_usage( self, instances: 'objects.InstanceList', migrations: 'objects.MigrationList', ) -> None: """Remove all usages for instances not passed in the parameter. The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock """ existed = set(inst.uuid for inst in instances) existed |= set(mig.instance_uuid for mig in migrations) # need to copy keys, because the dict is modified in the loop body for uuid in list(self.claims): if uuid not in existed: devs = self.claims.pop(uuid, []) for dev in devs: self._free_device(dev) # need to copy keys, because the dict is modified in the loop body for uuid in list(self.allocations): if uuid not in existed: devs = self.allocations.pop(uuid, []) for dev in devs: self._free_device(dev) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/pci/request.py0000664000175000017500000003202300000000000016342 0ustar00zuulzuul00000000000000# Copyright 2013 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
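# A flavor consumes an alias defined via [pci]alias through the
# ``pci_passthrough:alias`` extra spec; an illustrative request (the flavor
# name is hypothetical) for one device matching the "QuickAssist" alias shown
# below would be:
#
#   openstack flavor set my-flavor --property "pci_passthrough:alias"="QuickAssist:1"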
""" Example of a PCI alias:: | [pci] | alias = '{ | "name": "QuickAssist", | "product_id": "0443", | "vendor_id": "8086", | "device_type": "type-PCI", | "numa_policy": "legacy" | }' Aliases with the same name, device_type and numa_policy are ORed:: | [pci] | alias = '{ | "name": "QuickAssist", | "product_id": "0442", | "vendor_id": "8086", | "device_type": "type-PCI", | }' These two aliases define a device request meaning: vendor_id is "8086" and product_id is "0442" or "0443". """ import functools import typing as ty import jsonschema from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import uuidutils import nova.conf from nova import context as ctx from nova import exception from nova.i18n import _ from nova.network import model as network_model from nova import objects from nova.objects import fields as obj_fields from nova.pci import utils from oslo_utils import strutils Alias = ty.Dict[str, ty.Tuple[str, ty.List[ty.Dict[str, str]]]] PCI_NET_TAG = 'physical_network' PCI_TRUSTED_TAG = 'trusted' PCI_DEVICE_TYPE_TAG = 'dev_type' PCI_REMOTE_MANAGED_TAG = 'remote_managed' DEVICE_TYPE_FOR_VNIC_TYPE = { network_model.VNIC_TYPE_DIRECT_PHYSICAL: obj_fields.PciDeviceType.SRIOV_PF, network_model.VNIC_TYPE_VDPA: obj_fields.PciDeviceType.VDPA, } CONF = nova.conf.CONF LOG = logging.getLogger(__name__) _ALIAS_SCHEMA = { "type": "object", "additionalProperties": False, "properties": { "name": { "type": "string", "minLength": 1, "maxLength": 256, }, # TODO(stephenfin): This isn't used anywhere outside of tests and # should probably be removed. "capability_type": { "type": "string", "enum": ['pci'], }, "product_id": { "type": "string", "pattern": utils.PCI_VENDOR_PATTERN, }, "vendor_id": { "type": "string", "pattern": utils.PCI_VENDOR_PATTERN, }, "device_type": { "type": "string", # NOTE(sean-k-mooney): vDPA devices cannot currently be used with # alias-based PCI passthrough so we exclude it here "enum": [ obj_fields.PciDeviceType.STANDARD, obj_fields.PciDeviceType.SRIOV_PF, obj_fields.PciDeviceType.SRIOV_VF, ], }, "numa_policy": { "type": "string", "enum": list(obj_fields.PCINUMAAffinityPolicy.ALL), }, "resource_class": { "type": "string", }, "traits": { "type": "string", }, "live_migratable": { "type": "string", }, }, "required": ["name"], } def _validate_multispec(aliases): if CONF.filter_scheduler.pci_in_placement: alias_with_multiple_specs = [ name for name, spec in aliases.items() if len(spec[1]) > 1] if alias_with_multiple_specs: raise exception.PciInvalidAlias( "The PCI alias(es) %s have multiple specs but " "[filter_scheduler]pci_in_placement is True. The PCI in " "Placement feature only supports one spec per alias. You can " "assign the same resource_class to multiple [pci]device_spec " "matchers to allow using different devices for the same alias." % ",".join(alias_with_multiple_specs)) def _validate_required_ids(aliases): if CONF.filter_scheduler.pci_in_placement: alias_without_ids_or_rc = set() for name, alias in aliases.items(): for spec in alias[1]: ids = "vendor_id" in spec and "product_id" in spec rc = "resource_class" in spec if not ids and not rc: alias_without_ids_or_rc.add(name) if alias_without_ids_or_rc: raise exception.PciInvalidAlias( "The PCI alias(es) %s does not have vendor_id and product_id " "fields set or resource_class field set." 
% ",".join(sorted(alias_without_ids_or_rc))) else: alias_without_ids = set() for name, alias in aliases.items(): for spec in alias[1]: ids = "vendor_id" in spec and "product_id" in spec if not ids: alias_without_ids.add(name) if alias_without_ids: raise exception.PciInvalidAlias( "The PCI alias(es) %s does not have vendor_id and product_id " "fields set." % ",".join(sorted(alias_without_ids))) def _validate_aliases(aliases): """Checks the parsed aliases for common mistakes and raise easy to parse error messages """ _validate_multispec(aliases) _validate_required_ids(aliases) @functools.cache def get_alias_from_config() -> Alias: """Parse and validate PCI aliases from the nova config. :returns: A dictionary where the keys are alias names and the values are tuples of form ``(numa_policy, specs)``. ``numa_policy`` describes the required NUMA affinity of the device(s), while ``specs`` is a list of PCI device specs. :raises: exception.PciInvalidAlias if two aliases with the same name have different device types or different NUMA policies. """ jaliases = CONF.pci.alias # map alias name to alias spec list aliases: Alias = {} try: for jsonspecs in jaliases: spec = jsonutils.loads(jsonspecs) jsonschema.validate(spec, _ALIAS_SCHEMA) name = spec.pop('name').strip() numa_policy = spec.pop('numa_policy', None) if not numa_policy: numa_policy = obj_fields.PCINUMAAffinityPolicy.LEGACY dev_type = spec.pop('device_type', None) if dev_type: spec['dev_type'] = dev_type live_migratable = spec.pop('live_migratable', None) if live_migratable is not None: live_migratable = ( "true" if strutils.bool_from_string(live_migratable, strict=True) else "false" ) spec['live_migratable'] = live_migratable if name not in aliases: aliases[name] = (numa_policy, [spec]) continue if aliases[name][0] != numa_policy: reason = _("NUMA policy mismatch for alias '%s'") % name raise exception.PciInvalidAlias(reason=reason) if aliases[name][1][0]['dev_type'] != spec['dev_type']: reason = _("Device type mismatch for alias '%s'") % name raise exception.PciInvalidAlias(reason=reason) aliases[name][1].append(spec) except exception.PciInvalidAlias: raise except jsonschema.exceptions.ValidationError as exc: raise exception.PciInvalidAlias(reason=exc.message) except Exception as exc: raise exception.PciInvalidAlias(reason=str(exc)) _validate_aliases(aliases) return aliases def _translate_alias_to_requests( alias_spec: str, affinity_policy: ty.Optional[str] = None, ) -> ty.List['objects.InstancePCIRequest']: """Generate complete pci requests from pci aliases in extra_spec.""" pci_aliases = get_alias_from_config() pci_requests: ty.List[objects.InstancePCIRequest] = [] for name, count in [spec.split(':') for spec in alias_spec.split(',')]: name = name.strip() if name not in pci_aliases: raise exception.PciRequestAliasNotDefined(alias=name) numa_policy, spec = pci_aliases[name] policy = affinity_policy or numa_policy # NOTE(gibi): InstancePCIRequest has a requester_id field that could # be filled with the flavor.flavorid but currently there is no special # handling for InstancePCIRequests created from the flavor. So it is # left empty. 
pci_requests.append(objects.InstancePCIRequest( count=int(count), spec=spec, alias_name=name, numa_policy=policy, request_id=uuidutils.generate_uuid(), )) return pci_requests def get_instance_pci_request_from_vif( context: ctx.RequestContext, instance: 'objects.Instance', vif: network_model.VIF, ) -> ty.Optional['objects.InstancePCIRequest']: """Given an Instance, return the PCI request associated to the PCI device related to the given VIF (if any) on the compute node the instance is currently running. In this method we assume a VIF is associated with a PCI device if 'pci_slot' attribute exists in the vif 'profile' dict. :param context: security context :param instance: instance object :param vif: network VIF model object :raises: raises PciRequestFromVIFNotFound if a pci device is requested but not found on current host :return: instance's PCIRequest object associated with the given VIF or None if no PCI device is requested """ # Get PCI device address for VIF if exists vif_pci_dev_addr = vif['profile'].get('pci_slot') \ if vif['profile'] else None if not vif_pci_dev_addr: return None try: cn_id = objects.ComputeNode.get_by_host_and_nodename( context, instance.host, instance.node).id except exception.NotFound: LOG.warning("expected to find compute node with host %s " "and node %s when getting instance PCI request " "from VIF", instance.host, instance.node) return None # Find PCIDevice associated with vif_pci_dev_addr on the compute node # the instance is running on. found_pci_dev = None for pci_dev in instance.pci_devices: if (pci_dev.compute_node_id == cn_id and pci_dev.address == vif_pci_dev_addr): found_pci_dev = pci_dev break if not found_pci_dev: return None # Find PCIRequest associated with the given PCIDevice in instance for pci_req in instance.pci_requests.requests: if pci_req.request_id == found_pci_dev.request_id: return pci_req raise exception.PciRequestFromVIFNotFound( pci_slot=vif_pci_dev_addr, node_id=cn_id) def get_pci_requests_from_flavor( flavor: 'objects.Flavor', affinity_policy: ty.Optional[str] = None, ) -> 'objects.InstancePCIRequests': """Validate and return PCI requests. The ``pci_passthrough:alias`` extra spec describes the flavor's PCI requests. The extra spec's value is a comma-separated list of format ``alias_name_x:count, alias_name_y:count, ... ``, where ``alias_name`` is defined in ``pci.alias`` configurations. The flavor's requirement is translated into a PCI requests list. Each entry in the list is an instance of nova.objects.InstancePCIRequests with four keys/attributes. - 'spec' states the PCI device properties requirement - 'count' states the number of devices - 'alias_name' (optional) is the corresponding alias definition name - 'numa_policy' (optional) states the required NUMA affinity of the devices For example, assume alias configuration is:: { 'vendor_id':'8086', 'device_id':'1502', 'name':'alias_1' } While flavor extra specs includes:: 'pci_passthrough:alias': 'alias_1:2' The returned ``pci_requests`` are:: [{ 'count':2, 'specs': [{'vendor_id':'8086', 'device_id':'1502'}], 'alias_name': 'alias_1' }] :param flavor: The flavor to be checked :param affinity_policy: pci numa affinity policy :returns: A list of PCI requests :rtype: nova.objects.InstancePCIRequests :raises: exception.PciRequestAliasNotDefined if an invalid PCI alias is provided :raises: exception.PciInvalidAlias if the configuration contains invalid aliases. 
""" pci_requests: ty.List[objects.InstancePCIRequest] = [] if ('extra_specs' in flavor and 'pci_passthrough:alias' in flavor['extra_specs']): pci_requests = _translate_alias_to_requests( flavor['extra_specs']['pci_passthrough:alias'], affinity_policy=affinity_policy) return objects.InstancePCIRequests(requests=pci_requests) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/pci/stats.py0000664000175000017500000013234300000000000016016 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Intel, Inc. # Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import copy import typing as ty from oslo_config import cfg from oslo_log import log as logging from oslo_utils import strutils from nova import exception from nova import objects from nova.objects import fields from nova.objects import pci_device_pool from nova.pci.request import PCI_REMOTE_MANAGED_TAG from nova.pci import utils from nova.pci import whitelist CONF = cfg.CONF LOG = logging.getLogger(__name__) # TODO(stephenfin): We might want to use TypedDict here. Refer to # https://mypy.readthedocs.io/en/latest/kinds_of_types.html#typeddict for # more information. Pool = ty.Dict[str, ty.Any] class PciDeviceStats(object): """PCI devices summary information. According to the PCI SR-IOV spec, a PCI physical function can have up to 256 PCI virtual functions, thus the number of assignable PCI functions in a cloud can be big. The scheduler needs to know all device availability information in order to determine which compute hosts can support a PCI request. Passing individual virtual device information to the scheduler does not scale, so we provide summary information. Usually the virtual functions provided by a host PCI device have the same value for most properties, like vendor_id, product_id and class type. The PCI stats class summarizes this information for the scheduler. The pci stats information is maintained exclusively by compute node resource tracker and updated to database. The scheduler fetches the information and selects the compute node accordingly. If a compute node is selected, the resource tracker allocates the devices to the instance and updates the pci stats information. This summary information will be helpful for cloud management also. """ pool_keys = ['product_id', 'vendor_id', 'numa_node', 'dev_type'] # these can be specified in the [pci]device_spec and can be requested via # the PCI alias, but they are matched by the placement # allocation_candidates query, so we can ignore them during pool creation # and during filtering here ignored_spec_tags = ignored_pool_tags = ['resource_class', 'traits'] # this is a metadata key in the spec that is matched # specially in _filter_pools_based_on_placement_allocation. So we can # ignore them in the general matching logic. 
ignored_spec_tags += ['rp_uuids'] # this is a metadata key in the pool that is matched # specially in _filter_pools_based_on_placement_allocation. So we can # ignore them in the general matching logic. ignored_pool_tags += ['rp_uuid'] def __init__( self, numa_topology: 'objects.NUMATopology', stats: 'objects.PciDevicePoolList' = None, dev_filter: ty.Optional[whitelist.Whitelist] = None, ) -> None: self.numa_topology = numa_topology self.pools = ( [pci_pool.to_dict() for pci_pool in stats] if stats else [] ) self.pools.sort(key=lambda item: len(item)) self.dev_filter = dev_filter or whitelist.Whitelist( CONF.pci.device_spec) def _equal_properties( self, dev: Pool, entry: Pool, matching_keys: ty.List[str], ) -> bool: return all(dev.get(prop) == entry.get(prop) for prop in matching_keys) def _find_pool(self, dev_pool: Pool) -> ty.Optional[Pool]: """Return the first pool that matches dev.""" for pool in self.pools: pool_keys = pool.copy() del pool_keys['count'] del pool_keys['devices'] for tag in self.ignored_pool_tags: pool_keys.pop(tag, None) if (len(pool_keys.keys()) == len(dev_pool.keys()) and self._equal_properties(dev_pool, pool_keys, list(dev_pool))): return pool return None @staticmethod def _ensure_remote_managed_tag( dev: 'objects.PciDevice', pool: Pool): """Add a remote_managed tag depending on a device type if needed. Network devices may be managed remotely, e.g. by a SmartNIC DPU. If a tag has not been explicitly provided, populate it by assuming that a device is not remote managed by default. """ if dev.dev_type not in (fields.PciDeviceType.SRIOV_VF, fields.PciDeviceType.SRIOV_PF, fields.PciDeviceType.VDPA): return # A tag is added here rather than at the client side to avoid an # issue with having objects without this tag specified during an # upgrade to the first version that supports handling this tag. if pool.get(PCI_REMOTE_MANAGED_TAG) is None: # NOTE: tags are compared as strings case-insensitively, see # pci_device_prop_match in nova/pci/utils.py. pool[PCI_REMOTE_MANAGED_TAG] = 'false' def _create_pool_keys_from_dev( self, dev: 'objects.PciDevice', ) -> ty.Optional[Pool]: """Create a stats pool dict that this dev is supposed to be part of Note that this pool dict contains the stats pool's keys and their values. 'count' and 'devices' are not included. """ # Don't add a device that doesn't have a matching device spec. # This can happen during initial sync up with the controller devspec = self.dev_filter.get_devspec(dev) if not devspec: return None tags = devspec.get_tags() pool = {k: getattr(dev, k) for k in self.pool_keys} if tags: pool.update( { k: v for k, v in tags.items() if k not in self.ignored_pool_tags } ) # NOTE(gibi): since PCI in placement maps a PCI dev or a PF to a # single RP and the scheduler allocates from a specific RP we need # to split the pools by PCI or PF address. We can still keep # the VFs from the same parent PF in a single pool though as they # are equivalent from placement perspective. pool['address'] = dev.parent_addr or dev.address # NOTE(gibi): parent_ifname acts like a tag during pci claim but # not provided as part of the whitelist spec as it is auto detected # by the virt driver. # This key is used for match InstancePciRequest backed by neutron ports # that has resource_request and therefore that has resource allocation # already in placement. 
if dev.extra_info.get('parent_ifname'): pool['parent_ifname'] = dev.extra_info['parent_ifname'] self._ensure_remote_managed_tag(dev, pool) return pool def _get_pool_with_device_type_mismatch( self, dev: 'objects.PciDevice', ) -> ty.Optional[ty.Tuple[Pool, 'objects.PciDevice']]: """Check for device type mismatch in the pools for a given device. Return (pool, device) if device type does not match or a single None if the device type matches. """ for pool in self.pools: for device in pool['devices']: if device.address == dev.address: if dev.dev_type != pool["dev_type"]: return pool, device return None return None def update_device(self, dev: 'objects.PciDevice') -> None: """Update a device to its matching pool.""" pool_device_info = self._get_pool_with_device_type_mismatch(dev) if pool_device_info is None: return None pool, device = pool_device_info pool['devices'].remove(device) self._decrease_pool_count(self.pools, pool) self.add_device(dev) def add_device(self, dev: 'objects.PciDevice') -> None: """Add a device to its matching pool.""" dev_pool = self._create_pool_keys_from_dev(dev) if dev_pool: pool = self._find_pool(dev_pool) if not pool: dev_pool['count'] = 0 dev_pool['devices'] = [] self.pools.append(dev_pool) self.pools.sort(key=lambda item: len(item)) pool = dev_pool pool['count'] += 1 pool['devices'].append(dev) @staticmethod def _decrease_pool_count( pool_list: ty.List[Pool], pool: Pool, count: int = 1, ) -> int: """Decrement pool's size by count. If pool becomes empty, remove pool from pool_list. """ if pool['count'] > count: pool['count'] -= count count = 0 else: count -= pool['count'] pool_list.remove(pool) return count def remove_device(self, dev: 'objects.PciDevice') -> None: """Remove one device from the first pool that it matches.""" dev_pool = self._create_pool_keys_from_dev(dev) if dev_pool: pool = self._find_pool(dev_pool) if not pool: raise exception.PciDevicePoolEmpty( compute_node_id=dev.compute_node_id, address=dev.address) pool['devices'].remove(dev) self._decrease_pool_count(self.pools, pool) def get_free_devs(self) -> ty.List['objects.PciDevice']: free_devs: ty.List[objects.PciDevice] = [] for pool in self.pools: free_devs.extend(pool['devices']) return free_devs def _allocate_devs( self, pool: Pool, num: int, request_id: str ) -> ty.List["objects.PciDevice"]: alloc_devices = [] for _ in range(num): pci_dev = pool['devices'].pop() self._handle_device_dependents(pci_dev) pci_dev.request_id = request_id alloc_devices.append(pci_dev) return alloc_devices def consume_requests( self, pci_requests: 'objects.InstancePCIRequests', numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None, ) -> ty.Optional[ty.List['objects.PciDevice']]: alloc_devices: ty.List[objects.PciDevice] = [] for request in pci_requests: count = request.count rp_uuids = self._get_rp_uuids_for_request( request=request, provider_mapping=None) pools = self._filter_pools( self.pools, request, numa_cells, rp_uuids=rp_uuids) # Failed to allocate the required number of devices. Return the # devices already allocated during previous iterations back to # their pools if not pools: LOG.error("Failed to allocate PCI devices for instance. " "Unassigning devices back to pools. 
" "This should not happen, since the scheduler " "should have accurate information, and allocation " "during claims is controlled via a hold " "on the compute node semaphore.") for d in range(len(alloc_devices)): self.add_device(alloc_devices.pop()) raise exception.PciDeviceRequestFailed(requests=pci_requests) if not rp_uuids: # if there is no placement allocation then we are free to # consume from the pools in any order: for pool in pools: if pool['count'] >= count: num_alloc = count else: num_alloc = pool['count'] count -= num_alloc pool['count'] -= num_alloc alloc_devices += self._allocate_devs( pool, num_alloc, request.request_id) if count == 0: break else: # but if there is placement allocation then we have to follow # it if not self._assert_one_pool_per_rp_uuid(pools): raise exception.PciDeviceRequestFailed( requests=pci_requests) requested_devs_per_pool_rp = collections.Counter(rp_uuids) for pool in pools: count = requested_devs_per_pool_rp[pool['rp_uuid']] pool['count'] -= count alloc_devices += self._allocate_devs( pool, count, request.request_id) # we consumed all the requested devices for the rp_uuid # so we can drop that rp_uuid from the request. requested_devs_per_pool_rp.pop(pool['rp_uuid'], None) return alloc_devices def _handle_device_dependents(self, pci_dev: 'objects.PciDevice') -> None: """Remove device dependents or a parent from pools. In case the device is a PF, all of it's dependent VFs should be removed from pools count, if these are present. When the device is a VF, or a VDPA device, it's parent PF pool count should be decreased, unless it is no longer in a pool. """ if pci_dev.dev_type == fields.PciDeviceType.SRIOV_PF: vfs_list = pci_dev.child_devices if vfs_list: free_devs = self.get_free_devs() for vf in vfs_list: # NOTE(gibi): do not try to remove a device that are # already removed if vf in free_devs: self.remove_device(vf) elif pci_dev.dev_type in ( fields.PciDeviceType.SRIOV_VF, fields.PciDeviceType.VDPA, ): try: parent = pci_dev.parent_device # Make sure not to decrease PF pool count if this parent has # been already removed from pools if parent in self.get_free_devs(): self.remove_device(parent) except exception.PciDeviceNotFound: return def _filter_pools_for_spec( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', ) -> ty.List[Pool]: """Filter out pools that don't match the request's device spec. Exclude pools that do not match the specified ``vendor_id``, ``product_id`` and/or ``device_type`` field, or any of the other arbitrary tags such as ``physical_network``, specified in the request. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :returns: A list of pools that can be used to support the request if this is possible. """ def ignore_keys(spec): return { k: v for k, v in spec.items() if k not in self.ignored_spec_tags } request_specs = [ignore_keys(spec) for spec in request.spec] return [ pool for pool in pools if utils.pci_device_prop_match(pool, request_specs) ] def _filter_pools_for_numa_cells( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']], ) -> ty.List[Pool]: """Filter out pools with the wrong NUMA affinity, if required. Exclude pools that do not have *suitable* PCI NUMA affinity. ``numa_policy`` determines what *suitable* means, being one of PREFERRED (nice-to-have), LEGACY (must-have-if-available) and REQUIRED (must-have). 
We iterate through the various policies in order of strictness. This means that even if we only *prefer* PCI-NUMA affinity, we will still attempt to provide it if possible. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :param numa_cells: A list of InstanceNUMACell objects whose ``id`` corresponds to the ``id`` of host NUMACells. :returns: A list of pools that can, together, provide at least ``requested_count`` PCI devices with the level of NUMA affinity required by ``numa_policy``, else all pools that can satisfy this policy even if it's not enough. """ if not numa_cells: return pools # we default to the 'legacy' policy for...of course...legacy reasons requested_policy = fields.PCINUMAAffinityPolicy.LEGACY if 'numa_policy' in request: requested_policy = request.numa_policy or requested_policy requested_count = request.count numa_cell_ids = [cell.id for cell in numa_cells] # filter out pools which numa_node is not included in numa_cell_ids filtered_pools = [ pool for pool in pools if any(utils.pci_device_prop_match( pool, [{'numa_node': cell}]) for cell in numa_cell_ids)] # we can't apply a less strict policy than the one requested, so we # need to return if we've demanded a NUMA affinity of REQUIRED. # However, NUMA affinity is a good thing. If we can get enough devices # with the stricter policy then we will use them. if requested_policy == fields.PCINUMAAffinityPolicy.REQUIRED or sum( pool['count'] for pool in filtered_pools) >= requested_count: return filtered_pools # the SOCKET policy is a bit of a special case. It's less strict than # REQUIRED (so REQUIRED will automatically fulfil SOCKET, at least # with our assumption of never having multiple sockets per NUMA node), # but not always more strict than LEGACY: a PCI device with no NUMA # affinity will fulfil LEGACY but not SOCKET. If we have SOCKET, # process it here and don't continue. if requested_policy == fields.PCINUMAAffinityPolicy.SOCKET: return self._filter_pools_for_socket_affinity(pools, numa_cells) # some systems don't report NUMA node info for PCI devices, in which # case None is reported in 'pci_device.numa_node'. The LEGACY policy # allows us to use these devices so we include None in the list of # suitable NUMA cells. numa_cell_ids.append(None) # filter out pools which numa_node is not included in numa_cell_ids filtered_pools = [ pool for pool in pools if any(utils.pci_device_prop_match( pool, [{'numa_node': cell}]) for cell in numa_cell_ids)] # once again, we can't apply a less strict policy than the one # requested, so we need to return if we've demanded a NUMA affinity of # LEGACY. Similarly, we will also return if we have enough devices to # satisfy this somewhat strict policy. if requested_policy == fields.PCINUMAAffinityPolicy.LEGACY or sum( pool['count'] for pool in filtered_pools) >= requested_count: return filtered_pools # if we've got here, we're using the PREFERRED policy and weren't able # to provide anything with stricter affinity. Use whatever devices you # can, folks. return sorted( pools, key=lambda pool: pool.get('numa_node') not in numa_cell_ids) def _filter_pools_for_socket_affinity( self, pools: ty.List[Pool], numa_cells: ty.List['objects.InstanceNUMACell'], ) -> ty.List[Pool]: host_cells = self.numa_topology.cells # bail early if we don't have socket information for all host_cells. 
# This could happen if we're running on an weird older system with # multiple sockets per NUMA node, which is a configuration that we # explicitly chose not to support. if any(cell.socket is None for cell in host_cells): LOG.debug('No socket information in host NUMA cell(s).') return [] # get a set of host sockets that the guest cells are in. Since guest # cell IDs map to host cell IDs, we can just lookup the latter's # socket. socket_ids = set() for guest_cell in numa_cells: for host_cell in host_cells: if guest_cell.id == host_cell.id: socket_ids.add(host_cell.socket) # now get a set of host NUMA nodes that are in the above sockets allowed_numa_nodes = set() for host_cell in host_cells: if host_cell.socket in socket_ids: allowed_numa_nodes.add(host_cell.id) # filter out pools that are not in one of the correct host NUMA nodes. return [ pool for pool in pools if any( utils.pci_device_prop_match(pool, [{'numa_node': numa_node}]) for numa_node in allowed_numa_nodes ) ] def _filter_pools_for_unrequested_pfs( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', ) -> ty.List[Pool]: """Filter out pools with PFs, unless these are required. This is necessary in cases where PFs and VFs have the same product_id and generally useful elsewhere. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :returns: A list of pools that can be used to support the request if this is possible. """ if all( spec.get('dev_type') != fields.PciDeviceType.SRIOV_PF for spec in request.spec ): pools = [ pool for pool in pools if not pool.get('dev_type') == fields.PciDeviceType.SRIOV_PF ] return pools def _filter_pools_for_unrequested_vdpa_devices( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', ) -> ty.List[Pool]: """Filter out pools with VDPA devices, unless these are required. This is necessary as vdpa devices require special handling and should not be allocated to generic pci device requests. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :returns: A list of pools that can be used to support the request if this is possible. """ if all( spec.get('dev_type') != fields.PciDeviceType.VDPA for spec in request.spec ): pools = [ pool for pool in pools if not pool.get('dev_type') == fields.PciDeviceType.VDPA ] return pools def _filter_pools_for_unrequested_remote_managed_devices( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', ) -> ty.List[Pool]: """Filter out pools with remote_managed devices, unless requested. Remote-managed devices are not usable for legacy SR-IOV or hardware offload scenarios and must be excluded from allocation. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :returns: A list of pools that can be used to support the request if this is possible. 
""" if all(not strutils.bool_from_string(spec.get(PCI_REMOTE_MANAGED_TAG)) for spec in request.spec): pools = [pool for pool in pools if not strutils.bool_from_string( pool.get(PCI_REMOTE_MANAGED_TAG))] return pools def _filter_pools_based_on_placement_allocation( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', rp_uuids: ty.List[str], ) -> ty.List[Pool]: if not rp_uuids: # If there is no placement allocation then we don't need to filter # by it. This could happen if the instance only has neutron port # based InstancePCIRequest as that is currently not having # placement allocation (except for QoS ports, but that handled in a # separate codepath) or if the [filter_scheduler]pci_in_placement # configuration option is not enabled in the scheduler. return pools requested_dev_count_per_rp = collections.Counter(rp_uuids) matching_pools = [] for pool in pools: rp_uuid = pool.get('rp_uuid') if rp_uuid is None: # NOTE(gibi): As rp_uuids is not empty the scheduler allocated # PCI resources on this host, so we know that # [pci]report_in_placement is enabled on this host. But this # pool has no RP mapping which can only happen if the pool # contains PCI devices with physical_network tag, as those # devices not yet reported in placement. But if they are not # reported then we can ignore them here too. continue if ( # the placement allocation contains this pool rp_uuid in requested_dev_count_per_rp and # the amount of dev allocated in placement can be consumed # from the pool pool["count"] >= requested_dev_count_per_rp[rp_uuid] ): matching_pools.append(pool) return matching_pools def _filter_pools_for_live_migratable_devices( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', ) -> ty.List[Pool]: """Filter out pools with non live_migratable devices. :param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :returns: A list of pools that can be used to support the request if this is possible. """ # The following code handles the case where 'live_migratable' is # set (either "true" or "false") by filtering devices to select the # appropriate ones. # If it is not set, we skip the next code block and no filtering # is applied to the pools. if all(spec.get("live_migratable") == 'true' for spec in request.spec): # if all specs require live migratable devices, then we need to # reduce the pools by the ones that support them. pools = [pool for pool in pools if pool.get("live_migratable") and pool['live_migratable'] == 'true'] elif all( spec.get("live_migratable") == "false" for spec in request.spec ): # If the request asks to NOT support live-migratable devices, then # we don't provide the ones that support them. # We want to exclude the devices that don't have this value yet. pools = [pool for pool in pools if pool.get("live_migratable") and pool['live_migratable'] == 'false'] return pools def _filter_pools( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']], rp_uuids: ty.List[str], ) -> ty.Optional[ty.List[Pool]]: """Determine if an individual PCI request can be met. Filter pools, which are collections of devices with similar traits, to identify those that can support the provided PCI request. If ``numa_cells`` is provided then NUMA locality may be taken into account, depending on the value of ``request.numa_policy``. 
:param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :param numa_cells: A list of InstanceNUMACell objects whose ``id`` corresponds to the ``id`` of host NUMACell objects. :param rp_uuids: A list of PR uuids this request fulfilled from in placement. So here we have to consider only the pools matching with these RP uuids :returns: A list of pools that can be used to support the request if this is possible, else None. """ # NOTE(vladikr): This code may be open to race conditions. # Two concurrent requests may succeed when called support_requests # because this method does not remove related devices from the pools # Firstly, let's exclude all devices that don't match our spec (e.g. # they've got different PCI IDs or something) before_count = sum([pool['count'] for pool in pools]) pools = self._filter_pools_for_spec(pools, request) after_count = sum([pool['count'] for pool in pools]) if after_count < before_count: LOG.debug( 'Dropped %d device(s) due to mismatched PCI attribute(s)', before_count - after_count ) if after_count < request.count: LOG.debug('Not enough PCI devices left to satisfy request') return None # Next, let's exclude all devices that aren't on the correct NUMA node # or socket, *assuming* we have devices and care about that, as # determined by policy before_count = after_count pools = self._filter_pools_for_numa_cells(pools, request, numa_cells) after_count = sum([pool['count'] for pool in pools]) if after_count < before_count: LOG.debug( 'Dropped %d device(s) as they are on the wrong NUMA node(s)', before_count - after_count ) if after_count < request.count: LOG.debug('Not enough PCI devices left to satisfy request') return None # If we're not requesting PFs then we should not use these. # Exclude them. before_count = after_count pools = self._filter_pools_for_unrequested_pfs(pools, request) after_count = sum([pool['count'] for pool in pools]) if after_count < before_count: LOG.debug( 'Dropped %d device(s) as they are PFs which we have not ' 'requested', before_count - after_count ) if after_count < request.count: LOG.debug('Not enough PCI devices left to satisfy request') return None # If we're not requesting VDPA devices then we should not use these # either. Exclude them. before_count = after_count pools = self._filter_pools_for_unrequested_vdpa_devices(pools, request) after_count = sum([pool['count'] for pool in pools]) if after_count < before_count: LOG.debug( 'Dropped %d device(s) as they are VDPA devices which we have ' 'not requested', before_count - after_count ) # If we're not requesting remote_managed devices then we should not # use these either. Exclude them. 
before_count = after_count pools = self._filter_pools_for_unrequested_remote_managed_devices( pools, request) after_count = sum([pool['count'] for pool in pools]) if after_count < before_count: LOG.debug( 'Dropped %d device(s) as they are remote-managed devices which' 'we have not requested', before_count - after_count ) # if there is placement allocation for the request then we have to # remove the pools that are not in the placement allocation before_count = after_count pools = self._filter_pools_based_on_placement_allocation( pools, request, rp_uuids) after_count = sum([pool['count'] for pool in pools]) if after_count < before_count: LOG.debug( 'Dropped %d device(s) that are not part of the placement ' 'allocation', before_count - after_count ) before_count = after_count pools = self._filter_pools_for_live_migratable_devices( pools, request) after_count = sum([pool['count'] for pool in pools]) if after_count < before_count: LOG.debug( 'Dropped %d device(s) that are not live migratable', before_count - after_count ) if after_count < request.count: LOG.debug('Not enough PCI devices left to satisfy request') return None return pools def support_requests( self, requests: ty.List['objects.InstancePCIRequest'], provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]], numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None, ) -> bool: """Determine if the PCI requests can be met. Determine, based on a compute node's PCI stats, if an instance can be scheduled on the node. **Support does not mean real allocation**. If ``numa_cells`` is provided then NUMA locality may be taken into account, depending on the value of ``numa_policy``. :param requests: A list of InstancePCIRequest object describing the types, quantities and required NUMA affinities of devices we want. :type requests: nova.objects.InstancePCIRequests :param provider_mapping: A dict keyed by RequestGroup requester_id, to a list of resource provider UUIDs which provide resource for that RequestGroup. If it is None then it signals that the InstancePCIRequest objects already stores a mapping per request. I.e.: we are called _after_ the scheduler made allocations for this request in placement. :param numa_cells: A list of InstanceNUMACell objects whose ``id`` corresponds to the ``id`` of host NUMACells, or None. :returns: Whether this compute node can satisfy the given request. """ # try to apply the requests on the copy of the stats if it applies # cleanly then we know that the requests is supported. We call apply # only on a copy as we don't want to actually consume resources from # the pool as at this point this is just a test during host filtering. # Later the scheduler will call apply_request to consume on the # selected host. The compute will call consume_request during PCI claim # to consume not just from the pools but also consume PciDevice # objects. stats = copy.deepcopy(self) try: stats.apply_requests(requests, provider_mapping, numa_cells) except exception.PciDeviceRequestFailed: return False return True def _apply_request( self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest', rp_uuids: ty.List[str], numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None, ) -> bool: """Apply an individual PCI request. Apply a PCI request against a given set of PCI device pools, which are collections of devices with similar traits. If ``numa_cells`` is provided then NUMA locality may be taken into account, depending on the value of ``request.numa_policy``. 
:param pools: A list of PCI device pool dicts :param request: An InstancePCIRequest object describing the type, quantity and required NUMA affinity of device(s) we want. :param rp_uuids: A list of PR uuids this request fulfilled from in placement :param numa_cells: A list of InstanceNUMACell objects whose ``id`` corresponds to the ``id`` of host NUMACell objects. :returns: True if the request was applied against the provided pools successfully, else False. """ # NOTE(vladikr): This code maybe open to race conditions. # Two concurrent requests may succeed when called support_requests # because this method does not remove related devices from the pools filtered_pools = self._filter_pools( pools, request, numa_cells, rp_uuids) if not filtered_pools: return False if not rp_uuids: # If there is no placement allocation for this request then we are # free to consume from the filtered pools in any order count = request.count for pool in filtered_pools: count = self._decrease_pool_count(pools, pool, count) if not count: break else: # but if there is placement allocation then we have to follow that if not self._assert_one_pool_per_rp_uuid(pools): return False requested_devs_per_pool_rp = collections.Counter(rp_uuids) for pool in filtered_pools: count = requested_devs_per_pool_rp[pool['rp_uuid']] pool['count'] -= count # we consumed all the requested devices for the rp_uuid # so we can drop that rp_uuid from the request. requested_devs_per_pool_rp.pop(pool['rp_uuid'], None) if pool['count'] == 0: pools.remove(pool) return True def _get_rp_uuids_for_request( self, provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]], request: 'objects.InstancePCIRequest' ) -> ty.List[str]: """Return the list of RP uuids that are fulfilling the request. An RP will be in the list as many times as many devices needs to be allocated from that RP. """ if request.source == objects.InstancePCIRequest.NEUTRON_PORT: # TODO(gibi): support neutron based requests in a later cycle # an empty list will signal that any PCI pool can be used for this # request return [] if not provider_mapping: # NOTE(gibi): AFAIK specs is always a list of a single dict # but the object is hard to change retroactively rp_uuids = request.spec[0].get('rp_uuids') if not rp_uuids: # This can happen if [filter_scheduler]pci_in_placement is not # enabled yet # An empty list will signal that any PCI pool can be used for # this request return [] # TODO(gibi): this is baaad but spec is a dict of string so # the list is serialized return rp_uuids.split(',') # NOTE(gibi): the PCI prefilter generates RequestGroup suffixes from # InstancePCIRequests in the form of {request_id}-{count_index} # NOTE(gibi): a suffixed request group always fulfilled from a single # RP return [ rp_uuids[0] for group_id, rp_uuids in provider_mapping.items() if group_id.startswith(request.request_id) ] def apply_requests( self, requests: ty.List['objects.InstancePCIRequest'], provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]], numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None, ) -> None: """Apply PCI requests to the PCI stats. This is used in multiple instance creation, when the scheduler has to maintain how the resources are consumed by the instances. If ``numa_cells`` is provided then NUMA locality may be taken into account, depending on the value of ``numa_policy``. :param requests: A list of InstancePCIRequest object describing the types, quantities and required NUMA affinities of devices we want. 
:type requests: nova.objects.InstancePCIRequests :param provider_mapping: A dict keyed by RequestGroup requester_id, to a list of resource provider UUIDs which provide resource for that RequestGroup. If it is None then it signals that the InstancePCIRequest objects already stores a mapping per request. I.e.: we are called _after_ the scheduler made allocations for this request in placement. :param numa_cells: A list of InstanceNUMACell objects whose ``id`` corresponds to the ``id`` of host NUMACells, or None. :raises: exception.PciDeviceRequestFailed if this compute node cannot satisfy the given request. """ for r in requests: rp_uuids = self._get_rp_uuids_for_request(provider_mapping, r) if not self._apply_request(self.pools, r, rp_uuids, numa_cells): raise exception.PciDeviceRequestFailed(requests=requests) def __iter__(self) -> ty.Iterator[Pool]: pools: ty.List[Pool] = [] for pool in self.pools: pool = copy.deepcopy(pool) # 'devices' shouldn't be part of stats if 'devices' in pool: del pool['devices'] pools.append(pool) return iter(pools) def clear(self) -> None: """Clear all the stats maintained.""" self.pools = [] def __eq__(self, other: object) -> bool: if not isinstance(other, PciDeviceStats): return NotImplemented return self.pools == other.pools def to_device_pools_obj(self) -> 'objects.PciDevicePoolList': """Return the contents of the pools as a PciDevicePoolList object.""" stats = [x for x in self] return pci_device_pool.from_pci_stats(stats) def has_remote_managed_device_pools(self) -> bool: """Determine whether remote managed device pools are present on a host. The check is pool-based, not free device-based and is NUMA cell agnostic. """ dummy_req = objects.InstancePCIRequest( count=0, spec=[{'remote_managed': True}] ) pools = self._filter_pools_for_spec(self.pools, dummy_req) return bool(pools) def populate_pools_metadata_from_assigned_devices(self): """Populate the rp_uuid of each pool based on the rp_uuid of the devices assigned to the pool. This can only be called from the compute where devices are assigned to each pool. This should not be called from the scheduler as there device - pool assignment is not known. """ # PciDevices are tracked in placement and flavor based PCI requests # are scheduled and allocated in placement. To be able to correlate # what is allocated in placement and what is consumed in nova we # need to map device pools to RPs. We can do that as the PciDevice # contains the RP UUID that represents it in placement. # NOTE(gibi): We cannot do this when the device is originally added to # the pool as the device -> placement translation, that creates the # RPs, runs after all the device is created and assigned to pools. for pool in self.pools: pool_rps = { dev.extra_info.get("rp_uuid") for dev in pool["devices"] if "rp_uuid" in dev.extra_info } if len(pool_rps) >= 2: # FIXME(gibi): Do we have a 1:1 pool - RP mapping even # if two PFs providing very similar VFs? raise ValueError( "We have a pool %s connected to more than one RPs %s in " "placement via devs %s" % (pool, pool_rps, pool["devices"]) ) if not pool_rps: # this can happen if the nova-compute is upgraded to have the # PCI in placement inventory handling code but # [pci]report_in_placement is not turned on yet. continue if pool_rps: # now we know that it is a single RP pool['rp_uuid'] = next(iter(pool_rps)) @staticmethod def _assert_one_pool_per_rp_uuid(pools: ty.List[Pool]) -> bool: """Asserts that each pool has a unique rp_uuid if any :param pools: A list of Pool objects. 
:return: True if each pool has a unique rp_uuid or no rp_uuid assigned, False otherwise. """ pools_per_rp_uuid = collections.defaultdict(list) for pool in pools: if "rp_uuid" in pool: pools_per_rp_uuid[pool["rp_uuid"]].append(pool) split_rp_uuids = { rp_uuid: pools for rp_uuid, pools in pools_per_rp_uuid.items() if len(pools) > 1} if split_rp_uuids: LOG.warning( "The PCI allocation logic assumes that devices " "related to the same rp_uuid are in the same pool. " "However the following rp_uuids are split across multiple " "pools. This should not happen. Please file a bug report. %s", split_rp_uuids ) return not split_rp_uuids ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/pci/utils.py0000664000175000017500000002361500000000000016021 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Intel, Inc. # Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glob import os import re import typing as ty from oslo_log import log as logging from nova import exception if ty.TYPE_CHECKING: # avoid circular import from nova.pci import stats LOG = logging.getLogger(__name__) PCI_VENDOR_PATTERN = "^(hex{4})$".replace("hex", r"[\da-fA-F]") _PCI_ADDRESS_PATTERN = ("^(hex{4}):(hex{2}):(hex{2}).(oct{1})$". replace("hex", r"[\da-fA-F]"). replace("oct", "[0-7]")) _PCI_ADDRESS_REGEX = re.compile(_PCI_ADDRESS_PATTERN) _SRIOV_TOTALVFS = "sriov_totalvfs" def pci_device_prop_match( pci_dev: 'stats.Pool', specs: ty.List[ty.Dict[str, str]], ) -> bool: """Check if the pci_dev meet spec requirement Specs is a list of PCI device property requirements. An example of device requirement that the PCI should be either: a) Device with vendor_id as 0x8086 and product_id as 0x8259, or b) Device with vendor_id as 0x10de and product_id as 0x10d8: [{"vendor_id":"8086", "product_id":"8259"}, {"vendor_id":"10de", "product_id":"10d8", "capabilities_network": ["rx", "tx", "tso", "gso"]}] """ def _matching_devices(spec: ty.Dict[str, str]) -> bool: for k, v in spec.items(): pci_dev_v = pci_dev.get(k) if isinstance(v, list) and isinstance(pci_dev_v, list): if not all(x in pci_dev.get(k) for x in v): return False else: # We don't need to check case for tags in order to avoid any # mismatch with the tags provided by users for port # binding profile and the ones configured by operators # with pci whitelist option. if isinstance(v, str): v = v.lower() if isinstance(pci_dev_v, str): pci_dev_v = pci_dev_v.lower() if pci_dev_v != v: return False return True return any(_matching_devices(spec) for spec in specs) def parse_address(address: str) -> ty.Sequence[str]: """Parse a PCI address. Returns (domain, bus, slot, function) from PCI address that is stored in PciDevice DB table. 
""" m = _PCI_ADDRESS_REGEX.match(address) if not m: raise exception.PciDeviceWrongAddressFormat(address=address) return m.groups() def get_pci_address_fields(pci_addr: str) -> ty.Tuple[str, str, str, str]: """Parse a fully-specified PCI device address. Does not validate that the components are valid hex or wildcard values. :param pci_addr: A string of the form "::.". :return: A 4-tuple of strings ("", "", "", "") """ dbs, sep, func = pci_addr.partition('.') domain, bus, slot = dbs.split(':') return domain, bus, slot, func def get_pci_address(domain: str, bus: str, slot: str, func: str) -> str: """Assembles PCI address components into a fully-specified PCI address. Does not validate that the components are valid hex or wildcard values. :param domain, bus, slot, func: Hex or wildcard strings. :return: A string of the form "::.". """ return '%s:%s:%s.%s' % (domain, bus, slot, func) def get_function_by_ifname(ifname: str) -> ty.Tuple[ty.Optional[str], bool]: """Given the device name, returns the PCI address of a device and returns True if the address is in a physical function. """ dev_path = "/sys/class/net/%s/device" % ifname sriov_totalvfs = 0 if os.path.isdir(dev_path): try: # sriov_totalvfs contains the maximum possible VFs for this PF with open(os.path.join(dev_path, _SRIOV_TOTALVFS)) as fd: sriov_totalvfs = int(fd.read()) return (os.readlink(dev_path).strip("./"), sriov_totalvfs > 0) except (IOError, ValueError): return os.readlink(dev_path).strip("./"), False return None, False def is_physical_function( domain: str, bus: str, slot: str, function: str, ) -> bool: dev_path = "/sys/bus/pci/devices/%(d)s:%(b)s:%(s)s.%(f)s/" % { "d": domain, "b": bus, "s": slot, "f": function} if os.path.isdir(dev_path): try: with open(dev_path + _SRIOV_TOTALVFS) as fd: sriov_totalvfs = int(fd.read()) return sriov_totalvfs > 0 except (IOError, ValueError): pass return False def _get_sysfs_netdev_path(pci_addr: str, pf_interface: bool) -> str: """Get the sysfs path based on the PCI address of the device. Assumes a networking device - will not check for the existence of the path. """ if pf_interface: return "/sys/bus/pci/devices/%s/physfn/net" % pci_addr return "/sys/bus/pci/devices/%s/net" % pci_addr def get_ifname_by_pci_address( pci_addr: str, pf_interface: bool = False, ) -> str: """Get the interface name based on a VF's pci address. The returned interface name is either the parent PF's or that of the VF itself based on the argument of pf_interface. """ dev_path = _get_sysfs_netdev_path(pci_addr, pf_interface) try: dev_info = os.listdir(dev_path) return dev_info.pop() except Exception: raise exception.PciDeviceNotFoundById(id=pci_addr) def get_mac_by_pci_address(pci_addr: str, pf_interface: bool = False) -> str: """Get the MAC address of the nic based on its PCI address. Raises PciDeviceNotFoundById in case the pci device is not a NIC """ dev_path = _get_sysfs_netdev_path(pci_addr, pf_interface) if_name = get_ifname_by_pci_address(pci_addr, pf_interface) addr_file = os.path.join(dev_path, if_name, 'address') try: with open(addr_file) as f: mac = next(f).strip() return mac except (IOError, StopIteration) as e: LOG.warning("Could not find the expected sysfs file for " "determining the MAC address of the PCI device " "%(addr)s. May not be a NIC. 
Error: %(e)s", {'addr': pci_addr, 'e': e}) raise exception.PciDeviceNotFoundById(id=pci_addr) def get_vf_num_by_pci_address(pci_addr: str) -> int: """Get the VF number based on a VF's pci address A VF is associated with an VF number, which ip link command uses to configure it. This number can be obtained from the PCI device filesystem. """ VIRTFN_RE = re.compile(r"virtfn(\d+)") virtfns_path = "/sys/bus/pci/devices/%s/physfn/virtfn*" % (pci_addr) vf_num = None for vf_path in glob.iglob(virtfns_path): if re.search(pci_addr, os.readlink(vf_path)): t = VIRTFN_RE.search(vf_path) if t: vf_num = t.group(1) break else: raise exception.PciDeviceNotFoundById(id=pci_addr) return int(vf_num) def get_vf_product_id_by_pf_addr(pci_addr: str) -> str: """Get the VF product ID for a given PF. "Product ID" or Device ID in the PCIe spec terms for a PF is possible to retrieve via the VF Device ID field present as of SR-IOV 1.0 in the "3.3.11. VF Device ID (1Ah)" section. It is described as a field that "contains the Device ID that should be presented for every VF to the SI". It is available as of Linux kernel 4.15, commit 7dfca15276fc3f18411a2b2182704fa1222bcb60 :param pci_addr: A string of the form "::.". :return: A string containing a product ID of a VF corresponding to the PF. """ sriov_vf_device_path = f"/sys/bus/pci/devices/{pci_addr}/sriov_vf_device" try: with open(sriov_vf_device_path) as f: vf_product_id = f.readline().strip() except IOError as e: LOG.warning( "Could not find the expected sysfs file for " "determining the VF product ID of a PCI VF by PF" "with addr %(addr)s. May not be a PF. Error: %(e)s", {"addr": pci_addr, "e": e}, ) raise exception.PciDeviceNotFoundById(id=pci_addr) if not vf_product_id: raise ValueError("sriov_vf_device file does not contain" " a VF product ID") return vf_product_id def get_pci_ids_by_pci_addr(pci_addr: str) -> ty.Tuple[str, ...]: """Get the product ID and vendor ID for a given PCI device. :param pci_addr: A string of the form "::.". :return: A list containing a vendor and product ids. """ id_prefix = f"/sys/bus/pci/devices/{pci_addr}" ids: ty.List[str] = [] for id_name in ("vendor", "product"): try: with open(os.path.join(id_prefix, id_name)) as f: id_value = f.readline() if not id_value: raise ValueError(f"{id_name} file does not contain" " a valid value") ids.append(id_value.strip().replace("0x", "")) except IOError as e: LOG.warning( "Could not find the expected sysfs file for " f"determining the {id_name} ID of a PCI device " "with addr %(addr)s. Error: %(e)s", {"addr": pci_addr, "e": e}, ) raise exception.PciDeviceNotFoundById(id=pci_addr) return tuple(ids) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/pci/whitelist.py0000664000175000017500000000742000000000000016671 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Intel, Inc. # Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import typing as ty from oslo_serialization import jsonutils from nova import exception from nova.i18n import _ from nova import objects from nova.pci import devspec class Whitelist(object): """White list class to represent assignable pci devices. Not all devices on a compute node can be assigned to a guest. The cloud administrator decides which devices can be assigned based on ``vendor_id`` or ``product_id``, etc. If no white list is specified, no devices will be assignable. """ def __init__(self, whitelist_spec: ty.Optional[str] = None) -> None: """White list constructor For example, the following json string specifies that devices whose vendor_id is '8086' and product_id is '1520' can be assigned to guests. :: '[{"product_id":"1520", "vendor_id":"8086"}]' :param whitelist_spec: A JSON string for a dictionary or list thereof. Each dictionary specifies the pci device properties requirement. See the definition of ``device_spec`` in ``nova.conf.pci`` for details and examples. """ if whitelist_spec: self.specs = self._parse_white_list_from_config(whitelist_spec) else: self.specs = [] @staticmethod def _parse_white_list_from_config( whitelists: str, ) -> ty.List[devspec.PciDeviceSpec]: """Parse and validate the pci whitelist from the nova config.""" specs = [] for jsonspec in whitelists: try: dev_spec = jsonutils.loads(jsonspec) except ValueError: raise exception.PciConfigInvalidSpec( reason=_("Invalid entry: '%s'") % jsonspec) if isinstance(dev_spec, dict): dev_spec = [dev_spec] elif not isinstance(dev_spec, list): raise exception.PciConfigInvalidSpec( reason=_("Invalid entry: '%s'; " "Expecting list or dict") % jsonspec) for ds in dev_spec: if not isinstance(ds, dict): raise exception.PciConfigInvalidSpec( reason=_("Invalid entry: '%s'; " "Expecting dict") % ds) spec = devspec.PciDeviceSpec(ds) specs.append(spec) return specs def device_assignable( self, dev: ty.Dict[str, ty.Any] ) -> ty.Optional[devspec.PciDeviceSpec]: """Check if a device is part of pci device_spec (whitelist) and so can be assigned to a guest. If yes return the spec, else return None :param dev: A dictionary describing the device properties :return: A devspec.PciDeviceSpec or None """ for spec in self.specs: if spec.match(dev): return spec return None def get_devspec( self, pci_dev: 'objects.PciDevice', ) -> ty.Optional[devspec.PciDeviceSpec]: for spec in self.specs: if spec.match_pci_obj(pci_dev): return spec return None ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.405609 nova-32.0.0/nova/policies/0000775000175000017500000000000000000000000015334 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/__init__.py0000664000175000017500000001135700000000000017454 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
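# NOTE: illustrative sketch only -- not part of the original module. The
# ``list_rules()`` helper defined below chains together the default rules of
# every per-API policy module so they can be registered with an oslo.policy
# enforcer (nova.policy does essentially this at runtime) and consumed by
# oslo.policy's sample policy file generators. ``CONF`` in this hedged
# example is assumed to be an oslo.config ConfigOpts instance:
#
#   from oslo_policy import policy as oslo_policy
#   from nova import policies
#
#   enforcer = oslo_policy.Enforcer(CONF)
#   enforcer.register_defaults(policies.list_rules())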
import itertools from nova.policies import admin_actions from nova.policies import admin_password from nova.policies import aggregates from nova.policies import assisted_volume_snapshots from nova.policies import attach_interfaces from nova.policies import availability_zone from nova.policies import baremetal_nodes from nova.policies import base from nova.policies import console_auth_tokens from nova.policies import console_output from nova.policies import create_backup from nova.policies import deferred_delete from nova.policies import evacuate from nova.policies import extended_server_attributes from nova.policies import extensions from nova.policies import flavor_access from nova.policies import flavor_extra_specs from nova.policies import flavor_manage from nova.policies import floating_ip_pools from nova.policies import floating_ips from nova.policies import hosts from nova.policies import hypervisors from nova.policies import instance_actions from nova.policies import instance_usage_audit_log from nova.policies import ips from nova.policies import keypairs from nova.policies import limits from nova.policies import lock_server from nova.policies import migrate_server from nova.policies import migrations from nova.policies import multinic from nova.policies import networks from nova.policies import pause_server from nova.policies import quota_class_sets from nova.policies import quota_sets from nova.policies import remote_consoles from nova.policies import rescue from nova.policies import security_groups from nova.policies import server_diagnostics from nova.policies import server_external_events from nova.policies import server_groups from nova.policies import server_metadata from nova.policies import server_password from nova.policies import server_shares from nova.policies import server_tags from nova.policies import server_topology from nova.policies import servers from nova.policies import servers_migrations from nova.policies import services from nova.policies import shelve from nova.policies import simple_tenant_usage from nova.policies import suspend_server from nova.policies import tenant_networks from nova.policies import volumes from nova.policies import volumes_attachments def list_rules(): return itertools.chain( base.list_rules(), admin_actions.list_rules(), admin_password.list_rules(), aggregates.list_rules(), assisted_volume_snapshots.list_rules(), attach_interfaces.list_rules(), availability_zone.list_rules(), baremetal_nodes.list_rules(), console_auth_tokens.list_rules(), console_output.list_rules(), create_backup.list_rules(), deferred_delete.list_rules(), evacuate.list_rules(), extended_server_attributes.list_rules(), extensions.list_rules(), flavor_access.list_rules(), flavor_extra_specs.list_rules(), flavor_manage.list_rules(), floating_ip_pools.list_rules(), floating_ips.list_rules(), hosts.list_rules(), hypervisors.list_rules(), instance_actions.list_rules(), instance_usage_audit_log.list_rules(), ips.list_rules(), keypairs.list_rules(), limits.list_rules(), lock_server.list_rules(), migrate_server.list_rules(), migrations.list_rules(), multinic.list_rules(), networks.list_rules(), pause_server.list_rules(), quota_class_sets.list_rules(), quota_sets.list_rules(), remote_consoles.list_rules(), rescue.list_rules(), security_groups.list_rules(), server_diagnostics.list_rules(), server_external_events.list_rules(), server_groups.list_rules(), server_metadata.list_rules(), server_password.list_rules(), server_shares.list_rules(), server_tags.list_rules(), 
server_topology.list_rules(), servers.list_rules(), servers_migrations.list_rules(), services.list_rules(), shelve.list_rules(), simple_tenant_usage.list_rules(), suspend_server.list_rules(), tenant_networks.list_rules(), volumes.list_rules(), volumes_attachments.list_rules() ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/admin_actions.py0000664000175000017500000000305700000000000020523 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-admin-actions:%s' admin_actions_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'reset_state', check_str=base.ADMIN, description="Reset the state of a given server", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (os-resetState)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'inject_network_info', check_str=base.ADMIN, description="Inject network information into the server", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (injectNetworkInfo)' } ], scope_types=['project']), ] def list_rules(): return admin_actions_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/admin_password.py0000664000175000017500000000231300000000000020717 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-admin-password' admin_password_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME, check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Change the administrative password for a server", operations=[ { 'path': '/servers/{server_id}/action (changePassword)', 'method': 'POST' } ], scope_types=['project']) ] def list_rules(): return admin_password_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/aggregates.py0000664000175000017500000000761600000000000020031 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-aggregates:%s' NEW_POLICY_ROOT = 'compute:aggregates:%s' aggregates_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'set_metadata', check_str=base.ADMIN, description="Create or replace metadata for an aggregate", operations=[ { 'path': '/os-aggregates/{aggregate_id}/action (set_metadata)', 'method': 'POST' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'add_host', check_str=base.ADMIN, description="Add a host to an aggregate", operations=[ { 'path': '/os-aggregates/{aggregate_id}/action (add_host)', 'method': 'POST' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'create', check_str=base.ADMIN, description="Create an aggregate", operations=[ { 'path': '/os-aggregates', 'method': 'POST' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'remove_host', check_str=base.ADMIN, description="Remove a host from an aggregate", operations=[ { 'path': '/os-aggregates/{aggregate_id}/action (remove_host)', 'method': 'POST' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'update', check_str=base.ADMIN, description="Update name and/or availability zone for an aggregate", operations=[ { 'path': '/os-aggregates/{aggregate_id}', 'method': 'PUT' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'index', check_str=base.ADMIN, description="List all aggregates", operations=[ { 'path': '/os-aggregates', 'method': 'GET' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'delete', check_str=base.ADMIN, description="Delete an aggregate", operations=[ { 'path': '/os-aggregates/{aggregate_id}', 'method': 'DELETE' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'show', check_str=base.ADMIN, description="Show details for an aggregate", operations=[ { 'path': '/os-aggregates/{aggregate_id}', 'method': 'GET' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=NEW_POLICY_ROOT % 'images', check_str=base.ADMIN, description="Request image caching for an aggregate", operations=[ { 'path': '/os-aggregates/{aggregate_id}/images', 'method': 'POST' } ], scope_types=['project']), ] def list_rules(): return aggregates_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/assisted_volume_snapshots.py0000664000175000017500000000307000000000000023216 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-assisted-volume-snapshots:%s' assisted_volume_snapshots_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'create', check_str=base.SERVICE_ROLE, description="Create an assisted volume snapshot", operations=[ { 'path': '/os-assisted-volume-snapshots', 'method': 'POST' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'delete', check_str=base.SERVICE_ROLE, description="Delete an assisted volume snapshot", operations=[ { 'path': '/os-assisted-volume-snapshots/{snapshot_id}', 'method': 'DELETE' } ], scope_types=['project']), ] def list_rules(): return assisted_volume_snapshots_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/attach_interfaces.py0000664000175000017500000000566700000000000021373 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-attach-interfaces' POLICY_ROOT = 'os_compute_api:os-attach-interfaces:%s' DEPRECATED_REASON = """ Nova API policies are introducing new default roles with scope_type capabilities. Old policies are deprecated and silently going to be ignored in nova 23.0.0 release. 
""" DEPRECATED_INTERFACES_POLICY = policy.DeprecatedRule( BASE_POLICY_NAME, base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since='21.0.0', ) attach_interfaces_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'list', check_str=base.PROJECT_READER_OR_ADMIN, description="List port interfaces attached to a server", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/os-interface' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_INTERFACES_POLICY), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'show', check_str=base.PROJECT_READER_OR_ADMIN, description="Show details of a port interface attached to a server", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/os-interface/{port_id}' } ], scope_types=['project'], deprecated_rule=DEPRECATED_INTERFACES_POLICY), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'create', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Attach an interface to a server", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/os-interface' } ], scope_types=['project'], deprecated_rule=DEPRECATED_INTERFACES_POLICY), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'delete', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Detach an interface from a server", operations=[ { 'method': 'DELETE', 'path': '/servers/{server_id}/os-interface/{port_id}' } ], scope_types=['project'], deprecated_rule=DEPRECATED_INTERFACES_POLICY) ] def list_rules(): return attach_interfaces_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/availability_zone.py0000664000175000017500000000310500000000000021412 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-availability-zone:%s' availability_zone_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'list', check_str=base.RULE_ANY, description="List availability zone information without host " "information", operations=[ { 'method': 'GET', 'path': '/os-availability-zone' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'detail', check_str=base.ADMIN, description="List detailed availability zone information with host " "information", operations=[ { 'method': 'GET', 'path': '/os-availability-zone/detail' } ], scope_types=['project']) ] def list_rules(): return availability_zone_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/baremetal_nodes.py0000664000175000017500000000413100000000000021031 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base ROOT_POLICY = 'os_compute_api:os-baremetal-nodes' BASE_POLICY_NAME = 'os_compute_api:os-baremetal-nodes:%s' DEPRECATED_REASON = """ Nova API policies are introducing new default roles with scope_type capabilities. Old policies are deprecated and silently going to be ignored in nova 23.0.0 release. """ DEPRECATED_BAREMETAL_POLICY = policy.DeprecatedRule( ROOT_POLICY, base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since='22.0.0' ) baremetal_nodes_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'list', check_str=base.ADMIN, description="""List and show details of bare metal nodes. These APIs are proxy calls to the Ironic service and are deprecated. """, operations=[ { 'method': 'GET', 'path': '/os-baremetal-nodes' } ], scope_types=['project'], deprecated_rule=DEPRECATED_BAREMETAL_POLICY), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN, description="""Show action details for a server.""", operations=[ { 'method': 'GET', 'path': '/os-baremetal-nodes/{node_id}' } ], scope_types=['project'], deprecated_rule=DEPRECATED_BAREMETAL_POLICY) ] def list_rules(): return baremetal_nodes_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/base.py0000664000175000017500000001416300000000000016625 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy RULE_ADMIN_OR_OWNER = 'rule:admin_or_owner' # Admins or owners of the resource RULE_ADMIN_API = 'rule:admin_api' # Allow only users with the admin role RULE_ANY = '@' # Any user is allowed to perform the action. RULE_NOBODY = '!' # No users are allowed to perform the action. DEPRECATED_REASON = """ Nova API policies are introducing new default roles with scope_type capabilities. Old policies are deprecated and silently going to be ignored in nova 23.0.0 release. """ DEPRECATED_ADMIN_POLICY = policy.DeprecatedRule( name=RULE_ADMIN_API, check_str='is_admin:True', deprecated_reason=DEPRECATED_REASON, deprecated_since='21.0.0' ) DEPRECATED_ADMIN_OR_OWNER_POLICY = policy.DeprecatedRule( name=RULE_ADMIN_OR_OWNER, check_str='is_admin:True or project_id:%(project_id)s', deprecated_reason=DEPRECATED_REASON, deprecated_since='21.0.0' ) ADMIN = 'rule:context_is_admin' PROJECT_MANAGER = 'rule:project_manager_api' PROJECT_MEMBER = 'rule:project_member_api' PROJECT_READER = 'rule:project_reader_api' # TODO(gmaan): Remove the admin role from the service rule in 2026.2. 
We are # continue allowing admin to access the service APIs, otherwise it will break # deployment where nova service users in other services are not assigned # 'service' role. After one SLURP (2026.1), we can make service APIs only # allowed for the 'service' role. SERVICE_ROLE = 'rule:service_or_admin' PROJECT_MANAGER_OR_ADMIN = 'rule:project_manager_or_admin' PROJECT_MEMBER_OR_ADMIN = 'rule:project_member_or_admin' PROJECT_READER_OR_ADMIN = 'rule:project_reader_or_admin' # NOTE(gmann): Below is the mapping of new roles with legacy roles:: # Legacy Rule | New Rules |Operation |scope_type| # -------------------+----------------------------+----------------+----------- # RULE_ADMIN_API |-> ADMIN |Global resource | [project] # |-> PROJECT_MANAGER_OR_ADMIN |Write & Read | # -------------------+----------------------------+----------------+----------- # |-> ADMIN |Project admin | [project] # | |level operation | # RULE_ADMIN_OR_OWNER|-> PROJECT_MEMBER_OR_ADMIN |Project resource| [project] # | |Write | # |-> PROJECT_READER_OR_ADMIN |Project resource| [project] # | |Read | # NOTE(johngarbutt) The base rules here affect so many APIs the list # of related API operations has not been populated. It would be # crazy hard to manually maintain such a list. # NOTE(gmann): Keystone already support implied roles means assignment # of one role implies the assignment of another. New defaults roles # `reader`, `member` also has been added in bootstrap. If the bootstrap # process is re-run, and a `reader`, `member`, or `admin` role already # exists, a role implication chain will be created: `admin` implies # `member` implies `reader`. # For example: If we give access to 'reader' it means the 'admin' and # 'member' also get access. rules = [ policy.RuleDefault( "context_is_admin", "role:admin", "Decides what is required for the 'is_admin:True' check to succeed.", deprecated_rule=DEPRECATED_ADMIN_POLICY), policy.RuleDefault( "admin_or_owner", "is_admin:True or project_id:%(project_id)s", "Default rule for most non-Admin APIs.", deprecated_for_removal=True, deprecated_reason=DEPRECATED_REASON, deprecated_since='21.0.0'), policy.RuleDefault( "admin_api", "is_admin:True", "Default rule for most Admin APIs.", deprecated_for_removal=True, deprecated_reason=DEPRECATED_REASON, deprecated_since='21.0.0'), policy.RuleDefault( "project_manager_api", "role:manager and project_id:%(project_id)s", "Default rule for Project level management APIs.", deprecated_rule=DEPRECATED_ADMIN_POLICY), policy.RuleDefault( "project_member_api", "role:member and project_id:%(project_id)s", "Default rule for Project level non admin APIs.", deprecated_rule=DEPRECATED_ADMIN_OR_OWNER_POLICY), policy.RuleDefault( "project_reader_api", "role:reader and project_id:%(project_id)s", "Default rule for Project level read only APIs.", deprecated_rule=DEPRECATED_ADMIN_OR_OWNER_POLICY), policy.RuleDefault( "service_api", "role:service", "Default rule for service-to-service APIs.", deprecated_rule=DEPRECATED_ADMIN_POLICY), policy.RuleDefault( "project_manager_or_admin", "rule:project_manager_api or rule:context_is_admin", "Default rule for Project Manager or admin APIs.", deprecated_rule=DEPRECATED_ADMIN_POLICY), policy.RuleDefault( "project_member_or_admin", "rule:project_member_api or rule:context_is_admin", "Default rule for Project Member or admin APIs.", deprecated_rule=DEPRECATED_ADMIN_OR_OWNER_POLICY), policy.RuleDefault( "project_reader_or_admin", "rule:project_reader_api or rule:context_is_admin", "Default rule for Project reader or admin 
APIs.", deprecated_rule=DEPRECATED_ADMIN_OR_OWNER_POLICY), policy.RuleDefault( "service_or_admin", "rule:service_api or rule:context_is_admin", "Default rule for service or admin APIs.", deprecated_rule=DEPRECATED_ADMIN_POLICY), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/console_auth_tokens.py0000664000175000017500000000235200000000000021756 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-console-auth-tokens' console_auth_tokens_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME, check_str=base.ADMIN, description="Show console connection information for a given console " "authentication token", operations=[ { 'method': 'GET', 'path': '/os-console-auth-tokens/{console_token}' } ], scope_types=['project']) ] def list_rules(): return console_auth_tokens_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/console_output.py0000664000175000017500000000230100000000000020764 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-console-output' console_output_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME, check_str=base.PROJECT_MEMBER_OR_ADMIN, description='Show console output for a server', operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (os-getConsoleOutput)' } ], scope_types=['project']) ] def list_rules(): return console_output_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/create_backup.py0000664000175000017500000000226300000000000020501 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-create-backup' create_backup_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME, check_str=base.PROJECT_MEMBER_OR_ADMIN, description='Create a back up of a server', operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (createBackup)' } ], scope_types=['project']) ] def list_rules(): return create_backup_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/deferred_delete.py0000664000175000017500000000403600000000000021013 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-deferred-delete:%s' DEPRECATED_REASON = """ Nova API policies are introducing new default roles with scope_type capabilities. Old policies are deprecated and silently going to be ignored in nova 23.0.0 release. """ DEPRECATED_POLICY = policy.DeprecatedRule( 'os_compute_api:os-deferred-delete', base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since='21.0.0' ) deferred_delete_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'restore', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Restore a soft deleted server", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (restore)' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'force', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Force delete a server before deferred cleanup", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (forceDelete)' } ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY) ] def list_rules(): return deferred_delete_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/evacuate.py0000664000175000017500000000224500000000000017506 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-evacuate' evacuate_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME, check_str=base.ADMIN, description="Evacuate a server from a failed host to a new host", operations=[ { 'path': '/servers/{server_id}/action (evacuate)', 'method': 'POST' } ], scope_types=['project']), ] def list_rules(): return evacuate_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/extended_server_attributes.py0000664000175000017500000000511100000000000023340 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-extended-server-attributes' extended_server_attributes_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME, check_str=base.ADMIN, description="""Return extended attributes for server. This rule will control the visibility for a set of servers attributes: - ``OS-EXT-SRV-ATTR:host`` - ``OS-EXT-SRV-ATTR:instance_name`` - ``OS-EXT-SRV-ATTR:reservation_id`` (since microversion 2.3) - ``OS-EXT-SRV-ATTR:launch_index`` (since microversion 2.3) - ``OS-EXT-SRV-ATTR:hostname`` (since microversion 2.3) - ``OS-EXT-SRV-ATTR:kernel_id`` (since microversion 2.3) - ``OS-EXT-SRV-ATTR:ramdisk_id`` (since microversion 2.3) - ``OS-EXT-SRV-ATTR:root_device_name`` (since microversion 2.3) - ``OS-EXT-SRV-ATTR:user_data`` (since microversion 2.3) Microvision 2.75 added the above attributes in the ``PUT /servers/{server_id}`` and ``POST /servers/{server_id}/action (rebuild)`` API responses which are also controlled by this policy rule, like the ``GET /servers*`` APIs. Microversion 2.90 made the ``OS-EXT-SRV-ATTR:hostname`` attribute available to all users, so this policy has no effect on that field for microversions 2.90 and greater. Controlling the visibility of this attribute for all microversions is therefore deprecated and will be removed in a future release. 
""", operations=[ { 'method': 'GET', 'path': '/servers/{id}' }, { 'method': 'GET', 'path': '/servers/detail' }, { 'method': 'PUT', 'path': '/servers/{server_id}' }, { 'method': 'POST', 'path': '/servers/{server_id}/action (rebuild)' } ], scope_types=['project'] ), ] def list_rules(): return extended_server_attributes_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/extensions.py0000664000175000017500000000243400000000000020110 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:extensions' extensions_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME, check_str=base.RULE_ANY, description="List available extensions and show information " "for an extension by alias", operations=[ { 'method': 'GET', 'path': '/extensions' }, { 'method': 'GET', 'path': '/extensions/{alias}' } ], scope_types=['project']), ] def list_rules(): return extensions_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/flavor_access.py0000664000175000017500000000563600000000000020532 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-flavor-access' POLICY_ROOT = 'os_compute_api:os-flavor-access:%s' # NOTE(gmann): Deprecating this policy explicitly as old defaults # admin or owner is not suitable for that which should be admin (Bug#1867840) # but changing that will break old deployment so let's keep supporting # the old default also and new default can be System Admin. # System Admin rule in base class is defined with the deprecated rule of admin # not admin or owner which is the main reason that we need to explicitly # deprecate this policy here. DEPRECATED_REASON = """ Nova API policies are introducing new default roles with scope_type capabilities. Old policies are deprecated and silently going to be ignored in nova 23.0.0 release. 
""" DEPRECATED_FLAVOR_ACCESS_POLICY = policy.DeprecatedRule( BASE_POLICY_NAME, base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since='21.0.0' ) flavor_access_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'add_tenant_access', check_str=base.ADMIN, description="Add flavor access to a tenant", operations=[ { 'method': 'POST', 'path': '/flavors/{flavor_id}/action (addTenantAccess)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'remove_tenant_access', check_str=base.ADMIN, description="Remove flavor access from a tenant", operations=[ { 'method': 'POST', 'path': '/flavors/{flavor_id}/action (removeTenantAccess)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME, check_str=base.ADMIN, description="""List flavor access information Allows access to the full list of tenants that have access to a flavor via an os-flavor-access API. """, operations=[ { 'method': 'GET', 'path': '/flavors/{flavor_id}/os-flavor-access' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_FLAVOR_ACCESS_POLICY), ] def list_rules(): return flavor_access_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/flavor_extra_specs.py0000664000175000017500000000650300000000000021603 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-flavor-extra-specs:%s' flavor_extra_specs_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'show', check_str=base.PROJECT_READER_OR_ADMIN, description="Show an extra spec for a flavor", operations=[ { 'path': '/flavors/{flavor_id}/os-extra_specs/' '{flavor_extra_spec_key}', 'method': 'GET' } ], scope_types=['project'] ), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'create', check_str=base.ADMIN, description="Create extra specs for a flavor", operations=[ { 'path': '/flavors/{flavor_id}/os-extra_specs/', 'method': 'POST' } ], scope_types=['project'] ), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'update', check_str=base.ADMIN, description="Update an extra spec for a flavor", operations=[ { 'path': '/flavors/{flavor_id}/os-extra_specs/' '{flavor_extra_spec_key}', 'method': 'PUT' } ], scope_types=['project'] ), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'delete', check_str=base.ADMIN, description="Delete an extra spec for a flavor", operations=[ { 'path': '/flavors/{flavor_id}/os-extra_specs/' '{flavor_extra_spec_key}', 'method': 'DELETE' } ], scope_types=['project'] ), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'index', check_str=base.PROJECT_READER_OR_ADMIN, description="List extra specs for a flavor. 
Starting with " "microversion 2.61, extra specs may be returned in responses " "for the flavor resource.", operations=[ { 'path': '/flavors/{flavor_id}/os-extra_specs/', 'method': 'GET' }, # Microversion 2.61 operations for flavors: { 'path': '/flavors', 'method': 'POST' }, { 'path': '/flavors/detail', 'method': 'GET' }, { 'path': '/flavors/{flavor_id}', 'method': 'GET' }, { 'path': '/flavors/{flavor_id}', 'method': 'PUT' } ], scope_types=['project'] ), ] def list_rules(): return flavor_extra_specs_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/flavor_manage.py0000664000175000017500000000335700000000000020517 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-flavor-manage:%s' flavor_manage_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'create', check_str=base.ADMIN, description="Create a flavor", operations=[ { 'method': 'POST', 'path': '/flavors' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'update', check_str=base.ADMIN, description="Update a flavor", operations=[ { 'method': 'PUT', 'path': '/flavors/{flavor_id}' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'delete', check_str=base.ADMIN, description="Delete a flavor", operations=[ { 'method': 'DELETE', 'path': '/flavors/{flavor_id}' } ], scope_types=['project']), ] def list_rules(): return flavor_manage_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/floating_ip_pools.py0000664000175000017500000000225600000000000021422 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-floating-ip-pools' floating_ip_pools_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME, check_str=base.RULE_ANY, description="List floating IP pools. 
This API is deprecated.", operations=[ { 'method': 'GET', 'path': '/os-floating-ip-pools' } ], scope_types=['project']), ] def list_rules(): return floating_ip_pools_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/floating_ips.py0000664000175000017500000000740600000000000020373 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base ROOT_POLICY = 'os_compute_api:os-floating-ips' BASE_POLICY_NAME = 'os_compute_api:os-floating-ips:%s' DEPRECATED_REASON = """ Nova API policies are introducing new default roles with scope_type capabilities. Old policies are deprecated and silently going to be ignored in nova 23.0.0 release. """ DEPRECATED_FIP_POLICY = policy.DeprecatedRule( ROOT_POLICY, base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since='22.0.0' ) floating_ips_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'add', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Associate floating IPs with a server. " "This API is deprecated.", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (addFloatingIp)' } ], scope_types=['project'], deprecated_rule=DEPRECATED_FIP_POLICY), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'remove', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Disassociate floating IPs from a server. " "This API is deprecated.", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (removeFloatingIp)' } ], scope_types=['project'], deprecated_rule=DEPRECATED_FIP_POLICY), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'list', check_str=base.PROJECT_READER_OR_ADMIN, description="List floating IPs. This API is deprecated.", operations=[ { 'method': 'GET', 'path': '/os-floating-ips' } ], scope_types=['project'], deprecated_rule=DEPRECATED_FIP_POLICY), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'create', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Create floating IPs. This API is deprecated.", operations=[ { 'method': 'POST', 'path': '/os-floating-ips' } ], scope_types=['project'], deprecated_rule=DEPRECATED_FIP_POLICY), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.PROJECT_READER_OR_ADMIN, description="Show floating IPs. This API is deprecated.", operations=[ { 'method': 'GET', 'path': '/os-floating-ips/{floating_ip_id}' } ], scope_types=['project'], deprecated_rule=DEPRECATED_FIP_POLICY), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Delete floating IPs. 
This API is deprecated.", operations=[ { 'method': 'DELETE', 'path': '/os-floating-ips/{floating_ip_id}' } ], scope_types=['project'], deprecated_rule=DEPRECATED_FIP_POLICY), ] def list_rules(): return floating_ips_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/hosts.py0000664000175000017500000000741000000000000017050 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-hosts' POLICY_NAME = 'os_compute_api:os-hosts:%s' DEPRECATED_REASON = """ Nova API policies are introducing new default roles with scope_type capabilities. Old policies are deprecated and silently going to be ignored in nova 23.0.0 release. """ DEPRECATED_POLICY = policy.DeprecatedRule( BASE_POLICY_NAME, base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since='22.0.0' ) hosts_policies = [ policy.DocumentedRuleDefault( name=POLICY_NAME % 'list', check_str=base.ADMIN, description="""List physical hosts. This API is deprecated in favor of os-hypervisors and os-services.""", operations=[ { 'method': 'GET', 'path': '/os-hosts' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'show', check_str=base.ADMIN, description="""Show physical host. This API is deprecated in favor of os-hypervisors and os-services.""", operations=[ { 'method': 'GET', 'path': '/os-hosts/{host_name}' } ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'update', check_str=base.ADMIN, description="""Update physical host. This API is deprecated in favor of os-hypervisors and os-services.""", operations=[ { 'method': 'PUT', 'path': '/os-hosts/{host_name}' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'reboot', check_str=base.ADMIN, description="""Reboot physical host. This API is deprecated in favor of os-hypervisors and os-services.""", operations=[ { 'method': 'GET', 'path': '/os-hosts/{host_name}/reboot' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'shutdown', check_str=base.ADMIN, description="""Shutdown physical host. This API is deprecated in favor of os-hypervisors and os-services.""", operations=[ { 'method': 'GET', 'path': '/os-hosts/{host_name}/shutdown' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'start', check_str=base.ADMIN, description="""Start physical host. 
This API is deprecated in favor of os-hypervisors and os-services.""", operations=[ { 'method': 'GET', 'path': '/os-hosts/{host_name}/startup' } ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), ] def list_rules(): return hosts_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/hypervisors.py0000664000175000017500000001002400000000000020300 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-hypervisors:%s' DEPRECATED_REASON = """ Nova API policies are introducing new default roles with scope_type capabilities. Old policies are deprecated and silently going to be ignored in nova 23.0.0 release. """ DEPRECATED_POLICY = policy.DeprecatedRule( 'os_compute_api:os-hypervisors', base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since='21.0.0' ) hypervisors_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'list', check_str=base.ADMIN, description="List all hypervisors.", operations=[ { 'path': '/os-hypervisors', 'method': 'GET' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'list-detail', check_str=base.ADMIN, description="List all hypervisors with details", operations=[ { 'path': '/os-hypervisors/details', 'method': 'GET' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'statistics', check_str=base.ADMIN, description="Show summary statistics for all hypervisors " "over all compute nodes.", operations=[ { 'path': '/os-hypervisors/statistics', 'method': 'GET' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN, description="Show details for a hypervisor.", operations=[ { 'path': '/os-hypervisors/{hypervisor_id}', 'method': 'GET' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'uptime', check_str=base.ADMIN, description="Show the uptime of a hypervisor.", operations=[ { 'path': '/os-hypervisors/{hypervisor_id}/uptime', 'method': 'GET' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'search', check_str=base.ADMIN, description="Search hypervisor by hypervisor_hostname pattern.", operations=[ { 'path': '/os-hypervisors/{hypervisor_hostname_pattern}/search', 'method': 'GET' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'servers', check_str=base.ADMIN, description="List all servers on hypervisors that can match " "the provided hypervisor_hostname pattern.", operations=[ { 'path': '/os-hypervisors/{hypervisor_hostname_pattern}/servers', 
'method': 'GET' } ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY ), ] def list_rules(): return hypervisors_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/instance_actions.py0000664000175000017500000000742100000000000021236 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base ROOT_POLICY = 'os_compute_api:os-instance-actions' BASE_POLICY_NAME = 'os_compute_api:os-instance-actions:%s' DEPRECATED_REASON = """ Nova API policies are introducing new default roles with scope_type capabilities. Old policies are deprecated and silently going to be ignored in nova 23.0.0 release. """ DEPRECATED_INSTANCE_ACTION_POLICY = policy.DeprecatedRule( ROOT_POLICY, base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since='21.0.0', ) instance_actions_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'events:details', check_str=base.ADMIN, description="""Add the "details" key to action events for a server. This check is performed only after the check os_compute_api:os-instance-actions:show passes. Beginning with Microversion 2.84, a new field 'details' is exposed via the API which can hold more details about an event failure. That field is controlled by this policy, which is admin-only by default. Making the 'details' field visible to non-admin users helps them understand the nature of the problem (i.e. whether the action can be retried), but on the other hand it might leak information about the deployment (e.g. the type of the hypervisor). """, operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/os-instance-actions/{request_id}' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'events', check_str=base.ADMIN, description="""Add event details to the action details for a server. This check is performed only after the check os_compute_api:os-instance-actions:show passes. Beginning with Microversion 2.51, event details are always included; traceback information is provided per event if policy enforcement passes. 
Beginning with Microversion 2.62, each event includes a hashed host identifier and, if policy enforcement passes, the name of the host.""", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/os-instance-actions/{request_id}' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'list', check_str=base.PROJECT_READER_OR_ADMIN, description="""List actions for a server.""", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/os-instance-actions' } ], scope_types=['project'], deprecated_rule=DEPRECATED_INSTANCE_ACTION_POLICY), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.PROJECT_READER_OR_ADMIN, description="""Show action details for a server.""", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/os-instance-actions/{request_id}' } ], scope_types=['project'], deprecated_rule=DEPRECATED_INSTANCE_ACTION_POLICY), ] def list_rules(): return instance_actions_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/instance_usage_audit_log.py0000664000175000017500000000416200000000000022730 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-instance-usage-audit-log:%s' DEPRECATED_REASON = """ Nova API policies are introducing new default roles with scope_type capabilities. Old policies are deprecated and silently going to be ignored in nova 23.0.0 release. """ DEPRECATED_POLICY = policy.DeprecatedRule( 'os_compute_api:os-instance-usage-audit-log', base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since='21.0.0' ) instance_usage_audit_log_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'list', check_str=base.ADMIN, description="List all usage audits.", operations=[ { 'method': 'GET', 'path': '/os-instance_usage_audit_log' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN, description="List all usage audits that occurred before " "a specified time for all servers on all compute hosts where " "usage auditing is configured", operations=[ { 'method': 'GET', 'path': '/os-instance_usage_audit_log/{before_timestamp}' } ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), ] def list_rules(): return instance_usage_audit_log_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/ips.py0000664000175000017500000000305200000000000016501 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:ips:%s' ips_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'show', check_str=base.PROJECT_READER_OR_ADMIN, description="Show IP addresses details for a network label of a " " server", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/ips/{network_label}' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'index', check_str=base.PROJECT_READER_OR_ADMIN, description="List IP addresses that are assigned to a server", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/ips' } ], scope_types=['project']), ] def list_rules(): return ips_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/keypairs.py0000664000175000017500000000427100000000000017541 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-keypairs:%s' keypairs_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'index', check_str='(' + base.ADMIN + ') or user_id:%(user_id)s', description="List all keypairs", operations=[ { 'path': '/os-keypairs', 'method': 'GET' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'create', check_str='(' + base.ADMIN + ') or user_id:%(user_id)s', description="Create a keypair", operations=[ { 'path': '/os-keypairs', 'method': 'POST' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'delete', check_str='(' + base.ADMIN + ') or user_id:%(user_id)s', description="Delete a keypair", operations=[ { 'path': '/os-keypairs/{keypair_name}', 'method': 'DELETE' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'show', check_str='(' + base.ADMIN + ') or user_id:%(user_id)s', description="Show details of a keypair", operations=[ { 'path': '/os-keypairs/{keypair_name}', 'method': 'GET' } ], scope_types=['project']), ] def list_rules(): return keypairs_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/limits.py0000664000175000017500000000416400000000000017214 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:limits' OTHER_PROJECT_LIMIT_POLICY_NAME = 'os_compute_api:limits:other_project' DEPRECATED_REASON = """ Nova API policies are introducing new default roles with scope_type capabilities. Old policies are deprecated and silently going to be ignored in nova 23.0.0 release. """ DEPRECATED_POLICY = policy.DeprecatedRule( 'os_compute_api:os-used-limits', base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since='21.0.0' ) limits_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME, check_str=base.RULE_ANY, description="Show rate and absolute limits for the current user's " "project", operations=[ { 'method': 'GET', 'path': '/limits' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=OTHER_PROJECT_LIMIT_POLICY_NAME, check_str=base.ADMIN, description="""Show rate and absolute limits of another project. This policy only checks whether the user has access to the requested project's limits. This check is performed only after the check os_compute_api:limits passes""", operations=[ { 'method': 'GET', 'path': '/limits' } ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), ] def list_rules(): return limits_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/lock_server.py0000664000175000017500000000374100000000000020231 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-lock-server:%s' lock_server_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'lock', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Lock a server", operations=[ { 'path': '/servers/{server_id}/action (lock)', 'method': 'POST' } ], scope_types=['project'] ), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'unlock', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Unlock a server", operations=[ { 'path': '/servers/{server_id}/action (unlock)', 'method': 'POST' } ], scope_types=['project'] ), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'unlock:unlock_override', check_str=base.ADMIN, description="""Unlock a server, regardless of who locked the server. 
This check is performed only after the check os_compute_api:os-lock-server:unlock passes""", operations=[ { 'path': '/servers/{server_id}/action (unlock)', 'method': 'POST' } ], scope_types=['project'] ), ] def list_rules(): return lock_server_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/migrate_server.py0000664000175000017500000000762200000000000020733 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-migrate-server:%s' DEPRECATED_REASON = """\ Nova introduces one more policy to live migration API. The original policy is not deprecated and used to allow live migration without requesting a specific host. A new policy is added to control the live migration requesting a specific host. If you have overridden the original policy in your deployment, you must also add the new policy to keep the same permissions for live migration to a specific host. """ DEPRECATED_POLICY = policy.DeprecatedRule( POLICY_ROOT % 'migrate_live', base.ADMIN, deprecated_reason=DEPRECATED_REASON, deprecated_since='32.0.0' ) migrate_server_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'migrate', check_str=base.PROJECT_MANAGER_OR_ADMIN, description="Cold migrate a server without specifying a host", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (migrate)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'migrate:host', check_str=base.ADMIN, description="Cold migrate a server to a specified host", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (migrate)' } ], scope_types=['project']), # NOTE(gmaan): You might see this policy as deprecated in the new policy # 'migrate_live:host' but it is not deprecated and still be used for # the live migration without specifying a host. By adding this existing # policy in new policy deprecated field, oslo.policy will handle the policy # overridden case. In that case, oslo.policy will pick the existing policy # overridden value from policy.yaml file and apply the same to the new # policy. This way existing deployment (for default as well as custom # policy case) will not break. 
policy.DocumentedRuleDefault( name=POLICY_ROOT % 'migrate_live', check_str=base.PROJECT_MANAGER_OR_ADMIN, description="Live migrate a server to a new host without a reboot " "without specifying a host.", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (os-migrateLive)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'migrate_live:host', check_str=base.ADMIN, description="Live migrate a server to a specified host without " "a reboot.", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (os-migrateLive)' } ], scope_types=['project'], # TODO(gmaan): We can remove this after the next SLURP release # (after 2026.1 release). We need to keep this deprecated rule # for the case where operator has overridden the old policy # 'migrate_live' in policy.yaml. For details, refer to the above # comment in the 'migrate_live' policy rule. deprecated_rule=DEPRECATED_POLICY), ] def list_rules(): return migrate_server_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/migrations.py0000664000175000017500000000720400000000000020065 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-migrations:%s' DEPRECATED_REASON = """\ Nova introduces two new policies to list migrations. The original policy is not deprecated and used to list the live migration without host info. Two new policies are added to list migration with host info and cross projects migrations. If you have overridden the original policy in your deployment, you must also update the new policy to keep the same permissions. """ DEPRECATED_POLICY = policy.DeprecatedRule( POLICY_ROOT % 'index', base.ADMIN, deprecated_reason=DEPRECATED_REASON, deprecated_since='32.0.0' ) migrations_policies = [ # NOTE(gmaan): You might see this policy as deprecated in the new policies # 'index:all_projects' and 'index:host' but it is not deprecated and still # be used to list the migration without host info. By adding this existing # policy in new policies deprecated field, oslo.policy will handle the # policy overridden case. In that case, oslo.policy will pick the existing # policy overridden value from policy.yaml file and apply the same to the # new policy. This way existing deployment (for default as well as custom # policy case) will not break. 
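# A hedged way to verify the behaviour described in the NOTE above is to
# render the shipped defaults and the effective policy with the standard
# oslo.policy CLI tools (shell commands shown for illustration only; the
# config-file path is just an example):
#
#     oslopolicy-sample-generator --namespace nova
#     oslopolicy-policy-generator --config-file /etc/nova/nova.conf \
#         --namespace nova
#
# A value set for 'os_compute_api:os-migrations:index' in policy.yaml is
# then expected to apply to 'index:all_projects' and 'index:host' as well,
# until those rules are overridden explicitly.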
policy.DocumentedRuleDefault( name=POLICY_ROOT % 'index', check_str=base.PROJECT_MANAGER_OR_ADMIN, description="List migrations without host info", operations=[ { 'method': 'GET', 'path': '/os-migrations' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'index:all_projects', check_str=base.ADMIN, description="List migrations for all or cross projects", operations=[ { 'method': 'GET', 'path': '/os-migrations' } ], scope_types=['project'], # TODO(gmaan): We can remove this after the next SLURP release # (after 2026.1 release). We need to keep this deprecated rule # for the case where operator has overridden the old policy # 'index' in policy.yaml. For details, refer to the above # comment in the 'index' policy rule. deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'index:host', check_str=base.ADMIN, description="List migrations with host info", operations=[ { 'method': 'GET', 'path': '/os-migrations' } ], scope_types=['project'], # TODO(gmaan): We can remove this after the next SLURP release # (after 2026.1 release). We need to keep this deprecated rule # for the case where operator has overridden the old policy # 'index' in policy.yaml. For details, refer to the above # comment in the 'index' policy rule. deprecated_rule=DEPRECATED_POLICY), ] def list_rules(): return migrations_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/multinic.py0000664000175000017500000000426200000000000017536 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base ROOT_POLICY = 'os_compute_api:os-multinic' BASE_POLICY_NAME = 'os_compute_api:os-multinic:%s' DEPRECATED_REASON = """ Nova API policies are introducing new default roles with scope_type capabilities. Old policies are deprecated and silently going to be ignored in nova 23.0.0 release. """ DEPRECATED_POLICY = policy.DeprecatedRule( ROOT_POLICY, base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since='22.0.0' ) multinic_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'add', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="""Add a fixed IP address to a server. This API is proxy calls to the Network service. This is deprecated.""", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (addFixedIp)' } ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'remove', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="""Remove a fixed IP address from a server. This API is proxy calls to the Network service. 
This is deprecated.""", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (removeFixedIp)' } ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), ] def list_rules(): return multinic_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/networks.py0000664000175000017500000000415100000000000017563 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-networks:%s' BASE_POLICY_NAME = 'os_compute_api:os-networks:view' DEPRECATED_REASON = """ Nova API policies are introducing new default roles with scope_type capabilities. Old policies are deprecated and silently going to be ignored in nova 23.0.0 release. """ DEPRECATED_POLICY = policy.DeprecatedRule( BASE_POLICY_NAME, base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since='22.0.0' ) networks_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'list', check_str=base.PROJECT_READER_OR_ADMIN, description="""List networks for the project. This API is proxy calls to the Network service. This is deprecated.""", operations=[ { 'method': 'GET', 'path': '/os-networks' } ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'show', check_str=base.PROJECT_READER_OR_ADMIN, description="""Show network details. This API is proxy calls to the Network service. This is deprecated.""", operations=[ { 'method': 'GET', 'path': '/os-networks/{network_id}' } ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), ] def list_rules(): return networks_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/pause_server.py0000664000175000017500000000302000000000000020404 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-pause-server:%s' pause_server_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'pause', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Pause a server", operations=[ { 'path': '/servers/{server_id}/action (pause)', 'method': 'POST' } ], scope_types=['project'] ), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'unpause', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Unpause a paused server", operations=[ { 'path': '/servers/{server_id}/action (unpause)', 'method': 'POST' } ], scope_types=['project'] ), ] def list_rules(): return pause_server_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/quota_class_sets.py0000664000175000017500000000301500000000000021261 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-quota-class-sets:%s' quota_class_sets_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'show', check_str=base.ADMIN, description="List quotas for specific quota classes", operations=[ { 'method': 'GET', 'path': '/os-quota-class-sets/{quota_class}' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'update', check_str=base.ADMIN, description='Update quotas for specific quota class', operations=[ { 'method': 'PUT', 'path': '/os-quota-class-sets/{quota_class}' } ], scope_types=['project']), ] def list_rules(): return quota_class_sets_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/quota_sets.py0000664000175000017500000000522000000000000020074 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
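# Hedged illustration of the configuration knobs that the 'project'
# scope_types and the new role-based defaults used below interact with (the
# option names are real oslo.policy options; the values are an example, not
# a recommendation):
#
#     [oslo_policy]
#     # opt in to the new defaults such as PROJECT_READER_OR_ADMIN / ADMIN
#     enforce_new_defaults = True
#     # reject tokens whose scope does not match scope_types=['project']
#     enforce_scope = True
#     policy_file = policy.yaml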
from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-quota-sets:%s' quota_sets_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'update', check_str=base.ADMIN, description="Update the quotas", operations=[ { 'method': 'PUT', 'path': '/os-quota-sets/{tenant_id}' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'defaults', check_str=base.RULE_ANY, description="List default quotas", operations=[ { 'method': 'GET', 'path': '/os-quota-sets/{tenant_id}/defaults' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'show', check_str=base.PROJECT_READER_OR_ADMIN, description="Show a quota", operations=[ { 'method': 'GET', 'path': '/os-quota-sets/{tenant_id}' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'delete', check_str=base.ADMIN, description="Revert quotas to defaults", operations=[ { 'method': 'DELETE', 'path': '/os-quota-sets/{tenant_id}' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'detail', # TODO(gmann): Until we have domain admin or so to get other project's # data, allow admin role(with scope check it will be project admin) to # get other project quota. check_str=base.PROJECT_READER_OR_ADMIN, description="Show the detail of quota", operations=[ { 'method': 'GET', 'path': '/os-quota-sets/{tenant_id}/detail' } ], scope_types=['project']), ] def list_rules(): return quota_sets_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/remote_consoles.py0000664000175000017500000000342500000000000021112 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-remote-consoles' remote_consoles_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME, check_str=base.PROJECT_MEMBER_OR_ADMIN, description="""Generate a URL to access remote server console. This policy is for ``POST /remote-consoles`` API and below Server actions APIs are deprecated: - ``os-getSerialConsole`` - ``os-getSPICEConsole`` - ``os-getVNCConsole``.""", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (os-getSerialConsole)' }, { 'method': 'POST', 'path': '/servers/{server_id}/action (os-getSPICEConsole)' }, { 'method': 'POST', 'path': '/servers/{server_id}/action (os-getVNCConsole)' }, { 'method': 'POST', 'path': '/servers/{server_id}/remote-consoles' }, ], scope_types=['project']), ] def list_rules(): return remote_consoles_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/rescue.py0000664000175000017500000000361700000000000017203 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-rescue' UNRESCUE_POLICY_NAME = 'os_compute_api:os-unrescue' DEPRECATED_REASON = """ Rescue/Unrescue API policies are made granular with new policy for unrescue and keeping old policy for rescue. """ DEPRECATED_POLICY = policy.DeprecatedRule( 'os_compute_api:os-rescue', base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since='21.0.0' ) rescue_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME, check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Rescue a server", operations=[ { 'path': '/servers/{server_id}/action (rescue)', 'method': 'POST' }, ], scope_types=['project']), policy.DocumentedRuleDefault( name=UNRESCUE_POLICY_NAME, check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Unrescue a server", operations=[ { 'path': '/servers/{server_id}/action (unrescue)', 'method': 'POST' } ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY ), ] def list_rules(): return rescue_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/security_groups.py0000664000175000017500000001252700000000000021163 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-security-groups' POLICY_NAME = 'os_compute_api:os-security-groups:%s' DEPRECATED_REASON = """ Nova API policies are introducing new default roles with scope_type capabilities. Old policies are deprecated and silently going to be ignored in nova 23.0.0 release. """ DEPRECATED_POLICY = policy.DeprecatedRule( BASE_POLICY_NAME, base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since='22.0.0' ) security_groups_policies = [ policy.DocumentedRuleDefault( name=POLICY_NAME % 'get', check_str=base.PROJECT_READER_OR_ADMIN, description="List security groups. This API is deprecated.", operations=[ { 'method': 'GET', 'path': '/os-security-groups' } ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'show', check_str=base.PROJECT_READER_OR_ADMIN, description="Show security group. 
This API is deprecated.", operations=[ { 'method': 'GET', 'path': '/os-security-groups/{security_group_id}' } ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'create', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Create security group. This API is deprecated.", operations=[ { 'method': 'POST', 'path': '/os-security-groups' } ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'update', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Update security group. This API is deprecated.", operations=[ { 'method': 'PUT', 'path': '/os-security-groups/{security_group_id}' } ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'delete', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Delete security group. This API is deprecated.", operations=[ { 'method': 'DELETE', 'path': '/os-security-groups/{security_group_id}' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'rule:create', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Create security group Rule. This API is deprecated.", operations=[ { 'method': 'POST', 'path': '/os-security-group-rules' } ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'rule:delete', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Delete security group Rule. This API is deprecated.", operations=[ { 'method': 'DELETE', 'path': '/os-security-group-rules/{security_group_id}' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'list', check_str=base.PROJECT_READER_OR_ADMIN, description="List security groups of server.", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/os-security-groups' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'add', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Add security groups to server.", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (addSecurityGroup)' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'remove', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Remove security groups from server.", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (removeSecurityGroup)' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), ] def list_rules(): return security_groups_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/server_diagnostics.py0000664000175000017500000000225200000000000021604 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-server-diagnostics' server_diagnostics_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME, check_str=base.ADMIN, description="Show the usage data for a server", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/diagnostics' } ], scope_types=['project']), ] def list_rules(): return server_diagnostics_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/server_external_events.py0000664000175000017500000000227600000000000022511 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-server-external-events:%s' server_external_events_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'create', check_str=base.SERVICE_ROLE, description="Create one or more external events", operations=[ { 'method': 'POST', 'path': '/os-server-external-events' } ], scope_types=['project']), ] def list_rules(): return server_external_events_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/server_groups.py0000664000175000017500000000505300000000000020616 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
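# NOTE: Every rule in these modules sets scope_types=['project']. When the
# [oslo_policy] enforce_scope option is enabled, a token whose scope does not
# match is rejected before the check string is even evaluated. A standalone
# sketch of that behaviour follows; the rule name is made up and the
# 'role:service' check string merely stands in for base.SERVICE_ROLE, whose
# real definition lives in nova/policies/base.py (not shown here).
from oslo_config import cfg
from oslo_policy import policy

CONF = cfg.CONF
CONF([])
enforcer = policy.Enforcer(CONF)  # registers the [oslo_policy] options
CONF.set_override('enforce_scope', True, group='oslo_policy')

enforcer.register_default(policy.DocumentedRuleDefault(
    name='os_compute_api:os-example:create',
    check_str='role:service',
    description='Example service-to-service API.',
    operations=[{'method': 'POST', 'path': '/example'}],
    scope_types=['project']))

project_creds = {'roles': ['service'], 'project_id': 'p1'}
system_creds = {'roles': ['service'], 'system_scope': 'all'}

print(enforcer.enforce('os_compute_api:os-example:create', {}, project_creds))
try:
    enforcer.enforce('os_compute_api:os-example:create', {}, system_creds)
except policy.InvalidScope:
    # expected: a system-scoped token cannot pass a project-scoped rule
    print('system-scoped token rejected for a project-scoped rule')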
from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-server-groups:%s' server_groups_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'create', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Create a new server group", operations=[ { 'path': '/os-server-groups', 'method': 'POST' } ], scope_types=['project'] ), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'delete', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Delete a server group", operations=[ { 'path': '/os-server-groups/{server_group_id}', 'method': 'DELETE' } ], scope_types=['project'] ), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'index', check_str=base.PROJECT_READER_OR_ADMIN, description="List all server groups", operations=[ { 'path': '/os-server-groups', 'method': 'GET' } ], scope_types=['project'] ), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'index:all_projects', check_str=base.ADMIN, description="List all server groups for all projects", operations=[ { 'path': '/os-server-groups', 'method': 'GET' } ], scope_types=['project'] ), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'show', check_str=base.PROJECT_READER_OR_ADMIN, description="Show details of a server group", operations=[ { 'path': '/os-server-groups/{server_group_id}', 'method': 'GET' } ], scope_types=['project'] ), ] def list_rules(): return server_groups_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/server_metadata.py0000664000175000017500000000571600000000000021065 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
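# NOTE: In the server-groups rules above, plain listing ('index') defaults to
# the project reader persona while the all-projects listing is admin-only.
# The sketch below shows how such defaults compose through "rule:" references
# in oslo.policy; the names and check strings are simplified stand-ins for
# the constants defined in nova/policies/base.py.
from oslo_config import cfg
from oslo_policy import policy

CONF = cfg.CONF
CONF([])
enforcer = policy.Enforcer(CONF)
enforcer.register_defaults([
    policy.RuleDefault('context_is_admin', 'role:admin'),
    policy.RuleDefault('example:index',
                       'role:reader and project_id:%(project_id)s'),
    policy.RuleDefault('example:index:all_projects', 'rule:context_is_admin'),
])

reader = {'roles': ['reader'], 'project_id': 'p1'}
admin = {'roles': ['admin'], 'project_id': 'p1'}
target = {'project_id': 'p1'}
print(enforcer.enforce('example:index', target, reader))               # True
print(enforcer.enforce('example:index:all_projects', target, reader))  # False
print(enforcer.enforce('example:index:all_projects', target, admin))   # True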
from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:server-metadata:%s' server_metadata_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'index', check_str=base.PROJECT_READER_OR_ADMIN, description="List all metadata of a server", operations=[ { 'path': '/servers/{server_id}/metadata', 'method': 'GET' } ], scope_types=['project'] ), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'show', check_str=base.PROJECT_READER_OR_ADMIN, description="Show metadata for a server", operations=[ { 'path': '/servers/{server_id}/metadata/{key}', 'method': 'GET' } ], scope_types=['project'] ), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'create', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Create metadata for a server", operations=[ { 'path': '/servers/{server_id}/metadata', 'method': 'POST' } ], scope_types=['project'] ), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'update_all', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Replace metadata for a server", operations=[ { 'path': '/servers/{server_id}/metadata', 'method': 'PUT' } ], scope_types=['project'] ), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'update', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Update metadata from a server", operations=[ { 'path': '/servers/{server_id}/metadata/{key}', 'method': 'PUT' } ], scope_types=['project'] ), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'delete', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Delete metadata from a server", operations=[ { 'path': '/servers/{server_id}/metadata/{key}', 'method': 'DELETE' } ], scope_types=['project'] ), ] def list_rules(): return server_metadata_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/server_password.py0000664000175000017500000000412700000000000021142 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-server-password:%s' DEPRECATED_REASON = """ Nova API policies are introducing new default roles with scope_type capabilities. Old policies are deprecated and silently going to be ignored in nova 23.0.0 release. 
""" DEPRECATED_POLICY = policy.DeprecatedRule( 'os_compute_api:os-server-password', base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since='21.0.0' ) server_password_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.PROJECT_READER_OR_ADMIN, description="Show the encrypted administrative " "password of a server", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/os-server-password' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'clear', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Clear the encrypted administrative " "password of a server", operations=[ { 'method': 'DELETE', 'path': '/servers/{server_id}/os-server-password' } ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), ] def list_rules(): return server_password_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/server_shares.py0000664000175000017500000000424500000000000020566 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-server-shares:%s' server_shares_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'index', check_str=base.PROJECT_READER, description="List all shares for given server", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/shares' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'create', check_str=base.PROJECT_MEMBER, description="Attach a share to the specified server", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/shares' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'show', check_str=base.PROJECT_READER, description="Show a share configured for the specified server", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/shares/{share_id}' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'delete', check_str=base.PROJECT_MEMBER, description="Detach a share to the specified server", operations=[ { 'method': 'DELETE', 'path': '/servers/{server_id}/shares/{share_id}' } ], scope_types=['project']), ] def list_rules(): return server_shares_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/server_tags.py0000664000175000017500000000602300000000000020233 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-server-tags:%s' server_tags_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'delete_all', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Delete all the server tags", operations=[ { 'method': 'DELETE', 'path': '/servers/{server_id}/tags' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'index', check_str=base.PROJECT_READER_OR_ADMIN, description="List all tags for given server", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/tags' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'update_all', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Replace all tags on specified server with the new set " "of tags.", operations=[ { 'method': 'PUT', 'path': '/servers/{server_id}/tags' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'delete', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Delete a single tag from the specified server", operations=[ { 'method': 'DELETE', 'path': '/servers/{server_id}/tags/{tag}' } ], scope_types=['project'] ), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'update', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Add a single tag to the server if server has no " "specified tag", operations=[ { 'method': 'PUT', 'path': '/servers/{server_id}/tags/{tag}' } ], scope_types=['project'] ), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'show', check_str=base.PROJECT_READER_OR_ADMIN, description="Check tag existence on the server.", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/tags/{tag}' } ], scope_types=['project'] ), ] def list_rules(): return server_tags_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/server_topology.py0000664000175000017500000000312500000000000021151 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
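# NOTE: Unlike most of the rules above, the os-server-shares policies use the
# plain PROJECT_READER / PROJECT_MEMBER defaults rather than the *_OR_ADMIN
# variants. The standalone sketch below illustrates the compositional
# difference with simplified, assumed check strings (the authoritative
# definitions are in nova/policies/base.py): an admin token from another
# project fails the member-only rule but passes the "or admin" rule.
from oslo_config import cfg
from oslo_policy import policy

CONF = cfg.CONF
CONF([])
enforcer = policy.Enforcer(CONF)
enforcer.register_defaults([
    policy.RuleDefault('context_is_admin', 'role:admin'),
    # assumed simplification of base.PROJECT_MEMBER
    policy.RuleDefault('project_member',
                       'role:member and project_id:%(project_id)s'),
    # assumed simplification of base.PROJECT_MEMBER_OR_ADMIN
    policy.RuleDefault('project_member_or_admin',
                       'rule:project_member or rule:context_is_admin'),
])

admin_elsewhere = {'roles': ['admin'], 'project_id': 'other'}
target = {'project_id': 'p1'}
print(enforcer.enforce('project_member', target, admin_elsewhere))           # False
print(enforcer.enforce('project_member_or_admin', target, admin_elsewhere))  # True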
from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'compute:server:topology:%s' server_topology_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.PROJECT_READER_OR_ADMIN, description="Show the NUMA topology data for a server", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/topology' } ], scope_types=['project']), policy.DocumentedRuleDefault( # Control host NUMA node and cpu pinning information name=BASE_POLICY_NAME % 'host:index', check_str=base.ADMIN, description="Show the NUMA topology data for a server with host " "NUMA ID and CPU pinning information", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/topology' } ], scope_types=['project']), ] def list_rules(): return server_topology_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/servers.py0000664000175000017500000003771200000000000017411 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base SERVERS = 'os_compute_api:servers:%s' NETWORK_ATTACH_EXTERNAL = 'network:attach_external_network' ZERO_DISK_FLAVOR = SERVERS % 'create:zero_disk_flavor' REQUESTED_DESTINATION = 'compute:servers:create:requested_destination' CROSS_CELL_RESIZE = 'compute:servers:resize:cross_cell' DEPRECATED_POLICY = policy.DeprecatedRule( 'os_compute_api:os-flavor-extra-specs:index', base.RULE_ADMIN_OR_OWNER, ) DEPRECATED_REASON = """ Policies for showing flavor extra specs in server APIs response is separated as new policy. This policy is deprecated only for that but not for list extra specs and showing it in flavor API response. 
""" rules = [ policy.DocumentedRuleDefault( name=SERVERS % 'index', check_str=base.PROJECT_READER_OR_ADMIN, description="List all servers", operations=[ { 'method': 'GET', 'path': '/servers' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'detail', check_str=base.PROJECT_READER_OR_ADMIN, description="List all servers with detailed information", operations=[ { 'method': 'GET', 'path': '/servers/detail' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'index:get_all_tenants', check_str=base.ADMIN, description="List all servers for all projects", operations=[ { 'method': 'GET', 'path': '/servers' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'detail:get_all_tenants', check_str=base.ADMIN, description="List all servers with detailed information for " " all projects", operations=[ { 'method': 'GET', 'path': '/servers/detail' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'allow_all_filters', check_str=base.ADMIN, description="Allow all filters when listing servers", operations=[ { 'method': 'GET', 'path': '/servers' }, { 'method': 'GET', 'path': '/servers/detail' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'show', check_str=base.PROJECT_READER_OR_ADMIN, description="Show a server", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'show:flavor-extra-specs', check_str=base.PROJECT_READER_OR_ADMIN, description="Starting with microversion 2.47, the flavor and its " "extra specs used for a server is also returned in the response " "when showing server details, updating a server or rebuilding a " "server.", operations=[ # Microversion 2.47 operations for servers: { 'path': '/servers/detail', 'method': 'GET' }, { 'path': '/servers/{server_id}', 'method': 'GET' }, { 'path': '/servers/{server_id}', 'method': 'PUT' }, { 'path': '/servers/{server_id}/action (rebuild)', 'method': 'POST' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY, deprecated_reason=DEPRECATED_REASON, deprecated_since='25.0.0'), # the details in host_status are pretty sensitive, only admins # should do that by default. policy.DocumentedRuleDefault( name=SERVERS % 'show:host_status', check_str=base.ADMIN, description=""" Show a server with additional host status information. This means host_status will be shown irrespective of status value. If showing only host_status UNKNOWN is desired, use the ``os_compute_api:servers:show:host_status:unknown-only`` policy rule. Microvision 2.75 added the ``host_status`` attribute in the ``PUT /servers/{server_id}`` and ``POST /servers/{server_id}/action (rebuild)`` API responses which are also controlled by this policy rule, like the ``GET /servers*`` APIs. """, operations=[ { 'method': 'GET', 'path': '/servers/{server_id}' }, { 'method': 'GET', 'path': '/servers/detail' }, { 'method': 'PUT', 'path': '/servers/{server_id}' }, { 'method': 'POST', 'path': '/servers/{server_id}/action (rebuild)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'show:host_status:unknown-only', check_str=base.ADMIN, description=""" Show a server with additional host status information, only if host status is UNKNOWN. This policy rule will only be enforced when the ``os_compute_api:servers:show:host_status`` policy rule does not pass for the request. 
An example policy configuration could be where the ``os_compute_api:servers:show:host_status`` rule is set to allow admin-only and the ``os_compute_api:servers:show:host_status:unknown-only`` rule is set to allow everyone. """, operations=[ { 'method': 'GET', 'path': '/servers/{server_id}' }, { 'method': 'GET', 'path': '/servers/detail' }, { 'method': 'PUT', 'path': '/servers/{server_id}' }, { 'method': 'POST', 'path': '/servers/{server_id}/action (rebuild)' } ], scope_types=['project'],), policy.DocumentedRuleDefault( name=SERVERS % 'create', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Create a server", operations=[ { 'method': 'POST', 'path': '/servers' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'create:forced_host', check_str=base.ADMIN, description=""" Create a server on the specified host and/or node. In this case, the server is forced to launch on the specified host and/or node by bypassing the scheduler filters unlike the ``compute:servers:create:requested_destination`` rule. """, operations=[ { 'method': 'POST', 'path': '/servers' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=REQUESTED_DESTINATION, check_str=base.ADMIN, description=""" Create a server on the requested compute service host and/or hypervisor_hostname. In this case, the requested host and/or hypervisor_hostname is validated by the scheduler filters unlike the ``os_compute_api:servers:create:forced_host`` rule. """, operations=[ { 'method': 'POST', 'path': '/servers' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'create:attach_volume', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Create a server with the requested volume attached to it", operations=[ { 'method': 'POST', 'path': '/servers' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'create:attach_network', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Create a server with the requested network attached " " to it", operations=[ { 'method': 'POST', 'path': '/servers' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'create:trusted_certs', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Create a server with trusted image certificate IDs", operations=[ { 'method': 'POST', 'path': '/servers' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=ZERO_DISK_FLAVOR, check_str=base.ADMIN, description=""" This rule controls the compute API validation behavior of creating a server with a flavor that has 0 disk, indicating the server should be volume-backed. For a flavor with disk=0, the root disk will be set to exactly the size of the image used to deploy the instance. However, in this case the filter_scheduler cannot select the compute host based on the virtual image size. Therefore, 0 should only be used for volume booted instances or for testing purposes. WARNING: It is a potential security exposure to enable this policy rule if users can upload their own images since repeated attempts to create a disk=0 flavor instance with a large image can exhaust the local disk of the compute (or shared storage cluster). See bug https://bugs.launchpad.net/nova/+bug/1739646 for details. """, operations=[ { 'method': 'POST', 'path': '/servers' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=NETWORK_ATTACH_EXTERNAL, check_str=base.ADMIN, description="Attach an unshared external network to a server", operations=[ # Create a server with a requested network or port. 
{ 'method': 'POST', 'path': '/servers' }, # Attach a network or port to an existing server. { 'method': 'POST', 'path': '/servers/{server_id}/os-interface' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'delete', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Delete a server", operations=[ { 'method': 'DELETE', 'path': '/servers/{server_id}' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'update', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Update a server", operations=[ { 'method': 'PUT', 'path': '/servers/{server_id}' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'confirm_resize', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Confirm a server resize", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (confirmResize)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'revert_resize', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Revert a server resize", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (revertResize)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'reboot', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Reboot a server", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (reboot)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'resize', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Resize a server", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (resize)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=CROSS_CELL_RESIZE, check_str=base.RULE_NOBODY, description="Resize a server across cells. By default, this is " "disabled for all users and recommended to be tested in a " "deployment for admin users before opening it up to non-admin users. " "Resizing within a cell is the default preferred behavior even if " "this is enabled. 
", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (resize)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'rebuild', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Rebuild a server", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (rebuild)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'rebuild:trusted_certs', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Rebuild a server with trusted image certificate IDs", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (rebuild)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'create_image', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Create an image from a server", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (createImage)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'create_image:allow_volume_backed', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Create an image from a volume backed server", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (createImage)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'start', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Start a server", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (os-start)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'stop', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Stop a server", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (os-stop)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=SERVERS % 'trigger_crash_dump', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Trigger crash dump in a server", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (trigger_crash_dump)' } ], scope_types=['project']), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/servers_migrations.py0000664000175000017500000001044500000000000021637 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:servers:migrations:%s' DEPRECATED_REASON = """\ Nova introduces one new policy to list live migrations. The original policy is not deprecated and used to list the in-progress live migration without host info. A new policy is added to list live migration with host info. If you have overridden the original policy in your deployment, you must also update the new policy to keep the same permissions. 
""" DEPRECATED_POLICY = policy.DeprecatedRule( POLICY_ROOT % 'index', base.ADMIN, deprecated_reason=DEPRECATED_REASON, deprecated_since='32.0.0' ) servers_migrations_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'show', check_str=base.ADMIN, description="Show details for an in-progress live migration for a " "given server", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/migrations/{migration_id}' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'force_complete', check_str=base.PROJECT_MANAGER_OR_ADMIN, description="Force an in-progress live migration for a given server " "to complete", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/migrations/{migration_id}' '/action (force_complete)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'delete', check_str=base.PROJECT_MANAGER_OR_ADMIN, description="Delete(Abort) an in-progress live migration", operations=[ { 'method': 'DELETE', 'path': '/servers/{server_id}/migrations/{migration_id}' } ], scope_types=['project']), # NOTE(gmaan): You might see this policy as deprecated in the new policy # 'index:host' but it is not deprecated and still be used to list the live # migration without host info. By adding this existing policy in new # policy deprecated field, oslo.policy will handle the policy overridden # case. In that case, oslo.policy will pick the existing policy overridden # value from policy.yaml file and apply the same to the new policy. This # way existing deployment (for default as well as custom policy case) will # not break. policy.DocumentedRuleDefault( name=POLICY_ROOT % 'index', check_str=base.PROJECT_MANAGER_OR_ADMIN, description="Lists in-progress live migrations for a given server " "without host info.", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/migrations' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'index:host', check_str=base.ADMIN, description="Lists in-progress live migrations for a given server " "with host info.", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/migrations' } ], scope_types=['project'], # TODO(gmaan): We can remove this after the next SLURP release # (after 2026.1 release). We need to keep this deprecated rule # for the case where operator has overridden the old policy # 'index' in policy.yaml. For details, refer to the above # comment in the 'index' policy rule. deprecated_rule=DEPRECATED_POLICY), ] def list_rules(): return servers_migrations_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/services.py0000664000175000017500000000456600000000000017544 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-services:%s' DEPRECATED_REASON = """ Nova API policies are introducing new default roles with scope_type capabilities. Old policies are deprecated and silently going to be ignored in nova 23.0.0 release. """ DEPRECATED_SERVICE_POLICY = policy.DeprecatedRule( 'os_compute_api:os-services', base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since='21.0.0', ) services_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'list', check_str=base.ADMIN, description="List all running Compute services in a region.", operations=[ { 'method': 'GET', 'path': '/os-services' } ], scope_types=['project'], deprecated_rule=DEPRECATED_SERVICE_POLICY), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update', check_str=base.ADMIN, description="Update a Compute service.", operations=[ { # Added in microversion 2.53. 'method': 'PUT', 'path': '/os-services/{service_id}' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_SERVICE_POLICY), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.ADMIN, description="Delete a Compute service.", operations=[ { 'method': 'DELETE', 'path': '/os-services/{service_id}' } ], scope_types=['project'], deprecated_rule=DEPRECATED_SERVICE_POLICY), ] def list_rules(): return services_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/shelve.py0000664000175000017500000000440500000000000017177 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
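# NOTE: Each policy module above ends with a list_rules() helper; these
# per-module lists are aggregated by nova.policies.list_rules() and fed into
# the enforcer by nova.policy.register_rules() further down in this archive.
# A short sketch of inspecting those registered defaults, assuming nova and
# its dependencies are importable in the current environment:
from oslo_config import cfg
from oslo_policy import policy as oslo_policy

from nova import policies

CONF = cfg.CONF
CONF([], project='nova')
enforcer = oslo_policy.Enforcer(CONF)
enforcer.register_defaults(policies.list_rules())

default = enforcer.registered_rules['os_compute_api:os-services:list']
print(default.check_str, default.scope_types)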
from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-shelve:%s' shelve_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'shelve', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Shelve server", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (shelve)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'unshelve', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Unshelve (restore) shelved server", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (unshelve)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'unshelve_to_host', check_str=base.ADMIN, description="Unshelve (restore) shelve offloaded server to a " "specific host", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (unshelve)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'shelve_offload', check_str=base.ADMIN, description="Shelf-offload (remove) server", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (shelveOffload)' } ], scope_types=['project']), ] def list_rules(): return shelve_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/simple_tenant_usage.py0000664000175000017500000000305300000000000021735 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-simple-tenant-usage:%s' simple_tenant_usage_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'show', check_str=base.PROJECT_READER_OR_ADMIN, description="Show usage statistics for a specific tenant", operations=[ { 'method': 'GET', 'path': '/os-simple-tenant-usage/{tenant_id}' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'list', check_str=base.ADMIN, description="List per tenant usage statistics for all tenants", operations=[ { 'method': 'GET', 'path': '/os-simple-tenant-usage' } ], scope_types=['project']), ] def list_rules(): return simple_tenant_usage_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/suspend_server.py0000664000175000017500000000301600000000000020755 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-suspend-server:%s' suspend_server_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'resume', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Resume suspended server", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (resume)' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'suspend', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Suspend server", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/action (suspend)' } ], scope_types=['project']), ] def list_rules(): return suspend_server_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/tenant_networks.py0000664000175000017500000000422000000000000021131 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-tenant-networks' POLICY_NAME = 'os_compute_api:os-tenant-networks:%s' DEPRECATED_REASON = """ Nova API policies are introducing new default roles with scope_type capabilities. Old policies are deprecated and silently going to be ignored in nova 23.0.0 release. """ DEPRECATED_POLICY = policy.DeprecatedRule( BASE_POLICY_NAME, base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since='22.0.0' ) tenant_networks_policies = [ policy.DocumentedRuleDefault( name=POLICY_NAME % 'list', check_str=base.PROJECT_READER_OR_ADMIN, description="""List project networks. This API is proxy calls to the Network service. This is deprecated.""", operations=[ { 'method': 'GET', 'path': '/os-tenant-networks' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'show', check_str=base.PROJECT_READER_OR_ADMIN, description="""Show project network details. This API is proxy calls to the Network service. This is deprecated.""", operations=[ { 'method': 'GET', 'path': '/os-tenant-networks/{network_id}' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), ] def list_rules(): return tenant_networks_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/volumes.py0000664000175000017500000001307700000000000017410 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base BASE_POLICY_NAME = 'os_compute_api:os-volumes' POLICY_NAME = 'os_compute_api:os-volumes:%s' DEPRECATED_REASON = """ Nova API policies are introducing new default roles with scope_type capabilities. Old policies are deprecated and silently going to be ignored in nova 23.0.0 release. """ DEPRECATED_POLICY = policy.DeprecatedRule( BASE_POLICY_NAME, base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since='22.0.0' ) volumes_policies = [ policy.DocumentedRuleDefault( name=POLICY_NAME % 'list', check_str=base.PROJECT_READER_OR_ADMIN, description="""List volumes. This API is a proxy call to the Volume service. It is deprecated.""", operations=[ { 'method': 'GET', 'path': '/os-volumes' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'create', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="""Create volume. This API is a proxy call to the Volume service. It is deprecated.""", operations=[ { 'method': 'POST', 'path': '/os-volumes' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'detail', check_str=base.PROJECT_READER_OR_ADMIN, description="""List volumes detail. This API is a proxy call to the Volume service. It is deprecated.""", operations=[ { 'method': 'GET', 'path': '/os-volumes/detail' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'show', check_str=base.PROJECT_READER_OR_ADMIN, description="""Show volume. This API is a proxy call to the Volume service. It is deprecated.""", operations=[ { 'method': 'GET', 'path': '/os-volumes/{volume_id}' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'delete', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="""Delete volume. This API is a proxy call to the Volume service. It is deprecated.""", operations=[ { 'method': 'DELETE', 'path': '/os-volumes/{volume_id}' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'snapshots:list', check_str=base.PROJECT_READER_OR_ADMIN, description="""List snapshots. This API is a proxy call to the Volume service. It is deprecated.""", operations=[ { 'method': 'GET', 'path': '/os-snapshots' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'snapshots:create', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="""Create snapshots. This API is a proxy call to the Volume service. It is deprecated.""", operations=[ { 'method': 'POST', 'path': '/os-snapshots' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'snapshots:detail', check_str=base.PROJECT_READER_OR_ADMIN, description="""List snapshots details. This API is a proxy call to the Volume service. 
It is deprecated.""", operations=[ { 'method': 'GET', 'path': '/os-snapshots/detail' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'snapshots:show', check_str=base.PROJECT_READER_OR_ADMIN, description="""Show snapshot. This API is a proxy call to the Volume service. It is deprecated.""", operations=[ { 'method': 'GET', 'path': '/os-snapshots/{snapshot_id}' }, ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), policy.DocumentedRuleDefault( name=POLICY_NAME % 'snapshots:delete', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="""Delete snapshot. This API is a proxy call to the Volume service. It is deprecated.""", operations=[ { 'method': 'DELETE', 'path': '/os-snapshots/{snapshot_id}' } ], scope_types=['project'], deprecated_rule=DEPRECATED_POLICY), ] def list_rules(): return volumes_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policies/volumes_attachments.py0000664000175000017500000000646500000000000022006 0ustar00zuulzuul00000000000000# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from nova.policies import base POLICY_ROOT = 'os_compute_api:os-volumes-attachments:%s' volumes_attachments_policies = [ policy.DocumentedRuleDefault( name=POLICY_ROOT % 'index', check_str=base.PROJECT_READER_OR_ADMIN, description="List volume attachments for an instance", operations=[ {'method': 'GET', 'path': '/servers/{server_id}/os-volume_attachments' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'create', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Attach a volume to an instance", operations=[ { 'method': 'POST', 'path': '/servers/{server_id}/os-volume_attachments' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'show', check_str=base.PROJECT_READER_OR_ADMIN, description="Show details of a volume attachment", operations=[ { 'method': 'GET', 'path': '/servers/{server_id}/os-volume_attachments/{volume_id}' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'update', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="""Update a volume attachment. New 'update' policy about 'swap + update' request (which is possible only >2.85) only is checked. We expect to be always superset of this policy permission. 
""", operations=[ { 'method': 'PUT', 'path': '/servers/{server_id}/os-volume_attachments/{volume_id}' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'swap', check_str=base.SERVICE_ROLE, description="Update a volume attachment with a different volumeId", operations=[ { 'method': 'PUT', 'path': '/servers/{server_id}/os-volume_attachments/{volume_id}' } ], scope_types=['project']), policy.DocumentedRuleDefault( name=POLICY_ROOT % 'delete', check_str=base.PROJECT_MEMBER_OR_ADMIN, description="Detach a volume from an instance", operations=[ { 'method': 'DELETE', 'path': '/servers/{server_id}/os-volume_attachments/{volume_id}' } ], scope_types=['project']), ] def list_rules(): return volumes_attachments_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/policy.py0000664000175000017500000002360100000000000015400 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Policy Engine For Nova.""" import copy import re from oslo_config import cfg from oslo_log import log as logging from oslo_policy import policy from oslo_utils import excutils from nova import exception from nova import policies CONF = cfg.CONF LOG = logging.getLogger(__name__) _ENFORCER = None # This list is about the resources which support user based policy enforcement. # Avoid sending deprecation warning for those resources. USER_BASED_RESOURCES = ['os-keypairs'] # oslo_policy will read the policy configuration file again when the file # is changed in runtime so the old policy rules will be saved to # saved_file_rules and used to compare with new rules to determine the # rules whether were updated. saved_file_rules = [] KEY_EXPR = re.compile(r'%\((\w+)\)s') def reset(): global _ENFORCER if _ENFORCER: _ENFORCER.clear() _ENFORCER = None def init(policy_file=None, rules=None, default_rule=None, use_conf=True, suppress_deprecation_warnings=False): """Init an Enforcer class. :param policy_file: Custom policy file to use, if none is specified, `CONF.policy_file` will be used. :param rules: Default dictionary / Rules to use. It will be considered just in the first instantiation. :param default_rule: Default rule to use, CONF.default_rule will be used if none is specified. :param use_conf: Whether to load rules from config file. :param suppress_deprecation_warnings: Whether to suppress the deprecation warnings. """ global _ENFORCER global saved_file_rules if not _ENFORCER: _ENFORCER = policy.Enforcer( CONF, policy_file=policy_file, rules=rules, default_rule=default_rule, use_conf=use_conf) # NOTE(gmann): Explicitly disable the warnings for policies # changing their default check_str. During policy-defaults-refresh # work, all the policy defaults have been changed and warning for # each policy started filling the logs limit for various tool. # Once we move to new defaults only world then we can enable these # warning again. 
_ENFORCER.suppress_default_change_warnings = True if suppress_deprecation_warnings: _ENFORCER.suppress_deprecation_warnings = True register_rules(_ENFORCER) _ENFORCER.load_rules() # Only the rules which are loaded from file may be changed. current_file_rules = _ENFORCER.file_rules current_file_rules = _serialize_rules(current_file_rules) # Checks whether the rules are updated in the runtime if saved_file_rules != current_file_rules: _warning_for_deprecated_user_based_rules(current_file_rules) saved_file_rules = copy.deepcopy(current_file_rules) def _serialize_rules(rules): """Serialize all the Rule object as string which is used to compare the rules list. """ result = [(rule_name, str(rule)) for rule_name, rule in rules.items()] return sorted(result, key=lambda rule: rule[0]) def _warning_for_deprecated_user_based_rules(rules): """Warning user based policy enforcement used in the rule but the rule doesn't support it. """ for rule in rules: # We will skip the warning for the resources which support user based # policy enforcement. if [resource for resource in USER_BASED_RESOURCES if resource in rule[0]]: continue if 'user_id' in KEY_EXPR.findall(rule[1]): LOG.warning( "The user_id attribute isn't supported in the rule '%s'. " "All the user_id based policy enforcement will be removed in " "the future.", rule[0] ) def set_rules(rules, overwrite=True, use_conf=False): """Set rules based on the provided dict of rules. :param rules: New rules to use. It should be an instance of dict. :param overwrite: Whether to overwrite current rules or update them with the new rules. :param use_conf: Whether to reload rules from config file. """ init(use_conf=False) _ENFORCER.set_rules(rules, overwrite, use_conf) def authorize(context, action, target=None, do_raise=True, exc=None): """Verifies that the action is valid on the target in this context. :param context: nova context :param action: string representing the action to be checked this should be colon separated for clarity. i.e. ``compute:create_instance``, ``compute:attach_volume``, ``volume:attach_volume`` :param target: dictionary representing the object of the action for object creation this should be a dictionary representing the location of the object e.g. ``{'project_id': instance.project_id}`` If None, then this default target will be considered: {'project_id': self.project_id, 'user_id': self.user_id} :param do_raise: if True (the default), raises PolicyNotAuthorized; if False, returns False :param exc: Class of the exception to raise if the check fails. Any remaining arguments passed to :meth:`authorize` (both positional and keyword arguments) will be passed to the exception class. If not specified, :class:`PolicyNotAuthorized` will be used. :raises nova.exception.PolicyNotAuthorized: if verification fails and do_raise is True. Or if 'exc' is specified it will raise an exception of that type. :return: returns a non-False value (not necessarily "True") if authorized, and the exact value False if not authorized and do_raise is False. 
""" init() if not exc: exc = exception.PolicyNotAuthorized # Legacy fallback for empty target from context.can() # should be removed once we improve testing and scope checks if target is None: target = default_target(context) try: result = _ENFORCER.authorize(action, target, context, do_raise=do_raise, exc=exc, action=action) except policy.PolicyNotRegistered: with excutils.save_and_reraise_exception(): LOG.exception('Policy not registered') except policy.InvalidScope: LOG.debug('Policy check for %(action)s failed with scope check ' '%(credentials)s', {'action': action, 'credentials': context.to_policy_values()}) raise exc(action=action) except Exception: with excutils.save_and_reraise_exception(): LOG.debug('Policy check for %(action)s failed with credentials ' '%(credentials)s', {'action': action, 'credentials': context.to_policy_values()}) return result def default_target(context): return {'project_id': context.project_id, 'user_id': context.user_id} def check_is_admin(context): """Whether or not roles contains 'admin' role according to policy setting. """ init() # the target is user-self target = default_target(context) return _ENFORCER.authorize('context_is_admin', target, context) @policy.register('is_admin') class IsAdminCheck(policy.Check): """An explicit check for is_admin.""" def __init__(self, kind, match): """Initialize the check.""" self.expected = (match.lower() == 'true') super(IsAdminCheck, self).__init__(kind, str(self.expected)) def __call__(self, target, creds, enforcer): """Determine whether is_admin matches the requested value.""" return creds['is_admin'] == self.expected def get_rules(): if _ENFORCER: return _ENFORCER.rules def register_rules(enforcer): enforcer.register_defaults(policies.list_rules()) def get_enforcer(): # This method is used by oslopolicy CLI scripts in order to generate policy # files from overrides on disk and defaults in code. cfg.CONF([], project='nova') init() return _ENFORCER def verify_deprecated_policy(old_policy, new_policy, default_rule, context): """Check the rule of the deprecated policy action If the current rule of the deprecated policy action is set to a non-default value, then a warning message is logged stating that the new policy action should be used to dictate permissions as the old policy action is being deprecated. :param old_policy: policy action that is being deprecated :param new_policy: policy action that is replacing old_policy :param default_rule: the old_policy action default rule value :param context: the nova context """ if _ENFORCER: current_rule = str(_ENFORCER.rules[old_policy]) else: current_rule = None if current_rule != default_rule: LOG.warning("Start using the new action '%(new_policy)s'. 
" "The existing action '%(old_policy)s' is being deprecated " "and will be removed in future release.", {'new_policy': new_policy, 'old_policy': old_policy}) context.can(old_policy) return True else: return False ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.405609 nova-32.0.0/nova/privsep/0000775000175000017500000000000000000000000015215 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/privsep/__init__.py0000664000175000017500000000221400000000000017325 0ustar00zuulzuul00000000000000# Copyright 2016 Red Hat, Inc # Copyright 2017 Rackspace Australia # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Setup privsep decorator.""" from oslo_privsep import capabilities from oslo_privsep import priv_context sys_admin_pctxt = priv_context.PrivContext( 'nova', cfg_section='nova_sys_admin', pypath=__name__ + '.sys_admin_pctxt', capabilities=[capabilities.CAP_CHOWN, capabilities.CAP_DAC_OVERRIDE, capabilities.CAP_DAC_READ_SEARCH, capabilities.CAP_FOWNER, capabilities.CAP_NET_ADMIN, capabilities.CAP_SYS_ADMIN], ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/privsep/fs.py0000664000175000017500000002776700000000000016222 0ustar00zuulzuul00000000000000# Copyright 2016 Red Hat, Inc # Copyright 2017 Rackspace Australia # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helpers for filesystem related routines. 
""" import hashlib from oslo_concurrency import processutils from oslo_log import log as logging import nova.privsep LOG = logging.getLogger(__name__) @nova.privsep.sys_admin_pctxt.entrypoint def mount(fstype, device, mountpoint, options): mount_cmd = ['mount'] if fstype: mount_cmd.extend(['-t', fstype]) if options is not None: mount_cmd.extend(options) mount_cmd.extend([device, mountpoint]) return processutils.execute(*mount_cmd) @nova.privsep.sys_admin_pctxt.entrypoint def umount(mountpoint): processutils.execute('umount', mountpoint, attempts=3, delay_on_retry=True) @nova.privsep.sys_admin_pctxt.entrypoint def lvcreate(size, lv, vg, preallocated=None): cmd = ['lvcreate'] if not preallocated: cmd.extend(['-L', '%db' % size]) else: cmd.extend(['-L', '%db' % preallocated, '--virtualsize', '%db' % size]) cmd.extend(['-n', lv, vg]) processutils.execute(*cmd, attempts=3) @nova.privsep.sys_admin_pctxt.entrypoint def vginfo(vg): # NOTE(gibi): We see intermittent faults querying volume groups failing # with error code -11, hence the retry. See bug 1931710 return processutils.execute( 'vgs', '--noheadings', '--nosuffix', '--separator', '|', '--units', 'b', '-o', 'vg_size,vg_free', vg, attempts=3, delay_on_retry=True, ) @nova.privsep.sys_admin_pctxt.entrypoint def lvlist(vg): return processutils.execute( 'lvs', '--noheadings', '-o', 'lv_name', vg, attempts=3, delay_on_retry=True) @nova.privsep.sys_admin_pctxt.entrypoint def lvinfo(path): return processutils.execute('lvs', '-o', 'vg_all,lv_all', '--separator', '|', path) @nova.privsep.sys_admin_pctxt.entrypoint def lvremove(path): processutils.execute('lvremove', '-f', path, attempts=3) @nova.privsep.sys_admin_pctxt.entrypoint def blockdev_size(path): return processutils.execute('blockdev', '--getsize64', path) @nova.privsep.sys_admin_pctxt.entrypoint def blockdev_flush(path): return processutils.execute('blockdev', '--flushbufs', path) @nova.privsep.sys_admin_pctxt.entrypoint def clear(path, volume_size, shred=False): cmd = ['shred'] if shred: cmd.extend(['-n3']) else: cmd.extend(['-n0', '-z']) cmd.extend(['-s%d' % volume_size, path]) processutils.execute(*cmd) @nova.privsep.sys_admin_pctxt.entrypoint def loopsetup(path): return processutils.execute('losetup', '--find', '--show', path) @nova.privsep.sys_admin_pctxt.entrypoint def loopremove(device): return processutils.execute('losetup', '--detach', device, attempts=3) @nova.privsep.sys_admin_pctxt.entrypoint def nbd_connect(device, image): return processutils.execute('qemu-nbd', '-c', device, image) @nova.privsep.sys_admin_pctxt.entrypoint def nbd_disconnect(device): return processutils.execute('qemu-nbd', '-d', device) @nova.privsep.sys_admin_pctxt.entrypoint def create_device_maps(device): return processutils.execute('kpartx', '-a', device) @nova.privsep.sys_admin_pctxt.entrypoint def remove_device_maps(device): return processutils.execute('kpartx', '-d', device) @nova.privsep.sys_admin_pctxt.entrypoint def e2fsck(image, flags='-fp'): unprivileged_e2fsck(image, flags=flags) # NOTE(mikal): this method is deliberately not wrapped in a privsep # entrypoint. This is not for unit testing, there are some callers who do # not require elevated permissions when calling this. 
def unprivileged_e2fsck(image, flags='-fp'): processutils.execute('e2fsck', flags, image, check_exit_code=[0, 1, 2]) @nova.privsep.sys_admin_pctxt.entrypoint def resize2fs(image, check_exit_code, size=None): unprivileged_resize2fs(image, check_exit_code=check_exit_code, size=size) # NOTE(mikal): this method is deliberately not wrapped in a privsep # entrypoint. This is not for unit testing, there are some callers who do # not require elevated permissions when calling this. def unprivileged_resize2fs(image, check_exit_code, size=None): if size: cmd = ['resize2fs', image, size] else: cmd = ['resize2fs', image] processutils.execute(*cmd, check_exit_code=check_exit_code) @nova.privsep.sys_admin_pctxt.entrypoint def create_partition_table(device, style, check_exit_code=True): processutils.execute('parted', '--script', device, 'mklabel', style, check_exit_code=check_exit_code) @nova.privsep.sys_admin_pctxt.entrypoint def create_partition(device, style, start, end, check_exit_code=True): processutils.execute('parted', '--script', device, '--', 'mkpart', style, start, end, check_exit_code=check_exit_code) @nova.privsep.sys_admin_pctxt.entrypoint def list_partitions(device): return unprivileged_list_partitions(device) # NOTE(mikal): this method is deliberately not wrapped in a privsep # entrypoint. This is not for unit testing, there are some callers who do # not require elevated permissions when calling this. def unprivileged_list_partitions(device): """Return partition information (num, size, type) for a device.""" out, _err = processutils.execute('parted', '--script', '--machine', device, 'unit s', 'print') lines = [line for line in out.split('\n') if line] partitions = [] LOG.debug('Partitions:') for line in lines[2:]: line = line.rstrip(';') num, start, end, size, fstype, name, flags = line.split(':') num = int(num) start = int(start.rstrip('s')) end = int(end.rstrip('s')) size = int(size.rstrip('s')) LOG.debug(' %(num)s: %(fstype)s %(size)d sectors', {'num': num, 'fstype': fstype, 'size': size}) partitions.append((num, start, size, fstype, name, flags)) return partitions @nova.privsep.sys_admin_pctxt.entrypoint def resize_partition(device, start, end, bootable): processutils.execute('parted', '--script', device, 'rm', '1') processutils.execute('parted', '--script', device, 'mkpart', 'primary', '%ds' % start, '%ds' % end) if bootable: processutils.execute('parted', '--script', device, 'set', '1', 'boot', 'on') @nova.privsep.sys_admin_pctxt.entrypoint def ext_journal_disable(device): processutils.execute('tune2fs', '-O ^has_journal', device) @nova.privsep.sys_admin_pctxt.entrypoint def ext_journal_enable(device): processutils.execute('tune2fs', '-j', device) # NOTE(mikal): nova allows deployers to configure the command line which is # used to create a filesystem of a given type. This is frankly a little bit # weird, but its also historical and probably should be in some sort of # museum. So, we do that thing here, but it requires a funny dance in order # to load that configuration at startup. # NOTE(mikal): I really feel like this whole thing should be deprecated, I # just don't think its a great idea to let people specify a command in a # configuration option to run as root. 
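# Illustrative example (the option name and sample value below are assumptions,
# not taken from this file): the per-os-type commands registered via
# load_mkfs_command() typically originate from a configuration option such as
# ``virt_mkfs``, whose entries look like
#
#   virt_mkfs = windows=mkfs.ntfs --force --fast --label %(fs_label)s %(target)s
#
# i.e. an os_type, an '=', and a command template in which %(fs_label)s and
# %(target)s are substituted by configurable_mkfs() below.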
_MKFS_COMMAND = {} _DEFAULT_MKFS_COMMAND = None FS_FORMAT_EXT2 = "ext2" FS_FORMAT_EXT3 = "ext3" FS_FORMAT_EXT4 = "ext4" FS_FORMAT_XFS = "xfs" FS_FORMAT_NTFS = "ntfs" FS_FORMAT_VFAT = "vfat" SUPPORTED_FS_TO_EXTEND = ( FS_FORMAT_EXT2, FS_FORMAT_EXT3, FS_FORMAT_EXT4) _DEFAULT_FILE_SYSTEM = FS_FORMAT_VFAT _DEFAULT_FS_BY_OSTYPE = {'linux': FS_FORMAT_EXT4, 'windows': FS_FORMAT_NTFS} def load_mkfs_command(os_type, command): global _MKFS_COMMAND global _DEFAULT_MKFS_COMMAND _MKFS_COMMAND[os_type] = command if os_type == 'default': _DEFAULT_MKFS_COMMAND = command def get_fs_type_for_os_type(os_type): global _MKFS_COMMAND return os_type if _MKFS_COMMAND.get(os_type) else 'default' # NOTE(mikal): this method needs to be duplicated from utils because privsep # can't depend on code outside the privsep directory. def _get_hash_str(base_str): """Returns string that represents MD5 hash of base_str (in hex format). If base_str is a Unicode string, encode it to UTF-8. """ if isinstance(base_str, str): base_str = base_str.encode('utf-8') return hashlib.md5(base_str, usedforsecurity=False).hexdigest() def get_file_extension_for_os_type(os_type, default_ephemeral_format, specified_fs=None): global _MKFS_COMMAND global _DEFAULT_MKFS_COMMAND mkfs_command = _MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND) if mkfs_command: extension = mkfs_command else: if not specified_fs: specified_fs = default_ephemeral_format if not specified_fs: specified_fs = _DEFAULT_FS_BY_OSTYPE.get(os_type, _DEFAULT_FILE_SYSTEM) extension = specified_fs return _get_hash_str(extension)[:7] @nova.privsep.sys_admin_pctxt.entrypoint def mkfs(fs, path, label=None): unprivileged_mkfs(fs, path, label=None) # NOTE(mikal): this method is deliberately not wrapped in a privsep # entrypoint. This is not for unit testing, there are some callers who do # not require elevated permissions when calling this. def unprivileged_mkfs(fs, path, label=None): """Format a file or block device :param fs: Filesystem type (examples include 'swap', 'ext3', 'ext4' 'btrfs', etc.) :param path: Path to file or block device to format :param label: Volume label to use """ if fs == 'swap': args = ['mkswap'] else: args = ['mkfs', '-t', fs] # add -F to force no interactive execute on non-block device. if fs in ('ext3', 'ext4', 'ntfs'): args.extend(['-F']) if label: if fs in ('msdos', 'vfat'): label_opt = '-n' else: label_opt = '-L' args.extend([label_opt, label]) args.append(path) processutils.execute(*args) @nova.privsep.sys_admin_pctxt.entrypoint def _inner_configurable_mkfs(os_type, fs_label, target): mkfs_command = (_MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND) or '') % {'fs_label': fs_label, 'target': target} processutils.execute(*mkfs_command.split()) # NOTE(mikal): this method is deliberately not wrapped in a privsep entrypoint def configurable_mkfs(os_type, fs_label, target, run_as_root, default_ephemeral_format, specified_fs=None): # Format a file or block device using a user provided command for each # os type. If user has not provided any configuration, format type will # be used according to a default_ephemeral_format configuration or a # system default. 
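    # Illustrative call (values are hypothetical): with no per-os-type command
    # loaded, configurable_mkfs('linux', 'ephemeral0', '/dev/vdb', True, 'ext4')
    # falls through to mkfs('ext4', '/dev/vdb', 'ephemeral0') via the privsep
    # entrypoint, because run_as_root is True.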
global _MKFS_COMMAND global _DEFAULT_MKFS_COMMAND mkfs_command = (_MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND) or '') % {'fs_label': fs_label, 'target': target} if mkfs_command: if run_as_root: _inner_configurable_mkfs(os_type, fs_label, target) else: processutils.execute(*mkfs_command.split()) else: if not specified_fs: specified_fs = default_ephemeral_format if not specified_fs: specified_fs = _DEFAULT_FS_BY_OSTYPE.get(os_type, _DEFAULT_FILE_SYSTEM) if run_as_root: mkfs(specified_fs, target, fs_label) else: unprivileged_mkfs(specified_fs, target, fs_label) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/privsep/idmapshift.py0000664000175000017500000001102400000000000017715 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace, Andrew Melton # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ IDMapShift is a tool that properly sets the ownership of a filesystem for use with linux user namespaces. When using user namespaces with linux containers, the filesystem of the container must be owned by the targeted user and group ids being applied to that container. Otherwise, processes inside the container won't be able to access the filesystem. For example, when using the id map string '0:10000:2000', this means that user ids inside the container between 0 and 1999 will map to user ids on the host between 10000 and 11999. Root (0) becomes 10000, user 1 becomes 10001, user 50 becomes 10050 and user 1999 becomes 11999. This means that files that are owned by root need to actually be owned by user 10000, and files owned by 50 need to be owned by 10050, and so on. IDMapShift will take the uid and gid strings used for user namespaces and properly set up the filesystem for use by those users. Uids and gids outside of provided ranges will be mapped to nobody (max uid/gid) so that they are inaccessible inside the container. 
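The mappings consumed by find_target_id() below are (start, target, count)
tuples; the id map string '0:10000:2000' from the example above corresponds to
the tuple (0, 10000, 2000). Any uid or gid that falls outside every provided
range is mapped to NOBODY_ID (65534).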
""" import os from oslo_log import log as logging import nova.privsep LOG = logging.getLogger(__name__) NOBODY_ID = 65534 def find_target_id(fsid, mappings, nobody, memo): if fsid not in memo: for start, target, count in mappings: if start <= fsid < start + count: memo[fsid] = (fsid - start) + target break else: memo[fsid] = nobody return memo[fsid] def print_chown(path, uid, gid, target_uid, target_gid): LOG.debug('%s %s:%s -> %s:%s', path, uid, gid, target_uid, target_gid) def shift_path(path, uid_mappings, gid_mappings, nobody, uid_memo, gid_memo): stat = os.lstat(path) uid = stat.st_uid gid = stat.st_gid target_uid = find_target_id(uid, uid_mappings, nobody, uid_memo) target_gid = find_target_id(gid, gid_mappings, nobody, gid_memo) print_chown(path, uid, gid, target_uid, target_gid) os.lchown(path, target_uid, target_gid) def shift_dir(fsdir, uid_mappings, gid_mappings, nobody): uid_memo = dict() gid_memo = dict() def shift_path_short(p): shift_path(p, uid_mappings, gid_mappings, nobody, uid_memo=uid_memo, gid_memo=gid_memo) shift_path_short(fsdir) for root, dirs, files in os.walk(fsdir): for d in dirs: path = os.path.join(root, d) shift_path_short(path) for f in files: path = os.path.join(root, f) shift_path_short(path) def confirm_path(path, uid_ranges, gid_ranges, nobody): stat = os.lstat(path) uid = stat.st_uid gid = stat.st_gid uid_in_range = True if uid == nobody else False gid_in_range = True if gid == nobody else False if not uid_in_range or not gid_in_range: for (start, end) in uid_ranges: if start <= uid <= end: uid_in_range = True break for (start, end) in gid_ranges: if start <= gid <= end: gid_in_range = True break return uid_in_range and gid_in_range def get_ranges(maps): return [(target, target + count - 1) for (start, target, count) in maps] def confirm_dir(fsdir, uid_mappings, gid_mappings, nobody): uid_ranges = get_ranges(uid_mappings) gid_ranges = get_ranges(gid_mappings) if not confirm_path(fsdir, uid_ranges, gid_ranges, nobody): return False for root, dirs, files in os.walk(fsdir): for d in dirs: path = os.path.join(root, d) if not confirm_path(path, uid_ranges, gid_ranges, nobody): return False for f in files: path = os.path.join(root, f) if not confirm_path(path, uid_ranges, gid_ranges, nobody): return False return True @nova.privsep.sys_admin_pctxt.entrypoint def shift(path, uid_map, gid_map): if confirm_dir(uid_map, gid_map, path, NOBODY_ID): return shift_dir(path, uid_map, gid_map, NOBODY_ID) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/privsep/libvirt.py0000664000175000017500000002026400000000000017246 0ustar00zuulzuul00000000000000# Copyright 2016 Red Hat, Inc # Copyright 2017 Rackspace Australia # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ libvirt specific routines. 
""" import binascii import os import stat from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils import units import nova.privsep LOG = logging.getLogger(__name__) @nova.privsep.sys_admin_pctxt.entrypoint def dmcrypt_create_volume(target, device, cipher, key_size, key): """Sets up a dmcrypt mapping :param target: device mapper logical device name :param device: underlying block device :param cipher: encryption cipher string digestible by cryptsetup :param key_size: encryption key size :param key: encoded encryption key bytestring """ cmd = ('cryptsetup', 'create', target, device, '--cipher=' + cipher, '--key-size=' + str(key_size), '--key-file=-') key = binascii.hexlify(key).decode('utf-8') processutils.execute(*cmd, process_input=key) @nova.privsep.sys_admin_pctxt.entrypoint def dmcrypt_delete_volume(target): """Deletes a dmcrypt mapping :param target: name of the mapped logical device """ processutils.execute('cryptsetup', 'remove', target) @nova.privsep.sys_admin_pctxt.entrypoint def ploop_init(size, disk_format, fs_type, disk_path): """Initialize ploop disk, make it readable for non-root user :param disk_format: data allocation format (raw or expanded) :param fs_type: filesystem (ext4, ext3, none) :param disk_path: ploop image file """ processutils.execute('ploop', 'init', '-s', size, '-f', disk_format, '-t', fs_type, disk_path, check_exit_code=True) # Add read access for all users, because "ploop init" creates # disk with rw rights only for root. OpenStack user should have access # to the disk to request info via "qemu-img info" # TODO(mikal): this is a faithful rendition of the pre-privsep code from # the libvirt driver, but it seems undesirable to me. It would be good to # create the loop file with the right owner or group such that we don't # need to have it world readable. I don't have access to a system to test # this on however. 
st = os.stat(disk_path) os.chmod(disk_path, st.st_mode | stat.S_IROTH) @nova.privsep.sys_admin_pctxt.entrypoint def ploop_resize(disk_path, size): """Resize ploop disk :param disk_path: ploop image file :param size: new size (in bytes) """ processutils.execute('prl_disk_tool', 'resize', '--size', '%dM' % (size // units.Mi), '--resize_partition', '--hdd', disk_path, check_exit_code=True) @nova.privsep.sys_admin_pctxt.entrypoint def ploop_restore_descriptor(image_dir, base_delta, fmt): """Restore ploop disk descriptor XML :param image_dir: path to where descriptor XML is created :param base_delta: ploop image file containing the data :param fmt: ploop data allocation format (raw or expanded) """ processutils.execute('ploop', 'restore-descriptor', '-f', fmt, image_dir, base_delta, check_exit_code=True) @nova.privsep.sys_admin_pctxt.entrypoint def plug_infiniband_vif(vnic_mac, device_id, fabric, net_model, pci_slot): processutils.execute('ebrctl', 'add-port', vnic_mac, device_id, fabric, net_model, pci_slot) @nova.privsep.sys_admin_pctxt.entrypoint def unplug_infiniband_vif(fabric, vnic_mac): processutils.execute('ebrctl', 'del-port', fabric, vnic_mac) @nova.privsep.sys_admin_pctxt.entrypoint def plug_midonet_vif(port_id, dev): processutils.execute('mm-ctl', '--bind-port', port_id, dev) @nova.privsep.sys_admin_pctxt.entrypoint def unplug_midonet_vif(port_id): processutils.execute('mm-ctl', '--unbind-port', port_id) @nova.privsep.sys_admin_pctxt.entrypoint def plug_plumgrid_vif(dev, iface_id, vif_address, net_id, tenant_id): processutils.execute('ifc_ctl', 'gateway', 'add_port', dev) processutils.execute('ifc_ctl', 'gateway', 'ifup', dev, 'access_vm', iface_id, vif_address, 'pgtag2=%s' % net_id, 'pgtag1=%s' % tenant_id) @nova.privsep.sys_admin_pctxt.entrypoint def unplug_plumgrid_vif(dev): processutils.execute('ifc_ctl', 'gateway', 'ifdown', dev) processutils.execute('ifc_ctl', 'gateway', 'del_port', dev) @nova.privsep.sys_admin_pctxt.entrypoint def readpty(path): # TODO(mikal): I'm not a huge fan that we don't enforce a valid pty path # here, but I haven't come up with a great way of doing that. # NOTE(mikal): I am deliberately not catching the ImportError # exception here... Some platforms (I'm looking at you Windows) # don't have a fcntl and we may as well let them know that # with an ImportError, not that they should be calling this at all. import fcntl try: with open(path, 'r') as f: current_flags = fcntl.fcntl(f.fileno(), fcntl.F_GETFL) fcntl.fcntl(f.fileno(), fcntl.F_SETFL, current_flags | os.O_NONBLOCK) return f.read() except Exception as exc: # NOTE(mikal): dear internet, I see you looking at me with your # judging eyes. There's a story behind why we do this. You see, the # previous implementation did this: # # out, err = utils.execute('dd', # 'if=%s' % pty, # 'iflag=nonblock', # run_as_root=True, # check_exit_code=False) # return out # # So, it never checked stderr or the return code of the process it # ran to read the pty. Doing something better than that has turned # out to be unexpectedly hard because there are a surprisingly large # variety of errors which appear to be thrown when doing this read. # # Therefore for now we log the errors, but keep on rolling. Volunteers # to help clean this up are welcome and will receive free beverages. 
LOG.info( 'Ignored error while reading from instance console pty: %s', exc ) return '' @nova.privsep.sys_admin_pctxt.entrypoint def systemd_run_qb_mount(qb_vol, mnt_base, cfg_file=None): """Mount QB volume in separate CGROUP""" # Note(kaisers): Details on why we run without --user at bug #1756823 sysdr_cmd = ['systemd-run', '--scope', 'mount.quobyte', '--disable-xattrs', qb_vol, mnt_base] if cfg_file: sysdr_cmd.extend(['-c', cfg_file]) return processutils.execute(*sysdr_cmd) # NOTE(kaisers): this method is deliberately not wrapped in a privsep entry. def unprivileged_qb_mount(qb_vol, mnt_base, cfg_file=None): """Mount QB volume""" mnt_cmd = ['mount.quobyte', '--disable-xattrs', qb_vol, mnt_base] if cfg_file: mnt_cmd.extend(['-c', cfg_file]) return processutils.execute(*mnt_cmd) @nova.privsep.sys_admin_pctxt.entrypoint def umount(mnt_base): """Unmount volume""" unprivileged_umount(mnt_base) # NOTE(kaisers): this method is deliberately not wrapped in a privsep entry. def unprivileged_umount(mnt_base): """Unmount volume""" umnt_cmd = ['umount', mnt_base] return processutils.execute(*umnt_cmd) @nova.privsep.sys_admin_pctxt.entrypoint def get_pmem_namespaces(): ndctl_cmd = ['ndctl', 'list', '-X'] nss_info = processutils.execute(*ndctl_cmd)[0] return nss_info @nova.privsep.sys_admin_pctxt.entrypoint def cleanup_vpmem(devpath): daxio_cmd = ['daxio', '-z', '-o', '%s' % devpath] processutils.execute(*daxio_cmd) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/privsep/linux_net.py0000664000175000017500000001075300000000000017602 0ustar00zuulzuul00000000000000# Copyright 2016 Red Hat, Inc # Copyright 2017 Rackspace Australia # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Linux network specific helpers. 
""" import os from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils import excutils import nova.privsep.linux_net LOG = logging.getLogger(__name__) def device_exists(device): """Check if ethernet device exists.""" return os.path.exists('/sys/class/net/%s' % device) def delete_net_dev(dev): """Delete a network device only if it exists.""" if device_exists(dev): try: delete_net_dev_escalated(dev) LOG.debug("Net device removed: '%s'", dev) except processutils.ProcessExecutionError: with excutils.save_and_reraise_exception(): LOG.error("Failed removing net device: '%s'", dev) @nova.privsep.sys_admin_pctxt.entrypoint def delete_net_dev_escalated(dev): processutils.execute('ip', 'link', 'delete', dev, check_exit_code=[0, 2, 254]) @nova.privsep.sys_admin_pctxt.entrypoint def set_device_mtu(dev, mtu): if mtu: processutils.execute('ip', 'link', 'set', dev, 'mtu', mtu, check_exit_code=[0, 2, 254]) @nova.privsep.sys_admin_pctxt.entrypoint def set_device_enabled(dev): _set_device_enabled_inner(dev) def _set_device_enabled_inner(dev): processutils.execute('ip', 'link', 'set', dev, 'up', check_exit_code=[0, 2, 254]) @nova.privsep.sys_admin_pctxt.entrypoint def set_device_trust(dev, vf_num, trusted): _set_device_trust_inner(dev, vf_num, trusted) def _set_device_trust_inner(dev, vf_num, trusted): processutils.execute('ip', 'link', 'set', dev, 'vf', vf_num, 'trust', bool(trusted) and 'on' or 'off', check_exit_code=[0, 2, 254]) @nova.privsep.sys_admin_pctxt.entrypoint def set_device_macaddr(dev, mac_addr, port_state=None): _set_device_macaddr_inner(dev, mac_addr, port_state=port_state) def _set_device_macaddr_inner(dev, mac_addr, port_state=None): if port_state: processutils.execute('ip', 'link', 'set', dev, 'address', mac_addr, port_state, check_exit_code=[0, 2, 254]) else: processutils.execute('ip', 'link', 'set', dev, 'address', mac_addr, check_exit_code=[0, 2, 254]) @nova.privsep.sys_admin_pctxt.entrypoint def set_device_macaddr_and_vlan(dev, vf_num, mac_addr, vlan): processutils.execute('ip', 'link', 'set', dev, 'vf', vf_num, 'mac', mac_addr, 'vlan', vlan, run_as_root=True, check_exit_code=[0, 2, 254]) @nova.privsep.sys_admin_pctxt.entrypoint def create_tap_dev(dev, mac_address=None, multiqueue=False): if not device_exists(dev): try: # First, try with 'ip' cmd = ('ip', 'tuntap', 'add', dev, 'mode', 'tap') if multiqueue: cmd = cmd + ('multi_queue', ) processutils.execute(*cmd, check_exit_code=[0, 2, 254]) except processutils.ProcessExecutionError: if multiqueue: LOG.warning( 'Failed to create a tap device with ip tuntap. ' 'tunctl does not support creation of multi-queue ' 'enabled devices, skipping fallback.') raise # Second option: tunctl processutils.execute('tunctl', '-b', '-t', dev) if mac_address: _set_device_macaddr_inner(dev, mac_address) _set_device_enabled_inner(dev) @nova.privsep.sys_admin_pctxt.entrypoint def add_vlan(bridge_interface, interface, vlan_num): processutils.execute('ip', 'link', 'add', 'link', bridge_interface, 'name', interface, 'type', 'vlan', 'id', vlan_num, check_exit_code=[0, 2, 254]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/privsep/path.py0000664000175000017500000000645200000000000016532 0ustar00zuulzuul00000000000000# Copyright 2016 Red Hat, Inc # Copyright 2017 Rackspace Australia # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Routines that bypass file-system checks.""" import errno import os import shutil from oslo_utils import fileutils from nova import exception import nova.privsep @nova.privsep.sys_admin_pctxt.entrypoint def writefile(path, mode, content): if not os.path.exists(os.path.dirname(path)): raise exception.FileNotFound(file_path=path) with open(path, mode) as f: f.write(content) @nova.privsep.sys_admin_pctxt.entrypoint def chown( path: str, uid: int = -1, gid: int = -1, recursive: bool = False, ) -> None: if not os.path.exists(path): raise exception.FileNotFound(file_path=path) if not recursive or os.path.isfile(path): return os.chown(path, uid, gid) for root, dirs, files in os.walk(path): os.chown(root, uid, gid) for item in dirs: os.chown(os.path.join(root, item), uid, gid) for item in files: os.chown(os.path.join(root, item), uid, gid) @nova.privsep.sys_admin_pctxt.entrypoint def makedirs(path): fileutils.ensure_tree(path) @nova.privsep.sys_admin_pctxt.entrypoint def chmod(path, mode): if not os.path.exists(path): raise exception.FileNotFound(file_path=path) os.chmod(path, mode) @nova.privsep.sys_admin_pctxt.entrypoint def move_tree(source_path: str, dest_path: str) -> None: shutil.move(source_path, dest_path) @nova.privsep.sys_admin_pctxt.entrypoint def utime(path): if not os.path.exists(path): raise exception.FileNotFound(file_path=path) # NOTE(mikal): the old version of this used execute(touch, ...), which # would apparently fail on shared storage when multiple instances were # being launched at the same time. If we see failures here, we might need # to wrap this in a try / except. os.utime(path, None) @nova.privsep.sys_admin_pctxt.entrypoint def rmdir(path): if not os.path.exists(path): raise exception.FileNotFound(file_path=path) os.rmdir(path) @nova.privsep.sys_admin_pctxt.entrypoint def last_bytes(path, num): """Return num bytes from the end of the file, and remaining byte count. :param path: The file to read :param num: The number of bytes to return :returns: (data, remaining) """ with open(path, 'rb') as f: try: f.seek(-num, os.SEEK_END) except IOError as e: # seek() fails with EINVAL when trying to go before the start of # the file. It means that num is larger than the file size, so # just go to the start. if e.errno == errno.EINVAL: f.seek(0, os.SEEK_SET) else: raise remaining = f.tell() return (f.read(), remaining) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/privsep/qemu.py0000664000175000017500000002603700000000000016546 0ustar00zuulzuul00000000000000# Copyright 2018 Michael Still and Aptira # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Helpers for qemu tasks. """ import contextlib import os import tempfile import typing as ty from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils import units from nova import exception from nova.i18n import _ import nova.privsep.utils LOG = logging.getLogger(__name__) QEMU_IMG_LIMITS = processutils.ProcessLimits( cpu_time=30, address_space=1 * units.Gi) class EncryptionOptions(ty.TypedDict): secret: str format: str @nova.privsep.sys_admin_pctxt.entrypoint def convert_image(source, dest, in_format, out_format, instances_path, compress, src_encryption=None, dest_encryption=None): unprivileged_convert_image(source, dest, in_format, out_format, instances_path, compress, src_encryption=src_encryption, dest_encryption=dest_encryption) # NOTE(mikal): this method is deliberately not wrapped in a privsep entrypoint def unprivileged_convert_image( source: str, dest: str, in_format: ty.Optional[str], out_format: str, instances_path: str, compress: bool, src_encryption: ty.Optional[EncryptionOptions] = None, dest_encryption: ty.Optional[EncryptionOptions] = None, ) -> None: """Disk image conversion with qemu-img :param source: Location of the disk image to convert :param dest: Desired location of the converted disk image :param in_format: Disk image format of the source image :param out_format: Desired disk image format of the converted disk image :param instances_path: Location where instances are stored on disk :param compress: Whether to compress the converted disk image :param src_encryption: (Optional) Dict detailing various encryption attributes for the source image such as the format and passphrase. :param dest_encryption: (Optional) Dict detailing various encryption attributes for the destination image such as the format and passphrase. The in_format and out_format represent disk image file formats in QEMU, which are: * qcow2, which can be encrypted or not encrypted depending on options * raw, which is unencrypted * luks, which is encrypted raw See https://www.qemu.org/docs/master/system/qemu-block-drivers.html """ # NOTE(mdbooth, kchamart): `qemu-img convert` defaults to # 'cache=writeback' for the source image, and 'cache=unsafe' for the # target, which means that data is not synced to disk at completion. # We explicitly use 'cache=none' here, for the target image, to (1) # ensure that we don't interfere with other applications using the # host's I/O cache, and (2) ensure that the data is on persistent # storage when the command exits. Without (2), a host crash may # leave a corrupt image in the image cache, which Nova cannot # recover automatically. # NOTE(zigo, kchamart): We cannot use `qemu-img convert -t none` if # the 'instance_dir' is mounted on a filesystem that doesn't support # O_DIRECT, which is the case, for example, with 'tmpfs'. This # simply crashes `openstack server create` in environments like live # distributions. In such cases, the best choice is 'writeback', # which (a) makes the conversion multiple times faster; and (b) is # as safe as it can be, because at the end of the conversion it, # just like 'writethrough', calls fsync(2)|fdatasync(2), which # ensures to safely write the data to the physical disk. # NOTE(mikal): there is an assumption here that the source and destination # are in the instances_path. Is that worth enforcing? 
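    # Illustrative only (paths are hypothetical): for an unencrypted
    # qcow2 -> raw conversion on storage that supports O_DIRECT, the command
    # assembled below ends up roughly as:
    #   qemu-img convert -t none -O raw -f qcow2 /instances/_base/img /instances/uuid/disk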
if nova.privsep.utils.supports_direct_io(instances_path): cache_mode = 'none' else: cache_mode = 'writeback' cmd = ['qemu-img', 'convert', '-t', cache_mode, '-O', out_format] # qemu-img: --image-opts and --format are mutually exclusive # If the source is encrypted, we will need to pass encryption related # options using --image-opts. driver_str = '' if in_format is not None: if not src_encryption: cmd += ['-f', in_format] else: driver_str = f'driver={in_format},' if compress: cmd += ['-c'] src_secret_file = None dest_secret_file = None encryption_opts: ty.List[str] = [] with contextlib.ExitStack() as stack: if src_encryption: src_secret_file = stack.enter_context( tempfile.NamedTemporaryFile(mode='tr+', encoding='utf-8')) # Write out the passphrase secret to a temp file src_secret_file.write(src_encryption['secret']) # Ensure the secret is written to disk, we can't .close() here as # that removes the file when using NamedTemporaryFile src_secret_file.flush() # When --image-opts is used, the source filename must be passed as # part of the option string instead of as a positional arg. # # The basic options include the secret and encryption format # Option names depend on the QEMU disk image file format: # https://www.qemu.org/docs/master/system/qemu-block-drivers.html#disk-image-file-formats # noqa # For 'luks' it is 'key-secret' and format is implied # For 'qcow2' it is 'encrypt.key-secret' and 'encrypt.format' prefix = 'encrypt.' if in_format == 'qcow2' else '' encryption_opts = [ '--object', f"secret,id=sec0,file={src_secret_file.name}", '--image-opts', f"{driver_str}file.driver=file,file.filename={source}," f"{prefix}key-secret=sec0", ] if dest_encryption: dest_secret_file = stack.enter_context( tempfile.NamedTemporaryFile(mode='tr+', encoding='utf-8')) # Write out the passphrase secret to a temp file dest_secret_file.write(dest_encryption['secret']) # Ensure the secret is written to disk, we can't .close() # here as that removes the file when using # NamedTemporaryFile dest_secret_file.flush() prefix = 'encrypt.' if out_format == 'qcow2' else '' encryption_opts += [ '--object', f"secret,id=sec1,file={dest_secret_file.name}", '-o', f'{prefix}key-secret=sec1', ] if prefix: # The encryption format is only relevant for the 'qcow2' disk # format. Otherwise, the disk format is 'luks' and the # encryption format is implied and not accepted as an option in # that case. encryption_opts += [ '-o', f"{prefix}format={dest_encryption['format']}" ] # Supported luks options: # cipher-alg= - Name of cipher algorithm and # key length # cipher-mode= - Name of encryption cipher mode # hash-alg= - Name of hash algorithm to use # for PBKDF # iter-time= - Time to spend in PBKDF in # milliseconds # ivgen-alg= - Name of IV generator algorithm # ivgen-hash-alg= - Name of IV generator hash # algorithm # # NOTE(melwitt): Sensible defaults (that match the qemu # defaults) are hardcoded at this time for simplicity and # consistency when instances are migrated. Configuration of # luks options could be added in a future release. encryption_options = { 'cipher-alg': 'aes-256', 'cipher-mode': 'xts', 'hash-alg': 'sha256', 'iter-time': 2000, 'ivgen-alg': 'plain64', 'ivgen-hash-alg': 'sha256', } for option, value in encryption_options.items(): encryption_opts += [ '-o', f'{prefix}{option}={value}', ] if src_encryption or dest_encryption: cmd += encryption_opts # If the source is not encrypted, it's passed as a positional argument. 
if not src_encryption: cmd += [source] processutils.execute(*cmd + [dest]) @nova.privsep.sys_admin_pctxt.entrypoint def privileged_qemu_img_info(path, format=None): """Return an object containing the parsed output from qemu-img info This is a privileged call to qemu-img info using the sys_admin_pctxt entrypoint allowing host block devices etc to be accessed. """ return unprivileged_qemu_img_info(path, format=format) def unprivileged_qemu_img_info(path, format=None): """Return an object containing the parsed output from qemu-img info.""" try: # The following check is about ploop images that reside within # directories and always have DiskDescriptor.xml file beside them if (os.path.isdir(path) and os.path.exists(os.path.join(path, "DiskDescriptor.xml"))): path = os.path.join(path, "root.hds") cmd = [ 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path, '--force-share', '--output=json', ] if format is not None: cmd = cmd + ['-f', format] out, err = processutils.execute(*cmd, prlimit=QEMU_IMG_LIMITS) except processutils.ProcessExecutionError as exp: if exp.exit_code == -9: # this means we hit prlimits, make the exception more specific msg = (_("qemu-img aborted by prlimits when inspecting " "%(path)s : %(exp)s") % {'path': path, 'exp': exp}) elif exp.exit_code == 1 and 'No such file or directory' in exp.stderr: # The os.path.exists check above can race so this is a simple # best effort at catching that type of failure and raising a more # specific error. raise exception.DiskNotFound(location=path) else: msg = (_("qemu-img failed to execute on %(path)s : %(exp)s") % {'path': path, 'exp': exp}) raise exception.InvalidDiskInfo(reason=msg) if not out: msg = (_("Failed to run qemu-img info on %(path)s : %(error)s") % {'path': path, 'error': err}) raise exception.InvalidDiskInfo(reason=msg) return out ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/privsep/utils.py0000664000175000017500000000621700000000000016735 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # Copyright 2018 Michael Still and Aptira # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This module is utility methods that privsep depends on. Privsep isn't allowed # to depend on anything outside the privsep directory, so these need to be # here. That said, other parts of nova can call into these utilities if # needed. import errno import mmap import os import random import sys from oslo_log import log as logging from oslo_utils import excutils # NOTE(mriedem): Avoid importing nova.utils since that can cause a circular # import with the privsep code. In fact, avoid importing anything outside # of nova/privsep/ if possible. 
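# For illustration: one caller of this helper is nova/privsep/qemu.py, whose
# unprivileged_convert_image() uses supports_direct_io(instances_path) to
# choose between the 'none' and 'writeback' qemu-img cache modes.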
LOG = logging.getLogger(__name__) def generate_random_string(): return str(random.randint(0, sys.maxsize)) def supports_direct_io(dirpath): if not hasattr(os, 'O_DIRECT'): LOG.debug("This python runtime does not support direct I/O") return False # Use a random filename to avoid issues with $dirpath being on shared # storage. file_name = "%s.%s" % (".directio.test", generate_random_string()) testfile = os.path.join(dirpath, file_name) hasDirectIO = True fd = None try: fd = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT) # Check is the write allowed with 4096 byte alignment align_size = 4096 m = mmap.mmap(-1, align_size) m.write(b"x" * align_size) os.write(fd, m) LOG.debug("Path '%(path)s' supports direct I/O", {'path': dirpath}) except OSError as e: if e.errno in (errno.EINVAL, errno.ENOENT): LOG.debug("Path '%(path)s' does not support direct I/O: " "'%(ex)s'", {'path': dirpath, 'ex': e}) hasDirectIO = False else: with excutils.save_and_reraise_exception(): LOG.error("Error on '%(path)s' while checking " "direct I/O: '%(ex)s'", {'path': dirpath, 'ex': e}) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error("Error on '%(path)s' while checking direct I/O: " "'%(ex)s'", {'path': dirpath, 'ex': e}) finally: # ensure unlink(filepath) will actually remove the file by deleting # the remaining link to it in close(fd) if fd is not None: os.close(fd) try: os.unlink(testfile) except Exception: pass return hasDirectIO ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/profiler.py0000664000175000017500000000440400000000000015723 0ustar00zuulzuul00000000000000# Copyright 2016 IBM Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import importutils import webob.dec import nova.conf profiler = importutils.try_import('osprofiler.profiler') profiler_web = importutils.try_import('osprofiler.web') CONF = nova.conf.CONF class WsgiMiddleware(object): def __init__(self, application, **kwargs): self.application = application @classmethod def factory(cls, global_conf, **local_conf): if profiler_web: return profiler_web.WsgiMiddleware.factory(global_conf, **local_conf) def filter_(app): return cls(app, **local_conf) return filter_ @webob.dec.wsgify def __call__(self, request): return request.get_response(self.application) def get_traced_meta(): if profiler and 'profiler' in CONF and CONF.profiler.enabled: return profiler.TracedMeta else: # NOTE(rpodolyaka): if we do not return a child of type, then Python # fails to build a correct MRO when osprofiler is not installed class NoopMeta(type): pass return NoopMeta def trace_cls(name, **kwargs): """Wrap the OSProfiler trace_cls decorator so that it will not try to patch the class unless OSProfiler is present and enabled in the config :param name: The name of action. E.g. wsgi, rpc, db, etc.. 
:param kwargs: Any other keyword args used by profiler.trace_cls """ def decorator(cls): if profiler and 'profiler' in CONF and CONF.profiler.enabled: trace_decorator = profiler.trace_cls(name, kwargs) return trace_decorator(cls) return cls return decorator ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/quota.py0000664000175000017500000020377200000000000015243 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Quotas for resources per project.""" import copy from oslo_log import log as logging from oslo_utils import importutils from sqlalchemy import sql import nova.conf from nova import context as nova_context from nova.db.api import api as api_db_api from nova.db.api import models as api_models from nova.db.main import api as main_db_api from nova import exception from nova.limit import local as local_limit from nova.limit import placement as placement_limit from nova import objects from nova.scheduler.client import report LOG = logging.getLogger(__name__) CONF = nova.conf.CONF # Lazy-loaded on first access. # Avoid constructing the KSA adapter and provider tree on every access. PLACEMENT_CLIENT = None # If user_id and queued_for_delete are populated for a project, cache the # result to avoid doing unnecessary EXISTS database queries. UID_QFD_POPULATED_CACHE_BY_PROJECT = set() # For the server group members check, we do not scope to a project, so if all # user_id and queued_for_delete are populated for all projects, cache the # result to avoid doing unnecessary EXISTS database queries. UID_QFD_POPULATED_CACHE_ALL = False class DbQuotaDriver(object): """Driver to perform necessary checks to enforce quotas and obtain quota information. The default driver utilizes the local database. """ UNLIMITED_VALUE = -1 def get_reserved(self): # Since we stopped reserving the DB, we just return 0 return 0 def get_defaults(self, context, resources): """Given a list of resources, retrieve the default quotas. Use the class quotas named `_DEFAULT_QUOTA_NAME` as default quotas, if it exists. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. """ quotas = {} default_quotas = objects.Quotas.get_default_class(context) for resource in resources.values(): # resource.default returns the config options. So if there's not # an entry for the resource in the default class, it uses the # config option. quotas[resource.name] = default_quotas.get(resource.name, resource.default) return quotas def get_class_quotas(self, context, resources, quota_class): """Given a list of resources, retrieve the quotas for the given quota class. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param quota_class: The name of the quota class to return quotas for. 
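        :returns: A dict mapping resource names to their limits, for example
                  (illustrative values only): {'instances': 10, 'cores': 20,
                  'ram': 51200}.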
""" quotas = {} class_quotas = objects.Quotas.get_all_class_by_name(context, quota_class) for resource in resources.values(): quotas[resource.name] = class_quotas.get(resource.name, resource.default) return quotas def _process_quotas(self, context, resources, project_id, quotas, quota_class=None, usages=None, remains=False): modified_quotas = {} # Get the quotas for the appropriate class. If the project ID # matches the one in the context, we use the quota_class from # the context, otherwise, we use the provided quota_class (if # any) if project_id == context.project_id: quota_class = context.quota_class if quota_class: class_quotas = objects.Quotas.get_all_class_by_name(context, quota_class) else: class_quotas = {} default_quotas = self.get_defaults(context, resources) for resource in resources.values(): limit = quotas.get(resource.name, class_quotas.get( resource.name, default_quotas[resource.name])) modified_quotas[resource.name] = dict(limit=limit) # Include usages if desired. This is optional because one # internal consumer of this interface wants to access the # usages directly from inside a transaction. if usages: usage = usages.get(resource.name, {}) modified_quotas[resource.name].update( in_use=usage.get('in_use', 0), ) # Initialize remains quotas with the default limits. if remains: modified_quotas[resource.name].update(remains=limit) if remains: # Get all user quotas for a project and subtract their limits # from the class limits to get the remains. For example, if the # class/default is 20 and there are two users each with quota of 5, # then there is quota of 10 left to give out. all_quotas = objects.Quotas.get_all(context, project_id) for quota in all_quotas: if quota.resource in modified_quotas: modified_quotas[quota.resource]['remains'] -= \ quota.hard_limit return modified_quotas def _get_usages(self, context, resources, project_id, user_id=None): """Get usages of specified resources. This function is called to get resource usages for validating quota limit creates or updates in the os-quota-sets API and for displaying resource usages in the os-used-limits API. This function is not used for checking resource usage against quota limits. :param context: The request context for access checks :param resources: The dict of Resources for which to get usages :param project_id: The project_id for scoping the usage count :param user_id: Optional user_id for scoping the usage count :returns: A dict containing resources and their usage information, for example: {'project_id': 'project-uuid', 'user_id': 'user-uuid', 'instances': {'in_use': 5}} """ usages = {} for resource in resources.values(): # NOTE(melwitt): We should skip resources that are not countable, # such as AbsoluteResources. if not isinstance(resource, CountableResource): continue if resource.name in usages: # This is needed because for any of the resources: # ('instances', 'cores', 'ram'), they are counted at the same # time for efficiency (query the instances table once instead # of multiple times). So, a count of any one of them contains # counts for the others and we can avoid re-counting things. continue if resource.name in ('key_pairs', 'server_group_members'): # These per user resources are special cases whose usages # are not considered when validating limit create/update or # displaying used limits. They are always zero. 
usages[resource.name] = {'in_use': 0} else: if ( resource.name in main_db_api.quota_get_per_project_resources() ): count = resource.count_as_dict(context, project_id) key = 'project' else: # NOTE(melwitt): This assumes a specific signature for # count_as_dict(). Usages used to be records in the # database but now we are counting resources. The # count_as_dict() function signature needs to match this # call, else it should get a conditional in this function. count = resource.count_as_dict(context, project_id, user_id=user_id) key = 'user' if user_id else 'project' # Example count_as_dict() return value: # {'project': {'instances': 5}, # 'user': {'instances': 2}} counted_resources = count[key].keys() for res in counted_resources: count_value = count[key][res] usages[res] = {'in_use': count_value} return usages def get_user_quotas(self, context, resources, project_id, user_id, quota_class=None, usages=True, project_quotas=None, user_quotas=None): """Given a list of resources, retrieve the quotas for the given user and project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. It will be ignored if project_id == context.project_id. :param usages: If True, the current counts will also be returned. :param project_quotas: Quotas dictionary for the specified project. :param user_quotas: Quotas dictionary for the specified project and user. """ if user_quotas: user_quotas = user_quotas.copy() else: user_quotas = objects.Quotas.get_all_by_project_and_user( context, project_id, user_id) # Use the project quota for default user quota. proj_quotas = project_quotas or objects.Quotas.get_all_by_project( context, project_id) for key, value in proj_quotas.items(): if key not in user_quotas.keys(): user_quotas[key] = value user_usages = {} if usages: user_usages = self._get_usages(context, resources, project_id, user_id=user_id) return self._process_quotas(context, resources, project_id, user_quotas, quota_class, usages=user_usages) def get_project_quotas(self, context, resources, project_id, quota_class=None, usages=True, remains=False, project_quotas=None): """Given a list of resources, retrieve the quotas for the given project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. It will be ignored if project_id == context.project_id. :param usages: If True, the current counts will also be returned. :param remains: If True, the current remains of the project will will be returned. :param project_quotas: Quotas dictionary for the specified project. """ project_quotas = project_quotas or objects.Quotas.get_all_by_project( context, project_id) project_usages = {} if usages: project_usages = self._get_usages(context, resources, project_id) return self._process_quotas(context, resources, project_id, project_quotas, quota_class, usages=project_usages, remains=remains) def _is_unlimited_value(self, v): """A helper method to check for unlimited value. 
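        A value of -1 (UNLIMITED_VALUE) or lower is treated as unlimited.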
""" return v <= self.UNLIMITED_VALUE def _sum_quota_values(self, v1, v2): """A helper method that handles unlimited values when performing sum operation. """ if self._is_unlimited_value(v1) or self._is_unlimited_value(v2): return self.UNLIMITED_VALUE return v1 + v2 def _sub_quota_values(self, v1, v2): """A helper method that handles unlimited values when performing subtraction operation. """ if self._is_unlimited_value(v1) or self._is_unlimited_value(v2): return self.UNLIMITED_VALUE return v1 - v2 def get_settable_quotas(self, context, resources, project_id, user_id=None): """Given a list of resources, retrieve the range of settable quotas for the given user or project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. """ settable_quotas = {} db_proj_quotas = objects.Quotas.get_all_by_project(context, project_id) project_quotas = self.get_project_quotas(context, resources, project_id, remains=True, project_quotas=db_proj_quotas) if user_id: setted_quotas = objects.Quotas.get_all_by_project_and_user( context, project_id, user_id) user_quotas = self.get_user_quotas(context, resources, project_id, user_id, project_quotas=db_proj_quotas, user_quotas=setted_quotas) for key, value in user_quotas.items(): # Maximum is the remaining quota for a project (class/default # minus the sum of all user quotas in the project), plus the # given user's quota. So if the class/default is 20 and there # are two users each with quota of 5, then there is quota of # 10 remaining. The given user currently has quota of 5, so # the maximum you could update their quota to would be 15. # Class/default 20 - currently used in project 10 + current # user 5 = 15. maximum = \ self._sum_quota_values(project_quotas[key]['remains'], setted_quotas.get(key, 0)) # This function is called for the quota_sets api and the # corresponding nova-manage command. The idea is when someone # attempts to update a quota, the value chosen must be at least # as much as the current usage and less than or equal to the # project limit less the sum of existing per user limits. minimum = value['in_use'] settable_quotas[key] = {'minimum': minimum, 'maximum': maximum} else: for key, value in project_quotas.items(): minimum = \ max(int(self._sub_quota_values(value['limit'], value['remains'])), int(value['in_use'])) settable_quotas[key] = {'minimum': minimum, 'maximum': -1} return settable_quotas def _get_quotas(self, context, resources, keys, project_id=None, user_id=None, project_quotas=None): """A helper method which retrieves the quotas for the specific resources identified by keys, and which apply to the current context. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param keys: A list of the desired quotas to retrieve. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. :param project_quotas: Quotas dictionary for the specified project. """ # Filter resources desired = set(keys) sub_resources = {k: v for k, v in resources.items() if k in desired} # Make sure we accounted for all of them... 
if len(keys) != len(sub_resources): unknown = desired - set(sub_resources.keys()) raise exception.QuotaResourceUnknown(unknown=sorted(unknown)) if user_id: LOG.debug('Getting quotas for user %(user_id)s and project ' '%(project_id)s. Resources: %(keys)s', {'user_id': user_id, 'project_id': project_id, 'keys': keys}) # Grab and return the quotas (without usages) quotas = self.get_user_quotas(context, sub_resources, project_id, user_id, context.quota_class, usages=False, project_quotas=project_quotas) else: LOG.debug('Getting quotas for project %(project_id)s. Resources: ' '%(keys)s', {'project_id': project_id, 'keys': keys}) # Grab and return the quotas (without usages) quotas = self.get_project_quotas(context, sub_resources, project_id, context.quota_class, usages=False, project_quotas=project_quotas) return {k: v['limit'] for k, v in quotas.items()} def limit_check(self, context, resources, values, project_id=None, user_id=None): """Check simple quota limits. For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a simple limit resource. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param values: A dictionary of the values to check against the quota. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. """ _valid_method_call_check_resources(values, 'check', resources) # Ensure no value is less than zero unders = [key for key, val in values.items() if val < 0] if unders: raise exception.InvalidQuotaValue(unders=sorted(unders)) # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # If user id is None, then we use the user_id in context if user_id is None: user_id = context.user_id # Get the applicable quotas project_quotas = objects.Quotas.get_all_by_project(context, project_id) quotas = self._get_quotas(context, resources, values.keys(), project_id=project_id, project_quotas=project_quotas) user_quotas = self._get_quotas(context, resources, values.keys(), project_id=project_id, user_id=user_id, project_quotas=project_quotas) # Check the quotas and construct a list of the resources that # would be put over limit by the desired values overs = [key for key, val in values.items() if quotas[key] >= 0 and quotas[key] < val or (user_quotas[key] >= 0 and user_quotas[key] < val)] if overs: headroom = {} for key in overs: headroom[key] = min( val for val in (quotas.get(key), project_quotas.get(key)) if val is not None ) raise exception.OverQuota(overs=sorted(overs), quotas=quotas, usages={}, headroom=headroom) def limit_check_project_and_user(self, context, resources, project_values=None, user_values=None, project_id=None, user_id=None): """Check values (usage + desired delta) against quota limits. For limits--this method checks that a set of proposed values are permitted by the limit restriction. 
This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a simple limit resource. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks :param resources: A dictionary of the registered resources :param project_values: Optional dict containing the resource values to check against project quota, e.g. {'instances': 1, 'cores': 2, 'memory_mb': 512} :param user_values: Optional dict containing the resource values to check against user quota, e.g. {'instances': 1, 'cores': 2, 'memory_mb': 512} :param project_id: Optional project_id for scoping the limit check to a different project than in the context :param user_id: Optional user_id for scoping the limit check to a different user than in the context """ if project_values is None: project_values = {} if user_values is None: user_values = {} _valid_method_call_check_resources(project_values, 'check', resources) _valid_method_call_check_resources(user_values, 'check', resources) if not any([project_values, user_values]): raise exception.Invalid( 'Must specify at least one of project_values or user_values ' 'for the limit check.') # Ensure no value is less than zero for vals in (project_values, user_values): unders = [key for key, val in vals.items() if val < 0] if unders: raise exception.InvalidQuotaValue(unders=sorted(unders)) # Get a set of all keys for calling _get_quotas() so we get all of the # resource limits we need. all_keys = set(project_values).union(user_values) # Keys that are in both project_values and user_values need to be # checked against project quota and user quota, respectively. # Keys that are not in both only need to be checked against project # quota or user quota, if it is defined. Separate the keys that don't # need to be checked against both quotas, merge them into one dict, # and remove them from project_values and user_values. keys_to_merge = set(project_values).symmetric_difference(user_values) merged_values = {} for key in keys_to_merge: # The key will be either in project_values or user_values based on # the earlier symmetric_difference. Default to 0 in case the found # value is 0 and won't take precedence over a None default. merged_values[key] = (project_values.get(key, 0) or user_values.get(key, 0)) project_values.pop(key, None) user_values.pop(key, None) # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # If user id is None, then we use the user_id in context if user_id is None: user_id = context.user_id # Get the applicable quotas. They will be merged together (taking the # min limit) if project_values and user_values were not specified # together. 
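        # Illustrative example (made-up values): if project_values contains a
        # key that user_values does not, e.g. {'metadata_items': 128}, that
        # key was moved into merged_values above and is checked further down
        # against the smaller of the project limit and the user limit for
        # that resource.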
# per project quota limits (quotas that have no concept of # user-scoping: ) project_quotas = objects.Quotas.get_all_by_project(context, project_id) # per user quotas, project quota limits (for quotas that have # user-scoping, limits for the project) quotas = self._get_quotas(context, resources, all_keys, project_id=project_id, project_quotas=project_quotas) # per user quotas, user quota limits (for quotas that have # user-scoping, the limits for the user) user_quotas = self._get_quotas(context, resources, all_keys, project_id=project_id, user_id=user_id, project_quotas=project_quotas) if merged_values: # This is for resources that are not counted across a project and # must pass both the quota for the project and the quota for the # user. # Combine per user project quotas and user_quotas for use in the # checks, taking the minimum limit between the two. merged_quotas = copy.deepcopy(quotas) for k, v in user_quotas.items(): if k in merged_quotas: merged_quotas[k] = min(merged_quotas[k], v) else: merged_quotas[k] = v # Check the quotas and construct a list of the resources that # would be put over limit by the desired values overs = [key for key, val in merged_values.items() if merged_quotas[key] >= 0 and merged_quotas[key] < val] if overs: headroom = {} for key in overs: headroom[key] = merged_quotas[key] raise exception.OverQuota(overs=sorted(overs), quotas=merged_quotas, usages={}, headroom=headroom) # This is for resources that are counted across a project and # across a user (instances, cores, ram, server_groups). The # project_values must pass the quota for the project and the # user_values must pass the quota for the user. over_user_quota = False overs = [] for key in user_values.keys(): # project_values and user_values should contain the same keys or # be empty after the keys in the symmetric_difference were removed # from both dicts. if quotas[key] >= 0 and quotas[key] < project_values[key]: overs.append(key) elif (user_quotas[key] >= 0 and user_quotas[key] < user_values[key]): overs.append(key) over_user_quota = True if overs: quotas_exceeded = user_quotas if over_user_quota else quotas headroom = {} for key in overs: headroom[key] = quotas_exceeded[key] raise exception.OverQuota(overs=sorted(overs), quotas=quotas_exceeded, usages={}, headroom=headroom) class NoopQuotaDriver(object): """Driver that turns quotas calls into no-ops and pretends that quotas for all resources are unlimited. This can be used if you do not wish to have any quota checking. """ def get_reserved(self): # Noop has always returned -1 for reserved return -1 def get_defaults(self, context, resources): """Given a list of resources, retrieve the default quotas. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. """ quotas = {} for resource in resources.values(): quotas[resource.name] = -1 return quotas def get_class_quotas(self, context, resources, quota_class): """Given a list of resources, retrieve the quotas for the given quota class. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param quota_class: The name of the quota class to return quotas for. 
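        Every registered resource is reported as unlimited (-1) by this
        driver.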
""" quotas = {} for resource in resources.values(): quotas[resource.name] = -1 return quotas def _get_noop_quotas(self, resources, usages=None, remains=False): quotas = {} for resource in resources.values(): quotas[resource.name] = {} quotas[resource.name]['limit'] = -1 if usages: quotas[resource.name]['in_use'] = -1 if remains: quotas[resource.name]['remains'] = -1 return quotas def get_user_quotas(self, context, resources, project_id, user_id, quota_class=None, usages=True): """Given a list of resources, retrieve the quotas for the given user and project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. It will be ignored if project_id == context.project_id. :param usages: If True, the current counts will also be returned. """ return self._get_noop_quotas(resources, usages=usages) def get_project_quotas(self, context, resources, project_id, quota_class=None, usages=True, remains=False): """Given a list of resources, retrieve the quotas for the given project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. It will be ignored if project_id == context.project_id. :param usages: If True, the current counts will also be returned. :param remains: If True, the current remains of the project will will be returned. """ return self._get_noop_quotas(resources, usages=usages, remains=remains) def get_settable_quotas(self, context, resources, project_id, user_id=None): """Given a list of resources, retrieve the range of settable quotas for the given user or project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. """ quotas = {} for resource in resources.values(): quotas[resource.name] = {'minimum': 0, 'maximum': -1} return quotas def limit_check(self, context, resources, values, project_id=None, user_id=None): """Check simple quota limits. For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a simple limit resource. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param values: A dictionary of the values to check against the quota. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. 
""" pass def limit_check_project_and_user(self, context, resources, project_values=None, user_values=None, project_id=None, user_id=None): """Check values against quota limits. For limits--this method checks that a set of proposed values are permitted by the limit restriction. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a simple limit resource. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks :param resources: A dictionary of the registered resources :param project_values: Optional dict containing the resource values to check against project quota, e.g. {'instances': 1, 'cores': 2, 'memory_mb': 512} :param user_values: Optional dict containing the resource values to check against user quota, e.g. {'instances': 1, 'cores': 2, 'memory_mb': 512} :param project_id: Optional project_id for scoping the limit check to a different project than in the context :param user_id: Optional user_id for scoping the limit check to a different user than in the context """ pass class UnifiedLimitsDriver(NoopQuotaDriver): """Ease migration to new unified limits code. Help ease migration to unified limits by ensuring the old code paths still work with unified limits. Eventually the expectation is all this legacy quota code will go away, leaving the new simpler code """ def __init__(self): LOG.warning("The Unified Limits Quota Driver is experimental and " "is under active development. Do not use this driver.") def get_reserved(self): # To make unified limits APIs the same as the DB driver, return 0 return 0 def get_class_quotas(self, context, resources, quota_class): """Given a list of resources, retrieve the quotas for the given quota class. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param quota_class: Placeholder, we always assume default quota class. """ # NOTE(johngarbutt): ignoring quota_class, as ignored in noop driver return self.get_defaults(context, resources) def get_defaults(self, context, resources): local_limits = local_limit.get_legacy_default_limits() # Note we get 0 if there is no registered limit, # to mirror oslo_limit behaviour when there is no registered limit placement_limits = placement_limit.get_legacy_default_limits() quotas = {} for resource in resources.values(): if resource.name in placement_limits: quotas[resource.name] = placement_limits[resource.name] else: # return -1 for things like security_group_rules # that are neither a keystone limit or a local limit quotas[resource.name] = local_limits.get(resource.name, -1) return quotas def get_project_quotas(self, context, resources, project_id, quota_class=None, usages=True, remains=False): if quota_class is not None: raise NotImplementedError("quota_class") if remains: raise NotImplementedError("remains") local_limits = local_limit.get_legacy_default_limits() # keystone limits always returns core, ram and instances # if nothing set in keystone, we get back 0, i.e. 
don't allow placement_limits = placement_limit.get_legacy_project_limits( project_id) project_quotas = {} for resource in resources.values(): if resource.name in placement_limits: limit = placement_limits[resource.name] else: # return -1 for things like security_group_rules # that are neither a keystone limit or a local limit limit = local_limits.get(resource.name, -1) project_quotas[resource.name] = {"limit": limit} if usages: local_in_use = local_limit.get_in_use(context, project_id) p_in_use = placement_limit.get_legacy_counts(context, project_id) for resource in resources.values(): # default to 0 for resources that are deprecated, # i.e. not in keystone or local limits, such that we # are API compatible with what was returned with # the db driver, even though noop driver returned -1 usage_count = 0 if resource.name in local_in_use: usage_count = local_in_use[resource.name] if resource.name in p_in_use: usage_count = p_in_use[resource.name] project_quotas[resource.name]["in_use"] = usage_count return project_quotas def get_user_quotas(self, context, resources, project_id, user_id, quota_class=None, usages=True): return self.get_project_quotas(context, resources, project_id, quota_class, usages) class BaseResource(object): """Describe a single resource for quota checking.""" def __init__(self, name, flag=None): """Initializes a Resource. :param name: The name of the resource, i.e., "instances". :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. """ self.name = name self.flag = flag @property def default(self): """Return the default value of the quota.""" return CONF.quota[self.flag] if self.flag else -1 class AbsoluteResource(BaseResource): """Describe a resource that does not correspond to database objects.""" valid_method = 'check' class CountableResource(AbsoluteResource): """Describe a resource where the counts aren't based solely on the project ID. """ def __init__(self, name, count_as_dict, flag=None): """Initializes a CountableResource. Countable resources are those resources which directly correspond to objects in the database, but for which a count by project ID is inappropriate e.g. keypairs A CountableResource must be constructed with a counting function, which will be called to determine the current counts of the resource. The counting function will be passed the context, along with the extra positional and keyword arguments that are passed to Quota.count_as_dict(). It should return a dict specifying the count scoped to a project and/or a user. Example count of instances, cores, or ram returned as a rollup of all the resources since we only want to query the instances table once, not multiple times, for each resource. Instances, cores, and ram are counted across a project and across a user: {'project': {'instances': 5, 'cores': 8, 'ram': 4096}, 'user': {'instances': 1, 'cores': 2, 'ram': 512}} Example count of server groups keeping a consistent format. Server groups are counted across a project and across a user: {'project': {'server_groups': 7}, 'user': {'server_groups': 2}} Example count of key pairs keeping a consistent format. Key pairs are counted across a user only: {'user': {'key_pairs': 5}} Note that this counting is not performed in a transaction-safe manner. This resource class is a temporary measure to provide required functionality, until a better approach to solving this problem can be evolved. :param name: The name of the resource, i.e., "instances". 
:param count_as_dict: A callable which returns the count of the resource as a dict. The arguments passed are as described above. :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. """ super(CountableResource, self).__init__(name, flag=flag) self.count_as_dict = count_as_dict class QuotaEngine(object): """Represent the set of recognized quotas.""" def __init__(self, quota_driver=None, resources=None): """Initialize a Quota object. :param quota_driver: a QuotaDriver object (only used in testing. if None (default), instantiates a driver from the CONF.quota.driver option) :param resources: iterable of Resource objects """ resources = resources or [] self._resources = { resource.name: resource for resource in resources } # NOTE(mriedem): quota_driver is ever only supplied in tests with a # fake driver. self.__driver_override = quota_driver self.__driver = None self.__driver_name = None @property def _driver(self): if self.__driver_override: return self.__driver_override # NOTE(johngarbutt) to allow unit tests to change the driver by # simply overriding config, double check if we have the correct # driver cached before we return the currently cached driver driver_name_in_config = CONF.quota.driver if self.__driver_name != driver_name_in_config: self.__driver = importutils.import_object(driver_name_in_config) self.__driver_name = driver_name_in_config return self.__driver def get_defaults(self, context): """Retrieve the default quotas. :param context: The request context, for access checks. """ return self._driver.get_defaults(context, self._resources) def get_class_quotas(self, context, quota_class): """Retrieve the quotas for the given quota class. :param context: The request context, for access checks. :param quota_class: The name of the quota class to return quotas for. """ return self._driver.get_class_quotas(context, self._resources, quota_class) def get_user_quotas(self, context, project_id, user_id, quota_class=None, usages=True): """Retrieve the quotas for the given user and project. :param context: The request context, for access checks. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. :param usages: If True, the current counts will also be returned. """ return self._driver.get_user_quotas(context, self._resources, project_id, user_id, quota_class=quota_class, usages=usages) def get_project_quotas(self, context, project_id, quota_class=None, usages=True, remains=False): """Retrieve the quotas for the given project. :param context: The request context, for access checks. :param project_id: The ID of the project to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. :param usages: If True, the current counts will also be returned. :param remains: If True, the current remains of the project will will be returned. """ return self._driver.get_project_quotas(context, self._resources, project_id, quota_class=quota_class, usages=usages, remains=remains) def get_settable_quotas(self, context, project_id, user_id=None): """Given a list of resources, retrieve the range of settable quotas for the given user or project. :param context: The request context, for access checks. 
:param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. """ return self._driver.get_settable_quotas(context, self._resources, project_id, user_id=user_id) def count_as_dict(self, context, resource, *args, **kwargs): """Count a resource and return a dict. For countable resources, invokes the count_as_dict() function and returns its result. Arguments following the context and resource are passed directly to the count function declared by the resource. :param context: The request context, for access checks. :param resource: The name of the resource, as a string. :returns: A dict containing the count(s) for the resource, for example: {'project': {'instances': 2, 'cores': 4, 'ram': 1024}, 'user': {'instances': 1, 'cores': 2, 'ram': 512}} another example: {'user': {'key_pairs': 5}} """ # Get the resource res = self._resources.get(resource) if not res or not hasattr(res, 'count_as_dict'): raise exception.QuotaResourceUnknown(unknown=[resource]) return res.count_as_dict(context, *args, **kwargs) # TODO(melwitt): This can be removed once no old code can call # limit_check(). It will be replaced with limit_check_project_and_user(). def limit_check(self, context, project_id=None, user_id=None, **values): """Check simple quota limits. For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. The values to check are given as keyword arguments, where the key identifies the specific quota limit to check, and the value is the proposed value. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a simple limit resource. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. """ return self._driver.limit_check(context, self._resources, values, project_id=project_id, user_id=user_id) def limit_check_project_and_user(self, context, project_values=None, user_values=None, project_id=None, user_id=None): """Check values against quota limits. For limits--this method checks that a set of proposed values are permitted by the limit restriction. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a simple limit resource. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks :param project_values: Optional dict containing the resource values to check against project quota, e.g. {'instances': 1, 'cores': 2, 'memory_mb': 512} :param user_values: Optional dict containing the resource values to check against user quota, e.g. 
{'instances': 1, 'cores': 2, 'memory_mb': 512} :param project_id: Optional project_id for scoping the limit check to a different project than in the context :param user_id: Optional user_id for scoping the limit check to a different user than in the context """ return self._driver.limit_check_project_and_user( context, self._resources, project_values=project_values, user_values=user_values, project_id=project_id, user_id=user_id) @property def resources(self): return sorted(self._resources.keys()) def get_reserved(self): return self._driver.get_reserved() @api_db_api.context_manager.reader def _user_id_queued_for_delete_populated(context, project_id=None): """Determine whether user_id and queued_for_delete are set. This will be used to determine whether we need to fall back on the legacy quota counting method (if we cannot rely on counting instance mappings for the instance count). If any records with user_id=None and queued_for_delete=False are found, we need to fall back to the legacy counting method. If any records with queued_for_delete=None are found, we need to fall back to the legacy counting method. Note that this check specifies queued_for_deleted=False, which excludes deleted and SOFT_DELETED instances. The 'populate_user_id' data migration migrates SOFT_DELETED instances because they could be restored at any time in the future. However, for this quota-check-time method, it is acceptable to ignore SOFT_DELETED instances, since we just want to know if it is safe to use instance mappings to count instances at this point in time (and SOFT_DELETED instances do not count against quota limits). We also want to fall back to the legacy counting method if we detect any records that have not yet populated the queued_for_delete field. We do this instead of counting queued_for_delete=None records since that might not accurately reflect the project or project user's quota usage. :param project_id: The project to check :returns: True if user_id is set for all non-deleted instances and queued_for_delete is set for all instances, else False """ user_id_not_populated = sql.and_( api_models.InstanceMapping.user_id == sql.null(), api_models.InstanceMapping.queued_for_delete == sql.false()) # If either queued_for_delete or user_id are unmigrated, we will return # False. unmigrated_filter = sql.or_( api_models.InstanceMapping.queued_for_delete == sql.null(), user_id_not_populated) query = context.session.query(api_models.InstanceMapping).filter( unmigrated_filter) if project_id: query = query.filter_by(project_id=project_id) return not context.session.query(query.exists()).scalar() def _keypair_get_count_by_user(context, user_id): count = objects.KeyPairList.get_count_by_user(context, user_id) return {'user': {'key_pairs': count}} def _server_group_count_members_by_user_legacy(context, group, user_id): filters = {'deleted': False, 'user_id': user_id, 'uuid': group.members} def group_member_uuids(cctxt): return {inst.uuid for inst in objects.InstanceList.get_by_filters( cctxt, filters, expected_attrs=[])} # Ignore any duplicates since build requests and instances can co-exist # for a short window of time after the instance is created in a cell but # before the build request is deleted. instance_uuids = set() # NOTE(melwitt): Counting across cells for instances means we will miss # counting resources if a cell is down. 
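    # scatter_gather_all_cells is expected to return a dict keyed by cell
    # uuid whose values are the per-cell results of group_member_uuids(),
    # i.e. sets of member instance uuids; the union below collapses them
    # into a single set so each member is only counted once.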
per_cell = nova_context.scatter_gather_all_cells( context, group_member_uuids) for uuids in per_cell.values(): instance_uuids |= uuids # Count build requests using the same filters to catch group members # that are not yet created in a cell. build_requests = objects.BuildRequestList.get_by_filters(context, filters) for build_request in build_requests: instance_uuids.add(build_request.instance_uuid) return {'user': {'server_group_members': len(instance_uuids)}} def is_qfd_populated(context): """Check if user_id and queued_for_delete fields are populated. This method is related to counting quota usage from placement. It is not yet possible to count instances from placement, so in the meantime we can use instance mappings for counting. This method is used to determine whether the user_id and queued_for_delete columns are populated in the API database's instance_mappings table. Instance mapping records are not deleted from the database until the database is archived, so queued_for_delete tells us whether or not we should count them for instance quota usage. The user_id field enables us to scope instance quota usage to a user (legacy quota). Scoping instance quota to a user is only possible when counting quota usage from placement is configured and unified limits is not configured. When unified limits is configured, quotas are scoped only to projects. In the future when it is possible to count instance usage from placement, this method will no longer be needed. """ global UID_QFD_POPULATED_CACHE_ALL if not UID_QFD_POPULATED_CACHE_ALL: LOG.debug('Checking whether user_id and queued_for_delete are ' 'populated for all projects') UID_QFD_POPULATED_CACHE_ALL = _user_id_queued_for_delete_populated( context) return UID_QFD_POPULATED_CACHE_ALL def _server_group_count_members_by_user(context, group, user_id): """Get the count of server group members for a group by user. :param context: The request context for database access :param group: The InstanceGroup object with members to count :param user_id: The user_id to count across :returns: A dict containing the user-scoped count. For example: {'user': 'server_group_members': }} """ # Because server group members quota counting is not scoped to a project, # but scoped to a particular InstanceGroup and user, we have no reasonable # way of pruning down our migration check to only a subset of all instance # mapping records. # So, we check whether user_id/queued_for_delete is populated for all # records and cache the result to prevent unnecessary checking once the # data migration has been completed. if is_qfd_populated(context): count = objects.InstanceMappingList.get_count_by_uuids_and_user( context, group.members, user_id) return {'user': {'server_group_members': count}} LOG.warning('Falling back to legacy quota counting method for server ' 'group members') return _server_group_count_members_by_user_legacy(context, group, user_id) def _instances_cores_ram_count_legacy(context, project_id, user_id=None): """Get the counts of instances, cores, and ram in cell databases. :param context: The request context for database access :param project_id: The project_id to count across :param user_id: The user_id to count across :returns: A dict containing the project-scoped counts and user-scoped counts if user_id is specified. For example: {'project': {'instances': , 'cores': , 'ram': }, 'user': {'instances': , 'cores': , 'ram': }} """ # NOTE(melwitt): Counting across cells for instances, cores, and ram means # we will miss counting resources if a cell is down. 
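    # Illustrative return value (the numbers are made up):
    #   {'project': {'instances': 10, 'cores': 24, 'ram': 16384},
    #    'user': {'instances': 2, 'cores': 4, 'ram': 2048}}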
# NOTE(tssurya): We only go into those cells in which the tenant has # instances. We could optimize this to avoid the CellMappingList query # for single-cell deployments by checking the cell cache and only doing # this filtering if there is more than one non-cell0 cell. # TODO(tssurya): Consider adding a scatter_gather_cells_for_project # variant that makes this native to nova.context. if CONF.api.instance_list_per_project_cells: cell_mappings = objects.CellMappingList.get_by_project_id( context, project_id) else: nova_context.load_cells() cell_mappings = nova_context.CELLS results = nova_context.scatter_gather_cells( context, cell_mappings, nova_context.CELL_TIMEOUT, objects.InstanceList.get_counts, project_id, user_id=user_id) total_counts = {'project': {'instances': 0, 'cores': 0, 'ram': 0}} if user_id: total_counts['user'] = {'instances': 0, 'cores': 0, 'ram': 0} for result in results.values(): if not nova_context.is_cell_failure_sentinel(result): for resource, count in result['project'].items(): total_counts['project'][resource] += count if user_id: for resource, count in result['user'].items(): total_counts['user'][resource] += count return total_counts def _cores_ram_count_placement(context, project_id, user_id=None): return report.report_client_singleton().get_usages_counts_for_quota( context, project_id, user_id=user_id) def _instances_cores_ram_count_api_db_placement(context, project_id, user_id=None): # Will return a dict with format: {'project': {'instances': M}, # 'user': {'instances': N}} # where the 'user' key is optional. total_counts = objects.InstanceMappingList.get_counts(context, project_id, user_id=user_id) cores_ram_counts = _cores_ram_count_placement(context, project_id, user_id=user_id) total_counts['project'].update(cores_ram_counts['project']) if 'user' in total_counts: total_counts['user'].update(cores_ram_counts['user']) return total_counts def _instances_cores_ram_count(context, project_id, user_id=None): """Get the counts of instances, cores, and ram. :param context: The request context for database access :param project_id: The project_id to count across :param user_id: The user_id to count across :returns: A dict containing the project-scoped counts and user-scoped counts if user_id is specified. For example: {'project': {'instances': , 'cores': , 'ram': }, 'user': {'instances': , 'cores': , 'ram': }} """ global UID_QFD_POPULATED_CACHE_BY_PROJECT if CONF.quota.count_usage_from_placement: # If a project has all user_id and queued_for_delete data populated, # cache the result to avoid needless database checking in the future. if (not UID_QFD_POPULATED_CACHE_ALL and project_id not in UID_QFD_POPULATED_CACHE_BY_PROJECT): LOG.debug('Checking whether user_id and queued_for_delete are ' 'populated for project_id %s', project_id) uid_qfd_populated = _user_id_queued_for_delete_populated( context, project_id) if uid_qfd_populated: UID_QFD_POPULATED_CACHE_BY_PROJECT.add(project_id) else: uid_qfd_populated = True if uid_qfd_populated: return _instances_cores_ram_count_api_db_placement(context, project_id, user_id=user_id) LOG.warning('Falling back to legacy quota counting method for ' 'instances, cores, and ram') return _instances_cores_ram_count_legacy(context, project_id, user_id=user_id) def _server_group_count(context, project_id, user_id=None): """Get the counts of server groups in the database. 
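    For example, if the project owns four server groups and one of them was
    created by the given user, the return value would be (illustrative
    numbers): {'project': {'server_groups': 4}, 'user': {'server_groups': 1}}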
:param context: The request context for database access :param project_id: The project_id to count across :param user_id: The user_id to count across :returns: A dict containing the project-scoped counts and user-scoped counts if user_id is specified. For example: {'project': {'server_groups': }, 'user': {'server_groups': }} """ return objects.InstanceGroupList.get_counts(context, project_id, user_id=user_id) QUOTAS = QuotaEngine( resources=[ CountableResource( 'instances', _instances_cores_ram_count, 'instances'), CountableResource( 'cores', _instances_cores_ram_count, 'cores'), CountableResource( 'ram', _instances_cores_ram_count, 'ram'), AbsoluteResource( 'metadata_items', 'metadata_items'), AbsoluteResource( 'injected_files', 'injected_files'), AbsoluteResource( 'injected_file_content_bytes', 'injected_file_content_bytes'), AbsoluteResource( 'injected_file_path_bytes', 'injected_file_path_length'), CountableResource( 'key_pairs', _keypair_get_count_by_user, 'key_pairs'), CountableResource( 'server_groups', _server_group_count, 'server_groups'), CountableResource( 'server_group_members', _server_group_count_members_by_user, 'server_group_members'), # Deprecated nova-network quotas, retained to avoid changing API # responses AbsoluteResource('fixed_ips'), AbsoluteResource('floating_ips'), AbsoluteResource('security_groups'), AbsoluteResource('security_group_rules'), ], ) def _valid_method_call_check_resource(name, method, resources): if name not in resources: raise exception.InvalidQuotaMethodUsage(method=method, res=name) res = resources[name] if res.valid_method != method: raise exception.InvalidQuotaMethodUsage(method=method, res=name) def _valid_method_call_check_resources(resource_values, method, resources): """A method to check whether the resource can use the quota method. :param resource_values: Dict containing the resource names and values :param method: The quota method to check :param resources: Dict containing Resource objects to validate against """ for name in resource_values.keys(): _valid_method_call_check_resource(name, method, resources) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/rpc.py0000664000175000017500000003764600000000000014703 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools from oslo_log import log as logging import oslo_messaging as messaging from oslo_messaging.rpc import dispatcher from oslo_serialization import jsonutils from oslo_service import periodic_task from oslo_utils import importutils import nova.conf import nova.context import nova.exception from nova.i18n import _ from nova import utils __all__ = [ 'init', 'cleanup', 'set_defaults', 'add_extra_exmods', 'clear_extra_exmods', 'get_allowed_exmods', 'RequestContextSerializer', 'get_client', 'get_server', 'get_notifier', ] profiler = importutils.try_import("osprofiler.profiler") CONF = nova.conf.CONF LOG = logging.getLogger(__name__) # TODO(stephenfin): These should be private TRANSPORT = None LEGACY_NOTIFIER = None NOTIFICATION_TRANSPORT = None NOTIFIER = None # NOTE(danms): If rpc_response_timeout is over this value (per-call or # globally), we will enable heartbeating HEARTBEAT_THRESHOLD = 60 ALLOWED_EXMODS = [ nova.exception.__name__, ] EXTRA_EXMODS = [] def init(conf): global TRANSPORT, NOTIFICATION_TRANSPORT, LEGACY_NOTIFIER, NOTIFIER exmods = get_allowed_exmods() TRANSPORT = create_transport(get_transport_url()) NOTIFICATION_TRANSPORT = messaging.get_notification_transport( conf, allowed_remote_exmods=exmods) serializer = RequestContextSerializer(JsonPayloadSerializer()) if conf.notifications.notification_format == 'unversioned': LEGACY_NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT, serializer=serializer) NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT, serializer=serializer, driver='noop') elif conf.notifications.notification_format == 'both': LEGACY_NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT, serializer=serializer) NOTIFIER = messaging.Notifier( NOTIFICATION_TRANSPORT, serializer=serializer, topics=conf.notifications.versioned_notifications_topics) else: LEGACY_NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT, serializer=serializer, driver='noop') NOTIFIER = messaging.Notifier( NOTIFICATION_TRANSPORT, serializer=serializer, topics=conf.notifications.versioned_notifications_topics) def cleanup(): global TRANSPORT, NOTIFICATION_TRANSPORT, LEGACY_NOTIFIER, NOTIFIER assert TRANSPORT is not None assert NOTIFICATION_TRANSPORT is not None assert LEGACY_NOTIFIER is not None assert NOTIFIER is not None TRANSPORT.cleanup() NOTIFICATION_TRANSPORT.cleanup() TRANSPORT = NOTIFICATION_TRANSPORT = LEGACY_NOTIFIER = NOTIFIER = None def set_defaults(control_exchange): messaging.set_transport_defaults(control_exchange) def add_extra_exmods(*args): EXTRA_EXMODS.extend(args) def clear_extra_exmods(): del EXTRA_EXMODS[:] def get_allowed_exmods(): return ALLOWED_EXMODS + EXTRA_EXMODS class JsonPayloadSerializer(messaging.NoOpSerializer): @staticmethod def fallback(obj): """Serializer fallback This method is used to serialize an object which jsonutils.to_primitive does not otherwise know how to handle. This is mostly only needed in tests because of the use of the nova CheatingSerializer fixture which keeps some non-serializable fields on the RequestContext, like db_connection. """ if isinstance(obj, nova.context.RequestContext): # This matches RequestContextSerializer.serialize_context(). return obj.to_dict() # The default fallback in jsonutils.to_primitive() is str. 
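        # e.g. an otherwise unserializable attribute such as a raw database
        # connection object ends up in the payload as its str()
        # representation instead of breaking serialization.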
return str(obj) def serialize_entity(self, context, entity): return jsonutils.to_primitive(entity, convert_instances=True, fallback=self.fallback) class RequestContextSerializer(messaging.Serializer): def __init__(self, base): self._base = base def serialize_entity(self, context, entity): if not self._base: return entity return self._base.serialize_entity(context, entity) def deserialize_entity(self, context, entity): if not self._base: return entity return self._base.deserialize_entity(context, entity) def serialize_context(self, context): return context.to_dict() def deserialize_context(self, context): return nova.context.RequestContext.from_dict(context) class ProfilerRequestContextSerializer(RequestContextSerializer): def serialize_context(self, context): _context = super(ProfilerRequestContextSerializer, self).serialize_context(context) prof = profiler.get() if prof: # FIXME(DinaBelova): we'll add profiler.get_info() method # to extract this info -> we'll need to update these lines trace_info = { "hmac_key": prof.hmac_key, "base_id": prof.get_base_id(), "parent_id": prof.get_id() } _context.update({"trace_info": trace_info}) return _context def deserialize_context(self, context): trace_info = context.pop("trace_info", None) if trace_info: profiler.init(**trace_info) return super(ProfilerRequestContextSerializer, self).deserialize_context(context) def get_transport_url(url_str=None): return messaging.TransportURL.parse(CONF, url_str) def get_client(target, version_cap=None, serializer=None, call_monitor_timeout=None): assert TRANSPORT is not None if profiler: serializer = ProfilerRequestContextSerializer(serializer) else: serializer = RequestContextSerializer(serializer) return messaging.get_rpc_client(TRANSPORT, target, version_cap=version_cap, serializer=serializer, call_monitor_timeout=call_monitor_timeout) def get_server(target, endpoints, serializer=None): assert TRANSPORT is not None if profiler: serializer = ProfilerRequestContextSerializer(serializer) else: serializer = RequestContextSerializer(serializer) access_policy = dispatcher.DefaultRPCAccessPolicy exc = "threading" if utils.concurrency_mode_threading() else "eventlet" return messaging.get_rpc_server(TRANSPORT, target, endpoints, executor=exc, serializer=serializer, access_policy=access_policy) def get_notifier(service, host=None): assert LEGACY_NOTIFIER is not None publisher_id = '%s.%s' % (service, host or CONF.host) return LegacyValidatingNotifier( LEGACY_NOTIFIER.prepare(publisher_id=publisher_id)) def get_versioned_notifier(publisher_id): assert NOTIFIER is not None return NOTIFIER.prepare(publisher_id=publisher_id) def if_notifications_enabled(f): """Calls decorated method only if versioned notifications are enabled.""" @functools.wraps(f) def wrapped(*args, **kwargs): if (NOTIFIER.is_enabled() and CONF.notifications.notification_format in ('both', 'versioned')): return f(*args, **kwargs) else: return None return wrapped def create_transport(url): exmods = get_allowed_exmods() return messaging.get_rpc_transport(CONF, url=url, allowed_remote_exmods=exmods) class LegacyValidatingNotifier(object): """Wraps an oslo.messaging Notifier and checks for allowed event_types.""" # If true an exception is thrown if the event_type is not allowed, if false # then only a WARNING is logged fatal = False # This list contains the already existing therefore allowed legacy # notification event_types. New items shall not be added to the list as # Nova does not allow new legacy notifications any more. 
This list will be # removed when all the notification is transformed to versioned # notifications. allowed_legacy_notification_event_types = [ 'aggregate.addhost.end', 'aggregate.addhost.start', 'aggregate.create.end', 'aggregate.create.start', 'aggregate.delete.end', 'aggregate.delete.start', 'aggregate.removehost.end', 'aggregate.removehost.start', 'aggregate.updatemetadata.end', 'aggregate.updatemetadata.start', 'aggregate.updateprop.end', 'aggregate.updateprop.start', 'compute.instance.create.end', 'compute.instance.create.error', 'compute.instance.create_ip.end', 'compute.instance.create_ip.start', 'compute.instance.create.start', 'compute.instance.delete.end', 'compute.instance.delete_ip.end', 'compute.instance.delete_ip.start', 'compute.instance.delete.start', 'compute.instance.evacuate', 'compute.instance.exists', 'compute.instance.finish_resize.end', 'compute.instance.finish_resize.start', 'compute.instance.live.migration.abort.start', 'compute.instance.live.migration.abort.end', 'compute.instance.live.migration.force.complete.start', 'compute.instance.live.migration.force.complete.end', 'compute.instance.live_migration.post.dest.end', 'compute.instance.live_migration.post.dest.start', 'compute.instance.live_migration._post.end', 'compute.instance.live_migration._post.start', 'compute.instance.live_migration.pre.end', 'compute.instance.live_migration.pre.start', 'compute.instance.live_migration.rollback.dest.end', 'compute.instance.live_migration.rollback.dest.start', 'compute.instance.live_migration._rollback.end', 'compute.instance.live_migration._rollback.start', 'compute.instance.pause.end', 'compute.instance.pause.start', 'compute.instance.power_off.end', 'compute.instance.power_off.start', 'compute.instance.power_on.end', 'compute.instance.power_on.start', 'compute.instance.reboot.end', 'compute.instance.reboot.error', 'compute.instance.reboot.start', 'compute.instance.rebuild.end', 'compute.instance.rebuild.error', 'compute.instance.rebuild.scheduled', 'compute.instance.rebuild.start', 'compute.instance.rescue.end', 'compute.instance.rescue.start', 'compute.instance.resize.confirm.end', 'compute.instance.resize.confirm.start', 'compute.instance.resize.end', 'compute.instance.resize.error', 'compute.instance.resize.prep.end', 'compute.instance.resize.prep.start', 'compute.instance.resize.revert.end', 'compute.instance.resize.revert.start', 'compute.instance.resize.start', 'compute.instance.restore.end', 'compute.instance.restore.start', 'compute.instance.resume.end', 'compute.instance.resume.start', 'compute.instance.shelve.end', 'compute.instance.shelve_offload.end', 'compute.instance.shelve_offload.start', 'compute.instance.shelve.start', 'compute.instance.shutdown.end', 'compute.instance.shutdown.start', 'compute.instance.snapshot.end', 'compute.instance.snapshot.start', 'compute.instance.soft_delete.end', 'compute.instance.soft_delete.start', 'compute.instance.suspend.end', 'compute.instance.suspend.start', 'compute.instance.trigger_crash_dump.end', 'compute.instance.trigger_crash_dump.start', 'compute.instance.unpause.end', 'compute.instance.unpause.start', 'compute.instance.unrescue.end', 'compute.instance.unrescue.start', 'compute.instance.unshelve.start', 'compute.instance.unshelve.end', 'compute.instance.update', 'compute.instance.volume.attach', 'compute.instance.volume.detach', 'compute.libvirt.error', 'compute.metrics.update', 'compute_task.build_instances', 'compute_task.migrate_server', 'compute_task.rebuild_server', 'HostAPI.power_action.end', 
'HostAPI.power_action.start', 'HostAPI.set_enabled.end', 'HostAPI.set_enabled.start', 'HostAPI.set_maintenance.end', 'HostAPI.set_maintenance.start', 'keypair.create.start', 'keypair.create.end', 'keypair.delete.start', 'keypair.delete.end', 'keypair.import.start', 'keypair.import.end', 'network.floating_ip.allocate', 'network.floating_ip.associate', 'network.floating_ip.deallocate', 'network.floating_ip.disassociate', 'scheduler.select_destinations.end', 'scheduler.select_destinations.start', 'servergroup.addmember', 'servergroup.create', 'servergroup.delete', 'volume.usage', ] message = _('%(event_type)s is not a versioned notification and not ' 'whitelisted. See ./doc/source/reference/notifications.rst') def __init__(self, notifier): self.notifier = notifier for priority in ['debug', 'info', 'warn', 'error', 'critical']: setattr(self, priority, functools.partial(self._notify, priority)) def _is_wrap_exception_notification(self, payload): # nova.exception_wrapper.wrap_exception decorator emits notification # where the event_type is the name of the decorated function. This # is used in many places but it will be converted to versioned # notification in one run by updating the decorator so it is pointless # to white list all the function names here we white list the # notification itself detected by the special payload keys. return {'exception', 'args'} == set(payload.keys()) def _notify(self, priority, ctxt, event_type, payload): if (event_type not in self.allowed_legacy_notification_event_types and not self._is_wrap_exception_notification(payload)): if self.fatal: raise AssertionError(self.message % {'event_type': event_type}) else: LOG.warning(self.message, {'event_type': event_type}) getattr(self.notifier, priority)(ctxt, event_type, payload) class ClientRouter(periodic_task.PeriodicTasks): """Creates RPC clients that honor the context's RPC transport or provides a default. """ def __init__(self, default_client): super(ClientRouter, self).__init__(CONF) self.default_client = default_client self.target = default_client.target self.version_cap = default_client.version_cap self.serializer = default_client.serializer def client(self, context): transport = context.mq_connection if transport: cmt = self.default_client.call_monitor_timeout return messaging.get_rpc_client(transport, self.target, version_cap=self.version_cap, serializer=self.serializer, call_monitor_timeout=cmt) else: return self.default_client ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/safe_utils.py0000664000175000017500000000303700000000000016240 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Utilities and helper functions that won't produce circular imports.""" def get_wrapped_function(function): """Get the method at the bottom of a stack of decorators.""" if not hasattr(function, '__closure__') or not function.__closure__: return function def _get_wrapped_function(function): if not hasattr(function, '__closure__') or not function.__closure__: return None for closure in function.__closure__: func = closure.cell_contents deeper_func = _get_wrapped_function(func) if deeper_func: return deeper_func elif hasattr(closure.cell_contents, '__call__'): return closure.cell_contents return function return _get_wrapped_function(function) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.4096088 nova-32.0.0/nova/scheduler/0000775000175000017500000000000000000000000015503 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/__init__.py0000664000175000017500000000151000000000000017611 0ustar00zuulzuul00000000000000# Copyright (c) 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`nova.scheduler` -- Scheduler Nodes ===================================================== .. automodule:: nova.scheduler :platform: Unix :synopsis: Module that picks a compute node to run a VM instance. """ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315689.4096088 nova-32.0.0/nova/scheduler/client/0000775000175000017500000000000000000000000016761 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/client/__init__.py0000664000175000017500000000000000000000000021060 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/client/query.py0000664000175000017500000001030100000000000020473 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova.scheduler import rpcapi as scheduler_rpcapi class SchedulerQueryClient(object): """Client class for querying to the scheduler.""" def __init__(self): self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() def select_destinations(self, context, spec_obj, instance_uuids, return_objects=False, return_alternates=False): """Returns destinations(s) best suited for this request_spec and filter_properties. When return_objects is False, the result will be the "old-style" list of dicts with 'host', 'nodename' and 'limits' as keys. The value of return_alternates is ignored. When return_objects is True, the result will be a list of lists of Selection objects, with one list per instance. Each instance's list will contain a Selection representing the selected (and claimed) host, and, if return_alternates is True, zero or more Selection objects that represent alternate hosts. The number of alternates returned depends on the configuration setting `CONF.scheduler.max_attempts`. """ return self.scheduler_rpcapi.select_destinations(context, spec_obj, instance_uuids, return_objects, return_alternates) def update_aggregates(self, context, aggregates): """Updates HostManager internal aggregates information. :param aggregates: Aggregate(s) to update :type aggregates: :class:`nova.objects.Aggregate` or :class:`nova.objects.AggregateList` """ self.scheduler_rpcapi.update_aggregates(context, aggregates) def delete_aggregate(self, context, aggregate): """Deletes HostManager internal information about a specific aggregate. :param aggregate: Aggregate to delete :type aggregate: :class:`nova.objects.Aggregate` """ self.scheduler_rpcapi.delete_aggregate(context, aggregate) def update_instance_info(self, context, host_name, instance_info): """Updates the HostManager with the current information about the instances on a host. :param context: local context :param host_name: name of host sending the update :param instance_info: an InstanceList object. """ self.scheduler_rpcapi.update_instance_info(context, host_name, instance_info) def delete_instance_info(self, context, host_name, instance_uuid): """Updates the HostManager with the current information about an instance that has been deleted on a host. :param context: local context :param host_name: name of host sending the update :param instance_uuid: the uuid of the deleted instance """ self.scheduler_rpcapi.delete_instance_info(context, host_name, instance_uuid) def sync_instance_info(self, context, host_name, instance_uuids): """Notifies the HostManager of the current instances on a host by sending a list of the uuids for those instances. The HostManager can then compare that with its in-memory view of the instances to detect when they are out of sync. :param context: local context :param host_name: name of host sending the update :param instance_uuids: a list of UUID strings representing the current instances on the specified host """ self.scheduler_rpcapi.sync_instance_info(context, host_name, instance_uuids) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/client/report.py0000664000175000017500000036661000000000000020662 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import contextlib import copy import functools import random import time import typing as ty from keystoneauth1 import exceptions as ks_exc import os_resource_classes as orc import os_traits from oslo_log import log as logging from oslo_middleware import request_id from oslo_utils import excutils from oslo_utils import versionutils import retrying from nova.compute import provider_tree import nova.conf from nova import context as nova_context from nova import exception from nova.i18n import _ from nova import objects from nova.objects import fields from nova import utils CONF = nova.conf.CONF LOG = logging.getLogger(__name__) WARN_EVERY = 10 SAME_SUBTREE_VERSION = '1.36' RESHAPER_VERSION = '1.30' CONSUMER_GENERATION_VERSION = '1.28' ALLOW_RESERVED_EQUAL_TOTAL_INVENTORY_VERSION = '1.26' POST_RPS_RETURNS_PAYLOAD_API_VERSION = '1.20' AGGREGATE_GENERATION_VERSION = '1.19' NESTED_PROVIDER_API_VERSION = '1.14' POST_ALLOCATIONS_API_VERSION = '1.13' GET_USAGES_VERSION = '1.9' PLACEMENTCLIENT = None AggInfo = collections.namedtuple('AggInfo', ['aggregates', 'generation']) TraitInfo = collections.namedtuple('TraitInfo', ['traits', 'generation']) ProviderAllocInfo = collections.namedtuple( 'ProviderAllocInfo', ['allocations']) def warn_limit(self, msg): if self._warn_count: self._warn_count -= 1 else: self._warn_count = WARN_EVERY LOG.warning(msg) def report_client_singleton(): """Return a reference to the global placement client singleton. This initializes the placement client once and returns a reference to that singleton on subsequent calls. Errors are raised (particularly ks_exc.*) but context-specific error messages are logged for consistency. """ # NOTE(danms): The report client maintains internal state in the # form of the provider tree, which will be shared across all users # of this global client. That is not a problem now, but in the # future it may be beneficial to fix that. One idea would be to # change the behavior of the client such that the static-config # pieces of the actual keystone client are separate from the # internal state, so that we can return a new object here with a # context-specific local state object, but with the client bits # shared. 
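    # Illustrative sketch (comments only): callers share a single client
    # instance, so repeated calls are cheap and all users see the same
    # provider-tree cache. `ctxt` and `instance_uuid` below are placeholders;
    # the method shown is defined on SchedulerReportClient further down.
    #
    #     client = report_client_singleton()
    #     allocs = client.get_allocs_for_consumer(ctxt, instance_uuid)
    #
    # A second call to report_client_singleton() returns the same object, so
    # the keystone session and any cached provider data are reused.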
global PLACEMENTCLIENT if PLACEMENTCLIENT is None: try: PLACEMENTCLIENT = SchedulerReportClient() except ks_exc.EndpointNotFound: LOG.error('The placement API endpoint was not found.') raise except ks_exc.MissingAuthPlugin: LOG.error('No authentication information found for placement API.') raise except ks_exc.Unauthorized: LOG.error('Placement service credentials do not work.') raise except ks_exc.DiscoveryFailure: LOG.error('Discovering suitable URL for placement API failed.') raise except (ks_exc.ConnectFailure, ks_exc.RequestTimeout, ks_exc.GatewayTimeout): LOG.error('Placement API service is not responding.') raise except Exception: LOG.error('Failed to initialize placement client ' '(is keystone available?)') raise return PLACEMENTCLIENT def safe_connect(f): @functools.wraps(f) def wrapper(self, *a, **k): try: return f(self, *a, **k) except ks_exc.EndpointNotFound: warn_limit( self, 'The placement API endpoint was not found.') # Reset client session so there is a new catalog, which # gets cached when keystone is first successfully contacted. self._client = self._create_client() except ks_exc.MissingAuthPlugin: warn_limit( self, 'No authentication information found for placement API.') except ks_exc.Unauthorized: warn_limit( self, 'Placement service credentials do not work.') except ks_exc.DiscoveryFailure: # TODO(_gryf): Looks like DiscoveryFailure is not the only missing # exception here. In Pike we should take care about keystoneauth1 # failures handling globally. warn_limit(self, 'Discovering suitable URL for placement API failed.') except ks_exc.ConnectFailure: LOG.warning('Placement API service is not responding.') return wrapper class Retry(Exception): def __init__(self, operation, reason): self.operation = operation self.reason = reason def retries(f): """Decorator to retry a call three times if it raises Retry Note that this returns the actual value of the inner call on success or returns False if all the retries fail. """ @functools.wraps(f) def wrapper(self, *a, **k): for retry in range(0, 4): try: sleep_time = random.uniform(0, retry * 2) time.sleep(sleep_time) return f(self, *a, **k) except Retry as e: LOG.debug( 'Unable to %(op)s because %(reason)s; retrying...', {'op': e.operation, 'reason': e.reason}) LOG.error('Failed scheduler client operation %s: out of retries', f.__name__) return False return wrapper def _move_operation_alloc_request(source_allocs, dest_alloc_req): """Given existing allocations for a source host and a new allocation request for a destination host, return a new allocation_request that contains resources claimed against both source and destination, accounting for shared providers. This is expected to only be used during an evacuate operation. :param source_allocs: Dict, keyed by resource provider UUID, of resources allocated on the source host :param dest_alloc_req: The allocation_request for resources against the destination host """ LOG.debug("Doubling-up allocation_request for move operation. 
Current " "allocations: %s", source_allocs) # Remove any allocations against resource providers that are # already allocated against on the source host (like shared storage # providers) cur_rp_uuids = set(source_allocs.keys()) new_rp_uuids = set(dest_alloc_req['allocations']) - cur_rp_uuids current_allocs = { cur_rp_uuid: {'resources': alloc['resources']} for cur_rp_uuid, alloc in source_allocs.items() } new_alloc_req = {'allocations': current_allocs} for rp_uuid in dest_alloc_req['allocations']: if rp_uuid in new_rp_uuids: new_alloc_req['allocations'][rp_uuid] = dest_alloc_req[ 'allocations'][rp_uuid] LOG.debug("New allocation_request containing both source and " "destination hosts in move operation: %s", new_alloc_req) return new_alloc_req def get_placement_request_id(response): if response is not None: return response.headers.get(request_id.HTTP_RESP_HEADER_REQUEST_ID) # TODO(mriedem): Consider making SchedulerReportClient a global singleton so # that things like the compute API do not have to lazy-load it. That would # likely require inspecting methods that use a ProviderTree cache to see if # they need locks. class SchedulerReportClient(object): """Client class for updating the scheduler.""" def __init__(self, adapter=None): """Initialize the report client. :param adapter: A prepared keystoneauth1 Adapter for API communication. If unspecified, one is created based on config options in the [placement] section. Note: You should not instantiate this directly. You should call the report_client_singleton() function instead. """ self._adapter = adapter # An object that contains a nova-compute-side cache of resource # provider and inventory information self._provider_tree: provider_tree.ProviderTree = None # Track the last time we updated providers' aggregates and traits self._association_refresh_time: ty.Dict[str, float] = {} self._client = self._create_client() # NOTE(danms): Keep track of how naggy we've been self._warn_count = 0 def clear_provider_cache(self, init=False): if not init: LOG.info("Clearing the report client's provider cache.") self._provider_tree = provider_tree.ProviderTree() self._association_refresh_time = {} def _clear_provider_cache_for_tree(self, rp_uuid): """Clear the provider cache for only the tree containing rp_uuid. This exists for situations where we encounter an error updating placement, and therefore need to refresh the provider tree cache before redriving the update. However, it would be wasteful and inefficient to clear the *entire* cache, which may contain many separate trees (e.g. ironic nodes or sharing providers) which should be unaffected by the error. :param rp_uuid: UUID of a resource provider, which may be anywhere in a a tree hierarchy, i.e. need not be a root. For non-root providers, we still clear the cache for the entire tree including descendants, ancestors up to the root, siblings/cousins and *their* ancestors/descendants. """ try: uuids = self._provider_tree.get_provider_uuids_in_tree(rp_uuid) except ValueError: # If the provider isn't in the tree, it should also not be in the # timer dict, so nothing to clear. return # get_provider_uuids_in_tree returns UUIDs in top-down order, so the # first one is the root; and .remove() is recursive. self._provider_tree.remove(uuids[0]) for uuid in uuids: self._association_refresh_time.pop(uuid, None) def _create_client(self): """Create the HTTP session accessing the placement service.""" # Flush provider tree and associations so we start from a clean slate. 
self.clear_provider_cache(init=True) client = self._adapter or utils.get_sdk_adapter( "placement", admin=True) # Set accept header on every request to ensure we notify placement # service of our response body media type preferences. client.additional_headers = {'accept': 'application/json'} return client def get(self, url, version=None, global_request_id=None): return self._client.get(url, microversion=version, global_request_id=global_request_id) def post(self, url, data, version=None, global_request_id=None): # NOTE(sdague): using json= instead of data= sets the # media type to application/json for us. Placement API is # more sensitive to this than other APIs in the OpenStack # ecosystem. return self._client.post(url, json=data, microversion=version, global_request_id=global_request_id) def put(self, url, data, version=None, global_request_id=None): # NOTE(sdague): using json= instead of data= sets the # media type to application/json for us. Placement API is # more sensitive to this than other APIs in the OpenStack # ecosystem. return self._client.put(url, json=data, microversion=version, global_request_id=global_request_id) def delete(self, url, version=None, global_request_id=None): return self._client.delete(url, microversion=version, global_request_id=global_request_id) @safe_connect def get_allocation_candidates(self, context, resources): """Returns a tuple of (allocation_requests, provider_summaries, allocation_request_version). The allocation_requests are a collection of potential JSON objects that can be passed to the PUT /allocations/{consumer_uuid} Placement REST API to claim resources against one or more resource providers that meet the requested resource constraints. The provider summaries is a dict, keyed by resource provider UUID, of inventory and capacity information and traits for any resource provider involved in the allocation_requests. :returns: A tuple with a list of allocation_request dicts, a dict of provider information, and the microversion used to request this data from placement, or (None, None, None) if the request failed :param context: The security context :param nova.scheduler.utils.ResourceRequest resources: A ResourceRequest object representing the requested resources, traits, and aggregates from the request spec. Example member_of (aggregates) value in resources: [('foo', 'bar'), ('baz',)] translates to: "Candidates are in either 'foo' or 'bar', but definitely in 'baz'" """ # Note that claim_resources() will use this version as well to # make allocations by `PUT /allocations/{consumer_uuid}` version = SAME_SUBTREE_VERSION qparams = resources.to_querystring() url = "/allocation_candidates?%s" % qparams resp = self.get(url, version=version, global_request_id=context.global_id) if resp.status_code == 200: data = resp.json() return (data['allocation_requests'], data['provider_summaries'], version) args = { 'resource_request': str(resources), 'status_code': resp.status_code, 'err_text': resp.text, } msg = ("Failed to retrieve allocation candidates from placement " "API for filters: %(resource_request)s\n" "Got %(status_code)d: %(err_text)s.") LOG.error(msg, args) return None, None, None @safe_connect def _get_provider_aggregates(self, context, rp_uuid): """Queries the placement API for a resource provider's aggregates. :param rp_uuid: UUID of the resource provider to grab aggregates for. :return: A namedtuple comprising: * .aggregates: A set() of string aggregate UUIDs, which may be empty if the specified provider is associated with no aggregates. 
* .generation: The resource provider generation. :raise: ResourceProviderAggregateRetrievalFailed on errors. In particular, we raise this exception (as opposed to returning None or the empty set()) if the specified resource provider does not exist. """ resp = self.get("/resource_providers/%s/aggregates" % rp_uuid, version=AGGREGATE_GENERATION_VERSION, global_request_id=context.global_id) if resp.status_code == 200: data = resp.json() return AggInfo(aggregates=set(data['aggregates']), generation=data['resource_provider_generation']) placement_req_id = get_placement_request_id(resp) msg = ("[%(placement_req_id)s] Failed to retrieve aggregates from " "placement API for resource provider with UUID %(uuid)s. " "Got %(status_code)d: %(err_text)s.") args = { 'placement_req_id': placement_req_id, 'uuid': rp_uuid, 'status_code': resp.status_code, 'err_text': resp.text, } LOG.error(msg, args) raise exception.ResourceProviderAggregateRetrievalFailed(uuid=rp_uuid) def get_provider_traits(self, context, rp_uuid): """Queries the placement API for a resource provider's traits. :param context: The security context :param rp_uuid: UUID of the resource provider to grab traits for. :return: A namedtuple comprising: * .traits: A set() of string trait names, which may be empty if the specified provider has no traits. * .generation: The resource provider generation. :raise: ResourceProviderTraitRetrievalFailed on errors. In particular, we raise this exception (as opposed to returning None or the empty set()) if the specified resource provider does not exist. :raise: keystoneauth1.exceptions.ClientException if placement API communication fails. """ resp = self.get("/resource_providers/%s/traits" % rp_uuid, version='1.6', global_request_id=context.global_id) if resp.status_code == 200: json = resp.json() return TraitInfo(traits=set(json['traits']), generation=json['resource_provider_generation']) placement_req_id = get_placement_request_id(resp) LOG.error( "[%(placement_req_id)s] Failed to retrieve traits from " "placement API for resource provider with UUID %(uuid)s. Got " "%(status_code)d: %(err_text)s.", {'placement_req_id': placement_req_id, 'uuid': rp_uuid, 'status_code': resp.status_code, 'err_text': resp.text}) raise exception.ResourceProviderTraitRetrievalFailed(uuid=rp_uuid) def get_resource_provider_name(self, context, uuid): """Return the name of a RP. It tries to use the internal of RPs or falls back to calling placement directly. :param context: The security context :param uuid: UUID identifier for the resource provider to look up :return: The name of the RP :raise: ResourceProviderRetrievalFailed if the RP is not in the cache and the communication with the placement is failed. :raise: ResourceProviderNotFound if the RP does not exist. """ try: return self._provider_tree.data(uuid).name except ValueError: rsp = self._get_resource_provider(context, uuid) if rsp is None: raise exception.ResourceProviderNotFound(name_or_uuid=uuid) else: return rsp['name'] @safe_connect def _get_resource_provider(self, context, uuid): """Queries the placement API for a resource provider record with the supplied UUID. :param context: The security context :param uuid: UUID identifier for the resource provider to look up :return: A dict of resource provider information if found or None if no such resource provider could be found. :raise: ResourceProviderRetrievalFailed on error. 
""" resp = self.get("/resource_providers/%s" % uuid, version=NESTED_PROVIDER_API_VERSION, global_request_id=context.global_id) if resp.status_code == 200: data = resp.json() return data elif resp.status_code == 404: return None else: placement_req_id = get_placement_request_id(resp) msg = ("[%(placement_req_id)s] Failed to retrieve resource " "provider record from placement API for UUID %(uuid)s. Got " "%(status_code)d: %(err_text)s.") args = { 'uuid': uuid, 'status_code': resp.status_code, 'err_text': resp.text, 'placement_req_id': placement_req_id, } LOG.error(msg, args) raise exception.ResourceProviderRetrievalFailed(uuid=uuid) @safe_connect def _get_sharing_providers(self, context, agg_uuids): """Queries the placement API for a list of the resource providers associated with any of the specified aggregates and possessing the MISC_SHARES_VIA_AGGREGATE trait. :param context: The security context :param agg_uuids: Iterable of string UUIDs of aggregates to filter on. :return: A list of dicts of resource provider information, which may be empty if no provider exists with the specified UUID. :raise: ResourceProviderRetrievalFailed on error. """ if not agg_uuids: return [] maxuuids = CONF.compute.sharing_providers_max_uuids_per_request agg_uuids = list(agg_uuids) resource_providers = {} for i in range(0, len(agg_uuids), maxuuids): aggs = ','.join(agg_uuids[i:i + maxuuids]) url = "/resource_providers?member_of=in:%s&required=%s" % ( aggs, os_traits.MISC_SHARES_VIA_AGGREGATE) resp = self.get(url, version='1.18', global_request_id=context.global_id) if resp.status_code == 200: # We want to ensure that an RP on different aggregate # will not be duplicated. for rp in resp.json()['resource_providers']: if not rp['uuid'] in resource_providers: resource_providers[rp['uuid']] = rp else: msg = _("[%(placement_req_id)s] %(iquery)s/%(isize)s Failed " "to retrieve sharing resource providers associated " "with the following aggregates from placement API: " "%(aggs)s. Got %(status_code)d: %(err_text)s.") args = { 'aggs': aggs, 'status_code': resp.status_code, 'err_text': resp.text, 'placement_req_id': get_placement_request_id(resp), 'iquery': i + 1, 'isize': len(agg_uuids) } LOG.error(msg, args) raise exception.ResourceProviderRetrievalFailed( message=msg % args) return list(resource_providers.values()) def get_providers_in_tree(self, context, uuid): """Queries the placement API for a list of the resource providers in the tree associated with the specified UUID. :param context: The security context :param uuid: UUID identifier for the resource provider to look up :return: A list of dicts of resource provider information, which may be empty if no provider exists with the specified UUID. :raise: ResourceProviderRetrievalFailed on error. :raise: keystoneauth1.exceptions.ClientException if placement API communication fails. """ resp = self.get("/resource_providers?in_tree=%s" % uuid, version=NESTED_PROVIDER_API_VERSION, global_request_id=context.global_id) if resp.status_code == 200: return resp.json()['resource_providers'] # Some unexpected error placement_req_id = get_placement_request_id(resp) msg = ("[%(placement_req_id)s] Failed to retrieve resource provider " "tree from placement API for UUID %(uuid)s. 
Got " "%(status_code)d: %(err_text)s.") args = { 'uuid': uuid, 'status_code': resp.status_code, 'err_text': resp.text, 'placement_req_id': placement_req_id, } LOG.error(msg, args) raise exception.ResourceProviderRetrievalFailed(uuid=uuid) @safe_connect def _create_resource_provider(self, context, uuid, name, parent_provider_uuid=None): """Calls the placement API to create a new resource provider record. :param context: The security context :param uuid: UUID of the new resource provider :param name: Name of the resource provider :param parent_provider_uuid: Optional UUID of the immediate parent :return: A dict of resource provider information object representing the newly-created resource provider. :raise: ResourceProviderCreationFailed or ResourceProviderRetrievalFailed on error. """ url = "/resource_providers" payload = { 'uuid': uuid, 'name': name, } if parent_provider_uuid is not None: payload['parent_provider_uuid'] = parent_provider_uuid # Bug #1746075: First try the microversion that returns the new # provider's payload. resp = self.post(url, payload, version=POST_RPS_RETURNS_PAYLOAD_API_VERSION, global_request_id=context.global_id) placement_req_id = get_placement_request_id(resp) if resp: msg = ("[%(placement_req_id)s] Created resource provider record " "via placement API for resource provider with UUID " "%(uuid)s and name %(name)s.") args = { 'uuid': uuid, 'name': name, 'placement_req_id': placement_req_id, } LOG.info(msg, args) return resp.json() # TODO(efried): Push error codes from placement, and use 'em. name_conflict = 'Conflicting resource provider name:' if resp.status_code == 409 and name_conflict not in resp.text: # Another thread concurrently created a resource provider with the # same UUID. Log a warning and then just return the resource # provider object from _get_resource_provider() msg = ("[%(placement_req_id)s] Another thread already created a " "resource provider with the UUID %(uuid)s. Grabbing that " "record from the placement API.") args = { 'uuid': uuid, 'placement_req_id': placement_req_id, } LOG.info(msg, args) return self._get_resource_provider(context, uuid) # A provider with the same *name* already exists, or some other error. msg = ("[%(placement_req_id)s] Failed to create resource provider " "record in placement API for UUID %(uuid)s. Got " "%(status_code)d: %(err_text)s.") args = { 'uuid': uuid, 'status_code': resp.status_code, 'err_text': resp.text, 'placement_req_id': placement_req_id, } LOG.error(msg, args) raise exception.ResourceProviderCreationFailed(name=name) def _ensure_resource_provider(self, context, uuid, name=None, parent_provider_uuid=None): """Ensures that the placement API has a record of a resource provider with the supplied UUID. If not, creates the resource provider record in the placement API for the supplied UUID, passing in a name for the resource provider. If found or created, the provider's UUID is returned from this method. If the resource provider for the supplied uuid was not found and the resource provider record could not be created in the placement API, an exception is raised. 
If this method returns successfully, callers are assured that the placement API contains a record of the provider; and that the local cache of resource provider information contains a record of: - The specified provider - All providers in its tree - All sharing providers associated via aggregate with all providers in said tree and for each of those providers: - The UUIDs of its aggregates - The trait strings associated with the provider Note that if the provider did not exist prior to this call, the above reduces to just the specified provider as a root, with no aggregates or traits. :param context: The security context :param uuid: UUID identifier for the resource provider to ensure exists :param name: Optional name for the resource provider if the record does not exist. If empty, the name is set to the UUID value :param parent_provider_uuid: Optional UUID of the immediate parent, which must have been previously _ensured. :raise ResourceProviderCreationFailed: If we expected to be creating providers, but couldn't. :raise: keystoneauth1.exceptions.ClientException if placement API communication fails. """ # NOTE(efried): We currently have no code path where we need to set the # parent_provider_uuid on a previously-parent-less provider - so we do # NOT handle that scenario here. # If we already have the root provider in the cache, and it's not # stale, don't refresh it; and use the cache to determine the # descendants to (soft) refresh. # NOTE(efried): This assumes the compute service only cares about # providers it "owns". If that ever changes, we'll need a way to find # out about out-of-band changes here. Options that have been # brainstormed at this time: # - Make this condition more frequently True # - Some kind of notification subscription so a separate thread is # alerted when . # - "Cascading generations" - i.e. a change to a leaf node percolates # generation bump up the tree so that we bounce 409 the next time we # try to update anything and have to refresh. if (self._provider_tree.exists(uuid) and not self._associations_stale(uuid)): uuids_to_refresh = [ u for u in self._provider_tree.get_provider_uuids(uuid) if self._associations_stale(u)] else: # We either don't have it locally or it's stale. Pull or create it. created_rp = None rps_to_refresh = self.get_providers_in_tree(context, uuid) if not rps_to_refresh: created_rp = self._create_resource_provider( context, uuid, name or uuid, parent_provider_uuid=parent_provider_uuid) # If @safe_connect can't establish a connection to the # placement service, like if placement isn't running or # nova-compute is mis-configured for authentication, we'll get # None back and need to treat it like we couldn't create the # provider (because we couldn't). if created_rp is None: raise exception.ResourceProviderCreationFailed( name=name or uuid) # Don't add the created_rp to rps_to_refresh. Since we just # created it, it has no aggregates or traits. # But do mark it as having just been "refreshed". self._association_refresh_time[uuid] = time.time() self._provider_tree.populate_from_iterable( rps_to_refresh or [created_rp]) uuids_to_refresh = [rp['uuid'] for rp in rps_to_refresh] # At this point, the whole tree exists in the local cache. 
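        # Worked example (comment only): if the tree was already cached and
        # only one descendant's associations had gone stale, uuids_to_refresh
        # holds just that descendant; if the tree had to be pulled from
        # placement it holds every provider returned by
        # get_providers_in_tree; and for a provider we just created it is
        # empty, since a brand new root has no aggregates or traits to fetch.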
for uuid_to_refresh in uuids_to_refresh: self._refresh_associations(context, uuid_to_refresh, force=True) return uuid def _delete_provider(self, rp_uuid, global_request_id=None): resp = self.delete('/resource_providers/%s' % rp_uuid, global_request_id=global_request_id) # Check for 404 since we don't need to warn/raise if we tried to delete # something which doesn"t actually exist. if resp or resp.status_code == 404: if resp: LOG.info("Deleted resource provider %s", rp_uuid) # clean the caches self.invalidate_resource_provider(rp_uuid) return msg = ("[%(placement_req_id)s] Failed to delete resource provider " "with UUID %(uuid)s from the placement API. Got " "%(status_code)d: %(err_text)s.") args = { 'placement_req_id': get_placement_request_id(resp), 'uuid': rp_uuid, 'status_code': resp.status_code, 'err_text': resp.text } LOG.error(msg, args) # On conflict, the caller may wish to delete allocations and # redrive. (Note that this is not the same as a # PlacementAPIConflict case.) if resp.status_code == 409: raise exception.ResourceProviderInUse() raise exception.ResourceProviderDeletionFailed(uuid=rp_uuid) def _get_inventory(self, context, rp_uuid): url = '/resource_providers/%s/inventories' % rp_uuid result = self.get(url, global_request_id=context.global_id) if not result: # TODO(efried): Log. return None return result.json() def _refresh_and_get_inventory(self, context, rp_uuid): """Helper method that retrieves the current inventory for the supplied resource provider according to the placement API. If the cached generation of the resource provider is not the same as the generation returned from the placement API, we update the cached generation and attempt to update inventory if any exists, otherwise return empty inventories. """ curr = self._get_inventory(context, rp_uuid) if curr is None: return None LOG.debug('Updating ProviderTree inventory for provider %s from ' '_refresh_and_get_inventory using data: %s', rp_uuid, curr['inventories']) self._provider_tree.update_inventory( rp_uuid, curr['inventories'], generation=curr['resource_provider_generation']) return curr def _refresh_associations(self, context, rp_uuid, force=False, refresh_sharing=True): """Refresh inventories, aggregates, traits, and (optionally) aggregate- associated sharing providers for the specified resource provider uuid. Only refresh if there has been no refresh during the lifetime of this process, CONF.compute.resource_provider_association_refresh seconds have passed, or the force arg has been set to True. :param context: The security context :param rp_uuid: UUID of the resource provider to check for fresh inventories, aggregates, and traits :param force: If True, force the refresh :param refresh_sharing: If True, fetch all the providers associated by aggregate with the specified provider, including their inventories, traits, and aggregates (but not *their* sharing providers). :raise: On various placement API errors, one of: - ResourceProviderAggregateRetrievalFailed - ResourceProviderTraitRetrievalFailed - ResourceProviderRetrievalFailed :raise: keystoneauth1.exceptions.ClientException if placement API communication fails. """ if force or self._associations_stale(rp_uuid): # Refresh inventories msg = "Refreshing inventories for resource provider %s" LOG.debug(msg, rp_uuid) self._refresh_and_get_inventory(context, rp_uuid) # Refresh aggregates agg_info = self._get_provider_aggregates(context, rp_uuid) # If @safe_connect makes the above return None, this will raise # TypeError. Good. 
aggs, generation = agg_info.aggregates, agg_info.generation msg = ("Refreshing aggregate associations for resource provider " "%s, aggregates: %s") LOG.debug(msg, rp_uuid, ','.join(aggs or ['None'])) # NOTE(efried): This will blow up if called for a RP that doesn't # exist in our _provider_tree. self._provider_tree.update_aggregates( rp_uuid, aggs, generation=generation) # Refresh traits trait_info = self.get_provider_traits(context, rp_uuid) traits, generation = trait_info.traits, trait_info.generation msg = ("Refreshing trait associations for resource provider %s, " "traits: %s") LOG.debug(msg, rp_uuid, ','.join(traits or ['None'])) # NOTE(efried): This will blow up if called for a RP that doesn't # exist in our _provider_tree. self._provider_tree.update_traits( rp_uuid, traits, generation=generation) if refresh_sharing: # Refresh providers associated by aggregate for rp in self._get_sharing_providers(context, aggs): if not self._provider_tree.exists(rp['uuid']): # NOTE(efried): Right now sharing providers are always # treated as roots. This is deliberate. From the # context of this compute's RP, it doesn't matter if a # sharing RP is part of a tree. self._provider_tree.new_root( rp['name'], rp['uuid'], generation=rp['generation']) # Now we have to (populate or) refresh that provider's # traits, aggregates, and inventories (but not *its* # aggregate-associated providers). No need to override # force=True for newly-added providers - the missing # timestamp will always trigger them to refresh. self._refresh_associations(context, rp['uuid'], force=force, refresh_sharing=False) self._association_refresh_time[rp_uuid] = time.time() def _associations_stale(self, uuid): """Respond True if aggregates and traits have not been refreshed "recently". Associations are stale if association_refresh_time for this uuid is not set or is more than CONF.compute.resource_provider_association_refresh seconds ago. Always False if CONF.compute.resource_provider_association_refresh is zero. """ rpar = CONF.compute.resource_provider_association_refresh refresh_time = self._association_refresh_time.get(uuid, 0) # If refresh is disabled, associations are "never" stale. (But still # load them if we haven't yet done so.) if rpar == 0 and refresh_time != 0: # TODO(efried): If refresh is disabled, we could avoid touching the # _association_refresh_time dict anywhere, but that would take some # nontrivial refactoring. return False return (time.time() - refresh_time) > rpar def get_provider_tree_and_ensure_root(self, context, rp_uuid, name=None, parent_provider_uuid=None): """Returns a fresh ProviderTree representing all providers which are in the same tree or in the same aggregate as the specified provider, including their aggregates, traits, and inventories. If the specified provider does not exist, it is created with the specified UUID, name, and parent provider (which *must* already exist). :param context: The security context :param rp_uuid: UUID of the resource provider for which to populate the tree. (This doesn't need to be the UUID of the root.) :param name: Optional name for the resource provider if the record does not exist. If empty, the name is set to the UUID value :param parent_provider_uuid: Optional UUID of the immediate parent, which must have been previously _ensured. :return: A new ProviderTree object. 
""" # TODO(efried): We would like to have the caller handle create-and/or- # cache-if-not-already, but the resource tracker is currently # structured to handle initialization and update in a single path. At # some point this should be refactored, and this method can *just* # return a deep copy of the local _provider_tree cache. # (Re)populate the local ProviderTree self._ensure_resource_provider( context, rp_uuid, name=name, parent_provider_uuid=parent_provider_uuid) # Return a *copy* of the tree. return copy.deepcopy(self._provider_tree) def set_inventory_for_provider(self, context, rp_uuid, inv_data): """Given the UUID of a provider, set the inventory records for the provider to the supplied dict of resources. The provider must exist - this method does not attempt to create it. :param context: The security context :param rp_uuid: The UUID of the provider whose inventory is to be updated. :param inv_data: Dict, keyed by resource class name, of inventory data to set for the provider. Use None or the empty dict to remove all inventory for the provider. :raises: InventoryInUse if inv_data indicates removal of inventory in a resource class which has active allocations for this provider. :raises: InvalidResourceClass if inv_data contains a resource class which cannot be created. :raises: ResourceProviderUpdateConflict if the provider's generation doesn't match the generation in the cache. Callers may choose to retrieve the provider and its associations afresh and redrive this operation. :raises: ResourceProviderUpdateFailed on any other placement API failure. """ # NOTE(efried): This is here because _ensure_resource_class already has # @safe_connect, so we don't want to decorate this whole method with it @safe_connect def do_put(url, payload): # NOTE(vdrok): in microversion 1.26 it is allowed to have inventory # records with reserved value equal to total return self.put( url, payload, global_request_id=context.global_id, version=ALLOW_RESERVED_EQUAL_TOTAL_INVENTORY_VERSION) # If not different from what we've got, short out if not self._provider_tree.has_inventory_changed(rp_uuid, inv_data): LOG.debug('Inventory has not changed for provider %s based ' 'on inventory data: %s', rp_uuid, inv_data) return # Ensure non-standard resource classes exist, creating them if needed. self._ensure_resource_classes(context, set(inv_data)) url = '/resource_providers/%s/inventories' % rp_uuid inv_data = inv_data or {} generation = self._provider_tree.data(rp_uuid).generation payload = { 'resource_provider_generation': generation, 'inventories': inv_data, } resp = do_put(url, payload) if resp.status_code == 200: LOG.debug('Updated inventory for provider %s with generation %s ' 'in Placement from set_inventory_for_provider using ' 'data: %s', rp_uuid, generation, inv_data) json = resp.json() self._provider_tree.update_inventory( rp_uuid, json['inventories'], generation=json['resource_provider_generation']) return # Some error occurred; log it msg = ("[%(placement_req_id)s] Failed to update inventory to " "[%(inv_data)s] for resource provider with UUID %(uuid)s. 
Got " "%(status_code)d: %(err_text)s") args = { 'placement_req_id': get_placement_request_id(resp), 'uuid': rp_uuid, 'inv_data': str(inv_data), 'status_code': resp.status_code, 'err_text': resp.text, } LOG.error(msg, args) if resp.status_code == 409: # If a conflict attempting to remove inventory in a resource class # with active allocations, raise InventoryInUse err = resp.json()['errors'][0] # TODO(efried): If there's ever a lib exporting symbols for error # codes, use it. if err['code'] == 'placement.inventory.inuse': # The error detail includes the resource class and provider. raise exception.InventoryInUse(err['detail']) # Other conflicts are generation mismatch: raise conflict exception raise exception.ResourceProviderUpdateConflict( uuid=rp_uuid, generation=generation, error=resp.text) # Otherwise, raise generic exception raise exception.ResourceProviderUpdateFailed(url=url, error=resp.text) @safe_connect def _ensure_traits(self, context, traits): """Make sure all specified traits exist in the placement service. :param context: The security context :param traits: Iterable of trait strings to ensure exist. :raises: TraitCreationFailed if traits contains a trait that did not exist in placement, and couldn't be created. When this exception is raised, it is possible that *some* of the requested traits were created. :raises: TraitRetrievalFailed if the initial query of existing traits was unsuccessful. In this scenario, it is guaranteed that no traits were created. """ if not traits: return # Query for all the requested traits. Whichever ones we *don't* get # back, we need to create. # NOTE(efried): We don't attempt to filter based on our local idea of # standard traits, which may not be in sync with what the placement # service knows. If the caller tries to ensure a nonexistent # "standard" trait, they deserve the TraitCreationFailed exception # they'll get. resp = self.get('/traits?name=in:' + ','.join(traits), version='1.6', global_request_id=context.global_id) if resp.status_code == 200: traits_to_create = set(traits) - set(resp.json()['traits']) # Might be neat to have a batch create. But creating multiple # traits will generally happen once, at initial startup, if at all. for trait in traits_to_create: resp = self.put('/traits/' + trait, None, version='1.6', global_request_id=context.global_id) if not resp: raise exception.TraitCreationFailed(name=trait, error=resp.text) return # The initial GET failed msg = ("[%(placement_req_id)s] Failed to retrieve the list of traits. " "Got %(status_code)d: %(err_text)s") args = { 'placement_req_id': get_placement_request_id(resp), 'status_code': resp.status_code, 'err_text': resp.text, } LOG.error(msg, args) raise exception.TraitRetrievalFailed(error=resp.text) @safe_connect def set_traits_for_provider( self, context: nova_context.RequestContext, rp_uuid: str, traits: ty.Iterable[str], generation: ty.Optional[int] = None ): """Replace a provider's traits with those specified. The provider must exist - this method does not attempt to create it. :param context: The security context :param rp_uuid: The UUID of the provider whose traits are to be updated :param traits: Iterable of traits to set on the provider :param generation: The resource provider generation if known. If not provided then the value from the provider tree cache will be used. :raises: ResourceProviderUpdateConflict if the provider's generation doesn't match the generation in the cache. 
Callers may choose to retrieve the provider and its associations afresh and redrive this operation. :raises: ResourceProviderUpdateFailed on any other placement API failure. :raises: TraitCreationFailed if traits contains a trait that did not exist in placement, and couldn't be created. :raises: TraitRetrievalFailed if the initial query of existing traits was unsuccessful. """ # If not different from what we've got, short out if not self._provider_tree.have_traits_changed(rp_uuid, traits): return self._ensure_traits(context, traits) url = '/resource_providers/%s/traits' % rp_uuid # NOTE(efried): Don't use the DELETE API when traits is empty, because # that method doesn't return content, and we need to update the cached # provider tree with the new generation. traits = list(traits) if traits else [] if generation is None: generation = self._provider_tree.data(rp_uuid).generation payload = { 'resource_provider_generation': generation, 'traits': traits, } resp = self.put(url, payload, version='1.6', global_request_id=context.global_id) if resp.status_code == 200: json = resp.json() self._provider_tree.update_traits( rp_uuid, json['traits'], generation=json['resource_provider_generation']) return # Some error occurred; log it msg = ("[%(placement_req_id)s] Failed to update traits to " "[%(traits)s] for resource provider with UUID %(uuid)s. Got " "%(status_code)d: %(err_text)s") args = { 'placement_req_id': get_placement_request_id(resp), 'uuid': rp_uuid, 'traits': ','.join(traits), 'status_code': resp.status_code, 'err_text': resp.text, } LOG.error(msg, args) # If a conflict, raise special conflict exception if resp.status_code == 409: raise exception.ResourceProviderUpdateConflict( uuid=rp_uuid, generation=generation, error=resp.text) # Otherwise, raise generic exception raise exception.ResourceProviderUpdateFailed(url=url, error=resp.text) @safe_connect def set_aggregates_for_provider(self, context, rp_uuid, aggregates, use_cache=True, generation=None): """Replace a provider's aggregates with those specified. The provider must exist - this method does not attempt to create it. :param context: The security context :param rp_uuid: The UUID of the provider whose aggregates are to be updated. :param aggregates: Iterable of aggregates to set on the provider. :param use_cache: If False, indicates not to update the cache of resource providers. :param generation: Resource provider generation. Required if use_cache is False. :raises: ResourceProviderUpdateConflict if the provider's generation doesn't match the generation in the cache. Callers may choose to retrieve the provider and its associations afresh and redrive this operation. :raises: ResourceProviderUpdateFailed on any other placement API failure. """ # If a generation is specified, it trumps whatever's in the cache. # Otherwise... if generation is None: if use_cache: generation = self._provider_tree.data(rp_uuid).generation else: # Either cache or generation is required raise ValueError( _("generation is required with use_cache=False")) # Check whether aggregates need updating. We can only do this if we # have a cache entry with a matching generation. 
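        # Worked example (comment only): with a cached entry for rp_uuid at
        # generation 7 holding aggregates {'agg1'}, a call passing
        # generation=7 and aggregates=['agg1'] returns here without touching
        # placement; a different generation or aggregate set falls through to
        # the PUT below.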
try: if (self._provider_tree.data(rp_uuid).generation == generation and not self._provider_tree.have_aggregates_changed( rp_uuid, aggregates)): return except ValueError: # Not found in the cache; proceed pass url = '/resource_providers/%s/aggregates' % rp_uuid aggregates = list(aggregates) if aggregates else [] payload = {'aggregates': aggregates, 'resource_provider_generation': generation} resp = self.put(url, payload, version=AGGREGATE_GENERATION_VERSION, global_request_id=context.global_id) if resp.status_code == 200: # Try to update the cache regardless. If use_cache=False, ignore # any failures. try: data = resp.json() self._provider_tree.update_aggregates( rp_uuid, data['aggregates'], generation=data['resource_provider_generation']) except ValueError: if use_cache: # The entry should've been there raise return # Some error occurred; log it msg = ("[%(placement_req_id)s] Failed to update aggregates to " "[%(aggs)s] for resource provider with UUID %(uuid)s. Got " "%(status_code)d: %(err_text)s") args = { 'placement_req_id': get_placement_request_id(resp), 'uuid': rp_uuid, 'aggs': ','.join(aggregates), 'status_code': resp.status_code, 'err_text': resp.text, } # If a conflict, invalidate the cache and raise special exception if resp.status_code == 409: # No reason to condition cache invalidation on use_cache - if we # got a 409, the cache entry is still bogus if it exists; and the # below is a no-op if it doesn't. try: self._provider_tree.remove(rp_uuid) except ValueError: pass self._association_refresh_time.pop(rp_uuid, None) LOG.warning(msg, args) raise exception.ResourceProviderUpdateConflict( uuid=rp_uuid, generation=generation, error=resp.text) # Otherwise, raise generic exception LOG.error(msg, args) raise exception.ResourceProviderUpdateFailed(url=url, error=resp.text) @safe_connect def _ensure_resource_classes(self, context, names): """Make sure resource classes exist. :param context: The security context :param names: Iterable of string names of the resource classes to check/create. Must not be None. :raises: exception.InvalidResourceClass if an attempt is made to create an invalid resource class. """ # Placement API version that supports PUT /resource_classes/CUSTOM_* # to create (or validate the existence of) a consumer-specified # resource class. version = '1.7' to_ensure = set(n for n in names if n.startswith(orc.CUSTOM_NAMESPACE)) for name in to_ensure: # no payload on the put request resp = self.put( "/resource_classes/%s" % name, None, version=version, global_request_id=context.global_id) if not resp: msg = ("Failed to ensure resource class record with placement " "API for resource class %(rc_name)s. Got " "%(status_code)d: %(err_text)s.") args = { 'rc_name': name, 'status_code': resp.status_code, 'err_text': resp.text, } LOG.error(msg, args) raise exception.InvalidResourceClass(resource_class=name) def _reshape(self, context, inventories, allocations): """Perform atomic inventory & allocation data migration. :param context: The security context :param inventories: A dict, keyed by resource provider UUID, of: { "inventories": { inventory dicts, keyed by resource class }, "resource_provider_generation": $RP_GEN } :param allocations: A dict, keyed by consumer UUID, of: { "project_id": $PROJ_ID, "user_id": $USER_ID, "consumer_generation": $CONSUMER_GEN, "allocations": { $RP_UUID: { "resources": { $RC: $AMOUNT, ... } }, ... } } :return: The Response object representing a successful API call. :raises: ReshapeFailed if the POST /reshaper request fails. 
:raises: keystoneauth1.exceptions.ClientException if placement API communication fails. """ # We have to make sure any new resource classes exist for invs in inventories.values(): self._ensure_resource_classes(context, list(invs['inventories'])) payload = {"inventories": inventories, "allocations": allocations} resp = self.post('/reshaper', payload, version=RESHAPER_VERSION, global_request_id=context.global_id) if not resp: if resp.status_code == 409: err = resp.json()['errors'][0] if err['code'] == 'placement.concurrent_update': raise exception.PlacementReshapeConflict(error=resp.text) raise exception.ReshapeFailed(error=resp.text) return resp def _set_up_and_do_reshape(self, context, old_tree, new_tree, allocations): LOG.info("Performing resource provider inventory and allocation " "data migration.") new_uuids = new_tree.get_provider_uuids() inventories = {} for rp_uuid in new_uuids: data = new_tree.data(rp_uuid) inventories[rp_uuid] = { "inventories": data.inventory, "resource_provider_generation": data.generation } # Even though we're going to delete them immediately, we still want # to send "inventory changes" for to-be-removed providers in this # reshape request so they're done atomically. This prevents races # where the scheduler could allocate between here and when we # delete the providers. to_remove = set(old_tree.get_provider_uuids()) - set(new_uuids) for rp_uuid in to_remove: inventories[rp_uuid] = { "inventories": {}, "resource_provider_generation": old_tree.data(rp_uuid).generation } # Now we're ready to POST /reshaper. This can raise ReshapeFailed, # but we also need to convert any other exception (including e.g. # PlacementAPIConnectFailure) to ReshapeFailed because we want any # failure here to be fatal to the caller. try: self._reshape(context, inventories, allocations) except (exception.ReshapeFailed, exception.PlacementReshapeConflict): raise except Exception as e: # Make sure the original stack trace gets logged. LOG.exception('Reshape failed') raise exception.ReshapeFailed(error=e) def update_from_provider_tree(self, context, new_tree, allocations=None): """Flush changes from a specified ProviderTree back to placement. The specified ProviderTree is compared against the local cache. Any changes are flushed back to the placement service. Upon successful completion, the local cache should reflect the specified ProviderTree. This method is best-effort and not atomic. When exceptions are raised, it is possible that some of the changes have been flushed back, leaving the placement database in an inconsistent state. This should be recoverable through subsequent calls. :param context: The security context :param new_tree: A ProviderTree instance representing the desired state of providers in placement. :param allocations: A dict, keyed by consumer UUID, of allocation records of the form returned by GET /allocations/{consumer_uuid} representing the comprehensive final picture of the allocations for each consumer therein. A value of None indicates that no reshape is being performed. :raises: ResourceProviderUpdateConflict if a generation conflict was encountered - i.e. we are attempting to update placement based on a stale view of it. :raises: ResourceProviderSyncFailed if any errors were encountered attempting to perform the necessary API operations, except reshape (see below). :raises: ReshapeFailed if a reshape was signaled (allocations not None) and it fails for any reason. 
:raises: keystoneauth1.exceptions.base.ClientException on failure to communicate with the placement API """ # NOTE(efried): We currently do not handle the "rename" case. This is # where new_tree contains a provider named Y whose UUID already exists # but is named X. @contextlib.contextmanager def catch_all(rp_uuid): """Convert all "expected" exceptions from placement API helpers to ResourceProviderSyncFailed* and invalidate the caches for the tree around `rp_uuid`. * Except ResourceProviderUpdateConflict, which signals the caller to redrive the operation; and ReshapeFailed, which triggers special error handling behavior in the resource tracker and compute manager. """ # TODO(efried): Make a base exception class from which all these # can inherit. helper_exceptions = ( exception.InvalidResourceClass, exception.ResourceProviderAggregateRetrievalFailed, exception.ResourceProviderDeletionFailed, exception.ResourceProviderInUse, exception.ResourceProviderRetrievalFailed, exception.ResourceProviderTraitRetrievalFailed, exception.ResourceProviderUpdateFailed, exception.TraitCreationFailed, exception.TraitRetrievalFailed, # NOTE(efried): We do not trap/convert ReshapeFailed - that one # needs to bubble up right away and be handled specially. ) try: yield except exception.ResourceProviderUpdateConflict: # Invalidate the tree around the failing provider and reraise # the conflict exception. This signals the resource tracker to # redrive the update right away rather than waiting until the # next periodic. self._clear_provider_cache_for_tree(rp_uuid) raise except helper_exceptions: # Invalidate the relevant part of the cache. It gets rebuilt on # the next pass. self._clear_provider_cache_for_tree(rp_uuid) raise exception.ResourceProviderSyncFailed() # Helper methods herein will be updating the local cache (this is # intentional) so we need to grab up front any data we need to operate # on in its "original" form. old_tree = self._provider_tree old_uuids = old_tree.get_provider_uuids() new_uuids = new_tree.get_provider_uuids() uuids_to_add = set(new_uuids) - set(old_uuids) uuids_to_remove = set(old_uuids) - set(new_uuids) # In case a reshape is happening, we first have to create (or load) any # "new" providers. # We have to do additions in top-down order, so we don't error # attempting to create a child before its parent exists. for uuid in new_uuids: if uuid not in uuids_to_add: continue provider = new_tree.data(uuid) with catch_all(uuid): self._ensure_resource_provider( context, uuid, name=provider.name, parent_provider_uuid=provider.parent_uuid) # We have to stuff the freshly-created provider's generation # into the new_tree so we don't get conflicts updating its # inventories etc. later. # TODO(efried): We don't have a good way to set the generation # independently; this is a hack. new_tree.update_inventory( uuid, new_tree.data(uuid).inventory, generation=self._provider_tree.data(uuid).generation) # If we need to reshape, do it here. if allocations is not None: # NOTE(efried): We do not catch_all here, because ReshapeFailed # needs to bubble up right away and be handled specially. 
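            # Illustrative shape of `allocations` for a reshape (comment
            # only), matching the format documented on _reshape(); the UUIDs
            # and amounts are placeholders:
            #     {consumer_uuid: {'project_id': proj, 'user_id': user,
            #                      'consumer_generation': 1,
            #                      'allocations': {rp_uuid: {'resources':
            #                          {'VCPU': 2, 'MEMORY_MB': 512}}}}}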
try: self._set_up_and_do_reshape( context, old_tree, new_tree, allocations) except exception.PlacementReshapeConflict: # The conflict means we need to invalidate the local caches and # let the retry mechanism in _update_to_placement to re-drive # the reshape top of the fresh data with excutils.save_and_reraise_exception(): self.clear_provider_cache() # The reshape updated provider generations, so the ones we have in # the cache are now stale. The inventory update below will short # out, but we would still bounce with a provider generation # conflict on the trait and aggregate updates. for uuid in new_uuids: # TODO(efried): GET /resource_providers?uuid=in:[list] would be # handy here. Meanwhile, this is an already-written, if not # obvious, way to refresh provider generations in the cache. with catch_all(uuid): self._refresh_and_get_inventory(context, uuid) # Now we can do provider deletions, because we should have moved any # allocations off of them via reshape. # We have to do deletions in bottom-up order, so we don't error # attempting to delete a parent who still has children. (We get the # UUIDs in bottom-up order by reversing old_uuids, which was given to # us in top-down order per ProviderTree.get_provider_uuids().) for uuid in reversed(old_uuids): if uuid not in uuids_to_remove: continue with catch_all(uuid): self._delete_provider(uuid) # At this point the local cache should have all the same providers as # new_tree. Whether we added them or not, walk through and diff/flush # inventories, traits, and aggregates as necessary. Note that, if we # reshaped above, any inventory changes have already been done. But the # helper methods are set up to check and short out when the relevant # property does not differ from what's in the cache. # If we encounter any error and remove a provider from the cache, all # its descendants are also removed, and set_*_for_provider methods on # it wouldn't be able to get started. Walking the tree in bottom-up # order ensures we at least try to process all of the providers. (We # get the UUIDs in bottom-up order by reversing new_uuids, which was # given to us in top-down order per ProviderTree.get_provider_uuids().) for uuid in reversed(new_uuids): pd = new_tree.data(uuid) with catch_all(pd.uuid): self.set_inventory_for_provider( context, pd.uuid, pd.inventory) self.set_aggregates_for_provider( context, pd.uuid, pd.aggregates) self.set_traits_for_provider(context, pd.uuid, pd.traits) # TODO(efried): Cut users of this method over to get_allocs_for_consumer def get_allocations_for_consumer(self, context, consumer): """Legacy method for allocation retrieval. Callers should move to using get_allocs_for_consumer, which handles errors properly and returns the entire payload. :param context: The nova.context.RequestContext auth context :param consumer: UUID of the consumer resource :returns: A dict of the form: { $RP_UUID: { "generation": $RP_GEN, "resources": { $RESOURCE_CLASS: $AMOUNT ... }, }, ... } """ try: return self.get_allocs_for_consumer( context, consumer)['allocations'] except ks_exc.ClientException as e: LOG.warning("Failed to get allocations for consumer %(consumer)s: " "%(error)s", {'consumer': consumer, 'error': e}) # Because this is what @safe_connect did return None except exception.ConsumerAllocationRetrievalFailed as e: LOG.warning(e) # Because this is how we used to treat non-200 return {} def get_allocs_for_consumer(self, context, consumer): """Makes a GET /allocations/{consumer} call to Placement. 
:param context: The nova.context.RequestContext auth context :param consumer: UUID of the consumer resource :return: Dict of the form: { "allocations": { $RP_UUID: { "generation": $RP_GEN, "resources": { $RESOURCE_CLASS: $AMOUNT ... }, }, ... }, "consumer_generation": $CONSUMER_GEN, "project_id": $PROJ_ID, "user_id": $USER_ID, } :raises: keystoneauth1.exceptions.base.ClientException on failure to communicate with the placement API :raises: ConsumerAllocationRetrievalFailed if the placement API call fails """ resp = self.get('/allocations/%s' % consumer, version=CONSUMER_GENERATION_VERSION, global_request_id=context.global_id) if not resp: # TODO(efried): Use code/title/detail to make a better exception raise exception.ConsumerAllocationRetrievalFailed( consumer_uuid=consumer, error=resp.text) return resp.json() # NOTE(jaypipes): Currently, this method is ONLY used in three places: # 1. By the scheduler to allocate resources on the selected destination # hosts. # 2. By the conductor LiveMigrationTask to allocate resources on a forced # destination host. In this case, the source node allocations have # already been moved to the migration record so the instance should not # have allocations and _move_operation_alloc_request will not be called. # 3. By the conductor ComputeTaskManager to allocate resources on a forced # destination host during evacuate. This case will call the # _move_operation_alloc_request method. # This method should not be called by the resource tracker. @safe_connect @retries def claim_resources(self, context, consumer_uuid, alloc_request, project_id, user_id, allocation_request_version, consumer_generation=None): """Creates allocation records for the supplied instance UUID against the supplied resource providers. We check to see if resources have already been claimed for this consumer. If so, we assume that a move operation is underway and the scheduler is attempting to claim resources against the new (destination host). In order to prevent compute nodes currently performing move operations from being scheduled to improperly, we create a "doubled-up" allocation that consumes resources on *both* the source and the destination host during the move operation. :param context: The security context :param consumer_uuid: The instance's UUID. :param alloc_request: The JSON body of the request to make to the placement's PUT /allocations API :param project_id: The project_id associated with the allocations. :param user_id: The user_id associated with the allocations. :param allocation_request_version: The microversion used to request the allocations. :param consumer_generation: The expected generation of the consumer. None if a new consumer is expected :returns: True if the allocations were created, False otherwise. :raise AllocationUpdateFailed: If consumer_generation in the alloc_request does not match with the placement view. 
""" # Ensure we don't change the supplied alloc request since it's used in # a loop within the scheduler against multiple instance claims ar = copy.deepcopy(alloc_request) url = '/allocations/%s' % consumer_uuid payload = ar # We first need to determine if this is a move operation and if so # create the "doubled-up" allocation that exists for the duration of # the move operation against both the source and destination hosts r = self.get(url, global_request_id=context.global_id, version=CONSUMER_GENERATION_VERSION) if r.status_code == 200: body = r.json() current_allocs = body['allocations'] if current_allocs: if 'consumer_generation' not in ar: # this is non-forced evacuation. Evacuation does not use # the migration.uuid to hold the source host allocation # therefore when the scheduler calls claim_resources() then # the two allocations need to be combined. Scheduler does # not know that this is not a new consumer as it only sees # allocation candidates. # Therefore we need to use the consumer generation from # the above GET. # If between the GET and the PUT the consumer generation # changes in placement then we raise # AllocationUpdateFailed. # NOTE(gibi): This only detect a small portion of possible # cases when allocation is modified outside of the this # code path. The rest can only be detected if nova would # cache at least the consumer generation of the instance. consumer_generation = body['consumer_generation'] else: # this is forced evacuation and the caller # claim_resources_on_destination() provides the consumer # generation it sees in the conductor when it generates the # request. consumer_generation = ar['consumer_generation'] payload = _move_operation_alloc_request(current_allocs, ar) payload['project_id'] = project_id payload['user_id'] = user_id if (versionutils.convert_version_to_tuple( allocation_request_version) >= versionutils.convert_version_to_tuple( CONSUMER_GENERATION_VERSION)): payload['consumer_generation'] = consumer_generation r = self._put_allocations( context, consumer_uuid, payload, version=allocation_request_version) if r.status_code != 204: err = r.json()['errors'][0] if err['code'] == 'placement.concurrent_update': # NOTE(jaypipes): Yes, it sucks doing string comparison like # this but we have no error codes, only error messages. # TODO(gibi): Use more granular error codes when available if 'consumer generation conflict' in err['detail']: reason = ('another process changed the consumer %s after ' 'the report client read the consumer state ' 'during the claim ' % consumer_uuid) raise exception.AllocationUpdateFailed( consumer_uuid=consumer_uuid, error=reason) # this is not a consumer generation conflict so it can only be # a resource provider generation conflict. The caller does not # provide resource provider generation so this is just a # placement internal race. We can blindly retry locally. reason = ('another process changed the resource providers ' 'involved in our attempt to put allocations for ' 'consumer %s' % consumer_uuid) raise Retry('claim_resources', reason) return r.status_code == 204 def add_resources_to_instance_allocation( self, context: nova_context.RequestContext, consumer_uuid: str, resources: ty.Dict[str, ty.Dict[str, ty.Dict[str, int]]], ) -> None: """Adds certain resources to the current allocation of the consumer. :param context: the request context :param consumer_uuid: the uuid of the consumer to update :param resources: a dict of resources in the format of allocation request. 
E.g.: { : { 'resources': { : amount, : amount } } : { 'resources': { : amount } } } :raises AllocationUpdateFailed: if there was multiple generation conflict and we run out of retires. :raises ConsumerAllocationRetrievalFailed: If the current allocation cannot be read from placement. :raises: keystoneauth1.exceptions.base.ClientException on failure to communicate with the placement API """ if not resources: # nothing to do return # This either raises on error, or returns fails if we run out of # retries due to conflict. Convert that return value to an exception # too. if not self._add_resources_to_instance_allocation( context, consumer_uuid, resources): error_reason = _( "Cannot add resources %s to the allocation due to multiple " "successive generation conflicts in placement.") raise exception.AllocationUpdateFailed( consumer_uuid=consumer_uuid, error=error_reason % resources) @retries def _add_resources_to_instance_allocation( self, context: nova_context.RequestContext, consumer_uuid: str, resources: ty.Dict[str, ty.Dict[str, ty.Dict[str, int]]], ) -> bool: current_allocs = self.get_allocs_for_consumer(context, consumer_uuid) for rp_uuid in resources: if rp_uuid not in current_allocs['allocations']: current_allocs['allocations'][rp_uuid] = {'resources': {}} alloc_on_rp = current_allocs['allocations'][rp_uuid]['resources'] for rc, amount in resources[rp_uuid]['resources'].items(): if rc in alloc_on_rp: alloc_on_rp[rc] += amount else: alloc_on_rp[rc] = amount r = self._put_allocations(context, consumer_uuid, current_allocs) if r.status_code != 204: err = r.json()['errors'][0] if err['code'] == 'placement.concurrent_update': reason = ( "another process changed the resource providers or the " "consumer involved in our attempt to update allocations " "for consumer %s so we cannot add resources %s to the " "current allocation %s" % (consumer_uuid, resources, current_allocs)) raise Retry( '_add_resources_to_instance_allocation', reason) raise exception.AllocationUpdateFailed( consumer_uuid=consumer_uuid, error=err['detail']) return True def remove_resources_from_instance_allocation( self, context: nova_context.RequestContext, consumer_uuid: str, resources: ty.Dict[str, ty.Dict[str, ty.Dict[str, int]]] ) -> None: """Removes certain resources from the current allocation of the consumer. :param context: the request context :param consumer_uuid: the uuid of the consumer to update :param resources: a dict of resources in allocation request format E.g.: { : { 'resources': { : amount, : amount } } : { 'resources': { : amount } } } :raises AllocationUpdateFailed: if the requested resource cannot be removed from the current allocation (e.g. rp is missing from the allocation) or there was multiple generation conflict and we run out of retires. :raises ConsumerAllocationRetrievalFailed: If the current allocation cannot be read from placement. :raises: keystoneauth1.exceptions.base.ClientException on failure to communicate with the placement API """ # NOTE(gibi): It is just a small wrapper to raise instead of return # if we run out of retries. if not self._remove_resources_from_instance_allocation( context, consumer_uuid, resources): error_reason = _("Cannot remove resources %s from the allocation " "due to multiple successive generation conflicts " "in placement. 
To clean up the leaked resource " "allocation you can use nova-manage placement " "audit.") raise exception.AllocationUpdateFailed( consumer_uuid=consumer_uuid, error=error_reason % resources) @retries def _remove_resources_from_instance_allocation( self, context: nova_context.RequestContext, consumer_uuid: str, resources: ty.Dict[str, ty.Dict[str, ty.Dict[str, int]]] ) -> bool: if not resources: # Nothing to remove so do not query or update allocation in # placement. # The True value is only here because the retry decorator returns # False when runs out of retries. It would be nicer to raise in # that case too. return True current_allocs = self.get_allocs_for_consumer(context, consumer_uuid) if not current_allocs['allocations']: error_reason = _("Cannot remove resources %(resources)s from " "allocation %(allocations)s. The allocation is " "empty.") raise exception.AllocationUpdateFailed( consumer_uuid=consumer_uuid, error=error_reason % {'resources': resources, 'allocations': current_allocs}) try: for rp_uuid, resources_to_remove in resources.items(): allocation_on_rp = current_allocs['allocations'][rp_uuid] for rc, value in resources_to_remove['resources'].items(): allocation_on_rp['resources'][rc] -= value if allocation_on_rp['resources'][rc] < 0: error_reason = _( "Cannot remove resources %(resources)s from " "allocation %(allocations)s. There are not enough " "allocated resources left on %(rp_uuid)s resource " "provider to remove %(amount)d amount of " "%(resource_class)s resources.") raise exception.AllocationUpdateFailed( consumer_uuid=consumer_uuid, error=error_reason % {'resources': resources, 'allocations': current_allocs, 'rp_uuid': rp_uuid, 'amount': value, 'resource_class': rc}) if allocation_on_rp['resources'][rc] == 0: # if no allocation left for this rc then remove it # from the allocation del allocation_on_rp['resources'][rc] except KeyError as e: error_reason = _("Cannot remove resources %(resources)s from " "allocation %(allocations)s. Key %(missing_key)s " "is missing from the allocation.") # rp_uuid is missing from the allocation or resource class is # missing from the allocation raise exception.AllocationUpdateFailed( consumer_uuid=consumer_uuid, error=error_reason % {'resources': resources, 'allocations': current_allocs, 'missing_key': e}) # we have to remove the rps from the allocation that has no resources # any more current_allocs['allocations'] = { rp_uuid: alloc for rp_uuid, alloc in current_allocs['allocations'].items() if alloc['resources']} r = self._put_allocations( context, consumer_uuid, current_allocs) if r.status_code != 204: err = r.json()['errors'][0] if err['code'] == 'placement.concurrent_update': reason = ('another process changed the resource providers or ' 'the consumer involved in our attempt to update ' 'allocations for consumer %s so we cannot remove ' 'resources %s from the current allocation %s' % (consumer_uuid, resources, current_allocs)) # NOTE(gibi): automatic retry is meaningful if we can still # remove the resources from the updated allocations. Retry # works here as this function (re)queries the allocations. raise Retry( 'remove_resources_from_instance_allocation', reason) # It is only here because the retry decorator returns False when runs # out of retries. It would be nicer to raise in that case too. return True def remove_provider_tree_from_instance_allocation(self, context, consumer_uuid, root_rp_uuid): """Removes every allocation from the consumer that is on the specified provider tree. 
Note that this function does not try to remove allocations from sharing providers. :param context: The security context :param consumer_uuid: The UUID of the consumer to manipulate :param root_rp_uuid: The root of the provider tree :raises: keystoneauth1.exceptions.base.ClientException on failure to communicate with the placement API :raises: ConsumerAllocationRetrievalFailed if this call cannot read the current state of the allocations from placement :raises: ResourceProviderRetrievalFailed if it cannot collect the RPs in the tree specified by root_rp_uuid. """ current_allocs = self.get_allocs_for_consumer(context, consumer_uuid) if not current_allocs['allocations']: LOG.error("Expected to find current allocations for %s, but " "found none.", consumer_uuid) # TODO(gibi): do not return False as none of the callers # do anything with the return value except log return False rps = self.get_providers_in_tree(context, root_rp_uuid) rp_uuids = [rp['uuid'] for rp in rps] # go through the current allocations and remove every RP from it that # belongs to the RP tree identified by the root_rp_uuid parameter has_changes = False for rp_uuid in rp_uuids: changed = bool( current_allocs['allocations'].pop(rp_uuid, None)) has_changes = has_changes or changed # If nothing changed then don't do anything if not has_changes: LOG.warning( "Expected to find allocations referencing resource " "provider tree rooted at %s for %s, but found none.", root_rp_uuid, consumer_uuid) # TODO(gibi): do not return a value as none of the callers # do anything with the return value except logging return True r = self._put_allocations(context, consumer_uuid, current_allocs) # TODO(gibi): do not return a value as none of the callers # do anything with the return value except logging return r.status_code == 204 def _put_allocations( self, context, consumer_uuid, payload, version=CONSUMER_GENERATION_VERSION): url = '/allocations/%s' % consumer_uuid r = self.put(url, payload, version=version, global_request_id=context.global_id) if r.status_code != 204: LOG.warning("Failed to save allocation for %s. Got HTTP %s: %s", consumer_uuid, r.status_code, r.text) return r @safe_connect @retries def move_allocations(self, context, source_consumer_uuid, target_consumer_uuid): """Move allocations from one consumer to the other Note that this call moves the current allocation from the source consumer to the target consumer. If parallel update happens on either consumer during this call then Placement will detect that and this code will raise AllocationMoveFailed. If you want to move a known piece of allocation from source to target then this function might not be what you want as it always moves what source has in Placement. If the target consumer has allocations but the source consumer does not, this method assumes the allocations were already moved and returns True. :param context: The security context :param source_consumer_uuid: the UUID of the consumer from which allocations are moving :param target_consumer_uuid: the UUID of the target consumer for the allocations :returns: True if the move was successful (or already done), False otherwise. :raises AllocationMoveFailed: If the source or the target consumer has been modified while this call tries to move allocations. 
""" source_alloc = self.get_allocs_for_consumer( context, source_consumer_uuid) target_alloc = self.get_allocs_for_consumer( context, target_consumer_uuid) if target_alloc and target_alloc['allocations']: # Check to see if the source allocations still exist because if # they don't they might have already been moved to the target. if not (source_alloc and source_alloc['allocations']): LOG.info('Allocations not found for consumer %s; assuming ' 'they were already moved to consumer %s', source_consumer_uuid, target_consumer_uuid) return True LOG.debug('Overwriting current allocation %(allocation)s on ' 'consumer %(consumer)s', {'allocation': target_alloc, 'consumer': target_consumer_uuid}) new_allocs = { source_consumer_uuid: { # 'allocations': {} means we are removing the allocation from # the source consumer 'allocations': {}, 'project_id': source_alloc['project_id'], 'user_id': source_alloc['user_id'], 'consumer_generation': source_alloc['consumer_generation']}, target_consumer_uuid: { 'allocations': source_alloc['allocations'], # NOTE(gibi): Is there any case when we need to keep the # project_id and user_id of the target allocation that we are # about to overwrite? 'project_id': source_alloc['project_id'], 'user_id': source_alloc['user_id'], 'consumer_generation': target_alloc.get('consumer_generation') } } r = self.post('/allocations', new_allocs, version=CONSUMER_GENERATION_VERSION, global_request_id=context.global_id) if r.status_code != 204: err = r.json()['errors'][0] if err['code'] == 'placement.concurrent_update': # NOTE(jaypipes): Yes, it sucks doing string comparison like # this but we have no error codes, only error messages. # TODO(gibi): Use more granular error codes when available if 'consumer generation conflict' in err['detail']: raise exception.AllocationMoveFailed( source_consumer=source_consumer_uuid, target_consumer=target_consumer_uuid, error=r.text) reason = ('another process changed the resource providers ' 'involved in our attempt to post allocations for ' 'consumer %s' % target_consumer_uuid) raise Retry('move_allocations', reason) else: LOG.warning( 'Unable to post allocations for consumer ' '%(uuid)s (%(code)i %(text)s)', {'uuid': target_consumer_uuid, 'code': r.status_code, 'text': r.text}) return r.status_code == 204 @retries def put_allocations(self, context, consumer_uuid, payload): """Creates allocation records for the supplied consumer UUID based on the provided allocation dict :param context: The security context :param consumer_uuid: The instance's UUID. :param payload: Dict in the format expected by the placement PUT /allocations/{consumer_uuid} API :returns: True if the allocations were created, False otherwise. :raises: Retry if the operation should be retried due to a concurrent resource provider update. :raises: AllocationUpdateFailed if placement returns a consumer generation conflict :raises: PlacementAPIConnectFailure on failure to communicate with the placement API """ try: r = self._put_allocations(context, consumer_uuid, payload) except ks_exc.ClientException: raise exception.PlacementAPIConnectFailure() if r.status_code != 204: err = r.json()['errors'][0] # NOTE(jaypipes): Yes, it sucks doing string comparison like this # but we have no error codes, only error messages. 
# TODO(gibi): Use more granular error codes when available if err['code'] == 'placement.concurrent_update': if 'consumer generation conflict' in err['detail']: raise exception.AllocationUpdateFailed( consumer_uuid=consumer_uuid, error=err['detail']) # this is not a consumer generation conflict so it can only be # a resource provider generation conflict. The caller does not # provide resource provider generation so this is just a # placement internal race. We can blindly retry locally. reason = ('another process changed the resource providers ' 'involved in our attempt to put allocations for ' 'consumer %s' % consumer_uuid) raise Retry('put_allocations', reason) return r.status_code == 204 @safe_connect def delete_allocation_for_instance( self, context, uuid, consumer_type='instance', force=False ): """Delete the instance allocation from placement :param context: The security context :param uuid: the instance or migration UUID which will be used as the consumer UUID towards placement :param consumer_type: The type of the consumer specified by uuid. 'instance' or 'migration' (Default: instance) :param force: True if the allocations should be deleted without regard to consumer generation conflicts, False will attempt to GET and PUT empty allocations and use the consumer generation which could result in a conflict and need to retry the operation. :return: Returns True if the allocation is successfully deleted by this call. Returns False if the allocation does not exist. :raises AllocationDeleteFailed: If the allocation cannot be read from placement (if force=False), is changed by another process while we tried to delete it (if force=False), or if some other server side error occurred (if force=True) """ url = '/allocations/%s' % uuid if force: # Do not bother with consumer generations, just delete the # allocations. r = self.delete(url, global_request_id=context.global_id) if r: LOG.info('Deleted allocations for %(consumer_type)s %(uuid)s', {'consumer_type': consumer_type, 'uuid': uuid}) return True # Check for 404 since we don't need to log a warning if we # tried to delete something which doesn't actually exist. if r.status_code != 404: LOG.warning( 'Unable to delete allocation for %(consumer_type)s ' '%(uuid)s: (%(code)i %(text)s)', {'consumer_type': consumer_type, 'uuid': uuid, 'code': r.status_code, 'text': r.text}) raise exception.AllocationDeleteFailed(consumer_uuid=uuid, error=r.text) return False # We read the consumer generation then try to put an empty allocation # for that consumer. If between the GET and the PUT the consumer # generation changes then we raise AllocationDeleteFailed. # NOTE(gibi): This only detect a small portion of possible cases when # allocation is modified outside of the delete code path. The rest can # only be detected if nova would cache at least the consumer generation # of the instance. # NOTE(gibi): placement does not return 404 for non-existing consumer # but returns an empty consumer instead. Putting an empty allocation to # that non-existing consumer won't be 404 or other error either. r = self.get(url, global_request_id=context.global_id, version=CONSUMER_GENERATION_VERSION) if not r: # at the moment there is no way placement returns a failure so we # could even delete this code LOG.warning('Unable to delete allocation for %(consumer_type)s ' '%(uuid)s. 
Got %(code)i while retrieving existing ' 'allocations: (%(text)s)', {'consumer_type': consumer_type, 'uuid': uuid, 'code': r.status_code, 'text': r.text}) raise exception.AllocationDeleteFailed(consumer_uuid=uuid, error=r.text) allocations = r.json() if allocations['allocations'] == {}: # the consumer did not exist in the first place LOG.debug('Cannot delete allocation for %s consumer in placement ' 'as consumer does not exist', uuid) return False # removing all resources from the allocation will auto delete the # consumer in placement allocations['allocations'] = {} r = self.put(url, allocations, global_request_id=context.global_id, version=CONSUMER_GENERATION_VERSION) if r.status_code == 204: LOG.info('Deleted allocation for %(consumer_type)s %(uuid)s', {'consumer_type': consumer_type, 'uuid': uuid}) return True else: LOG.warning('Unable to delete allocation for %(consumer_type)s ' '%(uuid)s: (%(code)i %(text)s)', {'consumer_type': consumer_type, 'uuid': uuid, 'code': r.status_code, 'text': r.text}) raise exception.AllocationDeleteFailed(consumer_uuid=uuid, error=r.text) def get_allocations_for_resource_provider(self, context, rp_uuid): """Retrieves the allocations for a specific provider. :param context: The nova.context.RequestContext auth context :param rp_uuid: The UUID of the provider. :return: ProviderAllocInfo namedtuple. :raises: keystoneauth1.exceptions.base.ClientException on failure to communicate with the placement API :raises: ResourceProviderAllocationRetrievalFailed if the placement API call fails. """ url = '/resource_providers/%s/allocations' % rp_uuid resp = self.get(url, global_request_id=context.global_id) if not resp: raise exception.ResourceProviderAllocationRetrievalFailed( rp_uuid=rp_uuid, error=resp.text) data = resp.json() return ProviderAllocInfo(allocations=data['allocations']) def get_allocations_for_provider_tree(self, context, nodename): """Retrieve allocation records associated with all providers in the provider tree. This method uses the cache exclusively to discover providers. The caller must ensure that the cache is populated. This method is (and should remain) used exclusively in the reshaper flow by the resource tracker. Note that, in addition to allocations on providers in this compute node's provider tree, this method will return allocations on sharing providers if those allocations are associated with a consumer on this compute node. This is intentional and desirable. But it may also return allocations belonging to other hosts, e.g. if this is happening in the middle of an evacuate. ComputeDriver.update_provider_tree is supposed to ignore such allocations if they appear. :param context: The security context :param nodename: The name of a node for whose tree we are getting allocations. :returns: A dict, keyed by consumer UUID, of allocation records: { $CONSUMER_UUID: { # The shape of each "allocations" dict below is identical # to the return from GET /allocations/{consumer_uuid} "allocations": { $RP_UUID: { "generation": $RP_GEN, "resources": { $RESOURCE_CLASS: $AMOUNT, ... }, }, ... }, "project_id": $PROJ_ID, "user_id": $USER_ID, "consumer_generation": $CONSUMER_GEN, }, ... } :raises: keystoneauth1.exceptions.ClientException if placement API communication fails. :raises: ResourceProviderAllocationRetrievalFailed if a placement API call fails. :raises: ValueError if there's no provider with the specified nodename. """ # NOTE(efried): Despite our best efforts, there are some scenarios # (e.g. 
mid-evacuate) where we can still wind up returning allocations # against providers belonging to other hosts. We count on the consumer # of this information (i.e. the reshaper flow of a virt driver's # update_provider_tree) to ignore allocations associated with any # provider it is not reshaping - and it should never be reshaping # providers belonging to other hosts. # We can't get *all* allocations for associated sharing providers # because some of those will belong to consumers on other hosts. So we # have to discover all the consumers associated with the providers in # the "local" tree (we use the nodename to figure out which providers # are "local"). # All we want to do at this point is accumulate the set of consumers we # care about. consumers = set() # TODO(efried): This could be more efficient if placement offered an # operation like GET /allocations?rp_uuid=in: for u in self._provider_tree.get_provider_uuids(name_or_uuid=nodename): alloc_info = self.get_allocations_for_resource_provider(context, u) # The allocations dict is keyed by consumer UUID consumers.update(alloc_info.allocations) # Now get all the allocations for each of these consumers to build the # result. This will include allocations on sharing providers, which is # intentional and desirable. But it may also include allocations # belonging to other hosts, e.g. if this is happening in the middle of # an evacuate. ComputeDriver.update_provider_tree is supposed to ignore # such allocations if they appear. # TODO(efried): This could be more efficient if placement offered an # operation like GET /allocations?consumer_uuid=in: return {consumer: self.get_allocs_for_consumer(context, consumer) for consumer in consumers} def _remove_allocations_for_evacuated_instances(self, context, compute_node): filters = { 'source_compute': compute_node.host, 'status': ['done'], 'migration_type': fields.MigrationType.EVACUATION, } evacuations = objects.MigrationList.get_by_filters(context, filters) for evacuation in evacuations: if not self.remove_provider_tree_from_instance_allocation( context, evacuation.instance_uuid, compute_node.uuid): LOG.error("Failed to clean allocation of evacuated " "instance on the source node %s", compute_node.uuid, instance=evacuation.instance) def delete_resource_provider(self, context, compute_node, cascade=False): """Deletes the ResourceProvider record for the compute_node. :param context: The security context :param compute_node: The nova.objects.ComputeNode object that is the resource provider being deleted. :param cascade: Boolean value that, when True, will first delete any associated Allocation records for the compute node :raises: keystoneauth1.exceptions.base.ClientException on failure to communicate with the placement API """ nodename = compute_node.hypervisor_hostname host = compute_node.host rp_uuid = compute_node.uuid if cascade: # Delete any allocations for this resource provider. # Since allocations are by consumer, we get the consumers on this # host, which are its instances. instance_uuids = objects.InstanceList.get_uuids_by_host_and_node( context, host, nodename) for instance_uuid in instance_uuids: self.delete_allocation_for_instance( context, instance_uuid, force=True) # When an instance is evacuated, its allocation remains in # the source compute node until the node recovers again. # If the broken compute never recovered but instead it is # decommissioned, then we should delete the allocations of # successfully evacuated instances during service delete. 
self._remove_allocations_for_evacuated_instances(context, compute_node) # Ensure to delete resource provider in tree by top-down # traversable order. rps_to_refresh = self.get_providers_in_tree(context, rp_uuid) self._provider_tree.populate_from_iterable(rps_to_refresh) provider_uuids = self._provider_tree.get_provider_uuids_in_tree( rp_uuid) for provider_uuid in provider_uuids[::-1]: try: self._delete_provider(provider_uuid, global_request_id=context.global_id) except (exception.ResourceProviderInUse, exception.ResourceProviderDeletionFailed): # TODO(efried): Raise these. Right now this is being # left a no-op for backward compatibility. pass def invalidate_resource_provider(self, name_or_uuid, cacheonly=False): """Invalidate the cache for a resource provider. :param name_or_uuid: Name or UUID of the resource provider to look up. :param cacheonly: Only reset the cache but do not remove the provider from the tree """ if not cacheonly: try: self._provider_tree.remove(name_or_uuid) except ValueError: pass self._association_refresh_time.pop(name_or_uuid, None) def get_provider_by_name(self, context, name): """Queries the placement API for resource provider information matching a supplied name. :param context: The security context :param name: Name of the resource provider to look up :return: A dict of resource provider information including the provider's UUID and generation :raises: `exception.ResourceProviderNotFound` when no such provider was found :raises: PlacementAPIConnectFailure if there was an issue making the API call to placement. """ try: resp = self.get("/resource_providers?name=%s" % name, global_request_id=context.global_id) except ks_exc.ClientException as ex: LOG.error('Failed to get resource provider by name: %s. Error: %s', name, str(ex)) raise exception.PlacementAPIConnectFailure() if resp.status_code == 200: data = resp.json() records = data['resource_providers'] num_recs = len(records) if num_recs == 1: return records[0] elif num_recs > 1: msg = ("Found multiple resource provider records for resource " "provider name %(rp_name)s: %(rp_uuids)s. " "This should not happen.") LOG.warning(msg, { 'rp_name': name, 'rp_uuids': ','.join([r['uuid'] for r in records]) }) elif resp.status_code != 404: msg = ("Failed to retrieve resource provider information by name " "for resource provider %s. Got %d: %s") LOG.warning(msg, name, resp.status_code, resp.text) raise exception.ResourceProviderNotFound(name_or_uuid=name) @retrying.retry(stop_max_attempt_number=4, retry_on_exception=lambda e: isinstance( e, exception.ResourceProviderUpdateConflict)) def aggregate_add_host(self, context, agg_uuid, host_name=None, rp_uuid=None): """Looks up a resource provider by the supplied host name, and adds the aggregate with supplied UUID to that resource provider. :note: This method does NOT use the cached provider tree. It is only called from the Compute API when a nova host aggregate is modified :param context: The security context :param agg_uuid: UUID of the aggregate being modified :param host_name: Name of the nova-compute service worker to look up a resource provider for. Either host_name or rp_uuid is required. :param rp_uuid: UUID of the resource provider to add to the aggregate. Either host_name or rp_uuid is required. 
:raises: `exceptions.ResourceProviderNotFound` if no resource provider matching the host name could be found from the placement API :raises: `exception.ResourceProviderAggregateRetrievalFailed` when failing to get a provider's existing aggregates :raises: `exception.ResourceProviderUpdateFailed` if there was a failure attempting to save the provider aggregates :raises: `exception.ResourceProviderUpdateConflict` if a concurrent update to the provider was detected. :raises: PlacementAPIConnectFailure if there was an issue making an API call to placement. """ if host_name is None and rp_uuid is None: raise ValueError(_("Either host_name or rp_uuid is required")) if rp_uuid is None: rp_uuid = self.get_provider_by_name(context, host_name)['uuid'] # Now attempt to add the aggregate to the resource provider. We don't # want to overwrite any other aggregates the provider may be associated # with, however, so we first grab the list of aggregates for this # provider and add the aggregate to the list of aggregates it already # has agg_info = self._get_provider_aggregates(context, rp_uuid) # @safe_connect can make the above return None if agg_info is None: raise exception.PlacementAPIConnectFailure() existing_aggs, gen = agg_info.aggregates, agg_info.generation if agg_uuid in existing_aggs: return new_aggs = existing_aggs | set([agg_uuid]) self.set_aggregates_for_provider( context, rp_uuid, new_aggs, use_cache=False, generation=gen) @retrying.retry(stop_max_attempt_number=4, retry_on_exception=lambda e: isinstance( e, exception.ResourceProviderUpdateConflict)) def aggregate_remove_host(self, context, agg_uuid, host_name): """Looks up a resource provider by the supplied host name, and removes the aggregate with supplied UUID from that resource provider. :note: This method does NOT use the cached provider tree. It is only called from the Compute API when a nova host aggregate is modified :param context: The security context :param agg_uuid: UUID of the aggregate being modified :param host_name: Name of the nova-compute service worker to look up a resource provider for :raises: `exceptions.ResourceProviderNotFound` if no resource provider matching the host name could be found from the placement API :raises: `exception.ResourceProviderAggregateRetrievalFailed` when failing to get a provider's existing aggregates :raises: `exception.ResourceProviderUpdateFailed` if there was a failure attempting to save the provider aggregates :raises: `exception.ResourceProviderUpdateConflict` if a concurrent update to the provider was detected. :raises: PlacementAPIConnectFailure if there was an issue making an API call to placement. """ rp_uuid = self.get_provider_by_name(context, host_name)['uuid'] # Now attempt to remove the aggregate from the resource provider. 
We # don't want to overwrite any other aggregates the provider may be # associated with, however, so we first grab the list of aggregates for # this provider and remove the aggregate from the list of aggregates it # already has agg_info = self._get_provider_aggregates(context, rp_uuid) # @safe_connect can make the above return None if agg_info is None: raise exception.PlacementAPIConnectFailure() existing_aggs, gen = agg_info.aggregates, agg_info.generation if agg_uuid not in existing_aggs: return new_aggs = existing_aggs - set([agg_uuid]) self.set_aggregates_for_provider( context, rp_uuid, new_aggs, use_cache=False, generation=gen) @staticmethod def _handle_usages_error_from_placement(resp, project_id, user_id=None): msg = ('[%(placement_req_id)s] Failed to retrieve usages for project ' '%(project_id)s and user %(user_id)s. Got %(status_code)d: ' '%(err_text)s') args = {'placement_req_id': get_placement_request_id(resp), 'project_id': project_id, 'user_id': user_id or 'N/A', 'status_code': resp.status_code, 'err_text': resp.text} LOG.error(msg, args) raise exception.UsagesRetrievalFailed(project_id=project_id, user_id=user_id or 'N/A') @retrying.retry(stop_max_attempt_number=4, retry_on_exception=lambda e: isinstance( e, ks_exc.ConnectFailure)) def _get_usages(self, context, project_id, user_id=None): url = '/usages?project_id=%s' % project_id if user_id: url = ''.join([url, '&user_id=%s' % user_id]) return self.get(url, version=GET_USAGES_VERSION, global_request_id=context.global_id) def get_usages_counts_for_limits(self, context, project_id): """Get the usages counts for the purpose of enforcing unified limits The response from placement will not contain a resource class if there is no usage. i.e. if there is no usage, you get an empty dict. Note resources are counted as placement sees them, as such note that VCPUs and PCPUs will be counted independently. :param context: The request context :param project_id: The project_id to count across :return: A dict containing the project-scoped counts, for example: {'VCPU': 2, 'MEMORY_MB': 1024} :raises: `exception.UsagesRetrievalFailed` if a placement API call fails """ LOG.debug('Getting usages for project_id %s from placement', project_id) resp = self._get_usages(context, project_id) if resp: data = resp.json() return data['usages'] self._handle_usages_error_from_placement(resp, project_id) def get_usages_counts_for_quota(self, context, project_id, user_id=None): """Get the usages counts for the purpose of counting quota usage. :param context: The request context :param project_id: The project_id to count across :param user_id: The user_id to count across :returns: A dict containing the project-scoped and user-scoped counts if user_id is specified. For example: {'project': {'cores': , 'ram': }, {'user': {'cores': , 'ram': }, :raises: `exception.UsagesRetrievalFailed` if a placement API call fails """ def _get_core_usages(usages): """For backward-compatible with existing behavior, the quota limit on flavor.vcpus. That included the shared and dedicated CPU. So we need to count both the orc.VCPU and orc.PCPU at here. 
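            For example (illustrative): given
            usages = {'usages': {'VCPU': 2, 'PCPU': 4}} this helper
            returns 6; a resource class missing from the response simply
            counts as 0.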
""" vcpus = usages['usages'].get(orc.VCPU, 0) pcpus = usages['usages'].get(orc.PCPU, 0) return vcpus + pcpus total_counts: ty.Dict[str, ty.Dict[str, int]] = {'project': {}} # First query counts across all users of a project LOG.debug('Getting usages for project_id %s from placement', project_id) resp = self._get_usages(context, project_id) if resp: data = resp.json() # The response from placement will not contain a resource class if # there is no usage. We can consider a missing class to be 0 usage. cores = _get_core_usages(data) ram = data['usages'].get(orc.MEMORY_MB, 0) total_counts['project'] = {'cores': cores, 'ram': ram} else: self._handle_usages_error_from_placement(resp, project_id) # If specified, second query counts across one user in the project if user_id: LOG.debug('Getting usages for project_id %s and user_id %s from ' 'placement', project_id, user_id) resp = self._get_usages(context, project_id, user_id=user_id) if resp: data = resp.json() cores = _get_core_usages(data) ram = data['usages'].get(orc.MEMORY_MB, 0) total_counts['user'] = {'cores': cores, 'ram': ram} else: self._handle_usages_error_from_placement(resp, project_id, user_id=user_id) return total_counts ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.413609 nova-32.0.0/nova/scheduler/filters/0000775000175000017500000000000000000000000017153 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/filters/__init__.py0000664000175000017500000000776100000000000021277 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Scheduler host filters """ from oslo_log import log as logging from nova import filters LOG = logging.getLogger(__name__) class BaseHostFilter(filters.BaseFilter): """Base class for host filters.""" # This is set to True if this filter should be run for rebuild. # For example, with rebuild, we need to ask the scheduler if the # existing host is still legit for a rebuild with the new image and # other parameters. We care about running policy filters (i.e. # ImagePropertiesFilter) but not things that check usage on the # existing compute node, etc. # This also means that filters marked with RUN_ON_REBUILD = True cannot # filter on allocation candidates or need to handle the rebuild case # specially. RUN_ON_REBUILD = False def _filter_one(self, obj, spec): """Return True if the object passes the filter, otherwise False.""" # Do this here so we don't get scheduler.filters.utils from nova.scheduler import utils if not self.RUN_ON_REBUILD and utils.request_is_rebuild(spec): # If we don't filter, default to passing the host. return True else: # We are either a rebuild filter, in which case we always run, # or this request is not rebuild in which case all filters # should run. 
return self.host_passes(obj, spec) def host_passes(self, host_state, filter_properties): """Return True if the HostState passes the filter, otherwise False. Override this in a subclass. """ raise NotImplementedError() class CandidateFilterMixin: """Mixing that helps to implement a Filter that needs to filter host by Placement allocation candidates. """ def filter_candidates(self, host_state, filter_func): """Checks still viable allocation candidates by the filter_func and keep only those that are passing it. :param host_state: HostState object holding the list of still viable allocation candidates :param filter_func: A callable that takes an allocation candidate and returns a True like object if the candidate passed the filter or a False like object if it doesn't. """ good_candidates = [] for candidate in host_state.allocation_candidates: LOG.debug( f'{self.__class__.__name__} tries allocation candidate: ' f'{candidate}', ) if filter_func(candidate): LOG.debug( f'{self.__class__.__name__} accepted allocation ' f'candidate: {candidate}', ) good_candidates.append(candidate) else: LOG.debug( f'{self.__class__.__name__} rejected allocation ' f'candidate: {candidate}', ) host_state.allocation_candidates = good_candidates return good_candidates class HostFilterHandler(filters.BaseFilterHandler): def __init__(self): super(HostFilterHandler, self).__init__(BaseHostFilter) def all_filters(): """Return a list of filter classes found in this directory. This method is used as the default for available scheduler filters and should return a list of all filter classes available. """ return HostFilterHandler().get_all_classes() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/filters/affinity_filter.py0000664000175000017500000001432000000000000022703 0ustar00zuulzuul00000000000000# Copyright 2012, Piston Cloud Computing, Inc. # Copyright 2012, OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import netaddr from oslo_log import log as logging from nova.scheduler import filters from nova.scheduler.filters import utils LOG = logging.getLogger(__name__) class DifferentHostFilter(filters.BaseHostFilter): """Schedule the instance on a different host from a set of instances.""" # The hosts the instances are running on doesn't change within a request run_filter_once_per_request = True RUN_ON_REBUILD = False def host_passes(self, host_state, spec_obj): affinity_uuids = spec_obj.get_scheduler_hint('different_host') if affinity_uuids: overlap = utils.instance_uuids_overlap(host_state, affinity_uuids) return not overlap # With no different_host key return True class SameHostFilter(filters.BaseHostFilter): """Schedule the instance on the same host as another instance in a set of instances. 
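    For example (illustrative): a boot request carrying the scheduler
    hint {'same_host': ['<instance uuid>']} only passes hosts that are
    already running that instance.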
""" # The hosts the instances are running on doesn't change within a request run_filter_once_per_request = True RUN_ON_REBUILD = False def host_passes(self, host_state, spec_obj): affinity_uuids = spec_obj.get_scheduler_hint('same_host') if affinity_uuids: overlap = utils.instance_uuids_overlap(host_state, affinity_uuids) return overlap # With no same_host key return True class SimpleCIDRAffinityFilter(filters.BaseHostFilter): """Schedule the instance on a host with a particular cidr""" # The address of a host doesn't change within a request run_filter_once_per_request = True RUN_ON_REBUILD = False def host_passes(self, host_state, spec_obj): affinity_cidr = spec_obj.get_scheduler_hint('cidr', '/24') affinity_host_addr = spec_obj.get_scheduler_hint('build_near_host_ip') host_ip = host_state.host_ip if affinity_host_addr: affinity_net = netaddr.IPNetwork(str.join('', (affinity_host_addr, affinity_cidr))) return netaddr.IPAddress(host_ip) in affinity_net # We don't have an affinity host address. return True class _GroupAntiAffinityFilter(filters.BaseHostFilter): """Schedule the instance on a different host from a set of group hosts. """ RUN_ON_REBUILD = False def host_passes(self, host_state, spec_obj): # Only invoke the filter if 'anti-affinity' is configured instance_group = spec_obj.instance_group policy = instance_group.policy if instance_group else None if self.policy_name != policy: return True # NOTE(hanrong): Move operations like resize can check the same source # compute node where the instance is. That case, AntiAffinityFilter # must not return the source as a non-possible destination. if spec_obj.instance_uuid in host_state.instances.keys(): return True # The list of instances UUIDs on the given host instances = set(host_state.instances.keys()) # The list of instances UUIDs which are members of this group members = set(spec_obj.instance_group.members) # The set of instances on the host that are also members of this group servers_on_host = instances.intersection(members) rules = instance_group.rules if rules and 'max_server_per_host' in rules: max_server_per_host = rules['max_server_per_host'] else: max_server_per_host = 1 # Very old request specs don't have a full InstanceGroup with the UUID group_uuid = (instance_group.uuid if instance_group and 'uuid' in instance_group else 'n/a') LOG.debug("Group anti-affinity: check if the number of servers from " "group %(group_uuid)s on host %(host)s is less than " "%(max_server)s.", {'group_uuid': group_uuid, 'host': host_state.host, 'max_server': max_server_per_host}) # NOTE(yikun): If the number of servers from same group on this host # is less than the max_server_per_host, this filter will accept the # given host. In the default case(max_server_per_host=1), this filter # will accept the given host if there are 0 servers from the group # already on this host. return len(servers_on_host) < max_server_per_host class ServerGroupAntiAffinityFilter(_GroupAntiAffinityFilter): def __init__(self): self.policy_name = 'anti-affinity' super(ServerGroupAntiAffinityFilter, self).__init__() class _GroupAffinityFilter(filters.BaseHostFilter): """Schedule the instance on to host from a set of group hosts. 
""" RUN_ON_REBUILD = False def host_passes(self, host_state, spec_obj): # Only invoke the filter if 'affinity' is configured policies = (spec_obj.instance_group.policies if spec_obj.instance_group else []) if self.policy_name not in policies: return True group_hosts = (spec_obj.instance_group.hosts if spec_obj.instance_group else []) LOG.debug("Group affinity: check if %(host)s in " "%(configured)s", {'host': host_state.host, 'configured': group_hosts}) if group_hosts: return host_state.host in group_hosts # No groups configured return True class ServerGroupAffinityFilter(_GroupAffinityFilter): def __init__(self): self.policy_name = 'affinity' super(ServerGroupAffinityFilter, self).__init__() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/filters/aggregate_image_properties_isolation.py0000664000175000017500000000536200000000000027160 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Cloudwatt # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import nova.conf from nova.scheduler import filters from nova.scheduler.filters import utils CONF = nova.conf.CONF LOG = logging.getLogger(__name__) class AggregateImagePropertiesIsolation(filters.BaseHostFilter): """AggregateImagePropertiesIsolation works with image properties.""" # Aggregate data and instance type does not change within a request run_filter_once_per_request = True RUN_ON_REBUILD = True def host_passes(self, host_state, spec_obj): """Checks a host in an aggregate that metadata key/value match with image properties. """ cfg_namespace = (CONF.filter_scheduler. aggregate_image_properties_isolation_namespace) cfg_separator = (CONF.filter_scheduler. aggregate_image_properties_isolation_separator) image_props = spec_obj.image.properties if spec_obj.image else {} metadata = utils.aggregate_metadata_get_by_host(host_state) for key, options in metadata.items(): if (cfg_namespace and not key.startswith(cfg_namespace + cfg_separator)): continue prop = None try: prop = image_props.get(key) except AttributeError: LOG.warning("Host '%(host)s' has a metadata key '%(key)s' " "that is not present in the image metadata.", {"host": host_state.host, "key": key}) continue # NOTE(sbauza): Aggregate metadata is only strings, we need to # stringify the property to match with the option # TODO(sbauza): Fix that very ugly pattern matching if prop and str(prop) not in options: LOG.debug("%(host_state)s fails image aggregate properties " "requirements. 
Property %(prop)s does not " "match %(options)s.", {'host_state': host_state, 'prop': prop, 'options': options}) return False return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/filters/aggregate_instance_extra_specs.py0000664000175000017500000000567400000000000025753 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # Copyright (c) 2012 Cloudscaling # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from nova.scheduler import filters from nova.scheduler.filters import extra_specs_ops from nova.scheduler.filters import utils LOG = logging.getLogger(__name__) _SCOPE = 'aggregate_instance_extra_specs' class AggregateInstanceExtraSpecsFilter(filters.BaseHostFilter): """AggregateInstanceExtraSpecsFilter works with flavor records.""" # Aggregate data and instance type does not change within a request run_filter_once_per_request = True RUN_ON_REBUILD = False def host_passes(self, host_state, spec_obj): """Return a list of hosts that can create flavor. Check that the extra specs associated with the instance type match the metadata provided by aggregates. If not present return False. """ flavor = spec_obj.flavor # If 'extra_specs' is not present or extra_specs are empty then we # need not proceed further if 'extra_specs' not in flavor or not flavor.extra_specs: return True metadata = utils.aggregate_metadata_get_by_host(host_state) for key, req in flavor.extra_specs.items(): # Either not scope format, or aggregate_instance_extra_specs scope scope = key.split(':', 1) if len(scope) > 1: if scope[0] != _SCOPE: continue else: del scope[0] key = scope[0] aggregate_vals = metadata.get(key, None) if not aggregate_vals: LOG.debug( "%(host_state)s fails flavor extra_specs requirements. " "Extra_spec %(key)s is not in aggregate.", {'host_state': host_state, 'key': key}) return False for aggregate_val in aggregate_vals: if extra_specs_ops.match(aggregate_val, req): break else: LOG.debug( "%(host_state)s fails flavor extra_specs requirements. " "'%(aggregate_vals)s' do not match '%(req)s'", { 'host_state': host_state, 'req': req, 'aggregate_vals': aggregate_vals, }) return False return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/filters/aggregate_multitenancy_isolation.py0000664000175000017500000000437300000000000026337 0ustar00zuulzuul00000000000000# Copyright (c) 2011-2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from nova.scheduler import filters from nova.scheduler.filters import utils LOG = logging.getLogger(__name__) class AggregateMultiTenancyIsolation(filters.BaseHostFilter): """Isolate tenants in specific aggregates.""" # Aggregate data and tenant do not change within a request run_filter_once_per_request = True RUN_ON_REBUILD = False def host_passes(self, host_state, spec_obj): """If a host is in an aggregate that has the metadata key is prefixed with "filter_tenant_id" it can only create instances from that tenant(s). A host can be in different aggregates. If a host doesn't belong to an aggregate with the metadata key prefixed with "filter_tenant_id" The filter keeps non-specified tenants out of an aggregate that has restrictions, but allows anyone into unrestricted aggregates. """ tenant_id = spec_obj.project_id metadata = utils.aggregate_metadata_get_by_host(host_state) if metadata != {}: configured_tenant_ids = set() for name, values in metadata.items(): if name.startswith("filter_tenant_id"): configured_tenant_ids.update(set(values)) if configured_tenant_ids: if tenant_id not in configured_tenant_ids: LOG.debug("%s fails tenant id on aggregate", host_state) return False LOG.debug("Host tenant id %s matched", tenant_id) else: LOG.debug("No tenant id's defined on host. Host passes.") return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/filters/all_hosts_filter.py0000664000175000017500000000170600000000000023066 0ustar00zuulzuul00000000000000# Copyright (c) 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.scheduler import filters class AllHostsFilter(filters.BaseHostFilter): """NOOP host filter. Returns all hosts.""" # list of hosts doesn't change within a request run_filter_once_per_request = True RUN_ON_REBUILD = False def host_passes(self, host_state, spec_obj): return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/filters/compute_capabilities_filter.py0000664000175000017500000001171500000000000025264 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
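# (Illustrative aside for the AggregateMultiTenancyIsolation filter above;
# the project id shown is hypothetical.) The aggregate metadata that filter
# consumes, as returned by utils.aggregate_metadata_get_by_host(), looks
# like:
#
#     {'filter_tenant_id': {'7f3b6c2d9a0e4c5f8b1d2e3f4a5b6c7d'},
#      'filter_tenant_id_2': {'<another project id>'}}
#
# Any key prefixed with "filter_tenant_id" contributes to the set of
# tenants allowed on hosts in that aggregate.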
from oslo_log import log as logging from oslo_serialization import jsonutils from nova.scheduler import filters from nova.scheduler.filters import extra_specs_ops LOG = logging.getLogger(__name__) class ComputeCapabilitiesFilter(filters.BaseHostFilter): """HostFilter hard-coded to work with InstanceType records.""" # Instance type and host capabilities do not change within a request run_filter_once_per_request = True RUN_ON_REBUILD = False def _get_capabilities(self, host_state, scope): cap = host_state for index in range(0, len(scope)): try: if isinstance(cap, str): try: cap = jsonutils.loads(cap) except ValueError as e: LOG.debug("%(host_state)s fails. The capabilities " "'%(cap)s' couldn't be loaded from JSON: " "%(error)s", {'host_state': host_state, 'cap': cap, 'error': e}) return None if not isinstance(cap, dict): if getattr(cap, scope[index], None) is None: # If can't find, check stats dict cap = cap.stats.get(scope[index], None) else: cap = getattr(cap, scope[index], None) else: cap = cap.get(scope[index], None) except AttributeError as e: LOG.debug("%(host_state)s fails. The capabilities couldn't " "be retrieved: %(error)s.", {'host_state': host_state, 'error': e}) return None if cap is None: LOG.debug("%(host_state)s fails. There are no capabilities " "to retrieve.", {'host_state': host_state}) return None return cap def _satisfies_extra_specs(self, host_state, flavor): """Check that the host_state provided by the compute service satisfies the extra specs associated with the instance type. """ if 'extra_specs' not in flavor: return True especs = flavor.extra_specs.copy() # Replace it with a capabilities filter specially. bits = especs.get('hw:maxphysaddr_bits') if bits is not None: especs['capabilities:cpu_info:maxphysaddr:bits'] = '>= ' + bits del especs['hw:maxphysaddr_bits'] for key, req in especs.items(): # Either not scope format, or in capabilities scope scope = key.split(':') # If key does not have a namespace, the scope's size is 1, check # whether host_state contains the key as an attribute. If not, # ignore it. If it contains, deal with it in the same way as # 'capabilities:key'. This is for backward-compatible. # If the key has a namespace, the scope's size will be bigger than # 1, check that whether the namespace is 'capabilities'. If not, # ignore it. if len(scope) == 1: stats = getattr(host_state, 'stats', {}) has_attr = hasattr(host_state, key) or key in stats if not has_attr: continue else: if scope[0] != "capabilities": continue else: del scope[0] cap = self._get_capabilities(host_state, scope) if cap is None: return False if not extra_specs_ops.match(str(cap), req): LOG.debug("%(host_state)s fails extra_spec requirements. " "'%(req)s' does not match '%(cap)s'", {'host_state': host_state, 'req': req, 'cap': cap}) return False return True def host_passes(self, host_state, spec_obj): """Return a list of hosts that can create flavor.""" if not self._satisfies_extra_specs(host_state, spec_obj.flavor): LOG.debug( "%(host_state)s fails flavor extra_specs requirements", {'host_state': host_state}) return False return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/filters/compute_filter.py0000664000175000017500000000326500000000000022554 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from nova.scheduler import filters from nova import servicegroup LOG = logging.getLogger(__name__) class ComputeFilter(filters.BaseHostFilter): """Filter on active Compute nodes.""" RUN_ON_REBUILD = False def __init__(self): self.servicegroup_api = servicegroup.API() # Host state does not change within a request run_filter_once_per_request = True def host_passes(self, host_state, spec_obj): """Returns True for only active compute nodes.""" service = host_state.service if service['disabled']: LOG.debug("%(host_state)s is disabled, reason: %(reason)s", {'host_state': host_state, 'reason': service.get('disabled_reason')}) return False else: if not self.servicegroup_api.service_is_up(service): LOG.warning("%(host_state)s has not been heard from in a " "while", {'host_state': host_state}) return False return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/filters/extra_specs_ops.py0000664000175000017500000000431700000000000022733 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator # 1. The following operations are supported: # =, s==, s!=, s>=, s>, s<=, s<, , , , ==, !=, >=, <= # 2. Note that is handled in a different way below. # 3. If the first word in the extra_specs is not one of the operators, # it is ignored. 
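# Illustrative matches (a hedged sketch; the word-style operators are
# spelled <in>, <all-in> and <or> upstream):
#
#   match('512', '>= 256')            -> True  (numeric comparison)
#   match('kvm', 's== kvm')           -> True  (string comparison)
#   match('spam eggs', '<in> spam')   -> True  (substring containment)
#   match('a b c', '<all-in> a b')    -> True  (every word must appear)
#   match('v2', '<or> v1 <or> v2')    -> True  (alternation)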
op_methods = {'=': lambda x, y: float(x) >= float(y), '': lambda x, y: y in x, '': lambda x, y: all(val in x for val in y), '==': lambda x, y: float(x) == float(y), '!=': lambda x, y: float(x) != float(y), '>=': lambda x, y: float(x) >= float(y), '<=': lambda x, y: float(x) <= float(y), 's==': operator.eq, 's!=': operator.ne, 's<': operator.lt, 's<=': operator.le, 's>': operator.gt, 's>=': operator.ge} def match(value, req): words = req.split() op = method = None if words: op = words.pop(0) method = op_methods.get(op) if op != '' and not method: return value == req if value is None: return False if op == '': # Ex: v1 v2 v3 while True: if words.pop(0) == value: return True if not words: break words.pop(0) # remove a keyword if not words: break return False if words: if op == '': # requires a list not a string return method(value, words) return method(value, words[0]) return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/filters/image_props_filter.py0000664000175000017500000001204700000000000023403 0ustar00zuulzuul00000000000000# Copyright (c) 2011-2012 OpenStack Foundation # Copyright (c) 2012 Canonical Ltd # Copyright (c) 2012 SUSE LINUX Products GmbH # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import versionutils import nova.conf from nova.objects import fields from nova.scheduler import filters LOG = logging.getLogger(__name__) CONF = nova.conf.CONF class ImagePropertiesFilter(filters.BaseHostFilter): """Filter compute nodes that satisfy instance image properties. The ImagePropertiesFilter filters compute nodes that satisfy any architecture, hypervisor type, or virtual machine mode properties specified on the instance's image properties. Image properties are contained in the image dictionary in the request_spec. """ RUN_ON_REBUILD = True # Image Properties and Compute Capabilities do not change within # a request run_filter_once_per_request = True def _get_default_architecture(self): return CONF.filter_scheduler.image_properties_default_architecture def _instance_supported(self, host_state, image_props, hypervisor_version): default_img_arch = self._get_default_architecture() img_arch = image_props.get('hw_architecture', default_img_arch) img_h_type = image_props.get('img_hv_type') img_vm_mode = image_props.get('hw_vm_mode') checked_img_props = ( fields.Architecture.canonicalize(img_arch), fields.HVType.canonicalize(img_h_type), fields.VMMode.canonicalize(img_vm_mode) ) # Supported if no compute-related instance properties are specified if not any(checked_img_props): return True supp_instances = host_state.supported_instances # Not supported if an instance property is requested but nothing # advertised by the host. 
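        # supported_instances is what the virt driver advertises: a list
        # of (arch, hv_type, vm_mode) entries such as
        # ['x86_64', 'qemu', 'hvm'].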
if not supp_instances: LOG.debug("Instance contains properties %(image_props)s, " "but no corresponding supported_instances are " "advertised by the compute node", {'image_props': image_props}) return False def _compare_props(props, other_props): for i in props: if i and i not in other_props: return False return True def _compare_product_version(hyper_version, image_props): version_required = image_props.get('img_hv_requested_version') if not (hypervisor_version and version_required): return True img_prop_predicate = versionutils.VersionPredicate( version_required) hyper_ver_str = versionutils.convert_version_to_str(hyper_version) return img_prop_predicate.satisfied_by(hyper_ver_str) def _compare_maxphysaddr_bits(host_state, image_props): bits_required = image_props.get('hw_maxphysaddr_bits') if not bits_required: return True bits = host_state.cpu_info.get('maxphysaddr', {}).get('bits') if not bits: return True return bits >= bits_required for supp_inst in supp_instances: if _compare_props(checked_img_props, supp_inst): if _compare_product_version(hypervisor_version, image_props): if _compare_maxphysaddr_bits(host_state, image_props): return True LOG.debug("Instance contains properties %(image_props)s " "that are not provided by the compute node " "supported_instances %(supp_instances)s or " "hypervisor version %(hypervisor_version)s do not match", {'image_props': image_props, 'supp_instances': supp_instances, 'hypervisor_version': hypervisor_version}) return False def host_passes(self, host_state, spec_obj): """Check if host passes specified image properties. Returns True for compute nodes that satisfy image properties contained in the request_spec. """ image_props = spec_obj.image.properties if spec_obj.image else {} if not self._instance_supported(host_state, image_props, host_state.hypervisor_version): LOG.debug("%(host_state)s does not support requested " "instance_properties", {'host_state': host_state}) return False return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/filters/io_ops_filter.py0000664000175000017500000000465400000000000022373 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import nova.conf from nova.scheduler import filters from nova.scheduler.filters import utils LOG = logging.getLogger(__name__) CONF = nova.conf.CONF class IoOpsFilter(filters.BaseHostFilter): """Filter out hosts with too many concurrent I/O operations.""" RUN_ON_REBUILD = False def _get_max_io_ops_per_host(self, host_state, spec_obj): return CONF.filter_scheduler.max_io_ops_per_host def host_passes(self, host_state, spec_obj): """Use information about current vm and task states collected from compute node statistics to decide whether to filter. 
""" num_io_ops = host_state.num_io_ops max_io_ops = self._get_max_io_ops_per_host( host_state, spec_obj) passes = num_io_ops < max_io_ops if not passes: LOG.debug("%(host_state)s fails I/O ops check: Max IOs per host " "is set to %(max_io_ops)s", {'host_state': host_state, 'max_io_ops': max_io_ops}) return passes class AggregateIoOpsFilter(IoOpsFilter): """AggregateIoOpsFilter with per-aggregate the max io operations. Fall back to global max_io_ops_per_host if no per-aggregate setting found. """ def _get_max_io_ops_per_host(self, host_state, spec_obj): max_io_ops_per_host = CONF.filter_scheduler.max_io_ops_per_host aggregate_vals = utils.aggregate_values_from_key( host_state, 'max_io_ops_per_host') try: value = utils.validate_num_values( aggregate_vals, max_io_ops_per_host, cast_to=int) except ValueError as e: LOG.warning("Could not decode max_io_ops_per_host: '%s'", e) value = max_io_ops_per_host return value ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/filters/isolated_hosts_filter.py0000664000175000017500000000570000000000000024120 0ustar00zuulzuul00000000000000# Copyright (c) 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import nova.conf from nova.scheduler import filters CONF = nova.conf.CONF class IsolatedHostsFilter(filters.BaseHostFilter): """Keep specified images to selected hosts.""" # The configuration values do not change within a request run_filter_once_per_request = True RUN_ON_REBUILD = True def host_passes(self, host_state, spec_obj): """Result Matrix with 'restrict_isolated_hosts_to_isolated_images' set to True:: | | isolated_image | non_isolated_image | -------------+----------------+------------------- | iso_host | True | False | non_iso_host | False | True Result Matrix with 'restrict_isolated_hosts_to_isolated_images' set to False:: | | isolated_image | non_isolated_image | -------------+----------------+------------------- | iso_host | True | True | non_iso_host | False | True """ # If the configuration does not list any hosts, the filter will always # return True, assuming a configuration error, so letting all hosts # through. isolated_hosts = CONF.filter_scheduler.isolated_hosts isolated_images = CONF.filter_scheduler.isolated_images restrict_isolated_hosts_to_isolated_images = ( CONF.filter_scheduler.restrict_isolated_hosts_to_isolated_images) if not isolated_images: # As there are no images to match, return True if the filter is # not restrictive otherwise return False if the host is in the # isolation list. return ((not restrict_isolated_hosts_to_isolated_images) or (host_state.host not in isolated_hosts)) # Check to see if the image id is set since volume-backed instances # can be created without an imageRef in the server create request. 
image_ref = spec_obj.image.id \ if spec_obj.image and 'id' in spec_obj.image else None image_isolated = image_ref in isolated_images host_isolated = host_state.host in isolated_hosts if restrict_isolated_hosts_to_isolated_images: return (image_isolated == host_isolated) else: return (not image_isolated) or host_isolated ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/filters/json_filter.py0000664000175000017500000001115700000000000022050 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator from oslo_serialization import jsonutils from nova.scheduler import filters class JsonFilter(filters.BaseHostFilter): """Host Filter to allow simple JSON-based grammar for selecting hosts. """ RUN_ON_REBUILD = False def _op_compare(self, args, op): """Returns True if the specified operator can successfully compare the first item in the args with all the rest. Will return False if only one item is in the list. """ if len(args) < 2: return False if op is operator.contains: bad = args[0] not in args[1:] else: bad = [arg for arg in args[1:] if not op(args[0], arg)] return not bool(bad) def _equals(self, args): """First term is == all the other terms.""" return self._op_compare(args, operator.eq) def _less_than(self, args): """First term is < all the other terms.""" return self._op_compare(args, operator.lt) def _greater_than(self, args): """First term is > all the other terms.""" return self._op_compare(args, operator.gt) def _in(self, args): """First term is in set of remaining terms.""" return self._op_compare(args, operator.contains) def _less_than_equal(self, args): """First term is <= all the other terms.""" return self._op_compare(args, operator.le) def _greater_than_equal(self, args): """First term is >= all the other terms.""" return self._op_compare(args, operator.ge) def _not(self, args): """Flip each of the arguments.""" return [not arg for arg in args] def _or(self, args): """True if any arg is True.""" return any(args) def _and(self, args): """True if all args are True.""" return all(args) commands = { '=': _equals, '<': _less_than, '>': _greater_than, 'in': _in, '<=': _less_than_equal, '>=': _greater_than_equal, 'not': _not, 'or': _or, 'and': _and, } def _parse_string(self, string, host_state): """Strings prefixed with $ are capability lookups in the form '$variable' where 'variable' is an attribute in the HostState class. 
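        For example, '$free_ram_mb' is resolved to host_state.free_ram_mb.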
If $variable is a dictionary, you may use: $variable.dictkey """ if not string: return None if not string.startswith("$"): return string path = string[1:].split(".") obj = getattr(host_state, path[0], None) if obj is None: return None for item in path[1:]: obj = obj.get(item, None) if obj is None: return None return obj def _process_filter(self, query, host_state): """Recursively parse the query structure.""" if not query: return True cmd = query[0] method = self.commands[cmd] cooked_args = [] for arg in query[1:]: if isinstance(arg, list): arg = self._process_filter(arg, host_state) elif isinstance(arg, str): arg = self._parse_string(arg, host_state) if arg is not None: cooked_args.append(arg) result = method(self, cooked_args) return result def host_passes(self, host_state, spec_obj): """Return a list of hosts that can fulfill the requirements specified in the query. """ query = spec_obj.get_scheduler_hint('query') if not query: return True # NOTE(comstud): Not checking capabilities or service for # enabled/disabled so that a provided json filter can decide result = self._process_filter(jsonutils.loads(query), host_state) if isinstance(result, list): # If any succeeded, include the host result = any(result) if result: # Filter it out. return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/filters/metrics_filter.py0000664000175000017500000000355600000000000022551 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Intel, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import nova.conf from nova.scheduler import filters from nova.scheduler import utils LOG = logging.getLogger(__name__) CONF = nova.conf.CONF class MetricsFilter(filters.BaseHostFilter): """Metrics Filter This filter is used to filter out those hosts which don't have the corresponding metrics so these the metrics weigher won't fail due to these hosts. """ RUN_ON_REBUILD = False def __init__(self): super(MetricsFilter, self).__init__() opts = utils.parse_options(CONF.metrics.weight_setting, sep='=', converter=float, name="metrics.weight_setting") self.keys = set([x[0] for x in opts]) def host_passes(self, host_state, spec_obj): metrics_on_host = set(m.name for m in host_state.metrics) if not self.keys.issubset(metrics_on_host): unavail = metrics_on_host - self.keys LOG.debug("%(host_state)s does not have the following " "metrics: %(metrics)s", {'host_state': host_state, 'metrics': ', '.join(unavail)}) return False return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/filters/num_instances_filter.py0000664000175000017500000000456100000000000023746 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import nova.conf from nova.scheduler import filters from nova.scheduler.filters import utils LOG = logging.getLogger(__name__) CONF = nova.conf.CONF class NumInstancesFilter(filters.BaseHostFilter): """Filter out hosts with too many instances.""" RUN_ON_REBUILD = False def _get_max_instances_per_host(self, host_state, spec_obj): return CONF.filter_scheduler.max_instances_per_host def host_passes(self, host_state, spec_obj): num_instances = host_state.num_instances max_instances = self._get_max_instances_per_host( host_state, spec_obj) passes = num_instances < max_instances if not passes: LOG.debug("%(host_state)s fails num_instances check: Max " "instances per host is set to %(max_instances)s", {'host_state': host_state, 'max_instances': max_instances}) return passes class AggregateNumInstancesFilter(NumInstancesFilter): """AggregateNumInstancesFilter with per-aggregate the max num instances. Fall back to global max_num_instances_per_host if no per-aggregate setting found. """ def _get_max_instances_per_host(self, host_state, spec_obj): max_instances_per_host = CONF.filter_scheduler.max_instances_per_host aggregate_vals = utils.aggregate_values_from_key( host_state, 'max_instances_per_host') try: value = utils.validate_num_values( aggregate_vals, max_instances_per_host, cast_to=int) except ValueError as e: LOG.warning("Could not decode max_instances_per_host: '%s'", e) value = max_instances_per_host return value ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/filters/numa_topology_filter.py0000664000175000017500000001260200000000000023767 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from nova import objects from nova.objects import fields from nova.scheduler import filters from nova.virt import hardware LOG = logging.getLogger(__name__) class NUMATopologyFilter( filters.BaseHostFilter, filters.CandidateFilterMixin, ): """Filter on requested NUMA topology.""" # NOTE(sean-k-mooney): In change I0322d872bdff68936033a6f5a54e8296a6fb343 # we validate that the NUMA topology does not change in the api. If the # requested image would alter the NUMA constraints we reject the rebuild # request and therefore do not need to run this filter on rebuild. RUN_ON_REBUILD = False def _satisfies_cpu_policy(self, host_state, extra_specs, image_props): """Check that the host_state provided satisfies any available CPU policy requirements. 
""" host_topology = host_state.numa_topology # NOTE(stephenfin): There can be conflicts between the policy # specified by the image and that specified by the instance, but this # is not the place to resolve these. We do this during scheduling. cpu_policy = [extra_specs.get('hw:cpu_policy'), image_props.get('hw_cpu_policy')] cpu_thread_policy = [extra_specs.get('hw:cpu_thread_policy'), image_props.get('hw_cpu_thread_policy')] if not host_topology: return True if fields.CPUAllocationPolicy.DEDICATED not in cpu_policy: return True if fields.CPUThreadAllocationPolicy.REQUIRE not in cpu_thread_policy: return True if not host_topology.has_threads: LOG.debug("%(host_state)s fails CPU policy requirements. " "Host does not have hyperthreading or " "hyperthreading is disabled, but 'require' threads " "policy was requested.", {'host_state': host_state}) return False return True def host_passes(self, host_state, spec_obj): # TODO(stephenfin): The 'numa_fit_instance_to_host' function has the # unfortunate side effect of modifying 'spec_obj.numa_topology' - an # InstanceNUMATopology object - by populating the 'cpu_pinning' field. # This is rather rude and said function should be reworked to avoid # doing this. That's a large, non-backportable cleanup however, so for # now we just duplicate spec_obj to prevent changes propagating to # future filter calls. spec_obj = spec_obj.obj_clone() ram_ratio = host_state.ram_allocation_ratio cpu_ratio = host_state.cpu_allocation_ratio extra_specs = spec_obj.flavor.extra_specs image_props = spec_obj.image.properties requested_topology = spec_obj.numa_topology host_topology = host_state.numa_topology pci_requests = spec_obj.pci_requests network_metadata = None if 'network_metadata' in spec_obj: network_metadata = spec_obj.network_metadata if pci_requests: pci_requests = pci_requests.requests if not self._satisfies_cpu_policy(host_state, extra_specs, image_props): return False if requested_topology and host_topology: limits = objects.NUMATopologyLimits( cpu_allocation_ratio=cpu_ratio, ram_allocation_ratio=ram_ratio) if network_metadata: limits.network_metadata = network_metadata good_candidates = self.filter_candidates( host_state, lambda candidate: hardware.numa_fit_instance_to_host( host_topology, requested_topology, limits=limits, pci_requests=pci_requests, pci_stats=host_state.pci_stats, provider_mapping=candidate["mappings"], ), ) if not good_candidates: LOG.debug("%(host)s, %(node)s fails NUMA topology " "requirements. The instance does not fit on this " "host.", {'host': host_state.host, 'node': host_state.nodename}, instance_uuid=spec_obj.instance_uuid) return False host_state.limits['numa_topology'] = limits return True elif requested_topology: LOG.debug("%(host)s, %(node)s fails NUMA topology requirements. " "No host NUMA topology while the instance specified " "one.", {'host': host_state.host, 'node': host_state.nodename}, instance_uuid=spec_obj.instance_uuid) return False else: return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/filters/pci_passthrough_filter.py0000664000175000017500000000470100000000000024276 0ustar00zuulzuul00000000000000# Copyright (c) 2013 ISP RAS. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from nova.scheduler import filters LOG = logging.getLogger(__name__) class PciPassthroughFilter( filters.BaseHostFilter, filters.CandidateFilterMixin, ): """Pci Passthrough Filter based on PCI request Filter that schedules instances on a host if the host has devices to meet the device requests in the 'extra_specs' for the flavor. PCI resource tracker provides updated summary information about the PCI devices for each host, like:: | [{"count": 5, "vendor_id": "8086", "product_id": "1520", | "extra_info":'{}'}], and VM requests PCI devices via PCI requests, like:: | [{"count": 1, "vendor_id": "8086", "product_id": "1520",}]. The filter checks if the host passes or not based on this information. """ RUN_ON_REBUILD = False def host_passes(self, host_state, spec_obj): """Return true if the host has the required PCI devices.""" pci_requests = spec_obj.pci_requests if not pci_requests or not pci_requests.requests: return True if not host_state.pci_stats: LOG.debug("%(host_state)s doesn't have the required PCI devices" " (%(requests)s)", {'host_state': host_state, 'requests': pci_requests}) return False good_candidates = self.filter_candidates( host_state, lambda candidate: host_state.pci_stats.support_requests( pci_requests.requests, provider_mapping=candidate["mappings"] ), ) if not good_candidates: LOG.debug("%(host_state)s doesn't have the required PCI devices" " (%(requests)s)", {'host_state': host_state, 'requests': pci_requests}) return False return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/filters/type_filter.py0000664000175000017500000000272400000000000022060 0ustar00zuulzuul00000000000000# Copyright (c) 2012 The Cloudscaling Group, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
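# A sketch of the aggregate metadata this filter consumes (the flavor
# names are illustrative assumptions): an aggregate carrying
#
#     instance_type = "m1.small,m1.medium"
#
# lets its hosts accept only those flavors, while hosts whose aggregates
# do not define the key accept any flavor.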
from nova.scheduler import filters from nova.scheduler.filters import utils class AggregateTypeAffinityFilter(filters.BaseHostFilter): """AggregateTypeAffinityFilter limits flavors by aggregate return True if no flavor key is set or if the aggregate metadata key 'instance_type' has the instance_type name as a value """ # Aggregate data does not change within a request run_filter_once_per_request = True RUN_ON_REBUILD = False def host_passes(self, host_state, spec_obj): # TODO(stephenfin): Add support for 'flavor' key aggregate_vals = utils.aggregate_values_from_key( host_state, 'instance_type') for val in aggregate_vals: if spec_obj.flavor.name in [x.strip() for x in val.split(',')]: return True return not aggregate_vals ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/filters/utils.py0000664000175000017500000000557700000000000020703 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Bench of utility methods used by filters.""" import collections from oslo_log import log as logging LOG = logging.getLogger(__name__) def aggregate_values_from_key(host_state, key_name): """Returns a set of values based on a metadata key for a specific host.""" aggrlist = host_state.aggregates return {aggr.metadata[key_name] for aggr in aggrlist if key_name in aggr.metadata } def aggregate_metadata_get_by_host(host_state, key=None): """Returns a dict of all metadata based on a metadata key for a specific host. If the key is not provided, returns a dict of all metadata. """ aggrlist = host_state.aggregates metadata = collections.defaultdict(set) for aggr in aggrlist: if key is None or key in aggr.metadata: for k, v in aggr.metadata.items(): metadata[k].update(x.strip() for x in v.split(',')) return metadata def validate_num_values(vals, default=None, cast_to=int, based_on=min): """Returns a correctly casted value based on a set of values. This method is useful to work with per-aggregate filters, It takes a set of values then return the 'based_on'{min/max} converted to 'cast_to' of the set or the default value. Note: The cast implies a possible ValueError """ num_values = len(vals) if num_values == 0: return default if num_values > 1: if based_on == min: LOG.info("%(num_values)d values found, " "of which the minimum value will be used.", {'num_values': num_values}) else: LOG.info("%(num_values)d values found, " "of which the maximum value will be used.", {'num_values': num_values}) return based_on([cast_to(val) for val in vals]) def instance_uuids_overlap(host_state, uuids): """Tests for overlap between a host_state and a list of uuids. Returns True if any of the supplied uuids match any of the instance.uuid values in the host_state. 
""" if isinstance(uuids, str): uuids = [uuids] set_uuids = set(uuids) # host_state.instances is a dict whose keys are the instance uuids host_uuids = set(host_state.instances.keys()) return bool(host_uuids.intersection(set_uuids)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/host_manager.py0000664000175000017500000012271100000000000020530 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Manage hosts in the current zone. """ import collections import functools import time import iso8601 from oslo_log import log as logging from oslo_utils import timeutils import nova.conf from nova import context as context_module from nova import exception from nova import objects from nova.pci import stats as pci_stats from nova.scheduler import filters from nova.scheduler import weights from nova import utils from nova.virt import hardware CONF = nova.conf.CONF LOG = logging.getLogger(__name__) HOST_INSTANCE_SEMAPHORE = "host_instance" class ReadOnlyDict(collections.UserDict): """A read-only dict.""" def __init__(self, source=None): self.data = {} if source: self.data.update(source) def __setitem__(self, key, item): raise TypeError() def __delitem__(self, key): raise TypeError() def clear(self): raise TypeError() def pop(self, key, *args): raise TypeError() def popitem(self): raise TypeError() def update(self): raise TypeError() @utils.expects_func_args('self', 'spec_obj') def set_update_time_on_success(function): """Set updated time of HostState when consuming succeed.""" @functools.wraps(function) def decorated_function(self, spec_obj): return_value = None try: return_value = function(self, spec_obj) except Exception as e: # Ignores exception raised from consume_from_request() so that # booting instance would fail in the resource claim of compute # node, other suitable node may be chosen during scheduling retry. LOG.warning("Selected host: %(host)s failed to consume from " "instance. Error: %(error)s", {'host': self.host, 'error': e}) else: now = timeutils.utcnow() # NOTE(sbauza): Objects are UTC tz-aware by default self.updated = now.replace(tzinfo=iso8601.UTC) return return_value return decorated_function class HostState(object): """Mutable and immutable information tracked for a host. This is an attempt to remove the ad-hoc data structures previously used and lock down access. """ def __init__(self, host, node, cell_uuid): self.host = host self.nodename = node self.uuid = None self._lock_name = (host, node) # Mutable available resources. # These will change as resources are virtually "consumed". 
self.total_usable_ram_mb = 0 self.total_usable_disk_gb = 0 self.disk_mb_used = 0 self.free_ram_mb = 0 self.free_disk_mb = 0 self.vcpus_total = 0 self.vcpus_used = 0 self.pci_stats = None self.numa_topology = None # Additional host information from the compute node stats: self.num_instances = 0 self.num_io_ops = 0 self.failed_builds = 0 # Other information self.host_ip = None self.hypervisor_type = None self.hypervisor_version = None self.hypervisor_hostname = None self.cpu_info = None self.supported_instances = None # Resource oversubscription values for the compute host: self.limits = {} # Generic metrics from compute nodes self.metrics = None # List of aggregates the host belongs to self.aggregates = [] # Instances on this host self.instances = {} # Allocation ratios for this host self.ram_allocation_ratio = None self.cpu_allocation_ratio = None self.disk_allocation_ratio = None # Host cell (v2) membership self.cell_uuid = cell_uuid self.updated = None self.allocation_candidates = [] def update(self, compute=None, service=None, aggregates=None, inst_dict=None): """Update all information about a host.""" @utils.synchronized(self._lock_name) def _locked_update(self, compute, service, aggregates, inst_dict): # Scheduler API is inherently multi-threaded as every incoming RPC # message will be dispatched in it's own green thread. So the # shared host state should be updated in a consistent way to make # sure its data is valid under concurrent write operations. if compute is not None: LOG.debug("Update host state from compute node: %s", compute) self._update_from_compute_node(compute) if aggregates is not None: LOG.debug("Update host state with aggregates: %s", aggregates) self.aggregates = aggregates if service is not None: LOG.debug("Update host state with service dict: %s", service) self.service = ReadOnlyDict(service) if inst_dict is not None: LOG.debug("Update host state with instances: %s", list(inst_dict)) self.instances = inst_dict return _locked_update(self, compute, service, aggregates, inst_dict) def _update_from_compute_node(self, compute): """Update information about a host from a ComputeNode object.""" # NOTE(jichenjc): if the compute record is just created but not updated # some field such as free_disk_gb can be None if 'free_disk_gb' not in compute or compute.free_disk_gb is None: LOG.debug('Ignoring compute node %s as its usage has not been ' 'updated yet.', compute.uuid) return if (self.updated and compute.updated_at and self.updated > compute.updated_at): return all_ram_mb = compute.memory_mb self.uuid = compute.uuid # Assume virtual size is all consumed by instances if use qcow2 disk. 
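        # free_disk_gb is the hypervisor's raw view; disk_available_least
        # also accounts for the potential growth of sparse (qcow2) images,
        # so the smaller of the two is taken as the usable free space.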
free_gb = compute.free_disk_gb least_gb = compute.disk_available_least if least_gb is not None: if least_gb > free_gb: # can occur when an instance in database is not on host LOG.warning( "Host %(hostname)s has more disk space than database " "expected (%(physical)s GB > %(database)s GB)", {'physical': least_gb, 'database': free_gb, 'hostname': compute.hypervisor_hostname}) free_gb = min(least_gb, free_gb) free_disk_mb = free_gb * 1024 self.disk_mb_used = compute.local_gb_used * 1024 # NOTE(jogo) free_ram_mb can be negative self.free_ram_mb = compute.free_ram_mb self.total_usable_ram_mb = all_ram_mb self.total_usable_disk_gb = compute.local_gb self.free_disk_mb = free_disk_mb self.vcpus_total = compute.vcpus self.vcpus_used = compute.vcpus_used self.updated = compute.updated_at # the ComputeNode.numa_topology field is a StringField so deserialize self.numa_topology = objects.NUMATopology.obj_from_db_obj( compute.numa_topology) if compute.numa_topology else None self.pci_stats = pci_stats.PciDeviceStats( self.numa_topology, stats=compute.pci_device_pools) # All virt drivers report host_ip self.host_ip = compute.host_ip self.hypervisor_type = compute.hypervisor_type self.hypervisor_version = compute.hypervisor_version self.hypervisor_hostname = compute.hypervisor_hostname self.cpu_info = compute.cpu_info if compute.supported_hv_specs: self.supported_instances = [spec.to_list() for spec in compute.supported_hv_specs] else: self.supported_instances = [] # Don't store stats directly in host_state to make sure these don't # overwrite any values, or get overwritten themselves. Store in self so # filters can schedule with them. self.stats = compute.stats or {} # Track number of instances on host self.num_instances = int(self.stats.get('num_instances', 0)) self.num_io_ops = int(self.stats.get('io_workload', 0)) # update metrics self.metrics = objects.MonitorMetricList.from_json(compute.metrics) # update allocation ratios given by the ComputeNode object self.cpu_allocation_ratio = compute.cpu_allocation_ratio self.ram_allocation_ratio = compute.ram_allocation_ratio self.disk_allocation_ratio = compute.disk_allocation_ratio # update failed_builds counter reported by the compute self.failed_builds = int(self.stats.get('failed_builds', 0)) def consume_from_request(self, spec_obj): """Incrementally update host state from a RequestSpec object.""" @utils.synchronized(self._lock_name) @set_update_time_on_success def _locked(self, spec_obj): # Scheduler API is inherently multi-threaded as every incoming RPC # message will be dispatched in its own green thread. So the # shared host state should be consumed in a consistent way to make # sure its data is valid under concurrent write operations. self._locked_consume_from_request(spec_obj) return _locked(self, spec_obj) def _locked_consume_from_request(self, spec_obj): disk_mb = (spec_obj.root_gb + spec_obj.ephemeral_gb) * 1024 ram_mb = spec_obj.memory_mb vcpus = spec_obj.vcpus self.free_ram_mb -= ram_mb self.free_disk_mb -= disk_mb self.vcpus_used += vcpus # Track number of instances on host self.num_instances += 1 pci_requests = spec_obj.pci_requests if pci_requests and self.pci_stats: pci_requests = pci_requests.requests else: pci_requests = None # Calculate the NUMA usage... 
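        # numa_fit_instance_to_host() pins the requested topology to host
        # cells; the result is folded back into self.numa_topology so later
        # instances in the same request see the updated usage.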
if self.numa_topology and spec_obj.numa_topology: spec_obj.numa_topology = hardware.numa_fit_instance_to_host( self.numa_topology, spec_obj.numa_topology, limits=self.limits.get('numa_topology'), pci_requests=pci_requests, pci_stats=self.pci_stats, provider_mapping=spec_obj.get_request_group_mapping()) self.numa_topology = hardware.numa_usage_from_instance_numa( self.numa_topology, spec_obj.numa_topology) # ...and the PCI usage if pci_requests: instance_cells = None if spec_obj.numa_topology: instance_cells = spec_obj.numa_topology.cells self.pci_stats.apply_requests( pci_requests, spec_obj.get_request_group_mapping(), instance_cells ) # NOTE(sbauza): By considering all cases when the scheduler is called # and when consume_from_request() is run, we can safely say that there # is always an IO operation because we want to move the instance self.num_io_ops += 1 def __repr__(self): return ( "(%(host)s, %(node)s) ram: %(free_ram)sMB " "disk: %(free_disk)sMB io_ops: %(num_io_ops)s " "instances: %(num_instances)s, " "allocation_candidates: %(num_a_c)s" % { "host": self.host, "node": self.nodename, "free_ram": self.free_ram_mb, "free_disk": self.free_disk_mb, "num_io_ops": self.num_io_ops, "num_instances": self.num_instances, "num_a_c": len(self.allocation_candidates), } ) class HostManager(object): """Base HostManager class.""" # Can be overridden in a subclass def host_state_cls(self, host, node, cell, **kwargs): return HostState(host, node, cell) def __init__(self): self.refresh_cells_caches() self.filter_handler = filters.HostFilterHandler() filter_classes = self.filter_handler.get_matching_classes( CONF.filter_scheduler.available_filters) self.filter_cls_map = {cls.__name__: cls for cls in filter_classes} self.filter_obj_map = {} self.enabled_filters = self._choose_host_filters(self._load_filters()) self.weight_handler = weights.HostWeightHandler() weigher_classes = self.weight_handler.get_matching_classes( CONF.filter_scheduler.weight_classes) self.weighers = [cls() for cls in weigher_classes] # Dict of aggregates keyed by their ID self.aggs_by_id = {} # Dict of set of aggregate IDs keyed by the name of the host belonging # to those aggregates self.host_aggregates_map = collections.defaultdict(set) self._init_aggregates() self.track_instance_changes = ( CONF.filter_scheduler.track_instance_changes) # Dict of instances and status, keyed by host self._instance_info = {} if self.track_instance_changes: self._init_instance_info() def _load_filters(self): return CONF.filter_scheduler.enabled_filters def _init_aggregates(self): elevated = context_module.get_admin_context() aggs = objects.AggregateList.get_all(elevated) for agg in aggs: self.aggs_by_id[agg.id] = agg for host in agg.hosts: self.host_aggregates_map[host].add(agg.id) def update_aggregates(self, aggregates): """Updates internal HostManager information about aggregates.""" if isinstance(aggregates, (list, objects.AggregateList)): for agg in aggregates: self._update_aggregate(agg) else: self._update_aggregate(aggregates) def _update_aggregate(self, aggregate): self.aggs_by_id[aggregate.id] = aggregate for host in aggregate.hosts: self.host_aggregates_map[host].add(aggregate.id) # Refreshing the mapping dict to remove all hosts that are no longer # part of the aggregate for host in self.host_aggregates_map: if (aggregate.id in self.host_aggregates_map[host] and host not in aggregate.hosts): self.host_aggregates_map[host].remove(aggregate.id) def delete_aggregate(self, aggregate): """Deletes internal HostManager information about a 
specific aggregate. """ if aggregate.id in self.aggs_by_id: del self.aggs_by_id[aggregate.id] for host in self.host_aggregates_map: if aggregate.id in self.host_aggregates_map[host]: self.host_aggregates_map[host].remove(aggregate.id) def _init_instance_info(self, computes_by_cell=None): """Creates the initial view of instances for all hosts. As this initial population of instance information may take some time, we don't wish to block the scheduler's startup while this completes. The async method allows us to simply mock out the _init_instance_info() method in tests. :param compute_nodes: a list of nodes to populate instances info for if is None, compute_nodes will be looked up in database """ def _async_init_instance_info(computes_by_cell): context = context_module.get_admin_context() LOG.debug("START:_async_init_instance_info") self._instance_info = {} count = 0 if not computes_by_cell: computes_by_cell = {} for cell in self.cells.values(): with context_module.target_cell(context, cell) as cctxt: cell_cns = objects.ComputeNodeList.get_all( cctxt).objects computes_by_cell[cell] = cell_cns count += len(cell_cns) LOG.debug("Total number of compute nodes: %s", count) for cell, compute_nodes in computes_by_cell.items(): # Break the queries into batches of 10 to reduce the total # number of calls to the DB. batch_size = 10 start_node = 0 end_node = batch_size while start_node <= len(compute_nodes): curr_nodes = compute_nodes[start_node:end_node] start_node += batch_size end_node += batch_size filters = {"host": [curr_node.host for curr_node in curr_nodes], "deleted": False} with context_module.target_cell(context, cell) as cctxt: result = objects.InstanceList.get_by_filters( cctxt, filters) instances = result.objects LOG.debug("Adding %s instances for hosts %s-%s", len(instances), start_node, end_node) for instance in instances: host = instance.host if host not in self._instance_info: self._instance_info[host] = {"instances": {}, "updated": False} inst_dict = self._instance_info[host] inst_dict["instances"][instance.uuid] = instance # Call sleep() to cooperatively yield time.sleep(0) LOG.debug("END:_async_init_instance_info") # Run this async so that we don't block the scheduler start-up utils.spawn(_async_init_instance_info, computes_by_cell) def _choose_host_filters(self, filter_cls_names): """Since the caller may specify which filters to use we need to have an authoritative list of what is permissible. This function checks the filter names against a predefined set of acceptable filters. 
""" if not isinstance(filter_cls_names, (list, tuple)): filter_cls_names = [filter_cls_names] good_filters = [] bad_filters = [] for filter_name in filter_cls_names: if filter_name not in self.filter_obj_map: if filter_name not in self.filter_cls_map: bad_filters.append(filter_name) continue filter_cls = self.filter_cls_map[filter_name] self.filter_obj_map[filter_name] = filter_cls() good_filters.append(self.filter_obj_map[filter_name]) if bad_filters: msg = ", ".join(bad_filters) raise exception.SchedulerHostFilterNotFound(filter_name=msg) return good_filters def get_filtered_hosts(self, hosts, spec_obj, index=0): """Filter hosts and return only ones passing all filters.""" def _strip_ignore_hosts(host_map, hosts_to_ignore): ignored_hosts = [] for host in hosts_to_ignore: for (hostname, nodename) in list(host_map.keys()): if host.lower() == hostname.lower(): del host_map[(hostname, nodename)] ignored_hosts.append(host) ignored_hosts_str = ', '.join(ignored_hosts) LOG.info('Host filter ignoring hosts: %s', ignored_hosts_str) def _match_forced_hosts(host_map, hosts_to_force): forced_hosts = [] lowered_hosts_to_force = [host.lower() for host in hosts_to_force] for (hostname, nodename) in list(host_map.keys()): if hostname.lower() not in lowered_hosts_to_force: del host_map[(hostname, nodename)] else: forced_hosts.append(hostname) if host_map: forced_hosts_str = ', '.join(forced_hosts) LOG.info('Host filter forcing available hosts to %s', forced_hosts_str) else: forced_hosts_str = ', '.join(hosts_to_force) LOG.info("No hosts matched due to not matching " "'force_hosts' value of '%s'", forced_hosts_str) def _match_forced_nodes(host_map, nodes_to_force): forced_nodes = [] for (hostname, nodename) in list(host_map.keys()): if nodename not in nodes_to_force: del host_map[(hostname, nodename)] else: forced_nodes.append(nodename) if host_map: forced_nodes_str = ', '.join(forced_nodes) LOG.info('Host filter forcing available nodes to %s', forced_nodes_str) else: forced_nodes_str = ', '.join(nodes_to_force) LOG.info("No nodes matched due to not matching " "'force_nodes' value of '%s'", forced_nodes_str) def _get_hosts_matching_request(hosts, requested_destination): """Get hosts through matching the requested destination. We will both set host and node to requested destination object and host will never be None and node will be None in some cases. Starting with API 2.74 microversion, we also can specify the host/node to select hosts to launch a server: - If only host(or only node)(or both host and node) is supplied and we get one node from get_compute_nodes_by_host_or_node which is called in resources_from_request_spec function, the destination will be set both host and node. - If only host is supplied and we get more than one node from get_compute_nodes_by_host_or_node which is called in resources_from_request_spec function, the destination will only include host. """ (host, node) = (requested_destination.host, requested_destination.node) if node: requested_nodes = [x for x in hosts if x.host == host and x.nodename == node] else: requested_nodes = [x for x in hosts if x.host == host] if requested_nodes: LOG.info('Host filter only checking host %(host)s and ' 'node %(node)s', {'host': host, 'node': node}) else: # NOTE(sbauza): The API level should prevent the user from # providing a wrong destination but let's make sure a wrong # destination doesn't trample the scheduler still. 
LOG.info('No hosts matched due to not matching requested ' 'destination (%(host)s, %(node)s)', {'host': host, 'node': node}) return iter(requested_nodes) ignore_hosts = spec_obj.ignore_hosts or [] force_hosts = spec_obj.force_hosts or [] force_nodes = spec_obj.force_nodes or [] requested_node = spec_obj.requested_destination if requested_node is not None and 'host' in requested_node: # NOTE(sbauza): Reduce a potentially long set of hosts as much as # possible to any requested destination nodes before passing the # list to the filters hosts = _get_hosts_matching_request(hosts, requested_node) if ignore_hosts or force_hosts or force_nodes: # NOTE(deva): we can't assume "host" is unique because # one host may have many nodes. name_to_cls_map = {(x.host, x.nodename): x for x in hosts} if ignore_hosts: _strip_ignore_hosts(name_to_cls_map, ignore_hosts) if not name_to_cls_map: return [] # NOTE(deva): allow force_hosts and force_nodes independently if force_hosts: _match_forced_hosts(name_to_cls_map, force_hosts) if force_nodes: _match_forced_nodes(name_to_cls_map, force_nodes) check_type = ('scheduler_hints' in spec_obj and spec_obj.scheduler_hints.get('_nova_check_type')) if not check_type and (force_hosts or force_nodes): # NOTE(deva,dansmith): Skip filters when forcing host or node # unless we've declared the internal check type flag, in which # case we're asking for a specific host and for filtering to # be done. if name_to_cls_map: return name_to_cls_map.values() else: return [] hosts = name_to_cls_map.values() return self.filter_handler.get_filtered_objects(self.enabled_filters, hosts, spec_obj, index) def get_weighed_hosts(self, hosts, spec_obj): """Weigh the hosts.""" return self.weight_handler.get_weighed_objects(self.weighers, hosts, spec_obj) def _get_computes_for_cells(self, context, cells, compute_uuids): """Get a tuple of compute node and service information. :param context: request context :param cells: list of CellMapping objects :param compute_uuids: Optional list of ComputeNode UUIDs. If this is None, all compute nodes from each specified cell will be returned, otherwise only the ComputeNode objects with a UUID in the list of UUIDs in any given cell is returned. If this is an empty list, the returned compute_nodes tuple item will be an empty dict. 
Returns a tuple (compute_nodes, services) where: - compute_nodes is cell-uuid keyed dict of compute node lists - services is a dict of services indexed by hostname """ def targeted_operation(cctxt): services = objects.ServiceList.get_by_binary( cctxt, 'nova-compute', include_disabled=True) if compute_uuids is None: return services, objects.ComputeNodeList.get_all(cctxt) else: return services, objects.ComputeNodeList.get_all_by_uuids( cctxt, compute_uuids) timeout = context_module.CELL_TIMEOUT results = context_module.scatter_gather_cells(context, cells, timeout, targeted_operation) compute_nodes = collections.defaultdict(list) services = {} for cell_uuid, result in results.items(): if isinstance(result, Exception): LOG.warning('Failed to get computes for cell %s', cell_uuid) elif result is context_module.did_not_respond_sentinel: LOG.warning('Timeout getting computes for cell %s', cell_uuid) else: _services, _compute_nodes = result compute_nodes[cell_uuid].extend(_compute_nodes) services.update({service.host: service for service in _services}) return compute_nodes, services def _get_cell_by_host(self, ctxt, host): '''Get CellMapping object of a cell the given host belongs to.''' try: host_mapping = objects.HostMapping.get_by_host(ctxt, host) return host_mapping.cell_mapping except exception.HostMappingNotFound: LOG.warning('No host-to-cell mapping found for selected ' 'host %(host)s.', {'host': host}) return def get_compute_nodes_by_host_or_node(self, ctxt, host, node, cell=None): '''Get compute nodes from given host or node''' def return_empty_list_for_not_found(func): def wrapper(*args, **kwargs): try: ret = func(*args, **kwargs) except exception.NotFound: ret = objects.ComputeNodeList() return ret return wrapper @return_empty_list_for_not_found def _get_by_host_and_node(ctxt): compute_node = objects.ComputeNode.get_by_host_and_nodename( ctxt, host, node) return objects.ComputeNodeList(objects=[compute_node]) @return_empty_list_for_not_found def _get_by_host(ctxt): return objects.ComputeNodeList.get_all_by_host(ctxt, host) @return_empty_list_for_not_found def _get_by_node(ctxt): compute_node = objects.ComputeNode.get_by_nodename(ctxt, node) return objects.ComputeNodeList(objects=[compute_node]) if host and node: target_fnc = _get_by_host_and_node elif host: target_fnc = _get_by_host else: target_fnc = _get_by_node if host and not cell: # optimization not to issue queries to every cell DB cell = self._get_cell_by_host(ctxt, host) cells = [cell] if cell else self.enabled_cells timeout = context_module.CELL_TIMEOUT nodes_by_cell = context_module.scatter_gather_cells( ctxt, cells, timeout, target_fnc) # Only one cell should have values for the compute nodes # so we get them here, or return an empty list if no cell # has a value; be sure to filter out cell failures. nodes = next( (nodes for nodes in nodes_by_cell.values() if nodes and not context_module.is_cell_failure_sentinel(nodes)), objects.ComputeNodeList()) return nodes def refresh_cells_caches(self): # NOTE(tssurya): This function is called from the scheduler manager's # reset signal handler and also upon startup of the scheduler. context = context_module.get_admin_context() temp_cells = objects.CellMappingList.get_all(context) # NOTE(tssurya): filtering cell0 from the list since it need # not be considered for scheduling. 
for c in temp_cells: if c.is_cell0(): temp_cells.objects.remove(c) # once its done break for optimization break # NOTE(danms, tssurya): global dict, keyed by cell uuid, of cells # cached which will be refreshed every time a SIGHUP is sent to the # scheduler. self.cells = {cell.uuid: cell for cell in temp_cells} LOG.debug('Found %(count)i cells: %(cells)s', {'count': len(self.cells), 'cells': ', '.join(self.cells)}) # NOTE(tssurya): Global cache of only the enabled cells. This way # scheduling is limited only to the enabled cells. However this # cache will be refreshed every time a cell is disabled or enabled # or when a new cell is created as long as a SIGHUP signal is sent # to the scheduler. self.enabled_cells = [c for c in temp_cells if not c.disabled] # Filtering the disabled cells only for logging purposes. if LOG.isEnabledFor(logging.DEBUG): disabled_cells = [c for c in temp_cells if c.disabled] LOG.debug('Found %(count)i disabled cells: %(cells)s', {'count': len(disabled_cells), 'cells': ', '.join( [c.identity for c in disabled_cells])}) # Dict, keyed by host name, to cell UUID to be used to look up the # cell a particular host is in (used with self.cells). self.host_to_cell_uuid = {} def get_host_states_by_uuids(self, context, compute_uuids, spec_obj): if not self.cells: LOG.warning("No cells were found") # Restrict to a single cell if and only if the request spec has a # requested cell and allow_cross_cell_move=False. if (spec_obj and 'requested_destination' in spec_obj and spec_obj.requested_destination and 'cell' in spec_obj.requested_destination and not spec_obj.requested_destination.allow_cross_cell_move): only_cell = spec_obj.requested_destination.cell else: only_cell = None if only_cell: cells = [only_cell] else: cells = self.enabled_cells compute_nodes, services = self._get_computes_for_cells( context, cells, compute_uuids=compute_uuids) return self._get_host_states(context, compute_nodes, services) def _get_host_states(self, context, compute_nodes, services): """Returns a generator over HostStates given a list of computes. Also updates the HostStates internal mapping for the HostManager. """ # Get resource usage across the available compute nodes: host_state_map = {} seen_nodes = set() for cell_uuid, computes in compute_nodes.items(): for compute in computes: service = services.get(compute.host) if not service: LOG.warning( "No compute service record found for host %(host)s", {'host': compute.host}) continue host = compute.host node = compute.hypervisor_hostname state_key = (host, node) host_state = host_state_map.get(state_key) if not host_state: host_state = self.host_state_cls(host, node, cell_uuid, compute=compute) host_state_map[state_key] = host_state # We force to update the aggregates info each time a # new request comes in, because some changes on the # aggregates could have been happening after setting # this field for the first time host_state.update(compute, dict(service), self._get_aggregates_info(host), self._get_instance_info(context, compute)) seen_nodes.add(state_key) return (host_state_map[host] for host in seen_nodes) def _get_aggregates_info(self, host): return [self.aggs_by_id[agg_id] for agg_id in self.host_aggregates_map[host]] def _get_cell_mapping_for_host(self, context, host_name): """Finds the CellMapping for a particular host name Relies on a cache to quickly fetch the CellMapping if we have looked up this host before, otherwise gets the CellMapping via the HostMapping record for the given host name. 
:param context: nova auth request context :param host_name: compute service host name :returns: CellMapping object :raises: HostMappingNotFound if the host is not mapped to a cell """ # Check to see if we have the host in our cache. if host_name in self.host_to_cell_uuid: cell_uuid = self.host_to_cell_uuid[host_name] if cell_uuid in self.cells: return self.cells[cell_uuid] # Something is wrong so log a warning and just fall through to # lookup the HostMapping. LOG.warning('Host %s is expected to be in cell %s but that cell ' 'uuid was not found in our cache. The service may ' 'need to be restarted to refresh the cache.', host_name, cell_uuid) # We have not cached this host yet so get the HostMapping, cache the # result and return the CellMapping. hm = objects.HostMapping.get_by_host(context, host_name) cell_mapping = hm.cell_mapping self.host_to_cell_uuid[host_name] = cell_mapping.uuid return cell_mapping def _get_instances_by_host(self, context, host_name): try: cm = self._get_cell_mapping_for_host(context, host_name) except exception.HostMappingNotFound: # It's possible to hit this when the compute service first starts # up and casts to update_instance_info with an empty list but # before the host is mapped in the API database. LOG.info('Host mapping not found for host %s. Not tracking ' 'instance info for this host.', host_name) return {} with context_module.target_cell(context, cm) as cctxt: uuids = objects.InstanceList.get_uuids_by_host(cctxt, host_name) # Putting the context in the otherwise fake Instance object at # least allows out of tree filters to lazy-load fields. return {uuid: objects.Instance(cctxt, uuid=uuid) for uuid in uuids} def _get_instance_info(self, context, compute): """Gets the host instance info from the compute host. Some sites may disable ``track_instance_changes`` for performance or isolation reasons. In either of these cases, there will either be no information for the host, or the 'updated' value for that host dict will be False. In those cases, we need to grab the current InstanceList instead of relying on the version in _instance_info. """ host_name = compute.host host_info = self._instance_info.get(host_name) if host_info and host_info.get("updated"): inst_dict = host_info["instances"] else: # Updates aren't flowing from nova-compute. inst_dict = self._get_instances_by_host(context, host_name) return inst_dict def _recreate_instance_info(self, context, host_name): """Get the InstanceList for the specified host, and store it in the _instance_info dict. """ inst_dict = self._get_instances_by_host(context, host_name) host_info = self._instance_info[host_name] = {} host_info["instances"] = inst_dict host_info["updated"] = False @utils.synchronized(HOST_INSTANCE_SEMAPHORE) def update_instance_info(self, context, host_name, instance_info): """Receives an InstanceList object from a compute node. This method receives information from a compute node when it starts up, or when its instances have changed, and updates its view of hosts and instances with it. """ host_info = self._instance_info.get(host_name) if host_info: inst_dict = host_info.get("instances") for instance in instance_info.objects: # Overwrite the entry (if any) with the new info. inst_dict[instance.uuid] = instance host_info["updated"] = True else: instances = instance_info.objects if len(instances) > 1: # This is a host sending its full instance list, so use it. 
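                # Illustrative sketch (hypothetical values, not part of the
                # original code) of what the entry built below looks like:
                #   self._instance_info['compute-1'] = {
                #       'instances': {uuid_a: Instance, uuid_b: Instance},
                #       'updated': True,
                #   }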
host_info = self._instance_info[host_name] = {} host_info["instances"] = {instance.uuid: instance for instance in instances} host_info["updated"] = True else: self._recreate_instance_info(context, host_name) LOG.info("Received an update from an unknown host '%s'. " "Re-created its InstanceList.", host_name) @utils.synchronized(HOST_INSTANCE_SEMAPHORE) def delete_instance_info(self, context, host_name, instance_uuid): """Receives the UUID from a compute node when one of its instances is terminated. The instance in the local view of the host's instances is removed. """ host_info = self._instance_info.get(host_name) if host_info: inst_dict = host_info["instances"] # Remove the existing Instance object, if any inst_dict.pop(instance_uuid, None) host_info["updated"] = True else: self._recreate_instance_info(context, host_name) LOG.info("Received a delete update from an unknown host '%s'. " "Re-created its InstanceList.", host_name) @utils.synchronized(HOST_INSTANCE_SEMAPHORE) def sync_instance_info(self, context, host_name, instance_uuids): """Receives the uuids of the instances on a host. This method is periodically called by the compute nodes, which send a list of all the UUID values for the instances on that node. This is used by the scheduler's HostManager to detect when its view of the compute node's instances is out of sync. """ host_info = self._instance_info.get(host_name) if host_info: local_set = set(host_info["instances"].keys()) compute_set = set(instance_uuids) if not local_set == compute_set: self._recreate_instance_info(context, host_name) LOG.info("The instance sync for host '%s' did not match. " "Re-created its InstanceList.", host_name) return host_info["updated"] = True LOG.debug("Successfully synced instances from host '%s'.", host_name) else: self._recreate_instance_info(context, host_name) LOG.info("Received a sync request from an unknown host '%s'. " "Re-created its InstanceList.", host_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/manager.py0000664000175000017500000011305400000000000017473 0ustar00zuulzuul00000000000000# Copyright (c) 2010 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Scheduler Service """ import collections import copy import random from keystoneauth1 import exceptions as ks_exc from oslo_log import log as logging import oslo_messaging as messaging from oslo_serialization import jsonutils from oslo_service import periodic_task from nova.compute import utils as compute_utils import nova.conf from nova import exception from nova.i18n import _ from nova import manager from nova import objects from nova.objects import fields as fields_obj from nova.objects import host_mapping as host_mapping_obj from nova.objects import service as obj_service from nova import quota from nova import rpc from nova.scheduler.client import report from nova.scheduler import host_manager from nova.scheduler import request_filter from nova.scheduler import utils from nova import servicegroup CONF = nova.conf.CONF LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS HOST_MAPPING_EXISTS_WARNING = False class SchedulerManager(manager.Manager): """Chooses a host to run instances on. Filters and weighs compute hosts to determine the best host to schedule an instance to. """ target = messaging.Target(version='4.5') _sentinel = object() def __init__(self, *args, **kwargs): self.host_manager = host_manager.HostManager() self.servicegroup_api = servicegroup.API() self.notifier = rpc.get_notifier('scheduler') self._placement_client = None try: # Test our placement client during initialization self.placement_client except (ks_exc.EndpointNotFound, ks_exc.DiscoveryFailure, ks_exc.RequestTimeout, ks_exc.GatewayTimeout, ks_exc.ConnectFailure) as e: # Non-fatal, likely transient (although not definitely); # continue startup but log the warning so that when things # fail later, it will be clear why we can not do certain # things. LOG.warning('Unable to initialize placement client (%s); ' 'Continuing with startup, but scheduling ' 'will not be possible.', e) except (ks_exc.MissingAuthPlugin, ks_exc.Unauthorized) as e: # This is almost definitely fatal mis-configuration. The # Unauthorized error might be transient, but it is # probably reasonable to consider it fatal. 
LOG.error('Fatal error initializing placement client; ' 'config is incorrect or incomplete: %s', e) raise except Exception as e: # Unknown/unexpected errors here are fatal LOG.error('Fatal error initializing placement client: %s', e) raise super().__init__(service_name='scheduler', *args, **kwargs) @property def placement_client(self): return report.report_client_singleton() @periodic_task.periodic_task( spacing=CONF.scheduler.discover_hosts_in_cells_interval, run_immediately=True) def _discover_hosts_in_cells(self, context): services = obj_service.ServiceList.get_by_binary( context, 'nova-scheduler') leader = sorted( [service.host for service in services if self.servicegroup_api.service_is_up(service)])[0] if CONF.host != leader: LOG.debug( f"Current leader is {leader}, " f"skipping discover hosts on {CONF.host}") return global HOST_MAPPING_EXISTS_WARNING try: host_mappings = host_mapping_obj.discover_hosts(context) if host_mappings: LOG.info( 'Discovered %(count)i new hosts: %(hosts)s', { 'count': len(host_mappings), 'hosts': ','.join([ '%s:%s' % (hm.cell_mapping.name, hm.host) for hm in host_mappings ]), }, ) except exception.HostMappingExists as exp: msg = ( 'This periodic task should only be enabled if discover hosts ' 'is not run via nova-manage, schedulers: %s' % str(exp) ) if not HOST_MAPPING_EXISTS_WARNING: LOG.warning(msg) HOST_MAPPING_EXISTS_WARNING = True else: LOG.debug(msg) def reset(self): # NOTE(tssurya): This is a SIGHUP handler which will reset the cells # and enabled cells caches in the host manager. So every time an # existing cell is disabled or enabled or a new cell is created, a # SIGHUP signal has to be sent to the scheduler for proper scheduling. # NOTE(mriedem): Similarly there is a host-to-cell cache which should # be reset if a host is deleted from a cell and "discovered" in another # cell. self.host_manager.refresh_cells_caches() @messaging.expected_exceptions(exception.NoValidHost) def select_destinations( self, context, request_spec=None, filter_properties=None, spec_obj=_sentinel, instance_uuids=None, return_objects=False, return_alternates=False, ): """Returns destinations(s) best suited for this RequestSpec. Starting in Queens, this method returns a list of lists of Selection objects, with one list for each requested instance. Each instance's list will have its first element be the Selection object representing the chosen host for the instance, and if return_alternates is True, zero or more alternate objects that could also satisfy the request. The number of alternates is determined by the configuration option `CONF.scheduler.max_attempts`. The ability of a calling method to handle this format of returned destinations is indicated by a True value in the parameter `return_objects`. However, there may still be some older conductors in a deployment that have not been updated to Queens, and in that case return_objects will be False, and the result will be a list of dicts with 'host', 'nodename' and 'limits' as keys. When return_objects is False, the value of return_alternates has no effect. The reason there are two kwarg parameters return_objects and return_alternates is so we can differentiate between callers that understand the Selection object format but *don't* want to get alternate hosts, as is the case with the conductors that handle certain move operations. """ LOG.debug("Starting to schedule for instances: %s", instance_uuids) # TODO(sbauza): Change the method signature to only accept a spec_obj # argument once API v5 is provided. 
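        # Illustrative sketch of the two return shapes described in the
        # docstring above (hypothetical hosts, not part of the original
        # code). With return_objects=True and return_alternates=True, two
        # requested instances might yield:
        #   [[Selection(host1), Selection(alt1a), Selection(alt1b)],
        #    [Selection(host2), Selection(alt2a)]]
        # With return_objects=False each inner list collapses to a single
        # legacy dict such as {'host': 'host1', 'nodename': 'node1',
        # 'limits': {}}.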
if spec_obj is self._sentinel: spec_obj = objects.RequestSpec.from_primitives( context, request_spec, filter_properties) is_rebuild = utils.request_is_rebuild(spec_obj) alloc_reqs_by_rp_uuid, provider_summaries, allocation_request_version \ = None, None, None if not is_rebuild: try: request_filter.process_reqspec(context, spec_obj) except exception.RequestFilterFailed as e: raise exception.NoValidHost(reason=e.message) resources = utils.resources_from_request_spec( context, spec_obj, self.host_manager, enable_pinning_translate=True) res = self.placement_client.get_allocation_candidates( context, resources) if res is None: # We have to handle the case that we failed to connect to the # Placement service and the safe_connect decorator on # get_allocation_candidates returns None. res = None, None, None alloc_reqs, provider_summaries, allocation_request_version = res alloc_reqs = alloc_reqs or [] provider_summaries = provider_summaries or {} # if the user requested pinned CPUs, we make a second query to # placement for allocation candidates using VCPUs instead of PCPUs. # This is necessary because users might not have modified all (or # any) of their compute nodes meaning said compute nodes will not # be reporting PCPUs yet. This is okay to do because the # NUMATopologyFilter (scheduler) or virt driver (compute node) will # weed out hosts that are actually using new style configuration # but simply don't have enough free PCPUs (or any PCPUs). # TODO(stephenfin): Remove when we drop support for 'vcpu_pin_set' if ( resources.cpu_pinning_requested and not CONF.workarounds.disable_fallback_pcpu_query ): LOG.debug( 'Requesting fallback allocation candidates with ' 'VCPU instead of PCPU' ) resources = utils.resources_from_request_spec( context, spec_obj, self.host_manager, enable_pinning_translate=False) res = self.placement_client.get_allocation_candidates( context, resources) if res: # merge the allocation requests and provider summaries from # the two requests together alloc_reqs_fallback, provider_summaries_fallback, _ = res alloc_reqs.extend(alloc_reqs_fallback) provider_summaries.update(provider_summaries_fallback) if not alloc_reqs: LOG.info( "Got no allocation candidates from the Placement API. " "This could be due to insufficient resources or a " "temporary occurrence as compute nodes start up." ) raise exception.NoValidHost(reason="") # Build a dict of lists of allocation requests, keyed by # provider UUID, so that when we attempt to claim resources for # a host, we can grab an allocation request easily alloc_reqs_by_rp_uuid = collections.defaultdict(list) for ar in alloc_reqs: for rp_uuid in ar['allocations']: alloc_reqs_by_rp_uuid[rp_uuid].append(ar) # Only return alternates if both return_objects and return_alternates # are True. return_alternates = return_alternates and return_objects selections = self._select_destinations( context, spec_obj, instance_uuids, alloc_reqs_by_rp_uuid, provider_summaries, allocation_request_version, return_alternates) # If `return_objects` is False, we need to convert the selections to # the older format, which is a list of host state dicts. 
if not return_objects: selection_dicts = [sel[0].to_dict() for sel in selections] return jsonutils.to_primitive(selection_dicts) return selections def _select_destinations( self, context, spec_obj, instance_uuids, alloc_reqs_by_rp_uuid, provider_summaries, allocation_request_version=None, return_alternates=False, ): self.notifier.info( context, 'scheduler.select_destinations.start', {'request_spec': spec_obj.to_legacy_request_spec_dict()}) compute_utils.notify_about_scheduler_action( context=context, request_spec=spec_obj, action=fields_obj.NotificationAction.SELECT_DESTINATIONS, phase=fields_obj.NotificationPhase.START) # Only return alternates if both return_objects and return_alternates # are True. selections = self._schedule( context, spec_obj, instance_uuids, alloc_reqs_by_rp_uuid, provider_summaries, allocation_request_version, return_alternates) self.notifier.info( context, 'scheduler.select_destinations.end', {'request_spec': spec_obj.to_legacy_request_spec_dict()}) compute_utils.notify_about_scheduler_action( context=context, request_spec=spec_obj, action=fields_obj.NotificationAction.SELECT_DESTINATIONS, phase=fields_obj.NotificationPhase.END) return selections def _schedule( self, context, spec_obj, instance_uuids, alloc_reqs_by_rp_uuid, provider_summaries, allocation_request_version=None, return_alternates=False ): """Returns a list of lists of Selection objects. :param context: The RequestContext object :param spec_obj: The RequestSpec object :param instance_uuids: List of instance UUIDs to place or move. :param alloc_reqs_by_rp_uuid: Optional dict, keyed by resource provider UUID, of the allocation_requests that may be used to claim resources against matched hosts. If None, indicates either the placement API wasn't reachable or that there were no allocation_requests returned by the placement API. If the latter, the provider_summaries will be an empty dict, not None. :param provider_summaries: Optional dict, keyed by resource provider UUID, of information that will be used by the filters/weighers in selecting matching hosts for a request. If None, indicates that we should grab all compute node information locally and that the Placement API is not used. If an empty dict, indicates the Placement API returned no potential matches for the requested resources. :param allocation_request_version: The microversion used to request the allocations. :param return_alternates: When True, zero or more alternate hosts are returned with each selected host. The number of alternates is determined by the configuration option `CONF.scheduler.max_attempts`. """ elevated = context.elevated() # Find our local list of acceptable hosts by repeatedly # filtering and weighing our options. Each time we choose a # host, we virtually consume resources on it so subsequent # selections can adjust accordingly. def hosts_with_alloc_reqs(hosts_gen): """Extend the HostState objects returned by the generator with the allocation requests of that host """ for host in hosts_gen: host.allocation_candidates = copy.deepcopy( alloc_reqs_by_rp_uuid[host.uuid]) yield host # Note: remember, we are using a generator-iterator here. So only # traverse this list once. This can bite you if the hosts # are being scanned in a filter or weighing function. 
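        # A minimal illustration of that caveat (hypothetical names, not
        # part of the original code): a generator is exhausted after one
        # pass,
        #   gen = (h for h in [host_a, host_b])
        #   list(gen)  # -> [host_a, host_b]
        #   list(gen)  # -> []  (a second scan sees nothing)
        # so filters and weighers must not iterate the hosts twice.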
hosts = self._get_all_host_states( elevated, spec_obj, provider_summaries) # alloc_reqs_by_rp_uuid is None during rebuild, so this mean we cannot # run filters that are using allocation candidates during rebuild if alloc_reqs_by_rp_uuid is not None: # wrap the generator to extend the HostState objects with the # allocation requests for that given host. This is needed to # support scheduler filters filtering on allocation candidates. hosts = hosts_with_alloc_reqs(hosts) # NOTE(sbauza): The RequestSpec.num_instances field contains the number # of instances created when the RequestSpec was used to first boot some # instances. This is incorrect when doing a move or resize operation, # so prefer the length of instance_uuids unless it is None. num_instances = (len(instance_uuids) if instance_uuids else spec_obj.num_instances) # For each requested instance, we want to return a host whose resources # for the instance have been claimed, along with zero or more # alternates. These alternates will be passed to the cell that the # selected host is in, so that if for some reason the build fails, the # cell conductor can retry building the instance on one of these # alternates instead of having to simply fail. The number of alternates # is based on CONF.scheduler.max_attempts; note that if there are not # enough filtered hosts to provide the full number of alternates, the # list of hosts may be shorter than this amount. num_alts = CONF.scheduler.max_attempts - 1 if return_alternates else 0 if instance_uuids is None or alloc_reqs_by_rp_uuid is None: # If there was a problem communicating with the # placement API, alloc_reqs_by_rp_uuid will be None, so we skip # claiming in that case as well. In the case where instance_uuids # is None, that indicates an older conductor, so we need to return # the objects without alternates. They will be converted back to # the older dict format representing HostState objects. # TODO(stephenfin): Remove this when we bump scheduler the RPC API # version to 5.0 # NOTE(gibi): We cannot remove this branch as it is actively used # when nova calls the scheduler during rebuild (not evacuate) to # check if the current host is still good for the new image used # for the rebuild. In this case placement cannot be used to # generate candidates as that would require space on the current # compute for double allocation. So no allocation candidates for # rebuild and therefore alloc_reqs_by_rp_uuid is None return self._legacy_find_hosts( context, num_instances, spec_obj, hosts, num_alts, instance_uuids=instance_uuids) # A list of the instance UUIDs that were successfully claimed against # in the placement API. If we are not able to successfully claim for # all involved instances, we use this list to remove those allocations # before returning claimed_instance_uuids = [] # The list of hosts that have been selected (and claimed). claimed_hosts = [] # The allocation request allocated on the given claimed host claimed_alloc_reqs = [] for num, instance_uuid in enumerate(instance_uuids): # In a multi-create request, the first request spec from the list # is passed to the scheduler and that request spec's instance_uuid # might not be the same as the instance we're processing, so we # update the instance_uuid in that case before passing the request # spec to filters since at least one filter # (ServerGroupAntiAffinityFilter) depends on that information being # accurate. spec_obj.instance_uuid = instance_uuid # Reset the field so it's not persisted accidentally. 
spec_obj.obj_reset_changes(['instance_uuid']) hosts = self._get_sorted_hosts(spec_obj, hosts, num) if not hosts: # NOTE(jaypipes): If we get here, that means not all instances # in instance_uuids were able to be matched to a selected host. # Any allocations will be cleaned up in the # _ensure_sufficient_hosts() call. break # Attempt to claim the resources against one or more resource # providers, looping over the sorted list of possible hosts # looking for an allocation_request that contains that host's # resource provider UUID claimed_host = None for host in hosts: if not host.allocation_candidates: LOG.debug( "The nova scheduler removed every allocation candidate" "for host %s so this host was skipped.", host ) continue # TODO(jaypipes): Loop through all allocation_requests instead # of just trying the first one. For now, since we'll likely # want to order the allocation_requests in the future based on # information in the provider summaries, we'll just try to # claim resources using the first allocation_request alloc_req = host.allocation_candidates[0] if utils.claim_resources( elevated, self.placement_client, spec_obj, instance_uuid, alloc_req, allocation_request_version=allocation_request_version, ): claimed_host = host break if claimed_host is None: # We weren't able to claim resources in the placement API # for any of the sorted hosts identified. So, clean up any # successfully-claimed resources for prior instances in # this request and return an empty list which will cause # select_destinations() to raise NoValidHost LOG.debug("Unable to successfully claim against any host.") break claimed_instance_uuids.append(instance_uuid) claimed_hosts.append(claimed_host) claimed_alloc_reqs.append(alloc_req) # update the provider mapping in the request spec based # on the allocated candidate as the _consume_selected_host depends # on this information to temporally consume PCI devices tracked in # placement for request_group in spec_obj.requested_resources: request_group.provider_uuids = alloc_req[ 'mappings'][request_group.requester_id] # Now consume the resources so the filter/weights will change for # the next instance. self._consume_selected_host( claimed_host, spec_obj, instance_uuid=instance_uuid) # Check if we were able to fulfill the request. If not, this call will # raise a NoValidHost exception. self._ensure_sufficient_hosts( context, claimed_hosts, num_instances, claimed_instance_uuids) # We have selected and claimed hosts for each instance along with a # claimed allocation request. Now we need to find alternates for each # host. return self._get_alternate_hosts( claimed_hosts, spec_obj, hosts, num, num_alts, alloc_reqs_by_rp_uuid, allocation_request_version, claimed_alloc_reqs, ) def _ensure_sufficient_hosts( self, context, hosts, required_count, claimed_uuids=None, ): """Checks that we have selected a host for each requested instance. If not, log this failure, remove allocations for any claimed instances, and raise a NoValidHost exception. """ if len(hosts) == required_count: # We have enough hosts. return if claimed_uuids: self._cleanup_allocations(context, claimed_uuids) # NOTE(Rui Chen): If multiple creates failed, set the updated time # of selected HostState to None so that these HostStates are # refreshed according to database in next schedule, and release # the resource consumed by instance in the process of selecting # host. 
for host in hosts: host.updated = None # Log the details but don't put those into the reason since # we don't want to give away too much information about our # actual environment. LOG.debug( 'There are %(hosts)d hosts available but ' '%(required_count)d instances requested to build.', {'hosts': len(hosts), 'required_count': required_count}) reason = _('There are not enough hosts available.') raise exception.NoValidHost(reason=reason) def _cleanup_allocations(self, context, instance_uuids): """Removes allocations for the supplied instance UUIDs.""" if not instance_uuids: return LOG.debug("Cleaning up allocations for %s", instance_uuids) for uuid in instance_uuids: self.placement_client.delete_allocation_for_instance( context, uuid, force=True) def _legacy_find_hosts( self, context, num_instances, spec_obj, hosts, num_alts, instance_uuids=None, ): """Find hosts without invoking placement. We may not be able to claim if the Placement service is not reachable. Additionally, we may be working with older conductors that don't pass in instance_uuids. """ # The list of hosts selected for each instance selected_hosts = [] for num in range(num_instances): instance_uuid = instance_uuids[num] if instance_uuids else None if instance_uuid: # Update the RequestSpec.instance_uuid before sending it to # the filters in case we're doing a multi-create request, but # don't persist the change. spec_obj.instance_uuid = instance_uuid spec_obj.obj_reset_changes(['instance_uuid']) hosts = self._get_sorted_hosts(spec_obj, hosts, num) if not hosts: # No hosts left, so break here, and the # _ensure_sufficient_hosts() call below will handle this. break selected_host = hosts[0] selected_hosts.append(selected_host) self._consume_selected_host( selected_host, spec_obj, instance_uuid=instance_uuid) # Check if we were able to fulfill the request. If not, this call will # raise a NoValidHost exception. self._ensure_sufficient_hosts(context, selected_hosts, num_instances) # This the overall list of values to be returned. There will be one # item per instance, and each item will be a list of Selection objects # representing the selected host along with zero or more alternates # from the same cell. return self._get_alternate_hosts( selected_hosts, spec_obj, hosts, num, num_alts) @staticmethod def _consume_selected_host(selected_host, spec_obj, instance_uuid=None): LOG.debug( "Selected host: %(host)s", {'host': selected_host}, instance_uuid=instance_uuid) selected_host.consume_from_request(spec_obj) # If we have a server group, add the selected host to it for the # (anti-)affinity filters to filter out hosts for subsequent instances # in a multi-create request. if spec_obj.instance_group is not None: spec_obj.instance_group.hosts.append(selected_host.host) # hosts has to be not part of the updates when saving spec_obj.instance_group.obj_reset_changes(['hosts']) # The ServerGroupAntiAffinityFilter also relies on # HostState.instances being accurate within a multi-create request. if instance_uuid and instance_uuid not in selected_host.instances: # Set a stub since ServerGroupAntiAffinityFilter only cares # about the keys. selected_host.instances[instance_uuid] = objects.Instance( uuid=instance_uuid) def _get_alternate_hosts( self, selected_hosts, spec_obj, hosts, index, num_alts, alloc_reqs_by_rp_uuid=None, allocation_request_version=None, selected_alloc_reqs=None, ): """Generate the main Selection and possible alternate Selection objects for each "instance". :param selected_hosts: This is a list of HostState objects. 
Each HostState represents the main selection for a given instance being scheduled (we can have multiple instances during multi create). :param selected_alloc_reqs: This is a list of allocation requests that are already allocated in placement for the main Selection for each instance. This list is matching with selected_hosts by index. So for the first instance the selected host is selected_host[0] and the already allocated placement candidate is selected_alloc_reqs[0]. """ # We only need to filter/weigh the hosts again if we're dealing with # more than one instance and are going to be picking alternates. if index > 0 and num_alts > 0: # The selected_hosts have all had resources 'claimed' via # _consume_selected_host, so we need to filter/weigh and sort the # hosts again to get an accurate count for alternates. hosts = self._get_sorted_hosts(spec_obj, hosts, index) # This is the overall list of values to be returned. There will be one # item per instance, and each item will be a list of Selection objects # representing the selected host along with alternates from the same # cell. selections_to_return = [] for i, selected_host in enumerate(selected_hosts): # This is the list of hosts for one particular instance. if alloc_reqs_by_rp_uuid: selected_alloc_req = selected_alloc_reqs[i] else: selected_alloc_req = None selection = objects.Selection.from_host_state( selected_host, allocation_request=selected_alloc_req, allocation_request_version=allocation_request_version) selected_plus_alts = [selection] cell_uuid = selected_host.cell_uuid # This will populate the alternates with many of the same unclaimed # hosts. This is OK, as it should be rare for a build to fail. And # if there are not enough hosts to fully populate the alternates, # it's fine to return fewer than we'd like. Note that we exclude # any claimed host from consideration as an alternate because it # will have had its resources reduced and will have a much lower # chance of being able to fit another instance on it. for host in hosts: if len(selected_plus_alts) >= num_alts + 1: break # TODO(gibi): In theory we could generate alternatives on the # same host if that host has different possible allocation # candidates for the request. But we don't do that today if host.cell_uuid == cell_uuid and host not in selected_hosts: if alloc_reqs_by_rp_uuid is not None: if not host.allocation_candidates: msg = ("A host state with uuid = '%s' that did " "not have any remaining allocation_request " "was encountered while scheduling. This " "host was skipped.") LOG.debug(msg, host.uuid) continue # TODO(jaypipes): Loop through all allocation_requests # instead of just trying the first one. For now, since # we'll likely want to order the allocation_requests in # the future based on information in the provider # summaries, we'll just try to claim resources using # the first allocation_request # NOTE(gibi): we are using, and reusing, allocation # candidates for alternatives here. This is OK as # these candidates are not yet allocated in placement # and we don't know if an alternate will ever be used. # To increase our success we could try to use different # candidate for different alternative though. 
alloc_req = host.allocation_candidates[0] alt_selection = objects.Selection.from_host_state( host, alloc_req, allocation_request_version) else: alt_selection = objects.Selection.from_host_state(host) selected_plus_alts.append(alt_selection) selections_to_return.append(selected_plus_alts) return selections_to_return def _get_sorted_hosts(self, spec_obj, host_states, index): """Returns a list of HostState objects that match the required scheduling constraints for the request spec object and have been sorted according to the weighers. """ filtered_hosts = self.host_manager.get_filtered_hosts(host_states, spec_obj, index) LOG.debug("Filtered %(hosts)s", {'hosts': filtered_hosts}) if not filtered_hosts: return [] weighed_hosts = self.host_manager.get_weighed_hosts( filtered_hosts, spec_obj) if CONF.filter_scheduler.shuffle_best_same_weighed_hosts: # NOTE(pas-ha) Randomize best hosts, relying on weighed_hosts # being already sorted by weight in descending order. # This decreases possible contention and rescheduling attempts # when there is a large number of hosts having the same best # weight, especially so when host_subset_size is 1 (default) best_hosts = [ w for w in weighed_hosts if w.weight == weighed_hosts[0].weight ] random.shuffle(best_hosts) weighed_hosts = best_hosts + weighed_hosts[len(best_hosts):] # Log the weighed hosts before stripping off the wrapper class so that # the weight value gets logged. LOG.debug("Weighed %(hosts)s", {'hosts': weighed_hosts}) # Strip off the WeighedHost wrapper class... weighed_hosts = [h.obj for h in weighed_hosts] # We randomize the first element in the returned list to alleviate # congestion where the same host is consistently selected among # numerous potential hosts for similar request specs. host_subset_size = CONF.filter_scheduler.host_subset_size if host_subset_size < len(weighed_hosts): weighed_subset = weighed_hosts[0:host_subset_size] else: weighed_subset = weighed_hosts chosen_host = random.choice(weighed_subset) weighed_hosts.remove(chosen_host) return [chosen_host] + weighed_hosts def _get_all_host_states(self, context, spec_obj, provider_summaries): """Template method, so a subclass can implement caching.""" # The provider_summaries variable will be an empty dict when the # Placement API found no providers that match the requested # constraints, which in turn makes compute_uuids an empty list and # get_host_states_by_uuids will return an empty generator-iterator # also, which will eventually result in a NoValidHost error. compute_uuids = None if provider_summaries is not None: compute_uuids = list(provider_summaries.keys()) return self.host_manager.get_host_states_by_uuids( context, compute_uuids, spec_obj) def update_aggregates(self, ctxt, aggregates): """Updates HostManager internal aggregates information. :param aggregates: Aggregate(s) to update :type aggregates: :class:`nova.objects.Aggregate` or :class:`nova.objects.AggregateList` """ # NOTE(sbauza): We're dropping the user context now as we don't need it self.host_manager.update_aggregates(aggregates) def delete_aggregate(self, ctxt, aggregate): """Deletes HostManager internal information about a specific aggregate. 
        :param aggregate: Aggregate to delete
        :type aggregate: :class:`nova.objects.Aggregate`
        """
        # NOTE(sbauza): We're dropping the user context now as we don't need it
        self.host_manager.delete_aggregate(aggregate)

    def update_instance_info(self, context, host_name, instance_info):
        """Receives information about changes to a host's instances, and
        updates the HostManager with that information.
        """
        self.host_manager.update_instance_info(
            context, host_name, instance_info)

    def delete_instance_info(self, context, host_name, instance_uuid):
        """Receives information about the deletion of one of a host's
        instances, and updates the HostManager with that information.
        """
        self.host_manager.delete_instance_info(
            context, host_name, instance_uuid)

    def sync_instance_info(self, context, host_name, instance_uuids):
        """Receives a sync request from a host, and passes it on to the
        HostManager.
        """
        self.host_manager.sync_instance_info(
            context, host_name, instance_uuids)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/request_filter.py0000664000175000017500000004513500000000000021112 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools

import os_traits
from oslo_log import log as logging
from oslo_utils import timeutils

import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.network import neutron
from nova import objects
from nova.scheduler.client import report
from nova.scheduler import utils
from nova.virt import hardware

CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)

TENANT_METADATA_KEY = 'filter_tenant_id'


def trace_request_filter(fn):
    @functools.wraps(fn)
    def wrapper(ctxt, request_spec):
        timer = timeutils.StopWatch()
        ran = False
        with timer:
            try:
                ran = fn(ctxt, request_spec)
            finally:
                if ran:
                    # Only log info if the filter was enabled and not
                    # excluded for some reason
                    LOG.debug('Request filter %r took %.1f seconds',
                        fn.__name__, timer.elapsed())
        return ran
    return wrapper


@trace_request_filter
def isolate_aggregates(ctxt, request_spec):
    """Prepare list of aggregates that should be isolated.

    This filter prepares the list of aggregates that should be ignored by
    the placement service. It checks whether an aggregate has metadata of
    the form 'trait:<trait_name>'='required'; if <trait_name> is not
    requested in either the flavor extra specs or the image properties,
    that aggregate is added to the list of isolated aggregates.

    More precisely, this filter takes the trait requests from the image and
    the flavor and unions them, then collects the aggregates whose
    required-trait metadata does not match that union (via
    get_non_matching_by_metadata_keys) and uses the result as the list of
    isolated aggregates.
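
    A hypothetical illustration (values invented for this docstring): an
    aggregate carrying metadata ``trait:HW_CPU_X86_VMX=required`` ends up
    in the isolated list unless the flavor extra specs or image properties
    also require that trait.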
""" if not CONF.scheduler.enable_isolated_aggregate_filtering: return False # Get required traits set in flavor and image res_req = utils.ResourceRequest.from_request_spec(request_spec) required_traits = res_req.all_required_traits keys = ['trait:%s' % trait for trait in required_traits] isolated_aggregates = ( objects.aggregate.AggregateList.get_non_matching_by_metadata_keys( ctxt, keys, 'trait:', value='required')) # Set list of isolated aggregates to destination object of request_spec if isolated_aggregates: if ('requested_destination' not in request_spec or request_spec.requested_destination is None): request_spec.requested_destination = objects.Destination() destination = request_spec.requested_destination destination.append_forbidden_aggregates( agg.uuid for agg in isolated_aggregates) return True @trace_request_filter def require_tenant_aggregate(ctxt, request_spec): """Require hosts in an aggregate based on tenant id. This will modify request_spec to request hosts in an aggregate defined specifically for the tenant making the request. We do that by looking for a nova host aggregate with metadata indicating which tenant it is for, and passing that aggregate uuid to placement to limit results accordingly. """ enabled = CONF.scheduler.limit_tenants_to_placement_aggregate agg_required = CONF.scheduler.placement_aggregate_required_for_tenants if not enabled: return False aggregates = objects.AggregateList.get_by_metadata( ctxt, value=request_spec.project_id) aggregate_uuids_for_tenant = set([]) for agg in aggregates: for key, value in agg.metadata.items(): if key.startswith(TENANT_METADATA_KEY): aggregate_uuids_for_tenant.add(agg.uuid) break if aggregate_uuids_for_tenant: if ('requested_destination' not in request_spec or request_spec.requested_destination is None): request_spec.requested_destination = objects.Destination() destination = request_spec.requested_destination destination.require_aggregates(aggregate_uuids_for_tenant) LOG.debug('require_tenant_aggregate request filter added ' 'aggregates %s for tenant %r', ','.join(aggregate_uuids_for_tenant), request_spec.project_id) elif agg_required: LOG.warning('Tenant %(tenant)s has no available aggregates', {'tenant': request_spec.project_id}) raise exception.RequestFilterFailed( reason=_('No hosts available for tenant')) return True @trace_request_filter def map_az_to_placement_aggregate(ctxt, request_spec): """Map requested nova availability zones to placement aggregates. This will modify request_spec to request hosts in an aggregate that matches the desired AZ of the user's request. """ az_hint = request_spec.availability_zone if not az_hint: return False aggregates = objects.AggregateList.get_by_metadata(ctxt, key='availability_zone', value=az_hint) if aggregates: if ('requested_destination' not in request_spec or request_spec.requested_destination is None): request_spec.requested_destination = objects.Destination() agg_uuids = [agg.uuid for agg in aggregates] request_spec.requested_destination.require_aggregates(agg_uuids) LOG.debug('map_az_to_placement_aggregate request filter added ' 'aggregates %s for az %r', ','.join(agg_uuids), az_hint) return True @trace_request_filter def require_image_type_support(ctxt, request_spec): """Request type-specific trait on candidates. This will modify the request_spec to request hosts that support the disk_format of the image provided. 
""" if not CONF.scheduler.query_placement_for_image_type_support: return False if request_spec.is_bfv: # We are booting from volume, and thus compute node image # disk_format support does not matter. return False disk_format = request_spec.image.disk_format trait_name = 'COMPUTE_IMAGE_TYPE_%s' % disk_format.upper() if not hasattr(os_traits, trait_name): LOG.error( 'Computed trait name %r is not valid; is os-traits up to date?', trait_name) return False request_spec.root_required.add(trait_name) LOG.debug('require_image_type_support request filter added required ' 'trait %s', trait_name) return True @trace_request_filter def transform_image_metadata(ctxt, request_spec): """Transform image metadata to required traits. This will modify the request_spec to request hosts that support virtualisation capabilities based on the image metadata properties. """ if not CONF.scheduler.image_metadata_prefilter: return False prefix_map = { 'hw_cdrom_bus': 'COMPUTE_STORAGE_BUS', 'hw_disk_bus': 'COMPUTE_STORAGE_BUS', 'hw_video_model': 'COMPUTE_GRAPHICS_MODEL', 'hw_vif_model': 'COMPUTE_NET_VIF_MODEL', 'hw_architecture': 'HW_ARCH', 'hw_emulation_architecture': 'COMPUTE_ARCH', 'hw_viommu_model': 'COMPUTE_VIOMMU', } trait_names = [] for key, prefix in prefix_map.items(): if key in request_spec.image.properties: value = request_spec.image.properties.get(key).replace( '-', '_').upper() trait_name = f'{prefix}_{value}' if not hasattr(os_traits, trait_name): LOG.error('Computed trait name %r is not valid; ' 'is os-traits up to date?', trait_name) return False trait_names.append(trait_name) for trait_name in trait_names: LOG.debug( 'transform_image_metadata request filter added required ' 'trait %s', trait_name ) request_spec.root_required.add(trait_name) return True @trace_request_filter def compute_status_filter(ctxt, request_spec): """Pre-filter compute node resource providers using COMPUTE_STATUS_DISABLED The ComputeFilter filters out hosts for compute services that are disabled. Compute node resource providers managed by a disabled compute service should have the COMPUTE_STATUS_DISABLED trait set and be excluded by this mandatory pre-filter. """ trait_name = os_traits.COMPUTE_STATUS_DISABLED request_spec.root_forbidden.add(trait_name) LOG.debug('compute_status_filter request filter added forbidden ' 'trait %s', trait_name) return True @trace_request_filter def accelerators_filter(ctxt, request_spec): """Allow only compute nodes with accelerator support. This filter retains only nodes whose compute manager published the COMPUTE_ACCELERATORS trait, thus indicating the version of n-cpu is sufficient to handle accelerator requests. """ trait_name = os_traits.COMPUTE_ACCELERATORS if request_spec.flavor.extra_specs.get('accel:device_profile'): request_spec.root_required.add(trait_name) LOG.debug('accelerators_filter request filter added required ' 'trait %s', trait_name) return True @trace_request_filter def packed_virtqueue_filter(ctxt, request_spec): """Allow only compute nodes with Packed virtqueue. This filter retains only nodes whose compute manager published the COMPUTE_NET_VIRTIO_PACKED trait, thus indicates virtqueue packed feature. 
""" trait_name = os_traits.COMPUTE_NET_VIRTIO_PACKED if (hardware.get_packed_virtqueue_constraint(request_spec.flavor, request_spec.image)): request_spec.root_required.add(trait_name) LOG.debug('virtqueue_filter request filter added required ' 'trait %s', trait_name) return True @trace_request_filter def routed_networks_filter( ctxt: nova_context.RequestContext, request_spec: 'objects.RequestSpec' ) -> bool: """Adds requested placement aggregates that match requested networks. This will modify request_spec to request hosts in aggregates that matches segment IDs related to requested networks. :param ctxt: The usual suspect for a context object. :param request_spec: a classic RequestSpec object containing the request. :returns: True if the filter was used or False if not. :raises: exception.InvalidRoutedNetworkConfiguration if something went wrong when trying to get the related segment aggregates. """ if not CONF.scheduler.query_placement_for_routed_network_aggregates: return False # NOTE(sbauza): On a create operation with no specific network request, we # allocate the network only after scheduling when the nova-compute service # calls Neutron. In this case, here we just want to accept any destination # as fine. # NOTE(sbauza): This could be also going from an old compute reschedule. if 'requested_networks' not in request_spec: return True # This object field is not nullable requested_networks = request_spec.requested_networks # NOTE(sbauza): This field could be not created yet. if ( 'requested_destination' not in request_spec or request_spec.requested_destination is None ): request_spec.requested_destination = objects.Destination() # Get the clients we need network_api = neutron.API() report_api = report.report_client_singleton() for requested_network in requested_networks: network_id = None # Check for a specifically requested network ID. if "port_id" in requested_network and requested_network.port_id: # We have to lookup the port to see which segment(s) to support. port = network_api.show_port(ctxt, requested_network.port_id)[ "port" ] if port['fixed_ips']: # The instance already exists with a related subnet. We need to # stick on this subnet. # NOTE(sbauza): In case of multiple IPs, we could have more # subnets than only one but given they would be for the same # port, just looking at the first subnet is needed. subnet_id = port['fixed_ips'][0]['subnet_id'] try: aggregates = utils.get_aggregates_for_routed_subnet( ctxt, network_api, report_api, subnet_id) except exception.InvalidRoutedNetworkConfiguration as e: raise exception.RequestFilterFailed( reason=_('Aggregates not found for the subnet %s' ) % subnet_id) from e else: # The port was just created without a subnet. network_id = port["network_id"] elif ( "network_id" in requested_network and requested_network.network_id ): network_id = requested_network.network_id if network_id: # As the user only requested a network or a port unbound to a # segment, we are free to choose any segment from the network. 
try: aggregates = utils.get_aggregates_for_routed_network( ctxt, network_api, report_api, network_id) except exception.InvalidRoutedNetworkConfiguration as e: raise exception.RequestFilterFailed( reason=_('Aggregates not found for the network %s' ) % network_id) from e if aggregates: LOG.debug( 'routed_networks_filter request filter added the following ' 'aggregates for network ID %s: %s', network_id, ', '.join(aggregates)) # NOTE(sbauza): All of the aggregates from this request will be # accepted, but they will have an AND relationship with any other # requested aggregate, like for another NIC request in this loop. request_spec.requested_destination.require_aggregates(aggregates) return True @trace_request_filter def remote_managed_ports_filter( context: nova_context.RequestContext, request_spec: 'objects.RequestSpec', ) -> bool: """Filter out hosts without remote managed port support (driver or hw). If a request spec contains VNIC_TYPE_REMOTE_MANAGED ports then a remote-managed port trait (COMPUTE_REMOTE_MANAGED_PORTS) is added to the request in order to pre-filter hosts that do not use compute drivers supporting remote managed ports and the ones that do not have the device pools providing remote-managed ports (actual device availability besides a pool presence check is done at the time of PciPassthroughFilter execution). """ if request_spec.requested_networks: network_api = neutron.API() for request_net in request_spec.requested_networks: if request_net.port_id and network_api.is_remote_managed_port( context, request_net.port_id): request_spec.root_required.add( os_traits.COMPUTE_REMOTE_MANAGED_PORTS) LOG.debug('remote_managed_ports_filter request filter added ' f'trait {os_traits.COMPUTE_REMOTE_MANAGED_PORTS}') return True @trace_request_filter def ephemeral_encryption_filter( ctxt: nova_context.RequestContext, request_spec: 'objects.RequestSpec' ) -> bool: """Pre-filter resource provides by ephemeral encryption support This filter will only retain compute node resource providers that support ephemeral storage encryption when the associated image properties or flavor extra specs are present within the request spec. """ # Skip if ephemeral encryption isn't requested in the flavor or image if not hardware.get_ephemeral_encryption_constraint( request_spec.flavor, request_spec.image): LOG.debug("ephemeral_encryption_filter skipped") return False # Always add the feature trait regardless of the format being provided request_spec.root_required.add(os_traits.COMPUTE_EPHEMERAL_ENCRYPTION) LOG.debug("ephemeral_encryption_filter added trait " "COMPUTE_EPHEMERAL_ENCRYPTION") # Try to find the format in the flavor or image and add as a trait eph_format = hardware.get_ephemeral_encryption_format( request_spec.flavor, request_spec.image) if eph_format: # We don't need to validate the trait here because the earlier call to # get_ephemeral_encryption_format will raise if it is not valid trait_name = f"COMPUTE_EPHEMERAL_ENCRYPTION_{eph_format.upper()}" request_spec.root_required.add(trait_name) LOG.debug(f"ephemeral_encryption_filter added trait {trait_name}") return True @trace_request_filter def virtio_sound_filter( ctxt: nova_context.RequestContext, request_spec: 'objects.RequestSpec' ) -> bool: """Filter out hosts which do not support virtio sound devices This filter will only retain compute node resource providers that support virtio sound devices. 
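
    For example (an illustrative sketch; the exact property names are an
    assumption of this note): a flavor extra spec such as
    ``hw:sound_model=virtio`` or an image property ``hw_sound_model=virtio``
    results in the required trait ``COMPUTE_SOUND_MODEL_VIRTIO`` being
    added to the request.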
""" # Skip if the instance does not request a virtio sound device model = hardware.get_sound_model(request_spec.flavor, request_spec.image) if model != objects.fields.SoundModelType.VIRTIO: LOG.debug('virtio_sound_filter skipped') return False request_spec.root_required.add(os_traits.COMPUTE_SOUND_MODEL_VIRTIO) LOG.debug('virtio_sound_filter added trait COMPUTE_SOUND_MODEL_VIRTIO') return True ALL_REQUEST_FILTERS = [ require_tenant_aggregate, map_az_to_placement_aggregate, require_image_type_support, compute_status_filter, isolate_aggregates, transform_image_metadata, accelerators_filter, packed_virtqueue_filter, routed_networks_filter, remote_managed_ports_filter, ephemeral_encryption_filter, virtio_sound_filter ] def process_reqspec(ctxt, request_spec): """Process an objects.ReqestSpec before calling placement. :param ctxt: A RequestContext :param request_spec: An objects.RequestSpec to be inspected/modified """ for filter in ALL_REQUEST_FILTERS: filter(ctxt, request_spec) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/rpcapi.py0000664000175000017500000001730600000000000017342 0ustar00zuulzuul00000000000000# Copyright 2013, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the scheduler manager RPC API. """ import oslo_messaging as messaging import nova.conf from nova import exception as exc from nova.objects import base as objects_base from nova import profiler from nova import rpc CONF = nova.conf.CONF RPC_TOPIC = "scheduler" @profiler.trace_cls("rpc") class SchedulerAPI(object): '''Client side of the scheduler rpc API. API version history: * 1.0 - Initial version. * 1.1 - Changes to prep_resize(): * remove instance_uuid, add instance * remove instance_type_id, add instance_type * remove topic, it was unused * 1.2 - Remove topic from run_instance, it was unused * 1.3 - Remove instance_id, add instance to live_migration * 1.4 - Remove update_db from prep_resize * 1.5 - Add reservations argument to prep_resize() * 1.6 - Remove reservations argument to run_instance() * 1.7 - Add create_volume() method, remove topic from live_migration() * 2.0 - Remove 1.x backwards compat * 2.1 - Add image_id to create_volume() * 2.2 - Remove reservations argument to create_volume() * 2.3 - Remove create_volume() * 2.4 - Change update_service_capabilities() * accepts a list of capabilities * 2.5 - Add get_backdoor_port() * 2.6 - Add select_hosts() ... Grizzly supports message version 2.6. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 2.6. * 2.7 - Add select_destinations() * 2.8 - Deprecate prep_resize() -- JUST KIDDING. It is still used by the compute manager for retries. * 2.9 - Added the legacy_bdm_in_spec parameter to run_instance() ... Havana supports message version 2.9. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 2.9. 
* Deprecated live_migration() call, moved to conductor * Deprecated select_hosts() 3.0 - Removed backwards compat ... Icehouse and Juno support message version 3.0. So, any changes to existing methods in 3.x after that point should be done such that they can handle the version_cap being set to 3.0. * 3.1 - Made select_destinations() send flavor object * 4.0 - Removed backwards compat for Icehouse * 4.1 - Add update_aggregates() and delete_aggregate() * 4.2 - Added update_instance_info(), delete_instance_info(), and sync_instance_info() methods ... Kilo and Liberty support message version 4.2. So, any changes to existing methods in 4.x after that point should be done such that they can handle the version_cap being set to 4.2. * 4.3 - Modify select_destinations() signature by providing a RequestSpec obj ... Mitaka, Newton, and Ocata support message version 4.3. So, any changes to existing methods in 4.x after that point should be done such that they can handle the version_cap being set to 4.3. * 4.4 - Modify select_destinations() signature by providing the instance_uuids for the request. ... Pike supports message version 4.4. So any changes to existing methods in 4.x after that point should be done such that they can handle the version_cap being set to 4.4. * 4.5 - Modify select_destinations() to optionally return a list of lists of Selection objects, along with zero or more alternates. ''' VERSION_ALIASES = { 'grizzly': '2.6', 'havana': '2.9', 'icehouse': '3.0', 'juno': '3.0', 'kilo': '4.2', 'liberty': '4.2', 'mitaka': '4.3', 'newton': '4.3', 'ocata': '4.3', 'pike': '4.4', } def __init__(self): super(SchedulerAPI, self).__init__() target = messaging.Target(topic=RPC_TOPIC, version='4.0') version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.scheduler, CONF.upgrade_levels.scheduler) serializer = objects_base.NovaObjectSerializer() self.client = rpc.get_client(target, version_cap=version_cap, serializer=serializer) def select_destinations(self, ctxt, spec_obj, instance_uuids, return_objects=False, return_alternates=False): # Modify the parameters if an older version is requested version = '4.5' msg_args = {'instance_uuids': instance_uuids, 'spec_obj': spec_obj, 'return_objects': return_objects, 'return_alternates': return_alternates} if not self.client.can_send_version(version): if msg_args['return_objects'] or msg_args['return_alternates']: # The client is requesting an RPC version we can't support. 
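                # Illustrative example (configuration value hypothetical):
                # with [upgrade_levels] scheduler = 'pike' the cap resolves
                # to 4.4, which predates Selection-object returns, so a
                # request for return_objects/return_alternates cannot be
                # honoured and we fail fast rather than silently downgrade.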
raise exc.SelectionObjectsWithOldRPCVersionNotSupported( version=self.client.version_cap) del msg_args['return_objects'] del msg_args['return_alternates'] version = '4.4' if not self.client.can_send_version(version): del msg_args['instance_uuids'] version = '4.3' if not self.client.can_send_version(version): del msg_args['spec_obj'] msg_args['request_spec'] = spec_obj.to_legacy_request_spec_dict() msg_args['filter_properties' ] = spec_obj.to_legacy_filter_properties_dict() version = '4.0' cctxt = self.client.prepare( version=version, call_monitor_timeout=CONF.rpc_response_timeout, timeout=CONF.long_rpc_timeout) return cctxt.call(ctxt, 'select_destinations', **msg_args) def update_aggregates(self, ctxt, aggregates): # NOTE(sbauza): Yes, it's a fanout, we need to update all schedulers cctxt = self.client.prepare(fanout=True, version='4.1') cctxt.cast(ctxt, 'update_aggregates', aggregates=aggregates) def delete_aggregate(self, ctxt, aggregate): # NOTE(sbauza): Yes, it's a fanout, we need to update all schedulers cctxt = self.client.prepare(fanout=True, version='4.1') cctxt.cast(ctxt, 'delete_aggregate', aggregate=aggregate) def update_instance_info(self, ctxt, host_name, instance_info): cctxt = self.client.prepare(version='4.2', fanout=True) cctxt.cast(ctxt, 'update_instance_info', host_name=host_name, instance_info=instance_info) def delete_instance_info(self, ctxt, host_name, instance_uuid): cctxt = self.client.prepare(version='4.2', fanout=True) cctxt.cast(ctxt, 'delete_instance_info', host_name=host_name, instance_uuid=instance_uuid) def sync_instance_info(self, ctxt, host_name, instance_uuids): cctxt = self.client.prepare(version='4.2', fanout=True) cctxt.cast(ctxt, 'sync_instance_info', host_name=host_name, instance_uuids=instance_uuids) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/utils.py0000664000175000017500000020245700000000000017227 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
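# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# select_destinations() above degrades its arguments step by step until the
# pinned RPC version can carry them.  A standalone approximation of that
# negotiation; FakeClient and the version comparison are stand-ins for the
# real oslo.messaging client, and only the plain downgrade path (no raise for
# callers that insisted on Selection objects) is modelled here:
# ---------------------------------------------------------------------------
class FakeClient:
    def __init__(self, version_cap):
        self.version_cap = version_cap

    def can_send_version(self, version):
        return tuple(map(int, version.split('.'))) <= tuple(
            map(int, self.version_cap.split('.')))

def sketch_negotiate(client, kwargs):
    version = '4.5'
    if not client.can_send_version(version):
        kwargs.pop('return_objects', None)
        kwargs.pop('return_alternates', None)
        version = '4.4'
    if not client.can_send_version(version):
        kwargs.pop('instance_uuids', None)
        version = '4.3'
    return version, kwargs

_version, _kwargs = sketch_negotiate(
    FakeClient('4.3'),
    {'spec_obj': object(), 'instance_uuids': ['uuid-1'],
     'return_objects': False, 'return_alternates': False})
assert _version == '4.3' and 'instance_uuids' not in _kwargs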
"""Utility methods for scheduling.""" import collections import re import sys import typing as ty from urllib import parse import os_resource_classes as orc import os_traits from oslo_log import log as logging from oslo_serialization import jsonutils from nova.compute import flavors from nova.compute import utils as compute_utils import nova.conf from nova import context as nova_context from nova import exception from nova.i18n import _ from nova import objects from nova.objects import base as obj_base from nova.objects import fields as obj_fields from nova.objects import instance as obj_instance from nova import rpc from nova.scheduler.filters import utils as filters_utils from nova.virt import hardware LOG = logging.getLogger(__name__) CONF = nova.conf.CONF GroupDetails = collections.namedtuple('GroupDetails', ['hosts', 'policy', 'members']) class ResourceRequest(object): """Presents a granular resource request via RequestGroup instances.""" # extra_specs-specific consts XS_RES_PREFIX = 'resources' XS_TRAIT_PREFIX = 'trait' # Regex patterns for suffixed or unsuffixed resources/trait keys XS_KEYPAT = re.compile(r"^(%s)([a-zA-Z0-9_-]{1,64})?:(.*)$" % '|'.join((XS_RES_PREFIX, XS_TRAIT_PREFIX))) def __init__(self): """Create an empty ResourceRequest Do not call this directly, use the existing static factory methods from_*() """ self._rg_by_id: ty.Dict[str, objects.RequestGroup] = {} self._group_policy: ty.Optional[str] = None # Default to the configured limit but _limit can be # set to None to indicate "no limit". self._limit = CONF.scheduler.max_placement_results self._root_required: ty.Set[str] = set() self._root_forbidden: ty.Set[str] = set() self._same_subtree: ty.List[ty.List[str]] = [] self.suffixed_groups_from_flavor = 0 # TODO(stephenfin): Remove this parameter once we drop support for # 'vcpu_pin_set' self.cpu_pinning_requested = False @classmethod def from_request_spec( cls, request_spec: 'objects.RequestSpec', enable_pinning_translate: bool = True ) -> 'ResourceRequest': """Create a new instance of ResourceRequest from a RequestSpec. Examines the flavor, flavor extra specs, (optional) image metadata, and (optional) requested_resources and request_level_params of the provided ``request_spec``. For extra specs, items of the following form are examined: - ``resources:$RESOURCE_CLASS``: $AMOUNT - ``resources$S:$RESOURCE_CLASS``: $AMOUNT - ``trait:$TRAIT_NAME``: "required" - ``trait$S:$TRAIT_NAME``: "required" ...where ``$S`` is a string suffix as supported via Placement microversion 1.33 https://docs.openstack.org/placement/train/specs/train/implemented/2005575-nested-magic-1.html#arbitrary-group-suffixes .. note:: This does *not* yet handle ``member_of[$S]``. The string suffix is used as the RequestGroup.requester_id to facilitate mapping of requests to allocation candidates using the ``mappings`` piece of the response added in Placement microversion 1.34 https://docs.openstack.org/placement/train/specs/train/implemented/placement-resource-provider-request-group-mapping-in-allocation-candidates.html For image metadata, traits are extracted from the ``traits_required`` property, if present. For the flavor, ``VCPU``, ``MEMORY_MB`` and ``DISK_GB`` are calculated from Flavor properties, though these are only used if they aren't overridden by flavor extra specs. requested_resources, which are existing RequestGroup instances created on the RequestSpec based on resources specified outside of the flavor/ image (e.g. 
from ports) are incorporated as is, but ensuring that they get unique group suffixes. request_level_params - settings associated with the request as a whole rather than with a specific RequestGroup - are incorporated as is. :param request_spec: An instance of ``objects.RequestSpec``. :param enable_pinning_translate: True if the CPU policy extra specs should be translated to placement resources and traits. :return: a ResourceRequest instance """ res_req = cls() # root_required+=these res_req._root_required = request_spec.root_required # root_required+=!these res_req._root_forbidden = request_spec.root_forbidden res_req._same_subtree = request_spec.same_subtree # TODO(efried): Handle member_of[$S], which will need to be reconciled # with destination.aggregates handling in resources_from_request_spec # request_spec.image is nullable if 'image' in request_spec and request_spec.image: image = request_spec.image else: image = objects.ImageMeta(properties=objects.ImageMetaProps()) # Parse the flavor extra specs res_req._process_extra_specs(request_spec.flavor) # NOTE(gibi): this assumes that _process_extra_specs() was already # called but _process_requested_resources() hasn't called it yet. res_req.suffixed_groups_from_flavor = ( res_req.get_num_of_suffixed_groups()) # Now parse the (optional) image metadata res_req._process_image_meta(image) if enable_pinning_translate: # Next up, let's handle those pesky CPU pinning policies res_req._translate_pinning_policies(request_spec.flavor, image) # Add on any request groups that came from outside of the flavor/image, # e.g. from ports or device profiles. res_req._process_requested_resources(request_spec) # Parse the flavor itself, though we'll only use these fields if they # don't conflict with something already provided by the flavor extra # specs. These are all added to the unsuffixed request group. merged_resources = res_req.merged_resources() if (orc.VCPU not in merged_resources and orc.PCPU not in merged_resources): res_req._add_resource(orc.VCPU, request_spec.vcpus) if orc.MEMORY_MB not in merged_resources: res_req._add_resource(orc.MEMORY_MB, request_spec.memory_mb) if orc.DISK_GB not in merged_resources: disk = request_spec.ephemeral_gb disk += compute_utils.convert_mb_to_ceil_gb(request_spec.swap) if 'is_bfv' not in request_spec or not request_spec.is_bfv: disk += request_spec.root_gb if disk: res_req._add_resource(orc.DISK_GB, disk) res_req._translate_mem_encryption_request(request_spec.flavor, image) res_req._translate_vpmems_request(request_spec.flavor) res_req._translate_vtpm_request(request_spec.flavor, image) res_req._translate_pci_numa_affinity_policy(request_spec.flavor, image) res_req._translate_secure_boot_request(request_spec.flavor, image) res_req._translate_maxphysaddr_request(request_spec.flavor, image) res_req._translate_stateless_firmware_request(image) res_req.strip_zeros() return res_req @classmethod def from_request_groups( cls, request_groups: ty.List['objects.RequestGroup'], request_level_params: 'objects.RequestLevelParams', group_policy: str, ) -> 'ResourceRequest': """Create a new instance of ResourceRequest from a list of RequestGroup objects. 
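# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# the unsuffixed-group defaults that from_request_spec() derives from the
# flavor itself.  A worked standalone example of the DISK_GB arithmetic (swap
# MiB rounded up to GiB, root disk only counted when the instance is not
# boot-from-volume); the flavor numbers are arbitrary:
# ---------------------------------------------------------------------------
import math

def sketch_disk_gb(root_gb, ephemeral_gb, swap_mb, is_bfv):
    disk = ephemeral_gb + int(math.ceil(swap_mb / 1024.0))
    if not is_bfv:
        disk += root_gb
    return disk

# flavor: root=20 GiB, ephemeral=5 GiB, swap=512 MiB
assert sketch_disk_gb(20, 5, 512, is_bfv=False) == 26   # 20 + 5 + 1
assert sketch_disk_gb(20, 5, 512, is_bfv=True) == 6     # root disk skipped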
""" res_req = cls() res_req._root_required = request_level_params.root_required res_req._root_forbidden = request_level_params.root_forbidden res_req._same_subtree = request_level_params.same_subtree res_req.group_policy = group_policy for request_group in request_groups: res_req._add_request_group(request_group) res_req.strip_zeros() return res_req def _process_requested_resources(self, request_spec): requested_resources = (request_spec.requested_resources if 'requested_resources' in request_spec and request_spec.requested_resources else []) for group in requested_resources: self._add_request_group(group) def _process_extra_specs(self, flavor): if 'extra_specs' not in flavor: return for key, val in flavor.extra_specs.items(): if key == 'group_policy': self._add_group_policy(val) continue match = self.XS_KEYPAT.match(key) if not match: continue # 'prefix' is 'resources' or 'trait' # 'suffix' is $S or None # 'name' is either the resource class name or the trait name. prefix, suffix, name = match.groups() # Process "resources[$S]" if prefix == self.XS_RES_PREFIX: self._add_resource(name, val, group=suffix) # Process "trait[$S]" elif prefix == self.XS_TRAIT_PREFIX: self._add_trait(name, val, group=suffix) def _process_image_meta(self, image): if not image or 'properties' not in image: return for trait in image.properties.get('traits_required', []): # required traits from the image are always added to the # unsuffixed request group, granular request groups are not # supported in image traits self._add_trait(trait, 'required') def _translate_secure_boot_request(self, flavor, image): sb_policy = hardware.get_secure_boot_constraint(flavor, image) if sb_policy != obj_fields.SecureBoot.REQUIRED: return trait = os_traits.COMPUTE_SECURITY_UEFI_SECURE_BOOT self._add_trait(trait, 'required') LOG.debug("Requiring secure boot support via trait %s.", trait) def _translate_maxphysaddr_request(self, flavor, image): mode = hardware.get_maxphysaddr_mode(flavor, image) if mode is None: return trait = None if mode == obj_fields.MaxPhyAddrMode.PASSTHROUGH: trait = os_traits.COMPUTE_ADDRESS_SPACE_PASSTHROUGH elif mode == obj_fields.MaxPhyAddrMode.EMULATE: trait = os_traits.COMPUTE_ADDRESS_SPACE_EMULATED if trait: self._add_trait(trait, 'required') LOG.debug("Requiring maxphysaddr support via trait %s.", trait) def _translate_stateless_firmware_request(self, image): if hardware.get_stateless_firmware_constraint(image): self._add_trait(os_traits.COMPUTE_SECURITY_STATELESS_FIRMWARE, 'required') def _translate_vtpm_request(self, flavor, image): vtpm_config = hardware.get_vtpm_constraint(flavor, image) if not vtpm_config: return # Require the appropriate vTPM model support trait on a host. model_trait = os_traits.COMPUTE_SECURITY_TPM_TIS if vtpm_config.model == obj_fields.TPMModel.CRB: model_trait = os_traits.COMPUTE_SECURITY_TPM_CRB # Require the appropriate vTPM version support trait on a host. 
version_trait = os_traits.COMPUTE_SECURITY_TPM_1_2 if vtpm_config.version == obj_fields.TPMVersion.v2_0: version_trait = os_traits.COMPUTE_SECURITY_TPM_2_0 self._add_trait(model_trait, 'required') self._add_trait(version_trait, 'required') LOG.debug("Requiring emulated TPM support via trait %s and %s.", version_trait, model_trait) def _translate_mem_encryption_request(self, flavor, image): """When the hw:mem_encryption extra spec or the hw_mem_encryption image property are requested, translate into a request for resources:MEM_ENCRYPTION_CONTEXT=1 which requires a slot on a host which can support encryption of the guest memory. Also require the specific trait for the requested memory encryption feature of CPU. """ # NOTE(aspiers): In theory this could raise FlavorImageConflict, # but we already check it in the API layer, so that should never # happen mem_enc_config = hardware.get_mem_encryption_constraint(flavor, image) if not mem_enc_config: # No memory encryption required, so no further action required. return self._add_resource(orc.MEM_ENCRYPTION_CONTEXT, 1) LOG.debug("Added %s=1 to requested resources", orc.MEM_ENCRYPTION_CONTEXT) me_trait = os_traits.HW_CPU_X86_AMD_SEV if mem_enc_config.model == obj_fields.MemEncryptionModel.AMD_SEV_ES: me_trait = os_traits.HW_CPU_X86_AMD_SEV_ES self._add_trait(me_trait, 'required') LOG.debug("Requiring memory encryption model %s via trait %s", mem_enc_config.model, me_trait) def _translate_vpmems_request(self, flavor): """When the hw:pmem extra spec is present, require hosts which can provide enough vpmem resources. """ vpmem_labels = hardware.get_vpmems(flavor) if not vpmem_labels: # No vpmems required return amount_by_rc: ty.DefaultDict[str, int] = collections.defaultdict(int) for vpmem_label in vpmem_labels: resource_class = orc.normalize_name( "PMEM_NAMESPACE_" + vpmem_label) amount_by_rc[resource_class] += 1 for resource_class, amount in amount_by_rc.items(): self._add_resource(resource_class, amount) LOG.debug("Added resource %s=%d to requested resources", resource_class, amount) def _translate_pinning_policies(self, flavor, image): """Translate the legacy pinning policies to resource requests.""" # NOTE(stephenfin): These can raise exceptions but these have already # been validated by 'nova.virt.hardware.numa_get_constraints' in the # API layer (see change I06fad233006c7bab14749a51ffa226c3801f951b). # This call also handles conflicts between explicit VCPU/PCPU # requests and implicit 'hw:cpu_policy'-based requests, mismatches # between the number of CPUs in the flavor and explicit VCPU/PCPU # requests, etc. cpu_policy = hardware.get_cpu_policy_constraint( flavor, image) cpu_thread_policy = hardware.get_cpu_thread_policy_constraint( flavor, image) emul_thread_policy = hardware.get_emulator_thread_policy_constraint( flavor) # We don't need to worry about handling 'SHARED' - that will result in # VCPUs which we include by default if cpu_policy == obj_fields.CPUAllocationPolicy.DEDICATED: # TODO(stephenfin): Remove when we drop support for 'vcpu_pin_set' self.cpu_pinning_requested = True # Switch VCPU -> PCPU pcpus = flavor.vcpus LOG.debug('Translating request for %(vcpu_rc)s=%(pcpus)d to ' '%(vcpu_rc)s=0,%(pcpu_rc)s=%(pcpus)d', {'vcpu_rc': orc.VCPU, 'pcpu_rc': orc.PCPU, 'pcpus': pcpus}) if cpu_policy == obj_fields.CPUAllocationPolicy.MIXED: # Get dedicated CPU list from flavor extra spec. For a mixed # instance a non-empty 'hw:cpu_dedicated_mask' or realtime CPU # mask configuration must exist, which is already ensured in # the API layer. 
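# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# _translate_vpmems_request() above counts how many namespaces of each vPMEM
# label the flavor asks for.  A standalone version of that counting; the
# label list and the simplified resource-class naming (no os_resource_classes
# normalization) are assumptions made for the example:
# ---------------------------------------------------------------------------
import collections

def sketch_vpmem_amounts(vpmem_labels):
    amount_by_rc = collections.defaultdict(int)
    for label in vpmem_labels:
        amount_by_rc['PMEM_NAMESPACE_' + label.upper()] += 1
    return dict(amount_by_rc)

assert sketch_vpmem_amounts(['SMALL', 'LARGE', 'SMALL']) == {
    'PMEM_NAMESPACE_SMALL': 2, 'PMEM_NAMESPACE_LARGE': 1}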
dedicated_cpus = hardware.get_dedicated_cpu_constraint(flavor) realtime_cpus = hardware.get_realtime_cpu_constraint(flavor, image) pcpus = len(dedicated_cpus or realtime_cpus or []) vcpus = flavor.vcpus - pcpus # apply for the VCPU resource of a 'mixed' instance self._add_resource(orc.VCPU, vcpus) if cpu_policy in ( obj_fields.CPUAllocationPolicy.DEDICATED, obj_fields.CPUAllocationPolicy.MIXED, ): if emul_thread_policy == 'isolate': pcpus += 1 LOG.debug('Adding additional %(pcpu_rc)s to account for ' 'emulator threads', {'pcpu_rc': orc.PCPU}) self._add_resource(orc.PCPU, pcpus) trait = { obj_fields.CPUThreadAllocationPolicy.ISOLATE: 'forbidden', obj_fields.CPUThreadAllocationPolicy.REQUIRE: 'required', }.get(cpu_thread_policy) if trait: LOG.debug('Adding %(trait)s=%(value)s trait', {'trait': os_traits.HW_CPU_HYPERTHREADING, 'value': trait}) self._add_trait(os_traits.HW_CPU_HYPERTHREADING, trait) def _translate_pci_numa_affinity_policy(self, flavor, image): policy = hardware.get_pci_numa_policy_constraint(flavor, image) # only the socket policy supports a trait if policy == objects.fields.PCINUMAAffinityPolicy.SOCKET: trait = os_traits.COMPUTE_SOCKET_PCI_NUMA_AFFINITY self._add_trait(trait, 'required') LOG.debug( "Requiring 'socket' PCI NUMA affinity support via trait %s.", trait) @property def group_policy(self): return self._group_policy @group_policy.setter def group_policy(self, value): self._group_policy = value def get_request_group(self, ident): if ident not in self._rg_by_id: rq_grp = objects.RequestGroup( use_same_provider=bool(ident), requester_id=ident) self._rg_by_id[ident] = rq_grp return self._rg_by_id[ident] def _add_request_group(self, request_group): """Inserts the existing group with a unique suffix. The groups coming from the flavor can have arbitrary suffixes; those are guaranteed to be unique within the flavor. A group coming from "outside" (ports, device profiles) must be associated with a requester_id, such as a port UUID. We use this requester_id as the group suffix (but ensure that it is unique in combination with suffixes from the flavor). Groups coming from "outside" are not allowed to be no-ops. That is, they must provide resources and/or required/forbidden traits/aggregates :param request_group: the RequestGroup to be added. :raise: ValueError if request_group has no requester_id, or if it provides no resources or (required/forbidden) traits or aggregates. :raise: RequestGroupSuffixConflict if request_group.requester_id already exists in this ResourceRequest. """ # NOTE(efried): Deliberately check False-ness rather than None-ness # here, since both would result in the unsuffixed request group being # used, and that's bad. if not request_group.requester_id: # NOTE(efried): An "outside" RequestGroup is created by a # programmatic agent and that agent is responsible for guaranteeing # the presence of a unique requester_id. This is in contrast to # flavor extra_specs where a human is responsible for the group # suffix. raise ValueError( _('Missing requester_id in RequestGroup! This is probably a ' 'programmer error. %s') % request_group) if request_group.is_empty(): # NOTE(efried): It is up to the calling code to enforce a nonempty # RequestGroup with suitable logic and exceptions. raise ValueError( _('Refusing to add no-op RequestGroup with requester_id=%s. 
' 'This is a probably a programmer error.') % request_group.requester_id) if request_group.requester_id in self._rg_by_id: raise exception.RequestGroupSuffixConflict( suffix=request_group.requester_id) self._rg_by_id[request_group.requester_id] = request_group def _add_resource(self, rclass, amount, group=None): """Add resource request to specified request group. Defaults to the unsuffixed request group if no group is provided. """ self.get_request_group(group).add_resource(rclass, amount) def _add_trait(self, trait_name, trait_type, group=None): """Add trait request to specified group. Defaults to the unsuffixed request group if no group is provided. """ self.get_request_group(group).add_trait(trait_name, trait_type) def _add_group_policy(self, policy): # The only valid values for group_policy are 'none' and 'isolate'. if policy not in ('none', 'isolate'): LOG.warning( "Invalid group_policy '%s'. Valid values are 'none' and " "'isolate'.", policy) return self._group_policy = policy def get_num_of_suffixed_groups(self): return len([ident for ident in self._rg_by_id.keys() if ident is not None]) def merged_resources(self): """Returns a merge of {resource_class: amount} for all resource groups. Amounts of the same resource class from different groups are added together. :return: A dict of the form {resource_class: amount} """ ret: ty.DefaultDict[str, int] = collections.defaultdict(lambda: 0) for rg in self._rg_by_id.values(): for resource_class, amount in rg.resources.items(): ret[resource_class] += amount return dict(ret) def strip_zeros(self): """Remove any resources whose amounts are zero.""" for rg in self._rg_by_id.values(): rg.strip_zeros() # Get rid of any empty RequestGroup instances. for ident, rg in list(self._rg_by_id.items()): if rg.is_empty(): self._rg_by_id.pop(ident) def to_querystring(self): """Produce a querystring of the form expected by GET /allocation_candidates. """ if self._limit is not None: qparams = [('limit', self._limit)] else: qparams = [] if self._group_policy is not None: qparams.append(('group_policy', self._group_policy)) if self._root_required or self._root_forbidden: vals = sorted(self._root_required) + ['!' + t for t in sorted(self._root_forbidden)] qparams.append(('root_required', ','.join(vals))) for group_suffixes in self._same_subtree: qparams.append(('same_subtree', ','.join(sorted(group_suffixes)))) for rg in self._rg_by_id.values(): # [('resources[$S]', 'rclass:amount,rclass:amount,...'), # ('required[$S]', 'trait_name,!trait_name,...'), # ('member_of[$S]', 'in:uuid,uuid,...'), # ('member_of[$S]', 'in:uuid,uuid,...')] qparams.extend(rg.to_queryparams()) return parse.urlencode(sorted(qparams)) @property def all_required_traits(self): traits: ty.Set[str] = set() for rr in self._rg_by_id.values(): traits = traits.union(rr.required_traits) return traits def __str__(self): return ', '.join(sorted( list(str(rg) for rg in list(self._rg_by_id.values())))) def build_request_spec(image, instances, flavor=None): """Build a request_spec (ahem, not a RequestSpec) for the scheduler. The request_spec assumes that all instances to be scheduled are the same type. 
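# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# the shape of the querystring that to_querystring() above builds for
# GET /allocation_candidates, reduced to the root_required / root_forbidden
# and group_policy parts.  Trait names and the limit are arbitrary examples:
# ---------------------------------------------------------------------------
from urllib import parse

def sketch_querystring(required, forbidden, group_policy, limit):
    qparams = [('limit', limit), ('group_policy', group_policy)]
    vals = sorted(required) + ['!' + t for t in sorted(forbidden)]
    qparams.append(('root_required', ','.join(vals)))
    return parse.urlencode(sorted(qparams))

_qs = sketch_querystring(
    {'HW_CPU_X86_AVX2'}, {'CUSTOM_SLOW_DISK'}, 'none', 1000)
# -> group_policy=none&limit=1000&root_required=HW_CPU_X86_AVX2%2C%21CUSTOM_SLOW_DISK
print(_qs)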
:param image: optional primitive image meta dict :param instances: list of instances; objects will be converted to primitives :param flavor: optional flavor; objects will be converted to primitives :return: dict with the following keys:: 'image': the image dict passed in or {} 'instance_properties': primitive version of the first instance passed 'instance_type': primitive version of the flavor or None 'num_instances': the number of instances passed in """ instance = instances[0] if flavor is None: if isinstance(instance, obj_instance.Instance): flavor = instance.get_flavor() else: flavor = flavors.extract_flavor(instance) if isinstance(instance, obj_instance.Instance): instance = obj_base.obj_to_primitive(instance) # obj_to_primitive doesn't copy this enough, so be sure # to detach our metadata blob because we modify it below. instance['system_metadata'] = dict(instance.get('system_metadata', {})) if isinstance(flavor, objects.Flavor): flavor = obj_base.obj_to_primitive(flavor) # NOTE(danms): Replicate this old behavior because the # scheduler RPC interface technically expects it to be # there. Remove this when we bump the scheduler RPC API to # v5.0 try: flavors.save_flavor_info( instance.get('system_metadata', {}), flavor) except KeyError: # If the flavor isn't complete (which is legit with a # flavor object, just don't put it in the request spec pass request_spec = { 'image': image or {}, 'instance_properties': instance, 'instance_type': flavor, 'num_instances': len(instances), } # NOTE(mriedem): obj_to_primitive above does not serialize everything # in an object, like datetime fields, so we need to still call to_primitive # to recursively serialize the items in the request_spec dict. return jsonutils.to_primitive(request_spec) def resources_from_flavor(instance, flavor): """Convert a flavor into a set of resources for placement, taking into account boot-from-volume instances. This takes an instance and a flavor and returns a dict of resource_class:amount based on the attributes of the flavor, accounting for any overrides that are made in extra_specs. """ is_bfv = compute_utils.is_volume_backed_instance(instance._context, instance) return _get_resources(flavor, is_bfv) def _get_resources(flavor, is_bfv): # create a fake RequestSpec as a wrapper to the caller req_spec = objects.RequestSpec(flavor=flavor, is_bfv=is_bfv) # TODO(efried): This method is currently only used from places that # assume the compute node is the only resource provider. So for now, we # just merge together all the resources specified in the flavor and pass # them along. This will need to be adjusted when nested and/or shared RPs # are in play. res_req = ResourceRequest.from_request_spec(req_spec) return res_req.merged_resources() def resources_for_limits(flavor, is_bfv): """Work out what unified limits may be exceeded.""" return _get_resources(flavor, is_bfv) def resources_from_request_spec(ctxt, spec_obj, host_manager, enable_pinning_translate=True): """Given a RequestSpec object, returns a ResourceRequest of the resources, traits, and aggregates it represents. :param context: The request context. :param spec_obj: A RequestSpec object. :param host_manager: A HostManager object. :param enable_pinning_translate: True if the CPU policy extra specs should be translated to placement resources and traits. :return: A ResourceRequest object. :raises NoValidHost: If the specified host/node is not found in the DB. 
""" res_req = ResourceRequest.from_request_spec( spec_obj, enable_pinning_translate) # values to get the destination target compute uuid target_host = None target_node = None target_cell = None if 'requested_destination' in spec_obj: destination = spec_obj.requested_destination if destination: if 'host' in destination: target_host = destination.host if 'node' in destination: target_node = destination.node if 'cell' in destination: target_cell = destination.cell if destination.aggregates: grp = res_req.get_request_group(None) # If the target must be either in aggA *or* in aggB and must # definitely be in aggC, the destination.aggregates would be # ['aggA,aggB', 'aggC'] # Here we are converting it to # [['aggA', 'aggB'], ['aggC']] grp.aggregates = [ored.split(',') for ored in destination.aggregates] if destination.forbidden_aggregates: grp = res_req.get_request_group(None) grp.forbidden_aggregates |= destination.forbidden_aggregates if 'force_hosts' in spec_obj and spec_obj.force_hosts: # Prioritize the value from requested_destination just in case # so that we don't inadvertently overwrite it to the old value # of force_hosts persisted in the DB target_host = target_host or spec_obj.force_hosts[0] if 'force_nodes' in spec_obj and spec_obj.force_nodes: # Prioritize the value from requested_destination just in case # so that we don't inadvertently overwrite it to the old value # of force_nodes persisted in the DB target_node = target_node or spec_obj.force_nodes[0] if target_host or target_node: nodes = host_manager.get_compute_nodes_by_host_or_node( ctxt, target_host, target_node, cell=target_cell) if not nodes: reason = (_('No such host - host: %(host)s node: %(node)s ') % {'host': target_host, 'node': target_node}) raise exception.NoValidHost(reason=reason) if len(nodes) == 1: if 'requested_destination' in spec_obj and destination: # When we only supply hypervisor_hostname in api to create a # server, the destination object will only include the node. # Here when we get one node, we set both host and node to # destination object. So we can reduce the number of HostState # objects to run through the filters. destination.host = nodes[0].host destination.node = nodes[0].hypervisor_hostname grp = res_req.get_request_group(None) grp.in_tree = nodes[0].uuid else: # Multiple nodes are found when a target host is specified # without a specific node. Since placement doesn't support # multiple uuids in the `in_tree` queryparam, what we can do here # is to remove the limit from the `GET /a_c` query to prevent # the found nodes from being filtered out in placement. res_req._limit = None # Don't limit allocation candidates when using affinity/anti-affinity. if ('scheduler_hints' in spec_obj and any( key in ['group', 'same_host', 'different_host'] for key in spec_obj.scheduler_hints)): res_req._limit = None if res_req.get_num_of_suffixed_groups() >= 2 and not res_req.group_policy: LOG.warning( "There is more than one numbered request group in the " "allocation candidate query but the flavor did not specify " "any group policy. This query would fail in placement due to " "the missing group policy. If you specified more than one " "numbered request group in the flavor extra_spec then you need to " "specify the group policy in the flavor extra_spec. If it is OK " "to let these groups be satisfied by overlapping resource " "providers then use 'group_policy': 'none'. 
If you want each " "group to be satisfied from a separate resource provider then " "use 'group_policy': 'isolate'.") if res_req.suffixed_groups_from_flavor <= 1: LOG.info( "At least one numbered request group is defined outside of " "the flavor (e.g. in a port that has a QoS minimum bandwidth " "policy rule attached) but the flavor did not specify any " "group policy. To avoid the placement failure nova defaults " "the group policy to 'none'.") res_req.group_policy = 'none' return res_req def claim_resources_on_destination( context, reportclient, instance, source_node, dest_node, source_allocations=None, consumer_generation=None): """Copies allocations from source node to dest node in Placement Normally the scheduler will allocate resources on a chosen destination node during a move operation like evacuate and live migration. However, because of the ability to force a host and bypass the scheduler, this method can be used to manually copy allocations from the source node to the forced destination node. This is only appropriate when the instance flavor on the source node is the same on the destination node, i.e. don't use this for resize. :param context: The request context. :param reportclient: An instance of the SchedulerReportClient. :param instance: The instance being moved. :param source_node: source ComputeNode where the instance currently lives :param dest_node: destination ComputeNode where the instance is being moved :param source_allocations: The consumer's current allocations on the source compute :param consumer_generation: The expected generation of the consumer. None if a new consumer is expected :raises NoValidHost: If the allocation claim on the destination node fails. :raises: keystoneauth1.exceptions.base.ClientException on failure to communicate with the placement API :raises: ConsumerAllocationRetrievalFailed if the placement API call fails :raises: AllocationUpdateFailed: If a parallel consumer update changed the consumer """ # Get the current allocations for the source node and the instance. # NOTE(gibi) For the live migrate case, the caller provided the # allocation that needs to be used on the dest_node along with the # expected consumer_generation of the consumer (which is the instance). if not source_allocations: # NOTE(gibi): This is the forced evacuate case where the caller did not # provide any allocation request. So we ask placement here for the # current allocation and consumer generation and use that for the new # allocation on the dest_node. If the allocation fails due to consumer # generation conflict then the claim will raise and the operation will # be aborted. # NOTE(gibi): This only detect a small portion of possible # cases when allocation is modified outside of the this # code path. The rest can only be detected if nova would # cache at least the consumer generation of the instance. allocations = reportclient.get_allocs_for_consumer( context, instance.uuid) source_allocations = allocations.get('allocations', {}) consumer_generation = allocations.get('consumer_generation') if not source_allocations: # This shouldn't happen, so just raise an error since we cannot # proceed. raise exception.ConsumerAllocationRetrievalFailed( consumer_uuid=instance.uuid, error=_( 'Expected to find allocations for source node resource ' 'provider %s. Retry the operation without forcing a ' 'destination host.') % source_node.uuid) # Generate an allocation request for the destination node. 
# NOTE(gibi): if the source allocation allocates from more than one RP # then we need to fail as the dest allocation might also need to be # complex (e.g. nested) and we cannot calculate that allocation request # properly without a placement allocation candidate call. # Alternatively we could sum up the source allocation and try to # allocate that from the root RP of the dest host. It would only work # if the dest host would not require nested allocation for this server # which is really a rare case. if len(source_allocations) > 1: reason = (_('Unable to move instance %(instance_uuid)s to ' 'host %(host)s. The instance has complex allocations ' 'on the source host so move cannot be forced.') % {'instance_uuid': instance.uuid, 'host': dest_node.host}) raise exception.NoValidHost(reason=reason) alloc_request = { 'allocations': { dest_node.uuid: { 'resources': source_allocations[source_node.uuid]['resources']} }, } # import locally to avoid cyclic import from nova.scheduler.client import report # The claim_resources method will check for existing allocations # for the instance and effectively "double up" the allocations for # both the source and destination node. That's why when requesting # allocations for resources on the destination node before we move, # we use the existing resource allocations from the source node. if reportclient.claim_resources( context, instance.uuid, alloc_request, instance.project_id, instance.user_id, allocation_request_version=report.CONSUMER_GENERATION_VERSION, consumer_generation=consumer_generation): LOG.debug('Instance allocations successfully created on ' 'destination node %(dest)s: %(alloc_request)s', {'dest': dest_node.uuid, 'alloc_request': alloc_request}, instance=instance) else: # We have to fail even though the user requested that we force # the host. This is because we need Placement to have an # accurate reflection of what's allocated on all nodes so the # scheduler can make accurate decisions about which nodes have # capacity for building an instance. reason = (_('Unable to move instance %(instance_uuid)s to ' 'host %(host)s. There is not enough capacity on ' 'the host for the instance.') % {'instance_uuid': instance.uuid, 'host': dest_node.host}) raise exception.NoValidHost(reason=reason) def set_vm_state_and_notify(context, instance_uuid, service, method, updates, ex, request_spec): """Updates the instance, sets the fault and sends an error notification. :param context: The request context. :param instance_uuid: The UUID of the instance to update. :param service: The name of the originating service, e.g. 'compute_task'. This becomes part of the publisher_id for the notification payload. :param method: The method that failed, e.g. 'migrate_server'. :param updates: dict of updates for the instance object, typically a vm_state and task_state value. :param ex: An exception which occurred during the given method. :param request_spec: Optional request spec. """ # e.g. "Failed to compute_task_migrate_server: No valid host was found" LOG.warning("Failed to %(service)s_%(method)s: %(ex)s", {'service': service, 'method': method, 'ex': ex}) # Convert the request spec to a dict if needed. if request_spec is not None: if isinstance(request_spec, objects.RequestSpec): request_spec = request_spec.to_legacy_request_spec_dict() else: request_spec = {} # TODO(mriedem): We should make vm_state optional since not all callers # of this method want to change the vm_state, e.g. the Exception block # in ComputeTaskManager._cold_migrate. 
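# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# the allocation document that claim_resources_on_destination() above sends
# to placement when copying the source-node allocation to a forced
# destination.  UUIDs and resource amounts are made up; the single-provider
# restriction mirrors the check described above:
# ---------------------------------------------------------------------------
def sketch_dest_alloc_request(source_allocations, source_rp, dest_rp):
    if len(source_allocations) > 1:
        raise ValueError('complex (multi-provider) allocations cannot be '
                         'copied without an allocation candidate query')
    return {
        'allocations': {
            dest_rp: {
                'resources': source_allocations[source_rp]['resources'],
            },
        },
    }

_src = {'11111111-1111-1111-1111-111111111111':
            {'resources': {'VCPU': 2, 'MEMORY_MB': 2048, 'DISK_GB': 20}}}
_req = sketch_dest_alloc_request(
    _src, '11111111-1111-1111-1111-111111111111',
    '22222222-2222-2222-2222-222222222222')
assert _req['allocations']['22222222-2222-2222-2222-222222222222'][
    'resources']['VCPU'] == 2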
vm_state = updates['vm_state'] properties = request_spec.get('instance_properties', {}) notifier = rpc.get_notifier(service) state = vm_state.upper() LOG.warning('Setting instance to %s state.', state, instance_uuid=instance_uuid) instance = objects.Instance(context=context, uuid=instance_uuid, **updates) instance.obj_reset_changes(['uuid']) instance.save() compute_utils.add_instance_fault_from_exc( context, instance, ex, sys.exc_info()) payload = dict(request_spec=request_spec, instance_properties=properties, instance_id=instance_uuid, state=vm_state, method=method, reason=ex) event_type = '%s.%s' % (service, method) notifier.error(context, event_type, payload) compute_utils.notify_about_compute_task_error( context, method, instance_uuid, request_spec, vm_state, ex) def build_filter_properties( scheduler_hints, forced_host, forced_node, flavor, ): """Build the filter_properties dict from data in the boot request.""" filter_properties = dict(scheduler_hints=scheduler_hints) filter_properties['instance_type'] = flavor # TODO(alaski): It doesn't seem necessary that these are conditionally # added. Let's just add empty lists if not forced_host/node. if forced_host: filter_properties['force_hosts'] = [forced_host] if forced_node: filter_properties['force_nodes'] = [forced_node] return filter_properties def populate_filter_properties(filter_properties, selection): """Add additional information to the filter properties after a node has been selected by the scheduling process. :param filter_properties: dict of filter properties (the legacy form of the RequestSpec) :param selection: Selection object """ host = selection.service_host nodename = selection.nodename # Need to convert SchedulerLimits object to older dict format. if "limits" in selection and selection.limits is not None: limits = selection.limits.to_dict() else: limits = {} # Adds a retry entry for the selected compute host and node: _add_retry_host(filter_properties, host, nodename) # Adds oversubscription policy if not filter_properties.get('force_hosts'): filter_properties['limits'] = limits def populate_retry(filter_properties, instance_uuid): max_attempts = CONF.scheduler.max_attempts force_hosts = filter_properties.get('force_hosts', []) force_nodes = filter_properties.get('force_nodes', []) # In the case of multiple force hosts/nodes, scheduler should not # disable retry filter but traverse all force hosts/nodes one by # one till scheduler gets a valid target host. if (max_attempts == 1 or len(force_hosts) == 1 or len(force_nodes) == 1): # re-scheduling is disabled, log why if max_attempts == 1: LOG.debug('Re-scheduling is disabled due to "max_attempts" config') else: LOG.debug("Re-scheduling is disabled due to forcing a host (%s) " "and/or node (%s)", force_hosts, force_nodes) return # retry is enabled, update attempt count: retry = filter_properties.setdefault( 'retry', { 'num_attempts': 0, 'hosts': [] # list of compute hosts tried }) retry['num_attempts'] += 1 _log_compute_error(instance_uuid, retry) exc_reason = retry.pop('exc_reason', None) if retry['num_attempts'] > max_attempts: msg = (_('Exceeded max scheduling attempts %(max_attempts)d ' 'for instance %(instance_uuid)s. 
' 'Last exception: %(exc_reason)s') % {'max_attempts': max_attempts, 'instance_uuid': instance_uuid, 'exc_reason': exc_reason}) raise exception.MaxRetriesExceeded(reason=msg) def _log_compute_error(instance_uuid, retry): """If the request contained an exception from a previous compute build/resize operation, log it to aid debugging """ exc = retry.get('exc') # string-ified exception from compute if not exc: return # no exception info from a previous attempt, skip hosts = retry.get('hosts', None) if not hosts: return # no previously attempted hosts, skip last_host, last_node = hosts[-1] LOG.error( 'Error from last host: %(last_host)s (node %(last_node)s): %(exc)s', {'last_host': last_host, 'last_node': last_node, 'exc': exc}, instance_uuid=instance_uuid) def _add_retry_host(filter_properties, host, node): """Add a retry entry for the selected compute node. In the event that the request gets re-scheduled, this entry will signal that the given node has already been tried. """ retry = filter_properties.get('retry', None) if not retry: return hosts = retry['hosts'] hosts.append([host, node]) def parse_options(opts, sep='=', converter=str, name=""): """Parse a list of options, each in the format of . Also use the converter to convert the value into desired type. :params opts: list of options, e.g. from oslo_config.cfg.ListOpt :params sep: the separator :params converter: callable object to convert the value, should raise ValueError for conversion failure :params name: name of the option :returns: a lists of tuple of values (key, converted_value) """ good = [] bad = [] for opt in opts: try: key, seen_sep, value = opt.partition(sep) value = converter(value) except ValueError: key = None value = None if key and seen_sep and value is not None: good.append((key, value)) else: bad.append(opt) if bad: LOG.warning("Ignoring the invalid elements of the option " "%(name)s: %(options)s", {'name': name, 'options': ", ".join(bad)}) return good def validate_filter(filter): """Validates that the filter is configured in the default filters.""" return filter in CONF.filter_scheduler.enabled_filters def validate_weigher(weigher): """Validates that the weigher is configured in the default weighers.""" weight_classes = CONF.filter_scheduler.weight_classes if 'nova.scheduler.weights.all_weighers' in weight_classes: return True return weigher in weight_classes _SUPPORTS_AFFINITY = None _SUPPORTS_ANTI_AFFINITY = None _SUPPORTS_SOFT_AFFINITY = None _SUPPORTS_SOFT_ANTI_AFFINITY = None def reset_globals(): global _SUPPORTS_AFFINITY _SUPPORTS_AFFINITY = None global _SUPPORTS_ANTI_AFFINITY _SUPPORTS_ANTI_AFFINITY = None global _SUPPORTS_SOFT_AFFINITY _SUPPORTS_SOFT_AFFINITY = None global _SUPPORTS_SOFT_ANTI_AFFINITY _SUPPORTS_SOFT_ANTI_AFFINITY = None def _get_group_details(context, instance_uuid, user_group_hosts=None): """Provide group_hosts and group_policies sets related to instances if those instances are belonging to a group and if corresponding filters are enabled. 
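# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# the retry bookkeeping that populate_retry() / _add_retry_host() above keep
# inside the legacy filter_properties dict.  Host/node names and the attempt
# limit are made up for the example:
# ---------------------------------------------------------------------------
def sketch_populate_retry(filter_properties, max_attempts=3):
    retry = filter_properties.setdefault(
        'retry', {'num_attempts': 0, 'hosts': []})
    retry['num_attempts'] += 1
    if retry['num_attempts'] > max_attempts:
        raise RuntimeError('exceeded max scheduling attempts')
    return retry

def sketch_add_retry_host(filter_properties, host, node):
    filter_properties['retry']['hosts'].append([host, node])

_props = {}
sketch_populate_retry(_props)                 # first attempt
sketch_add_retry_host(_props, 'compute1', 'node1')
sketch_populate_retry(_props)                 # re-schedule after a failure
assert _props['retry'] == {'num_attempts': 2,
                           'hosts': [['compute1', 'node1']]}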
:param instance_uuid: UUID of the instance to check :param user_group_hosts: Hosts from the group or empty set :returns: None or namedtuple GroupDetails """ global _SUPPORTS_AFFINITY if _SUPPORTS_AFFINITY is None: _SUPPORTS_AFFINITY = validate_filter( 'ServerGroupAffinityFilter') global _SUPPORTS_ANTI_AFFINITY if _SUPPORTS_ANTI_AFFINITY is None: _SUPPORTS_ANTI_AFFINITY = validate_filter( 'ServerGroupAntiAffinityFilter') global _SUPPORTS_SOFT_AFFINITY if _SUPPORTS_SOFT_AFFINITY is None: _SUPPORTS_SOFT_AFFINITY = validate_weigher( 'nova.scheduler.weights.affinity.ServerGroupSoftAffinityWeigher') global _SUPPORTS_SOFT_ANTI_AFFINITY if _SUPPORTS_SOFT_ANTI_AFFINITY is None: _SUPPORTS_SOFT_ANTI_AFFINITY = validate_weigher( 'nova.scheduler.weights.affinity.' 'ServerGroupSoftAntiAffinityWeigher') if not instance_uuid: return try: group = objects.InstanceGroup.get_by_instance_uuid(context, instance_uuid) except exception.InstanceGroupNotFound: return policies = set(('anti-affinity', 'affinity', 'soft-affinity', 'soft-anti-affinity')) if group.policy in policies: if not _SUPPORTS_AFFINITY and 'affinity' == group.policy: msg = _("ServerGroupAffinityFilter not configured") LOG.error(msg) raise exception.UnsupportedPolicyException(reason=msg) if not _SUPPORTS_ANTI_AFFINITY and 'anti-affinity' == group.policy: msg = _("ServerGroupAntiAffinityFilter not configured") LOG.error(msg) raise exception.UnsupportedPolicyException(reason=msg) if (not _SUPPORTS_SOFT_AFFINITY and 'soft-affinity' == group.policy): msg = _("ServerGroupSoftAffinityWeigher not configured") LOG.error(msg) raise exception.UnsupportedPolicyException(reason=msg) if (not _SUPPORTS_SOFT_ANTI_AFFINITY and 'soft-anti-affinity' == group.policy): msg = _("ServerGroupSoftAntiAffinityWeigher not configured") LOG.error(msg) raise exception.UnsupportedPolicyException(reason=msg) group_hosts = set(group.get_hosts()) user_hosts = set(user_group_hosts) if user_group_hosts else set() return GroupDetails(hosts=user_hosts | group_hosts, policy=group.policy, members=group.members) def _get_instance_group_hosts_all_cells(context, instance_group): def get_hosts_in_cell(cell_context): # NOTE(melwitt): The obj_alternate_context is going to mutate the # cell_instance_group._context and to do this in a scatter-gather # with multiple parallel greenthreads, we need the instance groups # to be separate object copies. cell_instance_group = instance_group.obj_clone() with cell_instance_group.obj_alternate_context(cell_context): return cell_instance_group.get_hosts() results = nova_context.scatter_gather_skip_cell0(context, get_hosts_in_cell) hosts = [] for result in results.values(): # TODO(melwitt): We will need to handle scenarios where an exception # is raised while targeting a cell and when a cell does not respond # as part of the "handling of a down cell" spec: # https://blueprints.launchpad.net/nova/+spec/handling-down-cell if not nova_context.is_cell_failure_sentinel(result): hosts.extend(result) return hosts def setup_instance_group(context, request_spec): """Add group_hosts and group_policies fields to filter_properties dict based on instance uuids provided in request_spec, if those instances are belonging to a group. :param request_spec: Request spec """ # NOTE(melwitt): Proactively query for the instance group hosts instead of # relying on a lazy-load via the 'hosts' field of the InstanceGroup object. 
if (request_spec.instance_group and 'hosts' not in request_spec.instance_group): group = request_spec.instance_group # If the context is already targeted to a cell (during a move # operation), we don't need to scatter-gather. We do need to use # obj_alternate_context here because the RequestSpec is queried at the # start of a move operation in compute/api, before the context has been # targeted. # NOTE(mriedem): If doing a cross-cell move and the group policy # is anti-affinity, this could be wrong since there could be # instances in the group on other hosts in other cells. However, # ServerGroupAntiAffinityFilter does not look at group.hosts. if context.db_connection: with group.obj_alternate_context(context): group.hosts = group.get_hosts() else: group.hosts = _get_instance_group_hosts_all_cells(context, group) if request_spec.instance_group and request_spec.instance_group.hosts: group_hosts = request_spec.instance_group.hosts else: group_hosts = None instance_uuid = request_spec.instance_uuid # This queries the group details for the group where the instance is a # member. The group_hosts passed in are the hosts that contain members of # the requested instance group. group_info = _get_group_details(context, instance_uuid, group_hosts) if group_info is not None: request_spec.instance_group.hosts = list(group_info.hosts) request_spec.instance_group.policy = group_info.policy request_spec.instance_group.members = group_info.members def request_is_rebuild(spec_obj): """Returns True if request is for a rebuild. :param spec_obj: An objects.RequestSpec to examine (or None). """ if not spec_obj: return False if 'scheduler_hints' not in spec_obj: return False check_type = spec_obj.scheduler_hints.get('_nova_check_type') return check_type == ['rebuild'] def claim_resources(ctx, client, spec_obj, instance_uuid, alloc_req, allocation_request_version=None): """Given an instance UUID (representing the consumer of resources) and the allocation_request JSON object returned from Placement, attempt to claim resources for the instance in the placement API. Returns True if the claim process was successful, False otherwise. :param ctx: The RequestContext object :param client: The scheduler client to use for making the claim call :param spec_obj: The RequestSpec object - needed to get the project_id :param instance_uuid: The UUID of the consuming instance :param alloc_req: The allocation_request received from placement for the resources we want to claim against the chosen host. The allocation_request satisfies the original request for resources and can be supplied as-is (along with the project and user ID to the placement API's PUT /allocations/{consumer_uuid} call to claim resources for the instance :param allocation_request_version: The microversion used to request the allocations. """ if request_is_rebuild(spec_obj): # NOTE(danms): This is a rebuild-only scheduling request, so we should # not be doing any extra claiming LOG.debug('Not claiming resources in the placement API for ' 'rebuild-only scheduling of instance %(uuid)s', {'uuid': instance_uuid}) return True LOG.debug("Attempting to claim resources in the placement API for " "instance %s", instance_uuid) project_id = spec_obj.project_id # We didn't start storing the user_id in the RequestSpec until Rocky so # if it's not set on an old RequestSpec, use the user_id from the context. 
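# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# the rebuild detection used by request_is_rebuild() / claim_resources()
# above, written against a plain dict of scheduler hints instead of a
# RequestSpec object:
# ---------------------------------------------------------------------------
def sketch_is_rebuild(scheduler_hints):
    return scheduler_hints.get('_nova_check_type') == ['rebuild']

assert sketch_is_rebuild({'_nova_check_type': ['rebuild']}) is True
assert sketch_is_rebuild({'group': ['uuid-of-a-server-group']}) is False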
if 'user_id' in spec_obj and spec_obj.user_id: user_id = spec_obj.user_id else: # FIXME(mriedem): This would actually break accounting if we relied on # the allocations for something like counting quota usage because in # the case of migrating or evacuating an instance, the user here is # likely the admin, not the owner of the instance, so the allocation # would be tracked against the wrong user. user_id = ctx.user_id # NOTE(gibi): this could raise AllocationUpdateFailed which means there is # a serious issue with the instance_uuid as a consumer. Every caller of # utils.claim_resources() assumes that instance_uuid will be a new consumer # and therefore we passing None as expected consumer_generation to # reportclient.claim_resources() here. If the claim fails # due to consumer generation conflict, which in this case means the # consumer is not new, then we let the AllocationUpdateFailed propagate and # fail the build / migrate as the instance is in inconsistent state. return client.claim_resources(ctx, instance_uuid, alloc_req, project_id, user_id, allocation_request_version=allocation_request_version, consumer_generation=None) def get_weight_multiplier(host_state, multiplier_name, multiplier_config): """Given a HostState object, multplier_type name and multiplier_config, returns the weight multiplier. It reads the "multiplier_name" from "aggregate metadata" in host_state to override the multiplier_config. If the aggregate metadata doesn't contain the multiplier_name, the multiplier_config will be returned directly. :param host_state: The HostState object, which contains aggregate metadata :param multiplier_name: The weight multiplier name, like "cpu_weight_multiplier". :param multiplier_config: The weight multiplier configuration value """ aggregate_vals = filters_utils.aggregate_values_from_key(host_state, multiplier_name) try: value = filters_utils.validate_num_values( aggregate_vals, multiplier_config, cast_to=float) except ValueError as e: LOG.warning("Could not decode '%(name)s' weight multiplier: %(exce)s", {'exce': e, 'name': multiplier_name}) value = multiplier_config return value def fill_provider_mapping(request_spec, host_selection): """Fills out the request group - resource provider mapping in the request spec. :param request_spec: The RequestSpec object associated with the operation :param host_selection: The Selection object returned by the scheduler for this operation """ # Exit early if this request spec does not require mappings. if not request_spec.maps_requested_resources: return # Technically out-of-tree scheduler drivers can still not create # allocations in placement but if request_spec.maps_requested_resources # is not empty and the scheduling succeeded then placement has to be # involved mappings = jsonutils.loads(host_selection.allocation_request)['mappings'] for request_group in request_spec.requested_resources: # NOTE(efried): We can count on request_group.requester_id being set: # - For groups from flavors, ResourceRequest.get_request_group sets it # to the group suffix. # - For groups from other sources (e.g. ports, accelerators), it is # required to be set by ResourceRequest._add_request_group, and that # method uses it as the suffix. # And we can count on mappings[requester_id] existing because each # RequestGroup translated into a (replete - empties are disallowed by # ResourceRequest._add_request_group) group fed to Placement. 
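# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# the 'mappings' section of a placement allocation request that
# fill_provider_mapping() above consumes to tie each granular request group
# back to the resource provider(s) that satisfied it.  The JSON document,
# group keys and UUID strings are fabricated examples:
# ---------------------------------------------------------------------------
import json

_allocation_request = json.dumps({
    'allocations': {
        'rp-compute-uuid': {'resources': {'VCPU': 2, 'MEMORY_MB': 2048}},
        'rp-nic-uuid': {'resources': {'NET_BW_EGR_KILOBIT_PER_SEC': 1000}},
    },
    'mappings': {
        # unsuffixed (flavor) group -> root provider
        '': ['rp-compute-uuid'],
        # port-derived group, keyed by the requester_id (e.g. the port UUID)
        'port-uuid': ['rp-nic-uuid'],
    },
})

_mappings = json.loads(_allocation_request)['mappings']
assert _mappings['port-uuid'] == ['rp-nic-uuid']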
request_group.provider_uuids = mappings[request_group.requester_id] def fill_provider_mapping_based_on_allocation( context, report_client, request_spec, allocation): """Fills out the request group - resource provider mapping in the request spec based on the current allocation of the instance. The fill_provider_mapping() variant is expected to be called in every scenario when a Selection object is available from the scheduler. However in case of revert operations such Selection does not exists. In this case the mapping is calculated based on the allocation of the source host the move operation is reverting to. This is a workaround as placement does not return which RP fulfills which granular request group except in the allocation candidate request (because request groups are ephemeral, only existing in the scope of that request). .. todo:: Figure out a better way to preserve the mappings so we can get rid of this workaround. :param context: The security context :param report_client: SchedulerReportClient instance to be used to communicate with placement :param request_spec: The RequestSpec object associated with the operation :param allocation: allocation dict of the instance, keyed by RP UUID. """ # Exit early if this request spec does not require mappings. if not request_spec.maps_requested_resources: return # NOTE(gibi): Getting traits from placement for each instance in a # instance multi-create scenario is unnecessarily expensive. But # instance multi-create cannot be used with pre-created neutron ports # and this code can only be triggered with such pre-created ports so # instance multi-create is not an issue. If this ever become an issue # in the future then we could stash the RP->traits mapping on the # Selection object since we can pull the traits for each provider from # the GET /allocation_candidates response in the scheduler (or leverage # the change from the spec mentioned in the docstring above). provider_traits = { rp_uuid: report_client.get_provider_traits( context, rp_uuid).traits for rp_uuid in allocation} # NOTE(gibi): The allocation dict is in the format of the PUT /allocations # and that format can change. The current format can be detected from # allocation_request_version key of the Selection object. request_spec.map_requested_resources_to_providers( allocation, provider_traits) # FIXME(sbauza) : Move this method closer to the prefilter once split. def get_aggregates_for_routed_network( context, network_api, report_client, network_uuid): """Collects the aggregate UUIDs describing the segmentation of a routed network from Nova perspective. A routed network consists of multiple network segments. Each segment is available on a given set of compute hosts. Such segmentation is modelled as host aggregates from Nova perspective. 
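# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the original module):
# the aggregate collection performed by get_aggregates_for_routed_network()
# above, with placement replaced by a plain dict mapping segment id to
# aggregate UUIDs.  All identifiers are fabricated:
# ---------------------------------------------------------------------------
def sketch_aggregates_for_segments(segment_ids, placement_aggs):
    aggregates = []
    for segment_id in segment_ids:
        aggs = placement_aggs.get(segment_id)
        if not aggs:
            raise ValueError(
                'Failed to find aggregate related to segment %s' % segment_id)
        aggregates.extend(aggs)
    return aggregates

_placement_aggs = {'segment-1': ['agg-a'], 'segment-2': ['agg-b', 'agg-c']}
assert sketch_aggregates_for_segments(
    ['segment-1', 'segment-2'], _placement_aggs) == ['agg-a', 'agg-b', 'agg-c']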
:param context: The security context :param network_api: nova.network.neutron.API instance to be used to communicate with Neutron :param report_client: SchedulerReportClient instance to be used to communicate with Placement :param network_uuid: The UUID of the Neutron network to be translated to aggregates :returns: A list of aggregate UUIDs :raises InvalidRoutedNetworkConfiguration: if something goes wrong when try to find related aggregates """ aggregates = [] segment_ids = network_api.get_segment_ids_for_network( context, network_uuid) # Each segment is a resource provider in placement and is in an # aggregate for the routed network, so we have to get the # aggregates for each segment provider - and those aggregates are # mirrored as nova host aggregates. # NOTE(sbauza): In case of a network with non-configured routed segments, # we will get an empty list of segment UUIDs, so we won't enter the loop. for segment_id in segment_ids: # TODO(sbauza): Don't use a private method. agg_info = report_client._get_provider_aggregates(context, segment_id) # @safe_connect can return None but we also want to hard-stop here if # we can't find the aggregate that Neutron created for the segment. if agg_info is None or not agg_info.aggregates: raise exception.InvalidRoutedNetworkConfiguration( 'Failed to find aggregate related to segment %s' % segment_id) aggregates.extend(agg_info.aggregates) return aggregates # FIXME(sbauza) : Move this method closer to the prefilter once split. def get_aggregates_for_routed_subnet( context, network_api, report_client, subnet_id): """Collects the aggregate UUIDs matching the segment that relates to a particular subnet from a routed network. A routed network consists of multiple network segments. Each segment is available on a given set of compute hosts. Such segmentation is modelled as host aggregates from Nova perspective. :param context: The security context :param network_api: nova.network.neutron.API instance to be used to communicate with Neutron :param report_client: SchedulerReportClient instance to be used to communicate with Placement :param subnet_id: The UUID of the Neutron subnet to be translated to aggregate :returns: A list of aggregate UUIDs :raises InvalidRoutedNetworkConfiguration: if something goes wrong when try to find related aggregates """ segment_id = network_api.get_segment_id_for_subnet( context, subnet_id) if segment_id: # TODO(sbauza): Don't use a private method. agg_info = report_client._get_provider_aggregates(context, segment_id) # @safe_connect can return None but we also want to hard-stop here if # we can't find the aggregate that Neutron created for the segment. if agg_info is None or not agg_info.aggregates: raise exception.InvalidRoutedNetworkConfiguration( 'Failed to find aggregate related to segment %s' % segment_id) return agg_info.aggregates return [] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.417609 nova-32.0.0/nova/scheduler/weights/0000775000175000017500000000000000000000000017155 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/weights/__init__.py0000664000175000017500000000255200000000000021272 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Scheduler host weights """ from nova import weights class WeighedHost(weights.WeighedObject): def to_dict(self): x = dict(weight=self.weight) x['host'] = self.obj.host return x def __repr__(self): return "WeighedHost [host: %r, weight: %s]" % ( self.obj, self.weight) class BaseHostWeigher(weights.BaseWeigher): """Base class for host weights.""" pass class HostWeightHandler(weights.BaseWeightHandler): object_class = WeighedHost def __init__(self): super(HostWeightHandler, self).__init__(BaseHostWeigher) def all_weighers(): """Return a list of weight plugin classes found in this directory.""" return HostWeightHandler().get_all_classes() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/weights/affinity.py0000664000175000017500000000505200000000000021342 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Ericsson AB # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Affinity Weighers. Weigh hosts by the number of instances from a given host. AffinityWeigher implements the soft-affinity policy for server groups by preferring the hosts that has more instances from the given group. AntiAffinityWeigher implements the soft-anti-affinity policy for server groups by preferring the hosts that has less instances from the given group. 
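As an illustrative example (values are examples only), raising soft_affinity_weight_multiplier in the [filter_scheduler] group of nova.conf, e.g. to 10.0, makes the soft-affinity preference dominate the other enabled weighers; soft_anti_affinity_weight_multiplier plays the same role for the anti-affinity policy. Both multipliers can also be overridden per host aggregate, as they are read through utils.get_weight_multiplier() below.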
""" from oslo_config import cfg from oslo_log import log as logging from nova.scheduler import utils from nova.scheduler import weights CONF = cfg.CONF LOG = logging.getLogger(__name__) class _SoftAffinityWeigherBase(weights.BaseHostWeigher): policy_name = None def _weigh_object(self, host_state, request_spec): """Higher weights win.""" if not request_spec.instance_group: return 0 policy = request_spec.instance_group.policy if self.policy_name != policy: return 0 instances = set(host_state.instances.keys()) members = set(request_spec.instance_group.members) member_on_host = instances.intersection(members) return len(member_on_host) class ServerGroupSoftAffinityWeigher(_SoftAffinityWeigherBase): policy_name = 'soft-affinity' def weight_multiplier(self, host_state): return utils.get_weight_multiplier( host_state, 'soft_affinity_weight_multiplier', CONF.filter_scheduler.soft_affinity_weight_multiplier) class ServerGroupSoftAntiAffinityWeigher(_SoftAffinityWeigherBase): policy_name = 'soft-anti-affinity' def weight_multiplier(self, host_state): return utils.get_weight_multiplier( host_state, 'soft_anti_affinity_weight_multiplier', CONF.filter_scheduler.soft_anti_affinity_weight_multiplier) def _weigh_object(self, host_state, request_spec): weight = super(ServerGroupSoftAntiAffinityWeigher, self)._weigh_object( host_state, request_spec) return -1 * weight ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/weights/compute.py0000664000175000017500000000250300000000000021203 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ BuildFailure Weigher. Weigh hosts by the number of recent failed boot attempts. """ import nova.conf from nova.scheduler import utils from nova.scheduler import weights CONF = nova.conf.CONF class BuildFailureWeigher(weights.BaseHostWeigher): def weight_multiplier(self, host_state): """Override the weight multiplier. Note this is negated.""" return -1 * utils.get_weight_multiplier( host_state, 'build_failure_weight_multiplier', CONF.filter_scheduler.build_failure_weight_multiplier) def _weigh_object(self, host_state, weight_properties): """Higher weights win. Our multiplier is negative, so reduce our weight by number of failed builds. """ return host_state.failed_builds ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/weights/cpu.py0000664000175000017500000000314600000000000020322 0ustar00zuulzuul00000000000000# Copyright (c) 2016, Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ CPU Weigher. Weigh hosts by their CPU usage. The default is to spread instances across all hosts evenly. If you prefer stacking, you can set the 'cpu_weight_multiplier' option (by configuration or aggregate metadata) to a negative number and the weighing has the opposite effect of the default. """ import nova.conf from nova.scheduler import utils from nova.scheduler import weights CONF = nova.conf.CONF class CPUWeigher(weights.BaseHostWeigher): minval = 0 def weight_multiplier(self, host_state): """Override the weight multiplier.""" return utils.get_weight_multiplier( host_state, 'cpu_weight_multiplier', CONF.filter_scheduler.cpu_weight_multiplier) def _weigh_object(self, host_state, weight_properties): """Higher weights win. We want spreading to be the default.""" vcpus_free = ( host_state.vcpus_total * host_state.cpu_allocation_ratio - host_state.vcpus_used) return vcpus_free ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/weights/cross_cell.py0000664000175000017500000000556100000000000021666 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cross-cell move weigher. Weighs hosts based on which cell they are in. "Local" cells are preferred when moving an instance. In other words, select a host from the source cell all other things being equal. """ from nova import conf from nova.scheduler import utils from nova.scheduler import weights CONF = conf.CONF class CrossCellWeigher(weights.BaseHostWeigher): def weight_multiplier(self, host_state): """How weighted this weigher should be.""" return utils.get_weight_multiplier( host_state, 'cross_cell_move_weight_multiplier', CONF.filter_scheduler.cross_cell_move_weight_multiplier) def _weigh_object(self, host_state, weight_properties): """Higher weights win. Hosts within the "preferred" cell are weighed higher than hosts in other cells. :param host_state: nova.scheduler.host_manager.HostState object representing a ComputeNode in a cell :param weight_properties: nova.objects.RequestSpec - this is inspected to see if there is a preferred cell via the requested_destination field and if so, is the request spec allowing cross-cell move :returns: 1 if cross-cell move and host_state is within the preferred cell, -1 if cross-cell move and host_state is *not* within the preferred cell, 0 for all other cases """ # RequestSpec.requested_destination.cell should only be set for # move operations. The allow_cross_cell_move value will only be True if # policy allows. 
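# Illustrative outcome, per the docstring above: for a cross-cell move
# with a positive multiplier, a host inside the preferred (source) cell
# returns 1 and sorts ahead of hosts in other cells, which return -1;
# anything that is not a cross-cell move falls through to 0.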
if ('requested_destination' in weight_properties and weight_properties.requested_destination and 'cell' in weight_properties.requested_destination and weight_properties.requested_destination.cell and weight_properties.requested_destination.allow_cross_cell_move): # Determine if the given host is in the "preferred" cell from # the request spec. If it is, weigh it higher. if (host_state.cell_uuid == weight_properties.requested_destination.cell.uuid): return 1 # The host is in another cell, so weigh it lower. return -1 # We don't know or don't care what cell we're going to be in, so noop. return 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/weights/disk.py0000664000175000017500000000277600000000000020475 0ustar00zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Disk Weigher. Weigh hosts by their disk usage. The default is to spread instances across all hosts evenly. If you prefer stacking, you can set the 'disk_weight_multiplier' option (by configuration or aggregate metadata) to a negative number and the weighing has the opposite effect of the default. """ import nova.conf from nova.scheduler import utils from nova.scheduler import weights CONF = nova.conf.CONF class DiskWeigher(weights.BaseHostWeigher): minval = 0 def weight_multiplier(self, host_state): """Override the weight multiplier.""" return utils.get_weight_multiplier( host_state, 'disk_weight_multiplier', CONF.filter_scheduler.disk_weight_multiplier) def _weigh_object(self, host_state, weight_properties): """Higher weights win. We want spreading to be the default.""" return host_state.free_disk_mb ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/weights/hypervisor_version.py0000664000175000017500000000275200000000000023514 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Hypervisor Version Weigher. Weigh hosts by their relative hypervisor version. The default is to select newer hosts. If you prefer to invert the behavior set the 'hypervisor_version_weight_multiplier' option to a negative number and the weighing has the opposite effect of the default. 
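For example, with the default positive multiplier a host whose virt driver reports a numerically larger hypervisor version receives a higher weight from this weigher; hosts reporting no version at all are treated as 0 and therefore receive the lowest weight this weigher can produce.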
""" import nova.conf from nova.scheduler import utils from nova.scheduler import weights CONF = nova.conf.CONF class HypervisorVersionWeigher(weights.BaseHostWeigher): def weight_multiplier(self, host_state): """Override the weight multiplier.""" return utils.get_weight_multiplier( host_state, 'hypervisor_version_weight_multiplier', CONF.filter_scheduler.hypervisor_version_weight_multiplier) def _weigh_object(self, host_state, weight_properties): """Higher weights win. We want newer hosts by default.""" # convert None to 0 return host_state.hypervisor_version or 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315633.0 nova-32.0.0/nova/scheduler/weights/image_props.py0000664000175000017500000001011700000000000022034 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Image Properties Weigher. Weigh hosts by the image metadata properties related to the existing instances. A positive value will favor hosts with the same image properties (packing strategy) while a negative value will follow a spread strategy that will favor hosts not already having instances with those image properties. The default value of the multiplier is 0, which disables the weigher. """ import nova.conf from nova import exception from nova import objects from nova.scheduler import utils from nova.scheduler import weights CONF = nova.conf.CONF class ImagePropertiesWeigher(weights.BaseHostWeigher): def __init__(self): self._parse_setting() def _parse_setting(self): self.setting = dict(utils.parse_options( CONF.filter_scheduler.image_props_weight_setting, sep='=', converter=float, name="filter_scheduler.image_props_weight_setting")) def weight_multiplier(self, host_state): """Override the weight multiplier.""" return utils.get_weight_multiplier( host_state, 'image_props_weight_multiplier', CONF.filter_scheduler.image_props_weight_multiplier) def _weigh_object(self, host_state, request_spec): """Higher weights win. We want to choose hosts with the more common existing image properties that are used by instances by default. If you want to spread instances with the same properties between hosts, change the multiplier value to a negative number. """ weight = 0.0 # Disable this weigher if we don't use it as it's a bit costly. if CONF.filter_scheduler.image_props_weight_multiplier == 0.0: return weight # request_spec is a RequestSpec object which can have its image # field set to None if request_spec.image: # List values aren't hashable so we need to stringify them. requested_props = {(key, f"{value}") for key, value in request_spec.image.properties.to_dict().items()} else: requested_props = set() existing_props = [] insts = objects.InstanceList(objects=host_state.instances.values()) # system_metadata isn't loaded yet, let's do this. 
insts.fill_metadata() for inst in insts: try: props = {(key, str(value)) for key, value in inst.image_meta.properties.to_dict().items() } if inst.image_meta else set() except exception.InstanceNotFound: # the host state can be a bit stale as the instance could no # longer exist on the host if the instance deletion arrives # before the scheduler gets the RPC message of the deletion props = set() # We want to unpack the set of tuples as items to the total list # of properties we need to compare. existing_props.extend(tup for tup in props) common_props = requested_props & set(existing_props) for (prop, value) in common_props: if self.setting: # Calculate the weigh for each property by what was set # If it wasn't defined, then don't weigh this property. weight += self.setting.get( prop, 0.0) * existing_props.count((prop, value)) else: # By default, all properties are weighed evenly. weight += existing_props.count((prop, value)) return weight ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/weights/io_ops.py0000664000175000017500000000311100000000000021013 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Io Ops Weigher. Weigh hosts by their io ops number. The default is to preferably choose light workload compute hosts. If you prefer choosing heavy workload compute hosts, you can set 'io_ops_weight_multiplier' option (by configuration or aggregate metadata) to a positive number and the weighing has the opposite effect of the default. """ import nova.conf from nova.scheduler import utils from nova.scheduler import weights CONF = nova.conf.CONF class IoOpsWeigher(weights.BaseHostWeigher): minval = 0 def weight_multiplier(self, host_state): """Override the weight multiplier.""" return utils.get_weight_multiplier( host_state, 'io_ops_weight_multiplier', CONF.filter_scheduler.io_ops_weight_multiplier) def _weigh_object(self, host_state, weight_properties): """Higher weights win. We want to choose light workload host to be the default. """ return host_state.num_io_ops ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/weights/metrics.py0000664000175000017500000000553600000000000021206 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Metrics Weigher. 
Weigh hosts by their metrics. This weigher can compute the weight based on the compute node host's various metrics. The to-be weighed metrics and their weighing ratio are specified in the configuration file as follows: [metrics] weight_setting = name1=1.0, name2=-1.0 The final weight would be name1.value * 1.0 + name2.value * -1.0. """ import nova.conf from nova import exception from nova.scheduler import utils from nova.scheduler import weights CONF = nova.conf.CONF class MetricsWeigher(weights.BaseHostWeigher): def __init__(self): self._parse_setting() def _parse_setting(self): self.setting = utils.parse_options(CONF.metrics.weight_setting, sep='=', converter=float, name="metrics.weight_setting") def weight_multiplier(self, host_state): """Override the weight multiplier.""" return utils.get_weight_multiplier( host_state, 'metrics_weight_multiplier', CONF.metrics.weight_multiplier) def _weigh_object(self, host_state, weight_properties): value = 0.0 # NOTE(sbauza): Keying a dict of Metrics per metric name given that we # have a MonitorMetricList object metrics_dict = {m.name: m for m in host_state.metrics or []} for (name, ratio) in self.setting: try: value += metrics_dict[name].value * ratio except KeyError: if CONF.metrics.required: raise exception.ComputeHostMetricNotFound( host=host_state.host, node=host_state.nodename, name=name) else: # We treat the unavailable metric as the most negative # factor, i.e. set the value to make this obj would be # at the end of the ordered weighed obj list # Do nothing if ratio or weight_multiplier is 0. if ratio * self.weight_multiplier(host_state) != 0: return CONF.metrics.weight_of_unavailable return value ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/weights/num_instances.py0000664000175000017500000000304700000000000022401 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Num instances Weigher. Weigh hosts by their number of instances. The default is to select hosts with less instances for a spreading strategy. If you prefer to invert this behavior set the 'num_instances_weight_multiplier' option to a positive number and the weighing has the opposite effect of the default. """ import nova.conf from nova.scheduler import utils from nova.scheduler import weights CONF = nova.conf.CONF class NumInstancesWeigher(weights.BaseHostWeigher): def weight_multiplier(self, host_state): """Override the weight multiplier.""" return utils.get_weight_multiplier( host_state, 'num_instances_weight_multiplier', CONF.filter_scheduler.num_instances_weight_multiplier) def _weigh_object(self, host_state, weight_properties): """Higher weights win. We want to choose hosts with fewer instances as the default, hence the negative value of the multiplier. 
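For example, with a multiplier of -1.0 a host running three instances contributes a raw weight of 3 that is then negated, so it ends up below an otherwise equal host running a single instance.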
""" return host_state.num_instances ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/weights/pci.py0000664000175000017500000000561100000000000020305 0ustar00zuulzuul00000000000000# Copyright (c) 2016, Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ PCI Affinity Weigher. Weigh hosts by their PCI availability. Prefer hosts with PCI devices for instances with PCI requirements and vice versa. Configure the importance of this affinitization using the 'pci_weight_multiplier' option (by configuration or aggregate metadata). """ import nova.conf from nova.scheduler import utils from nova.scheduler import weights CONF = nova.conf.CONF # An arbitrary value used to ensure PCI-requesting instances are stacked rather # than spread on hosts with PCI devices. The actual value of this filter is in # the scarcity case, where there are very few PCI devices left in the cloud and # we want to preserve the ones that do exist. To this end, we don't really mind # if a host with 2000 PCI devices is weighted the same as one with 500 devices, # as there's clearly no shortage there. MAX_DEVS = 100 class PCIWeigher(weights.BaseHostWeigher): def weight_multiplier(self, host_state): """Override the weight multiplier.""" return utils.get_weight_multiplier( host_state, 'pci_weight_multiplier', CONF.filter_scheduler.pci_weight_multiplier) def _weigh_object(self, host_state, request_spec): """Higher weights win. We want to keep PCI hosts free unless needed. Prefer hosts with the least number of PCI devices. If the instance requests PCI devices, this will ensure a stacking behavior and reserve as many totally free PCI hosts as possible. If PCI devices are not requested, this will ensure hosts with PCI devices are avoided completely, if possible. """ pools = host_state.pci_stats.pools if host_state.pci_stats else [] free = sum(pool['count'] for pool in pools) or 0 # reverse the "has PCI" values. For instances *without* PCI device # requests, this ensures we avoid the hosts with the most free PCI # devices. For the instances *with* PCI devices requests, this helps to # prevent fragmentation. If we didn't do this, hosts with the most PCI # devices would be weighted highest and would be used first which would # prevent instances requesting a larger number of PCI devices from # launching successfully. weight = MAX_DEVS - min(free, MAX_DEVS - 1) return weight ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/scheduler/weights/ram.py0000664000175000017500000000276700000000000020322 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ RAM Weigher. Weigh hosts by their RAM usage. The default is to spread instances across all hosts evenly. If you prefer stacking, you can set the 'ram_weight_multiplier' option (by configuration or aggregate metadata) to a negative number and the weighing has the opposite effect of the default. """ import nova.conf from nova.scheduler import utils from nova.scheduler import weights CONF = nova.conf.CONF class RAMWeigher(weights.BaseHostWeigher): minval = 0 def weight_multiplier(self, host_state): """Override the weight multiplier.""" return utils.get_weight_multiplier( host_state, 'ram_weight_multiplier', CONF.filter_scheduler.ram_weight_multiplier) def _weigh_object(self, host_state, weight_properties): """Higher weights win. We want spreading to be the default.""" return host_state.free_ram_mb ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/service.py0000664000175000017500000003062100000000000015541 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Generic Node base class for all workers that run on hosts.""" import os import os.path import random import sys from oslo_log import log as logging import oslo_messaging as messaging from oslo_service import service from oslo_utils import importutils from nova import baserpc from nova import conductor import nova.conf from nova import context from nova import exception from nova.i18n import _ from nova import objects from nova.objects import base as objects_base from nova.objects import service as service_obj from nova import rpc from nova import servicegroup from nova import utils from nova import version osprofiler = importutils.try_import("osprofiler") osprofiler_initializer = importutils.try_import("osprofiler.initializer") CONF = nova.conf.CONF LOG = logging.getLogger(__name__) SERVICE_MANAGERS = { 'nova-compute': 'nova.compute.manager.ComputeManager', 'nova-conductor': 'nova.conductor.manager.ConductorManager', 'nova-scheduler': 'nova.scheduler.manager.SchedulerManager', } def _create_service_ref(this_service, context): service = objects.Service(context) service.host = this_service.host service.binary = this_service.binary service.topic = this_service.topic service.report_count = 0 service.create() return service def _update_service_ref(service): if service.version != service_obj.SERVICE_VERSION: LOG.info('Updating service version for %(binary)s on ' '%(host)s from %(old)i to %(new)i', {'binary': service.binary, 'host': service.host, 'old': service.version, 'new': service_obj.SERVICE_VERSION}) service.version = service_obj.SERVICE_VERSION service.save() def setup_profiler(binary, host): if osprofiler and CONF.profiler.enabled: osprofiler.initializer.init_from_conf( conf=CONF, context=context.get_admin_context().to_dict(), project="nova", service=binary, host=host) LOG.info("OSProfiler is enabled.") class Service(service.Service): """Service object for binaries running on hosts. A service takes a manager and enables rpc by listening to queues based on topic. It also periodically runs tasks on the manager and reports its state to the database services table. """ def __init__(self, host, binary, topic, manager, report_interval=None, periodic_enable=None, periodic_fuzzy_delay=None, periodic_interval_max=None, *args, **kwargs): super(Service, self).__init__() self.host = host self.binary = binary self.topic = topic self.manager_class_name = manager self.servicegroup_api = servicegroup.API() manager_class = importutils.import_class(self.manager_class_name) if objects_base.NovaObject.indirection_api: conductor_api = conductor.API() conductor_api.wait_until_ready(context.get_admin_context()) self.manager = manager_class(host=self.host, *args, **kwargs) self.rpcserver = None self.report_interval = report_interval self.periodic_enable = periodic_enable self.periodic_fuzzy_delay = periodic_fuzzy_delay self.periodic_interval_max = periodic_interval_max self.saved_args, self.saved_kwargs = args, kwargs self.backdoor_port = None setup_profiler(binary, self.host) def __repr__(self): return "<%(cls_name)s: host=%(host)s, binary=%(binary)s, " \ "manager_class_name=%(manager)s>" % { 'cls_name': self.__class__.__name__, 'host': self.host, 'binary': self.binary, 'manager': self.manager_class_name } def start(self): """Start the service. This includes starting an RPC service, initializing periodic tasks, etc. """ # NOTE(melwitt): Clear the cell cache holding database transaction # context manager objects. 
We do this to ensure we create new internal # oslo.db locks to avoid a situation where a child process receives an # already locked oslo.db lock when it is forked. When a child process # inherits a locked oslo.db lock, database accesses through that # transaction context manager will never be able to acquire the lock # and requests will fail with CellTimeout errors. # See https://bugs.python.org/issue6721 for more information. # With python 3.7, it would be possible for oslo.db to make use of the # os.register_at_fork() method to reinitialize its lock. Until we # require python 3.7 as a minimum version, we must handle the situation # outside of oslo.db. context.CELL_CACHE = {} verstr = version.version_string_with_package() LOG.info('Starting %(topic)s node (version %(version)s)', {'topic': self.topic, 'version': verstr}) self.basic_config_check() ctxt = context.get_admin_context() self.service_ref = objects.Service.get_by_host_and_binary( ctxt, self.host, self.binary) self.manager.init_host(self.service_ref) self.model_disconnected = False if self.service_ref: _update_service_ref(self.service_ref) else: try: self.service_ref = _create_service_ref(self, ctxt) except (exception.ServiceTopicExists, exception.ServiceBinaryExists): # NOTE(danms): If we race to create a record with a sibling # worker, don't fail here. self.service_ref = objects.Service.get_by_host_and_binary( ctxt, self.host, self.binary) self.manager.pre_start_hook(self.service_ref) if self.backdoor_port is not None: self.manager.backdoor_port = self.backdoor_port LOG.debug("Creating RPC server for service %s", self.topic) target = messaging.Target(topic=self.topic, server=self.host) endpoints = [ self.manager, baserpc.BaseRPCAPI(self.manager.service_name, self.backdoor_port) ] endpoints.extend(self.manager.additional_endpoints) serializer = objects_base.NovaObjectSerializer() self.rpcserver = rpc.get_server(target, endpoints, serializer) self.rpcserver.start() self.manager.post_start_hook() LOG.debug("Join ServiceGroup membership for this service %s", self.topic) # Add service to the ServiceGroup membership group. self.servicegroup_api.join(self.host, self.topic, self) if self.periodic_enable: if self.periodic_fuzzy_delay: initial_delay = random.randint(0, self.periodic_fuzzy_delay) else: initial_delay = None self.tg.add_dynamic_timer(self.periodic_tasks, initial_delay=initial_delay, periodic_interval_max= self.periodic_interval_max) def __getattr__(self, key): manager = self.__dict__.get('manager', None) return getattr(manager, key) @classmethod def create(cls, host=None, binary=None, topic=None, manager=None, report_interval=None, periodic_enable=None, periodic_fuzzy_delay=None, periodic_interval_max=None): """Instantiates class and passes back application object. 
:param host: defaults to CONF.host :param binary: defaults to basename of executable :param topic: defaults to bin_name - 'nova-' part :param manager: defaults to CONF._manager :param report_interval: defaults to CONF.report_interval :param periodic_enable: defaults to CONF.periodic_enable :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay :param periodic_interval_max: if set, the max time to wait between runs """ if not host: host = CONF.host if not binary: binary = os.path.basename(sys.argv[0]) if not topic: topic = binary.rpartition('nova-')[2] if not manager: manager = SERVICE_MANAGERS.get(binary) if report_interval is None: report_interval = CONF.report_interval if periodic_enable is None: periodic_enable = CONF.periodic_enable if periodic_fuzzy_delay is None: periodic_fuzzy_delay = CONF.periodic_fuzzy_delay service_obj = cls(host, binary, topic, manager, report_interval=report_interval, periodic_enable=periodic_enable, periodic_fuzzy_delay=periodic_fuzzy_delay, periodic_interval_max=periodic_interval_max) # NOTE(gibi): This have to be after the service object creation as # that is the point where we can safely use the RPC to the conductor. # E.g. the Service.__init__ actually waits for the conductor to start # up before it allows the service to be created. The # raise_if_old_compute() depends on the RPC to be up and does not # implement its own retry mechanism to connect to the conductor. try: utils.raise_if_old_compute() except exception.TooOldComputeService as e: if CONF.workarounds.disable_compute_service_check_for_ffu: LOG.warning(str(e)) else: raise return service_obj def kill(self): """Destroy the service object in the datastore. NOTE: Although this method is not used anywhere else than tests, it is convenient to have it here, so the tests might easily and in clean way stop and remove the service_ref. 
""" self.stop() try: self.service_ref.destroy() except exception.NotFound: LOG.warning('Service killed that has no database entry') def stop(self): """stop the service and clean up.""" try: self.rpcserver.stop() self.rpcserver.wait() except Exception: pass try: self.manager.cleanup_host() except Exception: LOG.exception('Service error occurred during cleanup_host') pass super(Service, self).stop() def periodic_tasks(self, raise_on_error=False): """Tasks to be run at a periodic interval.""" ctxt = context.get_admin_context() return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error) def basic_config_check(self): """Perform basic config checks before starting processing.""" # Make sure the tempdir exists and is writable try: with utils.tempdir(): pass except Exception as e: LOG.error('Temporary directory is invalid: %s', e) sys.exit(1) def reset(self): """reset the service.""" self.manager.reset() # Reset the cell cache that holds database transaction context managers context.CELL_CACHE = {} def process_launcher(): return service.ProcessLauncher(CONF, restart_method='mutate') # NOTE(vish): the global launcher is to maintain the existing # functionality of calling service.serve + # service.wait _launcher = None def serve(server, workers=None): global _launcher if _launcher: raise RuntimeError(_('serve() can only be called once')) _launcher = service.launch(CONF, server, workers=workers, restart_method='mutate') def wait(): _launcher.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/service_auth.py0000664000175000017500000000370100000000000016561 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading as ks_loading from keystoneauth1 import service_token from oslo_log import log as logging import nova.conf CONF = nova.conf.CONF LOG = logging.getLogger(__name__) _SERVICE_AUTH = None def reset_globals(): """For async unit test consistency.""" global _SERVICE_AUTH _SERVICE_AUTH = None def get_auth_plugin(context, user_auth=None): # user_auth may be passed in when the RequestContext is anonymous, such as # when get_admin_context() is used for API calls by nova-manage. user_auth = user_auth or context.get_auth_plugin() if CONF.service_user.send_service_user_token: global _SERVICE_AUTH if not _SERVICE_AUTH: _SERVICE_AUTH = ks_loading.load_auth_from_conf_options( CONF, group= nova.conf.service_token.SERVICE_USER_GROUP) if _SERVICE_AUTH is None: # This indicates a misconfiguration so log a warning and # return the user_auth. LOG.warning('Unable to load auth from [service_user] ' 'configuration. 
Ensure "auth_type" is set.') return user_auth return service_token.ServiceTokenAuthWrapper( user_auth=user_auth, service_auth=_SERVICE_AUTH) return user_auth ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.417609 nova-32.0.0/nova/servicegroup/0000775000175000017500000000000000000000000016242 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/servicegroup/__init__.py0000664000175000017500000000144700000000000020361 0ustar00zuulzuul00000000000000# Copyright 2012 IBM Corp. # Copyright (c) AT&T Labs Inc. 2012 Yun Mao # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ The membership service for Nova. Different implementations can be plugged according to the Nova configuration. """ from nova.servicegroup import api API = api.API ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/servicegroup/api.py0000664000175000017500000000610400000000000017366 0ustar00zuulzuul00000000000000# Copyright 2012 IBM Corp. # Copyright (c) AT&T Labs Inc. 2012 Yun Mao # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Define APIs for the servicegroup access.""" from oslo_log import log as logging from oslo_utils import importutils import nova.conf LOG = logging.getLogger(__name__) _driver_name_class_mapping = { 'db': 'nova.servicegroup.drivers.db.DbDriver', 'mc': 'nova.servicegroup.drivers.mc.MemcachedDriver' } CONF = nova.conf.CONF # NOTE(geekinutah): By default drivers wait 5 seconds before reporting INITIAL_REPORTING_DELAY = 5 class API(object): def __init__(self, *args, **kwargs): '''Create an instance of the servicegroup API. args and kwargs are passed down to the servicegroup driver when it gets created. ''' # Make sure report interval is less than service down time report_interval = CONF.report_interval if CONF.service_down_time <= report_interval: new_service_down_time = int(report_interval * 2.5) LOG.warning("Report interval must be less than service down " "time. Current config: . 
Setting service_down_time " "to: %(new_service_down_time)s", {'service_down_time': CONF.service_down_time, 'report_interval': report_interval, 'new_service_down_time': new_service_down_time}) CONF.set_override('service_down_time', new_service_down_time) driver_class = _driver_name_class_mapping[CONF.servicegroup_driver] self._driver = importutils.import_object(driver_class, *args, **kwargs) def join(self, member, group, service=None): """Add a new member to a service group. :param member: the joined member ID/name :param group: the group ID/name, of the joined member :param service: a `nova.service.Service` object """ return self._driver.join(member, group, service) def service_is_up(self, member): """Check if the given member is up.""" # NOTE(johngarbutt) no logging in this method, # so this doesn't slow down the scheduler if member.get('forced_down'): return False return self._driver.is_up(member) def get_updated_time(self, member): """Get the updated time from drivers except db""" return self._driver.updated_time(member) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.417609 nova-32.0.0/nova/servicegroup/drivers/0000775000175000017500000000000000000000000017720 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/servicegroup/drivers/__init__.py0000664000175000017500000000000000000000000022017 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/servicegroup/drivers/base.py0000664000175000017500000000221500000000000021204 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. class Driver(object): """Base class for all ServiceGroup drivers.""" def join(self, member, group, service=None): """Add a new member to a service group. :param member: the joined member ID/name :param group: the group ID/name, of the joined member :param service: a `nova.service.Service` object """ raise NotImplementedError() def is_up(self, member): """Check whether the given member is up.""" raise NotImplementedError() def updated_time(self, service_ref): """Get the updated time""" raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/servicegroup/drivers/db.py0000664000175000017500000001252700000000000020666 0ustar00zuulzuul00000000000000# Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging import oslo_messaging as messaging from oslo_utils import timeutils import nova.conf from nova import exception from nova.i18n import _ from nova.servicegroup import api from nova.servicegroup.drivers import base CONF = nova.conf.CONF LOG = logging.getLogger(__name__) class DbDriver(base.Driver): def __init__(self, *args, **kwargs): self.service_down_time = CONF.service_down_time def join(self, member, group, service=None): """Add a new member to a service group. :param member: the joined member ID/name :param group: the group ID/name, of the joined member :param service: a `nova.service.Service` object """ LOG.debug('DB_Driver: join new ServiceGroup member %(member)s to ' 'the %(group)s group, service = %(service)s', {'member': member, 'group': group, 'service': service}) if service is None: raise RuntimeError(_('service is a mandatory argument for DB based' ' ServiceGroup driver')) report_interval = service.report_interval if report_interval: service.tg.add_timer_args( report_interval, self._report_state, args=[service], initial_delay=api.INITIAL_REPORTING_DELAY) def is_up(self, service_ref): """Moved from nova.utils Check whether a service is up based on last heartbeat. """ last_heartbeat = (service_ref.get('last_seen_up') or service_ref['created_at']) if isinstance(last_heartbeat, str): # NOTE(russellb) If this service_ref came in over rpc via # conductor, then the timestamp will be a string and needs to be # converted back to a datetime. last_heartbeat = timeutils.parse_strtime(last_heartbeat) else: # Objects have proper UTC timezones, but the timeutils comparison # below does not (and will fail) last_heartbeat = last_heartbeat.replace(tzinfo=None) # Timestamps in DB are UTC. elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow()) is_up = abs(elapsed) <= self.service_down_time if not is_up: LOG.debug('Seems service %(binary)s on host %(host)s is down. ' 'Last heartbeat was %(lhb)s. Elapsed time is %(el)s', {'binary': service_ref.get('binary'), 'host': service_ref.get('host'), 'lhb': str(last_heartbeat), 'el': str(elapsed)}) return is_up def updated_time(self, service_ref): """Get the updated time from db""" return service_ref['updated_at'] def _report_state(self, service): """Update the state of this service in the datastore.""" try: service.service_ref.report_count += 1 service.service_ref.save() # TODO(termie): make this pattern be more elegant. if getattr(service, 'model_disconnected', False): service.model_disconnected = False LOG.info('Recovered from being unable to report status.') except messaging.MessagingTimeout: # NOTE(johngarbutt) during upgrade we will see messaging timeouts # as nova-conductor is restarted, so only log this error once. if not getattr(service, 'model_disconnected', False): service.model_disconnected = True LOG.warning( 'Lost connection to nova-conductor for reporting service ' 'status.' ) except exception.ServiceNotFound: # The service may have been deleted via the API but the actual # process is still running. Provide a useful error message rather # than the noisy traceback in the generic Exception block below. LOG.error('The services table record for the %s service on ' 'host %s is gone. 
You either need to stop this service ' 'if it should be deleted or restart it to recreate the ' 'record in the database.', service.service_ref.binary, service.service_ref.host) service.model_disconnected = True except Exception: # NOTE(rpodolyaka): we'd like to avoid catching of all possible # exceptions here, but otherwise it would become possible for # the state reporting thread to stop abruptly, and thus leave # the service unusable until it's restarted. LOG.exception('Unexpected error while reporting service status') # trigger the recovery log message, if this error goes away service.model_disconnected = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/servicegroup/drivers/mc.py0000664000175000017500000001031300000000000020667 0ustar00zuulzuul00000000000000# Service heartbeat driver using Memcached # Copyright (c) 2013 Akira Yoshiyama # # This is derived from nova/servicegroup/drivers/db.py. # Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import iso8601 from oslo_log import log as logging from oslo_utils import timeutils from nova import cache_utils import nova.conf from nova.i18n import _ from nova.servicegroup import api from nova.servicegroup.drivers import base CONF = nova.conf.CONF LOG = logging.getLogger(__name__) class MemcachedDriver(base.Driver): def __init__(self, *args, **kwargs): self.mc = cache_utils.get_memcached_client( expiration_time=CONF.service_down_time) def join(self, member_id, group_id, service=None): """Join the given service with its group.""" LOG.debug('Memcached_Driver: join new ServiceGroup member ' '%(member_id)s to the %(group_id)s group, ' 'service = %(service)s', {'member_id': member_id, 'group_id': group_id, 'service': service}) if service is None: raise RuntimeError(_('service is a mandatory argument for ' 'Memcached based ServiceGroup driver')) report_interval = service.report_interval if report_interval: service.tg.add_timer_args( report_interval, self._report_state, args=[service], initial_delay=api.INITIAL_REPORTING_DELAY) def is_up(self, service_ref): """Moved from nova.utils Check whether a service is up based on last heartbeat. """ key = "%(topic)s:%(host)s" % service_ref is_up = self.mc.get(str(key)) is not None if not is_up: LOG.debug('Seems service %s is down', key) return is_up def updated_time(self, service_ref): """Get the updated time from memcache""" key = "%(topic)s:%(host)s" % service_ref updated_time_in_mc = self.mc.get(str(key)) updated_time_in_db = service_ref['updated_at'] if updated_time_in_mc: # Change mc time to offset-aware time updated_time_in_mc = updated_time_in_mc.replace(tzinfo=iso8601.UTC) # If [DEFAULT]/enable_new_services is set to be false, the # ``updated_time_in_db`` will be None, in this case, use # ``updated_time_in_mc`` instead. 
if (not updated_time_in_db or updated_time_in_db <= updated_time_in_mc): return updated_time_in_mc return updated_time_in_db def _report_state(self, service): """Update the state of this service in the datastore.""" try: key = "%(topic)s:%(host)s" % service.service_ref # memcached has data expiration time capability. # set(..., time=CONF.service_down_time) uses it and # reduces key-deleting code. self.mc.set(str(key), timeutils.utcnow()) # TODO(termie): make this pattern be more elegant. if getattr(service, 'model_disconnected', False): service.model_disconnected = False LOG.info( 'Recovered connection to memcache server for reporting ' 'service status.' ) # TODO(vish): this should probably only catch connection errors except Exception: if not getattr(service, 'model_disconnected', False): service.model_disconnected = True LOG.warning( 'Lost connection to memcache server for reporting service ' 'status.' ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.417609 nova-32.0.0/nova/share/0000775000175000017500000000000000000000000014627 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/share/__init__.py0000664000175000017500000000000000000000000016726 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/share/manila.py0000664000175000017500000002720000000000000016443 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests relating to shares + manila. """ from dataclasses import dataclass import functools from typing import Optional from openstack import exceptions as sdk_exc from oslo_log import log as logging import nova.conf from nova import exception from nova import utils CONF = nova.conf.CONF LOG = logging.getLogger(__name__) MIN_SHARE_FILE_SYSTEM_MICROVERSION = "2.82" def _manilaclient(context, admin=False): """Constructs a manila client object for making API requests. :return: An openstack.proxy.Proxy object for the specified service_type. :raise: ConfGroupForServiceTypeNotFound If no conf group name could be found for the specified service_type. 
:raise: ServiceUnavailable if the service is down """ return utils.get_sdk_adapter( "shared-file-system", admin=admin, check_service=True, context=context, shared_file_system_api_version=MIN_SHARE_FILE_SYSTEM_MICROVERSION, global_request_id=context.global_id ) @dataclass(frozen=True) class Share(): id: str size: int availability_zone: Optional[str] created_at: str status: str name: Optional[str] description: Optional[str] project_id: str snapshot_id: Optional[str] share_network_id: Optional[str] share_proto: str export_location: str metadata: dict share_type: Optional[str] is_public: bool @classmethod def from_manila_share(cls, manila_share, export_location): return cls( id=manila_share.id, size=manila_share.size, availability_zone=manila_share.availability_zone, created_at=manila_share.created_at, status=manila_share.status, name=manila_share.name, description=manila_share.description, project_id=manila_share.project_id, snapshot_id=manila_share.snapshot_id, share_network_id=manila_share.share_network_id, share_proto=manila_share.share_protocol, export_location=export_location, metadata=manila_share.metadata, share_type=manila_share.share_type, is_public=manila_share.is_public, ) @dataclass(frozen=True) class Access(): id: str access_level: str state: str access_type: str access_to: str access_key: Optional[str] @classmethod def from_manila_access(cls, manila_access): return cls( id=manila_access.id, access_level=manila_access.access_level, state=manila_access.state, access_type=manila_access.access_type, access_to=manila_access.access_to, access_key= getattr(manila_access, 'access_key', None) ) @classmethod def from_dict(cls, manila_access): return cls( id=manila_access['id'], access_level=manila_access['access_level'], state=manila_access['state'], access_type=manila_access['access_type'], access_to=manila_access['access_to'], access_key=manila_access['access_key'], ) def translate_sdk_exception(method): """Transforms a manila exception but keeps its traceback intact.""" @functools.wraps(method) def wrapper(self, *args, **kwargs): try: res = method(self, *args, **kwargs) except (exception.ServiceUnavailable, exception.ConfGroupForServiceTypeNotFound) as exc: raise exception.ManilaConnectionFailed(reason=str(exc)) from exc except (sdk_exc.BadRequestException) as exc: raise exception.InvalidInput(reason=str(exc)) from exc except (sdk_exc.ForbiddenException) as exc: raise exception.Forbidden(str(exc)) from exc return res return wrapper def translate_share_exception(method): """Transforms the exception for the share but keeps its traceback intact. """ def wrapper(self, *args, **kwargs): try: res = method(self, *args, **kwargs) except (sdk_exc.ResourceNotFound) as exc: raise exception.ShareNotFound( share_id=args[1], reason=exc) from exc except (sdk_exc.BadRequestException) as exc: raise exception.ShareNotFound( share_id=args[1], reason=exc) from exc return res return translate_sdk_exception(wrapper) def translate_allow_exception(method): """Transforms the exception for allow but keeps its traceback intact. """ def wrapper(self, *args, **kwargs): try: res = method(self, *args, **kwargs) except (sdk_exc.BadRequestException) as exc: raise exception.ShareAccessGrantError( share_id=args[1], reason=exc) from exc except (sdk_exc.ResourceNotFound) as exc: raise exception.ShareNotFound( share_id=args[1], reason=exc) from exc return res return translate_sdk_exception(wrapper) def translate_deny_exception(method): """Transforms the exception for deny but keeps its traceback intact. 
""" def wrapper(self, *args, **kwargs): try: res = method(self, *args, **kwargs) except (sdk_exc.BadRequestException) as exc: raise exception.ShareAccessRemovalError( share_id=args[1], reason=exc) from exc except (sdk_exc.ResourceNotFound) as exc: raise exception.ShareNotFound( share_id=args[1], reason=exc) from exc return res return translate_sdk_exception(wrapper) class API(object): """API for interacting with the share manager.""" @translate_share_exception def get(self, context, share_id): """Get the details about a share given its ID. :param share_id: the id of the share to get :raises: ShareNotFound if the share_id specified is not available. :returns: Share object. """ def filter_export_locations(export_locations): # Return the preferred path otherwise choose the first one paths = [] try: for export_location in export_locations: if export_location.is_preferred: return export_location.path else: paths.append(export_location.path) return paths[0] except (IndexError, NameError): return None client = _manilaclient(context, admin=False) LOG.debug("Get share id:'%s' data from manila", share_id) share = client.get_share(share_id) export_locations = client.export_locations(share.id) export_location = filter_export_locations(export_locations) return Share.from_manila_share(share, export_location) @translate_share_exception def get_access( self, context, share_id, access_type, access_to, ): """Get share access :param share_id: the id of the share to get :param access_type: the type of access ("ip", "cert", "user") :param access_to: ip:cidr or cert:cn or user:group or user name :raises: ShareNotFound if the share_id specified is not available. :returns: Access object or None if there is no access granted to this share. """ LOG.debug("Get share access id for share id:'%s'", share_id) access_list = _manilaclient( context, admin=True).access_rules(share_id) for access in access_list: if ( access.access_type == access_type and access.access_to == access_to ): return Access.from_manila_access(access) return None @translate_allow_exception def allow( self, context, share_id, access_type, access_to, access_level, ): """Allow share access :param share_id: the id of the share :param access_type: the type of access ("ip", "cert", "user") :param access_to: ip:cidr or cert:cn or user:group or user name :param access_level: "ro" for read only or "rw" for read/write :raises: ShareNotFound if the share_id specified is not available. :raises: BadRequest if the share already exists. :raises: ShareAccessGrantError if the answer from manila allow API is not the one expected. 
""" def check_manila_access_response(access): if not ( isinstance(access, Access) and access.access_type == access_type and access.access_to == access_to and access.access_level == access_level ): raise exception.ShareAccessGrantError(share_id=share_id) LOG.debug("Allow host access to share id:'%s'", share_id) access = _manilaclient(context, admin=True).create_access_rule( share_id, access_type=access_type, access_to=access_to, access_level=access_level, lock_visibility=True, lock_deletion=True, lock_reason="Lock by nova", ) access = Access.from_manila_access(access) check_manila_access_response(access) return access @translate_deny_exception def deny( self, context, share_id, access_type, access_to, ): """Deny share access :param share_id: the id of the share :param access_type: the type of access ("ip", "cert", "user") :param access_to: ip:cidr or cert:cn or user:group or user name :raises: ShareAccessNotFound if the access_id specified is not available. :raises: ShareAccessRemovalError if the manila deny API does not respond with a status code 202. """ access = self.get_access( context, share_id, access_type, access_to, ) if access: client = _manilaclient(context, admin=True) LOG.debug("Deny host access to share id:'%s'", share_id) resp = client.delete_access_rule( access.id, share_id, unrestrict=True ) if resp.status_code != 202: raise exception.ShareAccessRemovalError( share_id=share_id, reason=resp.reason ) else: raise exception.ShareAccessNotFound(share_id=share_id) def has_access(self, context, share_id, access_type, access_to): """Helper method to check if a policy is applied to a share :param context: The request context. :param share_id: the id of the share :param access_type: the type of access ("ip", "cert", "user") :param access_to: ip:cidr or cert:cn or user:group or user name :returns: boolean, true means the policy is applied. """ access = self.get_access( context, share_id, access_type, access_to ) return access is not None and access.state == 'active' ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.417609 nova-32.0.0/nova/storage/0000775000175000017500000000000000000000000015171 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/storage/__init__.py0000664000175000017500000000000000000000000017270 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/storage/rbd_utils.py0000664000175000017500000004644500000000000017547 0ustar00zuulzuul00000000000000# Copyright 2012 Grid Dynamics # Copyright 2013 Inktank Storage, Inc. # Copyright 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import urllib from eventlet import tpool from oslo_concurrency import processutils from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_service import loopingcall from oslo_utils import encodeutils from oslo_utils import excutils import nova.conf from nova import exception from nova.i18n import _ try: import rados import rbd except ImportError: rados = None rbd = None CONF = nova.conf.CONF LOG = logging.getLogger(__name__) RESIZE_SNAPSHOT_NAME = 'nova-resize' class RbdProxy(object): """A wrapper around rbd.RBD class instance to avoid blocking of process. Offloads all calls to rbd.RBD class methods to native OS threads, so that we do not block the whole process while executing the librbd code. """ def __init__(self): self._rbd = tpool.Proxy(rbd.RBD()) def __getattr__(self, attr): return getattr(self._rbd, attr) class RBDVolumeProxy(object): """Context manager for dealing with an existing rbd volume. This handles connecting to rados and opening an ioctx automatically, and otherwise acts like a librbd Image object. The underlying librados client and ioctx can be accessed as the attributes 'client' and 'ioctx'. """ def __init__(self, driver, name, pool=None, snapshot=None, read_only=False): client, ioctx = driver._connect_to_rados(pool) try: self.volume = tpool.Proxy(rbd.Image(ioctx, name, snapshot=snapshot, read_only=read_only)) except rbd.ImageNotFound: with excutils.save_and_reraise_exception(): LOG.debug("rbd image %s does not exist", name) driver._disconnect_from_rados(client, ioctx) except rbd.Error: with excutils.save_and_reraise_exception(): LOG.exception("error opening rbd image %s", name) driver._disconnect_from_rados(client, ioctx) self.driver = driver self.client = client self.ioctx = ioctx def __enter__(self): return self def __exit__(self, type_, value, traceback): try: self.volume.close() finally: self.driver._disconnect_from_rados(self.client, self.ioctx) def __getattr__(self, attrib): return getattr(self.volume, attrib) class RADOSClient(object): """Context manager to simplify error handling for connecting to ceph.""" def __init__(self, driver, pool=None): self.driver = driver self.cluster, self.ioctx = driver._connect_to_rados(pool) def __enter__(self): return self def __exit__(self, type_, value, traceback): self.driver._disconnect_from_rados(self.cluster, self.ioctx) @property def features(self): features = self.cluster.conf_get('rbd_default_features') if ((features is None) or (int(features) == 0)): features = rbd.RBD_FEATURE_LAYERING return int(features) class RBDDriver(object): def __init__(self, pool=None, user=None, ceph_conf=None, connect_timeout=None): # NOTE(lyarwood): Ensure the rbd and rados modules have been imported # correctly before continuing, this is done in a separate private # method to allow us to skip this check in unit tests etc. self._check_for_import_failure() self.pool = pool or CONF.libvirt.images_rbd_pool self.rbd_user = user or CONF.libvirt.rbd_user self.rbd_connect_timeout = ( connect_timeout or CONF.libvirt.rbd_connect_timeout) self.ceph_conf = ceph_conf or CONF.libvirt.images_rbd_ceph_conf def _check_for_import_failure(self): # NOTE(lyarwood): If the original import of the required rbd or rados # modules failed then repeat the imports at runtime within this driver # to log the full exception in order to provide context to anyone # debugging the failure in the logs. 
global rados, rbd if rbd is None or rados is None: try: # NOTE(lyarwood): noqa is required on both imports here as they # are unused (F401) even if successful. import rados # noqa: F401 import rbd # noqa: F401 except ImportError: LOG.exception("Unable to import the rados or rbd modules") raise RuntimeError(_('rbd python libraries not found')) def _connect_to_rados(self, pool=None): client = rados.Rados(rados_id=self.rbd_user, conffile=self.ceph_conf) try: client.connect(timeout=self.rbd_connect_timeout) pool_to_open = pool or self.pool # NOTE(luogangyi): open_ioctx >= 10.1.0 could handle unicode # arguments perfectly as part of Python 3 support. # Therefore, when we turn to Python 3, it's safe to remove # str() conversion. ioctx = client.open_ioctx(str(pool_to_open)) return client, ioctx except rados.Error: # shutdown cannot raise an exception client.shutdown() raise def _disconnect_from_rados(self, client, ioctx): # closing an ioctx cannot raise an exception ioctx.close() client.shutdown() def ceph_args(self): """List of command line parameters to be passed to ceph commands to reflect RBDDriver configuration such as RBD user name and location of ceph.conf. """ args = [] if self.rbd_user: args.extend(['--id', self.rbd_user]) if self.ceph_conf: args.extend(['--conf', self.ceph_conf]) return args def get_mon_addrs(self, strip_brackets=True): args = ['ceph', 'mon', 'dump', '--format=json'] + self.ceph_args() out, _ = processutils.execute(*args) lines = out.split('\n') if lines[0].startswith('dumped monmap epoch'): lines = lines[1:] monmap = jsonutils.loads('\n'.join(lines)) addrs = [mon['addr'] for mon in monmap['mons']] hosts = [] ports = [] for addr in addrs: host_port = addr[:addr.rindex('/')] host, port = host_port.rsplit(':', 1) if strip_brackets: host = host.strip('[]') hosts.append(host) ports.append(port) return hosts, ports def parse_url(self, url): prefix = 'rbd://' if not url.startswith(prefix): reason = _('Not stored in rbd') raise exception.ImageUnacceptable(image_id=url, reason=reason) pieces = [urllib.parse.unquote(piece) for piece in url[len(prefix):].split('/')] if '' in pieces: reason = _('Blank components') raise exception.ImageUnacceptable(image_id=url, reason=reason) if len(pieces) != 4: reason = _('Not an rbd snapshot') raise exception.ImageUnacceptable(image_id=url, reason=reason) return pieces def get_fsid(self): with RADOSClient(self) as client: return encodeutils.safe_decode(client.cluster.get_fsid()) def is_cloneable(self, image_location, image_meta): url = image_location['url'] try: fsid, pool, image, snapshot = self.parse_url(url) except exception.ImageUnacceptable as e: LOG.debug('not cloneable: %s', e) return False fsid = encodeutils.safe_decode(fsid) if self.get_fsid() != fsid: reason = '%s is in a different ceph cluster' % url LOG.debug(reason) return False if image_meta.get('disk_format') != 'raw': LOG.debug("rbd image clone requires image format to be " "'raw' but image %s is '%s'", url, image_meta.get('disk_format')) return False # check that we can read the image try: return self.exists(image, pool=pool, snapshot=snapshot) except rbd.Error as e: LOG.debug('Unable to open image %(loc)s: %(err)s', dict(loc=url, err=e)) return False def clone(self, image_location, dest_name, dest_pool=None): _fsid, pool, image, snapshot = self.parse_url( image_location['url']) LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to ' '%(dest_pool)s/%(dest_name)s', dict(pool=pool, img=image, snap=snapshot, dest_pool=dest_pool, dest_name=dest_name)) with RADOSClient(self, str(pool)) as 
src_client: with RADOSClient(self, dest_pool) as dest_client: try: RbdProxy().clone(src_client.ioctx, image, snapshot, dest_client.ioctx, str(dest_name), features=src_client.features) except rbd.PermissionError: raise exception.Forbidden(_('no write permission on ' 'storage pool %s') % dest_pool) def size(self, name): with RBDVolumeProxy(self, name, read_only=True) as vol: return vol.size() def resize(self, name, size): """Resize RBD volume. :name: Name of RBD object :size: New size in bytes """ LOG.debug('resizing rbd image %s to %d', name, size) with RBDVolumeProxy(self, name) as vol: vol.resize(size) def parent_info(self, volume, pool=None): """Returns the pool, image and snapshot name for the parent of an RBD volume. :volume: Name of RBD object :pool: Name of pool """ try: with RBDVolumeProxy(self, str(volume), pool=pool, read_only=True) as vol: return vol.parent_info() except rbd.ImageNotFound: raise exception.ImageUnacceptable(_("no usable parent snapshot " "for volume %s") % volume) def flatten(self, volume, pool=None): """"Flattens" a snapshotted image with the parents' data, effectively detaching it from the parent. :volume: Name of RBD object :pool: Name of pool """ LOG.debug('flattening %(pool)s/%(vol)s', dict(pool=pool, vol=volume)) with RBDVolumeProxy(self, str(volume), pool=pool) as vol: vol.flatten() def exists(self, name, pool=None, snapshot=None): try: with RBDVolumeProxy(self, name, pool=pool, snapshot=snapshot, read_only=True): return True except rbd.ImageNotFound: return False def remove_image(self, name): """Remove RBD volume :name: Name of RBD volume """ with RADOSClient(self, self.pool) as client: try: RbdProxy().remove(client.ioctx, name) except rbd.ImageNotFound: LOG.warning('image %(volume)s in pool %(pool)s can not be ' 'found, failed to remove', {'volume': name, 'pool': self.pool}) except rbd.ImageHasSnapshots: LOG.error('image %(volume)s in pool %(pool)s has ' 'snapshots, failed to remove', {'volume': name, 'pool': self.pool}) def import_image(self, base, name): """Import RBD volume from image file. Uses the command line import instead of librbd since rbd import command detects zeroes to preserve sparseness in the image. :base: Path to image file :name: Name of RBD volume """ args = ['--pool', self.pool, base, name] # Image format 2 supports cloning, # in stable ceph rbd release default is not 2, # we need to use it explicitly. args += ['--image-format=2'] args += self.ceph_args() processutils.execute('rbd', 'import', *args) def export_image(self, base, name, snap, pool=None): """Export RBD volume to image file. Uses the command line export to export rbd volume snapshot to local image file. :base: Path to image file :name: Name of RBD volume :snap: Name of RBD snapshot :pool: Name of RBD pool """ if pool is None: pool = self.pool args = ['--pool', pool, '--image', name, '--path', base, '--snap', snap] args += self.ceph_args() processutils.execute('rbd', 'export', *args) def _destroy_volume(self, client, volume, pool=None): """Destroy an RBD volume, retrying as needed. 
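        The number of attempts and the delay between them are taken from the
        [libvirt]rbd_destroy_volume_retries and
        [libvirt]rbd_destroy_volume_retry_interval config options used by the
        looping call below.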
""" def _cleanup_vol(ioctx, volume, retryctx): try: RbdProxy().remove(ioctx, volume) raise loopingcall.LoopingCallDone(retvalue=False) except rbd.ImageHasSnapshots: self.remove_snap(volume, RESIZE_SNAPSHOT_NAME, ignore_errors=True) except (rbd.ImageBusy, rbd.ImageHasSnapshots): LOG.warning('rbd remove %(volume)s in pool %(pool)s failed', {'volume': volume, 'pool': self.pool}) retryctx['retries'] -= 1 if retryctx['retries'] <= 0: raise loopingcall.LoopingCallDone() # NOTE(sandonov): We let it go for: # rbd_destroy_volume_retries*rbd_destroy_volume_retry_interval seconds retryctx = {'retries': CONF.libvirt.rbd_destroy_volume_retries} timer = loopingcall.FixedIntervalLoopingCall( _cleanup_vol, client.ioctx, volume, retryctx) timed_out = timer.start( interval=CONF.libvirt.rbd_destroy_volume_retry_interval).wait() if timed_out: # NOTE(danms): Run this again to propagate the error, but # if it succeeds, don't raise the loopingcall exception try: _cleanup_vol(client.ioctx, volume, retryctx) except loopingcall.LoopingCallDone: pass def cleanup_volumes(self, filter_fn): with RADOSClient(self, self.pool) as client: volumes = RbdProxy().list(client.ioctx) for volume in filter(filter_fn, volumes): self._destroy_volume(client, volume) def get_pool_info(self): # NOTE(melwitt): We're executing 'ceph df' here instead of calling # the RADOSClient.get_cluster_stats python API because we need # access to the MAX_AVAIL stat, which reports the available bytes # taking replication into consideration. The global available stat # from the RADOSClient.get_cluster_stats python API does not take # replication size into consideration and will simply return the # available storage per OSD, added together across all OSDs. The # MAX_AVAIL stat will divide by the replication size when doing the # calculation. args = ['ceph', 'df', '--format=json'] + self.ceph_args() try: out, _ = processutils.execute(*args) except processutils.ProcessExecutionError: LOG.exception('Could not determine disk usage') raise exception.StorageError( reason='Could not determine disk usage') stats = jsonutils.loads(out) # Find the pool for which we are configured. pool_stats = None for pool in stats['pools']: if pool['name'] == self.pool: pool_stats = pool['stats'] break if pool_stats is None: raise exception.NotFound('Pool %s could not be found.' % self.pool) return {'total': stats['stats']['total_bytes'], 'free': pool_stats['max_avail'], 'used': pool_stats['bytes_used']} def create_snap(self, volume, name, pool=None, protect=False): """Create a snapshot of an RBD volume. :volume: Name of RBD object :name: Name of snapshot :pool: Name of pool :protect: Set the snapshot to "protected" """ LOG.debug('creating snapshot(%(snap)s) on rbd image(%(img)s)', {'snap': name, 'img': volume}) with RBDVolumeProxy(self, str(volume), pool=pool) as vol: vol.create_snap(name) if protect and not vol.is_protected_snap(name): vol.protect_snap(name) def remove_snap(self, volume, name, ignore_errors=False, pool=None, force=False): """Removes a snapshot from an RBD volume. 
:volume: Name of RBD object :name: Name of snapshot :ignore_errors: whether or not to log warnings on failures :pool: Name of pool :force: Remove snapshot even if it is protected """ with RBDVolumeProxy(self, str(volume), pool=pool) as vol: if name in [snap.get('name', '') for snap in vol.list_snaps()]: if vol.is_protected_snap(name): if force: vol.unprotect_snap(name) elif not ignore_errors: LOG.warning('snapshot(%(name)s) on rbd ' 'image(%(img)s) is protected, skipping', {'name': name, 'img': volume}) return LOG.debug('removing snapshot(%(name)s) on rbd image(%(img)s)', {'name': name, 'img': volume}) vol.remove_snap(name) elif not ignore_errors: LOG.warning('no snapshot(%(name)s) found on rbd ' 'image(%(img)s)', {'name': name, 'img': volume}) def rollback_to_snap(self, volume, name): """Revert an RBD volume to its contents at a snapshot. :volume: Name of RBD object :name: Name of snapshot """ with RBDVolumeProxy(self, volume) as vol: if name in [snap.get('name', '') for snap in vol.list_snaps()]: LOG.debug('rolling back rbd image(%(img)s) to ' 'snapshot(%(snap)s)', {'snap': name, 'img': volume}) vol.rollback_to_snap(name) else: raise exception.SnapshotNotFound(snapshot_id=name) def destroy_volume(self, volume, pool=None): """A one-shot version of cleanup_volumes() """ with RADOSClient(self, pool) as client: self._destroy_volume(client, volume) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/test.py0000664000175000017500000011700500000000000015062 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base classes for our unit tests. Allows overriding of flags for use of fakes, and some black magic for inline callbacks. 
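A minimal unit test typically derives from NoDBTestCase (defined below) and
overrides configuration via self.flags(). Illustrative example only; the
option value is a placeholder:

    class MyFeatureTestCase(NoDBTestCase):
        def test_host_override(self):
            self.flags(host='my-fake-host')
            self.assertEqual('my-fake-host', CONF.host)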
""" # autopep8: off from nova import monkey_patch ; monkey_patch.patch() # noqa # autopep8: on import abc import builtins import collections import contextlib import copy import datetime import inspect import itertools import os import os.path import pprint import sys from unittest import mock import fixtures from oslo_cache import core as cache from oslo_concurrency import lockutils from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_log.fixture import logging_error as log_fixture from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils.fixture import uuidsentinel as uuids from oslo_utils import timeutils from oslo_versionedobjects import fixture as ovo_fixture from oslotest import base from oslotest import mock_fixture from sqlalchemy.dialects import sqlite import testtools from nova.api.openstack import wsgi_app from nova.compute import rpcapi as compute_rpcapi from nova import context import nova.crypto from nova.db.main import api as db_api from nova import exception from nova import objects from nova.objects import base as objects_base from nova.pci import request from nova import quota from nova.scheduler.client import report from nova.scheduler import utils as scheduler_utils from nova.tests import fixtures as nova_fixtures from nova.tests.unit import matchers from nova import utils from nova.virt import images CONF = cfg.CONF logging.register_options(CONF) CONF.set_override('use_stderr', False) logging.setup(CONF, 'nova') cache.configure(CONF) LOG = logging.getLogger(__name__) _TRUE_VALUES = ('True', 'true', '1', 'yes') CELL1_NAME = 'cell1' # For compatibility with the large number of tests which use test.nested nested = utils.nested_contexts class TestingException(Exception): pass # NOTE(claudiub): this needs to be called before any mock.patch calls are # being done, and especially before any other test classes load. This fixes # the mock.patch autospec issue: # https://github.com/testing-cabal/mock/issues/396 mock_fixture.patch_mock_module() def _poison_unfair_compute_resource_semaphore_locking(): """Ensure that every locking on COMPUTE_RESOURCE_SEMAPHORE is called with fair=True. """ orig_synchronized = utils.synchronized def poisoned_synchronized(*args, **kwargs): # Only check fairness if the decorator is used with # COMPUTE_RESOURCE_SEMAPHORE. But the name of the semaphore can be # passed as args or as kwargs. # Note that we cannot import COMPUTE_RESOURCE_SEMAPHORE as that would # apply the decorators we want to poison here. if len(args) >= 1: name = args[0] else: name = kwargs.get("name") if name == "compute_resources" and not kwargs.get("fair", False): raise AssertionError( 'Locking on COMPUTE_RESOURCE_SEMAPHORE should always be fair. 
' 'See bug 1864122.') # go and act like the original decorator return orig_synchronized(*args, **kwargs) # replace the synchronized decorator factory with our own that checks the # params passed in utils.synchronized = poisoned_synchronized # NOTE(gibi): This poisoning needs to be done in import time as decorators are # applied in import time on the ResourceTracker _poison_unfair_compute_resource_semaphore_locking() class NovaExceptionReraiseFormatError(object): real_log_exception = exception.NovaException._log_exception @classmethod def patch(cls): exception.NovaException._log_exception = cls._wrap_log_exception @staticmethod def _wrap_log_exception(self): exc_info = sys.exc_info() NovaExceptionReraiseFormatError.real_log_exception(self) raise exc_info[1] # NOTE(melwitt) This needs to be done at import time in order to also catch # NovaException format errors that are in mock decorators. In these cases, the # errors will be raised during test listing, before tests actually run. NovaExceptionReraiseFormatError.patch() class TestCase(base.BaseTestCase): """Test case base class for all unit tests. Due to the slowness of DB access, please consider deriving from `NoDBTestCase` first. """ # USES_DB is set to False for tests that inherit from NoDBTestCase. USES_DB = True # USES_DB_SELF is set to True in tests that specifically want to use the # database but need to configure it themselves, for example to setup the # API DB but not the cell DB. In those cases the test will override # USES_DB_SELF = True but inherit from the NoDBTestCase class so it does # not get the default fixture setup when using a database (which is the # API and cell DBs, and adding the default flavors). USES_DB_SELF = False REQUIRES_LOCKING = False # Setting to True makes the test use the RPCFixture. STUB_RPC = True # The number of non-cell0 cells to create. This is only used in the # base class when USES_DB is True. NUMBER_OF_CELLS = 1 # The stable compute id stuff is intentionally singleton-ish, which makes # it a nightmare for testing multiple host/node combinations in tests like # we do. So, mock it out by default, unless the test is specifically # designed to handle it. STUB_COMPUTE_ID = True def setUp(self): """Run before each test method to initialize test environment.""" # Ensure BaseTestCase's ConfigureLogging fixture is disabled since # we're using our own (StandardLogging). with fixtures.EnvironmentVariable('OS_LOG_CAPTURE', '0'): super(TestCase, self).setUp() self.useFixture( nova_fixtures.PropagateTestCaseIdToChildEventlets(self.id())) # Ensure that the pci alias is reset between test cases running in # the same process request.get_alias_from_config.cache_clear() # How many of which service we've started. {$service-name: $count} self._service_fixture_count = collections.defaultdict(int) self.useFixture(nova_fixtures.OpenStackSDKFixture()) self.useFixture(nova_fixtures.IsolatedExecutorFixture(self.id())) self.useFixture(log_fixture.get_logging_handle_error_fixture()) self.stdlog = self.useFixture(nova_fixtures.StandardLogging()) # NOTE(sdague): because of the way we were using the lock # wrapper we ended up with a lot of tests that started # relying on global external locking being set up for them. We # consider all of these to be *bugs*. Tests should not require # global external locking, or if they do, they should # explicitly set it up themselves. # # The following REQUIRES_LOCKING class parameter is provided # as a bridge to get us there. 
No new tests should be added # that require it, and existing classes and tests should be # fixed to not need it. if self.REQUIRES_LOCKING: lock_path = self.useFixture(fixtures.TempDir()).path self.fixture = self.useFixture( config_fixture.Config(lockutils.CONF)) self.fixture.config(lock_path=lock_path, group='oslo_concurrency') self.useFixture(nova_fixtures.ConfFixture(CONF)) if self.STUB_RPC: self.useFixture(nova_fixtures.RPCFixture('nova.test')) # we cannot set this in the ConfFixture as oslo only registers the # notification opts at the first instantiation of a Notifier that # happens only in the RPCFixture CONF.set_default('driver', ['test'], group='oslo_messaging_notifications') # NOTE(danms): Make sure to reset us back to non-remote objects # for each test to avoid interactions. Also, backup the object # registry. objects_base.NovaObject.indirection_api = None self._base_test_obj_backup = copy.copy( objects_base.NovaObjectRegistry._registry._obj_classes) self.addCleanup(self._restore_obj_registry) objects.Service.clear_min_version_cache() # NOTE(danms): Reset the cached list of cells from nova.compute import api api.CELLS = [] context.CELL_CACHE = {} context.CELLS = [] self.computes = {} self.cell_mappings = {} self.host_mappings = {} # NOTE(danms): If the test claims to want to set up the database # itself, then it is responsible for all the mapping stuff too. if self.USES_DB: # NOTE(danms): Full database setup involves a cell0, cell1, # and the relevant mappings. self.useFixture(nova_fixtures.Database(database='api')) self._setup_cells() self.useFixture(nova_fixtures.DefaultFlavorsFixture()) elif not self.USES_DB_SELF: # NOTE(danms): If not using the database, we mock out the # mapping stuff and effectively collapse everything to a # single cell. self.useFixture(nova_fixtures.SingleCellSimple()) self.useFixture(nova_fixtures.DatabasePoisonFixture()) # NOTE(blk-u): WarningsFixture must be after the Database fixture # because sqlalchemy-migrate messes with the warnings filters. self.useFixture(nova_fixtures.WarningsFixture()) self.useFixture(ovo_fixture.StableObjectJsonFixture()) # Reset the global QEMU version flag. images.QEMU_VERSION = None # Reset the compute RPC API globals (mostly the _ROUTER). 
compute_rpcapi.reset_globals() self.addCleanup(self._clear_attrs) self.useFixture(fixtures.EnvironmentVariable('http_proxy')) self.policy = self.useFixture(nova_fixtures.PolicyFixture()) self.useFixture(nova_fixtures.PoisonFunctions()) self.useFixture(nova_fixtures.ForbidNewLegacyNotificationFixture()) # NOTE(mikal): make sure we don't load a privsep helper accidentally self.useFixture(nova_fixtures.PrivsepNoHelperFixture()) self.useFixture(mock_fixture.MockAutospecFixture()) # FIXME(danms): Disable this for all tests by default to avoid breaking # any that depend on default/previous ordering self.flags(build_failure_weight_multiplier=0.0, group='filter_scheduler') # NOTE(melwitt): Reset the cached set of projects quota.UID_QFD_POPULATED_CACHE_BY_PROJECT = set() quota.UID_QFD_POPULATED_CACHE_ALL = False self.useFixture(nova_fixtures.GenericPoisonFixture()) self.useFixture(nova_fixtures.SysFsPoisonFixture()) # Additional module names can be added to this set if needed self.useFixture(nova_fixtures.ImportModulePoisonFixture( set(['guestfs', 'libvirt']))) # make sure that the wsgi app is fully initialized for all testcase # instead of only once initialized for test worker wsgi_app.init_global_data.reset() wsgi_app.init_application.reset() # Reset the placement client singleton report.PLACEMENTCLIENT = None # Reset our local node uuid cache (and avoid writing to the # local filesystem when we generate a new one). if self.STUB_COMPUTE_ID: self.useFixture(nova_fixtures.ComputeNodeIdFixture()) # Reset globals indicating affinity filter support. Some tests may set # self.flags(enabled_filters=...) which could make the affinity filter # support globals get set to a non-default configuration which affects # all other tests. scheduler_utils.reset_globals() # Wait for bare greenlets spawn()'ed from a GreenThreadPoolExecutor # to finish before moving on from the test. When greenlets from a # previous test remain running, they may attempt to access structures # (like the database) that have already been torn down and can cause # the currently running test to fail. self.useFixture(nova_fixtures.GreenThreadPoolShutdownWait()) # Reset the global key manager nova.crypto._KEYMGR = None # Reset the global identity client nova.limit.utils.IDENTITY_CLIENT = None def _setup_cells(self): """Setup a normal cellsv2 environment. This sets up the CellDatabase fixture with two cells, one cell0 and one normal cell. CellMappings are created for both so that cells-aware code can find those two databases. 
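        The number of non-cell0 cells created here is driven by the
        NUMBER_OF_CELLS class attribute; the first one (cell1) is registered
        as the default cell database.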
""" celldbs = nova_fixtures.CellDatabases() ctxt = context.get_context() fake_transport = 'fake://nowhere/' c0 = objects.CellMapping( context=ctxt, uuid=objects.CellMapping.CELL0_UUID, name='cell0', transport_url=fake_transport, database_connection=objects.CellMapping.CELL0_UUID) c0.create() self.cell_mappings[c0.name] = c0 celldbs.add_cell_database(objects.CellMapping.CELL0_UUID) for x in range(self.NUMBER_OF_CELLS): name = 'cell%i' % (x + 1) uuid = getattr(uuids, name) cell = objects.CellMapping( context=ctxt, uuid=uuid, name=name, transport_url=fake_transport, database_connection=uuid) cell.create() self.cell_mappings[name] = cell # cell1 is the default cell celldbs.add_cell_database(uuid, default=(x == 0)) self.useFixture(celldbs) def _restore_obj_registry(self): objects_base.NovaObjectRegistry._registry._obj_classes = \ self._base_test_obj_backup def _clear_attrs(self): # Delete attributes that don't start with _ so they don't pin # memory around unnecessarily for the duration of the test # suite for key in [k for k in self.__dict__.keys() if k[0] != '_']: # NOTE(gmann): Skip attribute 'id' because if tests are being # generated using testscenarios then, 'id' attribute is being # added during cloning the tests. And later that 'id' attribute # is being used by test suite to generate the results for each # newly generated tests by testscenarios. if key != 'id': del self.__dict__[key] def stub_out(self, old, new): """Replace a function for the duration of the test. Use the monkey patch fixture to replace a function for the duration of a test. Useful when you want to provide fake methods instead of mocks during testing. """ self.useFixture(fixtures.MonkeyPatch(old, new)) @staticmethod def patch_exists(patched_path, result, other=None): """Provide a static method version of patch_exists(), which if you haven't already imported nova.test can be slightly easier to use as a context manager within a test method via: def test_something(self): with self.patch_exists(path, True): ... """ return patch_exists(patched_path, result, other) @staticmethod def patch_open(patched_path, read_data): """Provide a static method version of patch_open() which is easier to use as a context manager within a test method via: def test_something(self): with self.patch_open(path, "fake contents of file"): ... """ return patch_open(patched_path, read_data) def flags(self, **kw): """Override flag variables for a test.""" group = kw.pop('group', None) for k, v in kw.items(): CONF.set_override(k, v, group) # loading and validating alias is cached so if it is reconfigured # we need to reset the cache if k == 'alias' and group == 'pci': request.get_alias_from_config.cache_clear() def reset_flags(self, *k, **kw): """Reset flag variables for a test.""" group = kw.pop('group') for flag in k: CONF.clear_override(flag, group) def enforce_fk_constraints(self, engine=None): if engine is None: engine = db_api.get_engine() dialect = engine.url.get_dialect() if dialect == sqlite.dialect: engine.connect().exec_driver_sql("PRAGMA foreign_keys = ON") def start_service(self, name, host=None, cell_name=None, **kwargs): # Disallow starting multiple scheduler services if name == 'scheduler' and self._service_fixture_count[name]: raise TestingException("Duplicate start_service(%s)!" 
% name) cell = None # if the host is None then the CONF.host remains defaulted to # 'fake-mini' (originally done in ConfFixture) if host is not None: # Make sure that CONF.host is relevant to the right hostname self.useFixture(nova_fixtures.ConfPatcher(host=host)) if name == 'compute' and self.USES_DB: # NOTE(danms): We need to create the HostMapping first, because # otherwise we'll fail to update the scheduler while running # the compute node startup routines below. ctxt = context.get_context() cell_name = cell_name or CELL1_NAME cell = self.cell_mappings[cell_name] if (host or name) not in self.host_mappings: # NOTE(gibi): If the HostMapping does not exists then this is # the first start of the service so we create the mapping. hm = objects.HostMapping(context=ctxt, host=host or name, cell_mapping=cell) hm.create() self.host_mappings[hm.host] = hm svc = self.useFixture( nova_fixtures.ServiceFixture(name, host, cell=cell, **kwargs)) # Keep track of how many instances of this service are running. self._service_fixture_count[name] += 1 real_stop = svc.service.stop # Make sure stopping the service decrements the active count, so that # start,stop,start doesn't trigger the "Duplicate start_service" # exception. def patch_stop(*a, **k): self._service_fixture_count[name] -= 1 return real_stop(*a, **k) self.useFixture(fixtures.MockPatchObject( svc.service, 'stop', patch_stop)) return svc.service def _start_compute(self, host, cell_name=None): """Start a nova compute service on the given host :param host: the name of the host that will be associated to the compute service. :param cell_name: optional name of the cell in which to start the compute service :return: the nova compute service object """ compute = self.start_service('compute', host=host, cell_name=cell_name) self.computes[host] = compute return compute def _run_periodics(self, raise_on_error=False): """Run the update_available_resource task on every compute manager This runs periodics on the computes in an undefined order; some child class redefine this function to force a specific order. """ ctx = context.get_admin_context() for host, compute in self.computes.items(): LOG.info('Running periodic for compute (%s)', host) # Make sure the context is targeted to the proper cell database # for multi-cell tests. with context.target_cell( ctx, self.host_mappings[host].cell_mapping) as cctxt: compute.manager.update_available_resource(cctxt) if raise_on_error: if 'Traceback (most recent call last' in self.stdlog.logger.output: # Get the last line of the traceback, for example: # TypeError: virNodeDeviceLookupByName() argument 2 must be # str or None, not Proxy last_tb_line = self.stdlog.logger.output.splitlines()[-1] raise TestingException(last_tb_line) LOG.info('Finished with periodics') def restart_compute_service(self, compute, keep_hypervisor_state=True): """Stops the service and starts a new one to have realistic restart :param:compute: the nova-compute service to be restarted :param:keep_hypervisor_state: If true then already defined instances will survive the compute service restart. If false then the new service will see an empty hypervisor :returns: a new compute service instance serving the same host and and node """ # NOTE(gibi): The service interface cannot be used to simulate a real # service restart as the manager object will not be recreated after a # service.stop() and service.start() therefore the manager state will # survive. For example the resource tracker will not be recreated after # a stop start. 
The service.kill() call cannot help as it deletes # the service from the DB which is unrealistic and causes that some # operation that refers to the killed host (e.g. evacuate) fails. # So this helper method will stop the original service and then starts # a brand new compute service for the same host and node. This way # a new ComputeManager instance will be created and initialized during # the service startup. compute.stop() # this service was running previously so we have to make sure that # we restart it in the same cell cell_name = self.host_mappings[compute.host].cell_mapping.name if keep_hypervisor_state: # NOTE(gibi): FakeDriver does not provide a meaningful way to # define some servers that exists already on the hypervisor when # the driver is (re)created during the service startup. This means # that we cannot simulate that the definition of a server # survives a nova-compute service restart on the hypervisor. # Instead here we save the FakeDriver instance that knows about # the defined servers and inject that driver into the new Manager # class during the startup of the compute service. old_driver = compute.manager.driver with mock.patch( 'nova.virt.driver.load_compute_driver') as load_driver: load_driver.return_value = old_driver new_compute = self.start_service( 'compute', host=compute.host, cell_name=cell_name) else: new_compute = self.start_service( 'compute', host=compute.host, cell_name=cell_name) return new_compute def assertJsonEqual(self, expected, observed, message=''): """Asserts that 2 complex data structures are json equivalent. We use data structures which serialize down to json throughout the code, and often times we just need to know that these are json equivalent. This means that list order is not important, and should be sorted. Because this is a recursive set of assertions, when failure happens we want to expose both the local failure and the global view of the 2 data structures being compared. So a MismatchError which includes the inner failure as the mismatch, and the passed in expected / observed as matchee / matcher. """ if isinstance(expected, str): expected = jsonutils.loads(expected) if isinstance(observed, str): observed = jsonutils.loads(observed) def sort_key(x): if isinstance(x, (set, list)) or isinstance(x, datetime.datetime): return str(x) if isinstance(x, dict): items = ((sort_key(key), sort_key(value)) for key, value in x.items()) return sorted(items) return x def inner(expected, observed, path='root'): if isinstance(expected, dict) and isinstance(observed, dict): self.assertEqual( len(expected), len(observed), ('path: %s. Different dict key sets\n' 'expected=%s\n' 'observed=%s\n' 'difference=%s') % (path, sorted(expected.keys()), sorted(observed.keys()), list(set(expected.keys()).symmetric_difference( set(observed.keys()))))) expected_keys = sorted(expected) observed_keys = sorted(observed) self.assertEqual( expected_keys, observed_keys, 'path: %s. Dict keys are not equal' % path) for key in expected: inner(expected[key], observed[key], path + '.%s' % key) elif (isinstance(expected, (list, tuple, set)) and isinstance(observed, (list, tuple, set))): self.assertEqual( len(expected), len(observed), ('path: %s. 
Different list items\n' 'expected=%s\n' 'observed=%s\n' 'difference=%s') % (path, sorted(expected, key=sort_key), sorted(observed, key=sort_key), [a for a in itertools.chain(expected, observed) if (a not in expected) or (a not in observed)])) expected_values_iter = iter(sorted(expected, key=sort_key)) observed_values_iter = iter(sorted(observed, key=sort_key)) for i in range(len(expected)): inner(next(expected_values_iter), next(observed_values_iter), path + '[%s]' % i) else: self.assertEqual(expected, observed, 'path: %s' % path) try: inner(expected, observed) except testtools.matchers.MismatchError as e: difference = e.mismatch.describe() if message: message = 'message: %s\n' % message msg = "\nexpected:\n%s\nactual:\n%s\ndifference:\n%s\n%s" % ( pprint.pformat(expected), pprint.pformat(observed), difference, message) error = AssertionError(msg) error.expected = expected error.observed = observed error.difference = difference raise error def assertXmlEqual(self, expected, observed, **options): self.assertThat(observed, matchers.XMLMatches(expected, **options)) def assertPublicAPISignatures(self, baseinst, inst): def get_public_apis(inst): methods = {} def findmethods(object): return inspect.ismethod(object) or inspect.isfunction(object) for (name, value) in inspect.getmembers(inst, findmethods): if name.startswith("_"): continue methods[name] = value return methods baseclass = baseinst.__class__.__name__ basemethods = get_public_apis(baseinst) implmethods = get_public_apis(inst) extranames = [] for name in sorted(implmethods.keys()): if name not in basemethods: extranames.append(name) self.assertEqual([], extranames, "public APIs not listed in base class %s" % baseclass) for name in sorted(implmethods.keys()): # NOTE(stephenfin): We ignore type annotations baseargs = inspect.getfullargspec(basemethods[name])[:-1] implargs = inspect.getfullargspec(implmethods[name])[:-1] self.assertEqual(baseargs, implargs, "%s args don't match base class %s" % (name, baseclass)) class APICoverage(object): cover_api = None def test_api_methods(self): self.assertIsNotNone(self.cover_api) api_methods = [x for x in dir(self.cover_api) if not x.startswith('_')] test_methods = [x[5:] for x in dir(self) if x.startswith('test_')] self.assertThat( test_methods, testtools.matchers.ContainsAll(api_methods)) class SubclassSignatureTestCase(testtools.TestCase, metaclass=abc.ABCMeta): """Ensure all overridden methods of all subclasses of the class under test exactly match the signature of the base class. A subclass of SubclassSignatureTestCase should define a method _get_base_class which: * Returns a base class whose subclasses will all be checked * Ensures that all subclasses to be tested have been imported SubclassSignatureTestCase defines a single test, test_signatures, which does a recursive, depth-first check of all subclasses, ensuring that their method signatures are identical to those of the base class. """ @abc.abstractmethod def _get_base_class(self): raise NotImplementedError() def setUp(self): self.useFixture(nova_fixtures.ConfFixture(CONF)) self.base = self._get_base_class() super(SubclassSignatureTestCase, self).setUp() @staticmethod def _get_argspecs(cls): """Return a dict of method_name->argspec for every method of cls.""" argspecs = {} # getmembers returns all members, including members inherited from # the base class. It's redundant for us to test these, but as # they'll always pass it's not worth the complexity to filter them out. 
for (name, method) in inspect.getmembers(cls, inspect.isfunction): # Subclass __init__ methods can usually be legitimately different if name == '__init__': continue # Skip subclass private functions if name.startswith('_'): continue while hasattr(method, '__wrapped__'): # This is a wrapped function. The signature we're going to # see here is that of the wrapper, which is almost certainly # going to involve varargs and kwargs, and therefore is # unlikely to be what we want. If the wrapper manipulates the # arguments taken by the wrapped function, the wrapped function # isn't what we want either. In that case we're just stumped: # if it ever comes up, add more knobs here to work round it (or # stop using a dynamic language). # # Here we assume the wrapper doesn't manipulate the arguments # to the wrapped function and inspect the wrapped function # instead. method = getattr(method, '__wrapped__') argspecs[name] = inspect.getfullargspec(method) return argspecs @staticmethod def _clsname(cls): """Return the fully qualified name of cls.""" return "%s.%s" % (cls.__module__, cls.__name__) def _test_signatures_recurse(self, base, base_argspecs): for sub in base.__subclasses__(): sub_argspecs = self._get_argspecs(sub) # Check that each subclass method matches the signature of the # base class for (method, sub_argspec) in sub_argspecs.items(): # Methods which don't override methods in the base class # are good. if method in base_argspecs: self.assertEqual(base_argspecs[method], sub_argspec, 'Signature of %(sub)s.%(method)s ' 'differs from superclass %(base)s' % {'base': self._clsname(base), 'sub': self._clsname(sub), 'method': method}) # Recursively check this subclass self._test_signatures_recurse(sub, sub_argspecs) def test_signatures(self): self._test_signatures_recurse(self.base, self._get_argspecs(self.base)) class TimeOverride(fixtures.Fixture): """Fixture to start and remove time override.""" def __init__(self, override_time=None): self.override_time = override_time def setUp(self): super(TimeOverride, self).setUp() timeutils.set_time_override(override_time=self.override_time) self.addCleanup(timeutils.clear_time_override) class NoDBTestCase(TestCase): """`NoDBTestCase` differs from TestCase in that DB access is not supported. This makes tests run significantly faster. If possible, all new tests should derive from this class. """ USES_DB = False class MatchType(object): """Matches any instance of a specified type The MatchType class is a helper for use with the mock.assert_called_with() method that lets you assert that a particular parameter has a specific data type. It enables stricter checking than the built in mock.ANY helper. Example usage could be: mock_some_method.assert_called_once_with( "hello", MatchType(objects.Instance), mock.ANY, "world", MatchType(objects.KeyPair)) """ def __init__(self, wanttype): self.wanttype = wanttype def __eq__(self, other): return type(other) is self.wanttype def __ne__(self, other): return type(other) is not self.wanttype def __repr__(self): return "" class MatchObjPrims(object): """Matches objects with equal primitives.""" def __init__(self, want_obj): self.want_obj = want_obj def __eq__(self, other): return objects_base.obj_equal_prims(other, self.want_obj) def __ne__(self, other): return not other == self.want_obj def __repr__(self): return '' class ContainKeyValue(object): """Checks whether a key/value pair is in a dict parameter. 
The ContainKeyValue class is a helper for use with the mock.assert_*() method that lets you assert that a particular dict contain a key/value pair. It enables stricter checking than the built in mock.ANY helper. Example usage could be: mock_some_method.assert_called_once_with( "hello", ContainKeyValue('foo', bar), mock.ANY, "world", ContainKeyValue('hello', world)) """ def __init__(self, wantkey, wantvalue): self.wantkey = wantkey self.wantvalue = wantvalue def __eq__(self, other): try: return other[self.wantkey] == self.wantvalue except (KeyError, TypeError): return False def __ne__(self, other): try: return other[self.wantkey] != self.wantvalue except (KeyError, TypeError): return True def __repr__(self): return "" @contextlib.contextmanager def patch_exists(patched_path, result, other=None): """Selectively patch os.path.exists() so that if it's called with patched_path, return result. Calls with any other path are passed through to the real os.path.exists() function if other is not provided. If other is provided then that will be the result of the call on paths other than patched_path. Either import and use as a decorator / context manager, or use the nova.TestCase.patch_exists() static method as a context manager. Currently it is *not* recommended to use this if any of the following apply: - You want to patch via decorator *and* make assertions about how the mock is called (since using it in the decorator form will not make the mock available to your code). - You want the result of the patched exists() call to be determined programmatically (e.g. by matching substrings of patched_path). - You expect exists() to be called multiple times on the same path and return different values each time. Additionally within unit tests which only test a very limited code path, it may be possible to ensure that the code path only invokes exists() once, in which case it's slightly overkill to do selective patching based on the path. In this case something like like this may be more appropriate: @mock.patch('os.path.exists', return_value=True) def test_my_code(self, mock_exists): ... mock_exists.assert_called_once_with(path) """ real_exists = os.path.exists def fake_exists(path): if path == patched_path: return result elif other is not None: return other else: return real_exists(path) with mock.patch.object(os.path, "exists") as mock_exists: mock_exists.side_effect = fake_exists yield mock_exists @contextlib.contextmanager def patch_open(patched_path, read_data): """Selectively patch open() so that if it's called with patched_path, return a mock which makes it look like the file contains read_data. Calls with any other path are passed through to the real open() function. Either import and use as a decorator, or use the nova.TestCase.patch_open() static method as a context manager. Currently it is *not* recommended to use this if any of the following apply: - The code under test will attempt to write to patched_path. - You want to patch via decorator *and* make assertions about how the mock is called (since using it in the decorator form will not make the mock available to your code). - You want the faked file contents to be determined programmatically (e.g. by matching substrings of patched_path). - You expect open() to be called multiple times on the same path and return different file contents each time. 
Additionally within unit tests which only test a very limited code path, it may be possible to ensure that the code path only invokes open() once, in which case it's slightly overkill to do selective patching based on the path. In this case something like like this may be more appropriate: @mock.patch('builtins.open') def test_my_code(self, mock_open): ... mock_open.assert_called_once_with(path) """ real_open = builtins.open m = mock.mock_open(read_data=read_data) def selective_fake_open(path, *args, **kwargs): if path == patched_path: return m(patched_path) return real_open(path, *args, **kwargs) with mock.patch('builtins.open') as mock_open: mock_open.side_effect = selective_fake_open yield m ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.417609 nova-32.0.0/nova/tests/0000775000175000017500000000000000000000000014667 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/tests/__init__.py0000664000175000017500000000000000000000000016766 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315689.421609 nova-32.0.0/nova/tests/fixtures/0000775000175000017500000000000000000000000016540 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/tests/fixtures/__init__.py0000664000175000017500000000313100000000000020647 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from .cast_as_call import CastAsCallFixture # noqa: F401, H304 from .cinder import CinderFixture # noqa: F401, H304 from .conf import ConfFixture # noqa: F401, H304, F403 from .cyborg import CyborgFixture # noqa: F401, H304 from .filesystem import SysFileSystemFixture # noqa: F401, H304 from .filesystem import TempFileSystemFixture # noqa: F401, H304 from .glance import GlanceFixture # noqa: F401, H304 from .libvirt import LibvirtFixture # noqa: F401, H304 from .libvirt_imagebackend import \ LibvirtImageBackendFixture # noqa: F401, H304 from .manila import ManilaFixture # noqa: F401, H304 from .neutron import NeutronFixture # noqa: F401, H304 from .notifications import NotificationFixture # noqa: F401, H304 from .nova import * # noqa: F401, F403, H303, H304 from .os_brick import OSBrickFixture # noqa: F401, H304 from .policy import OverridePolicyFixture # noqa: F401, H304 from .policy import PolicyFixture # noqa: F401, H304 from .policy import RealPolicyFixture # noqa: F401, H304 from .policy import RoleBasedPolicyFixture # noqa: F401, H304 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/tests/fixtures/cast_as_call.py0000664000175000017500000000476400000000000021535 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import fixtures import oslo_messaging as messaging class CastAsCallFixture(fixtures.Fixture): """Make RPC 'cast' behave like a 'call'. This is a little hack for tests that need to know when a cast operation has completed. The idea is that we wait for the RPC endpoint method to complete and return before continuing on the caller. See Ia7f40718533e450f00cd3e7d753ac65755c70588 for more background. """ def __init__(self, testcase): super().__init__() self.testcase = testcase @staticmethod def _stub_out(testcase, obj=None): if obj: orig_prepare = obj.prepare else: orig_prepare = messaging.RPCClient.prepare def prepare(self, *args, **kwargs): # Casts with fanout=True would throw errors if its monkeypatched to # the call method, so we must override fanout to False if 'fanout' in kwargs: kwargs['fanout'] = False cctxt = orig_prepare(self, *args, **kwargs) CastAsCallFixture._stub_out(testcase, cctxt) # woo, recurse! return cctxt if obj: cls = getattr(sys.modules[obj.__class__.__module__], obj.__class__.__name__) testcase.stub_out('%s.%s.prepare' % (obj.__class__.__module__, obj.__class__.__name__), prepare) testcase.stub_out('%s.%s.cast' % (obj.__class__.__module__, obj.__class__.__name__), cls.call) else: testcase.stub_out('oslo_messaging.RPCClient.prepare', prepare) testcase.stub_out('oslo_messaging.RPCClient.cast', messaging.RPCClient.call) def setUp(self): super().setUp() self._stub_out(self.testcase) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/tests/fixtures/cinder.py0000664000175000017500000005177100000000000020371 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
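# NOTE: illustrative usage only (not part of the original module). A test
# case typically enables this fixture with something like:
#
#     self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
#
# after which the nova.volume.cinder.API calls patched below are served by
# the in-memory fakes defined in this module.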
"""Cinder fixture.""" import collections import copy import fixtures from oslo_log import log as logging from oslo_utils.fixture import uuidsentinel as uuids from oslo_utils import uuidutils from nova import exception from nova.tests.fixtures import nova as nova_fixtures LOG = logging.getLogger(__name__) class CinderFixture(fixtures.Fixture): """A fixture to volume operations with the new Cinder attach/detach API""" # the default project_id in OSAPIFixtures tenant_id = nova_fixtures.PROJECT_ID SWAP_OLD_VOL = 'a07f71dc-8151-4e7d-a0cc-cd24a3f11113' SWAP_NEW_VOL = '227cc671-f30b-4488-96fd-7d0bf13648d8' SWAP_ERR_OLD_VOL = '828419fa-3efb-4533-b458-4267ca5fe9b1' SWAP_ERR_NEW_VOL = '9c6d9c2d-7a8f-4c80-938d-3bf062b8d489' SWAP_ERR_ATTACH_ID = '4a3cd440-b9c2-11e1-afa6-0800200c9a66' MULTIATTACH_VOL = '4757d51f-54eb-4442-8684-3399a6431f67' MULTIATTACH_RO_SWAP_OLD_VOL = uuids.multiattach_ro_swap_old_vol MULTIATTACH_RO_SWAP_NEW_VOL = uuids.multiattach_ro_swap_new_vol MULTIATTACH_RO_MIGRATE_OLD_VOL = uuids.multiattach_ro_migrate_old_vol MULTIATTACH_RO_MIGRATE_NEW_VOL = uuids.multiattach_ro_migrate_new_vol # This represents a bootable image-backed volume to test # boot-from-volume scenarios. IMAGE_BACKED_VOL = '6ca404f3-d844-4169-bb96-bc792f37de98' # This represents a bootable image-backed volume to test # boot-from-volume scenarios with # os_require_quiesce # hw_qemu_guest_agent IMAGE_BACKED_VOL_QUIESCE = '6ca404f3-d844-4169-bb96-bc792f37de26' # This represents a bootable image-backed volume with required traits # as part of volume image metadata IMAGE_WITH_TRAITS_BACKED_VOL = '6194fc02-c60e-4a01-a8e5-600798208b5f' # This represents a bootable volume backed by iSCSI storage. ISCSI_BACKED_VOL = uuids.iscsi_backed_volume # Dict of connection_info for the above volumes keyed by the volume id VOLUME_CONNECTION_INFO = { uuids.iscsi_backed_volume: { 'driver_volume_type': 'iscsi', 'data': { 'target_lun': '1' } }, 'fake': { 'driver_volume_type': 'fake', 'data': { 'foo': 'bar', } } } def __init__(self, test, az='nova'): """Initialize this instance of the CinderFixture. :param test: The TestCase using this fixture. :param az: The availability zone to return in volume GET responses. Defaults to "nova" since that is the default we would see from Cinder's storage_availability_zone config option. """ super().__init__() self.test = test self.swap_volume_instance_uuid = None self.swap_volume_instance_error_uuid = None self.attachment_error_id = None self.multiattach_ro_migrated = False self.az = az # A dict, keyed by volume id, to a dict, keyed by attachment id, # with keys: # - id: the attachment id # - instance_uuid: uuid of the instance attached to the volume # - connector: host connector dict; None if not connected # Note that a volume can have multiple attachments even without # multi-attach, as some flows create a blank 'reservation' attachment # before deleting another attachment. However, a non-multiattach volume # can only have at most one attachment with a host connector at a time. 
self.volumes = collections.defaultdict(dict) self.volume_to_attachment = collections.defaultdict(dict) self.volume_snapshots = collections.defaultdict(dict) def setUp(self): super().setUp() self._create_fakes() def _create_fakes(self): self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.API.attachment_create', side_effect=self.fake_attachment_create, autospec=False)) self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.API.attachment_update', side_effect=self.fake_attachment_update, autospec=False)) self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.API.attachment_delete', side_effect=self.fake_attachment_delete, autospec=False)) self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.API.attachment_complete', side_effect=self.fake_attachment_complete, autospec=False)) self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.API.attachment_get', side_effect=self.fake_attachment_get, autospec=False)) self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.API.begin_detaching', lambda *args, **kwargs: None)) self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.API.get', side_effect=self.fake_get, autospec=False)) self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.API.migrate_volume_completion', side_effect=self.fake_migrate_volume_completion, autospec=False)) self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.API.roll_detaching', side_effect=(lambda *args, **kwargs: None), autospec=False)) self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.is_microversion_supported', side_effect=(lambda ctxt, microversion: None), autospec=False)) self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.API.check_attached', side_effect=(lambda *args, **kwargs: None), autospec=False)) self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.API.get_all_volume_types', side_effect=self.fake_get_all_volume_types, autospec=False)) # TODO(lyarwood): These legacy cinderv2 APIs aren't currently wired # into the fixture but should be in the future before we migrate any # remaining legacy exports to cinderv3 attachments. self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.API.initialize_connection', side_effect=(lambda *args, **kwargs: None), autospec=False)) self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.API.terminate_connection', side_effect=lambda *args, **kwargs: None, autospec=False)) self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.API.reimage_volume', side_effect=self.fake_reimage_volume, autospec=False)) self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.API.get_absolute_limits', side_effect=self.fake_get_absolute_limits, autospec=False)) self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.API.attachment_get_all', side_effect=self.fake_attachment_get_all, autospec=False)) self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.API.create_snapshot_force', side_effect=self.fake_create_snapshot_force, autospec=False)) self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.API.get_snapshot', side_effect=self.fake_get_snapshot, autospec=False)) self.useFixture(fixtures.MockPatch( 'nova.volume.cinder.API.create', side_effect=self.fake_vol_create, autospec=False)) def _is_multiattach(self, volume_id): return volume_id in [ self.MULTIATTACH_VOL, self.MULTIATTACH_RO_SWAP_OLD_VOL, self.MULTIATTACH_RO_SWAP_NEW_VOL, self.MULTIATTACH_RO_MIGRATE_OLD_VOL, self.MULTIATTACH_RO_MIGRATE_NEW_VOL] def _find_attachment(self, attachment_id): """Find attachment corresponding to ``attachment_id``. 
:returns: A tuple of the volume ID, an attachment dict for the given attachment ID, and a dict (keyed by attachment id) of attachment dicts for the volume. """ for volume_id, attachments in self.volume_to_attachment.items(): for attachment in attachments.values(): if attachment_id == attachment['id']: return volume_id, attachment, attachments raise exception.VolumeAttachmentNotFound( attachment_id=attachment_id) def _find_connection_info(self, volume_id, attachment_id): """Find the connection_info associated with an attachment :returns: A connection_info dict based on a deepcopy associated with the volume_id but containing both the attachment_id and volume_id making it unique for the attachment. """ connection_info = copy.deepcopy( self.VOLUME_CONNECTION_INFO.get( volume_id, self.VOLUME_CONNECTION_INFO.get('fake') ) ) connection_info['data']['volume_id'] = volume_id connection_info['data']['attachment_id'] = attachment_id return connection_info def fake_migrate_volume_completion( self, context, old_volume_id, new_volume_id, error, ): if new_volume_id == self.MULTIATTACH_RO_MIGRATE_NEW_VOL: # Mimic the behaviour of Cinder here that renames the new # volume to the old UUID once the migration is complete. # This boolean is used above to signal that the old volume # has been deleted if callers try to GET it. self.multiattach_ro_migrated = True return {'save_volume_id': old_volume_id} return {'save_volume_id': new_volume_id} def fake_get(self, context, volume_id, microversion=None): if volume_id in self.volumes: volume = self.volumes[volume_id] else: volume = { 'display_name': volume_id, 'id': volume_id, 'size': 1, 'multiattach': self._is_multiattach(volume_id), 'availability_zone': self.az } # Add any attachment details the fixture has fixture_attachments = self.volume_to_attachment[volume_id] if fixture_attachments: attachments = {} for attachment in list(fixture_attachments.values()): instance_uuid = attachment['instance_uuid'] # legacy cruft left over from notification tests if ( volume_id == self.SWAP_OLD_VOL and self.swap_volume_instance_uuid ): instance_uuid = self.swap_volume_instance_uuid if ( volume_id == self.SWAP_ERR_OLD_VOL and self.swap_volume_instance_error_uuid ): instance_uuid = self.swap_volume_instance_error_uuid attachments[instance_uuid] = { 'attachment_id': attachment['id'], 'mountpoint': '/dev/vdb', } migration_status = ( None if volume_id not in ( self.SWAP_OLD_VOL, self.SWAP_ERR_OLD_VOL) else "migrating") volume.update({ 'status': 'in-use', 'attach_status': 'attached', 'attachments': attachments, 'migration_status': migration_status }) # Otherwise mark the volume as available and detached else: volume.update({ 'status': 'available', 'attach_status': 'detached', }) if volume_id == self.IMAGE_BACKED_VOL: volume['bootable'] = True volume['volume_image_metadata'] = { 'image_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6' } if volume_id == self.IMAGE_BACKED_VOL_QUIESCE: volume['bootable'] = True volume['volume_image_metadata'] = { "os_require_quiesce": "True", "hw_qemu_guest_agent": "True" } if volume_id == self.IMAGE_WITH_TRAITS_BACKED_VOL: volume['bootable'] = True volume['volume_image_metadata'] = { 'image_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', "trait:HW_CPU_X86_SGX": "required", } # If we haven't called migrate_volume_completion then return # a migration_status of migrating if ( volume_id == self.MULTIATTACH_RO_MIGRATE_OLD_VOL and not self.multiattach_ro_migrated ): volume['migration_status'] = 'migrating' # If we have migrated and are still GET'ing the new volume # 
return raise VolumeNotFound if ( volume_id == self.MULTIATTACH_RO_MIGRATE_NEW_VOL and self.multiattach_ro_migrated ): raise exception.VolumeNotFound( volume_id=self.MULTIATTACH_RO_MIGRATE_NEW_VOL) return volume def fake_get_all_volume_types(self, *args, **kwargs): return [{ # This is used in the 2.67 API sample test. 'id': '5f9204ec-3e94-4f27-9beb-fe7bb73b6eb9', 'name': 'lvm-1' }] def fake_attachment_get(self, context, attachment_id): # Ensure the attachment exists and grab the volume_id volume_id, _, _ = self._find_attachment(attachment_id) attachment_ref = { 'id': attachment_id, 'connection_info': self._find_connection_info( volume_id, attachment_id) } return attachment_ref def fake_attachment_create( self, context, volume_id, instance_uuid, connector=None, mountpoint=None, ): attachment_id = uuidutils.generate_uuid() if self.attachment_error_id is not None: attachment_id = self.attachment_error_id attachment = {'id': attachment_id} if connector: attachment['connection_info'] = self._find_connection_info( volume_id, attachment_id) self.volume_to_attachment[volume_id][attachment_id] = { 'id': attachment_id, 'instance_uuid': instance_uuid, 'connector': connector, } if volume_id in [self.MULTIATTACH_RO_SWAP_OLD_VOL, self.MULTIATTACH_RO_SWAP_NEW_VOL, self.MULTIATTACH_RO_MIGRATE_OLD_VOL, self.MULTIATTACH_RO_MIGRATE_NEW_VOL]: attachment['attach_mode'] = 'ro' LOG.info( 'Created attachment %s for volume %s. Total attachments ' 'for volume: %d', attachment_id, volume_id, len(self.volume_to_attachment[volume_id])) return attachment def fake_attachment_update( self, context, attachment_id, connector, mountpoint=None, ): # Ensure the attachment exists volume_id, attachment, attachments = self._find_attachment( attachment_id) # Cinder will only allow one "connected" attachment per # non-multiattach volume at a time. if volume_id != self.MULTIATTACH_VOL: for _attachment in attachments.values(): if _attachment['connector'] is not None: raise exception.InvalidInput( 'Volume %s is already connected with attachment ' '%s on host %s' % ( volume_id, _attachment['id'], _attachment['connector'].get('host'))) # If the mountpoint was provided stash it in the connector as we do # within nova.volume.cinder.API.attachment_update before calling # c-api and then stash the connector in the attachment record. if mountpoint: connector['device'] = mountpoint attachment['connector'] = connector LOG.info('Updating volume attachment: %s', attachment_id) attachment_ref = { 'id': attachment_id, 'connection_info': self._find_connection_info( volume_id, attachment_id) } if attachment_id == self.SWAP_ERR_ATTACH_ID: # This intentionally triggers a TypeError for the # instance.volume_swap.error versioned notification tests. attachment_ref = {'connection_info': ()} return attachment_ref def fake_attachment_complete(self, _context, attachment_id): # Ensure the attachment exists self._find_attachment(attachment_id) LOG.info('Completing volume attachment: %s', attachment_id) def fake_attachment_delete(self, context, attachment_id): # 'attachment' is a tuple defining a attachment-instance mapping volume_id, attachment, attachments = ( self._find_attachment(attachment_id)) del attachments[attachment_id] LOG.info( 'Deleted attachment %s for volume %s. 
Total attachments ' 'for volume: %d', attachment_id, volume_id, len(attachments)) def fake_reimage_volume(self, *args, **kwargs): if self.IMAGE_BACKED_VOL not in args: raise exception.VolumeNotFound() if 'reimage_reserved' not in kwargs: raise exception.InvalidInput('reimage_reserved not specified') def fake_get_absolute_limits(self, context): limits = {'totalSnapshotsUsed': 0, 'maxTotalSnapshots': -1} return limits def fake_attachment_get_all( self, context, instance_id=None, volume_id=None): if not instance_id and not volume_id: raise exception.InvalidRequest( "Either instance or volume id must be passed.") if volume_id in self.volume_to_attachment: return self.volume_to_attachment[volume_id] all_attachments = [] for _, attachments in self.volume_to_attachment.items(): all_attachments.extend( [attach for attach in attachments.values() if instance_id == attach['instance_uuid']]) return all_attachments def volume_ids_for_instance(self, instance_uuid): for volume_id, attachments in self.volume_to_attachment.items(): for attachment in attachments.values(): if attachment['instance_uuid'] == instance_uuid: # we might have multiple volumes attached to this instance # so yield rather than return yield volume_id break def attachment_ids_for_instance(self, instance_uuid): attachment_ids = [] for volume_id, attachments in self.volume_to_attachment.items(): for attachment in attachments.values(): if attachment['instance_uuid'] == instance_uuid: attachment_ids.append(attachment['id']) return attachment_ids def create_vol_attachment(self, volume_id, instance_id): attachment_id = uuidutils.generate_uuid() if self.attachment_error_id is not None: attachment_id = self.attachment_error_id attachment = {'id': attachment_id} self.volume_to_attachment[volume_id][attachment_id] = { 'id': attachment_id, 'instance_uuid': instance_id, } return attachment def get_vol_attachment(self, _id): for _, attachments in self.volume_to_attachment.items(): for attachment_id in attachments: if _id == attachment_id: # return because attachment id is unique return attachments[attachment_id] def delete_vol_attachment(self, vol_id): del self.volume_to_attachment[vol_id] def fake_create_snapshot_force(self, _ctxt, volume_id, name, description): _id = uuidutils.generate_uuid() snapshot = { 'id': _id, 'volume_id': volume_id, 'display_name': name, 'display_description': description, 'status': 'creating', } self.volume_snapshots[_id] = snapshot return snapshot def fake_get_snapshot(self, _ctxt, snap_id): if snap_id in self.volume_snapshots: # because instance is getting unquiesce_instance self.volume_snapshots[snap_id]['status'] = 'available' return self.volume_snapshots[snap_id] def fake_vol_create(self, _ctxt, size, name, description, snapshot=None, image_id=None, volume_type=None, metadata=None, availability_zone=None): _id = uuidutils.generate_uuid() volume = { 'id': _id, 'status': 'available', 'display_name': name or 'fake-cinder-vol', 'attach_status': 'detached', 'size': size, 'display_description': description or 'fake-description', } self.volumes[_id] = volume return volume ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/tests/fixtures/conf.py0000664000175000017500000000530100000000000020036 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import fixture as config_fixture from oslo_policy import opts as policy_opts from nova.conf import devices from nova.conf import neutron from nova.conf import paths from nova import config class ConfFixture(config_fixture.Config): """Fixture to manage global conf settings.""" def setUp(self): super(ConfFixture, self).setUp() # default group self.conf.set_default('compute_driver', 'fake.SmallFakeDriver') self.conf.set_default('host', 'fake-mini') self.conf.set_default('periodic_enable', False) # api_database group self.conf.set_default('connection', "sqlite://", group='api_database') self.conf.set_default('sqlite_synchronous', False, group='api_database') # database group self.conf.set_default('connection', "sqlite://", group='database') self.conf.set_default('sqlite_synchronous', False, group='database') # key_manager group self.conf.set_default('backend', 'nova.keymgr.conf_key_mgr.ConfKeyManager', group='key_manager') # wsgi group self.conf.set_default('api_paste_config', paths.state_path_def('etc/nova/api-paste.ini'), group='wsgi') # api group self.conf.set_default('response_validation', 'error', group='api') # many tests synchronizes on the reception of versioned notifications self.conf.set_default( 'notification_format', "both", group="notifications") # oslo.limit requires endpoint_id since 2.3.0 self.conf.set_default('endpoint_id', 'ENDPOINT_ID', group='oslo_limit') config.parse_args([], default_config_files=[], configure_db=False, init_rpc=False) policy_opts.set_defaults(self.conf) neutron.register_dynamic_opts(self.conf) devices.register_dynamic_opts(self.conf) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/tests/fixtures/cyborg.py0000664000175000017500000002746500000000000020415 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
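# A minimal sketch (hypothetical test code, not part of this module) showing
# how the ConfFixture defined above is typically consumed: install it, then
# override individual options on top of the defaults it sets.
#
#     from oslo_config import cfg
#     from nova.tests import fixtures as nova_fixtures
#
#     conf_fixture = self.useFixture(nova_fixtures.ConfFixture(cfg.CONF))
#     conf_fixture.config(host='some-other-host')  # override one default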
import copy import fixtures from oslo_config import cfg from oslo_log import log as logging from nova import exception CONF = cfg.CONF LOG = logging.getLogger(__name__) def _get_device_profile(dp_name, trait): dp = { 'fakedev-dp': [ { 'name': 'fakedev-dp', 'uuid': 'cbec22f3-ac29-444e-b4bb-98509f32faae', 'groups': [ { 'resources:FPGA': 1, 'trait:' + trait: 'required', }, ], # Skipping links key in Cyborg API return value }, ], 'fakedev-dp-port': [ { 'name': 'fakedev-dp', 'uuid': 'cbec22f3-ac29-444e-b4bb-98509f32faae', 'groups': [ { 'resources:FPGA': 1, 'trait:' + trait: 'required', }, ], # Skipping links key in Cyborg API return value }, ], 'fakedev-dp-multi': [ { 'name': 'fakedev-dp-multi', 'uuid': 'cbec22f3-ac29-444e-b4bb-98509f32faae', 'groups': [ { 'resources:FPGA': 2, 'resources:FPGA2': 1, 'trait:' + trait: 'required', }, ], # Skipping links key in Cyborg API return value }, ], } return dp[dp_name] def get_arqs(dp_name): # prepare fixture arqs and bound info arqs = [ { 'uuid': 'b59d34d3-787b-4fb0-a6b9-019cd81172f8', 'device_profile_name': dp_name, 'device_profile_group_id': 0, 'state': 'Initial', 'device_rp_uuid': None, 'hostname': None, 'instance_uuid': None, 'attach_handle_info': {}, 'attach_handle_type': '', }, {'uuid': '73d5f9f3-23e9-4b45-909a-e8a1db4cf24c', 'device_profile_name': dp_name, 'device_profile_group_id': 0, 'state': 'Initial', 'device_rp_uuid': None, 'hostname': None, 'instance_uuid': None, 'attach_handle_info': {}, 'attach_handle_type': '', }, {'uuid': '69b83caf-dd1c-493d-8796-40af5a16e3f6', 'device_profile_name': dp_name, 'device_profile_group_id': 0, 'state': 'Initial', 'device_rp_uuid': None, 'hostname': None, 'instance_uuid': None, 'attach_handle_info': {}, 'attach_handle_type': '', }, {'uuid': 'e5fc1da7-216b-4102-a50d-43ba77bcacf7', 'device_profile_name': dp_name, 'device_profile_group_id': 0, 'state': 'Initial', 'device_rp_uuid': None, 'hostname': None, 'instance_uuid': None, 'attach_handle_info': {}, 'attach_handle_type': '', } ] # arqs bound info attach_handle_list = [ { 'bus': '0c', 'device': '0', 'domain': '0000', 'function': '1', 'physical_network': 'PHYNET1' }, { 'bus': '0c', 'device': '0', 'domain': '0000', 'function': '2', 'physical_network': 'PHYNET1' }, { 'bus': '0c', 'device': '0', 'domain': '0000', 'function': '3', 'physical_network': 'PHYNET1' }, { 'bus': '0c', 'device': '0', 'domain': '0000', 'function': '4', 'physical_network': 'PHYNET1' } ] bound_arqs = [] # combine bond info to arq generating a bonded arqs list for idx, arq in enumerate(arqs): bound_arq = copy.deepcopy(arq) bound_arq.update( {'state': 'Bound', 'attach_handle_type': 'TEST_PCI', 'attach_handle_info': attach_handle_list[idx]}, ) bound_arqs.append(bound_arq) return arqs, bound_arqs class CyborgFixture(fixtures.Fixture): """Fixture that mocks Cyborg APIs used by nova/accelerator/cyborg.py""" dp_name = 'fakedev-dp' trait = 'CUSTOM_FAKE_DEVICE' arq_list, bound_arq_list = copy.deepcopy(get_arqs(dp_name)) arq_uuids = [] for arq in arq_list: arq_uuids.append(arq["uuid"]) call_create_arq_count = 0 # NOTE(Sundar): The bindings passed to the fake_bind_arqs() from the # conductor are indexed by ARQ UUID and include the host name, device # RP UUID and instance UUID. (See params to fake_bind_arqs below.) # # Later, when the compute manager calls fake_get_arqs_for_instance() with # the instance UUID, the returned ARQs must contain the host name and # device RP UUID. But these can vary from test to test. 
# # So, fake_bind_arqs() below takes bindings indexed by ARQ UUID and # converts them to bindings indexed by instance UUID, which are then # stored in the dict below. This dict looks like: # { $instance_uuid: [ # {'hostname': $hostname, # 'device_rp_uuid': $device_rp_uuid, # 'arq_uuid': $arq_uuid # } # ] # } # Since it is indexed by instance UUID, and that is presumably unique # across concurrently executing tests, this should be safe for # concurrent access. def setUp(self): super().setUp() self.mock_get_dp = self.useFixture(fixtures.MockPatch( 'nova.accelerator.cyborg._CyborgClient._get_device_profile_list', side_effect=self.fake_get_device_profile_list)).mock self.mock_create_arqs = self.useFixture(fixtures.MockPatch( 'nova.accelerator.cyborg._CyborgClient.create_arqs', side_effect=self.fake_create_arqs)).mock self.mock_bind_arqs = self.useFixture(fixtures.MockPatch( 'nova.accelerator.cyborg._CyborgClient.bind_arqs', side_effect=self.fake_bind_arqs)).mock self.mock_get_arqs = self.useFixture(fixtures.MockPatch( 'nova.accelerator.cyborg._CyborgClient.' 'get_arqs_for_instance', side_effect=self.fake_get_arqs_for_instance)).mock self.mock_del_arqs = self.useFixture(fixtures.MockPatch( 'nova.accelerator.cyborg._CyborgClient.' 'delete_arqs_for_instance', side_effect=self.fake_delete_arqs_for_instance)).mock self.mock_get_arq_by_uuid = self.useFixture(fixtures.MockPatch( 'nova.accelerator.cyborg._CyborgClient.' 'get_arq_by_uuid', side_effect=self.fake_get_arq_by_uuid)).mock self.mock_get_arq_device_rp_uuid = self.useFixture(fixtures.MockPatch( 'nova.accelerator.cyborg._CyborgClient.' 'get_arq_device_rp_uuid', side_effect=self.fake_get_arq_device_rp_uuid)).mock self.mock_create_arq_and_match_rp = self.useFixture(fixtures.MockPatch( 'nova.accelerator.cyborg._CyborgClient.' 'create_arqs_and_match_resource_providers', side_effect=self.fake_create_arq_and_match_rp)).mock self.mock_fake_delete_arqs_by_uuid = self.useFixture( fixtures.MockPatch( 'nova.accelerator.cyborg._CyborgClient.' 'delete_arqs_by_uuid', side_effect=self.fake_delete_arqs_by_uuid)).mock def fake_get_device_profile_list(self, dp_name): return _get_device_profile(dp_name, self.trait) @staticmethod def fake_bind_arqs(bindings): """Simulate Cyborg ARQ bindings. Since Nova calls Cyborg for binding on per-instance basis, the instance UUIDs would be the same for all ARQs in a single call. This function converts bindings indexed by ARQ UUID to bindings indexed by instance UUID, so that fake_get_arqs_for_instance can retrieve them later. :param bindings: { "$arq_uuid": { "hostname": STRING "device_rp_uuid": UUID "instance_uuid": UUID }, ... } :returns: None """ if bindings.keys() and CyborgFixture.arq_uuids is None: LOG.error("ARQ not found") raise exception.AcceleratorRequestOpFailed() for arq_uuid, binding in bindings.items(): for bound_arq in CyborgFixture.bound_arq_list: if arq_uuid == bound_arq["uuid"]: bound_arq["hostname"] = binding["hostname"] bound_arq["instance_uuid"] = binding["instance_uuid"] bound_arq["device_rp_uuid"] = binding["device_rp_uuid"] break @staticmethod def fake_get_arqs_for_instance(instance_uuid, only_resolved=False): """Get list of bound ARQs for this instance. This function uses bindings indexed by instance UUID to populate the bound ARQ templates in CyborgFixture.bound_arq_list. 
""" bound_arq_list = copy.deepcopy(CyborgFixture.bound_arq_list) instance_bound_arqs = [] for arq in bound_arq_list: if arq["instance_uuid"] == instance_uuid: instance_bound_arqs.append(arq) return instance_bound_arqs def fake_get_arq_by_uuid(self, uuid): for arq in self.arq_list: if uuid == arq['uuid']: return arq return None def fake_delete_arqs_for_instance(self, instance_uuid): # clean up arq binding info while delete arqs for arq in self.bound_arq_list: if arq["instance_uuid"] == instance_uuid: arq["instance_uuid"] = None arq["hostname"] = None arq["device_rp_uuid"] = None def fake_create_arq_and_match_rp(self, dp_name, rg_rp_map=None, owner=None): # sync the device_rp_uuid to fake arq arqs = self.fake_create_arqs(dp_name) for arq in arqs: dp_group_id = arq['device_profile_group_id'] requester_id = ("device_profile_" + str(dp_group_id) + (str(owner) if owner else "")) arq["device_rp_uuid"] = rg_rp_map[requester_id][0] return arqs def fake_create_arqs(self, dp_name): index = self.call_create_arq_count self.call_create_arq_count += 1 if index < len(self.arq_list): return [self.arq_list[index]] else: return None def fake_get_arq_device_rp_uuid(self, arq_arg, rg_rp_map=None, port_id=None): # sync the device_rp_uuid to fake arq for arq in self.arq_list: if arq["uuid"] == arq_arg['uuid']: dp_group_id = arq['device_profile_group_id'] requester_id = ("device_profile_" + str(dp_group_id) + str(port_id)) arq["device_rp_uuid"] = rg_rp_map[requester_id][0] return arq["device_rp_uuid"] return None def fake_delete_arqs_by_uuid(self, arq_uuids): # clean up arq binding info while delete arqs for arq_uuid in arq_uuids: for arq in self.bound_arq_list: if arq["uuid"] == arq_uuid: arq["instance_uuid"] = None arq["hostname"] = None arq["device_rp_uuid"] = None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/tests/fixtures/filesystem.py0000664000175000017500000000671600000000000021310 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import shutil import tempfile from unittest import mock import fixtures from nova import filesystem from nova.virt.libvirt.cpu import core SYS = 'sys' class TempFileSystemFixture(fixtures.Fixture): """Creates a fake / filesystem""" def _setUp(self): self.temp_dir = tempfile.TemporaryDirectory(prefix='fake_fs') # NOTE(sbauza): I/O disk errors may raise an exception here, as we # don't ignore them. If that's causing a problem in our CI jobs, the # recommended solution is to use shutil.rmtree instead of cleanup() # with ignore_errors parameter set to True (or wait for the minimum # python version to be 3.10 as TemporaryDirectory will provide # ignore_cleanup_errors parameter) self.addCleanup(self.temp_dir.cleanup) class SysFileSystemFixture(TempFileSystemFixture): def __init__(self, cpus_supported=None, cpufreq_enabled=True): """Instantiates a fake sysfs. 
:param cpus_supported: number of devices/system/cpu (default: 10) :param cpufreq_enabled: cpufreq subdir created (default: True) """ self.cpus_supported = cpus_supported or 10 self.cpufreq_enabled = cpufreq_enabled def _setUp(self): super()._setUp() self.sys_path = os.path.join(self.temp_dir.name, SYS) self.addCleanup(shutil.rmtree, self.sys_path, ignore_errors=True) sys_patcher = mock.patch( 'nova.filesystem.SYS', new_callable=mock.PropertyMock(return_value=self.sys_path)) self.sys_mock = sys_patcher.start() self.addCleanup(sys_patcher.stop) avail_path_patcher = mock.patch( 'nova.virt.libvirt.cpu.core.AVAILABLE_PATH', new_callable=mock.PropertyMock( return_value=os.path.join(self.sys_path, 'devices/system/cpu/present'))) self.avail_path_mock = avail_path_patcher.start() self.addCleanup(avail_path_patcher.stop) cpu_path_patcher = mock.patch( 'nova.virt.libvirt.cpu.core.CPU_PATH_TEMPLATE', new_callable=mock.PropertyMock( return_value=os.path.join(self.sys_path, 'devices/system/cpu/cpu%(core)s'))) self.cpu_path_mock = cpu_path_patcher.start() self.addCleanup(cpu_path_patcher.stop) for cpu_nr in range(self.cpus_supported): cpu_dir = os.path.join(self.cpu_path_mock % {'core': cpu_nr}) os.makedirs(cpu_dir) filesystem.write_sys(os.path.join(cpu_dir, 'online'), data='1') if self.cpufreq_enabled: os.makedirs(os.path.join(cpu_dir, 'cpufreq')) filesystem.write_sys( os.path.join(cpu_dir, 'cpufreq/scaling_governor'), data='powersave') filesystem.write_sys(core.AVAILABLE_PATH, f'0-{self.cpus_supported - 1}') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/tests/fixtures/glance.py0000664000175000017500000002773500000000000020361 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import fixtures from oslo_log import log as logging from oslo_utils.fixture import uuidsentinel from oslo_utils import uuidutils from nova import exception from nova.objects import fields as obj_fields from nova.tests.fixtures import nova as nova_fixtures LOG = logging.getLogger(__name__) class GlanceFixture(fixtures.Fixture): """A fixture for simulating Glance.""" # NOTE(justinsb): The OpenStack API can't upload an image? # So, make sure we've got one.. 
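    # The canned images below use fixed UUIDs so other fixtures and functional
    # tests can reference them directly; for example, image1's id
    # ('155d900f-4e14-4e4c-a73d-069cbf4541e6') is the same image id that
    # CinderFixture reports as backing IMAGE_BACKED_VOL.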
timestamp = datetime.datetime( 2011, 1, 1, 1, 2, 3, tzinfo=datetime.timezone.utc ) image1 = { 'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': False, 'container_format': 'raw', 'disk_format': 'raw', 'size': 25165824, 'min_ram': 0, 'min_disk': 0, 'protected': False, 'visibility': 'public', 'tags': ['tag1', 'tag2'], 'properties': { 'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel', 'architecture': obj_fields.Architecture.X86_64, }, } image2 = { 'id': 'a2459075-d96c-40d5-893e-577ff92e721c', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'ami', 'disk_format': 'ami', 'size': 58145823, 'min_ram': 0, 'min_disk': 0, 'protected': False, 'visibility': 'public', 'tags': [], 'properties': { 'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel', }, } image3 = { 'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'bare', 'disk_format': 'raw', 'size': 83594576, 'min_ram': 0, 'min_disk': 0, 'protected': False, 'visibility': 'public', 'tags': ['tag3', 'tag4'], 'properties': { 'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel', 'architecture': obj_fields.Architecture.X86_64, }, } image4 = { 'id': 'cedef40a-ed67-4d10-800e-17455edce175', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'ami', 'disk_format': 'ami', 'size': 84035174, 'min_ram': 0, 'min_disk': 0, 'protected': False, 'visibility': 'public', 'tags': [], 'properties': { 'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel', }, } image5 = { 'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'ami', 'disk_format': 'ami', 'size': 26360814, 'min_ram': 0, 'min_disk': 0, 'protected': False, 'visibility': 'public', 'tags': [], 'properties': { 'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', 'ramdisk_id': None, }, } auto_disk_config_disabled_image = { 'id': 'a440c04b-79fa-479c-bed1-0b816eaec379', 'name': 'fakeimage6', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': False, 'container_format': 'ova', 'disk_format': 'vhd', 'size': 49163826, 'min_ram': 0, 'min_disk': 0, 'protected': False, 'visibility': 'public', 'tags': [], 'properties': { 'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel', 'architecture': obj_fields.Architecture.X86_64, 'auto_disk_config': 'False', }, } auto_disk_config_enabled_image = { 'id': '70a599e0-31e7-49b7-b260-868f441e862b', 'name': 'fakeimage7', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': False, 'container_format': 'ova', 'disk_format': 'vhd', 'size': 74185822, 'min_ram': 0, 'min_disk': 0, 'protected': False, 'visibility': 'public', 'tags': [], 'properties': { 'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel', 'architecture': obj_fields.Architecture.X86_64, 'auto_disk_config': 'True', }, } eph_encryption = copy.deepcopy(image1) eph_encryption['id'] = 
uuidsentinel.eph_encryption eph_encryption['properties'] = { 'hw_ephemeral_encryption': 'True' } eph_encryption_disabled = copy.deepcopy(image1) eph_encryption_disabled['id'] = uuidsentinel.eph_encryption_disabled eph_encryption_disabled['properties'] = { 'hw_ephemeral_encryption': 'False' } eph_encryption_luks = copy.deepcopy(image1) eph_encryption_luks['id'] = uuidsentinel.eph_encryption_luks eph_encryption_luks['properties'] = { 'hw_ephemeral_encryption': 'True', 'hw_ephemeral_encryption_format': 'luks' } eph_encryption_plain = copy.deepcopy(image1) eph_encryption_plain['id'] = uuidsentinel.eph_encryption_plain eph_encryption_plain['properties'] = { 'hw_ephemeral_encryption': 'True', 'hw_ephemeral_encryption_format': 'plain' } def __init__(self, test): super().__init__() self.test = test self.images = {} def setUp(self): super().setUp() self.test.useFixture(nova_fixtures.ConfPatcher( group='glance', api_servers=['http://localhost:9292'])) self.test.stub_out( 'nova.image.glance.API.get_remote_image_service', lambda context, image_href: (self, image_href)) self.test.stub_out( 'nova.image.glance.get_default_image_service', lambda: self) self.create(None, self.image1) self.create(None, self.image2) self.create(None, self.image3) self.create(None, self.image4) self.create(None, self.image5) self.create(None, self.auto_disk_config_disabled_image) self.create(None, self.auto_disk_config_enabled_image) self.create(None, self.eph_encryption) self.create(None, self.eph_encryption_disabled) self.create(None, self.eph_encryption_luks) self.create(None, self.eph_encryption_plain) self._imagedata = {} # TODO(bcwaldon): implement optional kwargs such as limit, sort_dir def detail(self, context, **kwargs): """Return list of detailed image information.""" return copy.deepcopy(list(self.images.values())) def download( self, context, image_id, data=None, dst_path=None, trusted_certs=None, ): self.show(context, image_id) if data: data.write(self._imagedata.get(image_id, b'')) elif dst_path: with open(dst_path, 'wb') as data: data.write(self._imagedata.get(image_id, b'')) def show( self, context, image_id, include_locations=False, show_deleted=True, ): """Get data about specified image. Returns a dict containing image data for the given opaque image id. """ image = self.images.get(str(image_id)) if image: return copy.deepcopy(image) LOG.warning( 'Unable to find image id %s. Have images: %s', image_id, self.images) raise exception.ImageNotFound(image_id=image_id) def create(self, context, metadata, data=None): """Store the image data and return the new image id. :raises: Duplicate if the image already exist. """ image_id = str(metadata.get('id', uuidutils.generate_uuid())) metadata['id'] = image_id if image_id in self.images: raise exception.CouldNotUploadImage(image_id=image_id) image_meta = copy.deepcopy(metadata) if image_meta.get('min_disk'): # min_disk should be of int type only. image_meta['min_disk'] = int(image_meta['min_disk']) # Glance sets the size value when an image is created, so we # need to do that here to fake things out if it's not provided # by the caller. This is needed to avoid a KeyError in the # image-size API. if 'size' not in image_meta: image_meta['size'] = 74185822 # Similarly, Glance provides the status on the image once it's created # and this is checked in the compute API when booting a server from # this image, so we just fake it out to be 'active' even though this # is mostly a lie on a newly created image. 
if 'status' not in metadata: image_meta['status'] = 'active' # The owner of the image is by default the request context project_id. if context and 'owner' not in image_meta.get('properties', {}): # Note that normally "owner" is a top-level field in an image # resource in glance but we have to fake this out for the images # proxy API by throwing it into the generic "properties" dict. image_meta.get('properties', {})['owner'] = context.project_id # Glance would always populate these fields, so we need to ensure we do # the same if not image_meta.get('created_at'): image_meta['created_at'] = self.timestamp if not image_meta.get('updated_at'): image_meta['updated_at'] = self.timestamp self.images[image_id] = image_meta if data: self._imagedata[image_id] = data.read() return self.images[image_id] def update(self, context, image_id, metadata, data=None, purge_props=False): """Replace the contents of the given image with the new data. :raises: ImageNotFound if the image does not exist. """ if not self.images.get(image_id): raise exception.ImageNotFound(image_id=image_id) if purge_props: self.images[image_id] = copy.deepcopy(metadata) else: image = self.images[image_id] try: image['properties'].update(metadata.pop('properties')) except KeyError: pass image.update(metadata) return self.images[image_id] def delete(self, context, image_id): """Delete the given image. :raises: ImageNotFound if the image does not exist. """ removed = self.images.pop(image_id, None) if not removed: raise exception.ImageNotFound(image_id=image_id) def get_location(self, context, image_id): if image_id in self.images: return 'fake_location' return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/nova/tests/fixtures/libvirt.py0000664000175000017500000026354200000000000020601 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import os import sys import textwrap import time import typing as ty from unittest import mock import fixtures from lxml import etree from oslo_log import log as logging from oslo_utils.fixture import uuidsentinel as uuids from oslo_utils import versionutils from nova import conf from nova.objects import fields as obj_fields from nova.tests.fixtures import libvirt_data as fake_libvirt_data from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import driver as libvirt_driver from nova.virt.libvirt import host # Allow passing None to the various connect methods # (i.e. 
allow the client to rely on default URLs) allow_default_uri_connection = True # Has libvirt connection been used at least once connection_used = False def _reset(): global allow_default_uri_connection allow_default_uri_connection = True LOG = logging.getLogger(__name__) CONF = conf.CONF # virDomainState VIR_DOMAIN_NOSTATE = 0 VIR_DOMAIN_RUNNING = 1 VIR_DOMAIN_BLOCKED = 2 VIR_DOMAIN_PAUSED = 3 VIR_DOMAIN_SHUTDOWN = 4 VIR_DOMAIN_SHUTOFF = 5 VIR_DOMAIN_CRASHED = 6 # NOTE(mriedem): These values come from include/libvirt/libvirt-domain.h VIR_DOMAIN_XML_SECURE = 1 VIR_DOMAIN_XML_INACTIVE = 2 VIR_DOMAIN_XML_UPDATE_CPU = 4 VIR_DOMAIN_XML_MIGRATABLE = 8 VIR_DOMAIN_BLOCK_COPY_SHALLOW = 1 VIR_DOMAIN_BLOCK_COPY_REUSE_EXT = 2 VIR_DOMAIN_BLOCK_COPY_TRANSIENT_JOB = 4 VIR_DOMAIN_BLOCK_REBASE_SHALLOW = 1 VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT = 2 VIR_DOMAIN_BLOCK_REBASE_COPY = 8 VIR_DOMAIN_BLOCK_REBASE_RELATIVE = 16 VIR_DOMAIN_BLOCK_REBASE_COPY_DEV = 32 # virDomainBlockResize VIR_DOMAIN_BLOCK_RESIZE_BYTES = 1 VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC = 1 VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT = 2 VIR_DOMAIN_EVENT_ID_LIFECYCLE = 0 VIR_DOMAIN_EVENT_DEFINED = 0 VIR_DOMAIN_EVENT_UNDEFINED = 1 VIR_DOMAIN_EVENT_STARTED = 2 VIR_DOMAIN_EVENT_SUSPENDED = 3 VIR_DOMAIN_EVENT_RESUMED = 4 VIR_DOMAIN_EVENT_STOPPED = 5 VIR_DOMAIN_EVENT_SHUTDOWN = 6 VIR_DOMAIN_EVENT_PMSUSPENDED = 7 VIR_DOMAIN_EVENT_ID_DEVICE_REMOVED = 15 VIR_DOMAIN_EVENT_ID_DEVICE_REMOVAL_FAILED = 22 VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED = 1 VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY = 7 VIR_DOMAIN_UNDEFINE_MANAGED_SAVE = 1 VIR_DOMAIN_UNDEFINE_NVRAM = 4 VIR_DOMAIN_AFFECT_CURRENT = 0 VIR_DOMAIN_AFFECT_LIVE = 1 VIR_DOMAIN_AFFECT_CONFIG = 2 VIR_CPU_COMPARE_ERROR = -1 VIR_CPU_COMPARE_INCOMPATIBLE = 0 VIR_CPU_COMPARE_IDENTICAL = 1 VIR_CPU_COMPARE_SUPERSET = 2 VIR_CRED_USERNAME = 1 VIR_CRED_AUTHNAME = 2 VIR_CRED_LANGUAGE = 3 VIR_CRED_CNONCE = 4 VIR_CRED_PASSPHRASE = 5 VIR_CRED_ECHOPROMPT = 6 VIR_CRED_NOECHOPROMPT = 7 VIR_CRED_REALM = 8 VIR_CRED_EXTERNAL = 9 VIR_MIGRATE_LIVE = 1 VIR_MIGRATE_PEER2PEER = 2 VIR_MIGRATE_TUNNELLED = 4 VIR_MIGRATE_PERSIST_DEST = 8 VIR_MIGRATE_UNDEFINE_SOURCE = 16 VIR_MIGRATE_NON_SHARED_INC = 128 VIR_MIGRATE_AUTO_CONVERGE = 8192 VIR_MIGRATE_POSTCOPY = 32768 VIR_MIGRATE_TLS = 65536 VIR_NODE_CPU_STATS_ALL_CPUS = -1 VIR_DOMAIN_START_PAUSED = 1 # libvirtError enums # (Intentionally different from what's in libvirt. 
We do this to check, # that consumers of the library are using the symbolic names rather than # hardcoding the numerical values) VIR_FROM_QEMU = 100 VIR_FROM_DOMAIN = 200 VIR_FROM_SECRET = 300 VIR_FROM_NWFILTER = 330 VIR_FROM_REMOTE = 340 VIR_FROM_RPC = 345 VIR_FROM_NODEDEV = 666 VIR_ERR_INVALID_ARG = 8 VIR_ERR_NO_SUPPORT = 3 VIR_ERR_XML_ERROR = 27 VIR_ERR_XML_DETAIL = 350 VIR_ERR_NO_DOMAIN = 420 VIR_ERR_OPERATION_FAILED = 510 VIR_ERR_OPERATION_INVALID = 55 VIR_ERR_OPERATION_TIMEOUT = 68 VIR_ERR_NO_NWFILTER = 620 VIR_ERR_SYSTEM_ERROR = 900 VIR_ERR_INTERNAL_ERROR = 950 VIR_ERR_CONFIG_UNSUPPORTED = 951 VIR_ERR_NO_NODE_DEVICE = 667 VIR_ERR_INVALID_SECRET = 65 VIR_ERR_NO_SECRET = 66 VIR_ERR_AGENT_UNRESPONSIVE = 86 VIR_ERR_ARGUMENT_UNSUPPORTED = 74 VIR_ERR_OPERATION_UNSUPPORTED = 84 VIR_ERR_DEVICE_MISSING = 99 # Readonly VIR_CONNECT_RO = 1 # virConnectBaselineCPU flags VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES = 1 # snapshotCreateXML flags VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA = 4 VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY = 16 VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32 VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64 # blockCommit flags VIR_DOMAIN_BLOCK_COMMIT_RELATIVE = 4 VIR_CONNECT_LIST_DOMAINS_ACTIVE = 1 VIR_CONNECT_LIST_DOMAINS_INACTIVE = 2 # virConnectListAllNodeDevices flags VIR_CONNECT_LIST_NODE_DEVICES_CAP_PCI_DEV = 2 VIR_CONNECT_LIST_NODE_DEVICES_CAP_NET = 1 << 4 VIR_CONNECT_LIST_NODE_DEVICES_CAP_VDPA = 1 << 17 VIR_CONNECT_LIST_NODE_DEVICES_CAP_MDEV = 1 << 5 VIR_CONNECT_LIST_NODE_DEVICES_INACTIVE = 1 << 8 # secret type VIR_SECRET_USAGE_TYPE_NONE = 0 VIR_SECRET_USAGE_TYPE_VOLUME = 1 VIR_SECRET_USAGE_TYPE_CEPH = 2 VIR_SECRET_USAGE_TYPE_ISCSI = 3 # metadata types VIR_DOMAIN_METADATA_DESCRIPTION = 0 VIR_DOMAIN_METADATA_TITLE = 1 VIR_DOMAIN_METADATA_ELEMENT = 2 # Libvirt version to match MIN_LIBVIRT_VERSION in driver.py FAKE_LIBVIRT_VERSION = versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_VERSION) # Libvirt version to match MIN_QEMU_VERSION in driver.py FAKE_QEMU_VERSION = versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_VERSION) PCI_VEND_ID = '8086' PCI_VEND_NAME = 'Intel Corporation' PCI_PROD_ID = '1533' PCI_PROD_NAME = 'I210 Gigabit Network Connection' PCI_DRIVER_NAME = 'igb' PF_PROD_ID = '1528' PF_PROD_NAME = 'Ethernet Controller 10-Gigabit X540-AT2' PF_DRIVER_NAME = 'ixgbe' PF_CAP_TYPE = 'virt_functions' VF_PROD_ID = '1515' VF_PROD_NAME = 'X540 Ethernet Controller Virtual Function' VF_DRIVER_NAME = 'ixgbevf' VF_CAP_TYPE = 'phys_function' MDEV_CAPABLE_VEND_ID = '10DE' MDEV_CAPABLE_VEND_NAME = 'Nvidia' MDEV_CAPABLE_PROD_ID = '0FFE' MDEV_CAPABLE_PROD_NAME = 'GRID M60-0B' MDEV_CAPABLE_DRIVER_NAME = 'nvidia' MDEV_CAPABLE_CAP_TYPE = 'mdev_types' NVIDIA_11_VGPU_TYPE = 'nvidia-11' NVIDIA_12_VGPU_TYPE = 'nvidia-12' MLX5_CORE_TYPE = 'mlx5_core' MDEVCAP_DEV1_PCI_ADDR = 'pci_0000_81_00_0' MDEVCAP_DEV2_PCI_ADDR = 'pci_0000_81_01_0' MDEVCAP_DEV3_PCI_ADDR = 'pci_0000_81_02_0' os_uname = collections.namedtuple( 'uname_result', ['sysname', 'nodename', 'release', 'version', 'machine'], ) def _get_libvirt_nodedev_name(bus, slot, function): """Convert an address to a libvirt device name string.""" return f'pci_0000_{bus:02x}_{slot:02x}_{function:d}' class FakePCIDevice(object): """Generate a fake PCI device. Generate a fake PCI devices corresponding to one of the following real-world PCI devices. 
- I210 Gigabit Network Connection (8086:1533) - Ethernet Controller 10-Gigabit X540-AT2 (8086:1528) - X540 Ethernet Controller Virtual Function (8086:1515) """ pci_default_parent = "pci_0000_80_01_0" pci_device_template = textwrap.dedent(""" pci_0000_%(bus)02x_%(slot)02x_%(function)d /sys/devices/pci0000:80/0000:80:01.0/0000:%(bus)02x:%(slot)02x.%(function)d %(parent)s %(driver)s 0 %(bus)d %(slot)d %(function)d %(prod_name)s %(vend_name)s %(capability)s %(vpd_capability)s
""".strip()) # noqa cap_templ = "%(addresses)s" addr_templ = "
" # noqa mdevtypes_templ = textwrap.dedent(""" GRID M60-0Bvfio-pci %(instances)s """.strip()) # noqa vpd_cap_templ = textwrap.dedent(""" %(name)s %(fields)s """.strip()) vpd_fields_templ = textwrap.dedent(""" %(section_fields)s""".strip()) vpd_field_templ = """<%(field_name)s>%(field_value)s""" is_capable_of_mdevs = False def __init__( self, dev_type, bus, slot, function, iommu_group, numa_node, *, vf_ratio=None, multiple_gpu_types=False, generic_types=False, parent=None, vend_id=None, vend_name=None, prod_id=None, prod_name=None, driver_name=None, vpd_fields=None, mac_address=None, ): """Populate pci devices :param dev_type: (str) Indicates the type of the device (PCI, PF, VF, MDEV_TYPES). :param bus: (int) Bus number of the device. :param slot: (int) Slot number of the device. :param function: (int) Function number of the device. :param iommu_group: (int) IOMMU group ID. :param numa_node: (int) NUMA node of the device. :param vf_ratio: (int) Ratio of Virtual Functions on Physical. Only applicable if ``dev_type`` is one of: ``PF``, ``VF``. :param multiple_gpu_types: (bool) Supports different vGPU types. :param generic_types: (bool) Support both mlx5 and nvidia-12 types. :param parent: (int, int, int) A tuple of bus, slot and function corresponding to the parent. :param vend_id: (str) The vendor ID. :param vend_name: (str) The vendor name. :param prod_id: (str) The product ID. :param prod_name: (str) The product name. :param driver_name: (str) The driver name. :param mac_address: (str) The MAC of the device. Used in case of SRIOV PFs """ self.dev_type = dev_type self.bus = bus self.slot = slot self.function = function self.iommu_group = iommu_group self.numa_node = numa_node self.vf_ratio = vf_ratio self.multiple_gpu_types = multiple_gpu_types self.generic_types = generic_types self.parent = parent self.vend_id = vend_id self.vend_name = vend_name self.prod_id = prod_id self.prod_name = prod_name self.driver_name = driver_name self.mac_address = mac_address self.vpd_fields = vpd_fields self.generate_xml() def generate_xml(self, skip_capability=False): # initial validation assert self.dev_type in ('PCI', 'VF', 'PF', 'MDEV_TYPES'), ( f'got invalid dev_type {self.dev_type}') if self.dev_type == 'PCI': assert not self.vf_ratio, 'vf_ratio does not apply for PCI devices' if self.dev_type in ('PF', 'VF'): assert ( self.vf_ratio is not None ), 'require vf_ratio for PFs and VFs' if self.dev_type == 'VF': assert self.parent, 'require parent for VFs' assert isinstance(self.parent, tuple), 'parent must be an address' assert len(self.parent) == 3, 'parent must be an address' vend_id = self.vend_id or PCI_VEND_ID vend_name = self.vend_name or PCI_VEND_NAME capability = '' if self.dev_type == 'PCI': prod_id = self.prod_id or PCI_PROD_ID prod_name = self.prod_name or PCI_PROD_NAME driver = self.driver_name or PCI_DRIVER_NAME elif self.dev_type == 'PF': prod_id = self.prod_id or PF_PROD_ID prod_name = self.prod_name or PF_PROD_NAME driver = self.driver_name or PF_DRIVER_NAME if not skip_capability: capability = self.cap_templ % { 'cap_type': PF_CAP_TYPE, 'addresses': '\n'.join([ self.addr_templ % { 'bus': self.bus, # these are the slot, function values of the child # VFs, we can only assign 8 functions to a slot # (0-7) so bump the slot each time we exceed this 'slot': self.slot + (x // 8), # ...and wrap the function value 'function': x % 8, # the offset is because the PF is occupying function 0 } for x in range(1, self.vf_ratio + 1)]) } elif self.dev_type == 'VF': prod_id = self.prod_id or VF_PROD_ID 
prod_name = self.prod_name or VF_PROD_NAME driver = self.driver_name or VF_DRIVER_NAME if not skip_capability: capability = self.cap_templ % { 'cap_type': VF_CAP_TYPE, 'addresses': self.addr_templ % { 'bus': self.bus, # this is the slot, function value of the parent PF # if we're e.g. device 8, we'll have a different slot # to our parent so reverse this 'slot': self.slot - ((self.vf_ratio + 1) // 8), # the parent PF is always function 0 'function': 0, } } elif self.dev_type == 'MDEV_TYPES': prod_id = self.prod_id or MDEV_CAPABLE_PROD_ID prod_name = self.prod_name or MDEV_CAPABLE_PROD_NAME driver = self.driver_name or MDEV_CAPABLE_DRIVER_NAME vend_id = self.vend_id or MDEV_CAPABLE_VEND_ID vend_name = self.vend_name or MDEV_CAPABLE_VEND_NAME types = [self.mdevtypes_templ % { 'type_id': NVIDIA_11_VGPU_TYPE, 'instances': 16, }] if self.multiple_gpu_types: types.append(self.mdevtypes_templ % { 'type_id': NVIDIA_12_VGPU_TYPE, 'instances': 8, }) if self.generic_types: types = [self.mdevtypes_templ % { 'type_id': MLX5_CORE_TYPE, 'instances': 16, }] types.append(self.mdevtypes_templ % { 'type_id': NVIDIA_12_VGPU_TYPE, 'instances': 8, }) if not skip_capability: capability = self.cap_templ % { 'cap_type': MDEV_CAPABLE_CAP_TYPE, 'addresses': '\n'.join(types) } self.is_capable_of_mdevs = True parent = self.pci_default_parent if self.parent: parent = _get_libvirt_nodedev_name(*self.parent) self.pci_device = self.pci_device_template % { 'bus': self.bus, 'slot': self.slot, 'function': self.function, 'vend_id': vend_id, 'vend_name': vend_name, 'prod_id': prod_id, 'prod_name': prod_name, 'driver': driver, 'capability': capability, 'vpd_capability': self.format_vpd_cap(), 'iommu_group': self.iommu_group, 'numa_node': self.numa_node, 'parent': parent, } # -1 is the sentinel set in /sys/bus/pci/devices/*/numa_node # for no NUMA affinity. When the numa_node is set to -1 on a device # Libvirt omits the NUMA element so we remove it. if self.numa_node == -1: self.pci_device = self.pci_device.replace("", "") def format_vpd_cap(self): if not self.vpd_fields: return '' fields = [] for access_type in ('readonly', 'readwrite'): section_fields = [] for field_name, field_value in self.vpd_fields.get( access_type, {}).items(): section_fields.append(self.vpd_field_templ % { 'field_name': field_name, 'field_value': field_value, }) if section_fields: fields.append( self.vpd_fields_templ % { 'access': access_type, 'section_fields': '\n'.join(section_fields), } ) return self.vpd_cap_templ % { 'name': self.vpd_fields.get('name', ''), 'fields': '\n'.join(fields) } def XMLDesc(self, flags): return self.pci_device @property def address(self): return "0000:%02x:%02x.%1x" % (self.bus, self.slot, self.function) # TODO(stephenfin): Remove all of these HostFooDevicesInfo objects in favour of # a unified devices object class HostPCIDevicesInfo(object): """Represent a pool of host PCI devices.""" TOTAL_NUMA_NODES = 2 def __init__(self, num_pci=0, num_pfs=2, num_vfs=8, num_mdevcap=0, numa_node=None, multiple_gpu_types=False, generic_types=False, bus=0x81, product_ids=["1515"]): """Create a new HostPCIDevicesInfo object. :param num_pci: (int) The number of (non-SR-IOV) and (non-MDEV capable) PCI devices. :param num_pfs: (int) The number of PCI SR-IOV Physical Functions. :param num_vfs: (int) The number of PCI SR-IOV Virtual Functions. :param num_mdevcap: (int) The number of PCI devices capable of creating mediated devices. 
:param numa_node: (int) NUMA node of the device; if set all of the devices will be assigned to the specified node else they will be split between ``$TOTAL_NUMA_NODES`` nodes. :param multiple_gpu_types: (bool) Supports different vGPU types :param generic_types: (bool) Supports both nvidia-12 and mlx5 types :param bus: (int) PCI bus number, allow to specify a different bus to have an asymmetric configuration between src and dst computes. :param product_ids: (list) List of product IDs. Warning: This multiplies the number of PF and VF devices by the number of product IDs. It creates separate sets of VF devices, one for each product ID. The first set is assigned to slot 0, and each subsequent set is assigned to the next slot incrementally. """ self.devices = {} if not (num_vfs or num_pfs or num_pci) and not num_mdevcap: return if num_vfs and not num_pfs: raise ValueError('Cannot create VFs without PFs') if num_pfs and num_vfs % num_pfs: raise ValueError('num_vfs must be a factor of num_pfs') bus = bus slot = 0x0 function = 0 iommu_group = 40 # totally arbitrary number # Generate PCI devs for dev in range(num_pci): self.add_device( dev_type='PCI', bus=bus, slot=slot, function=function, iommu_group=iommu_group, numa_node=self._calc_numa_node(dev, numa_node)) slot += 1 iommu_group += 1 # Generate MDEV capable devs for dev in range(num_mdevcap): self.add_device( dev_type='MDEV_TYPES', bus=bus, slot=slot, function=function, iommu_group=iommu_group, numa_node=self._calc_numa_node(dev, numa_node), multiple_gpu_types=multiple_gpu_types, generic_types=generic_types) slot += 1 iommu_group += 1 vf_ratio = num_vfs // num_pfs if num_pfs else 0 # Generate PFs for prod_id in product_ids: for dev in range(num_pfs): function = 0 numa_node_pf = self._calc_numa_node(dev, numa_node) self.add_device( dev_type='PF', bus=bus, slot=slot, function=function, iommu_group=iommu_group, numa_node=numa_node_pf, vf_ratio=vf_ratio) parent = (bus, slot, function) # Generate VFs for _ in range(vf_ratio): function += 1 iommu_group += 1 if function % 8 == 0: # functions must be 0-7 slot += 1 function = 0 self.add_device( dev_type='VF', bus=bus, prod_id=prod_id, slot=slot, function=function, iommu_group=iommu_group, numa_node=numa_node_pf, vf_ratio=vf_ratio, parent=parent) slot += 1 def add_device( self, dev_type, bus, slot, function, iommu_group, numa_node, vf_ratio=None, multiple_gpu_types=False, generic_types=False, parent=None, vend_id=None, vend_name=None, prod_id=None, prod_name=None, driver_name=None, vpd_fields=None, mac_address=None, ): pci_dev_name = _get_libvirt_nodedev_name(bus, slot, function) LOG.info('Generating %s device %r', dev_type, pci_dev_name) dev = FakePCIDevice( dev_type=dev_type, bus=bus, slot=slot, function=function, iommu_group=iommu_group, numa_node=numa_node, vf_ratio=vf_ratio, multiple_gpu_types=multiple_gpu_types, generic_types=generic_types, parent=parent, vend_id=vend_id, vend_name=vend_name, prod_id=prod_id, prod_name=prod_name, driver_name=driver_name, vpd_fields=vpd_fields, mac_address=mac_address, ) self.devices[pci_dev_name] = dev return dev @classmethod def _calc_numa_node(cls, dev, numa_node): return dev % cls.TOTAL_NUMA_NODES if numa_node is None else numa_node def get_all_devices(self): return self.devices.keys() def get_device_by_name(self, device_name): pci_dev = self.devices.get(device_name) return pci_dev def get_all_mdev_capable_devices(self): return [dev for dev in self.devices if self.devices[dev].is_capable_of_mdevs] def get_pci_address_mac_mapping(self): return { device.address: 
device.mac_address for dev_addr, device in self.devices.items() if device.mac_address } class FakeMdevDevice(object): template = """ %(dev_name)s /sys/devices/pci0000:00/0000:00:02.0/%(path)s %(parent)s vfio_mdev """ def __init__(self, dev_name, type_id, parent): self.xml = self.template % { 'dev_name': dev_name, 'type_id': type_id, 'path': dev_name[len('mdev_'):], 'parent': parent} def XMLDesc(self, flags): return self.xml class HostMdevDevicesInfo(object): def __init__(self, devices=None): if devices is not None: self.devices = devices else: self.devices = {} def get_all_devices(self): return self.devices.keys() def get_device_by_name(self, device_name): dev = self.devices[device_name] return dev class FakeVDPADevice: template = textwrap.dedent(""" %(name)s %(path)s %(parent)s vhost_vdpa /dev/vhost-vdpa-%(idx)d """.strip()) def __init__(self, name, idx, parent): assert isinstance(parent, FakePCIDevice) assert parent.dev_type == 'VF' self.name = name self.idx = idx self.parent = parent self.generate_xml() def generate_xml(self): pf_pci = self.parent.parent vf_pci = (self.parent.bus, self.parent.slot, self.parent.function) pf_addr = '0000:%02x:%02x.%d' % pf_pci vf_addr = '0000:%02x:%02x.%d' % vf_pci parent = _get_libvirt_nodedev_name(*vf_pci) path = f'/sys/devices/pci0000:00/{pf_addr}/{vf_addr}/vdpa{self.idx}' self.xml = self.template % { 'name': self.name, 'idx': self.idx, 'path': path, 'parent': parent, } def XMLDesc(self, flags): return self.xml class HostVDPADevicesInfo: def __init__(self): self.devices = {} def get_all_devices(self): return self.devices.keys() def get_device_by_name(self, device_name): dev = self.devices[device_name] return dev def add_device(self, name, idx, parent): LOG.info('Generating vDPA device %r', name) dev = FakeVDPADevice(name=name, idx=idx, parent=parent) self.devices[name] = dev return dev class HostInfo(object): def __init__(self, cpu_nodes=1, cpu_sockets=1, cpu_cores=2, cpu_threads=1, kB_mem=16780000, mempages=None): """Create a new Host Info object :param cpu_nodes: (int) the number of NUMA cell, 1 for unusual NUMA topologies or uniform :param cpu_sockets: (int) number of CPU sockets per node if nodes > 1, total number of CPU sockets otherwise :param cpu_cores: (int) number of cores per socket :param cpu_threads: (int) number of threads per core :param kB_mem: (int) memory size in KBytes """ self.arch = obj_fields.Architecture.X86_64 self.kB_mem = kB_mem self.cpus = cpu_nodes * cpu_sockets * cpu_cores * cpu_threads self.cpu_mhz = 800 self.cpu_nodes = cpu_nodes self.cpu_cores = cpu_cores self.cpu_threads = cpu_threads self.cpu_sockets = cpu_sockets self.cpu_model = "Penryn" self.cpu_vendor = "Intel" self.numa_topology = NUMATopology(self.cpu_nodes, self.cpu_sockets, self.cpu_cores, self.cpu_threads, self.kB_mem, mempages) class NUMATopology(vconfig.LibvirtConfigCapsNUMATopology): """A batteries-included variant of LibvirtConfigCapsNUMATopology. Provides sane defaults for LibvirtConfigCapsNUMATopology that can be used in tests as is, or overridden where necessary. 
""" def __init__(self, cpu_nodes=4, cpu_sockets=1, cpu_cores=1, cpu_threads=2, kb_mem=1048576, mempages=None, **kwargs): super(NUMATopology, self).__init__(**kwargs) cpu_count = 0 cell_count = 0 for socket_count in range(cpu_sockets): for cell_num in range(cpu_nodes): cell = vconfig.LibvirtConfigCapsNUMACell() cell.id = cell_count cell.memory = kb_mem // (cpu_nodes * cpu_sockets) for cpu_num in range(cpu_cores * cpu_threads): cpu = vconfig.LibvirtConfigCapsNUMACPU() cpu.id = cpu_count cpu.socket_id = socket_count cpu.core_id = cpu_num // cpu_threads cpu.siblings = set([cpu_threads * (cpu_count // cpu_threads) + thread for thread in range(cpu_threads)]) cell.cpus.append(cpu) cpu_count += 1 # If no mempages are provided, use only the default 4K pages if mempages: cell.mempages = mempages[cell_count] else: cell.mempages = create_mempages([(4, cell.memory // 4)]) self.cells.append(cell) cell_count += 1 def create_mempages(mappings): """Generate a list of LibvirtConfigCapsNUMAPages objects. :param mappings: (dict) A mapping of page size to quantity of said pages. :returns: [LibvirtConfigCapsNUMAPages, ...] """ mempages = [] for page_size, page_qty in mappings: mempage = vconfig.LibvirtConfigCapsNUMAPages() mempage.size = page_size mempage.total = page_qty mempages.append(mempage) return mempages VIR_DOMAIN_JOB_NONE = 0 VIR_DOMAIN_JOB_BOUNDED = 1 VIR_DOMAIN_JOB_UNBOUNDED = 2 VIR_DOMAIN_JOB_COMPLETED = 3 VIR_DOMAIN_JOB_FAILED = 4 VIR_DOMAIN_JOB_CANCELLED = 5 def _parse_disk_info(element): disk_info = {} disk_info['type'] = element.get('type', 'file') disk_info['device'] = element.get('device', 'disk') driver = element.find('./driver') if driver is not None: disk_info['driver_name'] = driver.get('name') disk_info['driver_type'] = driver.get('type') source = element.find('./source') if source is not None: disk_info['source'] = source.get('file') if not disk_info['source']: disk_info['source'] = source.get('dev') if not disk_info['source']: disk_info['source'] = source.get('path') encryption = element.find('./source/encryption') if encryption is not None and len(encryption): disk_info['encryption_format'] = encryption.get('format') secret = encryption.find('./secret') if secret is not None: disk_info['encryption_secret'] = secret.get('uuid') target = element.find('./target') if target is not None: disk_info['target_dev'] = target.get('dev') disk_info['target_bus'] = target.get('bus') return disk_info def _parse_vcpu_info(element): vcpu_info = {} vcpu_info['number'] = int(element.text) vcpu_info['cpuset'] = element.get('cpuset') return vcpu_info def _parse_filesystem_info(element): filesystem_info = {} filesystem_info['type'] = element.get('type', 'mount') driver = element.find('./driver') if driver is not None: filesystem_info['driver_type'] = driver.get('type') source = element.find('./source') if source is not None: filesystem_info['source'] = source.get('dir') target = element.find('./target') if target is not None: filesystem_info['target'] = target.get('dir') return filesystem_info def _parse_nic_info(element): nic_info = {} nic_info['type'] = element.get('type', 'bridge') driver = element.find('./mac') if driver is not None: nic_info['mac'] = driver.get('address') source = element.find('./source') if source is not None: nic_info['source'] = source.get('bridge') target = element.find('./target') if target is not None: nic_info['target_dev'] = target.get('dev') return nic_info def _parse_hostdev_info(element): hostdev_info = {} hostdev_info['type'] = element.get('type', 'pci') 
hostdev_info['managed'] = element.get('managed', 'pci') source = element.find('./source') if source is not None: address = source.find('./address') if address is not None: hostdev_info['domain'] = address.get('domain') hostdev_info['bus'] = address.get('bus') hostdev_info['slot'] = address.get('slot') hostdev_info['function'] = address.get('function') return hostdev_info def disable_event_thread(self): """Disable nova libvirt driver event thread. The Nova libvirt driver includes a native thread which monitors the libvirt event channel. In a testing environment this becomes problematic because it means we've got a floating thread calling sleep(1) over the life of the unit test. Seems harmless? It's not, because we sometimes want to test things like retry loops that should have specific sleep patterns. An unlucky firing of the libvirt thread will cause a test failure. """ # because we are patching a method in a class MonkeyPatch doesn't # auto import correctly. Import explicitly otherwise the patching # may silently fail. import nova.virt.libvirt.host # noqa def evloop(*args, **kwargs): pass self.useFixture(fixtures.MockPatch( 'nova.virt.libvirt.host.Host._init_events', side_effect=evloop)) class libvirtError(Exception): """This class was copied and slightly modified from `libvirt-python:libvirt-override.py`. Since a test environment will use the real `libvirt-python` version of `libvirtError` if it's installed and not this fake, we need to maintain strict compatibility with the original class, including `__init__` args and instance-attributes. To create a libvirtError instance you should: # Create an unsupported error exception exc = libvirtError('my message') exc.err = (libvirt.VIR_ERR_NO_SUPPORT,) self.err is a tuple of form: (error_code, error_domain, error_message, error_level, str1, str2, str3, int1, int2) Alternatively, you can use the `make_libvirtError` convenience function to allow you to specify these attributes in one shot. 
""" def __init__(self, defmsg, conn=None, dom=None, net=None, pool=None, vol=None): Exception.__init__(self, defmsg) self.err = None def get_error_code(self): if self.err is None: return None return self.err[0] def get_error_domain(self): if self.err is None: return None return self.err[1] def get_error_message(self): if self.err is None: return None return self.err[2] def get_error_level(self): if self.err is None: return None return self.err[3] def get_str1(self): if self.err is None: return None return self.err[4] def get_str2(self): if self.err is None: return None return self.err[5] def get_str3(self): if self.err is None: return None return self.err[6] def get_int1(self): if self.err is None: return None return self.err[7] def get_int2(self): if self.err is None: return None return self.err[8] class NodeDevice(object): def __init__(self, connection, xml=None): self._connection = connection self._xml = xml if xml is not None: self._parse_xml(xml) def _parse_xml(self, xml): tree = etree.fromstring(xml) root = tree.find('.') self._name = root.find('name').text self._parent = root.find('parent').text def attach(self): pass def detach(self): pass def reset(self): pass def XMLDesc(self, flags: int) -> str: return self._xml def parent(self) -> str: return self._parent def name(self) -> str: return self._name def listCaps(self) -> ty.List[str]: return [self.name().split('_')[0]] class Domain(object): def __init__(self, connection, xml, running=False, transient=False): self._connection = connection if running: connection._mark_running(self) self._state = running and VIR_DOMAIN_RUNNING or VIR_DOMAIN_SHUTOFF self._transient = transient self._def = self._parse_definition(xml) self._has_saved_state = False self._snapshots = {} self._id = self._connection._id_counter self._job_type = VIR_DOMAIN_JOB_UNBOUNDED def _parse_definition(self, xml): try: tree = etree.fromstring(xml) except etree.ParseError: raise make_libvirtError( libvirtError, "Invalid XML.", error_code=VIR_ERR_XML_DETAIL, error_domain=VIR_FROM_DOMAIN) definition = {} name = tree.find('./name') if name is not None: definition['name'] = name.text uuid_elem = tree.find('./uuid') if uuid_elem is not None: definition['uuid'] = uuid_elem.text else: definition['uuid'] = uuids.fake vcpu = tree.find('./vcpu') if vcpu is not None: definition['vcpu'] = _parse_vcpu_info(vcpu) memory = tree.find('./memory') if memory is not None: definition['memory'] = int(memory.text) os = {} os_type = tree.find('./os/type') if os_type is not None: os['type'] = os_type.text os['arch'] = os_type.get('arch', self._connection.host_info.arch) os_loader = tree.find('./os/loader') if os_loader is not None: os['loader_stateless'] = os_loader.get('stateless') os_kernel = tree.find('./os/kernel') if os_kernel is not None: os['kernel'] = os_kernel.text os_initrd = tree.find('./os/initrd') if os_initrd is not None: os['initrd'] = os_initrd.text os_cmdline = tree.find('./os/cmdline') if os_cmdline is not None: os['cmdline'] = os_cmdline.text os_boot = tree.find('./os/boot') if os_boot is not None: os['boot_dev'] = os_boot.get('dev') definition['os'] = os features = {} acpi = tree.find('./features/acpi') if acpi is not None: features['acpi'] = True definition['features'] = features cpu_pins = {} pins = tree.findall('./cputune/vcpupin') for pin in pins: cpu_pins[pin.get('vcpu')] = pin.get('cpuset') definition['cpu_pins'] = cpu_pins emulator_pin = tree.find('./cputune/emulatorpin') if emulator_pin is not None: definition['emulator_pin'] = emulator_pin.get('cpuset') memnodes = {} 
for node in tree.findall('./numatune/memnode'): memnodes[node.get('cellid')] = node.get('nodeset') definition['memnodes'] = memnodes devices = {} device_nodes = tree.find('./devices') if device_nodes is not None: disks_info = [] disks = device_nodes.findall('./disk') for disk in disks: disks_info += [_parse_disk_info(disk)] devices['disks'] = disks_info # Manage shares filesystems_info = [] filesystems = device_nodes.findall('./filesystem') for filesystem in filesystems: filesystems_info += [_parse_filesystem_info(filesystem)] devices['filesystem'] = filesystems_info nics_info = [] nics = device_nodes.findall('./interface') for nic in nics: nic_info = {} nic_info['type'] = nic.get('type') mac = nic.find('./mac') if mac is not None: nic_info['mac'] = mac.get('address') source = nic.find('./source') if source is not None: if nic_info['type'] == 'network': nic_info['source'] = source.get('network') elif nic_info['type'] == 'bridge': nic_info['source'] = source.get('bridge') elif nic_info['type'] == 'hostdev': # is for VF when vnic_type # is direct. Add sriov vf pci information in nic_info address = source.find('./address') pci_type = address.get('type') pci_domain = address.get('domain').replace('0x', '') pci_bus = address.get('bus').replace('0x', '') pci_slot = address.get('slot').replace('0x', '') pci_function = address.get('function').replace( '0x', '') pci_device = "%s_%s_%s_%s_%s" % (pci_type, pci_domain, pci_bus, pci_slot, pci_function) nic_info['source'] = pci_device elif nic_info['type'] == 'vdpa': nic_info['source'] = source.get('dev') nics_info += [nic_info] devices['nics'] = nics_info hostdev_info = [] hostdevs = device_nodes.findall('./hostdev') for hostdev in hostdevs: address = hostdev.find('./source/address') # NOTE(gibi): only handle mdevs as pci is complicated dev_type = hostdev.get('type') if dev_type == 'mdev': hostdev_info.append({ 'type': dev_type, 'model': hostdev.get('model'), 'address_uuid': address.get('uuid') }) if dev_type == 'pci': hostdev_info.append(_parse_hostdev_info(hostdev)) devices['hostdevs'] = hostdev_info vpmem_info = [] vpmems = device_nodes.findall('./memory') for vpmem in vpmems: model = vpmem.get('model') if model == 'nvdimm': source = vpmem.find('./source') target = vpmem.find('./target') path = source.find('./path').text alignsize = source.find('./alignsize').text size = target.find('./size').text node = target.find('./node').text vpmem_info.append({ 'path': path, 'size': size, 'alignsize': alignsize, 'node': node}) devices['vpmems'] = vpmem_info definition['devices'] = devices return definition def verify_hostdevs_interface_are_vfs(self): """Verify for interface type hostdev if the pci device is VF or not. 
""" error_message = ("Interface type hostdev is currently supported on " "SR-IOV Virtual Functions only") nics = self._def['devices'].get('nics', []) for nic in nics: if nic['type'] == 'hostdev': pci_device = nic['source'] pci_info_from_connection = self._connection.pci_info.devices[ pci_device] if 'phys_function' not in pci_info_from_connection.pci_device: raise make_libvirtError( libvirtError, error_message, error_code=VIR_ERR_CONFIG_UNSUPPORTED, error_domain=VIR_FROM_DOMAIN) def create(self): self.createWithFlags(0) def createWithFlags(self, flags): # FIXME: Not handling flags at the moment self.verify_hostdevs_interface_are_vfs() self._state = VIR_DOMAIN_RUNNING self._connection._mark_running(self) self._has_saved_state = False def isActive(self): return int(self._state == VIR_DOMAIN_RUNNING) def undefine(self): self._connection._undefine(self) def isPersistent(self): return True def undefineFlags(self, flags): self.undefine() if flags & VIR_DOMAIN_UNDEFINE_MANAGED_SAVE: if self.hasManagedSaveImage(0): self.managedSaveRemove() def destroy(self): self._state = VIR_DOMAIN_SHUTOFF self._connection._mark_not_running(self) def ID(self): return self._id def name(self): return self._def['name'] def UUIDString(self): return self._def['uuid'] def interfaceStats(self, device): return [10000242400, 1234, 0, 2, 213412343233, 34214234, 23, 3] def blockStats(self, device): return [2, 10000242400, 234, 2343424234, 34] def setTime(self, time=None, flags=0): pass def suspend(self): self._state = VIR_DOMAIN_PAUSED def shutdown(self): self._state = VIR_DOMAIN_SHUTDOWN self._connection._mark_not_running(self) def reset(self, flags): # FIXME: Not handling flags at the moment self._state = VIR_DOMAIN_RUNNING self._connection._mark_running(self) def info(self): return [self._state, int(self._def['memory']), int(self._def['memory']), self._def['vcpu'], 123456789] def migrateToURI3(self, dconnuri, params, flags): raise make_libvirtError( libvirtError, "Migration always fails for fake libvirt!", error_code=VIR_ERR_INTERNAL_ERROR, error_domain=VIR_FROM_QEMU) def migrateSetMaxDowntime(self, downtime): pass def attachDevice(self, xml): result = False if xml.startswith("' % self._def['os'].get('loader_stateless')) disks = '' for disk in self._def['devices']['disks']: if disk['type'] == 'file': source_attr = 'file' else: source_attr = 'dev' strformat = """ """ strformat += """
""" disks += strformat % dict(source_attr=source_attr, **disk) filesystems = '' for filesystem in self._def['devices']['filesystem']: filesystems += ''' ''' % dict(source_attr=source_attr, **filesystem) nics = '' for func, nic in enumerate(self._def['devices']['nics']): if func > 7: # this should never be raised but is just present to highlight # the limitations of the current fake when writing new tests. # if you see this raised when add a new test you will need # to extend this fake to use both functions and slots. # the pci function is limited to 3 bits or 0-7. raise RuntimeError( 'Test attempts to add more than 8 PCI devices. This is ' 'not supported by the fake libvirt implementation.') nic['func'] = func if nic['type'] in ('ethernet',): # this branch covers kernel ovs interfaces nics += '''
''' % nic elif nic['type'] in ('vdpa',): # this branch covers hardware offloaded ovs with vdpa nics += '''
''' % nic # this branch covers most interface types with a source # such as linux bridge interfaces. elif 'source' in nic: nics += '''
''' % nic else: # This branch covers the macvtap vnic-type. # This is incomplete as the source dev should be unique # and map to the VF netdev name but due to the mocking in # the fixture we hard code it. nics += '''
''' % nic hostdevs = '' for index, hostdev in enumerate(self._def['devices']['hostdevs']): if hostdev['type'] == 'mdev': hostdevs += '''
''' % hostdev # noqa if hostdev['type'] == 'pci': hostdevs += '''
'''.format(index=index, vslot=index + 5, **hostdev) # noqa vpmems = '' for vpmem in self._def['devices']['vpmems']: vpmems += ''' %(path)s %(alignsize)s %(size)s %(node)s ''' % vpmem cputune = '' for vcpu, cpuset in self._def['cpu_pins'].items(): cputune += '' % (int(vcpu), cpuset) emulatorpin = None if 'emulator_pin' in self._def: emulatorpin = ('' % self._def['emulator_pin']) if cputune or emulatorpin: cputune = '%s%s' % (emulatorpin, cputune) numatune = '' for cellid, nodeset in self._def['memnodes'].items(): numatune += '' % (int(cellid), nodeset) numatune += '' % ','.join( self._def['memnodes'].values()) if numatune: numatune = '%s' % numatune serial_console = '' if CONF.serial_console.enabled: serial_console = """ """ vcpuset = '' if self._def['vcpu'].get('cpuset'): vcpuset = ' cpuset="' + self._def['vcpu']['cpuset'] + '"' return ''' %(name)s %(uuid)s %(memory)s %(memory)s %(vcpu)s hvm %(loader)s destroy restart restart %(cputune)s %(numatune)s /usr/bin/kvm %(disks)s %(filesystems)s
%(nics)s %(serial_console)s

{ҿ ȿUGDF # hz8T YY# B{UFL&d2?F`0 ?P} [u`ho?u#   @B<66D^nY^-] [ [ [ [[[&7^"&[6&[J&[^&]r&[&^&[&d^&[&[&[&[6[&6[:6[N6h^b6![v6"[6#[6$[6%[6&[6'[6([F)[F*[*F+[>F,[RF-[fF.[zF/[F0[F1[F2[F3[F4[F5[V6[V7[.V]_BV9[VV:[jV;[~V<[V=[V>[V?[V@[VA[VB[ fC[f^2fEL[FfF[Zf^nfH[fI[fJ[fK[fL[fM[fN[fO[v^"vQ[6vR[JvS[^vT[rvU[vV[vW[vX[vY[vZ[v[[v\[][&^[:_[N`[ba[vb[c[d[e[Ɔf[چg[Q^i[j[*k[>l[Rm[fn[zo[p[q[r[ʖs[ޖt[u[v[w[.x[By[Vz[j{[~|[}[~[[Φ[[[ ު-2<ZZnn&&"&"&6&6&J&J&^&^&r& r&&&<&U&&&&U&&&&U&&66U&6&6:6:6UN6N6b6b6Uv6v666U6666U6666U66FFUFF*F*FU>F>FRFRFUfFfFzFzFUFFFFUFFFFUFFFFUVVVVU.V.VBVBVUVVVVjVjVU~V~VVVUVVVVUVVVVUVV f fUff2f2fUFfFfZfZfUnfnfffUffffUffffUffffUvv"v"vU6v6vJvJvU^v^vrvrvUvvvvUvvvvUvvvvUvvU&&::UNNbbUvvUUƆƆچچUU**U>>RRUffzzUUʖʖUޖޖUU..BBUVVjjU~~UUΦΦU  $BWW.(%6QDUQ?S>S3Q?RdV"qwX[?( 8RLS`S~SSSSıSرS챔SBRS(Sv|Hv|Rv|\v|fv @ 4񸣢vvy|v|v|v|v|v|v|v-|A|U|U$$}|.|8|V|`|j |t|~1|E|Y|mx؆؆|| "|!"|5"| ]"|q"|("|<"|F" |P"ZZ"|d"|n92|M2|a2|u2|2|2|2|Ȗ2|Җ21@1 AQB|eB|"yB|,B|6B|@B`|TB|^ދQUVVVVUVVVVV SRVfff&f0f :fDfNfSRbfUlfvfffUffffUffffUfffvU vv v*vU4v>vHvRvU\vfvpvzvUvvvvUvvvvUvvvvUvU$.8BULV`jUt~UUĆΆ؆U U(2U<FPZUdnxUUȖҖUܖU"U,6@JUT^h "UȦUܦb&l&U&,@UTh|U&&&&U&̶U046UDXR6lUU66U*>URfz6UFFF$FUUjF.UBFVjU~UFFU(1H1R1\1f1p1z1Ä1Î1Ø1â1ì1ö11111111AAA$A.A8ABALAð÷jAtA~AÈAÒAÜAæAðAúAAAAAAAQ QQQ(Q2QBU=Us/?Se?SU?SѲ?SđPÿ@qP?@R?TV?ܲu`W@jqRu`a@r URfchqb?SuLiup`u`@բѠ+u`ѓb&Y faH1crgg`Ÿ /"(úfiic` dFpaMvaf&afl,s  a?5 %` pq q wu`Vis_PR Y. hm!#5N 93N"+b`?Copyr gt  c)M20Y9MM cCo"o Lÿ҆u` ?u~pQ(-DT! @ p@U au@`ba@? ìҀ@^0FH%@Q.FӀOռB\i@?yQ/FQd@ߐRrf i`R$pnst*oM Pe]ف p ջ bmCuE``-u5@`^0aՁ^0d qn pN<udRE %EЎ%$-+h 0JTlaaM>UFN&d2?F`0 ?F|G?Fm]ri?P U>tA`  M?t q۠ FZ>u hu!s7YaMJMRKm? ?M>bA@bKbmbz #e8 ۢ =# b#2O[(ta k-3thk-![(}/tГ/w t/ t/w t/ t?w t&?t;: Ne F;,";&a#68;R;W *P>B m42A2A`?CopyrigXt _(c)i@2dW09i@Ma@c_@osY@fgBXAr[@Aag@iY@nU.i@ 0l@ _HUsBe@e_@v@-d@\2lH#"p# @K4JUlQQQQPA!@@2Ruݯ?F#& hnGHge) ]NUq Z?FG ߄\.r\oMGhEUKar?F3ojp0m]-Eά\?T\iEUe^?F`1<<0mN[ʰ~\on\i^*S__` 5C3\i__B_TFTA'VW@qqx_[QQ JR! J!4J78 h !a!hE!lU]T T!"#$%T__o{?FlӖ|н1|Ϥo} rhQя Me]otv۲ǀFRH0ύ|dr٣|WӍ[Smp"pʍ|q5JK tvp?FZEoZɍ| 9EPڣ||lbtvX9|"@G!/ ւ?F9|H&3|?<=$_4F+\>30m_d 5\i`U0m~ !Qx$(gN/ ?P0wf>nZ?l? r?;{|{*K1?Fݿ K 1wz_ 9*S?FAcM@=orOyg?_P|* /$?F#K i >捵JLJ=|*zJ@w/d;ЍuI@ͺhˏ4pHD: )&# T=h0>Th]]9 Q BUFܷqP?Fԁ43a?F7??FtYOȻ?P Bt`  XM6 ?t׻4n%O__Bu aquo7>KJASTfw?X ?Az@bbb! ( 6f )  '+ ߈?I-Z#g!z A Qu +%/,8)E!'+ k(?r+o?(a4 8) E"#{ #BstG8tU: Ba L>2?!88;D;68?5;D; tj>8LALA1 `?CopyrigTt (c)@2`09@M{@c.y@oss@fBrAru@Aa@is@n.@ 0l@ yHsBe@ey@vZ@d@v2h<M#l#@ M@?4>UhjA6Q>1| L`Fkdj]J}vMjR]M 6)fMB@Wa\ k_稇avCe=ESeR9Y? l`gESeFeabkod iSQޗh~??5@+ c;Ꞗi"2_U3rA/04sCHD: )&# T=h0>Th]]9 Q BUFdHKJAUVJp?X ?Awz@bbb! ( "f Փ'+ ' qq?z A  +/.&68))E! '+ go?W-sU!n*a4 8) "u#{ u#D!st?8tM: Bav L62?!08;<;60?5;<; tb>{8DADA1 `?CopyrigTt (c){@2`09{@Ms@c.q@osk@fyBjArm@Aay@ik@n.{@ 0l@ qHsBe@eq@vZ@d@n2h<M#l#@ M@?]UhzbA.QR61b L` KdjMaQ  6fM{"@Ftnq\[3K_,7e5EGe<\ֽE?Nf[ NA_qd7eEGec6i\m j'f܋h2_UB3rA'04msCHD: )&# T=h0>Th]]9 Q BUF)8/?FFT?F!/$?Fᱝ̻?P Bt`  vNC?t|¢ϛT H0Bu auo7 >JTA0f88v?WYALv@z bbb!'n 6 % )! ! ,+ <?' o?z #a47% =) J""#K#{% (+ ( tQ#BXYщ?A> ?A "+(/+/=/O/a!y:5rl/@ sU10( /???? "?0<2?M{!b 1/Ĝ/*%,"t:"t>TQQ'@`?CopyrigTt (c)FP2`09FPM>PcTh]]9 PB IAUFJ1>?F}?FF(e?F=PFo?P t`  0!?tj$h5q+ tЖZu Mauo8 >    PEJƱZVJp!?&. 
?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a490 9_b!#{t0#tN8tϻ: Ua BL2$18KE;F?VEK]; t>B 4AA0`?CopyrigTt _(c)@2`W09@M@c@os@fBAr@Qa@i@nU.@ @l/P HUs3RePe@vEP-d'P2h<M#bl# M/P?]Uhz@AQ1 L@3FO#۲?F?o2zP dj|aW{'8cUF:&x0?F?F?F_'qoO 0Yigull4aj?N~f0c~o+ fEee?F<'?F@]?FwF%ޕol}dQ3ll BXl귺m|?bBhaU~e`ba`?FV ?F?Th ofl?PI3qoikeðl8` |GT?heFnF$OĒ?F o* leowk~y{f~tGg˂a ,8b1;MHD: )&# ;h0>Th]]9 PB IAUFGF?FEs?F+\(?F~@ t?P t`  9`Ik?t{ͻIu -ZrqKZu Mauo8 >    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FMbX9?F4ҫP dj}ah8cUF[~K?F|D?F+Xc?F&PRcGoO 0YiM+FllZaj}0%|f0c̭ fEe.V?Fȿ̅)`oSt?F?(X:olGGTll .Xl{e{|rww@haUe:&x0?F%Clw?FAz?F 1vņflux1qoiFA(]`t߼ |~Th]]9 PB IAUFGF?F\Dֳ?F+\(?F~@ t?P t`  9`Ik?t%h2tMu -ZrqKZu Mauo8 >    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FMbX9?F4ҫP dj}ah8cUF[~K?F|D?F+Xc?F&PRcGoO 0YiM+FllZaj}0%|f0c̭ fEe.V?Fȿ̅)`oSt?F?(X:olGGTll .Xl{e{|rww@haUe:&x0?F%Clw?FAz?F 1vņflux1qoiFA(]`t߼ |~Th]]9 PB IAUFXIn?FLw¾?F+\(?F-ku?P t`  -k?tZ߅BTu -CmoLZu Mauo8 >    PEJڍU[\/(?&. ?AbbY!z@Ae bbCt c(a4b  b(h#{ h#΍t(tI*#0f88vb.#i L5<1v(&$7/(D%"6/5;0( t.*B#11`?CopyrigTt] c).@2`W09.@M&@c$@os@f,BAr @YAa,@i@n}..@ Alt@U $HsxBeL@e$@v@dl@"h<M#l# Mt@?]UhzAA!J@#FMbX9?F$x0OԛP dj}תa4(SUF[~K?FE?FSc?F?#oO 0YM+Fӱ\*aj,.}a$?# XEU"n?F +?FbEd?F v<olTBJ\LMYBltavXlp@XEUv/?Fpvw;pg?F $hއvol/AD0qYJg ַBl)$ԞXlD?B:XU^F.fّPKk^Rl_[Ei1GAf`dSOq+a,(bvHD: )&# ;h0>Th]]9 PB IAUFXIn?F(?F+\(?F-ku?P t`  -k?t7/;3bu -+CmoLZu Mauo8>    PEJڍU[\/(?&. ?AbbY!z@Ae bbCt c(a4b  # b(h#{ h#΍t(tI*#0f88vb.#i L1<1v(&$7/(D%"6/5;0( t.*B#11`?CopyrigTt] c).@2`W09.@M&@c$@os@f,BAr @YAa,@i@n}..@ Alt@U $HsxBeL@e$@v@dl@"h<M#l# Mt@?]UhzAA!J@#FMbX9?F%x0OԛP dj}תa4(SUF[~K?FE?Fdc?F,?#oO 0YM+Fӱ\*aj ,.}a?# XEU"n?F{ +?F(%/Nd?F<olTBJ\MYBlU{XlpDc@XEUv/vw;pg?F^$hއ?Fvol/AD0qYf ַBls$ԞXl+?B:XU^F.fّPyKk^Rl_[Ei1GAf`dSOq+a,(bvHD: )&# T=h0>Th]]9 Q BUF贷F$?Ft\@?F\HRq?FƯK7?P Bt`  x#?tK#ll$nZVBu auo7>JA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 ) "'}# }#BtΜQ8t_:Ba~ LH2!B8;N;6B?5;.N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@oKs}@fB|Ar@Aa@i}@n.@ 0l@ HsBe@e@v@d@2hb<M#l1# M@?4>UhuQuQPtA@Qj LZFχϰ 7j bL\2] 6R1bMB:MbPdc Ɵmdb`GeGEWe$oZ\aibvIGeU@Vϰ+@?F23<~pk_xu jlTh]]9 Q BUF#/?FKGY?F\HRq?FƯK7?P Bt`  ~B?tIH#ll$nZVBu acuo77AJA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZFهgϰ 7j bL\G6] 6S-fMB:MbPdc mdb`GeGEWe$oˏZ\ai?bvIGeUWeVϰ+@?Vpk_xu j aGeWe _ooh2K]u:3rA 4s2HD: )&# T=h0>Th]]9 Q BUF/ ֛9?FD6Hr?F\HRq?FƯK7?P Bt`   a1?t -_9#ll$nZVBu auo7>JA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZFهgϰ 7j bL\] 6-fMB:MbPdc Ɵmdb`GeGEWenZ\aiavIGeUWeVϰ+@?Vpk_xu j aGe@We_ooh2Kt]u3rA 4s2HD: )&# T=h0>Th]]9 Q BUF[@#?F\O?Fo?FƯK7?P Bt`  ?tJ8Xj$nZVBu auo7>JA]^'? ?Az@b ( 6f  )ϓb= + _? 
' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt>(cu)>2`09>uM@c@os}@IfB|Ar@Aa@]i}@n.> 0Ul@ HsBe@e@v@d@B2h,<M#l# M@?U4>UhuQuQtAJ@QH1j LZFهϰ 7j bL\] 6-fMB:ʿMbPdc 2Ngodb`GeGEWe$oZ\aibvIGeUWe4?Vpku j aGeWe_ooh2K]u3rA 4s2HD: )&# ;h0>Th]]9 PB IAUF?FË.5?FQ?F?P t`  ?{R   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sUK1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFbL7a 6{fM8cUFnDdTh]]9 PB IAUF4{"?F=N?FQ?F?P t`  ^}W;o?tQxČ( 뾅@l* 9u auo8>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFbL7a 6{fM8(a?nDFF!@~qWoO 0dTc I\rdcd bjUkgaPbeEef"v?Fr6d?F?kĀqo`OivddlXr`tF0sK|٢jr3t!KU:G}?{]rLl} ]|q3xnTrg?F<ے7pooZgtfio,V|=] 5ve&1?Fpn?FTh]]9 PB IAUFGC<-?F f?FQ?F?P t`  L?RD)o?tC%)Č( 뾅@l* 9u auo8>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(#{ #st8tϥ:RUa L21*8K;6?@EK; tR>B4AA0`?CopyrigTt c)@2`09@M@c.@os@fBAr@Aa@i@n.@ 0lP HsRe@e@vZ/PdP2h<M#l# MP? < >Uh$QQ]AQ1 LZ3;'xP 7jbUbFb7a 6{fM8cUFnDFK!@?FDqWoO 0dTc I\addǼbjUkgabeE(aFjv?Fr6d?FkĀq o`Oi^j^r`t0sKÊ#|pr9t!KU:M}{]r,Ll}]|9xLufTrg?F<ے7vodObdcZgtfi,V #|=c ;vLu&1?Fpn?FTh]]9 PB IAUFĊ?F$n6b?FQ?F?P t`  ro?t,Č( 뾅@l* 9u auo8dA d  ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFbL7da 6{fM8(a?D +F2|>?FW@WoO 0j?rdcd bj{^WeabeEef"v?Fddd?F܃qo`OivddlXr`tuqE|E:jr3t!KU:G}{]rLl}]|q3xngqrg?F~pooZgt_ML|\ 5ve&1?FWf?F?#0b|[m낪 z'oǵ(|F$$!ayTh]]9 Q BUF贷F$?FlIJ?F\HRq?F#o˯Z?P Bt`  x#?tnZ/J#ll#VBu auo7>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% @& J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? M@?(>Uh7E P+AA!{ L:F A7<6j% bRzg9<2 "\MbPdS mT W5U6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF#/?F ?F\HRq?Fn˯Z?P Bt`  ~B?t6#ll#VBu acuo7hAJTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt$(c)$2`09$M<@c:@o%s4@fBB3Ar6@oAuaB@i4@n.$_ Al@ :HUsBeb@e:@v@-d@2`h/1<M#l1#7? PM@?(>Uh7E +AA!{ L:F<F6j% bRzg9<2 "\7MbPdS mT W5UO?6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF/ ֛9?FT?F\HRq?Fn˯Z?P Bt`   a1?t#ll#VBu auo7>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? M@?(>Uh7E P+AA!{ L:F6j% bT ü2 "\MbPdS mT W5UO6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF[@#?Flo]?Fo?Fn˯Z?P Bt`  ?tz6,JJ\8Xj#VBu acuo7AJTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt>(c)>2`09>M<@c:@o%s4@fBB3Ar6@oAuaB@i4@n.>_ Al@ :HUsBeb@e:@v@-d@2`h/1<M#l1#7? PM@?(>Uh7E +AA!{ L:F<F6j% bRzg9<2 "\ʩ7MbPdS 2NoT W5UO?6ik0YaYvq 2UHT5D B &# T>h0JTtiiM ENUFlQ?Fڋ?FPaW?F ru?P WNtQ` c͕.:7?tU)oni8;(\I2-nNuj mu){E0f88wv?AVQ Ӎ!@#b#m@b  0#  #W(3 (o+&T" #t#NĽ/?. 
?M"&$a%$$,/>*L%i,T"tΞ*T"tR.R11#`?Copyrig`t (c)02l090M0c0os0f21r01a0i0n.0 Al@ 8sBe0e0v0@d@"Vt!X#xB #/ @T,UtE1UA-!@nZ pvm?S׮(%UQG@Y9"S\FkV5%UЗn@‹Pهϰ=\WQS\OXbiTQY%_7_I_[_DQOOOO__^̔ԍ?f Q́)f@vVP|?5^BlӿIY:6Xl?Ib ʏ܏EABTfxF\HRqCk?-q_WaW?ϰ+@"Bl!H,BXQnB)fFޖBoT[inJ\ۙgyAHD: )&# T=h0>Th]]9 Q BUFhY?FlIJ?F&1?F3(e?P Bt`  6n?tҝw~m4tMuVBu auo7>JAS`? ?Az@bbb! ( 6 )  '+ ߈?/I-Z#mg!z A " ././@/R+,g/y'sU1,ar4 )] "#{t #BtNg8tu:Ba PL^2!X8;d;Q6X?E;d; t͊>8lAlA `?CopyrigTt (c)@2`09@M@c@o%s@fBAr@Aua@i@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhQQA(VQ^1| LZFX} g 7j b2ɶGOR] 6SCfMB:ZMbXdc $I$I܍+d `2aa]Eme'la'iL.G]eU@9/$E?;FslAka,dHּgmeoo(oh2gyu3rA 4s2HD: )&# T=h0>Th]]9 Q BUFy?F.޺s?F&1?F3(e?P Bt`  7 ?tN]m4t_MuBu aquo7>KJAS`?X ?Az@bbb! ( 6f )  '+ ߈?I-Z#g!z A " ././@/R+g/y'sU1,a4 ) ."#{ :#Btg8tu:B@a L^2!X8;d;6X?E;d; Kt͊>8alAlA `?CopyrigTt (c)@2`09@M@c@os@fBAr@Aa@i@n.@ 0l@ HsBe@e@v@d@2hX<M#l # M@?4>UhQQAVQ^1| LZFX} 3 7j b2ɶOK] 6SCfMB:ZMbXdc $I$I܍+d 2aa]Emekla'i.]eU@;0$E?FTh]]9 Q BUF߹e?F4jߤ?F!?FTUUUɺ?P Bt`  ?tiqn= ףc?ckBu1 ,duon7>)JAUVJp? ?Awz@b_bb! (݉ " '+ ' qq?z A  +S/.&68))E! '+ g?uW-sUUn*a4 8) "u#{ u#D!tΜ?8tM:Bav L62?!08;<;60?5;.<; tb>{8DADA1 `?CopyrigTt (c){@2`09{@Ms@cq@oKsk@fyBjArm@Aay@ik@n.{@ 0l@ qHsBe@eq@v@d@n2hb<M#l1# M@? Uh,cQcQ] '1PbA.Q61b L`w"?FMDA dj? |m cuۡ'1 6!1fM{":F$3<6d kS=ѣ<n2{!5E[nLa vogaKeE[eV_@cbc g@]amr\GDž}iNu?FS%f|]tElFTh]]9 Q BUFtx?F/!?FTUUU?P Bt`  K_?t)C= ףc?cZBu auo7[>ʜJAUVJp? ?Awz@bbWb! ( 7 " '+ ' qq?z A  +/.&68))E! '+ g?W-sUUn*a4 8) ."u#{ {8aDADA1 `?CopyrigTt (c){@2`09{@Ms@cq@osk@fyBjArm@Aay@ik@n.{@ 0l@ qHsBe@eq@v@d@n2hX<M#l # M@? Uh,@cQcQ] '1bA.Q61b L`zw"?FDA dj O|m cuۡi'1 61fM{":F$3<6d kS=ѣ<n2{!5E[nLah vogaKeE[eV_cbPc g@]amr\Dž}iNu?FS%f|]tElF<2PhQp[" |"*`KeQitVl:qid?5hQ@J_nohU2t3rA 4n2HD: )&# T=h0>Th]]9 Q BUFiv?F{<?FU?F3z[j˺?P Bt`  y]?thS&XRBu ,duo7oAJAa^!? ?Az@bbb! (n 6  )  '+ g~X?I-Z#!g!z A " ,/,/>/P-[ן+g/y'"ي,a4 ) "!E#{ #Btg8tu:Ba L^2!X8;d;6X?Eu;d; t͊>)8lAlA `?CopyrigTt (c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0l@U HsBe@e@v@d@2h<M#l#A M@?8>UhoQQ]AJVQ^1| L@}?F! dj ٯla6?ztB: 7d /kyԷ&Dh]EWemkp+ɉ 5d}bGeUWeFBϰ+@ol0juMDhnMbX0j`(DhnC<o+o"<EgUoo6o9HD: )&# T=h0>Th]]9 Q BUFT1#E2H?F]&UaU?F3z[j?P Bt`  ?t1:&XRZBu auo7[!>ʜJAa^Ė? ?Az@bbWb! ( 7 6 )  '+ g~X?I-Z#!g!zV A " ,/,/>/P-[O+g/y',*a4 ) "!#{ #Bstg8tu: Ba L^2!X8;d;6X?E;d; t͊>8lAlA `?CopyrigTt (c)@2`09@M@c.@os@fBAr@Aa@i@n.@ 0l@ HsBe@e@vZ@d@2h<M#l# M@?8>UhoQQ]AVQ^1| L@Ͼ?F! dj ٨la6ztB: 7d /kyԷ&Dh]EWemkp+ɉ 5d}bGeUWeFBϰ+@ol0juMDhnH?MbX0j`DhWef9o-ai qGeUoo$o6o3UHLD )&# R>h 0JTlaaM>UFObFd0?Fgj?F0U?F_gw?P >tA`  N+ru?t sM׵01,_Œ X">u eu!s7-"JɅJUU?HA9 j@7tt7 b:*H&tH  th*Nĵ) ?MB"b;)H&e8H m 3 H -US4o?FPjj?6ֈ@!QpoĬگ eoĩ́ F o 01G}}϶@b@mp?#nʼPV]O߸uy\l2_+ʦE}@p5"iРo~e*al|ٍşןHD: )&# ;h0>Th]]9 PB IAUF\6:?F}?FF(e?F=PFo?P t`  @?tP[5q+ tЖZu Mauo8#>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FO#۲?Fo2zP dj|aW{ޓ8cUF:&x0?F?F?FX'qoO 0Yifull4ajS?ON~f0c?h+ fEeޖe?F<'?F@]?F\F%ޕol}dQ3ll BXy귺m|ԇbBhaUe```?FV ?F?Th ofl?PI3qoijeðl.` |=T?heFnF$OĒ?F o* leowk~y{f~tGg˂a ,8b1;MHD: )&# ;h0>Th]]9 PB IAUF\6:?Fx?FF(e?F=PFo?P t`  @?t%5q+ tЖZu Mauo8$>    PEJƱZVJp!?&. 
?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FO#۲?Fo2zP dj|a{ޓ8cUF:&x0?F?F?F'qoO 0Yifull4ajK?ON~f0c?+ fEeޖe?F<'?F@]?FsF%ޕol}dQ3ll BXyR귺m|bBhaUe```?FV ?F?Th ofl?PI3qoiGkeðl?2` ,yFT?heFnF*OĒ?F o* leowk~yf~tGg˂a ,8b1;MHD: )&# T=h0>Th]]9 Q BUFL&d?F`0?F(\µ^S?P Bt`  {Go?tNV yAP FBub ,duo7%>RJ[0U?<ALv@tt7 }b6*D&tD  td*JBı) ?A>"b7)D&a4D  V3 D 8/td/tRy.x1x1`?CopyrigTt (c)02`090M0c0os0f21r01a0i0n.0 Al0 8s2e0e0v @d0"h0!<M#l#/MlP> @?UhLAA]R 1 P1"7Ж1bA>!J`oO2z[ dja_q:z׍ Q 6VMB@챝/P\`_m;E,Ui5Uޖ?FQ[5lf\VU'EUY2Ruݯ?FU]˖7sf\pdѣSYUWfe _zx]݅om{ZcXUFRP\g[)JXAp| rhQ#5 IXAYaLPւ?FتcP\T f\3$sXAp"۲ ElU1YA-k?FFoZP\fyf\!XAYX%?F8K7P\{ 䘷f\&SAY&1l?Fr>|]P\-]qiYip~ X1#g~jt-? XAY?F ףp=P\' f\?P=Rw㹥X'RXT}7vP\__/vnTpljlX7U/?$?vQ[E7Ժf\wUQY|>qh|P\4GN"MDX U29_KU#rA0$@CHD: )&# T=h0>Th]]9 Q BUF8?Fā43?F7??FtYOȻ?P Bt`  1=o2?!88;D;68?5u;D; tj>)8LALA1 `?CopyrigTt (c)@]2`09@M{@]cy@oss@fBRrAru@Aa@is@Wn.@ 0l@U yHsBe@ey@v@d@v2h<M#l#A M@?4>UhkQkQjA6Q>1| L`Fdj]JK}vM41R] 6)fMB@Wa\ k_程avCe=E~SeR9Y? lgESeFeabkod i?SQޗh~?5@+ c;ꉞi2_U3rA/04sCHD: )&# T=h0>Th]]9 Q BUF2B9?FZ&?F'l?F{Cƻ?P Bt`  ,i?t('̺㯝4\YDrVBu auo7'>JAUVJp? ?Awz@bbb! ( " '+ ' qq?z A 9 +/.&68))E! '+ g?W-sU"!n*a4 8) "u#{ u#D!t?8tM:Bav L62?!08;<;60?5u;<; tb>){8DADA1 `?CopyrigTt (c){@]2`09{@Ms@]cq@osk@fyBRjArm@Aay@ik@Wn.{@ 0l@U qHsBe@eq@v@d@n2h<M#l# M@?]UhzbA.Q61b L` djM"aQ  6fM{"@Ftnq\[3K_,7e5EGe<\ֽE?Nf[ N_qd7eEGec6i\m jOf܋h2_U3rA'04msCHD: )&# T=h0>Th]]9 Q BUF] ?F t5?F!/$?Fᱝ̻?P Bt`  "?t4@+ϛ׊T, H_0Bu aquo7(>KJTA0f88v?WYgAL5v]@z bbob!'݉ 6 ,% )! ! ,+ <?' o?z #a47% =) J"E#K#{% (+ ( t#BXY?A> ?A "+(/+/=/O/a!y:5r=l/@ sU10( /???? "0<2(?M{!b 1//*%,"st:"t>0QQ'@`?CopyrigTt (cu)FP2`09FPuM>PcTh]]9 PB IAUFR?F}Y?FF(e?F=PFo?P t`  (\#?tƯ.5q+ tЖZu Mauo8)>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 p l l w+ g?-#U!׷!zme Ae [+j/|//-g#+ s"?(a4\0 9 *2!#{0#t8tϻ:񧂍Ua PL2$18K;QF?VEK; t>B4AA0`?CopyrigTt (c)@2`09@M@c@o%s@fBAr@Qua@i@n.@U @l/P Hs3RUePe@vEPd'P 2h<M#l# M/P?]UhzPAQ1 L@3FO#۲?FoO2zP dj|aW{8cUF:&x0?F?F?F_?'qoO 0Yigull4aj?N~f0co+ fEeޖe?F<'?F@]?FwF%ޕol}dQ3ll BXlﷺm|bBhaUeߔ`ba`?FV ?FTh oflPIϮ3qoikeðl8` |GT?heFnF$OĒ?F o* leowky{ftGg˂a,8b1;MHD: )&# ;h0>Th]]9 PB IAUFpX?F&U?F+\(?F~@ t?P t`  (TJ?t[uᯜ(=u -ZrqKZu Mauo8*>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FMbX9?F4ҫP dj}ah8cUF[~K?F|D?F+Xc?F&PRcGoO 0YiM+FllZaj}0%|f0c̭ fEe.V?Fȿ̅)`oSt?F?(X:olGGTll .Xl{e{|rww@haUe:&x0?F%Clw?FAz?F 1vņflux1qoiFA(]`t߼ |~Th]]9 PB IAUFpX?F*g?F+\(?F~@ t?P t`  (TJ?t4%u -ZrqKZu Mauo8+>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FMbX9?F4ҫP dj}ah8cUF[~K?F|D?F+Xc?F&PRcGoO 0YiM+FllZaj}0%|f0c̭ fEe.V?Fȿ̅)`oSt?F?(X:olGGTll .Xl{e{|rww@haUe:&x0?F%Clw?FAz?F 1vņflux1qoiFA(]`t߼ |~Th]]9 PB IAUF$?Fy}?F+\(?F-ku?P t`  p&k?tl    PEJڍU[\/(?&. ?AbbY!z@Ae bbCt c(a4b  # b(h#{ h#΍t(tI*#0f88vb.#i L5<1v(&$7/(D%"6/5;0( t.*B#11`?CopyrigTt] c).@2`W09.@M&@c$@os@f,BAr @YAa,@i@n}..@ Alt@U $HsxBeL@e$@v@dl@"h<M#l# Mt@?]UhzAA!J@#FMbX9?F$x0OԛP dj}תa4(SUF[~K?FE?FSc?F?#oO 0YM+Fӱ\*aj,.}a$?# XEU"n?F +?FbEd?F v<olTBJ\LMYBltavXlp@XEUv/?Fpvw;pg?F $hއ?FLvo~l/AD0qYJg ַBl)$ԞXlDB:XU^F.fّPKk^Rl_[Ei?1GAf`d?Sq+abvHD: )&# ;h0>Th]]9 PB IAUF$?FQ?F+\(?F-ku?P t`  p&k?t u -+CmoLZu Mauo8->    PEJڍU[\/(?&. 
?AbbY!z@Ae bbCt c(a4b  # b(h#{ h#΍t(tI*#0f88vb.#i L5<1v(&$7/(D%"6/5;0( t.*B#11`?CopyrigTt] c).@2`W09.@M&@c$@os@f,BAr @YAa,@i@n}..@ Alt@U $HsxBeL@e$@v@dl@"h<M#l# Mt@?]UhzAA!J@#FMbX9?F%x0OԛP dj}תa4(SUF[~K?FE?Fdc?F,?#oO 0YM+Fӱ\*aj ,.}a?# XEU"n?F{ +?F(%/Nd?F<olTBJ\MYBlU{XlpDc@XEUv/vw;pg?F^$hއ?Fvol/AD0qYf ַBls$ԞXl+?B:XU^F.fّPyKk^Rl_[Ei1GAf`dSOq+a,(bvHD: )&# T=h0>Th]]9 Q BUFt[@#?FX J?F\HRq?FƯK7?P Bt`  =0[?t@'#ll$nZBu duo7.>JA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZFهgϰ 7j bL\GB] 6S-fMB:MbPdc mdb`GeGEWe$oˏZ\ai?bvIGeU@Vϰ+@_?F23Th]]9 Q BUF偃?F?F\HRq?FƯK7?P Bt`  Xkw?t[I2#ll$nZVBu auo7/>JA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZFهgϰ 7j bL\] 6-fMB:MbPdc Ɵmdb`GeGEWe$oZ\aibvIGeUWeVϰ+@?Vpk_xu j aGe@We_ooh2Kt]u3rA 4s2HD: )&# T=h0>Th]]9 Q BUF͜?Fil8?F\HRq?FƯK7?P Bt`  W׎?tt^ #ll$nZVBu acuo7aJ?JA]^'?X ?Az@bu ( 6  )b= + ? ' 88?z A  +=  /)/;/M$[+["-j!Q U1[!x(a4 ) ."'}#{ :}#BtQ8t_:B@a~ LH2!B8;N;6B?5;N; Ktt>8aVAVA `?CopyrigTt (c)@2`09@M@c@os}@fB|Ar@Aa@i}@n.@ 0l@ HsBe@e@v@d@2hX<M#l # M@?4>UhuQuQtA@QH1j LZFه3 7j bL\`] 6-fMB:MbPdc gmdb`GeGEWenZ\aiavIGeUWeVϰ+@?Vpk_xu j aGeWe_ooh2K]u3rA 4s2HD: )&# T=h0>Th]]9 Q BUF[@#"?F`>?Fo?FƯK7?P Bt`  ?tS'8Xj$nZVBu auo71>JA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt>(cu)>2`09>uM@c@os}@IfB|Ar@Aa@]i}@n.> 0Ul@ HsBe@e@v@d@B2h,<M#l# M@?U4>UhuQuQtAJ@QH1j LZFهϰ 7j bL\] 6-fMB:ʿMbPdc 2Ngodb`GeGEWe$oZ\aibvIGeUWe4?Vpku j aGeWe_ooh2K]u3rA 4s2HD: )&# ;h0>Th]]9 PB IAUFv?Fo]j?FQ?F?P t`  Smo?tT.qČ( 뾅@l* 9u duo82>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFb7@c 6{fM8cUFnDdTh]]9 PB IAUFSQ=?F{3?FQ?F?P t`  4o?tPǭ\"Č( 뾅@l* 9u auo83>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU K1(a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFbL7a 6{fM8(a?nDFF!@~qWoO 0dTc I\rdcd bjUkgaPbeEef"v?Fr6d?F?kĀqo`OivddlXr`tF0sK|٢jr3t!KU:G}?{]rLl} ]|q3xnTrg?F<ے7pooZgtfio,V|=] 5ve&1?Fpn?FTh]]9 PB IAUF!?F?FQ?F?P t`  + {o?tn&Č( 뾅@l* 9u auo84>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFbL7a 6{fM8cUFnDFK!@?FDqWoO 0dTc I\addbjUWkgabeE(aFjv?Fr6d?FkĀAqo`Oi^j^r`t0sK#|pr9t!KU:M}{]rYLl}]|9xLufTrg?F<7vodObdcZgtfi,V#|=c ;vLu&1?Fpn?FTh]]9 PB IAUFĊ?F)lIJ?FQ?F?P t`  o?t\Č( 뾅@l* 9u auo85>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFbL7a 6{fM8(a?D +F2|>?FW@WoO 0dTc rdcd bj{^ea͂beEef"v?Fddd?F܃q o`OivddlXr`tuqEÊ|:jr3t!KU:G}{]r,Ll}]|q3xngqrg?F~pooZgtML|\ 5ve&1?FWf?F#0bĂ|[m낪 z'ǵ(|F$$!ayTh]]9 Q BUFt[@#?Fyوe??F\HRq?F#o˯Z?P Bt`  =0[?t&0N#ll#Bu duo76>JTA0f88v?WYϳAjL@z bbb!'  ,% )! ! ,+ <?' 
o?z #a47% =) J"E#K#{% (+ ( t#B2?A> &?A"&"I&$/*%,"t:"Kt> A A6 `?CopyrigTt (c)D@2`09D@M<@c:@oKs4@fBB3Ar6@oAaB@i4@n.D@ Al@ :HsBeb@e:@vZ@d@2h/1<M#bl#7? M@?(>Uh7E +A(A!{ L:F A<6j% bRzg9ǩ<2 "\MbPdS mT W5U6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF偃?F?P?F\HRq?Fn˯Z?P Bt`  Xkw?tGj#ll#VBu auo77>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? M@?(>Uh7E P+AA!{ L:F<6j% bRzg9ǩ<2 "\MbPdS mT W5UO6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF͜?F:'?F\HRq?Fn˯Z?P Bt`  W׎?ts aU#ll#VBu auo78>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? M@?(>Uh7E P+AA!{ L:F6j% bT ü2 "\MbPdS mT W5UO6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF[@#"?F.޺?Fo?Fn˯Z?P Bt`  ?tchE 8Xj#VBu auo79>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt>(c)>2`09>M<@c:@o%s4@fBB3Ar6@oAuaB@i4@n.>_ Al@ :HUsBeb@e:@v@-d@2`h/1<M#l1#7? PM@?(>Uh7E +AA!{ L:F<F6j% bRzg9<2 "\ʩ7MbPdS 2NoT W5UO?6ik0YaYvq 2UHT5D B &# T>h0JTtiiM ENUF|u(?F-?FPaW?F ru?P WNtQ` 786R7?tU溁i8;(\I2-nNuj mu){:E0f88wv?AVQ Ӎ!@#b#m@b  0#  #W(3 (o+&T" #t#NĽ/?. ?M"&$a%$$,/>*L%i,T"tΞ*T"tR.R11#`?Copyrig`t (c)02l090M0c0os0f21r01a0i0n.0 Al@ 8sBe0e0v0@d@"Vt!X#xB #/ @T,UtE1UA-!@nZ pvm?S׮(%UQG@Y9"S\FkV5%UЗn@‹Pهϰ=\WQS\OXbiTQY%_7_I_[_DQOOOO__^̔ԍ?f Q́)f@vVP|?5^BlӿIY:6Xl?Ib ʏ܏EABTfxF\HRqCk?-q_WaW?ϰ+@"Bl!H,BXQnB)fFޖBoT[inJ\ۙgyAHD: )&# T=h0>Th]]9 Q BUF4@ ,?FYوe?^?F&1?F3(e?P Bt`  N!?tx/m4tMuVBu auo7;>JAS`? ?Az@bbb! ( 6 )  '+ ߈?/I-Z#mg!z A " ././@/R+,g/y'sU1,ar4 )] "#{t #BtNg8tu:Ba PL^2!X8;d;Q6X?E;d; t͊>8lAlA `?CopyrigTt (c)@2`09@M@c@o%s@fBAr@Aua@i@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhQQA(VQ^1| LZFX} g 7j b2ɶO] 6CfMB:ZMbXdc $I$I܍+d 2aa]Eme'la'iL.]eU@9/$Ew?FslAka,dHޓּgmeoo(oh2gyu3rA 4s2HD: )&# T=h0>Th]]9 Q BUFnټk-?F]u)?F&1?F3(e?P Bt`  3?tg[ m4tMuVBu auo7<>JAS`? ?Az@bbb! ( 6 )  '+ ߈?/I-Z#z A* " ././@/R+Yg/y'sU1,a4 ) "#{ #BtΜg8tu:Ba L^2!X8;d;6X?E;.d; t͊>8lAlA `?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Aa@i@n.@ 0l@ HsBe@e@v@d@2hb<M#l1# M@?4>UhQQPAVQ^1| LZFX}  7j b2/O]M 6CfMB:ZMbX͉dc ?$I$I܍+d 2aa]Emekla'i.]eU@;0$E?FTh]]9 Q BUFܲ?FH7H?F!?FTUUUɺ?P Bt`  &?tQ3ņ= ףc?ckBu5 auon7=>)JAUVJp? ?Awz@b_bb! (݉ " '+ ' qq?z A  +S/.&68))E! '+ g?uW-sUUn*a4 8) "u#{ u#D!tΜ?8tM:Bav L62?!08;<;60?5;.<; tb>{8DADA1 `?CopyrigTt (c){@2`09{@Ms@cq@oKsk@fyBjArm@Aay@ik@n.{@ 0l@ qHsBe@eq@v@d@n2hb<M#l1# M@? Uh,cQcQ] '1PbA.Q61b L`w"?FMDA dj? |m cuۡ'1 6!1fM{":F$3<6d kS=ѣ<n2{!5E[nLa vogaKeE[eV_@cbc g@]amr\GDž}iNu?FS%f|]tElFTh]]9 Q BUF:C<?FY!)^?F!?FTUUUɺ?P Bt`  Ȼ?ޫd?ttyI5= ףc?ckBu5 auon7>>)JAUVJp? ?Awz@b_bb! (݉ " '+ ' qq?z A  +S/.&68))E! '+ g?uW-sUUn*a4 8) "u#{ u#D!tΜ?8tM:Bav L62?!08;<;60?5;.<; tb>{8DADA1 `?CopyrigTt (c){@2`09{@Ms@cq@oKsk@fyBjArm@Aay@ik@n.{@ 0l@ qHsBe@eq@v@d@n2hb<M#l1# M@? Uh,cQcQ] '1PbA.Q61b L`w"?FMDA dj? |m cuۡ'1 6!1fM{":F$3<6d kS=ѣ<n2{!5E[nLa vogaKeE[eV_@cbc g@]amr\GDž}iNu?FS%f|]tElFTh]]9 Q BUFx]?FTy4߮?FU?F3z[j˺?P Bt`  Q`7lg?t-xW&XRkBu5 auon7?>)JAa^! ?Az@b_bb! (݉ 6 , )  '+ g~X?ZI-Z#!g![z A " ,/,/>/P-[?+g/y'Eي,a4 ) "!#{ #Btg8t)u:Ba L^2!*X8;d;6X?E;d; tR>8lAlA `?CopyrigTt (c)@2`09@M@c@os@fBAr@Aa@i@n.@ 0l@ HsBe@ej@v@d@2!h<M#l# M@? 8>UhoQQ]AVQ^1| L@?F҄! dj O٨l,d6ztJB: 7d /k?yԷ&Dh]EWemkp+ɉ 5d}bGeUWeFBϏ+@ol0ju?MDhnMbX0j`QDhnC<o+o"Th]]9 Q BUFT1#E2?FLΉ?FU?F3z[j˺?P Bt`  S|.?tп}Qm&XRkBu5 auon7@>)JAa^!? ?Az@b_bb! 
(݉ 6 , )  '+ g~X?ZI-Z#!g![z A " ,/,/>/P-[?+g/y'Eي,a4 ) "!#{ #Btg8t)u:Ba L^2!*X8;d;6X?E;d; tR>8lAlA `?CopyrigTt (c)@2`09@M@c@os@fBAr@Aa@i@n.@ 0l@ HsBe@ej@v@d@2!h<M#l# M@? 8>UhoQQ]AVQ^1| L?F҄! dj _٨la6ztJB: 7d /k?yԷ&Dh]EWemkp+ɉ 5d}bGeUWeFBϏ+@ol0ju?MDhnHMbX0j`DhWef9o-ai qGeUoo$o6o3UHLD )&# R>h 0JTlaaM>UF+1#E2?F* 9?F0U?F_gw?P >tA`  9|4?t +5f1,_Œ X">u eu!s7-AJɅJUU?H<j@7tt7 b:*H&tH  th*Nĵ) ?MB"b;)H&e8H m 3 H -US4o?FPjj?6ֈ@!QpoĬگ eoĩ́ F o 01G}}϶@b@mp?#nʼPV]O߸uy\l2_+ʦE}@p5"iРo~e*al|ٍşןHD: )&# ;h0>Th]]9 PB IAUF~Q?F?FF(e?F=PFo?P t`  IGr?tn 5q+ tЖZu Mauo8B>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ As"(a40 9 *2!#{0#΍t8tIϻ:AUa L2$18K;F?VEK; Kt>B4aAA0`?CopyrigTt (c)@2`09@M@c@os@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hX<M#l # M/P?]UhzA(Q1 L@3FO#?Fo2zP dj|aW{8cUF:&x0?F?Fώ?FX'qoO 0Yifull4ajS?N~dh+ fEeޖe?F<'?F@]?F\F%ޕol}dQ3ll BXlm|ԇbBhaUe```?FV ?FTh oflPIϮ3qoijeðl.` |=T?heFnF$OĒ?F o* leowky{ftGg˂a,8b1;MHD: )&# ;h0>Th]]9 PB IAUF~Q?Fe?FF(e?F=PFo?P t`  IGr?t8n5q+ tЖZu Mauo8C>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ As"(a40 9 *2!#{0#΍t8tIϻ:AUa L2$18K;F?VEK; Kt>B4aAA0`?CopyrigTt (c)@2`09@M@c@os@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hX<M#l # M/P?]UhzA(Q1 L@3FO#?Fo2zP dj|a{8cUF:&x0?F?FȎ?F'qoO 0Yifull4ajK?N~d+ fEeޖe?F<'?F@]?FsF%ޕol}dQ3ll BXlRm|bBhaUe`Ԗ``?FV ?FTh oflPIϮ3qoiGkeðl2` |FT?heFnF*OĒ?F o* leowkyftGg˂a,8b1;MHD: )&# T=h0>Th]]9 Q BUF&d29?F `0?F(\µ?F^Sӻ?P Bt`  o?tՐs] yAP FBuj auo7ARJ[0U?<ALv@tt7 }b6*D&tD  td*JBı) ?A>"b7)D&a4D  V3 D 8/td/tRy.x1x1`?CopyrigTt (c)02`090M0c0os0f21r01a0i0n.0 Al0 8s2e0e0v @d0"h0!<M#l#/MlP> @?UhLAA]R 1 P1"7Ж1bA>!J`oO2z[ djaq:z׍ QM 6VMB@챝P\`_m;?E,Ui5Uޖ?FQ[_5lf\VU'EUY2Ruݯ?FU]˖7sf\pdѿSYUWfe /zx]݅om{ZcXXUFRP\g[)JXAp| rhQ#5 ?IXAYaLPւ?FتcP\T f\3$sXAp" ElX1YA-k?FFoZP\fyf\?!,UAYX%?F8K7P\{ f\&SAY&1l?Fr>|]P\-]qiYip~ X1#g~jt-? ѰXAY?F ףp=P\' f\?P=RwX'RXT}7vP\__/vnTpډlX7U/$9SYE7Ժf\wUQY|>îqh|P\4GN?"MX U29_KU#rA0$@CHD: )&# T=h0>Th]]9 Q BUFܷqP?F@?F7??FtYOȻ?P Bt`  XM6 ?ti|9O__Bu aquo7E>KJASTfw?X ?Az@bbb! ( 6f )  '+ ߈?I-Z#g!z A Qu +%/,8)E!'+ k(?r+o,(a4 8) E"#{ #BtΜG8tU:Ba L>2?!88;D;68?5;.D; tj>8LALA1 `?CopyrigTt (c)@2`09@M{@cy@oKss@fBrAru@Aa@is@n.@ 0l@ yHsBe@ey@v@d@v2hb<M#l1# M@?4>UhkQkQPjA6Q>1| L`Fd~j]J}v)M]M 6)fMB@Wa\ k_稇avCe=ESeR9Y? l`gESeFeabkod iSQޗh~??5@+ c;Ꞗi"2_U3rA/04sCHD: )&# T=h0>Th]]9 Q BUFdHJAUVJp? ?Awz@bbb! ( " '+ ' qq?z A 9 +/.&68))E! '+ g?W-sU"!n*a4 8) "u#{ u#D!t?8tM:Bav L62?!08;<;60?5u;<; tb>){8DADA1 `?CopyrigTt (c){@]2`09{@Ms@]cq@osk@fyBRjArm@Aay@ik@Wn.{@ 0l@U qHsBe@eq@v@d@n2h<M#l# M@?]UhzbA.Q61b L` djM"aQ  6fM{"@Ftnq\[3K_,7e5EGe<\ֽE?Nf[ N_qd7eEGec6i\m jOf܋h2_U3rA'04msCHD: )&# T=h0>Th]]9 Q BUF)8/?FFT?F!/$?Fᱝ̻?P Bt`  vNC?tG(ϛT H0Bu auo7G>JTA0f88v?WYALv@z bbb!'n 6 % )! ! ,+ <?' o?z #a47% =) J""#K#{% (+ ( tQ#BXYщ?A> ?A "+(/+/=/O/a!y:5rl/@ sU10( /???? "?0<2?M{!b 1/Ĝ/*%,"t:"t>TQQ'@`?CopyrigTt (c)FP2`09FPM>PcTh]]9 PB IAUFJ1>?Fl?FF(e?F=PFo?P t`  0!?trj5q+ tЖZu Mauo8A    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FO#۲?Fo2zP dj|aW{ޓ8cUF:&x0?F?F?F_'qoO 0Yigull4aj?ON~f0c?o+ fEeޖe?F<'?F@]?FwF%ޕol}dQ3ll BXl귺m|bBhaUe`ba`?FV ?FTh oflPI3qoiؿkeðl8` |G?T?heFnF$O?F o* leowky?{ft?Gg˂a,8b1;MHD: )&# ;h0>Th]]9 PB IAUFGF?FEs?F+\(?F~@ t?P t`  9`Ik?t)6^u -ZrqKZu Mauo8I>    PEJƱZVJp!?&. 
?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FMbX9?F4ҫP dj}ah8cUF[~K?F|D?F+Xc?F&PRcGoO 0YiM+FllZaj}0%|f0c̭ fEe.V?Fȿ̅)`oSt?F?(X:olGGTll .Xl{e{|?rww@,eaUe:&x0?F%Clw?FAz?F 1vņflux1,oiFA(]`t߼ |~Th]]9 PB IAUFGF?FKDֳ?F+\(?F~@ t?P t`  9`Ik?t┯4;u -ZrqKZu Mauo8J>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FMbX9?F4ҫP dj}ah8cUF[~K?F|D?F+Xc?F&PRcGoO 0YiM+FllZaj}0%|f0c̭ fEe.V?Fȿ̅)`oSt?F?(X:olGGTll .Xl{e{|rww@haUe:&x0?F%Clw?FAz?F 1vņflux1qoiFA(]`t߼ |~Th]]9 PB IAUFXIn?F    PEJڍU[\/(?&. ?AbbY!z@Ae bbCt c(a4b  # b(h#{ h#΍t(tI*#0f88vb.#i L5<1v(&$7/(D%"6/5;0( t.*B#11`?CopyrigTt] c).@2`W09.@M&@c$@os@f,BAr @YAa,@i@n}..@ Alt@U $HsxBeL@e$@v@dl@"h<M#l# Mt@?]UhzAA!J@#FMbX9?F$x0OԛP dj}תa4(SUF[~K?FE?FSc?F?#oO 0YM+Fӱ\*aj,.}a$?# XEU"n?F +?FbEd?F v<olTBJ\LMYBltavXlp@,UEUv/?Fpvw;pg?F $hއ?FLvo~l/AD0,YJg ַBl)$ԞXlDB:XU^F.fّPKk^Rl_[Ei?1GAf`d?Sq+a,(bvHD: )&# ;h0>Th]]9 PB IAUFXIn?F(?F+\(?F-ku?P t`  -k?tEeoEu -+CmoLZu Mauo8L>    PEJڍU[\/(?&. ?AbbY!z@Ae bbCt c(a4b  # b(h#{ h#΍t(tI*#0f88vb.#i L5<1v(&$7/(D%"6/5;0( t.*B#11`?CopyrigTt] c).@2`W09.@M&@c$@os@f,BAr @YAa,@i@n}..@ Alt@U $HsxBeL@e$@v@dl@"h<M#l# Mt@?]UhzAA!J@#FMbX9?F%x0OԛP dj}תa4(SUF[~K?FE?Fdc?F,?#oO 0YM+Fӱ\*aj ,.}a?# XEU"n?F{ +?F(%/Nd?F<olTBJ\MYBlU{XlpDc@XEUv/vw;pg?F^$hއ?Fvol/AD0qYf ַBls$ԞXl+?B:XU^F.fّPyKk^Rl_[Ei1GAf`dSOq+a,(bvHD: )&# T=h0>Th]]9 Q BUF贷F$?Ft\@?F\HRq?FƯK7?P Bt`  x#?t#ll$nZVBu auo7M>JA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZFهgϰ 7j bL\G@[ 6S-fMB:MbPdc mdb`GeGEWe$oˏZ\ai?bvIGeU@Vϰ+@_?F23Th]]9 Q BUF#/?FKGY?F\HRq?FƯK7?P Bt`  ~B?tl#ll$nZVBu auo7N>JA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZFهgϰ 7j bL\] 6-fMB:MbPdc Ɵmdb`GeGEWe$oZ\aibvIGeUWeVϰ+@?Vpk_xu j aGe@We_ooh2Kt]u3rA 4s2HD: )&# T=h0>Th]]9 Q BUF/ ֛9?F46Hr?F\HRq?FƯK7?P Bt`   a1?t 3JA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZFهgϰ 7j bL\] 6-fMB:MbPdc Ɵmdb`GeGEWenZ\aiavIGeUWeVϰ+@?Vpk_xu j aGe@We_ooh2Kt]u3rA 4s2HD: )&# T=h0>Th]]9 Q BUF[@#?FLO?Fo?FƯK7?P Bt`  ?tSӈEw8Xj$nZVBu acuo7AJA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt>(cu)>2`09>uM@c@os}@IfB|Ar@Aa@]i}@n.> 0Ul@ HsBe@e@v@d@B2h,<M#l# M@?U4>UhuQuQtAJ@QH1j LZFهϰ 7j bL\ѻ] 6-fMB:ʩMbPdc 2N3odb`GeGEWe$oZ\aibvIGeUWe4?Vpku j aGeWe_ooh2K]u3rA 4s2HD: )&# ;h0>Th]]9 PB IAUF?FtË.5?FQ?F?P t`  ?{R   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFbL7a 6{fM8cUFnDdTh]]9 PB IAUF4{"?F=N?FQ?F?P t`  ^}W;o?t9pČ( 뾅@l* 9u auo8R>   ] EJ]_DRb(?&. 
?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFbL7a 6{fM8(a?nDFF!@~qWoO 0dTc I\rdcd bjUkgaPbeEef"v?Fr6d?F?kĀqo`OivddlXr`tF0sK|٢jr3t!KU:G}?{]rLl} ]|q3xnTrg?F<ے7pooZgtfio,V|=] 5ve&1?Fpn?FTh]]9 PB IAUFGC<-?Ff?FQ?F?P t`  L?RD)o?t? )Č( 뾅@l* 9u auo8S>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFbL7a 6{fM8cUFnDFK!@?FDqWoO 0dTc I\addbjUWkgabeE(aFjv?Fr6d?FkĀAqo`Oi^j^r`t0sK#|pr9t!KU:M}{]rYLl}]|9xLufTrg?F<7vodObdcZgtfi,V#|=c ;vLu&1?Fpn?FTh]]9 PB IAUFĊ?Fn6b?FQ?F?P t`  ro?tpwZČ( 뾅@l* 9u auo8T>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFbL7a 6{fM8(a?D +F2|>?FW@WoO 0dTc rdcd bj{^ea͂beEef"v?Fddd?F܃q o`OivddlXr`tuqEÊ|:jr3t!KU:G}{]r,Ll}]|q3xngqrg?F~pooZgtML|\ 5ve&1?FWf?F#0bĂ|[m낪 z'ǵ(|F$$!ayTh]]9 Q BUF贷F$?FlIJ?F\HRq?F#o˯Z?P Bt`  x#?tx#ll#VBu auo7U>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? M@?(>Uh7E P+AA!{ L:F A7<6j% bRzg9<2 "\MbPdS mT W5U6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF#/?F ?F\HRq?Fn˯Z?P Bt`  ~B?t AF#ll#VBu auo7V>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? M@?(>Uh7E P+AA!{ L:F<6j% bRzg9ǩ<2 "\MbPdS mT W5UO6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF/ ֛9?FD?F\HRq?Fn˯Z?P Bt`   a1?t(9Sj#ll#VBu auo7W>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? M@?(>Uh7E P+AA!{ L:F6j% bT ü2 "\MbPdS mT W5UO6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF[@#?F\o]?Fo?Fn˯Z?P Bt`  ?tF]8Xj#VBu auo7X>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt>(c)>2`09>M<@c:@o%s4@fBB3Ar6@oAuaB@i4@n.>_ Al@ :HUsBeb@e:@v@-d@2`h/1<M#l1#7? PM@?(>Uh7E +AA!{ L:F<F6j% bRzg9<2 "\ʩ7MbPdS 2NoT W5UO?6ik0YaYvq 2UHT5D B &# T>h0JTtiiM ENUFlQ?Fڋ?FPaW?F ru?P WNtQ` c͕.:7?tUr=Ki8;(\I2-nNuj mu){YE0f88wv?AVQ Ӎ!@#b#m@b  0#  #W(3 (o+&T" #t#NĽ/?. ?M"&$a%$$,/>*L%i,T"tΞ*T"tR.R11#`?Copyrig`t (c)02l090M0c0os0f21r01a0i0n.0 Al@ 8sBe0e0v0@d@"Vt!X#xB #/ @T,UtE1UA-!@nZ pvm?S׮(%UQG@Y9"S\FkV5%UЗn@‹Pهϰ=\WQS\OXbiTQY%_7_I_[_DQOOOO__^̔ԍ?f Q́)f@vVP|?5^BlӿIY:6Xl?Ib ʏ܏EABTfxF\HRqCk?-q_WaW?ϰ+@"Bl!H,BXQnB)fFޖBoT[inJ\ۙgyAHD: )&# T=h0>Th]]9 Q BUFhY?FlIJ?F&1?F3(e?P Bt`  6n?t:m4tMuVBu auo7Z>JAS`? ?Az@bbb! ( 6 )  '+ ߈?/I-Z#,z A* " ././@/R+Yg/y'sU1,a4 ) "#{ #BtΜg8tu:Ba L^2!X8;d;6X?E;.d; t͊>8lAlA `?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Aa@i@n.@ 0l@ HsBe@e@v@d@2hb<M#l1# M@?4>UhQQPAVQ^1| LFX}  7j b2ɏOR] 6CfMB:ZMbXdc $I$I܍+d 2aa]Eme'la'iL.]eU@9/$Ew?FslAka,dHޓּgmeoo(oh2gyu3rA 4s2HD: )&# T=h0>Th]]9 Q BUFy?Flo]9?F&1?F3(e?P Bt`  7 ?t)!m4tMuBu ,duo7[>JAS`? ?Az@bbb! 
( 6 )  '+ ߈?/I-Z#mg!z A " ././@/R+,g/y'sU1,ar4 )] "#{t #BtNg8tu:Ba PL^2!X8;d;Q6X?E;d; t͊>8lAlA `?CopyrigTt (c)@2`09@M@c@o%s@fBAr@Aua@i@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhQQA(VQ^1| LZFX} g 7j b2ɶO] 6CfMB:ZMbXdc $I$I܍+d A2aa]Emekla'i.G]eU@;0$E?[FTh]]9 Q BUF߹e?F$jߤ?F!?FTUUUɺ?P Bt`  ?t&S= ףc?ckBu5 auon7\>)JAUVJp? ?Awz@b_bb! (݉ " '+ ' qq?z A  +S/.&68))E! '+ g?5W-sUU,p(a4 8) "u#{ u#D!tΜ?8tM:Bav L62?!08;<;60?5;.<; tb>{8DADA1 `?CopyrigTt (c){@2`09{@Ms@cq@oKsk@fyBjArm@Aay@ik@n.{@ 0l@ qHsBe@eq@v@d@n2hb<M#l1# M@? Uh,cQcQ] '1PbA.Q61b L`w"?FMDA dj? |m cuۡ'1 6!1fM{":F$3<6d kS=ѣ<n2{!5E[nLa vogaKeE[eV_@cbc g@]amr\GDž}iNu?FS%f|]tElFTh]]9 Q BUFtx?F/?F!?FTUUUɺ?P Bt`  K_?t= ףc?ckBu5 auon7]>)JAUVJp? ?Awz@b_bb! (݉ " '+ ' qq?z A  +S/.&68))E! '+ g?uW-sUUn*a4 8) "u#{ u#D!tΜ?8tM:Bav L62?!08;<;60?5;.<; tb>{8DADA1 `?CopyrigTt (c){@2`09{@Ms@cq@oKsk@fyBjArm@Aay@ik@n.{@ 0l@ qHsBe@eq@v@d@n2hb<M#l1# M@? Uh,cQcQ] '1PbA.Q61b L`w"?FMDA dj? |m cuۡ'1 6!1fM{":F$3<6d kS=ѣ<n2{!5E[nLa vogaKeE[eV_@cbc g@]amr\GDž}iNu?FS%f|]tElFTh]]9 Q BUFiv?F{<?FU?F3z[j˺?P Bt`  y]?tHIΧ&XRBu auo7^>JAa^!? ?Az@bbb! (n 6  )  '+ g~X?I-͊Z#!,[z A " ,/,/>/P-[?+g/y'Eي,a4 ) "!#{ #Btg8t)u:Ba L^2!*X8;d;6X?E;d; tR>8lAlA `?CopyrigTt (c)@2`09@M@c@os@fBAr@Aa@i@n.@ 0l@ HsBe@ej@v@d@2!h<M#l# M@? 8>UhoQQ]AVQ^1| L@?F҄! dj _٨la6ztJB: 7d /k?yԷ&Dh]EWemkp+,rb6c}bGeUWeFBϏ+@ol0ju?MDhnMbX0j`QDhnC<o+o"Th]]9 Q BUFT1#E2H?FK&Ua?FU?F3z[j˺?P Bt`  ?t &XRkBu5 auon7_>)JAa^!? ?Az@b_bb! (݉ 6 , )  '+ g~X?ZI-Z#!g![z A " ,/,/>/P-[?+g/y'Eي,a4 ) "!#{ #Btg8t)u:Ba L^2!*X8;d;6X?E;d; tR>8lAlA `?CopyrigTt (c)@2`09@M@c@os@fBAr@Aa@i@n.@ 0l@ HsBe@ej@v@d@2!h<M#l# M@? 8>UhoQQ]AVQ^1| L@?F҄! dj _٨la6ztJB: 7d /k?yԷ&Dh]EWemkp+ɉ 5d}bGeUWeFBϏ+@ol0ju?MDhnHMbX0j`DhWef9o-ai qGeUoo$o6o3UHLD )&# R>h 0JTlaaM>UFObFd0?F gj?F0U?F_gw?P >tA`  N+ru?t Tլ1,_Œ X">u eu!s7-`JɅJUU?HA9 j@7tt7 b:*H&tH  th*Nĵ) ?MB"b;)H&e8H m 3 H -US4o?FPjj?6ֈ@!QpoĬگ eoĩ́ F o 01G}}϶@b@mp?#nʼPV]O߸uy\l2_+ʦE}@p5"iРo~e*al|ٍşןHD: )&# ;h0>Th]]9 PB IAUF\6:?F}?FF(e?F=PFo?P t`  @?t^$h5q+ tЖZu Mauo8a>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FO#۲?Fo2zP dj|aW{ޓ8cUF:&x0?F?F?FX'qoO 0Yifull4ajS?ON~f0c?h+ fEeޖe?F<'?F@]?F\F%ޕol}dQ3ll BXy귺m|ԇbBhaUe```?FV ?F?Th ofl?PI3qoijeðl.` |=T?heFnF$OĒ?F o* leowk~y{f~tGg˂a ,8b1;MHD: )&# ;h0>Th]]9 PB IAUF\6:?F}?FF(e?F=PFo?P t`  @?t8ȍ5q+ tЖZu Mauo8b>    PEJƱZVJp!?&. ?Az@b/b@Z'݉e 6 ,)l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a4 9 .*2!#{:#t8'tϻ:Ua !L2$18K8F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FO#۲?Fo2zP dj|a{ޓ8cUF:&x0?F?F?F'qoO 0Yifull4ajK?ON~f0c?+ fEeޖe?F<'?F@]?FsF%ޕol}dQ3ll BXyR귺m|bBhaUe```?FV ?F?Th ofl?PI3qoiGkeðl2` |FT?heFnF*OĒ?F o* leowk~yf~tGg˂a ,8b1;MHD: )&# T=h0>Th]]9 Q BUFL&d?F`0?F(\µ?F^Sӻ?P Bt`  {G?t7F0 yA_P FBu aquo7c>KJm0U?<AL@tMt7( }b6*D&stD  td**Bıa) ?A.>"b7)D&ar4D [ 3 D 8/td/Kty.x1x1`?CopyrigTt (c)02`090M0c0oKs0f21r01a0i0n.0 Al0 8s2e0e0vZ @d0"h!<M#l#/MBl> @?UhLAA]J 1 A 1"7A1bA>!J`?o2z[ dUjaq:z׍40S 6VMՋB@_P\`_m;E,Ui5Uޖ??FQ[5lf\VU'EUY2Ruݯ?FU]˖7sf\pdGѿSYUWfe zx]om{Zc`,UUFRP\g[)JXAp|? rhQ#5 IXAYaLPւ?FتcP\T f\3$sXAp"۲ ElU1YA-k?FFoZP\fyf\!XAYX%?F8K7P\{ 䘷f\&SAY&1l?Fr>|]P\-]qiY?ip~ X1#g~jt-? GXAY?F ףp=P\' f\?P=RwX'RXT}7vP\__/vnTpډlX7U/$?vQ[E7Ժf\wUQY|>îqh|P\4GN"MX U29_KU#rA0$@CHD: )&# T=h0>Th]]9 Q BUF8?F43?F7??FtYOȻ?P Bt`  1=oJASTfw? ?Az@bbb! 
( 6 )  '+ ߈?/I-Z#mg!z A u +%/,8)E!'+ k(?r+o#?(a4 8) E"E#{ #BtG8tU:Ba L>2?!88;D;68?5u;D; tj>)8LALA1 `?CopyrigTt (c)@]2`09@M{@]cy@oss@fBRrAru@Aa@is@Wn.@ 0l@U yHsBe@ey@v@d@v2h<M#l#A M@?4>UhkQkQjA6Q>1| L`Fdj]JK}vM4@[ 6)fMB@Wa\ k_程avCe=E~SeR9Y? lgESeFeabkod i?SQޗh~?5@+ c;ꉞi2_U3rA/04sCHD: )&# T=h0>Th]]9 Q BUF2B9?FW]i\?F'l?F{Cƻ?P Bt`  ,i?t^̺㯝4\YDrVBu auo7e>JAUVJp? ?Awz@bbb! ( " '+ ' qq?z A 9 +/.&68))E! '+ g?W-sU"!n*a4 8) "u#{ u#D!t?8tM:Bav L62?!08;<;60?5u;<; tb>){8DADA1 `?CopyrigTt (c){@]2`09{@Ms@]cq@osk@fyBRjArm@Aay@ik@Wn.{@ 0l@U qHsBe@eq@v@d@n2h<M#l# M@?]UhzbA.Q61b L` djMaQ@ 6fM{"@Ftnq\[3K_,7e5EGe<\ֽE?Nf[ NA_qd7eEGec6i\m j#f,7e2_UB3rA'04msCHD: )&# T=h0>Th]]9 Q BUF] ?FtF?F!/$?Fᱝ̻?P Bt`  "?tEQTϛT H0VBu auo7f>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#BXY'?A> ?A "+(/+/=/O/a!y:5r{l/@ sU10( /???? "0aQQ'@`?CopyrigTt (c)FP2`09FPM>PcTh]]9 PB IAUFR?F|}Y?FF(e?F=PFo?P t`  (\#?tN뻫5q+ tЖZu Mauo8g>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FO#۲?Fo2zP dj|aW{ޓ8cUF:&x0?F?F?F_'qoO 0Yigull4aj?ON~f0c?o+ fEeޖe?F<'?F@]?FwF%ޕol}dQ3ll BXl귺m|bBhaUe`ba`?FV ?FTh oflPI3qoiؿkeðl8` |G?T?heFnF$O?F o* leowky?{ft?Gg˂a,8b1;MHD: )&# ;h0>Th]]9 PB IAUFpX?FEsR?F+\(?F~@ t?P t`  (TJ?th/3u -ZrqKZu L,duo8UAU    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FMbX9?F4ҫP dj}ah8cUF[~K?F|D?F+Xc?F&PRcGoO 0YiM+FllZaj}0%|f0c̭ fEe.V?Fȿ̅)`oSt?F?(X:olGGTll .Xl{e{|rww@haUe:&x0?F%Clw?FAz?F 1vņflux1qoiFA(]`t߼ |~Th]]9 PB IAUFpX?F<Dֳ`?F+\(?F~@ t?P t`  (TJ?tA u -ZrqKZu Mauo8i>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3FMbX9?F4ҫP dj}ah8cUF[~K?F|D?F+Xc?F&PRcGoO 0YiM+FllZaj}0%|f0c̭ fEe.V?Fȿ̅)`oSt?F?(X:olGGTll .Xl{e{|rww@haUe:&x0?F%Clw?FAz?F 1vņflux1qoiFA(]`t߼ |~Th]]9 PB IAUF$?F,w¾`?F+\(?F-ku?P t`  p&k?t?̈n=u -CmoLZu L,duo8j>    PEJڍU[\/(?&. ?AbbY!z@Ae bbCt c(a4b  # b(h#{ h#΍t(tI*#0f88vb.#i L5<1,(&$7/(D%"6/5;0( t.*B#11`?CopyrigTt] c).@2`W09.@M&@c$@os@f,BAr @YAa,@i@n}..@ Alt@U $HsxBeL@e$@v@dl@"h<M#l# Mt@?]UhzAA!J@#FMbX9?F$x0OԛP dj}תa4(SUF[~K?FE?FSc?F?#oO 0YM+Fӱ\*aj,.}a$?# XEU"n?F +?FbEd?F v<olTBJ\LMYBltavXlp@XEUv/?Fpvw;pg?F $hއ?FLvo~l/AD0qYJg ַBl)$ԞXlDB:XU^F.fّPKk^Rl_[Ei?1GAf`d?Sq+a,(bvHD: )&# ;h0>Th]]9 PB IAUF$?F(n?F+\(?F-ku?P t`  p&k?t A4u -+CmoLZu Mauo8k>    PEJڍU[\/(?&. ?AbbY!z@Ae bbCt c(a4b  # b(h#{ h#΍t(tI*#0f88vb.#i L5<1v(&$7/(D%"6/5;0( t.*B#11`?Copy_rigTt$_(c)$2`W09$M&@c$@os@f,BAr @YAa,@i@n}.$ Alt@U $HsxBeL@e$@v@dl@"h<M#l# Mt@?]UhzAA!J@#FMbX9?F%x0OԛP dj}תa4(SUF[~K?FE?Fdc?F,?#oO 0YM+Fӱ\*aj ,.}a?# XEU"n?F{ +?F(%/Nd?F<olTBJ\MYBlU{XlpDc@XEUv/vw;pg?F^$hއ?Fvol/AD0qYf ַBls$ԞXl+?B:XU^F.fّPyKk^Rl_[Ei1GAf`dSOq+a,(bvHD: )&# T=h0>Th]]9 Q BUFt[@#?Ft\?F\HRq?FƯK7?P Bt`  =0[?tRwq#ll$nZBu duo7l>JA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZFهgϰ 7j bL\GlR] 6S-fMB:MbPdc mdb`GeGEWe$oˏZ\ai?bvIGeU@Vϰ+@_?F23Th]]9 Q BUF偃?FKG?F\HRq?FƯK7?P Bt`  Xkw?tO [#ll$nZVBu auo7m>JA]^'? ?Az@b ( 6f  )ϓb= + _? 
' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZFهgϰ 7j bL\] 6-fMB:MbPdc Ɵmdb`GeGEWe$oZ\aibvIGeUWeVϰ+@?Vpk_xu j aGe@We_ooh2Kt]u3rA 4s2HD: )&# T=h0>Th]]9 Q BUF͜?F$6H?F\HRq?FƯK7?P Bt`  W׎?t #ll$nZVBu auo7n>JA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZFهgϰ 7j bL\G@[ 6S-fMB:MbPdc mdb`GeGEWenˏZ\ai?avIGeUWeVϰ+@?Vpk_xu j aGeWe _ooh2K]u:3rA 4s2HD: )&# T=h0>Th]]9 Q BUF[@#"?FJA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt>(cu)>2`09>uM@c@os}@IfB|Ar@Aa@]i}@n.> 0Ul@ HsBe@e@v@d@B2h,<M#l# M@?U4>UhuQuQtAJ@QH1j LZFهϰ 7j bL\ѻ}@[ 6-fMB:ʩMbPdc 2N3odb`GeGEWe$oZ\aibvIGeUWe4?Vpku j aGeWe_ooh2K]u3rA 4s2HD: )&# ;h0>Th]]9 PB IAUFv?FdË.?FQ?F?P t`  Smo?tϰոČ( 뾅@l* 9u duo8p>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFb7@c 6{fM8cUFnDdTh]]9 PB IAUFSQ=?F|=?FQ?F?P t`  4o?taVpRKČ( 뾅@l* 9u auo8q>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU K1(a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFbL7a 6{fM8(a?nDFF!@~qWoO 0dTc I\rdcd bjUkgaPbeEef"v?Fr6d?F?kĀqo`OivddlXr`tF0sK|٢jr3t!KU:G}?{]rLl} ]|q3xnTrg?F<ے7pooZgtfio,V|=] 5ve&1?Fpn?FTh]]9 PB IAUF!?F?FQ?F?P t`  + {o?tHUČ( 뾅@l* 9u auo8r>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFb7@c 6{fM8cUFnDFK!@?FDqWoO 0dTc I\addǼbjUkgabeE(aFjv?Fr6d?FkĀq o`Oi^j^r`t0sKÊ#|pr9t!KU:M}{]r,Ll}]|9xLufTrg?F<ے7vodObdcZgtfi,V #|=c ;vLu&1?Fpn?FTh]]9 PB IAUFĊ?Fn6b?FQ?F?P t`  o?t]eČ( 뾅@l* 9u auo8s>   ] EJ]_DRb(?&. ?Az@bY(e 6f h )ϓb k+ _?Z'e 88o?ze Ae s[+ h g/y//$# L:5 /sU"K1*a4 9 2(E#{ #t8tϤ:U a L2T18K;6Ԉ?@EK; tͺ>B40AA0`?CopyrigTwt c)@]2`09@M@]c@os@fBRAr@Aa@i@Wn.@ 0lPU HsRe@e@v/PdP2h<M#l#A MP? <>Uh$QQ(]A(Q1 LZ3;'xP 7jbbFb7@c 6{fM8(aD +F2|>?FW@WoO 0dTc ?rdcd bj{^WeabeEef"v?Fddd?F܃qo`OivddlXr`tuqE|E:jr3t!KU:G}{]rLl}]|q3xngqrg?F~pooZgt_ML|\ 5ve&1?FWf?F?#0b|[m낪 z'oǵ(|F$$!ayTh]]9 Q BUFt[@#?FlIJ?F\HRq?F#o˯Z?P Bt`  =0[?t7c`C#ll#Bu duo7t>JTA0f88v?WYϳAjL@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? M@?(>Uh7E P+AA!{ L:F A7<6j% bRzg9<2 "\MbPdS mT W5U6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF偃?F (?F\HRq?Fn˯Z?P Bt`  Xkw?tɣ#ll#VBu auo7u>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? M@?(>Uh7E P+AA!{ L:F<6j% bRzg9ǩ<2 "\MbPdS mT W5UO6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF͜?F4@?F\HRq?Fn˯Z?P Bt`  W׎?tϘsW~#ll#VBu auo7v>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? 
M@?(>Uh7E P+AA!{ L:F6j% bT ü2 "\MbPdS mT W5UO6ik0YaYvq 2HD: )&# T=h0>Th]]9 Q BUF[@#"?FLo]Y?Fo?Fn˯Z?P Bt`  ?tC2.8Xj#VBu auo7w>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt>(c)>2`09>M<@c:@o%s4@fBB3Ar6@oAuaB@i4@n.>_ Al@ :HUsBeb@e:@v@-d@2`h/1<M#l1#7? PM@?(>Uh7E +AA!{ L:F<F6j% bRzg9<2 "\ʩ7MbPdS 2NoT W5UO?6ik0YaYvq 2UHT5D B &# T>h0JTtiiM ENUF|u(?Fڋ?FPaW?F ru?P WNtQ` 786R7?tUau}wAi8;(\I2-nNuj mu){xE0f88wv?AVQ Ӎ!@#b#m@b  0#  #W(3 (o+&T" #t#NĽ/?. ?M"&$a%$$,/>*L%i,T"tΞ*T"tR.R11#`?Copyrig`t (c)02l090M0c0os0f21r01a0i0n.0 Al@ 8sBe0e0v0@d@"Vt!X#xB #/ @T,UtE1UA-!@nZ pvm?S׮(%UQG@Y9"S\FkV5%UЗn@‹Pهϰ=\WQS\OXbiTQY%_7_I_[_DQOOOO__^̔ԍ?f Q́)f@vVP|?5^BlӿIY:6Xl?Ib ʏ܏EABTfxF\HRqCk?-q_WaW?ϰ+@"Bl!H,BXQnB)fFޖBoT[inJ\ۙgyAHD: )&# T=h0>Th]]9 Q BUF4@ ,?FlIJ/?F&1?F3(e?P Bt`  N!?tw;%m4tMuVBu auo7y>JAS`? ?Az@bbb! ( 6 )  '+ ߈?/I-Z#mg!z A " ././@/R+,g/y'sU1,ar4 )] "#{t #BtNg8tu:Ba PL^2!X8;d;Q6X?E;d; t͊>8lAlA `?CopyrigTt (c)@2`09@M@c@o%s@fBAr@Aua@i@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhQQA(VQ^1| LZFX} g 7j b2ɶGO@[ 6SCfMB:ZMbXdc $I$I܍+d `2aa]Eme'la'iL.G]eU@9/$E?;FslAka,dHּgmeoo(oh2gyu3rA 4s2HD: )&# T=h0>Th]]9 Q BUFnټk-?F.޺s?F&1?F3(e?P Bt`  3?tʼnm4tMuVBu auo7z>JAS`? ?Az@bbb! ( 6 )  '+ ߈?/I-Z#z A* " ././@/R+Yg/y'sU1,a4 ) "#{ #BtΜg8tu:Ba L^2!X8;d;6X?E;.d; t͊>8lAlA `?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Aa@i@n.@ 0l@ HsBe@e@v@d@2hb<M#l1# M@?4>UhQQPAVQ^1| LZFX}  7j b2/O]M 6CfMB:ZMbX͉dc ?$I$I܍+d 2aa]Emekla'i.]eU@;0$E?FTh]]9 Q BUFܲ?Fj$?F!?FTUUUɺ?P Bt`  &?t§= ףc?ckBu5 auon7{>)JAUVJp? ?Awz@b_bb! (݉ " '+ ' qq?z A  +S/.&68))E! '+ g?uW-sUUn*a4 8) "u#{ u#D!tΜ?8tM:Bav L62?!08;<;60?5;.<; tb>{8DADA1 `?CopyrigTt (c){@2`09{@Ms@cq@oKsk@fyBjArm@Aay@ik@n.{@ 0l@ qHsBe@eq@v@d@n2hb<M#l1# M@? Uh,cQcQ] '1PbA.Q61b L`w"?FMDA dj? |m cuۡ'1 6!1fM{":F$3<6d kS=ѣ<n2{!5E[nLa vogaKeE[eV_@cbc g@]amr\GDž}iNu?FS%f|]tElFTh]]9 Q BUF:C<?F8!)^?F!?FTUUUɺ?P Bt`  Ȼ?ޫd?tI= ףc?ckBu5 auon7|>)JAUVJp? ?Awz@b_bb! (݉ " '+ ' qq?z A  +S/.&68))E! '+ g?uW-sUUn*a4 8) "u#{ u#D!tΜ?8tM:Bav L62?!08;<;60?5;.<; tb>{8DADA1 `?CopyrigTt (c){@2`09{@Ms@cq@oKsk@fyBjArm@Aay@ik@n.{@ 0l@ qHsBe@eq@v@d@n2hb<M#l1# M@? Uh,cQcQ] '1PbA.Q61b L`w"?FMDA dj? |m cuۡ'1 6!1fM{":F$3<6d kS=ѣ<n2{!5E[nLa vogaKeE[eV_@cbc g@]amr\GDž}iNu?FS%f|]tElFTh]]9 Q BUFx]?F{<W?FU?F3z[j˺?P Bt`  Q`7lg?t M&XRkBu5 auon7}>)JAa^!? ?Az@b_bb! (݉ 6 , )  '+ g~X?ZI-Z#!g![z A " ,/,/>/P-[?+g/y'Eي,a4 ) "!#{ #Btg8t)u:Ba L^2!*X8;d;6X?E;d; tR>8lAlA `?CopyrigTt (c)@2`09@M@c@os@fBAr@Aa@i@n.@ 0l@ HsBe@ej@v@d@2!h<M#l# M@? 8>UhoQQ]AVQ^1| L@?F҄! dj _٨la6ztJB: 7d /k?yԷ&Dh]EWemkp+ɉ 5d}bGeUWeFBϏ+@ol0ju?MDhnMbX0j`QDhnC<o+o"Th]]9 Q BUFT1#E2?FxLΉ?FU?F3z[j˺?P Bt`  S|.?tk, &XRkBu5 auon7~>)JAa^!? ?Az@b_bb! (݉ 6 , )  '+ g~X?ZI-Z#!g![z A " ,/,/>/P-[?+g/y'Eي,a4 ) "!#{ #Btg8t)u:Ba L^2!*X8;d;6X?E;d; tR>8lAlA `?CopyrigTt (c)@2`09@M@c@os@fBAr@Aa@i@n.@ 0l@ HsBe@ej@v@d@2!h<M#l# M@? 8>UhoQQ]AVQ^1| L@?F҄! dj _٨la6ztJB: 7d /k?yԷ&Dh]EWemkp+ɉ 5d}bGeUWeFBϏ+@ol0ju?MDhnHMbX0j`DhWef9o-ai qGeUoo$o6o3UHLD )&# R>h 0JTlaaM>UF+1#E2?Fgj?F0U?F_gw?P >tA`  9|4?t <'Ŵ\1,_Œ X">u eu!s7-JɅJUU?HA9 j@7tt7 b:*H&tH  th*Nĵ) ?MB"b;)H&e8H m 3 H -US4o?FPjj?6ֈ@!QpoĬگ eoĩ́ F o 01G}}϶@b@mp?#nʼPV]O߸uy\l2_+ʦE}@p5"iРo~e*al|ٍşןHD: )&# ;h0>Th]]9 PB IAUF~Q?F|}Y?FF(e?F=PFo?P t`  IGr?ttƯ.5q+ ~Жu auo8M> `  EJƱZVJp!?&. 
[Binary Visio (.vsd) drawing data omitted: the embedded OLE stream is not recoverable as text. The only legible fragments are network-diagram stencil/shape names ("Drag onto the page..." hint text and master shapes such as FT/server, computer, distributed network, email server, management server, e-commerce server, public/private key, comm-link network peripheral device, and terminal/server/client network equipment), indicating this region of the archive holds Visio network-architecture diagram files rather than source text.]
X, I6]+ Ӂ?FI:5;iU,=A  D)U= 1qRdvE[%ڻ2i{w׾mQ7Z'ütUl?F3&Gsj3jܙ-Ow) S-ۇ4H-ɫ9].E|"?F&ti}S`Aܙx@ ///??Rk/}/////b"m X i)r~ٜوWjFQ"?|PP?Fo%s=De9gsܙaEo#IOl֪E@e~iY/|dM?v۶ :YhSQv}T\Pˇi/ON|aܙz?`$OO__,_uOOOнOOOI@B?FKTG.i`AٜlOo->_|?F!i_ ]gٜ?, )ʼn!|?F3Ck*}DxZٜ?]QVyhk߻?FX[g]/I ٜ?bvd Co '9Koooooopj T&}j8>ͯ߮\nE@L@Gٜ|gxgx0Y3N,*ݖ}T  MlfahhÔ}Dq:6};p/"_f/-?Qc я$]+w(0p:dy~͇JO} M?p 0!s?FYY4ڼ~iS!(p|ʟEDk}'pҧO?zA'xў?R_F!] EQ>sNϽ=(V-Sewaۯ#5ٿ9/{?i߼$&B4O% C3i?y}o3qo?Fq ZiD!'FA\]0$=ڵ}SI6?Ͳ Ai{ߍߟI 0BT~#a\.LSA-BN^{q"?p?Fe6i㪡<_#AA}2![?F1:!6F! [jtE\qRD%yjǍ6Pݿ#V<>tl )ae"%7I[mzj3?C_%?Zzh̿Z} ?-P N?/????eK?]?o????yxz`h qDa.??>#L3ўh>/Ew9&:/ ЎP.JhHkOQ qӠħOʜ- &h̜zfUm@O___oa_s_____}2Z?F$'_{uh̆Ia{V?Uoǿ7e~h̰(ecy) - mz-$.hhsZ}2Ca6o`(~ "?F0.MX×mEyoށ顷6\Zq%բ}t7Z?F~ Oh?H |A?UYwkE?F?>C ?<a,:wPI5) ߈2/WfsM% e'MvǃaËgO?F%F92\aI}|b:B9 /VA ȒTH7D6D # 4# J  !"#$%&'()*+,-./012*3>5haATaaMYUF%>h`?Fr<."L?FcG?Fqј?PM ]u\`^?u#t  EF't !#"UY%'~P*%8'C"Ut2N贁Nk?@d\#&A b(++&Y/R/$78?#bMd4z (+b/513%R 11;3"`?Copyriug t(cU 09M@]c@os @fBR Ar@GAa i @un Alb@U HsfBe:@e@vx@d"1Y]4i<3K 7966i@15P6XGW'0UBQR96W1 VZlY]U]l@TGA#1(!l[?FuO[0:$}#)ߓ=b!8e,չ?FAOw(lxrNj$>la.PXTh|3ecztaLN E'm:"n*&G^|%[a5:q m5a)k?||=a Ed0UgEo"o4oFoT#______evHG?F3'y~|3`[]Ք|cToev_kfP?F;~|S3ٔ|\£oevV!ex<ڤ~|{ |ev-+3t#~|;6nRcoj^}.@RdQUev~؇Z?Fi~|+|ܔ|JCjev<)OO~|p )yBr7ev1 aNְk ~Ůeąܩ#A 5OfYQWgn?FO*~|Ch|`-jݯev%~?&nq΃0xi7y :kOop ~|4ZO勿]oρϓϗU+=Oσ+ӑ!]~|:<]їyT}ςT ?FO'>]P~|qH~|ο:.H!|ςto w'"~| 5(CşVPevR`t)/~|?7>-<Ѫ|Ra 1CUgevo I?Fb7~~|/ͼ셡yoevkY?F>_ ~|3y evMXy"}~|ω9?lev ?< "~|^_\˱R&8J\n|by\M[`bD'nnEm|<z_m6itaW}nk.V̲ocxL45MR9?}.mE?KxSi#;m1LL'7,}K^>P/Si^}///*=! B/T/f/x///byYEvk덚4D2=y_mS M߾}zNmWUdw4֢DB&™t|d˽\Li#hމQ?7B%$Ĩ^QUHnOOO_8& ^OpOOOOObyV~%װ!(E>ܩu;ofUN1[D}~XXtۯ~'haCjmt{ܿ`Xn b_փ%hkqwu~R)uoo 8& zoooooo~KL}"_!eΟW?N eFjӈHi>DYH'h(%i?}ou MMXjQ %f[݉gm`¥ G8j ,E/R&5gS>Na8|'hhv_ ߖ3bW7mȧս[PebݱJ}:?nuTѺ 2DV8& į֯ &i~FR?[M%- q?j  DJP}uQ9/'hdfO?FV+p\[Wvpa9| wdѿOtZ(UxzêJUfeC(6,b#l+eR(߫\(V1:9Oi:4ߋcG1!Gq}9 ?C)uNUBv(Bo?FO& v!~5I-lym(,.$23G_CRHCM4q6vl~_}5#7o Tfxy?F{UX>͘ Uܼ*d4Ij@ ?Fi; Ly4GXj9 g1re-#d\D-h:Pc}Lw4mv/*ĩMQy S:U ߟpʟy7j?FZ?W+}(U*<SW?F .턟bGF赁9 <8߁-`Z͸~~5 CTc/ws}e v#P3ׂo(r.i,"4Nr¿Կy=Lu?F7G3z 9 ˇ 5F?FJ.3$w 1tK -o9a|ዙCѾ;-z0ƛ+WzBg/w-%&G,ߪ 2Dؚ5F] MH,>PNr#߽v<߯0ʨ)QMLTgMUs}I$FATG}=u2f`?}1&OwuXp+MEp41}o7o9ݭ&fgPy}}M?=;??1O2DVh6 yJ]}z aFgRlnk6+nɣj4 anf-n_h?+nj^AoxvTN.g!uh-Vo@MVp >ߜ)sPU 1D'[}LG*>QoN/`/r//.V//+/=/v(#xEAGw} }%(Lʧ1;$gKG"V2w]xR)vJD)3 7F!nLigqN|ݭƃN'QM7ZSF^ jO|OOOJv?O O2ODOVOOh_D PicPK>ѩ݅cDchQ>M cAѩ JBɆf!\.S6x~r|#ݭ#.CqQzUȴ߯b~(oooofo*o<* 8}\q3h^ ߻l~ϢϴQ9 AS@XC^}|/9K ս5١TB㰉\JA**=@$,Q9G\鐒?FU ^d /g [A\9mpï?FnrB=k X* S(N A9]'Nx =$.T~v )\A?S7|#!8+|z AM}D(:LfQ9j,,j?F4*a.ȉ\zFq&ǻ}w^Ym[6?FG!Ɖ\0#Jh@ /x|I]11\\o=P|G>q2:]_1BM4=1vq"_]=H` @]//?A?S?e?e ////??/ԸƑ?F qT}S҂ ϵymޯZ?FQo8G} ϊx"1IQ }M}>~~^{ªy1}eYQrŁ& [ׂox^.cyOK_]_o__+!OO__$_6_/ٗW(?F 5w*zi۟灜i{ސ_26w?Fߒ(xJJ쑻+Uϰ|]FuqyUq~iT ]~[4vp=!;L }?LYh- pzK"o .@R/R7?Fhߤ? QTJZ׬23l%?FO& ?GSۈQ?5])B.zhɅ($Q1(5} X㺾 ?lرc#&8J\n/_?FdW2ʼn@-1ş9ȟ2֮tp?F<*fB s1]V%sAS})Tk{#/v2OB!XLi.݉}1̾MZͯÿտ$0BTfx~P _! 
QR6#I -hCFUz5|Y}N?#TG}"F$=c*c,j4Z>[T-G#ϊt9a?FQڧc,H ~{%a -2FA`~hLr Xޟno#:L$FFǕgtg'bl[;| ߛ{-,>PbtG)AQHZ?F;ƃOc,t[|{r6<9{mgE?F Cg=:*\R鬱YG/7)- I;-Z?N5şZ=/I'nD5-!bp1{ IWx^q2.BTfx\PX{Zxߛwtu,]R |{A&BQbhì?FB#5һ5(B]-nf!Pb,$21Nw gnV~B]gyz9^p 7QoGI|q~-9@d]/// :/^/p/////\ˍ|x?FȯI|hLYosլ{m|^>`tzϞI|ύKYʰ;!l?bsdzNQN鮢I|\Jm^*Hq^o_`n~APpⶍߥzȣs^ˀ≜O__(ZA}OOOOOO_~ޯ1}^i8Z`֋ ŸLS$W+wvqO j2~Lx/b6^Ni~ KC} mN;-럴 =T=qip8/~ h ;-#霝 2Dz1oooooo\~DG߷&wz P}/p쯹BxN]dK]y.8@h&V&%-2|ЏLWM;| =yidݑ%%-tڨNR 2DV2ď֏ : O21f& =Q>Nsz99]yX21 ;>#94oQ>ݠ_g?]^%mT`>rG.l(5sm H_`?#~6\9Ҵ} қhnBk=Oas#3ί(\Ofe#y~NQmr갯]ߕ 6m^ϕJY9]7}}NQ X]n;m^uԱ_^?$R?F[5l<;Qq<7B1b;-Y='!"/;mZoY?4cgC2oXj|ߎ84 2D\M'g\elh \~5[Vq:a,8ω?F0 iWi&4\GbF|aS)H9z#/<^bAL_߶,fdlEH{=uU%5*eBn v;!#4?FADo)XLȚ|bv!7A!*EKGalH`.P%ҧqp6t KT4HD #h34>T33 AMT JUFM&d2?F W`?P6 >JuM` ?juJt  5]It=Wp= ףIURIl#>5 ՆJ[ ;? & ?Ab A@N0"b8$*e!e!5 g`?CopyrigTt (c) 2U0 9 M c os f"!rԎ !a i n}. Al U (s"e e 5v d ^!Eb$-,q# q' &2zGz?=@MJ69#fE 3z; 9(b8)E&?,0"E*iieE9 jX:72'0UvBŴ &!%$0 D-FAJlJAh/7!h0!ZFg, E <ēNmP#  &OR|VJa9PY]PPReS[RUV%:]x#c S( bHD H# h4>T]]9 MT JUFp8?F@ W?P6 >JuM` ?juJt  f\It=W rI*RIl#>5 J[? & ?AbA@0"'b8$e!e!5 g`?CopyrigTt (c) 20 9 M c oKs f"!r !a i n. Al (s"e e v d ^!EHb$-q# q' &ڝt2U2?@MJ69(5,E&?,2E*iieE9 X:7'03Ŵ &!%$0 TF'JlJ8>UhAA `!1ZF? `, E <A#] =RjVJ:VjCS |b>RSS6 QV%U\.;\QNYu=!ٱQE^?&d2_MZW#h415U)Hb?*V[E! YB\JQUU+Vp;_MZw@0$[D#h2$_6U2rA >#JsS(#e_zH"%u@s $I/bO`MF(0E# 1GZOB HX H~dմ I@+^C 8s}GZ CZfZ SHZ XZ \Z H`HZ [cRZ g^] k+Z n6X[ *r[ -uAh[ nwp:[ z&] f x] qH'UFD  h(^TYYBUFjZ?F~??x<F BPG(?P } X ]B66]  TTUTTT9T9d9iȅHBU?? ?B@L&_d2?@-Q(\.c(A.sU!&/)1(  0%[B`(file,s0rv2,compuUt$4d0s40r0b24d0n0tw,0rk!e^| (QSG& i?!3E?? ?qq wp  p w{{ qw qwqw{pqpqwqt}pqppw'wwwbwqpwww|zwxvDrag onut hepe.Rih-c]lckaUdcos UPo etQeAvUwreP 5aabjZֿ~??cN׿ mF1??*t {V↑T*J?@ ?UG DF P# h @T(PYY# QU@jZ?@~??F?Pn} u{` ?Su#؍>>HX HlHYHH EO.LO4:UDDXXUllk'"&R",I'U25ĉ%4 ?"u'4b0{N[`:Q1#2T6"!^U9$"3&9"/L6 2&!'ݯ>e@ÿ@qn@?@-fB?mDcFԸ0)Bu/@`҈-1Bu@rCC 5GAB"2Cm9u`u `"[ b!u`%3Qo1o7B`?CopyrigPt0(c)020P90MPc.PosPfRQrPQaPiPn 0Al` XsbePePv,`d/`VPs_SBNcPm_!#5P8a` 6!bz1AXE:FG$1 8dT=`XQj2`]r` M`nuPaPt+arp{` ?bup` BO=ݛ`S,t@']p5%m-T݁<9ofm'Qh>qݏ,BT'.at'xXf@>pÿЇobLZdq [ܒrr$ avvvrsW"`ERol]16 бeI`~bb a}abr@6uP`TaSo1u"_u'I1I2A!b[aahaauhAaYypP`Z u*`q`*q`azAx'F5_db&bpSF3s͵NSYJEbrV@ΓB!e 8d : LqW,@rB:@0ɏT}sasahYV搹Mny ` %߀P&S41sa`NEWOWRKb2H PRq!RRI%SQio$o¹h^A'70Uc?!0oHD # =hj0>T h]]9  5AU@jZ?F͂T??Fx?SF?P6 jt`  o?tk[>(A Vu aSuo.]&A]  *ԮJU2zGzt?@L#&A@bA#z7 5#1bO(b])j+$j&[ "*(w "?& ?AU$l/%B115 `?CopyrigTt (cu)Q02`09Q0uMI0cG0osA0IfO2@1rC0|1aO0iA0n.Q0 WAl0 G8s2Ueo0eG0v0d0"1E]4MŤ-&3 &7&l>]Uhz811U!J!u6?F+\.>djIIa? >"(EǑ I%K9E 5EU . K ;;5QIWEEEF _KnSA?8@@Bu?FDR> ?Nk"\6`bmMM\j=am {:e P> #? D"? 5-g?j4B7!B{oogr57!ibDAbE9 MuX7_'0U~rN!$a)5 5vIzHD # =hj0>T h]]9  qAU@;?FI5K?@xEL?6ѿP6 t`  bm&Qo?t@@L]WI,#u aiuoL> JU2zGzt;?@L?&A@b}#zs q#1b(b)+$&[9#H",ɺ (wH"?& ?A$ /%9#BV1V15 `?CopyrigTt (cu)02`090uM0c0os}0If2|1r01a0i}0n.0 WAl0 8s2Ue0e0v0d0"O1E]S4MŤ-9#b3 b7&l>]Uhzt1.A!JN!m.%-+0djV1ax߃S(FJ-&?F mO"> ?Nk"L[Majnha_:e P> #? BK=o? n-?j4Bs!B{__Wr5s!D8E6V 6?wj_|_ ^XE.U@z T6?F @' d_mOoT \VyWxRkrVo_bVh*g:n"'UaEjz36>CN>?Bn}]zGvN7V iDAbE9 MX+G'0UJPN!4ae5 MaHD # =hj0>T h]]9  U@jpiH?F^?@$?F]F?P6 t`  !a&ao?t,F=۳=u;u auo>J2zGzwt?@ʐLAw@b#z1b](b!).+I.&[-m(?0& ?A$I0/Q%!!5 `?CopyrigTt (c)02`090M 0c 0os0f21r0@1a0i0n.0 Al[0 8s_2e30e 0vq0dS0T"!E]$M-# %'&l>]Uhz!1!JF% djc~YZ(_N9zOMGE%_EH$SwO"K ңHE ^&_8_WE_OqOHOOi&DAbPE9 MX7'0UUfb>!$a % f1jHD # Uh4>T#]]9 M5AU@jZ?FL|+?@?F({~?P6 JuM` ?u JYt )t7zEA2&L_D?. 
> >tJU2q K?=@MJ&A@-b9(+bG)T+T&J[#"#a?& ?AH?/`)#115 {`?CopyrigTt (c);020G09;0M30c10os+0f92*1r-0f1ai+0n.;0 Al0 18s2eY0e10v0dy0z"!4B#e-?3 7)& b 0K8>U# ]A]A "1!ъ!PzQ <%OYAJEU . K;;\%W%EF($L[.FX5Eai%?F 6?wL;] W-\TB⤲8?F@&SA?F @ ?Nk"L'`bmMW_Ye[ٯ7 VK=F: [jr ? D"? XZvD?ta@Boogr5;!QEx]SiLO MLGXa~WOTZiAle59 XG"'0Uc&N!$05 .HD # =hj0>T h]]9  IAU@K?FOgVb?FY%?Fwᳬ?PJW?>t`  ^Z?t#ə.E @x?r?/{4܂ G _Z+u u$ >  J JpU?43f<!.&P?b3bbfz@1bAe&bq$d 2zGzt.#@LT"&r(~&i$q)(+}'i"~*#81815 `?Co yrigTt (c)o02`09o0Mg0ce0os_0fm2^1r 1am0i_0n.o0 Al0 e8s2e0ee0v0d0"11E]54M-#D3 %D7.&lUh#7 V1#Ai!J# F/?B1B  VRiAAbE9 MX G{W'0U*R2N!I$a {VZUHPD  # #>h0JTpERI MU@u6?Fjc{g&?@%r`5B?Fm_F:Is?HNtQ`  II?t#]  _ii$!`kNu  iu%wj>M JQ T       "@&&JNU2"?H@Qs#N&A@b(bU)++&RNp}#>H1#19 ?#bbbz| R&/5R}#11`?Co0yrig\t (c)02h090M0c0os0f21r0Aa0i0n.0 Al!@ 8s%Be0e0v7@d@21a4t#-}#3 %746lJ#$JUp5 3 M19 1!q(!FnElrkRt# 8$kcD?@숒@)?FP?P"EkU |1\iOT/irvs}i>`_,of :m ,M ?? S&d2? *b?r4B!B oo1gr #5!>%(EQOOOO__FAc1_C_ D4ևS]_o_yRX+{_,__ #R__ (YQoSo+o=osd {oEeooooooFyUy{* hDVW(I X"c7I!pg bf'iMjSE t%XyG|'0UŢŞN!O4i |UGD # hz0TdYYBU@(%)?Fxg^?@0߯H?Fs7Uݹ?P} t` 87?t#_)TeΦu ]ukv  ̝L&UUU&I&3 U!!`?CopyrigPt _(c) 2\W09 M c os f"!r 1a i n}. Al00U (s42e0e uvF0d(0'HS$#-## 'I?k=#ǒ 2j$B##0U'BM3@&@F0Yk} #HA%DKb5DK3DK5DKED$!GiAZG5(|IDFX7yW'&Dŭ6!4YA yVZHD  # =hj4>T!]]>#AU@0߯H?Fz)e??F˞9q?P6 t`z  ?5t# rXѮu)Nu;):O]ע0 &> # " "+"+"+"+" $+"T-J[U7 ??& bA@ 2b4-#2zGzt#+@ Li68!6 ?}; 72U!:B#AA5 5`?CopyrigTt(c)20F@9M2@c.0@os*@f8B)Ar,@eAa8@i*@n. Al@ 0HsBeX@ej0@v@dx@)21"DM-,;?C G& l>]UhzAE!A#!ְ1J`#@{&pz?FUy{?NT,dKj&b&R&M/8T9K-q?F@F?FdG0 >Nk"?YE*]\LThKjfGD&Qhj ?iU3UGe&Y`?@/Jb)h5oWKoWo;èKod}l$ro`nE2K?Fˌ%4G{?F -q? eZR^{|\?6?vodmP}l>MBe 9 N 1iS? 1?grl#51:PO"4ga@Sq(UE^@O|ǐ[`ߡa v2rbA @3ijD&}laqOz o?#WHZ'Qu=*[^N?F]%P?@O&@ c?FаM<n ?` eb0)\(Pfodm}nv}l?V5(?q$Ix[cN#W{ #iA@&QeE9 MX''0UjdN!0 /HD  # =hj0T h]]9 #]AU@}%?FR$,?@6#V?Fc_3֡w?P6 u` ?u#t  3zM%t3ws%.<%H._'B> #PJU2N贁Nw[?@ʐL+&A@wbu](i+bk)%+&p%#4">9"<5!?& ?M#bbb͓z$ p&"/%T%#L1L15 w`?Co yrigTt (c)02`090M{0cy0oKss0f2r1r 1a0is0n.0 Al0 y8s2e0ey0v0d0"E1E(]I4M-%#X3 X7&l*>(UhE j1#$Ac!J`%#@5e$_F??!foA3nBM( V?FJ-R?@HQ??FS!R?P| llHL6VLD5E+v;*AůʙH: 8snP%$? k?Q? \?4_!__WrA5_! R,U3OC@IL?F4~Kz?P)Oy޾ T!]]>#IAU@܋?F2Oa>?@#r`5B?F*ݷ?P6 t`  >B?\oEt# ŖoZu 0Nu;Y:|8>$# JpU>##<?2& ?5bbbz@b#ANi&bu$'h 2N贁N[2$@ LX"D&v(&m$ :;T'm"*B#d1d15 5`?Co yrigTt (c)020090M0c0os0f21r 1a0i0n.0 Al0 8s2e0e0v0d0"]1"a4M-/p3 p72&l>0>Uhh= 1#!eJ`#&FNTM\jh1B,RBM(!JoUy{O_ e@ So4(D@^Tw?F]֝ä ?Nk"?w_KT\T+\_(Kjӷjmplhj -`\TPQE\Uui,St_o u~!ՠXEO9= T?FL?;uQ_Pm"rA @3sjQ1lğ=GoYniAleE9 MX|'w'0Ur^N!M$|A vzHD  # =hj4>T!]]>#IAU@܋?Fj?@2r`5B?F^ݷ?P6 t`  >B?\oEt# 6ΠZu 0Nu;Y~:#&}>0m8>  PJU2N贁Nwk?@ eL&A( 5bM(b[)h+h+$h&#[-M$#?& ?b$Z*'*B#O1O15 5`?CopyrigTt (c)02U0090M~0c|0osv0f2u1rx01a0iv0n}.0 Al0U |8s2e0e|05v0d0"H1P"L4M-/[3 [7&l>0>Uhh= m1#!S!J`#*&FPUy{_?NTMjC1gc RVMʣ(D#@ ![?F"$. ?Nk"W?IKTLTcKjRlhj p=' (*!)i,SO2Kes9T;QEGUFw6qX}__TM7fTZT\Dž}_P^52XOES"rA 0S#Es3iAleE9 MX|$Gxw'0UrIN!4gA xvzHD  # =hj4>T!]]>#!AU@܋?Fc_v?@#r`5B?F*ݷ?P6 t`  >B?\oEt# ap'EZu 0Nu;Y:|$># BJpU>#͙;? & ?5bbbz@}bAA& K&@ 2zGzt?"K@ L0"&N(bM)+TK/8Z*B<1<15 5`?CoyrigTt (c)s02009s0Mk0ci0osc0fq2b1r1aq0ic0n.s0 Al0 i8s2e0ei0v0d0b"5194M-t/H3 H7T &l>(>UhE=3Z1#E!J!FENTMjN@1h(NoUy{OO e@ o)4(D@^Tw?F]֝ä ?Nk"?'_KD3\T+\(Kj?ӷjmlhj P\ DiAlePsI=MXsG'0UzbZ6N!%$0 sFEjHD  # =hj8>T! ]]>#!AU@܋?F*?@#r`5BW?Fw?P6 t`  >B? 
\Et#տ|u l4RQu?]>I$>I# JpU>͙#<?& ?9bbbz@b#AE&bQ$$D 2zG[zt#@ L4"&R(^&I$Q)+]'I"^*Bh{.M#To1o15 9`?CoyrigTtk(c)k2009kM0c0os0f21r1a0i0n.k Al0 8s2e0e0v@d0f"h1l4M-:?{3 %{7&l>(>UhE= `51#I!J49fAn^ R)X f"B!Jui,SG?RSlOS u~!N]8-19= T?FL;uQ ?Nk"?YVTQRXQOnğ/=i n (\ wTiAaePE9 MXLg'0UbZiN!)$0 Lf`jUGD # hz4TYYT#BU@5?F;?@ t2?F`0 ֺ?P} u` ?u#t+/8<&L9`d<t9( .88LU``ttyt m "t"hf;!&&'n"!'%0'm= 'p ~i##0U"K?@&S#a!!*_"`?CopyrigPt (c])0020<0900uM(0c&0os 0If.21r"0[1a.0i 0n.00 WAlv0 &8sz2UeN0e&0v0dn0w"!$J#B-#3 7#^=$6#D66 2$e,^6C% #HA%DK%D8K5D`KEDdGg&}c__'TAA""}(Pa%288BKS`DS\U Siha5 &rX7g'$i6!40 f-jUHPD  # #>h,JTEI MU@jZ?F@s??FX7`/?HmNuQ` o?u# j>tMtQ 1@t PATOQ E O#l|O6,2c J^vNt, "t "QwNbo ?!FVxNo]2УɆX3BUYU>VZ\)_ W거QUTAQ LG4CaCa_* Q_*____ o2f 8CT]?FoYDȡ?@ ~1pc} 9dS0]Yb<=p\/_ RT)4HtvzI!| ?^_AV,uݹ:4Z\o &Ja ʆXBU 9A1p@߄ߝZ\X9 (p\P΀dˆXEBUp2t Z\5q[,=6B҆XaU̓쬏 qXa>YU . [[ ;;p\Ua>YF`rZYoQUaai%?FzP4Z\^ W-p\] xTy8?F.?@@SA?FWk@ ?Nk"_|5`bmMp\PB|y[7 ơ|`?])?p*FWAX|a筃,H7~Z\?bxT?:!G4QڡUeB_T_f_x_DOOkoooZ}u?FU2Ty9=ŏsҟGO"vP_?@mlփ?Ff>> }= R3|Ofѡ|&['aޯ)d3#ux"@4SK 6ASʎy'e>'"Y`r,iMm׹ ߵl%1X~G׷_'0UJ26L50Ov ׶HD: # =h4>TB#]]9 TqAU@1D)?FL"ߛ$??Fٵ #ָ?P6 u` ?u4 t  ]<)t7񿗯)2)LEkL>  JtU2qD ?=@LC&A@-bu(b)+&񆤍[=#L"#a?& ?AH{/)=#@1@15 {`?CopyrigTt (c)w02009w0Mo0cm0osg0fu2f1ri01a; ig0n.w0 Al0 m8s2e0em0v0d0"91=4MB=#e-OL3 L7&l>  U#,|A|A9 $] t^1*=# #p Kw&&J`=#R&F 2'_b0CwvA kRqVM(~"!;ɡ?@<3?F ?P-DT! ;d1?Y=:ZTO㿂f{OaB J4 13? v,? (N_?r5w!:8oJo\g"a@jS(.8R!ZFI nx4__ D_U5U,VS?@g$ ?F~4,ێ~@ 8_H_ZTml`KDfl`'E4q2d8R鿃oo`rmkod&'aE2%_7USr(A@z#y3ae&b{, fZT5sae~t?FG=Va|7>-\氹pAiʈ$%?F(XoZ1a|}%\Iii ^WB^:fvEZT˒AiU?FAx߾Ya|k#ӝY~/Yt(Ae o0AleP59 MX=GL"'0UŢbN!40 O5 |HD: # =h4>TB#]]9 T]AU@R7)?Fy߈C??F"ָ?P6 u` ?u4 t  *5y")t7n)2)LI"+BA B JU2q0 ?=@L/&A@ybm#zc a#-bK{(m {(+I&)#Kڬ !?& ?A"&g$bbu/b /%)#Bo1o15 {`?CopyrigTt (c])020090uM0c0os0If21r01a0i0n.0 WAl0 8s2Ue0e0v@d0"h1l4M!E)#𤲝-DO{3 {7&l> U#,AA9 ] 18! #p %J`)#@~t?F # 0hU-~-/HA RVM(ET"rYD?@J-?F-' 9d 0_Y b%r\_oӫ([MFoFl 77]8>!Zݹ:4\\o?gƭkdQEF5W4?@A=?F[Q`??P-DT! :U^Zs[ #l@*p9lJjV\\l_ I&vpkhAzi!J?F߸TF\\Ͼ\zT khzinA9f@.mY <\\ԕ$ ,S{khAzi׍8r?Fį^?m ? ~_5 ә(Ae 0AleE9 MXlGR'0UuőN!40 ~5 ,@HD: # =h4>TB#]]9 TU@~t?FYaJE??F? ~?P6 u` 7?u t  }A_)t7Ǫ+)2)L^A]>REJ2q?R=@LA@yb #z.-b( (%2+2&L q!?& &?A"#&$bb/b 4/U%  1 15 {`?CopyrigTt (wc)B020N09B0M:0c80o%s20f@211r40m1ua@0i20n.B0_ Al0 88Us2e`0e80v0 d0X"1 4Mi!E-?3 R7&l> 0>U#3 GA9 B)1 #p| U%J`F(jdO C]/S֑K VM(HC-O"K[%q3U5CU?uBТ:^Y=~q\G3UE2OE#rA 0"c37QA?Y\[)xIB_mT^(`d :0A@le59 MXGB'0Ur-N!$KA5 fjHD # =hj0>T h]]9 U@/?Fxpz?@]JQ?Fk],>Uh@ !f$; hp _Q%Jd(jcZ(sEp!$a VZHD # =hj0>T h]]9 U@~t?Fxpz?@?Fx?vÂE?P6 t`  }A_?t#%_ ,u auo>J2zGzwt?@ʐLA@bu+b)%#+#&![Ha$?x& ?A//)񩣑!!5 `?CopyrigTt (c) 0]2`09 0M0]c0os f2R!r 51a0i n. 0 AUlP0 8sT2e(0e0vf0dH0@I"!E]$M-# 'x&l>],>Uh ![$ hp| F%JdjcO(hEq<cBɀODI$xD\AEOOOOD~hEuBТ91Y=~D\GiAbE9 MX|7W'0Ub>!$a VZ_H$;s nXk_$:@@_Fx# ߯VOB Y ~d8մs o@+C o[sG*7of7Ș7TH7P(7.X7%Q8F֛U8FV86Z8\8_8a8|7d8NHf8ؒ7_8I*9xh8s<9Hk8 ]xm8 Xo8!UFD  h> T6D 4U?\.??F BP(?3P<; (^.h1 B ._ .s....UBU G 0`%Document,aUp, r2 SPe, 1!LF"t:$Fil, 3#g, k!^ x%% -G( S?'5 7NWwp\xpcu wf`oo\Drag onut hep adn"y aex.bL&d2ٿ~[?? ].r\5r\.EUGD" # h <^T$(YY#ӌU@\.?@L&d2?@~߻?P} u` ?Mu#3:D:DTDhYD| A A A AAAA&A&5V0&0& 16@@TThh||&&&h&S"4"!X2^7H^7D',6{҂_a?6 ?bbvbz@ t#RCCYCCCCCCX2&UHF%2?BX2]"BW3Fg3RFh3FX2 FU X2 F X2 FU X2 F X2 F5 X2F F"BC\"CS3F"CS=F2CSGF2CSQF/2CS[FC2CSeF2tsU2URM3@S[CT"bS$bZCfC f|d78JSB<D>`ÿ@{q`?@bK?df2uL -1bu .1ocr4#b vs"xh4C"M7 qx24S"_|um2u`u7`"0`au`rK=pg2 3 @D:'0A2Er:+Jw"+`@rApdBS(pԁ .r`?CopyrigPt (wc) 20U9 McoP'of r6ua in. U DlQ sUUePevgdI1`Vs_0RXYIcPm!#5378eH^0ӕYcU5#.U#Y.3#.E#.u#.=##.#.|#!.#!.#.pfmcaȹW!T4!4!28Toԁ arQ(-DT! @b0Q `aqqbvab? 
{ q¬rrh@@($ &  qàq#@tq3t)'`Rg:sB Tgx Er@9'/'Tw\M8^6uk u  bu\ %qfŪ0`0#`abukv33u &rg𔫿Ͽ`28x1,@9"HD: &# ;ih ,>T!  9 U@D&d2?@KJ}0@@sbA*%BAG W$?n& ?A$#"t9,(tϺ*r(, [ # ͐(tl-t?t - t>?t- th? t}? tݒ? t?t :hT!<M#h $A$A5 s`?CopyrigTt (c)[@20g@9[@MS@cQ@oKsK@fYBJArM@AaY@iK@n.[@ Al@ QHsBey@eQ@vZ@d@?"l>U <>Uh$@AA BAJZ?-ku  ?V|=R"Q jRVMB:]GZpS ,TvRU5UC QkY[zZ%=| hUnd<+o{Yh<gttgYCNboio&1\oBٽB>PhQYwk_>g"U2Q_cU#rA $s?"HD: &# ;ih ,>T!  9 U@ʛ߹0?@nIw?@DI ?@MԿ?P u` ?u4t  !t/ ms!*bB!~DF=nm>J}02@@}%bAUB A?f. ?s,zb/b@'݉ 6 ,) ޷+ 3"?-#ϿS㥛!z /(, ) .":'`hT!<M#hT 115 u`?CopyrigTt (c)020@90M0c.0os0f21r0Aa0i0n.0 Al:@ 8s>Be@ej0vP@d2@?"Ul>8>UhpApA 1 0LZW|?U@7>0XB ,V MB4GRS_FU5:W]dT!  9 U@xj89?@F?@oU?@T?P u` 7?ut  K~D!t/0!*ҹ_!!DS_ Vn[>J}0@L@%_bAU)B AO9tG ?f. ?Y,zbb@' aY + :pΈ?'!x?z!(, ) !b#/h<M#h 115 u`?CopyrigTt (c)0W20090M0]c0os0f2R1r0 Aa0i0n.0 AUl&@ 8s*Be0e0v<@d@?"l>4>Uh\A\A 1 LZX9v?;0 0af҉A BVMB:66C JO DB.Uk5`AOWI[n_/T^9_WQ.U>U?jtOJoBdA D.UE2OErAg D4c?"HD: &# ;ih ,>T!  9 U@fB?@`mH?@i~jJ}02@@}%bAUB"G ?f. ? ,Cb /(,n  # :'hT!<MQ#h 71715 s`?CopyrigTt (c)n020z09n0Mf0cd0os^0fl2]1r`01al0i^0n.n0 Al0 d8s2e0ed0v0d0%?"l>]UhzŒT2JTyF mBFMB:jt?lK{@C vTC E%4TMlO~IADE2TO;rAg ${S?"HD: &# ;ih ,>T!  9 U@D&d2?@b1X΁?@K~?@;d;O ?P u` ?u4t  'V3X!t/XQ_x!*!D?7n  >J2U?R<@Ls,A@Tb+&tt0/t 2- tBy) ?b++'񠀑h!<M]#hR1R15 s`?Cop rigTt (c)020090M0c0osy0f2x1r{01a0iy0n.0 Al0 8s2e0e0v0d0U{"l> <>Uh$AA* p1JZ-ku? _V|=PA BFMB:]GZC ,DBE5E@C QI[J%=| E@^d~XLAIJgOlo.WPE2OE#rA $s{"HD: &# ;ih ,>T!  9 U@^?@K?@ZRJ}e0@@%bAJBAk?f. ?Y,zbb@' 6f ) + w/?-#S!zs+/&a&!+ Pkw?; d;O?8(, ) N" /=$h<MQ#h /A/A5 u`?CopyrigTt (c)f@20r@9f@M^@c\@osV@fdBUArX@Aad@iV@n.f@ Al@ \HsBe@e\@v@d@?"l>UhCAAMA 0LTuV  iRVMB4`_rQw_?"Q5^_zYQU@Uor_:k2P_t;rA $c?"HD: &# ;ih ,>T!  9 U@PaX?@f)?@MbX?@TaWk~n?P u` ?u4t  N=!t/ӯw~!*Dʮ!D#M8  >RJ}e0@@%bAJBAk?f. ?Y,zbb@' 6f ) + w/?-#S!zs+/&a&!+ Pkw?; d;O8,(\, ) ":'hb<M#hT /A/A5 u`?CopyrigTt (c)f@20r@9f@M^@c.\@osV@fdBUArX@Aad@iV@n.f@ Al@ \HsBe@ej\@v@d@?"l>UhCAAMAd 0LTcuV  iR)VMB4`_rQw_?"Q5^_zYQUUor_:k2P_;rA $c?"HD: &# ;ih ,>T!  9 U@PaX?@ w4?@MbX?@DkuL`~n?P u` ?u4t  N=!t/^~!*Dʮ!Duϧ{z  >RJ}e0@@%bAJBAk?f. ?Y,zbb@' 6) + w/?-#oS!zrs+/Ⱦ&a&!+ Pkw?; d;O?8(\, ) "' /=$h<M#h /A/A5 u`?CopyrigTt (c)f@W20r@9f@M^@]c\@osV@fdBRUArX@Aad@iV@n.f@ AUl@ \HsBe@e\@v@d@?"l>UhCAA`MA 0LTƌLuV  iRSVMB4`_rQw_?"Q5^_zYQUU or_:k2P_;:rA $c?"HD: &# ;ih ,>T!  9 U@PaX?@b`;?@MbX?@TaWk~n?P u` ?u4t  N=!t/ӯw~!*Dʮ!D%h >RJ}e0@@%bAJBAk?f. ?Y,zbb@' 6f ) + w/?-#S!zs+/&a&!+ Pkw?; d;O?8,r(, )] ":'h<MQ#h /A/A5 u`?CopyrigTt (c)f@20r@9f@M^@c\@osV@fdBUArX@Aad@iV@n.f@ Al@ \HsBe@e\@v@d@?"l>UhCAAMA 0LTuV  iRVMB4`_rQw_?"Q5^_zYQU@Uor_:k2P_t;rA $c?"HD: &# ;ih ,>T!  9 U@PaX?@iMbX?@TaWk~n?P u` 7?ut  N=!t/Ӕw~!*Dʮ!DNEr >TJ}0@@bAUBA_k?f. ?Y,zbb@t' 6 ) + w/?-#oS!zrs+/Ⱦ&a&!+ Pkw?; d;O?8(\, ) "' /=$h<M#h /A/A5 u`?CopyrigTt (c)f@W20r@9f@M^@]c\@osV@fdBRUArX@Aad@iV@n.f@ AUl@ \HsBe@e\@v@d@?"l>UhCAA`MA 0LTƌuV  iR(=RRMB4`_rQw_?"Q5^_zYQUUor_:k2P_;rA $c?"HD: &# ;ih ,>T!  9 U@PaX?@Cz?@MbX?@aWk~n?P u` ?u4t  N=!t/ӯw~!*Dʮ!D`q1 ARJ}e0@@%bAJBAk?f. ?Y,zbb@' 6f ) + w/?-#S!zs+/&a&!+ Pkw?; d;O?8,r(, )] ":'h<MQ#h /A/A5 u`?CopyrigTt (c)f@20r@9f@M^@c\@osV@fdBUArX@Aad@iV@n.f@ Al@ \HsBe@e\@v@d@?"l>UhCAAMA 0LTIuViR  iRSVMB4`_rQw_?"Q5^_zYQUU or_:k2P_;:rA $c?"HD: &# ;ih ,>T!  9 U@PaX?@h"o?@MbX?@DkuL`~n?P u` ?u4t  N=!t/^~!*Dʮ!D >RJ}e0@@bAJBAk?f. ?Y.zbb@' 6f ) + w/?-#S!zs+/&a&!+ Pkw?; d;O?8(, ) N" /=$h<MQ#h /A/A5 u`?CopyrigTt (c)f@20r@9f@M^@c\@osV@fdBUArX@Aad@iV@n.f@ Al@ \HsBe@e\@v@d@?"l>UhCAAMA 0LTuV  iRVMB4`_rQw_?"Q5^_zYQU@Uor_:k2P_t;rA $c?",H?_bs A/}뿟@JZqՆFMQ3## ߄rB} S ]Dalմ :@+Ck<:aGg8vkPH8v(8W#A8&08?)8],59/*92)ȓ95$9998<~h9b?H9B+UFD  h(^TYYBBUA@ ?I?P?  
j FXj$| ȅH?? B@L&ɯd2?-)(?\.;(. sUa!_&/J ( G 0B`@iMac,$intosP,Ap 0le c0m 0u e+r m0n !#1n0tw02k eq0i 0mT0!,Pa"0d<0]1e!i1^| ( 3P B'?/ ~σ(/('v2 ?;%4I^b /pcv  .ԳDrag onut hepe.Rih-c]lckaUdcos UPo etQeAvUwreP 5aab?ɿt@ ?03%?贁NkHD # =h4>TB]]9 uAU@?%?@jOT\wQ?MΊFA9rr$r&A -34#u `7u ub A V >E& "* MJz:@q&" " ""@)"",Y#LM=b1+$'")"$"$27Y#EM 39 45#^Y#2q0l3SF'Y$?@J3N G>uLA-l5!HBuJAXRBr.#=wCbBܟ0?Cuԟ0I su `VB(#X3&'"2ql>!Uh@r5Y#1 a]WRa_SfPJA\'!2(1\ 4;?P,2Fu`bu@W(&PV$5K:ci3?+3__R8bVf5UVUmgWQheocrATM"AaAa$2f2aa2$(t 5tBtEJb.|wb_z9ի tBi8gvF$t| '"9P(Aqt% wuEwprp{Ϗ'9" nP=3EFre$w'2$9͟\oyЏRsRtruP!s1e!`Hih/P",>w=rr*˶ m4EWlqqPӏMū >sSoeei`%/;@%;LTY`2_3_4_U5O2c3c4cĉ5ZFf=G,[\F-[fpF.[F/[fF0[F1[fF2[F3[VF-2<UZZnnUUUU&&U"&"&6&6&UJ&J&^&^&Ur&r&&&S&&&&&&&&&&&66&6&6:6:6N6N6b6b6v6v6666666666666FFFF**F*F>FHOZHU\FpFpFFUFFFFUFFFFFFFR)!#FQ_? ?҈#lPYPPPPPPP!P1&PJ!P^!Pr!P!PP!PdP!P؂&P1P&1P:1PN1PhPv1P1P1P1P1P1P1PAPAP*AP>AP\APpAPAPAPAPAPAPAPGDPGR"`[R"j[RnS"tR"~R"R"R"R"b"Ė#b"Ζ7b"ؖKb"_b"sb^d^!^!^(2,2/2,A\ApA\AAAAHA{ZU@JT^_&&2c3&2c3&Bc3&GBc3 6yBc36Bc316Bc3E6Bc3Y2ibYCYCJ YC^YC&YC&YC&YC& YC 6ѢYC6YC16YCE6 YCxOrb&bxbsS;6{<F\.?@>Pÿ@qP?@VR?TV?2uL`,@-"Ru p`\@ZJrRf%c!h-" bqn?SuplNiE2u`u[`Fp@auL`bxΆΆ(؂g苤^q_US! >!a{4_b:pu{`:A~PHp\Ov΁Qe7`ESΆl.sPb ΁L'`q q w ;3`Vis_1RXY@chm#589{4*-b`?CopWyrg tU()2&@0,W McEo'ofM>rAzUaMi?n@ WAl EsUeeEvdBBgrRTUs`Hl"et6BTl\桇ub~aaLt~aLđΑؑ (2F@HD # h0>Th]]9 MP# JUF\.?F?Fb-2;?P JtM`  7?t#_B-K~LQGJu duo7]A$>aRAMJob@K+ tQJA$"a?& ?Aa4 ۢ # b)ta*"t =td=!%(U?tB? tс? tݖ? tޫ? t? t?ta.#A#A`?CopyrigTt (c)Z@2`09Z@MR@c.P@osJ@fXBIArL@AaX@iJ@n.Z@ l@ PHsBex@eP@vZ@d@m"Bh<#jl& @K @?UhLBQBQ] 1 !7AA#!V!@}ac RdjaE,JU'5l[Cdk%(gEU˖7s?FXE]ʚdkddѿS&iEn?Dǫoo` j!'hUFBG\k?*J`*e?~7CWi <I'hQYP ?F_}tk)vƫs'hQpsf l'hQYfyP\l'h1Y΋Q?FR&1\kQY-]?F2(Gf/\Rk> u'hQEW?7uL"las \.'h!YJ' ?F^3~\Nk?:P'hQY|y%.?FSo\Wkŏm'hRXD7Ժ?Fv[Fios*e7U l_9;\"kyޙ"M'hQYǬ! om?q:z'hUU3_Ő{ m!j,H0d ǕHD: )&# T=h0>Th]]9 Q BUFpɷL?FE@XV?F&1?Fbi0 ѻ?P Bt`  S0?tFp֯hݿQVBu auo7>JASTfw? ?Az@bbb! ( 6 )  '+ ߈?/I-Z#,z AF u +%/,8)E!'+ k(??r+oF(a4 8) E"#{ #BstG8tU: Ba L>2?!88;D;68?5;D; tj>8LALA1 `?CopyrigTt (c)@2`09@M{@c.y@oss@fBrAru@Aa@is@n.@ 0l@ yHsBe@ey@vZ@d@v2h<M#l#@ M@?4>UhjA6Q>1| L`k$k;cdj]J}vMjR]M 6)fMB@prwp\ k_稇avCe=ESe??F9Y? lgESeF:h%ykod iSQޗh~Kjo*` Oc;Ꞗi2_U3rA/04sCHD: )&# T=h0>Th]]9 Q BUFfFY?F,"+?FYU/?Fʯ8ϻ?P Bt`  I$lG(?t=`j>_PS ,Bu aquo7>KJAUVJp?X ?Awz@bbb! ( "f Փ'+ ' qq?z A  +/.&68))E! '+ go?W-sU!n*a4 8) "u#{ u#D!st?8tM: Bav L62?!08;<;60?5;<; tb>{8DADA1 `?CopyrigTt (c){@2`09{@Ms@c.q@osk@fyBjArm@Aay@ik@n.{@ 0l@ qHsBe@eq@vZ@d@n2h<M#l#@ M@?]UhzbA.QR61b L` KdjMaQ  6fM{"@FT4\[3K_,7e5EGe9?Nf[ N_qd7eEGeXәj'SW\m jOf܋h2_U3rA'04msCHD: )&# T=h0>Th]]9 Q BUF` l?Fċ?FC7Ժ?FBGԻ?P Bt`  I4z?t? D ǯ^\+Bu Fduo7 >JTA0f88v?WYϳAjLF@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#BXY'?A> ?A "+(/+/=/O/a!y:5r{l/@ sU10( /???? "0aQQ'@`?CopyrigTt (c)FP2`09FPM>PcTh]]9 PB IAUF++g?F2?F {J?F%?P t`  r^?t鯕 { Li@Zu Mauo8 >    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a490 9_b!#{t0#tN8tϻ: Ua BL2$18KE;F?VEK]; t>B 4AA0`?CopyrigTt$(c)$2`09$M@c@o%s@fBAr@Qua@i@n.$U @l/P Hs3RUePe@vEPd'P 2h<M#l# M/P?]UhzPAQ1 L@3F l?F OqP dj|aW{,8cUFwW?F0ev"?F%Ti ?FUѣoO 0Yig߻ull4_aj??N~f0co+ fEeC.kn?FtdFِ?Fd?F?#l/Оol}dQ3ll BXl귺m|bBhaUeAvؼg`r4g?FÇ?F6|vflPI3qoikeðl1` |QT?heFnFc^?Fq`h leowkyyftGg˂a,8b1;MHD: )&# ;h0>Th]]9 PB IAUFd    PEJƱZVJp!?&. 
?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3Fy*,[ܶ?F5]P dj}ah8cUFąiBQ?Fۨ.?Fao#ѻoO 0YiM+Fll_Zaj?}0%|f0c˭ fEem<_څ?Fb?F+}?F?oolGGTll .Xlhe{|kww@haUec(x,`H^ǀ?Fw?F234 flux1qoiFA(]l |~Th]]9 PB IAUFd    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3Fy*,[ܶ?F5]P dj}ah8cUFąiBQ?Fۨ.?Fao#ѻoO 0YiM+Fll_Zaj?}0%|f0c˭ fEem<_څ?Fb?F+}?F?oolGGTll .Xlhe{|kww@haUec(x,`H^ǀ?Fw?F>34 flux1qoiFA(]l |~Th]]9 PB IAUF;L?]?F]hk?FXN׶?F`V?P t`  ??tQ0I9xE "˪Zu Mauo8 >    PEJڍU[\/(?&. ?AbbY!z@Ae bbCt c(a4b  b(h#{ h#΍t(tI*#0f88vb.#i L5<1v(&$7/(D%"6/5;0( t.*B#11`?CopyrigTt] c).@2`W09.@M&@c$@os@f,BAr @YAa,@i@n}..@ Alt@U $HsxBeL@e$@v@dl@"h<M#l# Mt@?]UhzAA!J@#Fy*,[ܦ?FVTP dj}תa4(SUFąi?F6^G?F/?F?oO 0YM+Fӱ\*aj,.}a$?# XEUYu?F #?Ft Ǜjm?FRtolTBJ\LMYBltavXlp@XEUg6PK|$yp?F`1А?FAD3 o~l/AD0qYJg ַBl*$ԞXlFB:XU^Fi?FfaEHRl_[Ei1GAf`dSq+a,(bvHD: )&# ;h0>Th]]9 PB IAUF;L?]?Fԁ_IRE?FXN׶?FgaV?P t`  ??tnG9xE"˪Zu Mauo8>    PEJڍU[\/(?&. ?AbbY!z@Ae bbCt c(a4b  # b(h#{ h#΍t(tI*#0f88vb.#i L1<1v(&$7/(D%"6/5;0( t.*B#11`?CopyrigTt] c).@2`W09.@M&@c$@os@f,BAr @YAa,@i@n}..@ Alt@U $HsxBeL@e$@v@dl@"h<M#l# Mt@?]UhzAA!J@#Fy*,[ܦ?FVTP dj}תa4(SUFąi?F^G?F/?F?oO 0YM+Fӱ\*aj ,.}a?# XEUYu?F#?Fm?F*m{olTBJ\MYBlU{XljDc@XEUg6PK|$yp?F1А?FD3 o~l/AD0qYf ַBlq$ԞXl.B:XU^Fk?FaEHRl_[Ei1GAf`dSq+a,(bvHD: )&# T=h0>Th]]9 Q BUFa)DbJA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 ) "'}# }#BtΜQ8t_:Ba~ LH2!B8;N;6B?5;.N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@oKs}@fB|Ar@Aa@i}@n.@ 0l@ HsBe@e@v@d@2hb<M#l1# M@?4>UhuQuQPtA@Qj LZF>7ܾ 7j bL\2] 6R1bMB: ZWdc Ɵmdb`GeGEWe2T|`I\aibvIGeU@OCJQTh]]9 Q BUFq \?FWY۷?F \?F0;?P Bt`  4fտݛ?t? 2N (+Xۙʿ-srVBu acuo77AJA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZF>7gܾ 7j bL\G6] 6S-fMB: ZWdc mdb`GeGEWe2T|`I\ai?bvIGeUWeOC9ri_xu j aGeWe_ooh2K]u3rA 4s2HD: )&# T=h0>Th]]9 Q BUF&?FUa|AY?F \?F0;?P Bt`  \vJA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!fQ U1x(a4 ) "'}#{ }#BtΜQ8t_:Ba~ LH2!B8;N;6B?5;.N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@oKs}@fB|Ar@Aa@i}@n.@ 0l@ HsBe@e@v@d@2hb<M#l1# M@?4>UhuQuQPtA@QH1j LZFr7ܾ 7j bL/\]M 6-fMB: ZWdc ?mdb`GeGEWeA?2T|`I\aiavIGeUWeOC9ri_xu j aGe@We_ooh2Kt]u3rA 4s2HD: )&# T=h0>Th]]9 Q BUFR ?F(Zп?F&?F0;?P Bt`  U'?t)S Pʿ-srVBu auo7>JA]^'? ?Az@b ( 6f  )ϓb= + _? ' 88o?z A s +=  /)/;/M$[+["-j!Q U1[!x(ar4 )] "'}#{t }#BtNQ8t_:Ba~ PLH2!B8;N;Q6B?5;N; tt>8VAVA `?CopyrigTt (c)@2`09@M@c@o%s}@fB|Ar@Aua@i}@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhuQuQtA(@QH1j LZF>7gܾ 7j bL\] 6-fMB:D ZWdc 2Nodb`GeGEWe2T|`I\aibvIGeUWe-B9riu j aGeWe _ooh2K]u:3rA 4s2HD: )&# ;h0>Th]]9 PB IAUF(*?F?+8?F-?FP덋?P t`  g ?tr㯰4\ls,Zu Mauo8>  ]  PEJ]_DRb(?&. ?Az@bY(ne 6 h )b k+ ?Z'e 88?z6e Ae [+ h g/y//$# L:5, /sUK1*ap4 9] 2(#{t #tN8tϥ: Ua BL218KE;6?@EK]; tͺ>B 4AA0`?CopyrwigTt c)@2`09@M@c@o%s@fBAr@Aua@i@n.@U 0lP HsRUe@e@v/PdP 2h<M#l# MTP? <>Uh$QQ]AQ1 LZ3?ZkP 7jbb;'ߨL7a 6{fM8cUF*eE(aFr+}G`_=m?F7T҄xo`Oi^jva~t0sK`+t!kĀ9t!KU:M} -'sLl}]|]r9xLuf.Lp?F4gHvodObdcZgt_=,V~#|..7 ;vLuN?Fbݲt{?FNch|[m?&1 zoUh+#|?n1%*!ayTh]]9 PB IAUF/vt?FxN7?F-?FP덋?P t`  q?t} ܯ` \ls,Zu Mauo8>  ]  PEJ]_DRb(?&. 
?Az@bY(ne 6 h )b k+ ?Z'e 88?z6e Ae [+ h g/y//$# L:5, /sUK1*ar4 9] 2(#{t #tN8tϥ: Ua BL218KE;6?@EK]; tͺ>B 4AA0`?CopyrwigTt c)@2`09@M@c@o%s@fBAr@Aua@i@n.@U 0lP HsRUe@e@v/PdP 2h<M#l# MTP? <>Uh$QQ]AQ1 LZ3?ZkP 7jbb;'ߨL7a 6{fM8(a?*F?F}ޑ`oO 0dTc I\rdcd bjӯokga>?>eEef?+}G?F_=m?FSxo`OivddlvatG0sK`%tkĀѡ3t!KU:G} -'sLl}]?|]r3xn.Lp?F4gHpooZgt_=,V|..7 5veN?Fbݲt{?FNcb|[m&1 zoUh+|n1%$!ay Th]]9 PB IAUF & W?Fq<9?F-?FP덋?P t`  q -?tv3m\ls,Zu Mauo8>  ]  PEJ]_DRb(?&. ?Az@bY(ne 6 h )b k+ ?Z'e 88?z6e Ae [+ h g/y//$# L:5, /sUK1*ar4 9] 2(#{8 #t8'tϥ:Ua L218K;6?@EK.; tͺ>B4AA0`?CopyrigTt c)@2`09@M@c@os@fBAr@Aa@i@n.@ 0lP HsRe@e@v/PdP2hX<M#l # MP? <>Uh$@QQ]AAQ1 LZ3˟ZkP 7]jbb;'ߨ7a 6{fM8cUF*r|p蠼F?F}`oO 0dTc ?I\addǼbjokgaz>eE(aFr+}G?FC_=m?FS҄xo`Oi^jvϾat0sK`+tkĀ9t!KU:M} -'sLl}]|]r9xLuf.Lp?F4gHvodObdcZgt_=,V#|..7 ;vLuN?Fbݲt{?FNch|[m&1 zoUh+#|n1%*!ayTh]]9 PB IAUFt5?FYz?F-?FP덋?P t`  @"?tgQ,n\ls,Zu Mauo8dAd  ]  PEJ]_DRb(?&. ?Az@bY(ne 6 h )b k+ ?Z'e 88?z6e Ae [+ h g/y//$# L:5, /sUK1*ar4 9] 2(#{t #tN8tϥ: Ua BL218KE;6?@EK]; tͺ>B 4AA0`?CopyrwigTt c)@2`09@M@c@o%s@fBAr@Aua@i@n.@U 0lP HsRUe@e@v/PdP 2h<M#l# MTP? <>Uh$QQ]AQ1 LZ3?ZkP 7jbb;'ߨL7da 6{fM8(a?#FT ?F1uRؑ`oO 0j?rdcd bj]ea }eEef?+}G?F~xW m?Fxo`Oivddlva~ttqE`%t:܃3t!KU:G} -'sLl}]|]r3xn2M#p?FcGpooZgt9L|8 5veN?Fl"Ry?F-#Fkb|[m&1 z'ǵ(|F$0$!ayTh]]9 Q BUFa)DbXX +Xۙ ^MVBu auo7>JTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% @& J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? M@?(>Uh7E P+AA!{ L:FQ덋<6j% bRzg9<2 "\ ZWdS ?mT W5U5k0YaY?vq 2HD: )&# T=h0>Th]]9 Q BUFq \?F8˻?F \?FB2T|`I?P Bt`  4fտݛ?t.QW+Xۙ]MVBu acuo7hAJTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? M@?(>Uh7E P+AA!{ L:FP덋<6j% bRzg9<2 "\ ZWdS ?mT W5U4k0YaY?vq 2HD: )&# T=h0>Th]]9 Q BUF&?F 77 \?FB2T|`I?P Bt`  \vJTA0f88v?WYALv@z bbb!'n 6 % )! ! ,+ <?' o?z #a47% =) J""#K#{% (+ ( tQ#Bı?A> ?A"&"$&$/*%,"t:"t> A A6 `?CopyrigTt (c)D@2`09D@M<@c:@o%s4@fBB3Ar6@oAuaB@i4@n.D@_ Al@ :HUsBeb@e:@v@-d@2`h/1<M#l1#7? PM@?(>Uh7E +AA!{ L:F|po蠼6j% bT  ü2 "\ ZWdS mT W5U4k0YaYvq 2HD: )&# T=h0>Th]]9 Q BUFR ?FB?F&?FB2T|`I?P Bt`  U'?tǡ P]MVBu acuo7AJTA0f88v?WYϳAjLv@z bbb!' 6Y % )! ! ,+ <?' o?z #a47% =) J"#K#({% (+ (G t#Beı?A>L ?A"&"&$/*8%,"t:"t>  A A6 `?CopyrigTt _(c)D@2`W09D@M<@c:@os4@fBB3Ar6@oAaB@i4@n}.D@ Al@U :HsBeb@e:@v@d@2h/1<M#l#@7? M@?(>Uh7E P+AA!{ L:FP덋<6j% bRzg9<2 "\D ZWdS ?2NoT W5U4k0YaY?vq 2UHT5D B &# T>h0JTtiiM ENUF =?F>?FumK?F_u?P WNtQ` Kl7?tUo>TX*F(бoNuj mu){E0f88wv?AVQ Ӎ!@#b#m@b  0#  #W(3 (o+&T" #t#NĽ/?. ?M"&$a%$$,/>*L%i,T"tΞ*T"tR.R11#`?Copyrig`t (c)02l090M0c0os0f21r01a0i0n.0 Al@ 8sBe0e0v0@d@"Vt!X#xB #/ @T,UtE1UA-!@:2T|`I pvm?S׮(%U;jX[5G@Y9"S\FkV5%U#ɯ7ܾ=\WQS\O?XbiTQY%_7_I_[_DQOOOO__Ԫ?FH փBl[{Xl?G漫i_)fV u``9`-YBlO N}0,*x]@$?F>BlǏXlwg%ooo EU}ooooooM5#?F߳Y`;Blr*XlYmC*)fWBǽ?F#nZBl>^̔ԍ?f Q́)f?aD%?F qwJBlIY:6XlIb ʏ܏EABTfxF) \Ck?-q_Wa1Cv(Ԅ~Bl!H,BXQnB)fF01DBoT[inJ\ۙgyAHD: )&# T=h0>Th]]9 Q BUFKpJAS`? ?Az@bbb! ( 6 )  '+ ߈?/I-Z#mg!z A " ././@/R+,g/y'sU1,ar4 )] "#{t #BtNg8tu:Ba PL^2!X8;d;Q6X?E;d; t͊>8lAlA `?CopyrigTt (c)@2`09@M@c@o%s@fBAr@Aua@i@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhQQA(VQ^1| LZFg 7j b2ɶGOR] 6SCfMB:1ۑdc $I$I܍+d 2aa]EmeL.la'iL?.]eU@5/LN?FOOka,dHޓgmeoo(oh2gyu3rA 34s2HD: )&# T=h0>Th]]9 Q BUFܛ?FYQt?F#P3?F f7?P Bt`  _?t+%`' VBu auo7>JAS`? ?Az@bbb! 
( 6 )  '+ ߈?/I-Z#mg!z A " ././@/R+,g/y'sU1,ar4 )] "#{t #BtNg8tu:Ba PL^2!X8;d;Q6X?E;d; t͊>8lAlA `?CopyrigTt (c)@2`09@M@c@o%s@fBAr@Aua@i@n.@U 0l@ HsBUe@e@v@d@ 2h<M#l# MT@?4>UhQQA(VQ^1| LZFg 7j b2ɶO] 6CfMB:1ۑdc $I$I܍+d 2aa]Eme^L.la'i.]eU@-7/LN?F$_fq.Th]]9 Q BUFn?F_S?Fi$?Fo2ͯS3»?P Bt`  -1{?t8b ?VBu auo7>JAUVJp? ?Awz@bbb! ( " '+ ' qq?z A 9 +/.&68))E! '+ g?W-sU"!n*a4 8) "u#{ u#D!t?8tM:Bav L62?!08;<;60?5u;<; tb>){8DADA1 `?CopyrigTt (c){@]2`09{@Ms@]cq@osk@fyBRjArm@Aay@ik@Wn.{@ 0l@U qHsBe@eq@v@d@n2h<M#l#A M@? Uh,cQcQ] '1bA.Q61b L`?g{Op dj? |m cuۡ'1 6!1fM{":F7Q덋<6d kS=ѣ<n2{!5E[nc^ vogaKeE[eV_@cbc g@]N4o r\Dž}iNuPna?FTPf|]tElF<2hQp_m痪 |"*hQiyl:qid5hQJ_noDhU23rAg 4n2HD: )&# T=h0>Th]]9 Q BUFD7n?FWw?Fi$?Fo2ͯS3»?P Bt`  ;#p!5?tR?9E ?VBu auo7>JAUVJp? ?Awz@bbb! ( " '+ ' qq?z A 9 +/.&68))E! '+ g?W-sU"!n*a4 8) "u#{ u#D!t?8tM:Bav L62?!08;<;60?5u;<; tb>){8DADA1 `?CopyrigTt (c){@]2`09{@Ms@]cq@osk@fyBRjArm@Aay@ik@Wn.{@ 0l@U qHsBe@eq@v@d@n2h<M#l#A M@? Uh,cQcQ] '1bA.Q61b L`?g{Op dj? |m cuۡ'1 6!1fM{":F7Q덋<6d kS=ѣ<n2{!5E[nc^ vogaKeE[eV_@cbc g@]N4o r\Dž}iNuPna?FTPf|]tElF<2hQp_m痪 |"*hQiyl:qid5hQJ_noDhU23rAg 4n2HD: )&# T=h0>Th]]9 Q BUF}h!?F0,?FTZLP?F Qû?P Bt`   Mǿ?t `,1fǰvz;ZBu ,dcuo7oAJAa^!? ?Az@bbb! ( 6 )  '+ g~X?I-Z#!g!z UA " ,/,/>/P-[+g/y'U1Ȋ,a4 u) "!#{ #Bt9g8tu:Ba BL^2!X8;Ed;6X?E;]d; t͊> 8lAlA `?CopyrigTt _(c)@2`W09@M@c@os@fBArԕ@Aa@i@nU.@ 0l@ HUsBe@e@v@-d@2h<M#bl#P M@?8>UhoQQ]AVQ^1| L@%fq.?F?c^ dj ٨la6ztB:'3S3 7d /kyԷ&Dh]EWe?g~kp+ɉ 5d}bGeUWeFTol0juMDhn1ۑ0j`Dhn.R덋<o+o"Th]]9 Q BUF}?Fb-P?FTZLP?F Qû?P Bt`  R딿i?tT`,1fǰvz;ZVBu auo7!>JAa?^? ?Az@bbb! ( 6 )  '+ g~X?I-Z#!g!z UA " ,/,/>/P-[+g/y'U1Ȋ,a4 u) "!#{ #Bt9g8tu:Ba BL^2!X8;Ed;6X?E;]d; t͊> 8lAlA `?CopyrigTt _(c)@2`W09@M@c@os@fBArԕ@Aa@i@nU.@ 0l@ HUsBe@e@v@-d@2h<M#bl#P M@?8>UhoQQ]AVQ^1| L@%fq.?F?c^ dj ٨la6ztB:'3S3 7d /kyԷ&Dh]EWe?g~kp+ɉ 5d}bGeUWeFTol0juMDhn1ۑ0j`Dhf e9o-ai qGeUoo$o6o3UHLD )&# R>h 0JTlaaM>UF .?F@=~9?F î>?FesY?P >tA`  A,:?t _VČ= W:`>u eu!s7"JJ0U?HA9 @tt7 b2:*H&tH  th*N) ?MB"b";)H&e8H  3 H -US4oTZLP?FK+0j6֏@!QpoĬ?> گ ݷeo?gց́ F?i?;0~1G}}϶iM@mp?#nC%fq.?F)Ouy\l2_+ʦmx~E}@p5"iQ덋Рoe*al|ٍşןHD: )&# ;h0>Th]]9 PB IAUF>'?FЖ''?F {J?F%?P t`  V k?t˟R{ Li@Zu Mauo8#>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3F l?FƟ qP dj|aW{ޓ8cUFwW?F0ev"?F%Ti ?FGUoO 0Yifull4ajT?N~f0ci+ fEeC.kn?FtdFِ?Fd?F#l/Оol}dQ3ll BXl귺m|؇bBhaUeAvؼgԵ`r4g?FÇ?Fϓ|vflPIϮ3qoijeðl4` |7T?heFnFc^?F]`h leowkyyftGg˂a,8b1;MHD: )&# ;h0>Th]]9 PB IAUF>'?FtQ>c?F {J?F%?P t`  V k?t^i{ Li@Zu Mauo8$>    PEJƱZVJp!?&. ?Az@b_bbe!Y(݉e 6 ,p )l l w+ g?Z-#U!׷!ze Ae [+j/|//-g#+ s"?(a40 9 .*2!#{0:#t8'tϻ:Ua L2$18K;F?VEK.; t>B4AA0`?CopyrigTt (c)@2`09@M@c@oKs@fBAr@Qa@i@n.@ @l/P Hs3RePe@vEPd'P2hb<M#l1# M/P?]UhzAQ1 L@3F l?FƟ qP dj|a{ޓ8cUFwW?F0ev"?F{%Ti ?FGUoO 0Yifull4ajM?N~f0c+ fEeC.kn?FtdFِ?FBd?F#l/Оol}dQ3ll BXlF귺m|bBhaUeAvؼgԵ`弄r4g]pÇ?F*|vflPI3qoiGkeðl2` |HT?heFnFc^?F`h leowkyftGg˂a,8b1;MHD: )&# T=h0>Th]]9 Q BUF\.?FW?Fb_-2;w?P Bt`  t0zեK~LQGVBu auo7%>J0U?R<AL@tt7P }b6*D&tD  tTd*Bı) ?\A>"b7)D&a4D  3 rD 8/td/ty. x1x1`?CopyrigTt _(c)02`W090M0c0os0f21rԡ01a0i0n}.0 Al0U 8s2e0e0v @d0"h!<M#l#/Ml> @?UhLAA] 1 1"71bAV>!J`Ƭ! Jdjbq:z׍i Q 6VMB@˿acP\`_m;E,Ui5U 5l?FQ[dg[VU'EU˖7s?F'YE]fdg[pdѣSYUfe3_GN]݅om{ZcXUFBGP\g[)J`UAp3C#5 IXAYT ?F_jy}tg[3$sXApsf ElX1Yfy?FPP\pg[!XAYw 䘷?F~_uO]{DŽ~g[&SAY-]?Fr]0P\g[ip~ X1#"5j-? 
XAY ' ?F@G_|P\̔g[?P=RwX'RX__/?Fv㗂P\#g[?pډlX7UB7Ժ ?vQ[Ezg[wU|QY1GN~V7;P\4Ѥl"MX U29_KU#rA0$@CHDJ  # =h4>T#]A B#BUFF?F|ل?F?F_U[?PBt`  t} :?tӕ##L xZZBu eu!s7[&>V Jߵ?0 ?Ab "z@A $)55 l H#b"#]! #BtΜz(tψ*Bh<M#h B`"ݵ0qql?E LUL1;?k(by$<$@7F/X(a4j$"'6k/5;w+ tE͝.%p! A A`?CopyrigTt (c])C@20O@9C@uM;@c9@os3@IfAB2Ar5@nAaA@i3@n.C@ WAl@ 9HsBUea@e9@v@d@b$JM@?0>Uh#5= ! 5*A3QA]!J!FEÍ hng[GʑB  6SVMB@j]9_^fU3QY>h_"[} [U%7nPobogU 52j_|Uq"ErA Z3s#HDJ # =h4>T ZA B|qAUFi$z?Ff,?F.@?FCCZYݥ?PBt`  zԜ?tƿSᆟ J kBu5 eu!sL'>D jJBU C"?0Z& ?Ab"z@A $ʎ)55 l #b "#! #BtΜ(t:Bh<M#h=# B`!2=#0qqlZ#Q LU1?@(!*7/(*"6/(E1K.+ t>Q5h1AA`?CopyrigTt (c)@20@9@M@c@oKs@fBAr@Aa@i@n.@ AlP HsRe@e@vRPd@v4&=#JMP?0>UhO3uE= !5AQ?Q!Jt1EÍT| hne887  6.fMB^;>?F>,Se?F1Ez ?P~-< kߎ:\zzl knJz۰fL:i 54a}:?  `? D"H$?n:a@Bowr @H'c(T@TFnXJ\kǓ1HeY5XevTݤ?Fc. ?Pfz`_vNST#]DA B#BUFik?F?Fa'D~?F?PBt`  ;#?t_Q$_[G ¾ Bu eu!s7(> Jߵa? ?Ab "z@A $)55 ml H#/b"#]! #Bt9z(tψ*Bh<M;#hu BI`"ݵ0qql? jLUL1w?k(by$<$7F/X(a4j$"'6k/5;w+ t͝.%! A A`?CopyrigTt (c)C@20O@9C@M;@c9@os3@fAB2Ar5@nAaA@i3@n.C@ Al@ 9HsBea@e9@v@dԁ@$&@JM@?0>Uh#5= !@*A3QA]!J!F? hngZ/S/E@M 6VMB@j]9_]gU3QY!w_"[87U%7n PobogU 52j_|Uq"rA Z3s#HD # =h#4>T ZA B|5AUF;|jd?FQn?F&TXdJ?F2gݸ?PBt`  ؽ?tѿ벏9   kWBu5 eu!s.)>D  JBU;"?& ?AbI"z@AU J$R)55J l #beX"X#!J :X#Bt('t*Bhb<M#h#. B`"酑#0qql?!QY LU1?(b$x$7/(4$"c6/5u;+ t.5,1HAHA`?CopyrigTt (c)@W20@9@Mw@]cu@oso@f}BRnArq@Aa}@io@n.@ AUl@ uHsBe@eu@v@d@:4&#YJM(@?0>Uh39E= !fAhoQQ!J81l?F9hnyye7@6SMB@TH0pp0@ Y9%[Zaz eoQioe[:4ևzZIa e5eF/14oTi&qQB8^FqcG?FBP?P :Y@ "rA :03fsgn^KDelr:i n0SpȨ? P( ? UVj?na@Bw-rx @SHD # =h4>TZDA B|qAUFG_?FMOff?Fl?F5Gú?PWBt`  #HEw?t[J |B gzRBuj eu!sL*> @JBU C"a?Z& ?Ab"z@A $)55 ml #/b "#! #Bt9(t:Bh<M;#hu=# BI`!2=#0qqlZ# LU1?(!*7/(D*"6/(E1K]+ t>Q5h1AA`?CopyrigTt (c)@2U0@9@M@c@os@fBArԭ@Aa@i@n}.@ AlPU HsRe@e@vPd@v4&=#JMP? 0>UhO3uE= !AQ?Q!Jt1| hne]Lg137 M 6.fMBFwݡ?F1?P1/`( KP\kEen8Kΰem:i 9`/]R? i4F? +?r?na@BoZwr @'c(T@T>.)qB?F48t\B"SOxlM_HeY5XeV`f΂7?Fwڽ胔?Pw>&T{¿ ?'srmooeN lnD\YgLE~]Z?)tTwd~&'q52_U"r"A v036w3HD # =h 4>TZ Ay IAUF3d?FM?FXxA?F4J^z?PQd93@>WBt`  M|Vt7?t# | .B9jHߜ? Vu*Z?3bLKj"k^Bue u!$+>   JBUB43f"<?2&P?Ab]$z@Ai b3bbff)55b ml #/b"l"l#! l#Bt9(t*񩧉#K B`"#0qq\2#m LU.m1?(&$H7/(*"H6Ի/5;+ t.yh<M#h %1\A\A`?CopyrigTt (c)@2U0@9@M@c@os@fBArԅ@Aa@i@n}.@ Al@U HsBe@e@v@d@4&#mJM@?UhMEI;nIzA#Q!J#6F>x@=&}?F&`?F5zT MST2N`R uܡgrf'QHDJ  # =h4>T#ZA B| AUFn?FZF?FXt?F5Gݮ?PBt`  g?t80ɹ gzkRBu5 eu!sn,>U !JBU;('? ?Ab!"z@A- " 0#e55" # Q#6!b>#0(<&Bst(tϑ*"Bh<vM#h B`"0qqEl1 LUU1?0/B'7O/a/s$"06t/`5;.+ tͦ.%!AA`?CopyrigTt (c)L@20X@9L@MD@cB@oKs<@fJB;Ar>@wAaJ@i<@n.L@ Al@ BHsBej@eB@vR@d@4&&JM@?!Uh!@!9 | 3AAc1J18$FF?F˨O hn؂-؂e/tӉ7R6MB@T21uG*\\VU5"QXޒI9YSD|a\Bg%UF?htR\gYl(Ah5UNl?F[ ݴ\*aUEU-?Fۻ\P-\Kp}AhU+;>?F! ɬ\q\g:,AhU>זtY?FUCZY\/\iCy 5AhTQY{'?FAuJ\l\jGוAdUX_SFКi?CF%ds 0c2rAb4:gn0 L;jHD # =h 8>T ZUy AUFEM&,?FX?FEtT?Fy_-{?PBt`  ڹ2Ԇd?t#P.Øz u_>$ |MBu liu%wM-> # ""%"%"%"$%"-JBBU(`Jyɹ#G 4?&6 ?ABbA@L2WbT459 .a0 ]a0UL1R5U8a6Bt 8t϶:#0qql&3V0L U#A?U?g77t??42Q6?.EK; t>h0 1<M#h^@%!QQ`?CopyrigTtk(c)k20UP9kMAPc?PoKs9PfGR8Qr;PtQaGPi9Pn.k AlP ?XsRegPe?PvRPdP$&$A/#ZMP?0>UhCaa9 #0Q#!L1J!9t?F9lrif7&6&MBCʯ?F{@a?F"[?PM Q ƲOH0i!lѹ3BDirT:i1%u51:m gOp? 0dq? ##Y?r4-ABhwr>#j0@cC(e#iJ"tS?FowO8?F*&خ?P ݿ Z _l!}keaɨ4i2{VtS7Sd|#- cq}tkwK>tɏw\#~eiF'2L?F靕I?FfʶݲDb tS!}gi(@Pzpzd|;٧VzwYiY"}a؏twz#~eAewnF][ߚ~`l<5C)` [-TH0L3rA0DģRaTvd|Hzwpg-JPU4 s!w#~'HD: ) # T=h4>+TZA B|AUFɡo?FD7%?F;+g3P?FPa=!u?PBt`  ~ۿ?tҽJI6 ~&8Bu eu!s.> ! ""!"!"!"P$!" -JBU "?& ?Ab"z@A 0T$955 l 83b 2L 3M1 3Btj8tDx:Bh<M#h#% B`2ݵ#0qNjql# 0jLU51AA`?CopyrigTt (c)3P2U0?P93PM+Pc)Pos#Pf1R"Qr%P^Qa1Pi#Pn}.3P AlyPU )Xs}ReQPe)PvPdqP4&# ZMyP? 
0>Uh3E= 5QQM1J1?1!9hn}Ke71R"6MBaf$/ſ Dn/{?PT nN tom2qenלf_E.I:i FDOL?PWC  ?Q 038|!®Ú>O{b 0dw8pYDh[mÏsԟ@wv#~e5mF!u?F+#?Pҿ QC~ra2rbA 0JC:zLYON|n$dw0 kt?>|s w #~'UHPuD R # >hj 4JTb M#5MUFUk ?F[&?F")?F{(%˝ͭ?HNtQ`  y!?tA_?ѭ Σ(|rɛNu mu){.6>/J Q !JNU "?&& f?MbQ"z@QA] !R$Z)5=R t #b 2`"`#!R `#Nt(t* # N`" #0qql&#Qa Q Ua1?@(k!z*7/(*"<6/5;.+ t.pH%# p %1PAPA#`?Copyrig\t (c)@20@9@M@c}@osw@fBvAry@Aa@iw@n.@ Al@ }HsBe*@e}@v@d@45& #aJ(%@0JUp7C5I %nA9  Q!1AuJɤH pv1m˳=7 &6V"N@T3D?F'2L\5eMYSk\QeU$ndG?oQmjf E$eoxolb%2_U"rMen4Js38QQ@_R_d_v___VFBkN\wY{a^ l o}?>h 9rO#,hozf-b?F [4b\#aoܚ;yӏ aꖄ(~Fatk?F@]% a;G!Dx 0Ry@ov{ZmE_K:q ;w c? lZV? Qt:?v:i@B柴 r @S8QA ,sw %]o oofW h2A%mWvbP,?F˴mԠr\ߦ)k\{Xi(vCyRyF"HDJ  # =h 4>TZ y qAUF}!v~?F>!?F7}}?F3 _?u?PBt`  fno?t# W@O림 UBu eut!sL0># |HBU\`J=#G k$?& ?ABbA@"Wb$55 l #!%(&Bt8tI:y=#0qql# LU1?/'7//$ 2QZ6/5;; t'>h0h!<M#h0,%C!nAnA`?CopyrigTt (c)@20@9@M@c.@os@fBAr@Aa@i@n.@ Al@ HsBe@eJ@vPd@Q$ &=#JM@?8>Uhs0E= # Ah#)Q!JO!FU'vC hngQX\A7@6SMB@Trt3ϯ?V[GzaiWR}nwݷ?F6/?FgȝG?F*> ߂?Pqe> d.\SQxlV_gD|enX}el`:i :`Kts` L&d2? )jO?na@B>Pbwr @c(:eFif[θoo $::e4%JeǤ@_"k(\%JeVfY2DAoYnia<5^F}=4?Fm?P 63 J; #rA4 ze<120"yZL8t+T Z >|AUFN~?F8?FOa=!Pt`  Ie%8?t#ŠA |&8/$HMu liu%w1&>   P""%"%"@%"$%"-Jh<M#Nh#u`%Ĺ$?%> ?AbA@S2b[409 h0 ! }3 h0\8h6st 8tϸ:"%!11`?CopyrigTtk(c)k204@9kM @c.@os@f&BAr@SAa&@i@n.k Aln@ HsrBeF@eJ@v@df@$B#0UB-3]0 M2t =tS0=tD7=F?_5;sth0= t> \#-?_MVD?lF_LB#__Bl>x0@?UhL*a*a9 Az1 1!S17!A$!a7J!\;>1?O0lrvoieq7a 6&vMD@dd.H?Fdz3l\xEn;|7xO;R(PtvN?F^Mm?F3?Pfw ? ON1iM|BBy/MirFiiB:~m tu]{qnY? /b.? N&d2?r4^1BЃDVhr>#5^1 rC(@u5Pui?T!k? ph yH5w5u{jN?F_u=p@_?FdAT?PT '&Y}N\J}q y߅Q<ϵhsq(=Sg$>c(ôewϟ៣\#@uEPuM[T?F7glοuҰ|13@uvUPuPt?F߹Zj#mIhq@uPuKĥE2YǾluƔݨPu%bb?Fi$ l޿\+|?p= ףب1ewFB} EzG4uyF-{(%˝?Fr?Fߞ a;}{(\U販%զ;ѼIzϣz#೎@uaLyrt3ϯ?F;lB8|Ԧè1Ly7}}?FqXJlM|szĔx!y߶6u?*BQ?FZ[θ?Ppϐ Bеlj=*he~~|k%zµ;N7#*?6ԉܲy#'DqS1Sp>hlDr}ʨ7+U[CU @u!Lyf ??@VŲoK Ѩe2oeS3rYa0a4QR#iDYaVbT!P5Qih "8XW.'DŗV %!H4q1;  !j^du<"81hT_r!3! Re[>c&kbd1<"8vQU#`a51 HD # =h4>TZDA B|IAUFA>?F+u-?Fe烇?FL"4?PWBt`  wcZw?t4!A׳Yۮ 讚Buj eu!s82>I (@JBU"?2& f?Ab]"z@QAi ^$f)55^ l #b2l"l#!^ l#Bt(t*Bh<M#hS# B`"#0q/ql2#m LU1?(w!*H7/(*"w6Ի/E K+ tE.)5p@1\A\A`?CopyrigTt (c])@20@9@uM@c@os@IfBAr@Aa@i@n.@ WAl@ HsBUe@e@v@d@jN4&#mJM@?0>Uh'3ME= !zAQQ!JL1n@ez?F)hn Q*e7i@ 6f MB]@gS sS _]RfnfYl [aT@~TНJ9 9oi@?qQoa` e150efUFSo!fo^iMeon]nY5_,T "rA N03.O3HD # =h4>TZDA B|qAUFA>?F+u-?FAuJ?F\@̣?PWBt`  wcZw?t4!A׳%f. 5àBuj eu!sL3> @JBU;C"AxQ5h1AA`?CopyrigTt (c)@20@9@M@c.@os@fBAr@Aa@i@n.@ AlP HsRe@eJ@vPd@v4 &=#JMP?0>UhO3uE= !A4Q?Q!Jt1M$;4?F[hnRe7i  6.f MBVhgZK]2 @v&_mfnfh1:i PʬaBv$ /˸? `0?na@aowr" @'c(Ta02"Pggk| ǃodHeY5XeҔu4 /u?Zb&s 3mIܳqfoedm\f9r C:$yTwd~&'q52_U"rA v036w3_HH3s ".6HUK;#4FxMT.# VYB մ NX@ekPo+c Ro(BG(] zo] #f(] ;k] noX Fr u zz( ~H |Gh 6 x  `8 l  Ϳ q *x  D]  7B yE( ٺϰ'q ]s &u h)v #'x y q{Ͽ ~ q s Hu Pr8w Kx 5'zп ~| X~ n5'` aWxc \Hfѿ h j n !p? F%;1UFD  h>$T 6D UuI@@?k??)3P  B B*J*AȅHy\?br  0E`Bainstom,leg nd ic n LH(i^( K%X_'0U"-!0cBeO/a/ AG M  ڭUO)UU)UʪUUW}w | _x ?1?󅈂 ??8??Xb?8?=?LDrag thuesapWonodwipe.bu*B?^+?4l)!y]贁Nk??l_uzkUGD" # h8^T (YY#UPU@?@@u*B?@^+?4l?P} ] Y X   u`h?UVY g# $V. _ _u#`bA@$Ct?.n.tY$.?U"`lgnd` _/cm@ =H$M1y#i%~R#!$"+W!`Vis_SM D2 cPm!#50\06`?Copyr gPtg(H )g2 09gM c:0o of2!r0;1a0i0n2 gAlV0 8sZ2e e0vl0d2" "-X)|# '!  3Bd!%$} i$=r$&# D66 $Z##0U_B3)7gdaOC ]KTD"Di",D3112(jCh?!KrAh? 8/Ubo #1/U!-i"Qi%9_&PtSAftQ@tRT "Q3eQ^"Q,5[,@WnsFOz)IC "(C%!h h%-06`!&> f2uj2 Ll0gl0?#.Q9/K)2X*iXQa5 =|'Lw'2rq6J!a LvZ%RbY "vseaa7m5C #HR%^ii%^i3^.i5^$iE^YiHDB # 3hj8>T! 
]]9 JAU@@\+?4d?@u*B?@^P6 $JA u`bCAJ "u#`b5(.tA` t^ft/]7uL0..7>U2N?贁Nk?<.A3T7 "+&<oa){bA-3Q&!,5 JuJ\q +?'"'?\.?l8/: hP50@=Bd1d15 `?CopyrigTtk(c)k2009kM0c0os0f21r01a0i0n.k Al0 8s2e0e0v0d0-,/p3 p7> &2/4q6fDAFAF =BA/b$m# "!n"3=%l K!Z$b/ghe_y'Rɭ TA,RTCTl@ d1"0Ab&59 =9Q$W_'2r"AFX!=A0 V#LlJ aUh]E@C1#! ! T9!Jpb]e (bx(4do_nu3!~:iHqu&5!uosE2o;CrA D##8!eN0 0ymb0l2ex0sT21 0h0 0a0t[dspj0a0.`UHLD # J#h8/T >M U@@^+?4l?@u*B?@؟[?P. VJ# 0  A>uA`mbAjJ+x&x: N   ^u#`b k>te it<> 3.?"?AT@m>u`&d.W&2N贁GNk?ABa"im"+I&>#Dհg0# 9 ?^bbboz/3%"apoJT##J!#"2JN\$q0?727#?\.? pn)OBl_@%#B #AA; "`?CopyrigXto(c])o20@9ouM@c@os@IfBAr@Aa@i@n.o WAl P HsRUe@e@v!PdP#z"!-?C G6Qg@li_'>T,T"l("Tp@A"@?fQ gIXFWg/'0U^b6,1.10 f%"lJaUl~ A#1!(T#g1d2LuD 8jr"(xܯ)T?@ >20Aw »k F?rUs SSJ޾p8&t8B" ?@(mZ3X?@d*\5 +"?ywiմ|.\us8=us~Up ae i?=n.5tqQ:u=Pu{@_pPyvPEufvS.A @{3?@=m?@yj5#R>NtHOPgChNm{j{a{B4&t6P}u2&eS]ra #*z"aeSb@ PEoo t?ꟃz@Яx&3-?QɯQMLiL!Pg!Pn-P PHD: # T h8> T ]]9 # Bz1AU@@@?@u*B?@$nWs?#P6  >m u[`bKAaK    N Nu#`b+th% )'tr w]v-]-uL&$.->2N?贁Nk?<T)-J*"0+0&z#E;?#) ?A(Ӏ9,3a+lavvA.\$q"0?!7#227?\.?l.^?:WɄ!#5 Jh0@=T#115 `?CopyrigTtk(c)k20>@9kM*@c(@oKs"@f0B!Ar$@]Aa0@i"@n.k Alx@ (Hs|BeP@e(@vZ@dp@BY-?C G&$A2i b5(9 9XGW_'2r"2&X1!0 V'jO_{G/V!%3bjWkVDaWQ^>o9k__5%",@@heo]' TA,tl*"Tl@1rclkQUhm5KA#Z1*!T#X91$rqu   (8r\(4ts~ 34yB 5D M2;!srA 8#J8*bL@g@n@ SuiAtx@epHDB ##3hj8>T! ]]9 Js#U@@ ?@Uq?@$ns?#P6 mBJ >b]  k 3u`bCA$*J Fu3`bSJuthMQ5t] tU*uLNdp-U2L.W>7U5 2@ '\C#2q ?ʗ'"':#?\.?lV/:2J$N贏Nk?<h"QU-23;36<a)!b#rQt6.%)C#O(A439 ! T! ]]9 J0#U@Uq?@ ?@iFc?@$ns?@@#P6 a >]  u`mbCAJ <u3`bItC 4l5thPl]l \K<uLD#-MBI.M>7uU5 @J '重\#2ǭqU ?T'V"e'?\.W?lLP/: JN$N贁Nk?H<^GK"+&<ސa)bٖG16K#  (j4#~9 w!/lM3"!N235l2l:4B?&1Bhg1@@=/T#MAMA5 `?CopyrigTtk(c)k20@9kM|@cz@oKst@fBsArv@Aa@it@n.k Al@ zHsBe@ez@vZ@d@X-YC YG6:&Fi3b[5(9 &IgWSW_'2rU"6Z!40 SV<WKhe oKT,@dMdE"43Tl@MA @ ZdE%ikSlJQUhm>ECkAi!!4S9QBPgEqB(u/iHZt3i~yqYu[5~A{E{#TvX8esAu@t `HDB ##3hj8>T! ]]9 J"#U@UQ?@Ŀ ?7?@$ns?@@#P6 $J > mu`bC[AJ (u3`b*5t/ hi J75tTX]X 7(uL0*#-9..9>7U5 v.@J \2[qG ?F'H"W'?\.?l8/:vJ@$N贁Nk?<JP37"+&<au)b-3#6\4#p9 i!/X?3"!@235l X,44?1B@hY1@@=/?A?A5 `?Copy_rigTtk(c)k2U0@9kMn@cl@osf@ftBeArh@Aat@if@n}.k Al@U lHsBe@el@v@d@KC KG$s6Fi bM5(9 IgGEW_'2rG"s6Z!40 EVa,ִ o@+k oGa(sћ oPu 28 6: ;| ?©z xCxx? GUFDfP h>$KT 6D UA@ ?-I?3MP?% ȅH?BT ?2~t  03]`)disk,torageSLIT,flwcPat,(e^ 3%B ;o(7/I/f2u` F. `" ?&&b TJ /?|wxpf6lbx{s'pwwpl } wwLDrag thuesapWonodwipe.UH PD  (H# Uh -MPA&A AnQrQ  sQM B>B}diiMJN:!@XH`#pr~" %Nrdb#$u=P`N#"WL>$#3184+1N`Vis_S`0DL.c\m!#50346"`?Copy ig\t (j0)02v0090MZ0c: o\0of21r01a0i0nh0 0Al0 8s2e\0e v"0dh00-7 R30ER-)#?3 M?7?&/ & !ԩ=#@13 $BkFkFS)$##2N贁Nk?QIJ&QGlJ0AUppES19 !JB (w*- :"P!$bS",'Ai hiM  "ԟ (W]Z]a_*t@cP cn"!8*\e/S&@Ysg$" f$  )%c}}Uv U?a`3m_|"c2aU_s ?ovbFs}}2,ucrxsHQtXTE9 $5I _C_Spjc +gMEa%{}B\cxv (w%t%<g*hd>M!Pm`wYM#XJ&[f$@[dEfH'S#},b"41B(gǖhUw bf Д_DUUXj|J3P QX% f"%m  `b/"x,ȶ=q#mEPew_fm8J&J&@2MbS-f#%AOrlt%V/{܁/%TR$5 qmPpm+N%TA""11$2 (X;135Ѭ  J B`1s00(B`/R2%kA.Q7*QCDu:\$5sb8S"W` R2oc2DQw %AQAA" (k7?0ASW` %`&2p21vbb 5 !:#DCNg6d=O EAEA$2!8,@B"iNXG'2qkFu!Ŀ0AB57Omb?ؿ贁N8i6??54 _(H_Nf;s s)EFM߮KB#7 LB V ML?w|w{vwpwOwp ߃w pwwwww~wppwp|wp %,:}vLDrag thuesapWonodwipe.b~߿?)j?xk:3?@ ?DUGDF P# h @T(PYY*# UF~K? P} t. ! :ATAhu` ?j"6@@TThh|u# ^",':U? ?4b {[`:O!#"&#'"hQHS)# h!/L!$@" "#' @>>ÿ@qZl0a5?k4a6p Tr&232u0`-!|2u@ "23|12" 9uv `u`"[ۢ b!]u`JS$A"J%)S#Am!m'1`Vis_PRXYcPm!#5nP08B2`?CopWyr@gPt (@) 2 P0U9 M@c%Po@'of-RQr!PZQUa-PiPn WAluP %XsyRUe@e%PvPdBm!J# dV$J%T!B=S(n! 
J#bx&x& t"3#S#0UWbx#]:W,A^a`N0STgdaoC _c }a}e"!"+@t3hhB )ZtB*gtA"aa G(3T Q J"q%X!`aSl"!6u\%7Ř!O!%["0@Bq35c_J$s&F3s͵NSYFuE{|E{|E{.|U1w;,@A SUro%}h `J"G(paefefs_<%||-p 1CUG(Arb6AKR8A2;6>]q`RP^Qs@fU TPx-PP2"! 0<X@5b ??"5"?br# BmߟE}Cq`#PuJ"]u"Tju3G(fcAuK ]00` SPa*!PeWPlbsQOc+` QnkPe@fQv̢yU%q P]]A6 :T#P@P2` Yc.ĢtucPlp3Zϗ,Ob` MSbPC#DcRqBi!a]X\S''Vdx&!pE0*o سcTD m HQJ%] h%]Th3]hhUHLuD" # ;3h , T  JEUF~?FfQuܽ?P NA  #. A  FV= *oBoVm>uA` /?y $8P`fxu#>b<"W!F&Sྻupt bF{#t=!&>U2zGz?@9 #>&?A@}" (b);,6" !$i>#[2#"r6p 92"b);'"5<"V5?N#AA#`?CopyrigXt (c])=@20I@9=@uM5@c3@os-@If;B,Ar/@hAa;@i-@n.=@ WAl@ 3HsBUe[@e3@v@d{@+"BM#C Gr6"L!3'L"tu"Ei fE %T1XG,W/'0UuRr650 ,V@Zl >(fUlL!@,Y %#A!(! Pprǹ\0Q# ?aGZa# QI8p;94?V?F?_YC!X?P*G0i jOpDl~Yma:Ze~MzYf<#(Wu: wd d 2v<"#41BeB7" t2Dwr$5@5wa3eL 9aImAaFNaaGNbZXoHh{VXn&[|mciynoRZbz8mG)cuTfw8xQ#8W@_C _o#n~smSɿ>F |r}W'`TEkpf>[ktT I% s"MqP SLxNrFH$DҔ0L)_\j(\ҞE\8bF3Ɍ|6ՕEkDZ8TWoiH&7toQcuǿٿw`Ǭޥ:ҡX:jw_o,,o>oPoEЕHD: # h4>T]]9 M !AUFnzjp?FU8<\?F?FoowǙD?]P6 $]A  mJuM` ?l? $u.Jt+  E:n]qte\gIqzq-0m7>JU2zGz?@MJA@b1#z' %#b?(bM)RZ+Z&J!!5 `?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al0 (s2e e v+0d 0!E$[;?6L ?AE$\/}%-# '6? FiiePd59 FX#G'0UBŊ640 UFiJl&>Uh3 GM!Z1E!FϗjN>F-nTdܬV)iJZQVc qQHD: # h4>T]]9 M kAUF~?F˿r\?P6 mL>" ($JuM` ?AggL2DuVJ1tS tQ_uJb%_>-JU=!=!5 `?CopyrigTt(wc)20 9Ml cj o%sd fr"c!rf !uar id n._ Al j(Us"e ej v d %"6#T:$.#B[7#J! w2?/6 ?Ab A;@U2b]4-7#I# eI'/6.%7#2zG_z?=@ M-#J6^3Lj0 Cz`0^8 ^8j6dUh$ ]7T ]([!1 +(aJRj0ER"J:]SnQ7Ir5o5Q5U`oUzo;iU_S<U Pb#iaGc=cUeJ>H:a'Tt?PXX~|k/?lej> yrL&s ; _1t8~^~FutFXqxHD: # h4>T]]9 M kAUF~?F[8?F?K?P6 L6>M (m$JuM{` ?Agg2DuVJtS tH_Jb5_>JU=!=!5 `?CopyrigTt(c)20 9Ml cj osd fr"c!rf !ar id n. Al j(s"e ej v d %"6#:$.#B[7#J!7?/6 ?A A@U2b$]4-7#I# I'/6.%it7#2U2?2@MB-#J6^8Z<j6d<Bj::6iie{59 6jXSG2^'03/6Z!J40 FJlJ8>UhW Q Q [!A+(1.#ptn\Q0LR_7 W_L)E#$J kp>F|QFg[h'`T?PXX*G0i jOp\^">[O13SLR/Ɯ ""a A8l &Ra@lb<oor5o5B:TFH$DQ0mbSj0[K0HRX{5U\QuFə96oXDZ8TZ^DwWi^mi4q}doonFdal5 uueJuqrSWWqU PjrᇰYaG,H?_s  @FrշFȨM4Uc# ߗVB HX WaGU7 o-PH *^ 8/c*< )fLh >j]UFD  h(^TYYB UFjZ?F~??x<F BPG(?P } X ]B66]  TTUTTT)9T9ȅHBU??:& ?B@L&d2?J-[(\.Ym(K.sU!&/")!>%:  0 %wB`6conte 2,ma 0ag$0m$6s$0rvD2,1mpsu"2M1diB0]tF0ibX4d*0Un$0t0oF0k!e^| D(SG& iz ?!3E?? ? DDD wpGGDDtD pqwtDD@pqwqwqqqqpppw%wwwqq`Zwqpwww{w;\Drag onut hepe.Rih-c]lckaUdcos UPo etQeAvUwreP 5aabjZֿ~??cN׿ mF1?? V↑T*J??@ ?UG DF P# h @T(PYY# [U@jZ?@~??F?Pn} u{` ?Su#>>HX HlHYHH E.iOOQ؍>DDUXXllU*u'T9- &",'tU2J&5q/4 Q?"'4b0{[`:#26Q_9$"3&9"OL1" ^!'t>_@ÿ@qh@?@`B?gDP]F0#Bu)@K`-1Bu@rCC'5GAB2C9u`u `"[ bz!uK`2/3AQy1y7B`?CopyrigP}t0(c)0W20P90MP]cPosPfRRQrPQaPiPun 0Al`U XsbePePv&`d/`VPs_SBNcPm!#5P8#6P7`y1&3dpb4&5=Bh8=/3z6&3d66 2&4#/30U!r3]%ge<^1E  U#"em HqV5tw3t{FEt{t{y {et{k5t !{|tYwmAdaу*IOBT.1:a$:a&20#V2103dadaFB3J1125db6q 7~C|hhE)*^Q_`BCQCQA11=B;8ARrSqea !> b`1AE4FAނ.1+#8dT7`8XQ`]r` MPnuJPaPt%ar6aYQ` E]0u*PpU`e`tee^V5Qn]1PRd M`uU`b¯Ԧ30说! PPr#5GCHhz]3D&bQ e^\޿𿢏AP)aB#`Ҩ75^gyZ% S*biP aϷZeTZLPcX@&ϫk5N`rBDl2`i`g̨)@ߑ !RPoʂѩ|(Z\@ p(KD`ĖSPaPePlP6?ЪX.9` TP7`?>` /aZgSb4I`.3`QΡ@̮ʒ=N•wRka<`q ҨגcM_I` ad2`%c& d nĝs Nx);鑙mDatV/a I rNf󨑙 R `Qa// %;/IM`C~/⯸/+QmU`utP I`/ %N4"?4?q 2t? 
2Nv??;EHd2QC]3` W@xڙ%?NV#O05OCr a}OO#ޅO/`M&`mRyOOӧYNp_/_C;!OUc1yQQH[_m_Mqt!Tw foSU@jZ?F~?/v/vUpSp2u8p` s?bup`TaM,{t!Wp5%m-ׁ< 3ofm!Qb8q׏ݥBT!.at!2hAXf@>pý$ bUdq ҹvU֒QrrrvvvrsW"`Ro(l]10 e)Zbb UUbr}@":btbUuP`hoeo1o"4zI}_obI1I2\!b^¸aaaaFѴ_AaYY6P`u$K`bq`ݡ%q`an䑷xh^rݥ5_bv&bЂF3s͵NSYDq\rV!@&!e68E^6*E6ǐGFխZ,@rb40PT}sasavs@YVynv` % P&S.1sa`NEWORKb2H PӰq!RTٰRIӰSIQƑDži|o$ohXAE'0U?!c0 oHD # =hj0>T h]]9  5AU@jZ?F͂T??Fx?SF?P6 jt`  o?tk[>(A Vu aSuo.]&A]  *ԮJU2zGzt?@L#&A@bA#z7 5#1bO(b])j+$j&[ "*(w "?& ?AU$l/%B115 `?CopyrigTt (cu)Q02`09Q0uMI0cG0osA0IfO2@1rC0|1aO0iA0n.Q0 WAl0 G8s2Ueo0eG0v0d0"1E]4MŤ-&3 &7&l>]Uhz811U!J!u6?F+\.>djIIa? >"(EǑ I%K9E 5EU . K ;;5QIWEEEF _KnSA?8@@Bu?FDR> ?Nk"\6`bmMM\j=am {:e P> #? D"? 5-g?j4B7!B{oogr57!ibDAbE9 MuX7_'0U~rN!$a)5 5vIzHD # =hj0>T h]]9  qAU@;?FI5K?@xEL?6ѿP6 t`  bm&Qo?t@@L]WI,#u aiuoL> JU2zGzt;?@L?&A@b}#zs q#1b(b)+$&[9#H",ɺ (wH"?& ?A$ /%9#BV1V15 `?CopyrigTt (cu)02`090uM0c0os}0If2|1r01a0i}0n.0 WAl0 8s2Ue0e0v0d0"O1E]S4MŤ-9#b3 b7&l>]Uhzt1.A!JN!m.%-+0djV1ax߃S(FJ-&?F mO"> ?Nk"L[Majnha_:e P> #? BK=o? n-?j4Bs!B{__Wr5s!D8E6V 6?wj_|_ ^XE.U@z T6?F @' d_mOoT \VyWxRkrVo_bVh*g:n"'UaEjz36>CN>?Bn}]zGvN7V iDAbE9 MX+G'0UJPN!4ae5 MaHD # =hj0>T h]]9  U@jpiH?F^?@$?F]F?P6 t`  !a&ao?t,F=۳=u;u auo>J2zGzwt?@ʐLAw@b#z1b](b!).+I.&[-m(?0& ?A$I0/Q%!!5 `?CopyrigTt (c)02`090M 0c 0os0f21r0@1a0i0n.0 Al[0 8s_2e30e 0vq0dS0T"!E]$M-# %'&l>]Uhz!1!JF% djc~YZ(_N9zOMGE%_EH$SwO"K ңHE ^&_8_WE_OqOHOOi&DAbPE9 MX7'0UUfb>!$a % f1jHD # Uh4>T#]]9 M5AU@jZ?FL|+?@?F({~?P6 JuM` ?u JYt )t7zEA2&L_D?. > >tJU2q K?=@MJ&A@-b9(+bG)T+T&J[#"#a?& ?AH?/`)#115 {`?CopyrigTt (c);020G09;0M30c10os+0f92*1r-0f1ai+0n.;0 Al0 18s2eY0e10v0dy0z"!4B#e-?3 7)& b 0K8>U# ]A]A "1!ъ!PzQ <%OYAJEU . K;;\%W%EF($L[.FX5Eai%?F 6?wL;] W-\TB⤲8?F@&SA?F @ ?Nk"L'`bmMW_Ye[ٯ7 VK=F: [jr ? D"? XZvD?ta@Boogr5;!QEx]SiLO MLGXa~WOTZiAle59 XG"'0Uc&N!$05 .HD # =hj0>T h]]9  IAU@K?FOgVb?FY%?Fwᳬ?PJW?>t`  ^Z?t#ə.E @x?r?/{4܂ G _Z+u u$ >  J JpU?43f<!.&P?b3bbfz@1bAe&bq$d 2zGzt.#@LT"&r(~&i$q)(+}'i"~*#81815 `?Co yrigTt (c)o02`09o0Mg0ce0os_0fm2^1r 1am0i_0n.o0 Al0 e8s2e0ee0v0d0"11E]54M-#D3 %D7.&lUh#7 V1#Ai!J# F/?B1B  VRiAAbE9 MX G{W'0U*R2N!I$a {VZUHPD  # #>h0JTpERI MU@u6?Fjc{g&?@%r`5B?Fm_F:Is?HNtQ`  II?t#]  _ii$!`kNu  iu%wj>M JQ T       "@&&JNU2"?H@Qs#N&A@b(bU)++&RNp}#>H1#19 ?#bbbz| R&/5R}#11`?Co0yrig\t (c)02h090M0c0os0f21r0Aa0i0n.0 Al!@ 8s%Be0e0v7@d@21a4t#-}#3 %746lJ#$JUp5 3 M19 1!q(!FnElrkRt# 8$kcD?@숒@)?FP?P"EkU |1\iOT/irvs}i>`_,of :m ,M ?? S&d2? *b?r4B!B oo1gr #5!>%(EQOOOO__FAc1_C_ D4ևS]_o_yRX+{_,__ #R__ (YQoSo+o=osd {oEeooooooFyUy{* hDVW(I X"c7I!pg bf'iMjSE t%XyG|'0UŢŞN!O4i |UGD # hz0TdYYBU@(%)?Fxg^?@0߯H?Fs7Uݹ?P} t` 87?t#_)TeΦu ]ukv  ̝L&UUU&I&3 U!!`?CopyrigPt _(c) 2\W09 M c os f"!r 1a i n}. Al00U (s42e0e uvF0d(0'HS$#-## 'I?k=#ǒ 2j$B##0U'BM3@&@F0Yk} #HA%DKb5DK3DK5DKED$!GiAZG5(|IDFX7yW'&Dŭ6!4YA yVZHD  # =hj4>T!]]>#AU@0߯H?Fz)e??F˞9q?P6 t`z  ?5t# rXѮu)Nu;):O]ע0 &> # " "+"+"+"+" $+"T-J[U7 ??& bA@ 2b4-#2zGzt#+@ Li68!6 ?}; 72U!:B#AA5 5`?CopyrigTt(c)20F@9M2@c.0@os*@f8B)Ar,@eAa8@i*@n. Al@ 0HsBeX@ej0@v@dx@)21"DM-,;?C G& l>]UhzAE!A#!ְ1J`#@{&pz?FUy{?NT,dKj&b&R&M/8T9K-q?F@F?FdG0 >Nk"?YE*]\LThKjfGD&Qhj ?iU3UGe&Y`?@/Jb)h5oWKoWo;èKod}l$ro`nE2K?Fˌ%4G{?F -q? eZR^{|\?6?vodmP}l>MBe 9 N 1iS? 
[Binary Visio (.vsd) drawing data omitted — not representable as text. Recoverable metadata: network-diagram stencil shapes ("streaming media server", "directory server", "application server", "real-time server", "proxy server", "user / network peripheral", each tagged "computer, distributed network"), "Drag onto the page" helper prompts, and "Copyright (c) 2009" notices embedded in the shape data.]
sU!'"StA"3"'J\2q ?/&?\.?lA8lBt$N贁Nk?Mi JX6 0@b8+b9;6115 h`?Copy_rigTtk(c)k2U0&@9kM@c@os @fB Ar @EAaP0i @n}.k Al`@U HsdBe8@e@5vv@dX@1 4E[6D?FP?A?92~U3 7F@6%b59 XVX*WxW'M0"UF!DK0 xVZl !Uh\@3 A*iAA !9 T0#JtekdHD: # h4>T]]9 M qAU@|BP(?@I$D"?@?@fl6?P6 LM > D($[JuM`^?Agg2D)uVJtS  Ut(\ªHzG_>JU2$"z?3@M3#JC&A@b#zw u#bK( (+&J[=# ("a?& ?AR$ /%B=#Z1Z15 `?CopyrigTt (c)020090M0c0os0f21r01a0i0n.0 Al0 8s2e0e0v0d0+"S1$W44#-=#f3 f7&4%%/O]Fiie E9 ]FXsG3'0U&!40 FJlJ  ,@Q@Q ]( x1h2A!1(Z=#R&@6.r!0JR L^BO{ _Q0S Rf4"J:]k͐_Z 6%)$&aER!WD"H$?@b!0LRc #̫b_Ys &Hy|>?@ĤRT*?@mfyת?@6&W>>uA` ?"W 4HZlu#vprb #">ts b7>t  xڪ>U2zGz]?@9 a#>q&A@-"; (bJ)+&" $>Fk#2b#y"(6 ""b)T;'"i,"/Nk#11i#`?CopyrwigXt dwc)020090M0c0o%s0f21r0Aua0i0n.0_ Al9@ 8Us=Be@e0vO@d1@Y"i f b% 1Xk#]'0UB(650 Jl> \fUlQQC  + !pj!#A!_(:k#&@IJ0 )VPl!# _K|W$(^H鞜_ZU׮]~U3^.roZ'\l<@iE!S@ ?@@bJ0l# Q`Y1)X o`jϵZƽoo C?g`|X\QiD"H$W?@\r|3zrliMX%W$ |>?@%RT*?@#UX?@73zj?Pz :C3_7I1yq~Y@Perx|eN, R k̃t t c r4h1 pBGYkr 5%QPi'BP()?&{sB %!$QUhQjv߻?@`X,|YwVV!Yu?zOJXlQ*?S1UxPDT1\,(S# ՜SBExKVSFON2Mw>Kzh|AwӛJxXQzM Fw3XT1\| wH YYhw8F3 wM~VU;V\؏օRw(Λx0Z660| KrT Yg/>U +U _wU  H X&} 3 & CX 9 6 Ǘ w Y&(wx0&ȉ0W7DhQ0Xw*78E&w!7,TQ0)&.0J0T+0?n=0b!8HHw@ǭ7;O @wSE#7Y vu6S / S U UU5*FU !"U#$%&'G)*+,-./01234567T9:<=y ?D"H$ @~?@(g( "`*^p?(4wq #5GYk}D C:\Progam FilesMcsfUtOfe 14vsocne:\$03J\ANROT_.VSbw}q"4FXj| D C:\Progam FilesMcsftOfe\ 14vUsocnue:\$03J\OPS]_.VXSyq"4FXj|D C:\Progam FilesMcsftOfe\ 14vUsocnue:\$03J\DTLNER_.7VSdyq"4FXj|D C:\Progam FilesMcsftOfe\ 14vUsocnue:\$03JERIH_.VSdyq"4FXj|D C:\Progam FilesMcsftOfe\ 14vUsocnue:\$03J\NETL C_.7VSdyq"4FXj|D C:\Progam FilesMcsftOfe\ 14vUsocnue:\$03J\NETSY_.VVSyq"4FXj|D C:\Progam FilesMcsftOfe\ 14vUsocnue:\$03J\SERUVR_.VSPy}q"4FXj|؎D C:\Progam FilesMcsfUtOfe 14vsocne:\$03J\RKSVP_.XSV_F(Oq *eدUC!d -/ =AA0B{A sd / [> s >*#Q`8?D"H$ @~@PFDyB ? uhzn,T 4UFY,b'@F% @Fx1#{QR%UVq:#W1"#X1#Y1#Z1#\]^abc fgJ#D#iLA#jD`A#ktA#oUpqtuJ"#vA#wA"#xA#yQ#N&|_&l(&A}e&AA1m&z&As&AA(AM&[&p1T&f1\1{?&L&mE@&!!'~U&&U&&&FQ@JaTahaj"#a#q"#&q#:qgaaamz##q#q#q#&&&&Ϫйѹҹ⪹&&&ҹ# !#$*+-./1289;<=#@F¹ùĹƪֹͪ׹تٹڹ۹ ݹat~#숹##&#:# đΑ # # # #  $S#p"#Ƅ#Ƙ#Ƭ@PHZ$%#&D#'#($#)8ᆧ 3k#4ƈ#5Ɯ#6ư#7̧ܡ桼A#B&d("#D<#EPT4Z[\U^_efhWijlmsJKLNORSUVnx$"T`s #aƐ#bƤTTƸ#dPDn D#p0TD#rX#luvxy~{ƀƂƋƥƦƧƨƩDZɻʻ˪ͱλձ֪ػٻڻܪݻ㱖䱖檻绖軖걖못hec EFUF FF F@ F!F"F#F% F&F, F- F/F0F1F3* F4F: F;BUƘƙƛU!BӶ!B!ƱiZcƆQcƚQƮQcQcƻƼƾ» Żƻ!B1B1B$12OjJшlaJҩƀaJƔaJԩƨa<7V1jJ߈aJ qJ qJ4q7дƜ1䴩gz ĩq ĩƘq4ĩƬqJq7Rĩ1B$1BzJ"J$J8JLGBA aDh8q=ŝҰ xF6A'Et4V>>ÿ@ǵq"`e?!df,VtZW.KߞoYBi&eoo%z5 €måDD9i7{5IWTb1b1k2AC1$t2tl3t_A4tb5t6Tt%27t bCt0)9tU: b;b<$1>16AeabFBXb44 Jr%b(B)⠴QQJ=B&bp '/(>RuT&aak2h8b1jpo$oWgt qŒNy]Zy_Aaak2òr+$ɔl,֔r-bҴh8cTa{5Qr0\ְU%1Lְ&ųЪI'qóВ@ 1jpٴ@k2h80wlտqjp_k4x>al2X3s͵NSY[reUyen,@r&6EAտ@<켫B?~?o6AVc!}AJѱTDIqqB^{%9@eqH_AIUIk5VBcc!lխ}AeEn!4ّqƱVV.V{5";PSrvrOUQkiqtƱFDXF$ܙ.X m%Uz?l3bbOŒb/6Iq3"q 5BSI`OrOƑvp1\e=K.@-1`?=i{՗/xM}`?qBDAW1=+iy'qV DY݅.X1Е߰grU@ Qh@@hl6 @@"H$ @3G-Z&?1E puCQ^STj1F;FEBu]QPH$NBOOmo7l~??/ rqqH?KBbYeEB{MqqUK RKB@ BP(?S{MukH2oM 6#ǒ >.OUq_=[s?؊},y1,cɧUgrBslҏG:_//KqKGӱ4 T9ң e:H5e*<$>c w >t*'o9oVobotooooohM^?@6ə})KEqwm 1CUgyq"y$\/j~E7]+y#$ jy/$F]aviUW}K1UDbEBȏ?&8JhI6T?@6hW)2䛟џ+= >ajrȠc;Q*ǧf8 +.@ߨdv+1 zAִ <!ڵZ]oρńG=!3WJQϑ FAg\nSq ÿտyj9̣hE|[ipbe?@vS?PJW5~_a&_tǁH&BAG$^RDVhzxsmCV?@ߘw ߟ߾ VF0KluNN&BVFV?@'~ &\$NEB>y֭Fl* )/Mp?Uu?-:zvXj!3Q"?QcS/e/w///Oas߅(T?nN`r2N? 
98O\OnOOH&O*&EO.TO_!_3_E_Eh_z_______No o'('so2Gp!pq8ad0q CBom0qqi#½/{E%0x<@@KE& @@`4F?@2_R7?ŕ|{t`` j?t@ \eU2?kTS0q夑x=9 bf4qU'p2& ί!Eq'Orj.q\!'](;'!Ж>>ÿ@qXyWpp?xnB|v 0qq$!"× %SV4 `ٕG !!'9k![mKm!q I T/Ԕr!#1$'2' 34BI4AB5Nt6[Ԫu7h 0qCu HCeGa=9r:U 0q;0q<&>Իa?r(AFT0qJ0qqq`)gh(a0qpa aeͻT!!!rxf7JgDuTu_Q@rb!囂+[ ,hB-uBdtxCTaC]u`%uL!T)0!a)0A2@@rea A@ra8 Q_t| 23F3s͵NSYёE XE ,@²ra&t nTAA?tt~jZ?~? #d񼡦q!*[kx!"eruGa u/n(ѨaA'uS$^+eS$a^+S$^+ S$^+ES$^+ES$q^+tS$!8p'!AP*&xυPSrv2B}t_0OMPF%htזaAeu&iE߿a4e94C͙E}vr//+/w'AQ]? owT?{#CՙG^Wnwg]X xMhndj ?@FE+GeU2,DVh$4WO?@/?PX ?ډdQE^hN@]XtxeS^qO_,_q xl`?x7sQWbΐ.P8UNC>$mUo@-Bp?=i{Kޅ$1s$1c=?)%Il%1E'ADuoehOa,Hy1,Ж5eͷEUb`OooWKqP?Gӱa_~H[T9Wi]xV_^7H5e__ft<c >}3 -?QcM^κ?@6ə҇@)a ҢƏ؏ 2DR4tfE7M`*ys*5X^ef$F`e8mkefUWa-UDA$1J\Ayͯ߯cF6T@?@~P6h0BTfxҿ[:ajrrX6OQףf8Wi]ke=\S1+7I1EFCB 6QIa|~HڵZO OO(pj?OOOQ _mUl$K_﨟_Sϥ_%rg_op8oJo\onkAC$1!fx1[mߑj9̣`E|[i@pbe?@vS@5I5?PJW^q]o#b/R) @T>A=FZ6x aaQ$~1/-o///? [CV?@:H ?@7WLJV#UPv??bgw-?$VV(<?@"qۇ_PSv^_pWeQ&85GYQ!B @*?n+DT,E-DE.E/@!~/@) -?/X&7ن\hň%\׈鏊RQ 4=Xg{q?];t?@֣Q@m#y@w?@&`c]?@\u{7`#$%@ ךpU?@.ӄT}K0WBThzOqOO |7p#9(G?@9ABBQ0Z}@㋀,P_xt[QLA=#߯EU'N7䤙)A!ymQ?@?-@S8 F?@7fd?P& lHpR@8@P t?P)Oy F !ɑ¯ԯ[o"9߀z^a?@Tw%# 4P>Pb-:r<`"4S)E'^&{VS֯?@%~&ԦQU>@O]p?N"=X\,!>J\nj߲H  83'9K]o@U!/|[ߜ?@>֝^.w.j"ϡ/?M_EO J //A?(?:?L?^?p????Q"?? OO0OH"dO(//OB//5, ___oo耻6oJUemodoooooUeo!3EWi{w/.//pGAE+M^LWozC qSIEErMō`萏9@@9EMQS@@ -%?@H] ֯h?RZGA?AHAuA`&Z?juz@HAt  εt±ܲ7bK"Sε׷_u3ε'^稰!5LԾ@6Bs2Z0U"…DvA!c4Wb|iņ-[ɔeeDFZ??@̧"&JJqAu;O MO_OqOF#՚q<וC S!5\N`g-ޝ@]GkZBs1PaiALW A_,Ø-Instace i"mѴ󘂠fԔXGU'2Aq7u1ѿuъ %B+1ϯ)f}-U@y~M >-G@?)UesQc u䫳Ѹ Su:AOaѳ,w9җ6HZl~ϐϢϴ߰)i{ߍߟ߱CUgA?w?OcO?O+?OO3OP/ܢDer,T'9K]oG|$ Z2/Ł !D bŁNaM5:3E?m1Y1e5%فL&d@@gl@@`4F?@2R7?jt%uy{&iٹ N/ Ѿa/ [;8$=L).Z8 ek!'Zv2,w% 8-\%0Oڢ.UF0hՅ0v>>ÿ@UτmS ΆuFXjqX__Z8Z89u_0&ˑ͟md21aG+`Z8I20T11ڢ:1:3qR!!b3$ 4Y25b6r7Ȥ QCdդb8(9Vc!: Q; fmQ<!>#@"!QFJBeFaJBdQb)~_rqѱQbRJQqUO LzLpdi1!jw%HT211\Ə9gq׼]yL_atHw"qR+b,Ă-Y2;;b,TaP\p%uj `21Ŷ@.i1@m1k%w%!\@1фqU}8e\_v2v3F3s͵NSYM}`l Y5,@"(.e; Knh׿3 QAQAetD!jZ?~ ?}o!ˡ q&Ӕetq@G%Vye?klÀw%qUeˡe Y5e eFaCФ21HGHM5mSTSPrvcm1j Ueq ketooh6i?Qli9v-yAzv3.5{@2g}?_9HÇ$p]??$9%T{#C'qU%Fwg(e%Mhndj ?@FE+tr8ÁU2,Dʖ$4WO?@/?P ?Nk"(.hN@ǽ(e/*(!e:"qia_){//; xlq0x7>8Q>b.p.508U;@s>=E1qUQ5_N@-@=i{իO5eyeVdCC]mթe$2LI5 Eň,y1,0vS#2zGzt?aye-(%c:"o[/G71OCO7Kq_0Gӱc/;?T9ҷ9-&U?.7H5Ǯe>?Ob6aD<cg >]_._K_W_i_{____\M^?@6)Ɗ_0⮅xoo*i+P:oLo^opoooooooxDpOub6~E7q0*yCwz;88>՝ub6/$Fq0u56uWEfUW}_0-UDӖe7BS!#5GYk}\F6T?@ 6hW䞵ŧҏ*o,>Pbt6:ajrԕ8P+Uf89=Ͽeaد%7CUc{zڵZ// 0ńx%/7/I/km/Uڴ?F//?Jrg1?g?p ???I ]&B ߸4Xj$j9̣q0E|[ipbe?@vS^g?PJWڴ׸=243fk~ط%ŏ@׸Er_&x X!94k! O+cCV?@:H ?@7WLJ65j>53@?G Q/76֟6(<?@"qR&JRJRQa o 8?",v\hx?%\xݓڵA(4"1o(q?];t?@mom#i@w?@&`c]?@\u{Pȿڶ>ۮ@ ךpU?@.ӄT}qr 2DV&?zJ?\?|7P#9(G?@9ABBoZKՎ VӋ?:"?? OOD(ϙoN7OԤ)AymQ?@?- S8 F?@7fםd?P& lH:t@8@脇0 t?P)Oy޾ F DϗϽh 6cDo"9pz^a?@4w% 0o}u4۱1#߳2Vwl*DVWo xN&EFS֯?@%~&wpkl?>ؿT@O]~PN"ܳ8 ف1C`l~kϏ?j߲? 83Ÿ);$ 1CE寛l[?@>֝wj"/8?MOd6*O;W (݆n]3SD26HZl˟"au#P.}̥ D7pLx)U@r?@,?@HD0J?@VGՒ՛ .@l}Ϗϡau4".@ﭯdѯ㯚I$D"fOl6x\BrxSBTf2DVh/'uG);M_߆G,`0PZ~g_?@0]q?@ _ 55u??,?>?P?b?t???7~"7~p:@6M7qO8C?? 
Oנ,O>O[OgOyOOНOOOpZ?@_78M-Ic __1_JO_a_QTa_______n@ 8^J58As4oFoXoKuooa@ oooooo ^|>?@(=`6pD6nԬ???-,?f]$r?QR[1YVWXYZi_zTX>8e_A/ZAYAB ʑ$ٽߑBvV8іqug dk0ՇdQkdQkʕd1k$dQkd>kdQkbdQy{JdQy{ɵdQy{|BgQy{sedQyw+'ʑ6/6RзIRGut`N 6PZ|0ՈBƸ!tYMI_F iyDKa/ѵ) Ŕ@dʑO'0UxH!2A&AK`6 OzA8O _6j|H jDB6OG@!3Hꢒ)H@ė!kDQc@@QtK`` g;6s?tОË((뙻#2Z1g p߶ a2!`r26`!|3ր2$6H+Vvmy<< m1CUgy҉40(:L^p$6HZl~//V/h/z////// ??.?@7ߨ6r3?0?դ?/???O"O4OFOXOjO|OOOOOOOOO?/B_T]h_z_^]}~eQ^Y\х䇴_TX{ oo1oCoUogoyo^QotQoxQo|QoQQ7QXy\њQQh14OXjP2 EY߿5G,SX[ ҹ')Mqh1ѫ끾_ɿK{,< w=0 bؓqؕsMq%:UT`r+a/i7=Qߘ}r\bw2UjP?.ӭ4ba5._f@#5n5"` 0ʐ>׵IZb P4#?ώRA^!ǷQ.gQ%NZl`~Ư@1UT@Vj?@҄BP(% 2D hzPW˿ݿe\nπ7*U 5U>TF3JAeVOu3$F3K/]/"44uB"uP////6??(?@AQgOzOO1OOO,_PI$D"efl6%s6OHO_//+/=/ ooa/goyoo///ooofOV Fq4FX(vw 1uUӣ,`0pZ~g_?@0]q?@ W$6d5p?ᙃ@{43AfoӏSWȟڟ'9K]o~"7-:@dM7qO XÙǯٯ!3EWiOXKa¿Կ Ͻ|!Z?@_7‘M-^pςϔϦ$ߊY@R{Mr~ߐߴ߽|u@ ȑ?^J5-?Qcu H5Z7'9K]o}^|>?@(=ГpRV&ɯd2?0485A5787|&xQe_O/'oYKcs!Uo .odU{a9Gu\ntq$=D1$Q/-8!uu4vw,aA8! !M7U@+JR @@(?ʏ@@d=5@wT1?&VQts-8R!RAӭq8"Xʵ]U40@@JE& @) !p!Sg0@z3MMbSoeo "p"1.dfooneq (uS@2?@qp~+wU715L0"ac}1{vzw Br8"d}58 tUr?YYYF s #8"] "a@J0Q`sAnpect]BWpigt~EU0v8!ك@EKlDBuaAO`b# H*+Qc3awbuNq5qRY@updl@n` v*uFqq``Pptez@nFw*EQ338#OL$BA !srUr8&^q1%!8!8!! ---%"2T]0`57T3R]0|@69P8b6Xނ !MQ!$+%:L11$18"ɤ"֤Ur}2 a a2 A2(L@:N?'%3 %3BlUu3PRC@}5VC5 K#A˻5ӿpX}"(3!T8! a a8"(sAT8!8 DCUp` w/?2Bz8-P,d*FU!Y}X5>QH="H`5T0 Q Q8"c"pUr}}2؊/ҥ2AڤAe@2ܾ0MR@U@-ԅ1Y(9I"Ҷ"$$%->PTvk-}5 /A5[mA5 1 HZld~m4Uq"$UuI !*8".@9"(1 q3103\5G%TUpv%(}X<#Ad}%?jOT\w?%_ A:MQQ#U/# h%‘Uq}5 0r4(4!q^#YȎ$AV_q<(Uu%V?݇(}5-&NSAqOTq_d?7_I_[_m____t|> 8`9rP(ko9?@fu#XE_`q oi6qE:oLcpMhFoOONooroOomB?@?mEl+=OܟsuqsVVVR%7ImoǏُ!3EWi{cUvǟ_*/3EWi ïկ 4/>tB.M(hz¿Կ .RdvψJ^P{HUd@ ߃/ASew߉߭߿acυϩmxKM_qߓߧ߹57I[Uy1CUgyaG a///A/S/e/w//%I/. @)/'EiW/#?^3?E7rS?w???????BT@ ?@ Z%@@1W@a^<HW%1OC@rJO\I<& F%+JR @@(?ʏG_H&O//CqE&6?%? _2_2N#@ BWtY_k_}__UrS666Y\oo,o>oPobos\{jIlAb il#rOooo 0BTfxsrsfBUo8J\noڏ쏀"4FXj@?&d2np娟̟ޟ&8J\noȯڣ?q+4!3gX A?X+S Koɿۿ#5GϓۯC5G}ſ׿J-1CugyϋF:3'9]o! r%7r]oe)?PMN&bp<_G0! @0~͑!=je0Yĉ3!()5- F%$wn?@q_c?w?PixRt2``͑ Ϝ)*QtEФL)kv3Q1̅QcIo;"!'0WFF7L3UXBKQcŁMu y_ch""!1u15fÿ@iF[r؉%?$ȉ&Jg xUCB/y<<ff b;0O@Ȍd#S?``K F@lQlBVD` Q?2EX@>-48eHo?ljo֠i;̂)=D#*JD +WDRyaya3aa~D~ae76/bu\FM%{ uLFRg1{ 2ao`#UA$"A \ hN\U_;S1ׄmmF3s͵NSYF+`=\c?,@&{[@_ԏ.{d/t/nJ`2 8oJo\onoh8ShUpm@u;o=o;e3E;@E@ޒME#aa:R0vò1R~1~13]]BAA|7ф p8 ބE[[F:S;`mz>,{Q#}UU.FcŬVݐxOOq5OAOSOeOwOOOOtڍqc?@QqG?@iy؀wO __/_oS_e_z_____9oKo]ooo&oooooRooo"{^:L^pޏpS7?@)@cF bVBΏfi$6HZl~9cꑫ#5GY" *Ђnဨɯۯȣ #oW (*<]6S"ڿ17oϐϢϴ,q + Y=Oc@OS=|Av?@)jNJ%s(zL&=ub~5V#H-5? @G xaC8MP>&Vjõx&|8vJL,f?? G!:2['%B/T*d7Ir!iuѩٚAe!PヮF2$?@m)İ?@9yT UU,>PbAjAI /./6O@Z/ FVN?V l/ OUojWOiO{OOI e@2AWOOOOe__AQ/_A_S_1ݧ_U@b_9r x(j?@tGFgPB,8CEwxI5 4$>D{C?h5?ez2%H`=&4'U>N`0O6ona4$>!~?(`rO̿޿"Ig!J8J\e_w___[2,?@O'^t?@ZOD?@ t%}%(%k(8g/ v./!~rh "YR?$WVP&8\n0Gq/ /2/D/[GXoN$v&]O3(5S//////??(?:?/[Z?!~l(Or3n???????OO&O8OJOlahOzOOOOOOOOЈ[L&OP²Bv&VV;YsM___q________|o~p|OЮ9oKo]oooooooooooj-Vq$MYk}xm@ 5|t(m,'#\q&(:L^pƏA0H&8J\nkџ  20DVhy>Ft(Mg7qs(WӤ̯ޯ&8J\Q|A? 
< ʿܿ$6H@Zl~ŬъϜ Ů,A1Ѯ 4(U@$#?@qc?q߃ߕB҄NC㌀y<<A~3 ?}9bb[z㜰;S9u^`` FlwCoor[zۜ` 2.K4;q~ ~ 38M) UK珵ZV1C@OS\^Av?@)j֓(".]ti'A5I B?@J2Г$oq?>?@m6fI/Bˑ128V ҢC_7!ܤ'Iz~!-A>5EMQSē(>2V`ȓ(!oi'?Q{(!2 Pi'"%P/b*AG7 !m$KHBT;uo{"%ɝ4ǣAIE /t)2@@$=IO?@~E)01e &c2 [ts `^Q m5;j7?t@YcߦfeUgƖfeog4dGf`js%ctEQ'7 mB7!Gf|f vevFr YdM@igEQ/ffÿ@iFruC?tt2G&o;A5PjD bPw"4FXgp$Aĉ EH:KI;X!nRd@vbAACbQ] @xN#SHgNYǠ%H5E\!f?@?Ǹ1nH6^>HEKr_?@xR̍ѐ4au@"p ?@ F?@ъ޸?P{ 8C_7uAuEG?ߣGNE&0w7ƍH&%1n3KþHI%E6X,%An?gOғ$IEwě7GI_P( HV@!`XVHEOJne5&BE%19KIOOX{SA ?@?q6BMUQA^3BP(КO?@i|@&q]QgU~@|4/=@f?]h!΂??/////J.8FSA8"4WC/=I?U=g}5kA[AN_I?< 8L\\82NOUnH"zkwUjP.ÐL#?cUdaۀp_zu}`` Fg l|pCElEzp` -AUqqpp A&8JT\nm BI&]"bObPzl#}̾MXuᗬc U'(IQ4$D QUGOYKXRџh4O_^\V9U @sU)KC΢].]oP_m%h Zy_o2' ڀh!gӏ1 2DJYkʟܟ2(?ʏ0`b?@Q`L^pJϦPcͯ߯ ¿yӿ ϥ-?Qcu˱ϟ@1N\GQG?@dCn]&Qnn.]onY]^~?2!?SewIq=Q4@RdTa\A41 ITe8'gU@μ`Ar XSXU .F(2zG/z?/Bev Xh%n (}ESq-?Qc//wg9) M$\MNqNql/~//UQQ//?2-Z%&v!f?@Ƹ1n/>(?CvKdmծw? @aV/=aV7>h% 准BP(Q؏d\JߥRTڗߔ6BӃQf?? U4jUwuz???O#O5OGOYOkOaOOQOOOd!T!h)eU7_?@׿`8H0w0%:\e___S=%CO"_ćy\7ǟ@!^ߏ=^)"DnA0gT?1C 0D2WI[msҡү䯀ah!ph--8U@ph fp8?@$=X~kPD,8C$ם$؜iGPR܀ @ 6?@x<ŠW[ ƅ-dsn)EoWobqbbuAoooof%o"+!9)q$J'qy)%?/~,>PGmߑOtt߽T!߀OO_"_QF_ _XzM2qX#ŀ____oo%o7oASmgoooo3r7By!̴ !uᅨu}ϡOi`gFSuf*ҵ(֏L ‡]mOjZї_߅ę a}_I%΀%O۟,5Pbt"u (ia5%EM/F46 t nh?@:~#?@!U_j:KT///#! =??.:UD43fyPbt@>M`HK݃L㺿̿޿&8J\nπϒ϶ ў?91/4FXj|ߎߠ߲ߖx(Q'HTfx@y$@8tXK} #}P_:#+=Oas'96)Wўx#5Guuybt` /Qa?@2{jp*KW7ٯ]SS/e/w////////??+?=?O?a?s??Y?ўT>pƔ???OO'O9O@KO]OoOOOzOOA OOO X",W,qzyQ"/YOQ;QP_~~?@)?ʏ. …@˅___RLXf}f;U-sPj9oKo]bvpRbbbPzVwwavSQup`` FJplh`C]oh`orqwk` ڟ2땀ߴݴ{!dzy; C\js$×eiyd8| oE[" pQ p oon%P( o ?Z%+v!f?@Ƹ1nPbuc&U,uKr?+@M²̲tLX0qumQ?@B @ @ ?@U)8V C_7`e,;U,uG;},,u&Ӏv7P^FKҾPӡ4͆z $&QӀ`Oғ$Pۡ(z=ԇ&ߥ  K]oy{|3SNp,8F X\n߽ܧU@xw휏5_5>[obA!xͺ}ASSqIx a%U7=Q蓩} %Ux!zGz?En涧hncUUv{[|s,`w~y<<p?.Re b#2%PA_,`u`` %F l@1l "$` .2%p@ph-D!'9K]/B/T%Z" /0w$a,/a%S^1 eM<}?<: @ (~-?hѩq_k}ߏߡ߳ 1&d2?@Vj?@҄pxo#5GYk@}&n/?/$ @?4 DVhz;( a -?Q ||h4WFxExER//,/UHs (\/n/'S@/? ?2?S2b?8 A????Dwk(р!O3O\A JO\OnO7x餉eMYBZOU@{?@,?@HD0J?@VG+uu_b_t_(сooq+Ta#.@Q{QEw B@Ҿ?@?<ӃڇP,W@TBk1 U'…˅ …˄SqBW:!TS__%Wbn?U0buxo o2oDo9{bRaU@oooa1(т?Hq6BTOOOOKI$D"Hf'l6xt(_:_L_R_____o HoZo5{oGYk}ˢΐӯ=x(0߱g}"еEU@,ߍ`0hZ~g_?@0]q?@ ŀ'嗍?:!е43fy`)ۿ0Xj~ߐߴ~"7:@@ M7OqO0BTfxE:LCOasJjZ?@_7?M-!3EWi{vnh! /ASeJ@ ^J50]3//&/8/J/\/n/e//!/////? ?I^|>?@(= ?p$&ňvbn/@$ hQhQCZlc`-[dUJ**U****Q"?8**e(nehqQ-Eq*WVf L1@A7Mu4!;uU4M;-4!K u4!KE4<1K2u4H1KZ41K+41Ke41K 4 1K71K54!Gkaga qvao`@vmeVeʉ4R'utO0~va4ІBQ_Z-_VBPZF%)iGo Mu 1ed; qW_'0Ubň*!f,DGMq V8!(s 1MudMq oood- om ja!ǏY1U17!ψ!3rgf/Hy b W Xb$R A!BTAq}ߏ5߻ UU@|h4F ԁ5ma&HETjEk (S@R)1AV ^l~@(;ARASQ qdA`orjdE܋YfU@?@,?@HD0J?@VG+7E@Eg^Qv??1???$1hQ'{]19A1RO{?@Љ<4&jTrWP,W'@4!Dh'ZUcU ZUcTCoA5{OX$pkCpqs//0""uP////ÚBVB`)?;?M?(kƁOOA1OOO-?QB_uI$D"fOl6 [OmO_//,/>/P/b/.o@o/~ooo///q?oou:ё0Blq Yk}(w]1y Au5Ut,ߍ`0Z~g_?@0]q?@ %7IcH5T?e43f)oD:ђ)(:L^p~"7:@ˉM7qOȨ}Ùگ"4FXj|]1ۿ /A|jZ?@_7¶M-9ϧϹ@%7I߯ewߡrߣߵ|@ ȶ^JO5ȨRdvm5!3]L^p}^|>?@(=$p~Pwew1 QEL#Ba)cR1RrvvHzr5 Ur?AF 1#"]2"a@JY@8Q`mAnecutBW"ig:MtEav!٘@RC\YBI&Y@bCB@NFO!vXaXba5UR@ud@n2H9vOYbF``ԏPt&e@n8O %Q3:3#OL$ՒQ5!sÒrÒzr8r} (%1!!10=j--8"2T0pW57i3g0@W697P8w6m5!rQ 14 ;8l^:111"Iޤ5"zr2 "EaEa2$f,:28ѱa@-L`0@Р<5%HW2XXzuHPb}@5HfX5 K8f}:5pX75(3!T!/a/a"8AT-! 
YC P` }/'?"B8APَ @K>SQ]R7]u5TY@0Q0Q"x5"օzrԢ2؟ "T2Aڹf@:20rR@jԺ@R1Y89^7綕79#D$5%0BSeyvB5 % 2DV5pf:5"4F]o "o~m4 Q,7zu ^5!"S@5"8΁01c 313qa55%T P%(X<3Q}%?jOT\7w?J_QɡrQQ3U ?3}%q Q5d@E 8!Qn#Cn$Af,0BY!D1zu%Vz?685I=%6#CQ_*W18_?\_n______T)qP$=IO?@G6%@5?IˊGh|E@o(`1/oAiaFkE@@4KҾd!eFoO _scDeKV7_J_RsC@+ >Pbtu1s fg2s{P"4FXjbPoϏ);M_q1Ó?)bןz 1CUgyӯ -?z^WC(o?~ƿؿ 2DhzόϞK_5;&՞@ @C?d دDVhzߌߞ߰ "vϚ+߾$6Zl`ߨoDVhJ .R4Fj|//0/1J/\/ 1///////I ?>?M6?rsF5:(a?O&$~?@ ;?$?@~?@%#?Prt` 6v!2tj,2 a885'=85A7dg##)vQŅ+nł!15f6ł14 U@:PfpqU=91gB!amhOxCy<<A++@9'ŀbfgb`"tOm7B"ACySd `` F`lqlbd` jQ_2e}@cCxU-U?@UG?@b4ɓK*UU6ơ o.@CcłIqIqO\d11rRdXDQDDQ"Td!/3X>!ekZV?@'IOғoo7k <)~e|> }>]owexVp@ohX~#'E~.r/AkP( "! o%%&o_!-(11HX)\"*iBv"b,ג.f8_`WA " #F3s͵NSOYFPD~ "e2226`opM 6TR`_l³ `aѠ!3?%FJ/!!QV0UV0V0V0e"0Χԭ""/P[@H&챨!Q5˰Հ"a`CP+% PC\RudV0B7A+ ɓL%m=(1ZÔ%NPTW^`RK3@A- i`3PӢ̹XXG '2Kqм)F%!De m0'qV0 Ӣ+=Iw !0W]@ohWPD5Wsѫ_QF4? 2@'#$"4)1oV-Եv%!Ѥ%+鰕)_{/?14 7d`1CU&%%11/+!"a1j!7AYga[GQ@@1S?@O$,qٻDh8{Z rGg?@;mx>@ۿ9h7 E(>5~%?@V?@4W e`/HC-Z?@F?@P?L߯Q/֕ UI?@Y?@섰a?@@\ K/"U@\n?@:?@3pDٳx?P+k= 2`!U5rP%3> C4?@4?Pu>5 w 5mY*?@uӧ?@?@* ?P% u1E"euf $0l?@|/"/M͐!@ŧ78E [13nٶ7g+O+6Y.(v0Tߕ+L!@k zO~EAr 0K(0F0zkdOjpT꞊j\pmB).~r_'6?@v9"0'K ;(f_)~C;Wpb]/?@J_5@jlJ?@Ho co_?QE o ŀ2ţt*(Xy/<<'"sֽքy?;q3Su`` FUХl CrzŐ` T2e߇݇őbbz(Y=K 7=3$EYQ`thud/ȭMd e*uӧ?@l0| Oh&i?@:J@?@* ?P$ ?1Eh&i\9o C?@Q( BPEP >5 w h*e UI Y?@3 Dٳx?Ptk= `h9H -Z?@)${P\?;X}\Oh9~%?@V@F0L߯_h94Z rGg?@4W eh_X"U@\>@I(16o蝩 /AS?@AjЁwQu0o{sŚ'a0BTfx,&9K]oCf֩?@1Qd6(B@˵0,/kP]?@8{#΅h\* YP-hP~E ?@_pLViwM-/6%Kx?@ oi?@PuȏT/i-kbܓk"uTy f =yǃ?˯2?6l_0'1㸠Z=ص ɇllp?5Jx@&y_ ~@j{ Y=6w+4 1?@NȸO>A{?@l!%\@_^ nO69+2 jd?@oGy=o =M2VϽO69z^r`Zf5U?@F/`_`f Pǎ _Vd6ENju/-gPRrg n^˞h\9f]OK4n룠?@{׀ BiϪ_6Il) uRuF?@B5r#`n$Xp _6;KG6P} yFa%?@O?"HoV@7?d>@M'0hooaooo);M@`0 \0<~ @9{vlȋ *~d2L&G0\sVR\s ws` a$6HZl~ "q(R~@$uNH?@~HYbאztHt0BTfxI}^`wo" Y?@]l o 5?yh Ѭ:9/m?@ڼtɤ_i?@0>mĭ/ZhE" ߣXE]K2yx#.b+?@Q Oi0#d 0K&?/eG0gk̛L "Cg|pv?5Y,0xd> 9{eD=90,"f~PXetpv!Jx8Ob> W?@8+0ybOѦH c $j #?@?BY0ϱOѦ>j @-}JӦ?@զ.k]7a%_T-w< `?@*O7N$ٌZ]l : gk?@۫?Ն|^tߞ_~Ѧ_g\+P˚yP0ŵ?@!tr_ѦOt(?@.EF?@A ԝ֦i"abOtOf.5x[@?O8NpO_JN.r4_F_SǾ;~-__ge@9h E(>~%?@V?@4W e0EH߸H-Z?@F?@PL߯Ve UI?@Y?@a?@@\ KU\sQ?@:?@3@Dٳx?P+k= 2`ZU B *CC4?@4?Pu>5 w UmY*?@uӧ?@?@* ?P% u1Euuf$l?@| "/M͐&ŧ78E [13nٶ7g00Y.(vT+L&ɇk  EAr K(F0z@kd  j@Tja@mB).~r/, 6?@v9''K;(k/a H;W@b]/?@J/@jlJ?@H ?¤Ğa զ?dDVJ#?ߨߺwa4Dr/v!jP''C¦¦Iס?Sru_`` FZliCrrz`k` I_2ZU|Q|QaqbbbP1zT=O «R[$"OTREmE¤4zk~H 5o/oAoSoleoo?@#rooheooodoVhx<hA2DVhzgB᫏Ϗ -j{C^`мΟ?kp?@'IOғ#51K]oЯ{@Lϯo4ϳ1`?*<N(G}(Mqt?@Q( B P >5 w 8  UIY?@3Dٳx?Ptk= `8 H-Z?@)${0\䵇;X}a8 ~%?@VFL߯/8~ 4Z rGg?@4W em/('@\>@I(1;?Qÿ1"4FXϚ?@?Aj?wQutƂCşϱ' /A2_ew߽߭ߗ_ 0[BTfxtCf֩?@?1Qd(B@˺,/k ]?@8{#Ί8\*)Uh ~E ?@_pLVnpwM2%Kx?@ oi?@P~zT/9kb~ܓk"puTy k?=y?7~l_'1pZ=غɇll@ ߞJx&yp_ j㡀) 6w+4 1?@jNȽ$CA{?@l!%\_^ s9+2%jd?@oGy=t=M2[9z^w0Zf5U?@F/`_`k?P/~d6ENjz/-ُg Rrln_^8\>~~k-OK4pn룠?@{׀Bi/Il.uRuF?@B5r(0n$Xp /;KG; } yFa%?@O"M?n@7?d>@?M58??1a??? OO.O@ORO@`0 a<#@>FqȐOOOOOOO_ _M_D_V_h_z_____S__oo&e)FBH 6I Txy X0*Ozp`ǝ?!N\ ?O/(S ^/6+: A9e1f.]~F[Pq?R9"ͦpZݯ/[/KKbĶv./R@ %{8=ޱԿő0 d2L&LaCpV㗇RމaC|CϥϷ#5._Yk}߱ߡ_ܛ1 [Wő~@$uܠNMb?@~H^cgcוJ DYE5GYk}I}^0wo" Y?@]qo5?~8 Ѭ:>m?@ڼtɤ_i?@0>mIJZhE"ߣXpE]Kܠ2yx# b+?@Q Oiܠ0#dP+/eLgk̛Lܠ "Cg@{Y,xd>p 9ÀeDy 9,"f~ Xey@v!Jx=Ïb> W?@8+y񨌟gH h$j #?@?B^۟>j -}JӦ?@զ.k]7a*/-w<`?@*O7N)}_-l :p gk?@۫p?Ն|^y/~_g\+ ˚~ 0ŵ?@!tr/Ot(?@.EF?@A Ԣ֦i"fA?e~@|^f Z t0 d2h粵8A\q<ԦD14??#54 Glea1iIMQ޲rd ;?@8.G?@?@eQts-8GR`A)qBRx@ ?@ Z%o@)bp1@@z3pQ|ߔapE_`.ߠ4e5Deþ0#sxs?@ qJHp\ kUx\3R1BR`P2U?1F BP(?1%C]bzUJQB`6Qn(ectVRW,ig0t\UQ]t_SR]P_TR]aoNQ5 U%RVPu(dJP!nP4j|y_,S)Ry?@ж-l ?@F:\]?@?Rz! 
ن 2С9Kk)@@$='IO0|ԠNUƂAT!}?@H4GYk}桡***P,>PbtbP!3EWi{IG);M_q&//%/7/I.j4Fa/M*0L&`-//////??&?8?J?n?@??6?.??OP D @Ei2*ONO`OrOOOOOOOO__&_,_?_5O_ O?.O@O"odOvOOjoOOOoO__y_ N_`_r_T___o_o8o_\o>Poto(:Tf}Ÿԟ(㳏w@|}0?Sצ$"+b# +0BTfx,>5Wfo,oPbt x< ptV+=Oas//'/8b/t//P,qhC?ۦ/r///??(?:?L?^?p????ooonoOR/$O/HO/\///O/?g?O!6m>! 0BTfx|d$f |~%7Bfxҟ~fl@@;μ3@@P4[¿@H?ΥM F@ 29K+5@i{Əԏϯ )=@ UQQHZl~ (ѿ+=Oasϯϗϩѯ'9K]o߁ߥ߷ݻT'9K]o#5GYvnq]P}"J3՗'9K]ρyPb|oBB?>::^p//$/6/OOOO///2?4Fz?|?I/O/0/B/dOf/x///~E/,9?O?/2?__(_L_^_p______ O_mo&oMLo^opoooooTonQC 4LV=X_1+^Ni|O8 b"eR5׵uG2O_]d0'wt @@ P@@ڋe?d?P DyoQ|=u`gPof=Cwwtq `̏@utDNrjۛY(@uIwЙ_ы@ucwEnrPH wL˵2s0TTVAɷ[4T͙I[m DqA >CJEۅlBwdi&5_G_YRf_M^&иCT8TUVS֯?@%~& Կ>Nk"TGXaT)/do5Te^Addq=9LɟiQZV@@̺+@@h@@_3 w q-qMvssfss 21 sKAv_`s`+`9`Gl`HW`Xjfj wj;1xvSOyPc3zjw1cPUU#R7'KK_C ssT'%  ')9O 7ccTwH7zz?ΊM-9iFA3ru$es#񰠂agӼ&A[XzbA1A t &*2ahdݡ$DZ&QuA0Gz"DNuDNDNeDGDd[Dd[DiAHN3FNDqAHNuAHN@AHd['DN@}AH!d[HNH1!d[D_d[&Hsd[Dd[AHd[Hd[AH[ݡHd[Ah[Dd[@Ah d[IEDu~[hv~h;[AAhO[=Ahc[ҥDw[ǴE[PEDWMpv[!vBvOAv\AcviQvv?QvM񰐑cvQvQđa1 vё&aޑha!av1!c_vsv, v9sscvSv`vm 6vWcvO vġcTԍ%G?mG QAuAA4@?<gUiA4mHliAUT]I͛1RT*J@@x^?@;~ׄ?@}SBy躵Q?ts-8R2J1AnA2ʘL&d@@gl@)ppq.fC6@@z3"06HpJ<ǢuϚ/8E䱳yெ~5?@qάԵ4,?bS^R^^҅E`Uҕ?YYYF BP(?]a@JA`Co}nectwrW @igtz"࣭Ajց1ف '@lZYAdb2z @b#c +H4FaNAU5Rudnڬ +FAT`UPteyn֕Ab 5rUQA33?7 2$A/fӑ{ґ8҉1U?:,_s@au*1)IT0c577350I6U98E6;/qAqDjW'*H/mA2/8QR++rRRaaR&a&a2a/@`4F?n /ܠRb?%8Pֲ&QUԶ&U̅eU(qUpp`R/ A/12Tς/1f]Zu8` w?BQU=LyZ@ !rQA /@R@u /a942F4/S48`4QRm4rR|2RAڇ4RЖ22ܡ4Rݮ4b޻4b$4b4b4r9,fEcE?1/?Gm!O3O7NOOrOOQUOOOOrUO__$_>_P_b_t_c______oou.o@oRoPm48/R8Շ //摪/oi$ W!`3?qB*q/T@nf8Yq(`PbtĜfl@@;μ3@@9!·@?Կߟ/+awvϧЯ񓧿@i)  2߿Vb ߗߩ'cK]ρ#5GYk}ֻ#5GY} ~%@YOO2!kO}L/OpoOOOoooZ$6HZl~_}zm$6HZlve{?#]ߏV~R+Q6Zl~Ɵt|>?@Ķ-l ?@_Qx?@bAjK&Q-?o./SM?@Em%ޯDãYEXr?@ ? 7Z1dvncuSvփb?@l2˟ݟ~%a2EUgFa{ïկ0 b9K]oɿۿ#5GaQew寛ϭϿ߇=O߀as߅ߗߩ߻@? (ʵԵ/z /ASew+fxB߮,>Pbth?z??N?*(G k} /1CUl//X?-2?QV?u7????? OO.O@OROdOvOOOsbaOO4=aOO_#_5_G_Y_iw_^6a!\1_6f?G0J略_G\Gad b_A'o9oWpaosoo|?oWa|>?@Ķ-l ?7t?@~5U`@S׻?Prt@`a ggiRttR&UoẀUWވBsa0m`ㅃ!VvǰHfDztU!p!%de=@Yr_Ae#̡دoePj{cZYǰJXCbP{LS^t`` Fl l` 2@XsHb^BgXUH_@Ua(?@b4ɓKU$YV'BP]/BOX?T51uSQDz@OۄWbZQZQpphRqqBT/47SI^@8kZV?@?'IOғ@RdWe8OP<䖟8%SFP}>pܟf8xՐG+k U @ShzGĞ.r 8, "qQ 8%d_AA-WbV1V1+0!!)۴B*贙s,# 0$_!óa"#F3s͵NSYF 1 [ P ̡iQ;>bDbJf %тmWaѵ&b޵@Wb)@_A[7U@UOS_A$===e0GS UO.wp~`H&kw@wWewt!/w0wV1&aa1@a=sԪ% CruAdmDaC%NwTW݀RK`Bu@im&aS/KwHXgX'2qRf%!)t(K x%vqs O:Ba?s?=H(!WFP6'x%7Vs  We6Pk8 RgojR //#e vkDH!^B|IDaߞMSsEQDův /Yb=_O_rQ/7dL&a7a4sUSnE7a7aKA'dDap`AaQHWagXs!Q@1S?@O$,qts8T{Z?@ rGg?@;mx>@9hYW E(>IHWe\E~%?@V?@4W eFOIH/\EHğ-Z?@F?@PL߯OIHU\E UI?@Y?@섰a?@@\ K$_IHu\E\?@:?@3!Dٳx?P+k= 2`X\E⇒#S@C4?@4?Pu>5 w X\EmY*?@u?@?@* ?P% u?1EX\Euf~P$N`l?@|s_X\E"/M`ŧ78E~J0 [13n80ٶ7gۯoX6nY.(v`T+L`k oXpYEAr `K(`F0z>A0kd@0NJG`Yj!T꞊jېmB)~t@.~rXY6?@v9P'KWP;(IY;Wb]/?@J:XE\E9N@jlJ?@HဉTv$.%% 5??-hzI??'?9?K?]<t?v'+V7ֆHjP'.?a Su_`` Fl0iCzyrzۖk` ȯ2٥DbbbP1zAei8=(  (.5A$7juKO%O*Ǜ7Tҿ6!?@Qr*!";>!>!ZDaK!jqqW5ȓ_qȏڏ"b4!Щu*_ruAuX_dY,~_hoi=_loeoDoH^uӧ?@l8`|ǑoAci4?@:`?@* ?P$ 1EK!ci\@s?@Q( BpeuP >5 w ge UIKPY?@39PDٳx?Ptk= `(AciHě@-Z?@)${\䵇;X}o,Aci~%?@VpFv`L߯0A~ci4Zp@ rGg?@4W ek}gejN@\>@I(1Һ!-?ky_k}?@?Aj9&?wQu~`/0/B/T/f.'x///////?@?M'9eYewџ@`0 P<9P#@!3EWi{̯ïկ /AS ]oS;Ϳ߿'9K,T'n-k}G2,}X < ffU:Ϗ Z[;9C5 `϶Ou4sDeSF_jod2L&PEV㗇R ?P/$/6/H/Z/l/~///////0? ?D<W?i?{??5UX~@$u[N̲?@~oHݳh4TŴ?????O O2ODOVEI}^wonO f" Y?@]@o@5? 
Ѭ:O fm?@ڼtxɤ_i?@0>m1 _ fZhE"-PߣXdE]K[2yx#(e^b+?@Q Oi[0#d̏Pߪ_ f/ePgk̛L[ " Cg_ eY,,`xd>v 9eDBSm9,`,"f~UXev!JxoBb> W?@8+,`y o fH @$j #?@?BPZ5 f>j Vp-}JӦ?@զ.k]7a f-w}l : gk?@۫vК?Ն|^"~ f_g\+C˚p0ŵ?@!trq fOt(?@.EF?@A !֦i" f@|^f Z d2h4GBj)&;fQMY7-ya4aenπȝ] ;g"f&#jtm` g?"tz#&%-%'8Ka ,n#.#1*@6u]N<#m4Fcue2B Y]sO+@PfU\^y<<rG ܠ6&O ?ux0ίN7Uõ.@UG>b4ɓK^ST,`gSΗBP;2PgE(dOppjȍ_;bH;!TSa:1:1]NdmAOd2Q6dRCd;⠡*"T]d/Se$SX.7lbmyekZV?@'IOғoo7yk<ooye}>/yexpolh;K~# *%~.ryk@gĖ\2~N__S*!-脒2,+($;)*"*) 6(8P0@3FR4c5S_,bІF3sNSY#FB;Ra*%"RaR a(T!|226m]_ MˏPz],2*%`ax%% * 4 e;0;;2/׸@&0YHVUEûŸIû5MûոQûո1÷@W끊UPCRud!UNCPTW`RK0A\iPՌbHX7U'2KqSмXFU!4ibK ֘UbqaF!F!ՐbHi!|YpU 欓WјUx&? h[PU]ؘ5D\sё@xE_WTK:2I7I?[::2M_UñYañ/F!Y[UaCuR)D_{~//1Cux4Tx1uCUUx1x1@+*!av]R$QbH>!X~@1S?@O$,qXDh8~{Z rGg?@;mx>@9hҚ' E(>5~%?@V?@4W eŇ`Cu~H-Z?@F?@PL/ UI?@Y?@섰a?@@\ Ke/؊u\.?@:?@3bpDٳx?P+k= 2`C(bdP# C4?@4?Pu>5 w ([mY*?@uӧ?@?@* ?P% u1E(Tuf\ $Տ0l?@|Ǵ/(ȅ"/M͐0ߧ78E [13nyٶ7g?(w>Y.(vH0Tߕ+L0k ?O(~)EAr H0K(H0F0zYpkdO)jbpT꞊jpmB).~rO()6?@v9 'K ;(+_R~);W`b]/?@J{_(Dz@jlJ?@H_Zñ EvYfo#$ ߻oDVhz ѵhtlxYlpy/<<@'c>PZZIyo?S2u`` FJlCr[zۅ` 2+<<ㅑ\bbW1zQHDEʧ=i= =Zio$EPoHjrTfu-uZԋ:>xeݏl%b?@!rYYk}Ɵ؟oQ(?Qcukx<ڭѯ}a(:k}!ɿۿ*;s^ϸB 1Xj|ώϠk02?@'IOғϭa /ASߐ;p dߡ/dva 1io(gD= moL 2D~w`|t}(*<amKI?`&?`U@ K#?`Ugcms хhq>|ihqhqoe AAAIQQU0nuXȓcW_U ooVQ_~cUb4!P?Щ_ )e_Uk_I/VeOEu_/Ua$9mNO/UaO#I~O6?Ve?5/O?Ve >u?@ly0|?X]9u?@:@?@* ?P$ 1EX9\4 WC?@Q( B@EP >5 w Xը5 UI Y?@3z Dٳx?Ptk= `Xi9H-Z?@)${P\;X}!OXm9~%?@V]@Fͷ0?L߯]_Xq94Z rGg?@4W e-_X5@\>@?I(1_bnaZ?@Ajz~FwQu04@s_q'%7I}mW ga&8J\n$Cf֩?@1Qdƻ6(B@˾z0,/kP]?@8{#Jh?\*I-hSP~E ?@_pLV.?wMY/6%Kx?@ oi?@P:T/IiL-kbUܓk"~}uTy + =y?Ӑ/6l_~ '1}Z=z ɇllEpG?5JxT@߼&y֐_ T@j@I=6w+4L 1?@*0N}?A{?@l!%\T@_^ ̿3O69+2jd?@oGy=4 ?=M2ςO~69z^7`Zf5U?@F/`_`+ PjO6d6ENj:/-ُg~KPRr, n^Jh\+]OK4֐n룠?@{׀Bio_6IluRuF?@B5rPn$Xp W߾_6;KG@} yFa%?@O"Æ o6@7?d>@M\hhozoaSoooooo@`0 !0F[P1 P߾O~tR9րpZZ_[/KKbĶv߫_@? %;]ibtQQ08)M1ʓ|>°d2ǙL& 0!sVRI!s h Ϭ:L6m?@߼tﹰɤ_i?@0?>mrM/~L6ZhE"n X߽E]K2yx#R˯.b+?@Q Oi0#d /L6/e 0gk̛L "MCgAp`;?M5Y,m0xd> 9@eD=9m0,"fPXe9pv!Jx?b> W?@8+m0y٨L'OL6H ( $j #?@?B0vOL6>j @-}JӦ?@զ.kU]7aOL6-w< `?@*OIoNɌ~]l :V gk?@ܷ۫Ն|^9c_L6_g\+~P˚>P0ŵ?@!tr߲_L6Ot(?@.EF?@A b֦i"&oL6%@|^f Z 4` ?d2hu8Gd`ܔjagdcd!omqEiE1ojE5!AvG#O@@쯠UsܼnU{t`` -߆?t_`3{{{Ru LU_ujP5qɏۋ'd`e` ~@q( hUvJ!a [a X%G@iq@%uw1wmWP/qd`EITd`/R$LLd`1 l$ޢ3$"I4$"5$E26$27$2C$bM1$M1B9$B:$RI;$nR<4R>488pF<4_JJV4q(c4W)p4@Q!jaqiMޥ2)qeEX7' XqL&6 Dbpۤa/E(e:-T.DU!N O „g*PL= >)_Ckf5bs5+WTޢ,dT--qT|kYTa"p4`%d`uLp^d`ad0UeN j@RQ[8lޥ!N _cG3F3s͵NSY?15b-[1j|W,@‚rvQkkonxPO%/_I;VktMfdEjZ? ¶SSǪ;$̪eĸPf @ewבqHCOqZoEOqZOqZޥOqZ-OqZ|OqZkOqdДP:2&tQ瑺N SprvB2^'DoN?E۸:eõO*?@GAؒ8ebE!9qM?@?~?PTr ?Nk"ⱬeþ2?.۸kÿպFDSXeEHAƩ}˹y@!Μ8oEoΚw M?@>ev?@<ڲ|>cSo}@y0Lu~б{ZqoAZ/aa{ h@3$M4se5h:5J s 2e#2zGzt?"oAoH:eFaf#X}aeygOaǿiGdB}?41߿~իM 26`ǏW\ϟ#T&\$ >&q:LQiuf<I?@'Ş4Q.ǎ$6H+PXj|7Ԏ߻#&<ψ YV:e#__F2?)eR"?}-S"i &Zqq5ASew,ߒQ ݦ?@saY_ ͍EE/H&/8/J/\/n//////T?nQb%.^I?UGτH{<;N?_ZZq?OO%O1OCOaOsJYYO>P1_3 0*_<_NU鈨tf@CUg__Uh__dM?#=oZNϬϧ;Ap<>l{"Dƛoom oooRO vOOB ·z- ۞J}F ?@f |?PJW)tx=P?43fܨypQZ't_wCUx}B+߄q,ʞ!+;%7IC~!,[?@M@V+JƼź~ĥ>үy?Sr^ϵoU/ӛr?@Y܎_?@~ Gp!tj˝'j=G?hp8uhpo.ϙ 'oV@8Jw/w~b%ȭ^'k>#suw:|x;4w;;@1O_Fkt%.]?b?@.r $hF?@ȟl ??L ?N8jpv녜~AFO>!ԓ?@J!0q0{E=_9T?K oI6 ?@p@?@6g?@@ROF\>@? <a?@I3_h*/?@oJnAGD$o@&?8?g#oooP?t92$׏HbtuH$ғNcMb H?@OH?Ze5Bc Or@BTgExOaŁ~/o// E}8QRI P-VHGYB?џ+=OaOxb?@hH(u*!O9V_߂HZҿҏ()Gqw*<ϒrԝCU%^<@uʯܯhd! cuX10j=@K.@Rdv#5GYk@!}BTV] 41=FR3 o4~q4ѹq./SM?@@@-WG\@#GΔv@׬EQts-8R? 
!1ӄA"8QOART*J@@x^?w) p9 й.{(W @+z3"R(_:_F pSX{Vg_z_Č^c3ţC@ FqPe\gE|$!u'),"HVQC2TbPf)Pfrh йp2Ub?PF BP(?!3] aТJ1`Con`ect`rW`igtzۊУf يАKGQ\ReIa&b$pbPGz rf^!&8wtaLReNa5aoR`u `d`n`jfmY#ReFaFP`UP`t`e`5nfDrmU/Q4 (U H#1Xcsmbs@"q6o9m Ї5,@ U1U1'-"T0UP573026908'6r"AL G!<"IfǨmmAAѤϔܔ<8@ ;&_@Pf@?sྭϿPB-F %-<蘯p@bHBfi#T HAA"83T_Pd H Ru` '+c ?B(APt!/i8J޷!ղ  (%"I(f5BĂOp^ iġ`x<܃"Bݐޝߪ"ġ!,<89HŗE鿚$f O0TfՀߒߤ- 2DVpԥ Nߏ]9nmܿ*i!?" pNf8~qࡨ! !3! 3!0%\fiTkq@Hx0"5!<A9m%?jOT\w??A!"A-΁$---@??S7NC?U4A6^4zm4x5U5F+~ƃ7?7G!?y/ OO0OBOTOfOxODV:?@>ZP@-x 06,5O@G!OI?fl@@;μ3@ TpSX6_??#S+SX6??__#3PN)_oo$oHe(G!TcFFFwoloooooo y%rpRBt_[m!3EWiG!sfB*Ϗ);_q˟ݟY"a<0[9-?Qcuϯ);M^O垈}'ۦп*Pb$`G! yG!2DVhz:9?#6M򣖆/U4/QK/]$Rh////////v?4j9ѿ@A'?F?X0R_?q?9n@@#9r??? //p3%X&D/W/5OGO@p1nOOOO!ERC -6-6-6O __-_?_Q_c_u_____ O__oo)o;oMo_oqoooooooRcLVJ2_=Oas)o'9K]o8?2]lΏ(:L^poCoʟܖ/BwJ|&B-Qcuϯ) /߃ş8˿ ϟ1C%gyگmӯ |QcuWߙϱ;_ASw+=aWiݯa+iC},wnbaV 1CU./SM?@@@PN~G̿@2 a@  @@`/0 0(m/R#3%//@ Y///?~_%5a13T?f<Py???????:)@bP-OF/8OJO\OnOOOOOOOOO_"_4_FSaPS6d_v_O______ooO͙!壸66G9Xf}A/AQQ߽Wr̀ϘϪ$q>Ԑ8-O]?@N"ܰF >_Nk"GxtQc`zg{9  +ՙQ + 5ѽ@@Y@@@@/W6 F(u5,E&:bNb!vM _!Dc{/@|`&&cu0`dp&d&::NNbbvv&&&&bc6'55??J{h^A6e#c udes x vuqqeA 7@~ t9aa,5qV7@ad^AU\єG5DK~e DaKUDNUDKuDd!GUD:V[ eDaV[DeDNrGND^AH^NH|N~D\NjAHV[rG]NGvV[DV[GV[DV[XuDV[VŦDEV[AHK*! iWV[EDT1G8aPAƠ7@q7@q Wqc7@q9a7@ {a7@Q1W$ W1aW>EWKQWXQ1:WrFd!WrWQW1WA"W!BȮapJNtN!\՜[D\d1"u'WU4 QdFI 15Ѷ\.@@hXwB?@>~@vDPbt @@H\r?@]7@?>8Ә*6x< @@~?- z^IK H ůצ!@?QS&J6VewѿC+=σasυϩϻ߀'9K]6q˶˶ߓ ,OP _t?6ˢ?|ԅ2c:L^p6HZ>w{rwA?sKCzd(:L^pOOOO$/DH/l/N/Y?.@RV?v??/*/?_*_<_N_`_____#?__q_ oxM0oBoTofoxooo8on?D9oF!^m T^2I[tf_7x@@d4 ?@[s]@p,31eDV]onehwe~D шv YǛ#5c@v+ӀN`rŸԟ .@RdЯ*P? wnbNgK3pˉ߭߿+=OsPݜ?HFxB oV>,Pbt( ??.?/ $DVlY/.@RV/v///<@?R?v?????O??O*O?@-l ?@ xO@^( hEo` o{JE `H\rgZSaXb_t_Qrp-/xV__^sCӿ@YH{[0sSfffr&3PFXj|\I4WbP#d*K /ASewωϛϭߴnBTEk7Z`ԊB,>PbthzNr(G k}߂߳ 1CUlX-2QVu7 //z/@/R/d///sAi//3i//?"?4?F?X?Iv?>'s픅?e@eh\?.?uODv"O/FOXOjO|OOOOd~?@ ;?@8,=@: r3[THC5_Pv_+Y(625x@@h4_ YW`U6_??SXF?O_ob:3@u<(o:oLo^oۏPevcFFFol!ooo 1CUy_rp|t_%7I[mvvRӏA -?Qcuϟ) H$P@֙čvЯ*NruO¿ԲVxϼ(:L^pϔϦϸ6UZy~ߝ_q"  ?QczϫϽ;Mfd߈߹߾PQ*uN`rӡ#ӡI[mQ9;ZC:6;^-(/lK/ߢb/t$///////?QT*J@@x^?@;~ׄ?@}SBJ8]?o0v?9OL&d@@?gl@6?E&'O/./ C=R4o&[/n/LO^OJ0~5օOOOO8EࡔCD6D6D6^ \YYY3_E_W_i_{__TWcaV!SUb#_+bO__o o2oDoVohozoooooook ssVa20_TfxPo,>Pbt`4F??Rb? |ӏ -?QcuFojoϟ/i1w=a)ͯ߯'9K-?ퟓŸۿ ̟ޟ5dv}ϬЯ1*N`ryߨӿYkϏ5 YCU.MnM0B~xLCZ3AVIYZ $6HZlfl@@;μ3@@9!·@Z /%0J%:/9Ki////,@i?) //??_<5ZH3aaak?}???????? 
OIO1OCOH/gOyOOOOOOOO __-_?_Q_c]ZwS6~__?____ oo-o?omOcouooooooon~ j0Y<1CUgyfO-?Qcn(oԏ .@Rdv>Pb$*N-QcϏ8);qڿ˟ݟ>r L3b${ύϟo /Atewwqߝwq /M~_veK{#]ߢrqw/ASew|>?@-l ?@{?@SQGb*Єq ]?@?Qȗ[E`3qd%!?@p#5?YqeP/$\!*I!1d!b Pi#M,#]%////////?!?3?E?W?i?{?0q3??(/??OO&O8OJO\O/OOOOOOOO^?5(_:ZN_`_r________oo/8oJo\onoAS\NoooOo'9K]oϯ#BoGmokoLoooŏooW ,>PUt(:Qpuz܏>_);M_q˿ݿQ wQ0BTfxϊϜ8Q-k!KsS2RI[QfԿߜ߮#~?@ ; Pa?@4 #1DVQ]olv|NDŋ{G '@+#VBU3E~?@ҬWl~/Q+E++ -?Qcu $!3EWi{QJH//;/M/_/q/////'//??%?7?I?[?m>??q:a%?????OO)O;OMO_OqO OOOOx3@>__*_/N_`_r________oo&o,oOoOoOO __"A_S__j______Vo +o=oOoTsoooooҏoo9>P?tΟ(:KAUgԍKAïկ)kAZ~\k5-lGUDSA XAPǿٿu@%I|>?@Ķ-l ?@m?@_~m?P t@` wqwqǁ0 tu1& Z5]5 )?Bsā4 C% A/㟶>A/㳶⧦3U(NX%=<)r XA!l-xdϜ Pj662'bP+6So3z@`` FzldSl` =52e@@K@ӑ3WB=ǜ83U@Ua(?@b4ɓKϻտ!9KTTBOX" BR䩢00щT9B,9/飿鮫x,1EkZV?@'IOғ'YkxRF0A.r+='$|w ߐE|u"_#-$"B+>RI)X*e9BrFB/%,T"9/q&U _DS_wF3s͵NSYF[/)/,D  ޲궬uIl>=NI[BhnXA-?2XArAAII$IeAe6D0k-W WeAH|v1E4rA;E4A;41;-41;U417}In '|uu CHud tV"k|uNT]WZRK/BiDI T!-9 u|xXT'2qP)%|u!Ŀ1G[Q FVqp p%|x1_ c}{|w$a0eY5@HoZo0kno h0ecoo*߶õ1 11 pk?>5 ^nj%i-ݯ u-״O3IױU!ױױo}AQR3O=WNJvܕ@1S?@O$,qٷĐ8{Z rGg?@;mx>@9h E(>Ƙٕ~%?@V?@4W eƘٕHp-Z?@F?@PL߯RƘٕ UI?@Y?@섰a?@@\ aKƘٕ\j?@:?@3Dٳx?P+k= 2`ƘD%ٕ&?C4?@4?Pu>5 ?w Qj%ٕmY*?@uӧ?@?@* ?P% u1EQٕuf$˰l?@?|Qٕ"/M͐~"ŧ78Eǀ [13n?ٶ7g,QUY.(v~T+L"k {QAEAr K(~F0zkdǗQj~T꞊jXmB)?.~rQQ6?@v9#'K~Ԡ;(gp;Wb]/?@JQٕ@jlJ?@HT1A}At~_i`@RF__ȏڌoe+H"VR&wSjP'#zsVVQ?pS+Bu_`` F.Pl iC@Ar0zp` E2V0@ˀx!bbbPcz@A/oc=ys %yw[ /$ŌxRod}ei+Hvz+=Oa?@rU//8lR/d/3{/////x<=/ ?.?@?R?d?v?/;cs??? ??OO)OfOw0ZOO~O\OOOOOkl?@'IOғ __1_G_Y_k_}___eH?__k\&8J4(瀕y4$8o\ne( 0BTfxaۀJI&@J%# a ܋ zca;Iq6х`#5oOo1ȓ>eEWߟb4!~ЩE4>Pէ߅uYկ`ƩΟ $M_ɺrkFuӧ?@l|虑๱R?@:K?@* ?P$ 1Eq\:!pî?@?Q( BP >5 w  e UIȠY?@3Dٳx?Ptk= `襑H߸-Z?@)${\;X}]詑๳~%?@VF~L߯譑4Z rGg?@4W ei`#u@\>@I(17rh▔l1doox+ u CQ4w$|CQqaO}]n@@#9rstիǿ@WGp?@TuQts-8RӔԘfxw@@-{vI @)apNp@z34bp.3W"CVhh_u@ټdrdpqRTwHx@J0,,Kr٦2Ul?YYYF_P(?a]bJ`~Yb#윏+/RӀa(N5UR\udPnzƍrF"`p/UPte^nƍQܳ Qa4_2I_2Kr@;䀐,@[ap[a?Tw-Tx"HrT057@3@0`69*8@6 @"a@qt8O)njTxH>0QmD䂻zDbDKrǔDYA YA2QѻDDܲԘ0@v?<7AeEztVLKuDP࿀KDKQD E|[?\ܵD_p>.OeUMBsT䁻䂪Ԙ_sTPo41{uKp` ?w?r!1!_!onC0dd b$dKr tt2'rQ2tBfArBLt+p[r ftkurti܁܁.Ԙ9341uV14uo0q ew;2/I[m5ϏQ 9K]oܵr֟m-toUM3 a bj!Q1=40xd %a_`3 @11ѥ eU2TW!C41((XGNQ?)?;?M?_8g،w=#>3BTOfOmjѼ??GqjP±j2,AHv1v1NR0-T1:T1ee$3TTђ++WrG!G!JҏS7T8T %$)dr:Tqr;T~r@hhrr>Tb²a1OO@O= $ c`ȡmաȡggNgggggodh ggeGtQe+QRۿBZ'!'єqHqjՑta{5ta{Ata{%ta{ᅑtq{1tq{$ta{ѕta{ta{Jta{Rwa{tawf±`½ SQ% R\utpHBGP"b9KIiTj6d%Iy X/'0U Rg!K0Qj2qqjǦ!oқձß_P_2$E.xY!ay_ 20¾?@Љ?@?(=Ep3@//+/=/O/Ej/|/!F///ojFX ?xb&d2? DEEHX?? O#.Gpr?į֯E%AZ%7I(>k_ 1D./Qd1ODoqXOdMbaT4(3TT U%WUI^az@  @@`0 @@l@1?@F1~?6vQts-8R1bPaӔ b_gytwn@@-{vI @W)2p^.|SDP@z3LrpwWDWi3Qvs5730`69*`86 1 AAD9ϭKɠH PaQQo2{Ě2 TǕ/RZZ"qqWRѼ2rPbl8^@t:?=5ŨVdcPr/U1ԣvWUB1 ck2u}@Pe1p?x/ϝJ8Y7STo1qqo2 l83TQo1=o0ST` w?NR!ߝJUKTl0o2䒚2T /R "(WR32r$`BPbMB.`\&g1)`v2"7Q! l89=2T5<0/UJ\n%WU 2u:L^pPemTTTJV1$o2>E2l8k>QE-y Xn37QHP3KU!5VTXD.=(T1o5(|X>ÿ̇Gs݈GtV74r&U!1 5q{'Oڹ "87}U$G?+ !"o'2Dvn!`x//I fTΑ/ׄ,Q,ST2O4ukk>I627?NCLBR8Yq)"*! ":s ";ei1i1r>HH JJ2ǡǡEAJ۔Y5]1)ց>b?aQBA"B?P/BM(O߸!aa9ځ>"1N}LiZ_>}a}am+UOһu|>Jz5:4u\[%uL+@q+@+@ϩfT3%ߩ8O$_Óa_bF3s͵NSYđ`}uQ,@rrvٲϗ[UN%aa9a5tӁc;jZ??~?zA0000|00P4"30e\=) Li=1"GY \!@H6A3MXmMXBMXuMXMX2MXa5MѽAĭ431=r5SrvVчHEBIOO6Y6~9 V`Y`Zi132%_7?eOkE.@-0=i{"?%XѐXD# 3 U$q,ń%3C=ѓ??????OO(O:LM^κ?@6ə}^)Er%up0s}O-UDJX_oqoooooooo:LF6Tf?@U6hWI[O!:ajrK_xz+Uf8.)$6XOaM~ ѺکUڵZewdUpH/<_X|"goop /!/3/& /A૟eϟ៛πj9̣E|[iO@pbe?@vS?PJW֔[30=M43fwM[557A!\q$ptON%B8?a>z>JՔ/l~~pSpCV?@:H ?@7LJ&% >+=OasA٘V&Mn &(<?@"qۇ@w?@&`c]?@\u{x0?QnR@ pU?@.T}a'|7F@#9(G?@9ABBZµͳm=/O*"]/o///Aْ(]=$6GN7ƴ)AbmymQ?@?-S8 F?@7fםd?P& lHm@8@ t?P)Oy޾ F +4 q"4F|m a_o"9 `z^a?@ $w% r! 
}hBeJW!#*<ϩ`͉ng}o0oO20}Rs$>&]t,6S֯?@%~&Rt>O@O]F@N"@*(M Pq!ߺmS~ej߲9~t y<*<N`r-5\\[ߜ?@>/֝Ohjz?M/e/*Pq! ++=aQ"q]$ //x/O~-7d/_#5eP????OX'O`FAZOlO@DOOOOOO__)_CHF_X_j_|_____,o_hzmQoIG-!p!8{axdJ qaZomJ!u[) d UuaFAa -$ey9@  @@`0 @@x2I+ ,FB-'b4´xnCTaU6un`%JuLp%Ja a@eЖ@`pDDe >5N5_pg_nF3s͵NSYF@FEe.V,@#&DŐJpaae۳TPp11px/T_@>1$6xN 7Y17I1+=amSDp͵>T1pտbb@MM>28!8!1$FB<$311nCD 11eccӤ S*pp!ppk9 p p p p /'0UYr)Je!t!n鍲p A1j!8\0um@tOasс }jg$p70i(ݕbh!3 ywzvy)ʃr brBwkt$2D |@@AcQ1XDǒTs,c}1}1!1q?'h!H,xxw=0Y5!XQ1oXԈ!!-!U«.2aࢻ+_su 0B{5َm`rȄϖϨʐOӶOIX4b,аbPzP{.a1aXa x!  {syQ1i{@Aȯگ"1b4@Vj?@҄BP(Ĩʱ崨\nȿڿ);M_Ϲ<]%YIymٱA}ߏߡ߀ QADA0UJ\UxqgU@|h4Fñ"%ӱ$%"4@F6GUV m up# (GݕS2 5GYkhqA  1 ;QA]oQ!QzU,i/U@?@,?@HD0J?@VGEEO/a/s/DsQO)OIA@OROdO!4 amA@IAO!"3 (?@?<& dgP,W@!^h#U'Ue UdQE_'s%$ …?&?2=2u`G?Y?k?}?r;Rw@???j{g=_O_oQAo_{___+I$D"+(f'l6˱譃O_o///////?oo9?1C??nq?uD'  n(?@?(=Yp? Tu@!l?x?S'XA4<GnXA)jZ9V:?@>Z@@nn=#?@]_W?d֗em+Kt `Q m6)o ?tcxeg|OegXO֡`drQnA<uaTq|P'u0v:rCuaExcEYcm_jBBQr$vkХuvѹvRv!Q'|P:FkLRO2.My9_@EpuQS!{e/%ffÿ@iFor؛?+FZ9pAy<a 2nOd4FXD!!c%3Q EE_Q@@J@@@ @%@A@9 gr9@@e4Q5#a] iA!Q)VmXA|P<ba3 yE(%,I0\CE8\%\kE\%H\5L\5P\beT\BeX\4\15A10==+:TR\utc+BeTb}0a`iA.a4CE0I((XCD/'0U2Œ[|P!T@; >6CD\CATRY1r]CEi/?A?"jiYK!AАfCiEjJK@UOAUR 1wE|PHQŠwb!za܉_b.t^^XIt^#)ҌXotUcy?@qM6ьX^ЌXtUh?@DTbЕ` "p?@ӻ?@[ ]iݶ`{ 9C_7MQtUfv/X݅׍W5tU\(\4S=OMIo͖?saPР_u_`W` F&0l;C l 1zP` X.€MMUPPAA%7I[ʐZUBI &10z Y.Ϲ10hW񔼚  8,EaD ~ae_[Rh\0u i]Sf^m~f͉d48޿PXϺТm]ooG}?7yCmv$ '14A n͟@ߟX*GSew~Ѧ]W@r/`p!3EWi{óʿܿϑϣZl /_/1/ Ϝ/ 4FXj|;ߠ߲]^X}?@~D](FX-~]X~PmۢOiPbt1a1=OasѦprce jR-?Qcu6 (ݸS#RX/j/|//""/$U?0?B?T?|x5 4]??1Ra ,p%q??ORe)# cy?@FM6"CMAcO HDC?MEavjt3}_? fOMfN5E—!FuNRea嗖g5Ҙ^"?Pe?ڃ e' _(_:_L_^_p______Q܁_oo'eA139eEZero?@w '?@,%?@"wŤo .sx53c2>oG@Npu&k֙CMALJs|kߢŏOl~#5G0Yk2Wҿ $h1,DAPA==sU@:ߌ @ 0?@(|FPF,8Ckgkh!fbmD@ BP(?@?x<E*wEhtbr/ ru|3b-59K ]f1x5$7N$B6b`O O1CUgy 1$6'o9oKo]oo㉋[+ȚhBӬh3ûooo8*<N`r|%'1CUo5[;A!TE);A*Ə؅T; o)u{ݒB%͝rޕ5F,–𒥨KŊyjҘw7Y´`foh55ǯ o1/C/U/g/p////$// 1??(?1(#:p5?U@?@Npj#?@DWH2??9?OrA<%OT43f wOpZ"O4OY' 2Y_{9o/Eͬco 9-Q/ 0Bv/fxˏ*Pbtq9~D/e//(/@:/L/^/p////!////? ?2?D?acOr?@!x;5=c??????? OO0OBOTOfOxOOOOOiOvО2__,_>_P_b_t________aA o$o6oHh],ᡟva]jipponn=#?@]Wi0@o{rϔF#v֑xy<<L^prbbz@@Su߀`` uFlpCoporpz@` 2T僚q3ט44tv ~oz$KQuǤt8pgҙ]) pq p )GX)Tjcy?@bM6ыve-Wh?ʋĐgʱ b"p{048 hPXϺ?P8V ӢC_7sug-f?v/Xvgڕ-"%nϔm~~6=xׂM_qAʂPFIHA vD/7$HSDӓEdVl⛸nʁ530?܁\s w6kXBh ݡ??bq????O O2ODOVF sྭ1B>?@?/ RYPOOOOOO__$_6_HSO_a_s___oo(o__俶oKo]ooo!ϥiooooq%7I[mF9h?@wG־r[Ǡ[y@`BPՏ  /ASe9!BŸԟ敘k5U@>/^[CY~ʯܯ摡L՝ⲯ("DW (=OaSݿ4:rϩeIǀq | !!,>pPbv]VϨvݛ?@PpmpE?@[^e%eCUgyŃ79/m//O@p*/vf?f?si?o'eozOOOOI[eP2W __-_D?_~#W_i_Q___(,wyD(D_аD?@t ?@'@Y>*xPJ,8CELEG04@ BP(&@x_-?Q$$k}ѝ% 1@@oRoy-Qcuu_Ŭȯگ|endفʹe%oK/]%=oas/rbo_/?&>ȵ?>Q>c5m4Рu>}?}?v J2vH=6~D-7UN1zW8[MT9@a=Du> g@LOOȿڿO"4Fd%`rϒAϛϭʈ(c c7~?@gh?@{gߨ?eN/!/`&>/P/p!g/y///////I$FVZH+В8;O 8ӆү8bc/?A?S?e?w????????OO+O=OOOaO^iOg=.UOOOOO__'_9_K_]_o_W'__Q_____o"o4o@7cł?@E 8E\8{oooooooo /ASewg$*`&d2pΡɯ;cBt`ہ t(?cBtP֓ yҗ a_T/*xKXs5fÿ@iFspU`^pU671OO?.v4пҦ*B'6<?DM_5 _PZCB쫿\8&>G5D=!y<<AOSOeBhB@sOOOOOFh[U?$vf0Pg!'$J88)T_1@?j 2*+Ԃ++8ԟ(1(1-R__29Ta?Q6!um\5%uL5?P$Q)k @?oD 5 +N5_?ӒwѢF3s͵NSYFk*cAV,@ o_5pd|TP??X/KTJJ@++=OaXyMǧǯ٪VhKm@?.]0"T?!. 
2x x+qb1<bggB3V_2--G2IIlrёїr7 rOQOQR9cR:;A<BA>"{32nY;;E!|-?.?/?8R?0?)3?4?5?6?7,/'U1(2?,?e\4Q#-)QUk_}_]K#"&QP1H"&1?4!;4!;e4!;+4!;qe41;e41;a%4!;E4!;Au4!;lu4!;u4!;u4!7Hhtq$@/PVUфRCut0J.@RYp 11Qʌނ߉a/[!__ltDw=0pceS1!:o߉iouoo ad-ag9lo@7=Qh}OѪ #ZDzGz?;ewa%wiU۲ֲ.V΢baۀ$`_Ѡ7I[mϦ$+=Odas'ÁAI#vFў棬!¾R"IGYD1!4F]coзr1$@Vj?@҄BP(s|s'9K]oɏۃ*r(ޟ$D8!HZl~۞1!ݯ 1CUr|h4F| |塿Ÿbֿ@ 0a%S>ׇ (`rSb$69]ҕ܂ׂS[r;uV0$6H="R5K*Q2//:!L:/F/X//I$D"?fl6|xS,>PV?????OL^9AKO ]OoOEk3OOBAOOOA(W>yQkQUI_U@,`0lPZ~g_?@0]q?@ "+__Y?_H>U t43fy?do-z__4Ykq~"7:@$M?7qO4FXj| ď֏5'G/AGSewN\jZ?@_7"M-%7I[m6ѯl!3EWiN\U@ "^J54aӕп*<N`r7ϟϿ`$M]^|>?@(=$pSTNAANNq/5sOI$UaQcuN%lH% FGOJBAVmNJ r"TzLuNYu@yfu120g1t6aa^R34 2Xqq#7´ V!! V9ܴ":$iB;<>QX6"7!q>*e.azn(llGv^Up!X_5h5/!;N)<"NFN>NUANBNCNDNAE#.צ.@N:NeڱprWڱڲu11=!#Ͳ[`Z!; PDNy15Ԧ6L^U  5okzs@z %q)8pR{`'utSЂaNz8pV15B 8aP`@ :vL8~ڠ #iK!tsYedS_'0UŌ[ʢ!@djUDKa a.ͱ8F"ea 11  jʴag }ŕW%o.f!33LZ [K \@k״^QEcPaDbT!QQ. O.3/sh}//88Nw=0hń4P|>7id/d)Ł? ??ujtMŁySeh?H7=QЌ8}ў8C?b#S2zGz?as 2QS>_PWˆTOTwbUbb.CB5oGaHoa`C`_q`ooooojrPp____ jcaUIQb`bPzK mA1oinAӤD\~ ~>bk???OO+O=OOOaOsO1@Vj?@҄BP(  OOOO__+_=_O_a_sSz_____/oAoSo ooMqovooooܟioo(uVFXuխz wU@|h4F ԅ9qLXnЇoW ( S@򘟪Ο-eZbp@,qVүW Ghdv h܏jU@?@,?@HD0J?@VG+;D囲Ŀֿk#b"}ߏߵ߰$l+́VCB?@Љ<8nvP,W'@8̈́A'^g ^g̓sց\tRoR̓twω!0 ucZ͂dڅ-?Q,q !@1CUFyI$D"fOl6#_q 0BTf2Dςh3;>qA4Ff ]oځ(a5IA!څ9x,ߍ`0 Z~g_?@0]q?@ EE)/;/M/LX?/*i%443flj-/D:>qB?O#AO,O>OPObOtOOO~"7:@ˍFM7qOHcOO__&_8_J_\_n___C__Q___o!o3oEojZ?@_7ºHM-i=oooooo@);MDi{qv"%@ ȺH^JO5HVhzԏ qE%7W0Pbt^|>?@(=(Fp*A'œ6ȒV` p@XCs?'tv ѽA k&k,ܵA>Hm6 ,6I T(!(!1{$3n4I5j6>7Ʉ*&Cք 8ㄻS9CR: ; A<>$q\򒱒FKEAQJefR)`}¤BBA KO]OoOOA"OOOSt9p/9"š9Ls%T.ª(!8]gw>_ԝiM_jaau9‚+f- zRsKTa0\dk%u`Ё9Ü9ű] 4@a8̦ղ]_|âq D3s͵NSYNnY}V ,@r֘zUnX_4$11JzUtAjZ?~?#_AJԐKԐLԐMԐNԐOԐPԐQJԐRԐSԐUԐ VԐYe恎 6it[9Y#Lԕ^i~^i^iE^in^i^zU^A GԁԑII5v؃~UCTSrv37:L-?TLq넱ßovqzU?vYGB Y_,_.KYoY~Cz.VYZ~S$6xW/pu-)IwP&g9e)?ة<&%F6O<>EO*?@_GAؒd8B1bE09qM?@?~?@ ?Nk"u2.zUUUSg1 A +y@! Μk(~~.w M?@>ev?@<ڲ|Js>-r!bu~%>@y0Lu0{?%~D>3p q/] ԕOU$CYUv%:5ԕx:5JX OsXSA#s2zGzt?a:~~q=wUqug,qut'^?p?'iGdB 4'+իM) -&UA/(.6`Wk/?&T4&\3W C>5MIO[OxOOOOOOOLf<I?@'Ş4`Q.ǎ+a"qups3_E_WY+Pg_y________ oFhv4?5&v )3b# m/ ~d&$&ӛr?@Y܎_?@~ _G.13y#6Q/ y%u%V%3Kh"VH[ /U@h`2p,l?@6Lbмee /G3 U?g7eTmph"|hpQfkqwtR~ wtS~wtT~M5wttQ{wtVz~q Q $o6o e/-OuGYf/w~b%ȼh^k>h oI1J4JJO@^_¿Ut%.]b?@._r XhF?@ȟl N?[+8jpvhUM5$!ԓ?@J!0q{E=`/9TNZI6 ?@p@?@6Οg?@@Rً.@7$@? <a?@I`Bȟq9K_qS/w//ݏx Z%@#u GxŻÖ#/:"?%?7?I?qH(ڟ쟡|Ģ?R"L#}[?@#jHMż?@U)?P& lHg#@.Cܙ?@Z?P)Oy޾ F `4qĿؿ8#CcqohS>?@oJ@%nA#D( 35Ge 1#_߃$w3Wq߃߄_@$30ËرNr*Vb H?@O N*Bc @BTg%@Ex(q^p#ϼΏ E}8*QRI/\dr !Z/`/Yk&?8:/)OOK P^T1宦Bbte1T@;M1ZBw8PUThN_w%"Nva-cmpu?te 3@uq$)=\d !'ۅ!уEut 88k1y quq G[ ,@D!ddeMy\dPЕF1kjD1=&\>7?H!3"$ھM-W>RÂkt*/C gW8'ёuu;u1jc1'@,'u1^WurdհuM2Y Ac1èOWM&uMFfÿ@iFrƠ cӅ?(dfЪIUzSfqy<<1CUXRx_ ÞπȎWa H&2_N01? ^CM(2gʹ A@Cs6p\!Tڑ@SQSQ1؁؃@)t*+zz-)R).cTa1s6`%@-uð`1a@Ձ[DxĵF[p@ b1F ҮN5s_@T!uu[ A1qqR3G11R Bhdh279Ă8F229S:`a1Q1Qп({U%h?+@D2ʒdXI~aЕ] "pϮ?@ӻ?@[ ]iK`{ 9C_7U!e%fv/X'U%&OQ"za&CGu[8nj Be<A_%I$ l59/K+__J_\R?h\0u?9-Ɔ6=Nm6448SPXϺA5d= =OB?zM7:Iv=F(W>PZsQooooo$6~_K]U!z_]WBEb0@BTfx ҏ!3Eğ֟@dC ?/S1gyI ӯ -Q]-^X}?@5N-[X`N = 挋NK= 'P^QϕϧϹ;y\2Dddp߂ߔߦ߸)@ R35,>P"`r| (Sg2 W%Qc uR[ *Ի-QQ1 0XA$/6"e)Vcy?@?FM6Uvtf${wCrv̧3_? ʾ169"-6.g?!Fú5fg5x^"?Pe?  
c=o65ZgeAoSjI/[/m/// ////%]/ ?*1a*?6?H?Z5j4 ,E5?청?@w '?@,%?@"_wʕӕ?=OOOaC1f3q?2w_o-o5@NpuYo̖Mv̖ALJsoՕVhz 2Wɏۏ",^7.@RW8,g7 U@: @p 0?@(|G&ó٧PF,8CEGHf2Jpmyt@ BP(?@x<]GDCOO2RBuOO __BB`Ql_~__JVQ$YWuirD0/@dvۿT_+KKWiZ?l?~???㉋[J88OO'Ok]OoOOOOO##_5_GX Z_dvQh+*n:wne]_ eC;SoL?BG)uͮivBnn}rut#~CD+h}yr$_}fug~~yjxwY p`fs O7S?Idv@` @7I[d(}DUYmU@?@N@j#?@DH2ee+?:fJ$43f@i*Ug)QZcL@@?@%?@3sྭFO]{m1uP` DCo?zuP~p~tщ )u1ۻt揯-&|֘e3׆!jɈЕuPE;~p'pv2lRqq|uM?i峅А҃p~\ xZfÿ@iFGr+`6+*]vwh&?ɱCE[At?PjwOxbP1CUgyqBAPzLxG|?ԌgMDa ձ4zhDEɭxAT q``1$3p)R*VA+2aar-6r.. r3Ta62um\%puL6pQ1 QOPE@oSDU VŻN55_x_rDCF3s͵NSYFOuufV,@BFS u+/ppƔQ DPH TR.@4/#c?r0@V/?!?3?E8d]=cvW'l%f:O~md ?ܾ? phbTDQ2R\1\1VCC1DrMMr3T rb-! -!uuE7FTpHM9`T:mT0=>~T)bnttOOO=AAuϳpDxet????1V???OO(O:O^_׽9?h?@wB"hm-vǘX(~4_]@^_ [_______o oH j?oQoqaqo}ooeSѭojS8o&oU@BAns@x9qLq@X}mx㕺> ( ScΏ-d5^ep 1_븕 7dȔ׽Œ+ 1VcOuFݛ]F/9vEKɎHeE~`n)c ?Pf? N g N`Vhz@¿Կ2ik77CUϡeϑs㙉UŲHSB?@P@mpE?@[^%%"4Ӹ>Qsʒ~?ק(:B@p*fuF&Z&si?%ϯ?:cu52W9bl/$/D!;/M/_/h,GI/fD?@t ?@'S8PJ,8CGHqGW@ BP(x<" 4PE?0qrrC? u,O$VBmy撚W{xQ$ŗ7d??"D?=M^qOOOOOHOO _m&_8_XQϑX_d_v_gyϋϝ_ϊ%Ȣ/ ?|o4FXj|ߎߠ߲߼oo 00BTgqu75A)G4QA j? ?.`Y-bo_D -(0PM(Q81J1l §~%1z_[MTa0ĕ"`VqˏݏDn-MDVhXiq(WQqbaiuȟ#7~?@gh?@{Uy(N!Hϛ'G+ONBH*X(V!۸1O'Z%/*-?Q^u@N!|zT1Zѧ%G4GAI!4~W ;?@!Ƚi@X#p@7beQt?s-8R@J}Q_QBH@oRdޜB @@^k)BpgG@z34booApEX@. t1v0B~<.Yc@=(R_?@qp~иu|e6Q R^qcABRVEuU2/S?F BP(?B]Ba/JPQ`MnecutmWigtVPE*A/2\9n5Iw&Pb#W.bѓ܏aTrnNw5|Rmud@nTY+rnF\w``:PmUteTn#ZQ1C$}QC%B(h؁7]@E@ jP-"T0]pW57Ϡ3͠0qW69P8ݠ6ӠY!AD ɠǠBID"Q^B##Zҫaa+bѤҒHģ@VR@%]?8̿P~bE|f+e ŢFNp@h"TAHaaBH.TA@9` Vx?BHPFJ;PAa@ur,1ÑϜÑJTBԒ"B ,2+b bZ.9H"S䮱|bmz qH9ģMdT% E6HZl,5+e  e&8J\v% E@7m(d n!ߜuģ!AB""H4mBbPB`3נ0%%T!PA!E(h?@Ķ-l ?@O,r{@=oQhiE|o`1oiOCl>L@@"g x_VF^_p_"+sf[xV__k}\2C@(FV`@Y1(fff`b2DPRdv<ЊbP]#5GYk}şן 1)b=Osͯ߯'9K]oNr0v|З! ~?,>PbtφϘϪ|߱o>PEk7Z\nC:~MF甙{d>߼(:L^pdvJ$f(ߊߜ~3,Pbthw L.pRV//v/.JH?FD\?..?w-ALBRIJ"%%E-`E: #`eI]1@ @@%#`ͪ@'bK5T5k?PzԀ} Ri}!ɲuWE`ܰ S6ɲ)ϳ7t1A X"$57Z^ 5tб2?Nܴ\a-ԏ- LWaW2l0-s0hT~pUo[ DDp5z𽱇LBJ%0u= <U/Hzɲ9ɹ@WUM6_7ܰ_Y`e^V5_oVgJ-^@o_doJ%__ooW|hZ3D?G w ըCzDps)aipq 6av"p p#-7 9t- X*G'2Y0k2!!Q{ 6-WjB {X*GOv!3v~ԏ{!x- /"1$/.9[%476I%0OU'9/A @@.V ?@`K @c(݀Wߖ}3z -Дz^ @@x=g? 60? yGz*KܴTU֔~@)Su=e0u筬6Ro/JFQA給-@2eC?YYYF B_P(?|H]7oaJ`}`CogpnectwrWig ut`z@`eY-@CV QY.`@zb#cBɶ+ҴN`Ru gpdpnhɽVF3U^Pmpteup5nɽAQC>-  EBMI֮ҿ&+QtQ:)A-HWq0.730\@6186}l6V] Q>[-$huOrǂHaaʜoѩҶ>A@p%FV^? UǙ-"GOuP ovj(-rpU] *T--ΣTկ>-ywD(q4twEeҟvoF\Kq$-%Or )ΰ8oCRd]VjKqwA2231"*)"9"vBU )Nv /./@/Z/l/~//e////o/ ??0?J?\?n??????u?<(/iO7O?[Om䙑Ou %Tf(R%X=If e*`3аA2UUCTEN"љeQ(?t?A/.O/RO:/vO^/ //OO/O__*_<__`_r__/__1!q__M!q oo0oBoTofoxoooooooo&tz)|9rOcRxU%jwup14(ZS_OEy%Tl6f@@ ,]l}?@?@e ck?PhtuဓHUg/GQҲ/{fX"$Ǐ j.8M%Ȁ铏 5yEDi L!Tl2%S0[FFH[D,t[[:e"Tk|krqzU0?G=eO}OU/MƱJWOiX^c䌇 MefvQg\eL eՏ:]e$HUeΏ䴅8HxN*_E 0UDs aMq av K7Y=u(XwG'2#`5bQ!Dq| VjW(l~&({w˦!3|@ܦˮUEOQ{!O\f_v__%]47)Ѯ#X_U@z?@N&d2 @@v:N?[@ ¹?Ł9muc߱cIoqf_o[ah ?hi2¬kDA((kDO)ѦDkDҌdo 0BTfx+1@q59tslwcbzpub޲@S^0`` FclpCopoYEPyG@` ‭@!\%S5?\.?4\[4&ߓhAE=# ~"uubt4FXƟ؟6 bD:^}  P:LͽK?`zA../^uNv[-cmputea42ۺd%SuD0S˯ݮ鍴89yG$į/80 u43$=>A&IAIMEU=CP #SF5K O4K0O Q(:FaVOz19Mw> q`*7kjtOOh\^̵Gl| G QSh[Q5B"kˀ&pEɺanx<@@B&@@`4F?@2R7+?dшeA9ݕjP \ej WW_e ԲEP)Q'lp&QO ew.5evvv&Ekf>>ÿ@Y52?;15Ȳm14S;RqtX\_n_HH#elr!.r0m #a$Ë4.5ITlj j l1ݔ&3MNN15m627J+ C8b8Ef5Q9R:_e ;lE! 
?Nk" (.hN@ (7e/*Oe.ÝI (; xl x7(wu.bݑ`.Ø 8U;^>-!u%>@-@=i{O%DwqDTt3  {U eUpp5+$,~Sy1,KfLS]r~c#o׾n??,7Kq Gӱቡ$/1;T99V-;&LUw/^.7H5e/?4<ceW >kMOOOOOI$]\ ]_U@M^κ?@6əJ) rupse_y#S2zGzt?+PYy________ oF`v4?ef&~vE7 *yGCgj`od(.$evf&/$F %96pպ5fUW} -UDD <HZOO__xF6TR?@A 6h俥ȥCUgyӏa+:ajrA5od( PU?f89-ğD9,8J\xqJbAZQcuqPRnU^Ԉ4?koD/Th/Dbg /oo/p/ ??DkDgy Ϣϴl|j9 E|[iIpbe?@vSEN?P?JW^ =43fRxew߱h@s8  5YF u_pH ^ aNՀ?~cpCV?@:H ?@7WLJv&%Q{>#'/78&5y&(<?@"q@w?@&`c]?@\u{0%ž@ ךpU?@.ӄT}XYq+= /a1/C/|7@#9(G?@9ABBV?Z2u"=Ëݏ/*"///?+٫(܎󭿀唟VN76Ĥ)AҟymQ?@?-uS8 F?@7fd?P& lH![@8@n t?P)Oy F 4zql~O%b{qJo+o"9`z^a߬?@{$w% VdDzed !#ϚϬ'9gSo+ =>_0h>&͝,6S֯?@%~&^WRS>뢿;@O]@N"ܚ( qq*GSeRݿvjϲ© #" *5̟x\[ߜ?@>E^ujp/Mf?Ku?#:qqN`!}wQ"I/[/;M$fxz//ϟO -d? K>oo`1-Dq,O>OROdOvOHlOlA OO@TO __-_?_Q_Ou___H_____o"o4ooXom o_oޙ Aq*HQU@x<?@Aj?@~p_Qu^gA|q#jP'(.@RSxu~p`` Fi lCo omr z0` 2,110\xam0I & bvbPzUX0'ɓx[s>"Ғ "û%a#a)@"'AGq Q w  = ͏/QCf֩?@1Qd)?f(B@?@,/k]p8{#s\*6fh?@~E ?@_pLVWwM?f%Kx?@ oi?@PxT/jfkb?@ܓk"uTy j=y?ӹضEel_?@'1Z=?@ɇlln jueJx?@&y_ ~࣒ji,ٹi6w+4f 1~,fNȦ|);A{`l!%\_^ ؀i9+2 jdioGy=?@=M2Dwi9z^Ef5U F/`_`вPÎied6ENjx/-g?@Rr?@n^s\'ތiOK4n?@{׀?@BiؐiIlRuF?@B5r?@n$Xp ؔi;KGۨ} yFa%?@O" e@7?d>@MHÑ|ßϟ);@`0 ?@fd2L&5`Jx?VRr JPe|//////// ??B?T?f???|mv_ 9i eD߬ٽm9`,"fXebv!Jx&b> W?@8+`y٨uP`<>j p-}JӦ?@զ.k~?]7a~<-wѼ !QÝA4@mne2(e[ ;?@"MR%#ht`#! wqwq?2t3&+57%2+5478?Ka+0yŸ>K.-$3S46>ҒוͿ2 uB; !ò>ҿpݼpO=@И$UƤȑy<< ܯcsF`O u0&8J "HG!e-.@UGb4ɓKlS`S~p$oK9ɐȥT1$1>NVdגOcdBHQ}dVRd "TdC;/BcTX.<%GbוekZV?@'IOғook_<0eJ}>dvTex]poh~#t%L~.r6HAkl/BgS l-oI_Bc-/B,<+IV(V)$c"*pC}AE`;@;@챀3噛ZV5Bc_>ԩpׄB#F3s͵NSYFD噛%C .1Ø*226m> owMIE`Y>fisBY%a !m#?%=/U !>Б>В>U>Д>Џ>e0ۭ/T"P0РH> i E  V ?>FMF2ӵE`CSRud0!MӵNPTWe`R+K:@Ai|ۡӹX_GӴ'/2qмFӵ%!)Dk Qbq!?Ͱ!V׿Ȱ<&pT/;d@Seh;P (E;sx_T*27?:25 a $ "')0vI-۵i%!􄢅au)_{//1u4>d1CUۡ%11+!@]!Ĩ!@1S?@O$,qh8{Z rGg?@;mx>@9h7 E(>E~%?@V?@4W e`/uH'-Z?@F?@PL߯]/ݕ UI?@Y?@섰a?@@\ aK/\u?@:?@3pDٳx?P+k= 2`\rP13J C4?@4?Pu>5 ?w \8mY*?@uӧ?@?@* ?P% u1E\8uf $0l?@?|/\8"/M͐~-@ŧ78E [13n?ٶ7g7O\8վ>Y.(v~0T+L-@k O\89EAr 0K(~0F0zpkdO9j~pT꞊jcpmB)?.~r$_\8 96?@v9.0'K~ ;(r_)9';Wpb]/?@J_\8@jlJ?@Ho o ojkK]Q o 6ȯtVsy<<'@.s֡֐y?SJu``+ F9lCmr$z̐` P2a߃@̑bbzQä= ۰$%E)rĭYutuԋ6ȁxe$`6HZl?@*1rՠĘן C옟]o沆ίx<!Ę#q9K]oF!ᲿĿֿ1"4qςseωπg1ϱFkw2?@'IO*uӧ?@l0|O#h9?@:V@?@* ?P$ 1E#h9\E{ C?@Q( BPEP >5 w #h5 UI Y?@3 Dx?Ptk= ?`#h~9H# -Z?@)${`\;X}hO#h9~%?@V@F0Lᯤ_#h94Z rGg?@4W et_h.5@\>@I(1 Bo蒵a);M_?@AjЍwQu#@{sŦ'$6H9l~ a!3I[m4Cf?@1Qd/6(B@0,/kP]?@8{#Αh\*Y\-hP~E ?@_pLVuwM9/6%Kx?@ oi?@PTǵ/i-kbܓk"ĠuTy r =y?ׯ>?6l_"0'1ĠZ=} ɇllp'?5Jx@&y~_ @?j㡇Y~=6w+4 1?@q0?NĿ+OJA{?@l!%\@_^ zO69+2, jd?@oGy={ =M2bO69z^~`Zf5U?@F/`_`r P㏠_6d6ENj/-ُgPRrs n^姑h\Er]OK4n룠?@{׀# Bi߶_6Il5 uRuF?@B5r/`n$ϏXp o6;KGBP} yFa%?@O"To6@7?d>@M/uʼsDeSί/ ?d2L&S0hs~͠VRސhs s*<5`r a '݋3^~@$uNT?@~ e nzl'Htܕ<N`r~I}^`wo6" Y?@]x o~! 5?h Ѭ:E/6m?@ڼtɤ_i?@0>mĹ/6ZhE" XE]K~2yx#.b+?@Q Oi0#d0W2?6/eS0gk̛L~ "Cgp?~5Y,0xd> 9ÇeDʩ=90,"fPXepv!JxDOʿb> W?@8+0y٨nO6H o $j #?@?Be0?ϽO~6>j @-}JӦ?@զ.k]7a1 _6-w< `?@*O~N0ƭf]l : gk?@۫Ն|^ߪ_6_g\+P˚P0ŵ?@!trǿ_6Ot(?@.EF?@A ԩ?֦i"mHo6l@|^f Z {` d2h)G&Had s(d!omFUy1#z5F=1L̵+$?@M7RJ@@.# g?@x̋@k/0n{u{` ?.#uJ8 (k`t5A a AztK r&Бi fʑMA煴u5F"(':eK` W@q`Uo `Wv !r׃,A"@q=5w.u1md/jIނTA/$r1$bb%3$"4$$=25$26$2 74*BC40r11B9(4R:54fR;$B4R\4/IKdFb4 $.J4%F(4)4!zْaFi%y)FeX7'a" m6bpdZ/Pe-#T.DrY! MO gL M )_C00 r5b5+T% ,Tt-TòYKTa",{`%uIL0K5ëJwU0e2 9@RrQ=k8Ll%! _tMcBCF3s͵NSY1Rt=k`xNjõzAg,@"rvQk nOl/\_ I!!VtjZg?g[UUUe^3v8( v/Hv7%qEq֥%t õ$tjd2@mvQb. 
S2wrvْB6PgH&LaV q??JLxJ%&Tw/1`3 DERdmf#JLF-bfCN HN blg;UFHXj|J&%˞䋳H œH^#ǚ j(L"ȁe :G=$?@nCvTeÿFhU^%?@xL?Pr ?Nk")e = -" ʍS值A Ĺ*9%?@ʲ>ͰEx1O.?@gxLK?K >@x }wઍ[ơqȱ4vӨa¯g7%z${e/E7%1Lz"UhNܮhv Ry#2zGzt?iȱH !e/)'a#d}aegߨ.׿?u_1&~3ۉzcX=Ry`Λez£j 1k >mqQ($U]?@xO?@(hWb!T "% #k}+P@ /A~&j& j+K-8fȔ΁e&jBCy?peՖad)AD86M_|(pg g\ɔ?@J\BCV줘U U7/I/m/////////??$ "o?UWs>ك^Oo $j=OOOlOxOOEOJYY|OCPS ewq__U0t_ҿ_Uo6*oFϔ?joҴ:n>!߳ 'Éooq /OSOO~;o;Hă6pf]?@X)zḁ̊?POJWI!'=;43fe;I#yB%/nJC^b_(U'\׮(ĭݦrjqsh,8rZl~^ oH?@k?@ | Ʒŵ ?>+=Oa/ED;?\=?@놓S10a%Ż?PEkU S|3n@۟lG|*IGBjnV8bņLU@! ?@#I?@RLߟekW´___Oh"`D*LDůկu2 \nGAekReO3hB3 *)?0 4@Kx_+"?Rz?@UGDLN.0艆?PcN|ŞW==BOI\?@iz TH0~@Kإ<媆ڒPU@:T&?@{Г?@@Vb?@S'}gf-_?V\@N@?`#Z[?@ /ݸz_0hq//////?:)W]V?ifx eԠab&B#ec[?+="K]o߁ߩ?/ a(@K +oO$O/XdFPO[G3|V?@_wJ ?@b3Y2?P& lHO[~@Nn/?@Jz?P)Oy޾ F l0__o"o4o,ps[b{/ 1~8T,~~Snbkom??gh1Eљ#*?N9\2kkk JNMb^"O?@soܞHZС@uub!! ŏя/[o/?AS[gWB'bjێgPt*X?*!:!̿޿+oOxQ_qϹ˟K flݥ߇ɯ#[!Fh1`wj=4HZ@4u͟14FXj|VhCP?VQ _'UA1fЍCM 482CU]4){Q]?@Q[E?@kB֚@N#@#EQts-8RXQ1AfAY"f8MEOT*J@@x^?)RpT.ܙCoW @z3*iRo__RCpQНUV__^MќeC@!L?2f!ăf89U ަ `34'9 Txߊ,߶t*4DVhz1Cr߳Cnm $ U?1cY"Qprf8q'!Ӵ c#! 3h%Tq d%1<%Am%?jOT\w?AO"Q1iAt4(tt7=K?]?7N?4AF.(DÃ6455F(?6g٦ (-GO!Gk!/O/SOeOwOOOOO4|NDŋ?@G ']Pjj@nZ>IP5 _Pk!&_8Y5?fl@@;μ3@gTpX6_??jSXF OO_o?RG3@nB5oGoYoko菏ek!cFFFoloo,>Pbylrpt_ 2DVhzk!vRΏN(:L^pʟܟ$6s+N`;fy?uϯ);_qBO Ͽ}'ɟ5GYk}ϏϡCbg߆ߪl~/(-L^pχϸ$)HZsqߕ߬]^7[mk!0k!Vhz^9 /#G6f$5/ yX/l/~'////// ???@5%V0@ eFW8j?|0??;9n@@#9r???-/?/pT(|&h/{/YOkO}C@4OOOOEECQ6Q6Q6_-_?_Q_c_u_______.Ooo)o;oMo_oqoooooooospVn2+=_asMo'9K]ol+*ьX?r3_,я+=OasFo͟ߟ.f,>z'geIB`x6P&dwI|A!ܒ Q uϯ);M/AS刺˿\1UgIϋӯ-?3u{߽ϿB;_!σewϛA+Oa{ =O"Xg TǕHz 1CUgy]?@Qȗ[E?@L;ƿ@?xL;-@H@  @@`0 " 'T(/"v#T(//$@e/??@%?_I5HU3x?<P?????? OJM@bPQOAF/\OnOOOOOOOOO_"_4_F_X_jSHtS6__+O____oo*oOPObOtOOOOO ?OO__(_:_L_^_p^w]]ztVҮ_____oo,o>oPoboto?ooooS&^ rw'tQOu);M߯ůoˏooooI(:pٟ=3$6{Zl~ߏկƏ؏x_A_ewѿ+=jcuχπϫϽkλ?T[1HBFeGoa } фӑU=߬`|ՀEave.Lxvb11B )2*#Bb!!bH2>TaveMS6Ru\%uL}#񎰁H!1?}E!:B(?}E #Lb~DN _vd|eбF3s͵NSYDOVG oEVUG,@ IOQO OFы ^/5 S.`H#AP/"/4(樌L- ߣA)?;?_Bq@/~/:DR__nQa???-VCK]XmvȿK᫘UiY\_WX e:uAuE`uAuBq/`۠QH`ZafdQqk%fdQqkfdQqkfdQqkufdQqk`fdQqkVfdQqkfdQqkfdQqkfdQqk[BcgQqkfdQqg;ŗ@(egR1utPiXD PDi`Xfr'0U'뎰! Aڼr@qaQӁ_Կcu,Gw=0Im|cqaQ VA (3EWS  0hՇQcN;qaQ @Ja*6Hv1q Gfu$)uU@R?@,?@HD0J?@V_GvEE9/K/]#qaQ????OdfqTq*IV16iNOҿƢ?@Љ<#&TʱWPܟ,W@#!$|hB'UU UTV3AuOY'@g%$V3//Y.2E"u)`// ??BR2V2\1%h?z??g{qOO QA __+_Zl~_I$D"?fl6OKO/#/O)oY/k/}///moo/ooo?1? nqR? 0BTuśyѱoBqH(wq>M͑U@,`0?Z~g_?@0]q?@ dv?qߔ43fIho7ďuղ,>^Ugyӯ~"7W:@M?7qO+=Oas Ϳq&8J\n!jZ?@_7M-x ߀.@Rdv߈F߶?*?@(=cp>ÿ~wSӏxTiF$UQ`qn-(Ō#-]bE' x__(roo( *xoTNIT/t"13TT2 tѡѡ̒4t'H6t7tCt28 Qa :% ; 2!!$>L>Ku"yyk5o12J %!Qr)qBAyAz2{1ϯj͚QQΌq_@>Ԣ!݌#-I _/Q@/Q"9r+䔀mm򠡠' ҋru\:С%uL:Ё! Waj 9q Gұ5#׿[18Ŕ_$EQ:9F3sNSYvn”/̒'yAVY,@KbQbWf IAQI Eݏ\!!Ȗ%ts1fݫjZ?~E/+s1 ¼ ê ļ ż Ƽ  eԄ I-!9 K5We@-=i{ձA4I훱coµ5q$#8µX,谂y1,`YF##g#S2zG_zt? Qg8aDoVg.3?7a7I~KqeGӱO T9 %7H5eDhg<c; 7 >-E"/4/Q/]/o/////,M^κ?@6ə+)eRUPS ??09+P@?R?d?v???? 
???uHVvEhVE7Mw*ypGJAO>EVh$Fw{gF]fUWe-UDAfIO__)_;_M___q__,F6T?@6h䤅__0?o o2oDoVohozoooo<?:ajreO1Uf8 $  ŚM +=Ii|ڵZ $6vؐ+=OqsU'˟LO5 %g7OOmzp~OۉcʭѾ Я:^p*j9̣wE|[i pbe?@vSdm?PJW'݈=843f@qJpйއ+ϟ݈xe~(84A # 1^3PCV?@:H ?@7LJp>й';*FW=ϥϠoŘ(<?@"qۇWjPTX^ePTÈX^'PTX^,PTX^PTXZWQe1 ?O>(U JF\hH%\HcOjʷ"OѸ#4%##(_7?.q?];t?@s?~m#9@w?@&`c]?@\u{ ΏD~@ ךpU?@.ӄT}wxQo$o8oJo\o,oPb~o|7 #9(G?@9ABBuZQD\o "@"Jй!(%켭̟'_7N7U)AymQ?@?-S8 F?@7fםd?P& ?lH@z@8@ t?P)Oy޾ Fȃ љQß՟nloޏZ@O] ?N"ܹQQ7Ifrπqoooj߲ր >ˈ/A*@%7I<[ߜ?@>֝}j 򂏏!>MjS u~j`` FilCoomr1 z>` -2݈rУuVU$QnPUv.f1gU nT T5VRa$c1 )ĬB*I+,"-ĒRğx#TaVU6bun5`%1 uLVQŴ1 A>ANUd%@VRDE; J;NJ5a_VTK__F3s͵NSYF%;ێLډUV?,@qb/1 pA)̎NUPjTPVQVRx/a¨T@}x+'Ɵ؟m$VQ"D2͢T$!xVRNRŬB@1"˱˱R34+ѭ9aK7 EA I9:;"bIV$!n$.@T+!+!sUkSqqVPVPJVPVPVPVPUVPVPVP!,TO/Z'!(VPVPe2mUio~ҥIaupVAp|!Q'VU$!;NU$!;E$!;$ !;$l!;b$x!;U$0!;$4!;+$8!;9$QäuX?,M Gk߲AmA!Տq"4FXj|1@Vj?@҄BP(մȟڟ"4FXj|˯8J\%Vzɿſٹ! 1_mA!Oa!~ϊE9EU@|!h4FԎؐBߎߠ߲zU5aYwxUx (%S6ݵa1cky5+mA1!n1m_Z5IsU@ Ҥ?@,?@HD0J?@VGD5M5cmA//!///DA4M 1)Y 1H_??@Љ@@qq{?Ô?@ǐWq˕ԕ2v-߲Yѷau`\!?޳ ucjuY-Y aL۶wkUфZ0EU|Ytpy[Ss6Tw'ɬɢϪmJu[Y҇D" lrqcUopsru 7(c /aظqF7w 0;4a͗"8J^הwωNJu+O 5 p0ߔ BxQP:|=AzK ݨ>eOpent4ck Mu0tINIc fauJWe`idqgPJu7yia(X\rRp'2XƑ,!a+ 9Jtr=oOo{ooaJA(gQ)JqJEeBEo,`0 pZ~g_?@0]q?@ W2DV Sq 5}?ᙵzru43AfΊɡ!3EWil~"7p:@ÚM7qO a*<N`rz%ҍQ%ljZ?@_7šM-g3y //-/I/[/{!V1{///////l+u@ ^J5C6?H?Z?l?~???????4eOO67OCOUOgOyOOOl^|>?@(=06p<'dOcO__(_:_L_^_p______Q__oaahzRe佱Ja_o&x<_&ɯd2?9AJ@oooJ!tJePj?HZ$r2bbOPz5S>^`` F lCooz` I] 33{rrN0tymVGp  pU@Zama4w.yJaa sd|ND?@G '@@!y3?@?1x?ҤڥnTI7"t O55"t\@!hW#5)hq kĪu1cr%p&'}JeMʹȟw)˜refÿ@iFrC?@n{ބy<<@$'x2DVhz!&ʿq uu6aPf# #6+Bشgtqt}txlT--r+a+cO)L䙑*䆂+bqq^b-JbdxrcTaun‘6ru\ [%OuL qŬOЁqËu@"rk }nN}e5Ɣ_t~ׄ!sF3s͵NSYF^enJe"r,@Jbf&R bOpq\xCTq.rx/!цx v vݶ #/Q#!mtqUyTqrr 0$bA1$^bJb34ff(T(7I4 !8V4*N9c4!:p4;}46b<4C/r,QQ>4NRrn[dw/// q qJes=LU.p*ppppUIUppppQ|LOG|Hppe9ufBNqqViaT2O{ƥv`AQ@WuRTkA][uRToA][RTsA[RT{A[eRTA[^eRTA[JeRTA[RTA[RTA[RTA[RTA[>URTwAWqqqp}}{n1RuAt@n19T{b0iQ4qqPtsk픃xX|C'0Ur)՛O!Ŀ!{ v"QAAqO:!UVrΞ]Ӧ׃ׅqC/ U 92 __/_n12OͷqxRԋq1ǶϘ2ϘVN}j|?@J$U8<f9Ϙ;ŷ{-?@RԐHY1c)O?@1;=?@ڿ?@x>H?Pz ;C_7֑ŷP"Зxŷ`+&%6YƳϘ35aυ^Ϙe|ѹDϘ>UY/Uk8Зn2 @Ξ0Ϙ{2@t?YnAjϘɚg(rMZLZ4?@sIut]Kc3?@L*!0_&q :節@߉O4W(6S`?@f?Jjq҈o~ҟQkA!Q$Y1$KWqum!=$+aü=@ވ|5?8#c2zGz?!tɧXCUF2B!0Hw"Uy<<.@3R%?pζa0_0u0`W` Fipl~C:pl:pCqz` ] 1qߐ9 @q"Vhz "EIN&_qap&z` q1$SqWsa1  RLxoY1& I[Z _zڝà^˥?;_(lQEԃТ0Re?t{޿8g' EN/`*jqw!" 4FoA[m !'1xR>Ʋ?@! SUWRdv 1CUPootoS O?ocAEwY{//+/=/a?ڝP/f:E"kXp0VL[(^7?`έBa?;???????O#OKBOTOtAtOOOOOOOOe"S0֣0V֥<_N_`Xp______ z# (_ ogSfoooobb0ga@sb˻u :Dtڝq yh 4R?2f/x&M}j|?@h;f9e)-%.5%m>%e0$$$> .A=ߦI2ߦ!uy%\Kc3‘(%q8х,I'e?0M`j#QcYk}şן5I::FXjqv@y<5ǣR?@)u?ݠO?@%+M_quvrBڇ+=E@u]oi]y.߮ҏfx2AW'G>Pbgq,'!)}U@ɗ9?@tc?JB3PߟA!$H qɡ΢Z@ BP(?@x<%aS%mQDRR#ÿտ!e0B `u /Rs'6"p |ώϠϩqZ[u$wɑg$@P&>t/////(//?)?;?[1q[?g?y?j|?į_"%ݨY˂ s%7 {OmOO_!_3_3EWujt___x:~,qJ1i~#m@ _og"c\f#XeBWuޓ*!+3S-(T;44o׎4+aҌi߇*'`f3u%nu cYtooooooo"t0PqGYkt (ZTQf}uYU@^EJ?@k8˰jZsr?@w߶{}Օ);?JvA43fPٺOꏳew֙jLub9 o|%n#n餡&|(E_oOasπϹoϻ7.%7I[mߑߣ !k.ptě#p֘A' #5GYk} =%nljpgy -uHZzTQz@ f9Ę@#yjؐ#&lC8J\n/"/4/F/X/j/gJ/%nPZvxM&///// ??1? 
[binary Visio drawing data from the preceding embedded figure entry omitted]
nova-32.0.0/doc/source/admin/figures/SCH_5009_V00_NUAC-VNC_OpenStack.png
[binary PNG data omitted: a 1081 x 429 pixel raster of the nova VNC console access diagram]
nova-32.0.0/doc/source/admin/figures/SCH_5009_V00_NUAC-VNC_OpenStack.svg
[SVG markup omitted: the vector version of the same diagram, titled "Schéma Réseau" ("network diagram"). Recoverable shape labels: Nova-api; Compute node; Libvirt driver; Nova-consoleauth; "The api sends a get_vnc_console message"; "Generates a token"; "Sends an authorize_console message"; "Caches the connection information and token"; "Browses the url returned: http://novncip:port/?path=%3Ftoken%3Dxyz"; "Sends a get_vnc_connection message"; further labels continue below.]
Browser Browser Sheet.57 noVNC noVNC Sheet.70 Sheet.67 Sends « check_token » message Sends « check_token » message Key Sheet.61 Sheet.62 Sheet.63 Sheet.64 Sheet.65 Sheet.66 Internet Sheet.72 Sheet.83 Sheet.74 Proxy starts Proxy starts Power Sheet.102 Sheet.106 2 2 Sheet.107 3 3 Sheet.108 4 4 Sheet.105 Sheet.104 Returns a url with a token Returns a url with a token Link.84 Sheet.109 Sheet.43 The user requests an access URL The user requests an access URL Internet.103 Sheet.101 1 1 Sheet.110 5 5 Sheet.111 6 6 Sheet.112 7 7 Sheet.113 8 8 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315632.0 nova-32.0.0/doc/source/admin/figures/SCH_5009_V00_NUAC-VNC_OpenStack.vsd0000664000175000017500000530100000000000000025020 0ustar00zuulzuul00000000000000ࡱ> 23456789:;<=>?@ABCDEFRoot EntryRoot EntryFi˛VisioDocumentGSummaryInformation( ODocumentSummaryInformation84  !"#$%&'()*+,-./01HIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~VisioInformation" ՜.+,D՜.+,<HP X`hp x  YPage-10VBackground-1ESSwitchoServeroDatabase serverCloudseDynamic connector.1Bridge EthernetonnHub Patch panelFiber optic transmitter ATM switch Bridge.14 Dynamic connector.15erModemc Ring networkctoTilesetUrbanet AlphabetorkModuletNone tSolidtRegistrationctoVervera None .26ionCity.26Technic Server.29on MainframeonAustere TranscendonBlocksn FirewallonLaptop computerPCtApplication server5 Web server FTP server Email serverervManagement server5E-Commerce server5Public/private key serveric Box callout Comm-linkt Terminalt File serverContactUserctUser.50Status bar itemForwardStatus bar iconCheck user permissionveDynamic connectorioAdd list item permissionsic DiscussioneFindssiUserssiNetworkLinkrk ConfigureeSearchr PropertieseFilteriShuffleKeyLockleUser.69User.70 Round keype Tapered keypeCard reader with keypad Card accessKeywaycUSB keyChateyChevron DocumentssNewInformation iconkeyScripttCopyttLink.84 List boxon List box itemon Dialog form InternetrmPoweret PagesMastersW0|_PID_LINKBASE_VPID_ALTERNATENAMES _TemplateIDATC010498141033Oh+'0OHPhx Schma RseauRaziqueLC:\Program Files\Microsoft Office\Office14\visio content\1033\DTLNET_M.VSTRaziqueMicrosoft Visio@b>˛GN2 EMFxNl8}U HVISIODrawingLM23 
??d(LM(3LMcccYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYxxx^^^YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYnnnnnnՃcccbbbvvvxxxvvv~~~xxxcccrrrvvvxxxvvvvvvvvv|||xxxvvvxxxxxxnnn|||vvvkkkvvvcccꋋsssssssssssssssssssssssssssssssssssssssqqq{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{|||Јiiiꢢqqqkkk||||||^^^xxxꘘВբqqqkkk|||gggYYYYYYfff̌bbbYYYYYYYYYYYYYYYxxxЍꃃqqqٱ٣kkk|||qqqQQQUUUVVVTTTUUUTTTYYYYYY^^^Ɲ˶坝ƈqqqkkk|||բ~~~~~~ꈈƢsssiii~~~nnn|||bbb}}}aaaxxxžꘘ~~~жՀչ|||rrrշΖfffXXXrrr~~~{{{~~~sss刈ꍍ廻~~~Հ|||興|||xxx|||||||||||||||^^^YYY]]]ˬpppYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY山౱Հ|||}}}sssՃiiiƱ僃Հ|||僃sssxxxrrrnnnvvvxxxrrrnnnrrrnnnrrrkkkrrrxxxkkkxxxnnnvvvvvvvvvfffnnnvvvxxxgggˢڒmmmղըuuueeemmmYYYnnnYYYnnnYYYhhhЃlll{{{YYY|||ζYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYxxxwww~~~xxx湹اٷ⺺һYYYϾnnn游aaaؠ⾾YYYjjj|||kkk||||||vvvvvvxxxvvvvvvvvvmmmvvv|||nnnvvvxxxrrr^^^zzz\\\]]]vvvxxxvvvxxx|||vvvxxx{{{sssxxxxxxrrrxxxYYYYYYlllYYYbbbxxxYYY___QQQUUUVVVTTTTTTUUUTTTVVVVVVYYYYYYYYY___ZZZbbbYYYnnnΧfffYYYnnnˬYYYdddnnnxxxxxxxxxnnn꺺Ϻ⺺Ƭ{{{YYYTTTXXXXXXXXXYYYYYY^^^^^^YYYVVVXXXVVVYYYXXXYYYXXXUUUYYYYYY^^^xxxYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYsss~~~xxxnnn~~~sssxxxxxx\\\լYYYYYYYYYƍՃڈ]]]rrrrrrrrrvvvxxxrrr|||kkkxxxvvvrrrvvvrrrmmmbbbsssՒxxxxxxcccնʞ՗էՈssssss~~~lllhhhhhhxxxնYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY՘՘nnniii{{{풒Ϯrrrsss~~~xxxYYYtttaaa⺺XXXhhhլꬬ^^^SSSXXXYYYVVVYYYXXXVVVXXXVVVXXXYYYVVVXXXXXXVVVXXXTTTYYYXXXsssrrr||||||vvvvvvrrrxxxvvvvvv}}}||||||vvv|||vvvvvv|||vvvvvvxxxrrr|||iiiլ১˻լssscccլnnniiiլiiiiiinnnxxxnnnsss      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~      !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~                           ! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _ ` a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~                            ! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _ ` a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~  Visio (TM) Drawing ؒwRµ%*@8 ߳!fffMMM333-yN77sPɺm88rcc3f{ڌ626?3ff?G}UUUհc5ڬм>{N9KmfwJpYDRb߷qaXPF91I@dddFFF9{$<<$JJ1tl77A@@@p0Ezٕ㰬VęյTBj~daPc*p PT幵.|AJ Pj( YYY `???wwwI}G &&&g \\\Ĭ# {>TTTPX`LLL VVV(Workflw/cmd=2C/Na/Na/Na/Na/Na/Na/&,CU.J:DT5I[1hXT@. /UbW##_b0U]?ȾL \&B1&Wb%e !oM $d )P?;$,, & #& , 5&?D#//?M?\.? 7A  T~,,,'q/%P6$Y6 ((}k?l)P?"  
U UA3#o >S@#B:aU_ReUeUeUUeUeUeUePONOD`T_ReUeUeUUeUeUeUeUUb RSR;25OM% >Qiq;RRRggR qh>Qj /A^F' %p3|CbFp#| |% | vi?=Os<34*x,,,/HRQAmeY q]U@Q-,_vL__q____OO_OOYJAe@L&d2K?ɠQ3Ҳ s]֦xΟ$H"Ŧ)Լ Dץ4@dUA??ϩJ[D!# f:45ŧ5ŷ5ŧ 0H 0D$#U   [#Wb ;Ch( D?@eDrqr:ePB#<D5,ND0DDA7XDbabX#F:FDF(A:#7 D% #7H(DRQRHBA7D B7DJBA:B78DBAB8BAtstt<tB9DB9D%B9(!(D2&1'2&(!BA9d!Dn&m'n&d!BY0C&'&!<!BȁD&'ȁ<ȅ"A6761<1B#AT6S7T6T1<T1B$91D6761B%91D67%61B'AF GFA<AB!(9DABaicTgdhTsG'0f6U[e0 sFf|tvu dd4φFRd,c  F،3*d4V56Fs 9Faaj9eOwOkŕh8FTo!32pbGFdF毭oʳUųUvoft\W?޳P`ކmW"> gAkA oojx5lR}J<DTqC`` {<eh]-w>yqyu4x.~O6ARH-D@>QWACGcwc]oP{Jk? `Aooookt|/R|a`OЖ󞦆B1C~}XQ_q^Gʯ$6l~ؿ 2DVhzϛ/// .@Rd&EFߔߦ߸$6HZU@S; 0UUCš@ЖSY+ 2@e?N4nW__ o"4FFoKo|o%///0/B/T/f/x//1/Ug/??,?>?P?b?t?vA<???QV?? OO-O?OQOcMZeH,2T3F_LB֖Eh_c,f;5OHUUx_v_____o__'o*o#Z`orooooo7uI(:L^p8 ʏ ߏ@##5#$Ǡձ=9t++aO##廟3ДPN[UY9q R[p?^܅bpނb JD ಕϧϹFdƞ'2W#2`~cD@J$uO0fB = = 0BvCt346C9C$%S1ղfxߊߜ߮ߡG>VaE$ #㇤bbOA%7I[X{ߪχ4 F9T=5GYk}41u9_3MM$ORdJBį֯ 0B _fx?__KV__-oQcOo}///q/// ?b΅ օ(?:?Vy1d}l?~????????pC%O7O=o"y$oE/Og䀭䭠TA6n` Fo`lɀConɀornzۭ_bݭKQ٭ᷫ熐C㭡pa*ᵃVA8 Əڏ숧1_B,:S/e/KG_OL/7qA6rKRO__+_(O_a_$[____oo'o9oKooooooaoooozY8J`ޡn| #ď o%&oOOǏBM,<>ߓbY`B џA .@Rdv Я⯽N`̿޿&.vIFX*8ϘϪϼ/MA/=ZC9ߙAQE)2TC>OO:/7I:?mI?/D??!OEWi{0h1s_y!](Pb!,AGLL!%. ڕbRV$QraK!qPl#F^/{j1n,`eWctӂW0`ih4`A7'o9o,dSb ~eYl!`VoPz@Uta`e@/$6H  dݏ%R[mǟٟ/m,:DVh!3EWi2x=Ql~Ưد 1CUgyӿ+PbbP5I~M,ɺ ^poo߸/~b'b/~@xߊ@߮H&%$6HZl~^OO _ DVh_k}_ ?o_R__ '9K]o/ 5/??~Q-mx//ϔ%?7?݀L?/?ԏ'bbbmf???????8u7vO$O6OHOZOlO~OOxOOOO' _2__V_h_z_1_}oo__ oo.oYϟdoÿտoooooo*{>>PbtHn2T1?>J溓1hn9'}48Sbǟ̦2Vhԯ @Rvϝϯ*C1p\%&,%);M1k___$6HZooooDVhz); //./@/R/d/v//J/////??%?7?I?[?m?/M___q_L9/@?????;wO O2ODOVOhKlb#裈M?OO@2OO_!W=S_{o____4oFoXojo*/oooo Ty1dvơ9 I``7I*<N`r'Uᶏȏڏ"4F Xj|~?f޳ ? 2 -37%J@J*<EU⸟: ]o䢿ƿؿIDVhz&/d/*Tv߈ߚ*?p/ߔ//5GYk}徭]1+=OassH 晑9Cў<N -?Qouoo2///;/M/_/</b?t?/p/?dP????????Oc#O5OGOYOkO}OOOOOOOO ^)77s"_>7sF_*I=aP 'N£(o__[eaPR:a4p` AcentColr3pzPaP#bbbS$QmCZ:a<b!bNbb;(D___)G!o*lyahg::o#^opopϔooooojHZů~ߴ/ 2?VL?͏ߏ'9KH.psџ:+3ahXXEH2q#q_/VU?_Q_UiSЯ^[_WYݿ￾%4ocϙϙϫϽK/);߷/_q5ߧ߹/ 0BNfp*D));M_088Yr889Qr ޯg&8J\_A̿cuϙ8AFfUϹo//,/>/Qb/t/////s_?? L?^?p????????OӲ-?Qcu|pkCcuBvx~B-qı~@OROvOOOOOODVhuQYSoR1cc?2n__@bVo!o3g>o`k_ob(ϖQc4jn?|fÍonॸoooata*;֤rA ]_5 .@dv߬*/*N` +=Ȍޏ&8Oǯٯ!3EWi{3߯ӿRd-o-?L^u΁b!dph>|f5# h*&N`rߖߨA?8J\n?9OO"4FXj|H" 1oTJ\nFna g );gFT16 F@lvdAV1!` o2o6*/!/xs>/P/b/t//0]U\V/////?xO3?E?Oi?{?Ϗ?????; OO@/O{SOeOEUjO(OOQ{Guf!3BWVtWN~OSܟOOOO__)_;_M]#^_p________oo$o6o.eb dovo;0GtvGÁsbz@-&ÁzqH D{-M6qߧ%%*[mǟʯ!3EWi4Fկ /ASUU$u0Ŀֿ 0hBq7H2TQ%+brGBauE'} ϞȞ 4W/Ugyߋߝ߆? ?9??Qcuu?z?OTOO,>P`bt%n*<N`r4ߖ/*G`b?#/5/RgReT!6 q &x2akR}eeV!1a` o2o{vs ?s//ye?/,)1@U?V`/?? ?FD?V?h?y????@? Oo.O@OvOOO\RoB2SeDž Q`qşןOOOOO__$_cQH\X_j_|_m_@_____. jP屦399j0ʅ&o+oaKs^%7I@nYbO * Oaѳ{K,WJ ҲInπ&)}A9D2%l.Pt =$OӞ@bw֏GX"[D;MnxiĿֿ@ Ȳnw*߽߫y&e/(HZ^3q(:L^nw+A-\}q}@~uf$5fFop?jo|ϼU"%\ 0X^?/60 ϲ??#! tq, /}///4Uf42F_2Fy4?sOO\U??6xSerVoeovoaLOOO4BTfx0BTw0oiNo7B% qFv14:ValidtonJ<>' Prpeges Sh_wIgog}e='0]/E Rue{es D1 NSmm|aPitWkfD U vcgikRxt'esB Sz: %iSgg x]pglyycsrc |xktVncn(U.Iw)iA.MJpt8s%Cey!CUT}mD !vwD!X5@#1!ld/F]":#Tut4/H?z)2/o(#l////? 7@`?:7N7?5 8XAH!t&??_?H??~%3O5"v1HT8OJO\OnOO;7UO9 OE6$o6oHkxcC" bZ1Qczm?_"_4_F_oz)4w_PiTPgdQ_____'Ljaapk &p{xoil ooo $Xz)5OY`rkB#b{ 7@k:7o#tybok2DVhzʟz)6ݒrUHSs߀h]"d}t܏$ 7:#6:7eN9ud~gaƟ؟꟤x+7#BI E*smBn?"vXj|6ίOD@5Pbr8"exZ~eS\/0O"A#>!h //1/C/@=aN9hÒz"#c"a am! 
Pmsfa`8////?TO*;v_`smM~d?v????'AaI ace^An?@8 `aaCdqoHa aW0mUb8B_T_c^c^gS2C8:aAA_f3 OR SiW0c.O@OROdOvOo *;N`q~AkxOO__&_8Uw^_\___[__o o2oDoVohozoo ooooo5o)hR}xv$6HZl\p!.0uwӏC)<6YXrbRdv ПA]rFxmlns:v14htp/'sUcea .i-ro oUf.-o/GUf=e'v=s=]o'200'Eet1 lnUn'(U)*+,U-./0U1234U5678U9:;?@U4<zE8:&@bN%@ sh8[C-07 AU'()U+,-.U/012U3456U789:U;<=>U?@CDUEFGIJU4< zE8:&@bN%@ 9lC-7 AU'()U*+,-U./01U2345U6789U:;<=U>?@CUDEFGHLU4()*U+,-.UMNOPUQRSTU4<zE8:&@bN%@ ~8 :}C-7 AVU4<zE8:&@bN%@ \;; }A-E7Un'()U*+,-U./01U2345U6789U:;<=U>?@WU4<zE8:&@bN%@ r;[C-07 Aj()+,-./0123456789:;<=>?CNOPQRSTYZ[\]^_`abcdefghRiklUmnopUqrstUuvwxUyz{|U}~UUUUU4?@BU4<zE8:&@bN%@ r=[C-\+> AUn'()U*+,-U./01U2345U6789U:;<=U>?@U4<zE8:&@bN%@ q>[C-07 AU&'()U*+,-.U4< zE8:&@bN%@ xk=?!C-^7 AUn'()U*+,-U./01U2345U6789U:;<=U>?@U4<zE8:&@bN%@ q?[C-ܷ AUn'()U*+,-U./01U2345U6789U:;<=U>?@U4<zE8:&@bN%@ o[C-* A-VU4<zE8:&@bN%@ h(#AA-K17 ;UN'()U*+,-U./01U2345U67@U4<zE8:&@bN%@ (t9A7C-\7 :b()+,-./0123456789:;<=>?CNOPQRST\]^_J`acUdefgUhijkUlmnoUpqrsUtuvwUxyz{U|}~UUUUUU4U?@CDUEFGJU4< zE8:&@bN%@ +HlC-ܺI AUn'()U*+,-U./01U2345U6789U:;<=U>?@U4<zE8:&@bN%@ dI[C-7 AU&-.ZU[U4< zE8:&@bN%@ J!C-;7 AU&-.ZU[U4< zE8:&@bN%@ xJ!C-ȴ7 AU&-.ZU[U4< zE8:&@bN%@ K!C-ܽ37 AUN'()U*+,-U./01U2345U67@U4<zE8:&@bN%@ (w9KAC-7 :U~'()U*+,-U./01U2345U6789U:;<=U>?@AUDEFGU4<zE8:&@bN%@ +LhC-7 AU'()U*+,-U./01U2345U6789U:;<=U>?@CUDEFGJKU4?@CUDEFGHU4?@CUDEFGHU4?@CUDEFGHU4<"zE8:&@bN%@ /OrC-\ P AU'()U*+,-U./01U2345U6789U:;<=U>?@CUDEFGHU4?@CUDEFGHU4<"zE8:&@bN%@ -FQrC-7 AUz'()U*+,-U./01U2345U6789U:;<=U>?@CHU4<zE8:&@bN%@ (-ReC-ܸx7 AU-.N[U4<zE8:&@bN%C@ HXRC-U7 AUN'()U*+,-U./01U2345U67CU4<zE8:&@bN%@ w9@SAC-C7 :Uz'()U+,-.U/012U3456U789:U;<=>U?@DEFGU4<zE8:&@bN%@ SeC-ώBT AU'()U*+,-U./01U2345U6789U:;<=U>?@CUDEFGHU4()+U-UUUU4<zE8:&@bN%@ ~(=X}C-7 AU.()+U-U4< zE8:&@bN%@XY$CH:*7 A-U4<zE8:&@bN%@ )YA-pK7 ;-U4<zE8:&@bN%@ *YA-H?Z ;-U4<zE8:&@bN%@ *aZA-Ko7 ;-U4<zE8:&@bN%@ h)ZA-HK7 ;-U4<zE8:&@bN%@ )/[A-pK=7 ;-U4<zE8:&@bN%@ )[A-K7 ;-U4<zE8:&@bN%@ )[A-М? \ ;-@U4<zE8:&@bN%@ *f\A-ќKt7 ;- U4<zE8:&@bN%@ h*\A-ќK7 ;-U4<zE8:&@bN%@ '4]A-ҜKB7 ;U&'()U*+,-.U4< zE8:&@bN%@ ؈]!C-Ӝ7 A-U4<zE8:&@bN%@ '^A-\՜K%7 ;Uv-.UUUUU}UU U UU4<zE8:&@bN%@ ~^cC-ל7 A- U4<zE8:&@bN%@ P'<_A-؜KJ7 ;U.'-N}[ U    U4< zE8:&@bN%@_%C\ٜ*7 AUF'-.UNOP[}UU4<zE8:&@bN%@ ~x=`}C-Hڜ[7 AU6'()U*+-.}[ !U4< zE8:&@bN%@ `/C-ܜU7 AUB'()U*+-.}[U !"#$%U4<zE8:&@bN%@ =:a9C-0ޜs7 AUB'-.N[( ) * + , - . 
/ 0 1 U4<zE8:&@bN%@ >a8C-0ߜb A-2U4<zE8:&@bN%@ *bbA-l1p7"A-3U4<zE8:&@bN%@ h'bA-$K7 ;-4U4<zE8:&@bN%@ %$c }A-.7-U4<zE8:&@bN%@ 'cA-%K7 ;-U4<zE8:&@bN%@ ; cA-%K7 ;- U4<zE8:&@bN%@ &Md }A-W7-U4<zE8:&@bN%@ ; dA-\(K7 ;-6U4<zE8:&@bN%@ ; eA-H+K7 ;-?U4<zE8:&@bN%@ ; veA-\+K7 ;U>-U_5U789:;U4<zE8:&@bN%@ >e6C--οf AU^-.NUOPQRUST!U<)=)>)@)UA)B)C)D)E)uU4<zE8:&@bN%@ (ofPC-8m7A2-7W89;UFGHIJKuU4< zE8:&@bN%@ Hg,C-PmI7A-LU4<zE8:&@bN%@ < gA-H/K7 ;-MU4<zE8:&@bN%@ < hA-\/K7 ;$ Z@8QR@9QR@!^:RR@)#:MR@#O;GR@($;QR@؂>~=RR@ȃD5>RR@X>QR@h?QR@;@QR@@OR@;AOR@ARR@HvSCTR@ȃCQR@8NDQR@-DQR@(}FEQR@}EOR@h~FJR@HFQR@"FOR@X%kGOR@8GQR@苮MHOR@IRR@<IQR@EJQR@JQR@H˲=KQR@6KRR@87LQR@7gMRR@x8MJR@9NQR@9\ORR@X:*PRR@:PQR@;QQR@8<RQR@<ROR@x=SRR@>LTRR@URR@UOR@ VRR@uVOR@ VNR@hLWOR@qWOR@(:XQR@ȣXMR@hDYMR@YOR@ZQR@HyZOR@覒ZOR@G[OR@([OR@Ȩ\QR@h~\OR@\OR@L]OR@H]QR@諒/^OR@^QR@OT_OR@ȭ_MR@he`MR@`OR@}aQR@XbRR@rbKR@bOR@88cGR@cOR@xcOR@adGR@dOR@X'eOR@eOR@fRR@8fPR@WgPR@xgOR@ &hORL<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<(L<($ ZE8nRE( nRE!oRE-#oREX#!oREx$/oREx>=oREDKoREYoREHgoRE8<uoREHoREoRE軕oREvoREoREoRE.oREx}oRE~oRE~pREpRE"pRE%+pRE9pRE8GpRE8UpRE(=cpREXqpREpRE˲pRE6pRE7pRE(8pRE8pREh9pRE:pRE:pREH;pRE; qRE<qRE(='qRE=5qREh>CqREXQqRE8_qREmqRE({qREH qREqREqqRExqREqREqREXqREqREqRE8rREاrREx#rRE1rRE?rREXMrRE[rREirRE8wrREجrRExrRErRErREXrRErRErREHrRErREsRE(sREsREh-sRE;sREIsREHWsREesREssRE(sREsREh sRUFD  h(^TYYBBUF\.ߗ?x<F BP(?hP?X$ ]B66ȅHq?j|?G IB@L&ɯd2?sU!&;/xt  0RKB` SwitcP,ne work pʴ r pP!aUl d v c z !^| R( G@&L >V&7?J7 ?ppwpwpwnwwyZwpw wwwpwwpwyplRp)~pw߇p}~zZDrag onut hepe.Rih-c]lckaUdcos UPo etQeAvUwreP 5aabL&d2ٿ\.??{ڿ ;?k?r3r CUG DF P# h @T(PYY#EψUF\.?@L&_d2?@ w?P} Y.B! ":J :f^7r7ֆAu` ?U"U66JJU^^rrUSu#U ^b"h,h'RqU f??z4b u{[`:#"&a#'QT)b" ^l!L1$l" l"7'>>ÿ@qZ05?46 r23ZS2uY0`-1ZBu@ l"r3A'Bl" hL$9u`u `"[ bzb!u`R$"% أ#Q!'1`Vis_PRXYcPm!#5HP4{6NP2`?CopWyr.PgPt u(>P) 20{PU9 M.PcePo0P'ofmR^QraPQUamPi_Pn WAlP eXsRUe0PeePvPdB!#d$%b!*(=#&#@Bd&& "$B##0Ub#]g,7{TrN @ cTg d(a1+_c@11"hh"2)t3*tB+tBa a"aab(!BpaSb"+QQou\%G!!["@@Bq"53T@R"q3<1 KEq`RPQs.PU TPxmPSP^2l"+ 0^<@5b 䱠"5R"br3&BrM_q(pffs_<៵R-m11'rvu"u"2$u3/8B0EBH%Q%Q"2_b3l !!E|QQQQ67l!8ĢAP!!1;(bL1D:b;aqq^eD!b(pQu [ ]@U0` SPa*aPePl rsRQV)P E u.PpBP!ePQ"5X%7\In` WTcPx]SP`DP!vR3^߮߼Iӏb` Syb9Sw>?QEd-MPnyfPcmPuRmt߆EQ`]1SPRd Nm E ZK!P PPr8J\h}]%R'`-sRU3e_ 5Afe3`؇e ,S SRiPQ ՑրTZIPL_PcX@;۞`rBYlPiPq٫€րY0KSR_PoŲ/ٸ!ר@/R/5NwRk,a_B`+x/OI4PQdPSi/, "?=ns#]?,(dN??(JSL1m "/Q9tVQ IQ /?րf?O\I-` R 4PQQ^OpOƒ?O?MPCI?O._qSCRo"mY6AtN BT]]9 M JU@L&d2?@|>??@Fh4?P6 AJuM` W?SuJt At=WG_z#J]RbJlp= ף#R>2yw?@MJA@eb&#z(b),+,&J[(.;?&L ?A$./O%B!!5 g`?Copy_rigTtZ(c)Z2U009ZM 0c 0os0f21r0>1a0i0n}.Z AlY0U 8s]2e10e 05vo0dQ0! 
$-# ',&?6iie59 6uX7_'0UpBŁ&Z!$0 'F";JlJAh h7!1!Z@x<3ף , <~y#  IRvVJ:?OS QlU^TURUEU{bX,G\ZYaaȫQ5UL&[RQQ .aYU520_BUv"rA # cHD H# h4>T]]9 MT JU@Fh?@+@ ??@?]P6 ]nAJuM{` ?5uJt  Q߸It=W贁NUIRIl%0w#>2zG7z?3@MJA@eb #zbR( (2+2&J[ (wp"?& ?A$ 4/U%B!!5 g`?CopyrigTt (c])020%090uM0c0os 0If21r 0D1a0i 0n.0 WAl_0 8sc2Ue70e0vu0dW0!E$5 򤲝-?# '&6iieE9 6X73'0UvBŴ&!$0 T-FAJlJ8>UhAA 11!`m`0?@rO\.ש ,#<#{ RVJ:9XYUي#rT Q%UE?,b R8]S# l_@ѱQEakQY,aU5U^fGO+|no/j3asChAZGQaX_j_i2>_PU"rA #js( bUGD  3 h0TdYYB 1 U@ BP(?@'J?@ V?,?@f_(@yw?P} ,w 5 &M& DD bb 4  M  4  &M& 4&4& R&R& p&p&d &M& && &&4 && 66o"u` ?J$0BN`l~$I/( /2(>/P(\/n($z/(/(/(I/(/8?"4u.9!t+1  QyUtmQRkyUWq#6&yU~W6͔QUQQR`?CopyrigPt (c)2`2\092`M*`c(`o%s"`f0b!ar$`]aua0`i"`n.2`_ Alx` (hUs|beP`e(`v` dp`RSST SB-S`6b g?/4)=SfS@tff bT#S0Uorc@V*` @4c u vXgw'ntŔf!d] vze!`^H@!!Yd&Db&VUʄ&ՋeʄDՋ3ʄbՋ uʄՋʄՋ|ʄՋƈՋcȆՋƈ!Ջ&ƈ4!ՋDƈR!Ջbƈp!ՋƈRƈ!Ջƈ!Ջƈ!Ջʄ1VHD: # h0>Th]]9 MTIAU@?@kE8?@ ͂?@SSfhr~?Pps-8R?>$ > n JuM{` ?e ;  Su*Jt'  $mCLmta{\_I 3.Jv  q?#cwn?v(A[}Kѭ>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&*Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>Uh ETh]]9 MTIAU@?@-Ы?@ ͂?@SSfhr~?Pps-8R?>$ > n JuM{` ?e ;  Su*Jt'  H-mta{n_s9/3.Jv  q?#cwn?v(A[}Kѭ>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>Uh ETh]]9 MTIAU@B?@c?@ ͂?@SSfhr~?Pps-8R?>$#A n JuM{` ?e ;  Su*Jt'  m@mta{7._ xX3.Jv  q?#cwn?v(A[}Kѭ>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&*Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>Uh ETh]]9 MTIAU@eZԩ?@z?@ ͂?@SSfhr~?Pps-8R?>$ > n JuM{` ?e ;  Su*Jt'  }$~mta{_P@3.Jv  q?#cwn?v(A[}Kѭ>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&*Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>Uh ETh]]9 MTIAU@h!I?@Zzv?@ ͂?@SSfhr~?Pps-8R?>$ > n JuM{` ?e ;  Su*Jt'  XƿHmta{_z3.Jv  q?#cwn?v(A[}Kѭ>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>Uh ETh]]9 MTIAU@sLGv?@Bw6t?@ ͂?@SSfhr~?Pps-8R?>$> n JuM{` ?e ;  Su*Jt'  [Rw͖mta{__j3.Jv  q?#cwn?~v(A[}Kѭ>JU2N贁N[R?@M#J&A@bI(b*W)U+U+U&ՆJy#a?!J ?%P?AO"d+bRW+$u/%B#A1A15 `?Cop rigTt (c)x02`09x0Mp0cn0osh0fv2g1rj01av0ih0n.x0 l0 n8s2e0ejn0v0d0:1Y>4#-X#M3 M7#(p0b59 DFXGdG_'2WUA;FJ!$a dFD$&#l,&}>Uh ETh]]9 MTIAU@?@9 EU?@ ͂?@SSfhr~?Pps-8R?>$> n JuM{` ?e ;  Su*Jt'  $mCLmta{_3.Jv  q?#cwn?v(A[}Kѭ>JU2N贁N[?@M#J&A@bI(bW)U+U+TU&Jy#?!J ?%P?AO"d+KbW+$u/%B#A1A15 `?Cop rigTt (c)x02`09x0Mp0cn0osh0fv2g1rj01av0ih0n.x0 l0 n8s2e0en0v0d0:1Y>4#b-#M3 M7A#(p0bP59 DFXG~dG'2WUA);F!$a dF$&#l,&}>Uh ETh]]9 MTIAU@?@,?@ ͂?@SSfhr~?Pps-8R?>$> n JuM{` ?e ;  Su*Jt'  H-mta{X_ 3.Jv  q?#cwn?v(A[}Kѭ>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>Uh ETh]]9 MTIAU@B?@-r-.?@ ͂?@SSfhr~?Pps-8R?>$> n JuM{` ?e ;  Su*Jt'  m@mta{_ :3.Jv  q?#cwn?v(A[}Kѭ>JU2N贁N[?@M#J&A@bI(bUW)U+U+U&*Jy#?!lJ ?%P?AO"d+bW+$u/%B #A1A15 `?Co}p rigTt (cu)x02`09x0uMp0cn0osh0Ifv2g1rj01av0]ih0n.x0 Ul0 n8s2e0en0v0d0@:1Y>4#-#M3 M7 #(p0b5(9 DFXGdG'2WUAŔ;F!$a dF$&#l,&}>Uh ETh]]9 MTIAU@eZԩ?@ƍu?@ ͂?@SSfhr~?Pps-8R?>$> n JuM{` ?e ;  Su*Jt'  }$~mta{_D3.Jv  
[Binary Visio drawing data omitted: embedded network-diagram stencil/shape content (not representable as text). Recoverable shape names include "server, computer", "database server, computer", "Ethernet", "Bridge, network peripheral device", "Hub, network peripheral", and "Cloud, network", plus standard Visio stencil hint text ("Drag onto the page...") and copyright strings.]
K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z(. E|zo}#<b$] RVJ:F,a0RS S IBf>QEҺV\?Ft@K\B?U--# i[F @ h<53nI9fKo]o ڵ h5a9YUU aUU~ 3yk]Q ieq5 h2r_U"]rA #sHD:  # h8>T ]]9 MTJUF:??Fq12?F AE?Fzl]?P6 6>JuM`lW? SuJt  /êQPͰMtA[_Y#MVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!Zsꈐ43 0 @?/n' #M b7fJ:] f? @ fA B &C  fE  &F b & 4&H fH&I \&J fp&K &L f&M &N &&O & f&Q &R f6S $6T f86U L6V f`6W t6X f6Y 6Z f6[ 6\ f6] 6^ fF_ F` f(Fa ~L1B~`1F~t1[D1e1R~1~1Z~1^~1b~Af~Aj~(AnT#3 AMJUF6٫&?Fh%4+ޱ?F*5?F$_D"w?P6 8>JuM` ?uJt  (LbIt=W,MyIR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6ig5V1`e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ۰Z@]0?c Q#Oc Q\}|zeE:O)0:iG`iw*h<5a#j8oJiqze~EA#o>"rA #sHD:7 # h4>T]]9 MTJUFI)?FUj?FeTC?FǿJWc^?P6 v9>JuM` ?)uJt  6|.It=WL\} IR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LQ DO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!Z!姈?F9"rA #sHD:7 # h#4>T#3 AMJUFe{5?F?Fs۰Z?FNb_+iFw?P6 :>JuM` ?uJt  DqIt=W{*IRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJrO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD:7 # h4>T]]9 MTJUFS?FަƢ?F'?FǿJWc^?P6 v;>JuM` ?)uJt  cIt=W(?IRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 $V+ZlJ]Uhz@!A!Z5?!?F9&sA #<# $B jfJ'j`00>oPaYc l]]aE:Ͷ0TbCcba`iK8K8h<5e۰ϿZk(qQlqe~EeFͭ||`ai !hA'o>"rA 5#sHD:7 # h4>T]]9 MTJUF蕯?Fcų[?F!?Flaxy?P6 v<>JuM` ?)uJt  {It=W ůIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUFA!z?FA]?F>WB?Ft=E?P6 v=>JuM` ?)uJt  wT vIt=W?mIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?Copy_rigTtt(c)t2U0 9tM c os f"!rԙ !a i n}.t Al U (s"e e 5v0d i!Em$}! K2?b6 ?ALb4($I3/ 4/U%b-|# |'Yb6?Fiie<59 FjX/G'0UBib6!}40 aFuJlJ8>UhAA@ !1!Z}o'- E|zo}#<b$] VJ:FQw0RS S IBf>QE2!?Fp\BU--# i[F @ h<53n CU9fKo]o  h5a9YUU aUUwi]Q ieq5 hb2r_U"rA #sHD:7 # h8>T ]]9 MTJUF gl6?FDb,)?FUw)٫?FQ`m ^?P6 >>JuM`lW? SuJt  LUMtA[_ʓWMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?/n' #M b7fJ:]h^PsojRփUaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUFE//?F_ 8?F*5?F$_D"w?P6 ?>JuM` ?uJt  z7It=WiIR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+Ab6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L(a2BO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4a2 9b`fJ۰Z@]0?c Q#Oc Q\}ȱ~aE:)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUFK?Fc>,Λ?FeTC?FǿJWc^?P6 v@>JuM` ?)uJt  jDFIt=W-M3}IR?8IlRv\#>2zGz?MHJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. 
Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L(BO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!A!Z!?F9v"rA # sHD:7 # h#4>T#3 AMJUFQL ?F7ԩ+?Fs۰Z?FNb_+iFw?P6 A>JuM` ?uJt  Ç\UIt=W @ٶIRUo_}WIlTy6ވ#>2zGz?N@MJ@eb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L( DO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:rO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD:7 # h4>T]]9 MTJUFLli[?FQ>?F'?FǿJWc^?P6 vB>JuM` ?)uJt  ICIt=WN3sIRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 $V+ZlJ]Uhz@!A!Z5?!?F9&sA #<# $B jfJ'j`00>oPaYc l]]aE:Ͷ0TbCcba`iK8K8h<5e۰ϿZk(qQlqe~EeFͭ||`ai !hA'o>"rA 5#sHD:7 # h4>T]]9 MTJUF63?F8.G?F!?Flaxy?P6 vC>JuM` ?)uJt  ͧILIt=Wf IRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LQ DO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUFPHY?FpN?F>WB?Ft=E?P6 rAJuM` ?)uJt  PrIt=WoDIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<bc$] )VJ:FQw00RS S IBf>QE2!?Fp\BU--# i?[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU?wi]Q ieq5 h2r_U"rA #sHD:7 # h8>T ]]9 MTJUFǦmڷ?FWZgL?FUw)٫?FQ`m ^?P6 E>JuM`lW? SuJt  NKMtA[X_*`lMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?/n' #M b7fJ:]h^PsojRփUaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUFܹ?F cM?F*5?F$_D"w?P6 F>JuM` ?uJt  @@|It=Wn56uIR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ۰Z@]0?c Q#Oc Q\}ȱ~aE:)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUF (?FthZ?FeTC?FǿJWc^?P6 vG>JuM` ?)uJt  It=WX IR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%`|# |'1b6LQBBO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!Z!姈?F9"rA #sHD:7 # h#4>T#3 AMJUFr!Qw?Fvk?Fs۰Z?FNb_+iFw?P6 AJuM` ?uJt  1It=WOIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L(BO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4 9b`fJ:rO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD:7 # h4>T]]9 MTJUF!P4K?F l?F'?FǿJWc^?P6 vI>JuM` ?)uJt  {d5Z\It=W2;wEIRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 $V+ZlJ]Uhz@!A!Z5?!?F9&sA #<# $B jfJ'j`00>oPaYc ?l]]beE:Ͷǝ0TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>"rA #sHD:7 # h4>T]]9 MTJUFYzUv?FS s9?F!?Flaxy?P6 vJ>JuM` ?)uJt  V"(It=W7:LIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. 
_ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUF)"z>?F^i2VS?F>WB?Ft=E?P6 vK>JuM` ?)uJt  7It=WܯHIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<b$] RVJ:FQwa0RS S IBf>QE2!?Fp\B?U--# i[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU~wi]Q ieq5 h2r_U"]rA #sHD:7 # h8>T ]]9 MTJUF{KY?F#v\?FUw)٫?FQ`m ^?P6 L>JuM`lW? SuJt  o|MtA[_"oRMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?/n' #M b7fJ:]h^PsojRփUaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUF<5^?FUP?F*5?F$_D"w?P6 M>JuM` ?uJt  .9BWyIt=Wnq׏8IR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L( DO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ۰Z@]0?c Q#Oc Q\}ȱ~aE:)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUF?F͍~eTC?FJWc^?P6 N>JuM` ?juJt  HIt=WByIR?8Il?Rv\#>2zGz?@MJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!Z!?F9"rA #sHD:7 # h#4>T#3 AMJUFSw?FK?Fs۰Z?FNb_+iFw?P6 O>JuM` ?uJt  fd>It=WN6sgHRUo}WIlT?y6ވ#>2zGz?@MJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!1l&fL# $<EfB 9b`fJ:r'@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij mh~EA#o>"rA 5#sHD:7 # h4>T]]9 MTJUF%u)H׾?FȐ?F'?FǿJWc^?P6 rAJuM` ?)uJt  ُuߓIt=Wί IRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTtt(c])t20 9tuM c os If"!r !a i n.t WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6LLs2BO ie<59 FXG/'0U`Rb6-!}40 V+Z lJ]UhzЎ!A!Z5!O?F9&sA#<# $s2 %jfJ'j`00>oPaYc l]]aE:Ͷc0TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>v"rA # sHD:7 # h4>T]]9 MTJUFf^oO?F]?F!?Flaxy?P6 vQ>JuM` ?)uJt  L+It=WݑHIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUFr,?F4w?F>WB?Ft=E?P6 vR>JuM` ?)uJt  6j>It=WNFFIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<b$] RVJ:FQwa0RS S IBf>QE2!?Fp\B?U--# i[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU~wi]Q ieq5 h2r_U"]rA #sHD:7 # h8>T ]]9 MTJUF_5=~?Fe,=Vݼ?FUw)٫?FQ`m ^?P6 S>JuM`lW? SuJt  ?aV_MtA[_=p'MVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?/n' #M b7fJ:]h^PsojRփUaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUF<@έ?F&¬?F*5?F$_D"w?P6 T>JuM` ?uJt  &lIt=WAI׃IR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. 
Al (s"e e v0d i!Em$+A7?b6 ?AgJa6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6 LeE O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !6&f# &$<EfBM 9b`fJ۰Z@]0?c Q#Oc ?Q\}ֱ~aE:)'0:iG`iw*h<5a#j8oJiqze`~EA#o>"rA #sHD:7 # h4>T]]9 MTJUFpq?Fq֌?FeTC?FǿJWc^?P6 vU>JuM` ?)uJt  0@hIt=WmiIR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LQBBO ie<59 FXG'0U`RŴb6!}40 V+ZlJQ@hh|G!A!Z!?F9v"rA # sHD:7 # h#4>T#3 AMJUF߳ڮ?F$ ?Fs۰Z?FNb_+iFw?P6 V>JuM` ?uJt  JJ&qIt=WrhIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6big581e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:rO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD:7 # h4>T]]9 MTJUF>=9_?FL(Ѿ?F'?FǿJWc^?P6 vW>JuM` ?)uJt  V6>nIt=WgD~baIRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 $V+ZlJ]Uhz@!A!Z5?!?F9&sA #<# $B jfJ'j`00>oPaYc ?l]]|eE:Ͷǝ0TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>"rA #sHD:7 # h4>T]]9 MTJUFBP(?FxQ8?F!?Flaxy?P6 vX>JuM` ?)uJt  9KIt=WYGJIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FD'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUFMW?FM6N?F>WB?Ft=E?P6 vY>JuM` ?)uJt  1|It=WƯ:IRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<b$] RVJ:FQwa0RS S IBf>QE2!?Fp\BU--| i?[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU?wi]Q ieq5 h2r_U"rA #sHD:7 # h8>T ]]9 MTJUF('J?FK5+:?FUw)٫?FQ`m ^?P6 Z>JuM`lW? SuJt  y SksMtA[_eMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA! U0|z3 0 @?ҏn'R# b7fJ:]h^PsojR֯UaEQ!?ߟ!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUFB?F̒-8?F*5?F$_D"w?P6 [>JuM` ?uJt  4E$pIt=WLx4IR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ۰Z@]0?c Q#Oc Q\}ȱ~aE:)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUFJ*ӏ?Fc?FeTC?FǿJWc^?P6 v\>JuM` ?)uJt  uIt=Wt]aIR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!Z!姈?F9"rA #sHD:7 # h#4>T#3 AMJUF"?F!S/?Fs۰Z?FNb_+iFw?P6 ]>JuM` ?uJt  B?˳It=WFp\cIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:rO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? 
mh~EA#o>"rA k#sHD:7 # h4>T]]9 MTJUF?F2\D?F'異JWc^?P6 ^>JuM` ?juJt  rs"It=WVIRkzIl?Rv\#>2zGz?@MJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigT}tZ(c)ZW20 9ZM ]c os f"R!r !a i n.Z AQl [&s"e e v0d @i!Em$+A?b6 ?AgJaT6` F !!l"zbAR +6ig5V181 ?2?;:C?'$4/U%G|# |'b6L3FO ie<59 FXG'K0U`Rb6!}4K0 V+ZlJ]Uhz!A!Z5!?F9&߮sA#`<# $B jfJ'j?`00>oPaYc l]G]aE:Ͷ0TbCcba`i?K8K8h<5e۰Zk(qׁQlqe~EeFͭ||`ai !hA'o>"]rA #sHD:7 # h4>T]]9 MTJUF79 Ğ?Fl_n.?F!?Flaxy?P6 v_>JuM` ?)uJt  sd It=W> IRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD:7 # h4>T]]9 MTJUF-#&\?FZ|?F>WB?Ft=E?P6 v`>JuM` ?)uJt  [It=Wq.IRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<b$] RVJ:FQwa0RS S IBf>QE2!?Fp\B?U--# i[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU~wi]Q ieq5 h2r_U"]rA #sHD:7 # h8>T ]]9 MTJUFW>?F7 ?FUw)٫?FQ`m ^?P6 a>JuM`lW? SuJt  /êQPͰMtA[_Y#MVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?ҏn'0S b7fJ:]h^PsojR֯UaEQ!?ߟ!`0 0c s!R'i e:}ɟh~5aef9oa|b c g~L1B~`1F~t1[D1e1R~1~1Z~1^~1b~Af~Aj~(AnT#3 AMJUF6٫&?Fh%4+ޱ?F*5?F$_D"w?P6 c>JuM` ?uJt  (LbIt=W,MyIR_FIl#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6&L D O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<EfBM 9b`fJ۰Z@]0?c Q#Oc Q\}|zeE:)0:iG`i?w*h<5a#j8oJiqze~EA#o>"rA k#sHD:b # h4>T]]9 MTJUFI)?FUj?FeTC?FǿJWc^?P6 vd>JuM` ?)uJt  6|.It=WL\} IR?8IlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6LL0DO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!Z!?F9"rA #sHD:b # h#4>T#3 AMJUFe{5?F?Fs۰Z?FNb_+iFw?P6 e>JuM` ?uJt  DqIt=W{*IRUo_}WIlTy6ވ#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6&L0D O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<EfBM 9b`fJ:r@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij mh~EA#o>"rA #sHD:b # h4>T]]9 MTJUFS?FަƢ?F'?FǿJWc^?P6 vf>JuM` ?)uJt  cIt=W(?IRkzIlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6LL DO ie<59 FXG/'0U`Rb6-!}40 V+Z lJ]UhzЎ!A!Z5!O?F9&sA#< $B %jfJ'j`00>oPaYc l]]aE:Ͷc0TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>v"rA # sHD:b # h4>T]]9 MTJUF蕯?Fcų[?F!?Flaxy?P6 vg>JuM` ?)uJt  {It=W ůIRE! YBIl p#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?A mT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6&L2B O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !19b$< $2M 9b`fJ: m٠uۘ]ᇄ0:mOc i'A~aEeD?c  V#NeFaze<5eF|lVaiU?3Lh~EA#o>"rA k#sHD:b # h4>T]]9 MTJUFA!z?FA]?F>WB?Ft=E?P6 rUAJuM` ?)uJt  wT vIt=W?mIRݮIl$dvĤ#>2zGz?@M JAe #z (#+#+#&*Jp!p!5 g`?CopyrigTtt(wc)t20 9tM c o%s f"!r !ua i n.t_ Al (Us"e e v0 d i!Em$}! 
K2?0b6 ?ALcb4($3/ 4/U%-X|# |'b6?Fiie<59 FX/G_'0UBb6Z!}40 aFuJlJ8>UhAA Ў!1!Z}'[- E|zo}#;<b$T] VJ:FQw0RS S IBf>QE2!?Fp\BU--# i[F @ h<53n CU9fKo]o ?ڵ h5a9YUU aUUwi㼕]Q ieOq5 h2r_U"rA #sHD:b # h8>T ]]9 MTJUF gl6?FDb,)?FUw)٫?FQ`m ^?P6 i>JuM`lW? SuJt  LUMtA[_ʓWMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @i #zv t#.b( (+&B"f ?APb)4(z$D3//& 115 k`?CopyrigTtk(c])k2009kuM0c0os0If21r01a0i0n.k WAl @ 8sBUe0e0v @d@14-3K 7!bO&& b~59 &&XGK"'0U!R&!40R FJlJ Q! (sQsQ]31eA-!ZU0|z 0f @?n'B# b7fJ:]h^Psoj?R֯UaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUFE//?F_ 8?F*5?F$_D"w?P6 j>JuM` ?uJt  z7It=WiIR_FIl#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6fLF O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<EfBM 9b`fJ۰Z@]0?c Q#Oc ?Q\}ֱ~aE:)'0:iG`iw*h<5a#j8oJiqze`~EA#o>"rA #sHD:b # h4>T]]9 MTJUFK?Fc>,Λ?FeTC?FǿJWc^?P6 vk>JuM` ?)uJt  jDFIt=W-M3}IR?8IlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!Z!?F9"rA #sHD:b # h#4>T#3 AMJUFQL ?F7ԩ+?Fs۰Z?FNb_+iFw?P6 l>JuM` ?uJt  Ç\UIt=W @ٶIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6&L D O @ie<59 FXG'0U`Rb6!}40 V+ZMQI0h|G!A !16&f &$<EfBM 9b`fJ:r@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij mh~EA#o>"rA #sHD:b # h4>T]]9 MTJUFLli[?FQ>?F'?FǿJWc^?P6 vm>JuM` ?)uJt  ICIt=WN3sIRkzIlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+Z lJ]UhzЎ!A!Z5!O?F9&sA#< $B %jfJ'j`00>oPaYc l]]aE:Ͷc0TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>v"rA # sHD:b # h4>T]]9 MTJUF63?F8.G?F!?Flaxy?P6 vn>JuM` ?)uJt  ͧILIt=Wf IRE! YBIl p#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6LL DO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!19b $< $B 9b`fJ: m٠uۘ]0:mOc ?i'A~aEeDͅ?c ? V#NeFaze<5eF|lVaiU3Lh`~EA#o>"rA #sHD:b # h4>T]]9 MTJUFPHY?FpN?F>WB?Ft=E?P6 vo>JuM` ?)uJt  PrIt=WoDIRݮIl$dvĤ#>2zGz?@M JAe #z (#+#+#&*Jp!p!5 g`?CopyrigTt (c) 2U0 9 M c os f"!rԙ !a i n}. Al U (s"e e 5v0d i!Em$}! K2?b6 ?ALb4($I3/ 4/U%b-|# |'Yb6?Fiie<59 FjX/G'0UBib6!}40 aFuJlJ8>UhAA@ !1!Z}o'- E|zo}#<bX$ [ VʼJ:F?Qw0ȒS S I?Bf>QE2!?Fp\BU--# i[ÏF @ h<53n ?CU9fKo]o ڵ h5a9YUU aUUwi]Q i?eq5 h2r_U"rA k#sHD:b # h8>T ]]9 MTJUFǦmڷ?FWZgL?FUw)٫?FQ`m ^?P6 p>JuM`lW? SuJt  NKMtA[X_*`lMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @i #zv t#.b( (+&B"f ?APb)4(z$D3//& 115 k`?CopyrigTtk(c])k2009kuM0c0os0If21r01a0i0n.k WAl @ 8sBUe0e0v @d@14-3K 7IbO&&i bP~59 &&XGK"'0U!R&!40 FJlJ $Q! (sQsQ]31ZeA!ZU0|z 0 @?n'0S b7fJ:]h?^PsojR֯UaEQ!?!`c0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUFܹ?F cM?F*5?F$_D"w?P6 q>JuM` ?uJt  @@|It=Wn56uIR_FIl#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6fLF O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<EfBM 9b`fJ۰Z@]0?c Q#Oc ?Q\}ֱ~aE:)'0:iG`iw*h<5a#j8oJiqze`~EA#o>"rA #sHD:b # h4>T]]9 MTJUF (?FthZ?FeTC?FǿJWc^?P6 vr>JuM` ?)uJt  It=WX IR?8IlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. 
WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6LL2BO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!Z!?F9"rA #sHD:b # h#4>T#3 AMJUFr!Qw?Fvk?Fs۰Z?FNb_+iFw?P6 s>JuM` ?uJt  1It=WOIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6&L D O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<EfBM 9b`fJ:r@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij mh~EA#o>"rA #sHD:b # h4>T]]9 MTJUF!P4K?F l?F'?FǿJWc^?P6 vt>JuM` ?)uJt  {d5Z\It=W2;wEIRkzIlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6LL DO ie<59 FXG/'0U`Rb6-!}40 V+Z lJ]UhzЎ!A!Z5!O?F9&sA#< $B %jfJ'j`00>oPaYc l]]beE:Ͷ10TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>"rA #sHD:b # h4>T]]9 MTJUFYzUv?FS s9?F!?Flaxy?P6 vu>JuM` ?)uJt  V"(It=W7:LIRE! YBIl p#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!19b $< $B 9b`fJ: m٠uۘ]0:mOc ?i'A~aEeDͅ?c ? V#NeFaze<5eF|lVaiU3Lh`~EA#o>"rA #sHD:b # h4>T]]9 MTJUF)"z>?F^i2VS?F>WB?Ft=E?P6 vv>JuM` ?)uJt  7It=WܯHIRݮIl$dvĤ#>2zGz?@M JAe #z (#+#+#&*Jp!p!5 g`?CopyrigTt (c) 2U0 9 M c os f"!rԙ !a i n}. Al U (s"e e 5v0d i!Em$}! K2?b6 ?ALb4($I3/ 4/U%b-|# |'Yb6?Fiie<59 FjX/G'0UBib6!}40 aFuJlJ8>UhAA@ !1!Z}o'- E|zo}#<bX$ 0[ VʼJ:F?Qw0ȒS S I?Bf>QE2!?Fp\BU--# i[ÏF @ h<53n ?CU9fKo]o ڵ h5a9YUU aUUwi]Q i?eq5 h2r_U"rA k#sHD:b # h8>T ]]9 MTJUF{KY?F#v\?FUw)٫?FQ`m ^?P6 w>JuM`lW? SuJt  o|MtA[_"oRMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @i #zv t#.b( (+&B"f ?APb)4(z$D3//& 115 k`?CopyrigTtk(c])k2009kuM0c0os0If21r01a0i0n.k WAl @ 8sBUe0e0v @d@14-3K 7IbO&&i bP~59 &&XGK"'0U!R&!40 FJlJ $Q! (sQsQ]31ZeA!ZU0|z 0 @?nK' # bS7fJ:]h^PsojR֯UaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUF<5^?FUP?F*5?F$_D"w?P6 x>JuM` ?uJt  .9BWyIt=Wnq׏8IR_FIl#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6fLF O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<EfBM 9b`fJ۰Z@]0?c Q#Oc ?Q\}ֱ~aE:)'0:iG`iw*h<5a#j8oJiqze`~EA#o>"rA #sHD:b # h4>T]]9 MTJUF?F͍~eTC?FJWc^?P6 y>JuM` ?juJt  HIt=WByIR?8Il?Rv\#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$+A?b6 ?AgJaT6` F !!l"zbAR +6ig5V181 ?2?;:C?'$4/U%G|# |'b6L DO ie<59 FXG'K0U`Rb6!}4K0 V+ZlJQI0h|G!A!Z!?F9"rA #sHD:b # h#4>T#3 AMJUFSw?FK?Fs۰Z?FNb_+iFw?P6 z>JuM` ?uJt  fd>It=WN6sgHRUo}WIlT?y6ވ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$+A?b6 ?AgJaT6` F !!l"zbAR +6ig5V181 ?2?;:C?'$4/U%G|# |'b6L3FO ie<59 FXG'K0U`Rb6!}4K0 V+ZlJQI0h|G!A!1&f $<EfB 9b`fJ:r@]0:iG`Oc ?qy"zeEajoJiaze<5e'f`0mFaij mh~EA#o>v"rA # sHD:b # h4>T]]9 MTJUF%u)H׾?FȐ?F'?FǿJWc^?P6 v{>JuM` ?)uJt  ُuߓIt=Wί IRkzIlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigT}tt(c)tW20 9tM ]c os f"R!r !a i n.t AUl (s"e e v0d @i!Em$+A?b6 ?AgJaT6` F !!l"zbAR +6ig5V181 ?2?;:C?'$4/U%G|# |'b6L3FO ie<59 FXG'K0U`Rb6!}4K0 V+ZlJ]Uhz!A!Z5!?F9&߮sA#`< $B jfJ'j?`00>oPaYc l]G]aE:Ͷ0TbCcba`i?K8K8h<5e۰Zk(qׁQlqe~EeFͭ||`ai !hA'o>"]rA #sHD:b # h4>T]]9 MTJUFf^oO?F]?F!?Flaxy?P6 v|>JuM` ?)uJt  L+It=WݑHIRE! YBIl p#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. 
WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!19b $< $B 9b`fJ: m٠uۘ]0:mOc ?i'A~aEeDͅ?c ? V#NeFaze<5eF|lVaiU3Lh`~EA#o>"rA #sHD:b # h4>T]]9 MTJUFr,?F4w?F>WB?Ft=E?P6 v}>JuM` ?)uJt  6j>It=WNFFIRݮIl$dvĤ#>2zGz?@M JAe #z (#+#+#&*Jp!p!5 g`?CopyrigTt (c) 2U0 9 M c os f"!rԙ !a i n}. Al U (s"e e 5v0d i!Em$}! K2?b6 ?ALb4($I3/ 4/U%b-|# |'Yb6?Fiie<59 FjX/G'0UBib6!}40 aFuJlJ8>UhAA@ !1!Z}o'- E|zo}#<b$] VJ:FQw0RS S IBf>QE2!?Fp\BU--# i[F @ h<53n CU9fKo]o  h5a9YUU aUUwi]Q ieq5 hb2r_U"rA #sHD:b # h8>T ]]9 MTJUF_5=~?Fe,=Vݼ?FUw)٫?FQ`m ^?P6 ~>JuM`lW? SuJt  ?aV_MtA[_=p'MVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @i #zv t#.b( (+&B"f ?APb)4(z$D3//& 115 k`?CopyrigTtk(c])k2009kuM0c0os0If21r01a0i0n.k WAl @ 8sBUe0e0v @d@14-3K 7IbO&&i bP~59 &&XGK"'0U!R&!40 FJlJ $Q! (sQsQ]31ZeA!ZU0|z 0 @?nK' # bS7fJ:]h^PsojR֯UaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUF<@έ?F&¬?F*5?F$_D"w?P6 >JuM` ?uJt  &lIt=WAI׃IR_FIl#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6fLF O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<EfBM 9b`fJ۰Z@]0?c Q#Oc ?Q\}ֱ~aE:)'0:iG`iw*h<5a#j8oJiqze`~EA#o>"rA #sHD:b # h4>T]]9 MTJUFpq?Fq֌?FeTC?FǿJWc^?P6 v>JuM` ?)uJt  0@hIt=WmiIR?8IlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!Z!?F9"rA #sHD:b # h#4>T#3 AMJUF߳ڮ?F$ ?Fs۰Z?FNb_+iFw?P6 >JuM` ?uJt  JJ&qIt=WrhIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6fLF O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<EfBM 9b`fJ:r@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij mh~EA#o>"rA #sHD:b # h4>T]]9 MTJUF>=9_?FL(Ѿ?F'?FǿJWc^?P6 v>JuM` ?)uJt  V6>nIt=WgD~baIRkzIlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+Z lJ]UhzЎ!A!Z5!O?F9&sA#< $B %jfJ'j`00>oPaYc l]]|eE:Ͷ10TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>"rA #sHD:b # h4>T]]9 MTJUFBP(?FxQ8?F!?Flaxy?P6 v>JuM` ?)uJt  9KIt=WYGJIRE! YBIl p#>2zGz?@M2JAeWb #z@Q (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!19b $< $B 9b`fJ: m٠uۘ]0:mOc ?i'A~aEeDͅ?c ? V#NeFaze<5eF|lVaiU3Lh`~EA#o>"rA #sHD:b # h4>T]]9 MTJUFMW?FM6N?F>WB?Ft=E?P6 v>JuM` ?)uJt  1|It=WƯ:IRݮIl$dvĤ#>2zGz?@M JAe #z (#+#+#&*Jp!p!5 g`?CopyrigTt (c) 2U0 9 M c os f"!rԙ !a i n}. Al U (s"e e 5v0d i!Em$}! K2?b6 ?ALb4($I3/ 4/U%b-|# |'Ib6?Fiie<59 FjX/G'0UBib6!}40 aFuJlJ8>UhAA@ !1!Z}o'- E|zo}#<bX$!B] VʼJ:F?Qw0ȒS S I?Bf>QE2!?Fp\BU--| i[F @ h<53n CU9fKo]o ڵ h5a9YUU aUUwiǤ]Q işeq5 h2r_U"rA 5#sHD:b # h8>T ]]9 MTJUF('J?FK5+:?FUw)٫?FQ`m ^?P6 >JuM`lW? SuJt  y SksMtA[_eMVYBMpry2VI'>\??\.?lA8 M2ll/f?=@MJB& @i #zv t#.b( (+&B"f ?APb)4(z$D3//& 115 k`?CopyrigTtk(c])k2009kuM0c0os0If21r01a0i0n.k WAl @ 8sBUe0e0v @d@14-3K 7IbO&&i bP~59 &&XGK"'0U!R&!40 FJlJ $Q! (sQsQ]31ZeA!ZU0|z 0 @?n'."# b7fJ:]h?^PsojR֯UaEQ!?!`c0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUFB?F̒-8?F*5?F$_D"w?P6 >JuM` ?uJt  4E$pIt=WLx4IR_FIl#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. 
Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6&L72B O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<Ef72M 9b`fJ۰Z@]0?c Q#Oc ?Q\}ֱ~aE:)'0:iG`iw*h<5a#j8oJiqze`~EA#o>"rA #sHD:b # h4>T]]9 MTJUFJ*ӏ?Fc?FeTC?FǿJWc^?P6 v>JuM` ?)uJt  uIt=Wt]aIR?8IlRv\#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!Z!?F9"rA #sHD:b # h#4>T#3 AMJUF"?F!S/?Fs۰Z?FNb_+iFw?P6 >JuM` ?uJt  B?˳It=WFp\cIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #z (#+#+T#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A ?b6 ?AgJamT6G` F !!l"zbA +6iTg5V181 ?2?;:C?'I4/U%G,|# |'b6fLF O @ie<59 FXG'0U`Rb6!}40 V+ZlJQI0h|G!A !16&f &$<EfBM 9b`fJ:r@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij mh~EA#o>"rA #sHD:b # h4>T]]9 MTJUF?F2\D?F'異JWc^?P6 ݉>JuM` ?juJt  rs"It=WVIRkzIl?Rv\#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 `?CopyrigTtZ(c)Z20 9ZM c. os f"!r !a i n.Z Al [&s"e ej v0d  i!Em$+A?b6 ?zAgJaۢT6` F !!l"zbjA) +6ig5UV181 ?2?F;:C?'4/U%G|# |'b6LA#BO ie<59 FXG'0U`Rb6!}40 V+ZlJ]UhzA!Z5!?F9&s7A#< X$" jfJ'j`00>oPaYc l]]aE:?Ͷ0TbCcba`iK8K8h<5e?۰Zk(qQlqe~EeFͭ||`ai !h`A'o>"rA #sHD:b # h4>T]]9 MTJUF79 Ğ?Fl_n.?F!?Flaxy?P6 v>JuM` ?)uJt  sd It=W> IRE! YBIl p#>2zGz?@M2JAe7b #zQ (#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!B19b$< $BM 9b`fJ: m٠uۘ]ᇄ0:mOc i'A~aEeD?c  V#NeFaze<5eF|lVaiU?3Lh~EA#o>"rA k#sHD:b # h4>T]]9 MTJUF-#&\?FZ|?F>WB?Ft=E?P6 v>JuM` ?)uJt  [It=Wq.IRݮIl$dvĤ#>2zGz?@M JAe #z (#+#+#&*Jp!p!5 g`?CopyrigTt (c) 2U0 9 M c os f"!rԙ !a i n}. Al U (s"e e 5v0d i!Em$}! K2?b6 ?ALb4($I3/ 4/U%b-|# |'Yb6?Fiie<59 FjX/G'0UBib6!}40 aFuJlJ8>UhAA@ !1!Z}o'- E|zo}#<b$] VM:FQw0RS S IBf>E2!?Fp\BU--# i[F @ h<53n CU9fKo]o  h5a9YUU aUUwi]Q ieq5 hb2r_U"rA #sHD:b # h8>T ]]9 MTJUFW>?F7 ?FUw)٫?FQ`m ^?P6 >JuM`lW? SuJt  /êQPͰMtA[_Y#MVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @i #zv t#.b( (+&B"f ?APb)4(z$D3//& 115 k`?CopyrigTtk(c])k2009kuM0c0os0If21r01a0i0n.k WAl @ 8sBUe0e0v @d@14-3K 7IbO&&i bP~59 &&XGK"'0U!R&!40 FJlJ $Q! (sQsQ]31ZeA!ZU0|z 0 @?nK' # bS7fJ:]h^PsojR֯UaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c g~L1B~`1F~t1[D1e1R~1~1Z~1^~1b~Af~Aj~(AnT#3 AMJUF6٫&?Fh%4+ޱ?F*5?F$_D"w?P6 >JuM` ?uJt  (LbIt=W,MyIR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L(BO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4 9b`fJ۰Z@]0?c Q#Oc Q\}|zeE:O)0:iG`iw*h<5a#j8oJiqze~EA#o>"rA #sHD: # h4>T]]9 MTJUFI)?FUj?FeTC?FǿJWc^?P6 v>JuM` ?)uJt  6|.It=WL\} IR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!Z!姈?F9"rA #sHD: # h#4>T#3 AMJUFe{5?F?Fs۰Z?FNb_+iFw?P6 >JuM` ?uJt  DqIt=W{*IRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:rO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD: # h4>T]]9 MTJUFS?FަƢ?F'?FǿJWc^?P6 v>JuM` ?)uJt  cIt=W(?IRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. 
_ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 $V+ZlJ]Uhz@!A!Z5?!?F9&sA #<# $B jfJ'j`00>oPaYc l]]aE:Ͷ0TbCcba`iK8K8h<5e۰ϿZk(qQlqe~EeFͭ||`ai !hA'o>"rA 5#sHD: # h4>T]]9 MTJUF蕯?Fcų[?F!?Flaxy?P6 v>JuM` ?)uJt  {It=W ůIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD: # h4>T]]9 MTJUFA!z?FA]?F>WB?Ft=E?P6 v>JuM` ?)uJt  wT vIt=W?mIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?Copy_rigTtt(c)t2U0 9tM c os f"!rԙ !a i n}.t Al U (s"e e 5v0d i!Em$}! K2?b6 ?ALb4($I3/ 4/U%b-|# |'Yb6?Fiie<59 FjX/G'0UBib6!}40 aFuJlJ8>UhAA@ !1!Z}o'- E|zo}#<b$] VJ:FQw0RS S IBf>QE2!?Fp\BU--# i[F @ h<53n CU9fKo]o  h5a9YUU aUUwi]Q ieq5 hb2r_U"rA #sHD: # h8>T ]]9 MTJUF gl6?FDb,)?FUw)٫?FQ`m ^?P6 >JuM`lW? SuJt  LUMtA[_ʓWMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i b~59 && XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?ҏn'B# b7fJ:]h^PsojR֯UaEQ!?ߟ!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUFE//?F_ 8?F*5?F$_D"w?P6 >JuM` ?uJt  z7It=WiIR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ۰Z@]0?c Q#Oc Q\}ȱ~aE:)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD: # h4>T]]9 MTJUFK?Fc>,Λ?FeTC?FǿJWc^?P6 v>JuM` ?)uJt  jDFIt=W-M3}IR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!Z!姈?F9"rA #sHD: # h#4>T#3 AMJUFQL ?F7ԩ+?Fs۰Z?FNb_+iFw?P6 >JuM` ?uJt  Ç\UIt=W @ٶIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:rO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD: # h4>T]]9 MTJUFLli[?FQ>?F'?FǿJWc^?P6 v>JuM` ?)uJt  ICIt=WN3sIRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 $V+ZlJ]Uhz@!A!Z5?!?F9&sA #<# $B jfJ'j`00>oPaYc l]]aE:Ͷ0TbCcba`iK8K8h<5e۰ϿZk(qQlqe~EeFͭ||`ai !hA'o>"rA 5#sHD: # h4>T]]9 MTJUF63?F8.G?F!?Flaxy?P6 v>JuM` ?)uJt  ͧILIt=Wf IRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD: # h4>T]]9 MTJUFPHY?FpN?F>WB?Ft=E?P6 v>JuM` ?)uJt  PrIt=WoDIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<b$] RVJ:FQwa0RS S IBf>QE2!?Fp\B?U--# i[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU~wi]Q ieq5 h2r_U"]rA #sHD: # h8>T ]]9 MTJUFǦmڷ?FWZgL?FUw)٫?FQ`m ^?P6 AJuM`lW? 
SuJt  NKMtA[X_*`lMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?ҏn'# b7fJ:]h^PsojR֯UaEQ!?ߟ!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUFܹ?F cM?F*5?F$_D"w?P6 >JuM` ?uJt  @@|It=Wn56uIR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ۰Z@]0?c Q#Oc Q\}ȱ~aE:)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD: # h4>T]]9 MTJUF (?FthZ?FeTC?FǿJWc^?P6 v>JuM` ?)uJt  It=WX IR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!Z!姈?F9"rA #sHD: # h#4>T#3 AMJUFr!Qw?Fvk?Fs۰Z?FNb_+iFw?P6 >JuM` ?uJt  1It=WOIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:rO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD: # h4>T]]9 MTJUF!P4K?F l?F'?FǿJWc^?P6 v>JuM` ?)uJt  {d5Z\It=W2;wEIRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 $V+ZlJ]Uhz@!A!Z5?!?F9&sA #<# $B jfJ'j`00>oPaYc ?l]]beE:Ͷǝ0TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>"rA #sHD: # h4>T]]9 MTJUFYzUv?FS s9?F!?Flaxy?P6 v>JuM` ?)uJt  V"(It=W7:LIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD: # h4>T]]9 MTJUF)"z>?F^i2VS?F>WB?Ft=E?P6 v>JuM` ?)uJt  7It=WܯHIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<b$] RVJ:FQwa0RS S IBf>QE2!?Fp\B?U--# i[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU~wi]Q ieq5 h2r_U"]rA #sHD: # h8>T ]]9 MTJUF{KY?F#v\?FUw)٫?FQ`m ^?P6 >JuM`lW? SuJt  o|MtA[_"oRMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?/n' #M b7fJ:]h^PsojRփUaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUF<5^?FUP?F*5?F$_D"w?P6 >JuM` ?uJt  .9BWyIt=Wnq׏8IR_FIl#>2zGz?N@MJAeb #zb(#+B#+#&Mp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L(\"BO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4\" 9b`fJ۰Z@]0?c Q#Oc Q\}ȱ~aE:)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD: # h4>T]]9 MTJUF?F͍~eTC?FJWc^?P6 ݤ>JuM` ?juJt  HIt=WByIR?8Il?Rv\#>2zGz?@MJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6LLyBBO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!ZA!Z!?F9"]rA #sHD: # h#4>T#3 AMJUFSw?FK?Fs۰Z?FNb_+iFw?P6 >JuM` ?uJt  fd>It=WN6sgHRUo}WIlT?y6ވ#>2zGz?@MJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigTt (c]) 20 9 uM c os If"!r !a i n. 
WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+ZlJQI0h|G!A!1l&fL# $<EfB 9b`fJ:r'@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij mh~EA#o>"rA 5#sHD: # h4>T]]9 MTJUF%u)H׾?FȐ?F'?FǿJWc^?P6 v>JuM` ?)uJt  ُuߓIt=Wί IRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTtt(c])t20 9tuM c os If"!r !a i n.t WAl (s"Ue e v0d i!Em$+A?b6 ?AgJaۀT6` F !!l"zbWAK +6ig5V181 2?2?;:C?'4/U%GX|# |'b6̼LFO ie<59 FXG/'0U`Rb6-!}40 V+Z lJ]UhzЎ!A!Z5!O?F9&sA#<# $B %jfJ'j`00>oPaYc l]]aE:Ͷc0TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>v"rA # sHD: # h4>T]]9 MTJUFf^oO?F]?F!?Flaxy?P6 v>JuM` ?)uJt  L+It=WݑHIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+%#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'b6L5*6O ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!19b$<# $4h" 9b`fJ: m٠uۇ]0:mOc i'A~aEeכD?c  V#NeFaze<5eF?|lVaiU3Lh~EA#o>"rA #sHD: # h4>T]]9 MTJUFr,?F4w?F>WB?Ft=E?P6 v>JuM` ?)uJt  6j>It=WNFFIRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<b$] RVJ:FQwa0RS S IBf>QE2!?Fp\B?U--# i[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU~wi]Q ieq5 h2r_U"]rA #sHD: # h8>T ]]9 MTJUF_5=~?Fe,=Vݼ?FUw)٫?FQ`m ^?P6 >JuM`lW? SuJt  ?aV_MtA[_=p'MVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?/n' #M b7fJ:]h^PsojRփUaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUF<@έ?F&¬?F*5?F$_D"w?P6 >JuM` ?uJt  &lIt=WAI׃IR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ۰Z@]0?c Q#Oc Q\}ȱ~aE:)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD: # h4>T]]9 MTJUFpq?Fq֌?FeTC?FǿJWc^?P6 v>JuM` ?)uJt  0@hIt=WmiIR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!Z!姈?F9"rA #sHD: # h#4>T#3 AMJUF߳ڮ?F$ ?Fs۰Z?FNb_+iFw?P6 >JuM` ?uJt  JJ&qIt=WrhIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:rO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD: # h4>T]]9 MTJUF>=9_?FL(Ѿ?F'?FǿJWc^?P6 v>JuM` ?)uJt  V6>nIt=WgD~baIRkzIlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 $V+ZlJ]Uhz@!A!Z5?!?F9&sA #<# $B jfJ'j`00>oPaYc ?l]]|eE:Ͷǝ0TbCcba`iK8K8h<5e۰Zk(qQlqe~EeFͭ||`ai !hA'o>"rA #sHD: # h4>T]]9 MTJUFBP(?FxQ8?F!?Flaxy?P6 v>JuM` ?)uJt  9KIt=WYGJIRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD: # h4>T]]9 MTJUFMW?FM6N?F>WB?Ft=E?P6 v>JuM` ?)uJt  1|It=WƯ:IRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! 
K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<b$] RVJ:FQwa0RS S IBf>QE2!?Fp\BU--| i?[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU?wi]Q ieq5 h2r_U"rA #sHD: # h8>T ]]9 MTJUF('J?FK5+:?FUw)٫?FQ`m ^?P6 >JuM`lW? SuJt  y SksMtA[_eMVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?/n' #M b7fJ:]h^PsojRփUaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c gT#3 AMJUFB?F̒-8?F*5?F$_D"w?P6 >JuM` ?uJt  4E$pIt=WLx4IR_FIl#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ۰Z@]0?c Q#Oc Q\}ȱ~aE:)0:iG`iw*h<5a#j8oJiqze~EA#o>"]rA #sHD: # h4>T]]9 MTJUFJ*ӏ?Fc?FeTC?FǿJWc^?P6 v>JuM` ?)uJt  uIt=Wt]aIR?8IlRv\#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!Z!姈?F9"rA #sHD: # h#4>T#3 AMJUF"?F!S/?Fs۰Z?FNb_+iFw?P6 >JuM` ?uJt  B?˳It=WFp\cIRUo_}WIlTy6ވ#>2zGz?N@MJAeb #zb(#+R#+#&Jp!p!5 g`?CopyrigTt (c) 20 9 M c os f"!r !a i n. Al (s"e e v0d i!Em$+A7?b6 ?AgJaT6` F !!l"zbAٖ +6Rig5V181e ?2?;:C$?'4/U%G|# |'b6L)FO ie<59 FuXG_'0U`Rb6Z!}40 V"+ZlJQI0h|G!4A!1ظ&f# $<Ef4B 9b`fJ:rO@]0:iG`Oc qy"zeEajoJiaze<5e'f`0mFaij? mh~EA#o>"rA k#sHD: # h4>T]]9 MTJUF?F2\D?F'異JWc^?P6 ݴ>JuM` ?juJt  rs"It=WVIRkzIl?Rv\#>2zGz?@MJAebM #zWb(#+#+#&JBp!p!5 g`?CopyrigT}tZ(c)ZW20 9ZM ]c os f"R!r !a i n.Z AQl [&s"e e v0d @i!Em$+A?b6 ?AgJaT6` F !!l"zbAR +6ig5V181 ?2?;:C?'$4/U%G|# |'b6L3FO ie<59 FXG'K0U`Rb6!}4K0 V+ZlJ]Uhz!A!Z5!?F9&߮sA#`<# $B jfJ'j?`00>oPaYc l]G]aE:Ͷ0TbCcba`i?K8K8h<5e۰Zk(qׁQlqe~EeFͭ||`ai !hA'o>"]rA #sHD: # h4>T]]9 MTJUF79 Ğ?Fl_n.?F!?Flaxy?P6 v>JuM` ?)uJt  sd It=W> IRE! YBIl p#>2zGz?@M2JAe7b #z]b(#+#+#&J p!p!5 g`?CopyrigTt (wc) 20 9 M c o%s f"!r !ua i n. _ Al (Us"e e v0 d i!E$m$+Ao?Xb6 ?AgJoaT;6` F !z!l"z_bA- +6ig5V18ʜ1 ?2?;H:C?'4/U%bG|# |'1b6LSFO ie<59 FXG'0U`RŴb6!}40 DV+ZlJQI0h|G!hA!`197b$<# h$B 9b`fJ: m٠uۘ]0:mOc i'A~aEe7D?c  V#NeFaze<5eF|lVaiU3Lh~EA#o>"]rA #sHD: # h4>T]]9 MTJUF-#&\?FZ|?F>WB?Ft=E?P6 v>JuM` ?)uJt  [It=Wq.IRݮIl$dvĤ#>2zGz?@MJAebM #zT (#+#+#&Jp!p!5 g`?CopyrigTt (c) W20 9 M ]c os f"R!r !a i n. AUl (s"e e v0d @i!Em$}! K2?b6 3?ALb4(&$3/ 4/U%-|# e|'b6 ?Fiie<59 FX/G'0UBb6!}40 aFuJlJ 8>UhAA !1!Z}'- E|zo}#<b$] RVJ:FQwa0RS S IBf>QE2!?Fp\B?U--# i[F @ h<53n CU9fKo]o ڵ h5a9YUU aUU~wi]Q ieq5 h2r_U"]rA #sHD: # h8>T ]]9 MTJUFW>?F7 ?FUw)٫?FQ`m ^?P6 >JuM`lW? SuJt  /êQPͰMtA[_Y#MVYBMpry2VI'>\??\.?lFA8 J2ll/f?=@MJB& @nib#zv t#b( (+&B" ?APb)4(z$CD3//&*115 k`?CopyrigTtk(wc)k2009kM0c0o%s0f21r01ua0i0n.k_ Al @ 8UsBe0e0v @ d@1H4-,3 7%bO&&i Ab~59 &&XGK"'0U!R&!40 FJlJ Q! (sQsQ]31heA!ZU0|z3 0 @?/n' #M b7fJ:]h^PsojRփUaEQ!?!`0 0c s!R'i e:}ɟh~5aef9oa|b c g_+s +EiHG%>FM# oBH~dp@+8‘SsHqEZKQxsEm!D% 8D:(-GuE1wEv7yE;{E>~EIBX0F F2FI'4F3MhDQ ~:6FZ9Fw^H;F"bHx=Fe?Fji9AhmKDFpFFt~xHF#xJF{HLFWOF}!H)x#HB=%H+'H͑*Hu8,Hh.H|'HY)HHЧ0%Hs8hHHH)Hc~z)I~8Ij "'hߤIwI6ȨIߪIM)(I XI)QJ:אSJ"UJ'$WJZJr %\Jh^J*`JYY'bJ )eJ'gJt~@:iJaHkJmJf PK (8RK9TK}9VKXK9ZKU9]K#{(X_K&YA*(*./81((u5(X8[ZAlؗkE~~*I8LhߞXPP@TƩRWZU[l8W}_Yb[pf]ߏj(ߐmIqʙh5uܙx{|x ~( XƩ2Zێld("XO~7Ʃ٤Z(lX ɯg1UFD  h(^TYYBBUF~?x<F BP(?P? 
[Binary Visio drawing data omitted: this archive member contains non-text Visio stencil/diagram content (network-topology shapes such as Bridge, City, Ring network, Token Ring, Modem, and Fiber optic transmitter, with "Drag onto the page" hints and 2009 copyright strings). The binary payload is not representable as text and is elided here.]